From d03f220fde879509cab2ac1c73b71b7efb52b737 Mon Sep 17 00:00:00 2001
From: jbj
Date: Sat, 16 Oct 2004 01:31:54 +0000
Subject: ... and in with the New ...

CVS patchset: 7471
CVS date: 2004/10/16 01:31:54
---
db/LICENSE | 8 +- db/README | 4 +- db/btree/bt_compare.c | 18 +- db/btree/bt_conv.c | 8 +- db/btree/bt_curadj.c | 36 +- db/btree/bt_cursor.c | 292 +- db/btree/bt_delete.c | 142 +- db/btree/bt_method.c | 60 +- db/btree/bt_open.c | 121 +- db/btree/bt_put.c | 8 +- db/btree/bt_rec.c | 302 +- db/btree/bt_reclaim.c | 8 +- db/btree/bt_recno.c | 68 +- db/btree/bt_rsearch.c | 58 +- db/btree/bt_search.c | 101 +- db/btree/bt_split.c | 75 +- db/btree/bt_stat.c | 398 +- db/btree/bt_upgrade.c | 8 +- db/btree/bt_verify.c | 142 +- db/btree/btree.src | 31 +- db/btree/btree_auto.c | 1906 +-- db/btree/btree_autop.c | 514 + db/build_vxworks/BerkeleyDB20.wpj | 342 +- db/build_vxworks/BerkeleyDB20small.wpj | 3107 ++++ db/build_vxworks/BerkeleyDB20small.wsp | 29 + db/build_vxworks/BerkeleyDB22.wpj | 342 +- db/build_vxworks/BerkeleyDB22small.wpj | 3166 ++++ db/build_vxworks/BerkeleyDB22small.wsp | 29 + db/build_vxworks/db.h | 595 +- db/build_vxworks/db_archive/db_archive.c | 16 +- db/build_vxworks/db_checkpoint/db_checkpoint.c | 18 +- db/build_vxworks/db_config.h | 32 +- db/build_vxworks/db_config_small.h | 440 + db/build_vxworks/db_deadlock/db_deadlock.c | 19 +- db/build_vxworks/db_dump/db_dump.c | 141 +- db/build_vxworks/db_int.h | 190 +- db/build_vxworks/db_load/db_load.c | 103 +- db/build_vxworks/db_printlog/db_printlog.c | 126 +- db/build_vxworks/db_recover/db_recover.c | 12 +- db/build_vxworks/db_stat/db_stat.c | 1112 +- db/build_vxworks/db_upgrade/db_upgrade.c | 14 +- db/build_vxworks/db_verify/db_verify.c | 84 +- db/build_vxworks/dbdemo/dbdemo.c | 8 +- db/build_win32/db.h | 608 +- db/build_win32/db_archive.dsp | 12 +- db/build_win32/db_checkpoint.dsp | 12 +- db/build_win32/db_config.h | 34 +- db/build_win32/db_cxx.h | 1017 +- db/build_win32/db_deadlock.dsp | 12 +- db/build_win32/db_dll.dsp | 80 +- db/build_win32/db_dump.dsp | 12 +- db/build_win32/db_int.h | 190 +- db/build_win32/db_java.dsp | 16 +- db/build_win32/db_load.dsp | 12 +- db/build_win32/db_perf.dsp | 12 +- db/build_win32/db_printlog.dsp | 48 +- db/build_win32/db_recover.dsp | 12 +- db/build_win32/db_small.dsp | 72 +- db/build_win32/db_stat.dsp | 12 +- db/build_win32/db_static.dsp | 84 +- db/build_win32/db_tcl.dsp | 8 +- db/build_win32/db_test.dsp | 4 +- db/build_win32/db_upgrade.dsp | 12 +- db/build_win32/db_verify.dsp | 12 +- db/build_win32/dbkill.cpp | 4 +- db/build_win32/ex_access.dsp | 12 +- db/build_win32/ex_btrec.dsp | 12 +- db/build_win32/ex_env.dsp | 12 +- db/build_win32/ex_lock.dsp | 12 +- db/build_win32/ex_mpool.dsp | 12 +- db/build_win32/ex_repquote.dsp | 12 +- db/build_win32/ex_tpcb.dsp | 12 +- db/build_win32/excxx_access.dsp | 12 +- db/build_win32/excxx_btrec.dsp | 12 +- db/build_win32/excxx_env.dsp | 12 +- db/build_win32/excxx_lock.dsp | 12 +- db/build_win32/excxx_mpool.dsp | 12 +- db/build_win32/excxx_tpcb.dsp | 12 +- db/build_win32/include.tcl | 3 +- db/build_win32/java_dsp.src | 8 +- db/build_win32/libdb.def | 288 +- db/build_win32/libdb.rc | 10 +- db/build_win32/libdbrc.src | 2 +- db/build_win32/win_db.h | 37 +- db/build_win64/Berkeley_DB.dsw | 539 + db/build_win64/app_dsp.src | 145 + db/build_win64/build_all.dsp | 96 + db/build_win64/db.h | 2242 +++ db/build_win64/db_archive.dsp | 148 + db/build_win64/db_checkpoint.dsp | 148 + db/build_win64/db_config.h | 440 + db/build_win64/db_cxx.h | 1089 ++ 
db/build_win64/db_deadlock.dsp | 148 + db/build_win64/db_dll.dsp | 880 ++ db/build_win64/db_dump.dsp | 148 + db/build_win64/db_int.h | 588 + db/build_win64/db_java.dsp | 132 + db/build_win64/db_lib.dsp | 92 + db/build_win64/db_load.dsp | 148 + db/build_win64/db_perf.dsp | 224 + db/build_win64/db_printlog.dsp | 184 + db/build_win64/db_recover.dsp | 148 + db/build_win64/db_small.dsp | 720 + db/build_win64/db_stat.dsp | 148 + db/build_win64/db_static.dsp | 864 ++ db/build_win64/db_tcl.dsp | 148 + db/build_win64/db_test.dsp | 100 + db/build_win64/db_test.src | 97 + db/build_win64/db_upgrade.dsp | 148 + db/build_win64/db_verify.dsp | 148 + db/build_win64/dynamic_dsp.src | 93 + db/build_win64/ex_access.dsp | 148 + db/build_win64/ex_btrec.dsp | 148 + db/build_win64/ex_env.dsp | 148 + db/build_win64/ex_lock.dsp | 148 + db/build_win64/ex_mpool.dsp | 148 + db/build_win64/ex_repquote.dsp | 164 + db/build_win64/ex_repquote.src | 145 + db/build_win64/ex_tpcb.dsp | 148 + db/build_win64/excxx_access.dsp | 148 + db/build_win64/excxx_btrec.dsp | 148 + db/build_win64/excxx_env.dsp | 148 + db/build_win64/excxx_lock.dsp | 148 + db/build_win64/excxx_mpool.dsp | 148 + db/build_win64/excxx_tpcb.dsp | 148 + db/build_win64/java_dsp.src | 129 + db/build_win64/libdbrc.src | 33 + db/build_win64/small_dsp.src | 85 + db/build_win64/srcfile_dsp.src | 4 + db/build_win64/static_dsp.src | 85 + db/build_win64/tcl_dsp.src | 93 + db/build_win64/win_db.h | 94 + db/clib/getcwd.c | 8 +- db/clib/getopt.c | 8 +- db/clib/memcmp.c | 8 +- db/clib/memmove.c | 8 +- db/clib/raise.c | 8 +- db/clib/snprintf.c | 129 +- db/clib/strcasecmp.c | 6 +- db/clib/strdup.c | 6 +- db/clib/strerror.c | 8 +- db/common/crypto_stub.c | 8 +- db/common/db_byteorder.c | 8 +- db/common/db_err.c | 232 +- db/common/db_getlong.c | 8 +- db/common/db_idspace.c | 15 +- db/common/db_log2.c | 8 +- db/common/util_arg.c | 8 +- db/common/util_cache.c | 56 +- db/common/util_log.c | 8 +- db/common/util_sig.c | 8 +- db/crypto/aes_method.c | 13 +- db/crypto/crypto.c | 76 +- db/crypto/mersenne/mt19937db.c | 10 +- db/crypto/rijndael/rijndael-api-fst.c | 2 +- db/cxx/cxx_db.cpp | 111 +- db/cxx/cxx_dbc.cpp | 8 +- db/cxx/cxx_dbt.cpp | 8 +- db/cxx/cxx_env.cpp | 230 +- db/cxx/cxx_except.cpp | 151 +- db/cxx/cxx_lock.cpp | 8 +- db/cxx/cxx_logc.cpp | 10 +- db/cxx/cxx_mpool.cpp | 8 +- db/cxx/cxx_multi.cpp | 8 +- db/cxx/cxx_seq.cpp | 113 + db/cxx/cxx_txn.cpp | 42 +- db/db/crdel.src | 6 +- db/db/crdel_auto.c | 216 +- db/db/crdel_autop.c | 82 + db/db/crdel_rec.c | 20 +- db/db/db.c | 288 +- db/db/db.src | 48 +- db/db/db_am.c | 150 +- db/db/db_auto.c | 2751 ++-- db/db/db_autop.c | 626 + db/db/db_cam.c | 115 +- db/db/db_conv.c | 10 +- db/db/db_dispatch.c | 383 +- db/db/db_dup.c | 124 +- db/db/db_iface.c | 308 +- db/db/db_join.c | 67 +- db/db/db_meta.c | 155 +- db/db/db_method.c | 104 +- db/db/db_open.c | 41 +- db/db/db_overflow.c | 21 +- db/db/db_ovfl_vrfy.c | 8 +- db/db/db_pr.c | 1081 +- db/db/db_rec.c | 600 +- db/db/db_reclaim.c | 41 +- db/db/db_remove.c | 24 +- db/db/db_rename.c | 23 +- db/db/db_ret.c | 10 +- db/db/db_setid.c | 155 + db/db/db_setlsn.c | 83 + db/db/db_stati.c | 500 + db/db/db_truncate.c | 46 +- db/db/db_upg.c | 10 +- db/db/db_upg_opd.c | 50 +- db/db/db_vrfy.c | 95 +- db/db/db_vrfy_stub.c | 11 +- db/db/db_vrfyutil.c | 53 +- db/db185/db185.c | 10 +- db/db185/db185_int.in | 4 +- db/db_archive/db_archive.c | 16 +- db/db_checkpoint/db_checkpoint.c | 18 +- db/db_deadlock/db_deadlock.c | 19 +- db/db_dump/db_dump.c | 141 +- db/db_dump185/db_dump185.c | 8 +- db/db_load/db_load.c | 
103 +- db/db_printlog/README | 4 +- db/db_printlog/db_printlog.c | 126 +- db/db_printlog/rectype.awk | 6 +- db/db_printlog/status.awk | 6 +- db/db_recover/db_recover.c | 12 +- db/db_stat/db_stat.c | 1112 +- db/db_stat/dd.sh | 19 +- db/db_upgrade/db_upgrade.c | 14 +- db/db_verify/db_verify.c | 84 +- db/dbinc/btree.h | 7 +- db/dbinc/crypto.h | 4 +- db/dbinc/cxx_int.h | 26 +- db/dbinc/db.in | 580 +- db/dbinc/db_185.in | 4 +- db/dbinc/db_am.h | 39 +- db/dbinc/db_cxx.in | 1017 +- db/dbinc/db_dispatch.h | 6 +- db/dbinc/db_int.in | 190 +- db/dbinc/db_join.h | 4 +- db/dbinc/db_page.h | 22 +- db/dbinc/db_server_int.h | 4 +- db/dbinc/db_shash.h | 4 +- db/dbinc/db_swap.h | 14 +- db/dbinc/db_upgrade.h | 4 +- db/dbinc/db_verify.h | 23 +- db/dbinc/debug.h | 81 +- db/dbinc/fop.h | 4 +- db/dbinc/globals.h | 17 +- db/dbinc/hash.h | 4 +- db/dbinc/hmac.h | 4 +- db/dbinc/lock.h | 38 +- db/dbinc/log.h | 122 +- db/dbinc/mp.h | 56 +- db/dbinc/mutex.h | 72 +- db/dbinc/os.h | 32 +- db/dbinc/qam.h | 10 +- db/dbinc/region.h | 45 +- db/dbinc/rep.h | 210 +- db/dbinc/shqueue.h | 8 +- db/dbinc/tcl_db.h | 111 +- db/dbinc/txn.h | 93 +- db/dbinc/xa.h | 4 +- db/dbinc_auto/btree_auto.h | 14 + db/dbinc_auto/btree_ext.h | 43 +- db/dbinc_auto/clib_ext.h | 6 +- db/dbinc_auto/common_ext.h | 5 +- db/dbinc_auto/crypto_ext.h | 1 + db/dbinc_auto/db_auto.h | 29 +- db/dbinc_auto/db_ext.h | 112 +- db/dbinc_auto/dbreg_ext.h | 16 +- db/dbinc_auto/env_ext.h | 32 +- db/dbinc_auto/ext_def.in | 9 +- db/dbinc_auto/ext_prot.in | 9 +- db/dbinc_auto/fileops_ext.h | 18 +- db/dbinc_auto/hash_auto.h | 2 + db/dbinc_auto/hash_ext.h | 45 +- db/dbinc_auto/int_def.in | 521 +- db/dbinc_auto/lock_ext.h | 41 +- db/dbinc_auto/log_ext.h | 19 +- db/dbinc_auto/mp_ext.h | 27 +- db/dbinc_auto/os_ext.h | 17 +- db/dbinc_auto/qam_ext.h | 27 +- db/dbinc_auto/rep_auto.h | 22 + db/dbinc_auto/rep_ext.h | 33 +- db/dbinc_auto/rpc_client_ext.h | 14 +- db/dbinc_auto/rpc_server_ext.h | 70 +- db/dbinc_auto/sequence_ext.h | 17 + db/dbinc_auto/tcl_ext.h | 14 +- db/dbinc_auto/txn_auto.h | 1 + db/dbinc_auto/txn_ext.h | 27 +- db/dbinc_auto/xa_ext.h | 2 +- db/dbm/dbm.c | 27 +- db/dbreg/dbreg.c | 181 +- db/dbreg/dbreg.src | 6 +- db/dbreg/dbreg_auto.c | 223 +- db/dbreg/dbreg_autop.c | 89 + db/dbreg/dbreg_rec.c | 196 +- db/dbreg/dbreg_stat.c | 97 + db/dbreg/dbreg_util.c | 237 +- db/dist/Makefile.in | 719 +- db/dist/RELEASE | 6 +- db/dist/aclocal/libtool.ac | 550 +- db/dist/aclocal/mutex.ac | 154 +- db/dist/aclocal/options.ac | 54 +- db/dist/aclocal/programs.ac | 19 +- db/dist/aclocal/rpc.ac | 83 + db/dist/aclocal/sequence.ac | 67 + db/dist/aclocal/sosuffix.ac | 8 +- db/dist/aclocal/tcl.ac | 5 +- db/dist/aclocal/types.ac | 75 +- db/dist/buildrel | 12 +- db/dist/config.guess | 117 +- db/dist/config.hin | 32 +- db/dist/config.sub | 89 +- db/dist/configure | 15164 ++++++++++++------- db/dist/configure.ac | 377 +- db/dist/gen_rec.awk | 920 +- db/dist/gen_rpc.awk | 160 +- db/dist/ltmain.sh | 1090 +- db/dist/pubdef.in | 119 +- db/dist/s_include | 31 +- db/dist/s_java | 5 +- db/dist/s_java_const | 73 +- db/dist/s_java_stat | 413 +- db/dist/s_java_swig | 20 +- db/dist/s_je2db | 88 + db/dist/s_perm | 4 +- db/dist/s_recover | 26 +- db/dist/s_rpc | 115 +- db/dist/s_symlink | 3 +- db/dist/s_tags | 3 +- db/dist/s_test | 4 +- db/dist/s_vxworks | 196 +- db/dist/s_win32 | 48 +- db/dist/s_win32_dsp | 92 +- db/dist/srcfiles.in | 334 +- db/dist/tags | 3307 ++-- db/dist/template/db_server_proc | 13 +- db/dist/template/gen_client_ret | 11 +- db/dist/template/rec_btree | 504 +- db/dist/template/rec_db | 124 
+- db/dist/vx_2.0/BerkeleyDBsmall.wpj | 251 + db/dist/vx_2.2/BerkeleyDBsmall.wpj | 310 + db/dist/vx_config.in | 26 +- db/dist/vx_setup/LICENSE.TXT | 2 +- db/dist/win_config.in | 28 +- db/dist/win_db.in | 37 +- db/dist/win_exports.in | 80 +- db/docs/api_c/api_index.html | 347 +- db/docs/api_c/c_pindex.html | 1419 +- db/docs/api_c/db_associate.html | 60 +- db/docs/api_c/db_class.html | 32 +- db/docs/api_c/db_close.html | 26 +- db/docs/api_c/db_cursor.html | 42 +- db/docs/api_c/db_del.html | 53 +- db/docs/api_c/db_err.html | 42 +- db/docs/api_c/db_fd.html | 16 +- db/docs/api_c/db_get.html | 124 +- db/docs/api_c/db_get_byteswapped.html | 35 +- db/docs/api_c/db_get_mpf.html | 16 +- db/docs/api_c/db_get_type.html | 31 +- db/docs/api_c/db_getenv.html | 27 +- db/docs/api_c/db_join.html | 58 +- db/docs/api_c/db_key_range.html | 43 +- db/docs/api_c/db_list.html | 50 +- db/docs/api_c/db_open.html | 139 +- db/docs/api_c/db_put.html | 67 +- db/docs/api_c/db_remove.html | 49 +- db/docs/api_c/db_rename.html | 48 +- db/docs/api_c/db_set_alloc.html | 17 +- db/docs/api_c/db_set_append_recno.html | 36 +- db/docs/api_c/db_set_bt_compare.html | 30 +- db/docs/api_c/db_set_bt_minkey.html | 28 +- db/docs/api_c/db_set_bt_prefix.html | 30 +- db/docs/api_c/db_set_cachesize.html | 42 +- db/docs/api_c/db_set_dup_compare.html | 34 +- db/docs/api_c/db_set_encrypt.html | 39 +- db/docs/api_c/db_set_errcall.html | 32 +- db/docs/api_c/db_set_errfile.html | 32 +- db/docs/api_c/db_set_errpfx.html | 24 +- db/docs/api_c/db_set_feedback.html | 32 +- db/docs/api_c/db_set_flags.html | 138 +- db/docs/api_c/db_set_h_ffactor.html | 28 +- db/docs/api_c/db_set_h_hash.html | 25 +- db/docs/api_c/db_set_h_nelem.html | 28 +- db/docs/api_c/db_set_lorder.html | 28 +- db/docs/api_c/db_set_msgcall.html | 69 + db/docs/api_c/db_set_msgfile.html | 72 + db/docs/api_c/db_set_pagesize.html | 28 +- db/docs/api_c/db_set_paniccall.html | 24 +- db/docs/api_c/db_set_q_extentsize.html | 28 +- db/docs/api_c/db_set_re_delim.html | 28 +- db/docs/api_c/db_set_re_len.html | 28 +- db/docs/api_c/db_set_re_pad.html | 28 +- db/docs/api_c/db_set_re_source.html | 25 +- db/docs/api_c/db_stat.html | 158 +- db/docs/api_c/db_sync.html | 26 +- db/docs/api_c/db_truncate.html | 47 +- db/docs/api_c/db_upgrade.html | 33 +- db/docs/api_c/db_verify.html | 54 +- db/docs/api_c/dbc_class.html | 13 +- db/docs/api_c/dbc_close.html | 23 +- db/docs/api_c/dbc_count.html | 29 +- db/docs/api_c/dbc_del.html | 47 +- db/docs/api_c/dbc_dup.html | 39 +- db/docs/api_c/dbc_get.html | 184 +- db/docs/api_c/dbc_list.html | 6 +- db/docs/api_c/dbc_put.html | 70 +- db/docs/api_c/dbm.html | 11 +- db/docs/api_c/dbt_bulk.html | 25 +- db/docs/api_c/dbt_class.html | 42 +- db/docs/api_c/dbt_package.html | 23 + db/docs/api_c/env_class.html | 17 +- db/docs/api_c/env_close.html | 28 +- db/docs/api_c/env_dbremove.html | 59 +- db/docs/api_c/env_dbrename.html | 58 +- db/docs/api_c/env_err.html | 42 +- db/docs/api_c/env_list.html | 63 +- db/docs/api_c/env_open.html | 118 +- db/docs/api_c/env_remove.html | 41 +- db/docs/api_c/env_set_alloc.html | 28 +- db/docs/api_c/env_set_app_dispatch.html | 44 +- db/docs/api_c/env_set_cachesize.html | 43 +- db/docs/api_c/env_set_data_dir.html | 45 +- db/docs/api_c/env_set_encrypt.html | 39 +- db/docs/api_c/env_set_errcall.html | 32 +- db/docs/api_c/env_set_errfile.html | 32 +- db/docs/api_c/env_set_errpfx.html | 24 +- db/docs/api_c/env_set_feedback.html | 30 +- db/docs/api_c/env_set_flags.html | 137 +- db/docs/api_c/env_set_lg_bsize.html | 58 +- 
db/docs/api_c/env_set_lg_dir.html | 30 +- db/docs/api_c/env_set_lg_max.html | 55 +- db/docs/api_c/env_set_lg_regionmax.html | 28 +- db/docs/api_c/env_set_lk_conflicts.html | 45 +- db/docs/api_c/env_set_lk_detect.html | 56 +- db/docs/api_c/env_set_lk_max_lockers.html | 28 +- db/docs/api_c/env_set_lk_max_locks.html | 36 +- db/docs/api_c/env_set_lk_max_objects.html | 28 +- db/docs/api_c/env_set_mp_mmapsize.html | 33 +- db/docs/api_c/env_set_msgcall.html | 65 + db/docs/api_c/env_set_msgfile.html | 68 + db/docs/api_c/env_set_paniccall.html | 24 +- db/docs/api_c/env_set_rpc_server.html | 39 +- db/docs/api_c/env_set_shm_key.html | 28 +- db/docs/api_c/env_set_tas_spins.html | 28 +- db/docs/api_c/env_set_timeout.html | 46 +- db/docs/api_c/env_set_tmp_dir.html | 32 +- db/docs/api_c/env_set_tx_max.html | 28 +- db/docs/api_c/env_set_tx_timestamp.html | 28 +- db/docs/api_c/env_set_verbose.html | 49 +- db/docs/api_c/env_stat.html | 66 + db/docs/api_c/env_strerror.html | 18 +- db/docs/api_c/env_version.html | 25 +- db/docs/api_c/frame.html | 15 + db/docs/api_c/hsearch.html | 20 +- db/docs/api_c/lock_class.html | 13 +- db/docs/api_c/lock_detect.html | 47 +- db/docs/api_c/lock_get.html | 54 +- db/docs/api_c/lock_id.html | 24 +- db/docs/api_c/lock_id_free.html | 22 +- db/docs/api_c/lock_list.html | 34 +- db/docs/api_c/lock_put.html | 22 +- db/docs/api_c/lock_stat.html | 58 +- db/docs/api_c/lock_vec.html | 86 +- db/docs/api_c/log_archive.html | 45 +- db/docs/api_c/log_compare.html | 21 +- db/docs/api_c/log_cursor.html | 25 +- db/docs/api_c/log_file.html | 29 +- db/docs/api_c/log_flush.html | 22 +- db/docs/api_c/log_list.html | 35 +- db/docs/api_c/log_put.html | 32 +- db/docs/api_c/log_stat.html | 58 +- db/docs/api_c/logc_class.html | 13 +- db/docs/api_c/logc_close.html | 22 +- db/docs/api_c/logc_get.html | 54 +- db/docs/api_c/lsn_class.html | 13 +- db/docs/api_c/memp_fclose.html | 18 +- db/docs/api_c/memp_fcreate.html | 18 +- db/docs/api_c/memp_fget.html | 49 +- db/docs/api_c/memp_fopen.html | 66 +- db/docs/api_c/memp_fput.html | 40 +- db/docs/api_c/memp_fset.html | 36 +- db/docs/api_c/memp_fsync.html | 13 +- db/docs/api_c/memp_list.html | 48 +- db/docs/api_c/memp_maxwrite.html | 82 + db/docs/api_c/memp_openfd.html | 77 + db/docs/api_c/memp_register.html | 31 +- db/docs/api_c/memp_set_clear_len.html | 31 +- db/docs/api_c/memp_set_fileid.html | 19 +- db/docs/api_c/memp_set_flags.html | 34 +- db/docs/api_c/memp_set_ftype.html | 31 +- db/docs/api_c/memp_set_lsn_offset.html | 31 +- db/docs/api_c/memp_set_maxsize.html | 39 +- db/docs/api_c/memp_set_pgcookie.html | 31 +- db/docs/api_c/memp_set_priority.html | 28 +- db/docs/api_c/memp_stat.html | 69 +- db/docs/api_c/memp_sync.html | 22 +- db/docs/api_c/memp_trickle.html | 32 +- db/docs/api_c/mempfile_class.html | 13 +- db/docs/api_c/object.html | 32 + db/docs/api_c/pindex.src | 443 +- db/docs/api_c/rep_elect.html | 59 +- db/docs/api_c/rep_limit.html | 30 +- db/docs/api_c/rep_list.html | 20 +- db/docs/api_c/rep_message.html | 82 +- db/docs/api_c/rep_start.html | 32 +- db/docs/api_c/rep_stat.html | 71 +- db/docs/api_c/rep_transport.html | 54 +- db/docs/api_c/seq_class.html | 77 + db/docs/api_c/seq_close.html | 60 + db/docs/api_c/seq_get.html | 77 + db/docs/api_c/seq_init_value.html | 60 + db/docs/api_c/seq_list.html | 34 + db/docs/api_c/seq_open.html | 107 + db/docs/api_c/seq_remove.html | 71 + db/docs/api_c/seq_set_cachesize.html | 76 + db/docs/api_c/seq_set_flags.html | 89 + db/docs/api_c/seq_set_range.html | 79 + db/docs/api_c/seq_stat.html | 99 + 
db/docs/api_c/set_func_close.html | 16 +- db/docs/api_c/set_func_dirfree.html | 18 +- db/docs/api_c/set_func_dirlist.html | 18 +- db/docs/api_c/set_func_exists.html | 18 +- db/docs/api_c/set_func_free.html | 16 +- db/docs/api_c/set_func_fsync.html | 16 +- db/docs/api_c/set_func_ftruncate.html | 58 + db/docs/api_c/set_func_ioinfo.html | 18 +- db/docs/api_c/set_func_malloc.html | 16 +- db/docs/api_c/set_func_map.html | 18 +- db/docs/api_c/set_func_open.html | 16 +- db/docs/api_c/set_func_pread.html | 56 + db/docs/api_c/set_func_pwrite.html | 56 + db/docs/api_c/set_func_read.html | 16 +- db/docs/api_c/set_func_realloc.html | 16 +- db/docs/api_c/set_func_rename.html | 16 +- db/docs/api_c/set_func_seek.html | 27 +- db/docs/api_c/set_func_sleep.html | 18 +- db/docs/api_c/set_func_unlink.html | 16 +- db/docs/api_c/set_func_unmap.html | 18 +- db/docs/api_c/set_func_write.html | 16 +- db/docs/api_c/set_func_yield.html | 18 +- db/docs/api_c/txn_abort.html | 13 +- db/docs/api_c/txn_begin.html | 42 +- db/docs/api_c/txn_checkpoint.html | 32 +- db/docs/api_c/txn_class.html | 13 +- db/docs/api_c/txn_commit.html | 24 +- db/docs/api_c/txn_discard.html | 22 +- db/docs/api_c/txn_id.html | 13 +- db/docs/api_c/txn_list.html | 33 +- db/docs/api_c/txn_prepare.html | 29 +- db/docs/api_c/txn_recover.html | 32 +- db/docs/api_c/txn_set_timeout.html | 46 +- db/docs/api_c/txn_stat.html | 62 +- db/docs/api_cxx/api_index.html | 357 +- db/docs/api_cxx/cxx_pindex.html | 1390 +- db/docs/api_cxx/db_associate.html | 60 +- db/docs/api_cxx/db_class.html | 27 +- db/docs/api_cxx/db_close.html | 26 +- db/docs/api_cxx/db_cursor.html | 42 +- db/docs/api_cxx/db_del.html | 47 +- db/docs/api_cxx/db_err.html | 42 +- db/docs/api_cxx/db_fd.html | 16 +- db/docs/api_cxx/db_get.html | 114 +- db/docs/api_cxx/db_get_byteswapped.html | 35 +- db/docs/api_cxx/db_get_mpf.html | 16 +- db/docs/api_cxx/db_get_type.html | 31 +- db/docs/api_cxx/db_getenv.html | 28 +- db/docs/api_cxx/db_join.html | 58 +- db/docs/api_cxx/db_key_range.html | 37 +- db/docs/api_cxx/db_list.html | 54 +- db/docs/api_cxx/db_open.html | 133 +- db/docs/api_cxx/db_put.html | 61 +- db/docs/api_cxx/db_remove.html | 49 +- db/docs/api_cxx/db_rename.html | 48 +- db/docs/api_cxx/db_set_alloc.html | 17 +- db/docs/api_cxx/db_set_append_recno.html | 36 +- db/docs/api_cxx/db_set_bt_compare.html | 30 +- db/docs/api_cxx/db_set_bt_minkey.html | 28 +- db/docs/api_cxx/db_set_bt_prefix.html | 30 +- db/docs/api_cxx/db_set_cachesize.html | 42 +- db/docs/api_cxx/db_set_dup_compare.html | 34 +- db/docs/api_cxx/db_set_encrypt.html | 39 +- db/docs/api_cxx/db_set_errcall.html | 32 +- db/docs/api_cxx/db_set_errfile.html | 33 +- db/docs/api_cxx/db_set_error_stream.html | 30 +- db/docs/api_cxx/db_set_errpfx.html | 24 +- db/docs/api_cxx/db_set_feedback.html | 32 +- db/docs/api_cxx/db_set_flags.html | 138 +- db/docs/api_cxx/db_set_h_ffactor.html | 28 +- db/docs/api_cxx/db_set_h_hash.html | 25 +- db/docs/api_cxx/db_set_h_nelem.html | 28 +- db/docs/api_cxx/db_set_lorder.html | 28 +- db/docs/api_cxx/db_set_msg_stream.html | 64 + db/docs/api_cxx/db_set_msgcall.html | 70 + db/docs/api_cxx/db_set_msgfile.html | 72 + db/docs/api_cxx/db_set_pagesize.html | 28 +- db/docs/api_cxx/db_set_paniccall.html | 24 +- db/docs/api_cxx/db_set_q_extentsize.html | 28 +- db/docs/api_cxx/db_set_re_delim.html | 28 +- db/docs/api_cxx/db_set_re_len.html | 28 +- db/docs/api_cxx/db_set_re_pad.html | 28 +- db/docs/api_cxx/db_set_re_source.html | 25 +- db/docs/api_cxx/db_stat.html | 159 +- db/docs/api_cxx/db_sync.html | 26 +- 
db/docs/api_cxx/db_truncate.html | 41 +- db/docs/api_cxx/db_upgrade.html | 33 +- db/docs/api_cxx/db_verify.html | 54 +- db/docs/api_cxx/dbc_class.html | 13 +- db/docs/api_cxx/dbc_close.html | 17 +- db/docs/api_cxx/dbc_count.html | 29 +- db/docs/api_cxx/dbc_del.html | 41 +- db/docs/api_cxx/dbc_dup.html | 39 +- db/docs/api_cxx/dbc_get.html | 178 +- db/docs/api_cxx/dbc_list.html | 6 +- db/docs/api_cxx/dbc_put.html | 64 +- db/docs/api_cxx/dbt_bulk_class.html | 49 +- db/docs/api_cxx/dbt_class.html | 104 +- db/docs/api_cxx/dbt_package.html | 22 + db/docs/api_cxx/deadlock_class.html | 14 +- db/docs/api_cxx/env_class.html | 19 +- db/docs/api_cxx/env_close.html | 28 +- db/docs/api_cxx/env_dbremove.html | 53 +- db/docs/api_cxx/env_dbrename.html | 52 +- db/docs/api_cxx/env_err.html | 42 +- db/docs/api_cxx/env_list.html | 64 +- db/docs/api_cxx/env_open.html | 118 +- db/docs/api_cxx/env_remove.html | 41 +- db/docs/api_cxx/env_set_alloc.html | 28 +- db/docs/api_cxx/env_set_app_dispatch.html | 44 +- db/docs/api_cxx/env_set_cachesize.html | 43 +- db/docs/api_cxx/env_set_data_dir.html | 45 +- db/docs/api_cxx/env_set_encrypt.html | 39 +- db/docs/api_cxx/env_set_errcall.html | 32 +- db/docs/api_cxx/env_set_errfile.html | 33 +- db/docs/api_cxx/env_set_error_stream.html | 30 +- db/docs/api_cxx/env_set_errpfx.html | 24 +- db/docs/api_cxx/env_set_feedback.html | 30 +- db/docs/api_cxx/env_set_flags.html | 137 +- db/docs/api_cxx/env_set_lg_bsize.html | 58 +- db/docs/api_cxx/env_set_lg_dir.html | 30 +- db/docs/api_cxx/env_set_lg_max.html | 55 +- db/docs/api_cxx/env_set_lg_regionmax.html | 28 +- db/docs/api_cxx/env_set_lk_conflicts.html | 43 +- db/docs/api_cxx/env_set_lk_detect.html | 56 +- db/docs/api_cxx/env_set_lk_max_lockers.html | 28 +- db/docs/api_cxx/env_set_lk_max_locks.html | 36 +- db/docs/api_cxx/env_set_lk_max_objects.html | 28 +- db/docs/api_cxx/env_set_mp_mmapsize.html | 33 +- db/docs/api_cxx/env_set_msg_stream.html | 60 + db/docs/api_cxx/env_set_msgcall.html | 66 + db/docs/api_cxx/env_set_msgfile.html | 70 + db/docs/api_cxx/env_set_paniccall.html | 24 +- db/docs/api_cxx/env_set_rpc_server.html | 39 +- db/docs/api_cxx/env_set_shm_key.html | 28 +- db/docs/api_cxx/env_set_tas_spins.html | 28 +- db/docs/api_cxx/env_set_timeout.html | 46 +- db/docs/api_cxx/env_set_tmp_dir.html | 32 +- db/docs/api_cxx/env_set_tx_max.html | 28 +- db/docs/api_cxx/env_set_tx_timestamp.html | 28 +- db/docs/api_cxx/env_set_verbose.html | 49 +- db/docs/api_cxx/env_stat.html | 67 + db/docs/api_cxx/env_strerror.html | 18 +- db/docs/api_cxx/env_version.html | 25 +- db/docs/api_cxx/exc_package.html | 23 + db/docs/api_cxx/except_class.html | 27 +- db/docs/api_cxx/frame.html | 15 + db/docs/api_cxx/lock_class.html | 13 +- db/docs/api_cxx/lock_detect.html | 47 +- db/docs/api_cxx/lock_get.html | 42 +- db/docs/api_cxx/lock_id.html | 24 +- db/docs/api_cxx/lock_id_free.html | 22 +- db/docs/api_cxx/lock_list.html | 34 +- db/docs/api_cxx/lock_put.html | 22 +- db/docs/api_cxx/lock_stat.html | 59 +- db/docs/api_cxx/lock_vec.html | 74 +- db/docs/api_cxx/lockng_class.html | 58 +- db/docs/api_cxx/log_archive.html | 45 +- db/docs/api_cxx/log_compare.html | 21 +- db/docs/api_cxx/log_cursor.html | 25 +- db/docs/api_cxx/log_file.html | 36 +- db/docs/api_cxx/log_flush.html | 22 +- db/docs/api_cxx/log_list.html | 35 +- db/docs/api_cxx/log_put.html | 32 +- db/docs/api_cxx/log_stat.html | 59 +- db/docs/api_cxx/logc_class.html | 13 +- db/docs/api_cxx/logc_close.html | 22 +- db/docs/api_cxx/logc_get.html | 54 +- db/docs/api_cxx/lsn_class.html | 13 +- 
db/docs/api_cxx/memp_class.html | 20 +- db/docs/api_cxx/memp_fclose.html | 18 +- db/docs/api_cxx/memp_fcreate.html | 18 +- db/docs/api_cxx/memp_fget.html | 47 +- db/docs/api_cxx/memp_fopen.html | 64 +- db/docs/api_cxx/memp_fput.html | 40 +- db/docs/api_cxx/memp_fset.html | 36 +- db/docs/api_cxx/memp_fsync.html | 13 +- db/docs/api_cxx/memp_list.html | 48 +- db/docs/api_cxx/memp_maxwrite.html | 87 + db/docs/api_cxx/memp_openfd.html | 82 + db/docs/api_cxx/memp_register.html | 31 +- db/docs/api_cxx/memp_set_clear_len.html | 32 +- db/docs/api_cxx/memp_set_fileid.html | 19 +- db/docs/api_cxx/memp_set_flags.html | 34 +- db/docs/api_cxx/memp_set_ftype.html | 32 +- db/docs/api_cxx/memp_set_lsn_offset.html | 32 +- db/docs/api_cxx/memp_set_maxsize.html | 37 +- db/docs/api_cxx/memp_set_pgcookie.html | 32 +- db/docs/api_cxx/memp_set_priority.html | 28 +- db/docs/api_cxx/memp_stat.html | 75 +- db/docs/api_cxx/memp_sync.html | 22 +- db/docs/api_cxx/memp_trickle.html | 36 +- db/docs/api_cxx/mempfile_class.html | 13 +- db/docs/api_cxx/object.html | 33 + db/docs/api_cxx/pindex.src | 342 +- db/docs/api_cxx/rep_elect.html | 60 +- db/docs/api_cxx/rep_limit.html | 30 +- db/docs/api_cxx/rep_list.html | 20 +- db/docs/api_cxx/rep_message.html | 83 +- db/docs/api_cxx/rep_start.html | 32 +- db/docs/api_cxx/rep_stat.html | 72 +- db/docs/api_cxx/rep_transport.html | 54 +- db/docs/api_cxx/runrec_class.html | 16 +- db/docs/api_cxx/seq_class.html | 71 + db/docs/api_cxx/seq_close.html | 64 + db/docs/api_cxx/seq_get.html | 76 + db/docs/api_cxx/seq_init_value.html | 64 + db/docs/api_cxx/seq_list.html | 34 + db/docs/api_cxx/seq_open.html | 108 + db/docs/api_cxx/seq_remove.html | 75 + db/docs/api_cxx/seq_set_cachesize.html | 80 + db/docs/api_cxx/seq_set_flags.html | 93 + db/docs/api_cxx/seq_set_range.html | 83 + db/docs/api_cxx/seq_stat.html | 100 + db/docs/api_cxx/txn_abort.html | 13 +- db/docs/api_cxx/txn_begin.html | 40 +- db/docs/api_cxx/txn_checkpoint.html | 32 +- db/docs/api_cxx/txn_class.html | 13 +- db/docs/api_cxx/txn_commit.html | 24 +- db/docs/api_cxx/txn_discard.html | 22 +- db/docs/api_cxx/txn_id.html | 13 +- db/docs/api_cxx/txn_list.html | 33 +- db/docs/api_cxx/txn_prepare.html | 29 +- db/docs/api_cxx/txn_recover.html | 32 +- db/docs/api_cxx/txn_set_timeout.html | 46 +- db/docs/api_cxx/txn_stat.html | 63 +- db/docs/api_tcl/db_close.html | 17 +- db/docs/api_tcl/db_count.html | 13 +- db/docs/api_tcl/db_cursor.html | 17 +- db/docs/api_tcl/db_del.html | 21 +- db/docs/api_tcl/db_get.html | 33 +- db/docs/api_tcl/db_get_join.html | 17 +- db/docs/api_tcl/db_get_type.html | 13 +- db/docs/api_tcl/db_is_byteswapped.html | 13 +- db/docs/api_tcl/db_join.html | 13 +- db/docs/api_tcl/db_open.html | 83 +- db/docs/api_tcl/db_put.html | 27 +- db/docs/api_tcl/db_remove.html | 25 +- db/docs/api_tcl/db_rename.html | 25 +- db/docs/api_tcl/db_stat.html | 17 +- db/docs/api_tcl/db_sync.html | 13 +- db/docs/api_tcl/db_truncate.html | 19 +- db/docs/api_tcl/dbc_close.html | 13 +- db/docs/api_tcl/dbc_del.html | 13 +- db/docs/api_tcl/dbc_dup.html | 17 +- db/docs/api_tcl/dbc_get.html | 47 +- db/docs/api_tcl/dbc_put.html | 30 +- db/docs/api_tcl/env_close.html | 13 +- db/docs/api_tcl/env_dbremove.html | 19 +- db/docs/api_tcl/env_dbrename.html | 19 +- db/docs/api_tcl/env_open.html | 53 +- db/docs/api_tcl/env_remove.html | 29 +- db/docs/api_tcl/tcl_index.html | 8 +- db/docs/api_tcl/txn.html | 23 +- db/docs/api_tcl/txn_abort.html | 13 +- db/docs/api_tcl/txn_checkpoint.html | 21 +- db/docs/api_tcl/txn_commit.html | 19 +- 
db/docs/api_tcl/version.html | 17 +- db/docs/collections/tutorial/BasicProgram.html | 457 + .../tutorial/BerkeleyDB-Java-Collections.pdf | Bin 0 -> 461364 bytes db/docs/collections/tutorial/Entity.html | 359 + .../collections/tutorial/SerializableEntity.html | 348 + .../tutorial/SerializedObjectStorage.html | 84 + db/docs/collections/tutorial/Summary.html | 193 + db/docs/collections/tutorial/Tuple.html | 212 + .../collections/tutorial/UsingCollectionsAPI.html | 637 + db/docs/collections/tutorial/UsingSecondaries.html | 457 + .../tutorial/UsingStoredCollections.html | 804 + .../collections/tutorial/addingdatabaseitems.html | 230 + .../collections/tutorial/collectionOverview.html | 473 + .../tutorial/collectionswithentities.html | 164 + .../tutorial/createbindingscollections.html | 283 + .../tutorial/creatingentitybindings.html | 268 + db/docs/collections/tutorial/developing.html | 216 + .../tutorial/entitieswithcollections.html | 252 + db/docs/collections/tutorial/gettingStarted.css | 41 + .../collections/tutorial/handlingexceptions.html | 215 + db/docs/collections/tutorial/implementingmain.html | 251 + db/docs/collections/tutorial/index.html | 525 + .../collections/tutorial/indexedcollections.html | 248 + db/docs/collections/tutorial/intro.html | 220 + db/docs/collections/tutorial/openclasscatalog.html | 208 + db/docs/collections/tutorial/opendatabases.html | 170 + .../collections/tutorial/opendbenvironment.html | 200 + .../collections/tutorial/openingforeignkeys.html | 237 + db/docs/collections/tutorial/preface.html | 129 + .../tutorial/removingredundantvalueclasses.html | 130 + .../collections/tutorial/retrievingbyindexkey.html | 287 + .../tutorial/retrievingdatabaseitems.html | 249 + .../collections/tutorial/sortedcollections.html | 149 + .../tutorial/transientfieldsinbinding.html | 177 + .../tutorial/tuple-serialentitybindings.html | 197 + db/docs/collections/tutorial/tuplekeybindings.html | 220 + .../tutorial/tupleswithkeycreators.html | 206 + .../collections/tutorial/tutorialintroduction.html | 407 + .../collections/tutorial/usingtransactions.html | 221 + db/docs/gsg/C/BerkeleyDB-Core-C-GSG.pdf | Bin 0 -> 467269 bytes db/docs/gsg/C/CoreCursorUsage.html | 360 + db/docs/gsg/C/CoreDBAdmin.html | 149 + db/docs/gsg/C/CoreDbUsage.html | 299 + db/docs/gsg/C/CoreEnvUsage.html | 156 + db/docs/gsg/C/Cursors.html | 180 + db/docs/gsg/C/DB.html | 192 + db/docs/gsg/C/DBEntry.html | 232 + db/docs/gsg/C/DBOpenFlags.html | 118 + db/docs/gsg/C/DbUsage.html | 478 + db/docs/gsg/C/DeleteEntryWCursor.html | 109 + db/docs/gsg/C/Positioning.html | 574 + db/docs/gsg/C/PutEntryWCursor.html | 224 + db/docs/gsg/C/ReplacingEntryWCursor.html | 137 + db/docs/gsg/C/accessmethods.html | 281 + db/docs/gsg/C/btree.html | 572 + db/docs/gsg/C/cachesize.html | 98 + db/docs/gsg/C/concepts.html | 168 + db/docs/gsg/C/coredbclose.html | 106 + db/docs/gsg/C/coreindexusage.html | 728 + db/docs/gsg/C/cstructs.html | 324 + db/docs/gsg/C/databaseLimits.html | 81 + db/docs/gsg/C/dbErrorReporting.html | 204 + db/docs/gsg/C/dbconfig.html | 401 + db/docs/gsg/C/environments.html | 150 + db/docs/gsg/C/gettingStarted.css | 41 + db/docs/gsg/C/gettingit.html | 77 + db/docs/gsg/C/index.html | 477 + db/docs/gsg/C/indexes.html | 357 + db/docs/gsg/C/introduction.html | 229 + db/docs/gsg/C/joins.html | 304 + db/docs/gsg/C/keyCreator.html | 149 + db/docs/gsg/C/preface.html | 138 + db/docs/gsg/C/readSecondary.html | 140 + db/docs/gsg/C/secondaryCursor.html | 158 + db/docs/gsg/C/secondaryDelete.html | 210 + db/docs/gsg/C/usingDbt.html | 397 + 
db/docs/gsg/CXX/BerkeleyDB-Core-Cxx-GSG.pdf | Bin 0 -> 440972 bytes db/docs/gsg/CXX/CoreCursorUsage.html | 293 + db/docs/gsg/CXX/CoreDBAdmin.html | 147 + db/docs/gsg/CXX/CoreDbCXXUsage.html | 191 + db/docs/gsg/CXX/CoreEnvUsage.html | 165 + db/docs/gsg/CXX/Cursors.html | 179 + db/docs/gsg/CXX/DB.html | 182 + db/docs/gsg/CXX/DBEntry.html | 191 + db/docs/gsg/CXX/DBOpenFlags.html | 118 + db/docs/gsg/CXX/DbCXXUsage.html | 530 + db/docs/gsg/CXX/DeleteEntryWCursor.html | 110 + db/docs/gsg/CXX/Positioning.html | 582 + db/docs/gsg/CXX/PutEntryWCursor.html | 222 + db/docs/gsg/CXX/ReplacingEntryWCursor.html | 138 + db/docs/gsg/CXX/accessmethods.html | 281 + db/docs/gsg/CXX/btree.html | 547 + db/docs/gsg/CXX/cachesize.html | 98 + db/docs/gsg/CXX/concepts.html | 168 + db/docs/gsg/CXX/coreExceptions.html | 118 + db/docs/gsg/CXX/coredbclose.html | 109 + db/docs/gsg/CXX/coreindexusage.html | 560 + db/docs/gsg/CXX/databaseLimits.html | 81 + db/docs/gsg/CXX/dbErrorReporting.html | 211 + db/docs/gsg/CXX/dbconfig.html | 401 + db/docs/gsg/CXX/environments.html | 150 + db/docs/gsg/CXX/gettingStarted.css | 41 + db/docs/gsg/CXX/gettingit.html | 77 + db/docs/gsg/CXX/index.html | 465 + db/docs/gsg/CXX/indexes.html | 327 + db/docs/gsg/CXX/introduction.html | 234 + db/docs/gsg/CXX/joins.html | 282 + db/docs/gsg/CXX/keyCreator.html | 147 + db/docs/gsg/CXX/preface.html | 144 + db/docs/gsg/CXX/readSecondary.html | 133 + db/docs/gsg/CXX/secondaryCursor.html | 154 + db/docs/gsg/CXX/secondaryDelete.html | 181 + db/docs/gsg/CXX/usingDbt.html | 370 + db/docs/gsg/JAVA/BerkeleyDB-Core-JAVA-GSG.pdf | Bin 0 -> 502662 bytes db/docs/gsg/JAVA/CoreEnvUsage.html | 158 + db/docs/gsg/JAVA/CoreJavaUsage.html | 183 + db/docs/gsg/JAVA/Cursors.html | 194 + db/docs/gsg/JAVA/DB.html | 182 + db/docs/gsg/JAVA/DBAdmin.html | 120 + db/docs/gsg/JAVA/DBEntry.html | 250 + db/docs/gsg/JAVA/DeleteEntryWCursor.html | 111 + db/docs/gsg/JAVA/Positioning.html | 599 + db/docs/gsg/JAVA/PutEntryWCursor.html | 209 + db/docs/gsg/JAVA/ReplacingEntryWCursor.html | 132 + db/docs/gsg/JAVA/accessmethods.html | 281 + db/docs/gsg/JAVA/bindAPI.html | 760 + db/docs/gsg/JAVA/btree.html | 540 + db/docs/gsg/JAVA/cachesize.html | 98 + db/docs/gsg/JAVA/concepts.html | 168 + db/docs/gsg/JAVA/coreExceptions.html | 102 + db/docs/gsg/JAVA/coredbclose.html | 103 + db/docs/gsg/JAVA/cursorJavaUsage.html | 278 + db/docs/gsg/JAVA/databaseLimits.html | 81 + db/docs/gsg/JAVA/dbErrorReporting.html | 147 + db/docs/gsg/JAVA/db_config.html | 175 + db/docs/gsg/JAVA/dbconfig.html | 399 + db/docs/gsg/JAVA/dbtJavaUsage.html | 703 + db/docs/gsg/JAVA/environments.html | 150 + db/docs/gsg/JAVA/gettingStarted.css | 41 + db/docs/gsg/JAVA/gettingit.html | 77 + db/docs/gsg/JAVA/index.html | 503 + db/docs/gsg/JAVA/indexes.html | 379 + db/docs/gsg/JAVA/introduction.html | 234 + db/docs/gsg/JAVA/javaindexusage.html | 498 + db/docs/gsg/JAVA/joins.html | 366 + db/docs/gsg/JAVA/keyCreator.html | 244 + db/docs/gsg/JAVA/preface.html | 142 + db/docs/gsg/JAVA/readSecondary.html | 124 + db/docs/gsg/JAVA/returns.html | 81 + db/docs/gsg/JAVA/secondaryCursor.html | 158 + db/docs/gsg/JAVA/secondaryDelete.html | 158 + db/docs/gsg/JAVA/secondaryProps.html | 92 + db/docs/gsg/JAVA/usingDbt.html | 421 + db/docs/index.html | 26 +- db/docs/java/allclasses-frame.html | 273 +- db/docs/java/allclasses-noframe.html | 273 +- .../java/com/sleepycat/bind/ByteArrayBinding.html | 296 + db/docs/java/com/sleepycat/bind/EntityBinding.html | 270 + db/docs/java/com/sleepycat/bind/EntryBinding.html | 245 + 
.../com/sleepycat/bind/RecordNumberBinding.html | 354 + .../sleepycat/bind/class-use/ByteArrayBinding.html | 136 + .../sleepycat/bind/class-use/EntityBinding.html | 321 + .../com/sleepycat/bind/class-use/EntryBinding.html | 475 + .../bind/class-use/RecordNumberBinding.html | 136 + db/docs/java/com/sleepycat/bind/package-frame.html | 47 + .../java/com/sleepycat/bind/package-summary.html | 189 + db/docs/java/com/sleepycat/bind/package-tree.html | 152 + db/docs/java/com/sleepycat/bind/package-use.html | 239 + .../com/sleepycat/bind/serial/ClassCatalog.html | 302 + .../com/sleepycat/bind/serial/SerialBinding.html | 338 + .../com/sleepycat/bind/serial/SerialInput.html | 303 + .../com/sleepycat/bind/serial/SerialOutput.html | 330 + .../sleepycat/bind/serial/SerialSerialBinding.html | 435 + .../bind/serial/SerialSerialKeyCreator.html | 411 + .../sleepycat/bind/serial/StoredClassCatalog.html | 354 + .../sleepycat/bind/serial/TupleSerialBinding.html | 437 + .../bind/serial/TupleSerialKeyCreator.html | 415 + .../bind/serial/TupleSerialMarshalledBinding.html | 370 + .../serial/TupleSerialMarshalledKeyCreator.html | 330 + .../bind/serial/class-use/ClassCatalog.html | 283 + .../bind/serial/class-use/SerialBinding.html | 197 + .../bind/serial/class-use/SerialInput.html | 136 + .../bind/serial/class-use/SerialOutput.html | 136 + .../bind/serial/class-use/SerialSerialBinding.html | 136 + .../serial/class-use/SerialSerialKeyCreator.html | 136 + .../bind/serial/class-use/StoredClassCatalog.html | 136 + .../bind/serial/class-use/TupleSerialBinding.html | 173 + .../serial/class-use/TupleSerialKeyCreator.html | 172 + .../class-use/TupleSerialMarshalledBinding.html | 171 + .../class-use/TupleSerialMarshalledKeyCreator.html | 175 + .../com/sleepycat/bind/serial/package-frame.html | 61 + .../com/sleepycat/bind/serial/package-summary.html | 220 + .../com/sleepycat/bind/serial/package-tree.html | 170 + .../com/sleepycat/bind/serial/package-use.html | 217 + .../com/sleepycat/bind/tuple/BooleanBinding.html | 387 + .../java/com/sleepycat/bind/tuple/ByteBinding.html | 387 + .../com/sleepycat/bind/tuple/CharacterBinding.html | 387 + .../com/sleepycat/bind/tuple/DoubleBinding.html | 387 + .../com/sleepycat/bind/tuple/FloatBinding.html | 387 + .../com/sleepycat/bind/tuple/IntegerBinding.html | 387 + .../java/com/sleepycat/bind/tuple/LongBinding.html | 387 + .../sleepycat/bind/tuple/MarshalledTupleEntry.html | 252 + .../bind/tuple/MarshalledTupleKeyEntity.html | 305 + .../com/sleepycat/bind/tuple/ShortBinding.html | 387 + .../com/sleepycat/bind/tuple/StringBinding.html | 386 + .../com/sleepycat/bind/tuple/TupleBinding.html | 537 + .../java/com/sleepycat/bind/tuple/TupleInput.html | 861 ++ .../sleepycat/bind/tuple/TupleInputBinding.html | 300 + .../bind/tuple/TupleMarshalledBinding.html | 314 + .../java/com/sleepycat/bind/tuple/TupleOutput.html | 759 + .../sleepycat/bind/tuple/TupleTupleBinding.html | 420 + .../sleepycat/bind/tuple/TupleTupleKeyCreator.html | 383 + .../bind/tuple/TupleTupleMarshalledBinding.html | 351 + .../bind/tuple/TupleTupleMarshalledKeyCreator.html | 329 + .../bind/tuple/class-use/BooleanBinding.html | 136 + .../bind/tuple/class-use/ByteBinding.html | 136 + .../bind/tuple/class-use/CharacterBinding.html | 136 + .../bind/tuple/class-use/DoubleBinding.html | 136 + .../bind/tuple/class-use/FloatBinding.html | 136 + .../bind/tuple/class-use/IntegerBinding.html | 136 + .../bind/tuple/class-use/LongBinding.html | 136 + .../bind/tuple/class-use/MarshalledTupleEntry.html | 136 + 
.../tuple/class-use/MarshalledTupleKeyEntity.html | 136 + .../bind/tuple/class-use/ShortBinding.html | 136 + .../bind/tuple/class-use/StringBinding.html | 136 + .../bind/tuple/class-use/TupleBinding.html | 269 + .../sleepycat/bind/tuple/class-use/TupleInput.html | 414 + .../bind/tuple/class-use/TupleInputBinding.html | 136 + .../tuple/class-use/TupleMarshalledBinding.html | 136 + .../bind/tuple/class-use/TupleOutput.html | 615 + .../bind/tuple/class-use/TupleTupleBinding.html | 174 + .../bind/tuple/class-use/TupleTupleKeyCreator.html | 172 + .../class-use/TupleTupleMarshalledBinding.html | 171 + .../class-use/TupleTupleMarshalledKeyCreator.html | 136 + .../com/sleepycat/bind/tuple/package-frame.html | 79 + .../com/sleepycat/bind/tuple/package-summary.html | 267 + .../com/sleepycat/bind/tuple/package-tree.html | 168 + .../java/com/sleepycat/bind/tuple/package-use.html | 225 + .../sleepycat/collections/CurrentTransaction.html | 387 + .../sleepycat/collections/MapEntryParameter.html | 408 + .../sleepycat/collections/PrimaryKeyAssigner.html | 224 + .../sleepycat/collections/StoredCollection.html | 665 + .../sleepycat/collections/StoredCollections.html | 407 + .../com/sleepycat/collections/StoredContainer.html | 599 + .../com/sleepycat/collections/StoredEntrySet.html | 384 + .../com/sleepycat/collections/StoredIterator.html | 714 + .../com/sleepycat/collections/StoredKeySet.html | 395 + .../java/com/sleepycat/collections/StoredList.html | 895 ++ .../java/com/sleepycat/collections/StoredMap.html | 837 + .../collections/StoredSortedEntrySet.html | 555 + .../sleepycat/collections/StoredSortedKeySet.html | 594 + .../com/sleepycat/collections/StoredSortedMap.html | 686 + .../collections/StoredSortedValueSet.html | 593 + .../com/sleepycat/collections/StoredValueSet.html | 449 + .../sleepycat/collections/TransactionRunner.html | 501 + .../sleepycat/collections/TransactionWorker.html | 223 + .../sleepycat/collections/TupleSerialFactory.html | 354 + .../collections/class-use/CurrentTransaction.html | 174 + .../collections/class-use/MapEntryParameter.html | 136 + .../collections/class-use/PrimaryKeyAssigner.html | 217 + .../collections/class-use/StoredCollection.html | 241 + .../collections/class-use/StoredCollections.html | 136 + .../collections/class-use/StoredContainer.html | 268 + .../collections/class-use/StoredEntrySet.html | 173 + .../collections/class-use/StoredIterator.html | 185 + .../collections/class-use/StoredKeySet.html | 174 + .../collections/class-use/StoredList.html | 136 + .../sleepycat/collections/class-use/StoredMap.html | 192 + .../class-use/StoredSortedEntrySet.html | 136 + .../collections/class-use/StoredSortedKeySet.html | 136 + .../collections/class-use/StoredSortedMap.html | 176 + .../class-use/StoredSortedValueSet.html | 136 + .../collections/class-use/StoredValueSet.html | 174 + .../collections/class-use/TransactionRunner.html | 136 + .../collections/class-use/TransactionWorker.html | 174 + .../collections/class-use/TupleSerialFactory.html | 136 + .../com/sleepycat/collections/package-frame.html | 77 + .../com/sleepycat/collections/package-summary.html | 257 + .../com/sleepycat/collections/package-tree.html | 175 + .../com/sleepycat/collections/package-use.html | 226 + .../com/sleepycat/db/BtreePrefixCalculator.html | 223 + db/docs/java/com/sleepycat/db/BtreeStats.html | 740 + db/docs/java/com/sleepycat/db/CacheFile.html | 475 + .../java/com/sleepycat/db/CacheFilePriority.html | 310 + db/docs/java/com/sleepycat/db/CacheFileStats.html | 401 + 
db/docs/java/com/sleepycat/db/CacheStats.html | 965 ++ .../java/com/sleepycat/db/CheckpointConfig.html | 422 + db/docs/java/com/sleepycat/db/Cursor.html | 1605 ++ db/docs/java/com/sleepycat/db/CursorConfig.html | 520 + db/docs/java/com/sleepycat/db/Database.html | 1671 ++ db/docs/java/com/sleepycat/db/DatabaseConfig.html | 3445 +++++ db/docs/java/com/sleepycat/db/DatabaseEntry.html | 956 ++ .../java/com/sleepycat/db/DatabaseException.html | 285 + db/docs/java/com/sleepycat/db/DatabaseStats.html | 200 + db/docs/java/com/sleepycat/db/DatabaseType.html | 316 + .../java/com/sleepycat/db/DeadlockException.html | 230 + db/docs/java/com/sleepycat/db/Environment.html | 1893 +++ .../java/com/sleepycat/db/EnvironmentConfig.html | 5278 +++++++ db/docs/java/com/sleepycat/db/ErrorHandler.html | 242 + db/docs/java/com/sleepycat/db/FeedbackHandler.html | 284 + db/docs/java/com/sleepycat/db/HashStats.html | 604 + db/docs/java/com/sleepycat/db/Hasher.html | 228 + db/docs/java/com/sleepycat/db/JoinConfig.html | 360 + db/docs/java/com/sleepycat/db/JoinCursor.html | 438 + db/docs/java/com/sleepycat/db/KeyRange.html | 307 + db/docs/java/com/sleepycat/db/Lock.html | 199 + db/docs/java/com/sleepycat/db/LockDetectMode.html | 414 + db/docs/java/com/sleepycat/db/LockMode.html | 301 + .../com/sleepycat/db/LockNotGrantedException.html | 366 + db/docs/java/com/sleepycat/db/LockOperation.html | 354 + db/docs/java/com/sleepycat/db/LockRequest.html | 497 + db/docs/java/com/sleepycat/db/LockRequestMode.html | 344 + db/docs/java/com/sleepycat/db/LockStats.html | 742 + db/docs/java/com/sleepycat/db/LogCursor.html | 463 + .../java/com/sleepycat/db/LogRecordHandler.html | 222 + .../java/com/sleepycat/db/LogSequenceNumber.html | 340 + db/docs/java/com/sleepycat/db/LogStats.html | 684 + db/docs/java/com/sleepycat/db/MemoryException.html | 268 + db/docs/java/com/sleepycat/db/MessageHandler.html | 232 + .../java/com/sleepycat/db/MultipleDataEntry.html | 334 + db/docs/java/com/sleepycat/db/MultipleEntry.html | 273 + .../com/sleepycat/db/MultipleKeyDataEntry.html | 334 + .../com/sleepycat/db/MultipleRecnoDataEntry.html | 338 + db/docs/java/com/sleepycat/db/OperationStatus.html | 294 + db/docs/java/com/sleepycat/db/PanicHandler.html | 235 + .../java/com/sleepycat/db/PreparedTransaction.html | 261 + db/docs/java/com/sleepycat/db/QueueStats.html | 523 + .../com/sleepycat/db/RecordNumberAppender.html | 242 + .../java/com/sleepycat/db/RecoveryOperation.html | 370 + .../db/ReplicationHandleDeadException.html | 230 + .../java/com/sleepycat/db/ReplicationStats.html | 1149 ++ .../java/com/sleepycat/db/ReplicationStatus.html | 482 + .../com/sleepycat/db/ReplicationTransport.html | 325 + .../com/sleepycat/db/RunRecoveryException.html | 228 + db/docs/java/com/sleepycat/db/SecondaryConfig.html | 443 + db/docs/java/com/sleepycat/db/SecondaryCursor.html | 1176 ++ .../java/com/sleepycat/db/SecondaryDatabase.html | 617 + .../java/com/sleepycat/db/SecondaryKeyCreator.html | 274 + db/docs/java/com/sleepycat/db/Sequence.html | 372 + db/docs/java/com/sleepycat/db/SequenceConfig.html | 779 + db/docs/java/com/sleepycat/db/SequenceStats.html | 403 + db/docs/java/com/sleepycat/db/StatsConfig.html | 392 + db/docs/java/com/sleepycat/db/Transaction.html | 632 + .../java/com/sleepycat/db/TransactionConfig.html | 579 + .../com/sleepycat/db/TransactionStats.Active.html | 322 + .../java/com/sleepycat/db/TransactionStats.html | 550 + db/docs/java/com/sleepycat/db/VerifyConfig.html | 601 + .../db/class-use/BtreePrefixCalculator.html | 189 + 
.../com/sleepycat/db/class-use/BtreeStats.html | 136 + .../java/com/sleepycat/db/class-use/CacheFile.html | 173 + .../sleepycat/db/class-use/CacheFilePriority.html | 238 + .../com/sleepycat/db/class-use/CacheFileStats.html | 174 + .../com/sleepycat/db/class-use/CacheStats.html | 173 + .../sleepycat/db/class-use/CheckpointConfig.html | 190 + .../java/com/sleepycat/db/class-use/Cursor.html | 226 + .../com/sleepycat/db/class-use/CursorConfig.html | 242 + .../java/com/sleepycat/db/class-use/Database.html | 533 + .../com/sleepycat/db/class-use/DatabaseConfig.html | 281 + .../com/sleepycat/db/class-use/DatabaseEntry.html | 1721 +++ .../sleepycat/db/class-use/DatabaseException.html | 1950 +++ .../com/sleepycat/db/class-use/DatabaseStats.html | 207 + .../com/sleepycat/db/class-use/DatabaseType.html | 237 + .../sleepycat/db/class-use/DeadlockException.html | 176 + .../com/sleepycat/db/class-use/Environment.html | 324 + .../sleepycat/db/class-use/EnvironmentConfig.html | 230 + .../com/sleepycat/db/class-use/ErrorHandler.html | 205 + .../sleepycat/db/class-use/FeedbackHandler.html | 205 + .../java/com/sleepycat/db/class-use/HashStats.html | 136 + .../java/com/sleepycat/db/class-use/Hasher.html | 189 + .../com/sleepycat/db/class-use/JoinConfig.html | 240 + .../com/sleepycat/db/class-use/JoinCursor.html | 175 + .../java/com/sleepycat/db/class-use/KeyRange.html | 175 + db/docs/java/com/sleepycat/db/class-use/Lock.html | 246 + .../com/sleepycat/db/class-use/LockDetectMode.html | 288 + .../java/com/sleepycat/db/class-use/LockMode.html | 636 + .../db/class-use/LockNotGrantedException.html | 136 + .../com/sleepycat/db/class-use/LockOperation.html | 277 + .../com/sleepycat/db/class-use/LockRequest.html | 175 + .../sleepycat/db/class-use/LockRequestMode.html | 277 + .../java/com/sleepycat/db/class-use/LockStats.html | 173 + .../java/com/sleepycat/db/class-use/LogCursor.html | 173 + .../sleepycat/db/class-use/LogRecordHandler.html | 189 + .../sleepycat/db/class-use/LogSequenceNumber.html | 342 + .../java/com/sleepycat/db/class-use/LogStats.html | 173 + .../sleepycat/db/class-use/MemoryException.html | 136 + .../com/sleepycat/db/class-use/MessageHandler.html | 205 + .../sleepycat/db/class-use/MultipleDataEntry.html | 136 + .../com/sleepycat/db/class-use/MultipleEntry.html | 192 + .../db/class-use/MultipleKeyDataEntry.html | 136 + .../db/class-use/MultipleRecnoDataEntry.html | 136 + .../sleepycat/db/class-use/OperationStatus.html | 835 + .../com/sleepycat/db/class-use/PanicHandler.html | 205 + .../db/class-use/PreparedTransaction.html | 174 + .../com/sleepycat/db/class-use/QueueStats.html | 136 + .../db/class-use/RecordNumberAppender.html | 192 + .../sleepycat/db/class-use/RecoveryOperation.html | 247 + .../class-use/ReplicationHandleDeadException.html | 136 + .../sleepycat/db/class-use/ReplicationStats.html | 173 + .../sleepycat/db/class-use/ReplicationStatus.html | 176 + .../db/class-use/ReplicationTransport.html | 192 + .../db/class-use/RunRecoveryException.html | 136 + .../sleepycat/db/class-use/SecondaryConfig.html | 226 + .../sleepycat/db/class-use/SecondaryCursor.html | 182 + .../sleepycat/db/class-use/SecondaryDatabase.html | 306 + .../db/class-use/SecondaryKeyCreator.html | 271 + .../java/com/sleepycat/db/class-use/Sequence.html | 175 + .../com/sleepycat/db/class-use/SequenceConfig.html | 201 + .../com/sleepycat/db/class-use/SequenceStats.html | 173 + .../com/sleepycat/db/class-use/StatsConfig.html | 247 + .../com/sleepycat/db/class-use/Transaction.html | 519 + 
.../sleepycat/db/class-use/TransactionConfig.html | 264 + .../db/class-use/TransactionStats.Active.html | 173 + .../sleepycat/db/class-use/TransactionStats.html | 173 + .../com/sleepycat/db/class-use/VerifyConfig.html | 193 + db/docs/java/com/sleepycat/db/package-frame.html | 169 +- db/docs/java/com/sleepycat/db/package-summary.html | 412 +- db/docs/java/com/sleepycat/db/package-tree.html | 114 +- db/docs/java/com/sleepycat/db/package-use.html | 509 +- .../com/sleepycat/util/ExceptionUnwrapper.html | 289 + .../java/com/sleepycat/util/ExceptionWrapper.html | 222 + .../java/com/sleepycat/util/FastInputStream.html | 583 + .../java/com/sleepycat/util/FastOutputStream.html | 780 + .../com/sleepycat/util/IOExceptionWrapper.html | 280 + .../sleepycat/util/RuntimeExceptionWrapper.html | 280 + db/docs/java/com/sleepycat/util/UtfOps.html | 506 + .../util/class-use/ExceptionUnwrapper.html | 136 + .../sleepycat/util/class-use/ExceptionWrapper.html | 180 + .../sleepycat/util/class-use/FastInputStream.html | 173 + .../sleepycat/util/class-use/FastOutputStream.html | 173 + .../util/class-use/IOExceptionWrapper.html | 136 + .../util/class-use/RuntimeExceptionWrapper.html | 136 + .../java/com/sleepycat/util/class-use/UtfOps.html | 136 + db/docs/java/com/sleepycat/util/package-frame.html | 62 + .../java/com/sleepycat/util/package-summary.html | 211 + db/docs/java/com/sleepycat/util/package-tree.html | 165 + db/docs/java/com/sleepycat/util/package-use.html | 189 + db/docs/java/constant-values.html | 431 +- db/docs/java/deprecated-list.html | 1021 +- db/docs/java/help-doc.html | 71 +- db/docs/java/index-all.html | 8225 +++++----- db/docs/java/index.html | 15 +- db/docs/java/overview-frame.html | 43 +- db/docs/java/overview-summary.html | 113 +- db/docs/java/overview-tree.html | 215 +- db/docs/java/package-list | 12 +- db/docs/java/packages.html | 19 +- db/docs/java/resources/inherit.gif | Bin 0 -> 57 bytes db/docs/java/serialized-form.html | 280 +- db/docs/ref/am/close.html | 10 +- db/docs/ref/am/count.html | 6 +- db/docs/ref/am/curclose.html | 6 +- db/docs/ref/am/curdel.html | 6 +- db/docs/ref/am/curdup.html | 6 +- db/docs/ref/am/curget.html | 40 +- db/docs/ref/am/curput.html | 18 +- db/docs/ref/am/cursor.html | 6 +- db/docs/ref/am/delete.html | 6 +- db/docs/ref/am/get.html | 14 +- db/docs/ref/am/join.html | 24 +- db/docs/ref/am/open.html | 30 +- db/docs/ref/am/opensub.html | 78 +- db/docs/ref/am/ops.html | 50 +- db/docs/ref/am/put.html | 12 +- db/docs/ref/am/second.html | 11 +- db/docs/ref/am/second.javas | 156 + db/docs/ref/am/stat.html | 10 +- db/docs/ref/am/sync.html | 6 +- db/docs/ref/am/truncate.html | 6 +- db/docs/ref/am/upgrade.html | 6 +- db/docs/ref/am/verify.html | 6 +- db/docs/ref/am_conf/bt_compare.html | 12 +- db/docs/ref/am_conf/bt_minkey.html | 6 +- db/docs/ref/am_conf/bt_prefix.html | 24 +- db/docs/ref/am_conf/bt_recnum.html | 6 +- db/docs/ref/am_conf/byteorder.html | 6 +- db/docs/ref/am_conf/cachesize.html | 6 +- db/docs/ref/am_conf/dup.html | 6 +- db/docs/ref/am_conf/extentsize.html | 6 +- db/docs/ref/am_conf/h_ffactor.html | 6 +- db/docs/ref/am_conf/h_hash.html | 6 +- db/docs/ref/am_conf/h_nelem.html | 6 +- db/docs/ref/am_conf/intro.html | 12 +- db/docs/ref/am_conf/logrec.html | 19 +- db/docs/ref/am_conf/malloc.html | 6 +- db/docs/ref/am_conf/pagesize.html | 6 +- db/docs/ref/am_conf/re_source.html | 6 +- db/docs/ref/am_conf/recno.html | 6 +- db/docs/ref/am_conf/renumber.html | 6 +- db/docs/ref/am_conf/select.html | 6 +- db/docs/ref/am_misc/align.html | 6 +- 
db/docs/ref/am_misc/dbsizes.html | 31 +- db/docs/ref/am_misc/diskspace.html | 6 +- db/docs/ref/am_misc/error.html | 6 +- db/docs/ref/am_misc/faq.html | 12 +- db/docs/ref/am_misc/get_bulk.html | 21 +- db/docs/ref/am_misc/partial.html | 6 +- db/docs/ref/am_misc/perm.html | 6 +- db/docs/ref/am_misc/stability.html | 12 +- db/docs/ref/am_misc/struct.html | 6 +- db/docs/ref/am_misc/tune.html | 18 +- db/docs/ref/apprec/auto.html | 50 +- db/docs/ref/apprec/config.html | 6 +- db/docs/ref/apprec/def.html | 6 +- db/docs/ref/apprec/intro.html | 25 +- db/docs/ref/arch/apis.html | 6 +- db/docs/ref/arch/bigpic.html | 18 +- db/docs/ref/arch/progmodel.html | 6 +- db/docs/ref/arch/script.html | 18 +- db/docs/ref/arch/utilities.html | 30 +- db/docs/ref/build_unix/aix.html | 6 +- db/docs/ref/build_unix/conf.html | 64 +- db/docs/ref/build_unix/flags.html | 6 +- db/docs/ref/build_unix/freebsd.html | 6 +- db/docs/ref/build_unix/hpux.html | 8 +- db/docs/ref/build_unix/install.html | 6 +- db/docs/ref/build_unix/intro.html | 10 +- db/docs/ref/build_unix/irix.html | 6 +- db/docs/ref/build_unix/linux.html | 21 +- db/docs/ref/build_unix/notes.html | 11 +- db/docs/ref/build_unix/osf1.html | 12 +- db/docs/ref/build_unix/qnx.html | 6 +- db/docs/ref/build_unix/sco.html | 6 +- db/docs/ref/build_unix/shlib.html | 6 +- db/docs/ref/build_unix/small.html | 38 +- db/docs/ref/build_unix/solaris.html | 6 +- db/docs/ref/build_unix/sunos.html | 6 +- db/docs/ref/build_unix/test.html | 6 +- db/docs/ref/build_unix/ultrix.html | 6 +- db/docs/ref/build_vxworks/faq.html | 17 +- db/docs/ref/build_vxworks/intro.html | 32 +- db/docs/ref/build_vxworks/introae.html | 6 +- db/docs/ref/build_vxworks/notes.html | 28 +- db/docs/ref/build_win/faq.html | 12 +- db/docs/ref/build_win/intro.html | 60 +- db/docs/ref/build_win/notes.html | 19 +- db/docs/ref/build_win/small.html | 16 +- db/docs/ref/build_win/test.html | 6 +- db/docs/ref/build_win/unicode.html | 40 + db/docs/ref/cam/app.html | 131 + db/docs/ref/cam/intro.html | 16 +- db/docs/ref/debug/common.html | 15 +- db/docs/ref/debug/compile.html | 14 +- db/docs/ref/debug/intro.html | 24 +- db/docs/ref/debug/printlog.html | 20 +- db/docs/ref/debug/runtime.html | 6 +- db/docs/ref/distrib/layout.html | 14 +- db/docs/ref/distrib/port.html | 6 +- db/docs/ref/dumpload/format.html | 6 +- db/docs/ref/dumpload/text.html | 6 +- db/docs/ref/dumpload/utility.html | 12 +- db/docs/ref/env/create.html | 16 +- db/docs/ref/env/db_config.html | 8 +- db/docs/ref/env/encrypt.html | 6 +- db/docs/ref/env/error.html | 6 +- db/docs/ref/env/faq.html | 6 +- db/docs/ref/env/intro.html | 63 +- db/docs/ref/env/naming.html | 30 +- db/docs/ref/env/open.html | 6 +- db/docs/ref/env/region.html | 6 +- db/docs/ref/env/remote.html | 12 +- db/docs/ref/env/security.html | 16 +- db/docs/ref/ext/mod.html | 69 + db/docs/ref/ext/perl.html | 43 + db/docs/ref/ext/php.html | 102 + db/docs/ref/install/file.html | 6 +- db/docs/ref/install/multiple.html | 12 +- db/docs/ref/intro/data.html | 6 +- db/docs/ref/intro/dbis.html | 6 +- db/docs/ref/intro/dbisnot.html | 6 +- db/docs/ref/intro/distrib.html | 6 +- db/docs/ref/intro/need.html | 6 +- db/docs/ref/intro/products.html | 12 +- db/docs/ref/intro/terrain.html | 6 +- db/docs/ref/intro/what.html | 14 +- db/docs/ref/intro/where.html | 6 +- db/docs/ref/java/compat.html | 6 +- db/docs/ref/java/conf.html | 12 +- db/docs/ref/java/faq.html | 72 +- db/docs/ref/java/program.html | 103 +- db/docs/ref/lock/am_conv.html | 6 +- db/docs/ref/lock/cam_conv.html | 10 +- db/docs/ref/lock/config.html | 6 +- 
db/docs/ref/lock/dead.html | 6 +- db/docs/ref/lock/deaddbg.html | 6 +- db/docs/ref/lock/intro.html | 34 +- db/docs/ref/lock/max.html | 26 +- db/docs/ref/lock/nondb.html | 6 +- db/docs/ref/lock/notxn.html | 6 +- db/docs/ref/lock/page.html | 6 +- db/docs/ref/lock/stdmode.html | 16 +- db/docs/ref/lock/timeout.html | 14 +- db/docs/ref/lock/twopl.html | 6 +- db/docs/ref/log/config.html | 6 +- db/docs/ref/log/intro.html | 49 +- db/docs/ref/log/limits.html | 22 +- db/docs/ref/mp/config.html | 6 +- db/docs/ref/mp/intro.html | 48 +- db/docs/ref/pindex.src | 37 +- db/docs/ref/program/appsignals.html | 6 +- db/docs/ref/program/cache.html | 6 +- db/docs/ref/program/compatible.html | 6 +- db/docs/ref/program/copy.html | 37 +- db/docs/ref/program/environ.html | 12 +- db/docs/ref/program/errorret.html | 21 +- db/docs/ref/program/faq.html | 6 +- db/docs/ref/program/mt.html | 6 +- db/docs/ref/program/namespace.html | 28 +- db/docs/ref/program/ram.html | 65 +- db/docs/ref/program/runtime.html | 11 +- db/docs/ref/program/scope.html | 20 +- db/docs/ref/refs/refs.html | 43 +- db/docs/ref/refs/witold.html | 6 +- db/docs/ref/rep/app.html | 62 +- db/docs/ref/rep/comm.html | 32 +- db/docs/ref/rep/elect.html | 21 +- db/docs/ref/rep/ex.html | 22 +- db/docs/ref/rep/ex_comm.html | 6 +- db/docs/ref/rep/ex_rq.html | 8 +- db/docs/ref/rep/faq.html | 14 +- db/docs/ref/rep/id.html | 12 +- db/docs/ref/rep/intro.html | 20 +- db/docs/ref/rep/newsite.html | 17 +- db/docs/ref/rep/partition.html | 6 +- db/docs/ref/rep/pri.html | 6 +- db/docs/ref/rep/trans.html | 12 +- db/docs/ref/rpc/client.html | 6 +- db/docs/ref/rpc/faq.html | 12 +- db/docs/ref/rpc/intro.html | 12 +- db/docs/ref/rpc/server.html | 6 +- db/docs/ref/sequence/intro.html | 64 + db/docs/ref/tcl/error.html | 6 +- db/docs/ref/tcl/faq.html | 12 +- db/docs/ref/tcl/intro.html | 20 +- db/docs/ref/tcl/program.html | 6 +- db/docs/ref/tcl/using.html | 6 +- db/docs/ref/test/faq.html | 6 +- db/docs/ref/test/run.html | 12 +- db/docs/ref/transapp/admin.html | 6 +- db/docs/ref/transapp/app.html | 29 +- db/docs/ref/transapp/archival.html | 62 +- db/docs/ref/transapp/atomicity.html | 6 +- db/docs/ref/transapp/checkpoint.html | 40 +- db/docs/ref/transapp/cursor.html | 14 +- db/docs/ref/transapp/data_open.html | 19 +- db/docs/ref/transapp/deadlock.html | 6 +- db/docs/ref/transapp/env_open.html | 6 +- db/docs/ref/transapp/faq.html | 26 +- db/docs/ref/transapp/filesys.html | 6 +- db/docs/ref/transapp/hotfail.html | 6 +- db/docs/ref/transapp/inc.html | 6 +- db/docs/ref/transapp/intro.html | 12 +- db/docs/ref/transapp/logfile.html | 6 +- db/docs/ref/transapp/nested.html | 6 +- db/docs/ref/transapp/put.html | 6 +- db/docs/ref/transapp/read.html | 22 +- db/docs/ref/transapp/reclimit.html | 6 +- db/docs/ref/transapp/recovery.html | 20 +- db/docs/ref/transapp/term.html | 22 +- db/docs/ref/transapp/throughput.html | 6 +- db/docs/ref/transapp/tune.html | 49 +- db/docs/ref/transapp/why.html | 14 +- db/docs/ref/txn/config.html | 6 +- db/docs/ref/txn/intro.html | 33 +- db/docs/ref/txn/limits.html | 12 +- db/docs/ref/upgrade.2.0/convert.html | 6 +- db/docs/ref/upgrade.2.0/disk.html | 6 +- db/docs/ref/upgrade.2.0/intro.html | 9 +- db/docs/ref/upgrade.2.0/system.html | 6 +- db/docs/ref/upgrade.2.0/toc.html | 9 +- db/docs/ref/upgrade.3.0/close.html | 6 +- db/docs/ref/upgrade.3.0/cxx.html | 6 +- db/docs/ref/upgrade.3.0/db.html | 6 +- db/docs/ref/upgrade.3.0/db_cxx.html | 6 +- db/docs/ref/upgrade.3.0/dbenv.html | 6 +- db/docs/ref/upgrade.3.0/dbenv_cxx.html | 6 +- db/docs/ref/upgrade.3.0/dbinfo.html | 6 
+- db/docs/ref/upgrade.3.0/disk.html | 6 +- db/docs/ref/upgrade.3.0/eacces.html | 6 +- db/docs/ref/upgrade.3.0/eagain.html | 6 +- db/docs/ref/upgrade.3.0/envopen.html | 6 +- db/docs/ref/upgrade.3.0/func.html | 6 +- db/docs/ref/upgrade.3.0/intro.html | 9 +- db/docs/ref/upgrade.3.0/java.html | 14 +- db/docs/ref/upgrade.3.0/join.html | 6 +- db/docs/ref/upgrade.3.0/jump_set.html | 6 +- db/docs/ref/upgrade.3.0/lock_detect.html | 6 +- db/docs/ref/upgrade.3.0/lock_notheld.html | 6 +- db/docs/ref/upgrade.3.0/lock_put.html | 6 +- db/docs/ref/upgrade.3.0/lock_stat.html | 6 +- db/docs/ref/upgrade.3.0/log_register.html | 6 +- db/docs/ref/upgrade.3.0/log_stat.html | 6 +- db/docs/ref/upgrade.3.0/memp_stat.html | 6 +- db/docs/ref/upgrade.3.0/open.html | 6 +- db/docs/ref/upgrade.3.0/rmw.html | 6 +- db/docs/ref/upgrade.3.0/stat.html | 6 +- db/docs/ref/upgrade.3.0/toc.html | 9 +- db/docs/ref/upgrade.3.0/txn_begin.html | 6 +- db/docs/ref/upgrade.3.0/txn_commit.html | 6 +- db/docs/ref/upgrade.3.0/txn_stat.html | 6 +- db/docs/ref/upgrade.3.0/value_set.html | 6 +- db/docs/ref/upgrade.3.0/xa.html | 6 +- db/docs/ref/upgrade.3.1/btstat.html | 6 +- db/docs/ref/upgrade.3.1/config.html | 6 +- db/docs/ref/upgrade.3.1/disk.html | 6 +- db/docs/ref/upgrade.3.1/dup.html | 6 +- db/docs/ref/upgrade.3.1/env.html | 6 +- db/docs/ref/upgrade.3.1/intro.html | 9 +- db/docs/ref/upgrade.3.1/log_register.html | 6 +- db/docs/ref/upgrade.3.1/logalloc.html | 6 +- db/docs/ref/upgrade.3.1/memp_register.html | 6 +- db/docs/ref/upgrade.3.1/put.html | 6 +- db/docs/ref/upgrade.3.1/set_feedback.html | 6 +- db/docs/ref/upgrade.3.1/set_paniccall.html | 6 +- db/docs/ref/upgrade.3.1/set_tx_recover.html | 6 +- db/docs/ref/upgrade.3.1/sysmem.html | 6 +- db/docs/ref/upgrade.3.1/tcl.html | 6 +- db/docs/ref/upgrade.3.1/tmp.html | 6 +- db/docs/ref/upgrade.3.1/toc.html | 9 +- db/docs/ref/upgrade.3.1/txn_check.html | 6 +- db/docs/ref/upgrade.3.2/callback.html | 6 +- db/docs/ref/upgrade.3.2/db_dump.html | 6 +- db/docs/ref/upgrade.3.2/disk.html | 6 +- db/docs/ref/upgrade.3.2/handle.html | 23 +- db/docs/ref/upgrade.3.2/incomplete.html | 18 +- db/docs/ref/upgrade.3.2/intro.html | 9 +- db/docs/ref/upgrade.3.2/mutexlock.html | 6 +- db/docs/ref/upgrade.3.2/notfound.html | 21 +- db/docs/ref/upgrade.3.2/renumber.html | 6 +- db/docs/ref/upgrade.3.2/set_flags.html | 6 +- db/docs/ref/upgrade.3.2/toc.html | 9 +- db/docs/ref/upgrade.3.2/tx_recover.html | 6 +- db/docs/ref/upgrade.3.3/alloc.html | 26 +- db/docs/ref/upgrade.3.3/bigfile.html | 6 +- db/docs/ref/upgrade.3.3/conflict.html | 6 +- db/docs/ref/upgrade.3.3/disk.html | 6 +- db/docs/ref/upgrade.3.3/getswap.html | 6 +- db/docs/ref/upgrade.3.3/gettype.html | 6 +- db/docs/ref/upgrade.3.3/intro.html | 9 +- db/docs/ref/upgrade.3.3/memp_fget.html | 6 +- db/docs/ref/upgrade.3.3/rpc.html | 8 +- db/docs/ref/upgrade.3.3/shared.html | 6 +- db/docs/ref/upgrade.3.3/toc.html | 9 +- db/docs/ref/upgrade.3.3/txn_prepare.html | 6 +- db/docs/ref/upgrade.4.0/asr.html | 6 +- db/docs/ref/upgrade.4.0/cxx.html | 6 +- db/docs/ref/upgrade.4.0/deadlock.html | 6 +- db/docs/ref/upgrade.4.0/disk.html | 6 +- db/docs/ref/upgrade.4.0/env.html | 6 +- db/docs/ref/upgrade.4.0/intro.html | 9 +- db/docs/ref/upgrade.4.0/java.html | 8 +- db/docs/ref/upgrade.4.0/lock.html | 6 +- db/docs/ref/upgrade.4.0/lock_id_free.html | 6 +- db/docs/ref/upgrade.4.0/log.html | 6 +- db/docs/ref/upgrade.4.0/mp.html | 6 +- db/docs/ref/upgrade.4.0/rpc.html | 8 +- db/docs/ref/upgrade.4.0/set_lk_max.html | 6 +- db/docs/ref/upgrade.4.0/toc.html | 9 +- 
db/docs/ref/upgrade.4.0/txn.html | 6 +- db/docs/ref/upgrade.4.1/app_dispatch.html | 6 +- db/docs/ref/upgrade.4.1/checkpoint.html | 6 +- db/docs/ref/upgrade.4.1/cxx.html | 6 +- db/docs/ref/upgrade.4.1/disk.html | 6 +- db/docs/ref/upgrade.4.1/excl.html | 6 +- db/docs/ref/upgrade.4.1/fop.html | 8 +- db/docs/ref/upgrade.4.1/hash_nelem.html | 6 +- db/docs/ref/upgrade.4.1/incomplete.html | 15 +- db/docs/ref/upgrade.4.1/intro.html | 9 +- db/docs/ref/upgrade.4.1/java.html | 27 +- db/docs/ref/upgrade.4.1/log_register.html | 6 +- db/docs/ref/upgrade.4.1/log_stat.html | 6 +- db/docs/ref/upgrade.4.1/memp_sync.html | 6 +- db/docs/ref/upgrade.4.1/toc.html | 9 +- db/docs/ref/upgrade.4.2/cksum.html | 9 +- db/docs/ref/upgrade.4.2/client.html | 9 +- db/docs/ref/upgrade.4.2/del.html | 28 +- db/docs/ref/upgrade.4.2/disk.html | 12 +- db/docs/ref/upgrade.4.2/intro.html | 9 +- db/docs/ref/upgrade.4.2/java.html | 22 +- db/docs/ref/upgrade.4.2/lockng.html | 9 +- db/docs/ref/upgrade.4.2/nosync.html | 9 +- db/docs/ref/upgrade.4.2/priority.html | 9 +- db/docs/ref/upgrade.4.2/queue.html | 9 +- db/docs/ref/upgrade.4.2/repinit.html | 9 +- db/docs/ref/upgrade.4.2/tcl.html | 6 +- db/docs/ref/upgrade.4.2/toc.html | 9 +- db/docs/ref/upgrade.4.2/verify.html | 9 +- db/docs/ref/upgrade.4.3/cput.html | 26 + db/docs/ref/upgrade.4.3/disk.html | 26 + db/docs/ref/upgrade.4.3/enomem.html | 43 + db/docs/ref/upgrade.4.3/err.html | 33 + db/docs/ref/upgrade.4.3/fileopen.html | 24 + db/docs/ref/upgrade.4.3/intro.html | 26 + db/docs/ref/upgrade.4.3/java.html | 63 + db/docs/ref/upgrade.4.3/log.html | 33 + db/docs/ref/upgrade.4.3/repl.html | 40 + db/docs/ref/upgrade.4.3/rtc.html | 25 + db/docs/ref/upgrade.4.3/stat.html | 26 + db/docs/ref/upgrade.4.3/toc.html | 37 + db/docs/ref/upgrade.4.3/verb.html | 33 + db/docs/ref/upgrade/process.html | 6 +- db/docs/ref/upgrade/version.html | 6 +- db/docs/ref/xa/build.html | 14 +- db/docs/ref/xa/faq.html | 8 +- db/docs/ref/xa/intro.html | 6 +- db/docs/ref/xa/xa_config.html | 6 +- db/docs/ref/xa/xa_intro.html | 6 +- db/docs/sleepycat/contact.html | 6 +- db/docs/sleepycat/legal.html | 20 +- db/docs/sleepycat/license.html | 8 +- db/docs/utility/berkeley_db_svc.html | 41 +- db/docs/utility/db_archive.html | 33 +- db/docs/utility/db_checkpoint.html | 33 +- db/docs/utility/db_deadlock.html | 50 +- db/docs/utility/db_dump.html | 45 +- db/docs/utility/db_load.html | 67 +- db/docs/utility/db_printlog.html | 37 +- db/docs/utility/db_recover.html | 35 +- db/docs/utility/db_stat.html | 90 +- db/docs/utility/db_upgrade.html | 27 +- db/docs/utility/db_verify.html | 29 +- db/docs/utility/index.html | 6 +- db/env/db_salloc.c | 147 +- db/env/db_shash.c | 25 +- db/env/env_file.c | 8 +- db/env/env_method.c | 185 +- db/env/env_open.c | 381 +- db/env/env_recover.c | 215 +- db/env/env_region.c | 207 +- db/env/env_stat.c | 656 + db/examples_c/README | 17 +- db/examples_c/bench_001.c | 6 +- db/examples_c/ex_access.c | 8 +- db/examples_c/ex_apprec/auto_rebuild | 1 + db/examples_c/ex_apprec/ex_apprec.c | 4 +- db/examples_c/ex_apprec/ex_apprec.h | 4 +- db/examples_c/ex_apprec/ex_apprec.src | 4 +- db/examples_c/ex_apprec/ex_apprec_auto.c | 99 +- db/examples_c/ex_apprec/ex_apprec_rec.c | 4 +- db/examples_c/ex_btrec.c | 10 +- db/examples_c/ex_dbclient.c | 5 +- db/examples_c/ex_env.c | 4 +- db/examples_c/ex_lock.c | 9 +- db/examples_c/ex_mpool.c | 4 +- db/examples_c/ex_repquote/ex_repquote.h | 4 +- db/examples_c/ex_repquote/ex_rq_client.c | 6 +- db/examples_c/ex_repquote/ex_rq_main.c | 10 +- db/examples_c/ex_repquote/ex_rq_master.c | 
4 +- db/examples_c/ex_repquote/ex_rq_net.c | 4 +- db/examples_c/ex_repquote/ex_rq_util.c | 25 +- db/examples_c/ex_sequence.c | 124 + db/examples_c/ex_thread.c | 4 +- db/examples_c/ex_tpcb.c | 4 +- db/examples_c/ex_tpcb.h | 4 +- .../getting_started/example_database_load.c | 272 + .../getting_started/example_database_read.c | 272 + .../getting_started/gettingstarted_common.c | 235 + .../getting_started/gettingstarted_common.h | 61 + db/examples_c/getting_started/inventory.txt | 800 + db/examples_c/getting_started/vendors.txt | 6 + db/examples_cxx/AccessExample.cpp | 14 +- db/examples_cxx/BtRecExample.cpp | 8 +- db/examples_cxx/EnvExample.cpp | 4 +- db/examples_cxx/LockExample.cpp | 11 +- db/examples_cxx/MpoolExample.cpp | 4 +- db/examples_cxx/SequenceExample.cpp | 130 + db/examples_cxx/TpcbExample.cpp | 4 +- db/examples_cxx/getting_started/MyDb.cpp | 63 + db/examples_cxx/getting_started/MyDb.hpp | 38 + .../excxx_example_database_load.cpp | 221 + .../excxx_example_database_read.cpp | 228 + .../getting_started/gettingStartedCommon.hpp | 181 + db/examples_cxx/getting_started/inventory.txt | 800 + db/examples_cxx/getting_started/vendors.txt | 6 + .../examples/collections/access/AccessExample.java | 286 + .../collections/hello/HelloDatabaseWorld.java | 163 + .../examples/collections/ship/basic/PartData.java | 65 + .../examples/collections/ship/basic/PartKey.java | 41 + .../examples/collections/ship/basic/Sample.java | 267 + .../collections/ship/basic/SampleDatabase.java | 135 + .../collections/ship/basic/SampleViews.java | 123 + .../collections/ship/basic/ShipmentData.java | 42 + .../collections/ship/basic/ShipmentKey.java | 49 + .../collections/ship/basic/SupplierData.java | 58 + .../collections/ship/basic/SupplierKey.java | 41 + .../examples/collections/ship/basic/Weight.java | 50 + .../examples/collections/ship/entity/Part.java | 73 + .../examples/collections/ship/entity/PartData.java | 66 + .../examples/collections/ship/entity/PartKey.java | 41 + .../examples/collections/ship/entity/Sample.java | 250 + .../collections/ship/entity/SampleDatabase.java | 330 + .../collections/ship/entity/SampleViews.java | 307 + .../examples/collections/ship/entity/Shipment.java | 56 + .../collections/ship/entity/ShipmentData.java | 43 + .../collections/ship/entity/ShipmentKey.java | 49 + .../examples/collections/ship/entity/Supplier.java | 64 + .../collections/ship/entity/SupplierData.java | 59 + .../collections/ship/entity/SupplierKey.java | 41 + .../examples/collections/ship/entity/Weight.java | 50 + .../examples/collections/ship/factory/Part.java | 107 + .../examples/collections/ship/factory/PartKey.java | 61 + .../examples/collections/ship/factory/Sample.java | 248 + .../collections/ship/factory/SampleDatabase.java | 225 + .../collections/ship/factory/SampleViews.java | 143 + .../collections/ship/factory/Shipment.java | 103 + .../collections/ship/factory/ShipmentKey.java | 71 + .../collections/ship/factory/Supplier.java | 109 + .../collections/ship/factory/SupplierKey.java | 61 + .../examples/collections/ship/factory/Weight.java | 50 + .../examples/collections/ship/index/PartData.java | 65 + .../examples/collections/ship/index/PartKey.java | 41 + .../examples/collections/ship/index/Sample.java | 303 + .../collections/ship/index/SampleDatabase.java | 330 + .../collections/ship/index/SampleViews.java | 162 + .../collections/ship/index/ShipmentData.java | 42 + .../collections/ship/index/ShipmentKey.java | 49 + .../collections/ship/index/SupplierData.java | 58 + .../collections/ship/index/SupplierKey.java | 41 
+ .../examples/collections/ship/index/Weight.java | 50 + .../collections/ship/marshal/MarshalledEntity.java | 43 + .../collections/ship/marshal/MarshalledKey.java | 37 + .../examples/collections/ship/marshal/Part.java | 117 + .../examples/collections/ship/marshal/PartKey.java | 60 + .../examples/collections/ship/marshal/Sample.java | 250 + .../collections/ship/marshal/SampleDatabase.java | 259 + .../collections/ship/marshal/SampleViews.java | 277 + .../collections/ship/marshal/Shipment.java | 114 + .../collections/ship/marshal/ShipmentKey.java | 70 + .../collections/ship/marshal/Supplier.java | 119 + .../collections/ship/marshal/SupplierKey.java | 60 + .../examples/collections/ship/marshal/Weight.java | 50 + .../examples/collections/ship/sentity/Part.java | 91 + .../examples/collections/ship/sentity/PartKey.java | 39 + .../examples/collections/ship/sentity/Sample.java | 250 + .../collections/ship/sentity/SampleDatabase.java | 322 + .../collections/ship/sentity/SampleViews.java | 420 + .../collections/ship/sentity/Shipment.java | 76 + .../collections/ship/sentity/ShipmentKey.java | 47 + .../collections/ship/sentity/Supplier.java | 83 + .../collections/ship/sentity/SupplierKey.java | 39 + .../examples/collections/ship/sentity/Weight.java | 50 + .../examples/collections/ship/tuple/Part.java | 73 + .../examples/collections/ship/tuple/PartData.java | 66 + .../examples/collections/ship/tuple/PartKey.java | 39 + .../examples/collections/ship/tuple/Sample.java | 249 + .../collections/ship/tuple/SampleDatabase.java | 322 + .../collections/ship/tuple/SampleViews.java | 397 + .../examples/collections/ship/tuple/Shipment.java | 56 + .../collections/ship/tuple/ShipmentData.java | 43 + .../collections/ship/tuple/ShipmentKey.java | 47 + .../examples/collections/ship/tuple/Supplier.java | 64 + .../collections/ship/tuple/SupplierData.java | 59 + .../collections/ship/tuple/SupplierKey.java | 39 + .../examples/collections/ship/tuple/Weight.java | 50 + .../com/sleepycat/examples/db/AccessExample.java | 161 +- .../com/sleepycat/examples/db/BtRecExample.java | 226 +- .../sleepycat/examples/db/BulkAccessExample.java | 135 +- .../src/com/sleepycat/examples/db/EnvExample.java | 119 +- .../db/GettingStarted/ExampleDatabaseLoad.java | 228 + .../db/GettingStarted/ExampleDatabaseRead.java | 202 + .../examples/db/GettingStarted/Inventory.java | 62 + .../db/GettingStarted/InventoryBinding.java | 46 + .../db/GettingStarted/ItemNameKeyCreator.java | 37 + .../examples/db/GettingStarted/MyDbs.java | 157 + .../examples/db/GettingStarted/Vendor.java | 82 + .../examples/db/GettingStarted/inventory.txt | 800 + .../examples/db/GettingStarted/vendors.txt | 6 + .../src/com/sleepycat/examples/db/LockExample.java | 127 +- .../src/com/sleepycat/examples/db/RPCExample.java | 107 + .../com/sleepycat/examples/db/SequenceExample.java | 93 + .../src/com/sleepycat/examples/db/TpcbExample.java | 470 +- db/fileops/fileops.src | 6 +- db/fileops/fileops_auto.c | 970 +- db/fileops/fileops_autop.c | 306 + db/fileops/fop_basic.c | 8 +- db/fileops/fop_rec.c | 17 +- db/fileops/fop_util.c | 174 +- db/hash/hash.c | 183 +- db/hash/hash.src | 10 +- db/hash/hash_auto.c | 1733 +-- db/hash/hash_autop.c | 486 + db/hash/hash_conv.c | 11 +- db/hash/hash_dup.c | 65 +- db/hash/hash_func.c | 8 +- db/hash/hash_meta.c | 53 +- db/hash/hash_method.c | 20 +- db/hash/hash_open.c | 131 +- db/hash/hash_page.c | 148 +- db/hash/hash_rec.c | 349 +- db/hash/hash_reclaim.c | 10 +- db/hash/hash_stat.c | 343 +- db/hash/hash_stub.c | 47 +- db/hash/hash_upgrade.c | 14 +- 
db/hash/hash_verify.c | 27 +- db/hmac/hmac.c | 10 +- db/hmac/sha1.c | 7 +- db/hsearch/hsearch.c | 8 +- .../src/com/sleepycat/bind/ByteArrayBinding.java | 43 + db/java/src/com/sleepycat/bind/EntityBinding.java | 49 + db/java/src/com/sleepycat/bind/EntryBinding.java | 38 + .../com/sleepycat/bind/RecordNumberBinding.java | 70 + db/java/src/com/sleepycat/bind/package.html | 7 + .../com/sleepycat/bind/serial/ClassCatalog.java | 72 + .../com/sleepycat/bind/serial/SerialBinding.java | 130 + .../src/com/sleepycat/bind/serial/SerialInput.java | 75 + .../com/sleepycat/bind/serial/SerialOutput.java | 114 + .../sleepycat/bind/serial/SerialSerialBinding.java | 117 + .../bind/serial/SerialSerialKeyCreator.java | 143 + .../sleepycat/bind/serial/StoredClassCatalog.java | 446 + .../sleepycat/bind/serial/TupleSerialBinding.java | 115 + .../bind/serial/TupleSerialKeyCreator.java | 137 + .../bind/serial/TupleSerialMarshalledBinding.java | 93 + .../serial/TupleSerialMarshalledKeyCreator.java | 75 + db/java/src/com/sleepycat/bind/serial/package.html | 6 + .../com/sleepycat/bind/tuple/BooleanBinding.java | 75 + .../src/com/sleepycat/bind/tuple/ByteBinding.java | 74 + .../com/sleepycat/bind/tuple/CharacterBinding.java | 74 + .../com/sleepycat/bind/tuple/DoubleBinding.java | 75 + .../src/com/sleepycat/bind/tuple/FloatBinding.java | 74 + .../com/sleepycat/bind/tuple/IntegerBinding.java | 74 + .../src/com/sleepycat/bind/tuple/LongBinding.java | 74 + .../sleepycat/bind/tuple/MarshalledTupleEntry.java | 45 + .../bind/tuple/MarshalledTupleKeyEntity.java | 71 + .../src/com/sleepycat/bind/tuple/ShortBinding.java | 74 + .../com/sleepycat/bind/tuple/StringBinding.java | 76 + .../src/com/sleepycat/bind/tuple/TupleBinding.java | 179 + .../src/com/sleepycat/bind/tuple/TupleInput.java | 482 + .../sleepycat/bind/tuple/TupleInputBinding.java | 46 + .../bind/tuple/TupleMarshalledBinding.java | 70 + .../src/com/sleepycat/bind/tuple/TupleOutput.java | 398 + .../sleepycat/bind/tuple/TupleTupleBinding.java | 96 + .../sleepycat/bind/tuple/TupleTupleKeyCreator.java | 105 + .../bind/tuple/TupleTupleMarshalledBinding.java | 94 + .../bind/tuple/TupleTupleMarshalledKeyCreator.java | 75 + db/java/src/com/sleepycat/bind/tuple/package.html | 6 + .../sleepycat/collections/CurrentTransaction.java | 433 + .../src/com/sleepycat/collections/DataCursor.java | 690 + .../src/com/sleepycat/collections/DataView.java | 598 + .../src/com/sleepycat/collections/KeyRange.java | 299 + .../sleepycat/collections/KeyRangeException.java | 26 + .../sleepycat/collections/MapEntryParameter.java | 126 + .../sleepycat/collections/PrimaryKeyAssigner.java | 30 + .../src/com/sleepycat/collections/RangeCursor.java | 874 ++ .../sleepycat/collections/StoredCollection.java | 460 + .../sleepycat/collections/StoredCollections.java | 156 + .../com/sleepycat/collections/StoredContainer.java | 415 + .../com/sleepycat/collections/StoredEntrySet.java | 176 + .../com/sleepycat/collections/StoredIterator.java | 600 + .../com/sleepycat/collections/StoredKeySet.java | 144 + .../src/com/sleepycat/collections/StoredList.java | 604 + .../src/com/sleepycat/collections/StoredMap.java | 511 + .../com/sleepycat/collections/StoredMapEntry.java | 41 + .../collections/StoredSortedEntrySet.java | 220 + .../sleepycat/collections/StoredSortedKeySet.java | 241 + .../com/sleepycat/collections/StoredSortedMap.java | 348 + .../collections/StoredSortedValueSet.java | 255 + .../com/sleepycat/collections/StoredValueSet.java | 220 + .../sleepycat/collections/TransactionRunner.java | 221 + 
.../sleepycat/collections/TransactionWorker.java | 28 + .../sleepycat/collections/TupleSerialFactory.java | 135 + db/java/src/com/sleepycat/collections/package.html | 21 + db/java/src/com/sleepycat/compat/DbCompat.java | 255 + .../com/sleepycat/db/BtreePrefixCalculator.java | 14 + db/java/src/com/sleepycat/db/BtreeStats.java | 146 + db/java/src/com/sleepycat/db/CacheFile.java | 70 + .../src/com/sleepycat/db/CacheFilePriority.java | 61 + db/java/src/com/sleepycat/db/CacheFileStats.java | 68 + db/java/src/com/sleepycat/db/CacheStats.java | 224 + db/java/src/com/sleepycat/db/CheckpointConfig.java | 60 + db/java/src/com/sleepycat/db/Cursor.java | 349 + db/java/src/com/sleepycat/db/CursorConfig.java | 76 + db/java/src/com/sleepycat/db/Database.java | 314 + db/java/src/com/sleepycat/db/DatabaseConfig.java | 628 + db/java/src/com/sleepycat/db/DatabaseEntry.java | 181 + .../src/com/sleepycat/db/DatabaseException.java | 54 + db/java/src/com/sleepycat/db/DatabaseStats.java | 15 + db/java/src/com/sleepycat/db/DatabaseType.java | 65 + .../src/com/sleepycat/db/DeadlockException.java | 20 + db/java/src/com/sleepycat/db/Environment.java | 354 + .../src/com/sleepycat/db/EnvironmentConfig.java | 1076 ++ db/java/src/com/sleepycat/db/ErrorHandler.java | 14 + db/java/src/com/sleepycat/db/FeedbackHandler.java | 16 + db/java/src/com/sleepycat/db/HashStats.java | 116 + db/java/src/com/sleepycat/db/Hasher.java | 14 + db/java/src/com/sleepycat/db/JoinConfig.java | 39 + db/java/src/com/sleepycat/db/JoinCursor.java | 60 + db/java/src/com/sleepycat/db/KeyRange.java | 16 + db/java/src/com/sleepycat/db/Lock.java | 31 + db/java/src/com/sleepycat/db/LockDetectMode.java | 90 + db/java/src/com/sleepycat/db/LockMode.java | 40 + .../com/sleepycat/db/LockNotGrantedException.java | 57 + db/java/src/com/sleepycat/db/LockOperation.java | 65 + db/java/src/com/sleepycat/db/LockRequest.java | 83 + db/java/src/com/sleepycat/db/LockRequestMode.java | 43 + db/java/src/com/sleepycat/db/LockStats.java | 164 + db/java/src/com/sleepycat/db/LogCursor.java | 80 + db/java/src/com/sleepycat/db/LogRecordHandler.java | 17 + .../src/com/sleepycat/db/LogSequenceNumber.java | 38 + db/java/src/com/sleepycat/db/LogStats.java | 146 + db/java/src/com/sleepycat/db/MemoryException.java | 41 + db/java/src/com/sleepycat/db/MessageHandler.java | 14 + .../src/com/sleepycat/db/MultipleDataEntry.java | 57 + db/java/src/com/sleepycat/db/MultipleEntry.java | 28 + .../src/com/sleepycat/db/MultipleKeyDataEntry.java | 63 + .../com/sleepycat/db/MultipleRecnoDataEntry.java | 61 + db/java/src/com/sleepycat/db/OperationStatus.java | 54 + db/java/src/com/sleepycat/db/PanicHandler.java | 14 + .../src/com/sleepycat/db/PreparedTransaction.java | 30 + db/java/src/com/sleepycat/db/QueueStats.java | 98 + .../src/com/sleepycat/db/RecordNumberAppender.java | 15 + .../src/com/sleepycat/db/RecoveryOperation.java | 56 + .../db/ReplicationHandleDeadException.java | 20 + db/java/src/com/sleepycat/db/ReplicationStats.java | 278 + .../src/com/sleepycat/db/ReplicationStatus.java | 121 + .../src/com/sleepycat/db/ReplicationTransport.java | 26 + .../src/com/sleepycat/db/RunRecoveryException.java | 20 + db/java/src/com/sleepycat/db/SecondaryConfig.java | 91 + db/java/src/com/sleepycat/db/SecondaryCursor.java | 250 + .../src/com/sleepycat/db/SecondaryDatabase.java | 106 + .../src/com/sleepycat/db/SecondaryKeyCreator.java | 18 + db/java/src/com/sleepycat/db/Sequence.java | 63 + db/java/src/com/sleepycat/db/SequenceConfig.java | 199 + db/java/src/com/sleepycat/db/SequenceStats.java | 74 + 
db/java/src/com/sleepycat/db/StatsConfig.java | 56 + db/java/src/com/sleepycat/db/Transaction.java | 75 + .../src/com/sleepycat/db/TransactionConfig.java | 89 + db/java/src/com/sleepycat/db/TransactionStats.java | 147 + db/java/src/com/sleepycat/db/VerifyConfig.java | 81 + db/java/src/com/sleepycat/db/internal/Db.java | 399 + .../src/com/sleepycat/db/internal/DbClient.java | 17 + .../src/com/sleepycat/db/internal/DbConstants.java | 182 + db/java/src/com/sleepycat/db/internal/DbEnv.java | 434 + db/java/src/com/sleepycat/db/internal/DbLock.java | 51 + db/java/src/com/sleepycat/db/internal/DbLogc.java | 54 + .../src/com/sleepycat/db/internal/DbMpoolFile.java | 56 + .../src/com/sleepycat/db/internal/DbSequence.java | 96 + db/java/src/com/sleepycat/db/internal/DbTxn.java | 103 + db/java/src/com/sleepycat/db/internal/DbUtil.java | 179 + db/java/src/com/sleepycat/db/internal/Dbc.java | 73 + db/java/src/com/sleepycat/db/internal/db_java.java | 34 + .../src/com/sleepycat/db/internal/db_javaJNI.java | 263 + db/java/src/com/sleepycat/db/package.html | 28 +- .../src/com/sleepycat/util/ExceptionUnwrapper.java | 69 + .../src/com/sleepycat/util/ExceptionWrapper.java | 25 + .../src/com/sleepycat/util/FastInputStream.java | 179 + .../src/com/sleepycat/util/FastOutputStream.java | 278 + .../src/com/sleepycat/util/IOExceptionWrapper.java | 34 + .../sleepycat/util/RuntimeExceptionWrapper.java | 32 + db/java/src/com/sleepycat/util/UtfOps.java | 281 + db/java/src/com/sleepycat/util/package.html | 6 + db/libdb_java/db.i | 343 +- db/libdb_java/db_java.i | 526 +- db/libdb_java/db_java_wrap.c | 4876 +++--- db/libdb_java/java-post.pl | 23 + db/libdb_java/java_callbacks.i | 106 +- db/libdb_java/java_except.i | 28 +- db/libdb_java/java_stat.i | 86 +- db/libdb_java/java_stat_auto.c | 428 +- db/libdb_java/java_typemaps.i | 236 +- db/libdb_java/java_util.i | 650 +- db/lock/lock.c | 1135 +- db/lock/lock_deadlock.c | 247 +- db/lock/lock_id.c | 408 + db/lock/lock_list.c | 351 + db/lock/lock_method.c | 242 +- db/lock/lock_region.c | 214 +- db/lock/lock_stat.c | 418 +- db/lock/lock_timer.c | 216 + db/lock/lock_util.c | 23 +- db/log/log.c | 596 +- db/log/log_archive.c | 204 +- db/log/log_compare.c | 9 +- db/log/log_get.c | 170 +- db/log/log_method.c | 200 +- db/log/log_put.c | 215 +- db/log/log_stat.c | 337 + db/mod_db4/ABOUT | 47 + db/mod_db4/INSTALL | 14 + db/mod_db4/Makefile.in | 26 + db/mod_db4/config.h.in | 22 + db/mod_db4/configure | 3224 ++++ db/mod_db4/configure.in | 115 + db/mod_db4/mm_hash.c | 137 + db/mod_db4/mm_hash.h | 48 + db/mod_db4/mod_db4.c | 129 + db/mod_db4/mod_db4_export.h | 22 + db/mod_db4/sem_utils.c | 116 + db/mod_db4/sem_utils.h | 24 + db/mod_db4/skiplist.c | 503 + db/mod_db4/skiplist.h | 84 + db/mod_db4/utils.c | 615 + db/mod_db4/utils.h | 35 + db/mp/mp_alloc.c | 28 +- db/mp/mp_bh.c | 77 +- db/mp/mp_fget.c | 92 +- db/mp/mp_fmethod.c | 599 + db/mp/mp_fopen.c | 799 +- db/mp/mp_fput.c | 48 +- db/mp/mp_fset.c | 13 +- db/mp/mp_method.c | 293 +- db/mp/mp_region.c | 201 +- db/mp/mp_register.c | 11 +- db/mp/mp_stat.c | 564 +- db/mp/mp_sync.c | 181 +- db/mp/mp_trickle.c | 29 +- db/mutex/mut_fcntl.c | 9 +- db/mutex/mut_pthread.c | 29 +- db/mutex/mut_tas.c | 25 +- db/mutex/mut_win32.c | 23 +- db/mutex/mutex.c | 47 +- db/mutex/tm.c | 12 +- db/mutex/uts4_cc.s | 4 +- db/os/os_abs.c | 8 +- db/os/os_alloc.c | 40 +- db/os/os_clock.c | 53 +- db/os/os_config.c | 8 +- db/os/os_dir.c | 10 +- db/os/os_errno.c | 8 +- db/os/os_fid.c | 77 +- db/os/os_fsync.c | 31 +- db/os/os_handle.c | 41 +- db/os/os_id.c | 54 +- 
db/os/os_map.c | 54 +- db/os/os_method.c | 47 +- db/os/os_oflags.c | 80 +- db/os/os_open.c | 145 +- db/os/os_region.c | 59 +- db/os/os_rename.c | 32 +- db/os/os_root.c | 8 +- db/os/os_rpath.c | 8 +- db/os/os_rw.c | 106 +- db/os/os_seek.c | 34 +- db/os/os_sleep.c | 33 +- db/os/os_spin.c | 12 +- db/os/os_stat.c | 32 +- db/os/os_tmpdir.c | 32 +- db/os/os_truncate.c | 58 + db/os/os_unlink.c | 66 +- db/os_vxworks/os_vx_abs.c | 8 +- db/os_vxworks/os_vx_config.c | 8 +- db/os_vxworks/os_vx_map.c | 8 +- db/os_win32/os_abs.c | 8 +- db/os_win32/os_clock.c | 11 +- db/os_win32/os_config.c | 49 +- db/os_win32/os_dir.c | 71 +- db/os_win32/os_errno.c | 48 +- db/os_win32/os_fid.c | 10 +- db/os_win32/os_fsync.c | 27 +- db/os_win32/os_handle.c | 37 +- db/os_win32/os_map.c | 58 +- db/os_win32/os_open.c | 86 +- db/os_win32/os_rename.c | 33 +- db/os_win32/os_rw.c | 105 +- db/os_win32/os_seek.c | 29 +- db/os_win32/os_sleep.c | 17 +- db/os_win32/os_spin.c | 10 +- db/os_win32/os_stat.c | 37 +- db/os_win32/os_truncate.c | 99 + db/os_win32/os_unlink.c | 73 + db/perl/BerkeleyDB/BerkeleyDB.pm | 130 +- db/perl/BerkeleyDB/BerkeleyDB.pod | 90 +- db/perl/BerkeleyDB/BerkeleyDB.pod.P | 90 +- db/perl/BerkeleyDB/BerkeleyDB.xs | 131 +- db/perl/BerkeleyDB/Changes | 28 + db/perl/BerkeleyDB/MANIFEST | 2 + db/perl/BerkeleyDB/Makefile.PL | 15 +- db/perl/BerkeleyDB/README | 39 +- db/perl/BerkeleyDB/config.in | 24 +- db/perl/BerkeleyDB/constants.h | 1376 +- db/perl/BerkeleyDB/mkconsts | 218 +- db/perl/BerkeleyDB/ppport.h | 5 + db/perl/BerkeleyDB/scan | 9 +- db/perl/BerkeleyDB/t/btree.t | 4 +- db/perl/BerkeleyDB/t/cds.t | 80 + db/perl/BerkeleyDB/t/db-3.0.t | 2 +- db/perl/BerkeleyDB/t/db-3.2.t | 4 +- db/perl/BerkeleyDB/t/destroy.t | 2 +- db/perl/BerkeleyDB/t/encrypt.t | 15 +- db/perl/BerkeleyDB/t/env.t | 85 +- db/perl/BerkeleyDB/t/hash.t | 4 +- db/perl/BerkeleyDB/t/join.t | 76 +- db/perl/BerkeleyDB/t/pod.t | 18 + db/perl/BerkeleyDB/t/queue.t | 4 +- db/perl/BerkeleyDB/t/recno.t | 4 +- db/perl/BerkeleyDB/t/strict.t | 10 +- db/perl/BerkeleyDB/t/txn.t | 10 +- db/perl/BerkeleyDB/t/util.pm | 4 + db/perl/BerkeleyDB/typemap | 5 + db/perl/DB_File/Changes | 25 + db/perl/DB_File/DB_File.pm | 31 +- db/perl/DB_File/DB_File.xs | 32 +- db/perl/DB_File/META.yml | 4 +- db/perl/DB_File/Makefile.PL | 5 +- db/perl/DB_File/README | 37 +- db/perl/DB_File/ppport.h | 35 + db/perl/DB_File/t/db-btree.t | 167 +- db/perl/DB_File/t/db-hash.t | 279 +- db/perl/DB_File/t/db-recno.t | 201 +- db/perl/DB_File/typemap | 29 +- db/php_db4/ABOUT | 62 + db/php_db4/INSTALL | 33 + db/php_db4/config.m4 | 59 + db/php_db4/db4.c | 1994 +++ db/php_db4/php_db4.h | 75 + db/qam/qam.c | 306 +- db/qam/qam.src | 6 +- db/qam/qam_auto.c | 936 +- db/qam/qam_autop.c | 274 + db/qam/qam_conv.c | 8 +- db/qam/qam_files.c | 150 +- db/qam/qam_method.c | 256 +- db/qam/qam_open.c | 11 +- db/qam/qam_rec.c | 63 +- db/qam/qam_stat.c | 109 +- db/qam/qam_stub.c | 36 +- db/qam/qam_upgrade.c | 10 +- db/qam/qam_verify.c | 54 +- db/rep/rep.src | 48 + db/rep/rep_auto.c | 268 + db/rep/rep_autop.c | 17 + db/rep/rep_backup.c | 1784 +++ db/rep/rep_method.c | 556 +- db/rep/rep_record.c | 1854 ++- db/rep/rep_region.c | 132 +- db/rep/rep_stat.c | 491 + db/rep/rep_stub.c | 86 +- db/rep/rep_util.c | 453 +- db/rpc_client/client.c | 30 +- db/rpc_client/gen_client.c | 199 +- db/rpc_client/gen_client_ret.c | 95 +- db/rpc_server/c/db_server_proc.c | 338 +- db/rpc_server/c/db_server_util.c | 48 +- db/rpc_server/c/gen_db_server.c | 415 +- db/rpc_server/cxx/db_server_cxxproc.cpp | 138 +- 
db/rpc_server/cxx/db_server_cxxutil.cpp | 48 +- db/rpc_server/db_server.x | 6 +- db/rpc_server/java/AssociateCallbacks.java | 178 + db/rpc_server/java/Dispatcher.java | 721 + db/rpc_server/java/FreeList.java | 149 +- db/rpc_server/java/JoinCursorAdapter.java | 170 + db/rpc_server/java/LocalIterator.java | 12 +- db/rpc_server/java/README | 2 +- db/rpc_server/java/RpcDb.java | 1601 +- db/rpc_server/java/RpcDbEnv.java | 676 +- db/rpc_server/java/RpcDbTxn.java | 223 +- db/rpc_server/java/RpcDbc.java | 526 +- db/rpc_server/java/Server.java | 328 + db/rpc_server/java/Timer.java | 13 +- db/rpc_server/java/Util.java | 170 + db/rpc_server/java/gen/ServerStubs.java | 657 + db/rpc_server/java/gen/__db_open_reply.java | 5 +- db/rpc_server/java/gen/__db_stat_msg.java | 5 +- db/rpc_server/java/gen/db_server.java | 140 +- db/rpc_server/java/s_jrpcgen | 2 +- db/rpc_server/rpc.src | 20 +- db/sequence/seq_stat.c | 244 + db/sequence/sequence.c | 635 + db/tcl/docs/db.html | 2 +- db/tcl/docs/env.html | 6 +- db/tcl/docs/historic.html | 2 +- db/tcl/docs/index.html | 2 +- db/tcl/docs/library.html | 2 +- db/tcl/docs/lock.html | 2 +- db/tcl/docs/log.html | 2 +- db/tcl/docs/mpool.html | 2 +- db/tcl/docs/rep.html | 2 +- db/tcl/docs/sequence.html | 92 + db/tcl/docs/test.html | 2 +- db/tcl/docs/txn.html | 2 +- db/tcl/tcl_compat.c | 80 +- db/tcl/tcl_db.c | 467 +- db/tcl/tcl_db_pkg.c | 698 +- db/tcl/tcl_dbcursor.c | 69 +- db/tcl/tcl_env.c | 364 +- db/tcl/tcl_internal.c | 169 +- db/tcl/tcl_lock.c | 59 +- db/tcl/tcl_log.c | 18 +- db/tcl/tcl_mp.c | 65 +- db/tcl/tcl_rep.c | 84 +- db/tcl/tcl_seq.c | 526 + db/tcl/tcl_txn.c | 57 +- db/tcl/tcl_util.c | 76 +- db/test/README | 4 +- db/test/TESTS | 334 +- db/test/archive.tcl | 176 +- db/test/bigfile001.tcl | 4 +- db/test/bigfile002.tcl | 4 +- db/test/byteorder.tcl | 16 +- db/test/conscript.tcl | 4 +- db/test/dbm.tcl | 4 +- db/test/dbscript.tcl | 4 +- db/test/ddoyscript.tcl | 4 +- db/test/ddscript.tcl | 8 +- db/test/dead001.tcl | 4 +- db/test/dead002.tcl | 6 +- db/test/dead003.tcl | 4 +- db/test/dead004.tcl | 4 +- db/test/dead005.tcl | 17 +- db/test/dead006.tcl | 4 +- db/test/dead007.tcl | 4 +- db/test/env001.tcl | 4 +- db/test/env002.tcl | 4 +- db/test/env003.tcl | 4 +- db/test/env004.tcl | 24 +- db/test/env005.tcl | 4 +- db/test/env006.tcl | 53 +- db/test/env007.tcl | 166 +- db/test/env008.tcl | 4 +- db/test/env009.tcl | 63 +- db/test/env010.tcl | 4 +- db/test/env011.tcl | 4 +- db/test/fop001.tcl | 42 +- db/test/fop002.tcl | 34 +- db/test/fop003.tcl | 38 +- db/test/fop004.tcl | 366 +- db/test/fop005.tcl | 193 +- db/test/fop006.tcl | 49 +- db/test/fopscript.tcl | 24 +- db/test/foputils.tcl | 152 +- db/test/hsearch.tcl | 4 +- db/test/include.tcl | 1 + db/test/join.tcl | 4 +- db/test/lock001.tcl | 4 +- db/test/lock002.tcl | 4 +- db/test/lock003.tcl | 4 +- db/test/lock004.tcl | 4 +- db/test/lock005.tcl | 4 +- db/test/lock006.tcl | 4 +- db/test/lockscript.tcl | 4 +- db/test/log001.tcl | 57 +- db/test/log002.tcl | 48 +- db/test/log003.tcl | 49 +- db/test/log004.tcl | 12 +- db/test/log005.tcl | 41 +- db/test/log006.tcl | 41 +- db/test/logtrack.list | 10 +- db/test/logtrack.tcl | 7 +- db/test/mdbscript.tcl | 4 +- db/test/memp001.tcl | 4 +- db/test/memp002.tcl | 4 +- db/test/memp003.tcl | 4 +- db/test/memp004.tcl | 4 +- db/test/mpoolscript.tcl | 4 +- db/test/mutex001.tcl | 4 +- db/test/mutex002.tcl | 4 +- db/test/mutex003.tcl | 4 +- db/test/mutexscript.tcl | 4 +- db/test/ndbm.tcl | 4 +- db/test/parallel.tcl | 33 +- db/test/recd001.tcl | 4 +- db/test/recd002.tcl | 4 +- db/test/recd003.tcl | 
4 +- db/test/recd004.tcl | 4 +- db/test/recd005.tcl | 4 +- db/test/recd006.tcl | 4 +- db/test/recd007.tcl | 6 +- db/test/recd008.tcl | 4 +- db/test/recd009.tcl | 4 +- db/test/recd010.tcl | 4 +- db/test/recd011.tcl | 4 +- db/test/recd012.tcl | 5 +- db/test/recd013.tcl | 24 +- db/test/recd014.tcl | 4 +- db/test/recd015.tcl | 4 +- db/test/recd016.tcl | 6 +- db/test/recd017.tcl | 4 +- db/test/recd018.tcl | 4 +- db/test/recd019.tcl | 6 +- db/test/recd020.tcl | 80 + db/test/recd021.tcl | 280 + db/test/recd15scr.tcl | 4 +- db/test/recdscript.tcl | 4 +- db/test/rep001.tcl | 242 +- db/test/rep002.tcl | 285 +- db/test/rep003.tcl | 125 +- db/test/rep005.tcl | 324 +- db/test/rep006.tcl | 130 +- db/test/rep007.tcl | 229 +- db/test/rep008.tcl | 109 +- db/test/rep009.tcl | 95 +- db/test/rep010.tcl | 152 +- db/test/rep011.tcl | 125 +- db/test/rep012.tcl | 131 +- db/test/rep013.tcl | 153 +- db/test/rep014.tcl | 155 + db/test/rep015.tcl | 300 + db/test/rep016.tcl | 264 + db/test/rep017.tcl | 240 + db/test/rep017script.tcl | 85 + db/test/rep018.tcl | 159 + db/test/rep018script.tcl | 94 + db/test/rep019.tcl | 155 + db/test/rep020.tcl | 254 + db/test/rep021.tcl | 304 + db/test/rep022.tcl | 281 + db/test/rep023.tcl | 159 + db/test/rep024.tcl | 202 + db/test/rep026.tcl | 250 + db/test/rep027.tcl | 161 + db/test/rep028.tcl | 209 + db/test/rep029.tcl | 207 + db/test/rep030.tcl | 293 + db/test/rep031.tcl | 211 + db/test/rep032.tcl | 144 + db/test/rep033.tcl | 215 + db/test/rep034.tcl | 161 + db/test/rep035.tcl | 242 + db/test/rep035script.tcl | 76 + db/test/rep036.tcl | 172 + db/test/rep036script.tcl | 107 + db/test/rep037.tcl | 168 + db/test/reputils.tcl | 566 +- db/test/rpc001.tcl | 304 +- db/test/rpc002.tcl | 6 +- db/test/rpc003.tcl | 20 +- db/test/rpc004.tcl | 4 +- db/test/rpc005.tcl | 45 +- db/test/rpc006.tcl | 77 + db/test/rsrc001.tcl | 4 +- db/test/rsrc002.tcl | 4 +- db/test/rsrc003.tcl | 4 +- db/test/rsrc004.tcl | 4 +- db/test/scr003/chk.define | 9 +- db/test/scr006/chk.offt | 18 +- db/test/scr007/chk.proto | 3 +- db/test/scr008/chk.pubdef | 85 +- db/test/scr009/chk.srcfiles | 11 +- db/test/scr010/chk.str | 17 +- db/test/scr010/spell.ok | 2581 ++++ db/test/scr011/chk.tags | 7 +- db/test/scr012/chk.vx_code | 19 +- db/test/scr013/chk.stats | 58 +- db/test/scr015/TestConstruct01.cpp | 4 +- db/test/scr015/TestGetSetMethods.cpp | 4 +- db/test/scr015/TestKeyRange.cpp | 4 +- db/test/scr015/TestLogc.cpp | 4 +- db/test/scr015/TestSimpleAccess.cpp | 4 +- db/test/scr015/TestTruncate.cpp | 4 +- db/test/scr015/chk.cxxtests | 6 +- db/test/scr016/CallbackTest.testout | 10 +- db/test/scr016/TestAppendRecno.java | 4 +- db/test/scr016/TestAssociate.java | 4 +- db/test/scr016/TestCallback.java | 4 +- db/test/scr016/TestCallback.testout | 1 - db/test/scr016/TestClosedDb.java | 4 +- db/test/scr016/TestConstruct01.java | 4 +- db/test/scr016/TestConstruct02.java | 4 +- db/test/scr016/TestDbtFlags.java | 4 +- db/test/scr016/TestGetSetMethods.java | 4 +- db/test/scr016/TestKeyRange.java | 4 +- db/test/scr016/TestLockVec.java | 4 +- db/test/scr016/TestLogc.java | 4 +- db/test/scr016/TestOpenEmpty.java | 4 +- db/test/scr016/TestReplication.java | 4 +- db/test/scr016/TestRpcServer.java | 4 +- db/test/scr016/TestSameDbt.java | 4 +- db/test/scr016/TestSimpleAccess.java | 4 +- db/test/scr016/TestStat.java | 4 +- db/test/scr016/TestTruncate.java | 4 +- db/test/scr016/TestUtil.java | 4 +- db/test/scr016/TestXAServlet.java | 4 +- db/test/scr019/chk.include | 8 +- db/test/scr021/chk.flags | 25 +- db/test/scr022/chk.rr | 12 +- 
db/test/scr024/Makefile | 53 +- db/test/scr024/chk.bdb | 69 +- db/test/scr024/coverage/build.xml | 70 +- .../bind/serial/test/MarshalledObject.java | 128 + .../bind/serial/test/NullClassCatalog.java | 44 + .../bind/serial/test/SerialBindingTest.java | 207 + .../bind/serial/test/TestClassCatalog.java | 60 + .../com/sleepycat/bind/test/BindingSpeedTest.java | 368 + .../bind/tuple/test/MarshalledObject.java | 138 + .../bind/tuple/test/TupleBindingTest.java | 254 + .../sleepycat/bind/tuple/test/TupleFormatTest.java | 756 + .../bind/tuple/test/TupleOrderingTest.java | 357 + .../com/sleepycat/collections/KeyRangeTest.java | 441 + .../sleepycat/collections/test/CollectionTest.java | 2814 ++++ .../com/sleepycat/collections/test/DbTestUtil.java | 132 + .../collections/test/IterDeadlockTest.java | 232 + .../com/sleepycat/collections/test/JoinTest.java | 232 + .../collections/test/NullTransactionRunner.java | 33 + .../collections/test/SecondaryDeadlockTest.java | 193 + .../collections/test/TestDataBinding.java | 34 + .../com/sleepycat/collections/test/TestEntity.java | 45 + .../collections/test/TestEntityBinding.java | 64 + .../com/sleepycat/collections/test/TestEnv.java | 121 + .../collections/test/TestKeyAssigner.java | 45 + .../sleepycat/collections/test/TestKeyCreator.java | 60 + .../com/sleepycat/collections/test/TestStore.java | 279 + .../collections/test/TransactionTest.java | 626 + .../test/serial/StoredClassCatalogTest.java | 179 + .../test/serial/StoredClassCatalogTestInit.java | 159 + .../collections/test/serial/TestSerial.java | 70 + .../test/serial/TestSerial.java.original | 69 + .../sleepycat/util/test/ExceptionWrapperTest.java | 110 + .../src/com/sleepycat/util/test/UtfTest.java | 169 + db/test/scr026/chk.method | 5 +- db/test/scr027/chk.javas | 34 +- db/test/scr028/chk.rtc | 26 + db/test/scr028/t.c | 76 + db/test/scr029/chk.get | 26 + db/test/scr029/t.c | 203 + db/test/scr030/chk.build | 72 + db/test/sdb001.tcl | 7 +- db/test/sdb002.tcl | 10 +- db/test/sdb003.tcl | 4 +- db/test/sdb004.tcl | 4 +- db/test/sdb005.tcl | 4 +- db/test/sdb006.tcl | 4 +- db/test/sdb007.tcl | 8 +- db/test/sdb008.tcl | 4 +- db/test/sdb009.tcl | 4 +- db/test/sdb010.tcl | 46 +- db/test/sdb011.tcl | 8 +- db/test/sdb012.tcl | 4 +- db/test/sdb013.tcl | 185 + db/test/sdbscript.tcl | 4 +- db/test/sdbtest001.tcl | 4 +- db/test/sdbtest002.tcl | 7 +- db/test/sdbutils.tcl | 4 +- db/test/sec001.tcl | 28 +- db/test/sec002.tcl | 14 +- db/test/shelltest.tcl | 7 +- db/test/si001.tcl | 14 +- db/test/si002.tcl | 4 +- db/test/si003.tcl | 4 +- db/test/si004.tcl | 4 +- db/test/si005.tcl | 6 +- db/test/sijointest.tcl | 4 +- db/test/siutils.tcl | 8 +- db/test/sysscript.tcl | 4 +- db/test/t106script.tcl | 4 +- db/test/test.tcl | 454 +- db/test/test001.tcl | 22 +- db/test/test002.tcl | 4 +- db/test/test003.tcl | 9 +- db/test/test004.tcl | 4 +- db/test/test005.tcl | 4 +- db/test/test006.tcl | 10 +- db/test/test007.tcl | 4 +- db/test/test008.tcl | 4 +- db/test/test009.tcl | 4 +- db/test/test010.tcl | 4 +- db/test/test011.tcl | 4 +- db/test/test012.tcl | 4 +- db/test/test013.tcl | 5 +- db/test/test014.tcl | 4 +- db/test/test015.tcl | 4 +- db/test/test016.tcl | 4 +- db/test/test017.tcl | 4 +- db/test/test018.tcl | 4 +- db/test/test019.tcl | 13 +- db/test/test020.tcl | 4 +- db/test/test021.tcl | 4 +- db/test/test022.tcl | 4 +- db/test/test023.tcl | 12 +- db/test/test024.tcl | 4 +- db/test/test025.tcl | 4 +- db/test/test026.tcl | 4 +- db/test/test027.tcl | 4 +- db/test/test028.tcl | 4 +- db/test/test029.tcl | 16 +- db/test/test030.tcl | 4 +- 
db/test/test031.tcl | 4 +- db/test/test032.tcl | 4 +- db/test/test033.tcl | 4 +- db/test/test034.tcl | 4 +- db/test/test035.tcl | 4 +- db/test/test036.tcl | 4 +- db/test/test037.tcl | 4 +- db/test/test038.tcl | 4 +- db/test/test039.tcl | 4 +- db/test/test040.tcl | 4 +- db/test/test041.tcl | 4 +- db/test/test042.tcl | 6 +- db/test/test043.tcl | 4 +- db/test/test044.tcl | 4 +- db/test/test045.tcl | 4 +- db/test/test046.tcl | 4 +- db/test/test047.tcl | 4 +- db/test/test048.tcl | 7 +- db/test/test049.tcl | 4 +- db/test/test050.tcl | 4 +- db/test/test051.tcl | 6 +- db/test/test052.tcl | 33 +- db/test/test053.tcl | 22 +- db/test/test054.tcl | 17 +- db/test/test055.tcl | 4 +- db/test/test056.tcl | 6 +- db/test/test057.tcl | 75 +- db/test/test058.tcl | 4 +- db/test/test059.tcl | 4 +- db/test/test060.tcl | 4 +- db/test/test061.tcl | 4 +- db/test/test062.tcl | 4 +- db/test/test063.tcl | 4 +- db/test/test064.tcl | 4 +- db/test/test065.tcl | 15 +- db/test/test066.tcl | 4 +- db/test/test067.tcl | 9 +- db/test/test068.tcl | 9 +- db/test/test069.tcl | 4 +- db/test/test070.tcl | 6 +- db/test/test071.tcl | 4 +- db/test/test072.tcl | 11 +- db/test/test073.tcl | 4 +- db/test/test074.tcl | 8 +- db/test/test075.tcl | 4 +- db/test/test076.tcl | 4 +- db/test/test077.tcl | 4 +- db/test/test078.tcl | 163 +- db/test/test079.tcl | 4 +- db/test/test080.tcl | 4 +- db/test/test081.tcl | 4 +- db/test/test082.tcl | 4 +- db/test/test083.tcl | 4 +- db/test/test084.tcl | 4 +- db/test/test085.tcl | 11 +- db/test/test086.tcl | 4 +- db/test/test087.tcl | 6 +- db/test/test088.tcl | 10 +- db/test/test089.tcl | 4 +- db/test/test090.tcl | 4 +- db/test/test091.tcl | 4 +- db/test/test092.tcl | 26 +- db/test/test093.tcl | 4 +- db/test/test094.tcl | 6 +- db/test/test095.tcl | 25 +- db/test/test096.tcl | 74 +- db/test/test097.tcl | 4 +- db/test/test098.tcl | 4 +- db/test/test099.tcl | 4 +- db/test/test100.tcl | 4 +- db/test/test101.tcl | 4 +- db/test/test102.tcl | 18 +- db/test/test103.tcl | 4 +- db/test/test106.tcl | 4 +- db/test/test107.tcl | 163 + db/test/test109.tcl | 288 + db/test/testparams.tcl | 94 +- db/test/testutils.tcl | 306 +- db/test/txn001.tcl | 4 +- db/test/txn002.tcl | 4 +- db/test/txn003.tcl | 4 +- db/test/txn004.tcl | 4 +- db/test/txn005.tcl | 4 +- db/test/txn006.tcl | 4 +- db/test/txn007.tcl | 4 +- db/test/txn008.tcl | 4 +- db/test/txn009.tcl | 4 +- db/test/txn010.tcl | 4 +- db/test/txn011.tcl | 43 +- db/test/txnscript.tcl | 4 +- db/test/update.tcl | 4 +- db/test/upgrade.tcl | 12 +- db/test/wrap.tcl | 4 +- db/txn/txn.c | 230 +- db/txn/txn.src | 19 +- db/txn/txn_auto.c | 954 +- db/txn/txn_autop.c | 281 + db/txn/txn_method.c | 25 +- db/txn/txn_rec.c | 158 +- db/txn/txn_recover.c | 162 +- db/txn/txn_region.c | 91 +- db/txn/txn_stat.c | 315 +- db/txn/txn_util.c | 16 +- db/xa/xa.c | 236 +- db/xa/xa_db.c | 8 +- db/xa/xa_map.c | 64 +- 2535 files changed, 305154 insertions(+), 63539 deletions(-) create mode 100644 db/btree/btree_autop.c create mode 100755 db/build_vxworks/BerkeleyDB20small.wpj create mode 100755 db/build_vxworks/BerkeleyDB20small.wsp create mode 100755 db/build_vxworks/BerkeleyDB22small.wpj create mode 100755 db/build_vxworks/BerkeleyDB22small.wsp create mode 100644 db/build_vxworks/db_config_small.h create mode 100644 db/build_win64/Berkeley_DB.dsw create mode 100644 db/build_win64/app_dsp.src create mode 100644 db/build_win64/build_all.dsp create mode 100644 db/build_win64/db.h create mode 100644 db/build_win64/db_archive.dsp create mode 100644 db/build_win64/db_checkpoint.dsp create mode 100644 
db/build_win64/db_config.h create mode 100644 db/build_win64/db_cxx.h create mode 100644 db/build_win64/db_deadlock.dsp create mode 100644 db/build_win64/db_dll.dsp create mode 100644 db/build_win64/db_dump.dsp create mode 100644 db/build_win64/db_int.h create mode 100644 db/build_win64/db_java.dsp create mode 100644 db/build_win64/db_lib.dsp create mode 100644 db/build_win64/db_load.dsp create mode 100644 db/build_win64/db_perf.dsp create mode 100644 db/build_win64/db_printlog.dsp create mode 100644 db/build_win64/db_recover.dsp create mode 100644 db/build_win64/db_small.dsp create mode 100644 db/build_win64/db_stat.dsp create mode 100644 db/build_win64/db_static.dsp create mode 100644 db/build_win64/db_tcl.dsp create mode 100644 db/build_win64/db_test.dsp create mode 100644 db/build_win64/db_test.src create mode 100644 db/build_win64/db_upgrade.dsp create mode 100644 db/build_win64/db_verify.dsp create mode 100644 db/build_win64/dynamic_dsp.src create mode 100644 db/build_win64/ex_access.dsp create mode 100644 db/build_win64/ex_btrec.dsp create mode 100644 db/build_win64/ex_env.dsp create mode 100644 db/build_win64/ex_lock.dsp create mode 100644 db/build_win64/ex_mpool.dsp create mode 100644 db/build_win64/ex_repquote.dsp create mode 100644 db/build_win64/ex_repquote.src create mode 100644 db/build_win64/ex_tpcb.dsp create mode 100644 db/build_win64/excxx_access.dsp create mode 100644 db/build_win64/excxx_btrec.dsp create mode 100644 db/build_win64/excxx_env.dsp create mode 100644 db/build_win64/excxx_lock.dsp create mode 100644 db/build_win64/excxx_mpool.dsp create mode 100644 db/build_win64/excxx_tpcb.dsp create mode 100644 db/build_win64/java_dsp.src create mode 100644 db/build_win64/libdbrc.src create mode 100644 db/build_win64/small_dsp.src create mode 100644 db/build_win64/srcfile_dsp.src create mode 100644 db/build_win64/static_dsp.src create mode 100644 db/build_win64/tcl_dsp.src create mode 100644 db/build_win64/win_db.h create mode 100644 db/cxx/cxx_seq.cpp create mode 100644 db/db/crdel_autop.c create mode 100644 db/db/db_autop.c create mode 100644 db/db/db_setid.c create mode 100644 db/db/db_setlsn.c create mode 100644 db/db/db_stati.c create mode 100644 db/dbinc_auto/rep_auto.h create mode 100644 db/dbinc_auto/sequence_ext.h create mode 100644 db/dbreg/dbreg_autop.c create mode 100644 db/dbreg/dbreg_stat.c create mode 100644 db/dist/aclocal/rpc.ac create mode 100644 db/dist/aclocal/sequence.ac create mode 100644 db/dist/s_je2db create mode 100644 db/dist/vx_2.0/BerkeleyDBsmall.wpj create mode 100644 db/dist/vx_2.2/BerkeleyDBsmall.wpj create mode 100644 db/docs/api_c/db_set_msgcall.html create mode 100644 db/docs/api_c/db_set_msgfile.html create mode 100644 db/docs/api_c/dbt_package.html create mode 100644 db/docs/api_c/env_set_msgcall.html create mode 100644 db/docs/api_c/env_set_msgfile.html create mode 100644 db/docs/api_c/env_stat.html create mode 100644 db/docs/api_c/frame.html create mode 100644 db/docs/api_c/memp_maxwrite.html create mode 100644 db/docs/api_c/memp_openfd.html create mode 100644 db/docs/api_c/object.html create mode 100644 db/docs/api_c/seq_class.html create mode 100644 db/docs/api_c/seq_close.html create mode 100644 db/docs/api_c/seq_get.html create mode 100644 db/docs/api_c/seq_init_value.html create mode 100644 db/docs/api_c/seq_list.html create mode 100644 db/docs/api_c/seq_open.html create mode 100644 db/docs/api_c/seq_remove.html create mode 100644 db/docs/api_c/seq_set_cachesize.html create mode 100644 db/docs/api_c/seq_set_flags.html create mode 
100644 db/docs/api_c/seq_set_range.html create mode 100644 db/docs/api_c/seq_stat.html create mode 100644 db/docs/api_c/set_func_ftruncate.html create mode 100644 db/docs/api_c/set_func_pread.html create mode 100644 db/docs/api_c/set_func_pwrite.html create mode 100644 db/docs/api_cxx/db_set_msg_stream.html create mode 100644 db/docs/api_cxx/db_set_msgcall.html create mode 100644 db/docs/api_cxx/db_set_msgfile.html create mode 100644 db/docs/api_cxx/dbt_package.html create mode 100644 db/docs/api_cxx/env_set_msg_stream.html create mode 100644 db/docs/api_cxx/env_set_msgcall.html create mode 100644 db/docs/api_cxx/env_set_msgfile.html create mode 100644 db/docs/api_cxx/env_stat.html create mode 100644 db/docs/api_cxx/exc_package.html create mode 100644 db/docs/api_cxx/frame.html create mode 100644 db/docs/api_cxx/memp_maxwrite.html create mode 100644 db/docs/api_cxx/memp_openfd.html create mode 100644 db/docs/api_cxx/object.html create mode 100644 db/docs/api_cxx/seq_class.html create mode 100644 db/docs/api_cxx/seq_close.html create mode 100644 db/docs/api_cxx/seq_get.html create mode 100644 db/docs/api_cxx/seq_init_value.html create mode 100644 db/docs/api_cxx/seq_list.html create mode 100644 db/docs/api_cxx/seq_open.html create mode 100644 db/docs/api_cxx/seq_remove.html create mode 100644 db/docs/api_cxx/seq_set_cachesize.html create mode 100644 db/docs/api_cxx/seq_set_flags.html create mode 100644 db/docs/api_cxx/seq_set_range.html create mode 100644 db/docs/api_cxx/seq_stat.html create mode 100644 db/docs/collections/tutorial/BasicProgram.html create mode 100644 db/docs/collections/tutorial/BerkeleyDB-Java-Collections.pdf create mode 100644 db/docs/collections/tutorial/Entity.html create mode 100644 db/docs/collections/tutorial/SerializableEntity.html create mode 100644 db/docs/collections/tutorial/SerializedObjectStorage.html create mode 100644 db/docs/collections/tutorial/Summary.html create mode 100644 db/docs/collections/tutorial/Tuple.html create mode 100644 db/docs/collections/tutorial/UsingCollectionsAPI.html create mode 100644 db/docs/collections/tutorial/UsingSecondaries.html create mode 100644 db/docs/collections/tutorial/UsingStoredCollections.html create mode 100644 db/docs/collections/tutorial/addingdatabaseitems.html create mode 100644 db/docs/collections/tutorial/collectionOverview.html create mode 100644 db/docs/collections/tutorial/collectionswithentities.html create mode 100644 db/docs/collections/tutorial/createbindingscollections.html create mode 100644 db/docs/collections/tutorial/creatingentitybindings.html create mode 100644 db/docs/collections/tutorial/developing.html create mode 100644 db/docs/collections/tutorial/entitieswithcollections.html create mode 100644 db/docs/collections/tutorial/gettingStarted.css create mode 100644 db/docs/collections/tutorial/handlingexceptions.html create mode 100644 db/docs/collections/tutorial/implementingmain.html create mode 100644 db/docs/collections/tutorial/index.html create mode 100644 db/docs/collections/tutorial/indexedcollections.html create mode 100644 db/docs/collections/tutorial/intro.html create mode 100644 db/docs/collections/tutorial/openclasscatalog.html create mode 100644 db/docs/collections/tutorial/opendatabases.html create mode 100644 db/docs/collections/tutorial/opendbenvironment.html create mode 100644 db/docs/collections/tutorial/openingforeignkeys.html create mode 100644 db/docs/collections/tutorial/preface.html create mode 100644 db/docs/collections/tutorial/removingredundantvalueclasses.html create 
mode 100644 db/docs/collections/tutorial/retrievingbyindexkey.html create mode 100644 db/docs/collections/tutorial/retrievingdatabaseitems.html create mode 100644 db/docs/collections/tutorial/sortedcollections.html create mode 100644 db/docs/collections/tutorial/transientfieldsinbinding.html create mode 100644 db/docs/collections/tutorial/tuple-serialentitybindings.html create mode 100644 db/docs/collections/tutorial/tuplekeybindings.html create mode 100644 db/docs/collections/tutorial/tupleswithkeycreators.html create mode 100644 db/docs/collections/tutorial/tutorialintroduction.html create mode 100644 db/docs/collections/tutorial/usingtransactions.html create mode 100644 db/docs/gsg/C/BerkeleyDB-Core-C-GSG.pdf create mode 100644 db/docs/gsg/C/CoreCursorUsage.html create mode 100644 db/docs/gsg/C/CoreDBAdmin.html create mode 100644 db/docs/gsg/C/CoreDbUsage.html create mode 100644 db/docs/gsg/C/CoreEnvUsage.html create mode 100644 db/docs/gsg/C/Cursors.html create mode 100644 db/docs/gsg/C/DB.html create mode 100644 db/docs/gsg/C/DBEntry.html create mode 100644 db/docs/gsg/C/DBOpenFlags.html create mode 100644 db/docs/gsg/C/DbUsage.html create mode 100644 db/docs/gsg/C/DeleteEntryWCursor.html create mode 100644 db/docs/gsg/C/Positioning.html create mode 100644 db/docs/gsg/C/PutEntryWCursor.html create mode 100644 db/docs/gsg/C/ReplacingEntryWCursor.html create mode 100644 db/docs/gsg/C/accessmethods.html create mode 100644 db/docs/gsg/C/btree.html create mode 100644 db/docs/gsg/C/cachesize.html create mode 100644 db/docs/gsg/C/concepts.html create mode 100644 db/docs/gsg/C/coredbclose.html create mode 100644 db/docs/gsg/C/coreindexusage.html create mode 100644 db/docs/gsg/C/cstructs.html create mode 100644 db/docs/gsg/C/databaseLimits.html create mode 100644 db/docs/gsg/C/dbErrorReporting.html create mode 100644 db/docs/gsg/C/dbconfig.html create mode 100644 db/docs/gsg/C/environments.html create mode 100644 db/docs/gsg/C/gettingStarted.css create mode 100644 db/docs/gsg/C/gettingit.html create mode 100644 db/docs/gsg/C/index.html create mode 100644 db/docs/gsg/C/indexes.html create mode 100644 db/docs/gsg/C/introduction.html create mode 100644 db/docs/gsg/C/joins.html create mode 100644 db/docs/gsg/C/keyCreator.html create mode 100644 db/docs/gsg/C/preface.html create mode 100644 db/docs/gsg/C/readSecondary.html create mode 100644 db/docs/gsg/C/secondaryCursor.html create mode 100644 db/docs/gsg/C/secondaryDelete.html create mode 100644 db/docs/gsg/C/usingDbt.html create mode 100644 db/docs/gsg/CXX/BerkeleyDB-Core-Cxx-GSG.pdf create mode 100644 db/docs/gsg/CXX/CoreCursorUsage.html create mode 100644 db/docs/gsg/CXX/CoreDBAdmin.html create mode 100644 db/docs/gsg/CXX/CoreDbCXXUsage.html create mode 100644 db/docs/gsg/CXX/CoreEnvUsage.html create mode 100644 db/docs/gsg/CXX/Cursors.html create mode 100644 db/docs/gsg/CXX/DB.html create mode 100644 db/docs/gsg/CXX/DBEntry.html create mode 100644 db/docs/gsg/CXX/DBOpenFlags.html create mode 100644 db/docs/gsg/CXX/DbCXXUsage.html create mode 100644 db/docs/gsg/CXX/DeleteEntryWCursor.html create mode 100644 db/docs/gsg/CXX/Positioning.html create mode 100644 db/docs/gsg/CXX/PutEntryWCursor.html create mode 100644 db/docs/gsg/CXX/ReplacingEntryWCursor.html create mode 100644 db/docs/gsg/CXX/accessmethods.html create mode 100644 db/docs/gsg/CXX/btree.html create mode 100644 db/docs/gsg/CXX/cachesize.html create mode 100644 db/docs/gsg/CXX/concepts.html create mode 100644 db/docs/gsg/CXX/coreExceptions.html create mode 100644 
db/docs/gsg/CXX/coredbclose.html create mode 100644 db/docs/gsg/CXX/coreindexusage.html create mode 100644 db/docs/gsg/CXX/databaseLimits.html create mode 100644 db/docs/gsg/CXX/dbErrorReporting.html create mode 100644 db/docs/gsg/CXX/dbconfig.html create mode 100644 db/docs/gsg/CXX/environments.html create mode 100644 db/docs/gsg/CXX/gettingStarted.css create mode 100644 db/docs/gsg/CXX/gettingit.html create mode 100644 db/docs/gsg/CXX/index.html create mode 100644 db/docs/gsg/CXX/indexes.html create mode 100644 db/docs/gsg/CXX/introduction.html create mode 100644 db/docs/gsg/CXX/joins.html create mode 100644 db/docs/gsg/CXX/keyCreator.html create mode 100644 db/docs/gsg/CXX/preface.html create mode 100644 db/docs/gsg/CXX/readSecondary.html create mode 100644 db/docs/gsg/CXX/secondaryCursor.html create mode 100644 db/docs/gsg/CXX/secondaryDelete.html create mode 100644 db/docs/gsg/CXX/usingDbt.html create mode 100644 db/docs/gsg/JAVA/BerkeleyDB-Core-JAVA-GSG.pdf create mode 100644 db/docs/gsg/JAVA/CoreEnvUsage.html create mode 100644 db/docs/gsg/JAVA/CoreJavaUsage.html create mode 100644 db/docs/gsg/JAVA/Cursors.html create mode 100644 db/docs/gsg/JAVA/DB.html create mode 100644 db/docs/gsg/JAVA/DBAdmin.html create mode 100644 db/docs/gsg/JAVA/DBEntry.html create mode 100644 db/docs/gsg/JAVA/DeleteEntryWCursor.html create mode 100644 db/docs/gsg/JAVA/Positioning.html create mode 100644 db/docs/gsg/JAVA/PutEntryWCursor.html create mode 100644 db/docs/gsg/JAVA/ReplacingEntryWCursor.html create mode 100644 db/docs/gsg/JAVA/accessmethods.html create mode 100644 db/docs/gsg/JAVA/bindAPI.html create mode 100644 db/docs/gsg/JAVA/btree.html create mode 100644 db/docs/gsg/JAVA/cachesize.html create mode 100644 db/docs/gsg/JAVA/concepts.html create mode 100644 db/docs/gsg/JAVA/coreExceptions.html create mode 100644 db/docs/gsg/JAVA/coredbclose.html create mode 100644 db/docs/gsg/JAVA/cursorJavaUsage.html create mode 100644 db/docs/gsg/JAVA/databaseLimits.html create mode 100644 db/docs/gsg/JAVA/dbErrorReporting.html create mode 100644 db/docs/gsg/JAVA/db_config.html create mode 100644 db/docs/gsg/JAVA/dbconfig.html create mode 100644 db/docs/gsg/JAVA/dbtJavaUsage.html create mode 100644 db/docs/gsg/JAVA/environments.html create mode 100644 db/docs/gsg/JAVA/gettingStarted.css create mode 100644 db/docs/gsg/JAVA/gettingit.html create mode 100644 db/docs/gsg/JAVA/index.html create mode 100644 db/docs/gsg/JAVA/indexes.html create mode 100644 db/docs/gsg/JAVA/introduction.html create mode 100644 db/docs/gsg/JAVA/javaindexusage.html create mode 100644 db/docs/gsg/JAVA/joins.html create mode 100644 db/docs/gsg/JAVA/keyCreator.html create mode 100644 db/docs/gsg/JAVA/preface.html create mode 100644 db/docs/gsg/JAVA/readSecondary.html create mode 100644 db/docs/gsg/JAVA/returns.html create mode 100644 db/docs/gsg/JAVA/secondaryCursor.html create mode 100644 db/docs/gsg/JAVA/secondaryDelete.html create mode 100644 db/docs/gsg/JAVA/secondaryProps.html create mode 100644 db/docs/gsg/JAVA/usingDbt.html create mode 100644 db/docs/java/com/sleepycat/bind/ByteArrayBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/EntityBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/EntryBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/RecordNumberBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/class-use/ByteArrayBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/class-use/EntityBinding.html create mode 100644 
db/docs/java/com/sleepycat/bind/class-use/EntryBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/class-use/RecordNumberBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/package-frame.html create mode 100644 db/docs/java/com/sleepycat/bind/package-summary.html create mode 100644 db/docs/java/com/sleepycat/bind/package-tree.html create mode 100644 db/docs/java/com/sleepycat/bind/package-use.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/ClassCatalog.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/SerialBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/SerialInput.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/SerialOutput.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/SerialSerialBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/SerialSerialKeyCreator.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/StoredClassCatalog.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/TupleSerialBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/TupleSerialKeyCreator.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/TupleSerialMarshalledBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/TupleSerialMarshalledKeyCreator.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/class-use/ClassCatalog.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/class-use/SerialBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/class-use/SerialInput.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/class-use/SerialOutput.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/class-use/SerialSerialBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/class-use/SerialSerialKeyCreator.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/class-use/StoredClassCatalog.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialKeyCreator.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialMarshalledBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialMarshalledKeyCreator.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/package-frame.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/package-summary.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/package-tree.html create mode 100644 db/docs/java/com/sleepycat/bind/serial/package-use.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/BooleanBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/ByteBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/CharacterBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/DoubleBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/FloatBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/IntegerBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/LongBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/MarshalledTupleEntry.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/MarshalledTupleKeyEntity.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/ShortBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/StringBinding.html create mode 100644 
db/docs/java/com/sleepycat/bind/tuple/TupleBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/TupleInput.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/TupleInputBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/TupleMarshalledBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/TupleOutput.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/TupleTupleBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/TupleTupleKeyCreator.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/TupleTupleMarshalledBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/TupleTupleMarshalledKeyCreator.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/class-use/BooleanBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/class-use/ByteBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/class-use/CharacterBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/class-use/DoubleBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/class-use/FloatBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/class-use/IntegerBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/class-use/LongBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/class-use/MarshalledTupleEntry.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/class-use/MarshalledTupleKeyEntity.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/class-use/ShortBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/class-use/StringBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/class-use/TupleBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/class-use/TupleInput.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/class-use/TupleInputBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/class-use/TupleMarshalledBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/class-use/TupleOutput.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleKeyCreator.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleMarshalledBinding.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleMarshalledKeyCreator.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/package-frame.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/package-summary.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/package-tree.html create mode 100644 db/docs/java/com/sleepycat/bind/tuple/package-use.html create mode 100644 db/docs/java/com/sleepycat/collections/CurrentTransaction.html create mode 100644 db/docs/java/com/sleepycat/collections/MapEntryParameter.html create mode 100644 db/docs/java/com/sleepycat/collections/PrimaryKeyAssigner.html create mode 100644 db/docs/java/com/sleepycat/collections/StoredCollection.html create mode 100644 db/docs/java/com/sleepycat/collections/StoredCollections.html create mode 100644 db/docs/java/com/sleepycat/collections/StoredContainer.html create mode 100644 db/docs/java/com/sleepycat/collections/StoredEntrySet.html create mode 100644 db/docs/java/com/sleepycat/collections/StoredIterator.html create mode 100644 db/docs/java/com/sleepycat/collections/StoredKeySet.html create mode 100644 
db/docs/java/com/sleepycat/collections/StoredList.html create mode 100644 db/docs/java/com/sleepycat/collections/StoredMap.html create mode 100644 db/docs/java/com/sleepycat/collections/StoredSortedEntrySet.html create mode 100644 db/docs/java/com/sleepycat/collections/StoredSortedKeySet.html create mode 100644 db/docs/java/com/sleepycat/collections/StoredSortedMap.html create mode 100644 db/docs/java/com/sleepycat/collections/StoredSortedValueSet.html create mode 100644 db/docs/java/com/sleepycat/collections/StoredValueSet.html create mode 100644 db/docs/java/com/sleepycat/collections/TransactionRunner.html create mode 100644 db/docs/java/com/sleepycat/collections/TransactionWorker.html create mode 100644 db/docs/java/com/sleepycat/collections/TupleSerialFactory.html create mode 100644 db/docs/java/com/sleepycat/collections/class-use/CurrentTransaction.html create mode 100644 db/docs/java/com/sleepycat/collections/class-use/MapEntryParameter.html create mode 100644 db/docs/java/com/sleepycat/collections/class-use/PrimaryKeyAssigner.html create mode 100644 db/docs/java/com/sleepycat/collections/class-use/StoredCollection.html create mode 100644 db/docs/java/com/sleepycat/collections/class-use/StoredCollections.html create mode 100644 db/docs/java/com/sleepycat/collections/class-use/StoredContainer.html create mode 100644 db/docs/java/com/sleepycat/collections/class-use/StoredEntrySet.html create mode 100644 db/docs/java/com/sleepycat/collections/class-use/StoredIterator.html create mode 100644 db/docs/java/com/sleepycat/collections/class-use/StoredKeySet.html create mode 100644 db/docs/java/com/sleepycat/collections/class-use/StoredList.html create mode 100644 db/docs/java/com/sleepycat/collections/class-use/StoredMap.html create mode 100644 db/docs/java/com/sleepycat/collections/class-use/StoredSortedEntrySet.html create mode 100644 db/docs/java/com/sleepycat/collections/class-use/StoredSortedKeySet.html create mode 100644 db/docs/java/com/sleepycat/collections/class-use/StoredSortedMap.html create mode 100644 db/docs/java/com/sleepycat/collections/class-use/StoredSortedValueSet.html create mode 100644 db/docs/java/com/sleepycat/collections/class-use/StoredValueSet.html create mode 100644 db/docs/java/com/sleepycat/collections/class-use/TransactionRunner.html create mode 100644 db/docs/java/com/sleepycat/collections/class-use/TransactionWorker.html create mode 100644 db/docs/java/com/sleepycat/collections/class-use/TupleSerialFactory.html create mode 100644 db/docs/java/com/sleepycat/collections/package-frame.html create mode 100644 db/docs/java/com/sleepycat/collections/package-summary.html create mode 100644 db/docs/java/com/sleepycat/collections/package-tree.html create mode 100644 db/docs/java/com/sleepycat/collections/package-use.html create mode 100644 db/docs/java/com/sleepycat/db/BtreePrefixCalculator.html create mode 100644 db/docs/java/com/sleepycat/db/BtreeStats.html create mode 100644 db/docs/java/com/sleepycat/db/CacheFile.html create mode 100644 db/docs/java/com/sleepycat/db/CacheFilePriority.html create mode 100644 db/docs/java/com/sleepycat/db/CacheFileStats.html create mode 100644 db/docs/java/com/sleepycat/db/CacheStats.html create mode 100644 db/docs/java/com/sleepycat/db/CheckpointConfig.html create mode 100644 db/docs/java/com/sleepycat/db/Cursor.html create mode 100644 db/docs/java/com/sleepycat/db/CursorConfig.html create mode 100644 db/docs/java/com/sleepycat/db/Database.html create mode 100644 db/docs/java/com/sleepycat/db/DatabaseConfig.html create mode 100644 
db/docs/java/com/sleepycat/db/DatabaseEntry.html create mode 100644 db/docs/java/com/sleepycat/db/DatabaseException.html create mode 100644 db/docs/java/com/sleepycat/db/DatabaseStats.html create mode 100644 db/docs/java/com/sleepycat/db/DatabaseType.html create mode 100644 db/docs/java/com/sleepycat/db/DeadlockException.html create mode 100644 db/docs/java/com/sleepycat/db/Environment.html create mode 100644 db/docs/java/com/sleepycat/db/EnvironmentConfig.html create mode 100644 db/docs/java/com/sleepycat/db/ErrorHandler.html create mode 100644 db/docs/java/com/sleepycat/db/FeedbackHandler.html create mode 100644 db/docs/java/com/sleepycat/db/HashStats.html create mode 100644 db/docs/java/com/sleepycat/db/Hasher.html create mode 100644 db/docs/java/com/sleepycat/db/JoinConfig.html create mode 100644 db/docs/java/com/sleepycat/db/JoinCursor.html create mode 100644 db/docs/java/com/sleepycat/db/KeyRange.html create mode 100644 db/docs/java/com/sleepycat/db/Lock.html create mode 100644 db/docs/java/com/sleepycat/db/LockDetectMode.html create mode 100644 db/docs/java/com/sleepycat/db/LockMode.html create mode 100644 db/docs/java/com/sleepycat/db/LockNotGrantedException.html create mode 100644 db/docs/java/com/sleepycat/db/LockOperation.html create mode 100644 db/docs/java/com/sleepycat/db/LockRequest.html create mode 100644 db/docs/java/com/sleepycat/db/LockRequestMode.html create mode 100644 db/docs/java/com/sleepycat/db/LockStats.html create mode 100644 db/docs/java/com/sleepycat/db/LogCursor.html create mode 100644 db/docs/java/com/sleepycat/db/LogRecordHandler.html create mode 100644 db/docs/java/com/sleepycat/db/LogSequenceNumber.html create mode 100644 db/docs/java/com/sleepycat/db/LogStats.html create mode 100644 db/docs/java/com/sleepycat/db/MemoryException.html create mode 100644 db/docs/java/com/sleepycat/db/MessageHandler.html create mode 100644 db/docs/java/com/sleepycat/db/MultipleDataEntry.html create mode 100644 db/docs/java/com/sleepycat/db/MultipleEntry.html create mode 100644 db/docs/java/com/sleepycat/db/MultipleKeyDataEntry.html create mode 100644 db/docs/java/com/sleepycat/db/MultipleRecnoDataEntry.html create mode 100644 db/docs/java/com/sleepycat/db/OperationStatus.html create mode 100644 db/docs/java/com/sleepycat/db/PanicHandler.html create mode 100644 db/docs/java/com/sleepycat/db/PreparedTransaction.html create mode 100644 db/docs/java/com/sleepycat/db/QueueStats.html create mode 100644 db/docs/java/com/sleepycat/db/RecordNumberAppender.html create mode 100644 db/docs/java/com/sleepycat/db/RecoveryOperation.html create mode 100644 db/docs/java/com/sleepycat/db/ReplicationHandleDeadException.html create mode 100644 db/docs/java/com/sleepycat/db/ReplicationStats.html create mode 100644 db/docs/java/com/sleepycat/db/ReplicationStatus.html create mode 100644 db/docs/java/com/sleepycat/db/ReplicationTransport.html create mode 100644 db/docs/java/com/sleepycat/db/RunRecoveryException.html create mode 100644 db/docs/java/com/sleepycat/db/SecondaryConfig.html create mode 100644 db/docs/java/com/sleepycat/db/SecondaryCursor.html create mode 100644 db/docs/java/com/sleepycat/db/SecondaryDatabase.html create mode 100644 db/docs/java/com/sleepycat/db/SecondaryKeyCreator.html create mode 100644 db/docs/java/com/sleepycat/db/Sequence.html create mode 100644 db/docs/java/com/sleepycat/db/SequenceConfig.html create mode 100644 db/docs/java/com/sleepycat/db/SequenceStats.html create mode 100644 db/docs/java/com/sleepycat/db/StatsConfig.html create mode 100644 
db/docs/java/com/sleepycat/db/Transaction.html create mode 100644 db/docs/java/com/sleepycat/db/TransactionConfig.html create mode 100644 db/docs/java/com/sleepycat/db/TransactionStats.Active.html create mode 100644 db/docs/java/com/sleepycat/db/TransactionStats.html create mode 100644 db/docs/java/com/sleepycat/db/VerifyConfig.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/BtreePrefixCalculator.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/BtreeStats.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/CacheFile.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/CacheFilePriority.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/CacheFileStats.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/CacheStats.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/CheckpointConfig.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/Cursor.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/CursorConfig.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/Database.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/DatabaseConfig.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/DatabaseEntry.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/DatabaseException.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/DatabaseStats.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/DatabaseType.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/DeadlockException.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/Environment.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/EnvironmentConfig.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/ErrorHandler.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/FeedbackHandler.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/HashStats.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/Hasher.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/JoinConfig.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/JoinCursor.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/KeyRange.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/Lock.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/LockDetectMode.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/LockMode.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/LockNotGrantedException.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/LockOperation.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/LockRequest.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/LockRequestMode.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/LockStats.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/LogCursor.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/LogRecordHandler.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/LogSequenceNumber.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/LogStats.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/MemoryException.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/MessageHandler.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/MultipleDataEntry.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/MultipleEntry.html create mode 100644 
db/docs/java/com/sleepycat/db/class-use/MultipleKeyDataEntry.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/MultipleRecnoDataEntry.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/OperationStatus.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/PanicHandler.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/PreparedTransaction.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/QueueStats.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/RecordNumberAppender.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/RecoveryOperation.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/ReplicationHandleDeadException.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/ReplicationStats.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/ReplicationStatus.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/ReplicationTransport.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/RunRecoveryException.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/SecondaryConfig.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/SecondaryCursor.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/SecondaryDatabase.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/SecondaryKeyCreator.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/Sequence.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/SequenceConfig.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/SequenceStats.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/StatsConfig.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/Transaction.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/TransactionConfig.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/TransactionStats.Active.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/TransactionStats.html create mode 100644 db/docs/java/com/sleepycat/db/class-use/VerifyConfig.html create mode 100644 db/docs/java/com/sleepycat/util/ExceptionUnwrapper.html create mode 100644 db/docs/java/com/sleepycat/util/ExceptionWrapper.html create mode 100644 db/docs/java/com/sleepycat/util/FastInputStream.html create mode 100644 db/docs/java/com/sleepycat/util/FastOutputStream.html create mode 100644 db/docs/java/com/sleepycat/util/IOExceptionWrapper.html create mode 100644 db/docs/java/com/sleepycat/util/RuntimeExceptionWrapper.html create mode 100644 db/docs/java/com/sleepycat/util/UtfOps.html create mode 100644 db/docs/java/com/sleepycat/util/class-use/ExceptionUnwrapper.html create mode 100644 db/docs/java/com/sleepycat/util/class-use/ExceptionWrapper.html create mode 100644 db/docs/java/com/sleepycat/util/class-use/FastInputStream.html create mode 100644 db/docs/java/com/sleepycat/util/class-use/FastOutputStream.html create mode 100644 db/docs/java/com/sleepycat/util/class-use/IOExceptionWrapper.html create mode 100644 db/docs/java/com/sleepycat/util/class-use/RuntimeExceptionWrapper.html create mode 100644 db/docs/java/com/sleepycat/util/class-use/UtfOps.html create mode 100644 db/docs/java/com/sleepycat/util/package-frame.html create mode 100644 db/docs/java/com/sleepycat/util/package-summary.html create mode 100644 db/docs/java/com/sleepycat/util/package-tree.html create mode 100644 db/docs/java/com/sleepycat/util/package-use.html create mode 100644 db/docs/java/resources/inherit.gif create mode 100644 
db/docs/ref/am/second.javas create mode 100644 db/docs/ref/build_win/unicode.html create mode 100644 db/docs/ref/cam/app.html create mode 100644 db/docs/ref/ext/mod.html create mode 100644 db/docs/ref/ext/perl.html create mode 100644 db/docs/ref/ext/php.html create mode 100644 db/docs/ref/sequence/intro.html create mode 100644 db/docs/ref/upgrade.4.3/cput.html create mode 100644 db/docs/ref/upgrade.4.3/disk.html create mode 100644 db/docs/ref/upgrade.4.3/enomem.html create mode 100644 db/docs/ref/upgrade.4.3/err.html create mode 100644 db/docs/ref/upgrade.4.3/fileopen.html create mode 100644 db/docs/ref/upgrade.4.3/intro.html create mode 100644 db/docs/ref/upgrade.4.3/java.html create mode 100644 db/docs/ref/upgrade.4.3/log.html create mode 100644 db/docs/ref/upgrade.4.3/repl.html create mode 100644 db/docs/ref/upgrade.4.3/rtc.html create mode 100644 db/docs/ref/upgrade.4.3/stat.html create mode 100644 db/docs/ref/upgrade.4.3/toc.html create mode 100644 db/docs/ref/upgrade.4.3/verb.html create mode 100644 db/env/env_stat.c create mode 100644 db/examples_c/ex_sequence.c create mode 100644 db/examples_c/getting_started/example_database_load.c create mode 100644 db/examples_c/getting_started/example_database_read.c create mode 100644 db/examples_c/getting_started/gettingstarted_common.c create mode 100644 db/examples_c/getting_started/gettingstarted_common.h create mode 100644 db/examples_c/getting_started/inventory.txt create mode 100644 db/examples_c/getting_started/vendors.txt create mode 100644 db/examples_cxx/SequenceExample.cpp create mode 100644 db/examples_cxx/getting_started/MyDb.cpp create mode 100644 db/examples_cxx/getting_started/MyDb.hpp create mode 100644 db/examples_cxx/getting_started/excxx_example_database_load.cpp create mode 100644 db/examples_cxx/getting_started/excxx_example_database_read.cpp create mode 100644 db/examples_cxx/getting_started/gettingStartedCommon.hpp create mode 100644 db/examples_cxx/getting_started/inventory.txt create mode 100644 db/examples_cxx/getting_started/vendors.txt create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/access/AccessExample.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/hello/HelloDatabaseWorld.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/basic/PartData.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/basic/PartKey.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/basic/Sample.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/basic/SampleDatabase.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/basic/SampleViews.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/basic/ShipmentData.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/basic/ShipmentKey.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/basic/SupplierData.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/basic/SupplierKey.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/basic/Weight.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/entity/Part.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/entity/PartData.java create mode 100644 
db/examples_java/src/com/sleepycat/examples/collections/ship/entity/PartKey.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/entity/Sample.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/entity/SampleDatabase.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/entity/SampleViews.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/entity/Shipment.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/entity/ShipmentData.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/entity/ShipmentKey.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/entity/Supplier.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/entity/SupplierData.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/entity/SupplierKey.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/entity/Weight.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/factory/Part.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/factory/PartKey.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/factory/Sample.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/factory/SampleDatabase.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/factory/SampleViews.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/factory/Shipment.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/factory/ShipmentKey.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/factory/Supplier.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/factory/SupplierKey.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/factory/Weight.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/index/PartData.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/index/PartKey.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/index/Sample.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/index/SampleDatabase.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/index/SampleViews.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/index/ShipmentData.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/index/ShipmentKey.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/index/SupplierData.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/index/SupplierKey.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/index/Weight.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/MarshalledEntity.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/MarshalledKey.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/Part.java create mode 100644 
db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/PartKey.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/Sample.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/SampleDatabase.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/SampleViews.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/Shipment.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/ShipmentKey.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/Supplier.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/SupplierKey.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/Weight.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/Part.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/PartKey.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/Sample.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/SampleDatabase.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/SampleViews.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/Shipment.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/ShipmentKey.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/Supplier.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/SupplierKey.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/Weight.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/Part.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/PartData.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/PartKey.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/Sample.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/SampleDatabase.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/SampleViews.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/Shipment.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/ShipmentData.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/ShipmentKey.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/Supplier.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/SupplierData.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/SupplierKey.java create mode 100644 db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/Weight.java create mode 100644 db/examples_java/src/com/sleepycat/examples/db/GettingStarted/ExampleDatabaseLoad.java create mode 100644 db/examples_java/src/com/sleepycat/examples/db/GettingStarted/ExampleDatabaseRead.java create mode 100644 
db/examples_java/src/com/sleepycat/examples/db/GettingStarted/Inventory.java create mode 100644 db/examples_java/src/com/sleepycat/examples/db/GettingStarted/InventoryBinding.java create mode 100644 db/examples_java/src/com/sleepycat/examples/db/GettingStarted/ItemNameKeyCreator.java create mode 100644 db/examples_java/src/com/sleepycat/examples/db/GettingStarted/MyDbs.java create mode 100644 db/examples_java/src/com/sleepycat/examples/db/GettingStarted/Vendor.java create mode 100644 db/examples_java/src/com/sleepycat/examples/db/GettingStarted/inventory.txt create mode 100644 db/examples_java/src/com/sleepycat/examples/db/GettingStarted/vendors.txt create mode 100644 db/examples_java/src/com/sleepycat/examples/db/RPCExample.java create mode 100644 db/examples_java/src/com/sleepycat/examples/db/SequenceExample.java create mode 100644 db/fileops/fileops_autop.c create mode 100644 db/hash/hash_autop.c create mode 100644 db/java/src/com/sleepycat/bind/ByteArrayBinding.java create mode 100644 db/java/src/com/sleepycat/bind/EntityBinding.java create mode 100644 db/java/src/com/sleepycat/bind/EntryBinding.java create mode 100644 db/java/src/com/sleepycat/bind/RecordNumberBinding.java create mode 100644 db/java/src/com/sleepycat/bind/package.html create mode 100644 db/java/src/com/sleepycat/bind/serial/ClassCatalog.java create mode 100644 db/java/src/com/sleepycat/bind/serial/SerialBinding.java create mode 100644 db/java/src/com/sleepycat/bind/serial/SerialInput.java create mode 100644 db/java/src/com/sleepycat/bind/serial/SerialOutput.java create mode 100644 db/java/src/com/sleepycat/bind/serial/SerialSerialBinding.java create mode 100644 db/java/src/com/sleepycat/bind/serial/SerialSerialKeyCreator.java create mode 100644 db/java/src/com/sleepycat/bind/serial/StoredClassCatalog.java create mode 100644 db/java/src/com/sleepycat/bind/serial/TupleSerialBinding.java create mode 100644 db/java/src/com/sleepycat/bind/serial/TupleSerialKeyCreator.java create mode 100644 db/java/src/com/sleepycat/bind/serial/TupleSerialMarshalledBinding.java create mode 100644 db/java/src/com/sleepycat/bind/serial/TupleSerialMarshalledKeyCreator.java create mode 100644 db/java/src/com/sleepycat/bind/serial/package.html create mode 100644 db/java/src/com/sleepycat/bind/tuple/BooleanBinding.java create mode 100644 db/java/src/com/sleepycat/bind/tuple/ByteBinding.java create mode 100644 db/java/src/com/sleepycat/bind/tuple/CharacterBinding.java create mode 100644 db/java/src/com/sleepycat/bind/tuple/DoubleBinding.java create mode 100644 db/java/src/com/sleepycat/bind/tuple/FloatBinding.java create mode 100644 db/java/src/com/sleepycat/bind/tuple/IntegerBinding.java create mode 100644 db/java/src/com/sleepycat/bind/tuple/LongBinding.java create mode 100644 db/java/src/com/sleepycat/bind/tuple/MarshalledTupleEntry.java create mode 100644 db/java/src/com/sleepycat/bind/tuple/MarshalledTupleKeyEntity.java create mode 100644 db/java/src/com/sleepycat/bind/tuple/ShortBinding.java create mode 100644 db/java/src/com/sleepycat/bind/tuple/StringBinding.java create mode 100644 db/java/src/com/sleepycat/bind/tuple/TupleBinding.java create mode 100644 db/java/src/com/sleepycat/bind/tuple/TupleInput.java create mode 100644 db/java/src/com/sleepycat/bind/tuple/TupleInputBinding.java create mode 100644 db/java/src/com/sleepycat/bind/tuple/TupleMarshalledBinding.java create mode 100644 db/java/src/com/sleepycat/bind/tuple/TupleOutput.java create mode 100644 db/java/src/com/sleepycat/bind/tuple/TupleTupleBinding.java create mode 100644 
db/java/src/com/sleepycat/bind/tuple/TupleTupleKeyCreator.java create mode 100644 db/java/src/com/sleepycat/bind/tuple/TupleTupleMarshalledBinding.java create mode 100644 db/java/src/com/sleepycat/bind/tuple/TupleTupleMarshalledKeyCreator.java create mode 100644 db/java/src/com/sleepycat/bind/tuple/package.html create mode 100644 db/java/src/com/sleepycat/collections/CurrentTransaction.java create mode 100644 db/java/src/com/sleepycat/collections/DataCursor.java create mode 100644 db/java/src/com/sleepycat/collections/DataView.java create mode 100644 db/java/src/com/sleepycat/collections/KeyRange.java create mode 100644 db/java/src/com/sleepycat/collections/KeyRangeException.java create mode 100644 db/java/src/com/sleepycat/collections/MapEntryParameter.java create mode 100644 db/java/src/com/sleepycat/collections/PrimaryKeyAssigner.java create mode 100644 db/java/src/com/sleepycat/collections/RangeCursor.java create mode 100644 db/java/src/com/sleepycat/collections/StoredCollection.java create mode 100644 db/java/src/com/sleepycat/collections/StoredCollections.java create mode 100644 db/java/src/com/sleepycat/collections/StoredContainer.java create mode 100644 db/java/src/com/sleepycat/collections/StoredEntrySet.java create mode 100644 db/java/src/com/sleepycat/collections/StoredIterator.java create mode 100644 db/java/src/com/sleepycat/collections/StoredKeySet.java create mode 100644 db/java/src/com/sleepycat/collections/StoredList.java create mode 100644 db/java/src/com/sleepycat/collections/StoredMap.java create mode 100644 db/java/src/com/sleepycat/collections/StoredMapEntry.java create mode 100644 db/java/src/com/sleepycat/collections/StoredSortedEntrySet.java create mode 100644 db/java/src/com/sleepycat/collections/StoredSortedKeySet.java create mode 100644 db/java/src/com/sleepycat/collections/StoredSortedMap.java create mode 100644 db/java/src/com/sleepycat/collections/StoredSortedValueSet.java create mode 100644 db/java/src/com/sleepycat/collections/StoredValueSet.java create mode 100644 db/java/src/com/sleepycat/collections/TransactionRunner.java create mode 100644 db/java/src/com/sleepycat/collections/TransactionWorker.java create mode 100644 db/java/src/com/sleepycat/collections/TupleSerialFactory.java create mode 100644 db/java/src/com/sleepycat/collections/package.html create mode 100644 db/java/src/com/sleepycat/compat/DbCompat.java create mode 100644 db/java/src/com/sleepycat/db/BtreePrefixCalculator.java create mode 100644 db/java/src/com/sleepycat/db/BtreeStats.java create mode 100644 db/java/src/com/sleepycat/db/CacheFile.java create mode 100644 db/java/src/com/sleepycat/db/CacheFilePriority.java create mode 100644 db/java/src/com/sleepycat/db/CacheFileStats.java create mode 100644 db/java/src/com/sleepycat/db/CacheStats.java create mode 100644 db/java/src/com/sleepycat/db/CheckpointConfig.java create mode 100644 db/java/src/com/sleepycat/db/Cursor.java create mode 100644 db/java/src/com/sleepycat/db/CursorConfig.java create mode 100644 db/java/src/com/sleepycat/db/Database.java create mode 100644 db/java/src/com/sleepycat/db/DatabaseConfig.java create mode 100644 db/java/src/com/sleepycat/db/DatabaseEntry.java create mode 100644 db/java/src/com/sleepycat/db/DatabaseException.java create mode 100644 db/java/src/com/sleepycat/db/DatabaseStats.java create mode 100644 db/java/src/com/sleepycat/db/DatabaseType.java create mode 100644 db/java/src/com/sleepycat/db/DeadlockException.java create mode 100644 db/java/src/com/sleepycat/db/Environment.java create mode 100644 
db/java/src/com/sleepycat/db/EnvironmentConfig.java create mode 100644 db/java/src/com/sleepycat/db/ErrorHandler.java create mode 100644 db/java/src/com/sleepycat/db/FeedbackHandler.java create mode 100644 db/java/src/com/sleepycat/db/HashStats.java create mode 100644 db/java/src/com/sleepycat/db/Hasher.java create mode 100644 db/java/src/com/sleepycat/db/JoinConfig.java create mode 100644 db/java/src/com/sleepycat/db/JoinCursor.java create mode 100644 db/java/src/com/sleepycat/db/KeyRange.java create mode 100644 db/java/src/com/sleepycat/db/Lock.java create mode 100644 db/java/src/com/sleepycat/db/LockDetectMode.java create mode 100644 db/java/src/com/sleepycat/db/LockMode.java create mode 100644 db/java/src/com/sleepycat/db/LockNotGrantedException.java create mode 100644 db/java/src/com/sleepycat/db/LockOperation.java create mode 100644 db/java/src/com/sleepycat/db/LockRequest.java create mode 100644 db/java/src/com/sleepycat/db/LockRequestMode.java create mode 100644 db/java/src/com/sleepycat/db/LockStats.java create mode 100644 db/java/src/com/sleepycat/db/LogCursor.java create mode 100644 db/java/src/com/sleepycat/db/LogRecordHandler.java create mode 100644 db/java/src/com/sleepycat/db/LogSequenceNumber.java create mode 100644 db/java/src/com/sleepycat/db/LogStats.java create mode 100644 db/java/src/com/sleepycat/db/MemoryException.java create mode 100644 db/java/src/com/sleepycat/db/MessageHandler.java create mode 100644 db/java/src/com/sleepycat/db/MultipleDataEntry.java create mode 100644 db/java/src/com/sleepycat/db/MultipleEntry.java create mode 100644 db/java/src/com/sleepycat/db/MultipleKeyDataEntry.java create mode 100644 db/java/src/com/sleepycat/db/MultipleRecnoDataEntry.java create mode 100644 db/java/src/com/sleepycat/db/OperationStatus.java create mode 100644 db/java/src/com/sleepycat/db/PanicHandler.java create mode 100644 db/java/src/com/sleepycat/db/PreparedTransaction.java create mode 100644 db/java/src/com/sleepycat/db/QueueStats.java create mode 100644 db/java/src/com/sleepycat/db/RecordNumberAppender.java create mode 100644 db/java/src/com/sleepycat/db/RecoveryOperation.java create mode 100644 db/java/src/com/sleepycat/db/ReplicationHandleDeadException.java create mode 100644 db/java/src/com/sleepycat/db/ReplicationStats.java create mode 100644 db/java/src/com/sleepycat/db/ReplicationStatus.java create mode 100644 db/java/src/com/sleepycat/db/ReplicationTransport.java create mode 100644 db/java/src/com/sleepycat/db/RunRecoveryException.java create mode 100644 db/java/src/com/sleepycat/db/SecondaryConfig.java create mode 100644 db/java/src/com/sleepycat/db/SecondaryCursor.java create mode 100644 db/java/src/com/sleepycat/db/SecondaryDatabase.java create mode 100644 db/java/src/com/sleepycat/db/SecondaryKeyCreator.java create mode 100644 db/java/src/com/sleepycat/db/Sequence.java create mode 100644 db/java/src/com/sleepycat/db/SequenceConfig.java create mode 100644 db/java/src/com/sleepycat/db/SequenceStats.java create mode 100644 db/java/src/com/sleepycat/db/StatsConfig.java create mode 100644 db/java/src/com/sleepycat/db/Transaction.java create mode 100644 db/java/src/com/sleepycat/db/TransactionConfig.java create mode 100644 db/java/src/com/sleepycat/db/TransactionStats.java create mode 100644 db/java/src/com/sleepycat/db/VerifyConfig.java create mode 100644 db/java/src/com/sleepycat/db/internal/Db.java create mode 100644 db/java/src/com/sleepycat/db/internal/DbClient.java create mode 100644 db/java/src/com/sleepycat/db/internal/DbConstants.java create mode 100644 
db/java/src/com/sleepycat/db/internal/DbEnv.java create mode 100644 db/java/src/com/sleepycat/db/internal/DbLock.java create mode 100644 db/java/src/com/sleepycat/db/internal/DbLogc.java create mode 100644 db/java/src/com/sleepycat/db/internal/DbMpoolFile.java create mode 100644 db/java/src/com/sleepycat/db/internal/DbSequence.java create mode 100644 db/java/src/com/sleepycat/db/internal/DbTxn.java create mode 100644 db/java/src/com/sleepycat/db/internal/DbUtil.java create mode 100644 db/java/src/com/sleepycat/db/internal/Dbc.java create mode 100644 db/java/src/com/sleepycat/db/internal/db_java.java create mode 100644 db/java/src/com/sleepycat/db/internal/db_javaJNI.java create mode 100644 db/java/src/com/sleepycat/util/ExceptionUnwrapper.java create mode 100644 db/java/src/com/sleepycat/util/ExceptionWrapper.java create mode 100644 db/java/src/com/sleepycat/util/FastInputStream.java create mode 100644 db/java/src/com/sleepycat/util/FastOutputStream.java create mode 100644 db/java/src/com/sleepycat/util/IOExceptionWrapper.java create mode 100644 db/java/src/com/sleepycat/util/RuntimeExceptionWrapper.java create mode 100644 db/java/src/com/sleepycat/util/UtfOps.java create mode 100644 db/java/src/com/sleepycat/util/package.html create mode 100644 db/libdb_java/java-post.pl create mode 100644 db/lock/lock_id.c create mode 100644 db/lock/lock_list.c create mode 100644 db/lock/lock_timer.c create mode 100644 db/log/log_stat.c create mode 100644 db/mod_db4/ABOUT create mode 100644 db/mod_db4/INSTALL create mode 100644 db/mod_db4/Makefile.in create mode 100644 db/mod_db4/config.h.in create mode 100755 db/mod_db4/configure create mode 100644 db/mod_db4/configure.in create mode 100644 db/mod_db4/mm_hash.c create mode 100644 db/mod_db4/mm_hash.h create mode 100644 db/mod_db4/mod_db4.c create mode 100644 db/mod_db4/mod_db4_export.h create mode 100644 db/mod_db4/sem_utils.c create mode 100644 db/mod_db4/sem_utils.h create mode 100644 db/mod_db4/skiplist.c create mode 100644 db/mod_db4/skiplist.h create mode 100644 db/mod_db4/utils.c create mode 100644 db/mod_db4/utils.h create mode 100644 db/mp/mp_fmethod.c create mode 100644 db/os/os_truncate.c create mode 100644 db/os_win32/os_truncate.c create mode 100644 db/os_win32/os_unlink.c create mode 100644 db/perl/BerkeleyDB/t/cds.t create mode 100644 db/perl/BerkeleyDB/t/pod.t create mode 100644 db/php_db4/ABOUT create mode 100644 db/php_db4/INSTALL create mode 100644 db/php_db4/config.m4 create mode 100644 db/php_db4/db4.c create mode 100644 db/php_db4/php_db4.h create mode 100644 db/qam/qam_autop.c create mode 100644 db/rep/rep.src create mode 100644 db/rep/rep_auto.c create mode 100644 db/rep/rep_autop.c create mode 100644 db/rep/rep_backup.c create mode 100644 db/rep/rep_stat.c create mode 100644 db/rpc_server/java/AssociateCallbacks.java create mode 100644 db/rpc_server/java/Dispatcher.java create mode 100644 db/rpc_server/java/JoinCursorAdapter.java create mode 100644 db/rpc_server/java/Server.java create mode 100644 db/rpc_server/java/Util.java create mode 100644 db/rpc_server/java/gen/ServerStubs.java create mode 100644 db/sequence/seq_stat.c create mode 100644 db/sequence/sequence.c create mode 100644 db/tcl/docs/sequence.html create mode 100644 db/tcl/tcl_seq.c create mode 100644 db/test/recd020.tcl create mode 100644 db/test/recd021.tcl create mode 100644 db/test/rep014.tcl create mode 100644 db/test/rep015.tcl create mode 100644 db/test/rep016.tcl create mode 100644 db/test/rep017.tcl create mode 100644 db/test/rep017script.tcl create mode 
100644 db/test/rep018.tcl create mode 100644 db/test/rep018script.tcl create mode 100644 db/test/rep019.tcl create mode 100644 db/test/rep020.tcl create mode 100644 db/test/rep021.tcl create mode 100644 db/test/rep022.tcl create mode 100644 db/test/rep023.tcl create mode 100644 db/test/rep024.tcl create mode 100644 db/test/rep026.tcl create mode 100644 db/test/rep027.tcl create mode 100644 db/test/rep028.tcl create mode 100644 db/test/rep029.tcl create mode 100644 db/test/rep030.tcl create mode 100644 db/test/rep031.tcl create mode 100644 db/test/rep032.tcl create mode 100644 db/test/rep033.tcl create mode 100644 db/test/rep034.tcl create mode 100644 db/test/rep035.tcl create mode 100644 db/test/rep035script.tcl create mode 100644 db/test/rep036.tcl create mode 100644 db/test/rep036script.tcl create mode 100644 db/test/rep037.tcl create mode 100644 db/test/rpc006.tcl create mode 100644 db/test/scr024/src/com/sleepycat/bind/serial/test/MarshalledObject.java create mode 100644 db/test/scr024/src/com/sleepycat/bind/serial/test/NullClassCatalog.java create mode 100644 db/test/scr024/src/com/sleepycat/bind/serial/test/SerialBindingTest.java create mode 100644 db/test/scr024/src/com/sleepycat/bind/serial/test/TestClassCatalog.java create mode 100644 db/test/scr024/src/com/sleepycat/bind/test/BindingSpeedTest.java create mode 100644 db/test/scr024/src/com/sleepycat/bind/tuple/test/MarshalledObject.java create mode 100644 db/test/scr024/src/com/sleepycat/bind/tuple/test/TupleBindingTest.java create mode 100644 db/test/scr024/src/com/sleepycat/bind/tuple/test/TupleFormatTest.java create mode 100644 db/test/scr024/src/com/sleepycat/bind/tuple/test/TupleOrderingTest.java create mode 100644 db/test/scr024/src/com/sleepycat/collections/KeyRangeTest.java create mode 100644 db/test/scr024/src/com/sleepycat/collections/test/CollectionTest.java create mode 100644 db/test/scr024/src/com/sleepycat/collections/test/DbTestUtil.java create mode 100644 db/test/scr024/src/com/sleepycat/collections/test/IterDeadlockTest.java create mode 100644 db/test/scr024/src/com/sleepycat/collections/test/JoinTest.java create mode 100644 db/test/scr024/src/com/sleepycat/collections/test/NullTransactionRunner.java create mode 100644 db/test/scr024/src/com/sleepycat/collections/test/SecondaryDeadlockTest.java create mode 100644 db/test/scr024/src/com/sleepycat/collections/test/TestDataBinding.java create mode 100644 db/test/scr024/src/com/sleepycat/collections/test/TestEntity.java create mode 100644 db/test/scr024/src/com/sleepycat/collections/test/TestEntityBinding.java create mode 100644 db/test/scr024/src/com/sleepycat/collections/test/TestEnv.java create mode 100644 db/test/scr024/src/com/sleepycat/collections/test/TestKeyAssigner.java create mode 100644 db/test/scr024/src/com/sleepycat/collections/test/TestKeyCreator.java create mode 100644 db/test/scr024/src/com/sleepycat/collections/test/TestStore.java create mode 100644 db/test/scr024/src/com/sleepycat/collections/test/TransactionTest.java create mode 100644 db/test/scr024/src/com/sleepycat/collections/test/serial/StoredClassCatalogTest.java create mode 100644 db/test/scr024/src/com/sleepycat/collections/test/serial/StoredClassCatalogTestInit.java create mode 100644 db/test/scr024/src/com/sleepycat/collections/test/serial/TestSerial.java create mode 100644 db/test/scr024/src/com/sleepycat/collections/test/serial/TestSerial.java.original create mode 100644 db/test/scr024/src/com/sleepycat/util/test/ExceptionWrapperTest.java create mode 100644 
db/test/scr024/src/com/sleepycat/util/test/UtfTest.java create mode 100644 db/test/scr028/chk.rtc create mode 100644 db/test/scr028/t.c create mode 100644 db/test/scr029/chk.get create mode 100644 db/test/scr029/t.c create mode 100644 db/test/scr030/chk.build create mode 100644 db/test/sdb013.tcl create mode 100644 db/test/test107.tcl create mode 100644 db/test/test109.tcl create mode 100644 db/txn/txn_autop.c (limited to 'db') diff --git a/db/LICENSE b/db/LICENSE index fc6b67058..8cb10e79b 100644 --- a/db/LICENSE +++ b/db/LICENSE @@ -1,16 +1,16 @@ /*- - * $Id: LICENSE,v 11.10 2003/01/08 04:00:54 bostic Exp $ + * $Id: LICENSE,v 11.12 2004/03/30 20:49:44 bostic Exp $ */ The following is the license that applies to this copy of the Berkeley DB software. For a license to use the Berkeley DB software under conditions other than those described here, or to purchase support for this software, -please contact Sleepycat Software by email at db@sleepycat.com, or on the -Web at http://www.sleepycat.com. +please contact Sleepycat Software by email at info@sleepycat.com, or on +the Web at http://www.sleepycat.com. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= /* - * Copyright (c) 1990-2003 + * Copyright (c) 1990-2004 * Sleepycat Software. All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/db/README b/db/README index 1d6432569..d0d201851 100644 --- a/db/README +++ b/db/README @@ -1,5 +1,5 @@ -Sleepycat Software: Berkeley DB 4.2.52: (December 3, 2003) +Sleepycat Software: Berkeley DB 4.3.14: (October 14, 2004) -This is version 4.2.52 of Berkeley DB from Sleepycat Software. To view +This is version 4.3.14 of Berkeley DB from Sleepycat Software. To view the release and installation documentation, load the distribution file docs/index.html into your web browser. diff --git a/db/btree/bt_compare.c b/db/btree/bt_compare.c index a329d8044..81ffe098b 100644 --- a/db/btree/bt_compare.c +++ b/db/btree/bt_compare.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -38,14 +38,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * + * $Id: bt_compare.c,v 11.20 2004/02/21 15:54:44 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: bt_compare.c,v 11.18 2003/01/08 04:00:56 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif @@ -204,8 +202,12 @@ __bam_defpfx(dbp, a, b) return (cnt); /* - * We know that a->size must be <= b->size, or they wouldn't be - * in this order. + * They match up to the smaller of the two sizes. + * Collate the longer after the shorter. */ - return (a->size < b->size ? a->size + 1 : a->size); + if (a->size < b->size) + return (a->size + 1); + if (b->size < a->size) + return (b->size + 1); + return (b->size); } diff --git a/db/btree/bt_conv.c b/db/btree/bt_conv.c index fd80d8a4c..39a9d8253 100644 --- a/db/btree/bt_conv.c +++ b/db/btree/bt_conv.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
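[Reviewer note, not part of the patch] The bt_compare.c hunk above changes the tail of __bam_defpfx so the shorter key collates first regardless of which argument is shorter, instead of assuming a->size <= b->size. A minimal standalone sketch of that shortest-distinguishing-prefix logic; the struct key type and the name default_prefix are illustrative stand-ins, not the library's DBT or __bam_defpfx.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for a DBT: a byte buffer and its length. */
struct key {
	const unsigned char *data;
	size_t size;
};

/*
 * Return how many leading bytes of 'b' are needed to distinguish it
 * from 'a': compare byte by byte up to the shorter length; if the keys
 * differ, the prefix is the differing byte's position; if one key is a
 * prefix of the other, the shorter key plus one byte suffices; if the
 * keys are identical, all of the bytes are needed.
 */
static size_t
default_prefix(const struct key *a, const struct key *b)
{
	size_t cnt, len;
	const unsigned char *pa, *pb;

	len = a->size < b->size ? a->size : b->size;
	pa = a->data;
	pb = b->data;
	for (cnt = 1; len > 0; ++cnt, --len, ++pa, ++pb)
		if (*pa != *pb)
			return (cnt);

	/* They match up to the smaller size; collate the longer after. */
	if (a->size < b->size)
		return (a->size + 1);
	if (b->size < a->size)
		return (b->size + 1);
	return (b->size);
}

int
main(void)
{
	struct key a = { (const unsigned char *)"apple", 5 };
	struct key b = { (const unsigned char *)"applet", 6 };

	/* Prints 6: all of "apple" plus one byte of "applet" is needed. */
	printf("%zu\n", default_prefix(&a, &b));
	return (0);
}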
+ * + * $Id: bt_conv.c,v 11.15 2004/01/28 03:35:48 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: bt_conv.c,v 11.14 2003/01/08 04:00:56 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif diff --git a/db/btree/bt_curadj.c b/db/btree/bt_curadj.c index 3da200c27..477f00b8f 100644 --- a/db/btree/bt_curadj.c +++ b/db/btree/bt_curadj.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: bt_curadj.c,v 11.37 2004/03/13 14:11:33 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: bt_curadj.c,v 11.34 2003/07/09 02:32:24 margo Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif @@ -21,30 +19,6 @@ static const char revid[] = "$Id: bt_curadj.c,v 11.34 2003/07/09 02:32:24 margo static int __bam_opd_cursor __P((DB *, DBC *, db_pgno_t, u_int32_t, u_int32_t)); -#ifdef DEBUG -/* - * __bam_cprint -- - * Display the current internal cursor. - * - * PUBLIC: void __bam_cprint __P((DBC *)); - */ -void -__bam_cprint(dbc) - DBC *dbc; -{ - BTREE_CURSOR *cp; - - cp = (BTREE_CURSOR *)dbc->internal; - - fprintf(stderr, "\tinternal: ovflsize: %lu", (u_long)cp->ovflsize); - if (dbc->dbtype == DB_RECNO) - fprintf(stderr, " recno: %lu", (u_long)cp->recno); - if (F_ISSET(cp, C_DELETED)) - fprintf(stderr, " (deleted)"); - fprintf(stderr, "\n"); -} -#endif - /* * Cursor adjustments are logged if they are for subtransactions. This is * because it's possible for a subtransaction to adjust cursors which will @@ -219,8 +193,8 @@ __bam_ca_di(my_dbc, pgno, indx, adjust) MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp); if (found != 0 && DBC_LOGGING(my_dbc)) { - if ((ret = __bam_curadj_log(dbp, my_dbc->txn, - &lsn, 0, DB_CA_DI, pgno, 0, 0, adjust, indx, 0)) != 0) + if ((ret = __bam_curadj_log(dbp, my_dbc->txn, &lsn, 0, + DB_CA_DI, pgno, 0, 0, (u_int32_t)adjust, indx, 0)) != 0) return (ret); } diff --git a/db/btree/bt_cursor.c b/db/btree/bt_cursor.c index 067da53be..82d6cc435 100644 --- a/db/btree/bt_cursor.c +++ b/db/btree/bt_cursor.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: bt_cursor.c,v 11.190 2004/09/22 21:46:32 ubell Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: bt_cursor.c,v 11.169 2003/11/19 18:41:06 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -53,11 +51,11 @@ static int __bam_isopd __P((DBC *, db_pgno_t *)); * don't -- we don't duplicate locks when we duplicate cursors if we are * running in a transaction environment as there's no point if locks are * never discarded. This means that the cursor may or may not hold a lock. - * In the case where we are decending the tree we always want to - * unlock the held interior page so we use ACQUIRE_COUPLE. + * In the case where we are descending the tree we always want to unlock + * the held interior page so we use ACQUIRE_COUPLE. 
*/ #undef ACQUIRE -#define ACQUIRE(dbc, mode, lpgno, lock, fpgno, pagep, ret) { \ +#define ACQUIRE(dbc, mode, lpgno, lock, fpgno, pagep, ret) do { \ DB_MPOOLFILE *__mpf = (dbc)->dbp->mpf; \ if ((pagep) != NULL) { \ ret = __memp_fput(__mpf, pagep, 0); \ @@ -68,10 +66,10 @@ static int __bam_isopd __P((DBC *, db_pgno_t *)); ret = __db_lget(dbc, LCK_COUPLE, lpgno, mode, 0, &(lock));\ if ((ret) == 0) \ ret = __memp_fget(__mpf, &(fpgno), 0, &(pagep)); \ -} +} while (0) #undef ACQUIRE_COUPLE -#define ACQUIRE_COUPLE(dbc, mode, lpgno, lock, fpgno, pagep, ret) { \ +#define ACQUIRE_COUPLE(dbc, mode, lpgno, lock, fpgno, pagep, ret) do { \ DB_MPOOLFILE *__mpf = (dbc)->dbp->mpf; \ if ((pagep) != NULL) { \ ret = __memp_fput(__mpf, pagep, 0); \ @@ -83,37 +81,37 @@ static int __bam_isopd __P((DBC *, db_pgno_t *)); LCK_COUPLE_ALWAYS, lpgno, mode, 0, &(lock)); \ if ((ret) == 0) \ ret = __memp_fget(__mpf, &(fpgno), 0, &(pagep)); \ -} +} while (0) /* Acquire a new page/lock for a cursor. */ #undef ACQUIRE_CUR -#define ACQUIRE_CUR(dbc, mode, p, ret) { \ +#define ACQUIRE_CUR(dbc, mode, p, ret) do { \ BTREE_CURSOR *__cp = (BTREE_CURSOR *)(dbc)->internal; \ - if (p != __cp->pgno) \ - __cp->pgno = PGNO_INVALID; \ + if (p != __cp->pgno) \ + __cp->pgno = PGNO_INVALID; \ ACQUIRE(dbc, mode, p, __cp->lock, p, __cp->page, ret); \ if ((ret) == 0) { \ __cp->pgno = p; \ __cp->lock_mode = (mode); \ } \ -} +} while (0) /* * Acquire a new page/lock for a cursor and release the previous. - * This is typically used when decending a tree and we do not + * This is typically used when descending a tree and we do not * want to hold the interior nodes locked. */ #undef ACQUIRE_CUR_COUPLE -#define ACQUIRE_CUR_COUPLE(dbc, mode, p, ret) { \ +#define ACQUIRE_CUR_COUPLE(dbc, mode, p, ret) do { \ BTREE_CURSOR *__cp = (BTREE_CURSOR *)(dbc)->internal; \ - if (p != __cp->pgno) \ - __cp->pgno = PGNO_INVALID; \ + if (p != __cp->pgno) \ + __cp->pgno = PGNO_INVALID; \ ACQUIRE_COUPLE(dbc, mode, p, __cp->lock, p, __cp->page, ret); \ if ((ret) == 0) { \ __cp->pgno = p; \ __cp->lock_mode = (mode); \ } \ -} +} while (0) /* * Acquire a write lock if we don't already have one. @@ -122,7 +120,7 @@ static int __bam_isopd __P((DBC *, db_pgno_t *)); * See ACQUIRE macro on why we handle cursors that don't have locks. */ #undef ACQUIRE_WRITE_LOCK -#define ACQUIRE_WRITE_LOCK(dbc, ret) { \ +#define ACQUIRE_WRITE_LOCK(dbc, ret) do { \ BTREE_CURSOR *__cp = (BTREE_CURSOR *)(dbc)->internal; \ ret = 0; \ if (STD_LOCKING(dbc) && \ @@ -131,25 +129,27 @@ static int __bam_isopd __P((DBC *, db_pgno_t *)); LOCK_ISSET(__cp->lock) ? LCK_COUPLE : 0, \ __cp->pgno, DB_LOCK_WRITE, 0, &__cp->lock)) == 0) \ __cp->lock_mode = DB_LOCK_WRITE; \ -} +} while (0) /* Discard the current page/lock for a cursor. */ #undef DISCARD_CUR -#define DISCARD_CUR(dbc, ret) { \ +#define DISCARD_CUR(dbc, ret) do { \ BTREE_CURSOR *__cp = (BTREE_CURSOR *)(dbc)->internal; \ DB_MPOOLFILE *__mpf = (dbc)->dbp->mpf; \ int __t_ret; \ if ((__cp->page) != NULL) { \ - ret = __memp_fput(__mpf, __cp->page, 0); \ + __t_ret = __memp_fput(__mpf, __cp->page, 0); \ __cp->page = NULL; \ } else \ - ret = 0; \ + __t_ret = 0; \ + if (__t_ret != 0 && (ret) == 0) \ + ret = __t_ret; \ __t_ret = __TLPUT((dbc), __cp->lock); \ if (__t_ret != 0 && (ret) == 0) \ ret = __t_ret; \ if ((ret) == 0 && !LOCK_ISSET(__cp->lock)) \ __cp->lock_mode = DB_LOCK_NG; \ -} +} while (0) /* If on-page item is a deleted record. 
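[Reviewer note, not part of the patch] The cursor macros above (ACQUIRE, ACQUIRE_COUPLE, ACQUIRE_CUR, ACQUIRE_CUR_COUPLE, ACQUIRE_WRITE_LOCK, DISCARD_CUR) are rewrapped from bare { ... } blocks into do { ... } while (0). A small self-contained illustration of why that idiom matters for multi-statement macros; the macro and variable names here are invented for the example and are not part of Berkeley DB.

#include <stdio.h>

/* Unsafe: a bare block breaks when the macro is used under if/else,
 * because the trailing semicolon ends the if and strands the else. */
#define LOG_AND_COUNT_UNSAFE(msg, counter) { \
	printf("%s\n", (msg)); \
	(counter)++; \
}

/* Safe: do { ... } while (0) expands to a single statement, so a
 * trailing semicolon and if/else nesting both behave as expected. */
#define LOG_AND_COUNT(msg, counter) do { \
	printf("%s\n", (msg)); \
	(counter)++; \
} while (0)

int
main(void)
{
	int hits = 0, misses = 0, found = 1;

	if (found)
		LOG_AND_COUNT("hit", hits);	/* one statement */
	else
		LOG_AND_COUNT("miss", misses);	/* else binds correctly */

	/* Substituting LOG_AND_COUNT_UNSAFE in the if/else above fails
	 * to compile: the '}' followed by ';' leaves a dangling else. */
	printf("hits=%d misses=%d\n", hits, misses);
	return (0);
}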
*/ #undef IS_DELETED @@ -308,7 +308,7 @@ __bam_c_close(dbc, root_pgno, rmroot) DBC *dbc_opd, *dbc_c; DB_MPOOLFILE *mpf; PAGE *h; - int cdb_lock, ret, t_ret; + int cdb_lock, ret; dbp = dbc->dbp; mpf = dbp->mpf; @@ -426,8 +426,8 @@ __bam_c_close(dbc, root_pgno, rmroot) case DB_QUEUE: case DB_UNKNOWN: default: - return (__db_unknown_type(dbp->dbenv, - "__bam_c_close", dbc->dbtype)); + return (__db_unknown_type( + dbp->dbenv, "__bam_c_close", dbc->dbtype)); } } goto done; @@ -448,32 +448,31 @@ lock: cp_c = (BTREE_CURSOR *)dbc_c->internal; goto err; cdb_lock = 1; } - if ((ret = __memp_fget(mpf, &cp_c->pgno, 0, &cp_c->page)) != 0) - goto err; goto delete; } /* * The variable dbc_c has been initialized to reference the cursor in - * which we're going to do the delete. Initialize the cursor's page - * and lock structures as necessary. + * which we're going to do the delete. Initialize the cursor's lock + * structures as necessary. * * First, we may not need to acquire any locks. If we're in case #3, * that is, the primary database isn't a btree database, our caller * is responsible for acquiring any necessary locks before calling us. */ - if (F_ISSET(dbc, DBC_OPD)) { - if ((ret = __memp_fget(mpf, &cp_c->pgno, 0, &cp_c->page)) != 0) - goto err; + if (F_ISSET(dbc, DBC_OPD)) goto delete; - } /* - * Otherwise, acquire a write lock. If the cursor that did the initial - * logical deletion (and which had a write lock) is not the same as the - * cursor doing the physical deletion (which may have only ever had a - * read lock on the item), we need to upgrade. The confusion comes as - * follows: + * Otherwise, acquire a write lock on the primary database's page. + * + * Lock the primary database page, regardless of whether we're deleting + * an item on a primary database page or an off-page duplicates page. + * + * If the cursor that did the initial logical deletion (and had a write + * lock) is not the same cursor doing the physical deletion (which may + * have only ever had a read lock on the item), we need to upgrade to a + * write lock. The confusion comes as follows: * * C1 created, acquires item read lock * C2 dup C1, create C2, also has item read lock. @@ -483,29 +482,37 @@ lock: cp_c = (BTREE_CURSOR *)dbc_c->internal; * * If we're in a TXN, we know that C2 will be able to acquire the write * lock, because no locker other than the one shared by C1 and C2 can - * acquire a write lock -- the original write lock C1 acquire was never + * acquire a write lock -- the original write lock C1 acquired was never * discarded. * * If we're not in a TXN, it's nastier. Other cursors might acquire * read locks on the item after C1 closed, discarding its write lock, * and such locks would prevent C2 from acquiring a read lock. That's - * OK, though, we'll simply wait until we can acquire a read lock, or + * OK, though, we'll simply wait until we can acquire a write lock, or * we'll deadlock. (Which better not happen, since we're not in a TXN.) * - * Lock the primary database page, regardless of whether we're deleting - * an item on a primary database page or an off-page duplicates page. + * There are similar scenarios with dirty reads, where the cursor may + * have downgraded its write lock to a was-write lock. 
*/ - ACQUIRE(dbc, DB_LOCK_WRITE, - cp->pgno, cp_c->lock, cp_c->pgno, cp_c->page, ret); - if (ret != 0) - goto err; + if (STD_LOCKING(dbc)) + if ((ret = __db_lget(dbc, + LCK_COUPLE, cp->pgno, DB_LOCK_WRITE, 0, &cp->lock)) != 0) + goto err; delete: /* - * If the delete occurred in a btree, delete the on-page physical item - * referenced by the cursor. + * If the delete occurred in a Btree, we're going to look at the page + * to see if the item has to be physically deleted. Otherwise, we do + * not need the actual page (and it may not even exist, it might have + * been truncated from the file after an allocation aborted). + * + * Delete the on-page physical item referenced by the cursor. */ - if (dbc_c->dbtype == DB_BTREE && (ret = __bam_c_physdel(dbc_c)) != 0) - goto err; + if (dbc_c->dbtype == DB_BTREE) { + if ((ret = __memp_fget(mpf, &cp_c->pgno, 0, &cp_c->page)) != 0) + goto err; + if ((ret = __bam_c_physdel(dbc_c)) != 0) + goto err; + } /* * If we're not working in an off-page duplicate tree, then we're @@ -526,6 +533,9 @@ delete: /* if ((ret = __memp_fget(mpf, &root_pgno, 0, &h)) != 0) goto err; if (NUM_ENT(h) == 0) { + DISCARD_CUR(dbc_c, ret); + if (ret != 0) + goto err; if ((ret = __db_free(dbc, h)) != 0) goto err; } else { @@ -558,14 +568,9 @@ done: /* * Discard the page references and locks, and confirm that the stack * has been emptied. */ - if (dbc_opd != NULL) { - DISCARD_CUR(dbc_opd, t_ret); - if (t_ret != 0 && ret == 0) - ret = t_ret; - } - DISCARD_CUR(dbc, t_ret); - if (t_ret != 0 && ret == 0) - ret = t_ret; + if (dbc_opd != NULL) + DISCARD_CUR(dbc_opd, ret); + DISCARD_CUR(dbc, ret); /* Downgrade any CDB lock we acquired. */ if (cdb_lock) @@ -785,11 +790,11 @@ __bam_c_dup(orig_dbc, new_dbc) * holding inside a transaction because all the locks are retained * until the transaction commits or aborts. */ - if (LOCK_ISSET(orig->lock) && orig_dbc->txn == NULL) { + if (orig_dbc->txn == NULL && LOCK_ISSET(orig->lock)) if ((ret = __db_lget(new_dbc, 0, new->pgno, new->lock_mode, 0, &new->lock)) != 0) return (ret); - } + new->ovflsize = orig->ovflsize; new->recno = orig->recno; new->flags = orig->flags; @@ -1064,9 +1069,9 @@ __bam_bulk(dbc, data, flags) cp = (BTREE_CURSOR *)dbc->internal; /* - * dp tracks the beginging of the page in the buffer. + * dp tracks the beginning of the page in the buffer. * np is the next place to copy things into the buffer. - * dbuf always stays at the beging of the buffer. + * dbuf always stays at the beginning of the buffer. */ dbuf = data->data; np = dp = dbuf; @@ -1172,10 +1177,11 @@ next_pg: get_key_space: /* Nothing added, then error. */ if (offp == endp) { - data->size = - ALIGN(size + + data->size = (u_int32_t) + DB_ALIGN(size + pagesize, 1024); - return (ENOMEM); + return + (DB_BUFFER_SMALL); } /* * We need to back up to the @@ -1246,7 +1252,7 @@ get_key_space: if ((ret = __bam_bulk_duplicates(dbc, bo->pgno, dbuf, is_key ? offp + P_INDX : NULL, &offp, &np, &space, no_dup)) != 0) { - if (ret == ENOMEM) { + if (ret == DB_BUFFER_SMALL) { size = space; space = 0; /* If nothing was added, then error. */ @@ -1307,9 +1313,10 @@ get_space: if (offp >= (is_key ? &endp[-1] : endp) || F_ISSET(dbc, DBC_TRANSIENT)) { - data->size = ALIGN(size + + data->size = (u_int32_t) + DB_ALIGN(size + data->ulen - space, 1024); - return (ENOMEM); + return (DB_BUFFER_SMALL); } break; } @@ -1339,8 +1346,8 @@ get_space: indx += adj; } /* - * Stop when we either run off the page or we - * move to the next key and we are not returning mulitple keys. 
+ * Stop when we either run off the page or we move to the next key and + * we are not returning multiple keys. */ } while ((indx += adj) < NUM_ENT(pg) && (next_key || pg_keyoff == inp[indx])); @@ -1365,14 +1372,14 @@ get_space: if (ret == 0 && indx < pg->entries && F_ISSET(dbc, DBC_TRANSIENT) && pg_keyoff == inp[indx]) { data->size = (data->ulen - space) + size; - return (ENOMEM); + return (DB_BUFFER_SMALL); } /* * Must leave the index pointing at the last record fetched. * If we are not fetching keys, we may have stepped to the * next key. */ - if (ret == ENOMEM || next_key || pg_keyoff == inp[indx]) + if (ret == DB_BUFFER_SMALL || next_key || pg_keyoff == inp[indx]) cp->indx = indx; else cp->indx = indx - P_INDX; @@ -1462,7 +1469,7 @@ __bam_bulk_duplicates(dbc, pgno, dbuf, keyoff, offpp, dpp, spacep, no_dup) /* * np is the next place to put data. - * dp is the begining of the current page in the buffer. + * dp is the beginning of the current page in the buffer. */ np = dp = *dpp; first = 1; @@ -1489,7 +1496,7 @@ __bam_bulk_duplicates(dbc, pgno, dbuf, keyoff, offpp, dpp, spacep, no_dup) /* Did space underflow? */ if (space > *spacep) { - ret = ENOMEM; + ret = DB_BUFFER_SMALL; if (first == 1) { /* Get the absolute value. */ space = -(int32_t)space; @@ -1503,7 +1510,7 @@ __bam_bulk_duplicates(dbc, pgno, dbuf, keyoff, offpp, dpp, spacep, no_dup) bo = (BOVERFLOW *)bk; size = bo->tlen; if (size > space) { - ret = ENOMEM; + ret = DB_BUFFER_SMALL; space = *spacep + size; break; } @@ -1522,7 +1529,7 @@ __bam_bulk_duplicates(dbc, pgno, dbuf, keyoff, offpp, dpp, spacep, no_dup) dp = np; size = pagesize - HOFFSET(pg); if (space < size) { - ret = ENOMEM; + ret = DB_BUFFER_SMALL; /* Return space required. */ space = *spacep + size; break; @@ -1565,7 +1572,7 @@ contin: * If we ran out of space back up the pointer. * If we did not return any dups or reached the end, close the opd. */ - if (ret == ENOMEM) { + if (ret == DB_BUFFER_SMALL) { if (opd->dbtype == DB_RECNO) { if (--cp->recno == 0) goto close_opd; @@ -1780,6 +1787,7 @@ __bam_c_put(dbc, key, data, flags, pgnop) u_int32_t flags; db_pgno_t *pgnop; { + BTREE *t; BTREE_CURSOR *cp; DB *dbp; DBT dbt; @@ -1796,39 +1804,16 @@ __bam_c_put(dbc, key, data, flags, pgnop) split: ret = stack = 0; switch (flags) { + case DB_CURRENT: + if (F_ISSET(cp, C_DELETED)) + return (DB_NOTFOUND); + /* FALLTHROUGH */ + case DB_AFTER: case DB_BEFORE: - case DB_CURRENT: iiop = flags; own = 1; - /* - * If the Btree has record numbers (and we're not replacing an - * existing record), we need a complete stack so that we can - * adjust the record counts. The check for flags == DB_CURRENT - * is superfluous but left in for clarity. (If C_RECNUM is set - * we know that flags must be DB_CURRENT, as DB_AFTER/DB_BEFORE - * are illegal in a Btree unless it's configured for duplicates - * and you cannot configure a Btree for both record renumbering - * and duplicates.) - */ - if (flags == DB_CURRENT && - F_ISSET(cp, C_RECNUM) && F_ISSET(cp, C_DELETED)) { - if ((ret = __bam_c_getstack(dbc)) != 0) - goto err; - /* - * Initialize the cursor from the stack. Don't take - * the page number or page index, they should already - * be set. - */ - cp->page = cp->csp->page; - cp->lock = cp->csp->lock; - cp->lock_mode = cp->csp->lock_mode; - - stack = 1; - break; - } - /* Acquire the current page with a write lock. 
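The bulk-retrieval hunks above switch the "user buffer too small" return from ENOMEM to DB_BUFFER_SMALL and report the required size (rounded with DB_ALIGN) in data->size. From the application side that looks roughly like the sketch below, written against the public bulk-get interface of the release this import tracks; the handle is assumed to be open on a btree and error handling is abbreviated:

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <db.h>

/*
 * bulk_scan --
 *	Walk all records in bulk buffers, growing the buffer whenever the
 *	library reports DB_BUFFER_SMALL along with the size it needs.
 */
int
bulk_scan(DB *dbp)
{
	DBC *dbc;
	DBT key, data;
	void *np, *p, *retdata, *retkey;
	u_int32_t retdlen, retklen;
	int ret, t_ret;

	dbc = NULL;
	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	data.ulen = 64 * 1024;			/* deliberately modest */
	data.flags = DB_DBT_USERMEM;
	if ((data.data = malloc(data.ulen)) == NULL)
		return (ENOMEM);

	if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
		goto err;

	for (;;) {
		ret = dbc->c_get(dbc, &key, &data, DB_MULTIPLE_KEY | DB_NEXT);
		if (ret == DB_BUFFER_SMALL) {
			/* data.size reports the space the library needs. */
			if ((np = realloc(data.data, data.size)) == NULL) {
				ret = ENOMEM;
				goto err;
			}
			data.data = np;
			data.ulen = data.size;
			continue;
		}
		if (ret != 0)
			break;

		/* Walk the key/data pairs packed into the buffer. */
		for (DB_MULTIPLE_INIT(p, &data);;) {
			DB_MULTIPLE_KEY_NEXT(p,
			    &data, retkey, retklen, retdata, retdlen);
			if (p == NULL)
				break;
			/* ... process retkey/retklen, retdata/retdlen ... */
		}
	}
	if (ret == DB_NOTFOUND)
		ret = 0;

err:	if (dbc != NULL && (t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
		ret = t_ret;
	free(data.data);
	return (ret);
}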
*/ ACQUIRE_WRITE_LOCK(dbc, ret); if (ret != 0) @@ -1996,12 +1981,11 @@ split: ret = stack = 0; /* * SR [#6059] - * If we do not own a lock on the page anymore then - * clear the cursor so we don't point at it. - * Even if we call __bam_stkrel above we still - * may have entered the routine with the cursor - * posistioned to a particular record. This - * is in the case where C_RECNUM is set. + * If we do not own a lock on the page any more, then clear the + * cursor so we don't point at it. Even if we call __bam_stkrel + * above we still may have entered the routine with the cursor + * positioned to a particular record. This is in the case + * where C_RECNUM is set. */ if (own == 0) { cp->pgno = PGNO_INVALID; @@ -2019,6 +2003,33 @@ split: ret = stack = 0; err: done: /* + * If we inserted a key into the first or last slot of the tree, + * remember where it was so we can do it more quickly next time. + * If the tree has record numbers, we need a complete stack so + * that we can adjust the record counts, so skipping the tree search + * isn't possible. For subdatabases we need to be careful that the + * page does not move from one db to another, so we track its LSN. + * + * If there are duplicates and we are inserting into the last slot, + * the cursor will point _to_ the last item, not after it, which + * is why we subtract P_INDX below. + */ + + t = dbp->bt_internal; + if (ret == 0 && TYPE(cp->page) == P_LBTREE && + (flags == DB_KEYFIRST || flags == DB_KEYLAST) && + !F_ISSET(cp, C_RECNUM) && + (!F_ISSET(dbp, DB_AM_SUBDB) || + (LOGGING_ON(dbp->dbenv) && !F_ISSET(dbp, DB_AM_NOT_DURABLE))) && + ((NEXT_PGNO(cp->page) == PGNO_INVALID && + cp->indx >= NUM_ENT(cp->page) - P_INDX) || + (PREV_PGNO(cp->page) == PGNO_INVALID && cp->indx == 0))) { + t->bt_lpgno = cp->pgno; + if (F_ISSET(dbp, DB_AM_SUBDB)) + t->bt_llsn = LSN(cp->page); + } else + t->bt_lpgno = PGNO_INVALID; + /* * Discard any pages pinned in the tree and their locks, except for * the leaf page. Note, the leaf page participated in any stack we * acquired, and so we have to adjust the stack as necessary. If @@ -2371,7 +2382,7 @@ __bam_c_search(dbc, root_pgno, key, flags, exactp) db_pgno_t bt_lpgno; db_recno_t recno; u_int32_t sflags; - int cmp, ret; + int cmp, ret, t_ret; dbp = dbc->dbp; cp = (BTREE_CURSOR *)dbc->internal; @@ -2415,12 +2426,8 @@ fast_search: /* * If the application has a history of inserting into the first * or last pages of the database, we check those pages first to * avoid doing a full search. - * - * If the tree has record numbers, we need a complete stack so - * that we can adjust the record counts, so fast_search isn't - * possible. */ - if (F_ISSET(cp, C_RECNUM)) + if (F_ISSET(dbc, DBC_OPD)) goto search; /* @@ -2440,14 +2447,24 @@ fast_search: /* if (bt_lpgno == PGNO_INVALID) goto search; - /* Lock and retrieve the page on which we last inserted. */ + /* + * Lock and retrieve the page on which we last inserted. + * + * The page may not exist: if a transaction created the page + * and then aborted, the page might have been truncated from + * the end of the file. + */ h = NULL; ACQUIRE_CUR(dbc, DB_LOCK_WRITE, bt_lpgno, ret); - if (ret != 0) + if (ret != 0) { + if (ret == DB_PAGE_NOTFOUND) + ret = 0; goto fast_miss; + } h = cp->page; inp = P_INP(dbp, h); + /* * It's okay if the page type isn't right or it's empty, it * just means that the world changed. @@ -2455,6 +2472,10 @@ fast_search: /* if (TYPE(h) != P_LBTREE || NUM_ENT(h) == 0) goto fast_miss; + /* Verify that this page cannot have moved to another db. 
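The fast-insert path above caches the page of the last insert (bt_lpgno) and, for subdatabases, the page's LSN, and the hunk continuing just below uses that LSN to detect a hint that has gone stale because the page now belongs to another database. A generic sketch of the "cache a hint, validate it before trusting it" pattern, using a hypothetical in-memory structure rather than Berkeley DB pages:

#include <stddef.h>
#include <stdio.h>

/* A leaf in some ordered structure; 'gen' plays the role of the page LSN. */
struct leaf {
	int		maxkey;		/* largest key stored on this leaf */
	unsigned long	gen;		/* bumped whenever the leaf is reused */
};

struct hint {
	struct leaf	*lp;		/* cached leaf, like bt_lpgno */
	unsigned long	 gen;		/* generation observed when cached */
};

/*
 * try_fast_insert --
 *	Use the cached leaf only if it is still the same leaf (generation
 *	matches) and the new key still sorts after everything on it;
 *	otherwise report a miss, the way fast_search falls back to a full
 *	__bam_search.
 */
static int
try_fast_insert(struct hint *h, int key)
{
	if (h->lp == NULL || h->gen != h->lp->gen)
		return (0);		/* stale hint: full search needed */
	if (key <= h->lp->maxkey)
		return (0);		/* does not belong on this leaf */
	h->lp->maxkey = key;		/* "insert" in the last slot */
	return (1);
}

int
main()
{
	struct leaf l = { 10, 1 };
	struct hint h = { &l, 1 };

	printf("fast: %d\n", try_fast_insert(&h, 20));	/* hit */
	l.gen = 2;					/* leaf reused elsewhere */
	printf("fast: %d\n", try_fast_insert(&h, 30));	/* miss: stale hint */
	return (0);
}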
*/ + if (F_ISSET(dbp, DB_AM_SUBDB) && + log_compare(&t->bt_llsn, &LSN(h)) != 0) + goto fast_miss; /* * What we do here is test to see if we're at the beginning or * end of the tree and if the new item sorts before/after the @@ -2537,10 +2558,13 @@ fast_hit: /* Set the exact match flag, we may have found a duplicate. */ fast_miss: /* * This was not the right page, so we do not need to retain * the lock even in the presence of transactions. + * + * This is also an error path, so ret may have been set. */ DISCARD_CUR(dbc, ret); cp->pgno = PGNO_INVALID; - (void)__LPUT(dbc, cp->lock); + if ((t_ret = __LPUT(dbc, cp->lock)) != 0 && ret == 0) + ret = t_ret; if (ret != 0) return (ret); @@ -2559,20 +2583,6 @@ search: if ((ret = __bam_search(dbc, root_pgno, cp->lock = cp->csp->lock; cp->lock_mode = cp->csp->lock_mode; - /* - * If we inserted a key into the first or last slot of the tree, - * remember where it was so we can do it more quickly next time. - * If there are duplicates and we are inserting into the last slot, - * the cursor will point _to_ the last item, not after it, which - * is why we subtract P_INDX below. - */ - if (TYPE(cp->page) == P_LBTREE && - (flags == DB_KEYFIRST || flags == DB_KEYLAST)) - t->bt_lpgno = - (NEXT_PGNO(cp->page) == PGNO_INVALID && - cp->indx >= NUM_ENT(cp->page) - P_INDX) || - (PREV_PGNO(cp->page) == PGNO_INVALID && - cp->indx == 0) ? cp->pgno : PGNO_INVALID; return (0); } @@ -2661,6 +2671,10 @@ __bam_c_physdel(dbc) } if ((ret = __bam_ditem(dbc, cp->page, cp->indx)) != 0) return (ret); + + /* Clear the deleted flag, the item is gone. */ + F_CLR(cp, C_DELETED); + if (!empty_page) if ((ret = __bam_ca_di(dbc, PGNO(cp->page), cp->indx, -1)) != 0) return (ret); @@ -2759,6 +2773,8 @@ __bam_c_physdel(dbc) * If everything worked, delete the stack, otherwise, release the * stack and page locks without further damage. */ + if (ret == 0) + DISCARD_CUR(dbc, ret); if (ret == 0) ret = __bam_dpages(dbc, cp->sp); else diff --git a/db/btree/bt_delete.c b/db/btree/bt_delete.c index ef6e34caf..018c8ef49 100644 --- a/db/btree/bt_delete.c +++ b/db/btree/bt_delete.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -38,14 +38,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * + * $Id: bt_delete.c,v 11.49 2004/02/27 12:38:28 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: bt_delete.c,v 11.46 2003/06/30 17:19:29 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -263,7 +261,8 @@ __bam_dpages(dbc, stack_epg) for (epg = cp->sp; epg < stack_epg; ++epg) { if ((t_ret = __memp_fput(mpf, epg->page, 0)) != 0 && ret == 0) ret = t_ret; - (void)__TLPUT(dbc, epg->lock); + if ((t_ret = __TLPUT(dbc, epg->lock)) != 0 && ret == 0) + ret = t_ret; } if (ret != 0) goto err; @@ -277,7 +276,7 @@ __bam_dpages(dbc, stack_epg) * It will deadlock here. Before we unlink the subtree, we relink the * leaf page chain. 
*/ - if ((ret = __db_relink(dbc, DB_REM_PAGE, cp->csp->page, NULL, 1)) != 0) + if ((ret = __bam_relink(dbc, cp->csp->page, NULL)) != 0) goto err; /* @@ -296,9 +295,11 @@ __bam_dpages(dbc, stack_epg) pgno = PGNO(epg->page); nitems = NUM_ENT(epg->page); - if ((ret = __memp_fput(mpf, epg->page, 0)) != 0) + ret = __memp_fput(mpf, epg->page, 0); + if ((t_ret = __TLPUT(dbc, epg->lock)) != 0 && ret == 0) + ret = t_ret; + if (ret != 0) goto err_inc; - (void)__TLPUT(dbc, epg->lock); /* Free the rest of the pages in the stack. */ while (++epg <= cp->csp) { @@ -315,11 +316,12 @@ __bam_dpages(dbc, stack_epg) goto err; } - if ((ret = __db_free(dbc, epg->page)) != 0) { - epg->page = NULL; + ret = __db_free(dbc, epg->page); + epg->page = NULL; + if ((t_ret = __TLPUT(dbc, epg->lock)) != 0 && ret == 0) + ret = t_ret; + if (ret != 0) goto err_inc; - } - (void)__TLPUT(dbc, epg->lock); } if (0) { @@ -447,11 +449,13 @@ err: for (; epg <= cp->csp; ++epg) { if (0) { stop: done = 1; } - (void)__TLPUT(dbc, p_lock); + if ((t_ret = __TLPUT(dbc, p_lock)) != 0 && ret == 0) + ret = t_ret; if (parent != NULL && (t_ret = __memp_fput(mpf, parent, 0)) != 0 && ret == 0) ret = t_ret; - (void)__TLPUT(dbc, c_lock); + if ((t_ret = __TLPUT(dbc, c_lock)) != 0 && ret == 0) + ret = t_ret; if (child != NULL && (t_ret = __memp_fput(mpf, child, 0)) != 0 && ret == 0) ret = t_ret; @@ -459,3 +463,111 @@ stop: done = 1; return (ret); } + +/* + * __bam_relink -- + * Relink around a deleted page. + * + * PUBLIC: int __bam_relink __P((DBC *, PAGE *, PAGE **)); + */ +int +__bam_relink(dbc, pagep, new_next) + DBC *dbc; + PAGE *pagep, **new_next; +{ + DB *dbp; + PAGE *np, *pp; + DB_LOCK npl, ppl; + DB_LSN *nlsnp, *plsnp, ret_lsn; + DB_MPOOLFILE *mpf; + int ret, t_ret; + + dbp = dbc->dbp; + np = pp = NULL; + LOCK_INIT(npl); + LOCK_INIT(ppl); + nlsnp = plsnp = NULL; + mpf = dbp->mpf; + ret = 0; + + /* + * Retrieve and lock the one/two pages. For a remove, we may need + * two pages (the before and after). For an add, we only need one + * because, the split took care of the prev. + */ + if (pagep->next_pgno != PGNO_INVALID) { + if ((ret = __db_lget(dbc, + 0, pagep->next_pgno, DB_LOCK_WRITE, 0, &npl)) != 0) + goto err; + if ((ret = __memp_fget(mpf, &pagep->next_pgno, 0, &np)) != 0) { + ret = __db_pgerr(dbp, pagep->next_pgno, ret); + goto err; + } + nlsnp = &np->lsn; + } + if (pagep->prev_pgno != PGNO_INVALID) { + if ((ret = __db_lget(dbc, + 0, pagep->prev_pgno, DB_LOCK_WRITE, 0, &ppl)) != 0) + goto err; + if ((ret = __memp_fget(mpf, &pagep->prev_pgno, 0, &pp)) != 0) { + ret = __db_pgerr(dbp, pagep->prev_pgno, ret); + goto err; + } + plsnp = &pp->lsn; + } + + /* Log the change. */ + if (DBC_LOGGING(dbc)) { + if ((ret = __bam_relink_log(dbp, dbc->txn, &ret_lsn, 0, + pagep->pgno, &pagep->lsn, pagep->prev_pgno, plsnp, + pagep->next_pgno, nlsnp)) != 0) + goto err; + } else + LSN_NOT_LOGGED(ret_lsn); + if (np != NULL) + np->lsn = ret_lsn; + if (pp != NULL) + pp->lsn = ret_lsn; + pagep->lsn = ret_lsn; + + /* + * Modify and release the two pages. + * + * !!! + * The parameter new_next gets set to the page following the page we + * are removing. If there is no following page, then new_next gets + * set to NULL. 
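Stripped of pages, locks, logging and LSNs, the pointer work __bam_relink performs is the classic "route the neighbours around a node being removed from a doubly-linked chain". A plain in-memory sketch of just that step, with no Berkeley DB types:

#include <stddef.h>
#include <stdio.h>

struct node {
	int		 pgno;		/* stands in for the page number */
	struct node	*prev, *next;
};

/*
 * relink --
 *	Point the neighbours' prev/next pointers around 'dead', mirroring
 *	what __bam_relink does with prev_pgno/next_pgno.  Returns the node
 *	that followed 'dead', or NULL (cf. the new_next parameter).
 */
static struct node *
relink(struct node *dead)
{
	if (dead->next != NULL)
		dead->next->prev = dead->prev;
	if (dead->prev != NULL)
		dead->prev->next = dead->next;
	return (dead->next);
}

int
main()
{
	struct node a = { 1, NULL, NULL }, b = { 2, NULL, NULL },
	    c = { 3, NULL, NULL };

	a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;

	(void)relink(&b);			/* remove the middle "page" */
	printf("%d -> %d\n", a.pgno, a.next->pgno);	/* prints 1 -> 3 */
	return (0);
}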
+ */ + if (np != NULL) { + np->prev_pgno = pagep->prev_pgno; + if (new_next == NULL) + ret = __memp_fput(mpf, np, DB_MPOOL_DIRTY); + else { + *new_next = np; + ret = __memp_fset(mpf, np, DB_MPOOL_DIRTY); + } + if ((t_ret = __TLPUT(dbc, npl)) != 0 && ret == 0) + ret = t_ret; + if (ret != 0) + goto err; + } else if (new_next != NULL) + *new_next = NULL; + + if (pp != NULL) { + pp->next_pgno = pagep->next_pgno; + ret = __memp_fput(mpf, pp, DB_MPOOL_DIRTY); + if ((t_ret = __TLPUT(dbc, ppl)) != 0 && ret == 0) + ret = t_ret; + if (ret != 0) + goto err; + } + return (0); + +err: if (np != NULL) + (void)__memp_fput(mpf, np, 0); + (void)__TLPUT(dbc, npl); + if (pp != NULL) + (void)__memp_fput(mpf, pp, 0); + (void)__TLPUT(dbc, ppl); + return (ret); +} diff --git a/db/btree/bt_method.c b/db/btree/bt_method.c index 84abe96a2..0b67da91e 100644 --- a/db/btree/bt_method.c +++ b/db/btree/bt_method.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: bt_method.c,v 11.38 2004/09/22 03:31:26 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: bt_method.c,v 11.34 2003/06/30 17:19:32 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif @@ -21,15 +19,12 @@ static const char revid[] = "$Id: bt_method.c,v 11.34 2003/06/30 17:19:32 bostic #include "dbinc/qam.h" static int __bam_set_bt_maxkey __P((DB *, u_int32_t)); -static int __bam_get_bt_minkey __P((DB *, u_int32_t *)); static int __bam_set_bt_minkey __P((DB *, u_int32_t)); static int __bam_set_bt_prefix __P((DB *, size_t(*)(DB *, const DBT *, const DBT *))); static int __ram_get_re_delim __P((DB *, int *)); static int __ram_set_re_delim __P((DB *, int)); -static int __ram_get_re_len __P((DB *, u_int32_t *)); static int __ram_set_re_len __P((DB *, u_int32_t)); -static int __ram_get_re_pad __P((DB *, int *)); static int __ram_set_re_pad __P((DB *, int)); static int __ram_get_re_source __P((DB *, const char **)); static int __ram_set_re_source __P((DB *, const char *)); @@ -241,8 +236,10 @@ __bam_set_bt_maxkey(dbp, bt_maxkey) /* * __db_get_bt_minkey -- * Get the minimum keys per page. + * + * PUBLIC: int __bam_get_bt_minkey __P((DB *, u_int32_t *)); */ -static int +int __bam_get_bt_minkey(dbp, bt_minkeyp) DB *dbp; u_int32_t *bt_minkeyp; @@ -389,17 +386,34 @@ __ram_set_re_delim(dbp, re_delim) /* * __db_get_re_len -- * Get the variable-length input record length. + * + * PUBLIC: int __ram_get_re_len __P((DB *, u_int32_t *)); */ -static int +int __ram_get_re_len(dbp, re_lenp) DB *dbp; u_int32_t *re_lenp; { BTREE *t; + QUEUE *q; DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE | DB_OK_RECNO); - t = dbp->bt_internal; - *re_lenp = t->re_len; + + /* + * This has to work for all access methods, before or after opening the + * database. When the record length is set with __ram_set_re_len, the + * value in both the BTREE and QUEUE structs will be correct. + * Otherwise, this only makes sense after the database in opened, in + * which case we know the type. + */ + if (dbp->type == DB_QUEUE) { + q = dbp->q_internal; + *re_lenp = q->re_len; + } else { + t = dbp->bt_internal; + *re_lenp = t->re_len; + } + return (0); } @@ -432,18 +446,34 @@ __ram_set_re_len(dbp, re_len) /* * __db_get_re_pad -- * Get the fixed-length record pad character. 
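The __ram_get_re_len hunk above (and the matching __ram_get_re_pad hunk that follows) back the public DB->get_re_len and DB->get_re_pad accessors and are written to answer correctly before or after open. A hedged application-level sketch, assuming the public method names of the release this import tracks; error handling is abbreviated:

#include <stdio.h>
#include <db.h>

/*
 * show_re_len --
 *	Configure a fixed-length record database and read the record length
 *	and pad byte back through the public accessors, before any open.
 */
int
show_re_len(void)
{
	DB *dbp;
	u_int32_t re_len;
	int re_pad, ret;

	if ((ret = db_create(&dbp, NULL, 0)) != 0)
		return (ret);

	if ((ret = dbp->set_re_len(dbp, 128)) != 0 ||
	    (ret = dbp->set_re_pad(dbp, ' ')) != 0)
		goto err;

	/* Per the comment in the patch, this works before open as well. */
	if ((ret = dbp->get_re_len(dbp, &re_len)) != 0 ||
	    (ret = dbp->get_re_pad(dbp, &re_pad)) != 0)
		goto err;
	printf("re_len=%lu re_pad=0x%x\n", (unsigned long)re_len, re_pad);

err:	(void)dbp->close(dbp, 0);
	return (ret);
}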
+ * + * PUBLIC: int __ram_get_re_pad __P((DB *, int *)); */ -static int +int __ram_get_re_pad(dbp, re_padp) DB *dbp; int *re_padp; { BTREE *t; + QUEUE *q; DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE | DB_OK_RECNO); - t = dbp->bt_internal; - *re_padp = t->re_pad; + /* + * This has to work for all access methods, before or after opening the + * database. When the record length is set with __ram_set_re_pad, the + * value in both the BTREE and QUEUE structs will be correct. + * Otherwise, this only makes sense after the database in opened, in + * which case we know the type. + */ + if (dbp->type == DB_QUEUE) { + q = dbp->q_internal; + *re_padp = q->re_pad; + } else { + t = dbp->bt_internal; + *re_padp = t->re_pad; + } + return (0); } diff --git a/db/btree/bt_open.c b/db/btree/bt_open.c index 20f594fe5..e890c5dd7 100644 --- a/db/btree/bt_open.c +++ b/db/btree/bt_open.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -38,14 +38,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * + * $Id: bt_open.c,v 11.92 2004/04/29 14:39:47 ubell Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: bt_open.c,v 11.87 2003/07/17 01:39:09 margo Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -286,6 +284,7 @@ __bam_read_root(dbp, txn, base_pgno, flags) DB_MPOOLFILE *mpf; int ret, t_ret; + COMPQUIET(flags, 0); meta = NULL; t = dbp->bt_internal; LOCK_INIT(metalock); @@ -335,19 +334,6 @@ __bam_read_root(dbp, txn, base_pgno, flags) */ t->bt_lpgno = PGNO_INVALID; - /* - * We must initialize last_pgno, it could be stale. - * We update this without holding the meta page write - * locked. This is ok since two threads in the code - * must be setting it to the same value. SR #7159. - */ - if (!LF_ISSET(DB_RDONLY) && dbp->meta_pgno == PGNO_BASE_MD) { - __memp_last_pgno(mpf, &meta->dbmeta.last_pgno); - ret = __memp_fput(mpf, meta, DB_MPOOL_DIRTY); - } else - ret = __memp_fput(mpf, meta, 0); - meta = NULL; - err: /* Put the metadata page back. */ if (meta != NULL && (t_ret = __memp_fput(mpf, meta, 0)) != 0 && ret == 0) @@ -449,74 +435,76 @@ __bam_new_file(dbp, txn, fhp, name) mpf = dbp->mpf; root = NULL; meta = NULL; - memset(&pdbt, 0, sizeof(pdbt)); buf = NULL; - /* Build meta-data page. */ - if (name == NULL) { + /* Build the meta-data page. */ pgno = PGNO_BASE_MD; - ret = __memp_fget(mpf, &pgno, DB_MPOOL_CREATE, &meta); + if ((ret = + __memp_fget(mpf, &pgno, DB_MPOOL_CREATE, &meta)) != 0) + return (ret); + LSN_NOT_LOGGED(lsn); + __bam_init_meta(dbp, meta, PGNO_BASE_MD, &lsn); + meta->root = 1; + meta->dbmeta.last_pgno = 1; + ret = __memp_fput(mpf, meta, DB_MPOOL_DIRTY); + meta = NULL; + if (ret != 0) + goto err; + + /* Build the root page. */ + pgno = 1; + if ((ret = + __memp_fget(mpf, &pgno, DB_MPOOL_CREATE, &root)) != 0) + goto err; + P_INIT(root, dbp->pgsize, 1, PGNO_INVALID, PGNO_INVALID, + LEAFLEVEL, dbp->type == DB_RECNO ? P_LRECNO : P_LBTREE); + LSN_NOT_LOGGED(root->lsn); + ret = __memp_fput(mpf, root, DB_MPOOL_DIRTY); + root = NULL; + if (ret != 0) + goto err; } else { + memset(&pdbt, 0, sizeof(pdbt)); + + /* Build the meta-data page. 
*/ pginfo.db_pagesize = dbp->pgsize; pginfo.flags = F_ISSET(dbp, (DB_AM_CHKSUM | DB_AM_ENCRYPT | DB_AM_SWAP)); pginfo.type = dbp->type; pdbt.data = &pginfo; pdbt.size = sizeof(pginfo); - ret = __os_calloc(dbp->dbenv, 1, dbp->pgsize, &buf); + if ((ret = __os_calloc(dbenv, 1, dbp->pgsize, &buf)) != 0) + return (ret); meta = (BTMETA *)buf; - } - if (ret != 0) - return (ret); - - LSN_NOT_LOGGED(lsn); - __bam_init_meta(dbp, meta, PGNO_BASE_MD, &lsn); - meta->root = 1; - meta->dbmeta.last_pgno = 1; - - if (name == NULL) - ret = __memp_fput(mpf, meta, DB_MPOOL_DIRTY); - else { + LSN_NOT_LOGGED(lsn); + __bam_init_meta(dbp, meta, PGNO_BASE_MD, &lsn); + meta->root = 1; + meta->dbmeta.last_pgno = 1; if ((ret = __db_pgout(dbenv, PGNO_BASE_MD, meta, &pdbt)) != 0) goto err; - ret = __fop_write(dbenv, txn, name, - DB_APP_DATA, fhp, dbp->pgsize, 0, 0, buf, dbp->pgsize, 1, - F_ISSET(dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0); - } - if (ret != 0) - goto err; - meta = NULL; - - /* Now build root page. */ - if (name == NULL) { - pgno = 1; - if ((ret = - __memp_fget(mpf, &pgno, DB_MPOOL_CREATE, &root)) != 0) + if ((ret = __fop_write(dbenv, txn, name, DB_APP_DATA, fhp, + dbp->pgsize, 0, 0, buf, dbp->pgsize, 1, F_ISSET( + dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0)) != 0) goto err; - } else { + meta = NULL; + + /* Build the root page. */ #ifdef DIAGNOSTIC memset(buf, CLEAR_BYTE, dbp->pgsize); #endif root = (PAGE *)buf; - } - - P_INIT(root, dbp->pgsize, 1, PGNO_INVALID, PGNO_INVALID, - LEAFLEVEL, dbp->type == DB_RECNO ? P_LRECNO : P_LBTREE); - LSN_NOT_LOGGED(root->lsn); - - if (name == NULL) - ret = __memp_fput(mpf, root, DB_MPOOL_DIRTY); - else { + P_INIT(root, dbp->pgsize, 1, PGNO_INVALID, PGNO_INVALID, + LEAFLEVEL, dbp->type == DB_RECNO ? P_LRECNO : P_LBTREE); + LSN_NOT_LOGGED(root->lsn); if ((ret = __db_pgout(dbenv, root->pgno, root, &pdbt)) != 0) goto err; - ret = __fop_write(dbenv, txn, name, - DB_APP_DATA, fhp, dbp->pgsize, 1, 0, buf, dbp->pgsize, 1, - F_ISSET(dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0); + if ((ret = __fop_write(dbenv, txn, name, DB_APP_DATA, fhp, + dbp->pgsize, 1, 0, buf, dbp->pgsize, 1, F_ISSET( + dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0)) != 0) + goto err; + root = NULL; } - if (ret != 0) - goto err; - root = NULL; err: if (buf != NULL) __os_free(dbenv, buf); @@ -604,9 +592,8 @@ err: if (root != NULL) if ((t_ret = __memp_fput(mpf, root, 0)) != 0 && ret == 0) ret = t_ret; - if (LOCK_ISSET(metalock)) - if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0) - ret = t_ret; + if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0) + ret = t_ret; if (dbc != NULL) if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) ret = t_ret; diff --git a/db/btree/bt_put.c b/db/btree/bt_put.c index b98c6c579..060e8970f 100644 --- a/db/btree/bt_put.c +++ b/db/btree/bt_put.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -38,14 +38,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
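Either way __bam_new_file takes above, the result is the same two-page skeleton: a meta-data page with root = 1 and last_pgno = 1, followed by an empty leaf root. A toy sketch of that on-disk shape using plain stdio; the header structs and their layouts are invented here and do not match the real BTMETA/PAGE formats (no checksums, encryption or byte-swapping):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define	PGSIZE	4096

/* Toy stand-ins for BTMETA / PAGE headers; these layouts are made up. */
struct toy_meta { unsigned int magic, version, pagesize, root, last_pgno; };
struct toy_page { unsigned int pgno, prev, next; unsigned char level, type; };

/*
 * new_file --
 *	Write page 0 (meta, root = 1, last_pgno = 1) and page 1 (an empty
 *	leaf root), the skeleton a fresh btree database starts from.
 */
static int
new_file(const char *path)
{
	unsigned char buf[PGSIZE];
	struct toy_meta meta;
	struct toy_page root;
	FILE *fp;

	if ((fp = fopen(path, "wb")) == NULL)
		return (-1);

	memset(buf, 0, sizeof(buf));
	meta.magic = 0x053162;		/* btree magic, for flavour only */
	meta.version = 9;
	meta.pagesize = PGSIZE;
	meta.root = 1;
	meta.last_pgno = 1;
	memcpy(buf, &meta, sizeof(meta));
	if (fwrite(buf, 1, PGSIZE, fp) != PGSIZE)
		goto err;

	memset(buf, 0, sizeof(buf));
	root.pgno = 1;
	root.prev = root.next = 0;	/* no siblings yet */
	root.level = 1;			/* leaf level */
	root.type = 5;			/* a btree leaf, in the real format */
	memcpy(buf, &root, sizeof(root));
	if (fwrite(buf, 1, PGSIZE, fp) != PGSIZE)
		goto err;

	return (fclose(fp) == 0 ? 0 : -1);

err:	(void)fclose(fp);
	return (-1);
}

int
main()
{
	return (new_file("toy.db") == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}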
+ * + * $Id: bt_put.c,v 11.79 2004/01/28 03:35:49 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: bt_put.c,v 11.78 2003/10/31 15:07:40 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include diff --git a/db/btree/bt_rec.c b/db/btree/bt_rec.c index 1587028b3..e3fa7363c 100644 --- a/db/btree/bt_rec.c +++ b/db/btree/bt_rec.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: bt_rec.c,v 11.70 2004/09/24 00:43:12 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: bt_rec.c,v 11.64 2003/09/13 18:48:58 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -51,7 +49,7 @@ __bam_split_recover(dbenv, dbtp, lsnp, op, info) PAGE *_lp, *lp, *np, *pp, *_rp, *rp, *sp; db_pgno_t pgno, root_pgno; u_int32_t ptype; - int cmp, l_update, p_update, r_update, rc, ret, ret_l, rootsplit, t_ret; + int cmp, l_update, p_update, r_update, rc, ret, rootsplit, t_ret; COMPQUIET(info, NULL); REC_PRINT(__bam_split_print); @@ -83,50 +81,40 @@ __bam_split_recover(dbenv, dbtp, lsnp, op, info) pgno = PGNO(sp); root_pgno = argp->root_pgno; rootsplit = root_pgno != PGNO_INVALID; - if ((ret_l = __memp_fget(mpf, &argp->left, 0, &lp)) != 0) - lp = NULL; - if (__memp_fget(mpf, &argp->right, 0, &rp) != 0) - rp = NULL; + REC_FGET(mpf, argp->left, &lp, right); +right: REC_FGET(mpf, argp->right, &rp, redo); - if (DB_REDO(op)) { +redo: if (DB_REDO(op)) { l_update = r_update = p_update = 0; /* * Decide if we need to resplit the page. * - * If this is a root split, then the root has to exist, it's - * the page we're splitting and it gets modified. If this is - * not a root split, then the left page has to exist, for the - * same reason. + * If this is a root split, then the root has to exist unless + * we have truncated it due to a future deallocation. */ if (rootsplit) { - if ((ret = __memp_fget(mpf, &pgno, 0, &pp)) != 0) { - ret = __db_pgerr(file_dbp, pgno, ret); - pp = NULL; - goto out; - } - cmp = log_compare(&LSN(pp), &LSN(argp->pg.data)); - CHECK_LSN(op, cmp, &LSN(pp), &LSN(argp->pg.data)); + REC_FGET(mpf, root_pgno, &pp, do_left); + cmp = + log_compare(&LSN(pp), &LSN(argp->pg.data)); + CHECK_LSN(op, + cmp, &LSN(pp), &LSN(argp->pg.data)); p_update = cmp == 0; - } else if (lp == NULL) { - ret = __db_pgerr(file_dbp, argp->left, ret_l); - goto out; } - if (lp != NULL) { +do_left: if (lp != NULL) { cmp = log_compare(&LSN(lp), &argp->llsn); CHECK_LSN(op, cmp, &LSN(lp), &argp->llsn); if (cmp == 0) l_update = 1; - } else - l_update = 1; + } if (rp != NULL) { cmp = log_compare(&LSN(rp), &argp->rlsn); CHECK_LSN(op, cmp, &LSN(rp), &argp->rlsn); if (cmp == 0) r_update = 1; - } else - r_update = 1; + } + if (!p_update && !l_update && !r_update) goto check_next; @@ -159,13 +147,6 @@ __bam_split_recover(dbenv, dbtp, lsnp, op, info) NUM_ENT(sp))) != 0) goto out; - /* If the left child is wrong, update it. */ - if (lp == NULL && (ret = __memp_fget( - mpf, &argp->left, DB_MPOOL_CREATE, &lp)) != 0) { - ret = __db_pgerr(file_dbp, argp->left, ret); - lp = NULL; - goto out; - } if (l_update) { memcpy(lp, _lp, file_dbp->pgsize); lp->lsn = *lsnp; @@ -174,13 +155,6 @@ __bam_split_recover(dbenv, dbtp, lsnp, op, info) lp = NULL; } - /* If the right child is wrong, update it. 
*/ - if (rp == NULL && (ret = __memp_fget( - mpf, &argp->right, DB_MPOOL_CREATE, &rp)) != 0) { - ret = __db_pgerr(file_dbp, argp->right, ret); - rp = NULL; - goto out; - } if (r_update) { memcpy(rp, _rp, file_dbp->pgsize); rp->lsn = *lsnp; @@ -222,12 +196,19 @@ check_next: /* * previous-page pointer updated to our new page. The next * page must exist because we're redoing the operation. */ - if (!rootsplit && !IS_ZERO_LSN(argp->nlsn)) { + if (!rootsplit && argp->npgno != PGNO_INVALID) { if ((ret = __memp_fget(mpf, &argp->npgno, 0, &np)) != 0) { - ret = __db_pgerr(file_dbp, argp->npgno, ret); - np = NULL; - goto out; + if (ret != DB_PAGE_NOTFOUND +#ifndef HAVE_FTRUNCATE + || DB_REDO(op) +#endif + ) { + ret = __db_pgerr( + file_dbp, argp->npgno, ret); + goto out; + } else + goto done; } cmp = log_compare(&LSN(np), &argp->nlsn); CHECK_LSN(op, cmp, &LSN(np), &argp->nlsn); @@ -294,7 +275,7 @@ lrundo: if ((rootsplit && lp != NULL) || rp != NULL) { * possible that the next-page never existed, we ignore it as * if there's nothing to undo. */ - if (!rootsplit && !IS_ZERO_LSN(argp->nlsn)) { + if (!rootsplit && argp->npgno != PGNO_INVALID) { if ((ret = __memp_fget(mpf, &argp->npgno, 0, &np)) != 0) { np = NULL; @@ -366,17 +347,17 @@ __bam_rsplit_recover(dbenv, dbtp, lsnp, op, info) /* Fix the root page. */ pgno = root_pgno = argp->root_pgno; if ((ret = __memp_fget(mpf, &pgno, 0, &pagep)) != 0) { - /* The root page must always exist if we are going forward. */ - if (DB_REDO(op)) { + if (ret != DB_PAGE_NOTFOUND +#ifndef HAVE_FTRUNCATE + || DB_REDO(op) +#endif + ) { ret = __db_pgerr(file_dbp, pgno, ret); goto out; - } - /* This must be the root of an OPD tree. */ - DB_ASSERT(root_pgno != - ((BTREE *)file_dbp->bt_internal)->bt_root); - ret = 0; - goto do_page; + } else + goto do_page; } + modified = 0; cmp_n = log_compare(lsnp, &LSN(pagep)); cmp_p = log_compare(&LSN(pagep), &argp->rootlsn); @@ -408,10 +389,15 @@ do_page: * doesn't exist, it's okay and there's nothing further to do. */ if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { - if (DB_UNDO(op)) + if (ret != DB_PAGE_NOTFOUND +#ifndef HAVE_FTRUNCATE + || DB_REDO(op) +#endif + ) { + ret = __db_pgerr(file_dbp, argp->pgno, ret); + goto out; + } else goto done; - ret = __db_pgerr(file_dbp, argp->pgno, ret); - goto out; } modified = 0; (void)__ua_memcpy(©_lsn, &LSN(argp->pgdbt.data), sizeof(DB_LSN)); @@ -468,10 +454,15 @@ __bam_adj_recover(dbenv, dbtp, lsnp, op, info) /* Get the page; if it never existed and we're undoing, we're done. */ if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { - if (DB_UNDO(op)) + if (ret != DB_PAGE_NOTFOUND +#ifndef HAVE_FTRUNCATE + || DB_REDO(op) +#endif + ) { + ret = __db_pgerr(file_dbp, argp->pgno, ret); + goto out; + } else goto done; - ret = __db_pgerr(file_dbp, argp->pgno, ret); - goto out; } modified = 0; @@ -537,10 +528,15 @@ __bam_cadjust_recover(dbenv, dbtp, lsnp, op, info) /* Get the page; if it never existed and we're undoing, we're done. */ if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { - if (DB_UNDO(op)) + if (ret != DB_PAGE_NOTFOUND +#ifndef HAVE_FTRUNCATE + || DB_REDO(op) +#endif + ) { + ret = __db_pgerr(file_dbp, argp->pgno, ret); + goto out; + } else goto done; - ret = __db_pgerr(file_dbp, argp->pgno, ret); - goto out; } modified = 0; @@ -621,10 +617,15 @@ __bam_cdel_recover(dbenv, dbtp, lsnp, op, info) /* Get the page; if it never existed and we're undoing, we're done. 
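Every recovery function in these hunks follows the same rule: compare the LSN on the page with the LSNs carried in the log record to decide whether redo or undo actually has work to do, which is what makes recovery idempotent. A condensed sketch of that rule with a hypothetical LSN type (not the DB_LSN struct):

#include <stdio.h>

/* A log sequence number: file number plus offset, compared lexicographically. */
struct lsn { unsigned int file, offset; };

static int
lsn_cmp(const struct lsn *a, const struct lsn *b)
{
	if (a->file != b->file)
		return (a->file < b->file ? -1 : 1);
	if (a->offset != b->offset)
		return (a->offset < b->offset ? -1 : 1);
	return (0);
}

/*
 * recover_page --
 *	Redo only if the page still shows the "before" LSN the record
 *	expects to find; undo only if the page shows the record's own LSN.
 *	Anything else means the page is already in the right state and the
 *	record is a no-op for it.
 */
static void
recover_page(struct lsn *page_lsn, const struct lsn *before_lsn,
    const struct lsn *rec_lsn, int redo)
{
	if (redo && lsn_cmp(page_lsn, before_lsn) == 0) {
		/* ... re-apply the change ... */
		*page_lsn = *rec_lsn;
	} else if (!redo && lsn_cmp(page_lsn, rec_lsn) == 0) {
		/* ... roll the change back ... */
		*page_lsn = *before_lsn;
	}
}

int
main()
{
	struct lsn page = { 1, 100 }, before = { 1, 100 }, rec = { 1, 200 };

	recover_page(&page, &before, &rec, 1);	/* first redo applies */
	printf("page lsn = [%u][%u]\n", page.file, page.offset);
	recover_page(&page, &before, &rec, 1);	/* second redo is a no-op */
	printf("page lsn = [%u][%u]\n", page.file, page.offset);
	return (0);
}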
*/ if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { - if (DB_UNDO(op)) + if (ret != DB_PAGE_NOTFOUND +#ifndef HAVE_FTRUNCATE + || DB_REDO(op) +#endif + ) { + ret = __db_pgerr(file_dbp, argp->pgno, ret); + goto out; + } else goto done; - ret = __db_pgerr(file_dbp, argp->pgno, ret); - goto out; } modified = 0; @@ -692,10 +693,15 @@ __bam_repl_recover(dbenv, dbtp, lsnp, op, info) /* Get the page; if it never existed and we're undoing, we're done. */ if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { - if (DB_UNDO(op)) + if (ret != DB_PAGE_NOTFOUND +#ifndef HAVE_FTRUNCATE + || DB_REDO(op) +#endif + ) { + ret = __db_pgerr(file_dbp, argp->pgno, ret); + goto out; + } else goto done; - ret = __db_pgerr(file_dbp, argp->pgno, ret); - goto out; } bk = GET_BKEYDATA(file_dbp, pagep, argp->indx); @@ -796,8 +802,11 @@ __bam_root_recover(dbenv, dbtp, lsnp, op, info) REC_INTRO(__bam_root_read, 0); if ((ret = __memp_fget(mpf, &argp->meta_pgno, 0, &meta)) != 0) { - /* The metadata page must always exist on redo. */ - if (DB_REDO(op)) { + if (ret != DB_PAGE_NOTFOUND +#ifndef HAVE_FTRUNCATE + || DB_REDO(op) +#endif + ) { ret = __db_pgerr(file_dbp, argp->meta_pgno, ret); goto out; } else @@ -973,3 +982,148 @@ out: if (rdbc != NULL && (t_ret = __db_c_close(rdbc)) != 0 && ret == 0) ret = t_ret; REC_CLOSE; } + +/* + * __bam_relink_recover -- + * Recovery function for relink. + * + * PUBLIC: int __bam_relink_recover + * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); + */ +int +__bam_relink_recover(dbenv, dbtp, lsnp, op, info) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops op; + void *info; +{ + __bam_relink_args *argp; + DB *file_dbp; + DBC *dbc; + DB_MPOOLFILE *mpf; + PAGE *pagep; + int cmp_n, cmp_p, modified, ret; + + pagep = NULL; + COMPQUIET(info, NULL); + REC_PRINT(__bam_relink_print); + REC_INTRO(__bam_relink_read, 1); + + /* + * There are up to three pages we need to check -- the page, and the + * previous and next pages, if they existed. For a page add operation, + * the current page is the result of a split and is being recovered + * elsewhere, so all we need do is recover the next page. + */ + if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { + if (ret != DB_PAGE_NOTFOUND +#ifndef HAVE_FTRUNCATE + || DB_REDO(op) +#endif + ) { + ret = __db_pgerr(file_dbp, argp->pgno, ret); + goto out; + } else + goto next2; + } + modified = 0; + + cmp_p = log_compare(&LSN(pagep), &argp->lsn); + CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn); + if (cmp_p == 0 && DB_REDO(op)) { + /* Redo the relink. */ + pagep->lsn = *lsnp; + modified = 1; + } else if (log_compare(lsnp, &LSN(pagep)) == 0 && DB_UNDO(op)) { + /* Undo the relink. */ + pagep->next_pgno = argp->next; + pagep->prev_pgno = argp->prev; + + pagep->lsn = argp->lsn; + modified = 1; + } + if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) + goto out; + pagep = NULL; + +next2: if ((ret = __memp_fget(mpf, &argp->next, 0, &pagep)) != 0) { + if (ret != DB_PAGE_NOTFOUND +#ifndef HAVE_FTRUNCATE + || DB_REDO(op) +#endif + ) { + ret = __db_pgerr(file_dbp, argp->next, ret); + goto out; + } else + goto prev; + } + + modified = 0; + cmp_n = log_compare(lsnp, &LSN(pagep)); + cmp_p = log_compare(&LSN(pagep), &argp->lsn_next); + CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn_next); + if (cmp_p == 0 && DB_REDO(op)) { + /* Redo the remove or undo the add. */ + pagep->prev_pgno = argp->prev; + + modified = 1; + } else if (cmp_n == 0 && DB_UNDO(op)) { + /* Undo the remove or redo the add. 
*/ + pagep->prev_pgno = argp->pgno; + + modified = 1; + } + if (modified == 1) { + if (DB_UNDO(op)) + pagep->lsn = argp->lsn_next; + else + pagep->lsn = *lsnp; + } + if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) + goto out; + pagep = NULL; + +prev: if ((ret = __memp_fget(mpf, &argp->prev, 0, &pagep)) != 0) { + if (ret != DB_PAGE_NOTFOUND +#ifndef HAVE_FTRUNCATE + || DB_REDO(op) +#endif + ) { + ret = __db_pgerr(file_dbp, argp->prev, ret); + goto out; + } else + goto done; + } + + modified = 0; + cmp_p = log_compare(&LSN(pagep), &argp->lsn_prev); + CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn_prev); + if (cmp_p == 0 && DB_REDO(op)) { + /* Redo the relink. */ + pagep->next_pgno = argp->next; + + modified = 1; + } else if (log_compare(lsnp, &LSN(pagep)) == 0 && DB_UNDO(op)) { + /* Undo the relink. */ + pagep->next_pgno = argp->pgno; + + modified = 1; + } + if (modified == 1) { + if (DB_UNDO(op)) + pagep->lsn = argp->lsn_prev; + else + pagep->lsn = *lsnp; + } + if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) + goto out; + pagep = NULL; + +done: *lsnp = argp->prev_lsn; + ret = 0; + +out: if (pagep != NULL) + (void)__memp_fput(mpf, pagep, 0); + REC_CLOSE; +} diff --git a/db/btree/bt_reclaim.c b/db/btree/bt_reclaim.c index bc85bd2d3..ee722a30f 100644 --- a/db/btree/bt_reclaim.c +++ b/db/btree/bt_reclaim.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1998-2003 + * Copyright (c) 1998-2004 * Sleepycat Software. All rights reserved. + * + * $Id: bt_reclaim.c,v 11.15 2004/01/28 03:35:49 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: bt_reclaim.c,v 11.14 2003/06/30 17:19:33 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include diff --git a/db/btree/bt_recno.c b/db/btree/bt_recno.c index 2098e4d94..78f149dd6 100644 --- a/db/btree/bt_recno.c +++ b/db/btree/bt_recno.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: bt_recno.c,v 11.117 2004/03/28 17:01:01 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: bt_recno.c,v 11.113 2003/06/30 17:19:34 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -89,11 +87,13 @@ static int __ram_update __P((DBC *, db_recno_t, int)); * After a search, copy the found page into the cursor, discarding any * currently held lock. */ -#define STACK_TO_CURSOR(cp) { \ +#define STACK_TO_CURSOR(cp, ret) { \ + int __t_ret; \ (cp)->page = (cp)->csp->page; \ (cp)->pgno = (cp)->csp->page->pgno; \ (cp)->indx = (cp)->csp->indx; \ - (void)__TLPUT(dbc, (cp)->lock); \ + if ((__t_ret = __TLPUT(dbc, (cp)->lock)) != 0 && (ret) == 0) \ + ret = __t_ret; \ (cp)->lock = (cp)->csp->lock; \ (cp)->lock_mode = (cp)->csp->lock_mode; \ } @@ -235,7 +235,9 @@ __ram_c_del(dbc) stack = 1; /* Copy the page into the cursor. */ - STACK_TO_CURSOR(cp); + STACK_TO_CURSOR(cp, ret); + if (ret != 0) + goto err; /* * If re-numbering records, the on-page deleted flag can only mean @@ -495,7 +497,9 @@ retry: switch (flags) { } /* Copy the page into the cursor. 
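The __ram_writeback changes above allocate the pad buffer lazily and write it in place of any DB_KEYEMPTY slot, so a fixed-length backing file keeps its record numbering even across deleted records. A standalone sketch of that file format with made-up data and plain stdio:

#include <stdio.h>
#include <string.h>

#define	RE_LEN	8		/* fixed record length */
#define	RE_PAD	' '		/* pad byte for empty slots */

/*
 * writeback --
 *	Write a fixed-length flat file: present records are written as-is,
 *	missing record numbers become RE_LEN pad bytes, mirroring the
 *	DB_KEYEMPTY branch in __ram_writeback.
 */
static int
writeback(const char *path, const char *recs[], size_t nrecs)
{
	char pad[RE_LEN];
	FILE *fp;
	size_t i;

	if ((fp = fopen(path, "w")) == NULL)
		return (-1);
	memset(pad, RE_PAD, sizeof(pad));

	for (i = 0; i < nrecs; ++i)
		if (recs[i] == NULL) {		/* deleted record */
			if (fwrite(pad, 1, RE_LEN, fp) != RE_LEN)
				goto err;
		} else
			if (fwrite(recs[i], 1, RE_LEN, fp) != RE_LEN)
				goto err;

	return (fclose(fp) == 0 ? 0 : -1);

err:	(void)fclose(fp);
	return (-1);
}

int
main()
{
	const char *recs[] = { "rec00001", NULL, "rec00003" };

	return (writeback("backing.txt", recs, 3) == 0 ? 0 : 1);
}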
*/ - STACK_TO_CURSOR(cp); + STACK_TO_CURSOR(cp, ret); + if (ret != 0) + goto err; /* * If re-numbering records, the on-page deleted flag means this @@ -653,7 +657,9 @@ split: if ((ret = __bam_rsearch(dbc, &cp->recno, S_INSERT, 1, &exact)) != 0) DB_ASSERT(exact || CD_ISSET(cp)); /* Copy the page into the cursor. */ - STACK_TO_CURSOR(cp); + STACK_TO_CURSOR(cp, ret); + if (ret != 0) + goto err; ret = __bam_iitem(dbc, key, data, iiflags, 0); t_ret = __bam_stkrel(dbc, STK_CLRDBC); @@ -988,7 +994,7 @@ __ram_source(dbp) * when it comes time to write the database back to the source. */ if ((t->re_fp = fopen(t->re_source, "r")) == NULL) { - ret = errno; + ret = __os_get_errno(); __db_err(dbp->dbenv, "%s: %s", t->re_source, db_strerror(ret)); return (ret); } @@ -1065,13 +1071,13 @@ __ram_writeback(dbp) */ if (t->re_fp != NULL) { if (fclose(t->re_fp) != 0) { - ret = errno; + ret = __os_get_errno(); goto err; } t->re_fp = NULL; } if ((fp = fopen(t->re_source, "w")) == NULL) { - ret = errno; + ret = __os_get_errno(); __db_err(dbenv, "%s: %s", t->re_source, db_strerror(ret)); goto err; } @@ -1093,23 +1099,24 @@ __ram_writeback(dbp) * and the pad character if we're doing fixed-length records. */ delim = t->re_delim; - if (F_ISSET(dbp, DB_AM_FIXEDLEN)) { - if ((ret = __os_malloc(dbenv, t->re_len, &pad)) != 0) - goto err; - memset(pad, t->re_pad, t->re_len); - } for (keyno = 1;; ++keyno) { switch (ret = __db_get(dbp, NULL, &key, &data, 0)) { case 0: - if (data.size != 0 && (u_int32_t)fwrite( - data.data, 1, data.size, fp) != data.size) + if (data.size != 0 && + fwrite(data.data, 1, data.size, fp) != data.size) goto write_err; break; case DB_KEYEMPTY: - if (F_ISSET(dbp, DB_AM_FIXEDLEN) && - (u_int32_t)fwrite(pad, 1, t->re_len, fp) != - t->re_len) - goto write_err; + if (F_ISSET(dbp, DB_AM_FIXEDLEN)) { + if (pad == NULL) { + if ((ret = __os_malloc( + dbenv, t->re_len, &pad)) != 0) + goto err; + memset(pad, t->re_pad, t->re_len); + } + if (fwrite(pad, 1, t->re_len, fp) != t->re_len) + goto write_err; + } break; case DB_NOTFOUND: ret = 0; @@ -1119,8 +1126,8 @@ __ram_writeback(dbp) } if (!F_ISSET(dbp, DB_AM_FIXEDLEN) && fwrite(&delim, 1, 1, fp) != 1) { -write_err: ret = errno; - __db_err(dbp->dbenv, +write_err: ret = __os_get_errno(); + __db_err(dbenv, "%s: write failed to backing file: %s", t->re_source, strerror(ret)); goto err; @@ -1130,9 +1137,10 @@ write_err: ret = errno; err: done: /* Close the file descriptor. */ if (fp != NULL && fclose(fp) != 0) { + t_ret = __os_get_errno(); if (ret == 0) - ret = errno; - __db_err(dbenv, "%s: %s", t->re_source, db_strerror(errno)); + ret = t_ret; + __db_err(dbenv, "%s: %s", t->re_source, db_strerror(t_ret)); } /* Discard the cursor. */ @@ -1275,7 +1283,9 @@ retry: /* Find the slot for insertion. */ stack = 1; /* Copy the page into the cursor. */ - STACK_TO_CURSOR(cp); + STACK_TO_CURSOR(cp, ret); + if (ret != 0) + goto err; /* * The application may modify the data based on the selected record diff --git a/db/btree/bt_rsearch.c b/db/btree/bt_rsearch.c index 92eb82144..0027ec9e4 100644 --- a/db/btree/bt_rsearch.c +++ b/db/btree/bt_rsearch.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -35,14 +35,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
+ * + * $Id: bt_rsearch.c,v 11.40 2004/07/23 17:21:09 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: bt_rsearch.c,v 11.37 2003/06/30 17:19:34 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif @@ -78,11 +76,12 @@ __bam_rsearch(dbc, recnop, flags, stop, exactp) db_lockmode_t lock_mode; db_pgno_t pg; db_recno_t recno, t_recno, total; - int ret, stack; + int ret, stack, t_ret; dbp = dbc->dbp; mpf = dbp->mpf; cp = (BTREE_CURSOR *)dbc->internal; + h = NULL; BT_STK_CLR(cp); @@ -123,8 +122,11 @@ __bam_rsearch(dbc, recnop, flags, stop, exactp) if (!stack && ((LF_ISSET(S_PARENT) && (u_int8_t)(stop + 1) >= h->level) || (LF_ISSET(S_WRITE) && h->level == LEAFLEVEL))) { - (void)__memp_fput(mpf, h, 0); - (void)__LPUT(dbc, lock); + ret = __memp_fput(mpf, h, 0); + if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; + if (ret != 0) + return (ret); lock_mode = DB_LOCK_WRITE; if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0) return (ret); @@ -167,9 +169,11 @@ __bam_rsearch(dbc, recnop, flags, stop, exactp) * eliminate any concurrency. A possible fix * would be to lock the last leaf page instead. */ - (void)__memp_fput(mpf, h, 0); - (void)__TLPUT(dbc, lock); - return (DB_NOTFOUND); + ret = __memp_fput(mpf, h, 0); + if ((t_ret = + __TLPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; + return (ret == 0 ? DB_NOTFOUND : ret); } } } @@ -201,9 +205,13 @@ __bam_rsearch(dbc, recnop, flags, stop, exactp) *exactp = 0; if (!LF_ISSET(S_PAST_EOF) || recno > t_recno + 1) { - (void)__memp_fput(mpf, h, 0); - (void)__TLPUT(dbc, lock); - ret = DB_NOTFOUND; + ret = __memp_fput(mpf, h, 0); + h = NULL; + if ((t_ret = __TLPUT(dbc, + lock)) != 0 && ret == 0) + ret = t_ret; + if (ret == 0) + ret = DB_NOTFOUND; goto err; } } @@ -265,6 +273,7 @@ __bam_rsearch(dbc, recnop, flags, stop, exactp) cp, h, indx, lock, lock_mode, ret); if (ret != 0) goto err; + h = NULL; lock_mode = DB_LOCK_WRITE; if ((ret = @@ -281,7 +290,9 @@ __bam_rsearch(dbc, recnop, flags, stop, exactp) (h->level - 1) == LEAFLEVEL) stack = 1; - (void)__memp_fput(mpf, h, 0); + if ((ret = __memp_fput(mpf, h, 0)) != 0) + goto err; + h = NULL; lock_mode = stack && LF_ISSET(S_WRITE) ? DB_LOCK_WRITE : DB_LOCK_READ; @@ -292,7 +303,7 @@ __bam_rsearch(dbc, recnop, flags, stop, exactp) * is OK because this only happens when we are * descending the tree holding read-locks. */ - __LPUT(dbc, lock); + (void)__LPUT(dbc, lock); goto err; } } @@ -302,8 +313,12 @@ __bam_rsearch(dbc, recnop, flags, stop, exactp) } /* NOTREACHED */ -err: BT_STK_POP(cp); +err: if (h != NULL && (t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) + ret = t_ret; + + BT_STK_POP(cp); __bam_stkrel(dbc, 0); + return (ret); } @@ -378,7 +393,7 @@ __bam_nrecs(dbc, rep) DB_MPOOLFILE *mpf; PAGE *h; db_pgno_t pgno; - int ret; + int ret, t_ret; dbp = dbc->dbp; mpf = dbp->mpf; @@ -391,10 +406,11 @@ __bam_nrecs(dbc, rep) *rep = RE_NREC(h); - (void)__memp_fput(mpf, h, 0); - (void)__TLPUT(dbc, lock); + ret = __memp_fput(mpf, h, 0); + if ((t_ret = __TLPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; - return (0); + return (ret); } /* diff --git a/db/btree/bt_search.c b/db/btree/bt_search.c index dc35c7c68..4fb07f446 100644 --- a/db/btree/bt_search.c +++ b/db/btree/bt_search.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
*/ /* @@ -38,14 +38,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * + * $Id: bt_search.c,v 11.50 2004/07/23 17:21:09 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: bt_search.c,v 11.47 2003/06/30 17:19:35 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -85,12 +83,13 @@ __bam_search(dbc, root_pgno, key, flags, stop, recnop, exactp) db_lockmode_t lock_mode; db_pgno_t pg; db_recno_t recno; - int adjust, cmp, deloffset, ret, stack; + int adjust, cmp, deloffset, ret, stack, t_ret; int (*func) __P((DB *, const DBT *, const DBT *)); dbp = dbc->dbp; mpf = dbp->mpf; cp = (BTREE_CURSOR *)dbc->internal; + h = NULL; t = dbp->bt_internal; recno = 0; @@ -135,8 +134,11 @@ try_again: if (!stack && ((LF_ISSET(S_PARENT) && (u_int8_t)(stop + 1) >= h->level) || (LF_ISSET(S_WRITE) && h->level == LEAFLEVEL))) { - (void)__memp_fput(mpf, h, 0); - (void)__LPUT(dbc, lock); + ret = __memp_fput(mpf, h, 0); + if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; + if (ret != 0) + return (ret); lock_mode = DB_LOCK_WRITE; if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0) return (ret); @@ -149,8 +151,11 @@ try_again: (u_int8_t)(stop + 1) >= h->level) || (LF_ISSET(S_WRITE) && h->level == LEAFLEVEL))) { /* Someone else split the root, start over. */ - (void)__memp_fput(mpf, h, 0); - (void)__LPUT(dbc, lock); + ret = __memp_fput(mpf, h, 0); + if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; + if (ret != 0) + return (ret); goto try_again; } stack = 1; @@ -198,13 +203,19 @@ try_again: if (TYPE(h) == P_LBTREE || TYPE(h) == P_LDUP) { *exactp = 0; - if (LF_ISSET(S_EXACT)) - goto notfound; + if (LF_ISSET(S_EXACT)) { + ret = DB_NOTFOUND; + goto err; + } if (LF_ISSET(S_STK_ONLY)) { BT_STK_NUM(dbp->dbenv, cp, h, base, ret); - __LPUT(dbc, lock); - (void)__memp_fput(mpf, h, 0); + if ((t_ret = + __LPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; + if ((t_ret = + __memp_fput(mpf, h, 0)) != 0 && ret == 0) + ret = t_ret; return (ret); } @@ -244,12 +255,17 @@ next: if (recnop != NULL) if (LF_ISSET(S_STK_ONLY)) { if (stop == h->level) { BT_STK_NUM(dbp->dbenv, cp, h, indx, ret); - __LPUT(dbc, lock); - (void)__memp_fput(mpf, h, 0); + if ((t_ret = + __LPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; + if ((t_ret = + __memp_fput(mpf, h, 0)) != 0 && ret == 0) + ret = t_ret; return (ret); } BT_STK_NUMPUSH(dbp->dbenv, cp, h, indx, ret); (void)__memp_fput(mpf, h, 0); + h = NULL; if ((ret = __db_lget(dbc, LCK_COUPLE_ALWAYS, pg, lock_mode, 0, &lock)) != 0) { /* @@ -257,7 +273,7 @@ next: if (recnop != NULL) * is OK because it only happens when descending * the tree holding read-locks. */ - __LPUT(dbc, lock); + (void)__LPUT(dbc, lock); return (ret); } } else if (stack) { @@ -273,6 +289,7 @@ next: if (recnop != NULL) cp, h, indx, lock, lock_mode, ret); if (ret != 0) goto err; + h = NULL; lock_mode = DB_LOCK_WRITE; if ((ret = @@ -289,7 +306,9 @@ next: if (recnop != NULL) (h->level - 1) == LEAFLEVEL) stack = 1; - (void)__memp_fput(mpf, h, 0); + if ((ret = __memp_fput(mpf, h, 0)) != 0) + goto err; + h = NULL; lock_mode = stack && LF_ISSET(S_WRITE) ? DB_LOCK_WRITE : DB_LOCK_READ; @@ -300,7 +319,7 @@ next: if (recnop != NULL) * is OK because this only happens when we are * descending the tree holding read-locks. 
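The search-path hunks above converge on a single err: label that releases whatever is still held, and they rely on setting the page handle to NULL as soon as it is put or handed off so the error path can clean up unconditionally. A generic sketch of that cleanup discipline with hypothetical buffers instead of pages and locks:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * build_record --
 *	Acquire two buffers, hand the first off on success, and let one
 *	'err:' label release whatever is still owned.  Pointers are NULLed
 *	the moment ownership moves, the way the search code NULLs 'h' after
 *	each __memp_fput or BT_STK_ENTER.
 */
static int
build_record(char **out)
{
	char *buf, *tmp;
	int ret;

	buf = tmp = NULL;
	ret = 0;

	if ((buf = malloc(64)) == NULL || (tmp = malloc(16)) == NULL) {
		ret = -1;
		goto err;
	}
	strcpy(tmp, "payload");
	snprintf(buf, 64, "record:%s", tmp);

	*out = buf;
	buf = NULL;			/* ownership transferred to caller */

err:	free(buf);			/* no-op once ownership has moved */
	free(tmp);
	return (ret);
}

int
main()
{
	char *rec;

	if (build_record(&rec) == 0) {
		printf("%s\n", rec);
		free(rec);
	}
	return (0);
}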
*/ - __LPUT(dbc, lock); + (void)__LPUT(dbc, lock); goto err; } } @@ -357,8 +376,10 @@ found: *exactp = 1; * If we weren't able to find a non-deleted duplicate, return * DB_NOTFOUND. */ - if (B_DISSET(GET_BKEYDATA(dbp, h, indx + deloffset)->type)) - goto notfound; + if (B_DISSET(GET_BKEYDATA(dbp, h, indx + deloffset)->type)) { + ret = DB_NOTFOUND; + goto err; + } /* * Increment the record counter to point to the found element. @@ -381,23 +402,27 @@ found: *exactp = 1; if (LF_ISSET(S_STK_ONLY)) { BT_STK_NUM(dbp->dbenv, cp, h, indx, ret); - __LPUT(dbc, lock); - (void)__memp_fput(mpf, h, 0); - } else { + if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; + if ((t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) + ret = t_ret; + } else BT_STK_ENTER(dbp->dbenv, cp, h, indx, lock, lock_mode, ret); - if (ret != 0) - goto err; - } + if (ret != 0) + goto err; + return (0); -notfound: - /* Keep the page locked for serializability. */ - (void)__memp_fput(mpf, h, 0); - (void)__TLPUT(dbc, lock); - ret = DB_NOTFOUND; +err: if (h != NULL && (t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) + ret = t_ret; + + /* Keep any not-found page locked for serializability. */ + if ((t_ret = __TLPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; -err: BT_STK_POP(cp); + BT_STK_POP(cp); __bam_stkrel(dbc, 0); + return (ret); } @@ -446,10 +471,12 @@ __bam_stkrel(dbc, flags) */ epg->page = NULL; } - if (LF_ISSET(STK_NOLOCK)) - (void)__LPUT(dbc, epg->lock); - else - (void)__TLPUT(dbc, epg->lock); + if (LF_ISSET(STK_NOLOCK)) { + if ((t_ret = __LPUT(dbc, epg->lock)) != 0 && ret == 0) + ret = t_ret; + } else + if ((t_ret = __TLPUT(dbc, epg->lock)) != 0 && ret == 0) + ret = t_ret; } /* Clear the stack, all pages have been released. */ diff --git a/db/btree/bt_split.c b/db/btree/bt_split.c index 8c5066aed..3e2cb4e6d 100644 --- a/db/btree/bt_split.c +++ b/db/btree/bt_split.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -35,18 +35,15 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * + * $Id: bt_split.c,v 11.66 2004/10/01 13:00:21 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: bt_split.c,v 11.60 2003/06/30 17:19:35 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include -#include #include #endif @@ -120,7 +117,7 @@ __bam_split(dbc, arg, root_pgnop) arg, S_WRPAIR, level, NULL, &exact) : __bam_rsearch(dbc, (db_recno_t *)arg, S_WRPAIR, level, &exact))) != 0) - return (ret); + break; if (root_pgnop != NULL) *root_pgnop = cp->csp[0].page->pgno == root_pgno ? @@ -134,7 +131,7 @@ __bam_split(dbc, arg, root_pgnop) if (2 * B_MAXSIZEONPAGE(cp->ovflsize) <= (db_indx_t)P_FREESPACE(dbc->dbp, cp->csp[0].page)) { __bam_stkrel(dbc, STK_NOLOCK); - return (0); + break; } ret = cp->csp[0].page->pgno == root_pgno ? __bam_root(dbc, &cp->csp[0]) : @@ -162,10 +159,13 @@ __bam_split(dbc, arg, root_pgnop) dir = UP; break; default: - return (ret); + goto err; } } - /* NOTREACHED */ + +err: if (root_pgnop != NULL) + *root_pgnop = cp->root; + return (ret); } /* @@ -184,10 +184,11 @@ __bam_root(dbc, cp) PAGE *lp, *rp; db_indx_t split; u_int32_t opflags; - int ret; + int ret, t_ret; dbp = dbc->dbp; mpf = dbp->mpf; + lp = rp = NULL; /* Yeah, right. 
*/ if (cp->page->level >= MAXBTREELEVEL) { @@ -198,7 +199,6 @@ __bam_root(dbc, cp) } /* Create new left and right pages for the split. */ - lp = rp = NULL; if ((ret = __db_new(dbc, TYPE(cp->page), &lp)) != 0 || (ret = __db_new(dbc, TYPE(cp->page), &rp)) != 0) goto err; @@ -238,24 +238,21 @@ __bam_root(dbc, cp) goto err; /* Adjust any cursors. */ - if ((ret = __bam_ca_split(dbc, - cp->page->pgno, lp->pgno, rp->pgno, split, 1)) != 0) - goto err; - - /* Success -- write the real pages back to the store. */ - (void)__memp_fput(mpf, cp->page, DB_MPOOL_DIRTY); - (void)__TLPUT(dbc, cp->lock); - (void)__memp_fput(mpf, lp, DB_MPOOL_DIRTY); - (void)__memp_fput(mpf, rp, DB_MPOOL_DIRTY); + ret = __bam_ca_split(dbc, cp->page->pgno, lp->pgno, rp->pgno, split, 1); - return (0); + /* Success or error: release pages and locks. */ +err: if ((t_ret = + __memp_fput(mpf, cp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0) + ret = t_ret; + if ((t_ret = __TLPUT(dbc, cp->lock)) != 0 && ret == 0) + ret = t_ret; + if (lp != NULL && + (t_ret = __memp_fput(mpf, lp, DB_MPOOL_DIRTY)) != 0 && ret == 0) + ret = t_ret; + if (rp != NULL && + (t_ret = __memp_fput(mpf, rp, DB_MPOOL_DIRTY)) != 0 && ret == 0) + ret = t_ret; -err: if (lp != NULL) - (void)__memp_fput(mpf, lp, 0); - if (rp != NULL) - (void)__memp_fput(mpf, rp, 0); - (void)__memp_fput(mpf, cp->page, 0); - (void)__TLPUT(dbc, cp->lock); return (ret); } @@ -371,9 +368,13 @@ __bam_page(dbc, pp, cp) goto err; /* - * Lock the new page. We need to do this because someone - * could get here through bt_lpgno if this page was recently - * dealocated. They can't look at it before we commit. + * Lock the new page. We need to do this for two reasons: first, the + * fast-lookup code might have a reference to this page in bt_lpgno if + * the page was recently deleted from the tree, and that code doesn't + * walk the tree and so won't encounter the parent's page lock. + * Second, a dirty reader could get to this page via the parent or old + * page after the split is done but before the transaction is committed + * or aborted. */ if ((ret = __db_lget(dbc, 0, PGNO(alloc_rp), DB_LOCK_WRITE, 0, &rplock)) != 0) @@ -460,20 +461,24 @@ __bam_page(dbc, pp, cp) if ((t_ret = __memp_fput(mpf, alloc_rp, DB_MPOOL_DIRTY)) != 0 && ret == 0) ret = t_ret; - (void)__TLPUT(dbc, rplock); + if ((t_ret = __TLPUT(dbc, rplock)) != 0 && ret == 0) + ret = t_ret; if ((t_ret = __memp_fput(mpf, pp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0) ret = t_ret; - (void)__TLPUT(dbc, pp->lock); + if ((t_ret = __TLPUT(dbc, pp->lock)) != 0 && ret == 0) + ret = t_ret; if ((t_ret = __memp_fput(mpf, cp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0) ret = t_ret; - (void)__TLPUT(dbc, cp->lock); + if ((t_ret = __TLPUT(dbc, cp->lock)) != 0 && ret == 0) + ret = t_ret; if (tp != NULL) { if ((t_ret = __memp_fput(mpf, tp, DB_MPOOL_DIRTY)) != 0 && ret == 0) ret = t_ret; - (void)__TLPUT(dbc, tplock); + if ((t_ret = __TLPUT(dbc, tplock)) != 0 && ret == 0) + ret = t_ret; } return (ret); diff --git a/db/btree/bt_stat.c b/db/btree/bt_stat.c index 0e8cff37f..9d99ee2c4 100644 --- a/db/btree/bt_stat.c +++ b/db/btree/bt_stat.c @@ -1,19 +1,18 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: bt_stat.c,v 11.78 2004/09/22 03:31:26 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: bt_stat.c,v 11.61 2003/09/13 18:52:21 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include +#include #include #endif @@ -24,6 +23,7 @@ static const char revid[] = "$Id: bt_stat.c,v 11.61 2003/09/13 18:52:21 bostic E #include "dbinc/lock.h" #include "dbinc/mp.h" +#ifdef HAVE_STATISTICS /* * __bam_stat -- * Gather/print the btree statistics @@ -103,10 +103,12 @@ __bam_stat(dbc, spp, flags) sp->bt_levels = h->level; /* Discard the root page. */ - if ((ret = __memp_fput(mpf, h, 0)) != 0) - goto err; + ret = __memp_fput(mpf, h, 0); h = NULL; - __LPUT(dbc, lock); + if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; + if (ret != 0) + goto err; /* Walk the tree. */ if ((ret = __bam_traverse(dbc, @@ -120,10 +122,12 @@ __bam_stat(dbc, spp, flags) write_meta = !F_ISSET(dbp, DB_AM_RDONLY); meta_only: if (t->bt_meta != PGNO_BASE_MD || write_meta != 0) { - if ((ret = __memp_fput(mpf, meta, 0)) != 0) - goto err; + ret = __memp_fput(mpf, meta, 0); meta = NULL; - __LPUT(dbc, metalock); + if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0) + ret = t_ret; + if (ret != 0) + goto err; if ((ret = __db_lget(dbc, 0, t->bt_meta, write_meta == 0 ? @@ -138,14 +142,15 @@ meta_only: if ((ret = __db_lget(dbc, 0, cp->root, DB_LOCK_READ, 0, &lock)) != 0) goto err; - if ((ret = - __memp_fget(mpf, &cp->root, 0, (PAGE **)&h)) != 0) + if ((ret = __memp_fget(mpf, &cp->root, 0, &h)) != 0) goto err; sp->bt_nkeys = RE_NREC(h); } else sp->bt_nkeys = meta->dbmeta.key_count; - sp->bt_ndata = meta->dbmeta.record_count; + + sp->bt_ndata = dbp->type == DB_RECNO ? + sp->bt_nkeys : meta->dbmeta.record_count; } /* Get metadata page statistics. */ @@ -166,12 +171,14 @@ meta_only: *(DB_BTREE_STAT **)spp = sp; err: /* Discard the second page. */ - __LPUT(dbc, lock); + if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; if (h != NULL && (t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) ret = t_ret; /* Discard the metadata page. */ - __LPUT(dbc, metalock); + if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0) + ret = t_ret; if (meta != NULL && (t_ret = __memp_fput( mpf, meta, write_meta == 0 ? 0 : DB_MPOOL_DIRTY)) != 0 && ret == 0) ret = t_ret; @@ -185,106 +192,113 @@ err: /* Discard the second page. */ } /* - * __bam_traverse -- - * Walk a Btree database. + * __bam_stat_print -- + * Display btree/recno statistics. 
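The __bam_stat_print function introduced below drives its flag output from a static table of { bit, name } pairs handed to __db_prflags. A small standalone sketch of the same table-driven approach; the type, function, and bit values here are illustrative stand-ins, not the real FN table or BTM_* flags:

#include <stdio.h>

typedef struct {                        /* analogue of the FN table */
	unsigned int bit;
	const char *name;
} flag_name;

static void
print_flags(unsigned int flags, const flag_name *fn)
{
	const char *sep = "";

	for (; fn->name != NULL; ++fn)
		if (flags & fn->bit) {
			printf("%s%s", sep, fn->name);
			sep = ", ";
		}
	printf("\n");
}

static const flag_name meta_flags[] = {
	{ 0x0001, "duplicates" },       /* example bit values only */
	{ 0x0002, "recno" },
	{ 0, NULL }
};
/* print_flags(0x0003, meta_flags) prints "duplicates, recno". */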
* - * PUBLIC: int __bam_traverse __P((DBC *, db_lockmode_t, - * PUBLIC: db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *)); + * PUBLIC: int __bam_stat_print __P((DBC *, u_int32_t)); */ int -__bam_traverse(dbc, mode, root_pgno, callback, cookie) +__bam_stat_print(dbc, flags) DBC *dbc; - db_lockmode_t mode; - db_pgno_t root_pgno; - int (*callback)__P((DB *, PAGE *, void *, int *)); - void *cookie; + u_int32_t flags; { - BINTERNAL *bi; - BKEYDATA *bk; + static const FN fn[] = { + { BTM_DUP, "duplicates" }, + { BTM_RECNO, "recno" }, + { BTM_RECNUM, "record-numbers" }, + { BTM_FIXEDLEN, "fixed-length" }, + { BTM_RENUMBER, "renumber" }, + { BTM_SUBDB, "multiple-databases" }, + { BTM_DUPSORT, "sorted duplicates" }, + { 0, NULL } + }; DB *dbp; - DB_LOCK lock; - DB_MPOOLFILE *mpf; - PAGE *h; - RINTERNAL *ri; - db_indx_t indx; - int already_put, ret, t_ret; + DB_BTREE_STAT *sp; + DB_ENV *dbenv; + int lorder, ret; + const char *s; dbp = dbc->dbp; - mpf = dbp->mpf; - already_put = 0; + dbenv = dbp->dbenv; - if ((ret = __db_lget(dbc, 0, root_pgno, mode, 0, &lock)) != 0) - return (ret); - if ((ret = __memp_fget(mpf, &root_pgno, 0, &h)) != 0) { - __LPUT(dbc, lock); + if ((ret = __bam_stat(dbc, &sp, 0)) != 0) return (ret); + + if (LF_ISSET(DB_STAT_ALL)) { + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "Default Btree/Recno database information:"); } - switch (TYPE(h)) { - case P_IBTREE: - for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) { - bi = GET_BINTERNAL(dbp, h, indx); - if (B_TYPE(bi->type) == B_OVERFLOW && - (ret = __db_traverse_big(dbp, - ((BOVERFLOW *)bi->data)->pgno, - callback, cookie)) != 0) - goto err; - if ((ret = __bam_traverse( - dbc, mode, bi->pgno, callback, cookie)) != 0) - goto err; - } - break; - case P_IRECNO: - for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) { - ri = GET_RINTERNAL(dbp, h, indx); - if ((ret = __bam_traverse( - dbc, mode, ri->pgno, callback, cookie)) != 0) - goto err; - } - break; - case P_LBTREE: - for (indx = 0; indx < NUM_ENT(h); indx += P_INDX) { - bk = GET_BKEYDATA(dbp, h, indx); - if (B_TYPE(bk->type) == B_OVERFLOW && - (ret = __db_traverse_big(dbp, - GET_BOVERFLOW(dbp, h, indx)->pgno, - callback, cookie)) != 0) - goto err; - bk = GET_BKEYDATA(dbp, h, indx + O_INDX); - if (B_TYPE(bk->type) == B_DUPLICATE && - (ret = __bam_traverse(dbc, mode, - GET_BOVERFLOW(dbp, h, indx + O_INDX)->pgno, - callback, cookie)) != 0) - goto err; - if (B_TYPE(bk->type) == B_OVERFLOW && - (ret = __db_traverse_big(dbp, - GET_BOVERFLOW(dbp, h, indx + O_INDX)->pgno, - callback, cookie)) != 0) - goto err; - } + __db_msg(dbenv, "%lx\tBtree magic number", (u_long)sp->bt_magic); + __db_msg(dbenv, "%lu\tBtree version number", (u_long)sp->bt_version); + + (void)__db_get_lorder(dbp, &lorder); + switch (lorder) { + case 1234: + s = "Little-endian"; break; - case P_LDUP: - case P_LRECNO: - for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) { - bk = GET_BKEYDATA(dbp, h, indx); - if (B_TYPE(bk->type) == B_OVERFLOW && - (ret = __db_traverse_big(dbp, - GET_BOVERFLOW(dbp, h, indx)->pgno, - callback, cookie)) != 0) - goto err; - } + case 4321: + s = "Big-endian"; break; default: - return (__db_pgfmt(dbp->dbenv, h->pgno)); + s = "Unrecognized byte order"; + break; } + __db_msg(dbenv, "%s\tByte order", s); + __db_prflags(dbenv, NULL, sp->bt_metaflags, fn, NULL, "\tFlags"); + if (dbp->type == DB_BTREE) { +#ifdef NOT_IMPLEMENTED + __db_dl(dbenv, "Maximum keys per-page", (u_long)sp->bt_maxkey); +#endif + __db_dl(dbenv, "Minimum keys per-page", (u_long)sp->bt_minkey); + } + if 
(dbp->type == DB_RECNO) { + __db_dl(dbenv, + "Fixed-length record size", (u_long)sp->bt_re_len); + __db_dl(dbenv, + "%#x\tFixed-length record pad", (u_int)sp->bt_re_pad); + } + __db_dl(dbenv, + "Underlying database page size", (u_long)sp->bt_pagesize); + __db_dl(dbenv, "Number of levels in the tree", (u_long)sp->bt_levels); + __db_dl(dbenv, dbp->type == DB_BTREE ? + "Number of unique keys in the tree" : + "Number of records in the tree", (u_long)sp->bt_nkeys); + __db_dl(dbenv, + "Number of data items in the tree", (u_long)sp->bt_ndata); + + __db_dl(dbenv, + "Number of tree internal pages", (u_long)sp->bt_int_pg); + __db_dl_pct(dbenv, + "Number of bytes free in tree internal pages", + (u_long)sp->bt_int_pgfree, + DB_PCT_PG(sp->bt_int_pgfree, sp->bt_int_pg, sp->bt_pagesize), "ff"); + + __db_dl(dbenv, + "Number of tree leaf pages", (u_long)sp->bt_leaf_pg); + __db_dl_pct(dbenv, "Number of bytes free in tree leaf pages", + (u_long)sp->bt_leaf_pgfree, DB_PCT_PG( + sp->bt_leaf_pgfree, sp->bt_leaf_pg, sp->bt_pagesize), "ff"); + + __db_dl(dbenv, + "Number of tree duplicate pages", (u_long)sp->bt_dup_pg); + __db_dl_pct(dbenv, + "Number of bytes free in tree duplicate pages", + (u_long)sp->bt_dup_pgfree, + DB_PCT_PG(sp->bt_dup_pgfree, sp->bt_dup_pg, sp->bt_pagesize), "ff"); + + __db_dl(dbenv, + "Number of tree overflow pages", (u_long)sp->bt_over_pg); + __db_dl_pct(dbenv, "Number of bytes free in tree overflow pages", + (u_long)sp->bt_over_pgfree, DB_PCT_PG( + sp->bt_over_pgfree, sp->bt_over_pg, sp->bt_pagesize), "ff"); + __db_dl(dbenv, "Number of empty pages", (u_long)sp->bt_empty_pg); + + __db_dl(dbenv, "Number of pages on the free list", (u_long)sp->bt_free); + + __os_ufree(dbenv, sp); - ret = callback(dbp, h, cookie, &already_put); - -err: if (!already_put && (t_ret = __memp_fput(mpf, h, 0)) != 0 && ret != 0) - ret = t_ret; - __LPUT(dbc, lock); - - return (ret); + return (0); } /* @@ -316,6 +330,9 @@ __bam_stat_callback(dbp, h, cookie, putp) sp->bt_int_pgfree += P_FREESPACE(dbp, h); break; case P_LBTREE: + if (top == 0) + ++sp->bt_empty_pg; + /* Correct for on-page duplicates and deleted items. */ for (indx = 0; indx < top; indx += P_INDX) { type = GET_BKEYDATA(dbp, h, indx + O_INDX)->type; @@ -337,24 +354,28 @@ __bam_stat_callback(dbp, h, cookie, putp) sp->bt_leaf_pgfree += P_FREESPACE(dbp, h); break; case P_LRECNO: + if (top == 0) + ++sp->bt_empty_pg; + /* * If walking a recno tree, then each of these items is a key. * Otherwise, we're walking an off-page duplicate set. */ if (dbp->type == DB_RECNO) { - sp->bt_nkeys += top; - /* - * Correct for deleted items in non-renumbering - * Recno databases. + * Correct for deleted items in non-renumbering Recno + * databases. */ - if (F_ISSET(dbp, DB_AM_RENUMBER)) + if (F_ISSET(dbp, DB_AM_RENUMBER)) { + sp->bt_nkeys += top; sp->bt_ndata += top; - else + } else for (indx = 0; indx < top; indx += O_INDX) { type = GET_BKEYDATA(dbp, h, indx)->type; - if (!B_DISSET(type)) + if (!B_DISSET(type)) { ++sp->bt_ndata; + ++sp->bt_nkeys; + } } ++sp->bt_leaf_pg; @@ -367,6 +388,9 @@ __bam_stat_callback(dbp, h, cookie, putp) } break; case P_LDUP: + if (top == 0) + ++sp->bt_empty_pg; + /* Correct for deleted items. */ for (indx = 0; indx < top; indx += O_INDX) if (!B_DISSET(GET_BKEYDATA(dbp, h, indx)->type)) @@ -385,6 +409,60 @@ __bam_stat_callback(dbp, h, cookie, putp) return (0); } +/* + * __bam_print_cursor -- + * Display the current internal cursor. 
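Each per-category line above pairs a byte count with a fill figure computed by DB_PCT_PG. Assuming that macro reports free bytes as a percentage of the total space in that category's pages (free / (pages * pagesize)), an equivalent standalone calculation might look like the following; the helper name and rounding behavior are illustrative, not the macro's actual definition:

/* Percentage of bytes still free across `pages` pages of `pagesize` bytes. */
static unsigned long
pct_free(unsigned long freebytes, unsigned long pages, unsigned long pagesize)
{
	unsigned long total;

	total = pages * pagesize;
	return (total == 0 ? 0 : (freebytes * 100) / total);
}

/* e.g. 3 leaf pages of 4096 bytes with 6144 bytes free -> 50. */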
+ * + * PUBLIC: void __bam_print_cursor __P((DBC *)); + */ +void +__bam_print_cursor(dbc) + DBC *dbc; +{ + static const FN fn[] = { + { C_DELETED, "C_DELETED" }, + { C_RECNUM, "C_RECNUM" }, + { C_RENUMBER, "C_RENUMBER" }, + { 0, NULL } + }; + DB_ENV *dbenv; + BTREE_CURSOR *cp; + + dbenv = dbc->dbp->dbenv; + cp = (BTREE_CURSOR *)dbc->internal; + + STAT_ULONG("Overflow size", cp->ovflsize); + if (dbc->dbtype == DB_RECNO) + STAT_ULONG("Recno", cp->recno); + STAT_ULONG("Order", cp->order); + __db_prflags(dbenv, NULL, cp->flags, fn, NULL, "\tInternal Flags"); +} + +#else /* !HAVE_STATISTICS */ + +int +__bam_stat(dbc, spp, flags) + DBC *dbc; + void *spp; + u_int32_t flags; +{ + COMPQUIET(spp, NULL); + COMPQUIET(flags, 0); + + return (__db_stat_not_built(dbc->dbp->dbenv)); +} + +int +__bam_stat_print(dbc, flags) + DBC *dbc; + u_int32_t flags; +{ + COMPQUIET(flags, 0); + + return (__db_stat_not_built(dbc->dbp->dbenv)); +} +#endif + /* * __bam_key_range -- * Return proportion of keys relative to given key. The numbers are @@ -455,3 +533,111 @@ __bam_key_range(dbc, dbt, kp, flags) return (0); } + +/* + * __bam_traverse -- + * Walk a Btree database. + * + * PUBLIC: int __bam_traverse __P((DBC *, db_lockmode_t, + * PUBLIC: db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *)); + */ +int +__bam_traverse(dbc, mode, root_pgno, callback, cookie) + DBC *dbc; + db_lockmode_t mode; + db_pgno_t root_pgno; + int (*callback)__P((DB *, PAGE *, void *, int *)); + void *cookie; +{ + BINTERNAL *bi; + BKEYDATA *bk; + DB *dbp; + DB_LOCK lock; + DB_MPOOLFILE *mpf; + PAGE *h; + RINTERNAL *ri; + db_indx_t indx, *inp; + int already_put, ret, t_ret; + + dbp = dbc->dbp; + mpf = dbp->mpf; + already_put = 0; + + if ((ret = __db_lget(dbc, 0, root_pgno, mode, 0, &lock)) != 0) + return (ret); + if ((ret = __memp_fget(mpf, &root_pgno, 0, &h)) != 0) { + (void)__TLPUT(dbc, lock); + return (ret); + } + + switch (TYPE(h)) { + case P_IBTREE: + for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) { + bi = GET_BINTERNAL(dbp, h, indx); + if (B_TYPE(bi->type) == B_OVERFLOW && + (ret = __db_traverse_big(dbp, + ((BOVERFLOW *)bi->data)->pgno, + callback, cookie)) != 0) + goto err; + if ((ret = __bam_traverse( + dbc, mode, bi->pgno, callback, cookie)) != 0) + goto err; + } + break; + case P_IRECNO: + for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) { + ri = GET_RINTERNAL(dbp, h, indx); + if ((ret = __bam_traverse( + dbc, mode, ri->pgno, callback, cookie)) != 0) + goto err; + } + break; + case P_LBTREE: + inp = P_INP(dbp, h); + for (indx = 0; indx < NUM_ENT(h); indx += P_INDX) { + bk = GET_BKEYDATA(dbp, h, indx); + if (B_TYPE(bk->type) == B_OVERFLOW && + (indx + P_INDX >= NUM_ENT(h) || + inp[indx] != inp[indx + P_INDX])) { + if ((ret = __db_traverse_big(dbp, + GET_BOVERFLOW(dbp, h, indx)->pgno, + callback, cookie)) != 0) + goto err; + } + bk = GET_BKEYDATA(dbp, h, indx + O_INDX); + if (B_TYPE(bk->type) == B_DUPLICATE && + (ret = __bam_traverse(dbc, mode, + GET_BOVERFLOW(dbp, h, indx + O_INDX)->pgno, + callback, cookie)) != 0) + goto err; + if (B_TYPE(bk->type) == B_OVERFLOW && + (ret = __db_traverse_big(dbp, + GET_BOVERFLOW(dbp, h, indx + O_INDX)->pgno, + callback, cookie)) != 0) + goto err; + } + break; + case P_LDUP: + case P_LRECNO: + for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) { + bk = GET_BKEYDATA(dbp, h, indx); + if (B_TYPE(bk->type) == B_OVERFLOW && + (ret = __db_traverse_big(dbp, + GET_BOVERFLOW(dbp, h, indx)->pgno, + callback, cookie)) != 0) + goto err; + } + break; + default: + return (__db_pgfmt(dbp->dbenv, h->pgno)); + } + 
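The #else branch above shows the shape used when the library is built without statistics support: each public entry point keeps its signature but collapses to a stub that silences unused-parameter warnings (COMPQUIET) and returns a "not built" error. A generic sketch of the same compile-time stub pattern, with hypothetical feature, macro, and function names:

#define FEATURE_NOT_BUILT  (-1)         /* hypothetical error code */
#define UNUSED(x)          ((void)(x))  /* analogue of COMPQUIET */

#ifdef HAVE_FEATURE
int
feature_report(void *handle, unsigned int flags)
{
	/* Real implementation would live here. */
	UNUSED(handle);
	UNUSED(flags);
	return (0);
}
#else
int
feature_report(void *handle, unsigned int flags)
{
	UNUSED(handle);
	UNUSED(flags);
	return (FEATURE_NOT_BUILT);     /* same API, no functionality */
}
#endif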
+ ret = callback(dbp, h, cookie, &already_put); + +err: if (!already_put && (t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) + ret = t_ret; + if ((t_ret = __TLPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; + + return (ret); +} diff --git a/db/btree/bt_upgrade.c b/db/btree/bt_upgrade.c index 71ee84222..f89901789 100644 --- a/db/btree/bt_upgrade.c +++ b/db/btree/bt_upgrade.c @@ -1,15 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: bt_upgrade.c,v 11.30 2004/01/28 03:35:49 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: bt_upgrade.c,v 11.29 2003/05/18 18:10:11 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include diff --git a/db/btree/bt_verify.c b/db/btree/bt_verify.c index cd8c57a4d..6b78cbd17 100644 --- a/db/btree/bt_verify.c +++ b/db/btree/bt_verify.c @@ -1,18 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. * - * $Id: bt_verify.c,v 1.87 2003/10/06 14:09:23 bostic Exp $ + * $Id: bt_verify.c,v 1.97 2004/10/11 18:47:46 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: bt_verify.c,v 1.87 2003/10/06 14:09:23 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -34,8 +30,6 @@ static int __bam_vrfy_treeorder __P((DB *, db_pgno_t, PAGE *, BINTERNAL *, static int __ram_vrfy_inp __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, db_indx_t *, u_int32_t)); -#define OKFLAGS (DB_AGGRESSIVE | DB_NOORDERCHK | DB_SALVAGE) - /* * __bam_vrfy_meta -- * Verify the btree-specific part of a metadata page. @@ -185,6 +179,9 @@ __bam_vrfy_meta(dbp, vdp, meta, pgno, flags) err: if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0) ret = t_ret; + if (LF_ISSET(DB_SALVAGE) && + (t_ret = __db_salvage_markdone(vdp, pgno)) != 0 && ret == 0) + ret = t_ret; return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret); } @@ -216,9 +213,6 @@ __ram_vrfy_leaf(dbp, vdp, h, pgno, flags) if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0) return (ret); - if ((ret = __db_fchk(dbenv, "__ram_vrfy_leaf", flags, OKFLAGS)) != 0) - goto err; - if (TYPE(h) != P_LRECNO) { /* We should not have been called. */ TYPE_ERR_PRINT(dbenv, "__ram_vrfy_leaf", pgno, TYPE(h)); @@ -510,6 +504,8 @@ err: if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0) return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret); } +typedef enum { VRFY_ITEM_NOTSET=0, VRFY_ITEM_BEGIN, VRFY_ITEM_END } VRFY_ITEM; + /* * __bam_vrfy_inp -- * Verify that all entries in inp[] array are reasonable; @@ -528,11 +524,14 @@ __bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags) BOVERFLOW *bo; DB_ENV *dbenv; VRFY_CHILDINFO child; + VRFY_ITEM *pagelayout; VRFY_PAGEINFO *pip; - int isbad, initem, isdupitem, ret, t_ret; - u_int32_t himark, offset; /* These would be db_indx_ts but for algnmt.*/ + u_int32_t himark, offset; /* + * These would be db_indx_ts + * but for alignment. + */ u_int32_t i, endoff, nentries; - u_int8_t *pagelayout; + int isbad, initem, isdupitem, ret, t_ret; dbenv = dbp->dbenv; isbad = isdupitem = 0; @@ -573,9 +572,9 @@ __bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags) * it and the region immediately after it. 
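The __bam_vrfy_inp code continued below validates the inp[] array by building a per-byte map of the page: every item records where it begins and ends, two items may share a start only in the legitimate on-page-duplicate case, and the map is later scanned for gaps and overlaps. A compressed standalone sketch of that begin/end marking step; the enum, types, and collision handling are simplified and hypothetical:

#include <stddef.h>

typedef enum { ITEM_NOTSET = 0, ITEM_BEGIN, ITEM_END } item_mark;

/*
 * Mark one item's extent in a layout map with one entry per page byte
 * (allocated with calloc(pgsize, sizeof(item_mark))).  Return nonzero if
 * the start collides with an item already recorded; the real code then
 * checks whether the collision is a legal shared/duplicate entry.
 */
static int
mark_item(item_mark *map, size_t start, size_t end)
{
	if (map[start] == ITEM_BEGIN)
		return (1);             /* second item starting here */
	if (map[start] == ITEM_NOTSET)
		map[start] = ITEM_BEGIN;
	if (map[end] == ITEM_NOTSET)
		map[end] = ITEM_END;
	return (0);
}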
*/ himark = dbp->pgsize; - if ((ret = __os_malloc(dbenv, dbp->pgsize, &pagelayout)) != 0) + if ((ret = __os_calloc( + dbenv, dbp->pgsize, sizeof(pagelayout[0]), &pagelayout)) != 0) goto err; - memset(pagelayout, 0, dbp->pgsize); for (i = 0; i < NUM_ENT(h); i++) { switch (ret = __db_vrfy_inpitem(dbp, h, pgno, i, 1, flags, &himark, &offset)) { @@ -600,11 +599,9 @@ __bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags) * items have no overlaps or gaps. */ bk = GET_BKEYDATA(dbp, h, i); -#define ITEM_BEGIN 1 -#define ITEM_END 2 - if (pagelayout[offset] == 0) - pagelayout[offset] = ITEM_BEGIN; - else if (pagelayout[offset] == ITEM_BEGIN) { + if (pagelayout[offset] == VRFY_ITEM_NOTSET) + pagelayout[offset] = VRFY_ITEM_BEGIN; + else if (pagelayout[offset] == VRFY_ITEM_BEGIN) { /* * Having two inp entries that point at the same patch * of page is legal if and only if the page is @@ -676,12 +673,12 @@ __bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags) * If this is an onpage duplicate key we've seen before, * the end had better coincide too. */ - if (isdupitem && pagelayout[endoff] != ITEM_END) { + if (isdupitem && pagelayout[endoff] != VRFY_ITEM_END) { EPRINT((dbenv, "Page %lu: duplicated item %lu", (u_long)pgno, (u_long)i)); isbad = 1; - } else if (pagelayout[endoff] == 0) - pagelayout[endoff] = ITEM_END; + } else if (pagelayout[endoff] == VRFY_ITEM_NOTSET) + pagelayout[endoff] = VRFY_ITEM_END; isdupitem = 0; /* @@ -771,9 +768,9 @@ __bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags) for (i = himark; i < dbp->pgsize; i++) if (initem == 0) switch (pagelayout[i]) { - case 0: + case VRFY_ITEM_NOTSET: /* May be just for alignment. */ - if (i != ALIGN(i, sizeof(u_int32_t))) + if (i != DB_ALIGN(i, sizeof(u_int32_t))) continue; isbad = 1; @@ -781,13 +778,13 @@ __bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags) "Page %lu: gap between items at offset %lu", (u_long)pgno, (u_long)i)); /* Find the end of the gap */ - for ( ; pagelayout[i + 1] == 0 && + for (; pagelayout[i + 1] == VRFY_ITEM_NOTSET && (size_t)(i + 1) < dbp->pgsize; i++) ; break; - case ITEM_BEGIN: + case VRFY_ITEM_BEGIN: /* We've found an item. Check its alignment. */ - if (i != ALIGN(i, sizeof(u_int32_t))) { + if (i != DB_ALIGN(i, sizeof(u_int32_t))) { isbad = 1; EPRINT((dbenv, "Page %lu: offset %lu unaligned", @@ -796,7 +793,7 @@ __bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags) initem = 1; nentries++; break; - case ITEM_END: + case VRFY_ITEM_END: /* * We've hit the end of an item even though * we don't think we're in one; must @@ -807,22 +804,17 @@ __bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags) "Page %lu: overlapping items at offset %lu", (u_long)pgno, (u_long)i)); break; - default: - /* Should be impossible. */ - DB_ASSERT(0); - ret = EINVAL; - goto err; } else switch (pagelayout[i]) { - case 0: + case VRFY_ITEM_NOTSET: /* In the middle of an item somewhere. Okay. */ break; - case ITEM_END: + case VRFY_ITEM_END: /* End of an item; switch to out-of-item mode.*/ initem = 0; break; - case ITEM_BEGIN: + case VRFY_ITEM_BEGIN: /* * Hit a second item beginning without an * end. Overlap. @@ -1401,11 +1393,11 @@ __bam_vrfy_subtree(dbp, vdp, pgno, l, r, flags, levelp, nrecsp, relenp) * page's next_pgno, and our prev_pgno. 
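The leaf-chain checks that follow treat the leaf level as a doubly-linked list: each leaf's pgno must match the previous leaf's next_pgno, and its prev_pgno must point back at that previous leaf. A standalone sketch of that invariant over an already-collected walk order; the structure here is illustrative, not the real PAGE layout:

#include <stddef.h>

struct leaf {                           /* illustrative only */
	unsigned int pgno, prev_pgno, next_pgno;
	struct leaf *next;              /* walk order, left to right */
};

/* Return the number of chain violations found. */
static int
check_leaf_chain(const struct leaf *l)
{
	int bad = 0;

	for (; l != NULL && l->next != NULL; l = l->next) {
		if (l->next_pgno != l->next->pgno)
			++bad;          /* broken forward link */
		if (l->next->prev_pgno != l->pgno)
			++bad;          /* broken backward link */
	}
	return (bad);
}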
*/ if (pip->type != vdp->leaf_type) { + isbad = 1; EPRINT((dbenv, "Page %lu: unexpected page type %lu found in leaf chain (expected %lu)", (u_long)pip->pgno, (u_long)pip->type, (u_long)vdp->leaf_type)); - isbad = 1; } /* @@ -1414,20 +1406,20 @@ __bam_vrfy_subtree(dbp, vdp, pgno, l, r, flags, levelp, nrecsp, relenp) */ if (!F_ISSET(vdp, VRFY_LEAFCHAIN_BROKEN)) { if (pip->pgno != vdp->next_pgno) { + isbad = 1; EPRINT((dbenv, "Page %lu: incorrect next_pgno %lu found in leaf chain (should be %lu)", (u_long)vdp->prev_pgno, (u_long)vdp->next_pgno, (u_long)pip->pgno)); - isbad = 1; } if (pip->prev_pgno != vdp->prev_pgno) { -bad_prev: EPRINT((dbenv, - "Page %lu: incorrect prev_pgno %lu found in leaf chain (should be %lu)", +bad_prev: isbad = 1; + EPRINT((dbenv, + "Page %lu: incorrect prev_pgno %lu found in leaf chain (should be %lu)", (u_long)pip->pgno, (u_long)pip->prev_pgno, (u_long)vdp->prev_pgno)); - isbad = 1; } } } @@ -1519,11 +1511,11 @@ bad_prev: EPRINT((dbenv, dbp, vdp, child->pgno, NULL, NULL, stflags | ST_TOPLEVEL, NULL, NULL, NULL)) != 0) { - if (ret != + if (ret == DB_VERIFY_BAD) - goto err; - else isbad = 1; + else + goto err; } } } @@ -1538,10 +1530,10 @@ bad_prev: EPRINT((dbenv, */ if (F_ISSET(pip, VRFY_DUPS_UNSORTED) && LF_ISSET(ST_DUPSORT)) { + isbad = 1; EPRINT((dbenv, "Page %lu: unsorted duplicate set in sorted-dup database", (u_long)pgno)); - isbad = 1; } } } @@ -1602,10 +1594,10 @@ bad_prev: EPRINT((dbenv, if ((ret = __bam_vrfy_subtree(dbp, vdp, child->pgno, NULL, NULL, flags, &child_level, &child_nrecs, &child_relen)) != 0) { - if (ret != DB_VERIFY_BAD) - goto done; - else + if (ret == DB_VERIFY_BAD) isbad = 1; + else + goto done; } if (LF_ISSET(ST_RELEN)) { @@ -1662,10 +1654,10 @@ bad_prev: EPRINT((dbenv, * shouldn't happen. */ if (child->refcnt > 2) { + isbad = 1; EPRINT((dbenv, "Page %lu: overflow page %lu referenced more than twice from internal page", (u_long)pgno, (u_long)child->pgno)); - isbad = 1; } else for (j = 0; j < child->refcnt; j++) if ((ret = __db_vrfy_ovfl_structure(dbp, @@ -1701,7 +1693,7 @@ bad_prev: EPRINT((dbenv, for (i = 0; i < pip->entries; i += O_INDX) { li = GET_BINTERNAL(dbp, h, i); ri = (i + O_INDX < pip->entries) ? - GET_BINTERNAL(dbp, h, i + O_INDX) : NULL; + GET_BINTERNAL(dbp, h, i + O_INDX) : rp; /* * The leftmost key is forcibly sorted less than all entries, @@ -1710,10 +1702,10 @@ bad_prev: EPRINT((dbenv, if ((ret = __bam_vrfy_subtree(dbp, vdp, li->pgno, i == 0 ? NULL : li, ri, flags, &child_level, &child_nrecs, NULL)) != 0) { - if (ret != DB_VERIFY_BAD) - goto done; - else + if (ret == DB_VERIFY_BAD) isbad = 1; + else + goto done; } if (LF_ISSET(ST_RECNUM)) { @@ -1792,10 +1784,10 @@ done: if (F_ISSET(pip, VRFY_INCOMPLETE) && isbad == 0 && ret == 0) { goto err; if (NUM_ENT(h) == 0 && ISINTERNAL(h)) { + isbad = 1; EPRINT((dbenv, "Page %lu: internal page is empty and should not be", (u_long)pgno)); - isbad = 1; goto err; } } @@ -1862,9 +1854,9 @@ done: if (F_ISSET(pip, VRFY_INCOMPLETE) && isbad == 0 && ret == 0) { * PGNO_INVALID. */ if (vdp->next_pgno != PGNO_INVALID) { + isbad = 1; EPRINT((dbenv, "Page %lu: unterminated leaf chain", (u_long)vdp->prev_pgno)); - isbad = 1; } err: if (toplevel) { @@ -1968,7 +1960,7 @@ __bam_vrfy_treeorder(dbp, pgno, h, lp, rp, func, flags) return (EINVAL); } - /* On error, fall through, free if neeeded, and return. */ + /* On error, fall through, free if needed, and return. 
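Several hunks above flip the error tests from "ret != DB_VERIFY_BAD" to "ret == DB_VERIFY_BAD", which reads more directly but keeps the same policy: a verification failure only marks the subtree bad and the walk continues, while any other error aborts at once. A small sketch of that two-tier policy; the child-check function and the error value are hypothetical stand-ins:

#define MY_VERIFY_BAD  (-30900)         /* hypothetical error code */

extern int check_child(unsigned int pgno);      /* hypothetical */

static int
check_children(const unsigned int *pgnos, int n)
{
	int i, isbad, ret;

	isbad = 0;
	for (i = 0; i < n; i++)
		if ((ret = check_child(pgnos[i])) != 0) {
			if (ret == MY_VERIFY_BAD)
				isbad = 1;      /* note it, keep going */
			else
				return (ret);   /* hard error: stop now */
		}
	return (isbad ? MY_VERIFY_BAD : 0);
}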
*/ if ((ret = __bam_cmp(dbp, &dbt, h, 0, func, &cmp)) == 0) { if (cmp > 0) { EPRINT((dbenv, @@ -2004,7 +1996,7 @@ __bam_vrfy_treeorder(dbp, pgno, h, lp, rp, func, flags) return (EINVAL); } - /* On error, fall through, free if neeeded, and return. */ + /* On error, fall through, free if needed, and return. */ if ((ret = __bam_cmp(dbp, &dbt, h, last, func, &cmp)) == 0) { if (cmp < 0) { EPRINT((dbenv, @@ -2049,9 +2041,9 @@ __bam_salvage(dbp, vdp, pgno, pgtype, h, handle, callback, key, flags) DB_ENV *dbenv; BKEYDATA *bk; BOVERFLOW *bo; + VRFY_ITEM *pgmap; db_indx_t i, beg, end, *inp; u_int32_t himark; - u_int8_t *pgmap; void *ovflbuf; int t_ret, ret, err_ret; @@ -2078,12 +2070,9 @@ __bam_salvage(dbp, vdp, pgno, pgtype, h, handle, callback, key, flags) if ((ret = __os_malloc(dbenv, dbp->pgsize, &ovflbuf)) != 0) return (ret); - if (LF_ISSET(DB_AGGRESSIVE)) { - if ((ret = - __os_malloc(dbenv, dbp->pgsize, &pgmap)) != 0) - goto err; - memset(pgmap, 0, dbp->pgsize); - } + if (LF_ISSET(DB_AGGRESSIVE) && (ret = + __os_calloc(dbenv, dbp->pgsize, sizeof(pgmap[0]), &pgmap)) != 0) + goto err; /* * Loop through the inp array, spitting out key/data pairs. @@ -2135,7 +2124,7 @@ __bam_salvage(dbp, vdp, pgno, pgtype, h, handle, callback, key, flags) */ if (key != NULL && (i != 0 || !LF_ISSET(SA_SKIPFIRSTKEY))) - if ((ret = __db_prdbt(key, + if ((ret = __db_vrfy_prdbt(key, 0, " ", handle, callback, 0, vdp)) != 0) err_ret = ret; @@ -2166,7 +2155,8 @@ __bam_salvage(dbp, vdp, pgno, pgtype, h, handle, callback, key, flags) if (!IS_VALID_PGNO(bo->pgno) || (i % P_INDX == 0)) { /* Not much to do on failure. */ - if ((ret = __db_prdbt(&unkdbt, 0, " ", + if ((ret = + __db_vrfy_prdbt(&unkdbt, 0, " ", handle, callback, 0, vdp)) != 0) err_ret = ret; break; @@ -2179,11 +2169,11 @@ __bam_salvage(dbp, vdp, pgno, pgtype, h, handle, callback, key, flags) break; case B_KEYDATA: - end = - ALIGN(beg + bk->len, sizeof(u_int32_t)) - 1; + end = (db_indx_t)DB_ALIGN( + beg + bk->len, sizeof(u_int32_t)) - 1; dbt.data = bk->data; dbt.size = bk->len; - if ((ret = __db_prdbt(&dbt, + if ((ret = __db_vrfy_prdbt(&dbt, 0, " ", handle, callback, 0, vdp)) != 0) err_ret = ret; break; @@ -2194,11 +2184,11 @@ __bam_salvage(dbp, vdp, pgno, pgtype, h, handle, callback, key, flags) bo->pgno, &dbt, &ovflbuf, flags)) != 0) { err_ret = ret; /* We care about err_ret more. */ - (void)__db_prdbt(&unkdbt, 0, " ", + (void)__db_vrfy_prdbt(&unkdbt, 0, " ", handle, callback, 0, vdp); break; } - if ((ret = __db_prdbt(&dbt, + if ((ret = __db_vrfy_prdbt(&dbt, 0, " ", handle, callback, 0, vdp)) != 0) err_ret = ret; break; @@ -2219,8 +2209,8 @@ __bam_salvage(dbp, vdp, pgno, pgtype, h, handle, callback, key, flags) * any bogus inp elements and thereby missed stuff. */ if (LF_ISSET(DB_AGGRESSIVE)) { - pgmap[beg] = ITEM_BEGIN; - pgmap[end] = ITEM_END; + pgmap[beg] = VRFY_ITEM_BEGIN; + pgmap[end] = VRFY_ITEM_END; } } } @@ -2230,7 +2220,7 @@ __bam_salvage(dbp, vdp, pgno, pgtype, h, handle, callback, key, flags) * a datum; fix this imbalance by printing an "UNKNOWN". */ if (pgtype == P_LBTREE && (i % P_INDX == 1) && ((ret = - __db_prdbt(&unkdbt, 0, " ", handle, callback, 0, vdp)) != 0)) + __db_vrfy_prdbt(&unkdbt, 0, " ", handle, callback, 0, vdp)) != 0)) err_ret = ret; err: if (pgmap != NULL) diff --git a/db/btree/btree.src b/db/btree/btree.src index 85faff67f..c4f761de0 100644 --- a/db/btree/btree.src +++ b/db/btree/btree.src @@ -1,17 +1,15 @@ /*- * See the file LICENSE for redistribution information. 
* - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: btree.src,v 10.39 2003/11/14 05:32:34 ubell Exp $ + * $Id: btree.src,v 10.42 2004/06/17 17:35:12 bostic Exp $ */ PREFIX __bam DBPRIVATE -INCLUDE #include "db_config.h" -INCLUDE INCLUDE #ifndef NO_SYSTEM_INCLUDES INCLUDE #include INCLUDE @@ -29,10 +27,6 @@ INCLUDE #include "dbinc/log.h" INCLUDE #include "dbinc/txn.h" INCLUDE -/* - * NOTE: pg_alloc and pg_free have been moved to db.src, where they belong. - */ - /* * BTREE-split: used to log a page split. * @@ -208,3 +202,24 @@ ARG recno db_recno_t ld /* Order number of the adjustment. */ ARG order u_int32_t ld END + +/* + * BTREE-relink -- Handles relinking around a deleted leaf page. + * + */ +BEGIN relink 147 +/* Fileid of db affected. */ +DB fileid int32_t ld +/* The page being changed. */ +ARG pgno db_pgno_t lu +/* The page's original lsn. */ +POINTER lsn DB_LSN * lu +/* The previous page. */ +ARG prev db_pgno_t lu +/* The previous page's original lsn. */ +POINTER lsn_prev DB_LSN * lu +/* The next page. */ +ARG next db_pgno_t lu +/* The previous page's original lsn. */ +POINTER lsn_next DB_LSN * lu +END diff --git a/db/btree/btree_auto.c b/db/btree/btree_auto.c index 16ebbcad9..9556e5fee 100644 --- a/db/btree/btree_auto.c +++ b/db/btree/btree_auto.c @@ -1,4 +1,5 @@ /* Do not edit: automatically built by gen_rec.awk. */ + #include "db_config.h" #ifndef NO_SYSTEM_INCLUDES @@ -43,33 +44,42 @@ __bam_split_log(dbp, txnid, ret_lsnp, flags, left, llsn, right, rlsn, indx, DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___bam_split; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
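The regenerated logging functions below all follow the same outline: decide up front whether the record is durable, then either hand it to the real log_put or, for a non-durable transactional record, queue it on the transaction's in-memory log list and mark the returned LSN as not logged. A much-reduced sketch of that branch; the queue, structures, and helpers are stand-ins, not the generated code:

#include <stdlib.h>

struct logrec { void *data; size_t size; };
struct memrec { struct memrec *next; struct logrec rec; };
struct txn    { struct memrec *mem_logs; };     /* in-memory record list */

extern int real_log_put(const struct logrec *, unsigned long *lsnp); /* stand-in */

static int
put_record(struct txn *txn, int is_durable, struct logrec *rec,
    unsigned long *lsnp)
{
	struct memrec *mr;

	if (is_durable || txn == NULL)
		return (real_log_put(rec, lsnp));       /* goes to disk */

	/* Non-durable: keep it only in memory, never in the log file. */
	if ((mr = malloc(sizeof(*mr))) == NULL)
		return (-1);
	mr->rec = *rec;
	mr->next = txn->mem_logs;
	txn->mem_logs = mr;
	*lsnp = 0;                      /* analogue of LSN_NOT_LOGGED() */
	return (0);
}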
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -92,27 +102,23 @@ __bam_split_log(dbp, txnid, ret_lsnp, flags, left, llsn, right, rlsn, indx, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -192,141 +198,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__bam_split_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __bam_split_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__bam_split_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __bam_split_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__bam_split_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __bam_split_args *argp; - 
u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __bam_split_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__bam_split%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tleft: %lu\n", (u_long)argp->left); - (void)printf("\tllsn: [%lu][%lu]\n", - (u_long)argp->llsn.file, (u_long)argp->llsn.offset); - (void)printf("\tright: %lu\n", (u_long)argp->right); - (void)printf("\trlsn: [%lu][%lu]\n", - (u_long)argp->rlsn.file, (u_long)argp->rlsn.offset); - (void)printf("\tindx: %lu\n", (u_long)argp->indx); - (void)printf("\tnpgno: %lu\n", (u_long)argp->npgno); - (void)printf("\tnlsn: [%lu][%lu]\n", - (u_long)argp->nlsn.file, (u_long)argp->nlsn.offset); - (void)printf("\troot_pgno: %lu\n", (u_long)argp->root_pgno); - (void)printf("\tpg: "); - for (i = 0; i < argp->pg.size; i++) { - ch = ((u_int8_t *)argp->pg.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\topflags: %lu\n", (u_long)argp->opflags); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __bam_split_read __P((DB_ENV *, void *, __bam_split_args **)); */ @@ -344,9 +256,9 @@ __bam_split_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__bam_split_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -425,33 +337,42 @@ __bam_rsplit_log(dbp, txnid, ret_lsnp, flags, pgno, pgdbt, root_pgno, nrec, root DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___bam_rsplit; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
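Each generated *_log function serializes its arguments by memcpy-ing fixed-size fields and length-prefixed blobs into one contiguous buffer, zero-filling any npad bytes at the end, and finally asserting that the write cursor stayed within the computed record size. A standalone sketch of that packing step; the field layout and the padding rule here are illustrative, not the generated record format:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Pack a type code, an LSN-like pair, and a byte blob; zero-pad to 4 bytes. */
static size_t
pack_record(uint8_t *buf, size_t bufsz, uint32_t op,
    uint32_t lsn_file, uint32_t lsn_off, const void *blob, uint32_t bloblen)
{
	uint8_t *bp = buf;
	size_t need, npad;

	need = 4 * sizeof(uint32_t) + bloblen;  /* op, file, off, length, data */
	npad = (4 - need % 4) % 4;              /* illustrative pad rule */
	if (need + npad > bufsz)
		return (0);

	memcpy(bp, &op, sizeof(op));             bp += sizeof(op);
	memcpy(bp, &lsn_file, sizeof(lsn_file)); bp += sizeof(lsn_file);
	memcpy(bp, &lsn_off, sizeof(lsn_off));   bp += sizeof(lsn_off);
	memcpy(bp, &bloblen, sizeof(bloblen));   bp += sizeof(bloblen);
	memcpy(bp, blob, bloblen);               bp += bloblen;
	memset(bp, 0, npad);                     bp += npad;

	assert((size_t)(bp - buf) == need + npad);
	return (need + npad);
}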
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -470,27 +391,23 @@ __bam_rsplit_log(dbp, txnid, ret_lsnp, flags, pgno, pgdbt, root_pgno, nrec, root logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -557,140 +474,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__bam_rsplit_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __bam_rsplit_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__bam_rsplit_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __bam_rsplit_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__bam_rsplit_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __bam_rsplit_args 
*argp; - u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __bam_rsplit_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__bam_rsplit%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tpgdbt: "); - for (i = 0; i < argp->pgdbt.size; i++) { - ch = ((u_int8_t *)argp->pgdbt.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\troot_pgno: %lu\n", (u_long)argp->root_pgno); - (void)printf("\tnrec: %lu\n", (u_long)argp->nrec); - (void)printf("\trootent: "); - for (i = 0; i < argp->rootent.size; i++) { - ch = ((u_int8_t *)argp->rootent.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\trootlsn: [%lu][%lu]\n", - (u_long)argp->rootlsn.file, (u_long)argp->rootlsn.offset); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __bam_rsplit_read __P((DB_ENV *, void *, __bam_rsplit_args **)); */ @@ -708,9 +532,9 @@ __bam_rsplit_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__bam_rsplit_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -775,33 +599,42 @@ __bam_adj_log(dbp, txnid, ret_lsnp, flags, pgno, lsn, indx, indx_copy, is_insert DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___bam_adj; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -819,27 +652,23 @@ __bam_adj_log(dbp, txnid, ret_lsnp, flags, pgno, lsn, indx, indx_copy, is_insert logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -888,127 +717,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__bam_adj_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __bam_adj_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__bam_adj_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __bam_adj_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__bam_adj_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __bam_adj_args *argp; - int ret; - - 
notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __bam_adj_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__bam_adj%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tlsn: [%lu][%lu]\n", - (u_long)argp->lsn.file, (u_long)argp->lsn.offset); - (void)printf("\tindx: %lu\n", (u_long)argp->indx); - (void)printf("\tindx_copy: %lu\n", (u_long)argp->indx_copy); - (void)printf("\tis_insert: %lu\n", (u_long)argp->is_insert); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __bam_adj_read __P((DB_ENV *, void *, __bam_adj_args **)); */ @@ -1026,9 +775,9 @@ __bam_adj_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__bam_adj_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -1084,33 +833,42 @@ __bam_cadjust_log(dbp, txnid, ret_lsnp, flags, pgno, lsn, indx, adjust, opflags) DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___bam_cadjust; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -1128,27 +886,23 @@ __bam_cadjust_log(dbp, txnid, ret_lsnp, flags, pgno, lsn, indx, adjust, opflags) logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -1197,150 +951,70 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__bam_cadjust_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION /* - * PUBLIC: int __bam_cadjust_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); + * PUBLIC: int __bam_cadjust_read __P((DB_ENV *, void *, + * PUBLIC: __bam_cadjust_args **)); */ int -__bam_cadjust_getpgnos(dbenv, rec, lsnp, notused1, summary) +__bam_cadjust_read(dbenv, recbuf, argpp) DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; + void *recbuf; + __bam_cadjust_args **argpp; { - TXN_RECS *t; + __bam_cadjust_args *argp; + u_int32_t uinttmp; + u_int8_t *bp; int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) + if ((ret = __os_malloc(dbenv, + sizeof(__bam_cadjust_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; + argp->txnid = (DB_TXN *)&argp[1]; - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - 
sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __bam_cadjust_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__bam_cadjust_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __bam_cadjust_args *argp; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __bam_cadjust_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__bam_cadjust%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tlsn: [%lu][%lu]\n", - (u_long)argp->lsn.file, (u_long)argp->lsn.offset); - (void)printf("\tindx: %lu\n", (u_long)argp->indx); - (void)printf("\tadjust: %ld\n", (long)argp->adjust); - (void)printf("\topflags: %lu\n", (u_long)argp->opflags); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - -/* - * PUBLIC: int __bam_cadjust_read __P((DB_ENV *, void *, - * PUBLIC: __bam_cadjust_args **)); - */ -int -__bam_cadjust_read(dbenv, recbuf, argpp) - DB_ENV *dbenv; - void *recbuf; - __bam_cadjust_args **argpp; -{ - __bam_cadjust_args *argp; - u_int32_t uinttmp; - u_int8_t *bp; - int ret; - - if ((ret = __os_malloc(dbenv, - sizeof(__bam_cadjust_args) + sizeof(DB_TXN), &argp)) != 0) - return (ret); - argp->txnid = (DB_TXN *)&argp[1]; - - bp = recbuf; - memcpy(&argp->type, bp, sizeof(argp->type)); - bp += sizeof(argp->type); + memcpy(&argp->type, bp, sizeof(argp->type)); + bp += sizeof(argp->type); memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid)); bp += sizeof(argp->txnid->txnid); @@ -1392,33 +1066,42 @@ __bam_cdel_log(dbp, txnid, ret_lsnp, flags, pgno, lsn, indx) DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___bam_cdel; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
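The matching *_read functions reverse the packing: they allocate the argument struct with room for a DB_TXN tacked onto the end, point argp->txnid at that trailing space, and then memcpy each field back out of the record buffer in order. A reduced standalone sketch of that unpacking convention; the struct and its fields are illustrative, not a real log record:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct txn_stub { uint32_t txnid; };    /* trailing scratch space */

struct my_args {                        /* illustrative record arguments */
	uint32_t type;
	struct txn_stub *txnid;
	uint32_t pgno;
};

static int
unpack_record(const void *recbuf, struct my_args **argpp)
{
	struct my_args *argp;
	const uint8_t *bp = recbuf;

	/* One allocation: the args plus the trailing txn stub. */
	if ((argp = malloc(sizeof(*argp) + sizeof(struct txn_stub))) == NULL)
		return (-1);
	argp->txnid = (struct txn_stub *)&argp[1];

	memcpy(&argp->type, bp, sizeof(argp->type));
	bp += sizeof(argp->type);
	memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
	bp += sizeof(argp->txnid->txnid);
	memcpy(&argp->pgno, bp, sizeof(argp->pgno));

	*argpp = argp;
	return (0);
}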
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -1434,27 +1117,23 @@ __bam_cdel_log(dbp, txnid, ret_lsnp, flags, pgno, lsn, indx) logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -1495,125 +1174,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__bam_cdel_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __bam_cdel_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__bam_cdel_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __bam_cdel_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__bam_cdel_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __bam_cdel_args *argp; - int ret; - - notused2 = 
DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __bam_cdel_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__bam_cdel%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tlsn: [%lu][%lu]\n", - (u_long)argp->lsn.file, (u_long)argp->lsn.offset); - (void)printf("\tindx: %lu\n", (u_long)argp->indx); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __bam_cdel_read __P((DB_ENV *, void *, __bam_cdel_args **)); */ @@ -1631,9 +1232,9 @@ __bam_cdel_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__bam_cdel_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -1686,33 +1287,42 @@ __bam_repl_log(dbp, txnid, ret_lsnp, flags, pgno, lsn, indx, isdeleted, orig, DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___bam_repl; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -1733,27 +1343,23 @@ __bam_repl_log(dbp, txnid, ret_lsnp, flags, pgno, lsn, indx, isdeleted, orig, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -1828,142 +1434,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__bam_repl_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __bam_repl_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__bam_repl_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __bam_repl_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__bam_repl_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __bam_repl_args *argp; - 
u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __bam_repl_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__bam_repl%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tlsn: [%lu][%lu]\n", - (u_long)argp->lsn.file, (u_long)argp->lsn.offset); - (void)printf("\tindx: %lu\n", (u_long)argp->indx); - (void)printf("\tisdeleted: %lu\n", (u_long)argp->isdeleted); - (void)printf("\torig: "); - for (i = 0; i < argp->orig.size; i++) { - ch = ((u_int8_t *)argp->orig.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\trepl: "); - for (i = 0; i < argp->repl.size; i++) { - ch = ((u_int8_t *)argp->repl.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tprefix: %lu\n", (u_long)argp->prefix); - (void)printf("\tsuffix: %lu\n", (u_long)argp->suffix); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __bam_repl_read __P((DB_ENV *, void *, __bam_repl_args **)); */ @@ -1981,9 +1492,9 @@ __bam_repl_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__bam_repl_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -2053,33 +1564,42 @@ __bam_root_log(dbp, txnid, ret_lsnp, flags, meta_pgno, root_pgno, meta_lsn) DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___bam_root; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -2095,27 +1615,23 @@ __bam_root_log(dbp, txnid, ret_lsnp, flags, meta_pgno, root_pgno, meta_lsn) logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -2156,125 +1672,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__bam_root_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __bam_root_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__bam_root_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __bam_root_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__bam_root_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __bam_root_args *argp; - int ret; 
- - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __bam_root_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__bam_root%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno); - (void)printf("\troot_pgno: %lu\n", (u_long)argp->root_pgno); - (void)printf("\tmeta_lsn: [%lu][%lu]\n", - (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __bam_root_read __P((DB_ENV *, void *, __bam_root_args **)); */ @@ -2292,9 +1730,9 @@ __bam_root_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__bam_root_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -2346,33 +1784,42 @@ __bam_curadj_log(dbp, txnid, ret_lsnp, flags, mode, from_pgno, to_pgno, left_pgn DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___bam_curadj; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -2392,27 +1839,23 @@ __bam_curadj_log(dbp, txnid, ret_lsnp, flags, mode, from_pgno, to_pgno, left_pgn logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { + __os_free(dbenv, lr); + return (ret); + } #else - logrec.data = &lr->data; + logrec.data = lr->data; #endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif - if ((ret = - __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif - return (ret); - } } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -2467,128 +1910,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; #endif - } else{ -#ifdef DIAGNOSTIC -do_put: -#endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__bam_curadj_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __bam_curadj_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__bam_curadj_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __bam_curadj_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__bam_curadj_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; 
- DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __bam_curadj_args *argp; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __bam_curadj_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__bam_curadj%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tmode: %ld\n", (long)argp->mode); - (void)printf("\tfrom_pgno: %lu\n", (u_long)argp->from_pgno); - (void)printf("\tto_pgno: %lu\n", (u_long)argp->to_pgno); - (void)printf("\tleft_pgno: %lu\n", (u_long)argp->left_pgno); - (void)printf("\tfirst_indx: %lu\n", (u_long)argp->first_indx); - (void)printf("\tfrom_indx: %lu\n", (u_long)argp->from_indx); - (void)printf("\tto_indx: %lu\n", (u_long)argp->to_indx); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __bam_curadj_read __P((DB_ENV *, void *, __bam_curadj_args **)); */ @@ -2606,9 +1968,9 @@ __bam_curadj_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__bam_curadj_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -2672,33 +2034,42 @@ __bam_rcuradj_log(dbp, txnid, ret_lsnp, flags, mode, root, recno, order) DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___bam_rcuradj; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -2715,27 +2086,23 @@ __bam_rcuradj_log(dbp, txnid, ret_lsnp, flags, mode, root, recno, order) logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -2778,125 +2145,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; #endif - } else{ -#ifdef DIAGNOSTIC -do_put: -#endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__bam_rcuradj_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __bam_rcuradj_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__bam_rcuradj_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __bam_rcuradj_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__bam_rcuradj_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __bam_rcuradj_args 
*argp; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __bam_rcuradj_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__bam_rcuradj%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tmode: %ld\n", (long)argp->mode); - (void)printf("\troot: %ld\n", (long)argp->root); - (void)printf("\trecno: %ld\n", (long)argp->recno); - (void)printf("\torder: %ld\n", (long)argp->order); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __bam_rcuradj_read __P((DB_ENV *, void *, * PUBLIC: __bam_rcuradj_args **)); @@ -2915,9 +2204,9 @@ __bam_rcuradj_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__bam_rcuradj_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -2952,90 +2241,252 @@ __bam_rcuradj_read(dbenv, recbuf, argpp) } /* - * PUBLIC: int __bam_init_print __P((DB_ENV *, int (***)(DB_ENV *, - * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); + * PUBLIC: int __bam_relink_log __P((DB *, DB_TXN *, DB_LSN *, + * PUBLIC: u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, + * PUBLIC: DB_LSN *)); */ int -__bam_init_print(dbenv, dtabp, dtabsizep) - DB_ENV *dbenv; - int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - size_t *dtabsizep; +__bam_relink_log(dbp, txnid, ret_lsnp, flags, pgno, lsn, prev, lsn_prev, next, + lsn_next) + DB *dbp; + DB_TXN *txnid; + DB_LSN *ret_lsnp; + u_int32_t flags; + db_pgno_t pgno; + DB_LSN * lsn; + db_pgno_t prev; + DB_LSN * lsn_prev; + db_pgno_t next; + DB_LSN * lsn_next; { - int ret; + DBT logrec; + DB_ENV *dbenv; + DB_TXNLOGREC *lr; + DB_LSN *lsnp, null_lsn, *rlsnp; + u_int32_t uinttmp, rectype, txn_num; + u_int npad; + u_int8_t *bp; + int is_durable, ret; - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __bam_split_print, DB___bam_split)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __bam_rsplit_print, DB___bam_rsplit)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __bam_adj_print, DB___bam_adj)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __bam_cadjust_print, DB___bam_cadjust)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __bam_cdel_print, DB___bam_cdel)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __bam_repl_print, DB___bam_repl)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __bam_root_print, DB___bam_root)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __bam_curadj_print, DB___bam_curadj)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __bam_rcuradj_print, DB___bam_rcuradj)) != 0) + dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + + rectype = DB___bam_relink; + npad = 0; + rlsnp = ret_lsnp; + + ret = 0; + + if (LF_ISSET(DB_LOG_NOT_DURABLE) || + F_ISSET(dbp, DB_AM_NOT_DURABLE)) { + is_durable = 0; + } else + is_durable = 1; + + if (txnid == NULL) { + txn_num = 0; + lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; + } else { + if 
(TAILQ_FIRST(&txnid->kids) != NULL && + (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) + return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); + txn_num = txnid->txnid; + lsnp = &txnid->last_lsn; + } + + logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN) + + sizeof(u_int32_t) + + sizeof(u_int32_t) + + sizeof(*lsn) + + sizeof(u_int32_t) + + sizeof(*lsn_prev) + + sizeof(u_int32_t) + + sizeof(*lsn_next); + if (CRYPTO_ON(dbenv)) { + npad = + ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size); + logrec.size += npad; + } + + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { + if ((ret = __os_malloc(dbenv, + logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) + return (ret); +#ifdef DIAGNOSTIC + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { + __os_free(dbenv, lr); + return (ret); + } +#else + logrec.data = lr->data; +#endif + } + if (npad > 0) + memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); + + bp = logrec.data; + + memcpy(bp, &rectype, sizeof(rectype)); + bp += sizeof(rectype); + + memcpy(bp, &txn_num, sizeof(txn_num)); + bp += sizeof(txn_num); + + memcpy(bp, lsnp, sizeof(DB_LSN)); + bp += sizeof(DB_LSN); + + DB_ASSERT(dbp->log_filename != NULL); + if (dbp->log_filename->id == DB_LOGFILEID_INVALID && + (ret = __dbreg_lazy_id(dbp)) != 0) return (ret); - return (0); + + uinttmp = (u_int32_t)dbp->log_filename->id; + memcpy(bp, &uinttmp, sizeof(uinttmp)); + bp += sizeof(uinttmp); + + uinttmp = (u_int32_t)pgno; + memcpy(bp, &uinttmp, sizeof(uinttmp)); + bp += sizeof(uinttmp); + + if (lsn != NULL) + memcpy(bp, lsn, sizeof(*lsn)); + else + memset(bp, 0, sizeof(*lsn)); + bp += sizeof(*lsn); + + uinttmp = (u_int32_t)prev; + memcpy(bp, &uinttmp, sizeof(uinttmp)); + bp += sizeof(uinttmp); + + if (lsn_prev != NULL) + memcpy(bp, lsn_prev, sizeof(*lsn_prev)); + else + memset(bp, 0, sizeof(*lsn_prev)); + bp += sizeof(*lsn_prev); + + uinttmp = (u_int32_t)next; + memcpy(bp, &uinttmp, sizeof(uinttmp)); + bp += sizeof(uinttmp); + + if (lsn_next != NULL) + memcpy(bp, lsn_next, sizeof(*lsn_next)); + else + memset(bp, 0, sizeof(*lsn_next)); + bp += sizeof(*lsn_next); + + DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { +#ifdef DIAGNOSTIC + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
+ */ + memcpy(lr->data, logrec.data, logrec.size); + rectype |= DB_debug_FLAG; + memcpy(logrec.data, &rectype, sizeof(rectype)); + + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else + ret = 0; +#endif + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); + } + +#ifdef LOG_DIAGNOSTIC + if (ret != 0) + (void)__bam_relink_print(dbenv, + (DBT *)&logrec, ret_lsnp, NULL, NULL); +#endif + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else + if (is_durable || txnid == NULL) + __os_free(dbenv, logrec.data); +#endif + return (ret); } -#ifdef HAVE_REPLICATION /* - * PUBLIC: int __bam_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, - * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); + * PUBLIC: int __bam_relink_read __P((DB_ENV *, void *, __bam_relink_args **)); */ int -__bam_init_getpgnos(dbenv, dtabp, dtabsizep) +__bam_relink_read(dbenv, recbuf, argpp) DB_ENV *dbenv; - int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - size_t *dtabsizep; + void *recbuf; + __bam_relink_args **argpp; { + __bam_relink_args *argp; + u_int32_t uinttmp; + u_int8_t *bp; int ret; - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __bam_split_getpgnos, DB___bam_split)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __bam_rsplit_getpgnos, DB___bam_rsplit)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __bam_adj_getpgnos, DB___bam_adj)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __bam_cadjust_getpgnos, DB___bam_cadjust)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __bam_cdel_getpgnos, DB___bam_cdel)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __bam_repl_getpgnos, DB___bam_repl)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __bam_root_getpgnos, DB___bam_root)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __bam_curadj_getpgnos, DB___bam_curadj)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __bam_rcuradj_getpgnos, DB___bam_rcuradj)) != 0) + if ((ret = __os_malloc(dbenv, + sizeof(__bam_relink_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; + argp->txnid = (DB_TXN *)&argp[1]; + + memcpy(&argp->type, bp, sizeof(argp->type)); + bp += sizeof(argp->type); + + memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid)); + bp += sizeof(argp->txnid->txnid); + + memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN)); + bp += sizeof(DB_LSN); + + memcpy(&uinttmp, bp, sizeof(uinttmp)); + argp->fileid = (int32_t)uinttmp; + bp += sizeof(uinttmp); + + memcpy(&uinttmp, bp, sizeof(uinttmp)); + argp->pgno = (db_pgno_t)uinttmp; + bp += sizeof(uinttmp); + + memcpy(&argp->lsn, bp, sizeof(argp->lsn)); + bp += sizeof(argp->lsn); + + memcpy(&uinttmp, bp, sizeof(uinttmp)); + argp->prev = (db_pgno_t)uinttmp; + bp += sizeof(uinttmp); + + memcpy(&argp->lsn_prev, bp, sizeof(argp->lsn_prev)); + bp += sizeof(argp->lsn_prev); + + memcpy(&uinttmp, bp, sizeof(uinttmp)); + argp->next = (db_pgno_t)uinttmp; + bp += sizeof(uinttmp); + + memcpy(&argp->lsn_next, bp, sizeof(argp->lsn_next)); + bp += sizeof(argp->lsn_next); + + *argpp = argp; return (0); } -#endif /* HAVE_REPLICATION */ /* * PUBLIC: int __bam_init_recover __P((DB_ENV *, int (***)(DB_ENV *, @@ -3076,5 +2527,8 @@ __bam_init_recover(dbenv, dtabp, dtabsizep) if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, 
__bam_rcuradj_recover, DB___bam_rcuradj)) != 0) return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __bam_relink_recover, DB___bam_relink)) != 0) + return (ret); return (0); } diff --git a/db/btree/btree_autop.c b/db/btree/btree_autop.c new file mode 100644 index 000000000..1db8c3be6 --- /dev/null +++ b/db/btree/btree_autop.c @@ -0,0 +1,514 @@ +/* Do not edit: automatically built by gen_rec.awk. */ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include + +#include +#include +#endif + +#include "db_int.h" +#include "dbinc/crypto.h" +#include "dbinc/db_page.h" +#include "dbinc/db_dispatch.h" +#include "dbinc/db_am.h" +#include "dbinc/btree.h" +#include "dbinc/log.h" +#include "dbinc/txn.h" + +/* + * PUBLIC: int __bam_split_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__bam_split_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __bam_split_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __bam_split_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__bam_split%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tleft: %lu\n", (u_long)argp->left); + (void)printf("\tllsn: [%lu][%lu]\n", + (u_long)argp->llsn.file, (u_long)argp->llsn.offset); + (void)printf("\tright: %lu\n", (u_long)argp->right); + (void)printf("\trlsn: [%lu][%lu]\n", + (u_long)argp->rlsn.file, (u_long)argp->rlsn.offset); + (void)printf("\tindx: %lu\n", (u_long)argp->indx); + (void)printf("\tnpgno: %lu\n", (u_long)argp->npgno); + (void)printf("\tnlsn: [%lu][%lu]\n", + (u_long)argp->nlsn.file, (u_long)argp->nlsn.offset); + (void)printf("\troot_pgno: %lu\n", (u_long)argp->root_pgno); + (void)printf("\tpg: "); + for (i = 0; i < argp->pg.size; i++) { + ch = ((u_int8_t *)argp->pg.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\topflags: %lu\n", (u_long)argp->opflags); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __bam_rsplit_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__bam_rsplit_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __bam_rsplit_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __bam_rsplit_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__bam_rsplit%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tpgdbt: "); + for (i = 0; i < argp->pgdbt.size; i++) { + ch = ((u_int8_t *)argp->pgdbt.data)[i]; + printf(isprint(ch) || ch == 0x0a ? 
"%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\troot_pgno: %lu\n", (u_long)argp->root_pgno); + (void)printf("\tnrec: %lu\n", (u_long)argp->nrec); + (void)printf("\trootent: "); + for (i = 0; i < argp->rootent.size; i++) { + ch = ((u_int8_t *)argp->rootent.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\trootlsn: [%lu][%lu]\n", + (u_long)argp->rootlsn.file, (u_long)argp->rootlsn.offset); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __bam_adj_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__bam_adj_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __bam_adj_args *argp; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __bam_adj_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__bam_adj%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tlsn: [%lu][%lu]\n", + (u_long)argp->lsn.file, (u_long)argp->lsn.offset); + (void)printf("\tindx: %lu\n", (u_long)argp->indx); + (void)printf("\tindx_copy: %lu\n", (u_long)argp->indx_copy); + (void)printf("\tis_insert: %lu\n", (u_long)argp->is_insert); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __bam_cadjust_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__bam_cadjust_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __bam_cadjust_args *argp; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __bam_cadjust_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__bam_cadjust%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tlsn: [%lu][%lu]\n", + (u_long)argp->lsn.file, (u_long)argp->lsn.offset); + (void)printf("\tindx: %lu\n", (u_long)argp->indx); + (void)printf("\tadjust: %ld\n", (long)argp->adjust); + (void)printf("\topflags: %lu\n", (u_long)argp->opflags); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __bam_cdel_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__bam_cdel_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __bam_cdel_args *argp; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __bam_cdel_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__bam_cdel%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? 
"_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tlsn: [%lu][%lu]\n", + (u_long)argp->lsn.file, (u_long)argp->lsn.offset); + (void)printf("\tindx: %lu\n", (u_long)argp->indx); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __bam_repl_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__bam_repl_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __bam_repl_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __bam_repl_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__bam_repl%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tlsn: [%lu][%lu]\n", + (u_long)argp->lsn.file, (u_long)argp->lsn.offset); + (void)printf("\tindx: %lu\n", (u_long)argp->indx); + (void)printf("\tisdeleted: %lu\n", (u_long)argp->isdeleted); + (void)printf("\torig: "); + for (i = 0; i < argp->orig.size; i++) { + ch = ((u_int8_t *)argp->orig.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\trepl: "); + for (i = 0; i < argp->repl.size; i++) { + ch = ((u_int8_t *)argp->repl.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tprefix: %lu\n", (u_long)argp->prefix); + (void)printf("\tsuffix: %lu\n", (u_long)argp->suffix); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __bam_root_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__bam_root_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __bam_root_args *argp; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __bam_root_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__bam_root%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? 
"_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno); + (void)printf("\troot_pgno: %lu\n", (u_long)argp->root_pgno); + (void)printf("\tmeta_lsn: [%lu][%lu]\n", + (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __bam_curadj_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__bam_curadj_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __bam_curadj_args *argp; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __bam_curadj_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__bam_curadj%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tmode: %ld\n", (long)argp->mode); + (void)printf("\tfrom_pgno: %lu\n", (u_long)argp->from_pgno); + (void)printf("\tto_pgno: %lu\n", (u_long)argp->to_pgno); + (void)printf("\tleft_pgno: %lu\n", (u_long)argp->left_pgno); + (void)printf("\tfirst_indx: %lu\n", (u_long)argp->first_indx); + (void)printf("\tfrom_indx: %lu\n", (u_long)argp->from_indx); + (void)printf("\tto_indx: %lu\n", (u_long)argp->to_indx); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __bam_rcuradj_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__bam_rcuradj_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __bam_rcuradj_args *argp; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __bam_rcuradj_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__bam_rcuradj%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tmode: %ld\n", (long)argp->mode); + (void)printf("\troot: %ld\n", (long)argp->root); + (void)printf("\trecno: %ld\n", (long)argp->recno); + (void)printf("\torder: %ld\n", (long)argp->order); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __bam_relink_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__bam_relink_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __bam_relink_args *argp; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __bam_relink_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__bam_relink%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? 
"_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tlsn: [%lu][%lu]\n", + (u_long)argp->lsn.file, (u_long)argp->lsn.offset); + (void)printf("\tprev: %lu\n", (u_long)argp->prev); + (void)printf("\tlsn_prev: [%lu][%lu]\n", + (u_long)argp->lsn_prev.file, (u_long)argp->lsn_prev.offset); + (void)printf("\tnext: %lu\n", (u_long)argp->next); + (void)printf("\tlsn_next: [%lu][%lu]\n", + (u_long)argp->lsn_next.file, (u_long)argp->lsn_next.offset); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __bam_init_print __P((DB_ENV *, int (***)(DB_ENV *, + * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); + */ +int +__bam_init_print(dbenv, dtabp, dtabsizep) + DB_ENV *dbenv; + int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); + size_t *dtabsizep; +{ + int ret; + + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __bam_split_print, DB___bam_split)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __bam_rsplit_print, DB___bam_rsplit)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __bam_adj_print, DB___bam_adj)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __bam_cadjust_print, DB___bam_cadjust)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __bam_cdel_print, DB___bam_cdel)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __bam_repl_print, DB___bam_repl)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __bam_root_print, DB___bam_root)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __bam_curadj_print, DB___bam_curadj)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __bam_rcuradj_print, DB___bam_rcuradj)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __bam_relink_print, DB___bam_relink)) != 0) + return (ret); + return (0); +} diff --git a/db/build_vxworks/BerkeleyDB20.wpj b/db/build_vxworks/BerkeleyDB20.wpj index 795e8db7a..428fb6208 100755 --- a/db/build_vxworks/BerkeleyDB20.wpj +++ b/db/build_vxworks/BerkeleyDB20.wpj @@ -645,24 +645,6 @@ strdup.o C/C++ compiler - FILE_$(PRJ_DIR)/../clib/vsnprintf.c_dependDone -TRUE - - - FILE_$(PRJ_DIR)/../clib/vsnprintf.c_dependencies -$(PRJ_DIR)/db_config.h \ - $(PRJ_DIR)/db_int.h \ - $(PRJ_DIR)/db.h - - - FILE_$(PRJ_DIR)/../clib/vsnprintf.c_objects -vsnprintf.o - - - FILE_$(PRJ_DIR)/../clib/vsnprintf.c_tool -C/C++ compiler - - FILE_$(PRJ_DIR)/../common/db_byteorder.c_dependDone TRUE @@ -1311,6 +1293,60 @@ db_ret.o C/C++ compiler + FILE_$(PRJ_DIR)/../db/db_setid.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_setid.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_setid.c_objects +db_setid.o + + + FILE_$(PRJ_DIR)/../db/db_setid.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_setlsn.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_setlsn.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_setlsn.c_objects +db_setlsn.o + + + FILE_$(PRJ_DIR)/../db/db_setlsn.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_stati.c_dependDone +TRUE + + + 
FILE_$(PRJ_DIR)/../db/db_stati.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_stati.c_objects +db_stati.o + + + FILE_$(PRJ_DIR)/../db/db_stati.c_tool +C/C++ compiler + + FILE_$(PRJ_DIR)/../db/db_truncate.c_dependDone TRUE @@ -1455,6 +1491,24 @@ dbreg_rec.o C/C++ compiler + FILE_$(PRJ_DIR)/../dbreg/dbreg_stat.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_stat.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_stat.c_objects +dbreg_stat.o + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_stat.c_tool +C/C++ compiler + + FILE_$(PRJ_DIR)/../dbreg/dbreg_util.c_dependDone TRUE @@ -1599,6 +1653,24 @@ env_region.o C/C++ compiler + FILE_$(PRJ_DIR)/../env/env_stat.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../env/env_stat.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../env/env_stat.c_objects +env_stat.o + + + FILE_$(PRJ_DIR)/../env/env_stat.c_tool +C/C++ compiler + + FILE_$(PRJ_DIR)/../fileops/fileops_auto.c_dependDone TRUE @@ -2013,6 +2085,42 @@ lock_deadlock.o C/C++ compiler + FILE_$(PRJ_DIR)/../lock/lock_id.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock_id.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock_id.c_objects +lock_id.o + + + FILE_$(PRJ_DIR)/../lock/lock_id.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../lock/lock_list.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock_list.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock_list.c_objects +lock_list.o + + + FILE_$(PRJ_DIR)/../lock/lock_list.c_tool +C/C++ compiler + + FILE_$(PRJ_DIR)/../lock/lock_method.c_dependDone TRUE @@ -2067,6 +2175,24 @@ lock_stat.o C/C++ compiler + FILE_$(PRJ_DIR)/../lock/lock_timer.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock_timer.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock_timer.c_objects +lock_timer.o + + + FILE_$(PRJ_DIR)/../lock/lock_timer.c_tool +C/C++ compiler + + FILE_$(PRJ_DIR)/../lock/lock_util.c_dependDone TRUE @@ -2193,6 +2319,24 @@ log_put.o C/C++ compiler + FILE_$(PRJ_DIR)/../log/log_stat.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../log/log_stat.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../log/log_stat.c_objects +log_stat.o + + + FILE_$(PRJ_DIR)/../log/log_stat.c_tool +C/C++ compiler + + FILE_$(PRJ_DIR)/../mp/mp_alloc.c_dependDone TRUE @@ -2247,6 +2391,24 @@ mp_fget.o C/C++ compiler + FILE_$(PRJ_DIR)/../mp/mp_fmethod.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_fmethod.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_fmethod.c_objects +mp_fmethod.o + + + FILE_$(PRJ_DIR)/../mp/mp_fmethod.c_tool +C/C++ compiler + + FILE_$(PRJ_DIR)/../mp/mp_fopen.c_dependDone TRUE @@ -2823,6 +2985,24 @@ os_tmpdir.o C/C++ compiler + FILE_$(PRJ_DIR)/../os/os_truncate.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_truncate.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_truncate.c_objects +os_truncate.o + + + FILE_$(PRJ_DIR)/../os/os_truncate.c_tool +C/C++ compiler + + FILE_$(PRJ_DIR)/../os/os_unlink.c_dependDone TRUE @@ -3075,165 +3255,129 @@ qam_verify.o C/C++ compiler - 
FILE_$(PRJ_DIR)/../rep/rep_method.c_dependDone + FILE_$(PRJ_DIR)/../rep/rep_auto.c_dependDone TRUE - FILE_$(PRJ_DIR)/../rep/rep_method.c_dependencies + FILE_$(PRJ_DIR)/../rep/rep_auto.c_dependencies $(PRJ_DIR)/db_config.h \ $(PRJ_DIR)/db_int.h \ $(PRJ_DIR)/db.h - FILE_$(PRJ_DIR)/../rep/rep_method.c_objects -rep_method.o + FILE_$(PRJ_DIR)/../rep/rep_auto.c_objects +rep_auto.o - FILE_$(PRJ_DIR)/../rep/rep_method.c_tool + FILE_$(PRJ_DIR)/../rep/rep_auto.c_tool C/C++ compiler - FILE_$(PRJ_DIR)/../rep/rep_record.c_dependDone + FILE_$(PRJ_DIR)/../rep/rep_backup.c_dependDone TRUE - FILE_$(PRJ_DIR)/../rep/rep_record.c_dependencies + FILE_$(PRJ_DIR)/../rep/rep_backup.c_dependencies $(PRJ_DIR)/db_config.h \ $(PRJ_DIR)/db_int.h \ $(PRJ_DIR)/db.h - FILE_$(PRJ_DIR)/../rep/rep_record.c_objects -rep_record.o + FILE_$(PRJ_DIR)/../rep/rep_backup.c_objects +rep_backup.o - FILE_$(PRJ_DIR)/../rep/rep_record.c_tool + FILE_$(PRJ_DIR)/../rep/rep_backup.c_tool C/C++ compiler - FILE_$(PRJ_DIR)/../rep/rep_region.c_dependDone -TRUE - - - FILE_$(PRJ_DIR)/../rep/rep_region.c_dependencies -$(PRJ_DIR)/db_config.h \ - $(PRJ_DIR)/db_int.h \ - $(PRJ_DIR)/db.h - - - FILE_$(PRJ_DIR)/../rep/rep_region.c_objects -rep_region.o - - - FILE_$(PRJ_DIR)/../rep/rep_region.c_tool -C/C++ compiler - - - FILE_$(PRJ_DIR)/../rep/rep_util.c_dependDone -TRUE - - - FILE_$(PRJ_DIR)/../rep/rep_util.c_dependencies -$(PRJ_DIR)/db_config.h \ - $(PRJ_DIR)/db_int.h \ - $(PRJ_DIR)/db.h - - - FILE_$(PRJ_DIR)/../rep/rep_util.c_objects -rep_util.o - - - FILE_$(PRJ_DIR)/../rep/rep_util.c_tool -C/C++ compiler - - - FILE_$(PRJ_DIR)/../rpc_client/client.c_dependDone + FILE_$(PRJ_DIR)/../rep/rep_method.c_dependDone TRUE - FILE_$(PRJ_DIR)/../rpc_client/client.c_dependencies + FILE_$(PRJ_DIR)/../rep/rep_method.c_dependencies $(PRJ_DIR)/db_config.h \ $(PRJ_DIR)/db_int.h \ $(PRJ_DIR)/db.h - FILE_$(PRJ_DIR)/../rpc_client/client.c_objects -client.o + FILE_$(PRJ_DIR)/../rep/rep_method.c_objects +rep_method.o - FILE_$(PRJ_DIR)/../rpc_client/client.c_tool + FILE_$(PRJ_DIR)/../rep/rep_method.c_tool C/C++ compiler - FILE_$(PRJ_DIR)/../rpc_client/db_server_clnt.c_dependDone + FILE_$(PRJ_DIR)/../rep/rep_record.c_dependDone TRUE - FILE_$(PRJ_DIR)/../rpc_client/db_server_clnt.c_dependencies + FILE_$(PRJ_DIR)/../rep/rep_record.c_dependencies $(PRJ_DIR)/db_config.h \ $(PRJ_DIR)/db_int.h \ $(PRJ_DIR)/db.h - FILE_$(PRJ_DIR)/../rpc_client/db_server_clnt.c_objects -db_server_clnt.o + FILE_$(PRJ_DIR)/../rep/rep_record.c_objects +rep_record.o - FILE_$(PRJ_DIR)/../rpc_client/db_server_clnt.c_tool + FILE_$(PRJ_DIR)/../rep/rep_record.c_tool C/C++ compiler - FILE_$(PRJ_DIR)/../rpc_client/gen_client.c_dependDone + FILE_$(PRJ_DIR)/../rep/rep_region.c_dependDone TRUE - FILE_$(PRJ_DIR)/../rpc_client/gen_client.c_dependencies + FILE_$(PRJ_DIR)/../rep/rep_region.c_dependencies $(PRJ_DIR)/db_config.h \ $(PRJ_DIR)/db_int.h \ $(PRJ_DIR)/db.h - FILE_$(PRJ_DIR)/../rpc_client/gen_client.c_objects -gen_client.o + FILE_$(PRJ_DIR)/../rep/rep_region.c_objects +rep_region.o - FILE_$(PRJ_DIR)/../rpc_client/gen_client.c_tool + FILE_$(PRJ_DIR)/../rep/rep_region.c_tool C/C++ compiler - FILE_$(PRJ_DIR)/../rpc_client/gen_client_ret.c_dependDone + FILE_$(PRJ_DIR)/../rep/rep_stat.c_dependDone TRUE - FILE_$(PRJ_DIR)/../rpc_client/gen_client_ret.c_dependencies + FILE_$(PRJ_DIR)/../rep/rep_stat.c_dependencies $(PRJ_DIR)/db_config.h \ $(PRJ_DIR)/db_int.h \ $(PRJ_DIR)/db.h - FILE_$(PRJ_DIR)/../rpc_client/gen_client_ret.c_objects -gen_client_ret.o + FILE_$(PRJ_DIR)/../rep/rep_stat.c_objects +rep_stat.o - 
FILE_$(PRJ_DIR)/../rpc_client/gen_client_ret.c_tool + FILE_$(PRJ_DIR)/../rep/rep_stat.c_tool C/C++ compiler - FILE_$(PRJ_DIR)/../rpc_server/c/db_server_xdr.c_dependDone + FILE_$(PRJ_DIR)/../rep/rep_util.c_dependDone TRUE - FILE_$(PRJ_DIR)/../rpc_server/c/db_server_xdr.c_dependencies + FILE_$(PRJ_DIR)/../rep/rep_util.c_dependencies $(PRJ_DIR)/db_config.h \ $(PRJ_DIR)/db_int.h \ $(PRJ_DIR)/db.h - FILE_$(PRJ_DIR)/../rpc_server/c/db_server_xdr.c_objects -db_server_xdr.o + FILE_$(PRJ_DIR)/../rep/rep_util.c_objects +rep_util.o - FILE_$(PRJ_DIR)/../rpc_server/c/db_server_xdr.c_tool + FILE_$(PRJ_DIR)/../rep/rep_util.c_tool C/C++ compiler @@ -3458,7 +3602,6 @@ $(PRJ_DIR)/../btree/bt_compare.c \ $(PRJ_DIR)/../clib/snprintf.c \ $(PRJ_DIR)/../clib/strcasecmp.c \ $(PRJ_DIR)/../clib/strdup.c \ - $(PRJ_DIR)/../clib/vsnprintf.c \ $(PRJ_DIR)/../common/db_byteorder.c \ $(PRJ_DIR)/../common/db_err.c \ $(PRJ_DIR)/../common/db_getlong.c \ @@ -3495,6 +3638,9 @@ $(PRJ_DIR)/../btree/bt_compare.c \ $(PRJ_DIR)/../db/db_remove.c \ $(PRJ_DIR)/../db/db_rename.c \ $(PRJ_DIR)/../db/db_ret.c \ + $(PRJ_DIR)/../db/db_setid.c \ + $(PRJ_DIR)/../db/db_setlsn.c \ + $(PRJ_DIR)/../db/db_stati.c \ $(PRJ_DIR)/../db/db_truncate.c \ $(PRJ_DIR)/../db/db_upg.c \ $(PRJ_DIR)/../db/db_upg_opd.c \ @@ -3503,6 +3649,7 @@ $(PRJ_DIR)/../btree/bt_compare.c \ $(PRJ_DIR)/../dbreg/dbreg.c \ $(PRJ_DIR)/../dbreg/dbreg_auto.c \ $(PRJ_DIR)/../dbreg/dbreg_rec.c \ + $(PRJ_DIR)/../dbreg/dbreg_stat.c \ $(PRJ_DIR)/../dbreg/dbreg_util.c \ $(PRJ_DIR)/../env/db_salloc.c \ $(PRJ_DIR)/../env/db_shash.c \ @@ -3511,6 +3658,7 @@ $(PRJ_DIR)/../btree/bt_compare.c \ $(PRJ_DIR)/../env/env_open.c \ $(PRJ_DIR)/../env/env_recover.c \ $(PRJ_DIR)/../env/env_region.c \ + $(PRJ_DIR)/../env/env_stat.c \ $(PRJ_DIR)/../fileops/fileops_auto.c \ $(PRJ_DIR)/../fileops/fop_basic.c \ $(PRJ_DIR)/../fileops/fop_rec.c \ @@ -3534,9 +3682,12 @@ $(PRJ_DIR)/../btree/bt_compare.c \ $(PRJ_DIR)/../hsearch/hsearch.c \ $(PRJ_DIR)/../lock/lock.c \ $(PRJ_DIR)/../lock/lock_deadlock.c \ + $(PRJ_DIR)/../lock/lock_id.c \ + $(PRJ_DIR)/../lock/lock_list.c \ $(PRJ_DIR)/../lock/lock_method.c \ $(PRJ_DIR)/../lock/lock_region.c \ $(PRJ_DIR)/../lock/lock_stat.c \ + $(PRJ_DIR)/../lock/lock_timer.c \ $(PRJ_DIR)/../lock/lock_util.c \ $(PRJ_DIR)/../log/log.c \ $(PRJ_DIR)/../log/log_archive.c \ @@ -3544,9 +3695,11 @@ $(PRJ_DIR)/../btree/bt_compare.c \ $(PRJ_DIR)/../log/log_get.c \ $(PRJ_DIR)/../log/log_method.c \ $(PRJ_DIR)/../log/log_put.c \ + $(PRJ_DIR)/../log/log_stat.c \ $(PRJ_DIR)/../mp/mp_alloc.c \ $(PRJ_DIR)/../mp/mp_bh.c \ $(PRJ_DIR)/../mp/mp_fget.c \ + $(PRJ_DIR)/../mp/mp_fmethod.c \ $(PRJ_DIR)/../mp/mp_fopen.c \ $(PRJ_DIR)/../mp/mp_fput.c \ $(PRJ_DIR)/../mp/mp_fset.c \ @@ -3579,6 +3732,7 @@ $(PRJ_DIR)/../btree/bt_compare.c \ $(PRJ_DIR)/../os/os_spin.c \ $(PRJ_DIR)/../os/os_stat.c \ $(PRJ_DIR)/../os/os_tmpdir.c \ + $(PRJ_DIR)/../os/os_truncate.c \ $(PRJ_DIR)/../os/os_unlink.c \ $(PRJ_DIR)/../os_vxworks/os_vx_abs.c \ $(PRJ_DIR)/../os_vxworks/os_vx_config.c \ @@ -3593,15 +3747,13 @@ $(PRJ_DIR)/../btree/bt_compare.c \ $(PRJ_DIR)/../qam/qam_stat.c \ $(PRJ_DIR)/../qam/qam_upgrade.c \ $(PRJ_DIR)/../qam/qam_verify.c \ + $(PRJ_DIR)/../rep/rep_auto.c \ + $(PRJ_DIR)/../rep/rep_backup.c \ $(PRJ_DIR)/../rep/rep_method.c \ $(PRJ_DIR)/../rep/rep_record.c \ $(PRJ_DIR)/../rep/rep_region.c \ + $(PRJ_DIR)/../rep/rep_stat.c \ $(PRJ_DIR)/../rep/rep_util.c \ - $(PRJ_DIR)/../rpc_client/client.c \ - $(PRJ_DIR)/../rpc_client/db_server_clnt.c \ - $(PRJ_DIR)/../rpc_client/gen_client.c \ - 
$(PRJ_DIR)/../rpc_client/gen_client_ret.c \ - $(PRJ_DIR)/../rpc_server/c/db_server_xdr.c \ $(PRJ_DIR)/../txn/txn.c \ $(PRJ_DIR)/../txn/txn_auto.c \ $(PRJ_DIR)/../txn/txn_method.c \ diff --git a/db/build_vxworks/BerkeleyDB20small.wpj b/db/build_vxworks/BerkeleyDB20small.wpj new file mode 100755 index 000000000..3729dc49d --- /dev/null +++ b/db/build_vxworks/BerkeleyDB20small.wpj @@ -0,0 +1,3107 @@ +Document file - DO NOT EDIT + + BUILD_PENTIUM_debug_BUILDRULE +BerkeleyDB20small.out + + + BUILD_PENTIUM_debug_MACRO_AR +ar386 + + + BUILD_PENTIUM_debug_MACRO_ARCHIVE +$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB20small_sim.a + + + BUILD_PENTIUM_debug_MACRO_AS +cc386 + + + BUILD_PENTIUM_debug_MACRO_CC +cc386 + + + BUILD_PENTIUM_debug_MACRO_CFLAGS +-g \ + -mpentium \ + -ansi \ + -nostdinc \ + -DRW_MULTI_THREAD \ + -D_REENTRANT \ + -fvolatile \ + -nostdlib \ + -fno-builtin \ + -fno-defer-pop \ + -I. \ + -I$(WIND_BASE)/target/h \ + -DCPU=PENTIUM \ + -O0 \ + -I$(PRJ_DIR) \ + -I$(PRJ_DIR)/.. \ + -DDIAGNOSTIC \ + -DDEBUG + + + BUILD_PENTIUM_debug_MACRO_CFLAGS_AS +-g \ + -mpentium \ + -ansi \ + -nostdinc \ + -fvolatile \ + -nostdlib \ + -fno-builtin \ + -fno-defer-pop \ + -P \ + -x \ + assembler-with-cpp \ + -I. \ + -I$(WIND_BASE)/target/h \ + -DCPU=PENTIUM + + + BUILD_PENTIUM_debug_MACRO_CPP +cc386 -E -P -xc + + + BUILD_PENTIUM_debug_MACRO_LD +ld386 + + + BUILD_PENTIUM_debug_MACRO_LDFLAGS +-X -N + + + BUILD_PENTIUM_debug_MACRO_LD_PARTIAL_FLAGS +-X -r + + + BUILD_PENTIUM_debug_MACRO_NM +nm386 -g + + + BUILD_PENTIUM_debug_MACRO_OPTION_DEFINE_MACRO +-D + + + BUILD_PENTIUM_debug_MACRO_OPTION_INCLUDE_DIR +-I + + + BUILD_PENTIUM_debug_MACRO_POST_BUILD_RULE + + + + BUILD_PENTIUM_debug_MACRO_PRJ_LIBS + + + + BUILD_PENTIUM_debug_MACRO_SIZE +size386 + + + BUILD_PENTIUM_debug_RO_DEPEND_PATH +{$(WIND_BASE)/target/h/} \ + {$(WIND_BASE)/target/src/} \ + {$(WIND_BASE)/target/config/} + + + BUILD_PENTIUM_debug_TC +::tc_PENTIUMgnu + + + BUILD_PENTIUM_release_BUILDRULE +BerkeleyDB20small.out + + + BUILD_PENTIUM_release_MACRO_AR +ar386 + + + BUILD_PENTIUM_release_MACRO_ARCHIVE +$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB20small_sim.a + + + BUILD_PENTIUM_release_MACRO_AS +cc386 + + + BUILD_PENTIUM_release_MACRO_CC +cc386 + + + BUILD_PENTIUM_release_MACRO_CFLAGS +-mpentium \ + -ansi \ + -nostdinc \ + -DRW_MULTI_THREAD \ + -D_REENTRANT \ + -fvolatile \ + -nostdlib \ + -fno-builtin \ + -fno-defer-pop \ + -I. \ + -I$(WIND_BASE)/target/h \ + -DCPU=PENTIUM \ + -O2 \ + -I$(PRJ_DIR) \ + -I$(PRJ_DIR)/.. + + + BUILD_PENTIUM_release_MACRO_CFLAGS_AS +-g \ + -mpentium \ + -ansi \ + -nostdinc \ + -fvolatile \ + -nostdlib \ + -fno-builtin \ + -fno-defer-pop \ + -P \ + -x \ + assembler-with-cpp \ + -I. 
\ + -I$(WIND_BASE)/target/h \ + -DCPU=PENTIUM + + + BUILD_PENTIUM_release_MACRO_CPP +cc386 -E -P -xc + + + BUILD_PENTIUM_release_MACRO_LD +ld386 + + + BUILD_PENTIUM_release_MACRO_LDDEPS + + + + BUILD_PENTIUM_release_MACRO_LDFLAGS +-X -N + + + BUILD_PENTIUM_release_MACRO_LD_PARTIAL_FLAGS +-X -r + + + BUILD_PENTIUM_release_MACRO_NM +nm386 -g + + + BUILD_PENTIUM_release_MACRO_OPTION_DEFINE_MACRO +-D + + + BUILD_PENTIUM_release_MACRO_OPTION_INCLUDE_DIR +-I + + + BUILD_PENTIUM_release_MACRO_POST_BUILD_RULE + + + + BUILD_PENTIUM_release_MACRO_PRJ_LIBS + + + + BUILD_PENTIUM_release_MACRO_SIZE +size386 + + + BUILD_PENTIUM_release_RO_DEPEND_PATH +{$(WIND_BASE)/target/h/} \ + {$(WIND_BASE)/target/src/} \ + {$(WIND_BASE)/target/config/} + + + BUILD_PENTIUM_release_TC +::tc_PENTIUMgnu + + + BUILD_RULE_BerkeleyDB20small.out + + + + BUILD_RULE_BerkeleyDB20small_sim.out + + + + BUILD_RULE_archive + + + + BUILD_RULE_objects + + + + BUILD__CURRENT +PENTIUM_debug + + + BUILD__LIST +PENTIUM_release PENTIUM_debug + + + CORE_INFO_TYPE +::prj_vxApp + + + CORE_INFO_VERSION +2.0 + + + FILE_$(PRJ_DIR)/../btree/bt_compare.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_compare.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_compare.c_objects +bt_compare.o + + + FILE_$(PRJ_DIR)/../btree/bt_compare.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_conv.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_conv.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_conv.c_objects +bt_conv.o + + + FILE_$(PRJ_DIR)/../btree/bt_conv.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_curadj.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_curadj.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_curadj.c_objects +bt_curadj.o + + + FILE_$(PRJ_DIR)/../btree/bt_curadj.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_cursor.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_cursor.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_cursor.c_objects +bt_cursor.o + + + FILE_$(PRJ_DIR)/../btree/bt_cursor.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_delete.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_delete.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_delete.c_objects +bt_delete.o + + + FILE_$(PRJ_DIR)/../btree/bt_delete.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_method.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_method.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_method.c_objects +bt_method.o + + + FILE_$(PRJ_DIR)/../btree/bt_method.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_open.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_open.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_open.c_objects +bt_open.o + + + FILE_$(PRJ_DIR)/../btree/bt_open.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_put.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_put.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_put.c_objects +bt_put.o + + + FILE_$(PRJ_DIR)/../btree/bt_put.c_tool +C/C++ compiler + + + 
FILE_$(PRJ_DIR)/../btree/bt_rec.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_rec.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_rec.c_objects +bt_rec.o + + + FILE_$(PRJ_DIR)/../btree/bt_rec.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_reclaim.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_reclaim.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_reclaim.c_objects +bt_reclaim.o + + + FILE_$(PRJ_DIR)/../btree/bt_reclaim.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_recno.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_recno.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_recno.c_objects +bt_recno.o + + + FILE_$(PRJ_DIR)/../btree/bt_recno.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_rsearch.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_rsearch.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_rsearch.c_objects +bt_rsearch.o + + + FILE_$(PRJ_DIR)/../btree/bt_rsearch.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_search.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_search.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_search.c_objects +bt_search.o + + + FILE_$(PRJ_DIR)/../btree/bt_search.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_split.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_split.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_split.c_objects +bt_split.o + + + FILE_$(PRJ_DIR)/../btree/bt_split.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_stat.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_stat.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_stat.c_objects +bt_stat.o + + + FILE_$(PRJ_DIR)/../btree/bt_stat.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_upgrade.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_upgrade.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_upgrade.c_objects +bt_upgrade.o + + + FILE_$(PRJ_DIR)/../btree/bt_upgrade.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/btree_auto.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/btree_auto.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/btree_auto.c_objects +btree_auto.o + + + FILE_$(PRJ_DIR)/../btree/btree_auto.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../clib/getopt.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../clib/getopt.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../clib/getopt.c_objects +getopt.o + + + FILE_$(PRJ_DIR)/../clib/getopt.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../clib/snprintf.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../clib/snprintf.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../clib/snprintf.c_objects +snprintf.o + + + FILE_$(PRJ_DIR)/../clib/snprintf.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../clib/strcasecmp.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../clib/strcasecmp.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + 
$(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../clib/strcasecmp.c_objects +strcasecmp.o + + + FILE_$(PRJ_DIR)/../clib/strcasecmp.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../clib/strdup.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../clib/strdup.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../clib/strdup.c_objects +strdup.o + + + FILE_$(PRJ_DIR)/../clib/strdup.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../common/crypto_stub.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../common/crypto_stub.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../common/crypto_stub.c_objects +crypto_stub.o + + + FILE_$(PRJ_DIR)/../common/crypto_stub.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../common/db_byteorder.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../common/db_byteorder.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../common/db_byteorder.c_objects +db_byteorder.o + + + FILE_$(PRJ_DIR)/../common/db_byteorder.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../common/db_err.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../common/db_err.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../common/db_err.c_objects +db_err.o + + + FILE_$(PRJ_DIR)/../common/db_err.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../common/db_getlong.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../common/db_getlong.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../common/db_getlong.c_objects +db_getlong.o + + + FILE_$(PRJ_DIR)/../common/db_getlong.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../common/db_idspace.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../common/db_idspace.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../common/db_idspace.c_objects +db_idspace.o + + + FILE_$(PRJ_DIR)/../common/db_idspace.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../common/db_log2.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../common/db_log2.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../common/db_log2.c_objects +db_log2.o + + + FILE_$(PRJ_DIR)/../common/db_log2.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../common/util_arg.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../common/util_arg.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../common/util_arg.c_objects +util_arg.o + + + FILE_$(PRJ_DIR)/../common/util_arg.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../common/util_cache.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../common/util_cache.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../common/util_cache.c_objects +util_cache.o + + + FILE_$(PRJ_DIR)/../common/util_cache.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../common/util_log.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../common/util_log.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../common/util_log.c_objects +util_log.o + + + FILE_$(PRJ_DIR)/../common/util_log.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../common/util_sig.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../common/util_sig.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../common/util_sig.c_objects +util_sig.o + + + 
FILE_$(PRJ_DIR)/../common/util_sig.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/crdel_auto.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/crdel_auto.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/crdel_auto.c_objects +crdel_auto.o + + + FILE_$(PRJ_DIR)/../db/crdel_auto.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/crdel_rec.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/crdel_rec.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/crdel_rec.c_objects +crdel_rec.o + + + FILE_$(PRJ_DIR)/../db/crdel_rec.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db.c_objects +db.o + + + FILE_$(PRJ_DIR)/../db/db.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_am.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_am.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_am.c_objects +db_am.o + + + FILE_$(PRJ_DIR)/../db/db_am.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_auto.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_auto.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_auto.c_objects +db_auto.o + + + FILE_$(PRJ_DIR)/../db/db_auto.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_cam.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_cam.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_cam.c_objects +db_cam.o + + + FILE_$(PRJ_DIR)/../db/db_cam.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_conv.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_conv.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_conv.c_objects +db_conv.o + + + FILE_$(PRJ_DIR)/../db/db_conv.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_dispatch.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_dispatch.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_dispatch.c_objects +db_dispatch.o + + + FILE_$(PRJ_DIR)/../db/db_dispatch.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_dup.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_dup.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_dup.c_objects +db_dup.o + + + FILE_$(PRJ_DIR)/../db/db_dup.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_iface.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_iface.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_iface.c_objects +db_iface.o + + + FILE_$(PRJ_DIR)/../db/db_iface.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_join.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_join.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_join.c_objects +db_join.o + + + FILE_$(PRJ_DIR)/../db/db_join.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_meta.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_meta.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_meta.c_objects +db_meta.o + + + FILE_$(PRJ_DIR)/../db/db_meta.c_tool +C/C++ compiler + + + 
FILE_$(PRJ_DIR)/../db/db_method.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_method.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_method.c_objects +db_method.o + + + FILE_$(PRJ_DIR)/../db/db_method.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_open.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_open.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_open.c_objects +db_open.o + + + FILE_$(PRJ_DIR)/../db/db_open.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_overflow.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_overflow.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_overflow.c_objects +db_overflow.o + + + FILE_$(PRJ_DIR)/../db/db_overflow.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_pr.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_pr.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_pr.c_objects +db_pr.o + + + FILE_$(PRJ_DIR)/../db/db_pr.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_rec.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_rec.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_rec.c_objects +db_rec.o + + + FILE_$(PRJ_DIR)/../db/db_rec.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_reclaim.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_reclaim.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_reclaim.c_objects +db_reclaim.o + + + FILE_$(PRJ_DIR)/../db/db_reclaim.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_remove.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_remove.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_remove.c_objects +db_remove.o + + + FILE_$(PRJ_DIR)/../db/db_remove.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_rename.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_rename.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_rename.c_objects +db_rename.o + + + FILE_$(PRJ_DIR)/../db/db_rename.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_ret.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_ret.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_ret.c_objects +db_ret.o + + + FILE_$(PRJ_DIR)/../db/db_ret.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_setid.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_setid.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_setid.c_objects +db_setid.o + + + FILE_$(PRJ_DIR)/../db/db_setid.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_setlsn.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_setlsn.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_setlsn.c_objects +db_setlsn.o + + + FILE_$(PRJ_DIR)/../db/db_setlsn.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_stati.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_stati.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_stati.c_objects +db_stati.o + + + FILE_$(PRJ_DIR)/../db/db_stati.c_tool +C/C++ compiler + + + 
FILE_$(PRJ_DIR)/../db/db_truncate.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_truncate.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_truncate.c_objects +db_truncate.o + + + FILE_$(PRJ_DIR)/../db/db_truncate.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_upg.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_upg.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_upg.c_objects +db_upg.o + + + FILE_$(PRJ_DIR)/../db/db_upg.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_upg_opd.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_upg_opd.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_upg_opd.c_objects +db_upg_opd.o + + + FILE_$(PRJ_DIR)/../db/db_upg_opd.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_vrfy_stub.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_vrfy_stub.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_vrfy_stub.c_objects +db_vrfy_stub.o + + + FILE_$(PRJ_DIR)/../db/db_vrfy_stub.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../dbreg/dbreg.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../dbreg/dbreg.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../dbreg/dbreg.c_objects +dbreg.o + + + FILE_$(PRJ_DIR)/../dbreg/dbreg.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_auto.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_auto.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_auto.c_objects +dbreg_auto.o + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_auto.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_rec.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_rec.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_rec.c_objects +dbreg_rec.o + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_rec.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_stat.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_stat.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_stat.c_objects +dbreg_stat.o + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_stat.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_util.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_util.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_util.c_objects +dbreg_util.o + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_util.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../env/db_salloc.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../env/db_salloc.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../env/db_salloc.c_objects +db_salloc.o + + + FILE_$(PRJ_DIR)/../env/db_salloc.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../env/db_shash.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../env/db_shash.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../env/db_shash.c_objects +db_shash.o + + + FILE_$(PRJ_DIR)/../env/db_shash.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../env/env_file.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../env/env_file.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + 
FILE_$(PRJ_DIR)/../env/env_file.c_objects +env_file.o + + + FILE_$(PRJ_DIR)/../env/env_file.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../env/env_method.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../env/env_method.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../env/env_method.c_objects +env_method.o + + + FILE_$(PRJ_DIR)/../env/env_method.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../env/env_open.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../env/env_open.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../env/env_open.c_objects +env_open.o + + + FILE_$(PRJ_DIR)/../env/env_open.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../env/env_recover.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../env/env_recover.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../env/env_recover.c_objects +env_recover.o + + + FILE_$(PRJ_DIR)/../env/env_recover.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../env/env_region.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../env/env_region.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../env/env_region.c_objects +env_region.o + + + FILE_$(PRJ_DIR)/../env/env_region.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../env/env_stat.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../env/env_stat.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../env/env_stat.c_objects +env_stat.o + + + FILE_$(PRJ_DIR)/../env/env_stat.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../fileops/fileops_auto.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../fileops/fileops_auto.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../fileops/fileops_auto.c_objects +fileops_auto.o + + + FILE_$(PRJ_DIR)/../fileops/fileops_auto.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../fileops/fop_basic.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../fileops/fop_basic.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../fileops/fop_basic.c_objects +fop_basic.o + + + FILE_$(PRJ_DIR)/../fileops/fop_basic.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../fileops/fop_rec.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../fileops/fop_rec.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../fileops/fop_rec.c_objects +fop_rec.o + + + FILE_$(PRJ_DIR)/../fileops/fop_rec.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../fileops/fop_util.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../fileops/fop_util.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../fileops/fop_util.c_objects +fop_util.o + + + FILE_$(PRJ_DIR)/../fileops/fop_util.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../hash/hash_func.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../hash/hash_func.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../hash/hash_func.c_objects +hash_func.o + + + FILE_$(PRJ_DIR)/../hash/hash_func.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../hash/hash_stub.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../hash/hash_stub.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../hash/hash_stub.c_objects +hash_stub.o + + + FILE_$(PRJ_DIR)/../hash/hash_stub.c_tool +C/C++ compiler + + + 
FILE_$(PRJ_DIR)/../hmac/hmac.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../hmac/hmac.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../hmac/hmac.c_objects +hmac.o + + + FILE_$(PRJ_DIR)/../hmac/hmac.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../hmac/sha1.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../hmac/sha1.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../hmac/sha1.c_objects +sha1.o + + + FILE_$(PRJ_DIR)/../hmac/sha1.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../lock/lock.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock.c_objects +lock.o + + + FILE_$(PRJ_DIR)/../lock/lock.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../lock/lock_deadlock.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock_deadlock.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock_deadlock.c_objects +lock_deadlock.o + + + FILE_$(PRJ_DIR)/../lock/lock_deadlock.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../lock/lock_id.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock_id.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock_id.c_objects +lock_id.o + + + FILE_$(PRJ_DIR)/../lock/lock_id.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../lock/lock_list.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock_list.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock_list.c_objects +lock_list.o + + + FILE_$(PRJ_DIR)/../lock/lock_list.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../lock/lock_method.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock_method.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock_method.c_objects +lock_method.o + + + FILE_$(PRJ_DIR)/../lock/lock_method.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../lock/lock_region.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock_region.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock_region.c_objects +lock_region.o + + + FILE_$(PRJ_DIR)/../lock/lock_region.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../lock/lock_stat.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock_stat.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock_stat.c_objects +lock_stat.o + + + FILE_$(PRJ_DIR)/../lock/lock_stat.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../lock/lock_timer.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock_timer.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock_timer.c_objects +lock_timer.o + + + FILE_$(PRJ_DIR)/../lock/lock_timer.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../lock/lock_util.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock_util.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock_util.c_objects +lock_util.o + + + FILE_$(PRJ_DIR)/../lock/lock_util.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../log/log.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../log/log.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../log/log.c_objects 
+log.o + + + FILE_$(PRJ_DIR)/../log/log.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../log/log_archive.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../log/log_archive.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../log/log_archive.c_objects +log_archive.o + + + FILE_$(PRJ_DIR)/../log/log_archive.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../log/log_compare.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../log/log_compare.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../log/log_compare.c_objects +log_compare.o + + + FILE_$(PRJ_DIR)/../log/log_compare.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../log/log_get.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../log/log_get.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../log/log_get.c_objects +log_get.o + + + FILE_$(PRJ_DIR)/../log/log_get.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../log/log_method.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../log/log_method.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../log/log_method.c_objects +log_method.o + + + FILE_$(PRJ_DIR)/../log/log_method.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../log/log_put.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../log/log_put.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../log/log_put.c_objects +log_put.o + + + FILE_$(PRJ_DIR)/../log/log_put.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../log/log_stat.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../log/log_stat.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../log/log_stat.c_objects +log_stat.o + + + FILE_$(PRJ_DIR)/../log/log_stat.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_alloc.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_alloc.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_alloc.c_objects +mp_alloc.o + + + FILE_$(PRJ_DIR)/../mp/mp_alloc.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_bh.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_bh.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_bh.c_objects +mp_bh.o + + + FILE_$(PRJ_DIR)/../mp/mp_bh.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_fget.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_fget.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_fget.c_objects +mp_fget.o + + + FILE_$(PRJ_DIR)/../mp/mp_fget.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_fmethod.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_fmethod.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_fmethod.c_objects +mp_fmethod.o + + + FILE_$(PRJ_DIR)/../mp/mp_fmethod.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_fopen.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_fopen.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_fopen.c_objects +mp_fopen.o + + + FILE_$(PRJ_DIR)/../mp/mp_fopen.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_fput.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_fput.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + 
FILE_$(PRJ_DIR)/../mp/mp_fput.c_objects +mp_fput.o + + + FILE_$(PRJ_DIR)/../mp/mp_fput.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_fset.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_fset.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_fset.c_objects +mp_fset.o + + + FILE_$(PRJ_DIR)/../mp/mp_fset.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_method.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_method.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_method.c_objects +mp_method.o + + + FILE_$(PRJ_DIR)/../mp/mp_method.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_region.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_region.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_region.c_objects +mp_region.o + + + FILE_$(PRJ_DIR)/../mp/mp_region.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_register.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_register.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_register.c_objects +mp_register.o + + + FILE_$(PRJ_DIR)/../mp/mp_register.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_stat.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_stat.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_stat.c_objects +mp_stat.o + + + FILE_$(PRJ_DIR)/../mp/mp_stat.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_sync.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_sync.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_sync.c_objects +mp_sync.o + + + FILE_$(PRJ_DIR)/../mp/mp_sync.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_trickle.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_trickle.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_trickle.c_objects +mp_trickle.o + + + FILE_$(PRJ_DIR)/../mp/mp_trickle.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mutex/mut_tas.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mutex/mut_tas.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mutex/mut_tas.c_objects +mut_tas.o + + + FILE_$(PRJ_DIR)/../mutex/mut_tas.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mutex/mutex.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mutex/mutex.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mutex/mutex.c_objects +mutex.o + + + FILE_$(PRJ_DIR)/../mutex/mutex.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_alloc.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_alloc.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_alloc.c_objects +os_alloc.o + + + FILE_$(PRJ_DIR)/../os/os_alloc.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_clock.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_clock.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_clock.c_objects +os_clock.o + + + FILE_$(PRJ_DIR)/../os/os_clock.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_dir.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_dir.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + 
+ FILE_$(PRJ_DIR)/../os/os_dir.c_objects +os_dir.o + + + FILE_$(PRJ_DIR)/../os/os_dir.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_errno.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_errno.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_errno.c_objects +os_errno.o + + + FILE_$(PRJ_DIR)/../os/os_errno.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_fid.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_fid.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_fid.c_objects +os_fid.o + + + FILE_$(PRJ_DIR)/../os/os_fid.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_fsync.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_fsync.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_fsync.c_objects +os_fsync.o + + + FILE_$(PRJ_DIR)/../os/os_fsync.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_handle.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_handle.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_handle.c_objects +os_handle.o + + + FILE_$(PRJ_DIR)/../os/os_handle.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_id.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_id.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_id.c_objects +os_id.o + + + FILE_$(PRJ_DIR)/../os/os_id.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_method.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_method.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_method.c_objects +os_method.o + + + FILE_$(PRJ_DIR)/../os/os_method.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_oflags.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_oflags.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_oflags.c_objects +os_oflags.o + + + FILE_$(PRJ_DIR)/../os/os_oflags.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_open.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_open.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_open.c_objects +os_open.o + + + FILE_$(PRJ_DIR)/../os/os_open.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_region.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_region.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_region.c_objects +os_region.o + + + FILE_$(PRJ_DIR)/../os/os_region.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_rename.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_rename.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_rename.c_objects +os_rename.o + + + FILE_$(PRJ_DIR)/../os/os_rename.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_root.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_root.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_root.c_objects +os_root.o + + + FILE_$(PRJ_DIR)/../os/os_root.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_rpath.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_rpath.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + 
FILE_$(PRJ_DIR)/../os/os_rpath.c_objects +os_rpath.o + + + FILE_$(PRJ_DIR)/../os/os_rpath.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_rw.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_rw.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_rw.c_objects +os_rw.o + + + FILE_$(PRJ_DIR)/../os/os_rw.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_seek.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_seek.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_seek.c_objects +os_seek.o + + + FILE_$(PRJ_DIR)/../os/os_seek.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_sleep.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_sleep.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_sleep.c_objects +os_sleep.o + + + FILE_$(PRJ_DIR)/../os/os_sleep.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_spin.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_spin.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_spin.c_objects +os_spin.o + + + FILE_$(PRJ_DIR)/../os/os_spin.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_stat.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_stat.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_stat.c_objects +os_stat.o + + + FILE_$(PRJ_DIR)/../os/os_stat.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_tmpdir.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_tmpdir.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_tmpdir.c_objects +os_tmpdir.o + + + FILE_$(PRJ_DIR)/../os/os_tmpdir.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_truncate.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_truncate.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_truncate.c_objects +os_truncate.o + + + FILE_$(PRJ_DIR)/../os/os_truncate.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_unlink.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_unlink.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_unlink.c_objects +os_unlink.o + + + FILE_$(PRJ_DIR)/../os/os_unlink.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_abs.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_abs.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_abs.c_objects +os_vx_abs.o + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_abs.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_config.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_config.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_config.c_objects +os_vx_config.o + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_config.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_map.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_map.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_map.c_objects +os_vx_map.o + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_map.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../qam/qam_stub.c_dependDone +TRUE + + + 
FILE_$(PRJ_DIR)/../qam/qam_stub.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../qam/qam_stub.c_objects +qam_stub.o + + + FILE_$(PRJ_DIR)/../qam/qam_stub.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../rep/rep_stub.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../rep/rep_stub.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../rep/rep_stub.c_objects +rep_stub.o + + + FILE_$(PRJ_DIR)/../rep/rep_stub.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../txn/txn.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../txn/txn.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../txn/txn.c_objects +txn.o + + + FILE_$(PRJ_DIR)/../txn/txn.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../txn/txn_auto.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../txn/txn_auto.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../txn/txn_auto.c_objects +txn_auto.o + + + FILE_$(PRJ_DIR)/../txn/txn_auto.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../txn/txn_method.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../txn/txn_method.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../txn/txn_method.c_objects +txn_method.o + + + FILE_$(PRJ_DIR)/../txn/txn_method.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../txn/txn_rec.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../txn/txn_rec.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../txn/txn_rec.c_objects +txn_rec.o + + + FILE_$(PRJ_DIR)/../txn/txn_rec.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../txn/txn_recover.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../txn/txn_recover.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../txn/txn_recover.c_objects +txn_recover.o + + + FILE_$(PRJ_DIR)/../txn/txn_recover.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../txn/txn_region.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../txn/txn_region.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../txn/txn_region.c_objects +txn_region.o + + + FILE_$(PRJ_DIR)/../txn/txn_region.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../txn/txn_stat.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../txn/txn_stat.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../txn/txn_stat.c_objects +txn_stat.o + + + FILE_$(PRJ_DIR)/../txn/txn_stat.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../txn/txn_util.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../txn/txn_util.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../txn/txn_util.c_objects +txn_util.o + + + FILE_$(PRJ_DIR)/../txn/txn_util.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../xa/xa.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../xa/xa.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../xa/xa.c_objects +xa.o + + + FILE_$(PRJ_DIR)/../xa/xa.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../xa/xa_db.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../xa/xa_db.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../xa/xa_db.c_objects +xa_db.o + + + FILE_$(PRJ_DIR)/../xa/xa_db.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../xa/xa_map.c_dependDone +TRUE + + + 
FILE_$(PRJ_DIR)/../xa/xa_map.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../xa/xa_map.c_objects +xa_map.o + + + FILE_$(PRJ_DIR)/../xa/xa_map.c_tool +C/C++ compiler + + + PROJECT_FILES +$(PRJ_DIR)/../btree/bt_compare.c \ + $(PRJ_DIR)/../btree/bt_conv.c \ + $(PRJ_DIR)/../btree/bt_curadj.c \ + $(PRJ_DIR)/../btree/bt_cursor.c \ + $(PRJ_DIR)/../btree/bt_delete.c \ + $(PRJ_DIR)/../btree/bt_method.c \ + $(PRJ_DIR)/../btree/bt_open.c \ + $(PRJ_DIR)/../btree/bt_put.c \ + $(PRJ_DIR)/../btree/bt_rec.c \ + $(PRJ_DIR)/../btree/bt_reclaim.c \ + $(PRJ_DIR)/../btree/bt_recno.c \ + $(PRJ_DIR)/../btree/bt_rsearch.c \ + $(PRJ_DIR)/../btree/bt_search.c \ + $(PRJ_DIR)/../btree/bt_split.c \ + $(PRJ_DIR)/../btree/bt_stat.c \ + $(PRJ_DIR)/../btree/bt_upgrade.c \ + $(PRJ_DIR)/../btree/btree_auto.c \ + $(PRJ_DIR)/../clib/getopt.c \ + $(PRJ_DIR)/../clib/snprintf.c \ + $(PRJ_DIR)/../clib/strcasecmp.c \ + $(PRJ_DIR)/../clib/strdup.c \ + $(PRJ_DIR)/../common/crypto_stub.c \ + $(PRJ_DIR)/../common/db_byteorder.c \ + $(PRJ_DIR)/../common/db_err.c \ + $(PRJ_DIR)/../common/db_getlong.c \ + $(PRJ_DIR)/../common/db_idspace.c \ + $(PRJ_DIR)/../common/db_log2.c \ + $(PRJ_DIR)/../common/util_arg.c \ + $(PRJ_DIR)/../common/util_cache.c \ + $(PRJ_DIR)/../common/util_log.c \ + $(PRJ_DIR)/../common/util_sig.c \ + $(PRJ_DIR)/../db/crdel_auto.c \ + $(PRJ_DIR)/../db/crdel_rec.c \ + $(PRJ_DIR)/../db/db.c \ + $(PRJ_DIR)/../db/db_am.c \ + $(PRJ_DIR)/../db/db_auto.c \ + $(PRJ_DIR)/../db/db_cam.c \ + $(PRJ_DIR)/../db/db_conv.c \ + $(PRJ_DIR)/../db/db_dispatch.c \ + $(PRJ_DIR)/../db/db_dup.c \ + $(PRJ_DIR)/../db/db_iface.c \ + $(PRJ_DIR)/../db/db_join.c \ + $(PRJ_DIR)/../db/db_meta.c \ + $(PRJ_DIR)/../db/db_method.c \ + $(PRJ_DIR)/../db/db_open.c \ + $(PRJ_DIR)/../db/db_overflow.c \ + $(PRJ_DIR)/../db/db_pr.c \ + $(PRJ_DIR)/../db/db_rec.c \ + $(PRJ_DIR)/../db/db_reclaim.c \ + $(PRJ_DIR)/../db/db_remove.c \ + $(PRJ_DIR)/../db/db_rename.c \ + $(PRJ_DIR)/../db/db_ret.c \ + $(PRJ_DIR)/../db/db_setid.c \ + $(PRJ_DIR)/../db/db_setlsn.c \ + $(PRJ_DIR)/../db/db_stati.c \ + $(PRJ_DIR)/../db/db_truncate.c \ + $(PRJ_DIR)/../db/db_upg.c \ + $(PRJ_DIR)/../db/db_upg_opd.c \ + $(PRJ_DIR)/../db/db_vrfy_stub.c \ + $(PRJ_DIR)/../dbreg/dbreg.c \ + $(PRJ_DIR)/../dbreg/dbreg_auto.c \ + $(PRJ_DIR)/../dbreg/dbreg_rec.c \ + $(PRJ_DIR)/../dbreg/dbreg_stat.c \ + $(PRJ_DIR)/../dbreg/dbreg_util.c \ + $(PRJ_DIR)/../env/db_salloc.c \ + $(PRJ_DIR)/../env/db_shash.c \ + $(PRJ_DIR)/../env/env_file.c \ + $(PRJ_DIR)/../env/env_method.c \ + $(PRJ_DIR)/../env/env_open.c \ + $(PRJ_DIR)/../env/env_recover.c \ + $(PRJ_DIR)/../env/env_region.c \ + $(PRJ_DIR)/../env/env_stat.c \ + $(PRJ_DIR)/../fileops/fileops_auto.c \ + $(PRJ_DIR)/../fileops/fop_basic.c \ + $(PRJ_DIR)/../fileops/fop_rec.c \ + $(PRJ_DIR)/../fileops/fop_util.c \ + $(PRJ_DIR)/../hash/hash_func.c \ + $(PRJ_DIR)/../hash/hash_stub.c \ + $(PRJ_DIR)/../hmac/hmac.c \ + $(PRJ_DIR)/../hmac/sha1.c \ + $(PRJ_DIR)/../lock/lock.c \ + $(PRJ_DIR)/../lock/lock_deadlock.c \ + $(PRJ_DIR)/../lock/lock_id.c \ + $(PRJ_DIR)/../lock/lock_list.c \ + $(PRJ_DIR)/../lock/lock_method.c \ + $(PRJ_DIR)/../lock/lock_region.c \ + $(PRJ_DIR)/../lock/lock_stat.c \ + $(PRJ_DIR)/../lock/lock_timer.c \ + $(PRJ_DIR)/../lock/lock_util.c \ + $(PRJ_DIR)/../log/log.c \ + $(PRJ_DIR)/../log/log_archive.c \ + $(PRJ_DIR)/../log/log_compare.c \ + $(PRJ_DIR)/../log/log_get.c \ + $(PRJ_DIR)/../log/log_method.c \ + $(PRJ_DIR)/../log/log_put.c \ + $(PRJ_DIR)/../log/log_stat.c \ + 
$(PRJ_DIR)/../mp/mp_alloc.c \ + $(PRJ_DIR)/../mp/mp_bh.c \ + $(PRJ_DIR)/../mp/mp_fget.c \ + $(PRJ_DIR)/../mp/mp_fmethod.c \ + $(PRJ_DIR)/../mp/mp_fopen.c \ + $(PRJ_DIR)/../mp/mp_fput.c \ + $(PRJ_DIR)/../mp/mp_fset.c \ + $(PRJ_DIR)/../mp/mp_method.c \ + $(PRJ_DIR)/../mp/mp_region.c \ + $(PRJ_DIR)/../mp/mp_register.c \ + $(PRJ_DIR)/../mp/mp_stat.c \ + $(PRJ_DIR)/../mp/mp_sync.c \ + $(PRJ_DIR)/../mp/mp_trickle.c \ + $(PRJ_DIR)/../mutex/mut_tas.c \ + $(PRJ_DIR)/../mutex/mutex.c \ + $(PRJ_DIR)/../os/os_alloc.c \ + $(PRJ_DIR)/../os/os_clock.c \ + $(PRJ_DIR)/../os/os_dir.c \ + $(PRJ_DIR)/../os/os_errno.c \ + $(PRJ_DIR)/../os/os_fid.c \ + $(PRJ_DIR)/../os/os_fsync.c \ + $(PRJ_DIR)/../os/os_handle.c \ + $(PRJ_DIR)/../os/os_id.c \ + $(PRJ_DIR)/../os/os_method.c \ + $(PRJ_DIR)/../os/os_oflags.c \ + $(PRJ_DIR)/../os/os_open.c \ + $(PRJ_DIR)/../os/os_region.c \ + $(PRJ_DIR)/../os/os_rename.c \ + $(PRJ_DIR)/../os/os_root.c \ + $(PRJ_DIR)/../os/os_rpath.c \ + $(PRJ_DIR)/../os/os_rw.c \ + $(PRJ_DIR)/../os/os_seek.c \ + $(PRJ_DIR)/../os/os_sleep.c \ + $(PRJ_DIR)/../os/os_spin.c \ + $(PRJ_DIR)/../os/os_stat.c \ + $(PRJ_DIR)/../os/os_tmpdir.c \ + $(PRJ_DIR)/../os/os_truncate.c \ + $(PRJ_DIR)/../os/os_unlink.c \ + $(PRJ_DIR)/../os_vxworks/os_vx_abs.c \ + $(PRJ_DIR)/../os_vxworks/os_vx_config.c \ + $(PRJ_DIR)/../os_vxworks/os_vx_map.c \ + $(PRJ_DIR)/../qam/qam_stub.c \ + $(PRJ_DIR)/../rep/rep_stub.c \ + $(PRJ_DIR)/../txn/txn.c \ + $(PRJ_DIR)/../txn/txn_auto.c \ + $(PRJ_DIR)/../txn/txn_method.c \ + $(PRJ_DIR)/../txn/txn_rec.c \ + $(PRJ_DIR)/../txn/txn_recover.c \ + $(PRJ_DIR)/../txn/txn_region.c \ + $(PRJ_DIR)/../txn/txn_stat.c \ + $(PRJ_DIR)/../txn/txn_util.c \ + $(PRJ_DIR)/../xa/xa.c \ + $(PRJ_DIR)/../xa/xa_db.c \ + $(PRJ_DIR)/../xa/xa_map.c + + + userComments +BerkeleyDB + diff --git a/db/build_vxworks/BerkeleyDB20small.wsp b/db/build_vxworks/BerkeleyDB20small.wsp new file mode 100755 index 000000000..f1f88b546 --- /dev/null +++ b/db/build_vxworks/BerkeleyDB20small.wsp @@ -0,0 +1,29 @@ +Document file - DO NOT EDIT + + CORE_INFO_TYPE +Workspace + + + CORE_INFO_VERSION +2.0 + + + projectList +$(PRJ_DIR)/BerkeleyDB20small.wpj \ + $(PRJ_DIR)/db_archive/db_archive20.wpj \ + $(PRJ_DIR)/db_checkpoint/db_checkpoint20.wpj \ + $(PRJ_DIR)/db_deadlock/db_deadlock20.wpj \ + $(PRJ_DIR)/db_dump/db_dump20.wpj \ + $(PRJ_DIR)/db_load/db_load20.wpj \ + $(PRJ_DIR)/db_printlog/db_printlog20.wpj \ + $(PRJ_DIR)/db_recover/db_recover20.wpj \ + $(PRJ_DIR)/db_stat/db_stat20.wpj \ + $(PRJ_DIR)/db_upgrade/db_upgrade20.wpj \ + $(PRJ_DIR)/db_verify/db_verify20.wpj \ + $(PRJ_DIR)/dbdemo/dbdemo20.wpj + + + userComments + + + diff --git a/db/build_vxworks/BerkeleyDB22.wpj b/db/build_vxworks/BerkeleyDB22.wpj index 5aa71a330..36ae3efb4 100755 --- a/db/build_vxworks/BerkeleyDB22.wpj +++ b/db/build_vxworks/BerkeleyDB22.wpj @@ -704,24 +704,6 @@ strdup.o C/C++ compiler - FILE_$(PRJ_DIR)/../clib/vsnprintf.c_dependDone -TRUE - - - FILE_$(PRJ_DIR)/../clib/vsnprintf.c_dependencies -$(PRJ_DIR)/db_config.h \ - $(PRJ_DIR)/db_int.h \ - $(PRJ_DIR)/db.h - - - FILE_$(PRJ_DIR)/../clib/vsnprintf.c_objects -vsnprintf.o - - - FILE_$(PRJ_DIR)/../clib/vsnprintf.c_tool -C/C++ compiler - - FILE_$(PRJ_DIR)/../common/db_byteorder.c_dependDone TRUE @@ -1370,6 +1352,60 @@ db_ret.o C/C++ compiler + FILE_$(PRJ_DIR)/../db/db_setid.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_setid.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_setid.c_objects +db_setid.o + + + FILE_$(PRJ_DIR)/../db/db_setid.c_tool 
+C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_setlsn.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_setlsn.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_setlsn.c_objects +db_setlsn.o + + + FILE_$(PRJ_DIR)/../db/db_setlsn.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_stati.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_stati.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_stati.c_objects +db_stati.o + + + FILE_$(PRJ_DIR)/../db/db_stati.c_tool +C/C++ compiler + + FILE_$(PRJ_DIR)/../db/db_truncate.c_dependDone TRUE @@ -1514,6 +1550,24 @@ dbreg_rec.o C/C++ compiler + FILE_$(PRJ_DIR)/../dbreg/dbreg_stat.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_stat.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_stat.c_objects +dbreg_stat.o + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_stat.c_tool +C/C++ compiler + + FILE_$(PRJ_DIR)/../dbreg/dbreg_util.c_dependDone TRUE @@ -1658,6 +1712,24 @@ env_region.o C/C++ compiler + FILE_$(PRJ_DIR)/../env/env_stat.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../env/env_stat.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../env/env_stat.c_objects +env_stat.o + + + FILE_$(PRJ_DIR)/../env/env_stat.c_tool +C/C++ compiler + + FILE_$(PRJ_DIR)/../fileops/fileops_auto.c_dependDone TRUE @@ -2072,6 +2144,42 @@ lock_deadlock.o C/C++ compiler + FILE_$(PRJ_DIR)/../lock/lock_id.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock_id.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock_id.c_objects +lock_id.o + + + FILE_$(PRJ_DIR)/../lock/lock_id.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../lock/lock_list.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock_list.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock_list.c_objects +lock_list.o + + + FILE_$(PRJ_DIR)/../lock/lock_list.c_tool +C/C++ compiler + + FILE_$(PRJ_DIR)/../lock/lock_method.c_dependDone TRUE @@ -2126,6 +2234,24 @@ lock_stat.o C/C++ compiler + FILE_$(PRJ_DIR)/../lock/lock_timer.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock_timer.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock_timer.c_objects +lock_timer.o + + + FILE_$(PRJ_DIR)/../lock/lock_timer.c_tool +C/C++ compiler + + FILE_$(PRJ_DIR)/../lock/lock_util.c_dependDone TRUE @@ -2252,6 +2378,24 @@ log_put.o C/C++ compiler + FILE_$(PRJ_DIR)/../log/log_stat.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../log/log_stat.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../log/log_stat.c_objects +log_stat.o + + + FILE_$(PRJ_DIR)/../log/log_stat.c_tool +C/C++ compiler + + FILE_$(PRJ_DIR)/../mp/mp_alloc.c_dependDone TRUE @@ -2306,6 +2450,24 @@ mp_fget.o C/C++ compiler + FILE_$(PRJ_DIR)/../mp/mp_fmethod.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_fmethod.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_fmethod.c_objects +mp_fmethod.o + + + FILE_$(PRJ_DIR)/../mp/mp_fmethod.c_tool +C/C++ compiler + + FILE_$(PRJ_DIR)/../mp/mp_fopen.c_dependDone TRUE @@ -2882,6 +3044,24 @@ os_tmpdir.o C/C++ compiler + FILE_$(PRJ_DIR)/../os/os_truncate.c_dependDone +TRUE + + + 
FILE_$(PRJ_DIR)/../os/os_truncate.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_truncate.c_objects +os_truncate.o + + + FILE_$(PRJ_DIR)/../os/os_truncate.c_tool +C/C++ compiler + + FILE_$(PRJ_DIR)/../os/os_unlink.c_dependDone TRUE @@ -3134,165 +3314,129 @@ qam_verify.o C/C++ compiler - FILE_$(PRJ_DIR)/../rep/rep_method.c_dependDone + FILE_$(PRJ_DIR)/../rep/rep_auto.c_dependDone TRUE - FILE_$(PRJ_DIR)/../rep/rep_method.c_dependencies + FILE_$(PRJ_DIR)/../rep/rep_auto.c_dependencies $(PRJ_DIR)/db_config.h \ $(PRJ_DIR)/db_int.h \ $(PRJ_DIR)/db.h - FILE_$(PRJ_DIR)/../rep/rep_method.c_objects -rep_method.o + FILE_$(PRJ_DIR)/../rep/rep_auto.c_objects +rep_auto.o - FILE_$(PRJ_DIR)/../rep/rep_method.c_tool + FILE_$(PRJ_DIR)/../rep/rep_auto.c_tool C/C++ compiler - FILE_$(PRJ_DIR)/../rep/rep_record.c_dependDone + FILE_$(PRJ_DIR)/../rep/rep_backup.c_dependDone TRUE - FILE_$(PRJ_DIR)/../rep/rep_record.c_dependencies + FILE_$(PRJ_DIR)/../rep/rep_backup.c_dependencies $(PRJ_DIR)/db_config.h \ $(PRJ_DIR)/db_int.h \ $(PRJ_DIR)/db.h - FILE_$(PRJ_DIR)/../rep/rep_record.c_objects -rep_record.o + FILE_$(PRJ_DIR)/../rep/rep_backup.c_objects +rep_backup.o - FILE_$(PRJ_DIR)/../rep/rep_record.c_tool + FILE_$(PRJ_DIR)/../rep/rep_backup.c_tool C/C++ compiler - FILE_$(PRJ_DIR)/../rep/rep_region.c_dependDone -TRUE - - - FILE_$(PRJ_DIR)/../rep/rep_region.c_dependencies -$(PRJ_DIR)/db_config.h \ - $(PRJ_DIR)/db_int.h \ - $(PRJ_DIR)/db.h - - - FILE_$(PRJ_DIR)/../rep/rep_region.c_objects -rep_region.o - - - FILE_$(PRJ_DIR)/../rep/rep_region.c_tool -C/C++ compiler - - - FILE_$(PRJ_DIR)/../rep/rep_util.c_dependDone -TRUE - - - FILE_$(PRJ_DIR)/../rep/rep_util.c_dependencies -$(PRJ_DIR)/db_config.h \ - $(PRJ_DIR)/db_int.h \ - $(PRJ_DIR)/db.h - - - FILE_$(PRJ_DIR)/../rep/rep_util.c_objects -rep_util.o - - - FILE_$(PRJ_DIR)/../rep/rep_util.c_tool -C/C++ compiler - - - FILE_$(PRJ_DIR)/../rpc_client/client.c_dependDone + FILE_$(PRJ_DIR)/../rep/rep_method.c_dependDone TRUE - FILE_$(PRJ_DIR)/../rpc_client/client.c_dependencies + FILE_$(PRJ_DIR)/../rep/rep_method.c_dependencies $(PRJ_DIR)/db_config.h \ $(PRJ_DIR)/db_int.h \ $(PRJ_DIR)/db.h - FILE_$(PRJ_DIR)/../rpc_client/client.c_objects -client.o + FILE_$(PRJ_DIR)/../rep/rep_method.c_objects +rep_method.o - FILE_$(PRJ_DIR)/../rpc_client/client.c_tool + FILE_$(PRJ_DIR)/../rep/rep_method.c_tool C/C++ compiler - FILE_$(PRJ_DIR)/../rpc_client/db_server_clnt.c_dependDone + FILE_$(PRJ_DIR)/../rep/rep_record.c_dependDone TRUE - FILE_$(PRJ_DIR)/../rpc_client/db_server_clnt.c_dependencies + FILE_$(PRJ_DIR)/../rep/rep_record.c_dependencies $(PRJ_DIR)/db_config.h \ $(PRJ_DIR)/db_int.h \ $(PRJ_DIR)/db.h - FILE_$(PRJ_DIR)/../rpc_client/db_server_clnt.c_objects -db_server_clnt.o + FILE_$(PRJ_DIR)/../rep/rep_record.c_objects +rep_record.o - FILE_$(PRJ_DIR)/../rpc_client/db_server_clnt.c_tool + FILE_$(PRJ_DIR)/../rep/rep_record.c_tool C/C++ compiler - FILE_$(PRJ_DIR)/../rpc_client/gen_client.c_dependDone + FILE_$(PRJ_DIR)/../rep/rep_region.c_dependDone TRUE - FILE_$(PRJ_DIR)/../rpc_client/gen_client.c_dependencies + FILE_$(PRJ_DIR)/../rep/rep_region.c_dependencies $(PRJ_DIR)/db_config.h \ $(PRJ_DIR)/db_int.h \ $(PRJ_DIR)/db.h - FILE_$(PRJ_DIR)/../rpc_client/gen_client.c_objects -gen_client.o + FILE_$(PRJ_DIR)/../rep/rep_region.c_objects +rep_region.o - FILE_$(PRJ_DIR)/../rpc_client/gen_client.c_tool + FILE_$(PRJ_DIR)/../rep/rep_region.c_tool C/C++ compiler - FILE_$(PRJ_DIR)/../rpc_client/gen_client_ret.c_dependDone + 
FILE_$(PRJ_DIR)/../rep/rep_stat.c_dependDone TRUE - FILE_$(PRJ_DIR)/../rpc_client/gen_client_ret.c_dependencies + FILE_$(PRJ_DIR)/../rep/rep_stat.c_dependencies $(PRJ_DIR)/db_config.h \ $(PRJ_DIR)/db_int.h \ $(PRJ_DIR)/db.h - FILE_$(PRJ_DIR)/../rpc_client/gen_client_ret.c_objects -gen_client_ret.o + FILE_$(PRJ_DIR)/../rep/rep_stat.c_objects +rep_stat.o - FILE_$(PRJ_DIR)/../rpc_client/gen_client_ret.c_tool + FILE_$(PRJ_DIR)/../rep/rep_stat.c_tool C/C++ compiler - FILE_$(PRJ_DIR)/../rpc_server/c/db_server_xdr.c_dependDone + FILE_$(PRJ_DIR)/../rep/rep_util.c_dependDone TRUE - FILE_$(PRJ_DIR)/../rpc_server/c/db_server_xdr.c_dependencies + FILE_$(PRJ_DIR)/../rep/rep_util.c_dependencies $(PRJ_DIR)/db_config.h \ $(PRJ_DIR)/db_int.h \ $(PRJ_DIR)/db.h - FILE_$(PRJ_DIR)/../rpc_server/c/db_server_xdr.c_objects -db_server_xdr.o + FILE_$(PRJ_DIR)/../rep/rep_util.c_objects +rep_util.o - FILE_$(PRJ_DIR)/../rpc_server/c/db_server_xdr.c_tool + FILE_$(PRJ_DIR)/../rep/rep_util.c_tool C/C++ compiler @@ -3517,7 +3661,6 @@ $(PRJ_DIR)/../btree/bt_compare.c \ $(PRJ_DIR)/../clib/snprintf.c \ $(PRJ_DIR)/../clib/strcasecmp.c \ $(PRJ_DIR)/../clib/strdup.c \ - $(PRJ_DIR)/../clib/vsnprintf.c \ $(PRJ_DIR)/../common/db_byteorder.c \ $(PRJ_DIR)/../common/db_err.c \ $(PRJ_DIR)/../common/db_getlong.c \ @@ -3554,6 +3697,9 @@ $(PRJ_DIR)/../btree/bt_compare.c \ $(PRJ_DIR)/../db/db_remove.c \ $(PRJ_DIR)/../db/db_rename.c \ $(PRJ_DIR)/../db/db_ret.c \ + $(PRJ_DIR)/../db/db_setid.c \ + $(PRJ_DIR)/../db/db_setlsn.c \ + $(PRJ_DIR)/../db/db_stati.c \ $(PRJ_DIR)/../db/db_truncate.c \ $(PRJ_DIR)/../db/db_upg.c \ $(PRJ_DIR)/../db/db_upg_opd.c \ @@ -3562,6 +3708,7 @@ $(PRJ_DIR)/../btree/bt_compare.c \ $(PRJ_DIR)/../dbreg/dbreg.c \ $(PRJ_DIR)/../dbreg/dbreg_auto.c \ $(PRJ_DIR)/../dbreg/dbreg_rec.c \ + $(PRJ_DIR)/../dbreg/dbreg_stat.c \ $(PRJ_DIR)/../dbreg/dbreg_util.c \ $(PRJ_DIR)/../env/db_salloc.c \ $(PRJ_DIR)/../env/db_shash.c \ @@ -3570,6 +3717,7 @@ $(PRJ_DIR)/../btree/bt_compare.c \ $(PRJ_DIR)/../env/env_open.c \ $(PRJ_DIR)/../env/env_recover.c \ $(PRJ_DIR)/../env/env_region.c \ + $(PRJ_DIR)/../env/env_stat.c \ $(PRJ_DIR)/../fileops/fileops_auto.c \ $(PRJ_DIR)/../fileops/fop_basic.c \ $(PRJ_DIR)/../fileops/fop_rec.c \ @@ -3593,9 +3741,12 @@ $(PRJ_DIR)/../btree/bt_compare.c \ $(PRJ_DIR)/../hsearch/hsearch.c \ $(PRJ_DIR)/../lock/lock.c \ $(PRJ_DIR)/../lock/lock_deadlock.c \ + $(PRJ_DIR)/../lock/lock_id.c \ + $(PRJ_DIR)/../lock/lock_list.c \ $(PRJ_DIR)/../lock/lock_method.c \ $(PRJ_DIR)/../lock/lock_region.c \ $(PRJ_DIR)/../lock/lock_stat.c \ + $(PRJ_DIR)/../lock/lock_timer.c \ $(PRJ_DIR)/../lock/lock_util.c \ $(PRJ_DIR)/../log/log.c \ $(PRJ_DIR)/../log/log_archive.c \ @@ -3603,9 +3754,11 @@ $(PRJ_DIR)/../btree/bt_compare.c \ $(PRJ_DIR)/../log/log_get.c \ $(PRJ_DIR)/../log/log_method.c \ $(PRJ_DIR)/../log/log_put.c \ + $(PRJ_DIR)/../log/log_stat.c \ $(PRJ_DIR)/../mp/mp_alloc.c \ $(PRJ_DIR)/../mp/mp_bh.c \ $(PRJ_DIR)/../mp/mp_fget.c \ + $(PRJ_DIR)/../mp/mp_fmethod.c \ $(PRJ_DIR)/../mp/mp_fopen.c \ $(PRJ_DIR)/../mp/mp_fput.c \ $(PRJ_DIR)/../mp/mp_fset.c \ @@ -3638,6 +3791,7 @@ $(PRJ_DIR)/../btree/bt_compare.c \ $(PRJ_DIR)/../os/os_spin.c \ $(PRJ_DIR)/../os/os_stat.c \ $(PRJ_DIR)/../os/os_tmpdir.c \ + $(PRJ_DIR)/../os/os_truncate.c \ $(PRJ_DIR)/../os/os_unlink.c \ $(PRJ_DIR)/../os_vxworks/os_vx_abs.c \ $(PRJ_DIR)/../os_vxworks/os_vx_config.c \ @@ -3652,15 +3806,13 @@ $(PRJ_DIR)/../btree/bt_compare.c \ $(PRJ_DIR)/../qam/qam_stat.c \ $(PRJ_DIR)/../qam/qam_upgrade.c \ $(PRJ_DIR)/../qam/qam_verify.c \ + $(PRJ_DIR)/../rep/rep_auto.c \ + 
$(PRJ_DIR)/../rep/rep_backup.c \ $(PRJ_DIR)/../rep/rep_method.c \ $(PRJ_DIR)/../rep/rep_record.c \ $(PRJ_DIR)/../rep/rep_region.c \ + $(PRJ_DIR)/../rep/rep_stat.c \ $(PRJ_DIR)/../rep/rep_util.c \ - $(PRJ_DIR)/../rpc_client/client.c \ - $(PRJ_DIR)/../rpc_client/db_server_clnt.c \ - $(PRJ_DIR)/../rpc_client/gen_client.c \ - $(PRJ_DIR)/../rpc_client/gen_client_ret.c \ - $(PRJ_DIR)/../rpc_server/c/db_server_xdr.c \ $(PRJ_DIR)/../txn/txn.c \ $(PRJ_DIR)/../txn/txn_auto.c \ $(PRJ_DIR)/../txn/txn_method.c \ diff --git a/db/build_vxworks/BerkeleyDB22small.wpj b/db/build_vxworks/BerkeleyDB22small.wpj new file mode 100755 index 000000000..dce76777c --- /dev/null +++ b/db/build_vxworks/BerkeleyDB22small.wpj @@ -0,0 +1,3166 @@ +Document file - DO NOT EDIT + + BUILD_PENTIUM_debug_BUILDRULE +BerkeleyDB22small.out + + + BUILD_PENTIUM_debug_MACRO_AR +arpentium + + + BUILD_PENTIUM_debug_MACRO_ARCHIVE +$(PRJ_DIR)/PENTIUM_debug/BerkeleyDB22small.a + + + BUILD_PENTIUM_debug_MACRO_AS +ccpentium + + + BUILD_PENTIUM_debug_MACRO_CC +ccpentium + + + BUILD_PENTIUM_debug_MACRO_CC_ARCH_SPEC +-mcpu=pentiumpro -march=pentiumpro + + + BUILD_PENTIUM_debug_MACRO_CFLAGS +-g \ + -mcpu=pentiumpro \ + -march=pentiumpro \ + -ansi \ + -nostdlib \ + -fno-builtin \ + -fno-defer-pop \ + -I. \ + -I$(WIND_BASE)/target/h \ + -DCPU=PENTIUM2 \ + -DTOOL_FAMILY=gnu \ + -DTOOL=gnu \ + -O0 \ + -I$(PRJ_DIR) \ + -I$(PRJ_DIR)/.. \ + -DDIAGNOSTIC \ + -DDEBUG + + + BUILD_PENTIUM_debug_MACRO_CFLAGS_AS +-g \ + -mcpu=pentiumpro \ + -march=pentiumpro \ + -ansi \ + -nostdlib \ + -fno-builtin \ + -fno-defer-pop \ + -P \ + -xassembler-with-cpp \ + -I. \ + -I$(WIND_BASE)/target/h \ + -DCPU=PENTIUM2 \ + -DTOOL_FAMILY=gnu \ + -DTOOL=gnu + + + BUILD_PENTIUM_debug_MACRO_CPP +ccpentium -E -P + + + BUILD_PENTIUM_debug_MACRO_HEX_FLAGS + + + + BUILD_PENTIUM_debug_MACRO_LD +ldpentium + + + BUILD_PENTIUM_debug_MACRO_LDFLAGS +-X -N + + + BUILD_PENTIUM_debug_MACRO_LD_PARTIAL +ccpentium -r -nostdlib -Wl,-X + + + BUILD_PENTIUM_debug_MACRO_LD_PARTIAL_FLAGS +-X -r + + + BUILD_PENTIUM_debug_MACRO_NM +nmpentium -g + + + BUILD_PENTIUM_debug_MACRO_OPTION_DEFINE_MACRO +-D + + + BUILD_PENTIUM_debug_MACRO_OPTION_DEPEND +-M -w + + + BUILD_PENTIUM_debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE +-MD + + + BUILD_PENTIUM_debug_MACRO_OPTION_INCLUDE_DIR +-I + + + BUILD_PENTIUM_debug_MACRO_OPTION_LANG_C +-xc + + + BUILD_PENTIUM_debug_MACRO_OPTION_UNDEFINE_MACRO +-U + + + BUILD_PENTIUM_debug_MACRO_POST_BUILD_RULE + + + + BUILD_PENTIUM_debug_MACRO_PRJ_LIBS + + + + BUILD_PENTIUM_debug_MACRO_SIZE +sizepentium + + + BUILD_PENTIUM_debug_MACRO_TOOL_FAMILY +gnu + + + BUILD_PENTIUM_debug_RO_DEPEND_PATH +{$(WIND_BASE)/target/h/} \ + {$(WIND_BASE)/target/src/} \ + {$(WIND_BASE)/target/config/} + + + BUILD_PENTIUM_debug_TC +::tc_PENTIUM2gnu + + + BUILD_PENTIUM_release_BUILDRULE +BerkeleyDB22small.out + + + BUILD_PENTIUM_release_MACRO_AR +arpentium + + + BUILD_PENTIUM_release_MACRO_ARCHIVE +$(PRJ_DIR)/PENTIUM_release/BerkeleyDB22small.a + + + BUILD_PENTIUM_release_MACRO_AS +ccpentium + + + BUILD_PENTIUM_release_MACRO_CC +ccpentium + + + BUILD_PENTIUM_release_MACRO_CC_ARCH_SPEC +-mcpu=pentiumpro -march=pentiumpro + + + BUILD_PENTIUM_release_MACRO_CFLAGS +-g \ + -mcpu=pentiumpro \ + -march=pentiumpro \ + -ansi \ + -nostdlib \ + -fno-builtin \ + -fno-defer-pop \ + -I. \ + -I$(WIND_BASE)/target/h \ + -DCPU=PENTIUM2 \ + -DTOOL_FAMILY=gnu \ + -DTOOL=gnu \ + -O2 \ + -I$(PRJ_DIR) \ + -I$(PRJ_DIR)/.. 
+ + + BUILD_PENTIUM_release_MACRO_CFLAGS_AS +-g \ + -mcpu=pentiumpro \ + -march=pentiumpro \ + -ansi \ + -nostdlib \ + -fno-builtin \ + -fno-defer-pop \ + -P \ + -xassembler-with-cpp \ + -I. \ + -I$(WIND_BASE)/target/h \ + -DCPU=PENTIUM2 \ + -DTOOL_FAMILY=gnu \ + -DTOOL=gnu + + + BUILD_PENTIUM_release_MACRO_CPP +ccpentium -E -P + + + BUILD_PENTIUM_release_MACRO_HEX_FLAGS + + + + BUILD_PENTIUM_release_MACRO_LD +ldpentium + + + BUILD_PENTIUM_release_MACRO_LDFLAGS +-X -N + + + BUILD_PENTIUM_release_MACRO_LD_PARTIAL +ccpentium -r -nostdlib -Wl,-X + + + BUILD_PENTIUM_release_MACRO_LD_PARTIAL_FLAGS +-X -r + + + BUILD_PENTIUM_release_MACRO_NM +nmpentium -g + + + BUILD_PENTIUM_release_MACRO_OPTION_DEFINE_MACRO +-D + + + BUILD_PENTIUM_release_MACRO_OPTION_DEPEND +-M -w + + + BUILD_PENTIUM_release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE +-MD + + + BUILD_PENTIUM_release_MACRO_OPTION_INCLUDE_DIR +-I + + + BUILD_PENTIUM_release_MACRO_OPTION_LANG_C +-xc + + + BUILD_PENTIUM_release_MACRO_OPTION_UNDEFINE_MACRO +-U + + + BUILD_PENTIUM_release_MACRO_POST_BUILD_RULE + + + + BUILD_PENTIUM_release_MACRO_PRJ_LIBS + + + + BUILD_PENTIUM_release_MACRO_SIZE +sizepentium + + + BUILD_PENTIUM_release_MACRO_TOOL_FAMILY +gnu + + + BUILD_PENTIUM_release_RO_DEPEND_PATH +{$(WIND_BASE)/target/h/} \ + {$(WIND_BASE)/target/src/} \ + {$(WIND_BASE)/target/config/} + + + BUILD_PENTIUM_release_TC +::tc_PENTIUM2gnu + + + BUILD_RULE_BerkeleyDB22small.out + + + + BUILD_RULE_BerkeleyDB22small.pl + + + + BUILD_RULE_archive + + + + BUILD_RULE_objects + + + + BUILD__CURRENT +PENTIUM_debug + + + BUILD__LIST +PENTIUM_release PENTIUM_debug + + + CORE_INFO_TYPE +::prj_vxApp + + + CORE_INFO_VERSION +2.2 + + + FILE_$(PRJ_DIR)/../btree/bt_compare.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_compare.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_compare.c_objects +bt_compare.o + + + FILE_$(PRJ_DIR)/../btree/bt_compare.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_conv.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_conv.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_conv.c_objects +bt_conv.o + + + FILE_$(PRJ_DIR)/../btree/bt_conv.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_curadj.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_curadj.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_curadj.c_objects +bt_curadj.o + + + FILE_$(PRJ_DIR)/../btree/bt_curadj.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_cursor.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_cursor.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_cursor.c_objects +bt_cursor.o + + + FILE_$(PRJ_DIR)/../btree/bt_cursor.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_delete.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_delete.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_delete.c_objects +bt_delete.o + + + FILE_$(PRJ_DIR)/../btree/bt_delete.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_method.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_method.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_method.c_objects +bt_method.o + + + FILE_$(PRJ_DIR)/../btree/bt_method.c_tool +C/C++ compiler + + + 
FILE_$(PRJ_DIR)/../btree/bt_open.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_open.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_open.c_objects +bt_open.o + + + FILE_$(PRJ_DIR)/../btree/bt_open.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_put.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_put.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_put.c_objects +bt_put.o + + + FILE_$(PRJ_DIR)/../btree/bt_put.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_rec.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_rec.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_rec.c_objects +bt_rec.o + + + FILE_$(PRJ_DIR)/../btree/bt_rec.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_reclaim.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_reclaim.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_reclaim.c_objects +bt_reclaim.o + + + FILE_$(PRJ_DIR)/../btree/bt_reclaim.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_recno.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_recno.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_recno.c_objects +bt_recno.o + + + FILE_$(PRJ_DIR)/../btree/bt_recno.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_rsearch.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_rsearch.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_rsearch.c_objects +bt_rsearch.o + + + FILE_$(PRJ_DIR)/../btree/bt_rsearch.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_search.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_search.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_search.c_objects +bt_search.o + + + FILE_$(PRJ_DIR)/../btree/bt_search.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_split.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_split.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_split.c_objects +bt_split.o + + + FILE_$(PRJ_DIR)/../btree/bt_split.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_stat.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_stat.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_stat.c_objects +bt_stat.o + + + FILE_$(PRJ_DIR)/../btree/bt_stat.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/bt_upgrade.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/bt_upgrade.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/bt_upgrade.c_objects +bt_upgrade.o + + + FILE_$(PRJ_DIR)/../btree/bt_upgrade.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../btree/btree_auto.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../btree/btree_auto.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../btree/btree_auto.c_objects +btree_auto.o + + + FILE_$(PRJ_DIR)/../btree/btree_auto.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../clib/getopt.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../clib/getopt.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + 
$(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../clib/getopt.c_objects +getopt.o + + + FILE_$(PRJ_DIR)/../clib/getopt.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../clib/snprintf.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../clib/snprintf.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../clib/snprintf.c_objects +snprintf.o + + + FILE_$(PRJ_DIR)/../clib/snprintf.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../clib/strcasecmp.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../clib/strcasecmp.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../clib/strcasecmp.c_objects +strcasecmp.o + + + FILE_$(PRJ_DIR)/../clib/strcasecmp.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../clib/strdup.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../clib/strdup.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../clib/strdup.c_objects +strdup.o + + + FILE_$(PRJ_DIR)/../clib/strdup.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../common/crypto_stub.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../common/crypto_stub.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../common/crypto_stub.c_objects +crypto_stub.o + + + FILE_$(PRJ_DIR)/../common/crypto_stub.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../common/db_byteorder.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../common/db_byteorder.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../common/db_byteorder.c_objects +db_byteorder.o + + + FILE_$(PRJ_DIR)/../common/db_byteorder.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../common/db_err.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../common/db_err.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../common/db_err.c_objects +db_err.o + + + FILE_$(PRJ_DIR)/../common/db_err.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../common/db_getlong.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../common/db_getlong.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../common/db_getlong.c_objects +db_getlong.o + + + FILE_$(PRJ_DIR)/../common/db_getlong.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../common/db_idspace.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../common/db_idspace.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../common/db_idspace.c_objects +db_idspace.o + + + FILE_$(PRJ_DIR)/../common/db_idspace.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../common/db_log2.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../common/db_log2.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../common/db_log2.c_objects +db_log2.o + + + FILE_$(PRJ_DIR)/../common/db_log2.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../common/util_arg.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../common/util_arg.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../common/util_arg.c_objects +util_arg.o + + + FILE_$(PRJ_DIR)/../common/util_arg.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../common/util_cache.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../common/util_cache.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../common/util_cache.c_objects +util_cache.o + + + 
FILE_$(PRJ_DIR)/../common/util_cache.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../common/util_log.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../common/util_log.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../common/util_log.c_objects +util_log.o + + + FILE_$(PRJ_DIR)/../common/util_log.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../common/util_sig.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../common/util_sig.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../common/util_sig.c_objects +util_sig.o + + + FILE_$(PRJ_DIR)/../common/util_sig.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/crdel_auto.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/crdel_auto.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/crdel_auto.c_objects +crdel_auto.o + + + FILE_$(PRJ_DIR)/../db/crdel_auto.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/crdel_rec.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/crdel_rec.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/crdel_rec.c_objects +crdel_rec.o + + + FILE_$(PRJ_DIR)/../db/crdel_rec.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db.c_objects +db.o + + + FILE_$(PRJ_DIR)/../db/db.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_am.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_am.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_am.c_objects +db_am.o + + + FILE_$(PRJ_DIR)/../db/db_am.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_auto.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_auto.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_auto.c_objects +db_auto.o + + + FILE_$(PRJ_DIR)/../db/db_auto.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_cam.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_cam.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_cam.c_objects +db_cam.o + + + FILE_$(PRJ_DIR)/../db/db_cam.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_conv.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_conv.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_conv.c_objects +db_conv.o + + + FILE_$(PRJ_DIR)/../db/db_conv.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_dispatch.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_dispatch.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_dispatch.c_objects +db_dispatch.o + + + FILE_$(PRJ_DIR)/../db/db_dispatch.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_dup.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_dup.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_dup.c_objects +db_dup.o + + + FILE_$(PRJ_DIR)/../db/db_dup.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_iface.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_iface.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_iface.c_objects +db_iface.o + + + 
FILE_$(PRJ_DIR)/../db/db_iface.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_join.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_join.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_join.c_objects +db_join.o + + + FILE_$(PRJ_DIR)/../db/db_join.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_meta.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_meta.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_meta.c_objects +db_meta.o + + + FILE_$(PRJ_DIR)/../db/db_meta.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_method.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_method.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_method.c_objects +db_method.o + + + FILE_$(PRJ_DIR)/../db/db_method.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_open.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_open.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_open.c_objects +db_open.o + + + FILE_$(PRJ_DIR)/../db/db_open.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_overflow.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_overflow.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_overflow.c_objects +db_overflow.o + + + FILE_$(PRJ_DIR)/../db/db_overflow.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_pr.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_pr.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_pr.c_objects +db_pr.o + + + FILE_$(PRJ_DIR)/../db/db_pr.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_rec.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_rec.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_rec.c_objects +db_rec.o + + + FILE_$(PRJ_DIR)/../db/db_rec.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_reclaim.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_reclaim.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_reclaim.c_objects +db_reclaim.o + + + FILE_$(PRJ_DIR)/../db/db_reclaim.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_remove.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_remove.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_remove.c_objects +db_remove.o + + + FILE_$(PRJ_DIR)/../db/db_remove.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_rename.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_rename.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_rename.c_objects +db_rename.o + + + FILE_$(PRJ_DIR)/../db/db_rename.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_ret.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_ret.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_ret.c_objects +db_ret.o + + + FILE_$(PRJ_DIR)/../db/db_ret.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_setid.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_setid.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_setid.c_objects +db_setid.o + + + 
FILE_$(PRJ_DIR)/../db/db_setid.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_setlsn.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_setlsn.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_setlsn.c_objects +db_setlsn.o + + + FILE_$(PRJ_DIR)/../db/db_setlsn.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_stati.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_stati.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_stati.c_objects +db_stati.o + + + FILE_$(PRJ_DIR)/../db/db_stati.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_truncate.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_truncate.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_truncate.c_objects +db_truncate.o + + + FILE_$(PRJ_DIR)/../db/db_truncate.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_upg.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_upg.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_upg.c_objects +db_upg.o + + + FILE_$(PRJ_DIR)/../db/db_upg.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_upg_opd.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_upg_opd.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_upg_opd.c_objects +db_upg_opd.o + + + FILE_$(PRJ_DIR)/../db/db_upg_opd.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../db/db_vrfy_stub.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../db/db_vrfy_stub.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../db/db_vrfy_stub.c_objects +db_vrfy_stub.o + + + FILE_$(PRJ_DIR)/../db/db_vrfy_stub.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../dbreg/dbreg.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../dbreg/dbreg.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../dbreg/dbreg.c_objects +dbreg.o + + + FILE_$(PRJ_DIR)/../dbreg/dbreg.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_auto.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_auto.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_auto.c_objects +dbreg_auto.o + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_auto.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_rec.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_rec.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_rec.c_objects +dbreg_rec.o + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_rec.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_stat.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_stat.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_stat.c_objects +dbreg_stat.o + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_stat.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_util.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_util.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_util.c_objects +dbreg_util.o + + + FILE_$(PRJ_DIR)/../dbreg/dbreg_util.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../env/db_salloc.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../env/db_salloc.c_dependencies +$(PRJ_DIR)/db_config.h 
\ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../env/db_salloc.c_objects +db_salloc.o + + + FILE_$(PRJ_DIR)/../env/db_salloc.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../env/db_shash.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../env/db_shash.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../env/db_shash.c_objects +db_shash.o + + + FILE_$(PRJ_DIR)/../env/db_shash.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../env/env_file.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../env/env_file.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../env/env_file.c_objects +env_file.o + + + FILE_$(PRJ_DIR)/../env/env_file.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../env/env_method.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../env/env_method.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../env/env_method.c_objects +env_method.o + + + FILE_$(PRJ_DIR)/../env/env_method.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../env/env_open.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../env/env_open.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../env/env_open.c_objects +env_open.o + + + FILE_$(PRJ_DIR)/../env/env_open.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../env/env_recover.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../env/env_recover.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../env/env_recover.c_objects +env_recover.o + + + FILE_$(PRJ_DIR)/../env/env_recover.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../env/env_region.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../env/env_region.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../env/env_region.c_objects +env_region.o + + + FILE_$(PRJ_DIR)/../env/env_region.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../env/env_stat.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../env/env_stat.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../env/env_stat.c_objects +env_stat.o + + + FILE_$(PRJ_DIR)/../env/env_stat.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../fileops/fileops_auto.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../fileops/fileops_auto.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../fileops/fileops_auto.c_objects +fileops_auto.o + + + FILE_$(PRJ_DIR)/../fileops/fileops_auto.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../fileops/fop_basic.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../fileops/fop_basic.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../fileops/fop_basic.c_objects +fop_basic.o + + + FILE_$(PRJ_DIR)/../fileops/fop_basic.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../fileops/fop_rec.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../fileops/fop_rec.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../fileops/fop_rec.c_objects +fop_rec.o + + + FILE_$(PRJ_DIR)/../fileops/fop_rec.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../fileops/fop_util.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../fileops/fop_util.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../fileops/fop_util.c_objects +fop_util.o + + + 
FILE_$(PRJ_DIR)/../fileops/fop_util.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../hash/hash_func.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../hash/hash_func.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../hash/hash_func.c_objects +hash_func.o + + + FILE_$(PRJ_DIR)/../hash/hash_func.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../hash/hash_stub.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../hash/hash_stub.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../hash/hash_stub.c_objects +hash_stub.o + + + FILE_$(PRJ_DIR)/../hash/hash_stub.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../hmac/hmac.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../hmac/hmac.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../hmac/hmac.c_objects +hmac.o + + + FILE_$(PRJ_DIR)/../hmac/hmac.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../hmac/sha1.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../hmac/sha1.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../hmac/sha1.c_objects +sha1.o + + + FILE_$(PRJ_DIR)/../hmac/sha1.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../lock/lock.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock.c_objects +lock.o + + + FILE_$(PRJ_DIR)/../lock/lock.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../lock/lock_deadlock.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock_deadlock.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock_deadlock.c_objects +lock_deadlock.o + + + FILE_$(PRJ_DIR)/../lock/lock_deadlock.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../lock/lock_id.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock_id.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock_id.c_objects +lock_id.o + + + FILE_$(PRJ_DIR)/../lock/lock_id.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../lock/lock_list.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock_list.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock_list.c_objects +lock_list.o + + + FILE_$(PRJ_DIR)/../lock/lock_list.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../lock/lock_method.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock_method.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock_method.c_objects +lock_method.o + + + FILE_$(PRJ_DIR)/../lock/lock_method.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../lock/lock_region.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock_region.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock_region.c_objects +lock_region.o + + + FILE_$(PRJ_DIR)/../lock/lock_region.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../lock/lock_stat.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock_stat.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock_stat.c_objects +lock_stat.o + + + FILE_$(PRJ_DIR)/../lock/lock_stat.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../lock/lock_timer.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock_timer.c_dependencies +$(PRJ_DIR)/db_config.h \ + 
$(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock_timer.c_objects +lock_timer.o + + + FILE_$(PRJ_DIR)/../lock/lock_timer.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../lock/lock_util.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../lock/lock_util.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../lock/lock_util.c_objects +lock_util.o + + + FILE_$(PRJ_DIR)/../lock/lock_util.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../log/log.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../log/log.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../log/log.c_objects +log.o + + + FILE_$(PRJ_DIR)/../log/log.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../log/log_archive.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../log/log_archive.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../log/log_archive.c_objects +log_archive.o + + + FILE_$(PRJ_DIR)/../log/log_archive.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../log/log_compare.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../log/log_compare.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../log/log_compare.c_objects +log_compare.o + + + FILE_$(PRJ_DIR)/../log/log_compare.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../log/log_get.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../log/log_get.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../log/log_get.c_objects +log_get.o + + + FILE_$(PRJ_DIR)/../log/log_get.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../log/log_method.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../log/log_method.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../log/log_method.c_objects +log_method.o + + + FILE_$(PRJ_DIR)/../log/log_method.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../log/log_put.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../log/log_put.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../log/log_put.c_objects +log_put.o + + + FILE_$(PRJ_DIR)/../log/log_put.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../log/log_stat.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../log/log_stat.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../log/log_stat.c_objects +log_stat.o + + + FILE_$(PRJ_DIR)/../log/log_stat.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_alloc.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_alloc.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_alloc.c_objects +mp_alloc.o + + + FILE_$(PRJ_DIR)/../mp/mp_alloc.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_bh.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_bh.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_bh.c_objects +mp_bh.o + + + FILE_$(PRJ_DIR)/../mp/mp_bh.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_fget.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_fget.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_fget.c_objects +mp_fget.o + + + FILE_$(PRJ_DIR)/../mp/mp_fget.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_fmethod.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_fmethod.c_dependencies 
+$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_fmethod.c_objects +mp_fmethod.o + + + FILE_$(PRJ_DIR)/../mp/mp_fmethod.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_fopen.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_fopen.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_fopen.c_objects +mp_fopen.o + + + FILE_$(PRJ_DIR)/../mp/mp_fopen.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_fput.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_fput.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_fput.c_objects +mp_fput.o + + + FILE_$(PRJ_DIR)/../mp/mp_fput.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_fset.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_fset.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_fset.c_objects +mp_fset.o + + + FILE_$(PRJ_DIR)/../mp/mp_fset.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_method.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_method.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_method.c_objects +mp_method.o + + + FILE_$(PRJ_DIR)/../mp/mp_method.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_region.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_region.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_region.c_objects +mp_region.o + + + FILE_$(PRJ_DIR)/../mp/mp_region.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_register.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_register.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_register.c_objects +mp_register.o + + + FILE_$(PRJ_DIR)/../mp/mp_register.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_stat.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_stat.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_stat.c_objects +mp_stat.o + + + FILE_$(PRJ_DIR)/../mp/mp_stat.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_sync.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_sync.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_sync.c_objects +mp_sync.o + + + FILE_$(PRJ_DIR)/../mp/mp_sync.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mp/mp_trickle.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mp/mp_trickle.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mp/mp_trickle.c_objects +mp_trickle.o + + + FILE_$(PRJ_DIR)/../mp/mp_trickle.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mutex/mut_tas.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mutex/mut_tas.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mutex/mut_tas.c_objects +mut_tas.o + + + FILE_$(PRJ_DIR)/../mutex/mut_tas.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../mutex/mutex.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../mutex/mutex.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../mutex/mutex.c_objects +mutex.o + + + FILE_$(PRJ_DIR)/../mutex/mutex.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_alloc.c_dependDone +TRUE + + + 
FILE_$(PRJ_DIR)/../os/os_alloc.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_alloc.c_objects +os_alloc.o + + + FILE_$(PRJ_DIR)/../os/os_alloc.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_clock.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_clock.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_clock.c_objects +os_clock.o + + + FILE_$(PRJ_DIR)/../os/os_clock.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_dir.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_dir.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_dir.c_objects +os_dir.o + + + FILE_$(PRJ_DIR)/../os/os_dir.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_errno.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_errno.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_errno.c_objects +os_errno.o + + + FILE_$(PRJ_DIR)/../os/os_errno.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_fid.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_fid.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_fid.c_objects +os_fid.o + + + FILE_$(PRJ_DIR)/../os/os_fid.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_fsync.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_fsync.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_fsync.c_objects +os_fsync.o + + + FILE_$(PRJ_DIR)/../os/os_fsync.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_handle.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_handle.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_handle.c_objects +os_handle.o + + + FILE_$(PRJ_DIR)/../os/os_handle.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_id.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_id.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_id.c_objects +os_id.o + + + FILE_$(PRJ_DIR)/../os/os_id.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_method.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_method.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_method.c_objects +os_method.o + + + FILE_$(PRJ_DIR)/../os/os_method.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_oflags.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_oflags.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_oflags.c_objects +os_oflags.o + + + FILE_$(PRJ_DIR)/../os/os_oflags.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_open.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_open.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_open.c_objects +os_open.o + + + FILE_$(PRJ_DIR)/../os/os_open.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_region.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_region.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_region.c_objects +os_region.o + + + FILE_$(PRJ_DIR)/../os/os_region.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_rename.c_dependDone +TRUE + + + 
FILE_$(PRJ_DIR)/../os/os_rename.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_rename.c_objects +os_rename.o + + + FILE_$(PRJ_DIR)/../os/os_rename.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_root.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_root.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_root.c_objects +os_root.o + + + FILE_$(PRJ_DIR)/../os/os_root.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_rpath.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_rpath.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_rpath.c_objects +os_rpath.o + + + FILE_$(PRJ_DIR)/../os/os_rpath.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_rw.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_rw.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_rw.c_objects +os_rw.o + + + FILE_$(PRJ_DIR)/../os/os_rw.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_seek.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_seek.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_seek.c_objects +os_seek.o + + + FILE_$(PRJ_DIR)/../os/os_seek.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_sleep.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_sleep.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_sleep.c_objects +os_sleep.o + + + FILE_$(PRJ_DIR)/../os/os_sleep.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_spin.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_spin.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_spin.c_objects +os_spin.o + + + FILE_$(PRJ_DIR)/../os/os_spin.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_stat.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_stat.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_stat.c_objects +os_stat.o + + + FILE_$(PRJ_DIR)/../os/os_stat.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_tmpdir.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_tmpdir.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_tmpdir.c_objects +os_tmpdir.o + + + FILE_$(PRJ_DIR)/../os/os_tmpdir.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_truncate.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_truncate.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_truncate.c_objects +os_truncate.o + + + FILE_$(PRJ_DIR)/../os/os_truncate.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os/os_unlink.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os/os_unlink.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os/os_unlink.c_objects +os_unlink.o + + + FILE_$(PRJ_DIR)/../os/os_unlink.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_abs.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_abs.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_abs.c_objects +os_vx_abs.o + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_abs.c_tool +C/C++ compiler + + + 
FILE_$(PRJ_DIR)/../os_vxworks/os_vx_config.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_config.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_config.c_objects +os_vx_config.o + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_config.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_map.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_map.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_map.c_objects +os_vx_map.o + + + FILE_$(PRJ_DIR)/../os_vxworks/os_vx_map.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../qam/qam_stub.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../qam/qam_stub.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../qam/qam_stub.c_objects +qam_stub.o + + + FILE_$(PRJ_DIR)/../qam/qam_stub.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../rep/rep_stub.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../rep/rep_stub.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../rep/rep_stub.c_objects +rep_stub.o + + + FILE_$(PRJ_DIR)/../rep/rep_stub.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../txn/txn.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../txn/txn.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../txn/txn.c_objects +txn.o + + + FILE_$(PRJ_DIR)/../txn/txn.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../txn/txn_auto.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../txn/txn_auto.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../txn/txn_auto.c_objects +txn_auto.o + + + FILE_$(PRJ_DIR)/../txn/txn_auto.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../txn/txn_method.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../txn/txn_method.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../txn/txn_method.c_objects +txn_method.o + + + FILE_$(PRJ_DIR)/../txn/txn_method.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../txn/txn_rec.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../txn/txn_rec.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../txn/txn_rec.c_objects +txn_rec.o + + + FILE_$(PRJ_DIR)/../txn/txn_rec.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../txn/txn_recover.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../txn/txn_recover.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../txn/txn_recover.c_objects +txn_recover.o + + + FILE_$(PRJ_DIR)/../txn/txn_recover.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../txn/txn_region.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../txn/txn_region.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../txn/txn_region.c_objects +txn_region.o + + + FILE_$(PRJ_DIR)/../txn/txn_region.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../txn/txn_stat.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../txn/txn_stat.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../txn/txn_stat.c_objects +txn_stat.o + + + FILE_$(PRJ_DIR)/../txn/txn_stat.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../txn/txn_util.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../txn/txn_util.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + 
FILE_$(PRJ_DIR)/../txn/txn_util.c_objects +txn_util.o + + + FILE_$(PRJ_DIR)/../txn/txn_util.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../xa/xa.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../xa/xa.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../xa/xa.c_objects +xa.o + + + FILE_$(PRJ_DIR)/../xa/xa.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../xa/xa_db.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../xa/xa_db.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../xa/xa_db.c_objects +xa_db.o + + + FILE_$(PRJ_DIR)/../xa/xa_db.c_tool +C/C++ compiler + + + FILE_$(PRJ_DIR)/../xa/xa_map.c_dependDone +TRUE + + + FILE_$(PRJ_DIR)/../xa/xa_map.c_dependencies +$(PRJ_DIR)/db_config.h \ + $(PRJ_DIR)/db_int.h \ + $(PRJ_DIR)/db.h + + + FILE_$(PRJ_DIR)/../xa/xa_map.c_objects +xa_map.o + + + FILE_$(PRJ_DIR)/../xa/xa_map.c_tool +C/C++ compiler + + + PROJECT_FILES +$(PRJ_DIR)/../btree/bt_compare.c \ + $(PRJ_DIR)/../btree/bt_conv.c \ + $(PRJ_DIR)/../btree/bt_curadj.c \ + $(PRJ_DIR)/../btree/bt_cursor.c \ + $(PRJ_DIR)/../btree/bt_delete.c \ + $(PRJ_DIR)/../btree/bt_method.c \ + $(PRJ_DIR)/../btree/bt_open.c \ + $(PRJ_DIR)/../btree/bt_put.c \ + $(PRJ_DIR)/../btree/bt_rec.c \ + $(PRJ_DIR)/../btree/bt_reclaim.c \ + $(PRJ_DIR)/../btree/bt_recno.c \ + $(PRJ_DIR)/../btree/bt_rsearch.c \ + $(PRJ_DIR)/../btree/bt_search.c \ + $(PRJ_DIR)/../btree/bt_split.c \ + $(PRJ_DIR)/../btree/bt_stat.c \ + $(PRJ_DIR)/../btree/bt_upgrade.c \ + $(PRJ_DIR)/../btree/btree_auto.c \ + $(PRJ_DIR)/../clib/getopt.c \ + $(PRJ_DIR)/../clib/snprintf.c \ + $(PRJ_DIR)/../clib/strcasecmp.c \ + $(PRJ_DIR)/../clib/strdup.c \ + $(PRJ_DIR)/../common/crypto_stub.c \ + $(PRJ_DIR)/../common/db_byteorder.c \ + $(PRJ_DIR)/../common/db_err.c \ + $(PRJ_DIR)/../common/db_getlong.c \ + $(PRJ_DIR)/../common/db_idspace.c \ + $(PRJ_DIR)/../common/db_log2.c \ + $(PRJ_DIR)/../common/util_arg.c \ + $(PRJ_DIR)/../common/util_cache.c \ + $(PRJ_DIR)/../common/util_log.c \ + $(PRJ_DIR)/../common/util_sig.c \ + $(PRJ_DIR)/../db/crdel_auto.c \ + $(PRJ_DIR)/../db/crdel_rec.c \ + $(PRJ_DIR)/../db/db.c \ + $(PRJ_DIR)/../db/db_am.c \ + $(PRJ_DIR)/../db/db_auto.c \ + $(PRJ_DIR)/../db/db_cam.c \ + $(PRJ_DIR)/../db/db_conv.c \ + $(PRJ_DIR)/../db/db_dispatch.c \ + $(PRJ_DIR)/../db/db_dup.c \ + $(PRJ_DIR)/../db/db_iface.c \ + $(PRJ_DIR)/../db/db_join.c \ + $(PRJ_DIR)/../db/db_meta.c \ + $(PRJ_DIR)/../db/db_method.c \ + $(PRJ_DIR)/../db/db_open.c \ + $(PRJ_DIR)/../db/db_overflow.c \ + $(PRJ_DIR)/../db/db_pr.c \ + $(PRJ_DIR)/../db/db_rec.c \ + $(PRJ_DIR)/../db/db_reclaim.c \ + $(PRJ_DIR)/../db/db_remove.c \ + $(PRJ_DIR)/../db/db_rename.c \ + $(PRJ_DIR)/../db/db_ret.c \ + $(PRJ_DIR)/../db/db_setid.c \ + $(PRJ_DIR)/../db/db_setlsn.c \ + $(PRJ_DIR)/../db/db_stati.c \ + $(PRJ_DIR)/../db/db_truncate.c \ + $(PRJ_DIR)/../db/db_upg.c \ + $(PRJ_DIR)/../db/db_upg_opd.c \ + $(PRJ_DIR)/../db/db_vrfy_stub.c \ + $(PRJ_DIR)/../dbreg/dbreg.c \ + $(PRJ_DIR)/../dbreg/dbreg_auto.c \ + $(PRJ_DIR)/../dbreg/dbreg_rec.c \ + $(PRJ_DIR)/../dbreg/dbreg_stat.c \ + $(PRJ_DIR)/../dbreg/dbreg_util.c \ + $(PRJ_DIR)/../env/db_salloc.c \ + $(PRJ_DIR)/../env/db_shash.c \ + $(PRJ_DIR)/../env/env_file.c \ + $(PRJ_DIR)/../env/env_method.c \ + $(PRJ_DIR)/../env/env_open.c \ + $(PRJ_DIR)/../env/env_recover.c \ + $(PRJ_DIR)/../env/env_region.c \ + $(PRJ_DIR)/../env/env_stat.c \ + $(PRJ_DIR)/../fileops/fileops_auto.c \ + $(PRJ_DIR)/../fileops/fop_basic.c \ + $(PRJ_DIR)/../fileops/fop_rec.c \ + 
$(PRJ_DIR)/../fileops/fop_util.c \ + $(PRJ_DIR)/../hash/hash_func.c \ + $(PRJ_DIR)/../hash/hash_stub.c \ + $(PRJ_DIR)/../hmac/hmac.c \ + $(PRJ_DIR)/../hmac/sha1.c \ + $(PRJ_DIR)/../lock/lock.c \ + $(PRJ_DIR)/../lock/lock_deadlock.c \ + $(PRJ_DIR)/../lock/lock_id.c \ + $(PRJ_DIR)/../lock/lock_list.c \ + $(PRJ_DIR)/../lock/lock_method.c \ + $(PRJ_DIR)/../lock/lock_region.c \ + $(PRJ_DIR)/../lock/lock_stat.c \ + $(PRJ_DIR)/../lock/lock_timer.c \ + $(PRJ_DIR)/../lock/lock_util.c \ + $(PRJ_DIR)/../log/log.c \ + $(PRJ_DIR)/../log/log_archive.c \ + $(PRJ_DIR)/../log/log_compare.c \ + $(PRJ_DIR)/../log/log_get.c \ + $(PRJ_DIR)/../log/log_method.c \ + $(PRJ_DIR)/../log/log_put.c \ + $(PRJ_DIR)/../log/log_stat.c \ + $(PRJ_DIR)/../mp/mp_alloc.c \ + $(PRJ_DIR)/../mp/mp_bh.c \ + $(PRJ_DIR)/../mp/mp_fget.c \ + $(PRJ_DIR)/../mp/mp_fmethod.c \ + $(PRJ_DIR)/../mp/mp_fopen.c \ + $(PRJ_DIR)/../mp/mp_fput.c \ + $(PRJ_DIR)/../mp/mp_fset.c \ + $(PRJ_DIR)/../mp/mp_method.c \ + $(PRJ_DIR)/../mp/mp_region.c \ + $(PRJ_DIR)/../mp/mp_register.c \ + $(PRJ_DIR)/../mp/mp_stat.c \ + $(PRJ_DIR)/../mp/mp_sync.c \ + $(PRJ_DIR)/../mp/mp_trickle.c \ + $(PRJ_DIR)/../mutex/mut_tas.c \ + $(PRJ_DIR)/../mutex/mutex.c \ + $(PRJ_DIR)/../os/os_alloc.c \ + $(PRJ_DIR)/../os/os_clock.c \ + $(PRJ_DIR)/../os/os_dir.c \ + $(PRJ_DIR)/../os/os_errno.c \ + $(PRJ_DIR)/../os/os_fid.c \ + $(PRJ_DIR)/../os/os_fsync.c \ + $(PRJ_DIR)/../os/os_handle.c \ + $(PRJ_DIR)/../os/os_id.c \ + $(PRJ_DIR)/../os/os_method.c \ + $(PRJ_DIR)/../os/os_oflags.c \ + $(PRJ_DIR)/../os/os_open.c \ + $(PRJ_DIR)/../os/os_region.c \ + $(PRJ_DIR)/../os/os_rename.c \ + $(PRJ_DIR)/../os/os_root.c \ + $(PRJ_DIR)/../os/os_rpath.c \ + $(PRJ_DIR)/../os/os_rw.c \ + $(PRJ_DIR)/../os/os_seek.c \ + $(PRJ_DIR)/../os/os_sleep.c \ + $(PRJ_DIR)/../os/os_spin.c \ + $(PRJ_DIR)/../os/os_stat.c \ + $(PRJ_DIR)/../os/os_tmpdir.c \ + $(PRJ_DIR)/../os/os_truncate.c \ + $(PRJ_DIR)/../os/os_unlink.c \ + $(PRJ_DIR)/../os_vxworks/os_vx_abs.c \ + $(PRJ_DIR)/../os_vxworks/os_vx_config.c \ + $(PRJ_DIR)/../os_vxworks/os_vx_map.c \ + $(PRJ_DIR)/../qam/qam_stub.c \ + $(PRJ_DIR)/../rep/rep_stub.c \ + $(PRJ_DIR)/../txn/txn.c \ + $(PRJ_DIR)/../txn/txn_auto.c \ + $(PRJ_DIR)/../txn/txn_method.c \ + $(PRJ_DIR)/../txn/txn_rec.c \ + $(PRJ_DIR)/../txn/txn_recover.c \ + $(PRJ_DIR)/../txn/txn_region.c \ + $(PRJ_DIR)/../txn/txn_stat.c \ + $(PRJ_DIR)/../txn/txn_util.c \ + $(PRJ_DIR)/../xa/xa.c \ + $(PRJ_DIR)/../xa/xa_db.c \ + $(PRJ_DIR)/../xa/xa_map.c + + + userComments +BerkeleyDB + diff --git a/db/build_vxworks/BerkeleyDB22small.wsp b/db/build_vxworks/BerkeleyDB22small.wsp new file mode 100755 index 000000000..93f2ea86d --- /dev/null +++ b/db/build_vxworks/BerkeleyDB22small.wsp @@ -0,0 +1,29 @@ +Document file - DO NOT EDIT + + CORE_INFO_TYPE +Workspace + + + CORE_INFO_VERSION +2.2 + + + projectList +$(PRJ_DIR)/BerkeleyDB22small.wpj \ + $(PRJ_DIR)/db_archive/db_archive22.wpj \ + $(PRJ_DIR)/db_checkpoint/db_checkpoint22.wpj \ + $(PRJ_DIR)/db_deadlock/db_deadlock22.wpj \ + $(PRJ_DIR)/db_dump/db_dump22.wpj \ + $(PRJ_DIR)/db_load/db_load22.wpj \ + $(PRJ_DIR)/db_printlog/db_printlog22.wpj \ + $(PRJ_DIR)/db_recover/db_recover22.wpj \ + $(PRJ_DIR)/db_stat/db_stat22.wpj \ + $(PRJ_DIR)/db_upgrade/db_upgrade22.wpj \ + $(PRJ_DIR)/db_verify/db_verify22.wpj \ + $(PRJ_DIR)/dbdemo/dbdemo22.wpj + + + userComments + + + diff --git a/db/build_vxworks/db.h b/db/build_vxworks/db.h index b9aaad78e..651946864 100644 --- a/db/build_vxworks/db.h +++ b/db/build_vxworks/db.h @@ -2,10 +2,10 @@ /* * See the file LICENSE for redistribution 
information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: db.in,v 11.389 2003/10/01 21:33:58 sue Exp $ + * $Id: db.in,v 11.463 2004/10/11 18:47:50 bostic Exp $ * * db.h include file layout: * General. @@ -24,8 +24,6 @@ #ifndef __NO_SYSTEM_INCLUDES #include - -/* does not include on some systems. */ #include #endif @@ -46,9 +44,9 @@ extern "C" { * Berkeley DB version information. */ #define DB_VERSION_MAJOR 4 -#define DB_VERSION_MINOR 2 -#define DB_VERSION_PATCH 52 -#define DB_VERSION_STRING "Sleepycat Software: Berkeley DB 4.2.52: (December 3, 2003)" +#define DB_VERSION_MINOR 3 +#define DB_VERSION_PATCH 14 +#define DB_VERSION_STRING "Sleepycat Software: Berkeley DB 4.3.14: (October 14, 2004)" /* * !!! @@ -68,9 +66,37 @@ extern "C" { typedef unsigned char u_int8_t; typedef unsigned short u_int16_t; typedef unsigned int u_int32_t; + + #endif +/* + * uintmax_t -- + * Largest unsigned type, used to align structures in memory. We don't store + * floating point types in structures, so integral types should be sufficient + * (and we don't have to worry about systems that store floats in other than + * power-of-2 numbers of bytes). Additionally this fixes compilers that rewrite + * structure assignments and ANSI C memcpy calls to be in-line instructions + * that happen to require alignment. Note: this alignment isn't sufficient for + * mutexes, which depend on things like cache line alignment. Mutex alignment + * is handled separately, in mutex.h. + * + * uintptr_t -- + * Unsigned type that's the same size as a pointer. There are places where + * DB modifies pointers by discarding the bottom bits to guarantee alignment. + * We can't use uintmax_t, it may be larger than the pointer, and compilers + * get upset about that. So far we haven't run on any machine where there's + * no unsigned type the same size as a pointer -- here's hoping. + */ +@uintmax_t_decl@ +@uintptr_t_decl@ + +/* + * Sequences are only available on machines with 64-bit integral types. + */ +typedef int db_seq_t; + /* Basic types that are exported or quasi-exported. */ typedef u_int32_t db_pgno_t; /* Page number type. */ typedef u_int16_t db_indx_t; /* Page offset type. */ @@ -82,11 +108,13 @@ typedef u_int32_t db_recno_t; /* Record number type. */ typedef u_int32_t db_timeout_t; /* Type of a timeout. */ /* - * Region offsets are currently limited to 32-bits. I expect that's going - * to have to be fixed in the not-too-distant future, since we won't want to - * split 100Gb memory pools into that many different regions. + * Region offsets are the difference between a pointer in a region and the + * region's base address. With private environments, both addresses are the + * result of calling malloc, and we can't assume anything about what malloc + * will return, so region offsets have to be able to hold differences between + * arbitrary pointers. 
*/ -typedef u_int32_t roff_t; +typedef uintptr_t roff_t; /* * Forward structure declarations, so we can declare pointers and @@ -113,6 +141,9 @@ struct __db_preplist; typedef struct __db_preplist DB_PREPLIST; struct __db_qam_stat; typedef struct __db_qam_stat DB_QUEUE_STAT; struct __db_rep; typedef struct __db_rep DB_REP; struct __db_rep_stat; typedef struct __db_rep_stat DB_REP_STAT; +struct __db_sequence; typedef struct __db_sequence DB_SEQUENCE; +struct __db_seq_record; typedef struct __db_seq_record DB_SEQ_RECORD; +struct __db_seq_stat; typedef struct __db_seq_stat DB_SEQUENCE_STAT; struct __db_txn; typedef struct __db_txn DB_TXN; struct __db_txn_active; typedef struct __db_txn_active DB_TXN_ACTIVE; struct __db_txn_stat; typedef struct __db_txn_stat DB_TXN_STAT; @@ -173,6 +204,8 @@ struct __db_dbt { * DB_AUTO_COMMIT: * DB_ENV->set_flags, DB->associate, DB->del, DB->put, DB->open, * DB->remove, DB->rename, DB->truncate + * DB_DEGREE_2: + * DB->cursor, DB->get, DB->join, DBcursor->c_get, DB_ENV->txn_begin * DB_DIRTY_READ: * DB->cursor, DB->get, DB->join, DB->open, DBcursor->c_get, * DB_ENV->txn_begin @@ -181,13 +214,14 @@ struct __db_dbt { * DB->remove, DB->rename, DB->truncate * * !!! - * The DB_DIRTY_READ bit mask can't be changed without also changing the - * masks for the flags that can be OR'd into DB access method and cursor - * operation values. + * The DB_DIRTY_READ and DB_DEGREE_2 bit masks can't be changed without + * also changing the masks for the flags that can be OR'd into DB + * access method and cursor operation values. */ -#define DB_AUTO_COMMIT 0x1000000 /* Implied transaction. */ -#define DB_DIRTY_READ 0x2000000 /* Dirty Read. */ -#define DB_NO_AUTO_COMMIT 0x4000000 /* Override env-wide AUTO-COMMIT. */ +#define DB_AUTO_COMMIT 0x01000000/* Implied transaction. */ +#define DB_DEGREE_2 0x02000000/* Degree 2. */ +#define DB_DIRTY_READ 0x04000000/* Dirty Read. */ +#define DB_NO_AUTO_COMMIT 0x08000000/* Override env-wide AUTOCOMMIT. */ /* * Flags private to db_env_create. @@ -236,19 +270,25 @@ struct __db_dbt { /* * Flags private to DB_ENV->set_flags. - * Shared flags up to 0x0000800 */ -#define DB_CDB_ALLDB 0x0001000 /* Set CDB locking per environment. */ -#define DB_DIRECT_DB 0x0002000 /* Don't buffer databases in the OS. */ -#define DB_DIRECT_LOG 0x0004000 /* Don't buffer log files in the OS. */ -#define DB_LOG_AUTOREMOVE 0x0008000 /* Automatically remove log files. */ -#define DB_NOLOCKING 0x0010000 /* Set locking/mutex behavior. */ -#define DB_NOPANIC 0x0020000 /* Set panic state per DB_ENV. */ -#define DB_OVERWRITE 0x0040000 /* Overwrite unlinked region files. */ -#define DB_PANIC_ENVIRONMENT 0x0080000 /* Set panic state per environment. */ -#define DB_REGION_INIT 0x0100000 /* Page-fault regions on open. */ -#define DB_TIME_NOTGRANTED 0x0200000 /* Return NOTGRANTED on timeout. */ -#define DB_TXN_WRITE_NOSYNC 0x0400000 /* Write, don't sync, on txn commit. */ -#define DB_YIELDCPU 0x0800000 /* Yield the CPU (a lot). */ + * Shared flags up to 0x00000800 */ +#define DB_CDB_ALLDB 0x00001000/* Set CDB locking per environment. */ +#define DB_DIRECT_DB 0x00002000/* Don't buffer databases in the OS. */ +#define DB_DIRECT_LOG 0x00004000/* Don't buffer log files in the OS. */ +#define DB_DSYNC_LOG 0x00008000/* Set O_DSYNC on the log. */ +#define DB_LOG_AUTOREMOVE 0x00010000/* Automatically remove log files. */ +#define DB_LOG_INMEMORY 0x00020000/* Store logs in buffers in memory. */ +#define DB_NOLOCKING 0x00040000/* Set locking/mutex behavior. 
*/ +#define DB_NOPANIC 0x00080000/* Set panic state per DB_ENV. */ +#define DB_OVERWRITE 0x00100000/* Overwrite unlinked region files. */ +#define DB_PANIC_ENVIRONMENT 0x00200000/* Set panic state per environment. */ +#define DB_REGION_INIT 0x00400000/* Page-fault regions on open. */ +#define DB_TIME_NOTGRANTED 0x00800000/* Return NOTGRANTED on timeout. */ +/* Shared flags at 0x01000000 */ +/* Shared flags at 0x02000000 */ +/* Shared flags at 0x04000000 */ +/* Shared flags at 0x08000000 */ +#define DB_TXN_WRITE_NOSYNC 0x10000000/* Write, don't sync, on txn commit. */ +#define DB_YIELDCPU 0x20000000/* Yield the CPU (a lot). */ /* * Flags private to DB->set_feedback's callback. @@ -260,8 +300,9 @@ struct __db_dbt { * Flags private to DB_MPOOLFILE->open. * Shared flags up to 0x0000800 */ #define DB_DIRECT 0x0001000 /* Don't buffer the file in the OS. */ -#define DB_EXTENT 0x0002000 /* UNDOC: dealing with an extent. */ -#define DB_ODDFILESIZE 0x0004000 /* Truncate file to N * pgsize. */ +#define DB_DURABLE_UNKNOWN 0x0002000 /* internal: durability on open. */ +#define DB_EXTENT 0x0004000 /* internal: dealing with an extent. */ +#define DB_ODDFILESIZE 0x0008000 /* Truncate file to N * pgsize. */ /* * Flags private to DB->set_flags. @@ -270,15 +311,23 @@ struct __db_dbt { #define DB_DUP 0x0000002 /* Btree, Hash: duplicate keys. */ #define DB_DUPSORT 0x0000004 /* Btree, Hash: duplicate keys. */ #define DB_ENCRYPT 0x0000008 /* Btree, Hash: duplicate keys. */ -#define DB_RECNUM 0x0000010 /* Btree: record numbers. */ -#define DB_RENUMBER 0x0000020 /* Recno: renumber on insert/delete. */ -#define DB_REVSPLITOFF 0x0000040 /* Btree: turn off reverse splits. */ -#define DB_SNAPSHOT 0x0000080 /* Recno: snapshot the input. */ +#define DB_INORDER 0x0000010 /* Queue: strict ordering on consume. */ +#define DB_RECNUM 0x0000020 /* Btree: record numbers. */ +#define DB_RENUMBER 0x0000040 /* Recno: renumber on insert/delete. */ +#define DB_REVSPLITOFF 0x0000080 /* Btree: turn off reverse splits. */ +#define DB_SNAPSHOT 0x0000100 /* Recno: snapshot the input. */ /* - * Flags private to the DB->stat methods. + * Flags private to the DB_ENV->stat_print, DB->stat and DB->stat_print methods. */ -#define DB_STAT_CLEAR 0x0000001 /* Clear stat after returning values. */ +#define DB_STAT_ALL 0x0000001 /* Print: Everything. */ +#define DB_STAT_CLEAR 0x0000002 /* Clear stat after returning values. */ +#define DB_STAT_LOCK_CONF 0x0000004 /* Print: Lock conflict matrix. */ +#define DB_STAT_LOCK_LOCKERS 0x0000008 /* Print: Lockers. */ +#define DB_STAT_LOCK_OBJECTS 0x0000010 /* Print: Lock objects. */ +#define DB_STAT_LOCK_PARAMS 0x0000020 /* Print: Lock parameters. */ +#define DB_STAT_MEMP_HASH 0x0000040 /* Print: Mpool hash buckets. */ +#define DB_STAT_SUBSYSTEM 0x0000080 /* Print: Subsystems too. */ /* * Flags private to DB->join. @@ -295,6 +344,7 @@ struct __db_dbt { #define DB_PR_RECOVERYTEST 0x0000010 /* Recovery test (-dr). */ #define DB_PRINTABLE 0x0000020 /* Use printable format for salvage. */ #define DB_SALVAGE 0x0000040 /* Salvage what looks like data. */ +#define DB_UNREF 0x0000080 /* Report unreferenced pages. */ /* * !!! * These must not go over 0x8000, or they will collide with the flags @@ -321,20 +371,22 @@ struct __db_dbt { #define DB_LOCK_NORUN 0 #define DB_LOCK_DEFAULT 1 /* Default policy. */ #define DB_LOCK_EXPIRE 2 /* Only expire locks, no detection. */ -#define DB_LOCK_MAXLOCKS 3 /* Abort txn with maximum # of locks. */ -#define DB_LOCK_MINLOCKS 4 /* Abort txn with minimum # of locks. 
*/ -#define DB_LOCK_MINWRITE 5 /* Abort txn with minimum writelocks. */ -#define DB_LOCK_OLDEST 6 /* Abort oldest transaction. */ -#define DB_LOCK_RANDOM 7 /* Abort random transaction. */ -#define DB_LOCK_YOUNGEST 8 /* Abort youngest transaction. */ +#define DB_LOCK_MAXLOCKS 3 /* Select locker with max locks. */ +#define DB_LOCK_MAXWRITE 4 /* Select locker with max writelocks. */ +#define DB_LOCK_MINLOCKS 5 /* Select locker with min locks. */ +#define DB_LOCK_MINWRITE 6 /* Select locker with min writelocks. */ +#define DB_LOCK_OLDEST 7 /* Select oldest locker. */ +#define DB_LOCK_RANDOM 8 /* Select random locker. */ +#define DB_LOCK_YOUNGEST 9 /* Select youngest locker. */ /* Flag values for lock_vec(), lock_get(). */ -#define DB_LOCK_NOWAIT 0x001 /* Don't wait on unavailable lock. */ -#define DB_LOCK_RECORD 0x002 /* Internal: record lock. */ -#define DB_LOCK_REMOVE 0x004 /* Internal: flag object removed. */ -#define DB_LOCK_SET_TIMEOUT 0x008 /* Internal: set lock timeout. */ -#define DB_LOCK_SWITCH 0x010 /* Internal: switch existing lock. */ -#define DB_LOCK_UPGRADE 0x020 /* Internal: upgrade existing lock. */ +#define DB_LOCK_ABORT 0x001 /* Internal: Lock during abort. */ +#define DB_LOCK_NOWAIT 0x002 /* Don't wait on unavailable lock. */ +#define DB_LOCK_RECORD 0x004 /* Internal: record lock. */ +#define DB_LOCK_REMOVE 0x008 /* Internal: flag object removed. */ +#define DB_LOCK_SET_TIMEOUT 0x010 /* Internal: set lock timeout. */ +#define DB_LOCK_SWITCH 0x020 /* Internal: switch existing lock. */ +#define DB_LOCK_UPGRADE 0x040 /* Internal: upgrade existing lock. */ /* * Simple R/W lock modes and for multi-granularity intention locking. @@ -378,16 +430,15 @@ typedef enum { */ typedef enum { DB_LSTAT_ABORTED=1, /* Lock belongs to an aborted txn. */ - DB_LSTAT_ERR=2, /* Lock is bad. */ - DB_LSTAT_EXPIRED=3, /* Lock has expired. */ - DB_LSTAT_FREE=4, /* Lock is unallocated. */ - DB_LSTAT_HELD=5, /* Lock is currently held. */ - DB_LSTAT_NOTEXIST=6, /* Object on which lock was waiting + DB_LSTAT_EXPIRED=2, /* Lock has expired. */ + DB_LSTAT_FREE=3, /* Lock is unallocated. */ + DB_LSTAT_HELD=4, /* Lock is currently held. */ + DB_LSTAT_NOTEXIST=5, /* Object on which lock was waiting * was removed */ - DB_LSTAT_PENDING=7, /* Lock was waiting and has been + DB_LSTAT_PENDING=6, /* Lock was waiting and has been * promoted; waiting for the owner * to run and upgrade it to held. */ - DB_LSTAT_WAITING=8 /* Lock is on the wait queue. */ + DB_LSTAT_WAITING=7 /* Lock is on the wait queue. */ }db_status_t; /* Lock statistics structure. */ @@ -397,7 +448,7 @@ struct __db_lock_stat { u_int32_t st_maxlocks; /* Maximum number of locks in table. */ u_int32_t st_maxlockers; /* Maximum num of lockers in table. */ u_int32_t st_maxobjects; /* Maximum num of objects in table. */ - u_int32_t st_nmodes; /* Number of lock modes. */ + int st_nmodes; /* Number of lock modes. */ u_int32_t st_nlocks; /* Current number of locks. */ u_int32_t st_maxnlocks; /* Maximum number of locks so far. */ u_int32_t st_nlockers; /* Current number of lockers. */ @@ -416,7 +467,7 @@ struct __db_lock_stat { u_int32_t st_ntxntimeouts; /* Number of transaction timeouts. */ u_int32_t st_region_wait; /* Region lock granted after wait. */ u_int32_t st_region_nowait; /* Region lock granted without wait. */ - u_int32_t st_regsize; /* Region size. */ + roff_t st_regsize; /* Region size. */ }; /* @@ -438,7 +489,7 @@ struct __db_ilock { * lock_get request (or a lock_vec/DB_LOCK_GET). 
*/ struct __db_lock_u { - size_t off; /* Offset of the lock in the region */ + roff_t off; /* Offset of the lock in the region */ u_int32_t ndx; /* Index of the object referenced by * this lock; used for locking. */ u_int32_t gen; /* Generation number of this lock. */ @@ -457,8 +508,8 @@ struct __db_lockreq { /******************************************************* * Logging. *******************************************************/ -#define DB_LOGVERSION 8 /* Current log version. */ -#define DB_LOGOLDVER 8 /* Oldest log version supported. */ +#define DB_LOGVERSION 10 /* Current log version. */ +#define DB_LOGOLDVER 10 /* Oldest log version supported. */ #define DB_LOGMAGIC 0x040988 /* Flag values for DB_ENV->log_archive(). */ @@ -474,7 +525,8 @@ struct __db_lockreq { #define DB_LOG_NOCOPY 0x008 /* Don't copy data */ #define DB_LOG_NOT_DURABLE 0x010 /* Do not log; keep in memory */ #define DB_LOG_PERM 0x020 /* Flag record with REP_PERMANENT */ -#define DB_LOG_WRNOSYNC 0x040 /* Write, don't sync log_put */ +#define DB_LOG_RESEND 0x040 /* Resent log record */ +#define DB_LOG_WRNOSYNC 0x080 /* Write, don't sync log_put */ /* * A DB_LSN has two parts, a fileid which identifies a specific file, and an @@ -538,7 +590,7 @@ struct __db_log_cursor { struct __db_log_stat { u_int32_t st_magic; /* Log file magic number. */ u_int32_t st_version; /* Log file version number. */ - int st_mode; /* Log file mode. */ + int st_mode; /* Log file mode. */ u_int32_t st_lg_bsize; /* Log buffer size. */ u_int32_t st_lg_size; /* Log file size. */ u_int32_t st_w_bytes; /* Bytes to log. */ @@ -554,11 +606,19 @@ struct __db_log_stat { u_int32_t st_cur_offset; /* Current log file offset. */ u_int32_t st_disk_file; /* Known on disk log file number. */ u_int32_t st_disk_offset; /* Known on disk log file offset. */ - u_int32_t st_regsize; /* Region size. */ + roff_t st_regsize; /* Region size. */ u_int32_t st_maxcommitperflush; /* Max number of commits in a flush. */ u_int32_t st_mincommitperflush; /* Min number of commits in a flush. */ }; +/* + * We need to record the first log record of a transaction. + * For user defined logging this macro returns the place to + * put that information, if it is need in rlsnp, otherwise it + * leaves it unchanged. + */ +#define DB_SET_BEGIN_LSNP(txn, rlsnp) ((txn)->set_begin_lsnp(txn, rlsnp)) + /******************************************************* * Shared buffer cache (mpool). *******************************************************/ @@ -571,6 +631,7 @@ struct __db_log_stat { #define DB_MPOOL_CLEAN 0x001 /* Page is not modified. */ #define DB_MPOOL_DIRTY 0x002 /* Page is modified. */ #define DB_MPOOL_DISCARD 0x004 /* Don't cache the page. */ +#define DB_MPOOL_FREE 0x008 /* Free page if present. */ /* Flags values for DB_MPOOLFILE->set_flags. */ #define DB_MPOOL_NOFILE 0x001 /* Never open a backing file. */ @@ -638,7 +699,7 @@ struct __db_mpoolfile { /* Methods. */ int (*close) __P((DB_MPOOLFILE *, u_int32_t)); int (*get) __P((DB_MPOOLFILE *, db_pgno_t *, u_int32_t, void *)); - int (*open)__P((DB_MPOOLFILE *, const char *, u_int32_t, int, size_t)); + int (*open) __P((DB_MPOOLFILE *, const char *, u_int32_t, int, size_t)); int (*put) __P((DB_MPOOLFILE *, void *, u_int32_t)); int (*set) __P((DB_MPOOLFILE *, void *, u_int32_t)); int (*get_clear_len) __P((DB_MPOOLFILE *, u_int32_t *)); @@ -664,7 +725,7 @@ struct __db_mpoolfile { * thread protected because they are initialized before the file is * linked onto the per-process lists, and never modified. 
* - * MP_FLUSH is thread protected becase it is potentially read/set by + * MP_FLUSH is thread protected because it is potentially read/set by * multiple threads of control. */ #define MP_FILEID_SET 0x001 /* Application supplied a file ID. */ @@ -674,14 +735,16 @@ struct __db_mpoolfile { u_int32_t flags; }; -/* - * Mpool statistics structure. - */ +/* Mpool statistics structure. */ struct __db_mpool_stat { u_int32_t st_gbytes; /* Total cache size: GB. */ u_int32_t st_bytes; /* Total cache size: B. */ u_int32_t st_ncache; /* Number of caches. */ - u_int32_t st_regsize; /* Cache size. */ + roff_t st_regsize; /* Region size. */ + size_t st_mmapsize; /* Maximum file size for mmap. */ + int st_maxopenfd; /* Maximum number of open fd's. */ + int st_maxwrite; /* Maximum buffers to write. */ + int st_maxwrite_sleep; /* Sleep after writing max buffers. */ u_int32_t st_map; /* Pages from mapped files. */ u_int32_t st_cache_hit; /* Pages found in the cache. */ u_int32_t st_cache_miss; /* Pages not found in the cache. */ @@ -713,7 +776,7 @@ struct __db_mpool_stat { /* Mpool file statistics structure. */ struct __db_mpool_fstat { char *file_name; /* File name. */ - size_t st_pagesize; /* Page size. */ + u_int32_t st_pagesize; /* Page size. */ u_int32_t st_map; /* Pages from mapped files. */ u_int32_t st_cache_hit; /* Pages found in the cache. */ u_int32_t st_cache_miss; /* Pages not found in the cache. */ @@ -733,10 +796,9 @@ typedef enum { DB_TXN_BACKWARD_ALLOC=2, /* Internal. */ DB_TXN_BACKWARD_ROLL=3, /* Public. */ DB_TXN_FORWARD_ROLL=4, /* Public. */ - DB_TXN_GETPGNOS=5, /* Internal. */ - DB_TXN_OPENFILES=6, /* Internal. */ - DB_TXN_POPENFILES=7, /* Internal. */ - DB_TXN_PRINT=8 /* Public. */ + DB_TXN_OPENFILES=5, /* Internal. */ + DB_TXN_POPENFILES=6, /* Internal. */ + DB_TXN_PRINT=7 /* Public. */ } db_recops; /* @@ -815,8 +877,8 @@ struct __db_txn { struct __db_txn **tqe_prev; } klinks; - /* API-private structure: used by C++ */ - void *api_internal; + void *api_internal; /* C++ API private. */ + void *xml_internal; /* XML API private. */ u_int32_t cursors; /* Number of cursors open for txn */ @@ -826,17 +888,20 @@ struct __db_txn { int (*discard) __P((DB_TXN *, u_int32_t)); u_int32_t (*id) __P((DB_TXN *)); int (*prepare) __P((DB_TXN *, u_int8_t *)); + void (*set_begin_lsnp) __P((DB_TXN *txn, DB_LSN **)); int (*set_timeout) __P((DB_TXN *, db_timeout_t, u_int32_t)); #define TXN_CHILDCOMMIT 0x001 /* Transaction that has committed. */ #define TXN_COMPENSATE 0x002 /* Compensating transaction. */ -#define TXN_DIRTY_READ 0x004 /* Transaction does dirty reads. */ -#define TXN_LOCKTIMEOUT 0x008 /* Transaction has a lock timeout. */ -#define TXN_MALLOC 0x010 /* Structure allocated by TXN system. */ -#define TXN_NOSYNC 0x020 /* Do not sync on prepare and commit. */ -#define TXN_NOWAIT 0x040 /* Do not wait on locks. */ -#define TXN_RESTORED 0x080 /* Transaction has been restored. */ -#define TXN_SYNC 0x100 /* Sync on prepare and commit. */ +#define TXN_DEADLOCK 0x004 /* Transaction has deadlocked. */ +#define TXN_DEGREE_2 0x008 /* Has degree 2 isolation. */ +#define TXN_DIRTY_READ 0x010 /* Transaction does dirty reads. */ +#define TXN_LOCKTIMEOUT 0x020 /* Transaction has a lock timeout. */ +#define TXN_MALLOC 0x040 /* Structure allocated by TXN system. */ +#define TXN_NOSYNC 0x080 /* Do not sync on prepare and commit. */ +#define TXN_NOWAIT 0x100 /* Do not wait on locks. */ +#define TXN_RESTORED 0x200 /* Transaction has been restored. */ +#define TXN_SYNC 0x400 /* Sync on prepare and commit. 
*/ u_int32_t flags; }; @@ -878,7 +943,7 @@ struct __db_txn_stat { DB_TXN_ACTIVE *st_txnarray; /* array of active transactions */ u_int32_t st_region_wait; /* Region lock granted after wait. */ u_int32_t st_region_nowait; /* Region lock granted without wait. */ - u_int32_t st_regsize; /* Region size. */ + roff_t st_regsize; /* Region size. */ }; /******************************************************* @@ -890,8 +955,7 @@ struct __db_txn_stat { /* rep_start flags values */ #define DB_REP_CLIENT 0x001 -#define DB_REP_LOGSONLY 0x002 -#define DB_REP_MASTER 0x004 +#define DB_REP_MASTER 0x002 /* Replication statistics. */ struct __db_rep_stat { @@ -907,13 +971,15 @@ struct __db_rep_stat { u_int32_t st_status; /* Current replication status. */ DB_LSN st_next_lsn; /* Next LSN to use or expect. */ DB_LSN st_waiting_lsn; /* LSN we're awaiting, if any. */ + db_pgno_t st_next_pg; /* Next pg we expect. */ + db_pgno_t st_waiting_pg; /* pg we're awaiting, if any. */ u_int32_t st_dupmasters; /* # of times a duplicate master condition was detected.+ */ int st_env_id; /* Current environment ID. */ int st_env_priority; /* Current environment priority. */ u_int32_t st_gen; /* Current generation number. */ - u_int32_t st_in_recovery; /* This site is in client sync-up. */ + u_int32_t st_egen; /* Current election gen number. */ u_int32_t st_log_duplicated; /* Log records received multiply.+ */ u_int32_t st_log_queued; /* Log records currently queued.+ */ u_int32_t st_log_queued_max; /* Max. log records queued at once.+ */ @@ -934,6 +1000,10 @@ struct __db_rep_stat { u_int32_t st_nthrottles; /* # of times we were throttled. */ u_int32_t st_outdated; /* # of times we detected and returned an OUTDATED condition.+ */ + u_int32_t st_pg_duplicated; /* Pages received multiply.+ */ + u_int32_t st_pg_records; /* Pages received and stored.+ */ + u_int32_t st_pg_requested; /* Pages missed and requested.+ */ + u_int32_t st_startup_complete; /* Site completed client sync-up. */ u_int32_t st_txns_applied; /* # of transactions applied.+ */ /* Elections generally. */ @@ -945,11 +1015,75 @@ struct __db_rep_stat { u_int32_t st_election_gen; /* Election generation number. */ DB_LSN st_election_lsn; /* Max. LSN of current winner. */ int st_election_nsites; /* # of "registered voters". */ + int st_election_nvotes; /* # of "registered voters" needed. */ int st_election_priority; /* Current election priority. */ int st_election_status; /* Current election status. */ - int st_election_tiebreaker; /* Election tiebreaker value. */ + u_int32_t st_election_tiebreaker;/* Election tiebreaker value. */ int st_election_votes; /* Votes received in this round. */ }; +/* + * The storage record for a sequence. + */ +struct __db_seq_record { + u_int32_t seq_version; /* Version size/number. */ +#define DB_SEQ_DEC 0x00000001 /* Decrement sequence. */ +#define DB_SEQ_INC 0x00000002 /* Increment sequence. */ +#define DB_SEQ_RANGE_SET 0x00000004 /* Range set (internal). */ +#define DB_SEQ_WRAP 0x00000008 /* Wrap sequence at min/max. */ + u_int32_t flags; /* Flags. */ + db_seq_t seq_value; /* Current value. */ + db_seq_t seq_max; /* Max permitted. */ + db_seq_t seq_min; /* Min permitted. */ +}; + +/* + * Handle for a sequence object. + */ +struct __db_sequence { + DB *seq_dbp; /* DB handle for this sequence. */ + DB_MUTEX *seq_mutexp; /* Mutex if sequence is threaded. */ + DB_SEQ_RECORD *seq_rp; /* Pointer to current data. */ + DB_SEQ_RECORD seq_record; /* Data from DB_SEQUENCE. */ + int32_t seq_cache_size; /* Number of values cached. 
*/ + db_seq_t seq_last_value; /* Last value cached. */ + DBT seq_key; /* DBT pointing to sequence key. */ + DBT seq_data; /* DBT pointing to seq_record. */ + + /* API-private structure: used by C++ and Java. */ + void *api_internal; + + int (*close) __P((DB_SEQUENCE *, u_int32_t)); + int (*get) __P((DB_SEQUENCE *, + DB_TXN *, int32_t, db_seq_t *, u_int32_t)); + int (*get_cachesize) __P((DB_SEQUENCE *, int32_t *)); + int (*get_db) __P((DB_SEQUENCE *, DB **)); + int (*get_flags) __P((DB_SEQUENCE *, u_int32_t *)); + int (*get_key) __P((DB_SEQUENCE *, DBT *)); + int (*get_range) __P((DB_SEQUENCE *, + db_seq_t *, db_seq_t *)); + int (*initial_value) __P((DB_SEQUENCE *, db_seq_t)); + int (*open) __P((DB_SEQUENCE *, + DB_TXN *, DBT *, u_int32_t)); + int (*remove) __P((DB_SEQUENCE *, DB_TXN *, u_int32_t)); + int (*set_cachesize) __P((DB_SEQUENCE *, int32_t)); + int (*set_flags) __P((DB_SEQUENCE *, u_int32_t)); + int (*set_range) __P((DB_SEQUENCE *, db_seq_t, db_seq_t)); + int (*stat) __P((DB_SEQUENCE *, + DB_SEQUENCE_STAT **, u_int32_t)); + int (*stat_print) __P((DB_SEQUENCE *, u_int32_t)); +}; + +struct __db_seq_stat { + u_int32_t st_wait; /* Sequence lock granted without wait. */ + u_int32_t st_nowait; /* Sequence lock granted after wait. */ + db_seq_t st_current; /* Current value in db. */ + db_seq_t st_value; /* Current cached value. */ + db_seq_t st_last_value; /* Last cached value. */ + db_seq_t st_min; /* Minimum value. */ + db_seq_t st_max; /* Maximum value. */ + int32_t st_cache_size; /* Cache size. */ + u_int32_t st_flags; /* Flag value. */ +}; /******************************************************* * Access methods. @@ -976,6 +1110,8 @@ typedef enum { #define DB_QAMOLDVER 3 /* Oldest queue version supported. */ #define DB_QAMMAGIC 0x042253 +#define DB_SEQUENCE_VERSION 1 /* Current sequence version. */ + /* * DB access method and cursor operation values. Each value is an operation * code to which additional bit flags are added. @@ -1024,10 +1160,10 @@ typedef enum { * Masks for flags that can be OR'd into DB access method and cursor * operation values. * - * DB_DIRTY_READ 0x02000000 Dirty Read. */ -#define DB_MULTIPLE 0x04000000 /* Return multiple data values. */ -#define DB_MULTIPLE_KEY 0x08000000 /* Return multiple data/key pairs. */ -#define DB_RMW 0x10000000 /* Acquire write flag immediately. */ + * DB_DIRTY_READ 0x04000000 Dirty Read. */ +#define DB_MULTIPLE 0x08000000 /* Return multiple data values. */ +#define DB_MULTIPLE_KEY 0x10000000 /* Return multiple data/key pairs. */ +#define DB_RMW 0x20000000 /* Acquire write flag immediately. */ /* * DB (user visible) error return codes. @@ -1044,42 +1180,47 @@ typedef enum { * document that we own the error name space from -30,800 to -30,999. */ /* DB (public) error return codes. */ -#define DB_DONOTINDEX (-30999)/* "Null" return from 2ndary callbk. */ -#define DB_FILEOPEN (-30998)/* Rename/remove while file is open. */ +#define DB_BUFFER_SMALL (-30999)/* User memory too small for return. */ +#define DB_DONOTINDEX (-30998)/* "Null" return from 2ndary callbk. */ #define DB_KEYEMPTY (-30997)/* Key/data deleted or never created. */ #define DB_KEYEXIST (-30996)/* The key/data pair already exists. */ #define DB_LOCK_DEADLOCK (-30995)/* Deadlock. */ #define DB_LOCK_NOTGRANTED (-30994)/* Lock unavailable. */ -#define DB_NOSERVER (-30993)/* Server panic return. */ -#define DB_NOSERVER_HOME (-30992)/* Bad home sent to server. */ -#define DB_NOSERVER_ID (-30991)/* Bad ID sent to server. 
*/ -#define DB_NOTFOUND (-30990)/* Key/data pair not found (EOF). */ -#define DB_OLD_VERSION (-30989)/* Out-of-date version. */ -#define DB_PAGE_NOTFOUND (-30988)/* Requested page not found. */ -#define DB_REP_DUPMASTER (-30987)/* There are two masters. */ -#define DB_REP_HANDLE_DEAD (-30986)/* Rolled back a commit. */ -#define DB_REP_HOLDELECTION (-30985)/* Time to hold an election. */ -#define DB_REP_ISPERM (-30984)/* Cached not written perm written.*/ -#define DB_REP_NEWMASTER (-30983)/* We have learned of a new master. */ -#define DB_REP_NEWSITE (-30982)/* New site entered system. */ -#define DB_REP_NOTPERM (-30981)/* Permanent log record not written. */ -#define DB_REP_OUTDATED (-30980)/* Site is too far behind master. */ -#define DB_REP_UNAVAIL (-30979)/* Site cannot currently be reached. */ -#define DB_RUNRECOVERY (-30978)/* Panic return. */ -#define DB_SECONDARY_BAD (-30977)/* Secondary index corrupt. */ -#define DB_VERIFY_BAD (-30976)/* Verify failed; bad format. */ +#define DB_LOG_BUFFER_FULL (-30993)/* In-memory log buffer full. */ +#define DB_NOSERVER (-30992)/* Server panic return. */ +#define DB_NOSERVER_HOME (-30991)/* Bad home sent to server. */ +#define DB_NOSERVER_ID (-30990)/* Bad ID sent to server. */ +#define DB_NOTFOUND (-30989)/* Key/data pair not found (EOF). */ +#define DB_OLD_VERSION (-30988)/* Out-of-date version. */ +#define DB_PAGE_NOTFOUND (-30987)/* Requested page not found. */ +#define DB_REP_DUPMASTER (-30986)/* There are two masters. */ +#define DB_REP_HANDLE_DEAD (-30985)/* Rolled back a commit. */ +#define DB_REP_HOLDELECTION (-30984)/* Time to hold an election. */ +#define DB_REP_ISPERM (-30983)/* Cached not written perm written.*/ +#define DB_REP_NEWMASTER (-30982)/* We have learned of a new master. */ +#define DB_REP_NEWSITE (-30981)/* New site entered system. */ +#define DB_REP_NOTPERM (-30980)/* Permanent log record not written. */ +#define DB_REP_STARTUPDONE (-30979)/* Client startup complete. */ +#define DB_REP_UNAVAIL (-30978)/* Site cannot currently be reached. */ +#define DB_RUNRECOVERY (-30977)/* Panic return. */ +#define DB_SECONDARY_BAD (-30976)/* Secondary index corrupt. */ +#define DB_VERIFY_BAD (-30975)/* Verify failed; bad format. */ +#define DB_VERSION_MISMATCH (-30974)/* Environment version mismatch. */ /* DB (private) error return codes. */ #define DB_ALREADY_ABORTED (-30899) #define DB_DELETED (-30898)/* Recovery file marked deleted. */ #define DB_LOCK_NOTEXIST (-30897)/* Object to lock is gone. */ #define DB_NEEDSPLIT (-30896)/* Page needs to be split. */ -#define DB_SURPRISE_KID (-30895)/* Child commit where parent +#define DB_REP_EGENCHG (-30895)/* Egen changed while in election. */ +#define DB_REP_LOGREADY (-30894)/* Rep log ready for recovery. */ +#define DB_REP_PAGEDONE (-30893)/* This page was already done. */ +#define DB_SURPRISE_KID (-30892)/* Child commit where parent didn't know it was a parent. */ -#define DB_SWAPBYTES (-30894)/* Database needs byte swapping. */ -#define DB_TIMEOUT (-30893)/* Timed out waiting for election. */ -#define DB_TXN_CKP (-30892)/* Encountered ckp record in log. */ -#define DB_VERIFY_FATAL (-30891)/* DB->verify cannot proceed. */ +#define DB_SWAPBYTES (-30891)/* Database needs byte swapping. */ +#define DB_TIMEOUT (-30890)/* Timed out waiting for election. */ +#define DB_TXN_CKP (-30889)/* Encountered ckp record in log. */ +#define DB_VERIFY_FATAL (-30888)/* DB->verify cannot proceed. */ /* Database handle. 
*/ struct __db { @@ -1122,7 +1263,7 @@ struct __db { u_int32_t associate_lid; /* Locker id for DB->associate call. */ DB_LOCK handle_lock; /* Lock held on this handle. */ - long cl_id; /* RPC: remote client id. */ + u_int cl_id; /* RPC: remote client id. */ time_t timestamp; /* Handle timestamp for replication. */ @@ -1235,6 +1376,8 @@ struct __db { int (*close) __P((DB *, u_int32_t)); int (*cursor) __P((DB *, DB_TXN *, DBC **, u_int32_t)); int (*del) __P((DB *, DB_TXN *, DBT *, u_int32_t)); + int (*dump) __P((DB *, + const char *, int (*)(void *, const void *), void *, int, int)); void (*err) __P((DB *, int, const char *, ...)); void (*errx) __P((DB *, const char *, ...)); int (*fd) __P((DB *, int *)); @@ -1244,14 +1387,14 @@ struct __db { int (*get_cachesize) __P((DB *, u_int32_t *, u_int32_t *, int *)); int (*get_dbname) __P((DB *, const char **, const char **)); int (*get_encrypt_flags) __P((DB *, u_int32_t *)); - int (*get_env) __P((DB *, DB_ENV **)); + DB_ENV *(*get_env) __P((DB *)); void (*get_errfile) __P((DB *, FILE **)); void (*get_errpfx) __P((DB *, const char **)); int (*get_flags) __P((DB *, u_int32_t *)); int (*get_lorder) __P((DB *, int *)); int (*get_open_flags) __P((DB *, u_int32_t *)); int (*get_pagesize) __P((DB *, u_int32_t *)); - int (*get_transactional) __P((DB *, int *)); + int (*get_transactional) __P((DB *)); int (*get_type) __P((DB *, DBTYPE *)); int (*join) __P((DB *, DBC **, DBC **, u_int32_t)); int (*key_range) __P((DB *, @@ -1270,15 +1413,20 @@ struct __db { int (*set_dup_compare) __P((DB *, int (*)(DB *, const DBT *, const DBT *))); int (*set_encrypt) __P((DB *, const char *, u_int32_t)); - void (*set_errcall) __P((DB *, void (*)(const char *, char *))); + void (*set_errcall) __P((DB *, + void (*)(const DB_ENV *, const char *, const char *))); void (*set_errfile) __P((DB *, FILE *)); void (*set_errpfx) __P((DB *, const char *)); int (*set_feedback) __P((DB *, void (*)(DB *, int, int))); int (*set_flags) __P((DB *, u_int32_t)); int (*set_lorder) __P((DB *, int)); + void (*set_msgcall) __P((DB *, void (*)(const DB_ENV *, const char *))); + void (*get_msgfile) __P((DB *, FILE **)); + void (*set_msgfile) __P((DB *, FILE *)); int (*set_pagesize) __P((DB *, u_int32_t)); int (*set_paniccall) __P((DB *, void (*)(DB_ENV *, int))); - int (*stat) __P((DB *, void *, u_int32_t)); + int (*stat) __P((DB *, DB_TXN *, void *, u_int32_t)); + int (*stat_print) __P((DB *, u_int32_t)); int (*sync) __P((DB *, u_int32_t)); int (*upgrade) __P((DB *, const char *, u_int32_t)); int (*verify) __P((DB *, @@ -1311,8 +1459,7 @@ struct __db { int (*get_q_extentsize) __P((DB *, u_int32_t *)); int (*set_q_extentsize) __P((DB *, u_int32_t)); - int (*db_am_remove) __P((DB *, - DB_TXN *, const char *, const char *, DB_LSN *)); + int (*db_am_remove) __P((DB *, DB_TXN *, const char *, const char *)); int (*db_am_rename) __P((DB *, DB_TXN *, const char *, const char *, const char *)); @@ -1343,23 +1490,24 @@ struct __db { #define DB_AM_ENCRYPT 0x00000800 /* Encryption. */ #define DB_AM_FIXEDLEN 0x00001000 /* Fixed-length records. */ #define DB_AM_INMEM 0x00002000 /* In-memory; no sync on close. */ -#define DB_AM_IN_RENAME 0x00004000 /* File is being renamed. */ -#define DB_AM_NOT_DURABLE 0x00008000 /* Do not log changes. */ -#define DB_AM_OPEN_CALLED 0x00010000 /* DB->open called. */ -#define DB_AM_PAD 0x00020000 /* Fixed-length record pad. */ -#define DB_AM_PGDEF 0x00040000 /* Page size was defaulted. */ -#define DB_AM_RDONLY 0x00080000 /* Database is readonly. 
*/ -#define DB_AM_RECNUM 0x00100000 /* DB_RECNUM. */ -#define DB_AM_RECOVER 0x00200000 /* DB opened by recovery routine. */ -#define DB_AM_RENUMBER 0x00400000 /* DB_RENUMBER. */ -#define DB_AM_REPLICATION 0x00800000 /* An internal replication file. */ -#define DB_AM_REVSPLITOFF 0x01000000 /* DB_REVSPLITOFF. */ -#define DB_AM_SECONDARY 0x02000000 /* Database is a secondary index. */ -#define DB_AM_SNAPSHOT 0x04000000 /* DB_SNAPSHOT. */ -#define DB_AM_SUBDB 0x08000000 /* Subdatabases supported. */ -#define DB_AM_SWAP 0x10000000 /* Pages need to be byte-swapped. */ -#define DB_AM_TXN 0x20000000 /* Opened in a transaction. */ -#define DB_AM_VERIFYING 0x40000000 /* DB handle is in the verifier. */ +#define DB_AM_INORDER 0x00004000 /* DB_INORDER. */ +#define DB_AM_IN_RENAME 0x00008000 /* File is being renamed. */ +#define DB_AM_NOT_DURABLE 0x00010000 /* Do not log changes. */ +#define DB_AM_OPEN_CALLED 0x00020000 /* DB->open called. */ +#define DB_AM_PAD 0x00040000 /* Fixed-length record pad. */ +#define DB_AM_PGDEF 0x00080000 /* Page size was defaulted. */ +#define DB_AM_RDONLY 0x00100000 /* Database is readonly. */ +#define DB_AM_RECNUM 0x00200000 /* DB_RECNUM. */ +#define DB_AM_RECOVER 0x00400000 /* DB opened by recovery routine. */ +#define DB_AM_RENUMBER 0x00800000 /* DB_RENUMBER. */ +#define DB_AM_REPLICATION 0x01000000 /* An internal replication file. */ +#define DB_AM_REVSPLITOFF 0x02000000 /* DB_REVSPLITOFF. */ +#define DB_AM_SECONDARY 0x04000000 /* Database is a secondary index. */ +#define DB_AM_SNAPSHOT 0x08000000 /* DB_SNAPSHOT. */ +#define DB_AM_SUBDB 0x10000000 /* Subdatabases supported. */ +#define DB_AM_SWAP 0x20000000 /* Pages need to be byte-swapped. */ +#define DB_AM_TXN 0x40000000 /* Opened in a transaction. */ +#define DB_AM_VERIFYING 0x80000000 /* DB handle is in the verifier. */ u_int32_t orig_flags; /* Flags at open, for refresh. */ u_int32_t flags; }; @@ -1468,7 +1616,7 @@ struct __dbc { DB_LOCK_ILOCK lock; /* Object to be locked. */ DB_LOCK mylock; /* CDB lock held on this cursor. */ - long cl_id; /* Remote client id. */ + u_int cl_id; /* Remote client id. */ DBTYPE dbtype; /* Cursor type. */ @@ -1493,16 +1641,17 @@ struct __dbc { #define DBC_ACTIVE 0x0001 /* Cursor in use. */ #define DBC_COMPENSATE 0x0002 /* Cursor compensating, don't lock. */ -#define DBC_DIRTY_READ 0x0004 /* Cursor supports dirty reads. */ -#define DBC_OPD 0x0008 /* Cursor references off-page dups. */ -#define DBC_RECOVER 0x0010 /* Recovery cursor; don't log/lock. */ -#define DBC_RMW 0x0020 /* Acquire write flag in read op. */ -#define DBC_TRANSIENT 0x0040 /* Cursor is transient. */ -#define DBC_WRITECURSOR 0x0080 /* Cursor may be used to write (CDB). */ -#define DBC_WRITER 0x0100 /* Cursor immediately writing (CDB). */ -#define DBC_MULTIPLE 0x0200 /* Return Multiple data. */ -#define DBC_MULTIPLE_KEY 0x0400 /* Return Multiple keys and data. */ -#define DBC_OWN_LID 0x0800 /* Free lock id on destroy. */ +#define DBC_DEGREE_2 0x0004 /* Cursor has degree 2 isolation. */ +#define DBC_DIRTY_READ 0x0008 /* Cursor supports dirty reads. */ +#define DBC_OPD 0x0010 /* Cursor references off-page dups. */ +#define DBC_RECOVER 0x0020 /* Recovery cursor; don't log/lock. */ +#define DBC_RMW 0x0040 /* Acquire write flag in read op. */ +#define DBC_TRANSIENT 0x0080 /* Cursor is transient. */ +#define DBC_WRITECURSOR 0x0100 /* Cursor may be used to write (CDB). */ +#define DBC_WRITER 0x0200 /* Cursor immediately writing (CDB). */ +#define DBC_MULTIPLE 0x0400 /* Return Multiple data. 
*/ +#define DBC_MULTIPLE_KEY 0x0800 /* Return Multiple keys and data. */ +#define DBC_OWN_LID 0x1000 /* Free lock id on destroy. */ u_int32_t flags; }; @@ -1530,6 +1679,7 @@ struct __db_bt_stat { u_int32_t bt_leaf_pg; /* Leaf pages. */ u_int32_t bt_dup_pg; /* Duplicate pages. */ u_int32_t bt_over_pg; /* Overflow pages. */ + u_int32_t bt_empty_pg; /* Empty pages. */ u_int32_t bt_free; /* Pages on the free list. */ u_int32_t bt_int_pgfree; /* Bytes free in internal pages. */ u_int32_t bt_leaf_pgfree; /* Bytes free in leaf pages. */ @@ -1584,10 +1734,16 @@ struct __db_env { /******************************************************* * Public: owned by the application. *******************************************************/ + /* Error message callback. */ + void (*db_errcall) __P((const DB_ENV *, const char *, const char *)); FILE *db_errfile; /* Error message file stream. */ const char *db_errpfx; /* Error message prefix. */ - /* Callbacks. */ - void (*db_errcall) __P((const char *, char *)); + + FILE *db_msgfile; /* Statistics message file stream. */ + /* Statistics message callback. */ + void (*db_msgcall) __P((const DB_ENV *, const char *)); + + /* Other Callbacks. */ void (*db_feedback) __P((DB_ENV *, int, int)); void (*db_paniccall) __P((DB_ENV *, int)); @@ -1601,11 +1757,10 @@ struct __db_env { * entries. There's no reason that it needs to be limited, if * there are ever more than 32 entries, convert to a bit array. */ -#define DB_VERB_CHKPOINT 0x0001 /* List checkpoints. */ -#define DB_VERB_DEADLOCK 0x0002 /* Deadlock detection information. */ -#define DB_VERB_RECOVERY 0x0004 /* Recovery information. */ -#define DB_VERB_REPLICATION 0x0008 /* Replication information. */ -#define DB_VERB_WAITSFOR 0x0010 /* Dump waits-for table. */ +#define DB_VERB_DEADLOCK 0x0001 /* Deadlock detection information. */ +#define DB_VERB_RECOVERY 0x0002 /* Recovery information. */ +#define DB_VERB_REPLICATION 0x0004 /* Replication information. */ +#define DB_VERB_WAITSFOR 0x0008 /* Dump waits-for table. */ u_int32_t verbose; /* Verbose output. */ void *app_private; /* Application-private handle. */ @@ -1615,7 +1770,7 @@ struct __db_env { /* Locking. */ u_int8_t *lk_conflicts; /* Two dimensional conflict matrix. */ - u_int32_t lk_modes; /* Number of lock modes in table. */ + int lk_modes; /* Number of lock modes in table. */ u_int32_t lk_max; /* Maximum number of locks. */ u_int32_t lk_max_lockers;/* Maximum number of lockers. */ u_int32_t lk_max_objects;/* Maximum number of locked objects. */ @@ -1630,9 +1785,9 @@ struct __db_env { /* Memory pool. */ u_int32_t mp_gbytes; /* Cachesize: GB. */ u_int32_t mp_bytes; /* Cachesize: Bytes. */ - size_t mp_size; /* DEPRECATED: Cachesize: bytes. */ - int mp_ncache; /* Number of cache regions. */ + u_int mp_ncache; /* Number of cache regions. */ size_t mp_mmapsize; /* Maximum file size for mmap. */ + int mp_maxopenfd; /* Maximum open file descriptors. */ int mp_maxwrite; /* Maximum buffers to write. */ int /* Sleep after writing max buffers. */ mp_maxwrite_sleep; @@ -1661,6 +1816,8 @@ struct __db_env { int data_next; /* Next Database data file slot. */ int db_mode; /* Default open permissions. */ + int dir_mode; /* Intermediate directory perms. */ + u_int32_t env_lid; /* Locker ID in non-threaded handles. */ u_int32_t open_flags; /* Flags passed to DB_ENV->open. */ void *reginfo; /* REGINFO structure reference. */ @@ -1672,7 +1829,7 @@ struct __db_env { /* Slots in the dispatch table. */ void *cl_handle; /* RPC: remote client handle. 
*/ - long cl_id; /* RPC: remote client env id. */ + u_int cl_id; /* RPC: remote client env id. */ int db_ref; /* DB reference count. */ @@ -1734,10 +1891,18 @@ struct __db_env { const char *, const char *, const char *, u_int32_t)); void (*err) __P((const DB_ENV *, int, const char *, ...)); void (*errx) __P((const DB_ENV *, const char *, ...)); - int (*get_home) __P((DB_ENV *, const char **)); - int (*get_open_flags) __P((DB_ENV *, u_int32_t *)); int (*open) __P((DB_ENV *, const char *, u_int32_t, int)); int (*remove) __P((DB_ENV *, const char *, u_int32_t)); + int (*stat_print) __P((DB_ENV *, u_int32_t)); + + /* House-keeping. */ + int (*fileid_reset) __P((DB_ENV *, char *, int)); + int (*is_bigendian) __P((void)); + int (*lsn_reset) __P((DB_ENV *, char *, int)); + int (*prdbt) __P((DBT *, + int, const char *, void *, int (*)(void *, const void *), int)); + + /* Setters/getters. */ int (*set_alloc) __P((DB_ENV *, void *(*)(size_t), void *(*)(void *, size_t), void (*)(void *))); int (*set_app_dispatch) __P((DB_ENV *, @@ -1746,7 +1911,8 @@ struct __db_env { int (*set_data_dir) __P((DB_ENV *, const char *)); int (*get_encrypt_flags) __P((DB_ENV *, u_int32_t *)); int (*set_encrypt) __P((DB_ENV *, const char *, u_int32_t)); - void (*set_errcall) __P((DB_ENV *, void (*)(const char *, char *))); + void (*set_errcall) __P((DB_ENV *, + void (*)(const DB_ENV *, const char *, const char *))); void (*get_errfile) __P((DB_ENV *, FILE **)); void (*set_errfile) __P((DB_ENV *, FILE *)); void (*get_errpfx) __P((DB_ENV *, const char **)); @@ -1754,11 +1920,18 @@ struct __db_env { int (*set_feedback) __P((DB_ENV *, void (*)(DB_ENV *, int, int))); int (*get_flags) __P((DB_ENV *, u_int32_t *)); int (*set_flags) __P((DB_ENV *, u_int32_t, int)); + int (*get_home) __P((DB_ENV *, const char **)); + int (*set_intermediate_dir) __P((DB_ENV *, int, u_int32_t)); + int (*get_open_flags) __P((DB_ENV *, u_int32_t *)); int (*set_paniccall) __P((DB_ENV *, void (*)(DB_ENV *, int))); int (*set_rpc_server) __P((DB_ENV *, void *, const char *, long, long, u_int32_t)); int (*get_shm_key) __P((DB_ENV *, long *)); int (*set_shm_key) __P((DB_ENV *, long)); + void (*set_msgcall) __P((DB_ENV *, + void (*)(const DB_ENV *, const char *))); + void (*get_msgfile) __P((DB_ENV *, FILE **)); + void (*set_msgfile) __P((DB_ENV *, FILE *)); int (*get_tas_spins) __P((DB_ENV *, u_int32_t *)); int (*set_tas_spins) __P((DB_ENV *, u_int32_t)); int (*get_tmp_dir) __P((DB_ENV *, const char **)); @@ -1781,6 +1954,7 @@ struct __db_env { int (*log_flush) __P((DB_ENV *, const DB_LSN *)); int (*log_put) __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t)); int (*log_stat) __P((DB_ENV *, DB_LOG_STAT **, u_int32_t)); + int (*log_stat_print) __P((DB_ENV *, u_int32_t)); void *lk_handle; /* Lock handle and methods. 
*/ int (*get_lk_conflicts) __P((DB_ENV *, const u_int8_t **, int *)); @@ -1795,13 +1969,13 @@ struct __db_env { int (*get_lk_max_objects) __P((DB_ENV *, u_int32_t *)); int (*set_lk_max_objects) __P((DB_ENV *, u_int32_t)); int (*lock_detect) __P((DB_ENV *, u_int32_t, u_int32_t, int *)); - int (*lock_dump_region) __P((DB_ENV *, const char *, FILE *)); int (*lock_get) __P((DB_ENV *, u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *)); int (*lock_put) __P((DB_ENV *, DB_LOCK *)); int (*lock_id) __P((DB_ENV *, u_int32_t *)); int (*lock_id_free) __P((DB_ENV *, u_int32_t)); int (*lock_stat) __P((DB_ENV *, DB_LOCK_STAT **, u_int32_t)); + int (*lock_stat_print) __P((DB_ENV *, u_int32_t)); int (*lock_vec) __P((DB_ENV *, u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **)); @@ -1810,25 +1984,29 @@ struct __db_env { int (*set_cachesize) __P((DB_ENV *, u_int32_t, u_int32_t, int)); int (*get_mp_mmapsize) __P((DB_ENV *, size_t *)); int (*set_mp_mmapsize) __P((DB_ENV *, size_t)); - int (*get_mp_maxwrite) __P((DB_ENV *, int *, int *)); - int (*set_mp_maxwrite) __P((DB_ENV *, int, int)); - int (*memp_dump_region) __P((DB_ENV *, const char *, FILE *)); + int (*get_mp_max_openfd) __P((DB_ENV *, int *)); + int (*set_mp_max_openfd) __P((DB_ENV *, int)); + int (*get_mp_max_write) __P((DB_ENV *, int *, int *)); + int (*set_mp_max_write) __P((DB_ENV *, int, int)); int (*memp_fcreate) __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t)); int (*memp_register) __P((DB_ENV *, int, int (*)(DB_ENV *, db_pgno_t, void *, DBT *), int (*)(DB_ENV *, db_pgno_t, void *, DBT *))); int (*memp_stat) __P((DB_ENV *, DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, u_int32_t)); + int (*memp_stat_print) __P((DB_ENV *, u_int32_t)); int (*memp_sync) __P((DB_ENV *, DB_LSN *)); int (*memp_trickle) __P((DB_ENV *, int, int *)); void *rep_handle; /* Replication handle and methods. 
*/ - int (*rep_elect) __P((DB_ENV *, int, int, u_int32_t, int *)); + int (*rep_elect) __P((DB_ENV *, int, int, int, + u_int32_t, int *, u_int32_t)); int (*rep_flush) __P((DB_ENV *)); int (*rep_process_message) __P((DB_ENV *, DBT *, DBT *, int *, DB_LSN *)); int (*rep_start) __P((DB_ENV *, DBT *, u_int32_t)); int (*rep_stat) __P((DB_ENV *, DB_REP_STAT **, u_int32_t)); + int (*rep_stat_print) __P((DB_ENV *, u_int32_t)); int (*get_rep_limit) __P((DB_ENV *, u_int32_t *, u_int32_t *)); int (*set_rep_limit) __P((DB_ENV *, u_int32_t, u_int32_t)); int (*set_rep_request) __P((DB_ENV *, u_int32_t, u_int32_t)); @@ -1846,19 +2024,22 @@ struct __db_env { int (*txn_recover) __P((DB_ENV *, DB_PREPLIST *, long, long *, u_int32_t)); int (*txn_stat) __P((DB_ENV *, DB_TXN_STAT **, u_int32_t)); + int (*txn_stat_print) __P((DB_ENV *, u_int32_t)); int (*get_timeout) __P((DB_ENV *, db_timeout_t *, u_int32_t)); int (*set_timeout) __P((DB_ENV *, db_timeout_t, u_int32_t)); #define DB_TEST_ELECTINIT 1 /* after __rep_elect_init */ -#define DB_TEST_POSTDESTROY 2 /* after destroy op */ -#define DB_TEST_POSTLOG 3 /* after logging all pages */ -#define DB_TEST_POSTLOGMETA 4 /* after logging meta in btree */ -#define DB_TEST_POSTOPEN 5 /* after __os_open */ -#define DB_TEST_POSTSYNC 6 /* after syncing the log */ -#define DB_TEST_PREDESTROY 7 /* before destroy op */ -#define DB_TEST_PREOPEN 8 /* before __os_open */ -#define DB_TEST_SUBDB_LOCKS 9 /* subdb locking tests */ +#define DB_TEST_ELECTVOTE1 2 /* after sending VOTE1 */ +#define DB_TEST_POSTDESTROY 3 /* after destroy op */ +#define DB_TEST_POSTLOG 4 /* after logging all pages */ +#define DB_TEST_POSTLOGMETA 5 /* after logging meta in btree */ +#define DB_TEST_POSTOPEN 6 /* after __os_open */ +#define DB_TEST_POSTSYNC 7 /* after syncing the log */ +#define DB_TEST_PREDESTROY 8 /* before destroy op */ +#define DB_TEST_PREOPEN 9 /* before __os_open */ +#define DB_TEST_SUBDB_LOCKS 10 /* subdb locking tests */ int test_abort; /* Abort value for testing. */ + int test_check; /* Checkpoint value for testing. */ int test_copy; /* Copy value for testing. */ #define DB_ENV_AUTO_COMMIT 0x0000001 /* DB_AUTO_COMMIT. */ @@ -1868,25 +2049,26 @@ struct __db_env { #define DB_ENV_DBLOCAL 0x0000010 /* DB_ENV allocated for private DB. */ #define DB_ENV_DIRECT_DB 0x0000020 /* DB_DIRECT_DB set. */ #define DB_ENV_DIRECT_LOG 0x0000040 /* DB_DIRECT_LOG set. */ -#define DB_ENV_FATAL 0x0000080 /* Doing fatal recovery in env. */ -#define DB_ENV_LOCKDOWN 0x0000100 /* DB_LOCKDOWN set. */ -#define DB_ENV_LOG_AUTOREMOVE 0x0000200 /* DB_LOG_AUTOREMOVE set. */ -#define DB_ENV_NOLOCKING 0x0000400 /* DB_NOLOCKING set. */ -#define DB_ENV_NOMMAP 0x0000800 /* DB_NOMMAP set. */ -#define DB_ENV_NOPANIC 0x0001000 /* Okay if panic set. */ -#define DB_ENV_OPEN_CALLED 0x0002000 /* DB_ENV->open called. */ -#define DB_ENV_OVERWRITE 0x0004000 /* DB_OVERWRITE set. */ -#define DB_ENV_PRIVATE 0x0008000 /* DB_PRIVATE set. */ -#define DB_ENV_REGION_INIT 0x0010000 /* DB_REGION_INIT set. */ -#define DB_ENV_RPCCLIENT 0x0020000 /* DB_RPCCLIENT set. */ -#define DB_ENV_RPCCLIENT_GIVEN 0x0040000 /* User-supplied RPC client struct */ -#define DB_ENV_SYSTEM_MEM 0x0080000 /* DB_SYSTEM_MEM set. */ -#define DB_ENV_THREAD 0x0100000 /* DB_THREAD set. */ -#define DB_ENV_TIME_NOTGRANTED 0x0200000 /* DB_TIME_NOTGRANTED set. */ -#define DB_ENV_TXN_NOSYNC 0x0400000 /* DB_TXN_NOSYNC set. */ -#define DB_ENV_TXN_NOT_DURABLE 0x0800000 /* DB_TXN_NOT_DURABLE set. */ -#define DB_ENV_TXN_WRITE_NOSYNC 0x1000000 /* DB_TXN_WRITE_NOSYNC set. 
*/ -#define DB_ENV_YIELDCPU 0x2000000 /* DB_YIELDCPU set. */ +#define DB_ENV_DSYNC_LOG 0x0000080 /* DB_DSYNC_LOG set. */ +#define DB_ENV_FATAL 0x0000100 /* Doing fatal recovery in env. */ +#define DB_ENV_LOCKDOWN 0x0000200 /* DB_LOCKDOWN set. */ +#define DB_ENV_LOG_AUTOREMOVE 0x0000400 /* DB_LOG_AUTOREMOVE set. */ +#define DB_ENV_LOG_INMEMORY 0x0000800 /* DB_LOG_INMEMORY set. */ +#define DB_ENV_NOLOCKING 0x0001000 /* DB_NOLOCKING set. */ +#define DB_ENV_NOMMAP 0x0002000 /* DB_NOMMAP set. */ +#define DB_ENV_NOPANIC 0x0004000 /* Okay if panic set. */ +#define DB_ENV_OPEN_CALLED 0x0008000 /* DB_ENV->open called. */ +#define DB_ENV_OVERWRITE 0x0010000 /* DB_OVERWRITE set. */ +#define DB_ENV_PRIVATE 0x0020000 /* DB_PRIVATE set. */ +#define DB_ENV_REGION_INIT 0x0040000 /* DB_REGION_INIT set. */ +#define DB_ENV_RPCCLIENT 0x0080000 /* DB_RPCCLIENT set. */ +#define DB_ENV_RPCCLIENT_GIVEN 0x0100000 /* User-supplied RPC client struct */ +#define DB_ENV_SYSTEM_MEM 0x0200000 /* DB_SYSTEM_MEM set. */ +#define DB_ENV_THREAD 0x0400000 /* DB_THREAD set. */ +#define DB_ENV_TIME_NOTGRANTED 0x0800000 /* DB_TIME_NOTGRANTED set. */ +#define DB_ENV_TXN_NOSYNC 0x1000000 /* DB_TXN_NOSYNC set. */ +#define DB_ENV_TXN_WRITE_NOSYNC 0x2000000 /* DB_TXN_WRITE_NOSYNC set. */ +#define DB_ENV_YIELDCPU 0x4000000 /* DB_YIELDCPU set. */ u_int32_t flags; }; @@ -1978,10 +2160,6 @@ typedef struct entry { #endif #endif /* !_DB_H_ */ -/* DO NOT EDIT: automatically built by dist/s_rpc. */ -#define DB_RPC_SERVERPROG ((unsigned long)(351457)) -#define DB_RPC_SERVERVERS ((unsigned long)(4002)) - /* DO NOT EDIT: automatically built by dist/s_include. */ #ifndef _DB_EXT_PROT_IN_ #define _DB_EXT_PROT_IN_ @@ -2001,19 +2179,23 @@ int db_env_set_func_dirlist __P((int (*)(const char *, char ***, int *))); int db_env_set_func_exists __P((int (*)(const char *, int *))); int db_env_set_func_free __P((void (*)(void *))); int db_env_set_func_fsync __P((int (*)(int))); +int db_env_set_func_ftruncate __P((int (*)(int, off_t))); int db_env_set_func_ioinfo __P((int (*)(const char *, int, u_int32_t *, u_int32_t *, u_int32_t *))); int db_env_set_func_malloc __P((void *(*)(size_t))); int db_env_set_func_map __P((int (*)(char *, size_t, int, int, void **))); +int db_env_set_func_pread __P((ssize_t (*)(int, void *, size_t, off_t))); +int db_env_set_func_pwrite __P((ssize_t (*)(int, const void *, size_t, off_t))); int db_env_set_func_open __P((int (*)(const char *, int, ...))); int db_env_set_func_read __P((ssize_t (*)(int, void *, size_t))); int db_env_set_func_realloc __P((void *(*)(void *, size_t))); int db_env_set_func_rename __P((int (*)(const char *, const char *))); -int db_env_set_func_seek __P((int (*)(int, size_t, db_pgno_t, u_int32_t, int, int))); +int db_env_set_func_seek __P((int (*)(int, off_t, int))); int db_env_set_func_sleep __P((int (*)(u_long, u_long))); int db_env_set_func_unlink __P((int (*)(const char *))); int db_env_set_func_unmap __P((int (*)(void *, size_t))); int db_env_set_func_write __P((ssize_t (*)(int, const void *, size_t))); int db_env_set_func_yield __P((int (*)(void))); +int db_sequence_create __P((DB_SEQUENCE **, DB *, u_int32_t)); #if DB_DBM_HSEARCH != 0 int __db_ndbm_clearerr __P((DBM *)); void __db_ndbm_close __P((DBM *)); @@ -2028,14 +2210,11 @@ int __db_ndbm_pagfno __P((DBM *)); int __db_ndbm_rdonly __P((DBM *)); int __db_ndbm_store __P((DBM *, datum, datum, int)); int __db_dbm_close __P((void)); -int __db_dbm_dbrdonly __P((void)); int __db_dbm_delete __P((datum)); -int __db_dbm_dirf __P((void)); datum 
__db_dbm_fetch __P((datum)); datum __db_dbm_firstkey __P((void)); int __db_dbm_init __P((char *)); datum __db_dbm_nextkey __P((datum)); -int __db_dbm_pagf __P((void)); int __db_dbm_store __P((datum, datum)); #endif #if DB_DBM_HSEARCH != 0 diff --git a/db/build_vxworks/db_archive/db_archive.c b/db/build_vxworks/db_archive/db_archive.c index 0b2535584..2297111b7 100644 --- a/db/build_vxworks/db_archive/db_archive.c +++ b/db/build_vxworks/db_archive/db_archive.c @@ -1,17 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_archive.c,v 11.46 2004/06/10 01:00:08 bostic Exp $ */ #include "db_config.h" #ifndef lint static const char copyright[] = - "Copyright (c) 1996-2003\nSleepycat Software Inc. All rights reserved.\n"; -static const char revid[] = - "$Id: db_archive.c,v 11.42 2003/08/13 19:57:04 ubell Exp $"; + "Copyright (c) 1996-2004\nSleepycat Software Inc. All rights reserved.\n"; #endif #ifndef NO_SYSTEM_INCLUDES @@ -123,9 +123,6 @@ db_archive_main(argc, argv) dbenv->set_errfile(dbenv, stderr); dbenv->set_errpfx(dbenv, progname); - if (verbose) - (void)dbenv->set_verbose(dbenv, DB_VERB_CHKPOINT, 1); - if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES)) != 0) { dbenv->err(dbenv, ret, "set_passwd"); @@ -137,9 +134,10 @@ db_archive_main(argc, argv) */ if ((ret = dbenv->open(dbenv, home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 && + (ret == DB_VERSION_MISMATCH || (ret = dbenv->open(dbenv, home, DB_CREATE | - DB_INIT_LOG | DB_INIT_TXN | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) { - dbenv->err(dbenv, ret, "open"); + DB_INIT_LOG | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0)) { + dbenv->err(dbenv, ret, "DB_ENV->open"); goto shutdown; } diff --git a/db/build_vxworks/db_checkpoint/db_checkpoint.c b/db/build_vxworks/db_checkpoint/db_checkpoint.c index c5770614c..6ee299571 100644 --- a/db/build_vxworks/db_checkpoint/db_checkpoint.c +++ b/db/build_vxworks/db_checkpoint/db_checkpoint.c @@ -1,17 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_checkpoint.c,v 11.54 2004/03/24 15:13:12 bostic Exp $ */ #include "db_config.h" #ifndef lint static const char copyright[] = - "Copyright (c) 1996-2003\nSleepycat Software Inc. All rights reserved.\n"; -static const char revid[] = - "$Id: db_checkpoint.c,v 11.51 2003/09/04 18:57:00 bostic Exp $"; + "Copyright (c) 1996-2004\nSleepycat Software Inc. All rights reserved.\n"; #endif #ifndef NO_SYSTEM_INCLUDES @@ -193,7 +193,7 @@ db_checkpoint_main(argc, argv) while (!__db_util_interrupted()) { if (verbose) { (void)time(&now); - dbenv->errx(dbenv, "checkpoint: %s", ctime(&now)); + dbenv->errx(dbenv, "checkpoint begin: %s", ctime(&now)); } if ((ret = dbenv->txn_checkpoint(dbenv, @@ -202,10 +202,16 @@ db_checkpoint_main(argc, argv) goto shutdown; } + if (verbose) { + (void)time(&now); + dbenv->errx(dbenv, + "checkpoint complete: %s", ctime(&now)); + } + if (once) break; - (void)__os_sleep(dbenv, seconds, 0); + __os_sleep(dbenv, seconds, 0); } if (0) { diff --git a/db/build_vxworks/db_config.h b/db/build_vxworks/db_config.h index 654d0120b..70b14ee74 100644 --- a/db/build_vxworks/db_config.h +++ b/db/build_vxworks/db_config.h @@ -54,9 +54,15 @@ /* Define to 1 if fcntl/F_SETFD denies child access to file descriptors. 
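The db_archive hunk above replaces the unconditional fallback open with one that is skipped when the first open fails with DB_VERSION_MISMATCH, so an environment built by a different release is reported instead of being silently recreated as a private one. A minimal sketch of that join-or-create pattern; the home path and error handling are illustrative and not part of the patch:

#include <stdio.h>
#include <db.h>

static int
open_env(DB_ENV **dbenvp, const char *home)
{
	DB_ENV *dbenv;
	int ret;

	if ((ret = db_env_create(&dbenv, 0)) != 0) {
		fprintf(stderr, "db_env_create: %s\n", db_strerror(ret));
		return (ret);
	}

	/* Prefer joining an environment that already exists. */
	ret = dbenv->open(dbenv, home, DB_JOINENV | DB_USE_ENVIRON, 0);

	/*
	 * A version mismatch means the region was created by a different
	 * release; retrying with DB_CREATE | DB_PRIVATE would only hide
	 * that, so fall back only for other failures.
	 */
	if (ret != 0 && ret != DB_VERSION_MISMATCH)
		ret = dbenv->open(dbenv, home,
		    DB_CREATE | DB_INIT_LOG | DB_PRIVATE | DB_USE_ENVIRON, 0);

	if (ret != 0) {
		dbenv->err(dbenv, ret, "DB_ENV->open");
		(void)dbenv->close(dbenv, 0);
		return (ret);
	}
	*dbenvp = dbenv;
	return (0);
}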
*/ /* #undef HAVE_FCNTL_F_SETFD */ +/* Define to 1 if you have the `fdatasync' function. */ +/* #undef HAVE_FDATASYNC */ + /* Define to 1 if allocated filesystem blocks are not zeroed. */ #define HAVE_FILESYSTEM_NOTZERO 1 +/* Define to 1 if you have the `ftruncate' function. */ +/* #undef HAVE_FTRUNCATE */ + /* Define to 1 if you have the `getcwd' function. */ #define HAVE_GETCWD 1 @@ -81,6 +87,9 @@ /* Define to 1 if you have the `nsl' library (-lnsl). */ /* #undef HAVE_LIBNSL */ +/* Define to 1 if the system has the type `long long'. */ +/* #undef HAVE_LONG_LONG */ + /* Define to 1 if you have the `memcmp' function. */ #define HAVE_MEMCMP 1 @@ -228,6 +237,9 @@ /* Define to 1 if you have the `raise' function. */ #define HAVE_RAISE 1 +/* Define to 1 if you have the `rand' function. */ +#define HAVE_RAND 1 + /* Define to 1 if building replication support. */ #define HAVE_REPLICATION 1 @@ -240,12 +252,21 @@ /* Define to 1 if you have the `select' function. */ #define HAVE_SELECT 1 +/* Define to 1 if building sequence support. */ +/* #undef HAVE_SEQUENCE */ + /* Define to 1 if you have the `shmget' function. */ /* #undef HAVE_SHMGET */ /* Define to 1 if you have the `snprintf' function. */ /* #undef HAVE_SNPRINTF */ +/* Define to 1 if you have the `srand' function. */ +#define HAVE_SRAND 1 + +/* Define to 1 if building statistics support. */ +#define HAVE_STATISTICS 1 + /* Define to 1 if you have the header file. */ /* #undef HAVE_STDINT_H */ @@ -305,6 +326,9 @@ /* Define to 1 if unlink of file with open file descriptors will fail. */ #define HAVE_UNLINK_WITH_OPEN_FAILURE 1 +/* Define to 1 if the system has the type `unsigned long long'. */ +/* #undef HAVE_UNSIGNED_LONG_LONG */ + /* Define to 1 if building access method verification support. */ #define HAVE_VERIFY 1 @@ -330,13 +354,13 @@ #define PACKAGE_NAME "Berkeley DB" /* Define to the full name and version of this package. */ -#define PACKAGE_STRING "Berkeley DB 4.2.52" +#define PACKAGE_STRING "Berkeley DB 4.3.14" /* Define to the one symbol short name of this package. */ -#define PACKAGE_TARNAME "db-4.2.52" +#define PACKAGE_TARNAME "db-4.3.14" /* Define to the version of this package. */ -#define PACKAGE_VERSION "4.2.52" +#define PACKAGE_VERSION "4.3.14" /* Define to 1 if the `S_IS*' macros in do not work properly. */ /* #undef STAT_MACROS_BROKEN */ @@ -347,7 +371,7 @@ /* Define to 1 if you can safely include both and . */ /* #undef TIME_WITH_SYS_TIME */ -/* Define to 1 to mask harmless unitialized memory read/writes. */ +/* Define to 1 to mask harmless uninitialized memory read/writes. */ /* #undef UMRW */ /* Number of bits in a file offset, on hosts where this is settable. */ diff --git a/db/build_vxworks/db_config_small.h b/db/build_vxworks/db_config_small.h new file mode 100644 index 000000000..82b5c905f --- /dev/null +++ b/db/build_vxworks/db_config_small.h @@ -0,0 +1,440 @@ +/* DO NOT EDIT: automatically built by dist/s_vxworks. */ +/* !!! + * The CONFIG_TEST option may be added using the Tornado project build. + * DO NOT modify it here. + */ +/* Define to 1 if you want to build a version for running the test suite. */ +/* #undef CONFIG_TEST */ + +/* We use DB_WIN32 much as one would use _WIN32 -- to specify that we're using + an operating system environment that supports Win32 calls and semantics. We + don't use _WIN32 because Cygwin/GCC also defines _WIN32, even though + Cygwin/GCC closely emulates the Unix environment. */ +/* #undef DB_WIN32 */ + +/* !!! + * The DEBUG option may be added using the Tornado project build. 
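Among the configuration switches added above is HAVE_SEQUENCE, and the db.h prototype list earlier in the patch gains db_sequence_create(). A sketch of how the new sequence feature is driven from application code; the DB_SEQUENCE handle methods used here (open, get, close) follow the 4.3 sequence interface as I recall it and do not appear in this hunk, so treat them and the key name as assumptions:

#include <string.h>
#include <db.h>

static int
next_id(DB *dbp, db_seq_t *idp)
{
	DB_SEQUENCE *seq;
	DBT key;
	int ret;

	/* db_sequence_create() is the entry point added in this patch. */
	if ((ret = db_sequence_create(&seq, dbp, 0)) != 0)
		return (ret);

	/* The sequence is stored under an ordinary key; the name is made up. */
	memset(&key, 0, sizeof(key));
	key.data = "my_sequence";
	key.size = sizeof("my_sequence") - 1;

	if ((ret = seq->open(seq, NULL, &key, DB_CREATE)) == 0)
		ret = seq->get(seq, NULL, 1, idp, 0);	/* take one value */

	(void)seq->close(seq, 0);
	return (ret);
}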
+ * DO NOT modify it here. + */ +/* Define to 1 if you want a debugging version. */ +/* #undef DEBUG */ + +/* Define to 1 if you want a version that logs read operations. */ +/* #undef DEBUG_ROP */ + +/* Define to 1 if you want a version that logs write operations. */ +/* #undef DEBUG_WOP */ + +/* !!! + * The DIAGNOSTIC option may be added using the Tornado project build. + * DO NOT modify it here. + */ +/* Define to 1 if you want a version with run-time diagnostic checking. */ +/* #undef DIAGNOSTIC */ + +/* Define to 1 if you have the `clock_gettime' function. */ +#define HAVE_CLOCK_GETTIME 1 + +/* Define to 1 if Berkeley DB release includes strong cryptography. */ +/* #undef HAVE_CRYPTO */ + +/* Define to 1 if you have the `directio' function. */ +/* #undef HAVE_DIRECTIO */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +#define HAVE_DIRENT_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DLFCN_H */ + +/* Define to 1 if you have EXIT_SUCCESS/EXIT_FAILURE #defines. */ +#define HAVE_EXIT_SUCCESS 1 + +/* Define to 1 if fcntl/F_SETFD denies child access to file descriptors. */ +/* #undef HAVE_FCNTL_F_SETFD */ + +/* Define to 1 if you have the `fdatasync' function. */ +/* #undef HAVE_FDATASYNC */ + +/* Define to 1 if allocated filesystem blocks are not zeroed. */ +#define HAVE_FILESYSTEM_NOTZERO 1 + +/* Define to 1 if you have the `ftruncate' function. */ +/* #undef HAVE_FTRUNCATE */ + +/* Define to 1 if you have the `getcwd' function. */ +#define HAVE_GETCWD 1 + +/* Define to 1 if you have the `getopt' function. */ +/* #undef HAVE_GETOPT */ + +/* Define to 1 if you have the `getrusage' function. */ +/* #undef HAVE_GETRUSAGE */ + +/* Define to 1 if you have the `gettimeofday' function. */ +/* #undef HAVE_GETTIMEOFDAY */ + +/* Define to 1 if you have the `getuid' function. */ +/* #undef HAVE_GETUID */ + +/* Define to 1 if building Hash access method. */ +/* #undef HAVE_HASH */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_INTTYPES_H */ + +/* Define to 1 if you have the `nsl' library (-lnsl). */ +/* #undef HAVE_LIBNSL */ + +/* Define to 1 if the system has the type `long long'. */ +/* #undef HAVE_LONG_LONG */ + +/* Define to 1 if you have the `memcmp' function. */ +#define HAVE_MEMCMP 1 + +/* Define to 1 if you have the `memcpy' function. */ +#define HAVE_MEMCPY 1 + +/* Define to 1 if you have the `memmove' function. */ +#define HAVE_MEMMOVE 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `mlock' function. */ +/* #undef HAVE_MLOCK */ + +/* Define to 1 if you have the `mmap' function. */ +/* #undef HAVE_MMAP */ + +/* Define to 1 if you have the `munlock' function. */ +/* #undef HAVE_MUNLOCK */ + +/* Define to 1 if you have the `munmap' function. */ +/* #undef HAVE_MUNMAP */ + +/* Define to 1 to use the GCC compiler and 68K assembly language mutexes. */ +/* #undef HAVE_MUTEX_68K_GCC_ASSEMBLY */ + +/* Define to 1 to use the AIX _check_lock mutexes. */ +/* #undef HAVE_MUTEX_AIX_CHECK_LOCK */ + +/* Define to 1 to use the GCC compiler and Alpha assembly language mutexes. */ +/* #undef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY */ + +/* Define to 1 to use the GCC compiler and ARM assembly language mutexes. */ +/* #undef HAVE_MUTEX_ARM_GCC_ASSEMBLY */ + +/* Define to 1 to use the Apple/Darwin _spin_lock_try mutexes. */ +/* #undef HAVE_MUTEX_DARWIN_SPIN_LOCK_TRY */ + +/* Define to 1 to use the UNIX fcntl system call mutexes. 
*/ +/* #undef HAVE_MUTEX_FCNTL */ + +/* Define to 1 to use the GCC compiler and PaRisc assembly language mutexes. + */ +/* #undef HAVE_MUTEX_HPPA_GCC_ASSEMBLY */ + +/* Define to 1 to use the msem_XXX mutexes on HP-UX. */ +/* #undef HAVE_MUTEX_HPPA_MSEM_INIT */ + +/* Define to 1 to use the GCC compiler and IA64 assembly language mutexes. */ +/* #undef HAVE_MUTEX_IA64_GCC_ASSEMBLY */ + +/* Define to 1 to use the msem_XXX mutexes on systems other than HP-UX. */ +/* #undef HAVE_MUTEX_MSEM_INIT */ + +/* Define to 1 to use the GCC compiler and PowerPC assembly language mutexes. + */ +/* #undef HAVE_MUTEX_PPC_GCC_ASSEMBLY */ + +/* Define to 1 to use POSIX 1003.1 pthread_XXX mutexes. */ +/* #undef HAVE_MUTEX_PTHREADS */ + +/* Define to 1 to use Reliant UNIX initspin mutexes. */ +/* #undef HAVE_MUTEX_RELIANTUNIX_INITSPIN */ + +/* Define to 1 to use the IBM C compiler and S/390 assembly language mutexes. + */ +/* #undef HAVE_MUTEX_S390_CC_ASSEMBLY */ + +/* Define to 1 to use the GCC compiler and S/390 assembly language mutexes. */ +/* #undef HAVE_MUTEX_S390_GCC_ASSEMBLY */ + +/* Define to 1 to use the SCO compiler and x86 assembly language mutexes. */ +/* #undef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY */ + +/* Define to 1 to use the obsolete POSIX 1003.1 sema_XXX mutexes. */ +/* #undef HAVE_MUTEX_SEMA_INIT */ + +/* Define to 1 to use the SGI XXX_lock mutexes. */ +/* #undef HAVE_MUTEX_SGI_INIT_LOCK */ + +/* Define to 1 to use the Solaris _lock_XXX mutexes. */ +/* #undef HAVE_MUTEX_SOLARIS_LOCK_TRY */ + +/* Define to 1 to use the Solaris lwp threads mutexes. */ +/* #undef HAVE_MUTEX_SOLARIS_LWP */ + +/* Define to 1 to use the GCC compiler and Sparc assembly language mutexes. */ +/* #undef HAVE_MUTEX_SPARC_GCC_ASSEMBLY */ + +/* Define to 1 if mutexes hold system resources. */ +#define HAVE_MUTEX_SYSTEM_RESOURCES 1 + +/* Define to 1 if fast mutexes are available. */ +#define HAVE_MUTEX_THREADS 1 + +/* Define to 1 to configure mutexes intra-process only. */ +/* #undef HAVE_MUTEX_THREAD_ONLY */ + +/* Define to 1 to use the CC compiler and Tru64 assembly language mutexes. */ +/* #undef HAVE_MUTEX_TRU64_CC_ASSEMBLY */ + +/* Define to 1 to use the UNIX International mutexes. */ +/* #undef HAVE_MUTEX_UI_THREADS */ + +/* Define to 1 to use the UTS compiler and assembly language mutexes. */ +/* #undef HAVE_MUTEX_UTS_CC_ASSEMBLY */ + +/* Define to 1 to use VMS mutexes. */ +/* #undef HAVE_MUTEX_VMS */ + +/* Define to 1 to use VxWorks mutexes. */ +#define HAVE_MUTEX_VXWORKS 1 + +/* Define to 1 to use the MSVC compiler and Windows mutexes. */ +/* #undef HAVE_MUTEX_WIN32 */ + +/* Define to 1 to use the GCC compiler and Windows mutexes. */ +/* #undef HAVE_MUTEX_WIN32_GCC */ + +/* Define to 1 to use the GCC compiler and x86 assembly language mutexes. */ +/* #undef HAVE_MUTEX_X86_GCC_ASSEMBLY */ + +/* Define to 1 if you have the header file, and it defines `DIR'. */ +/* #undef HAVE_NDIR_H */ + +/* Define to 1 if you have the O_DIRECT flag. */ +/* #undef HAVE_O_DIRECT */ + +/* Define to 1 if you have the `pread' function. */ +/* #undef HAVE_PREAD */ + +/* Define to 1 if you have the `pstat_getdynamic' function. */ +/* #undef HAVE_PSTAT_GETDYNAMIC */ + +/* Define to 1 if you have the `pwrite' function. */ +/* #undef HAVE_PWRITE */ + +/* Define to 1 if building on QNX. */ +/* #undef HAVE_QNX */ + +/* Define to 1 if building Queue access method. */ +/* #undef HAVE_QUEUE */ + +/* Define to 1 if you have the `raise' function. */ +#define HAVE_RAISE 1 + +/* Define to 1 if you have the `rand' function. 
*/ +#define HAVE_RAND 1 + +/* Define to 1 if building replication support. */ +/* #undef HAVE_REPLICATION */ + +/* Define to 1 if building RPC client/server. */ +/* #undef HAVE_RPC */ + +/* Define to 1 if you have the `sched_yield' function. */ +#define HAVE_SCHED_YIELD 1 + +/* Define to 1 if you have the `select' function. */ +#define HAVE_SELECT 1 + +/* Define to 1 if building sequence support. */ +/* #undef HAVE_SEQUENCE */ + +/* Define to 1 if you have the `shmget' function. */ +/* #undef HAVE_SHMGET */ + +/* Define to 1 if you have the `snprintf' function. */ +/* #undef HAVE_SNPRINTF */ + +/* Define to 1 if you have the `srand' function. */ +#define HAVE_SRAND 1 + +/* Define to 1 if building statistics support. */ +/* #undef HAVE_STATISTICS */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_STDINT_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the `strcasecmp' function. */ +/* #undef HAVE_STRCASECMP */ + +/* Define to 1 if you have the `strdup' function. */ +/* #undef HAVE_STRDUP */ + +/* Define to 1 if you have the `strerror' function. */ +#define HAVE_STRERROR 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the `strtoul' function. */ +#define HAVE_STRTOUL 1 + +/* Define to 1 if `st_blksize' is member of `struct stat'. */ +#define HAVE_STRUCT_STAT_ST_BLKSIZE 1 + +/* Define to 1 if you have the `sysconf' function. */ +/* #undef HAVE_SYSCONF */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_DIR_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_FCNTL_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_NDIR_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_SELECT_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_STAT_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_TIME_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_TYPES_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if unlink of file with open file descriptors will fail. */ +#define HAVE_UNLINK_WITH_OPEN_FAILURE 1 + +/* Define to 1 if the system has the type `unsigned long long'. */ +/* #undef HAVE_UNSIGNED_LONG_LONG */ + +/* Define to 1 if building access method verification support. */ +/* #undef HAVE_VERIFY */ + +/* Define to 1 if you have the `vsnprintf' function. */ +/* #undef HAVE_VSNPRINTF */ + +/* Define to 1 if building VxWorks. */ +#define HAVE_VXWORKS 1 + +/* Define to 1 if you have the `yield' function. */ +/* #undef HAVE_YIELD */ + +/* Define to 1 if you have the `_fstati64' function. */ +/* #undef HAVE__FSTATI64 */ + +/* Define to a value if using non-standard mutex alignment. */ +/* #undef MUTEX_ALIGN */ + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "support@sleepycat.com" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "Berkeley DB" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "Berkeley DB 4.3.14" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "db-4.3.14" + +/* Define to the version of this package. 
*/ +#define PACKAGE_VERSION "4.3.14" + +/* Define to 1 if the `S_IS*' macros in do not work properly. */ +/* #undef STAT_MACROS_BROKEN */ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define to 1 if you can safely include both and . */ +/* #undef TIME_WITH_SYS_TIME */ + +/* Define to 1 to mask harmless uninitialized memory read/writes. */ +/* #undef UMRW */ + +/* Number of bits in a file offset, on hosts where this is settable. */ +/* #undef _FILE_OFFSET_BITS */ + +/* Define for large files, on AIX-style hosts. */ +/* #undef _LARGE_FILES */ + +/* Define to empty if `const' does not conform to ANSI C. */ +/* #undef const */ + +/* + * Exit success/failure macros. + */ +#ifndef HAVE_EXIT_SUCCESS +#define EXIT_FAILURE 1 +#define EXIT_SUCCESS 0 +#endif + +/* + * Don't step on the namespace. Other libraries may have their own + * implementations of these functions, we don't want to use their + * implementations or force them to use ours based on the load order. + */ +#ifndef HAVE_GETCWD +#define getcwd __db_Cgetcwd +#endif +#ifndef HAVE_GETOPT +#define getopt __db_Cgetopt +#define optarg __db_Coptarg +#define opterr __db_Copterr +#define optind __db_Coptind +#define optopt __db_Coptopt +#endif +#ifndef HAVE_MEMCMP +#define memcmp __db_Cmemcmp +#endif +#ifndef HAVE_MEMCPY +#define memcpy __db_Cmemcpy +#endif +#ifndef HAVE_MEMMOVE +#define memmove __db_Cmemmove +#endif +#ifndef HAVE_RAISE +#define raise __db_Craise +#endif +#ifndef HAVE_SNPRINTF +#define snprintf __db_Csnprintf +#endif +#ifndef HAVE_STRCASECMP +#define strcasecmp __db_Cstrcasecmp +#define strncasecmp __db_Cstrncasecmp +#endif +#ifndef HAVE_STRERROR +#define strerror __db_Cstrerror +#endif +#ifndef HAVE_VSNPRINTF +#define vsnprintf __db_Cvsnprintf +#endif + +/* + * !!! + * The following is not part of the automatic configuration setup, but + * provides the information necessary to build Berkeley DB on VxWorks. + */ +#include "vxWorks.h" diff --git a/db/build_vxworks/db_deadlock/db_deadlock.c b/db/build_vxworks/db_deadlock/db_deadlock.c index bb5dff9f0..32689d203 100644 --- a/db/build_vxworks/db_deadlock/db_deadlock.c +++ b/db/build_vxworks/db_deadlock/db_deadlock.c @@ -1,17 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_deadlock.c,v 11.45 2004/03/24 15:13:12 bostic Exp $ */ #include "db_config.h" #ifndef lint static const char copyright[] = - "Copyright (c) 1996-2003\nSleepycat Software Inc. All rights reserved.\n"; -static const char revid[] = - "$Id: db_deadlock.c,v 11.41 2003/06/17 14:36:44 bostic Exp $"; + "Copyright (c) 1996-2004\nSleepycat Software Inc. All rights reserved.\n"; #endif #ifndef NO_SYSTEM_INCLUDES @@ -95,6 +95,9 @@ db_deadlock_main(argc, argv) case 'o': atype = DB_LOCK_OLDEST; break; + case 'W': + atype = DB_LOCK_MAXWRITE; + break; case 'w': atype = DB_LOCK_MINWRITE; break; @@ -176,8 +179,8 @@ db_deadlock_main(argc, argv) } /* An environment is required. */ - if ((ret = dbenv->open(dbenv, home, - DB_JOINENV | DB_USE_ENVIRON, 0)) != 0) { + if ((ret = + dbenv->open(dbenv, home, DB_INIT_LOCK | DB_USE_ENVIRON, 0)) != 0) { dbenv->err(dbenv, ret, "open"); goto shutdown; } @@ -196,7 +199,7 @@ db_deadlock_main(argc, argv) /* Make a pass every "secs" secs and "usecs" usecs. 
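The db_deadlock changes above add a -W option selecting the new DB_LOCK_MAXWRITE abort policy and open the environment with DB_INIT_LOCK rather than DB_JOINENV. A minimal sketch of the corresponding lock_detect() loop; the one-second interval, use of sleep(), and error handling are illustrative (the utility itself uses __os_sleep as shown above):

#include <stdio.h>
#include <unistd.h>
#include <db.h>

static int
detect_loop(DB_ENV *dbenv)
{
	int rejected, ret;

	for (;;) {
		/* DB_LOCK_MAXWRITE aborts the locker with the most write locks. */
		if ((ret = dbenv->lock_detect(dbenv,
		    0, DB_LOCK_MAXWRITE, &rejected)) != 0) {
			dbenv->err(dbenv, ret, "DB_ENV->lock_detect");
			return (ret);
		}
		if (rejected != 0)
			printf("rejected %d lock requests\n", rejected);
		sleep(1);
	}
	/* NOTREACHED */
}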
*/ if (secs == 0 && usecs == 0) break; - (void)__os_sleep(dbenv, secs, usecs); + __os_sleep(dbenv, secs, usecs); } if (0) { @@ -225,7 +228,7 @@ db_deadlock_usage() { (void)fprintf(stderr, "%s\n\t%s\n", "usage: db_deadlock [-Vv]", - "[-a e | m | n | o | w | y] [-h home] [-L file] [-t sec.usec]"); + "[-a e | m | n | o | W | w | y] [-h home] [-L file] [-t sec.usec]"); return (EXIT_FAILURE); } diff --git a/db/build_vxworks/db_dump/db_dump.c b/db/build_vxworks/db_dump/db_dump.c index a14427a78..048bed6b9 100644 --- a/db/build_vxworks/db_dump/db_dump.c +++ b/db/build_vxworks/db_dump/db_dump.c @@ -1,17 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_dump.c,v 11.99 2004/10/11 18:53:13 bostic Exp $ */ #include "db_config.h" #ifndef lint static const char copyright[] = - "Copyright (c) 1996-2003\nSleepycat Software Inc. All rights reserved.\n"; -static const char revid[] = - "$Id: db_dump.c,v 11.88 2003/08/13 19:57:06 ubell Exp $"; + "Copyright (c) 1996-2004\nSleepycat Software Inc. All rights reserved.\n"; #endif #ifndef NO_SYSTEM_INCLUDES @@ -28,7 +28,6 @@ static const char revid[] = #include "dbinc/db_am.h" int db_dump_db_init __P((DB_ENV *, char *, int, u_int32_t, int *)); -int db_dump_dump __P((DB *, int, int)); int db_dump_dump_sub __P((DB_ENV *, DB *, char *, int, int)); int db_dump_is_sub __P((DB *, int *)); int db_dump_main __P((int, char *[])); @@ -226,7 +225,7 @@ retry: if ((ret = db_env_create(&dbenv, 0)) != 0) { goto err; } if (private != 0) { - if ((ret = __db_util_cache(dbenv, dbp, &cache, &resize)) != 0) + if ((ret = __db_util_cache(dbp, &cache, &resize)) != 0) goto err; if (resize) { (void)dbp->close(dbp, 0); @@ -239,8 +238,8 @@ retry: if ((ret = db_env_create(&dbenv, 0)) != 0) { } if (dopt != NULL) { - if (__db_dump(dbp, dopt, NULL)) { - dbp->err(dbp, ret, "__db_dump: %s", argv[0]); + if ((ret = __db_dumptree(dbp, dopt, NULL)) != 0) { + dbp->err(dbp, ret, "__db_dumptree: %s", argv[0]); goto err; } } else if (lflag) { @@ -261,9 +260,8 @@ retry: if ((ret = db_env_create(&dbenv, 0)) != 0) { if (db_dump_dump_sub(dbenv, dbp, argv[0], pflag, keyflag)) goto err; } else - if (__db_prheader(dbp, NULL, pflag, keyflag, stdout, - __db_pr_callback, NULL, 0) || - db_dump_dump(dbp, pflag, keyflag)) + if (dbp->dump(dbp, NULL, + __db_pr_callback, stdout, pflag, keyflag)) goto err; } @@ -322,9 +320,11 @@ db_dump_db_init(dbenv, home, is_salvage, cache, is_privatep) * before we create our own. */ *is_privatep = 0; - if (dbenv->open(dbenv, home, - DB_USE_ENVIRON | (is_salvage ? DB_INIT_MPOOL : DB_JOINENV), 0) == 0) + if ((ret = dbenv->open(dbenv, home, DB_USE_ENVIRON | + (is_salvage ? DB_INIT_MPOOL : DB_JOINENV), 0)) == 0) return (0); + if (ret == DB_VERSION_MISMATCH) + goto err; /* * An environment is required because we may be trying to look at @@ -343,7 +343,7 @@ db_dump_db_init(dbenv, home, is_salvage, cache, is_privatep) return (0); /* An environment is required. 
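The db_dump hunks above track the 4.3 DB->stat() signature, which now takes a transaction handle as its second argument (passed as NULL in the utilities). A short sketch of the same call against a btree database; the statistics block is allocated by the library and released with free(), as in db_dump:

#include <stdio.h>
#include <stdlib.h>
#include <db.h>

static int
print_key_count(DB *dbp)
{
	DB_BTREE_STAT *sp;
	int ret;

	/* NULL transaction handle, fast (non-traversing) statistics. */
	if ((ret = dbp->stat(dbp, NULL, &sp, DB_FAST_STAT)) != 0) {
		dbp->err(dbp, ret, "DB->stat");
		return (ret);
	}
	printf("keys: %lu\n", (u_long)sp->bt_nkeys);
	free(sp);
	return (0);
}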
*/ - dbenv->err(dbenv, ret, "open"); +err: dbenv->err(dbenv, ret, "DB_ENV->open"); return (1); } @@ -363,7 +363,7 @@ db_dump_is_sub(dbp, yesno) switch (dbp->type) { case DB_BTREE: case DB_RECNO: - if ((ret = dbp->stat(dbp, &btsp, DB_FAST_STAT)) != 0) { + if ((ret = dbp->stat(dbp, NULL, &btsp, DB_FAST_STAT)) != 0) { dbp->err(dbp, ret, "DB->stat"); return (ret); } @@ -371,7 +371,7 @@ db_dump_is_sub(dbp, yesno) free(btsp); break; case DB_HASH: - if ((ret = dbp->stat(dbp, &hsp, DB_FAST_STAT)) != 0) { + if ((ret = dbp->stat(dbp, NULL, &hsp, DB_FAST_STAT)) != 0) { dbp->err(dbp, ret, "DB->stat"); return (ret); } @@ -435,10 +435,8 @@ db_dump_dump_sub(dbenv, parent_dbp, parent_name, pflag, keyflag) parent_name, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0) dbp->err(dbp, ret, "DB->open: %s:%s", parent_name, subdb); - if (ret == 0 && - (__db_prheader(dbp, subdb, pflag, keyflag, stdout, - __db_pr_callback, NULL, 0) || - db_dump_dump(dbp, pflag, keyflag))) + if (ret == 0 && dbp->dump( + dbp, subdb, __db_pr_callback, stdout, pflag, keyflag)) ret = 1; (void)dbp->close(dbp, 0); free(subdb); @@ -482,8 +480,8 @@ db_dump_show_subs(dbp) memset(&key, 0, sizeof(key)); memset(&data, 0, sizeof(data)); while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0) { - if ((ret = __db_prdbt(&key, 1, NULL, stdout, - __db_pr_callback, 0, NULL)) != 0) { + if ((ret = dbp->dbenv->prdbt( + &key, 1, NULL, stdout, __db_pr_callback, 0)) != 0) { dbp->errx(dbp, NULL); return (1); } @@ -500,105 +498,6 @@ db_dump_show_subs(dbp) return (0); } -/* - * dump -- - * Dump out the records for a DB. - */ -int -db_dump_dump(dbp, pflag, keyflag) - DB *dbp; - int pflag, keyflag; -{ - DBC *dbcp; - DBT key, data; - DBT keyret, dataret; - db_recno_t recno; - int is_recno, failed, ret; - void *pointer; - - /* - * Get a cursor and step through the database, printing out each - * key/data pair. - */ - if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) { - dbp->err(dbp, ret, "DB->cursor"); - return (1); - } - - failed = 0; - memset(&key, 0, sizeof(key)); - memset(&data, 0, sizeof(data)); - data.data = malloc(1024 * 1024); - if (data.data == NULL) { - dbp->err(dbp, ENOMEM, "bulk get buffer"); - failed = 1; - goto err; - } - data.ulen = 1024 * 1024; - data.flags = DB_DBT_USERMEM; - is_recno = (dbp->type == DB_RECNO || dbp->type == DB_QUEUE); - keyflag = is_recno ? 
keyflag : 1; - if (is_recno) { - keyret.data = &recno; - keyret.size = sizeof(recno); - } - -retry: - while ((ret = - dbcp->c_get(dbcp, &key, &data, DB_NEXT | DB_MULTIPLE_KEY)) == 0) { - DB_MULTIPLE_INIT(pointer, &data); - for (;;) { - if (is_recno) - DB_MULTIPLE_RECNO_NEXT(pointer, &data, - recno, dataret.data, dataret.size); - else - DB_MULTIPLE_KEY_NEXT(pointer, - &data, keyret.data, - keyret.size, dataret.data, dataret.size); - - if (dataret.data == NULL) - break; - - if ((keyflag && (ret = __db_prdbt(&keyret, - pflag, " ", stdout, __db_pr_callback, - is_recno, NULL)) != 0) || (ret = - __db_prdbt(&dataret, pflag, " ", stdout, - __db_pr_callback, 0, NULL)) != 0) { - dbp->errx(dbp, NULL); - failed = 1; - goto err; - } - } - } - if (ret == ENOMEM) { - data.size = ALIGN(data.size, 1024); - data.data = realloc(data.data, data.size); - if (data.data == NULL) { - dbp->err(dbp, ENOMEM, "bulk get buffer"); - failed = 1; - goto err; - } - data.ulen = data.size; - goto retry; - } - - if (ret != DB_NOTFOUND) { - dbp->err(dbp, ret, "DBcursor->get"); - failed = 1; - } - -err: if (data.data != NULL) - free(data.data); - - if ((ret = dbcp->c_close(dbcp)) != 0) { - dbp->err(dbp, ret, "DBcursor->close"); - failed = 1; - } - - (void)__db_prfooter(stdout, __db_pr_callback); - return (failed); -} - /* * usage -- * Display the usage message. diff --git a/db/build_vxworks/db_int.h b/db/build_vxworks/db_int.h index 0f7d51303..ed7a86900 100644 --- a/db/build_vxworks/db_int.h +++ b/db/build_vxworks/db_int.h @@ -2,10 +2,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: db_int.in,v 11.126 2003/09/10 17:27:14 sue Exp $ + * $Id: db_int.in,v 11.153 2004/10/05 14:43:53 mjc Exp $ */ #ifndef _DB_INTERNAL_H_ @@ -36,8 +36,39 @@ extern "C" { /******************************************************* * General purpose constants and macros. *******************************************************/ -#define UINT16_T_MAX 0xffff /* Maximum 16 bit unsigned. */ -#define UINT32_T_MAX 0xffffffff /* Maximum 32 bit unsigned. */ +#ifndef UINT16_MAX +#define UINT16_MAX 65535 /* Maximum 16-bit unsigned. */ +#endif +#ifndef UINT32_MAX +#define UINT32_MAX 4294967295U /* Maximum 32-bit unsigned. */ +#endif + +#if defined(HAVE_LONG_LONG) && defined(HAVE_UNSIGNED_LONG_LONG) +#undef INT64_MAX +#undef INT64_MIN +#undef UINT64_MAX + +#ifdef DB_WIN32 +#define INT64_MAX _I64_MAX +#define INT64_MIN _I64_MIN +#define UINT64_MAX _UI64_MAX + +#define INT64_FMT "%l64d" +#define UINT64_FMT "%l64u" +#else +/* + * Override the system's 64-bit min/max constants. AIX's 32-bit compiler can + * handle 64-bit values, but the system's constants don't include the LL/ULL + * suffix, and so can't be compiled using the 32-bit compiler. + */ +#define INT64_MAX 9223372036854775807LL +#define INT64_MIN (-INT64_MAX-1) +#define UINT64_MAX 18446744073709551615ULL + +#define INT64_FMT "%lld" +#define UINT64_FMT "%llu" +#endif /* DB_WIN32 */ +#endif /* HAVE_LONG_LONG && HAVE_UNSIGNED_LONG_LONG */ #define MEGABYTE 1048576 #define GIGABYTE 1073741824 @@ -65,53 +96,38 @@ extern "C" { */ #define DB_DEF_IOSIZE (8 * 1024) -/* Number of times to reties I/O operations that return EINTR or EBUSY. */ -#define DB_RETRY 100 +/* Align an integer to a specific boundary. */ +#undef DB_ALIGN +#define DB_ALIGN(v, bound) \ + (((v) + (bound) - 1) & ~(((uintmax_t)bound) - 1)) -/* - * Aligning items to particular sizes or in pages or memory. 
- * - * db_align_t -- - * Largest integral type, used to align structures in memory. We don't store - * floating point types in structures, so integral types should be sufficient - * (and we don't have to worry about systems that store floats in other than - * power-of-2 numbers of bytes). Additionally this fixes compiler that rewrite - * structure assignments and ANSI C memcpy calls to be in-line instructions - * that happen to require alignment. Note: this alignment isn't sufficient for - * mutexes, which depend on things like cache line alignment. Mutex alignment - * is handled separately, in mutex.h. - * - * db_alignp_t -- - * Integral type that's the same size as a pointer. There are places where - * DB modifies pointers by discarding the bottom bits to guarantee alignment. - * We can't use db_align_t, it may be larger than the pointer, and compilers - * get upset about that. So far we haven't run on any machine where there - * isn't an integral type the same size as a pointer -- here's hoping. - */ -typedef unsigned long db_align_t; -typedef unsigned long db_alignp_t; +/* Increment a pointer to a specific boundary. */ +#undef ALIGNP_INC +#define ALIGNP_INC(p, bound) \ + (void *)(((uintptr_t)(p) + (bound) - 1) & ~(((uintptr_t)bound) - 1)) -/* Align an integer to a specific boundary. */ -#undef ALIGN -#define ALIGN(v, bound) (((v) + (bound) - 1) & ~(((db_align_t)bound) - 1)) +/* Decrement a pointer to a specific boundary. */ +#undef ALIGNP_DEC +#define ALIGNP_DEC(p, bound) \ + (void *)((uintptr_t)(p) & ~(((uintptr_t)bound) - 1)) /* * Print an address as a u_long (a u_long is the largest type we can print * portably). Most 64-bit systems have made longs 64-bits, so this should * work. */ -#define P_TO_ULONG(p) ((u_long)(db_alignp_t)(p)) +#define P_TO_ULONG(p) ((u_long)(uintptr_t)(p)) /* * Convert a pointer to a small integral value. * - * The (u_int16_t)(db_alignp_t) cast avoids warnings: the (db_alignp_t) cast + * The (u_int16_t)(uintptr_t) cast avoids warnings: the (uintptr_t) cast * converts the value to an integral type, and the (u_int16_t) cast converts * it to a small integral type so we don't get complaints when we assign the - * final result to an integral type smaller than db_alignp_t. + * final result to an integral type smaller than uintptr_t. */ -#define P_TO_UINT32(p) ((u_int32_t)(db_alignp_t)(p)) -#define P_TO_UINT16(p) ((u_int16_t)(db_alignp_t)(p)) +#define P_TO_UINT32(p) ((u_int32_t)(uintptr_t)(p)) +#define P_TO_UINT16(p) ((u_int16_t)(uintptr_t)(p)) /* * There are several on-page structures that are declared to have a number of @@ -149,9 +165,64 @@ typedef struct __fn { #define LF_ISSET(f) ((flags) & (f)) #define LF_SET(f) ((flags) |= (f)) -/* Display separator string. */ -#undef DB_LINE -#define DB_LINE "=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=" +/* + * Calculate a percentage. The values can overflow 32-bit integer arithmetic + * so we use floating point. + * + * When calculating a bytes-vs-page size percentage, we're getting the inverse + * of the percentage in all cases, that is, we want 100 minus the percentage we + * calculate. + */ +#define DB_PCT(v, total) \ + ((int)((total) == 0 ? 0 : ((double)(v) * 100) / (total))) +#define DB_PCT_PG(v, total, pgsize) \ + ((int)((total) == 0 ? 0 : \ + 100 - ((double)(v) * 100) / ((total) * (pgsize)))) + +/* + * Structure used for callback message aggregation. + * + * Display values in XXX_stat_print calls. + */ +typedef struct __db_msgbuf { + char *buf; /* Heap allocated buffer. 
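The alignment and percentage macros introduced above replace db_align_t/db_alignp_t with the standard uintmax_t/uintptr_t types. A few worked values as a sanity check; the macro definitions are copied from the hunk above and main() is only a driver:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Copied from the db_int.h hunk above. */
#define DB_ALIGN(v, bound)						\
	(((v) + (bound) - 1) & ~(((uintmax_t)bound) - 1))
#define ALIGNP_INC(p, bound)						\
	(void *)(((uintptr_t)(p) + (bound) - 1) & ~(((uintptr_t)bound) - 1))
#define DB_PCT(v, total)						\
	((int)((total) == 0 ? 0 : ((double)(v) * 100) / (total)))

int
main(void)
{
	char buf[16];

	assert(DB_ALIGN(7, 4) == 8);	/* round 7 up to a 4-byte boundary */
	assert(DB_ALIGN(8, 4) == 8);	/* already-aligned values are kept */
	assert(DB_PCT(250, 1000) == 25);/* simple percentage, 0 when total == 0 */

	/* ALIGNP_INC rounds a pointer up to the requested boundary. */
	printf("%p -> %p\n", (void *)(buf + 1), ALIGNP_INC(buf + 1, 8));
	return (0);
}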
*/ + char *cur; /* Current end of message. */ + size_t len; /* Allocated length of buffer. */ +} DB_MSGBUF; +#define DB_MSGBUF_INIT(a) do { \ + (a)->buf = (a)->cur = NULL; \ + (a)->len = 0; \ +} while (0) +#define DB_MSGBUF_FLUSH(dbenv, a) do { \ + if ((a)->buf != NULL) { \ + if ((a)->cur != (a)->buf) \ + __db_msg(dbenv, "%s", (a)->buf); \ + __os_free(dbenv, (a)->buf); \ + DB_MSGBUF_INIT(a); \ + } \ +} while (0) +#define STAT_FMT(msg, fmt, type, v) do { \ + DB_MSGBUF __mb; \ + DB_MSGBUF_INIT(&__mb); \ + __db_msgadd(dbenv, &__mb, fmt, (type)(v)); \ + __db_msgadd(dbenv, &__mb, "\t%s", msg); \ + DB_MSGBUF_FLUSH(dbenv, &__mb); \ +} while (0) +#define STAT_HEX(msg, v) \ + __db_msg(dbenv, "%#lx\t%s", (u_long)(v), msg) +#define STAT_ISSET(msg, p) \ + __db_msg(dbenv, "%sSet\t%s", (p) == NULL ? "!" : " ", msg) +#define STAT_LONG(msg, v) \ + __db_msg(dbenv, "%ld\t%s", (long)(v), msg) +#define STAT_LSN(msg, lsnp) \ + __db_msg(dbenv, "%lu/%lu\t%s", \ + (u_long)(lsnp)->file, (u_long)(lsnp)->offset, msg) +#define STAT_STRING(msg, p) do { \ + const char *__p = p; /* p may be a function call. */ \ + __db_msg(dbenv, "%s\t%s", __p == NULL ? "!Set" : __p, msg); \ +} while (0) +#define STAT_ULONG(msg, v) \ + __db_msg(dbenv, "%lu\t%s", (u_long)(v), msg) /******************************************************* * API return values @@ -177,7 +248,8 @@ typedef struct __fn { (ret) == DB_REP_ISPERM || \ (ret) == DB_REP_NEWMASTER || \ (ret) == DB_REP_NEWSITE || \ - (ret) == DB_REP_NOTPERM) + (ret) == DB_REP_NOTPERM || \ + (ret) == DB_REP_STARTUPDONE) /* Find a reasonable operation-not-supported error. */ #ifdef EOPNOTSUPP @@ -263,6 +335,9 @@ typedef enum { #define ENV_REQUIRES_CONFIG(dbenv, handle, i, flags) \ if (handle == NULL) \ return (__db_env_config(dbenv, i, flags)); +#define ENV_NOT_CONFIGURED(dbenv, handle, i, flags) \ + if (F_ISSET((dbenv), DB_ENV_OPEN_CALLED)) \ + ENV_REQUIRES_CONFIG(dbenv, handle, i, flags) /******************************************************* * Database Access Methods. @@ -386,11 +461,11 @@ typedef struct __dbpginfo { } while (0) #define MAX_LSN(LSN) do { \ - (LSN).file = UINT32_T_MAX; \ - (LSN).offset = UINT32_T_MAX; \ + (LSN).file = UINT32_MAX; \ + (LSN).offset = UINT32_MAX; \ } while (0) #define IS_MAX_LSN(LSN) \ - ((LSN).file == UINT32_T_MAX && (LSN).offset == UINT32_T_MAX) + ((LSN).file == UINT32_MAX && (LSN).offset == UINT32_MAX) /* If logging is turned off, smash the lsn. */ #define LSN_NOT_LOGGED(LSN) do { \ @@ -404,6 +479,8 @@ typedef struct __dbpginfo { * Txn. *******************************************************/ #define DB_NONBLOCK(C) ((C)->txn != NULL && F_ISSET((C)->txn, TXN_NOWAIT)) +#define NOWAIT_FLAG(txn) \ + ((txn) != NULL && F_ISSET((txn), TXN_NOWAIT) ? DB_LOCK_NOWAIT : 0) #define IS_SUBTRANSACTION(txn) \ ((txn) != NULL && (txn)->parent != NULL) @@ -413,6 +490,33 @@ typedef struct __dbpginfo { #define DB_IV_BYTES 16 /* Bytes per IV */ #define DB_MAC_KEY 20 /* Bytes per MAC checksum */ +/******************************************************* + * Secondaries over RPC. + *******************************************************/ +#ifdef CONFIG_TEST +/* + * These are flags passed to DB->associate calls by the Tcl API if running + * over RPC. The RPC server will mask out these flags before making the real + * DB->associate call. + * + * These flags must coexist with the valid flags to DB->associate (currently + * DB_AUTO_COMMIT and DB_CREATE). 
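The DB_MSGBUF and STAT_* macros above are the aggregation layer the new *_stat_print functions are built on. A sketch of how a statistics routine inside the library might emit lines with them; the counter names are illustrative, and the fragment only compiles in the library's internal context where db_int.h and the __db_msg/__db_msgadd routines referenced by the macros are visible:

/*
 * Sketch of a stat_print-style routine built on the macros above.
 * The field names (st_nrequests, st_nreleases) are made up.
 */
static void
__sketch_stat_print(DB_ENV *dbenv, u_long st_nrequests, u_long st_nreleases)
{
	DB_MSGBUF mb;

	/* One value, one tab, one description per line. */
	STAT_ULONG("Lock requests", st_nrequests);
	STAT_ULONG("Lock releases", st_nreleases);

	/* Equivalent long form: aggregate, then flush as a single message. */
	DB_MSGBUF_INIT(&mb);
	__db_msgadd(dbenv, &mb, "%lu", st_nrequests);
	__db_msgadd(dbenv, &mb, "\t%s", "Lock requests");
	DB_MSGBUF_FLUSH(dbenv, &mb);
}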
DB_AUTO_COMMIT is in the group of + * high-order shared flags (0xff000000), and DB_CREATE is in the low-order + * group (0x00000fff), so we pick a range in between. + */ +#define DB_RPC2ND_MASK 0x00f00000 /* Reserved bits. */ + +#define DB_RPC2ND_REVERSEDATA 0x00100000 /* callback_n(0) _s_reversedata. */ +#define DB_RPC2ND_NOOP 0x00200000 /* callback_n(1) _s_noop */ +#define DB_RPC2ND_CONCATKEYDATA 0x00300000 /* callback_n(2) _s_concatkeydata */ +#define DB_RPC2ND_CONCATDATAKEY 0x00400000 /* callback_n(3) _s_concatdatakey */ +#define DB_RPC2ND_REVERSECONCAT 0x00500000 /* callback_n(4) _s_reverseconcat */ +#define DB_RPC2ND_TRUNCDATA 0x00600000 /* callback_n(5) _s_truncdata */ +#define DB_RPC2ND_CONSTANT 0x00700000 /* callback_n(6) _s_constant */ +#define DB_RPC2ND_GETZIP 0x00800000 /* sj_getzip */ +#define DB_RPC2ND_GETNAME 0x00900000 /* sj_getname */ +#endif + /******************************************************* * Forward structure declarations. *******************************************************/ diff --git a/db/build_vxworks/db_load/db_load.c b/db/build_vxworks/db_load/db_load.c index a96642583..11367dc88 100644 --- a/db/build_vxworks/db_load/db_load.c +++ b/db/build_vxworks/db_load/db_load.c @@ -1,17 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_load.c,v 11.99 2004/10/11 18:53:14 bostic Exp $ */ #include "db_config.h" #ifndef lint static const char copyright[] = - "Copyright (c) 1996-2003\nSleepycat Software Inc. All rights reserved.\n"; -static const char revid[] = - "$Id: db_load.c,v 11.88 2003/10/16 17:51:08 bostic Exp $"; + "Copyright (c) 1996-2004\nSleepycat Software Inc. All rights reserved.\n"; #endif #ifndef NO_SYSTEM_INCLUDES @@ -85,12 +85,13 @@ db_load_main(argc, argv) int argc; char *argv[]; { + enum { NOTSET, FILEID_RESET, LSN_RESET, INVALID } reset; extern char *optarg; extern int optind, __db_getopt_reset; DBTYPE dbtype; DB_ENV *dbenv; LDG ldg; - u_int32_t ldf; + u_int ldf; int ch, existed, exitval, ret; char **clist, **clp; @@ -106,6 +107,7 @@ db_load_main(argc, argv) if ((ret = db_load_version_check(ldg.progname)) != 0) return (ret); + reset = NOTSET; ldf = 0; exitval = existed = 0; dbtype = DB_UNKNOWN; @@ -116,13 +118,28 @@ db_load_main(argc, argv) return (EXIT_FAILURE); } + /* + * There are two modes for db_load: -r and everything else. The -r + * option zeroes out the database LSN's or resets the file ID, it + * doesn't really "load" a new database. The functionality is in + * db_load because we don't have a better place to put it, and we + * don't want to create a new utility for just that functionality. 
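The comment above describes the new -r mode of db_load, which resets a database file in place instead of loading data. The work is done by the new DB_ENV methods called a few lines below; a minimal sketch with an illustrative file name, passing 0 for the "database is encrypted" argument exactly as db_load does when no password is given:

#include <db.h>

static int
reset_for_new_env(DB_ENV *dbenv)
{
	int ret;

	/* "db_load -r lsn file.db": zero the LSNs in every page. */
	if ((ret = dbenv->lsn_reset(dbenv, "file.db", 0)) != 0)
		return (ret);

	/* "db_load -r fileid file.db": give the file a fresh file ID. */
	return (dbenv->fileid_reset(dbenv, "file.db", 0));
}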
+ */ __db_getopt_reset = 1; - while ((ch = getopt(argc, argv, "c:f:h:nP:Tt:V")) != EOF) + while ((ch = getopt(argc, argv, "c:f:h:nP:r:Tt:V")) != EOF) switch (ch) { case 'c': + if (reset != NOTSET) + return (db_load_usage()); + reset = INVALID; + *clp++ = optarg; break; case 'f': + if (reset != NOTSET) + return (db_load_usage()); + reset = INVALID; + if (freopen(optarg, "r", stdin) == NULL) { fprintf(stderr, "%s: %s: reopen: %s\n", ldg.progname, optarg, strerror(errno)); @@ -133,6 +150,10 @@ db_load_main(argc, argv) ldg.home = optarg; break; case 'n': + if (reset != NOTSET) + return (db_load_usage()); + reset = INVALID; + ldf |= LDF_NOOVERWRITE; break; case 'P': @@ -145,10 +166,28 @@ db_load_main(argc, argv) } ldf |= LDF_PASSWORD; break; + case 'r': + if (reset == INVALID) + return (db_load_usage()); + if (strcmp(optarg, "lsn") == 0) + reset = LSN_RESET; + else if (strcmp(optarg, "fileid") == 0) + reset = FILEID_RESET; + else + return (db_load_usage()); + break; case 'T': + if (reset != NOTSET) + return (db_load_usage()); + reset = INVALID; + ldf |= LDF_NOHEADER; break; case 't': + if (reset != NOTSET) + return (db_load_usage()); + reset = INVALID; + if (strcmp(optarg, "btree") == 0) { dbtype = DB_BTREE; break; @@ -189,10 +228,23 @@ db_load_main(argc, argv) if (db_load_env_create(&dbenv, &ldg) != 0) goto shutdown; - while (!ldg.endofile) - if (db_load_load(dbenv, argv[0], dbtype, clist, ldf, - &ldg, &existed) != 0) - goto shutdown; + /* If we're resetting the LSNs, that's an entirely separate path. */ + switch (reset) { + case FILEID_RESET: + exitval = dbenv->fileid_reset( + dbenv, argv[0], ldf & LDF_PASSWORD ? 1 : 0); + break; + case LSN_RESET: + exitval = dbenv->lsn_reset( + dbenv, argv[0], ldf & LDF_PASSWORD ? 1 : 0); + break; + default: + while (!ldg.endofile) + if (db_load_load(dbenv, argv[0], dbtype, clist, ldf, + &ldg, &existed) != 0) + goto shutdown; + break; + } if (0) { shutdown: exitval = 1; @@ -367,8 +419,7 @@ retry_db: goto err; } if (ldg->private != 0) { - if ((ret = - __db_util_cache(dbenv, dbp, &ldg->cache, &resize)) != 0) + if ((ret = __db_util_cache(dbp, &ldg->cache, &resize)) != 0) goto err; if (resize) { if ((ret = dbp->close(dbp, 0)) != 0) @@ -464,8 +515,8 @@ retry: if (txn != NULL) name, !keyflag ? recno : recno * 2 - 1); - (void)__db_prdbt(&key, checkprint, 0, stderr, - __db_pr_callback, 0, NULL); + (void)dbenv->prdbt(&key, + checkprint, 0, stderr, __db_pr_callback, 0); break; case DB_LOCK_DEADLOCK: /* If we have a child txn, retry--else it's fatal. */ @@ -575,8 +626,10 @@ db_load_db_init(dbenv, home, cache, is_private) /* We may be loading into a live environment. Try and join. */ flags = DB_USE_ENVIRON | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN; - if (dbenv->open(dbenv, home, flags, 0) == 0) + if ((ret = dbenv->open(dbenv, home, flags, 0)) == 0) return (0); + if (ret == DB_VERSION_MISMATCH) + goto err; /* * We're trying to load a database. @@ -601,7 +654,7 @@ db_load_db_init(dbenv, home, cache, is_private) return (0); /* An environment is required. */ - dbenv->err(dbenv, ret, "DB_ENV->open"); +err: dbenv->err(dbenv, ret, "DB_ENV->open"); return (1); } @@ -768,14 +821,10 @@ memerr: dbp->errx(dbp, "could not allocate buffer %d", buflen); break; } - if (ch == '\n') - break; - /* - * If the buffer is too small, double it. The - * +1 is for the nul byte inserted below. + * If the buffer is too small, double it. 
*/ - if (linelen + start + 1 == buflen) { + if (linelen + start == buflen) { G(hdrbuf) = realloc(G(hdrbuf), buflen *= 2); if (G(hdrbuf) == NULL) @@ -783,6 +832,9 @@ memerr: dbp->errx(dbp, "could not allocate buffer %d", buflen); buf = &G(hdrbuf)[start]; } + if (ch == '\n') + break; + buf[linelen++] = ch; } if (G(endofile) == 1) @@ -1125,6 +1177,7 @@ db_load_dbt_rrecno(dbenv, dbtp, ishex) int ishex; { char buf[32], *p, *q; + u_long recno; ++G(lineno); @@ -1163,12 +1216,12 @@ db_load_dbt_rrecno(dbenv, dbtp, ishex) *p = '\0'; } - if (__db_getulong(dbenv, - G(progname), buf + 1, 0, 0, (u_long *)dbtp->data)) { + if (__db_getulong(dbenv, G(progname), buf + 1, 0, 0, &recno)) { bad: db_load_badend(dbenv); return (1); } + *((db_recno_t *)dbtp->data) = recno; dbtp->size = sizeof(db_recno_t); return (0); } @@ -1256,6 +1309,8 @@ db_load_usage() (void)fprintf(stderr, "%s\n\t%s\n", "usage: db_load [-nTV] [-c name=value] [-f file]", "[-h home] [-P password] [-t btree | hash | recno | queue] db_file"); + (void)fprintf(stderr, "%s\n", + "usage: db_load -r lsn | fileid [-h home] [-P password] db_file"); return (EXIT_FAILURE); } diff --git a/db/build_vxworks/db_printlog/db_printlog.c b/db/build_vxworks/db_printlog/db_printlog.c index 6a251f785..3227f2e5e 100644 --- a/db/build_vxworks/db_printlog/db_printlog.c +++ b/db/build_vxworks/db_printlog/db_printlog.c @@ -1,17 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_printlog.c,v 11.64 2004/06/17 17:35:17 bostic Exp $ */ #include "db_config.h" #ifndef lint static const char copyright[] = - "Copyright (c) 1996-2003\nSleepycat Software Inc. All rights reserved.\n"; -static const char revid[] = - "$Id: db_printlog.c,v 11.59 2003/08/18 18:00:31 ubell Exp $"; + "Copyright (c) 1996-2004\nSleepycat Software Inc. 
All rights reserved.\n"; #endif #ifndef NO_SYSTEM_INCLUDES @@ -33,11 +33,12 @@ static const char revid[] = #include "dbinc/qam.h" #include "dbinc/txn.h" +int db_printlog_lsn_arg __P((const char *, char *, DB_LSN *)); int db_printlog_main __P((int, char *[])); +int db_printlog_open_rep_db __P((DB_ENV *, DB **, DBC **)); +int db_printlog_print_app_record __P((DB_ENV *, DBT *, DB_LSN *, db_recops)); int db_printlog_usage __P((void)); int db_printlog_version_check __P((const char *)); -int db_printlog_print_app_record __P((DB_ENV *, DBT *, DB_LSN *, db_recops)); -int db_printlog_open_rep_db __P((DB_ENV *, DB **, DBC **)); int db_printlog(args) @@ -63,29 +64,41 @@ db_printlog_main(argc, argv) const char *progname = "db_printlog"; DB *dbp; DBC *dbc; + DBT data, keydbt; DB_ENV *dbenv; DB_LOGC *logc; - int (**dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); + DB_LSN key, start, stop; size_t dtabsize; - DBT data, keydbt; - DB_LSN key; - int ch, exitval, nflag, rflag, ret, repflag; + u_int32_t logcflag; + int ch, cmp, exitval, nflag, rflag, ret, repflag; + int (**dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); char *home, *passwd; if ((ret = db_printlog_version_check(progname)) != 0) return (ret); - dbenv = NULL; dbp = NULL; dbc = NULL; + dbenv = NULL; logc = NULL; - exitval = nflag = rflag = repflag = 0; - home = passwd = NULL; + ZERO_LSN(start); + ZERO_LSN(stop); dtabsize = 0; + exitval = nflag = rflag = repflag = 0; dtab = NULL; + home = passwd = NULL; + __db_getopt_reset = 1; - while ((ch = getopt(argc, argv, "h:NP:rRV")) != EOF) + while ((ch = getopt(argc, argv, "b:e:h:NP:rRV")) != EOF) switch (ch) { + case 'b': + if (db_printlog_lsn_arg(progname, optarg, &start)) + return (db_printlog_usage()); + break; + case 'e': + if (db_printlog_lsn_arg(progname, optarg, &stop)) + return (db_printlog_usage()); + break; case 'h': home = optarg; break; @@ -104,7 +117,7 @@ db_printlog_main(argc, argv) case 'r': rflag = 1; break; - case 'R': + case 'R': /* Undocumented */ repflag = 1; break; case 'V': @@ -172,28 +185,34 @@ db_printlog_main(argc, argv) if (repflag) { if ((ret = dbenv->open(dbenv, home, DB_INIT_MPOOL | DB_USE_ENVIRON, 0)) != 0 && + (ret == DB_VERSION_MISMATCH || (ret = dbenv->open(dbenv, home, DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) - != 0) { - dbenv->err(dbenv, ret, "open"); + != 0)) { + dbenv->err(dbenv, ret, "DB_ENV->open"); goto shutdown; } } else if ((ret = dbenv->open(dbenv, home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 && + (ret == DB_VERSION_MISMATCH || (ret = dbenv->open(dbenv, home, - DB_CREATE | DB_INIT_LOG | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) { - dbenv->err(dbenv, ret, "open"); + DB_CREATE | DB_INIT_LOG | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0)) { + dbenv->err(dbenv, ret, "DB_ENV->open"); goto shutdown; } /* Initialize print callbacks. 
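The -b and -e options parsed above bound db_printlog's output to an LSN range; the loop below positions the log cursor with DB_SET and stops once log_compare() passes the end LSN. A standalone sketch of the same idea, with environment setup and record printing omitted:

#include <string.h>
#include <db.h>

static int
walk_range(DB_ENV *dbenv, DB_LSN start, DB_LSN stop)
{
	DB_LOGC *logc;
	DBT data;
	DB_LSN lsn;
	u_int32_t flags;
	int ret;

	if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
		return (ret);

	memset(&data, 0, sizeof(data));
	lsn = start;
	for (flags = DB_SET;
	    (ret = logc->get(logc, &lsn, &data, flags)) == 0;
	    flags = DB_NEXT) {
		if (log_compare(&lsn, &stop) > 0)	/* past the end LSN */
			break;
		/* ... print or otherwise process the record in "data" ... */
	}

	(void)logc->close(logc, 0);
	return (ret == DB_NOTFOUND ? 0 : ret);
}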
*/ if ((ret = __bam_init_print(dbenv, &dtab, &dtabsize)) != 0 || - (ret = __dbreg_init_print(dbenv, &dtab, &dtabsize)) != 0 || (ret = __crdel_init_print(dbenv, &dtab, &dtabsize)) != 0 || (ret = __db_init_print(dbenv, &dtab, &dtabsize)) != 0 || + (ret = __dbreg_init_print(dbenv, &dtab, &dtabsize)) != 0 || (ret = __fop_init_print(dbenv, &dtab, &dtabsize)) != 0 || - (ret = __qam_init_print(dbenv, &dtab, &dtabsize)) != 0 || +#ifdef HAVE_HASH (ret = __ham_init_print(dbenv, &dtab, &dtabsize)) != 0 || +#endif +#ifdef HAVE_QUEUE + (ret = __qam_init_print(dbenv, &dtab, &dtabsize)) != 0 || +#endif (ret = __txn_init_print(dbenv, &dtab, &dtabsize)) != 0) { dbenv->err(dbenv, ret, "callback: initialization"); goto shutdown; @@ -208,17 +227,22 @@ db_printlog_main(argc, argv) goto shutdown; } + if (IS_ZERO_LSN(start)) { + memset(&keydbt, 0, sizeof(keydbt)); + logcflag = rflag ? DB_PREV : DB_NEXT; + } else { + key = start; + logcflag = DB_SET; + } memset(&data, 0, sizeof(data)); - memset(&keydbt, 0, sizeof(keydbt)); - while (!__db_util_interrupted()) { + + for (; !__db_util_interrupted(); logcflag = rflag ? DB_PREV : DB_NEXT) { if (repflag) { - ret = dbc->c_get(dbc, - &keydbt, &data, rflag ? DB_PREV : DB_NEXT); + ret = dbc->c_get(dbc, &keydbt, &data, logcflag); if (ret == 0) key = ((REP_CONTROL *)keydbt.data)->lsn; } else - ret = logc->get(logc, - &key, &data, rflag ? DB_PREV : DB_NEXT); + ret = logc->get(logc, &key, &data, logcflag); if (ret != 0) { if (ret == DB_NOTFOUND) break; @@ -227,6 +251,15 @@ db_printlog_main(argc, argv) goto shutdown; } + /* + * We may have reached the end of the range we're displaying. + */ + if (!IS_ZERO_LSN(stop)) { + cmp = log_compare(&key, &stop); + if ((rflag && cmp < 0) || (!rflag && cmp > 0)) + break; + } + ret = __db_dispatch(dbenv, dtab, dtabsize, &data, &key, DB_TXN_PRINT, NULL); @@ -279,8 +312,8 @@ shutdown: exitval = 1; int db_printlog_usage() { - fprintf(stderr, "%s\n", - "usage: db_printlog [-NrV] [-h home] [-P password]"); + fprintf(stderr, "usage: db_printlog %s\n", + "[-NrV] [-b file/offset] [-e file/offset] [-h home] [-P password]"); return (EXIT_FAILURE); } @@ -376,3 +409,38 @@ err: if (*dbpp != NULL) (void)(*dbpp)->close(*dbpp, 0); return (ret); } + +/* + * lsn_arg -- + * Parse a LSN argument. + */ +int +db_printlog_lsn_arg(progname, optarg, lsnp) + const char *progname; + char *optarg; + DB_LSN *lsnp; +{ + char *p; + u_long uval; + + /* + * Expected format is: lsn.file/lsn.offset. + * + * Don't use getsubopt(3), some systems don't have it. + */ + if ((p = strchr(optarg, '/')) == NULL) + return (1); + *p = '\0'; + + if (__db_getulong(NULL, progname, optarg, 0, 0, &uval)) + return (1); + if (uval > UINT32_MAX) + return (1); + lsnp->file = uval; + if (__db_getulong(NULL, progname, p + 1, 0, 0, &uval)) + return (1); + if (uval > UINT32_MAX) + return (1); + lsnp->offset = uval; + return (0); +} diff --git a/db/build_vxworks/db_recover/db_recover.c b/db/build_vxworks/db_recover/db_recover.c index 1c7210dc9..cea04a115 100644 --- a/db/build_vxworks/db_recover/db_recover.c +++ b/db/build_vxworks/db_recover/db_recover.c @@ -1,17 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_recover.c,v 11.41 2004/01/28 03:36:00 bostic Exp $ */ #include "db_config.h" #ifndef lint static const char copyright[] = - "Copyright (c) 1996-2003\nSleepycat Software Inc. 
All rights reserved.\n"; -static const char revid[] = - "$Id: db_recover.c,v 11.39 2003/09/04 18:06:46 bostic Exp $"; + "Copyright (c) 1996-2004\nSleepycat Software Inc. All rights reserved.\n"; #endif #ifndef NO_SYSTEM_INCLUDES @@ -130,10 +130,8 @@ db_recover_main(argc, argv) } dbenv->set_errfile(dbenv, stderr); dbenv->set_errpfx(dbenv, progname); - if (verbose) { + if (verbose) (void)dbenv->set_verbose(dbenv, DB_VERB_RECOVERY, 1); - (void)dbenv->set_verbose(dbenv, DB_VERB_CHKPOINT, 1); - } if (timestamp && (ret = dbenv->set_tx_timestamp(dbenv, ×tamp)) != 0) { dbenv->err(dbenv, ret, "DB_ENV->set_timestamp"); diff --git a/db/build_vxworks/db_stat/db_stat.c b/db/build_vxworks/db_stat/db_stat.c index bd628ca09..3b004fa19 100644 --- a/db/build_vxworks/db_stat/db_stat.c +++ b/db/build_vxworks/db_stat/db_stat.c @@ -1,17 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_stat.c,v 11.158 2004/07/15 18:26:48 ubell Exp $ */ #include "db_config.h" #ifndef lint static const char copyright[] = - "Copyright (c) 1996-2003\nSleepycat Software Inc. All rights reserved.\n"; -static const char revid[] = - "$Id: db_stat.c,v 11.142 2003/10/27 19:47:25 bostic Exp $"; + "Copyright (c) 1996-2004\nSleepycat Software Inc. All rights reserved.\n"; #endif #ifndef NO_SYSTEM_INCLUDES @@ -38,30 +38,11 @@ static const char revid[] = #include "dbinc/db_page.h" #include "dbinc/txn.h" -#define PCT(f, t, pgsize) \ - ((t) == 0 ? 0 : \ - ((((((double)t) * (pgsize)) - (f)) / (((double)t) * (pgsize))) * 100)) - typedef enum { T_NOTSET, T_DB, T_ENV, T_LOCK, T_LOG, T_MPOOL, T_REP, T_TXN } test_t; -int db_stat_argcheck __P((char *, const char *)); -int db_stat_btree_stats __P((DB_ENV *, DB *, DB_BTREE_STAT *, u_int32_t)); int db_stat_db_init __P((DB_ENV *, char *, test_t, u_int32_t, int *)); -void db_stat_dl __P((const char *, u_long)); -void db_stat_dl_bytes __P((const char *, u_long, u_long, u_long)); -int db_stat_env_stats __P((DB_ENV *, u_int32_t)); -int db_stat_hash_stats __P((DB_ENV *, DB *, u_int32_t)); -int db_stat_lock_stats __P((DB_ENV *, char *, u_int32_t)); -int db_stat_log_stats __P((DB_ENV *, u_int32_t)); int db_stat_main __P((int, char *[])); -int db_stat_mpool_stats __P((DB_ENV *, char *, u_int32_t)); -void db_stat_prflags __P((DB *, u_int32_t, const FN *)); -int db_stat_queue_stats __P((DB_ENV *, DB *, u_int32_t)); -int db_stat_rep_stats __P((DB_ENV *, u_int32_t)); -int db_stat_txn_compare __P((const void *, const void *)); -int db_stat_txn_stats __P((DB_ENV *, u_int32_t)); -void db_stat_txn_xid_stats __P((DB_TXN_ACTIVE *)); int db_stat_usage __P((void)); int db_stat_version_check __P((const char *)); @@ -92,9 +73,9 @@ db_stat_main(argc, argv) DB *alt_dbp, *dbp; test_t ttype; u_int32_t cache, env_flags, fast, flags; - int ch, checked, exitval; + int ch, exitval; int nflag, private, resize, ret; - char *db, *home, *internal, *passwd, *subdb; + char *db, *home, *p, *passwd, *subdb; if ((ret = db_stat_version_check(progname)) != 0) return (ret); @@ -104,34 +85,53 @@ db_stat_main(argc, argv) ttype = T_NOTSET; cache = MEGABYTE; exitval = fast = flags = nflag = private = 0; - db = home = internal = passwd = subdb = NULL; + db = home = passwd = subdb = NULL; env_flags = 0; __db_getopt_reset = 1; - while ((ch = getopt(argc, argv, "C:cd:efh:lM:mNP:rs:tVZ")) != EOF) + while ((ch = getopt(argc, argv, "C:cd:Eefh:L:lM:mNP:R:rs:tVZ")) != EOF) switch (ch) { - case 'C': - if (ttype != T_NOTSET) - 
goto argcombo; - ttype = T_LOCK; - if (!db_stat_argcheck(internal = optarg, "Aclmop")) - return (db_stat_usage()); - break; - case 'c': - if (ttype != T_NOTSET) + case 'C': case 'c': + if (ttype != T_NOTSET && ttype != T_LOCK) goto argcombo; ttype = T_LOCK; + if (ch != 'c') + for (p = optarg; *p; ++p) + switch (*p) { + case 'A': + LF_SET(DB_STAT_ALL); + break; + case 'c': + LF_SET(DB_STAT_LOCK_CONF); + break; + case 'l': + LF_SET(DB_STAT_LOCK_LOCKERS); + break; + case 'm': /* Backward compatible. */ + break; + case 'o': + LF_SET(DB_STAT_LOCK_OBJECTS); + break; + case 'p': + LF_SET(DB_STAT_LOCK_PARAMS); + break; + default: + return (db_stat_usage()); + } break; case 'd': - if (ttype != T_DB && ttype != T_NOTSET) + if (ttype != T_NOTSET && ttype != T_DB) goto argcombo; ttype = T_DB; db = optarg; break; - case 'e': - if (ttype != T_NOTSET) + case 'E': case 'e': + if (ttype != T_NOTSET && ttype != T_ENV) goto argcombo; ttype = T_ENV; + LF_SET(DB_STAT_SUBSYSTEM); + if (ch == 'E') + LF_SET(DB_STAT_ALL); break; case 'f': fast = DB_FAST_STAT; @@ -139,22 +139,38 @@ db_stat_main(argc, argv) case 'h': home = optarg; break; - case 'l': - if (ttype != T_NOTSET) + case 'L': case 'l': + if (ttype != T_NOTSET && ttype != T_LOG) goto argcombo; ttype = T_LOG; + if (ch != 'l') + for (p = optarg; *p; ++p) + switch (*p) { + case 'A': + LF_SET(DB_STAT_ALL); + break; + default: + return (db_stat_usage()); + } break; - case 'M': - if (ttype != T_NOTSET) - goto argcombo; - ttype = T_MPOOL; - if (!db_stat_argcheck(internal = optarg, "Ahm")) - return (db_stat_usage()); - break; - case 'm': - if (ttype != T_NOTSET) + case 'M': case 'm': + if (ttype != T_NOTSET && ttype != T_MPOOL) goto argcombo; ttype = T_MPOOL; + if (ch != 'm') + for (p = optarg; *p; ++p) + switch (*p) { + case 'A': + LF_SET(DB_STAT_ALL); + break; + case 'h': + LF_SET(DB_STAT_MEMP_HASH); + break; + case 'm': /* Backward compatible. */ + break; + default: + return (db_stat_usage()); + } break; case 'N': nflag = 1; @@ -168,13 +184,22 @@ db_stat_main(argc, argv) return (EXIT_FAILURE); } break; - case 'r': - if (ttype != T_NOTSET) + case 'R': case 'r': + if (ttype != T_NOTSET && ttype != T_REP) goto argcombo; ttype = T_REP; + if (ch != 'r') + for (p = optarg; *p; ++p) + switch (*p) { + case 'A': + LF_SET(DB_STAT_ALL); + break; + default: + return (db_stat_usage()); + } break; case 's': - if (ttype != T_DB && ttype != T_NOTSET) + if (ttype != T_NOTSET && ttype != T_DB) goto argcombo; ttype = T_DB; subdb = optarg; @@ -192,7 +217,7 @@ argcombo: fprintf(stderr, printf("%s\n", db_version(NULL, NULL, NULL)); return (EXIT_SUCCESS); case 'Z': - flags |= DB_STAT_CLEAR; + LF_SET(DB_STAT_CLEAR); break; case '?': default: @@ -230,7 +255,7 @@ argcombo: fprintf(stderr, retry: if ((ret = db_env_create(&dbenv, env_flags)) != 0) { fprintf(stderr, "%s: db_env_create: %s\n", progname, db_strerror(ret)); - goto shutdown; + goto err; } dbenv->set_errfile(dbenv, stderr); @@ -239,45 +264,45 @@ retry: if ((ret = db_env_create(&dbenv, env_flags)) != 0) { if (nflag) { if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) { dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING"); - goto shutdown; + goto err; } if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) { dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC"); - goto shutdown; + goto err; } } if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES)) != 0) { dbenv->err(dbenv, ret, "set_passwd"); - goto shutdown; + goto err; } /* Initialize the environment. 
*/ if (db_stat_db_init(dbenv, home, ttype, cache, &private) != 0) - goto shutdown; + goto err; switch (ttype) { case T_DB: - /* Create the DB object and open the file. */ if (flags != 0) return (db_stat_usage()); + + /* Create the DB object and open the file. */ if ((ret = db_create(&dbp, dbenv, 0)) != 0) { dbenv->err(dbenv, ret, "db_create"); - goto shutdown; + goto err; } if ((ret = dbp->open(dbp, NULL, db, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0) { - dbp->err(dbp, ret, "DB->open: %s", db); - goto shutdown; + dbenv->err(dbenv, ret, "DB->open: %s", db); + goto err; } /* Check if cache is too small for this DB's pagesize. */ if (private) { - if ((ret = - __db_util_cache(dbenv, dbp, &cache, &resize)) != 0) - goto shutdown; + if ((ret = __db_util_cache(dbp, &cache, &resize)) != 0) + goto err; if (resize) { (void)dbp->close(dbp, DB_NOSYNC); dbp = NULL; @@ -293,13 +318,10 @@ retry: if ((ret = db_env_create(&dbenv, env_flags)) != 0) { * If its a master-db then we cannot. So check to see, * if its btree then it might be. */ - checked = 0; - if (subdb == NULL && dbp->type == DB_BTREE) { - if ((ret = dbp->stat(dbp, &sp, DB_FAST_STAT)) != 0) { - dbp->err(dbp, ret, "DB->stat"); - goto shutdown; - } - checked = 1; + if (subdb == NULL && dbp->type == DB_BTREE && + (ret = dbp->stat(dbp, NULL, &sp, DB_FAST_STAT)) != 0) { + dbenv->err(dbenv, ret, "DB->stat"); + goto err; } if (subdb != NULL || @@ -307,74 +329,58 @@ retry: if ((ret = db_env_create(&dbenv, env_flags)) != 0) { (sp->bt_metaflags & BTM_SUBDB) == 0) { if ((ret = db_create(&alt_dbp, dbenv, 0)) != 0) { dbenv->err(dbenv, ret, "db_create"); - goto shutdown; + goto err; } if ((ret = dbp->open(alt_dbp, NULL, db, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0) { - dbenv->err(dbenv, - ret, "DB->open: %s:%s", db, subdb); + if (subdb == NULL) + dbenv->err(dbenv, + ret, "DB->open: %s", db); + else + dbenv->err(dbenv, + ret, "DB->open: %s:%s", db, subdb); (void)alt_dbp->close(alt_dbp, DB_NOSYNC); - goto shutdown; + goto err; } (void)dbp->close(dbp, DB_NOSYNC); dbp = alt_dbp; - - /* Need to run again to update counts */ - checked = 0; } - switch (dbp->type) { - case DB_BTREE: - case DB_RECNO: - if (db_stat_btree_stats( - dbenv, dbp, checked == 1 ? 
sp : NULL, fast)) - goto shutdown; - break; - case DB_HASH: - if (db_stat_hash_stats(dbenv, dbp, fast)) - goto shutdown; - break; - case DB_QUEUE: - if (db_stat_queue_stats(dbenv, dbp, fast)) - goto shutdown; - break; - case DB_UNKNOWN: - dbenv->errx(dbenv, "Unknown database type."); - goto shutdown; - } + if (dbp->stat_print(dbp, flags)) + goto err; break; case T_ENV: - if (db_stat_env_stats(dbenv, flags)) - goto shutdown; + if (dbenv->stat_print(dbenv, flags)) + goto err; break; case T_LOCK: - if (db_stat_lock_stats(dbenv, internal, flags)) - goto shutdown; + if (dbenv->lock_stat_print(dbenv, flags)) + goto err; break; case T_LOG: - if (db_stat_log_stats(dbenv, flags)) - goto shutdown; + if (dbenv->log_stat_print(dbenv, flags)) + goto err; break; case T_MPOOL: - if (db_stat_mpool_stats(dbenv, internal, flags)) - goto shutdown; + if (dbenv->memp_stat_print(dbenv, flags)) + goto err; break; case T_REP: - if (db_stat_rep_stats(dbenv, flags)) - goto shutdown; + if (dbenv->rep_stat_print(dbenv, flags)) + goto err; break; case T_TXN: - if (db_stat_txn_stats(dbenv, flags)) - goto shutdown; + if (dbenv->txn_stat_print(dbenv, flags)) + goto err; break; case T_NOTSET: - dbenv->errx(dbenv, "Unknown statistics flag."); - goto shutdown; + dbenv->errx(dbenv, "Unknown statistics flag"); + goto err; } if (0) { -shutdown: exitval = 1; +err: exitval = 1; } if (dbp != NULL && (ret = dbp->close(dbp, DB_NOSYNC)) != 0) { exitval = 1; @@ -395,857 +401,6 @@ shutdown: exitval = 1; return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE); } -/* - * env_stats -- - * Display environment statistics. - */ -int -db_stat_env_stats(dbenv, flags) - DB_ENV *dbenv; - u_int32_t flags; -{ - REGENV renv; - REGION *rp, regs[1024]; - int n, ret; - const char *lable; - - n = sizeof(regs) / sizeof(regs[0]); - if ((ret = __db_e_stat(dbenv, &renv, regs, &n, flags)) != 0) { - dbenv->err(dbenv, ret, "__db_e_stat"); - return (1); - } - - printf("%d.%d.%d\tEnvironment version.\n", - renv.majver, renv.minver, renv.patch); - printf("%lx\tMagic number.\n", (u_long)renv.magic); - printf("%d\tPanic value.\n", renv.envpanic); - - /* Adjust the reference count for us... */ - printf("%d\tReferences.\n", renv.refcnt - 1); - - db_stat_dl("Locks granted without waiting.\n", - (u_long)renv.mutex.mutex_set_nowait); - db_stat_dl("Locks granted after waiting.\n", - (u_long)renv.mutex.mutex_set_wait); - - while (n > 0) { - printf("%s\n", DB_LINE); - rp = &regs[--n]; - switch (rp->type) { - case REGION_TYPE_ENV: - lable = "Environment"; - break; - case REGION_TYPE_LOCK: - lable = "Lock"; - break; - case REGION_TYPE_LOG: - lable = "Log"; - break; - case REGION_TYPE_MPOOL: - lable = "Mpool"; - break; - case REGION_TYPE_MUTEX: - lable = "Mutex"; - break; - case REGION_TYPE_TXN: - lable = "Txn"; - break; - case INVALID_REGION_TYPE: - default: - lable = "Invalid"; - break; - } - printf("%s Region: %d.\n", lable, rp->id); - db_stat_dl_bytes("Size", (u_long)0, (u_long)0, (u_long)rp->size); - printf("%ld\tSegment ID.\n", rp->segid); - db_stat_dl("Locks granted without waiting.\n", - (u_long)rp->mutex.mutex_set_nowait); - db_stat_dl("Locks granted after waiting.\n", - (u_long)rp->mutex.mutex_set_wait); - } - - return (0); -} - -/* - * btree_stats -- - * Display btree/recno statistics.
- */ -int -db_stat_btree_stats(dbenv, dbp, msp, fast) - DB_ENV *dbenv; - DB *dbp; - DB_BTREE_STAT *msp; - u_int32_t fast; -{ - static const FN fn[] = { - { BTM_DUP, "duplicates" }, - { BTM_FIXEDLEN, "fixed-length" }, - { BTM_RECNO, "recno" }, - { BTM_RECNUM, "record-numbers" }, - { BTM_RENUMBER, "renumber" }, - { BTM_SUBDB, "multiple-databases" }, - { 0, NULL } - }; - DB_BTREE_STAT *sp; - int ret; - - COMPQUIET(dbenv, NULL); - - if (msp != NULL) - sp = msp; - else if ((ret = dbp->stat(dbp, &sp, fast)) != 0) { - dbp->err(dbp, ret, "DB->stat"); - return (1); - } - - printf("%lx\tBtree magic number.\n", (u_long)sp->bt_magic); - printf("%lu\tBtree version number.\n", (u_long)sp->bt_version); - db_stat_prflags(dbp, sp->bt_metaflags, fn); - if (dbp->type == DB_BTREE) { -#ifdef NOT_IMPLEMENTED - db_stat_dl("Maximum keys per-page.\n", (u_long)sp->bt_maxkey); -#endif - db_stat_dl("Minimum keys per-page.\n", (u_long)sp->bt_minkey); - } - if (dbp->type == DB_RECNO) { - db_stat_dl("Fixed-length record size.\n", (u_long)sp->bt_re_len); - if (isprint((int)sp->bt_re_pad) && !isspace((int)sp->bt_re_pad)) - printf("%c\tFixed-length record pad.\n", - (int)sp->bt_re_pad); - else - printf("0x%x\tFixed-length record pad.\n", - (int)sp->bt_re_pad); - } - db_stat_dl("Underlying database page size.\n", (u_long)sp->bt_pagesize); - db_stat_dl("Number of levels in the tree.\n", (u_long)sp->bt_levels); - db_stat_dl(dbp->type == DB_BTREE ? - "Number of unique keys in the tree.\n" : - "Number of records in the tree.\n", (u_long)sp->bt_nkeys); - db_stat_dl("Number of data items in the tree.\n", (u_long)sp->bt_ndata); - - db_stat_dl("Number of tree internal pages.\n", (u_long)sp->bt_int_pg); - db_stat_dl("Number of bytes free in tree internal pages", - (u_long)sp->bt_int_pgfree); - printf(" (%.0f%% ff).\n", - PCT(sp->bt_int_pgfree, sp->bt_int_pg, sp->bt_pagesize)); - - db_stat_dl("Number of tree leaf pages.\n", (u_long)sp->bt_leaf_pg); - db_stat_dl("Number of bytes free in tree leaf pages", - (u_long)sp->bt_leaf_pgfree); - printf(" (%.0f%% ff).\n", - PCT(sp->bt_leaf_pgfree, sp->bt_leaf_pg, sp->bt_pagesize)); - - db_stat_dl("Number of tree duplicate pages.\n", (u_long)sp->bt_dup_pg); - db_stat_dl("Number of bytes free in tree duplicate pages", - (u_long)sp->bt_dup_pgfree); - printf(" (%.0f%% ff).\n", - PCT(sp->bt_dup_pgfree, sp->bt_dup_pg, sp->bt_pagesize)); - - db_stat_dl("Number of tree overflow pages.\n", (u_long)sp->bt_over_pg); - db_stat_dl("Number of bytes free in tree overflow pages", - (u_long)sp->bt_over_pgfree); - printf(" (%.0f%% ff).\n", - PCT(sp->bt_over_pgfree, sp->bt_over_pg, sp->bt_pagesize)); - - db_stat_dl("Number of pages on the free list.\n", (u_long)sp->bt_free); - - free(sp); - - return (0); -} - -/* - * hash_stats -- - * Display hash statistics. 
- */ -int -db_stat_hash_stats(dbenv, dbp, fast) - DB_ENV *dbenv; - DB *dbp; - u_int32_t fast; -{ - static const FN fn[] = { - { DB_HASH_DUP, "duplicates" }, - { DB_HASH_SUBDB,"multiple-databases" }, - { 0, NULL } - }; - DB_HASH_STAT *sp; - int ret; - - COMPQUIET(dbenv, NULL); - - if ((ret = dbp->stat(dbp, &sp, fast)) != 0) { - dbp->err(dbp, ret, "DB->stat"); - return (1); - } - - printf("%lx\tHash magic number.\n", (u_long)sp->hash_magic); - printf("%lu\tHash version number.\n", (u_long)sp->hash_version); - db_stat_prflags(dbp, sp->hash_metaflags, fn); - db_stat_dl("Underlying database page size.\n", (u_long)sp->hash_pagesize); - db_stat_dl("Specified fill factor.\n", (u_long)sp->hash_ffactor); - db_stat_dl("Number of keys in the database.\n", (u_long)sp->hash_nkeys); - db_stat_dl("Number of data items in the database.\n", (u_long)sp->hash_ndata); - - db_stat_dl("Number of hash buckets.\n", (u_long)sp->hash_buckets); - db_stat_dl("Number of bytes free on bucket pages", (u_long)sp->hash_bfree); - printf(" (%.0f%% ff).\n", - PCT(sp->hash_bfree, sp->hash_buckets, sp->hash_pagesize)); - - db_stat_dl("Number of overflow pages.\n", (u_long)sp->hash_bigpages); - db_stat_dl("Number of bytes free in overflow pages", - (u_long)sp->hash_big_bfree); - printf(" (%.0f%% ff).\n", - PCT(sp->hash_big_bfree, sp->hash_bigpages, sp->hash_pagesize)); - - db_stat_dl("Number of bucket overflow pages.\n", (u_long)sp->hash_overflows); - db_stat_dl("Number of bytes free in bucket overflow pages", - (u_long)sp->hash_ovfl_free); - printf(" (%.0f%% ff).\n", - PCT(sp->hash_ovfl_free, sp->hash_overflows, sp->hash_pagesize)); - - db_stat_dl("Number of duplicate pages.\n", (u_long)sp->hash_dup); - db_stat_dl("Number of bytes free in duplicate pages", - (u_long)sp->hash_dup_free); - printf(" (%.0f%% ff).\n", - PCT(sp->hash_dup_free, sp->hash_dup, sp->hash_pagesize)); - - db_stat_dl("Number of pages on the free list.\n", (u_long)sp->hash_free); - - free(sp); - - return (0); -} - -/* - * queue_stats -- - * Display queue statistics. - */ -int -db_stat_queue_stats(dbenv, dbp, fast) - DB_ENV *dbenv; - DB *dbp; - u_int32_t fast; -{ - DB_QUEUE_STAT *sp; - int ret; - - COMPQUIET(dbenv, NULL); - - if ((ret = dbp->stat(dbp, &sp, fast)) != 0) { - dbp->err(dbp, ret, "DB->stat"); - return (1); - } - - printf("%lx\tQueue magic number.\n", (u_long)sp->qs_magic); - printf("%lu\tQueue version number.\n", (u_long)sp->qs_version); - db_stat_dl("Fixed-length record size.\n", (u_long)sp->qs_re_len); - if (isprint((int)sp->qs_re_pad) && !isspace((int)sp->qs_re_pad)) - printf("%c\tFixed-length record pad.\n", (int)sp->qs_re_pad); - else - printf("0x%x\tFixed-length record pad.\n", (int)sp->qs_re_pad); - db_stat_dl("Underlying database page size.\n", (u_long)sp->qs_pagesize); - if (sp->qs_extentsize != 0) - db_stat_dl("Underlying database extent size.\n", - (u_long)sp->qs_extentsize); - db_stat_dl("Number of records in the database.\n", (u_long)sp->qs_nkeys); - db_stat_dl("Number of database pages.\n", (u_long)sp->qs_pages); - db_stat_dl("Number of bytes free in database pages", (u_long)sp->qs_pgfree); - printf(" (%.0f%% ff).\n", - PCT(sp->qs_pgfree, sp->qs_pages, sp->qs_pagesize)); - printf("%lu\tFirst undeleted record.\n", (u_long)sp->qs_first_recno); - printf( - "%lu\tNext available record number.\n", (u_long)sp->qs_cur_recno); - - free(sp); - - return (0); -} - -/* - * lock_stats -- - * Display lock statistics. 
- */ -int -db_stat_lock_stats(dbenv, internal, flags) - DB_ENV *dbenv; - char *internal; - u_int32_t flags; -{ - DB_LOCK_STAT *sp; - int ret; - - if (internal != NULL) { - if ((ret = - dbenv->lock_dump_region(dbenv, internal, stdout)) != 0) { - dbenv->err(dbenv, ret, NULL); - return (1); - } - return (0); - } - - if ((ret = dbenv->lock_stat(dbenv, &sp, flags)) != 0) { - dbenv->err(dbenv, ret, NULL); - return (1); - } - - db_stat_dl("Last allocated locker ID.\n", (u_long)sp->st_id); - db_stat_dl("Current maximum unused locker ID.\n", (u_long)sp->st_cur_maxid); - db_stat_dl("Number of lock modes.\n", (u_long)sp->st_nmodes); - db_stat_dl("Maximum number of locks possible.\n", (u_long)sp->st_maxlocks); - db_stat_dl("Maximum number of lockers possible.\n", (u_long)sp->st_maxlockers); - db_stat_dl("Maximum number of lock objects possible.\n", - (u_long)sp->st_maxobjects); - db_stat_dl("Number of current locks.\n", (u_long)sp->st_nlocks); - db_stat_dl("Maximum number of locks at any one time.\n", - (u_long)sp->st_maxnlocks); - db_stat_dl("Number of current lockers.\n", (u_long)sp->st_nlockers); - db_stat_dl("Maximum number of lockers at any one time.\n", - (u_long)sp->st_maxnlockers); - db_stat_dl("Number of current lock objects.\n", (u_long)sp->st_nobjects); - db_stat_dl("Maximum number of lock objects at any one time.\n", - (u_long)sp->st_maxnobjects); - db_stat_dl("Total number of locks requested.\n", (u_long)sp->st_nrequests); - db_stat_dl("Total number of locks released.\n", (u_long)sp->st_nreleases); - db_stat_dl( - "Total number of lock requests failing because DB_LOCK_NOWAIT was set.\n", - (u_long)sp->st_nnowaits); - db_stat_dl( - "Total number of locks not immediately available due to conflicts.\n", - (u_long)sp->st_nconflicts); - db_stat_dl("Number of deadlocks.\n", (u_long)sp->st_ndeadlocks); - db_stat_dl("Lock timeout value.\n", (u_long)sp->st_locktimeout); - db_stat_dl("Number of locks that have timed out.\n", - (u_long)sp->st_nlocktimeouts); - db_stat_dl("Transaction timeout value.\n", (u_long)sp->st_txntimeout); - db_stat_dl("Number of transactions that have timed out.\n", - (u_long)sp->st_ntxntimeouts); - - db_stat_dl_bytes("The size of the lock region.", - (u_long)0, (u_long)0, (u_long)sp->st_regsize); - db_stat_dl("The number of region locks granted after waiting.\n", - (u_long)sp->st_region_wait); - db_stat_dl("The number of region locks granted without waiting.\n", - (u_long)sp->st_region_nowait); - - free(sp); - - return (0); -} - -/* - * log_stats -- - * Display log statistics. 
- */ -int -db_stat_log_stats(dbenv, flags) - DB_ENV *dbenv; - u_int32_t flags; -{ - DB_LOG_STAT *sp; - int ret; - - if ((ret = dbenv->log_stat(dbenv, &sp, flags)) != 0) { - dbenv->err(dbenv, ret, NULL); - return (1); - } - - printf("%lx\tLog magic number.\n", (u_long)sp->st_magic); - printf("%lu\tLog version number.\n", (u_long)sp->st_version); - db_stat_dl_bytes("Log record cache size", - (u_long)0, (u_long)0, (u_long)sp->st_lg_bsize); - printf("%#o\tLog file mode.\n", sp->st_mode); - if (sp->st_lg_size % MEGABYTE == 0) - printf("%luMb\tCurrent log file size.\n", - (u_long)sp->st_lg_size / MEGABYTE); - else if (sp->st_lg_size % 1024 == 0) - printf("%luKb\tCurrent log file size.\n", - (u_long)sp->st_lg_size / 1024); - else - printf("%lu\tCurrent log file size.\n", - (u_long)sp->st_lg_size); - db_stat_dl_bytes("Log bytes written", - (u_long)0, (u_long)sp->st_w_mbytes, (u_long)sp->st_w_bytes); - db_stat_dl_bytes("Log bytes written since last checkpoint", - (u_long)0, (u_long)sp->st_wc_mbytes, (u_long)sp->st_wc_bytes); - db_stat_dl("Total log file writes.\n", (u_long)sp->st_wcount); - db_stat_dl("Total log file write due to overflow.\n", - (u_long)sp->st_wcount_fill); - db_stat_dl("Total log file flushes.\n", (u_long)sp->st_scount); - printf("%lu\tCurrent log file number.\n", (u_long)sp->st_cur_file); - printf("%lu\tCurrent log file offset.\n", (u_long)sp->st_cur_offset); - printf("%lu\tOn-disk log file number.\n", (u_long)sp->st_disk_file); - printf("%lu\tOn-disk log file offset.\n", (u_long)sp->st_disk_offset); - - db_stat_dl("Max commits in a log flush.\n", (u_long)sp->st_maxcommitperflush); - db_stat_dl("Min commits in a log flush.\n", (u_long)sp->st_mincommitperflush); - - db_stat_dl_bytes("Log region size", - (u_long)0, (u_long)0, (u_long)sp->st_regsize); - db_stat_dl("The number of region locks granted after waiting.\n", - (u_long)sp->st_region_wait); - db_stat_dl("The number of region locks granted without waiting.\n", - (u_long)sp->st_region_nowait); - - free(sp); - - return (0); -} - -/* - * mpool_stats -- - * Display mpool statistics. 
- */ -int -db_stat_mpool_stats(dbenv, internal, flags) - DB_ENV *dbenv; - char *internal; - u_int32_t flags; -{ - DB_MPOOL_FSTAT **fsp; - DB_MPOOL_STAT *gsp; - int ret; - - if (internal != NULL) { - if ((ret = - dbenv->memp_dump_region(dbenv, internal, stdout)) != 0) { - dbenv->err(dbenv, ret, NULL); - return (1); - } - return (0); - } - - if ((ret = dbenv->memp_stat(dbenv, &gsp, &fsp, flags)) != 0) { - dbenv->err(dbenv, ret, NULL); - return (1); - } - - db_stat_dl_bytes("Total cache size", - (u_long)gsp->st_gbytes, (u_long)0, (u_long)gsp->st_bytes); - db_stat_dl("Number of caches.\n", (u_long)gsp->st_ncache); - db_stat_dl_bytes("Pool individual cache size", - (u_long)0, (u_long)0, (u_long)gsp->st_regsize); - db_stat_dl("Requested pages mapped into the process' address space.\n", - (u_long)gsp->st_map); - db_stat_dl("Requested pages found in the cache", (u_long)gsp->st_cache_hit); - if (gsp->st_cache_hit + gsp->st_cache_miss != 0) - printf(" (%.0f%%)", ((double)gsp->st_cache_hit / - (gsp->st_cache_hit + gsp->st_cache_miss)) * 100); - printf(".\n"); - db_stat_dl("Requested pages not found in the cache.\n", - (u_long)gsp->st_cache_miss); - db_stat_dl("Pages created in the cache.\n", (u_long)gsp->st_page_create); - db_stat_dl("Pages read into the cache.\n", (u_long)gsp->st_page_in); - db_stat_dl("Pages written from the cache to the backing file.\n", - (u_long)gsp->st_page_out); - db_stat_dl("Clean pages forced from the cache.\n", - (u_long)gsp->st_ro_evict); - db_stat_dl("Dirty pages forced from the cache.\n", - (u_long)gsp->st_rw_evict); - db_stat_dl("Dirty pages written by trickle-sync thread.\n", - (u_long)gsp->st_page_trickle); - db_stat_dl("Current total page count.\n", - (u_long)gsp->st_pages); - db_stat_dl("Current clean page count.\n", - (u_long)gsp->st_page_clean); - db_stat_dl("Current dirty page count.\n", - (u_long)gsp->st_page_dirty); - db_stat_dl("Number of hash buckets used for page location.\n", - (u_long)gsp->st_hash_buckets); - db_stat_dl("Total number of times hash chains searched for a page.\n", - (u_long)gsp->st_hash_searches); - db_stat_dl("The longest hash chain searched for a page.\n", - (u_long)gsp->st_hash_longest); - db_stat_dl("Total number of hash buckets examined for page location.\n", - (u_long)gsp->st_hash_examined); - db_stat_dl("The number of hash bucket locks granted without waiting.\n", - (u_long)gsp->st_hash_nowait); - db_stat_dl("The number of hash bucket locks granted after waiting.\n", - (u_long)gsp->st_hash_wait); - db_stat_dl("The maximum number of times any hash bucket lock was waited for.\n", - (u_long)gsp->st_hash_max_wait); - db_stat_dl("The number of region locks granted without waiting.\n", - (u_long)gsp->st_region_nowait); - db_stat_dl("The number of region locks granted after waiting.\n", - (u_long)gsp->st_region_wait); - db_stat_dl("The number of page allocations.\n", - (u_long)gsp->st_alloc); - db_stat_dl("The number of hash buckets examined during allocations\n", - (u_long)gsp->st_alloc_buckets); - db_stat_dl("The max number of hash buckets examined for an allocation\n", - (u_long)gsp->st_alloc_max_buckets); - db_stat_dl("The number of pages examined during allocations\n", - (u_long)gsp->st_alloc_pages); - db_stat_dl("The max number of pages examined for an allocation\n", - (u_long)gsp->st_alloc_max_pages); - - for (; fsp != NULL && *fsp != NULL; ++fsp) { - printf("%s\n", DB_LINE); - printf("Pool File: %s\n", (*fsp)->file_name); - db_stat_dl("Page size.\n", (u_long)(*fsp)->st_pagesize); - db_stat_dl("Requested pages mapped into the process' 
address space.\n", - (u_long)(*fsp)->st_map); - db_stat_dl("Requested pages found in the cache", - (u_long)(*fsp)->st_cache_hit); - if ((*fsp)->st_cache_hit + (*fsp)->st_cache_miss != 0) - printf(" (%.0f%%)", ((double)(*fsp)->st_cache_hit / - ((*fsp)->st_cache_hit + (*fsp)->st_cache_miss)) * - 100); - printf(".\n"); - db_stat_dl("Requested pages not found in the cache.\n", - (u_long)(*fsp)->st_cache_miss); - db_stat_dl("Pages created in the cache.\n", - (u_long)(*fsp)->st_page_create); - db_stat_dl("Pages read into the cache.\n", - (u_long)(*fsp)->st_page_in); - db_stat_dl("Pages written from the cache to the backing file.\n", - (u_long)(*fsp)->st_page_out); - } - - free(gsp); - - return (0); -} - -/* - * rep_stats -- - * Display replication statistics. - */ -int -db_stat_rep_stats(dbenv, flags) - DB_ENV *dbenv; - u_int32_t flags; -{ - DB_REP_STAT *sp; - int is_client, ret; - const char *p; - - if ((ret = dbenv->rep_stat(dbenv, &sp, flags)) != 0) { - dbenv->err(dbenv, ret, NULL); - return (1); - } - - is_client = 0; - switch (sp->st_status) { - case DB_REP_MASTER: - printf("Environment configured as a replication master.\n"); - break; - case DB_REP_CLIENT: - printf("Environment configured as a replication client.\n"); - is_client = 1; - break; - case DB_REP_LOGSONLY: - printf("Environment configured as a logs-only replica.\n"); - is_client = 1; - break; - default: - printf("Environment not configured for replication.\n"); - break; - } - - printf("%lu/%lu\t%s\n", - (u_long)sp->st_next_lsn.file, (u_long)sp->st_next_lsn.offset, - is_client ? "Next LSN expected." : "Next LSN to be used."); - p = sp->st_waiting_lsn.file == 0 ? - "Not waiting for any missed log records." : - "LSN of first log record we have after missed log records."; - printf("%lu/%lu\t%s\n", - (u_long)sp->st_waiting_lsn.file, (u_long)sp->st_waiting_lsn.offset, - p); - - db_stat_dl("Number of duplicate master conditions detected.\n", - (u_long)sp->st_dupmasters); - if (sp->st_env_id != DB_EID_INVALID) - db_stat_dl("Current environment ID.\n", (u_long)sp->st_env_id); - else - printf("No current environment ID.\n"); - db_stat_dl("Current environment priority.\n", (u_long)sp->st_env_priority); - db_stat_dl("Current generation number.\n", (u_long)sp->st_gen); - db_stat_dl("Number of duplicate log records received.\n", - (u_long)sp->st_log_duplicated); - db_stat_dl("Number of log records currently queued.\n", - (u_long)sp->st_log_queued); - db_stat_dl("Maximum number of log records ever queued at once.\n", - (u_long)sp->st_log_queued_max); - db_stat_dl("Total number of log records queued.\n", - (u_long)sp->st_log_queued_total); - db_stat_dl("Number of log records received and appended to the log.\n", - (u_long)sp->st_log_records); - db_stat_dl("Number of log records missed and requested.\n", - (u_long)sp->st_log_requested); - if (sp->st_master != DB_EID_INVALID) - db_stat_dl("Current master ID.\n", (u_long)sp->st_master); - else - printf("No current master ID.\n"); - db_stat_dl("Number of times the master has changed.\n", - (u_long)sp->st_master_changes); - db_stat_dl("Number of messages received with a bad generation number.\n", - (u_long)sp->st_msgs_badgen); - db_stat_dl("Number of messages received and processed.\n", - (u_long)sp->st_msgs_processed); - db_stat_dl("Number of messages ignored due to pending recovery.\n", - (u_long)sp->st_msgs_recover); - db_stat_dl("Number of failed message sends.\n", - (u_long)sp->st_msgs_send_failures); - db_stat_dl("Number of messages sent.\n", (u_long)sp->st_msgs_sent); - db_stat_dl("Number of 
new site messages received.\n", (u_long)sp->st_newsites); - db_stat_dl("Number of environments believed to be in the replication group.\n", - (u_long)sp->st_nsites); - db_stat_dl("Transmission limited.\n", (u_long)sp->st_nthrottles); - db_stat_dl("Number of outdated conditions detected.\n", - (u_long)sp->st_outdated); - db_stat_dl("Number of transactions applied.\n", (u_long)sp->st_txns_applied); - - db_stat_dl("Number of elections held.\n", (u_long)sp->st_elections); - db_stat_dl("Number of elections won.\n", (u_long)sp->st_elections_won); - - if (sp->st_election_status == 0) - printf("No election in progress.\n"); - else { - db_stat_dl("Current election phase.\n", (u_long)sp->st_election_status); - db_stat_dl("Election winner.\n", - (u_long)sp->st_election_cur_winner); - db_stat_dl("Election generation number.\n", - (u_long)sp->st_election_gen); - printf("%lu/%lu\tMaximum LSN of election winner.\n", - (u_long)sp->st_election_lsn.file, - (u_long)sp->st_election_lsn.offset); - db_stat_dl("Number of sites expected to participate in elections.\n", - (u_long)sp->st_election_nsites); - db_stat_dl("Election priority.\n", (u_long)sp->st_election_priority); - db_stat_dl("Election tiebreaker value.\n", - (u_long)sp->st_election_tiebreaker); - db_stat_dl("Votes received this election round.\n", - (u_long)sp->st_election_votes); - } - - free(sp); - - return (0); -} - -/* - * txn_stats -- - * Display transaction statistics. - */ -int -db_stat_txn_stats(dbenv, flags) - DB_ENV *dbenv; - u_int32_t flags; -{ - DB_TXN_STAT *sp; - u_int32_t i; - int ret; - const char *p; - - if ((ret = dbenv->txn_stat(dbenv, &sp, flags)) != 0) { - dbenv->err(dbenv, ret, NULL); - return (1); - } - - p = sp->st_last_ckp.file == 0 ? - "No checkpoint LSN." : "File/offset for last checkpoint LSN."; - printf("%lu/%lu\t%s\n", - (u_long)sp->st_last_ckp.file, (u_long)sp->st_last_ckp.offset, p); - if (sp->st_time_ckp == 0) - printf("0\tNo checkpoint timestamp.\n"); - else - printf("%.24s\tCheckpoint timestamp.\n", - ctime(&sp->st_time_ckp)); - printf("%lx\tLast transaction ID allocated.\n", - (u_long)sp->st_last_txnid); - db_stat_dl("Maximum number of active transactions configured.\n", - (u_long)sp->st_maxtxns); - db_stat_dl("Active transactions.\n", (u_long)sp->st_nactive); - db_stat_dl("Maximum active transactions.\n", (u_long)sp->st_maxnactive); - db_stat_dl("Number of transactions begun.\n", (u_long)sp->st_nbegins); - db_stat_dl("Number of transactions aborted.\n", (u_long)sp->st_naborts); - db_stat_dl("Number of transactions committed.\n", (u_long)sp->st_ncommits); - db_stat_dl("Number of transactions restored.\n", (u_long)sp->st_nrestores); - - db_stat_dl_bytes("Transaction region size", - (u_long)0, (u_long)0, (u_long)sp->st_regsize); - db_stat_dl("The number of region locks granted after waiting.\n", - (u_long)sp->st_region_wait); - db_stat_dl("The number of region locks granted without waiting.\n", - (u_long)sp->st_region_nowait); - - qsort(sp->st_txnarray, - sp->st_nactive, sizeof(sp->st_txnarray[0]), db_stat_txn_compare); - for (i = 0; i < sp->st_nactive; ++i) { - printf("\tid: %lx; begin LSN: file/offset %lu/%lu", - (u_long)sp->st_txnarray[i].txnid, - (u_long)sp->st_txnarray[i].lsn.file, - (u_long)sp->st_txnarray[i].lsn.offset); - if (sp->st_txnarray[i].parentid != 0) - printf("; parent: %lx", - (u_long)sp->st_txnarray[i].parentid); - if (sp->st_txnarray[i].xa_status != 0) - db_stat_txn_xid_stats(&sp->st_txnarray[i]); - printf("\n"); - } - - free(sp); - - return (0); -} - -void -db_stat_txn_xid_stats(txnp) - 
DB_TXN_ACTIVE *txnp; -{ - u_int32_t v; - u_int i; - int cnt; - - printf("\n\tXA: "); - switch (txnp->xa_status) { - case TXN_XA_ABORTED: - printf("ABORTED"); - break; - case TXN_XA_DEADLOCKED: - printf("DEADLOCKED"); - break; - case TXN_XA_ENDED: - printf("ENDED"); - break; - case TXN_XA_PREPARED: - printf("PREPARED"); - break; - case TXN_XA_STARTED: - printf("STARTED"); - break; - case TXN_XA_SUSPENDED: - printf("SUSPENDED"); - break; - default: - printf("unknown state: %lu", (u_long)txnp->xa_status); - break; - } - printf("; XID:\n\t\t"); - for (i = 0, cnt = 0; i < DB_XIDDATASIZE; i += sizeof(u_int32_t)) { - memcpy(&v, &txnp->xid[i], sizeof(u_int32_t)); - printf("0x%x ", v); - if (++cnt == 4) { - printf("\n\t\t"); - cnt = 0; - } - } -} - -int -db_stat_txn_compare(a1, b1) - const void *a1, *b1; -{ - const DB_TXN_ACTIVE *a, *b; - - a = a1; - b = b1; - - if (a->txnid > b->txnid) - return (1); - if (a->txnid < b->txnid) - return (-1); - return (0); -} - -/* - * dl -- - * Display a big value. - */ -void -db_stat_dl(msg, value) - const char *msg; - u_long value; -{ - /* - * Two formats: if less than 10 million, display as the number, if - * greater than 10 million display as ###M. - */ - if (value < 10000000) - printf("%lu\t%s", value, msg); - else - printf("%luM\t%s", value / 1000000, msg); -} - -/* - * dl_bytes -- - * Display a big number of bytes. - */ -void -db_stat_dl_bytes(msg, gbytes, mbytes, bytes) - const char *msg; - u_long gbytes, mbytes, bytes; -{ - const char *sep; - - /* Normalize the values. */ - while (bytes >= MEGABYTE) { - ++mbytes; - bytes -= MEGABYTE; - } - while (mbytes >= GIGABYTE / MEGABYTE) { - ++gbytes; - mbytes -= GIGABYTE / MEGABYTE; - } - - sep = ""; - if (gbytes > 0) { - printf("%luGB", gbytes); - sep = " "; - } - if (mbytes > 0) { - printf("%s%luMB", sep, mbytes); - sep = " "; - } - if (bytes >= 1024) { - printf("%s%luKB", sep, bytes / 1024); - bytes %= 1024; - sep = " "; - } - if (bytes > 0) - printf("%s%luB", sep, bytes); - - printf("\t%s.\n", msg); -} - -/* - * prflags -- - * Print out flag values. - */ -void -db_stat_prflags(dbp, flags, fnp) - DB *dbp; - u_int32_t flags; - const FN *fnp; -{ - const char *sep; - int lorder; - - sep = "\t"; - printf("Flags:"); - for (; fnp->mask != 0; ++fnp) - if (fnp->mask & flags) { - printf("%s%s", sep, fnp->name); - sep = ", "; - } - - (void)dbp->get_lorder(dbp, &lorder); - switch (lorder) { - case 1234: - printf("%s%s", sep, "little-endian"); - break; - case 4321: - printf("%s%s", sep, "big-endian"); - break; - default: - printf("%s%s", sep, "UNKNOWN-LORDER"); - break; - } - printf("\n"); -} - /* * db_init -- * Initialize the environment. @@ -1275,6 +430,8 @@ db_stat_db_init(dbenv, home, ttype, cache, is_private) if ((ret = dbenv->open(dbenv, home, DB_JOINENV | DB_USE_ENVIRON, 0)) == 0) return (0); + if (ret == DB_VERSION_MISMATCH) + goto err; if (ttype != T_DB && ttype != T_LOG) { dbenv->err(dbenv, ret, "DB_ENV->open%s%s", home == NULL ? "" : ": ", home == NULL ? "" : home); @@ -1307,31 +464,18 @@ db_stat_db_init(dbenv, home, ttype, cache, is_private) return (0); /* An environment is required. */ - dbenv->err(dbenv, ret, "open"); - return (1); -} - -/* - * argcheck -- - * Return if argument flags are okay. 
- */ -int -db_stat_argcheck(arg, ok_args) - char *arg; - const char *ok_args; -{ - for (; *arg != '\0'; ++arg) - if (strchr(ok_args, *arg) == NULL) - return (0); +err: dbenv->err(dbenv, ret, "DB_ENV->open"); return (1); } int db_stat_usage() { - fprintf(stderr, "%s\n\t%s\n", - "usage: db_stat [-celmNrtVZ] [-C Aclmop]", - "[-d file [-f] [-s database]] [-h home] [-M Ahlm] [-P password]"); + fprintf(stderr, "usage: db_stat %s\n", + "-d file [-fN] [-h home] [-P password] [-s database]"); + fprintf(stderr, "usage: db_stat %s\n\t%s\n", + "[-cEelmNrtVZ] [-C Aclop]", + "[-h home] [-L A] [-M A] [-P password] [-R A]"); return (EXIT_FAILURE); } diff --git a/db/build_vxworks/db_upgrade/db_upgrade.c b/db/build_vxworks/db_upgrade/db_upgrade.c index 983f8b789..cfcb1cffc 100644 --- a/db/build_vxworks/db_upgrade/db_upgrade.c +++ b/db/build_vxworks/db_upgrade/db_upgrade.c @@ -1,17 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_upgrade.c,v 1.37 2004/06/10 01:00:09 bostic Exp $ */ #include "db_config.h" #ifndef lint static const char copyright[] = - "Copyright (c) 1996-2003\nSleepycat Software Inc. All rights reserved.\n"; -static const char revid[] = - "$Id: db_upgrade.c,v 1.35 2003/08/13 19:57:09 ubell Exp $"; + "Copyright (c) 1996-2004\nSleepycat Software Inc. All rights reserved.\n"; #endif #ifndef NO_SYSTEM_INCLUDES @@ -137,9 +137,11 @@ db_upgrade_main(argc, argv) */ if ((ret = dbenv->open(dbenv, home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 && + (ret == DB_VERSION_MISMATCH || (ret = dbenv->open(dbenv, home, - DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) { - dbenv->err(dbenv, ret, "open"); + DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, + 0)) != 0)) { + dbenv->err(dbenv, ret, "DB_ENV->open"); goto shutdown; } diff --git a/db/build_vxworks/db_verify/db_verify.c b/db/build_vxworks/db_verify/db_verify.c index f685cce1d..f00e658b2 100644 --- a/db/build_vxworks/db_verify/db_verify.c +++ b/db/build_vxworks/db_verify/db_verify.c @@ -1,17 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_verify.c,v 1.49 2004/08/01 00:21:58 bostic Exp $ */ #include "db_config.h" #ifndef lint static const char copyright[] = - "Copyright (c) 1996-2003\nSleepycat Software Inc. All rights reserved.\n"; -static const char revid[] = - "$Id: db_verify.c,v 1.45 2003/08/13 19:57:09 ubell Exp $"; + "Copyright (c) 1996-2004\nSleepycat Software Inc. All rights reserved.\n"; #endif #ifndef NO_SYSTEM_INCLUDES @@ -53,8 +53,8 @@ db_verify_main(argc, argv) const char *progname = "db_verify"; DB *dbp, *dbp1; DB_ENV *dbenv; - u_int32_t cache; - int ch, exitval, nflag, oflag, private; + u_int32_t flags, cache; + int ch, exitval, nflag, private; int quiet, resize, ret; char *home, *passwd; @@ -64,10 +64,11 @@ db_verify_main(argc, argv) dbenv = NULL; dbp = NULL; cache = MEGABYTE; - exitval = nflag = oflag = quiet = 0; + exitval = nflag = quiet = 0; + flags = 0; home = passwd = NULL; __db_getopt_reset = 1; - while ((ch = getopt(argc, argv, "h:NoP:qV")) != EOF) + while ((ch = getopt(argc, argv, "h:NoP:quV")) != EOF) switch (ch) { case 'h': home = optarg; @@ -85,11 +86,14 @@ db_verify_main(argc, argv) } break; case 'o': - oflag = 1; + LF_SET(DB_NOORDERCHK); break; case 'q': quiet = 1; break; + case 'u': /* Undocumented. 
*/ + LF_SET(DB_UNREF); + break; case 'V': printf("%s\n", db_version(NULL, NULL, NULL)); return (EXIT_SUCCESS); @@ -145,14 +149,18 @@ retry: if ((ret = db_env_create(&dbenv, 0)) != 0) { private = 0; if ((ret = dbenv->open(dbenv, home, DB_INIT_MPOOL | DB_USE_ENVIRON, 0)) != 0) { - if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) != 0) { - dbenv->err(dbenv, ret, "set_cachesize"); - goto shutdown; + if (ret != DB_VERSION_MISMATCH) { + if ((ret = + dbenv->set_cachesize(dbenv, 0, cache, 1)) != 0) { + dbenv->err(dbenv, ret, "set_cachesize"); + goto shutdown; + } + private = 1; + ret = dbenv->open(dbenv, home, DB_CREATE | + DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0); } - private = 1; - if ((ret = dbenv->open(dbenv, home, - DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) { - dbenv->err(dbenv, ret, "open"); + if (ret != 0) { + dbenv->err(dbenv, ret, "DB_ENV->open"); goto shutdown; } } @@ -166,6 +174,10 @@ retry: if ((ret = db_env_create(&dbenv, 0)) != 0) { /* * We create a 2nd dbp to this database to get its pagesize * because the dbp we're using for verify cannot be opened. + * + * If the database is corrupted, we may not be able to open + * it, of course. In that case, just continue, using the + * cache size we have. */ if (private) { if ((ret = db_create(&dbp1, dbenv, 0)) != 0) { @@ -174,12 +186,9 @@ retry: if ((ret = db_env_create(&dbenv, 0)) != 0) { goto shutdown; } - if ((ret = dbp1->open(dbp1, NULL, - argv[0], NULL, DB_UNKNOWN, DB_RDONLY, 0)) != 0) { - dbenv->err(dbenv, ret, "DB->open: %s", argv[0]); - (void)dbp1->close(dbp1, 0); - goto shutdown; - } + ret = dbp1->open(dbp1, + NULL, argv[0], NULL, DB_UNKNOWN, DB_RDONLY, 0); + /* * If we get here, we can check the cache/page. * !!! @@ -188,29 +197,26 @@ retry: if ((ret = db_env_create(&dbenv, 0)) != 0) { * will still be working on the same argv when we * get back into the for-loop. */ - ret = __db_util_cache(dbenv, dbp1, &cache, &resize); - (void)dbp1->close(dbp1, 0); - if (ret != 0) - goto shutdown; - - if (resize) { - (void)dbp->close(dbp, 0); - dbp = NULL; - - (void)dbenv->close(dbenv, 0); - dbenv = NULL; - goto retry; + if (ret == 0) { + if (__db_util_cache( + dbp1, &cache, &resize) == 0 && resize) { + (void)dbp1->close(dbp1, 0); + (void)dbp->close(dbp, 0); + dbp = NULL; + + (void)dbenv->close(dbenv, 0); + dbenv = NULL; + goto retry; + } } + (void)dbp1->close(dbp1, 0); } /* The verify method is a destructor. */ - ret = dbp->verify(dbp, - argv[0], NULL, NULL, oflag ? DB_NOORDERCHK : 0); + ret = dbp->verify(dbp, argv[0], NULL, NULL, flags); dbp = NULL; - if (ret != 0) { - dbenv->err(dbenv, ret, "DB->verify: %s", argv[0]); + if (ret != 0) goto shutdown; - } } if (0) { diff --git a/db/build_vxworks/dbdemo/dbdemo.c b/db/build_vxworks/dbdemo/dbdemo.c index 0bfd4d7e4..993a51a36 100644 --- a/db/build_vxworks/dbdemo/dbdemo.c +++ b/db/build_vxworks/dbdemo/dbdemo.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: ex_access.c,v 11.23 2003/01/08 04:43:53 bostic Exp $ + * $Id: ex_access.c,v 11.25 2004/09/17 22:00:28 mjc Exp $ */ #include @@ -49,7 +49,7 @@ dbdemo_main(argc, argv) DB *dbp; DBC *dbcp; DBT key, data; - u_int32_t len; + size_t len; int ch, ret, rflag; char *database, *p, *t, buf[1024], rbuf[1024]; const char *progname = "dbdemo"; /* Program name. 
*/ @@ -118,7 +118,7 @@ dbdemo_main(argc, argv) key.data = buf; data.data = rbuf; - data.size = key.size = len - 1; + data.size = key.size = (u_int32_t)len - 1; switch (ret = dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE)) { diff --git a/db/build_win32/db.h b/db/build_win32/db.h index 380f158ba..c4cb5d212 100644 --- a/db/build_win32/db.h +++ b/db/build_win32/db.h @@ -2,10 +2,10 @@ /* * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: db.in,v 11.389 2003/10/01 21:33:58 sue Exp $ + * $Id: db.in,v 11.463 2004/10/11 18:47:50 bostic Exp $ * * db.h include file layout: * General. @@ -24,8 +24,7 @@ #ifndef __NO_SYSTEM_INCLUDES #include - -/* does not include on some systems. */ +#include #include #endif @@ -41,9 +40,9 @@ extern "C" { * Berkeley DB version information. */ #define DB_VERSION_MAJOR 4 -#define DB_VERSION_MINOR 2 -#define DB_VERSION_PATCH 52 -#define DB_VERSION_STRING "Sleepycat Software: Berkeley DB 4.2.52: (December 3, 2003)" +#define DB_VERSION_MINOR 3 +#define DB_VERSION_PATCH 14 +#define DB_VERSION_STRING "Sleepycat Software: Berkeley DB 4.3.14: (October 14, 2004)" /* * !!! @@ -65,20 +64,52 @@ typedef short int16_t; typedef unsigned short u_int16_t; typedef int int32_t; typedef unsigned int u_int32_t; +typedef __int64 int64_t; +typedef unsigned __int64 u_int64_t; #endif -#if !defined(_WINSOCKAPI_) +#ifndef _WINSOCKAPI_ typedef unsigned char u_char; typedef unsigned short u_short; typedef unsigned int u_int; typedef unsigned long u_long; #endif -#if defined(_WIN64) -typedef __int64 ssize_t; +#ifdef _WIN64 +typedef int64_t ssize_t; +#else +typedef int32_t ssize_t; +#endif + +/* + * uintmax_t -- + * Largest unsigned type, used to align structures in memory. We don't store + * floating point types in structures, so integral types should be sufficient + * (and we don't have to worry about systems that store floats in other than + * power-of-2 numbers of bytes). Additionally this fixes compilers that rewrite + * structure assignments and ANSI C memcpy calls to be in-line instructions + * that happen to require alignment. Note: this alignment isn't sufficient for + * mutexes, which depend on things like cache line alignment. Mutex alignment + * is handled separately, in mutex.h. + * + * uintptr_t -- + * Unsigned type that's the same size as a pointer. There are places where + * DB modifies pointers by discarding the bottom bits to guarantee alignment. + * We can't use uintmax_t, it may be larger than the pointer, and compilers + * get upset about that. So far we haven't run on any machine where there's + * no unsigned type the same size as a pointer -- here's hoping. + */ +typedef u_int64_t uintmax_t; +#ifdef _WIN64 +typedef u_int64_t uintptr_t; #else -typedef int ssize_t; +typedef u_int32_t uintptr_t; #endif +/* + * Sequences are only available on machines with 64-bit integral types. + */ +typedef int64_t db_seq_t; + /* Basic types that are exported or quasi-exported. */ typedef u_int32_t db_pgno_t; /* Page number type. */ typedef u_int16_t db_indx_t; /* Page offset type. */ @@ -90,11 +121,13 @@ typedef u_int32_t db_recno_t; /* Record number type. */ typedef u_int32_t db_timeout_t; /* Type of a timeout. */ /* - * Region offsets are currently limited to 32-bits. I expect that's going - * to have to be fixed in the not-too-distant future, since we won't want to - * split 100Gb memory pools into that many different regions. 
+ * Region offsets are the difference between a pointer in a region and the + * region's base address. With private environments, both addresses are the + * result of calling malloc, and we can't assume anything about what malloc + * will return, so region offsets have to be able to hold differences between + * arbitrary pointers. */ -typedef u_int32_t roff_t; +typedef uintptr_t roff_t; /* * Forward structure declarations, so we can declare pointers and @@ -121,6 +154,9 @@ struct __db_preplist; typedef struct __db_preplist DB_PREPLIST; struct __db_qam_stat; typedef struct __db_qam_stat DB_QUEUE_STAT; struct __db_rep; typedef struct __db_rep DB_REP; struct __db_rep_stat; typedef struct __db_rep_stat DB_REP_STAT; +struct __db_sequence; typedef struct __db_sequence DB_SEQUENCE; +struct __db_seq_record; typedef struct __db_seq_record DB_SEQ_RECORD; +struct __db_seq_stat; typedef struct __db_seq_stat DB_SEQUENCE_STAT; struct __db_txn; typedef struct __db_txn DB_TXN; struct __db_txn_active; typedef struct __db_txn_active DB_TXN_ACTIVE; struct __db_txn_stat; typedef struct __db_txn_stat DB_TXN_STAT; @@ -181,6 +217,8 @@ struct __db_dbt { * DB_AUTO_COMMIT: * DB_ENV->set_flags, DB->associate, DB->del, DB->put, DB->open, * DB->remove, DB->rename, DB->truncate + * DB_DEGREE_2: + * DB->cursor, DB->get, DB->join, DBcursor->c_get, DB_ENV->txn_begin * DB_DIRTY_READ: * DB->cursor, DB->get, DB->join, DB->open, DBcursor->c_get, * DB_ENV->txn_begin @@ -189,13 +227,14 @@ struct __db_dbt { * DB->remove, DB->rename, DB->truncate * * !!! - * The DB_DIRTY_READ bit mask can't be changed without also changing the - * masks for the flags that can be OR'd into DB access method and cursor - * operation values. + * The DB_DIRTY_READ and DB_DEGREE_2 bit masks can't be changed without + * also changing the masks for the flags that can be OR'd into DB + * access method and cursor operation values. */ -#define DB_AUTO_COMMIT 0x1000000 /* Implied transaction. */ -#define DB_DIRTY_READ 0x2000000 /* Dirty Read. */ -#define DB_NO_AUTO_COMMIT 0x4000000 /* Override env-wide AUTO-COMMIT. */ +#define DB_AUTO_COMMIT 0x01000000/* Implied transaction. */ +#define DB_DEGREE_2 0x02000000/* Degree 2. */ +#define DB_DIRTY_READ 0x04000000/* Dirty Read. */ +#define DB_NO_AUTO_COMMIT 0x08000000/* Override env-wide AUTOCOMMIT. */ /* * Flags private to db_env_create. @@ -244,19 +283,25 @@ struct __db_dbt { /* * Flags private to DB_ENV->set_flags. - * Shared flags up to 0x0000800 */ -#define DB_CDB_ALLDB 0x0001000 /* Set CDB locking per environment. */ -#define DB_DIRECT_DB 0x0002000 /* Don't buffer databases in the OS. */ -#define DB_DIRECT_LOG 0x0004000 /* Don't buffer log files in the OS. */ -#define DB_LOG_AUTOREMOVE 0x0008000 /* Automatically remove log files. */ -#define DB_NOLOCKING 0x0010000 /* Set locking/mutex behavior. */ -#define DB_NOPANIC 0x0020000 /* Set panic state per DB_ENV. */ -#define DB_OVERWRITE 0x0040000 /* Overwrite unlinked region files. */ -#define DB_PANIC_ENVIRONMENT 0x0080000 /* Set panic state per environment. */ -#define DB_REGION_INIT 0x0100000 /* Page-fault regions on open. */ -#define DB_TIME_NOTGRANTED 0x0200000 /* Return NOTGRANTED on timeout. */ -#define DB_TXN_WRITE_NOSYNC 0x0400000 /* Write, don't sync, on txn commit. */ -#define DB_YIELDCPU 0x0800000 /* Yield the CPU (a lot). */ + * Shared flags up to 0x00000800 */ +#define DB_CDB_ALLDB 0x00001000/* Set CDB locking per environment. */ +#define DB_DIRECT_DB 0x00002000/* Don't buffer databases in the OS. 
*/ +#define DB_DIRECT_LOG 0x00004000/* Don't buffer log files in the OS. */ +#define DB_DSYNC_LOG 0x00008000/* Set O_DSYNC on the log. */ +#define DB_LOG_AUTOREMOVE 0x00010000/* Automatically remove log files. */ +#define DB_LOG_INMEMORY 0x00020000/* Store logs in buffers in memory. */ +#define DB_NOLOCKING 0x00040000/* Set locking/mutex behavior. */ +#define DB_NOPANIC 0x00080000/* Set panic state per DB_ENV. */ +#define DB_OVERWRITE 0x00100000/* Overwrite unlinked region files. */ +#define DB_PANIC_ENVIRONMENT 0x00200000/* Set panic state per environment. */ +#define DB_REGION_INIT 0x00400000/* Page-fault regions on open. */ +#define DB_TIME_NOTGRANTED 0x00800000/* Return NOTGRANTED on timeout. */ +/* Shared flags at 0x01000000 */ +/* Shared flags at 0x02000000 */ +/* Shared flags at 0x04000000 */ +/* Shared flags at 0x08000000 */ +#define DB_TXN_WRITE_NOSYNC 0x10000000/* Write, don't sync, on txn commit. */ +#define DB_YIELDCPU 0x20000000/* Yield the CPU (a lot). */ /* * Flags private to DB->set_feedback's callback. @@ -268,8 +313,9 @@ struct __db_dbt { * Flags private to DB_MPOOLFILE->open. * Shared flags up to 0x0000800 */ #define DB_DIRECT 0x0001000 /* Don't buffer the file in the OS. */ -#define DB_EXTENT 0x0002000 /* UNDOC: dealing with an extent. */ -#define DB_ODDFILESIZE 0x0004000 /* Truncate file to N * pgsize. */ +#define DB_DURABLE_UNKNOWN 0x0002000 /* internal: durability on open. */ +#define DB_EXTENT 0x0004000 /* internal: dealing with an extent. */ +#define DB_ODDFILESIZE 0x0008000 /* Truncate file to N * pgsize. */ /* * Flags private to DB->set_flags. @@ -278,15 +324,23 @@ struct __db_dbt { #define DB_DUP 0x0000002 /* Btree, Hash: duplicate keys. */ #define DB_DUPSORT 0x0000004 /* Btree, Hash: duplicate keys. */ #define DB_ENCRYPT 0x0000008 /* Btree, Hash: duplicate keys. */ -#define DB_RECNUM 0x0000010 /* Btree: record numbers. */ -#define DB_RENUMBER 0x0000020 /* Recno: renumber on insert/delete. */ -#define DB_REVSPLITOFF 0x0000040 /* Btree: turn off reverse splits. */ -#define DB_SNAPSHOT 0x0000080 /* Recno: snapshot the input. */ +#define DB_INORDER 0x0000010 /* Queue: strict ordering on consume. */ +#define DB_RECNUM 0x0000020 /* Btree: record numbers. */ +#define DB_RENUMBER 0x0000040 /* Recno: renumber on insert/delete. */ +#define DB_REVSPLITOFF 0x0000080 /* Btree: turn off reverse splits. */ +#define DB_SNAPSHOT 0x0000100 /* Recno: snapshot the input. */ /* - * Flags private to the DB->stat methods. + * Flags private to the DB_ENV->stat_print, DB->stat and DB->stat_print methods. */ -#define DB_STAT_CLEAR 0x0000001 /* Clear stat after returning values. */ +#define DB_STAT_ALL 0x0000001 /* Print: Everything. */ +#define DB_STAT_CLEAR 0x0000002 /* Clear stat after returning values. */ +#define DB_STAT_LOCK_CONF 0x0000004 /* Print: Lock conflict matrix. */ +#define DB_STAT_LOCK_LOCKERS 0x0000008 /* Print: Lockers. */ +#define DB_STAT_LOCK_OBJECTS 0x0000010 /* Print: Lock objects. */ +#define DB_STAT_LOCK_PARAMS 0x0000020 /* Print: Lock parameters. */ +#define DB_STAT_MEMP_HASH 0x0000040 /* Print: Mpool hash buckets. */ +#define DB_STAT_SUBSYSTEM 0x0000080 /* Print: Subsystems too. */ /* * Flags private to DB->join. @@ -303,6 +357,7 @@ struct __db_dbt { #define DB_PR_RECOVERYTEST 0x0000010 /* Recovery test (-dr). */ #define DB_PRINTABLE 0x0000020 /* Use printable format for salvage. */ #define DB_SALVAGE 0x0000040 /* Salvage what looks like data. */ +#define DB_UNREF 0x0000080 /* Report unreferenced pages. */ /* * !!! 
* These must not go over 0x8000, or they will collide with the flags @@ -329,20 +384,22 @@ struct __db_dbt { #define DB_LOCK_NORUN 0 #define DB_LOCK_DEFAULT 1 /* Default policy. */ #define DB_LOCK_EXPIRE 2 /* Only expire locks, no detection. */ -#define DB_LOCK_MAXLOCKS 3 /* Abort txn with maximum # of locks. */ -#define DB_LOCK_MINLOCKS 4 /* Abort txn with minimum # of locks. */ -#define DB_LOCK_MINWRITE 5 /* Abort txn with minimum writelocks. */ -#define DB_LOCK_OLDEST 6 /* Abort oldest transaction. */ -#define DB_LOCK_RANDOM 7 /* Abort random transaction. */ -#define DB_LOCK_YOUNGEST 8 /* Abort youngest transaction. */ +#define DB_LOCK_MAXLOCKS 3 /* Select locker with max locks. */ +#define DB_LOCK_MAXWRITE 4 /* Select locker with max writelocks. */ +#define DB_LOCK_MINLOCKS 5 /* Select locker with min locks. */ +#define DB_LOCK_MINWRITE 6 /* Select locker with min writelocks. */ +#define DB_LOCK_OLDEST 7 /* Select oldest locker. */ +#define DB_LOCK_RANDOM 8 /* Select random locker. */ +#define DB_LOCK_YOUNGEST 9 /* Select youngest locker. */ /* Flag values for lock_vec(), lock_get(). */ -#define DB_LOCK_NOWAIT 0x001 /* Don't wait on unavailable lock. */ -#define DB_LOCK_RECORD 0x002 /* Internal: record lock. */ -#define DB_LOCK_REMOVE 0x004 /* Internal: flag object removed. */ -#define DB_LOCK_SET_TIMEOUT 0x008 /* Internal: set lock timeout. */ -#define DB_LOCK_SWITCH 0x010 /* Internal: switch existing lock. */ -#define DB_LOCK_UPGRADE 0x020 /* Internal: upgrade existing lock. */ +#define DB_LOCK_ABORT 0x001 /* Internal: Lock during abort. */ +#define DB_LOCK_NOWAIT 0x002 /* Don't wait on unavailable lock. */ +#define DB_LOCK_RECORD 0x004 /* Internal: record lock. */ +#define DB_LOCK_REMOVE 0x008 /* Internal: flag object removed. */ +#define DB_LOCK_SET_TIMEOUT 0x010 /* Internal: set lock timeout. */ +#define DB_LOCK_SWITCH 0x020 /* Internal: switch existing lock. */ +#define DB_LOCK_UPGRADE 0x040 /* Internal: upgrade existing lock. */ /* * Simple R/W lock modes and for multi-granularity intention locking. @@ -386,16 +443,15 @@ typedef enum { */ typedef enum { DB_LSTAT_ABORTED=1, /* Lock belongs to an aborted txn. */ - DB_LSTAT_ERR=2, /* Lock is bad. */ - DB_LSTAT_EXPIRED=3, /* Lock has expired. */ - DB_LSTAT_FREE=4, /* Lock is unallocated. */ - DB_LSTAT_HELD=5, /* Lock is currently held. */ - DB_LSTAT_NOTEXIST=6, /* Object on which lock was waiting + DB_LSTAT_EXPIRED=2, /* Lock has expired. */ + DB_LSTAT_FREE=3, /* Lock is unallocated. */ + DB_LSTAT_HELD=4, /* Lock is currently held. */ + DB_LSTAT_NOTEXIST=5, /* Object on which lock was waiting * was removed */ - DB_LSTAT_PENDING=7, /* Lock was waiting and has been + DB_LSTAT_PENDING=6, /* Lock was waiting and has been * promoted; waiting for the owner * to run and upgrade it to held. */ - DB_LSTAT_WAITING=8 /* Lock is on the wait queue. */ + DB_LSTAT_WAITING=7 /* Lock is on the wait queue. */ }db_status_t; /* Lock statistics structure. */ @@ -405,7 +461,7 @@ struct __db_lock_stat { u_int32_t st_maxlocks; /* Maximum number of locks in table. */ u_int32_t st_maxlockers; /* Maximum num of lockers in table. */ u_int32_t st_maxobjects; /* Maximum num of objects in table. */ - u_int32_t st_nmodes; /* Number of lock modes. */ + int st_nmodes; /* Number of lock modes. */ u_int32_t st_nlocks; /* Current number of locks. */ u_int32_t st_maxnlocks; /* Maximum number of locks so far. */ u_int32_t st_nlockers; /* Current number of lockers. */ @@ -424,7 +480,7 @@ struct __db_lock_stat { u_int32_t st_ntxntimeouts; /* Number of transaction timeouts. 
*/ u_int32_t st_region_wait; /* Region lock granted after wait. */ u_int32_t st_region_nowait; /* Region lock granted without wait. */ - u_int32_t st_regsize; /* Region size. */ + roff_t st_regsize; /* Region size. */ }; /* @@ -446,7 +502,7 @@ struct __db_ilock { * lock_get request (or a lock_vec/DB_LOCK_GET). */ struct __db_lock_u { - size_t off; /* Offset of the lock in the region */ + roff_t off; /* Offset of the lock in the region */ u_int32_t ndx; /* Index of the object referenced by * this lock; used for locking. */ u_int32_t gen; /* Generation number of this lock. */ @@ -465,8 +521,8 @@ struct __db_lockreq { /******************************************************* * Logging. *******************************************************/ -#define DB_LOGVERSION 8 /* Current log version. */ -#define DB_LOGOLDVER 8 /* Oldest log version supported. */ +#define DB_LOGVERSION 10 /* Current log version. */ +#define DB_LOGOLDVER 10 /* Oldest log version supported. */ #define DB_LOGMAGIC 0x040988 /* Flag values for DB_ENV->log_archive(). */ @@ -482,7 +538,8 @@ struct __db_lockreq { #define DB_LOG_NOCOPY 0x008 /* Don't copy data */ #define DB_LOG_NOT_DURABLE 0x010 /* Do not log; keep in memory */ #define DB_LOG_PERM 0x020 /* Flag record with REP_PERMANENT */ -#define DB_LOG_WRNOSYNC 0x040 /* Write, don't sync log_put */ +#define DB_LOG_RESEND 0x040 /* Resent log record */ +#define DB_LOG_WRNOSYNC 0x080 /* Write, don't sync log_put */ /* * A DB_LSN has two parts, a fileid which identifies a specific file, and an @@ -546,7 +603,7 @@ struct __db_log_cursor { struct __db_log_stat { u_int32_t st_magic; /* Log file magic number. */ u_int32_t st_version; /* Log file version number. */ - int st_mode; /* Log file mode. */ + int st_mode; /* Log file mode. */ u_int32_t st_lg_bsize; /* Log buffer size. */ u_int32_t st_lg_size; /* Log file size. */ u_int32_t st_w_bytes; /* Bytes to log. */ @@ -562,11 +619,19 @@ struct __db_log_stat { u_int32_t st_cur_offset; /* Current log file offset. */ u_int32_t st_disk_file; /* Known on disk log file number. */ u_int32_t st_disk_offset; /* Known on disk log file offset. */ - u_int32_t st_regsize; /* Region size. */ + roff_t st_regsize; /* Region size. */ u_int32_t st_maxcommitperflush; /* Max number of commits in a flush. */ u_int32_t st_mincommitperflush; /* Min number of commits in a flush. */ }; +/* + * We need to record the first log record of a transaction. + * For user defined logging this macro returns the place to + * put that information, if it is need in rlsnp, otherwise it + * leaves it unchanged. + */ +#define DB_SET_BEGIN_LSNP(txn, rlsnp) ((txn)->set_begin_lsnp(txn, rlsnp)) + /******************************************************* * Shared buffer cache (mpool). *******************************************************/ @@ -579,6 +644,7 @@ struct __db_log_stat { #define DB_MPOOL_CLEAN 0x001 /* Page is not modified. */ #define DB_MPOOL_DIRTY 0x002 /* Page is modified. */ #define DB_MPOOL_DISCARD 0x004 /* Don't cache the page. */ +#define DB_MPOOL_FREE 0x008 /* Free page if present. */ /* Flags values for DB_MPOOLFILE->set_flags. */ #define DB_MPOOL_NOFILE 0x001 /* Never open a backing file. */ @@ -646,7 +712,7 @@ struct __db_mpoolfile { /* Methods. 
*/ int (*close) __P((DB_MPOOLFILE *, u_int32_t)); int (*get) __P((DB_MPOOLFILE *, db_pgno_t *, u_int32_t, void *)); - int (*open)__P((DB_MPOOLFILE *, const char *, u_int32_t, int, size_t)); + int (*open) __P((DB_MPOOLFILE *, const char *, u_int32_t, int, size_t)); int (*put) __P((DB_MPOOLFILE *, void *, u_int32_t)); int (*set) __P((DB_MPOOLFILE *, void *, u_int32_t)); int (*get_clear_len) __P((DB_MPOOLFILE *, u_int32_t *)); @@ -672,7 +738,7 @@ struct __db_mpoolfile { * thread protected because they are initialized before the file is * linked onto the per-process lists, and never modified. * - * MP_FLUSH is thread protected becase it is potentially read/set by + * MP_FLUSH is thread protected because it is potentially read/set by * multiple threads of control. */ #define MP_FILEID_SET 0x001 /* Application supplied a file ID. */ @@ -682,14 +748,16 @@ struct __db_mpoolfile { u_int32_t flags; }; -/* - * Mpool statistics structure. - */ +/* Mpool statistics structure. */ struct __db_mpool_stat { u_int32_t st_gbytes; /* Total cache size: GB. */ u_int32_t st_bytes; /* Total cache size: B. */ u_int32_t st_ncache; /* Number of caches. */ - u_int32_t st_regsize; /* Cache size. */ + roff_t st_regsize; /* Region size. */ + size_t st_mmapsize; /* Maximum file size for mmap. */ + int st_maxopenfd; /* Maximum number of open fd's. */ + int st_maxwrite; /* Maximum buffers to write. */ + int st_maxwrite_sleep; /* Sleep after writing max buffers. */ u_int32_t st_map; /* Pages from mapped files. */ u_int32_t st_cache_hit; /* Pages found in the cache. */ u_int32_t st_cache_miss; /* Pages not found in the cache. */ @@ -721,7 +789,7 @@ struct __db_mpool_stat { /* Mpool file statistics structure. */ struct __db_mpool_fstat { char *file_name; /* File name. */ - size_t st_pagesize; /* Page size. */ + u_int32_t st_pagesize; /* Page size. */ u_int32_t st_map; /* Pages from mapped files. */ u_int32_t st_cache_hit; /* Pages found in the cache. */ u_int32_t st_cache_miss; /* Pages not found in the cache. */ @@ -741,10 +809,9 @@ typedef enum { DB_TXN_BACKWARD_ALLOC=2, /* Internal. */ DB_TXN_BACKWARD_ROLL=3, /* Public. */ DB_TXN_FORWARD_ROLL=4, /* Public. */ - DB_TXN_GETPGNOS=5, /* Internal. */ - DB_TXN_OPENFILES=6, /* Internal. */ - DB_TXN_POPENFILES=7, /* Internal. */ - DB_TXN_PRINT=8 /* Public. */ + DB_TXN_OPENFILES=5, /* Internal. */ + DB_TXN_POPENFILES=6, /* Internal. */ + DB_TXN_PRINT=7 /* Public. */ } db_recops; /* @@ -823,8 +890,8 @@ struct __db_txn { struct __db_txn **tqe_prev; } klinks; - /* API-private structure: used by C++ */ - void *api_internal; + void *api_internal; /* C++ API private. */ + void *xml_internal; /* XML API private. */ u_int32_t cursors; /* Number of cursors open for txn */ @@ -834,17 +901,20 @@ struct __db_txn { int (*discard) __P((DB_TXN *, u_int32_t)); u_int32_t (*id) __P((DB_TXN *)); int (*prepare) __P((DB_TXN *, u_int8_t *)); + void (*set_begin_lsnp) __P((DB_TXN *txn, DB_LSN **)); int (*set_timeout) __P((DB_TXN *, db_timeout_t, u_int32_t)); #define TXN_CHILDCOMMIT 0x001 /* Transaction that has committed. */ #define TXN_COMPENSATE 0x002 /* Compensating transaction. */ -#define TXN_DIRTY_READ 0x004 /* Transaction does dirty reads. */ -#define TXN_LOCKTIMEOUT 0x008 /* Transaction has a lock timeout. */ -#define TXN_MALLOC 0x010 /* Structure allocated by TXN system. */ -#define TXN_NOSYNC 0x020 /* Do not sync on prepare and commit. */ -#define TXN_NOWAIT 0x040 /* Do not wait on locks. */ -#define TXN_RESTORED 0x080 /* Transaction has been restored. 
*/ -#define TXN_SYNC 0x100 /* Sync on prepare and commit. */ +#define TXN_DEADLOCK 0x004 /* Transaction has deadlocked. */ +#define TXN_DEGREE_2 0x008 /* Has degree 2 isolation. */ +#define TXN_DIRTY_READ 0x010 /* Transaction does dirty reads. */ +#define TXN_LOCKTIMEOUT 0x020 /* Transaction has a lock timeout. */ +#define TXN_MALLOC 0x040 /* Structure allocated by TXN system. */ +#define TXN_NOSYNC 0x080 /* Do not sync on prepare and commit. */ +#define TXN_NOWAIT 0x100 /* Do not wait on locks. */ +#define TXN_RESTORED 0x200 /* Transaction has been restored. */ +#define TXN_SYNC 0x400 /* Sync on prepare and commit. */ u_int32_t flags; }; @@ -886,7 +956,7 @@ struct __db_txn_stat { DB_TXN_ACTIVE *st_txnarray; /* array of active transactions */ u_int32_t st_region_wait; /* Region lock granted after wait. */ u_int32_t st_region_nowait; /* Region lock granted without wait. */ - u_int32_t st_regsize; /* Region size. */ + roff_t st_regsize; /* Region size. */ }; /******************************************************* @@ -898,8 +968,7 @@ struct __db_txn_stat { /* rep_start flags values */ #define DB_REP_CLIENT 0x001 -#define DB_REP_LOGSONLY 0x002 -#define DB_REP_MASTER 0x004 +#define DB_REP_MASTER 0x002 /* Replication statistics. */ struct __db_rep_stat { @@ -915,13 +984,15 @@ struct __db_rep_stat { u_int32_t st_status; /* Current replication status. */ DB_LSN st_next_lsn; /* Next LSN to use or expect. */ DB_LSN st_waiting_lsn; /* LSN we're awaiting, if any. */ + db_pgno_t st_next_pg; /* Next pg we expect. */ + db_pgno_t st_waiting_pg; /* pg we're awaiting, if any. */ u_int32_t st_dupmasters; /* # of times a duplicate master condition was detected.+ */ int st_env_id; /* Current environment ID. */ int st_env_priority; /* Current environment priority. */ u_int32_t st_gen; /* Current generation number. */ - u_int32_t st_in_recovery; /* This site is in client sync-up. */ + u_int32_t st_egen; /* Current election gen number. */ u_int32_t st_log_duplicated; /* Log records received multiply.+ */ u_int32_t st_log_queued; /* Log records currently queued.+ */ u_int32_t st_log_queued_max; /* Max. log records queued at once.+ */ @@ -942,6 +1013,10 @@ struct __db_rep_stat { u_int32_t st_nthrottles; /* # of times we were throttled. */ u_int32_t st_outdated; /* # of times we detected and returned an OUTDATED condition.+ */ + u_int32_t st_pg_duplicated; /* Pages received multiply.+ */ + u_int32_t st_pg_records; /* Pages received and stored.+ */ + u_int32_t st_pg_requested; /* Pages missed and requested.+ */ + u_int32_t st_startup_complete; /* Site completed client sync-up. */ u_int32_t st_txns_applied; /* # of transactions applied.+ */ /* Elections generally. */ @@ -953,11 +1028,75 @@ struct __db_rep_stat { u_int32_t st_election_gen; /* Election generation number. */ DB_LSN st_election_lsn; /* Max. LSN of current winner. */ int st_election_nsites; /* # of "registered voters". */ + int st_election_nvotes; /* # of "registered voters" needed. */ int st_election_priority; /* Current election priority. */ int st_election_status; /* Current election status. */ - int st_election_tiebreaker; /* Election tiebreaker value. */ + u_int32_t st_election_tiebreaker;/* Election tiebreaker value. */ int st_election_votes; /* Votes received in this round. */ }; +/* + * The storage record for a sequence. + */ +struct __db_seq_record { + u_int32_t seq_version; /* Version size/number. */ +#define DB_SEQ_DEC 0x00000001 /* Decrement sequence. */ +#define DB_SEQ_INC 0x00000002 /* Increment sequence. 
*/ +#define DB_SEQ_RANGE_SET 0x00000004 /* Range set (internal). */ +#define DB_SEQ_WRAP 0x00000008 /* Wrap sequence at min/max. */ + u_int32_t flags; /* Flags. */ + db_seq_t seq_value; /* Current value. */ + db_seq_t seq_max; /* Max permitted. */ + db_seq_t seq_min; /* Min permitted. */ +}; + +/* + * Handle for a sequence object. + */ +struct __db_sequence { + DB *seq_dbp; /* DB handle for this sequence. */ + DB_MUTEX *seq_mutexp; /* Mutex if sequence is threaded. */ + DB_SEQ_RECORD *seq_rp; /* Pointer to current data. */ + DB_SEQ_RECORD seq_record; /* Data from DB_SEQUENCE. */ + int32_t seq_cache_size; /* Number of values cached. */ + db_seq_t seq_last_value; /* Last value cached. */ + DBT seq_key; /* DBT pointing to sequence key. */ + DBT seq_data; /* DBT pointing to seq_record. */ + + /* API-private structure: used by C++ and Java. */ + void *api_internal; + + int (*close) __P((DB_SEQUENCE *, u_int32_t)); + int (*get) __P((DB_SEQUENCE *, + DB_TXN *, int32_t, db_seq_t *, u_int32_t)); + int (*get_cachesize) __P((DB_SEQUENCE *, int32_t *)); + int (*get_db) __P((DB_SEQUENCE *, DB **)); + int (*get_flags) __P((DB_SEQUENCE *, u_int32_t *)); + int (*get_key) __P((DB_SEQUENCE *, DBT *)); + int (*get_range) __P((DB_SEQUENCE *, + db_seq_t *, db_seq_t *)); + int (*initial_value) __P((DB_SEQUENCE *, db_seq_t)); + int (*open) __P((DB_SEQUENCE *, + DB_TXN *, DBT *, u_int32_t)); + int (*remove) __P((DB_SEQUENCE *, DB_TXN *, u_int32_t)); + int (*set_cachesize) __P((DB_SEQUENCE *, int32_t)); + int (*set_flags) __P((DB_SEQUENCE *, u_int32_t)); + int (*set_range) __P((DB_SEQUENCE *, db_seq_t, db_seq_t)); + int (*stat) __P((DB_SEQUENCE *, + DB_SEQUENCE_STAT **, u_int32_t)); + int (*stat_print) __P((DB_SEQUENCE *, u_int32_t)); +}; + +struct __db_seq_stat { + u_int32_t st_wait; /* Sequence lock granted without wait. */ + u_int32_t st_nowait; /* Sequence lock granted after wait. */ + db_seq_t st_current; /* Current value in db. */ + db_seq_t st_value; /* Current cached value. */ + db_seq_t st_last_value; /* Last cached value. */ + db_seq_t st_min; /* Minimum value. */ + db_seq_t st_max; /* Maximum value. */ + int32_t st_cache_size; /* Cache size. */ + u_int32_t st_flags; /* Flag value. */ +}; /******************************************************* * Access methods. @@ -984,6 +1123,8 @@ typedef enum { #define DB_QAMOLDVER 3 /* Oldest queue version supported. */ #define DB_QAMMAGIC 0x042253 +#define DB_SEQUENCE_VERSION 1 /* Current sequence version. */ + /* * DB access method and cursor operation values. Each value is an operation * code to which additional bit flags are added. @@ -1032,10 +1173,10 @@ typedef enum { * Masks for flags that can be OR'd into DB access method and cursor * operation values. * - * DB_DIRTY_READ 0x02000000 Dirty Read. */ -#define DB_MULTIPLE 0x04000000 /* Return multiple data values. */ -#define DB_MULTIPLE_KEY 0x08000000 /* Return multiple data/key pairs. */ -#define DB_RMW 0x10000000 /* Acquire write flag immediately. */ + * DB_DIRTY_READ 0x04000000 Dirty Read. */ +#define DB_MULTIPLE 0x08000000 /* Return multiple data values. */ +#define DB_MULTIPLE_KEY 0x10000000 /* Return multiple data/key pairs. */ +#define DB_RMW 0x20000000 /* Acquire write flag immediately. */ /* * DB (user visible) error return codes. @@ -1052,42 +1193,47 @@ typedef enum { * document that we own the error name space from -30,800 to -30,999. */ /* DB (public) error return codes. */ -#define DB_DONOTINDEX (-30999)/* "Null" return from 2ndary callbk. 
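A minimal usage sketch for the new DB_SEQUENCE handle declared above (not part of the patch): db_sequence_create() is prototyped near the end of this db.h diff, and the database handle "dbp", the key name, the cache size and the DB_CREATE open flag are assumptions made for the example.

#include <string.h>
#include "db.h"

int
use_sequence(DB *dbp)
{
	DB_SEQUENCE *seq;
	DBT key;
	db_seq_t value;
	int ret;

	if ((ret = db_sequence_create(&seq, dbp, 0)) != 0)
		return (ret);

	memset(&key, 0, sizeof(DBT));
	key.data = (void *)"my_sequence";
	key.size = (u_int32_t)strlen("my_sequence");

	if ((ret = seq->initial_value(seq, 1)) == 0 &&
	    (ret = seq->set_cachesize(seq, 100)) == 0 &&
	    (ret = seq->open(seq, NULL, &key, DB_CREATE)) == 0)
		/* Fetch the next value, incrementing by 1. */
		ret = seq->get(seq, NULL, 1, &value, 0);
	(void)seq->close(seq, 0);
	return (ret);
}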
*/ -#define DB_FILEOPEN (-30998)/* Rename/remove while file is open. */ +#define DB_BUFFER_SMALL (-30999)/* User memory too small for return. */ +#define DB_DONOTINDEX (-30998)/* "Null" return from 2ndary callbk. */ #define DB_KEYEMPTY (-30997)/* Key/data deleted or never created. */ #define DB_KEYEXIST (-30996)/* The key/data pair already exists. */ #define DB_LOCK_DEADLOCK (-30995)/* Deadlock. */ #define DB_LOCK_NOTGRANTED (-30994)/* Lock unavailable. */ -#define DB_NOSERVER (-30993)/* Server panic return. */ -#define DB_NOSERVER_HOME (-30992)/* Bad home sent to server. */ -#define DB_NOSERVER_ID (-30991)/* Bad ID sent to server. */ -#define DB_NOTFOUND (-30990)/* Key/data pair not found (EOF). */ -#define DB_OLD_VERSION (-30989)/* Out-of-date version. */ -#define DB_PAGE_NOTFOUND (-30988)/* Requested page not found. */ -#define DB_REP_DUPMASTER (-30987)/* There are two masters. */ -#define DB_REP_HANDLE_DEAD (-30986)/* Rolled back a commit. */ -#define DB_REP_HOLDELECTION (-30985)/* Time to hold an election. */ -#define DB_REP_ISPERM (-30984)/* Cached not written perm written.*/ -#define DB_REP_NEWMASTER (-30983)/* We have learned of a new master. */ -#define DB_REP_NEWSITE (-30982)/* New site entered system. */ -#define DB_REP_NOTPERM (-30981)/* Permanent log record not written. */ -#define DB_REP_OUTDATED (-30980)/* Site is too far behind master. */ -#define DB_REP_UNAVAIL (-30979)/* Site cannot currently be reached. */ -#define DB_RUNRECOVERY (-30978)/* Panic return. */ -#define DB_SECONDARY_BAD (-30977)/* Secondary index corrupt. */ -#define DB_VERIFY_BAD (-30976)/* Verify failed; bad format. */ +#define DB_LOG_BUFFER_FULL (-30993)/* In-memory log buffer full. */ +#define DB_NOSERVER (-30992)/* Server panic return. */ +#define DB_NOSERVER_HOME (-30991)/* Bad home sent to server. */ +#define DB_NOSERVER_ID (-30990)/* Bad ID sent to server. */ +#define DB_NOTFOUND (-30989)/* Key/data pair not found (EOF). */ +#define DB_OLD_VERSION (-30988)/* Out-of-date version. */ +#define DB_PAGE_NOTFOUND (-30987)/* Requested page not found. */ +#define DB_REP_DUPMASTER (-30986)/* There are two masters. */ +#define DB_REP_HANDLE_DEAD (-30985)/* Rolled back a commit. */ +#define DB_REP_HOLDELECTION (-30984)/* Time to hold an election. */ +#define DB_REP_ISPERM (-30983)/* Cached not written perm written.*/ +#define DB_REP_NEWMASTER (-30982)/* We have learned of a new master. */ +#define DB_REP_NEWSITE (-30981)/* New site entered system. */ +#define DB_REP_NOTPERM (-30980)/* Permanent log record not written. */ +#define DB_REP_STARTUPDONE (-30979)/* Client startup complete. */ +#define DB_REP_UNAVAIL (-30978)/* Site cannot currently be reached. */ +#define DB_RUNRECOVERY (-30977)/* Panic return. */ +#define DB_SECONDARY_BAD (-30976)/* Secondary index corrupt. */ +#define DB_VERIFY_BAD (-30975)/* Verify failed; bad format. */ +#define DB_VERSION_MISMATCH (-30974)/* Environment version mismatch. */ /* DB (private) error return codes. */ #define DB_ALREADY_ABORTED (-30899) #define DB_DELETED (-30898)/* Recovery file marked deleted. */ #define DB_LOCK_NOTEXIST (-30897)/* Object to lock is gone. */ #define DB_NEEDSPLIT (-30896)/* Page needs to be split. */ -#define DB_SURPRISE_KID (-30895)/* Child commit where parent +#define DB_REP_EGENCHG (-30895)/* Egen changed while in election. */ +#define DB_REP_LOGREADY (-30894)/* Rep log ready for recovery. */ +#define DB_REP_PAGEDONE (-30893)/* This page was already done. */ +#define DB_SURPRISE_KID (-30892)/* Child commit where parent didn't know it was a parent. 
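The DB_BUFFER_SMALL return added above ("user memory too small for return") appears to cover the case where a caller-owned buffer cannot hold the record being fetched; a sketch of handling it follows (not part of the patch; DB_DBT_USERMEM and the buffer sizes are standard DBT usage assumed for the example).

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include "db.h"

int
fetch_into_user_buffer(DB *dbp, DBT *key)
{
	DBT data;
	void *buf, *nbuf;
	int ret;

	if ((buf = malloc(1024)) == NULL)
		return (ENOMEM);

	memset(&data, 0, sizeof(DBT));
	data.data = buf;
	data.ulen = 1024;
	data.flags = DB_DBT_USERMEM;

	ret = dbp->get(dbp, NULL, key, &data, 0);
	if (ret == DB_BUFFER_SMALL) {
		/* data.size reports the length the record needs. */
		if ((nbuf = realloc(buf, data.size)) == NULL) {
			free(buf);
			return (ENOMEM);
		}
		buf = nbuf;
		data.data = buf;
		data.ulen = data.size;
		ret = dbp->get(dbp, NULL, key, &data, 0);
	}
	free(buf);
	return (ret);
}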
*/ -#define DB_SWAPBYTES (-30894)/* Database needs byte swapping. */ -#define DB_TIMEOUT (-30893)/* Timed out waiting for election. */ -#define DB_TXN_CKP (-30892)/* Encountered ckp record in log. */ -#define DB_VERIFY_FATAL (-30891)/* DB->verify cannot proceed. */ +#define DB_SWAPBYTES (-30891)/* Database needs byte swapping. */ +#define DB_TIMEOUT (-30890)/* Timed out waiting for election. */ +#define DB_TXN_CKP (-30889)/* Encountered ckp record in log. */ +#define DB_VERIFY_FATAL (-30888)/* DB->verify cannot proceed. */ /* Database handle. */ struct __db { @@ -1130,7 +1276,7 @@ struct __db { u_int32_t associate_lid; /* Locker id for DB->associate call. */ DB_LOCK handle_lock; /* Lock held on this handle. */ - long cl_id; /* RPC: remote client id. */ + u_int cl_id; /* RPC: remote client id. */ time_t timestamp; /* Handle timestamp for replication. */ @@ -1243,6 +1389,8 @@ struct __db { int (*close) __P((DB *, u_int32_t)); int (*cursor) __P((DB *, DB_TXN *, DBC **, u_int32_t)); int (*del) __P((DB *, DB_TXN *, DBT *, u_int32_t)); + int (*dump) __P((DB *, + const char *, int (*)(void *, const void *), void *, int, int)); void (*err) __P((DB *, int, const char *, ...)); void (*errx) __P((DB *, const char *, ...)); int (*fd) __P((DB *, int *)); @@ -1252,14 +1400,14 @@ struct __db { int (*get_cachesize) __P((DB *, u_int32_t *, u_int32_t *, int *)); int (*get_dbname) __P((DB *, const char **, const char **)); int (*get_encrypt_flags) __P((DB *, u_int32_t *)); - int (*get_env) __P((DB *, DB_ENV **)); + DB_ENV *(*get_env) __P((DB *)); void (*get_errfile) __P((DB *, FILE **)); void (*get_errpfx) __P((DB *, const char **)); int (*get_flags) __P((DB *, u_int32_t *)); int (*get_lorder) __P((DB *, int *)); int (*get_open_flags) __P((DB *, u_int32_t *)); int (*get_pagesize) __P((DB *, u_int32_t *)); - int (*get_transactional) __P((DB *, int *)); + int (*get_transactional) __P((DB *)); int (*get_type) __P((DB *, DBTYPE *)); int (*join) __P((DB *, DBC **, DBC **, u_int32_t)); int (*key_range) __P((DB *, @@ -1278,15 +1426,20 @@ struct __db { int (*set_dup_compare) __P((DB *, int (*)(DB *, const DBT *, const DBT *))); int (*set_encrypt) __P((DB *, const char *, u_int32_t)); - void (*set_errcall) __P((DB *, void (*)(const char *, char *))); + void (*set_errcall) __P((DB *, + void (*)(const DB_ENV *, const char *, const char *))); void (*set_errfile) __P((DB *, FILE *)); void (*set_errpfx) __P((DB *, const char *)); int (*set_feedback) __P((DB *, void (*)(DB *, int, int))); int (*set_flags) __P((DB *, u_int32_t)); int (*set_lorder) __P((DB *, int)); + void (*set_msgcall) __P((DB *, void (*)(const DB_ENV *, const char *))); + void (*get_msgfile) __P((DB *, FILE **)); + void (*set_msgfile) __P((DB *, FILE *)); int (*set_pagesize) __P((DB *, u_int32_t)); int (*set_paniccall) __P((DB *, void (*)(DB_ENV *, int))); - int (*stat) __P((DB *, void *, u_int32_t)); + int (*stat) __P((DB *, DB_TXN *, void *, u_int32_t)); + int (*stat_print) __P((DB *, u_int32_t)); int (*sync) __P((DB *, u_int32_t)); int (*upgrade) __P((DB *, const char *, u_int32_t)); int (*verify) __P((DB *, @@ -1319,8 +1472,7 @@ struct __db { int (*get_q_extentsize) __P((DB *, u_int32_t *)); int (*set_q_extentsize) __P((DB *, u_int32_t)); - int (*db_am_remove) __P((DB *, - DB_TXN *, const char *, const char *, DB_LSN *)); + int (*db_am_remove) __P((DB *, DB_TXN *, const char *, const char *)); int (*db_am_rename) __P((DB *, DB_TXN *, const char *, const char *, const char *)); @@ -1351,23 +1503,24 @@ struct __db { #define DB_AM_ENCRYPT 0x00000800 /* 
Encryption. */ #define DB_AM_FIXEDLEN 0x00001000 /* Fixed-length records. */ #define DB_AM_INMEM 0x00002000 /* In-memory; no sync on close. */ -#define DB_AM_IN_RENAME 0x00004000 /* File is being renamed. */ -#define DB_AM_NOT_DURABLE 0x00008000 /* Do not log changes. */ -#define DB_AM_OPEN_CALLED 0x00010000 /* DB->open called. */ -#define DB_AM_PAD 0x00020000 /* Fixed-length record pad. */ -#define DB_AM_PGDEF 0x00040000 /* Page size was defaulted. */ -#define DB_AM_RDONLY 0x00080000 /* Database is readonly. */ -#define DB_AM_RECNUM 0x00100000 /* DB_RECNUM. */ -#define DB_AM_RECOVER 0x00200000 /* DB opened by recovery routine. */ -#define DB_AM_RENUMBER 0x00400000 /* DB_RENUMBER. */ -#define DB_AM_REPLICATION 0x00800000 /* An internal replication file. */ -#define DB_AM_REVSPLITOFF 0x01000000 /* DB_REVSPLITOFF. */ -#define DB_AM_SECONDARY 0x02000000 /* Database is a secondary index. */ -#define DB_AM_SNAPSHOT 0x04000000 /* DB_SNAPSHOT. */ -#define DB_AM_SUBDB 0x08000000 /* Subdatabases supported. */ -#define DB_AM_SWAP 0x10000000 /* Pages need to be byte-swapped. */ -#define DB_AM_TXN 0x20000000 /* Opened in a transaction. */ -#define DB_AM_VERIFYING 0x40000000 /* DB handle is in the verifier. */ +#define DB_AM_INORDER 0x00004000 /* DB_INORDER. */ +#define DB_AM_IN_RENAME 0x00008000 /* File is being renamed. */ +#define DB_AM_NOT_DURABLE 0x00010000 /* Do not log changes. */ +#define DB_AM_OPEN_CALLED 0x00020000 /* DB->open called. */ +#define DB_AM_PAD 0x00040000 /* Fixed-length record pad. */ +#define DB_AM_PGDEF 0x00080000 /* Page size was defaulted. */ +#define DB_AM_RDONLY 0x00100000 /* Database is readonly. */ +#define DB_AM_RECNUM 0x00200000 /* DB_RECNUM. */ +#define DB_AM_RECOVER 0x00400000 /* DB opened by recovery routine. */ +#define DB_AM_RENUMBER 0x00800000 /* DB_RENUMBER. */ +#define DB_AM_REPLICATION 0x01000000 /* An internal replication file. */ +#define DB_AM_REVSPLITOFF 0x02000000 /* DB_REVSPLITOFF. */ +#define DB_AM_SECONDARY 0x04000000 /* Database is a secondary index. */ +#define DB_AM_SNAPSHOT 0x08000000 /* DB_SNAPSHOT. */ +#define DB_AM_SUBDB 0x10000000 /* Subdatabases supported. */ +#define DB_AM_SWAP 0x20000000 /* Pages need to be byte-swapped. */ +#define DB_AM_TXN 0x40000000 /* Opened in a transaction. */ +#define DB_AM_VERIFYING 0x80000000 /* DB handle is in the verifier. */ u_int32_t orig_flags; /* Flags at open, for refresh. */ u_int32_t flags; }; @@ -1476,7 +1629,7 @@ struct __dbc { DB_LOCK_ILOCK lock; /* Object to be locked. */ DB_LOCK mylock; /* CDB lock held on this cursor. */ - long cl_id; /* Remote client id. */ + u_int cl_id; /* Remote client id. */ DBTYPE dbtype; /* Cursor type. */ @@ -1501,16 +1654,17 @@ struct __dbc { #define DBC_ACTIVE 0x0001 /* Cursor in use. */ #define DBC_COMPENSATE 0x0002 /* Cursor compensating, don't lock. */ -#define DBC_DIRTY_READ 0x0004 /* Cursor supports dirty reads. */ -#define DBC_OPD 0x0008 /* Cursor references off-page dups. */ -#define DBC_RECOVER 0x0010 /* Recovery cursor; don't log/lock. */ -#define DBC_RMW 0x0020 /* Acquire write flag in read op. */ -#define DBC_TRANSIENT 0x0040 /* Cursor is transient. */ -#define DBC_WRITECURSOR 0x0080 /* Cursor may be used to write (CDB). */ -#define DBC_WRITER 0x0100 /* Cursor immediately writing (CDB). */ -#define DBC_MULTIPLE 0x0200 /* Return Multiple data. */ -#define DBC_MULTIPLE_KEY 0x0400 /* Return Multiple keys and data. */ -#define DBC_OWN_LID 0x0800 /* Free lock id on destroy. */ +#define DBC_DEGREE_2 0x0004 /* Cursor has degree 2 isolation. 
*/ +#define DBC_DIRTY_READ 0x0008 /* Cursor supports dirty reads. */ +#define DBC_OPD 0x0010 /* Cursor references off-page dups. */ +#define DBC_RECOVER 0x0020 /* Recovery cursor; don't log/lock. */ +#define DBC_RMW 0x0040 /* Acquire write flag in read op. */ +#define DBC_TRANSIENT 0x0080 /* Cursor is transient. */ +#define DBC_WRITECURSOR 0x0100 /* Cursor may be used to write (CDB). */ +#define DBC_WRITER 0x0200 /* Cursor immediately writing (CDB). */ +#define DBC_MULTIPLE 0x0400 /* Return Multiple data. */ +#define DBC_MULTIPLE_KEY 0x0800 /* Return Multiple keys and data. */ +#define DBC_OWN_LID 0x1000 /* Free lock id on destroy. */ u_int32_t flags; }; @@ -1538,6 +1692,7 @@ struct __db_bt_stat { u_int32_t bt_leaf_pg; /* Leaf pages. */ u_int32_t bt_dup_pg; /* Duplicate pages. */ u_int32_t bt_over_pg; /* Overflow pages. */ + u_int32_t bt_empty_pg; /* Empty pages. */ u_int32_t bt_free; /* Pages on the free list. */ u_int32_t bt_int_pgfree; /* Bytes free in internal pages. */ u_int32_t bt_leaf_pgfree; /* Bytes free in leaf pages. */ @@ -1592,10 +1747,16 @@ struct __db_env { /******************************************************* * Public: owned by the application. *******************************************************/ + /* Error message callback. */ + void (*db_errcall) __P((const DB_ENV *, const char *, const char *)); FILE *db_errfile; /* Error message file stream. */ const char *db_errpfx; /* Error message prefix. */ - /* Callbacks. */ - void (*db_errcall) __P((const char *, char *)); + + FILE *db_msgfile; /* Statistics message file stream. */ + /* Statistics message callback. */ + void (*db_msgcall) __P((const DB_ENV *, const char *)); + + /* Other Callbacks. */ void (*db_feedback) __P((DB_ENV *, int, int)); void (*db_paniccall) __P((DB_ENV *, int)); @@ -1609,11 +1770,10 @@ struct __db_env { * entries. There's no reason that it needs to be limited, if * there are ever more than 32 entries, convert to a bit array. */ -#define DB_VERB_CHKPOINT 0x0001 /* List checkpoints. */ -#define DB_VERB_DEADLOCK 0x0002 /* Deadlock detection information. */ -#define DB_VERB_RECOVERY 0x0004 /* Recovery information. */ -#define DB_VERB_REPLICATION 0x0008 /* Replication information. */ -#define DB_VERB_WAITSFOR 0x0010 /* Dump waits-for table. */ +#define DB_VERB_DEADLOCK 0x0001 /* Deadlock detection information. */ +#define DB_VERB_RECOVERY 0x0002 /* Recovery information. */ +#define DB_VERB_REPLICATION 0x0004 /* Replication information. */ +#define DB_VERB_WAITSFOR 0x0008 /* Dump waits-for table. */ u_int32_t verbose; /* Verbose output. */ void *app_private; /* Application-private handle. */ @@ -1623,7 +1783,7 @@ struct __db_env { /* Locking. */ u_int8_t *lk_conflicts; /* Two dimensional conflict matrix. */ - u_int32_t lk_modes; /* Number of lock modes in table. */ + int lk_modes; /* Number of lock modes in table. */ u_int32_t lk_max; /* Maximum number of locks. */ u_int32_t lk_max_lockers;/* Maximum number of lockers. */ u_int32_t lk_max_objects;/* Maximum number of locked objects. */ @@ -1638,9 +1798,9 @@ struct __db_env { /* Memory pool. */ u_int32_t mp_gbytes; /* Cachesize: GB. */ u_int32_t mp_bytes; /* Cachesize: Bytes. */ - size_t mp_size; /* DEPRECATED: Cachesize: bytes. */ - int mp_ncache; /* Number of cache regions. */ + u_int mp_ncache; /* Number of cache regions. */ size_t mp_mmapsize; /* Maximum file size for mmap. */ + int mp_maxopenfd; /* Maximum open file descriptors. */ int mp_maxwrite; /* Maximum buffers to write. */ int /* Sleep after writing max buffers. 
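Callbacks matching the new db_errcall and db_msgcall prototypes above might look like the sketch below (not part of the patch; the function names and prefix handling are illustrative). They are installed through the set_errcall()/set_msgcall() methods that appear further down in this hunk.

#include <stdio.h>
#include "db.h"

/* Error callback: now also receives the DB_ENV handle and the error prefix. */
static void
my_errcall(const DB_ENV *dbenv, const char *errpfx, const char *msg)
{
	fprintf(stderr, "%s: %s\n", errpfx == NULL ? "db" : errpfx, msg);
}

/* Statistics/message callback: one line of informational output. */
static void
my_msgcall(const DB_ENV *dbenv, const char *msg)
{
	printf("%s\n", msg);
}

static void
install_callbacks(DB_ENV *dbenv)
{
	dbenv->set_errcall(dbenv, my_errcall);
	dbenv->set_msgcall(dbenv, my_msgcall);
}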
*/ mp_maxwrite_sleep; @@ -1669,6 +1829,8 @@ struct __db_env { int data_next; /* Next Database data file slot. */ int db_mode; /* Default open permissions. */ + int dir_mode; /* Intermediate directory perms. */ + u_int32_t env_lid; /* Locker ID in non-threaded handles. */ u_int32_t open_flags; /* Flags passed to DB_ENV->open. */ void *reginfo; /* REGINFO structure reference. */ @@ -1680,7 +1842,7 @@ struct __db_env { /* Slots in the dispatch table. */ void *cl_handle; /* RPC: remote client handle. */ - long cl_id; /* RPC: remote client env id. */ + u_int cl_id; /* RPC: remote client env id. */ int db_ref; /* DB reference count. */ @@ -1742,10 +1904,18 @@ struct __db_env { const char *, const char *, const char *, u_int32_t)); void (*err) __P((const DB_ENV *, int, const char *, ...)); void (*errx) __P((const DB_ENV *, const char *, ...)); - int (*get_home) __P((DB_ENV *, const char **)); - int (*get_open_flags) __P((DB_ENV *, u_int32_t *)); int (*open) __P((DB_ENV *, const char *, u_int32_t, int)); int (*remove) __P((DB_ENV *, const char *, u_int32_t)); + int (*stat_print) __P((DB_ENV *, u_int32_t)); + + /* House-keeping. */ + int (*fileid_reset) __P((DB_ENV *, char *, int)); + int (*is_bigendian) __P((void)); + int (*lsn_reset) __P((DB_ENV *, char *, int)); + int (*prdbt) __P((DBT *, + int, const char *, void *, int (*)(void *, const void *), int)); + + /* Setters/getters. */ int (*set_alloc) __P((DB_ENV *, void *(*)(size_t), void *(*)(void *, size_t), void (*)(void *))); int (*set_app_dispatch) __P((DB_ENV *, @@ -1754,7 +1924,8 @@ struct __db_env { int (*set_data_dir) __P((DB_ENV *, const char *)); int (*get_encrypt_flags) __P((DB_ENV *, u_int32_t *)); int (*set_encrypt) __P((DB_ENV *, const char *, u_int32_t)); - void (*set_errcall) __P((DB_ENV *, void (*)(const char *, char *))); + void (*set_errcall) __P((DB_ENV *, + void (*)(const DB_ENV *, const char *, const char *))); void (*get_errfile) __P((DB_ENV *, FILE **)); void (*set_errfile) __P((DB_ENV *, FILE *)); void (*get_errpfx) __P((DB_ENV *, const char **)); @@ -1762,11 +1933,18 @@ struct __db_env { int (*set_feedback) __P((DB_ENV *, void (*)(DB_ENV *, int, int))); int (*get_flags) __P((DB_ENV *, u_int32_t *)); int (*set_flags) __P((DB_ENV *, u_int32_t, int)); + int (*get_home) __P((DB_ENV *, const char **)); + int (*set_intermediate_dir) __P((DB_ENV *, int, u_int32_t)); + int (*get_open_flags) __P((DB_ENV *, u_int32_t *)); int (*set_paniccall) __P((DB_ENV *, void (*)(DB_ENV *, int))); int (*set_rpc_server) __P((DB_ENV *, void *, const char *, long, long, u_int32_t)); int (*get_shm_key) __P((DB_ENV *, long *)); int (*set_shm_key) __P((DB_ENV *, long)); + void (*set_msgcall) __P((DB_ENV *, + void (*)(const DB_ENV *, const char *))); + void (*get_msgfile) __P((DB_ENV *, FILE **)); + void (*set_msgfile) __P((DB_ENV *, FILE *)); int (*get_tas_spins) __P((DB_ENV *, u_int32_t *)); int (*set_tas_spins) __P((DB_ENV *, u_int32_t)); int (*get_tmp_dir) __P((DB_ENV *, const char **)); @@ -1789,6 +1967,7 @@ struct __db_env { int (*log_flush) __P((DB_ENV *, const DB_LSN *)); int (*log_put) __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t)); int (*log_stat) __P((DB_ENV *, DB_LOG_STAT **, u_int32_t)); + int (*log_stat_print) __P((DB_ENV *, u_int32_t)); void *lk_handle; /* Lock handle and methods. 
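A sketch of the new statistics-printing entry points (not part of the patch): output is routed through the message file or callback configured above, the remaining per-subsystem *_stat_print methods continue below in this hunk, and a flags value of 0 is used purely for simplicity.

static int
print_env_statistics(DB_ENV *dbenv)
{
	int ret;

	/* Environment-wide statistics, then the log subsystem. */
	if ((ret = dbenv->stat_print(dbenv, 0)) != 0)
		return (ret);
	return (dbenv->log_stat_print(dbenv, 0));
}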
*/ int (*get_lk_conflicts) __P((DB_ENV *, const u_int8_t **, int *)); @@ -1803,13 +1982,13 @@ struct __db_env { int (*get_lk_max_objects) __P((DB_ENV *, u_int32_t *)); int (*set_lk_max_objects) __P((DB_ENV *, u_int32_t)); int (*lock_detect) __P((DB_ENV *, u_int32_t, u_int32_t, int *)); - int (*lock_dump_region) __P((DB_ENV *, const char *, FILE *)); int (*lock_get) __P((DB_ENV *, u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *)); int (*lock_put) __P((DB_ENV *, DB_LOCK *)); int (*lock_id) __P((DB_ENV *, u_int32_t *)); int (*lock_id_free) __P((DB_ENV *, u_int32_t)); int (*lock_stat) __P((DB_ENV *, DB_LOCK_STAT **, u_int32_t)); + int (*lock_stat_print) __P((DB_ENV *, u_int32_t)); int (*lock_vec) __P((DB_ENV *, u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **)); @@ -1818,25 +1997,29 @@ struct __db_env { int (*set_cachesize) __P((DB_ENV *, u_int32_t, u_int32_t, int)); int (*get_mp_mmapsize) __P((DB_ENV *, size_t *)); int (*set_mp_mmapsize) __P((DB_ENV *, size_t)); - int (*get_mp_maxwrite) __P((DB_ENV *, int *, int *)); - int (*set_mp_maxwrite) __P((DB_ENV *, int, int)); - int (*memp_dump_region) __P((DB_ENV *, const char *, FILE *)); + int (*get_mp_max_openfd) __P((DB_ENV *, int *)); + int (*set_mp_max_openfd) __P((DB_ENV *, int)); + int (*get_mp_max_write) __P((DB_ENV *, int *, int *)); + int (*set_mp_max_write) __P((DB_ENV *, int, int)); int (*memp_fcreate) __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t)); int (*memp_register) __P((DB_ENV *, int, int (*)(DB_ENV *, db_pgno_t, void *, DBT *), int (*)(DB_ENV *, db_pgno_t, void *, DBT *))); int (*memp_stat) __P((DB_ENV *, DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, u_int32_t)); + int (*memp_stat_print) __P((DB_ENV *, u_int32_t)); int (*memp_sync) __P((DB_ENV *, DB_LSN *)); int (*memp_trickle) __P((DB_ENV *, int, int *)); void *rep_handle; /* Replication handle and methods. 
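The mp_maxwrite methods are renamed to get/set_mp_max_write() here and a new get/set_mp_max_openfd() control is added; a sketch of the new calls follows (not part of the patch; the limits are illustrative and the units of the sleep argument are not stated in this hunk).

static int
tune_cache(DB_ENV *dbenv)
{
	int ret;

	/* Keep at most 64 cache-owned file descriptors open. */
	if ((ret = dbenv->set_mp_max_openfd(dbenv, 64)) != 0)
		return (ret);
	/* Write at most 32 dirty buffers per pass, then pause. */
	return (dbenv->set_mp_max_write(dbenv, 32, 1));
}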
*/ - int (*rep_elect) __P((DB_ENV *, int, int, u_int32_t, int *)); + int (*rep_elect) __P((DB_ENV *, int, int, int, + u_int32_t, int *, u_int32_t)); int (*rep_flush) __P((DB_ENV *)); int (*rep_process_message) __P((DB_ENV *, DBT *, DBT *, int *, DB_LSN *)); int (*rep_start) __P((DB_ENV *, DBT *, u_int32_t)); int (*rep_stat) __P((DB_ENV *, DB_REP_STAT **, u_int32_t)); + int (*rep_stat_print) __P((DB_ENV *, u_int32_t)); int (*get_rep_limit) __P((DB_ENV *, u_int32_t *, u_int32_t *)); int (*set_rep_limit) __P((DB_ENV *, u_int32_t, u_int32_t)); int (*set_rep_request) __P((DB_ENV *, u_int32_t, u_int32_t)); @@ -1854,19 +2037,22 @@ struct __db_env { int (*txn_recover) __P((DB_ENV *, DB_PREPLIST *, long, long *, u_int32_t)); int (*txn_stat) __P((DB_ENV *, DB_TXN_STAT **, u_int32_t)); + int (*txn_stat_print) __P((DB_ENV *, u_int32_t)); int (*get_timeout) __P((DB_ENV *, db_timeout_t *, u_int32_t)); int (*set_timeout) __P((DB_ENV *, db_timeout_t, u_int32_t)); #define DB_TEST_ELECTINIT 1 /* after __rep_elect_init */ -#define DB_TEST_POSTDESTROY 2 /* after destroy op */ -#define DB_TEST_POSTLOG 3 /* after logging all pages */ -#define DB_TEST_POSTLOGMETA 4 /* after logging meta in btree */ -#define DB_TEST_POSTOPEN 5 /* after __os_open */ -#define DB_TEST_POSTSYNC 6 /* after syncing the log */ -#define DB_TEST_PREDESTROY 7 /* before destroy op */ -#define DB_TEST_PREOPEN 8 /* before __os_open */ -#define DB_TEST_SUBDB_LOCKS 9 /* subdb locking tests */ +#define DB_TEST_ELECTVOTE1 2 /* after sending VOTE1 */ +#define DB_TEST_POSTDESTROY 3 /* after destroy op */ +#define DB_TEST_POSTLOG 4 /* after logging all pages */ +#define DB_TEST_POSTLOGMETA 5 /* after logging meta in btree */ +#define DB_TEST_POSTOPEN 6 /* after __os_open */ +#define DB_TEST_POSTSYNC 7 /* after syncing the log */ +#define DB_TEST_PREDESTROY 8 /* before destroy op */ +#define DB_TEST_PREOPEN 9 /* before __os_open */ +#define DB_TEST_SUBDB_LOCKS 10 /* subdb locking tests */ int test_abort; /* Abort value for testing. */ + int test_check; /* Checkpoint value for testing. */ int test_copy; /* Copy value for testing. */ #define DB_ENV_AUTO_COMMIT 0x0000001 /* DB_AUTO_COMMIT. */ @@ -1876,25 +2062,26 @@ struct __db_env { #define DB_ENV_DBLOCAL 0x0000010 /* DB_ENV allocated for private DB. */ #define DB_ENV_DIRECT_DB 0x0000020 /* DB_DIRECT_DB set. */ #define DB_ENV_DIRECT_LOG 0x0000040 /* DB_DIRECT_LOG set. */ -#define DB_ENV_FATAL 0x0000080 /* Doing fatal recovery in env. */ -#define DB_ENV_LOCKDOWN 0x0000100 /* DB_LOCKDOWN set. */ -#define DB_ENV_LOG_AUTOREMOVE 0x0000200 /* DB_LOG_AUTOREMOVE set. */ -#define DB_ENV_NOLOCKING 0x0000400 /* DB_NOLOCKING set. */ -#define DB_ENV_NOMMAP 0x0000800 /* DB_NOMMAP set. */ -#define DB_ENV_NOPANIC 0x0001000 /* Okay if panic set. */ -#define DB_ENV_OPEN_CALLED 0x0002000 /* DB_ENV->open called. */ -#define DB_ENV_OVERWRITE 0x0004000 /* DB_OVERWRITE set. */ -#define DB_ENV_PRIVATE 0x0008000 /* DB_PRIVATE set. */ -#define DB_ENV_REGION_INIT 0x0010000 /* DB_REGION_INIT set. */ -#define DB_ENV_RPCCLIENT 0x0020000 /* DB_RPCCLIENT set. */ -#define DB_ENV_RPCCLIENT_GIVEN 0x0040000 /* User-supplied RPC client struct */ -#define DB_ENV_SYSTEM_MEM 0x0080000 /* DB_SYSTEM_MEM set. */ -#define DB_ENV_THREAD 0x0100000 /* DB_THREAD set. */ -#define DB_ENV_TIME_NOTGRANTED 0x0200000 /* DB_TIME_NOTGRANTED set. */ -#define DB_ENV_TXN_NOSYNC 0x0400000 /* DB_TXN_NOSYNC set. */ -#define DB_ENV_TXN_NOT_DURABLE 0x0800000 /* DB_TXN_NOT_DURABLE set. */ -#define DB_ENV_TXN_WRITE_NOSYNC 0x1000000 /* DB_TXN_WRITE_NOSYNC set. 
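rep_elect() gains two arguments in this change. The sketch below reads them as: number of sites, votes required to win (compare st_election_nvotes above), this site's priority, a timeout, an output argument for the winner's environment ID, and a flags word; this is an interpretation of the prototype, not text from the patch.

static int
call_for_election(DB_ENV *dbenv)
{
	int eid, ret;

	ret = dbenv->rep_elect(dbenv,
	    3,			/* Sites in the replication group. */
	    2,			/* Votes required to win. */
	    100,		/* This site's priority. */
	    1000000,		/* Timeout; units assumed, not stated here. */
	    &eid,		/* Receives the elected master's ID. */
	    0);			/* Flags. */
	return (ret);
}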
*/ -#define DB_ENV_YIELDCPU 0x2000000 /* DB_YIELDCPU set. */ +#define DB_ENV_DSYNC_LOG 0x0000080 /* DB_DSYNC_LOG set. */ +#define DB_ENV_FATAL 0x0000100 /* Doing fatal recovery in env. */ +#define DB_ENV_LOCKDOWN 0x0000200 /* DB_LOCKDOWN set. */ +#define DB_ENV_LOG_AUTOREMOVE 0x0000400 /* DB_LOG_AUTOREMOVE set. */ +#define DB_ENV_LOG_INMEMORY 0x0000800 /* DB_LOG_INMEMORY set. */ +#define DB_ENV_NOLOCKING 0x0001000 /* DB_NOLOCKING set. */ +#define DB_ENV_NOMMAP 0x0002000 /* DB_NOMMAP set. */ +#define DB_ENV_NOPANIC 0x0004000 /* Okay if panic set. */ +#define DB_ENV_OPEN_CALLED 0x0008000 /* DB_ENV->open called. */ +#define DB_ENV_OVERWRITE 0x0010000 /* DB_OVERWRITE set. */ +#define DB_ENV_PRIVATE 0x0020000 /* DB_PRIVATE set. */ +#define DB_ENV_REGION_INIT 0x0040000 /* DB_REGION_INIT set. */ +#define DB_ENV_RPCCLIENT 0x0080000 /* DB_RPCCLIENT set. */ +#define DB_ENV_RPCCLIENT_GIVEN 0x0100000 /* User-supplied RPC client struct */ +#define DB_ENV_SYSTEM_MEM 0x0200000 /* DB_SYSTEM_MEM set. */ +#define DB_ENV_THREAD 0x0400000 /* DB_THREAD set. */ +#define DB_ENV_TIME_NOTGRANTED 0x0800000 /* DB_TIME_NOTGRANTED set. */ +#define DB_ENV_TXN_NOSYNC 0x1000000 /* DB_TXN_NOSYNC set. */ +#define DB_ENV_TXN_WRITE_NOSYNC 0x2000000 /* DB_TXN_WRITE_NOSYNC set. */ +#define DB_ENV_YIELDCPU 0x4000000 /* DB_YIELDCPU set. */ u_int32_t flags; }; @@ -1986,10 +2173,6 @@ typedef struct entry { #endif #endif /* !_DB_H_ */ -/* DO NOT EDIT: automatically built by dist/s_rpc. */ -#define DB_RPC_SERVERPROG ((unsigned long)(351457)) -#define DB_RPC_SERVERVERS ((unsigned long)(4002)) - /* DO NOT EDIT: automatically built by dist/s_include. */ #ifndef _DB_EXT_PROT_IN_ #define _DB_EXT_PROT_IN_ @@ -2009,19 +2192,23 @@ int db_env_set_func_dirlist __P((int (*)(const char *, char ***, int *))); int db_env_set_func_exists __P((int (*)(const char *, int *))); int db_env_set_func_free __P((void (*)(void *))); int db_env_set_func_fsync __P((int (*)(int))); +int db_env_set_func_ftruncate __P((int (*)(int, off_t))); int db_env_set_func_ioinfo __P((int (*)(const char *, int, u_int32_t *, u_int32_t *, u_int32_t *))); int db_env_set_func_malloc __P((void *(*)(size_t))); int db_env_set_func_map __P((int (*)(char *, size_t, int, int, void **))); +int db_env_set_func_pread __P((ssize_t (*)(int, void *, size_t, off_t))); +int db_env_set_func_pwrite __P((ssize_t (*)(int, const void *, size_t, off_t))); int db_env_set_func_open __P((int (*)(const char *, int, ...))); int db_env_set_func_read __P((ssize_t (*)(int, void *, size_t))); int db_env_set_func_realloc __P((void *(*)(void *, size_t))); int db_env_set_func_rename __P((int (*)(const char *, const char *))); -int db_env_set_func_seek __P((int (*)(int, size_t, db_pgno_t, u_int32_t, int, int))); +int db_env_set_func_seek __P((int (*)(int, off_t, int))); int db_env_set_func_sleep __P((int (*)(u_long, u_long))); int db_env_set_func_unlink __P((int (*)(const char *))); int db_env_set_func_unmap __P((int (*)(void *, size_t))); int db_env_set_func_write __P((ssize_t (*)(int, const void *, size_t))); int db_env_set_func_yield __P((int (*)(void))); +int db_sequence_create __P((DB_SEQUENCE **, DB *, u_int32_t)); #if DB_DBM_HSEARCH != 0 int __db_ndbm_clearerr __P((DBM *)); void __db_ndbm_close __P((DBM *)); @@ -2036,14 +2223,11 @@ int __db_ndbm_pagfno __P((DBM *)); int __db_ndbm_rdonly __P((DBM *)); int __db_ndbm_store __P((DBM *, datum, datum, int)); int __db_dbm_close __P((void)); -int __db_dbm_dbrdonly __P((void)); int __db_dbm_delete __P((datum)); -int __db_dbm_dirf __P((void)); datum 
__db_dbm_fetch __P((datum)); datum __db_dbm_firstkey __P((void)); int __db_dbm_init __P((char *)); datum __db_dbm_nextkey __P((datum)); -int __db_dbm_pagf __P((void)); int __db_dbm_store __P((datum, datum)); #endif #if DB_DBM_HSEARCH != 0 diff --git a/db/build_win32/db_archive.dsp b/db/build_win32/db_archive.dsp index 0815d77be..4c664182d 100644 --- a/db/build_win32/db_archive.dsp +++ b/db/build_win32/db_archive.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "db_archive - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "db_archive - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "db_archive - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/db_checkpoint.dsp b/db/build_win32/db_checkpoint.dsp index 524a1dbeb..26a647d48 100644 --- a/db/build_win32/db_checkpoint.dsp +++ b/db/build_win32/db_checkpoint.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib 
odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "db_checkpoint - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "db_checkpoint - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "db_checkpoint - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/db_config.h b/db/build_win32/db_config.h index 0acc25f6a..0dad44320 100644 --- a/db/build_win32/db_config.h +++ b/db/build_win32/db_config.h @@ -49,9 +49,15 @@ /* Define to 1 if fcntl/F_SETFD denies child access to file descriptors. */ /* #undef HAVE_FCNTL_F_SETFD */ +/* Define to 1 if you have the `fdatasync' function. */ +/* #undef HAVE_FDATASYNC */ + /* Define to 1 if allocated filesystem blocks are not zeroed. */ #define HAVE_FILESYSTEM_NOTZERO 1 +/* Define to 1 if you have the `ftruncate' function. */ +#define HAVE_FTRUNCATE 1 + /* Define to 1 if you have the `getcwd' function. */ #define HAVE_GETCWD 1 @@ -78,6 +84,9 @@ /* Define to 1 if you have the `nsl' library (-lnsl). */ /* #undef HAVE_LIBNSL */ +/* Define to 1 if the system has the type `long long'. */ +#define HAVE_LONG_LONG 1 + /* Define to 1 if you have the `memcmp' function. */ #define HAVE_MEMCMP 1 @@ -227,6 +236,9 @@ /* Define to 1 if you have the `raise' function. 
*/ #define HAVE_RAISE 1 +/* Define to 1 if you have the `rand' function. */ +#define HAVE_RAND 1 + /* Define to 1 if building replication support. */ #ifndef HAVE_SMALLBUILD #define HAVE_REPLICATION 1 @@ -241,12 +253,21 @@ /* Define to 1 if you have the `select' function. */ /* #undef HAVE_SELECT */ +/* Define to 1 if building sequence support. */ +#define HAVE_SEQUENCE 1 + /* Define to 1 if you have the `shmget' function. */ /* #undef HAVE_SHMGET */ /* Define to 1 if you have the `snprintf' function. */ #define HAVE_SNPRINTF 1 +/* Define to 1 if you have the `srand' function. */ +#define HAVE_SRAND 1 + +/* Define to 1 if building statistics support. */ +#define HAVE_STATISTICS 1 + /* Define to 1 if you have the header file. */ /* #undef HAVE_STDINT_H */ @@ -306,6 +327,9 @@ /* Define to 1 if unlink of file with open file descriptors will fail. */ /* #undef HAVE_UNLINK_WITH_OPEN_FAILURE */ +/* Define to 1 if the system has the type `unsigned long long'. */ +#define HAVE_UNSIGNED_LONG_LONG 1 + /* Define to 1 if building access method verification support. */ #ifndef HAVE_SMALLBUILD #define HAVE_VERIFY 1 @@ -333,13 +357,13 @@ #define PACKAGE_NAME "Berkeley DB" /* Define to the full name and version of this package. */ -#define PACKAGE_STRING "Berkeley DB 4.2.52" +#define PACKAGE_STRING "Berkeley DB 4.3.14" /* Define to the one symbol short name of this package. */ -#define PACKAGE_TARNAME "db-4.2.52" +#define PACKAGE_TARNAME "db-4.3.14" /* Define to the version of this package. */ -#define PACKAGE_VERSION "4.2.52" +#define PACKAGE_VERSION "4.3.14" /* Define to 1 if the `S_IS*' macros in do not work properly. */ /* #undef STAT_MACROS_BROKEN */ @@ -350,7 +374,7 @@ /* Define to 1 if you can safely include both and . */ /* #undef TIME_WITH_SYS_TIME */ -/* Define to 1 to mask harmless unitialized memory read/writes. */ +/* Define to 1 to mask harmless uninitialized memory read/writes. */ /* #undef UMRW */ /* Number of bits in a file offset, on hosts where this is settable. */ @@ -411,6 +435,6 @@ * arguments turning OFF all vendor extensions. Even more unfortunately, if * we do that, it fails to parse windows.h!!!!! So, we define __STDC__ here, * after windows.h comes in. Note: the compiler knows we've defined it, and - * starts enforcing strict ANSI compilance from this point on. + * starts enforcing strict ANSI compliance from this point on. */ #define __STDC__ 1 diff --git a/db/build_win32/db_cxx.h b/db/build_win32/db_cxx.h index 670f760e3..abba7b635 100644 --- a/db/build_win32/db_cxx.h +++ b/db/build_win32/db_cxx.h @@ -2,10 +2,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. 
* - * $Id: db_cxx.in,v 11.134 2003/09/04 19:02:27 bostic Exp $ + * $Id: db_cxx.in,v 11.147 2004/10/07 21:39:48 bostic Exp $ */ #ifndef _DB_CXX_H_ @@ -55,10 +55,12 @@ #define HAVE_CXX_STDHEADERS 1 #ifdef HAVE_CXX_STDHEADERS #include -#define __DB_OSTREAMCLASS std::ostream +#include +#define __DB_STD(x) std::x #else #include -#define __DB_OSTREAMCLASS ostream +#include +#define __DB_STD(x) x #endif #include "db.h" @@ -74,40 +76,20 @@ class DbMpoolFile; // forward class DbPreplist; // forward class Dbt; // forward class DbTxn; // forward -class DbDeadlockException; // forward -class DbException; // forward -class DbLockNotGrantedException; // forward class DbLock; // forward -class DbMemoryException; // forward -class DbRunRecoveryException; // forward +class DbSequence; // forward class Dbt; // forward + class DbMultipleIterator; // forward class DbMultipleKeyDataIterator; // forward class DbMultipleRecnoDataIterator; // forward class DbMultipleDataIterator; // forward -// These classes are not defined here and should be invisible -// to the user, but some compilers require forward references. -// There is one for each use of the DEFINE_DB_CLASS macro. - -class DbImp; -class DbEnvImp; -class DbMpoolFileImp; -class DbTxnImp; - -// DEFINE_DB_CLASS defines an imp_ data member and imp() accessor. -// The underlying type is a pointer to an opaque *Imp class, that -// gets converted to the correct implementation class by the implementation. -// -// Since these defines use "private/public" labels, and leave the access -// being "private", we always use these by convention before any data -// members in the private section of a class. Keeping them in the -// private section also emphasizes that they are off limits to user code. -// -#define DEFINE_DB_CLASS(name) \ - public: class name##Imp* imp() { return (imp_); } \ - public: const class name##Imp* constimp() const { return (imp_); } \ - private: class name##Imp* imp_ +class DbException; // forward +class DbDeadlockException; // forward +class DbLockNotGrantedException; // forward +class DbMemoryException; // forward +class DbRunRecoveryException; // forward //////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////// @@ -187,304 +169,194 @@ extern "C" { (DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie); typedef int (*pgout_fcn_type) (DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie); -}; - -//////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////// -// -// Exception classes -// - -// Almost any error in the DB library throws a DbException. -// Every exception should be considered an abnormality -// (e.g. bug, misuse of DB, file system error). -// -// NOTE: We would like to inherit from class exception and -// let it handle what(), but there are -// MSVC++ problems when is included. 
-// -class _exported DbException -{ -public: - virtual ~DbException(); - DbException(int err); - DbException(const char *description); - DbException(const char *prefix, int err); - DbException(const char *prefix1, const char *prefix2, int err); - int get_errno() const; - virtual const char *what() const; - DbEnv *get_env() const; - void set_env(DbEnv *env); - - DbException(const DbException &); - DbException &operator = (const DbException &); - -private: - char *what_; - int err_; // errno - DbEnv *env_; -}; - -// -// A specific sort of exception that occurs when -// an operation is aborted to resolve a deadlock. -// -class _exported DbDeadlockException : public DbException -{ -public: - virtual ~DbDeadlockException(); - DbDeadlockException(const char *description); - - DbDeadlockException(const DbDeadlockException &); - DbDeadlockException &operator = (const DbDeadlockException &); -}; - -// -// A specific sort of exception that occurs when -// a lock is not granted, e.g. by lock_get or lock_vec. -// Note that the Dbt is only live as long as the Dbt used -// in the offending call. -// -class _exported DbLockNotGrantedException : public DbException -{ -public: - virtual ~DbLockNotGrantedException(); - DbLockNotGrantedException(const char *prefix, db_lockop_t op, - db_lockmode_t mode, const Dbt *obj, const DbLock lock, int index); - DbLockNotGrantedException(const char *description); - DbLockNotGrantedException(const DbLockNotGrantedException &); - DbLockNotGrantedException &operator = - (const DbLockNotGrantedException &); - - db_lockop_t get_op() const; - db_lockmode_t get_mode() const; - const Dbt* get_obj() const; - DbLock *get_lock() const; - int get_index() const; - -private: - db_lockop_t op_; - db_lockmode_t mode_; - const Dbt *obj_; - DbLock *lock_; - int index_; -}; +} // -// A specific sort of exception that occurs when -// user declared memory is insufficient in a Dbt. -// -class _exported DbMemoryException : public DbException -{ -public: - virtual ~DbMemoryException(); - DbMemoryException(Dbt *dbt); - DbMemoryException(const char *description); - DbMemoryException(const char *prefix, Dbt *dbt); - DbMemoryException(const char *prefix1, const char *prefix2, Dbt *dbt); - Dbt *get_dbt() const; - - DbMemoryException(const DbMemoryException &); - DbMemoryException &operator = (const DbMemoryException &); - -private: - Dbt *dbt_; -}; - -// -// A specific sort of exception that occurs when -// recovery is required before continuing DB activity. -// -class _exported DbRunRecoveryException : public DbException -{ -public: - virtual ~DbRunRecoveryException(); - DbRunRecoveryException(const char *description); - - DbRunRecoveryException(const DbRunRecoveryException &); - DbRunRecoveryException &operator = (const DbRunRecoveryException &); -}; - -//////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////// -// -// Lock classes -// - -class _exported DbLock -{ - friend class DbEnv; - -public: - DbLock(); - DbLock(const DbLock &); - DbLock &operator = (const DbLock &); - -protected: - // We can add data to this class if needed - // since its contained class is not allocated by db. 
- // (see comment at top) - - DbLock(DB_LOCK); - DB_LOCK lock_; -}; - -//////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////// -// -// Log classes -// - -class _exported DbLsn : public DB_LSN -{ - friend class DbEnv; // friendship needed to cast to base class - friend class DbLogc; // friendship needed to cast to base class -}; - -//////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////// -// -// Memory pool classes +// Represents a database table = a set of keys with associated values. // - -class _exported DbMpoolFile +class _exported Db { friend class DbEnv; - friend class Db; - -private: - // Put this first to allow inlining with some C++ compilers (g++-2.95) - DEFINE_DB_CLASS(DbMpoolFile); public: - int close(u_int32_t flags); - int get(db_pgno_t *pgnoaddr, u_int32_t flags, void *pagep); - int open(const char *file, u_int32_t flags, int mode, size_t pagesize); - int put(void *pgaddr, u_int32_t flags); - int set(void *pgaddr, u_int32_t flags); - int get_clear_len(u_int32_t *len); - int set_clear_len(u_int32_t len); - int get_fileid(u_int8_t *fileid); - int set_fileid(u_int8_t *fileid); - int get_flags(u_int32_t *flagsp); - int set_flags(u_int32_t flags, int onoff); - int get_ftype(int *ftype); - int set_ftype(int ftype); - int get_lsn_offset(int32_t *offsetp); - int set_lsn_offset(int32_t offset); - int get_maxsize(u_int32_t *gbytes, u_int32_t *bytes); - int set_maxsize(u_int32_t gbytes, u_int32_t bytes); - int get_pgcookie(DBT *dbt); - int set_pgcookie(DBT *dbt); - int get_priority(DB_CACHE_PRIORITY *priorityp); - int set_priority(DB_CACHE_PRIORITY priority); - int sync(); - - virtual DB_MPOOLFILE *get_DB_MPOOLFILE() - { - return (DB_MPOOLFILE *)imp(); - } - - virtual const DB_MPOOLFILE *get_const_DB_MPOOLFILE() const - { - return (const DB_MPOOLFILE *)constimp(); - } - -private: - // We can add data to this class if needed - // since it is implemented via a pointer. - // (see comment at top) + Db(DbEnv*, u_int32_t); // create a Db object, then call open() + virtual ~Db(); // does *not* call close. - // Note: use DbEnv::memp_fcreate() to get pointers to a DbMpoolFile, - // and call DbMpoolFile::close() rather than delete to release them. + // These methods exactly match those in the C interface. // - DbMpoolFile(); - - // Shut g++ up. -protected: - virtual ~DbMpoolFile(); - -private: - // no copying - DbMpoolFile(const DbMpoolFile &); - void operator = (const DbMpoolFile &); -}; - -//////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////// -// -// This is filled in and returned by the DbEnv::txn_recover() method. 
-// - -class _exported DbPreplist -{ -public: - DbTxn *txn; - u_int8_t gid[DB_XIDDATASIZE]; -}; - -//////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////// -// -// Transaction classes -// - -class _exported DbTxn -{ - friend class DbEnv; + virtual int associate(DbTxn *txn, Db *secondary, + int (*callback)(Db *, const Dbt *, const Dbt *, Dbt *), + u_int32_t flags); + virtual int close(u_int32_t flags); + virtual int cursor(DbTxn *txnid, Dbc **cursorp, u_int32_t flags); + virtual int del(DbTxn *txnid, Dbt *key, u_int32_t flags); + virtual void err(int, const char *, ...); + virtual void errx(const char *, ...); + virtual int fd(int *fdp); + virtual int get(DbTxn *txnid, Dbt *key, Dbt *data, u_int32_t flags); + virtual void *get_app_private() const; + virtual int get_byteswapped(int *); + virtual int get_dbname(const char **, const char **); + virtual int get_open_flags(u_int32_t *); + virtual int get_type(DBTYPE *); + virtual int get_transactional(); + virtual int join(Dbc **curslist, Dbc **dbcp, u_int32_t flags); + virtual int key_range(DbTxn *, Dbt *, DB_KEY_RANGE *, u_int32_t); + virtual int open(DbTxn *txnid, + const char *, const char *subname, DBTYPE, u_int32_t, int); + virtual int pget(DbTxn *txnid, Dbt *key, Dbt *pkey, Dbt *data, + u_int32_t flags); + virtual int put(DbTxn *, Dbt *, Dbt *, u_int32_t); + virtual int remove(const char *, const char *, u_int32_t); + virtual int rename(const char *, const char *, const char *, u_int32_t); + virtual int set_alloc(db_malloc_fcn_type, db_realloc_fcn_type, + db_free_fcn_type); + virtual void set_app_private(void *); + virtual int set_append_recno(int (*)(Db *, Dbt *, db_recno_t)); + virtual int set_bt_compare(bt_compare_fcn_type); /*deprecated*/ + virtual int set_bt_compare(int (*)(Db *, const Dbt *, const Dbt *)); + virtual int set_bt_maxkey(u_int32_t); + virtual int get_bt_minkey(u_int32_t *); + virtual int set_bt_minkey(u_int32_t); + virtual int set_bt_prefix(bt_prefix_fcn_type); /*deprecated*/ + virtual int set_bt_prefix(size_t (*)(Db *, const Dbt *, const Dbt *)); + virtual int get_cachesize(u_int32_t *, u_int32_t *, int *); + virtual int set_cachesize(u_int32_t, u_int32_t, int); + virtual int set_dup_compare(dup_compare_fcn_type); /*deprecated*/ + virtual int set_dup_compare(int (*)(Db *, const Dbt *, const Dbt *)); + virtual int get_encrypt_flags(u_int32_t *); + virtual int set_encrypt(const char *, u_int32_t); + virtual void set_errcall( + void (*)(const DbEnv *, const char *, const char *)); + virtual void get_errfile(FILE **); + virtual void set_errfile(FILE *); + virtual void get_errpfx(const char **); + virtual void set_errpfx(const char *); + virtual int set_feedback(void (*)(Db *, int, int)); + virtual int get_flags(u_int32_t *); + virtual int set_flags(u_int32_t); + virtual int get_h_ffactor(u_int32_t *); + virtual int set_h_ffactor(u_int32_t); + virtual int set_h_hash(h_hash_fcn_type); /*deprecated*/ + virtual int set_h_hash(u_int32_t (*)(Db *, const void *, u_int32_t)); + virtual int get_h_nelem(u_int32_t *); + virtual int set_h_nelem(u_int32_t); + virtual int get_lorder(int *); + virtual int set_lorder(int); + virtual void set_msgcall(void (*)(const DbEnv *, const char *)); + virtual void get_msgfile(FILE **); + virtual void set_msgfile(FILE *); + virtual int get_pagesize(u_int32_t *); + virtual int set_pagesize(u_int32_t); + virtual int set_paniccall(void (*)(DbEnv *, int)); + virtual int get_re_delim(int *); + virtual int set_re_delim(int); + 
virtual int get_re_len(u_int32_t *); + virtual int set_re_len(u_int32_t); + virtual int get_re_pad(int *); + virtual int set_re_pad(int); + virtual int get_re_source(const char **); + virtual int set_re_source(const char *); + virtual int get_q_extentsize(u_int32_t *); + virtual int set_q_extentsize(u_int32_t); + virtual int stat(DbTxn *, void *sp, u_int32_t flags); + virtual int stat_print(u_int32_t flags); + virtual int sync(u_int32_t flags); + virtual int truncate(DbTxn *, u_int32_t *, u_int32_t); + virtual int upgrade(const char *name, u_int32_t flags); + virtual int verify(const char *, const char *, __DB_STD(ostream) *, + u_int32_t); -private: - // Put this first to allow inlining with some C++ compilers (g++-2.95) - DEFINE_DB_CLASS(DbTxn); + // These additional methods are not in the C interface, and + // are only available for C++. + // + virtual __DB_STD(ostream) *get_error_stream(); + virtual void set_error_stream(__DB_STD(ostream) *); + virtual __DB_STD(ostream) *get_message_stream(); + virtual void set_message_stream(__DB_STD(ostream) *); -public: - int abort(); - int commit(u_int32_t flags); - int discard(u_int32_t flags); - u_int32_t id(); - int prepare(u_int8_t *gid); - int set_timeout(db_timeout_t timeout, u_int32_t flags); + virtual DbEnv *get_env(); + virtual DbMpoolFile *get_mpf(); - virtual DB_TXN *get_DB_TXN() + virtual DB *get_DB() { - return (DB_TXN *)imp(); + return imp_; } - virtual const DB_TXN *get_const_DB_TXN() const + virtual const DB *get_const_DB() const { - return (const DB_TXN *)constimp(); + return imp_; } - static DbTxn* get_DbTxn(DB_TXN *txn) + static Db* get_Db(DB *db) { - return (DbTxn *)txn->api_internal; + return (Db *)db->api_internal; } - static const DbTxn* get_const_DbTxn(const DB_TXN *txn) + static const Db* get_const_Db(const DB *db) { - return (const DbTxn *)txn->api_internal; + return (const Db *)db->api_internal; } - // For internal use only. - static DbTxn* wrap_DB_TXN(DB_TXN *txn); +private: + // no copying + Db(const Db &); + Db &operator = (const Db &); + + void cleanup(); + int initialize(); + int error_policy(); + + // instance data + DB *imp_; + DbEnv *env_; + DbMpoolFile *mpf_; + int construct_error_; + u_int32_t flags_; + u_int32_t construct_flags_; + +public: + // These are public only because they need to be called + // via C callback functions. They should never be used by + // external users of this class. + // + int (*append_recno_callback_)(Db *, Dbt *, db_recno_t); + int (*associate_callback_)(Db *, const Dbt *, const Dbt *, Dbt *); + int (*bt_compare_callback_)(Db *, const Dbt *, const Dbt *); + size_t (*bt_prefix_callback_)(Db *, const Dbt *, const Dbt *); + int (*dup_compare_callback_)(Db *, const Dbt *, const Dbt *); + void (*feedback_callback_)(Db *, int, int); + u_int32_t (*h_hash_callback_)(Db *, const void *, u_int32_t); +}; + +// +// Cursor +// +class _exported Dbc : protected DBC +{ + friend class Db; + +public: + int close(); + int count(db_recno_t *countp, u_int32_t flags); + int del(u_int32_t flags); + int dup(Dbc** cursorp, u_int32_t flags); + int get(Dbt* key, Dbt *data, u_int32_t flags); + int pget(Dbt* key, Dbt* pkey, Dbt *data, u_int32_t flags); + int put(Dbt* key, Dbt *data, u_int32_t flags); private: - // We can add data to this class if needed - // since it is implemented via a pointer. 
- // (see comment at top) + // No data is permitted in this class (see comment at top) - // Note: use DbEnv::txn_begin() to get pointers to a DbTxn, - // and call DbTxn::abort() or DbTxn::commit rather than - // delete to release them. + // Note: use Db::cursor() to get pointers to a Dbc, + // and call Dbc::close() rather than delete to release them. // - DbTxn(); - // For internal use only. - DbTxn(DB_TXN *txn); - virtual ~DbTxn(); + Dbc(); + ~Dbc(); // no copying - DbTxn(const DbTxn &); - void operator = (const DbTxn &); + Dbc(const Dbc &); + Dbc &operator = (const Dbc &); }; // @@ -502,10 +374,6 @@ class _exported DbEnv friend class DbLock; friend class DbMpoolFile; -private: - // Put this first to allow inlining with some C++ compilers (g++-2.95) - DEFINE_DB_CLASS(DbEnv); - public: // After using this constructor, you can set any needed // parameters for the environment using the set_* methods. @@ -531,7 +399,7 @@ public: virtual int open(const char *, u_int32_t, int); virtual int remove(const char *, u_int32_t); virtual int set_alloc(db_malloc_fcn_type, db_realloc_fcn_type, - db_free_fcn_type); + db_free_fcn_type); virtual void set_app_private(void *); virtual int get_cachesize(u_int32_t *, u_int32_t *, int *); virtual int set_cachesize(u_int32_t, u_int32_t, int); @@ -539,7 +407,8 @@ public: virtual int set_data_dir(const char *); virtual int get_encrypt_flags(u_int32_t *); virtual int set_encrypt(const char *, u_int32_t); - virtual void set_errcall(void (*)(const char *, char *)); + virtual void set_errcall( + void (*)(const DbEnv *, const char *, const char *)); virtual void get_errfile(FILE **); virtual void set_errfile(FILE *); virtual void get_errpfx(const char **); @@ -568,6 +437,9 @@ public: virtual int set_lk_max_objects(u_int32_t); virtual int get_mp_mmapsize(size_t *); virtual int set_mp_mmapsize(size_t); + virtual void set_msgcall(void (*)(const DbEnv *, const char *)); + virtual void get_msgfile(FILE **); + virtual void set_msgfile(FILE *); virtual int set_paniccall(void (*)(DbEnv *, int)); virtual int set_rpc_server(void *, char *, long, long, u_int32_t); virtual int get_shm_key(long *); @@ -603,7 +475,10 @@ public: // set_error_stream() to force all errors to a C++ stream. // It is unwise to mix these approaches. 
// - virtual void set_error_stream(__DB_OSTREAMCLASS *); + virtual __DB_STD(ostream) *get_error_stream(); + virtual void set_error_stream(__DB_STD(ostream) *); + virtual __DB_STD(ostream) *get_message_stream(); + virtual void set_message_stream(__DB_STD(ostream) *); // used internally static void runtime_error(DbEnv *env, const char *caller, int err, @@ -624,6 +499,7 @@ public: virtual int lock_id_free(u_int32_t id); virtual int lock_put(DbLock *lock); virtual int lock_stat(DB_LOCK_STAT **statp, u_int32_t flags); + virtual int lock_stat_print(u_int32_t flags); virtual int lock_vec(u_int32_t locker, u_int32_t flags, DB_LOCKREQ list[], int nlist, DB_LOCKREQ **elistp); @@ -637,6 +513,7 @@ public: virtual int log_put(DbLsn *lsn, const Dbt *data, u_int32_t flags); virtual int log_stat(DB_LOG_STAT **spp, u_int32_t flags); + virtual int log_stat_print(u_int32_t flags); // Mpool functions // @@ -646,6 +523,7 @@ public: pgout_fcn_type pgout_fcn); virtual int memp_stat(DB_MPOOL_STAT **gsp, DB_MPOOL_FSTAT ***fsp, u_int32_t flags); + virtual int memp_stat_print(u_int32_t flags); virtual int memp_sync(DbLsn *lsn); virtual int memp_trickle(int pct, int *nwrotep); @@ -657,13 +535,15 @@ public: virtual int txn_recover(DbPreplist *preplist, long count, long *retp, u_int32_t flags); virtual int txn_stat(DB_TXN_STAT **statp, u_int32_t flags); + virtual int txn_stat_print(u_int32_t flags); // Replication functions // - virtual int rep_elect(int, int, u_int32_t, int *); + virtual int rep_elect(int, int, int, u_int32_t, int *, u_int32_t); virtual int rep_process_message(Dbt *, Dbt *, int *, DbLsn *); virtual int rep_start(Dbt *, u_int32_t); virtual int rep_stat(DB_REP_STAT **statp, u_int32_t flags); + virtual int rep_stat_print(u_int32_t flags); virtual int get_rep_limit(u_int32_t *, u_int32_t *); virtual int set_rep_limit(u_int32_t, u_int32_t); virtual int set_rep_transport(int, int (*)(DbEnv *, @@ -673,22 +553,22 @@ public: // virtual DB_ENV *get_DB_ENV() { - return (DB_ENV *)imp(); + return imp_; } virtual const DB_ENV *get_const_DB_ENV() const { - return (const DB_ENV *)constimp(); + return imp_; } static DbEnv* get_DbEnv(DB_ENV *dbenv) { - return (DbEnv *)dbenv->api1_internal; + return dbenv ? (DbEnv *)dbenv->api1_internal : 0; } static const DbEnv* get_const_DbEnv(const DB_ENV *dbenv) { - return (const DbEnv *)dbenv->api1_internal; + return dbenv ? (const DbEnv *)dbenv->api1_internal : 0; } // For internal use only. @@ -698,15 +578,19 @@ public: // via C functions. They should never be called by users // of this class. 
// - static void _stream_error_function(const char *, char *); static int _app_dispatch_intercept(DB_ENV *env, DBT *dbt, DB_LSN *lsn, - db_recops op); + db_recops op); static void _paniccall_intercept(DB_ENV *env, int errval); static void _feedback_intercept(DB_ENV *env, int opcode, int pct); static int _rep_send_intercept(DB_ENV *env, const DBT *cntrl, const DBT *data, const DB_LSN *lsn, int id, u_int32_t flags); + static void _stream_error_function(const DB_ENV *env, + const char *prefix, + const char *message); + static void _stream_message_function(const DB_ENV *env, + const char *message); private: void cleanup(); @@ -721,10 +605,16 @@ private: void operator = (const DbEnv &); // instance data + DB_ENV *imp_; int construct_error_; u_int32_t construct_flags_; + __DB_STD(ostream) *error_stream_; + __DB_STD(ostream) *message_stream_; + int (*app_dispatch_callback_)(DbEnv *, Dbt *, DbLsn *, db_recops); + void (*error_callback_)(const DbEnv *, const char *, const char *); void (*feedback_callback_)(DbEnv *, int, int); + void (*message_callback_)(const DbEnv *, const char *); void (*paniccall_callback_)(DbEnv *, int); int (*pgin_callback_)(DbEnv *dbenv, db_pgno_t pgno, void *pgaddr, Dbt *pgcookie); @@ -732,167 +622,255 @@ private: void *pgaddr, Dbt *pgcookie); int (*rep_send_callback_)(DbEnv *, const Dbt *, const Dbt *, const DbLsn *, int, u_int32_t); - - // class data - static __DB_OSTREAMCLASS *error_stream_; }; -//////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////// // -// Table access classes +// Lock // +class _exported DbLock +{ + friend class DbEnv; + +public: + DbLock(); + DbLock(const DbLock &); + DbLock &operator = (const DbLock &); + +protected: + // We can add data to this class if needed + // since its contained class is not allocated by db. + // (see comment at top) + + DbLock(DB_LOCK); + DB_LOCK lock_; +}; // -// Represents a database table = a set of keys with associated values. +// Log cursor // -class _exported Db +class _exported DbLogc : protected DB_LOGC { friend class DbEnv; +public: + int close(u_int32_t _flags); + int get(DbLsn *lsn, Dbt *data, u_int32_t _flags); + private: - // Put this first to allow inlining with some C++ compilers (g++-2.95) - DEFINE_DB_CLASS(Db); + // No data is permitted in this class (see comment at top) + + // Note: use Db::cursor() to get pointers to a Dbc, + // and call Dbc::close() rather than delete to release them. + // + DbLogc(); + ~DbLogc(); + + // no copying + DbLogc(const Dbc &); + DbLogc &operator = (const Dbc &); +}; + +// +// Log sequence number +// +class _exported DbLsn : public DB_LSN +{ + friend class DbEnv; // friendship needed to cast to base class + friend class DbLogc; // friendship needed to cast to base class +}; + +// +// Memory pool file +// +class _exported DbMpoolFile +{ + friend class DbEnv; + friend class Db; public: - Db(DbEnv*, u_int32_t); // create a Db object, then call open() - virtual ~Db(); // does *not* call close. 
+ int close(u_int32_t flags); + int get(db_pgno_t *pgnoaddr, u_int32_t flags, void *pagep); + int open(const char *file, u_int32_t flags, int mode, size_t pagesize); + int get_transactional(void); + int put(void *pgaddr, u_int32_t flags); + int set(void *pgaddr, u_int32_t flags); + int get_clear_len(u_int32_t *len); + int set_clear_len(u_int32_t len); + int get_fileid(u_int8_t *fileid); + int set_fileid(u_int8_t *fileid); + int get_flags(u_int32_t *flagsp); + int set_flags(u_int32_t flags, int onoff); + int get_ftype(int *ftype); + int set_ftype(int ftype); + int get_lsn_offset(int32_t *offsetp); + int set_lsn_offset(int32_t offset); + int get_maxsize(u_int32_t *gbytes, u_int32_t *bytes); + int set_maxsize(u_int32_t gbytes, u_int32_t bytes); + int get_pgcookie(DBT *dbt); + int set_pgcookie(DBT *dbt); + int get_priority(DB_CACHE_PRIORITY *priorityp); + int set_priority(DB_CACHE_PRIORITY priority); + int sync(); - // These methods exactly match those in the C interface. + virtual DB_MPOOLFILE *get_DB_MPOOLFILE() + { + return imp_; + } + + virtual const DB_MPOOLFILE *get_const_DB_MPOOLFILE() const + { + return imp_; + } + +private: + DB_MPOOLFILE *imp_; + + // We can add data to this class if needed + // since it is implemented via a pointer. + // (see comment at top) + + // Note: use DbEnv::memp_fcreate() to get pointers to a DbMpoolFile, + // and call DbMpoolFile::close() rather than delete to release them. // - virtual int associate(DbTxn *txn, Db *secondary, - int (*callback)(Db *, const Dbt *, const Dbt *, Dbt *), - u_int32_t flags); - virtual int close(u_int32_t flags); - virtual int cursor(DbTxn *txnid, Dbc **cursorp, u_int32_t flags); - virtual int del(DbTxn *txnid, Dbt *key, u_int32_t flags); - virtual void err(int, const char *, ...); - virtual void errx(const char *, ...); - virtual int fd(int *fdp); - virtual int get(DbTxn *txnid, Dbt *key, Dbt *data, u_int32_t flags); - virtual void *get_app_private() const; - virtual int get_byteswapped(int *); - virtual int get_dbname(const char **, const char **); - virtual int get_open_flags(u_int32_t *); - virtual int get_type(DBTYPE *); - virtual int join(Dbc **curslist, Dbc **dbcp, u_int32_t flags); - virtual int key_range(DbTxn *, Dbt *, DB_KEY_RANGE *, u_int32_t); - virtual int open(DbTxn *txnid, - const char *, const char *subname, DBTYPE, u_int32_t, int); - virtual int pget(DbTxn *txnid, Dbt *key, Dbt *pkey, Dbt *data, - u_int32_t flags); - virtual int put(DbTxn *, Dbt *, Dbt *, u_int32_t); - virtual int remove(const char *, const char *, u_int32_t); - virtual int rename(const char *, const char *, const char *, u_int32_t); - virtual int set_alloc(db_malloc_fcn_type, db_realloc_fcn_type, - db_free_fcn_type); - virtual void set_app_private(void *); - virtual int set_append_recno(int (*)(Db *, Dbt *, db_recno_t)); - virtual int set_bt_compare(bt_compare_fcn_type); /*deprecated*/ - virtual int set_bt_compare(int (*)(Db *, const Dbt *, const Dbt *)); - virtual int set_bt_maxkey(u_int32_t); - virtual int get_bt_minkey(u_int32_t *); - virtual int set_bt_minkey(u_int32_t); - virtual int set_bt_prefix(bt_prefix_fcn_type); /*deprecated*/ - virtual int set_bt_prefix(size_t (*)(Db *, const Dbt *, const Dbt *)); - virtual int get_cachesize(u_int32_t *, u_int32_t *, int *); - virtual int set_cachesize(u_int32_t, u_int32_t, int); - virtual int set_dup_compare(dup_compare_fcn_type); /*deprecated*/ - virtual int set_dup_compare(int (*)(Db *, const Dbt *, const Dbt *)); - virtual int get_encrypt_flags(u_int32_t *); - virtual int set_encrypt(const char 
*, u_int32_t); - virtual void set_errcall(void (*)(const char *, char *)); - virtual void get_errfile(FILE **); - virtual void set_errfile(FILE *); - virtual void get_errpfx(const char **); - virtual void set_errpfx(const char *); - virtual int set_feedback(void (*)(Db *, int, int)); - virtual int get_flags(u_int32_t *); - virtual int set_flags(u_int32_t); - virtual int get_h_ffactor(u_int32_t *); - virtual int set_h_ffactor(u_int32_t); - virtual int set_h_hash(h_hash_fcn_type); /*deprecated*/ - virtual int set_h_hash(u_int32_t (*)(Db *, const void *, u_int32_t)); - virtual int get_h_nelem(u_int32_t *); - virtual int set_h_nelem(u_int32_t); - virtual int get_lorder(int *); - virtual int set_lorder(int); - virtual int get_pagesize(u_int32_t *); - virtual int set_pagesize(u_int32_t); - virtual int set_paniccall(void (*)(DbEnv *, int)); - virtual int get_re_delim(int *); - virtual int set_re_delim(int); - virtual int get_re_len(u_int32_t *); - virtual int set_re_len(u_int32_t); - virtual int get_re_pad(int *); - virtual int set_re_pad(int); - virtual int get_re_source(const char **); - virtual int set_re_source(const char *); - virtual int get_q_extentsize(u_int32_t *); - virtual int set_q_extentsize(u_int32_t); - virtual int stat(void *sp, u_int32_t flags); - virtual int sync(u_int32_t flags); - virtual int truncate(DbTxn *, u_int32_t *, u_int32_t); - virtual int upgrade(const char *name, u_int32_t flags); - virtual int verify(const char *, const char *, __DB_OSTREAMCLASS *, - u_int32_t); + DbMpoolFile(); + + // Shut g++ up. +protected: + virtual ~DbMpoolFile(); + +private: + // no copying + DbMpoolFile(const DbMpoolFile &); + void operator = (const DbMpoolFile &); +}; - // These additional methods are not in the C interface, and - // are only available for C++. - // - virtual void set_error_stream(__DB_OSTREAMCLASS *); +// +// This is filled in and returned by the DbEnv::txn_recover() method. +// +class _exported DbPreplist +{ +public: + DbTxn *txn; + u_int8_t gid[DB_XIDDATASIZE]; +}; - virtual DbEnv *get_env(); - virtual DbMpoolFile *get_mpf(); +// +// A sequence record in a database +// +class _exported DbSequence +{ +public: + DbSequence(Db *db, u_int32_t flags); + virtual ~DbSequence(); - virtual DB *get_DB() + int open(DbTxn *txnid, Dbt *key, u_int32_t flags); + int initial_value(db_seq_t value); + int close(u_int32_t flags); + int remove(DbTxn *txnid, u_int32_t flags); + int stat(DB_SEQUENCE_STAT **sp, u_int32_t flags); + int stat_print(u_int32_t flags); + + int get(DbTxn *txnid, int32_t delta, db_seq_t *retp, u_int32_t flags); + int get_cachesize(int32_t *sizep); + int set_cachesize(int32_t size); + int get_flags(u_int32_t *flagsp); + int set_flags(u_int32_t flags); + int get_range(db_seq_t *minp, db_seq_t *maxp); + int set_range(db_seq_t min, db_seq_t max); + + Db *get_db(); + Dbt *get_key(); + + virtual DB_SEQUENCE *get_DB_SEQUENCE() { - return (DB *)imp(); + return imp_; } - virtual const DB *get_const_DB() const + virtual const DB_SEQUENCE *get_const_DB_SEQUENCE() const { - return (const DB *)constimp(); + return imp_; } - static Db* get_Db(DB *db) + static DbSequence* get_DbSequence(DB_SEQUENCE *seq) { - return (Db *)db->api_internal; + return (DbSequence *)seq->api_internal; } - static const Db* get_const_Db(const DB *db) + static const DbSequence* get_const_DbSequence(const DB_SEQUENCE *seq) { - return (const Db *)db->api_internal; + return (const DbSequence *)seq->api_internal; } + // For internal use only. 
+ static DbSequence* wrap_DB_SEQUENCE(DB_SEQUENCE *seq); + private: + DbSequence(DB_SEQUENCE *seq); // no copying - Db(const Db &); - Db &operator = (const Db &); + DbSequence(const DbSequence &); + DbSequence &operator = (const DbSequence &); - void cleanup(); - int initialize(); - int error_policy(); + DB_SEQUENCE *imp_; + DBT key_; +}; - // instance data - DbEnv *env_; - DbMpoolFile *mpf_; - int construct_error_; - u_int32_t flags_; - u_int32_t construct_flags_; +// +// Transaction +// +class _exported DbTxn +{ + friend class DbEnv; public: - // These are public only because they need to be called - // via C callback functions. They should never be used by - // external users of this class. + int abort(); + int commit(u_int32_t flags); + int discard(u_int32_t flags); + u_int32_t id(); + int prepare(u_int8_t *gid); + int set_timeout(db_timeout_t timeout, u_int32_t flags); + + virtual DB_TXN *get_DB_TXN() + { + return imp_; + } + + virtual const DB_TXN *get_const_DB_TXN() const + { + return imp_; + } + + static DbTxn* get_DbTxn(DB_TXN *txn) + { + return (DbTxn *)txn->api_internal; + } + + static const DbTxn* get_const_DbTxn(const DB_TXN *txn) + { + return (const DbTxn *)txn->api_internal; + } + + // For internal use only. + static DbTxn* wrap_DB_TXN(DB_TXN *txn); + +private: + DB_TXN *imp_; + + // We can add data to this class if needed + // since it is implemented via a pointer. + // (see comment at top) + + // Note: use DbEnv::txn_begin() to get pointers to a DbTxn, + // and call DbTxn::abort() or DbTxn::commit rather than + // delete to release them. // - int (*append_recno_callback_)(Db *, Dbt *, db_recno_t); - int (*associate_callback_)(Db *, const Dbt *, const Dbt *, Dbt *); - int (*bt_compare_callback_)(Db *, const Dbt *, const Dbt *); - size_t (*bt_prefix_callback_)(Db *, const Dbt *, const Dbt *); - int (*dup_compare_callback_)(Db *, const Dbt *, const Dbt *); - void (*feedback_callback_)(Db *, int, int); - u_int32_t (*h_hash_callback_)(Db *, const void *, u_int32_t); + DbTxn(); + // For internal use only. + DbTxn(DB_TXN *txn); + virtual ~DbTxn(); + + // no copying + DbTxn(const DbTxn &); + void operator = (const DbTxn &); }; // @@ -900,13 +878,13 @@ public: // class _exported Dbt : private DBT { - friend class Dbc; friend class Db; + friend class Dbc; friend class DbEnv; friend class DbLogc; + friend class DbSequence; public: - // key/data void *get_data() const { return data; } void set_data(void *value) { data = value; } @@ -957,55 +935,6 @@ private: // not of your subclassed type. }; -class _exported Dbc : protected DBC -{ - friend class Db; - -public: - int close(); - int count(db_recno_t *countp, u_int32_t flags); - int del(u_int32_t flags); - int dup(Dbc** cursorp, u_int32_t flags); - int get(Dbt* key, Dbt *data, u_int32_t flags); - int pget(Dbt* key, Dbt* pkey, Dbt *data, u_int32_t flags); - int put(Dbt* key, Dbt *data, u_int32_t flags); - -private: - // No data is permitted in this class (see comment at top) - - // Note: use Db::cursor() to get pointers to a Dbc, - // and call Dbc::close() rather than delete to release them. 
- // - Dbc(); - ~Dbc(); - - // no copying - Dbc(const Dbc &); - Dbc &operator = (const Dbc &); -}; - -class _exported DbLogc : protected DB_LOGC -{ - friend class DbEnv; - -public: - int close(u_int32_t _flags); - int get(DbLsn *lsn, Dbt *data, u_int32_t _flags); - -private: - // No data is permitted in this class (see comment at top) - - // Note: use Db::cursor() to get pointers to a Dbc, - // and call Dbc::close() rather than delete to release them. - // - DbLogc(); - ~DbLogc(); - - // no copying - DbLogc(const Dbc &); - DbLogc &operator = (const Dbc &); -}; - //////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////// // @@ -1045,4 +974,116 @@ public: bool next(Dbt &data); }; +//////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////// +// +// Exception classes +// + +// Almost any error in the DB library throws a DbException. +// Every exception should be considered an abnormality +// (e.g. bug, misuse of DB, file system error). +// +class _exported DbException : public __DB_STD(exception) +{ +public: + virtual ~DbException() throw(); + DbException(int err); + DbException(const char *description); + DbException(const char *description, int err); + DbException(const char *prefix, const char *description, int err); + int get_errno() const; + virtual const char *what() const throw(); + DbEnv *get_env() const; + void set_env(DbEnv *env); + + DbException(const DbException &); + DbException &operator = (const DbException &); + +private: + void describe(const char *prefix, const char *description); + + char *what_; + int err_; // errno + DbEnv *env_; +}; + +// +// A specific sort of exception that occurs when +// an operation is aborted to resolve a deadlock. +// +class _exported DbDeadlockException : public DbException +{ +public: + virtual ~DbDeadlockException() throw(); + DbDeadlockException(const char *description); + + DbDeadlockException(const DbDeadlockException &); + DbDeadlockException &operator = (const DbDeadlockException &); +}; + +// +// A specific sort of exception that occurs when +// a lock is not granted, e.g. by lock_get or lock_vec. +// Note that the Dbt is only live as long as the Dbt used +// in the offending call. +// +class _exported DbLockNotGrantedException : public DbException +{ +public: + virtual ~DbLockNotGrantedException() throw(); + DbLockNotGrantedException(const char *prefix, db_lockop_t op, + db_lockmode_t mode, const Dbt *obj, const DbLock lock, int index); + DbLockNotGrantedException(const char *description); + + DbLockNotGrantedException(const DbLockNotGrantedException &); + DbLockNotGrantedException &operator = + (const DbLockNotGrantedException &); + + db_lockop_t get_op() const; + db_lockmode_t get_mode() const; + const Dbt* get_obj() const; + DbLock *get_lock() const; + int get_index() const; + +private: + db_lockop_t op_; + db_lockmode_t mode_; + const Dbt *obj_; + DbLock *lock_; + int index_; +}; + +// +// A specific sort of exception that occurs when +// user declared memory is insufficient in a Dbt. 
+// +class _exported DbMemoryException : public DbException +{ +public: + virtual ~DbMemoryException() throw(); + DbMemoryException(Dbt *dbt); + DbMemoryException(const char *prefix, Dbt *dbt); + + DbMemoryException(const DbMemoryException &); + DbMemoryException &operator = (const DbMemoryException &); + + Dbt *get_dbt() const; +private: + Dbt *dbt_; +}; + +// +// A specific sort of exception that occurs when +// recovery is required before continuing DB activity. +// +class _exported DbRunRecoveryException : public DbException +{ +public: + virtual ~DbRunRecoveryException() throw(); + DbRunRecoveryException(const char *description); + + DbRunRecoveryException(const DbRunRecoveryException &); + DbRunRecoveryException &operator = (const DbRunRecoveryException &); +}; #endif /* !_DB_CXX_H_ */ diff --git a/db/build_win32/db_deadlock.dsp b/db/build_win32/db_deadlock.dsp index eb5688a2d..9a8993833 100644 --- a/db/build_win32/db_deadlock.dsp +++ b/db/build_win32/db_deadlock.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "db_deadlock - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "db_deadlock - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "db_deadlock - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib 
comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/db_dll.dsp b/db/build_win32/db_dll.dsp index 52a1d4838..f09c22f6b 100644 --- a/db/build_win32/db_dll.dsp +++ b/db/build_win32/db_dll.dsp @@ -53,7 +53,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386 -# ADD LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb42.dll" +# ADD LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb43.dll" !ELSEIF "$(CFG)" == "db_dll - Win32 Debug" @@ -80,7 +80,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept -# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb42d.dll" /fixed:no +# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb43d.dll" /fixed:no !ENDIF @@ -262,6 +262,10 @@ SOURCE=..\cxx\cxx_multi.cpp # End Source File # Begin Source File +SOURCE=..\cxx\cxx_seq.cpp +# End Source File +# Begin Source File + SOURCE=..\cxx\cxx_txn.cpp # End Source File # Begin Source File @@ -354,6 +358,18 @@ SOURCE=..\db\db_ret.c # End Source File # Begin Source File +SOURCE=..\db\db_setid.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_setlsn.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_stati.c +# End Source File +# Begin Source File + SOURCE=..\db\db_truncate.c # End Source File # Begin Source File @@ -390,6 +406,10 @@ SOURCE=..\dbreg\dbreg_rec.c # End Source File # Begin Source File +SOURCE=..\dbreg\dbreg_stat.c +# End Source File +# Begin Source File + SOURCE=..\dbreg\dbreg_util.c # End Source File # Begin Source File @@ -422,6 +442,10 @@ SOURCE=..\env\env_region.c # End Source File # Begin Source File +SOURCE=..\env\env_stat.c +# End Source File +# Begin Source File + SOURCE=..\fileops\fileops_auto.c # End Source File # Begin Source File @@ -514,6 +538,14 @@ SOURCE=..\lock\lock_deadlock.c # End Source File # Begin Source File +SOURCE=..\lock\lock_id.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_list.c +# End Source File +# Begin Source File + SOURCE=..\lock\lock_method.c # End Source File # Begin Source File @@ -526,6 +558,10 @@ SOURCE=..\lock\lock_stat.c # End Source File # Begin Source File +SOURCE=..\lock\lock_timer.c +# End Source File +# Begin Source File + SOURCE=..\lock\lock_util.c # End Source File # Begin Source File @@ -554,6 +590,10 @@ SOURCE=..\log\log_put.c # End Source File # Begin Source File +SOURCE=..\log\log_stat.c +# End Source File +# Begin Source File + SOURCE=..\mp\mp_alloc.c # End Source File # Begin Source File @@ -566,6 +606,10 @@ SOURCE=..\mp\mp_fget.c # End Source File # Begin Source File 
+SOURCE=..\mp\mp_fmethod.c +# End Source File +# Begin Source File + SOURCE=..\mp\mp_fopen.c # End Source File # Begin Source File @@ -642,10 +686,6 @@ SOURCE=..\os\os_tmpdir.c # End Source File # Begin Source File -SOURCE=..\os\os_unlink.c -# End Source File -# Begin Source File - SOURCE=..\os_win32\os_abs.c # End Source File # Begin Source File @@ -710,6 +750,14 @@ SOURCE=..\os_win32\os_stat.c # End Source File # Begin Source File +SOURCE=..\os_win32\os_truncate.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_unlink.c +# End Source File +# Begin Source File + SOURCE=..\qam\qam.c # End Source File # Begin Source File @@ -750,6 +798,14 @@ SOURCE=..\qam\qam_verify.c # End Source File # Begin Source File +SOURCE=..\rep\rep_auto.c +# End Source File +# Begin Source File + +SOURCE=..\rep\rep_backup.c +# End Source File +# Begin Source File + SOURCE=..\rep\rep_method.c # End Source File # Begin Source File @@ -762,10 +818,22 @@ SOURCE=..\rep\rep_region.c # End Source File # Begin Source File +SOURCE=..\rep\rep_stat.c +# End Source File +# Begin Source File + SOURCE=..\rep\rep_util.c # End Source File # Begin Source File +SOURCE=..\sequence\seq_stat.c +# End Source File +# Begin Source File + +SOURCE=..\sequence\sequence.c +# End Source File +# Begin Source File + SOURCE=..\txn\txn.c # End Source File # Begin Source File diff --git a/db/build_win32/db_dump.dsp b/db/build_win32/db_dump.dsp index 6da230922..709ce2471 100644 --- a/db/build_win32/db_dump.dsp +++ b/db/build_win32/db_dump.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "db_dump - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "db_dump - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "db_dump - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib 
winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/db_int.h b/db/build_win32/db_int.h index c6daf139b..f7af2cd19 100644 --- a/db/build_win32/db_int.h +++ b/db/build_win32/db_int.h @@ -2,10 +2,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: db_int.in,v 11.126 2003/09/10 17:27:14 sue Exp $ + * $Id: db_int.in,v 11.153 2004/10/05 14:43:53 mjc Exp $ */ #ifndef _DB_INTERNAL_H_ @@ -36,8 +36,39 @@ extern "C" { /******************************************************* * General purpose constants and macros. *******************************************************/ -#define UINT16_T_MAX 0xffff /* Maximum 16 bit unsigned. */ -#define UINT32_T_MAX 0xffffffff /* Maximum 32 bit unsigned. */ +#ifndef UINT16_MAX +#define UINT16_MAX 65535 /* Maximum 16-bit unsigned. */ +#endif +#ifndef UINT32_MAX +#define UINT32_MAX 4294967295U /* Maximum 32-bit unsigned. */ +#endif + +#if defined(HAVE_LONG_LONG) && defined(HAVE_UNSIGNED_LONG_LONG) +#undef INT64_MAX +#undef INT64_MIN +#undef UINT64_MAX + +#ifdef DB_WIN32 +#define INT64_MAX _I64_MAX +#define INT64_MIN _I64_MIN +#define UINT64_MAX _UI64_MAX + +#define INT64_FMT "%l64d" +#define UINT64_FMT "%l64u" +#else +/* + * Override the system's 64-bit min/max constants. AIX's 32-bit compiler can + * handle 64-bit values, but the system's constants don't include the LL/ULL + * suffix, and so can't be compiled using the 32-bit compiler. + */ +#define INT64_MAX 9223372036854775807LL +#define INT64_MIN (-INT64_MAX-1) +#define UINT64_MAX 18446744073709551615ULL + +#define INT64_FMT "%lld" +#define UINT64_FMT "%llu" +#endif /* DB_WIN32 */ +#endif /* HAVE_LONG_LONG && HAVE_UNSIGNED_LONG_LONG */ #define MEGABYTE 1048576 #define GIGABYTE 1073741824 @@ -65,53 +96,38 @@ extern "C" { */ #define DB_DEF_IOSIZE (8 * 1024) -/* Number of times to reties I/O operations that return EINTR or EBUSY. */ -#define DB_RETRY 100 +/* Align an integer to a specific boundary. */ +#undef DB_ALIGN +#define DB_ALIGN(v, bound) \ + (((v) + (bound) - 1) & ~(((uintmax_t)bound) - 1)) -/* - * Aligning items to particular sizes or in pages or memory. - * - * db_align_t -- - * Largest integral type, used to align structures in memory. We don't store - * floating point types in structures, so integral types should be sufficient - * (and we don't have to worry about systems that store floats in other than - * power-of-2 numbers of bytes). Additionally this fixes compiler that rewrite - * structure assignments and ANSI C memcpy calls to be in-line instructions - * that happen to require alignment. Note: this alignment isn't sufficient for - * mutexes, which depend on things like cache line alignment. Mutex alignment - * is handled separately, in mutex.h. - * - * db_alignp_t -- - * Integral type that's the same size as a pointer. 
There are places where - * DB modifies pointers by discarding the bottom bits to guarantee alignment. - * We can't use db_align_t, it may be larger than the pointer, and compilers - * get upset about that. So far we haven't run on any machine where there - * isn't an integral type the same size as a pointer -- here's hoping. - */ -typedef unsigned long db_align_t; -typedef unsigned long db_alignp_t; +/* Increment a pointer to a specific boundary. */ +#undef ALIGNP_INC +#define ALIGNP_INC(p, bound) \ + (void *)(((uintptr_t)(p) + (bound) - 1) & ~(((uintptr_t)bound) - 1)) -/* Align an integer to a specific boundary. */ -#undef ALIGN -#define ALIGN(v, bound) (((v) + (bound) - 1) & ~(((db_align_t)bound) - 1)) +/* Decrement a pointer to a specific boundary. */ +#undef ALIGNP_DEC +#define ALIGNP_DEC(p, bound) \ + (void *)((uintptr_t)(p) & ~(((uintptr_t)bound) - 1)) /* * Print an address as a u_long (a u_long is the largest type we can print * portably). Most 64-bit systems have made longs 64-bits, so this should * work. */ -#define P_TO_ULONG(p) ((u_long)(db_alignp_t)(p)) +#define P_TO_ULONG(p) ((u_long)(uintptr_t)(p)) /* * Convert a pointer to a small integral value. * - * The (u_int16_t)(db_alignp_t) cast avoids warnings: the (db_alignp_t) cast + * The (u_int16_t)(uintptr_t) cast avoids warnings: the (uintptr_t) cast * converts the value to an integral type, and the (u_int16_t) cast converts * it to a small integral type so we don't get complaints when we assign the - * final result to an integral type smaller than db_alignp_t. + * final result to an integral type smaller than uintptr_t. */ -#define P_TO_UINT32(p) ((u_int32_t)(db_alignp_t)(p)) -#define P_TO_UINT16(p) ((u_int16_t)(db_alignp_t)(p)) +#define P_TO_UINT32(p) ((u_int32_t)(uintptr_t)(p)) +#define P_TO_UINT16(p) ((u_int16_t)(uintptr_t)(p)) /* * There are several on-page structures that are declared to have a number of @@ -149,9 +165,64 @@ typedef struct __fn { #define LF_ISSET(f) ((flags) & (f)) #define LF_SET(f) ((flags) |= (f)) -/* Display separator string. */ -#undef DB_LINE -#define DB_LINE "=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=" +/* + * Calculate a percentage. The values can overflow 32-bit integer arithmetic + * so we use floating point. + * + * When calculating a bytes-vs-page size percentage, we're getting the inverse + * of the percentage in all cases, that is, we want 100 minus the percentage we + * calculate. + */ +#define DB_PCT(v, total) \ + ((int)((total) == 0 ? 0 : ((double)(v) * 100) / (total))) +#define DB_PCT_PG(v, total, pgsize) \ + ((int)((total) == 0 ? 0 : \ + 100 - ((double)(v) * 100) / ((total) * (pgsize)))) + +/* + * Structure used for callback message aggregation. + * + * Display values in XXX_stat_print calls. + */ +typedef struct __db_msgbuf { + char *buf; /* Heap allocated buffer. */ + char *cur; /* Current end of message. */ + size_t len; /* Allocated length of buffer. 
*/ +} DB_MSGBUF; +#define DB_MSGBUF_INIT(a) do { \ + (a)->buf = (a)->cur = NULL; \ + (a)->len = 0; \ +} while (0) +#define DB_MSGBUF_FLUSH(dbenv, a) do { \ + if ((a)->buf != NULL) { \ + if ((a)->cur != (a)->buf) \ + __db_msg(dbenv, "%s", (a)->buf); \ + __os_free(dbenv, (a)->buf); \ + DB_MSGBUF_INIT(a); \ + } \ +} while (0) +#define STAT_FMT(msg, fmt, type, v) do { \ + DB_MSGBUF __mb; \ + DB_MSGBUF_INIT(&__mb); \ + __db_msgadd(dbenv, &__mb, fmt, (type)(v)); \ + __db_msgadd(dbenv, &__mb, "\t%s", msg); \ + DB_MSGBUF_FLUSH(dbenv, &__mb); \ +} while (0) +#define STAT_HEX(msg, v) \ + __db_msg(dbenv, "%#lx\t%s", (u_long)(v), msg) +#define STAT_ISSET(msg, p) \ + __db_msg(dbenv, "%sSet\t%s", (p) == NULL ? "!" : " ", msg) +#define STAT_LONG(msg, v) \ + __db_msg(dbenv, "%ld\t%s", (long)(v), msg) +#define STAT_LSN(msg, lsnp) \ + __db_msg(dbenv, "%lu/%lu\t%s", \ + (u_long)(lsnp)->file, (u_long)(lsnp)->offset, msg) +#define STAT_STRING(msg, p) do { \ + const char *__p = p; /* p may be a function call. */ \ + __db_msg(dbenv, "%s\t%s", __p == NULL ? "!Set" : __p, msg); \ +} while (0) +#define STAT_ULONG(msg, v) \ + __db_msg(dbenv, "%lu\t%s", (u_long)(v), msg) /******************************************************* * API return values @@ -177,7 +248,8 @@ typedef struct __fn { (ret) == DB_REP_ISPERM || \ (ret) == DB_REP_NEWMASTER || \ (ret) == DB_REP_NEWSITE || \ - (ret) == DB_REP_NOTPERM) + (ret) == DB_REP_NOTPERM || \ + (ret) == DB_REP_STARTUPDONE) /* Find a reasonable operation-not-supported error. */ #ifdef EOPNOTSUPP @@ -263,6 +335,9 @@ typedef enum { #define ENV_REQUIRES_CONFIG(dbenv, handle, i, flags) \ if (handle == NULL) \ return (__db_env_config(dbenv, i, flags)); +#define ENV_NOT_CONFIGURED(dbenv, handle, i, flags) \ + if (F_ISSET((dbenv), DB_ENV_OPEN_CALLED)) \ + ENV_REQUIRES_CONFIG(dbenv, handle, i, flags) /******************************************************* * Database Access Methods. @@ -386,11 +461,11 @@ typedef struct __dbpginfo { } while (0) #define MAX_LSN(LSN) do { \ - (LSN).file = UINT32_T_MAX; \ - (LSN).offset = UINT32_T_MAX; \ + (LSN).file = UINT32_MAX; \ + (LSN).offset = UINT32_MAX; \ } while (0) #define IS_MAX_LSN(LSN) \ - ((LSN).file == UINT32_T_MAX && (LSN).offset == UINT32_T_MAX) + ((LSN).file == UINT32_MAX && (LSN).offset == UINT32_MAX) /* If logging is turned off, smash the lsn. */ #define LSN_NOT_LOGGED(LSN) do { \ @@ -404,6 +479,8 @@ typedef struct __dbpginfo { * Txn. *******************************************************/ #define DB_NONBLOCK(C) ((C)->txn != NULL && F_ISSET((C)->txn, TXN_NOWAIT)) +#define NOWAIT_FLAG(txn) \ + ((txn) != NULL && F_ISSET((txn), TXN_NOWAIT) ? DB_LOCK_NOWAIT : 0) #define IS_SUBTRANSACTION(txn) \ ((txn) != NULL && (txn)->parent != NULL) @@ -413,6 +490,33 @@ typedef struct __dbpginfo { #define DB_IV_BYTES 16 /* Bytes per IV */ #define DB_MAC_KEY 20 /* Bytes per MAC checksum */ +/******************************************************* + * Secondaries over RPC. + *******************************************************/ +#ifdef CONFIG_TEST +/* + * These are flags passed to DB->associate calls by the Tcl API if running + * over RPC. The RPC server will mask out these flags before making the real + * DB->associate call. + * + * These flags must coexist with the valid flags to DB->associate (currently + * DB_AUTO_COMMIT and DB_CREATE). DB_AUTO_COMMIT is in the group of + * high-order shared flags (0xff000000), and DB_CREATE is in the low-order + * group (0x00000fff), so we pick a range in between. 
+ */ +#define DB_RPC2ND_MASK 0x00f00000 /* Reserved bits. */ + +#define DB_RPC2ND_REVERSEDATA 0x00100000 /* callback_n(0) _s_reversedata. */ +#define DB_RPC2ND_NOOP 0x00200000 /* callback_n(1) _s_noop */ +#define DB_RPC2ND_CONCATKEYDATA 0x00300000 /* callback_n(2) _s_concatkeydata */ +#define DB_RPC2ND_CONCATDATAKEY 0x00400000 /* callback_n(3) _s_concatdatakey */ +#define DB_RPC2ND_REVERSECONCAT 0x00500000 /* callback_n(4) _s_reverseconcat */ +#define DB_RPC2ND_TRUNCDATA 0x00600000 /* callback_n(5) _s_truncdata */ +#define DB_RPC2ND_CONSTANT 0x00700000 /* callback_n(6) _s_constant */ +#define DB_RPC2ND_GETZIP 0x00800000 /* sj_getzip */ +#define DB_RPC2ND_GETNAME 0x00900000 /* sj_getname */ +#endif + /******************************************************* * Forward structure declarations. *******************************************************/ diff --git a/db/build_win32/db_java.dsp b/db/build_win32/db_java.dsp index 3fe85f523..535774353 100644 --- a/db/build_win32/db_java.dsp +++ b/db/build_win32/db_java.dsp @@ -53,19 +53,19 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb_java42.dll" +# ADD LINK32 Release/libdb43.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb_java43.dll" # Begin Custom Build - Compiling java files using javac ProjDir=. -InputPath=.\Release\libdb_java42.dll +InputPath=.\Release\libdb_java43.dll SOURCE="$(InputPath)" "force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)" echo compiling Berkeley DB classes mkdir "$(OUTDIR)\classes" - javac -g -d "$(OUTDIR)\classes" -classpath "$(OUTDIR)/classes" ..\java\src\com\sleepycat\db\*.java ..\java\src\com\sleepycat\db\$(OUTDIR)\*.java ..\java\src\com\sleepycat\bdb\bind\*.java ..\java\src\com\sleepycat\bdb\bind\serial\*.java ..\java\src\com\sleepycat\bdb\bind\tuple\*.java ..\java\src\com\sleepycat\bdb\*.java ..\java\src\com\sleepycat\bdb\collection\*.java ..\java\src\com\sleepycat\bdb\factory\*.java ..\java\src\com\sleepycat\bdb\util\*.java + javac -O -d "$(OUTDIR)\classes" -classpath "$(OUTDIR)/classes" ..\java\src\com\sleepycat\db\*.java ..\java\src\com\sleepycat\db\internal\*.java ..\java\src\com\sleepycat\bind\*.java ..\java\src\com\sleepycat\bind\serial\*.java ..\java\src\com\sleepycat\bind\tuple\*.java ..\java\src\com\sleepycat\collections\*.java ..\java\src\com\sleepycat\compat\*.java ..\java\src\com\sleepycat\util\*.java echo compiling examples mkdir "$(OUTDIR)\classes.ex" - javac -g -d "$(OUTDIR)\classes.ex" -classpath "$(OUTDIR)\classes;$(OUTDIR)\classes.ex" ..\examples_java\src\com\sleepycat\examples\db\*.java ..\examples_java\src\com\sleepycat\examples\bdb\access\*.java ..\examples_java\src\com\sleepycat\examples\bdb\helloworld\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\basic\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\entity\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\tuple\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\sentity\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\marshal\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\factory\*.java + javac -O -d "$(OUTDIR)\classes.ex" -classpath "$(OUTDIR)\classes;$(OUTDIR)\classes.ex" 
..\examples_java\src\com\sleepycat\examples\db\*.java ..\examples_java\src\com\sleepycat\examples\db\GettingStarted\*.java ..\examples_java\src\com\sleepycat\examples\collections\access\*.java ..\examples_java\src\com\sleepycat\examples\collections\hello\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\basic\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\entity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\tuple\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\sentity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\marshal\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\factory\*.java echo creating jar files jar cf "$(OUTDIR)\db.jar" -C "$(OUTDIR)\classes" . jar cf "$(OUTDIR)\dbexamples.jar" -C "$(OUTDIR)\classes.ex" . @@ -98,19 +98,19 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb_java42d.dll" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb_java43d.dll" /fixed:no # Begin Custom Build - Compiling java files using javac ProjDir=. -InputPath=.\Debug\libdb_java42d.dll +InputPath=.\Debug\libdb_java43d.dll SOURCE="$(InputPath)" "force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)" echo compiling Berkeley DB classes mkdir "$(OUTDIR)\classes" - javac -g -d "$(OUTDIR)\classes" -classpath "$(OUTDIR)/classes" ..\java\src\com\sleepycat\db\*.java ..\java\src\com\sleepycat\db\$(OUTDIR)\*.java ..\java\src\com\sleepycat\bdb\bind\*.java ..\java\src\com\sleepycat\bdb\bind\serial\*.java ..\java\src\com\sleepycat\bdb\bind\tuple\*.java ..\java\src\com\sleepycat\bdb\*.java ..\java\src\com\sleepycat\bdb\collection\*.java ..\java\src\com\sleepycat\bdb\factory\*.java ..\java\src\com\sleepycat\bdb\util\*.java + javac -g -d "$(OUTDIR)\classes" -classpath "$(OUTDIR)/classes" ..\java\src\com\sleepycat\db\*.java ..\java\src\com\sleepycat\db\internal\*.java ..\java\src\com\sleepycat\bind\*.java ..\java\src\com\sleepycat\bind\serial\*.java ..\java\src\com\sleepycat\bind\tuple\*.java ..\java\src\com\sleepycat\collections\*.java ..\java\src\com\sleepycat\compat\*.java ..\java\src\com\sleepycat\util\*.java echo compiling examples mkdir "$(OUTDIR)\classes.ex" - javac -g -d "$(OUTDIR)\classes.ex" -classpath "$(OUTDIR)\classes;$(OUTDIR)\classes.ex" ..\examples_java\src\com\sleepycat\examples\db\*.java ..\examples_java\src\com\sleepycat\examples\bdb\access\*.java ..\examples_java\src\com\sleepycat\examples\bdb\helloworld\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\basic\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\entity\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\tuple\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\sentity\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\marshal\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\factory\*.java + javac -g -d "$(OUTDIR)\classes.ex" -classpath "$(OUTDIR)\classes;$(OUTDIR)\classes.ex" ..\examples_java\src\com\sleepycat\examples\db\*.java 
..\examples_java\src\com\sleepycat\examples\db\GettingStarted\*.java ..\examples_java\src\com\sleepycat\examples\collections\access\*.java ..\examples_java\src\com\sleepycat\examples\collections\hello\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\basic\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\entity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\tuple\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\sentity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\marshal\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\factory\*.java echo creating jar files jar cf "$(OUTDIR)\db.jar" -C "$(OUTDIR)\classes" . jar cf "$(OUTDIR)\dbexamples.jar" -C "$(OUTDIR)\classes.ex" . diff --git a/db/build_win32/db_load.dsp b/db/build_win32/db_load.dsp index 50adc11be..277b02b71 100644 --- a/db/build_win32/db_load.dsp +++ b/db/build_win32/db_load.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "db_load - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "db_load - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "db_load - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib 
ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/db_perf.dsp b/db/build_win32/db_perf.dsp index 82587c8ca..2e2617d61 100644 --- a/db/build_win32/db_perf.dsp +++ b/db/build_win32/db_perf.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "db_perf - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "db_perf - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "db_perf - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/db_printlog.dsp b/db/build_win32/db_printlog.dsp index dd51dbfef..7ca7fec5f 100644 --- a/db/build_win32/db_printlog.dsp +++ b/db/build_win32/db_printlog.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 
/nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "db_printlog - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "db_printlog - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "db_printlog - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF @@ -138,10 +138,46 @@ LINK32=link.exe # Name "db_printlog - Win32 Debug Static" # Begin Source File +SOURCE=..\btree\btree_autop.c +# End Source File +# Begin Source File + +SOURCE=..\db\crdel_autop.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_autop.c +# End Source File +# Begin Source File + SOURCE=..\db_printlog\db_printlog.c # End Source File # Begin Source File +SOURCE=..\dbreg\dbreg_autop.c +# End Source File +# Begin Source File + +SOURCE=..\fileops\fileops_autop.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_autop.c +# End Source File +# Begin Source File + +SOURCE=..\qam\qam_autop.c +# End Source File +# Begin Source File + +SOURCE=..\rep\rep_autop.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_autop.c +# End Source File +# Begin Source File + SOURCE=..\clib\getopt.c # End Source File # End Target diff --git a/db/build_win32/db_recover.dsp b/db/build_win32/db_recover.dsp index 3be0c3b3e..e41a41f69 100644 --- a/db/build_win32/db_recover.dsp +++ b/db/build_win32/db_recover.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib 
winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "db_recover - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "db_recover - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "db_recover - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/db_small.dsp b/db/build_win32/db_small.dsp index 25560431d..f82b0b08f 100644 --- a/db/build_win32/db_small.dsp +++ b/db/build_win32/db_small.dsp @@ -48,8 +48,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LIB32=link.exe -lib -# ADD BASE LIB32 /nologo /out:"Release_small/libdb42s.lib" -# ADD LIB32 /nologo /out:"Release_small/libdb42s.lib" +# ADD BASE LIB32 /nologo /out:"Release_small/libdb43s.lib" +# ADD LIB32 /nologo /out:"Release_small/libdb43s.lib" !ELSEIF "$(CFG)" == "db_small - Win32 Debug Static" @@ -71,8 +71,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LIB32=link.exe -lib -# ADD BASE LIB32 /nologo /out:"Debug_small/libdb42sd.lib" -# ADD LIB32 /nologo /out:"Debug_small/libdb42sd.lib" +# ADD BASE LIB32 /nologo /out:"Debug_small/libdb43sd.lib" +# ADD LIB32 /nologo /out:"Debug_small/libdb43sd.lib" !ENDIF @@ -226,6 +226,10 @@ SOURCE=..\cxx\cxx_multi.cpp # End Source File 
# Begin Source File +SOURCE=..\cxx\cxx_seq.cpp +# End Source File +# Begin Source File + SOURCE=..\cxx\cxx_txn.cpp # End Source File # Begin Source File @@ -314,6 +318,18 @@ SOURCE=..\db\db_ret.c # End Source File # Begin Source File +SOURCE=..\db\db_setid.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_setlsn.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_stati.c +# End Source File +# Begin Source File + SOURCE=..\db\db_truncate.c # End Source File # Begin Source File @@ -342,6 +358,10 @@ SOURCE=..\dbreg\dbreg_rec.c # End Source File # Begin Source File +SOURCE=..\dbreg\dbreg_stat.c +# End Source File +# Begin Source File + SOURCE=..\dbreg\dbreg_util.c # End Source File # Begin Source File @@ -374,6 +394,10 @@ SOURCE=..\env\env_region.c # End Source File # Begin Source File +SOURCE=..\env\env_stat.c +# End Source File +# Begin Source File + SOURCE=..\fileops\fileops_auto.c # End Source File # Begin Source File @@ -414,6 +438,14 @@ SOURCE=..\lock\lock_deadlock.c # End Source File # Begin Source File +SOURCE=..\lock\lock_id.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_list.c +# End Source File +# Begin Source File + SOURCE=..\lock\lock_method.c # End Source File # Begin Source File @@ -426,6 +458,10 @@ SOURCE=..\lock\lock_stat.c # End Source File # Begin Source File +SOURCE=..\lock\lock_timer.c +# End Source File +# Begin Source File + SOURCE=..\lock\lock_util.c # End Source File # Begin Source File @@ -454,6 +490,10 @@ SOURCE=..\log\log_put.c # End Source File # Begin Source File +SOURCE=..\log\log_stat.c +# End Source File +# Begin Source File + SOURCE=..\mp\mp_alloc.c # End Source File # Begin Source File @@ -466,6 +506,10 @@ SOURCE=..\mp\mp_fget.c # End Source File # Begin Source File +SOURCE=..\mp\mp_fmethod.c +# End Source File +# Begin Source File + SOURCE=..\mp\mp_fopen.c # End Source File # Begin Source File @@ -542,10 +586,6 @@ SOURCE=..\os\os_tmpdir.c # End Source File # Begin Source File -SOURCE=..\os\os_unlink.c -# End Source File -# Begin Source File - SOURCE=..\os_win32\os_abs.c # End Source File # Begin Source File @@ -610,6 +650,14 @@ SOURCE=..\os_win32\os_stat.c # End Source File # Begin Source File +SOURCE=..\os_win32\os_truncate.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_unlink.c +# End Source File +# Begin Source File + SOURCE=..\qam\qam_stub.c # End Source File # Begin Source File @@ -618,6 +666,14 @@ SOURCE=..\rep\rep_stub.c # End Source File # Begin Source File +SOURCE=..\sequence\seq_stat.c +# End Source File +# Begin Source File + +SOURCE=..\sequence\sequence.c +# End Source File +# Begin Source File + SOURCE=..\txn\txn.c # End Source File # Begin Source File diff --git a/db/build_win32/db_stat.dsp b/db/build_win32/db_stat.dsp index beda97384..5397b52b4 100644 --- a/db/build_win32/db_stat.dsp +++ b/db/build_win32/db_stat.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "db_stat - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib 
shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "db_stat - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "db_stat - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/db_static.dsp b/db/build_win32/db_static.dsp index ae6b5c8ce..063db5791 100644 --- a/db/build_win32/db_static.dsp +++ b/db/build_win32/db_static.dsp @@ -48,8 +48,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LIB32=link.exe -lib -# ADD BASE LIB32 /nologo /out:"Release/libdb42s.lib" -# ADD LIB32 /nologo /out:"Release_static/libdb42s.lib" +# ADD BASE LIB32 /nologo /out:"Release/libdb43s.lib" +# ADD LIB32 /nologo /out:"Release_static/libdb43s.lib" !ELSEIF "$(CFG)" == "db_static - Win32 Debug Static" @@ -71,8 +71,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LIB32=link.exe -lib -# ADD BASE LIB32 /nologo /out:"Debug/libdb42sd.lib" -# ADD LIB32 /nologo /out:"Debug_static/libdb42sd.lib" +# ADD BASE LIB32 /nologo /out:"Debug/libdb43sd.lib" +# ADD LIB32 /nologo /out:"Debug_static/libdb43sd.lib" !ENDIF @@ -246,6 +246,10 @@ SOURCE=..\cxx\cxx_multi.cpp # End Source File # Begin Source File +SOURCE=..\cxx\cxx_seq.cpp +# End Source File +# Begin Source File + SOURCE=..\cxx\cxx_txn.cpp # End Source File # Begin Source File @@ -338,6 +342,18 @@ SOURCE=..\db\db_ret.c # End Source File # Begin Source File +SOURCE=..\db\db_setid.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_setlsn.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_stati.c +# End Source File +# Begin Source File + SOURCE=..\db\db_truncate.c # End Source File # Begin Source File @@ -374,6 +390,10 @@ SOURCE=..\dbreg\dbreg_rec.c # End Source File # Begin 
Source File +SOURCE=..\dbreg\dbreg_stat.c +# End Source File +# Begin Source File + SOURCE=..\dbreg\dbreg_util.c # End Source File # Begin Source File @@ -406,6 +426,10 @@ SOURCE=..\env\env_region.c # End Source File # Begin Source File +SOURCE=..\env\env_stat.c +# End Source File +# Begin Source File + SOURCE=..\fileops\fileops_auto.c # End Source File # Begin Source File @@ -498,6 +522,14 @@ SOURCE=..\lock\lock_deadlock.c # End Source File # Begin Source File +SOURCE=..\lock\lock_id.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_list.c +# End Source File +# Begin Source File + SOURCE=..\lock\lock_method.c # End Source File # Begin Source File @@ -510,6 +542,10 @@ SOURCE=..\lock\lock_stat.c # End Source File # Begin Source File +SOURCE=..\lock\lock_timer.c +# End Source File +# Begin Source File + SOURCE=..\lock\lock_util.c # End Source File # Begin Source File @@ -538,6 +574,10 @@ SOURCE=..\log\log_put.c # End Source File # Begin Source File +SOURCE=..\log\log_stat.c +# End Source File +# Begin Source File + SOURCE=..\mp\mp_alloc.c # End Source File # Begin Source File @@ -550,6 +590,10 @@ SOURCE=..\mp\mp_fget.c # End Source File # Begin Source File +SOURCE=..\mp\mp_fmethod.c +# End Source File +# Begin Source File + SOURCE=..\mp\mp_fopen.c # End Source File # Begin Source File @@ -626,10 +670,6 @@ SOURCE=..\os\os_tmpdir.c # End Source File # Begin Source File -SOURCE=..\os\os_unlink.c -# End Source File -# Begin Source File - SOURCE=..\os_win32\os_abs.c # End Source File # Begin Source File @@ -694,6 +734,14 @@ SOURCE=..\os_win32\os_stat.c # End Source File # Begin Source File +SOURCE=..\os_win32\os_truncate.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_unlink.c +# End Source File +# Begin Source File + SOURCE=..\qam\qam.c # End Source File # Begin Source File @@ -734,6 +782,14 @@ SOURCE=..\qam\qam_verify.c # End Source File # Begin Source File +SOURCE=..\rep\rep_auto.c +# End Source File +# Begin Source File + +SOURCE=..\rep\rep_backup.c +# End Source File +# Begin Source File + SOURCE=..\rep\rep_method.c # End Source File # Begin Source File @@ -746,10 +802,22 @@ SOURCE=..\rep\rep_region.c # End Source File # Begin Source File +SOURCE=..\rep\rep_stat.c +# End Source File +# Begin Source File + SOURCE=..\rep\rep_util.c # End Source File # Begin Source File +SOURCE=..\sequence\seq_stat.c +# End Source File +# Begin Source File + +SOURCE=..\sequence\sequence.c +# End Source File +# Begin Source File + SOURCE=..\txn\txn.c # End Source File # Begin Source File diff --git a/db/build_win32/db_tcl.dsp b/db/build_win32/db_tcl.dsp index 30cccd3e6..665475e50 100644 --- a/db/build_win32/db_tcl.dsp +++ b/db/build_win32/db_tcl.dsp @@ -53,7 +53,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386 -# ADD LINK32 Release/libdb42.lib tcl84.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb_tcl42.dll" +# ADD LINK32 Release/libdb43.lib tcl84.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb_tcl43.dll" !ELSEIF "$(CFG)" == "db_tcl - Win32 Debug" @@ -80,7 +80,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo 
/subsystem:windows /dll /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib tcl84g.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb_tcl42d.dll" /fixed:no +# ADD LINK32 Debug/libdb43d.lib tcl84g.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb_tcl43d.dll" /fixed:no !ENDIF @@ -134,6 +134,10 @@ SOURCE=..\tcl\tcl_rep.c # End Source File # Begin Source File +SOURCE=..\tcl\tcl_seq.c +# End Source File +# Begin Source File + SOURCE=..\tcl\tcl_txn.c # End Source File # Begin Source File diff --git a/db/build_win32/db_test.dsp b/db/build_win32/db_test.dsp index 4bb5cb919..457b00cde 100644 --- a/db/build_win32/db_test.dsp +++ b/db/build_win32/db_test.dsp @@ -50,7 +50,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 # Begin Special Build Tool SOURCE="$(InputPath)" PostBuild_Desc=Copy built executable files. @@ -79,7 +79,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /out:"Debug/dbkill.exe" /fixed:no +# ADD LINK32 Debug/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /out:"Debug/dbkill.exe" /fixed:no # Begin Special Build Tool SOURCE="$(InputPath)" PostBuild_Desc=Copy built executable files. 
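[Editorial note] The hunks above and below all apply the same mechanical change: every reference to the 4.2 link libraries (libdb42.lib, libdb42d.lib, libdb42s.lib, libdb42sd.lib, libdb_tcl42d.dll, and so on) is bumped to the corresponding 4.3 name. As a minimal sketch of that pattern only — the real project files appear to be regenerated from the *_dsp.src templates by the dist scripts, and this helper, its regex, and its paths are illustrative assumptions, not part of the Berkeley DB distribution — the rename across the build_win32 project files could look like this:

    # Hypothetical helper (not part of the dist/ scripts): bump the DB
    # version suffix embedded in the Win32 project files, e.g. 42 -> 43.
    import re
    from pathlib import Path

    OLD, NEW = "42", "43"
    # Matches libdb42.lib, libdb42d.lib, libdb42s(d).lib, libdb_tcl42(d).dll, ...
    PATTERN = re.compile(r"(libdb(?:_tcl|_java)?)" + OLD + r"((?:s?d?)\.(?:lib|dll))")

    def bump(path: Path) -> None:
        text = path.read_text()
        new_text = PATTERN.sub(lambda m: m.group(1) + NEW + m.group(2), text)
        if new_text != text:
            path.write_text(new_text)
            print("updated", path)

    for dsp in Path("db/build_win32").glob("*.dsp"):  # assumed checkout layout
        bump(dsp)

The remaining .dsp hunks in this patch follow the same substitution and are shown unchanged below.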
diff --git a/db/build_win32/db_upgrade.dsp b/db/build_win32/db_upgrade.dsp index 2cc9b8862..e5c49aac3 100644 --- a/db/build_win32/db_upgrade.dsp +++ b/db/build_win32/db_upgrade.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "db_upgrade - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "db_upgrade - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "db_upgrade - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/db_verify.dsp b/db/build_win32/db_verify.dsp index ad2669c8f..09eb3d18a 100644 --- a/db/build_win32/db_verify.dsp +++ b/db/build_win32/db_verify.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF 
"$(CFG)" == "db_verify - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "db_verify - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "db_verify - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/dbkill.cpp b/db/build_win32/dbkill.cpp index 91dab52db..7be76135c 100644 --- a/db/build_win32/dbkill.cpp +++ b/db/build_win32/dbkill.cpp @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. 
* - * $Id: dbkill.cpp,v 11.8 2003/01/08 04:05:42 bostic Exp $ + * $Id: dbkill.cpp,v 11.9 2004/01/28 03:35:52 bostic Exp $ */ /* * Kill - diff --git a/db/build_win32/ex_access.dsp b/db/build_win32/ex_access.dsp index e845c704b..09c1ad172 100644 --- a/db/build_win32/ex_access.dsp +++ b/db/build_win32/ex_access.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "ex_access - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "ex_access - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "ex_access - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/ex_btrec.dsp b/db/build_win32/ex_btrec.dsp index 227df9a28..6cbb91071 100644 --- a/db/build_win32/ex_btrec.dsp +++ b/db/build_win32/ex_btrec.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 
/nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "ex_btrec - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "ex_btrec - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "ex_btrec - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/ex_env.dsp b/db/build_win32/ex_env.dsp index d6ba8071e..5d6176014 100644 --- a/db/build_win32/ex_env.dsp +++ b/db/build_win32/ex_env.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "ex_env - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 
/nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "ex_env - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "ex_env - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/ex_lock.dsp b/db/build_win32/ex_lock.dsp index 25ddb8732..3cb1dc170 100644 --- a/db/build_win32/ex_lock.dsp +++ b/db/build_win32/ex_lock.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "ex_lock - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "ex_lock - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "ex_lock - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe 
-# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/ex_mpool.dsp b/db/build_win32/ex_mpool.dsp index b1e3b0e05..27dfd5e93 100644 --- a/db/build_win32/ex_mpool.dsp +++ b/db/build_win32/ex_mpool.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "ex_mpool - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "ex_mpool - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "ex_mpool - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib 
odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/ex_repquote.dsp b/db/build_win32/ex_repquote.dsp index 966aff955..76ec321c0 100644 --- a/db/build_win32/ex_repquote.dsp +++ b/db/build_win32/ex_repquote.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 ws2_32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib ws2_32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib ws2_32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "ex_repquote - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 ws2_32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib ws2_32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib ws2_32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "ex_repquote - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib ws2_32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib ws2_32.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib ws2_32.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib ws2_32.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "ex_repquote - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib ws2_32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib ws2_32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib ws2_32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib ws2_32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/ex_tpcb.dsp b/db/build_win32/ex_tpcb.dsp index cab6a401f..d8029a6dc 100644 --- a/db/build_win32/ex_tpcb.dsp +++ 
b/db/build_win32/ex_tpcb.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "ex_tpcb - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "ex_tpcb - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "ex_tpcb - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/excxx_access.dsp b/db/build_win32/excxx_access.dsp index 798e0636e..a36c3f648 100644 --- a/db/build_win32/excxx_access.dsp +++ b/db/build_win32/excxx_access.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "excxx_access - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib 
user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "excxx_access - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "excxx_access - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/excxx_btrec.dsp b/db/build_win32/excxx_btrec.dsp index f17546350..12f0c106d 100644 --- a/db/build_win32/excxx_btrec.dsp +++ b/db/build_win32/excxx_btrec.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "excxx_btrec - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "excxx_btrec - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib 
/nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "excxx_btrec - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/excxx_env.dsp b/db/build_win32/excxx_env.dsp index 68d091b5f..ae1b2986f 100644 --- a/db/build_win32/excxx_env.dsp +++ b/db/build_win32/excxx_env.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "excxx_env - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "excxx_env - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "excxx_env - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo 
/subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/excxx_lock.dsp b/db/build_win32/excxx_lock.dsp index 582750233..7bf8aadcb 100644 --- a/db/build_win32/excxx_lock.dsp +++ b/db/build_win32/excxx_lock.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "excxx_lock - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "excxx_lock - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "excxx_lock - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib 
comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/excxx_mpool.dsp b/db/build_win32/excxx_mpool.dsp index fdf59b924..48172fbf3 100644 --- a/db/build_win32/excxx_mpool.dsp +++ b/db/build_win32/excxx_mpool.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb42.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "excxx_mpool - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "excxx_mpool - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "excxx_mpool - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/excxx_tpcb.dsp b/db/build_win32/excxx_tpcb.dsp index c18336efc..6a734fdfc 100644 --- a/db/build_win32/excxx_tpcb.dsp +++ b/db/build_win32/excxx_tpcb.dsp @@ -52,7 +52,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 
Release/libdb42.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" !ELSEIF "$(CFG)" == "excxx_tpcb - Win32 Debug" @@ -76,7 +76,7 @@ BSC32=bscmake.exe # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb42d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no !ELSEIF "$(CFG)" == "excxx_tpcb - Win32 Release Static" @@ -100,8 +100,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb42.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb42s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:I386 !ELSEIF "$(CFG)" == "excxx_tpcb - Win32 Debug Static" @@ -125,8 +125,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb42d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb42sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no !ENDIF diff --git a/db/build_win32/include.tcl b/db/build_win32/include.tcl index ce9067706..fd0e69f2a 100644 --- a/db/build_win32/include.tcl +++ b/db/build_win32/include.tcl @@ -1,10 +1,11 @@ # Automatically built by dist/s_test; may require local editing. set tclsh_path SET_YOUR_TCLSH_PATH -set tcllib ./Debug/libdb_tcl42d.dll +set tcllib ./Debug/libdb_tcl43d.dll set src_root .. 
set test_path ../test +set je_root ../../je global testdir set testdir ./TESTDIR diff --git a/db/build_win32/java_dsp.src b/db/build_win32/java_dsp.src index e444d30ef..521800c17 100644 --- a/db/build_win32/java_dsp.src +++ b/db/build_win32/java_dsp.src @@ -62,10 +62,10 @@ SOURCE="$(InputPath)" "force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)" echo compiling Berkeley DB classes mkdir "$(OUTDIR)\classes" - javac -g -d "$(OUTDIR)\classes" -classpath "$(OUTDIR)/classes" ..\java\src\com\sleepycat\db\*.java ..\java\src\com\sleepycat\db\$(OUTDIR)\*.java ..\java\src\com\sleepycat\bdb\bind\*.java ..\java\src\com\sleepycat\bdb\bind\serial\*.java ..\java\src\com\sleepycat\bdb\bind\tuple\*.java ..\java\src\com\sleepycat\bdb\*.java ..\java\src\com\sleepycat\bdb\collection\*.java ..\java\src\com\sleepycat\bdb\factory\*.java ..\java\src\com\sleepycat\bdb\util\*.java + javac -O -d "$(OUTDIR)\classes" -classpath "$(OUTDIR)/classes" ..\java\src\com\sleepycat\db\*.java ..\java\src\com\sleepycat\db\internal\*.java ..\java\src\com\sleepycat\bind\*.java ..\java\src\com\sleepycat\bind\serial\*.java ..\java\src\com\sleepycat\bind\tuple\*.java ..\java\src\com\sleepycat\collections\*.java ..\java\src\com\sleepycat\compat\*.java ..\java\src\com\sleepycat\util\*.java echo compiling examples mkdir "$(OUTDIR)\classes.ex" - javac -g -d "$(OUTDIR)\classes.ex" -classpath "$(OUTDIR)\classes;$(OUTDIR)\classes.ex" ..\examples_java\src\com\sleepycat\examples\db\*.java ..\examples_java\src\com\sleepycat\examples\bdb\access\*.java ..\examples_java\src\com\sleepycat\examples\bdb\helloworld\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\basic\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\entity\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\tuple\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\sentity\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\marshal\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\factory\*.java + javac -O -d "$(OUTDIR)\classes.ex" -classpath "$(OUTDIR)\classes;$(OUTDIR)\classes.ex" ..\examples_java\src\com\sleepycat\examples\db\*.java ..\examples_java\src\com\sleepycat\examples\db\GettingStarted\*.java ..\examples_java\src\com\sleepycat\examples\collections\access\*.java ..\examples_java\src\com\sleepycat\examples\collections\hello\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\basic\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\entity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\tuple\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\sentity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\marshal\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\factory\*.java echo creating jar files jar cf "$(OUTDIR)\db.jar" -C "$(OUTDIR)\classes" . jar cf "$(OUTDIR)\dbexamples.jar" -C "$(OUTDIR)\classes.ex" . 
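The project-file changes above switch every example and utility from the libdb42*.lib import libraries to libdb43*.lib. As a minimal sketch of how an application can confirm at run time that the db.h it compiled against matches the library it actually loaded after such a rename (only the public db_version() call and the DB_VERSION_* macros are used; the check itself is illustrative and not part of this patch):

	#include <stdio.h>
	#include <stdlib.h>
	#include "db.h"

	int
	main(void)
	{
		int major, minor, patch;

		/* db_version() reports the version of the linked library. */
		(void)db_version(&major, &minor, &patch);

		/* DB_VERSION_* come from the db.h seen at compile time. */
		if (major != DB_VERSION_MAJOR || minor != DB_VERSION_MINOR) {
			fprintf(stderr,
			    "version mismatch: header %d.%d, library %d.%d\n",
			    DB_VERSION_MAJOR, DB_VERSION_MINOR, major, minor);
			return (EXIT_FAILURE);
		}
		printf("Berkeley DB %d.%d.%d\n", major, minor, patch);
		return (EXIT_SUCCESS);
	}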
@@ -107,10 +107,10 @@ SOURCE="$(InputPath)" "force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)" echo compiling Berkeley DB classes mkdir "$(OUTDIR)\classes" - javac -g -d "$(OUTDIR)\classes" -classpath "$(OUTDIR)/classes" ..\java\src\com\sleepycat\db\*.java ..\java\src\com\sleepycat\db\$(OUTDIR)\*.java ..\java\src\com\sleepycat\bdb\bind\*.java ..\java\src\com\sleepycat\bdb\bind\serial\*.java ..\java\src\com\sleepycat\bdb\bind\tuple\*.java ..\java\src\com\sleepycat\bdb\*.java ..\java\src\com\sleepycat\bdb\collection\*.java ..\java\src\com\sleepycat\bdb\factory\*.java ..\java\src\com\sleepycat\bdb\util\*.java + javac -g -d "$(OUTDIR)\classes" -classpath "$(OUTDIR)/classes" ..\java\src\com\sleepycat\db\*.java ..\java\src\com\sleepycat\db\internal\*.java ..\java\src\com\sleepycat\bind\*.java ..\java\src\com\sleepycat\bind\serial\*.java ..\java\src\com\sleepycat\bind\tuple\*.java ..\java\src\com\sleepycat\collections\*.java ..\java\src\com\sleepycat\compat\*.java ..\java\src\com\sleepycat\util\*.java echo compiling examples mkdir "$(OUTDIR)\classes.ex" - javac -g -d "$(OUTDIR)\classes.ex" -classpath "$(OUTDIR)\classes;$(OUTDIR)\classes.ex" ..\examples_java\src\com\sleepycat\examples\db\*.java ..\examples_java\src\com\sleepycat\examples\bdb\access\*.java ..\examples_java\src\com\sleepycat\examples\bdb\helloworld\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\basic\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\entity\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\tuple\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\sentity\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\marshal\*.java ..\examples_java\src\com\sleepycat\examples\bdb\shipment\factory\*.java + javac -g -d "$(OUTDIR)\classes.ex" -classpath "$(OUTDIR)\classes;$(OUTDIR)\classes.ex" ..\examples_java\src\com\sleepycat\examples\db\*.java ..\examples_java\src\com\sleepycat\examples\db\GettingStarted\*.java ..\examples_java\src\com\sleepycat\examples\collections\access\*.java ..\examples_java\src\com\sleepycat\examples\collections\hello\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\basic\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\entity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\tuple\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\sentity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\marshal\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\factory\*.java echo creating jar files jar cf "$(OUTDIR)\db.jar" -C "$(OUTDIR)\classes" . jar cf "$(OUTDIR)\dbexamples.jar" -C "$(OUTDIR)\classes.ex" . diff --git a/db/build_win32/libdb.def b/db/build_win32/libdb.def index 31b049306..f519ad543 100644 --- a/db/build_win32/libdb.def +++ b/db/build_win32/libdb.def @@ -1,127 +1,171 @@ ; DO NOT EDIT: automatically built by dist/s_win32. 
-DESCRIPTION 'Berkeley DB 4.2 Library' - EXPORTS db_create @1 db_env_create @2 - db_strerror @3 - db_version @4 - db_xa_switch @5 - log_compare @6 - db_env_set_func_close @7 - db_env_set_func_dirfree @8 - db_env_set_func_dirlist @9 - db_env_set_func_exists @10 - db_env_set_func_free @11 - db_env_set_func_fsync @12 - db_env_set_func_ioinfo @13 - db_env_set_func_malloc @14 - db_env_set_func_map @15 - db_env_set_func_open @16 - db_env_set_func_read @17 - db_env_set_func_realloc @18 - db_env_set_func_rename @19 - db_env_set_func_seek @20 - db_env_set_func_sleep @21 - db_env_set_func_unlink @22 - db_env_set_func_unmap @23 - db_env_set_func_write @24 - db_env_set_func_yield @25 - __db_add_recovery @26 - __db_dbm_close @27 - __db_dbm_delete @28 - __db_dbm_fetch @29 - __db_dbm_firstkey @30 - __db_dbm_init @31 - __db_dbm_nextkey @32 - __db_dbm_store @33 - __db_hcreate @34 - __db_hdestroy @35 - __db_hsearch @36 - __db_loadme @37 - __db_ndbm_clearerr @38 - __db_ndbm_close @39 - __db_ndbm_delete @40 - __db_ndbm_dirfno @41 - __db_ndbm_error @42 - __db_ndbm_fetch @43 - __db_ndbm_firstkey @44 - __db_ndbm_nextkey @45 - __db_ndbm_open @46 - __db_ndbm_pagfno @47 - __db_ndbm_rdonly @48 - __db_ndbm_store @49 - __db_panic @50 - __db_r_attach @51 - __db_r_detach @52 - __db_win32_mutex_init @53 - __db_win32_mutex_lock @54 - __db_win32_mutex_unlock @55 - __ham_func2 @56 - __ham_func3 @57 - __ham_func4 @58 - __ham_func5 @59 - __ham_test @60 - __lock_dump_region @61 - __lock_id_set @62 - __memp_dump_region @63 - __os_calloc @64 - __os_closehandle @65 - __os_free @66 - __os_ioinfo @67 - __os_malloc @68 - __os_open @69 - __os_openhandle @70 - __os_read @71 - __os_realloc @72 - __os_strdup @73 - __os_umalloc @74 - __os_write @75 - __txn_id_set @76 - __bam_init_print @77 - __bam_pgin @78 - __bam_pgout @79 - __crdel_init_print @80 - __db_dispatch @81 - __db_dump @82 - __db_e_stat @83 - __db_err @84 - __db_getlong @85 - __db_getulong @86 - __db_global_values @87 - __db_init_print @88 - __db_inmemdbflags @89 - __db_isbigendian @90 - __db_omode @91 - __db_overwrite @92 - __db_pgin @93 - __db_pgout @94 - __db_pr_callback @95 - __db_prdbt @96 - __db_prfooter @97 - __db_prheader @98 - __db_rpath @99 - __db_util_cache @100 - __db_util_interrupted @101 - __db_util_logset @102 - __db_util_siginit @103 - __db_util_sigresend @104 - __db_verify_internal @105 - __dbreg_init_print @106 - __fop_init_print @107 - __ham_get_meta @108 - __ham_init_print @109 - __ham_pgin @110 - __ham_pgout @111 - __ham_release_meta @112 - __os_clock @113 - __os_get_errno @114 - __os_id @115 - __os_set_errno @116 - __os_sleep @117 - __os_ufree @118 - __os_yield @119 - __qam_init_print @120 - __qam_pgin_out @121 - __txn_init_print @122 + db_sequence_create @3 + db_strerror @4 + db_version @5 + db_xa_switch @6 + log_compare @7 + db_env_set_func_close @8 + db_env_set_func_dirfree @9 + db_env_set_func_dirlist @10 + db_env_set_func_exists @11 + db_env_set_func_free @12 + db_env_set_func_fsync @13 + db_env_set_func_ftruncate @14 + db_env_set_func_ioinfo @15 + db_env_set_func_malloc @16 + db_env_set_func_map @17 + db_env_set_func_open @18 + db_env_set_func_pread @19 + db_env_set_func_pwrite @20 + db_env_set_func_read @21 + db_env_set_func_realloc @22 + db_env_set_func_rename @23 + db_env_set_func_seek @24 + db_env_set_func_sleep @25 + db_env_set_func_unlink @26 + db_env_set_func_unmap @27 + db_env_set_func_write @28 + db_env_set_func_yield @29 + __db_add_recovery @30 + __db_dbm_close @31 + __db_dbm_delete @32 + __db_dbm_fetch @33 + __db_dbm_firstkey @34 + 
__db_dbm_init @35 + __db_dbm_nextkey @36 + __db_dbm_store @37 + __db_get_flags_fn @38 + __db_get_seq_flags_fn @39 + __db_hcreate @40 + __db_hdestroy @41 + __db_hsearch @42 + __db_loadme @43 + __db_ndbm_clearerr @44 + __db_ndbm_close @45 + __db_ndbm_delete @46 + __db_ndbm_dirfno @47 + __db_ndbm_error @48 + __db_ndbm_fetch @49 + __db_ndbm_firstkey @50 + __db_ndbm_nextkey @51 + __db_ndbm_open @52 + __db_ndbm_pagfno @53 + __db_ndbm_rdonly @54 + __db_ndbm_store @55 + __db_panic @56 + __db_r_attach @57 + __db_r_detach @58 + __db_win32_mutex_init @59 + __db_win32_mutex_lock @60 + __db_win32_mutex_unlock @61 + __ham_func2 @62 + __ham_func3 @63 + __ham_func4 @64 + __ham_func5 @65 + __ham_test @66 + __lock_id_set @67 + __os_calloc @68 + __os_closehandle @69 + __os_free @70 + __os_ioinfo @71 + __os_malloc @72 + __os_open @73 + __os_openhandle @74 + __os_read @75 + __os_realloc @76 + __os_strdup @77 + __os_umalloc @78 + __os_write @79 + __txn_id_set @80 + __bam_adj_read @81 + __bam_cadjust_read @82 + __bam_cdel_read @83 + __bam_curadj_read @84 + __bam_pgin @85 + __bam_pgout @86 + __bam_rcuradj_read @87 + __bam_relink_read @88 + __bam_repl_read @89 + __bam_root_read @90 + __bam_rsplit_read @91 + __bam_split_read @92 + __crdel_metasub_read @93 + __db_addrem_read @94 + __db_big_read @95 + __db_cksum_read @96 + __db_debug_read @97 + __db_dispatch @98 + __db_dumptree @99 + __db_err @100 + __db_fileid_reset @101 + __db_getlong @102 + __db_getulong @103 + __db_global_values @104 + __db_isbigendian @105 + __db_lsn_reset @106 + __db_noop_read @107 + __db_omode @108 + __db_overwrite @109 + __db_ovref_read @110 + __db_pg_alloc_read @111 + __db_pg_free_read @112 + __db_pg_freedata_read @113 + __db_pg_init_read @114 + __db_pg_new_read @115 + __db_pg_prepare_read @116 + __db_pgin @117 + __db_pgout @118 + __db_pr_callback @119 + __db_rpath @120 + __db_stat_pp @121 + __db_stat_print_pp @122 + __db_util_cache @123 + __db_util_interrupted @124 + __db_util_logset @125 + __db_util_siginit @126 + __db_util_sigresend @127 + __db_verify_internal @128 + __dbreg_register_read @129 + __fop_create_read @130 + __fop_file_remove_read @131 + __fop_remove_read @132 + __fop_rename_read @133 + __fop_write_read @134 + __ham_chgpg_read @135 + __ham_copypage_read @136 + __ham_curadj_read @137 + __ham_get_meta @138 + __ham_groupalloc_read @139 + __ham_insdel_read @140 + __ham_metagroup_read @141 + __ham_newpage_read @142 + __ham_pgin @143 + __ham_pgout @144 + __ham_release_meta @145 + __ham_replace_read @146 + __ham_splitdata_read @147 + __lock_list_print @148 + __log_stat_pp @149 + __os_clock @150 + __os_get_errno @151 + __os_id @152 + __os_set_errno @153 + __os_sleep @154 + __os_ufree @155 + __os_yield @156 + __qam_add_read @157 + __qam_del_read @158 + __qam_delext_read @159 + __qam_incfirst_read @160 + __qam_mvptr_read @161 + __qam_pgin_out @162 + __rep_stat_print @163 + __txn_child_read @164 + __txn_ckp_read @165 + __txn_recycle_read @166 + __txn_regop_read @167 + __txn_xa_regop_read @168 diff --git a/db/build_win32/libdb.rc b/db/build_win32/libdb.rc index b17dd7a4f..16c656239 100644 --- a/db/build_win32/libdb.rc +++ b/db/build_win32/libdb.rc @@ -1,6 +1,6 @@ 1 VERSIONINFO - FILEVERSION 4,0,2,52 - PRODUCTVERSION 4,0,2,52 + FILEVERSION 4,0,3,14 + PRODUCTVERSION 4,0,3,14 FILEFLAGSMASK 0x3fL #ifdef _DEBUG FILEFLAGS 0x1L @@ -18,12 +18,12 @@ BEGIN BEGIN VALUE "CompanyName", "Sleepycat Software\0" VALUE "FileDescription", "Berkeley DB 3.0 DLL\0" - VALUE "FileVersion", "4.2.52\0" + VALUE "FileVersion", "4.3.14\0" VALUE "InternalName", 
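Among the entry points newly exported above is db_sequence_create, the 4.3 sequence API. A minimal sketch of typical use, assuming the 4.3 method signatures as documented (db_sequence_create(), initial_value(), open(), get(), close()); the key name is arbitrary, error handling is trimmed, and dbp is an already open DB handle:

	#include <string.h>
	#include "db.h"

	/* Fetch the next value of a persistent sequence stored in *dbp. */
	int
	next_id(DB *dbp, db_seq_t *idp)
	{
		DB_SEQUENCE *seq;
		DBT key;
		int ret;

		memset(&key, 0, sizeof(key));
		key.data = "my_seq";
		key.size = (u_int32_t)strlen("my_seq");

		if ((ret = db_sequence_create(&seq, dbp, 0)) != 0)
			return (ret);
		if ((ret = seq->initial_value(seq, 1)) == 0 &&
		    (ret = seq->open(seq, NULL, &key, DB_CREATE)) == 0)
			ret = seq->get(seq, NULL, 1, idp, 0);
		(void)seq->close(seq, 0);
		return (ret);
	}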
"libdb.dll\0" - VALUE "LegalCopyright", "Copyright © Sleepycat Software Inc. 1997-2003\0" + VALUE "LegalCopyright", "Copyright © Sleepycat Software Inc. 1997-2004\0" VALUE "OriginalFilename", "libdb.dll\0" VALUE "ProductName", "Sleepycat Software libdb\0" - VALUE "ProductVersion", "4.2.52\0" + VALUE "ProductVersion", "4.3.14\0" END END BLOCK "VarFileInfo" diff --git a/db/build_win32/libdbrc.src b/db/build_win32/libdbrc.src index 3d5df36ef..4c644ea9f 100644 --- a/db/build_win32/libdbrc.src +++ b/db/build_win32/libdbrc.src @@ -20,7 +20,7 @@ BEGIN VALUE "FileDescription", "Berkeley DB 3.0 DLL\0" VALUE "FileVersion", "%MAJOR%.%MINOR%.%PATCH%\0" VALUE "InternalName", "libdb.dll\0" - VALUE "LegalCopyright", "Copyright © Sleepycat Software Inc. 1997-2003\0" + VALUE "LegalCopyright", "Copyright © Sleepycat Software Inc. 1997-2004\0" VALUE "OriginalFilename", "libdb.dll\0" VALUE "ProductName", "Sleepycat Software libdb\0" VALUE "ProductVersion", "%MAJOR%.%MINOR%.%PATCH%\0" diff --git a/db/build_win32/win_db.h b/db/build_win32/win_db.h index cdec9a6bb..ee02e7668 100644 --- a/db/build_win32/win_db.h +++ b/db/build_win32/win_db.h @@ -1,5 +1,5 @@ /*- - * $Id: win_db.h,v 1.51 2003/12/03 21:15:37 bostic Exp $ + * $Id: win_db.h,v 1.123 2004/10/14 15:32:28 bostic Exp $ * * The following provides the information necessary to build Berkeley * DB on native Windows, and other Windows environments such as MinGW. @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -57,3 +58,37 @@ extern int getopt(int, char * const *, const char *); #if defined(__cplusplus) } #endif + +#ifdef _UNICODE +#define TO_TSTRING(dbenv, s, ts, ret) do { \ + int __len = strlen(s) + 1; \ + ts = NULL; \ + if ((ret = __os_malloc((dbenv), \ + __len * sizeof (_TCHAR), &(ts))) == 0 && \ + MultiByteToWideChar(CP_UTF8, 0, \ + (s), -1, (ts), __len) == 0) \ + ret = __os_get_errno(); \ + } while (0) + +#define FROM_TSTRING(dbenv, ts, s, ret) { \ + int __len = WideCharToMultiByte(CP_UTF8, 0, ts, -1, \ + NULL, 0, NULL, NULL); \ + s = NULL; \ + if ((ret = __os_malloc((dbenv), __len, &(s))) == 0 && \ + WideCharToMultiByte(CP_UTF8, 0, \ + (ts), -1, (s), __len, NULL, NULL) == 0) \ + ret = __os_get_errno(); \ + } while (0) + +#define FREE_STRING(dbenv, s) do { \ + if ((s) != NULL) { \ + __os_free((dbenv), (s)); \ + (s) = NULL; \ + } \ + } while (0) + +#else +#define TO_TSTRING(dbenv, s, ts, ret) (ret) = 0, (ts) = (_TCHAR *)(s) +#define FROM_TSTRING(dbenv, ts, s, ret) (ret) = 0, (s) = (char *)(ts) +#define FREE_STRING(dbenv, ts) +#endif diff --git a/db/build_win64/Berkeley_DB.dsw b/db/build_win64/Berkeley_DB.dsw new file mode 100644 index 000000000..bcce71e8a --- /dev/null +++ b/db/build_win64/Berkeley_DB.dsw @@ -0,0 +1,539 @@ +Microsoft Developer Studio Workspace File, Format Version 6.00 +# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE! 
+ +############################################################################### + +Project: "build_all"=.\build_all.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_archive + End Project Dependency + Begin Project Dependency + Project_Dep_Name db_checkpoint + End Project Dependency + Begin Project Dependency + Project_Dep_Name db_deadlock + End Project Dependency + Begin Project Dependency + Project_Dep_Name DB_DLL + End Project Dependency + Begin Project Dependency + Project_Dep_Name db_dump + End Project Dependency + Begin Project Dependency + Project_Dep_Name db_load + End Project Dependency + Begin Project Dependency + Project_Dep_Name db_printlog + End Project Dependency + Begin Project Dependency + Project_Dep_Name db_recover + End Project Dependency + Begin Project Dependency + Project_Dep_Name db_stat + End Project Dependency + Begin Project Dependency + Project_Dep_Name db_upgrade + End Project Dependency + Begin Project Dependency + Project_Dep_Name db_verify + End Project Dependency + Begin Project Dependency + Project_Dep_Name DB_Static + End Project Dependency + Begin Project Dependency + Project_Dep_Name ex_access + End Project Dependency + Begin Project Dependency + Project_Dep_Name ex_btrec + End Project Dependency + Begin Project Dependency + Project_Dep_Name ex_env + End Project Dependency + Begin Project Dependency + Project_Dep_Name ex_lock + End Project Dependency + Begin Project Dependency + Project_Dep_Name ex_mpool + End Project Dependency + Begin Project Dependency + Project_Dep_Name ex_tpcb + End Project Dependency + Begin Project Dependency + Project_Dep_Name excxx_access + End Project Dependency + Begin Project Dependency + Project_Dep_Name excxx_btrec + End Project Dependency + Begin Project Dependency + Project_Dep_Name excxx_env + End Project Dependency + Begin Project Dependency + Project_Dep_Name excxx_lock + End Project Dependency + Begin Project Dependency + Project_Dep_Name excxx_mpool + End Project Dependency + Begin Project Dependency + Project_Dep_Name excxx_tpcb + End Project Dependency + Begin Project Dependency + Project_Dep_Name db_lib + End Project Dependency +}}} + +############################################################################### + +Project: "db_archive"=.\db_archive.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_lib + End Project Dependency +}}} + +############################################################################### + +Project: "db_checkpoint"=.\db_checkpoint.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_lib + End Project Dependency +}}} + +############################################################################### + +Project: "db_deadlock"=.\db_deadlock.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name DB_DLL + End Project Dependency +}}} + +############################################################################### + +Project: "db_dll"=.\db_dll.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ +}}} + +############################################################################### + +Project: "db_dump"=.\db_dump.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_lib + End Project Dependency +}}} + 
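The TO_TSTRING/FROM_TSTRING macros added to win_db.h above wrap the Win32 UTF-8 to UTF-16 conversions for _UNICODE builds. A stand-alone sketch of the same round trip, substituting plain malloc()/free() for the DB-internal __os_malloc()/__os_get_errno() so it compiles outside the library; the buffer sizing mirrors the macros:

	#include <windows.h>
	#include <stdlib.h>
	#include <string.h>

	/* UTF-8 -> UTF-16, as TO_TSTRING does. */
	static WCHAR *
	utf8_to_wide(const char *s)
	{
		int len;
		WCHAR *ws;

		len = (int)strlen(s) + 1;	/* wide count <= byte count */
		if ((ws = malloc(len * sizeof(WCHAR))) == NULL)
			return (NULL);
		if (MultiByteToWideChar(CP_UTF8, 0, s, -1, ws, len) == 0) {
			free(ws);
			return (NULL);
		}
		return (ws);
	}

	/* UTF-16 -> UTF-8, as FROM_TSTRING does. */
	static char *
	wide_to_utf8(const WCHAR *ws)
	{
		int len;
		char *s;

		len = WideCharToMultiByte(CP_UTF8, 0, ws, -1,
		    NULL, 0, NULL, NULL);
		if (len == 0 || (s = malloc(len)) == NULL)
			return (NULL);
		if (WideCharToMultiByte(CP_UTF8, 0, ws, -1,
		    s, len, NULL, NULL) == 0) {
			free(s);
			return (NULL);
		}
		return (s);
	}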
+############################################################################### + +Project: "db_java"=.\db_java.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name DB_DLL + End Project Dependency +}}} + +############################################################################### + +Project: "db_lib"=.\db_lib.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_dll + End Project Dependency + Begin Project Dependency + Project_Dep_Name db_static + End Project Dependency +}}} + +############################################################################### + +Project: "db_load"=.\db_load.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_lib + End Project Dependency +}}} + +############################################################################### + +Project: "db_printlog"=.\db_printlog.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_lib + End Project Dependency +}}} + +############################################################################### + +Project: "db_recover"=.\db_recover.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_lib + End Project Dependency +}}} + +############################################################################### + +Project: "db_stat"=.\db_stat.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_lib + End Project Dependency +}}} + +############################################################################### + +Project: "db_static"=.\db_static.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ +}}} + +############################################################################### + +Project: "db_tcl"=.\db_tcl.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name DB_DLL + End Project Dependency +}}} + +############################################################################### + +Project: "db_test"=.\db_test.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name build_all + End Project Dependency + Begin Project Dependency + Project_Dep_Name db_tcl + End Project Dependency +}}} + +############################################################################### + +Project: "db_upgrade"=.\db_upgrade.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_lib + End Project Dependency +}}} + +############################################################################### + +Project: "db_verify"=.\db_verify.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_lib + End Project Dependency +}}} + +############################################################################### + +Project: "ex_access"=.\ex_access.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_lib + End Project Dependency +}}} + +############################################################################### + +Project: "ex_btrec"=.\ex_btrec.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_lib + 
End Project Dependency +}}} + +############################################################################### + +Project: "ex_env"=.\ex_env.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_lib + End Project Dependency +}}} + +############################################################################### + +Project: "ex_lock"=.\ex_lock.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_lib + End Project Dependency +}}} + +############################################################################### + +Project: "ex_mpool"=.\ex_mpool.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_lib + End Project Dependency +}}} + +############################################################################### + +Project: "ex_repquote"=.\ex_repquote.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_lib + End Project Dependency +}}} + +############################################################################### + +Project: "ex_tpcb"=.\ex_tpcb.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_lib + End Project Dependency +}}} + +############################################################################### + +Project: "excxx_access"=.\excxx_access.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_lib + End Project Dependency +}}} + +############################################################################### + +Project: "excxx_btrec"=.\excxx_btrec.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_lib + End Project Dependency +}}} + +############################################################################### + +Project: "excxx_env"=.\excxx_env.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_lib + End Project Dependency +}}} + +############################################################################### + +Project: "excxx_lock"=.\excxx_lock.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_lib + End Project Dependency +}}} + +############################################################################### + +Project: "excxx_mpool"=.\excxx_mpool.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_lib + End Project Dependency +}}} + +############################################################################### + +Project: "excxx_tpcb"=.\excxx_tpcb.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_lib + End Project Dependency +}}} + +############################################################################### + +Global: + +Package=<5> +{{{ +}}} + +Package=<3> +{{{ +}}} + +############################################################################### + diff --git a/db/build_win64/app_dsp.src b/db/build_win64/app_dsp.src new file mode 100644 index 000000000..30af5b6d7 --- /dev/null +++ b/db/build_win64/app_dsp.src @@ -0,0 +1,145 @@ +# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> +# Microsoft Developer Studio Generated 
Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=@project_name@ - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "@project_name@.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "@project_name@ - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "@project_name@ - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "@project_name@ - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." 
/D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "@project_name@ - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." 
/D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "@project_name@ - Win32 Release" +# Name "@project_name@ - Win32 Debug" +# Name "@project_name@ - Win32 Release Static" +# Name "@project_name@ - Win32 Debug Static" +@SOURCE_FILES@ +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/build_all.dsp b/db/build_win64/build_all.dsp new file mode 100644 index 000000000..7ae1f9bb0 --- /dev/null +++ b/db/build_win64/build_all.dsp @@ -0,0 +1,96 @@ +# Microsoft Developer Studio Project File - Name="build_all" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Generic Project" 0x010a + +CFG=build_all - Win32 Debug +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "build_all.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "build_all.mak" CFG="build_all - Win32 Debug" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "build_all - Win32 Release" (based on "Win32 (x86) External Target") +!MESSAGE "build_all - Win32 Debug" (based on "Win32 (x86) External Target") +!MESSAGE "build_all - Win32 Release Static" (based on "Win32 (x86) External Target") +!MESSAGE "build_all - Win32 Debug Static" (based on "Win32 (x86) External Target") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" + +!IF "$(CFG)" == "build_all - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Cmd_Line "echo DB Release version built." +# PROP Target_Dir "" + +!ELSEIF "$(CFG)" == "build_all - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Cmd_Line "echo DB Debug version built." 
+# PROP Target_Dir "" + +!ELSEIF "$(CFG)" == "build_all - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release_static" +# PROP BASE Intermediate_Dir "Release_static" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Cmd_Line "echo DB Release Static version built." +# PROP Target_Dir "" + +!ELSEIF "$(CFG)" == "build_all - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug_static" +# PROP BASE Intermediate_Dir "Debug_Static" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_Static" +# PROP Cmd_Line "echo DB Debug Static version built." +# PROP Target_Dir "" + +!ENDIF + +# Begin Target + +# Name "build_all - Win32 Release" +# Name "build_all - Win32 Debug" +# Name "build_all - Win32 Release Static" +# Name "build_all - Win32 Debug Static" +# End Target +# End Project diff --git a/db/build_win64/db.h b/db/build_win64/db.h new file mode 100644 index 000000000..315e3b215 --- /dev/null +++ b/db/build_win64/db.h @@ -0,0 +1,2242 @@ +/* DO NOT EDIT: automatically built by dist/s_win32. */ +/* + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1996-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: db.h,v 11.20 2004/10/14 15:32:29 bostic Exp $ + * + * db.h include file layout: + * General. + * Database Environment. + * Locking subsystem. + * Logging subsystem. + * Shared buffer cache (mpool) subsystem. + * Transaction subsystem. + * Access methods. + * Access method cursors. + * Dbm/Ndbm, Hsearch historic interfaces. + */ + +#ifndef _DB_H_ +#define _DB_H_ + +#ifndef __NO_SYSTEM_INCLUDES +#include +#include +#include +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + + +#undef __P +#define __P(protos) protos + +/* + * Berkeley DB version information. + */ +#define DB_VERSION_MAJOR 4 +#define DB_VERSION_MINOR 3 +#define DB_VERSION_PATCH 14 +#define DB_VERSION_STRING "Sleepycat Software: Berkeley DB 4.3.14: (October 14, 2004)" + +/* + * !!! + * Berkeley DB uses specifically sized types. If they're not provided by + * the system, typedef them here. + * + * We protect them against multiple inclusion using __BIT_TYPES_DEFINED__, + * as does BIND and Kerberos, since we don't know for sure what #include + * files the user is using. + * + * !!! + * We also provide the standard u_int, u_long etc., if they're not provided + * by the system. + */ +#ifndef __BIT_TYPES_DEFINED__ +#define __BIT_TYPES_DEFINED__ +typedef unsigned char u_int8_t; +typedef short int16_t; +typedef unsigned short u_int16_t; +typedef int int32_t; +typedef unsigned int u_int32_t; +typedef __int64 int64_t; +typedef unsigned __int64 u_int64_t; +#endif + +#ifndef _WINSOCKAPI_ +typedef unsigned char u_char; +typedef unsigned short u_short; +typedef unsigned int u_int; +typedef unsigned long u_long; +#endif +#ifdef _WIN64 +typedef int64_t ssize_t; +#else +typedef int32_t ssize_t; +#endif + +/* + * uintmax_t -- + * Largest unsigned type, used to align structures in memory. We don't store + * floating point types in structures, so integral types should be sufficient + * (and we don't have to worry about systems that store floats in other than + * power-of-2 numbers of bytes). 
Additionally this fixes compilers that rewrite + * structure assignments and ANSI C memcpy calls to be in-line instructions + * that happen to require alignment. Note: this alignment isn't sufficient for + * mutexes, which depend on things like cache line alignment. Mutex alignment + * is handled separately, in mutex.h. + * + * uintptr_t -- + * Unsigned type that's the same size as a pointer. There are places where + * DB modifies pointers by discarding the bottom bits to guarantee alignment. + * We can't use uintmax_t, it may be larger than the pointer, and compilers + * get upset about that. So far we haven't run on any machine where there's + * no unsigned type the same size as a pointer -- here's hoping. + */ +typedef u_int64_t uintmax_t; +#ifdef _WIN64 +typedef u_int64_t uintptr_t; +#else +typedef u_int32_t uintptr_t; +#endif + +/* + * Sequences are only available on machines with 64-bit integral types. + */ +typedef int64_t db_seq_t; + +/* Basic types that are exported or quasi-exported. */ +typedef u_int32_t db_pgno_t; /* Page number type. */ +typedef u_int16_t db_indx_t; /* Page offset type. */ +#define DB_MAX_PAGES 0xffffffff /* >= # of pages in a file */ + +typedef u_int32_t db_recno_t; /* Record number type. */ +#define DB_MAX_RECORDS 0xffffffff /* >= # of records in a tree */ + +typedef u_int32_t db_timeout_t; /* Type of a timeout. */ + +/* + * Region offsets are the difference between a pointer in a region and the + * region's base address. With private environments, both addresses are the + * result of calling malloc, and we can't assume anything about what malloc + * will return, so region offsets have to be able to hold differences between + * arbitrary pointers. + */ +typedef uintptr_t roff_t; + +/* + * Forward structure declarations, so we can declare pointers and + * applications can get type checking. 
+ */ +struct __db; typedef struct __db DB; +struct __db_bt_stat; typedef struct __db_bt_stat DB_BTREE_STAT; +struct __db_cipher; typedef struct __db_cipher DB_CIPHER; +struct __db_dbt; typedef struct __db_dbt DBT; +struct __db_env; typedef struct __db_env DB_ENV; +struct __db_h_stat; typedef struct __db_h_stat DB_HASH_STAT; +struct __db_ilock; typedef struct __db_ilock DB_LOCK_ILOCK; +struct __db_lock_stat; typedef struct __db_lock_stat DB_LOCK_STAT; +struct __db_lock_u; typedef struct __db_lock_u DB_LOCK; +struct __db_lockreq; typedef struct __db_lockreq DB_LOCKREQ; +struct __db_log_cursor; typedef struct __db_log_cursor DB_LOGC; +struct __db_log_stat; typedef struct __db_log_stat DB_LOG_STAT; +struct __db_lsn; typedef struct __db_lsn DB_LSN; +struct __db_mpool; typedef struct __db_mpool DB_MPOOL; +struct __db_mpool_fstat;typedef struct __db_mpool_fstat DB_MPOOL_FSTAT; +struct __db_mpool_stat; typedef struct __db_mpool_stat DB_MPOOL_STAT; +struct __db_mpoolfile; typedef struct __db_mpoolfile DB_MPOOLFILE; +struct __db_preplist; typedef struct __db_preplist DB_PREPLIST; +struct __db_qam_stat; typedef struct __db_qam_stat DB_QUEUE_STAT; +struct __db_rep; typedef struct __db_rep DB_REP; +struct __db_rep_stat; typedef struct __db_rep_stat DB_REP_STAT; +struct __db_sequence; typedef struct __db_sequence DB_SEQUENCE; +struct __db_seq_record; typedef struct __db_seq_record DB_SEQ_RECORD; +struct __db_seq_stat; typedef struct __db_seq_stat DB_SEQUENCE_STAT; +struct __db_txn; typedef struct __db_txn DB_TXN; +struct __db_txn_active; typedef struct __db_txn_active DB_TXN_ACTIVE; +struct __db_txn_stat; typedef struct __db_txn_stat DB_TXN_STAT; +struct __db_txnmgr; typedef struct __db_txnmgr DB_TXNMGR; +struct __dbc; typedef struct __dbc DBC; +struct __dbc_internal; typedef struct __dbc_internal DBC_INTERNAL; +struct __fh_t; typedef struct __fh_t DB_FH; +struct __fname; typedef struct __fname FNAME; +struct __key_range; typedef struct __key_range DB_KEY_RANGE; +struct __mpoolfile; typedef struct __mpoolfile MPOOLFILE; +struct __mutex_t; typedef struct __mutex_t DB_MUTEX; + +/* Key/data structure -- a Data-Base Thang. */ +struct __db_dbt { + /* + * data/size must be fields 1 and 2 for DB 1.85 compatibility. + */ + void *data; /* Key/data */ + u_int32_t size; /* key/data length */ + + u_int32_t ulen; /* RO: length of user buffer. */ + u_int32_t dlen; /* RO: get/put record length. */ + u_int32_t doff; /* RO: get/put record offset. */ + +#define DB_DBT_APPMALLOC 0x001 /* Callback allocated memory. */ +#define DB_DBT_ISSET 0x002 /* Lower level calls set value. */ +#define DB_DBT_MALLOC 0x004 /* Return in malloc'd memory. */ +#define DB_DBT_PARTIAL 0x008 /* Partial put/get. */ +#define DB_DBT_REALLOC 0x010 /* Return in realloc'd memory. */ +#define DB_DBT_USERMEM 0x020 /* Return in user's memory. */ +#define DB_DBT_DUPOK 0x040 /* Insert if duplicate. */ + u_int32_t flags; +}; + +/* + * Common flags -- + * Interfaces which use any of these common flags should never have + * interface specific flags in this range. + */ +#define DB_CREATE 0x0000001 /* Create file as necessary. */ +#define DB_CXX_NO_EXCEPTIONS 0x0000002 /* C++: return error values. */ +#define DB_FORCE 0x0000004 /* Force (anything). */ +#define DB_NOMMAP 0x0000008 /* Don't mmap underlying file. */ +#define DB_RDONLY 0x0000010 /* Read-only (O_RDONLY). */ +#define DB_RECOVER 0x0000020 /* Run normal recovery. */ +#define DB_THREAD 0x0000040 /* Applications are threaded. */ +#define DB_TRUNCATE 0x0000080 /* Discard existing DB (O_TRUNC). 
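The DBT structure and its DB_DBT_* flags above control how data moves between the application and the library. A minimal sketch of a put/get pair against an already open DB handle, letting the library allocate the returned value via DB_DBT_MALLOC (error handling trimmed; dbp is assumed to have been opened elsewhere):

	#include <stdlib.h>
	#include <string.h>
	#include "db.h"

	int
	store_and_fetch(DB *dbp)
	{
		DBT key, data;
		int ret;

		memset(&key, 0, sizeof(key));
		memset(&data, 0, sizeof(data));
		key.data = "fruit";
		key.size = (u_int32_t)strlen("fruit");
		data.data = "apple";
		data.size = (u_int32_t)strlen("apple");

		if ((ret = dbp->put(dbp, NULL, &key, &data, 0)) != 0)
			return (ret);

		/* Ask the library to malloc the returned value for us. */
		memset(&data, 0, sizeof(data));
		data.flags = DB_DBT_MALLOC;
		if ((ret = dbp->get(dbp, NULL, &key, &data, 0)) == 0)
			free(data.data);	/* caller owns this memory */
		return (ret);
	}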
*/ +#define DB_TXN_NOSYNC 0x0000100 /* Do not sync log on commit. */ +#define DB_TXN_NOT_DURABLE 0x0000200 /* Do not log changes. */ +#define DB_USE_ENVIRON 0x0000400 /* Use the environment. */ +#define DB_USE_ENVIRON_ROOT 0x0000800 /* Use the environment if root. */ + +/* + * Common flags -- + * Interfaces which use any of these common flags should never have + * interface specific flags in this range. + * + * DB_AUTO_COMMIT: + * DB_ENV->set_flags, DB->associate, DB->del, DB->put, DB->open, + * DB->remove, DB->rename, DB->truncate + * DB_DEGREE_2: + * DB->cursor, DB->get, DB->join, DBcursor->c_get, DB_ENV->txn_begin + * DB_DIRTY_READ: + * DB->cursor, DB->get, DB->join, DB->open, DBcursor->c_get, + * DB_ENV->txn_begin + * DB_NOAUTO_COMMIT + * DB->associate, DB->del, DB->put, DB->open, + * DB->remove, DB->rename, DB->truncate + * + * !!! + * The DB_DIRTY_READ and DB_DEGREE_2 bit masks can't be changed without + * also changing the masks for the flags that can be OR'd into DB + * access method and cursor operation values. + */ +#define DB_AUTO_COMMIT 0x01000000/* Implied transaction. */ +#define DB_DEGREE_2 0x02000000/* Degree 2. */ +#define DB_DIRTY_READ 0x04000000/* Dirty Read. */ +#define DB_NO_AUTO_COMMIT 0x08000000/* Override env-wide AUTOCOMMIT. */ + +/* + * Flags private to db_env_create. + */ +#define DB_RPCCLIENT 0x0000001 /* An RPC client environment. */ + +/* + * Flags private to db_create. + */ +#define DB_REP_CREATE 0x0000001 /* Open of an internal rep database. */ +#define DB_XA_CREATE 0x0000002 /* Open in an XA environment. */ + +/* + * Flags private to DB_ENV->open. + * Shared flags up to 0x0000800 */ +#define DB_INIT_CDB 0x0001000 /* Concurrent Access Methods. */ +#define DB_INIT_LOCK 0x0002000 /* Initialize locking. */ +#define DB_INIT_LOG 0x0004000 /* Initialize logging. */ +#define DB_INIT_MPOOL 0x0008000 /* Initialize mpool. */ +#define DB_INIT_REP 0x0010000 /* Initialize replication. */ +#define DB_INIT_TXN 0x0020000 /* Initialize transactions. */ +#define DB_JOINENV 0x0040000 /* Initialize all subsystems present. */ +#define DB_LOCKDOWN 0x0080000 /* Lock memory into physical core. */ +#define DB_PRIVATE 0x0100000 /* DB_ENV is process local. */ +#define DB_RECOVER_FATAL 0x0200000 /* Run catastrophic recovery. */ +#define DB_SYSTEM_MEM 0x0400000 /* Use system-backed memory. */ + +/* + * Flags private to DB->open. + * Shared flags up to 0x0000800 */ +#define DB_EXCL 0x0001000 /* Exclusive open (O_EXCL). */ +#define DB_FCNTL_LOCKING 0x0002000 /* UNDOC: fcntl(2) locking. */ +#define DB_RDWRMASTER 0x0004000 /* UNDOC: allow subdb master open R/W */ +#define DB_WRITEOPEN 0x0008000 /* UNDOC: open with write lock. */ + +/* + * Flags private to DB_ENV->txn_begin. + * Shared flags up to 0x0000800 */ +#define DB_TXN_NOWAIT 0x0001000 /* Do not wait for locks in this TXN. */ +#define DB_TXN_SYNC 0x0002000 /* Always sync log on commit. */ + +/* + * Flags private to DB_ENV->set_encrypt. + */ +#define DB_ENCRYPT_AES 0x0000001 /* AES, assumes SHA1 checksum */ + +/* + * Flags private to DB_ENV->set_flags. + * Shared flags up to 0x00000800 */ +#define DB_CDB_ALLDB 0x00001000/* Set CDB locking per environment. */ +#define DB_DIRECT_DB 0x00002000/* Don't buffer databases in the OS. */ +#define DB_DIRECT_LOG 0x00004000/* Don't buffer log files in the OS. */ +#define DB_DSYNC_LOG 0x00008000/* Set O_DSYNC on the log. */ +#define DB_LOG_AUTOREMOVE 0x00010000/* Automatically remove log files. */ +#define DB_LOG_INMEMORY 0x00020000/* Store logs in buffers in memory. 
*/ +#define DB_NOLOCKING 0x00040000/* Set locking/mutex behavior. */ +#define DB_NOPANIC 0x00080000/* Set panic state per DB_ENV. */ +#define DB_OVERWRITE 0x00100000/* Overwrite unlinked region files. */ +#define DB_PANIC_ENVIRONMENT 0x00200000/* Set panic state per environment. */ +#define DB_REGION_INIT 0x00400000/* Page-fault regions on open. */ +#define DB_TIME_NOTGRANTED 0x00800000/* Return NOTGRANTED on timeout. */ +/* Shared flags at 0x01000000 */ +/* Shared flags at 0x02000000 */ +/* Shared flags at 0x04000000 */ +/* Shared flags at 0x08000000 */ +#define DB_TXN_WRITE_NOSYNC 0x10000000/* Write, don't sync, on txn commit. */ +#define DB_YIELDCPU 0x20000000/* Yield the CPU (a lot). */ + +/* + * Flags private to DB->set_feedback's callback. + */ +#define DB_UPGRADE 0x0000001 /* Upgrading. */ +#define DB_VERIFY 0x0000002 /* Verifying. */ + +/* + * Flags private to DB_MPOOLFILE->open. + * Shared flags up to 0x0000800 */ +#define DB_DIRECT 0x0001000 /* Don't buffer the file in the OS. */ +#define DB_DURABLE_UNKNOWN 0x0002000 /* internal: durability on open. */ +#define DB_EXTENT 0x0004000 /* internal: dealing with an extent. */ +#define DB_ODDFILESIZE 0x0008000 /* Truncate file to N * pgsize. */ + +/* + * Flags private to DB->set_flags. + */ +#define DB_CHKSUM 0x0000001 /* Do checksumming */ +#define DB_DUP 0x0000002 /* Btree, Hash: duplicate keys. */ +#define DB_DUPSORT 0x0000004 /* Btree, Hash: duplicate keys. */ +#define DB_ENCRYPT 0x0000008 /* Btree, Hash: duplicate keys. */ +#define DB_INORDER 0x0000010 /* Queue: strict ordering on consume. */ +#define DB_RECNUM 0x0000020 /* Btree: record numbers. */ +#define DB_RENUMBER 0x0000040 /* Recno: renumber on insert/delete. */ +#define DB_REVSPLITOFF 0x0000080 /* Btree: turn off reverse splits. */ +#define DB_SNAPSHOT 0x0000100 /* Recno: snapshot the input. */ + +/* + * Flags private to the DB_ENV->stat_print, DB->stat and DB->stat_print methods. + */ +#define DB_STAT_ALL 0x0000001 /* Print: Everything. */ +#define DB_STAT_CLEAR 0x0000002 /* Clear stat after returning values. */ +#define DB_STAT_LOCK_CONF 0x0000004 /* Print: Lock conflict matrix. */ +#define DB_STAT_LOCK_LOCKERS 0x0000008 /* Print: Lockers. */ +#define DB_STAT_LOCK_OBJECTS 0x0000010 /* Print: Lock objects. */ +#define DB_STAT_LOCK_PARAMS 0x0000020 /* Print: Lock parameters. */ +#define DB_STAT_MEMP_HASH 0x0000040 /* Print: Mpool hash buckets. */ +#define DB_STAT_SUBSYSTEM 0x0000080 /* Print: Subsystems too. */ + +/* + * Flags private to DB->join. + */ +#define DB_JOIN_NOSORT 0x0000001 /* Don't try to optimize join. */ + +/* + * Flags private to DB->verify. + */ +#define DB_AGGRESSIVE 0x0000001 /* Salvage whatever could be data.*/ +#define DB_NOORDERCHK 0x0000002 /* Skip sort order/hashing check. */ +#define DB_ORDERCHKONLY 0x0000004 /* Only perform the order check. */ +#define DB_PR_PAGE 0x0000008 /* Show page contents (-da). */ +#define DB_PR_RECOVERYTEST 0x0000010 /* Recovery test (-dr). */ +#define DB_PRINTABLE 0x0000020 /* Use printable format for salvage. */ +#define DB_SALVAGE 0x0000040 /* Salvage what looks like data. */ +#define DB_UNREF 0x0000080 /* Report unreferenced pages. */ +/* + * !!! + * These must not go over 0x8000, or they will collide with the flags + * used by __bam_vrfy_subtree. + */ + +/* + * Flags private to DB->set_rep_transport's send callback. + */ +#define DB_REP_NOBUFFER 0x0000001 /* Do not buffer this message. */ +#define DB_REP_PERMANENT 0x0000002 /* Important--app. may want to flush. 
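The DB_ENV->open and DB_ENV->set_flags flag groups above are what an application combines to create a transactional environment. A minimal sketch (the home directory is a placeholder and error handling is trimmed):

	#include "db.h"

	int
	open_env(DB_ENV **dbenvp, const char *home)
	{
		DB_ENV *dbenv;
		int ret;

		if ((ret = db_env_create(&dbenv, 0)) != 0)
			return (ret);

		/* Full transactional environment; run recovery on open. */
		if ((ret = dbenv->open(dbenv, home,
		    DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
		    DB_INIT_MPOOL | DB_INIT_TXN | DB_RECOVER, 0)) != 0) {
			(void)dbenv->close(dbenv, 0);
			return (ret);
		}
		*dbenvp = dbenv;
		return (0);
	}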
*/ + +/******************************************************* + * Locking. + *******************************************************/ +#define DB_LOCKVERSION 1 + +#define DB_FILE_ID_LEN 20 /* Unique file ID length. */ + +/* + * Deadlock detector modes; used in the DB_ENV structure to configure the + * locking subsystem. + */ +#define DB_LOCK_NORUN 0 +#define DB_LOCK_DEFAULT 1 /* Default policy. */ +#define DB_LOCK_EXPIRE 2 /* Only expire locks, no detection. */ +#define DB_LOCK_MAXLOCKS 3 /* Select locker with max locks. */ +#define DB_LOCK_MAXWRITE 4 /* Select locker with max writelocks. */ +#define DB_LOCK_MINLOCKS 5 /* Select locker with min locks. */ +#define DB_LOCK_MINWRITE 6 /* Select locker with min writelocks. */ +#define DB_LOCK_OLDEST 7 /* Select oldest locker. */ +#define DB_LOCK_RANDOM 8 /* Select random locker. */ +#define DB_LOCK_YOUNGEST 9 /* Select youngest locker. */ + +/* Flag values for lock_vec(), lock_get(). */ +#define DB_LOCK_ABORT 0x001 /* Internal: Lock during abort. */ +#define DB_LOCK_NOWAIT 0x002 /* Don't wait on unavailable lock. */ +#define DB_LOCK_RECORD 0x004 /* Internal: record lock. */ +#define DB_LOCK_REMOVE 0x008 /* Internal: flag object removed. */ +#define DB_LOCK_SET_TIMEOUT 0x010 /* Internal: set lock timeout. */ +#define DB_LOCK_SWITCH 0x020 /* Internal: switch existing lock. */ +#define DB_LOCK_UPGRADE 0x040 /* Internal: upgrade existing lock. */ + +/* + * Simple R/W lock modes and for multi-granularity intention locking. + * + * !!! + * These values are NOT random, as they are used as an index into the lock + * conflicts arrays, i.e., DB_LOCK_IWRITE must be == 3, and DB_LOCK_IREAD + * must be == 4. + */ +typedef enum { + DB_LOCK_NG=0, /* Not granted. */ + DB_LOCK_READ=1, /* Shared/read. */ + DB_LOCK_WRITE=2, /* Exclusive/write. */ + DB_LOCK_WAIT=3, /* Wait for event */ + DB_LOCK_IWRITE=4, /* Intent exclusive/write. */ + DB_LOCK_IREAD=5, /* Intent to share/read. */ + DB_LOCK_IWR=6, /* Intent to read and write. */ + DB_LOCK_DIRTY=7, /* Dirty Read. */ + DB_LOCK_WWRITE=8 /* Was Written. */ +} db_lockmode_t; + +/* + * Request types. + */ +typedef enum { + DB_LOCK_DUMP=0, /* Display held locks. */ + DB_LOCK_GET=1, /* Get the lock. */ + DB_LOCK_GET_TIMEOUT=2, /* Get lock with a timeout. */ + DB_LOCK_INHERIT=3, /* Pass locks to parent. */ + DB_LOCK_PUT=4, /* Release the lock. */ + DB_LOCK_PUT_ALL=5, /* Release locker's locks. */ + DB_LOCK_PUT_OBJ=6, /* Release locker's locks on obj. */ + DB_LOCK_PUT_READ=7, /* Release locker's read locks. */ + DB_LOCK_TIMEOUT=8, /* Force a txn to timeout. */ + DB_LOCK_TRADE=9, /* Trade locker ids on a lock. */ + DB_LOCK_UPGRADE_WRITE=10 /* Upgrade writes for dirty reads. */ +} db_lockop_t; + +/* + * Status of a lock. + */ +typedef enum { + DB_LSTAT_ABORTED=1, /* Lock belongs to an aborted txn. */ + DB_LSTAT_EXPIRED=2, /* Lock has expired. */ + DB_LSTAT_FREE=3, /* Lock is unallocated. */ + DB_LSTAT_HELD=4, /* Lock is currently held. */ + DB_LSTAT_NOTEXIST=5, /* Object on which lock was waiting + * was removed */ + DB_LSTAT_PENDING=6, /* Lock was waiting and has been + * promoted; waiting for the owner + * to run and upgrade it to held. */ + DB_LSTAT_WAITING=7 /* Lock is on the wait queue. */ +}db_status_t; + +/* Lock statistics structure. */ +struct __db_lock_stat { + u_int32_t st_id; /* Last allocated locker ID. */ + u_int32_t st_cur_maxid; /* Current maximum unused ID. */ + u_int32_t st_maxlocks; /* Maximum number of locks in table. */ + u_int32_t st_maxlockers; /* Maximum num of lockers in table. 
*/ + u_int32_t st_maxobjects; /* Maximum num of objects in table. */ + int st_nmodes; /* Number of lock modes. */ + u_int32_t st_nlocks; /* Current number of locks. */ + u_int32_t st_maxnlocks; /* Maximum number of locks so far. */ + u_int32_t st_nlockers; /* Current number of lockers. */ + u_int32_t st_maxnlockers; /* Maximum number of lockers so far. */ + u_int32_t st_nobjects; /* Current number of objects. */ + u_int32_t st_maxnobjects; /* Maximum number of objects so far. */ + u_int32_t st_nconflicts; /* Number of lock conflicts. */ + u_int32_t st_nrequests; /* Number of lock gets. */ + u_int32_t st_nreleases; /* Number of lock puts. */ + u_int32_t st_nnowaits; /* Number of requests that would have + waited, but NOWAIT was set. */ + u_int32_t st_ndeadlocks; /* Number of lock deadlocks. */ + db_timeout_t st_locktimeout; /* Lock timeout. */ + u_int32_t st_nlocktimeouts; /* Number of lock timeouts. */ + db_timeout_t st_txntimeout; /* Transaction timeout. */ + u_int32_t st_ntxntimeouts; /* Number of transaction timeouts. */ + u_int32_t st_region_wait; /* Region lock granted after wait. */ + u_int32_t st_region_nowait; /* Region lock granted without wait. */ + roff_t st_regsize; /* Region size. */ +}; + +/* + * DB_LOCK_ILOCK -- + * Internal DB access method lock. + */ +struct __db_ilock { + db_pgno_t pgno; /* Page being locked. */ + u_int8_t fileid[DB_FILE_ID_LEN];/* File id. */ +#define DB_HANDLE_LOCK 1 +#define DB_RECORD_LOCK 2 +#define DB_PAGE_LOCK 3 + u_int32_t type; /* Type of lock. */ +}; + +/* + * DB_LOCK -- + * The structure is allocated by the caller and filled in during a + * lock_get request (or a lock_vec/DB_LOCK_GET). + */ +struct __db_lock_u { + roff_t off; /* Offset of the lock in the region */ + u_int32_t ndx; /* Index of the object referenced by + * this lock; used for locking. */ + u_int32_t gen; /* Generation number of this lock. */ + db_lockmode_t mode; /* mode of this lock. */ +}; + +/* Lock request structure. */ +struct __db_lockreq { + db_lockop_t op; /* Operation. */ + db_lockmode_t mode; /* Requested mode. */ + db_timeout_t timeout; /* Time to expire lock. */ + DBT *obj; /* Object being locked. */ + DB_LOCK lock; /* Lock returned. */ +}; + +/******************************************************* + * Logging. + *******************************************************/ +#define DB_LOGVERSION 10 /* Current log version. */ +#define DB_LOGOLDVER 10 /* Oldest log version supported. */ +#define DB_LOGMAGIC 0x040988 + +/* Flag values for DB_ENV->log_archive(). */ +#define DB_ARCH_ABS 0x001 /* Absolute pathnames. */ +#define DB_ARCH_DATA 0x002 /* Data files. */ +#define DB_ARCH_LOG 0x004 /* Log files. */ +#define DB_ARCH_REMOVE 0x008 /* Remove log files. */ + +/* Flag values for DB_ENV->log_put(). */ +#define DB_FLUSH 0x001 /* Flush data to disk (public). */ +#define DB_LOG_CHKPNT 0x002 /* Flush supports a checkpoint */ +#define DB_LOG_COMMIT 0x004 /* Flush supports a commit */ +#define DB_LOG_NOCOPY 0x008 /* Don't copy data */ +#define DB_LOG_NOT_DURABLE 0x010 /* Do not log; keep in memory */ +#define DB_LOG_PERM 0x020 /* Flag record with REP_PERMANENT */ +#define DB_LOG_RESEND 0x040 /* Resent log record */ +#define DB_LOG_WRNOSYNC 0x080 /* Write, don't sync log_put */ + +/* + * A DB_LSN has two parts, a fileid which identifies a specific file, and an + * offset within that file. The fileid is an unsigned 4-byte quantity that + * uniquely identifies a file within the log directory -- currently a simple + * counter inside the log. The offset is also an unsigned 4-byte value. 
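For the locking interfaces above, the usual pattern is to allocate a locker ID, request a lock on an application-defined object, and release both when done. A minimal sketch against an environment opened with DB_INIT_LOCK (the object name is arbitrary; error handling is trimmed):

	#include <string.h>
	#include "db.h"

	int
	lock_demo(DB_ENV *dbenv)
	{
		DB_LOCK lock;
		DBT obj;
		u_int32_t locker;
		int ret;

		if ((ret = dbenv->lock_id(dbenv, &locker)) != 0)
			return (ret);

		memset(&obj, 0, sizeof(obj));
		obj.data = "my-resource";
		obj.size = (u_int32_t)strlen("my-resource");

		/* Exclusive lock; DB_LOCK_NOWAIT would fail instead of block. */
		if ((ret = dbenv->lock_get(dbenv,
		    locker, 0, &obj, DB_LOCK_WRITE, &lock)) == 0)
			ret = dbenv->lock_put(dbenv, &lock);

		(void)dbenv->lock_id_free(dbenv, locker);
		return (ret);
	}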
The + * log manager guarantees the offset is never more than 4 bytes by switching + * to a new log file before the maximum length imposed by an unsigned 4-byte + * offset is reached. + */ +struct __db_lsn { + u_int32_t file; /* File ID. */ + u_int32_t offset; /* File offset. */ +}; + +/* + * Application-specified log record types start at DB_user_BEGIN, and must not + * equal or exceed DB_debug_FLAG. + * + * DB_debug_FLAG is the high-bit of the u_int32_t that specifies a log record + * type. If the flag is set, it's a log record that was logged for debugging + * purposes only, even if it reflects a database change -- the change was part + * of a non-durable transaction. + */ +#define DB_user_BEGIN 10000 +#define DB_debug_FLAG 0x80000000 + +/* + * DB_LOGC -- + * Log cursor. + */ +struct __db_log_cursor { + DB_ENV *dbenv; /* Enclosing dbenv. */ + + DB_FH *c_fhp; /* File handle. */ + DB_LSN c_lsn; /* Cursor: LSN */ + u_int32_t c_len; /* Cursor: record length */ + u_int32_t c_prev; /* Cursor: previous record's offset */ + + DBT c_dbt; /* Return DBT. */ + +#define DB_LOGC_BUF_SIZE (32 * 1024) + u_int8_t *bp; /* Allocated read buffer. */ + u_int32_t bp_size; /* Read buffer length in bytes. */ + u_int32_t bp_rlen; /* Read buffer valid data length. */ + DB_LSN bp_lsn; /* Read buffer first byte LSN. */ + + u_int32_t bp_maxrec; /* Max record length in the log file. */ + + /* Methods. */ + int (*close) __P((DB_LOGC *, u_int32_t)); + int (*get) __P((DB_LOGC *, DB_LSN *, DBT *, u_int32_t)); + +#define DB_LOG_DISK 0x01 /* Log record came from disk. */ +#define DB_LOG_LOCKED 0x02 /* Log region already locked */ +#define DB_LOG_SILENT_ERR 0x04 /* Turn-off error messages. */ + u_int32_t flags; +}; + +/* Log statistics structure. */ +struct __db_log_stat { + u_int32_t st_magic; /* Log file magic number. */ + u_int32_t st_version; /* Log file version number. */ + int st_mode; /* Log file mode. */ + u_int32_t st_lg_bsize; /* Log buffer size. */ + u_int32_t st_lg_size; /* Log file size. */ + u_int32_t st_w_bytes; /* Bytes to log. */ + u_int32_t st_w_mbytes; /* Megabytes to log. */ + u_int32_t st_wc_bytes; /* Bytes to log since checkpoint. */ + u_int32_t st_wc_mbytes; /* Megabytes to log since checkpoint. */ + u_int32_t st_wcount; /* Total writes to the log. */ + u_int32_t st_wcount_fill; /* Overflow writes to the log. */ + u_int32_t st_scount; /* Total syncs to the log. */ + u_int32_t st_region_wait; /* Region lock granted after wait. */ + u_int32_t st_region_nowait; /* Region lock granted without wait. */ + u_int32_t st_cur_file; /* Current log file number. */ + u_int32_t st_cur_offset; /* Current log file offset. */ + u_int32_t st_disk_file; /* Known on disk log file number. */ + u_int32_t st_disk_offset; /* Known on disk log file offset. */ + roff_t st_regsize; /* Region size. */ + u_int32_t st_maxcommitperflush; /* Max number of commits in a flush. */ + u_int32_t st_mincommitperflush; /* Min number of commits in a flush. */ +}; + +/* + * We need to record the first log record of a transaction. + * For user defined logging this macro returns the place to + * put that information, if it is need in rlsnp, otherwise it + * leaves it unchanged. + */ +#define DB_SET_BEGIN_LSNP(txn, rlsnp) ((txn)->set_begin_lsnp(txn, rlsnp)) + +/******************************************************* + * Shared buffer cache (mpool). + *******************************************************/ +/* Flag values for DB_MPOOLFILE->get. */ +#define DB_MPOOL_CREATE 0x001 /* Create a page. 
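 *
 * A minimal sketch of walking the log through the DB_LOGC handle above,
 * assuming an already opened DB_ENV *dbenv with logging initialized;
 * error handling omitted:
 *
 *     DB_LOGC *logc;
 *     DB_LSN lsn;
 *     DBT rec;
 *     int ret;
 *
 *     memset(&rec, 0, sizeof(rec));
 *     dbenv->log_cursor(dbenv, &logc, 0);
 *     for (ret = logc->get(logc, &lsn, &rec, DB_FIRST);
 *         ret == 0; ret = logc->get(logc, &lsn, &rec, DB_NEXT))
 *             printf("[%lu][%lu]\n", (u_long)lsn.file, (u_long)lsn.offset);
 *     logc->close(logc, 0);
 *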
*/ +#define DB_MPOOL_LAST 0x002 /* Return the last page. */ +#define DB_MPOOL_NEW 0x004 /* Create a new page. */ + +/* Flag values for DB_MPOOLFILE->put, DB_MPOOLFILE->set. */ +#define DB_MPOOL_CLEAN 0x001 /* Page is not modified. */ +#define DB_MPOOL_DIRTY 0x002 /* Page is modified. */ +#define DB_MPOOL_DISCARD 0x004 /* Don't cache the page. */ +#define DB_MPOOL_FREE 0x008 /* Free page if present. */ + +/* Flags values for DB_MPOOLFILE->set_flags. */ +#define DB_MPOOL_NOFILE 0x001 /* Never open a backing file. */ +#define DB_MPOOL_UNLINK 0x002 /* Unlink the file on last close. */ + +/* Priority values for DB_MPOOLFILE->set_priority. */ +typedef enum { + DB_PRIORITY_VERY_LOW=1, + DB_PRIORITY_LOW=2, + DB_PRIORITY_DEFAULT=3, + DB_PRIORITY_HIGH=4, + DB_PRIORITY_VERY_HIGH=5 +} DB_CACHE_PRIORITY; + +/* Per-process DB_MPOOLFILE information. */ +struct __db_mpoolfile { + DB_FH *fhp; /* Underlying file handle. */ + + /* + * !!! + * The ref, pinref and q fields are protected by the region lock. + */ + u_int32_t ref; /* Reference count. */ + + u_int32_t pinref; /* Pinned block reference count. */ + + /* + * !!! + * Explicit representations of structures from queue.h. + * TAILQ_ENTRY(__db_mpoolfile) q; + */ + struct { + struct __db_mpoolfile *tqe_next; + struct __db_mpoolfile **tqe_prev; + } q; /* Linked list of DB_MPOOLFILE's. */ + + /* + * !!! + * The rest of the fields (with the exception of the MP_FLUSH flag) + * are not thread-protected, even when they may be modified at any + * time by the application. The reason is the DB_MPOOLFILE handle + * is single-threaded from the viewpoint of the application, and so + * the only fields needing to be thread-protected are those accessed + * by checkpoint or sync threads when using DB_MPOOLFILE structures + * to flush buffers from the cache. + */ + DB_ENV *dbenv; /* Overlying DB_ENV. */ + MPOOLFILE *mfp; /* Underlying MPOOLFILE. */ + + u_int32_t clear_len; /* Cleared length on created pages. */ + u_int8_t /* Unique file ID. */ + fileid[DB_FILE_ID_LEN]; + int ftype; /* File type. */ + int32_t lsn_offset; /* LSN offset in page. */ + u_int32_t gbytes, bytes; /* Maximum file size. */ + DBT *pgcookie; /* Byte-string passed to pgin/pgout. */ + DB_CACHE_PRIORITY /* Cache priority. */ + priority; + + void *addr; /* Address of mmap'd region. */ + size_t len; /* Length of mmap'd region. */ + + u_int32_t config_flags; /* Flags to DB_MPOOLFILE->set_flags. */ + + /* Methods. 
*/ + int (*close) __P((DB_MPOOLFILE *, u_int32_t)); + int (*get) __P((DB_MPOOLFILE *, db_pgno_t *, u_int32_t, void *)); + int (*open) __P((DB_MPOOLFILE *, const char *, u_int32_t, int, size_t)); + int (*put) __P((DB_MPOOLFILE *, void *, u_int32_t)); + int (*set) __P((DB_MPOOLFILE *, void *, u_int32_t)); + int (*get_clear_len) __P((DB_MPOOLFILE *, u_int32_t *)); + int (*set_clear_len) __P((DB_MPOOLFILE *, u_int32_t)); + int (*get_fileid) __P((DB_MPOOLFILE *, u_int8_t *)); + int (*set_fileid) __P((DB_MPOOLFILE *, u_int8_t *)); + int (*get_flags) __P((DB_MPOOLFILE *, u_int32_t *)); + int (*set_flags) __P((DB_MPOOLFILE *, u_int32_t, int)); + int (*get_ftype) __P((DB_MPOOLFILE *, int *)); + int (*set_ftype) __P((DB_MPOOLFILE *, int)); + int (*get_lsn_offset) __P((DB_MPOOLFILE *, int32_t *)); + int (*set_lsn_offset) __P((DB_MPOOLFILE *, int32_t)); + int (*get_maxsize) __P((DB_MPOOLFILE *, u_int32_t *, u_int32_t *)); + int (*set_maxsize) __P((DB_MPOOLFILE *, u_int32_t, u_int32_t)); + int (*get_pgcookie) __P((DB_MPOOLFILE *, DBT *)); + int (*set_pgcookie) __P((DB_MPOOLFILE *, DBT *)); + int (*get_priority) __P((DB_MPOOLFILE *, DB_CACHE_PRIORITY *)); + int (*set_priority) __P((DB_MPOOLFILE *, DB_CACHE_PRIORITY)); + int (*sync) __P((DB_MPOOLFILE *)); + + /* + * MP_FILEID_SET, MP_OPEN_CALLED and MP_READONLY do not need to be + * thread protected because they are initialized before the file is + * linked onto the per-process lists, and never modified. + * + * MP_FLUSH is thread protected because it is potentially read/set by + * multiple threads of control. + */ +#define MP_FILEID_SET 0x001 /* Application supplied a file ID. */ +#define MP_FLUSH 0x002 /* Was opened to flush a buffer. */ +#define MP_OPEN_CALLED 0x004 /* File opened. */ +#define MP_READONLY 0x008 /* File is readonly. */ + u_int32_t flags; +}; + +/* Mpool statistics structure. */ +struct __db_mpool_stat { + u_int32_t st_gbytes; /* Total cache size: GB. */ + u_int32_t st_bytes; /* Total cache size: B. */ + u_int32_t st_ncache; /* Number of caches. */ + roff_t st_regsize; /* Region size. */ + size_t st_mmapsize; /* Maximum file size for mmap. */ + int st_maxopenfd; /* Maximum number of open fd's. */ + int st_maxwrite; /* Maximum buffers to write. */ + int st_maxwrite_sleep; /* Sleep after writing max buffers. */ + u_int32_t st_map; /* Pages from mapped files. */ + u_int32_t st_cache_hit; /* Pages found in the cache. */ + u_int32_t st_cache_miss; /* Pages not found in the cache. */ + u_int32_t st_page_create; /* Pages created in the cache. */ + u_int32_t st_page_in; /* Pages read in. */ + u_int32_t st_page_out; /* Pages written out. */ + u_int32_t st_ro_evict; /* Clean pages forced from the cache. */ + u_int32_t st_rw_evict; /* Dirty pages forced from the cache. */ + u_int32_t st_page_trickle; /* Pages written by memp_trickle. */ + u_int32_t st_pages; /* Total number of pages. */ + u_int32_t st_page_clean; /* Clean pages. */ + u_int32_t st_page_dirty; /* Dirty pages. */ + u_int32_t st_hash_buckets; /* Number of hash buckets. */ + u_int32_t st_hash_searches; /* Total hash chain searches. */ + u_int32_t st_hash_longest; /* Longest hash chain searched. */ + u_int32_t st_hash_examined; /* Total hash entries searched. */ + u_int32_t st_hash_nowait; /* Hash lock granted with nowait. */ + u_int32_t st_hash_wait; /* Hash lock granted after wait. */ + u_int32_t st_hash_max_wait; /* Max hash lock granted after wait. */ + u_int32_t st_region_nowait; /* Region lock granted with nowait. */ + u_int32_t st_region_wait; /* Region lock granted after wait. 
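 *
 * A rough sketch of per-file buffer pool access through the DB_MPOOLFILE
 * methods above, assuming an open DB_ENV *dbenv; the file name is
 * hypothetical and error handling is omitted:
 *
 *     DB_MPOOLFILE *mpf;
 *     db_pgno_t pgno;
 *     void *page;
 *
 *     dbenv->memp_fcreate(dbenv, &mpf, 0);
 *     mpf->open(mpf, "my.file", 0, 0600, 1024);
 *     pgno = 0;
 *     mpf->get(mpf, &pgno, 0, &page);      /* Pin page 0 in the cache. */
 *     /* ... read or modify the page ... */
 *     mpf->put(mpf, page, DB_MPOOL_DIRTY); /* Unpin, mark modified. */
 *     mpf->close(mpf, 0);
 *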
*/ + u_int32_t st_alloc; /* Number of page allocations. */ + u_int32_t st_alloc_buckets; /* Buckets checked during allocation. */ + u_int32_t st_alloc_max_buckets; /* Max checked during allocation. */ + u_int32_t st_alloc_pages; /* Pages checked during allocation. */ + u_int32_t st_alloc_max_pages; /* Max checked during allocation. */ +}; + +/* Mpool file statistics structure. */ +struct __db_mpool_fstat { + char *file_name; /* File name. */ + u_int32_t st_pagesize; /* Page size. */ + u_int32_t st_map; /* Pages from mapped files. */ + u_int32_t st_cache_hit; /* Pages found in the cache. */ + u_int32_t st_cache_miss; /* Pages not found in the cache. */ + u_int32_t st_page_create; /* Pages created in the cache. */ + u_int32_t st_page_in; /* Pages read in. */ + u_int32_t st_page_out; /* Pages written out. */ +}; + +/******************************************************* + * Transactions and recovery. + *******************************************************/ +#define DB_TXNVERSION 1 + +typedef enum { + DB_TXN_ABORT=0, /* Public. */ + DB_TXN_APPLY=1, /* Public. */ + DB_TXN_BACKWARD_ALLOC=2, /* Internal. */ + DB_TXN_BACKWARD_ROLL=3, /* Public. */ + DB_TXN_FORWARD_ROLL=4, /* Public. */ + DB_TXN_OPENFILES=5, /* Internal. */ + DB_TXN_POPENFILES=6, /* Internal. */ + DB_TXN_PRINT=7 /* Public. */ +} db_recops; + +/* + * BACKWARD_ALLOC is used during the forward pass to pick up any aborted + * allocations for files that were created during the forward pass. + * The main difference between _ALLOC and _ROLL is that the entry for + * the file not exist during the rollforward pass. + */ +#define DB_UNDO(op) ((op) == DB_TXN_ABORT || \ + (op) == DB_TXN_BACKWARD_ROLL || (op) == DB_TXN_BACKWARD_ALLOC) +#define DB_REDO(op) ((op) == DB_TXN_FORWARD_ROLL || (op) == DB_TXN_APPLY) + +struct __db_txn { + DB_TXNMGR *mgrp; /* Pointer to transaction manager. */ + DB_TXN *parent; /* Pointer to transaction's parent. */ + DB_LSN last_lsn; /* Lsn of last log write. */ + u_int32_t txnid; /* Unique transaction id. */ + u_int32_t tid; /* Thread id for use in MT XA. */ + roff_t off; /* Detail structure within region. */ + db_timeout_t lock_timeout; /* Timeout for locks for this txn. */ + db_timeout_t expire; /* Time this txn expires. */ + void *txn_list; /* Undo information for parent. */ + + /* + * !!! + * Explicit representations of structures from queue.h. + * TAILQ_ENTRY(__db_txn) links; + * TAILQ_ENTRY(__db_txn) xalinks; + */ + struct { + struct __db_txn *tqe_next; + struct __db_txn **tqe_prev; + } links; /* Links transactions off manager. */ + struct { + struct __db_txn *tqe_next; + struct __db_txn **tqe_prev; + } xalinks; /* Links active XA transactions. */ + + /* + * !!! + * Explicit representations of structures from queue.h. + * TAILQ_HEAD(__events, __txn_event) events; + */ + struct { + struct __txn_event *tqh_first; + struct __txn_event **tqh_last; + } events; + + /* + * !!! + * Explicit representations of structures from queue.h. + * STAILQ_HEAD(__logrec, __txn_logrec) logs; + */ + struct { + struct __txn_logrec *stqh_first; + struct __txn_logrec **stqh_last; + } logs; /* Links deferred events. */ + + /* + * !!! + * Explicit representations of structures from queue.h. + * TAILQ_HEAD(__kids, __db_txn) kids; + */ + struct __kids { + struct __db_txn *tqh_first; + struct __db_txn **tqh_last; + } kids; + + /* + * !!! + * Explicit representations of structures from queue.h. 
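 *
 * The DB_UNDO/DB_REDO macros above are also how an application-specific
 * recovery function, registered with DB_ENV->set_app_dispatch, typically
 * distinguishes the pass it is called for.  A hypothetical skeleton (the
 * record decoding is application-defined and only hinted at here):
 *
 *     int
 *     my_dispatch(DB_ENV *dbenv, DBT *rec, DB_LSN *lsn, db_recops op)
 *     {
 *             if (DB_REDO(op)) {
 *                     /* Re-apply the change described by rec. */
 *             } else if (DB_UNDO(op)) {
 *                     /* Roll the change back. */
 *             } else if (op == DB_TXN_PRINT) {
 *                     /* Display the record, e.g., for db_printlog. */
 *             }
 *             return (0);
 *     }
 *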
+ * TAILQ_ENTRY(__db_txn) klinks; + */ + struct { + struct __db_txn *tqe_next; + struct __db_txn **tqe_prev; + } klinks; + + void *api_internal; /* C++ API private. */ + void *xml_internal; /* XML API private. */ + + u_int32_t cursors; /* Number of cursors open for txn */ + + /* Methods. */ + int (*abort) __P((DB_TXN *)); + int (*commit) __P((DB_TXN *, u_int32_t)); + int (*discard) __P((DB_TXN *, u_int32_t)); + u_int32_t (*id) __P((DB_TXN *)); + int (*prepare) __P((DB_TXN *, u_int8_t *)); + void (*set_begin_lsnp) __P((DB_TXN *txn, DB_LSN **)); + int (*set_timeout) __P((DB_TXN *, db_timeout_t, u_int32_t)); + +#define TXN_CHILDCOMMIT 0x001 /* Transaction that has committed. */ +#define TXN_COMPENSATE 0x002 /* Compensating transaction. */ +#define TXN_DEADLOCK 0x004 /* Transaction has deadlocked. */ +#define TXN_DEGREE_2 0x008 /* Has degree 2 isolation. */ +#define TXN_DIRTY_READ 0x010 /* Transaction does dirty reads. */ +#define TXN_LOCKTIMEOUT 0x020 /* Transaction has a lock timeout. */ +#define TXN_MALLOC 0x040 /* Structure allocated by TXN system. */ +#define TXN_NOSYNC 0x080 /* Do not sync on prepare and commit. */ +#define TXN_NOWAIT 0x100 /* Do not wait on locks. */ +#define TXN_RESTORED 0x200 /* Transaction has been restored. */ +#define TXN_SYNC 0x400 /* Sync on prepare and commit. */ + u_int32_t flags; +}; + +/* + * Structure used for two phase commit interface. Berkeley DB support for two + * phase commit is compatible with the X/open XA interface. + * + * The XA #define XIDDATASIZE defines the size of a global transaction ID. We + * have our own version here (for name space reasons) which must have the same + * value. + */ +#define DB_XIDDATASIZE 128 +struct __db_preplist { + DB_TXN *txn; + u_int8_t gid[DB_XIDDATASIZE]; +}; + +/* Transaction statistics structure. */ +struct __db_txn_active { + u_int32_t txnid; /* Transaction ID */ + u_int32_t parentid; /* Transaction ID of parent */ + DB_LSN lsn; /* LSN when transaction began */ + u_int32_t xa_status; /* XA status */ + u_int8_t xid[DB_XIDDATASIZE]; /* XA global transaction ID */ +}; + +struct __db_txn_stat { + DB_LSN st_last_ckp; /* lsn of the last checkpoint */ + time_t st_time_ckp; /* time of last checkpoint */ + u_int32_t st_last_txnid; /* last transaction id given out */ + u_int32_t st_maxtxns; /* maximum txns possible */ + u_int32_t st_naborts; /* number of aborted transactions */ + u_int32_t st_nbegins; /* number of begun transactions */ + u_int32_t st_ncommits; /* number of committed transactions */ + u_int32_t st_nactive; /* number of active transactions */ + u_int32_t st_nrestores; /* number of restored transactions + after recovery. */ + u_int32_t st_maxnactive; /* maximum active transactions */ + DB_TXN_ACTIVE *st_txnarray; /* array of active transactions */ + u_int32_t st_region_wait; /* Region lock granted after wait. */ + u_int32_t st_region_nowait; /* Region lock granted without wait. */ + roff_t st_regsize; /* Region size. */ +}; + +/******************************************************* + * Replication. + *******************************************************/ +/* Special, out-of-band environment IDs. */ +#define DB_EID_BROADCAST -1 +#define DB_EID_INVALID -2 + +/* rep_start flags values */ +#define DB_REP_CLIENT 0x001 +#define DB_REP_MASTER 0x002 + +/* Replication statistics. */ +struct __db_rep_stat { + /* !!! 
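 *
 * The usual pattern for the DB_TXN methods above, sketched with an open
 * DB_ENV *dbenv and DB *dbp and with key/data DBTs prepared by the caller;
 * error handling is collapsed to a simple abort:
 *
 *     DB_TXN *txn;
 *
 *     dbenv->txn_begin(dbenv, NULL, &txn, 0);
 *     if (dbp->put(dbp, txn, &key, &data, 0) == 0)
 *             txn->commit(txn, 0);
 *     else
 *             txn->abort(txn);
 *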
+ * Many replication statistics fields cannot be protected by a mutex + * without an unacceptable performance penalty, since most message + * processing is done without the need to hold a region-wide lock. + * Fields whose comments end with a '+' may be updated without holding + * the replication or log mutexes (as appropriate), and thus may be + * off somewhat (or, on unreasonable architectures under unlucky + * circumstances, garbaged). + */ + u_int32_t st_status; /* Current replication status. */ + DB_LSN st_next_lsn; /* Next LSN to use or expect. */ + DB_LSN st_waiting_lsn; /* LSN we're awaiting, if any. */ + db_pgno_t st_next_pg; /* Next pg we expect. */ + db_pgno_t st_waiting_pg; /* pg we're awaiting, if any. */ + + u_int32_t st_dupmasters; /* # of times a duplicate master + condition was detected.+ */ + int st_env_id; /* Current environment ID. */ + int st_env_priority; /* Current environment priority. */ + u_int32_t st_gen; /* Current generation number. */ + u_int32_t st_egen; /* Current election gen number. */ + u_int32_t st_log_duplicated; /* Log records received multiply.+ */ + u_int32_t st_log_queued; /* Log records currently queued.+ */ + u_int32_t st_log_queued_max; /* Max. log records queued at once.+ */ + u_int32_t st_log_queued_total; /* Total # of log recs. ever queued.+ */ + u_int32_t st_log_records; /* Log records received and put.+ */ + u_int32_t st_log_requested; /* Log recs. missed and requested.+ */ + int st_master; /* Env. ID of the current master. */ + u_int32_t st_master_changes; /* # of times we've switched masters. */ + u_int32_t st_msgs_badgen; /* Messages with a bad generation #.+ */ + u_int32_t st_msgs_processed; /* Messages received and processed.+ */ + u_int32_t st_msgs_recover; /* Messages ignored because this site + was a client in recovery.+ */ + u_int32_t st_msgs_send_failures;/* # of failed message sends.+ */ + u_int32_t st_msgs_sent; /* # of successful message sends.+ */ + u_int32_t st_newsites; /* # of NEWSITE msgs. received.+ */ + int st_nsites; /* Current number of sites we will + assume during elections. */ + u_int32_t st_nthrottles; /* # of times we were throttled. */ + u_int32_t st_outdated; /* # of times we detected and returned + an OUTDATED condition.+ */ + u_int32_t st_pg_duplicated; /* Pages received multiply.+ */ + u_int32_t st_pg_records; /* Pages received and stored.+ */ + u_int32_t st_pg_requested; /* Pages missed and requested.+ */ + u_int32_t st_startup_complete; /* Site completed client sync-up. */ + u_int32_t st_txns_applied; /* # of transactions applied.+ */ + + /* Elections generally. */ + u_int32_t st_elections; /* # of elections held.+ */ + u_int32_t st_elections_won; /* # of elections won by this site.+ */ + + /* Statistics about an in-progress election. */ + int st_election_cur_winner; /* Current front-runner. */ + u_int32_t st_election_gen; /* Election generation number. */ + DB_LSN st_election_lsn; /* Max. LSN of current winner. */ + int st_election_nsites; /* # of "registered voters". */ + int st_election_nvotes; /* # of "registered voters" needed. */ + int st_election_priority; /* Current election priority. */ + int st_election_status; /* Current election status. */ + u_int32_t st_election_tiebreaker;/* Election tiebreaker value. */ + int st_election_votes; /* Votes received in this round. */ +}; +/* + * The storage record for a sequence. + */ +struct __db_seq_record { + u_int32_t seq_version; /* Version size/number. */ +#define DB_SEQ_DEC 0x00000001 /* Decrement sequence. 
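 *
 * For illustration, the typical life cycle of a sequence built on these
 * records, via the DB_SEQUENCE handle declared below and its constructor
 * db_sequence_create() (declared elsewhere in this header).  Assumes an
 * open DB *dbp and a key DBT naming the sequence; error handling omitted:
 *
 *     DB_SEQUENCE *seq;
 *     db_seq_t val;
 *
 *     db_sequence_create(&seq, dbp, 0);
 *     seq->initial_value(seq, 1);
 *     seq->open(seq, NULL, &key, DB_CREATE);
 *     seq->get(seq, NULL, 1, &val, 0);     /* Next value into val. */
 *     seq->close(seq, 0);
 *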
*/ +#define DB_SEQ_INC 0x00000002 /* Increment sequence. */ +#define DB_SEQ_RANGE_SET 0x00000004 /* Range set (internal). */ +#define DB_SEQ_WRAP 0x00000008 /* Wrap sequence at min/max. */ + u_int32_t flags; /* Flags. */ + db_seq_t seq_value; /* Current value. */ + db_seq_t seq_max; /* Max permitted. */ + db_seq_t seq_min; /* Min permitted. */ +}; + +/* + * Handle for a sequence object. + */ +struct __db_sequence { + DB *seq_dbp; /* DB handle for this sequence. */ + DB_MUTEX *seq_mutexp; /* Mutex if sequence is threaded. */ + DB_SEQ_RECORD *seq_rp; /* Pointer to current data. */ + DB_SEQ_RECORD seq_record; /* Data from DB_SEQUENCE. */ + int32_t seq_cache_size; /* Number of values cached. */ + db_seq_t seq_last_value; /* Last value cached. */ + DBT seq_key; /* DBT pointing to sequence key. */ + DBT seq_data; /* DBT pointing to seq_record. */ + + /* API-private structure: used by C++ and Java. */ + void *api_internal; + + int (*close) __P((DB_SEQUENCE *, u_int32_t)); + int (*get) __P((DB_SEQUENCE *, + DB_TXN *, int32_t, db_seq_t *, u_int32_t)); + int (*get_cachesize) __P((DB_SEQUENCE *, int32_t *)); + int (*get_db) __P((DB_SEQUENCE *, DB **)); + int (*get_flags) __P((DB_SEQUENCE *, u_int32_t *)); + int (*get_key) __P((DB_SEQUENCE *, DBT *)); + int (*get_range) __P((DB_SEQUENCE *, + db_seq_t *, db_seq_t *)); + int (*initial_value) __P((DB_SEQUENCE *, db_seq_t)); + int (*open) __P((DB_SEQUENCE *, + DB_TXN *, DBT *, u_int32_t)); + int (*remove) __P((DB_SEQUENCE *, DB_TXN *, u_int32_t)); + int (*set_cachesize) __P((DB_SEQUENCE *, int32_t)); + int (*set_flags) __P((DB_SEQUENCE *, u_int32_t)); + int (*set_range) __P((DB_SEQUENCE *, db_seq_t, db_seq_t)); + int (*stat) __P((DB_SEQUENCE *, + DB_SEQUENCE_STAT **, u_int32_t)); + int (*stat_print) __P((DB_SEQUENCE *, u_int32_t)); +}; + +struct __db_seq_stat { + u_int32_t st_wait; /* Sequence lock granted without wait. */ + u_int32_t st_nowait; /* Sequence lock granted after wait. */ + db_seq_t st_current; /* Current value in db. */ + db_seq_t st_value; /* Current cached value. */ + db_seq_t st_last_value; /* Last cached value. */ + db_seq_t st_min; /* Minimum value. */ + db_seq_t st_max; /* Maximum value. */ + int32_t st_cache_size; /* Cache size. */ + u_int32_t st_flags; /* Flag value. */ +}; + +/******************************************************* + * Access methods. + *******************************************************/ +typedef enum { + DB_BTREE=1, + DB_HASH=2, + DB_RECNO=3, + DB_QUEUE=4, + DB_UNKNOWN=5 /* Figure it out on open. */ +} DBTYPE; + +#define DB_RENAMEMAGIC 0x030800 /* File has been renamed. */ + +#define DB_BTREEVERSION 9 /* Current btree version. */ +#define DB_BTREEOLDVER 8 /* Oldest btree version supported. */ +#define DB_BTREEMAGIC 0x053162 + +#define DB_HASHVERSION 8 /* Current hash version. */ +#define DB_HASHOLDVER 7 /* Oldest hash version supported. */ +#define DB_HASHMAGIC 0x061561 + +#define DB_QAMVERSION 4 /* Current queue version. */ +#define DB_QAMOLDVER 3 /* Oldest queue version supported. */ +#define DB_QAMMAGIC 0x042253 + +#define DB_SEQUENCE_VERSION 1 /* Current sequence version. */ + +/* + * DB access method and cursor operation values. Each value is an operation + * code to which additional bit flags are added. 
+ */ +#define DB_AFTER 1 /* c_put() */ +#define DB_APPEND 2 /* put() */ +#define DB_BEFORE 3 /* c_put() */ +#define DB_CACHED_COUNTS 4 /* stat() */ +#define DB_CONSUME 5 /* get() */ +#define DB_CONSUME_WAIT 6 /* get() */ +#define DB_CURRENT 7 /* c_get(), c_put(), DB_LOGC->get() */ +#define DB_FAST_STAT 8 /* stat() */ +#define DB_FIRST 9 /* c_get(), DB_LOGC->get() */ +#define DB_GET_BOTH 10 /* get(), c_get() */ +#define DB_GET_BOTHC 11 /* c_get() (internal) */ +#define DB_GET_BOTH_RANGE 12 /* get(), c_get() */ +#define DB_GET_RECNO 13 /* c_get() */ +#define DB_JOIN_ITEM 14 /* c_get(); do not do primary lookup */ +#define DB_KEYFIRST 15 /* c_put() */ +#define DB_KEYLAST 16 /* c_put() */ +#define DB_LAST 17 /* c_get(), DB_LOGC->get() */ +#define DB_NEXT 18 /* c_get(), DB_LOGC->get() */ +#define DB_NEXT_DUP 19 /* c_get() */ +#define DB_NEXT_NODUP 20 /* c_get() */ +#define DB_NODUPDATA 21 /* put(), c_put() */ +#define DB_NOOVERWRITE 22 /* put() */ +#define DB_NOSYNC 23 /* close() */ +#define DB_POSITION 24 /* c_dup() */ +#define DB_PREV 25 /* c_get(), DB_LOGC->get() */ +#define DB_PREV_NODUP 26 /* c_get(), DB_LOGC->get() */ +#define DB_RECORDCOUNT 27 /* stat() */ +#define DB_SET 28 /* c_get(), DB_LOGC->get() */ +#define DB_SET_LOCK_TIMEOUT 29 /* set_timout() */ +#define DB_SET_RANGE 30 /* c_get() */ +#define DB_SET_RECNO 31 /* get(), c_get() */ +#define DB_SET_TXN_NOW 32 /* set_timout() (internal) */ +#define DB_SET_TXN_TIMEOUT 33 /* set_timout() */ +#define DB_UPDATE_SECONDARY 34 /* c_get(), c_del() (internal) */ +#define DB_WRITECURSOR 35 /* cursor() */ +#define DB_WRITELOCK 36 /* cursor() (internal) */ + +/* This has to change when the max opcode hits 255. */ +#define DB_OPFLAGS_MASK 0x000000ff /* Mask for operations flags. */ + +/* + * Masks for flags that can be OR'd into DB access method and cursor + * operation values. + * + * DB_DIRTY_READ 0x04000000 Dirty Read. */ +#define DB_MULTIPLE 0x08000000 /* Return multiple data values. */ +#define DB_MULTIPLE_KEY 0x10000000 /* Return multiple data/key pairs. */ +#define DB_RMW 0x20000000 /* Acquire write flag immediately. */ + +/* + * DB (user visible) error return codes. + * + * !!! + * For source compatibility with DB 2.X deadlock return (EAGAIN), use the + * following: + * #include + * #define DB_LOCK_DEADLOCK EAGAIN + * + * !!! + * We don't want our error returns to conflict with other packages where + * possible, so pick a base error value that's hopefully not common. We + * document that we own the error name space from -30,800 to -30,999. + */ +/* DB (public) error return codes. */ +#define DB_BUFFER_SMALL (-30999)/* User memory too small for return. */ +#define DB_DONOTINDEX (-30998)/* "Null" return from 2ndary callbk. */ +#define DB_KEYEMPTY (-30997)/* Key/data deleted or never created. */ +#define DB_KEYEXIST (-30996)/* The key/data pair already exists. */ +#define DB_LOCK_DEADLOCK (-30995)/* Deadlock. */ +#define DB_LOCK_NOTGRANTED (-30994)/* Lock unavailable. */ +#define DB_LOG_BUFFER_FULL (-30993)/* In-memory log buffer full. */ +#define DB_NOSERVER (-30992)/* Server panic return. */ +#define DB_NOSERVER_HOME (-30991)/* Bad home sent to server. */ +#define DB_NOSERVER_ID (-30990)/* Bad ID sent to server. */ +#define DB_NOTFOUND (-30989)/* Key/data pair not found (EOF). */ +#define DB_OLD_VERSION (-30988)/* Out-of-date version. */ +#define DB_PAGE_NOTFOUND (-30987)/* Requested page not found. */ +#define DB_REP_DUPMASTER (-30986)/* There are two masters. */ +#define DB_REP_HANDLE_DEAD (-30985)/* Rolled back a commit. 
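 *
 * These operation codes and error returns combine in the usual cursor
 * loop.  A minimal sketch over an open DB *dbp, with zeroed key/data DBTs
 * prepared by the caller; error handling omitted:
 *
 *     DBC *dbc;
 *     int ret;
 *
 *     dbp->cursor(dbp, NULL, &dbc, 0);
 *     while ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0)
 *             /* key.data/key.size and data.data/data.size are valid. */;
 *     if (ret != DB_NOTFOUND)
 *             dbp->err(dbp, ret, "DBC->c_get");
 *     dbc->c_close(dbc);
 *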
*/ +#define DB_REP_HOLDELECTION (-30984)/* Time to hold an election. */ +#define DB_REP_ISPERM (-30983)/* Cached not written perm written.*/ +#define DB_REP_NEWMASTER (-30982)/* We have learned of a new master. */ +#define DB_REP_NEWSITE (-30981)/* New site entered system. */ +#define DB_REP_NOTPERM (-30980)/* Permanent log record not written. */ +#define DB_REP_STARTUPDONE (-30979)/* Client startup complete. */ +#define DB_REP_UNAVAIL (-30978)/* Site cannot currently be reached. */ +#define DB_RUNRECOVERY (-30977)/* Panic return. */ +#define DB_SECONDARY_BAD (-30976)/* Secondary index corrupt. */ +#define DB_VERIFY_BAD (-30975)/* Verify failed; bad format. */ +#define DB_VERSION_MISMATCH (-30974)/* Environment version mismatch. */ + +/* DB (private) error return codes. */ +#define DB_ALREADY_ABORTED (-30899) +#define DB_DELETED (-30898)/* Recovery file marked deleted. */ +#define DB_LOCK_NOTEXIST (-30897)/* Object to lock is gone. */ +#define DB_NEEDSPLIT (-30896)/* Page needs to be split. */ +#define DB_REP_EGENCHG (-30895)/* Egen changed while in election. */ +#define DB_REP_LOGREADY (-30894)/* Rep log ready for recovery. */ +#define DB_REP_PAGEDONE (-30893)/* This page was already done. */ +#define DB_SURPRISE_KID (-30892)/* Child commit where parent + didn't know it was a parent. */ +#define DB_SWAPBYTES (-30891)/* Database needs byte swapping. */ +#define DB_TIMEOUT (-30890)/* Timed out waiting for election. */ +#define DB_TXN_CKP (-30889)/* Encountered ckp record in log. */ +#define DB_VERIFY_FATAL (-30888)/* DB->verify cannot proceed. */ + +/* Database handle. */ +struct __db { + /******************************************************* + * Public: owned by the application. + *******************************************************/ + u_int32_t pgsize; /* Database logical page size. */ + + /* Callbacks. */ + int (*db_append_recno) __P((DB *, DBT *, db_recno_t)); + void (*db_feedback) __P((DB *, int, int)); + int (*dup_compare) __P((DB *, const DBT *, const DBT *)); + + void *app_private; /* Application-private handle. */ + + /******************************************************* + * Private: owned by DB. + *******************************************************/ + DB_ENV *dbenv; /* Backing environment. */ + + DBTYPE type; /* DB access method type. */ + + DB_MPOOLFILE *mpf; /* Backing buffer pool. */ + + DB_MUTEX *mutexp; /* Synchronization for free threading */ + + char *fname, *dname; /* File/database passed to DB->open. */ + u_int32_t open_flags; /* Flags passed to DB->open. */ + + u_int8_t fileid[DB_FILE_ID_LEN];/* File's unique ID for locking. */ + + u_int32_t adj_fileid; /* File's unique ID for curs. adj. */ + +#define DB_LOGFILEID_INVALID -1 + FNAME *log_filename; /* File's naming info for logging. */ + + db_pgno_t meta_pgno; /* Meta page number */ + u_int32_t lid; /* Locker id for handle locking. */ + u_int32_t cur_lid; /* Current handle lock holder. */ + u_int32_t associate_lid; /* Locker id for DB->associate call. */ + DB_LOCK handle_lock; /* Lock held on this handle. */ + + u_int cl_id; /* RPC: remote client id. */ + + time_t timestamp; /* Handle timestamp for replication. */ + + /* + * Returned data memory for DB->get() and friends. + */ + DBT my_rskey; /* Secondary key. */ + DBT my_rkey; /* [Primary] key. */ + DBT my_rdata; /* Data. */ + + /* + * !!! + * Some applications use DB but implement their own locking outside of + * DB. 
If they're using fcntl(2) locking on the underlying database + * file, and we open and close a file descriptor for that file, we will + * discard their locks. The DB_FCNTL_LOCKING flag to DB->open is an + * undocumented interface to support this usage which leaves any file + * descriptors we open until DB->close. This will only work with the + * DB->open interface and simple caches, e.g., creating a transaction + * thread may open/close file descriptors this flag doesn't protect. + * Locking with fcntl(2) on a file that you don't own is a very, very + * unsafe thing to do. 'Nuff said. + */ + DB_FH *saved_open_fhp; /* Saved file handle. */ + + /* + * Linked list of DBP's, linked from the DB_ENV, used to keep track + * of all open db handles for cursor adjustment. + * + * !!! + * Explicit representations of structures from queue.h. + * LIST_ENTRY(__db) dblistlinks; + */ + struct { + struct __db *le_next; + struct __db **le_prev; + } dblistlinks; + + /* + * Cursor queues. + * + * !!! + * Explicit representations of structures from queue.h. + * TAILQ_HEAD(__cq_fq, __dbc) free_queue; + * TAILQ_HEAD(__cq_aq, __dbc) active_queue; + * TAILQ_HEAD(__cq_jq, __dbc) join_queue; + */ + struct __cq_fq { + struct __dbc *tqh_first; + struct __dbc **tqh_last; + } free_queue; + struct __cq_aq { + struct __dbc *tqh_first; + struct __dbc **tqh_last; + } active_queue; + struct __cq_jq { + struct __dbc *tqh_first; + struct __dbc **tqh_last; + } join_queue; + + /* + * Secondary index support. + * + * Linked list of secondary indices -- set in the primary. + * + * !!! + * Explicit representations of structures from queue.h. + * LIST_HEAD(s_secondaries, __db); + */ + struct { + struct __db *lh_first; + } s_secondaries; + + /* + * List entries for secondaries, and reference count of how + * many threads are updating this secondary (see __db_c_put). + * + * !!! + * Note that these are synchronized by the primary's mutex, but + * filled in in the secondaries. + * + * !!! + * Explicit representations of structures from queue.h. + * LIST_ENTRY(__db) s_links; + */ + struct { + struct __db *le_next; + struct __db **le_prev; + } s_links; + u_int32_t s_refcnt; + + /* Secondary callback and free functions -- set in the secondary. */ + int (*s_callback) __P((DB *, const DBT *, const DBT *, DBT *)); + + /* Reference to primary -- set in the secondary. */ + DB *s_primary; + + /* API-private structure: used by DB 1.85, C++, Java, Perl and Tcl */ + void *api_internal; + + /* Subsystem-private structure. */ + void *bt_internal; /* Btree/Recno access method. */ + void *h_internal; /* Hash access method. */ + void *q_internal; /* Queue access method. */ + void *xa_internal; /* XA. */ + + /* Methods. 
*/ + int (*associate) __P((DB *, DB_TXN *, DB *, int (*)(DB *, const DBT *, + const DBT *, DBT *), u_int32_t)); + int (*close) __P((DB *, u_int32_t)); + int (*cursor) __P((DB *, DB_TXN *, DBC **, u_int32_t)); + int (*del) __P((DB *, DB_TXN *, DBT *, u_int32_t)); + int (*dump) __P((DB *, + const char *, int (*)(void *, const void *), void *, int, int)); + void (*err) __P((DB *, int, const char *, ...)); + void (*errx) __P((DB *, const char *, ...)); + int (*fd) __P((DB *, int *)); + int (*get) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t)); + int (*pget) __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t)); + int (*get_byteswapped) __P((DB *, int *)); + int (*get_cachesize) __P((DB *, u_int32_t *, u_int32_t *, int *)); + int (*get_dbname) __P((DB *, const char **, const char **)); + int (*get_encrypt_flags) __P((DB *, u_int32_t *)); + DB_ENV *(*get_env) __P((DB *)); + void (*get_errfile) __P((DB *, FILE **)); + void (*get_errpfx) __P((DB *, const char **)); + int (*get_flags) __P((DB *, u_int32_t *)); + int (*get_lorder) __P((DB *, int *)); + int (*get_open_flags) __P((DB *, u_int32_t *)); + int (*get_pagesize) __P((DB *, u_int32_t *)); + int (*get_transactional) __P((DB *)); + int (*get_type) __P((DB *, DBTYPE *)); + int (*join) __P((DB *, DBC **, DBC **, u_int32_t)); + int (*key_range) __P((DB *, + DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t)); + int (*open) __P((DB *, DB_TXN *, + const char *, const char *, DBTYPE, u_int32_t, int)); + int (*put) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t)); + int (*remove) __P((DB *, const char *, const char *, u_int32_t)); + int (*rename) __P((DB *, + const char *, const char *, const char *, u_int32_t)); + int (*truncate) __P((DB *, DB_TXN *, u_int32_t *, u_int32_t)); + int (*set_append_recno) __P((DB *, int (*)(DB *, DBT *, db_recno_t))); + int (*set_alloc) __P((DB *, void *(*)(size_t), + void *(*)(void *, size_t), void (*)(void *))); + int (*set_cachesize) __P((DB *, u_int32_t, u_int32_t, int)); + int (*set_dup_compare) __P((DB *, + int (*)(DB *, const DBT *, const DBT *))); + int (*set_encrypt) __P((DB *, const char *, u_int32_t)); + void (*set_errcall) __P((DB *, + void (*)(const DB_ENV *, const char *, const char *))); + void (*set_errfile) __P((DB *, FILE *)); + void (*set_errpfx) __P((DB *, const char *)); + int (*set_feedback) __P((DB *, void (*)(DB *, int, int))); + int (*set_flags) __P((DB *, u_int32_t)); + int (*set_lorder) __P((DB *, int)); + void (*set_msgcall) __P((DB *, void (*)(const DB_ENV *, const char *))); + void (*get_msgfile) __P((DB *, FILE **)); + void (*set_msgfile) __P((DB *, FILE *)); + int (*set_pagesize) __P((DB *, u_int32_t)); + int (*set_paniccall) __P((DB *, void (*)(DB_ENV *, int))); + int (*stat) __P((DB *, DB_TXN *, void *, u_int32_t)); + int (*stat_print) __P((DB *, u_int32_t)); + int (*sync) __P((DB *, u_int32_t)); + int (*upgrade) __P((DB *, const char *, u_int32_t)); + int (*verify) __P((DB *, + const char *, const char *, FILE *, u_int32_t)); + + int (*get_bt_minkey) __P((DB *, u_int32_t *)); + int (*set_bt_compare) __P((DB *, + int (*)(DB *, const DBT *, const DBT *))); + int (*set_bt_maxkey) __P((DB *, u_int32_t)); + int (*set_bt_minkey) __P((DB *, u_int32_t)); + int (*set_bt_prefix) __P((DB *, + size_t (*)(DB *, const DBT *, const DBT *))); + + int (*get_h_ffactor) __P((DB *, u_int32_t *)); + int (*get_h_nelem) __P((DB *, u_int32_t *)); + int (*set_h_ffactor) __P((DB *, u_int32_t)); + int (*set_h_hash) __P((DB *, + u_int32_t (*)(DB *, const void *, u_int32_t))); + int (*set_h_nelem) __P((DB *, u_int32_t)); + 
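/*
 * A compressed sketch of the basic method flow on this handle, assuming
 * key and data DBTs prepared by the caller; the file name is hypothetical
 * and error handling is omitted:
 *
 *     DB *dbp;
 *
 *     db_create(&dbp, NULL, 0);
 *     dbp->open(dbp, NULL, "my.db", NULL, DB_BTREE, DB_CREATE, 0600);
 *     dbp->put(dbp, NULL, &key, &data, 0);
 *     dbp->get(dbp, NULL, &key, &data, 0);
 *     dbp->close(dbp, 0);
 */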
+ int (*get_re_delim) __P((DB *, int *)); + int (*get_re_len) __P((DB *, u_int32_t *)); + int (*get_re_pad) __P((DB *, int *)); + int (*get_re_source) __P((DB *, const char **)); + int (*set_re_delim) __P((DB *, int)); + int (*set_re_len) __P((DB *, u_int32_t)); + int (*set_re_pad) __P((DB *, int)); + int (*set_re_source) __P((DB *, const char *)); + + int (*get_q_extentsize) __P((DB *, u_int32_t *)); + int (*set_q_extentsize) __P((DB *, u_int32_t)); + + int (*db_am_remove) __P((DB *, DB_TXN *, const char *, const char *)); + int (*db_am_rename) __P((DB *, DB_TXN *, + const char *, const char *, const char *)); + + /* + * Never called; these are a place to save function pointers + * so that we can undo an associate. + */ + int (*stored_get) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t)); + int (*stored_close) __P((DB *, u_int32_t)); + +#define DB_OK_BTREE 0x01 +#define DB_OK_HASH 0x02 +#define DB_OK_QUEUE 0x04 +#define DB_OK_RECNO 0x08 + u_int32_t am_ok; /* Legal AM choices. */ + +#define DB_AM_CHKSUM 0x00000001 /* Checksumming. */ +#define DB_AM_CL_WRITER 0x00000002 /* Allow writes in client replica. */ +#define DB_AM_COMPENSATE 0x00000004 /* Created by compensating txn. */ +#define DB_AM_CREATED 0x00000008 /* Database was created upon open. */ +#define DB_AM_CREATED_MSTR 0x00000010 /* Encompassing file was created. */ +#define DB_AM_DBM_ERROR 0x00000020 /* Error in DBM/NDBM database. */ +#define DB_AM_DELIMITER 0x00000040 /* Variable length delimiter set. */ +#define DB_AM_DIRTY 0x00000080 /* Support Dirty Reads. */ +#define DB_AM_DISCARD 0x00000100 /* Discard any cached pages. */ +#define DB_AM_DUP 0x00000200 /* DB_DUP. */ +#define DB_AM_DUPSORT 0x00000400 /* DB_DUPSORT. */ +#define DB_AM_ENCRYPT 0x00000800 /* Encryption. */ +#define DB_AM_FIXEDLEN 0x00001000 /* Fixed-length records. */ +#define DB_AM_INMEM 0x00002000 /* In-memory; no sync on close. */ +#define DB_AM_INORDER 0x00004000 /* DB_INORDER. */ +#define DB_AM_IN_RENAME 0x00008000 /* File is being renamed. */ +#define DB_AM_NOT_DURABLE 0x00010000 /* Do not log changes. */ +#define DB_AM_OPEN_CALLED 0x00020000 /* DB->open called. */ +#define DB_AM_PAD 0x00040000 /* Fixed-length record pad. */ +#define DB_AM_PGDEF 0x00080000 /* Page size was defaulted. */ +#define DB_AM_RDONLY 0x00100000 /* Database is readonly. */ +#define DB_AM_RECNUM 0x00200000 /* DB_RECNUM. */ +#define DB_AM_RECOVER 0x00400000 /* DB opened by recovery routine. */ +#define DB_AM_RENUMBER 0x00800000 /* DB_RENUMBER. */ +#define DB_AM_REPLICATION 0x01000000 /* An internal replication file. */ +#define DB_AM_REVSPLITOFF 0x02000000 /* DB_REVSPLITOFF. */ +#define DB_AM_SECONDARY 0x04000000 /* Database is a secondary index. */ +#define DB_AM_SNAPSHOT 0x08000000 /* DB_SNAPSHOT. */ +#define DB_AM_SUBDB 0x10000000 /* Subdatabases supported. */ +#define DB_AM_SWAP 0x20000000 /* Pages need to be byte-swapped. */ +#define DB_AM_TXN 0x40000000 /* Opened in a transaction. */ +#define DB_AM_VERIFYING 0x80000000 /* DB handle is in the verifier. */ + u_int32_t orig_flags; /* Flags at open, for refresh. */ + u_int32_t flags; +}; + +/* + * Macros for bulk get. These are only intended for the C API. + * For C++, use DbMultiple*Iterator. 
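 *
 * A minimal sketch of their intended use, assuming an open cursor DBC *dbc
 * and a zeroed key DBT; error handling omitted:
 *
 *     DBT data;
 *     void *p, *retkey, *retdata;
 *     u_int32_t retklen, retdlen;
 *     u_int8_t buf[1024 * 1024];
 *
 *     memset(&data, 0, sizeof(data));
 *     data.data = buf;
 *     data.ulen = sizeof(buf);
 *     data.flags = DB_DBT_USERMEM;
 *
 *     while (dbc->c_get(dbc, &key, &data, DB_MULTIPLE_KEY | DB_NEXT) == 0)
 *             for (DB_MULTIPLE_INIT(p, &data);;) {
 *                     DB_MULTIPLE_KEY_NEXT(p,
 *                         &data, retkey, retklen, retdata, retdlen);
 *                     if (p == NULL)
 *                             break;
 *                     /* retkey/retklen and retdata/retdlen are one pair. */
 *             }
 *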
+ */ +#define DB_MULTIPLE_INIT(pointer, dbt) \ + (pointer = (u_int8_t *)(dbt)->data + \ + (dbt)->ulen - sizeof(u_int32_t)) +#define DB_MULTIPLE_NEXT(pointer, dbt, retdata, retdlen) \ + do { \ + if (*((u_int32_t *)(pointer)) == (u_int32_t)-1) { \ + retdata = NULL; \ + pointer = NULL; \ + break; \ + } \ + retdata = (u_int8_t *) \ + (dbt)->data + *(u_int32_t *)(pointer); \ + (pointer) = (u_int32_t *)(pointer) - 1; \ + retdlen = *(u_int32_t *)(pointer); \ + (pointer) = (u_int32_t *)(pointer) - 1; \ + if (retdlen == 0 && \ + retdata == (u_int8_t *)(dbt)->data) \ + retdata = NULL; \ + } while (0) +#define DB_MULTIPLE_KEY_NEXT(pointer, dbt, retkey, retklen, retdata, retdlen) \ + do { \ + if (*((u_int32_t *)(pointer)) == (u_int32_t)-1) { \ + retdata = NULL; \ + retkey = NULL; \ + pointer = NULL; \ + break; \ + } \ + retkey = (u_int8_t *) \ + (dbt)->data + *(u_int32_t *)(pointer); \ + (pointer) = (u_int32_t *)(pointer) - 1; \ + retklen = *(u_int32_t *)(pointer); \ + (pointer) = (u_int32_t *)(pointer) - 1; \ + retdata = (u_int8_t *) \ + (dbt)->data + *(u_int32_t *)(pointer); \ + (pointer) = (u_int32_t *)(pointer) - 1; \ + retdlen = *(u_int32_t *)(pointer); \ + (pointer) = (u_int32_t *)(pointer) - 1; \ + } while (0) + +#define DB_MULTIPLE_RECNO_NEXT(pointer, dbt, recno, retdata, retdlen) \ + do { \ + if (*((u_int32_t *)(pointer)) == (u_int32_t)0) { \ + recno = 0; \ + retdata = NULL; \ + pointer = NULL; \ + break; \ + } \ + recno = *(u_int32_t *)(pointer); \ + (pointer) = (u_int32_t *)(pointer) - 1; \ + retdata = (u_int8_t *) \ + (dbt)->data + *(u_int32_t *)(pointer); \ + (pointer) = (u_int32_t *)(pointer) - 1; \ + retdlen = *(u_int32_t *)(pointer); \ + (pointer) = (u_int32_t *)(pointer) - 1; \ + } while (0) + +/******************************************************* + * Access method cursors. + *******************************************************/ +struct __dbc { + DB *dbp; /* Related DB access method. */ + DB_TXN *txn; /* Associated transaction. */ + + /* + * Active/free cursor queues. + * + * !!! + * Explicit representations of structures from queue.h. + * TAILQ_ENTRY(__dbc) links; + */ + struct { + DBC *tqe_next; + DBC **tqe_prev; + } links; + + /* + * The DBT *'s below are used by the cursor routines to return + * data to the user when DBT flags indicate that DB should manage + * the returned memory. They point at a DBT containing the buffer + * and length that will be used, and "belonging" to the handle that + * should "own" this memory. This may be a "my_*" field of this + * cursor--the default--or it may be the corresponding field of + * another cursor, a DB handle, a join cursor, etc. In general, it + * will be whatever handle the user originally used for the current + * DB interface call. + */ + DBT *rskey; /* Returned secondary key. */ + DBT *rkey; /* Returned [primary] key. */ + DBT *rdata; /* Returned data. */ + + DBT my_rskey; /* Space for returned secondary key. */ + DBT my_rkey; /* Space for returned [primary] key. */ + DBT my_rdata; /* Space for returned data. */ + + u_int32_t lid; /* Default process' locker id. */ + u_int32_t locker; /* Locker for this operation. */ + DBT lock_dbt; /* DBT referencing lock. */ + DB_LOCK_ILOCK lock; /* Object to be locked. */ + DB_LOCK mylock; /* CDB lock held on this cursor. */ + + u_int cl_id; /* Remote client id. */ + + DBTYPE dbtype; /* Cursor type. */ + + DBC_INTERNAL *internal; /* Access method private. */ + + int (*c_close) __P((DBC *)); /* Methods: public. 
*/ + int (*c_count) __P((DBC *, db_recno_t *, u_int32_t)); + int (*c_del) __P((DBC *, u_int32_t)); + int (*c_dup) __P((DBC *, DBC **, u_int32_t)); + int (*c_get) __P((DBC *, DBT *, DBT *, u_int32_t)); + int (*c_pget) __P((DBC *, DBT *, DBT *, DBT *, u_int32_t)); + int (*c_put) __P((DBC *, DBT *, DBT *, u_int32_t)); + + /* Methods: private. */ + int (*c_am_bulk) __P((DBC *, DBT *, u_int32_t)); + int (*c_am_close) __P((DBC *, db_pgno_t, int *)); + int (*c_am_del) __P((DBC *)); + int (*c_am_destroy) __P((DBC *)); + int (*c_am_get) __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *)); + int (*c_am_put) __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *)); + int (*c_am_writelock) __P((DBC *)); + +#define DBC_ACTIVE 0x0001 /* Cursor in use. */ +#define DBC_COMPENSATE 0x0002 /* Cursor compensating, don't lock. */ +#define DBC_DEGREE_2 0x0004 /* Cursor has degree 2 isolation. */ +#define DBC_DIRTY_READ 0x0008 /* Cursor supports dirty reads. */ +#define DBC_OPD 0x0010 /* Cursor references off-page dups. */ +#define DBC_RECOVER 0x0020 /* Recovery cursor; don't log/lock. */ +#define DBC_RMW 0x0040 /* Acquire write flag in read op. */ +#define DBC_TRANSIENT 0x0080 /* Cursor is transient. */ +#define DBC_WRITECURSOR 0x0100 /* Cursor may be used to write (CDB). */ +#define DBC_WRITER 0x0200 /* Cursor immediately writing (CDB). */ +#define DBC_MULTIPLE 0x0400 /* Return Multiple data. */ +#define DBC_MULTIPLE_KEY 0x0800 /* Return Multiple keys and data. */ +#define DBC_OWN_LID 0x1000 /* Free lock id on destroy. */ + u_int32_t flags; +}; + +/* Key range statistics structure */ +struct __key_range { + double less; + double equal; + double greater; +}; + +/* Btree/Recno statistics structure. */ +struct __db_bt_stat { + u_int32_t bt_magic; /* Magic number. */ + u_int32_t bt_version; /* Version number. */ + u_int32_t bt_metaflags; /* Metadata flags. */ + u_int32_t bt_nkeys; /* Number of unique keys. */ + u_int32_t bt_ndata; /* Number of data items. */ + u_int32_t bt_pagesize; /* Page size. */ + u_int32_t bt_maxkey; /* Maxkey value. */ + u_int32_t bt_minkey; /* Minkey value. */ + u_int32_t bt_re_len; /* Fixed-length record length. */ + u_int32_t bt_re_pad; /* Fixed-length record pad. */ + u_int32_t bt_levels; /* Tree levels. */ + u_int32_t bt_int_pg; /* Internal pages. */ + u_int32_t bt_leaf_pg; /* Leaf pages. */ + u_int32_t bt_dup_pg; /* Duplicate pages. */ + u_int32_t bt_over_pg; /* Overflow pages. */ + u_int32_t bt_empty_pg; /* Empty pages. */ + u_int32_t bt_free; /* Pages on the free list. */ + u_int32_t bt_int_pgfree; /* Bytes free in internal pages. */ + u_int32_t bt_leaf_pgfree; /* Bytes free in leaf pages. */ + u_int32_t bt_dup_pgfree; /* Bytes free in duplicate pages. */ + u_int32_t bt_over_pgfree; /* Bytes free in overflow pages. */ +}; + +/* Hash statistics structure. */ +struct __db_h_stat { + u_int32_t hash_magic; /* Magic number. */ + u_int32_t hash_version; /* Version number. */ + u_int32_t hash_metaflags; /* Metadata flags. */ + u_int32_t hash_nkeys; /* Number of unique keys. */ + u_int32_t hash_ndata; /* Number of data items. */ + u_int32_t hash_pagesize; /* Page size. */ + u_int32_t hash_ffactor; /* Fill factor specified at create. */ + u_int32_t hash_buckets; /* Number of hash buckets. */ + u_int32_t hash_free; /* Pages on the free list. */ + u_int32_t hash_bfree; /* Bytes free on bucket pages. */ + u_int32_t hash_bigpages; /* Number of big key/data pages. */ + u_int32_t hash_big_bfree; /* Bytes free on big item pages. */ + u_int32_t hash_overflows; /* Number of overflow pages. 
*/ + u_int32_t hash_ovfl_free; /* Bytes free on ovfl pages. */ + u_int32_t hash_dup; /* Number of dup pages. */ + u_int32_t hash_dup_free; /* Bytes free on duplicate pages. */ +}; + +/* Queue statistics structure. */ +struct __db_qam_stat { + u_int32_t qs_magic; /* Magic number. */ + u_int32_t qs_version; /* Version number. */ + u_int32_t qs_metaflags; /* Metadata flags. */ + u_int32_t qs_nkeys; /* Number of unique keys. */ + u_int32_t qs_ndata; /* Number of data items. */ + u_int32_t qs_pagesize; /* Page size. */ + u_int32_t qs_extentsize; /* Pages per extent. */ + u_int32_t qs_pages; /* Data pages. */ + u_int32_t qs_re_len; /* Fixed-length record length. */ + u_int32_t qs_re_pad; /* Fixed-length record pad. */ + u_int32_t qs_pgfree; /* Bytes free in data pages. */ + u_int32_t qs_first_recno; /* First not deleted record. */ + u_int32_t qs_cur_recno; /* Next available record number. */ +}; + +/******************************************************* + * Environment. + *******************************************************/ +#define DB_REGION_MAGIC 0x120897 /* Environment magic number. */ + +/* Database Environment handle. */ +struct __db_env { + /******************************************************* + * Public: owned by the application. + *******************************************************/ + /* Error message callback. */ + void (*db_errcall) __P((const DB_ENV *, const char *, const char *)); + FILE *db_errfile; /* Error message file stream. */ + const char *db_errpfx; /* Error message prefix. */ + + FILE *db_msgfile; /* Statistics message file stream. */ + /* Statistics message callback. */ + void (*db_msgcall) __P((const DB_ENV *, const char *)); + + /* Other Callbacks. */ + void (*db_feedback) __P((DB_ENV *, int, int)); + void (*db_paniccall) __P((DB_ENV *, int)); + + /* App-specified alloc functions. */ + void *(*db_malloc) __P((size_t)); + void *(*db_realloc) __P((void *, size_t)); + void (*db_free) __P((void *)); + + /* + * Currently, the verbose list is a bit field with room for 32 + * entries. There's no reason that it needs to be limited, if + * there are ever more than 32 entries, convert to a bit array. + */ +#define DB_VERB_DEADLOCK 0x0001 /* Deadlock detection information. */ +#define DB_VERB_RECOVERY 0x0002 /* Recovery information. */ +#define DB_VERB_REPLICATION 0x0004 /* Replication information. */ +#define DB_VERB_WAITSFOR 0x0008 /* Dump waits-for table. */ + u_int32_t verbose; /* Verbose output. */ + + void *app_private; /* Application-private handle. */ + + int (*app_dispatch) /* User-specified recovery dispatch. */ + __P((DB_ENV *, DBT *, DB_LSN *, db_recops)); + + /* Locking. */ + u_int8_t *lk_conflicts; /* Two dimensional conflict matrix. */ + int lk_modes; /* Number of lock modes in table. */ + u_int32_t lk_max; /* Maximum number of locks. */ + u_int32_t lk_max_lockers;/* Maximum number of lockers. */ + u_int32_t lk_max_objects;/* Maximum number of locked objects. */ + u_int32_t lk_detect; /* Deadlock detect on all conflicts. */ + db_timeout_t lk_timeout; /* Lock timeout period. */ + + /* Logging. */ + u_int32_t lg_bsize; /* Buffer size. */ + u_int32_t lg_size; /* Log file size. */ + u_int32_t lg_regionmax; /* Region size. */ + + /* Memory pool. */ + u_int32_t mp_gbytes; /* Cachesize: GB. */ + u_int32_t mp_bytes; /* Cachesize: Bytes. */ + u_int mp_ncache; /* Number of cache regions. */ + size_t mp_mmapsize; /* Maximum file size for mmap. */ + int mp_maxopenfd; /* Maximum open file descriptors. */ + int mp_maxwrite; /* Maximum buffers to write. 
*/ + int /* Sleep after writing max buffers. */ + mp_maxwrite_sleep; + + /* Replication */ + int rep_eid; /* environment id. */ + int (*rep_send) /* Send function. */ + __P((DB_ENV *, const DBT *, const DBT *, + const DB_LSN *, int, u_int32_t)); + + /* Transactions. */ + u_int32_t tx_max; /* Maximum number of transactions. */ + time_t tx_timestamp; /* Recover to specific timestamp. */ + db_timeout_t tx_timeout; /* Timeout for transactions. */ + + /******************************************************* + * Private: owned by DB. + *******************************************************/ + /* User files, paths. */ + char *db_home; /* Database home. */ + char *db_log_dir; /* Database log file directory. */ + char *db_tmp_dir; /* Database tmp file directory. */ + + char **db_data_dir; /* Database data file directories. */ + int data_cnt; /* Database data file slots. */ + int data_next; /* Next Database data file slot. */ + + int db_mode; /* Default open permissions. */ + int dir_mode; /* Intermediate directory perms. */ + u_int32_t env_lid; /* Locker ID in non-threaded handles. */ + u_int32_t open_flags; /* Flags passed to DB_ENV->open. */ + + void *reginfo; /* REGINFO structure reference. */ + DB_FH *lockfhp; /* fcntl(2) locking file handle. */ + + int (**recover_dtab) /* Dispatch table for recover funcs. */ + __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); + size_t recover_dtab_size; + /* Slots in the dispatch table. */ + + void *cl_handle; /* RPC: remote client handle. */ + u_int cl_id; /* RPC: remote client env id. */ + + int db_ref; /* DB reference count. */ + + long shm_key; /* shmget(2) key. */ + u_int32_t tas_spins; /* test-and-set spins. */ + + /* + * List of open DB handles for this DB_ENV, used for cursor + * adjustment. Must be protected for multi-threaded support. + * + * !!! + * As this structure is allocated in per-process memory, the + * mutex may need to be stored elsewhere on architectures unable + * to support mutexes in heap memory, e.g. HP/UX 9. + * + * !!! + * Explicit representation of structure in queue.h. + * LIST_HEAD(dblist, __db); + */ + DB_MUTEX *dblist_mutexp; /* Mutex. */ + struct { + struct __db *lh_first; + } dblist; + + /* + * XA support. + * + * !!! + * Explicit representations of structures from queue.h. + * TAILQ_ENTRY(__db_env) links; + * TAILQ_HEAD(xa_txn, __db_txn); + */ + struct { + struct __db_env *tqe_next; + struct __db_env **tqe_prev; + } links; + struct __xa_txn { /* XA Active Transactions. */ + struct __db_txn *tqh_first; + struct __db_txn **tqh_last; + } xa_txn; + int xa_rmid; /* XA Resource Manager ID. */ + + /* API-private structure. */ + void *api1_internal; /* C++, Perl API private */ + void *api2_internal; /* Java API private */ + + char *passwd; /* Cryptography support. */ + size_t passwd_len; + void *crypto_handle; /* Primary handle. */ + DB_MUTEX *mt_mutexp; /* Mersenne Twister mutex. */ + int mti; /* Mersenne Twister index. */ + u_long *mt; /* Mersenne Twister state vector. */ + + /* DB_ENV Methods. */ + int (*close) __P((DB_ENV *, u_int32_t)); + int (*dbremove) __P((DB_ENV *, + DB_TXN *, const char *, const char *, u_int32_t)); + int (*dbrename) __P((DB_ENV *, DB_TXN *, + const char *, const char *, const char *, u_int32_t)); + void (*err) __P((const DB_ENV *, int, const char *, ...)); + void (*errx) __P((const DB_ENV *, const char *, ...)); + int (*open) __P((DB_ENV *, const char *, u_int32_t, int)); + int (*remove) __P((DB_ENV *, const char *, u_int32_t)); + int (*stat_print) __P((DB_ENV *, u_int32_t)); + + /* House-keeping. 
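 *
 * For reference, the usual construction sequence for this handle; the home
 * path and flag set are representative only, and error handling is omitted:
 *
 *     DB_ENV *dbenv;
 *
 *     db_env_create(&dbenv, 0);
 *     dbenv->set_cachesize(dbenv, 0, 4 * 1024 * 1024, 1);
 *     dbenv->open(dbenv, "/path/to/home", DB_CREATE |
 *         DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0600);
 *     /* ... use the environment ... */
 *     dbenv->close(dbenv, 0);
 *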
*/ + int (*fileid_reset) __P((DB_ENV *, char *, int)); + int (*is_bigendian) __P((void)); + int (*lsn_reset) __P((DB_ENV *, char *, int)); + int (*prdbt) __P((DBT *, + int, const char *, void *, int (*)(void *, const void *), int)); + + /* Setters/getters. */ + int (*set_alloc) __P((DB_ENV *, void *(*)(size_t), + void *(*)(void *, size_t), void (*)(void *))); + int (*set_app_dispatch) __P((DB_ENV *, + int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops))); + int (*get_data_dirs) __P((DB_ENV *, const char ***)); + int (*set_data_dir) __P((DB_ENV *, const char *)); + int (*get_encrypt_flags) __P((DB_ENV *, u_int32_t *)); + int (*set_encrypt) __P((DB_ENV *, const char *, u_int32_t)); + void (*set_errcall) __P((DB_ENV *, + void (*)(const DB_ENV *, const char *, const char *))); + void (*get_errfile) __P((DB_ENV *, FILE **)); + void (*set_errfile) __P((DB_ENV *, FILE *)); + void (*get_errpfx) __P((DB_ENV *, const char **)); + void (*set_errpfx) __P((DB_ENV *, const char *)); + int (*set_feedback) __P((DB_ENV *, void (*)(DB_ENV *, int, int))); + int (*get_flags) __P((DB_ENV *, u_int32_t *)); + int (*set_flags) __P((DB_ENV *, u_int32_t, int)); + int (*get_home) __P((DB_ENV *, const char **)); + int (*set_intermediate_dir) __P((DB_ENV *, int, u_int32_t)); + int (*get_open_flags) __P((DB_ENV *, u_int32_t *)); + int (*set_paniccall) __P((DB_ENV *, void (*)(DB_ENV *, int))); + int (*set_rpc_server) __P((DB_ENV *, + void *, const char *, long, long, u_int32_t)); + int (*get_shm_key) __P((DB_ENV *, long *)); + int (*set_shm_key) __P((DB_ENV *, long)); + void (*set_msgcall) __P((DB_ENV *, + void (*)(const DB_ENV *, const char *))); + void (*get_msgfile) __P((DB_ENV *, FILE **)); + void (*set_msgfile) __P((DB_ENV *, FILE *)); + int (*get_tas_spins) __P((DB_ENV *, u_int32_t *)); + int (*set_tas_spins) __P((DB_ENV *, u_int32_t)); + int (*get_tmp_dir) __P((DB_ENV *, const char **)); + int (*set_tmp_dir) __P((DB_ENV *, const char *)); + int (*get_verbose) __P((DB_ENV *, u_int32_t, int *)); + int (*set_verbose) __P((DB_ENV *, u_int32_t, int)); + + void *lg_handle; /* Log handle and methods. */ + int (*get_lg_bsize) __P((DB_ENV *, u_int32_t *)); + int (*set_lg_bsize) __P((DB_ENV *, u_int32_t)); + int (*get_lg_dir) __P((DB_ENV *, const char **)); + int (*set_lg_dir) __P((DB_ENV *, const char *)); + int (*get_lg_max) __P((DB_ENV *, u_int32_t *)); + int (*set_lg_max) __P((DB_ENV *, u_int32_t)); + int (*get_lg_regionmax) __P((DB_ENV *, u_int32_t *)); + int (*set_lg_regionmax) __P((DB_ENV *, u_int32_t)); + int (*log_archive) __P((DB_ENV *, char **[], u_int32_t)); + int (*log_cursor) __P((DB_ENV *, DB_LOGC **, u_int32_t)); + int (*log_file) __P((DB_ENV *, const DB_LSN *, char *, size_t)); + int (*log_flush) __P((DB_ENV *, const DB_LSN *)); + int (*log_put) __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t)); + int (*log_stat) __P((DB_ENV *, DB_LOG_STAT **, u_int32_t)); + int (*log_stat_print) __P((DB_ENV *, u_int32_t)); + + void *lk_handle; /* Lock handle and methods. 
*/ + int (*get_lk_conflicts) __P((DB_ENV *, const u_int8_t **, int *)); + int (*set_lk_conflicts) __P((DB_ENV *, u_int8_t *, int)); + int (*get_lk_detect) __P((DB_ENV *, u_int32_t *)); + int (*set_lk_detect) __P((DB_ENV *, u_int32_t)); + int (*set_lk_max) __P((DB_ENV *, u_int32_t)); + int (*get_lk_max_locks) __P((DB_ENV *, u_int32_t *)); + int (*set_lk_max_locks) __P((DB_ENV *, u_int32_t)); + int (*get_lk_max_lockers) __P((DB_ENV *, u_int32_t *)); + int (*set_lk_max_lockers) __P((DB_ENV *, u_int32_t)); + int (*get_lk_max_objects) __P((DB_ENV *, u_int32_t *)); + int (*set_lk_max_objects) __P((DB_ENV *, u_int32_t)); + int (*lock_detect) __P((DB_ENV *, u_int32_t, u_int32_t, int *)); + int (*lock_get) __P((DB_ENV *, + u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *)); + int (*lock_put) __P((DB_ENV *, DB_LOCK *)); + int (*lock_id) __P((DB_ENV *, u_int32_t *)); + int (*lock_id_free) __P((DB_ENV *, u_int32_t)); + int (*lock_stat) __P((DB_ENV *, DB_LOCK_STAT **, u_int32_t)); + int (*lock_stat_print) __P((DB_ENV *, u_int32_t)); + int (*lock_vec) __P((DB_ENV *, + u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **)); + + void *mp_handle; /* Mpool handle and methods. */ + int (*get_cachesize) __P((DB_ENV *, u_int32_t *, u_int32_t *, int *)); + int (*set_cachesize) __P((DB_ENV *, u_int32_t, u_int32_t, int)); + int (*get_mp_mmapsize) __P((DB_ENV *, size_t *)); + int (*set_mp_mmapsize) __P((DB_ENV *, size_t)); + int (*get_mp_max_openfd) __P((DB_ENV *, int *)); + int (*set_mp_max_openfd) __P((DB_ENV *, int)); + int (*get_mp_max_write) __P((DB_ENV *, int *, int *)); + int (*set_mp_max_write) __P((DB_ENV *, int, int)); + int (*memp_fcreate) __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t)); + int (*memp_register) __P((DB_ENV *, int, + int (*)(DB_ENV *, db_pgno_t, void *, DBT *), + int (*)(DB_ENV *, db_pgno_t, void *, DBT *))); + int (*memp_stat) __P((DB_ENV *, + DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, u_int32_t)); + int (*memp_stat_print) __P((DB_ENV *, u_int32_t)); + int (*memp_sync) __P((DB_ENV *, DB_LSN *)); + int (*memp_trickle) __P((DB_ENV *, int, int *)); + + void *rep_handle; /* Replication handle and methods. */ + int (*rep_elect) __P((DB_ENV *, int, int, int, + u_int32_t, int *, u_int32_t)); + int (*rep_flush) __P((DB_ENV *)); + int (*rep_process_message) __P((DB_ENV *, DBT *, DBT *, + int *, DB_LSN *)); + int (*rep_start) __P((DB_ENV *, DBT *, u_int32_t)); + int (*rep_stat) __P((DB_ENV *, DB_REP_STAT **, u_int32_t)); + int (*rep_stat_print) __P((DB_ENV *, u_int32_t)); + int (*get_rep_limit) __P((DB_ENV *, u_int32_t *, u_int32_t *)); + int (*set_rep_limit) __P((DB_ENV *, u_int32_t, u_int32_t)); + int (*set_rep_request) __P((DB_ENV *, u_int32_t, u_int32_t)); + int (*set_rep_transport) __P((DB_ENV *, int, + int (*) (DB_ENV *, const DBT *, const DBT *, const DB_LSN *, + int, u_int32_t))); + + void *tx_handle; /* Txn handle and methods. 
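 *
 * A small sketch of the standalone locking methods above, with a DBT obj
 * describing the thing to lock and DB_LOCK_WRITE being one of the
 * db_lockmode_t values defined earlier in this header; error handling
 * omitted:
 *
 *     DB_LOCK lock;
 *     u_int32_t locker;
 *
 *     dbenv->lock_id(dbenv, &locker);
 *     dbenv->lock_get(dbenv, locker, 0, &obj, DB_LOCK_WRITE, &lock);
 *     /* ... operate on the locked object ... */
 *     dbenv->lock_put(dbenv, &lock);
 *     dbenv->lock_id_free(dbenv, locker);
 *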
*/ + int (*get_tx_max) __P((DB_ENV *, u_int32_t *)); + int (*set_tx_max) __P((DB_ENV *, u_int32_t)); + int (*get_tx_timestamp) __P((DB_ENV *, time_t *)); + int (*set_tx_timestamp) __P((DB_ENV *, time_t *)); + int (*txn_begin) __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t)); + int (*txn_checkpoint) __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t)); + int (*txn_recover) __P((DB_ENV *, + DB_PREPLIST *, long, long *, u_int32_t)); + int (*txn_stat) __P((DB_ENV *, DB_TXN_STAT **, u_int32_t)); + int (*txn_stat_print) __P((DB_ENV *, u_int32_t)); + int (*get_timeout) __P((DB_ENV *, db_timeout_t *, u_int32_t)); + int (*set_timeout) __P((DB_ENV *, db_timeout_t, u_int32_t)); + +#define DB_TEST_ELECTINIT 1 /* after __rep_elect_init */ +#define DB_TEST_ELECTVOTE1 2 /* after sending VOTE1 */ +#define DB_TEST_POSTDESTROY 3 /* after destroy op */ +#define DB_TEST_POSTLOG 4 /* after logging all pages */ +#define DB_TEST_POSTLOGMETA 5 /* after logging meta in btree */ +#define DB_TEST_POSTOPEN 6 /* after __os_open */ +#define DB_TEST_POSTSYNC 7 /* after syncing the log */ +#define DB_TEST_PREDESTROY 8 /* before destroy op */ +#define DB_TEST_PREOPEN 9 /* before __os_open */ +#define DB_TEST_SUBDB_LOCKS 10 /* subdb locking tests */ + int test_abort; /* Abort value for testing. */ + int test_check; /* Checkpoint value for testing. */ + int test_copy; /* Copy value for testing. */ + +#define DB_ENV_AUTO_COMMIT 0x0000001 /* DB_AUTO_COMMIT. */ +#define DB_ENV_CDB 0x0000002 /* DB_INIT_CDB. */ +#define DB_ENV_CDB_ALLDB 0x0000004 /* CDB environment wide locking. */ +#define DB_ENV_CREATE 0x0000008 /* DB_CREATE set. */ +#define DB_ENV_DBLOCAL 0x0000010 /* DB_ENV allocated for private DB. */ +#define DB_ENV_DIRECT_DB 0x0000020 /* DB_DIRECT_DB set. */ +#define DB_ENV_DIRECT_LOG 0x0000040 /* DB_DIRECT_LOG set. */ +#define DB_ENV_DSYNC_LOG 0x0000080 /* DB_DSYNC_LOG set. */ +#define DB_ENV_FATAL 0x0000100 /* Doing fatal recovery in env. */ +#define DB_ENV_LOCKDOWN 0x0000200 /* DB_LOCKDOWN set. */ +#define DB_ENV_LOG_AUTOREMOVE 0x0000400 /* DB_LOG_AUTOREMOVE set. */ +#define DB_ENV_LOG_INMEMORY 0x0000800 /* DB_LOG_INMEMORY set. */ +#define DB_ENV_NOLOCKING 0x0001000 /* DB_NOLOCKING set. */ +#define DB_ENV_NOMMAP 0x0002000 /* DB_NOMMAP set. */ +#define DB_ENV_NOPANIC 0x0004000 /* Okay if panic set. */ +#define DB_ENV_OPEN_CALLED 0x0008000 /* DB_ENV->open called. */ +#define DB_ENV_OVERWRITE 0x0010000 /* DB_OVERWRITE set. */ +#define DB_ENV_PRIVATE 0x0020000 /* DB_PRIVATE set. */ +#define DB_ENV_REGION_INIT 0x0040000 /* DB_REGION_INIT set. */ +#define DB_ENV_RPCCLIENT 0x0080000 /* DB_RPCCLIENT set. */ +#define DB_ENV_RPCCLIENT_GIVEN 0x0100000 /* User-supplied RPC client struct */ +#define DB_ENV_SYSTEM_MEM 0x0200000 /* DB_SYSTEM_MEM set. */ +#define DB_ENV_THREAD 0x0400000 /* DB_THREAD set. */ +#define DB_ENV_TIME_NOTGRANTED 0x0800000 /* DB_TIME_NOTGRANTED set. */ +#define DB_ENV_TXN_NOSYNC 0x1000000 /* DB_TXN_NOSYNC set. */ +#define DB_ENV_TXN_WRITE_NOSYNC 0x2000000 /* DB_TXN_WRITE_NOSYNC set. */ +#define DB_ENV_YIELDCPU 0x4000000 /* DB_YIELDCPU set. */ + u_int32_t flags; +}; + +#ifndef DB_DBM_HSEARCH +#define DB_DBM_HSEARCH 0 /* No historic interfaces by default. */ +#endif +#if DB_DBM_HSEARCH != 0 +/******************************************************* + * Dbm/Ndbm historic interfaces. + *******************************************************/ +typedef struct __db DBM; + +#define DBM_INSERT 0 /* Flags to dbm_store(). 
*/ +#define DBM_REPLACE 1 + +/* + * The DB support for ndbm(3) always appends this suffix to the + * file name to avoid overwriting the user's original database. + */ +#define DBM_SUFFIX ".db" + +#if defined(_XPG4_2) +typedef struct { + char *dptr; + size_t dsize; +} datum; +#else +typedef struct { + char *dptr; + int dsize; +} datum; +#endif + +/* + * Translate NDBM calls into DB calls so that DB doesn't step on the + * application's name space. + */ +#define dbm_clearerr(a) __db_ndbm_clearerr(a) +#define dbm_close(a) __db_ndbm_close(a) +#define dbm_delete(a, b) __db_ndbm_delete(a, b) +#define dbm_dirfno(a) __db_ndbm_dirfno(a) +#define dbm_error(a) __db_ndbm_error(a) +#define dbm_fetch(a, b) __db_ndbm_fetch(a, b) +#define dbm_firstkey(a) __db_ndbm_firstkey(a) +#define dbm_nextkey(a) __db_ndbm_nextkey(a) +#define dbm_open(a, b, c) __db_ndbm_open(a, b, c) +#define dbm_pagfno(a) __db_ndbm_pagfno(a) +#define dbm_rdonly(a) __db_ndbm_rdonly(a) +#define dbm_store(a, b, c, d) \ + __db_ndbm_store(a, b, c, d) + +/* + * Translate DBM calls into DB calls so that DB doesn't step on the + * application's name space. + * + * The global variables dbrdonly, dirf and pagf were not retained when 4BSD + * replaced the dbm interface with ndbm, and are not supported here. + */ +#define dbminit(a) __db_dbm_init(a) +#define dbmclose __db_dbm_close +#if !defined(__cplusplus) +#define delete(a) __db_dbm_delete(a) +#endif +#define fetch(a) __db_dbm_fetch(a) +#define firstkey __db_dbm_firstkey +#define nextkey(a) __db_dbm_nextkey(a) +#define store(a, b) __db_dbm_store(a, b) + +/******************************************************* + * Hsearch historic interface. + *******************************************************/ +typedef enum { + FIND, ENTER +} ACTION; + +typedef struct entry { + char *key; + char *data; +} ENTRY; + +#define hcreate(a) __db_hcreate(a) +#define hdestroy __db_hdestroy +#define hsearch(a, b) __db_hsearch(a, b) + +#endif /* DB_DBM_HSEARCH */ + +#if defined(__cplusplus) +} +#endif +#endif /* !_DB_H_ */ + +/* DO NOT EDIT: automatically built by dist/s_include. 
*/ +#ifndef _DB_EXT_PROT_IN_ +#define _DB_EXT_PROT_IN_ + +#if defined(__cplusplus) +extern "C" { +#endif + +int db_create __P((DB **, DB_ENV *, u_int32_t)); +char *db_strerror __P((int)); +int db_env_create __P((DB_ENV **, u_int32_t)); +char *db_version __P((int *, int *, int *)); +int log_compare __P((const DB_LSN *, const DB_LSN *)); +int db_env_set_func_close __P((int (*)(int))); +int db_env_set_func_dirfree __P((void (*)(char **, int))); +int db_env_set_func_dirlist __P((int (*)(const char *, char ***, int *))); +int db_env_set_func_exists __P((int (*)(const char *, int *))); +int db_env_set_func_free __P((void (*)(void *))); +int db_env_set_func_fsync __P((int (*)(int))); +int db_env_set_func_ftruncate __P((int (*)(int, off_t))); +int db_env_set_func_ioinfo __P((int (*)(const char *, int, u_int32_t *, u_int32_t *, u_int32_t *))); +int db_env_set_func_malloc __P((void *(*)(size_t))); +int db_env_set_func_map __P((int (*)(char *, size_t, int, int, void **))); +int db_env_set_func_pread __P((ssize_t (*)(int, void *, size_t, off_t))); +int db_env_set_func_pwrite __P((ssize_t (*)(int, const void *, size_t, off_t))); +int db_env_set_func_open __P((int (*)(const char *, int, ...))); +int db_env_set_func_read __P((ssize_t (*)(int, void *, size_t))); +int db_env_set_func_realloc __P((void *(*)(void *, size_t))); +int db_env_set_func_rename __P((int (*)(const char *, const char *))); +int db_env_set_func_seek __P((int (*)(int, off_t, int))); +int db_env_set_func_sleep __P((int (*)(u_long, u_long))); +int db_env_set_func_unlink __P((int (*)(const char *))); +int db_env_set_func_unmap __P((int (*)(void *, size_t))); +int db_env_set_func_write __P((ssize_t (*)(int, const void *, size_t))); +int db_env_set_func_yield __P((int (*)(void))); +int db_sequence_create __P((DB_SEQUENCE **, DB *, u_int32_t)); +#if DB_DBM_HSEARCH != 0 +int __db_ndbm_clearerr __P((DBM *)); +void __db_ndbm_close __P((DBM *)); +int __db_ndbm_delete __P((DBM *, datum)); +int __db_ndbm_dirfno __P((DBM *)); +int __db_ndbm_error __P((DBM *)); +datum __db_ndbm_fetch __P((DBM *, datum)); +datum __db_ndbm_firstkey __P((DBM *)); +datum __db_ndbm_nextkey __P((DBM *)); +DBM *__db_ndbm_open __P((const char *, int, int)); +int __db_ndbm_pagfno __P((DBM *)); +int __db_ndbm_rdonly __P((DBM *)); +int __db_ndbm_store __P((DBM *, datum, datum, int)); +int __db_dbm_close __P((void)); +int __db_dbm_delete __P((datum)); +datum __db_dbm_fetch __P((datum)); +datum __db_dbm_firstkey __P((void)); +int __db_dbm_init __P((char *)); +datum __db_dbm_nextkey __P((datum)); +int __db_dbm_store __P((datum, datum)); +#endif +#if DB_DBM_HSEARCH != 0 +int __db_hcreate __P((size_t)); +ENTRY *__db_hsearch __P((ENTRY, ACTION)); +void __db_hdestroy __P((void)); +#endif + +#if defined(__cplusplus) +} +#endif +#endif /* !_DB_EXT_PROT_IN_ */ diff --git a/db/build_win64/db_archive.dsp b/db/build_win64/db_archive.dsp new file mode 100644 index 000000000..ce7ffbded --- /dev/null +++ b/db/build_win64/db_archive.dsp @@ -0,0 +1,148 @@ +# Microsoft Developer Studio Project File - Name="db_archive" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=db_archive - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "db_archive.mak". 
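A minimal usage sketch of the DB_ENV/DB method tables and the db_env_create/db_create/db_strerror entry points declared above. The DB_CREATE, DB_INIT_*, DB_BTREE and DB_AUTO_COMMIT flags and the open/close methods are defined earlier in db.h and are assumed here; the home directory, file name and cache size are placeholders, and cleanup of partially constructed handles is omitted.

#include <stdio.h>
#include "db.h"

/*
 * Open a transactional environment and a Btree database through the
 * method tables above; errors are reported via db_strerror() and the
 * partially built handles are simply abandoned.
 */
int
open_env_and_db(const char *home, DB_ENV **envp, DB **dbpp)
{
	DB_ENV *dbenv;
	DB *dbp;
	int ret;

	if ((ret = db_env_create(&dbenv, 0)) != 0)
		goto err;
	/* 0GB + 8MB of cache in a single region. */
	if ((ret = dbenv->set_cachesize(dbenv, 0, 8 * 1024 * 1024, 1)) != 0)
		goto err;
	if ((ret = dbenv->open(dbenv, home, DB_CREATE | DB_INIT_LOCK |
	    DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0)
		goto err;

	if ((ret = db_create(&dbp, dbenv, 0)) != 0)
		goto err;
	if ((ret = dbp->open(dbp, NULL, "data.db", NULL,
	    DB_BTREE, DB_CREATE | DB_AUTO_COMMIT, 0644)) != 0)
		goto err;

	*envp = dbenv;
	*dbpp = dbp;
	return (0);

err:	fprintf(stderr, "setup failed: %s\n", db_strerror(ret));
	return (ret);
}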
+!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "db_archive.mak" CFG="db_archive - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "db_archive - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "db_archive - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "db_archive - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "db_archive - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "db_archive - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "db_archive - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "db_archive - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "db_archive - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "db_archive - Win32 Release" +# Name "db_archive - Win32 Debug" +# Name "db_archive - Win32 Release Static" +# Name "db_archive - Win32 Debug Static" +# Begin Source File + +SOURCE=..\db_archive\db_archive.c +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/db_checkpoint.dsp b/db/build_win64/db_checkpoint.dsp new file mode 100644 index 000000000..a68063fcd --- /dev/null +++ b/db/build_win64/db_checkpoint.dsp @@ -0,0 +1,148 @@ +# Microsoft Developer Studio Project File - Name="db_checkpoint" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=db_checkpoint - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "db_checkpoint.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "db_checkpoint.mak" CFG="db_checkpoint - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "db_checkpoint - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "db_checkpoint - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "db_checkpoint - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "db_checkpoint - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "db_checkpoint - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "db_checkpoint - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "db_checkpoint - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "db_checkpoint - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "db_checkpoint - Win32 Release" +# Name "db_checkpoint - Win32 Debug" +# Name "db_checkpoint - Win32 Release Static" +# Name "db_checkpoint - Win32 Debug Static" +# Begin Source File + +SOURCE=..\db_checkpoint\db_checkpoint.c +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/db_config.h b/db/build_win64/db_config.h new file mode 100644 index 000000000..0dad44320 --- /dev/null +++ b/db/build_win64/db_config.h @@ -0,0 +1,440 @@ +/* DO NOT EDIT: automatically built by dist/s_win32. */ +/* Define to 1 if you want to build a version for running the test suite. */ +/* #undef CONFIG_TEST */ + +/* We use DB_WIN32 much as one would use _WIN32 -- to specify that we're using + an operating system environment that supports Win32 calls and semantics. We + don't use _WIN32 because Cygwin/GCC also defines _WIN32, even though + Cygwin/GCC closely emulates the Unix environment. */ +#define DB_WIN32 1 + +/* Define to 1 if you want a debugging version. */ +/* #undef DEBUG */ +#if defined(_DEBUG) +#if !defined(DEBUG) +#define DEBUG 1 +#endif +#endif + +/* Define to 1 if you want a version that logs read operations. */ +/* #undef DEBUG_ROP */ + +/* Define to 1 if you want a version that logs write operations. */ +/* #undef DEBUG_WOP */ + +/* Define to 1 if you want a version with run-time diagnostic checking. */ +/* #undef DIAGNOSTIC */ + +/* Define to 1 if you have the `clock_gettime' function. */ +/* #undef HAVE_CLOCK_GETTIME */ + +/* Define to 1 if Berkeley DB release includes strong cryptography. */ +#ifndef HAVE_SMALLBUILD +#define HAVE_CRYPTO 1 +#endif + +/* Define to 1 if you have the `directio' function. */ +/* #undef HAVE_DIRECTIO */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_DIRENT_H */ + +/* Define to 1 if you have the header file. 
*/ +/* #undef HAVE_DLFCN_H */ + +/* Define to 1 if you have EXIT_SUCCESS/EXIT_FAILURE #defines. */ +#define HAVE_EXIT_SUCCESS 1 + +/* Define to 1 if fcntl/F_SETFD denies child access to file descriptors. */ +/* #undef HAVE_FCNTL_F_SETFD */ + +/* Define to 1 if you have the `fdatasync' function. */ +/* #undef HAVE_FDATASYNC */ + +/* Define to 1 if allocated filesystem blocks are not zeroed. */ +#define HAVE_FILESYSTEM_NOTZERO 1 + +/* Define to 1 if you have the `ftruncate' function. */ +#define HAVE_FTRUNCATE 1 + +/* Define to 1 if you have the `getcwd' function. */ +#define HAVE_GETCWD 1 + +/* Define to 1 if you have the `getopt' function. */ +/* #undef HAVE_GETOPT */ + +/* Define to 1 if you have the `getrusage' function. */ +/* #undef HAVE_GETRUSAGE */ + +/* Define to 1 if you have the `gettimeofday' function. */ +/* #undef HAVE_GETTIMEOFDAY */ + +/* Define to 1 if you have the `getuid' function. */ +/* #undef HAVE_GETUID */ + +/* Define to 1 if building Hash access method. */ +#ifndef HAVE_SMALLBUILD +#define HAVE_HASH 1 +#endif + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_INTTYPES_H */ + +/* Define to 1 if you have the `nsl' library (-lnsl). */ +/* #undef HAVE_LIBNSL */ + +/* Define to 1 if the system has the type `long long'. */ +#define HAVE_LONG_LONG 1 + +/* Define to 1 if you have the `memcmp' function. */ +#define HAVE_MEMCMP 1 + +/* Define to 1 if you have the `memcpy' function. */ +#define HAVE_MEMCPY 1 + +/* Define to 1 if you have the `memmove' function. */ +#define HAVE_MEMMOVE 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `mlock' function. */ +/* #undef HAVE_MLOCK */ + +/* Define to 1 if you have the `mmap' function. */ +/* #undef HAVE_MMAP */ + +/* Define to 1 if you have the `munlock' function. */ +/* #undef HAVE_MUNLOCK */ + +/* Define to 1 if you have the `munmap' function. */ +/* #undef HAVE_MUNMAP */ + +/* Define to 1 to use the GCC compiler and 68K assembly language mutexes. */ +/* #undef HAVE_MUTEX_68K_GCC_ASSEMBLY */ + +/* Define to 1 to use the AIX _check_lock mutexes. */ +/* #undef HAVE_MUTEX_AIX_CHECK_LOCK */ + +/* Define to 1 to use the GCC compiler and Alpha assembly language mutexes. */ +/* #undef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY */ + +/* Define to 1 to use the GCC compiler and ARM assembly language mutexes. */ +/* #undef HAVE_MUTEX_ARM_GCC_ASSEMBLY */ + +/* Define to 1 to use the Apple/Darwin _spin_lock_try mutexes. */ +/* #undef HAVE_MUTEX_DARWIN_SPIN_LOCK_TRY */ + +/* Define to 1 to use the UNIX fcntl system call mutexes. */ +/* #undef HAVE_MUTEX_FCNTL */ + +/* Define to 1 to use the GCC compiler and PaRisc assembly language mutexes. + */ +/* #undef HAVE_MUTEX_HPPA_GCC_ASSEMBLY */ + +/* Define to 1 to use the msem_XXX mutexes on HP-UX. */ +/* #undef HAVE_MUTEX_HPPA_MSEM_INIT */ + +/* Define to 1 to use the GCC compiler and IA64 assembly language mutexes. */ +/* #undef HAVE_MUTEX_IA64_GCC_ASSEMBLY */ + +/* Define to 1 to use the msem_XXX mutexes on systems other than HP-UX. */ +/* #undef HAVE_MUTEX_MSEM_INIT */ + +/* Define to 1 to use the GCC compiler and PowerPC assembly language mutexes. + */ +/* #undef HAVE_MUTEX_PPC_GCC_ASSEMBLY */ + +/* Define to 1 to use POSIX 1003.1 pthread_XXX mutexes. */ +/* #undef HAVE_MUTEX_PTHREADS */ + +/* Define to 1 to use Reliant UNIX initspin mutexes. */ +/* #undef HAVE_MUTEX_RELIANTUNIX_INITSPIN */ + +/* Define to 1 to use the IBM C compiler and S/390 assembly language mutexes. 
+ */ +/* #undef HAVE_MUTEX_S390_CC_ASSEMBLY */ + +/* Define to 1 to use the GCC compiler and S/390 assembly language mutexes. */ +/* #undef HAVE_MUTEX_S390_GCC_ASSEMBLY */ + +/* Define to 1 to use the SCO compiler and x86 assembly language mutexes. */ +/* #undef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY */ + +/* Define to 1 to use the obsolete POSIX 1003.1 sema_XXX mutexes. */ +/* #undef HAVE_MUTEX_SEMA_INIT */ + +/* Define to 1 to use the SGI XXX_lock mutexes. */ +/* #undef HAVE_MUTEX_SGI_INIT_LOCK */ + +/* Define to 1 to use the Solaris _lock_XXX mutexes. */ +/* #undef HAVE_MUTEX_SOLARIS_LOCK_TRY */ + +/* Define to 1 to use the Solaris lwp threads mutexes. */ +/* #undef HAVE_MUTEX_SOLARIS_LWP */ + +/* Define to 1 to use the GCC compiler and Sparc assembly language mutexes. */ +/* #undef HAVE_MUTEX_SPARC_GCC_ASSEMBLY */ + +/* Define to 1 if mutexes hold system resources. */ +/* #undef HAVE_MUTEX_SYSTEM_RESOURCES */ + +/* Define to 1 if fast mutexes are available. */ +#define HAVE_MUTEX_THREADS 1 + +/* Define to 1 to configure mutexes intra-process only. */ +/* #undef HAVE_MUTEX_THREAD_ONLY */ + +/* Define to 1 to use the CC compiler and Tru64 assembly language mutexes. */ +/* #undef HAVE_MUTEX_TRU64_CC_ASSEMBLY */ + +/* Define to 1 to use the UNIX International mutexes. */ +/* #undef HAVE_MUTEX_UI_THREADS */ + +/* Define to 1 to use the UTS compiler and assembly language mutexes. */ +/* #undef HAVE_MUTEX_UTS_CC_ASSEMBLY */ + +/* Define to 1 to use VMS mutexes. */ +/* #undef HAVE_MUTEX_VMS */ + +/* Define to 1 to use VxWorks mutexes. */ +/* #undef HAVE_MUTEX_VXWORKS */ + +/* Define to 1 to use the MSVC compiler and Windows mutexes. */ +#define HAVE_MUTEX_WIN32 1 + +/* Define to 1 to use the GCC compiler and Windows mutexes. */ +/* #undef HAVE_MUTEX_WIN32_GCC */ + +/* Define to 1 to use the GCC compiler and x86 assembly language mutexes. */ +/* #undef HAVE_MUTEX_X86_GCC_ASSEMBLY */ + +/* Define to 1 if you have the header file, and it defines `DIR'. */ +/* #undef HAVE_NDIR_H */ + +/* Define to 1 if you have the O_DIRECT flag. */ +/* #undef HAVE_O_DIRECT */ + +/* Define to 1 if you have the `pread' function. */ +/* #undef HAVE_PREAD */ + +/* Define to 1 if you have the `pstat_getdynamic' function. */ +/* #undef HAVE_PSTAT_GETDYNAMIC */ + +/* Define to 1 if you have the `pwrite' function. */ +/* #undef HAVE_PWRITE */ + +/* Define to 1 if building on QNX. */ +/* #undef HAVE_QNX */ + +/* Define to 1 if building Queue access method. */ +#ifndef HAVE_SMALLBUILD +#define HAVE_QUEUE 1 +#endif + +/* Define to 1 if you have the `raise' function. */ +#define HAVE_RAISE 1 + +/* Define to 1 if you have the `rand' function. */ +#define HAVE_RAND 1 + +/* Define to 1 if building replication support. */ +#ifndef HAVE_SMALLBUILD +#define HAVE_REPLICATION 1 +#endif + +/* Define to 1 if building RPC client/server. */ +/* #undef HAVE_RPC */ + +/* Define to 1 if you have the `sched_yield' function. */ +/* #undef HAVE_SCHED_YIELD */ + +/* Define to 1 if you have the `select' function. */ +/* #undef HAVE_SELECT */ + +/* Define to 1 if building sequence support. */ +#define HAVE_SEQUENCE 1 + +/* Define to 1 if you have the `shmget' function. */ +/* #undef HAVE_SHMGET */ + +/* Define to 1 if you have the `snprintf' function. */ +#define HAVE_SNPRINTF 1 + +/* Define to 1 if you have the `srand' function. */ +#define HAVE_SRAND 1 + +/* Define to 1 if building statistics support. */ +#define HAVE_STATISTICS 1 + +/* Define to 1 if you have the header file. 
*/ +/* #undef HAVE_STDINT_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the `strcasecmp' function. */ +/* #undef HAVE_STRCASECMP */ + +/* Define to 1 if you have the `strdup' function. */ +#define HAVE_STRDUP 1 + +/* Define to 1 if you have the `strerror' function. */ +#define HAVE_STRERROR 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the `strtoul' function. */ +#define HAVE_STRTOUL 1 + +/* Define to 1 if `st_blksize' is member of `struct stat'. */ +/* #undef HAVE_STRUCT_STAT_ST_BLKSIZE */ + +/* Define to 1 if you have the `sysconf' function. */ +/* #undef HAVE_SYSCONF */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_DIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_FCNTL_H 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_NDIR_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_SELECT_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_TIME_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_UNISTD_H */ + +/* Define to 1 if unlink of file with open file descriptors will fail. */ +/* #undef HAVE_UNLINK_WITH_OPEN_FAILURE */ + +/* Define to 1 if the system has the type `unsigned long long'. */ +#define HAVE_UNSIGNED_LONG_LONG 1 + +/* Define to 1 if building access method verification support. */ +#ifndef HAVE_SMALLBUILD +#define HAVE_VERIFY 1 +#endif + +/* Define to 1 if you have the `vsnprintf' function. */ +#define HAVE_VSNPRINTF 1 + +/* Define to 1 if building VxWorks. */ +/* #undef HAVE_VXWORKS */ + +/* Define to 1 if you have the `yield' function. */ +/* #undef HAVE_YIELD */ + +/* Define to 1 if you have the `_fstati64' function. */ +#define HAVE__FSTATI64 1 + +/* Define to a value if using non-standard mutex alignment. */ +/* #undef MUTEX_ALIGN */ + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "support@sleepycat.com" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "Berkeley DB" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "Berkeley DB 4.3.14" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "db-4.3.14" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "4.3.14" + +/* Define to 1 if the `S_IS*' macros in do not work properly. */ +/* #undef STAT_MACROS_BROKEN */ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define to 1 if you can safely include both and . */ +/* #undef TIME_WITH_SYS_TIME */ + +/* Define to 1 to mask harmless uninitialized memory read/writes. */ +/* #undef UMRW */ + +/* Number of bits in a file offset, on hosts where this is settable. */ +/* #undef _FILE_OFFSET_BITS */ + +/* Define for large files, on AIX-style hosts. */ +/* #undef _LARGE_FILES */ + +/* Define to empty if `const' does not conform to ANSI C. */ +/* #undef const */ + +/* + * Exit success/failure macros. + */ +#ifndef HAVE_EXIT_SUCCESS +#define EXIT_FAILURE 1 +#define EXIT_SUCCESS 0 +#endif + +/* + * Don't step on the namespace. 
Other libraries may have their own + * implementations of these functions, we don't want to use their + * implementations or force them to use ours based on the load order. + */ +#ifndef HAVE_GETCWD +#define getcwd __db_Cgetcwd +#endif +#ifndef HAVE_MEMCMP +#define memcmp __db_Cmemcmp +#endif +#ifndef HAVE_MEMCPY +#define memcpy __db_Cmemcpy +#endif +#ifndef HAVE_MEMMOVE +#define memmove __db_Cmemmove +#endif +#ifndef HAVE_RAISE +#define raise __db_Craise +#endif +#ifndef HAVE_SNPRINTF +#define snprintf __db_Csnprintf +#endif +#ifndef HAVE_STRCASECMP +#define strcasecmp __db_Cstrcasecmp +#define strncasecmp __db_Cstrncasecmp +#endif +#ifndef HAVE_STRERROR +#define strerror __db_Cstrerror +#endif +#ifndef HAVE_VSNPRINTF +#define vsnprintf __db_Cvsnprintf +#endif + +#include "win_db.h" + +/* + * Microsoft's compiler _doesn't_ define __STDC__ unless you invoke it with + * arguments turning OFF all vendor extensions. Even more unfortunately, if + * we do that, it fails to parse windows.h!!!!! So, we define __STDC__ here, + * after windows.h comes in. Note: the compiler knows we've defined it, and + * starts enforcing strict ANSI compliance from this point on. + */ +#define __STDC__ 1 diff --git a/db/build_win64/db_cxx.h b/db/build_win64/db_cxx.h new file mode 100644 index 000000000..2de60bb5e --- /dev/null +++ b/db/build_win64/db_cxx.h @@ -0,0 +1,1089 @@ +/* DO NOT EDIT: automatically built by dist/s_win32. */ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1997-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: db_cxx.h,v 11.16 2004/10/14 15:32:29 bostic Exp $ + */ + +#ifndef _DB_CXX_H_ +#define _DB_CXX_H_ +// +// C++ assumptions: +// +// To ensure portability to many platforms, both new and old, we make +// few assumptions about the C++ compiler and library. For example, +// we do not expect STL, templates or namespaces to be available. The +// "newest" C++ feature used is exceptions, which are used liberally +// to transmit error information. Even the use of exceptions can be +// disabled at runtime, to do so, use the DB_CXX_NO_EXCEPTIONS flags +// with the DbEnv or Db constructor. +// +// C++ naming conventions: +// +// - All top level class names start with Db. +// - All class members start with lower case letter. +// - All private data members are suffixed with underscore. +// - Use underscores to divide names into multiple words. +// - Simple data accessors are named with get_ or set_ prefix. +// - All method names are taken from names of functions in the C +// layer of db (usually by dropping a prefix like "db_"). +// These methods have the same argument types and order, +// other than dropping the explicit arg that acts as "this". +// +// As a rule, each DbFoo object has exactly one underlying DB_FOO struct +// (defined in db.h) associated with it. In some cases, we inherit directly +// from the DB_FOO structure to make this relationship explicit. Often, +// the underlying C layer allocates and deallocates these structures, so +// there is no easy way to add any data to the DbFoo class. When you see +// a comment about whether data is permitted to be added, this is what +// is going on. Of course, if we need to add data to such C++ classes +// in the future, we will arrange to have an indirect pointer to the +// DB_FOO struct (as some of the classes already have). 
+// + +//////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////// +// +// Forward declarations +// + +#include + +#define HAVE_CXX_STDHEADERS 1 +#ifdef HAVE_CXX_STDHEADERS +#include +#include +#define __DB_STD(x) std::x +#else +#include +#include +#define __DB_STD(x) x +#endif + +#include "db.h" + +class Db; // forward +class Dbc; // forward +class DbEnv; // forward +class DbInfo; // forward +class DbLock; // forward +class DbLogc; // forward +class DbLsn; // forward +class DbMpoolFile; // forward +class DbPreplist; // forward +class Dbt; // forward +class DbTxn; // forward +class DbLock; // forward +class DbSequence; // forward +class Dbt; // forward + +class DbMultipleIterator; // forward +class DbMultipleKeyDataIterator; // forward +class DbMultipleRecnoDataIterator; // forward +class DbMultipleDataIterator; // forward + +class DbException; // forward +class DbDeadlockException; // forward +class DbLockNotGrantedException; // forward +class DbMemoryException; // forward +class DbRunRecoveryException; // forward + +//////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////// +// +// Turn off inappropriate compiler warnings +// + +#ifdef _MSC_VER + +// These are level 4 warnings that are explicitly disabled. +// With Visual C++, by default you do not see above level 3 unless +// you use /W4. But we like to compile with the highest level +// warnings to catch other errors. +// +// 4201: nameless struct/union +// triggered by standard include file +// +// 4514: unreferenced inline function has been removed +// certain include files in MSVC define methods that are not called +// +#pragma warning(disable: 4201 4514) + +#endif + +//////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////// +// +// Mechanisms for declaring classes +// + +// +// Every class defined in this file has an _exported next to the class name. +// This is needed for WinTel machines so that the class methods can +// be exported or imported in a DLL as appropriate. Users of the DLL +// use the define DB_USE_DLL. When the DLL is built, DB_CREATE_DLL +// must be defined. +// +#if defined(_MSC_VER) + +# if defined(DB_CREATE_DLL) +# define _exported __declspec(dllexport) // creator of dll +# elif defined(DB_USE_DLL) +# define _exported __declspec(dllimport) // user of dll +# else +# define _exported // static lib creator or user +# endif + +#else /* _MSC_VER */ + +# define _exported + +#endif /* _MSC_VER */ + +// Some interfaces can be customized by allowing users to define +// callback functions. For performance and logistical reasons, some +// callback functions must be declared in extern "C" blocks. For others, +// we allow you to declare the callbacks in C++ or C (or an extern "C" +// block) as you wish. See the set methods for the callbacks for +// the choices. 
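A short sketch of the C++-style callback declaration discussed above: a Btree comparison function matching the set_bt_compare() overload declared further down in this header, assuming the application stores keys as native-order 32-bit integers.

#include <cstring>
#include "db_cxx.h"

/* Compare two keys that were stored as native 32-bit ints. */
static int
compare_int_keys(Db *db, const Dbt *a, const Dbt *b)
{
	int ai, bi;

	(void)db;                       /* unused */
	std::memcpy(&ai, a->get_data(), sizeof(int));
	std::memcpy(&bi, b->get_data(), sizeof(int));
	return (ai < bi ? -1 : ai > bi ? 1 : 0);
}

/* Register it before Db::open():
 *	Db db(&env, 0);
 *	db.set_bt_compare(compare_int_keys);
 */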
+// +extern "C" { + typedef void * (*db_malloc_fcn_type) + (size_t); + typedef void * (*db_realloc_fcn_type) + (void *, size_t); + typedef void (*db_free_fcn_type) + (void *); + typedef int (*bt_compare_fcn_type) /*C++ version available*/ + (DB *, const DBT *, const DBT *); + typedef size_t (*bt_prefix_fcn_type) /*C++ version available*/ + (DB *, const DBT *, const DBT *); + typedef int (*dup_compare_fcn_type) /*C++ version available*/ + (DB *, const DBT *, const DBT *); + typedef u_int32_t (*h_hash_fcn_type) /*C++ version available*/ + (DB *, const void *, u_int32_t); + typedef int (*pgin_fcn_type) + (DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie); + typedef int (*pgout_fcn_type) + (DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie); +} + +// +// Represents a database table = a set of keys with associated values. +// +class _exported Db +{ + friend class DbEnv; + +public: + Db(DbEnv*, u_int32_t); // create a Db object, then call open() + virtual ~Db(); // does *not* call close. + + // These methods exactly match those in the C interface. + // + virtual int associate(DbTxn *txn, Db *secondary, + int (*callback)(Db *, const Dbt *, const Dbt *, Dbt *), + u_int32_t flags); + virtual int close(u_int32_t flags); + virtual int cursor(DbTxn *txnid, Dbc **cursorp, u_int32_t flags); + virtual int del(DbTxn *txnid, Dbt *key, u_int32_t flags); + virtual void err(int, const char *, ...); + virtual void errx(const char *, ...); + virtual int fd(int *fdp); + virtual int get(DbTxn *txnid, Dbt *key, Dbt *data, u_int32_t flags); + virtual void *get_app_private() const; + virtual int get_byteswapped(int *); + virtual int get_dbname(const char **, const char **); + virtual int get_open_flags(u_int32_t *); + virtual int get_type(DBTYPE *); + virtual int get_transactional(); + virtual int join(Dbc **curslist, Dbc **dbcp, u_int32_t flags); + virtual int key_range(DbTxn *, Dbt *, DB_KEY_RANGE *, u_int32_t); + virtual int open(DbTxn *txnid, + const char *, const char *subname, DBTYPE, u_int32_t, int); + virtual int pget(DbTxn *txnid, Dbt *key, Dbt *pkey, Dbt *data, + u_int32_t flags); + virtual int put(DbTxn *, Dbt *, Dbt *, u_int32_t); + virtual int remove(const char *, const char *, u_int32_t); + virtual int rename(const char *, const char *, const char *, u_int32_t); + virtual int set_alloc(db_malloc_fcn_type, db_realloc_fcn_type, + db_free_fcn_type); + virtual void set_app_private(void *); + virtual int set_append_recno(int (*)(Db *, Dbt *, db_recno_t)); + virtual int set_bt_compare(bt_compare_fcn_type); /*deprecated*/ + virtual int set_bt_compare(int (*)(Db *, const Dbt *, const Dbt *)); + virtual int set_bt_maxkey(u_int32_t); + virtual int get_bt_minkey(u_int32_t *); + virtual int set_bt_minkey(u_int32_t); + virtual int set_bt_prefix(bt_prefix_fcn_type); /*deprecated*/ + virtual int set_bt_prefix(size_t (*)(Db *, const Dbt *, const Dbt *)); + virtual int get_cachesize(u_int32_t *, u_int32_t *, int *); + virtual int set_cachesize(u_int32_t, u_int32_t, int); + virtual int set_dup_compare(dup_compare_fcn_type); /*deprecated*/ + virtual int set_dup_compare(int (*)(Db *, const Dbt *, const Dbt *)); + virtual int get_encrypt_flags(u_int32_t *); + virtual int set_encrypt(const char *, u_int32_t); + virtual void set_errcall( + void (*)(const DbEnv *, const char *, const char *)); + virtual void get_errfile(FILE **); + virtual void set_errfile(FILE *); + virtual void get_errpfx(const char **); + virtual void set_errpfx(const char *); + virtual int set_feedback(void (*)(Db *, int, int)); + 
virtual int get_flags(u_int32_t *); + virtual int set_flags(u_int32_t); + virtual int get_h_ffactor(u_int32_t *); + virtual int set_h_ffactor(u_int32_t); + virtual int set_h_hash(h_hash_fcn_type); /*deprecated*/ + virtual int set_h_hash(u_int32_t (*)(Db *, const void *, u_int32_t)); + virtual int get_h_nelem(u_int32_t *); + virtual int set_h_nelem(u_int32_t); + virtual int get_lorder(int *); + virtual int set_lorder(int); + virtual void set_msgcall(void (*)(const DbEnv *, const char *)); + virtual void get_msgfile(FILE **); + virtual void set_msgfile(FILE *); + virtual int get_pagesize(u_int32_t *); + virtual int set_pagesize(u_int32_t); + virtual int set_paniccall(void (*)(DbEnv *, int)); + virtual int get_re_delim(int *); + virtual int set_re_delim(int); + virtual int get_re_len(u_int32_t *); + virtual int set_re_len(u_int32_t); + virtual int get_re_pad(int *); + virtual int set_re_pad(int); + virtual int get_re_source(const char **); + virtual int set_re_source(const char *); + virtual int get_q_extentsize(u_int32_t *); + virtual int set_q_extentsize(u_int32_t); + virtual int stat(DbTxn *, void *sp, u_int32_t flags); + virtual int stat_print(u_int32_t flags); + virtual int sync(u_int32_t flags); + virtual int truncate(DbTxn *, u_int32_t *, u_int32_t); + virtual int upgrade(const char *name, u_int32_t flags); + virtual int verify(const char *, const char *, __DB_STD(ostream) *, + u_int32_t); + + // These additional methods are not in the C interface, and + // are only available for C++. + // + virtual __DB_STD(ostream) *get_error_stream(); + virtual void set_error_stream(__DB_STD(ostream) *); + virtual __DB_STD(ostream) *get_message_stream(); + virtual void set_message_stream(__DB_STD(ostream) *); + + virtual DbEnv *get_env(); + virtual DbMpoolFile *get_mpf(); + + virtual DB *get_DB() + { + return imp_; + } + + virtual const DB *get_const_DB() const + { + return imp_; + } + + static Db* get_Db(DB *db) + { + return (Db *)db->api_internal; + } + + static const Db* get_const_Db(const DB *db) + { + return (const Db *)db->api_internal; + } + +private: + // no copying + Db(const Db &); + Db &operator = (const Db &); + + void cleanup(); + int initialize(); + int error_policy(); + + // instance data + DB *imp_; + DbEnv *env_; + DbMpoolFile *mpf_; + int construct_error_; + u_int32_t flags_; + u_int32_t construct_flags_; + +public: + // These are public only because they need to be called + // via C callback functions. They should never be used by + // external users of this class. + // + int (*append_recno_callback_)(Db *, Dbt *, db_recno_t); + int (*associate_callback_)(Db *, const Dbt *, const Dbt *, Dbt *); + int (*bt_compare_callback_)(Db *, const Dbt *, const Dbt *); + size_t (*bt_prefix_callback_)(Db *, const Dbt *, const Dbt *); + int (*dup_compare_callback_)(Db *, const Dbt *, const Dbt *); + void (*feedback_callback_)(Db *, int, int); + u_int32_t (*h_hash_callback_)(Db *, const void *, u_int32_t); +}; + +// +// Cursor +// +class _exported Dbc : protected DBC +{ + friend class Db; + +public: + int close(); + int count(db_recno_t *countp, u_int32_t flags); + int del(u_int32_t flags); + int dup(Dbc** cursorp, u_int32_t flags); + int get(Dbt* key, Dbt *data, u_int32_t flags); + int pget(Dbt* key, Dbt* pkey, Dbt *data, u_int32_t flags); + int put(Dbt* key, Dbt *data, u_int32_t flags); + +private: + // No data is permitted in this class (see comment at top) + + // Note: use Db::cursor() to get pointers to a Dbc, + // and call Dbc::close() rather than delete to release them. 
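A sketch of the cursor pattern that note describes: the Dbc comes from Db::cursor() and is released through Dbc::close(). DB_NEXT and DB_NOTFOUND are the usual db.h flag and return code; other error checking is left out.

#include <iostream>
#include "db_cxx.h"

void dump_all(Db &db)
{
	Dbc *cursor = 0;
	db.cursor(NULL, &cursor, 0);        // throws DbException on failure

	Dbt key, data;
	int ret;
	while ((ret = cursor->get(&key, &data, DB_NEXT)) == 0)
		std::cout << key.get_size() << "-byte key, "
		    << data.get_size() << "-byte data" << std::endl;
	// ret is DB_NOTFOUND once the last pair has been seen.
	cursor->close();
}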
+ // + Dbc(); + ~Dbc(); + + // no copying + Dbc(const Dbc &); + Dbc &operator = (const Dbc &); +}; + +// +// Berkeley DB environment class. Provides functions for opening databases. +// User of this library can use this class as a starting point for +// developing a DB application - derive their application class from +// this one, add application control logic. +// +// Note that if you use the default constructor, you must explicitly +// call appinit() before any other db activity (e.g. opening files) +// +class _exported DbEnv +{ + friend class Db; + friend class DbLock; + friend class DbMpoolFile; + +public: + // After using this constructor, you can set any needed + // parameters for the environment using the set_* methods. + // Then call open() to finish initializing the environment + // and attaching it to underlying files. + // + DbEnv(u_int32_t flags); + + virtual ~DbEnv(); + + // These methods match those in the C interface. + // + virtual int close(u_int32_t); + virtual int dbremove(DbTxn *txn, const char *name, const char *subdb, + u_int32_t flags); + virtual int dbrename(DbTxn *txn, const char *name, const char *subdb, + const char *newname, u_int32_t flags); + virtual void err(int, const char *, ...); + virtual void errx(const char *, ...); + virtual void *get_app_private() const; + virtual int get_home(const char **); + virtual int get_open_flags(u_int32_t *); + virtual int open(const char *, u_int32_t, int); + virtual int remove(const char *, u_int32_t); + virtual int set_alloc(db_malloc_fcn_type, db_realloc_fcn_type, + db_free_fcn_type); + virtual void set_app_private(void *); + virtual int get_cachesize(u_int32_t *, u_int32_t *, int *); + virtual int set_cachesize(u_int32_t, u_int32_t, int); + virtual int get_data_dirs(const char ***); + virtual int set_data_dir(const char *); + virtual int get_encrypt_flags(u_int32_t *); + virtual int set_encrypt(const char *, u_int32_t); + virtual void set_errcall( + void (*)(const DbEnv *, const char *, const char *)); + virtual void get_errfile(FILE **); + virtual void set_errfile(FILE *); + virtual void get_errpfx(const char **); + virtual void set_errpfx(const char *); + virtual int get_flags(u_int32_t *); + virtual int set_flags(u_int32_t, int); + virtual int set_feedback(void (*)(DbEnv *, int, int)); + virtual int get_lg_bsize(u_int32_t *); + virtual int set_lg_bsize(u_int32_t); + virtual int get_lg_dir(const char **); + virtual int set_lg_dir(const char *); + virtual int get_lg_max(u_int32_t *); + virtual int set_lg_max(u_int32_t); + virtual int get_lg_regionmax(u_int32_t *); + virtual int set_lg_regionmax(u_int32_t); + virtual int get_lk_conflicts(const u_int8_t **, int *); + virtual int set_lk_conflicts(u_int8_t *, int); + virtual int get_lk_detect(u_int32_t *); + virtual int set_lk_detect(u_int32_t); + virtual int set_lk_max(u_int32_t); + virtual int get_lk_max_lockers(u_int32_t *); + virtual int set_lk_max_lockers(u_int32_t); + virtual int get_lk_max_locks(u_int32_t *); + virtual int set_lk_max_locks(u_int32_t); + virtual int get_lk_max_objects(u_int32_t *); + virtual int set_lk_max_objects(u_int32_t); + virtual int get_mp_mmapsize(size_t *); + virtual int set_mp_mmapsize(size_t); + virtual void set_msgcall(void (*)(const DbEnv *, const char *)); + virtual void get_msgfile(FILE **); + virtual void set_msgfile(FILE *); + virtual int set_paniccall(void (*)(DbEnv *, int)); + virtual int set_rpc_server(void *, char *, long, long, u_int32_t); + virtual int get_shm_key(long *); + virtual int set_shm_key(long); + virtual int 
get_timeout(db_timeout_t *, u_int32_t); + virtual int set_timeout(db_timeout_t, u_int32_t); + virtual int get_tmp_dir(const char **); + virtual int set_tmp_dir(const char *); + virtual int get_tas_spins(u_int32_t *); + virtual int set_tas_spins(u_int32_t); + virtual int get_tx_max(u_int32_t *); + virtual int set_tx_max(u_int32_t); + virtual int set_app_dispatch(int (*)(DbEnv *, + Dbt *, DbLsn *, db_recops)); + virtual int get_tx_timestamp(time_t *); + virtual int set_tx_timestamp(time_t *); + virtual int get_verbose(u_int32_t which, int *); + virtual int set_verbose(u_int32_t which, int); + + // Version information. A static method so it can be obtained anytime. + // + static char *version(int *major, int *minor, int *patch); + + // Convert DB errors to strings + static char *strerror(int); + + // If an error is detected and the error call function + // or stream is set, a message is dispatched or printed. + // If a prefix is set, each message is prefixed. + // + // You can use set_errcall() or set_errfile() above to control + // error functionality. Alternatively, you can call + // set_error_stream() to force all errors to a C++ stream. + // It is unwise to mix these approaches. + // + virtual __DB_STD(ostream) *get_error_stream(); + virtual void set_error_stream(__DB_STD(ostream) *); + virtual __DB_STD(ostream) *get_message_stream(); + virtual void set_message_stream(__DB_STD(ostream) *); + + // used internally + static void runtime_error(DbEnv *env, const char *caller, int err, + int error_policy); + static void runtime_error_dbt(DbEnv *env, const char *caller, Dbt *dbt, + int error_policy); + static void runtime_error_lock_get(DbEnv *env, const char *caller, + int err, db_lockop_t op, db_lockmode_t mode, + const Dbt *obj, DbLock lock, int index, + int error_policy); + + // Lock functions + // + virtual int lock_detect(u_int32_t flags, u_int32_t atype, int *aborted); + virtual int lock_get(u_int32_t locker, u_int32_t flags, const Dbt *obj, + db_lockmode_t lock_mode, DbLock *lock); + virtual int lock_id(u_int32_t *idp); + virtual int lock_id_free(u_int32_t id); + virtual int lock_put(DbLock *lock); + virtual int lock_stat(DB_LOCK_STAT **statp, u_int32_t flags); + virtual int lock_stat_print(u_int32_t flags); + virtual int lock_vec(u_int32_t locker, u_int32_t flags, + DB_LOCKREQ list[], int nlist, DB_LOCKREQ **elistp); + + // Log functions + // + virtual int log_archive(char **list[], u_int32_t flags); + static int log_compare(const DbLsn *lsn0, const DbLsn *lsn1); + virtual int log_cursor(DbLogc **cursorp, u_int32_t flags); + virtual int log_file(DbLsn *lsn, char *namep, size_t len); + virtual int log_flush(const DbLsn *lsn); + virtual int log_put(DbLsn *lsn, const Dbt *data, u_int32_t flags); + + virtual int log_stat(DB_LOG_STAT **spp, u_int32_t flags); + virtual int log_stat_print(u_int32_t flags); + + // Mpool functions + // + virtual int memp_fcreate(DbMpoolFile **dbmfp, u_int32_t flags); + virtual int memp_register(int ftype, + pgin_fcn_type pgin_fcn, + pgout_fcn_type pgout_fcn); + virtual int memp_stat(DB_MPOOL_STAT + **gsp, DB_MPOOL_FSTAT ***fsp, u_int32_t flags); + virtual int memp_stat_print(u_int32_t flags); + virtual int memp_sync(DbLsn *lsn); + virtual int memp_trickle(int pct, int *nwrotep); + + // Transaction functions + // + virtual int txn_begin(DbTxn *pid, DbTxn **tid, u_int32_t flags); + virtual int txn_checkpoint(u_int32_t kbyte, u_int32_t min, + u_int32_t flags); + virtual int txn_recover(DbPreplist *preplist, long count, + long *retp, u_int32_t flags); + virtual int 
txn_stat(DB_TXN_STAT **statp, u_int32_t flags); + virtual int txn_stat_print(u_int32_t flags); + + // Replication functions + // + virtual int rep_elect(int, int, int, u_int32_t, int *, u_int32_t); + virtual int rep_process_message(Dbt *, Dbt *, int *, DbLsn *); + virtual int rep_start(Dbt *, u_int32_t); + virtual int rep_stat(DB_REP_STAT **statp, u_int32_t flags); + virtual int rep_stat_print(u_int32_t flags); + virtual int get_rep_limit(u_int32_t *, u_int32_t *); + virtual int set_rep_limit(u_int32_t, u_int32_t); + virtual int set_rep_transport(int, int (*)(DbEnv *, + const Dbt *, const Dbt *, const DbLsn *, int, u_int32_t)); + + // Conversion functions + // + virtual DB_ENV *get_DB_ENV() + { + return imp_; + } + + virtual const DB_ENV *get_const_DB_ENV() const + { + return imp_; + } + + static DbEnv* get_DbEnv(DB_ENV *dbenv) + { + return dbenv ? (DbEnv *)dbenv->api1_internal : 0; + } + + static const DbEnv* get_const_DbEnv(const DB_ENV *dbenv) + { + return dbenv ? (const DbEnv *)dbenv->api1_internal : 0; + } + + // For internal use only. + static DbEnv* wrap_DB_ENV(DB_ENV *dbenv); + + // These are public only because they need to be called + // via C functions. They should never be called by users + // of this class. + // + static int _app_dispatch_intercept(DB_ENV *env, DBT *dbt, DB_LSN *lsn, + db_recops op); + static void _paniccall_intercept(DB_ENV *env, int errval); + static void _feedback_intercept(DB_ENV *env, int opcode, int pct); + static int _rep_send_intercept(DB_ENV *env, + const DBT *cntrl, const DBT *data, + const DB_LSN *lsn, int id, + u_int32_t flags); + static void _stream_error_function(const DB_ENV *env, + const char *prefix, + const char *message); + static void _stream_message_function(const DB_ENV *env, + const char *message); + +private: + void cleanup(); + int initialize(DB_ENV *env); + int error_policy(); + + // For internal use only. + DbEnv(DB_ENV *, u_int32_t flags); + + // no copying + DbEnv(const DbEnv &); + void operator = (const DbEnv &); + + // instance data + DB_ENV *imp_; + int construct_error_; + u_int32_t construct_flags_; + __DB_STD(ostream) *error_stream_; + __DB_STD(ostream) *message_stream_; + + int (*app_dispatch_callback_)(DbEnv *, Dbt *, DbLsn *, db_recops); + void (*error_callback_)(const DbEnv *, const char *, const char *); + void (*feedback_callback_)(DbEnv *, int, int); + void (*message_callback_)(const DbEnv *, const char *); + void (*paniccall_callback_)(DbEnv *, int); + int (*pgin_callback_)(DbEnv *dbenv, db_pgno_t pgno, + void *pgaddr, Dbt *pgcookie); + int (*pgout_callback_)(DbEnv *dbenv, db_pgno_t pgno, + void *pgaddr, Dbt *pgcookie); + int (*rep_send_callback_)(DbEnv *, + const Dbt *, const Dbt *, const DbLsn *, int, u_int32_t); +}; + +// +// Lock +// +class _exported DbLock +{ + friend class DbEnv; + +public: + DbLock(); + DbLock(const DbLock &); + DbLock &operator = (const DbLock &); + +protected: + // We can add data to this class if needed + // since its contained class is not allocated by db. + // (see comment at top) + + DbLock(DB_LOCK); + DB_LOCK lock_; +}; + +// +// Log cursor +// +class _exported DbLogc : protected DB_LOGC +{ + friend class DbEnv; + +public: + int close(u_int32_t _flags); + int get(DbLsn *lsn, Dbt *data, u_int32_t _flags); + +private: + // No data is permitted in this class (see comment at top) + + // Note: use Db::cursor() to get pointers to a Dbc, + // and call Dbc::close() rather than delete to release them. 
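A sketch of the transaction methods above: a single put wrapped in its own transaction and retried when the deadlock detector aborts it. The retry limit is arbitrary and DB_LOCK_DEADLOCK is assumed to be the usual db.h error code for giving up.

#include "db_cxx.h"

int store_with_retry(DbEnv &env, Db &db, Dbt &key, Dbt &data)
{
	for (int attempt = 0; attempt < 5; ++attempt) {
		DbTxn *txn = 0;
		env.txn_begin(NULL, &txn, 0);
		try {
			db.put(txn, &key, &data, 0);
		} catch (DbDeadlockException &) {
			// This transaction was chosen as the deadlock
			// victim; abort it and try again.
			txn->abort();
			continue;
		}
		return txn->commit(0);
	}
	return DB_LOCK_DEADLOCK;
}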
+ // + DbLogc(); + ~DbLogc(); + + // no copying + DbLogc(const Dbc &); + DbLogc &operator = (const Dbc &); +}; + +// +// Log sequence number +// +class _exported DbLsn : public DB_LSN +{ + friend class DbEnv; // friendship needed to cast to base class + friend class DbLogc; // friendship needed to cast to base class +}; + +// +// Memory pool file +// +class _exported DbMpoolFile +{ + friend class DbEnv; + friend class Db; + +public: + int close(u_int32_t flags); + int get(db_pgno_t *pgnoaddr, u_int32_t flags, void *pagep); + int open(const char *file, u_int32_t flags, int mode, size_t pagesize); + int get_transactional(void); + int put(void *pgaddr, u_int32_t flags); + int set(void *pgaddr, u_int32_t flags); + int get_clear_len(u_int32_t *len); + int set_clear_len(u_int32_t len); + int get_fileid(u_int8_t *fileid); + int set_fileid(u_int8_t *fileid); + int get_flags(u_int32_t *flagsp); + int set_flags(u_int32_t flags, int onoff); + int get_ftype(int *ftype); + int set_ftype(int ftype); + int get_lsn_offset(int32_t *offsetp); + int set_lsn_offset(int32_t offset); + int get_maxsize(u_int32_t *gbytes, u_int32_t *bytes); + int set_maxsize(u_int32_t gbytes, u_int32_t bytes); + int get_pgcookie(DBT *dbt); + int set_pgcookie(DBT *dbt); + int get_priority(DB_CACHE_PRIORITY *priorityp); + int set_priority(DB_CACHE_PRIORITY priority); + int sync(); + + virtual DB_MPOOLFILE *get_DB_MPOOLFILE() + { + return imp_; + } + + virtual const DB_MPOOLFILE *get_const_DB_MPOOLFILE() const + { + return imp_; + } + +private: + DB_MPOOLFILE *imp_; + + // We can add data to this class if needed + // since it is implemented via a pointer. + // (see comment at top) + + // Note: use DbEnv::memp_fcreate() to get pointers to a DbMpoolFile, + // and call DbMpoolFile::close() rather than delete to release them. + // + DbMpoolFile(); + + // Shut g++ up. +protected: + virtual ~DbMpoolFile(); + +private: + // no copying + DbMpoolFile(const DbMpoolFile &); + void operator = (const DbMpoolFile &); +}; + +// +// This is filled in and returned by the DbEnv::txn_recover() method. +// +class _exported DbPreplist +{ +public: + DbTxn *txn; + u_int8_t gid[DB_XIDDATASIZE]; +}; + +// +// A sequence record in a database +// +class _exported DbSequence +{ +public: + DbSequence(Db *db, u_int32_t flags); + virtual ~DbSequence(); + + int open(DbTxn *txnid, Dbt *key, u_int32_t flags); + int initial_value(db_seq_t value); + int close(u_int32_t flags); + int remove(DbTxn *txnid, u_int32_t flags); + int stat(DB_SEQUENCE_STAT **sp, u_int32_t flags); + int stat_print(u_int32_t flags); + + int get(DbTxn *txnid, int32_t delta, db_seq_t *retp, u_int32_t flags); + int get_cachesize(int32_t *sizep); + int set_cachesize(int32_t size); + int get_flags(u_int32_t *flagsp); + int set_flags(u_int32_t flags); + int get_range(db_seq_t *minp, db_seq_t *maxp); + int set_range(db_seq_t min, db_seq_t max); + + Db *get_db(); + Dbt *get_key(); + + virtual DB_SEQUENCE *get_DB_SEQUENCE() + { + return imp_; + } + + virtual const DB_SEQUENCE *get_const_DB_SEQUENCE() const + { + return imp_; + } + + static DbSequence* get_DbSequence(DB_SEQUENCE *seq) + { + return (DbSequence *)seq->api_internal; + } + + static const DbSequence* get_const_DbSequence(const DB_SEQUENCE *seq) + { + return (const DbSequence *)seq->api_internal; + } + + // For internal use only. 
+ static DbSequence* wrap_DB_SEQUENCE(DB_SEQUENCE *seq);
+
+private:
+ DbSequence(DB_SEQUENCE *seq);
+ // no copying
+ DbSequence(const DbSequence &);
+ DbSequence &operator = (const DbSequence &);
+
+ DB_SEQUENCE *imp_;
+ DBT key_;
+};
+
+//
+// Transaction
+//
+class _exported DbTxn
+{
+ friend class DbEnv;
+
+public:
+ int abort();
+ int commit(u_int32_t flags);
+ int discard(u_int32_t flags);
+ u_int32_t id();
+ int prepare(u_int8_t *gid);
+ int set_timeout(db_timeout_t timeout, u_int32_t flags);
+
+ virtual DB_TXN *get_DB_TXN()
+ {
+ return imp_;
+ }
+
+ virtual const DB_TXN *get_const_DB_TXN() const
+ {
+ return imp_;
+ }
+
+ static DbTxn* get_DbTxn(DB_TXN *txn)
+ {
+ return (DbTxn *)txn->api_internal;
+ }
+
+ static const DbTxn* get_const_DbTxn(const DB_TXN *txn)
+ {
+ return (const DbTxn *)txn->api_internal;
+ }
+
+ // For internal use only.
+ static DbTxn* wrap_DB_TXN(DB_TXN *txn);
+
+private:
+ DB_TXN *imp_;
+
+ // We can add data to this class if needed
+ // since it is implemented via a pointer.
+ // (see comment at top)
+
+ // Note: use DbEnv::txn_begin() to get pointers to a DbTxn,
+ // and call DbTxn::abort() or DbTxn::commit() rather than
+ // delete to release them.
+ //
+ DbTxn();
+ // For internal use only.
+ DbTxn(DB_TXN *txn);
+ virtual ~DbTxn();
+
+ // no copying
+ DbTxn(const DbTxn &);
+ void operator = (const DbTxn &);
+};
+
+//
+// A chunk of data, maybe a key or value.
+//
+class _exported Dbt : private DBT
+{
+ friend class Db;
+ friend class Dbc;
+ friend class DbEnv;
+ friend class DbLogc;
+ friend class DbSequence;
+
+public:
+ // key/data
+ void *get_data() const { return data; }
+ void set_data(void *value) { data = value; }
+
+ // key/data length
+ u_int32_t get_size() const { return size; }
+ void set_size(u_int32_t value) { size = value; }
+
+ // RO: length of user buffer.
+ u_int32_t get_ulen() const { return ulen; }
+ void set_ulen(u_int32_t value) { ulen = value; }
+
+ // RO: get/put record length.
+ u_int32_t get_dlen() const { return dlen; }
+ void set_dlen(u_int32_t value) { dlen = value; }
+
+ // RO: get/put record offset.
+ u_int32_t get_doff() const { return doff; }
+ void set_doff(u_int32_t value) { doff = value; }
+
+ // flags
+ u_int32_t get_flags() const { return flags; }
+ void set_flags(u_int32_t value) { flags = value; }
+
+ // Conversion functions
+ DBT *get_DBT() { return (DBT *)this; }
+ const DBT *get_const_DBT() const { return (const DBT *)this; }
+
+ static Dbt* get_Dbt(DBT *dbt) { return (Dbt *)dbt; }
+ static const Dbt* get_const_Dbt(const DBT *dbt)
+ { return (const Dbt *)dbt; }
+
+ Dbt(void *data, u_int32_t size);
+ Dbt();
+ ~Dbt();
+ Dbt(const Dbt &);
+ Dbt &operator = (const Dbt &);
+
+private:
+ // Note: no extra data appears in this class (other than
+ // inherited from DBT) since we need DBT and Dbt objects
+ // to have interchangeable pointers.
+ //
+ // When subclassing this class, remember that callback
+ // methods like bt_compare, bt_prefix, dup_compare may
+ // internally manufacture DBT objects (which later are
+ // cast to Dbt), so such callbacks might receive objects
+ // not of your subclassed type.
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// multiple key/data/recno iterator classes
+//
+
+// DbMultipleIterator is a shared private base class for the three types
+// of bulk-return iterator; it should never be instantiated directly,
+// but it handles the functionality shared by its subclasses.
+class _exported DbMultipleIterator +{ +public: + DbMultipleIterator(const Dbt &dbt); +protected: + u_int8_t *data_; + u_int32_t *p_; +}; + +class _exported DbMultipleKeyDataIterator : private DbMultipleIterator +{ +public: + DbMultipleKeyDataIterator(const Dbt &dbt) : DbMultipleIterator(dbt) {} + bool next(Dbt &key, Dbt &data); +}; + +class _exported DbMultipleRecnoDataIterator : private DbMultipleIterator +{ +public: + DbMultipleRecnoDataIterator(const Dbt &dbt) : DbMultipleIterator(dbt) {} + bool next(db_recno_t &recno, Dbt &data); +}; + +class _exported DbMultipleDataIterator : private DbMultipleIterator +{ +public: + DbMultipleDataIterator(const Dbt &dbt) : DbMultipleIterator(dbt) {} + bool next(Dbt &data); +}; + +//////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////// +// +// Exception classes +// + +// Almost any error in the DB library throws a DbException. +// Every exception should be considered an abnormality +// (e.g. bug, misuse of DB, file system error). +// +class _exported DbException : public __DB_STD(exception) +{ +public: + virtual ~DbException() throw(); + DbException(int err); + DbException(const char *description); + DbException(const char *description, int err); + DbException(const char *prefix, const char *description, int err); + int get_errno() const; + virtual const char *what() const throw(); + DbEnv *get_env() const; + void set_env(DbEnv *env); + + DbException(const DbException &); + DbException &operator = (const DbException &); + +private: + void describe(const char *prefix, const char *description); + + char *what_; + int err_; // errno + DbEnv *env_; +}; + +// +// A specific sort of exception that occurs when +// an operation is aborted to resolve a deadlock. +// +class _exported DbDeadlockException : public DbException +{ +public: + virtual ~DbDeadlockException() throw(); + DbDeadlockException(const char *description); + + DbDeadlockException(const DbDeadlockException &); + DbDeadlockException &operator = (const DbDeadlockException &); +}; + +// +// A specific sort of exception that occurs when +// a lock is not granted, e.g. by lock_get or lock_vec. +// Note that the Dbt is only live as long as the Dbt used +// in the offending call. +// +class _exported DbLockNotGrantedException : public DbException +{ +public: + virtual ~DbLockNotGrantedException() throw(); + DbLockNotGrantedException(const char *prefix, db_lockop_t op, + db_lockmode_t mode, const Dbt *obj, const DbLock lock, int index); + DbLockNotGrantedException(const char *description); + + DbLockNotGrantedException(const DbLockNotGrantedException &); + DbLockNotGrantedException &operator = + (const DbLockNotGrantedException &); + + db_lockop_t get_op() const; + db_lockmode_t get_mode() const; + const Dbt* get_obj() const; + DbLock *get_lock() const; + int get_index() const; + +private: + db_lockop_t op_; + db_lockmode_t mode_; + const Dbt *obj_; + DbLock *lock_; + int index_; +}; + +// +// A specific sort of exception that occurs when +// user declared memory is insufficient in a Dbt. 
+// +class _exported DbMemoryException : public DbException +{ +public: + virtual ~DbMemoryException() throw(); + DbMemoryException(Dbt *dbt); + DbMemoryException(const char *prefix, Dbt *dbt); + + DbMemoryException(const DbMemoryException &); + DbMemoryException &operator = (const DbMemoryException &); + + Dbt *get_dbt() const; +private: + Dbt *dbt_; +}; + +// +// A specific sort of exception that occurs when +// recovery is required before continuing DB activity. +// +class _exported DbRunRecoveryException : public DbException +{ +public: + virtual ~DbRunRecoveryException() throw(); + DbRunRecoveryException(const char *description); + + DbRunRecoveryException(const DbRunRecoveryException &); + DbRunRecoveryException &operator = (const DbRunRecoveryException &); +}; +#endif /* !_DB_CXX_H_ */ diff --git a/db/build_win64/db_deadlock.dsp b/db/build_win64/db_deadlock.dsp new file mode 100644 index 000000000..efd268610 --- /dev/null +++ b/db/build_win64/db_deadlock.dsp @@ -0,0 +1,148 @@ +# Microsoft Developer Studio Project File - Name="db_deadlock" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=db_deadlock - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "db_deadlock.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "db_deadlock.mak" CFG="db_deadlock - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "db_deadlock - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "db_deadlock - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "db_deadlock - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "db_deadlock - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "db_deadlock - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "db_deadlock - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "db_deadlock - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "db_deadlock - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." 
/D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "db_deadlock - Win32 Release" +# Name "db_deadlock - Win32 Debug" +# Name "db_deadlock - Win32 Release Static" +# Name "db_deadlock - Win32 Debug Static" +# Begin Source File + +SOURCE=..\db_deadlock\db_deadlock.c +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/db_dll.dsp b/db/build_win64/db_dll.dsp new file mode 100644 index 000000000..af74213b2 --- /dev/null +++ b/db/build_win64/db_dll.dsp @@ -0,0 +1,880 @@ +# Microsoft Developer Studio Project File - Name="db_dll" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102 + +CFG=db_dll - Win32 Debug +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "db_dll.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "db_dll.mak" CFG="db_dll - Win32 Debug" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "db_dll - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE "db_dll - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +MTL=midl.exe +RSC=rc.exe + +!IF "$(CFG)" == "db_dll - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." 
/D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX /FD /c +# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 +# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:windows /dll /machine:IA64 +# ADD LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /machine:IA64 /out:"Release/libdb43.dll" + +!ELSEIF "$(CFG)" == "db_dll - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 2 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "DB_CREATE_DLL" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c +# SUBTRACT CPP /Fr +# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 +# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:windows /dll /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /base:"0x13000000" /subsystem:windows /dll /debug /machine:IA64 /out:"Debug/libdb43d.dll" /fixed:no + +!ENDIF + +# Begin Target + +# Name "db_dll - Win32 Release" +# Name "db_dll - Win32 Debug" +# Begin Source File + +SOURCE=..\btree\bt_compare.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_conv.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_curadj.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_cursor.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_delete.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_method.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_open.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_put.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_rec.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_reclaim.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_recno.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_rsearch.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_search.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_split.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_stat.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_upgrade.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_verify.c +# End Source File +# Begin Source File + +SOURCE=..\btree\btree_auto.c +# End Source File +# Begin Source File + +SOURCE=..\build_win32\libdb.def +# End Source File +# Begin Source File + +SOURCE=..\build_win32\libdb.rc +# End Source File +# Begin Source File + 
+SOURCE=..\clib\strcasecmp.c +# End Source File +# Begin Source File + +SOURCE=..\common\db_byteorder.c +# End Source File +# Begin Source File + +SOURCE=..\common\db_err.c +# End Source File +# Begin Source File + +SOURCE=..\common\db_getlong.c +# End Source File +# Begin Source File + +SOURCE=..\common\db_idspace.c +# End Source File +# Begin Source File + +SOURCE=..\common\db_log2.c +# End Source File +# Begin Source File + +SOURCE=..\common\util_cache.c +# End Source File +# Begin Source File + +SOURCE=..\common\util_log.c +# End Source File +# Begin Source File + +SOURCE=..\common\util_sig.c +# End Source File +# Begin Source File + +SOURCE=..\crypto\aes_method.c +# End Source File +# Begin Source File + +SOURCE=..\crypto\crypto.c +# End Source File +# Begin Source File + +SOURCE=..\crypto\mersenne\mt19937db.c +# End Source File +# Begin Source File + +SOURCE=..\crypto\rijndael\rijndael-alg-fst.c +# End Source File +# Begin Source File + +SOURCE=..\crypto\rijndael\rijndael-api-fst.c +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_db.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_dbc.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_dbt.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_env.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_except.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_lock.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_logc.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_mpool.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_multi.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_seq.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_txn.cpp +# End Source File +# Begin Source File + +SOURCE=..\db\crdel_auto.c +# End Source File +# Begin Source File + +SOURCE=..\db\crdel_rec.c +# End Source File +# Begin Source File + +SOURCE=..\db\db.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_am.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_auto.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_cam.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_conv.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_dispatch.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_dup.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_iface.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_join.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_meta.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_method.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_open.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_overflow.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_ovfl_vrfy.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_pr.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_rec.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_reclaim.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_remove.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_rename.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_ret.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_setid.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_setlsn.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_stati.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_truncate.c +# End Source File +# Begin Source File + 
+SOURCE=..\db\db_upg.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_upg_opd.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_vrfy.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_vrfyutil.c +# End Source File +# Begin Source File + +SOURCE=..\dbm\dbm.c +# End Source File +# Begin Source File + +SOURCE=..\dbreg\dbreg.c +# End Source File +# Begin Source File + +SOURCE=..\dbreg\dbreg_auto.c +# End Source File +# Begin Source File + +SOURCE=..\dbreg\dbreg_rec.c +# End Source File +# Begin Source File + +SOURCE=..\dbreg\dbreg_stat.c +# End Source File +# Begin Source File + +SOURCE=..\dbreg\dbreg_util.c +# End Source File +# Begin Source File + +SOURCE=..\env\db_salloc.c +# End Source File +# Begin Source File + +SOURCE=..\env\db_shash.c +# End Source File +# Begin Source File + +SOURCE=..\env\env_file.c +# End Source File +# Begin Source File + +SOURCE=..\env\env_method.c +# End Source File +# Begin Source File + +SOURCE=..\env\env_open.c +# End Source File +# Begin Source File + +SOURCE=..\env\env_recover.c +# End Source File +# Begin Source File + +SOURCE=..\env\env_region.c +# End Source File +# Begin Source File + +SOURCE=..\env\env_stat.c +# End Source File +# Begin Source File + +SOURCE=..\fileops\fileops_auto.c +# End Source File +# Begin Source File + +SOURCE=..\fileops\fop_basic.c +# End Source File +# Begin Source File + +SOURCE=..\fileops\fop_rec.c +# End Source File +# Begin Source File + +SOURCE=..\fileops\fop_util.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_auto.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_conv.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_dup.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_func.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_meta.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_method.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_open.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_page.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_rec.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_reclaim.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_stat.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_upgrade.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_verify.c +# End Source File +# Begin Source File + +SOURCE=..\hmac\hmac.c +# End Source File +# Begin Source File + +SOURCE=..\hmac\sha1.c +# End Source File +# Begin Source File + +SOURCE=..\hsearch\hsearch.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_deadlock.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_id.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_list.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_method.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_region.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_stat.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_timer.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_util.c +# End Source File +# Begin Source File + +SOURCE=..\log\log.c +# End Source File +# Begin Source File + +SOURCE=..\log\log_archive.c +# End Source File +# Begin Source File + +SOURCE=..\log\log_compare.c +# End Source File +# Begin Source File + 
+SOURCE=..\log\log_get.c +# End Source File +# Begin Source File + +SOURCE=..\log\log_method.c +# End Source File +# Begin Source File + +SOURCE=..\log\log_put.c +# End Source File +# Begin Source File + +SOURCE=..\log\log_stat.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_alloc.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_bh.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_fget.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_fmethod.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_fopen.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_fput.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_fset.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_method.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_region.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_register.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_stat.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_sync.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_trickle.c +# End Source File +# Begin Source File + +SOURCE=..\mutex\mut_win32.c +# End Source File +# Begin Source File + +SOURCE=..\mutex\mutex.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_alloc.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_id.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_method.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_oflags.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_region.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_root.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_rpath.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_tmpdir.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_abs.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_clock.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_config.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_dir.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_errno.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_fid.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_fsync.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_handle.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_map.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_open.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_rename.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_rw.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_seek.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_sleep.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_spin.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_stat.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_truncate.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_unlink.c +# End Source File +# Begin Source File + +SOURCE=..\qam\qam.c +# End Source File +# Begin Source File + +SOURCE=..\qam\qam_auto.c +# End Source File +# Begin Source File + +SOURCE=..\qam\qam_conv.c +# End Source File +# Begin Source File + +SOURCE=..\qam\qam_files.c +# End Source File +# Begin Source File + +SOURCE=..\qam\qam_method.c +# End Source File +# Begin Source File + +SOURCE=..\qam\qam_open.c +# End Source File +# Begin Source File + +SOURCE=..\qam\qam_rec.c 
+# End Source File +# Begin Source File + +SOURCE=..\qam\qam_stat.c +# End Source File +# Begin Source File + +SOURCE=..\qam\qam_upgrade.c +# End Source File +# Begin Source File + +SOURCE=..\qam\qam_verify.c +# End Source File +# Begin Source File + +SOURCE=..\rep\rep_auto.c +# End Source File +# Begin Source File + +SOURCE=..\rep\rep_backup.c +# End Source File +# Begin Source File + +SOURCE=..\rep\rep_method.c +# End Source File +# Begin Source File + +SOURCE=..\rep\rep_record.c +# End Source File +# Begin Source File + +SOURCE=..\rep\rep_region.c +# End Source File +# Begin Source File + +SOURCE=..\rep\rep_stat.c +# End Source File +# Begin Source File + +SOURCE=..\rep\rep_util.c +# End Source File +# Begin Source File + +SOURCE=..\sequence\seq_stat.c +# End Source File +# Begin Source File + +SOURCE=..\sequence\sequence.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_auto.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_method.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_rec.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_recover.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_region.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_stat.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_util.c +# End Source File +# Begin Source File + +SOURCE=..\xa\xa.c +# End Source File +# Begin Source File + +SOURCE=..\xa\xa_db.c +# End Source File +# Begin Source File + +SOURCE=..\xa\xa_map.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/db_dump.dsp b/db/build_win64/db_dump.dsp new file mode 100644 index 000000000..9fc059677 --- /dev/null +++ b/db/build_win64/db_dump.dsp @@ -0,0 +1,148 @@ +# Microsoft Developer Studio Project File - Name="db_dump" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=db_dump - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "db_dump.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "db_dump.mak" CFG="db_dump - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "db_dump - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "db_dump - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "db_dump - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "db_dump - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "db_dump - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "db_dump - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "db_dump - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "db_dump - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." 
/D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "db_dump - Win32 Release" +# Name "db_dump - Win32 Debug" +# Name "db_dump - Win32 Release Static" +# Name "db_dump - Win32 Debug Static" +# Begin Source File + +SOURCE=..\db_dump\db_dump.c +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/db_int.h b/db/build_win64/db_int.h new file mode 100644 index 000000000..300b9dc9d --- /dev/null +++ b/db/build_win64/db_int.h @@ -0,0 +1,588 @@ +/* DO NOT EDIT: automatically built by dist/s_win32. */ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1996-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: db_int.h,v 11.17 2004/10/14 15:32:29 bostic Exp $ + */ + +#ifndef _DB_INTERNAL_H_ +#define _DB_INTERNAL_H_ + +/******************************************************* + * System includes, db.h, a few general DB includes. The DB includes are + * here because it's OK if db_int.h includes queue structure declarations. + *******************************************************/ +#ifndef NO_SYSTEM_INCLUDES +#if defined(STDC_HEADERS) || defined(__cplusplus) +#include +#else +#include +#endif +#include +#endif + +#include "db.h" + +#include "dbinc/queue.h" +#include "dbinc/shqueue.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +/******************************************************* + * General purpose constants and macros. + *******************************************************/ +#ifndef UINT16_MAX +#define UINT16_MAX 65535 /* Maximum 16-bit unsigned. */ +#endif +#ifndef UINT32_MAX +#define UINT32_MAX 4294967295U /* Maximum 32-bit unsigned. */ +#endif + +#if defined(HAVE_LONG_LONG) && defined(HAVE_UNSIGNED_LONG_LONG) +#undef INT64_MAX +#undef INT64_MIN +#undef UINT64_MAX + +#ifdef DB_WIN32 +#define INT64_MAX _I64_MAX +#define INT64_MIN _I64_MIN +#define UINT64_MAX _UI64_MAX + +#define INT64_FMT "%l64d" +#define UINT64_FMT "%l64u" +#else +/* + * Override the system's 64-bit min/max constants. AIX's 32-bit compiler can + * handle 64-bit values, but the system's constants don't include the LL/ULL + * suffix, and so can't be compiled using the 32-bit compiler. + */ +#define INT64_MAX 9223372036854775807LL +#define INT64_MIN (-INT64_MAX-1) +#define UINT64_MAX 18446744073709551615ULL + +#define INT64_FMT "%lld" +#define UINT64_FMT "%llu" +#endif /* DB_WIN32 */ +#endif /* HAVE_LONG_LONG && HAVE_UNSIGNED_LONG_LONG */ + +#define MEGABYTE 1048576 +#define GIGABYTE 1073741824 + +#define MS_PER_SEC 1000 /* Milliseconds in a second. */ +#define USEC_PER_MS 1000 /* Microseconds in a millisecond. */ + +#define RECNO_OOB 0 /* Illegal record number. */ + +/* Test for a power-of-two (tests true for zero, which doesn't matter here). */ +#define POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0) + +/* Test for valid page sizes. 
*/ +#define DB_MIN_PGSIZE 0x000200 /* Minimum page size (512). */ +#define DB_MAX_PGSIZE 0x010000 /* Maximum page size (65536). */ +#define IS_VALID_PAGESIZE(x) \ + (POWER_OF_TWO(x) && (x) >= DB_MIN_PGSIZE && ((x) <= DB_MAX_PGSIZE)) + +/* Minimum number of pages cached, by default. */ +#define DB_MINPAGECACHE 16 + +/* + * If we are unable to determine the underlying filesystem block size, use + * 8K on the grounds that most OS's use less than 8K for a VM page size. + */ +#define DB_DEF_IOSIZE (8 * 1024) + +/* Align an integer to a specific boundary. */ +#undef DB_ALIGN +#define DB_ALIGN(v, bound) \ + (((v) + (bound) - 1) & ~(((uintmax_t)bound) - 1)) + +/* Increment a pointer to a specific boundary. */ +#undef ALIGNP_INC +#define ALIGNP_INC(p, bound) \ + (void *)(((uintptr_t)(p) + (bound) - 1) & ~(((uintptr_t)bound) - 1)) + +/* Decrement a pointer to a specific boundary. */ +#undef ALIGNP_DEC +#define ALIGNP_DEC(p, bound) \ + (void *)((uintptr_t)(p) & ~(((uintptr_t)bound) - 1)) + +/* + * Print an address as a u_long (a u_long is the largest type we can print + * portably). Most 64-bit systems have made longs 64-bits, so this should + * work. + */ +#define P_TO_ULONG(p) ((u_long)(uintptr_t)(p)) + +/* + * Convert a pointer to a small integral value. + * + * The (u_int16_t)(uintptr_t) cast avoids warnings: the (uintptr_t) cast + * converts the value to an integral type, and the (u_int16_t) cast converts + * it to a small integral type so we don't get complaints when we assign the + * final result to an integral type smaller than uintptr_t. + */ +#define P_TO_UINT32(p) ((u_int32_t)(uintptr_t)(p)) +#define P_TO_UINT16(p) ((u_int16_t)(uintptr_t)(p)) + +/* + * There are several on-page structures that are declared to have a number of + * fields followed by a variable length array of items. The structure size + * without including the variable length array or the address of the first of + * those elements can be found using SSZ. + * + * This macro can also be used to find the offset of a structure element in a + * structure. This is used in various places to copy structure elements from + * unaligned memory references, e.g., pointers into a packed page. + * + * There are two versions because compilers object if you take the address of + * an array. + */ +#undef SSZ +#define SSZ(name, field) P_TO_UINT16(&(((name *)0)->field)) + +#undef SSZA +#define SSZA(name, field) P_TO_UINT16(&(((name *)0)->field[0])) + +/* Structure used to print flag values. */ +typedef struct __fn { + u_int32_t mask; /* Flag value. */ + const char *name; /* Flag name. */ +} FN; + +/* Set, clear and test flags. */ +#define FLD_CLR(fld, f) (fld) &= ~(f) +#define FLD_ISSET(fld, f) ((fld) & (f)) +#define FLD_SET(fld, f) (fld) |= (f) +#define F_CLR(p, f) (p)->flags &= ~(f) +#define F_ISSET(p, f) ((p)->flags & (f)) +#define F_SET(p, f) (p)->flags |= (f) +#define LF_CLR(f) ((flags) &= ~(f)) +#define LF_ISSET(f) ((flags) & (f)) +#define LF_SET(f) ((flags) |= (f)) + +/* + * Calculate a percentage. The values can overflow 32-bit integer arithmetic + * so we use floating point. + * + * When calculating a bytes-vs-page size percentage, we're getting the inverse + * of the percentage in all cases, that is, we want 100 minus the percentage we + * calculate. + */ +#define DB_PCT(v, total) \ + ((int)((total) == 0 ? 0 : ((double)(v) * 100) / (total))) +#define DB_PCT_PG(v, total, pgsize) \ + ((int)((total) == 0 ? 0 : \ + 100 - ((double)(v) * 100) / ((total) * (pgsize)))) + +/* + * Structure used for callback message aggregation. 
+ * + * Display values in XXX_stat_print calls. + */ +typedef struct __db_msgbuf { + char *buf; /* Heap allocated buffer. */ + char *cur; /* Current end of message. */ + size_t len; /* Allocated length of buffer. */ +} DB_MSGBUF; +#define DB_MSGBUF_INIT(a) do { \ + (a)->buf = (a)->cur = NULL; \ + (a)->len = 0; \ +} while (0) +#define DB_MSGBUF_FLUSH(dbenv, a) do { \ + if ((a)->buf != NULL) { \ + if ((a)->cur != (a)->buf) \ + __db_msg(dbenv, "%s", (a)->buf); \ + __os_free(dbenv, (a)->buf); \ + DB_MSGBUF_INIT(a); \ + } \ +} while (0) +#define STAT_FMT(msg, fmt, type, v) do { \ + DB_MSGBUF __mb; \ + DB_MSGBUF_INIT(&__mb); \ + __db_msgadd(dbenv, &__mb, fmt, (type)(v)); \ + __db_msgadd(dbenv, &__mb, "\t%s", msg); \ + DB_MSGBUF_FLUSH(dbenv, &__mb); \ +} while (0) +#define STAT_HEX(msg, v) \ + __db_msg(dbenv, "%#lx\t%s", (u_long)(v), msg) +#define STAT_ISSET(msg, p) \ + __db_msg(dbenv, "%sSet\t%s", (p) == NULL ? "!" : " ", msg) +#define STAT_LONG(msg, v) \ + __db_msg(dbenv, "%ld\t%s", (long)(v), msg) +#define STAT_LSN(msg, lsnp) \ + __db_msg(dbenv, "%lu/%lu\t%s", \ + (u_long)(lsnp)->file, (u_long)(lsnp)->offset, msg) +#define STAT_STRING(msg, p) do { \ + const char *__p = p; /* p may be a function call. */ \ + __db_msg(dbenv, "%s\t%s", __p == NULL ? "!Set" : __p, msg); \ +} while (0) +#define STAT_ULONG(msg, v) \ + __db_msg(dbenv, "%lu\t%s", (u_long)(v), msg) + +/******************************************************* + * API return values + *******************************************************/ +/* + * Return values that are OK for each different call. Most calls have a + * standard 'return of 0 is only OK value', but some, like db->get have + * DB_NOTFOUND as a return value, but it really isn't an error. + */ +#define DB_RETOK_STD(ret) ((ret) == 0) +#define DB_RETOK_DBCDEL(ret) ((ret) == 0 || (ret) == DB_KEYEMPTY || \ + (ret) == DB_NOTFOUND) +#define DB_RETOK_DBCGET(ret) ((ret) == 0 || (ret) == DB_KEYEMPTY || \ + (ret) == DB_NOTFOUND) +#define DB_RETOK_DBCPUT(ret) ((ret) == 0 || (ret) == DB_KEYEXIST || \ + (ret) == DB_NOTFOUND) +#define DB_RETOK_DBDEL(ret) DB_RETOK_DBCDEL(ret) +#define DB_RETOK_DBGET(ret) DB_RETOK_DBCGET(ret) +#define DB_RETOK_DBPUT(ret) ((ret) == 0 || (ret) == DB_KEYEXIST) +#define DB_RETOK_LGGET(ret) ((ret) == 0 || (ret) == DB_NOTFOUND) +#define DB_RETOK_MPGET(ret) ((ret) == 0 || (ret) == DB_PAGE_NOTFOUND) +#define DB_RETOK_REPPMSG(ret) ((ret) == 0 || \ + (ret) == DB_REP_ISPERM || \ + (ret) == DB_REP_NEWMASTER || \ + (ret) == DB_REP_NEWSITE || \ + (ret) == DB_REP_NOTPERM || \ + (ret) == DB_REP_STARTUPDONE) + +/* Find a reasonable operation-not-supported error. */ +#ifdef EOPNOTSUPP +#define DB_OPNOTSUP EOPNOTSUPP +#else +#ifdef ENOTSUP +#define DB_OPNOTSUP ENOTSUP +#else +#define DB_OPNOTSUP EINVAL +#endif +#endif + +/******************************************************* + * Files. + *******************************************************/ +/* + * We use 1024 as the maximum path length. It's too hard to figure out what + * the real path length is, as it was traditionally stored in , + * and that file isn't always available. + */ +#undef MAXPATHLEN +#define MAXPATHLEN 1024 + +#define PATH_DOT "." /* Current working directory. */ + /* Path separator character(s). */ +#define PATH_SEPARATOR "\\/:" + +/******************************************************* + * Environment. + *******************************************************/ +/* Type passed to __db_appname(). */ +typedef enum { + DB_APP_NONE=0, /* No type (region). */ + DB_APP_DATA, /* Data file. 
*/ + DB_APP_LOG, /* Log file. */ + DB_APP_TMP /* Temporary file. */ +} APPNAME; + +/* + * CDB_LOCKING CDB product locking. + * CRYPTO_ON Security has been configured. + * LOCKING_ON Locking has been configured. + * LOGGING_ON Logging has been configured. + * MPOOL_ON Memory pool has been configured. + * REP_ON Replication has been configured. + * RPC_ON RPC has been configured. + * TXN_ON Transactions have been configured. + */ +#define CDB_LOCKING(dbenv) F_ISSET(dbenv, DB_ENV_CDB) +#define CRYPTO_ON(dbenv) ((dbenv)->crypto_handle != NULL) +#define LOCKING_ON(dbenv) ((dbenv)->lk_handle != NULL) +#define LOGGING_ON(dbenv) ((dbenv)->lg_handle != NULL) +#define MPOOL_ON(dbenv) ((dbenv)->mp_handle != NULL) +#define REP_ON(dbenv) ((dbenv)->rep_handle != NULL) +#define RPC_ON(dbenv) ((dbenv)->cl_handle != NULL) +#define TXN_ON(dbenv) ((dbenv)->tx_handle != NULL) + +/* + * STD_LOCKING Standard locking, that is, locking was configured and CDB + * was not. We do not do locking in off-page duplicate trees, + * so we check for that in the cursor first. + */ +#define STD_LOCKING(dbc) \ + (!F_ISSET(dbc, DBC_OPD) && \ + !CDB_LOCKING((dbc)->dbp->dbenv) && LOCKING_ON((dbc)->dbp->dbenv)) + +/* + * IS_RECOVERING: The system is running recovery. + */ +#define IS_RECOVERING(dbenv) \ + (LOGGING_ON(dbenv) && \ + F_ISSET((DB_LOG *)(dbenv)->lg_handle, DBLOG_RECOVER)) + +/* Initialization methods are often illegal before/after open is called. */ +#define ENV_ILLEGAL_AFTER_OPEN(dbenv, name) \ + if (F_ISSET((dbenv), DB_ENV_OPEN_CALLED)) \ + return (__db_mi_open(dbenv, name, 1)); +#define ENV_ILLEGAL_BEFORE_OPEN(dbenv, name) \ + if (!F_ISSET((dbenv), DB_ENV_OPEN_CALLED)) \ + return (__db_mi_open(dbenv, name, 0)); + +/* We're not actually user hostile, honest. */ +#define ENV_REQUIRES_CONFIG(dbenv, handle, i, flags) \ + if (handle == NULL) \ + return (__db_env_config(dbenv, i, flags)); +#define ENV_NOT_CONFIGURED(dbenv, handle, i, flags) \ + if (F_ISSET((dbenv), DB_ENV_OPEN_CALLED)) \ + ENV_REQUIRES_CONFIG(dbenv, handle, i, flags) + +/******************************************************* + * Database Access Methods. + *******************************************************/ +/* + * DB_IS_THREADED -- + * The database handle is free-threaded (was opened with DB_THREAD). + */ +#define DB_IS_THREADED(dbp) \ + ((dbp)->mutexp != NULL) + +/* Initialization methods are often illegal before/after open is called. */ +#define DB_ILLEGAL_AFTER_OPEN(dbp, name) \ + if (F_ISSET((dbp), DB_AM_OPEN_CALLED)) \ + return (__db_mi_open((dbp)->dbenv, name, 1)); +#define DB_ILLEGAL_BEFORE_OPEN(dbp, name) \ + if (!F_ISSET((dbp), DB_AM_OPEN_CALLED)) \ + return (__db_mi_open((dbp)->dbenv, name, 0)); +/* Some initialization methods are illegal if environment isn't local. */ +#define DB_ILLEGAL_IN_ENV(dbp, name) \ + if (!F_ISSET((dbp)->dbenv, DB_ENV_DBLOCAL)) \ + return (__db_mi_env((dbp)->dbenv, name)); +#define DB_ILLEGAL_METHOD(dbp, flags) { \ + int __ret; \ + if ((__ret = __dbh_am_chk(dbp, flags)) != 0) \ + return (__ret); \ +} + +/* + * Common DBC->internal fields. Each access method adds additional fields + * to this list, but the initial fields are common. + */ +#define __DBC_INTERNAL \ + DBC *opd; /* Off-page duplicate cursor. */\ + \ + void *page; /* Referenced page. */ \ + db_pgno_t root; /* Tree root. */ \ + db_pgno_t pgno; /* Referenced page number. */ \ + db_indx_t indx; /* Referenced key item index. */\ + \ + DB_LOCK lock; /* Cursor lock. */ \ + db_lockmode_t lock_mode; /* Lock mode. 
*/ + +struct __dbc_internal { + __DBC_INTERNAL +}; + +/* Actions that __db_master_update can take. */ +typedef enum { MU_REMOVE, MU_RENAME, MU_OPEN } mu_action; + +/* + * Access-method-common macro for determining whether a cursor + * has been initialized. + */ +#define IS_INITIALIZED(dbc) ((dbc)->internal->pgno != PGNO_INVALID) + +/* Free the callback-allocated buffer, if necessary, hanging off of a DBT. */ +#define FREE_IF_NEEDED(sdbp, dbt) \ + if (F_ISSET((dbt), DB_DBT_APPMALLOC)) { \ + __os_ufree((sdbp)->dbenv, (dbt)->data); \ + F_CLR((dbt), DB_DBT_APPMALLOC); \ + } + +/* + * Use memory belonging to object "owner" to return the results of + * any no-DBT-flag get ops on cursor "dbc". + */ +#define SET_RET_MEM(dbc, owner) \ + do { \ + (dbc)->rskey = &(owner)->my_rskey; \ + (dbc)->rkey = &(owner)->my_rkey; \ + (dbc)->rdata = &(owner)->my_rdata; \ + } while (0) + +/* Use the return-data memory src is currently set to use in dest as well. */ +#define COPY_RET_MEM(src, dest) \ + do { \ + (dest)->rskey = (src)->rskey; \ + (dest)->rkey = (src)->rkey; \ + (dest)->rdata = (src)->rdata; \ + } while (0) + +/* Reset the returned-memory pointers to their defaults. */ +#define RESET_RET_MEM(dbc) \ + do { \ + (dbc)->rskey = &(dbc)->my_rskey; \ + (dbc)->rkey = &(dbc)->my_rkey; \ + (dbc)->rdata = &(dbc)->my_rdata; \ + } while (0) + +/******************************************************* + * Mpool. + *******************************************************/ +/* + * File types for DB access methods. Negative numbers are reserved to DB. + */ +#define DB_FTYPE_SET -1 /* Call pgin/pgout functions. */ +#define DB_FTYPE_NOTSET 0 /* Don't call... */ + +/* Structure used as the DB pgin/pgout pgcookie. */ +typedef struct __dbpginfo { + size_t db_pagesize; /* Underlying page size. */ + u_int32_t flags; /* Some DB_AM flags needed. */ + DBTYPE type; /* DB type */ +} DB_PGINFO; + +/******************************************************* + * Log. + *******************************************************/ +/* Initialize an LSN to 'zero'. */ +#define ZERO_LSN(LSN) do { \ + (LSN).file = 0; \ + (LSN).offset = 0; \ +} while (0) +#define IS_ZERO_LSN(LSN) ((LSN).file == 0) + +#define IS_INIT_LSN(LSN) ((LSN).file == 1 && (LSN).offset == 0) +#define INIT_LSN(LSN) do { \ + (LSN).file = 1; \ + (LSN).offset = 0; \ +} while (0) + +#define MAX_LSN(LSN) do { \ + (LSN).file = UINT32_MAX; \ + (LSN).offset = UINT32_MAX; \ +} while (0) +#define IS_MAX_LSN(LSN) \ + ((LSN).file == UINT32_MAX && (LSN).offset == UINT32_MAX) + +/* If logging is turned off, smash the lsn. */ +#define LSN_NOT_LOGGED(LSN) do { \ + (LSN).file = 0; \ + (LSN).offset = 1; \ +} while (0) +#define IS_NOT_LOGGED_LSN(LSN) \ + ((LSN).file == 0 && (LSN).offset == 1) + +/******************************************************* + * Txn. + *******************************************************/ +#define DB_NONBLOCK(C) ((C)->txn != NULL && F_ISSET((C)->txn, TXN_NOWAIT)) +#define NOWAIT_FLAG(txn) \ + ((txn) != NULL && F_ISSET((txn), TXN_NOWAIT) ? DB_LOCK_NOWAIT : 0) +#define IS_SUBTRANSACTION(txn) \ + ((txn) != NULL && (txn)->parent != NULL) + +/******************************************************* + * Crypto. + *******************************************************/ +#define DB_IV_BYTES 16 /* Bytes per IV */ +#define DB_MAC_KEY 20 /* Bytes per MAC checksum */ + +/******************************************************* + * Secondaries over RPC. 
+ *******************************************************/
+#ifdef CONFIG_TEST
+/*
+ * These are flags passed to DB->associate calls by the Tcl API if running
+ * over RPC. The RPC server will mask out these flags before making the real
+ * DB->associate call.
+ *
+ * These flags must coexist with the valid flags to DB->associate (currently
+ * DB_AUTO_COMMIT and DB_CREATE). DB_AUTO_COMMIT is in the group of
+ * high-order shared flags (0xff000000), and DB_CREATE is in the low-order
+ * group (0x00000fff), so we pick a range in between.
+ */
+#define DB_RPC2ND_MASK 0x00f00000 /* Reserved bits. */
+
+#define DB_RPC2ND_REVERSEDATA 0x00100000 /* callback_n(0) _s_reversedata. */
+#define DB_RPC2ND_NOOP 0x00200000 /* callback_n(1) _s_noop */
+#define DB_RPC2ND_CONCATKEYDATA 0x00300000 /* callback_n(2) _s_concatkeydata */
+#define DB_RPC2ND_CONCATDATAKEY 0x00400000 /* callback_n(3) _s_concatdatakey */
+#define DB_RPC2ND_REVERSECONCAT 0x00500000 /* callback_n(4) _s_reverseconcat */
+#define DB_RPC2ND_TRUNCDATA 0x00600000 /* callback_n(5) _s_truncdata */
+#define DB_RPC2ND_CONSTANT 0x00700000 /* callback_n(6) _s_constant */
+#define DB_RPC2ND_GETZIP 0x00800000 /* sj_getzip */
+#define DB_RPC2ND_GETNAME 0x00900000 /* sj_getname */
+#endif
+
+/*******************************************************
+ * Forward structure declarations.
+ *******************************************************/
+struct __db_reginfo_t; typedef struct __db_reginfo_t REGINFO;
+struct __db_txnhead; typedef struct __db_txnhead DB_TXNHEAD;
+struct __db_txnlist; typedef struct __db_txnlist DB_TXNLIST;
+struct __vrfy_childinfo; typedef struct __vrfy_childinfo VRFY_CHILDINFO;
+struct __vrfy_dbinfo; typedef struct __vrfy_dbinfo VRFY_DBINFO;
+struct __vrfy_pageinfo; typedef struct __vrfy_pageinfo VRFY_PAGEINFO;
+
+#if defined(__cplusplus)
+}
+#endif
+
+/*******************************************************
+ * Remaining general DB includes.
+ *******************************************************/
+
+
+#include "dbinc/globals.h"
+#include "dbinc/debug.h"
+#include "dbinc/mutex.h"
+#include "dbinc/region.h"
+#include "dbinc_auto/mutex_ext.h" /* XXX: Include after region.h. */
+#include "dbinc_auto/env_ext.h"
+#include "dbinc/os.h"
+#include "dbinc/rep.h"
+#include "dbinc_auto/clib_ext.h"
+#include "dbinc_auto/common_ext.h"
+
+/*******************************************************
+ * Remaining Log.
+ * These need to be defined after the general includes
+ * because they need rep.h from above.
+ *******************************************************/
+/*
+ * Test if the environment is currently logging changes. If we're in recovery
+ * or we're a replication client, we don't need to log changes because they're
+ * already in the log, even though we have a fully functional log system.
+ */
+#define DBENV_LOGGING(dbenv) \
+ (LOGGING_ON(dbenv) && !IS_REP_CLIENT(dbenv) && \
+ (!IS_RECOVERING(dbenv)))
+
+/*
+ * Test if we need to log a change. By default, we don't log operations without
+ * associated transactions, unless DIAGNOSTIC, DEBUG_ROP or DEBUG_WOP are on.
+ * This is because we want to get log records for read/write operations, and, if
+ * we're trying to debug something, more information is always better.
+ *
+ * The DBC_RECOVER flag is set when we're in abort, as well as during recovery;
+ * thus DBC_LOGGING may be false for a particular dbc even when DBENV_LOGGING
+ * is true.
+ * + * We explicitly use LOGGING_ON/IS_REP_CLIENT here because we don't want to pull + * in the log headers, which IS_RECOVERING (and thus DBENV_LOGGING) rely on, and + * because DBC_RECOVER should be set anytime IS_RECOVERING would be true. + */ +#if defined(DIAGNOSTIC) || defined(DEBUG_ROP) || defined(DEBUG_WOP) +#define DBC_LOGGING(dbc) \ + (LOGGING_ON((dbc)->dbp->dbenv) && \ + !F_ISSET((dbc), DBC_RECOVER) && !IS_REP_CLIENT((dbc)->dbp->dbenv)) +#else +#define DBC_LOGGING(dbc) \ + ((dbc)->txn != NULL && LOGGING_ON((dbc)->dbp->dbenv) && \ + !F_ISSET((dbc), DBC_RECOVER) && !IS_REP_CLIENT((dbc)->dbp->dbenv)) +#endif + +#endif /* !_DB_INTERNAL_H_ */ diff --git a/db/build_win64/db_java.dsp b/db/build_win64/db_java.dsp new file mode 100644 index 000000000..dcf152708 --- /dev/null +++ b/db/build_win64/db_java.dsp @@ -0,0 +1,132 @@ +# Microsoft Developer Studio Project File - Name="db_java" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102 + +CFG=db_java - Win32 Debug +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "db_java.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "db_java.mak" CFG="db_java - Win32 Debug" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "db_java - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE "db_java - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +MTL=midl.exe +RSC=rc.exe + +!IF "$(CFG)" == "db_java - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /YX /FD /c +# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 +# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:windows /dll /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:IA64 /out:"Release/libdb_java43.dll" +# Begin Custom Build - Compiling java files using javac +ProjDir=. 
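[Illustration, not part of the patch.] The db_int.h hunk that closes just above defines a set of LSN sentinels (zero, initial, maximal, not-logged) alongside the DBENV_LOGGING/DBC_LOGGING predicates. The following stand-alone C sketch shows how those sentinels behave; lsn_t is a local stand-in for the real DB_LSN type, and the macro bodies are condensed from the hunk above.

#include <stdint.h>
#include <stdio.h>

typedef struct {                        /* Local stand-in for DB_LSN. */
        uint32_t file;
        uint32_t offset;
} lsn_t;

/* Macro bodies condensed from the db_int.h hunk above. */
#define ZERO_LSN(LSN)           do { (LSN).file = 0; (LSN).offset = 0; } while (0)
#define IS_ZERO_LSN(LSN)        ((LSN).file == 0)
#define INIT_LSN(LSN)           do { (LSN).file = 1; (LSN).offset = 0; } while (0)
#define IS_INIT_LSN(LSN)        ((LSN).file == 1 && (LSN).offset == 0)
#define MAX_LSN(LSN)            do { (LSN).file = UINT32_MAX; (LSN).offset = UINT32_MAX; } while (0)
#define IS_MAX_LSN(LSN)         ((LSN).file == UINT32_MAX && (LSN).offset == UINT32_MAX)
#define LSN_NOT_LOGGED(LSN)     do { (LSN).file = 0; (LSN).offset = 1; } while (0)
#define IS_NOT_LOGGED_LSN(LSN)  ((LSN).file == 0 && (LSN).offset == 1)

int
main(void)
{
        lsn_t lsn;

        ZERO_LSN(lsn);                  /* "No LSN assigned yet." */
        printf("zero: %d\n", IS_ZERO_LSN(lsn));
        INIT_LSN(lsn);                  /* First possible log record. */
        printf("init: %d\n", IS_INIT_LSN(lsn));
        MAX_LSN(lsn);                   /* Compares greater than any real LSN. */
        printf("max: %d\n", IS_MAX_LSN(lsn));
        LSN_NOT_LOGGED(lsn);            /* Change made while logging was off. */
        printf("not logged: %d\n", IS_NOT_LOGGED_LSN(lsn));
        return (0);
}

Note that the not-logged sentinel (file 0, offset 1) is deliberately distinct from the zero LSN (file 0, offset 0), so callers can tell "never assigned an LSN" apart from "logging was smashed off for this change"; DBENV_LOGGING and DBC_LOGGING then decide whether a new LSN should be generated at all.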
+InputPath=.\Release\libdb_java43.dll +SOURCE="$(InputPath)" + +"force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)" + echo compiling Berkeley DB classes + mkdir "$(OUTDIR)\classes" + javac -O -d "$(OUTDIR)\classes" -classpath "$(OUTDIR)/classes" ..\java\src\com\sleepycat\db\*.java ..\java\src\com\sleepycat\db\internal\*.java ..\java\src\com\sleepycat\bind\*.java ..\java\src\com\sleepycat\bind\serial\*.java ..\java\src\com\sleepycat\bind\tuple\*.java ..\java\src\com\sleepycat\collections\*.java ..\java\src\com\sleepycat\compat\*.java ..\java\src\com\sleepycat\util\*.java + echo compiling examples + mkdir "$(OUTDIR)\classes.ex" + javac -O -d "$(OUTDIR)\classes.ex" -classpath "$(OUTDIR)\classes;$(OUTDIR)\classes.ex" ..\examples_java\src\com\sleepycat\examples\db\*.java ..\examples_java\src\com\sleepycat\examples\db\GettingStarted\*.java ..\examples_java\src\com\sleepycat\examples\collections\access\*.java ..\examples_java\src\com\sleepycat\examples\collections\hello\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\basic\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\entity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\tuple\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\sentity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\marshal\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\factory\*.java + echo creating jar files + jar cf "$(OUTDIR)\db.jar" -C "$(OUTDIR)\classes" . + jar cf "$(OUTDIR)\dbexamples.jar" -C "$(OUTDIR)\classes.ex" . + echo Java build finished + +# End Custom Build + +!ELSEIF "$(CFG)" == "db_java - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 2 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c +# SUBTRACT CPP /Fr +# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 +# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:windows /dll /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /base:"0x13000000" /subsystem:windows /dll /debug /machine:IA64 /out:"Debug/libdb_java43d.dll" /fixed:no +# Begin Custom Build - Compiling java files using javac +ProjDir=. 
+InputPath=.\Debug\libdb_java43d.dll +SOURCE="$(InputPath)" + +"force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)" + echo compiling Berkeley DB classes + mkdir "$(OUTDIR)\classes" + javac -g -d "$(OUTDIR)\classes" -classpath "$(OUTDIR)/classes" ..\java\src\com\sleepycat\db\*.java ..\java\src\com\sleepycat\db\internal\*.java ..\java\src\com\sleepycat\bind\*.java ..\java\src\com\sleepycat\bind\serial\*.java ..\java\src\com\sleepycat\bind\tuple\*.java ..\java\src\com\sleepycat\collections\*.java ..\java\src\com\sleepycat\compat\*.java ..\java\src\com\sleepycat\util\*.java + echo compiling examples + mkdir "$(OUTDIR)\classes.ex" + javac -g -d "$(OUTDIR)\classes.ex" -classpath "$(OUTDIR)\classes;$(OUTDIR)\classes.ex" ..\examples_java\src\com\sleepycat\examples\db\*.java ..\examples_java\src\com\sleepycat\examples\db\GettingStarted\*.java ..\examples_java\src\com\sleepycat\examples\collections\access\*.java ..\examples_java\src\com\sleepycat\examples\collections\hello\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\basic\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\entity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\tuple\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\sentity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\marshal\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\factory\*.java + echo creating jar files + jar cf "$(OUTDIR)\db.jar" -C "$(OUTDIR)\classes" . + jar cf "$(OUTDIR)\dbexamples.jar" -C "$(OUTDIR)\classes.ex" . + echo Java build finished + +# End Custom Build + +!ENDIF + +# Begin Target + +# Name "db_java - Win32 Release" +# Name "db_java - Win32 Debug" +# Begin Source File + +SOURCE=..\libdb_java\db_java_wrap.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/db_lib.dsp b/db/build_win64/db_lib.dsp new file mode 100644 index 000000000..a7fb41579 --- /dev/null +++ b/db/build_win64/db_lib.dsp @@ -0,0 +1,92 @@ +# Microsoft Developer Studio Project File - Name="db_lib" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Generic Project" 0x010a + +CFG=db_lib - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "db_lib.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "db_lib.mak" CFG="db_lib - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "db_lib - Win32 Release" (based on "Win32 (x86) Generic Project") +!MESSAGE "db_lib - Win32 Debug" (based on "Win32 (x86) Generic Project") +!MESSAGE "db_lib - Win32 Release Static" (based on "Win32 (x86) Generic Project") +!MESSAGE "db_lib - Win32 Debug Static" (based on "Win32 (x86) Generic Project") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" + +!IF "$(CFG)" == "db_lib - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Target_Dir "" + +!ELSEIF "$(CFG)" == "db_lib - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Target_Dir "" + +!ELSEIF "$(CFG)" == "db_lib - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release_static" +# PROP BASE Intermediate_Dir "Release_static" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Target_Dir "" + +!ELSEIF "$(CFG)" == "db_lib - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug_static" +# PROP BASE Intermediate_Dir "Debug_Static" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_Static" +# PROP Target_Dir "" + +!ENDIF + +# Begin Target + +# Name "db_lib - Win32 Release" +# Name "db_lib - Win32 Debug" +# Name "db_lib - Win32 Release Static" +# Name "db_lib - Win32 Debug Static" +# End Target +# End Project diff --git a/db/build_win64/db_load.dsp b/db/build_win64/db_load.dsp new file mode 100644 index 000000000..71ae7a299 --- /dev/null +++ b/db/build_win64/db_load.dsp @@ -0,0 +1,148 @@ +# Microsoft Developer Studio Project File - Name="db_load" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=db_load - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "db_load.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "db_load.mak" CFG="db_load - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "db_load - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "db_load - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "db_load - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "db_load - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "db_load - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "db_load - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "db_load - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "db_load - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "db_load - Win32 Release" +# Name "db_load - Win32 Debug" +# Name "db_load - Win32 Release Static" +# Name "db_load - Win32 Debug Static" +# Begin Source File + +SOURCE=..\db_load\db_load.c +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/db_perf.dsp b/db/build_win64/db_perf.dsp new file mode 100644 index 000000000..93ce956fe --- /dev/null +++ b/db/build_win64/db_perf.dsp @@ -0,0 +1,224 @@ +# Microsoft Developer Studio Project File - Name="db_perf" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=db_perf - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "db_perf.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "db_perf.mak" CFG="db_perf - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "db_perf - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "db_perf - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "db_perf - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "db_perf - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "db_perf - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "db_perf - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "db_perf - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "db_perf - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "db_perf - Win32 Release" +# Name "db_perf - Win32 Debug" +# Name "db_perf - Win32 Release Static" +# Name "db_perf - Win32 Debug Static" +# Begin Source File + +SOURCE=..\test_perf\db_perf.c +# End Source File +# Begin Source File + +SOURCE=..\test_perf\perf_checkpoint.c +# End Source File +# Begin Source File + +SOURCE=..\test_perf\perf_config.c +# End Source File +# Begin Source File + +SOURCE=..\test_perf\perf_dbs.c +# End Source File +# Begin Source File + +SOURCE=..\test_perf\perf_dead.c +# End Source File +# Begin Source File + +SOURCE=..\test_perf\perf_debug.c +# End Source File +# Begin Source File + +SOURCE=..\test_perf\perf_file.c +# End Source File +# Begin Source File + +SOURCE=..\test_perf\perf_key.c +# End Source File +# Begin Source File + +SOURCE=..\test_perf\perf_log.c +# End Source File +# Begin Source File + +SOURCE=..\test_perf\perf_misc.c +# End Source File +# Begin Source File + +SOURCE=..\test_perf\perf_op.c +# End Source File +# Begin Source File + +SOURCE=..\test_perf\perf_parse.c +# End Source File +# Begin Source File + +SOURCE=..\test_perf\perf_rand.c +# End Source File +# Begin Source File + +SOURCE=..\test_perf\perf_spawn.c +# End Source File +# Begin Source File + +SOURCE=..\test_perf\perf_stat.c +# End Source File +# Begin Source File + +SOURCE=..\test_perf\perf_sync.c +# End Source File +# Begin Source File + +SOURCE=..\test_perf\perf_thread.c +# End Source File +# Begin Source File + +SOURCE=..\test_perf\perf_trickle.c +# End Source File +# Begin Source File + +SOURCE=..\test_perf\perf_txn.c +# End Source File +# Begin Source File + +SOURCE=..\test_perf\perf_util.c +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/db_printlog.dsp b/db/build_win64/db_printlog.dsp new file mode 100644 index 000000000..5fb97bef5 --- /dev/null +++ 
b/db/build_win64/db_printlog.dsp @@ -0,0 +1,184 @@ +# Microsoft Developer Studio Project File - Name="db_printlog" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=db_printlog - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "db_printlog.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "db_printlog.mak" CFG="db_printlog - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "db_printlog - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "db_printlog - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "db_printlog - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "db_printlog - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "db_printlog - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "db_printlog - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." 
/D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "db_printlog - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "db_printlog - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." 
/D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "db_printlog - Win32 Release" +# Name "db_printlog - Win32 Debug" +# Name "db_printlog - Win32 Release Static" +# Name "db_printlog - Win32 Debug Static" +# Begin Source File + +SOURCE=..\btree\btree_autop.c +# End Source File +# Begin Source File + +SOURCE=..\db\crdel_autop.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_autop.c +# End Source File +# Begin Source File + +SOURCE=..\db_printlog\db_printlog.c +# End Source File +# Begin Source File + +SOURCE=..\dbreg\dbreg_autop.c +# End Source File +# Begin Source File + +SOURCE=..\fileops\fileops_autop.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_autop.c +# End Source File +# Begin Source File + +SOURCE=..\qam\qam_autop.c +# End Source File +# Begin Source File + +SOURCE=..\rep\rep_autop.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_autop.c +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/db_recover.dsp b/db/build_win64/db_recover.dsp new file mode 100644 index 000000000..039530fd6 --- /dev/null +++ b/db/build_win64/db_recover.dsp @@ -0,0 +1,148 @@ +# Microsoft Developer Studio Project File - Name="db_recover" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=db_recover - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "db_recover.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "db_recover.mak" CFG="db_recover - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "db_recover - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "db_recover - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "db_recover - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "db_recover - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "db_recover - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "db_recover - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "db_recover - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "db_recover - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "db_recover - Win32 Release" +# Name "db_recover - Win32 Debug" +# Name "db_recover - Win32 Release Static" +# Name "db_recover - Win32 Debug Static" +# Begin Source File + +SOURCE=..\db_recover\db_recover.c +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/db_small.dsp b/db/build_win64/db_small.dsp new file mode 100644 index 000000000..f82b0b08f --- /dev/null +++ b/db/build_win64/db_small.dsp @@ -0,0 +1,720 @@ +# Microsoft Developer Studio Project File - Name="db_small" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Static Library" 0x0104 + +CFG=db_small - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "db_small.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "db_small.mak" CFG="db_small - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "db_small - Win32 Release Static" (based on "Win32 (x86) Static Library") +!MESSAGE "db_small - Win32 Debug Static" (based on "Win32 (x86) Static Library") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "db_small - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release_small" +# PROP BASE Intermediate_Dir "Release_small" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_small" +# PROP Intermediate_Dir "Release_small" +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /D "HAVE_SMALLBUILD" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX"config.h" /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /D "HAVE_SMALLBUILD" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c +# ADD BASE RSC /l 0xc09 +# ADD RSC /l 0xc09 +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LIB32=link.exe -lib +# ADD BASE LIB32 /nologo /out:"Release_small/libdb43s.lib" +# ADD LIB32 /nologo /out:"Release_small/libdb43s.lib" + +!ELSEIF "$(CFG)" == "db_small - Win32 Debug Static" + +# PROP BASE Use_MFC 1 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug_small" +# PROP BASE Intermediate_Dir "Debug_small" +# PROP BASE Target_Dir "" +# PROP Use_MFC 1 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_small" +# PROP Intermediate_Dir "Debug_small" +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "HAVE_SMALLBUILD" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX"config.h" /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." 
/D "HAVE_SMALLBUILD" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c +# ADD BASE RSC /l 0xc09 +# ADD RSC /l 0xc09 +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LIB32=link.exe -lib +# ADD BASE LIB32 /nologo /out:"Debug_small/libdb43sd.lib" +# ADD LIB32 /nologo /out:"Debug_small/libdb43sd.lib" + +!ENDIF + +# Begin Target + +# Name "db_small - Win32 Release Static" +# Name "db_small - Win32 Debug Static" +# Begin Source File + +SOURCE=..\btree\bt_compare.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_conv.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_curadj.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_cursor.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_delete.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_method.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_open.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_put.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_rec.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_reclaim.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_recno.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_rsearch.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_search.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_split.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_stat.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_upgrade.c +# End Source File +# Begin Source File + +SOURCE=..\btree\btree_auto.c +# End Source File +# Begin Source File + +SOURCE=..\clib\strcasecmp.c +# End Source File +# Begin Source File + +SOURCE=..\common\crypto_stub.c +# End Source File +# Begin Source File + +SOURCE=..\common\db_byteorder.c +# End Source File +# Begin Source File + +SOURCE=..\common\db_err.c +# End Source File +# Begin Source File + +SOURCE=..\common\db_getlong.c +# End Source File +# Begin Source File + +SOURCE=..\common\db_idspace.c +# End Source File +# Begin Source File + +SOURCE=..\common\db_log2.c +# End Source File +# Begin Source File + +SOURCE=..\common\util_cache.c +# End Source File +# Begin Source File + +SOURCE=..\common\util_log.c +# End Source File +# Begin Source File + +SOURCE=..\common\util_sig.c +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_db.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_dbc.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_dbt.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_env.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_except.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_lock.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_logc.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_mpool.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_multi.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_seq.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_txn.cpp +# End Source File +# Begin Source File + +SOURCE=..\db\crdel_auto.c +# End Source File +# Begin Source File + +SOURCE=..\db\crdel_rec.c +# End Source File +# Begin Source File + +SOURCE=..\db\db.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_am.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_auto.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_cam.c +# End Source File +# Begin Source File + 
+SOURCE=..\db\db_conv.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_dispatch.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_dup.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_iface.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_join.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_meta.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_method.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_open.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_overflow.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_pr.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_rec.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_reclaim.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_remove.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_rename.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_ret.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_setid.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_setlsn.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_stati.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_truncate.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_upg.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_upg_opd.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_vrfy_stub.c +# End Source File +# Begin Source File + +SOURCE=..\dbreg\dbreg.c +# End Source File +# Begin Source File + +SOURCE=..\dbreg\dbreg_auto.c +# End Source File +# Begin Source File + +SOURCE=..\dbreg\dbreg_rec.c +# End Source File +# Begin Source File + +SOURCE=..\dbreg\dbreg_stat.c +# End Source File +# Begin Source File + +SOURCE=..\dbreg\dbreg_util.c +# End Source File +# Begin Source File + +SOURCE=..\env\db_salloc.c +# End Source File +# Begin Source File + +SOURCE=..\env\db_shash.c +# End Source File +# Begin Source File + +SOURCE=..\env\env_file.c +# End Source File +# Begin Source File + +SOURCE=..\env\env_method.c +# End Source File +# Begin Source File + +SOURCE=..\env\env_open.c +# End Source File +# Begin Source File + +SOURCE=..\env\env_recover.c +# End Source File +# Begin Source File + +SOURCE=..\env\env_region.c +# End Source File +# Begin Source File + +SOURCE=..\env\env_stat.c +# End Source File +# Begin Source File + +SOURCE=..\fileops\fileops_auto.c +# End Source File +# Begin Source File + +SOURCE=..\fileops\fop_basic.c +# End Source File +# Begin Source File + +SOURCE=..\fileops\fop_rec.c +# End Source File +# Begin Source File + +SOURCE=..\fileops\fop_util.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_func.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_stub.c +# End Source File +# Begin Source File + +SOURCE=..\hmac\hmac.c +# End Source File +# Begin Source File + +SOURCE=..\hmac\sha1.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_deadlock.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_id.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_list.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_method.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_region.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_stat.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_timer.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_util.c +# End Source 
File +# Begin Source File + +SOURCE=..\log\log.c +# End Source File +# Begin Source File + +SOURCE=..\log\log_archive.c +# End Source File +# Begin Source File + +SOURCE=..\log\log_compare.c +# End Source File +# Begin Source File + +SOURCE=..\log\log_get.c +# End Source File +# Begin Source File + +SOURCE=..\log\log_method.c +# End Source File +# Begin Source File + +SOURCE=..\log\log_put.c +# End Source File +# Begin Source File + +SOURCE=..\log\log_stat.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_alloc.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_bh.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_fget.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_fmethod.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_fopen.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_fput.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_fset.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_method.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_region.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_register.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_stat.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_sync.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_trickle.c +# End Source File +# Begin Source File + +SOURCE=..\mutex\mut_win32.c +# End Source File +# Begin Source File + +SOURCE=..\mutex\mutex.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_alloc.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_id.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_method.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_oflags.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_region.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_root.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_rpath.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_tmpdir.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_abs.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_clock.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_config.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_dir.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_errno.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_fid.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_fsync.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_handle.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_map.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_open.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_rename.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_rw.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_seek.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_sleep.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_spin.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_stat.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_truncate.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_unlink.c +# End Source File +# Begin Source File + +SOURCE=..\qam\qam_stub.c +# End Source File +# Begin Source File + +SOURCE=..\rep\rep_stub.c +# End Source File +# Begin Source File + +SOURCE=..\sequence\seq_stat.c +# End Source File +# Begin 
Source File + +SOURCE=..\sequence\sequence.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_auto.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_method.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_rec.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_recover.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_region.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_stat.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_util.c +# End Source File +# Begin Source File + +SOURCE=..\xa\xa.c +# End Source File +# Begin Source File + +SOURCE=..\xa\xa_db.c +# End Source File +# Begin Source File + +SOURCE=..\xa\xa_map.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/db_stat.dsp b/db/build_win64/db_stat.dsp new file mode 100644 index 000000000..8ff259572 --- /dev/null +++ b/db/build_win64/db_stat.dsp @@ -0,0 +1,148 @@ +# Microsoft Developer Studio Project File - Name="db_stat" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=db_stat - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "db_stat.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "db_stat.mak" CFG="db_stat - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "db_stat - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "db_stat - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "db_stat - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "db_stat - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "db_stat - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "db_stat - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "db_stat - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "db_stat - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." 
/D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "db_stat - Win32 Release" +# Name "db_stat - Win32 Debug" +# Name "db_stat - Win32 Release Static" +# Name "db_stat - Win32 Debug Static" +# Begin Source File + +SOURCE=..\db_stat\db_stat.c +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/db_static.dsp b/db/build_win64/db_static.dsp new file mode 100644 index 000000000..063db5791 --- /dev/null +++ b/db/build_win64/db_static.dsp @@ -0,0 +1,864 @@ +# Microsoft Developer Studio Project File - Name="db_static" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Static Library" 0x0104 + +CFG=db_static - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "db_static.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "db_static.mak" CFG="db_static - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "db_static - Win32 Release Static" (based on "Win32 (x86) Static Library") +!MESSAGE "db_static - Win32 Debug Static" (based on "Win32 (x86) Static Library") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "db_static - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release_static" +# PROP BASE Intermediate_Dir "Release_static" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX"config.h" /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c +# ADD BASE RSC /l 0xc09 +# ADD RSC /l 0xc09 +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LIB32=link.exe -lib +# ADD BASE LIB32 /nologo /out:"Release/libdb43s.lib" +# ADD LIB32 /nologo /out:"Release_static/libdb43s.lib" + +!ELSEIF "$(CFG)" == "db_static - Win32 Debug Static" + +# PROP BASE Use_MFC 1 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug_static" +# PROP BASE Intermediate_Dir "Debug_static" +# PROP BASE Target_Dir "" +# PROP Use_MFC 1 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." 
/D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX"config.h" /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c +# ADD BASE RSC /l 0xc09 +# ADD RSC /l 0xc09 +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LIB32=link.exe -lib +# ADD BASE LIB32 /nologo /out:"Debug/libdb43sd.lib" +# ADD LIB32 /nologo /out:"Debug_static/libdb43sd.lib" + +!ENDIF + +# Begin Target + +# Name "db_static - Win32 Release Static" +# Name "db_static - Win32 Debug Static" +# Begin Source File + +SOURCE=..\btree\bt_compare.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_conv.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_curadj.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_cursor.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_delete.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_method.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_open.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_put.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_rec.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_reclaim.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_recno.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_rsearch.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_search.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_split.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_stat.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_upgrade.c +# End Source File +# Begin Source File + +SOURCE=..\btree\bt_verify.c +# End Source File +# Begin Source File + +SOURCE=..\btree\btree_auto.c +# End Source File +# Begin Source File + +SOURCE=..\clib\strcasecmp.c +# End Source File +# Begin Source File + +SOURCE=..\common\db_byteorder.c +# End Source File +# Begin Source File + +SOURCE=..\common\db_err.c +# End Source File +# Begin Source File + +SOURCE=..\common\db_getlong.c +# End Source File +# Begin Source File + +SOURCE=..\common\db_idspace.c +# End Source File +# Begin Source File + +SOURCE=..\common\db_log2.c +# End Source File +# Begin Source File + +SOURCE=..\common\util_cache.c +# End Source File +# Begin Source File + +SOURCE=..\common\util_log.c +# End Source File +# Begin Source File + +SOURCE=..\common\util_sig.c +# End Source File +# Begin Source File + +SOURCE=..\crypto\aes_method.c +# End Source File +# Begin Source File + +SOURCE=..\crypto\crypto.c +# End Source File +# Begin Source File + +SOURCE=..\crypto\mersenne\mt19937db.c +# End Source File +# Begin Source File + +SOURCE=..\crypto\rijndael\rijndael-alg-fst.c +# End Source File +# Begin Source File + +SOURCE=..\crypto\rijndael\rijndael-api-fst.c +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_db.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_dbc.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_dbt.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_env.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_except.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_lock.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_logc.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_mpool.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_multi.cpp +# End Source File +# Begin Source File + 
+SOURCE=..\cxx\cxx_seq.cpp +# End Source File +# Begin Source File + +SOURCE=..\cxx\cxx_txn.cpp +# End Source File +# Begin Source File + +SOURCE=..\db\crdel_auto.c +# End Source File +# Begin Source File + +SOURCE=..\db\crdel_rec.c +# End Source File +# Begin Source File + +SOURCE=..\db\db.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_am.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_auto.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_cam.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_conv.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_dispatch.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_dup.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_iface.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_join.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_meta.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_method.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_open.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_overflow.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_ovfl_vrfy.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_pr.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_rec.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_reclaim.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_remove.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_rename.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_ret.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_setid.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_setlsn.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_stati.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_truncate.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_upg.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_upg_opd.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_vrfy.c +# End Source File +# Begin Source File + +SOURCE=..\db\db_vrfyutil.c +# End Source File +# Begin Source File + +SOURCE=..\dbm\dbm.c +# End Source File +# Begin Source File + +SOURCE=..\dbreg\dbreg.c +# End Source File +# Begin Source File + +SOURCE=..\dbreg\dbreg_auto.c +# End Source File +# Begin Source File + +SOURCE=..\dbreg\dbreg_rec.c +# End Source File +# Begin Source File + +SOURCE=..\dbreg\dbreg_stat.c +# End Source File +# Begin Source File + +SOURCE=..\dbreg\dbreg_util.c +# End Source File +# Begin Source File + +SOURCE=..\env\db_salloc.c +# End Source File +# Begin Source File + +SOURCE=..\env\db_shash.c +# End Source File +# Begin Source File + +SOURCE=..\env\env_file.c +# End Source File +# Begin Source File + +SOURCE=..\env\env_method.c +# End Source File +# Begin Source File + +SOURCE=..\env\env_open.c +# End Source File +# Begin Source File + +SOURCE=..\env\env_recover.c +# End Source File +# Begin Source File + +SOURCE=..\env\env_region.c +# End Source File +# Begin Source File + +SOURCE=..\env\env_stat.c +# End Source File +# Begin Source File + +SOURCE=..\fileops\fileops_auto.c +# End Source File +# Begin Source File + +SOURCE=..\fileops\fop_basic.c +# End Source File +# Begin Source File + +SOURCE=..\fileops\fop_rec.c +# End Source File +# Begin Source File + +SOURCE=..\fileops\fop_util.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_auto.c +# End Source File +# Begin Source File + 
+SOURCE=..\hash\hash_conv.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_dup.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_func.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_meta.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_method.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_open.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_page.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_rec.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_reclaim.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_stat.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_upgrade.c +# End Source File +# Begin Source File + +SOURCE=..\hash\hash_verify.c +# End Source File +# Begin Source File + +SOURCE=..\hmac\hmac.c +# End Source File +# Begin Source File + +SOURCE=..\hmac\sha1.c +# End Source File +# Begin Source File + +SOURCE=..\hsearch\hsearch.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_deadlock.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_id.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_list.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_method.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_region.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_stat.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_timer.c +# End Source File +# Begin Source File + +SOURCE=..\lock\lock_util.c +# End Source File +# Begin Source File + +SOURCE=..\log\log.c +# End Source File +# Begin Source File + +SOURCE=..\log\log_archive.c +# End Source File +# Begin Source File + +SOURCE=..\log\log_compare.c +# End Source File +# Begin Source File + +SOURCE=..\log\log_get.c +# End Source File +# Begin Source File + +SOURCE=..\log\log_method.c +# End Source File +# Begin Source File + +SOURCE=..\log\log_put.c +# End Source File +# Begin Source File + +SOURCE=..\log\log_stat.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_alloc.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_bh.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_fget.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_fmethod.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_fopen.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_fput.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_fset.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_method.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_region.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_register.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_stat.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_sync.c +# End Source File +# Begin Source File + +SOURCE=..\mp\mp_trickle.c +# End Source File +# Begin Source File + +SOURCE=..\mutex\mut_win32.c +# End Source File +# Begin Source File + +SOURCE=..\mutex\mutex.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_alloc.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_id.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_method.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_oflags.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_region.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_root.c +# End Source File +# Begin 
Source File + +SOURCE=..\os\os_rpath.c +# End Source File +# Begin Source File + +SOURCE=..\os\os_tmpdir.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_abs.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_clock.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_config.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_dir.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_errno.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_fid.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_fsync.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_handle.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_map.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_open.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_rename.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_rw.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_seek.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_sleep.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_spin.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_stat.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_truncate.c +# End Source File +# Begin Source File + +SOURCE=..\os_win32\os_unlink.c +# End Source File +# Begin Source File + +SOURCE=..\qam\qam.c +# End Source File +# Begin Source File + +SOURCE=..\qam\qam_auto.c +# End Source File +# Begin Source File + +SOURCE=..\qam\qam_conv.c +# End Source File +# Begin Source File + +SOURCE=..\qam\qam_files.c +# End Source File +# Begin Source File + +SOURCE=..\qam\qam_method.c +# End Source File +# Begin Source File + +SOURCE=..\qam\qam_open.c +# End Source File +# Begin Source File + +SOURCE=..\qam\qam_rec.c +# End Source File +# Begin Source File + +SOURCE=..\qam\qam_stat.c +# End Source File +# Begin Source File + +SOURCE=..\qam\qam_upgrade.c +# End Source File +# Begin Source File + +SOURCE=..\qam\qam_verify.c +# End Source File +# Begin Source File + +SOURCE=..\rep\rep_auto.c +# End Source File +# Begin Source File + +SOURCE=..\rep\rep_backup.c +# End Source File +# Begin Source File + +SOURCE=..\rep\rep_method.c +# End Source File +# Begin Source File + +SOURCE=..\rep\rep_record.c +# End Source File +# Begin Source File + +SOURCE=..\rep\rep_region.c +# End Source File +# Begin Source File + +SOURCE=..\rep\rep_stat.c +# End Source File +# Begin Source File + +SOURCE=..\rep\rep_util.c +# End Source File +# Begin Source File + +SOURCE=..\sequence\seq_stat.c +# End Source File +# Begin Source File + +SOURCE=..\sequence\sequence.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_auto.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_method.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_rec.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_recover.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_region.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_stat.c +# End Source File +# Begin Source File + +SOURCE=..\txn\txn_util.c +# End Source File +# Begin Source File + +SOURCE=..\xa\xa.c +# End Source File +# Begin Source File + +SOURCE=..\xa\xa_db.c +# End Source File +# Begin Source File + +SOURCE=..\xa\xa_map.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/db_tcl.dsp 
b/db/build_win64/db_tcl.dsp new file mode 100644 index 000000000..0ce84a247 --- /dev/null +++ b/db/build_win64/db_tcl.dsp @@ -0,0 +1,148 @@ +# Microsoft Developer Studio Project File - Name="db_tcl" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102 + +CFG=db_tcl - Win32 Debug +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "db_tcl.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "db_tcl.mak" CFG="db_tcl - Win32 Debug" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "db_tcl - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE "db_tcl - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +MTL=midl.exe +RSC=rc.exe + +!IF "$(CFG)" == "db_tcl - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." /D "DB_TCL_SUPPORT" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /YX /FD /c +# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 +# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:windows /dll /machine:IA64 +# ADD LINK32 Release/libdb43.lib tcl84.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:IA64 /out:"Release/libdb_tcl43.dll" + +!ELSEIF "$(CFG)" == "db_tcl - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 2 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." 
/D "DB_TCL_SUPPORT" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c +# SUBTRACT CPP /Fr +# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 +# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:windows /dll /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib tcl84g.lib /nologo /base:"0x13000000" /subsystem:windows /dll /debug /machine:IA64 /out:"Debug/libdb_tcl43d.dll" /fixed:no + +!ENDIF + +# Begin Target + +# Name "db_tcl - Win32 Release" +# Name "db_tcl - Win32 Debug" +# Begin Source File + +SOURCE=..\build_win32\libdb_tcl.def +# End Source File +# Begin Source File + +SOURCE=..\tcl\tcl_compat.c +# End Source File +# Begin Source File + +SOURCE=..\tcl\tcl_db.c +# End Source File +# Begin Source File + +SOURCE=..\tcl\tcl_db_pkg.c +# End Source File +# Begin Source File + +SOURCE=..\tcl\tcl_dbcursor.c +# End Source File +# Begin Source File + +SOURCE=..\tcl\tcl_env.c +# End Source File +# Begin Source File + +SOURCE=..\tcl\tcl_internal.c +# End Source File +# Begin Source File + +SOURCE=..\tcl\tcl_lock.c +# End Source File +# Begin Source File + +SOURCE=..\tcl\tcl_log.c +# End Source File +# Begin Source File + +SOURCE=..\tcl\tcl_mp.c +# End Source File +# Begin Source File + +SOURCE=..\tcl\tcl_rep.c +# End Source File +# Begin Source File + +SOURCE=..\tcl\tcl_seq.c +# End Source File +# Begin Source File + +SOURCE=..\tcl\tcl_txn.c +# End Source File +# Begin Source File + +SOURCE=..\tcl\tcl_util.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/db_test.dsp b/db/build_win64/db_test.dsp new file mode 100644 index 000000000..c12971a5a --- /dev/null +++ b/db/build_win64/db_test.dsp @@ -0,0 +1,100 @@ +# Microsoft Developer Studio Project File - Name="db_test" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=db_test - Win32 Debug +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "db_test.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "db_test.mak" CFG="db_test - Win32 Debug" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "db_test - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "db_test - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "db_test - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 +# Begin Special Build Tool +SOURCE="$(InputPath)" +PostBuild_Desc=Copy built executable files. +PostBuild_Cmds=copy Release\*.exe . +# End Special Build Tool + +!ELSEIF "$(CFG)" == "db_test - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /out:"Debug/dbkill.exe" /fixed:no +# Begin Special Build Tool +SOURCE="$(InputPath)" +PostBuild_Desc=Copy built executable files. +PostBuild_Cmds=copy Debug\*.exe . 
+# End Special Build Tool + +!ENDIF + +# Begin Target + +# Name "db_test - Win32 Release" +# Name "db_test - Win32 Debug" +# Begin Source File + +SOURCE=..\build_win32\dbkill.cpp +# End Source File +# End Target +# End Project diff --git a/db/build_win64/db_test.src b/db/build_win64/db_test.src new file mode 100644 index 000000000..960143e04 --- /dev/null +++ b/db/build_win64/db_test.src @@ -0,0 +1,97 @@ +# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=@project_name@ - Win32 Debug +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "@project_name@.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "@project_name@ - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /subsystem:console /machine:IA64 +# Begin Special Build Tool +SOURCE="$(InputPath)" +PostBuild_Desc=Copy built executable files. +PostBuild_Cmds=copy Release\*.exe . +# End Special Build Tool + +!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." 
/D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /out:"Debug/dbkill.exe" /fixed:no +# Begin Special Build Tool +SOURCE="$(InputPath)" +PostBuild_Desc=Copy built executable files. +PostBuild_Cmds=copy Debug\*.exe . +# End Special Build Tool + +!ENDIF + +# Begin Target + +# Name "@project_name@ - Win32 Release" +# Name "@project_name@ - Win32 Debug" +@SOURCE_FILES@ +# End Target +# End Project diff --git a/db/build_win64/db_upgrade.dsp b/db/build_win64/db_upgrade.dsp new file mode 100644 index 000000000..ef51dbce4 --- /dev/null +++ b/db/build_win64/db_upgrade.dsp @@ -0,0 +1,148 @@ +# Microsoft Developer Studio Project File - Name="db_upgrade" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=db_upgrade - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "db_upgrade.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "db_upgrade.mak" CFG="db_upgrade - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "db_upgrade - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "db_upgrade - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "db_upgrade - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "db_upgrade - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "db_upgrade - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "db_upgrade - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "db_upgrade - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "db_upgrade - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." 
/D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "db_upgrade - Win32 Release" +# Name "db_upgrade - Win32 Debug" +# Name "db_upgrade - Win32 Release Static" +# Name "db_upgrade - Win32 Debug Static" +# Begin Source File + +SOURCE=..\db_upgrade\db_upgrade.c +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/db_verify.dsp b/db/build_win64/db_verify.dsp new file mode 100644 index 000000000..02a03de9e --- /dev/null +++ b/db/build_win64/db_verify.dsp @@ -0,0 +1,148 @@ +# Microsoft Developer Studio Project File - Name="db_verify" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=db_verify - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "db_verify.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "db_verify.mak" CFG="db_verify - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "db_verify - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "db_verify - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "db_verify - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "db_verify - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "db_verify - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "db_verify - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "db_verify - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "db_verify - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." 
/D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "db_verify - Win32 Release" +# Name "db_verify - Win32 Debug" +# Name "db_verify - Win32 Release Static" +# Name "db_verify - Win32 Debug Static" +# Begin Source File + +SOURCE=..\db_verify\db_verify.c +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/dynamic_dsp.src b/db/build_win64/dynamic_dsp.src new file mode 100644 index 000000000..5f409e3de --- /dev/null +++ b/db/build_win64/dynamic_dsp.src @@ -0,0 +1,93 @@ +# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102 + +CFG=@project_name@ - Win32 Debug +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "@project_name@.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +MTL=midl.exe +RSC=rc.exe + +!IF "$(CFG)" == "@project_name@ - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." 
/D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX /FD /c +# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 +# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:windows /dll /machine:IA64 +# ADD LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /machine:IA64 /out:"Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" + +!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 2 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "DB_CREATE_DLL" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c +# SUBTRACT CPP /Fr +# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 +# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:windows /dll /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /base:"0x13000000" /subsystem:windows /dll /debug /machine:IA64 /out:"Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no + +!ENDIF + +# Begin Target + +# Name "@project_name@ - Win32 Release" +# Name "@project_name@ - Win32 Debug" +@SOURCE_FILES@ +# End Target +# End Project diff --git a/db/build_win64/ex_access.dsp b/db/build_win64/ex_access.dsp new file mode 100644 index 000000000..f58224544 --- /dev/null +++ b/db/build_win64/ex_access.dsp @@ -0,0 +1,148 @@ +# Microsoft Developer Studio Project File - Name="ex_access" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=ex_access - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "ex_access.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "ex_access.mak" CFG="ex_access - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "ex_access - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "ex_access - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "ex_access - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "ex_access - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "ex_access - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "ex_access - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "ex_access - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "ex_access - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "ex_access - Win32 Release" +# Name "ex_access - Win32 Debug" +# Name "ex_access - Win32 Release Static" +# Name "ex_access - Win32 Debug Static" +# Begin Source File + +SOURCE=..\examples_c\ex_access.c +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/ex_btrec.dsp b/db/build_win64/ex_btrec.dsp new file mode 100644 index 000000000..698c7453c --- /dev/null +++ b/db/build_win64/ex_btrec.dsp @@ -0,0 +1,148 @@ +# Microsoft Developer Studio Project File - Name="ex_btrec" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=ex_btrec - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "ex_btrec.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "ex_btrec.mak" CFG="ex_btrec - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "ex_btrec - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "ex_btrec - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "ex_btrec - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "ex_btrec - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "ex_btrec - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "ex_btrec - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "ex_btrec - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "ex_btrec - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "ex_btrec - Win32 Release" +# Name "ex_btrec - Win32 Debug" +# Name "ex_btrec - Win32 Release Static" +# Name "ex_btrec - Win32 Debug Static" +# Begin Source File + +SOURCE=..\examples_c\ex_btrec.c +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/ex_env.dsp b/db/build_win64/ex_env.dsp new file mode 100644 index 000000000..703e4e921 --- /dev/null +++ b/db/build_win64/ex_env.dsp @@ -0,0 +1,148 @@ +# Microsoft Developer Studio Project File - Name="ex_env" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=ex_env - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "ex_env.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "ex_env.mak" CFG="ex_env - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "ex_env - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "ex_env - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "ex_env - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "ex_env - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "ex_env - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "ex_env - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "ex_env - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "ex_env - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "ex_env - Win32 Release" +# Name "ex_env - Win32 Debug" +# Name "ex_env - Win32 Release Static" +# Name "ex_env - Win32 Debug Static" +# Begin Source File + +SOURCE=..\examples_c\ex_env.c +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/ex_lock.dsp b/db/build_win64/ex_lock.dsp new file mode 100644 index 000000000..4d11379ec --- /dev/null +++ b/db/build_win64/ex_lock.dsp @@ -0,0 +1,148 @@ +# Microsoft Developer Studio Project File - Name="ex_lock" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=ex_lock - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "ex_lock.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "ex_lock.mak" CFG="ex_lock - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "ex_lock - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "ex_lock - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "ex_lock - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "ex_lock - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "ex_lock - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "ex_lock - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "ex_lock - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "ex_lock - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "ex_lock - Win32 Release" +# Name "ex_lock - Win32 Debug" +# Name "ex_lock - Win32 Release Static" +# Name "ex_lock - Win32 Debug Static" +# Begin Source File + +SOURCE=..\examples_c\ex_lock.c +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/ex_mpool.dsp b/db/build_win64/ex_mpool.dsp new file mode 100644 index 000000000..36f156f5d --- /dev/null +++ b/db/build_win64/ex_mpool.dsp @@ -0,0 +1,148 @@ +# Microsoft Developer Studio Project File - Name="ex_mpool" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=ex_mpool - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "ex_mpool.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "ex_mpool.mak" CFG="ex_mpool - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "ex_mpool - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "ex_mpool - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "ex_mpool - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "ex_mpool - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "ex_mpool - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "ex_mpool - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "ex_mpool - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "ex_mpool - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "ex_mpool - Win32 Release" +# Name "ex_mpool - Win32 Debug" +# Name "ex_mpool - Win32 Release Static" +# Name "ex_mpool - Win32 Debug Static" +# Begin Source File + +SOURCE=..\examples_c\ex_mpool.c +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/ex_repquote.dsp b/db/build_win64/ex_repquote.dsp new file mode 100644 index 000000000..699e72baa --- /dev/null +++ b/db/build_win64/ex_repquote.dsp @@ -0,0 +1,164 @@ +# Microsoft Developer Studio Project File - Name="ex_repquote" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=ex_repquote - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "ex_repquote.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "ex_repquote.mak" CFG="ex_repquote - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "ex_repquote - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "ex_repquote - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "ex_repquote - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "ex_repquote - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "ex_repquote - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 ws2_32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib ws2_32.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "ex_repquote - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 ws2_32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib ws2_32.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "ex_repquote - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib ws2_32.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib ws2_32.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "ex_repquote - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib ws2_32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib ws2_32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "ex_repquote - Win32 Release" +# Name "ex_repquote - Win32 Debug" +# Name "ex_repquote - Win32 Release Static" +# Name "ex_repquote - Win32 Debug Static" +# Begin Source File + +SOURCE=..\examples_c\ex_repquote\ex_rq_client.c +# End Source File +# Begin Source File + +SOURCE=..\examples_c\ex_repquote\ex_rq_main.c +# End Source File +# Begin Source File + +SOURCE=..\examples_c\ex_repquote\ex_rq_master.c +# End Source File +# Begin Source File + +SOURCE=..\examples_c\ex_repquote\ex_rq_net.c +# End Source File +# Begin Source File + +SOURCE=..\examples_c\ex_repquote\ex_rq_util.c +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/ex_repquote.src b/db/build_win64/ex_repquote.src new file mode 100644 index 000000000..da08375ec --- /dev/null +++ b/db/build_win64/ex_repquote.src @@ -0,0 +1,145 @@ +# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=@project_name@ - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "@project_name@.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "@project_name@ - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "@project_name@ - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "@project_name@ - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 ws2_32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib ws2_32.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 ws2_32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib ws2_32.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "@project_name@ - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib ws2_32.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib ws2_32.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib ws2_32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib ws2_32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "@project_name@ - Win32 Release" +# Name "@project_name@ - Win32 Debug" +# Name "@project_name@ - Win32 Release Static" +# Name "@project_name@ - Win32 Debug Static" +@SOURCE_FILES@ +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/ex_tpcb.dsp b/db/build_win64/ex_tpcb.dsp new file mode 100644 index 000000000..15c92708f --- /dev/null +++ b/db/build_win64/ex_tpcb.dsp @@ -0,0 +1,148 @@ +# Microsoft Developer Studio Project File - Name="ex_tpcb" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=ex_tpcb - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "ex_tpcb.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "ex_tpcb.mak" CFG="ex_tpcb - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "ex_tpcb - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "ex_tpcb - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "ex_tpcb - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "ex_tpcb - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "ex_tpcb - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "ex_tpcb - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "ex_tpcb - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "ex_tpcb - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "ex_tpcb - Win32 Release" +# Name "ex_tpcb - Win32 Debug" +# Name "ex_tpcb - Win32 Release Static" +# Name "ex_tpcb - Win32 Debug Static" +# Begin Source File + +SOURCE=..\examples_c\ex_tpcb.c +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/excxx_access.dsp b/db/build_win64/excxx_access.dsp new file mode 100644 index 000000000..53aa6acf4 --- /dev/null +++ b/db/build_win64/excxx_access.dsp @@ -0,0 +1,148 @@ +# Microsoft Developer Studio Project File - Name="excxx_access" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=excxx_access - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "excxx_access.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "excxx_access.mak" CFG="excxx_access - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "excxx_access - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "excxx_access - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "excxx_access - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "excxx_access - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "excxx_access - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "excxx_access - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "excxx_access - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "excxx_access - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "excxx_access - Win32 Release" +# Name "excxx_access - Win32 Debug" +# Name "excxx_access - Win32 Release Static" +# Name "excxx_access - Win32 Debug Static" +# Begin Source File + +SOURCE=..\examples_cxx\AccessExample.cpp +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/excxx_btrec.dsp b/db/build_win64/excxx_btrec.dsp new file mode 100644 index 000000000..852a61f25 --- /dev/null +++ b/db/build_win64/excxx_btrec.dsp @@ -0,0 +1,148 @@ +# Microsoft Developer Studio Project File - Name="excxx_btrec" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=excxx_btrec - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "excxx_btrec.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "excxx_btrec.mak" CFG="excxx_btrec - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "excxx_btrec - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "excxx_btrec - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "excxx_btrec - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "excxx_btrec - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "excxx_btrec - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "excxx_btrec - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "excxx_btrec - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "excxx_btrec - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "excxx_btrec - Win32 Release" +# Name "excxx_btrec - Win32 Debug" +# Name "excxx_btrec - Win32 Release Static" +# Name "excxx_btrec - Win32 Debug Static" +# Begin Source File + +SOURCE=..\examples_cxx\BtRecExample.cpp +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/excxx_env.dsp b/db/build_win64/excxx_env.dsp new file mode 100644 index 000000000..44ee9a2fa --- /dev/null +++ b/db/build_win64/excxx_env.dsp @@ -0,0 +1,148 @@ +# Microsoft Developer Studio Project File - Name="excxx_env" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=excxx_env - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "excxx_env.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "excxx_env.mak" CFG="excxx_env - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "excxx_env - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "excxx_env - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "excxx_env - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "excxx_env - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "excxx_env - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "excxx_env - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "excxx_env - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "excxx_env - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "excxx_env - Win32 Release" +# Name "excxx_env - Win32 Debug" +# Name "excxx_env - Win32 Release Static" +# Name "excxx_env - Win32 Debug Static" +# Begin Source File + +SOURCE=..\examples_cxx\EnvExample.cpp +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/excxx_lock.dsp b/db/build_win64/excxx_lock.dsp new file mode 100644 index 000000000..af0327793 --- /dev/null +++ b/db/build_win64/excxx_lock.dsp @@ -0,0 +1,148 @@ +# Microsoft Developer Studio Project File - Name="excxx_lock" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=excxx_lock - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "excxx_lock.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "excxx_lock.mak" CFG="excxx_lock - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "excxx_lock - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "excxx_lock - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "excxx_lock - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "excxx_lock - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "excxx_lock - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "excxx_lock - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "excxx_lock - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "excxx_lock - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "excxx_lock - Win32 Release" +# Name "excxx_lock - Win32 Debug" +# Name "excxx_lock - Win32 Release Static" +# Name "excxx_lock - Win32 Debug Static" +# Begin Source File + +SOURCE=..\examples_cxx\LockExample.cpp +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/excxx_mpool.dsp b/db/build_win64/excxx_mpool.dsp new file mode 100644 index 000000000..22af8b36b --- /dev/null +++ b/db/build_win64/excxx_mpool.dsp @@ -0,0 +1,148 @@ +# Microsoft Developer Studio Project File - Name="excxx_mpool" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=excxx_mpool - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "excxx_mpool.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "excxx_mpool.mak" CFG="excxx_mpool - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "excxx_mpool - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "excxx_mpool - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "excxx_mpool - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "excxx_mpool - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "excxx_mpool - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "excxx_mpool - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "excxx_mpool - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "excxx_mpool - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "excxx_mpool - Win32 Release" +# Name "excxx_mpool - Win32 Debug" +# Name "excxx_mpool - Win32 Release Static" +# Name "excxx_mpool - Win32 Debug Static" +# Begin Source File + +SOURCE=..\examples_cxx\MpoolExample.cpp +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/excxx_tpcb.dsp b/db/build_win64/excxx_tpcb.dsp new file mode 100644 index 000000000..fd513fd9b --- /dev/null +++ b/db/build_win64/excxx_tpcb.dsp @@ -0,0 +1,148 @@ +# Microsoft Developer Studio Project File - Name="excxx_tpcb" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=excxx_tpcb - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "excxx_tpcb.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "excxx_tpcb.mak" CFG="excxx_tpcb - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "excxx_tpcb - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "excxx_tpcb - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "excxx_tpcb - Win32 Release Static" (based on "Win32 (x86) Console Application") +!MESSAGE "excxx_tpcb - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "excxx_tpcb - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release/libdb43.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" + +!ELSEIF "$(CFG)" == "excxx_tpcb - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb43d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no + +!ELSEIF "$(CFG)" == "excxx_tpcb - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Release_static/libdb43.lib /nologo /subsystem:console /machine:IA64 +# ADD LINK32 Release_static/libdb43s.lib /nologo /subsystem:console /machine:IA64 + +!ELSEIF "$(CFG)" == "excxx_tpcb - Win32 Debug Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 Debug_static/libdb43d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no +# ADD LINK32 Debug_static/libdb43sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no + +!ENDIF + +# Begin Target + +# Name "excxx_tpcb - Win32 Release" +# Name "excxx_tpcb - Win32 Debug" +# Name "excxx_tpcb - Win32 Release Static" +# Name "excxx_tpcb - Win32 Debug Static" +# Begin Source File + +SOURCE=..\examples_cxx\TpcbExample.cpp +# End Source File +# Begin Source File + +SOURCE=..\clib\getopt.c +# End Source File +# End Target +# End Project diff --git a/db/build_win64/java_dsp.src b/db/build_win64/java_dsp.src new file mode 100644 index 000000000..36d5c5ba4 --- /dev/null +++ b/db/build_win64/java_dsp.src @@ -0,0 +1,129 @@ +# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102 + +CFG=@project_name@ - Win32 Debug +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "@project_name@.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +MTL=midl.exe +RSC=rc.exe + +!IF "$(CFG)" == "@project_name@ - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /YX /FD /c +# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 +# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:windows /dll /machine:IA64 +# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:IA64 /out:"Release/libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" +# Begin Custom Build - Compiling java files using javac +ProjDir=. +InputPath=.\Release\libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll +SOURCE="$(InputPath)" + +"force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)" + echo compiling Berkeley DB classes + mkdir "$(OUTDIR)\classes" + javac -O -d "$(OUTDIR)\classes" -classpath "$(OUTDIR)/classes" ..\java\src\com\sleepycat\db\*.java ..\java\src\com\sleepycat\db\internal\*.java ..\java\src\com\sleepycat\bind\*.java ..\java\src\com\sleepycat\bind\serial\*.java ..\java\src\com\sleepycat\bind\tuple\*.java ..\java\src\com\sleepycat\collections\*.java ..\java\src\com\sleepycat\compat\*.java ..\java\src\com\sleepycat\util\*.java + echo compiling examples + mkdir "$(OUTDIR)\classes.ex" + javac -O -d "$(OUTDIR)\classes.ex" -classpath "$(OUTDIR)\classes;$(OUTDIR)\classes.ex" ..\examples_java\src\com\sleepycat\examples\db\*.java ..\examples_java\src\com\sleepycat\examples\db\GettingStarted\*.java ..\examples_java\src\com\sleepycat\examples\collections\access\*.java ..\examples_java\src\com\sleepycat\examples\collections\hello\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\basic\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\entity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\tuple\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\sentity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\marshal\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\factory\*.java + echo creating jar files + jar cf "$(OUTDIR)\db.jar" -C "$(OUTDIR)\classes" . + jar cf "$(OUTDIR)\dbexamples.jar" -C "$(OUTDIR)\classes.ex" . 
+ echo Java build finished + +# End Custom Build + +!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 2 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c +# SUBTRACT CPP /Fr +# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 +# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:windows /dll /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib /nologo /base:"0x13000000" /subsystem:windows /dll /debug /machine:IA64 /out:"Debug/libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no +# Begin Custom Build - Compiling java files using javac +ProjDir=. +InputPath=.\Debug\libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll +SOURCE="$(InputPath)" + +"force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)" + echo compiling Berkeley DB classes + mkdir "$(OUTDIR)\classes" + javac -g -d "$(OUTDIR)\classes" -classpath "$(OUTDIR)/classes" ..\java\src\com\sleepycat\db\*.java ..\java\src\com\sleepycat\db\internal\*.java ..\java\src\com\sleepycat\bind\*.java ..\java\src\com\sleepycat\bind\serial\*.java ..\java\src\com\sleepycat\bind\tuple\*.java ..\java\src\com\sleepycat\collections\*.java ..\java\src\com\sleepycat\compat\*.java ..\java\src\com\sleepycat\util\*.java + echo compiling examples + mkdir "$(OUTDIR)\classes.ex" + javac -g -d "$(OUTDIR)\classes.ex" -classpath "$(OUTDIR)\classes;$(OUTDIR)\classes.ex" ..\examples_java\src\com\sleepycat\examples\db\*.java ..\examples_java\src\com\sleepycat\examples\db\GettingStarted\*.java ..\examples_java\src\com\sleepycat\examples\collections\access\*.java ..\examples_java\src\com\sleepycat\examples\collections\hello\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\basic\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\entity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\tuple\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\sentity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\marshal\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\factory\*.java + echo creating jar files + jar cf "$(OUTDIR)\db.jar" -C "$(OUTDIR)\classes" . + jar cf "$(OUTDIR)\dbexamples.jar" -C "$(OUTDIR)\classes.ex" . 
+ echo Java build finished + +# End Custom Build + +!ENDIF + +# Begin Target + +# Name "@project_name@ - Win32 Release" +# Name "@project_name@ - Win32 Debug" +@SOURCE_FILES@ +# End Target +# End Project diff --git a/db/build_win64/libdbrc.src b/db/build_win64/libdbrc.src new file mode 100644 index 000000000..4c644ea9f --- /dev/null +++ b/db/build_win64/libdbrc.src @@ -0,0 +1,33 @@ +1 VERSIONINFO + FILEVERSION %MAJOR%,0,%MINOR%,%PATCH% + PRODUCTVERSION %MAJOR%,0,%MINOR%,%PATCH% + FILEFLAGSMASK 0x3fL +#ifdef _DEBUG + FILEFLAGS 0x1L +#else + FILEFLAGS 0x0L +#endif + FILEOS 0x4L + FILETYPE 0x2L + FILESUBTYPE 0x0L + +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "040904b0" + BEGIN + VALUE "CompanyName", "Sleepycat Software\0" + VALUE "FileDescription", "Berkeley DB 3.0 DLL\0" + VALUE "FileVersion", "%MAJOR%.%MINOR%.%PATCH%\0" + VALUE "InternalName", "libdb.dll\0" + VALUE "LegalCopyright", "Copyright © Sleepycat Software Inc. 1997-2004\0" + VALUE "OriginalFilename", "libdb.dll\0" + VALUE "ProductName", "Sleepycat Software libdb\0" + VALUE "ProductVersion", "%MAJOR%.%MINOR%.%PATCH%\0" + END + END + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x409, 1200 + END +END diff --git a/db/build_win64/small_dsp.src b/db/build_win64/small_dsp.src new file mode 100644 index 000000000..1b42e8c71 --- /dev/null +++ b/db/build_win64/small_dsp.src @@ -0,0 +1,85 @@ +# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Static Library" 0x0104 + +CFG=@project_name@ - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "@project_name@.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "@project_name@ - Win32 Release Static" (based on "Win32 (x86) Static Library") +!MESSAGE "@project_name@ - Win32 Debug Static" (based on "Win32 (x86) Static Library") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "@project_name@ - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release_small" +# PROP BASE Intermediate_Dir "Release_small" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_small" +# PROP Intermediate_Dir "Release_small" +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /D "HAVE_SMALLBUILD" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX"config.h" /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "HAVE_SMALLBUILD" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c +# ADD BASE RSC /l 0xc09 +# ADD RSC /l 0xc09 +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LIB32=link.exe -lib +# ADD BASE LIB32 /nologo /out:"Release_small/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" +# ADD LIB32 /nologo /out:"Release_small/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" + +!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug Static" + +# PROP BASE Use_MFC 1 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug_small" +# PROP BASE Intermediate_Dir "Debug_small" +# PROP BASE Target_Dir "" +# PROP Use_MFC 1 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_small" +# PROP Intermediate_Dir "Debug_small" +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "HAVE_SMALLBUILD" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX"config.h" /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "HAVE_SMALLBUILD" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c +# ADD BASE RSC /l 0xc09 +# ADD RSC /l 0xc09 +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LIB32=link.exe -lib +# ADD BASE LIB32 /nologo /out:"Debug_small/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" +# ADD LIB32 /nologo /out:"Debug_small/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" + +!ENDIF + +# Begin Target + +# Name "@project_name@ - Win32 Release Static" +# Name "@project_name@ - Win32 Debug Static" +@SOURCE_FILES@ +# End Target +# End Project diff --git a/db/build_win64/srcfile_dsp.src b/db/build_win64/srcfile_dsp.src new file mode 100644 index 000000000..408a55e2a --- /dev/null +++ b/db/build_win64/srcfile_dsp.src @@ -0,0 +1,4 @@ +# Begin Source File + +SOURCE=@srcdir@\@srcfile@ +# End Source File diff --git a/db/build_win64/static_dsp.src b/db/build_win64/static_dsp.src new file mode 100644 index 000000000..a2746856c --- /dev/null +++ b/db/build_win64/static_dsp.src @@ -0,0 +1,85 @@ +# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Static Library" 0x0104 + +CFG=@project_name@ - Win32 Debug Static +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "@project_name@.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug Static" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "@project_name@ - Win32 Release Static" (based on "Win32 (x86) Static Library") +!MESSAGE "@project_name@ - Win32 Debug Static" (based on "Win32 (x86) Static Library") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "@project_name@ - Win32 Release Static" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release_static" +# PROP BASE Intermediate_Dir "Release_static" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release_static" +# PROP Intermediate_Dir "Release_static" +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX"config.h" /FD /c +# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c +# ADD BASE RSC /l 0xc09 +# ADD RSC /l 0xc09 +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LIB32=link.exe -lib +# ADD BASE LIB32 /nologo /out:"Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" +# ADD LIB32 /nologo /out:"Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" + +!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug Static" + +# PROP BASE Use_MFC 1 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug_static" +# PROP BASE Intermediate_Dir "Debug_static" +# PROP BASE Target_Dir "" +# PROP Use_MFC 1 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug_static" +# PROP Intermediate_Dir "Debug_static" +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX"config.h" /FD /c +# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c +# ADD BASE RSC /l 0xc09 +# ADD RSC /l 0xc09 +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LIB32=link.exe -lib +# ADD BASE LIB32 /nologo /out:"Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" +# ADD LIB32 /nologo /out:"Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" + +!ENDIF + +# Begin Target + +# Name "@project_name@ - Win32 Release Static" +# Name "@project_name@ - Win32 Debug Static" +@SOURCE_FILES@ +# End Target +# End Project diff --git a/db/build_win64/tcl_dsp.src b/db/build_win64/tcl_dsp.src new file mode 100644 index 000000000..2d25d7407 --- /dev/null +++ b/db/build_win64/tcl_dsp.src @@ -0,0 +1,93 @@ +# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102 + +CFG=@project_name@ - Win32 Debug +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "@project_name@.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +MTL=midl.exe +RSC=rc.exe + +!IF "$(CFG)" == "@project_name@ - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." 
/D "DB_TCL_SUPPORT" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /YX /FD /c +# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 +# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:windows /dll /machine:IA64 +# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib tcl84.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:IA64 /out:"Release/libdb_tcl@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" + +!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 2 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "DB_TCL_SUPPORT" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c +# SUBTRACT CPP /Fr +# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 +# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:windows /dll /debug /machine:IA64 /pdbtype:sept +# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib tcl84g.lib /nologo /base:"0x13000000" /subsystem:windows /dll /debug /machine:IA64 /out:"Debug/libdb_tcl@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no + +!ENDIF + +# Begin Target + +# Name "@project_name@ - Win32 Release" +# Name "@project_name@ - Win32 Debug" +@SOURCE_FILES@ +# End Target +# End Project diff --git a/db/build_win64/win_db.h b/db/build_win64/win_db.h new file mode 100644 index 000000000..858b74b3f --- /dev/null +++ b/db/build_win64/win_db.h @@ -0,0 +1,94 @@ +/*- + * $Id: win_db.h,v 11.18 2004/10/14 15:32:29 bostic Exp $ + * + * The following provides the information necessary to build Berkeley + * DB on native Windows, and other Windows environments such as MinGW. + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * To build Tcl interface libraries, the include path must be configured to + * use the directory containing , usually the include directory in + * the Tcl distribution. + */ +#ifdef DB_TCL_SUPPORT +#include +#endif + +#define WIN32_LEAN_AND_MEAN +#include + +/* + * All of the necessary includes have been included, ignore the #includes + * in the Berkeley DB source files. + */ +#define NO_SYSTEM_INCLUDES + +/* + * Win32 has getcwd, snprintf and vsnprintf, but under different names. + */ +#define getcwd(buf, size) _getcwd(buf, size) +#define snprintf _snprintf +#define vsnprintf _vsnprintf + +/* + * Win32 does not define getopt and friends in any header file, so we must. 
+ */ +#if defined(__cplusplus) +extern "C" { +#endif +extern int optind; +extern char *optarg; +extern int getopt(int, char * const *, const char *); +#if defined(__cplusplus) +} +#endif + +#ifdef _UNICODE +#define TO_TSTRING(dbenv, s, ts, ret) do { \ + int __len = strlen(s) + 1; \ + ts = NULL; \ + if ((ret = __os_malloc((dbenv), \ + __len * sizeof (_TCHAR), &(ts))) == 0 && \ + MultiByteToWideChar(CP_UTF8, 0, \ + (s), -1, (ts), __len) == 0) \ + ret = __os_get_errno(); \ + } while (0) + +#define FROM_TSTRING(dbenv, ts, s, ret) { \ + int __len = WideCharToMultiByte(CP_UTF8, 0, ts, -1, \ + NULL, 0, NULL, NULL); \ + s = NULL; \ + if ((ret = __os_malloc((dbenv), __len, &(s))) == 0 && \ + WideCharToMultiByte(CP_UTF8, 0, \ + (ts), -1, (s), __len, NULL, NULL) == 0) \ + ret = __os_get_errno(); \ + } while (0) + +#define FREE_STRING(dbenv, s) do { \ + if ((s) != NULL) { \ + __os_free((dbenv), (s)); \ + (s) = NULL; \ + } \ + } while (0) + +#else +#define TO_TSTRING(dbenv, s, ts, ret) (ret) = 0, (ts) = (_TCHAR *)(s) +#define FROM_TSTRING(dbenv, ts, s, ret) (ret) = 0, (s) = (char *)(ts) +#define FREE_STRING(dbenv, ts) +#endif diff --git a/db/clib/getcwd.c b/db/clib/getcwd.c index 10485f176..ec28f1fb6 100644 --- a/db/clib/getcwd.c +++ b/db/clib/getcwd.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -31,14 +31,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * + * $Id: getcwd.c,v 11.15 2004/01/28 03:35:52 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: getcwd.c,v 11.14 2003/01/08 04:06:37 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include diff --git a/db/clib/getopt.c b/db/clib/getopt.c index 56f43afb3..527ee6967 100644 --- a/db/clib/getopt.c +++ b/db/clib/getopt.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -31,14 +31,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * + * $Id: getopt.c,v 11.9 2004/01/28 03:35:52 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: getopt.c,v 11.8 2003/01/08 04:06:38 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include diff --git a/db/clib/memcmp.c b/db/clib/memcmp.c index 4143a5482..055a2f5fa 100644 --- a/db/clib/memcmp.c +++ b/db/clib/memcmp.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -31,14 +31,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
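The win_db.h hunk above defines TO_TSTRING, FROM_TSTRING and FREE_STRING to bridge UTF-8 strings and _TCHAR for wide-character Win32 calls. A minimal sketch of the intended usage pattern follows; the helper name, the CreateFile call and the error handling are illustrative and are not taken from this patch:

    /* Illustrative helper, not from the patch: open a file read-only. */
    int
    open_readonly(DB_ENV *dbenv, char *path)
    {
        _TCHAR *tpath;
        HANDLE wh;
        int ret;

        TO_TSTRING(dbenv, path, tpath, ret);    /* UTF-8 -> _TCHAR */
        if (ret != 0)
            return (ret);
        wh = CreateFile(tpath, GENERIC_READ, FILE_SHARE_READ,
            NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
        FREE_STRING(dbenv, tpath);              /* frees only if _UNICODE */
        if (wh == INVALID_HANDLE_VALUE)
            return (__os_get_errno());
        (void)CloseHandle(wh);
        return (0);
    }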
+ * + * $Id: memcmp.c,v 11.9 2004/01/28 03:35:52 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: memcmp.c,v 11.8 2003/01/08 04:06:38 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif diff --git a/db/clib/memmove.c b/db/clib/memmove.c index e92cbf494..60ece571d 100644 --- a/db/clib/memmove.c +++ b/db/clib/memmove.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -31,14 +31,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * + * $Id: memmove.c,v 11.8 2004/01/28 03:35:52 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: memmove.c,v 11.7 2003/01/08 04:06:38 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif diff --git a/db/clib/raise.c b/db/clib/raise.c index 27305549f..2f9e8cb80 100644 --- a/db/clib/raise.c +++ b/db/clib/raise.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: raise.c,v 11.8 2004/01/28 03:35:52 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: raise.c,v 11.7 2003/01/08 04:06:41 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include diff --git a/db/clib/snprintf.c b/db/clib/snprintf.c index e41272dde..e1bc5d112 100644 --- a/db/clib/snprintf.c +++ b/db/clib/snprintf.c @@ -1,24 +1,30 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: snprintf.c,v 11.18 2004/09/22 03:32:43 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: snprintf.c,v 11.12 2003/05/02 16:10:41 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include +#include +#include +#include /* Declare STDERR_FILENO. */ #endif #include "db_int.h" +#if !defined(HAVE_SNPRINTF) || !defined(HAVE_VSNPRINTF) +static void sprintf_overflow __P((void)); +static int sprintf_retcharpnt __P((void)); +#endif + /* * snprintf -- * Bounded version of sprintf. @@ -41,9 +47,98 @@ snprintf(str, n, fmt, va_alist) { static int ret_charpnt = -1; va_list ap; - int len; + size_t len; + + if (ret_charpnt == -1) + ret_charpnt = sprintf_retcharpnt(); + +#ifdef STDC_HEADERS + va_start(ap, fmt); +#else + va_start(ap); +#endif + len = (size_t)vsprintf(str, fmt, ap); + if (ret_charpnt) + len = strlen(str); + + va_end(ap); + + if (len >= n) { + sprintf_overflow(); + /* NOTREACHED */ + } + return ((int)len); +} +#endif + +/* + * vsnprintf -- + * Bounded version of vsprintf. 
+ * + * PUBLIC: #ifndef HAVE_VSNPRINTF + * PUBLIC: int vsnprintf __P((char *, size_t, const char *, va_list)); + * PUBLIC: #endif + */ +#ifndef HAVE_VSNPRINTF +int +vsnprintf(str, n, fmt, ap) + char *str; + size_t n; + const char *fmt; + va_list ap; +{ + static int ret_charpnt = -1; + size_t len; + + if (ret_charpnt == -1) + ret_charpnt = sprintf_retcharpnt(); + + len = (size_t)vsprintf(str, fmt, ap); + if (ret_charpnt) + len = strlen(str); + + if (len >= n) { + sprintf_overflow(); + /* NOTREACHED */ + } + return ((int)len); +} +#endif + +#if !defined(HAVE_SNPRINTF) || !defined(HAVE_VSNPRINTF) +static void +sprintf_overflow() +{ + /* + * !!! + * We're potentially manipulating strings handed us by the application, + * and on systems without a real snprintf() the sprintf() calls could + * have overflowed the buffer. We can't do anything about it now, but + * we don't want to return control to the application, we might have + * overwritten the stack with a Trojan horse. We're not trying to do + * anything recoverable here because systems without snprintf support + * are pretty rare anymore. + */ +#define OVERFLOW_ERROR "internal buffer overflow, process ended\n" +#ifndef STDERR_FILENO +#define STDERR_FILENO 2 +#endif + (void)write(STDERR_FILENO, OVERFLOW_ERROR, sizeof(OVERFLOW_ERROR) - 1); + + /* Be polite. */ + exit(1); - COMPQUIET(n, 0); + /* But firm. */ + abort(); + + /* NOTREACHED */ +} + +static int +sprintf_retcharpnt() +{ + int ret_charpnt; + char buf[10]; /* * Some old versions of sprintf return a pointer to the first argument @@ -53,22 +148,12 @@ snprintf(str, n, fmt, va_alist) * We do this test at run-time because it's not a test we can do in a * cross-compilation environment. */ - if (ret_charpnt == -1) { - char buf[10]; - ret_charpnt = - sprintf(buf, "123") != 3 || - sprintf(buf, "123456789") != 9 || - sprintf(buf, "1234") != 4; - } + ret_charpnt = + (int)sprintf(buf, "123") != 3 || + (int)sprintf(buf, "123456789") != 9 || + (int)sprintf(buf, "1234") != 4; -#ifdef STDC_HEADERS - va_start(ap, fmt); -#else - va_start(ap); -#endif - len = vsprintf(str, fmt, ap); - va_end(ap); - return (ret_charpnt ? (int)strlen(str) : len); + return (ret_charpnt); } #endif diff --git a/db/clib/strcasecmp.c b/db/clib/strcasecmp.c index d5ce6d76d..e8365c451 100644 --- a/db/clib/strcasecmp.c +++ b/db/clib/strcasecmp.c @@ -29,14 +29,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * + * $Id: strcasecmp.c,v 1.8 2004/01/28 03:35:52 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: strcasecmp.c,v 1.7 2001/11/15 17:51:38 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif diff --git a/db/clib/strdup.c b/db/clib/strdup.c index e68623f14..9c451d328 100644 --- a/db/clib/strdup.c +++ b/db/clib/strdup.c @@ -29,14 +29,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
+ * + * $Id: strdup.c,v 1.6 2004/01/28 03:35:52 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: strdup.c,v 1.5 2002/05/01 18:40:05 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include diff --git a/db/clib/strerror.c b/db/clib/strerror.c index a98cab450..e0710add2 100644 --- a/db/clib/strerror.c +++ b/db/clib/strerror.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. */ /* @@ -31,14 +31,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * + * $Id: strerror.c,v 11.8 2004/01/28 03:35:52 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: strerror.c,v 11.7 2003/01/08 04:06:43 bostic Exp $"; -#endif /* not lint */ - /* * strerror -- * Return the string associated with an errno. diff --git a/db/common/crypto_stub.c b/db/common/crypto_stub.c index 234126bca..68f06b4c8 100644 --- a/db/common/crypto_stub.c +++ b/db/common/crypto_stub.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: crypto_stub.c,v 1.4 2004/01/28 03:35:52 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: crypto_stub.c,v 1.3 2003/07/01 19:47:10 bostic Exp $"; -#endif /* not lint */ - #include "db_int.h" /* diff --git a/db/common/db_byteorder.c b/db/common/db_byteorder.c index 2a708c86f..0a48055c8 100644 --- a/db/common/db_byteorder.c +++ b/db/common/db_byteorder.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_byteorder.c,v 11.10 2004/01/28 03:35:52 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_byteorder.c,v 11.9 2003/01/08 04:07:38 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif diff --git a/db/common/db_err.c b/db/common/db_err.c index 8c3c7b376..6e49e7941 100644 --- a/db/common/db_err.c +++ b/db/common/db_err.c @@ -1,22 +1,19 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_err.c,v 11.123 2004/09/22 03:07:50 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_err.c,v 11.100 2003/10/07 18:55:38 mjc Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include #include -#include /* Declare STDERR_FILENO. */ #endif #include "db_int.h" @@ -27,6 +24,9 @@ static const char revid[] = "$Id: db_err.c,v 11.100 2003/10/07 18:55:38 mjc Exp #include "dbinc/log.h" #include "dbinc/txn.h" +static void __db_msgcall __P((const DB_ENV *, const char *, va_list)); +static void __db_msgfile __P((const DB_ENV *, const char *, va_list)); + /* * __db_fchk -- * General flags checking routine. @@ -88,7 +88,8 @@ __db_fnl(dbenv, name) const char *name; { __db_err(dbenv, - "%s: the DB_DIRTY_READ and DB_RMW flags require locking", name); + "%s: the DB_DIRTY_READ, DB_DEGREE_2 and DB_RMW flags require locking", + name); return (EINVAL); } @@ -240,10 +241,11 @@ db_strerror(error) * altered. 
*/ switch (error) { + case DB_BUFFER_SMALL: + return + ("DB_BUFFER_SMALL: User memory too small for return value"); case DB_DONOTINDEX: return ("DB_DONOTINDEX: Secondary index callback returns null"); - case DB_FILEOPEN: - return ("DB_FILEOPEN: Rename or remove while file is open."); case DB_KEYEMPTY: return ("DB_KEYEMPTY: Non-existent key/data pair"); case DB_KEYEXIST: @@ -253,6 +255,8 @@ db_strerror(error) ("DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock"); case DB_LOCK_NOTGRANTED: return ("DB_LOCK_NOTGRANTED: Lock not granted"); + case DB_LOG_BUFFER_FULL: + return ("DB_LOG_BUFFER_FULL: In-memory log buffer is full"); case DB_NOSERVER: return ("DB_NOSERVER: Fatal error, no RPC server"); case DB_NOSERVER_HOME: @@ -268,7 +272,7 @@ db_strerror(error) case DB_REP_DUPMASTER: return ("DB_REP_DUPMASTER: A second master site appeared"); case DB_REP_HANDLE_DEAD: - return ("DB_REP_HANDLE_DEAD: Handle is no longer valid."); + return ("DB_REP_HANDLE_DEAD: Handle is no longer valid"); case DB_REP_HOLDELECTION: return ("DB_REP_HOLDELECTION: Need to hold an election"); case DB_REP_ISPERM: @@ -278,10 +282,10 @@ db_strerror(error) case DB_REP_NEWSITE: return ("DB_REP_NEWSITE: A new site has entered the system"); case DB_REP_NOTPERM: - return ("DB_REP_NOTPERM: Permanent log record not written."); - case DB_REP_OUTDATED: + return ("DB_REP_NOTPERM: Permanent log record not written"); + case DB_REP_STARTUPDONE: return - ("DB_REP_OUTDATED: Insufficient logs on master to recover"); + ("DB_REP_STARTUPDONE: Client completed startup synchronization."); case DB_REP_UNAVAIL: return ("DB_REP_UNAVAIL: Unable to elect a master"); case DB_RUNRECOVERY: @@ -291,6 +295,9 @@ db_strerror(error) ("DB_SECONDARY_BAD: Secondary index inconsistent with primary"); case DB_VERIFY_BAD: return ("DB_VERIFY_BAD: Database verification failed"); + case DB_VERSION_MISMATCH: + return + ("DB_VERSION_MISMATCH: Database environment version mismatch"); default: break; } @@ -330,11 +337,6 @@ __db_err(dbenv, fmt, va_alist) DB_REAL_ERR(dbenv, 0, 0, 0, fmt); } -#define OVERFLOW_ERROR "internal buffer overflow, process aborted\n" -#ifndef STDERR_FILENO -#define STDERR_FILENO 2 -#endif - /* * __db_errcall -- * Do the error message work for callback functions. @@ -350,35 +352,17 @@ __db_errcall(dbenv, error, error_set, fmt, ap) va_list ap; { char *p; - char errbuf[2048]; /* !!!: END OF THE STACK DON'T TRUST SPRINTF. */ + char buf[2048]; /* !!!: END OF THE STACK DON'T TRUST SPRINTF. */ - p = errbuf; + p = buf; if (fmt != NULL) - p += vsnprintf(errbuf, sizeof(errbuf), fmt, ap); + p += vsnprintf(buf, sizeof(buf), fmt, ap); if (error_set) p += snprintf(p, - sizeof(errbuf) - (size_t)(p - errbuf), ": %s", + sizeof(buf) - (size_t)(p - buf), ": %s", db_strerror(error)); -#ifndef HAVE_VSNPRINTF - /* - * !!! - * We're potentially manipulating strings handed us by the application, - * and on systems without a real snprintf() the sprintf() calls could - * have overflowed the buffer. We can't do anything about it now, but - * we don't want to return control to the application, we might have - * overwritten the stack with a Trojan horse. We're not trying to do - * anything recoverable here because systems without snprintf support - * are pretty rare anymore. 
- */ - if ((size_t)(p - errbuf) > sizeof(errbuf)) { - write( - STDERR_FILENO, OVERFLOW_ERROR, sizeof(OVERFLOW_ERROR) - 1); - abort(); - /* NOTREACHED */ - } -#endif - dbenv->db_errcall(dbenv->db_errpfx, errbuf); + dbenv->db_errcall(dbenv, dbenv->db_errpfx, buf); } /* @@ -413,6 +397,111 @@ __db_errfile(dbenv, error, error_set, fmt, ap) (void)fflush(fp); } +/* + * __db_msgadd -- + * Aggregate a set of strings into a buffer for the callback API. + * + * PUBLIC: void __db_msgadd __P((DB_ENV *, DB_MSGBUF *, const char *, ...)) + * PUBLIC: __attribute__ ((__format__ (__printf__, 3, 4))); + */ +void +#ifdef STDC_HEADERS +__db_msgadd(DB_ENV *dbenv, DB_MSGBUF *mbp, const char *fmt, ...) +#else +__db_msgadd(dbenv, mbp, fmt, va_alist) + DB_ENV *dbenv; + DB_MSGBUF *mbp; + const char *fmt; + va_dcl +#endif +{ + va_list ap; + size_t len, olen; + char buf[2048]; /* !!!: END OF THE STACK DON'T TRUST SPRINTF. */ + +#ifdef STDC_HEADERS + va_start(ap, fmt); +#else + va_start(ap); +#endif + len = (size_t)vsnprintf(buf, sizeof(buf), fmt, ap); + + va_end(ap); + + /* + * There's a heap buffer in the DB_ENV handle we use to aggregate the + * message chunks. We maintain a pointer to the buffer, the next slot + * to be filled in in the buffer, and a total buffer length. + */ + olen = (size_t)(mbp->cur - mbp->buf); + if (olen + len >= mbp->len) { + if (__os_realloc(dbenv, mbp->len + len + 256, &mbp->buf)) + return; + mbp->len += (len + 256); + mbp->cur = mbp->buf + olen; + } + + memcpy(mbp->cur, buf, len + 1); + mbp->cur += len; +} + +/* + * __db_msg -- + * Standard DB stat message routine. + * + * PUBLIC: void __db_msg __P((const DB_ENV *, const char *, ...)) + * PUBLIC: __attribute__ ((__format__ (__printf__, 2, 3))); + */ +void +#ifdef STDC_HEADERS +__db_msg(const DB_ENV *dbenv, const char *fmt, ...) +#else +__db_msg(dbenv, fmt, va_alist) + const DB_ENV *dbenv; + const char *fmt; + va_dcl +#endif +{ + DB_REAL_MSG(dbenv, fmt); +} + +/* + * __db_msgcall -- + * Do the message work for callback functions. + */ +static void +__db_msgcall(dbenv, fmt, ap) + const DB_ENV *dbenv; + const char *fmt; + va_list ap; +{ + char buf[2048]; /* !!!: END OF THE STACK DON'T TRUST SPRINTF. */ + + (void)vsnprintf(buf, sizeof(buf), fmt, ap); + + dbenv->db_msgcall(dbenv, buf); +} + +/* + * __db_msgfile -- + * Do the message work for FILE *s. + */ +static void +__db_msgfile(dbenv, fmt, ap) + const DB_ENV *dbenv; + const char *fmt; + va_list ap; +{ + FILE *fp; + + fp = dbenv == NULL || + dbenv->db_msgfile == NULL ? stdout : dbenv->db_msgfile; + (void)vfprintf(fp, fmt, ap); + + (void)fprintf(fp, "\n"); + (void)fflush(fp); +} + /* * __db_logmsg -- * Write information into the DB log. @@ -455,24 +544,7 @@ __db_logmsg(dbenv, txnid, opname, flags, fmt, va_alist) msgdbt.data = __logbuf; msgdbt.size = (u_int32_t)vsnprintf(__logbuf, sizeof(__logbuf), fmt, ap); -#ifndef HAVE_VSNPRINTF - /* - * !!! - * We're potentially manipulating strings handed us by the application, - * and on systems without a real snprintf() the sprintf() calls could - * have overflowed the buffer. We can't do anything about it now, but - * we don't want to return control to the application, we might have - * overwritten the stack with a Trojan horse. We're not trying to do - * anything recoverable here because systems without snprintf support - * are pretty rare anymore. 
- */ - if (msgdbt.size > sizeof(__logbuf)) { - write( - STDERR_FILENO, OVERFLOW_ERROR, sizeof(OVERFLOW_ERROR) - 1); - abort(); - /* NOTREACHED */ - } -#endif + va_end(ap); /* * XXX @@ -481,8 +553,6 @@ __db_logmsg(dbenv, txnid, opname, flags, fmt, va_alist) */ (void)__db_debug_log( (DB_ENV *)dbenv, txnid, &lsn, flags, &opdbt, -1, &msgdbt, NULL, 0); - - va_end(ap); } /* @@ -496,7 +566,7 @@ __db_unknown_flag(dbenv, routine, flag) char *routine; u_int32_t flag; { - __db_err(dbenv, "%s: Unknown flag: 0x%x", routine, (u_int)flag); + __db_err(dbenv, "%s: Unknown flag: %#x", routine, (u_int)flag); DB_ASSERT(0); return (EINVAL); } @@ -512,7 +582,9 @@ __db_unknown_type(dbenv, routine, type) char *routine; DBTYPE type; { - __db_err(dbenv, "%s: Unknown db type: 0x%x", routine, (u_int)type); + __db_err(dbenv, + "%s: Unexpected DB type: %s", routine, __db_dbtype_to_string(type)); + DB_ASSERT(0); return (EINVAL); } @@ -531,6 +603,7 @@ __db_check_txn(dbp, txn, assoc_lid, read_op) int read_op; { DB_ENV *dbenv; + int isp, ret; dbenv = dbp->dbenv; @@ -565,8 +638,19 @@ __db_check_txn(dbp, txn, assoc_lid, read_op) if (dbp->cur_lid >= TXN_MINIMUM) goto open_err; } else { - if (dbp->cur_lid >= TXN_MINIMUM && dbp->cur_lid != txn->txnid) - goto open_err; + if (F_ISSET(txn, TXN_DEADLOCK)) { + __db_err(dbenv, + "Previous deadlock return not resolved"); + return (EINVAL); + } + if (dbp->cur_lid >= TXN_MINIMUM && + dbp->cur_lid != txn->txnid) { + if ((ret = __lock_locker_is_parent(dbenv, + dbp->cur_lid, txn->txnid, &isp)) != 0) + return (ret); + if (!isp) + goto open_err; + } if (!TXN_ON(dbenv)) return (__db_not_txn_env(dbenv)); @@ -653,3 +737,21 @@ __db_rec_repl(dbenv, data_size, data_dlen) "Record length error", (u_long)data_size, (u_long)data_dlen); return (EINVAL); } + +/* + * __db_check_lsn -- + * Display the log sequence error message. + * + * PUBLIC: int __db_check_lsn __P((DB_ENV *, DB_LSN *, DB_LSN *)); + */ +int +__db_check_lsn(dbenv, lsn, prev) + DB_ENV *dbenv; + DB_LSN *lsn, *prev; +{ + __db_err(dbenv, + "Log sequence error: page LSN %lu %lu; previous LSN %lu %lu", + (u_long)(lsn)->file, (u_long)(lsn)->offset, + (u_long)(prev)->file, (u_long)(prev)->offset); + return (EINVAL); +} diff --git a/db/common/db_getlong.c b/db/common/db_getlong.c index 8c3126ebd..c3bd88b0c 100644 --- a/db/common/db_getlong.c +++ b/db/common/db_getlong.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_getlong.c,v 11.21 2004/01/28 03:35:52 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_getlong.c,v 11.20 2003/01/08 04:07:46 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include diff --git a/db/common/db_idspace.c b/db/common/db_idspace.c index e67d88479..49f2e9137 100644 --- a/db/common/db_idspace.c +++ b/db/common/db_idspace.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. 
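In the db_idspace.c hunk that follows, __db_idspace() now subtracts one from the upper bound so the returned maximum is never an in-use id, matching the updated comment. A minimal call sketch restating that contract; the id values and name-space bounds are illustrative:

    u_int32_t inuse[] = { 3, 4, 100, 101 };   /* illustrative ids in use */
    u_int32_t min = 1, max = 105;             /* bounds of the name space */

    __db_idspace(inuse, 4, &min, &max);
    /*
     * On return [min, max] brackets the largest unused stretch: min may
     * itself be an in-use value, but max never is.
     */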
+ * + * $Id: db_idspace.c,v 1.9 2004/01/28 03:35:52 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_idspace.c,v 1.7 2003/05/18 18:29:35 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -44,7 +42,8 @@ __db_idcmp(a, b) * * On input, minp and maxp contain the minimum and maximum valid values for * the name space and on return, they contain the minimum and maximum ids - * available (by finding the biggest gap). + * available (by finding the biggest gap). The minimum can be an inuse + * value, but the maximum cannot be. * * PUBLIC: void __db_idspace __P((u_int32_t *, int, u_int32_t *, u_int32_t *)); */ @@ -85,9 +84,9 @@ __db_idspace(inuse, n, minp, maxp) /* Do same check as we do in the n == 1 case. */ if (inuse[n - 1] != *maxp) *minp = inuse[n - 1]; - *maxp = inuse[0]; + *maxp = inuse[0] - 1; } else { *minp = inuse[low]; - *maxp = inuse[low + 1]; + *maxp = inuse[low + 1] - 1; } } diff --git a/db/common/db_log2.c b/db/common/db_log2.c index 273216980..fcc1a6035 100644 --- a/db/common/db_log2.c +++ b/db/common/db_log2.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -34,14 +34,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * + * $Id: db_log2.c,v 11.9 2004/01/28 03:35:52 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_log2.c,v 11.8 2003/01/08 04:07:52 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif diff --git a/db/common/util_arg.c b/db/common/util_arg.c index 759bd766c..16a17ee28 100644 --- a/db/common/util_arg.c +++ b/db/common/util_arg.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. + * + * $Id: util_arg.c,v 1.6 2004/01/28 03:35:52 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: util_arg.c,v 1.5 2003/01/08 04:07:53 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif diff --git a/db/common/util_cache.c b/db/common/util_cache.c index 7720ed16c..006c34557 100644 --- a/db/common/util_cache.c +++ b/db/common/util_cache.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2000-2003 + * Copyright (c) 2000-2004 * Sleepycat Software. All rights reserved. + * + * $Id: util_cache.c,v 1.8 2004/02/17 16:03:05 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: util_cache.c,v 1.6 2003/05/18 18:29:36 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -25,60 +23,26 @@ static const char revid[] = "$Id: util_cache.c,v 1.6 2003/05/18 18:29:36 bostic * __db_util_cache -- * Compute if we have enough cache. * - * PUBLIC: int __db_util_cache __P((DB_ENV *, DB *, u_int32_t *, int *)); + * PUBLIC: int __db_util_cache __P((DB *, u_int32_t *, int *)); */ int -__db_util_cache(dbenv, dbp, cachep, resizep) - DB_ENV *dbenv; +__db_util_cache(dbp, cachep, resizep) DB *dbp; u_int32_t *cachep; int *resizep; { - DBTYPE type; - DB_BTREE_STAT *bsp; - DB_HASH_STAT *hsp; - DB_QUEUE_STAT *qsp; u_int32_t pgsize; int ret; - void *sp; + + /* Get the current page size. 
*/ + if ((ret = dbp->get_pagesize(dbp, &pgsize)) != 0) + return (ret); /* * The current cache size is in cachep. If it's insufficient, set the * the memory referenced by resizep to 1 and set cachep to the minimum * size needed. - */ - if ((ret = dbp->get_type(dbp, &type)) != 0) { - dbenv->err(dbenv, ret, "DB->get_type"); - return (ret); - } - - if ((ret = dbp->stat(dbp, &sp, DB_FAST_STAT)) != 0) { - dbenv->err(dbenv, ret, "DB->stat"); - return (ret); - } - - switch (type) { - case DB_QUEUE: - qsp = (DB_QUEUE_STAT *)sp; - pgsize = qsp->qs_pagesize; - break; - case DB_HASH: - hsp = (DB_HASH_STAT *)sp; - pgsize = hsp->hash_pagesize; - break; - case DB_BTREE: - case DB_RECNO: - bsp = (DB_BTREE_STAT *)sp; - pgsize = bsp->bt_pagesize; - break; - case DB_UNKNOWN: - default: - dbenv->err(dbenv, ret, "unknown database type: %d", type); - return (EINVAL); - } - free(sp); - - /* + * * Make sure our current cache is big enough. We want at least * DB_MINPAGECACHE pages in the cache. */ diff --git a/db/common/util_log.c b/db/common/util_log.c index 5971c6a10..98fd1951c 100644 --- a/db/common/util_log.c +++ b/db/common/util_log.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2000-2003 + * Copyright (c) 2000-2004 * Sleepycat Software. All rights reserved. + * + * $Id: util_log.c,v 1.14 2004/01/28 03:35:52 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: util_log.c,v 1.13 2003/05/05 19:54:57 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include diff --git a/db/common/util_sig.c b/db/common/util_sig.c index 4c2a02ab6..53087360e 100644 --- a/db/common/util_sig.c +++ b/db/common/util_sig.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2000-2003 + * Copyright (c) 2000-2004 * Sleepycat Software. All rights reserved. + * + * $Id: util_sig.c,v 1.9 2004/01/28 03:35:54 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: util_sig.c,v 1.8 2003/01/08 04:07:58 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include diff --git a/db/crypto/aes_method.c b/db/crypto/aes_method.c index a88731ecf..567e67456 100644 --- a/db/crypto/aes_method.c +++ b/db/crypto/aes_method.c @@ -1,20 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. * - * * Some parts of this code originally written by Adam Stubblefield, - * astubble@rice.edu. + * -- astubble@rice.edu. + * + * $Id: aes_method.c,v 1.20 2004/09/17 22:00:25 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: aes_method.c,v 1.18 2003/04/28 19:59:19 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif @@ -64,7 +61,7 @@ __aes_adj_size(len) { if (len % DB_AES_CHUNK == 0) return (0); - return (DB_AES_CHUNK - (len % DB_AES_CHUNK)); + return (DB_AES_CHUNK - (u_int)(len % DB_AES_CHUNK)); } /* diff --git a/db/crypto/crypto.c b/db/crypto/crypto.c index f23c1f75f..23d24996c 100644 --- a/db/crypto/crypto.c +++ b/db/crypto/crypto.c @@ -1,19 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
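The reworked __db_util_cache above asks the DB handle for its page size directly (instead of going through DB->stat) and then checks whether the cache can hold a minimum number of pages. The same arithmetic, standing alone; MIN_PAGE_CACHE is a placeholder for the library's DB_MINPAGECACHE constant, whose actual value is not shown in this patch.

    // MIN_PAGE_CACHE stands in for DB_MINPAGECACHE; the value is illustrative.
    static const unsigned int MIN_PAGE_CACHE = 10;

    // Return true (and raise *cachep to the minimum acceptable size) when the
    // current cache cannot hold MIN_PAGE_CACHE pages of the given size.
    bool cache_needs_resize(unsigned int pgsize, unsigned int *cachep)
    {
        unsigned int want = MIN_PAGE_CACHE * pgsize;
        if (*cachep >= want)
            return false;           // current cache is big enough
        *cachep = want;             // report the minimum acceptable size
        return true;
    }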
* * Some parts of this code originally written by Adam Stubblefield - * - astubble@rice.edu + * -- astubble@rice.edu + * + * $Id: crypto.c,v 1.30 2004/09/15 21:49:11 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: crypto.c,v 1.23 2003/06/30 17:19:41 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif @@ -45,17 +43,17 @@ __crypto_region_init(dbenv) MUTEX_LOCK(dbenv, &renv->mutex); if (renv->cipher_off == INVALID_ROFF) { if (!CRYPTO_ON(dbenv)) - goto out; + goto err; if (!F_ISSET(infop, REGION_CREATE)) { __db_err(dbenv, "Joining non-encrypted environment with encryption key"); ret = EINVAL; - goto out; + goto err; } if (F_ISSET(db_cipher, CIPHER_ANY)) { __db_err(dbenv, "Encryption algorithm not supplied"); ret = EINVAL; - goto out; + goto err; } /* * Must create the shared information. We need: @@ -63,42 +61,42 @@ __crypto_region_init(dbenv) * After we copy the passwd, we smash and free the one in the * dbenv. */ - if ((ret = __db_shalloc(infop->addr, - sizeof(CIPHER), MUTEX_ALIGN, &cipher)) != 0) - goto out; + if ((ret = __db_shalloc( + infop, sizeof(CIPHER), MUTEX_ALIGN, &cipher)) != 0) + goto err; memset(cipher, 0, sizeof(*cipher)); - if ((ret = __db_shalloc(infop->addr, - dbenv->passwd_len, 0, &sh_passwd)) != 0) { - __db_shalloc_free(infop->addr, cipher); - goto out; + if ((ret = __db_shalloc( + infop, dbenv->passwd_len, 0, &sh_passwd)) != 0) { + __db_shalloc_free(infop, cipher); + goto err; } memset(sh_passwd, 0, dbenv->passwd_len); - cipher->passwd = R_OFFSET(infop, sh_passwd); + cipher->passwd = R_OFFSET(dbenv, infop, sh_passwd); cipher->passwd_len = dbenv->passwd_len; cipher->flags = db_cipher->alg; memcpy(sh_passwd, dbenv->passwd, cipher->passwd_len); - renv->cipher_off = R_OFFSET(infop, cipher); + renv->cipher_off = R_OFFSET(dbenv, infop, cipher); } else { if (!CRYPTO_ON(dbenv)) { __db_err(dbenv, "Encrypted environment: no encryption key supplied"); ret = EINVAL; - goto out; + goto err; } - cipher = R_ADDR(infop, renv->cipher_off); - sh_passwd = R_ADDR(infop, cipher->passwd); + cipher = R_ADDR(dbenv, infop, renv->cipher_off); + sh_passwd = R_ADDR(dbenv, infop, cipher->passwd); if ((cipher->passwd_len != dbenv->passwd_len) || memcmp(dbenv->passwd, sh_passwd, cipher->passwd_len) != 0) { __db_err(dbenv, "Invalid password"); ret = EPERM; - goto out; + goto err; } if (!F_ISSET(db_cipher, CIPHER_ANY) && db_cipher->alg != cipher->flags) { __db_err(dbenv, "Environment encrypted using a different algorithm"); ret = EINVAL; - goto out; + goto err; } if (F_ISSET(db_cipher, CIPHER_ANY)) /* @@ -108,7 +106,7 @@ __crypto_region_init(dbenv) */ if ((ret = __crypto_algsetup(dbenv, db_cipher, cipher->flags, 0)) != 0) - goto out; + goto err; } MUTEX_UNLOCK(dbenv, &renv->mutex); ret = db_cipher->init(dbenv, db_cipher); @@ -124,7 +122,7 @@ __crypto_region_init(dbenv) dbenv->passwd_len = 0; if (0) { -out: MUTEX_UNLOCK(dbenv, &renv->mutex); +err: MUTEX_UNLOCK(dbenv, &renv->mutex); } return (ret); } @@ -157,6 +155,30 @@ __crypto_dbenv_close(dbenv) return (ret); } +/* + * __crypto_region_destroy -- + * Destroy any system resources allocated in the primary region. 
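The checks above mean every process joining an encrypted environment must present the same password and algorithm, and a process without a key cannot join at all. A minimal join through the public C++ API is sketched below; DbEnv::set_encrypt is the DBENV_METHOD pass-through that appears later in this patch, while DbEnv::open, the home path, the flag set and the error handling are assumed from the standard API. With the default error model, failures surface as DbException rather than return codes.

    #include <db_cxx.h>

    int join_encrypted_env(DbEnv &env, const char *home, const char *passwd)
    {
        int ret;

        // Must be set before open; a missing or mismatched key fails in
        // __crypto_region_init with the errors shown above.
        if ((ret = env.set_encrypt(passwd, DB_ENCRYPT_AES)) != 0)
            return ret;
        return env.open(home,
            DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK |
            DB_INIT_LOG | DB_INIT_TXN, 0);
    }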
+ * + * PUBLIC: int __crypto_region_destroy __P((DB_ENV *)); + */ +int +__crypto_region_destroy(dbenv) + DB_ENV *dbenv; +{ + CIPHER *cipher; + REGENV *renv; + REGINFO *infop; + + infop = dbenv->reginfo; + renv = infop->primary; + if (renv->cipher_off != INVALID_ROFF) { + cipher = R_ADDR(dbenv, infop, renv->cipher_off); + __db_shalloc_free(infop, R_ADDR(dbenv, infop, cipher->passwd)); + __db_shalloc_free(infop, cipher); + } + return (0); +} + /* * __crypto_algsetup -- * Given a db_cipher structure and a valid algorithm flag, call @@ -357,7 +379,7 @@ __crypto_set_passwd(dbenv_src, dbenv_dest) DB_ASSERT(CRYPTO_ON(dbenv_src)); - cipher = R_ADDR(infop, renv->cipher_off); - sh_passwd = R_ADDR(infop, cipher->passwd); + cipher = R_ADDR(dbenv_src, infop, renv->cipher_off); + sh_passwd = R_ADDR(dbenv_src, infop, cipher->passwd); return (__dbenv_set_encrypt(dbenv_dest, sh_passwd, DB_ENCRYPT_AES)); } diff --git a/db/crypto/mersenne/mt19937db.c b/db/crypto/mersenne/mt19937db.c index bfdd56a04..1c1699db5 100644 --- a/db/crypto/mersenne/mt19937db.c +++ b/db/crypto/mersenne/mt19937db.c @@ -1,9 +1,8 @@ +/* + * $Id: mt19937db.c,v 1.12 2004/06/14 16:54:27 mjc Exp $ + */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: mt19937db.c,v 1.10 2003/04/24 14:30:42 bostic Exp $"; -#endif /* not lint */ - #include "db_int.h" #include "dbinc/crypto.h" #include "dbinc/hmac.h" @@ -157,8 +156,7 @@ __db_genrand(dbenv) * function will return 4 bytes if we don't send in a key. */ do { - if (__os_clock(dbenv, &secs, &usecs) != 0) - return (0); /* 0 is the only invalid return */ + __os_clock(dbenv, &secs, &usecs); __db_chksum((u_int8_t *)&secs, sizeof(secs), NULL, (u_int8_t *)&seed); } while (seed == 0); diff --git a/db/crypto/rijndael/rijndael-api-fst.c b/db/crypto/rijndael/rijndael-api-fst.c index facd1020e..09475370f 100644 --- a/db/crypto/rijndael/rijndael-api-fst.c +++ b/db/crypto/rijndael/rijndael-api-fst.c @@ -78,7 +78,7 @@ __db_makeKey(key, direction, keyLen, keyMaterial) } if (keyMaterial != NULL) { - memcpy(cipherKey, keyMaterial, key->keyLen/4); + memcpy(cipherKey, keyMaterial, key->keyLen/8); } if (direction == DIR_ENCRYPT) { diff --git a/db/cxx/cxx_db.cpp b/db/cxx/cxx_db.cpp index de6c983f2..22f1b135d 100644 --- a/db/cxx/cxx_db.cpp +++ b/db/cxx/cxx_db.cpp @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: cxx_db.cpp,v 11.87 2004/07/15 18:26:48 ubell Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: cxx_db.cpp,v 11.77 2003/04/18 08:36:29 mjc Exp $"; -#endif /* not lint */ - #include #include @@ -43,7 +41,7 @@ int Db::_name _argspec \ return (ret); \ } -#define DB_METHOD_CHECKED(_name, _cleanup, _argspec, _arglist, _retok) \ +#define DB_DESTRUCTOR(_name, _argspec, _arglist, _retok) \ int Db::_name _argspec \ { \ int ret; \ @@ -53,8 +51,7 @@ int Db::_name _argspec \ DB_ERROR(env_, "Db::" # _name, EINVAL, error_policy()); \ return (EINVAL); \ } \ - if (_cleanup) \ - cleanup(); \ + cleanup(); \ ret = db->_name _arglist; \ if (!_retok(ret)) \ DB_ERROR(env_, "Db::" # _name, ret, error_policy()); \ @@ -151,7 +148,7 @@ int Db::initialize() return (ret); // Associate the DB with this object - imp_ = wrap(db); + imp_ = db; db->api_internal = this; // Create a new DbEnv from a DB_ENV* if it was created locally. @@ -162,7 +159,7 @@ int Db::initialize() // Create a DbMpoolFile from the DB_MPOOLFILE* in the DB handle. 
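The rijndael fix above copies keyLen/8 bytes because keyLen counts bits, and __aes_adj_size (earlier in this patch) pads records up to the cipher chunk size. Both rules in one tiny sketch; the helper names are illustrative and the 16-byte block size is an assumption standing in for DB_AES_CHUNK.

    #include <cstddef>

    // Key material is keyLen bits long: 128 -> 16 bytes, 192 -> 24, 256 -> 32.
    inline size_t key_bits_to_bytes(size_t key_bits)
    {
        return key_bits / 8;
    }

    // Bytes of padding needed to round a record up to the cipher block size.
    inline size_t aes_pad_bytes(size_t len, size_t block = 16 /* assumed */)
    {
        return len % block == 0 ? 0 : block - len % block;
    }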
mpf_ = new DbMpoolFile(); - mpf_->imp_ = wrap(db->mpf); + mpf_->imp_ = db->mpf; return (0); } @@ -221,26 +218,7 @@ int Db::error_policy() } } -int Db::close(u_int32_t flags) -{ - DB *db = unwrap(this); - int ret; - - // after a DB->close (no matter if success or failure), - // the underlying DB object must not be accessed, - // so we clean up in advance. - // - cleanup(); - - // It's safe to throw an error after the close, - // since our error mechanism does not peer into - // the DB* structures. - // - if ((ret = db->close(db, flags)) != 0) - DB_ERROR(env_, "Db::close", ret, error_policy()); - - return (ret); -} +DB_DESTRUCTOR(close, (u_int32_t flags), (db, flags), DB_RETOK_STD) // The following cast implies that Dbc can be no larger than DBC DB_METHOD(cursor, (DbTxn *txnid, Dbc **cursorp, u_int32_t flags), @@ -275,7 +253,7 @@ int Db::get(DbTxn *txnid, Dbt *key, Dbt *value, u_int32_t flags) ret = db->get(db, unwrap(txnid), key, value, flags); if (!DB_RETOK_DBGET(ret)) { - if (ret == ENOMEM && DB_OVERFLOWED_DBT(value)) + if (ret == DB_BUFFER_SMALL) DB_ERROR_DBT(env_, "Db::get", value, error_policy()); else DB_ERROR(env_, "Db::get", ret, error_policy()); @@ -293,8 +271,7 @@ int Db::get_byteswapped(int *isswapped) DbEnv *Db::get_env() { DB *db = (DB *)unwrapConst(this); - DB_ENV *dbenv; - (void)db->get_env(db, &dbenv); + DB_ENV *dbenv = db->get_env(db); return (dbenv != NULL ? DbEnv::get_DbEnv(dbenv) : NULL); } @@ -306,8 +283,7 @@ DbMpoolFile *Db::get_mpf() DB_METHOD(get_dbname, (const char **filenamep, const char **dbnamep), (db, filenamep, dbnamep), DB_RETOK_STD) -DB_METHOD(get_open_flags, (u_int32_t *flagsp), - (db, flagsp), DB_RETOK_STD) +DB_METHOD(get_open_flags, (u_int32_t *flagsp), (db, flagsp), DB_RETOK_STD) int Db::get_type(DBTYPE *dbtype) { @@ -319,13 +295,11 @@ int Db::get_type(DBTYPE *dbtype) // or even extra data members, so these casts, although technically // non-portable, "should" always be okay. DB_METHOD(join, (Dbc **curslist, Dbc **cursorp, u_int32_t flags), - (db, (DBC **)curslist, (DBC **)cursorp, flags), - DB_RETOK_STD) + (db, (DBC **)curslist, (DBC **)cursorp, flags), DB_RETOK_STD) DB_METHOD(key_range, (DbTxn *txnid, Dbt *key, DB_KEY_RANGE *results, u_int32_t flags), - (db, unwrap(txnid), key, results, flags), - DB_RETOK_STD) + (db, unwrap(txnid), key, results, flags), DB_RETOK_STD) // If an error occurred during the constructor, report it now. // Otherwise, call the underlying DB->open method. 
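Db::get above now maps DB_BUFFER_SMALL (rather than ENOMEM plus an overflowed DBT) onto the DBT-flavored error path, so a too-small DB_DBT_USERMEM buffer shows up as a DbMemoryException under the default error model. A sketch of the usual retry, growing the buffer to the size the Dbt reports; the database handle and key are assumed to be set up elsewhere and the helper name is illustrative.

    #include <db_cxx.h>
    #include <vector>

    void fetch_with_retry(Db &db, Dbt &key, std::vector<char> &buf)
    {
        if (buf.empty())
            buf.resize(64);

        Dbt data;
        data.set_data(&buf[0]);
        data.set_ulen((u_int32_t)buf.size());
        data.set_flags(DB_DBT_USERMEM);

        try {
            db.get(NULL, &key, &data, 0);
        } catch (DbMemoryException &) {
            // get_size() reports how much room the record actually needs.
            buf.resize(data.get_size());
            data.set_data(&buf[0]);
            data.set_ulen((u_int32_t)buf.size());
            db.get(NULL, &key, &data, 0);
        }
    }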
@@ -366,31 +340,28 @@ int Db::pget(DbTxn *txnid, Dbt *key, Dbt *pkey, Dbt *value, u_int32_t flags) return (ret); } -DB_METHOD(put, - (DbTxn *txnid, Dbt *key, Dbt *value, u_int32_t flags), - (db, unwrap(txnid), key, value, flags), - DB_RETOK_DBPUT) +DB_METHOD(put, (DbTxn *txnid, Dbt *key, Dbt *value, u_int32_t flags), + (db, unwrap(txnid), key, value, flags), DB_RETOK_DBPUT) -DB_METHOD_CHECKED(rename, 1, +DB_DESTRUCTOR(rename, (const char *file, const char *database, const char *newname, u_int32_t flags), (db, file, database, newname, flags), DB_RETOK_STD) -DB_METHOD_CHECKED(remove, 1, - (const char *file, const char *database, u_int32_t flags), +DB_DESTRUCTOR(remove, (const char *file, const char *database, u_int32_t flags), (db, file, database, flags), DB_RETOK_STD) -DB_METHOD_CHECKED(truncate, 0, - (DbTxn *txnid, u_int32_t *countp, u_int32_t flags), +DB_METHOD(truncate, (DbTxn *txnid, u_int32_t *countp, u_int32_t flags), (db, unwrap(txnid), countp, flags), DB_RETOK_STD) -DB_METHOD_CHECKED(stat, 0, - (void *sp, u_int32_t flags), (db, sp, flags), DB_RETOK_STD) +DB_METHOD(stat, (DbTxn *txnid, void *sp, u_int32_t flags), + (db, unwrap(txnid), sp, flags), DB_RETOK_STD) + +DB_METHOD(stat_print, (u_int32_t flags), (db, flags), DB_RETOK_STD) -DB_METHOD_CHECKED(sync, 0, - (u_int32_t flags), (db, flags), DB_RETOK_STD) +DB_METHOD(sync, (u_int32_t flags), (db, flags), DB_RETOK_STD) -DB_METHOD_CHECKED(upgrade, 0, +DB_METHOD(upgrade, (const char *name, u_int32_t flags), (db, name, flags), DB_RETOK_STD) //////////////////////////////////////////////////////////////////////// @@ -526,10 +497,10 @@ extern "C" int _verify_callback_c(void *handle, const void *str_arg) { char *str; - __DB_OSTREAMCLASS *out; + __DB_STD(ostream) *out; str = (char *)str_arg; - out = (__DB_OSTREAMCLASS *)handle; + out = (__DB_STD(ostream) *)handle; (*out) << str; if (out->fail()) @@ -539,7 +510,7 @@ int _verify_callback_c(void *handle, const void *str_arg) } int Db::verify(const char *name, const char *subdb, - __DB_OSTREAMCLASS *ostr, u_int32_t flags) + __DB_STD(ostream) *ostr, u_int32_t flags) { DB *db = unwrap(this); int ret; @@ -601,6 +572,8 @@ DB_METHOD(get_lorder, (int *db_lorderp), (db, db_lorderp), DB_RETOK_STD) DB_METHOD(set_lorder, (int db_lorder), (db, db_lorder), DB_RETOK_STD) +DB_METHOD_VOID(get_msgfile, (FILE **msgfilep), (db, msgfilep)) +DB_METHOD_VOID(set_msgfile, (FILE *msgfile), (db, msgfile)) DB_METHOD(get_pagesize, (u_int32_t *db_pagesizep), (db, db_pagesizep), DB_RETOK_STD) DB_METHOD(set_pagesize, (u_int32_t db_pagesize), @@ -630,11 +603,16 @@ DB_METHOD_QUIET(set_alloc, (db_malloc_fcn_type malloc_fcn, db_realloc_fcn_type realloc_fcn, db_free_fcn_type free_fcn), (db, malloc_fcn, realloc_fcn, free_fcn)) -void Db::set_errcall(void (*arg)(const char *, char *)) +void Db::set_errcall(void (*arg)(const DbEnv *, const char *, const char *)) { env_->set_errcall(arg); } +void Db::set_msgcall(void (*arg)(const DbEnv *, const char *)) +{ + env_->set_msgcall(arg); +} + void *Db::get_app_private() const { return unwrapConst(this)->app_private; @@ -655,7 +633,24 @@ int Db::set_paniccall(void (*callback)(DbEnv *, int)) return (env_->set_paniccall(callback)); } -void Db::set_error_stream(__DB_OSTREAMCLASS *error_stream) +__DB_STD(ostream) *Db::get_error_stream() +{ + return env_->get_error_stream(); +} + +void Db::set_error_stream(__DB_STD(ostream) *error_stream) { env_->set_error_stream(error_stream); } + +__DB_STD(ostream) *Db::get_message_stream() +{ + return env_->get_message_stream(); +} + +void 
Db::set_message_stream(__DB_STD(ostream) *message_stream) +{ + env_->set_message_stream(message_stream); +} + +DB_METHOD_QUIET(get_transactional, (), (db)) diff --git a/db/cxx/cxx_dbc.cpp b/db/cxx/cxx_dbc.cpp index 9350c97e6..0ca59735f 100644 --- a/db/cxx/cxx_dbc.cpp +++ b/db/cxx/cxx_dbc.cpp @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: cxx_dbc.cpp,v 11.59 2004/01/28 03:35:56 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: cxx_dbc.cpp,v 11.58 2003/04/24 15:36:27 bostic Exp $"; -#endif /* not lint */ - #include #include diff --git a/db/cxx/cxx_dbt.cpp b/db/cxx/cxx_dbt.cpp index ab35f2f81..ab8942495 100644 --- a/db/cxx/cxx_dbt.cpp +++ b/db/cxx/cxx_dbt.cpp @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: cxx_dbt.cpp,v 11.55 2004/01/28 03:35:56 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: cxx_dbt.cpp,v 11.54 2003/01/08 04:10:27 bostic Exp $"; -#endif /* not lint */ - #include #include diff --git a/db/cxx/cxx_env.cpp b/db/cxx/cxx_env.cpp index 7917c06c1..92067ab1f 100644 --- a/db/cxx/cxx_env.cpp +++ b/db/cxx/cxx_env.cpp @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: cxx_env.cpp,v 11.105 2004/09/22 22:20:31 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: cxx_env.cpp,v 11.96 2003/06/30 19:17:48 mjc Exp $"; -#endif /* not lint */ - #include #include // needed for set_error_stream #include @@ -63,13 +61,6 @@ void DbEnv::_name _argspec \ dbenv->_name _arglist; \ } -// This datatype is needed for picky compilers. 
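Db::stat, changed above to take a transaction handle as its first argument, still fills in a library-allocated statistics structure. A fast key count on a btree database might look like the sketch below; DB_FAST_STAT and the bt_nkeys field are standard, and freeing the structure with free() assumes the default allocator.

    #include <db_cxx.h>
    #include <cstdio>
    #include <cstdlib>

    void print_key_count(Db &db, DbTxn *txn)
    {
        DB_BTREE_STAT *sp = NULL;
        if (db.stat(txn, &sp, DB_FAST_STAT) == 0 && sp != NULL) {
            std::printf("keys: %lu\n", (unsigned long)sp->bt_nkeys);
            std::free(sp);
        }
    }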
-// -extern "C" { - typedef void (*db_errcall_fcn_type) - (const char *, char *); -}; - // The reason for a static variable is that some structures // (like Dbts) have no connection to any Db or DbEnv, so when // errors occur in their methods, we must have some reasonable @@ -81,8 +72,6 @@ extern "C" { // static int last_known_error_policy = ON_ERROR_UNKNOWN; -__DB_OSTREAMCLASS *DbEnv::error_stream_ = 0; - // These 'glue' function are declared as extern "C" so they will // be compatible with picky compilers that do not allow mixing // of function pointers to 'C' functions with function pointers @@ -101,14 +90,21 @@ void _paniccall_intercept_c(DB_ENV *env, int errval) } extern "C" -void _stream_error_function_c(const char *prefix, char *message) +void _stream_error_function_c(const DB_ENV *env, + const char *prefix, const char *message) { - DbEnv::_stream_error_function(prefix, message); + DbEnv::_stream_error_function(env, prefix, message); +} + +extern "C" +void _stream_message_function_c(const DB_ENV *env, const char *message) +{ + DbEnv::_stream_message_function(env, message); } extern "C" int _app_dispatch_intercept_c(DB_ENV *env, DBT *dbt, - DB_LSN *lsn, db_recops op) + DB_LSN *lsn, db_recops op) { return (DbEnv::_app_dispatch_intercept(env, dbt, lsn, op)); } @@ -124,14 +120,9 @@ int _rep_send_intercept_c(DB_ENV *env, const DBT *cntrl, void DbEnv::_feedback_intercept(DB_ENV *env, int opcode, int pct) { - if (env == 0) { - DB_ERROR(DbEnv::get_DbEnv(env), - "DbEnv::feedback_callback", EINVAL, ON_ERROR_UNKNOWN); - return; - } - DbEnv *cxxenv = (DbEnv *)env->api1_internal; + DbEnv *cxxenv = DbEnv::get_DbEnv(env); if (cxxenv == 0) { - DB_ERROR(DbEnv::get_DbEnv(env), + DB_ERROR(0, "DbEnv::feedback_callback", EINVAL, ON_ERROR_UNKNOWN); return; } @@ -145,32 +136,24 @@ void DbEnv::_feedback_intercept(DB_ENV *env, int opcode, int pct) void DbEnv::_paniccall_intercept(DB_ENV *env, int errval) { - if (env == 0) { - DB_ERROR(DbEnv::get_DbEnv(env), - "DbEnv::paniccall_callback", EINVAL, ON_ERROR_UNKNOWN); - } - DbEnv *cxxenv = (DbEnv *)env->api1_internal; + DbEnv *cxxenv = DbEnv::get_DbEnv(env); if (cxxenv == 0) { - DB_ERROR(DbEnv::get_DbEnv(env), + DB_ERROR(0, "DbEnv::paniccall_callback", EINVAL, ON_ERROR_UNKNOWN); + return; } if (cxxenv->paniccall_callback_ == 0) { - DB_ERROR(DbEnv::get_DbEnv(env), - "DbEnv::paniccall_callback", EINVAL, + DB_ERROR(cxxenv, "DbEnv::paniccall_callback", EINVAL, cxxenv->error_policy()); + return; } (*cxxenv->paniccall_callback_)(cxxenv, errval); } int DbEnv::_app_dispatch_intercept(DB_ENV *env, DBT *dbt, - DB_LSN *lsn, db_recops op) + DB_LSN *lsn, db_recops op) { - if (env == 0) { - DB_ERROR(DbEnv::get_DbEnv(env), - "DbEnv::app_dispatch_callback", EINVAL, ON_ERROR_UNKNOWN); - return (EINVAL); - } - DbEnv *cxxenv = (DbEnv *)env->api1_internal; + DbEnv *cxxenv = DbEnv::get_DbEnv(env); if (cxxenv == 0) { DB_ERROR(DbEnv::get_DbEnv(env), "DbEnv::app_dispatch_callback", EINVAL, ON_ERROR_UNKNOWN); @@ -188,14 +171,10 @@ int DbEnv::_app_dispatch_intercept(DB_ENV *env, DBT *dbt, } int DbEnv::_rep_send_intercept(DB_ENV *env, const DBT *cntrl, - const DBT *data, const DB_LSN *lsn, int id, u_int32_t flags) + const DBT *data, const DB_LSN *lsn, + int id, u_int32_t flags) { - if (env == 0) { - DB_ERROR(DbEnv::get_DbEnv(env), - "DbEnv::rep_send_callback", EINVAL, ON_ERROR_UNKNOWN); - return (EINVAL); - } - DbEnv *cxxenv = (DbEnv *)env->api1_internal; + DbEnv *cxxenv = DbEnv::get_DbEnv(env); if (cxxenv == 0) { DB_ERROR(DbEnv::get_DbEnv(env), "DbEnv::rep_send_callback", EINVAL, 
ON_ERROR_UNKNOWN); @@ -224,6 +203,8 @@ DbEnv::DbEnv(u_int32_t flags) : imp_(0) , construct_error_(0) , construct_flags_(flags) +, error_stream_(0) +, message_stream_(0) , app_dispatch_callback_(0) , feedback_callback_(0) , paniccall_callback_(0) @@ -240,6 +221,8 @@ DbEnv::DbEnv(DB_ENV *env, u_int32_t flags) : imp_(0) , construct_error_(0) , construct_flags_(flags) +, error_stream_(0) +, message_stream_(0) , app_dispatch_callback_(0) , feedback_callback_(0) , paniccall_callback_(0) @@ -363,7 +346,7 @@ int DbEnv::initialize(DB_ENV *env) construct_flags_ & ~DB_CXX_NO_EXCEPTIONS)) != 0) return (ret); } - imp_ = wrap(env); + imp_ = env; env->api1_internal = this; // for DB_ENV* to DbEnv* conversion return (0); } @@ -383,6 +366,7 @@ DBENV_METHOD(lock_id_free, (u_int32_t id), (dbenv, id)) DBENV_METHOD(lock_put, (DbLock *lock), (dbenv, &lock->lock_)) DBENV_METHOD(lock_stat, (DB_LOCK_STAT **statp, u_int32_t flags), (dbenv, statp, flags)) +DBENV_METHOD(lock_stat_print, (u_int32_t flags), (dbenv, flags)) DBENV_METHOD_ERR(lock_vec, (u_int32_t locker, u_int32_t flags, DB_LOCKREQ list[], int nlist, DB_LOCKREQ **elist_returned), @@ -410,6 +394,7 @@ DBENV_METHOD(log_put, (DbLsn *lsn, const Dbt *data, u_int32_t flags), (dbenv, lsn, data, flags)) DBENV_METHOD(log_stat, (DB_LOG_STAT **spp, u_int32_t flags), (dbenv, spp, flags)) +DBENV_METHOD(log_stat_print, (u_int32_t flags), (dbenv, flags)) int DbEnv::memp_fcreate(DbMpoolFile **dbmfp, u_int32_t flags) { @@ -424,7 +409,7 @@ int DbEnv::memp_fcreate(DbMpoolFile **dbmfp, u_int32_t flags) if (DB_RETOK_STD(ret)) { *dbmfp = new DbMpoolFile(); - (*dbmfp)->imp_ = wrap(mpf); + (*dbmfp)->imp_ = mpf; } else DB_ERROR(this, "DbMpoolFile::f_create", ret, ON_ERROR_UNKNOWN); @@ -439,9 +424,8 @@ DBENV_METHOD(memp_register, DBENV_METHOD(memp_stat, (DB_MPOOL_STAT **gsp, DB_MPOOL_FSTAT ***fsp, u_int32_t flags), (dbenv, gsp, fsp, flags)) - +DBENV_METHOD(memp_stat_print, (u_int32_t flags), (dbenv, flags)) DBENV_METHOD(memp_sync, (DbLsn *sn), (dbenv, sn)) - DBENV_METHOD(memp_trickle, (int pct, int *nwrotep), (dbenv, pct, nwrotep)) // If an error occurred during the constructor, report it now. @@ -568,32 +552,53 @@ void DbEnv::runtime_error_lock_get(DbEnv *env, } } -// static method -char *DbEnv::strerror(int error) +void DbEnv::_stream_error_function( + const DB_ENV *env, const char *prefix, const char *message) { - return (db_strerror(error)); -} + const DbEnv *cxxenv = DbEnv::get_const_DbEnv(env); + if (cxxenv == 0) { + DB_ERROR(0, + "DbEnv::stream_error", EINVAL, ON_ERROR_UNKNOWN); + return; + } -void DbEnv::_stream_error_function(const char *prefix, char *message) -{ - // HP compilers need the extra casts, we don't know why. - if (error_stream_) { + if (cxxenv->error_callback_) + cxxenv->error_callback_(cxxenv, prefix, message); + else if (cxxenv->error_stream_) { + // HP compilers need the extra casts, we don't know why. 
if (prefix) { - (*error_stream_) << prefix << (const char *)": "; - } - if (message) { - (*error_stream_) << (const char *)message; + (*cxxenv->error_stream_) << prefix; + (*cxxenv->error_stream_) << (const char *)": "; } - (*error_stream_) << (const char *)"\n"; + if (message) + (*cxxenv->error_stream_) << (const char *)message; + (*cxxenv->error_stream_) << (const char *)"\n"; } } -// set methods +void DbEnv::_stream_message_function(const DB_ENV *env, const char *message) +{ + const DbEnv *cxxenv = DbEnv::get_const_DbEnv(env); + if (cxxenv == 0) { + DB_ERROR(0, + "DbEnv::stream_message", EINVAL, ON_ERROR_UNKNOWN); + return; + } -DBENV_METHOD_VOID(get_errfile, (FILE **errfilep), (dbenv, errfilep)) -DBENV_METHOD_VOID(set_errfile, (FILE *errfile), (dbenv, errfile)) -DBENV_METHOD_VOID(get_errpfx, (const char **errpfxp), (dbenv, errpfxp)) -DBENV_METHOD_VOID(set_errpfx, (const char *errpfx), (dbenv, errpfx)) + if (cxxenv->message_callback_) + cxxenv->message_callback_(cxxenv, message); + else if (cxxenv->message_stream_) { + // HP compilers need the extra casts, we don't know why. + (*cxxenv->message_stream_) << (const char *)message; + (*cxxenv->message_stream_) << (const char *)"\n"; + } +} + +// static method +char *DbEnv::strerror(int error) +{ + return (db_strerror(error)); +} // We keep these alphabetical by field name, // for comparison with Java's list. @@ -603,6 +608,10 @@ DBENV_METHOD(get_encrypt_flags, (u_int32_t *flagsp), (dbenv, flagsp)) DBENV_METHOD(set_encrypt, (const char *passwd, u_int32_t flags), (dbenv, passwd, flags)) +DBENV_METHOD_VOID(get_errfile, (FILE **errfilep), (dbenv, errfilep)) +DBENV_METHOD_VOID(set_errfile, (FILE *errfile), (dbenv, errfile)) +DBENV_METHOD_VOID(get_errpfx, (const char **errpfxp), (dbenv, errpfxp)) +DBENV_METHOD_VOID(set_errpfx, (const char *errpfx), (dbenv, errpfx)) DBENV_METHOD(get_lg_bsize, (u_int32_t *bsizep), (dbenv, bsizep)) DBENV_METHOD(set_lg_bsize, (u_int32_t bsize), (dbenv, bsize)) DBENV_METHOD(get_lg_dir, (const char **dirp), (dbenv, dirp)) @@ -628,6 +637,8 @@ DBENV_METHOD(get_lk_max_objects, (u_int32_t *max_objectsp), DBENV_METHOD(set_lk_max_objects, (u_int32_t max_objects), (dbenv, max_objects)) DBENV_METHOD(get_mp_mmapsize, (size_t *mmapsizep), (dbenv, mmapsizep)) DBENV_METHOD(set_mp_mmapsize, (size_t mmapsize), (dbenv, mmapsize)) +DBENV_METHOD_VOID(get_msgfile, (FILE **msgfilep), (dbenv, msgfilep)) +DBENV_METHOD_VOID(set_msgfile, (FILE *msgfile), (dbenv, msgfile)) DBENV_METHOD(get_tmp_dir, (const char **tmp_dirp), (dbenv, tmp_dirp)) DBENV_METHOD(set_tmp_dir, (const char *tmp_dir), (dbenv, tmp_dir)) DBENV_METHOD(get_tx_max, (u_int32_t *tx_maxp), (dbenv, tx_maxp)) @@ -650,36 +661,29 @@ DBENV_METHOD(set_cachesize, (u_int32_t gbytes, u_int32_t bytes, int ncache), (dbenv, gbytes, bytes, ncache)) -void DbEnv::set_errcall(void (*arg)(const char *, char *)) +void DbEnv::set_errcall(void (*arg)(const DbEnv *, const char *, const char *)) { DB_ENV *dbenv = unwrap(this); - // XXX - // We are casting from a function ptr declared with C++ - // linkage to one (same arg types) declared with C - // linkage. It's hard to imagine a pair of C/C++ - // compilers from the same vendor for which this - // won't work. Unfortunately, we can't use a - // intercept function like the others since the - // function does not have a (DbEnv*) as one of - // the args. If this causes trouble, we can pull - // the same trick we use in Java, namely stuffing - // a (DbEnv*) pointer into the prefix. We're - // avoiding this for the moment because it obfuscates. 
- // - (*(dbenv->set_errcall))(dbenv, (db_errcall_fcn_type)arg); + error_callback_ = arg; + error_stream_ = 0; + + dbenv->set_errcall(dbenv, (arg == 0) ? 0 : + _stream_error_function_c); } -// Note: This actually behaves a bit like a static function, -// since DB_ENV.db_errcall has no information about which -// db_env triggered the call. A user that has multiple DB_ENVs -// will simply not be able to have different streams for each one. -// -void DbEnv::set_error_stream(__DB_OSTREAMCLASS *stream) +__DB_STD(ostream) *DbEnv::get_error_stream() +{ + return (error_stream_); +} + +void DbEnv::set_error_stream(__DB_STD(ostream) *stream) { DB_ENV *dbenv = unwrap(this); error_stream_ = stream; + error_callback_ = 0; + dbenv->set_errcall(dbenv, (stream == 0) ? 0 : _stream_error_function_c); } @@ -690,19 +694,46 @@ int DbEnv::set_feedback(void (*arg)(DbEnv *, int, int)) feedback_callback_ = arg; - return ((*(dbenv->set_feedback))(dbenv, _feedback_intercept_c)); + return (dbenv->set_feedback(dbenv, _feedback_intercept_c)); } DBENV_METHOD(get_flags, (u_int32_t *flagsp), (dbenv, flagsp)) DBENV_METHOD(set_flags, (u_int32_t flags, int onoff), (dbenv, flags, onoff)) +void DbEnv::set_msgcall(void (*arg)(const DbEnv *, const char *)) +{ + DB_ENV *dbenv = unwrap(this); + + message_callback_ = arg; + message_stream_ = 0; + + dbenv->set_msgcall(dbenv, (arg == 0) ? 0 : + _stream_message_function_c); +} + +__DB_STD(ostream) *DbEnv::get_message_stream() +{ + return (message_stream_); +} + +void DbEnv::set_message_stream(__DB_STD(ostream) *stream) +{ + DB_ENV *dbenv = unwrap(this); + + message_stream_ = stream; + message_callback_ = 0; + + dbenv->set_msgcall(dbenv, (stream == 0) ? 0 : + _stream_message_function_c); +} + int DbEnv::set_paniccall(void (*arg)(DbEnv *, int)) { DB_ENV *dbenv = unwrap(this); paniccall_callback_ = arg; - return ((*(dbenv->set_paniccall))(dbenv, _paniccall_intercept_c)); + return (dbenv->set_paniccall(dbenv, _paniccall_intercept_c)); } DBENV_METHOD(set_rpc_server, @@ -721,7 +752,7 @@ int DbEnv::set_app_dispatch int ret; app_dispatch_callback_ = arg; - if ((ret = (*(dbenv->set_app_dispatch))(dbenv, + if ((ret = dbenv->set_app_dispatch(dbenv, _app_dispatch_intercept_c)) != 0) DB_ERROR(this, "DbEnv::set_app_dispatch", ret, error_policy()); @@ -785,7 +816,7 @@ int DbEnv::txn_recover(DbPreplist *preplist, long count, for (i = 0; i < *retp; i++) { preplist[i].txn = new DbTxn(); - preplist[i].txn->imp_ = wrap(c_preplist[i].txn); + preplist[i].txn->imp_ = c_preplist[i].txn; memcpy(preplist[i].gid, c_preplist[i].gid, sizeof(preplist[i].gid)); } @@ -797,10 +828,11 @@ int DbEnv::txn_recover(DbPreplist *preplist, long count, DBENV_METHOD(txn_stat, (DB_TXN_STAT **statp, u_int32_t flags), (dbenv, statp, flags)) +DBENV_METHOD(txn_stat_print, (u_int32_t flags), (dbenv, flags)) int DbEnv::set_rep_transport(int myid, int (*f_send)(DbEnv *, const Dbt *, const Dbt *, const DbLsn *, int, - u_int32_t)) + u_int32_t)) { DB_ENV *dbenv = unwrap(this); int ret; @@ -814,8 +846,9 @@ int DbEnv::set_rep_transport(int myid, } DBENV_METHOD(rep_elect, - (int nsites, int pri, u_int32_t timeout, int *idp), - (dbenv, nsites, pri, timeout, idp)) + (int nsites, + int nvotes, int priority, u_int32_t timeout, int *eidp, u_int32_t flags), + (dbenv, nvotes, nsites, priority, timeout, eidp, flags)) int DbEnv::rep_process_message(Dbt *control, Dbt *rec, int *idp, DbLsn *ret_lsnp) @@ -837,6 +870,7 @@ DBENV_METHOD(rep_start, DBENV_METHOD(rep_stat, (DB_REP_STAT **statp, u_int32_t flags), (dbenv, statp, flags)) 
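With error_stream_, message_stream_ and the matching callbacks now stored per DbEnv (and the errcall signature carrying the DbEnv pointer), two environments in one process can report to different places. A small wiring sketch; the environment names, prefixes and log file are illustrative.

    #include <db_cxx.h>
    #include <fstream>
    #include <iostream>

    // A callback in the new (const DbEnv *, const char *, const char *) shape.
    static void log_error(const DbEnv *, const char *prefix, const char *msg)
    {
        if (prefix != NULL)
            std::cerr << prefix << ": ";
        if (msg != NULL)
            std::cerr << msg;
        std::cerr << std::endl;
    }

    void wire_up_diagnostics(DbEnv &primary, DbEnv &replica,
        std::ofstream &replog)
    {
        primary.set_errpfx("primary");
        primary.set_errcall(log_error);         // setting a callback clears any
                                                // stream, and vice versa
        primary.set_message_stream(&std::cout); // verbose/statistics output

        replica.set_errpfx("replica");
        replica.set_error_stream(&replog);      // this environment only
        replica.set_message_stream(&replog);
    }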
+DBENV_METHOD(rep_stat_print, (u_int32_t flags), (dbenv, flags)) DBENV_METHOD(get_rep_limit, (u_int32_t *gbytesp, u_int32_t *bytesp), (dbenv, gbytesp, bytesp)) @@ -860,7 +894,5 @@ char *DbEnv::version(int *major, int *minor, int *patch) DbEnv *DbEnv::wrap_DB_ENV(DB_ENV *dbenv) { DbEnv *wrapped_env = get_DbEnv(dbenv); - if (wrapped_env == NULL) - wrapped_env = new DbEnv(dbenv, 0); - return wrapped_env; + return (wrapped_env != NULL) ? wrapped_env : new DbEnv(dbenv, 0); } diff --git a/db/cxx/cxx_except.cpp b/db/cxx/cxx_except.cpp index 2710af30b..22eb0eae9 100644 --- a/db/cxx/cxx_except.cpp +++ b/db/cxx/cxx_except.cpp @@ -1,68 +1,20 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: cxx_except.cpp,v 11.28 2004/09/22 03:34:48 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: cxx_except.cpp,v 11.21 2003/05/19 15:38:51 mjc Exp $"; -#endif /* not lint */ - #include #include #include "db_cxx.h" #include "dbinc/cxx_int.h" -// tmpString is used to create strings on the stack -// -class tmpString -{ -public: - tmpString(const char *str1, - const char *str2 = 0, - const char *str3 = 0, - const char *str4 = 0, - const char *str5 = 0); - ~tmpString() { delete [] s_; } - operator const char *() { return (s_); } - -private: - char *s_; -}; - -tmpString::tmpString(const char *str1, - const char *str2, - const char *str3, - const char *str4, - const char *str5) -{ - size_t len = strlen(str1); - if (str2) - len += strlen(str2); - if (str3) - len += strlen(str3); - if (str4) - len += strlen(str4); - if (str5) - len += strlen(str5); - - s_ = new char[len+1]; - - strcpy(s_, str1); - if (str2) - strcat(s_, str2); - if (str3) - strcat(s_, str3); - if (str4) - strcat(s_, str4); - if (str5) - strcat(s_, str5); -} - // Note: would not be needed if we can inherit from exception // It does not appear to be possible to inherit from exception // with the current Microsoft library (VC5.0). 
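The exception destructors and what() above now carry throw() specifications, so DbException fits the std::exception shape. A typical catch sequence in application code, using only members shown in this patch (what, get_errno, DbDeadlockException); do_work is a placeholder for whatever transactional operation might throw.

    #include <db_cxx.h>
    #include <iostream>

    static void do_work(DbEnv &)
    {
        // ... transactional work that may throw DbException ...
    }

    int run_once(DbEnv &env)
    {
        try {
            do_work(env);
            return 0;
        } catch (DbDeadlockException &) {
            return DB_LOCK_DEADLOCK;    // caller should abort and retry
        } catch (DbException &e) {
            std::cerr << e.what() << std::endl;
            return e.get_errno();
        }
    }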
@@ -80,66 +32,99 @@ static char *dupString(const char *s) // // //////////////////////////////////////////////////////////////////////// -DbException::~DbException() +DbException::~DbException() throw() { - if (what_) - delete [] what_; + delete [] what_; } DbException::DbException(int err) : err_(err) , env_(0) { - what_ = dupString(db_strerror(err)); + describe(0, 0); } DbException::DbException(const char *description) : err_(0) , env_(0) { - what_ = dupString(tmpString(description)); + describe(0, description); } -DbException::DbException(const char *prefix, int err) +DbException::DbException(const char *description, int err) : err_(err) , env_(0) { - what_ = dupString(tmpString(prefix, ": ", db_strerror(err))); + describe(0, description); } -DbException::DbException(const char *prefix1, const char *prefix2, int err) +DbException::DbException(const char *prefix, const char *description, int err) : err_(err) , env_(0) { - what_ = dupString(tmpString(prefix1, ": ", prefix2, ": ", - db_strerror(err))); + describe(prefix, description); } DbException::DbException(const DbException &that) -: err_(that.err_) +: __DB_STD(exception)() +, what_(dupString(that.what_)) +, err_(that.err_) , env_(0) { - what_ = dupString(that.what_); } DbException &DbException::operator = (const DbException &that) { if (this != &that) { err_ = that.err_; - if (what_) - delete [] what_; - what_ = 0; // in case new throws exception + delete [] what_; what_ = dupString(that.what_); } return (*this); } +void DbException::describe(const char *prefix, const char *description) +{ + char msgbuf[1024], *p, *end; + + p = msgbuf; + end = msgbuf + sizeof(msgbuf) - 1; + + if (prefix != NULL) { + strncpy(p, prefix, (p < end) ? end - p: 0); + p += strlen(prefix); + strncpy(p, ": ", (p < end) ? end - p: 0); + p += 2; + } + if (description != NULL) { + strncpy(p, description, (p < end) ? end - p: 0); + p += strlen(description); + if (err_ != 0) { + strncpy(p, ": ", (p < end) ? end - p: 0); + p += 2; + } + } + if (err_ != 0) { + strncpy(p, db_strerror(err_), (p < end) ? end - p: 0); + p += strlen(db_strerror(err_)); + } + + /* + * If the result was too long, the buffer will not be null-terminated, + * so we need to fix that here before duplicating it. 
+ */ + if (p >= end) + *end = '\0'; + + what_ = dupString(msgbuf); +} + int DbException::get_errno() const { return (err_); } -const char *DbException::what() const +const char *DbException::what() const throw() { return (what_); } @@ -161,7 +146,7 @@ void DbException::set_env(DbEnv *env) //////////////////////////////////////////////////////////////////////// static const char *memory_err_desc = "Dbt not large enough for available data"; -DbMemoryException::~DbMemoryException() +DbMemoryException::~DbMemoryException() throw() { } @@ -171,25 +156,12 @@ DbMemoryException::DbMemoryException(Dbt *dbt) { } -DbMemoryException::DbMemoryException(const char *description) -: DbException(description, ENOMEM) -, dbt_(0) -{ -} - DbMemoryException::DbMemoryException(const char *prefix, Dbt *dbt) : DbException(prefix, memory_err_desc, ENOMEM) , dbt_(dbt) { } -DbMemoryException::DbMemoryException(const char *prefix1, const char *prefix2, - Dbt *dbt) -: DbException(prefix1, prefix2, ENOMEM) -, dbt_(dbt) -{ -} - DbMemoryException::DbMemoryException(const DbMemoryException &that) : DbException(that) , dbt_(that.dbt_) @@ -217,7 +189,7 @@ Dbt *DbMemoryException::get_dbt() const // // //////////////////////////////////////////////////////////////////////// -DbDeadlockException::~DbDeadlockException() +DbDeadlockException::~DbDeadlockException() throw() { } @@ -245,7 +217,7 @@ DbDeadlockException // // //////////////////////////////////////////////////////////////////////// -DbLockNotGrantedException::~DbLockNotGrantedException() +DbLockNotGrantedException::~DbLockNotGrantedException() throw() { delete lock_; } @@ -258,13 +230,18 @@ DbLockNotGrantedException::DbLockNotGrantedException(const char *prefix, , op_(op) , mode_(mode) , obj_(obj) +, lock_(new DbLock(lock)) , index_(index) { - lock_ = new DbLock(lock); } DbLockNotGrantedException::DbLockNotGrantedException(const char *description) : DbException(description, DB_LOCK_NOTGRANTED) +, op_(DB_LOCK_GET) +, mode_(DB_LOCK_NG) +, obj_(NULL) +, lock_(NULL) +, index_(0) { } @@ -275,7 +252,7 @@ DbLockNotGrantedException::DbLockNotGrantedException op_ = that.op_; mode_ = that.mode_; obj_ = that.obj_; - lock_ = new DbLock(*that.lock_); + lock_ = (that.lock_ != NULL) ? new DbLock(*that.lock_) : NULL; index_ = that.index_; } @@ -287,7 +264,7 @@ DbLockNotGrantedException op_ = that.op_; mode_ = that.mode_; obj_ = that.obj_; - lock_ = new DbLock(*that.lock_); + lock_ = (that.lock_ != NULL) ? new DbLock(*that.lock_) : NULL; index_ = that.index_; } return (*this); @@ -324,7 +301,7 @@ int DbLockNotGrantedException::get_index() const // // //////////////////////////////////////////////////////////////////////// -DbRunRecoveryException::~DbRunRecoveryException() +DbRunRecoveryException::~DbRunRecoveryException() throw() { } diff --git a/db/cxx/cxx_lock.cpp b/db/cxx/cxx_lock.cpp index 602c7409b..d22cf662b 100644 --- a/db/cxx/cxx_lock.cpp +++ b/db/cxx/cxx_lock.cpp @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. 
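describe() above assembles "prefix: description: error string" into a fixed 1024-byte buffer with strncpy and then patches up termination by hand. For comparison, the same composition with snprintf, which always terminates and never writes past the buffer; the function name is illustrative, snprintf is assumed available, and db_strerror's output is passed in as a plain string.

    #include <stddef.h>
    #include <stdio.h>

    void format_what(char *buf, size_t buflen, const char *prefix,
        const char *description, const char *errstr)
    {
        // Each piece is optional; separators appear only between present pieces.
        snprintf(buf, buflen, "%s%s%s%s%s",
            prefix != NULL ? prefix : "",
            prefix != NULL ? ": " : "",
            description != NULL ? description : "",
            (description != NULL && errstr != NULL) ? ": " : "",
            errstr != NULL ? errstr : "");
    }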
+ * + * $Id: cxx_lock.cpp,v 11.19 2004/01/28 03:35:56 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: cxx_lock.cpp,v 11.18 2003/01/08 04:10:36 bostic Exp $"; -#endif /* not lint */ - #include #include diff --git a/db/cxx/cxx_logc.cpp b/db/cxx/cxx_logc.cpp index 0a7fdaca1..c5399b531 100644 --- a/db/cxx/cxx_logc.cpp +++ b/db/cxx/cxx_logc.cpp @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: cxx_logc.cpp,v 11.13 2004/02/05 02:25:12 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: cxx_logc.cpp,v 11.11 2003/03/27 20:05:04 merrells Exp $"; -#endif /* not lint */ - #include #include @@ -56,7 +54,7 @@ int DbLogc::get(DbLsn *lsn, Dbt *data, u_int32_t _flags) ret = logc->get(logc, lsn, data, _flags); if (!DB_RETOK_LGGET(ret)) { - if (ret == ENOMEM && DB_OVERFLOWED_DBT(data)) + if (ret == DB_BUFFER_SMALL) DB_ERROR_DBT(DbEnv::get_DbEnv(logc->dbenv), "DbLogc::get", data, ON_ERROR_UNKNOWN); else diff --git a/db/cxx/cxx_mpool.cpp b/db/cxx/cxx_mpool.cpp index 6ef491e0f..54747b092 100644 --- a/db/cxx/cxx_mpool.cpp +++ b/db/cxx/cxx_mpool.cpp @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: cxx_mpool.cpp,v 11.28 2004/01/28 03:35:56 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: cxx_mpool.cpp,v 11.27 2003/06/30 17:19:41 bostic Exp $"; -#endif /* not lint */ - #include #include "db_cxx.h" diff --git a/db/cxx/cxx_multi.cpp b/db/cxx/cxx_multi.cpp index be6287a3c..0961f2921 100644 --- a/db/cxx/cxx_multi.cpp +++ b/db/cxx/cxx_multi.cpp @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: cxx_multi.cpp,v 1.4 2004/01/28 03:35:56 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: cxx_multi.cpp,v 1.3 2003/02/22 01:25:43 merrells Exp $"; -#endif /* not lint */ - #include "db_cxx.h" DbMultipleIterator::DbMultipleIterator(const Dbt &dbt) diff --git a/db/cxx/cxx_seq.cpp b/db/cxx/cxx_seq.cpp new file mode 100644 index 000000000..60bc7455b --- /dev/null +++ b/db/cxx/cxx_seq.cpp @@ -0,0 +1,113 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1997-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: cxx_seq.cpp,v 11.3 2004/09/23 20:05:08 mjc Exp $ + */ + +#include "db_config.h" + +#include +#include + +#include "db_cxx.h" +#include "dbinc/cxx_int.h" + +#include "db_int.h" + +// Helper macro for simple methods that pass through to the +// underlying C method. It may return an error or raise an exception. 
+// Note this macro expects that input _argspec is an argument +// list element (e.g., "char *arg") and that _arglist is the arguments +// that should be passed through to the C method (e.g., "(db, arg)") +// +#define DBSEQ_METHOD(_name, _argspec, _arglist, _destructor) \ +int DbSequence::_name _argspec \ +{ \ + int ret; \ + DB_SEQUENCE *seq = unwrap(this); \ + DbEnv *dbenv = DbEnv::get_DbEnv(seq->seq_dbp->dbenv); \ + \ + ret = seq->_name _arglist; \ + if (_destructor) \ + imp_ = 0; \ + if (!DB_RETOK_STD(ret)) \ + DB_ERROR(dbenv, \ + "DbSequence::" # _name, ret, ON_ERROR_UNKNOWN); \ + return (ret); \ +} + +DbSequence::DbSequence(Db *db, u_int32_t flags) +: imp_(0) +{ + DB_SEQUENCE *seq; + int ret; + + if ((ret = db_sequence_create(&seq, unwrap(db), flags)) != 0) + DB_ERROR(db->get_env(), "DbSequence::DbSequence", ret, + ON_ERROR_UNKNOWN); + else { + imp_ = seq; + seq->api_internal = this; + } +} + +DbSequence::DbSequence(DB_SEQUENCE *seq) +: imp_(seq) +{ + seq->api_internal = this; +} + +DbSequence::~DbSequence() +{ + DB_SEQUENCE *seq; + + seq = unwrap(this); + if (seq != NULL) + (void)seq->close(seq, 0); +} + +DBSEQ_METHOD(open, (DbTxn *txnid, Dbt *key, u_int32_t flags), + (seq, unwrap(txnid), key, flags), 0) +DBSEQ_METHOD(initial_value, (db_seq_t value), (seq, value), 0) +DBSEQ_METHOD(close, (u_int32_t flags), (seq, flags), 1) +DBSEQ_METHOD(remove, (DbTxn *txnid, u_int32_t flags), + (seq, unwrap(txnid), flags), 1) +DBSEQ_METHOD(stat, (DB_SEQUENCE_STAT **sp, u_int32_t flags), + (seq, sp, flags), 0) +DBSEQ_METHOD(stat_print, (u_int32_t flags), (seq, flags), 0) + +DBSEQ_METHOD(get, + (DbTxn *txnid, int32_t delta, db_seq_t *retp, u_int32_t flags), + (seq, unwrap(txnid), delta, retp, flags), 0) +DBSEQ_METHOD(get_cachesize, (int32_t *sizep), (seq, sizep), 0) +DBSEQ_METHOD(set_cachesize, (int32_t size), (seq, size), 0) +DBSEQ_METHOD(get_flags, (u_int32_t *flagsp), (seq, flagsp), 0) +DBSEQ_METHOD(set_flags, (u_int32_t flags), (seq, flags), 0) +DBSEQ_METHOD(get_range, (db_seq_t *minp, db_seq_t *maxp), (seq, minp, maxp), 0) +DBSEQ_METHOD(set_range, (db_seq_t min, db_seq_t max), (seq, min, max), 0) + +Db *DbSequence::get_db() +{ + DB_SEQUENCE *seq = unwrap(this); + DB *db; + (void)seq->get_db(seq, &db); + return Db::get_Db(db); +} + +Dbt *DbSequence::get_key() +{ + DB_SEQUENCE *seq = unwrap(this); + memset(&key_, 0, sizeof (DBT)); + (void)seq->get_key(seq, &key_); + return Dbt::get_Dbt(&key_); +} + +// static method +DbSequence *DbSequence::wrap_DB_SEQUENCE(DB_SEQUENCE *seq) +{ + DbSequence *wrapped_seq = get_DbSequence(seq); + return (wrapped_seq != NULL) ? wrapped_seq : new DbSequence(seq); +} diff --git a/db/cxx/cxx_txn.cpp b/db/cxx/cxx_txn.cpp index 9db1c9fb9..89ad7f02b 100644 --- a/db/cxx/cxx_txn.cpp +++ b/db/cxx/cxx_txn.cpp @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. 
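The DBSEQ_METHOD pass-throughs above give the new DbSequence wrapper the same shape as the underlying DB_SEQUENCE API. A minimal use of it, drawing one value from a sequence stored under a key in an already-open database; the key name and initial value are illustrative, and with the default error model failures arrive as DbException.

    #include <db_cxx.h>

    db_seq_t next_id(Db &db, DbTxn *txn)
    {
        DbSequence seq(&db, 0);
        Dbt key((void *)"order_id", sizeof("order_id") - 1);

        seq.initial_value(1);           // honored when the sequence is created
        seq.open(txn, &key, DB_CREATE);

        db_seq_t val;
        seq.get(txn, 1, &val, 0);       // take the next value (delta of 1)
        seq.close(0);
        return val;
    }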
+ * + * $Id: cxx_txn.cpp,v 11.33 2004/09/22 22:20:31 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: cxx_txn.cpp,v 11.29 2003/03/11 15:39:40 merrells Exp $"; -#endif /* not lint */ - #include #include "db_cxx.h" @@ -25,20 +23,20 @@ static const char revid[] = "$Id: cxx_txn.cpp,v 11.29 2003/03/11 15:39:40 merrel // list element (e.g., "char *arg") and that _arglist is the arguments // that should be passed through to the C method (e.g., "(db, arg)") // -#define DBTXN_METHOD(_name, _delete, _argspec, _arglist) \ -int DbTxn::_name _argspec \ -{ \ - int ret; \ - DB_TXN *txn = unwrap(this); \ - \ - ret = txn->_name _arglist; \ - /* Weird, but safe if we don't access this again. */ \ - if (_delete) \ - delete this; \ - if (!DB_RETOK_STD(ret)) \ - DB_ERROR(DbEnv::get_DbEnv(txn->mgrp->dbenv), \ - "DbTxn::" # _name, ret, ON_ERROR_UNKNOWN); \ - return (ret); \ +#define DBTXN_METHOD(_name, _delete, _argspec, _arglist) \ +int DbTxn::_name _argspec \ +{ \ + int ret; \ + DB_TXN *txn = unwrap(this); \ + DbEnv *dbenv = DbEnv::get_DbEnv(txn->mgrp->dbenv); \ + \ + ret = txn->_name _arglist; \ + /* Weird, but safe if we don't access this again. */ \ + if (_delete) \ + delete this; \ + if (!DB_RETOK_STD(ret)) \ + DB_ERROR(dbenv, "DbTxn::" # _name, ret, ON_ERROR_UNKNOWN); \ + return (ret); \ } // private constructor, never called but needed by some C++ linkers @@ -48,7 +46,7 @@ DbTxn::DbTxn() } DbTxn::DbTxn(DB_TXN *txn) -: imp_(wrap(txn)) +: imp_(txn) { txn->api_internal = this; } @@ -77,7 +75,5 @@ DBTXN_METHOD(set_timeout, 0, (db_timeout_t timeout, u_int32_t flags), DbTxn *DbTxn::wrap_DB_TXN(DB_TXN *txn) { DbTxn *wrapped_txn = get_DbTxn(txn); - if (wrapped_txn == NULL) - wrapped_txn = new DbTxn(txn); - return wrapped_txn; + return (wrapped_txn != NULL) ? wrapped_txn : new DbTxn(txn); } diff --git a/db/db/crdel.src b/db/db/crdel.src index 6e64a7eee..034e7b82f 100644 --- a/db/db/crdel.src +++ b/db/db/crdel.src @@ -1,17 +1,15 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: crdel.src,v 11.27 2003/11/14 05:32:29 ubell Exp $ + * $Id: crdel.src,v 11.29 2004/06/17 17:35:15 bostic Exp $ */ PREFIX __crdel DBPRIVATE -INCLUDE #include "db_config.h" -INCLUDE INCLUDE #ifndef NO_SYSTEM_INCLUDES INCLUDE #include INCLUDE diff --git a/db/db/crdel_auto.c b/db/db/crdel_auto.c index a30f5d792..d4f2cd716 100644 --- a/db/db/crdel_auto.c +++ b/db/db/crdel_auto.c @@ -1,4 +1,5 @@ /* Do not edit: automatically built by gen_rec.awk. 
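The reworked DBTXN_METHOD above resolves the DbEnv wrapper before making the underlying C call, because commit/abort-style calls free the DB_TXN (and "delete this" frees the wrapper), so nothing reachable through txn may be touched afterwards. The general save-before-destroy shape, reduced to a self-contained sketch with illustrative names; Handle is assumed to be heap-allocated.

    #include <iostream>
    #include <string>

    struct Handle {
        std::string owner;
        int finish() { delete this; return 0; }     // destroys the handle itself
    };

    int finish_and_report(Handle *h)
    {
        std::string owner = h->owner;   // copy out first; *h is gone below
        int ret = h->finish();
        if (ret != 0)
            std::cerr << owner << ": finish failed" << std::endl;
        return ret;
    }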
*/ + #include "db_config.h" #ifndef NO_SYSTEM_INCLUDES @@ -33,33 +34,42 @@ __crdel_metasub_log(dbp, txnid, ret_lsnp, flags, pgno, page, lsn) DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___crdel_metasub; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -75,27 +85,23 @@ __crdel_metasub_log(dbp, txnid, ret_lsnp, flags, pgno, page, lsn) logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -143,132 +149,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
*/ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; #endif - } else{ -#ifdef DIAGNOSTIC -do_put: -#endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__crdel_metasub_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __crdel_metasub_getpgnos __P((DB_ENV *, DBT *, - * PUBLIC: DB_LSN *, db_recops, void *)); - */ -int -__crdel_metasub_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __crdel_metasub_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__crdel_metasub_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __crdel_metasub_args *argp; - u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __crdel_metasub_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__crdel_metasub%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tpage: "); - for (i = 0; i < argp->page.size; i++) { - ch = ((u_int8_t *)argp->page.data)[i]; - printf(isprint(ch) || ch == 0x0a ? 
"%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tlsn: [%lu][%lu]\n", - (u_long)argp->lsn.file, (u_long)argp->lsn.offset); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __crdel_metasub_read __P((DB_ENV *, void *, * PUBLIC: __crdel_metasub_args **)); @@ -287,9 +208,9 @@ __crdel_metasub_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__crdel_metasub_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -320,45 +241,6 @@ __crdel_metasub_read(dbenv, recbuf, argpp) return (0); } -/* - * PUBLIC: int __crdel_init_print __P((DB_ENV *, int (***)(DB_ENV *, - * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); - */ -int -__crdel_init_print(dbenv, dtabp, dtabsizep) - DB_ENV *dbenv; - int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - size_t *dtabsizep; -{ - int ret; - - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __crdel_metasub_print, DB___crdel_metasub)) != 0) - return (ret); - return (0); -} - -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __crdel_init_getpgnos __P((DB_ENV *, - * PUBLIC: int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), - * PUBLIC: size_t *)); - */ -int -__crdel_init_getpgnos(dbenv, dtabp, dtabsizep) - DB_ENV *dbenv; - int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - size_t *dtabsizep; -{ - int ret; - - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __crdel_metasub_getpgnos, DB___crdel_metasub)) != 0) - return (ret); - return (0); -} -#endif /* HAVE_REPLICATION */ - /* * PUBLIC: int __crdel_init_recover __P((DB_ENV *, int (***)(DB_ENV *, * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); diff --git a/db/db/crdel_autop.c b/db/db/crdel_autop.c new file mode 100644 index 000000000..e57a5c714 --- /dev/null +++ b/db/db/crdel_autop.c @@ -0,0 +1,82 @@ +/* Do not edit: automatically built by gen_rec.awk. */ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include + +#include +#include +#endif + +#include "db_int.h" +#include "dbinc/crypto.h" +#include "dbinc/db_page.h" +#include "dbinc/db_dispatch.h" +#include "dbinc/db_am.h" +#include "dbinc/log.h" +#include "dbinc/txn.h" + +/* + * PUBLIC: int __crdel_metasub_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__crdel_metasub_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __crdel_metasub_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __crdel_metasub_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__crdel_metasub%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tpage: "); + for (i = 0; i < argp->page.size; i++) { + ch = ((u_int8_t *)argp->page.data)[i]; + printf(isprint(ch) || ch == 0x0a ? 
"%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tlsn: [%lu][%lu]\n", + (u_long)argp->lsn.file, (u_long)argp->lsn.offset); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __crdel_init_print __P((DB_ENV *, int (***)(DB_ENV *, + * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); + */ +int +__crdel_init_print(dbenv, dtabp, dtabsizep) + DB_ENV *dbenv; + int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); + size_t *dtabsizep; +{ + int ret; + + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __crdel_metasub_print, DB___crdel_metasub)) != 0) + return (ret); + return (0); +} diff --git a/db/db/crdel_rec.c b/db/db/crdel_rec.c index 46529c12c..7ff4fbd06 100644 --- a/db/db/crdel_rec.c +++ b/db/db/crdel_rec.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: crdel_rec.c,v 11.68 2004/04/29 00:07:55 ubell Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: crdel_rec.c,v 11.66 2003/06/30 17:19:42 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -52,15 +50,9 @@ __crdel_metasub_recover(dbenv, dbtp, lsnp, op, info) REC_INTRO(__crdel_metasub_read, 0); if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { - if (DB_REDO(op)) { - if ((ret = __memp_fget(mpf, - &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - } else { - *lsnp = argp->prev_lsn; - ret = 0; - goto out; - } + *lsnp = argp->prev_lsn; + ret = 0; + goto out; } modified = 0; diff --git a/db/db/db.c b/db/db/db.c index 7499a2d3e..4191a3a95 100644 --- a/db/db/db.c +++ b/db/db/db.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -35,14 +35,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * + * $Id: db.c,v 11.298 2004/10/07 16:43:43 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db.c,v 11.283 2003/11/14 05:32:29 ubell Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -61,10 +59,9 @@ static const char revid[] = "$Id: db.c,v 11.283 2003/11/14 05:32:29 ubell Exp $" #include "dbinc/qam.h" #include "dbinc/txn.h" -static int __db_dbenv_mpool __P((DB *, const char *, u_int32_t)); static int __db_disassociate __P((DB *)); -#if CONFIG_TEST +#ifdef CONFIG_TEST static void __db_makecopy __P((DB_ENV *, const char *, const char *)); static int __db_testdocopy __P((DB_ENV *, const char *)); static int __qam_testdocopy __P((DB *, const char *)); @@ -124,15 +121,13 @@ __db_master_open(subdbp, txn, name, flags, mode, dbpp) goto err; /* - * Verify that pagesize is the same on both. - * The items in dbp were now initialized from the meta - * page. The items in dbp were set in __db_dbopen - * when we either read or created the master file. - * Other items such as checksum and encryption are - * checked when we read the meta-page. So we do not - * check those here. However, if the meta-page caused - * chksumming to be turned on and it wasn't already, set - * it here. + * Verify that pagesize is the same on both. The items in dbp were now + * initialized from the meta page. The items in dbp were set in + * __db_dbopen when we either read or created the master file. 
Other + * items such as checksum and encryption are checked when we read the + * meta-page. So we do not check those here. However, if the + * meta-page caused checksumming to be turned on and it wasn't already, + * set it here. */ if (F_ISSET(dbp, DB_AM_CHKSUM)) F_SET(subdbp, DB_AM_CHKSUM); @@ -170,7 +165,7 @@ __db_master_update(mdbp, sdbp, txn, subdb, type, action, newname, flags) DB_ENV *dbenv; DBC *dbc, *ndbc; DBT key, data, ndata; - PAGE *p; + PAGE *p, *r; db_pgno_t t_pgno; int modify, ret, t_ret; @@ -246,6 +241,18 @@ __db_master_update(mdbp, sdbp, txn, subdb, type, action, newname, flags) __memp_fget(mdbp->mpf, &sdbp->meta_pgno, 0, &p)) != 0) goto err; + /* Free the root on the master db. */ + if (TYPE(p) == P_BTREEMETA) { + if ((ret = __memp_fget(mdbp->mpf, + &((BTMETA *)p)->root, 0, &r)) != 0) + goto err; + + /* Free and put the page. */ + if ((ret = __db_free(dbc, r)) != 0) { + r = NULL; + goto err; + } + } /* Free and put the page. */ if ((ret = __db_free(dbc, p)) != 0) { p = NULL; @@ -362,14 +369,6 @@ done: /* if ((t_ret = __memp_fput(mdbp->mpf, p, DB_MPOOL_DIRTY)) != 0) ret = t_ret; - /* - * Since we cannot close this file until after - * transaction commit, we need to sync the dirty - * pages, because we'll read these directly from - * disk to open. - */ - if ((t_ret = __db_sync(mdbp)) != 0 && ret == 0) - ret = t_ret; } else (void)__memp_fput(mdbp->mpf, p, 0); } @@ -502,8 +501,10 @@ __db_dbenv_setup(dbp, txn, fname, id, flags) /* * __db_dbenv_mpool -- * Set up the underlying environment cache during a db_open. + * + * PUBLIC: int __db_dbenv_mpool __P((DB *, const char *, u_int32_t)); */ -static int +int __db_dbenv_mpool(dbp, fname, flags) DB *dbp; const char *fname; @@ -520,11 +521,6 @@ __db_dbenv_mpool(dbp, fname, flags) dbenv = dbp->dbenv; - /* Register DB's pgin/pgout functions. */ - if ((ret = __memp_register( - dbenv, DB_FTYPE_SET, __db_pgin, __db_pgout)) != 0) - return (ret); - /* * If we need to pre- or post-process a file's pages on I/O, set the * file type. If it's a hash file, always call the pgin and pgout @@ -613,7 +609,6 @@ __db_close(dbp, txn, flags) u_int32_t flags; { DB_ENV *dbenv; - u_int32_t dbpflags; int db_ref, deferred_close, ret, t_ret; dbenv = dbp->dbenv; @@ -629,7 +624,6 @@ __db_close(dbp, txn, flags) if (txn != NULL) (void)__db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0); - dbpflags = dbp->flags; /* Refresh the structure and close any underlying resources. */ ret = __db_refresh(dbp, txn, flags, &deferred_close); @@ -640,22 +634,6 @@ __db_close(dbp, txn, flags) if (deferred_close) return (ret); - /* - * Call the access specific close function. - * - * !!! - * Because of where these functions are called in the DB handle close - * process, these routines can't do anything that would dirty pages or - * otherwise affect closing down the database. Specifically, we can't - * abort and recover any of the information they control. - */ - if ((t_ret = __bam_db_close(dbp)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __ham_db_close(dbp)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __qam_db_close(dbp, dbpflags)) != 0 && ret == 0) - ret = t_ret; - /* !!! * This code has an apparent race between the moment we read and * decrement dbenv->db_ref and the moment we check whether it's 0. @@ -703,7 +681,7 @@ __db_refresh(dbp, txn, flags, deferred_closep) DB_ENV *dbenv; DB_LOCKREQ lreq; DB_MPOOL *dbmp; - int ret, t_ret; + int resync, ret, t_ret; ret = 0; @@ -750,6 +728,7 @@ __db_refresh(dbp, txn, flags, deferred_closep) * routine. 
Note that any failure on a close is considered "really * bad" and we just break out of the loop and force forward. */ + resync = TAILQ_FIRST(&dbp->active_queue) == NULL ? 0 : 1; while ((dbc = TAILQ_FIRST(&dbp->active_queue)) != NULL) if ((t_ret = __db_c_close(dbc)) != 0) { if (ret == 0) @@ -765,8 +744,9 @@ __db_refresh(dbp, txn, flags, deferred_closep) } /* - * Close any outstanding join cursors. Join cursors destroy - * themselves on close and have no separate destroy routine. + * Close any outstanding join cursors. Join cursors destroy themselves + * on close and have no separate destroy routine. We don't have to set + * the resync flag here, because join cursors aren't write cursors. */ while ((dbc = TAILQ_FIRST(&dbp->join_queue)) != NULL) if ((t_ret = __db_join_close(dbc)) != 0) { @@ -784,7 +764,7 @@ __db_refresh(dbp, txn, flags, deferred_closep) * entire buffer cache is searched. If we're in recovery, don't flush * the file, it's not necessary. */ - if (!LF_ISSET(DB_NOSYNC) && + if (resync && !LF_ISSET(DB_NOSYNC) && !F_ISSET(dbp, DB_AM_DISCARD | DB_AM_RECOVER) && (t_ret = __memp_fsync(dbp->mpf)) != 0 && ret == 0) ret = t_ret; @@ -808,8 +788,8 @@ __db_refresh(dbp, txn, flags, deferred_closep) if (F_ISSET(dbp, DB_AM_RECOVER)) t_ret = __dbreg_revoke_id(dbp, 0, DB_LOGFILEID_INVALID); else { - if ((t_ret = __dbreg_close_id(dbp, txn)) != 0 && - txn != NULL) { + if ((t_ret = __dbreg_close_id(dbp, + txn, DBREG_CLOSE)) != 0 && txn != NULL) { /* * We're in a txn and the attempt to log the * close failed; let the txn subsystem know @@ -846,6 +826,64 @@ __db_refresh(dbp, txn, flags, deferred_closep) ret = t_ret; never_opened: + /* + * Remove this DB handle from the DB_ENV's dblist, if it's been added. + * + * Close our reference to the underlying cache while locked, we don't + * want to race with a thread searching for our underlying cache link + * while opening a DB handle. + */ + MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp); + if (dbp->dblistlinks.le_prev != NULL) { + LIST_REMOVE(dbp, dblistlinks); + dbp->dblistlinks.le_prev = NULL; + } + + /* Close the memory pool file handle. */ + if (dbp->mpf != NULL) { + if ((t_ret = __memp_fclose(dbp->mpf, + F_ISSET(dbp, DB_AM_DISCARD) ? DB_MPOOL_DISCARD : 0)) != 0 && + ret == 0) + ret = t_ret; + dbp->mpf = NULL; + } + + MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp); + + /* + * Call the access specific close function. + * + * We do this here rather than in __db_close as we need to do this when + * aborting an open so that file descriptors are closed and abort of + * renames can succeed on platforms that lock open files (such as + * Windows). In particular, we need to ensure that all the extents + * associated with a queue are closed so that queue renames can be + * aborted. + * + * It is also important that we do this before releasing the handle + * lock, because dbremove and dbrename assume that once they have the + * handle lock, it is safe to modify the underlying file(s). + * + * !!! + * Because of where these functions are called in the DB handle close + * process, these routines can't do anything that would dirty pages or + * otherwise affect closing down the database. Specifically, we can't + * abort and recover any of the information they control. + */ + if ((t_ret = __bam_db_close(dbp)) != 0 && ret == 0) + ret = t_ret; + if ((t_ret = __ham_db_close(dbp)) != 0 && ret == 0) + ret = t_ret; + if ((t_ret = __qam_db_close(dbp, dbp->flags)) != 0 && ret == 0) + ret = t_ret; + + /* + * !!! 
+ * At this point, the access-method specific information has been + * freed. From now on, we can use the dbp, but not touch any + * access-method specific data. + */ + if (dbp->lid != DB_LOCK_INVALIDID) { /* We may have pending trade operations on this dbp. */ if (txn != NULL) @@ -902,30 +940,6 @@ never_opened: memset(&dbp->my_rkey, 0, sizeof(DBT)); memset(&dbp->my_rdata, 0, sizeof(DBT)); - /* - * Remove this DB handle from the DB_ENV's dblist, if it's been added. - * - * Close our reference to the underlying cache while locked, we don't - * want to race with a thread searching for our underlying cache link - * while opening a DB handle. - */ - MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp); - if (dbp->dblistlinks.le_prev != NULL) { - LIST_REMOVE(dbp, dblistlinks); - dbp->dblistlinks.le_prev = NULL; - } - - /* Close the memory pool file handle. */ - if (dbp->mpf != NULL) { - if ((t_ret = __memp_fclose(dbp->mpf, - F_ISSET(dbp, DB_AM_DISCARD) ? DB_MPOOL_DISCARD : 0)) != 0 && - ret == 0) - ret = t_ret; - dbp->mpf = NULL; - } - - MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp); - /* Clear out fields that normally get set during open. */ memset(dbp->fileid, 0, sizeof(dbp->fileid)); dbp->adj_fileid = 0; @@ -1005,9 +1019,18 @@ __db_backup_name(dbenv, name, txn, backup) { DB_LSN lsn; size_t len; - int plen, ret, use_lsn; + int ret; char *p, *retp; + /* + * Part of the name may be a full path, so we need to make sure that + * we allocate enough space for it, even in the case where we don't + * use the entire filename for the backup name. + */ + len = strlen(name) + strlen(BACKUP_PREFIX) + MAX_LSN_TO_TEXT; + if ((ret = __os_malloc(dbenv, len, &retp)) != 0) + return (ret); + /* * Create the name. Backup file names are in one of two forms: * @@ -1015,64 +1038,49 @@ __db_backup_name(dbenv, name, txn, backup) * and * in a non-transactional env: __db.FILENAME * - * If the transaction doesn't have a current LSN, we write - * a dummy log record to force it, so that we ensure that - * all tmp names are unique. + * If the transaction doesn't have a current LSN, we write a dummy + * log record to force it, so we ensure all tmp names are unique. * * In addition, the name passed may contain an env-relative path. * In that case, put the __db. in the right place (in the last * component of the pathname). + * + * There are four cases here: + * 1. simple path w/out transaction + * 2. simple path + transaction + * 3. multi-component path w/out transaction + * 4. multi-component path + transaction */ - if (!F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txn != NULL) { + p = __db_rpath(name); + if (txn == NULL) + if (p == NULL) /* Case 1. */ + snprintf(retp, len, "%s%s", BACKUP_PREFIX, name); + else /* Case 3. */ + snprintf(retp, len, "%.*s%s%s", + (int)(p - name) + 1, name, BACKUP_PREFIX, p + 1); + else { if (IS_ZERO_LSN(txn->last_lsn)) { /* - * Write dummy log record. The two choices for - * dummy log records are __db_noop_log and - * __db_debug_log; unfortunately __db_noop_log requires - * a valid dbp, and we aren't guaranteed to be able - * to pass one in here. + * Write dummy log record. The two choices for dummy + * log records are __db_noop_log and __db_debug_log; + * unfortunately __db_noop_log requires a valid dbp, + * and we aren't guaranteed to be able to pass one in + * here. 
*/ - if ((ret = __db_debug_log(dbenv, txn, &lsn, 0, - NULL, 0, NULL, NULL, 0)) != 0) + if ((ret = __db_debug_log(dbenv, + txn, &lsn, 0, NULL, 0, NULL, NULL, 0)) != 0) { + __os_free(dbenv, retp); return (ret); + } } else lsn = txn->last_lsn; - use_lsn = 1; - } else - use_lsn = 0; - - /* - * Part of the name may be a full path, so we need to make sure that - * we allocate enough space for it, even in the case where we don't - * use the entire filename for the backup name. - */ - len = strlen(name) + strlen(BACKUP_PREFIX) + MAX_LSN_TO_TEXT; - if ((ret = __os_malloc(dbenv, len, &retp)) != 0) - return (ret); - - /* - * There are four cases here: - * 1. simple path w/out transaction - * 2. simple path + transaction - * 3. multi-component path w/out transaction - * 4. multi-component path + transaction - */ - if ((p = __db_rpath(name)) == NULL) { - if (use_lsn) /* case 2 */ + if (p == NULL) /* Case 2. */ snprintf(retp, len, "%s%x.%x", BACKUP_PREFIX, lsn.file, lsn.offset); - else /* case 1 */ - snprintf(retp, len, "%s%s", BACKUP_PREFIX, name); - } else { - plen = (int)(p - name) + 1; - p++; - if (use_lsn) /* case 4 */ - snprintf(retp, len, - "%.*s%x.%x", plen, name, lsn.file, lsn.offset); - else /* case 3 */ - snprintf(retp, len, - "%.*s%s%s", plen, name, BACKUP_PREFIX, p); + else /* Case 4. */ + snprintf(retp, len, "%.*s%x.%x", + (int)(p - name) + 1, name, lsn.file, lsn.offset); } *backup = retp; @@ -1140,12 +1148,12 @@ __db_disassociate(sdbp) return (ret); } -#if CONFIG_TEST +#ifdef CONFIG_TEST /* * __db_testcopy * Create a copy of all backup files and our "main" DB. * - * PUBLIC: #if CONFIG_TEST + * PUBLIC: #ifdef CONFIG_TEST * PUBLIC: int __db_testcopy __P((DB_ENV *, DB *, const char *)); * PUBLIC: #endif */ @@ -1163,7 +1171,7 @@ __db_testcopy(dbenv, dbp, name) if (name == NULL) { dbmp = dbenv->mp_handle; mpf = dbp->mpf; - name = R_ADDR(dbmp->reginfo, mpf->mfp->path_off); + name = R_ADDR(dbenv, dbmp->reginfo, mpf->mfp->path_off); } if (dbp != NULL && dbp->type == DB_QUEUE) @@ -1214,26 +1222,27 @@ __db_testdocopy(dbenv, name) { size_t len; int dircnt, i, ret; - char **namesp, *backup, *copy, *dir, *p, *real_name; - real_name = NULL; + char *backup, *copy, *dir, **namesp, *p, *real_name; + + dircnt = 0; + copy = backup = NULL; + namesp = NULL; + /* Get the real backing file name. */ if ((ret = __db_appname(dbenv, DB_APP_DATA, name, 0, NULL, &real_name)) != 0) return (ret); - copy = backup = NULL; - namesp = NULL; - /* * Maximum size of file, including adding a ".afterop". */ len = strlen(real_name) + strlen(BACKUP_PREFIX) + MAX_LSN_TO_TEXT + 9; if ((ret = __os_malloc(dbenv, len, ©)) != 0) - goto out; + goto err; if ((ret = __os_malloc(dbenv, len, &backup)) != 0) - goto out; + goto err; /* * First copy the file itself. @@ -1242,9 +1251,10 @@ __db_testdocopy(dbenv, name) __db_makecopy(dbenv, real_name, copy); if ((ret = __os_strdup(dbenv, real_name, &dir)) != 0) - goto out; + goto err; __os_free(dbenv, real_name); real_name = NULL; + /* * Create the name. Backup file names are of the form: * @@ -1276,7 +1286,7 @@ __db_testdocopy(dbenv, name) #endif __os_free(dbenv, dir); if (ret != 0) - goto out; + goto err; for (i = 0; i < dircnt; i++) { /* * Need to check if it is a backup file for this. @@ -1289,7 +1299,7 @@ __db_testdocopy(dbenv, name) if (strncmp(namesp[i], backup, strlen(backup)) == 0) { if ((ret = __db_appname(dbenv, DB_APP_DATA, namesp[i], 0, NULL, &real_name)) != 0) - goto out; + goto err; /* * This should not happen. 
Check that old @@ -1307,8 +1317,8 @@ __db_testdocopy(dbenv, name) real_name = NULL; } } -out: - if (backup != NULL) + +err: if (backup != NULL) __os_free(dbenv, backup); if (copy != NULL) __os_free(dbenv, copy); diff --git a/db/db/db.src b/db/db/db.src index d83441875..2cac31a4f 100644 --- a/db/db/db.src +++ b/db/db/db.src @@ -1,17 +1,15 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: db.src,v 11.24 2003/11/14 05:32:30 ubell Exp $ + * $Id: db.src,v 11.28 2004/06/17 17:35:15 bostic Exp $ */ PREFIX __db DBPRIVATE -INCLUDE #include "db_config.h" -INCLUDE INCLUDE #ifndef NO_SYSTEM_INCLUDES INCLUDE #include INCLUDE @@ -97,28 +95,6 @@ ARG adjust int32_t ld POINTER lsn DB_LSN * lu END -/* - * relink -- Handles relinking around a page. - * - * opcode: indicates if this is an addpage or delete page - * pgno: the page being changed. - * lsn the page's original lsn. - * prev: the previous page. - * lsn_prev: the previous page's original lsn. - * next: the next page. - * lsn_next: the previous page's original lsn. - */ -BEGIN relink 45 -ARG opcode u_int32_t lu -DB fileid int32_t ld -ARG pgno db_pgno_t lu -POINTER lsn DB_LSN * lu -ARG prev db_pgno_t lu -POINTER lsn_prev DB_LSN * lu -ARG next db_pgno_t lu -POINTER lsn_next DB_LSN * lu -END - /* * Debug -- log an operation upon entering an access method. * op: Operation (cursor, c_close, c_get, c_put, c_del, @@ -154,6 +130,7 @@ END * pgno: the page allocated. * ptype: the type of the page allocated. * next: the next page on the free list. + * last_pgno: the last page in the file after this op. */ BEGIN pg_alloc 49 DB fileid int32_t ld @@ -163,6 +140,7 @@ POINTER page_lsn DB_LSN * lu ARG pgno db_pgno_t lu ARG ptype u_int32_t lu ARG next db_pgno_t lu +ARG last_pgno db_pgno_t lu END /* @@ -173,6 +151,7 @@ END * meta_pgno: the meta-data page number. * header: the header from the free'd page. * next: the previous next pointer on the metadata page. + * last_pgno: the last page in the file before this op. */ BEGIN pg_free 50 DB fileid int32_t ld @@ -181,6 +160,7 @@ POINTER meta_lsn DB_LSN * lu ARG meta_pgno db_pgno_t lu PGDBT header DBT s ARG next db_pgno_t lu +ARG last_pgno db_pgno_t lu END /* @@ -202,6 +182,7 @@ END * header: the header and index entries from the free'd page. * data: the data from the free'd page. * next: the previous next pointer on the metadata page. + * last_pgno: the last page in the file before this op. */ BEGIN pg_freedata 52 DB fileid int32_t ld @@ -210,6 +191,7 @@ POINTER meta_lsn DB_LSN * lu ARG meta_pgno db_pgno_t lu PGDBT header DBT s ARG next db_pgno_t lu +ARG last_pgno db_pgno_t lu PGDBT data DBT s END @@ -240,3 +222,17 @@ ARG meta_pgno db_pgno_t lu PGDBT header DBT s ARG next db_pgno_t lu END + +/* + * pg_init: used to reinitialize a page during truncate. + * + * pgno: the page being initialized. + * header: the header from the page. + * data: data that used to be on the page. + */ +BEGIN pg_init 60 +DB fileid int32_t ld +ARG pgno db_pgno_t lu +PGDBT header DBT s +PGDBT data DBT s +END diff --git a/db/db/db_am.c b/db/db/db_am.c index 5fbc7837d..0e4a864f9 100644 --- a/db/db/db_am.c +++ b/db/db/db_am.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1998-2003 + * Copyright (c) 1998-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: db_am.c,v 11.120 2004/10/07 17:33:32 sue Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_am.c,v 11.112 2003/09/13 19:23:42 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -29,11 +27,6 @@ static const char revid[] = "$Id: db_am.c,v 11.112 2003/09/13 19:23:42 bostic Ex static int __db_append_primary __P((DBC *, DBT *, DBT *)); static int __db_secondary_get __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t)); -static int __db_secondary_close __P((DB *, u_int32_t)); - -#ifdef DEBUG -static int __db_cprint_item __P((DBC *)); -#endif /* * __db_cursor_int -- @@ -52,7 +45,7 @@ __db_cursor_int(dbp, txn, dbtype, root, is_opd, lockerid, dbcp) u_int32_t lockerid; DBC **dbcp; { - DBC *dbc, *adbc; + DBC *dbc; DBC_INTERNAL *cp; DB_ENV *dbenv; int allocated, ret; @@ -89,15 +82,22 @@ __db_cursor_int(dbp, txn, dbtype, root, is_opd, lockerid, dbcp) /* Set up locking information. */ if (LOCKING_ON(dbenv)) { /* - * If we are not threaded, then there is no need to - * create new locker ids. We know that no one else - * is running concurrently using this DB, so we can - * take a peek at any cursors on the active queue. + * If we are not threaded, we share a locker ID among + * all cursors opened in the environment handle, + * allocating one if this is the first cursor. + * + * This relies on the fact that non-threaded DB handles + * always have non-threaded environment handles, since + * we set DB_THREAD on DB handles created with threaded + * environment handles. */ - if (!DB_IS_THREADED(dbp) && - (adbc = TAILQ_FIRST(&dbp->active_queue)) != NULL) - dbc->lid = adbc->lid; - else { + if (!DB_IS_THREADED(dbp)) { + if (dbp->dbenv->env_lid == DB_LOCK_INVALIDID && + (ret = + __lock_id(dbenv,&dbp->dbenv->env_lid)) != 0) + goto err; + dbc->lid = dbp->dbenv->env_lid; + } else { if ((ret = __lock_id(dbenv, &dbc->lid)) != 0) goto err; F_SET(dbc, DBC_OWN_LID); @@ -201,10 +201,8 @@ __db_cursor_int(dbp, txn, dbtype, root, is_opd, lockerid, dbcp) dbc->locker = lockerid; else dbc->locker = dbc->lid; - } else { + } else dbc->locker = txn->txnid; - txn->cursors++; - } /* * These fields change when we are used as a secondary index, so @@ -247,6 +245,14 @@ __db_cursor_int(dbp, txn, dbtype, root, is_opd, lockerid, dbcp) goto err; } + /* + * The transaction keeps track of how many cursors were opened within + * it to catch application errors where the cursor isn't closed when + * the transaction is resolved. + */ + if (txn != NULL) + ++txn->cursors; + MUTEX_THREAD_LOCK(dbenv, dbp->mutexp); TAILQ_INSERT_TAIL(&dbp->active_queue, dbc, links); F_SET(dbc, DBC_ACTIVE); @@ -260,96 +266,6 @@ err: if (allocated) return (ret); } -#ifdef DEBUG -/* - * __db_cprint -- - * Display the cursor active and free queues. 
- * - * PUBLIC: int __db_cprint __P((DB *)); - */ -int -__db_cprint(dbp) - DB *dbp; -{ - DBC *dbc; - int ret, t_ret; - - ret = 0; - MUTEX_THREAD_LOCK(dbp->dbenv, dbp->mutexp); - fprintf(stderr, "Active queue:\n"); - for (dbc = TAILQ_FIRST(&dbp->active_queue); - dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) - if ((t_ret = __db_cprint_item(dbc)) != 0 && ret == 0) - ret = t_ret; - fprintf(stderr, "Join queue:\n"); - for (dbc = TAILQ_FIRST(&dbp->join_queue); - dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) - if ((t_ret = __db_cprint_item(dbc)) != 0 && ret == 0) - ret = t_ret; - fprintf(stderr, "Free queue:\n"); - for (dbc = TAILQ_FIRST(&dbp->free_queue); - dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) - if ((t_ret = __db_cprint_item(dbc)) != 0 && ret == 0) - ret = t_ret; - MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp); - - return (ret); -} - -static -int __db_cprint_item(dbc) - DBC *dbc; -{ - static const FN fn[] = { - { DBC_ACTIVE, "active" }, - { DBC_COMPENSATE, "compensate" }, - { DBC_OPD, "off-page-dup" }, - { DBC_RECOVER, "recover" }, - { DBC_RMW, "read-modify-write" }, - { DBC_TRANSIENT, "transient" }, - { DBC_WRITECURSOR, "write cursor" }, - { DBC_WRITER, "short-term write cursor" }, - { 0, NULL } - }; - DB *dbp; - DBC_INTERNAL *cp; - const char *s; - - dbp = dbc->dbp; - cp = dbc->internal; - - s = __db_dbtype_to_string(dbc->dbtype); - fprintf(stderr, "%s/%#lx: opd: %#lx\n", - s, P_TO_ULONG(dbc), P_TO_ULONG(cp->opd)); - - fprintf(stderr, "\ttxn: %#lx lid: %lu locker: %lu\n", - P_TO_ULONG(dbc->txn), (u_long)dbc->lid, (u_long)dbc->locker); - - fprintf(stderr, "\troot: %lu page/index: %lu/%lu", - (u_long)cp->root, (u_long)cp->pgno, (u_long)cp->indx); - - __db_prflags(dbc->flags, fn, stderr); - fprintf(stderr, "\n"); - - switch (dbp->type) { - case DB_BTREE: - case DB_RECNO: - __bam_cprint(dbc); - break; - case DB_HASH: - __ham_cprint(dbc); - break; - case DB_UNKNOWN: - DB_ASSERT(dbp->type != DB_UNKNOWN); - /* FALLTHROUGH */ - case DB_QUEUE: - default: - break; - } - return (0); -} -#endif /* DEBUG */ - /* * __db_put -- * Store a key/data pair. @@ -601,7 +517,7 @@ __db_sync(dbp) if (dbp->type == DB_RECNO) ret = __ram_writeback(dbp); - /* If the database was never backed by a databse file, we're done. */ + /* If the database was never backed by a database file, we're done. */ if (F_ISSET(dbp, DB_AM_INMEM)) return (ret); @@ -645,7 +561,7 @@ __db_associate(dbp, txn, sdbp, callback, flags) sdbp->get = __db_secondary_get; sdbp->stored_close = sdbp->close; - sdbp->close = __db_secondary_close; + sdbp->close = __db_secondary_close_pp; F_SET(sdbp, DB_AM_SECONDARY); @@ -771,7 +687,7 @@ __db_secondary_get(sdbp, txn, skey, data, flags) { DB_ASSERT(F_ISSET(sdbp, DB_AM_SECONDARY)); - return (__db_pget(sdbp, txn, skey, NULL, data, flags)); + return (__db_pget_pp(sdbp, txn, skey, NULL, data, flags)); } /* @@ -779,8 +695,10 @@ __db_secondary_get(sdbp, txn, skey, data, flags) * Wrapper function for DB->close() which we use on secondaries to * manage refcounting and make sure we don't close them underneath * a primary that is updating. + * + * PUBLIC: int __db_secondary_close __P((DB *, u_int32_t)); */ -static int +int __db_secondary_close(sdbp, flags) DB *sdbp; u_int32_t flags; diff --git a/db/db/db_auto.c b/db/db/db_auto.c index d1113d576..03002ff85 100644 --- a/db/db/db_auto.c +++ b/db/db/db_auto.c @@ -1,4 +1,5 @@ /* Do not edit: automatically built by gen_rec.awk. 
*/ + #include "db_config.h" #ifndef NO_SYSTEM_INCLUDES @@ -40,33 +41,42 @@ __db_addrem_log(dbp, txnid, ret_lsnp, flags, DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___db_addrem; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -86,27 +96,23 @@ __db_addrem_log(dbp, txnid, ret_lsnp, flags, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -177,141 +183,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
*/ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__db_addrem_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __db_addrem_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__db_addrem_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __db_addrem_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__db_addrem_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __db_addrem_args *argp; - u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __db_addrem_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__db_addrem%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\topcode: %lu\n", (u_long)argp->opcode); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tindx: %lu\n", (u_long)argp->indx); - (void)printf("\tnbytes: %lu\n", (u_long)argp->nbytes); - (void)printf("\thdr: "); - for (i = 0; i < argp->hdr.size; i++) { - ch = ((u_int8_t *)argp->hdr.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tdbt: "); - for (i = 0; i < argp->dbt.size; i++) { - ch = ((u_int8_t *)argp->dbt.data)[i]; - printf(isprint(ch) || ch == 0x0a ? 
"%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tpagelsn: [%lu][%lu]\n", - (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __db_addrem_read __P((DB_ENV *, void *, __db_addrem_args **)); */ @@ -329,9 +241,9 @@ __db_addrem_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__db_addrem_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -405,33 +317,42 @@ __db_big_log(dbp, txnid, ret_lsnp, flags, DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___db_big; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -452,27 +373,23 @@ __db_big_log(dbp, txnid, ret_lsnp, flags, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -544,139 +461,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
*/ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; #endif - } else{ -#ifdef DIAGNOSTIC -do_put: -#endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__db_big_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __db_big_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__db_big_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __db_big_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__db_big_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __db_big_args *argp; - u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __db_big_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__db_big%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\topcode: %lu\n", (u_long)argp->opcode); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tprev_pgno: %lu\n", (u_long)argp->prev_pgno); - (void)printf("\tnext_pgno: %lu\n", (u_long)argp->next_pgno); - (void)printf("\tdbt: "); - for (i = 0; i < argp->dbt.size; i++) { - ch = ((u_int8_t *)argp->dbt.data)[i]; - printf(isprint(ch) || ch == 0x0a ? 
"%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tpagelsn: [%lu][%lu]\n", - (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset); - (void)printf("\tprevlsn: [%lu][%lu]\n", - (u_long)argp->prevlsn.file, (u_long)argp->prevlsn.offset); - (void)printf("\tnextlsn: [%lu][%lu]\n", - (u_long)argp->nextlsn.file, (u_long)argp->nextlsn.offset); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __db_big_read __P((DB_ENV *, void *, __db_big_args **)); */ @@ -694,9 +519,9 @@ __db_big_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__db_big_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -762,33 +587,42 @@ __db_ovref_log(dbp, txnid, ret_lsnp, flags, pgno, adjust, lsn) DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___db_ovref; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -804,27 +638,23 @@ __db_ovref_log(dbp, txnid, ret_lsnp, flags, pgno, adjust, lsn) logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -865,125 +695,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
*/ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__db_ovref_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __db_ovref_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__db_ovref_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __db_ovref_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__db_ovref_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __db_ovref_args *argp; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __db_ovref_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__db_ovref%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? 
"_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tadjust: %ld\n", (long)argp->adjust); - (void)printf("\tlsn: [%lu][%lu]\n", - (u_long)argp->lsn.file, (u_long)argp->lsn.offset); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __db_ovref_read __P((DB_ENV *, void *, __db_ovref_args **)); */ @@ -1001,9 +753,9 @@ __db_ovref_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__db_ovref_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -1033,422 +785,91 @@ __db_ovref_read(dbenv, recbuf, argpp) } /* - * PUBLIC: int __db_relink_log __P((DB *, DB_TXN *, DB_LSN *, - * PUBLIC: u_int32_t, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, - * PUBLIC: db_pgno_t, DB_LSN *)); + * PUBLIC: int __db_debug_log __P((DB_ENV *, DB_TXN *, DB_LSN *, + * PUBLIC: u_int32_t, const DBT *, int32_t, const DBT *, const DBT *, + * PUBLIC: u_int32_t)); */ int -__db_relink_log(dbp, txnid, ret_lsnp, flags, - opcode, pgno, lsn, prev, lsn_prev, - next, lsn_next) - DB *dbp; +__db_debug_log(dbenv, txnid, ret_lsnp, flags, + op, fileid, key, data, arg_flags) + DB_ENV *dbenv; DB_TXN *txnid; DB_LSN *ret_lsnp; u_int32_t flags; - u_int32_t opcode; - db_pgno_t pgno; - DB_LSN * lsn; - db_pgno_t prev; - DB_LSN * lsn_prev; - db_pgno_t next; - DB_LSN * lsn_next; + const DBT *op; + int32_t fileid; + const DBT *key; + const DBT *data; + u_int32_t arg_flags; { DBT logrec; - DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; - u_int32_t uinttmp, rectype, txn_num; + DB_LSN *lsnp, null_lsn, *rlsnp; + u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; - dbenv = dbp->dbenv; - rectype = DB___db_relink; + COMPQUIET(lr, NULL); + + rectype = DB___db_debug; npad = 0; + rlsnp = ret_lsnp; - is_durable = 1; - if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || - F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) + ret = 0; + + if (LF_ISSET(DB_LOG_NOT_DURABLE)) { + if (txnid == NULL) return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { - if (TAILQ_FIRST(&txnid->kids) != NULL && - (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) - return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN) + + sizeof(u_int32_t) + (op == NULL ? 0 : op->size) + sizeof(u_int32_t) - + sizeof(u_int32_t) - + sizeof(u_int32_t) - + sizeof(*lsn) - + sizeof(u_int32_t) - + sizeof(*lsn_prev) - + sizeof(u_int32_t) - + sizeof(*lsn_next); + + sizeof(u_int32_t) + (key == NULL ? 0 : key->size) + + sizeof(u_int32_t) + (data == NULL ? 
0 : data->size) + + sizeof(u_int32_t); if (CRYPTO_ON(dbenv)) { npad = ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size); logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } - } - if (npad > 0) - memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); - - bp = logrec.data; - - memcpy(bp, &rectype, sizeof(rectype)); - bp += sizeof(rectype); - - memcpy(bp, &txn_num, sizeof(txn_num)); - bp += sizeof(txn_num); - - memcpy(bp, lsnp, sizeof(DB_LSN)); - bp += sizeof(DB_LSN); - - uinttmp = (u_int32_t)opcode; - memcpy(bp, &uinttmp, sizeof(uinttmp)); - bp += sizeof(uinttmp); - - DB_ASSERT(dbp->log_filename != NULL); - if (dbp->log_filename->id == DB_LOGFILEID_INVALID && - (ret = __dbreg_lazy_id(dbp)) != 0) - return (ret); - - uinttmp = (u_int32_t)dbp->log_filename->id; - memcpy(bp, &uinttmp, sizeof(uinttmp)); - bp += sizeof(uinttmp); - - uinttmp = (u_int32_t)pgno; - memcpy(bp, &uinttmp, sizeof(uinttmp)); - bp += sizeof(uinttmp); - - if (lsn != NULL) - memcpy(bp, lsn, sizeof(*lsn)); - else - memset(bp, 0, sizeof(*lsn)); - bp += sizeof(*lsn); - - uinttmp = (u_int32_t)prev; - memcpy(bp, &uinttmp, sizeof(uinttmp)); - bp += sizeof(uinttmp); - - if (lsn_prev != NULL) - memcpy(bp, lsn_prev, sizeof(*lsn_prev)); - else - memset(bp, 0, sizeof(*lsn_prev)); - bp += sizeof(*lsn_prev); - - uinttmp = (u_int32_t)next; - memcpy(bp, &uinttmp, sizeof(uinttmp)); - bp += sizeof(uinttmp); - - if (lsn_next != NULL) - memcpy(bp, lsn_next, sizeof(*lsn_next)); - else - memset(bp, 0, sizeof(*lsn_next)); - bp += sizeof(*lsn_next); - - DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); - -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. 
- */ - memcpy(lr->data, logrec.data, logrec.size); - rectype |= DB_debug_FLAG; - memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - - if (!is_durable && txnid != NULL) { - ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: -#endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; - } - - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); -#ifdef LOG_DIAGNOSTIC - if (ret != 0) - (void)__db_relink_print(dbenv, - (DBT *)&logrec, ret_lsnp, NULL, NULL); -#endif -#ifndef DIAGNOSTIC - if (is_durable || txnid == NULL) -#endif - __os_free(dbenv, logrec.data); - - return (ret); -} - -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __db_relink_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__db_relink_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __db_relink_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__db_relink_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __db_relink_args *argp; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __db_relink_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__db_relink%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? 
"_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\topcode: %lu\n", (u_long)argp->opcode); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tlsn: [%lu][%lu]\n", - (u_long)argp->lsn.file, (u_long)argp->lsn.offset); - (void)printf("\tprev: %lu\n", (u_long)argp->prev); - (void)printf("\tlsn_prev: [%lu][%lu]\n", - (u_long)argp->lsn_prev.file, (u_long)argp->lsn_prev.offset); - (void)printf("\tnext: %lu\n", (u_long)argp->next); - (void)printf("\tlsn_next: [%lu][%lu]\n", - (u_long)argp->lsn_next.file, (u_long)argp->lsn_next.offset); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - -/* - * PUBLIC: int __db_relink_read __P((DB_ENV *, void *, __db_relink_args **)); - */ -int -__db_relink_read(dbenv, recbuf, argpp) - DB_ENV *dbenv; - void *recbuf; - __db_relink_args **argpp; -{ - __db_relink_args *argp; - u_int32_t uinttmp; - u_int8_t *bp; - int ret; - - if ((ret = __os_malloc(dbenv, - sizeof(__db_relink_args) + sizeof(DB_TXN), &argp)) != 0) - return (ret); - argp->txnid = (DB_TXN *)&argp[1]; - - bp = recbuf; - memcpy(&argp->type, bp, sizeof(argp->type)); - bp += sizeof(argp->type); - - memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid)); - bp += sizeof(argp->txnid->txnid); - - memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN)); - bp += sizeof(DB_LSN); - - memcpy(&uinttmp, bp, sizeof(uinttmp)); - argp->opcode = (u_int32_t)uinttmp; - bp += sizeof(uinttmp); - - memcpy(&uinttmp, bp, sizeof(uinttmp)); - argp->fileid = (int32_t)uinttmp; - bp += sizeof(uinttmp); - - memcpy(&uinttmp, bp, sizeof(uinttmp)); - argp->pgno = (db_pgno_t)uinttmp; - bp += sizeof(uinttmp); - - memcpy(&argp->lsn, bp, sizeof(argp->lsn)); - bp += sizeof(argp->lsn); - - memcpy(&uinttmp, bp, sizeof(uinttmp)); - argp->prev = (db_pgno_t)uinttmp; - bp += sizeof(uinttmp); - - memcpy(&argp->lsn_prev, bp, sizeof(argp->lsn_prev)); - bp += sizeof(argp->lsn_prev); - - memcpy(&uinttmp, bp, sizeof(uinttmp)); - argp->next = (db_pgno_t)uinttmp; - bp += sizeof(uinttmp); - - memcpy(&argp->lsn_next, bp, sizeof(argp->lsn_next)); - bp += sizeof(argp->lsn_next); - - *argpp = argp; - return (0); -} - -/* - * PUBLIC: int __db_debug_log __P((DB_ENV *, DB_TXN *, DB_LSN *, - * PUBLIC: u_int32_t, const DBT *, int32_t, const DBT *, const DBT *, - * PUBLIC: u_int32_t)); - */ -int -__db_debug_log(dbenv, txnid, ret_lsnp, flags, - op, fileid, key, data, arg_flags) - DB_ENV *dbenv; - DB_TXN *txnid; - DB_LSN *ret_lsnp; - u_int32_t flags; - const DBT *op; - int32_t fileid; - const DBT *key; - const DBT *data; - u_int32_t arg_flags; -{ - DBT logrec; - DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; - u_int32_t zero, uinttmp, rectype, txn_num; - u_int npad; - u_int8_t *bp; - int is_durable, ret; - - rectype = DB___db_debug; - npad = 0; - - is_durable = 1; - if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE)) { - if (txnid == NULL) - return (0); - is_durable = 0; - } - if (txnid == NULL) { - txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; - lsnp = &null_lsn; - } else { - txn_num = txnid->txnid; - lsnp = &txnid->last_lsn; - } - - logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN) - + sizeof(u_int32_t) + (op == NULL ? 0 : op->size) - + sizeof(u_int32_t) - + sizeof(u_int32_t) + (key == NULL ? 0 : key->size) - + sizeof(u_int32_t) + (data == NULL ? 
0 : data->size) - + sizeof(u_int32_t); - if (CRYPTO_ON(dbenv)) { - npad = - ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size); - logrec.size += npad; - } - - if (!is_durable && txnid != NULL) { - if ((ret = __os_malloc(dbenv, - logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) - return (ret); -#ifdef DIAGNOSTIC - goto do_malloc; #else - logrec.data = &lr->data; + logrec.data = lr->data; #endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif - if ((ret = - __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif - return (ret); - } } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -1507,142 +928,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; #endif - } else{ -#ifdef DIAGNOSTIC -do_put: -#endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__db_debug_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __db_debug_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__db_debug_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __db_debug_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__db_debug_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __db_debug_args *argp; - u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __db_debug_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__db_debug%s: rec: %lu 
txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\top: "); - for (i = 0; i < argp->op.size; i++) { - ch = ((u_int8_t *)argp->op.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tkey: "); - for (i = 0; i < argp->key.size; i++) { - ch = ((u_int8_t *)argp->key.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tdata: "); - for (i = 0; i < argp->data.size; i++) { - ch = ((u_int8_t *)argp->data.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\targ_flags: %lu\n", (u_long)argp->arg_flags); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __db_debug_read __P((DB_ENV *, void *, __db_debug_args **)); */ @@ -1660,9 +986,9 @@ __db_debug_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__db_debug_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -1718,33 +1044,42 @@ __db_noop_log(dbp, txnid, ret_lsnp, flags, pgno, prevlsn) DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___db_noop; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -1759,27 +1094,23 @@ __db_noop_log(dbp, txnid, ret_lsnp, flags, pgno, prevlsn) logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -1816,122 +1147,45 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { - ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: -#endif ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; - } - - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); -#ifdef LOG_DIAGNOSTIC - if (ret != 0) - (void)__db_noop_print(dbenv, - (DBT *)&logrec, ret_lsnp, NULL, NULL); -#endif -#ifndef DIAGNOSTIC - if (is_durable || txnid == NULL) + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else + ret = 0; #endif - __os_free(dbenv, logrec.data); - - return (ret); -} - -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __db_noop_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__db_noop_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __db_noop_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__db_noop_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __db_noop_args *argp; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); + } - if ((ret = __db_noop_read(dbenv, 
dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__db_noop%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tprevlsn: [%lu][%lu]\n", - (u_long)argp->prevlsn.file, (u_long)argp->prevlsn.offset); - (void)printf("\n"); - __os_free(dbenv, argp); +#ifdef LOG_DIAGNOSTIC + if (ret != 0) + (void)__db_noop_print(dbenv, + (DBT *)&logrec, ret_lsnp, NULL, NULL); +#endif - return (0); +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else + if (is_durable || txnid == NULL) + __os_free(dbenv, logrec.data); +#endif + return (ret); } /* @@ -1951,9 +1205,9 @@ __db_noop_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__db_noop_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -1981,11 +1235,11 @@ __db_noop_read(dbenv, recbuf, argpp) /* * PUBLIC: int __db_pg_alloc_log __P((DB *, DB_TXN *, DB_LSN *, * PUBLIC: u_int32_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, u_int32_t, - * PUBLIC: db_pgno_t)); + * PUBLIC: db_pgno_t, db_pgno_t)); */ int __db_pg_alloc_log(dbp, txnid, ret_lsnp, flags, meta_lsn, meta_pgno, page_lsn, pgno, ptype, - next) + next, last_pgno) DB *dbp; DB_TXN *txnid; DB_LSN *ret_lsnp; @@ -1996,37 +1250,47 @@ __db_pg_alloc_log(dbp, txnid, ret_lsnp, flags, meta_lsn, meta_pgno, page_lsn, pg db_pgno_t pgno; u_int32_t ptype; db_pgno_t next; + db_pgno_t last_pgno; { DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___db_pg_alloc; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -2038,6 +1302,7 @@ __db_pg_alloc_log(dbp, txnid, ret_lsnp, flags, meta_lsn, meta_pgno, page_lsn, pg + sizeof(*page_lsn) + sizeof(u_int32_t) + sizeof(u_int32_t) + + sizeof(u_int32_t) + sizeof(u_int32_t); if (CRYPTO_ON(dbenv)) { npad = @@ -2045,27 +1310,23 @@ __db_pg_alloc_log(dbp, txnid, ret_lsnp, flags, meta_lsn, meta_pgno, page_lsn, pg logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -2118,131 +1379,53 @@ do_malloc: memcpy(bp, &uinttmp, sizeof(uinttmp)); bp += sizeof(uinttmp); + uinttmp = (u_int32_t)last_pgno; + memcpy(bp, &uinttmp, sizeof(uinttmp)); + bp += sizeof(uinttmp); + DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
*/ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__db_pg_alloc_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __db_pg_alloc_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__db_pg_alloc_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __db_pg_alloc_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__db_pg_alloc_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __db_pg_alloc_args *argp; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __db_pg_alloc_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__db_pg_alloc%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? 
"_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tmeta_lsn: [%lu][%lu]\n", - (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset); - (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno); - (void)printf("\tpage_lsn: [%lu][%lu]\n", - (u_long)argp->page_lsn.file, (u_long)argp->page_lsn.offset); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tptype: %lu\n", (u_long)argp->ptype); - (void)printf("\tnext: %lu\n", (u_long)argp->next); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __db_pg_alloc_read __P((DB_ENV *, void *, * PUBLIC: __db_pg_alloc_args **)); @@ -2261,9 +1444,9 @@ __db_pg_alloc_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__db_pg_alloc_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -2299,6 +1482,10 @@ __db_pg_alloc_read(dbenv, recbuf, argpp) argp->next = (db_pgno_t)uinttmp; bp += sizeof(uinttmp); + memcpy(&uinttmp, bp, sizeof(uinttmp)); + argp->last_pgno = (db_pgno_t)uinttmp; + bp += sizeof(uinttmp); + *argpp = argp; return (0); } @@ -2306,10 +1493,11 @@ __db_pg_alloc_read(dbenv, recbuf, argpp) /* * PUBLIC: int __db_pg_free_log __P((DB *, DB_TXN *, DB_LSN *, * PUBLIC: u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, const DBT *, - * PUBLIC: db_pgno_t)); + * PUBLIC: db_pgno_t, db_pgno_t)); */ int -__db_pg_free_log(dbp, txnid, ret_lsnp, flags, pgno, meta_lsn, meta_pgno, header, next) +__db_pg_free_log(dbp, txnid, ret_lsnp, flags, pgno, meta_lsn, meta_pgno, header, next, + last_pgno) DB *dbp; DB_TXN *txnid; DB_LSN *ret_lsnp; @@ -2319,37 +1507,47 @@ __db_pg_free_log(dbp, txnid, ret_lsnp, flags, pgno, meta_lsn, meta_pgno, header, db_pgno_t meta_pgno; const DBT *header; db_pgno_t next; + db_pgno_t last_pgno; { DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___db_pg_free; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -2360,6 +1558,7 @@ __db_pg_free_log(dbp, txnid, ret_lsnp, flags, pgno, meta_lsn, meta_pgno, header, + sizeof(*meta_lsn) + sizeof(u_int32_t) + sizeof(u_int32_t) + (header == NULL ? 
0 : header->size) + + sizeof(u_int32_t) + sizeof(u_int32_t); if (CRYPTO_ON(dbenv)) { npad = @@ -2367,27 +1566,23 @@ __db_pg_free_log(dbp, txnid, ret_lsnp, flags, pgno, meta_lsn, meta_pgno, header, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -2441,136 +1636,53 @@ do_malloc: memcpy(bp, &uinttmp, sizeof(uinttmp)); bp += sizeof(uinttmp); + uinttmp = (u_int32_t)last_pgno; + memcpy(bp, &uinttmp, sizeof(uinttmp)); + bp += sizeof(uinttmp); + DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__db_pg_free_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __db_pg_free_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__db_pg_free_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __db_pg_free_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int 
-__db_pg_free_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __db_pg_free_args *argp; - u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __db_pg_free_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__db_pg_free%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tmeta_lsn: [%lu][%lu]\n", - (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset); - (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno); - (void)printf("\theader: "); - for (i = 0; i < argp->header.size; i++) { - ch = ((u_int8_t *)argp->header.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tnext: %lu\n", (u_long)argp->next); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __db_pg_free_read __P((DB_ENV *, void *, __db_pg_free_args **)); */ @@ -2588,9 +1700,9 @@ __db_pg_free_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__db_pg_free_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -2625,6 +1737,10 @@ __db_pg_free_read(dbenv, recbuf, argpp) argp->next = (db_pgno_t)uinttmp; bp += sizeof(uinttmp); + memcpy(&uinttmp, bp, sizeof(uinttmp)); + argp->last_pgno = (db_pgno_t)uinttmp; + bp += sizeof(uinttmp); + *argpp = argp; return (0); } @@ -2641,31 +1757,42 @@ __db_cksum_log(dbenv, txnid, ret_lsnp, flags) { DBT logrec; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; + COMPQUIET(lr, NULL); + rectype = DB___db_cksum; npad = 0; + rlsnp = ret_lsnp; - is_durable = 1; - if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE)) { + ret = 0; + + if (LF_ISSET(DB_LOG_NOT_DURABLE)) { if (txnid == NULL) return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -2677,27 +1804,23 @@ __db_cksum_log(dbenv, txnid, ret_lsnp, flags) logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -2715,120 +1838,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__db_cksum_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __db_cksum_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__db_cksum_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __db_cksum_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__db_cksum_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __db_cksum_args *argp; - int ret; - - notused2 = DB_TXN_ABORT; - 
notused3 = NULL; - - if ((ret = __db_cksum_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__db_cksum%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __db_cksum_read __P((DB_ENV *, void *, __db_cksum_args **)); */ @@ -2845,9 +1895,9 @@ __db_cksum_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__db_cksum_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -2864,11 +1914,11 @@ __db_cksum_read(dbenv, recbuf, argpp) /* * PUBLIC: int __db_pg_freedata_log __P((DB *, DB_TXN *, DB_LSN *, * PUBLIC: u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, const DBT *, - * PUBLIC: db_pgno_t, const DBT *)); + * PUBLIC: db_pgno_t, db_pgno_t, const DBT *)); */ int __db_pg_freedata_log(dbp, txnid, ret_lsnp, flags, pgno, meta_lsn, meta_pgno, header, next, - data) + last_pgno, data) DB *dbp; DB_TXN *txnid; DB_LSN *ret_lsnp; @@ -2878,38 +1928,48 @@ __db_pg_freedata_log(dbp, txnid, ret_lsnp, flags, pgno, meta_lsn, meta_pgno, hea db_pgno_t meta_pgno; const DBT *header; db_pgno_t next; + db_pgno_t last_pgno; const DBT *data; { DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___db_pg_freedata; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -2921,6 +1981,7 @@ __db_pg_freedata_log(dbp, txnid, ret_lsnp, flags, pgno, meta_lsn, meta_pgno, hea + sizeof(u_int32_t) + sizeof(u_int32_t) + (header == NULL ? 0 : header->size) + sizeof(u_int32_t) + + sizeof(u_int32_t) + sizeof(u_int32_t) + (data == NULL ? 
0 : data->size); if (CRYPTO_ON(dbenv)) { npad = @@ -2928,27 +1989,23 @@ __db_pg_freedata_log(dbp, txnid, ret_lsnp, flags, pgno, meta_lsn, meta_pgno, hea logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -3002,6 +2059,10 @@ do_malloc: memcpy(bp, &uinttmp, sizeof(uinttmp)); bp += sizeof(uinttmp); + uinttmp = (u_int32_t)last_pgno; + memcpy(bp, &uinttmp, sizeof(uinttmp)); + bp += sizeof(uinttmp); + if (data == NULL) { zero = 0; memcpy(bp, &zero, sizeof(u_int32_t)); @@ -3015,140 +2076,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
*/ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__db_pg_freedata_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __db_pg_freedata_getpgnos __P((DB_ENV *, DBT *, - * PUBLIC: DB_LSN *, db_recops, void *)); - */ -int -__db_pg_freedata_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __db_pg_freedata_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__db_pg_freedata_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __db_pg_freedata_args *argp; - u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __db_pg_freedata_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__db_pg_freedata%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tmeta_lsn: [%lu][%lu]\n", - (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset); - (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno); - (void)printf("\theader: "); - for (i = 0; i < argp->header.size; i++) { - ch = ((u_int8_t *)argp->header.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tnext: %lu\n", (u_long)argp->next); - (void)printf("\tdata: "); - for (i = 0; i < argp->data.size; i++) { - ch = ((u_int8_t *)argp->data.data)[i]; - printf(isprint(ch) || ch == 0x0a ? 
"%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __db_pg_freedata_read __P((DB_ENV *, void *, * PUBLIC: __db_pg_freedata_args **)); @@ -3167,9 +2135,9 @@ __db_pg_freedata_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__db_pg_freedata_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -3204,6 +2172,10 @@ __db_pg_freedata_read(dbenv, recbuf, argpp) argp->next = (db_pgno_t)uinttmp; bp += sizeof(uinttmp); + memcpy(&uinttmp, bp, sizeof(uinttmp)); + argp->last_pgno = (db_pgno_t)uinttmp; + bp += sizeof(uinttmp); + memset(&argp->data, 0, sizeof(argp->data)); memcpy(&argp->data.size, bp, sizeof(u_int32_t)); bp += sizeof(u_int32_t); @@ -3229,33 +2201,42 @@ __db_pg_prepare_log(dbp, txnid, ret_lsnp, flags, pgno) DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___db_pg_prepare; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -3269,27 +2250,23 @@ __db_pg_prepare_log(dbp, txnid, ret_lsnp, flags, pgno) logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -3320,143 +2297,295 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
*/ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; #endif - } else{ -#ifdef DIAGNOSTIC -do_put: -#endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__db_pg_prepare_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION /* - * PUBLIC: int __db_pg_prepare_getpgnos __P((DB_ENV *, DBT *, - * PUBLIC: DB_LSN *, db_recops, void *)); + * PUBLIC: int __db_pg_prepare_read __P((DB_ENV *, void *, + * PUBLIC: __db_pg_prepare_args **)); */ int -__db_pg_prepare_getpgnos(dbenv, rec, lsnp, notused1, summary) +__db_pg_prepare_read(dbenv, recbuf, argpp) DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; + void *recbuf; + __db_pg_prepare_args **argpp; { - TXN_RECS *t; + __db_pg_prepare_args *argp; + u_int32_t uinttmp; + u_int8_t *bp; int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) + if ((ret = __os_malloc(dbenv, + sizeof(__db_pg_prepare_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; + argp->txnid = (DB_TXN *)&argp[1]; + + memcpy(&argp->type, bp, sizeof(argp->type)); + bp += sizeof(argp->type); - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); + memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid)); + bp += sizeof(argp->txnid->txnid); + + memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN)); + bp += sizeof(DB_LSN); + + memcpy(&uinttmp, bp, sizeof(uinttmp)); + argp->fileid = (int32_t)uinttmp; + bp += sizeof(uinttmp); - t->npages++; + memcpy(&uinttmp, bp, sizeof(uinttmp)); + argp->pgno = (db_pgno_t)uinttmp; + bp += sizeof(uinttmp); + *argpp = argp; return (0); } -#endif /* HAVE_REPLICATION */ /* - * PUBLIC: int __db_pg_prepare_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); + * PUBLIC: int __db_pg_new_log __P((DB *, DB_TXN *, DB_LSN *, + * PUBLIC: u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, const DBT *, + * PUBLIC: db_pgno_t)); */ int -__db_pg_prepare_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; +__db_pg_new_log(dbp, txnid, ret_lsnp, flags, pgno, meta_lsn, meta_pgno, header, next) + DB *dbp; + DB_TXN *txnid; + DB_LSN *ret_lsnp; + u_int32_t flags; + db_pgno_t pgno; + DB_LSN * meta_lsn; + db_pgno_t meta_pgno; + const DBT *header; + db_pgno_t next; { - __db_pg_prepare_args *argp; - int ret; + DBT logrec; + DB_ENV *dbenv; + DB_TXNLOGREC *lr; + DB_LSN *lsnp, null_lsn, *rlsnp; + u_int32_t zero, uinttmp, rectype, txn_num; + u_int npad; + u_int8_t *bp; + int is_durable, ret; + + dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + + rectype = DB___db_pg_new; + npad = 
0; + rlsnp = ret_lsnp; + + ret = 0; + + if (LF_ISSET(DB_LOG_NOT_DURABLE) || + F_ISSET(dbp, DB_AM_NOT_DURABLE)) { + is_durable = 0; + } else + is_durable = 1; + + if (txnid == NULL) { + txn_num = 0; + lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; + } else { + if (TAILQ_FIRST(&txnid->kids) != NULL && + (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) + return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); + txn_num = txnid->txnid; + lsnp = &txnid->last_lsn; + } + + logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN) + + sizeof(u_int32_t) + + sizeof(u_int32_t) + + sizeof(*meta_lsn) + + sizeof(u_int32_t) + + sizeof(u_int32_t) + (header == NULL ? 0 : header->size) + + sizeof(u_int32_t); + if (CRYPTO_ON(dbenv)) { + npad = + ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size); + logrec.size += npad; + } + + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { + if ((ret = __os_malloc(dbenv, + logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) + return (ret); +#ifdef DIAGNOSTIC + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { + __os_free(dbenv, lr); + return (ret); + } +#else + logrec.data = lr->data; +#endif + } + if (npad > 0) + memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); + + bp = logrec.data; + + memcpy(bp, &rectype, sizeof(rectype)); + bp += sizeof(rectype); + + memcpy(bp, &txn_num, sizeof(txn_num)); + bp += sizeof(txn_num); + + memcpy(bp, lsnp, sizeof(DB_LSN)); + bp += sizeof(DB_LSN); + + DB_ASSERT(dbp->log_filename != NULL); + if (dbp->log_filename->id == DB_LOGFILEID_INVALID && + (ret = __dbreg_lazy_id(dbp)) != 0) + return (ret); + + uinttmp = (u_int32_t)dbp->log_filename->id; + memcpy(bp, &uinttmp, sizeof(uinttmp)); + bp += sizeof(uinttmp); + + uinttmp = (u_int32_t)pgno; + memcpy(bp, &uinttmp, sizeof(uinttmp)); + bp += sizeof(uinttmp); + + if (meta_lsn != NULL) + memcpy(bp, meta_lsn, sizeof(*meta_lsn)); + else + memset(bp, 0, sizeof(*meta_lsn)); + bp += sizeof(*meta_lsn); + + uinttmp = (u_int32_t)meta_pgno; + memcpy(bp, &uinttmp, sizeof(uinttmp)); + bp += sizeof(uinttmp); + + if (header == NULL) { + zero = 0; + memcpy(bp, &zero, sizeof(u_int32_t)); + bp += sizeof(u_int32_t); + } else { + memcpy(bp, &header->size, sizeof(header->size)); + bp += sizeof(header->size); + memcpy(bp, header->data, header->size); + bp += header->size; + } + + uinttmp = (u_int32_t)next; + memcpy(bp, &uinttmp, sizeof(uinttmp)); + bp += sizeof(uinttmp); + + DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { +#ifdef DIAGNOSTIC + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
+ */ + memcpy(lr->data, logrec.data, logrec.size); + rectype |= DB_debug_FLAG; + memcpy(logrec.data, &rectype, sizeof(rectype)); - notused2 = DB_TXN_ABORT; - notused3 = NULL; + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else + ret = 0; +#endif + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); + } - if ((ret = __db_pg_prepare_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__db_pg_prepare%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\n"); - __os_free(dbenv, argp); +#ifdef LOG_DIAGNOSTIC + if (ret != 0) + (void)__db_pg_new_print(dbenv, + (DBT *)&logrec, ret_lsnp, NULL, NULL); +#endif - return (0); +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else + if (is_durable || txnid == NULL) + __os_free(dbenv, logrec.data); +#endif + return (ret); } /* - * PUBLIC: int __db_pg_prepare_read __P((DB_ENV *, void *, - * PUBLIC: __db_pg_prepare_args **)); + * PUBLIC: int __db_pg_new_read __P((DB_ENV *, void *, __db_pg_new_args **)); */ int -__db_pg_prepare_read(dbenv, recbuf, argpp) +__db_pg_new_read(dbenv, recbuf, argpp) DB_ENV *dbenv; void *recbuf; - __db_pg_prepare_args **argpp; + __db_pg_new_args **argpp; { - __db_pg_prepare_args *argp; + __db_pg_new_args *argp; u_int32_t uinttmp; u_int8_t *bp; int ret; if ((ret = __os_malloc(dbenv, - sizeof(__db_pg_prepare_args) + sizeof(DB_TXN), &argp)) != 0) + sizeof(__db_pg_new_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -3474,57 +2603,80 @@ __db_pg_prepare_read(dbenv, recbuf, argpp) argp->pgno = (db_pgno_t)uinttmp; bp += sizeof(uinttmp); + memcpy(&argp->meta_lsn, bp, sizeof(argp->meta_lsn)); + bp += sizeof(argp->meta_lsn); + + memcpy(&uinttmp, bp, sizeof(uinttmp)); + argp->meta_pgno = (db_pgno_t)uinttmp; + bp += sizeof(uinttmp); + + memset(&argp->header, 0, sizeof(argp->header)); + memcpy(&argp->header.size, bp, sizeof(u_int32_t)); + bp += sizeof(u_int32_t); + argp->header.data = bp; + bp += argp->header.size; + + memcpy(&uinttmp, bp, sizeof(uinttmp)); + argp->next = (db_pgno_t)uinttmp; + bp += sizeof(uinttmp); + *argpp = argp; return (0); } /* - * PUBLIC: int __db_pg_new_log __P((DB *, DB_TXN *, DB_LSN *, - * PUBLIC: u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, const DBT *, - * PUBLIC: db_pgno_t)); + * PUBLIC: int __db_pg_init_log __P((DB *, DB_TXN *, DB_LSN *, + * PUBLIC: u_int32_t, db_pgno_t, const DBT *, const DBT *)); */ int -__db_pg_new_log(dbp, txnid, ret_lsnp, flags, pgno, meta_lsn, meta_pgno, header, next) +__db_pg_init_log(dbp, txnid, ret_lsnp, flags, pgno, header, data) DB *dbp; DB_TXN *txnid; DB_LSN *ret_lsnp; u_int32_t flags; db_pgno_t pgno; - DB_LSN * meta_lsn; - db_pgno_t meta_pgno; const DBT *header; - db_pgno_t next; + const DBT *data; { DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; - rectype = DB___db_pg_new; + COMPQUIET(lr, NULL); + + rectype = DB___db_pg_init; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - 
is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -3532,37 +2684,31 @@ __db_pg_new_log(dbp, txnid, ret_lsnp, flags, pgno, meta_lsn, meta_pgno, header, logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN) + sizeof(u_int32_t) + sizeof(u_int32_t) - + sizeof(*meta_lsn) - + sizeof(u_int32_t) + sizeof(u_int32_t) + (header == NULL ? 0 : header->size) - + sizeof(u_int32_t); + + sizeof(u_int32_t) + (data == NULL ? 0 : data->size); if (CRYPTO_ON(dbenv)) { npad = ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size); logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -3591,16 +2737,6 @@ do_malloc: memcpy(bp, &uinttmp, sizeof(uinttmp)); bp += sizeof(uinttmp); - if (meta_lsn != NULL) - memcpy(bp, meta_lsn, sizeof(*meta_lsn)); - else - memset(bp, 0, sizeof(*meta_lsn)); - bp += sizeof(*meta_lsn); - - uinttmp = (u_int32_t)meta_pgno; - memcpy(bp, &uinttmp, sizeof(uinttmp)); - bp += sizeof(uinttmp); - if (header == NULL) { zero = 0; memcpy(bp, &zero, sizeof(u_int32_t)); @@ -3612,160 +2748,80 @@ do_malloc: bp += header->size; } - uinttmp = (u_int32_t)next; - memcpy(bp, &uinttmp, sizeof(uinttmp)); - bp += sizeof(uinttmp); + if (data == NULL) { + zero = 0; + memcpy(bp, &zero, sizeof(u_int32_t)); + bp += sizeof(u_int32_t); + } else { + memcpy(bp, &data->size, sizeof(data->size)); + bp += sizeof(data->size); + memcpy(bp, data->data, data->size); + bp += data->size; + } DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
*/ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) - (void)__db_pg_new_print(dbenv, + (void)__db_pg_init_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __db_pg_new_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__db_pg_new_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __db_pg_new_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__db_pg_new_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __db_pg_new_args *argp; - u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __db_pg_new_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__db_pg_new%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tmeta_lsn: [%lu][%lu]\n", - (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset); - (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno); - (void)printf("\theader: "); - for (i = 0; i < argp->header.size; i++) { - ch = ((u_int8_t *)argp->header.data)[i]; - printf(isprint(ch) || ch == 0x0a ? 
"%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tnext: %lu\n", (u_long)argp->next); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* - * PUBLIC: int __db_pg_new_read __P((DB_ENV *, void *, __db_pg_new_args **)); + * PUBLIC: int __db_pg_init_read __P((DB_ENV *, void *, __db_pg_init_args **)); */ int -__db_pg_new_read(dbenv, recbuf, argpp) +__db_pg_init_read(dbenv, recbuf, argpp) DB_ENV *dbenv; void *recbuf; - __db_pg_new_args **argpp; + __db_pg_init_args **argpp; { - __db_pg_new_args *argp; + __db_pg_init_args *argp; u_int32_t uinttmp; u_int8_t *bp; int ret; if ((ret = __os_malloc(dbenv, - sizeof(__db_pg_new_args) + sizeof(DB_TXN), &argp)) != 0) + sizeof(__db_pg_init_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -3783,131 +2839,22 @@ __db_pg_new_read(dbenv, recbuf, argpp) argp->pgno = (db_pgno_t)uinttmp; bp += sizeof(uinttmp); - memcpy(&argp->meta_lsn, bp, sizeof(argp->meta_lsn)); - bp += sizeof(argp->meta_lsn); - - memcpy(&uinttmp, bp, sizeof(uinttmp)); - argp->meta_pgno = (db_pgno_t)uinttmp; - bp += sizeof(uinttmp); - memset(&argp->header, 0, sizeof(argp->header)); memcpy(&argp->header.size, bp, sizeof(u_int32_t)); bp += sizeof(u_int32_t); argp->header.data = bp; bp += argp->header.size; - memcpy(&uinttmp, bp, sizeof(uinttmp)); - argp->next = (db_pgno_t)uinttmp; - bp += sizeof(uinttmp); + memset(&argp->data, 0, sizeof(argp->data)); + memcpy(&argp->data.size, bp, sizeof(u_int32_t)); + bp += sizeof(u_int32_t); + argp->data.data = bp; + bp += argp->data.size; *argpp = argp; return (0); } -/* - * PUBLIC: int __db_init_print __P((DB_ENV *, int (***)(DB_ENV *, - * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); - */ -int -__db_init_print(dbenv, dtabp, dtabsizep) - DB_ENV *dbenv; - int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - size_t *dtabsizep; -{ - int ret; - - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_addrem_print, DB___db_addrem)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_big_print, DB___db_big)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_ovref_print, DB___db_ovref)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_relink_print, DB___db_relink)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_debug_print, DB___db_debug)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_noop_print, DB___db_noop)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_pg_alloc_print, DB___db_pg_alloc)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_pg_free_print, DB___db_pg_free)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_cksum_print, DB___db_cksum)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_pg_freedata_print, DB___db_pg_freedata)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_pg_prepare_print, DB___db_pg_prepare)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_pg_new_print, DB___db_pg_new)) != 0) - return (ret); - return (0); -} - -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __db_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, - * PUBLIC: DBT *, DB_LSN *, 
db_recops, void *), size_t *)); - */ -int -__db_init_getpgnos(dbenv, dtabp, dtabsizep) - DB_ENV *dbenv; - int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - size_t *dtabsizep; -{ - int ret; - - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_addrem_getpgnos, DB___db_addrem)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_big_getpgnos, DB___db_big)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_ovref_getpgnos, DB___db_ovref)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_relink_getpgnos, DB___db_relink)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_debug_getpgnos, DB___db_debug)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_noop_getpgnos, DB___db_noop)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_pg_alloc_getpgnos, DB___db_pg_alloc)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_pg_free_getpgnos, DB___db_pg_free)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_cksum_getpgnos, DB___db_cksum)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_pg_freedata_getpgnos, DB___db_pg_freedata)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_pg_prepare_getpgnos, DB___db_pg_prepare)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_pg_new_getpgnos, DB___db_pg_new)) != 0) - return (ret); - return (0); -} -#endif /* HAVE_REPLICATION */ - /* * PUBLIC: int __db_init_recover __P((DB_ENV *, int (***)(DB_ENV *, * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); @@ -3929,9 +2876,6 @@ __db_init_recover(dbenv, dtabp, dtabsizep) if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, __db_ovref_recover, DB___db_ovref)) != 0) return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __db_relink_recover, DB___db_relink)) != 0) - return (ret); if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, __db_debug_recover, DB___db_debug)) != 0) return (ret); @@ -3956,5 +2900,8 @@ __db_init_recover(dbenv, dtabp, dtabsizep) if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, __db_pg_new_recover, DB___db_pg_new)) != 0) return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __db_pg_init_recover, DB___db_pg_init)) != 0) + return (ret); return (0); } diff --git a/db/db/db_autop.c b/db/db/db_autop.c new file mode 100644 index 000000000..8f97a7cf5 --- /dev/null +++ b/db/db/db_autop.c @@ -0,0 +1,626 @@ +/* Do not edit: automatically built by gen_rec.awk. 
*/ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include + +#include +#include +#endif + +#include "db_int.h" +#include "dbinc/crypto.h" +#include "dbinc/db_page.h" +#include "dbinc/db_dispatch.h" +#include "dbinc/db_am.h" +#include "dbinc/log.h" +#include "dbinc/txn.h" + +/* + * PUBLIC: int __db_addrem_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__db_addrem_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __db_addrem_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __db_addrem_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__db_addrem%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\topcode: %lu\n", (u_long)argp->opcode); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tindx: %lu\n", (u_long)argp->indx); + (void)printf("\tnbytes: %lu\n", (u_long)argp->nbytes); + (void)printf("\thdr: "); + for (i = 0; i < argp->hdr.size; i++) { + ch = ((u_int8_t *)argp->hdr.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tdbt: "); + for (i = 0; i < argp->dbt.size; i++) { + ch = ((u_int8_t *)argp->dbt.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tpagelsn: [%lu][%lu]\n", + (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __db_big_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__db_big_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __db_big_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __db_big_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__db_big%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\topcode: %lu\n", (u_long)argp->opcode); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tprev_pgno: %lu\n", (u_long)argp->prev_pgno); + (void)printf("\tnext_pgno: %lu\n", (u_long)argp->next_pgno); + (void)printf("\tdbt: "); + for (i = 0; i < argp->dbt.size; i++) { + ch = ((u_int8_t *)argp->dbt.data)[i]; + printf(isprint(ch) || ch == 0x0a ? 
"%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tpagelsn: [%lu][%lu]\n", + (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset); + (void)printf("\tprevlsn: [%lu][%lu]\n", + (u_long)argp->prevlsn.file, (u_long)argp->prevlsn.offset); + (void)printf("\tnextlsn: [%lu][%lu]\n", + (u_long)argp->nextlsn.file, (u_long)argp->nextlsn.offset); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __db_ovref_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__db_ovref_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __db_ovref_args *argp; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __db_ovref_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__db_ovref%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tadjust: %ld\n", (long)argp->adjust); + (void)printf("\tlsn: [%lu][%lu]\n", + (u_long)argp->lsn.file, (u_long)argp->lsn.offset); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __db_debug_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__db_debug_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __db_debug_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __db_debug_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__db_debug%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\top: "); + for (i = 0; i < argp->op.size; i++) { + ch = ((u_int8_t *)argp->op.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tkey: "); + for (i = 0; i < argp->key.size; i++) { + ch = ((u_int8_t *)argp->key.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tdata: "); + for (i = 0; i < argp->data.size; i++) { + ch = ((u_int8_t *)argp->data.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\targ_flags: %lu\n", (u_long)argp->arg_flags); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __db_noop_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__db_noop_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __db_noop_args *argp; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __db_noop_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__db_noop%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? 
"_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tprevlsn: [%lu][%lu]\n", + (u_long)argp->prevlsn.file, (u_long)argp->prevlsn.offset); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __db_pg_alloc_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__db_pg_alloc_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __db_pg_alloc_args *argp; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __db_pg_alloc_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__db_pg_alloc%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tmeta_lsn: [%lu][%lu]\n", + (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset); + (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno); + (void)printf("\tpage_lsn: [%lu][%lu]\n", + (u_long)argp->page_lsn.file, (u_long)argp->page_lsn.offset); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tptype: %lu\n", (u_long)argp->ptype); + (void)printf("\tnext: %lu\n", (u_long)argp->next); + (void)printf("\tlast_pgno: %lu\n", (u_long)argp->last_pgno); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __db_pg_free_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__db_pg_free_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __db_pg_free_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __db_pg_free_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__db_pg_free%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tmeta_lsn: [%lu][%lu]\n", + (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset); + (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno); + (void)printf("\theader: "); + for (i = 0; i < argp->header.size; i++) { + ch = ((u_int8_t *)argp->header.data)[i]; + printf(isprint(ch) || ch == 0x0a ? 
"%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tnext: %lu\n", (u_long)argp->next); + (void)printf("\tlast_pgno: %lu\n", (u_long)argp->last_pgno); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __db_cksum_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__db_cksum_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __db_cksum_args *argp; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __db_cksum_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__db_cksum%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __db_pg_freedata_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__db_pg_freedata_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __db_pg_freedata_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __db_pg_freedata_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__db_pg_freedata%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tmeta_lsn: [%lu][%lu]\n", + (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset); + (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno); + (void)printf("\theader: "); + for (i = 0; i < argp->header.size; i++) { + ch = ((u_int8_t *)argp->header.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tnext: %lu\n", (u_long)argp->next); + (void)printf("\tlast_pgno: %lu\n", (u_long)argp->last_pgno); + (void)printf("\tdata: "); + for (i = 0; i < argp->data.size; i++) { + ch = ((u_int8_t *)argp->data.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __db_pg_prepare_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__db_pg_prepare_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __db_pg_prepare_args *argp; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __db_pg_prepare_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__db_pg_prepare%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? 
"_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __db_pg_new_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__db_pg_new_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __db_pg_new_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __db_pg_new_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__db_pg_new%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tmeta_lsn: [%lu][%lu]\n", + (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset); + (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno); + (void)printf("\theader: "); + for (i = 0; i < argp->header.size; i++) { + ch = ((u_int8_t *)argp->header.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tnext: %lu\n", (u_long)argp->next); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __db_pg_init_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__db_pg_init_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __db_pg_init_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __db_pg_init_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__db_pg_init%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\theader: "); + for (i = 0; i < argp->header.size; i++) { + ch = ((u_int8_t *)argp->header.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tdata: "); + for (i = 0; i < argp->data.size; i++) { + ch = ((u_int8_t *)argp->data.data)[i]; + printf(isprint(ch) || ch == 0x0a ? 
"%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __db_init_print __P((DB_ENV *, int (***)(DB_ENV *, + * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); + */ +int +__db_init_print(dbenv, dtabp, dtabsizep) + DB_ENV *dbenv; + int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); + size_t *dtabsizep; +{ + int ret; + + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __db_addrem_print, DB___db_addrem)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __db_big_print, DB___db_big)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __db_ovref_print, DB___db_ovref)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __db_debug_print, DB___db_debug)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __db_noop_print, DB___db_noop)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __db_pg_alloc_print, DB___db_pg_alloc)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __db_pg_free_print, DB___db_pg_free)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __db_cksum_print, DB___db_cksum)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __db_pg_freedata_print, DB___db_pg_freedata)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __db_pg_prepare_print, DB___db_pg_prepare)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __db_pg_new_print, DB___db_pg_new)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __db_pg_init_print, DB___db_pg_init)) != 0) + return (ret); + return (0); +} diff --git a/db/db/db_cam.c b/db/db/db_cam.c index a555b5ebd..075765072 100644 --- a/db/db/db_cam.c +++ b/db/db/db_cam.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2000-2003 + * Copyright (c) 2000-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_cam.c,v 11.156 2004/09/28 18:07:32 ubell Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_cam.c,v 11.140 2003/11/18 18:20:48 mjc Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -79,7 +77,7 @@ __db_c_close(dbc) /* * Remove the cursor(s) from the active queue. We may be closing two * cursors at once here, a top-level one and a lower-level, off-page - * duplicate one. The acess-method specific cursor close routine must + * duplicate one. The access-method specific cursor close routine must * close both of them in a single call. * * !!! @@ -114,11 +112,8 @@ __db_c_close(dbc) * and secondary update cursors, a cursor in a CDB * environment may not have a lock at all. */ - if (LOCK_ISSET(dbc->mylock)) { - if ((t_ret = __lock_put( - dbenv, &dbc->mylock)) != 0 && ret == 0) - ret = t_ret; - } + if ((t_ret = __LPUT(dbc, dbc->mylock)) != 0 && ret == 0) + ret = t_ret; /* For safety's sake, since this is going on the free queue. 
*/ memset(&dbc->mylock, 0, sizeof(dbc->mylock)); @@ -248,7 +243,7 @@ __db_c_del(dbc, flags) { DB *dbp; DBC *opd; - int ret; + int ret, t_ret; dbp = dbc->dbp; @@ -296,6 +291,22 @@ __db_c_del(dbc, flags) if ((ret = dbc->c_am_writelock(dbc)) == 0) ret = opd->c_am_del(opd); + /* + * If this was an update that is supporting dirty reads + * then we may have just swapped our read for a write lock + * which is held by the surviving cursor. We need + * to explicitly downgrade this lock. The closed cursor + * may only have had a read lock. + */ + if (F_ISSET(dbc->dbp, DB_AM_DIRTY) && + dbc->internal->lock_mode == DB_LOCK_WRITE) { + if ((t_ret = + __TLPUT(dbc, dbc->internal->lock)) != 0 && ret == 0) + ret = t_ret; + if (t_ret == 0) + dbc->internal->lock_mode = DB_LOCK_WWRITE; + } + done: CDB_LOCKING_DONE(dbp, dbc); return (ret); @@ -367,7 +378,7 @@ __db_c_idup(dbc_orig, dbcp, flags) dbc_orig->locker, &dbc_n)) != 0) return (ret); - /* If the user wants the cursor positioned, do it here. */ + /* Position the cursor if requested, acquiring the necessary locks. */ if (flags == DB_POSITION) { int_n = dbc_n->internal; int_orig = dbc_orig->internal; @@ -401,9 +412,9 @@ __db_c_idup(dbc_orig, dbcp, flags) } } - /* Copy the dirty read flag to the new cursor. */ - F_SET(dbc_n, F_ISSET(dbc_orig, DBC_DIRTY_READ)); - F_SET(dbc_n, F_ISSET(dbc_orig, DBC_WRITECURSOR)); + /* Copy the locking flags to the new cursor. */ + F_SET(dbc_n, + F_ISSET(dbc_orig, DBC_WRITECURSOR | DBC_DIRTY_READ | DBC_DEGREE_2)); /* * If we're in CDB and this isn't an offpage dup cursor, then @@ -756,7 +767,8 @@ done: /* * get set up unless there is an error. Assume success * here. This is the only call to c_am_bulk, and it avoids * setting it exactly the same everywhere. If we have an - * ENOMEM error, it'll get overwritten with the needed value. + * DB_BUFFER_SMALL error, it'll get overwritten with the + * needed value. */ data->size = data->ulen; ret = dbc_n->c_am_bulk(dbc_n, data, flags | multi); @@ -774,6 +786,19 @@ err: /* Don't pass DB_DBT_ISSET back to application level, error or no. */ /* Cleanup and cursor resolution. */ if (opd != NULL) { + /* + * To support dirty reads we must reget the write lock + * if we have just stepped off a deleted record. + * Since the OPD cursor does not know anything + * about the referencing page or cursor we need + * to peek at the OPD cursor and get the lock here. + */ + if (F_ISSET(dbc_arg->dbp, DB_AM_DIRTY) && + F_ISSET((BTREE_CURSOR *) + dbc_arg->internal->opd->internal, C_DELETED)) + if ((t_ret = + dbc_arg->c_am_writelock(dbc_arg)) != 0 && ret != 0) + ret = t_ret; if ((t_ret = __db_c_cleanup( dbc_arg->internal->opd, opd, ret)) != 0 && ret == 0) ret = t_ret; @@ -926,14 +951,6 @@ __db_c_put(dbc_arg, key, data, flags) */ rmw = STD_LOCKING(dbc_arg) ? DB_RMW : 0; - /* - * Set pkey so we can use &pkey everywhere instead of key. - * If DB_CURRENT is set and there is a key at the current - * location, pkey will be overwritten before it's used. - */ - pkey.data = key->data; - pkey.size = key->size; - if (flags == DB_CURRENT) { /* Step 1. */ /* * This is safe to do on the cursor we already have; @@ -943,19 +960,25 @@ __db_c_put(dbc_arg, key, data, flags) * writing soon enough in the "normal" put code. In * transactional databases we'll hold those write locks * even if we close the cursor we're reading with. + * + * The DB_KEYEMPTY return needs special handling -- if the + * cursor is on a deleted key, we return DB_NOTFOUND. 
*/ ret = __db_c_get(dbc_arg, &pkey, &olddata, rmw | DB_CURRENT); - if (ret == DB_KEYEMPTY) { - nodel = 1; /* - * We know we don't need a delete - * in the secondary. - */ - have_oldrec = 1; /* We've looked for the old record. */ - ret = 0; - } else if (ret != 0) + if (ret == DB_KEYEMPTY) + ret = DB_NOTFOUND; + if (ret != 0) goto err; - else - have_oldrec = 1; + + have_oldrec = 1; /* We've looked for the old record. */ + } else { + /* + * Set pkey so we can use &pkey everywhere instead of key. + * If DB_CURRENT is set and there is a key at the current + * location, pkey will be overwritten before it's used. + */ + pkey.data = key->data; + pkey.size = key->size; } /* @@ -1144,10 +1167,10 @@ __db_c_put(dbc_arg, key, data, flags) goto skipput; } else if (!F_ISSET(sdbp, DB_AM_DUPSORT)) { /* Case 2. */ - memset(&tempskey, 0, sizeof (DBT)); + memset(&tempskey, 0, sizeof(DBT)); tempskey.data = skey.data; tempskey.size = skey.size; - memset(&temppkey, 0, sizeof (DBT)); + memset(&temppkey, 0, sizeof(DBT)); temppkey.data = pkey.data; temppkey.size = pkey.size; ret = __db_c_get(sdbc, &tempskey, &temppkey, @@ -1500,6 +1523,22 @@ __db_c_cleanup(dbc, dbc_n, failed) if ((t_ret = __db_c_close(dbc_n)) != 0 && ret == 0) ret = t_ret; + /* + * If this was an update that is supporting dirty reads + * then we may have just swapped our read for a write lock + * which is held by the surviving cursor. We need + * to explicitly downgrade this lock. The closed cursor + * may only have had a read lock. + */ + if (F_ISSET(dbp, DB_AM_DIRTY) && + dbc->internal->lock_mode == DB_LOCK_WRITE) { + if ((t_ret = + __TLPUT(dbc, dbc->internal->lock)) != 0 && ret == 0) + ret = t_ret; + if (t_ret == 0) + dbc->internal->lock_mode = DB_LOCK_WWRITE; + } + return (ret); } @@ -1892,7 +1931,7 @@ __db_c_del_secondary(dbc) else if (ret == DB_NOTFOUND) ret = __db_secondary_corrupt(pdbp); - if ((t_ret = __db_c_close(pdbc)) != 0 && ret != 0) + if ((t_ret = __db_c_close(pdbc)) != 0 && ret == 0) ret = t_ret; return (ret); diff --git a/db/db/db_conv.c b/db/db/db_conv.c index 55d07e12a..b4c5c9a29 100644 --- a/db/db/db_conv.c +++ b/db/db/db_conv.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -35,14 +35,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * + * $Id: db_conv.c,v 11.45 2004/01/28 03:35:57 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_conv.c,v 11.43 2003/09/23 16:15:00 sue Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -142,7 +140,7 @@ __db_pgin(dbenv, pg, pp, cookie) * it's fatal: panic the system. */ if (F_ISSET(dbp, DB_AM_CHKSUM) && sum_len != 0) { - if (F_ISSET(dbp, DB_AM_SWAP) && is_hmac == 0) + if (F_ISSET(dbp, DB_AM_SWAP) && is_hmac == 0) P_32_SWAP(chksum); switch (ret = __db_check_chksum( dbenv, db_cipher, chksum, pp, sum_len, is_hmac)) { diff --git a/db/db/db_dispatch.c b/db/db/db_dispatch.c index a952eb43a..46547a1f8 100644 --- a/db/db/db_dispatch.c +++ b/db/db/db_dispatch.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
*/ /* @@ -34,18 +34,15 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * + * $Id: db_dispatch.c,v 11.167 2004/09/24 00:43:14 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_dispatch.c,v 11.145 2003/09/10 20:31:18 ubell Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include -#include #include #include #endif @@ -54,12 +51,12 @@ static const char revid[] = "$Id: db_dispatch.c,v 11.145 2003/09/10 20:31:18 ube #include "dbinc/db_page.h" #include "dbinc/db_shash.h" #include "dbinc/hash.h" -#include "dbinc/lock.h" #include "dbinc/log.h" #include "dbinc/mp.h" #include "dbinc/fop.h" #include "dbinc/txn.h" +#ifndef HAVE_FTRUNCATE static int __db_limbo_fix __P((DB *, DB_TXN *, DB_TXNLIST *, db_pgno_t *, DBMETA *, db_limbo_state)); static int __db_limbo_bucket __P((DB_ENV *, @@ -68,10 +65,12 @@ static int __db_limbo_move __P((DB_ENV *, DB_TXN *, DB_TXN *, DB_TXNLIST *)); static int __db_limbo_prepare __P(( DB *, DB_TXN *, DB_TXNLIST *)); static int __db_lock_move __P((DB_ENV *, u_int8_t *, db_pgno_t, db_lockmode_t, DB_TXN *, DB_TXN *)); -static int __db_txnlist_find_internal __P((DB_ENV *, void *, db_txnlist_type, - u_int32_t, u_int8_t [DB_FILE_ID_LEN], DB_TXNLIST **, int)); static int __db_txnlist_pgnoadd __P((DB_ENV *, DB_TXNHEAD *, int32_t, u_int8_t [DB_FILE_ID_LEN], char *, db_pgno_t)); +#endif +static int __db_txnlist_find_internal __P((DB_ENV *, + void *, db_txnlist_type, u_int32_t, u_int8_t[DB_FILE_ID_LEN], + DB_TXNLIST **, int, u_int32_t *)); /* * __db_dispatch -- @@ -97,7 +96,7 @@ __db_dispatch(dbenv, dtab, dtabsize, db, lsnp, redo, info) void *info; { DB_LSN prev_lsn; - u_int32_t rectype, txnid; + u_int32_t rectype, status, txnid; int make_call, ret; memcpy(&rectype, db->data, sizeof(rectype)); @@ -142,51 +141,82 @@ __db_dispatch(dbenv, dtab, dtabsize, db, lsnp, redo, info) break; case DB_TXN_BACKWARD_ROLL: /* - * Running full recovery in the backward pass. If we've - * seen this txnid before and added to it our commit list, - * then we do nothing during this pass, unless this is a child - * commit record, in which case we need to process it. If - * we've never seen it, then we call the appropriate recovery - * routine. - * - * We need to always undo DB___db_noop records, so that we - * properly handle any aborts before the file was closed. + * Running full recovery in the backward pass. In general, + * we only process records during this pass that belong + * to aborted transactions. Unfortunately, there are several + * exceptions: + * 1. If this is a meta-record, one not associated with + * a transaction, then we must always process it. + * 2. If this is a transaction commit/abort, we must + * always process it, so that we know the status of + * every transaction. + * 3. If this is a child commit, we need to process it + * because the outcome of the child transaction depends + * on the outcome of the parent. + * 4. If this is a dbreg_register record, we must always + * process is because they contain non-transactional + * closes that must be properly handled. + * 5. If this is a noop, we must always undo it so that we + * properly handle any aborts before a file was closed. + * 6. If this a file remove, we need to process it to + * determine if the on-disk file is the same as the + * one being described. 
*/ switch (rectype) { + /* + * These either do not belong to a transaction or (regop) + * must be processed regardless of the status of the + * transaction. + */ case DB___txn_regop: case DB___txn_recycle: case DB___txn_ckp: + make_call = 1; + break; + /* + * These belong to a transaction whose status must be + * checked. + */ + case DB___txn_child: case DB___db_noop: case DB___fop_file_remove: - case DB___txn_child: + case DB___dbreg_register: make_call = 1; - break; - case DB___dbreg_register: - if (txnid == 0) { - make_call = 1; - break; - } /* FALLTHROUGH */ default: - if (txnid != 0 && (ret = - __db_txnlist_find(dbenv, - info, txnid)) != TXN_COMMIT && ret != TXN_IGNORE) { - /* - * If not found then, this is an incomplete - * abort. - */ - if (ret == TXN_NOTFOUND) - return (__db_txnlist_add(dbenv, - info, txnid, TXN_IGNORE, lsnp)); - make_call = 1; - if (ret == TXN_OK && - (ret = __db_txnlist_update(dbenv, - info, txnid, - rectype == DB___txn_xa_regop ? - TXN_PREPARE : TXN_ABORT, NULL)) != 0) - return (ret); + if (txnid == 0) + break; + + ret = __db_txnlist_find(dbenv, info, txnid, &status); + + /* If not found, this is an incomplete abort. */ + if (ret == DB_NOTFOUND) + return (__db_txnlist_add(dbenv, + info, txnid, TXN_IGNORE, lsnp)); + if (ret != 0) + return (ret); + + /* + * If we ignore the transaction, ignore the operation + * UNLESS this is a child commit in which case we need + * to make sure that the child also gets marked as + * ignore. + */ + if (status == TXN_IGNORE && rectype != DB___txn_child) { + make_call = 0; + break; } + if (status == TXN_COMMIT) + break; + + /* Set make_call in case we came through default */ + make_call = 1; + if (status == TXN_OK && + (ret = __db_txnlist_update(dbenv, + info, txnid, rectype == DB___txn_xa_regop ? + TXN_PREPARE : TXN_ABORT, NULL, &status, 0)) != 0) + return (ret); } break; case DB_TXN_FORWARD_ROLL: @@ -205,15 +235,28 @@ __db_dispatch(dbenv, dtab, dtabsize, db, lsnp, redo, info) break; default: - if (txnid != 0 && (ret = __db_txnlist_find(dbenv, - info, txnid)) == TXN_COMMIT) - make_call = 1; - else if (ret != TXN_IGNORE && + if (txnid != 0) { + ret = __db_txnlist_find(dbenv, + info, txnid, &status); + + if (ret == DB_NOTFOUND) + /* Break out out of if clause. */ + ; + else if (ret != 0) + return (ret); + else if (status == TXN_COMMIT) { + make_call = 1; + break; + } + } + +#ifndef HAVE_FTRUNCATE + if (status != TXN_IGNORE && (rectype == DB___ham_metagroup || rectype == DB___ham_groupalloc || rectype == DB___db_pg_alloc)) { /* - * Because we cannot undo file extensions + * Because we do not have truncate * all allocation records must be reprocessed * during rollforward in case the file was * just created. It may not have been @@ -221,7 +264,9 @@ __db_dispatch(dbenv, dtab, dtabsize, db, lsnp, redo, info) */ make_call = 1; redo = DB_TXN_BACKWARD_ALLOC; - } else if (rectype == DB___dbreg_register) { + } else +#endif + if (rectype == DB___dbreg_register) { /* * This may be a transaction dbreg_register. * If it is, we only make the call on a COMMIT, @@ -234,47 +279,12 @@ __db_dispatch(dbenv, dtab, dtabsize, db, lsnp, redo, info) } } break; - case DB_TXN_GETPGNOS: - /* - * If this is one of DB's own log records, we simply - * dispatch. - */ - if (rectype < DB_user_BEGIN) { - make_call = 1; - break; - } - - /* - * If we're still here, this is a custom record in an - * application that's doing app-specific logging. 
Such a - * record doesn't have a getpgno function for the user - * dispatch function to call--the getpgnos functions return - * which pages replication needs to lock using the TXN_RECS - * structure, which is private and not something we want to - * document. - * - * Thus, we leave any necessary locking for the app's - * recovery function to do during the upcoming - * DB_TXN_APPLY. Fill in default getpgnos info (we need - * a stub entry for every log record that will get - * DB_TXN_APPLY'd) and return success. - */ - return (__db_default_getpgnos(dbenv, lsnp, info)); case DB_TXN_BACKWARD_ALLOC: default: return (__db_unknown_flag( dbenv, "__db_dispatch", (u_int32_t)redo)); } - /* - * The switch statement uses ret to receive the return value of - * __db_txnlist_find, which returns a large number of different - * statuses, none of which we will be returning. For safety, - * let's reset this here in case we ever do a "return(ret)" - * below in the future. - */ - ret = 0; - if (make_call) { /* * If the debug flag is set then we are logging @@ -426,14 +436,13 @@ __db_txnlist_init(dbenv, low_txn, hi_txn, trunc_lsn, retp) * Add an element to our transaction linked list. * * PUBLIC: int __db_txnlist_add __P((DB_ENV *, - * PUBLIC: void *, u_int32_t, int32_t, DB_LSN *)); + * PUBLIC: void *, u_int32_t, u_int32_t, DB_LSN *)); */ int __db_txnlist_add(dbenv, listp, txnid, status, lsn) DB_ENV *dbenv; void *listp; - u_int32_t txnid; - int32_t status; + u_int32_t txnid, status; DB_LSN *lsn; { DB_TXNHEAD *hp; @@ -474,10 +483,10 @@ __db_txnlist_remove(dbenv, listp, txnid) u_int32_t txnid; { DB_TXNLIST *entry; + u_int32_t status; return (__db_txnlist_find_internal(dbenv, - listp, TXNLIST_TXNID, txnid, - NULL, &entry, 1) == TXN_NOTFOUND ? TXN_NOTFOUND : TXN_OK); + listp, TXNLIST_TXNID, txnid, NULL, &entry, 1, &status)); } /* @@ -553,55 +562,67 @@ __db_txnlist_end(dbenv, listp) /* * __db_txnlist_find -- * Checks to see if a txnid with the current generation is in the - * txnid list. This returns TXN_NOTFOUND if the item isn't in the + * txnid list. This returns DB_NOTFOUND if the item isn't in the * list otherwise it returns (like __db_txnlist_find_internal) * the status of the transaction. A txnid of 0 means the record * was generated while not in a transaction. * - * PUBLIC: int __db_txnlist_find __P((DB_ENV *, void *, u_int32_t)); + * PUBLIC: int __db_txnlist_find __P((DB_ENV *, + * PUBLIC: void *, u_int32_t, u_int32_t *)); */ int -__db_txnlist_find(dbenv, listp, txnid) +__db_txnlist_find(dbenv, listp, txnid, statusp) DB_ENV *dbenv; void *listp; - u_int32_t txnid; + u_int32_t txnid, *statusp; { DB_TXNLIST *entry; if (txnid == 0) - return (TXN_NOTFOUND); + return (DB_NOTFOUND); + return (__db_txnlist_find_internal(dbenv, listp, - TXNLIST_TXNID, txnid, NULL, &entry, 0)); + TXNLIST_TXNID, txnid, NULL, &entry, 0, statusp)); } /* * __db_txnlist_update -- * Change the status of an existing transaction entry. - * Returns TXN_NOTFOUND if no such entry exists. + * Returns DB_NOTFOUND if no such entry exists. 
* * PUBLIC: int __db_txnlist_update __P((DB_ENV *, - * PUBLIC: void *, u_int32_t, int32_t, DB_LSN *)); + * PUBLIC: void *, u_int32_t, u_int32_t, DB_LSN *, u_int32_t *, int)); */ int -__db_txnlist_update(dbenv, listp, txnid, status, lsn) +__db_txnlist_update(dbenv, listp, txnid, status, lsn, ret_status, add_ok) DB_ENV *dbenv; void *listp; - u_int32_t txnid; - int32_t status; + u_int32_t txnid, status; DB_LSN *lsn; + u_int32_t *ret_status; + int add_ok; { DB_TXNHEAD *hp; DB_TXNLIST *elp; int ret; if (txnid == 0) - return (TXN_NOTFOUND); + return (DB_NOTFOUND); + hp = (DB_TXNHEAD *)listp; ret = __db_txnlist_find_internal(dbenv, - listp, TXNLIST_TXNID, txnid, NULL, &elp, 0); + listp, TXNLIST_TXNID, txnid, NULL, &elp, 0, ret_status); - if (ret == TXN_NOTFOUND || ret == TXN_IGNORE) + if (ret == DB_NOTFOUND && add_ok) { + *ret_status = status; + return (__db_txnlist_add(dbenv, listp, txnid, status, lsn)); + } + if (ret != 0) return (ret); + + if (*ret_status == TXN_IGNORE) + return (0); + elp->u.t.status = status; if (lsn != NULL && IS_ZERO_LSN(hp->maxlsn) && status == TXN_COMMIT) @@ -613,12 +634,13 @@ __db_txnlist_update(dbenv, listp, txnid, status, lsn) /* * __db_txnlist_find_internal -- * Find an entry on the transaction list. If the entry is not there or - * the list pointer is not initialized we return TXN_NOTFOUND. If the + * the list pointer is not initialized we return DB_NOTFOUND. If the * item is found, we return the status. Currently we always call this * with an initialized list pointer but checking for NULL keeps it general. */ static int -__db_txnlist_find_internal(dbenv, listp, type, txnid, uid, txnlistp, delete) +__db_txnlist_find_internal(dbenv, + listp, type, txnid, uid, txnlistp, delete, statusp) DB_ENV *dbenv; void *listp; db_txnlist_type type; @@ -626,6 +648,7 @@ __db_txnlist_find_internal(dbenv, listp, type, txnid, uid, txnlistp, delete) u_int8_t uid[DB_FILE_ID_LEN]; DB_TXNLIST **txnlistp; int delete; + u_int32_t *statusp; { struct __db_headlink *head; DB_TXNHEAD *hp; @@ -633,8 +656,10 @@ __db_txnlist_find_internal(dbenv, listp, type, txnid, uid, txnlistp, delete) u_int32_t generation, hash, i; int ret; + ret = 0; + if ((hp = (DB_TXNHEAD *)listp) == NULL) - return (TXN_NOTFOUND); + return (DB_NOTFOUND); switch (type) { case TXNLIST_TXNID: @@ -659,8 +684,7 @@ __db_txnlist_find_internal(dbenv, listp, type, txnid, uid, txnlistp, delete) case TXNLIST_DELETE: case TXNLIST_LSN: default: - DB_ASSERT(0); - return (EINVAL); + return (__db_panic(dbenv, EINVAL)); } head = &hp->head[DB_TXNLIST_MASK(hp, hash)]; @@ -673,20 +697,17 @@ __db_txnlist_find_internal(dbenv, listp, type, txnid, uid, txnlistp, delete) if (p->u.t.txnid != txnid || generation != p->u.t.generation) continue; - ret = p->u.t.status; + *statusp = p->u.t.status; break; case TXNLIST_PGNO: if (memcmp(uid, p->u.p.uid, DB_FILE_ID_LEN) != 0) continue; - - ret = 0; break; case TXNLIST_DELETE: case TXNLIST_LSN: default: - DB_ASSERT(0); - ret = EINVAL; + return (__db_panic(dbenv, EINVAL)); } if (delete == 1) { LIST_REMOVE(p, links); @@ -700,7 +721,7 @@ __db_txnlist_find_internal(dbenv, listp, type, txnid, uid, txnlistp, delete) return (ret); } - return (TXN_NOTFOUND); + return (DB_NOTFOUND); } /* @@ -726,7 +747,7 @@ __db_txnlist_gen(dbenv, listp, incr, min, max) * whenever we take a checkpoint and there are no outstanding * transactions. When that happens, we can reset transaction IDs * back to TXNID_MINIMUM. Currently we only do the reset - * at then end of recovery. 
Recycle records occrur when txnids + * at then end of recovery. Recycle records occur when txnids * are exhausted during runtime. A free range of ids is identified * and logged. This code maintains a stack of ranges. A txnid * is given the generation number of the first range it falls into @@ -858,12 +879,15 @@ err: __db_txnlist_end(dbenv, hp); return (ret); } +#ifndef HAVE_FTRUNCATE /* * __db_add_limbo -- add pages to the limbo list. * Get the file information and call pgnoadd for each page. * + * PUBLIC: #ifndef HAVE_FTRUNCATE * PUBLIC: int __db_add_limbo __P((DB_ENV *, * PUBLIC: void *, int32_t, db_pgno_t, int32_t)); + * PUBLIC: #endif */ int __db_add_limbo(dbenv, info, fileid, pgno, count) @@ -884,7 +908,7 @@ __db_add_limbo(dbenv, info, fileid, pgno, count) do { if ((ret = __db_txnlist_pgnoadd(dbenv, info, fileid, fnp->ufid, - R_ADDR(&dblp->reginfo, fnp->name_off), pgno)) != 0) + R_ADDR(dbenv, &dblp->reginfo, fnp->name_off), pgno)) != 0) return (ret); pgno++; } while (--count != 0); @@ -904,7 +928,7 @@ __db_add_limbo(dbenv, info, fileid, pgno, count) * the specific modifications to the free list. * * If we run out of log space during an abort, then we can't write the - * compensating transaction, so we abandon the idea of a compenating + * compensating transaction, so we abandon the idea of a compensating * transaction, and go back to processing how we do during recovery. * The reason that this is not the norm is that it's expensive: it requires * that we flush any database with an in-question allocation. Thus if @@ -924,8 +948,10 @@ __db_add_limbo(dbenv, info, fileid, pgno, count) * "create list and write meta-data page" algorithm. Otherwise, we're in * an abort and doing the "use compensating transaction" algorithm. * + * PUBLIC: #ifndef HAVE_FTRUNCATE * PUBLIC: int __db_do_the_limbo __P((DB_ENV *, * PUBLIC: DB_TXN *, DB_TXN *, DB_TXNHEAD *, db_limbo_state)); + * PUBLIC: #endif */ int __db_do_the_limbo(dbenv, ptxn, txn, hp, state) @@ -1044,6 +1070,7 @@ __db_limbo_bucket(dbenv, txn, elp, state) DB_MPOOLFILE *mpf; DBMETA *meta; DB_TXN *ctxn, *t; + FNAME *fname; db_pgno_t last_pgno, pgno; int dbp_created, in_retry, ret, t_ret; @@ -1077,6 +1104,18 @@ retry: dbp_created = 0; /* First try to get a dbp by fileid. */ ret = __dbreg_id_to_db(dbenv, t, &dbp, elp->u.p.fileid, 0); + /* + * If the file was closed and reopened its id could change. + * Look it up the hard way. + */ + if (ret == DB_DELETED || ret == ENOENT || + ((ret == 0 && + memcmp(elp->u.p.uid, dbp->fileid, DB_FILE_ID_LEN) != 0))) { + if ((ret = __dbreg_fid_to_fname( + dbenv->lg_handle, elp->u.p.uid, 0, &fname)) == 0) + ret = __dbreg_id_to_db( + dbenv, t, &dbp, fname->id, 0); + } /* * File is being destroyed. No need to worry about * dealing with recovery of allocations. @@ -1138,6 +1177,9 @@ retry: dbp_created = 0; if (ret == DB_RUNRECOVERY || ctxn == NULL) goto err; in_retry = 1; + if ((ret = __txn_abort(ctxn)) != 0) + goto err; + ctxn = NULL; goto retry; } @@ -1186,15 +1228,33 @@ retry: dbp_created = 0; if ((ret = __memp_fput(mpf, meta, 0)) != 0) goto err; meta = NULL; - if ((ret = __db_sync(dbp)) != 0) - goto err; - pgno = PGNO_BASE_MD; - if ((ret = __memp_fget(mpf, &pgno, 0, &meta)) != 0) - goto err; - meta->free = last_pgno; - if ((ret = __memp_fput(mpf, meta, DB_MPOOL_DIRTY)) != 0) - goto err; - meta = NULL; + /* + * If the sync fails then we cannot flush the + * newly allocated pages. That is, the file + * cannot be extended. Don't let the metapage + * point at them. 
+ * We may lose these pages from the file if it + * can be extended later. If there is never + * space for the pages, then things will be ok. + */ + if ((ret = __db_sync(dbp)) == 0) { + pgno = PGNO_BASE_MD; + if ((ret = + __memp_fget(mpf, &pgno, 0, &meta)) != 0) + goto err; + meta->free = last_pgno; + if ((ret = __memp_fput(mpf, + meta, DB_MPOOL_DIRTY)) != 0) + goto err; + meta = NULL; + } else { + __db_err(dbenv, + "%s: %s", dbp->fname, db_strerror(ret)); + __db_err(dbenv, "%s: %s %s", dbp->fname, + "allocation flush failed, some free pages", + "may not appear in the free list"); + ret = 0; + } } next: @@ -1245,7 +1305,7 @@ __db_limbo_fix(dbp, ctxn, elp, lastp, meta, state) PAGE *freep, *pagep; db_pgno_t next, pgno; u_int32_t i; - int put_page, ret, t_ret; + int ret, t_ret; /* * Loop through the entries for this txnlist element and @@ -1255,7 +1315,7 @@ __db_limbo_fix(dbp, ctxn, elp, lastp, meta, state) dbc = NULL; mpf = dbp->mpf; pagep = NULL; - put_page = ret = 0; + ret = 0; for (i = 0; i < elp->u.p.nentries; i++) { pgno = elp->u.p.pgno_array[i]; @@ -1269,7 +1329,6 @@ __db_limbo_fix(dbp, ctxn, elp, lastp, meta, state) goto err; continue; } - put_page = 1; if (state == LIMBO_COMPENSATE || IS_ZERO_LSN(LSN(pagep))) { if (ctxn == NULL) { @@ -1299,9 +1358,9 @@ __db_limbo_fix(dbp, ctxn, elp, lastp, meta, state) } } else if (state == LIMBO_COMPENSATE) { /* - * Generate a log record for what we did - * on the LIMBO_TIMESTAMP pass. All pages - * here are free so P_OVERHEAD is sufficent. + * Generate a log record for what we did on the + * LIMBO_TIMESTAMP pass. All pages here are + * free so P_OVERHEAD is sufficient. */ ZERO_LSN(pagep->lsn); memset(&ldbt, 0, sizeof(ldbt)); @@ -1324,7 +1383,8 @@ __db_limbo_fix(dbp, ctxn, elp, lastp, meta, state) */ F_SET(dbc, DBC_COMPENSATE); ret = __db_free(dbc, pagep); - put_page = 0; + pagep = NULL; + /* * On any error, we hope that the error was * caused due to running out of space, and we @@ -1345,15 +1405,15 @@ __db_limbo_fix(dbp, ctxn, elp, lastp, meta, state) else elp->u.p.pgno_array[i] = PGNO_INVALID; - if (put_page == 1) { + if (pagep != NULL) { ret = __memp_fput(mpf, pagep, DB_MPOOL_DIRTY); - put_page = 0; + pagep = NULL; } if (ret != 0) goto err; } -err: if (put_page && +err: if (pagep != NULL && (t_ret = __memp_fput(mpf, pagep, DB_MPOOL_DIRTY)) != 0 && ret == 0) ret = t_ret; if (dbc != NULL && (t_ret = __db_c_close(dbc)) != 0 && ret == 0) @@ -1423,20 +1483,22 @@ __db_txnlist_pgnoadd(dbenv, hp, fileid, uid, fname, pgno) { DB_TXNLIST *elp; size_t len; - u_int32_t hash; + u_int32_t hash, status; int ret; elp = NULL; - if (__db_txnlist_find_internal(dbenv, hp, - TXNLIST_PGNO, 0, uid, &elp, 0) != 0) { + if ((ret = __db_txnlist_find_internal(dbenv, hp, + TXNLIST_PGNO, 0, uid, &elp, 0, &status)) != 0 && ret != DB_NOTFOUND) + goto err; + + if (ret == DB_NOTFOUND || status != TXN_OK) { if ((ret = __os_malloc(dbenv, sizeof(DB_TXNLIST), &elp)) != 0) goto err; memcpy(&hash, uid, sizeof(hash)); LIST_INSERT_HEAD( &hp->head[DB_TXNLIST_MASK(hp, hash)], elp, links); - elp->u.p.fileid = fileid; memcpy(elp->u.p.uid, uid, DB_FILE_ID_LEN); len = strlen(fname) + 1; @@ -1460,44 +1522,12 @@ __db_txnlist_pgnoadd(dbenv, hp, fileid, uid, fname, pgno) } elp->u.p.pgno_array[elp->u.p.nentries++] = pgno; + /* Update to the latest fileid. Limbo will find it faster. 
*/ + elp->u.p.fileid = fileid; return (0); -err: __db_txnlist_end(dbenv, hp); - return (ret); -} - -#ifdef HAVE_REPLICATION -/* - * __db_default_getpgnos -- - * Fill in default getpgnos information for an application-specific - * log record. - * - * PUBLIC: int __db_default_getpgnos __P((DB_ENV *, DB_LSN *lsnp, void *)); - */ -int -__db_default_getpgnos(dbenv, lsnp, summary) - DB_ENV *dbenv; - DB_LSN *lsnp; - void *summary; -{ - TXN_RECS *t; - int ret; - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); +err: return (ret); } #endif @@ -1541,9 +1571,6 @@ __db_txnlist_print(listp) case TXN_ABORT: txntype = "abort"; break; - case TXN_NOTFOUND: - txntype = "notfound"; - break; case TXN_IGNORE: txntype = "ignore"; break; diff --git a/db/db/db_dup.c b/db/db/db_dup.c index 9ea8a7e23..725e81cce 100644 --- a/db/db/db_dup.c +++ b/db/db/db_dup.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_dup.c,v 11.39 2004/02/18 21:34:37 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_dup.c,v 11.36 2003/06/30 17:19:44 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -20,7 +18,6 @@ static const char revid[] = "$Id: db_dup.c,v 11.36 2003/06/30 17:19:44 bostic Ex #include "db_int.h" #include "dbinc/db_page.h" #include "dbinc/db_shash.h" -#include "dbinc/lock.h" #include "dbinc/mp.h" #include "dbinc/db_am.h" @@ -165,118 +162,3 @@ __db_pitem(dbc, pagep, indx, nbytes, hdr, data) return (0); } - -/* - * __db_relink -- - * Relink around a deleted page. - * - * PUBLIC: int __db_relink __P((DBC *, u_int32_t, PAGE *, PAGE **, int)); - */ -int -__db_relink(dbc, add_rem, pagep, new_next, needlock) - DBC *dbc; - u_int32_t add_rem; - PAGE *pagep, **new_next; - int needlock; -{ - DB *dbp; - PAGE *np, *pp; - DB_LOCK npl, ppl; - DB_LSN *nlsnp, *plsnp, ret_lsn; - DB_MPOOLFILE *mpf; - int ret; - - dbp = dbc->dbp; - np = pp = NULL; - LOCK_INIT(npl); - LOCK_INIT(ppl); - nlsnp = plsnp = NULL; - mpf = dbp->mpf; - ret = 0; - - /* - * Retrieve and lock the one/two pages. For a remove, we may need - * two pages (the before and after). For an add, we only need one - * because, the split took care of the prev. - */ - if (pagep->next_pgno != PGNO_INVALID) { - if (needlock && (ret = __db_lget(dbc, - 0, pagep->next_pgno, DB_LOCK_WRITE, 0, &npl)) != 0) - goto err; - if ((ret = __memp_fget(mpf, &pagep->next_pgno, 0, &np)) != 0) { - ret = __db_pgerr(dbp, pagep->next_pgno, ret); - goto err; - } - nlsnp = &np->lsn; - } - if (add_rem == DB_REM_PAGE && pagep->prev_pgno != PGNO_INVALID) { - if (needlock && (ret = __db_lget(dbc, - 0, pagep->prev_pgno, DB_LOCK_WRITE, 0, &ppl)) != 0) - goto err; - if ((ret = __memp_fget(mpf, &pagep->prev_pgno, 0, &pp)) != 0) { - ret = __db_pgerr(dbp, pagep->prev_pgno, ret); - goto err; - } - plsnp = &pp->lsn; - } - - /* Log the change. 
*/ - if (DBC_LOGGING(dbc)) { - if ((ret = __db_relink_log(dbp, dbc->txn, &ret_lsn, 0, add_rem, - pagep->pgno, &pagep->lsn, pagep->prev_pgno, plsnp, - pagep->next_pgno, nlsnp)) != 0) - goto err; - } else - LSN_NOT_LOGGED(ret_lsn); - if (np != NULL) - np->lsn = ret_lsn; - if (pp != NULL) - pp->lsn = ret_lsn; - if (add_rem == DB_REM_PAGE) - pagep->lsn = ret_lsn; - - /* - * Modify and release the two pages. - * - * !!! - * The parameter new_next gets set to the page following the page we - * are removing. If there is no following page, then new_next gets - * set to NULL. - */ - if (np != NULL) { - if (add_rem == DB_ADD_PAGE) - np->prev_pgno = pagep->pgno; - else - np->prev_pgno = pagep->prev_pgno; - if (new_next == NULL) - ret = __memp_fput(mpf, np, DB_MPOOL_DIRTY); - else { - *new_next = np; - ret = __memp_fset(mpf, np, DB_MPOOL_DIRTY); - } - if (ret != 0) - goto err; - if (needlock) - (void)__TLPUT(dbc, npl); - } else if (new_next != NULL) - *new_next = NULL; - - if (pp != NULL) { - pp->next_pgno = pagep->next_pgno; - if ((ret = __memp_fput(mpf, pp, DB_MPOOL_DIRTY)) != 0) - goto err; - if (needlock) - (void)__TLPUT(dbc, ppl); - } - return (0); - -err: if (np != NULL) - (void)__memp_fput(mpf, np, 0); - if (needlock) - (void)__TLPUT(dbc, npl); - if (pp != NULL) - (void)__memp_fput(mpf, pp, 0); - if (needlock) - (void)__TLPUT(dbc, ppl); - return (ret); -} diff --git a/db/db/db_iface.c b/db/db/db_iface.c index 3a4e6792e..7be20ede1 100644 --- a/db/db/db_iface.c +++ b/db/db/db_iface.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_iface.c,v 11.121 2004/10/07 17:33:32 sue Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_iface.c,v 11.106 2003/10/02 02:57:46 margo Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -21,8 +19,8 @@ static const char revid[] = "$Id: db_iface.c,v 11.106 2003/10/02 02:57:46 margo #include "dbinc/db_page.h" #include "dbinc/db_shash.h" #include "dbinc/btree.h" -#include "dbinc/hash.h" -#include "dbinc/qam.h" +#include "dbinc/hash.h" /* For __db_no_hash_am(). */ +#include "dbinc/qam.h" /* For __db_no_queue_am(). */ #include "dbinc/lock.h" #include "dbinc/log.h" #include "dbinc/mp.h" @@ -42,7 +40,6 @@ static int __db_open_arg __P((DB *, static int __db_pget_arg __P((DB *, DBT *, u_int32_t)); static int __db_put_arg __P((DB *, DBT *, DBT *, u_int32_t)); static int __db_rdonly __P((const DB_ENV *, const char *)); -static int __db_stat_arg __P((DB *, u_int32_t)); static int __dbt_ferr __P((const DB *, const char *, const DBT *, int)); /* @@ -53,7 +50,6 @@ static int __dbt_ferr __P((const DB *, const char *, const DBT *, int)); #define IS_READONLY(dbp) \ (F_ISSET(dbp, DB_AM_RDONLY) || \ (IS_REP_CLIENT((dbp)->dbenv) && \ - !IS_REP_LOGSONLY((dbp)->dbenv) && \ !F_ISSET((dbp), DB_AM_CL_WRITER))) /* @@ -133,7 +129,7 @@ __db_associate_pp(dbp, txn, sdbp, callback, flags) /* Check for replication block. */ handle_check = IS_REPLICATED(dbenv, dbp); - if (handle_check && (ret = __db_rep_enter(dbp, 1, txn != NULL)) != 0) + if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) goto err; while ((sdbc = TAILQ_FIRST(&sdbp->free_queue)) != NULL) @@ -145,7 +141,7 @@ __db_associate_pp(dbp, txn, sdbp, callback, flags) /* Release replication block. */ if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); err: return (txn_local ? 
__db_txn_auto_resolve(dbenv, txn, 0, ret) : ret); } @@ -192,7 +188,8 @@ __db_associate_arg(dbp, sdbp, callback, flags) "The primary and secondary must be opened in the same environment"); return (EINVAL); } - if (DB_IS_THREADED(dbp) != DB_IS_THREADED(sdbp)) { + if ((DB_IS_THREADED(dbp) && !DB_IS_THREADED(sdbp)) || + (!DB_IS_THREADED(dbp) && DB_IS_THREADED(sdbp))) { __db_err(dbenv, "The DB_THREAD setting must be the same for primary and secondary"); return (EINVAL); @@ -244,15 +241,18 @@ __db_close_pp(dbp, flags) /* Check for replication block. */ handle_check = IS_REPLICATED(dbenv, dbp); if (handle_check && - (t_ret = __db_rep_enter(dbp, 0, 0)) != 0 && ret == 0) - ret = t_ret; + (t_ret = __db_rep_enter(dbp, 0, 0, 0)) != 0) { + handle_check = 0; + if (ret == 0) + ret = t_ret; + } if ((t_ret = __db_close(dbp, NULL, flags)) != 0 && ret == 0) ret = t_ret; /* Release replication block. */ if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -293,14 +293,14 @@ __db_cursor_pp(dbp, txn, dbcp, flags) /* Check for replication block. */ handle_check = IS_REPLICATED(dbenv, dbp); - if (handle_check && (ret = __db_rep_enter(dbp, 1, txn != NULL)) != 0) + if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) return (ret); ret = __db_cursor(dbp, txn, dbcp, flags); /* Release replication block. */ if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -351,6 +351,10 @@ __db_cursor(dbp, txn, dbcp, flags) (txn != NULL && F_ISSET(txn, TXN_DIRTY_READ))) F_SET(dbc, DBC_DIRTY_READ); + if (LF_ISSET(DB_DEGREE_2) || + (txn != NULL && F_ISSET(txn, TXN_DEGREE_2))) + F_SET(dbc, DBC_DEGREE_2); + *dbcp = dbc; return (0); @@ -371,11 +375,14 @@ __db_cursor_arg(dbp, flags) dbenv = dbp->dbenv; - /* DB_DIRTY_READ is the only valid bit-flag and requires locking. */ - if (LF_ISSET(DB_DIRTY_READ)) { + /* + * DB_DIRTY_READ and DB_DGREE_2 are the only valid bit-flags + * and requires locking. + */ + if (LF_ISSET(DB_DIRTY_READ | DB_DEGREE_2)) { if (!LOCKING_ON(dbenv)) return (__db_fnl(dbenv, "DB->cursor")); - LF_CLR(DB_DIRTY_READ); + LF_CLR(DB_DIRTY_READ | DB_DEGREE_2); } /* Check for invalid function flags. */ @@ -438,14 +445,14 @@ __db_del_pp(dbp, txn, key, flags) /* Check for replication block. */ handle_check = IS_REPLICATED(dbenv, dbp); - if (handle_check && (ret = __db_rep_enter(dbp, 1, txn != NULL)) != 0) + if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) goto err; ret = __db_del(dbp, txn, key, flags); /* Release replication block. */ if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); err: return (txn_local ? __db_txn_auto_resolve(dbenv, txn, 0, ret) : ret); } @@ -501,7 +508,7 @@ __db_fd_pp(dbp, fdp) /* Check for replication block. */ handle_check = IS_REPLICATED(dbenv, dbp); - if (handle_check && (ret = __db_rep_enter(dbp, 1, 0)) != 0) + if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 0)) != 0) return (ret); /* @@ -527,7 +534,7 @@ __db_fd_pp(dbp, fdp) err: /* Release replication block. */ if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -579,14 +586,14 @@ __db_get_pp(dbp, txn, key, data, flags) /* Check for replication block. */ handle_check = IS_REPLICATED(dbenv, dbp); - if (handle_check && (ret = __db_rep_enter(dbp, 1, txn != NULL)) != 0) + if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) goto err; ret = __db_get(dbp, txn, key, data, flags); /* Release replication block. 
*/ if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); err: return (txn_local ? __db_txn_auto_resolve(dbenv, txn, 0, ret) : ret); } @@ -612,6 +619,9 @@ __db_get(dbp, txn, key, data, flags) if (LF_ISSET(DB_DIRTY_READ)) { mode = DB_DIRTY_READ; LF_CLR(DB_DIRTY_READ); + } else if (LF_ISSET(DB_DEGREE_2)) { + mode = DB_DEGREE_2; + LF_CLR(DB_DEGREE_2); } else if ((flags & DB_OPFLAGS_MASK) == DB_CONSUME || (flags & DB_OPFLAGS_MASK) == DB_CONSUME_WAIT) mode = DB_WRITELOCK; @@ -674,12 +684,14 @@ __db_get_arg(dbp, key, data, flags) * flag in a path where CDB may have been configured. */ check_thread = dirty = 0; - if (LF_ISSET(DB_DIRTY_READ | DB_RMW)) { + if (LF_ISSET(DB_DIRTY_READ | DB_RMW | DB_DEGREE_2)) { if (!LOCKING_ON(dbenv)) return (__db_fnl(dbenv, "DB->get")); - if (LF_ISSET(DB_DIRTY_READ)) - dirty = 1; - LF_CLR(DB_DIRTY_READ | DB_RMW); + dirty = LF_ISSET(DB_DIRTY_READ | DB_DEGREE_2); + if ((ret = __db_fcchk(dbenv, + "DB->get", flags, DB_DIRTY_READ, DB_DEGREE_2)) != 0) + return (ret); + LF_CLR(DB_DIRTY_READ | DB_RMW | DB_DEGREE_2); } multi = 0; @@ -711,7 +723,9 @@ __db_get_arg(dbp, key, data, flags) check_thread = 1; if (dirty) { __db_err(dbenv, - "DB_DIRTY_READ is not supported with DB_CONSUME or DB_CONSUME_WAIT"); + "%s is not supported with DB_CONSUME or DB_CONSUME_WAIT", + LF_ISSET(DB_DIRTY_READ) ? + "DB_DIRTY_READ" : "DB_DEGREE_2"); return (EINVAL); } if (multi) @@ -782,15 +796,15 @@ __db_join_pp(primary, curslist, dbcp, flags) /* Check for replication block. */ handle_check = IS_REPLICATED(dbenv, primary); - if (handle_check && - (ret = __db_rep_enter(primary, 1, curslist[0]->txn != NULL)) != 0) + if (handle_check && (ret = + __db_rep_enter(primary, 1, 0, curslist[0]->txn != NULL)) != 0) return (ret); ret = __db_join(primary, curslist, dbcp, flags); /* Release replication block. */ if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -799,7 +813,7 @@ __db_join_pp(primary, curslist, dbcp, flags) * __db_join_arg -- * Check DB->join arguments. */ -int +static int __db_join_arg(primary, curslist, flags) DB *primary; DBC **curslist; @@ -873,7 +887,7 @@ __db_key_range_pp(dbp, txn, key, kr, flags) /* Check for replication block. */ handle_check = IS_REPLICATED(dbenv, dbp); - if (handle_check && (ret = __db_rep_enter(dbp, 1, txn != NULL)) != 0) + if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) return (ret); /* @@ -906,7 +920,7 @@ __db_key_range_pp(dbp, txn, key, kr, flags) /* Release replication block. */ if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -970,8 +984,11 @@ __db_open_pp(dbp, txn, fname, dname, type, flags, mode) /* Check for replication block. */ handle_check = IS_REPLICATED(dbenv, dbp); - if (handle_check && (ret = __db_rep_enter(dbp, 1, txn != NULL)) != 0) + if (handle_check && + (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) { + handle_check = 0; goto err; + } if ((ret = __db_open(dbp, txn, fname, dname, type, flags, mode, PGNO_BASE_MD)) != 0) @@ -1022,7 +1039,7 @@ err: if (ret != 0 && txn == NULL) { /* Release replication block. */ if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (txn_local ? __db_txn_auto_resolve(dbenv, txn, nosync, ret) : ret); @@ -1183,14 +1200,14 @@ __db_pget_pp(dbp, txn, skey, pkey, data, flags) /* Check for replication block. 
*/ handle_check = IS_REPLICATED(dbenv, dbp); - if (handle_check && (ret = __db_rep_enter(dbp, 1, txn != NULL)) != 0) + if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) return (ret); ret = __db_pget(dbp, txn, skey, pkey, data, flags); /* Release replication block. */ if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -1346,14 +1363,14 @@ __db_put_pp(dbp, txn, key, data, flags) /* Check for replication block. */ handle_check = IS_REPLICATED(dbenv, dbp); - if (handle_check && (ret = __db_rep_enter(dbp, 1, txn != NULL)) != 0) + if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) goto err; ret = __db_put(dbp, txn, key, data, flags); /* Release replication block. */ if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); err: return (txn_local ? __db_txn_auto_resolve(dbenv, txn, 0, ret) : ret); } @@ -1409,6 +1426,10 @@ err: return (__db_ferr(dbenv, "DB->put", 0)); if ((ret = __dbt_ferr(dbp, "data", data, 0)) != 0) return (ret); + /* Keys shouldn't have partial flags during a put. */ + if (F_ISSET(key, DB_DBT_PARTIAL)) + return (__db_ferr(dbenv, "key DBT", 0)); + /* Check for partial puts in the presence of duplicates. */ if (F_ISSET(data, DB_DBT_PARTIAL) && (F_ISSET(dbp, DB_AM_DUP) || F_ISSET(key, DB_DBT_DUPOK))) { @@ -1420,122 +1441,6 @@ err: return (__db_ferr(dbenv, "DB->put", 0)); return (0); } -/* - * __db_stat_pp -- - * DB->stat pre/post processing. - * - * PUBLIC: int __db_stat_pp __P((DB *, void *, u_int32_t)); - */ -int -__db_stat_pp(dbp, spp, flags) - DB *dbp; - void *spp; - u_int32_t flags; -{ - DB_ENV *dbenv; - int handle_check, ret; - - dbenv = dbp->dbenv; - - PANIC_CHECK(dbp->dbenv); - DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->stat"); - - if ((ret = __db_stat_arg(dbp, flags)) != 0) - return (ret); - - /* Check for replication block. */ - handle_check = IS_REPLICATED(dbenv, dbp); - if (handle_check && (ret = __db_rep_enter(dbp, 1, 0)) != 0) - return (ret); - - ret = __db_stat(dbp, spp, flags); - - /* Release replication block. */ - if (handle_check) - __db_rep_exit(dbenv); - - return (ret); -} - -/* - * __db_stat -- - * DB->stat. - * - * PUBLIC: int __db_stat __P((DB *, void *, u_int32_t)); - */ -int -__db_stat(dbp, spp, flags) - DB *dbp; - void *spp; - u_int32_t flags; -{ - DB_ENV *dbenv; - DBC *dbc; - int ret, t_ret; - - dbenv = dbp->dbenv; - - /* Acquire a cursor. */ - if ((ret = __db_cursor(dbp, NULL, &dbc, 0)) != 0) - return (ret); - - DEBUG_LWRITE(dbc, NULL, "DB->stat", NULL, NULL, flags); - - switch (dbp->type) { - case DB_BTREE: - case DB_RECNO: - ret = __bam_stat(dbc, spp, flags); - break; - case DB_HASH: - ret = __ham_stat(dbc, spp, flags); - break; - case DB_QUEUE: - ret = __qam_stat(dbc, spp, flags); - break; - case DB_UNKNOWN: - default: - ret = (__db_unknown_type(dbenv, "DB->stat", dbp->type)); - break; - } - - if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __db_stat_arg -- - * Check DB->stat arguments. - */ -static int -__db_stat_arg(dbp, flags) - DB *dbp; - u_int32_t flags; -{ - DB_ENV *dbenv; - - dbenv = dbp->dbenv; - - /* Check for invalid function flags. */ - switch (flags) { - case 0: - case DB_FAST_STAT: - case DB_CACHED_COUNTS: /* Deprecated and undocumented. */ - break; - case DB_RECORDCOUNT: /* Deprecated and undocumented. 
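[Editorial sketch, not part of the patch.] Nearly every pre/post wrapper touched above follows the same bracket: take the replication block before the real work, release it afterwards, and (new in this patch) clear the local handle_check flag when entering fails so the exit call is skipped. The toy translation unit below shows only that discipline in the close-style path that keeps going after a failed enter; rep_enter, rep_exit and do_work are invented stand-ins for the real internals.

    #include <stdio.h>

    static int rep_blocked;    /* toy stand-in for the replication state */

    static int  rep_enter(void) { return (rep_blocked ? -1 : 0); }
    static void rep_exit(void)  { /* release the replication block */ }
    static int  do_work(void)   { return (0); }

    /*
     * The wrapper pattern used by the pre/post routines above: only call
     * rep_exit() if rep_enter() actually succeeded, but a destructor-style
     * method still performs its work either way.
     */
    static int
    close_pp(int replicated)
    {
        int handle_check, ret, t_ret;

        ret = 0;
        handle_check = replicated;
        if (handle_check && (t_ret = rep_enter()) != 0) {
            handle_check = 0;    /* failed to enter: never exit */
            if (ret == 0)
                ret = t_ret;
        }

        if ((t_ret = do_work()) != 0 && ret == 0)
            ret = t_ret;

        if (handle_check)
            rep_exit();
        return (ret);
    }

    int
    main(void)
    {
        printf("%d\n", close_pp(1));    /* 0: entered, worked, exited */
        rep_blocked = 1;
        printf("%d\n", close_pp(1));    /* -1: enter failed, exit skipped */
        return (0);
    }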
*/ - if (dbp->type == DB_RECNO) - break; - if (dbp->type == DB_BTREE && F_ISSET(dbp, DB_AM_RECNUM)) - break; - goto err; - default: -err: return (__db_ferr(dbenv, "DB->stat", 0)); - } - - return (0); -} - /* * __db_sync_pp -- * DB->sync pre/post processing. @@ -1564,14 +1469,14 @@ __db_sync_pp(dbp, flags) /* Check for replication block. */ handle_check = IS_REPLICATED(dbenv, dbp); - if (handle_check && (ret = __db_rep_enter(dbp, 1, 0)) != 0) + if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 0)) != 0) return (ret); ret = __db_sync(dbp); /* Release replication block. */ if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -1610,14 +1515,14 @@ __db_c_close_pp(dbc) /* Check for replication block. */ handle_check = IS_REPLICATED(dbenv, dbp); if (handle_check && - (ret = __db_rep_enter(dbp, 0, dbc->txn != NULL)) != 0) + (ret = __db_rep_enter(dbp, 0, 0, dbc->txn != NULL)) != 0) return (ret); ret = __db_c_close(dbc); /* Release replication block. */ if (handle_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -1660,14 +1565,14 @@ __db_c_count_pp(dbc, recnop, flags) /* Check for replication block. */ handle_check = IS_REPLICATED(dbenv, dbp); if (handle_check && - (ret = __db_rep_enter(dbp, 1, dbc->txn != NULL)) != 0) + (ret = __db_rep_enter(dbp, 1, 0, dbc->txn != NULL)) != 0) return (ret); ret = __db_c_count(dbc, recnop); /* Release replication block. */ if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -1702,7 +1607,7 @@ __db_c_del_pp(dbc, flags) /* Check for replication block. */ handle_check = IS_REPLICATED(dbenv, dbp); if (handle_check && - (ret = __db_rep_enter(dbp, 1, dbc->txn != NULL)) != 0) + (ret = __db_rep_enter(dbp, 1, 0, dbc->txn != NULL)) != 0) return (ret); DEBUG_LWRITE(dbc, dbc->txn, "DBcursor->del", NULL, NULL, flags); @@ -1711,7 +1616,7 @@ __db_c_del_pp(dbc, flags) /* Release replication block. */ if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -1788,14 +1693,14 @@ __db_c_dup_pp(dbc, dbcp, flags) /* Check for replication block. */ handle_check = IS_REPLICATED(dbenv, dbp); if (handle_check && - (ret = __db_rep_enter(dbp, 1, dbc->txn != NULL)) != 0) + (ret = __db_rep_enter(dbp, 1, 0, dbc->txn != NULL)) != 0) return (ret); ret = __db_c_dup(dbc, dbcp, flags); /* Release replication block. */ if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -1827,7 +1732,7 @@ __db_c_get_pp(dbc, key, data, flags) /* Check for replication block. */ handle_check = IS_REPLICATED(dbenv, dbp); if (handle_check && - (ret = __db_rep_enter(dbp, 1, dbc->txn != NULL)) != 0) + (ret = __db_rep_enter(dbp, 1, 0, dbc->txn != NULL)) != 0) return (ret); DEBUG_LREAD(dbc, dbc->txn, "DBcursor->get", @@ -1837,7 +1742,7 @@ __db_c_get_pp(dbc, key, data, flags) /* Release replication block. */ if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -1985,6 +1890,55 @@ err: return (__db_ferr(dbenv, "DBcursor->get", 0)); return (0); } +/* + * __db_secondary_close_pp -- + * DB->close for secondaries + * + * PUBLIC: int __db_secondary_close_pp __P((DB *, u_int32_t)); + */ +int +__db_secondary_close_pp(dbp, flags) + DB *dbp; + u_int32_t flags; +{ + DB_ENV *dbenv; + int handle_check, ret, t_ret; + + dbenv = dbp->dbenv; + ret = 0; + + PANIC_CHECK(dbenv); + + /* + * !!! + * The actual argument checking is simple, do it inline. 
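[Editorial sketch, not part of the patch.] The DB->put argument checker above, and the cursor-put checker further on, gain the same guard: a key DBT carrying DB_DBT_PARTIAL is rejected up front, since partial semantics only make sense for the data item. The minimal check below is illustrative only; check_put_key, its struct and its flag value are not the real interfaces.

    #include <errno.h>
    #include <stdio.h>

    #define DBT_PARTIAL 0x01    /* stand-in for DB_DBT_PARTIAL */

    struct dbt {
        void     *data;
        unsigned  size;
        unsigned  flags;
    };

    /* Only the data DBT may legitimately ask for partial treatment on a put. */
    static int
    check_put_key(const struct dbt *key)
    {
        if (key->flags & DBT_PARTIAL)
            return (EINVAL);
        return (0);
    }

    int
    main(void)
    {
        struct dbt good = { "k", 1, 0 };
        struct dbt bad  = { "k", 1, DBT_PARTIAL };

        printf("%d %d\n", check_put_key(&good), check_put_key(&bad));
        return (0);    /* prints 0 and then a nonzero EINVAL value */
    }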
+ * + * Validate arguments and complain if they're wrong, but as a DB + * handle destructor, we can't fail. + */ + if (flags != 0 && flags != DB_NOSYNC && + (t_ret = __db_ferr(dbenv, "DB->close", 0)) != 0 && ret == 0) + ret = t_ret; + + /* Check for replication block. */ + handle_check = IS_REPLICATED(dbenv, dbp); + if (handle_check && + (t_ret = __db_rep_enter(dbp, 0, 0, 0)) != 0) { + handle_check = 0; + if (ret == 0) + ret = t_ret; + } + + if ((t_ret = __db_secondary_close(dbp, flags)) != 0 && ret == 0) + ret = t_ret; + + /* Release replication block. */ + if (handle_check) + __env_db_rep_exit(dbenv); + + return (ret); +} + /* * __db_c_pget_pp -- * DBC->c_pget pre/post processing. @@ -2015,14 +1969,14 @@ __db_c_pget_pp(dbc, skey, pkey, data, flags) /* Check for replication block. */ handle_check = IS_REPLICATED(dbenv, dbp); if (handle_check && - (ret = __db_rep_enter(dbp, 1, dbc->txn != NULL)) != 0) + (ret = __db_rep_enter(dbp, 1, 0, dbc->txn != NULL)) != 0) return (ret); ret = __db_c_pget(dbc, skey, pkey, data, flags); /* Release replication block. */ if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -2122,7 +2076,7 @@ __db_c_put_pp(dbc, key, data, flags) /* Check for replication block. */ handle_check = IS_REPLICATED(dbenv, dbp); if (handle_check && - (ret = __db_rep_enter(dbp, 1, dbc->txn != NULL)) != 0) + (ret = __db_rep_enter(dbp, 1, 0, dbc->txn != NULL)) != 0) return (ret); DEBUG_LWRITE(dbc, dbc->txn, "DBcursor->put", @@ -2134,7 +2088,7 @@ __db_c_put_pp(dbc, key, data, flags) /* Release replication block. */ if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -2221,6 +2175,10 @@ err: return (__db_ferr(dbenv, "DBcursor->put", 0)); if ((ret = __dbt_ferr(dbp, "data", data, 0)) != 0) return (ret); + /* Keys shouldn't have partial flags during a put. */ + if (F_ISSET(key, DB_DBT_PARTIAL)) + return (__db_ferr(dbenv, "key DBT", 0)); + /* * The cursor must be initialized for anything other than DB_KEYFIRST * and DB_KEYLAST, return EINVAL for an invalid cursor, otherwise 0. diff --git a/db/db/db_join.c b/db/db/db_join.c index 556ededb3..f486f296e 100644 --- a/db/db/db_join.c +++ b/db/db/db_join.c @@ -1,16 +1,14 @@ /* * See the file LICENSE for redistribution information. * - * Copyright (c) 1998-2003 + * Copyright (c) 1998-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_join.c,v 11.75 2004/09/22 03:30:23 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_join.c,v 11.65 2003/10/07 18:55:39 mjc Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -68,7 +66,7 @@ static int __db_join_put __P((DBC *, DBT *, DBT *, u_int32_t)); * cursor method of a DB, join cursors are created through an explicit * call to DB->join. * - * The curslist is an array of existing, intialized cursors and primary + * The curslist is an array of existing, initialized cursors and primary * is the DB of the primary file. The data item that joins all the * cursors in the curslist is used as the key into the primary and that * key and data are returned. 
When no more items are left in the join @@ -229,7 +227,7 @@ err: if (jc != NULL) { __os_free(dbenv, jc->j_curslist); if (jc->j_workcurs != NULL) { if (jc->j_workcurs[0] != NULL) - __os_free(dbenv, jc->j_workcurs[0]); + (void)__db_c_close(jc->j_workcurs[0]); __os_free(dbenv, jc->j_workcurs); } if (jc->j_fdupcurs != NULL) @@ -262,13 +260,13 @@ __db_join_close_pp(dbc) handle_check = IS_REPLICATED(dbenv, dbp); if (handle_check && - (ret = __db_rep_enter(dbp, 0, dbc->txn != NULL)) != 0) + (ret = __db_rep_enter(dbp, 0, 0, dbc->txn != NULL)) != 0) return (ret); ret = __db_join_close(dbc); if (handle_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -311,7 +309,8 @@ __db_join_get_pp(dbc, key, data, flags) { DB *dbp; DB_ENV *dbenv; - u_int32_t handle_check, ret, save_flags; + u_int32_t handle_check, save_flags; + int ret; dbp = dbc->dbp; dbenv = dbp->dbenv; @@ -321,11 +320,11 @@ __db_join_get_pp(dbc, key, data, flags) PANIC_CHECK(dbenv); - if (LF_ISSET(DB_DIRTY_READ | DB_RMW)) { + if (LF_ISSET(DB_DIRTY_READ | DB_DEGREE_2 | DB_RMW)) { if (!LOCKING_ON(dbp->dbenv)) return (__db_fnl(dbp->dbenv, "DBcursor->c_get")); - LF_CLR(DB_DIRTY_READ | DB_RMW); + LF_CLR(DB_DIRTY_READ | DB_DEGREE_2 | DB_RMW); } switch (flags) { @@ -355,7 +354,7 @@ __db_join_get_pp(dbc, key, data, flags) handle_check = IS_REPLICATED(dbp->dbenv, dbp); if (handle_check && - (ret = __db_rep_enter(dbp, 1, dbc->txn != NULL)) != 0) + (ret = __db_rep_enter(dbp, 1, 0, dbc->txn != NULL)) != 0) return (ret); /* Restore the original flags value. */ @@ -364,7 +363,7 @@ __db_join_get_pp(dbc, key, data, flags) ret = __db_join_get(dbc, key, data, flags); if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -391,7 +390,7 @@ __db_join_get(dbc, key_arg, data_arg, flags) * If the set of flags here changes, check that __db_join_primget * is updated to handle them properly. */ - opmods = LF_ISSET(DB_RMW | DB_DIRTY_READ); + opmods = LF_ISSET(DB_RMW | DB_DEGREE_2 | DB_DIRTY_READ); /* * Since we are fetching the key as a datum in the secondary indices, @@ -423,7 +422,7 @@ __db_join_get(dbc, key_arg, data_arg, flags) retry: ret = __db_c_get(jc->j_workcurs[0], &jc->j_key, key_n, opmods | (jc->j_exhausted[0] ? DB_NEXT_DUP : DB_CURRENT)); - if (ret == ENOMEM) { + if (ret == DB_BUFFER_SMALL) { jc->j_key.ulen <<= 1; if ((ret = __os_realloc(dbp->dbenv, jc->j_key.ulen, &jc->j_key.data)) != 0) @@ -473,7 +472,7 @@ retry: ret = __db_c_get(jc->j_workcurs[0], &jc->j_key, key_n, if (jc->j_workcurs[i] == NULL) /* If this is NULL, we need to dup curslist into it. 
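[Editorial sketch, not part of the patch.] The join-cursor hunks above switch the "buffer too small" signal from ENOMEM to the new DB_BUFFER_SMALL return while keeping the same recovery: double the user-memory buffer (jc->j_key.ulen <<= 1) and retry the get. The self-contained loop below sketches that doubling strategy; fetch() and BUFFER_SMALL are invented stand-ins for __db_c_get and DB_BUFFER_SMALL.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define BUFFER_SMALL (-30999)    /* stand-in for DB_BUFFER_SMALL */

    /* Pretend fetch: needs 100 bytes, reports BUFFER_SMALL until it fits. */
    static int
    fetch(void *buf, size_t buflen, size_t *needed)
    {
        *needed = 100;
        if (buflen < *needed)
            return (BUFFER_SMALL);
        memset(buf, 'x', *needed);
        return (0);
    }

    int
    main(void)
    {
        size_t need, ulen;
        void *data, *t;
        int ret;

        ulen = 16;
        if ((data = malloc(ulen)) == NULL)
            return (1);

        /* Double the buffer and retry, as the join cursor does with j_key. */
        while ((ret = fetch(data, ulen, &need)) == BUFFER_SMALL) {
            ulen <<= 1;
            if ((t = realloc(data, ulen)) == NULL) {
                free(data);
                return (1);
            }
            data = t;
        }
        printf("ret=%d needed=%zu final buffer=%zu bytes\n", ret, need, ulen);
        free(data);
        return (0);
    }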
*/ if ((ret = __db_c_dup(jc->j_curslist[i], - jc->j_workcurs + i, DB_POSITION)) != 0) + &jc->j_workcurs[i], DB_POSITION)) != 0) goto err; retry2: cp = jc->j_workcurs[i]; @@ -560,18 +559,17 @@ retry2: cp = jc->j_workcurs[i]; __db_c_close(jc->j_workcurs[j])) != 0) goto err; jc->j_exhausted[j] = 0; - if (jc->j_fdupcurs[j] != NULL && - (ret = __db_c_dup(jc->j_fdupcurs[j], + if (jc->j_fdupcurs[j] == NULL) + jc->j_workcurs[j] = NULL; + else if ((ret = __db_c_dup(jc->j_fdupcurs[j], &jc->j_workcurs[j], DB_POSITION)) != 0) goto err; - else - jc->j_workcurs[j] = NULL; } goto retry2; /* NOTREACHED */ } - if (ret == ENOMEM) { + if (ret == DB_BUFFER_SMALL) { jc->j_key.ulen <<= 1; if ((ret = __os_realloc(dbp->dbenv, jc->j_key.ulen, &jc->j_key.data)) != 0) { @@ -611,7 +609,6 @@ mem_err: __db_err(dbp->dbenv, if (SORTED_SET(jc, i) && jc->j_fdupcurs[i] == NULL && (ret = __db_c_dup(cp, &jc->j_fdupcurs[i], DB_POSITION)) != 0) goto err; - } err: if (ret != 0) @@ -875,7 +872,11 @@ __db_join_primget(dbp, txn, lockerid, key, data, flags) u_int32_t flags; { DBC *dbc; - int dirty, ret, rmw, t_ret; + int ret, rmw, t_ret; + + if ((ret = __db_cursor_int(dbp, + txn, dbp->type, PGNO_INVALID, 0, lockerid, &dbc)) != 0) + return (ret); /* * The only allowable flags here are the two flags copied into @@ -884,17 +885,17 @@ __db_join_primget(dbp, txn, lockerid, key, data, flags) * It's a DB bug if we allow any other flags down in here. */ rmw = LF_ISSET(DB_RMW); - dirty = LF_ISSET(DB_DIRTY_READ); - LF_CLR(DB_RMW | DB_DIRTY_READ); - DB_ASSERT(flags == 0); - - if ((ret = __db_cursor_int(dbp, - txn, dbp->type, PGNO_INVALID, 0, lockerid, &dbc)) != 0) - return (ret); - - if (dirty || + if (LF_ISSET(DB_DIRTY_READ) || (txn != NULL && F_ISSET(txn, TXN_DIRTY_READ))) F_SET(dbc, DBC_DIRTY_READ); + + if (LF_ISSET(DB_DEGREE_2) || + (txn != NULL && F_ISSET(txn, TXN_DEGREE_2))) + F_SET(dbc, DBC_DEGREE_2); + + LF_CLR(DB_RMW | DB_DIRTY_READ | DB_DEGREE_2); + DB_ASSERT(flags == 0); + F_SET(dbc, DBC_TRANSIENT); /* diff --git a/db/db/db_meta.c b/db/db/db_meta.c index 6ecc1e781..c5e88bb56 100644 --- a/db/db/db_meta.c +++ b/db/db/db_meta.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -38,14 +38,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
+ * + * $Id: db_meta.c,v 11.89 2004/10/05 14:28:33 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_meta.c,v 11.77 2003/09/09 16:42:06 ubell Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -108,7 +106,7 @@ __db_new(dbc, type, pagepp) PAGE *h; db_pgno_t last, pgno, newnext; u_int32_t meta_flags; - int extend, ret; + int extend, ret, t_ret; meta = NULL; meta_flags = 0; @@ -150,8 +148,8 @@ __db_new(dbc, type, pagepp) */ if (DBC_LOGGING(dbc)) { if ((ret = __db_pg_alloc_log(dbp, dbc->txn, &LSN(meta), 0, - &LSN(meta), PGNO_BASE_MD, &lsn, pgno, - (u_int32_t)type, newnext)) != 0) + &LSN(meta), PGNO_BASE_MD, &lsn, + pgno, (u_int32_t)type, newnext, meta->last_pgno)) != 0) goto err; } else LSN_NOT_LOGGED(LSN(meta)); @@ -174,8 +172,12 @@ __db_new(dbc, type, pagepp) if (TYPE(h) != P_INVALID) return (__db_panic(dbp->dbenv, EINVAL)); - (void)__memp_fput(mpf, (PAGE *)meta, DB_MPOOL_DIRTY); - (void)__TLPUT(dbc, metalock); + ret = __memp_fput(mpf, (PAGE *)meta, DB_MPOOL_DIRTY); + meta = NULL; + if ((t_ret = __TLPUT(dbc, metalock)) != 0 && ret == 0) + ret = t_ret; + if (ret != 0) + goto err; switch (type) { case P_BTREEMETA: @@ -271,7 +273,8 @@ __db_free(dbc, h) ddbt.size = dbp->pgsize - h->hf_offset; ret = __db_pg_freedata_log(dbp, dbc->txn, &LSN(meta), 0, h->pgno, &LSN(meta), - PGNO_BASE_MD, &ldbt, meta->free, &ddbt); + PGNO_BASE_MD, &ldbt, + meta->free, meta->last_pgno, &ddbt); break; } goto log; @@ -288,8 +291,8 @@ __db_free(dbc, h) DB_ASSERT(h->type != P_QAMDATA); log: ret = __db_pg_free_log(dbp, - dbc->txn, &LSN(meta), 0, h->pgno, - &LSN(meta), PGNO_BASE_MD, &ldbt, meta->free); + dbc->txn, &LSN(meta), 0, h->pgno, &LSN(meta), + PGNO_BASE_MD, &ldbt, meta->free, meta->last_pgno); } if (ret != 0) { (void)__memp_fput(mpf, (PAGE *)meta, 0); @@ -300,13 +303,32 @@ log: ret = __db_pg_free_log(dbp, LSN_NOT_LOGGED(LSN(meta)); LSN(h) = LSN(meta); - P_INIT(h, dbp->pgsize, h->pgno, PGNO_INVALID, meta->free, 0, P_INVALID); -#ifdef DIAGNOSTIC - memset((u_int8_t *) - h + P_OVERHEAD(dbp), CLEAR_BYTE, dbp->pgsize - P_OVERHEAD(dbp)); +#ifdef HAVE_FTRUNCATE + if (h->pgno == meta->last_pgno) { + if ((ret = __memp_fput(mpf, h, DB_MPOOL_DISCARD)) != 0) + goto err; + /* Give the page back to the OS. */ + if ((ret = __memp_ftruncate(mpf, meta->last_pgno, 0)) != 0) + goto err; + meta->last_pgno--; + h = NULL; + } else #endif - meta->free = h->pgno; + { + /* + * If we are not truncating the page then we + * reinitialize it and put it hat the head of + * the free list. + */ + P_INIT(h, dbp->pgsize, + h->pgno, PGNO_INVALID, meta->free, 0, P_INVALID); +#ifdef DIAGNOSTIC + memset((u_int8_t *) h + P_OVERHEAD(dbp), + CLEAR_BYTE, dbp->pgsize - P_OVERHEAD(dbp)); +#endif + meta->free = h->pgno; + } /* Discard the metadata page. */ if ((t_ret = @@ -317,7 +339,8 @@ log: ret = __db_pg_free_log(dbp, /* Discard the caller's page reference. */ dirty_flag = DB_MPOOL_DIRTY; -err: if ((t_ret = __memp_fput(mpf, h, dirty_flag)) != 0 && ret == 0) +err: if (h != NULL && + (t_ret = __memp_fput(mpf, h, dirty_flag)) != 0 && ret == 0) ret = t_ret; /* @@ -353,21 +376,6 @@ __db_lprint(dbc) } #endif -/* - * Implement the rules for transactional locking. We can release the previous - * lock if we are not in a transaction or COUPLE_ALWAYS is specifed (used in - * record locking). If we are doing dirty reads then we can release read locks - * and down grade write locks. 
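[Editorial sketch, not part of the patch.] The __db_free rework above introduces an either/or: when ftruncate is available and the page being freed is the file's last page, the page is discarded from the pool and the file is physically shortened (last_pgno drops by one); otherwise it is reinitialized and pushed on the in-file free list as before. The toy model below only captures that decision; free_page and the meta/page fields are invented for the illustration and do not touch a real mpool.

    #include <stdio.h>

    #define HAVE_TRUNCATE 1    /* pretend the platform has ftruncate */

    struct meta {
        unsigned last_pgno;    /* highest page currently in the file */
        unsigned free_head;    /* head of the in-file free list */
    };

    struct page {
        unsigned pgno;
        unsigned next_free;
    };

    /*
     * Free a page: give it back to the OS if it is the tail of the file,
     * otherwise thread it onto the free list, as in the patched __db_free.
     */
    static void
    free_page(struct meta *m, struct page *h)
    {
    #if HAVE_TRUNCATE
        if (h->pgno == m->last_pgno) {
            m->last_pgno--;    /* the file shrinks by one page */
            printf("truncated page %u back to the OS\n", h->pgno);
            return;
        }
    #endif
        h->next_free = m->free_head;    /* reinitialize and link in */
        m->free_head = h->pgno;
        printf("page %u pushed on the free list\n", h->pgno);
    }

    int
    main(void)
    {
        struct meta m = { 10, 0 };
        struct page tail = { 10, 0 }, inner = { 4, 0 };

        free_page(&m, &tail);     /* last page: truncated */
        free_page(&m, &inner);    /* interior page: free-listed */
        return (0);
    }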
- */ -#define DB_PUT_ACTION(dbc, action, lockp) \ - (((action == LCK_COUPLE || action == LCK_COUPLE_ALWAYS) && \ - LOCK_ISSET(*lockp)) ? \ - (dbc->txn == NULL || action == LCK_COUPLE_ALWAYS || \ - (F_ISSET(dbc, DBC_DIRTY_READ) && \ - (lockp)->mode == DB_LOCK_DIRTY)) ? LCK_COUPLE : \ - (F_ISSET((dbc)->dbp, DB_AM_DIRTY) && \ - (lockp)->mode == DB_LOCK_WRITE) ? LCK_DOWNGRADE : 0 : 0) - /* * __db_lget -- * The standard lock get call. @@ -413,6 +421,8 @@ __db_lget(dbc, action, pgno, mode, lkflags, lockp) else dbc->lock.type = DB_PAGE_LOCK; lkflags &= ~DB_LOCK_RECORD; + if (action == LCK_ROLLBACK) + lkflags |= DB_LOCK_ABORT; /* * If the transaction enclosing this cursor has DB_LOCK_NOWAIT set, @@ -427,13 +437,46 @@ __db_lget(dbc, action, pgno, mode, lkflags, lockp) has_timeout = F_ISSET(dbc, DBC_RECOVER) || (txn != NULL && F_ISSET(txn, TXN_LOCKTIMEOUT)); - switch (DB_PUT_ACTION(dbc, action, lockp)) { + /* + * Transactional locking. + * Hold on to the previous read lock only if we are in full isolation. + * COUPLE_ALWAYS indicates we are holding an interior node + * which need not be isolated. + * Downgrade write locks if we are supporting dirty readers. + */ + if ((action != LCK_COUPLE && action != LCK_COUPLE_ALWAYS) || + !LOCK_ISSET(*lockp)) + action = 0; + else if (dbc->txn == NULL || action == LCK_COUPLE_ALWAYS) + action = LCK_COUPLE; + else if (F_ISSET(dbc, DBC_DEGREE_2) && lockp->mode == DB_LOCK_READ) + action = LCK_COUPLE; + else if (F_ISSET(dbc, DBC_DIRTY_READ) && lockp->mode == DB_LOCK_DIRTY) + action = LCK_COUPLE; + else if (F_ISSET(dbc->dbp, DB_AM_DIRTY) && lockp->mode == DB_LOCK_WRITE) + action = LCK_DOWNGRADE; + else + action = 0; + + switch (action) { + case LCK_DOWNGRADE: + if ((ret = __lock_downgrade( + dbenv, lockp, DB_LOCK_WWRITE, 0)) != 0) + return (ret); + /* FALLTHROUGH */ + + default: + if (!has_timeout) { + ret = __lock_get(dbenv, + dbc->locker, lkflags, &dbc->lock_dbt, mode, lockp); + break; + } + + /* FALLTHROUGH */ case LCK_COUPLE: -lck_couple: couple[0].op = has_timeout? DB_LOCK_GET_TIMEOUT : DB_LOCK_GET; + couple[0].op = has_timeout? DB_LOCK_GET_TIMEOUT : DB_LOCK_GET; couple[0].obj = &dbc->lock_dbt; couple[0].mode = mode; - if (action == LCK_COUPLE_ALWAYS) - action = LCK_COUPLE; UMRW_SET(couple[0].timeout); if (has_timeout) couple[0].timeout = @@ -448,19 +491,10 @@ lck_couple: couple[0].op = has_timeout? DB_LOCK_GET_TIMEOUT : DB_LOCK_GET; if (ret == 0 || reqp == &couple[1]) *lockp = couple[0].lock; break; - case LCK_DOWNGRADE: - if ((ret = __lock_downgrade( - dbenv, lockp, DB_LOCK_WWRITE, 0)) != 0) - return (ret); - /* FALL THROUGH */ - default: - if (has_timeout) - goto lck_couple; - ret = __lock_get(dbenv, - dbc->locker, lkflags, &dbc->lock_dbt, mode, lockp); - break; } + if (txn != NULL && ret == DB_LOCK_DEADLOCK) + F_SET(txn, TXN_DEADLOCK); return ((ret == DB_LOCK_NOTGRANTED && !F_ISSET(dbenv, DB_ENV_TIME_NOTGRANTED)) ? DB_LOCK_DEADLOCK : ret); } @@ -477,13 +511,28 @@ __db_lput(dbc, lockp) DB_LOCK *lockp; { DB_ENV *dbenv; - int ret; + int action, ret; - dbenv = dbc->dbp->dbenv; + /* + * Transactional locking. + * Hold on to the read locks only if we are in full isolation. + * Downgrade write locks if we are supporting dirty readers. 
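[Editorial sketch, not part of the patch.] The replacement for the deleted DB_PUT_ACTION macro spells the transactional-locking rules out as a cascade in __db_lget (and, in similar form, __db_lput): couple when there is no transaction or the caller asked for COUPLE_ALWAYS, couple read locks under degree-2 isolation, couple dirty locks under dirty reads, downgrade write locks when the database supports dirty readers, and otherwise hold the lock for full isolation. The decision table can be isolated as a small pure function, sketched below with simplified enums standing in for the real DBC flags and lock modes.

    #include <stdio.h>

    enum lock_mode { M_READ, M_WRITE, M_DIRTY, M_NONE };
    enum action    { HOLD, COUPLE, DOWNGRADE };

    struct cursor_state {
        int in_txn;              /* cursor belongs to a transaction */
        int degree_2;            /* read-committed isolation */
        int dirty_read;          /* read-uncommitted isolation */
        int db_dirty_readers;    /* database supports dirty readers */
    };

    /*
     * Decide what to do with the lock already held before taking the next
     * one; mirrors the cascade the patch writes into __db_lget.
     */
    static enum action
    prev_lock_action(const struct cursor_state *c, int couple_requested,
        int couple_always, enum lock_mode held)
    {
        if (!(couple_requested || couple_always) || held == M_NONE)
            return (HOLD);
        if (!c->in_txn || couple_always)
            return (COUPLE);
        if (c->degree_2 && held == M_READ)
            return (COUPLE);
        if (c->dirty_read && held == M_DIRTY)
            return (COUPLE);
        if (c->db_dirty_readers && held == M_WRITE)
            return (DOWNGRADE);
        return (HOLD);    /* full isolation: keep the lock to commit */
    }

    int
    main(void)
    {
        struct cursor_state c = { 1, 1, 0, 0 };    /* in a txn, degree-2 */

        printf("%d\n", prev_lock_action(&c, 1, 0, M_READ));     /* 1: COUPLE */
        printf("%d\n", prev_lock_action(&c, 1, 0, M_WRITE));    /* 0: HOLD */
        return (0);
    }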
+ */ + if (F_ISSET(dbc->dbp, DB_AM_DIRTY) && lockp->mode == DB_LOCK_WRITE) + action = LCK_DOWNGRADE; + else if (dbc->txn == NULL) + action = LCK_COUPLE; + else if (F_ISSET(dbc, DBC_DEGREE_2) && lockp->mode == DB_LOCK_READ) + action = LCK_COUPLE; + else if (F_ISSET(dbc, DBC_DIRTY_READ) && lockp->mode == DB_LOCK_DIRTY) + action = LCK_COUPLE; + else + action = 0; - switch (DB_PUT_ACTION(dbc, LCK_COUPLE, lockp)) { + dbenv = dbc->dbp->dbenv; + switch (action) { case LCK_COUPLE: - ret = __lock_put(dbenv, lockp); + ret = __lock_put(dbenv, lockp, 0); break; case LCK_DOWNGRADE: ret = __lock_downgrade(dbenv, lockp, DB_LOCK_WWRITE, 0); diff --git a/db/db/db_method.c b/db/db/db_method.c index ddd6abdd1..4266fbf0e 100644 --- a/db/db/db_method.c +++ b/db/db/db_method.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_method.c,v 11.116 2004/10/11 18:22:05 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_method.c,v 11.99 2003/07/08 20:14:17 ubell Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -21,6 +19,10 @@ static const char revid[] = "$Id: db_method.c,v 11.99 2003/07/08 20:14:17 ubell #include #endif +#ifdef HAVE_RPC +#include "db_server.h" +#endif + #include "db_int.h" #include "dbinc/crypto.h" #include "dbinc/db_page.h" @@ -34,15 +36,13 @@ static const char revid[] = "$Id: db_method.c,v 11.99 2003/07/08 20:14:17 ubell #include "dbinc_auto/xa_ext.h" #ifdef HAVE_RPC -#include "dbinc_auto/db_server.h" #include "dbinc_auto/rpc_client_ext.h" #endif static int __db_get_byteswapped __P((DB *, int *)); static int __db_get_dbname __P((DB *, const char **, const char **)); -static int __db_get_env __P((DB *, DB_ENV **)); -static int __db_get_lorder __P((DB *, int *)); -static int __db_get_transactional __P((DB *, int *)); +static DB_ENV *__db_get_env __P((DB *)); +static int __db_get_transactional __P((DB *)); static int __db_get_type __P((DB *, DBTYPE *dbtype)); static int __db_init __P((DB *, u_int32_t)); static int __db_set_alloc __P((DB *, void *(*)(size_t), @@ -56,14 +56,18 @@ static int __db_get_encrypt_flags __P((DB *, u_int32_t *)); static int __db_set_encrypt __P((DB *, const char *, u_int32_t)); static int __db_set_feedback __P((DB *, void (*)(DB *, int, int))); static void __db_map_flags __P((DB *, u_int32_t *, u_int32_t *)); -static int __db_get_flags __P((DB *, u_int32_t *)); static int __db_get_pagesize __P((DB *, u_int32_t *)); static int __db_set_paniccall __P((DB *, void (*)(DB_ENV *, int))); -static void __db_set_errcall __P((DB *, void (*)(const char *, char *))); +static void __db_set_errcall + __P((DB *, void (*)(const DB_ENV *, const char *, const char *))); static void __db_get_errfile __P((DB *, FILE **)); static void __db_set_errfile __P((DB *, FILE *)); static void __db_get_errpfx __P((DB *, const char **)); static void __db_set_errpfx __P((DB *, const char *)); +static void __db_set_msgcall + __P((DB *, void (*)(const DB_ENV *, const char *))); +static void __db_get_msgfile __P((DB *, FILE **)); +static void __db_set_msgfile __P((DB *, FILE *)); static void __dbh_err __P((DB *, int, const char *, ...)); static void __dbh_errx __P((DB *, const char *, ...)); @@ -140,11 +144,14 @@ db_create(dbpp, dbenv, flags) */ dbp->timestamp = (F_ISSET(dbenv, DB_ENV_DBLOCAL) || !REP_ON(dbenv)) ? 
0 : - ((DB_REP *)dbenv->rep_handle)->region->timestamp; + ((REGENV *)((REGINFO *)dbenv->reginfo)->primary)->rep_timestamp; - /* Open a backing DB_MPOOLFILE handle in the memory pool. */ - if ((ret = __memp_fcreate(dbenv, &dbp->mpf)) != 0) - goto err; + /* If not RPC, open a backing DB_MPOOLFILE handle in the memory pool. */ +#ifdef HAVE_RPC + if (!RPC_ON(dbenv)) +#endif + if ((ret = __memp_fcreate(dbenv, &dbp->mpf)) != 0) + goto err; dbp->type = DB_UNKNOWN; @@ -186,6 +193,7 @@ __db_init(dbp, flags) dbp->close = __db_close_pp; dbp->cursor = __db_cursor_pp; dbp->del = __db_del_pp; + dbp->dump = __db_dump_pp; dbp->err = __dbh_err; dbp->errx = __dbh_errx; dbp->fd = __db_fd_pp; @@ -221,10 +229,14 @@ __db_init(dbp, flags) dbp->set_flags = __db_set_flags; dbp->get_lorder = __db_get_lorder; dbp->set_lorder = __db_set_lorder; + dbp->set_msgcall = __db_set_msgcall; + dbp->get_msgfile = __db_get_msgfile; + dbp->set_msgfile = __db_set_msgfile; dbp->get_pagesize = __db_get_pagesize; dbp->set_pagesize = __db_set_pagesize; dbp->set_paniccall = __db_set_paniccall; dbp->stat = __db_stat_pp; + dbp->stat_print = __db_stat_print_pp; dbp->sync = __db_sync_pp; dbp->upgrade = __db_upgrade_pp; dbp->verify = __db_verify_pp; @@ -351,13 +363,11 @@ __db_get_dbname(dbp, fnamep, dnamep) * __db_get_env -- * Get the DB_ENV handle that was passed to db_create. */ -static int -__db_get_env(dbp, dbenvp) +static DB_ENV * +__db_get_env(dbp) DB *dbp; - DB_ENV **dbenvp; { - *dbenvp = dbp->dbenv; - return (0); + return (dbp->dbenv); } /* @@ -365,14 +375,10 @@ __db_get_env(dbp, dbenvp) * Get whether this database was created in a transaction. */ static int -__db_get_transactional(dbp, istxnp) +__db_get_transactional(dbp) DB *dbp; - int *istxnp; { - DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->get_transactional"); - - *istxnp = F_ISSET(dbp, DB_AM_TXN) ? 1 : 0; - return (0); + return (F_ISSET(dbp, DB_AM_TXN) ? 1 : 0); } /* @@ -509,7 +515,7 @@ __db_set_encrypt(dbp, passwd, flags) static void __db_set_errcall(dbp, errcall) DB *dbp; - void (*errcall) __P((const char *, char *)); + void (*errcall) __P((const DB_ENV *, const char *, const char *)); { __dbenv_set_errcall(dbp->dbenv, errcall); } @@ -581,7 +587,13 @@ __db_map_flags(dbp, inflagsp, outflagsp) } } -static int +/* + * __db_get_flags -- + * The DB->get_flags method. + * + * PUBLIC: int __db_get_flags __P((DB *, u_int32_t *)); + */ +int __db_get_flags(dbp, flagsp) DB *dbp; u_int32_t *flagsp; @@ -591,6 +603,7 @@ __db_get_flags(dbp, flagsp) DB_DUP, DB_DUPSORT, DB_ENCRYPT, + DB_INORDER, DB_RECNUM, DB_RENUMBER, DB_REVSPLITOFF, @@ -607,6 +620,9 @@ __db_get_flags(dbp, flagsp) __db_map_flags(dbp, &f, &mapped_flag); __bam_map_flags(dbp, &f, &mapped_flag); __ram_map_flags(dbp, &f, &mapped_flag); +#ifdef HAVE_QUEUE + __qam_map_flags(dbp, &f, &mapped_flag); +#endif DB_ASSERT(f == 0); if (F_ISSET(dbp, mapped_flag) == mapped_flag) LF_SET(db_flags[i]); @@ -647,6 +663,10 @@ __db_set_flags(dbp, flags) return (ret); if ((ret = __ram_set_flags(dbp, &flags)) != 0) return (ret); +#ifdef HAVE_QUEUE + if ((ret = __qam_set_flags(dbp, &flags)) != 0) + return (ret); +#endif return (flags == 0 ? 0 : __db_ferr(dbenv, "DB->set_flags", 0)); } @@ -654,8 +674,10 @@ __db_set_flags(dbp, flags) /* * __db_get_lorder -- * Get whether lorder is swapped or not. 
+ * + * PUBLIC: int __db_get_lorder __P((DB *, int *)); */ -static int +int __db_get_lorder(dbp, db_lorderp) DB *dbp; int *db_lorderp; @@ -721,6 +743,30 @@ __db_set_alloc(dbp, mal_func, real_func, free_func) return (__dbenv_set_alloc(dbp->dbenv, mal_func, real_func, free_func)); } +static void +__db_set_msgcall(dbp, msgcall) + DB *dbp; + void (*msgcall) __P((const DB_ENV *, const char *)); +{ + __dbenv_set_msgcall(dbp->dbenv, msgcall); +} + +static void +__db_get_msgfile(dbp, msgfilep) + DB *dbp; + FILE **msgfilep; +{ + __dbenv_get_msgfile(dbp->dbenv, msgfilep); +} + +static void +__db_set_msgfile(dbp, msgfile) + DB *dbp; + FILE *msgfile; +{ + __dbenv_set_msgfile(dbp->dbenv, msgfile); +} + static int __db_get_pagesize(dbp, db_pagesizep) DB *dbp; diff --git a/db/db/db_open.c b/db/db/db_open.c index 232a435f4..35e115091 100644 --- a/db/db/db_open.c +++ b/db/db/db_open.c @@ -1,20 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_open.c,v 11.240 2004/09/22 20:53:19 margo Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_open.c,v 11.236 2003/09/27 00:29:03 sue Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include -#include #include #include #endif @@ -359,8 +356,8 @@ __db_chk_meta(dbenv, dbp, meta, do_metachk) int do_metachk; { int is_hmac, ret, swapped; - u_int8_t *chksum; u_int32_t orig_chk; + u_int8_t *chksum; ret = 0; @@ -370,27 +367,26 @@ __db_chk_meta(dbenv, dbp, meta, do_metachk) is_hmac = meta->encrypt_alg == 0 ? 0 : 1; chksum = ((BTMETA *)meta)->chksum; + /* - * We cannot add this to __db_metaswap because that gets - * done later after we've verified the checksum or - * decrypted. + * If we need to swap, the checksum function overwrites the + * original checksum with 0, so we need to save a copy of the + * original for swapping later. + */ + orig_chk = *(u_int32_t *)chksum; + + /* + * We cannot add this to __db_metaswap because that gets done + * later after we've verified the checksum or decrypted. */ if (do_metachk) { - /* - * If we need to swap, the checksum function - * overwrites the original checksum with 0, so - * we need to save a copy of the original for - * swapping later. - */ - if (is_hmac == 0) - orig_chk = *(u_int32_t *)chksum; swapped = 0; -chk_retry: - if ((ret = __db_check_chksum(dbenv, +chk_retry: if ((ret = __db_check_chksum(dbenv, (DB_CIPHER *)dbenv->crypto_handle, chksum, meta, DBMETASIZE, is_hmac)) != 0) { if (is_hmac || swapped) return (ret); + M_32_SWAP(orig_chk); swapped = 1; *(u_int32_t *)chksum = orig_chk; @@ -530,6 +526,9 @@ swap_retry: return (0); bad_format: - __db_err(dbenv, "%s: unexpected file type or format", name); + if (F_ISSET(dbp, DB_AM_RECOVER)) + ret = ENOENT; + else + __db_err(dbenv, "%s: unexpected file type or format", name); return (ret == 0 ? EINVAL : ret); } diff --git a/db/db/db_overflow.c b/db/db/db_overflow.c index c510a0c9e..046e60fab 100644 --- a/db/db/db_overflow.c +++ b/db/db/db_overflow.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -38,14 +38,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
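[Editorial sketch, not part of the patch.] The __db_chk_meta hunk above saves the on-page checksum before verification unconditionally, so that when the first check fails it can byte-swap the saved value and try once more, covering metadata pages written with the opposite byte order. The shape of that retry, with a toy additive checksum in place of __db_check_chksum and a plain 32-bit swap in place of M_32_SWAP, is sketched below.

    #include <stdint.h>
    #include <stdio.h>

    /* Trivial stand-in for the real checksum: sum of the payload bytes. */
    static uint32_t
    toy_chksum(const unsigned char *p, size_t len)
    {
        uint32_t sum = 0;

        while (len-- > 0)
            sum += *p++;
        return (sum);
    }

    static uint32_t
    swap32(uint32_t v)
    {
        return ((v >> 24) | ((v >> 8) & 0xff00) |
            ((v << 8) & 0xff0000) | (v << 24));
    }

    /*
     * Verify a stored checksum, retrying once with the bytes swapped, the
     * way the patched __db_chk_meta handles other-endian metadata.
     */
    static int
    check_meta(uint32_t stored, const unsigned char *payload, size_t len)
    {
        uint32_t expect = toy_chksum(payload, len);
        int swapped = 0;

        for (;;) {
            if (stored == expect)
                return (0);
            if (swapped)
                return (-1);    /* genuinely corrupt */
            stored = swap32(stored);
            swapped = 1;
        }
    }

    int
    main(void)
    {
        unsigned char page[] = "metadata";
        uint32_t sum = toy_chksum(page, sizeof(page));

        printf("%d\n", check_meta(sum, page, sizeof(page)));            /* 0 */
        printf("%d\n", check_meta(swap32(sum), page, sizeof(page)));    /* 0 */
        printf("%d\n", check_meta(sum ^ 1, page, sizeof(page)));        /* -1 */
        return (0);
    }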
+ * + * $Id: db_overflow.c,v 11.54 2004/03/28 17:17:50 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_overflow.c,v 11.51 2003/06/30 17:19:46 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -117,7 +115,7 @@ __db_goff(dbp, dbt, tlen, pgno, bpp, bpsz) if (F_ISSET(dbt, DB_DBT_USERMEM)) { if (needed > dbt->ulen) { dbt->size = needed; - return (ENOMEM); + return (DB_BUFFER_SMALL); } } else if (F_ISSET(dbt, DB_DBT_MALLOC)) { if ((ret = __os_umalloc(dbenv, needed, &dbt->data)) != 0) @@ -125,13 +123,20 @@ __db_goff(dbp, dbt, tlen, pgno, bpp, bpsz) } else if (F_ISSET(dbt, DB_DBT_REALLOC)) { if ((ret = __os_urealloc(dbenv, needed, &dbt->data)) != 0) return (ret); - } else if (*bpsz == 0 || *bpsz < needed) { + } else if (bpsz != NULL && (*bpsz == 0 || *bpsz < needed)) { if ((ret = __os_realloc(dbenv, needed, bpp)) != 0) return (ret); *bpsz = needed; dbt->data = *bpp; - } else + } else if (bpp != NULL) dbt->data = *bpp; + else { + DB_ASSERT( + F_ISSET(dbt, + DB_DBT_USERMEM | DB_DBT_MALLOC | DB_DBT_REALLOC) || + bpsz != NULL || bpp != NULL); + return (DB_BUFFER_SMALL); + } /* * Step through the linked list of pages, copying the data on each diff --git a/db/db/db_ovfl_vrfy.c b/db/db/db_ovfl_vrfy.c index 07c728f78..a3c5fba7c 100644 --- a/db/db/db_ovfl_vrfy.c +++ b/db/db/db_ovfl_vrfy.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -38,14 +38,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * + * $Id: db_ovfl_vrfy.c,v 11.56 2004/01/28 03:35:57 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_ovfl_vrfy.c,v 11.55 2003/06/30 17:19:46 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include diff --git a/db/db/db_pr.c b/db/db/db_pr.c index 4fbe8f4fd..7282173d8 100644 --- a/db/db/db_pr.c +++ b/db/db/db_pr.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_pr.c,v 11.120 2004/10/11 18:47:50 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_pr.c,v 11.94 2003/06/30 17:19:46 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -28,15 +26,6 @@ static const char revid[] = "$Id: db_pr.c,v 11.94 2003/06/30 17:19:46 bostic Exp #include "dbinc/qam.h" #include "dbinc/db_verify.h" -static int __db_bmeta __P((DB *, FILE *, BTMETA *, u_int32_t)); -static int __db_hmeta __P((DB *, FILE *, HMETA *, u_int32_t)); -static void __db_meta __P((DB *, DBMETA *, FILE *, FN const *, u_int32_t)); -static const char *__db_pagetype_to_string __P((u_int32_t)); -static void __db_prdb __P((DB *, FILE *)); -static void __db_proff __P((void *, FILE *)); -static int __db_prtree __P((DB *, FILE *, u_int32_t)); -static int __db_qmeta __P((DB *, FILE *, QMETA *, u_int32_t)); - /* * __db_loadme -- * A nice place to put a breakpoint. 
@@ -51,21 +40,34 @@ __db_loadme() __os_id(&id); } +#ifdef HAVE_STATISTICS +static int __db_bmeta __P((DB *, BTMETA *, u_int32_t)); +static int __db_hmeta __P((DB *, HMETA *, u_int32_t)); +static void __db_meta __P((DB *, DBMETA *, FN const *, u_int32_t)); +static const char *__db_pagetype_to_string __P((u_int32_t)); +static void __db_prdb __P((DB *, u_int32_t)); +static void __db_proff __P((DB_ENV *, DB_MSGBUF *, void *)); +static int __db_prtree __P((DB *, u_int32_t)); +static int __db_qmeta __P((DB *, QMETA *, u_int32_t)); + /* - * __db_dump -- + * __db_dumptree -- * Dump the tree to a file. * - * PUBLIC: int __db_dump __P((DB *, char *, char *)); + * PUBLIC: int __db_dumptree __P((DB *, char *, char *)); */ int -__db_dump(dbp, op, name) +__db_dumptree(dbp, op, name) DB *dbp; char *op, *name; { - FILE *fp; + DB_ENV *dbenv; + FILE *fp, *orig_fp; u_int32_t flags; int ret; + dbenv = dbp->dbenv; + for (flags = 0; *op != '\0'; ++op) switch (*op) { case 'a': @@ -80,74 +82,74 @@ __db_dump(dbp, op, name) return (EINVAL); } - if (name == NULL) - fp = stdout; - else { + if (name != NULL) { if ((fp = fopen(name, "w")) == NULL) return (__os_get_errno()); - } - __db_prdb(dbp, fp); + orig_fp = dbenv->db_msgfile; + dbenv->db_msgfile = fp; + } else + fp = orig_fp = NULL; + + __db_prdb(dbp, flags); - fprintf(fp, "%s\n", DB_LINE); + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); - ret = __db_prtree(dbp, fp, flags); + ret = __db_prtree(dbp, flags); - (void)fflush(fp); - if (name != NULL) + if (fp != NULL) { (void)fclose(fp); + dbenv->db_msgfile = orig_fp; + } return (ret); } +static const FN __db_flags_fn[] = { + { DB_AM_CHKSUM, "checksumming" }, + { DB_AM_CL_WRITER, "client replica writer" }, + { DB_AM_COMPENSATE, "created by compensating transaction" }, + { DB_AM_CREATED, "database created" }, + { DB_AM_CREATED_MSTR, "encompassing file created" }, + { DB_AM_DBM_ERROR, "dbm/ndbm error" }, + { DB_AM_DELIMITER, "variable length" }, + { DB_AM_DIRTY, "dirty reads" }, + { DB_AM_DISCARD, "discard cached pages" }, + { DB_AM_DUP, "duplicates" }, + { DB_AM_DUPSORT, "sorted duplicates" }, + { DB_AM_ENCRYPT, "encrypted" }, + { DB_AM_FIXEDLEN, "fixed-length records" }, + { DB_AM_INMEM, "in-memory" }, + { DB_AM_IN_RENAME, "file is being renamed" }, + { DB_AM_NOT_DURABLE, "changes not logged" }, + { DB_AM_OPEN_CALLED, "open called" }, + { DB_AM_PAD, "pad value" }, + { DB_AM_PGDEF, "default page size" }, + { DB_AM_RDONLY, "read-only" }, + { DB_AM_RECNUM, "Btree record numbers" }, + { DB_AM_RECOVER, "opened for recovery" }, + { DB_AM_RENUMBER, "renumber" }, + { DB_AM_REPLICATION, "replication file" }, + { DB_AM_REVSPLITOFF, "no reverse splits" }, + { DB_AM_SECONDARY, "secondary" }, + { DB_AM_SNAPSHOT, "load on open" }, + { DB_AM_SUBDB, "subdatabases" }, + { DB_AM_SWAP, "needswap" }, + { DB_AM_TXN, "transactional" }, + { DB_AM_VERIFYING, "verifier" }, + { 0, NULL } +}; + /* - * __db_inmemdbflags -- - * Call a callback for printing or other handling of strings associated - * with whatever in-memory DB structure flags are set. + * __db_get_flags_fn -- + * Return the __db_flags_fn array. 
* - * PUBLIC: void __db_inmemdbflags __P((u_int32_t, void *, - * PUBLIC: void (*)(u_int32_t, const FN *, void *))); + * PUBLIC: const FN * __db_get_flags_fn __P((void)); */ -void -__db_inmemdbflags(flags, cookie, callback) - u_int32_t flags; - void *cookie; - void (*callback) __P((u_int32_t, const FN *, void *)); +const FN * +__db_get_flags_fn() { - static const FN fn[] = { - { DB_AM_CHKSUM, "checksumming" }, - { DB_AM_CL_WRITER, "client replica writer" }, - { DB_AM_COMPENSATE, "created by compensating transaction" }, - { DB_AM_CREATED, "database created" }, - { DB_AM_CREATED_MSTR, "encompassing file created" }, - { DB_AM_DBM_ERROR, "dbm/ndbm error" }, - { DB_AM_DELIMITER, "variable length" }, - { DB_AM_DIRTY, "dirty reads" }, - { DB_AM_DISCARD, "discard cached pages" }, - { DB_AM_DUP, "duplicates" }, - { DB_AM_DUPSORT, "sorted duplicates" }, - { DB_AM_ENCRYPT, "encrypted" }, - { DB_AM_FIXEDLEN, "fixed-length records" }, - { DB_AM_INMEM, "in-memory" }, - { DB_AM_IN_RENAME, "file is being renamed" }, - { DB_AM_OPEN_CALLED, "DB->open called" }, - { DB_AM_PAD, "pad value" }, - { DB_AM_PGDEF, "default page size" }, - { DB_AM_RDONLY, "read-only" }, - { DB_AM_RECNUM, "Btree record numbers" }, - { DB_AM_RECOVER, "opened for recovery" }, - { DB_AM_RENUMBER, "renumber" }, - { DB_AM_REVSPLITOFF, "no reverse splits" }, - { DB_AM_SECONDARY, "secondary" }, - { DB_AM_SNAPSHOT, "load on open" }, - { DB_AM_SUBDB, "subdatabases" }, - { DB_AM_SWAP, "needswap" }, - { DB_AM_TXN, "transactional" }, - { DB_AM_VERIFYING, "verifier" }, - { 0, NULL } - }; - - callback(flags, fn, cookie); + return (__db_flags_fn); } /* @@ -155,56 +157,65 @@ __db_inmemdbflags(flags, cookie, callback) * Print out the DB structure information. */ static void -__db_prdb(dbp, fp) +__db_prdb(dbp, flags) DB *dbp; - FILE *fp; + u_int32_t flags; { + DB_MSGBUF mb; + DB_ENV *dbenv; BTREE *bt; HASH *h; QUEUE *q; - fprintf(fp, - "In-memory DB structure:\n%s: %#lx", + dbenv = dbp->dbenv; + + DB_MSGBUF_INIT(&mb); + __db_msg(dbenv, "In-memory DB structure:"); + __db_msgadd(dbenv, &mb, "%s: %#lx", __db_dbtype_to_string(dbp->type), (u_long)dbp->flags); - __db_inmemdbflags(dbp->flags, fp, __db_prflags); - fprintf(fp, "\n"); + __db_prflags(dbenv, &mb, dbp->flags, __db_flags_fn, " (", ")"); + DB_MSGBUF_FLUSH(dbenv, &mb); switch (dbp->type) { case DB_BTREE: case DB_RECNO: bt = dbp->bt_internal; - fprintf(fp, "bt_meta: %lu bt_root: %lu\n", + __db_msg(dbenv, "bt_meta: %lu bt_root: %lu", (u_long)bt->bt_meta, (u_long)bt->bt_root); - fprintf(fp, "bt_maxkey: %lu bt_minkey: %lu\n", + __db_msg(dbenv, "bt_maxkey: %lu bt_minkey: %lu", (u_long)bt->bt_maxkey, (u_long)bt->bt_minkey); - fprintf(fp, "bt_compare: %#lx bt_prefix: %#lx\n", - P_TO_ULONG(bt->bt_compare), P_TO_ULONG(bt->bt_prefix)); - fprintf(fp, "bt_lpgno: %lu\n", (u_long)bt->bt_lpgno); + if (!LF_ISSET(DB_PR_RECOVERYTEST)) + __db_msg(dbenv, "bt_compare: %#lx bt_prefix: %#lx", + P_TO_ULONG(bt->bt_compare), + P_TO_ULONG(bt->bt_prefix)); + __db_msg(dbenv, "bt_lpgno: %lu", (u_long)bt->bt_lpgno); if (dbp->type == DB_RECNO) { - fprintf(fp, - "re_pad: %#lx re_delim: %#lx re_len: %lu re_source: %s\n", + __db_msg(dbenv, + "re_pad: %#lx re_delim: %#lx re_len: %lu re_source: %s", (u_long)bt->re_pad, (u_long)bt->re_delim, (u_long)bt->re_len, bt->re_source == NULL ? 
"" : bt->re_source); - fprintf(fp, "re_modified: %d re_eof: %d re_last: %lu\n", + __db_msg(dbenv, + "re_modified: %d re_eof: %d re_last: %lu", bt->re_modified, bt->re_eof, (u_long)bt->re_last); } break; case DB_HASH: h = dbp->h_internal; - fprintf(fp, "meta_pgno: %lu\n", (u_long)h->meta_pgno); - fprintf(fp, "h_ffactor: %lu\n", (u_long)h->h_ffactor); - fprintf(fp, "h_nelem: %lu\n", (u_long)h->h_nelem); - fprintf(fp, "h_hash: %#lx\n", P_TO_ULONG(h->h_hash)); + __db_msg(dbenv, "meta_pgno: %lu", (u_long)h->meta_pgno); + __db_msg(dbenv, "h_ffactor: %lu", (u_long)h->h_ffactor); + __db_msg(dbenv, "h_nelem: %lu", (u_long)h->h_nelem); + if (!LF_ISSET(DB_PR_RECOVERYTEST)) + __db_msg(dbenv, "h_hash: %#lx", P_TO_ULONG(h->h_hash)); break; case DB_QUEUE: q = dbp->q_internal; - fprintf(fp, "q_meta: %lu\n", (u_long)q->q_meta); - fprintf(fp, "q_root: %lu\n", (u_long)q->q_root); - fprintf(fp, "re_pad: %#lx re_len: %lu\n", + __db_msg(dbenv, "q_meta: %lu", (u_long)q->q_meta); + __db_msg(dbenv, "q_root: %lu", (u_long)q->q_root); + __db_msg(dbenv, "re_pad: %#lx re_len: %lu", (u_long)q->re_pad, (u_long)q->re_len); - fprintf(fp, "rec_page: %lu\n", (u_long)q->rec_page); - fprintf(fp, "page_ext: %lu\n", (u_long)q->page_ext); + __db_msg(dbenv, "rec_page: %lu", (u_long)q->rec_page); + __db_msg(dbenv, "page_ext: %lu", (u_long)q->page_ext); break; case DB_UNKNOWN: default: @@ -217,9 +228,8 @@ __db_prdb(dbp, fp) * Print out the entire tree. */ static int -__db_prtree(dbp, fp, flags) +__db_prtree(dbp, flags) DB *dbp; - FILE *fp; u_int32_t flags; { DB_MPOOLFILE *mpf; @@ -230,7 +240,7 @@ __db_prtree(dbp, fp, flags) mpf = dbp->mpf; if (dbp->type == DB_QUEUE) - return (__db_prqueue(dbp, fp, flags)); + return (__db_prqueue(dbp, flags)); /* * Find out the page number of the last page in the database, then @@ -240,7 +250,7 @@ __db_prtree(dbp, fp, flags) for (i = 0; i <= last; ++i) { if ((ret = __memp_fget(mpf, &i, 0, &h)) != 0) return (ret); - (void)__db_prpage(dbp, h, fp, flags); + (void)__db_prpage(dbp, h, flags); if ((ret = __memp_fput(mpf, h, 0)) != 0) return (ret); } @@ -253,13 +263,14 @@ __db_prtree(dbp, fp, flags) * Print out common metadata information. */ static void -__db_meta(dbp, dbmeta, fp, fn, flags) +__db_meta(dbp, dbmeta, fn, flags) DB *dbp; DBMETA *dbmeta; - FILE *fp; FN const *fn; u_int32_t flags; { + DB_MSGBUF mb; + DB_ENV *dbenv; DB_MPOOLFILE *mpf; PAGE *h; db_pgno_t pgno; @@ -267,58 +278,62 @@ __db_meta(dbp, dbmeta, fp, fn, flags) int cnt, ret; const char *sep; + dbenv = dbp->dbenv; mpf = dbp->mpf; + DB_MSGBUF_INIT(&mb); - fprintf(fp, "\tmagic: %#lx\n", (u_long)dbmeta->magic); - fprintf(fp, "\tversion: %lu\n", (u_long)dbmeta->version); - fprintf(fp, "\tpagesize: %lu\n", (u_long)dbmeta->pagesize); - fprintf(fp, "\ttype: %lu\n", (u_long)dbmeta->type); - fprintf(fp, "\tkeys: %lu\trecords: %lu\n", + __db_msg(dbenv, "\tmagic: %#lx", (u_long)dbmeta->magic); + __db_msg(dbenv, "\tversion: %lu", (u_long)dbmeta->version); + __db_msg(dbenv, "\tpagesize: %lu", (u_long)dbmeta->pagesize); + __db_msg(dbenv, "\ttype: %lu", (u_long)dbmeta->type); + __db_msg(dbenv, "\tkeys: %lu\trecords: %lu", (u_long)dbmeta->key_count, (u_long)dbmeta->record_count); + /* + * If we're doing recovery testing, don't display the free list, + * it may have changed and that makes the dump diff not work. + */ if (!LF_ISSET(DB_PR_RECOVERYTEST)) { - /* - * If we're doing recovery testing, don't display the free - * list, it may have changed and that makes the dump diff - * not work. 
- */ - fprintf(fp, "\tfree list: %lu", (u_long)dbmeta->free); + __db_msgadd( + dbenv, &mb, "\tfree list: %lu", (u_long)dbmeta->free); for (pgno = dbmeta->free, cnt = 0, sep = ", "; pgno != PGNO_INVALID;) { if ((ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) { - fprintf(fp, - "Unable to retrieve free-list page: %lu: %s\n", + DB_MSGBUF_FLUSH(dbenv, &mb); + __db_msg(dbenv, + "Unable to retrieve free-list page: %lu: %s", (u_long)pgno, db_strerror(ret)); break; } pgno = h->next_pgno; (void)__memp_fput(mpf, h, 0); - fprintf(fp, "%s%lu", sep, (u_long)pgno); + __db_msgadd(dbenv, &mb, "%s%lu", sep, (u_long)pgno); if (++cnt % 10 == 0) { - fprintf(fp, "\n"); + DB_MSGBUF_FLUSH(dbenv, &mb); cnt = 0; sep = "\t"; } else sep = ", "; } - fprintf(fp, "\n"); - fprintf(fp, "\tlast_pgno: %lu\n", (u_long)dbmeta->last_pgno); + DB_MSGBUF_FLUSH(dbenv, &mb); + __db_msg(dbenv, "\tlast_pgno: %lu", (u_long)dbmeta->last_pgno); } if (fn != NULL) { - fprintf(fp, "\tflags: %#lx", (u_long)dbmeta->flags); - __db_prflags(dbmeta->flags, fn, fp); - fprintf(fp, "\n"); + DB_MSGBUF_FLUSH(dbenv, &mb); + __db_msgadd(dbenv, &mb, "\tflags: %#lx", (u_long)dbmeta->flags); + __db_prflags(dbenv, &mb, dbmeta->flags, fn, " (", ")"); } - fprintf(fp, "\tuid: "); + DB_MSGBUF_FLUSH(dbenv, &mb); + __db_msgadd(dbenv, &mb, "\tuid: "); for (p = (u_int8_t *)dbmeta->uid, cnt = 0; cnt < DB_FILE_ID_LEN; ++cnt) { - fprintf(fp, "%x", *p++); + __db_msgadd(dbenv, &mb, "%x", *p++); if (cnt < DB_FILE_ID_LEN - 1) - fprintf(fp, " "); + __db_msgadd(dbenv, &mb, " "); } - fprintf(fp, "\n"); + DB_MSGBUF_FLUSH(dbenv, &mb); } /* @@ -326,30 +341,33 @@ __db_meta(dbp, dbmeta, fp, fn, flags) * Print out the btree meta-data page. */ static int -__db_bmeta(dbp, fp, h, flags) +__db_bmeta(dbp, h, flags) DB *dbp; - FILE *fp; BTMETA *h; u_int32_t flags; { - static const FN mfn[] = { + static const FN fn[] = { { BTM_DUP, "duplicates" }, { BTM_RECNO, "recno" }, { BTM_RECNUM, "btree:recnum" }, { BTM_FIXEDLEN, "recno:fixed-length" }, { BTM_RENUMBER, "recno:renumber" }, { BTM_SUBDB, "multiple-databases" }, + { BTM_DUPSORT, "sorted duplicates" }, { 0, NULL } }; + DB_ENV *dbenv; + + dbenv = dbp->dbenv; - __db_meta(dbp, (DBMETA *)h, fp, mfn, flags); + __db_meta(dbp, (DBMETA *)h, fn, flags); - fprintf(fp, "\tmaxkey: %lu minkey: %lu\n", + __db_msg(dbenv, "\tmaxkey: %lu minkey: %lu", (u_long)h->maxkey, (u_long)h->minkey); if (dbp->type == DB_RECNO) - fprintf(fp, "\tre_len: %#lx re_pad: %lu\n", + __db_msg(dbenv, "\tre_len: %#lx re_pad: %#lx", (u_long)h->re_len, (u_long)h->re_pad); - fprintf(fp, "\troot: %lu\n", (u_long)h->root); + __db_msg(dbenv, "\troot: %lu", (u_long)h->root); return (0); } @@ -359,31 +377,36 @@ __db_bmeta(dbp, fp, h, flags) * Print out the hash meta-data page. 
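[Editorial sketch, not part of the patch.] The db_pr.c conversions above replace direct fprintf calls with the environment's message channel: fragments of a line accumulate via __db_msgadd into a DB_MSGBUF and are emitted as one unit by DB_MSGBUF_FLUSH (or by __db_msg for a whole line), so the same code can serve a message file, a message callback, or stdout. The sketch below models only the accumulate-then-flush behavior; msgbuf_add and msgbuf_flush are illustrative, not the DB_MSGBUF API.

    #include <stdarg.h>
    #include <stdio.h>

    struct msgbuf {
        char    buf[256];
        size_t  len;
    };

    /* Accumulate a formatted fragment; nothing is printed yet. */
    static void
    msgbuf_add(struct msgbuf *mb, const char *fmt, ...)
    {
        va_list ap;
        int n;

        va_start(ap, fmt);
        n = vsnprintf(mb->buf + mb->len, sizeof(mb->buf) - mb->len, fmt, ap);
        va_end(ap);
        if (n > 0)
            mb->len += (size_t)n < sizeof(mb->buf) - mb->len ?
                (size_t)n : sizeof(mb->buf) - 1 - mb->len;
    }

    /* Emit the accumulated line through whatever sink is configured. */
    static void
    msgbuf_flush(struct msgbuf *mb, FILE *sink)
    {
        if (mb->len > 0)
            fprintf(sink, "%s\n", mb->buf);
        mb->len = 0;
        mb->buf[0] = '\0';
    }

    int
    main(void)
    {
        struct msgbuf mb = { "", 0 };

        /* Build one output line from several pieces, much as __db_meta
         * does for the free-list dump, then flush it as a unit. */
        msgbuf_add(&mb, "\tfree list: %lu", 7UL);
        msgbuf_add(&mb, "%s%lu", ", ", 9UL);
        msgbuf_flush(&mb, stdout);
        return (0);
    }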
*/ static int -__db_hmeta(dbp, fp, h, flags) +__db_hmeta(dbp, h, flags) DB *dbp; - FILE *fp; HMETA *h; u_int32_t flags; { - static const FN mfn[] = { - { DB_HASH_DUP, "duplicates" }, - { DB_HASH_SUBDB, "multiple-databases" }, - { 0, NULL } + DB_MSGBUF mb; + static const FN fn[] = { + { DB_HASH_DUP, "duplicates" }, + { DB_HASH_SUBDB, "multiple-databases" }, + { DB_HASH_DUPSORT, "sorted duplicates" }, + { 0, NULL } }; + DB_ENV *dbenv; int i; - __db_meta(dbp, (DBMETA *)h, fp, mfn, flags); + dbenv = dbp->dbenv; + DB_MSGBUF_INIT(&mb); + + __db_meta(dbp, (DBMETA *)h, fn, flags); - fprintf(fp, "\tmax_bucket: %lu\n", (u_long)h->max_bucket); - fprintf(fp, "\thigh_mask: %#lx\n", (u_long)h->high_mask); - fprintf(fp, "\tlow_mask: %#lx\n", (u_long)h->low_mask); - fprintf(fp, "\tffactor: %lu\n", (u_long)h->ffactor); - fprintf(fp, "\tnelem: %lu\n", (u_long)h->nelem); - fprintf(fp, "\th_charkey: %#lx\n", (u_long)h->h_charkey); - fprintf(fp, "\tspare points: "); + __db_msg(dbenv, "\tmax_bucket: %lu", (u_long)h->max_bucket); + __db_msg(dbenv, "\thigh_mask: %#lx", (u_long)h->high_mask); + __db_msg(dbenv, "\tlow_mask: %#lx", (u_long)h->low_mask); + __db_msg(dbenv, "\tffactor: %lu", (u_long)h->ffactor); + __db_msg(dbenv, "\tnelem: %lu", (u_long)h->nelem); + __db_msg(dbenv, "\th_charkey: %#lx", (u_long)h->h_charkey); + __db_msgadd(dbenv, &mb, "\tspare points: "); for (i = 0; i < NCACHED; i++) - fprintf(fp, "%lu ", (u_long)h->spares[i]); - fprintf(fp, "\n"); + __db_msgadd(dbenv, &mb, "%lu ", (u_long)h->spares[i]); + DB_MSGBUF_FLUSH(dbenv, &mb); return (0); } @@ -393,20 +416,23 @@ __db_hmeta(dbp, fp, h, flags) * Print out the queue meta-data page. */ static int -__db_qmeta(dbp, fp, h, flags) +__db_qmeta(dbp, h, flags) DB *dbp; - FILE *fp; QMETA *h; u_int32_t flags; { - __db_meta(dbp, (DBMETA *)h, fp, NULL, flags); + DB_ENV *dbenv; + + dbenv = dbp->dbenv; + + __db_meta(dbp, (DBMETA *)h, NULL, flags); - fprintf(fp, "\tfirst_recno: %lu\n", (u_long)h->first_recno); - fprintf(fp, "\tcur_recno: %lu\n", (u_long)h->cur_recno); - fprintf(fp, "\tre_len: %#lx re_pad: %lu\n", + __db_msg(dbenv, "\tfirst_recno: %lu", (u_long)h->first_recno); + __db_msg(dbenv, "\tcur_recno: %lu", (u_long)h->cur_recno); + __db_msg(dbenv, "\tre_len: %#lx re_pad: %lu", (u_long)h->re_len, (u_long)h->re_pad); - fprintf(fp, "\trec_page: %lu\n", (u_long)h->rec_page); - fprintf(fp, "\tpage_ext: %lu\n", (u_long)h->page_ext); + __db_msg(dbenv, "\trec_page: %lu", (u_long)h->rec_page); + __db_msg(dbenv, "\tpage_ext: %lu", (u_long)h->page_ext); return (0); } @@ -415,13 +441,12 @@ __db_qmeta(dbp, fp, h, flags) * __db_prnpage * -- Print out a specific page. * - * PUBLIC: int __db_prnpage __P((DB *, db_pgno_t, FILE *)); + * PUBLIC: int __db_prnpage __P((DB *, db_pgno_t)); */ int -__db_prnpage(dbp, pgno, fp) +__db_prnpage(dbp, pgno) DB *dbp; db_pgno_t pgno; - FILE *fp; { DB_MPOOLFILE *mpf; PAGE *h; @@ -432,7 +457,7 @@ __db_prnpage(dbp, pgno, fp) if ((ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) return (ret); - ret = __db_prpage(dbp, h, fp, DB_PR_PAGE); + ret = __db_prpage(dbp, h, DB_PR_PAGE); if ((t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) ret = t_ret; @@ -444,17 +469,18 @@ __db_prnpage(dbp, pgno, fp) * __db_prpage * -- Print out a page. 
* - * PUBLIC: int __db_prpage __P((DB *, PAGE *, FILE *, u_int32_t)); + * PUBLIC: int __db_prpage __P((DB *, PAGE *, u_int32_t)); */ int -__db_prpage(dbp, h, fp, flags) +__db_prpage(dbp, h, flags) DB *dbp; PAGE *h; - FILE *fp; u_int32_t flags; { BINTERNAL *bi; BKEYDATA *bk; + DB_ENV *dbenv; + DB_MSGBUF mb; HOFFPAGE a_hkd; QAMDATA *qp, *qep; RINTERNAL *ri; @@ -467,6 +493,9 @@ __db_prpage(dbp, h, fp, flags) const char *s; void *sp; + dbenv = dbp->dbenv; + DB_MSGBUF_INIT(&mb); + /* * If we're doing recovery testing and this page is P_INVALID, * assume it's a page that's on the free list, and don't display it. @@ -474,9 +503,8 @@ __db_prpage(dbp, h, fp, flags) if (LF_ISSET(DB_PR_RECOVERYTEST) && TYPE(h) == P_INVALID) return (0); - s = __db_pagetype_to_string(TYPE(h)); - if (s == NULL) { - fprintf(fp, "ILLEGAL PAGE TYPE: page: %lu type: %lu\n", + if ((s = __db_pagetype_to_string(TYPE(h))) == NULL) { + __db_msg(dbenv, "ILLEGAL PAGE TYPE: page: %lu type: %lu", (u_long)h->pgno, (u_long)TYPE(h)); return (1); } @@ -490,27 +518,28 @@ __db_prpage(dbp, h, fp, flags) pagesize = (u_int32_t)dbp->mpf->mfp->stat.st_pagesize; /* Page number, page type. */ - fprintf(fp, "page %lu: %s level: %lu", + __db_msgadd(dbenv, &mb, "page %lu: %s level: %lu", (u_long)h->pgno, s, (u_long)h->level); /* Record count. */ if (TYPE(h) == P_IBTREE || TYPE(h) == P_IRECNO || (TYPE(h) == P_LRECNO && h->pgno == ((BTREE *)dbp->bt_internal)->bt_root)) - fprintf(fp, " records: %lu", (u_long)RE_NREC(h)); + __db_msgadd(dbenv, &mb, " records: %lu", (u_long)RE_NREC(h)); /* LSN. */ if (!LF_ISSET(DB_PR_RECOVERYTEST)) - fprintf(fp, " (lsn.file: %lu lsn.offset: %lu)\n", + __db_msgadd(dbenv, &mb, " (lsn.file: %lu lsn.offset: %lu)", (u_long)LSN(h).file, (u_long)LSN(h).offset); + DB_MSGBUF_FLUSH(dbenv, &mb); switch (TYPE(h)) { case P_BTREEMETA: - return (__db_bmeta(dbp, fp, (BTMETA *)h, flags)); + return (__db_bmeta(dbp, (BTMETA *)h, flags)); case P_HASHMETA: - return (__db_hmeta(dbp, fp, (HMETA *)h, flags)); + return (__db_hmeta(dbp, (HMETA *)h, flags)); case P_QAMMETA: - return (__db_qmeta(dbp, fp, (QMETA *)h, flags)); + return (__db_qmeta(dbp, (QMETA *)h, flags)); case P_QAMDATA: /* Should be meta->start. */ if (!LF_ISSET(DB_PR_PAGE)) return (0); @@ -524,11 +553,11 @@ __db_prpage(dbp, h, fp, flags) if (!F_ISSET(qp, QAM_SET)) continue; - fprintf(fp, "%s", + __db_msgadd(dbenv, &mb, "%s", F_ISSET(qp, QAM_VALID) ? "\t" : " D"); - fprintf(fp, "[%03lu] %4lu ", (u_long)recno, + __db_msgadd(dbenv, &mb, "[%03lu] %4lu ", (u_long)recno, (u_long)((u_int8_t *)qp - (u_int8_t *)h)); - __db_pr(qp->data, qlen, fp); + __db_pr(dbenv, &mb, qp->data, qlen); } return (0); default: @@ -537,22 +566,24 @@ __db_prpage(dbp, h, fp, flags) /* LSN. 
*/ if (LF_ISSET(DB_PR_RECOVERYTEST)) - fprintf(fp, " (lsn.file: %lu lsn.offset: %lu)\n", + __db_msg(dbenv, " (lsn.file: %lu lsn.offset: %lu)", (u_long)LSN(h).file, (u_long)LSN(h).offset); s = "\t"; if (TYPE(h) != P_IBTREE && TYPE(h) != P_IRECNO) { - fprintf(fp, "%sprev: %4lu next: %4lu", + __db_msgadd(dbenv, &mb, "%sprev: %4lu next: %4lu", s, (u_long)PREV_PGNO(h), (u_long)NEXT_PGNO(h)); s = " "; } if (TYPE(h) == P_OVERFLOW) { - fprintf(fp, "%sref cnt: %4lu ", s, (u_long)OV_REF(h)); - __db_pr((u_int8_t *)h + P_OVERHEAD(dbp), OV_LEN(h), fp); + __db_msgadd(dbenv, &mb, + "%sref cnt: %4lu ", s, (u_long)OV_REF(h)); + __db_pr(dbenv, &mb, (u_int8_t *)h + P_OVERHEAD(dbp), OV_LEN(h)); return (0); } - fprintf(fp, "%sentries: %4lu", s, (u_long)NUM_ENT(h)); - fprintf(fp, " offset: %4lu\n", (u_long)HOFFSET(h)); + __db_msgadd(dbenv, &mb, "%sentries: %4lu", s, (u_long)NUM_ENT(h)); + __db_msgadd(dbenv, &mb, " offset: %4lu", (u_long)HOFFSET(h)); + DB_MSGBUF_FLUSH(dbenv, &mb); if (TYPE(h) == P_INVALID || !LF_ISSET(DB_PR_PAGE)) return (0); @@ -560,11 +591,11 @@ __db_prpage(dbp, h, fp, flags) ret = 0; inp = P_INP(dbp, h); for (i = 0; i < NUM_ENT(h); i++) { - if ((db_alignp_t)(P_ENTRY(dbp, h, i) - (u_int8_t *)h) < - (db_alignp_t)(P_OVERHEAD(dbp)) || + if ((uintptr_t)(P_ENTRY(dbp, h, i) - (u_int8_t *)h) < + (uintptr_t)(P_OVERHEAD(dbp)) || (size_t)(P_ENTRY(dbp, h, i) - (u_int8_t *)h) >= pagesize) { - fprintf(fp, - "ILLEGAL PAGE OFFSET: indx: %lu of %lu\n", + __db_msg(dbenv, + "ILLEGAL PAGE OFFSET: indx: %lu of %lu", (u_long)i, (u_long)inp[i]); ret = EINVAL; continue; @@ -589,8 +620,9 @@ __db_prpage(dbp, h, fp, flags) default: goto type_err; } - fprintf(fp, "%s", deleted ? " D" : "\t"); - fprintf(fp, "[%03lu] %4lu ", (u_long)i, (u_long)inp[i]); + __db_msgadd(dbenv, &mb, "%s", deleted ? " D" : "\t"); + __db_msgadd( + dbenv, &mb, "[%03lu] %4lu ", (u_long)i, (u_long)inp[i]); switch (TYPE(h)) { case P_HASH: hk = sp; @@ -598,8 +630,9 @@ __db_prpage(dbp, h, fp, flags) case H_OFFDUP: memcpy(&pgno, HOFFDUP_PGNO(hk), sizeof(db_pgno_t)); - fprintf(fp, - "%4lu [offpage dups]\n", (u_long)pgno); + __db_msgadd(dbenv, &mb, + "%4lu [offpage dups]", (u_long)pgno); + DB_MSGBUF_FLUSH(dbenv, &mb); break; case H_DUPLICATE: /* @@ -613,29 +646,32 @@ __db_prpage(dbp, h, fp, flags) else len = 1; - fprintf(fp, "Duplicates:\n"); + __db_msgadd(dbenv, &mb, "Duplicates:"); + DB_MSGBUF_FLUSH(dbenv, &mb); for (p = HKEYDATA_DATA(hk), ep = p + len; p < ep;) { memcpy(&dlen, p, sizeof(db_indx_t)); p += sizeof(db_indx_t); - fprintf(fp, "\t\t"); - __db_pr(p, dlen, fp); + __db_msgadd(dbenv, &mb, "\t\t"); + __db_pr(dbenv, &mb, p, dlen); p += sizeof(db_indx_t) + dlen; } break; case H_KEYDATA: - __db_pr(HKEYDATA_DATA(hk), + __db_pr(dbenv, &mb, HKEYDATA_DATA(hk), LEN_HKEYDATA(dbp, h, i == 0 ? 
- pagesize : 0, i), fp); + pagesize : 0, i)); break; case H_OFFPAGE: memcpy(&a_hkd, hk, HOFFPAGE_SIZE); - fprintf(fp, - "overflow: total len: %4lu page: %4lu\n", + __db_msgadd(dbenv, &mb, + "overflow: total len: %4lu page: %4lu", (u_long)a_hkd.tlen, (u_long)a_hkd.pgno); + DB_MSGBUF_FLUSH(dbenv, &mb); break; default: - fprintf(fp, "ILLEGAL HASH PAGE TYPE: %lu\n", + DB_MSGBUF_FLUSH(dbenv, &mb); + __db_msg(dbenv, "ILLEGAL HASH PAGE TYPE: %lu", (u_long)HPAGE_PTYPE(hk)); ret = EINVAL; break; @@ -643,19 +679,21 @@ __db_prpage(dbp, h, fp, flags) break; case P_IBTREE: bi = sp; - fprintf(fp, "count: %4lu pgno: %4lu type: %4lu", + __db_msgadd(dbenv, &mb, + "count: %4lu pgno: %4lu type: %4lu", (u_long)bi->nrecs, (u_long)bi->pgno, (u_long)bi->type); switch (B_TYPE(bi->type)) { case B_KEYDATA: - __db_pr(bi->data, bi->len, fp); + __db_pr(dbenv, &mb, bi->data, bi->len); break; case B_DUPLICATE: case B_OVERFLOW: - __db_proff(bi->data, fp); + __db_proff(dbenv, &mb, bi->data); break; default: - fprintf(fp, "ILLEGAL BINTERNAL TYPE: %lu\n", + DB_MSGBUF_FLUSH(dbenv, &mb); + __db_msg(dbenv, "ILLEGAL BINTERNAL TYPE: %lu", (u_long)B_TYPE(bi->type)); ret = EINVAL; break; @@ -663,8 +701,9 @@ __db_prpage(dbp, h, fp, flags) break; case P_IRECNO: ri = sp; - fprintf(fp, "entries %4lu pgno %4lu\n", + __db_msgadd(dbenv, &mb, "entries %4lu pgno %4lu", (u_long)ri->nrecs, (u_long)ri->pgno); + DB_MSGBUF_FLUSH(dbenv, &mb); break; case P_LBTREE: case P_LDUP: @@ -672,28 +711,29 @@ __db_prpage(dbp, h, fp, flags) bk = sp; switch (B_TYPE(bk->type)) { case B_KEYDATA: - __db_pr(bk->data, bk->len, fp); + __db_pr(dbenv, &mb, bk->data, bk->len); break; case B_DUPLICATE: case B_OVERFLOW: - __db_proff(bk, fp); + __db_proff(dbenv, &mb, bk); break; default: - fprintf(fp, - "ILLEGAL DUPLICATE/LBTREE/LRECNO TYPE: %lu\n", + DB_MSGBUF_FLUSH(dbenv, &mb); + __db_msg(dbenv, + "ILLEGAL DUPLICATE/LBTREE/LRECNO TYPE: %lu", (u_long)B_TYPE(bk->type)); ret = EINVAL; break; } break; default: -type_err: fprintf(fp, - "ILLEGAL PAGE TYPE: %lu\n", (u_long)TYPE(h)); +type_err: DB_MSGBUF_FLUSH(dbenv, &mb); + __db_msg(dbenv, + "ILLEGAL PAGE TYPE: %lu", (u_long)TYPE(h)); ret = EINVAL; continue; } } - (void)fflush(fp); return (ret); } @@ -701,142 +741,30 @@ type_err: fprintf(fp, * __db_pr -- * Print out a data element. * - * PUBLIC: void __db_pr __P((u_int8_t *, u_int32_t, FILE *)); + * PUBLIC: void __db_pr __P((DB_ENV *, DB_MSGBUF *, u_int8_t *, u_int32_t)); */ void -__db_pr(p, len, fp) +__db_pr(dbenv, mbp, p, len) + DB_ENV *dbenv; + DB_MSGBUF *mbp; u_int8_t *p; u_int32_t len; - FILE *fp; { u_int32_t i; - int lastch; - fprintf(fp, "len: %3lu", (u_long)len); - lastch = '.'; + __db_msgadd(dbenv, mbp, "len: %3lu", (u_long)len); if (len != 0) { - fprintf(fp, " data: "); + __db_msgadd(dbenv, mbp, " data: "); for (i = len <= 20 ? len : 20; i > 0; --i, ++p) { - lastch = *p; if (isprint((int)*p) || *p == '\n') - fprintf(fp, "%c", *p); + __db_msgadd(dbenv, mbp, "%c", *p); else - fprintf(fp, "0x%.2x", (u_int)*p); + __db_msgadd(dbenv, mbp, "%#.2x", (u_int)*p); } - if (len > 20) { - fprintf(fp, "..."); - lastch = '.'; - } - } - if (lastch != '\n') - fprintf(fp, "\n"); -} - -/* - * __db_prdbt -- - * Print out a DBT data element. 
- * - * PUBLIC: int __db_prdbt __P((DBT *, int, const char *, void *, - * PUBLIC: int (*)(void *, const void *), int, VRFY_DBINFO *)); - */ -int -__db_prdbt(dbtp, checkprint, prefix, handle, callback, is_recno, vdp) - DBT *dbtp; - int checkprint; - const char *prefix; - void *handle; - int (*callback) __P((void *, const void *)); - int is_recno; - VRFY_DBINFO *vdp; -{ - static const u_char hex[] = "0123456789abcdef"; - db_recno_t recno; - size_t len; - int ret; -#define DBTBUFLEN 100 - u_int8_t *p, *hp; - char buf[DBTBUFLEN], hbuf[DBTBUFLEN]; - - if (vdp != NULL) { - /* - * If vdp is non-NULL, we might be the first key in the - * "fake" subdatabase used for key/data pairs we can't - * associate with a known subdb. - * - * Check and clear the SALVAGE_PRINTHEADER flag; if - * it was set, print a subdatabase header. - */ - if (F_ISSET(vdp, SALVAGE_PRINTHEADER)) - (void)__db_prheader(NULL, "__OTHER__", 0, 0, - handle, callback, vdp, 0); - F_CLR(vdp, SALVAGE_PRINTHEADER); - F_SET(vdp, SALVAGE_PRINTFOOTER); - - /* - * Even if the printable flag wasn't set by our immediate - * caller, it may be set on a salvage-wide basis. - */ - if (F_ISSET(vdp, SALVAGE_PRINTABLE)) - checkprint = 1; + if (len > 20) + __db_msgadd(dbenv, mbp, "..."); } - - /* - * !!! - * This routine is the routine that dumps out items in the format - * used by db_dump(1) and db_load(1). This means that the format - * cannot change. - */ - if (prefix != NULL && (ret = callback(handle, prefix)) != 0) - return (ret); - if (is_recno) { - /* - * We're printing a record number, and this has to be done - * in a platform-independent way. So we use the numeral in - * straight ASCII. - */ - (void)__ua_memcpy(&recno, dbtp->data, sizeof(recno)); - snprintf(buf, DBTBUFLEN, "%lu", (u_long)recno); - - /* If we're printing data as hex, print keys as hex too. */ - if (!checkprint) { - for (len = strlen(buf), p = (u_int8_t *)buf, - hp = (u_int8_t *)hbuf; len-- > 0; ++p) { - *hp++ = hex[(u_int8_t)(*p & 0xf0) >> 4]; - *hp++ = hex[*p & 0x0f]; - } - *hp = '\0'; - ret = callback(handle, hbuf); - } else - ret = callback(handle, buf); - - if (ret != 0) - return (ret); - } else if (checkprint) { - for (len = dbtp->size, p = dbtp->data; len--; ++p) - if (isprint((int)*p)) { - if (*p == '\\' && - (ret = callback(handle, "\\")) != 0) - return (ret); - snprintf(buf, DBTBUFLEN, "%c", *p); - if ((ret = callback(handle, buf)) != 0) - return (ret); - } else { - snprintf(buf, DBTBUFLEN, "\\%c%c", - hex[(u_int8_t)(*p & 0xf0) >> 4], - hex[*p & 0x0f]); - if ((ret = callback(handle, buf)) != 0) - return (ret); - } - } else - for (len = dbtp->size, p = dbtp->data; len--; ++p) { - snprintf(buf, DBTBUFLEN, "%c%c", - hex[(u_int8_t)(*p & 0xf0) >> 4], - hex[*p & 0x0f]); - if ((ret = callback(handle, buf)) != 0) - return (ret); - } - - return (callback(handle, "\n")); + DB_MSGBUF_FLUSH(dbenv, mbp); } /* @@ -844,84 +772,109 @@ __db_prdbt(dbtp, checkprint, prefix, handle, callback, is_recno, vdp) * Print out an off-page element. 
*/ static void -__db_proff(vp, fp) +__db_proff(dbenv, mbp, vp) + DB_ENV *dbenv; + DB_MSGBUF *mbp; void *vp; - FILE *fp; { BOVERFLOW *bo; bo = vp; switch (B_TYPE(bo->type)) { case B_OVERFLOW: - fprintf(fp, "overflow: total len: %4lu page: %4lu\n", + __db_msgadd(dbenv, mbp, "overflow: total len: %4lu page: %4lu", (u_long)bo->tlen, (u_long)bo->pgno); break; case B_DUPLICATE: - fprintf(fp, "duplicate: page: %4lu\n", (u_long)bo->pgno); + __db_msgadd( + dbenv, mbp, "duplicate: page: %4lu", (u_long)bo->pgno); break; default: /* NOTREACHED */ break; } + DB_MSGBUF_FLUSH(dbenv, mbp); } /* * __db_prflags -- * Print out flags values. * - * PUBLIC: void __db_prflags __P((u_int32_t, const FN *, void *)); + * PUBLIC: void __db_prflags __P((DB_ENV *, DB_MSGBUF *, + * PUBLIC: u_int32_t, const FN *, const char *, const char *)); */ void -__db_prflags(flags, fn, vfp) +__db_prflags(dbenv, mbp, flags, fn, prefix, suffix) + DB_ENV *dbenv; + DB_MSGBUF *mbp; u_int32_t flags; FN const *fn; - void *vfp; + const char *prefix, *suffix; { - FILE *fp; + DB_MSGBUF mb; const FN *fnp; - int found; + int found, standalone; const char *sep; /* - * We pass the FILE * through a void * so that we can use - * this function as as a callback. + * If it's a standalone message, output the suffix (which will be the + * label), regardless of whether we found anything or not, and flush + * the line. */ - fp = (FILE *)vfp; + if (mbp == NULL) { + standalone = 1; + mbp = &mb; + DB_MSGBUF_INIT(mbp); + } else + standalone = 0; - sep = " ("; + sep = prefix == NULL ? "" : prefix; for (found = 0, fnp = fn; fnp->mask != 0; ++fnp) if (LF_ISSET(fnp->mask)) { - fprintf(fp, "%s%s", sep, fnp->name); + __db_msgadd(dbenv, mbp, "%s%s", sep, fnp->name); sep = ", "; found = 1; } - if (found) - fprintf(fp, ")"); + + if ((standalone || found) && suffix != NULL) + __db_msgadd(dbenv, mbp, "%s", suffix); + if (standalone) + DB_MSGBUF_FLUSH(dbenv, mbp); } /* - * __db_dbtype_to_string -- - * Return the name of the database type. - * PUBLIC: const char * __db_dbtype_to_string __P((DBTYPE)); + * __db_lockmode_to_string -- + * Return the name of the lock mode. + * + * PUBLIC: const char * __db_lockmode_to_string __P((db_lockmode_t)); */ const char * -__db_dbtype_to_string(type) - DBTYPE type; +__db_lockmode_to_string(mode) + db_lockmode_t mode; { - switch (type) { - case DB_BTREE: - return ("btree"); - case DB_HASH: - return ("hash"); - case DB_RECNO: - return ("recno"); - case DB_QUEUE: - return ("queue"); - case DB_UNKNOWN: + switch (mode) { + case DB_LOCK_NG: + return ("Not granted"); + case DB_LOCK_READ: + return ("Shared/read"); + case DB_LOCK_WRITE: + return ("Exclusive/write"); + case DB_LOCK_WAIT: + return ("Wait for event"); + case DB_LOCK_IWRITE: + return ("Intent exclusive/write"); + case DB_LOCK_IREAD: + return ("Intent shared/read"); + case DB_LOCK_IWR: + return ("Intent to read/write"); + case DB_LOCK_DIRTY: + return ("Dirty read"); + case DB_LOCK_WWRITE: + return ("Was written"); default: - return ("UNKNOWN TYPE"); + break; } - /* NOTREACHED */ + return ("UNKNOWN LOCK MODE"); } /* @@ -979,17 +932,267 @@ __db_pagetype_to_string(type) return (s); } +#else /* !HAVE_STATISTICS */ + +/* + * __db_dumptree -- + * Dump the tree to a file. + * + * PUBLIC: int __db_dumptree __P((DB *, char *, char *)); + */ +int +__db_dumptree(dbp, op, name) + DB *dbp; + char *op, *name; +{ + COMPQUIET(op, NULL); + COMPQUIET(name, NULL); + + return (__db_stat_not_built(dbp->dbenv)); +} + +/* + * __db_get_flags_fn -- + * Return the __db_flags_fn array. 
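__db_prflags() now takes an explicit prefix and suffix and can either append to a caller's message buffer or emit a standalone line. The underlying technique is walking a { mask, name } table and printing the names of the bits that are set; a self-contained sketch in plain C, where the struct and the table contents are illustrative rather than the library's FN arrays:

#include <stdio.h>

/* Illustrative { bit, name } table in the spirit of the FN arrays above. */
struct flag_name {
    unsigned int mask;
    const char  *name;
};

static void
print_flags(unsigned int flags,
    const struct flag_name *fn, const char *prefix, const char *suffix)
{
    const char *sep;
    int found;

    sep = prefix == NULL ? "" : prefix;
    for (found = 0; fn->mask != 0; ++fn)
        if (flags & fn->mask) {
            printf("%s%s", sep, fn->name);
            sep = ", ";
            found = 1;
        }
    if (found && suffix != NULL)
        printf("%s", suffix);
    printf("\n");
}

int
main(void)
{
    static const struct flag_name fn[] = {
        { 0x0001, "DUP" },
        { 0x0002, "DUPSORT" },
        { 0x0004, "RECNUM" },
        { 0, NULL }
    };

    print_flags(0x0005, fn, "Flags: (", ")");   /* Flags: (DUP, RECNUM) */
    return (0);
}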
+ * + * PUBLIC: const FN * __db_get_flags_fn __P((void)); + */ +const FN * +__db_get_flags_fn() +{ + static const FN __db_flags_fn[] = { + { 0, NULL } + }; + + /* + * !!! + * The Tcl API uses this interface, stub it off. + */ + return (__db_flags_fn); +} +#endif + +/* + * __db_dump_pp -- + * DB->dump pre/post processing. + * + * PUBLIC: int __db_dump_pp __P((DB *, const char *, + * PUBLIC: int (*)(void *, const void *), void *, int, int)); + */ +int +__db_dump_pp(dbp, subname, callback, handle, pflag, keyflag) + DB *dbp; + const char *subname; + int (*callback)(void *, const void *); + void *handle; + int pflag, keyflag; +{ + DB_ENV *dbenv; + int handle_check, ret; + + dbenv = dbp->dbenv; + + PANIC_CHECK(dbenv); + DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->dump"); + + /* Check for replication block. */ + handle_check = IS_REPLICATED(dbenv, dbp); + if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 1)) != 0) + return (ret); + + ret = __db_dump(dbp, subname, callback, handle, pflag, keyflag); + + /* Release replication block. */ + if (handle_check) + __env_db_rep_exit(dbenv); + + return (0); +} + +/* + * __db_dump -- + * DB->dump. + * + * PUBLIC: int __db_dump __P((DB *, const char *, + * PUBLIC: int (*)(void *, const void *), void *, int, int)); + */ +int +__db_dump(dbp, subname, callback, handle, pflag, keyflag) + DB *dbp; + const char *subname; + int (*callback)(void *, const void *); + void *handle; + int pflag, keyflag; +{ + DB_ENV *dbenv; + DBC *dbcp; + DBT key, data; + DBT keyret, dataret; + db_recno_t recno; + int is_recno, ret, t_ret; + void *pointer; + + dbenv = dbp->dbenv; + + if ((ret = __db_prheader( + dbp, subname, pflag, keyflag, handle, callback, NULL, 0)) != 0) + return (ret); + + /* + * Get a cursor and step through the database, printing out each + * key/data pair. + */ + if ((ret = __db_cursor(dbp, NULL, &dbcp, 0)) != 0) + return (ret); + + memset(&key, 0, sizeof(key)); + memset(&data, 0, sizeof(data)); + if ((ret = __os_malloc(dbenv, 1024 * 1024, &data.data)) != 0) + goto err; + data.ulen = 1024 * 1024; + data.flags = DB_DBT_USERMEM; + is_recno = (dbp->type == DB_RECNO || dbp->type == DB_QUEUE); + keyflag = is_recno ? keyflag : 1; + if (is_recno) { + keyret.data = &recno; + keyret.size = sizeof(recno); + } + +retry: while ((ret = + __db_c_get(dbcp, &key, &data, DB_NEXT | DB_MULTIPLE_KEY)) == 0) { + DB_MULTIPLE_INIT(pointer, &data); + for (;;) { + if (is_recno) + DB_MULTIPLE_RECNO_NEXT(pointer, &data, + recno, dataret.data, dataret.size); + else + DB_MULTIPLE_KEY_NEXT(pointer, + &data, keyret.data, + keyret.size, dataret.data, dataret.size); + + if (dataret.data == NULL) + break; + + if ((keyflag && + (ret = __db_prdbt(&keyret, pflag, " ", + handle, callback, is_recno)) != 0) || + (ret = __db_prdbt(&dataret, pflag, " ", + handle, callback, 0)) != 0) + goto err; + } + } + if (ret == DB_BUFFER_SMALL) { + data.size = (u_int32_t)DB_ALIGN(data.size, 1024); + if ((ret = __os_realloc(dbenv, data.size, &data.data)) != 0) + goto err; + data.ulen = data.size; + goto retry; + } + + (void)__db_prfooter(handle, callback); + +err: if ((t_ret = __db_c_close(dbcp)) != 0 && ret == 0) + ret = t_ret; + if (data.data != NULL) + __os_free(dbenv, data.data); + + return (ret); +} + +/* + * __db_prdbt -- + * Print out a DBT data element. 
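__db_dump() above drives DB->dump through the bulk-retrieval interface: one DBC->c_get() call with DB_NEXT | DB_MULTIPLE_KEY packs many key/data pairs into a user-owned buffer, DB_MULTIPLE_KEY_NEXT unpacks them, and a DB_BUFFER_SMALL return grows the buffer and retries. A hedged application-level sketch of the same loop against the public 4.3-era API follows; the file name, buffer size and output format are illustrative, and error handling is trimmed to the essentials.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <db.h>

/*
 * Bulk-dump sketch for a btree or hash database; recno and queue databases
 * would use DB_MULTIPLE_RECNO_NEXT instead, as the code above does.
 */
int
dump_all(const char *file)
{
    DB *dbp;
    DBC *dbc;
    DBT key, data;
    void *p, *kptr, *dptr;
    u_int32_t klen, dlen;
    int ret, t_ret;

    if ((ret = db_create(&dbp, NULL, 0)) != 0)
        return (ret);

    memset(&key, 0, sizeof(key));
    memset(&data, 0, sizeof(data));
    data.ulen = 1024 * 1024;            /* Bulk buffer, grown on demand. */
    data.flags = DB_DBT_USERMEM;
    if ((data.data = malloc(data.ulen)) == NULL) {
        ret = ENOMEM;
        goto err;
    }

    if ((ret = dbp->open(dbp,
        NULL, file, NULL, DB_UNKNOWN, DB_RDONLY, 0)) != 0)
        goto err;
    if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
        goto err;

retry:  while ((ret = dbc->c_get(dbc,
        &key, &data, DB_NEXT | DB_MULTIPLE_KEY)) == 0) {
        /* Unpack every key/data pair packed into the buffer. */
        for (DB_MULTIPLE_INIT(p, &data);;) {
            DB_MULTIPLE_KEY_NEXT(p, &data, kptr, klen, dptr, dlen);
            if (p == NULL)
                break;
            printf("%.*s: %lu data bytes\n",
                (int)klen, (char *)kptr, (unsigned long)dlen);
        }
    }
    if (ret == DB_BUFFER_SMALL) {
        /* The next item needs a larger (1KB-aligned) buffer: grow, retry. */
        data.ulen = (data.size + 1023) & ~(u_int32_t)1023;
        if ((p = realloc(data.data, data.ulen)) == NULL) {
            ret = ENOMEM;
            goto err_c;
        }
        data.data = p;
        goto retry;
    }
    if (ret == DB_NOTFOUND)
        ret = 0;

err_c:  if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
        ret = t_ret;
err:    free(data.data);
    if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
        ret = t_ret;
    return (ret);
}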
+ * + * PUBLIC: int __db_prdbt __P((DBT *, int, const char *, void *, + * PUBLIC: int (*)(void *, const void *), int)); + */ +int +__db_prdbt(dbtp, checkprint, prefix, handle, callback, is_recno) + DBT *dbtp; + int checkprint; + const char *prefix; + void *handle; + int (*callback) __P((void *, const void *)); + int is_recno; +{ + static const u_char hex[] = "0123456789abcdef"; + db_recno_t recno; + size_t len; + int ret; +#define DBTBUFLEN 100 + u_int8_t *p, *hp; + char buf[DBTBUFLEN], hbuf[DBTBUFLEN]; + + /* + * !!! + * This routine is the routine that dumps out items in the format + * used by db_dump(1) and db_load(1). This means that the format + * cannot change. + */ + if (prefix != NULL && (ret = callback(handle, prefix)) != 0) + return (ret); + if (is_recno) { + /* + * We're printing a record number, and this has to be done + * in a platform-independent way. So we use the numeral in + * straight ASCII. + */ + (void)__ua_memcpy(&recno, dbtp->data, sizeof(recno)); + snprintf(buf, DBTBUFLEN, "%lu", (u_long)recno); + + /* If we're printing data as hex, print keys as hex too. */ + if (!checkprint) { + for (len = strlen(buf), p = (u_int8_t *)buf, + hp = (u_int8_t *)hbuf; len-- > 0; ++p) { + *hp++ = hex[(u_int8_t)(*p & 0xf0) >> 4]; + *hp++ = hex[*p & 0x0f]; + } + *hp = '\0'; + ret = callback(handle, hbuf); + } else + ret = callback(handle, buf); + + if (ret != 0) + return (ret); + } else if (checkprint) { + for (len = dbtp->size, p = dbtp->data; len--; ++p) + if (isprint((int)*p)) { + if (*p == '\\' && + (ret = callback(handle, "\\")) != 0) + return (ret); + snprintf(buf, DBTBUFLEN, "%c", *p); + if ((ret = callback(handle, buf)) != 0) + return (ret); + } else { + snprintf(buf, DBTBUFLEN, "\\%c%c", + hex[(u_int8_t)(*p & 0xf0) >> 4], + hex[*p & 0x0f]); + if ((ret = callback(handle, buf)) != 0) + return (ret); + } + } else + for (len = dbtp->size, p = dbtp->data; len--; ++p) { + snprintf(buf, DBTBUFLEN, "%c%c", + hex[(u_int8_t)(*p & 0xf0) >> 4], + hex[*p & 0x0f]); + if ((ret = callback(handle, buf)) != 0) + return (ret); + } + + return (callback(handle, "\n")); +} + /* * __db_prheader -- * Write out header information in the format expected by db_load. 
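__db_prdbt() above produces the records read back by db_load(1), so its encoding is frozen: in printable mode each printable byte is emitted as-is, a literal backslash is doubled, and any other byte becomes a backslash followed by two hex digits; in hex mode every byte becomes two hex digits. A standalone encoder for that record format, with an illustrative function name and sample data:

#include <ctype.h>
#include <stdio.h>

static const char hex[] = "0123456789abcdef";

/*
 * Emit one dump record: printable bytes literally, a doubled backslash for
 * a backslash, "\xx" (two hex digits) otherwise; hex mode emits two hex
 * digits per byte.  Newline-terminated, as db_load expects.
 */
static void
dump_record(FILE *fp, const unsigned char *p, size_t len, int printable)
{
    for (; len > 0; --len, ++p)
        if (printable && isprint(*p) && *p != '\\')
            putc(*p, fp);
        else if (printable && *p == '\\')
            fputs("\\\\", fp);
        else {
            if (printable)
                putc('\\', fp);
            putc(hex[(*p & 0xf0) >> 4], fp);
            putc(hex[*p & 0x0f], fp);
        }
    putc('\n', fp);
}

int
main(void)
{
    const unsigned char rec[] = { 'a', '\\', 0x1f, 'z' };

    dump_record(stdout, rec, sizeof(rec), 1);   /* a\\\1fz  */
    dump_record(stdout, rec, sizeof(rec), 0);   /* 615c1f7a */
    return (0);
}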
* - * PUBLIC: int __db_prheader __P((DB *, char *, int, int, void *, + * PUBLIC: int __db_prheader __P((DB *, const char *, int, int, void *, * PUBLIC: int (*)(void *, const void *), VRFY_DBINFO *, db_pgno_t)); */ int __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno) DB *dbp; - char *subname; + const char *subname; int pflag, keyflag; void *handle; int (*callback) __P((void *, const void *)); @@ -997,19 +1200,14 @@ __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno) db_pgno_t meta_pgno; { DBT dbt; - DB_BTREE_STAT *btsp; DB_ENV *dbenv; - DB_HASH_STAT *hsp; - DB_QUEUE_STAT *qsp; DBTYPE dbtype; VRFY_PAGEINFO *pip; + u_int32_t flags, tmp_u_int32; size_t buflen; char *buf; - int using_vdp, ret, t_ret; + int using_vdp, ret, t_ret, tmp_int; - btsp = NULL; - hsp = NULL; - qsp = NULL; ret = 0; buf = NULL; COMPQUIET(buflen, 0); @@ -1103,10 +1301,9 @@ __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno) if ((ret = callback(handle, buf)) != 0) goto err; memset(&dbt, 0, sizeof(dbt)); - dbt.data = subname; + dbt.data = (char *)subname; dbt.size = (u_int32_t)strlen(subname); - if ((ret = __db_prdbt(&dbt, - 1, NULL, handle, callback, 0, NULL)) != 0) + if ((ret = __db_prdbt(&dbt, 1, NULL, handle, callback, 0)) != 0) goto err; } switch (dbtype) { @@ -1133,27 +1330,28 @@ __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno) } break; } - if ((ret = __db_stat(dbp, &btsp, 0)) != 0) { - __db_err(dbp->dbenv, "DB->stat: %s", db_strerror(ret)); + + if ((ret = __db_get_flags(dbp, &flags)) != 0) { + __db_err(dbenv, "DB->get_flags: %s", db_strerror(ret)); goto err; } if (F_ISSET(dbp, DB_AM_RECNUM)) if ((ret = callback(handle, "recnum=1\n")) != 0) goto err; - if (btsp->bt_maxkey != 0) { - snprintf(buf, buflen, - "bt_maxkey=%lu\n", (u_long)btsp->bt_maxkey); - if ((ret = callback(handle, buf)) != 0) - goto err; + if ((ret = __bam_get_bt_minkey(dbp, &tmp_u_int32)) != 0) { + __db_err(dbenv, + "DB->get_bt_minkey: %s", db_strerror(ret)); + goto err; } - if (btsp->bt_minkey != 0 && btsp->bt_minkey != DEFMINKEYPAGE) { + if (tmp_u_int32 != 0 && tmp_u_int32 != DEFMINKEYPAGE) { snprintf(buf, buflen, - "bt_minkey=%lu\n", (u_long)btsp->bt_minkey); + "bt_minkey=%lu\n", (u_long)tmp_u_int32); if ((ret = callback(handle, buf)) != 0) goto err; } break; case DB_HASH: +#ifdef HAVE_HASH if ((ret = callback(handle, "type=hash\n")) != 0) goto err; if (using_vdp) { @@ -1171,24 +1369,35 @@ __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno) } break; } - if ((ret = __db_stat(dbp, &hsp, 0)) != 0) { - __db_err(dbp->dbenv, "DB->stat: %s", db_strerror(ret)); + if ((ret = __ham_get_h_ffactor(dbp, &tmp_u_int32)) != 0) { + __db_err(dbenv, + "DB->get_h_ffactor: %s", db_strerror(ret)); goto err; } - if (hsp->hash_ffactor != 0) { + if (tmp_u_int32 != 0) { snprintf(buf, buflen, - "h_ffactor=%lu\n", (u_long)hsp->hash_ffactor); + "h_ffactor=%lu\n", (u_long)tmp_u_int32); if ((ret = callback(handle, buf)) != 0) goto err; } - if (hsp->hash_nkeys != 0) { + if ((ret = __ham_get_h_nelem(dbp, &tmp_u_int32)) != 0) { + __db_err(dbenv, + "DB->get_h_nelem: %s", db_strerror(ret)); + goto err; + } + if (tmp_u_int32 != 0) { snprintf(buf, buflen, - "h_nelem=%lu\n", (u_long)hsp->hash_nkeys); + "h_nelem=%lu\n", (u_long)tmp_u_int32); if ((ret = callback(handle, buf)) != 0) goto err; } break; +#else + ret = __db_no_hash_am(dbenv); + goto err; +#endif case DB_QUEUE: +#ifdef HAVE_QUEUE if ((ret = callback(handle, "type=queue\n")) != 0) goto err; if 
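__db_prheader() above stops calling DB->stat(), which may walk the database, just to learn configuration values, and instead asks the handle directly through per-access-method getters. At the public API level the equivalents are the DB->get_* accessors that accompany the matching setters; a hedged sketch of reading the same values an application would place in a db_load header, assuming the handle is already open:

#include <stdio.h>
#include <db.h>

/*
 * Print a few db_load-style header settings for an already-open handle,
 * using the public DB->get_* accessors rather than DB->stat.
 */
int
print_header_settings(DB *dbp)
{
    DBTYPE type;
    u_int32_t flags, minkey, ffactor, nelem;
    int ret;

    if ((ret = dbp->get_type(dbp, &type)) != 0)
        return (ret);

    switch (type) {
    case DB_BTREE:
        printf("type=btree\n");
        if ((ret = dbp->get_flags(dbp, &flags)) != 0)
            return (ret);
        if (flags & DB_RECNUM)
            printf("recnum=1\n");
        if ((ret = dbp->get_bt_minkey(dbp, &minkey)) != 0)
            return (ret);
        if (minkey != 0 && minkey != 2)     /* 2 is the default. */
            printf("bt_minkey=%lu\n", (unsigned long)minkey);
        break;
    case DB_HASH:
        printf("type=hash\n");
        if ((ret = dbp->get_h_ffactor(dbp, &ffactor)) != 0)
            return (ret);
        if (ffactor != 0)
            printf("h_ffactor=%lu\n", (unsigned long)ffactor);
        if ((ret = dbp->get_h_nelem(dbp, &nelem)) != 0)
            return (ret);
        if (nelem != 0)
            printf("h_nelem=%lu\n", (unsigned long)nelem);
        break;
    default:
        break;
    }
    return (0);
}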
(vdp != NULL) { @@ -1198,25 +1407,40 @@ __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno) goto err; break; } - if ((ret = __db_stat(dbp, &qsp, 0)) != 0) { - __db_err(dbp->dbenv, "DB->stat: %s", db_strerror(ret)); + if ((ret = __ram_get_re_len(dbp, &tmp_u_int32)) != 0) { + __db_err(dbenv, + "DB->get_re_len: %s", db_strerror(ret)); goto err; } - snprintf(buf, buflen, "re_len=%lu\n", (u_long)qsp->qs_re_len); + snprintf(buf, buflen, "re_len=%lu\n", (u_long)tmp_u_int32); if ((ret = callback(handle, buf)) != 0) goto err; - if (qsp->qs_re_pad != 0 && qsp->qs_re_pad != ' ') { - snprintf(buf, buflen, "re_pad=%#x\n", qsp->qs_re_pad); + if ((ret = __ram_get_re_pad(dbp, &tmp_int)) != 0) { + __db_err(dbenv, + "DB->get_re_pad: %s", db_strerror(ret)); + goto err; + } + if (tmp_int != 0 && tmp_int != ' ') { + snprintf(buf, buflen, "re_pad=%#x\n", tmp_int); if ((ret = callback(handle, buf)) != 0) goto err; } - if (qsp->qs_extentsize != 0) { + if ((ret = __qam_get_extentsize(dbp, &tmp_u_int32)) != 0) { + __db_err(dbenv, + "DB->get_q_extentsize: %s", db_strerror(ret)); + goto err; + } + if (tmp_u_int32 != 0) { snprintf(buf, buflen, - "extentsize=%lu\n", (u_long)qsp->qs_extentsize); + "extentsize=%lu\n", (u_long)tmp_u_int32); if ((ret = callback(handle, buf)) != 0) goto err; } break; +#else + ret = __db_no_queue_am(dbenv); + goto err; +#endif case DB_RECNO: if ((ret = callback(handle, "type=recno\n")) != 0) goto err; @@ -1233,28 +1457,37 @@ __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno) } break; } - if ((ret = __db_stat(dbp, &btsp, 0)) != 0) { - __db_err(dbp->dbenv, "DB->stat: %s", db_strerror(ret)); - goto err; - } if (F_ISSET(dbp, DB_AM_RENUMBER)) if ((ret = callback(handle, "renumber=1\n")) != 0) goto err; if (F_ISSET(dbp, DB_AM_FIXEDLEN)) { + if ((ret = __ram_get_re_len(dbp, &tmp_u_int32)) != 0) { + __db_err(dbenv, + "DB->get_re_len: %s", db_strerror(ret)); + goto err; + } snprintf(buf, buflen, - "re_len=%lu\n", (u_long)btsp->bt_re_len); + "re_len=%lu\n", (u_long)tmp_u_int32); if ((ret = callback(handle, buf)) != 0) goto err; - } - if (btsp->bt_re_pad != 0 && btsp->bt_re_pad != ' ') { - snprintf(buf, buflen, "re_pad=%#x\n", btsp->bt_re_pad); - if ((ret = callback(handle, buf)) != 0) + + if ((ret = __ram_get_re_pad(dbp, &tmp_int)) != 0) { + __db_err(dbenv, + "DB->get_re_pad: %s", db_strerror(ret)); goto err; + } + if (tmp_int != 0 && tmp_int != ' ') { + snprintf(buf, + buflen, "re_pad=%#x\n", (u_int)tmp_int); + if ((ret = callback(handle, buf)) != 0) + goto err; + } } break; case DB_UNKNOWN: DB_ASSERT(0); /* Impossible. */ - __db_err(dbenv, "Impossible DB type in __db_prheader"); + __db_err(dbenv, + "Unknown or unsupported DB type in __db_prheader"); ret = EINVAL; goto err; } @@ -1293,12 +1526,6 @@ __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno) err: if (using_vdp && (t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0) ret = t_ret; - if (btsp != NULL) - __os_ufree(dbenv, btsp); - if (hsp != NULL) - __os_ufree(dbenv, hsp); - if (qsp != NULL) - __os_ufree(dbenv, qsp); if (buf != NULL) __os_free(dbenv, buf); @@ -1343,3 +1570,29 @@ __db_pr_callback(handle, str_arg) return (0); } + +/* + * __db_dbtype_to_string -- + * Return the name of the database type. 
+ * + * PUBLIC: const char * __db_dbtype_to_string __P((DBTYPE)); + */ +const char * +__db_dbtype_to_string(type) + DBTYPE type; +{ + switch (type) { + case DB_BTREE: + return ("btree"); + case DB_HASH: + return ("hash"); + case DB_RECNO: + return ("recno"); + case DB_QUEUE: + return ("queue"); + case DB_UNKNOWN: + default: + break; + } + return ("UNKNOWN TYPE"); +} diff --git a/db/db/db_rec.c b/db/db/db_rec.c index a692d9749..d7de551b9 100644 --- a/db/db/db_rec.c +++ b/db/db/db_rec.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_rec.c,v 11.60 2004/09/22 03:30:23 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_rec.c,v 11.48 2003/08/27 03:54:18 ubell Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -55,20 +53,7 @@ __db_addrem_recover(dbenv, dbtp, lsnp, op, info) REC_PRINT(__db_addrem_print); REC_INTRO(__db_addrem_read, 1); - if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { - if (DB_UNDO(op)) { - /* - * We are undoing and the page doesn't exist. That - * is equivalent to having a pagelsn of 0, so we - * would not have to undo anything. In this case, - * don't bother creating a page. - */ - goto done; - } else - if ((ret = __memp_fget(mpf, - &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - } + REC_FGET(mpf, argp->pgno, &pagep, done); cmp_n = log_compare(lsnp, &LSN(pagep)); cmp_p = log_compare(&LSN(pagep), &argp->pagelsn); @@ -138,21 +123,7 @@ __db_big_recover(dbenv, dbtp, lsnp, op, info) REC_PRINT(__db_big_print); REC_INTRO(__db_big_read, 1); - if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { - if (DB_UNDO(op)) { - /* - * We are undoing and the page doesn't exist. That - * is equivalent to having a pagelsn of 0, so we - * would not have to undo anything. In this case, - * don't bother creating a page. - */ - ret = 0; - goto ppage; - } else - if ((ret = __memp_fget(mpf, - &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - } + REC_FGET(mpf, argp->pgno, &pagep, ppage); /* * There are three pages we need to check. The one on which we are @@ -201,23 +172,7 @@ __db_big_recover(dbenv, dbtp, lsnp, op, info) /* Now check the previous page. */ ppage: if (argp->prev_pgno != PGNO_INVALID) { change = 0; - if ((ret = - __memp_fget(mpf, &argp->prev_pgno, 0, &pagep)) != 0) { - if (DB_UNDO(op)) { - /* - * We are undoing and the page doesn't exist. - * That is equivalent to having a pagelsn of 0, - * so we would not have to undo anything. In - * this case, don't bother creating a page. - */ - *lsnp = argp->prev_lsn; - ret = 0; - goto npage; - } else - if ((ret = __memp_fget(mpf, &argp->prev_pgno, - DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - } + REC_FGET(mpf, argp->prev_pgno, &pagep, npage); cmp_n = log_compare(lsnp, &LSN(pagep)); cmp_p = log_compare(&LSN(pagep), &argp->prevlsn); @@ -243,21 +198,7 @@ ppage: if (argp->prev_pgno != PGNO_INVALID) { /* Now check the next page. Can only be set on a delete. */ npage: if (argp->next_pgno != PGNO_INVALID) { change = 0; - if ((ret = - __memp_fget(mpf, &argp->next_pgno, 0, &pagep)) != 0) { - if (DB_UNDO(op)) { - /* - * We are undoing and the page doesn't exist. - * That is equivalent to having a pagelsn of 0, - * so we would not have to undo anything. In - * this case, don't bother creating a page. 
- */ - goto done; - } else - if ((ret = __memp_fget(mpf, &argp->next_pgno, - DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - } + REC_FGET(mpf, argp->next_pgno, &pagep, done); cmp_n = log_compare(lsnp, &LSN(pagep)); cmp_p = log_compare(&LSN(pagep), &argp->nextlsn); @@ -311,12 +252,7 @@ __db_ovref_recover(dbenv, dbtp, lsnp, op, info) REC_PRINT(__db_ovref_print); REC_INTRO(__db_ovref_read, 1); - if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { - if (DB_UNDO(op)) - goto done; - ret = __db_pgerr(file_dbp, argp->pgno, ret); - goto out; - } + REC_FGET(mpf, argp->pgno, &pagep, done); modified = 0; cmp = log_compare(&LSN(pagep), &argp->lsn); @@ -346,143 +282,6 @@ out: if (pagep != NULL) REC_CLOSE; } -/* - * __db_relink_recover -- - * Recovery function for relink. - * - * PUBLIC: int __db_relink_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__db_relink_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __db_relink_args *argp; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - PAGE *pagep; - int cmp_n, cmp_p, modified, ret; - - pagep = NULL; - COMPQUIET(info, NULL); - REC_PRINT(__db_relink_print); - REC_INTRO(__db_relink_read, 1); - - /* - * There are up to three pages we need to check -- the page, and the - * previous and next pages, if they existed. For a page add operation, - * the current page is the result of a split and is being recovered - * elsewhere, so all we need do is recover the next page. - */ - if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { - if (DB_REDO(op)) { - ret = __db_pgerr(file_dbp, argp->pgno, ret); - goto out; - } - goto next2; - } - modified = 0; - if (argp->opcode == DB_ADD_PAGE) - goto next1; - - cmp_p = log_compare(&LSN(pagep), &argp->lsn); - CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn); - if (cmp_p == 0 && DB_REDO(op)) { - /* Redo the relink. */ - pagep->lsn = *lsnp; - modified = 1; - } else if (log_compare(lsnp, &LSN(pagep)) == 0 && DB_UNDO(op)) { - /* Undo the relink. */ - pagep->next_pgno = argp->next; - pagep->prev_pgno = argp->prev; - - pagep->lsn = argp->lsn; - modified = 1; - } -next1: if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) - goto out; - pagep = NULL; - -next2: if ((ret = __memp_fget(mpf, &argp->next, 0, &pagep)) != 0) { - if (DB_REDO(op)) { - ret = __db_pgerr(file_dbp, argp->next, ret); - goto out; - } - goto prev; - } - modified = 0; - cmp_n = log_compare(lsnp, &LSN(pagep)); - cmp_p = log_compare(&LSN(pagep), &argp->lsn_next); - CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn_next); - if ((argp->opcode == DB_REM_PAGE && cmp_p == 0 && DB_REDO(op)) || - (argp->opcode == DB_ADD_PAGE && cmp_n == 0 && DB_UNDO(op))) { - /* Redo the remove or undo the add. */ - pagep->prev_pgno = argp->prev; - - modified = 1; - } else if ((argp->opcode == DB_REM_PAGE && cmp_n == 0 && DB_UNDO(op)) || - (argp->opcode == DB_ADD_PAGE && cmp_p == 0 && DB_REDO(op))) { - /* Undo the remove or redo the add. */ - pagep->prev_pgno = argp->pgno; - - modified = 1; - } - if (modified == 1) { - if (DB_UNDO(op)) - pagep->lsn = argp->lsn_next; - else - pagep->lsn = *lsnp; - } - if ((ret = __memp_fput(mpf, pagep, modified ? 
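Although __db_relink_recover() is deleted above, the idiom it used is the one every recovery function in this file follows: compare the page's LSN against the LSNs carried in the log record, redo the change and roll the page LSN forward if the page still shows the before-image LSN, or undo the change and roll the LSN back if the page shows the record's own LSN. A self-contained toy illustration of that decision; the structures and field names are illustrative stand-ins, not the library's DB_LSN or PAGE types.

#include <stdio.h>

/* Illustrative stand-ins for an LSN, a page header and a logged update. */
struct lsn { unsigned long file, offset; };

static int
lsn_cmp(const struct lsn *a, const struct lsn *b)
{
    if (a->file != b->file)
        return (a->file < b->file ? -1 : 1);
    if (a->offset != b->offset)
        return (a->offset < b->offset ? -1 : 1);
    return (0);
}

struct page { struct lsn lsn; unsigned long next_pgno; };
struct logrec {
    struct lsn    lsn;          /* LSN of this log record. */
    struct lsn    pagelsn;      /* Page LSN before the update. */
    unsigned long old_next, new_next;
};

static void
recover_next_ptr(struct page *h, const struct logrec *r, int redo)
{
    if (redo && lsn_cmp(&h->lsn, &r->pagelsn) == 0) {
        /* Page predates the record: reapply, roll the LSN forward. */
        h->next_pgno = r->new_next;
        h->lsn = r->lsn;
    } else if (!redo && lsn_cmp(&h->lsn, &r->lsn) == 0) {
        /* Page reflects the record: revert, roll the LSN back. */
        h->next_pgno = r->old_next;
        h->lsn = r->pagelsn;
    }
    /* Otherwise the page is already in the desired state; do nothing. */
}

int
main(void)
{
    struct logrec r = { {1, 200}, {1, 100}, 7, 9 };
    struct page h = { {1, 100}, 7 };

    recover_next_ptr(&h, &r, 1);    /* redo: next_pgno becomes 9 */
    recover_next_ptr(&h, &r, 0);    /* undo: next_pgno back to 7 */
    printf("next_pgno=%lu\n", h.next_pgno);
    return (0);
}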
DB_MPOOL_DIRTY : 0)) != 0) - goto out; - pagep = NULL; - if (argp->opcode == DB_ADD_PAGE) - goto done; - -prev: if ((ret = __memp_fget(mpf, &argp->prev, 0, &pagep)) != 0) { - if (DB_REDO(op)) { - ret = __db_pgerr(file_dbp, argp->prev, ret); - goto out; - } - goto done; - } - modified = 0; - cmp_p = log_compare(&LSN(pagep), &argp->lsn_prev); - CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn_prev); - if (cmp_p == 0 && DB_REDO(op)) { - /* Redo the relink. */ - pagep->next_pgno = argp->next; - - modified = 1; - } else if (log_compare(lsnp, &LSN(pagep)) == 0 && DB_UNDO(op)) { - /* Undo the relink. */ - pagep->next_pgno = argp->pgno; - - modified = 1; - } - if (modified == 1) { - if (DB_UNDO(op)) - pagep->lsn = argp->lsn_prev; - else - pagep->lsn = *lsnp; - } - if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) - goto out; - pagep = NULL; - -done: *lsnp = argp->prev_lsn; - ret = 0; - -out: if (pagep != NULL) - (void)__memp_fput(mpf, pagep, 0); - REC_CLOSE; -} - /* * __db_debug_recover -- * Recovery function for debug. @@ -542,8 +341,7 @@ __db_noop_recover(dbenv, dbtp, lsnp, op, info) REC_PRINT(__db_noop_print); REC_INTRO(__db_noop_read, 0); - if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) - goto out; + REC_FGET(mpf, argp->pgno, &pagep, done); cmp_n = log_compare(lsnp, &LSN(pagep)); cmp_p = log_compare(&LSN(pagep), &argp->prevlsn); @@ -587,19 +385,15 @@ __db_pg_alloc_recover(dbenv, dbtp, lsnp, op, info) DB_MPOOLFILE *mpf; PAGE *pagep; db_pgno_t pgno; - int cmp_n, cmp_p, created, level, modified, ret; + int cmp_n, cmp_p, created, level, meta_modified, modified, ret; meta = NULL; pagep = NULL; + created = meta_modified = modified = 0; REC_PRINT(__db_pg_alloc_print); REC_INTRO(__db_pg_alloc_read, 0); /* - * Fix up the allocated page. If we're redoing the operation, we have - * to get the page (creating it if it doesn't exist), and update its - * LSN. If we're undoing the operation, we have to reset the page's - * LSN and put it on the free list. - * * Fix up the metadata page. If we're redoing the operation, we have * to get the metadata page and update its LSN and its free pointer. * If we're undoing the operation and the page was ever created, we put @@ -614,7 +408,45 @@ __db_pg_alloc_recover(dbenv, dbtp, lsnp, op, info) } else goto done; } - created = modified = 0; + cmp_n = log_compare(lsnp, &LSN(meta)); + cmp_p = log_compare(&LSN(meta), &argp->meta_lsn); + CHECK_LSN(op, cmp_p, &LSN(meta), &argp->meta_lsn); + if (cmp_p == 0 && DB_REDO(op)) { + /* Need to redo update described. */ + LSN(meta) = *lsnp; + meta->free = argp->next; + meta_modified = 1; + if (argp->pgno > meta->last_pgno) + meta->last_pgno = argp->pgno; + } else if (cmp_n == 0 && DB_UNDO(op)) { + /* Need to undo update described. */ + LSN(meta) = argp->meta_lsn; + /* + * If the page has a zero LSN then its newly created + * and will be truncated or go into limbo rather than + * directly on the free list. + */ + if (!IS_ZERO_LSN(argp->page_lsn)) + meta->free = argp->pgno; +#ifdef HAVE_FTRUNCATE + /* + * With truncate we will restore the file to + * its original length. Without truncate + * the last_pgno never goes backward. + */ + meta->last_pgno = argp->last_pgno; +#endif + meta_modified = 1; + } + + /* + * Fix up the allocated page. If the page does not exist + * and we can truncate it then don't create it. + * Otherwise if we're redoing the operation, we have + * to get the page (creating it if it doesn't exist), and update its + * LSN. 
If we're undoing the operation, we have to reset the page's + * LSN and put it on the free list, or into limbo.. + */ if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { /* * We have to be able to identify if a page was newly @@ -624,10 +456,14 @@ __db_pg_alloc_recover(dbenv, dbtp, lsnp, op, info) * try for the page without CREATE and if that fails, then * create it. */ +#ifdef HAVE_FTRUNCATE + if (DB_UNDO(op)) + goto do_truncate; +#endif if ((ret = __memp_fget( mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) { - if (ret == ENOSPC) - goto do_meta; + if (DB_UNDO(op) && ret == ENOSPC) + goto do_truncate; ret = __db_pgerr(file_dbp, argp->pgno, ret); goto out; } @@ -639,24 +475,24 @@ __db_pg_alloc_recover(dbenv, dbtp, lsnp, op, info) cmp_p = log_compare(&LSN(pagep), &argp->page_lsn); /* - * If an inital allocation is aborted and then reallocated - * during an archival restore the log record will have - * an LSN for the page but the page will be empty. + * If an initial allocation is aborted and then reallocated during + * an archival restore the log record will have an LSN for the page + * but the page will be empty. + * If we we rolled back this allocation previously during an + * archive restore, the page may have INIT_LSN from the limbo list. */ - if (IS_ZERO_LSN(LSN(pagep))) + if (IS_ZERO_LSN(LSN(pagep)) || + (IS_ZERO_LSN(argp->page_lsn) && IS_INIT_LSN(LSN(pagep)))) cmp_p = 0; + CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->page_lsn); /* - * If we we rolled back this allocation previously during an - * archive restore, the page may have INIT_LSN from the limbo list. * Another special case we have to handle is if we ended up with a * page of all 0's which can happen if we abort between allocating a * page in mpool and initializing it. In that case, even if we're * undoing, we need to re-initialize the page. */ - if (DB_REDO(op) && - (cmp_p == 0 || - (IS_ZERO_LSN(argp->page_lsn) && IS_INIT_LSN(LSN(pagep))))) { + if (DB_REDO(op) && cmp_p == 0) { /* Need to redo update described. */ switch (argp->ptype) { case P_LBTREE: @@ -687,56 +523,47 @@ __db_pg_alloc_recover(dbenv, dbtp, lsnp, op, info) modified = 1; } +do_truncate: /* - * If the page was newly created, put it on the limbo list. + * If the page was newly created, give it back, if + * possible. Otherwise put it into limbo. */ - if (IS_ZERO_LSN(LSN(pagep)) && + if ((pagep == NULL || IS_ZERO_LSN(LSN(pagep))) && IS_ZERO_LSN(argp->page_lsn) && DB_UNDO(op)) { +#ifdef HAVE_FTRUNCATE + COMPQUIET(info, NULL); + /* Discard the page. */ + if (pagep != NULL) { + if ((ret = + __memp_fput(mpf, pagep, DB_MPOOL_DISCARD)) != 0) + goto out; + pagep = NULL; + /* Give the page back to the OS. */ + if (meta->last_pgno <= argp->pgno && + (ret = __memp_ftruncate(mpf, argp->pgno, 0)) != 0) + goto out; + } +#else /* Put the page in limbo.*/ if ((ret = __db_add_limbo(dbenv, info, argp->fileid, argp->pgno, 1)) != 0) goto out; + /* The last_pgno grows if this was a new page. */ + if (argp->pgno > meta->last_pgno) { + meta->last_pgno = argp->pgno; + meta_modified = 1; + } +#endif } - if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) + if (pagep != NULL && + (ret = __memp_fput(mpf, + pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) goto out; pagep = NULL; -do_meta: - /* Fix up the metadata page. */ - modified = 0; - cmp_n = log_compare(lsnp, &LSN(meta)); - cmp_p = log_compare(&LSN(meta), &argp->meta_lsn); - CHECK_LSN(op, cmp_p, &LSN(meta), &argp->meta_lsn); - if (cmp_p == 0 && DB_REDO(op)) { - /* Need to redo update described. 
*/ - LSN(meta) = *lsnp; - meta->free = argp->next; - modified = 1; - } else if (cmp_n == 0 && DB_UNDO(op)) { - /* Need to undo update described. */ - LSN(meta) = argp->meta_lsn; - - /* - * If the page has a zero LSN then its newly created - * and will go into limbo rather than directly on the - * free list. - */ - if (!IS_ZERO_LSN(argp->page_lsn)) - meta->free = argp->pgno; - modified = 1; - } - - /* - * Make sure that meta->last_pgno always reflects the largest page - * that we've ever allocated. - */ - if (argp->pgno > meta->last_pgno) { - meta->last_pgno = argp->pgno; - modified = 1; - } - - if ((ret = __memp_fput(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0) + if ((ret = __memp_fput(mpf, + meta, meta_modified ? DB_MPOOL_DIRTY : 0)) != 0) goto out; meta = NULL; @@ -769,50 +596,14 @@ __db_pg_free_recover_int(dbenv, argp, file_dbp, lsnp, mpf, op, data) DB_LSN copy_lsn; PAGE *pagep; db_pgno_t pgno; - int cmp_n, cmp_p, modified, ret; + int cmp_n, cmp_p, meta_modified, modified, ret; meta = NULL; pagep = NULL; - /* - * Fix up the freed page. If we're redoing the operation we get the - * page and explicitly discard its contents, then update its LSN. If - * we're undoing the operation, we get the page and restore its header. - * Create the page if necessary, we may be freeing an aborted - * create. - */ - if ((ret = __memp_fget(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - modified = 0; - (void)__ua_memcpy(©_lsn, &LSN(argp->header.data), sizeof(DB_LSN)); - cmp_n = log_compare(lsnp, &LSN(pagep)); - cmp_p = log_compare(&LSN(pagep), ©_lsn); - CHECK_LSN(op, cmp_p, &LSN(pagep), ©_lsn); - if (DB_REDO(op) && - (cmp_p == 0 || - (IS_ZERO_LSN(copy_lsn) && - log_compare(&LSN(pagep), &argp->meta_lsn) <= 0))) { - /* Need to redo update described. */ - P_INIT(pagep, file_dbp->pgsize, - argp->pgno, PGNO_INVALID, argp->next, 0, P_INVALID); - pagep->lsn = *lsnp; - - modified = 1; - } else if (cmp_n == 0 && DB_UNDO(op)) { - /* Need to undo update described. */ - memcpy(pagep, argp->header.data, argp->header.size); - if (data) - memcpy((u_int8_t*)pagep + pagep->hf_offset, - argp->data.data, argp->data.size); - - modified = 1; - } - if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) - goto out; - pagep = NULL; + meta_modified = modified = 0; /* - * Fix up the metadata page. If we're redoing or undoing the operation - * we get the page and update its LSN and free pointer. + * Get the metapage first so we can see where we are. */ pgno = PGNO_BASE_MD; if ((ret = __memp_fget(mpf, &pgno, 0, &meta)) != 0) { @@ -820,12 +611,25 @@ __db_pg_free_recover_int(dbenv, argp, file_dbp, lsnp, mpf, op, data) ret = __db_pgerr(file_dbp, pgno, ret); goto out; } - - modified = 0; cmp_n = log_compare(lsnp, &LSN(meta)); cmp_p = log_compare(&LSN(meta), &argp->meta_lsn); CHECK_LSN(op, cmp_p, &LSN(meta), &argp->meta_lsn); + + /* + * Fix up the metadata page. If we're redoing or undoing the operation + * we get the page and update its LSN, last and free pointer. + */ if (cmp_p == 0 && DB_REDO(op)) { +#ifdef HAVE_FTRUNCATE + /* + * If we are at the end of the file truncate, otherwise + * put on the free list. + */ + if (argp->pgno == argp->last_pgno) + meta->last_pgno = argp->pgno - 1; + else + meta->free = argp->pgno; +#else /* Need to redo the deallocation. 
*/ meta->free = argp->pgno; /* @@ -835,19 +639,105 @@ __db_pg_free_recover_int(dbenv, argp, file_dbp, lsnp, mpf, op, data) */ if (meta->last_pgno < meta->free) meta->last_pgno = meta->free; +#endif LSN(meta) = *lsnp; - modified = 1; + meta_modified = 1; } else if (cmp_n == 0 && DB_UNDO(op)) { /* Need to undo the deallocation. */ meta->free = argp->next; LSN(meta) = argp->meta_lsn; + if (meta->last_pgno < argp->pgno) + meta->last_pgno = argp->pgno; + meta_modified = 1; + } + + /* + * Get the freed page. If we support truncate then don't + * create the page if we are going to free it. If we're + * redoing the operation we get the page and explicitly discard + * its contents, then update its LSN. If we're undoing the + * operation, we get the page and restore its header. + * If we don't support truncate, then we must create the page + * and roll it back. + */ +#ifdef HAVE_FTRUNCATE + if (DB_REDO(op) || meta->last_pgno < argp->pgno) { + if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { + if (ret == DB_PAGE_NOTFOUND) + goto done; + goto out; + } + } else +#endif + if ((ret = + __memp_fget(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) + goto out; + + (void)__ua_memcpy(©_lsn, &LSN(argp->header.data), sizeof(DB_LSN)); + cmp_n = IS_ZERO_LSN(LSN(pagep)) ? 0 : log_compare(lsnp, &LSN(pagep)); + cmp_p = log_compare(&LSN(pagep), ©_lsn); + +#ifdef HAVE_FTRUNCATE + /* + * This page got extended by a later allocation, + * but its allocation was not in the scope of this + * recovery pass. + */ + if (IS_ZERO_LSN(LSN(pagep))) + cmp_p = 0; +#endif + + CHECK_LSN(op, cmp_p, &LSN(pagep), ©_lsn); + if (DB_REDO(op) && + (cmp_p == 0 || + (IS_ZERO_LSN(copy_lsn) && + log_compare(&LSN(pagep), &argp->meta_lsn) <= 0))) { + /* Need to redo the deallocation. */ +#ifdef HAVE_FTRUNCATE + if (meta->last_pgno <= argp->pgno) { + if ((ret = + __memp_fput(mpf, pagep, DB_MPOOL_DISCARD)) != 0) + goto out; + pagep = NULL; + if ((ret = __memp_ftruncate(mpf, argp->pgno, 0)) != 0) + goto out; + } else if (argp->last_pgno == argp->pgno) { + /* The page was truncated at runtime, zero it out. */ + P_INIT(pagep, 0, PGNO_INVALID, + PGNO_INVALID, PGNO_INVALID, 0, P_INVALID); + ZERO_LSN(pagep->lsn); + modified = 1; + } else +#endif + { + P_INIT(pagep, file_dbp->pgsize, + argp->pgno, PGNO_INVALID, argp->next, 0, P_INVALID); + pagep->lsn = *lsnp; + + modified = 1; + } + } else if (cmp_n == 0 && DB_UNDO(op)) { + /* Need to reallocate the page. */ + memcpy(pagep, argp->header.data, argp->header.size); + if (data) + memcpy((u_int8_t*)pagep + pagep->hf_offset, + argp->data.data, argp->data.size); + modified = 1; } - if ((ret = __memp_fput(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0) + if (pagep != NULL && + (ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) + goto out; + + pagep = NULL; +#ifdef HAVE_FTRUNCATE +done: +#endif + if ((ret = __memp_fput(mpf, + meta, meta_modified ? 
DB_MPOOL_DIRTY : 0)) != 0) goto out; meta = NULL; - *lsnp = argp->prev_lsn; ret = 0; out: if (pagep != NULL) @@ -886,10 +776,9 @@ __db_pg_free_recover(dbenv, dbtp, lsnp, op, info) ret = __db_pg_free_recover_int(dbenv, (__db_pg_freedata_args *)argp, file_dbp, lsnp, mpf, op, 0); -done: +done: *lsnp = argp->prev_lsn; out: REC_CLOSE; - } /* @@ -908,6 +797,7 @@ __db_pg_new_recover(dbenv, dbtp, lsnp, op, info) db_recops op; void *info; { +#ifndef HAVE_FTRUNCATE DB *file_dbp; DBC *dbc; DB_MPOOLFILE *mpf; @@ -916,7 +806,7 @@ __db_pg_new_recover(dbenv, dbtp, lsnp, op, info) REC_PRINT(__db_pg_free_print); REC_INTRO(__db_pg_free_read, 1); - COMPQUIET(op, 0); + COMPQUIET(op, DB_TXN_ABORT); if ((ret = __db_add_limbo(dbenv, info, argp->fileid, argp->pgno, 1)) == 0) @@ -925,7 +815,14 @@ __db_pg_new_recover(dbenv, dbtp, lsnp, op, info) done: out: REC_CLOSE; - +#else + COMPQUIET(dbenv, NULL); + COMPQUIET(dbtp, NULL); + COMPQUIET(lsnp, NULL); + COMPQUIET(op, DB_TXN_PRINT); + COMPQUIET(info, NULL); + return (0); +#endif } /* @@ -955,10 +852,9 @@ __db_pg_freedata_recover(dbenv, dbtp, lsnp, op, info) ret = __db_pg_free_recover_int(dbenv, argp, file_dbp, lsnp, mpf, op, 1); -done: +done: *lsnp = argp->prev_lsn; out: REC_CLOSE; - } /* @@ -1004,6 +900,7 @@ __db_cksum_recover(dbenv, dbtp, lsnp, op, info) __os_free(dbenv, argp); return (ret); } + /* * __db_pg_prepare_recover -- * Recovery function for pg_prepare. @@ -1019,6 +916,7 @@ __db_pg_prepare_recover(dbenv, dbtp, lsnp, op, info) db_recops op; void *info; { +#ifndef HAVE_FTRUNCATE __db_pg_prepare_args *argp; DB *file_dbp; DBC *dbc; @@ -1032,7 +930,7 @@ __db_pg_prepare_recover(dbenv, dbtp, lsnp, op, info) mpf = file_dbp->mpf; /* - * If this made it into the limbo list a prepare time then + * If this made it into the limbo list at prepare time then * it was a new free page allocated by an aborted subtransaction. * Only that subtransaction could have toched the page. * All other pages in the free list at this point are @@ -1063,4 +961,70 @@ __db_pg_prepare_recover(dbenv, dbtp, lsnp, op, info) done: if (ret == 0) *lsnp = argp->prev_lsn; out: REC_CLOSE; +#else + COMPQUIET(dbenv, NULL); + COMPQUIET(dbtp, NULL); + COMPQUIET(lsnp, NULL); + COMPQUIET(op, DB_TXN_PRINT); + COMPQUIET(info, NULL); + return (0); +#endif + +} +/* + * __db_pg_init_recover -- + * Recovery function to reinit pages for truncate. + * + * PUBLIC: int __db_pg_init_recover + * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); + */ +int +__db_pg_init_recover(dbenv, dbtp, lsnp, op, info) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops op; + void *info; +{ + __db_pg_init_args *argp; + DB *file_dbp; + DBC *dbc; + DB_LSN copy_lsn; + DB_MPOOLFILE *mpf; + PAGE *pagep; + int cmp_n, cmp_p, modified, ret; + + COMPQUIET(info, NULL); + REC_PRINT(__db_pg_init_print); + REC_INTRO(__db_pg_init_read, 1); + + mpf = file_dbp->mpf; + REC_FGET(mpf, argp->pgno, &pagep, done); + + modified = 0; + (void)__ua_memcpy(©_lsn, &LSN(argp->header.data), sizeof(DB_LSN)); + cmp_n = log_compare(lsnp, &LSN(pagep)); + cmp_p = log_compare(&LSN(pagep), ©_lsn); + CHECK_LSN(op, cmp_p, &LSN(pagep), ©_lsn); + + if (cmp_p == 0 && DB_REDO(op)) { + P_INIT(pagep, file_dbp->pgsize, PGNO(pagep), PGNO_INVALID, + PGNO_INVALID, TYPE(pagep) == P_HASH ? 0 : 1, TYPE(pagep)); + pagep->lsn = *lsnp; + modified = 1; + } else if (cmp_n == 0 && DB_UNDO(op)) { + /* Put the data back on the page. 
*/ + memcpy(pagep, argp->header.data, argp->header.size); + if (argp->data.size > 0) + memcpy((u_int8_t*)pagep + pagep->hf_offset, + argp->data.data, argp->data.size); + + modified = 1; + } + if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) + goto out; + +done: *lsnp = argp->prev_lsn; +out: + REC_CLOSE; } diff --git a/db/db/db_reclaim.c b/db/db/db_reclaim.c index 1a70bf608..4795b8caa 100644 --- a/db/db/db_reclaim.c +++ b/db/db/db_reclaim.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_reclaim.c,v 11.42 2004/06/10 04:46:44 ubell Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_reclaim.c,v 11.37 2003/06/30 17:19:47 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include @@ -50,7 +48,13 @@ __db_traverse_big(dbp, pgno, callback, cookie) did_put = 0; if ((ret = __memp_fget(mpf, &pgno, 0, &p)) != 0) return (ret); + /* + * If we are freeing pages only process the overflow + * chain if the head of the chain has a refcount of 1. + */ pgno = NEXT_PGNO(p); + if (callback == __db_truncate_callback && OV_REF(p) != 1) + pgno = PGNO_INVALID; if ((ret = callback(dbp, p, cookie, &did_put)) == 0 && !did_put) ret = __memp_fput(mpf, p, 0); @@ -79,8 +83,15 @@ __db_reclaim_callback(dbp, p, cookie, putp) { int ret; - COMPQUIET(dbp, NULL); + /* + * We don't want to log the free of the root with the subdb. + * If we abort then the subdb may not be openable to undo + * the free. + */ + if ((dbp->type == DB_BTREE || dbp->type == DB_RECNO) && + PGNO(p) == ((BTREE *)dbp->bt_internal)->bt_root) + return (0); if ((ret = __db_free(cookie, p)) != 0) return (ret); *putp = 1; @@ -104,6 +115,7 @@ __db_truncate_callback(dbp, p, cookie, putp) int *putp; { DB_MPOOLFILE *mpf; + DBT ddbt, ldbt; db_indx_t indx, len, off, tlen, top; db_trunc_param *param; u_int8_t *hk, type; @@ -166,8 +178,8 @@ __db_truncate_callback(dbp, p, cookie, putp) for (indx = 0; indx < top; indx += P_INDX) { switch (*H_PAIRDATA(dbp, p, indx)) { case H_OFFDUP: - case H_OFFPAGE: break; + case H_OFFPAGE: case H_KEYDATA: ++param->count; break; @@ -175,7 +187,7 @@ __db_truncate_callback(dbp, p, cookie, putp) tlen = LEN_HDATA(dbp, p, 0, indx); hk = H_PAIRDATA(dbp, p, indx); for (off = 0; off < tlen; - off += len + 2 * sizeof (db_indx_t)) { + off += len + 2 * sizeof(db_indx_t)) { ++param->count; memcpy(&len, HKEYDATA_DATA(hk) @@ -192,9 +204,16 @@ __db_truncate_callback(dbp, p, cookie, putp) reinit: *putp = 0; if (DBC_LOGGING(param->dbc)) { - if ((ret = __db_free(param->dbc, p)) != 0) - return (ret); - if ((ret = __db_new(param->dbc, type, &p)) != 0) + memset(&ldbt, 0, sizeof(ldbt)); + memset(&ddbt, 0, sizeof(ddbt)); + ldbt.data = p; + ldbt.size = P_OVERHEAD(dbp); + ldbt.size += p->entries * sizeof(db_indx_t); + ddbt.data = (u_int8_t *)p + p->hf_offset; + ddbt.size = dbp->pgsize - p->hf_offset; + if ((ret = __db_pg_init_log(dbp, + param->dbc->txn, &LSN(p), 0, + p->pgno, &ldbt, &ddbt)) != 0) return (ret); } else LSN_NOT_LOGGED(LSN(p)); diff --git a/db/db/db_remove.c b/db/db/db_remove.c index 3357ba233..5497c5e01 100644 --- a/db/db/db_remove.c +++ b/db/db/db_remove.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. 
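The reclaim/truncate changes above serve DB->truncate(), which empties a database while counting the records it discards, and which now logs a __db_pg_init record so a page can be reinitialized in place rather than freed and reallocated. A hedged example of the public call an application makes; the file name is illustrative and the database is assumed to exist and not be open elsewhere.

#include <stdio.h>
#include <db.h>

/* Empty a database and report how many records were discarded. */
int
truncate_db(const char *file)
{
    DB *dbp;
    u_int32_t count;
    int ret, t_ret;

    if ((ret = db_create(&dbp, NULL, 0)) != 0)
        return (ret);
    if ((ret = dbp->open(dbp, NULL, file, NULL, DB_UNKNOWN, 0, 0)) != 0)
        goto err;

    /* Discard every record; the count comes back in "count". */
    if ((ret = dbp->truncate(dbp, NULL, &count, 0)) == 0)
        printf("%s: discarded %lu records\n", file, (unsigned long)count);

err:    if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
        ret = t_ret;
    return (ret);
}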
+ * + * $Id: db_remove.c,v 11.219 2004/09/16 17:55:17 margo Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_remove.c,v 11.214 2003/09/04 18:56:41 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif @@ -68,7 +66,7 @@ __dbenv_dbremove_pp(dbenv, txn, name, subdb, flags) goto err; handle_check = IS_REPLICATED(dbenv, dbp); - if (handle_check && (ret = __db_rep_enter(dbp, 1, txn != NULL)) != 0) + if (handle_check && (ret = __db_rep_enter(dbp, 1, 1, txn != NULL)) != 0) goto err; ret = __db_remove_int(dbp, txn, name, subdb, flags); @@ -93,7 +91,7 @@ __dbenv_dbremove_pp(dbenv, txn, name, subdb, flags) } if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); err: if (txn_local) ret = __db_txn_auto_resolve(dbenv, txn, 0, ret); @@ -152,14 +150,14 @@ __db_remove_pp(dbp, name, subdb, flags) return (ret); handle_check = IS_REPLICATED(dbenv, dbp); - if (handle_check && (ret = __db_rep_enter(dbp, 1, 0)) != 0) + if (handle_check && (ret = __db_rep_enter(dbp, 1, 1, 0)) != 0) return (ret); /* Remove the file. */ ret = __db_remove(dbp, NULL, name, subdb, flags); if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -202,7 +200,6 @@ __db_remove_int(dbp, txn, name, subdb, flags) u_int32_t flags; { DB_ENV *dbenv; - DB_LSN newlsn; int ret; char *real_name, *tmpname; @@ -242,7 +239,7 @@ __db_remove_int(dbp, txn, name, subdb, flags) goto err; if (dbp->db_am_remove != NULL && - (ret = dbp->db_am_remove(dbp, NULL, name, subdb, &newlsn)) != 0) + (ret = dbp->db_am_remove(dbp, NULL, name, subdb)) != 0) goto err; ret = __fop_remove(dbenv, NULL, dbp->fileid, name, DB_APP_DATA, @@ -319,7 +316,7 @@ err: ret = t_ret; if (mdbp != NULL && - (t_ret = __db_close(mdbp, txn, 0)) != 0 && ret == 0) + (t_ret = __db_close(mdbp, txn, DB_NOSYNC)) != 0 && ret == 0) ret = t_ret; return (ret); @@ -332,7 +329,6 @@ __db_dbtxn_remove(dbp, txn, name) const char *name; { DB_ENV *dbenv; - DB_LSN newlsn; int ret; char *tmpname; @@ -357,7 +353,7 @@ __db_dbtxn_remove(dbp, txn, name) /* The internal removes will also translate into delayed removes. */ if (dbp->db_am_remove != NULL && - (ret = dbp->db_am_remove(dbp, txn, tmpname, NULL, &newlsn)) != 0) + (ret = dbp->db_am_remove(dbp, txn, tmpname, NULL)) != 0) goto err; ret = __fop_remove(dbenv, txn, dbp->fileid, tmpname, DB_APP_DATA, diff --git a/db/db/db_rename.c b/db/db/db_rename.c index c02948391..12f1f2276 100644 --- a/db/db/db_rename.c +++ b/db/db/db_rename.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: db_rename.c,v 11.216 2004/09/16 17:55:17 margo Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_rename.c,v 11.211 2003/09/04 18:06:45 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include @@ -92,8 +90,8 @@ __dbenv_dbrename(dbenv, txn, name, subdb, newname, txn_local) F_SET(dbp, DB_AM_TXN); handle_check = IS_REPLICATED(dbenv, dbp); - if (handle_check && (ret = __db_rep_enter(dbp, 1, txn != NULL)) != 0) - return (ret); + if (handle_check && (ret = __db_rep_enter(dbp, 1, 1, txn != NULL)) != 0) + goto err; ret = __db_rename_int(dbp, txn, name, subdb, newname); @@ -117,8 +115,9 @@ __dbenv_dbrename(dbenv, txn, name, subdb, newname, txn_local) } if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); +err: if ((t_ret = __db_close(dbp, txn, DB_NOSYNC)) != 0 && ret == 0) ret = t_ret; @@ -170,7 +169,7 @@ __db_rename_pp(dbp, name, subdb, newname, flags) goto err; handle_check = IS_REPLICATED(dbenv, dbp); - if (handle_check && (ret = __db_rep_enter(dbp, 1, 0)) != 0) { + if (handle_check && (ret = __db_rep_enter(dbp, 1, 1, 0)) != 0) { handle_check = 0; goto err; } @@ -179,7 +178,7 @@ __db_rename_pp(dbp, name, subdb, newname, flags) ret = __db_rename(dbp, NULL, name, subdb, newname); err: if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -325,7 +324,7 @@ __db_subdb_rename(dbp, txn, name, subdb, newname) goto err; memcpy(dbp->fileid, ((DBMETA *)meta)->uid, DB_FILE_ID_LEN); if ((ret = __fop_lock_handle(dbenv, - dbp, mdbp->lid, DB_LOCK_WRITE, NULL, 0)) != 0) + dbp, mdbp->lid, DB_LOCK_WRITE, NULL, NOWAIT_FLAG(txn))) != 0) goto err; ret = __memp_fput(mdbp->mpf, meta, 0); @@ -346,7 +345,7 @@ err: ret = t_ret; if (mdbp != NULL && - (t_ret = __db_close(mdbp, txn, 0)) != 0 && ret == 0) + (t_ret = __db_close(mdbp, txn, DB_NOSYNC)) != 0 && ret == 0) ret = t_ret; return (ret); diff --git a/db/db/db_ret.c b/db/db/db_ret.c index 83521a1f1..99421eccc 100644 --- a/db/db/db_ret.c +++ b/db/db/db_ret.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_ret.c,v 11.26 2004/02/05 02:25:13 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_ret.c,v 11.24 2003/04/02 14:12:34 sue Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -127,7 +125,7 @@ __db_retcopy(dbenv, dbt, data, len, memp, memsize) ret = __os_urealloc(dbenv, len, &dbt->data); } else if (F_ISSET(dbt, DB_DBT_USERMEM)) { if (len != 0 && (dbt->data == NULL || dbt->ulen < len)) - ret = ENOMEM; + ret = DB_BUFFER_SMALL; } else if (memp == NULL || memsize == NULL) { ret = EINVAL; } else { diff --git a/db/db/db_setid.c b/db/db/db_setid.c new file mode 100644 index 000000000..fffba1c6d --- /dev/null +++ b/db/db/db_setid.c @@ -0,0 +1,155 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: db_setid.c,v 1.6 2004/09/24 13:41:08 bostic Exp $ + */ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include + +#include +#endif + +#include "db_int.h" +#include "dbinc/db_page.h" +#include "dbinc/db_swap.h" +#include "dbinc/db_am.h" + +/* + * __db_fileid_reset -- + * Reset the file IDs for every database in the file. 
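The __db_retcopy() change above is part of the 4.3 error cleanup: a DB_DBT_USERMEM buffer that is too small now returns the new DB_BUFFER_SMALL error instead of overloading ENOMEM, with dbt->size set to the length required. A hedged sketch of the retry an application performs; the key handling and the initial buffer size are illustrative.

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <db.h>

/* Fetch a record into caller-owned memory, growing it on DB_BUFFER_SMALL. */
int
get_usermem(DB *dbp, const char *keystr, DBT *data)
{
    DBT key;
    void *p;
    int ret;

    memset(&key, 0, sizeof(key));
    key.data = (void *)keystr;
    key.size = (u_int32_t)strlen(keystr);

    memset(data, 0, sizeof(*data));
    data->flags = DB_DBT_USERMEM;
    data->ulen = 64;                        /* Deliberately small. */
    if ((data->data = malloc(data->ulen)) == NULL)
        return (ENOMEM);

    ret = dbp->get(dbp, NULL, &key, data, 0);
    if (ret == DB_BUFFER_SMALL) {
        /* data->size now holds the length the record actually needs. */
        if ((p = realloc(data->data, data->size)) == NULL)
            return (ENOMEM);
        data->data = p;
        data->ulen = data->size;
        ret = dbp->get(dbp, NULL, &key, data, 0);
    }
    return (ret);
}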
+ * + * PUBLIC: int __db_fileid_reset __P((DB_ENV *, char *, int)); + */ +int +__db_fileid_reset(dbenv, name, passwd) + DB_ENV *dbenv; + char *name; + int passwd; +{ + DB *dbp; + DBC *dbcp; + DBT key, data; + DB_MPOOLFILE *mpf; + db_pgno_t pgno; + int t_ret, ret; + void *pagep; + char *real_name; + u_int8_t fileid[DB_FILE_ID_LEN]; + + dbp = NULL; + dbcp = NULL; + real_name = NULL; + + /* Get the real backing file name. */ + if ((ret = __db_appname(dbenv, + DB_APP_DATA, name, 0, NULL, &real_name)) != 0) + return (ret); + + /* Get a new file ID. */ + if ((ret = __os_fileid(dbenv, real_name, 1, fileid)) != 0) { + dbenv->err(dbenv, ret, "unable to get new file ID"); + goto err; + } + + /* Create the DB object. */ + if ((ret = db_create(&dbp, dbenv, 0)) != 0) { + dbenv->err(dbenv, ret, "db_create"); + goto err; + } + + /* If configured with a password, the databases are encrypted. */ + if (passwd && (ret = dbp->set_flags(dbp, DB_ENCRYPT)) != 0) { + dbp->err(dbp, ret, "DB->set_flags: DB_ENCRYPT"); + goto err; + } + + /* + * Open the DB file. + * + * !!! + * Note DB_RDWRMASTER flag, we need to open the master database file + * for writing in this case. + */ + if ((ret = dbp->open(dbp, + NULL, name, NULL, DB_UNKNOWN, DB_RDWRMASTER, 0)) != 0) { + dbp->err(dbp, ret, "DB->open: %s", name); + goto err; + } + + mpf = dbp->mpf; + + pgno = PGNO_BASE_MD; + if ((ret = mpf->get(mpf, &pgno, 0, &pagep)) != 0) { + dbp->err(dbp, ret, + "%s: DB_MPOOLFILE->get: %lu", name, (u_long)pgno); + goto err; + } + memcpy(((DBMETA *)pagep)->uid, fileid, DB_FILE_ID_LEN); + if ((ret = mpf->put(mpf, pagep, DB_MPOOL_DIRTY)) != 0) { + dbp->err(dbp, ret, + "%s: DB_MPOOLFILE->put: %lu", name, (u_long)pgno); + goto err; + } + + /* + * If the database file doesn't support subdatabases, we only have + * to update a single metadata page. Otherwise, we have to open a + * cursor and step through the master database, and update all of + * the subdatabases' metadata pages. + */ + if (!F_ISSET(dbp, DB_AM_SUBDB)) + goto err; + + memset(&key, 0, sizeof(key)); + memset(&data, 0, sizeof(data)); + if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) { + dbp->err(dbp, ret, "DB->cursor"); + goto err; + } + while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0) { + /* + * XXX + * We're handling actual data, not on-page meta-data, so it + * hasn't been converted to/from opposite endian architectures. + * Do it explicitly, now. + */ + memcpy(&pgno, data.data, sizeof(db_pgno_t)); + DB_NTOHL(&pgno); + if ((ret = mpf->get(mpf, &pgno, 0, &pagep)) != 0) { + dbp->err(dbp, ret, + "%s: DB_MPOOLFILE->get: %lu", name, (u_long)pgno); + goto err; + } + memcpy(((DBMETA *)pagep)->uid, fileid, DB_FILE_ID_LEN); + if ((ret = mpf->put(mpf, pagep, DB_MPOOL_DIRTY)) != 0) { + dbp->err(dbp, ret, + "%s: DB_MPOOLFILE->put: %lu", name, (u_long)pgno); + goto err; + } + } + if (ret == DB_NOTFOUND) + ret = 0; + else + dbp->err(dbp, ret, "DBcursor->get"); + +err: if (dbcp != NULL && (t_ret = dbcp->c_close(dbcp)) != 0) { + dbp->err(dbp, ret, "DBcursor->close"); + if (ret == 0) + ret = t_ret; + } + if (dbp != NULL && (t_ret = dbp->close(dbp, 0)) != 0) { + dbenv->err(dbenv, ret, "DB->close"); + if (ret == 0) + ret = t_ret; + } + if (real_name != NULL) + __os_free(dbenv, real_name); + + return (ret); +} diff --git a/db/db/db_setlsn.c b/db/db/db_setlsn.c new file mode 100644 index 000000000..5865798e5 --- /dev/null +++ b/db/db/db_setlsn.c @@ -0,0 +1,83 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. 
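The new __db_fileid_reset() above depends on the fact that a file containing subdatabases can itself be opened as a database: the keys of that master database are the subdatabase names and the data values are their metadata page numbers, stored in the file's byte order (hence the explicit DB_NTOHL swap). The public-API counterpart is listing a file's subdatabases with an ordinary cursor; a sketch, with an illustrative file name:

#include <stdio.h>
#include <string.h>
#include <db.h>

/* List the subdatabases in a file by cursoring over its master database. */
int
list_subdbs(const char *file)
{
    DB *dbp;
    DBC *dbc;
    DBT key, data;
    int ret, t_ret;

    if ((ret = db_create(&dbp, NULL, 0)) != 0)
        return (ret);
    /* A NULL database name opens the per-file (master) database. */
    if ((ret = dbp->open(dbp,
        NULL, file, NULL, DB_UNKNOWN, DB_RDONLY, 0)) != 0)
        goto err;
    if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
        goto err;

    memset(&key, 0, sizeof(key));
    memset(&data, 0, sizeof(data));
    while ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0)
        printf("%.*s\n", (int)key.size, (char *)key.data);
    if (ret == DB_NOTFOUND)
        ret = 0;

    if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
        ret = t_ret;
err:    if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
        ret = t_ret;
    return (ret);
}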
All rights reserved. + * + * $Id: db_setlsn.c,v 1.2 2004/04/27 16:10:13 bostic Exp $ + */ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include + +#include +#endif + +#include "db_int.h" +#include "dbinc/db_page.h" +#include "dbinc/db_am.h" + +/* + * __db_lsn_reset -- + * Reset the LSNs for every page in the file. + * + * PUBLIC: int __db_lsn_reset __P((DB_ENV *, char *, int)); + */ +int +__db_lsn_reset(dbenv, name, passwd) + DB_ENV *dbenv; + char *name; + int passwd; +{ + DB *dbp; + DB_MPOOLFILE *mpf; + PAGE *pagep; + db_pgno_t pgno; + int t_ret, ret; + + /* Create the DB object. */ + if ((ret = db_create(&dbp, dbenv, 0)) != 0) { + dbenv->err(dbenv, ret, "db_create"); + return (1); + } + + /* If configured with a password, the databases are encrypted. */ + if (passwd && (ret = dbp->set_flags(dbp, DB_ENCRYPT)) != 0) { + dbp->err(dbp, ret, "DB->set_flags: DB_ENCRYPT"); + goto err; + } + + /* + * Open the DB file. + * + * !!! + * Note DB_RDWRMASTER flag, we need to open the master database file + * for writing in this case. + */ + if ((ret = dbp->open(dbp, + NULL, name, NULL, DB_UNKNOWN, DB_RDWRMASTER, 0)) != 0) { + dbp->err(dbp, ret, "DB->open: %s", name); + goto err; + } + + /* Reset the LSN on every page of the database file. */ + mpf = dbp->mpf; + for (pgno = 0; (ret = mpf->get(mpf, &pgno, 0, &pagep)) == 0; ++pgno) { + LSN_NOT_LOGGED(pagep->lsn); + if ((ret = mpf->put(mpf, pagep, DB_MPOOL_DIRTY)) != 0) { + dbp->err(dbp, ret, "DB_MPOOLFILE->put: %s", name); + goto err; + } + } + + if (ret == DB_PAGE_NOTFOUND) + ret = 0; + else + dbp->err(dbp, ret, "DB_MPOOLFILE->get: %s", name); + +err: if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0) + ret = t_ret; + return (ret == 0 ? 0 : 1); +} diff --git a/db/db/db_stati.c b/db/db/db_stati.c new file mode 100644 index 000000000..cd73b8ea4 --- /dev/null +++ b/db/db/db_stati.c @@ -0,0 +1,500 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1996-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: db_stati.c,v 11.123 2004/07/19 16:40:51 bostic Exp $ + */ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include + +#if TIME_WITH_SYS_TIME +#include +#include +#else +#if HAVE_SYS_TIME_H +#include +#else +#include +#endif +#endif + +#include +#endif + +#include "db_int.h" + +#include "dbinc/db_page.h" +#include "dbinc/db_shash.h" +#include "dbinc/btree.h" +#include "dbinc/hash.h" +#include "dbinc/qam.h" +#include "dbinc/log.h" +#include "dbinc/mp.h" + +#ifdef HAVE_STATISTICS +static int __db_print_all __P((DB *, u_int32_t)); +static int __db_print_citem __P((DBC *)); +static int __db_print_cursor __P((DB *)); +static int __db_print_stats __P((DB *, u_int32_t)); +static int __db_stat_arg __P((DB *, u_int32_t)); + +/* + * __db_stat_pp -- + * DB->stat pre/post processing. + * + * PUBLIC: int __db_stat_pp __P((DB *, DB_TXN *, void *, u_int32_t)); + */ +int +__db_stat_pp(dbp, txn, spp, flags) + DB *dbp; + DB_TXN *txn; + void *spp; + u_int32_t flags; +{ + DB_ENV *dbenv; + int handle_check, ret; + + dbenv = dbp->dbenv; + + PANIC_CHECK(dbp->dbenv); + DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->stat"); + + if ((ret = __db_stat_arg(dbp, flags)) != 0) + return (ret); + + /* Check for replication block. */ + handle_check = IS_REPLICATED(dbenv, dbp); + if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 0)) != 0) + return (ret); + + ret = __db_stat(dbp, txn, spp, flags); + + /* Release replication block. */ + if (handle_check) + __env_db_rep_exit(dbenv); + + return (ret); +} + +/* + * __db_stat -- + * DB->stat. 
+ * + * PUBLIC: int __db_stat __P((DB *, DB_TXN *, void *, u_int32_t)); + */ +int +__db_stat(dbp, txn, spp, flags) + DB *dbp; + DB_TXN *txn; + void *spp; + u_int32_t flags; +{ + DB_ENV *dbenv; + DBC *dbc; + int ret, t_ret; + + dbenv = dbp->dbenv; + + /* Acquire a cursor. */ + if ((ret = __db_cursor(dbp, txn, + &dbc, LF_ISSET(DB_DEGREE_2 | DB_DIRTY_READ))) != 0) + return (ret); + + DEBUG_LWRITE(dbc, NULL, "DB->stat", NULL, NULL, flags); + LF_CLR(DB_DEGREE_2 | DB_DIRTY_READ); + + switch (dbp->type) { + case DB_BTREE: + case DB_RECNO: + ret = __bam_stat(dbc, spp, flags); + break; + case DB_HASH: + ret = __ham_stat(dbc, spp, flags); + break; + case DB_QUEUE: + ret = __qam_stat(dbc, spp, flags); + break; + case DB_UNKNOWN: + default: + ret = (__db_unknown_type(dbenv, "DB->stat", dbp->type)); + break; + } + + if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) + ret = t_ret; + + return (ret); +} + +/* + * __db_stat_arg -- + * Check DB->stat arguments. + */ +static int +__db_stat_arg(dbp, flags) + DB *dbp; + u_int32_t flags; +{ + DB_ENV *dbenv; + + dbenv = dbp->dbenv; + + /* Check for invalid function flags. */ + LF_CLR(DB_DEGREE_2 | DB_DIRTY_READ); + switch (flags) { + case 0: + case DB_FAST_STAT: + case DB_CACHED_COUNTS: /* Deprecated and undocumented. */ + break; + case DB_RECORDCOUNT: /* Deprecated and undocumented. */ + if (dbp->type == DB_RECNO) + break; + if (dbp->type == DB_BTREE && F_ISSET(dbp, DB_AM_RECNUM)) + break; + /* FALLTHROUGH */ + default: + return (__db_ferr(dbenv, "DB->stat", 0)); + } + + return (0); +} + +/* + * __db_stat_print_pp -- + * DB->stat_print pre/post processing. + * + * PUBLIC: int __db_stat_print_pp __P((DB *, u_int32_t)); + */ +int +__db_stat_print_pp(dbp, flags) + DB *dbp; + u_int32_t flags; +{ + DB_ENV *dbenv; + int handle_check, ret; + + dbenv = dbp->dbenv; + + PANIC_CHECK(dbenv); + DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->stat"); + + /* + * !!! + * The actual argument checking is simple, do it inline. + */ + if ((ret = __db_fchk(dbenv, "DB->stat_print", + flags, DB_STAT_ALL | DB_STAT_CLEAR)) != 0) + return (ret); + + /* Check for replication block. */ + handle_check = IS_REPLICATED(dbenv, dbp); + if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 0)) != 0) + return (ret); + + ret = __db_stat_print(dbp, flags); + + /* Release replication block. */ + if (handle_check) + __env_db_rep_exit(dbenv); + + return (ret); +} + +/* + * __db_stat_print -- + * DB->stat_print. + * + * PUBLIC: int __db_stat_print __P((DB *, u_int32_t)); + */ +int +__db_stat_print(dbp, flags) + DB *dbp; + u_int32_t flags; +{ + int ret; + + if (flags == 0 || LF_ISSET(DB_STAT_ALL)) { + ret = __db_print_stats(dbp, flags); + if (flags == 0 || ret != 0) + return (ret); + } + + if (LF_ISSET(DB_STAT_ALL) && (ret = __db_print_all(dbp, flags)) != 0) + return (ret); + + return (0); +} + +/* + * __db_print_stats -- + * Display default DB handle statistics. + */ +static int +__db_print_stats(dbp, flags) + DB *dbp; + u_int32_t flags; +{ + DBC *dbc; + DB_ENV *dbenv; + int ret, t_ret; + + dbenv = dbp->dbenv; + + /* Acquire a cursor. 
*/ + if ((ret = __db_cursor(dbp, NULL, &dbc, 0)) != 0) + return (ret); + + DEBUG_LWRITE(dbc, NULL, "DB->stat_print", NULL, NULL, 0); + + switch (dbp->type) { + case DB_BTREE: + case DB_RECNO: + ret = __bam_stat_print(dbc, flags); + break; + case DB_HASH: + ret = __ham_stat_print(dbc, flags); + break; + case DB_QUEUE: + ret = __qam_stat_print(dbc, flags); + break; + case DB_UNKNOWN: + default: + ret = (__db_unknown_type(dbenv, "DB->stat_print", dbp->type)); + break; + } + + if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) + ret = t_ret; + + return (ret); +} + +/* + * __db_print_all -- + * Display debugging DB handle statistics. + */ +static int +__db_print_all(dbp, flags) + DB *dbp; + u_int32_t flags; +{ + static const FN fn[] = { + { DB_AM_CHKSUM, "DB_AM_CHKSUM" }, + { DB_AM_CL_WRITER, "DB_AM_CL_WRITER" }, + { DB_AM_COMPENSATE, "DB_AM_COMPENSATE" }, + { DB_AM_CREATED, "DB_AM_CREATED" }, + { DB_AM_CREATED_MSTR, "DB_AM_CREATED_MSTR" }, + { DB_AM_DBM_ERROR, "DB_AM_DBM_ERROR" }, + { DB_AM_DELIMITER, "DB_AM_DELIMITER" }, + { DB_AM_DIRTY, "DB_AM_DIRTY" }, + { DB_AM_DISCARD, "DB_AM_DISCARD" }, + { DB_AM_DUP, "DB_AM_DUP" }, + { DB_AM_DUPSORT, "DB_AM_DUPSORT" }, + { DB_AM_ENCRYPT, "DB_AM_ENCRYPT" }, + { DB_AM_FIXEDLEN, "DB_AM_FIXEDLEN" }, + { DB_AM_INMEM, "DB_AM_INMEM" }, + { DB_AM_IN_RENAME, "DB_AM_IN_RENAME" }, + { DB_AM_NOT_DURABLE, "DB_AM_NOT_DURABLE" }, + { DB_AM_OPEN_CALLED, "DB_AM_OPEN_CALLED" }, + { DB_AM_PAD, "DB_AM_PAD" }, + { DB_AM_PGDEF, "DB_AM_PGDEF" }, + { DB_AM_RDONLY, "DB_AM_RDONLY" }, + { DB_AM_RECNUM, "DB_AM_RECNUM" }, + { DB_AM_RECOVER, "DB_AM_RECOVER" }, + { DB_AM_RENUMBER, "DB_AM_RENUMBER" }, + { DB_AM_REPLICATION, "DB_AM_REPLICATION" }, + { DB_AM_REVSPLITOFF, "DB_AM_REVSPLITOFF" }, + { DB_AM_SECONDARY, "DB_AM_SECONDARY" }, + { DB_AM_SNAPSHOT, "DB_AM_SNAPSHOT" }, + { DB_AM_SUBDB, "DB_AM_SUBDB" }, + { DB_AM_SWAP, "DB_AM_SWAP" }, + { DB_AM_TXN, "DB_AM_TXN" }, + { DB_AM_VERIFYING, "DB_AM_VERIFYING" }, + { 0, NULL } + }; + DB_ENV *dbenv; + + dbenv = dbp->dbenv; + + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "DB handle information:"); + STAT_ULONG("Page size", dbp->pgsize); + STAT_ISSET("Append recno", dbp->db_append_recno); + STAT_ISSET("Feedback", dbp->db_feedback); + STAT_ISSET("Dup compare", dbp->dup_compare); + STAT_ISSET("App private", dbp->app_private); + STAT_ISSET("DbEnv", dbp->dbenv); + STAT_STRING("Type", __db_dbtype_to_string(dbp->type)); + + __db_print_mutex(dbenv, NULL, dbp->mutexp, "Thread mutex", flags); + + STAT_STRING("File", dbp->fname); + STAT_STRING("Database", dbp->dname); + STAT_HEX("Open flags", dbp->open_flags); + + __db_print_fileid(dbenv, dbp->fileid, "\tFile ID"); + + STAT_ULONG("Cursor adjust ID", dbp->adj_fileid); + STAT_ULONG("Meta pgno", dbp->meta_pgno); + STAT_ULONG("Locker ID", dbp->lid); + STAT_ULONG("Handle lock", dbp->cur_lid); + STAT_ULONG("Associate lock", dbp->associate_lid); + STAT_ULONG("RPC remote ID", dbp->cl_id); + + __db_msg(dbenv, + "%.24s\tReplication handle timestamp", + dbp->timestamp == 0 ? 
"0" : ctime(&dbp->timestamp)); + + STAT_ISSET("Secondary callback", dbp->s_callback); + STAT_ISSET("Primary handle", dbp->s_primary); + + STAT_ISSET("api internal", dbp->api_internal); + STAT_ISSET("Btree/Recno internal", dbp->bt_internal); + STAT_ISSET("Hash internal", dbp->h_internal); + STAT_ISSET("Queue internal", dbp->q_internal); + STAT_ISSET("XA internal", dbp->xa_internal); + + __db_prflags(dbenv, NULL, dbp->flags, fn, NULL, "\tFlags"); + + if (dbp->log_filename == NULL) + STAT_ISSET("File naming information", dbp->log_filename); + else + __dbreg_print_fname(dbenv, dbp->log_filename); + + (void)__db_print_cursor(dbp); + + return (0); +} + +/* + * __db_print_cursor -- + * Display the cursor active and free queues. + */ +static int +__db_print_cursor(dbp) + DB *dbp; +{ + DB_ENV *dbenv; + DBC *dbc; + int ret, t_ret; + + dbenv = dbp->dbenv; + + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "DB handle cursors:"); + + ret = 0; + MUTEX_THREAD_LOCK(dbp->dbenv, dbp->mutexp); + __db_msg(dbenv, "Active queue:"); + for (dbc = TAILQ_FIRST(&dbp->active_queue); + dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) + if ((t_ret = __db_print_citem(dbc)) != 0 && ret == 0) + ret = t_ret; + __db_msg(dbenv, "Join queue:"); + for (dbc = TAILQ_FIRST(&dbp->join_queue); + dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) + if ((t_ret = __db_print_citem(dbc)) != 0 && ret == 0) + ret = t_ret; + __db_msg(dbenv, "Free queue:"); + for (dbc = TAILQ_FIRST(&dbp->free_queue); + dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) + if ((t_ret = __db_print_citem(dbc)) != 0 && ret == 0) + ret = t_ret; + MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp); + + return (ret); +} + +static +int __db_print_citem(dbc) + DBC *dbc; +{ + static const FN fn[] = { + { DBC_ACTIVE, "DBC_ACTIVE" }, + { DBC_COMPENSATE, "DBC_COMPENSATE" }, + { DBC_DEGREE_2, "DBC_DEGREE_2" }, + { DBC_DIRTY_READ, "DBC_DIRTY_READ" }, + { DBC_OPD, "DBC_OPD" }, + { DBC_RECOVER, "DBC_RECOVER" }, + { DBC_RMW, "DBC_RMW" }, + { DBC_TRANSIENT, "DBC_TRANSIENT" }, + { DBC_WRITECURSOR, "DBC_WRITECURSOR" }, + { DBC_WRITER, "DBC_WRITER" }, + { DBC_MULTIPLE, "DBC_MULTIPLE" }, + { DBC_MULTIPLE_KEY, "DBC_MULTIPLE_KEY" }, + { DBC_OWN_LID, "DBC_OWN_LID" }, + { 0, NULL } + }; + DB *dbp; + DBC_INTERNAL *cp; + DB_ENV *dbenv; + + dbp = dbc->dbp; + dbenv = dbp->dbenv; + cp = dbc->internal; + + STAT_HEX("DBC", dbc); + STAT_HEX("Associated dbp", dbc->dbp); + STAT_HEX("Associated txn", dbc->txn); + STAT_HEX("Internal", cp); + STAT_HEX("Default locker ID", dbc->lid); + STAT_HEX("Locker", dbc->locker); + STAT_STRING("Type", __db_dbtype_to_string(dbc->dbtype)); + + STAT_HEX("Off-page duplicate cursor", cp->opd); + STAT_HEX("Referenced page", cp->page); + STAT_ULONG("Root", cp->root); + STAT_ULONG("Page number", cp->pgno); + STAT_ULONG("Page index", cp->indx); + STAT_STRING("Lock mode", __db_lockmode_to_string(cp->lock_mode)); + __db_prflags(dbenv, NULL, dbc->flags, fn, NULL, "\tFlags"); + + switch (dbc->dbtype) { + case DB_BTREE: + case DB_RECNO: + __bam_print_cursor(dbc); + break; + case DB_HASH: + __ham_print_cursor(dbc); + break; + case DB_UNKNOWN: + DB_ASSERT(dbp->type != DB_UNKNOWN); + /* FALLTHROUGH */ + case DB_QUEUE: + default: + break; + } + return (0); +} + +#else /* !HAVE_STATISTICS */ + +int +__db_stat_pp(dbp, txn, spp, flags) + DB *dbp; + DB_TXN *txn; + void *spp; + u_int32_t flags; +{ + COMPQUIET(spp, NULL); + COMPQUIET(txn, NULL); + COMPQUIET(flags, 0); + + return (__db_stat_not_built(dbp->dbenv)); +} + +int +__db_stat_print_pp(dbp, flags) + DB *dbp; + u_int32_t flags; +{ + 
COMPQUIET(flags, 0); + + return (__db_stat_not_built(dbp->dbenv)); +} +#endif diff --git a/db/db/db_truncate.c b/db/db/db_truncate.c index 8eb177d56..801f3712f 100644 --- a/db/db/db_truncate.c +++ b/db/db/db_truncate.c @@ -1,22 +1,23 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_truncate.c,v 11.201 2004/07/15 15:52:51 sue Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_truncate.c,v 11.195 2003/07/02 15:39:51 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include + +#include #endif #include "db_int.h" #include "dbinc/db_page.h" +#include "dbinc/log.h" #include "dbinc/btree.h" #include "dbinc/hash.h" #include "dbinc/qam.h" @@ -44,12 +45,11 @@ __db_truncate_pp(dbp, txn, countp, flags) PANIC_CHECK(dbenv); /* Check for invalid flags. */ - if (F_ISSET(dbp, DB_AM_SECONDARY) && !LF_ISSET(DB_UPDATE_SECONDARY)) { + if (F_ISSET(dbp, DB_AM_SECONDARY)) { __db_err(dbenv, "DBP->truncate forbidden on secondary indices"); return (EINVAL); } - LF_CLR(DB_UPDATE_SECONDARY); if ((ret = __db_fchk(dbenv, "DB->truncate", flags, DB_AUTO_COMMIT)) != 0) return (ret); @@ -80,13 +80,13 @@ __db_truncate_pp(dbp, txn, countp, flags) } handle_check = IS_REPLICATED(dbenv, dbp); - if (handle_check && (ret = __db_rep_enter(dbp, 1, txn != NULL)) != 0) + if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) goto err; - ret = __db_truncate(dbp, txn, countp, flags); + ret = __db_truncate(dbp, txn, countp); if (handle_check) - __db_rep_exit(dbenv); + __env_db_rep_exit(dbenv); err: return (txn_local ? __db_txn_auto_resolve(dbenv, txn, 0, ret) : ret); } @@ -95,13 +95,13 @@ err: return (txn_local ? __db_txn_auto_resolve(dbenv, txn, 0, ret) : ret); * __db_truncate * DB->truncate. * - * PUBLIC: int __db_truncate __P((DB *, DB_TXN *, u_int32_t *, u_int32_t)); + * PUBLIC: int __db_truncate __P((DB *, DB_TXN *, u_int32_t *)); */ int -__db_truncate(dbp, txn, countp, flags) +__db_truncate(dbp, txn, countp) DB *dbp; DB_TXN *txn; - u_int32_t *countp, flags; + u_int32_t *countp; { DB *sdbp; DBC *dbc; @@ -109,30 +109,34 @@ __db_truncate(dbp, txn, countp, flags) u_int32_t scount; int ret, t_ret; - COMPQUIET(flags, 0); - dbenv = dbp->dbenv; dbc = NULL; ret = 0; - DB_TEST_RECOVERY(dbp, DB_TEST_PREDESTROY, ret, NULL); - /* * Run through all secondaries and truncate them first. The count * returned is the count of the primary only. QUEUE uses normal * processing to truncate so it will update the secondaries normally. */ - if (dbp->type != DB_QUEUE && LIST_FIRST(&dbp->s_secondaries) != NULL) + if (dbp->type != DB_QUEUE && LIST_FIRST(&dbp->s_secondaries) != NULL) { for (sdbp = __db_s_first(dbp); sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) - if ((ret = __db_truncate(sdbp, - txn, &scount, DB_UPDATE_SECONDARY)) != 0) - return (ret); + if ((ret = __db_truncate(sdbp, txn, &scount)) != 0) + break; + if (sdbp != NULL) + (void)__db_s_done(sdbp); + if (ret != 0) + return (ret); + } + + DB_TEST_RECOVERY(dbp, DB_TEST_PREDESTROY, ret, NULL); /* Acquire a cursor. */ if ((ret = __db_cursor(dbp, txn, &dbc, 0)) != 0) return (ret); + DEBUG_LWRITE(dbc, txn, "DB->truncate", NULL, NULL, 0); + switch (dbp->type) { case DB_BTREE: case DB_RECNO: diff --git a/db/db/db_upg.c b/db/db/db_upg.c index f194c8005..a41a1c49d 100644 --- a/db/db/db_upg.c +++ b/db/db/db_upg.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. 
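The truncate changes above drop the flags argument from the internal __db_truncate while leaving the public interface alone. A short caller sketch, not part of the patch: DB->truncate and DB_AUTO_COMMIT come from __db_truncate_pp above, a transactional environment and an open non-secondary handle are assumed, and the empty_db name is illustrative.

#include <stdio.h>
#include <db.h>

int
empty_db(dbp)
	DB *dbp;
{
	u_int32_t count;
	int ret;

	/* Discard every key/data pair, wrapping the work in its own txn. */
	if ((ret = dbp->truncate(dbp, NULL, &count, DB_AUTO_COMMIT)) == 0)
		printf("discarded %lu records\n", (unsigned long)count);
	return (ret);
}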
* - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_upg.c,v 11.35 2004/03/24 20:37:35 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_upg.c,v 11.33 2003/06/06 14:55:40 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -181,7 +179,7 @@ __db_upgrade(dbp, fname, flags) * the end of the database file. * * In DB 3.X, we now create all the hash pages - * belonging to a doubling atomicly; it's not + * belonging to a doubling atomically; it's not * safe to just save them for later, because when * we create an overflow page we'll just create * a new last page (whatever that may be). Grow diff --git a/db/db/db_upg_opd.c b/db/db/db_upg_opd.c index 099bfd0d2..fcae089ad 100644 --- a/db/db/db_upg_opd.c +++ b/db/db/db_upg_opd.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_upg_opd.c,v 11.21 2004/03/19 16:10:26 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_upg_opd.c,v 11.19 2003/01/08 04:13:45 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -102,26 +100,26 @@ __db_31_offdup(dbp, real_name, fhp, sorted, pgnop) } /* If we only have a single page, it's easy. */ - if (cur_cnt > 1) { - /* - * pgno_cur is the list of pages we just converted. We're - * going to walk that list, but we'll need to create a new - * list while we do so. - */ - if ((ret = __os_malloc(dbp->dbenv, - cur_cnt * sizeof(db_pgno_t), &pgno_next)) != 0) - goto err; - - /* Figure out where we can start allocating new pages. */ - if ((ret = __db_lastpgno(dbp, real_name, fhp, &pgno_last)) != 0) - goto err; - - /* Allocate room for an internal page. */ - if ((ret = __os_malloc(dbp->dbenv, - dbp->pgsize, &ipage)) != 0) - goto err; - PGNO(ipage) = PGNO_INVALID; - } + if (cur_cnt <= 1) + goto done; + + /* + * pgno_cur is the list of pages we just converted. We're + * going to walk that list, but we'll need to create a new + * list while we do so. + */ + if ((ret = __os_malloc(dbp->dbenv, + cur_cnt * sizeof(db_pgno_t), &pgno_next)) != 0) + goto err; + + /* Figure out where we can start allocating new pages. */ + if ((ret = __db_lastpgno(dbp, real_name, fhp, &pgno_last)) != 0) + goto err; + + /* Allocate room for an internal page. */ + if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &ipage)) != 0) + goto err; + PGNO(ipage) = PGNO_INVALID; /* * Repeatedly walk the list of pages, building internal pages, until @@ -181,7 +179,7 @@ __db_31_offdup(dbp, real_name, fhp, sorted, pgnop) pgno_next = tmp; } - *pgnop = pgno_cur[0]; +done: *pgnop = pgno_cur[0]; err: if (pgno_cur != NULL) __os_free(dbp->dbenv, pgno_cur); diff --git a/db/db/db_vrfy.c b/db/db/db_vrfy.c index eab383cb0..41abbde40 100644 --- a/db/db/db_vrfy.c +++ b/db/db/db_vrfy.c @@ -1,18 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2000-2003 + * Copyright (c) 2000-2004 * Sleepycat Software. All rights reserved. 
* - * $Id: db_vrfy.c,v 1.127 2003/07/16 22:25:34 ubell Exp $ + * $Id: db_vrfy.c,v 1.138 2004/10/11 18:47:50 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_vrfy.c,v 1.127 2003/07/16 22:25:34 ubell Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -49,6 +45,8 @@ static int __db_salvage_subdbs __P((DB *, VRFY_DBINFO *, void *, int(*)(void *, const void *), u_int32_t, int *)); static int __db_salvage_unknowns __P((DB *, VRFY_DBINFO *, void *, int (*)(void *, const void *), u_int32_t)); +static int __db_verify __P((DB *, const char *, const char *, + void *, int (*)(void *, const void *), u_int32_t)); static int __db_verify_arg __P((DB *, const char *, u_int32_t)); static int __db_vrfy_freelist __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t)); @@ -64,6 +62,10 @@ static int __db_vrfy_structure static int __db_vrfy_walkpages __P((DB *, VRFY_DBINFO *, void *, int (*)(void *, const void *), u_int32_t)); +#define VERIFY_FLAGS \ + (DB_AGGRESSIVE | \ + DB_NOORDERCHK | DB_ORDERCHKONLY | DB_PRINTABLE | DB_SALVAGE | DB_UNREF) + /* * __db_verify_pp -- * DB->verify public interface. @@ -108,6 +110,15 @@ __db_verify_internal(dbp, fname, dname, handle, callback, flags) PANIC_CHECK(dbenv); DB_ILLEGAL_AFTER_OPEN(dbp, "DB->verify"); +#ifdef HAVE_FTRUNCATE + /* + * If we're using ftruncate to abort page-allocation functions, there + * should never be unreferenced pages. Always check for unreferenced + * pages on those systems. + */ + LF_SET(DB_UNREF); +#endif + if ((ret = __db_verify_arg(dbp, dname, flags)) != 0) return (ret); @@ -141,10 +152,7 @@ __db_verify_arg(dbp, dname, flags) dbenv = dbp->dbenv; -#undef OKFLAGS -#define OKFLAGS (DB_AGGRESSIVE | DB_NOORDERCHK | DB_ORDERCHKONLY | \ - DB_PRINTABLE | DB_SALVAGE) - if ((ret = __db_fchk(dbenv, "DB->verify", flags, OKFLAGS)) != 0) + if ((ret = __db_fchk(dbenv, "DB->verify", flags, VERIFY_FLAGS)) != 0) return (ret); /* @@ -160,7 +168,7 @@ __db_verify_arg(dbp, dname, flags) !LF_ISSET(DB_SALVAGE)) return (__db_ferr(dbenv, "__db_verify", 1)); - if (LF_ISSET(DB_ORDERCHKONLY) && flags != DB_ORDERCHKONLY) + if (LF_ISSET(DB_ORDERCHKONLY) && LF_ISSET(DB_SALVAGE | DB_NOORDERCHK)) return (__db_ferr(dbenv, "__db_verify", 1)); if (LF_ISSET(DB_ORDERCHKONLY) && dname == NULL) { @@ -183,11 +191,8 @@ __db_verify_arg(dbp, dname, flags) * * flags may be 0, DB_NOORDERCHK, DB_ORDERCHKONLY, or DB_SALVAGE * (and optionally DB_AGGRESSIVE). - * - * PUBLIC: int __db_verify __P((DB *, const char *, - * PUBLIC: const char *, void *, int (*)(void *, const void *), u_int32_t)); */ -int +static int __db_verify(dbp, name, subdb, handle, callback, flags) DB *dbp; const char *name, *subdb; @@ -249,7 +254,7 @@ __db_verify(dbp, name, subdb, handle, callback, flags) * safe to open the database normally and then use the page swapping * code, which makes life easier. */ - if ((ret = __os_open(dbenv, real_name, DB_OSO_RDONLY, 0444, &fhp)) != 0) + if ((ret = __os_open(dbenv, real_name, DB_OSO_RDONLY, 0, &fhp)) != 0) goto err; /* Verify the metadata page 0; set pagesize and type. 
*/ @@ -340,7 +345,7 @@ __db_verify(dbp, name, subdb, handle, callback, flags) __db_vrfy_walkpages(dbp, vdp, handle, callback, flags)) != 0) { if (ret == DB_VERIFY_BAD) isbad = 1; - else if (ret != 0) + else goto err; } @@ -350,7 +355,7 @@ __db_verify(dbp, name, subdb, handle, callback, flags) __db_vrfy_structure(dbp, vdp, name, 0, flags)) != 0) { if (ret == DB_VERIFY_BAD) isbad = 1; - else if (ret != 0) + else goto err; } @@ -367,14 +372,7 @@ __db_verify(dbp, name, subdb, handle, callback, flags) __db_salvage_destroy(vdp); } - if (0) { - /* Don't try to strerror() DB_VERIFY_FATAL; it's private. */ -err: if (ret == DB_VERIFY_FATAL) - ret = DB_VERIFY_BAD; - __db_err(dbenv, "%s: %s", name, db_strerror(ret)); - } - - if (LF_ISSET(DB_SALVAGE) && +err: if (LF_ISSET(DB_SALVAGE) && (has == 0 || F_ISSET(vdp, SALVAGE_PRINTFOOTER))) (void)__db_prfooter(handle, callback); @@ -393,9 +391,22 @@ done: if (!LF_ISSET(DB_SALVAGE) && dbp->db_feedback != NULL) if (real_name != NULL) __os_free(dbenv, real_name); - if ((ret == 0 && isbad == 1) || ret == DB_VERIFY_FATAL) + /* + * DB_VERIFY_FATAL is a private error, translate to a public one. + * + * If we didn't find a page, it's probably a page number was corrupted. + * Return the standard corruption error. + * + * Otherwise, if we found corruption along the way, set the return. + */ + if (ret == DB_VERIFY_FATAL || + ret == DB_PAGE_NOTFOUND || (ret == 0 && isbad == 1)) ret = DB_VERIFY_BAD; + /* Make sure there's a public complaint if we found corruption. */ + if (ret != 0) + __db_err(dbenv, "%s: %s", name, db_strerror(ret)); + return (ret); } @@ -604,14 +615,9 @@ __db_vrfy_walkpages(dbp, vdp, handle, callback, flags) dbenv = dbp->dbenv; mpf = dbp->mpf; + h = NULL; ret = isbad = t_ret = 0; -#define OKFLAGS (DB_AGGRESSIVE | DB_NOORDERCHK | DB_ORDERCHKONLY | \ - DB_PRINTABLE | DB_SALVAGE) - if ((ret = __db_fchk(dbenv, - "__db_vrfy_walkpages", flags, OKFLAGS)) != 0) - return (ret); - for (i = 0; i <= vdp->last_pgno; i++) { /* * If DB_SALVAGE is set, we inspect our database of @@ -630,8 +636,7 @@ __db_vrfy_walkpages(dbp, vdp, handle, callback, flags) ret = t_ret; if (LF_ISSET(DB_SALVAGE)) continue; - else - return (ret); + return (ret); } if (LF_ISSET(DB_SALVAGE)) { @@ -763,7 +768,7 @@ __db_vrfy_walkpages(dbp, vdp, handle, callback, flags) } if (0) { -err: if ((t_ret = __memp_fput(mpf, h, 0)) != 0) +err: if (h != NULL && (t_ret = __memp_fput(mpf, h, 0)) != 0) return (ret == 0 ? 
t_ret : ret); } @@ -798,13 +803,6 @@ __db_vrfy_structure(dbp, vdp, dbname, meta_pgno, flags) dbenv = dbp->dbenv; pgset = vdp->pgset; - if ((ret = __db_fchk(dbenv, "DB->verify", flags, OKFLAGS)) != 0) - return (ret); - if (LF_ISSET(DB_SALVAGE)) { - __db_err(dbenv, "__db_vrfy_structure called with DB_SALVAGE"); - return (EINVAL); - } - /* * Providing feedback here is tricky; in most situations, * we fetch each page one more time, but we do so in a top-down @@ -918,7 +916,7 @@ __db_vrfy_structure(dbp, vdp, dbname, meta_pgno, flags) (u_long)pip->refcount, (u_long)p)); isbad = 1; } - } else if (p == 0) { + } else if (p == 0 && LF_ISSET(DB_UNREF)) { EPRINT((dbenv, "Page %lu: unreferenced page", (u_long)i)); isbad = 1; @@ -1831,9 +1829,9 @@ __db_salvage_unknowns(dbp, vdp, handle, callback, flags) */ if ((ret = __db_safe_goff(dbp, vdp, pgno, &key, &ovflbuf, flags)) != 0 || - (ret = __db_prdbt(&key, + (ret = __db_vrfy_prdbt(&key, 0, " ", handle, callback, 0, vdp)) != 0 || - (ret = __db_prdbt(&unkdbt, + (ret = __db_vrfy_prdbt(&unkdbt, 0, " ", handle, callback, 0, vdp)) != 0) err_ret = ret; break; @@ -1936,7 +1934,7 @@ __db_vrfy_inpitem(dbp, h, pgno, i, is_btree, flags, himarkp, offsetp) * Check alignment; if it's unaligned, it's unsafe to * manipulate this item. */ - if (offset != ALIGN(offset, sizeof(u_int32_t))) { + if (offset != DB_ALIGN(offset, sizeof(u_int32_t))) { EPRINT((dbenv, "Page %lu: unaligned offset %lu at page index %lu", (u_long)pgno, (u_long)offset, (u_long)i)); @@ -2307,7 +2305,8 @@ __db_salvage_subdbpg(dbp, vdp, master, handle, callback, flags) err_ret = DB_VERIFY_BAD; continue; } - memcpy(&meta_pgno, bkdata->data, sizeof(db_pgno_t)); + memcpy(&meta_pgno, + (db_pgno_t *)bkdata->data, sizeof(db_pgno_t)); /* * Subdatabase meta pgnos are stored in network byte diff --git a/db/db/db_vrfy_stub.c b/db/db/db_vrfy_stub.c index d3206a9d2..486802d7d 100644 --- a/db/db/db_vrfy_stub.c +++ b/db/db/db_vrfy_stub.c @@ -1,15 +1,15 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_vrfy_stub.c,v 11.6 2004/06/14 15:23:32 bostic Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_vrfy_stub.c,v 11.4 2003/07/01 19:47:12 bostic Exp $"; -#endif /* not lint */ +#include "db_config.h" +#ifndef HAVE_VERIFY #ifndef NO_SYSTEM_INCLUDES #include #endif @@ -100,3 +100,4 @@ __db_vrfy_putpageinfo(dbenv, vdp, pip) COMPQUIET(pip, NULL); return (__db_novrfy(dbenv)); } +#endif /* !HAVE_VERIFY */ diff --git a/db/db/db_vrfyutil.c b/db/db/db_vrfyutil.c index 5e5485f06..f1034af1f 100644 --- a/db/db/db_vrfyutil.c +++ b/db/db/db_vrfyutil.c @@ -1,18 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2000-2003 + * Copyright (c) 2000-2004 * Sleepycat Software. All rights reserved. 
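The verify changes above fold the private DB_VERIFY_FATAL and a stray DB_PAGE_NOTFOUND into the public DB_VERIFY_BAD return. A hedged caller sketch, not part of the patch: the DB->verify argument list (file, database, output stream, flags) is the standard public signature rather than something shown here, DB_VERIFY_BAD is from this patch, and verify_file/fname are illustrative.

#include <stdio.h>
#include <db.h>

int
verify_file(dbenv, fname)
	DB_ENV *dbenv;
	const char *fname;
{
	DB *dbp;
	int ret;

	if ((ret = db_create(&dbp, dbenv, 0)) != 0)
		return (ret);

	/* DB->verify discards the handle, so no DB->close call follows. */
	ret = dbp->verify(dbp, fname, NULL, NULL, 0);
	if (ret == DB_VERIFY_BAD)
		fprintf(stderr, "%s: verification found corruption\n", fname);
	return (ret);
}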
* - * $Id: db_vrfyutil.c,v 11.37 2003/06/30 17:19:49 bostic Exp $ + * $Id: db_vrfyutil.c,v 11.40 2004/10/11 18:47:50 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_vrfyutil.c,v 11.37 2003/06/30 17:19:49 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -321,7 +317,6 @@ __db_vrfy_pgset_get(dbp, pgno, valp) if ((ret = __db_get(dbp, NULL, &key, &data, 0)) == 0) { DB_ASSERT(data.size == sizeof(int)); - memcpy(&val, data.data, sizeof(int)); } else if (ret == DB_NOTFOUND) val = 0; else @@ -359,7 +354,6 @@ __db_vrfy_pgset_inc(dbp, pgno) if ((ret = __db_get(dbp, NULL, &key, &data, 0)) == 0) { DB_ASSERT(data.size == sizeof(int)); - memcpy(&val, data.data, sizeof(int)); } else if (ret != DB_NOTFOUND) return (ret); @@ -866,3 +860,46 @@ __db_salvage_markneeded(vdp, pgno, pgtype) ret = __db_put(dbp, NULL, &key, &data, DB_NOOVERWRITE); return (ret == DB_KEYEXIST ? 0 : ret); } + +/* + * __db_vrfy_prdbt -- + * Print out a DBT data element from a verification routine. + * + * PUBLIC: int __db_vrfy_prdbt __P((DBT *, int, const char *, void *, + * PUBLIC: int (*)(void *, const void *), int, VRFY_DBINFO *)); + */ +int +__db_vrfy_prdbt(dbtp, checkprint, prefix, handle, callback, is_recno, vdp) + DBT *dbtp; + int checkprint; + const char *prefix; + void *handle; + int (*callback) __P((void *, const void *)); + int is_recno; + VRFY_DBINFO *vdp; +{ + if (vdp != NULL) { + /* + * If vdp is non-NULL, we might be the first key in the + * "fake" subdatabase used for key/data pairs we can't + * associate with a known subdb. + * + * Check and clear the SALVAGE_PRINTHEADER flag; if + * it was set, print a subdatabase header. + */ + if (F_ISSET(vdp, SALVAGE_PRINTHEADER)) + (void)__db_prheader( + NULL, "__OTHER__", 0, 0, handle, callback, vdp, 0); + F_CLR(vdp, SALVAGE_PRINTHEADER); + F_SET(vdp, SALVAGE_PRINTFOOTER); + + /* + * Even if the printable flag wasn't set by our immediate + * caller, it may be set on a salvage-wide basis. + */ + if (F_ISSET(vdp, SALVAGE_PRINTABLE)) + checkprint = 1; + } + return ( + __db_prdbt(dbtp, checkprint, prefix, handle, callback, is_recno)); +} diff --git a/db/db185/db185.c b/db/db185/db185.c index d1c35501f..8399eac42 100644 --- a/db/db185/db185.c +++ b/db/db185/db185.c @@ -1,17 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db185.c,v 11.35 2004/03/24 20:37:35 bostic Exp $ */ #include "db_config.h" #ifndef lint static const char copyright[] = - "Copyright (c) 1996-2003\nSleepycat Software Inc. All rights reserved.\n"; -static const char revid[] = - "$Id: db185.c,v 11.33 2003/05/05 19:54:58 bostic Exp $"; + "Copyright (c) 1996-2004\nSleepycat Software Inc. All rights reserved.\n"; #endif #ifndef NO_SYSTEM_INCLUDES @@ -224,7 +224,7 @@ __db185_open(file, oflags, mode, type, openinfo) * Store a reference so we can indirect from the DB 1.85 structure * to the underlying DB structure, and vice-versa. This has to be * done BEFORE the DB::open method call because the hash callback - * is exercised as part of hash database initialiation. + * is exercised as part of hash database initialization. */ db185p->dbp = dbp; dbp->api_internal = db185p; diff --git a/db/db185/db185_int.in b/db/db185/db185_int.in index 04faeaeca..f9bfdbba0 100644 --- a/db/db185/db185_int.in +++ b/db/db185/db185_int.in @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. 
* - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. */ /* @@ -36,7 +36,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: db185_int.in,v 11.13 2003/01/08 04:15:06 bostic Exp $ + * $Id: db185_int.in,v 11.14 2004/01/28 03:35:59 bostic Exp $ */ #ifndef _DB185_INT_H_ diff --git a/db/db_archive/db_archive.c b/db/db_archive/db_archive.c index cfed47b91..d5db42dee 100644 --- a/db/db_archive/db_archive.c +++ b/db/db_archive/db_archive.c @@ -1,17 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_archive.c,v 11.46 2004/06/10 01:00:08 bostic Exp $ */ #include "db_config.h" #ifndef lint static const char copyright[] = - "Copyright (c) 1996-2003\nSleepycat Software Inc. All rights reserved.\n"; -static const char revid[] = - "$Id: db_archive.c,v 11.42 2003/08/13 19:57:04 ubell Exp $"; + "Copyright (c) 1996-2004\nSleepycat Software Inc. All rights reserved.\n"; #endif #ifndef NO_SYSTEM_INCLUDES @@ -108,9 +108,6 @@ main(argc, argv) dbenv->set_errfile(dbenv, stderr); dbenv->set_errpfx(dbenv, progname); - if (verbose) - (void)dbenv->set_verbose(dbenv, DB_VERB_CHKPOINT, 1); - if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES)) != 0) { dbenv->err(dbenv, ret, "set_passwd"); @@ -122,9 +119,10 @@ main(argc, argv) */ if ((ret = dbenv->open(dbenv, home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 && + (ret == DB_VERSION_MISMATCH || (ret = dbenv->open(dbenv, home, DB_CREATE | - DB_INIT_LOG | DB_INIT_TXN | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) { - dbenv->err(dbenv, ret, "open"); + DB_INIT_LOG | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0)) { + dbenv->err(dbenv, ret, "DB_ENV->open"); goto shutdown; } diff --git a/db/db_checkpoint/db_checkpoint.c b/db/db_checkpoint/db_checkpoint.c index c100605ee..538b66ddc 100644 --- a/db/db_checkpoint/db_checkpoint.c +++ b/db/db_checkpoint/db_checkpoint.c @@ -1,17 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_checkpoint.c,v 11.54 2004/03/24 15:13:12 bostic Exp $ */ #include "db_config.h" #ifndef lint static const char copyright[] = - "Copyright (c) 1996-2003\nSleepycat Software Inc. All rights reserved.\n"; -static const char revid[] = - "$Id: db_checkpoint.c,v 11.51 2003/09/04 18:57:00 bostic Exp $"; + "Copyright (c) 1996-2004\nSleepycat Software Inc. All rights reserved.\n"; #endif #ifndef NO_SYSTEM_INCLUDES @@ -178,7 +178,7 @@ main(argc, argv) while (!__db_util_interrupted()) { if (verbose) { (void)time(&now); - dbenv->errx(dbenv, "checkpoint: %s", ctime(&now)); + dbenv->errx(dbenv, "checkpoint begin: %s", ctime(&now)); } if ((ret = dbenv->txn_checkpoint(dbenv, @@ -187,10 +187,16 @@ main(argc, argv) goto shutdown; } + if (verbose) { + (void)time(&now); + dbenv->errx(dbenv, + "checkpoint complete: %s", ctime(&now)); + } + if (once) break; - (void)__os_sleep(dbenv, seconds, 0); + __os_sleep(dbenv, seconds, 0); } if (0) { diff --git a/db/db_deadlock/db_deadlock.c b/db/db_deadlock/db_deadlock.c index 8a6f7b7c2..cc91db25d 100644 --- a/db/db_deadlock/db_deadlock.c +++ b/db/db_deadlock/db_deadlock.c @@ -1,17 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: db_deadlock.c,v 11.45 2004/03/24 15:13:12 bostic Exp $ */ #include "db_config.h" #ifndef lint static const char copyright[] = - "Copyright (c) 1996-2003\nSleepycat Software Inc. All rights reserved.\n"; -static const char revid[] = - "$Id: db_deadlock.c,v 11.41 2003/06/17 14:36:44 bostic Exp $"; + "Copyright (c) 1996-2004\nSleepycat Software Inc. All rights reserved.\n"; #endif #ifndef NO_SYSTEM_INCLUDES @@ -80,6 +80,9 @@ main(argc, argv) case 'o': atype = DB_LOCK_OLDEST; break; + case 'W': + atype = DB_LOCK_MAXWRITE; + break; case 'w': atype = DB_LOCK_MINWRITE; break; @@ -161,8 +164,8 @@ main(argc, argv) } /* An environment is required. */ - if ((ret = dbenv->open(dbenv, home, - DB_JOINENV | DB_USE_ENVIRON, 0)) != 0) { + if ((ret = + dbenv->open(dbenv, home, DB_INIT_LOCK | DB_USE_ENVIRON, 0)) != 0) { dbenv->err(dbenv, ret, "open"); goto shutdown; } @@ -181,7 +184,7 @@ main(argc, argv) /* Make a pass every "secs" secs and "usecs" usecs. */ if (secs == 0 && usecs == 0) break; - (void)__os_sleep(dbenv, secs, usecs); + __os_sleep(dbenv, secs, usecs); } if (0) { @@ -210,7 +213,7 @@ usage() { (void)fprintf(stderr, "%s\n\t%s\n", "usage: db_deadlock [-Vv]", - "[-a e | m | n | o | w | y] [-h home] [-L file] [-t sec.usec]"); + "[-a e | m | n | o | W | w | y] [-h home] [-L file] [-t sec.usec]"); return (EXIT_FAILURE); } diff --git a/db/db_dump/db_dump.c b/db/db_dump/db_dump.c index 858eb0239..732a4c62f 100644 --- a/db/db_dump/db_dump.c +++ b/db/db_dump/db_dump.c @@ -1,17 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_dump.c,v 11.99 2004/10/11 18:53:13 bostic Exp $ */ #include "db_config.h" #ifndef lint static const char copyright[] = - "Copyright (c) 1996-2003\nSleepycat Software Inc. All rights reserved.\n"; -static const char revid[] = - "$Id: db_dump.c,v 11.88 2003/08/13 19:57:06 ubell Exp $"; + "Copyright (c) 1996-2004\nSleepycat Software Inc. All rights reserved.\n"; #endif #ifndef NO_SYSTEM_INCLUDES @@ -28,7 +28,6 @@ static const char revid[] = #include "dbinc/db_am.h" int db_init __P((DB_ENV *, char *, int, u_int32_t, int *)); -int dump __P((DB *, int, int)); int dump_sub __P((DB_ENV *, DB *, char *, int, int)); int is_sub __P((DB *, int *)); int main __P((int, char *[])); @@ -211,7 +210,7 @@ retry: if ((ret = db_env_create(&dbenv, 0)) != 0) { goto err; } if (private != 0) { - if ((ret = __db_util_cache(dbenv, dbp, &cache, &resize)) != 0) + if ((ret = __db_util_cache(dbp, &cache, &resize)) != 0) goto err; if (resize) { (void)dbp->close(dbp, 0); @@ -224,8 +223,8 @@ retry: if ((ret = db_env_create(&dbenv, 0)) != 0) { } if (dopt != NULL) { - if (__db_dump(dbp, dopt, NULL)) { - dbp->err(dbp, ret, "__db_dump: %s", argv[0]); + if ((ret = __db_dumptree(dbp, dopt, NULL)) != 0) { + dbp->err(dbp, ret, "__db_dumptree: %s", argv[0]); goto err; } } else if (lflag) { @@ -246,9 +245,8 @@ retry: if ((ret = db_env_create(&dbenv, 0)) != 0) { if (dump_sub(dbenv, dbp, argv[0], pflag, keyflag)) goto err; } else - if (__db_prheader(dbp, NULL, pflag, keyflag, stdout, - __db_pr_callback, NULL, 0) || - dump(dbp, pflag, keyflag)) + if (dbp->dump(dbp, NULL, + __db_pr_callback, stdout, pflag, keyflag)) goto err; } @@ -307,9 +305,11 @@ db_init(dbenv, home, is_salvage, cache, is_privatep) * before we create our own. */ *is_privatep = 0; - if (dbenv->open(dbenv, home, - DB_USE_ENVIRON | (is_salvage ? 
DB_INIT_MPOOL : DB_JOINENV), 0) == 0) + if ((ret = dbenv->open(dbenv, home, DB_USE_ENVIRON | + (is_salvage ? DB_INIT_MPOOL : DB_JOINENV), 0)) == 0) return (0); + if (ret == DB_VERSION_MISMATCH) + goto err; /* * An environment is required because we may be trying to look at @@ -328,7 +328,7 @@ db_init(dbenv, home, is_salvage, cache, is_privatep) return (0); /* An environment is required. */ - dbenv->err(dbenv, ret, "open"); +err: dbenv->err(dbenv, ret, "DB_ENV->open"); return (1); } @@ -348,7 +348,7 @@ is_sub(dbp, yesno) switch (dbp->type) { case DB_BTREE: case DB_RECNO: - if ((ret = dbp->stat(dbp, &btsp, DB_FAST_STAT)) != 0) { + if ((ret = dbp->stat(dbp, NULL, &btsp, DB_FAST_STAT)) != 0) { dbp->err(dbp, ret, "DB->stat"); return (ret); } @@ -356,7 +356,7 @@ is_sub(dbp, yesno) free(btsp); break; case DB_HASH: - if ((ret = dbp->stat(dbp, &hsp, DB_FAST_STAT)) != 0) { + if ((ret = dbp->stat(dbp, NULL, &hsp, DB_FAST_STAT)) != 0) { dbp->err(dbp, ret, "DB->stat"); return (ret); } @@ -420,10 +420,8 @@ dump_sub(dbenv, parent_dbp, parent_name, pflag, keyflag) parent_name, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0) dbp->err(dbp, ret, "DB->open: %s:%s", parent_name, subdb); - if (ret == 0 && - (__db_prheader(dbp, subdb, pflag, keyflag, stdout, - __db_pr_callback, NULL, 0) || - dump(dbp, pflag, keyflag))) + if (ret == 0 && dbp->dump( + dbp, subdb, __db_pr_callback, stdout, pflag, keyflag)) ret = 1; (void)dbp->close(dbp, 0); free(subdb); @@ -467,8 +465,8 @@ show_subs(dbp) memset(&key, 0, sizeof(key)); memset(&data, 0, sizeof(data)); while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0) { - if ((ret = __db_prdbt(&key, 1, NULL, stdout, - __db_pr_callback, 0, NULL)) != 0) { + if ((ret = dbp->dbenv->prdbt( + &key, 1, NULL, stdout, __db_pr_callback, 0)) != 0) { dbp->errx(dbp, NULL); return (1); } @@ -485,105 +483,6 @@ show_subs(dbp) return (0); } -/* - * dump -- - * Dump out the records for a DB. - */ -int -dump(dbp, pflag, keyflag) - DB *dbp; - int pflag, keyflag; -{ - DBC *dbcp; - DBT key, data; - DBT keyret, dataret; - db_recno_t recno; - int is_recno, failed, ret; - void *pointer; - - /* - * Get a cursor and step through the database, printing out each - * key/data pair. - */ - if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) { - dbp->err(dbp, ret, "DB->cursor"); - return (1); - } - - failed = 0; - memset(&key, 0, sizeof(key)); - memset(&data, 0, sizeof(data)); - data.data = malloc(1024 * 1024); - if (data.data == NULL) { - dbp->err(dbp, ENOMEM, "bulk get buffer"); - failed = 1; - goto err; - } - data.ulen = 1024 * 1024; - data.flags = DB_DBT_USERMEM; - is_recno = (dbp->type == DB_RECNO || dbp->type == DB_QUEUE); - keyflag = is_recno ? 
keyflag : 1; - if (is_recno) { - keyret.data = &recno; - keyret.size = sizeof(recno); - } - -retry: - while ((ret = - dbcp->c_get(dbcp, &key, &data, DB_NEXT | DB_MULTIPLE_KEY)) == 0) { - DB_MULTIPLE_INIT(pointer, &data); - for (;;) { - if (is_recno) - DB_MULTIPLE_RECNO_NEXT(pointer, &data, - recno, dataret.data, dataret.size); - else - DB_MULTIPLE_KEY_NEXT(pointer, - &data, keyret.data, - keyret.size, dataret.data, dataret.size); - - if (dataret.data == NULL) - break; - - if ((keyflag && (ret = __db_prdbt(&keyret, - pflag, " ", stdout, __db_pr_callback, - is_recno, NULL)) != 0) || (ret = - __db_prdbt(&dataret, pflag, " ", stdout, - __db_pr_callback, 0, NULL)) != 0) { - dbp->errx(dbp, NULL); - failed = 1; - goto err; - } - } - } - if (ret == ENOMEM) { - data.size = ALIGN(data.size, 1024); - data.data = realloc(data.data, data.size); - if (data.data == NULL) { - dbp->err(dbp, ENOMEM, "bulk get buffer"); - failed = 1; - goto err; - } - data.ulen = data.size; - goto retry; - } - - if (ret != DB_NOTFOUND) { - dbp->err(dbp, ret, "DBcursor->get"); - failed = 1; - } - -err: if (data.data != NULL) - free(data.data); - - if ((ret = dbcp->c_close(dbcp)) != 0) { - dbp->err(dbp, ret, "DBcursor->close"); - failed = 1; - } - - (void)__db_prfooter(stdout, __db_pr_callback); - return (failed); -} - /* * usage -- * Display the usage message. diff --git a/db/db_dump185/db_dump185.c b/db/db_dump185/db_dump185.c index 3a0b46e05..2fb3cc5ab 100644 --- a/db/db_dump185/db_dump185.c +++ b/db/db_dump185/db_dump185.c @@ -1,15 +1,15 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_dump185.c,v 11.19 2004/01/28 03:36:00 bostic Exp $ */ #ifndef lint static char copyright[] = - "Copyright (c) 1996-2003\nSleepycat Software Inc. All rights reserved.\n"; -static char revid[] = - "$Id: db_dump185.c,v 11.18 2003/01/08 04:19:32 bostic Exp $"; + "Copyright (c) 1996-2004\nSleepycat Software Inc. All rights reserved.\n"; #endif #include diff --git a/db/db_load/db_load.c b/db/db_load/db_load.c index 0635bca46..da11029ec 100644 --- a/db/db_load/db_load.c +++ b/db/db_load/db_load.c @@ -1,17 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_load.c,v 11.99 2004/10/11 18:53:14 bostic Exp $ */ #include "db_config.h" #ifndef lint static const char copyright[] = - "Copyright (c) 1996-2003\nSleepycat Software Inc. All rights reserved.\n"; -static const char revid[] = - "$Id: db_load.c,v 11.88 2003/10/16 17:51:08 bostic Exp $"; + "Copyright (c) 1996-2004\nSleepycat Software Inc. All rights reserved.\n"; #endif #ifndef NO_SYSTEM_INCLUDES @@ -71,12 +71,13 @@ main(argc, argv) int argc; char *argv[]; { + enum { NOTSET, FILEID_RESET, LSN_RESET, INVALID } reset; extern char *optarg; extern int optind; DBTYPE dbtype; DB_ENV *dbenv; LDG ldg; - u_int32_t ldf; + u_int ldf; int ch, existed, exitval, ret; char **clist, **clp; @@ -92,6 +93,7 @@ main(argc, argv) if ((ret = version_check(ldg.progname)) != 0) return (ret); + reset = NOTSET; ldf = 0; exitval = existed = 0; dbtype = DB_UNKNOWN; @@ -102,12 +104,27 @@ main(argc, argv) return (EXIT_FAILURE); } - while ((ch = getopt(argc, argv, "c:f:h:nP:Tt:V")) != EOF) + /* + * There are two modes for db_load: -r and everything else. The -r + * option zeroes out the database LSN's or resets the file ID, it + * doesn't really "load" a new database. 
The functionality is in + * db_load because we don't have a better place to put it, and we + * don't want to create a new utility for just that functionality. + */ + while ((ch = getopt(argc, argv, "c:f:h:nP:r:Tt:V")) != EOF) switch (ch) { case 'c': + if (reset != NOTSET) + return (usage()); + reset = INVALID; + *clp++ = optarg; break; case 'f': + if (reset != NOTSET) + return (usage()); + reset = INVALID; + if (freopen(optarg, "r", stdin) == NULL) { fprintf(stderr, "%s: %s: reopen: %s\n", ldg.progname, optarg, strerror(errno)); @@ -118,6 +135,10 @@ main(argc, argv) ldg.home = optarg; break; case 'n': + if (reset != NOTSET) + return (usage()); + reset = INVALID; + ldf |= LDF_NOOVERWRITE; break; case 'P': @@ -130,10 +151,28 @@ main(argc, argv) } ldf |= LDF_PASSWORD; break; + case 'r': + if (reset == INVALID) + return (usage()); + if (strcmp(optarg, "lsn") == 0) + reset = LSN_RESET; + else if (strcmp(optarg, "fileid") == 0) + reset = FILEID_RESET; + else + return (usage()); + break; case 'T': + if (reset != NOTSET) + return (usage()); + reset = INVALID; + ldf |= LDF_NOHEADER; break; case 't': + if (reset != NOTSET) + return (usage()); + reset = INVALID; + if (strcmp(optarg, "btree") == 0) { dbtype = DB_BTREE; break; @@ -174,10 +213,23 @@ main(argc, argv) if (env_create(&dbenv, &ldg) != 0) goto shutdown; - while (!ldg.endofile) - if (load(dbenv, argv[0], dbtype, clist, ldf, - &ldg, &existed) != 0) - goto shutdown; + /* If we're resetting the LSNs, that's an entirely separate path. */ + switch (reset) { + case FILEID_RESET: + exitval = dbenv->fileid_reset( + dbenv, argv[0], ldf & LDF_PASSWORD ? 1 : 0); + break; + case LSN_RESET: + exitval = dbenv->lsn_reset( + dbenv, argv[0], ldf & LDF_PASSWORD ? 1 : 0); + break; + default: + while (!ldg.endofile) + if (load(dbenv, argv[0], dbtype, clist, ldf, + &ldg, &existed) != 0) + goto shutdown; + break; + } if (0) { shutdown: exitval = 1; @@ -352,8 +404,7 @@ retry_db: goto err; } if (ldg->private != 0) { - if ((ret = - __db_util_cache(dbenv, dbp, &ldg->cache, &resize)) != 0) + if ((ret = __db_util_cache(dbp, &ldg->cache, &resize)) != 0) goto err; if (resize) { if ((ret = dbp->close(dbp, 0)) != 0) @@ -449,8 +500,8 @@ retry: if (txn != NULL) name, !keyflag ? recno : recno * 2 - 1); - (void)__db_prdbt(&key, checkprint, 0, stderr, - __db_pr_callback, 0, NULL); + (void)dbenv->prdbt(&key, + checkprint, 0, stderr, __db_pr_callback, 0); break; case DB_LOCK_DEADLOCK: /* If we have a child txn, retry--else it's fatal. */ @@ -560,8 +611,10 @@ db_init(dbenv, home, cache, is_private) /* We may be loading into a live environment. Try and join. */ flags = DB_USE_ENVIRON | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN; - if (dbenv->open(dbenv, home, flags, 0) == 0) + if ((ret = dbenv->open(dbenv, home, flags, 0)) == 0) return (0); + if (ret == DB_VERSION_MISMATCH) + goto err; /* * We're trying to load a database. @@ -586,7 +639,7 @@ db_init(dbenv, home, cache, is_private) return (0); /* An environment is required. */ - dbenv->err(dbenv, ret, "DB_ENV->open"); +err: dbenv->err(dbenv, ret, "DB_ENV->open"); return (1); } @@ -753,14 +806,10 @@ memerr: dbp->errx(dbp, "could not allocate buffer %d", buflen); break; } - if (ch == '\n') - break; - /* - * If the buffer is too small, double it. The - * +1 is for the nul byte inserted below. + * If the buffer is too small, double it. 
*/ - if (linelen + start + 1 == buflen) { + if (linelen + start == buflen) { G(hdrbuf) = realloc(G(hdrbuf), buflen *= 2); if (G(hdrbuf) == NULL) @@ -768,6 +817,9 @@ memerr: dbp->errx(dbp, "could not allocate buffer %d", buflen); buf = &G(hdrbuf)[start]; } + if (ch == '\n') + break; + buf[linelen++] = ch; } if (G(endofile) == 1) @@ -1110,6 +1162,7 @@ dbt_rrecno(dbenv, dbtp, ishex) int ishex; { char buf[32], *p, *q; + u_long recno; ++G(lineno); @@ -1148,12 +1201,12 @@ dbt_rrecno(dbenv, dbtp, ishex) *p = '\0'; } - if (__db_getulong(dbenv, - G(progname), buf + 1, 0, 0, (u_long *)dbtp->data)) { + if (__db_getulong(dbenv, G(progname), buf + 1, 0, 0, &recno)) { bad: badend(dbenv); return (1); } + *((db_recno_t *)dbtp->data) = recno; dbtp->size = sizeof(db_recno_t); return (0); } @@ -1241,6 +1294,8 @@ usage() (void)fprintf(stderr, "%s\n\t%s\n", "usage: db_load [-nTV] [-c name=value] [-f file]", "[-h home] [-P password] [-t btree | hash | recno | queue] db_file"); + (void)fprintf(stderr, "%s\n", + "usage: db_load -r lsn | fileid [-h home] [-P password] db_file"); return (EXIT_FAILURE); } diff --git a/db/db_printlog/README b/db/db_printlog/README index d59f4c77f..d62596429 100644 --- a/db/db_printlog/README +++ b/db/db_printlog/README @@ -1,4 +1,4 @@ -# $Id: README,v 10.6 2002/06/20 14:52:54 bostic Exp $ +# $Id: README,v 10.7 2004/09/24 00:43:16 bostic Exp $ Berkeley DB log dump utility. This utility dumps out a DB log in human readable form, a record at a time, to assist in recovery and transaction @@ -28,7 +28,7 @@ rectype.awk Print out a range of the log -- command line should rectypes (or partial strings of rectypes) sought. status.awk Read through db_printlog output and list the transactions - encountered, and whether they commited or aborted. + encountered, and whether they committed or aborted. txn.awk Print out all the records for a comma-separated list of transaction IDs. diff --git a/db/db_printlog/db_printlog.c b/db/db_printlog/db_printlog.c index 94ea496ff..d06477160 100644 --- a/db/db_printlog/db_printlog.c +++ b/db/db_printlog/db_printlog.c @@ -1,17 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_printlog.c,v 11.64 2004/06/17 17:35:17 bostic Exp $ */ #include "db_config.h" #ifndef lint static const char copyright[] = - "Copyright (c) 1996-2003\nSleepycat Software Inc. All rights reserved.\n"; -static const char revid[] = - "$Id: db_printlog.c,v 11.59 2003/08/18 18:00:31 ubell Exp $"; + "Copyright (c) 1996-2004\nSleepycat Software Inc. 
All rights reserved.\n"; #endif #ifndef NO_SYSTEM_INCLUDES @@ -33,11 +33,12 @@ static const char revid[] = #include "dbinc/qam.h" #include "dbinc/txn.h" +int lsn_arg __P((const char *, char *, DB_LSN *)); int main __P((int, char *[])); +int open_rep_db __P((DB_ENV *, DB **, DBC **)); +int print_app_record __P((DB_ENV *, DBT *, DB_LSN *, db_recops)); int usage __P((void)); int version_check __P((const char *)); -int print_app_record __P((DB_ENV *, DBT *, DB_LSN *, db_recops)); -int open_rep_db __P((DB_ENV *, DB **, DBC **)); int main(argc, argv) @@ -49,28 +50,40 @@ main(argc, argv) const char *progname = "db_printlog"; DB *dbp; DBC *dbc; + DBT data, keydbt; DB_ENV *dbenv; DB_LOGC *logc; - int (**dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); + DB_LSN key, start, stop; size_t dtabsize; - DBT data, keydbt; - DB_LSN key; - int ch, exitval, nflag, rflag, ret, repflag; + u_int32_t logcflag; + int ch, cmp, exitval, nflag, rflag, ret, repflag; + int (**dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); char *home, *passwd; if ((ret = version_check(progname)) != 0) return (ret); - dbenv = NULL; dbp = NULL; dbc = NULL; + dbenv = NULL; logc = NULL; - exitval = nflag = rflag = repflag = 0; - home = passwd = NULL; + ZERO_LSN(start); + ZERO_LSN(stop); dtabsize = 0; + exitval = nflag = rflag = repflag = 0; dtab = NULL; - while ((ch = getopt(argc, argv, "h:NP:rRV")) != EOF) + home = passwd = NULL; + + while ((ch = getopt(argc, argv, "b:e:h:NP:rRV")) != EOF) switch (ch) { + case 'b': + if (lsn_arg(progname, optarg, &start)) + return (usage()); + break; + case 'e': + if (lsn_arg(progname, optarg, &stop)) + return (usage()); + break; case 'h': home = optarg; break; @@ -89,7 +102,7 @@ main(argc, argv) case 'r': rflag = 1; break; - case 'R': + case 'R': /* Undocumented */ repflag = 1; break; case 'V': @@ -157,28 +170,34 @@ main(argc, argv) if (repflag) { if ((ret = dbenv->open(dbenv, home, DB_INIT_MPOOL | DB_USE_ENVIRON, 0)) != 0 && + (ret == DB_VERSION_MISMATCH || (ret = dbenv->open(dbenv, home, DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) - != 0) { - dbenv->err(dbenv, ret, "open"); + != 0)) { + dbenv->err(dbenv, ret, "DB_ENV->open"); goto shutdown; } } else if ((ret = dbenv->open(dbenv, home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 && + (ret == DB_VERSION_MISMATCH || (ret = dbenv->open(dbenv, home, - DB_CREATE | DB_INIT_LOG | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) { - dbenv->err(dbenv, ret, "open"); + DB_CREATE | DB_INIT_LOG | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0)) { + dbenv->err(dbenv, ret, "DB_ENV->open"); goto shutdown; } /* Initialize print callbacks. */ if ((ret = __bam_init_print(dbenv, &dtab, &dtabsize)) != 0 || - (ret = __dbreg_init_print(dbenv, &dtab, &dtabsize)) != 0 || (ret = __crdel_init_print(dbenv, &dtab, &dtabsize)) != 0 || (ret = __db_init_print(dbenv, &dtab, &dtabsize)) != 0 || + (ret = __dbreg_init_print(dbenv, &dtab, &dtabsize)) != 0 || (ret = __fop_init_print(dbenv, &dtab, &dtabsize)) != 0 || - (ret = __qam_init_print(dbenv, &dtab, &dtabsize)) != 0 || +#ifdef HAVE_HASH (ret = __ham_init_print(dbenv, &dtab, &dtabsize)) != 0 || +#endif +#ifdef HAVE_QUEUE + (ret = __qam_init_print(dbenv, &dtab, &dtabsize)) != 0 || +#endif (ret = __txn_init_print(dbenv, &dtab, &dtabsize)) != 0) { dbenv->err(dbenv, ret, "callback: initialization"); goto shutdown; @@ -193,17 +212,22 @@ main(argc, argv) goto shutdown; } + if (IS_ZERO_LSN(start)) { + memset(&keydbt, 0, sizeof(keydbt)); + logcflag = rflag ? 
DB_PREV : DB_NEXT; + } else { + key = start; + logcflag = DB_SET; + } memset(&data, 0, sizeof(data)); - memset(&keydbt, 0, sizeof(keydbt)); - while (!__db_util_interrupted()) { + + for (; !__db_util_interrupted(); logcflag = rflag ? DB_PREV : DB_NEXT) { if (repflag) { - ret = dbc->c_get(dbc, - &keydbt, &data, rflag ? DB_PREV : DB_NEXT); + ret = dbc->c_get(dbc, &keydbt, &data, logcflag); if (ret == 0) key = ((REP_CONTROL *)keydbt.data)->lsn; } else - ret = logc->get(logc, - &key, &data, rflag ? DB_PREV : DB_NEXT); + ret = logc->get(logc, &key, &data, logcflag); if (ret != 0) { if (ret == DB_NOTFOUND) break; @@ -212,6 +236,15 @@ main(argc, argv) goto shutdown; } + /* + * We may have reached the end of the range we're displaying. + */ + if (!IS_ZERO_LSN(stop)) { + cmp = log_compare(&key, &stop); + if ((rflag && cmp < 0) || (!rflag && cmp > 0)) + break; + } + ret = __db_dispatch(dbenv, dtab, dtabsize, &data, &key, DB_TXN_PRINT, NULL); @@ -264,8 +297,8 @@ shutdown: exitval = 1; int usage() { - fprintf(stderr, "%s\n", - "usage: db_printlog [-NrV] [-h home] [-P password]"); + fprintf(stderr, "usage: db_printlog %s\n", + "[-NrV] [-b file/offset] [-e file/offset] [-h home] [-P password]"); return (EXIT_FAILURE); } @@ -361,3 +394,38 @@ err: if (*dbpp != NULL) (void)(*dbpp)->close(*dbpp, 0); return (ret); } + +/* + * lsn_arg -- + * Parse a LSN argument. + */ +int +lsn_arg(progname, optarg, lsnp) + const char *progname; + char *optarg; + DB_LSN *lsnp; +{ + char *p; + u_long uval; + + /* + * Expected format is: lsn.file/lsn.offset. + * + * Don't use getsubopt(3), some systems don't have it. + */ + if ((p = strchr(optarg, '/')) == NULL) + return (1); + *p = '\0'; + + if (__db_getulong(NULL, progname, optarg, 0, 0, &uval)) + return (1); + if (uval > UINT32_MAX) + return (1); + lsnp->file = uval; + if (__db_getulong(NULL, progname, p + 1, 0, 0, &uval)) + return (1); + if (uval > UINT32_MAX) + return (1); + lsnp->offset = uval; + return (0); +} diff --git a/db/db_printlog/rectype.awk b/db/db_printlog/rectype.awk index 7f7b2f5ee..f30124cac 100644 --- a/db/db_printlog/rectype.awk +++ b/db/db_printlog/rectype.awk @@ -1,7 +1,7 @@ -# $Id: rectype.awk,v 11.3 2000/07/17 22:00:49 ubell Exp $ +# $Id: rectype.awk,v 11.4 2004/04/19 09:36:58 bostic Exp $ # -# Print out a range of the log -# Command line should set RECTYPE to the a comma separated list +# Print out a range of the log. +# Command line should set RECTYPE to a comma separated list # of the rectypes (or partial strings of rectypes) sought. NR == 1 { ntypes = 0 diff --git a/db/db_printlog/status.awk b/db/db_printlog/status.awk index faf9e0c1f..a0c381ed9 100644 --- a/db/db_printlog/status.awk +++ b/db/db_printlog/status.awk @@ -1,10 +1,10 @@ -# $Id: status.awk,v 10.4 2003/07/03 16:05:30 margo Exp $ +# $Id: status.awk,v 10.5 2004/09/24 00:43:17 bostic Exp $ # # Read through db_printlog output and list all the transactions encountered -# and whether they commited or aborted. +# and whether they committed or aborted. # # 1 = started -# 2 = commited +# 2 = committed # 3 = explicitly aborted # 4 = other BEGIN { diff --git a/db/db_recover/db_recover.c b/db/db_recover/db_recover.c index 0a2f6b049..75961f78c 100644 --- a/db/db_recover/db_recover.c +++ b/db/db_recover/db_recover.c @@ -1,17 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
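When a -b start LSN is given, the db_printlog loop above positions the log cursor with DB_SET and then walks with DB_NEXT or DB_PREV. A hedged sketch of the same positioning from application code, not part of the patch: DB_LOGC, log_cursor, DB_SET and DB_NEXT are standard and used above, while dump_from and the choice to print only record sizes are illustrative; a logging environment is assumed to be open already.

#include <stdio.h>
#include <string.h>

#include <db.h>

int
dump_from(dbenv, file, offset)
	DB_ENV *dbenv;
	u_int32_t file, offset;
{
	DB_LOGC *logc;
	DBT data;
	DB_LSN lsn;
	int ret, t_ret;

	if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
		return (ret);

	memset(&data, 0, sizeof(data));
	lsn.file = file;		/* E.g., parsed from a "3/12345" argument. */
	lsn.offset = offset;

	/* Position on the requested record, then walk forward. */
	for (ret = logc->get(logc, &lsn, &data, DB_SET);
	    ret == 0; ret = logc->get(logc, &lsn, &data, DB_NEXT))
		printf("[%lu][%lu]: %lu bytes\n", (unsigned long)lsn.file,
		    (unsigned long)lsn.offset, (unsigned long)data.size);

	if (ret == DB_NOTFOUND)
		ret = 0;
	if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}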
+ * + * $Id: db_recover.c,v 11.41 2004/01/28 03:36:00 bostic Exp $ */ #include "db_config.h" #ifndef lint static const char copyright[] = - "Copyright (c) 1996-2003\nSleepycat Software Inc. All rights reserved.\n"; -static const char revid[] = - "$Id: db_recover.c,v 11.39 2003/09/04 18:06:46 bostic Exp $"; + "Copyright (c) 1996-2004\nSleepycat Software Inc. All rights reserved.\n"; #endif #ifndef NO_SYSTEM_INCLUDES @@ -115,10 +115,8 @@ main(argc, argv) } dbenv->set_errfile(dbenv, stderr); dbenv->set_errpfx(dbenv, progname); - if (verbose) { + if (verbose) (void)dbenv->set_verbose(dbenv, DB_VERB_RECOVERY, 1); - (void)dbenv->set_verbose(dbenv, DB_VERB_CHKPOINT, 1); - } if (timestamp && (ret = dbenv->set_tx_timestamp(dbenv, ×tamp)) != 0) { dbenv->err(dbenv, ret, "DB_ENV->set_timestamp"); diff --git a/db/db_stat/db_stat.c b/db/db_stat/db_stat.c index 54b55efbe..aa1918655 100644 --- a/db/db_stat/db_stat.c +++ b/db/db_stat/db_stat.c @@ -1,17 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_stat.c,v 11.158 2004/07/15 18:26:48 ubell Exp $ */ #include "db_config.h" #ifndef lint static const char copyright[] = - "Copyright (c) 1996-2003\nSleepycat Software Inc. All rights reserved.\n"; -static const char revid[] = - "$Id: db_stat.c,v 11.142 2003/10/27 19:47:25 bostic Exp $"; + "Copyright (c) 1996-2004\nSleepycat Software Inc. All rights reserved.\n"; #endif #ifndef NO_SYSTEM_INCLUDES @@ -38,30 +38,11 @@ static const char revid[] = #include "dbinc/db_page.h" #include "dbinc/txn.h" -#define PCT(f, t, pgsize) \ - ((t) == 0 ? 0 : \ - ((((((double)t) * (pgsize)) - (f)) / (((double)t) * (pgsize))) * 100)) - typedef enum { T_NOTSET, T_DB, T_ENV, T_LOCK, T_LOG, T_MPOOL, T_REP, T_TXN } test_t; -int argcheck __P((char *, const char *)); -int btree_stats __P((DB_ENV *, DB *, DB_BTREE_STAT *, u_int32_t)); int db_init __P((DB_ENV *, char *, test_t, u_int32_t, int *)); -void dl __P((const char *, u_long)); -void dl_bytes __P((const char *, u_long, u_long, u_long)); -int env_stats __P((DB_ENV *, u_int32_t)); -int hash_stats __P((DB_ENV *, DB *, u_int32_t)); -int lock_stats __P((DB_ENV *, char *, u_int32_t)); -int log_stats __P((DB_ENV *, u_int32_t)); int main __P((int, char *[])); -int mpool_stats __P((DB_ENV *, char *, u_int32_t)); -void prflags __P((DB *, u_int32_t, const FN *)); -int queue_stats __P((DB_ENV *, DB *, u_int32_t)); -int rep_stats __P((DB_ENV *, u_int32_t)); -int txn_compare __P((const void *, const void *)); -int txn_stats __P((DB_ENV *, u_int32_t)); -void txn_xid_stats __P((DB_TXN_ACTIVE *)); int usage __P((void)); int version_check __P((const char *)); @@ -78,9 +59,9 @@ main(argc, argv) DB *alt_dbp, *dbp; test_t ttype; u_int32_t cache, env_flags, fast, flags; - int ch, checked, exitval; + int ch, exitval; int nflag, private, resize, ret; - char *db, *home, *internal, *passwd, *subdb; + char *db, *home, *p, *passwd, *subdb; if ((ret = version_check(progname)) != 0) return (ret); @@ -90,33 +71,52 @@ main(argc, argv) ttype = T_NOTSET; cache = MEGABYTE; exitval = fast = flags = nflag = private = 0; - db = home = internal = passwd = subdb = NULL; + db = home = passwd = subdb = NULL; env_flags = 0; - while ((ch = getopt(argc, argv, "C:cd:efh:lM:mNP:rs:tVZ")) != EOF) + while ((ch = getopt(argc, argv, "C:cd:Eefh:L:lM:mNP:R:rs:tVZ")) != EOF) switch (ch) { - case 'C': - if (ttype != T_NOTSET) - goto argcombo; - ttype = T_LOCK; - if (!argcheck(internal = optarg, "Aclmop")) 
- return (usage()); - break; - case 'c': - if (ttype != T_NOTSET) + case 'C': case 'c': + if (ttype != T_NOTSET && ttype != T_LOCK) goto argcombo; ttype = T_LOCK; + if (ch != 'c') + for (p = optarg; *p; ++p) + switch (*p) { + case 'A': + LF_SET(DB_STAT_ALL); + break; + case 'c': + LF_SET(DB_STAT_LOCK_CONF); + break; + case 'l': + LF_SET(DB_STAT_LOCK_LOCKERS); + break; + case 'm': /* Backward compatible. */ + break; + case 'o': + LF_SET(DB_STAT_LOCK_OBJECTS); + break; + case 'p': + LF_SET(DB_STAT_LOCK_PARAMS); + break; + default: + return (usage()); + } break; case 'd': - if (ttype != T_DB && ttype != T_NOTSET) + if (ttype != T_NOTSET && ttype != T_DB) goto argcombo; ttype = T_DB; db = optarg; break; - case 'e': - if (ttype != T_NOTSET) + case 'E': case 'e': + if (ttype != T_NOTSET && ttype != T_ENV) goto argcombo; ttype = T_ENV; + LF_SET(DB_STAT_SUBSYSTEM); + if (ch == 'E') + LF_SET(DB_STAT_ALL); break; case 'f': fast = DB_FAST_STAT; @@ -124,22 +124,38 @@ main(argc, argv) case 'h': home = optarg; break; - case 'l': - if (ttype != T_NOTSET) + case 'L': case 'l': + if (ttype != T_NOTSET && ttype != T_LOG) goto argcombo; ttype = T_LOG; + if (ch != 'l') + for (p = optarg; *p; ++p) + switch (*p) { + case 'A': + LF_SET(DB_STAT_ALL); + break; + default: + return (usage()); + } break; - case 'M': - if (ttype != T_NOTSET) - goto argcombo; - ttype = T_MPOOL; - if (!argcheck(internal = optarg, "Ahm")) - return (usage()); - break; - case 'm': - if (ttype != T_NOTSET) + case 'M': case 'm': + if (ttype != T_NOTSET && ttype != T_MPOOL) goto argcombo; ttype = T_MPOOL; + if (ch != 'm') + for (p = optarg; *p; ++p) + switch (*p) { + case 'A': + LF_SET(DB_STAT_ALL); + break; + case 'h': + LF_SET(DB_STAT_MEMP_HASH); + break; + case 'm': /* Backward compatible. */ + break; + default: + return (usage()); + } break; case 'N': nflag = 1; @@ -153,13 +169,22 @@ main(argc, argv) return (EXIT_FAILURE); } break; - case 'r': - if (ttype != T_NOTSET) + case 'R': case 'r': + if (ttype != T_NOTSET && ttype != T_REP) goto argcombo; ttype = T_REP; + if (ch != 'r') + for (p = optarg; *p; ++p) + switch (*p) { + case 'A': + LF_SET(DB_STAT_ALL); + break; + default: + return (usage()); + } break; case 's': - if (ttype != T_DB && ttype != T_NOTSET) + if (ttype != T_NOTSET && ttype != T_DB) goto argcombo; ttype = T_DB; subdb = optarg; @@ -177,7 +202,7 @@ argcombo: fprintf(stderr, printf("%s\n", db_version(NULL, NULL, NULL)); return (EXIT_SUCCESS); case 'Z': - flags |= DB_STAT_CLEAR; + LF_SET(DB_STAT_CLEAR); break; case '?': default: @@ -215,7 +240,7 @@ argcombo: fprintf(stderr, retry: if ((ret = db_env_create(&dbenv, env_flags)) != 0) { fprintf(stderr, "%s: db_env_create: %s\n", progname, db_strerror(ret)); - goto shutdown; + goto err; } dbenv->set_errfile(dbenv, stderr); @@ -224,45 +249,45 @@ retry: if ((ret = db_env_create(&dbenv, env_flags)) != 0) { if (nflag) { if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) { dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING"); - goto shutdown; + goto err; } if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) { dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC"); - goto shutdown; + goto err; } } if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES)) != 0) { dbenv->err(dbenv, ret, "set_passwd"); - goto shutdown; + goto err; } /* Initialize the environment. */ if (db_init(dbenv, home, ttype, cache, &private) != 0) - goto shutdown; + goto err; switch (ttype) { case T_DB: - /* Create the DB object and open the file. 
*/ if (flags != 0) return (usage()); + + /* Create the DB object and open the file. */ if ((ret = db_create(&dbp, dbenv, 0)) != 0) { dbenv->err(dbenv, ret, "db_create"); - goto shutdown; + goto err; } if ((ret = dbp->open(dbp, NULL, db, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0) { - dbp->err(dbp, ret, "DB->open: %s", db); - goto shutdown; + dbenv->err(dbenv, ret, "DB->open: %s", db); + goto err; } /* Check if cache is too small for this DB's pagesize. */ if (private) { - if ((ret = - __db_util_cache(dbenv, dbp, &cache, &resize)) != 0) - goto shutdown; + if ((ret = __db_util_cache(dbp, &cache, &resize)) != 0) + goto err; if (resize) { (void)dbp->close(dbp, DB_NOSYNC); dbp = NULL; @@ -278,13 +303,10 @@ retry: if ((ret = db_env_create(&dbenv, env_flags)) != 0) { * If its a master-db then we cannot. So check to see, * if its btree then it might be. */ - checked = 0; - if (subdb == NULL && dbp->type == DB_BTREE) { - if ((ret = dbp->stat(dbp, &sp, DB_FAST_STAT)) != 0) { - dbp->err(dbp, ret, "DB->stat"); - goto shutdown; - } - checked = 1; + if (subdb == NULL && dbp->type == DB_BTREE && + (ret = dbp->stat(dbp, NULL, &sp, DB_FAST_STAT)) != 0) { + dbenv->err(dbenv, ret, "DB->stat"); + goto err; } if (subdb != NULL || @@ -292,74 +314,58 @@ retry: if ((ret = db_env_create(&dbenv, env_flags)) != 0) { (sp->bt_metaflags & BTM_SUBDB) == 0) { if ((ret = db_create(&alt_dbp, dbenv, 0)) != 0) { dbenv->err(dbenv, ret, "db_create"); - goto shutdown; + goto err; } if ((ret = dbp->open(alt_dbp, NULL, db, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0) { - dbenv->err(dbenv, - ret, "DB->open: %s:%s", db, subdb); + if (subdb == NULL) + dbenv->err(dbenv, + ret, "DB->open: %s", db); + else + dbenv->err(dbenv, + ret, "DB->open: %s:%s", db, subdb); (void)alt_dbp->close(alt_dbp, DB_NOSYNC); - goto shutdown; + goto err; } (void)dbp->close(dbp, DB_NOSYNC); dbp = alt_dbp; - - /* Need to run again to update counts */ - checked = 0; } - switch (dbp->type) { - case DB_BTREE: - case DB_RECNO: - if (btree_stats( - dbenv, dbp, checked == 1 ? sp : NULL, fast)) - goto shutdown; - break; - case DB_HASH: - if (hash_stats(dbenv, dbp, fast)) - goto shutdown; - break; - case DB_QUEUE: - if (queue_stats(dbenv, dbp, fast)) - goto shutdown; - break; - case DB_UNKNOWN: - dbenv->errx(dbenv, "Unknown database type."); - goto shutdown; - } + if (dbp->stat_print(dbp, flags)) + goto err; break; case T_ENV: - if (env_stats(dbenv, flags)) - goto shutdown; + if (dbenv->stat_print(dbenv, flags)) + goto err; break; case T_LOCK: - if (lock_stats(dbenv, internal, flags)) - goto shutdown; + if (dbenv->lock_stat_print(dbenv, flags)) + goto err; break; case T_LOG: - if (log_stats(dbenv, flags)) - goto shutdown; + if (dbenv->log_stat_print(dbenv, flags)) + goto err; break; case T_MPOOL: - if (mpool_stats(dbenv, internal, flags)) - goto shutdown; + if (dbenv->memp_stat_print(dbenv, flags)) + goto err; break; case T_REP: - if (rep_stats(dbenv, flags)) - goto shutdown; + if (dbenv->rep_stat_print(dbenv, flags)) + goto err; break; case T_TXN: - if (txn_stats(dbenv, flags)) - goto shutdown; + if (dbenv->txn_stat_print(dbenv, flags)) + goto err; break; case T_NOTSET: - dbenv->errx(dbenv, "Unknown statistics flag."); - goto shutdown; + dbenv->errx(dbenv, "Unknown statistics flag"); + goto err; } if (0) { -shutdown: exitval = 1; +err: exitval = 1; } if (dbp != NULL && (ret = dbp->close(dbp, DB_NOSYNC)) != 0) { exitval = 1; @@ -380,857 +386,6 @@ shutdown: exitval = 1; return (exitval == 0 ? 
EXIT_SUCCESS : EXIT_FAILURE); } -/* - * env_stats -- - * Display environment statistics. - */ -int -env_stats(dbenv, flags) - DB_ENV *dbenv; - u_int32_t flags; -{ - REGENV renv; - REGION *rp, regs[1024]; - int n, ret; - const char *lable; - - n = sizeof(regs) / sizeof(regs[0]); - if ((ret = __db_e_stat(dbenv, &renv, regs, &n, flags)) != 0) { - dbenv->err(dbenv, ret, "__db_e_stat"); - return (1); - } - - printf("%d.%d.%d\tEnvironment version.\n", - renv.majver, renv.minver, renv.patch); - printf("%lx\tMagic number.\n", (u_long)renv.magic); - printf("%d\tPanic value.\n", renv.envpanic); - - /* Adjust the reference count for us... */ - printf("%d\tReferences.\n", renv.refcnt - 1); - - dl("Locks granted without waiting.\n", - (u_long)renv.mutex.mutex_set_nowait); - dl("Locks granted after waiting.\n", - (u_long)renv.mutex.mutex_set_wait); - - while (n > 0) { - printf("%s\n", DB_LINE); - rp = ®s[--n]; - switch (rp->type) { - case REGION_TYPE_ENV: - lable = "Environment"; - break; - case REGION_TYPE_LOCK: - lable = "Lock"; - break; - case REGION_TYPE_LOG: - lable = "Log"; - break; - case REGION_TYPE_MPOOL: - lable = "Mpool"; - break; - case REGION_TYPE_MUTEX: - lable = "Mutex"; - break; - case REGION_TYPE_TXN: - lable = "Txn"; - break; - case INVALID_REGION_TYPE: - default: - lable = "Invalid"; - break; - } - printf("%s Region: %d.\n", lable, rp->id); - dl_bytes("Size", (u_long)0, (u_long)0, (u_long)rp->size); - printf("%ld\tSegment ID.\n", rp->segid); - dl("Locks granted without waiting.\n", - (u_long)rp->mutex.mutex_set_nowait); - dl("Locks granted after waiting.\n", - (u_long)rp->mutex.mutex_set_wait); - } - - return (0); -} - -/* - * btree_stats -- - * Display btree/recno statistics. - */ -int -btree_stats(dbenv, dbp, msp, fast) - DB_ENV *dbenv; - DB *dbp; - DB_BTREE_STAT *msp; - u_int32_t fast; -{ - static const FN fn[] = { - { BTM_DUP, "duplicates" }, - { BTM_FIXEDLEN, "fixed-length" }, - { BTM_RECNO, "recno" }, - { BTM_RECNUM, "record-numbers" }, - { BTM_RENUMBER, "renumber" }, - { BTM_SUBDB, "multiple-databases" }, - { 0, NULL } - }; - DB_BTREE_STAT *sp; - int ret; - - COMPQUIET(dbenv, NULL); - - if (msp != NULL) - sp = msp; - else if ((ret = dbp->stat(dbp, &sp, fast)) != 0) { - dbp->err(dbp, ret, "DB->stat"); - return (1); - } - - printf("%lx\tBtree magic number.\n", (u_long)sp->bt_magic); - printf("%lu\tBtree version number.\n", (u_long)sp->bt_version); - prflags(dbp, sp->bt_metaflags, fn); - if (dbp->type == DB_BTREE) { -#ifdef NOT_IMPLEMENTED - dl("Maximum keys per-page.\n", (u_long)sp->bt_maxkey); -#endif - dl("Minimum keys per-page.\n", (u_long)sp->bt_minkey); - } - if (dbp->type == DB_RECNO) { - dl("Fixed-length record size.\n", (u_long)sp->bt_re_len); - if (isprint((int)sp->bt_re_pad) && !isspace((int)sp->bt_re_pad)) - printf("%c\tFixed-length record pad.\n", - (int)sp->bt_re_pad); - else - printf("0x%x\tFixed-length record pad.\n", - (int)sp->bt_re_pad); - } - dl("Underlying database page size.\n", (u_long)sp->bt_pagesize); - dl("Number of levels in the tree.\n", (u_long)sp->bt_levels); - dl(dbp->type == DB_BTREE ? 
- "Number of unique keys in the tree.\n" : - "Number of records in the tree.\n", (u_long)sp->bt_nkeys); - dl("Number of data items in the tree.\n", (u_long)sp->bt_ndata); - - dl("Number of tree internal pages.\n", (u_long)sp->bt_int_pg); - dl("Number of bytes free in tree internal pages", - (u_long)sp->bt_int_pgfree); - printf(" (%.0f%% ff).\n", - PCT(sp->bt_int_pgfree, sp->bt_int_pg, sp->bt_pagesize)); - - dl("Number of tree leaf pages.\n", (u_long)sp->bt_leaf_pg); - dl("Number of bytes free in tree leaf pages", - (u_long)sp->bt_leaf_pgfree); - printf(" (%.0f%% ff).\n", - PCT(sp->bt_leaf_pgfree, sp->bt_leaf_pg, sp->bt_pagesize)); - - dl("Number of tree duplicate pages.\n", (u_long)sp->bt_dup_pg); - dl("Number of bytes free in tree duplicate pages", - (u_long)sp->bt_dup_pgfree); - printf(" (%.0f%% ff).\n", - PCT(sp->bt_dup_pgfree, sp->bt_dup_pg, sp->bt_pagesize)); - - dl("Number of tree overflow pages.\n", (u_long)sp->bt_over_pg); - dl("Number of bytes free in tree overflow pages", - (u_long)sp->bt_over_pgfree); - printf(" (%.0f%% ff).\n", - PCT(sp->bt_over_pgfree, sp->bt_over_pg, sp->bt_pagesize)); - - dl("Number of pages on the free list.\n", (u_long)sp->bt_free); - - free(sp); - - return (0); -} - -/* - * hash_stats -- - * Display hash statistics. - */ -int -hash_stats(dbenv, dbp, fast) - DB_ENV *dbenv; - DB *dbp; - u_int32_t fast; -{ - static const FN fn[] = { - { DB_HASH_DUP, "duplicates" }, - { DB_HASH_SUBDB,"multiple-databases" }, - { 0, NULL } - }; - DB_HASH_STAT *sp; - int ret; - - COMPQUIET(dbenv, NULL); - - if ((ret = dbp->stat(dbp, &sp, fast)) != 0) { - dbp->err(dbp, ret, "DB->stat"); - return (1); - } - - printf("%lx\tHash magic number.\n", (u_long)sp->hash_magic); - printf("%lu\tHash version number.\n", (u_long)sp->hash_version); - prflags(dbp, sp->hash_metaflags, fn); - dl("Underlying database page size.\n", (u_long)sp->hash_pagesize); - dl("Specified fill factor.\n", (u_long)sp->hash_ffactor); - dl("Number of keys in the database.\n", (u_long)sp->hash_nkeys); - dl("Number of data items in the database.\n", (u_long)sp->hash_ndata); - - dl("Number of hash buckets.\n", (u_long)sp->hash_buckets); - dl("Number of bytes free on bucket pages", (u_long)sp->hash_bfree); - printf(" (%.0f%% ff).\n", - PCT(sp->hash_bfree, sp->hash_buckets, sp->hash_pagesize)); - - dl("Number of overflow pages.\n", (u_long)sp->hash_bigpages); - dl("Number of bytes free in overflow pages", - (u_long)sp->hash_big_bfree); - printf(" (%.0f%% ff).\n", - PCT(sp->hash_big_bfree, sp->hash_bigpages, sp->hash_pagesize)); - - dl("Number of bucket overflow pages.\n", (u_long)sp->hash_overflows); - dl("Number of bytes free in bucket overflow pages", - (u_long)sp->hash_ovfl_free); - printf(" (%.0f%% ff).\n", - PCT(sp->hash_ovfl_free, sp->hash_overflows, sp->hash_pagesize)); - - dl("Number of duplicate pages.\n", (u_long)sp->hash_dup); - dl("Number of bytes free in duplicate pages", - (u_long)sp->hash_dup_free); - printf(" (%.0f%% ff).\n", - PCT(sp->hash_dup_free, sp->hash_dup, sp->hash_pagesize)); - - dl("Number of pages on the free list.\n", (u_long)sp->hash_free); - - free(sp); - - return (0); -} - -/* - * queue_stats -- - * Display queue statistics. 
- */ -int -queue_stats(dbenv, dbp, fast) - DB_ENV *dbenv; - DB *dbp; - u_int32_t fast; -{ - DB_QUEUE_STAT *sp; - int ret; - - COMPQUIET(dbenv, NULL); - - if ((ret = dbp->stat(dbp, &sp, fast)) != 0) { - dbp->err(dbp, ret, "DB->stat"); - return (1); - } - - printf("%lx\tQueue magic number.\n", (u_long)sp->qs_magic); - printf("%lu\tQueue version number.\n", (u_long)sp->qs_version); - dl("Fixed-length record size.\n", (u_long)sp->qs_re_len); - if (isprint((int)sp->qs_re_pad) && !isspace((int)sp->qs_re_pad)) - printf("%c\tFixed-length record pad.\n", (int)sp->qs_re_pad); - else - printf("0x%x\tFixed-length record pad.\n", (int)sp->qs_re_pad); - dl("Underlying database page size.\n", (u_long)sp->qs_pagesize); - if (sp->qs_extentsize != 0) - dl("Underlying database extent size.\n", - (u_long)sp->qs_extentsize); - dl("Number of records in the database.\n", (u_long)sp->qs_nkeys); - dl("Number of database pages.\n", (u_long)sp->qs_pages); - dl("Number of bytes free in database pages", (u_long)sp->qs_pgfree); - printf(" (%.0f%% ff).\n", - PCT(sp->qs_pgfree, sp->qs_pages, sp->qs_pagesize)); - printf("%lu\tFirst undeleted record.\n", (u_long)sp->qs_first_recno); - printf( - "%lu\tNext available record number.\n", (u_long)sp->qs_cur_recno); - - free(sp); - - return (0); -} - -/* - * lock_stats -- - * Display lock statistics. - */ -int -lock_stats(dbenv, internal, flags) - DB_ENV *dbenv; - char *internal; - u_int32_t flags; -{ - DB_LOCK_STAT *sp; - int ret; - - if (internal != NULL) { - if ((ret = - dbenv->lock_dump_region(dbenv, internal, stdout)) != 0) { - dbenv->err(dbenv, ret, NULL); - return (1); - } - return (0); - } - - if ((ret = dbenv->lock_stat(dbenv, &sp, flags)) != 0) { - dbenv->err(dbenv, ret, NULL); - return (1); - } - - dl("Last allocated locker ID.\n", (u_long)sp->st_id); - dl("Current maximum unused locker ID.\n", (u_long)sp->st_cur_maxid); - dl("Number of lock modes.\n", (u_long)sp->st_nmodes); - dl("Maximum number of locks possible.\n", (u_long)sp->st_maxlocks); - dl("Maximum number of lockers possible.\n", (u_long)sp->st_maxlockers); - dl("Maximum number of lock objects possible.\n", - (u_long)sp->st_maxobjects); - dl("Number of current locks.\n", (u_long)sp->st_nlocks); - dl("Maximum number of locks at any one time.\n", - (u_long)sp->st_maxnlocks); - dl("Number of current lockers.\n", (u_long)sp->st_nlockers); - dl("Maximum number of lockers at any one time.\n", - (u_long)sp->st_maxnlockers); - dl("Number of current lock objects.\n", (u_long)sp->st_nobjects); - dl("Maximum number of lock objects at any one time.\n", - (u_long)sp->st_maxnobjects); - dl("Total number of locks requested.\n", (u_long)sp->st_nrequests); - dl("Total number of locks released.\n", (u_long)sp->st_nreleases); - dl( - "Total number of lock requests failing because DB_LOCK_NOWAIT was set.\n", - (u_long)sp->st_nnowaits); - dl( - "Total number of locks not immediately available due to conflicts.\n", - (u_long)sp->st_nconflicts); - dl("Number of deadlocks.\n", (u_long)sp->st_ndeadlocks); - dl("Lock timeout value.\n", (u_long)sp->st_locktimeout); - dl("Number of locks that have timed out.\n", - (u_long)sp->st_nlocktimeouts); - dl("Transaction timeout value.\n", (u_long)sp->st_txntimeout); - dl("Number of transactions that have timed out.\n", - (u_long)sp->st_ntxntimeouts); - - dl_bytes("The size of the lock region.", - (u_long)0, (u_long)0, (u_long)sp->st_regsize); - dl("The number of region locks granted after waiting.\n", - (u_long)sp->st_region_wait); - dl("The number of region locks granted without waiting.\n", 
- (u_long)sp->st_region_nowait); - - free(sp); - - return (0); -} - -/* - * log_stats -- - * Display log statistics. - */ -int -log_stats(dbenv, flags) - DB_ENV *dbenv; - u_int32_t flags; -{ - DB_LOG_STAT *sp; - int ret; - - if ((ret = dbenv->log_stat(dbenv, &sp, flags)) != 0) { - dbenv->err(dbenv, ret, NULL); - return (1); - } - - printf("%lx\tLog magic number.\n", (u_long)sp->st_magic); - printf("%lu\tLog version number.\n", (u_long)sp->st_version); - dl_bytes("Log record cache size", - (u_long)0, (u_long)0, (u_long)sp->st_lg_bsize); - printf("%#o\tLog file mode.\n", sp->st_mode); - if (sp->st_lg_size % MEGABYTE == 0) - printf("%luMb\tCurrent log file size.\n", - (u_long)sp->st_lg_size / MEGABYTE); - else if (sp->st_lg_size % 1024 == 0) - printf("%luKb\tCurrent log file size.\n", - (u_long)sp->st_lg_size / 1024); - else - printf("%lu\tCurrent log file size.\n", - (u_long)sp->st_lg_size); - dl_bytes("Log bytes written", - (u_long)0, (u_long)sp->st_w_mbytes, (u_long)sp->st_w_bytes); - dl_bytes("Log bytes written since last checkpoint", - (u_long)0, (u_long)sp->st_wc_mbytes, (u_long)sp->st_wc_bytes); - dl("Total log file writes.\n", (u_long)sp->st_wcount); - dl("Total log file write due to overflow.\n", - (u_long)sp->st_wcount_fill); - dl("Total log file flushes.\n", (u_long)sp->st_scount); - printf("%lu\tCurrent log file number.\n", (u_long)sp->st_cur_file); - printf("%lu\tCurrent log file offset.\n", (u_long)sp->st_cur_offset); - printf("%lu\tOn-disk log file number.\n", (u_long)sp->st_disk_file); - printf("%lu\tOn-disk log file offset.\n", (u_long)sp->st_disk_offset); - - dl("Max commits in a log flush.\n", (u_long)sp->st_maxcommitperflush); - dl("Min commits in a log flush.\n", (u_long)sp->st_mincommitperflush); - - dl_bytes("Log region size", - (u_long)0, (u_long)0, (u_long)sp->st_regsize); - dl("The number of region locks granted after waiting.\n", - (u_long)sp->st_region_wait); - dl("The number of region locks granted without waiting.\n", - (u_long)sp->st_region_nowait); - - free(sp); - - return (0); -} - -/* - * mpool_stats -- - * Display mpool statistics. 
- */ -int -mpool_stats(dbenv, internal, flags) - DB_ENV *dbenv; - char *internal; - u_int32_t flags; -{ - DB_MPOOL_FSTAT **fsp; - DB_MPOOL_STAT *gsp; - int ret; - - if (internal != NULL) { - if ((ret = - dbenv->memp_dump_region(dbenv, internal, stdout)) != 0) { - dbenv->err(dbenv, ret, NULL); - return (1); - } - return (0); - } - - if ((ret = dbenv->memp_stat(dbenv, &gsp, &fsp, flags)) != 0) { - dbenv->err(dbenv, ret, NULL); - return (1); - } - - dl_bytes("Total cache size", - (u_long)gsp->st_gbytes, (u_long)0, (u_long)gsp->st_bytes); - dl("Number of caches.\n", (u_long)gsp->st_ncache); - dl_bytes("Pool individual cache size", - (u_long)0, (u_long)0, (u_long)gsp->st_regsize); - dl("Requested pages mapped into the process' address space.\n", - (u_long)gsp->st_map); - dl("Requested pages found in the cache", (u_long)gsp->st_cache_hit); - if (gsp->st_cache_hit + gsp->st_cache_miss != 0) - printf(" (%.0f%%)", ((double)gsp->st_cache_hit / - (gsp->st_cache_hit + gsp->st_cache_miss)) * 100); - printf(".\n"); - dl("Requested pages not found in the cache.\n", - (u_long)gsp->st_cache_miss); - dl("Pages created in the cache.\n", (u_long)gsp->st_page_create); - dl("Pages read into the cache.\n", (u_long)gsp->st_page_in); - dl("Pages written from the cache to the backing file.\n", - (u_long)gsp->st_page_out); - dl("Clean pages forced from the cache.\n", - (u_long)gsp->st_ro_evict); - dl("Dirty pages forced from the cache.\n", - (u_long)gsp->st_rw_evict); - dl("Dirty pages written by trickle-sync thread.\n", - (u_long)gsp->st_page_trickle); - dl("Current total page count.\n", - (u_long)gsp->st_pages); - dl("Current clean page count.\n", - (u_long)gsp->st_page_clean); - dl("Current dirty page count.\n", - (u_long)gsp->st_page_dirty); - dl("Number of hash buckets used for page location.\n", - (u_long)gsp->st_hash_buckets); - dl("Total number of times hash chains searched for a page.\n", - (u_long)gsp->st_hash_searches); - dl("The longest hash chain searched for a page.\n", - (u_long)gsp->st_hash_longest); - dl("Total number of hash buckets examined for page location.\n", - (u_long)gsp->st_hash_examined); - dl("The number of hash bucket locks granted without waiting.\n", - (u_long)gsp->st_hash_nowait); - dl("The number of hash bucket locks granted after waiting.\n", - (u_long)gsp->st_hash_wait); - dl("The maximum number of times any hash bucket lock was waited for.\n", - (u_long)gsp->st_hash_max_wait); - dl("The number of region locks granted without waiting.\n", - (u_long)gsp->st_region_nowait); - dl("The number of region locks granted after waiting.\n", - (u_long)gsp->st_region_wait); - dl("The number of page allocations.\n", - (u_long)gsp->st_alloc); - dl("The number of hash buckets examined during allocations\n", - (u_long)gsp->st_alloc_buckets); - dl("The max number of hash buckets examined for an allocation\n", - (u_long)gsp->st_alloc_max_buckets); - dl("The number of pages examined during allocations\n", - (u_long)gsp->st_alloc_pages); - dl("The max number of pages examined for an allocation\n", - (u_long)gsp->st_alloc_max_pages); - - for (; fsp != NULL && *fsp != NULL; ++fsp) { - printf("%s\n", DB_LINE); - printf("Pool File: %s\n", (*fsp)->file_name); - dl("Page size.\n", (u_long)(*fsp)->st_pagesize); - dl("Requested pages mapped into the process' address space.\n", - (u_long)(*fsp)->st_map); - dl("Requested pages found in the cache", - (u_long)(*fsp)->st_cache_hit); - if ((*fsp)->st_cache_hit + (*fsp)->st_cache_miss != 0) - printf(" (%.0f%%)", ((double)(*fsp)->st_cache_hit / - 
((*fsp)->st_cache_hit + (*fsp)->st_cache_miss)) * - 100); - printf(".\n"); - dl("Requested pages not found in the cache.\n", - (u_long)(*fsp)->st_cache_miss); - dl("Pages created in the cache.\n", - (u_long)(*fsp)->st_page_create); - dl("Pages read into the cache.\n", - (u_long)(*fsp)->st_page_in); - dl("Pages written from the cache to the backing file.\n", - (u_long)(*fsp)->st_page_out); - } - - free(gsp); - - return (0); -} - -/* - * rep_stats -- - * Display replication statistics. - */ -int -rep_stats(dbenv, flags) - DB_ENV *dbenv; - u_int32_t flags; -{ - DB_REP_STAT *sp; - int is_client, ret; - const char *p; - - if ((ret = dbenv->rep_stat(dbenv, &sp, flags)) != 0) { - dbenv->err(dbenv, ret, NULL); - return (1); - } - - is_client = 0; - switch (sp->st_status) { - case DB_REP_MASTER: - printf("Environment configured as a replication master.\n"); - break; - case DB_REP_CLIENT: - printf("Environment configured as a replication client.\n"); - is_client = 1; - break; - case DB_REP_LOGSONLY: - printf("Environment configured as a logs-only replica.\n"); - is_client = 1; - break; - default: - printf("Environment not configured for replication.\n"); - break; - } - - printf("%lu/%lu\t%s\n", - (u_long)sp->st_next_lsn.file, (u_long)sp->st_next_lsn.offset, - is_client ? "Next LSN expected." : "Next LSN to be used."); - p = sp->st_waiting_lsn.file == 0 ? - "Not waiting for any missed log records." : - "LSN of first log record we have after missed log records."; - printf("%lu/%lu\t%s\n", - (u_long)sp->st_waiting_lsn.file, (u_long)sp->st_waiting_lsn.offset, - p); - - dl("Number of duplicate master conditions detected.\n", - (u_long)sp->st_dupmasters); - if (sp->st_env_id != DB_EID_INVALID) - dl("Current environment ID.\n", (u_long)sp->st_env_id); - else - printf("No current environment ID.\n"); - dl("Current environment priority.\n", (u_long)sp->st_env_priority); - dl("Current generation number.\n", (u_long)sp->st_gen); - dl("Number of duplicate log records received.\n", - (u_long)sp->st_log_duplicated); - dl("Number of log records currently queued.\n", - (u_long)sp->st_log_queued); - dl("Maximum number of log records ever queued at once.\n", - (u_long)sp->st_log_queued_max); - dl("Total number of log records queued.\n", - (u_long)sp->st_log_queued_total); - dl("Number of log records received and appended to the log.\n", - (u_long)sp->st_log_records); - dl("Number of log records missed and requested.\n", - (u_long)sp->st_log_requested); - if (sp->st_master != DB_EID_INVALID) - dl("Current master ID.\n", (u_long)sp->st_master); - else - printf("No current master ID.\n"); - dl("Number of times the master has changed.\n", - (u_long)sp->st_master_changes); - dl("Number of messages received with a bad generation number.\n", - (u_long)sp->st_msgs_badgen); - dl("Number of messages received and processed.\n", - (u_long)sp->st_msgs_processed); - dl("Number of messages ignored due to pending recovery.\n", - (u_long)sp->st_msgs_recover); - dl("Number of failed message sends.\n", - (u_long)sp->st_msgs_send_failures); - dl("Number of messages sent.\n", (u_long)sp->st_msgs_sent); - dl("Number of new site messages received.\n", (u_long)sp->st_newsites); - dl("Number of environments believed to be in the replication group.\n", - (u_long)sp->st_nsites); - dl("Transmission limited.\n", (u_long)sp->st_nthrottles); - dl("Number of outdated conditions detected.\n", - (u_long)sp->st_outdated); - dl("Number of transactions applied.\n", (u_long)sp->st_txns_applied); - - dl("Number of elections held.\n", 
(u_long)sp->st_elections); - dl("Number of elections won.\n", (u_long)sp->st_elections_won); - - if (sp->st_election_status == 0) - printf("No election in progress.\n"); - else { - dl("Current election phase.\n", (u_long)sp->st_election_status); - dl("Election winner.\n", - (u_long)sp->st_election_cur_winner); - dl("Election generation number.\n", - (u_long)sp->st_election_gen); - printf("%lu/%lu\tMaximum LSN of election winner.\n", - (u_long)sp->st_election_lsn.file, - (u_long)sp->st_election_lsn.offset); - dl("Number of sites expected to participate in elections.\n", - (u_long)sp->st_election_nsites); - dl("Election priority.\n", (u_long)sp->st_election_priority); - dl("Election tiebreaker value.\n", - (u_long)sp->st_election_tiebreaker); - dl("Votes received this election round.\n", - (u_long)sp->st_election_votes); - } - - free(sp); - - return (0); -} - -/* - * txn_stats -- - * Display transaction statistics. - */ -int -txn_stats(dbenv, flags) - DB_ENV *dbenv; - u_int32_t flags; -{ - DB_TXN_STAT *sp; - u_int32_t i; - int ret; - const char *p; - - if ((ret = dbenv->txn_stat(dbenv, &sp, flags)) != 0) { - dbenv->err(dbenv, ret, NULL); - return (1); - } - - p = sp->st_last_ckp.file == 0 ? - "No checkpoint LSN." : "File/offset for last checkpoint LSN."; - printf("%lu/%lu\t%s\n", - (u_long)sp->st_last_ckp.file, (u_long)sp->st_last_ckp.offset, p); - if (sp->st_time_ckp == 0) - printf("0\tNo checkpoint timestamp.\n"); - else - printf("%.24s\tCheckpoint timestamp.\n", - ctime(&sp->st_time_ckp)); - printf("%lx\tLast transaction ID allocated.\n", - (u_long)sp->st_last_txnid); - dl("Maximum number of active transactions configured.\n", - (u_long)sp->st_maxtxns); - dl("Active transactions.\n", (u_long)sp->st_nactive); - dl("Maximum active transactions.\n", (u_long)sp->st_maxnactive); - dl("Number of transactions begun.\n", (u_long)sp->st_nbegins); - dl("Number of transactions aborted.\n", (u_long)sp->st_naborts); - dl("Number of transactions committed.\n", (u_long)sp->st_ncommits); - dl("Number of transactions restored.\n", (u_long)sp->st_nrestores); - - dl_bytes("Transaction region size", - (u_long)0, (u_long)0, (u_long)sp->st_regsize); - dl("The number of region locks granted after waiting.\n", - (u_long)sp->st_region_wait); - dl("The number of region locks granted without waiting.\n", - (u_long)sp->st_region_nowait); - - qsort(sp->st_txnarray, - sp->st_nactive, sizeof(sp->st_txnarray[0]), txn_compare); - for (i = 0; i < sp->st_nactive; ++i) { - printf("\tid: %lx; begin LSN: file/offset %lu/%lu", - (u_long)sp->st_txnarray[i].txnid, - (u_long)sp->st_txnarray[i].lsn.file, - (u_long)sp->st_txnarray[i].lsn.offset); - if (sp->st_txnarray[i].parentid != 0) - printf("; parent: %lx", - (u_long)sp->st_txnarray[i].parentid); - if (sp->st_txnarray[i].xa_status != 0) - txn_xid_stats(&sp->st_txnarray[i]); - printf("\n"); - } - - free(sp); - - return (0); -} - -void -txn_xid_stats(txnp) - DB_TXN_ACTIVE *txnp; -{ - u_int32_t v; - u_int i; - int cnt; - - printf("\n\tXA: "); - switch (txnp->xa_status) { - case TXN_XA_ABORTED: - printf("ABORTED"); - break; - case TXN_XA_DEADLOCKED: - printf("DEADLOCKED"); - break; - case TXN_XA_ENDED: - printf("ENDED"); - break; - case TXN_XA_PREPARED: - printf("PREPARED"); - break; - case TXN_XA_STARTED: - printf("STARTED"); - break; - case TXN_XA_SUSPENDED: - printf("SUSPENDED"); - break; - default: - printf("unknown state: %lu", (u_long)txnp->xa_status); - break; - } - printf("; XID:\n\t\t"); - for (i = 0, cnt = 0; i < DB_XIDDATASIZE; i += sizeof(u_int32_t)) { - memcpy(&v, 
&txnp->xid[i], sizeof(u_int32_t)); - printf("0x%x ", v); - if (++cnt == 4) { - printf("\n\t\t"); - cnt = 0; - } - } -} - -int -txn_compare(a1, b1) - const void *a1, *b1; -{ - const DB_TXN_ACTIVE *a, *b; - - a = a1; - b = b1; - - if (a->txnid > b->txnid) - return (1); - if (a->txnid < b->txnid) - return (-1); - return (0); -} - -/* - * dl -- - * Display a big value. - */ -void -dl(msg, value) - const char *msg; - u_long value; -{ - /* - * Two formats: if less than 10 million, display as the number, if - * greater than 10 million display as ###M. - */ - if (value < 10000000) - printf("%lu\t%s", value, msg); - else - printf("%luM\t%s", value / 1000000, msg); -} - -/* - * dl_bytes -- - * Display a big number of bytes. - */ -void -dl_bytes(msg, gbytes, mbytes, bytes) - const char *msg; - u_long gbytes, mbytes, bytes; -{ - const char *sep; - - /* Normalize the values. */ - while (bytes >= MEGABYTE) { - ++mbytes; - bytes -= MEGABYTE; - } - while (mbytes >= GIGABYTE / MEGABYTE) { - ++gbytes; - mbytes -= GIGABYTE / MEGABYTE; - } - - sep = ""; - if (gbytes > 0) { - printf("%luGB", gbytes); - sep = " "; - } - if (mbytes > 0) { - printf("%s%luMB", sep, mbytes); - sep = " "; - } - if (bytes >= 1024) { - printf("%s%luKB", sep, bytes / 1024); - bytes %= 1024; - sep = " "; - } - if (bytes > 0) - printf("%s%luB", sep, bytes); - - printf("\t%s.\n", msg); -} - -/* - * prflags -- - * Print out flag values. - */ -void -prflags(dbp, flags, fnp) - DB *dbp; - u_int32_t flags; - const FN *fnp; -{ - const char *sep; - int lorder; - - sep = "\t"; - printf("Flags:"); - for (; fnp->mask != 0; ++fnp) - if (fnp->mask & flags) { - printf("%s%s", sep, fnp->name); - sep = ", "; - } - - (void)dbp->get_lorder(dbp, &lorder); - switch (lorder) { - case 1234: - printf("%s%s", sep, "little-endian"); - break; - case 4321: - printf("%s%s", sep, "big-endian"); - break; - default: - printf("%s%s", sep, "UNKNOWN-LORDER"); - break; - } - printf("\n"); -} - /* * db_init -- * Initialize the environment. @@ -1260,6 +415,8 @@ db_init(dbenv, home, ttype, cache, is_private) if ((ret = dbenv->open(dbenv, home, DB_JOINENV | DB_USE_ENVIRON, 0)) == 0) return (0); + if (ret == DB_VERSION_MISMATCH) + goto err; if (ttype != T_DB && ttype != T_LOG) { dbenv->err(dbenv, ret, "DB_ENV->open%s%s", home == NULL ? "" : ": ", home == NULL ? "" : home); @@ -1292,31 +449,18 @@ db_init(dbenv, home, ttype, cache, is_private) return (0); /* An environment is required. */ - dbenv->err(dbenv, ret, "open"); - return (1); -} - -/* - * argcheck -- - * Return if argument flags are okay. - */ -int -argcheck(arg, ok_args) - char *arg; - const char *ok_args; -{ - for (; *arg != '\0'; ++arg) - if (strchr(ok_args, *arg) == NULL) - return (0); +err: dbenv->err(dbenv, ret, "DB_ENV->open"); return (1); } int usage() { - fprintf(stderr, "%s\n\t%s\n", - "usage: db_stat [-celmNrtVZ] [-C Aclmop]", - "[-d file [-f] [-s database]] [-h home] [-M Ahlm] [-P password]"); + fprintf(stderr, "usage: db_stat %s\n", + "-d file [-fN] [-h home] [-P password] [-s database]"); + fprintf(stderr, "usage: db_stat %s\n\t%s\n", + "[-cEelmNrtVZ] [-C Aclop]", + "[-h home] [-L A] [-M A] [-P password] [-R A]"); return (EXIT_FAILURE); } diff --git a/db/db_stat/dd.sh b/db/db_stat/dd.sh index 952750481..d8bb7033f 100644 --- a/db/db_stat/dd.sh +++ b/db/db_stat/dd.sh @@ -1,5 +1,5 @@ #! /bin/sh -# $Id: dd.sh,v 1.2 2003/09/05 00:05:59 bostic Exp $ +# $Id: dd.sh,v 1.3 2004/05/04 15:51:45 bostic Exp $ # # Display environment's deadlocks based on "db_stat -Co" output. 
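The db_stat.c rewrite above drops the tool's private formatting helpers (env_stats, btree_stats, dl, dl_bytes, and friends) and calls the library's statistics-display methods directly. A minimal sketch of driving the same interface from application code follows; only the stat_print calls and DB_STAT_* flags come from this patch, while the environment-open flags and error handling are illustrative assumptions.

#include <stdio.h>
#include <db.h>

int
print_env_stats(const char *home)
{
    DB_ENV *dbenv;
    int ret;

    if ((ret = db_env_create(&dbenv, 0)) != 0) {
        fprintf(stderr, "db_env_create: %s\n", db_strerror(ret));
        return (1);
    }
    dbenv->set_errfile(dbenv, stderr);

    /* Join an existing environment, as db_stat does. */
    if ((ret = dbenv->open(dbenv, home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0)
        goto err;

    /* Everything, with per-subsystem detail. */
    if ((ret = dbenv->stat_print(dbenv, DB_STAT_ALL | DB_STAT_SUBSYSTEM)) != 0)
        goto err;

    /* Or a single subsystem, e.g. the lock region plus its conflict matrix. */
    if ((ret = dbenv->lock_stat_print(dbenv, DB_STAT_LOCK_CONF)) != 0)
        goto err;

    return (dbenv->close(dbenv, 0) == 0 ? 0 : 1);

err:    dbenv->err(dbenv, ret, "stat_print");
    (void)dbenv->close(dbenv, 0);
    return (1);
}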
@@ -8,10 +8,23 @@ t2=__b
 trap 'rm -f $t1 $t2; exit 0' 0 1 2 3 13 15
+if [ $# -ne 1 ]; then
+ echo "Usage: dd.sh [db_stat -Co output]"
+ exit 1
+fi
+
+if `egrep '\<WAIT\>.*\<page\>' $1 > /dev/null`; then
+ n=`egrep '\<WAIT\>.*\<page\>' $1 | wc -l | awk '{print $1}'`
+ echo "dd.sh: $1: $n page locks in a WAIT state."
+else
+ echo "dd.sh: $1: No page locks in a WAIT state found."
+ exit 1
+fi
+
 # Print out list of node wait states, and output cycles in the graph.
-egrep 'WAIT.*page' $1 | awk '{print $1 " " $7}' |
+egrep '\<WAIT\>.*\<page\>' $1 | awk '{print $1 " " $7}' |
 while read l p; do
- p=`egrep "HELD.*page[ ][ ]*$p$" $1 | awk '{print $1}'`
+ p=`egrep "\<HELD\>.*\<page\>[ ][ ]*$p$" $1 | awk '{print $1}'`
 echo "$l $p"
 done | tsort > /dev/null 2>$t1
diff --git a/db/db_upgrade/db_upgrade.c b/db/db_upgrade/db_upgrade.c
index f62ebb473..0f43be6ba 100644
--- a/db/db_upgrade/db_upgrade.c
+++ b/db/db_upgrade/db_upgrade.c
@@ -1,17 +1,17 @@
 /*-
 * See the file LICENSE for redistribution information.
 *
- * Copyright (c) 1996-2003
+ * Copyright (c) 1996-2004
 * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_upgrade.c,v 1.37 2004/06/10 01:00:09 bostic Exp $
 */
 #include "db_config.h"
 #ifndef lint
 static const char copyright[] =
- "Copyright (c) 1996-2003\nSleepycat Software Inc. All rights reserved.\n";
-static const char revid[] =
- "$Id: db_upgrade.c,v 1.35 2003/08/13 19:57:09 ubell Exp $";
+ "Copyright (c) 1996-2004\nSleepycat Software Inc. All rights reserved.\n";
 #endif
 #ifndef NO_SYSTEM_INCLUDES
@@ -122,9 +122,11 @@ main(argc, argv)
 */
 if ((ret = dbenv->open(dbenv, home,
 DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret == DB_VERSION_MISMATCH ||
 (ret = dbenv->open(dbenv, home,
- DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
- dbenv->err(dbenv, ret, "open");
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON,
+ 0)) != 0)) {
+ dbenv->err(dbenv, ret, "DB_ENV->open");
 goto shutdown;
 }
diff --git a/db/db_verify/db_verify.c b/db/db_verify/db_verify.c
index cae81f312..148ce1f8e 100644
--- a/db/db_verify/db_verify.c
+++ b/db/db_verify/db_verify.c
@@ -1,17 +1,17 @@
 /*-
 * See the file LICENSE for redistribution information.
 *
- * Copyright (c) 1996-2003
+ * Copyright (c) 1996-2004
 * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_verify.c,v 1.49 2004/08/01 00:21:58 bostic Exp $
 */
 #include "db_config.h"
 #ifndef lint
 static const char copyright[] =
- "Copyright (c) 1996-2003\nSleepycat Software Inc. All rights reserved.\n";
-static const char revid[] =
- "$Id: db_verify.c,v 1.45 2003/08/13 19:57:09 ubell Exp $";
+ "Copyright (c) 1996-2004\nSleepycat Software Inc. All rights reserved.\n";
 #endif
 #ifndef NO_SYSTEM_INCLUDES
@@ -39,8 +39,8 @@ main(argc, argv)
 const char *progname = "db_verify";
 DB *dbp, *dbp1;
 DB_ENV *dbenv;
- u_int32_t cache;
- int ch, exitval, nflag, oflag, private;
+ u_int32_t flags, cache;
+ int ch, exitval, nflag, private;
 int quiet, resize, ret;
 char *home, *passwd;
@@ -50,9 +50,10 @@ main(argc, argv)
 dbenv = NULL;
 dbp = NULL;
 cache = MEGABYTE;
- exitval = nflag = oflag = quiet = 0;
+ exitval = nflag = quiet = 0;
+ flags = 0;
 home = passwd = NULL;
- while ((ch = getopt(argc, argv, "h:NoP:qV")) != EOF)
+ while ((ch = getopt(argc, argv, "h:NoP:quV")) != EOF)
 switch (ch) {
 case 'h':
 home = optarg;
@@ -70,11 +71,14 @@ main(argc, argv)
 }
 break;
 case 'o':
- oflag = 1;
+ LF_SET(DB_NOORDERCHK);
 break;
 case 'q':
 quiet = 1;
 break;
+ case 'u': /* Undocumented.
*/ + LF_SET(DB_UNREF); + break; case 'V': printf("%s\n", db_version(NULL, NULL, NULL)); return (EXIT_SUCCESS); @@ -130,14 +134,18 @@ retry: if ((ret = db_env_create(&dbenv, 0)) != 0) { private = 0; if ((ret = dbenv->open(dbenv, home, DB_INIT_MPOOL | DB_USE_ENVIRON, 0)) != 0) { - if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) != 0) { - dbenv->err(dbenv, ret, "set_cachesize"); - goto shutdown; + if (ret != DB_VERSION_MISMATCH) { + if ((ret = + dbenv->set_cachesize(dbenv, 0, cache, 1)) != 0) { + dbenv->err(dbenv, ret, "set_cachesize"); + goto shutdown; + } + private = 1; + ret = dbenv->open(dbenv, home, DB_CREATE | + DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0); } - private = 1; - if ((ret = dbenv->open(dbenv, home, - DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) { - dbenv->err(dbenv, ret, "open"); + if (ret != 0) { + dbenv->err(dbenv, ret, "DB_ENV->open"); goto shutdown; } } @@ -151,6 +159,10 @@ retry: if ((ret = db_env_create(&dbenv, 0)) != 0) { /* * We create a 2nd dbp to this database to get its pagesize * because the dbp we're using for verify cannot be opened. + * + * If the database is corrupted, we may not be able to open + * it, of course. In that case, just continue, using the + * cache size we have. */ if (private) { if ((ret = db_create(&dbp1, dbenv, 0)) != 0) { @@ -159,12 +171,9 @@ retry: if ((ret = db_env_create(&dbenv, 0)) != 0) { goto shutdown; } - if ((ret = dbp1->open(dbp1, NULL, - argv[0], NULL, DB_UNKNOWN, DB_RDONLY, 0)) != 0) { - dbenv->err(dbenv, ret, "DB->open: %s", argv[0]); - (void)dbp1->close(dbp1, 0); - goto shutdown; - } + ret = dbp1->open(dbp1, + NULL, argv[0], NULL, DB_UNKNOWN, DB_RDONLY, 0); + /* * If we get here, we can check the cache/page. * !!! @@ -173,29 +182,26 @@ retry: if ((ret = db_env_create(&dbenv, 0)) != 0) { * will still be working on the same argv when we * get back into the for-loop. */ - ret = __db_util_cache(dbenv, dbp1, &cache, &resize); - (void)dbp1->close(dbp1, 0); - if (ret != 0) - goto shutdown; - - if (resize) { - (void)dbp->close(dbp, 0); - dbp = NULL; - - (void)dbenv->close(dbenv, 0); - dbenv = NULL; - goto retry; + if (ret == 0) { + if (__db_util_cache( + dbp1, &cache, &resize) == 0 && resize) { + (void)dbp1->close(dbp1, 0); + (void)dbp->close(dbp, 0); + dbp = NULL; + + (void)dbenv->close(dbenv, 0); + dbenv = NULL; + goto retry; + } } + (void)dbp1->close(dbp1, 0); } /* The verify method is a destructor. */ - ret = dbp->verify(dbp, - argv[0], NULL, NULL, oflag ? DB_NOORDERCHK : 0); + ret = dbp->verify(dbp, argv[0], NULL, NULL, flags); dbp = NULL; - if (ret != 0) { - dbenv->err(dbenv, ret, "DB->verify: %s", argv[0]); + if (ret != 0) goto shutdown; - } } if (0) { diff --git a/db/dbinc/btree.h b/db/dbinc/btree.h index 4bd2f5695..d6bb2c839 100644 --- a/db/dbinc/btree.h +++ b/db/dbinc/btree.h @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -39,7 +39,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* - * $Id: btree.h,v 11.47 2003/05/18 16:54:52 bostic Exp $ + * $Id: btree.h,v 11.50 2004/07/22 21:52:57 bostic Exp $ */ #ifndef _DB_BTREE_H_ #define _DB_BTREE_H_ @@ -237,7 +237,7 @@ struct __cursor { */ #define B_MINKEY_TO_OVFLSIZE(dbp, minkey, pgsize) \ ((u_int16_t)(((pgsize) - P_OVERHEAD(dbp)) / ((minkey) * P_INDX) -\ - (BKEYDATA_PSIZE(0) + ALIGN(1, sizeof(int32_t))))) + (BKEYDATA_PSIZE(0) + DB_ALIGN(1, sizeof(int32_t))))) /* * The maximum space that a single item can ever take up on one page. @@ -281,6 +281,7 @@ struct __btree { /* Btree access method. */ * of its information. */ db_pgno_t bt_lpgno; /* Last insert location. */ + DB_LSN bt_llsn; /* Last insert LSN. */ /* * !!! diff --git a/db/dbinc/crypto.h b/db/dbinc/crypto.h index 05fcf6c8e..8eeebf81b 100644 --- a/db/dbinc/crypto.h +++ b/db/dbinc/crypto.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: crypto.h,v 1.11 2003/04/28 14:27:26 bostic Exp $ + * $Id: crypto.h,v 1.12 2004/01/28 03:36:00 bostic Exp $ */ #ifndef _DB_CRYPTO_H_ diff --git a/db/dbinc/cxx_int.h b/db/dbinc/cxx_int.h index 09d6c3349..d71ca25d0 100644 --- a/db/dbinc/cxx_int.h +++ b/db/dbinc/cxx_int.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: cxx_int.h,v 11.22 2003/03/11 15:39:41 merrells Exp $ + * $Id: cxx_int.h,v 11.25 2004/09/22 22:20:31 mjc Exp $ */ #ifndef _CXX_INT_H_ @@ -30,27 +30,23 @@ \ class _IMP_CLASS {}; \ \ - inline _WRAPPED_TYPE unwrap(_WRAPPER_CLASS *val) \ + inline _WRAPPED_TYPE *unwrap(_WRAPPER_CLASS *val) \ { \ if (!val) return (0); \ - return ((_WRAPPED_TYPE)((void *)(val->imp()))); \ + return (val->get_##_WRAPPED_TYPE()); \ } \ \ - inline const _WRAPPED_TYPE unwrapConst(const _WRAPPER_CLASS *val) \ + inline const _WRAPPED_TYPE *unwrapConst(const _WRAPPER_CLASS *val) \ { \ if (!val) return (0); \ - return ((const _WRAPPED_TYPE)((void *)(val->constimp()))); \ - } \ - \ - inline _IMP_CLASS *wrap(_WRAPPED_TYPE val) \ - { \ - return ((_IMP_CLASS*)((void *)val)); \ + return (val->get_const_##_WRAPPED_TYPE()); \ } -WRAPPED_CLASS(DbMpoolFile, DbMpoolFileImp, DB_MPOOLFILE*) -WRAPPED_CLASS(Db, DbImp, DB*) -WRAPPED_CLASS(DbEnv, DbEnvImp, DB_ENV*) -WRAPPED_CLASS(DbTxn, DbTxnImp, DB_TXN*) +WRAPPED_CLASS(Db, DbImp, DB) +WRAPPED_CLASS(DbEnv, DbEnvImp, DB_ENV) +WRAPPED_CLASS(DbMpoolFile, DbMpoolFileImp, DB_MPOOLFILE) +WRAPPED_CLASS(DbSequence, DbSequenceImp, DB_SEQUENCE) +WRAPPED_CLASS(DbTxn, DbTxnImp, DB_TXN) // A tristate integer value used by the DB_ERROR macro below. // We chose not to make this an enumerated type so it can diff --git a/db/dbinc/db.in b/db/dbinc/db.in index 295992c36..dc3d43594 100644 --- a/db/dbinc/db.in +++ b/db/dbinc/db.in @@ -1,10 +1,10 @@ /* * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: db.in,v 11.389 2003/10/01 21:33:58 sue Exp $ + * $Id: db.in,v 11.463 2004/10/11 18:47:50 bostic Exp $ * * db.h include file layout: * General. @@ -23,9 +23,9 @@ #ifndef __NO_SYSTEM_INCLUDES #include - -/* does not include on some systems. 
*/ -@inttypes_decl@ +@inttypes_h_decl@ +@stdint_h_decl@ +@stddef_h_decl@ #include #endif @@ -65,6 +65,8 @@ extern "C" { @u_int16_decl@ @int32_decl@ @u_int32_decl@ +@int64_decl@ +@u_int64_decl@ #endif @u_char_decl@ @@ -73,6 +75,32 @@ extern "C" { @u_long_decl@ @ssize_t_decl@ +/* + * uintmax_t -- + * Largest unsigned type, used to align structures in memory. We don't store + * floating point types in structures, so integral types should be sufficient + * (and we don't have to worry about systems that store floats in other than + * power-of-2 numbers of bytes). Additionally this fixes compilers that rewrite + * structure assignments and ANSI C memcpy calls to be in-line instructions + * that happen to require alignment. Note: this alignment isn't sufficient for + * mutexes, which depend on things like cache line alignment. Mutex alignment + * is handled separately, in mutex.h. + * + * uintptr_t -- + * Unsigned type that's the same size as a pointer. There are places where + * DB modifies pointers by discarding the bottom bits to guarantee alignment. + * We can't use uintmax_t, it may be larger than the pointer, and compilers + * get upset about that. So far we haven't run on any machine where there's + * no unsigned type the same size as a pointer -- here's hoping. + */ +@uintmax_t_decl@ +@uintptr_t_decl@ + +/* + * Sequences are only available on machines with 64-bit integral types. + */ +@db_seq_decl@ + /* Basic types that are exported or quasi-exported. */ typedef u_int32_t db_pgno_t; /* Page number type. */ typedef u_int16_t db_indx_t; /* Page offset type. */ @@ -84,11 +112,13 @@ typedef u_int32_t db_recno_t; /* Record number type. */ typedef u_int32_t db_timeout_t; /* Type of a timeout. */ /* - * Region offsets are currently limited to 32-bits. I expect that's going - * to have to be fixed in the not-too-distant future, since we won't want to - * split 100Gb memory pools into that many different regions. + * Region offsets are the difference between a pointer in a region and the + * region's base address. With private environments, both addresses are the + * result of calling malloc, and we can't assume anything about what malloc + * will return, so region offsets have to be able to hold differences between + * arbitrary pointers. */ -typedef u_int32_t roff_t; +typedef uintptr_t roff_t; /* * Forward structure declarations, so we can declare pointers and @@ -115,6 +145,9 @@ struct __db_preplist; typedef struct __db_preplist DB_PREPLIST; struct __db_qam_stat; typedef struct __db_qam_stat DB_QUEUE_STAT; struct __db_rep; typedef struct __db_rep DB_REP; struct __db_rep_stat; typedef struct __db_rep_stat DB_REP_STAT; +struct __db_sequence; typedef struct __db_sequence DB_SEQUENCE; +struct __db_seq_record; typedef struct __db_seq_record DB_SEQ_RECORD; +struct __db_seq_stat; typedef struct __db_seq_stat DB_SEQUENCE_STAT; struct __db_txn; typedef struct __db_txn DB_TXN; struct __db_txn_active; typedef struct __db_txn_active DB_TXN_ACTIVE; struct __db_txn_stat; typedef struct __db_txn_stat DB_TXN_STAT; @@ -175,6 +208,8 @@ struct __db_dbt { * DB_AUTO_COMMIT: * DB_ENV->set_flags, DB->associate, DB->del, DB->put, DB->open, * DB->remove, DB->rename, DB->truncate + * DB_DEGREE_2: + * DB->cursor, DB->get, DB->join, DBcursor->c_get, DB_ENV->txn_begin * DB_DIRTY_READ: * DB->cursor, DB->get, DB->join, DB->open, DBcursor->c_get, * DB_ENV->txn_begin @@ -183,13 +218,14 @@ struct __db_dbt { * DB->remove, DB->rename, DB->truncate * * !!! 
- * The DB_DIRTY_READ bit mask can't be changed without also changing the - * masks for the flags that can be OR'd into DB access method and cursor - * operation values. + * The DB_DIRTY_READ and DB_DEGREE_2 bit masks can't be changed without + * also changing the masks for the flags that can be OR'd into DB + * access method and cursor operation values. */ -#define DB_AUTO_COMMIT 0x1000000 /* Implied transaction. */ -#define DB_DIRTY_READ 0x2000000 /* Dirty Read. */ -#define DB_NO_AUTO_COMMIT 0x4000000 /* Override env-wide AUTO-COMMIT. */ +#define DB_AUTO_COMMIT 0x01000000/* Implied transaction. */ +#define DB_DEGREE_2 0x02000000/* Degree 2. */ +#define DB_DIRTY_READ 0x04000000/* Dirty Read. */ +#define DB_NO_AUTO_COMMIT 0x08000000/* Override env-wide AUTOCOMMIT. */ /* * Flags private to db_env_create. @@ -238,19 +274,25 @@ struct __db_dbt { /* * Flags private to DB_ENV->set_flags. - * Shared flags up to 0x0000800 */ -#define DB_CDB_ALLDB 0x0001000 /* Set CDB locking per environment. */ -#define DB_DIRECT_DB 0x0002000 /* Don't buffer databases in the OS. */ -#define DB_DIRECT_LOG 0x0004000 /* Don't buffer log files in the OS. */ -#define DB_LOG_AUTOREMOVE 0x0008000 /* Automatically remove log files. */ -#define DB_NOLOCKING 0x0010000 /* Set locking/mutex behavior. */ -#define DB_NOPANIC 0x0020000 /* Set panic state per DB_ENV. */ -#define DB_OVERWRITE 0x0040000 /* Overwrite unlinked region files. */ -#define DB_PANIC_ENVIRONMENT 0x0080000 /* Set panic state per environment. */ -#define DB_REGION_INIT 0x0100000 /* Page-fault regions on open. */ -#define DB_TIME_NOTGRANTED 0x0200000 /* Return NOTGRANTED on timeout. */ -#define DB_TXN_WRITE_NOSYNC 0x0400000 /* Write, don't sync, on txn commit. */ -#define DB_YIELDCPU 0x0800000 /* Yield the CPU (a lot). */ + * Shared flags up to 0x00000800 */ +#define DB_CDB_ALLDB 0x00001000/* Set CDB locking per environment. */ +#define DB_DIRECT_DB 0x00002000/* Don't buffer databases in the OS. */ +#define DB_DIRECT_LOG 0x00004000/* Don't buffer log files in the OS. */ +#define DB_DSYNC_LOG 0x00008000/* Set O_DSYNC on the log. */ +#define DB_LOG_AUTOREMOVE 0x00010000/* Automatically remove log files. */ +#define DB_LOG_INMEMORY 0x00020000/* Store logs in buffers in memory. */ +#define DB_NOLOCKING 0x00040000/* Set locking/mutex behavior. */ +#define DB_NOPANIC 0x00080000/* Set panic state per DB_ENV. */ +#define DB_OVERWRITE 0x00100000/* Overwrite unlinked region files. */ +#define DB_PANIC_ENVIRONMENT 0x00200000/* Set panic state per environment. */ +#define DB_REGION_INIT 0x00400000/* Page-fault regions on open. */ +#define DB_TIME_NOTGRANTED 0x00800000/* Return NOTGRANTED on timeout. */ +/* Shared flags at 0x01000000 */ +/* Shared flags at 0x02000000 */ +/* Shared flags at 0x04000000 */ +/* Shared flags at 0x08000000 */ +#define DB_TXN_WRITE_NOSYNC 0x10000000/* Write, don't sync, on txn commit. */ +#define DB_YIELDCPU 0x20000000/* Yield the CPU (a lot). */ /* * Flags private to DB->set_feedback's callback. @@ -262,8 +304,9 @@ struct __db_dbt { * Flags private to DB_MPOOLFILE->open. * Shared flags up to 0x0000800 */ #define DB_DIRECT 0x0001000 /* Don't buffer the file in the OS. */ -#define DB_EXTENT 0x0002000 /* UNDOC: dealing with an extent. */ -#define DB_ODDFILESIZE 0x0004000 /* Truncate file to N * pgsize. */ +#define DB_DURABLE_UNKNOWN 0x0002000 /* internal: durability on open. */ +#define DB_EXTENT 0x0004000 /* internal: dealing with an extent. */ +#define DB_ODDFILESIZE 0x0008000 /* Truncate file to N * pgsize. 
*/ /* * Flags private to DB->set_flags. @@ -272,15 +315,23 @@ struct __db_dbt { #define DB_DUP 0x0000002 /* Btree, Hash: duplicate keys. */ #define DB_DUPSORT 0x0000004 /* Btree, Hash: duplicate keys. */ #define DB_ENCRYPT 0x0000008 /* Btree, Hash: duplicate keys. */ -#define DB_RECNUM 0x0000010 /* Btree: record numbers. */ -#define DB_RENUMBER 0x0000020 /* Recno: renumber on insert/delete. */ -#define DB_REVSPLITOFF 0x0000040 /* Btree: turn off reverse splits. */ -#define DB_SNAPSHOT 0x0000080 /* Recno: snapshot the input. */ +#define DB_INORDER 0x0000010 /* Queue: strict ordering on consume. */ +#define DB_RECNUM 0x0000020 /* Btree: record numbers. */ +#define DB_RENUMBER 0x0000040 /* Recno: renumber on insert/delete. */ +#define DB_REVSPLITOFF 0x0000080 /* Btree: turn off reverse splits. */ +#define DB_SNAPSHOT 0x0000100 /* Recno: snapshot the input. */ /* - * Flags private to the DB->stat methods. + * Flags private to the DB_ENV->stat_print, DB->stat and DB->stat_print methods. */ -#define DB_STAT_CLEAR 0x0000001 /* Clear stat after returning values. */ +#define DB_STAT_ALL 0x0000001 /* Print: Everything. */ +#define DB_STAT_CLEAR 0x0000002 /* Clear stat after returning values. */ +#define DB_STAT_LOCK_CONF 0x0000004 /* Print: Lock conflict matrix. */ +#define DB_STAT_LOCK_LOCKERS 0x0000008 /* Print: Lockers. */ +#define DB_STAT_LOCK_OBJECTS 0x0000010 /* Print: Lock objects. */ +#define DB_STAT_LOCK_PARAMS 0x0000020 /* Print: Lock parameters. */ +#define DB_STAT_MEMP_HASH 0x0000040 /* Print: Mpool hash buckets. */ +#define DB_STAT_SUBSYSTEM 0x0000080 /* Print: Subsystems too. */ /* * Flags private to DB->join. @@ -297,6 +348,7 @@ struct __db_dbt { #define DB_PR_RECOVERYTEST 0x0000010 /* Recovery test (-dr). */ #define DB_PRINTABLE 0x0000020 /* Use printable format for salvage. */ #define DB_SALVAGE 0x0000040 /* Salvage what looks like data. */ +#define DB_UNREF 0x0000080 /* Report unreferenced pages. */ /* * !!! * These must not go over 0x8000, or they will collide with the flags @@ -323,20 +375,22 @@ struct __db_dbt { #define DB_LOCK_NORUN 0 #define DB_LOCK_DEFAULT 1 /* Default policy. */ #define DB_LOCK_EXPIRE 2 /* Only expire locks, no detection. */ -#define DB_LOCK_MAXLOCKS 3 /* Abort txn with maximum # of locks. */ -#define DB_LOCK_MINLOCKS 4 /* Abort txn with minimum # of locks. */ -#define DB_LOCK_MINWRITE 5 /* Abort txn with minimum writelocks. */ -#define DB_LOCK_OLDEST 6 /* Abort oldest transaction. */ -#define DB_LOCK_RANDOM 7 /* Abort random transaction. */ -#define DB_LOCK_YOUNGEST 8 /* Abort youngest transaction. */ +#define DB_LOCK_MAXLOCKS 3 /* Select locker with max locks. */ +#define DB_LOCK_MAXWRITE 4 /* Select locker with max writelocks. */ +#define DB_LOCK_MINLOCKS 5 /* Select locker with min locks. */ +#define DB_LOCK_MINWRITE 6 /* Select locker with min writelocks. */ +#define DB_LOCK_OLDEST 7 /* Select oldest locker. */ +#define DB_LOCK_RANDOM 8 /* Select random locker. */ +#define DB_LOCK_YOUNGEST 9 /* Select youngest locker. */ /* Flag values for lock_vec(), lock_get(). */ -#define DB_LOCK_NOWAIT 0x001 /* Don't wait on unavailable lock. */ -#define DB_LOCK_RECORD 0x002 /* Internal: record lock. */ -#define DB_LOCK_REMOVE 0x004 /* Internal: flag object removed. */ -#define DB_LOCK_SET_TIMEOUT 0x008 /* Internal: set lock timeout. */ -#define DB_LOCK_SWITCH 0x010 /* Internal: switch existing lock. */ -#define DB_LOCK_UPGRADE 0x020 /* Internal: upgrade existing lock. */ +#define DB_LOCK_ABORT 0x001 /* Internal: Lock during abort. 
*/ +#define DB_LOCK_NOWAIT 0x002 /* Don't wait on unavailable lock. */ +#define DB_LOCK_RECORD 0x004 /* Internal: record lock. */ +#define DB_LOCK_REMOVE 0x008 /* Internal: flag object removed. */ +#define DB_LOCK_SET_TIMEOUT 0x010 /* Internal: set lock timeout. */ +#define DB_LOCK_SWITCH 0x020 /* Internal: switch existing lock. */ +#define DB_LOCK_UPGRADE 0x040 /* Internal: upgrade existing lock. */ /* * Simple R/W lock modes and for multi-granularity intention locking. @@ -380,16 +434,15 @@ typedef enum { */ typedef enum { DB_LSTAT_ABORTED=1, /* Lock belongs to an aborted txn. */ - DB_LSTAT_ERR=2, /* Lock is bad. */ - DB_LSTAT_EXPIRED=3, /* Lock has expired. */ - DB_LSTAT_FREE=4, /* Lock is unallocated. */ - DB_LSTAT_HELD=5, /* Lock is currently held. */ - DB_LSTAT_NOTEXIST=6, /* Object on which lock was waiting + DB_LSTAT_EXPIRED=2, /* Lock has expired. */ + DB_LSTAT_FREE=3, /* Lock is unallocated. */ + DB_LSTAT_HELD=4, /* Lock is currently held. */ + DB_LSTAT_NOTEXIST=5, /* Object on which lock was waiting * was removed */ - DB_LSTAT_PENDING=7, /* Lock was waiting and has been + DB_LSTAT_PENDING=6, /* Lock was waiting and has been * promoted; waiting for the owner * to run and upgrade it to held. */ - DB_LSTAT_WAITING=8 /* Lock is on the wait queue. */ + DB_LSTAT_WAITING=7 /* Lock is on the wait queue. */ }db_status_t; /* Lock statistics structure. */ @@ -399,7 +452,7 @@ struct __db_lock_stat { u_int32_t st_maxlocks; /* Maximum number of locks in table. */ u_int32_t st_maxlockers; /* Maximum num of lockers in table. */ u_int32_t st_maxobjects; /* Maximum num of objects in table. */ - u_int32_t st_nmodes; /* Number of lock modes. */ + int st_nmodes; /* Number of lock modes. */ u_int32_t st_nlocks; /* Current number of locks. */ u_int32_t st_maxnlocks; /* Maximum number of locks so far. */ u_int32_t st_nlockers; /* Current number of lockers. */ @@ -418,7 +471,7 @@ struct __db_lock_stat { u_int32_t st_ntxntimeouts; /* Number of transaction timeouts. */ u_int32_t st_region_wait; /* Region lock granted after wait. */ u_int32_t st_region_nowait; /* Region lock granted without wait. */ - u_int32_t st_regsize; /* Region size. */ + roff_t st_regsize; /* Region size. */ }; /* @@ -440,7 +493,7 @@ struct __db_ilock { * lock_get request (or a lock_vec/DB_LOCK_GET). */ struct __db_lock_u { - size_t off; /* Offset of the lock in the region */ + roff_t off; /* Offset of the lock in the region */ u_int32_t ndx; /* Index of the object referenced by * this lock; used for locking. */ u_int32_t gen; /* Generation number of this lock. */ @@ -459,8 +512,8 @@ struct __db_lockreq { /******************************************************* * Logging. *******************************************************/ -#define DB_LOGVERSION 8 /* Current log version. */ -#define DB_LOGOLDVER 8 /* Oldest log version supported. */ +#define DB_LOGVERSION 10 /* Current log version. */ +#define DB_LOGOLDVER 10 /* Oldest log version supported. */ #define DB_LOGMAGIC 0x040988 /* Flag values for DB_ENV->log_archive(). 
*/ @@ -476,7 +529,8 @@ struct __db_lockreq { #define DB_LOG_NOCOPY 0x008 /* Don't copy data */ #define DB_LOG_NOT_DURABLE 0x010 /* Do not log; keep in memory */ #define DB_LOG_PERM 0x020 /* Flag record with REP_PERMANENT */ -#define DB_LOG_WRNOSYNC 0x040 /* Write, don't sync log_put */ +#define DB_LOG_RESEND 0x040 /* Resent log record */ +#define DB_LOG_WRNOSYNC 0x080 /* Write, don't sync log_put */ /* * A DB_LSN has two parts, a fileid which identifies a specific file, and an @@ -540,7 +594,7 @@ struct __db_log_cursor { struct __db_log_stat { u_int32_t st_magic; /* Log file magic number. */ u_int32_t st_version; /* Log file version number. */ - int st_mode; /* Log file mode. */ + int st_mode; /* Log file mode. */ u_int32_t st_lg_bsize; /* Log buffer size. */ u_int32_t st_lg_size; /* Log file size. */ u_int32_t st_w_bytes; /* Bytes to log. */ @@ -556,11 +610,19 @@ struct __db_log_stat { u_int32_t st_cur_offset; /* Current log file offset. */ u_int32_t st_disk_file; /* Known on disk log file number. */ u_int32_t st_disk_offset; /* Known on disk log file offset. */ - u_int32_t st_regsize; /* Region size. */ + roff_t st_regsize; /* Region size. */ u_int32_t st_maxcommitperflush; /* Max number of commits in a flush. */ u_int32_t st_mincommitperflush; /* Min number of commits in a flush. */ }; +/* + * We need to record the first log record of a transaction. + * For user defined logging this macro returns the place to + * put that information, if it is need in rlsnp, otherwise it + * leaves it unchanged. + */ +#define DB_SET_BEGIN_LSNP(txn, rlsnp) ((txn)->set_begin_lsnp(txn, rlsnp)) + /******************************************************* * Shared buffer cache (mpool). *******************************************************/ @@ -573,6 +635,7 @@ struct __db_log_stat { #define DB_MPOOL_CLEAN 0x001 /* Page is not modified. */ #define DB_MPOOL_DIRTY 0x002 /* Page is modified. */ #define DB_MPOOL_DISCARD 0x004 /* Don't cache the page. */ +#define DB_MPOOL_FREE 0x008 /* Free page if present. */ /* Flags values for DB_MPOOLFILE->set_flags. */ #define DB_MPOOL_NOFILE 0x001 /* Never open a backing file. */ @@ -640,7 +703,7 @@ struct __db_mpoolfile { /* Methods. */ int (*close) __P((DB_MPOOLFILE *, u_int32_t)); int (*get) __P((DB_MPOOLFILE *, db_pgno_t *, u_int32_t, void *)); - int (*open)__P((DB_MPOOLFILE *, const char *, u_int32_t, int, size_t)); + int (*open) __P((DB_MPOOLFILE *, const char *, u_int32_t, int, size_t)); int (*put) __P((DB_MPOOLFILE *, void *, u_int32_t)); int (*set) __P((DB_MPOOLFILE *, void *, u_int32_t)); int (*get_clear_len) __P((DB_MPOOLFILE *, u_int32_t *)); @@ -666,7 +729,7 @@ struct __db_mpoolfile { * thread protected because they are initialized before the file is * linked onto the per-process lists, and never modified. * - * MP_FLUSH is thread protected becase it is potentially read/set by + * MP_FLUSH is thread protected because it is potentially read/set by * multiple threads of control. */ #define MP_FILEID_SET 0x001 /* Application supplied a file ID. */ @@ -676,14 +739,16 @@ struct __db_mpoolfile { u_int32_t flags; }; -/* - * Mpool statistics structure. - */ +/* Mpool statistics structure. */ struct __db_mpool_stat { u_int32_t st_gbytes; /* Total cache size: GB. */ u_int32_t st_bytes; /* Total cache size: B. */ u_int32_t st_ncache; /* Number of caches. */ - u_int32_t st_regsize; /* Cache size. */ + roff_t st_regsize; /* Region size. */ + size_t st_mmapsize; /* Maximum file size for mmap. */ + int st_maxopenfd; /* Maximum number of open fd's. 
*/ + int st_maxwrite; /* Maximum buffers to write. */ + int st_maxwrite_sleep; /* Sleep after writing max buffers. */ u_int32_t st_map; /* Pages from mapped files. */ u_int32_t st_cache_hit; /* Pages found in the cache. */ u_int32_t st_cache_miss; /* Pages not found in the cache. */ @@ -715,7 +780,7 @@ struct __db_mpool_stat { /* Mpool file statistics structure. */ struct __db_mpool_fstat { char *file_name; /* File name. */ - size_t st_pagesize; /* Page size. */ + u_int32_t st_pagesize; /* Page size. */ u_int32_t st_map; /* Pages from mapped files. */ u_int32_t st_cache_hit; /* Pages found in the cache. */ u_int32_t st_cache_miss; /* Pages not found in the cache. */ @@ -735,10 +800,9 @@ typedef enum { DB_TXN_BACKWARD_ALLOC=2, /* Internal. */ DB_TXN_BACKWARD_ROLL=3, /* Public. */ DB_TXN_FORWARD_ROLL=4, /* Public. */ - DB_TXN_GETPGNOS=5, /* Internal. */ - DB_TXN_OPENFILES=6, /* Internal. */ - DB_TXN_POPENFILES=7, /* Internal. */ - DB_TXN_PRINT=8 /* Public. */ + DB_TXN_OPENFILES=5, /* Internal. */ + DB_TXN_POPENFILES=6, /* Internal. */ + DB_TXN_PRINT=7 /* Public. */ } db_recops; /* @@ -817,8 +881,8 @@ struct __db_txn { struct __db_txn **tqe_prev; } klinks; - /* API-private structure: used by C++ */ - void *api_internal; + void *api_internal; /* C++ API private. */ + void *xml_internal; /* XML API private. */ u_int32_t cursors; /* Number of cursors open for txn */ @@ -828,17 +892,20 @@ struct __db_txn { int (*discard) __P((DB_TXN *, u_int32_t)); u_int32_t (*id) __P((DB_TXN *)); int (*prepare) __P((DB_TXN *, u_int8_t *)); + void (*set_begin_lsnp) __P((DB_TXN *txn, DB_LSN **)); int (*set_timeout) __P((DB_TXN *, db_timeout_t, u_int32_t)); #define TXN_CHILDCOMMIT 0x001 /* Transaction that has committed. */ #define TXN_COMPENSATE 0x002 /* Compensating transaction. */ -#define TXN_DIRTY_READ 0x004 /* Transaction does dirty reads. */ -#define TXN_LOCKTIMEOUT 0x008 /* Transaction has a lock timeout. */ -#define TXN_MALLOC 0x010 /* Structure allocated by TXN system. */ -#define TXN_NOSYNC 0x020 /* Do not sync on prepare and commit. */ -#define TXN_NOWAIT 0x040 /* Do not wait on locks. */ -#define TXN_RESTORED 0x080 /* Transaction has been restored. */ -#define TXN_SYNC 0x100 /* Sync on prepare and commit. */ +#define TXN_DEADLOCK 0x004 /* Transaction has deadlocked. */ +#define TXN_DEGREE_2 0x008 /* Has degree 2 isolation. */ +#define TXN_DIRTY_READ 0x010 /* Transaction does dirty reads. */ +#define TXN_LOCKTIMEOUT 0x020 /* Transaction has a lock timeout. */ +#define TXN_MALLOC 0x040 /* Structure allocated by TXN system. */ +#define TXN_NOSYNC 0x080 /* Do not sync on prepare and commit. */ +#define TXN_NOWAIT 0x100 /* Do not wait on locks. */ +#define TXN_RESTORED 0x200 /* Transaction has been restored. */ +#define TXN_SYNC 0x400 /* Sync on prepare and commit. */ u_int32_t flags; }; @@ -880,7 +947,7 @@ struct __db_txn_stat { DB_TXN_ACTIVE *st_txnarray; /* array of active transactions */ u_int32_t st_region_wait; /* Region lock granted after wait. */ u_int32_t st_region_nowait; /* Region lock granted without wait. */ - u_int32_t st_regsize; /* Region size. */ + roff_t st_regsize; /* Region size. */ }; /******************************************************* @@ -892,8 +959,7 @@ struct __db_txn_stat { /* rep_start flags values */ #define DB_REP_CLIENT 0x001 -#define DB_REP_LOGSONLY 0x002 -#define DB_REP_MASTER 0x004 +#define DB_REP_MASTER 0x002 /* Replication statistics. */ struct __db_rep_stat { @@ -909,13 +975,15 @@ struct __db_rep_stat { u_int32_t st_status; /* Current replication status. 
*/ DB_LSN st_next_lsn; /* Next LSN to use or expect. */ DB_LSN st_waiting_lsn; /* LSN we're awaiting, if any. */ + db_pgno_t st_next_pg; /* Next pg we expect. */ + db_pgno_t st_waiting_pg; /* pg we're awaiting, if any. */ u_int32_t st_dupmasters; /* # of times a duplicate master condition was detected.+ */ int st_env_id; /* Current environment ID. */ int st_env_priority; /* Current environment priority. */ u_int32_t st_gen; /* Current generation number. */ - u_int32_t st_in_recovery; /* This site is in client sync-up. */ + u_int32_t st_egen; /* Current election gen number. */ u_int32_t st_log_duplicated; /* Log records received multiply.+ */ u_int32_t st_log_queued; /* Log records currently queued.+ */ u_int32_t st_log_queued_max; /* Max. log records queued at once.+ */ @@ -936,6 +1004,10 @@ struct __db_rep_stat { u_int32_t st_nthrottles; /* # of times we were throttled. */ u_int32_t st_outdated; /* # of times we detected and returned an OUTDATED condition.+ */ + u_int32_t st_pg_duplicated; /* Pages received multiply.+ */ + u_int32_t st_pg_records; /* Pages received and stored.+ */ + u_int32_t st_pg_requested; /* Pages missed and requested.+ */ + u_int32_t st_startup_complete; /* Site completed client sync-up. */ u_int32_t st_txns_applied; /* # of transactions applied.+ */ /* Elections generally. */ @@ -947,11 +1019,75 @@ struct __db_rep_stat { u_int32_t st_election_gen; /* Election generation number. */ DB_LSN st_election_lsn; /* Max. LSN of current winner. */ int st_election_nsites; /* # of "registered voters". */ + int st_election_nvotes; /* # of "registered voters" needed. */ int st_election_priority; /* Current election priority. */ int st_election_status; /* Current election status. */ - int st_election_tiebreaker; /* Election tiebreaker value. */ + u_int32_t st_election_tiebreaker;/* Election tiebreaker value. */ int st_election_votes; /* Votes received in this round. */ }; +/* + * The storage record for a sequence. + */ +struct __db_seq_record { + u_int32_t seq_version; /* Version size/number. */ +#define DB_SEQ_DEC 0x00000001 /* Decrement sequence. */ +#define DB_SEQ_INC 0x00000002 /* Increment sequence. */ +#define DB_SEQ_RANGE_SET 0x00000004 /* Range set (internal). */ +#define DB_SEQ_WRAP 0x00000008 /* Wrap sequence at min/max. */ + u_int32_t flags; /* Flags. */ + db_seq_t seq_value; /* Current value. */ + db_seq_t seq_max; /* Max permitted. */ + db_seq_t seq_min; /* Min permitted. */ +}; + +/* + * Handle for a sequence object. + */ +struct __db_sequence { + DB *seq_dbp; /* DB handle for this sequence. */ + DB_MUTEX *seq_mutexp; /* Mutex if sequence is threaded. */ + DB_SEQ_RECORD *seq_rp; /* Pointer to current data. */ + DB_SEQ_RECORD seq_record; /* Data from DB_SEQUENCE. */ + int32_t seq_cache_size; /* Number of values cached. */ + db_seq_t seq_last_value; /* Last value cached. */ + DBT seq_key; /* DBT pointing to sequence key. */ + DBT seq_data; /* DBT pointing to seq_record. */ + + /* API-private structure: used by C++ and Java. 
*/ + void *api_internal; + + int (*close) __P((DB_SEQUENCE *, u_int32_t)); + int (*get) __P((DB_SEQUENCE *, + DB_TXN *, int32_t, db_seq_t *, u_int32_t)); + int (*get_cachesize) __P((DB_SEQUENCE *, int32_t *)); + int (*get_db) __P((DB_SEQUENCE *, DB **)); + int (*get_flags) __P((DB_SEQUENCE *, u_int32_t *)); + int (*get_key) __P((DB_SEQUENCE *, DBT *)); + int (*get_range) __P((DB_SEQUENCE *, + db_seq_t *, db_seq_t *)); + int (*initial_value) __P((DB_SEQUENCE *, db_seq_t)); + int (*open) __P((DB_SEQUENCE *, + DB_TXN *, DBT *, u_int32_t)); + int (*remove) __P((DB_SEQUENCE *, DB_TXN *, u_int32_t)); + int (*set_cachesize) __P((DB_SEQUENCE *, int32_t)); + int (*set_flags) __P((DB_SEQUENCE *, u_int32_t)); + int (*set_range) __P((DB_SEQUENCE *, db_seq_t, db_seq_t)); + int (*stat) __P((DB_SEQUENCE *, + DB_SEQUENCE_STAT **, u_int32_t)); + int (*stat_print) __P((DB_SEQUENCE *, u_int32_t)); +}; + +struct __db_seq_stat { + u_int32_t st_wait; /* Sequence lock granted without wait. */ + u_int32_t st_nowait; /* Sequence lock granted after wait. */ + db_seq_t st_current; /* Current value in db. */ + db_seq_t st_value; /* Current cached value. */ + db_seq_t st_last_value; /* Last cached value. */ + db_seq_t st_min; /* Minimum value. */ + db_seq_t st_max; /* Maximum value. */ + int32_t st_cache_size; /* Cache size. */ + u_int32_t st_flags; /* Flag value. */ +}; /******************************************************* * Access methods. @@ -978,6 +1114,8 @@ typedef enum { #define DB_QAMOLDVER 3 /* Oldest queue version supported. */ #define DB_QAMMAGIC 0x042253 +#define DB_SEQUENCE_VERSION 1 /* Current sequence version. */ + /* * DB access method and cursor operation values. Each value is an operation * code to which additional bit flags are added. @@ -1026,10 +1164,10 @@ typedef enum { * Masks for flags that can be OR'd into DB access method and cursor * operation values. * - * DB_DIRTY_READ 0x02000000 Dirty Read. */ -#define DB_MULTIPLE 0x04000000 /* Return multiple data values. */ -#define DB_MULTIPLE_KEY 0x08000000 /* Return multiple data/key pairs. */ -#define DB_RMW 0x10000000 /* Acquire write flag immediately. */ + * DB_DIRTY_READ 0x04000000 Dirty Read. */ +#define DB_MULTIPLE 0x08000000 /* Return multiple data values. */ +#define DB_MULTIPLE_KEY 0x10000000 /* Return multiple data/key pairs. */ +#define DB_RMW 0x20000000 /* Acquire write flag immediately. */ /* * DB (user visible) error return codes. @@ -1046,42 +1184,47 @@ typedef enum { * document that we own the error name space from -30,800 to -30,999. */ /* DB (public) error return codes. */ -#define DB_DONOTINDEX (-30999)/* "Null" return from 2ndary callbk. */ -#define DB_FILEOPEN (-30998)/* Rename/remove while file is open. */ +#define DB_BUFFER_SMALL (-30999)/* User memory too small for return. */ +#define DB_DONOTINDEX (-30998)/* "Null" return from 2ndary callbk. */ #define DB_KEYEMPTY (-30997)/* Key/data deleted or never created. */ #define DB_KEYEXIST (-30996)/* The key/data pair already exists. */ #define DB_LOCK_DEADLOCK (-30995)/* Deadlock. */ #define DB_LOCK_NOTGRANTED (-30994)/* Lock unavailable. */ -#define DB_NOSERVER (-30993)/* Server panic return. */ -#define DB_NOSERVER_HOME (-30992)/* Bad home sent to server. */ -#define DB_NOSERVER_ID (-30991)/* Bad ID sent to server. */ -#define DB_NOTFOUND (-30990)/* Key/data pair not found (EOF). */ -#define DB_OLD_VERSION (-30989)/* Out-of-date version. */ -#define DB_PAGE_NOTFOUND (-30988)/* Requested page not found. */ -#define DB_REP_DUPMASTER (-30987)/* There are two masters. 
*/ -#define DB_REP_HANDLE_DEAD (-30986)/* Rolled back a commit. */ -#define DB_REP_HOLDELECTION (-30985)/* Time to hold an election. */ -#define DB_REP_ISPERM (-30984)/* Cached not written perm written.*/ -#define DB_REP_NEWMASTER (-30983)/* We have learned of a new master. */ -#define DB_REP_NEWSITE (-30982)/* New site entered system. */ -#define DB_REP_NOTPERM (-30981)/* Permanent log record not written. */ -#define DB_REP_OUTDATED (-30980)/* Site is too far behind master. */ -#define DB_REP_UNAVAIL (-30979)/* Site cannot currently be reached. */ -#define DB_RUNRECOVERY (-30978)/* Panic return. */ -#define DB_SECONDARY_BAD (-30977)/* Secondary index corrupt. */ -#define DB_VERIFY_BAD (-30976)/* Verify failed; bad format. */ +#define DB_LOG_BUFFER_FULL (-30993)/* In-memory log buffer full. */ +#define DB_NOSERVER (-30992)/* Server panic return. */ +#define DB_NOSERVER_HOME (-30991)/* Bad home sent to server. */ +#define DB_NOSERVER_ID (-30990)/* Bad ID sent to server. */ +#define DB_NOTFOUND (-30989)/* Key/data pair not found (EOF). */ +#define DB_OLD_VERSION (-30988)/* Out-of-date version. */ +#define DB_PAGE_NOTFOUND (-30987)/* Requested page not found. */ +#define DB_REP_DUPMASTER (-30986)/* There are two masters. */ +#define DB_REP_HANDLE_DEAD (-30985)/* Rolled back a commit. */ +#define DB_REP_HOLDELECTION (-30984)/* Time to hold an election. */ +#define DB_REP_ISPERM (-30983)/* Cached not written perm written.*/ +#define DB_REP_NEWMASTER (-30982)/* We have learned of a new master. */ +#define DB_REP_NEWSITE (-30981)/* New site entered system. */ +#define DB_REP_NOTPERM (-30980)/* Permanent log record not written. */ +#define DB_REP_STARTUPDONE (-30979)/* Client startup complete. */ +#define DB_REP_UNAVAIL (-30978)/* Site cannot currently be reached. */ +#define DB_RUNRECOVERY (-30977)/* Panic return. */ +#define DB_SECONDARY_BAD (-30976)/* Secondary index corrupt. */ +#define DB_VERIFY_BAD (-30975)/* Verify failed; bad format. */ +#define DB_VERSION_MISMATCH (-30974)/* Environment version mismatch. */ /* DB (private) error return codes. */ #define DB_ALREADY_ABORTED (-30899) #define DB_DELETED (-30898)/* Recovery file marked deleted. */ #define DB_LOCK_NOTEXIST (-30897)/* Object to lock is gone. */ #define DB_NEEDSPLIT (-30896)/* Page needs to be split. */ -#define DB_SURPRISE_KID (-30895)/* Child commit where parent +#define DB_REP_EGENCHG (-30895)/* Egen changed while in election. */ +#define DB_REP_LOGREADY (-30894)/* Rep log ready for recovery. */ +#define DB_REP_PAGEDONE (-30893)/* This page was already done. */ +#define DB_SURPRISE_KID (-30892)/* Child commit where parent didn't know it was a parent. */ -#define DB_SWAPBYTES (-30894)/* Database needs byte swapping. */ -#define DB_TIMEOUT (-30893)/* Timed out waiting for election. */ -#define DB_TXN_CKP (-30892)/* Encountered ckp record in log. */ -#define DB_VERIFY_FATAL (-30891)/* DB->verify cannot proceed. */ +#define DB_SWAPBYTES (-30891)/* Database needs byte swapping. */ +#define DB_TIMEOUT (-30890)/* Timed out waiting for election. */ +#define DB_TXN_CKP (-30889)/* Encountered ckp record in log. */ +#define DB_VERIFY_FATAL (-30888)/* DB->verify cannot proceed. */ /* Database handle. */ struct __db { @@ -1124,7 +1267,7 @@ struct __db { u_int32_t associate_lid; /* Locker id for DB->associate call. */ DB_LOCK handle_lock; /* Lock held on this handle. */ - long cl_id; /* RPC: remote client id. */ + u_int cl_id; /* RPC: remote client id. */ time_t timestamp; /* Handle timestamp for replication. 
*/ @@ -1237,6 +1380,8 @@ struct __db { int (*close) __P((DB *, u_int32_t)); int (*cursor) __P((DB *, DB_TXN *, DBC **, u_int32_t)); int (*del) __P((DB *, DB_TXN *, DBT *, u_int32_t)); + int (*dump) __P((DB *, + const char *, int (*)(void *, const void *), void *, int, int)); void (*err) __P((DB *, int, const char *, ...)); void (*errx) __P((DB *, const char *, ...)); int (*fd) __P((DB *, int *)); @@ -1246,14 +1391,14 @@ struct __db { int (*get_cachesize) __P((DB *, u_int32_t *, u_int32_t *, int *)); int (*get_dbname) __P((DB *, const char **, const char **)); int (*get_encrypt_flags) __P((DB *, u_int32_t *)); - int (*get_env) __P((DB *, DB_ENV **)); + DB_ENV *(*get_env) __P((DB *)); void (*get_errfile) __P((DB *, FILE **)); void (*get_errpfx) __P((DB *, const char **)); int (*get_flags) __P((DB *, u_int32_t *)); int (*get_lorder) __P((DB *, int *)); int (*get_open_flags) __P((DB *, u_int32_t *)); int (*get_pagesize) __P((DB *, u_int32_t *)); - int (*get_transactional) __P((DB *, int *)); + int (*get_transactional) __P((DB *)); int (*get_type) __P((DB *, DBTYPE *)); int (*join) __P((DB *, DBC **, DBC **, u_int32_t)); int (*key_range) __P((DB *, @@ -1272,15 +1417,20 @@ struct __db { int (*set_dup_compare) __P((DB *, int (*)(DB *, const DBT *, const DBT *))); int (*set_encrypt) __P((DB *, const char *, u_int32_t)); - void (*set_errcall) __P((DB *, void (*)(const char *, char *))); + void (*set_errcall) __P((DB *, + void (*)(const DB_ENV *, const char *, const char *))); void (*set_errfile) __P((DB *, FILE *)); void (*set_errpfx) __P((DB *, const char *)); int (*set_feedback) __P((DB *, void (*)(DB *, int, int))); int (*set_flags) __P((DB *, u_int32_t)); int (*set_lorder) __P((DB *, int)); + void (*set_msgcall) __P((DB *, void (*)(const DB_ENV *, const char *))); + void (*get_msgfile) __P((DB *, FILE **)); + void (*set_msgfile) __P((DB *, FILE *)); int (*set_pagesize) __P((DB *, u_int32_t)); int (*set_paniccall) __P((DB *, void (*)(DB_ENV *, int))); - int (*stat) __P((DB *, void *, u_int32_t)); + int (*stat) __P((DB *, DB_TXN *, void *, u_int32_t)); + int (*stat_print) __P((DB *, u_int32_t)); int (*sync) __P((DB *, u_int32_t)); int (*upgrade) __P((DB *, const char *, u_int32_t)); int (*verify) __P((DB *, @@ -1313,8 +1463,7 @@ struct __db { int (*get_q_extentsize) __P((DB *, u_int32_t *)); int (*set_q_extentsize) __P((DB *, u_int32_t)); - int (*db_am_remove) __P((DB *, - DB_TXN *, const char *, const char *, DB_LSN *)); + int (*db_am_remove) __P((DB *, DB_TXN *, const char *, const char *)); int (*db_am_rename) __P((DB *, DB_TXN *, const char *, const char *, const char *)); @@ -1345,23 +1494,24 @@ struct __db { #define DB_AM_ENCRYPT 0x00000800 /* Encryption. */ #define DB_AM_FIXEDLEN 0x00001000 /* Fixed-length records. */ #define DB_AM_INMEM 0x00002000 /* In-memory; no sync on close. */ -#define DB_AM_IN_RENAME 0x00004000 /* File is being renamed. */ -#define DB_AM_NOT_DURABLE 0x00008000 /* Do not log changes. */ -#define DB_AM_OPEN_CALLED 0x00010000 /* DB->open called. */ -#define DB_AM_PAD 0x00020000 /* Fixed-length record pad. */ -#define DB_AM_PGDEF 0x00040000 /* Page size was defaulted. */ -#define DB_AM_RDONLY 0x00080000 /* Database is readonly. */ -#define DB_AM_RECNUM 0x00100000 /* DB_RECNUM. */ -#define DB_AM_RECOVER 0x00200000 /* DB opened by recovery routine. */ -#define DB_AM_RENUMBER 0x00400000 /* DB_RENUMBER. */ -#define DB_AM_REPLICATION 0x00800000 /* An internal replication file. */ -#define DB_AM_REVSPLITOFF 0x01000000 /* DB_REVSPLITOFF. 
*/ -#define DB_AM_SECONDARY 0x02000000 /* Database is a secondary index. */ -#define DB_AM_SNAPSHOT 0x04000000 /* DB_SNAPSHOT. */ -#define DB_AM_SUBDB 0x08000000 /* Subdatabases supported. */ -#define DB_AM_SWAP 0x10000000 /* Pages need to be byte-swapped. */ -#define DB_AM_TXN 0x20000000 /* Opened in a transaction. */ -#define DB_AM_VERIFYING 0x40000000 /* DB handle is in the verifier. */ +#define DB_AM_INORDER 0x00004000 /* DB_INORDER. */ +#define DB_AM_IN_RENAME 0x00008000 /* File is being renamed. */ +#define DB_AM_NOT_DURABLE 0x00010000 /* Do not log changes. */ +#define DB_AM_OPEN_CALLED 0x00020000 /* DB->open called. */ +#define DB_AM_PAD 0x00040000 /* Fixed-length record pad. */ +#define DB_AM_PGDEF 0x00080000 /* Page size was defaulted. */ +#define DB_AM_RDONLY 0x00100000 /* Database is readonly. */ +#define DB_AM_RECNUM 0x00200000 /* DB_RECNUM. */ +#define DB_AM_RECOVER 0x00400000 /* DB opened by recovery routine. */ +#define DB_AM_RENUMBER 0x00800000 /* DB_RENUMBER. */ +#define DB_AM_REPLICATION 0x01000000 /* An internal replication file. */ +#define DB_AM_REVSPLITOFF 0x02000000 /* DB_REVSPLITOFF. */ +#define DB_AM_SECONDARY 0x04000000 /* Database is a secondary index. */ +#define DB_AM_SNAPSHOT 0x08000000 /* DB_SNAPSHOT. */ +#define DB_AM_SUBDB 0x10000000 /* Subdatabases supported. */ +#define DB_AM_SWAP 0x20000000 /* Pages need to be byte-swapped. */ +#define DB_AM_TXN 0x40000000 /* Opened in a transaction. */ +#define DB_AM_VERIFYING 0x80000000 /* DB handle is in the verifier. */ u_int32_t orig_flags; /* Flags at open, for refresh. */ u_int32_t flags; }; @@ -1470,7 +1620,7 @@ struct __dbc { DB_LOCK_ILOCK lock; /* Object to be locked. */ DB_LOCK mylock; /* CDB lock held on this cursor. */ - long cl_id; /* Remote client id. */ + u_int cl_id; /* Remote client id. */ DBTYPE dbtype; /* Cursor type. */ @@ -1495,16 +1645,17 @@ struct __dbc { #define DBC_ACTIVE 0x0001 /* Cursor in use. */ #define DBC_COMPENSATE 0x0002 /* Cursor compensating, don't lock. */ -#define DBC_DIRTY_READ 0x0004 /* Cursor supports dirty reads. */ -#define DBC_OPD 0x0008 /* Cursor references off-page dups. */ -#define DBC_RECOVER 0x0010 /* Recovery cursor; don't log/lock. */ -#define DBC_RMW 0x0020 /* Acquire write flag in read op. */ -#define DBC_TRANSIENT 0x0040 /* Cursor is transient. */ -#define DBC_WRITECURSOR 0x0080 /* Cursor may be used to write (CDB). */ -#define DBC_WRITER 0x0100 /* Cursor immediately writing (CDB). */ -#define DBC_MULTIPLE 0x0200 /* Return Multiple data. */ -#define DBC_MULTIPLE_KEY 0x0400 /* Return Multiple keys and data. */ -#define DBC_OWN_LID 0x0800 /* Free lock id on destroy. */ +#define DBC_DEGREE_2 0x0004 /* Cursor has degree 2 isolation. */ +#define DBC_DIRTY_READ 0x0008 /* Cursor supports dirty reads. */ +#define DBC_OPD 0x0010 /* Cursor references off-page dups. */ +#define DBC_RECOVER 0x0020 /* Recovery cursor; don't log/lock. */ +#define DBC_RMW 0x0040 /* Acquire write flag in read op. */ +#define DBC_TRANSIENT 0x0080 /* Cursor is transient. */ +#define DBC_WRITECURSOR 0x0100 /* Cursor may be used to write (CDB). */ +#define DBC_WRITER 0x0200 /* Cursor immediately writing (CDB). */ +#define DBC_MULTIPLE 0x0400 /* Return Multiple data. */ +#define DBC_MULTIPLE_KEY 0x0800 /* Return Multiple keys and data. */ +#define DBC_OWN_LID 0x1000 /* Free lock id on destroy. */ u_int32_t flags; }; @@ -1532,6 +1683,7 @@ struct __db_bt_stat { u_int32_t bt_leaf_pg; /* Leaf pages. */ u_int32_t bt_dup_pg; /* Duplicate pages. */ u_int32_t bt_over_pg; /* Overflow pages. 
*/ + u_int32_t bt_empty_pg; /* Empty pages. */ u_int32_t bt_free; /* Pages on the free list. */ u_int32_t bt_int_pgfree; /* Bytes free in internal pages. */ u_int32_t bt_leaf_pgfree; /* Bytes free in leaf pages. */ @@ -1586,10 +1738,16 @@ struct __db_env { /******************************************************* * Public: owned by the application. *******************************************************/ + /* Error message callback. */ + void (*db_errcall) __P((const DB_ENV *, const char *, const char *)); FILE *db_errfile; /* Error message file stream. */ const char *db_errpfx; /* Error message prefix. */ - /* Callbacks. */ - void (*db_errcall) __P((const char *, char *)); + + FILE *db_msgfile; /* Statistics message file stream. */ + /* Statistics message callback. */ + void (*db_msgcall) __P((const DB_ENV *, const char *)); + + /* Other Callbacks. */ void (*db_feedback) __P((DB_ENV *, int, int)); void (*db_paniccall) __P((DB_ENV *, int)); @@ -1603,11 +1761,10 @@ struct __db_env { * entries. There's no reason that it needs to be limited, if * there are ever more than 32 entries, convert to a bit array. */ -#define DB_VERB_CHKPOINT 0x0001 /* List checkpoints. */ -#define DB_VERB_DEADLOCK 0x0002 /* Deadlock detection information. */ -#define DB_VERB_RECOVERY 0x0004 /* Recovery information. */ -#define DB_VERB_REPLICATION 0x0008 /* Replication information. */ -#define DB_VERB_WAITSFOR 0x0010 /* Dump waits-for table. */ +#define DB_VERB_DEADLOCK 0x0001 /* Deadlock detection information. */ +#define DB_VERB_RECOVERY 0x0002 /* Recovery information. */ +#define DB_VERB_REPLICATION 0x0004 /* Replication information. */ +#define DB_VERB_WAITSFOR 0x0008 /* Dump waits-for table. */ u_int32_t verbose; /* Verbose output. */ void *app_private; /* Application-private handle. */ @@ -1617,7 +1774,7 @@ struct __db_env { /* Locking. */ u_int8_t *lk_conflicts; /* Two dimensional conflict matrix. */ - u_int32_t lk_modes; /* Number of lock modes in table. */ + int lk_modes; /* Number of lock modes in table. */ u_int32_t lk_max; /* Maximum number of locks. */ u_int32_t lk_max_lockers;/* Maximum number of lockers. */ u_int32_t lk_max_objects;/* Maximum number of locked objects. */ @@ -1632,9 +1789,9 @@ struct __db_env { /* Memory pool. */ u_int32_t mp_gbytes; /* Cachesize: GB. */ u_int32_t mp_bytes; /* Cachesize: Bytes. */ - size_t mp_size; /* DEPRECATED: Cachesize: bytes. */ - int mp_ncache; /* Number of cache regions. */ + u_int mp_ncache; /* Number of cache regions. */ size_t mp_mmapsize; /* Maximum file size for mmap. */ + int mp_maxopenfd; /* Maximum open file descriptors. */ int mp_maxwrite; /* Maximum buffers to write. */ int /* Sleep after writing max buffers. */ mp_maxwrite_sleep; @@ -1663,6 +1820,8 @@ struct __db_env { int data_next; /* Next Database data file slot. */ int db_mode; /* Default open permissions. */ + int dir_mode; /* Intermediate directory perms. */ + u_int32_t env_lid; /* Locker ID in non-threaded handles. */ u_int32_t open_flags; /* Flags passed to DB_ENV->open. */ void *reginfo; /* REGINFO structure reference. */ @@ -1674,7 +1833,7 @@ struct __db_env { /* Slots in the dispatch table. */ void *cl_handle; /* RPC: remote client handle. */ - long cl_id; /* RPC: remote client env id. */ + u_int cl_id; /* RPC: remote client env id. */ int db_ref; /* DB reference count. 
*/ @@ -1736,10 +1895,18 @@ struct __db_env { const char *, const char *, const char *, u_int32_t)); void (*err) __P((const DB_ENV *, int, const char *, ...)); void (*errx) __P((const DB_ENV *, const char *, ...)); - int (*get_home) __P((DB_ENV *, const char **)); - int (*get_open_flags) __P((DB_ENV *, u_int32_t *)); int (*open) __P((DB_ENV *, const char *, u_int32_t, int)); int (*remove) __P((DB_ENV *, const char *, u_int32_t)); + int (*stat_print) __P((DB_ENV *, u_int32_t)); + + /* House-keeping. */ + int (*fileid_reset) __P((DB_ENV *, char *, int)); + int (*is_bigendian) __P((void)); + int (*lsn_reset) __P((DB_ENV *, char *, int)); + int (*prdbt) __P((DBT *, + int, const char *, void *, int (*)(void *, const void *), int)); + + /* Setters/getters. */ int (*set_alloc) __P((DB_ENV *, void *(*)(size_t), void *(*)(void *, size_t), void (*)(void *))); int (*set_app_dispatch) __P((DB_ENV *, @@ -1748,7 +1915,8 @@ struct __db_env { int (*set_data_dir) __P((DB_ENV *, const char *)); int (*get_encrypt_flags) __P((DB_ENV *, u_int32_t *)); int (*set_encrypt) __P((DB_ENV *, const char *, u_int32_t)); - void (*set_errcall) __P((DB_ENV *, void (*)(const char *, char *))); + void (*set_errcall) __P((DB_ENV *, + void (*)(const DB_ENV *, const char *, const char *))); void (*get_errfile) __P((DB_ENV *, FILE **)); void (*set_errfile) __P((DB_ENV *, FILE *)); void (*get_errpfx) __P((DB_ENV *, const char **)); @@ -1756,11 +1924,18 @@ struct __db_env { int (*set_feedback) __P((DB_ENV *, void (*)(DB_ENV *, int, int))); int (*get_flags) __P((DB_ENV *, u_int32_t *)); int (*set_flags) __P((DB_ENV *, u_int32_t, int)); + int (*get_home) __P((DB_ENV *, const char **)); + int (*set_intermediate_dir) __P((DB_ENV *, int, u_int32_t)); + int (*get_open_flags) __P((DB_ENV *, u_int32_t *)); int (*set_paniccall) __P((DB_ENV *, void (*)(DB_ENV *, int))); int (*set_rpc_server) __P((DB_ENV *, void *, const char *, long, long, u_int32_t)); int (*get_shm_key) __P((DB_ENV *, long *)); int (*set_shm_key) __P((DB_ENV *, long)); + void (*set_msgcall) __P((DB_ENV *, + void (*)(const DB_ENV *, const char *))); + void (*get_msgfile) __P((DB_ENV *, FILE **)); + void (*set_msgfile) __P((DB_ENV *, FILE *)); int (*get_tas_spins) __P((DB_ENV *, u_int32_t *)); int (*set_tas_spins) __P((DB_ENV *, u_int32_t)); int (*get_tmp_dir) __P((DB_ENV *, const char **)); @@ -1783,6 +1958,7 @@ struct __db_env { int (*log_flush) __P((DB_ENV *, const DB_LSN *)); int (*log_put) __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t)); int (*log_stat) __P((DB_ENV *, DB_LOG_STAT **, u_int32_t)); + int (*log_stat_print) __P((DB_ENV *, u_int32_t)); void *lk_handle; /* Lock handle and methods. 
*/ int (*get_lk_conflicts) __P((DB_ENV *, const u_int8_t **, int *)); @@ -1797,13 +1973,13 @@ struct __db_env { int (*get_lk_max_objects) __P((DB_ENV *, u_int32_t *)); int (*set_lk_max_objects) __P((DB_ENV *, u_int32_t)); int (*lock_detect) __P((DB_ENV *, u_int32_t, u_int32_t, int *)); - int (*lock_dump_region) __P((DB_ENV *, const char *, FILE *)); int (*lock_get) __P((DB_ENV *, u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *)); int (*lock_put) __P((DB_ENV *, DB_LOCK *)); int (*lock_id) __P((DB_ENV *, u_int32_t *)); int (*lock_id_free) __P((DB_ENV *, u_int32_t)); int (*lock_stat) __P((DB_ENV *, DB_LOCK_STAT **, u_int32_t)); + int (*lock_stat_print) __P((DB_ENV *, u_int32_t)); int (*lock_vec) __P((DB_ENV *, u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **)); @@ -1812,25 +1988,29 @@ struct __db_env { int (*set_cachesize) __P((DB_ENV *, u_int32_t, u_int32_t, int)); int (*get_mp_mmapsize) __P((DB_ENV *, size_t *)); int (*set_mp_mmapsize) __P((DB_ENV *, size_t)); - int (*get_mp_maxwrite) __P((DB_ENV *, int *, int *)); - int (*set_mp_maxwrite) __P((DB_ENV *, int, int)); - int (*memp_dump_region) __P((DB_ENV *, const char *, FILE *)); + int (*get_mp_max_openfd) __P((DB_ENV *, int *)); + int (*set_mp_max_openfd) __P((DB_ENV *, int)); + int (*get_mp_max_write) __P((DB_ENV *, int *, int *)); + int (*set_mp_max_write) __P((DB_ENV *, int, int)); int (*memp_fcreate) __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t)); int (*memp_register) __P((DB_ENV *, int, int (*)(DB_ENV *, db_pgno_t, void *, DBT *), int (*)(DB_ENV *, db_pgno_t, void *, DBT *))); int (*memp_stat) __P((DB_ENV *, DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, u_int32_t)); + int (*memp_stat_print) __P((DB_ENV *, u_int32_t)); int (*memp_sync) __P((DB_ENV *, DB_LSN *)); int (*memp_trickle) __P((DB_ENV *, int, int *)); void *rep_handle; /* Replication handle and methods. 
*/ - int (*rep_elect) __P((DB_ENV *, int, int, u_int32_t, int *)); + int (*rep_elect) __P((DB_ENV *, int, int, int, + u_int32_t, int *, u_int32_t)); int (*rep_flush) __P((DB_ENV *)); int (*rep_process_message) __P((DB_ENV *, DBT *, DBT *, int *, DB_LSN *)); int (*rep_start) __P((DB_ENV *, DBT *, u_int32_t)); int (*rep_stat) __P((DB_ENV *, DB_REP_STAT **, u_int32_t)); + int (*rep_stat_print) __P((DB_ENV *, u_int32_t)); int (*get_rep_limit) __P((DB_ENV *, u_int32_t *, u_int32_t *)); int (*set_rep_limit) __P((DB_ENV *, u_int32_t, u_int32_t)); int (*set_rep_request) __P((DB_ENV *, u_int32_t, u_int32_t)); @@ -1848,19 +2028,22 @@ struct __db_env { int (*txn_recover) __P((DB_ENV *, DB_PREPLIST *, long, long *, u_int32_t)); int (*txn_stat) __P((DB_ENV *, DB_TXN_STAT **, u_int32_t)); + int (*txn_stat_print) __P((DB_ENV *, u_int32_t)); int (*get_timeout) __P((DB_ENV *, db_timeout_t *, u_int32_t)); int (*set_timeout) __P((DB_ENV *, db_timeout_t, u_int32_t)); #define DB_TEST_ELECTINIT 1 /* after __rep_elect_init */ -#define DB_TEST_POSTDESTROY 2 /* after destroy op */ -#define DB_TEST_POSTLOG 3 /* after logging all pages */ -#define DB_TEST_POSTLOGMETA 4 /* after logging meta in btree */ -#define DB_TEST_POSTOPEN 5 /* after __os_open */ -#define DB_TEST_POSTSYNC 6 /* after syncing the log */ -#define DB_TEST_PREDESTROY 7 /* before destroy op */ -#define DB_TEST_PREOPEN 8 /* before __os_open */ -#define DB_TEST_SUBDB_LOCKS 9 /* subdb locking tests */ +#define DB_TEST_ELECTVOTE1 2 /* after sending VOTE1 */ +#define DB_TEST_POSTDESTROY 3 /* after destroy op */ +#define DB_TEST_POSTLOG 4 /* after logging all pages */ +#define DB_TEST_POSTLOGMETA 5 /* after logging meta in btree */ +#define DB_TEST_POSTOPEN 6 /* after __os_open */ +#define DB_TEST_POSTSYNC 7 /* after syncing the log */ +#define DB_TEST_PREDESTROY 8 /* before destroy op */ +#define DB_TEST_PREOPEN 9 /* before __os_open */ +#define DB_TEST_SUBDB_LOCKS 10 /* subdb locking tests */ int test_abort; /* Abort value for testing. */ + int test_check; /* Checkpoint value for testing. */ int test_copy; /* Copy value for testing. */ #define DB_ENV_AUTO_COMMIT 0x0000001 /* DB_AUTO_COMMIT. */ @@ -1870,25 +2053,26 @@ struct __db_env { #define DB_ENV_DBLOCAL 0x0000010 /* DB_ENV allocated for private DB. */ #define DB_ENV_DIRECT_DB 0x0000020 /* DB_DIRECT_DB set. */ #define DB_ENV_DIRECT_LOG 0x0000040 /* DB_DIRECT_LOG set. */ -#define DB_ENV_FATAL 0x0000080 /* Doing fatal recovery in env. */ -#define DB_ENV_LOCKDOWN 0x0000100 /* DB_LOCKDOWN set. */ -#define DB_ENV_LOG_AUTOREMOVE 0x0000200 /* DB_LOG_AUTOREMOVE set. */ -#define DB_ENV_NOLOCKING 0x0000400 /* DB_NOLOCKING set. */ -#define DB_ENV_NOMMAP 0x0000800 /* DB_NOMMAP set. */ -#define DB_ENV_NOPANIC 0x0001000 /* Okay if panic set. */ -#define DB_ENV_OPEN_CALLED 0x0002000 /* DB_ENV->open called. */ -#define DB_ENV_OVERWRITE 0x0004000 /* DB_OVERWRITE set. */ -#define DB_ENV_PRIVATE 0x0008000 /* DB_PRIVATE set. */ -#define DB_ENV_REGION_INIT 0x0010000 /* DB_REGION_INIT set. */ -#define DB_ENV_RPCCLIENT 0x0020000 /* DB_RPCCLIENT set. */ -#define DB_ENV_RPCCLIENT_GIVEN 0x0040000 /* User-supplied RPC client struct */ -#define DB_ENV_SYSTEM_MEM 0x0080000 /* DB_SYSTEM_MEM set. */ -#define DB_ENV_THREAD 0x0100000 /* DB_THREAD set. */ -#define DB_ENV_TIME_NOTGRANTED 0x0200000 /* DB_TIME_NOTGRANTED set. */ -#define DB_ENV_TXN_NOSYNC 0x0400000 /* DB_TXN_NOSYNC set. */ -#define DB_ENV_TXN_NOT_DURABLE 0x0800000 /* DB_TXN_NOT_DURABLE set. */ -#define DB_ENV_TXN_WRITE_NOSYNC 0x1000000 /* DB_TXN_WRITE_NOSYNC set. 
*/ -#define DB_ENV_YIELDCPU 0x2000000 /* DB_YIELDCPU set. */ +#define DB_ENV_DSYNC_LOG 0x0000080 /* DB_DSYNC_LOG set. */ +#define DB_ENV_FATAL 0x0000100 /* Doing fatal recovery in env. */ +#define DB_ENV_LOCKDOWN 0x0000200 /* DB_LOCKDOWN set. */ +#define DB_ENV_LOG_AUTOREMOVE 0x0000400 /* DB_LOG_AUTOREMOVE set. */ +#define DB_ENV_LOG_INMEMORY 0x0000800 /* DB_LOG_INMEMORY set. */ +#define DB_ENV_NOLOCKING 0x0001000 /* DB_NOLOCKING set. */ +#define DB_ENV_NOMMAP 0x0002000 /* DB_NOMMAP set. */ +#define DB_ENV_NOPANIC 0x0004000 /* Okay if panic set. */ +#define DB_ENV_OPEN_CALLED 0x0008000 /* DB_ENV->open called. */ +#define DB_ENV_OVERWRITE 0x0010000 /* DB_OVERWRITE set. */ +#define DB_ENV_PRIVATE 0x0020000 /* DB_PRIVATE set. */ +#define DB_ENV_REGION_INIT 0x0040000 /* DB_REGION_INIT set. */ +#define DB_ENV_RPCCLIENT 0x0080000 /* DB_RPCCLIENT set. */ +#define DB_ENV_RPCCLIENT_GIVEN 0x0100000 /* User-supplied RPC client struct */ +#define DB_ENV_SYSTEM_MEM 0x0200000 /* DB_SYSTEM_MEM set. */ +#define DB_ENV_THREAD 0x0400000 /* DB_THREAD set. */ +#define DB_ENV_TIME_NOTGRANTED 0x0800000 /* DB_TIME_NOTGRANTED set. */ +#define DB_ENV_TXN_NOSYNC 0x1000000 /* DB_TXN_NOSYNC set. */ +#define DB_ENV_TXN_WRITE_NOSYNC 0x2000000 /* DB_TXN_WRITE_NOSYNC set. */ +#define DB_ENV_YIELDCPU 0x4000000 /* DB_YIELDCPU set. */ u_int32_t flags; }; diff --git a/db/dbinc/db_185.in b/db/dbinc/db_185.in index 88e2cb850..338455a60 100644 --- a/db/dbinc/db_185.in +++ b/db/dbinc/db_185.in @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -32,7 +32,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: db_185.in,v 11.10 2003/04/24 15:41:00 bostic Exp $ + * $Id: db_185.in,v 11.11 2004/01/28 03:36:01 bostic Exp $ */ #ifndef _DB_185_H_ diff --git a/db/dbinc/db_am.h b/db/dbinc/db_am.h index ed1f9f3c8..ed1956c66 100644 --- a/db/dbinc/db_am.h +++ b/db/dbinc/db_am.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: db_am.h,v 11.70 2003/06/30 17:19:50 bostic Exp $ + * $Id: db_am.h,v 11.78 2004/09/22 21:14:56 ubell Exp $ */ #ifndef _DB_AM_H_ #define _DB_AM_H_ @@ -24,8 +24,6 @@ #define DB_REM_DUP 2 #define DB_ADD_BIG 3 #define DB_REM_BIG 4 -#define DB_ADD_PAGE 5 -#define DB_REM_PAGE 6 /* * Standard initialization and shutdown macros for all recovery functions. @@ -34,7 +32,8 @@ argp = NULL; \ dbc = NULL; \ file_dbp = NULL; \ - mpf = NULL; \ + /* mpf isn't used by all of the recovery functions. */ \ + COMPQUIET(mpf, NULL); \ if ((ret = func(dbenv, dbtp->data, &argp)) != 0) \ goto out; \ if ((ret = __dbreg_id_to_db(dbenv, argp->txnid, \ @@ -74,6 +73,31 @@ __os_free(dbenv, argp); \ return (ret) +/* + * Macro for reading pages during recovery. In most cases we + * want to avoid an error if the page is not found during rollback + * or if we are using truncate to remove pages from the file. 
+ */ +#ifndef HAVE_FTRUNCATE +#define REC_FGET(mpf, pgno, pagep, cont) \ + if ((ret = __memp_fget(mpf, &(pgno), 0, pagep)) != 0) { \ + if (ret != DB_PAGE_NOTFOUND || DB_REDO(op)) { \ + ret = __db_pgerr(file_dbp, pgno, ret); \ + goto out; \ + } else \ + goto cont; \ + } +#else +#define REC_FGET(mpf, pgno, pagep, cont) \ + if ((ret = __memp_fget(mpf, &(pgno), 0, pagep)) != 0) { \ + if (ret != DB_PAGE_NOTFOUND) { \ + ret = __db_pgerr(file_dbp, pgno, ret); \ + goto out; \ + } else \ + goto cont; \ + } +#endif + /* * Standard debugging macro for all recovery functions. */ @@ -100,7 +124,10 @@ * we don't tie up the internal pages of the tree longer than necessary. */ #define __LPUT(dbc, lock) \ - (LOCK_ISSET(lock) ? __lock_put((dbc)->dbp->dbenv, &(lock)) : 0) + __ENV_LPUT((dbc)->dbp->dbenv, \ + lock, F_ISSET((dbc)->dbp, DB_AM_DIRTY) ? DB_LOCK_DOWNGRADE : 0) +#define __ENV_LPUT(dbenv, lock, flags) \ + (LOCK_ISSET(lock) ? __lock_put(dbenv, &(lock), flags) : 0) /* * __TLPUT -- transactional lock put diff --git a/db/dbinc/db_cxx.in b/db/dbinc/db_cxx.in index 08fa1ea74..356145765 100644 --- a/db/dbinc/db_cxx.in +++ b/db/dbinc/db_cxx.in @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: db_cxx.in,v 11.134 2003/09/04 19:02:27 bostic Exp $ + * $Id: db_cxx.in,v 11.147 2004/10/07 21:39:48 bostic Exp $ */ #ifndef _DB_CXX_H_ @@ -54,10 +54,12 @@ @cxx_have_stdheaders@ #ifdef HAVE_CXX_STDHEADERS #include -#define __DB_OSTREAMCLASS std::ostream +#include +#define __DB_STD(x) std::x #else #include -#define __DB_OSTREAMCLASS ostream +#include +#define __DB_STD(x) x #endif #include "db.h" @@ -73,40 +75,20 @@ class DbMpoolFile; // forward class DbPreplist; // forward class Dbt; // forward class DbTxn; // forward -class DbDeadlockException; // forward -class DbException; // forward -class DbLockNotGrantedException; // forward class DbLock; // forward -class DbMemoryException; // forward -class DbRunRecoveryException; // forward +class DbSequence; // forward class Dbt; // forward + class DbMultipleIterator; // forward class DbMultipleKeyDataIterator; // forward class DbMultipleRecnoDataIterator; // forward class DbMultipleDataIterator; // forward -// These classes are not defined here and should be invisible -// to the user, but some compilers require forward references. -// There is one for each use of the DEFINE_DB_CLASS macro. - -class DbImp; -class DbEnvImp; -class DbMpoolFileImp; -class DbTxnImp; - -// DEFINE_DB_CLASS defines an imp_ data member and imp() accessor. -// The underlying type is a pointer to an opaque *Imp class, that -// gets converted to the correct implementation class by the implementation. -// -// Since these defines use "private/public" labels, and leave the access -// being "private", we always use these by convention before any data -// members in the private section of a class. Keeping them in the -// private section also emphasizes that they are off limits to user code. 
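
For reference, a minimal sketch of the pattern the new REC_FGET macro in db_am.h encodes, written against the public DB_MPOOLFILE->get method from the db.h hunks above: during undo a missing page is simply skipped, during redo it is an error. The function name and the `undoing` flag are illustrative only, not part of the patch.

#include <db.h>

/*
 * Recovery-style page fetch: tolerate DB_PAGE_NOTFOUND while rolling
 * back (the page may already have been truncated away), fail otherwise.
 */
static int
fetch_page_for_recovery(DB_MPOOLFILE *mpf, db_pgno_t pgno, int undoing, void **pagep)
{
	int ret;

	*pagep = NULL;
	if ((ret = mpf->get(mpf, &pgno, 0, pagep)) == 0)
		return (0);		/* Page found; caller processes it. */
	if (ret == DB_PAGE_NOTFOUND && undoing)
		return (0);		/* Nothing to undo; *pagep stays NULL. */
	return (ret);			/* Hard error (or missing page on redo). */
}
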
-// -#define DEFINE_DB_CLASS(name) \ - public: class name##Imp* imp() { return (imp_); } \ - public: const class name##Imp* constimp() const { return (imp_); } \ - private: class name##Imp* imp_ +class DbException; // forward +class DbDeadlockException; // forward +class DbLockNotGrantedException; // forward +class DbMemoryException; // forward +class DbRunRecoveryException; // forward //////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////// @@ -186,304 +168,194 @@ extern "C" { (DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie); typedef int (*pgout_fcn_type) (DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie); -}; - -//////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////// -// -// Exception classes -// - -// Almost any error in the DB library throws a DbException. -// Every exception should be considered an abnormality -// (e.g. bug, misuse of DB, file system error). -// -// NOTE: We would like to inherit from class exception and -// let it handle what(), but there are -// MSVC++ problems when is included. -// -class _exported DbException -{ -public: - virtual ~DbException(); - DbException(int err); - DbException(const char *description); - DbException(const char *prefix, int err); - DbException(const char *prefix1, const char *prefix2, int err); - int get_errno() const; - virtual const char *what() const; - DbEnv *get_env() const; - void set_env(DbEnv *env); - - DbException(const DbException &); - DbException &operator = (const DbException &); - -private: - char *what_; - int err_; // errno - DbEnv *env_; -}; - -// -// A specific sort of exception that occurs when -// an operation is aborted to resolve a deadlock. -// -class _exported DbDeadlockException : public DbException -{ -public: - virtual ~DbDeadlockException(); - DbDeadlockException(const char *description); - - DbDeadlockException(const DbDeadlockException &); - DbDeadlockException &operator = (const DbDeadlockException &); -}; - -// -// A specific sort of exception that occurs when -// a lock is not granted, e.g. by lock_get or lock_vec. -// Note that the Dbt is only live as long as the Dbt used -// in the offending call. -// -class _exported DbLockNotGrantedException : public DbException -{ -public: - virtual ~DbLockNotGrantedException(); - DbLockNotGrantedException(const char *prefix, db_lockop_t op, - db_lockmode_t mode, const Dbt *obj, const DbLock lock, int index); - DbLockNotGrantedException(const char *description); - DbLockNotGrantedException(const DbLockNotGrantedException &); - DbLockNotGrantedException &operator = - (const DbLockNotGrantedException &); - - db_lockop_t get_op() const; - db_lockmode_t get_mode() const; - const Dbt* get_obj() const; - DbLock *get_lock() const; - int get_index() const; - -private: - db_lockop_t op_; - db_lockmode_t mode_; - const Dbt *obj_; - DbLock *lock_; - int index_; -}; +} // -// A specific sort of exception that occurs when -// user declared memory is insufficient in a Dbt. 
-// -class _exported DbMemoryException : public DbException -{ -public: - virtual ~DbMemoryException(); - DbMemoryException(Dbt *dbt); - DbMemoryException(const char *description); - DbMemoryException(const char *prefix, Dbt *dbt); - DbMemoryException(const char *prefix1, const char *prefix2, Dbt *dbt); - Dbt *get_dbt() const; - - DbMemoryException(const DbMemoryException &); - DbMemoryException &operator = (const DbMemoryException &); - -private: - Dbt *dbt_; -}; - -// -// A specific sort of exception that occurs when -// recovery is required before continuing DB activity. -// -class _exported DbRunRecoveryException : public DbException -{ -public: - virtual ~DbRunRecoveryException(); - DbRunRecoveryException(const char *description); - - DbRunRecoveryException(const DbRunRecoveryException &); - DbRunRecoveryException &operator = (const DbRunRecoveryException &); -}; - -//////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////// -// -// Lock classes -// - -class _exported DbLock -{ - friend class DbEnv; - -public: - DbLock(); - DbLock(const DbLock &); - DbLock &operator = (const DbLock &); - -protected: - // We can add data to this class if needed - // since its contained class is not allocated by db. - // (see comment at top) - - DbLock(DB_LOCK); - DB_LOCK lock_; -}; - -//////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////// -// -// Log classes -// - -class _exported DbLsn : public DB_LSN -{ - friend class DbEnv; // friendship needed to cast to base class - friend class DbLogc; // friendship needed to cast to base class -}; - -//////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////// -// -// Memory pool classes +// Represents a database table = a set of keys with associated values. // - -class _exported DbMpoolFile +class _exported Db { friend class DbEnv; - friend class Db; - -private: - // Put this first to allow inlining with some C++ compilers (g++-2.95) - DEFINE_DB_CLASS(DbMpoolFile); public: - int close(u_int32_t flags); - int get(db_pgno_t *pgnoaddr, u_int32_t flags, void *pagep); - int open(const char *file, u_int32_t flags, int mode, size_t pagesize); - int put(void *pgaddr, u_int32_t flags); - int set(void *pgaddr, u_int32_t flags); - int get_clear_len(u_int32_t *len); - int set_clear_len(u_int32_t len); - int get_fileid(u_int8_t *fileid); - int set_fileid(u_int8_t *fileid); - int get_flags(u_int32_t *flagsp); - int set_flags(u_int32_t flags, int onoff); - int get_ftype(int *ftype); - int set_ftype(int ftype); - int get_lsn_offset(int32_t *offsetp); - int set_lsn_offset(int32_t offset); - int get_maxsize(u_int32_t *gbytes, u_int32_t *bytes); - int set_maxsize(u_int32_t gbytes, u_int32_t bytes); - int get_pgcookie(DBT *dbt); - int set_pgcookie(DBT *dbt); - int get_priority(DB_CACHE_PRIORITY *priorityp); - int set_priority(DB_CACHE_PRIORITY priority); - int sync(); - - virtual DB_MPOOLFILE *get_DB_MPOOLFILE() - { - return (DB_MPOOLFILE *)imp(); - } - - virtual const DB_MPOOLFILE *get_const_DB_MPOOLFILE() const - { - return (const DB_MPOOLFILE *)constimp(); - } - -private: - // We can add data to this class if needed - // since it is implemented via a pointer. - // (see comment at top) + Db(DbEnv*, u_int32_t); // create a Db object, then call open() + virtual ~Db(); // does *not* call close. 
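
The DB_SEQUENCE handle added to db.h earlier in this patchset is easiest to read alongside a usage sketch. The method calls below use the signatures listed in struct __db_sequence; the db_sequence_create() entry point and the DB_CREATE open flag are assumed, since they do not appear in the hunks quoted here.

#include <string.h>
#include <db.h>

/* Fetch the next value from a named sequence stored in an open DB handle. */
static int
next_id(DB *dbp, db_seq_t *idp)
{
	DB_SEQUENCE *seq;
	DBT key;
	int ret;

	/* db_sequence_create() is assumed; it is not shown in this patch. */
	if ((ret = db_sequence_create(&seq, dbp, 0)) != 0)
		return (ret);

	memset(&key, 0, sizeof(key));
	key.data = "my_sequence";
	key.size = (u_int32_t)strlen("my_sequence");

	/* Cache 100 values per fetch; increment and wrap at the range ends. */
	if ((ret = seq->set_cachesize(seq, 100)) != 0 ||
	    (ret = seq->set_flags(seq, DB_SEQ_INC | DB_SEQ_WRAP)) != 0 ||
	    (ret = seq->initial_value(seq, 1)) != 0 ||
	    (ret = seq->open(seq, NULL, &key, DB_CREATE)) != 0 ||
	    (ret = seq->get(seq, NULL, 1, idp, 0)) != 0) {
		(void)seq->close(seq, 0);
		return (ret);
	}
	return (seq->close(seq, 0));
}
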
- // Note: use DbEnv::memp_fcreate() to get pointers to a DbMpoolFile, - // and call DbMpoolFile::close() rather than delete to release them. + // These methods exactly match those in the C interface. // - DbMpoolFile(); - - // Shut g++ up. -protected: - virtual ~DbMpoolFile(); - -private: - // no copying - DbMpoolFile(const DbMpoolFile &); - void operator = (const DbMpoolFile &); -}; - -//////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////// -// -// This is filled in and returned by the DbEnv::txn_recover() method. -// - -class _exported DbPreplist -{ -public: - DbTxn *txn; - u_int8_t gid[DB_XIDDATASIZE]; -}; - -//////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////// -// -// Transaction classes -// - -class _exported DbTxn -{ - friend class DbEnv; + virtual int associate(DbTxn *txn, Db *secondary, + int (*callback)(Db *, const Dbt *, const Dbt *, Dbt *), + u_int32_t flags); + virtual int close(u_int32_t flags); + virtual int cursor(DbTxn *txnid, Dbc **cursorp, u_int32_t flags); + virtual int del(DbTxn *txnid, Dbt *key, u_int32_t flags); + virtual void err(int, const char *, ...); + virtual void errx(const char *, ...); + virtual int fd(int *fdp); + virtual int get(DbTxn *txnid, Dbt *key, Dbt *data, u_int32_t flags); + virtual void *get_app_private() const; + virtual int get_byteswapped(int *); + virtual int get_dbname(const char **, const char **); + virtual int get_open_flags(u_int32_t *); + virtual int get_type(DBTYPE *); + virtual int get_transactional(); + virtual int join(Dbc **curslist, Dbc **dbcp, u_int32_t flags); + virtual int key_range(DbTxn *, Dbt *, DB_KEY_RANGE *, u_int32_t); + virtual int open(DbTxn *txnid, + const char *, const char *subname, DBTYPE, u_int32_t, int); + virtual int pget(DbTxn *txnid, Dbt *key, Dbt *pkey, Dbt *data, + u_int32_t flags); + virtual int put(DbTxn *, Dbt *, Dbt *, u_int32_t); + virtual int remove(const char *, const char *, u_int32_t); + virtual int rename(const char *, const char *, const char *, u_int32_t); + virtual int set_alloc(db_malloc_fcn_type, db_realloc_fcn_type, + db_free_fcn_type); + virtual void set_app_private(void *); + virtual int set_append_recno(int (*)(Db *, Dbt *, db_recno_t)); + virtual int set_bt_compare(bt_compare_fcn_type); /*deprecated*/ + virtual int set_bt_compare(int (*)(Db *, const Dbt *, const Dbt *)); + virtual int set_bt_maxkey(u_int32_t); + virtual int get_bt_minkey(u_int32_t *); + virtual int set_bt_minkey(u_int32_t); + virtual int set_bt_prefix(bt_prefix_fcn_type); /*deprecated*/ + virtual int set_bt_prefix(size_t (*)(Db *, const Dbt *, const Dbt *)); + virtual int get_cachesize(u_int32_t *, u_int32_t *, int *); + virtual int set_cachesize(u_int32_t, u_int32_t, int); + virtual int set_dup_compare(dup_compare_fcn_type); /*deprecated*/ + virtual int set_dup_compare(int (*)(Db *, const Dbt *, const Dbt *)); + virtual int get_encrypt_flags(u_int32_t *); + virtual int set_encrypt(const char *, u_int32_t); + virtual void set_errcall( + void (*)(const DbEnv *, const char *, const char *)); + virtual void get_errfile(FILE **); + virtual void set_errfile(FILE *); + virtual void get_errpfx(const char **); + virtual void set_errpfx(const char *); + virtual int set_feedback(void (*)(Db *, int, int)); + virtual int get_flags(u_int32_t *); + virtual int set_flags(u_int32_t); + virtual int get_h_ffactor(u_int32_t *); + virtual int set_h_ffactor(u_int32_t); + virtual int 
set_h_hash(h_hash_fcn_type); /*deprecated*/ + virtual int set_h_hash(u_int32_t (*)(Db *, const void *, u_int32_t)); + virtual int get_h_nelem(u_int32_t *); + virtual int set_h_nelem(u_int32_t); + virtual int get_lorder(int *); + virtual int set_lorder(int); + virtual void set_msgcall(void (*)(const DbEnv *, const char *)); + virtual void get_msgfile(FILE **); + virtual void set_msgfile(FILE *); + virtual int get_pagesize(u_int32_t *); + virtual int set_pagesize(u_int32_t); + virtual int set_paniccall(void (*)(DbEnv *, int)); + virtual int get_re_delim(int *); + virtual int set_re_delim(int); + virtual int get_re_len(u_int32_t *); + virtual int set_re_len(u_int32_t); + virtual int get_re_pad(int *); + virtual int set_re_pad(int); + virtual int get_re_source(const char **); + virtual int set_re_source(const char *); + virtual int get_q_extentsize(u_int32_t *); + virtual int set_q_extentsize(u_int32_t); + virtual int stat(DbTxn *, void *sp, u_int32_t flags); + virtual int stat_print(u_int32_t flags); + virtual int sync(u_int32_t flags); + virtual int truncate(DbTxn *, u_int32_t *, u_int32_t); + virtual int upgrade(const char *name, u_int32_t flags); + virtual int verify(const char *, const char *, __DB_STD(ostream) *, + u_int32_t); -private: - // Put this first to allow inlining with some C++ compilers (g++-2.95) - DEFINE_DB_CLASS(DbTxn); + // These additional methods are not in the C interface, and + // are only available for C++. + // + virtual __DB_STD(ostream) *get_error_stream(); + virtual void set_error_stream(__DB_STD(ostream) *); + virtual __DB_STD(ostream) *get_message_stream(); + virtual void set_message_stream(__DB_STD(ostream) *); -public: - int abort(); - int commit(u_int32_t flags); - int discard(u_int32_t flags); - u_int32_t id(); - int prepare(u_int8_t *gid); - int set_timeout(db_timeout_t timeout, u_int32_t flags); + virtual DbEnv *get_env(); + virtual DbMpoolFile *get_mpf(); - virtual DB_TXN *get_DB_TXN() + virtual DB *get_DB() { - return (DB_TXN *)imp(); + return imp_; } - virtual const DB_TXN *get_const_DB_TXN() const + virtual const DB *get_const_DB() const { - return (const DB_TXN *)constimp(); + return imp_; } - static DbTxn* get_DbTxn(DB_TXN *txn) + static Db* get_Db(DB *db) { - return (DbTxn *)txn->api_internal; + return (Db *)db->api_internal; } - static const DbTxn* get_const_DbTxn(const DB_TXN *txn) + static const Db* get_const_Db(const DB *db) { - return (const DbTxn *)txn->api_internal; + return (const Db *)db->api_internal; } - // For internal use only. - static DbTxn* wrap_DB_TXN(DB_TXN *txn); +private: + // no copying + Db(const Db &); + Db &operator = (const Db &); + + void cleanup(); + int initialize(); + int error_policy(); + + // instance data + DB *imp_; + DbEnv *env_; + DbMpoolFile *mpf_; + int construct_error_; + u_int32_t flags_; + u_int32_t construct_flags_; + +public: + // These are public only because they need to be called + // via C callback functions. They should never be used by + // external users of this class. 
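
Both the C DB struct in db.h and the C++ Db class in this hunk pick up the new statistics interface: stat() now threads a transaction handle through, and stat_print() dumps statistics via the new message channel. A small sketch against the C API; the DB_BTREE_STAT typedef and freeing the returned block with free() follow the usual conventions and are assumed rather than quoted here.

#include <stdio.h>
#include <stdlib.h>
#include <db.h>

/* Print a few btree statistics for an already-open database handle. */
static int
show_btree_stats(DB *dbp)
{
	DB_BTREE_STAT *sp;
	int ret;

	/* The new signature: DB->stat(db, txn, &statp, flags). */
	if ((ret = dbp->stat(dbp, NULL, &sp, 0)) != 0)
		return (ret);
	printf("leaf pages %lu, overflow pages %lu, empty pages %lu\n",
	    (unsigned long)sp->bt_leaf_pg, (unsigned long)sp->bt_over_pg,
	    (unsigned long)sp->bt_empty_pg);
	free(sp);		/* stat() allocates the block for the caller. */

	/* New in this release: dump everything through the message callback/file. */
	return (dbp->stat_print(dbp, 0));
}
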
+ // + int (*append_recno_callback_)(Db *, Dbt *, db_recno_t); + int (*associate_callback_)(Db *, const Dbt *, const Dbt *, Dbt *); + int (*bt_compare_callback_)(Db *, const Dbt *, const Dbt *); + size_t (*bt_prefix_callback_)(Db *, const Dbt *, const Dbt *); + int (*dup_compare_callback_)(Db *, const Dbt *, const Dbt *); + void (*feedback_callback_)(Db *, int, int); + u_int32_t (*h_hash_callback_)(Db *, const void *, u_int32_t); +}; + +// +// Cursor +// +class _exported Dbc : protected DBC +{ + friend class Db; + +public: + int close(); + int count(db_recno_t *countp, u_int32_t flags); + int del(u_int32_t flags); + int dup(Dbc** cursorp, u_int32_t flags); + int get(Dbt* key, Dbt *data, u_int32_t flags); + int pget(Dbt* key, Dbt* pkey, Dbt *data, u_int32_t flags); + int put(Dbt* key, Dbt *data, u_int32_t flags); private: - // We can add data to this class if needed - // since it is implemented via a pointer. - // (see comment at top) + // No data is permitted in this class (see comment at top) - // Note: use DbEnv::txn_begin() to get pointers to a DbTxn, - // and call DbTxn::abort() or DbTxn::commit rather than - // delete to release them. + // Note: use Db::cursor() to get pointers to a Dbc, + // and call Dbc::close() rather than delete to release them. // - DbTxn(); - // For internal use only. - DbTxn(DB_TXN *txn); - virtual ~DbTxn(); + Dbc(); + ~Dbc(); // no copying - DbTxn(const DbTxn &); - void operator = (const DbTxn &); + Dbc(const Dbc &); + Dbc &operator = (const Dbc &); }; // @@ -501,10 +373,6 @@ class _exported DbEnv friend class DbLock; friend class DbMpoolFile; -private: - // Put this first to allow inlining with some C++ compilers (g++-2.95) - DEFINE_DB_CLASS(DbEnv); - public: // After using this constructor, you can set any needed // parameters for the environment using the set_* methods. @@ -530,7 +398,7 @@ public: virtual int open(const char *, u_int32_t, int); virtual int remove(const char *, u_int32_t); virtual int set_alloc(db_malloc_fcn_type, db_realloc_fcn_type, - db_free_fcn_type); + db_free_fcn_type); virtual void set_app_private(void *); virtual int get_cachesize(u_int32_t *, u_int32_t *, int *); virtual int set_cachesize(u_int32_t, u_int32_t, int); @@ -538,7 +406,8 @@ public: virtual int set_data_dir(const char *); virtual int get_encrypt_flags(u_int32_t *); virtual int set_encrypt(const char *, u_int32_t); - virtual void set_errcall(void (*)(const char *, char *)); + virtual void set_errcall( + void (*)(const DbEnv *, const char *, const char *)); virtual void get_errfile(FILE **); virtual void set_errfile(FILE *); virtual void get_errpfx(const char **); @@ -567,6 +436,9 @@ public: virtual int set_lk_max_objects(u_int32_t); virtual int get_mp_mmapsize(size_t *); virtual int set_mp_mmapsize(size_t); + virtual void set_msgcall(void (*)(const DbEnv *, const char *)); + virtual void get_msgfile(FILE **); + virtual void set_msgfile(FILE *); virtual int set_paniccall(void (*)(DbEnv *, int)); virtual int set_rpc_server(void *, char *, long, long, u_int32_t); virtual int get_shm_key(long *); @@ -602,7 +474,10 @@ public: // set_error_stream() to force all errors to a C++ stream. // It is unwise to mix these approaches. 
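
The callback signature change is the same on the C and C++ sides: error callbacks now receive the environment handle and a const prefix, and a parallel message channel (set_msgcall/set_msgfile) carries the output of the new *_stat_print methods. A sketch against the C DB_ENV methods shown above; the surrounding environment setup is assumed.

#include <stdio.h>
#include <db.h>

/* New-style error callback: the prefix is passed in, const-qualified. */
static void
my_errcall(const DB_ENV *dbenv, const char *errpfx, const char *msg)
{
	fprintf(stderr, "%s: %s\n", errpfx == NULL ? "bdb" : errpfx, msg);
}

/* Informational output, e.g. from DB_ENV->lock_stat_print and friends. */
static void
my_msgcall(const DB_ENV *dbenv, const char *msg)
{
	printf("%s\n", msg);
}

static void
install_callbacks(DB_ENV *dbenv)
{
	dbenv->set_errpfx(dbenv, "myapp");
	dbenv->set_errcall(dbenv, my_errcall);
	dbenv->set_msgcall(dbenv, my_msgcall);
}
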
// - virtual void set_error_stream(__DB_OSTREAMCLASS *); + virtual __DB_STD(ostream) *get_error_stream(); + virtual void set_error_stream(__DB_STD(ostream) *); + virtual __DB_STD(ostream) *get_message_stream(); + virtual void set_message_stream(__DB_STD(ostream) *); // used internally static void runtime_error(DbEnv *env, const char *caller, int err, @@ -623,6 +498,7 @@ public: virtual int lock_id_free(u_int32_t id); virtual int lock_put(DbLock *lock); virtual int lock_stat(DB_LOCK_STAT **statp, u_int32_t flags); + virtual int lock_stat_print(u_int32_t flags); virtual int lock_vec(u_int32_t locker, u_int32_t flags, DB_LOCKREQ list[], int nlist, DB_LOCKREQ **elistp); @@ -636,6 +512,7 @@ public: virtual int log_put(DbLsn *lsn, const Dbt *data, u_int32_t flags); virtual int log_stat(DB_LOG_STAT **spp, u_int32_t flags); + virtual int log_stat_print(u_int32_t flags); // Mpool functions // @@ -645,6 +522,7 @@ public: pgout_fcn_type pgout_fcn); virtual int memp_stat(DB_MPOOL_STAT **gsp, DB_MPOOL_FSTAT ***fsp, u_int32_t flags); + virtual int memp_stat_print(u_int32_t flags); virtual int memp_sync(DbLsn *lsn); virtual int memp_trickle(int pct, int *nwrotep); @@ -656,13 +534,15 @@ public: virtual int txn_recover(DbPreplist *preplist, long count, long *retp, u_int32_t flags); virtual int txn_stat(DB_TXN_STAT **statp, u_int32_t flags); + virtual int txn_stat_print(u_int32_t flags); // Replication functions // - virtual int rep_elect(int, int, u_int32_t, int *); + virtual int rep_elect(int, int, int, u_int32_t, int *, u_int32_t); virtual int rep_process_message(Dbt *, Dbt *, int *, DbLsn *); virtual int rep_start(Dbt *, u_int32_t); virtual int rep_stat(DB_REP_STAT **statp, u_int32_t flags); + virtual int rep_stat_print(u_int32_t flags); virtual int get_rep_limit(u_int32_t *, u_int32_t *); virtual int set_rep_limit(u_int32_t, u_int32_t); virtual int set_rep_transport(int, int (*)(DbEnv *, @@ -672,22 +552,22 @@ public: // virtual DB_ENV *get_DB_ENV() { - return (DB_ENV *)imp(); + return imp_; } virtual const DB_ENV *get_const_DB_ENV() const { - return (const DB_ENV *)constimp(); + return imp_; } static DbEnv* get_DbEnv(DB_ENV *dbenv) { - return (DbEnv *)dbenv->api1_internal; + return dbenv ? (DbEnv *)dbenv->api1_internal : 0; } static const DbEnv* get_const_DbEnv(const DB_ENV *dbenv) { - return (const DbEnv *)dbenv->api1_internal; + return dbenv ? (const DbEnv *)dbenv->api1_internal : 0; } // For internal use only. @@ -697,15 +577,19 @@ public: // via C functions. They should never be called by users // of this class. 
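
rep_elect() gains an argument in this release: callers now pass the number of votes required in addition to the number of sites, matching the C prototype added to db.h above. The parameter names below (nsites, nvotes, priority, timeout) are conventional guesses; the hunks show only the types.

#include <stdio.h>
#include <db.h>

/* Call for an election, requiring a simple majority, and report the winner. */
static int
call_election(DB_ENV *dbenv, int nsites, int priority, u_int32_t timeout)
{
	int eid, nvotes, ret;

	nvotes = nsites / 2 + 1;	/* Simple majority of the replication group. */
	if ((ret = dbenv->rep_elect(dbenv,
	    nsites, nvotes, priority, timeout, &eid, 0)) != 0)
		return (ret);
	printf("election winner: environment id %d\n", eid);
	return (0);
}
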
// - static void _stream_error_function(const char *, char *); static int _app_dispatch_intercept(DB_ENV *env, DBT *dbt, DB_LSN *lsn, - db_recops op); + db_recops op); static void _paniccall_intercept(DB_ENV *env, int errval); static void _feedback_intercept(DB_ENV *env, int opcode, int pct); static int _rep_send_intercept(DB_ENV *env, const DBT *cntrl, const DBT *data, const DB_LSN *lsn, int id, u_int32_t flags); + static void _stream_error_function(const DB_ENV *env, + const char *prefix, + const char *message); + static void _stream_message_function(const DB_ENV *env, + const char *message); private: void cleanup(); @@ -720,10 +604,16 @@ private: void operator = (const DbEnv &); // instance data + DB_ENV *imp_; int construct_error_; u_int32_t construct_flags_; + __DB_STD(ostream) *error_stream_; + __DB_STD(ostream) *message_stream_; + int (*app_dispatch_callback_)(DbEnv *, Dbt *, DbLsn *, db_recops); + void (*error_callback_)(const DbEnv *, const char *, const char *); void (*feedback_callback_)(DbEnv *, int, int); + void (*message_callback_)(const DbEnv *, const char *); void (*paniccall_callback_)(DbEnv *, int); int (*pgin_callback_)(DbEnv *dbenv, db_pgno_t pgno, void *pgaddr, Dbt *pgcookie); @@ -731,167 +621,255 @@ private: void *pgaddr, Dbt *pgcookie); int (*rep_send_callback_)(DbEnv *, const Dbt *, const Dbt *, const DbLsn *, int, u_int32_t); - - // class data - static __DB_OSTREAMCLASS *error_stream_; }; -//////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////// // -// Table access classes +// Lock // +class _exported DbLock +{ + friend class DbEnv; + +public: + DbLock(); + DbLock(const DbLock &); + DbLock &operator = (const DbLock &); + +protected: + // We can add data to this class if needed + // since its contained class is not allocated by db. + // (see comment at top) + + DbLock(DB_LOCK); + DB_LOCK lock_; +}; // -// Represents a database table = a set of keys with associated values. +// Log cursor // -class _exported Db +class _exported DbLogc : protected DB_LOGC { friend class DbEnv; +public: + int close(u_int32_t _flags); + int get(DbLsn *lsn, Dbt *data, u_int32_t _flags); + private: - // Put this first to allow inlining with some C++ compilers (g++-2.95) - DEFINE_DB_CLASS(Db); + // No data is permitted in this class (see comment at top) + + // Note: use Db::cursor() to get pointers to a Dbc, + // and call Dbc::close() rather than delete to release them. + // + DbLogc(); + ~DbLogc(); + + // no copying + DbLogc(const Dbc &); + DbLogc &operator = (const Dbc &); +}; + +// +// Log sequence number +// +class _exported DbLsn : public DB_LSN +{ + friend class DbEnv; // friendship needed to cast to base class + friend class DbLogc; // friendship needed to cast to base class +}; + +// +// Memory pool file +// +class _exported DbMpoolFile +{ + friend class DbEnv; + friend class Db; public: - Db(DbEnv*, u_int32_t); // create a Db object, then call open() - virtual ~Db(); // does *not* call close. 
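DbLogc above follows the same rule as Dbc: no data members, so a DB_LOGC handle can be cast directly to it, and it is obtained from the environment and released with close(). A sketch of walking the log; DbEnv::log_cursor() is assumed here since it is not part of the hunks shown, and the loop body is illustrative.

#include <db_cxx.h>

int scan_log(DbEnv &env)                        /* assumes an open, logging environment */
{
        DbLogc *logc;
        int ret;

        if ((ret = env.log_cursor(&logc, 0)) != 0)
                return (ret);

        DbLsn lsn;
        Dbt data;
        for (ret = logc->get(&lsn, &data, DB_FIRST); ret == 0;
            ret = logc->get(&lsn, &data, DB_NEXT))
                ;                               /* examine lsn and the raw record here */

        (void)logc->close(0);
        return (ret == DB_NOTFOUND ? 0 : ret);
}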
+ int close(u_int32_t flags); + int get(db_pgno_t *pgnoaddr, u_int32_t flags, void *pagep); + int open(const char *file, u_int32_t flags, int mode, size_t pagesize); + int get_transactional(void); + int put(void *pgaddr, u_int32_t flags); + int set(void *pgaddr, u_int32_t flags); + int get_clear_len(u_int32_t *len); + int set_clear_len(u_int32_t len); + int get_fileid(u_int8_t *fileid); + int set_fileid(u_int8_t *fileid); + int get_flags(u_int32_t *flagsp); + int set_flags(u_int32_t flags, int onoff); + int get_ftype(int *ftype); + int set_ftype(int ftype); + int get_lsn_offset(int32_t *offsetp); + int set_lsn_offset(int32_t offset); + int get_maxsize(u_int32_t *gbytes, u_int32_t *bytes); + int set_maxsize(u_int32_t gbytes, u_int32_t bytes); + int get_pgcookie(DBT *dbt); + int set_pgcookie(DBT *dbt); + int get_priority(DB_CACHE_PRIORITY *priorityp); + int set_priority(DB_CACHE_PRIORITY priority); + int sync(); - // These methods exactly match those in the C interface. + virtual DB_MPOOLFILE *get_DB_MPOOLFILE() + { + return imp_; + } + + virtual const DB_MPOOLFILE *get_const_DB_MPOOLFILE() const + { + return imp_; + } + +private: + DB_MPOOLFILE *imp_; + + // We can add data to this class if needed + // since it is implemented via a pointer. + // (see comment at top) + + // Note: use DbEnv::memp_fcreate() to get pointers to a DbMpoolFile, + // and call DbMpoolFile::close() rather than delete to release them. // - virtual int associate(DbTxn *txn, Db *secondary, - int (*callback)(Db *, const Dbt *, const Dbt *, Dbt *), - u_int32_t flags); - virtual int close(u_int32_t flags); - virtual int cursor(DbTxn *txnid, Dbc **cursorp, u_int32_t flags); - virtual int del(DbTxn *txnid, Dbt *key, u_int32_t flags); - virtual void err(int, const char *, ...); - virtual void errx(const char *, ...); - virtual int fd(int *fdp); - virtual int get(DbTxn *txnid, Dbt *key, Dbt *data, u_int32_t flags); - virtual void *get_app_private() const; - virtual int get_byteswapped(int *); - virtual int get_dbname(const char **, const char **); - virtual int get_open_flags(u_int32_t *); - virtual int get_type(DBTYPE *); - virtual int join(Dbc **curslist, Dbc **dbcp, u_int32_t flags); - virtual int key_range(DbTxn *, Dbt *, DB_KEY_RANGE *, u_int32_t); - virtual int open(DbTxn *txnid, - const char *, const char *subname, DBTYPE, u_int32_t, int); - virtual int pget(DbTxn *txnid, Dbt *key, Dbt *pkey, Dbt *data, - u_int32_t flags); - virtual int put(DbTxn *, Dbt *, Dbt *, u_int32_t); - virtual int remove(const char *, const char *, u_int32_t); - virtual int rename(const char *, const char *, const char *, u_int32_t); - virtual int set_alloc(db_malloc_fcn_type, db_realloc_fcn_type, - db_free_fcn_type); - virtual void set_app_private(void *); - virtual int set_append_recno(int (*)(Db *, Dbt *, db_recno_t)); - virtual int set_bt_compare(bt_compare_fcn_type); /*deprecated*/ - virtual int set_bt_compare(int (*)(Db *, const Dbt *, const Dbt *)); - virtual int set_bt_maxkey(u_int32_t); - virtual int get_bt_minkey(u_int32_t *); - virtual int set_bt_minkey(u_int32_t); - virtual int set_bt_prefix(bt_prefix_fcn_type); /*deprecated*/ - virtual int set_bt_prefix(size_t (*)(Db *, const Dbt *, const Dbt *)); - virtual int get_cachesize(u_int32_t *, u_int32_t *, int *); - virtual int set_cachesize(u_int32_t, u_int32_t, int); - virtual int set_dup_compare(dup_compare_fcn_type); /*deprecated*/ - virtual int set_dup_compare(int (*)(Db *, const Dbt *, const Dbt *)); - virtual int get_encrypt_flags(u_int32_t *); - virtual int set_encrypt(const char 
*, u_int32_t); - virtual void set_errcall(void (*)(const char *, char *)); - virtual void get_errfile(FILE **); - virtual void set_errfile(FILE *); - virtual void get_errpfx(const char **); - virtual void set_errpfx(const char *); - virtual int set_feedback(void (*)(Db *, int, int)); - virtual int get_flags(u_int32_t *); - virtual int set_flags(u_int32_t); - virtual int get_h_ffactor(u_int32_t *); - virtual int set_h_ffactor(u_int32_t); - virtual int set_h_hash(h_hash_fcn_type); /*deprecated*/ - virtual int set_h_hash(u_int32_t (*)(Db *, const void *, u_int32_t)); - virtual int get_h_nelem(u_int32_t *); - virtual int set_h_nelem(u_int32_t); - virtual int get_lorder(int *); - virtual int set_lorder(int); - virtual int get_pagesize(u_int32_t *); - virtual int set_pagesize(u_int32_t); - virtual int set_paniccall(void (*)(DbEnv *, int)); - virtual int get_re_delim(int *); - virtual int set_re_delim(int); - virtual int get_re_len(u_int32_t *); - virtual int set_re_len(u_int32_t); - virtual int get_re_pad(int *); - virtual int set_re_pad(int); - virtual int get_re_source(const char **); - virtual int set_re_source(const char *); - virtual int get_q_extentsize(u_int32_t *); - virtual int set_q_extentsize(u_int32_t); - virtual int stat(void *sp, u_int32_t flags); - virtual int sync(u_int32_t flags); - virtual int truncate(DbTxn *, u_int32_t *, u_int32_t); - virtual int upgrade(const char *name, u_int32_t flags); - virtual int verify(const char *, const char *, __DB_OSTREAMCLASS *, - u_int32_t); + DbMpoolFile(); + + // Shut g++ up. +protected: + virtual ~DbMpoolFile(); + +private: + // no copying + DbMpoolFile(const DbMpoolFile &); + void operator = (const DbMpoolFile &); +}; - // These additional methods are not in the C interface, and - // are only available for C++. - // - virtual void set_error_stream(__DB_OSTREAMCLASS *); +// +// This is filled in and returned by the DbEnv::txn_recover() method. +// +class _exported DbPreplist +{ +public: + DbTxn *txn; + u_int8_t gid[DB_XIDDATASIZE]; +}; - virtual DbEnv *get_env(); - virtual DbMpoolFile *get_mpf(); +// +// A sequence record in a database +// +class _exported DbSequence +{ +public: + DbSequence(Db *db, u_int32_t flags); + virtual ~DbSequence(); - virtual DB *get_DB() + int open(DbTxn *txnid, Dbt *key, u_int32_t flags); + int initial_value(db_seq_t value); + int close(u_int32_t flags); + int remove(DbTxn *txnid, u_int32_t flags); + int stat(DB_SEQUENCE_STAT **sp, u_int32_t flags); + int stat_print(u_int32_t flags); + + int get(DbTxn *txnid, int32_t delta, db_seq_t *retp, u_int32_t flags); + int get_cachesize(int32_t *sizep); + int set_cachesize(int32_t size); + int get_flags(u_int32_t *flagsp); + int set_flags(u_int32_t flags); + int get_range(db_seq_t *minp, db_seq_t *maxp); + int set_range(db_seq_t min, db_seq_t max); + + Db *get_db(); + Dbt *get_key(); + + virtual DB_SEQUENCE *get_DB_SEQUENCE() { - return (DB *)imp(); + return imp_; } - virtual const DB *get_const_DB() const + virtual const DB_SEQUENCE *get_const_DB_SEQUENCE() const { - return (const DB *)constimp(); + return imp_; } - static Db* get_Db(DB *db) + static DbSequence* get_DbSequence(DB_SEQUENCE *seq) { - return (Db *)db->api_internal; + return (DbSequence *)seq->api_internal; } - static const Db* get_const_Db(const DB *db) + static const DbSequence* get_const_DbSequence(const DB_SEQUENCE *seq) { - return (const Db *)db->api_internal; + return (const DbSequence *)seq->api_internal; } + // For internal use only. 
+ static DbSequence* wrap_DB_SEQUENCE(DB_SEQUENCE *seq); + private: + DbSequence(DB_SEQUENCE *seq); // no copying - Db(const Db &); - Db &operator = (const Db &); + DbSequence(const DbSequence &); + DbSequence &operator = (const DbSequence &); - void cleanup(); - int initialize(); - int error_policy(); + DB_SEQUENCE *imp_; + DBT key_; +}; - // instance data - DbEnv *env_; - DbMpoolFile *mpf_; - int construct_error_; - u_int32_t flags_; - u_int32_t construct_flags_; +// +// Transaction +// +class _exported DbTxn +{ + friend class DbEnv; public: - // These are public only because they need to be called - // via C callback functions. They should never be used by - // external users of this class. + int abort(); + int commit(u_int32_t flags); + int discard(u_int32_t flags); + u_int32_t id(); + int prepare(u_int8_t *gid); + int set_timeout(db_timeout_t timeout, u_int32_t flags); + + virtual DB_TXN *get_DB_TXN() + { + return imp_; + } + + virtual const DB_TXN *get_const_DB_TXN() const + { + return imp_; + } + + static DbTxn* get_DbTxn(DB_TXN *txn) + { + return (DbTxn *)txn->api_internal; + } + + static const DbTxn* get_const_DbTxn(const DB_TXN *txn) + { + return (const DbTxn *)txn->api_internal; + } + + // For internal use only. + static DbTxn* wrap_DB_TXN(DB_TXN *txn); + +private: + DB_TXN *imp_; + + // We can add data to this class if needed + // since it is implemented via a pointer. + // (see comment at top) + + // Note: use DbEnv::txn_begin() to get pointers to a DbTxn, + // and call DbTxn::abort() or DbTxn::commit rather than + // delete to release them. // - int (*append_recno_callback_)(Db *, Dbt *, db_recno_t); - int (*associate_callback_)(Db *, const Dbt *, const Dbt *, Dbt *); - int (*bt_compare_callback_)(Db *, const Dbt *, const Dbt *); - size_t (*bt_prefix_callback_)(Db *, const Dbt *, const Dbt *); - int (*dup_compare_callback_)(Db *, const Dbt *, const Dbt *); - void (*feedback_callback_)(Db *, int, int); - u_int32_t (*h_hash_callback_)(Db *, const void *, u_int32_t); + DbTxn(); + // For internal use only. + DbTxn(DB_TXN *txn); + virtual ~DbTxn(); + + // no copying + DbTxn(const DbTxn &); + void operator = (const DbTxn &); }; // @@ -899,13 +877,13 @@ public: // class _exported Dbt : private DBT { - friend class Dbc; friend class Db; + friend class Dbc; friend class DbEnv; friend class DbLogc; + friend class DbSequence; public: - // key/data void *get_data() const { return data; } void set_data(void *value) { data = value; } @@ -956,55 +934,6 @@ private: // not of your subclassed type. }; -class _exported Dbc : protected DBC -{ - friend class Db; - -public: - int close(); - int count(db_recno_t *countp, u_int32_t flags); - int del(u_int32_t flags); - int dup(Dbc** cursorp, u_int32_t flags); - int get(Dbt* key, Dbt *data, u_int32_t flags); - int pget(Dbt* key, Dbt* pkey, Dbt *data, u_int32_t flags); - int put(Dbt* key, Dbt *data, u_int32_t flags); - -private: - // No data is permitted in this class (see comment at top) - - // Note: use Db::cursor() to get pointers to a Dbc, - // and call Dbc::close() rather than delete to release them. 
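DbSequence and DbTxn above are both thin wrappers around a library-allocated handle (imp_), with the lifetime rules spelled out in their comments: sequences are closed, transactions are resolved with abort() or commit(), and neither is deleted. A sketch of allocating one value from a named sequence inside a transaction; the key string, flags and error handling are illustrative, and DbEnv::txn_begin() is assumed per the note above.

#include <cstring>
#include <db_cxx.h>

int next_id(DbEnv &env, Db &db, db_seq_t *idp)  /* db: an open, transactional Db */
{
        DbTxn *txn;
        int ret;

        if ((ret = env.txn_begin(NULL, &txn, 0)) != 0)
                return (ret);

        DbSequence seq(&db, 0);
        Dbt key((void *)"my_sequence", (u_int32_t)strlen("my_sequence"));
        if ((ret = seq.open(txn, &key, DB_CREATE)) == 0)
                ret = seq.get(txn, 1, idp, 0);  /* allocate one value */

        if (ret == 0)
                ret = txn->commit(0);
        else
                (void)txn->abort();             /* abort/commit, never delete */
        (void)seq.close(0);
        return (ret);
}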
- // - Dbc(); - ~Dbc(); - - // no copying - Dbc(const Dbc &); - Dbc &operator = (const Dbc &); -}; - -class _exported DbLogc : protected DB_LOGC -{ - friend class DbEnv; - -public: - int close(u_int32_t _flags); - int get(DbLsn *lsn, Dbt *data, u_int32_t _flags); - -private: - // No data is permitted in this class (see comment at top) - - // Note: use Db::cursor() to get pointers to a Dbc, - // and call Dbc::close() rather than delete to release them. - // - DbLogc(); - ~DbLogc(); - - // no copying - DbLogc(const Dbc &); - DbLogc &operator = (const Dbc &); -}; - //////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////// // @@ -1044,4 +973,116 @@ public: bool next(Dbt &data); }; +//////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////// +// +// Exception classes +// + +// Almost any error in the DB library throws a DbException. +// Every exception should be considered an abnormality +// (e.g. bug, misuse of DB, file system error). +// +class _exported DbException : public __DB_STD(exception) +{ +public: + virtual ~DbException() throw(); + DbException(int err); + DbException(const char *description); + DbException(const char *description, int err); + DbException(const char *prefix, const char *description, int err); + int get_errno() const; + virtual const char *what() const throw(); + DbEnv *get_env() const; + void set_env(DbEnv *env); + + DbException(const DbException &); + DbException &operator = (const DbException &); + +private: + void describe(const char *prefix, const char *description); + + char *what_; + int err_; // errno + DbEnv *env_; +}; + +// +// A specific sort of exception that occurs when +// an operation is aborted to resolve a deadlock. +// +class _exported DbDeadlockException : public DbException +{ +public: + virtual ~DbDeadlockException() throw(); + DbDeadlockException(const char *description); + + DbDeadlockException(const DbDeadlockException &); + DbDeadlockException &operator = (const DbDeadlockException &); +}; + +// +// A specific sort of exception that occurs when +// a lock is not granted, e.g. by lock_get or lock_vec. +// Note that the Dbt is only live as long as the Dbt used +// in the offending call. +// +class _exported DbLockNotGrantedException : public DbException +{ +public: + virtual ~DbLockNotGrantedException() throw(); + DbLockNotGrantedException(const char *prefix, db_lockop_t op, + db_lockmode_t mode, const Dbt *obj, const DbLock lock, int index); + DbLockNotGrantedException(const char *description); + + DbLockNotGrantedException(const DbLockNotGrantedException &); + DbLockNotGrantedException &operator = + (const DbLockNotGrantedException &); + + db_lockop_t get_op() const; + db_lockmode_t get_mode() const; + const Dbt* get_obj() const; + DbLock *get_lock() const; + int get_index() const; + +private: + db_lockop_t op_; + db_lockmode_t mode_; + const Dbt *obj_; + DbLock *lock_; + int index_; +}; + +// +// A specific sort of exception that occurs when +// user declared memory is insufficient in a Dbt. 
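The exception classes above let C++ callers branch on failure type rather than on return codes. A sketch of separating a deadlock (retry) from other errors; the function and its recovery policy are illustrative, and it assumes the default build in which library errors surface as exceptions.

#include <iostream>
#include <db_cxx.h>

int put_with_retry_hint(Db &db, DbTxn *txn, Dbt &key, Dbt &data)
{
        try {
                return (db.put(txn, &key, &data, 0));
        } catch (DbDeadlockException &dde) {
                std::cerr << "deadlock; abort the transaction and retry: "
                    << dde.what() << std::endl;
                return (DB_LOCK_DEADLOCK);
        } catch (DbException &e) {
                std::cerr << "error " << e.get_errno() << ": " << e.what()
                    << std::endl;
                return (e.get_errno());
        }
}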
+// +class _exported DbMemoryException : public DbException +{ +public: + virtual ~DbMemoryException() throw(); + DbMemoryException(Dbt *dbt); + DbMemoryException(const char *prefix, Dbt *dbt); + + DbMemoryException(const DbMemoryException &); + DbMemoryException &operator = (const DbMemoryException &); + + Dbt *get_dbt() const; +private: + Dbt *dbt_; +}; + +// +// A specific sort of exception that occurs when +// recovery is required before continuing DB activity. +// +class _exported DbRunRecoveryException : public DbException +{ +public: + virtual ~DbRunRecoveryException() throw(); + DbRunRecoveryException(const char *description); + + DbRunRecoveryException(const DbRunRecoveryException &); + DbRunRecoveryException &operator = (const DbRunRecoveryException &); +}; #endif /* !_DB_CXX_H_ */ diff --git a/db/dbinc/db_dispatch.h b/db/dbinc/db_dispatch.h index 03d59fbcf..bbaff69db 100644 --- a/db/dbinc/db_dispatch.h +++ b/db/dbinc/db_dispatch.h @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -32,7 +32,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: db_dispatch.h,v 11.36 2003/09/04 19:01:13 bostic Exp $ + * $Id: db_dispatch.h,v 11.38 2004/07/26 19:54:08 margo Exp $ */ #ifndef _DB_DISPATCH_H_ @@ -75,7 +75,7 @@ struct __db_txnlist { struct { u_int32_t txnid; u_int32_t generation; - int32_t status; + u_int32_t status; } t; struct { u_int32_t ntxns; diff --git a/db/dbinc/db_int.in b/db/dbinc/db_int.in index 0efe1179f..351c4f044 100644 --- a/db/dbinc/db_int.in +++ b/db/dbinc/db_int.in @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: db_int.in,v 11.126 2003/09/10 17:27:14 sue Exp $ + * $Id: db_int.in,v 11.153 2004/10/05 14:43:53 mjc Exp $ */ #ifndef _DB_INTERNAL_H_ @@ -35,8 +35,39 @@ extern "C" { /******************************************************* * General purpose constants and macros. *******************************************************/ -#define UINT16_T_MAX 0xffff /* Maximum 16 bit unsigned. */ -#define UINT32_T_MAX 0xffffffff /* Maximum 32 bit unsigned. */ +#ifndef UINT16_MAX +#define UINT16_MAX 65535 /* Maximum 16-bit unsigned. */ +#endif +#ifndef UINT32_MAX +#define UINT32_MAX 4294967295U /* Maximum 32-bit unsigned. */ +#endif + +#if defined(HAVE_LONG_LONG) && defined(HAVE_UNSIGNED_LONG_LONG) +#undef INT64_MAX +#undef INT64_MIN +#undef UINT64_MAX + +#ifdef DB_WIN32 +#define INT64_MAX _I64_MAX +#define INT64_MIN _I64_MIN +#define UINT64_MAX _UI64_MAX + +#define INT64_FMT "%l64d" +#define UINT64_FMT "%l64u" +#else +/* + * Override the system's 64-bit min/max constants. AIX's 32-bit compiler can + * handle 64-bit values, but the system's constants don't include the LL/ULL + * suffix, and so can't be compiled using the 32-bit compiler. + */ +#define INT64_MAX 9223372036854775807LL +#define INT64_MIN (-INT64_MAX-1) +#define UINT64_MAX 18446744073709551615ULL + +#define INT64_FMT "%lld" +#define UINT64_FMT "%llu" +#endif /* DB_WIN32 */ +#endif /* HAVE_LONG_LONG && HAVE_UNSIGNED_LONG_LONG */ #define MEGABYTE 1048576 #define GIGABYTE 1073741824 @@ -64,53 +95,38 @@ extern "C" { */ #define DB_DEF_IOSIZE (8 * 1024) -/* Number of times to reties I/O operations that return EINTR or EBUSY. */ -#define DB_RETRY 100 +/* Align an integer to a specific boundary. 
*/ +#undef DB_ALIGN +#define DB_ALIGN(v, bound) \ + (((v) + (bound) - 1) & ~(((uintmax_t)bound) - 1)) -/* - * Aligning items to particular sizes or in pages or memory. - * - * db_align_t -- - * Largest integral type, used to align structures in memory. We don't store - * floating point types in structures, so integral types should be sufficient - * (and we don't have to worry about systems that store floats in other than - * power-of-2 numbers of bytes). Additionally this fixes compiler that rewrite - * structure assignments and ANSI C memcpy calls to be in-line instructions - * that happen to require alignment. Note: this alignment isn't sufficient for - * mutexes, which depend on things like cache line alignment. Mutex alignment - * is handled separately, in mutex.h. - * - * db_alignp_t -- - * Integral type that's the same size as a pointer. There are places where - * DB modifies pointers by discarding the bottom bits to guarantee alignment. - * We can't use db_align_t, it may be larger than the pointer, and compilers - * get upset about that. So far we haven't run on any machine where there - * isn't an integral type the same size as a pointer -- here's hoping. - */ -@db_align_t_decl@ -@db_alignp_t_decl@ +/* Increment a pointer to a specific boundary. */ +#undef ALIGNP_INC +#define ALIGNP_INC(p, bound) \ + (void *)(((uintptr_t)(p) + (bound) - 1) & ~(((uintptr_t)bound) - 1)) -/* Align an integer to a specific boundary. */ -#undef ALIGN -#define ALIGN(v, bound) (((v) + (bound) - 1) & ~(((db_align_t)bound) - 1)) +/* Decrement a pointer to a specific boundary. */ +#undef ALIGNP_DEC +#define ALIGNP_DEC(p, bound) \ + (void *)((uintptr_t)(p) & ~(((uintptr_t)bound) - 1)) /* * Print an address as a u_long (a u_long is the largest type we can print * portably). Most 64-bit systems have made longs 64-bits, so this should * work. */ -#define P_TO_ULONG(p) ((u_long)(db_alignp_t)(p)) +#define P_TO_ULONG(p) ((u_long)(uintptr_t)(p)) /* * Convert a pointer to a small integral value. * - * The (u_int16_t)(db_alignp_t) cast avoids warnings: the (db_alignp_t) cast + * The (u_int16_t)(uintptr_t) cast avoids warnings: the (uintptr_t) cast * converts the value to an integral type, and the (u_int16_t) cast converts * it to a small integral type so we don't get complaints when we assign the - * final result to an integral type smaller than db_alignp_t. + * final result to an integral type smaller than uintptr_t. */ -#define P_TO_UINT32(p) ((u_int32_t)(db_alignp_t)(p)) -#define P_TO_UINT16(p) ((u_int16_t)(db_alignp_t)(p)) +#define P_TO_UINT32(p) ((u_int32_t)(uintptr_t)(p)) +#define P_TO_UINT16(p) ((u_int16_t)(uintptr_t)(p)) /* * There are several on-page structures that are declared to have a number of @@ -148,9 +164,64 @@ typedef struct __fn { #define LF_ISSET(f) ((flags) & (f)) #define LF_SET(f) ((flags) |= (f)) -/* Display separator string. */ -#undef DB_LINE -#define DB_LINE "=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=" +/* + * Calculate a percentage. The values can overflow 32-bit integer arithmetic + * so we use floating point. + * + * When calculating a bytes-vs-page size percentage, we're getting the inverse + * of the percentage in all cases, that is, we want 100 minus the percentage we + * calculate. + */ +#define DB_PCT(v, total) \ + ((int)((total) == 0 ? 0 : ((double)(v) * 100) / (total))) +#define DB_PCT_PG(v, total, pgsize) \ + ((int)((total) == 0 ? 0 : \ + 100 - ((double)(v) * 100) / ((total) * (pgsize)))) + +/* + * Structure used for callback message aggregation. 
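DB_ALIGN and ALIGNP_INC/ALIGNP_DEC above round an integer or pointer up (or down) to a power-of-two boundary, replacing the old db_align_t-based ALIGN macros. A standalone check of the integer form; the macro is restated locally under a different name purely so the sketch compiles outside db_int.in.

#include <cassert>
#include <stdint.h>

/* Local restatement of the DB_ALIGN definition shown above. */
#define MY_DB_ALIGN(v, bound)                                           \
        (((v) + (bound) - 1) & ~(((uintmax_t)bound) - 1))

int main()
{
        assert(MY_DB_ALIGN(13, 8) == 16);       /* rounded up to the boundary */
        assert(MY_DB_ALIGN(16, 8) == 16);       /* already aligned: unchanged */
        assert(MY_DB_ALIGN(1, 4) == 4);
        return (0);
}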
+ * + * Display values in XXX_stat_print calls. + */ +typedef struct __db_msgbuf { + char *buf; /* Heap allocated buffer. */ + char *cur; /* Current end of message. */ + size_t len; /* Allocated length of buffer. */ +} DB_MSGBUF; +#define DB_MSGBUF_INIT(a) do { \ + (a)->buf = (a)->cur = NULL; \ + (a)->len = 0; \ +} while (0) +#define DB_MSGBUF_FLUSH(dbenv, a) do { \ + if ((a)->buf != NULL) { \ + if ((a)->cur != (a)->buf) \ + __db_msg(dbenv, "%s", (a)->buf); \ + __os_free(dbenv, (a)->buf); \ + DB_MSGBUF_INIT(a); \ + } \ +} while (0) +#define STAT_FMT(msg, fmt, type, v) do { \ + DB_MSGBUF __mb; \ + DB_MSGBUF_INIT(&__mb); \ + __db_msgadd(dbenv, &__mb, fmt, (type)(v)); \ + __db_msgadd(dbenv, &__mb, "\t%s", msg); \ + DB_MSGBUF_FLUSH(dbenv, &__mb); \ +} while (0) +#define STAT_HEX(msg, v) \ + __db_msg(dbenv, "%#lx\t%s", (u_long)(v), msg) +#define STAT_ISSET(msg, p) \ + __db_msg(dbenv, "%sSet\t%s", (p) == NULL ? "!" : " ", msg) +#define STAT_LONG(msg, v) \ + __db_msg(dbenv, "%ld\t%s", (long)(v), msg) +#define STAT_LSN(msg, lsnp) \ + __db_msg(dbenv, "%lu/%lu\t%s", \ + (u_long)(lsnp)->file, (u_long)(lsnp)->offset, msg) +#define STAT_STRING(msg, p) do { \ + const char *__p = p; /* p may be a function call. */ \ + __db_msg(dbenv, "%s\t%s", __p == NULL ? "!Set" : __p, msg); \ +} while (0) +#define STAT_ULONG(msg, v) \ + __db_msg(dbenv, "%lu\t%s", (u_long)(v), msg) /******************************************************* * API return values @@ -176,7 +247,8 @@ typedef struct __fn { (ret) == DB_REP_ISPERM || \ (ret) == DB_REP_NEWMASTER || \ (ret) == DB_REP_NEWSITE || \ - (ret) == DB_REP_NOTPERM) + (ret) == DB_REP_NOTPERM || \ + (ret) == DB_REP_STARTUPDONE) /* Find a reasonable operation-not-supported error. */ #ifdef EOPNOTSUPP @@ -262,6 +334,9 @@ typedef enum { #define ENV_REQUIRES_CONFIG(dbenv, handle, i, flags) \ if (handle == NULL) \ return (__db_env_config(dbenv, i, flags)); +#define ENV_NOT_CONFIGURED(dbenv, handle, i, flags) \ + if (F_ISSET((dbenv), DB_ENV_OPEN_CALLED)) \ + ENV_REQUIRES_CONFIG(dbenv, handle, i, flags) /******************************************************* * Database Access Methods. @@ -385,11 +460,11 @@ typedef struct __dbpginfo { } while (0) #define MAX_LSN(LSN) do { \ - (LSN).file = UINT32_T_MAX; \ - (LSN).offset = UINT32_T_MAX; \ + (LSN).file = UINT32_MAX; \ + (LSN).offset = UINT32_MAX; \ } while (0) #define IS_MAX_LSN(LSN) \ - ((LSN).file == UINT32_T_MAX && (LSN).offset == UINT32_T_MAX) + ((LSN).file == UINT32_MAX && (LSN).offset == UINT32_MAX) /* If logging is turned off, smash the lsn. */ #define LSN_NOT_LOGGED(LSN) do { \ @@ -403,6 +478,8 @@ typedef struct __dbpginfo { * Txn. *******************************************************/ #define DB_NONBLOCK(C) ((C)->txn != NULL && F_ISSET((C)->txn, TXN_NOWAIT)) +#define NOWAIT_FLAG(txn) \ + ((txn) != NULL && F_ISSET((txn), TXN_NOWAIT) ? DB_LOCK_NOWAIT : 0) #define IS_SUBTRANSACTION(txn) \ ((txn) != NULL && (txn)->parent != NULL) @@ -412,6 +489,33 @@ typedef struct __dbpginfo { #define DB_IV_BYTES 16 /* Bytes per IV */ #define DB_MAC_KEY 20 /* Bytes per MAC checksum */ +/******************************************************* + * Secondaries over RPC. + *******************************************************/ +#ifdef CONFIG_TEST +/* + * These are flags passed to DB->associate calls by the Tcl API if running + * over RPC. The RPC server will mask out these flags before making the real + * DB->associate call. + * + * These flags must coexist with the valid flags to DB->associate (currently + * DB_AUTO_COMMIT and DB_CREATE). 
DB_AUTO_COMMIT is in the group of + * high-order shared flags (0xff000000), and DB_CREATE is in the low-order + * group (0x00000fff), so we pick a range in between. + */ +#define DB_RPC2ND_MASK 0x00f00000 /* Reserved bits. */ + +#define DB_RPC2ND_REVERSEDATA 0x00100000 /* callback_n(0) _s_reversedata. */ +#define DB_RPC2ND_NOOP 0x00200000 /* callback_n(1) _s_noop */ +#define DB_RPC2ND_CONCATKEYDATA 0x00300000 /* callback_n(2) _s_concatkeydata */ +#define DB_RPC2ND_CONCATDATAKEY 0x00400000 /* callback_n(3) _s_concatdatakey */ +#define DB_RPC2ND_REVERSECONCAT 0x00500000 /* callback_n(4) _s_reverseconcat */ +#define DB_RPC2ND_TRUNCDATA 0x00600000 /* callback_n(5) _s_truncdata */ +#define DB_RPC2ND_CONSTANT 0x00700000 /* callback_n(6) _s_constant */ +#define DB_RPC2ND_GETZIP 0x00800000 /* sj_getzip */ +#define DB_RPC2ND_GETNAME 0x00900000 /* sj_getname */ +#endif + /******************************************************* * Forward structure declarations. *******************************************************/ diff --git a/db/dbinc/db_join.h b/db/dbinc/db_join.h index f04c9934e..3fea2ad2f 100644 --- a/db/dbinc/db_join.h +++ b/db/dbinc/db_join.h @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1998-2003 + * Copyright (c) 1998-2004 * Sleepycat Software. All rights reserved. * * @(#)db_join.h 11.1 (Sleepycat) 7/25/99 @@ -18,7 +18,7 @@ typedef struct __join_cursor { u_int8_t *j_exhausted; /* Array of flags; is cursor i exhausted? */ DBC **j_curslist; /* Array of cursors in the join: constant. */ - DBC **j_fdupcurs; /* Cursors w/ first intances of current dup. */ + DBC **j_fdupcurs; /* Cursors w/ first instances of current dup. */ DBC **j_workcurs; /* Scratch cursor copies to muck with. */ DB *j_primary; /* Primary dbp. */ DBT j_key; /* Used to do lookups. */ diff --git a/db/dbinc/db_page.h b/db/dbinc/db_page.h index 28641e06b..59a1292ff 100644 --- a/db/dbinc/db_page.h +++ b/db/dbinc/db_page.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: db_page.h,v 11.59 2003/09/13 18:49:29 bostic Exp $ + * $Id: db_page.h,v 11.63 2004/09/17 22:00:27 mjc Exp $ */ #ifndef _DB_PAGE_H_ @@ -553,10 +553,13 @@ typedef struct _bkeydata { /* * Page space required to add a new BKEYDATA item to the page, with and - * without the index value. + * without the index value. The (u_int16_t) cast avoids warnings: DB_ALIGN + * casts to uintmax_t, the cast converts it to a small integral type so we + * don't get complaints when we assign the final result to an integral type + * smaller than uintmax_t. */ #define BKEYDATA_SIZE(len) \ - ALIGN((len) + SSZA(BKEYDATA, data), sizeof(u_int32_t)) + (u_int16_t)DB_ALIGN((len) + SSZA(BKEYDATA, data), sizeof(u_int32_t)) #define BKEYDATA_PSIZE(len) \ (BKEYDATA_SIZE(len) + sizeof(db_indx_t)) @@ -578,13 +581,10 @@ typedef struct _boverflow { /* * Page space required to add a new BOVERFLOW item to the page, with and - * without the index value. The (u_int16_t) cast avoids warnings: ALIGN - * casts to db_align_t, the cast converts it to a small integral type so - * we don't get complaints when we assign the final result to an integral - * type smaller than db_align_t. + * without the index value. 
*/ #define BOVERFLOW_SIZE \ - ((u_int16_t)ALIGN(sizeof(BOVERFLOW), sizeof(u_int32_t))) + ((u_int16_t)DB_ALIGN(sizeof(BOVERFLOW), sizeof(u_int32_t))) #define BOVERFLOW_PSIZE \ (BOVERFLOW_SIZE + sizeof(db_indx_t)) @@ -621,7 +621,7 @@ typedef struct _binternal { * without the index value. */ #define BINTERNAL_SIZE(len) \ - ALIGN((len) + SSZA(BINTERNAL, data), sizeof(u_int32_t)) + (u_int16_t)DB_ALIGN((len) + SSZA(BINTERNAL, data), sizeof(u_int32_t)) #define BINTERNAL_PSIZE(len) \ (BINTERNAL_SIZE(len) + sizeof(db_indx_t)) @@ -646,7 +646,7 @@ typedef struct _rinternal { * without the index value. */ #define RINTERNAL_SIZE \ - ALIGN(sizeof(RINTERNAL), sizeof(u_int32_t)) + (u_int16_t)DB_ALIGN(sizeof(RINTERNAL), sizeof(u_int32_t)) #define RINTERNAL_PSIZE \ (RINTERNAL_SIZE + sizeof(db_indx_t)) diff --git a/db/dbinc/db_server_int.h b/db/dbinc/db_server_int.h index 93193bc16..eba36efcb 100644 --- a/db/dbinc/db_server_int.h +++ b/db/dbinc/db_server_int.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2000-2003 + * Copyright (c) 2000-2004 * Sleepycat Software. All rights reserved. * - * $Id: db_server_int.h,v 1.24 2003/01/08 04:31:23 bostic Exp $ + * $Id: db_server_int.h,v 1.25 2004/01/28 03:36:02 bostic Exp $ */ #ifndef _DB_SERVER_INT_H_ diff --git a/db/dbinc/db_shash.h b/db/dbinc/db_shash.h index 0f335aadf..51277e5e0 100644 --- a/db/dbinc/db_shash.h +++ b/db/dbinc/db_shash.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: db_shash.h,v 11.12 2003/01/08 04:31:28 bostic Exp $ + * $Id: db_shash.h,v 11.13 2004/01/28 03:36:02 bostic Exp $ */ #ifndef _DB_SHASH_H_ diff --git a/db/dbinc/db_swap.h b/db/dbinc/db_swap.h index d0c7e6683..dcfac416a 100644 --- a/db/dbinc/db_swap.h +++ b/db/dbinc/db_swap.h @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -32,7 +32,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* - * $Id: db_swap.h,v 11.9 2003/01/08 04:31:32 bostic Exp $ + * $Id: db_swap.h,v 11.11 2004/01/28 03:36:02 bostic Exp $ */ #ifndef _DB_SWAP_H_ @@ -44,6 +44,7 @@ * P_32_COPY copy potentially unaligned 4 byte quantities * P_32_SWAP swap a referenced memory location */ +#undef M_32_SWAP #define M_32_SWAP(a) { \ u_int32_t _tmp; \ _tmp = a; \ @@ -52,12 +53,14 @@ ((u_int8_t *)&a)[2] = ((u_int8_t *)&_tmp)[1]; \ ((u_int8_t *)&a)[3] = ((u_int8_t *)&_tmp)[0]; \ } +#undef P_32_COPY #define P_32_COPY(a, b) { \ ((u_int8_t *)b)[0] = ((u_int8_t *)a)[0]; \ ((u_int8_t *)b)[1] = ((u_int8_t *)a)[1]; \ ((u_int8_t *)b)[2] = ((u_int8_t *)a)[2]; \ ((u_int8_t *)b)[3] = ((u_int8_t *)a)[3]; \ } +#undef P_32_SWAP #define P_32_SWAP(a) { \ u_int32_t _tmp; \ P_32_COPY(a, &_tmp); \ @@ -73,16 +76,19 @@ * P_16_COPY copy potentially unaligned 2 byte quantities * P_16_SWAP swap a referenced memory location */ +#undef M_16_SWAP #define M_16_SWAP(a) { \ u_int16_t _tmp; \ _tmp = (u_int16_t)a; \ ((u_int8_t *)&a)[0] = ((u_int8_t *)&_tmp)[1]; \ ((u_int8_t *)&a)[1] = ((u_int8_t *)&_tmp)[0]; \ } +#undef P_16_COPY #define P_16_COPY(a, b) { \ ((u_int8_t *)b)[0] = ((u_int8_t *)a)[0]; \ ((u_int8_t *)b)[1] = ((u_int8_t *)a)[1]; \ } +#undef P_16_SWAP #define P_16_SWAP(a) { \ u_int16_t _tmp; \ P_16_COPY(a, &_tmp); \ @@ -90,10 +96,12 @@ ((u_int8_t *)a)[1] = ((u_int8_t *)&_tmp)[0]; \ } +#undef SWAP32 #define SWAP32(p) { \ P_32_SWAP(p); \ (p) += sizeof(u_int32_t); \ } +#undef SWAP16 #define SWAP16(p) { \ P_16_SWAP(p); \ (p) += sizeof(u_int16_t); \ @@ -104,10 +112,12 @@ * pointers to the right size memory locations; the portability magic for * finding the real system functions isn't worth the effort. */ +#undef DB_HTONL #define DB_HTONL(p) do { \ if (!__db_isbigendian()) \ P_32_SWAP(p); \ } while (0) +#undef DB_NTOHL #define DB_NTOHL(p) do { \ if (!__db_isbigendian()) \ P_32_SWAP(p); \ diff --git a/db/dbinc/db_upgrade.h b/db/dbinc/db_upgrade.h index 48e8cc114..e7ac0bc96 100644 --- a/db/dbinc/db_upgrade.h +++ b/db/dbinc/db_upgrade.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: db_upgrade.h,v 1.11 2003/01/08 04:31:35 bostic Exp $ + * $Id: db_upgrade.h,v 1.12 2004/01/28 03:36:02 bostic Exp $ */ #ifndef _DB_UPGRADE_H_ diff --git a/db/dbinc/db_verify.h b/db/dbinc/db_verify.h index b877eb855..528ba8f04 100644 --- a/db/dbinc/db_verify.h +++ b/db/dbinc/db_verify.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. * - * $Id: db_verify.h,v 1.32 2003/09/04 19:01:13 bostic Exp $ + * $Id: db_verify.h,v 1.34 2004/05/20 14:34:12 bostic Exp $ */ #ifndef _DB_VERIFY_H_ @@ -20,21 +20,22 @@ * for DB->err. */ #define EPRINT(x) do { \ - if (!LF_ISSET(DB_SALVAGE)) \ - __db_err x; \ + if (!LF_ISSET(DB_SALVAGE)) \ + __db_err x; \ } while (0) /* For fatal type errors--i.e., verifier bugs. */ #define TYPE_ERR_PRINT(dbenv, func, pgno, ptype) \ - EPRINT(((dbenv), "Page %lu: %s called on nonsensical page of type %lu", \ - (u_long)(pgno), (func), (u_long)(ptype))); + EPRINT(((dbenv), \ + "Page %lu: %s called on nonsensical page of type %lu", \ + (u_long)(pgno), (func), (u_long)(ptype))); /* Complain about a totally zeroed page where we don't expect one. 
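The db_swap.h hunks above only add #undef guards; the swap logic is unchanged: M_32_SWAP reverses a 4-byte value in place, the P_* forms work through possibly unaligned pointers, and DB_HTONL/DB_NTOHL swap only on little-endian hosts. A standalone check of the 32-bit in-place swap, restated locally so it compiles on its own.

#include <cassert>

/* Local restatement of the M_32_SWAP definition shown above. */
#define MY_M_32_SWAP(a) {                                               \
        unsigned int _tmp;                                              \
        _tmp = (a);                                                     \
        ((unsigned char *)&(a))[0] = ((unsigned char *)&_tmp)[3];       \
        ((unsigned char *)&(a))[1] = ((unsigned char *)&_tmp)[2];       \
        ((unsigned char *)&(a))[2] = ((unsigned char *)&_tmp)[1];       \
        ((unsigned char *)&(a))[3] = ((unsigned char *)&_tmp)[0];       \
}

int main()
{
        unsigned int v = 0x11223344;
        MY_M_32_SWAP(v);
        assert(v == 0x44332211);                /* byte order reversed in place */
        return (0);
}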
*/ -#define ZEROPG_ERR_PRINT(dbenv, pgno, str) do { \ - EPRINT(((dbenv), "Page %lu: %s is of inappropriate type %lu", \ - (u_long)(pgno), str, (u_long)P_INVALID)); \ - EPRINT(((dbenv), "Page %lu: totally zeroed page", \ - (u_long)(pgno))); \ +#define ZEROPG_ERR_PRINT(dbenv, pgno, str) do { \ + EPRINT(((dbenv), "Page %lu: %s is of inappropriate type %lu", \ + (u_long)(pgno), str, (u_long)P_INVALID)); \ + EPRINT(((dbenv), "Page %lu: totally zeroed page", \ + (u_long)(pgno))); \ } while (0) /* diff --git a/db/dbinc/debug.h b/db/dbinc/debug.h index 9211ec7db..068c8af2b 100644 --- a/db/dbinc/debug.h +++ b/db/dbinc/debug.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1998-2003 + * Copyright (c) 1998-2004 * Sleepycat Software. All rights reserved. * - * $Id: debug.h,v 11.38 2003/09/04 19:01:14 bostic Exp $ + * $Id: debug.h,v 11.44 2004/09/24 00:43:18 bostic Exp $ */ #ifndef _DB_DEBUG_H_ @@ -61,22 +61,22 @@ extern "C" { #endif /* - * Error message handling. Use a macro instead of a function because va_list + * Message handling. Use a macro instead of a function because va_list * references to variadic arguments cannot be reset to the beginning of the * variadic argument list (and then rescanned), by functions other than the * original routine that took the variadic list of arguments. */ #if defined(STDC_HEADERS) || defined(__cplusplus) -#define DB_REAL_ERR(env, error, error_set, stderr_default, fmt) { \ +#define DB_REAL_ERR(env, error, error_set, default_stream, fmt) { \ va_list ap; \ \ - /* Call the user's callback function, if specified. */ \ + /* Call the application's callback function, if specified. */ \ va_start(ap, fmt); \ if ((env) != NULL && (env)->db_errcall != NULL) \ __db_errcall(env, error, error_set, fmt, ap); \ va_end(ap); \ \ - /* Write to the user's file descriptor, if specified. */ \ + /* Write to the application's file descriptor, if specified. */\ va_start(ap, fmt); \ if ((env) != NULL && (env)->db_errfile != NULL) \ __db_errfile(env, error, error_set, fmt, ap); \ @@ -87,22 +87,22 @@ extern "C" { * write to the default. \ */ \ va_start(ap, fmt); \ - if ((stderr_default) && ((env) == NULL || \ + if ((default_stream) && ((env) == NULL || \ ((env)->db_errcall == NULL && (env)->db_errfile == NULL))) \ __db_errfile(env, error, error_set, fmt, ap); \ va_end(ap); \ } #else -#define DB_REAL_ERR(env, error, error_set, stderr_default, fmt) { \ +#define DB_REAL_ERR(env, error, error_set, default_stream, fmt) { \ va_list ap; \ \ - /* Call the user's callback function, if specified. */ \ + /* Call the application's callback function, if specified. */ \ va_start(ap); \ if ((env) != NULL && (env)->db_errcall != NULL) \ __db_errcall(env, error, error_set, fmt, ap); \ va_end(ap); \ \ - /* Write to the user's file descriptor, if specified. */ \ + /* Write to the application's file descriptor, if specified. */\ va_start(ap); \ if ((env) != NULL && (env)->db_errfile != NULL) \ __db_errfile(env, error, error_set, fmt, ap); \ @@ -113,12 +113,57 @@ extern "C" { * write to the default. \ */ \ va_start(ap); \ - if ((stderr_default) && ((env) == NULL || \ + if ((default_stream) && ((env) == NULL || \ ((env)->db_errcall == NULL && (env)->db_errfile == NULL))) \ __db_errfile(env, error, error_set, fmt, ap); \ va_end(ap); \ } #endif +#if defined(STDC_HEADERS) || defined(__cplusplus) +#define DB_REAL_MSG(env, fmt) { \ + va_list ap; \ + \ + /* Call the application's callback function, if specified. 
*/ \ + va_start(ap, fmt); \ + if ((env) != NULL && (env)->db_msgcall != NULL) \ + __db_msgcall(env, fmt, ap); \ + va_end(ap); \ + \ + /* \ + * If the application specified a file descriptor, or we wrote \ + * to neither the application's callback routine or to its file \ + * descriptor, write to stdout. \ + */ \ + va_start(ap, fmt); \ + if ((env) == NULL || \ + (env)->db_msgfile != NULL || (env)->db_msgcall == NULL) { \ + __db_msgfile(env, fmt, ap); \ + } \ + va_end(ap); \ +} +#else +#define DB_REAL_MSG(env, fmt) { \ + va_list ap; \ + \ + /* Call the application's callback function, if specified. */ \ + va_start(ap); \ + if ((env) != NULL && (env)->db_msgcall != NULL) \ + __db_msgcall(env, fmt, ap); \ + va_end(ap); \ + \ + /* \ + * If the application specified a file descriptor, or we wrote \ + * to neither the application's callback routine or to its file \ + * descriptor, write to stdout. \ + */ \ + va_start(ap); \ + if ((env) == NULL || \ + (env)->db_msgfile != NULL || (env)->db_msgcall == NULL) { \ + __db_msgfile(env, fmt, ap); \ + } \ + va_end(ap); \ +} +#endif /* * Debugging macro to log operations. @@ -159,14 +204,12 @@ extern "C" { * Hook for testing subdb locks. */ #if CONFIG_TEST -#define DB_TEST_SUBLOCKS(env, flags) \ -do { \ +#define DB_TEST_SUBLOCKS(env, flags) do { \ if ((env)->test_abort == DB_TEST_SUBDB_LOCKS) \ (flags) |= DB_LOCK_NOWAIT; \ } while (0) -#define DB_ENV_TEST_RECOVERY(env, val, ret, name) \ -do { \ +#define DB_ENV_TEST_RECOVERY(env, val, ret, name) do { \ int __ret; \ PANIC_CHECK((env)); \ if ((env)->test_copy == (val)) { \ @@ -182,8 +225,7 @@ do { \ } \ } while (0) -#define DB_TEST_RECOVERY(dbp, val, ret, name) \ -do { \ +#define DB_TEST_RECOVERY(dbp, val, ret, name) do { \ int __ret; \ PANIC_CHECK((dbp)->dbenv); \ if ((dbp)->dbenv->test_copy == (val)) { \ @@ -204,11 +246,16 @@ do { \ } while (0) #define DB_TEST_RECOVERY_LABEL db_tr_err: + +#define DB_TEST_CHECKPOINT(env, val) \ + if ((val) != 0) \ + __os_sleep((env), (u_long)(val), 0) #else #define DB_TEST_SUBLOCKS(env, flags) #define DB_ENV_TEST_RECOVERY(env, val, ret, name) #define DB_TEST_RECOVERY(dbp, val, ret, name) #define DB_TEST_RECOVERY_LABEL +#define DB_TEST_CHECKPOINT(env, val) #endif #if defined(__cplusplus) diff --git a/db/dbinc/fop.h b/db/dbinc/fop.h index 58616243b..ef87ff6e2 100644 --- a/db/dbinc/fop.h +++ b/db/dbinc/fop.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. * - * $Id: fop.h,v 11.4 2003/01/08 04:31:47 bostic Exp $ + * $Id: fop.h,v 11.5 2004/01/28 03:36:02 bostic Exp $ */ #ifndef _FOP_H_ diff --git a/db/dbinc/globals.h b/db/dbinc/globals.h index b2dcc629a..95d96533a 100644 --- a/db/dbinc/globals.h +++ b/db/dbinc/globals.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: globals.h,v 11.3 2003/01/08 04:31:51 bostic Exp $ + * $Id: globals.h,v 11.9 2004/09/17 22:00:27 mjc Exp $ */ /******************************************************* @@ -24,21 +24,26 @@ typedef struct __db_globals { /* XA: list of opened environments. */ TAILQ_HEAD(__db_envq, __db_env) db_envq; + char *db_line; /* DB display string. 
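The new DB_REAL_MSG macro above fixes where informational output goes: the application's message callback is invoked if one is set, and the text is also written via __db_msgfile when a message file is configured or when no callback exists (in which case it falls back to stdout). A small model of just that branch structure; the enum, function and assertions are illustrative, not library code.

#include <cassert>

enum { TO_CALLBACK = 0x1, TO_FILE = 0x2, TO_STDOUT = 0x4 };

/* Which sinks receive a message, given what the application configured. */
static int msg_sinks(bool have_msgcall, bool have_msgfile)
{
        int sinks = 0;
        if (have_msgcall)
                sinks |= TO_CALLBACK;
        if (have_msgfile || !have_msgcall)
                sinks |= have_msgfile ? TO_FILE : TO_STDOUT;
        return (sinks);
}

int main()
{
        assert(msg_sinks(false, false) == TO_STDOUT);               /* default */
        assert(msg_sinks(true, false) == TO_CALLBACK);              /* callback only */
        assert(msg_sinks(true, true) == (TO_CALLBACK | TO_FILE));   /* both */
        assert(msg_sinks(false, true) == TO_FILE);
        return (0);
}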
*/ + int (*j_close) __P((int)); /* Underlying OS interface jump table.*/ void (*j_dirfree) __P((char **, int)); int (*j_dirlist) __P((const char *, char ***, int *)); int (*j_exists) __P((const char *, int *)); void (*j_free) __P((void *)); int (*j_fsync) __P((int)); + int (*j_ftruncate) __P((int, off_t)); int (*j_ioinfo) __P((const char *, int, u_int32_t *, u_int32_t *, u_int32_t *)); void *(*j_malloc) __P((size_t)); int (*j_map) __P((char *, size_t, int, int, void **)); int (*j_open) __P((const char *, int, ...)); + ssize_t (*j_pread) __P((int, void *, size_t, off_t)); + ssize_t (*j_pwrite) __P((int, const void *, size_t, off_t)); ssize_t (*j_read) __P((int, void *, size_t)); void *(*j_realloc) __P((void *, size_t)); int (*j_rename) __P((const char *, const char *)); - int (*j_seek) __P((int, size_t, db_pgno_t, u_int32_t, int, int)); + int (*j_seek) __P((int, off_t, int)); int (*j_sleep) __P((u_long, u_long)); int (*j_unlink) __P((const char *)); int (*j_unmap) __P((void *, size_t)); @@ -54,6 +59,12 @@ DB_GLOBALS __db_global_values = { #endif /* XA: list of opened environments. */ {NULL, &__db_global_values.db_envq.tqh_first}, + + "=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=", + + NULL, + NULL, + NULL, NULL, NULL, NULL, diff --git a/db/dbinc/hash.h b/db/dbinc/hash.h index 5bfcae899..10059a5e0 100644 --- a/db/dbinc/hash.h +++ b/db/dbinc/hash.h @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -39,7 +39,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: hash.h,v 11.27 2003/01/08 04:31:58 bostic Exp $ + * $Id: hash.h,v 11.28 2004/01/28 03:36:02 bostic Exp $ */ #ifndef _DB_HASH_H_ diff --git a/db/dbinc/hmac.h b/db/dbinc/hmac.h index 43501f204..439537927 100644 --- a/db/dbinc/hmac.h +++ b/db/dbinc/hmac.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: hmac.h,v 1.4 2003/01/08 04:32:02 bostic Exp $ + * $Id: hmac.h,v 1.5 2004/01/28 03:36:02 bostic Exp $ */ #ifndef _DB_HMAC_H_ diff --git a/db/dbinc/lock.h b/db/dbinc/lock.h index e0d65f592..e59abbff8 100644 --- a/db/dbinc/lock.h +++ b/db/dbinc/lock.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: lock.h,v 11.47 2003/04/16 18:23:27 ubell Exp $ + * $Id: lock.h,v 11.53 2004/09/22 21:14:56 ubell Exp $ */ #ifndef _DB_LOCK_H_ @@ -46,8 +46,8 @@ typedef struct { #define LOCK_TIME_ISVALID(time) ((time)->tv_sec != 0) #define LOCK_SET_TIME_INVALID(time) ((time)->tv_sec = 0) -#define LOCK_TIME_ISMAX(time) ((time)->tv_sec == UINT32_T_MAX) -#define LOCK_SET_TIME_MAX(time) ((time)->tv_sec = UINT32_T_MAX) +#define LOCK_TIME_ISMAX(time) ((time)->tv_sec == UINT32_MAX) +#define LOCK_SET_TIME_MAX(time) ((time)->tv_sec = UINT32_MAX) #define LOCK_TIME_EQUAL(t1, t2) \ ((t1)->tv_sec == (t2)->tv_sec && (t1)->tv_usec == (t2)->tv_usec) #define LOCK_TIME_GREATER(t1, t2) \ @@ -96,7 +96,7 @@ typedef struct __db_lockregion { */ typedef struct __sh_dbt { u_int32_t size; /* Byte length. */ - ssize_t off; /* Region offset. */ + roff_t off; /* Region offset. 
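The globals.h hunk above grows the OS-interface jump table (j_ftruncate, j_pread, j_pwrite) and changes j_seek to the lseek(2)-style (int, off_t, int) shape. The idea is that a NULL slot means "call the native routine" and a non-NULL slot overrides it; below is a minimal model of that dispatch for one slot, with illustrative names rather than the library's actual plumbing.

#include <unistd.h>

struct os_calls {
        ssize_t (*j_pread)(int, void *, size_t, off_t);  /* NULL: use native pread */
};

static struct os_calls calls = { NULL };

ssize_t do_pread(int fd, void *buf, size_t len, off_t off)
{
        return (calls.j_pread != NULL ?
            calls.j_pread(fd, buf, len, off) : pread(fd, buf, len, off));
}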
*/ } SH_DBT; #define SH_DBT_PTR(p) ((void *)(((u_int8_t *)(p)) + (p)->off)) @@ -125,8 +125,8 @@ typedef struct __db_locker { u_int32_t dd_id; /* Deadlock detector id. */ u_int32_t nlocks; /* Number of locks held. */ u_int32_t nwrites; /* Number of write locks held. */ - size_t master_locker; /* Locker of master transaction. */ - size_t parent_locker; /* Parent of this child. */ + roff_t master_locker; /* Locker of master transaction. */ + roff_t parent_locker; /* Parent of this child. */ SH_LIST_HEAD(_child) child_locker; /* List of descendant txns; only used in a "master" txn. */ @@ -160,9 +160,13 @@ typedef struct __db_locktab { DB_HASHTAB *locker_tab; /* Beginning of locker hash table. */ } DB_LOCKTAB; -/* Test for conflicts. */ +/* + * Test for conflicts. + * + * Cast HELD and WANTED to ints, they are usually db_lockmode_t enums. + */ #define CONFLICTS(T, R, HELD, WANTED) \ - (T)->conflicts[(HELD) * (R)->stat.st_nmodes + (WANTED)] + (T)->conflicts[((int)HELD) * (R)->stat.st_nmodes + ((int)WANTED)] #define OBJ_LINKS_VALID(L) ((L)->links.stqe_prev != -1) @@ -179,7 +183,7 @@ struct __db_lock { SH_LIST_ENTRY locker_links; /* List of locks held by a locker. */ u_int32_t refcount; /* Reference count the lock. */ db_lockmode_t mode; /* What sort of lock. */ - ssize_t obj; /* Relative offset of object struct. */ + roff_t obj; /* Relative offset of object struct. */ db_status_t status; /* Status of this lock. */ }; @@ -194,10 +198,12 @@ struct __db_lock { * we pass some of those around (i.e., DB_LOCK_REMOVE). */ #define DB_LOCK_DOALL 0x010000 -#define DB_LOCK_FREE 0x020000 -#define DB_LOCK_NOPROMOTE 0x040000 -#define DB_LOCK_UNLINK 0x080000 -#define DB_LOCK_NOWAITERS 0x100000 +#define DB_LOCK_DOWNGRADE 0x020000 +#define DB_LOCK_FREE 0x040000 +#define DB_LOCK_NOPROMOTE 0x080000 +#define DB_LOCK_UNLINK 0x100000 +#define DB_LOCK_NOREGION 0x200000 +#define DB_LOCK_NOWAITERS 0x400000 /* * Macros to get/release different types of mutexes. @@ -209,8 +215,8 @@ struct __db_lock { #define LOCKER_LOCK(lt, reg, locker, ndx) \ ndx = __lock_locker_hash(locker) % (reg)->locker_t_size; -#define LOCKREGION(dbenv, lt) R_LOCK((dbenv), &(lt)->reginfo) -#define UNLOCKREGION(dbenv, lt) R_UNLOCK((dbenv), &(lt)->reginfo) +#define LOCKREGION(dbenv, lt) R_LOCK((dbenv), &((DB_LOCKTAB *)lt)->reginfo) +#define UNLOCKREGION(dbenv, lt) R_UNLOCK((dbenv), &((DB_LOCKTAB *)lt)->reginfo) #include "dbinc_auto/lock_ext.h" #endif /* !_DB_LOCK_H_ */ diff --git a/db/dbinc/log.h b/db/dbinc/log.h index 06a2b0f71..01d3d0259 100644 --- a/db/dbinc/log.h +++ b/db/dbinc/log.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: log.h,v 11.68 2003/11/20 18:32:19 bostic Exp $ + * $Id: log.h,v 11.89 2004/09/24 00:43:18 bostic Exp $ */ #ifndef _LOG_H_ @@ -65,7 +65,9 @@ struct __log_persist; typedef struct __log_persist LOGP; #define LFNAME_V1 "log.%05d" /* Log file name template, rev 1. */ #define LG_MAX_DEFAULT (10 * MEGABYTE) /* 10 MB. */ +#define LG_MAX_INMEM (256 * 1024) /* 256 KB. */ #define LG_BSIZE_DEFAULT (32 * 1024) /* 32 KB. */ +#define LG_BSIZE_INMEM (1 * MEGABYTE) /* 1 MB. */ #define LG_BASE_REGION_SIZE (60 * 1024) /* 60 KB. */ /* @@ -158,8 +160,8 @@ struct __log { SH_TAILQ_HEAD(__fq1) fq; /* List of file names. */ int32_t fid_max; /* Max fid allocated. */ roff_t free_fid_stack; /* Stack of free file ids. */ - int free_fids; /* Height of free fid stack. 
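The CONFLICTS change above only casts the lock modes to int (they are usually db_lockmode_t enums); the lookup itself stays a row-major index into a flat st_nmodes-by-st_nmodes conflict matrix. A standalone model with a three-mode table; the mode names and matrix contents are illustrative, not DB's real lock table.

#include <cassert>

enum mode { M_NG = 0, M_READ = 1, M_WRITE = 2 };        /* illustrative modes */
static const int NMODES = 3;

/* conflicts[held * NMODES + wanted] != 0: "wanted" conflicts with "held". */
static const unsigned char conflicts[NMODES * NMODES] = {
        /*            NG  READ  WRITE   <- wanted */
        /* NG    */    0,   0,    0,
        /* READ  */    0,   0,    1,
        /* WRITE */    0,   1,    1,
};

static int is_conflict(mode held, mode wanted)
{
        return (conflicts[(int)held * NMODES + (int)wanted]);
}

int main()
{
        assert(!is_conflict(M_READ, M_READ));   /* readers share */
        assert(is_conflict(M_READ, M_WRITE));   /* a writer waits behind a reader */
        assert(is_conflict(M_WRITE, M_WRITE));
        return (0);
}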
*/ - int free_fids_alloced; /* Number of free fid slots alloc'ed. */ + u_int free_fids; /* Height of free fid stack. */ + u_int free_fids_alloced; /* N free fid slots allocated. */ /* * The lsn LSN is the file offset that we're about to write and which @@ -178,7 +180,16 @@ struct __log { u_int32_t w_off; /* Current write offset in the file. */ u_int32_t len; /* Length of the last record. */ + DB_LSN active_lsn; /* Oldest active LSN in the buffer. */ + size_t a_off; /* Offset in the buffer of first active + file. */ + /* + * Due to alignment constraints on some architectures (e.g. HP-UX), + * DB_MUTEXes must be the first element of shalloced structures, + * and as a corollary there can be only one per structure. Thus, + * flush_mutex_off points to a mutex in a separately-allocated chunk. + * * The s_lsn LSN is the last LSN that we know is on disk, not just * written, but synced. This field is protected by the flush mutex * rather than by the region mutex. @@ -190,11 +201,12 @@ struct __log { DB_LOG_STAT stat; /* Log statistics. */ /* - * !!! - NOTE that the next 6 fields, waiting_lsn, verify_lsn, - * max_wait_lsn, wait_recs, rcvd_recs, and ready_lsn are NOT - * protected by the log region lock. They are protected by - * db_rep->db_mutexp. If you need access to both, you must - * acquire the db_mutexp before acquiring the log region lock. + * !!! - NOTE that the next 7 fields, waiting_lsn, verify_lsn, + * max_wait_lsn, maxperm_lsn, wait_recs, rcvd_recs, + * and ready_lsn are NOT protected + * by the log region lock. They are protected by db_rep->db_mutexp. + * If you need access to both, you must acquire db_rep->db_mutexp + * before acquiring the log region lock. * * The waiting_lsn is used by the replication system. It is the * first LSN that we are holding without putting in the log, because @@ -212,6 +224,7 @@ struct __log { DB_LSN waiting_lsn; /* First log record after a gap. */ DB_LSN verify_lsn; /* LSN we are waiting to verify. */ DB_LSN max_wait_lsn; /* Maximum LSN requested. */ + DB_LSN max_perm_lsn; /* Maximum PERMANENT LSN processed. */ u_int32_t wait_recs; /* Records to wait before requesting. */ u_int32_t rcvd_recs; /* Records received while waiting. */ /* @@ -232,18 +245,34 @@ struct __log { */ DB_LSN cached_ckp_lsn; + u_int32_t regionmax; /* Configured size of the region. */ + roff_t buffer_off; /* Log buffer offset in the region. */ u_int32_t buffer_size; /* Log buffer size. */ u_int32_t log_size; /* Log file's size. */ u_int32_t log_nsize; /* Next log file's size. */ - u_int32_t ncommit; /* Number of txns waiting to commit. */ + /* + * DB_LOG_AUTOREMOVE and DB_LOG_INMEMORY: not protected by a mutex, + * all we care about is if they're zero or non-zero. + */ + int db_log_autoremove; + int db_log_inmemory; + u_int32_t ncommit; /* Number of txns waiting to commit. */ DB_LSN t_lsn; /* LSN of first commit */ SH_TAILQ_HEAD(__commit) commits;/* list of txns waiting to commit. */ SH_TAILQ_HEAD(__free) free_commits;/* free list of commit structs. */ + /* + * In-memory logs maintain a list of the start positions of all log + * files currently active in the in-memory buffer. This is to make the + * lookup from LSN to log buffer offset efficient. + */ + SH_TAILQ_HEAD(__logfile) logfiles; + SH_TAILQ_HEAD(__free_logfile) free_logfiles; + #ifdef HAVE_MUTEX_SYSTEM_RESOURCES #define LG_MAINT_SIZE (sizeof(roff_t) * DB_MAX_HANDLES) @@ -253,8 +282,7 @@ struct __log { /* * __db_commit structure -- - * One of these is allocated for each transaction waiting - * to commit. 
+ * One of these is allocated for each transaction waiting to commit. */ struct __db_commit { DB_MUTEX mutex; /* Mutex for txn to wait on. */ @@ -265,15 +293,75 @@ struct __db_commit { u_int32_t flags; }; +/* + * Check for the proper progression of Log Sequence Numbers. + * If we are rolling forward the LSN on the page must be greater + * than or equal to the previous LSN in log record. + * We ignore NOT LOGGED LSNs. The user did an unlogged update. + * We should eventually see a log record that matches and continue + * forward. + * If truncate is supported then a ZERO LSN implies a page that was + * allocated prior to the recovery start pont and then truncated + * later in the log. An allocation of a page after this + * page will extend the file, leaving a hole. We want to + * ignore this page until it is truncated again. + * + */ + +#ifdef HAVE_FTRUNCATE +#define CHECK_LSN(redo, cmp, lsn, prev) \ + if (DB_REDO(redo) && (cmp) < 0 && \ + !IS_NOT_LOGGED_LSN(*(lsn)) && !IS_ZERO_LSN(*(lsn))) { \ + ret = __db_check_lsn(dbenv, lsn, prev); \ + goto out; \ + } +#else #define CHECK_LSN(redo, cmp, lsn, prev) \ if (DB_REDO(redo) && (cmp) < 0 && !IS_NOT_LOGGED_LSN(*(lsn))) { \ - __db_err(dbenv, \ - "Log sequence error: page LSN %lu %lu; previous LSN %lu %lu", \ - (u_long)(lsn)->file, (u_long)(lsn)->offset, \ - (u_long)(prev)->file, (u_long)(prev)->offset); \ - ret = EINVAL; \ + ret = __db_check_lsn(dbenv, lsn, prev); \ goto out; \ } +#endif + +/* + * Helper for in-memory logs -- check whether an offset is in range + * in a ring buffer (inclusive of start, exclusive of end). + */ +struct __db_filestart { + u_int32_t file; + size_t b_off; + + SH_TAILQ_ENTRY links; /* Either on free or waiting list. */ +}; + +#define RINGBUF_LEN(lp, start, end) \ + ((start) < (end) ? \ + (end) - (start) : (lp)->buffer_size - ((start) - (end))) + +/* + * Internal macro to set pointer to the begin_lsn for generated + * logging routines. If begin_lsn is already set then do nothing. + */ +#undef DB_SET_BEGIN_LSNP +#define DB_SET_BEGIN_LSNP(txn, rlsnp) do { \ + DB_LSN *__lsnp; \ + TXN_DETAIL *__td; \ + __td = (TXN_DETAIL *)R_ADDR((txn)->mgrp->dbenv, \ + &(txn)->mgrp->reginfo, (txn)->off); \ + while (__td->parent != INVALID_ROFF) \ + __td = (TXN_DETAIL *)R_ADDR((txn)->mgrp->dbenv, \ + &(txn)->mgrp->reginfo, __td->parent); \ + __lsnp = &__td->begin_lsn; \ + if (IS_ZERO_LSN(*__lsnp)) \ + *(rlsnp) = __lsnp; \ +} while (0) + +/* + * These are used in __log_backup to determine which LSN in the + * checkpoint record to compare and return. + */ +#define CKPLSN_CMP 0 +#define LASTCKP_CMP 1 /* * Status codes indicating the validity of a log file examined by diff --git a/db/dbinc/mp.h b/db/dbinc/mp.h index 0952b1c29..871bd6df9 100644 --- a/db/dbinc/mp.h +++ b/db/dbinc/mp.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: mp.h,v 11.52 2003/07/08 20:14:19 ubell Exp $ + * $Id: mp.h,v 11.61 2004/09/17 22:00:27 mjc Exp $ */ #ifndef _DB_MP_H_ @@ -85,7 +85,7 @@ struct __db_mpreg { * more frequent than a random data page. */ #define NCACHE(mp, mf_offset, pgno) \ - (((pgno) ^ ((mf_offset) >> 3)) % ((MPOOL *)mp)->nreg) + (((pgno) ^ ((u_int32_t)(mf_offset) >> 3)) % ((MPOOL *)mp)->nreg) /* * NBUCKET -- @@ -126,6 +126,12 @@ struct __mpool { SH_TAILQ_HEAD(__mpfq) mpfq; /* List of MPOOLFILEs. */ + /* Configuration information: protected by the region lock. */ + size_t mp_mmapsize; /* Maximum file size for mmap. 
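RINGBUF_LEN above measures how many bytes lie between two offsets in the new in-memory log ring buffer, handling the wrapped case. A standalone check of the arithmetic, restated as a function with buffer_size standing in for lp->buffer_size.

#include <cassert>

/* Local restatement of the RINGBUF_LEN arithmetic shown above. */
static unsigned int ringbuf_len(unsigned int buffer_size,
    unsigned int start, unsigned int end)
{
        return (start < end ? end - start : buffer_size - (start - end));
}

int main()
{
        assert(ringbuf_len(100, 10, 40) == 30); /* no wrap: simple difference */
        assert(ringbuf_len(100, 80, 20) == 40); /* wrapped past the end of the buffer */
        return (0);
}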
*/ + int mp_maxopenfd; /* Maximum open file descriptors. */ + int mp_maxwrite; /* Maximum buffers to write. */ + int mp_maxwrite_sleep; /* Sleep after writing max buffers. */ + /* * The nreg, regids and maint_off fields are not thread protected, * as they are initialized during mpool creation, and not modified @@ -148,7 +154,7 @@ struct __mpool { * The last_checked and lru_count fields are thread protected by * the region lock. */ - int htab_buckets; /* Number of hash table entries. */ + u_int32_t htab_buckets; /* Number of hash table entries. */ roff_t htab; /* Hash table offset. */ u_int32_t last_checked; /* Last bucket checked for free. */ u_int32_t lru_count; /* Counter for buffer LRU */ @@ -176,6 +182,17 @@ struct __db_mpool_hash { u_int32_t hash_page_dirty;/* Count of dirty pages. */ u_int32_t hash_priority; /* Minimum priority of bucket buffer. */ + +#ifdef HPUX_MUTEX_PAD + /* + * !!! + * We allocate the mpool hash buckets as an array, which means that + * they are not individually aligned. This fails on one platform: + * HPUX 10.20, where mutexes require 16 byte alignment. This is a + * grievous hack for that single platform. + */ + u_int8_t pad[HPUX_MUTEX_PAD]; +#endif }; /* @@ -183,7 +200,7 @@ struct __db_mpool_hash { * When the LRU counter wraps, we shift everybody down to a base-relative * value. */ -#define MPOOL_BASE_DECREMENT (UINT32_T_MAX - (UINT32_T_MAX / 4)) +#define MPOOL_BASE_DECREMENT (UINT32_MAX - (UINT32_MAX / 4)) /* * Mpool priorities from low to high. Defined in terms of fractions of the @@ -243,14 +260,18 @@ struct __mpoolfile { * There are potential races with the file_written field (many threads * may be writing blocks at the same time), and with no_backing_file * and unlink_on_close fields, as they may be set while other threads - * are reading them. However, we only care if the value of these fields - * are zero or non-zero, so don't lock the memory. + * are reading them. However, we only care if the field value is zero + * or non-zero, so don't lock the memory. * * !!! * Theoretically, a 64-bit architecture could put two of these fields * in a single memory operation and we could race. I have never seen * an architecture where that's a problem, and I believe Java requires * that to never be the case. + * + * File_written is set whenever a buffer is marked dirty in the cache. + * It can be cleared in some cases, after all dirty buffers have been + * written AND the file has been flushed to disk. */ int32_t file_written; /* File was written. */ int32_t no_backing_file; /* Never open a backing file. */ @@ -280,16 +301,23 @@ struct __mpoolfile { */ #define MP_CAN_MMAP 0x001 /* If the file can be mmap'd. */ #define MP_DIRECT 0x002 /* No OS buffering. */ -#define MP_EXTENT 0x004 /* Extent file. */ -#define MP_FAKE_DEADFILE 0x008 /* Deadfile field: fake flag. */ -#define MP_FAKE_FILEWRITTEN 0x010 /* File_written field: fake flag. */ -#define MP_FAKE_NB 0x020 /* No_backing_file field: fake flag. */ -#define MP_FAKE_UOC 0x040 /* Unlink_on_close field: fake flag. */ -#define MP_NOT_DURABLE 0x080 /* File is not durable. */ -#define MP_TEMP 0x100 /* Backing file is a temporary. */ +#define MP_DURABLE_UNKNOWN 0x004 /* We don't care about durability. */ +#define MP_EXTENT 0x008 /* Extent file. */ +#define MP_FAKE_DEADFILE 0x010 /* Deadfile field: fake flag. */ +#define MP_FAKE_FILEWRITTEN 0x020 /* File_written field: fake flag. */ +#define MP_FAKE_NB 0x040 /* No_backing_file field: fake flag. */ +#define MP_FAKE_UOC 0x080 /* Unlink_on_close field: fake flag. 
*/ +#define MP_NOT_DURABLE 0x100 /* File is not durable. */ +#define MP_TEMP 0x200 /* Backing file is a temporary. */ u_int32_t flags; }; +/* + * Flags to __memp_bh_free. + */ +#define BH_FREE_FREEMEM 0x01 +#define BH_FREE_UNLOCKED 0x02 + /* * BH -- * Buffer header. diff --git a/db/dbinc/mutex.h b/db/dbinc/mutex.h index 042a4e54a..056d34bab 100644 --- a/db/dbinc/mutex.h +++ b/db/dbinc/mutex.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: mutex.h,v 11.90 2003/09/20 21:40:49 bostic Exp $ + * $Id: mutex.h,v 11.100 2004/10/05 14:41:12 mjc Exp $ */ #ifndef _DB_MUTEX_H_ @@ -128,6 +128,7 @@ extern void _spin_unlock(tsl_t *); #ifndef MUTEX_ALIGN #define MUTEX_ALIGN 16 +#define HPUX_MUTEX_PAD 8 #endif #endif @@ -290,8 +291,7 @@ typedef SEM_ID tsl_t; * trying to initialize the global lock at the same time. */ #undef DB_BEGIN_SINGLE_THREAD -#define DB_BEGIN_SINGLE_THREAD \ -do { \ +#define DB_BEGIN_SINGLE_THREAD do { \ if (DB_GLOBAL(db_global_init)) \ (void)semTake(DB_GLOBAL(db_global_lock), WAIT_FOREVER); \ else { \ @@ -360,8 +360,10 @@ typedef unsigned int tsl_t; * platforms, and it improves performance on Pentium 4 processor platforms." */ #ifdef HAVE_MUTEX_WIN32 +#ifndef _WIN64 #define MUTEX_PAUSE {__asm{_emit 0xf3}; __asm{_emit 0x90}} #endif +#endif #ifdef HAVE_MUTEX_WIN32_GCC #define MUTEX_PAUSE asm volatile ("rep; nop" : : ); #endif @@ -375,9 +377,8 @@ typedef unsigned int tsl_t; typedef unsigned char tsl_t; #ifdef LOAD_ACTUAL_MUTEX_CODE -/* - * For gcc/68K, 0 is clear, 1 is set. - */ +#define MUTEX_SET_TEST 1 /* gcc/68K: 0 is clear, 1 is set. */ + #define MUTEX_SET(tsl) ({ \ register tsl_t *__l = (tsl); \ int __r; \ @@ -467,9 +468,8 @@ typedef volatile u_int32_t tsl_t; typedef unsigned char tsl_t; #ifdef LOAD_ACTUAL_MUTEX_CODE -/* - * For arm/gcc, 0 is clear, 1 is set. - */ +#define MUTEX_SET_TEST 1 /* gcc/arm: 0 is clear, 1 is set. */ + #define MUTEX_SET(tsl) ({ \ int __r; \ asm volatile( \ @@ -494,6 +494,7 @@ typedef u_int32_t tsl_t; #ifndef MUTEX_ALIGN #define MUTEX_ALIGN 16 +#define HPUX_MUTEX_PAD 8 #endif #ifdef LOAD_ACTUAL_MUTEX_CODE @@ -521,9 +522,8 @@ typedef u_int32_t tsl_t; typedef unsigned char tsl_t; #ifdef LOAD_ACTUAL_MUTEX_CODE -/* - * For gcc/ia64, 0 is clear, 1 is set. - */ +#define MUTEX_SET_TEST 1 /* gcc/ia64: 0 is clear, 1 is set. */ + #define MUTEX_SET(tsl) ({ \ register tsl_t *__l = (tsl); \ long __r; \ @@ -578,28 +578,30 @@ typedef u_int32_t tsl_t; * 'set' mutexes have the value 1, like on Intel; the returned value from * MUTEX_SET() is 1 if the mutex previously had its low bit clear, 0 otherwise. */ -#ifdef HAVE_MUTEX_PPC_GCC_ASSEMBLY +#define MUTEX_SET_TEST 1 /* gcc/ppc: 0 is clear, 1 is set. */ + static inline int MUTEX_SET(int *tsl) { int __r; - asm volatile ( + int __tmp = (int)tsl; + asm volatile ( "0: \n\t" -" lwarx %0,0,%1 \n\t" +" lwarx %0,0,%2 \n\t" " cmpwi %0,0 \n\t" " bne- 1f \n\t" -" stwcx. %1,0,%1 \n\t" +" stwcx. 
%2,0,%2 \n\t" " isync \n\t" " beq+ 2f \n\t" " b 0b \n\t" "1: \n\t" " li %1, 0 \n\t" "2: \n\t" - : "=&r" (__r), "+r" (tsl) - : + : "=&r" (__r), "=r" (tsl) + : "r" (__tmp) : "cr0", "memory"); return (int)tsl; } -#endif + static inline int MUTEX_UNSET(tsl_t *tsl) { asm volatile("sync" : : : "memory"); @@ -616,7 +618,7 @@ MUTEX_UNSET(tsl_t *tsl) { typedef int tsl_t; #ifndef MUTEX_ALIGN -#define MUTEX_ALIGN sizeof(int) +#define MUTEX_ALIGN sizeof(int) #endif #ifdef LOAD_ACTUAL_MUTEX_CODE @@ -637,9 +639,8 @@ typedef int tsl_t; typedef int tsl_t; #ifdef LOAD_ACTUAL_MUTEX_CODE -/* - * For gcc/S390, 0 is clear, 1 is set. - */ +#define MUTEX_SET_TEST 1 /* gcc/S390: 0 is clear, 1 is set. */ + static inline int MUTEX_SET(tsl_t *tsl) { \ register tsl_t *__l = (tsl); \ @@ -669,9 +670,8 @@ typedef unsigned char tsl_t; #ifdef LOAD_ACTUAL_MUTEX_CODE /* * UnixWare has threads in libthread, but OpenServer doesn't (yet). - * - * For cc/x86, 0 is clear, 1 is set. */ +#define MUTEX_SET_TEST 1 /* cc/x86: 0 is clear, 1 is set. */ #if defined(__USLC__) asm int @@ -711,9 +711,9 @@ typedef unsigned char tsl_t; * so is functional there as well. For v7, stbar may generate an illegal * instruction and we have no way to tell what we're running on. Some * operating systems notice and skip this instruction in the fault handler. - * - * For gcc/sparc, 0 is clear, 1 is set. */ +#define MUTEX_SET_TEST 1 /* gcc/sparc: 0 is clear, 1 is set. */ + #define MUTEX_SET(tsl) ({ \ register tsl_t *__l = (tsl); \ register tsl_t __r; \ @@ -752,9 +752,8 @@ typedef int tsl_t; typedef unsigned char tsl_t; #ifdef LOAD_ACTUAL_MUTEX_CODE -/* - * For gcc/x86, 0 is clear, 1 is set. - */ +#define MUTEX_SET_TEST 1 /* gcc/x86: 0 is clear, 1 is set. */ + #define MUTEX_SET(tsl) ({ \ register tsl_t *__l = (tsl); \ int __r; \ @@ -805,7 +804,7 @@ typedef unsigned char tsl_t; /* * !!! - * The flag arguments for __db_mutex_setup (and the underyling intialization + * The flag arguments for __db_mutex_setup (and the underlying initialization * function for the mutex type, for example, __db_tas_mutex_init), and flags * stored in the DB_MUTEX structure are combined, and may not overlap. Flags * to __db_mutex_setup: @@ -866,6 +865,11 @@ struct __mutex_t { u_int32_t flags; /* MUTEX_XXX */ }; +/* Macro to clear mutex statistics. */ +#define MUTEX_CLEAR(mp) { \ + (mp)->mutex_set_wait = (mp)->mutex_set_nowait = 0; \ +} + /* Redirect calls to the correct functions. */ #ifdef HAVE_MUTEX_THREADS #if defined(HAVE_MUTEX_PTHREADS) || \ @@ -875,7 +879,8 @@ struct __mutex_t { #define __db_mutex_lock(a, b) __db_pthread_mutex_lock(a, b) #define __db_mutex_unlock(a, b) __db_pthread_mutex_unlock(a, b) #define __db_mutex_destroy(a) __db_pthread_mutex_destroy(a) -#elif defined(HAVE_MUTEX_WIN32) || defined(HAVE_MUTEX_WIN32_GCC) +#else +#if defined(HAVE_MUTEX_WIN32) || defined(HAVE_MUTEX_WIN32_GCC) #define __db_mutex_init_int(a, b, c, d) __db_win32_mutex_init(a, b, d) #define __db_mutex_lock(a, b) __db_win32_mutex_lock(a, b) #define __db_mutex_unlock(a, b) __db_win32_mutex_unlock(a, b) @@ -886,6 +891,7 @@ struct __mutex_t { #define __db_mutex_unlock(a, b) __db_tas_mutex_unlock(a, b) #define __db_mutex_destroy(a) __db_tas_mutex_destroy(a) #endif +#endif #else #define __db_mutex_init_int(a, b, c, d) __db_fcntl_mutex_init(a, b, c) #define __db_mutex_lock(a, b) __db_fcntl_mutex_lock(a, b) diff --git a/db/dbinc/os.h b/db/dbinc/os.h index 548170646..24685a4a7 100644 --- a/db/dbinc/os.h +++ b/db/dbinc/os.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. 
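
Across all of these architectures the convention is the same: MUTEX_SET acquires the test-and-set word and returns non-zero when the lock was obtained, MUTEX_UNSET releases it, and MUTEX_PAUSE (where defined) is a hint inside the spin loop. The loop below is a generic illustration of that pattern, not the actual __db_tas_mutex_lock code; the tsl_t it uses is whichever per-architecture typedef is in effect.

/* Illustrative spin on a test-and-set word using the primitives above. */
static void
spin_lock_sketch(tsl_t *tsl)
{
	while (MUTEX_SET(tsl) == 0) {
#ifdef MUTEX_PAUSE
		MUTEX_PAUSE;
#endif
	}
}

static void
spin_unlock_sketch(tsl_t *tsl)
{
	MUTEX_UNSET(tsl);
}
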
* - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: os.h,v 11.18 2003/03/11 14:59:29 bostic Exp $ + * $Id: os.h,v 11.25 2004/09/22 03:40:20 bostic Exp $ */ #ifndef _DB_OS_H_ @@ -14,18 +14,30 @@ extern "C" { #endif +/* Number of times to retry system calls that return EINTR or EBUSY. */ +#define DB_RETRY 100 + +#define RETRY_CHK(op, ret) do { \ + int __retries = DB_RETRY; \ + do { \ + (ret) = (op); \ + } while ((ret) != 0 && (((ret) = __os_get_errno()) == EAGAIN || \ + (ret) == EBUSY || (ret) == EINTR) && --__retries > 0); \ +} while (0) + /* * Flags understood by __os_open. */ #define DB_OSO_CREATE 0x0001 /* POSIX: O_CREAT */ #define DB_OSO_DIRECT 0x0002 /* Don't buffer the file in the OS. */ -#define DB_OSO_EXCL 0x0004 /* POSIX: O_EXCL */ -#define DB_OSO_LOG 0x0008 /* Opening a log file. */ -#define DB_OSO_RDONLY 0x0010 /* POSIX: O_RDONLY */ -#define DB_OSO_REGION 0x0020 /* Opening a region file. */ -#define DB_OSO_SEQ 0x0040 /* Expected sequential access. */ -#define DB_OSO_TEMP 0x0080 /* Remove after last close. */ -#define DB_OSO_TRUNC 0x0100 /* POSIX: O_TRUNC */ +#define DB_OSO_DSYNC 0x0004 /* POSIX: O_DSYNC. */ +#define DB_OSO_EXCL 0x0008 /* POSIX: O_EXCL */ +#define DB_OSO_LOG 0x0010 /* Opening a log file. */ +#define DB_OSO_RDONLY 0x0020 /* POSIX: O_RDONLY */ +#define DB_OSO_REGION 0x0040 /* Opening a region file. */ +#define DB_OSO_SEQ 0x0080 /* Expected sequential access. */ +#define DB_OSO_TEMP 0x0100 /* Remove after last close. */ +#define DB_OSO_TRUNC 0x0200 /* POSIX: O_TRUNC */ /* * Seek options understood by __os_seek. @@ -65,7 +77,7 @@ struct __fh_t { * Last seek statistics, used for zero-filling on filesystems * that don't support it directly. */ - u_int32_t pgno; + db_pgno_t pgno; u_int32_t pgsize; u_int32_t offset; diff --git a/db/dbinc/qam.h b/db/dbinc/qam.h index 98a64cc24..43910d01d 100644 --- a/db/dbinc/qam.h +++ b/db/dbinc/qam.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. * - * $Id: qam.h,v 11.44 2003/10/01 20:03:41 ubell Exp $ + * $Id: qam.h,v 11.49 2004/09/17 22:00:27 mjc Exp $ */ #ifndef _DB_QAM_H_ @@ -78,7 +78,7 @@ typedef struct __qam_filelist { } QUEUE_FILELIST; /* - * Caculate the page number of a recno + * Calculate the page number of a recno. * * Number of records per page = * Divide the available space on the page by the record len + header. @@ -94,7 +94,7 @@ typedef struct __qam_filelist { */ #define CALC_QAM_RECNO_PER_PAGE(dbp) \ (((dbp)->pgsize - QPAGE_SZ(dbp)) / \ - ALIGN((db_align_t)SSZA(QAMDATA, data) + \ + (u_int32_t)DB_ALIGN((uintmax_t)SSZA(QAMDATA, data) + \ ((QUEUE *)(dbp)->q_internal)->re_len, sizeof(u_int32_t))) #define QAM_RECNO_PER_PAGE(dbp) (((QUEUE*)(dbp)->q_internal)->rec_page) @@ -115,7 +115,7 @@ typedef struct __qam_filelist { #define QAM_GET_RECORD(dbp, page, index) \ ((QAMDATA *)((u_int8_t *)(page) + (QPAGE_SZ(dbp) + \ - (ALIGN((db_align_t)SSZA(QAMDATA, data) + \ + (DB_ALIGN((uintmax_t)SSZA(QAMDATA, data) + \ ((QUEUE *)(dbp)->q_internal)->re_len, sizeof(u_int32_t)) * index)))) #define QAM_AFTER_CURRENT(meta, recno) \ diff --git a/db/dbinc/region.h b/db/dbinc/region.h index f69f40ba2..805acb1ea 100644 --- a/db/dbinc/region.h +++ b/db/dbinc/region.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1998-2003 + * Copyright (c) 1998-2004 * Sleepycat Software. All rights reserved. 
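
RETRY_CHK above is used by wrapping the raw system call; a minimal sketch follows, assuming <unistd.h> and <string.h> and an already-open descriptor fd. The function name is invented, and the real callers are the os_*.c wrappers.

static int
os_close_sketch(DB_ENV *dbenv, int fd)
{
	int ret;

	/* Retry close(2) while it fails with EINTR/EBUSY/EAGAIN. */
	RETRY_CHK((close(fd)), ret);
	if (ret != 0)
		__db_err(dbenv, "close: %s", strerror(ret));
	return (ret);
}
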
* - * $Id: region.h,v 11.38 2003/07/14 13:44:59 bostic Exp $ + * $Id: region.h,v 11.50 2004/09/15 21:49:12 mjc Exp $ */ #ifndef _DB_REGION_H_ @@ -53,7 +53,7 @@ * or joining the REGENV file, i.e., __db.001. We have to be absolutely sure * that only one process creates it, and that everyone else joins it without * seeing inconsistent data. Once that region is created, we can use normal - * shared locking procedures to do mutal exclusion for all other regions. + * shared locking procedures to do mutual exclusion for all other regions. * * One of the REGION structures in the main environment region describes the * environment region itself. @@ -114,7 +114,7 @@ typedef enum { REGION_TYPE_LOG, REGION_TYPE_MPOOL, REGION_TYPE_MUTEX, - REGION_TYPE_TXN } reg_type; + REGION_TYPE_TXN } reg_type_t; #define INVALID_REGION_SEGID -1 /* Segment IDs are either shmget(2) or * Win16 segment identifiers. They are @@ -150,6 +150,7 @@ typedef struct __db_reg_env { * zero/non-zero value. */ u_int32_t magic; /* Valid region magic number. */ + u_int32_t envid; /* Unique environment ID. */ int envpanic; /* Environment is dead. */ @@ -166,6 +167,11 @@ typedef struct __db_reg_env { u_int32_t refcnt; /* References to the environment. */ roff_t rep_off; /* Offset of the replication area. */ +#define DB_REGENV_REPLOCKED 0x0001 /* Env locked for rep backup. */ + u_int32_t flags; /* Shared environment flags. */ +#define DB_REGENV_TIMEOUT 30 /* Backup timeout. */ + time_t op_timestamp; /* Timestamp for operations. */ + time_t rep_timestamp; /* Timestamp for rep db handles. */ size_t pad; /* Guarantee that following memory is * size_t aligned. This is necessary @@ -185,10 +191,11 @@ typedef struct __db_region { SH_LIST_ENTRY q; /* Linked list of REGIONs. */ - reg_type type; /* Region type. */ + reg_type_t type; /* Region type. */ u_int32_t id; /* Region id. */ - roff_t size; /* Region size in bytes. */ + roff_t size_orig; /* Region size in bytes (original). */ + roff_t size; /* Region size in bytes (adjusted). */ roff_t primary; /* Primary data structure offset. */ @@ -199,18 +206,22 @@ typedef struct __db_region { * Per-process/per-attachment information about a single region. */ struct __db_reginfo_t { /* __db_r_attach IN parameters. */ - reg_type type; /* Region type. */ + DB_ENV *dbenv; /* Enclosing environment. */ + reg_type_t type; /* Region type. */ u_int32_t id; /* Region id. */ - int mode; /* File creation mode. */ /* __db_r_attach OUT parameters. */ REGION *rp; /* Shared region. */ char *name; /* Region file name. */ - void *addr; /* Region allocation address. */ + void *addr_orig; /* Region address (original). */ + void *addr; /* Region address (adjusted). */ void *primary; /* Primary data structure address. */ + size_t max_alloc; /* Maximum bytes allocated. */ + size_t allocated; /* Bytes allocated. */ + #ifdef DB_WIN32 HANDLE wnt_handle; /* Win/NT HANDLE. */ #endif @@ -244,15 +255,13 @@ typedef struct __db_regmaint_t { /* * R_ADDR Return a per-process address for a shared region offset. * R_OFFSET Return a shared region offset for a per-process address. - * - * !!! - * R_OFFSET should really be returning a ptrdiff_t, but that's not yet - * portable. We use u_int32_t, which restricts regions to 4Gb in size. */ -#define R_ADDR(base, offset) \ - ((void *)((u_int8_t *)((base)->addr) + offset)) -#define R_OFFSET(base, p) \ - ((u_int32_t)((u_int8_t *)(p) - (u_int8_t *)(base)->addr)) +#define R_ADDR(dbenv, base, offset) \ + (F_ISSET((dbenv), DB_ENV_PRIVATE) ? 
(void *)(offset) : \ + (void *)((u_int8_t *)((base)->addr) + (offset))) +#define R_OFFSET(dbenv, base, p) \ + (F_ISSET((dbenv), DB_ENV_PRIVATE) ? (roff_t)(p) : \ + (roff_t)((u_int8_t *)(p) - (u_int8_t *)(base)->addr)) /* * R_LOCK Lock/unlock a region. @@ -283,7 +292,7 @@ typedef struct __db_regmaint_t { #define OS_VMPAGESIZE (8 * 1024) #define OS_VMROUNDOFF(i) { \ if ((i) < \ - (UINT32_T_MAX - OS_VMPAGESIZE) + 1 || (i) < OS_VMPAGESIZE) \ + (UINT32_MAX - OS_VMPAGESIZE) + 1 || (i) < OS_VMPAGESIZE) \ (i) += OS_VMPAGESIZE - 1; \ (i) -= (i) % OS_VMPAGESIZE; \ } diff --git a/db/dbinc/rep.h b/db/dbinc/rep.h index 1a016ca74..ec1f290f4 100644 --- a/db/dbinc/rep.h +++ b/db/dbinc/rep.h @@ -1,41 +1,101 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. */ #ifndef _REP_H_ #define _REP_H_ +#include "dbinc_auto/rep_auto.h" + #define REP_ALIVE 1 /* I am alive message. */ #define REP_ALIVE_REQ 2 /* Request for alive messages. */ #define REP_ALL_REQ 3 /* Request all log records greater than LSN. */ #define REP_DUPMASTER 4 /* Duplicate master detected; propagate. */ -#define REP_FILE 5 /* Page of a database file. */ -#define REP_FILE_REQ 6 /* Request for a database file. */ -#define REP_LOG 7 /* Log record. */ -#define REP_LOG_MORE 8 /* There are more log records to request. */ -#define REP_LOG_REQ 9 /* Request for a log record. */ -#define REP_MASTER_REQ 10 /* Who is the master */ -#define REP_NEWCLIENT 11 /* Announces the presence of a new client. */ -#define REP_NEWFILE 12 /* Announce a log file change. */ -#define REP_NEWMASTER 13 /* Announces who the master is. */ -#define REP_NEWSITE 14 /* Announces that a site has heard from a new +#define REP_FILE 5 /* Page of a database file. NOTUSED */ +#define REP_FILE_FAIL 6 /* File requested does not exist. */ +#define REP_FILE_REQ 7 /* Request for a database file. NOTUSED */ +#define REP_LOG 8 /* Log record. */ +#define REP_LOG_MORE 9 /* There are more log records to request. */ +#define REP_LOG_REQ 10 /* Request for a log record. */ +#define REP_MASTER_REQ 11 /* Who is the master */ +#define REP_NEWCLIENT 12 /* Announces the presence of a new client. */ +#define REP_NEWFILE 13 /* Announce a log file change. */ +#define REP_NEWMASTER 14 /* Announces who the master is. */ +#define REP_NEWSITE 15 /* Announces that a site has heard from a new * site; like NEWCLIENT, but indirect. A * NEWCLIENT message comes directly from the new * client while a NEWSITE comes indirectly from * someone who heard about a NEWSITE. */ -#define REP_PAGE 15 /* Database page. */ -#define REP_PAGE_REQ 16 /* Request for a database page. */ -#define REP_PLIST 17 /* Database page list. */ -#define REP_PLIST_REQ 18 /* Request for a page list. */ -#define REP_VERIFY 19 /* A log record for verification. */ -#define REP_VERIFY_FAIL 20 /* The client is outdated. */ -#define REP_VERIFY_REQ 21 /* Request for a log record to verify. */ -#define REP_VOTE1 22 /* Send out your information for an election. */ -#define REP_VOTE2 23 /* Send a "you are master" vote. */ +#define REP_PAGE 16 /* Database page. */ +#define REP_PAGE_FAIL 17 /* Requested page does not exist. */ +#define REP_PAGE_MORE 18 /* There are more pages to request. */ +#define REP_PAGE_REQ 19 /* Request for a database page. */ +#define REP_UPDATE 20 /* Environment hotcopy information. */ +#define REP_UPDATE_REQ 21 /* Request for hotcopy information. */ +#define REP_VERIFY 22 /* A log record for verification. 
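
R_ADDR and R_OFFSET above are inverses of each other, with a pass-through case for DB_ENV_PRIVATE environments where "offsets" are simply pointers. A small sketch of the round trip, assuming infop is the REGINFO for an already-attached region and td points into that region; the function name is invented.

static int
region_addr_sketch(DB_ENV *dbenv, REGINFO *infop, TXN_DETAIL *td)
{
	roff_t off;

	off = R_OFFSET(dbenv, infop, td);	/* Pointer -> region offset. */
	/* Offset -> pointer again; non-zero means the round trip matched. */
	return (R_ADDR(dbenv, infop, off) == (void *)td);
}
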
*/ +#define REP_VERIFY_FAIL 23 /* The client is outdated. */ +#define REP_VERIFY_REQ 24 /* Request for a log record to verify. */ +#define REP_VOTE1 25 /* Send out your information for an election. */ +#define REP_VOTE2 26 /* Send a "you are master" vote. */ + +/* + * REP_PRINT_MESSAGE + * A function to print a debugging message. + * + * RPRINT + * A macro for debug printing. Takes as an arg the arg set for __db_msg. + * + * !!! This function assumes a local DB_MSGBUF variable called 'mb'. + */ +#ifdef DIAGNOSTIC +#define REP_PRINT_MESSAGE(dbenv, eid, rp, str) \ + __rep_print_message(dbenv, eid, rp, str) +#define RPRINT(e, r, x) do { \ + if (FLD_ISSET((e)->verbose, DB_VERB_REPLICATION)) { \ + DB_MSGBUF_INIT(&mb); \ + if ((e)->db_errpfx == NULL) { \ + if (F_ISSET((r), REP_F_CLIENT)) \ + __db_msgadd((e), &mb, "CLIENT: "); \ + else if (F_ISSET((r), REP_F_MASTER)) \ + __db_msgadd((e), &mb, "MASTER: "); \ + else \ + __db_msgadd((e), &mb, "REP_UNDEF: "); \ + } else \ + __db_msgadd((e), &mb, "%s: ",(e)->db_errpfx); \ + __db_msgadd x; \ + DB_MSGBUF_FLUSH((e), &mb); \ + } \ +} while (0) +#else +#define REP_PRINT_MESSAGE(dbenv, eid, rp, str) +#define RPRINT(e, r, x) +#endif + +/* + * Election gen file name + * The file contains an egen number for an election this client + * has NOT participated in. I.e. it is the number of a future + * election. We create it when we create the rep region, if it + * doesn't already exist and initialize egen to 1. If it does + * exist, we read it when we create the rep region. We write it + * immediately before sending our VOTE1 in an election. That way, + * if a client has ever sent a vote for any election, the file is + * already going to be updated to reflect a future election, + * should it crash. + */ +#define REP_EGENNAME "__db.rep.egen" + +/* + * Database types for __rep_client_dbinit + */ +typedef enum { + REP_DB, /* Log record database. */ + REP_PG /* Pg database. */ +} repdb_t; /* Shared replication structure. */ @@ -48,8 +108,8 @@ typedef struct __rep { */ DB_MUTEX mutex; /* Region lock. */ roff_t db_mutex_off; /* Client database mutex. */ - u_int32_t tally_off; /* Offset of the tally region. */ - u_int32_t v2tally_off; /* Offset of the vote2 tally region. */ + roff_t tally_off; /* Offset of the tally region. */ + roff_t v2tally_off; /* Offset of the vote2 tally region. */ int eid; /* Environment id. */ int master_id; /* ID of the master site. */ u_int32_t egen; /* Replication election generation. */ @@ -57,6 +117,7 @@ typedef struct __rep { u_int32_t recover_gen; /* Last generation number in log. */ int asites; /* Space allocated for sites. */ int nsites; /* Number of sites in group. */ + int nvotes; /* Number of votes needed. */ int priority; /* My priority in an election. */ u_int32_t gbytes; /* Limit on data sent in single... */ u_int32_t bytes; /* __rep_process_message call. */ @@ -67,12 +128,29 @@ typedef struct __rep { u_int32_t max_gap; /* Maximum number of records before * requesting a missing log record. */ /* Status change information */ + int elect_th; /* A thread is in rep_elect. */ u_int32_t msg_th; /* Number of callers in rep_proc_msg. */ int start_th; /* A thread is in rep_start. */ u_int32_t handle_cnt; /* Count of handles in library. */ u_int32_t op_cnt; /* Multi-step operation count.*/ int in_recovery; /* Running recovery now. */ - time_t timestamp; /* Recovery timestamp. */ + + /* Backup information. */ + int nfiles; /* Number of files we have info on. */ + int curfile; /* Current file we're getting. 
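
Given that RPRINT above expects a local DB_MSGBUF named 'mb' and takes the full __db_msgadd argument list as its third argument, a call site looks roughly like the sketch below; the function and message text are invented for illustration.

static void
rprint_sketch(DB_ENV *dbenv, REP *rep, int eid)
{
#ifdef DIAGNOSTIC
	DB_MSGBUF mb;			/* RPRINT requires a local 'mb'. */
#endif

	RPRINT(dbenv, rep,
	    (dbenv, &mb, "heard from site %d", eid));
}
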
*/ + __rep_fileinfo_args *curinfo; /* Current file info ptr. */ + void *finfo; /* Current file info buffer. */ + void *nextinfo; /* Next file info buffer. */ + void *originfo; /* Original file info buffer. */ + DB_LSN first_lsn; /* Earliest LSN we need. */ + DB_LSN last_lsn; /* Latest LSN we need. */ + db_pgno_t ready_pg; /* Next pg expected. */ + db_pgno_t waiting_pg; /* First pg after gap. */ + db_pgno_t max_wait_pg; /* Maximum pg requested. */ + u_int32_t npages; /* Num of pages rcvd for this file. */ + DB_MPOOLFILE *file_mpf; /* Mpoolfile for in-mem database. */ + DB *file_dbp; /* This file's page info. */ + DB *queue_dbp; /* Dbp for a queue file. */ /* Vote tallying information. */ int sites; /* Sites heard from. */ @@ -80,26 +158,36 @@ typedef struct __rep { int w_priority; /* Winner priority. */ u_int32_t w_gen; /* Winner generation. */ DB_LSN w_lsn; /* Winner LSN. */ - int w_tiebreaker; /* Winner tiebreaking value. */ + u_int32_t w_tiebreaker; /* Winner tiebreaking value. */ int votes; /* Number of votes for this site. */ /* Statistics. */ DB_REP_STAT stat; -#define REP_F_EPHASE1 0x001 /* In phase 1 of election. */ -#define REP_F_EPHASE2 0x002 /* In phase 2 of election. */ -#define REP_F_LOGSONLY 0x004 /* Log only; can't upgrade. */ -#define REP_F_MASTER 0x008 /* Master replica. */ -#define REP_F_MASTERELECT 0x010 /* Master elect */ -#define REP_F_NOARCHIVE 0x020 /* Rep blocks log_archive */ -#define REP_F_READY 0x040 /* Wait for txn_cnt to be 0. */ -#define REP_F_RECOVER 0x080 /* In recovery. */ -#define REP_F_TALLY 0x100 /* Tallied vote before elect. */ -#define REP_F_UPGRADE 0x200 /* Upgradeable replica. */ -#define REP_ISCLIENT (REP_F_UPGRADE | REP_F_LOGSONLY) +#define REP_F_CLIENT 0x00001 /* Client replica. */ +#define REP_F_EPHASE1 0x00002 /* In phase 1 of election. */ +#define REP_F_EPHASE2 0x00004 /* In phase 2 of election. */ +#define REP_F_MASTER 0x00008 /* Master replica. */ +#define REP_F_MASTERELECT 0x00010 /* Master elect */ +#define REP_F_NOARCHIVE 0x00020 /* Rep blocks log_archive */ +#define REP_F_READY 0x00040 /* Wait for txn_cnt to be 0. */ +#define REP_F_RECOVER_LOG 0x00080 /* In recovery - log. */ +#define REP_F_RECOVER_PAGE 0x00100 /* In recovery - pages. */ +#define REP_F_RECOVER_UPDATE 0x00200 /* In recovery - files. */ +#define REP_F_RECOVER_VERIFY 0x00400 /* In recovery - verify. */ +#define REP_F_TALLY 0x00800 /* Tallied vote before elect. */ u_int32_t flags; } REP; +/* + * Recovery flag mask to easily check any/all recovery bits. That is + * REP_F_READY and all REP_F_RECOVER*. This must change if the values + * of the flags change. 
+ */ +#define REP_F_RECOVER_MASK \ + (REP_F_READY | REP_F_RECOVER_LOG | REP_F_RECOVER_PAGE | \ + REP_F_RECOVER_UPDATE | REP_F_RECOVER_VERIFY) + #define IN_ELECTION(R) F_ISSET((R), REP_F_EPHASE1 | REP_F_EPHASE2) #define IN_ELECTION_TALLY(R) \ F_ISSET((R), REP_F_EPHASE1 | REP_F_EPHASE2 | REP_F_TALLY) @@ -111,12 +199,12 @@ typedef struct __rep { #define IS_REP_CLIENT(dbenv) \ (REP_ON(dbenv) && ((DB_REP *)(dbenv)->rep_handle)->region && \ F_ISSET(((REP *)((DB_REP *)(dbenv)->rep_handle)->region), \ - REP_ISCLIENT)) + REP_F_CLIENT)) -#define IS_REP_LOGSONLY(dbenv) \ - (REP_ON(dbenv) && ((DB_REP *)(dbenv)->rep_handle)->region && \ - F_ISSET(((REP *)((DB_REP *)(dbenv)->rep_handle)->region), \ - REP_F_LOGSONLY)) +#define IS_CLIENT_PGRECOVER(dbenv) \ + (IS_REP_CLIENT(dbenv) && \ + F_ISSET(((REP *)((DB_REP *)(dbenv)->rep_handle)->region), \ + REP_F_RECOVER_PAGE)) /* * Macros to figure out if we need to do replication pre/post-amble @@ -127,7 +215,7 @@ typedef struct __rep { REP_ON(E) && ((DB_REP *)((E)->rep_handle))->region != NULL && \ ((DB_REP *)((E)->rep_handle))->region->flags != 0) -#define IS_ENV_REPLICATED(E) (!IS_RECOVERING(E) && REP_ON(E) && \ +#define IS_ENV_REPLICATED(E) (REP_ON(E) && \ ((DB_REP *)((E)->rep_handle))->region != NULL && \ ((DB_REP *)((E)->rep_handle))->region->flags != 0) @@ -136,9 +224,9 @@ typedef struct __rep { * * There are 2 mutexes used in replication. * 1. rep_mutexp - This protects the fields of the rep region above. - * 2. db_mutexp - This protects the bookkeeping database and all - * of the components that maintain it. Those components include - * the following fields in the log region (see log.h): + * 2. db_mutexp - This protects the per-process flags, and bookkeeping + * database and all of the components that maintain it. Those + * components include the following fields in the log region (see log.h): * a. ready_lsn * b. waiting_lsn * c. verify_lsn @@ -148,6 +236,11 @@ typedef struct __rep { * These fields in the log region are NOT protected by the log * region lock at all. * + * Note that the per-process flags should truly be protected by a + * special per-process thread mutex, but it is currently set in so + * isolated a manner that it didn't make sense to do so and in most + * case we're already holding the db_mutexp anyway. + * * The lock ordering protocol is that db_mutexp must be acquired * first and then either rep_mutexp, or the log region mutex may * be acquired if necessary. @@ -159,6 +252,8 @@ struct __db_rep { DB *rep_db; /* Bookkeeping database. */ REP *region; /* In memory structure. */ +#define DBREP_OPENFILES 0x0001 /* This handle has opened files. */ + u_int32_t flags; /* per-process flags. */ }; /* @@ -167,10 +262,10 @@ struct __db_rep { * Note that the version information should be at the beginning of the * structure, so that we can rearrange the rest of it while letting the * version checks continue to work. DB_REPVERSION should be revved any time - * the rest of the structure changes. + * the rest of the structure changes or when the message numbers change. */ typedef struct __rep_control { -#define DB_REPVERSION 1 +#define DB_REPVERSION 2 u_int32_t rep_version; /* Replication version number. */ u_int32_t log_version; /* Log version number. */ @@ -185,8 +280,9 @@ typedef struct __rep_vote { u_int32_t egen; /* Election generation. */ int nsites; /* Number of sites I've been in * communication with. */ + int nvotes; /* Number of votes needed to win. */ int priority; /* My site's priority. */ - int tiebreaker; /* Tie-breaking quasi-random int. 
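
A quick illustration of how REP_F_RECOVER_MASK above is meant to be used (a sketch, not code from the replication source): any of those bits being set means the client is still in some phase of internal initialization or recovery.

/* Non-zero while the client is in any recovery/ready phase. */
static int
rep_in_recovery_sketch(REP *rep)
{
	return (F_ISSET(rep, REP_F_RECOVER_MASK) ? 1 : 0);
}
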
*/ + u_int32_t tiebreaker; /* Tie-breaking quasi-random value. */ } REP_VOTE_INFO; typedef struct __rep_vtally { @@ -200,25 +296,9 @@ typedef struct __rep_vtally { * we can obtain locks and apply updates in a deadlock free * order. */ -typedef struct __lsn_page { - DB_LSN lsn; - int32_t fid; - DB_LOCK_ILOCK pgdesc; -#define LSN_PAGE_NOLOCK 0x0001 /* No lock necessary for log rec. */ - u_int32_t flags; -} LSN_PAGE; - -typedef struct __txn_recs { - int npages; - int nalloc; - LSN_PAGE *array; - u_int32_t txnid; - u_int32_t lockid; -} TXN_RECS; - typedef struct __lsn_collection { - int nlsns; - int nalloc; + u_int nlsns; + u_int nalloc; DB_LSN *array; } LSN_COLLECTION; diff --git a/db/dbinc/shqueue.h b/db/dbinc/shqueue.h index 20a5234c7..8d7e4eef9 100644 --- a/db/dbinc/shqueue.h +++ b/db/dbinc/shqueue.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: shqueue.h,v 11.13 2003/04/24 15:41:02 bostic Exp $ + * $Id: shqueue.h,v 11.15 2004/03/24 20:37:37 bostic Exp $ */ #ifndef _SYS_SHQUEUE_H_ @@ -95,7 +95,7 @@ struct { \ /* * Given correct A.next: B.prev = SH_LIST_NEXT_TO_PREV(A) * in a list [A, B] - * The prev value is always the offset from an element to its preceeding + * The prev value is always the offset from an element to its preceding * element's next location, not the beginning of the structure. To get * to the beginning of an element structure in memory given an element * do the following: @@ -237,7 +237,7 @@ struct { \ /* * Given correct A.next: B.prev = SH_TAILQ_NEXT_TO_PREV(A) * in a list [A, B] - * The prev value is always the offset from an element to its preceeding + * The prev value is always the offset from an element to its preceding * element's next location, not the beginning of the structure. To get * to the beginning of an element structure in memory given an element * do the following: diff --git a/db/dbinc/tcl_db.h b/db/dbinc/tcl_db.h index b25a7cc6b..f1adea5ae 100644 --- a/db/dbinc/tcl_db.h +++ b/db/dbinc/tcl_db.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. * - * $Id: tcl_db.h,v 11.31 2003/01/08 04:32:44 bostic Exp $ + * $Id: tcl_db.h,v 11.40 2004/09/22 03:40:20 bostic Exp $ */ #ifndef _DB_TCL_DB_H_ @@ -13,7 +13,8 @@ #define MSG_SIZE 100 /* Message size */ enum INFOTYPE { - I_ENV, I_DB, I_DBC, I_TXN, I_MP, I_PG, I_LOCK, I_LOGC, I_NDBM, I_MUTEX }; + I_ENV, I_DB, I_DBC, I_TXN, I_MP, + I_PG, I_LOCK, I_LOGC, I_NDBM, I_MUTEX, I_SEQ}; #define MAX_ID 8 /* Maximum number of sub-id's we need */ #define DBTCL_PREP 64 /* Size of txn_recover preplist */ @@ -25,7 +26,7 @@ typedef struct _mutex_entry { union { struct { DB_MUTEX real_m; - u_int32_t real_val; + int real_val; } r; /* * This is here to make sure that each of the mutex structures @@ -46,7 +47,6 @@ typedef struct _mutex_data { REGINFO reginfo; _MUTEX_ENTRY *marray; size_t size; - u_int32_t n_mutex; } _MUTEX_DATA; /* @@ -74,7 +74,7 @@ typedef struct _mutex_data { * I believe the number of simultaneous DB widgets in existence at one time * is not going to be that large (more than several dozen) such that * linearly searching the list is not going to impact performance in a - * noticable way. Should performance be impacted due to the size of the + * noticeable way. 
Should performance be impacted due to the size of the * info list, then perhaps it is time to revisit this decision. */ typedef struct dbtcl_info { @@ -100,7 +100,7 @@ typedef struct dbtcl_info { } und; union data2 { int anydata; - size_t pagesz; + int pagesz; } und2; DBT i_lockobj; FILE *i_err; @@ -156,26 +156,39 @@ typedef struct dbtcl_global { extern DBTCL_GLOBAL __dbtcl_global; -#define NAME_TO_ENV(name) (DB_ENV *)_NameToPtr((name)) -#define NAME_TO_DB(name) (DB *)_NameToPtr((name)) -#define NAME_TO_DBC(name) (DBC *)_NameToPtr((name)) -#define NAME_TO_TXN(name) (DB_TXN *)_NameToPtr((name)) -#define NAME_TO_MP(name) (DB_MPOOLFILE *)_NameToPtr((name)) -#define NAME_TO_LOCK(name) (DB_LOCK *)_NameToPtr((name)) +/* + * Tcl_NewStringObj takes an "int" length argument, when the typical use is to + * call it with a size_t length (for example, returned by strlen). Tcl is in + * the wrong, but that doesn't help us much -- cast the argument. + */ +#define NewStringObj(a, b) \ + Tcl_NewStringObj(a, (int)b) + +#define NAME_TO_DB(name) (DB *)_NameToPtr((name)) +#define NAME_TO_DBC(name) (DBC *)_NameToPtr((name)) +#define NAME_TO_ENV(name) (DB_ENV *)_NameToPtr((name)) +#define NAME_TO_LOCK(name) (DB_LOCK *)_NameToPtr((name)) +#define NAME_TO_MP(name) (DB_MPOOLFILE *)_NameToPtr((name)) +#define NAME_TO_TXN(name) (DB_TXN *)_NameToPtr((name)) +#define NAME_TO_SEQUENCE(name) (DB_SEQUENCE *)_NameToPtr((name)) /* - * MAKE_STAT_LIST appends a {name value} pair to a result list - * that MUST be called 'res' that is a Tcl_Obj * in the local - * function. This macro also assumes a label "error" to go to - * in the even of a Tcl error. For stat functions this will - * typically go before the "free" function to free the stat structure - * returned by DB. + * MAKE_STAT_LIST appends a {name value} pair to a result list that MUST be + * called 'res' that is a Tcl_Obj * in the local function. This macro also + * assumes a label "error" to go to in the event of a Tcl error. For stat + * functions this will typically go before the "free" function to free the + * stat structure returned by DB. */ -#define MAKE_STAT_LIST(s,v) \ -do { \ - result = _SetListElemInt(interp, res, (s), (v)); \ - if (result != TCL_OK) \ - goto error; \ +#define MAKE_STAT_LIST(s, v) do { \ + result = _SetListElemInt(interp, res, (s), (long)(v)); \ + if (result != TCL_OK) \ + goto error; \ +} while (0) + +#define MAKE_WSTAT_LIST(s, v) do { \ + result = _SetListElemWideInt(interp, res, (s), (int64_t)(v)); \ + if (result != TCL_OK) \ + goto error; \ } while (0) /* @@ -186,14 +199,13 @@ do { \ * typically go before the "free" function to free the stat structure * returned by DB. */ -#define MAKE_STAT_LSN(s, lsn) \ -do { \ +#define MAKE_STAT_LSN(s, lsn) do { \ myobjc = 2; \ myobjv[0] = Tcl_NewLongObj((long)(lsn)->file); \ myobjv[1] = Tcl_NewLongObj((long)(lsn)->offset); \ lsnlist = Tcl_NewListObj(myobjc, myobjv); \ myobjc = 2; \ - myobjv[0] = Tcl_NewStringObj((s), strlen(s)); \ + myobjv[0] = Tcl_NewStringObj((s), (int)strlen(s)); \ myobjv[1] = lsnlist; \ thislist = Tcl_NewListObj(myobjc, myobjv); \ result = Tcl_ListObjAppendElement(interp, res, thislist); \ @@ -209,27 +221,25 @@ do { \ * typically go before the "free" function to free the stat structure * returned by DB. 
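
The contract spelled out above (a local Tcl_Obj *res, an int result, and an "error" label) means a stat routine built on MAKE_STAT_LIST is typically shaped like the sketch below; the routine name is invented and DB_BTREE_STAT is used only as an example source of fields.

static int
stat_list_sketch(Tcl_Interp *interp, DB_BTREE_STAT *sp)
{
	Tcl_Obj *res;
	int result;

	res = Tcl_NewListObj(0, NULL);
	MAKE_STAT_LIST("Number of keys", sp->bt_nkeys);
	MAKE_STAT_LIST("Page size", sp->bt_pagesize);
	Tcl_SetObjResult(interp, res);
error:	/* MAKE_STAT_LIST jumps here when _SetListElemInt fails. */
	return (result);
}
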
*/ -#define MAKE_STAT_STRLIST(s,s1) \ -do { \ - result = _SetListElem(interp, res, (s), strlen(s), \ - (s1), strlen(s1)); \ - if (result != TCL_OK) \ - goto error; \ +#define MAKE_STAT_STRLIST(s,s1) do { \ + result = _SetListElem(interp, res, (s), strlen(s), \ + (s1), strlen(s1)); \ + if (result != TCL_OK) \ + goto error; \ } while (0) /* * FLAG_CHECK checks that the given flag is not set yet. * If it is, it sets up an error message. */ -#define FLAG_CHECK(flag) \ -do { \ - if ((flag) != 0) { \ - Tcl_SetResult(interp, \ - " Only 1 policy can be specified.\n", \ - TCL_STATIC); \ - result = TCL_ERROR; \ - break; \ - } \ +#define FLAG_CHECK(flag) do { \ + if ((flag) != 0) { \ + Tcl_SetResult(interp, \ + " Only 1 policy can be specified.\n", \ + TCL_STATIC); \ + result = TCL_ERROR; \ + break; \ + } \ } while (0) /* @@ -237,15 +247,14 @@ do { \ * only set to the given allowed value. * If it is, it sets up an error message. */ -#define FLAG_CHECK2(flag,val) \ -do { \ - if (((flag) & ~(val)) != 0) { \ - Tcl_SetResult(interp, \ - " Only 1 policy can be specified.\n", \ - TCL_STATIC); \ - result = TCL_ERROR; \ - break; \ - } \ +#define FLAG_CHECK2(flag, val) do { \ + if (((flag) & ~(val)) != 0) { \ + Tcl_SetResult(interp, \ + " Only 1 policy can be specified.\n", \ + TCL_STATIC); \ + result = TCL_ERROR; \ + break; \ + } \ } while (0) /* diff --git a/db/dbinc/txn.h b/db/dbinc/txn.h index d7067407c..514b740e8 100644 --- a/db/dbinc/txn.h +++ b/db/dbinc/txn.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: txn.h,v 11.48 2003/07/08 20:14:20 ubell Exp $ + * $Id: txn.h,v 11.54 2004/09/24 00:43:18 bostic Exp $ */ #ifndef _TXN_H_ @@ -77,14 +77,14 @@ typedef struct __txn_detail { * The transaction manager encapsulates the transaction system. */ struct __db_txnmgr { -/* - * These fields need to be protected for multi-threaded support. - * - * !!! - * As this structure is allocated in per-process memory, the mutex may need - * to be stored elsewhere on architectures unable to support mutexes in heap - * memory, e.g., HP/UX 9. - */ + /* + * These fields need to be protected for multi-threaded support. + * + * !!! + * As this structure is allocated in per-process memory, the mutex may + * need to be stored elsewhere on architectures unable to support + * mutexes in heap memory, e.g., HP/UX 9. + */ DB_MUTEX *mutexp; /* Lock list of active transactions * (including the content of each * TXN_DETAIL structure on the list). @@ -136,17 +136,78 @@ struct __txn_logrec { /* * Log record types. Note that these are *not* alphabetical. This is * intentional so that we don't change the meaning of values between - * software upgrades. EXPECTED, UNEXPECTED, IGNORE, NOTFOUND and OK - * are used in the txnlist functions. + * software upgrades. + * + * EXPECTED, UNEXPECTED, IGNORE, and OK are used in the txnlist functions. + * Here is an explanation of how the statuses are used. + * + * TXN_OK + * BEGIN records for transactions found on the txnlist during + * OPENFILES (BEGIN records are those with a prev_lsn of 0,0) + * + * TXN_COMMIT + * Transaction committed and should be rolled forward. + * + * TXN_ABORT + * This transaction's changes must be undone. Either there was + * never a prepare or commit record for this transaction OR there + * was a commit, but we are recovering to a timestamp or particular + * LSN and that point is before this transaction's commit. 
+ * + * TXN_PREPARE + * Prepare record, but no commit record is in the log. + * + * TXN_IGNORE + * Generic meaning is that this transaction should not be + * processed during later recovery passes. We use it in a + * number of different manners: + * + * 1. We never saw its BEGIN record. Therefore, the logs have + * been reclaimed and we *know* that this transaction doesn't + * need to be aborted, because in order for it to be + * reclaimed, there must have been a subsequent checkpoint + * (and any dirty pages for this transaction made it to + * disk). + * + * 2. This is a child transaction that created a database. + * For some reason, we don't want to recreate that database + * (i.e., it already exists or some other database created + * after it exists). + * + * 3. During recovery open of subdatabases, if the master check fails, + * we use a TXN_IGNORE on the create of the subdb in the nested + * transaction. + * + * 4. During a remove, the file with the name being removed isn't + * the file for which we are recovering a remove. + * + * TXN_EXPECTED + * After a successful open during recovery, we update the + * transaction's status to TXN_EXPECTED. The open was done + * in the parent, but in the open log record, we record the + * child transaction's ID if we also did a create. When there + * is a valid ID in that field, we use it and mark the child's + * status as TXN_EXPECTED (indicating that we don't need to redo + * a create for this file). + * + * When recovering a remove, if we don't find or can't open + * the file, the child (which does the remove) gets marked + * EXPECTED (indicating that we don't need to redo the remove). + * + * TXN_UNEXPECTED + * During recovery, we attempted an open that should have succeeded + * and we got ENOENT, so like with the EXPECTED case, we indicate + * in the child that we got the UNEXPECTED return so that we do redo + * the creating/deleting operation. + * */ #define TXN_OK 0 #define TXN_COMMIT 1 #define TXN_PREPARE 2 #define TXN_ABORT 3 -#define TXN_NOTFOUND 4 -#define TXN_IGNORE 5 -#define TXN_EXPECTED 6 -#define TXN_UNEXPECTED 7 +#define TXN_IGNORE 4 +#define TXN_EXPECTED 5 +#define TXN_UNEXPECTED 6 #include "dbinc_auto/txn_auto.h" #include "dbinc_auto/txn_ext.h" diff --git a/db/dbinc/xa.h b/db/dbinc/xa.h index 7edf49282..71333c2c9 100644 --- a/db/dbinc/xa.h +++ b/db/dbinc/xa.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1998-2003 + * Copyright (c) 1998-2004 * Sleepycat Software. All rights reserved. 
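
To make the status table above concrete: a recovery pass looks the record's transaction up (the updated __db_txnlist_find prototype further down returns the status through an out parameter) and then branches on it. The function below is a simplified sketch of the redo-pass decision only, not the real dispatch code.

/* Sketch: should this record be reapplied during the forward pass? */
static int
redo_this_record_sketch(u_int32_t status)
{
	switch (status) {
	case TXN_COMMIT:
		return (1);		/* Roll the change forward. */
	case TXN_ABORT:			/* Undone by the backward pass. */
	case TXN_PREPARE:		/* No commit record was found. */
	case TXN_IGNORE:		/* Explicitly skipped (see above). */
	case TXN_EXPECTED:		/* Open/create already satisfied. */
	default:
		return (0);
	}
}
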
* - * $Id: xa.h,v 11.6 2003/01/08 04:32:51 bostic Exp $ + * $Id: xa.h,v 11.7 2004/01/28 03:36:02 bostic Exp $ */ /* * Start of xa.h header diff --git a/db/dbinc_auto/btree_auto.h b/db/dbinc_auto/btree_auto.h index 4feb07ad9..d9d4e47ad 100644 --- a/db/dbinc_auto/btree_auto.h +++ b/db/dbinc_auto/btree_auto.h @@ -125,4 +125,18 @@ typedef struct ___bam_rcuradj_args { u_int32_t order; } __bam_rcuradj_args; +#define DB___bam_relink 147 +typedef struct ___bam_relink_args { + u_int32_t type; + DB_TXN *txnid; + DB_LSN prev_lsn; + int32_t fileid; + db_pgno_t pgno; + DB_LSN lsn; + db_pgno_t prev; + DB_LSN lsn_prev; + db_pgno_t next; + DB_LSN lsn_next; +} __bam_relink_args; + #endif diff --git a/db/dbinc_auto/btree_ext.h b/db/dbinc_auto/btree_ext.h index a662b027b..9acb14051 100644 --- a/db/dbinc_auto/btree_ext.h +++ b/db/dbinc_auto/btree_ext.h @@ -12,7 +12,6 @@ size_t __bam_defpfx __P((DB *, const DBT *, const DBT *)); int __bam_pgin __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *)); int __bam_pgout __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *)); int __bam_mswap __P((PAGE *)); -void __bam_cprint __P((DBC *)); int __bam_ca_delete __P((DB *, db_pgno_t, u_int32_t, int)); int __ram_ca_delete __P((DB *, db_pgno_t)); int __bam_ca_di __P((DBC *, db_pgno_t, u_int32_t, int)); @@ -31,13 +30,17 @@ int __bam_c_rget __P((DBC *, DBT *)); int __bam_ditem __P((DBC *, PAGE *, u_int32_t)); int __bam_adjindx __P((DBC *, PAGE *, u_int32_t, u_int32_t, int)); int __bam_dpages __P((DBC *, EPG *)); +int __bam_relink __P((DBC *, PAGE *, PAGE **)); int __bam_db_create __P((DB *)); int __bam_db_close __P((DB *)); void __bam_map_flags __P((DB *, u_int32_t *, u_int32_t *)); int __bam_set_flags __P((DB *, u_int32_t *flagsp)); int __bam_set_bt_compare __P((DB *, int (*)(DB *, const DBT *, const DBT *))); +int __bam_get_bt_minkey __P((DB *, u_int32_t *)); void __ram_map_flags __P((DB *, u_int32_t *, u_int32_t *)); int __ram_set_flags __P((DB *, u_int32_t *flagsp)); +int __ram_get_re_len __P((DB *, u_int32_t *)); +int __ram_get_re_pad __P((DB *, int *)); int __bam_open __P((DB *, DB_TXN *, const char *, db_pgno_t, u_int32_t)); int __bam_metachk __P((DB *, const char *, BTMETA *)); int __bam_read_root __P((DB *, DB_TXN *, db_pgno_t, u_int32_t)); @@ -54,6 +57,7 @@ int __bam_repl_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __bam_root_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __bam_curadj_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __bam_rcuradj_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __bam_relink_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __bam_reclaim __P((DB *, DB_TXN *)); int __bam_truncate __P((DBC *, u_int32_t *)); int __ram_open __P((DB *, DB_TXN *, const char *, db_pgno_t, u_int32_t)); @@ -74,9 +78,11 @@ int __bam_stkgrow __P((DB_ENV *, BTREE_CURSOR *)); int __bam_split __P((DBC *, void *, db_pgno_t *)); int __bam_copy __P((DB *, PAGE *, PAGE *, u_int32_t, u_int32_t)); int __bam_stat __P((DBC *, void *, u_int32_t)); -int __bam_traverse __P((DBC *, db_lockmode_t, db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *)); +int __bam_stat_print __P((DBC *, u_int32_t)); int __bam_stat_callback __P((DB *, PAGE *, void *, int *)); +void __bam_print_cursor __P((DBC *)); int __bam_key_range __P((DBC *, DBT *, DB_KEY_RANGE *, u_int32_t)); +int __bam_traverse __P((DBC *, db_lockmode_t, db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *)); int __bam_30_btreemeta __P((DB *, char *, u_int8_t *)); int __bam_31_btreemeta __P((DB 
*, char *, u_int32_t, DB_FH *, PAGE *, int *)); int __bam_31_lbtree __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *)); @@ -90,44 +96,37 @@ int __bam_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t, PAGE *, void * int __bam_salvage_walkdupint __P((DB *, VRFY_DBINFO *, PAGE *, DBT *, void *, int (*)(void *, const void *), u_int32_t)); int __bam_meta2pgset __P((DB *, VRFY_DBINFO *, BTMETA *, u_int32_t, DB *)); int __bam_split_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, const DBT *, u_int32_t)); -int __bam_split_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __bam_split_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __bam_split_read __P((DB_ENV *, void *, __bam_split_args **)); int __bam_rsplit_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, const DBT *, db_pgno_t, db_pgno_t, const DBT *, DB_LSN *)); -int __bam_rsplit_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __bam_rsplit_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __bam_rsplit_read __P((DB_ENV *, void *, __bam_rsplit_args **)); int __bam_adj_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, u_int32_t, u_int32_t, u_int32_t)); -int __bam_adj_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __bam_adj_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __bam_adj_read __P((DB_ENV *, void *, __bam_adj_args **)); int __bam_cadjust_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, u_int32_t, int32_t, u_int32_t)); -int __bam_cadjust_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __bam_cadjust_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __bam_cadjust_read __P((DB_ENV *, void *, __bam_cadjust_args **)); int __bam_cdel_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, u_int32_t)); -int __bam_cdel_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __bam_cdel_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __bam_cdel_read __P((DB_ENV *, void *, __bam_cdel_args **)); int __bam_repl_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, u_int32_t, u_int32_t, const DBT *, const DBT *, u_int32_t, u_int32_t)); -int __bam_repl_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __bam_repl_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __bam_repl_read __P((DB_ENV *, void *, __bam_repl_args **)); int __bam_root_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, db_pgno_t, DB_LSN *)); -int __bam_root_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __bam_root_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __bam_root_read __P((DB_ENV *, void *, __bam_root_args **)); int __bam_curadj_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_ca_mode, db_pgno_t, db_pgno_t, db_pgno_t, u_int32_t, u_int32_t, u_int32_t)); -int __bam_curadj_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __bam_curadj_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __bam_curadj_read __P((DB_ENV *, void *, __bam_curadj_args **)); int __bam_rcuradj_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, ca_recno_arg, db_pgno_t, db_recno_t, u_int32_t)); -int __bam_rcuradj_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __bam_rcuradj_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __bam_rcuradj_read __P((DB_ENV *, void *, 
__bam_rcuradj_args **)); -int __bam_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); -int __bam_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); +int __bam_relink_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *)); +int __bam_relink_read __P((DB_ENV *, void *, __bam_relink_args **)); int __bam_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); +int __bam_split_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __bam_rsplit_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __bam_adj_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __bam_cadjust_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __bam_cdel_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __bam_repl_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __bam_root_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __bam_curadj_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __bam_rcuradj_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __bam_relink_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __bam_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); #if defined(__cplusplus) } diff --git a/db/dbinc_auto/clib_ext.h b/db/dbinc_auto/clib_ext.h index 7e2817d62..a471268e0 100644 --- a/db/dbinc_auto/clib_ext.h +++ b/db/dbinc_auto/clib_ext.h @@ -27,6 +27,9 @@ int raise __P((int)); #ifndef HAVE_SNPRINTF int snprintf __P((char *, size_t, const char *, ...)); #endif +#ifndef HAVE_VSNPRINTF +int vsnprintf __P((char *, size_t, const char *, va_list)); +#endif #ifndef HAVE_STRCASECMP int strcasecmp __P((const char *, const char *)); #endif @@ -39,9 +42,6 @@ char *strdup __P((const char *)); #ifndef HAVE_STRERROR char *strerror __P((int)); #endif -#ifndef HAVE_VSNPRINTF -int vsnprintf __P((char *, size_t, const char *, va_list)); -#endif #if defined(__cplusplus) } diff --git a/db/dbinc_auto/common_ext.h b/db/dbinc_auto/common_ext.h index 5cf03df70..d9f05406d 100644 --- a/db/dbinc_auto/common_ext.h +++ b/db/dbinc_auto/common_ext.h @@ -23,6 +23,8 @@ int __db_panic __P((DB_ENV *, int)); void __db_err __P((const DB_ENV *, const char *, ...)) __attribute__ ((__format__ (__printf__, 2, 3))); void __db_errcall __P((const DB_ENV *, int, int, const char *, va_list)); void __db_errfile __P((const DB_ENV *, int, int, const char *, va_list)); +void __db_msgadd __P((DB_ENV *, DB_MSGBUF *, const char *, ...)) __attribute__ ((__format__ (__printf__, 3, 4))); +void __db_msg __P((const DB_ENV *, const char *, ...)) __attribute__ ((__format__ (__printf__, 2, 3))); void __db_logmsg __P((const DB_ENV *, DB_TXN *, const char *, u_int32_t, const char *, ...)) __attribute__ ((__format__ (__printf__, 5, 6))); int __db_unknown_flag __P((DB_ENV *, char *, u_int32_t)); int __db_unknown_type __P((DB_ENV *, char *, DBTYPE)); @@ -30,12 +32,13 @@ int __db_check_txn __P((DB *, DB_TXN *, u_int32_t, int)); int __db_not_txn_env __P((DB_ENV *)); int __db_rec_toobig __P((DB_ENV *, u_int32_t, u_int32_t)); int __db_rec_repl __P((DB_ENV *, u_int32_t, u_int32_t)); +int __db_check_lsn __P((DB_ENV *, DB_LSN *, DB_LSN *)); int __db_getlong __P((DB_ENV *, const char *, char *, long, long, long *)); int __db_getulong __P((DB_ENV *, const char *, char *, u_long, u_long, u_long *)); void __db_idspace 
__P((u_int32_t *, int, u_int32_t *, u_int32_t *)); u_int32_t __db_log2 __P((u_int32_t)); int __db_util_arg __P((char *, char *, int *, char ***)); -int __db_util_cache __P((DB_ENV *, DB *, u_int32_t *, int *)); +int __db_util_cache __P((DB *, u_int32_t *, int *)); int __db_util_logset __P((const char *, char *)); void __db_util_siginit __P((void)); int __db_util_interrupted __P((void)); diff --git a/db/dbinc_auto/crypto_ext.h b/db/dbinc_auto/crypto_ext.h index dc8b5efd1..b305ec5a7 100644 --- a/db/dbinc_auto/crypto_ext.h +++ b/db/dbinc_auto/crypto_ext.h @@ -13,6 +13,7 @@ int __aes_decrypt __P((DB_ENV *, void *, void *, u_int8_t *, size_t)); int __aes_encrypt __P((DB_ENV *, void *, void *, u_int8_t *, size_t)); int __aes_init __P((DB_ENV *, DB_CIPHER *)); int __crypto_dbenv_close __P((DB_ENV *)); +int __crypto_region_destroy __P((DB_ENV *)); int __crypto_algsetup __P((DB_ENV *, DB_CIPHER *, u_int32_t, int)); int __crypto_decrypt_meta __P((DB_ENV *, DB *, u_int8_t *, int)); int __crypto_set_passwd __P((DB_ENV *, DB_ENV *)); diff --git a/db/dbinc_auto/db_auto.h b/db/dbinc_auto/db_auto.h index 8803328d3..c2edcedbc 100644 --- a/db/dbinc_auto/db_auto.h +++ b/db/dbinc_auto/db_auto.h @@ -44,21 +44,6 @@ typedef struct ___db_ovref_args { DB_LSN lsn; } __db_ovref_args; -#define DB___db_relink 45 -typedef struct ___db_relink_args { - u_int32_t type; - DB_TXN *txnid; - DB_LSN prev_lsn; - u_int32_t opcode; - int32_t fileid; - db_pgno_t pgno; - DB_LSN lsn; - db_pgno_t prev; - DB_LSN lsn_prev; - db_pgno_t next; - DB_LSN lsn_next; -} __db_relink_args; - #define DB___db_debug 47 typedef struct ___db_debug_args { u_int32_t type; @@ -93,6 +78,7 @@ typedef struct ___db_pg_alloc_args { db_pgno_t pgno; u_int32_t ptype; db_pgno_t next; + db_pgno_t last_pgno; } __db_pg_alloc_args; #define DB___db_pg_free 50 @@ -106,6 +92,7 @@ typedef struct ___db_pg_free_args { db_pgno_t meta_pgno; DBT header; db_pgno_t next; + db_pgno_t last_pgno; } __db_pg_free_args; #define DB___db_cksum 51 @@ -126,6 +113,7 @@ typedef struct ___db_pg_freedata_args { db_pgno_t meta_pgno; DBT header; db_pgno_t next; + db_pgno_t last_pgno; DBT data; } __db_pg_freedata_args; @@ -151,4 +139,15 @@ typedef struct ___db_pg_new_args { db_pgno_t next; } __db_pg_new_args; +#define DB___db_pg_init 60 +typedef struct ___db_pg_init_args { + u_int32_t type; + DB_TXN *txnid; + DB_LSN prev_lsn; + int32_t fileid; + db_pgno_t pgno; + DBT header; + DBT data; +} __db_pg_init_args; + #endif diff --git a/db/dbinc_auto/db_ext.h b/db/dbinc_auto/db_ext.h index 94781310d..c7124c350 100644 --- a/db/dbinc_auto/db_ext.h +++ b/db/dbinc_auto/db_ext.h @@ -7,81 +7,67 @@ extern "C" { #endif int __crdel_metasub_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, const DBT *, DB_LSN *)); -int __crdel_metasub_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __crdel_metasub_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __crdel_metasub_read __P((DB_ENV *, void *, __crdel_metasub_args **)); -int __crdel_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); -int __crdel_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); int __crdel_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); +int __crdel_metasub_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __crdel_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); int __crdel_metasub_recover __P((DB_ENV *, DBT *, 
DB_LSN *, db_recops, void *)); int __db_master_open __P((DB *, DB_TXN *, const char *, u_int32_t, int, DB **)); int __db_master_update __P((DB *, DB *, DB_TXN *, const char *, DBTYPE, mu_action, const char *, u_int32_t)); int __db_dbenv_setup __P((DB *, DB_TXN *, const char *, u_int32_t, u_int32_t)); +int __db_dbenv_mpool __P((DB *, const char *, u_int32_t)); int __db_close __P((DB *, DB_TXN *, u_int32_t)); int __db_refresh __P((DB *, DB_TXN *, u_int32_t, int *)); int __db_log_page __P((DB *, DB_TXN *, DB_LSN *, db_pgno_t, PAGE *)); int __db_backup_name __P((DB_ENV *, const char *, DB_TXN *, char **)); DB *__dblist_get __P((DB_ENV *, u_int32_t)); -#if CONFIG_TEST +#ifdef CONFIG_TEST int __db_testcopy __P((DB_ENV *, DB *, const char *)); #endif int __db_cursor_int __P((DB *, DB_TXN *, DBTYPE, db_pgno_t, int, u_int32_t, DBC **)); -int __db_cprint __P((DB *)); int __db_put __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t)); int __db_del __P((DB *, DB_TXN *, DBT *, u_int32_t)); int __db_sync __P((DB *)); int __db_associate __P((DB *, DB_TXN *, DB *, int (*)(DB *, const DBT *, const DBT *, DBT *), u_int32_t)); +int __db_secondary_close __P((DB *, u_int32_t)); int __db_addrem_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_pgno_t, u_int32_t, u_int32_t, const DBT *, const DBT *, DB_LSN *)); -int __db_addrem_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __db_addrem_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __db_addrem_read __P((DB_ENV *, void *, __db_addrem_args **)); int __db_big_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_pgno_t, db_pgno_t, db_pgno_t, const DBT *, DB_LSN *, DB_LSN *, DB_LSN *)); -int __db_big_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __db_big_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __db_big_read __P((DB_ENV *, void *, __db_big_args **)); int __db_ovref_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, int32_t, DB_LSN *)); -int __db_ovref_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __db_ovref_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __db_ovref_read __P((DB_ENV *, void *, __db_ovref_args **)); -int __db_relink_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *)); -int __db_relink_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __db_relink_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __db_relink_read __P((DB_ENV *, void *, __db_relink_args **)); int __db_debug_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *, int32_t, const DBT *, const DBT *, u_int32_t)); -int __db_debug_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __db_debug_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __db_debug_read __P((DB_ENV *, void *, __db_debug_args **)); int __db_noop_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *)); -int __db_noop_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __db_noop_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __db_noop_read __P((DB_ENV *, void *, __db_noop_args **)); -int __db_pg_alloc_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, u_int32_t, db_pgno_t)); -int __db_pg_alloc_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __db_pg_alloc_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __db_pg_alloc_log __P((DB *, DB_TXN *, DB_LSN *, 
u_int32_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, u_int32_t, db_pgno_t, db_pgno_t)); int __db_pg_alloc_read __P((DB_ENV *, void *, __db_pg_alloc_args **)); -int __db_pg_free_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, const DBT *, db_pgno_t)); -int __db_pg_free_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __db_pg_free_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __db_pg_free_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, const DBT *, db_pgno_t, db_pgno_t)); int __db_pg_free_read __P((DB_ENV *, void *, __db_pg_free_args **)); int __db_cksum_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t)); -int __db_cksum_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __db_cksum_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __db_cksum_read __P((DB_ENV *, void *, __db_cksum_args **)); -int __db_pg_freedata_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, const DBT *, db_pgno_t, const DBT *)); -int __db_pg_freedata_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __db_pg_freedata_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __db_pg_freedata_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, const DBT *, db_pgno_t, db_pgno_t, const DBT *)); int __db_pg_freedata_read __P((DB_ENV *, void *, __db_pg_freedata_args **)); int __db_pg_prepare_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t)); -int __db_pg_prepare_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __db_pg_prepare_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __db_pg_prepare_read __P((DB_ENV *, void *, __db_pg_prepare_args **)); int __db_pg_new_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, const DBT *, db_pgno_t)); -int __db_pg_new_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __db_pg_new_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __db_pg_new_read __P((DB_ENV *, void *, __db_pg_new_args **)); -int __db_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); -int __db_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); +int __db_pg_init_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, const DBT *, const DBT *)); +int __db_pg_init_read __P((DB_ENV *, void *, __db_pg_init_args **)); int __db_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); +int __db_addrem_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __db_big_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __db_ovref_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __db_debug_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __db_noop_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __db_pg_alloc_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __db_pg_free_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __db_cksum_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __db_pg_freedata_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __db_pg_prepare_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __db_pg_new_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __db_pg_init_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __db_init_print 
__P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); int __db_c_close __P((DBC *)); int __db_c_destroy __P((DBC *)); int __db_c_count __P((DBC *, db_recno_t *)); @@ -106,22 +92,24 @@ int __db_byteswap __P((DB_ENV *, DB *, db_pgno_t, PAGE *, size_t, int)); int __db_dispatch __P((DB_ENV *, int (**)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)), size_t, DBT *, DB_LSN *, db_recops, void *)); int __db_add_recovery __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *, int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), u_int32_t)); int __db_txnlist_init __P((DB_ENV *, u_int32_t, u_int32_t, DB_LSN *, void *)); -int __db_txnlist_add __P((DB_ENV *, void *, u_int32_t, int32_t, DB_LSN *)); +int __db_txnlist_add __P((DB_ENV *, void *, u_int32_t, u_int32_t, DB_LSN *)); int __db_txnlist_remove __P((DB_ENV *, void *, u_int32_t)); void __db_txnlist_ckp __P((DB_ENV *, void *, DB_LSN *)); void __db_txnlist_end __P((DB_ENV *, void *)); -int __db_txnlist_find __P((DB_ENV *, void *, u_int32_t)); -int __db_txnlist_update __P((DB_ENV *, void *, u_int32_t, int32_t, DB_LSN *)); +int __db_txnlist_find __P((DB_ENV *, void *, u_int32_t, u_int32_t *)); +int __db_txnlist_update __P((DB_ENV *, void *, u_int32_t, u_int32_t, DB_LSN *, u_int32_t *, int)); int __db_txnlist_gen __P((DB_ENV *, void *, int, u_int32_t, u_int32_t)); int __db_txnlist_lsnadd __P((DB_ENV *, void *, DB_LSN *, u_int32_t)); int __db_txnlist_lsninit __P((DB_ENV *, DB_TXNHEAD *, DB_LSN *)); +#ifndef HAVE_FTRUNCATE int __db_add_limbo __P((DB_ENV *, void *, int32_t, db_pgno_t, int32_t)); +#endif +#ifndef HAVE_FTRUNCATE int __db_do_the_limbo __P((DB_ENV *, DB_TXN *, DB_TXN *, DB_TXNHEAD *, db_limbo_state)); -int __db_default_getpgnos __P((DB_ENV *, DB_LSN *lsnp, void *)); +#endif void __db_txnlist_print __P((void *)); int __db_ditem __P((DBC *, PAGE *, u_int32_t, u_int32_t)); int __db_pitem __P((DBC *, PAGE *, u_int32_t, u_int32_t, DBT *, DBT *)); -int __db_relink __P((DBC *, u_int32_t, PAGE *, PAGE **, int)); int __db_associate_pp __P((DB *, DB_TXN *, DB *, int (*)(DB *, const DBT *, const DBT *, DBT *), u_int32_t)); int __db_close_pp __P((DB *, u_int32_t)); int __db_cursor_pp __P((DB *, DB_TXN *, DBC **, u_int32_t)); @@ -136,8 +124,6 @@ int __db_open_pp __P((DB *, DB_TXN *, const char *, const char *, DBTYPE, u_int3 int __db_pget_pp __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t)); int __db_pget __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t)); int __db_put_pp __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t)); -int __db_stat_pp __P((DB *, void *, u_int32_t)); -int __db_stat __P((DB *, void *, u_int32_t)); int __db_sync_pp __P((DB *, u_int32_t)); int __db_c_close_pp __P((DBC *)); int __db_c_count_pp __P((DBC *, db_recno_t *, u_int32_t)); @@ -145,6 +131,7 @@ int __db_c_del_pp __P((DBC *, u_int32_t)); int __db_c_del_arg __P((DBC *, u_int32_t)); int __db_c_dup_pp __P((DBC *, DBC **, u_int32_t)); int __db_c_get_pp __P((DBC *, DBT *, DBT *, u_int32_t)); +int __db_secondary_close_pp __P((DB *, u_int32_t)); int __db_c_pget_pp __P((DBC *, DBT *, DBT *, DBT *, u_int32_t)); int __db_c_put_pp __P((DBC *, DBT *, DBT *, u_int32_t)); int __db_txn_auto_init __P((DB_ENV *, DB_TXN **)); @@ -158,7 +145,9 @@ int __db_lprint __P((DBC *)); int __db_lget __P((DBC *, int, db_pgno_t, db_lockmode_t, u_int32_t, DB_LOCK *)); int __db_lput __P((DBC *, DB_LOCK *)); int __dbh_am_chk __P((DB *, u_int32_t)); +int __db_get_flags __P((DB *, u_int32_t *)); int __db_set_flags __P((DB *, u_int32_t)); +int 
__db_get_lorder __P((DB *, int *)); int __db_set_lorder __P((DB *, int)); int __db_set_pagesize __P((DB *, u_int32_t)); int __db_open __P((DB *, DB_TXN *, const char *, const char *, DBTYPE, u_int32_t, int, db_pgno_t)); @@ -176,21 +165,25 @@ int __db_vrfy_overflow __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t)); int __db_vrfy_ovfl_structure __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t, u_int32_t)); int __db_safe_goff __P((DB *, VRFY_DBINFO *, db_pgno_t, DBT *, void *, u_int32_t)); void __db_loadme __P((void)); -int __db_dump __P((DB *, char *, char *)); -void __db_inmemdbflags __P((u_int32_t, void *, void (*)(u_int32_t, const FN *, void *))); -int __db_prnpage __P((DB *, db_pgno_t, FILE *)); -int __db_prpage __P((DB *, PAGE *, FILE *, u_int32_t)); -void __db_pr __P((u_int8_t *, u_int32_t, FILE *)); -int __db_prdbt __P((DBT *, int, const char *, void *, int (*)(void *, const void *), int, VRFY_DBINFO *)); -void __db_prflags __P((u_int32_t, const FN *, void *)); -const char * __db_dbtype_to_string __P((DBTYPE)); -int __db_prheader __P((DB *, char *, int, int, void *, int (*)(void *, const void *), VRFY_DBINFO *, db_pgno_t)); +int __db_dumptree __P((DB *, char *, char *)); +const FN * __db_get_flags_fn __P((void)); +int __db_prnpage __P((DB *, db_pgno_t)); +int __db_prpage __P((DB *, PAGE *, u_int32_t)); +void __db_pr __P((DB_ENV *, DB_MSGBUF *, u_int8_t *, u_int32_t)); +void __db_prflags __P((DB_ENV *, DB_MSGBUF *, u_int32_t, const FN *, const char *, const char *)); +const char * __db_lockmode_to_string __P((db_lockmode_t)); +int __db_dumptree __P((DB *, char *, char *)); +const FN * __db_get_flags_fn __P((void)); +int __db_dump_pp __P((DB *, const char *, int (*)(void *, const void *), void *, int, int)); +int __db_dump __P((DB *, const char *, int (*)(void *, const void *), void *, int, int)); +int __db_prdbt __P((DBT *, int, const char *, void *, int (*)(void *, const void *), int)); +int __db_prheader __P((DB *, const char *, int, int, void *, int (*)(void *, const void *), VRFY_DBINFO *, db_pgno_t)); int __db_prfooter __P((void *, int (*)(void *, const void *))); int __db_pr_callback __P((void *, const void *)); +const char * __db_dbtype_to_string __P((DBTYPE)); int __db_addrem_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __db_big_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __db_ovref_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __db_relink_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __db_debug_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __db_noop_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __db_pg_alloc_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); @@ -199,6 +192,7 @@ int __db_pg_new_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __db_pg_freedata_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __db_cksum_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __db_pg_prepare_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __db_pg_init_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __db_traverse_big __P((DB *, db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *)); int __db_reclaim_callback __P((DB *, PAGE *, void *, int *)); int __db_truncate_callback __P((DB *, PAGE *, void *, int *)); @@ -212,15 +206,20 @@ int __db_rename __P((DB *, DB_TXN *, const char *, const char *, const char *)); int __db_rename_int __P((DB *, DB_TXN *, const char *, const char *, 
const char *)); int __db_ret __P((DB *, PAGE *, u_int32_t, DBT *, void **, u_int32_t *)); int __db_retcopy __P((DB_ENV *, DBT *, void *, u_int32_t, void **, u_int32_t *)); +int __db_fileid_reset __P((DB_ENV *, char *, int)); +int __db_lsn_reset __P((DB_ENV *, char *, int)); +int __db_stat_pp __P((DB *, DB_TXN *, void *, u_int32_t)); +int __db_stat __P((DB *, DB_TXN *, void *, u_int32_t)); +int __db_stat_print_pp __P((DB *, u_int32_t)); +int __db_stat_print __P((DB *, u_int32_t)); int __db_truncate_pp __P((DB *, DB_TXN *, u_int32_t *, u_int32_t)); -int __db_truncate __P((DB *, DB_TXN *, u_int32_t *, u_int32_t)); +int __db_truncate __P((DB *, DB_TXN *, u_int32_t *)); int __db_upgrade_pp __P((DB *, const char *, u_int32_t)); int __db_upgrade __P((DB *, const char *, u_int32_t)); int __db_lastpgno __P((DB *, char *, DB_FH *, db_pgno_t *)); int __db_31_offdup __P((DB *, char *, DB_FH *, int, db_pgno_t *)); int __db_verify_pp __P((DB *, const char *, const char *, FILE *, u_int32_t)); int __db_verify_internal __P((DB *, const char *, const char *, void *, int (*)(void *, const void *), u_int32_t)); -int __db_verify __P((DB *, const char *, const char *, void *, int (*)(void *, const void *), u_int32_t)); int __db_vrfy_common __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t)); int __db_vrfy_datapage __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t)); int __db_vrfy_meta __P((DB *, VRFY_DBINFO *, DBMETA *, db_pgno_t, u_int32_t)); @@ -248,6 +247,7 @@ int __db_salvage_getnext __P((VRFY_DBINFO *, db_pgno_t *, u_int32_t *)); int __db_salvage_isdone __P((VRFY_DBINFO *, db_pgno_t)); int __db_salvage_markdone __P((VRFY_DBINFO *, db_pgno_t)); int __db_salvage_markneeded __P((VRFY_DBINFO *, db_pgno_t, u_int32_t)); +int __db_vrfy_prdbt __P((DBT *, int, const char *, void *, int (*)(void *, const void *), int, VRFY_DBINFO *)); #if defined(__cplusplus) } diff --git a/db/dbinc_auto/dbreg_ext.h b/db/dbinc_auto/dbreg_ext.h index cb0df15ab..5902b9db9 100644 --- a/db/dbinc_auto/dbreg_ext.h +++ b/db/dbinc_auto/dbreg_ext.h @@ -12,18 +12,18 @@ int __dbreg_new_id __P((DB *, DB_TXN *)); int __dbreg_get_id __P((DB *, DB_TXN *, int32_t *)); int __dbreg_assign_id __P((DB *, int32_t)); int __dbreg_revoke_id __P((DB *, int, int32_t)); -int __dbreg_close_id __P((DB *, DB_TXN *)); +int __dbreg_close_id __P((DB *, DB_TXN *, u_int32_t)); int __dbreg_register_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, const DBT *, const DBT *, int32_t, DBTYPE, db_pgno_t, u_int32_t)); -int __dbreg_register_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __dbreg_register_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __dbreg_register_read __P((DB_ENV *, void *, __dbreg_register_args **)); -int __dbreg_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); -int __dbreg_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); int __dbreg_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); +int __dbreg_register_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __dbreg_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); int __dbreg_register_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +void __dbreg_print_fname __P((DB_ENV *, FNAME *)); +void __dbreg_print_dblist __P((DB_ENV *, u_int32_t)); int __dbreg_add_dbentry __P((DB_ENV *, DB_LOG *, DB *, int32_t)); void __dbreg_rem_dbentry __P((DB_LOG *, 
int32_t)); -int __dbreg_open_files __P((DB_ENV *)); +int __dbreg_log_files __P((DB_ENV *)); int __dbreg_close_files __P((DB_ENV *)); int __dbreg_id_to_db __P((DB_ENV *, DB_TXN *, DB **, int32_t, int)); int __dbreg_id_to_db_int __P((DB_ENV *, DB_TXN *, DB **, int32_t, int, int)); @@ -32,10 +32,6 @@ int __dbreg_fid_to_fname __P((DB_LOG *, u_int8_t *, int, FNAME **)); int __dbreg_get_name __P((DB_ENV *, u_int8_t *, char **)); int __dbreg_do_open __P((DB_ENV *, DB_TXN *, DB_LOG *, u_int8_t *, char *, DBTYPE, int32_t, db_pgno_t, void *, u_int32_t)); int __dbreg_lazy_id __P((DB *)); -int __dbreg_push_id __P((DB_ENV *, int32_t)); -int __dbreg_pop_id __P((DB_ENV *, int32_t *)); -int __dbreg_pluck_id __P((DB_ENV *, int32_t)); -void __dbreg_print_dblist __P((DB_ENV *)); #if defined(__cplusplus) } diff --git a/db/dbinc_auto/env_ext.h b/db/dbinc_auto/env_ext.h index 22d66fa30..03b3ae995 100644 --- a/db/dbinc_auto/env_ext.h +++ b/db/dbinc_auto/env_ext.h @@ -6,13 +6,12 @@ extern "C" { #endif -void __db_shalloc_init __P((void *, size_t)); -int __db_shalloc_size __P((size_t, size_t)); -int __db_shalloc __P((void *, size_t, size_t, void *)); -void __db_shalloc_free __P((void *, void *)); -size_t __db_shsizeof __P((void *)); -void __db_shalloc_dump __P((void *, FILE *)); -int __db_tablesize __P((u_int32_t)); +void __db_shalloc_init __P((REGINFO *, size_t)); +size_t __db_shalloc_size __P((size_t, size_t)); +int __db_shalloc __P((REGINFO *, size_t, size_t, void *)); +void __db_shalloc_free __P((REGINFO *, void *)); +size_t __db_shalloc_sizeof __P((void *)); +u_int32_t __db_tablesize __P((u_int32_t)); void __db_hashinit __P((void *, u_int32_t)); int __db_fileinit __P((DB_ENV *, DB_FH *, size_t, int)); int __db_overwrite __P((DB_ENV *, const char *)); @@ -21,11 +20,15 @@ int __dbenv_get_encrypt_flags __P((DB_ENV *, u_int32_t *)); int __dbenv_set_encrypt __P((DB_ENV *, const char *, u_int32_t)); int __dbenv_set_flags __P((DB_ENV *, u_int32_t, int)); int __dbenv_set_data_dir __P((DB_ENV *, const char *)); -void __dbenv_set_errcall __P((DB_ENV *, void (*)(const char *, char *))); +int __dbenv_set_intermediate_dir __P((DB_ENV *, int, u_int32_t)); +void __dbenv_set_errcall __P((DB_ENV *, void (*)(const DB_ENV *, const char *, const char *))); void __dbenv_get_errfile __P((DB_ENV *, FILE **)); void __dbenv_set_errfile __P((DB_ENV *, FILE *)); void __dbenv_get_errpfx __P((DB_ENV *, const char **)); void __dbenv_set_errpfx __P((DB_ENV *, const char *)); +void __dbenv_set_msgcall __P((DB_ENV *, void (*)(const DB_ENV *, const char *))); +void __dbenv_get_msgfile __P((DB_ENV *, FILE **)); +void __dbenv_set_msgfile __P((DB_ENV *, FILE *)); int __dbenv_set_paniccall __P((DB_ENV *, void (*)(DB_ENV *, int))); int __dbenv_set_shm_key __P((DB_ENV *, long)); int __dbenv_set_tas_spins __P((DB_ENV *, u_int32_t)); @@ -41,14 +44,23 @@ int __dbenv_close __P((DB_ENV *, int)); int __dbenv_get_open_flags __P((DB_ENV *, u_int32_t *)); int __db_appname __P((DB_ENV *, APPNAME, const char *, u_int32_t, DB_FH **, char **)); int __db_home __P((DB_ENV *, const char *, u_int32_t)); -int __db_apprec __P((DB_ENV *, DB_LSN *, DB_LSN *, u_int32_t, u_int32_t)); +int __db_apprec __P((DB_ENV *, DB_LSN *, DB_LSN *, int, u_int32_t)); +int __log_backup __P((DB_ENV *, DB_LOGC *, DB_LSN *, DB_LSN *, u_int32_t)); int __env_openfiles __P((DB_ENV *, DB_LOGC *, void *, DBT *, DB_LSN *, DB_LSN *, double, int)); int __db_e_attach __P((DB_ENV *, u_int32_t *)); int __db_e_detach __P((DB_ENV *, int)); int __db_e_remove __P((DB_ENV *, u_int32_t)); -int __db_e_stat 
__P((DB_ENV *, REGENV *, REGION *, int *, u_int32_t)); int __db_r_attach __P((DB_ENV *, REGINFO *, size_t)); int __db_r_detach __P((DB_ENV *, REGINFO *, int)); +int __dbenv_stat_print_pp __P((DB_ENV *, u_int32_t)); +void __db_print_fh __P((DB_ENV *, DB_FH *, u_int32_t)); +void __db_print_fileid __P((DB_ENV *, u_int8_t *, const char *)); +void __db_print_mutex __P((DB_ENV *, DB_MSGBUF *, DB_MUTEX *, const char *, u_int32_t)); +void __db_dl __P((DB_ENV *, const char *, u_long)); +void __db_dl_pct __P((DB_ENV *, const char *, u_long, int, const char *)); +void __db_dlbytes __P((DB_ENV *, const char *, u_long, u_long, u_long)); +void __db_print_reginfo __P((DB_ENV *, REGINFO *, const char *)); +int __db_stat_not_built __P((DB_ENV *)); #if defined(__cplusplus) } diff --git a/db/dbinc_auto/ext_def.in b/db/dbinc_auto/ext_def.in index f4a48371e..37577a77e 100644 --- a/db/dbinc_auto/ext_def.in +++ b/db/dbinc_auto/ext_def.in @@ -14,9 +14,12 @@ #define db_env_set_func_exists db_env_set_func_exists@DB_VERSION_UNIQUE_NAME@ #define db_env_set_func_free db_env_set_func_free@DB_VERSION_UNIQUE_NAME@ #define db_env_set_func_fsync db_env_set_func_fsync@DB_VERSION_UNIQUE_NAME@ +#define db_env_set_func_ftruncate db_env_set_func_ftruncate@DB_VERSION_UNIQUE_NAME@ #define db_env_set_func_ioinfo db_env_set_func_ioinfo@DB_VERSION_UNIQUE_NAME@ #define db_env_set_func_malloc db_env_set_func_malloc@DB_VERSION_UNIQUE_NAME@ #define db_env_set_func_map db_env_set_func_map@DB_VERSION_UNIQUE_NAME@ +#define db_env_set_func_pread db_env_set_func_pread@DB_VERSION_UNIQUE_NAME@ +#define db_env_set_func_pwrite db_env_set_func_pwrite@DB_VERSION_UNIQUE_NAME@ #define db_env_set_func_open db_env_set_func_open@DB_VERSION_UNIQUE_NAME@ #define db_env_set_func_read db_env_set_func_read@DB_VERSION_UNIQUE_NAME@ #define db_env_set_func_realloc db_env_set_func_realloc@DB_VERSION_UNIQUE_NAME@ @@ -27,8 +30,7 @@ #define db_env_set_func_unmap db_env_set_func_unmap@DB_VERSION_UNIQUE_NAME@ #define db_env_set_func_write db_env_set_func_write@DB_VERSION_UNIQUE_NAME@ #define db_env_set_func_yield db_env_set_func_yield@DB_VERSION_UNIQUE_NAME@ -#define db_env_xa_attach db_env_xa_attach@DB_VERSION_UNIQUE_NAME@ -#define db_env_set_thread_func db_env_set_thread_func@DB_VERSION_UNIQUE_NAME@ +#define db_sequence_create db_sequence_create@DB_VERSION_UNIQUE_NAME@ #if DB_DBM_HSEARCH != 0 #define __db_ndbm_clearerr __db_ndbm_clearerr@DB_VERSION_UNIQUE_NAME@ #define __db_ndbm_close __db_ndbm_close@DB_VERSION_UNIQUE_NAME@ @@ -43,14 +45,11 @@ #define __db_ndbm_rdonly __db_ndbm_rdonly@DB_VERSION_UNIQUE_NAME@ #define __db_ndbm_store __db_ndbm_store@DB_VERSION_UNIQUE_NAME@ #define __db_dbm_close __db_dbm_close@DB_VERSION_UNIQUE_NAME@ -#define __db_dbm_dbrdonly __db_dbm_dbrdonly@DB_VERSION_UNIQUE_NAME@ #define __db_dbm_delete __db_dbm_delete@DB_VERSION_UNIQUE_NAME@ -#define __db_dbm_dirf __db_dbm_dirf@DB_VERSION_UNIQUE_NAME@ #define __db_dbm_fetch __db_dbm_fetch@DB_VERSION_UNIQUE_NAME@ #define __db_dbm_firstkey __db_dbm_firstkey@DB_VERSION_UNIQUE_NAME@ #define __db_dbm_init __db_dbm_init@DB_VERSION_UNIQUE_NAME@ #define __db_dbm_nextkey __db_dbm_nextkey@DB_VERSION_UNIQUE_NAME@ -#define __db_dbm_pagf __db_dbm_pagf@DB_VERSION_UNIQUE_NAME@ #define __db_dbm_store __db_dbm_store@DB_VERSION_UNIQUE_NAME@ #endif #if DB_DBM_HSEARCH != 0 diff --git a/db/dbinc_auto/ext_prot.in b/db/dbinc_auto/ext_prot.in index 58f4943df..d4e49ed4c 100644 --- a/db/dbinc_auto/ext_prot.in +++ b/db/dbinc_auto/ext_prot.in @@ -18,19 +18,23 @@ int db_env_set_func_dirlist __P((int (*)(const char *, char 
***, int *))); int db_env_set_func_exists __P((int (*)(const char *, int *))); int db_env_set_func_free __P((void (*)(void *))); int db_env_set_func_fsync __P((int (*)(int))); +int db_env_set_func_ftruncate __P((int (*)(int, off_t))); int db_env_set_func_ioinfo __P((int (*)(const char *, int, u_int32_t *, u_int32_t *, u_int32_t *))); int db_env_set_func_malloc __P((void *(*)(size_t))); int db_env_set_func_map __P((int (*)(char *, size_t, int, int, void **))); +int db_env_set_func_pread __P((ssize_t (*)(int, void *, size_t, off_t))); +int db_env_set_func_pwrite __P((ssize_t (*)(int, const void *, size_t, off_t))); int db_env_set_func_open __P((int (*)(const char *, int, ...))); int db_env_set_func_read __P((ssize_t (*)(int, void *, size_t))); int db_env_set_func_realloc __P((void *(*)(void *, size_t))); int db_env_set_func_rename __P((int (*)(const char *, const char *))); -int db_env_set_func_seek __P((int (*)(int, size_t, db_pgno_t, u_int32_t, int, int))); +int db_env_set_func_seek __P((int (*)(int, off_t, int))); int db_env_set_func_sleep __P((int (*)(u_long, u_long))); int db_env_set_func_unlink __P((int (*)(const char *))); int db_env_set_func_unmap __P((int (*)(void *, size_t))); int db_env_set_func_write __P((ssize_t (*)(int, const void *, size_t))); int db_env_set_func_yield __P((int (*)(void))); +int db_sequence_create __P((DB_SEQUENCE **, DB *, u_int32_t)); #if DB_DBM_HSEARCH != 0 int __db_ndbm_clearerr __P((DBM *)); void __db_ndbm_close __P((DBM *)); @@ -45,14 +49,11 @@ int __db_ndbm_pagfno __P((DBM *)); int __db_ndbm_rdonly __P((DBM *)); int __db_ndbm_store __P((DBM *, datum, datum, int)); int __db_dbm_close __P((void)); -int __db_dbm_dbrdonly __P((void)); int __db_dbm_delete __P((datum)); -int __db_dbm_dirf __P((void)); datum __db_dbm_fetch __P((datum)); datum __db_dbm_firstkey __P((void)); int __db_dbm_init __P((char *)); datum __db_dbm_nextkey __P((datum)); -int __db_dbm_pagf __P((void)); int __db_dbm_store __P((datum, datum)); #endif #if DB_DBM_HSEARCH != 0 diff --git a/db/dbinc_auto/fileops_ext.h b/db/dbinc_auto/fileops_ext.h index 328e471d2..25b2fe625 100644 --- a/db/dbinc_auto/fileops_ext.h +++ b/db/dbinc_auto/fileops_ext.h @@ -7,28 +7,22 @@ extern "C" { #endif int __fop_create_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *, u_int32_t, u_int32_t)); -int __fop_create_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __fop_create_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __fop_create_read __P((DB_ENV *, void *, __fop_create_args **)); int __fop_remove_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *, const DBT *, u_int32_t)); -int __fop_remove_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __fop_remove_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __fop_remove_read __P((DB_ENV *, void *, __fop_remove_args **)); int __fop_write_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *, u_int32_t, u_int32_t, db_pgno_t, u_int32_t, const DBT *, u_int32_t)); -int __fop_write_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __fop_write_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __fop_write_read __P((DB_ENV *, void *, __fop_write_args **)); int __fop_rename_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *, const DBT *, const DBT *, u_int32_t)); -int __fop_rename_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __fop_rename_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __fop_rename_read 
__P((DB_ENV *, void *, __fop_rename_args **)); int __fop_file_remove_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *, const DBT *, const DBT *, u_int32_t, u_int32_t)); -int __fop_file_remove_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __fop_file_remove_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __fop_file_remove_read __P((DB_ENV *, void *, __fop_file_remove_args **)); -int __fop_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); -int __fop_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); int __fop_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); +int __fop_create_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __fop_remove_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __fop_write_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __fop_rename_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __fop_file_remove_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __fop_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); int __fop_create __P((DB_ENV *, DB_TXN *, DB_FH **, const char *, APPNAME, int, u_int32_t)); int __fop_remove __P((DB_ENV *, DB_TXN *, u_int8_t *, const char *, APPNAME, u_int32_t)); int __fop_write __P((DB_ENV *, DB_TXN *, const char *, APPNAME, DB_FH *, u_int32_t, db_pgno_t, u_int32_t, u_int8_t *, u_int32_t, u_int32_t, u_int32_t)); diff --git a/db/dbinc_auto/hash_auto.h b/db/dbinc_auto/hash_auto.h index 7ec3fb7ef..5b5525363 100644 --- a/db/dbinc_auto/hash_auto.h +++ b/db/dbinc_auto/hash_auto.h @@ -87,6 +87,7 @@ typedef struct ___ham_metagroup_args { db_pgno_t pgno; DB_LSN pagelsn; u_int32_t newalloc; + db_pgno_t last_pgno; } __ham_metagroup_args; #define DB___ham_groupalloc 32 @@ -99,6 +100,7 @@ typedef struct ___ham_groupalloc_args { db_pgno_t start_pgno; u_int32_t num; db_pgno_t free; + db_pgno_t last_pgno; } __ham_groupalloc_args; #define DB___ham_curadj 33 diff --git a/db/dbinc_auto/hash_ext.h b/db/dbinc_auto/hash_ext.h index ecd088083..4869df741 100644 --- a/db/dbinc_auto/hash_ext.h +++ b/db/dbinc_auto/hash_ext.h @@ -10,49 +10,39 @@ int __ham_quick_delete __P((DBC *)); int __ham_c_init __P((DBC *)); int __ham_c_count __P((DBC *, db_recno_t *)); int __ham_c_dup __P((DBC *, DBC *)); -u_int32_t __ham_call_hash __P((DBC *, u_int8_t *, int32_t)); +u_int32_t __ham_call_hash __P((DBC *, u_int8_t *, u_int32_t)); int __ham_init_dbt __P((DB_ENV *, DBT *, u_int32_t, void **, u_int32_t *)); int __ham_c_update __P((DBC *, u_int32_t, int, int)); int __ham_get_clist __P((DB *, db_pgno_t, u_int32_t, DBC ***)); int __ham_insdel_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_pgno_t, u_int32_t, DB_LSN *, const DBT *, const DBT *)); -int __ham_insdel_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __ham_insdel_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __ham_insdel_read __P((DB_ENV *, void *, __ham_insdel_args **)); int __ham_newpage_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *)); -int __ham_newpage_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __ham_newpage_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __ham_newpage_read __P((DB_ENV *, void *, __ham_newpage_args **)); int __ham_splitdata_log __P((DB *, DB_TXN *, DB_LSN 
*, u_int32_t, u_int32_t, db_pgno_t, const DBT *, DB_LSN *)); -int __ham_splitdata_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __ham_splitdata_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __ham_splitdata_read __P((DB_ENV *, void *, __ham_splitdata_args **)); int __ham_replace_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, u_int32_t, DB_LSN *, int32_t, const DBT *, const DBT *, u_int32_t)); -int __ham_replace_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __ham_replace_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __ham_replace_read __P((DB_ENV *, void *, __ham_replace_args **)); int __ham_copypage_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, const DBT *)); -int __ham_copypage_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __ham_copypage_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __ham_copypage_read __P((DB_ENV *, void *, __ham_copypage_args **)); -int __ham_metagroup_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, u_int32_t)); -int __ham_metagroup_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __ham_metagroup_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __ham_metagroup_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, u_int32_t, db_pgno_t)); int __ham_metagroup_read __P((DB_ENV *, void *, __ham_metagroup_args **)); -int __ham_groupalloc_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_pgno_t)); -int __ham_groupalloc_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __ham_groupalloc_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __ham_groupalloc_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_pgno_t, db_pgno_t)); int __ham_groupalloc_read __P((DB_ENV *, void *, __ham_groupalloc_args **)); int __ham_curadj_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, u_int32_t, u_int32_t, u_int32_t, int, int, u_int32_t)); -int __ham_curadj_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __ham_curadj_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __ham_curadj_read __P((DB_ENV *, void *, __ham_curadj_args **)); int __ham_chgpg_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_ham_mode, db_pgno_t, db_pgno_t, u_int32_t, u_int32_t)); -int __ham_chgpg_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __ham_chgpg_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __ham_chgpg_read __P((DB_ENV *, void *, __ham_chgpg_args **)); -int __ham_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); -int __ham_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); int __ham_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); +int __ham_insdel_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __ham_newpage_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __ham_splitdata_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __ham_replace_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __ham_copypage_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __ham_metagroup_print __P((DB_ENV *, DBT *, DB_LSN 
*, db_recops, void *)); +int __ham_groupalloc_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __ham_curadj_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __ham_chgpg_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __ham_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); int __ham_pgin __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *)); int __ham_pgout __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *)); int __ham_mswap __P((void *)); @@ -60,7 +50,6 @@ int __ham_add_dup __P((DBC *, DBT *, u_int32_t, db_pgno_t *)); int __ham_dup_convert __P((DBC *)); int __ham_make_dup __P((DB_ENV *, const DBT *, DBT *d, void **, u_int32_t *)); void __ham_dsearch __P((DBC *, DBT *, u_int32_t *, int *, u_int32_t)); -void __ham_cprint __P((DBC *)); u_int32_t __ham_func2 __P((DB *, const void *, u_int32_t)); u_int32_t __ham_func3 __P((DB *, const void *, u_int32_t)); u_int32_t __ham_func4 __P((DB *, const void *, u_int32_t)); @@ -71,13 +60,15 @@ int __ham_release_meta __P((DBC *)); int __ham_dirty_meta __P((DBC *)); int __ham_db_create __P((DB *)); int __ham_db_close __P((DB *)); +int __ham_get_h_ffactor __P((DB *, u_int32_t *)); +int __ham_get_h_nelem __P((DB *, u_int32_t *)); int __ham_open __P((DB *, DB_TXN *, const char * name, db_pgno_t, u_int32_t)); int __ham_metachk __P((DB *, const char *, HMETA *)); int __ham_new_file __P((DB *, DB_TXN *, DB_FH *, const char *)); int __ham_new_subdb __P((DB *, DB *, DB_TXN *)); int __ham_item __P((DBC *, db_lockmode_t, db_pgno_t *)); int __ham_item_reset __P((DBC *)); -void __ham_item_init __P((DBC *)); +int __ham_item_init __P((DBC *)); int __ham_item_last __P((DBC *, db_lockmode_t, db_pgno_t *)); int __ham_item_first __P((DBC *, db_lockmode_t, db_pgno_t *)); int __ham_item_prev __P((DBC *, db_lockmode_t, db_pgno_t *)); @@ -86,7 +77,7 @@ void __ham_putitem __P((DB *, PAGE *p, const DBT *, int)); void __ham_reputpair __P((DB *, PAGE *, u_int32_t, const DBT *, const DBT *)); int __ham_del_pair __P((DBC *, int)); int __ham_replpair __P((DBC *, DBT *, u_int32_t)); -void __ham_onpage_replace __P((DB *, PAGE *, u_int32_t, int32_t, int32_t, DBT *)); +void __ham_onpage_replace __P((DB *, PAGE *, u_int32_t, int32_t, u_int32_t, int, DBT *)); int __ham_split_page __P((DBC *, u_int32_t, u_int32_t)); int __ham_add_el __P((DBC *, const DBT *, const DBT *, int)); void __ham_copy_item __P((DB *, PAGE *, u_int32_t, PAGE *)); @@ -107,6 +98,8 @@ int __ham_chgpg_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __ham_reclaim __P((DB *, DB_TXN *txn)); int __ham_truncate __P((DBC *, u_int32_t *)); int __ham_stat __P((DBC *, void *, u_int32_t)); +int __ham_stat_print __P((DBC *, u_int32_t)); +void __ham_print_cursor __P((DBC *)); int __ham_traverse __P((DBC *, db_lockmode_t, int (*)(DB *, PAGE *, void *, int *), void *, int)); int __db_no_hash_am __P((DB_ENV *)); int __ham_30_hashmeta __P((DB *, char *, u_int8_t *)); diff --git a/db/dbinc_auto/int_def.in b/db/dbinc_auto/int_def.in index 2de7fb620..eb8756bf4 100644 --- a/db/dbinc_auto/int_def.in +++ b/db/dbinc_auto/int_def.in @@ -3,81 +3,67 @@ #define _DB_INT_DEF_IN_ #define __crdel_metasub_log __crdel_metasub_log@DB_VERSION_UNIQUE_NAME@ -#define __crdel_metasub_getpgnos __crdel_metasub_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __crdel_metasub_print __crdel_metasub_print@DB_VERSION_UNIQUE_NAME@ #define __crdel_metasub_read __crdel_metasub_read@DB_VERSION_UNIQUE_NAME@ -#define __crdel_init_print __crdel_init_print@DB_VERSION_UNIQUE_NAME@ 
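
The env_ext.h, db_ext.h and hash_ext.h hunks above drop the per-record getpgnos callbacks and move human-readable output off FILE *-based helpers onto a message channel: __dbenv_set_msgcall/__dbenv_set_msgfile and __db_msgadd/__db_msg appear, along with stat-print entry points such as __dbenv_stat_print_pp, __db_stat_print_pp/__db_stat_print and __ham_stat_print/__ham_print_cursor. A minimal sketch of how an application would pick up that output, assuming the public handle methods these internal _pp wrappers back (DB_ENV->set_msgcall, DB_ENV->stat_print), the DB_STAT_ALL flag, and an arbitrary scratch environment home of /tmp/bdb-demo; the callback type is the one shown in the env_ext.h hunk:

    #include <stdio.h>
    #include <db.h>

    /*
     * Callback with the signature added in env_ext.h:
     *     void (*)(const DB_ENV *, const char *)
     * Each call delivers one line of statistics/diagnostic text.
     */
    static void
    collect_msg(const DB_ENV *dbenv, const char *msg)
    {
        fprintf(stderr, "bdb: %s\n", msg);
    }

    int
    main(void)
    {
        DB_ENV *dbenv;
        int ret;

        if ((ret = db_env_create(&dbenv, 0)) != 0)
            return (1);
        dbenv->set_msgcall(dbenv, collect_msg);    /* route message output */

        if ((ret = dbenv->open(dbenv, "/tmp/bdb-demo",
            DB_CREATE | DB_INIT_MPOOL, 0)) != 0)
            goto err;

        /* Printed statistics now arrive through the message channel. */
        ret = dbenv->stat_print(dbenv, DB_STAT_ALL);

    err:    (void)dbenv->close(dbenv, 0);
        return (ret == 0 ? 0 : 1);
    }
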
-#define __crdel_init_getpgnos __crdel_init_getpgnos@DB_VERSION_UNIQUE_NAME@
 #define __crdel_init_recover __crdel_init_recover@DB_VERSION_UNIQUE_NAME@
+#define __crdel_metasub_print __crdel_metasub_print@DB_VERSION_UNIQUE_NAME@
+#define __crdel_init_print __crdel_init_print@DB_VERSION_UNIQUE_NAME@
 #define __crdel_metasub_recover __crdel_metasub_recover@DB_VERSION_UNIQUE_NAME@
 #define __db_master_open __db_master_open@DB_VERSION_UNIQUE_NAME@
 #define __db_master_update __db_master_update@DB_VERSION_UNIQUE_NAME@
 #define __db_dbenv_setup __db_dbenv_setup@DB_VERSION_UNIQUE_NAME@
+#define __db_dbenv_mpool __db_dbenv_mpool@DB_VERSION_UNIQUE_NAME@
 #define __db_close __db_close@DB_VERSION_UNIQUE_NAME@
 #define __db_refresh __db_refresh@DB_VERSION_UNIQUE_NAME@
 #define __db_log_page __db_log_page@DB_VERSION_UNIQUE_NAME@
 #define __db_backup_name __db_backup_name@DB_VERSION_UNIQUE_NAME@
 #define __dblist_get __dblist_get@DB_VERSION_UNIQUE_NAME@
-#if CONFIG_TEST
+#ifdef CONFIG_TEST
 #define __db_testcopy __db_testcopy@DB_VERSION_UNIQUE_NAME@
 #endif
 #define __db_cursor_int __db_cursor_int@DB_VERSION_UNIQUE_NAME@
-#define __db_cprint __db_cprint@DB_VERSION_UNIQUE_NAME@
 #define __db_put __db_put@DB_VERSION_UNIQUE_NAME@
 #define __db_del __db_del@DB_VERSION_UNIQUE_NAME@
 #define __db_sync __db_sync@DB_VERSION_UNIQUE_NAME@
 #define __db_associate __db_associate@DB_VERSION_UNIQUE_NAME@
+#define __db_secondary_close __db_secondary_close@DB_VERSION_UNIQUE_NAME@
 #define __db_addrem_log __db_addrem_log@DB_VERSION_UNIQUE_NAME@
-#define __db_addrem_getpgnos __db_addrem_getpgnos@DB_VERSION_UNIQUE_NAME@
-#define __db_addrem_print __db_addrem_print@DB_VERSION_UNIQUE_NAME@
 #define __db_addrem_read __db_addrem_read@DB_VERSION_UNIQUE_NAME@
 #define __db_big_log __db_big_log@DB_VERSION_UNIQUE_NAME@
-#define __db_big_getpgnos __db_big_getpgnos@DB_VERSION_UNIQUE_NAME@
-#define __db_big_print __db_big_print@DB_VERSION_UNIQUE_NAME@
 #define __db_big_read __db_big_read@DB_VERSION_UNIQUE_NAME@
 #define __db_ovref_log __db_ovref_log@DB_VERSION_UNIQUE_NAME@
-#define __db_ovref_getpgnos __db_ovref_getpgnos@DB_VERSION_UNIQUE_NAME@
-#define __db_ovref_print __db_ovref_print@DB_VERSION_UNIQUE_NAME@
 #define __db_ovref_read __db_ovref_read@DB_VERSION_UNIQUE_NAME@
-#define __db_relink_log __db_relink_log@DB_VERSION_UNIQUE_NAME@
-#define __db_relink_getpgnos __db_relink_getpgnos@DB_VERSION_UNIQUE_NAME@
-#define __db_relink_print __db_relink_print@DB_VERSION_UNIQUE_NAME@
-#define __db_relink_read __db_relink_read@DB_VERSION_UNIQUE_NAME@
 #define __db_debug_log __db_debug_log@DB_VERSION_UNIQUE_NAME@
-#define __db_debug_getpgnos __db_debug_getpgnos@DB_VERSION_UNIQUE_NAME@
-#define __db_debug_print __db_debug_print@DB_VERSION_UNIQUE_NAME@
 #define __db_debug_read __db_debug_read@DB_VERSION_UNIQUE_NAME@
 #define __db_noop_log __db_noop_log@DB_VERSION_UNIQUE_NAME@
-#define __db_noop_getpgnos __db_noop_getpgnos@DB_VERSION_UNIQUE_NAME@
-#define __db_noop_print __db_noop_print@DB_VERSION_UNIQUE_NAME@
 #define __db_noop_read __db_noop_read@DB_VERSION_UNIQUE_NAME@
 #define __db_pg_alloc_log __db_pg_alloc_log@DB_VERSION_UNIQUE_NAME@
-#define __db_pg_alloc_getpgnos __db_pg_alloc_getpgnos@DB_VERSION_UNIQUE_NAME@
-#define __db_pg_alloc_print __db_pg_alloc_print@DB_VERSION_UNIQUE_NAME@
 #define __db_pg_alloc_read __db_pg_alloc_read@DB_VERSION_UNIQUE_NAME@
 #define __db_pg_free_log __db_pg_free_log@DB_VERSION_UNIQUE_NAME@
-#define __db_pg_free_getpgnos __db_pg_free_getpgnos@DB_VERSION_UNIQUE_NAME@
-#define __db_pg_free_print __db_pg_free_print@DB_VERSION_UNIQUE_NAME@
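
Earlier in this patch, the ext_prot.in and ext_def.in hunks export three new process-wide replaceable I/O calls, db_env_set_func_ftruncate, db_env_set_func_pread and db_env_set_func_pwrite, and simplify db_env_set_func_seek to int (*)(int, off_t, int); the same change set gates __db_add_limbo/__db_do_the_limbo behind #ifndef HAVE_FTRUNCATE and exports db_sequence_create. A short sketch of registering plain POSIX wrappers through the new hooks, using exactly the prototypes shown there; the assumption, consistent with the existing db_env_set_func_* calls, is that the hooks must be installed before any DB_ENV or DB handle is created:

    #include <sys/types.h>
    #include <unistd.h>
    #include <db.h>

    /* POSIX wrappers matching the prototypes added to ext_prot.in. */
    static ssize_t
    wrap_pread(int fd, void *buf, size_t nbytes, off_t offset)
    {
        return (pread(fd, buf, nbytes, offset));
    }

    static ssize_t
    wrap_pwrite(int fd, const void *buf, size_t nbytes, off_t offset)
    {
        return (pwrite(fd, buf, nbytes, offset));
    }

    static int
    wrap_ftruncate(int fd, off_t length)
    {
        return (ftruncate(fd, length));
    }

    int
    install_io_hooks(void)
    {
        int ret;

        /* Hooks are process-wide; install them before creating handles. */
        if ((ret = db_env_set_func_pread(wrap_pread)) != 0 ||
            (ret = db_env_set_func_pwrite(wrap_pwrite)) != 0 ||
            (ret = db_env_set_func_ftruncate(wrap_ftruncate)) != 0)
            return (ret);
        return (0);
    }
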
#define __db_pg_free_read __db_pg_free_read@DB_VERSION_UNIQUE_NAME@ #define __db_cksum_log __db_cksum_log@DB_VERSION_UNIQUE_NAME@ -#define __db_cksum_getpgnos __db_cksum_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __db_cksum_print __db_cksum_print@DB_VERSION_UNIQUE_NAME@ #define __db_cksum_read __db_cksum_read@DB_VERSION_UNIQUE_NAME@ #define __db_pg_freedata_log __db_pg_freedata_log@DB_VERSION_UNIQUE_NAME@ -#define __db_pg_freedata_getpgnos __db_pg_freedata_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __db_pg_freedata_print __db_pg_freedata_print@DB_VERSION_UNIQUE_NAME@ #define __db_pg_freedata_read __db_pg_freedata_read@DB_VERSION_UNIQUE_NAME@ #define __db_pg_prepare_log __db_pg_prepare_log@DB_VERSION_UNIQUE_NAME@ -#define __db_pg_prepare_getpgnos __db_pg_prepare_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __db_pg_prepare_print __db_pg_prepare_print@DB_VERSION_UNIQUE_NAME@ #define __db_pg_prepare_read __db_pg_prepare_read@DB_VERSION_UNIQUE_NAME@ #define __db_pg_new_log __db_pg_new_log@DB_VERSION_UNIQUE_NAME@ -#define __db_pg_new_getpgnos __db_pg_new_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __db_pg_new_print __db_pg_new_print@DB_VERSION_UNIQUE_NAME@ #define __db_pg_new_read __db_pg_new_read@DB_VERSION_UNIQUE_NAME@ -#define __db_init_print __db_init_print@DB_VERSION_UNIQUE_NAME@ -#define __db_init_getpgnos __db_init_getpgnos@DB_VERSION_UNIQUE_NAME@ +#define __db_pg_init_log __db_pg_init_log@DB_VERSION_UNIQUE_NAME@ +#define __db_pg_init_read __db_pg_init_read@DB_VERSION_UNIQUE_NAME@ #define __db_init_recover __db_init_recover@DB_VERSION_UNIQUE_NAME@ +#define __db_addrem_print __db_addrem_print@DB_VERSION_UNIQUE_NAME@ +#define __db_big_print __db_big_print@DB_VERSION_UNIQUE_NAME@ +#define __db_ovref_print __db_ovref_print@DB_VERSION_UNIQUE_NAME@ +#define __db_debug_print __db_debug_print@DB_VERSION_UNIQUE_NAME@ +#define __db_noop_print __db_noop_print@DB_VERSION_UNIQUE_NAME@ +#define __db_pg_alloc_print __db_pg_alloc_print@DB_VERSION_UNIQUE_NAME@ +#define __db_pg_free_print __db_pg_free_print@DB_VERSION_UNIQUE_NAME@ +#define __db_cksum_print __db_cksum_print@DB_VERSION_UNIQUE_NAME@ +#define __db_pg_freedata_print __db_pg_freedata_print@DB_VERSION_UNIQUE_NAME@ +#define __db_pg_prepare_print __db_pg_prepare_print@DB_VERSION_UNIQUE_NAME@ +#define __db_pg_new_print __db_pg_new_print@DB_VERSION_UNIQUE_NAME@ +#define __db_pg_init_print __db_pg_init_print@DB_VERSION_UNIQUE_NAME@ +#define __db_init_print __db_init_print@DB_VERSION_UNIQUE_NAME@ #define __db_c_close __db_c_close@DB_VERSION_UNIQUE_NAME@ #define __db_c_destroy __db_c_destroy@DB_VERSION_UNIQUE_NAME@ #define __db_c_count __db_c_count@DB_VERSION_UNIQUE_NAME@ @@ -111,13 +97,15 @@ #define __db_txnlist_gen __db_txnlist_gen@DB_VERSION_UNIQUE_NAME@ #define __db_txnlist_lsnadd __db_txnlist_lsnadd@DB_VERSION_UNIQUE_NAME@ #define __db_txnlist_lsninit __db_txnlist_lsninit@DB_VERSION_UNIQUE_NAME@ +#ifndef HAVE_FTRUNCATE #define __db_add_limbo __db_add_limbo@DB_VERSION_UNIQUE_NAME@ +#endif +#ifndef HAVE_FTRUNCATE #define __db_do_the_limbo __db_do_the_limbo@DB_VERSION_UNIQUE_NAME@ -#define __db_default_getpgnos __db_default_getpgnos@DB_VERSION_UNIQUE_NAME@ +#endif #define __db_txnlist_print __db_txnlist_print@DB_VERSION_UNIQUE_NAME@ #define __db_ditem __db_ditem@DB_VERSION_UNIQUE_NAME@ #define __db_pitem __db_pitem@DB_VERSION_UNIQUE_NAME@ -#define __db_relink __db_relink@DB_VERSION_UNIQUE_NAME@ #define __db_associate_pp __db_associate_pp@DB_VERSION_UNIQUE_NAME@ #define __db_close_pp __db_close_pp@DB_VERSION_UNIQUE_NAME@ #define __db_cursor_pp 
__db_cursor_pp@DB_VERSION_UNIQUE_NAME@ @@ -132,8 +120,6 @@ #define __db_pget_pp __db_pget_pp@DB_VERSION_UNIQUE_NAME@ #define __db_pget __db_pget@DB_VERSION_UNIQUE_NAME@ #define __db_put_pp __db_put_pp@DB_VERSION_UNIQUE_NAME@ -#define __db_stat_pp __db_stat_pp@DB_VERSION_UNIQUE_NAME@ -#define __db_stat __db_stat@DB_VERSION_UNIQUE_NAME@ #define __db_sync_pp __db_sync_pp@DB_VERSION_UNIQUE_NAME@ #define __db_c_close_pp __db_c_close_pp@DB_VERSION_UNIQUE_NAME@ #define __db_c_count_pp __db_c_count_pp@DB_VERSION_UNIQUE_NAME@ @@ -141,6 +127,7 @@ #define __db_c_del_arg __db_c_del_arg@DB_VERSION_UNIQUE_NAME@ #define __db_c_dup_pp __db_c_dup_pp@DB_VERSION_UNIQUE_NAME@ #define __db_c_get_pp __db_c_get_pp@DB_VERSION_UNIQUE_NAME@ +#define __db_secondary_close_pp __db_secondary_close_pp@DB_VERSION_UNIQUE_NAME@ #define __db_c_pget_pp __db_c_pget_pp@DB_VERSION_UNIQUE_NAME@ #define __db_c_put_pp __db_c_put_pp@DB_VERSION_UNIQUE_NAME@ #define __db_txn_auto_init __db_txn_auto_init@DB_VERSION_UNIQUE_NAME@ @@ -154,7 +141,9 @@ #define __db_lget __db_lget@DB_VERSION_UNIQUE_NAME@ #define __db_lput __db_lput@DB_VERSION_UNIQUE_NAME@ #define __dbh_am_chk __dbh_am_chk@DB_VERSION_UNIQUE_NAME@ +#define __db_get_flags __db_get_flags@DB_VERSION_UNIQUE_NAME@ #define __db_set_flags __db_set_flags@DB_VERSION_UNIQUE_NAME@ +#define __db_get_lorder __db_get_lorder@DB_VERSION_UNIQUE_NAME@ #define __db_set_lorder __db_set_lorder@DB_VERSION_UNIQUE_NAME@ #define __db_set_pagesize __db_set_pagesize@DB_VERSION_UNIQUE_NAME@ #define __db_open __db_open@DB_VERSION_UNIQUE_NAME@ @@ -172,21 +161,25 @@ #define __db_vrfy_ovfl_structure __db_vrfy_ovfl_structure@DB_VERSION_UNIQUE_NAME@ #define __db_safe_goff __db_safe_goff@DB_VERSION_UNIQUE_NAME@ #define __db_loadme __db_loadme@DB_VERSION_UNIQUE_NAME@ -#define __db_dump __db_dump@DB_VERSION_UNIQUE_NAME@ -#define __db_inmemdbflags __db_inmemdbflags@DB_VERSION_UNIQUE_NAME@ +#define __db_dumptree __db_dumptree@DB_VERSION_UNIQUE_NAME@ +#define __db_get_flags_fn __db_get_flags_fn@DB_VERSION_UNIQUE_NAME@ #define __db_prnpage __db_prnpage@DB_VERSION_UNIQUE_NAME@ #define __db_prpage __db_prpage@DB_VERSION_UNIQUE_NAME@ #define __db_pr __db_pr@DB_VERSION_UNIQUE_NAME@ -#define __db_prdbt __db_prdbt@DB_VERSION_UNIQUE_NAME@ #define __db_prflags __db_prflags@DB_VERSION_UNIQUE_NAME@ -#define __db_dbtype_to_string __db_dbtype_to_string@DB_VERSION_UNIQUE_NAME@ +#define __db_lockmode_to_string __db_lockmode_to_string@DB_VERSION_UNIQUE_NAME@ +#define __db_dumptree __db_dumptree@DB_VERSION_UNIQUE_NAME@ +#define __db_get_flags_fn __db_get_flags_fn@DB_VERSION_UNIQUE_NAME@ +#define __db_dump_pp __db_dump_pp@DB_VERSION_UNIQUE_NAME@ +#define __db_dump __db_dump@DB_VERSION_UNIQUE_NAME@ +#define __db_prdbt __db_prdbt@DB_VERSION_UNIQUE_NAME@ #define __db_prheader __db_prheader@DB_VERSION_UNIQUE_NAME@ #define __db_prfooter __db_prfooter@DB_VERSION_UNIQUE_NAME@ #define __db_pr_callback __db_pr_callback@DB_VERSION_UNIQUE_NAME@ +#define __db_dbtype_to_string __db_dbtype_to_string@DB_VERSION_UNIQUE_NAME@ #define __db_addrem_recover __db_addrem_recover@DB_VERSION_UNIQUE_NAME@ #define __db_big_recover __db_big_recover@DB_VERSION_UNIQUE_NAME@ #define __db_ovref_recover __db_ovref_recover@DB_VERSION_UNIQUE_NAME@ -#define __db_relink_recover __db_relink_recover@DB_VERSION_UNIQUE_NAME@ #define __db_debug_recover __db_debug_recover@DB_VERSION_UNIQUE_NAME@ #define __db_noop_recover __db_noop_recover@DB_VERSION_UNIQUE_NAME@ #define __db_pg_alloc_recover __db_pg_alloc_recover@DB_VERSION_UNIQUE_NAME@ @@ -195,6 +188,7 @@ #define 
__db_pg_freedata_recover __db_pg_freedata_recover@DB_VERSION_UNIQUE_NAME@ #define __db_cksum_recover __db_cksum_recover@DB_VERSION_UNIQUE_NAME@ #define __db_pg_prepare_recover __db_pg_prepare_recover@DB_VERSION_UNIQUE_NAME@ +#define __db_pg_init_recover __db_pg_init_recover@DB_VERSION_UNIQUE_NAME@ #define __db_traverse_big __db_traverse_big@DB_VERSION_UNIQUE_NAME@ #define __db_reclaim_callback __db_reclaim_callback@DB_VERSION_UNIQUE_NAME@ #define __db_truncate_callback __db_truncate_callback@DB_VERSION_UNIQUE_NAME@ @@ -208,6 +202,12 @@ #define __db_rename_int __db_rename_int@DB_VERSION_UNIQUE_NAME@ #define __db_ret __db_ret@DB_VERSION_UNIQUE_NAME@ #define __db_retcopy __db_retcopy@DB_VERSION_UNIQUE_NAME@ +#define __db_fileid_reset __db_fileid_reset@DB_VERSION_UNIQUE_NAME@ +#define __db_lsn_reset __db_lsn_reset@DB_VERSION_UNIQUE_NAME@ +#define __db_stat_pp __db_stat_pp@DB_VERSION_UNIQUE_NAME@ +#define __db_stat __db_stat@DB_VERSION_UNIQUE_NAME@ +#define __db_stat_print_pp __db_stat_print_pp@DB_VERSION_UNIQUE_NAME@ +#define __db_stat_print __db_stat_print@DB_VERSION_UNIQUE_NAME@ #define __db_truncate_pp __db_truncate_pp@DB_VERSION_UNIQUE_NAME@ #define __db_truncate __db_truncate@DB_VERSION_UNIQUE_NAME@ #define __db_upgrade_pp __db_upgrade_pp@DB_VERSION_UNIQUE_NAME@ @@ -216,7 +216,6 @@ #define __db_31_offdup __db_31_offdup@DB_VERSION_UNIQUE_NAME@ #define __db_verify_pp __db_verify_pp@DB_VERSION_UNIQUE_NAME@ #define __db_verify_internal __db_verify_internal@DB_VERSION_UNIQUE_NAME@ -#define __db_verify __db_verify@DB_VERSION_UNIQUE_NAME@ #define __db_vrfy_common __db_vrfy_common@DB_VERSION_UNIQUE_NAME@ #define __db_vrfy_datapage __db_vrfy_datapage@DB_VERSION_UNIQUE_NAME@ #define __db_vrfy_meta __db_vrfy_meta@DB_VERSION_UNIQUE_NAME@ @@ -244,13 +243,13 @@ #define __db_salvage_isdone __db_salvage_isdone@DB_VERSION_UNIQUE_NAME@ #define __db_salvage_markdone __db_salvage_markdone@DB_VERSION_UNIQUE_NAME@ #define __db_salvage_markneeded __db_salvage_markneeded@DB_VERSION_UNIQUE_NAME@ +#define __db_vrfy_prdbt __db_vrfy_prdbt@DB_VERSION_UNIQUE_NAME@ #define __bam_cmp __bam_cmp@DB_VERSION_UNIQUE_NAME@ #define __bam_defcmp __bam_defcmp@DB_VERSION_UNIQUE_NAME@ #define __bam_defpfx __bam_defpfx@DB_VERSION_UNIQUE_NAME@ #define __bam_pgin __bam_pgin@DB_VERSION_UNIQUE_NAME@ #define __bam_pgout __bam_pgout@DB_VERSION_UNIQUE_NAME@ #define __bam_mswap __bam_mswap@DB_VERSION_UNIQUE_NAME@ -#define __bam_cprint __bam_cprint@DB_VERSION_UNIQUE_NAME@ #define __bam_ca_delete __bam_ca_delete@DB_VERSION_UNIQUE_NAME@ #define __ram_ca_delete __ram_ca_delete@DB_VERSION_UNIQUE_NAME@ #define __bam_ca_di __bam_ca_di@DB_VERSION_UNIQUE_NAME@ @@ -269,13 +268,17 @@ #define __bam_ditem __bam_ditem@DB_VERSION_UNIQUE_NAME@ #define __bam_adjindx __bam_adjindx@DB_VERSION_UNIQUE_NAME@ #define __bam_dpages __bam_dpages@DB_VERSION_UNIQUE_NAME@ +#define __bam_relink __bam_relink@DB_VERSION_UNIQUE_NAME@ #define __bam_db_create __bam_db_create@DB_VERSION_UNIQUE_NAME@ #define __bam_db_close __bam_db_close@DB_VERSION_UNIQUE_NAME@ #define __bam_map_flags __bam_map_flags@DB_VERSION_UNIQUE_NAME@ #define __bam_set_flags __bam_set_flags@DB_VERSION_UNIQUE_NAME@ #define __bam_set_bt_compare __bam_set_bt_compare@DB_VERSION_UNIQUE_NAME@ +#define __bam_get_bt_minkey __bam_get_bt_minkey@DB_VERSION_UNIQUE_NAME@ #define __ram_map_flags __ram_map_flags@DB_VERSION_UNIQUE_NAME@ #define __ram_set_flags __ram_set_flags@DB_VERSION_UNIQUE_NAME@ +#define __ram_get_re_len __ram_get_re_len@DB_VERSION_UNIQUE_NAME@ +#define __ram_get_re_pad 
__ram_get_re_pad@DB_VERSION_UNIQUE_NAME@ #define __bam_open __bam_open@DB_VERSION_UNIQUE_NAME@ #define __bam_metachk __bam_metachk@DB_VERSION_UNIQUE_NAME@ #define __bam_read_root __bam_read_root@DB_VERSION_UNIQUE_NAME@ @@ -292,6 +295,7 @@ #define __bam_root_recover __bam_root_recover@DB_VERSION_UNIQUE_NAME@ #define __bam_curadj_recover __bam_curadj_recover@DB_VERSION_UNIQUE_NAME@ #define __bam_rcuradj_recover __bam_rcuradj_recover@DB_VERSION_UNIQUE_NAME@ +#define __bam_relink_recover __bam_relink_recover@DB_VERSION_UNIQUE_NAME@ #define __bam_reclaim __bam_reclaim@DB_VERSION_UNIQUE_NAME@ #define __bam_truncate __bam_truncate@DB_VERSION_UNIQUE_NAME@ #define __ram_open __ram_open@DB_VERSION_UNIQUE_NAME@ @@ -312,9 +316,11 @@ #define __bam_split __bam_split@DB_VERSION_UNIQUE_NAME@ #define __bam_copy __bam_copy@DB_VERSION_UNIQUE_NAME@ #define __bam_stat __bam_stat@DB_VERSION_UNIQUE_NAME@ -#define __bam_traverse __bam_traverse@DB_VERSION_UNIQUE_NAME@ +#define __bam_stat_print __bam_stat_print@DB_VERSION_UNIQUE_NAME@ #define __bam_stat_callback __bam_stat_callback@DB_VERSION_UNIQUE_NAME@ +#define __bam_print_cursor __bam_print_cursor@DB_VERSION_UNIQUE_NAME@ #define __bam_key_range __bam_key_range@DB_VERSION_UNIQUE_NAME@ +#define __bam_traverse __bam_traverse@DB_VERSION_UNIQUE_NAME@ #define __bam_30_btreemeta __bam_30_btreemeta@DB_VERSION_UNIQUE_NAME@ #define __bam_31_btreemeta __bam_31_btreemeta@DB_VERSION_UNIQUE_NAME@ #define __bam_31_lbtree __bam_31_lbtree@DB_VERSION_UNIQUE_NAME@ @@ -328,44 +334,37 @@ #define __bam_salvage_walkdupint __bam_salvage_walkdupint@DB_VERSION_UNIQUE_NAME@ #define __bam_meta2pgset __bam_meta2pgset@DB_VERSION_UNIQUE_NAME@ #define __bam_split_log __bam_split_log@DB_VERSION_UNIQUE_NAME@ -#define __bam_split_getpgnos __bam_split_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __bam_split_print __bam_split_print@DB_VERSION_UNIQUE_NAME@ #define __bam_split_read __bam_split_read@DB_VERSION_UNIQUE_NAME@ #define __bam_rsplit_log __bam_rsplit_log@DB_VERSION_UNIQUE_NAME@ -#define __bam_rsplit_getpgnos __bam_rsplit_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __bam_rsplit_print __bam_rsplit_print@DB_VERSION_UNIQUE_NAME@ #define __bam_rsplit_read __bam_rsplit_read@DB_VERSION_UNIQUE_NAME@ #define __bam_adj_log __bam_adj_log@DB_VERSION_UNIQUE_NAME@ -#define __bam_adj_getpgnos __bam_adj_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __bam_adj_print __bam_adj_print@DB_VERSION_UNIQUE_NAME@ #define __bam_adj_read __bam_adj_read@DB_VERSION_UNIQUE_NAME@ #define __bam_cadjust_log __bam_cadjust_log@DB_VERSION_UNIQUE_NAME@ -#define __bam_cadjust_getpgnos __bam_cadjust_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __bam_cadjust_print __bam_cadjust_print@DB_VERSION_UNIQUE_NAME@ #define __bam_cadjust_read __bam_cadjust_read@DB_VERSION_UNIQUE_NAME@ #define __bam_cdel_log __bam_cdel_log@DB_VERSION_UNIQUE_NAME@ -#define __bam_cdel_getpgnos __bam_cdel_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __bam_cdel_print __bam_cdel_print@DB_VERSION_UNIQUE_NAME@ #define __bam_cdel_read __bam_cdel_read@DB_VERSION_UNIQUE_NAME@ #define __bam_repl_log __bam_repl_log@DB_VERSION_UNIQUE_NAME@ -#define __bam_repl_getpgnos __bam_repl_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __bam_repl_print __bam_repl_print@DB_VERSION_UNIQUE_NAME@ #define __bam_repl_read __bam_repl_read@DB_VERSION_UNIQUE_NAME@ #define __bam_root_log __bam_root_log@DB_VERSION_UNIQUE_NAME@ -#define __bam_root_getpgnos __bam_root_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __bam_root_print __bam_root_print@DB_VERSION_UNIQUE_NAME@ #define __bam_root_read 
__bam_root_read@DB_VERSION_UNIQUE_NAME@ #define __bam_curadj_log __bam_curadj_log@DB_VERSION_UNIQUE_NAME@ -#define __bam_curadj_getpgnos __bam_curadj_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __bam_curadj_print __bam_curadj_print@DB_VERSION_UNIQUE_NAME@ #define __bam_curadj_read __bam_curadj_read@DB_VERSION_UNIQUE_NAME@ #define __bam_rcuradj_log __bam_rcuradj_log@DB_VERSION_UNIQUE_NAME@ -#define __bam_rcuradj_getpgnos __bam_rcuradj_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __bam_rcuradj_print __bam_rcuradj_print@DB_VERSION_UNIQUE_NAME@ #define __bam_rcuradj_read __bam_rcuradj_read@DB_VERSION_UNIQUE_NAME@ -#define __bam_init_print __bam_init_print@DB_VERSION_UNIQUE_NAME@ -#define __bam_init_getpgnos __bam_init_getpgnos@DB_VERSION_UNIQUE_NAME@ +#define __bam_relink_log __bam_relink_log@DB_VERSION_UNIQUE_NAME@ +#define __bam_relink_read __bam_relink_read@DB_VERSION_UNIQUE_NAME@ #define __bam_init_recover __bam_init_recover@DB_VERSION_UNIQUE_NAME@ +#define __bam_split_print __bam_split_print@DB_VERSION_UNIQUE_NAME@ +#define __bam_rsplit_print __bam_rsplit_print@DB_VERSION_UNIQUE_NAME@ +#define __bam_adj_print __bam_adj_print@DB_VERSION_UNIQUE_NAME@ +#define __bam_cadjust_print __bam_cadjust_print@DB_VERSION_UNIQUE_NAME@ +#define __bam_cdel_print __bam_cdel_print@DB_VERSION_UNIQUE_NAME@ +#define __bam_repl_print __bam_repl_print@DB_VERSION_UNIQUE_NAME@ +#define __bam_root_print __bam_root_print@DB_VERSION_UNIQUE_NAME@ +#define __bam_curadj_print __bam_curadj_print@DB_VERSION_UNIQUE_NAME@ +#define __bam_rcuradj_print __bam_rcuradj_print@DB_VERSION_UNIQUE_NAME@ +#define __bam_relink_print __bam_relink_print@DB_VERSION_UNIQUE_NAME@ +#define __bam_init_print __bam_init_print@DB_VERSION_UNIQUE_NAME@ #ifndef HAVE_GETCWD #define getcwd getcwd@DB_VERSION_UNIQUE_NAME@ #endif @@ -387,6 +386,9 @@ #ifndef HAVE_SNPRINTF #define snprintf snprintf@DB_VERSION_UNIQUE_NAME@ #endif +#ifndef HAVE_VSNPRINTF +#define vsnprintf vsnprintf@DB_VERSION_UNIQUE_NAME@ +#endif #ifndef HAVE_STRCASECMP #define strcasecmp strcasecmp@DB_VERSION_UNIQUE_NAME@ #endif @@ -399,9 +401,6 @@ #ifndef HAVE_STRERROR #define strerror strerror@DB_VERSION_UNIQUE_NAME@ #endif -#ifndef HAVE_VSNPRINTF -#define vsnprintf vsnprintf@DB_VERSION_UNIQUE_NAME@ -#endif #define __crypto_region_init __crypto_region_init@DB_VERSION_UNIQUE_NAME@ #define __db_isbigendian __db_isbigendian@DB_VERSION_UNIQUE_NAME@ #define __db_byteorder __db_byteorder@DB_VERSION_UNIQUE_NAME@ @@ -419,6 +418,8 @@ #define __db_err __db_err@DB_VERSION_UNIQUE_NAME@ #define __db_errcall __db_errcall@DB_VERSION_UNIQUE_NAME@ #define __db_errfile __db_errfile@DB_VERSION_UNIQUE_NAME@ +#define __db_msgadd __db_msgadd@DB_VERSION_UNIQUE_NAME@ +#define __db_msg __db_msg@DB_VERSION_UNIQUE_NAME@ #define __db_logmsg __db_logmsg@DB_VERSION_UNIQUE_NAME@ #define __db_unknown_flag __db_unknown_flag@DB_VERSION_UNIQUE_NAME@ #define __db_unknown_type __db_unknown_type@DB_VERSION_UNIQUE_NAME@ @@ -426,6 +427,7 @@ #define __db_not_txn_env __db_not_txn_env@DB_VERSION_UNIQUE_NAME@ #define __db_rec_toobig __db_rec_toobig@DB_VERSION_UNIQUE_NAME@ #define __db_rec_repl __db_rec_repl@DB_VERSION_UNIQUE_NAME@ +#define __db_check_lsn __db_check_lsn@DB_VERSION_UNIQUE_NAME@ #define __db_getlong __db_getlong@DB_VERSION_UNIQUE_NAME@ #define __db_getulong __db_getulong@DB_VERSION_UNIQUE_NAME@ #define __db_idspace __db_idspace@DB_VERSION_UNIQUE_NAME@ @@ -443,6 +445,7 @@ #define __aes_encrypt __aes_encrypt@DB_VERSION_UNIQUE_NAME@ #define __aes_init __aes_init@DB_VERSION_UNIQUE_NAME@ #define __crypto_dbenv_close 
__crypto_dbenv_close@DB_VERSION_UNIQUE_NAME@ +#define __crypto_region_destroy __crypto_region_destroy@DB_VERSION_UNIQUE_NAME@ #define __crypto_algsetup __crypto_algsetup@DB_VERSION_UNIQUE_NAME@ #define __crypto_decrypt_meta __crypto_decrypt_meta@DB_VERSION_UNIQUE_NAME@ #define __crypto_set_passwd __crypto_set_passwd@DB_VERSION_UNIQUE_NAME@ @@ -468,16 +471,16 @@ #define __dbreg_revoke_id __dbreg_revoke_id@DB_VERSION_UNIQUE_NAME@ #define __dbreg_close_id __dbreg_close_id@DB_VERSION_UNIQUE_NAME@ #define __dbreg_register_log __dbreg_register_log@DB_VERSION_UNIQUE_NAME@ -#define __dbreg_register_getpgnos __dbreg_register_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __dbreg_register_print __dbreg_register_print@DB_VERSION_UNIQUE_NAME@ #define __dbreg_register_read __dbreg_register_read@DB_VERSION_UNIQUE_NAME@ -#define __dbreg_init_print __dbreg_init_print@DB_VERSION_UNIQUE_NAME@ -#define __dbreg_init_getpgnos __dbreg_init_getpgnos@DB_VERSION_UNIQUE_NAME@ #define __dbreg_init_recover __dbreg_init_recover@DB_VERSION_UNIQUE_NAME@ +#define __dbreg_register_print __dbreg_register_print@DB_VERSION_UNIQUE_NAME@ +#define __dbreg_init_print __dbreg_init_print@DB_VERSION_UNIQUE_NAME@ #define __dbreg_register_recover __dbreg_register_recover@DB_VERSION_UNIQUE_NAME@ +#define __dbreg_print_fname __dbreg_print_fname@DB_VERSION_UNIQUE_NAME@ +#define __dbreg_print_dblist __dbreg_print_dblist@DB_VERSION_UNIQUE_NAME@ #define __dbreg_add_dbentry __dbreg_add_dbentry@DB_VERSION_UNIQUE_NAME@ #define __dbreg_rem_dbentry __dbreg_rem_dbentry@DB_VERSION_UNIQUE_NAME@ -#define __dbreg_open_files __dbreg_open_files@DB_VERSION_UNIQUE_NAME@ +#define __dbreg_log_files __dbreg_log_files@DB_VERSION_UNIQUE_NAME@ #define __dbreg_close_files __dbreg_close_files@DB_VERSION_UNIQUE_NAME@ #define __dbreg_id_to_db __dbreg_id_to_db@DB_VERSION_UNIQUE_NAME@ #define __dbreg_id_to_db_int __dbreg_id_to_db_int@DB_VERSION_UNIQUE_NAME@ @@ -486,16 +489,11 @@ #define __dbreg_get_name __dbreg_get_name@DB_VERSION_UNIQUE_NAME@ #define __dbreg_do_open __dbreg_do_open@DB_VERSION_UNIQUE_NAME@ #define __dbreg_lazy_id __dbreg_lazy_id@DB_VERSION_UNIQUE_NAME@ -#define __dbreg_push_id __dbreg_push_id@DB_VERSION_UNIQUE_NAME@ -#define __dbreg_pop_id __dbreg_pop_id@DB_VERSION_UNIQUE_NAME@ -#define __dbreg_pluck_id __dbreg_pluck_id@DB_VERSION_UNIQUE_NAME@ -#define __dbreg_print_dblist __dbreg_print_dblist@DB_VERSION_UNIQUE_NAME@ #define __db_shalloc_init __db_shalloc_init@DB_VERSION_UNIQUE_NAME@ #define __db_shalloc_size __db_shalloc_size@DB_VERSION_UNIQUE_NAME@ #define __db_shalloc __db_shalloc@DB_VERSION_UNIQUE_NAME@ #define __db_shalloc_free __db_shalloc_free@DB_VERSION_UNIQUE_NAME@ -#define __db_shsizeof __db_shsizeof@DB_VERSION_UNIQUE_NAME@ -#define __db_shalloc_dump __db_shalloc_dump@DB_VERSION_UNIQUE_NAME@ +#define __db_shalloc_sizeof __db_shalloc_sizeof@DB_VERSION_UNIQUE_NAME@ #define __db_tablesize __db_tablesize@DB_VERSION_UNIQUE_NAME@ #define __db_hashinit __db_hashinit@DB_VERSION_UNIQUE_NAME@ #define __db_fileinit __db_fileinit@DB_VERSION_UNIQUE_NAME@ @@ -505,11 +503,15 @@ #define __dbenv_set_encrypt __dbenv_set_encrypt@DB_VERSION_UNIQUE_NAME@ #define __dbenv_set_flags __dbenv_set_flags@DB_VERSION_UNIQUE_NAME@ #define __dbenv_set_data_dir __dbenv_set_data_dir@DB_VERSION_UNIQUE_NAME@ +#define __dbenv_set_intermediate_dir __dbenv_set_intermediate_dir@DB_VERSION_UNIQUE_NAME@ #define __dbenv_set_errcall __dbenv_set_errcall@DB_VERSION_UNIQUE_NAME@ #define __dbenv_get_errfile __dbenv_get_errfile@DB_VERSION_UNIQUE_NAME@ #define __dbenv_set_errfile 
__dbenv_set_errfile@DB_VERSION_UNIQUE_NAME@ #define __dbenv_get_errpfx __dbenv_get_errpfx@DB_VERSION_UNIQUE_NAME@ #define __dbenv_set_errpfx __dbenv_set_errpfx@DB_VERSION_UNIQUE_NAME@ +#define __dbenv_set_msgcall __dbenv_set_msgcall@DB_VERSION_UNIQUE_NAME@ +#define __dbenv_get_msgfile __dbenv_get_msgfile@DB_VERSION_UNIQUE_NAME@ +#define __dbenv_set_msgfile __dbenv_set_msgfile@DB_VERSION_UNIQUE_NAME@ #define __dbenv_set_paniccall __dbenv_set_paniccall@DB_VERSION_UNIQUE_NAME@ #define __dbenv_set_shm_key __dbenv_set_shm_key@DB_VERSION_UNIQUE_NAME@ #define __dbenv_set_tas_spins __dbenv_set_tas_spins@DB_VERSION_UNIQUE_NAME@ @@ -526,36 +528,39 @@ #define __db_appname __db_appname@DB_VERSION_UNIQUE_NAME@ #define __db_home __db_home@DB_VERSION_UNIQUE_NAME@ #define __db_apprec __db_apprec@DB_VERSION_UNIQUE_NAME@ +#define __log_backup __log_backup@DB_VERSION_UNIQUE_NAME@ #define __env_openfiles __env_openfiles@DB_VERSION_UNIQUE_NAME@ #define __db_e_attach __db_e_attach@DB_VERSION_UNIQUE_NAME@ #define __db_e_detach __db_e_detach@DB_VERSION_UNIQUE_NAME@ #define __db_e_remove __db_e_remove@DB_VERSION_UNIQUE_NAME@ -#define __db_e_stat __db_e_stat@DB_VERSION_UNIQUE_NAME@ #define __db_r_attach __db_r_attach@DB_VERSION_UNIQUE_NAME@ #define __db_r_detach __db_r_detach@DB_VERSION_UNIQUE_NAME@ +#define __dbenv_stat_print_pp __dbenv_stat_print_pp@DB_VERSION_UNIQUE_NAME@ +#define __db_print_fh __db_print_fh@DB_VERSION_UNIQUE_NAME@ +#define __db_print_fileid __db_print_fileid@DB_VERSION_UNIQUE_NAME@ +#define __db_print_mutex __db_print_mutex@DB_VERSION_UNIQUE_NAME@ +#define __db_dl __db_dl@DB_VERSION_UNIQUE_NAME@ +#define __db_dl_pct __db_dl_pct@DB_VERSION_UNIQUE_NAME@ +#define __db_dlbytes __db_dlbytes@DB_VERSION_UNIQUE_NAME@ +#define __db_print_reginfo __db_print_reginfo@DB_VERSION_UNIQUE_NAME@ +#define __db_stat_not_built __db_stat_not_built@DB_VERSION_UNIQUE_NAME@ #define __fop_create_log __fop_create_log@DB_VERSION_UNIQUE_NAME@ -#define __fop_create_getpgnos __fop_create_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __fop_create_print __fop_create_print@DB_VERSION_UNIQUE_NAME@ #define __fop_create_read __fop_create_read@DB_VERSION_UNIQUE_NAME@ #define __fop_remove_log __fop_remove_log@DB_VERSION_UNIQUE_NAME@ -#define __fop_remove_getpgnos __fop_remove_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __fop_remove_print __fop_remove_print@DB_VERSION_UNIQUE_NAME@ #define __fop_remove_read __fop_remove_read@DB_VERSION_UNIQUE_NAME@ #define __fop_write_log __fop_write_log@DB_VERSION_UNIQUE_NAME@ -#define __fop_write_getpgnos __fop_write_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __fop_write_print __fop_write_print@DB_VERSION_UNIQUE_NAME@ #define __fop_write_read __fop_write_read@DB_VERSION_UNIQUE_NAME@ #define __fop_rename_log __fop_rename_log@DB_VERSION_UNIQUE_NAME@ -#define __fop_rename_getpgnos __fop_rename_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __fop_rename_print __fop_rename_print@DB_VERSION_UNIQUE_NAME@ #define __fop_rename_read __fop_rename_read@DB_VERSION_UNIQUE_NAME@ #define __fop_file_remove_log __fop_file_remove_log@DB_VERSION_UNIQUE_NAME@ -#define __fop_file_remove_getpgnos __fop_file_remove_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __fop_file_remove_print __fop_file_remove_print@DB_VERSION_UNIQUE_NAME@ #define __fop_file_remove_read __fop_file_remove_read@DB_VERSION_UNIQUE_NAME@ -#define __fop_init_print __fop_init_print@DB_VERSION_UNIQUE_NAME@ -#define __fop_init_getpgnos __fop_init_getpgnos@DB_VERSION_UNIQUE_NAME@ #define __fop_init_recover __fop_init_recover@DB_VERSION_UNIQUE_NAME@ +#define 
__fop_create_print __fop_create_print@DB_VERSION_UNIQUE_NAME@ +#define __fop_remove_print __fop_remove_print@DB_VERSION_UNIQUE_NAME@ +#define __fop_write_print __fop_write_print@DB_VERSION_UNIQUE_NAME@ +#define __fop_rename_print __fop_rename_print@DB_VERSION_UNIQUE_NAME@ +#define __fop_file_remove_print __fop_file_remove_print@DB_VERSION_UNIQUE_NAME@ +#define __fop_init_print __fop_init_print@DB_VERSION_UNIQUE_NAME@ #define __fop_create __fop_create@DB_VERSION_UNIQUE_NAME@ #define __fop_remove __fop_remove@DB_VERSION_UNIQUE_NAME@ #define __fop_write __fop_write@DB_VERSION_UNIQUE_NAME@ @@ -581,44 +586,34 @@ #define __ham_c_update __ham_c_update@DB_VERSION_UNIQUE_NAME@ #define __ham_get_clist __ham_get_clist@DB_VERSION_UNIQUE_NAME@ #define __ham_insdel_log __ham_insdel_log@DB_VERSION_UNIQUE_NAME@ -#define __ham_insdel_getpgnos __ham_insdel_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __ham_insdel_print __ham_insdel_print@DB_VERSION_UNIQUE_NAME@ #define __ham_insdel_read __ham_insdel_read@DB_VERSION_UNIQUE_NAME@ #define __ham_newpage_log __ham_newpage_log@DB_VERSION_UNIQUE_NAME@ -#define __ham_newpage_getpgnos __ham_newpage_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __ham_newpage_print __ham_newpage_print@DB_VERSION_UNIQUE_NAME@ #define __ham_newpage_read __ham_newpage_read@DB_VERSION_UNIQUE_NAME@ #define __ham_splitdata_log __ham_splitdata_log@DB_VERSION_UNIQUE_NAME@ -#define __ham_splitdata_getpgnos __ham_splitdata_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __ham_splitdata_print __ham_splitdata_print@DB_VERSION_UNIQUE_NAME@ #define __ham_splitdata_read __ham_splitdata_read@DB_VERSION_UNIQUE_NAME@ #define __ham_replace_log __ham_replace_log@DB_VERSION_UNIQUE_NAME@ -#define __ham_replace_getpgnos __ham_replace_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __ham_replace_print __ham_replace_print@DB_VERSION_UNIQUE_NAME@ #define __ham_replace_read __ham_replace_read@DB_VERSION_UNIQUE_NAME@ #define __ham_copypage_log __ham_copypage_log@DB_VERSION_UNIQUE_NAME@ -#define __ham_copypage_getpgnos __ham_copypage_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __ham_copypage_print __ham_copypage_print@DB_VERSION_UNIQUE_NAME@ #define __ham_copypage_read __ham_copypage_read@DB_VERSION_UNIQUE_NAME@ #define __ham_metagroup_log __ham_metagroup_log@DB_VERSION_UNIQUE_NAME@ -#define __ham_metagroup_getpgnos __ham_metagroup_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __ham_metagroup_print __ham_metagroup_print@DB_VERSION_UNIQUE_NAME@ #define __ham_metagroup_read __ham_metagroup_read@DB_VERSION_UNIQUE_NAME@ #define __ham_groupalloc_log __ham_groupalloc_log@DB_VERSION_UNIQUE_NAME@ -#define __ham_groupalloc_getpgnos __ham_groupalloc_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __ham_groupalloc_print __ham_groupalloc_print@DB_VERSION_UNIQUE_NAME@ #define __ham_groupalloc_read __ham_groupalloc_read@DB_VERSION_UNIQUE_NAME@ #define __ham_curadj_log __ham_curadj_log@DB_VERSION_UNIQUE_NAME@ -#define __ham_curadj_getpgnos __ham_curadj_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __ham_curadj_print __ham_curadj_print@DB_VERSION_UNIQUE_NAME@ #define __ham_curadj_read __ham_curadj_read@DB_VERSION_UNIQUE_NAME@ #define __ham_chgpg_log __ham_chgpg_log@DB_VERSION_UNIQUE_NAME@ -#define __ham_chgpg_getpgnos __ham_chgpg_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __ham_chgpg_print __ham_chgpg_print@DB_VERSION_UNIQUE_NAME@ #define __ham_chgpg_read __ham_chgpg_read@DB_VERSION_UNIQUE_NAME@ -#define __ham_init_print __ham_init_print@DB_VERSION_UNIQUE_NAME@ -#define __ham_init_getpgnos __ham_init_getpgnos@DB_VERSION_UNIQUE_NAME@ #define __ham_init_recover 
__ham_init_recover@DB_VERSION_UNIQUE_NAME@ +#define __ham_insdel_print __ham_insdel_print@DB_VERSION_UNIQUE_NAME@ +#define __ham_newpage_print __ham_newpage_print@DB_VERSION_UNIQUE_NAME@ +#define __ham_splitdata_print __ham_splitdata_print@DB_VERSION_UNIQUE_NAME@ +#define __ham_replace_print __ham_replace_print@DB_VERSION_UNIQUE_NAME@ +#define __ham_copypage_print __ham_copypage_print@DB_VERSION_UNIQUE_NAME@ +#define __ham_metagroup_print __ham_metagroup_print@DB_VERSION_UNIQUE_NAME@ +#define __ham_groupalloc_print __ham_groupalloc_print@DB_VERSION_UNIQUE_NAME@ +#define __ham_curadj_print __ham_curadj_print@DB_VERSION_UNIQUE_NAME@ +#define __ham_chgpg_print __ham_chgpg_print@DB_VERSION_UNIQUE_NAME@ +#define __ham_init_print __ham_init_print@DB_VERSION_UNIQUE_NAME@ #define __ham_pgin __ham_pgin@DB_VERSION_UNIQUE_NAME@ #define __ham_pgout __ham_pgout@DB_VERSION_UNIQUE_NAME@ #define __ham_mswap __ham_mswap@DB_VERSION_UNIQUE_NAME@ @@ -626,7 +621,6 @@ #define __ham_dup_convert __ham_dup_convert@DB_VERSION_UNIQUE_NAME@ #define __ham_make_dup __ham_make_dup@DB_VERSION_UNIQUE_NAME@ #define __ham_dsearch __ham_dsearch@DB_VERSION_UNIQUE_NAME@ -#define __ham_cprint __ham_cprint@DB_VERSION_UNIQUE_NAME@ #define __ham_func2 __ham_func2@DB_VERSION_UNIQUE_NAME@ #define __ham_func3 __ham_func3@DB_VERSION_UNIQUE_NAME@ #define __ham_func4 __ham_func4@DB_VERSION_UNIQUE_NAME@ @@ -637,6 +631,8 @@ #define __ham_dirty_meta __ham_dirty_meta@DB_VERSION_UNIQUE_NAME@ #define __ham_db_create __ham_db_create@DB_VERSION_UNIQUE_NAME@ #define __ham_db_close __ham_db_close@DB_VERSION_UNIQUE_NAME@ +#define __ham_get_h_ffactor __ham_get_h_ffactor@DB_VERSION_UNIQUE_NAME@ +#define __ham_get_h_nelem __ham_get_h_nelem@DB_VERSION_UNIQUE_NAME@ #define __ham_open __ham_open@DB_VERSION_UNIQUE_NAME@ #define __ham_metachk __ham_metachk@DB_VERSION_UNIQUE_NAME@ #define __ham_new_file __ham_new_file@DB_VERSION_UNIQUE_NAME@ @@ -673,6 +669,8 @@ #define __ham_reclaim __ham_reclaim@DB_VERSION_UNIQUE_NAME@ #define __ham_truncate __ham_truncate@DB_VERSION_UNIQUE_NAME@ #define __ham_stat __ham_stat@DB_VERSION_UNIQUE_NAME@ +#define __ham_stat_print __ham_stat_print@DB_VERSION_UNIQUE_NAME@ +#define __ham_print_cursor __ham_print_cursor@DB_VERSION_UNIQUE_NAME@ #define __ham_traverse __ham_traverse@DB_VERSION_UNIQUE_NAME@ #define __db_no_hash_am __db_no_hash_am@DB_VERSION_UNIQUE_NAME@ #define __ham_30_hashmeta __ham_30_hashmeta@DB_VERSION_UNIQUE_NAME@ @@ -692,27 +690,30 @@ #define __db_SHA1Init __db_SHA1Init@DB_VERSION_UNIQUE_NAME@ #define __db_SHA1Update __db_SHA1Update@DB_VERSION_UNIQUE_NAME@ #define __db_SHA1Final __db_SHA1Final@DB_VERSION_UNIQUE_NAME@ -#define __lock_id_pp __lock_id_pp@DB_VERSION_UNIQUE_NAME@ -#define __lock_id __lock_id@DB_VERSION_UNIQUE_NAME@ -#define __lock_id_free_pp __lock_id_free_pp@DB_VERSION_UNIQUE_NAME@ -#define __lock_id_free __lock_id_free@DB_VERSION_UNIQUE_NAME@ #define __lock_vec_pp __lock_vec_pp@DB_VERSION_UNIQUE_NAME@ #define __lock_vec __lock_vec@DB_VERSION_UNIQUE_NAME@ #define __lock_get_pp __lock_get_pp@DB_VERSION_UNIQUE_NAME@ #define __lock_get __lock_get@DB_VERSION_UNIQUE_NAME@ +#define __lock_get_internal __lock_get_internal@DB_VERSION_UNIQUE_NAME@ #define __lock_put_pp __lock_put_pp@DB_VERSION_UNIQUE_NAME@ #define __lock_put __lock_put@DB_VERSION_UNIQUE_NAME@ #define __lock_downgrade __lock_downgrade@DB_VERSION_UNIQUE_NAME@ -#define __lock_addfamilylocker __lock_addfamilylocker@DB_VERSION_UNIQUE_NAME@ -#define __lock_freefamilylocker __lock_freefamilylocker@DB_VERSION_UNIQUE_NAME@ -#define 
__lock_set_timeout __lock_set_timeout@DB_VERSION_UNIQUE_NAME@ -#define __lock_inherit_timeout __lock_inherit_timeout@DB_VERSION_UNIQUE_NAME@ -#define __lock_getlocker __lock_getlocker@DB_VERSION_UNIQUE_NAME@ +#define __lock_locker_is_parent __lock_locker_is_parent@DB_VERSION_UNIQUE_NAME@ #define __lock_promote __lock_promote@DB_VERSION_UNIQUE_NAME@ -#define __lock_expired __lock_expired@DB_VERSION_UNIQUE_NAME@ -#define __lock_get_list __lock_get_list@DB_VERSION_UNIQUE_NAME@ #define __lock_detect_pp __lock_detect_pp@DB_VERSION_UNIQUE_NAME@ #define __lock_detect __lock_detect@DB_VERSION_UNIQUE_NAME@ +#define __lock_id_pp __lock_id_pp@DB_VERSION_UNIQUE_NAME@ +#define __lock_id __lock_id@DB_VERSION_UNIQUE_NAME@ +#define __lock_id_free_pp __lock_id_free_pp@DB_VERSION_UNIQUE_NAME@ +#define __lock_id_free __lock_id_free@DB_VERSION_UNIQUE_NAME@ +#define __lock_id_set __lock_id_set@DB_VERSION_UNIQUE_NAME@ +#define __lock_getlocker __lock_getlocker@DB_VERSION_UNIQUE_NAME@ +#define __lock_addfamilylocker __lock_addfamilylocker@DB_VERSION_UNIQUE_NAME@ +#define __lock_freefamilylocker __lock_freefamilylocker@DB_VERSION_UNIQUE_NAME@ +#define __lock_freelocker __lock_freelocker@DB_VERSION_UNIQUE_NAME@ +#define __lock_fix_list __lock_fix_list@DB_VERSION_UNIQUE_NAME@ +#define __lock_get_list __lock_get_list@DB_VERSION_UNIQUE_NAME@ +#define __lock_list_print __lock_list_print@DB_VERSION_UNIQUE_NAME@ #define __lock_dbenv_create __lock_dbenv_create@DB_VERSION_UNIQUE_NAME@ #define __lock_dbenv_close __lock_dbenv_close@DB_VERSION_UNIQUE_NAME@ #define __lock_set_lk_detect __lock_set_lk_detect@DB_VERSION_UNIQUE_NAME@ @@ -724,27 +725,36 @@ #define __lock_open __lock_open@DB_VERSION_UNIQUE_NAME@ #define __lock_dbenv_refresh __lock_dbenv_refresh@DB_VERSION_UNIQUE_NAME@ #define __lock_region_destroy __lock_region_destroy@DB_VERSION_UNIQUE_NAME@ -#define __lock_id_set __lock_id_set@DB_VERSION_UNIQUE_NAME@ #define __lock_stat_pp __lock_stat_pp@DB_VERSION_UNIQUE_NAME@ -#define __lock_dump_region __lock_dump_region@DB_VERSION_UNIQUE_NAME@ +#define __lock_stat_print_pp __lock_stat_print_pp@DB_VERSION_UNIQUE_NAME@ +#define __lock_stat_print __lock_stat_print@DB_VERSION_UNIQUE_NAME@ #define __lock_printlock __lock_printlock@DB_VERSION_UNIQUE_NAME@ +#define __lock_set_timeout __lock_set_timeout@DB_VERSION_UNIQUE_NAME@ +#define __lock_set_timeout_internal __lock_set_timeout_internal@DB_VERSION_UNIQUE_NAME@ +#define __lock_inherit_timeout __lock_inherit_timeout@DB_VERSION_UNIQUE_NAME@ +#define __lock_expires __lock_expires@DB_VERSION_UNIQUE_NAME@ +#define __lock_expired __lock_expired@DB_VERSION_UNIQUE_NAME@ #define __lock_cmp __lock_cmp@DB_VERSION_UNIQUE_NAME@ #define __lock_locker_cmp __lock_locker_cmp@DB_VERSION_UNIQUE_NAME@ #define __lock_ohash __lock_ohash@DB_VERSION_UNIQUE_NAME@ #define __lock_lhash __lock_lhash@DB_VERSION_UNIQUE_NAME@ #define __lock_locker_hash __lock_locker_hash@DB_VERSION_UNIQUE_NAME@ +#define __lock_nomem __lock_nomem@DB_VERSION_UNIQUE_NAME@ #define __log_open __log_open@DB_VERSION_UNIQUE_NAME@ #define __log_find __log_find@DB_VERSION_UNIQUE_NAME@ #define __log_valid __log_valid@DB_VERSION_UNIQUE_NAME@ #define __log_dbenv_refresh __log_dbenv_refresh@DB_VERSION_UNIQUE_NAME@ -#define __log_stat_pp __log_stat_pp@DB_VERSION_UNIQUE_NAME@ #define __log_get_cached_ckp_lsn __log_get_cached_ckp_lsn@DB_VERSION_UNIQUE_NAME@ #define __log_region_destroy __log_region_destroy@DB_VERSION_UNIQUE_NAME@ #define __log_vtruncate __log_vtruncate@DB_VERSION_UNIQUE_NAME@ #define __log_is_outdated 
__log_is_outdated@DB_VERSION_UNIQUE_NAME@ -#define __log_autoremove __log_autoremove@DB_VERSION_UNIQUE_NAME@ +#define __log_inmem_lsnoff __log_inmem_lsnoff@DB_VERSION_UNIQUE_NAME@ +#define __log_inmem_newfile __log_inmem_newfile@DB_VERSION_UNIQUE_NAME@ +#define __log_inmem_chkspace __log_inmem_chkspace@DB_VERSION_UNIQUE_NAME@ +#define __log_inmem_copyout __log_inmem_copyout@DB_VERSION_UNIQUE_NAME@ +#define __log_inmem_copyin __log_inmem_copyin@DB_VERSION_UNIQUE_NAME@ #define __log_archive_pp __log_archive_pp@DB_VERSION_UNIQUE_NAME@ -#define __log_archive __log_archive@DB_VERSION_UNIQUE_NAME@ +#define __log_autoremove __log_autoremove@DB_VERSION_UNIQUE_NAME@ #define __log_cursor_pp __log_cursor_pp@DB_VERSION_UNIQUE_NAME@ #define __log_cursor __log_cursor@DB_VERSION_UNIQUE_NAME@ #define __log_c_close __log_c_close@DB_VERSION_UNIQUE_NAME@ @@ -754,6 +764,9 @@ #define __log_set_lg_max __log_set_lg_max@DB_VERSION_UNIQUE_NAME@ #define __log_set_lg_regionmax __log_set_lg_regionmax@DB_VERSION_UNIQUE_NAME@ #define __log_set_lg_dir __log_set_lg_dir@DB_VERSION_UNIQUE_NAME@ +#define __log_get_flags __log_get_flags@DB_VERSION_UNIQUE_NAME@ +#define __log_set_flags __log_set_flags@DB_VERSION_UNIQUE_NAME@ +#define __log_check_sizes __log_check_sizes@DB_VERSION_UNIQUE_NAME@ #define __log_put_pp __log_put_pp@DB_VERSION_UNIQUE_NAME@ #define __log_put __log_put@DB_VERSION_UNIQUE_NAME@ #define __log_txn_lsn __log_txn_lsn@DB_VERSION_UNIQUE_NAME@ @@ -764,6 +777,9 @@ #define __log_file_pp __log_file_pp@DB_VERSION_UNIQUE_NAME@ #define __log_name __log_name@DB_VERSION_UNIQUE_NAME@ #define __log_rep_put __log_rep_put@DB_VERSION_UNIQUE_NAME@ +#define __log_stat_pp __log_stat_pp@DB_VERSION_UNIQUE_NAME@ +#define __log_stat_print_pp __log_stat_print_pp@DB_VERSION_UNIQUE_NAME@ +#define __log_stat_print __log_stat_print@DB_VERSION_UNIQUE_NAME@ #define __memp_alloc __memp_alloc@DB_VERSION_UNIQUE_NAME@ #ifdef DIAGNOSTIC #define __memp_check_order __memp_check_order@DB_VERSION_UNIQUE_NAME@ @@ -779,18 +795,20 @@ #define __memp_set_clear_len __memp_set_clear_len@DB_VERSION_UNIQUE_NAME@ #define __memp_get_fileid __memp_get_fileid@DB_VERSION_UNIQUE_NAME@ #define __memp_set_fileid __memp_set_fileid@DB_VERSION_UNIQUE_NAME@ +#define __memp_get_flags __memp_get_flags@DB_VERSION_UNIQUE_NAME@ #define __memp_set_flags __memp_set_flags@DB_VERSION_UNIQUE_NAME@ #define __memp_get_ftype __memp_get_ftype@DB_VERSION_UNIQUE_NAME@ #define __memp_set_ftype __memp_set_ftype@DB_VERSION_UNIQUE_NAME@ #define __memp_set_lsn_offset __memp_set_lsn_offset@DB_VERSION_UNIQUE_NAME@ #define __memp_set_pgcookie __memp_set_pgcookie@DB_VERSION_UNIQUE_NAME@ -#define __memp_fopen __memp_fopen@DB_VERSION_UNIQUE_NAME@ #define __memp_last_pgno __memp_last_pgno@DB_VERSION_UNIQUE_NAME@ -#define __memp_fclose __memp_fclose@DB_VERSION_UNIQUE_NAME@ -#define __memp_mf_sync __memp_mf_sync@DB_VERSION_UNIQUE_NAME@ -#define __memp_mf_discard __memp_mf_discard@DB_VERSION_UNIQUE_NAME@ #define __memp_fn __memp_fn@DB_VERSION_UNIQUE_NAME@ #define __memp_fns __memp_fns@DB_VERSION_UNIQUE_NAME@ +#define __memp_fopen_pp __memp_fopen_pp@DB_VERSION_UNIQUE_NAME@ +#define __memp_fopen __memp_fopen@DB_VERSION_UNIQUE_NAME@ +#define __memp_fclose_pp __memp_fclose_pp@DB_VERSION_UNIQUE_NAME@ +#define __memp_fclose __memp_fclose@DB_VERSION_UNIQUE_NAME@ +#define __memp_mf_discard __memp_mf_discard@DB_VERSION_UNIQUE_NAME@ #define __memp_fput_pp __memp_fput_pp@DB_VERSION_UNIQUE_NAME@ #define __memp_fput __memp_fput@DB_VERSION_UNIQUE_NAME@ #define __memp_fset_pp 
__memp_fset_pp@DB_VERSION_UNIQUE_NAME@ @@ -798,16 +816,20 @@ #define __memp_dbenv_create __memp_dbenv_create@DB_VERSION_UNIQUE_NAME@ #define __memp_get_cachesize __memp_get_cachesize@DB_VERSION_UNIQUE_NAME@ #define __memp_set_cachesize __memp_set_cachesize@DB_VERSION_UNIQUE_NAME@ +#define __memp_set_mp_max_openfd __memp_set_mp_max_openfd@DB_VERSION_UNIQUE_NAME@ +#define __memp_set_mp_max_write __memp_set_mp_max_write@DB_VERSION_UNIQUE_NAME@ #define __memp_set_mp_mmapsize __memp_set_mp_mmapsize@DB_VERSION_UNIQUE_NAME@ #define __memp_nameop __memp_nameop@DB_VERSION_UNIQUE_NAME@ #define __memp_get_refcnt __memp_get_refcnt@DB_VERSION_UNIQUE_NAME@ +#define __memp_ftruncate __memp_ftruncate@DB_VERSION_UNIQUE_NAME@ #define __memp_open __memp_open@DB_VERSION_UNIQUE_NAME@ #define __memp_dbenv_refresh __memp_dbenv_refresh@DB_VERSION_UNIQUE_NAME@ -#define __mpool_region_destroy __mpool_region_destroy@DB_VERSION_UNIQUE_NAME@ +#define __memp_region_destroy __memp_region_destroy@DB_VERSION_UNIQUE_NAME@ #define __memp_register_pp __memp_register_pp@DB_VERSION_UNIQUE_NAME@ #define __memp_register __memp_register@DB_VERSION_UNIQUE_NAME@ #define __memp_stat_pp __memp_stat_pp@DB_VERSION_UNIQUE_NAME@ -#define __memp_dump_region __memp_dump_region@DB_VERSION_UNIQUE_NAME@ +#define __memp_stat_print_pp __memp_stat_print_pp@DB_VERSION_UNIQUE_NAME@ +#define __memp_stat_print __memp_stat_print@DB_VERSION_UNIQUE_NAME@ #define __memp_stat_hash __memp_stat_hash@DB_VERSION_UNIQUE_NAME@ #define __memp_sync_pp __memp_sync_pp@DB_VERSION_UNIQUE_NAME@ #define __memp_sync __memp_sync@DB_VERSION_UNIQUE_NAME@ @@ -815,6 +837,7 @@ #define __memp_fsync __memp_fsync@DB_VERSION_UNIQUE_NAME@ #define __mp_xxx_fh __mp_xxx_fh@DB_VERSION_UNIQUE_NAME@ #define __memp_sync_int __memp_sync_int@DB_VERSION_UNIQUE_NAME@ +#define __memp_mf_sync __memp_mf_sync@DB_VERSION_UNIQUE_NAME@ #define __memp_trickle_pp __memp_trickle_pp@DB_VERSION_UNIQUE_NAME@ #define __db_fcntl_mutex_init __db_fcntl_mutex_init@DB_VERSION_UNIQUE_NAME@ #define __db_fcntl_mutex_lock __db_fcntl_mutex_lock@DB_VERSION_UNIQUE_NAME@ @@ -860,12 +883,14 @@ #define __os_openhandle __os_openhandle@DB_VERSION_UNIQUE_NAME@ #define __os_closehandle __os_closehandle@DB_VERSION_UNIQUE_NAME@ #define __os_id __os_id@DB_VERSION_UNIQUE_NAME@ +#define __os_unique_id __os_unique_id@DB_VERSION_UNIQUE_NAME@ #define __os_r_sysattach __os_r_sysattach@DB_VERSION_UNIQUE_NAME@ #define __os_r_sysdetach __os_r_sysdetach@DB_VERSION_UNIQUE_NAME@ #define __os_mapfile __os_mapfile@DB_VERSION_UNIQUE_NAME@ #define __os_unmapfile __os_unmapfile@DB_VERSION_UNIQUE_NAME@ #define __db_oflags __db_oflags@DB_VERSION_UNIQUE_NAME@ #define __db_omode __db_omode@DB_VERSION_UNIQUE_NAME@ +#define __db_shm_mode __db_shm_mode@DB_VERSION_UNIQUE_NAME@ #define __os_have_direct __os_have_direct@DB_VERSION_UNIQUE_NAME@ #define __os_open __os_open@DB_VERSION_UNIQUE_NAME@ #define __os_open_extend __os_open_extend@DB_VERSION_UNIQUE_NAME@ @@ -887,13 +912,12 @@ #define __os_exists __os_exists@DB_VERSION_UNIQUE_NAME@ #define __os_ioinfo __os_ioinfo@DB_VERSION_UNIQUE_NAME@ #define __os_tmpdir __os_tmpdir@DB_VERSION_UNIQUE_NAME@ +#define __os_truncate __os_truncate@DB_VERSION_UNIQUE_NAME@ #define __os_region_unlink __os_region_unlink@DB_VERSION_UNIQUE_NAME@ #define __os_unlink __os_unlink@DB_VERSION_UNIQUE_NAME@ #define __os_is_winnt __os_is_winnt@DB_VERSION_UNIQUE_NAME@ -#if defined(DB_WIN32) -#define __os_win32_errno __os_win32_errno@DB_VERSION_UNIQUE_NAME@ -#endif #define __os_have_direct __os_have_direct@DB_VERSION_UNIQUE_NAME@ 
+#define __os_unlink __os_unlink@DB_VERSION_UNIQUE_NAME@ #define __qam_position __qam_position@DB_VERSION_UNIQUE_NAME@ #define __qam_pitem __qam_pitem@DB_VERSION_UNIQUE_NAME@ #define __qam_append __qam_append@DB_VERSION_UNIQUE_NAME@ @@ -901,28 +925,22 @@ #define __qam_c_init __qam_c_init@DB_VERSION_UNIQUE_NAME@ #define __qam_truncate __qam_truncate@DB_VERSION_UNIQUE_NAME@ #define __qam_incfirst_log __qam_incfirst_log@DB_VERSION_UNIQUE_NAME@ -#define __qam_incfirst_getpgnos __qam_incfirst_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __qam_incfirst_print __qam_incfirst_print@DB_VERSION_UNIQUE_NAME@ #define __qam_incfirst_read __qam_incfirst_read@DB_VERSION_UNIQUE_NAME@ #define __qam_mvptr_log __qam_mvptr_log@DB_VERSION_UNIQUE_NAME@ -#define __qam_mvptr_getpgnos __qam_mvptr_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __qam_mvptr_print __qam_mvptr_print@DB_VERSION_UNIQUE_NAME@ #define __qam_mvptr_read __qam_mvptr_read@DB_VERSION_UNIQUE_NAME@ #define __qam_del_log __qam_del_log@DB_VERSION_UNIQUE_NAME@ -#define __qam_del_getpgnos __qam_del_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __qam_del_print __qam_del_print@DB_VERSION_UNIQUE_NAME@ #define __qam_del_read __qam_del_read@DB_VERSION_UNIQUE_NAME@ #define __qam_add_log __qam_add_log@DB_VERSION_UNIQUE_NAME@ -#define __qam_add_getpgnos __qam_add_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __qam_add_print __qam_add_print@DB_VERSION_UNIQUE_NAME@ #define __qam_add_read __qam_add_read@DB_VERSION_UNIQUE_NAME@ #define __qam_delext_log __qam_delext_log@DB_VERSION_UNIQUE_NAME@ -#define __qam_delext_getpgnos __qam_delext_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __qam_delext_print __qam_delext_print@DB_VERSION_UNIQUE_NAME@ #define __qam_delext_read __qam_delext_read@DB_VERSION_UNIQUE_NAME@ -#define __qam_init_print __qam_init_print@DB_VERSION_UNIQUE_NAME@ -#define __qam_init_getpgnos __qam_init_getpgnos@DB_VERSION_UNIQUE_NAME@ #define __qam_init_recover __qam_init_recover@DB_VERSION_UNIQUE_NAME@ +#define __qam_incfirst_print __qam_incfirst_print@DB_VERSION_UNIQUE_NAME@ +#define __qam_mvptr_print __qam_mvptr_print@DB_VERSION_UNIQUE_NAME@ +#define __qam_del_print __qam_del_print@DB_VERSION_UNIQUE_NAME@ +#define __qam_add_print __qam_add_print@DB_VERSION_UNIQUE_NAME@ +#define __qam_delext_print __qam_delext_print@DB_VERSION_UNIQUE_NAME@ +#define __qam_init_print __qam_init_print@DB_VERSION_UNIQUE_NAME@ #define __qam_mswap __qam_mswap@DB_VERSION_UNIQUE_NAME@ #define __qam_pgin_out __qam_pgin_out@DB_VERSION_UNIQUE_NAME@ #define __qam_fprobe __qam_fprobe@DB_VERSION_UNIQUE_NAME@ @@ -935,9 +953,13 @@ #define __qam_nameop __qam_nameop@DB_VERSION_UNIQUE_NAME@ #define __qam_db_create __qam_db_create@DB_VERSION_UNIQUE_NAME@ #define __qam_db_close __qam_db_close@DB_VERSION_UNIQUE_NAME@ +#define __qam_get_extentsize __qam_get_extentsize@DB_VERSION_UNIQUE_NAME@ +#define __queue_pageinfo __queue_pageinfo@DB_VERSION_UNIQUE_NAME@ #define __db_prqueue __db_prqueue@DB_VERSION_UNIQUE_NAME@ #define __qam_remove __qam_remove@DB_VERSION_UNIQUE_NAME@ #define __qam_rename __qam_rename@DB_VERSION_UNIQUE_NAME@ +#define __qam_map_flags __qam_map_flags@DB_VERSION_UNIQUE_NAME@ +#define __qam_set_flags __qam_set_flags@DB_VERSION_UNIQUE_NAME@ #define __qam_open __qam_open@DB_VERSION_UNIQUE_NAME@ #define __qam_set_ext_data __qam_set_ext_data@DB_VERSION_UNIQUE_NAME@ #define __qam_metachk __qam_metachk@DB_VERSION_UNIQUE_NAME@ @@ -948,6 +970,7 @@ #define __qam_delext_recover __qam_delext_recover@DB_VERSION_UNIQUE_NAME@ #define __qam_add_recover __qam_add_recover@DB_VERSION_UNIQUE_NAME@ #define 
__qam_stat __qam_stat@DB_VERSION_UNIQUE_NAME@ +#define __qam_stat_print __qam_stat_print@DB_VERSION_UNIQUE_NAME@ #define __db_no_queue_am __db_no_queue_am@DB_VERSION_UNIQUE_NAME@ #define __qam_31_qammeta __qam_31_qammeta@DB_VERSION_UNIQUE_NAME@ #define __qam_32_qammeta __qam_32_qammeta@DB_VERSION_UNIQUE_NAME@ @@ -956,20 +979,38 @@ #define __qam_vrfy_structure __qam_vrfy_structure@DB_VERSION_UNIQUE_NAME@ #define __qam_vrfy_walkqueue __qam_vrfy_walkqueue@DB_VERSION_UNIQUE_NAME@ #define __qam_salvage __qam_salvage@DB_VERSION_UNIQUE_NAME@ +#define __rep_update_buf __rep_update_buf@DB_VERSION_UNIQUE_NAME@ +#define __rep_update_read __rep_update_read@DB_VERSION_UNIQUE_NAME@ +#define __rep_fileinfo_buf __rep_fileinfo_buf@DB_VERSION_UNIQUE_NAME@ +#define __rep_fileinfo_read __rep_fileinfo_read@DB_VERSION_UNIQUE_NAME@ +#define __rep_update_req __rep_update_req@DB_VERSION_UNIQUE_NAME@ +#define __rep_page_req __rep_page_req@DB_VERSION_UNIQUE_NAME@ +#define __rep_update_setup __rep_update_setup@DB_VERSION_UNIQUE_NAME@ +#define __rep_page __rep_page@DB_VERSION_UNIQUE_NAME@ +#define __rep_page_fail __rep_page_fail@DB_VERSION_UNIQUE_NAME@ +#define __rep_pggap_req __rep_pggap_req@DB_VERSION_UNIQUE_NAME@ +#define __rep_loggap_req __rep_loggap_req@DB_VERSION_UNIQUE_NAME@ +#define __rep_finfo_alloc __rep_finfo_alloc@DB_VERSION_UNIQUE_NAME@ #define __rep_dbenv_create __rep_dbenv_create@DB_VERSION_UNIQUE_NAME@ #define __rep_open __rep_open@DB_VERSION_UNIQUE_NAME@ +#define __rep_client_dbinit __rep_client_dbinit@DB_VERSION_UNIQUE_NAME@ #define __rep_elect_master __rep_elect_master@DB_VERSION_UNIQUE_NAME@ #define __rep_process_message __rep_process_message@DB_VERSION_UNIQUE_NAME@ #define __rep_process_txn __rep_process_txn@DB_VERSION_UNIQUE_NAME@ #define __rep_tally __rep_tally@DB_VERSION_UNIQUE_NAME@ #define __rep_cmp_vote __rep_cmp_vote@DB_VERSION_UNIQUE_NAME@ #define __rep_cmp_vote2 __rep_cmp_vote2@DB_VERSION_UNIQUE_NAME@ +#define __rep_check_doreq __rep_check_doreq@DB_VERSION_UNIQUE_NAME@ +#define __rep_lockout __rep_lockout@DB_VERSION_UNIQUE_NAME@ #define __rep_region_init __rep_region_init@DB_VERSION_UNIQUE_NAME@ #define __rep_region_destroy __rep_region_destroy@DB_VERSION_UNIQUE_NAME@ #define __rep_dbenv_refresh __rep_dbenv_refresh@DB_VERSION_UNIQUE_NAME@ #define __rep_dbenv_close __rep_dbenv_close@DB_VERSION_UNIQUE_NAME@ #define __rep_preclose __rep_preclose@DB_VERSION_UNIQUE_NAME@ -#define __rep_check_alloc __rep_check_alloc@DB_VERSION_UNIQUE_NAME@ +#define __rep_write_egen __rep_write_egen@DB_VERSION_UNIQUE_NAME@ +#define __rep_stat_pp __rep_stat_pp@DB_VERSION_UNIQUE_NAME@ +#define __rep_stat_print_pp __rep_stat_print_pp@DB_VERSION_UNIQUE_NAME@ +#define __rep_stat_print __rep_stat_print@DB_VERSION_UNIQUE_NAME@ #define __rep_send_message __rep_send_message@DB_VERSION_UNIQUE_NAME@ #define __rep_new_master __rep_new_master@DB_VERSION_UNIQUE_NAME@ #define __rep_is_client __rep_is_client@DB_VERSION_UNIQUE_NAME@ @@ -978,9 +1019,8 @@ #define __rep_elect_done __rep_elect_done@DB_VERSION_UNIQUE_NAME@ #define __rep_grow_sites __rep_grow_sites@DB_VERSION_UNIQUE_NAME@ #define __env_rep_enter __env_rep_enter@DB_VERSION_UNIQUE_NAME@ -#define __env_rep_exit __env_rep_exit@DB_VERSION_UNIQUE_NAME@ +#define __env_db_rep_exit __env_db_rep_exit@DB_VERSION_UNIQUE_NAME@ #define __db_rep_enter __db_rep_enter@DB_VERSION_UNIQUE_NAME@ -#define __db_rep_exit __db_rep_exit@DB_VERSION_UNIQUE_NAME@ #define __op_rep_enter __op_rep_enter@DB_VERSION_UNIQUE_NAME@ #define __op_rep_exit __op_rep_exit@DB_VERSION_UNIQUE_NAME@ #define 
__rep_get_gen __rep_get_gen@DB_VERSION_UNIQUE_NAME@ @@ -1030,8 +1070,10 @@ #define __dbcl_set_lk_max_lockers __dbcl_set_lk_max_lockers@DB_VERSION_UNIQUE_NAME@ #define __dbcl_get_lk_max_objects __dbcl_get_lk_max_objects@DB_VERSION_UNIQUE_NAME@ #define __dbcl_set_lk_max_objects __dbcl_set_lk_max_objects@DB_VERSION_UNIQUE_NAME@ -#define __dbcl_get_mp_maxwrite __dbcl_get_mp_maxwrite@DB_VERSION_UNIQUE_NAME@ -#define __dbcl_set_mp_maxwrite __dbcl_set_mp_maxwrite@DB_VERSION_UNIQUE_NAME@ +#define __dbcl_get_mp_max_openfd __dbcl_get_mp_max_openfd@DB_VERSION_UNIQUE_NAME@ +#define __dbcl_set_mp_max_openfd __dbcl_set_mp_max_openfd@DB_VERSION_UNIQUE_NAME@ +#define __dbcl_get_mp_max_write __dbcl_get_mp_max_write@DB_VERSION_UNIQUE_NAME@ +#define __dbcl_set_mp_max_write __dbcl_set_mp_max_write@DB_VERSION_UNIQUE_NAME@ #define __dbcl_get_mp_mmapsize __dbcl_get_mp_mmapsize@DB_VERSION_UNIQUE_NAME@ #define __dbcl_set_mp_mmapsize __dbcl_set_mp_mmapsize@DB_VERSION_UNIQUE_NAME@ #define __dbcl_env_get_home __dbcl_env_get_home@DB_VERSION_UNIQUE_NAME@ @@ -1280,6 +1322,10 @@ #define __dbc_close_int __dbc_close_int@DB_VERSION_UNIQUE_NAME@ #define __dbenv_close_int __dbenv_close_int@DB_VERSION_UNIQUE_NAME@ #define get_fullhome get_fullhome@DB_VERSION_UNIQUE_NAME@ +#define __seq_stat __seq_stat@DB_VERSION_UNIQUE_NAME@ +#define __seq_stat_print __seq_stat_print@DB_VERSION_UNIQUE_NAME@ +#define __db_get_seq_flags_fn __db_get_seq_flags_fn@DB_VERSION_UNIQUE_NAME@ +#define __db_get_seq_flags_fn __db_get_seq_flags_fn@DB_VERSION_UNIQUE_NAME@ #define bdb_HCommand bdb_HCommand@DB_VERSION_UNIQUE_NAME@ #if DB_DBM_HSEARCH != 0 #define bdb_NdbmOpen bdb_NdbmOpen@DB_VERSION_UNIQUE_NAME@ @@ -1298,6 +1344,8 @@ #define tcl_EnvSetFlags tcl_EnvSetFlags@DB_VERSION_UNIQUE_NAME@ #define tcl_EnvTest tcl_EnvTest@DB_VERSION_UNIQUE_NAME@ #define tcl_EnvGetEncryptFlags tcl_EnvGetEncryptFlags@DB_VERSION_UNIQUE_NAME@ +#define tcl_EnvSetErrfile tcl_EnvSetErrfile@DB_VERSION_UNIQUE_NAME@ +#define tcl_EnvSetErrpfx tcl_EnvSetErrpfx@DB_VERSION_UNIQUE_NAME@ #define _NewInfo _NewInfo@DB_VERSION_UNIQUE_NAME@ #define _NameToPtr _NameToPtr@DB_VERSION_UNIQUE_NAME@ #define _PtrToInfo _PtrToInfo@DB_VERSION_UNIQUE_NAME@ @@ -1306,6 +1354,7 @@ #define _DeleteInfo _DeleteInfo@DB_VERSION_UNIQUE_NAME@ #define _SetListElem _SetListElem@DB_VERSION_UNIQUE_NAME@ #define _SetListElemInt _SetListElemInt@DB_VERSION_UNIQUE_NAME@ +#define _SetListElemWideInt _SetListElemWideInt@DB_VERSION_UNIQUE_NAME@ #define _SetListRecnoElem _SetListRecnoElem@DB_VERSION_UNIQUE_NAME@ #define _Set3DBTList _Set3DBTList@DB_VERSION_UNIQUE_NAME@ #define _SetMultiList _SetMultiList@DB_VERSION_UNIQUE_NAME@ @@ -1343,6 +1392,7 @@ #define tcl_RepStart tcl_RepStart@DB_VERSION_UNIQUE_NAME@ #define tcl_RepProcessMessage tcl_RepProcessMessage@DB_VERSION_UNIQUE_NAME@ #define tcl_RepStat tcl_RepStat@DB_VERSION_UNIQUE_NAME@ +#define seq_Cmd seq_Cmd@DB_VERSION_UNIQUE_NAME@ #define _TxnInfoDelete _TxnInfoDelete@DB_VERSION_UNIQUE_NAME@ #define tcl_TxnCheckpoint tcl_TxnCheckpoint@DB_VERSION_UNIQUE_NAME@ #define tcl_Txn tcl_Txn@DB_VERSION_UNIQUE_NAME@ @@ -1363,6 +1413,7 @@ #define __txn_set_timeout __txn_set_timeout@DB_VERSION_UNIQUE_NAME@ #define __txn_checkpoint_pp __txn_checkpoint_pp@DB_VERSION_UNIQUE_NAME@ #define __txn_checkpoint __txn_checkpoint@DB_VERSION_UNIQUE_NAME@ +#define __txn_getactive __txn_getactive@DB_VERSION_UNIQUE_NAME@ #define __txn_getckp __txn_getckp@DB_VERSION_UNIQUE_NAME@ #define __txn_activekids __txn_activekids@DB_VERSION_UNIQUE_NAME@ #define __txn_force_abort 
__txn_force_abort@DB_VERSION_UNIQUE_NAME@ @@ -1370,28 +1421,22 @@ #define __txn_reset __txn_reset@DB_VERSION_UNIQUE_NAME@ #define __txn_updateckp __txn_updateckp@DB_VERSION_UNIQUE_NAME@ #define __txn_regop_log __txn_regop_log@DB_VERSION_UNIQUE_NAME@ -#define __txn_regop_getpgnos __txn_regop_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __txn_regop_print __txn_regop_print@DB_VERSION_UNIQUE_NAME@ #define __txn_regop_read __txn_regop_read@DB_VERSION_UNIQUE_NAME@ #define __txn_ckp_log __txn_ckp_log@DB_VERSION_UNIQUE_NAME@ -#define __txn_ckp_getpgnos __txn_ckp_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __txn_ckp_print __txn_ckp_print@DB_VERSION_UNIQUE_NAME@ #define __txn_ckp_read __txn_ckp_read@DB_VERSION_UNIQUE_NAME@ #define __txn_child_log __txn_child_log@DB_VERSION_UNIQUE_NAME@ -#define __txn_child_getpgnos __txn_child_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __txn_child_print __txn_child_print@DB_VERSION_UNIQUE_NAME@ #define __txn_child_read __txn_child_read@DB_VERSION_UNIQUE_NAME@ #define __txn_xa_regop_log __txn_xa_regop_log@DB_VERSION_UNIQUE_NAME@ -#define __txn_xa_regop_getpgnos __txn_xa_regop_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __txn_xa_regop_print __txn_xa_regop_print@DB_VERSION_UNIQUE_NAME@ #define __txn_xa_regop_read __txn_xa_regop_read@DB_VERSION_UNIQUE_NAME@ #define __txn_recycle_log __txn_recycle_log@DB_VERSION_UNIQUE_NAME@ -#define __txn_recycle_getpgnos __txn_recycle_getpgnos@DB_VERSION_UNIQUE_NAME@ -#define __txn_recycle_print __txn_recycle_print@DB_VERSION_UNIQUE_NAME@ #define __txn_recycle_read __txn_recycle_read@DB_VERSION_UNIQUE_NAME@ -#define __txn_init_print __txn_init_print@DB_VERSION_UNIQUE_NAME@ -#define __txn_init_getpgnos __txn_init_getpgnos@DB_VERSION_UNIQUE_NAME@ #define __txn_init_recover __txn_init_recover@DB_VERSION_UNIQUE_NAME@ +#define __txn_regop_print __txn_regop_print@DB_VERSION_UNIQUE_NAME@ +#define __txn_ckp_print __txn_ckp_print@DB_VERSION_UNIQUE_NAME@ +#define __txn_child_print __txn_child_print@DB_VERSION_UNIQUE_NAME@ +#define __txn_xa_regop_print __txn_xa_regop_print@DB_VERSION_UNIQUE_NAME@ +#define __txn_recycle_print __txn_recycle_print@DB_VERSION_UNIQUE_NAME@ +#define __txn_init_print __txn_init_print@DB_VERSION_UNIQUE_NAME@ #define __txn_dbenv_create __txn_dbenv_create@DB_VERSION_UNIQUE_NAME@ #define __txn_set_tx_max __txn_set_tx_max@DB_VERSION_UNIQUE_NAME@ #define __txn_regop_recover __txn_regop_recover@DB_VERSION_UNIQUE_NAME@ @@ -1405,11 +1450,15 @@ #define __txn_recover_pp __txn_recover_pp@DB_VERSION_UNIQUE_NAME@ #define __txn_recover __txn_recover@DB_VERSION_UNIQUE_NAME@ #define __txn_get_prepared __txn_get_prepared@DB_VERSION_UNIQUE_NAME@ +#define __txn_openfiles __txn_openfiles@DB_VERSION_UNIQUE_NAME@ #define __txn_open __txn_open@DB_VERSION_UNIQUE_NAME@ +#define __txn_findlastckp __txn_findlastckp@DB_VERSION_UNIQUE_NAME@ #define __txn_dbenv_refresh __txn_dbenv_refresh@DB_VERSION_UNIQUE_NAME@ #define __txn_region_destroy __txn_region_destroy@DB_VERSION_UNIQUE_NAME@ #define __txn_id_set __txn_id_set@DB_VERSION_UNIQUE_NAME@ #define __txn_stat_pp __txn_stat_pp@DB_VERSION_UNIQUE_NAME@ +#define __txn_stat_print_pp __txn_stat_print_pp@DB_VERSION_UNIQUE_NAME@ +#define __txn_stat_print __txn_stat_print@DB_VERSION_UNIQUE_NAME@ #define __txn_closeevent __txn_closeevent@DB_VERSION_UNIQUE_NAME@ #define __txn_remevent __txn_remevent@DB_VERSION_UNIQUE_NAME@ #define __txn_remrem __txn_remrem@DB_VERSION_UNIQUE_NAME@ @@ -1424,142 +1473,6 @@ #define __db_unmap_rmid __db_unmap_rmid@DB_VERSION_UNIQUE_NAME@ #define __db_map_xid 
__db_map_xid@DB_VERSION_UNIQUE_NAME@ #define __db_unmap_xid __db_unmap_xid@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_get_cachesize_msg xdr___env_get_cachesize_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_get_cachesize_reply xdr___env_get_cachesize_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_cachesize_msg xdr___env_cachesize_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_cachesize_reply xdr___env_cachesize_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_close_msg xdr___env_close_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_close_reply xdr___env_close_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_create_msg xdr___env_create_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_create_reply xdr___env_create_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_dbremove_msg xdr___env_dbremove_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_dbremove_reply xdr___env_dbremove_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_dbrename_msg xdr___env_dbrename_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_dbrename_reply xdr___env_dbrename_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_get_encrypt_flags_msg xdr___env_get_encrypt_flags_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_get_encrypt_flags_reply xdr___env_get_encrypt_flags_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_encrypt_msg xdr___env_encrypt_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_encrypt_reply xdr___env_encrypt_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_get_flags_msg xdr___env_get_flags_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_get_flags_reply xdr___env_get_flags_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_flags_msg xdr___env_flags_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_flags_reply xdr___env_flags_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_get_home_msg xdr___env_get_home_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_get_home_reply xdr___env_get_home_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_get_open_flags_msg xdr___env_get_open_flags_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_get_open_flags_reply xdr___env_get_open_flags_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_open_msg xdr___env_open_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_open_reply xdr___env_open_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_remove_msg xdr___env_remove_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___env_remove_reply xdr___env_remove_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___txn_abort_msg xdr___txn_abort_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___txn_abort_reply xdr___txn_abort_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___txn_begin_msg xdr___txn_begin_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___txn_begin_reply xdr___txn_begin_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___txn_commit_msg xdr___txn_commit_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___txn_commit_reply xdr___txn_commit_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___txn_discard_msg xdr___txn_discard_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___txn_discard_reply xdr___txn_discard_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___txn_prepare_msg xdr___txn_prepare_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___txn_prepare_reply xdr___txn_prepare_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___txn_recover_msg xdr___txn_recover_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___txn_recover_reply xdr___txn_recover_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_associate_msg xdr___db_associate_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_associate_reply xdr___db_associate_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_bt_maxkey_msg 
xdr___db_bt_maxkey_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_bt_maxkey_reply xdr___db_bt_maxkey_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_bt_minkey_msg xdr___db_get_bt_minkey_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_bt_minkey_reply xdr___db_get_bt_minkey_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_bt_minkey_msg xdr___db_bt_minkey_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_bt_minkey_reply xdr___db_bt_minkey_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_close_msg xdr___db_close_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_close_reply xdr___db_close_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_create_msg xdr___db_create_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_create_reply xdr___db_create_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_del_msg xdr___db_del_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_del_reply xdr___db_del_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_encrypt_flags_msg xdr___db_get_encrypt_flags_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_encrypt_flags_reply xdr___db_get_encrypt_flags_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_encrypt_msg xdr___db_encrypt_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_encrypt_reply xdr___db_encrypt_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_extentsize_msg xdr___db_get_extentsize_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_extentsize_reply xdr___db_get_extentsize_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_extentsize_msg xdr___db_extentsize_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_extentsize_reply xdr___db_extentsize_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_flags_msg xdr___db_get_flags_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_flags_reply xdr___db_get_flags_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_flags_msg xdr___db_flags_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_flags_reply xdr___db_flags_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_msg xdr___db_get_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_reply xdr___db_get_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_name_msg xdr___db_get_name_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_name_reply xdr___db_get_name_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_open_flags_msg xdr___db_get_open_flags_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_open_flags_reply xdr___db_get_open_flags_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_h_ffactor_msg xdr___db_get_h_ffactor_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_h_ffactor_reply xdr___db_get_h_ffactor_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_h_ffactor_msg xdr___db_h_ffactor_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_h_ffactor_reply xdr___db_h_ffactor_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_h_nelem_msg xdr___db_get_h_nelem_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_h_nelem_reply xdr___db_get_h_nelem_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_h_nelem_msg xdr___db_h_nelem_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_h_nelem_reply xdr___db_h_nelem_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_key_range_msg xdr___db_key_range_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_key_range_reply xdr___db_key_range_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_lorder_msg xdr___db_get_lorder_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_lorder_reply xdr___db_get_lorder_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_lorder_msg xdr___db_lorder_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_lorder_reply xdr___db_lorder_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_open_msg 
xdr___db_open_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_open_reply xdr___db_open_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_pagesize_msg xdr___db_get_pagesize_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_pagesize_reply xdr___db_get_pagesize_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_pagesize_msg xdr___db_pagesize_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_pagesize_reply xdr___db_pagesize_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_pget_msg xdr___db_pget_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_pget_reply xdr___db_pget_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_put_msg xdr___db_put_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_put_reply xdr___db_put_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_re_delim_msg xdr___db_get_re_delim_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_re_delim_reply xdr___db_get_re_delim_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_re_delim_msg xdr___db_re_delim_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_re_delim_reply xdr___db_re_delim_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_re_len_msg xdr___db_get_re_len_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_re_len_reply xdr___db_get_re_len_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_re_len_msg xdr___db_re_len_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_re_len_reply xdr___db_re_len_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_re_pad_msg xdr___db_re_pad_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_re_pad_reply xdr___db_re_pad_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_re_pad_msg xdr___db_get_re_pad_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_get_re_pad_reply xdr___db_get_re_pad_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_remove_msg xdr___db_remove_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_remove_reply xdr___db_remove_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_rename_msg xdr___db_rename_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_rename_reply xdr___db_rename_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_stat_msg xdr___db_stat_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_stat_reply xdr___db_stat_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_sync_msg xdr___db_sync_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_sync_reply xdr___db_sync_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_truncate_msg xdr___db_truncate_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_truncate_reply xdr___db_truncate_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_cursor_msg xdr___db_cursor_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_cursor_reply xdr___db_cursor_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_join_msg xdr___db_join_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___db_join_reply xdr___db_join_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___dbc_close_msg xdr___dbc_close_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___dbc_close_reply xdr___dbc_close_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___dbc_count_msg xdr___dbc_count_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___dbc_count_reply xdr___dbc_count_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___dbc_del_msg xdr___dbc_del_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___dbc_del_reply xdr___dbc_del_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___dbc_dup_msg xdr___dbc_dup_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___dbc_dup_reply xdr___dbc_dup_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___dbc_get_msg xdr___dbc_get_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___dbc_get_reply xdr___dbc_get_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___dbc_pget_msg xdr___dbc_pget_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___dbc_pget_reply 
xdr___dbc_pget_reply@DB_VERSION_UNIQUE_NAME@ -#define xdr___dbc_put_msg xdr___dbc_put_msg@DB_VERSION_UNIQUE_NAME@ -#define xdr___dbc_put_reply xdr___dbc_put_reply@DB_VERSION_UNIQUE_NAME@ #define __db_global_values __db_global_values@DB_VERSION_UNIQUE_NAME@ #define __db_jump __db_jump@DB_VERSION_UNIQUE_NAME@ diff --git a/db/dbinc_auto/lock_ext.h b/db/dbinc_auto/lock_ext.h index 535dc5315..4b3ab4905 100644 --- a/db/dbinc_auto/lock_ext.h +++ b/db/dbinc_auto/lock_ext.h @@ -6,29 +6,32 @@ extern "C" { #endif -int __lock_id_pp __P((DB_ENV *, u_int32_t *)); -int __lock_id __P((DB_ENV *, u_int32_t *)); -int __lock_id_free_pp __P((DB_ENV *, u_int32_t)); -int __lock_id_free __P((DB_ENV *, u_int32_t)); int __lock_vec_pp __P((DB_ENV *, u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **)); int __lock_vec __P((DB_ENV *, u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **)); int __lock_get_pp __P((DB_ENV *, u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *)); int __lock_get __P((DB_ENV *, u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *)); +int __lock_get_internal __P((DB_LOCKTAB *, u_int32_t, u_int32_t, const DBT *, db_lockmode_t, db_timeout_t, DB_LOCK *)); int __lock_put_pp __P((DB_ENV *, DB_LOCK *)); -int __lock_put __P((DB_ENV *, DB_LOCK *)); +int __lock_put __P((DB_ENV *, DB_LOCK *, u_int32_t)); int __lock_downgrade __P((DB_ENV *, DB_LOCK *, db_lockmode_t, u_int32_t)); -int __lock_addfamilylocker __P((DB_ENV *, u_int32_t, u_int32_t)); -int __lock_freefamilylocker __P((DB_LOCKTAB *, u_int32_t)); -int __lock_set_timeout __P(( DB_ENV *, u_int32_t, db_timeout_t, u_int32_t)); -int __lock_inherit_timeout __P(( DB_ENV *, u_int32_t, u_int32_t)); -int __lock_getlocker __P((DB_LOCKTAB *, u_int32_t, u_int32_t, int, DB_LOCKER **)); +int __lock_locker_is_parent __P((DB_ENV *, u_int32_t, u_int32_t, int *)); int __lock_promote __P((DB_LOCKTAB *, DB_LOCKOBJ *, u_int32_t)); -int __lock_expired __P((DB_ENV *, db_timeval_t *, db_timeval_t *)); -int __lock_get_list __P((DB_ENV *, u_int32_t, u_int32_t, db_lockmode_t, DBT *)); int __lock_detect_pp __P((DB_ENV *, u_int32_t, u_int32_t, int *)); int __lock_detect __P((DB_ENV *, u_int32_t, int *)); +int __lock_id_pp __P((DB_ENV *, u_int32_t *)); +int __lock_id __P((DB_ENV *, u_int32_t *)); +int __lock_id_free_pp __P((DB_ENV *, u_int32_t)); +int __lock_id_free __P((DB_ENV *, u_int32_t)); +int __lock_id_set __P((DB_ENV *, u_int32_t, u_int32_t)); +int __lock_getlocker __P((DB_LOCKTAB *, u_int32_t, u_int32_t, int, DB_LOCKER **)); +int __lock_addfamilylocker __P((DB_ENV *, u_int32_t, u_int32_t)); +int __lock_freefamilylocker __P((DB_LOCKTAB *, u_int32_t)); +void __lock_freelocker __P((DB_LOCKTAB *, DB_LOCKREGION *, DB_LOCKER *, u_int32_t)); +int __lock_fix_list __P((DB_ENV *, DBT *, u_int32_t)); +int __lock_get_list __P((DB_ENV *, u_int32_t, u_int32_t, db_lockmode_t, DBT *)); +void __lock_list_print __P((DB_ENV *, DBT *)); void __lock_dbenv_create __P((DB_ENV *)); -void __lock_dbenv_close __P((DB_ENV *)); +int __lock_dbenv_close __P((DB_ENV *)); int __lock_set_lk_detect __P((DB_ENV *, u_int32_t)); int __lock_set_lk_max __P((DB_ENV *, u_int32_t)); int __lock_set_lk_max_locks __P((DB_ENV *, u_int32_t)); @@ -38,15 +41,21 @@ int __lock_set_env_timeout __P((DB_ENV *, db_timeout_t, u_int32_t)); int __lock_open __P((DB_ENV *)); int __lock_dbenv_refresh __P((DB_ENV *)); void __lock_region_destroy __P((DB_ENV *, REGINFO *)); -int __lock_id_set __P((DB_ENV *, u_int32_t, u_int32_t)); int __lock_stat_pp __P((DB_ENV *, DB_LOCK_STAT **, u_int32_t)); -int 
__lock_dump_region __P((DB_ENV *, const char *, FILE *)); -void __lock_printlock __P((DB_LOCKTAB *, struct __db_lock *, int, FILE *)); +int __lock_stat_print_pp __P((DB_ENV *, u_int32_t)); +int __lock_stat_print __P((DB_ENV *, u_int32_t)); +void __lock_printlock __P((DB_LOCKTAB *, DB_MSGBUF *mbp, struct __db_lock *, int)); +int __lock_set_timeout __P(( DB_ENV *, u_int32_t, db_timeout_t, u_int32_t)); +int __lock_set_timeout_internal __P((DB_ENV *, u_int32_t, db_timeout_t, u_int32_t)); +int __lock_inherit_timeout __P(( DB_ENV *, u_int32_t, u_int32_t)); +void __lock_expires __P((DB_ENV *, db_timeval_t *, db_timeout_t)); +int __lock_expired __P((DB_ENV *, db_timeval_t *, db_timeval_t *)); int __lock_cmp __P((const DBT *, DB_LOCKOBJ *)); int __lock_locker_cmp __P((u_int32_t, DB_LOCKER *)); u_int32_t __lock_ohash __P((const DBT *)); u_int32_t __lock_lhash __P((DB_LOCKOBJ *)); u_int32_t __lock_locker_hash __P((u_int32_t)); +int __lock_nomem __P((DB_ENV *, const char *)); #if defined(__cplusplus) } diff --git a/db/dbinc_auto/log_ext.h b/db/dbinc_auto/log_ext.h index f979d1fad..c1fbd211b 100644 --- a/db/dbinc_auto/log_ext.h +++ b/db/dbinc_auto/log_ext.h @@ -8,16 +8,19 @@ extern "C" { int __log_open __P((DB_ENV *)); int __log_find __P((DB_LOG *, int, u_int32_t *, logfile_validity *)); -int __log_valid __P((DB_LOG *, u_int32_t, int, DB_FH **, int, logfile_validity *)); +int __log_valid __P((DB_LOG *, u_int32_t, int, DB_FH **, u_int32_t, logfile_validity *)); int __log_dbenv_refresh __P((DB_ENV *)); -int __log_stat_pp __P((DB_ENV *, DB_LOG_STAT **, u_int32_t)); void __log_get_cached_ckp_lsn __P((DB_ENV *, DB_LSN *)); void __log_region_destroy __P((DB_ENV *, REGINFO *)); int __log_vtruncate __P((DB_ENV *, DB_LSN *, DB_LSN *, DB_LSN *)); int __log_is_outdated __P((DB_ENV *, u_int32_t, int *)); -void __log_autoremove __P((DB_ENV *)); +int __log_inmem_lsnoff __P((DB_LOG *, DB_LSN *, size_t *)); +int __log_inmem_newfile __P((DB_LOG *, u_int32_t)); +int __log_inmem_chkspace __P((DB_LOG *, size_t)); +void __log_inmem_copyout __P((DB_LOG *, size_t, void *, size_t)); +void __log_inmem_copyin __P((DB_LOG *, size_t, void *, size_t)); int __log_archive_pp __P((DB_ENV *, char **[], u_int32_t)); -int __log_archive __P((DB_ENV *, char **[], u_int32_t)); +void __log_autoremove __P((DB_ENV *)); int __log_cursor_pp __P((DB_ENV *, DB_LOGC **, u_int32_t)); int __log_cursor __P((DB_ENV *, DB_LOGC **)); int __log_c_close __P((DB_LOGC *)); @@ -27,16 +30,22 @@ int __log_set_lg_bsize __P((DB_ENV *, u_int32_t)); int __log_set_lg_max __P((DB_ENV *, u_int32_t)); int __log_set_lg_regionmax __P((DB_ENV *, u_int32_t)); int __log_set_lg_dir __P((DB_ENV *, const char *)); +void __log_get_flags __P((DB_ENV *, u_int32_t *)); +void __log_set_flags __P((DB_ENV *, u_int32_t, int)); +int __log_check_sizes __P((DB_ENV *, u_int32_t, u_int32_t)); int __log_put_pp __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t)); int __log_put __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t)); void __log_txn_lsn __P((DB_ENV *, DB_LSN *, u_int32_t *, u_int32_t *)); -int __log_newfile __P((DB_LOG *, DB_LSN *)); +int __log_newfile __P((DB_LOG *, DB_LSN *, u_int32_t)); int __log_flush_pp __P((DB_ENV *, const DB_LSN *)); int __log_flush __P((DB_ENV *, const DB_LSN *)); int __log_flush_int __P((DB_LOG *, const DB_LSN *, int)); int __log_file_pp __P((DB_ENV *, const DB_LSN *, char *, size_t)); int __log_name __P((DB_LOG *, u_int32_t, char **, DB_FH **, u_int32_t)); int __log_rep_put __P((DB_ENV *, DB_LSN *, const DBT *)); +int __log_stat_pp __P((DB_ENV *, 
DB_LOG_STAT **, u_int32_t)); +int __log_stat_print_pp __P((DB_ENV *, u_int32_t)); +int __log_stat_print __P((DB_ENV *, u_int32_t)); #if defined(__cplusplus) } diff --git a/db/dbinc_auto/mp_ext.h b/db/dbinc_auto/mp_ext.h index 0ea6b9fec..06a62ad5a 100644 --- a/db/dbinc_auto/mp_ext.h +++ b/db/dbinc_auto/mp_ext.h @@ -13,7 +13,7 @@ void __memp_check_order __P((DB_MPOOL_HASH *)); int __memp_bhwrite __P((DB_MPOOL *, DB_MPOOL_HASH *, MPOOLFILE *, BH *, int)); int __memp_pgread __P((DB_MPOOLFILE *, DB_MUTEX *, BH *, int)); int __memp_pg __P((DB_MPOOLFILE *, BH *, int)); -void __memp_bhfree __P((DB_MPOOL *, DB_MPOOL_HASH *, BH *, int)); +void __memp_bhfree __P((DB_MPOOL *, DB_MPOOL_HASH *, BH *, u_int32_t)); int __memp_fget_pp __P((DB_MPOOLFILE *, db_pgno_t *, u_int32_t, void *)); int __memp_fget __P((DB_MPOOLFILE *, db_pgno_t *, u_int32_t, void *)); int __memp_fcreate_pp __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t)); @@ -21,18 +21,20 @@ int __memp_fcreate __P((DB_ENV *, DB_MPOOLFILE **)); int __memp_set_clear_len __P((DB_MPOOLFILE *, u_int32_t)); int __memp_get_fileid __P((DB_MPOOLFILE *, u_int8_t *)); int __memp_set_fileid __P((DB_MPOOLFILE *, u_int8_t *)); +int __memp_get_flags __P((DB_MPOOLFILE *, u_int32_t *)); int __memp_set_flags __P((DB_MPOOLFILE *, u_int32_t, int)); int __memp_get_ftype __P((DB_MPOOLFILE *, int *)); int __memp_set_ftype __P((DB_MPOOLFILE *, int)); int __memp_set_lsn_offset __P((DB_MPOOLFILE *, int32_t)); int __memp_set_pgcookie __P((DB_MPOOLFILE *, DBT *)); -int __memp_fopen __P((DB_MPOOLFILE *, MPOOLFILE *, const char *, u_int32_t, int, size_t)); void __memp_last_pgno __P((DB_MPOOLFILE *, db_pgno_t *)); -int __memp_fclose __P((DB_MPOOLFILE *, u_int32_t)); -int __memp_mf_sync __P((DB_MPOOL *, MPOOLFILE *)); -int __memp_mf_discard __P((DB_MPOOL *, MPOOLFILE *)); char * __memp_fn __P((DB_MPOOLFILE *)); char * __memp_fns __P((DB_MPOOL *, MPOOLFILE *)); +int __memp_fopen_pp __P((DB_MPOOLFILE *, const char *, u_int32_t, int, size_t)); +int __memp_fopen __P((DB_MPOOLFILE *, MPOOLFILE *, const char *, u_int32_t, int, size_t)); +int __memp_fclose_pp __P((DB_MPOOLFILE *, u_int32_t)); +int __memp_fclose __P((DB_MPOOLFILE *, u_int32_t)); +int __memp_mf_discard __P((DB_MPOOL *, MPOOLFILE *)); int __memp_fput_pp __P((DB_MPOOLFILE *, void *, u_int32_t)); int __memp_fput __P((DB_MPOOLFILE *, void *, u_int32_t)); int __memp_fset_pp __P((DB_MPOOLFILE *, void *, u_int32_t)); @@ -40,23 +42,28 @@ int __memp_fset __P((DB_MPOOLFILE *, void *, u_int32_t)); void __memp_dbenv_create __P((DB_ENV *)); int __memp_get_cachesize __P((DB_ENV *, u_int32_t *, u_int32_t *, int *)); int __memp_set_cachesize __P((DB_ENV *, u_int32_t, u_int32_t, int)); +int __memp_set_mp_max_openfd __P((DB_ENV *, int)); +int __memp_set_mp_max_write __P((DB_ENV *, int, int)); int __memp_set_mp_mmapsize __P((DB_ENV *, size_t)); int __memp_nameop __P((DB_ENV *, u_int8_t *, const char *, const char *, const char *)); -int __memp_get_refcnt __P((DB_ENV *, u_int8_t *, int *)); +int __memp_get_refcnt __P((DB_ENV *, u_int8_t *, u_int32_t *)); +int __memp_ftruncate __P((DB_MPOOLFILE *, db_pgno_t, u_int32_t)); int __memp_open __P((DB_ENV *)); int __memp_dbenv_refresh __P((DB_ENV *)); -void __mpool_region_destroy __P((DB_ENV *, REGINFO *)); +void __memp_region_destroy __P((DB_ENV *, REGINFO *)); int __memp_register_pp __P((DB_ENV *, int, int (*)(DB_ENV *, db_pgno_t, void *, DBT *), int (*)(DB_ENV *, db_pgno_t, void *, DBT *))); int __memp_register __P((DB_ENV *, int, int (*)(DB_ENV *, db_pgno_t, void *, DBT *), int (*)(DB_ENV *, 
db_pgno_t, void *, DBT *))); int __memp_stat_pp __P((DB_ENV *, DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, u_int32_t)); -int __memp_dump_region __P((DB_ENV *, const char *, FILE *)); -void __memp_stat_hash __P((REGINFO *, MPOOL *, u_int32_t *)); +int __memp_stat_print_pp __P((DB_ENV *, u_int32_t)); +int __memp_stat_print __P((DB_ENV *, u_int32_t)); +void __memp_stat_hash __P((DB_ENV *, REGINFO *, MPOOL *, u_int32_t *)); int __memp_sync_pp __P((DB_ENV *, DB_LSN *)); int __memp_sync __P((DB_ENV *, DB_LSN *)); int __memp_fsync_pp __P((DB_MPOOLFILE *)); int __memp_fsync __P((DB_MPOOLFILE *)); int __mp_xxx_fh __P((DB_MPOOLFILE *, DB_FH **)); -int __memp_sync_int __P((DB_ENV *, DB_MPOOLFILE *, int, db_sync_op, int *)); +int __memp_sync_int __P((DB_ENV *, DB_MPOOLFILE *, u_int32_t, db_sync_op, u_int32_t *)); +int __memp_mf_sync __P((DB_MPOOL *, MPOOLFILE *)); int __memp_trickle_pp __P((DB_ENV *, int, int *)); #if defined(__cplusplus) diff --git a/db/dbinc_auto/os_ext.h b/db/dbinc_auto/os_ext.h index ea2748055..a26958501 100644 --- a/db/dbinc_auto/os_ext.h +++ b/db/dbinc_auto/os_ext.h @@ -16,7 +16,7 @@ int __os_malloc __P((DB_ENV *, size_t, void *)); int __os_realloc __P((DB_ENV *, size_t, void *)); void __os_free __P((DB_ENV *, void *)); void *__ua_memcpy __P((void *, const void *, size_t)); -int __os_clock __P((DB_ENV *, u_int32_t *, u_int32_t *)); +void __os_clock __P((DB_ENV *, u_int32_t *, u_int32_t *)); int __os_fs_notzero __P((void)); int __os_dirlist __P((DB_ENV *, const char *, char ***, int *)); void __os_dirfree __P((DB_ENV *, char **, int)); @@ -28,15 +28,17 @@ int __os_fsync __P((DB_ENV *, DB_FH *)); int __os_openhandle __P((DB_ENV *, const char *, int, int, DB_FH **)); int __os_closehandle __P((DB_ENV *, DB_FH *)); void __os_id __P((u_int32_t *)); +void __os_unique_id __P((DB_ENV *, u_int32_t *)); int __os_r_sysattach __P((DB_ENV *, REGINFO *, REGION *)); int __os_r_sysdetach __P((DB_ENV *, REGINFO *, int)); int __os_mapfile __P((DB_ENV *, char *, DB_FH *, size_t, int, void **)); int __os_unmapfile __P((DB_ENV *, void *, size_t)); u_int32_t __db_oflags __P((int)); int __db_omode __P((const char *)); +int __db_shm_mode __P((DB_ENV *)); int __os_have_direct __P((void)); int __os_open __P((DB_ENV *, const char *, u_int32_t, int, DB_FH **)); -int __os_open_extend __P((DB_ENV *, const char *, u_int32_t, u_int32_t, u_int32_t, int, DB_FH **)); +int __os_open_extend __P((DB_ENV *, const char *, u_int32_t, u_int32_t, int, DB_FH **)); #ifdef HAVE_QNX int __os_shmname __P((DB_ENV *, const char *, char **)); #endif @@ -45,23 +47,22 @@ int __os_r_detach __P((DB_ENV *, REGINFO *, int)); int __os_rename __P((DB_ENV *, const char *, const char *, u_int32_t)); int __os_isroot __P((void)); char *__db_rpath __P((const char *)); -int __os_io __P((DB_ENV *, int, DB_FH *, db_pgno_t, size_t, u_int8_t *, size_t *)); +int __os_io __P((DB_ENV *, int, DB_FH *, db_pgno_t, u_int32_t, u_int8_t *, size_t *)); int __os_read __P((DB_ENV *, DB_FH *, void *, size_t, size_t *)); int __os_write __P((DB_ENV *, DB_FH *, void *, size_t, size_t *)); -int __os_seek __P((DB_ENV *, DB_FH *, size_t, db_pgno_t, u_int32_t, int, DB_OS_SEEK)); -int __os_sleep __P((DB_ENV *, u_long, u_long)); +int __os_seek __P((DB_ENV *, DB_FH *, u_int32_t, db_pgno_t, u_int32_t, int, DB_OS_SEEK)); +void __os_sleep __P((DB_ENV *, u_long, u_long)); void __os_spin __P((DB_ENV *)); void __os_yield __P((DB_ENV*, u_long)); int __os_exists __P((const char *, int *)); int __os_ioinfo __P((DB_ENV *, const char *, DB_FH *, u_int32_t *, u_int32_t *, u_int32_t *)); 
int __os_tmpdir __P((DB_ENV *, u_int32_t)); +int __os_truncate __P((DB_ENV *, DB_FH *, db_pgno_t, u_int32_t)); int __os_region_unlink __P((DB_ENV *, const char *)); int __os_unlink __P((DB_ENV *, const char *)); int __os_is_winnt __P((void)); -#if defined(DB_WIN32) -int __os_win32_errno __P((void)); -#endif int __os_have_direct __P((void)); +int __os_unlink __P((DB_ENV *, const char *)); #if defined(__cplusplus) } diff --git a/db/dbinc_auto/qam_ext.h b/db/dbinc_auto/qam_ext.h index 4ea10a3a7..98ca31c03 100644 --- a/db/dbinc_auto/qam_ext.h +++ b/db/dbinc_auto/qam_ext.h @@ -13,28 +13,22 @@ int __qam_c_dup __P((DBC *, DBC *)); int __qam_c_init __P((DBC *)); int __qam_truncate __P((DBC *, u_int32_t *)); int __qam_incfirst_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_recno_t, db_pgno_t)); -int __qam_incfirst_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __qam_incfirst_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __qam_incfirst_read __P((DB_ENV *, void *, __qam_incfirst_args **)); int __qam_mvptr_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_recno_t, db_recno_t, db_recno_t, db_recno_t, DB_LSN *, db_pgno_t)); -int __qam_mvptr_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __qam_mvptr_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __qam_mvptr_read __P((DB_ENV *, void *, __qam_mvptr_args **)); int __qam_del_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_recno_t)); -int __qam_del_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __qam_del_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __qam_del_read __P((DB_ENV *, void *, __qam_del_args **)); int __qam_add_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_recno_t, const DBT *, u_int32_t, const DBT *)); -int __qam_add_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __qam_add_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __qam_add_read __P((DB_ENV *, void *, __qam_add_args **)); int __qam_delext_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_recno_t, const DBT *)); -int __qam_delext_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __qam_delext_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __qam_delext_read __P((DB_ENV *, void *, __qam_delext_args **)); -int __qam_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); -int __qam_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); int __qam_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); +int __qam_incfirst_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __qam_mvptr_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __qam_del_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __qam_add_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __qam_delext_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __qam_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); int __qam_mswap __P((PAGE *)); int __qam_pgin_out __P((DB_ENV *, db_pgno_t, void *, DBT *)); int __qam_fprobe __P((DB *, db_pgno_t, void *, qam_probe_mode, u_int32_t)); @@ -47,9 +41,13 @@ void __qam_exid __P((DB *, u_int8_t *, u_int32_t)); int __qam_nameop __P((DB *, DB_TXN *, const char *, qam_name_op)); int 
__qam_db_create __P((DB *)); int __qam_db_close __P((DB *, u_int32_t)); -int __db_prqueue __P((DB *, FILE *, u_int32_t)); -int __qam_remove __P((DB *, DB_TXN *, const char *, const char *, DB_LSN *)); +int __qam_get_extentsize __P((DB *, u_int32_t *)); +int __queue_pageinfo __P((DB *, db_pgno_t *, db_pgno_t *, int *, int, u_int32_t)); +int __db_prqueue __P((DB *, u_int32_t)); +int __qam_remove __P((DB *, DB_TXN *, const char *, const char *)); int __qam_rename __P((DB *, DB_TXN *, const char *, const char *, const char *)); +void __qam_map_flags __P((DB *, u_int32_t *, u_int32_t *)); +int __qam_set_flags __P((DB *, u_int32_t *flagsp)); int __qam_open __P((DB *, DB_TXN *, const char *, db_pgno_t, int, u_int32_t)); int __qam_set_ext_data __P((DB*, const char *)); int __qam_metachk __P((DB *, const char *, QMETA *)); @@ -60,6 +58,7 @@ int __qam_del_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __qam_delext_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __qam_add_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __qam_stat __P((DBC *, void *, u_int32_t)); +int __qam_stat_print __P((DBC *, u_int32_t)); int __db_no_queue_am __P((DB_ENV *)); int __qam_31_qammeta __P((DB *, char *, u_int8_t *)); int __qam_32_qammeta __P((DB *, char *, u_int8_t *)); diff --git a/db/dbinc_auto/rep_auto.h b/db/dbinc_auto/rep_auto.h new file mode 100644 index 000000000..5e8b7818c --- /dev/null +++ b/db/dbinc_auto/rep_auto.h @@ -0,0 +1,22 @@ +/* Do not edit: automatically built by gen_rec.awk. */ + +#ifndef __rep_AUTO_H +#define __rep_AUTO_H +typedef struct ___rep_update_args { + DB_LSN first_lsn; + int num_files; +} __rep_update_args; + +typedef struct ___rep_fileinfo_args { + size_t pgsize; + db_pgno_t pgno; + db_pgno_t max_pgno; + int filenum; + int32_t id; + u_int32_t type; + u_int32_t flags; + DBT uid; + DBT info; +} __rep_fileinfo_args; + +#endif diff --git a/db/dbinc_auto/rep_ext.h b/db/dbinc_auto/rep_ext.h index ab304ece0..3ed197e50 100644 --- a/db/dbinc_auto/rep_ext.h +++ b/db/dbinc_auto/rep_ext.h @@ -6,31 +6,48 @@ extern "C" { #endif -int __rep_dbenv_create __P((DB_ENV *)); +int __rep_update_buf __P((u_int8_t *, size_t, size_t *, DB_LSN *, int)); +int __rep_update_read __P((DB_ENV *, void *, void **, __rep_update_args **)); +int __rep_fileinfo_buf __P((u_int8_t *, size_t, size_t *, size_t, db_pgno_t, db_pgno_t, int, int32_t, u_int32_t, u_int32_t, const DBT *, const DBT *)); +int __rep_fileinfo_read __P((DB_ENV *, void *, void **, __rep_fileinfo_args **)); +int __rep_update_req __P((DB_ENV *, int)); +int __rep_page_req __P((DB_ENV *, int, DBT *)); +int __rep_update_setup __P((DB_ENV *, int, REP_CONTROL *, DBT *)); +int __rep_page __P((DB_ENV *, int, REP_CONTROL *, DBT *)); +int __rep_page_fail __P((DB_ENV *, int, DBT *)); +int __rep_pggap_req __P((DB_ENV *, REP *, __rep_fileinfo_args *, int)); +void __rep_loggap_req __P((DB_ENV *, REP *, DB_LSN *, int)); +int __rep_finfo_alloc __P((DB_ENV *, __rep_fileinfo_args *, __rep_fileinfo_args **)); +void __rep_dbenv_create __P((DB_ENV *)); int __rep_open __P((DB_ENV *)); +int __rep_client_dbinit __P((DB_ENV *, int, repdb_t)); void __rep_elect_master __P((DB_ENV *, REP *, int *)); int __rep_process_message __P((DB_ENV *, DBT *, DBT *, int *, DB_LSN *)); int __rep_process_txn __P((DB_ENV *, DBT *)); -int __rep_tally __P((DB_ENV *, REP *, int, int *, u_int32_t, u_int32_t)); -void __rep_cmp_vote __P((DB_ENV *, REP *, int *, DB_LSN *, int, int, int)); +int __rep_tally __P((DB_ENV *, REP *, int, int *, u_int32_t, roff_t)); 
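The new rep_auto.h above defines marshaled argument structures for the internal file-transfer messages, with matching *_buf/*_read helpers declared in rep_ext.h. A sketch of unpacking one record under the usual db_int.h/replication includes; show_fileinfo() is an illustrative helper, and treating the third __rep_fileinfo_read argument as the advanced read position is an assumption, not something stated in the patch:

static int
show_fileinfo(DB_ENV *dbenv, void *recbuf)
{
	__rep_fileinfo_args *finfo;
	void *next;
	int ret;

	if ((ret = __rep_fileinfo_read(dbenv, recbuf, &next, &finfo)) != 0)
		return (ret);

	printf("file %d: pgsize %lu, pages %lu..%lu, type %lu\n",
	    finfo->filenum, (unsigned long)finfo->pgsize,
	    (unsigned long)finfo->pgno, (unsigned long)finfo->max_pgno,
	    (unsigned long)finfo->type);

	/* The *_read helpers hand back allocated args; release them. */
	__os_free(dbenv, finfo);
	return (0);
}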
+void __rep_cmp_vote __P((DB_ENV *, REP *, int *, DB_LSN *, int, u_int32_t, u_int32_t)); int __rep_cmp_vote2 __P((DB_ENV *, REP *, int, u_int32_t)); +int __rep_check_doreq __P((DB_ENV *, REP *)); +void __rep_lockout __P((DB_ENV *, DB_REP *, REP *)); int __rep_region_init __P((DB_ENV *)); int __rep_region_destroy __P((DB_ENV *)); void __rep_dbenv_refresh __P((DB_ENV *)); int __rep_dbenv_close __P((DB_ENV *)); int __rep_preclose __P((DB_ENV *, int)); -int __rep_check_alloc __P((DB_ENV *, TXN_RECS *, int)); +int __rep_write_egen __P((DB_ENV *, u_int32_t)); +int __rep_stat_pp __P((DB_ENV *, DB_REP_STAT **, u_int32_t)); +int __rep_stat_print_pp __P((DB_ENV *, u_int32_t)); +int __rep_stat_print __P((DB_ENV *, u_int32_t)); int __rep_send_message __P((DB_ENV *, int, u_int32_t, DB_LSN *, const DBT *, u_int32_t)); int __rep_new_master __P((DB_ENV *, REP_CONTROL *, int)); int __rep_is_client __P((DB_ENV *)); int __rep_noarchive __P((DB_ENV *)); -void __rep_send_vote __P((DB_ENV *, DB_LSN *, int, int, int, u_int32_t, int, u_int32_t)); +void __rep_send_vote __P((DB_ENV *, DB_LSN *, int, int, int, u_int32_t, u_int32_t, int, u_int32_t)); void __rep_elect_done __P((DB_ENV *, REP *)); int __rep_grow_sites __P((DB_ENV *dbenv, int nsites)); void __env_rep_enter __P((DB_ENV *)); -void __env_rep_exit __P((DB_ENV *)); -int __db_rep_enter __P((DB *, int, int)); -void __db_rep_exit __P((DB_ENV *)); +void __env_db_rep_exit __P((DB_ENV *)); +int __db_rep_enter __P((DB *, int, int, int)); void __op_rep_enter __P((DB_ENV *)); void __op_rep_exit __P((DB_ENV *)); void __rep_get_gen __P((DB_ENV *, u_int32_t *)); diff --git a/db/dbinc_auto/rpc_client_ext.h b/db/dbinc_auto/rpc_client_ext.h index d0ac2b4f6..b58047136 100644 --- a/db/dbinc_auto/rpc_client_ext.h +++ b/db/dbinc_auto/rpc_client_ext.h @@ -15,7 +15,7 @@ int __dbcl_retcopy __P((DB_ENV *, DBT *, void *, u_int32_t, void **, u_int32_t * void __dbcl_txn_end __P((DB_TXN *)); void __dbcl_txn_setup __P((DB_ENV *, DB_TXN *, DB_TXN *, u_int32_t)); void __dbcl_c_refresh __P((DBC *)); -int __dbcl_c_setup __P((long, DB *, DBC **)); +int __dbcl_c_setup __P((u_int, DB *, DBC **)); int __dbcl_dbclose_common __P((DB *)); int __dbcl_env_alloc __P((DB_ENV *, void *(*)(size_t), void *(*)(void *, size_t), void (*)(void *))); int __dbcl_set_app_dispatch __P((DB_ENV *, int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops))); @@ -51,8 +51,10 @@ int __dbcl_get_lk_max_lockers __P((DB_ENV *, u_int32_t *)); int __dbcl_set_lk_max_lockers __P((DB_ENV *, u_int32_t)); int __dbcl_get_lk_max_objects __P((DB_ENV *, u_int32_t *)); int __dbcl_set_lk_max_objects __P((DB_ENV *, u_int32_t)); -int __dbcl_get_mp_maxwrite __P((DB_ENV *, int *, int *)); -int __dbcl_set_mp_maxwrite __P((DB_ENV *, int, int)); +int __dbcl_get_mp_max_openfd __P((DB_ENV *, int *)); +int __dbcl_set_mp_max_openfd __P((DB_ENV *, int)); +int __dbcl_get_mp_max_write __P((DB_ENV *, int *, int *)); +int __dbcl_set_mp_max_write __P((DB_ENV *, int, int)); int __dbcl_get_mp_mmapsize __P((DB_ENV *, size_t *)); int __dbcl_set_mp_mmapsize __P((DB_ENV *, size_t)); int __dbcl_env_get_home __P((DB_ENV *, const char * *)); @@ -83,7 +85,7 @@ int __dbcl_txn_prepare __P((DB_TXN *, u_int8_t *)); int __dbcl_txn_recover __P((DB_ENV *, DB_PREPLIST *, long, long *, u_int32_t)); int __dbcl_txn_stat __P((DB_ENV *, DB_TXN_STAT **, u_int32_t)); int __dbcl_txn_timeout __P((DB_TXN *, u_int32_t, u_int32_t)); -int __dbcl_rep_elect __P((DB_ENV *, int, int, u_int32_t, int *)); +int __dbcl_rep_elect __P((DB_ENV *, int, int, int, u_int32_t, int *, u_int32_t)); int 
__dbcl_rep_flush __P((DB_ENV *)); int __dbcl_rep_process_message __P((DB_ENV *, DBT *, DBT *, int *, DB_LSN *)); int __dbcl_rep_get_limit __P((DB_ENV *, u_int32_t *, u_int32_t *)); @@ -141,7 +143,7 @@ int __dbcl_db_get_re_source __P((DB *, const char * *)); int __dbcl_db_re_source __P((DB *, const char *)); int __dbcl_db_remove __P((DB *, const char *, const char *, u_int32_t)); int __dbcl_db_rename __P((DB *, const char *, const char *, const char *, u_int32_t)); -int __dbcl_db_stat __P((DB *, void *, u_int32_t)); +int __dbcl_db_stat __P((DB *, DB_TXN *, void *, u_int32_t)); int __dbcl_db_sync __P((DB *, u_int32_t)); int __dbcl_db_truncate __P((DB *, DB_TXN *, u_int32_t *, u_int32_t)); int __dbcl_db_upgrade __P((DB *, const char *, u_int32_t)); @@ -210,7 +212,7 @@ int __dbcl_db_pget_ret __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t, __db int __dbcl_db_put_ret __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t, __db_put_reply *)); int __dbcl_db_remove_ret __P((DB *, const char *, const char *, u_int32_t, __db_remove_reply *)); int __dbcl_db_rename_ret __P((DB *, const char *, const char *, const char *, u_int32_t, __db_rename_reply *)); -int __dbcl_db_stat_ret __P((DB *, void *, u_int32_t, __db_stat_reply *)); +int __dbcl_db_stat_ret __P((DB *, DB_TXN *, void *, u_int32_t, __db_stat_reply *)); int __dbcl_db_truncate_ret __P((DB *, DB_TXN *, u_int32_t *, u_int32_t, __db_truncate_reply *)); int __dbcl_db_cursor_ret __P((DB *, DB_TXN *, DBC **, u_int32_t, __db_cursor_reply *)); int __dbcl_db_join_ret __P((DB *, DBC **, DBC **, u_int32_t, __db_join_reply *)); diff --git a/db/dbinc_auto/rpc_server_ext.h b/db/dbinc_auto/rpc_server_ext.h index ef47563f9..6d8cbc71f 100644 --- a/db/dbinc_auto/rpc_server_ext.h +++ b/db/dbinc_auto/rpc_server_ext.h @@ -62,7 +62,7 @@ void __db_get_re_pad_proc __P((long, __db_get_re_pad_reply *)); void __db_re_pad_proc __P((long, u_int32_t, __db_re_pad_reply *)); void __db_remove_proc __P((long, char *, char *, u_int32_t, __db_remove_reply *)); void __db_rename_proc __P((long, char *, char *, char *, u_int32_t, __db_rename_reply *)); -void __db_stat_proc __P((long, u_int32_t, __db_stat_reply *, int *)); +void __db_stat_proc __P((long, long, u_int32_t, __db_stat_reply *, int *)); void __db_sync_proc __P((long, u_int32_t, __db_sync_reply *)); void __db_truncate_proc __P((long, long, u_int32_t, __db_truncate_reply *)); void __db_cursor_proc __P((long, long, u_int32_t, __db_cursor_reply *)); @@ -87,74 +87,6 @@ int __db_close_int __P((long, u_int32_t)); int __dbc_close_int __P((ct_entry *)); int __dbenv_close_int __P((long, u_int32_t, int)); home_entry *get_fullhome __P((char *)); -__env_get_cachesize_reply *__db_env_get_cachesize_4002 __P((__env_get_cachesize_msg *, struct svc_req *)); -__env_cachesize_reply *__db_env_cachesize_4002 __P((__env_cachesize_msg *, struct svc_req *)); -__env_close_reply *__db_env_close_4002 __P((__env_close_msg *, struct svc_req *)); -__env_create_reply *__db_env_create_4002 __P((__env_create_msg *, struct svc_req *)); -__env_dbremove_reply *__db_env_dbremove_4002 __P((__env_dbremove_msg *, struct svc_req *)); -__env_dbrename_reply *__db_env_dbrename_4002 __P((__env_dbrename_msg *, struct svc_req *)); -__env_get_encrypt_flags_reply *__db_env_get_encrypt_flags_4002 __P((__env_get_encrypt_flags_msg *, struct svc_req *)); -__env_encrypt_reply *__db_env_encrypt_4002 __P((__env_encrypt_msg *, struct svc_req *)); -__env_get_flags_reply *__db_env_get_flags_4002 __P((__env_get_flags_msg *, struct svc_req *)); -__env_flags_reply *__db_env_flags_4002 
__P((__env_flags_msg *, struct svc_req *)); -__env_get_home_reply *__db_env_get_home_4002 __P((__env_get_home_msg *, struct svc_req *)); -__env_get_open_flags_reply *__db_env_get_open_flags_4002 __P((__env_get_open_flags_msg *, struct svc_req *)); -__env_open_reply *__db_env_open_4002 __P((__env_open_msg *, struct svc_req *)); -__env_remove_reply *__db_env_remove_4002 __P((__env_remove_msg *, struct svc_req *)); -__txn_abort_reply *__db_txn_abort_4002 __P((__txn_abort_msg *, struct svc_req *)); -__txn_begin_reply *__db_txn_begin_4002 __P((__txn_begin_msg *, struct svc_req *)); -__txn_commit_reply *__db_txn_commit_4002 __P((__txn_commit_msg *, struct svc_req *)); -__txn_discard_reply *__db_txn_discard_4002 __P((__txn_discard_msg *, struct svc_req *)); -__txn_prepare_reply *__db_txn_prepare_4002 __P((__txn_prepare_msg *, struct svc_req *)); -__txn_recover_reply *__db_txn_recover_4002 __P((__txn_recover_msg *, struct svc_req *)); -__db_associate_reply *__db_db_associate_4002 __P((__db_associate_msg *, struct svc_req *)); -__db_bt_maxkey_reply *__db_db_bt_maxkey_4002 __P((__db_bt_maxkey_msg *, struct svc_req *)); -__db_get_bt_minkey_reply *__db_db_get_bt_minkey_4002 __P((__db_get_bt_minkey_msg *, struct svc_req *)); -__db_bt_minkey_reply *__db_db_bt_minkey_4002 __P((__db_bt_minkey_msg *, struct svc_req *)); -__db_close_reply *__db_db_close_4002 __P((__db_close_msg *, struct svc_req *)); -__db_create_reply *__db_db_create_4002 __P((__db_create_msg *, struct svc_req *)); -__db_del_reply *__db_db_del_4002 __P((__db_del_msg *, struct svc_req *)); -__db_get_encrypt_flags_reply *__db_db_get_encrypt_flags_4002 __P((__db_get_encrypt_flags_msg *, struct svc_req *)); -__db_encrypt_reply *__db_db_encrypt_4002 __P((__db_encrypt_msg *, struct svc_req *)); -__db_get_extentsize_reply *__db_db_get_extentsize_4002 __P((__db_get_extentsize_msg *, struct svc_req *)); -__db_extentsize_reply *__db_db_extentsize_4002 __P((__db_extentsize_msg *, struct svc_req *)); -__db_get_flags_reply *__db_db_get_flags_4002 __P((__db_get_flags_msg *, struct svc_req *)); -__db_flags_reply *__db_db_flags_4002 __P((__db_flags_msg *, struct svc_req *)); -__db_get_reply *__db_db_get_4002 __P((__db_get_msg *, struct svc_req *)); -__db_get_name_reply *__db_db_get_name_4002 __P((__db_get_name_msg *, struct svc_req *)); -__db_get_open_flags_reply *__db_db_get_open_flags_4002 __P((__db_get_open_flags_msg *, struct svc_req *)); -__db_get_h_ffactor_reply *__db_db_get_h_ffactor_4002 __P((__db_get_h_ffactor_msg *, struct svc_req *)); -__db_h_ffactor_reply *__db_db_h_ffactor_4002 __P((__db_h_ffactor_msg *, struct svc_req *)); -__db_get_h_nelem_reply *__db_db_get_h_nelem_4002 __P((__db_get_h_nelem_msg *, struct svc_req *)); -__db_h_nelem_reply *__db_db_h_nelem_4002 __P((__db_h_nelem_msg *, struct svc_req *)); -__db_key_range_reply *__db_db_key_range_4002 __P((__db_key_range_msg *, struct svc_req *)); -__db_get_lorder_reply *__db_db_get_lorder_4002 __P((__db_get_lorder_msg *, struct svc_req *)); -__db_lorder_reply *__db_db_lorder_4002 __P((__db_lorder_msg *, struct svc_req *)); -__db_open_reply *__db_db_open_4002 __P((__db_open_msg *, struct svc_req *)); -__db_get_pagesize_reply *__db_db_get_pagesize_4002 __P((__db_get_pagesize_msg *, struct svc_req *)); -__db_pagesize_reply *__db_db_pagesize_4002 __P((__db_pagesize_msg *, struct svc_req *)); -__db_pget_reply *__db_db_pget_4002 __P((__db_pget_msg *, struct svc_req *)); -__db_put_reply *__db_db_put_4002 __P((__db_put_msg *, struct svc_req *)); -__db_get_re_delim_reply *__db_db_get_re_delim_4002 
__P((__db_get_re_delim_msg *, struct svc_req *)); -__db_re_delim_reply *__db_db_re_delim_4002 __P((__db_re_delim_msg *, struct svc_req *)); -__db_get_re_len_reply *__db_db_get_re_len_4002 __P((__db_get_re_len_msg *, struct svc_req *)); -__db_re_len_reply *__db_db_re_len_4002 __P((__db_re_len_msg *, struct svc_req *)); -__db_re_pad_reply *__db_db_re_pad_4002 __P((__db_re_pad_msg *, struct svc_req *)); -__db_get_re_pad_reply *__db_db_get_re_pad_4002 __P((__db_get_re_pad_msg *, struct svc_req *)); -__db_remove_reply *__db_db_remove_4002 __P((__db_remove_msg *, struct svc_req *)); -__db_rename_reply *__db_db_rename_4002 __P((__db_rename_msg *, struct svc_req *)); -__db_stat_reply *__db_db_stat_4002 __P((__db_stat_msg *, struct svc_req *)); -__db_sync_reply *__db_db_sync_4002 __P((__db_sync_msg *, struct svc_req *)); -__db_truncate_reply *__db_db_truncate_4002 __P((__db_truncate_msg *, struct svc_req *)); -__db_cursor_reply *__db_db_cursor_4002 __P((__db_cursor_msg *, struct svc_req *)); -__db_join_reply *__db_db_join_4002 __P((__db_join_msg *, struct svc_req *)); -__dbc_close_reply *__db_dbc_close_4002 __P((__dbc_close_msg *, struct svc_req *)); -__dbc_count_reply *__db_dbc_count_4002 __P((__dbc_count_msg *, struct svc_req *)); -__dbc_del_reply *__db_dbc_del_4002 __P((__dbc_del_msg *, struct svc_req *)); -__dbc_dup_reply *__db_dbc_dup_4002 __P((__dbc_dup_msg *, struct svc_req *)); -__dbc_get_reply *__db_dbc_get_4002 __P((__dbc_get_msg *, struct svc_req *)); -__dbc_pget_reply *__db_dbc_pget_4002 __P((__dbc_pget_msg *, struct svc_req *)); -__dbc_put_reply *__db_dbc_put_4002 __P((__dbc_put_msg *, struct svc_req *)); #if defined(__cplusplus) } diff --git a/db/dbinc_auto/sequence_ext.h b/db/dbinc_auto/sequence_ext.h new file mode 100644 index 000000000..a2c114cf3 --- /dev/null +++ b/db/dbinc_auto/sequence_ext.h @@ -0,0 +1,17 @@ +/* DO NOT EDIT: automatically built by dist/s_include. 
*/ +#ifndef _sequence_ext_h_ +#define _sequence_ext_h_ + +#if defined(__cplusplus) +extern "C" { +#endif + +int __seq_stat __P((DB_SEQUENCE *, DB_SEQUENCE_STAT **, u_int32_t)); +int __seq_stat_print __P((DB_SEQUENCE *, u_int32_t)); +const FN * __db_get_seq_flags_fn __P((void)); +const FN * __db_get_seq_flags_fn __P((void)); + +#if defined(__cplusplus) +} +#endif +#endif /* !_sequence_ext_h_ */ diff --git a/db/dbinc_auto/tcl_ext.h b/db/dbinc_auto/tcl_ext.h index 5860e6b7b..d147bd4f8 100644 --- a/db/dbinc_auto/tcl_ext.h +++ b/db/dbinc_auto/tcl_ext.h @@ -24,24 +24,27 @@ int tcl_EnvAttr __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *)); int tcl_EnvSetFlags __P((Tcl_Interp *, DB_ENV *, Tcl_Obj *, Tcl_Obj *)); int tcl_EnvTest __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *)); int tcl_EnvGetEncryptFlags __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *)); +void tcl_EnvSetErrfile __P((Tcl_Interp *, DB_ENV *, DBTCL_INFO *, char *)); +int tcl_EnvSetErrpfx __P((Tcl_Interp *, DB_ENV *, DBTCL_INFO *, char *)); DBTCL_INFO *_NewInfo __P((Tcl_Interp *, void *, char *, enum INFOTYPE)); void *_NameToPtr __P((CONST char *)); DBTCL_INFO *_PtrToInfo __P((CONST void *)); DBTCL_INFO *_NameToInfo __P((CONST char *)); void _SetInfoData __P((DBTCL_INFO *, void *)); void _DeleteInfo __P((DBTCL_INFO *)); -int _SetListElem __P((Tcl_Interp *, Tcl_Obj *, void *, int, void *, int)); -int _SetListElemInt __P((Tcl_Interp *, Tcl_Obj *, void *, int)); -int _SetListRecnoElem __P((Tcl_Interp *, Tcl_Obj *, db_recno_t, u_char *, int)); +int _SetListElem __P((Tcl_Interp *, Tcl_Obj *, void *, u_int32_t, void *, u_int32_t)); +int _SetListElemInt __P((Tcl_Interp *, Tcl_Obj *, void *, long)); +int _SetListElemWideInt __P((Tcl_Interp *, Tcl_Obj *, void *, int64_t)); +int _SetListRecnoElem __P((Tcl_Interp *, Tcl_Obj *, db_recno_t, u_char *, u_int32_t)); int _Set3DBTList __P((Tcl_Interp *, Tcl_Obj *, DBT *, int, DBT *, int, DBT *)); int _SetMultiList __P((Tcl_Interp *, Tcl_Obj *, DBT *, DBT*, DBTYPE, u_int32_t)); int _GetGlobPrefix __P((char *, char **)); int _ReturnSetup __P((Tcl_Interp *, int, int, char *)); int _ErrorSetup __P((Tcl_Interp *, int, char *)); -void _ErrorFunc __P((CONST char *, char *)); +void _ErrorFunc __P((const DB_ENV *, CONST char *, const char *)); int _GetLsn __P((Tcl_Interp *, Tcl_Obj *, DB_LSN *)); int _GetUInt32 __P((Tcl_Interp *, Tcl_Obj *, u_int32_t *)); -Tcl_Obj *_GetFlagsList __P((Tcl_Interp *, u_int32_t, void (*)(u_int32_t, void *, void (*)(u_int32_t, const FN *, void *)))); +Tcl_Obj *_GetFlagsList __P((Tcl_Interp *, u_int32_t, const FN *)); void _debug_check __P((void)); int _CopyObjBytes __P((Tcl_Interp *, Tcl_Obj *obj, void **, u_int32_t *, int *)); int tcl_LockDetect __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *)); @@ -69,6 +72,7 @@ int tcl_RepRequest __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *)); int tcl_RepStart __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *)); int tcl_RepProcessMessage __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *)); int tcl_RepStat __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *)); +int seq_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*)); void _TxnInfoDelete __P((Tcl_Interp *, DBTCL_INFO *)); int tcl_TxnCheckpoint __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *)); int tcl_Txn __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *)); diff --git a/db/dbinc_auto/txn_auto.h b/db/dbinc_auto/txn_auto.h index 06fbe7d07..2611df15e 100644 --- a/db/dbinc_auto/txn_auto.h +++ b/db/dbinc_auto/txn_auto.h @@ -20,6 +20,7 @@ typedef struct 
___txn_ckp_args { DB_LSN ckp_lsn; DB_LSN last_ckp; int32_t timestamp; + u_int32_t envid; u_int32_t rep_gen; } __txn_ckp_args; diff --git a/db/dbinc_auto/txn_ext.h b/db/dbinc_auto/txn_ext.h index 5dc3a4556..94ca6f332 100644 --- a/db/dbinc_auto/txn_ext.h +++ b/db/dbinc_auto/txn_ext.h @@ -18,6 +18,7 @@ u_int32_t __txn_id __P((DB_TXN *)); int __txn_set_timeout __P((DB_TXN *, db_timeout_t, u_int32_t)); int __txn_checkpoint_pp __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t)); int __txn_checkpoint __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t)); +void __txn_getactive __P((DB_ENV *, DB_LSN *)); int __txn_getckp __P((DB_ENV *, DB_LSN *)); int __txn_activekids __P((DB_ENV *, u_int32_t, DB_TXN *)); int __txn_force_abort __P((DB_ENV *, u_int8_t *)); @@ -25,28 +26,22 @@ int __txn_preclose __P((DB_ENV *)); int __txn_reset __P((DB_ENV *)); void __txn_updateckp __P((DB_ENV *, DB_LSN *)); int __txn_regop_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, int32_t, const DBT *)); -int __txn_regop_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __txn_regop_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __txn_regop_read __P((DB_ENV *, void *, __txn_regop_args **)); -int __txn_ckp_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, DB_LSN *, DB_LSN *, int32_t, u_int32_t)); -int __txn_ckp_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __txn_ckp_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __txn_ckp_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, DB_LSN *, DB_LSN *, int32_t, u_int32_t, u_int32_t)); int __txn_ckp_read __P((DB_ENV *, void *, __txn_ckp_args **)); int __txn_child_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, DB_LSN *)); -int __txn_child_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __txn_child_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __txn_child_read __P((DB_ENV *, void *, __txn_child_args **)); int __txn_xa_regop_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, const DBT *, int32_t, u_int32_t, u_int32_t, DB_LSN *, const DBT *)); -int __txn_xa_regop_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __txn_xa_regop_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __txn_xa_regop_read __P((DB_ENV *, void *, __txn_xa_regop_args **)); int __txn_recycle_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, u_int32_t)); -int __txn_recycle_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); -int __txn_recycle_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __txn_recycle_read __P((DB_ENV *, void *, __txn_recycle_args **)); -int __txn_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); -int __txn_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); int __txn_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); +int __txn_regop_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __txn_ckp_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __txn_child_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __txn_xa_regop_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __txn_recycle_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); +int __txn_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *)); void __txn_dbenv_create __P((DB_ENV *)); int __txn_set_tx_max __P((DB_ENV *, 
u_int32_t)); int __txn_regop_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); @@ -56,15 +51,19 @@ int __txn_child_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); int __txn_restore_txn __P((DB_ENV *, DB_LSN *, __txn_xa_regop_args *)); int __txn_recycle_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); void __txn_continue __P((DB_ENV *, DB_TXN *, TXN_DETAIL *, size_t)); -int __txn_map_gid __P((DB_ENV *, u_int8_t *, TXN_DETAIL **, size_t *)); +int __txn_map_gid __P((DB_ENV *, u_int8_t *, TXN_DETAIL **, roff_t *)); int __txn_recover_pp __P((DB_ENV *, DB_PREPLIST *, long, long *, u_int32_t)); int __txn_recover __P((DB_ENV *, DB_PREPLIST *, long, long *, u_int32_t)); int __txn_get_prepared __P((DB_ENV *, XID *, DB_PREPLIST *, long, long *, u_int32_t)); +int __txn_openfiles __P((DB_ENV *, DB_LSN *, int)); int __txn_open __P((DB_ENV *)); +int __txn_findlastckp __P((DB_ENV *, DB_LSN *, DB_LSN *)); int __txn_dbenv_refresh __P((DB_ENV *)); void __txn_region_destroy __P((DB_ENV *, REGINFO *)); int __txn_id_set __P((DB_ENV *, u_int32_t, u_int32_t)); int __txn_stat_pp __P((DB_ENV *, DB_TXN_STAT **, u_int32_t)); +int __txn_stat_print_pp __P((DB_ENV *, u_int32_t)); +int __txn_stat_print __P((DB_ENV *, u_int32_t)); int __txn_closeevent __P((DB_ENV *, DB_TXN *, DB *)); int __txn_remevent __P((DB_ENV *, DB_TXN *, const char *, u_int8_t*)); void __txn_remrem __P((DB_ENV *, DB_TXN *, const char *)); diff --git a/db/dbinc_auto/xa_ext.h b/db/dbinc_auto/xa_ext.h index 8b93c4f71..3247b5bb7 100644 --- a/db/dbinc_auto/xa_ext.h +++ b/db/dbinc_auto/xa_ext.h @@ -9,7 +9,7 @@ extern "C" { int __xa_get_txn __P((DB_ENV *, DB_TXN **, int)); int __db_xa_create __P((DB *)); int __db_rmid_to_env __P((int rmid, DB_ENV **envp)); -int __db_xid_to_txn __P((DB_ENV *, XID *, size_t *)); +int __db_xid_to_txn __P((DB_ENV *, XID *, roff_t *)); int __db_map_rmid __P((int, DB_ENV *)); int __db_unmap_rmid __P((int)); int __db_map_xid __P((DB_ENV *, XID *, size_t)); diff --git a/db/dbm/dbm.c b/db/dbm/dbm.c index 7acd498c6..842b8d0a4 100644 --- a/db/dbm/dbm.c +++ b/db/dbm/dbm.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -38,14 +38,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
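The checkpoint record in txn_auto.h now carries the environment id (envid) alongside rep_gen, and __txn_ckp_log gains the matching u_int32_t argument. A sketch of decoding such a record, assuming the usual db_int.h/txn includes; show_ckp() is illustrative and recbuf would come from a log cursor:

static int
show_ckp(DB_ENV *dbenv, void *recbuf)
{
	__txn_ckp_args *argp;
	int ret;

	if ((ret = __txn_ckp_read(dbenv, recbuf, &argp)) != 0)
		return (ret);

	printf("ckp_lsn [%lu][%lu] last_ckp [%lu][%lu] envid %lu rep_gen %lu\n",
	    (unsigned long)argp->ckp_lsn.file,
	    (unsigned long)argp->ckp_lsn.offset,
	    (unsigned long)argp->last_ckp.file,
	    (unsigned long)argp->last_ckp.offset,
	    (unsigned long)argp->envid,
	    (unsigned long)argp->rep_gen);

	__os_free(dbenv, argp);
	return (0);
}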
+ * + * $Id: dbm.c,v 11.18 2004/05/10 21:26:47 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: dbm.c,v 11.15 2003/01/08 04:34:46 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -76,14 +74,11 @@ static const char revid[] = "$Id: dbm.c,v 11.15 2003/01/08 04:34:46 bostic Exp $ * EXTERN: int __db_ndbm_store __P((DBM *, datum, datum, int)); * * EXTERN: int __db_dbm_close __P((void)); - * EXTERN: int __db_dbm_dbrdonly __P((void)); * EXTERN: int __db_dbm_delete __P((datum)); - * EXTERN: int __db_dbm_dirf __P((void)); * EXTERN: datum __db_dbm_fetch __P((datum)); * EXTERN: datum __db_dbm_firstkey __P((void)); * EXTERN: int __db_dbm_init __P((char *)); * EXTERN: datum __db_dbm_nextkey __P((datum)); - * EXTERN: int __db_dbm_pagf __P((void)); * EXTERN: int __db_dbm_store __P((datum, datum)); * * EXTERN: #endif @@ -101,7 +96,7 @@ __db_dbm_init(file) char *file; { if (__cur_db != NULL) - (void)dbm_close(__cur_db); + dbm_close(__cur_db); if ((__cur_db = dbm_open(file, O_CREAT | O_RDWR, __db_omode("rw----"))) != NULL) return (0); @@ -289,7 +284,7 @@ __db_ndbm_fetch(dbm, key) memset(&_key, 0, sizeof(DBT)); memset(&_data, 0, sizeof(DBT)); - _key.size = key.dsize; + _key.size = (u_int32_t)key.dsize; _key.data = key.dptr; /* @@ -299,7 +294,7 @@ __db_ndbm_fetch(dbm, key) */ if ((ret = dbc->dbp->get(dbc->dbp, NULL, &_key, &_data, 0)) == 0) { data.dptr = _data.data; - data.dsize = _data.size; + data.dsize = (int)_data.size; } else { data.dptr = NULL; data.dsize = 0; @@ -334,7 +329,7 @@ __db_ndbm_firstkey(dbm) if ((ret = dbc->c_get(dbc, &_key, &_data, DB_FIRST)) == 0) { key.dptr = _key.data; - key.dsize = _key.size; + key.dsize = (int)_key.size; } else { key.dptr = NULL; key.dsize = 0; @@ -369,7 +364,7 @@ __db_ndbm_nextkey(dbm) if ((ret = dbc->c_get(dbc, &_key, &_data, DB_NEXT)) == 0) { key.dptr = _key.data; - key.dsize = _key.size; + key.dsize = (int)_key.size; } else { key.dptr = NULL; key.dsize = 0; @@ -401,7 +396,7 @@ __db_ndbm_delete(dbm, key) memset(&_key, 0, sizeof(DBT)); _key.data = key.dptr; - _key.size = key.dsize; + _key.size = (u_int32_t)key.dsize; if ((ret = dbc->dbp->del(dbc->dbp, NULL, &_key, 0)) == 0) return (0); @@ -435,11 +430,11 @@ __db_ndbm_store(dbm, key, data, flags) memset(&_key, 0, sizeof(DBT)); _key.data = key.dptr; - _key.size = key.dsize; + _key.size = (u_int32_t)key.dsize; memset(&_data, 0, sizeof(DBT)); _data.data = data.dptr; - _data.size = data.dsize; + _data.size = (u_int32_t)data.dsize; if ((ret = dbc->dbp->put(dbc->dbp, NULL, &_key, &_data, flags == DBM_INSERT ? DB_NOOVERWRITE : 0)) == 0) diff --git a/db/dbreg/dbreg.c b/db/dbreg/dbreg.c index 4101e5e5a..76edd520c 100644 --- a/db/dbreg/dbreg.c +++ b/db/dbreg/dbreg.c @@ -1,14 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
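The dbm shim above now casts explicitly when moving between the ndbm datum layout and the DBT, because datum.dsize is a plain int while DBT.size is a u_int32_t. A small sketch of that mapping; datum_to_dbt() and dbt_to_datum() are illustrative helper names, not part of the patch:

static void
datum_to_dbt(datum *in, DBT *out)
{
	memset(out, 0, sizeof(DBT));
	out->data = in->dptr;
	out->size = (u_int32_t)in->dsize;	/* int -> u_int32_t */
}

static void
dbt_to_datum(DBT *in, datum *out)
{
	out->dptr = in->data;
	out->dsize = (int)in->size;		/* u_int32_t -> int */
}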
+ * + * $Id: dbreg.c,v 11.89 2004/09/22 03:43:09 bostic Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: dbreg.c,v 11.81 2003/10/27 15:54:31 sue Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #include @@ -22,6 +21,10 @@ static const char revid[] = "$Id: dbreg.c,v 11.81 2003/10/27 15:54:31 sue Exp $" #include "dbinc/txn.h" #include "dbinc/db_am.h" +static int __dbreg_push_id __P((DB_ENV *, int32_t)); +static int __dbreg_pop_id __P((DB_ENV *, int32_t *)); +static int __dbreg_pluck_id __P((DB_ENV *, int32_t)); + /* * The dbreg subsystem, as its name implies, registers database handles so * that we can associate log messages with them without logging a filename @@ -98,33 +101,33 @@ __dbreg_setup(dbp, name, create_txnid) DB_ENV *dbenv; DB_LOG *dblp; FNAME *fnp; + REGINFO *infop; int ret; size_t len; void *namep; dbenv = dbp->dbenv; dblp = dbenv->lg_handle; + infop = &dblp->reginfo; fnp = NULL; namep = NULL; /* Allocate an FNAME and, if necessary, a buffer for the name itself. */ - R_LOCK(dbenv, &dblp->reginfo); - if ((ret = - __db_shalloc(dblp->reginfo.addr, sizeof(FNAME), 0, &fnp)) != 0) + R_LOCK(dbenv, infop); + if ((ret = __db_shalloc(infop, sizeof(FNAME), 0, &fnp)) != 0) goto err; memset(fnp, 0, sizeof(FNAME)); if (name != NULL) { len = strlen(name) + 1; - if ((ret = - __db_shalloc(dblp->reginfo.addr, len, 0, &namep)) != 0) + if ((ret = __db_shalloc(infop, len, 0, &namep)) != 0) goto err; - fnp->name_off = R_OFFSET(&dblp->reginfo, namep); + fnp->name_off = R_OFFSET(dbenv, infop, namep); memcpy(namep, name, len); } else fnp->name_off = INVALID_ROFF; - R_UNLOCK(dbenv, &dblp->reginfo); + R_UNLOCK(dbenv, infop); /* * Fill in all the remaining info that we'll need later to register @@ -140,7 +143,7 @@ __dbreg_setup(dbp, name, create_txnid) return (0); -err: R_UNLOCK(dbenv, &dblp->reginfo); +err: R_UNLOCK(dbenv, infop); if (ret == ENOMEM) __db_err(dbenv, "Logging region out of memory; you may need to increase its size"); @@ -160,10 +163,12 @@ __dbreg_teardown(dbp) { DB_ENV *dbenv; DB_LOG *dblp; + REGINFO *infop; FNAME *fnp; dbenv = dbp->dbenv; dblp = dbenv->lg_handle; + infop = &dblp->reginfo; fnp = dbp->log_filename; /* @@ -175,12 +180,11 @@ __dbreg_teardown(dbp) DB_ASSERT(fnp->id == DB_LOGFILEID_INVALID); - R_LOCK(dbenv, &dblp->reginfo); + R_LOCK(dbenv, infop); if (fnp->name_off != INVALID_ROFF) - __db_shalloc_free(dblp->reginfo.addr, - R_ADDR(&dblp->reginfo, fnp->name_off)); - __db_shalloc_free(dblp->reginfo.addr, fnp); - R_UNLOCK(dbenv, &dblp->reginfo); + __db_shalloc_free(infop, R_ADDR(dbenv, infop, fnp->name_off)); + __db_shalloc_free(infop, fnp); + R_UNLOCK(dbenv, infop); dbp->log_filename = NULL; @@ -279,7 +283,7 @@ __dbreg_get_id(dbp, txn, idp) memset(&fid_dbt, 0, sizeof(fid_dbt)); memset(&r_name, 0, sizeof(r_name)); if (fnp->name_off != INVALID_ROFF) { - r_name.data = R_ADDR(&dblp->reginfo, fnp->name_off); + r_name.data = R_ADDR(dbenv, &dblp->reginfo, fnp->name_off); r_name.size = (u_int32_t)strlen((char *)r_name.data) + 1; } fid_dbt.data = dbp->fileid; @@ -291,7 +295,7 @@ __dbreg_get_id(dbp, txn, idp) goto err; /* * Once we log the create_txnid, we need to make sure we never - * log it again (as might happen if this is a replication client + * log it again (as might happen if this is a replication client * that later upgrades to a master). 
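__dbreg_setup above now keeps a local REGINFO pointer and passes the DB_ENV handle to R_OFFSET/R_ADDR. The underlying idiom: structures in shared memory store roff_t offsets rather than pointers, because each process may map the region at a different base address and converts back per-process with R_ADDR. A sketch under those assumptions; store_name() is an illustrative helper, and readers would later do name = R_ADDR(dbenv, infop, *offp):

static int
store_name(DB_ENV *dbenv, REGINFO *infop, const char *name, roff_t *offp)
{
	size_t len;
	void *namep;
	int ret;

	len = strlen(name) + 1;
	R_LOCK(dbenv, infop);
	if ((ret = __db_shalloc(infop, len, 0, &namep)) == 0) {
		memcpy(namep, name, len);
		*offp = R_OFFSET(dbenv, infop, namep);	/* store offset, not pointer */
	}
	R_UNLOCK(dbenv, infop);
	return (ret);
}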
*/ fnp->create_txnid = TXN_INVALID; @@ -310,7 +314,7 @@ err: if (ret != 0 && id != DB_LOGFILEID_INVALID) { (void)__dbreg_revoke_id(dbp, 1, id); id = DB_LOGFILEID_INVALID; - } + } *idp = id; return (ret); } @@ -390,8 +394,13 @@ cont: if ((ret = __dbreg_pluck_id(dbenv, id)) != 0) fnp->is_durable = !F_ISSET(dbp, DB_AM_NOT_DURABLE); SH_TAILQ_INSERT_HEAD(&lp->fq, fnp, q, __fname); + /* + * If we get an error adding the dbentry, revoke the id. + * We void the return value since we want to retain and + * return the original error in ret anyway. + */ if ((ret = __dbreg_add_dbentry(dbenv, dblp, dbp, id)) != 0) - goto err; + (void)__dbreg_revoke_id(dbp, 1, id); err: MUTEX_UNLOCK(dbenv, &lp->fq_mutex); @@ -466,12 +475,13 @@ __dbreg_revoke_id(dbp, have_lock, force_id) * Take a dbreg id away from a dbp that we're closing, and log * the unregistry. * - * PUBLIC: int __dbreg_close_id __P((DB *, DB_TXN *)); + * PUBLIC: int __dbreg_close_id __P((DB *, DB_TXN *, u_int32_t)); */ int -__dbreg_close_id(dbp, txn) +__dbreg_close_id(dbp, txn, op) DB *dbp; DB_TXN *txn; + u_int32_t op; { DBT fid_dbt, r_name, *dbtp; DB_ENV *dbenv; @@ -496,7 +506,7 @@ __dbreg_close_id(dbp, txn) dbtp = NULL; else { memset(&r_name, 0, sizeof(r_name)); - r_name.data = R_ADDR(&dblp->reginfo, fnp->name_off); + r_name.data = R_ADDR(dbenv, &dblp->reginfo, fnp->name_off); r_name.size = (u_int32_t)strlen((char *)r_name.data) + 1; dbtp = &r_name; @@ -506,7 +516,7 @@ __dbreg_close_id(dbp, txn) fid_dbt.size = DB_FILE_ID_LEN; if ((ret = __dbreg_register_log(dbenv, txn, &r_unused, F_ISSET(dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0, - DBREG_CLOSE, dbtp, &fid_dbt, fnp->id, + op, dbtp, &fid_dbt, fnp->id, fnp->s_type, fnp->meta_pgno, TXN_INVALID)) != 0) goto err; @@ -515,3 +525,122 @@ __dbreg_close_id(dbp, txn) err: MUTEX_UNLOCK(dbenv, &lp->fq_mutex); return (ret); } + +/* + * __dbreg_push_id and __dbreg_pop_id -- + * Dbreg ids from closed files are kept on a stack in shared memory + * for recycling. (We want to reuse them as much as possible because each + * process keeps open files in an array by ID.) Push them to the stack and + * pop them from it, managing memory as appropriate. + * + * The stack is protected by the fq_mutex, and in both functions we assume + * that this is already locked. + */ +static int +__dbreg_push_id(dbenv, id) + DB_ENV *dbenv; + int32_t id; +{ + DB_LOG *dblp; + LOG *lp; + REGINFO *infop; + int32_t *stack, *newstack; + int ret; + + dblp = dbenv->lg_handle; + infop = &dblp->reginfo; + lp = infop->primary; + + if (lp->free_fid_stack == INVALID_ROFF) { + stack = NULL; + DB_ASSERT(lp->free_fids_alloced == 0); + } else + stack = R_ADDR(dbenv, infop, lp->free_fid_stack); + + /* Check if we have room on the stack. */ + if (lp->free_fids_alloced <= lp->free_fids + 1) { + R_LOCK(dbenv, infop); + if ((ret = __db_shalloc(infop, + (lp->free_fids_alloced + 20) * sizeof(u_int32_t), 0, + &newstack)) != 0) { + R_UNLOCK(dbenv, infop); + return (ret); + } + + if (stack != NULL) { + memcpy(newstack, stack, + lp->free_fids_alloced * sizeof(u_int32_t)); + __db_shalloc_free(infop, stack); + } + stack = newstack; + lp->free_fid_stack = R_OFFSET(dbenv, infop, stack); + lp->free_fids_alloced += 20; + R_UNLOCK(dbenv, infop); + } + + stack[lp->free_fids++] = id; + return (0); +} + +static int +__dbreg_pop_id(dbenv, id) + DB_ENV *dbenv; + int32_t *id; +{ + DB_LOG *dblp; + LOG *lp; + int32_t *stack; + + dblp = dbenv->lg_handle; + lp = dblp->reginfo.primary; + + /* Do we have anything to pop? 
*/ + if (lp->free_fid_stack != INVALID_ROFF && lp->free_fids > 0) { + stack = R_ADDR(dbenv, &dblp->reginfo, lp->free_fid_stack); + *id = stack[--lp->free_fids]; + } else + *id = DB_LOGFILEID_INVALID; + + return (0); +} + +/* + * __dbreg_pluck_id -- + * Remove a particular dbreg id from the stack of free ids. This is + * used when we open a file, as in recovery, with a specific ID that might + * be on the stack. + * + * Returns success whether or not the particular id was found, and like + * push and pop, assumes that the fq_mutex is locked. + */ +static int +__dbreg_pluck_id(dbenv, id) + DB_ENV *dbenv; + int32_t id; +{ + DB_LOG *dblp; + LOG *lp; + int32_t *stack; + u_int i; + + dblp = dbenv->lg_handle; + lp = dblp->reginfo.primary; + + /* Do we have anything to look at? */ + if (lp->free_fid_stack != INVALID_ROFF) { + stack = R_ADDR(dbenv, &dblp->reginfo, lp->free_fid_stack); + for (i = 0; i < lp->free_fids; i++) + if (id == stack[i]) { + /* + * Found it. Overwrite it with the top + * id (which may harmlessly be itself), + * and shorten the stack by one. + */ + stack[i] = stack[lp->free_fids - 1]; + lp->free_fids--; + return (0); + } + } + + return (0); +} diff --git a/db/dbreg/dbreg.src b/db/dbreg/dbreg.src index c2205958b..ff3fc2923 100644 --- a/db/dbreg/dbreg.src +++ b/db/dbreg/dbreg.src @@ -1,17 +1,15 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: dbreg.src,v 10.24 2003/04/24 14:19:17 bostic Exp $ + * $Id: dbreg.src,v 10.26 2004/06/17 17:35:17 bostic Exp $ */ PREFIX __dbreg DBPRIVATE -INCLUDE #include "db_config.h" -INCLUDE INCLUDE #ifndef NO_SYSTEM_INCLUDES INCLUDE #include INCLUDE diff --git a/db/dbreg/dbreg_auto.c b/db/dbreg/dbreg_auto.c index 857559c66..a9cc5f704 100644 --- a/db/dbreg/dbreg_auto.c +++ b/db/dbreg/dbreg_auto.c @@ -1,4 +1,5 @@ /* Do not edit: automatically built by gen_rec.awk. */ + #include "db_config.h" #ifndef NO_SYSTEM_INCLUDES @@ -39,31 +40,42 @@ __dbreg_register_log(dbenv, txnid, ret_lsnp, flags, { DBT logrec; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; + COMPQUIET(lr, NULL); + rectype = DB___dbreg_register; npad = 0; + rlsnp = ret_lsnp; - is_durable = 1; - if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE)) { + ret = 0; + + if (LF_ISSET(DB_LOG_NOT_DURABLE)) { if (txnid == NULL) return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -82,27 +94,23 @@ __dbreg_register_log(dbenv, txnid, ret_lsnp, flags, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -162,139 +170,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; #endif - } else{ -#ifdef DIAGNOSTIC -do_put: -#endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__dbreg_register_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __dbreg_register_getpgnos __P((DB_ENV *, DBT *, - * PUBLIC: DB_LSN *, db_recops, void *)); - */ -int -__dbreg_register_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __dbreg_register_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__dbreg_register_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __dbreg_register_args *argp; - 
u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __dbreg_register_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__dbreg_register%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\topcode: %lu\n", (u_long)argp->opcode); - (void)printf("\tname: "); - for (i = 0; i < argp->name.size; i++) { - ch = ((u_int8_t *)argp->name.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tuid: "); - for (i = 0; i < argp->uid.size; i++) { - ch = ((u_int8_t *)argp->uid.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tftype: 0x%lx\n", (u_long)argp->ftype); - (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno); - (void)printf("\tid: 0x%lx\n", (u_long)argp->id); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __dbreg_register_read __P((DB_ENV *, void *, * PUBLIC: __dbreg_register_args **)); @@ -313,9 +229,9 @@ __dbreg_register_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__dbreg_register_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -361,45 +277,6 @@ __dbreg_register_read(dbenv, recbuf, argpp) return (0); } -/* - * PUBLIC: int __dbreg_init_print __P((DB_ENV *, int (***)(DB_ENV *, - * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); - */ -int -__dbreg_init_print(dbenv, dtabp, dtabsizep) - DB_ENV *dbenv; - int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - size_t *dtabsizep; -{ - int ret; - - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __dbreg_register_print, DB___dbreg_register)) != 0) - return (ret); - return (0); -} - -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __dbreg_init_getpgnos __P((DB_ENV *, - * PUBLIC: int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), - * PUBLIC: size_t *)); - */ -int -__dbreg_init_getpgnos(dbenv, dtabp, dtabsizep) - DB_ENV *dbenv; - int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - size_t *dtabsizep; -{ - int ret; - - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __dbreg_register_getpgnos, DB___dbreg_register)) != 0) - return (ret); - return (0); -} -#endif /* HAVE_REPLICATION */ - /* * PUBLIC: int __dbreg_init_recover __P((DB_ENV *, int (***)(DB_ENV *, * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); diff --git a/db/dbreg/dbreg_autop.c b/db/dbreg/dbreg_autop.c new file mode 100644 index 000000000..3889b357d --- /dev/null +++ b/db/dbreg/dbreg_autop.c @@ -0,0 +1,89 @@ +/* Do not edit: automatically built by gen_rec.awk. 
*/ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include + +#include +#include +#endif + +#include "db_int.h" +#include "dbinc/crypto.h" +#include "dbinc/db_page.h" +#include "dbinc/db_dispatch.h" +#include "dbinc/db_am.h" +#include "dbinc/log.h" +#include "dbinc/txn.h" + +/* + * PUBLIC: int __dbreg_register_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__dbreg_register_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __dbreg_register_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __dbreg_register_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__dbreg_register%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\topcode: %lu\n", (u_long)argp->opcode); + (void)printf("\tname: "); + for (i = 0; i < argp->name.size; i++) { + ch = ((u_int8_t *)argp->name.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tuid: "); + for (i = 0; i < argp->uid.size; i++) { + ch = ((u_int8_t *)argp->uid.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tftype: 0x%lx\n", (u_long)argp->ftype); + (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno); + (void)printf("\tid: 0x%lx\n", (u_long)argp->id); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __dbreg_init_print __P((DB_ENV *, int (***)(DB_ENV *, + * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); + */ +int +__dbreg_init_print(dbenv, dtabp, dtabsizep) + DB_ENV *dbenv; + int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); + size_t *dtabsizep; +{ + int ret; + + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __dbreg_register_print, DB___dbreg_register)) != 0) + return (ret); + return (0); +} diff --git a/db/dbreg/dbreg_rec.c b/db/dbreg/dbreg_rec.c index 3c81e29d7..07b175a1f 100644 --- a/db/dbreg/dbreg_rec.c +++ b/db/dbreg/dbreg_rec.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -31,14 +31,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
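With gen_rec.awk now emitting the print routines into separate *_autop.c files, __dbreg_init_print and its peers register those routines in a dispatch table through __db_add_recovery. A sketch of chaining several subsystems into one table; build_print_table() and the static table variables are illustrative, and the real table management lives elsewhere in the library:

static int (**print_dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
static size_t print_dtabsize;

static int
build_print_table(DB_ENV *dbenv)
{
	int ret;

	/* Each subsystem appends its print functions to the same table. */
	if ((ret = __dbreg_init_print(dbenv, &print_dtab, &print_dtabsize)) != 0)
		return (ret);
	if ((ret = __txn_init_print(dbenv, &print_dtab, &print_dtabsize)) != 0)
		return (ret);
	return (__qam_init_print(dbenv, &print_dtab, &print_dtabsize));
}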
+ * + * $Id: dbreg_rec.c,v 11.133 2004/09/24 00:43:18 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: dbreg_rec.c,v 11.120 2003/10/27 15:54:31 sue Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -52,7 +50,6 @@ static const char revid[] = "$Id: dbreg_rec.c,v 11.120 2003/10/27 15:54:31 sue E #include "dbinc/log.h" #include "dbinc/mp.h" #include "dbinc/txn.h" -#include "dbinc/qam.h" static int __dbreg_open_file __P((DB_ENV *, DB_TXN *, __dbreg_register_args *, void *)); @@ -74,6 +71,7 @@ __dbreg_register_recover(dbenv, dbtp, lsnp, op, info) DB *dbp; __dbreg_register_args *argp; int do_close, do_open, do_rem, ret, t_ret; + u_int32_t status; dblp = dbenv->lg_handle; dbp = NULL; @@ -93,7 +91,6 @@ __dbreg_register_recover(dbenv, dbtp, lsnp, op, info) else do_close = 1; break; - case DBREG_CLOSE: if (DB_UNDO(op)) do_open = 1; @@ -114,12 +111,15 @@ __dbreg_register_recover(dbenv, dbtp, lsnp, op, info) else do_close = 1; break; - case DBREG_CHKPNT: if (DB_UNDO(op) || op == DB_TXN_OPENFILES || op == DB_TXN_POPENFILES) do_open = 1; break; + default: + DB_ASSERT(0); + ret = EINVAL; + break; } if (do_open) { @@ -138,6 +138,8 @@ __dbreg_register_recover(dbenv, dbtp, lsnp, op, info) ret = __dbreg_open_file(dbenv, op == DB_TXN_ABORT || op == DB_TXN_POPENFILES ? argp->txnid : NULL, argp, info); + if (ret == DB_PAGE_NOTFOUND && argp->meta_pgno != PGNO_BASE_MD) + ret = ENOENT; if (ret == ENOENT || ret == EINVAL) { /* * If this is an OPEN while rolling forward, it's @@ -152,6 +154,9 @@ __dbreg_register_recover(dbenv, dbtp, lsnp, op, info) dblp->dbentry[argp->fileid].deleted = 0; ret = __dbreg_open_file(dbenv, NULL, argp, info); + if (ret == DB_PAGE_NOTFOUND && + argp->meta_pgno != PGNO_BASE_MD) + ret = ENOENT; } /* * We treat ENOENT as OK since it's possible that @@ -190,11 +195,15 @@ __dbreg_register_recover(dbenv, dbtp, lsnp, op, info) * recovery, it's possible that we failed after * the log record, but before we actually entered * a handle here. + * 3. If we aborted an open, then we wrote a non-txnal + * RCLOSE into the log. During the forward pass, the + * file won't be open, and that's OK. */ dbe = &dblp->dbentry[argp->fileid]; if (dbe->dbp == NULL && !dbe->deleted) { /* No valid entry here. */ - if (DB_REDO(op) || + if ((DB_REDO(op) && + argp->opcode != DBREG_RCLOSE) || argp->opcode == DBREG_CHKPNT) { __db_err(dbenv, "Improper file close at %lu/%lu", @@ -208,64 +217,70 @@ __dbreg_register_recover(dbenv, dbtp, lsnp, op, info) /* We have either an open entry or a deleted entry. */ if ((dbp = dbe->dbp) != NULL) { - MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp); - (void)__dbreg_revoke_id(dbp, 0, - DB_LOGFILEID_INVALID); - /* * If we're a replication client, it's * possible to get here with a dbp that * the user opened, but which we later * assigned a fileid to. Be sure that * we only close dbps that we opened in - * the recovery code; they should have - * DB_AM_RECOVER set. - * - * The only exception is if we're aborting - * in a normal environment; then we might - * get here with a non-AM_RECOVER database. + * the recovery code or that were opened + * inside a currently aborting transaction. 
*/ - if (F_ISSET(dbp, DB_AM_RECOVER) || - op == DB_TXN_ABORT) - do_rem = 1; + do_rem = F_ISSET(dbp, DB_AM_RECOVER) || + op == DB_TXN_ABORT; + MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp); + if (op == DB_TXN_ABORT) + (void)__dbreg_close_id(dbp, + NULL, DBREG_RCLOSE); + else + (void)__dbreg_revoke_id(dbp, 0, + DB_LOGFILEID_INVALID); } else if (dbe->deleted) { MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp); __dbreg_rem_dbentry(dblp, argp->fileid); } } else MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp); - if (do_rem) { - /* - * During recovery, all files are closed. On an abort, - * we only close the file if we opened it during the - * abort (DB_AM_RECOVER set), otherwise we simply do - * a __db_refresh. For the close case, if remove or - * rename has closed the file, don't request a sync, - * because the NULL mpf would be a problem. - */ - if (dbp != NULL) { - /* - * If we are undoing a create we'd better - * discard any buffers from the memory pool. - * We identify creates because the argp->id - * field contains the transaction containing - * the file create; if that id is invalid, we - * are not creating. - */ - if (argp->id != TXN_INVALID) + /* + * During recovery, all files are closed. On an abort, we only + * close the file if we opened it during the abort + * (DB_AM_RECOVER set), otherwise we simply do a __db_refresh. + * For the close case, if remove or rename has closed the file, + * don't request a sync, because a NULL mpf would be a problem. + * + * If we are undoing a create we'd better discard any buffers + * from the memory pool. We identify creates because the + * argp->id field contains the transaction containing the file + * create; if that id is invalid, we are not creating. + * + * On the backward pass, we need to "undo" opens even if the + * transaction in which they appeared committed, because we have + * already undone the corresponding close. In that case, the + * id will be valid, but we do not want to discard buffers. + */ + if (do_rem && dbp != NULL) { + if (argp->id != TXN_INVALID) { + if ((ret = __db_txnlist_find(dbenv, + info, argp->txnid->txnid, &status)) + != DB_NOTFOUND && ret != 0) + goto out; + if (ret == DB_NOTFOUND || status != TXN_COMMIT) F_SET(dbp, DB_AM_DISCARD); - if (op == DB_TXN_ABORT && - !F_ISSET(dbp, DB_AM_RECOVER)) - t_ret = __db_refresh(dbp, - NULL, DB_NOSYNC, NULL); - else { - if (op == DB_TXN_APPLY) - __db_sync(dbp); - t_ret = - __db_close(dbp, NULL, DB_NOSYNC); - } - if (t_ret != 0 && ret == 0) + ret = 0; + } + + if (op == DB_TXN_ABORT && + !F_ISSET(dbp, DB_AM_RECOVER)) { + if ((t_ret = __db_refresh(dbp, + NULL, DB_NOSYNC, NULL)) != 0 && ret == 0) + ret = t_ret; + } else { + if (op == DB_TXN_APPLY && + (t_ret = __db_sync(dbp)) != 0 && ret == 0) + ret = t_ret; + if ((t_ret = __db_close( + dbp, NULL, DB_NOSYNC)) != 0 && ret == 0) ret = t_ret; } } @@ -291,49 +306,51 @@ __dbreg_open_file(dbenv, txn, argp, info) void *info; { DB_ENTRY *dbe; - DB_LOG *lp; + DB_LOG *dblp; DB *dbp; - u_int32_t id; + u_int32_t id, status; + int ret; - lp = (DB_LOG *)dbenv->lg_handle; - /* - * We never re-open temporary files. Temp files are only - * useful during aborts in which case the dbp was entered - * when the file was registered. During recovery, we treat - * temp files as properly deleted files, allowing the open to - * fail and not reporting any errors when recovery fails to - * get a valid dbp from __dbreg_id_to_db. 
- */ - if (argp->name.size == 0) { - (void)__dbreg_add_dbentry(dbenv, lp, NULL, argp->fileid); - return (ENOENT); - } + dblp = (DB_LOG *)dbenv->lg_handle; /* * When we're opening, we have to check that the name we are opening * is what we expect. If it's not, then we close the old file and * open the new one. */ - MUTEX_THREAD_LOCK(dbenv, lp->mutexp); - if (argp->fileid < lp->dbentry_cnt) - dbe = &lp->dbentry[argp->fileid]; + MUTEX_THREAD_LOCK(dbenv, dblp->mutexp); + if (argp->fileid < dblp->dbentry_cnt) + dbe = &dblp->dbentry[argp->fileid]; else dbe = NULL; if (dbe != NULL) { if (dbe->deleted) { - MUTEX_THREAD_UNLOCK(dbenv, lp->mutexp); + MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp); return (ENOENT); } + + /* + * At the end of OPENFILES, we may have a file open. The + * open was part of a committed transaction, so it doesn't + * get undone. However, if the fileid was previously used, + * we'll see a close that may need to get undone. There are + * three ways we can detect this. 1) the meta-pgno in the + * current file does not match that of the open file, 2) the + * file uid of the current file does not match that of the + * previously opened file, 3) the current file is unnamed, in + * which case it should never be opened during recovery. + */ if ((dbp = dbe->dbp) != NULL) { if (dbp->meta_pgno != argp->meta_pgno || - memcmp(dbp->fileid, - argp->uid.data, DB_FILE_ID_LEN) != 0) { - MUTEX_THREAD_UNLOCK(dbenv, lp->mutexp); + argp->name.size == 0 || + memcmp(dbp->fileid, argp->uid.data, + DB_FILE_ID_LEN) != 0) { + MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp); (void)__dbreg_revoke_id(dbp, 0, DB_LOGFILEID_INVALID); if (F_ISSET(dbp, DB_AM_RECOVER)) - __db_close(dbp, NULL, DB_NOSYNC); + (void)__db_close(dbp, NULL, DB_NOSYNC); goto reopen; } @@ -343,7 +360,7 @@ __dbreg_open_file(dbenv, txn, argp, info) * here had better be the same dbp. */ DB_ASSERT(dbe->dbp == dbp); - MUTEX_THREAD_UNLOCK(dbenv, lp->mutexp); + MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp); /* * This is a successful open. We need to record that @@ -351,27 +368,40 @@ __dbreg_open_file(dbenv, txn, argp, info) * subtransaction that created the file system object. */ if (argp->id != TXN_INVALID && - __db_txnlist_update(dbenv, info, - argp->id, TXN_EXPECTED, NULL) == TXN_NOTFOUND) - (void)__db_txnlist_add(dbenv, - info, argp->id, TXN_EXPECTED, NULL); + (ret = __db_txnlist_update(dbenv, info, + argp->id, TXN_EXPECTED, NULL, &status, 1)) != 0) + return (ret); return (0); } } - MUTEX_THREAD_UNLOCK(dbenv, lp->mutexp); + MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp); + +reopen: + /* + * We never re-open temporary files. Temp files are only useful during + * aborts in which case the dbp was entered when the file was + * registered. During recovery, we treat temp files as properly deleted + * files, allowing the open to fail and not reporting any errors when + * recovery fails to get a valid dbp from __dbreg_id_to_db. + */ + if (argp->name.size == 0) { + (void)__dbreg_add_dbentry(dbenv, dblp, NULL, argp->fileid); + return (ENOENT); + } /* * We are about to pass a recovery txn pointer into the main library. * We need to make sure that any accessed fields are set appropriately. 
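The new comment block above lists three ways __dbreg_open_file() can tell that an already-open handle belongs to an earlier use of the same file id: the meta page numbers differ, the file uids differ, or the log record describes an unnamed (temporary) file, which is never reopened during recovery. A compact sketch of that predicate over simplified stand-in structures (FILE_ID_LEN stands in for DB_FILE_ID_LEN; the real code compares a DB handle against the __dbreg_register record):

#include <string.h>

#define FILE_ID_LEN 20                  /* stand-in for DB_FILE_ID_LEN */

struct open_handle {
        unsigned int meta_pgno;
        unsigned char fileid[FILE_ID_LEN];
};

struct register_record {
        unsigned int meta_pgno;
        size_t name_size;               /* 0 for an unnamed temporary file */
        unsigned char uid[FILE_ID_LEN];
};

/* Nonzero when the currently open handle cannot belong to this record. */
static int
handle_is_stale(const struct open_handle *h, const struct register_record *r)
{
        return (h->meta_pgno != r->meta_pgno ||
            r->name_size == 0 ||
            memcmp(h->fileid, r->uid, FILE_ID_LEN) != 0);
}

int
main(void)
{
        struct open_handle h = { 1, { 0xab } };
        struct register_record r = { 2, 4, { 0xab } };

        /* Meta page numbers differ, so the handle must be revoked. */
        return (handle_is_stale(&h, &r) ? 0 : 1);
}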
*/ -reopen: if (txn != NULL) { + if (txn != NULL) { id = txn->txnid; memset(txn, 0, sizeof(DB_TXN)); txn->txnid = id; txn->mgrp = dbenv->tx_handle; } - return (__dbreg_do_open(dbenv, txn, lp, argp->uid.data, argp->name.data, + return (__dbreg_do_open(dbenv, + txn, dblp, argp->uid.data, argp->name.data, argp->ftype, argp->fileid, argp->meta_pgno, info, argp->id)); } diff --git a/db/dbreg/dbreg_stat.c b/db/dbreg/dbreg_stat.c new file mode 100644 index 000000000..d033d7ea1 --- /dev/null +++ b/db/dbreg/dbreg_stat.c @@ -0,0 +1,97 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1997-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: dbreg_stat.c,v 11.47 2004/09/22 03:43:09 bostic Exp $ + */ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include +#include +#endif + +#include "db_int.h" +#include "dbinc/db_page.h" +#include "dbinc/db_am.h" +#include "dbinc/log.h" +#include "dbinc/txn.h" + +#ifdef HAVE_STATISTICS +/* + * __dbreg_print_fname -- + * Display the contents of an FNAME structure. + * + * PUBLIC: void __dbreg_print_fname __P((DB_ENV *, FNAME *)); + */ +void +__dbreg_print_fname(dbenv, fnp) + DB_ENV *dbenv; + FNAME *fnp; +{ + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "DB handle FNAME contents:"); + STAT_LONG("log ID", fnp->id); + STAT_ULONG("Meta pgno", fnp->meta_pgno); + __db_print_fileid(dbenv, fnp->ufid, "\tFile ID"); + STAT_ULONG("create txn", fnp->create_txnid); + STAT_LONG("durable", fnp->is_durable); +} + +/* + * __dbreg_print_dblist -- + * Display the DB_ENV's list of files. + * + * PUBLIC: void __dbreg_print_dblist __P((DB_ENV *, u_int32_t)); + */ +void +__dbreg_print_dblist(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + DB *dbp; + DB_LOG *dblp; + FNAME *fnp; + LOG *lp; + int del, first; + char *name; + + dblp = dbenv->lg_handle; + lp = dblp->reginfo.primary; + + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "LOG FNAME list:"); + __db_print_mutex(dbenv, NULL, &lp->fq_mutex, "File name mutex", flags); + + STAT_LONG("Fid max", lp->fid_max); + + MUTEX_LOCK(dbenv, &lp->fq_mutex); + for (first = 1, fnp = SH_TAILQ_FIRST(&lp->fq, __fname); + fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) { + if (first) { + first = 0; + __db_msg(dbenv, + "ID\tName\tType\tPgno\tTxnid\tDBP-info"); + } + if (fnp->name_off == INVALID_ROFF) + name = ""; + else + name = R_ADDR(dbenv, &dblp->reginfo, fnp->name_off); + + dbp = fnp->id >= dblp->dbentry_cnt ? NULL : + dblp->dbentry[fnp->id].dbp; + del = fnp->id >= dblp->dbentry_cnt ? 0 : + dblp->dbentry[fnp->id].deleted; + __db_msg(dbenv, "%ld\t%s\t%s\t%lu\t%lx\t%s %d %lx %lx", + (long)fnp->id, name, + __db_dbtype_to_string(fnp->s_type), + (u_long)fnp->meta_pgno, (u_long)fnp->create_txnid, + dbp == NULL ? "No DBP" : "DBP", del, P_TO_ULONG(dbp), + (u_long)(dbp == NULL ? 0 : dbp->flags)); + } + MUTEX_UNLOCK(dbenv, &lp->fq_mutex); +} +#endif diff --git a/db/dbreg/dbreg_util.c b/db/dbreg/dbreg_util.c index 6e21c35f3..c0d36ef17 100644 --- a/db/dbreg/dbreg_util.c +++ b/db/dbreg/dbreg_util.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. 
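The new dbreg_stat.c above walks the shared-memory FNAME queue under fq_mutex and prints one line per registered file. The sketch below produces the same style of report over an ordinary linked list; the structure and field names are simplified stand-ins, not the library's FNAME layout or its SH_TAILQ macros.

#include <stdio.h>

struct fname {
        long id;                        /* dbreg file id */
        const char *name;               /* NULL when the name is unavailable */
        const char *type;               /* access method, e.g. "btree" */
        unsigned long meta_pgno;
        unsigned long create_txnid;
        struct fname *next;
};

static void
print_fname_list(const struct fname *fnp)
{
        printf("ID\tName\tType\tPgno\tTxnid\n");
        for (; fnp != NULL; fnp = fnp->next)
                printf("%ld\t%s\t%s\t%lu\t%lx\n", fnp->id,
                    fnp->name == NULL ? "" : fnp->name, fnp->type,
                    fnp->meta_pgno, fnp->create_txnid);
}

int
main(void)
{
        struct fname f2 = { 1, "b.db", "hash", 0, 0x81, NULL };
        struct fname f1 = { 0, "a.db", "btree", 0, 0x80, &f2 };

        print_fname_list(&f1);
        return (0);
}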
+ * + * $Id: dbreg_util.c,v 11.49 2004/09/22 03:43:09 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: dbreg_util.c,v 11.39 2003/11/10 17:42:34 sue Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include @@ -51,7 +49,7 @@ __dbreg_add_dbentry(dbenv, dblp, dbp, ndx) */ if (dblp->dbentry_cnt <= ndx) { if ((ret = __os_realloc(dbenv, - (ndx + DB_GROW_SIZE) * sizeof(DB_ENTRY), + (size_t)(ndx + DB_GROW_SIZE) * sizeof(DB_ENTRY), &dblp->dbentry)) != 0) goto err; @@ -91,13 +89,13 @@ __dbreg_rem_dbentry(dblp, ndx) } /* - * __dbreg_open_files -- - * Put a DBREG_CHKPNT log record for each open database. + * __dbreg_log_files -- + * Put a DBREG_CHKPNT/CLOSE log record for each open database. * - * PUBLIC: int __dbreg_open_files __P((DB_ENV *)); + * PUBLIC: int __dbreg_log_files __P((DB_ENV *)); */ int -__dbreg_open_files(dbenv) +__dbreg_log_files(dbenv) DB_ENV *dbenv; { DB_LOG *dblp; @@ -121,7 +119,7 @@ __dbreg_open_files(dbenv) dbtp = NULL; else { memset(&t, 0, sizeof(t)); - t.data = R_ADDR(&dblp->reginfo, fnp->name_off); + t.data = R_ADDR(dbenv, &dblp->reginfo, fnp->name_off); t.size = (u_int32_t)strlen(t.data) + 1; dbtp = &t; } @@ -185,9 +183,9 @@ __dbreg_close_files(dbenv) * so that we don't end up leaving around FNAME entries * for dbps that shouldn't have them. */ - if ((dbp = dblp->dbentry[i].dbp) != NULL) { + if ((dbp = dblp->dbentry[i].dbp) != NULL) { /* - * It's unsafe to call DB->close or revoke_id + * It's unsafe to call DB->close or revoke_id * while holding the thread lock, because * we'll call __dbreg_rem_dbentry and grab it again. * @@ -302,7 +300,7 @@ __dbreg_id_to_db_int(dbenv, txn, dbpp, ndx, inc, tryopen) * assumption, because the other process that has the file * open shouldn't be closing it while we're trying to abort. */ - name = R_ADDR(&dblp->reginfo, fname->name_off); + name = R_ADDR(dbenv, &dblp->reginfo, fname->name_off); /* * At this point, we are not holding the thread lock, so exit @@ -346,9 +344,9 @@ err: MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp); * PUBLIC: int __dbreg_id_to_fname __P((DB_LOG *, int32_t, int, FNAME **)); */ int -__dbreg_id_to_fname(dblp, lid, have_lock, fnamep) +__dbreg_id_to_fname(dblp, id, have_lock, fnamep) DB_LOG *dblp; - int32_t lid; + int32_t id; int have_lock; FNAME **fnamep; { @@ -366,7 +364,7 @@ __dbreg_id_to_fname(dblp, lid, have_lock, fnamep) MUTEX_LOCK(dbenv, &lp->fq_mutex); for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname); fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) { - if (fnp->id == lid) { + if (fnp->id == id) { *fnamep = fnp; ret = 0; break; @@ -433,12 +431,12 @@ __dbreg_get_name(dbenv, fid, namep) char **namep; { DB_LOG *dblp; - FNAME *fname; + FNAME *fnp; dblp = dbenv->lg_handle; - if (dblp != NULL && __dbreg_fid_to_fname(dblp, fid, 0, &fname) == 0) { - *namep = R_ADDR(&dblp->reginfo, fname->name_off); + if (dblp != NULL && __dbreg_fid_to_fname(dblp, fid, 0, &fnp) == 0) { + *namep = R_ADDR(dbenv, &dblp->reginfo, fnp->name_off); return (0); } @@ -467,8 +465,8 @@ __dbreg_do_open(dbenv, u_int32_t id; { DB *dbp; + u_int32_t cstat, ret_stat; int ret; - u_int32_t cstat; if ((ret = db_create(&dbp, lp->dbenv, 0)) != 0) return (ret); @@ -516,26 +514,21 @@ __dbreg_do_open(dbenv, * know how to handle the subtransaction that created * the file system object. 
*/ - if (id != TXN_INVALID) { - if ((ret = __db_txnlist_update(dbenv, - info, id, cstat, NULL)) == TXN_NOTFOUND) - ret = __db_txnlist_add(dbenv, - info, id, cstat, NULL); - else if (ret > 0) - ret = 0; - } + if (id != TXN_INVALID) + ret = __db_txnlist_update(dbenv, + info, id, cstat, NULL, &ret_stat, 1); + err: if (cstat == TXN_IGNORE) goto not_right; return (ret); } else if (ret == ENOENT) { /* Record that the open failed in the txnlist. */ - if (id != TXN_INVALID && (ret = __db_txnlist_update(dbenv, - info, id, TXN_UNEXPECTED, NULL)) == TXN_NOTFOUND) - ret = __db_txnlist_add(dbenv, - info, id, TXN_UNEXPECTED, NULL); + if (id != TXN_INVALID) + ret = __db_txnlist_update(dbenv, info, + id, TXN_UNEXPECTED, NULL, &ret_stat, 1); } not_right: - (void)__db_close(dbp, NULL, 0); + (void)__db_close(dbp, NULL, DB_NOSYNC); /* Add this file as deleted. */ (void)__dbreg_add_dbentry(dbenv, lp, NULL, ndx); return (ret); @@ -625,7 +618,7 @@ __dbreg_lazy_id(dbp) * modification call finding a valid ID in the dbp before the * dbreg_register and commit records are in the log. * If there was an error, then we call __dbreg_revoke_id to - * remove the entry from the lists. + * remove the entry from the lists. */ fnp->id = id; err: @@ -634,177 +627,3 @@ err: MUTEX_UNLOCK(dbenv, &lp->fq_mutex); return (ret); } - -/* - * __dbreg_push_id and __dbreg_pop_id -- - * Dbreg ids from closed files are kept on a stack in shared memory - * for recycling. (We want to reuse them as much as possible because each - * process keeps open files in an array by ID.) Push them to the stack and - * pop them from it, managing memory as appropriate. - * - * The stack is protected by the fq_mutex, and in both functions we assume - * that this is already locked. - * - * PUBLIC: int __dbreg_push_id __P((DB_ENV *, int32_t)); - * PUBLIC: int __dbreg_pop_id __P((DB_ENV *, int32_t *)); - */ -int -__dbreg_push_id(dbenv, id) - DB_ENV *dbenv; - int32_t id; -{ - DB_LOG *dblp; - LOG *lp; - int32_t *stack, *newstack; - int ret; - - dblp = dbenv->lg_handle; - lp = dblp->reginfo.primary; - - if (lp->free_fid_stack != INVALID_ROFF) - stack = R_ADDR(&dblp->reginfo, lp->free_fid_stack); - else - stack = NULL; - - /* Check if we have room on the stack. */ - if (lp->free_fids_alloced <= lp->free_fids + 1) { - R_LOCK(dbenv, &dblp->reginfo); - if ((ret = __db_shalloc(dblp->reginfo.addr, - (lp->free_fids_alloced + 20) * sizeof(u_int32_t), 0, - &newstack)) != 0) { - R_UNLOCK(dbenv, &dblp->reginfo); - return (ret); - } - - memcpy(newstack, stack, - lp->free_fids_alloced * sizeof(u_int32_t)); - lp->free_fid_stack = R_OFFSET(&dblp->reginfo, newstack); - lp->free_fids_alloced += 20; - - if (stack != NULL) - __db_shalloc_free(dblp->reginfo.addr, stack); - - stack = newstack; - R_UNLOCK(dbenv, &dblp->reginfo); - } - - DB_ASSERT(stack != NULL); - stack[lp->free_fids++] = id; - return (0); -} - -int -__dbreg_pop_id(dbenv, id) - DB_ENV *dbenv; - int32_t *id; -{ - DB_LOG *dblp; - LOG *lp; - int32_t *stack; - - dblp = dbenv->lg_handle; - lp = dblp->reginfo.primary; - - /* Do we have anything to pop? */ - if (lp->free_fid_stack != INVALID_ROFF && lp->free_fids > 0) { - stack = R_ADDR(&dblp->reginfo, lp->free_fid_stack); - *id = stack[--lp->free_fids]; - } else - *id = DB_LOGFILEID_INVALID; - - return (0); -} - -/* - * __dbreg_pluck_id -- - * Remove a particular dbreg id from the stack of free ids. This is - * used when we open a file, as in recovery, with a specific ID that might - * be on the stack. 
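The hunk above replaces the old two-step idiom (try __db_txnlist_update(), fall back to __db_txnlist_add() on TXN_NOTFOUND) with a single call that can add the entry itself and reports the prior status through an out parameter. A sketch of that update-or-add pattern over a tiny fixed-size table; the names, return codes, and layout are hypothetical, not the library's txnlist API.

#include <string.h>

#define MAX_TXNS 64

struct txn_entry {
        unsigned int id;
        int status;
};

struct txn_list {
        struct txn_entry e[MAX_TXNS];
        int n;
};

/*
 * Set the status for id; when the entry is missing, add it if add_ok is
 * nonzero.  Returns 0 on success, 1 when the id is absent and add_ok is
 * zero, -1 when the table is full.
 */
static int
txnlist_update(struct txn_list *tl, unsigned int id, int status, int add_ok)
{
        int i;

        for (i = 0; i < tl->n; i++)
                if (tl->e[i].id == id) {
                        tl->e[i].status = status;
                        return (0);
                }
        if (!add_ok)
                return (1);
        if (tl->n == MAX_TXNS)
                return (-1);
        tl->e[tl->n].id = id;
        tl->e[tl->n].status = status;
        tl->n++;
        return (0);
}

int
main(void)
{
        struct txn_list tl;
        int ret;

        memset(&tl, 0, sizeof(tl));
        ret = txnlist_update(&tl, 7, 1, 1);     /* adds id 7 */
        ret |= txnlist_update(&tl, 7, 2, 0);    /* updates it in place */
        return (ret);
}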
- * - * Returns success whether or not the particular id was found, and like - * push and pop, assumes that the fq_mutex is locked. - * - * PUBLIC: int __dbreg_pluck_id __P((DB_ENV *, int32_t)); - */ -int -__dbreg_pluck_id(dbenv, id) - DB_ENV *dbenv; - int32_t id; -{ - DB_LOG *dblp; - LOG *lp; - int32_t *stack; - int i; - - dblp = dbenv->lg_handle; - lp = dblp->reginfo.primary; - - /* Do we have anything to look at? */ - if (lp->free_fid_stack != INVALID_ROFF) { - stack = R_ADDR(&dblp->reginfo, lp->free_fid_stack); - for (i = 0; i < lp->free_fids; i++) - if (id == stack[i]) { - /* - * Found it. Overwrite it with the top - * id (which may harmlessly be itself), - * and shorten the stack by one. - */ - stack[i] = stack[lp->free_fids - 1]; - lp->free_fids--; - return (0); - } - } - - return (0); -} - -#ifdef DEBUG -/* - * __dbreg_print_dblist -- - * Display the list of files. - * - * PUBLIC: void __dbreg_print_dblist __P((DB_ENV *)); - */ -void -__dbreg_print_dblist(dbenv) - DB_ENV *dbenv; -{ - DB *dbp; - DB_LOG *dblp; - FNAME *fnp; - LOG *lp; - int del, first; - char *name; - - dblp = dbenv->lg_handle; - lp = dblp->reginfo.primary; - - MUTEX_LOCK(dbenv, &lp->fq_mutex); - - for (first = 1, fnp = SH_TAILQ_FIRST(&lp->fq, __fname); - fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) { - if (first) { - first = 0; - __db_err(dbenv, - "ID\t\t\tName\tType\tPgno\tTxnid\tDBP-info"); - } - if (fnp->name_off == INVALID_ROFF) - name = ""; - else - name = R_ADDR(&dblp->reginfo, fnp->name_off); - - dbp = fnp->id >= dblp->dbentry_cnt ? NULL : - dblp->dbentry[fnp->id].dbp; - del = fnp->id >= dblp->dbentry_cnt ? 0 : - dblp->dbentry[fnp->id].deleted; - __db_err(dbenv, "%ld\t%s\t\t\t%s\t%lu\t%lx\t%s %d %lx %lx", - (long)fnp->id, name, - __db_dbtype_to_string(fnp->s_type), - (u_long)fnp->meta_pgno, (u_long)fnp->create_txnid, - dbp == NULL ? "No DBP" : "DBP", del, P_TO_ULONG(dbp), - (u_long)(dbp == NULL ? 0 : dbp->flags)); - } - - MUTEX_UNLOCK(dbenv, &lp->fq_mutex); -} -#endif diff --git a/db/dist/Makefile.in b/db/dist/Makefile.in index 2e3d940c4..4990d70af 100644 --- a/db/dist/Makefile.in +++ b/db/dist/Makefile.in @@ -1,4 +1,4 @@ -# $Id: Makefile.in,v 11.226 2003/11/28 18:50:05 bostic Exp $ +# $Id: Makefile.in,v 11.261 2004/09/24 15:27:47 mjc Exp $ srcdir= @srcdir@/.. builddir=. 
@@ -30,7 +30,6 @@ ln= @db_cv_path_ln@ mkdir= @db_cv_path_mkdir@ ranlib= @db_cv_path_ranlib@ rm= @db_cv_path_rm@ -rpm= @db_cv_path_rpm@ strip= @db_cv_path_strip@ ################################################## @@ -54,10 +53,11 @@ CPPFLAGS= -I$(builddir) -I$(srcdir) @CPPFLAGS@ ################################################## CFLAGS= -c $(CPPFLAGS) @CFLAGS@ CC= @MAKEFILE_CC@ -CCLINK= @MAKEFILE_CCLINK@ +CCLINK= @MAKEFILE_CCLINK@ @CFLAGS@ LDFLAGS= @LDFLAGS@ LIBS= @LIBS@ +TEST_LIBS= @TEST_LIBS@ LIBSO_LIBS= @LIBSO_LIBS@ libdb_base= libdb @@ -75,8 +75,8 @@ libso_major= $(libdb_base)-$(LIBMAJOR)@SOSUFFIX@ ################################################## CXXFLAGS= -c $(CPPFLAGS) @CXXFLAGS@ CXX= @MAKEFILE_CXX@ -CXXLINK= @MAKEFILE_CXXLINK@ -XSOLINK= @MAKEFILE_XSOLINK@ +CXXLINK= @MAKEFILE_CXXLINK@ @CXXFLAGS@ +XSOLINK= @MAKEFILE_XSOLINK@ @CXXFLAGS@ LIBXSO_LIBS= @LIBXSO_LIBS@ libcxx_base= libdb_cxx @@ -101,12 +101,13 @@ JAVACFLAGS= @JAVACFLAGS@ JAVA_CLASSTOP= ./classes JAVA_RPCCLASSTOP=./classes.rpc JAVA_EXCLASSTOP=./classes.ex -JAVA_SRCDIR= $(srcdir)/java/src JAVA_DBREL= com/sleepycat/db JAVA_EXREL= com/sleepycat/examples JAVA_RPCREL= com/sleepycat/db/rpcserver +JAVA_SRCDIR= $(srcdir)/java/src JAVA_EXDIR= $(srcdir)/examples_java/src/com/sleepycat/examples JAVA_RPCDIR= $(srcdir)/rpc_server/java +JAVA_SLEEPYCAT= $(srcdir)/java/src/com/sleepycat libj_jarfile= db.jar libj_exjarfile= dbexamples.jar @@ -172,7 +173,12 @@ QUEUE_OBJS=\ QUEUE_VRFY_OBJS=\ qam_verify@o@ REP_OBJS=\ - rep_method@o@ rep_record@o@ rep_region@o@ rep_util@o@ + rep_auto@o@ rep_backup@o@ rep_method@o@ rep_record@o@ rep_region@o@ \ + rep_stat@o@ rep_util@o@ +PRINT_OBJS=\ + btree_autop@o@ crdel_autop@o@ db_autop@o@ dbreg_autop@o@ \ + fileops_autop@o@ hash_autop@o@ qam_autop@o@ rep_autop@o@ \ + txn_autop@o@ C_OBJS= @ADDITIONAL_OBJS@ @REPLACEMENT_OBJS@ @CRYPTO_OBJS@ @RPC_CLIENT_OBJS@ \ crdel_auto@o@ crdel_rec@o@ db@o@ db_am@o@ db_auto@o@ \ @@ -180,28 +186,31 @@ C_OBJS= @ADDITIONAL_OBJS@ @REPLACEMENT_OBJS@ @CRYPTO_OBJS@ @RPC_CLIENT_OBJS@ \ db_err@o@ db_getlong@o@ db_idspace@o@ db_iface@o@ db_join@o@ \ db_log2@o@ db_meta@o@ db_method@o@ db_open@o@ db_overflow@o@ \ db_pr@o@ db_rec@o@ db_reclaim@o@ db_rename@o@ db_remove@o@ \ - db_ret@o@ db_salloc@o@ db_shash@o@ db_truncate@o@ db_upg@o@ \ - db_upg_opd@o@ dbm@o@ dbreg@o@ dbreg_auto@o@ dbreg_rec@o@ \ - dbreg_util@o@ env_file@o@ env_method@o@ env_open@o@ \ - env_recover@o@ env_region@o@ fileops_auto@o@ fop_basic@o@ \ + db_ret@o@ db_salloc@o@ db_setid@o@ db_setlsn@o@ db_shash@o@ \ + db_stati@o@ db_truncate@o@ db_upg@o@ db_upg_opd@o@ dbm@o@ \ + dbreg@o@ dbreg_auto@o@ dbreg_rec@o@ dbreg_stat@o@ dbreg_util@o@ \ + env_file@o@ env_method@o@ env_open@o@ env_recover@o@ \ + env_region@o@ env_stat@o@ fileops_auto@o@ fop_basic@o@ \ fop_rec@o@ fop_util@o@ hash_func@o@ hmac@o@ hsearch@o@ lock@o@ \ - lock_deadlock@o@ lock_method@o@ lock_region@o@ lock_stat@o@ \ - lock_util@o@ log@o@ log_archive@o@ log_compare@o@ log_get@o@ \ - log_method@o@ log_put@o@ mp_alloc@o@ mp_bh@o@ mp_fget@o@ \ - mp_fopen@o@ mp_fput@o@ mp_fset@o@ mp_method@o@ mp_region@o@ \ - mp_register@o@ mp_stat@o@ mp_sync@o@ mp_trickle@o@ mutex@o@ \ - os_abs@o@ os_alloc@o@ os_clock@o@ os_config@o@ os_dir@o@ \ - os_errno@o@ os_fid@o@ os_fsync@o@ os_handle@o@ os_id@o@ \ - os_map@o@ os_method@o@ os_oflags@o@ os_open@o@ os_region@o@ \ - os_rename@o@ os_root@o@ os_rpath@o@ os_rw@o@ os_seek@o@ \ - os_sleep@o@ os_spin@o@ os_stat@o@ os_tmpdir@o@ os_unlink@o@ \ - sha1@o@ txn@o@ txn_auto@o@ txn_method@o@ txn_rec@o@ \ + lock_deadlock@o@ lock_id@o@ 
lock_list@o@ lock_method@o@ \ + lock_region@o@ lock_stat@o@ lock_timer@o@ lock_util@o@ log@o@ \ + log_archive@o@ log_compare@o@ log_get@o@ log_method@o@ \ + log_put@o@ log_stat@o@ mp_alloc@o@ mp_bh@o@ mp_fget@o@ \ + mp_fmethod@o@ mp_fopen@o@ mp_fput@o@ mp_fset@o@ mp_method@o@ \ + mp_region@o@ mp_register@o@ mp_stat@o@ mp_sync@o@ mp_trickle@o@ \ + mutex@o@ os_abs@o@ os_alloc@o@ os_clock@o@ os_config@o@ \ + os_dir@o@ os_errno@o@ os_fid@o@ os_fsync@o@ os_handle@o@ \ + os_id@o@ os_map@o@ os_method@o@ os_oflags@o@ os_open@o@ \ + os_region@o@ os_rename@o@ os_root@o@ os_rpath@o@ os_rw@o@ \ + os_seek@o@ os_sleep@o@ os_spin@o@ os_stat@o@ os_tmpdir@o@ \ + os_truncate@o@ os_unlink@o@ sha1@o@ seq_stat@o@ sequence@o@ \ + snprintf@o@ txn@o@ txn_auto@o@ txn_method@o@ txn_rec@o@ \ txn_recover@o@ txn_region@o@ txn_stat@o@ txn_util@o@ xa@o@ \ xa_db@o@ xa_map@o@ CXX_OBJS=\ - cxx_db@o@ cxx_dbc@o@ cxx_dbt@o@ cxx_env@o@ cxx_except@o@ \ - cxx_lock@o@ cxx_logc@o@ cxx_mpool@o@ cxx_txn@o@ cxx_multi@o@ + cxx_db@o@ cxx_dbc@o@ cxx_dbt@o@ cxx_env@o@ cxx_except@o@ cxx_lock@o@ \ + cxx_logc@o@ cxx_mpool@o@ cxx_multi@o@ cxx_seq@o@ cxx_txn@o@ CRYPTO_OBJS=\ aes_method@o@ crypto@o@ mt19937db@o@ rijndael-alg-fst@o@ \ @@ -210,215 +219,256 @@ CRYPTO_OBJS=\ JAVA_OBJS=\ db_java_wrap@o@ -JAVA_SLEEPYCAT=$(JAVA_SRCDIR)/com/sleepycat JAVA_DBSRCS=\ - $(JAVA_SLEEPYCAT)/bdb/bind/ByteArrayBinding.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/ByteArrayFormat.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/DataBinding.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/DataBuffer.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/DataFormat.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/DataType.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/EntityBinding.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/KeyExtractor.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/SimpleBuffer.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/serial/ClassCatalog.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/serial/SerialBinding.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/serial/SerialFormat.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/serial/SerialInput.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/serial/SerialOutput.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/serial/SerialSerialBinding.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/serial/SerialSerialKeyExtractor.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/serial/TupleSerialBinding.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/serial/TupleSerialKeyExtractor.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/serial/TupleSerialMarshalledBinding.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/serial/TupleSerialMarshalledKeyExtractor.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/tuple/MarshalledTupleData.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/tuple/MarshalledTupleKeyEntity.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/tuple/TupleBinding.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/tuple/TupleFormat.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/tuple/TupleInput.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/tuple/TupleInputBinding.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/tuple/TupleMarshalledBinding.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/tuple/TupleOutput.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/tuple/TupleTupleBinding.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/tuple/TupleTupleKeyExtractor.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/tuple/TupleTupleMarshalledBinding.java \ - $(JAVA_SLEEPYCAT)/bdb/bind/tuple/TupleTupleMarshalledKeyExtractor.java \ - $(JAVA_SLEEPYCAT)/db/@db_cv_build_type@/DbConstants.java \ - $(JAVA_SLEEPYCAT)/db/db_java.java \ - $(JAVA_SLEEPYCAT)/db/db_javaJNI.java \ - $(JAVA_SLEEPYCAT)/db/Db.java \ - $(JAVA_SLEEPYCAT)/db/DbAppDispatch.java \ - $(JAVA_SLEEPYCAT)/db/DbAppendRecno.java \ - $(JAVA_SLEEPYCAT)/db/DbBtreeCompare.java \ - $(JAVA_SLEEPYCAT)/db/DbBtreePrefix.java \ - 
$(JAVA_SLEEPYCAT)/db/DbBtreeStat.java \ - $(JAVA_SLEEPYCAT)/db/DbClient.java \ - $(JAVA_SLEEPYCAT)/db/DbDeadlockException.java \ - $(JAVA_SLEEPYCAT)/db/DbDupCompare.java \ - $(JAVA_SLEEPYCAT)/db/DbEnv.java \ - $(JAVA_SLEEPYCAT)/db/DbEnvFeedback.java \ - $(JAVA_SLEEPYCAT)/db/DbEnvFeedbackHandler.java \ - $(JAVA_SLEEPYCAT)/db/DbErrcall.java \ - $(JAVA_SLEEPYCAT)/db/DbErrorHandler.java \ - $(JAVA_SLEEPYCAT)/db/DbException.java \ - $(JAVA_SLEEPYCAT)/db/DbFeedback.java \ - $(JAVA_SLEEPYCAT)/db/DbFeedbackHandler.java \ - $(JAVA_SLEEPYCAT)/db/DbHash.java \ - $(JAVA_SLEEPYCAT)/db/DbHashStat.java \ - $(JAVA_SLEEPYCAT)/db/DbKeyRange.java \ - $(JAVA_SLEEPYCAT)/db/DbLock.java \ - $(JAVA_SLEEPYCAT)/db/DbLockNotGrantedException.java \ - $(JAVA_SLEEPYCAT)/db/DbLockRequest.java \ - $(JAVA_SLEEPYCAT)/db/DbLockStat.java \ - $(JAVA_SLEEPYCAT)/db/DbLogStat.java \ - $(JAVA_SLEEPYCAT)/db/DbLogc.java \ - $(JAVA_SLEEPYCAT)/db/DbLsn.java \ - $(JAVA_SLEEPYCAT)/db/DbMemoryException.java \ - $(JAVA_SLEEPYCAT)/db/DbMpoolFStat.java \ - $(JAVA_SLEEPYCAT)/db/DbMpoolFile.java \ - $(JAVA_SLEEPYCAT)/db/DbMpoolStat.java \ - $(JAVA_SLEEPYCAT)/db/DbMultipleDataIterator.java \ - $(JAVA_SLEEPYCAT)/db/DbMultipleIterator.java \ - $(JAVA_SLEEPYCAT)/db/DbMultipleKeyDataIterator.java \ - $(JAVA_SLEEPYCAT)/db/DbMultipleRecnoDataIterator.java \ - $(JAVA_SLEEPYCAT)/db/DbPanicHandler.java \ - $(JAVA_SLEEPYCAT)/db/DbPreplist.java \ - $(JAVA_SLEEPYCAT)/db/DbQueueStat.java \ - $(JAVA_SLEEPYCAT)/db/DbRepStat.java \ - $(JAVA_SLEEPYCAT)/db/DbRepTransport.java \ - $(JAVA_SLEEPYCAT)/db/DbRunRecoveryException.java \ - $(JAVA_SLEEPYCAT)/db/DbSecondaryKeyCreate.java \ - $(JAVA_SLEEPYCAT)/db/DbTxn.java \ - $(JAVA_SLEEPYCAT)/db/DbTxnStat.java \ - $(JAVA_SLEEPYCAT)/db/DbUtil.java \ - $(JAVA_SLEEPYCAT)/db/Dbc.java \ - $(JAVA_SLEEPYCAT)/db/Dbt.java \ - $(JAVA_SLEEPYCAT)/bdb/CurrentTransaction.java \ - $(JAVA_SLEEPYCAT)/bdb/DataCursor.java \ - $(JAVA_SLEEPYCAT)/bdb/DataDb.java \ - $(JAVA_SLEEPYCAT)/bdb/DataEnvironment.java \ - $(JAVA_SLEEPYCAT)/bdb/DataIndex.java \ - $(JAVA_SLEEPYCAT)/bdb/DataStore.java \ - $(JAVA_SLEEPYCAT)/bdb/DataThang.java \ - $(JAVA_SLEEPYCAT)/bdb/DataView.java \ - $(JAVA_SLEEPYCAT)/bdb/ForeignKeyIndex.java \ - $(JAVA_SLEEPYCAT)/bdb/IntegrityConstraintException.java \ - $(JAVA_SLEEPYCAT)/bdb/KeyRange.java \ - $(JAVA_SLEEPYCAT)/bdb/KeyRangeException.java \ - $(JAVA_SLEEPYCAT)/bdb/PrimaryKeyAssigner.java \ - $(JAVA_SLEEPYCAT)/bdb/RecordNumberBinding.java \ - $(JAVA_SLEEPYCAT)/bdb/RecordNumberFormat.java \ - $(JAVA_SLEEPYCAT)/bdb/StoredClassCatalog.java \ - $(JAVA_SLEEPYCAT)/bdb/TransactionRunner.java \ - $(JAVA_SLEEPYCAT)/bdb/TransactionWorker.java \ - $(JAVA_SLEEPYCAT)/bdb/collection/MapEntry.java \ - $(JAVA_SLEEPYCAT)/bdb/collection/StoredCollection.java \ - $(JAVA_SLEEPYCAT)/bdb/collection/StoredCollections.java \ - $(JAVA_SLEEPYCAT)/bdb/collection/StoredContainer.java \ - $(JAVA_SLEEPYCAT)/bdb/collection/StoredEntrySet.java \ - $(JAVA_SLEEPYCAT)/bdb/collection/StoredIterator.java \ - $(JAVA_SLEEPYCAT)/bdb/collection/StoredKeySet.java \ - $(JAVA_SLEEPYCAT)/bdb/collection/StoredList.java \ - $(JAVA_SLEEPYCAT)/bdb/collection/StoredMap.java \ - $(JAVA_SLEEPYCAT)/bdb/collection/StoredMapEntry.java \ - $(JAVA_SLEEPYCAT)/bdb/collection/StoredSortedEntrySet.java \ - $(JAVA_SLEEPYCAT)/bdb/collection/StoredSortedKeySet.java \ - $(JAVA_SLEEPYCAT)/bdb/collection/StoredSortedMap.java \ - $(JAVA_SLEEPYCAT)/bdb/collection/StoredSortedValueSet.java \ - $(JAVA_SLEEPYCAT)/bdb/collection/StoredValueSet.java \ - 
$(JAVA_SLEEPYCAT)/bdb/factory/TupleSerialDbFactory.java \ - $(JAVA_SLEEPYCAT)/bdb/util/ExceptionUnwrapper.java \ - $(JAVA_SLEEPYCAT)/bdb/util/ExceptionWrapper.java \ - $(JAVA_SLEEPYCAT)/bdb/util/FastInputStream.java \ - $(JAVA_SLEEPYCAT)/bdb/util/FastOutputStream.java \ - $(JAVA_SLEEPYCAT)/bdb/util/IOExceptionWrapper.java \ - $(JAVA_SLEEPYCAT)/bdb/util/RuntimeExceptionWrapper.java \ - $(JAVA_SLEEPYCAT)/bdb/util/TimeUnits.java \ - $(JAVA_SLEEPYCAT)/bdb/util/UtfOps.java + $(JAVA_SLEEPYCAT)/bind/ByteArrayBinding.java \ + $(JAVA_SLEEPYCAT)/bind/EntityBinding.java \ + $(JAVA_SLEEPYCAT)/bind/EntryBinding.java \ + $(JAVA_SLEEPYCAT)/bind/RecordNumberBinding.java \ + $(JAVA_SLEEPYCAT)/bind/serial/ClassCatalog.java \ + $(JAVA_SLEEPYCAT)/bind/serial/SerialBinding.java \ + $(JAVA_SLEEPYCAT)/bind/serial/SerialInput.java \ + $(JAVA_SLEEPYCAT)/bind/serial/SerialOutput.java \ + $(JAVA_SLEEPYCAT)/bind/serial/SerialSerialBinding.java \ + $(JAVA_SLEEPYCAT)/bind/serial/SerialSerialKeyCreator.java \ + $(JAVA_SLEEPYCAT)/bind/serial/StoredClassCatalog.java \ + $(JAVA_SLEEPYCAT)/bind/serial/TupleSerialBinding.java \ + $(JAVA_SLEEPYCAT)/bind/serial/TupleSerialKeyCreator.java \ + $(JAVA_SLEEPYCAT)/bind/serial/TupleSerialMarshalledBinding.java \ + $(JAVA_SLEEPYCAT)/bind/serial/TupleSerialMarshalledKeyCreator.java \ + $(JAVA_SLEEPYCAT)/bind/tuple/BooleanBinding.java \ + $(JAVA_SLEEPYCAT)/bind/tuple/ByteBinding.java \ + $(JAVA_SLEEPYCAT)/bind/tuple/CharacterBinding.java \ + $(JAVA_SLEEPYCAT)/bind/tuple/DoubleBinding.java \ + $(JAVA_SLEEPYCAT)/bind/tuple/FloatBinding.java \ + $(JAVA_SLEEPYCAT)/bind/tuple/IntegerBinding.java \ + $(JAVA_SLEEPYCAT)/bind/tuple/LongBinding.java \ + $(JAVA_SLEEPYCAT)/bind/tuple/MarshalledTupleEntry.java \ + $(JAVA_SLEEPYCAT)/bind/tuple/MarshalledTupleKeyEntity.java \ + $(JAVA_SLEEPYCAT)/bind/tuple/ShortBinding.java \ + $(JAVA_SLEEPYCAT)/bind/tuple/StringBinding.java \ + $(JAVA_SLEEPYCAT)/bind/tuple/TupleBinding.java \ + $(JAVA_SLEEPYCAT)/bind/tuple/TupleInput.java \ + $(JAVA_SLEEPYCAT)/bind/tuple/TupleInputBinding.java \ + $(JAVA_SLEEPYCAT)/bind/tuple/TupleMarshalledBinding.java \ + $(JAVA_SLEEPYCAT)/bind/tuple/TupleOutput.java \ + $(JAVA_SLEEPYCAT)/bind/tuple/TupleTupleBinding.java \ + $(JAVA_SLEEPYCAT)/bind/tuple/TupleTupleKeyCreator.java \ + $(JAVA_SLEEPYCAT)/bind/tuple/TupleTupleMarshalledBinding.java \ + $(JAVA_SLEEPYCAT)/bind/tuple/TupleTupleMarshalledKeyCreator.java \ + $(JAVA_SLEEPYCAT)/collections/CurrentTransaction.java \ + $(JAVA_SLEEPYCAT)/collections/DataCursor.java \ + $(JAVA_SLEEPYCAT)/collections/DataView.java \ + $(JAVA_SLEEPYCAT)/collections/KeyRange.java \ + $(JAVA_SLEEPYCAT)/collections/KeyRangeException.java \ + $(JAVA_SLEEPYCAT)/collections/MapEntryParameter.java \ + $(JAVA_SLEEPYCAT)/collections/PrimaryKeyAssigner.java \ + $(JAVA_SLEEPYCAT)/collections/RangeCursor.java \ + $(JAVA_SLEEPYCAT)/collections/StoredCollection.java \ + $(JAVA_SLEEPYCAT)/collections/StoredCollections.java \ + $(JAVA_SLEEPYCAT)/collections/StoredContainer.java \ + $(JAVA_SLEEPYCAT)/collections/StoredEntrySet.java \ + $(JAVA_SLEEPYCAT)/collections/StoredIterator.java \ + $(JAVA_SLEEPYCAT)/collections/StoredKeySet.java \ + $(JAVA_SLEEPYCAT)/collections/StoredList.java \ + $(JAVA_SLEEPYCAT)/collections/StoredMap.java \ + $(JAVA_SLEEPYCAT)/collections/StoredMapEntry.java \ + $(JAVA_SLEEPYCAT)/collections/StoredSortedEntrySet.java \ + $(JAVA_SLEEPYCAT)/collections/StoredSortedKeySet.java \ + $(JAVA_SLEEPYCAT)/collections/StoredSortedMap.java \ + $(JAVA_SLEEPYCAT)/collections/StoredSortedValueSet.java 
\ + $(JAVA_SLEEPYCAT)/collections/StoredValueSet.java \ + $(JAVA_SLEEPYCAT)/collections/TransactionRunner.java \ + $(JAVA_SLEEPYCAT)/collections/TransactionWorker.java \ + $(JAVA_SLEEPYCAT)/collections/TupleSerialFactory.java \ + $(JAVA_SLEEPYCAT)/compat/DbCompat.java \ + $(JAVA_SLEEPYCAT)/db/BtreePrefixCalculator.java \ + $(JAVA_SLEEPYCAT)/db/BtreeStats.java \ + $(JAVA_SLEEPYCAT)/db/CacheFile.java \ + $(JAVA_SLEEPYCAT)/db/CacheFilePriority.java \ + $(JAVA_SLEEPYCAT)/db/CacheFileStats.java \ + $(JAVA_SLEEPYCAT)/db/CacheStats.java \ + $(JAVA_SLEEPYCAT)/db/CheckpointConfig.java \ + $(JAVA_SLEEPYCAT)/db/Cursor.java \ + $(JAVA_SLEEPYCAT)/db/CursorConfig.java \ + $(JAVA_SLEEPYCAT)/db/Database.java \ + $(JAVA_SLEEPYCAT)/db/DatabaseConfig.java \ + $(JAVA_SLEEPYCAT)/db/DatabaseEntry.java \ + $(JAVA_SLEEPYCAT)/db/DatabaseException.java \ + $(JAVA_SLEEPYCAT)/db/DatabaseStats.java \ + $(JAVA_SLEEPYCAT)/db/DatabaseType.java \ + $(JAVA_SLEEPYCAT)/db/DeadlockException.java \ + $(JAVA_SLEEPYCAT)/db/Environment.java \ + $(JAVA_SLEEPYCAT)/db/EnvironmentConfig.java \ + $(JAVA_SLEEPYCAT)/db/ErrorHandler.java \ + $(JAVA_SLEEPYCAT)/db/FeedbackHandler.java \ + $(JAVA_SLEEPYCAT)/db/HashStats.java \ + $(JAVA_SLEEPYCAT)/db/Hasher.java \ + $(JAVA_SLEEPYCAT)/db/JoinConfig.java \ + $(JAVA_SLEEPYCAT)/db/JoinCursor.java \ + $(JAVA_SLEEPYCAT)/db/KeyRange.java \ + $(JAVA_SLEEPYCAT)/db/Lock.java \ + $(JAVA_SLEEPYCAT)/db/LockDetectMode.java \ + $(JAVA_SLEEPYCAT)/db/LockMode.java \ + $(JAVA_SLEEPYCAT)/db/LockNotGrantedException.java \ + $(JAVA_SLEEPYCAT)/db/LockOperation.java \ + $(JAVA_SLEEPYCAT)/db/LockRequest.java \ + $(JAVA_SLEEPYCAT)/db/LockRequestMode.java \ + $(JAVA_SLEEPYCAT)/db/LockStats.java \ + $(JAVA_SLEEPYCAT)/db/LogCursor.java \ + $(JAVA_SLEEPYCAT)/db/LogRecordHandler.java \ + $(JAVA_SLEEPYCAT)/db/LogSequenceNumber.java \ + $(JAVA_SLEEPYCAT)/db/LogStats.java \ + $(JAVA_SLEEPYCAT)/db/MemoryException.java \ + $(JAVA_SLEEPYCAT)/db/MessageHandler.java \ + $(JAVA_SLEEPYCAT)/db/MultipleDataEntry.java \ + $(JAVA_SLEEPYCAT)/db/MultipleEntry.java \ + $(JAVA_SLEEPYCAT)/db/MultipleKeyDataEntry.java \ + $(JAVA_SLEEPYCAT)/db/MultipleRecnoDataEntry.java \ + $(JAVA_SLEEPYCAT)/db/OperationStatus.java \ + $(JAVA_SLEEPYCAT)/db/PanicHandler.java \ + $(JAVA_SLEEPYCAT)/db/PreparedTransaction.java \ + $(JAVA_SLEEPYCAT)/db/QueueStats.java \ + $(JAVA_SLEEPYCAT)/db/RecordNumberAppender.java \ + $(JAVA_SLEEPYCAT)/db/RecoveryOperation.java \ + $(JAVA_SLEEPYCAT)/db/ReplicationHandleDeadException.java \ + $(JAVA_SLEEPYCAT)/db/ReplicationStats.java \ + $(JAVA_SLEEPYCAT)/db/ReplicationStatus.java \ + $(JAVA_SLEEPYCAT)/db/ReplicationTransport.java \ + $(JAVA_SLEEPYCAT)/db/RunRecoveryException.java \ + $(JAVA_SLEEPYCAT)/db/SecondaryConfig.java \ + $(JAVA_SLEEPYCAT)/db/SecondaryCursor.java \ + $(JAVA_SLEEPYCAT)/db/SecondaryDatabase.java \ + $(JAVA_SLEEPYCAT)/db/SecondaryKeyCreator.java \ + $(JAVA_SLEEPYCAT)/db/Sequence.java \ + $(JAVA_SLEEPYCAT)/db/SequenceConfig.java \ + $(JAVA_SLEEPYCAT)/db/SequenceStats.java \ + $(JAVA_SLEEPYCAT)/db/StatsConfig.java \ + $(JAVA_SLEEPYCAT)/db/Transaction.java \ + $(JAVA_SLEEPYCAT)/db/TransactionConfig.java \ + $(JAVA_SLEEPYCAT)/db/TransactionStats.java \ + $(JAVA_SLEEPYCAT)/db/VerifyConfig.java \ + $(JAVA_SLEEPYCAT)/db/internal/Db.java \ + $(JAVA_SLEEPYCAT)/db/internal/DbClient.java \ + $(JAVA_SLEEPYCAT)/db/internal/DbConstants.java \ + $(JAVA_SLEEPYCAT)/db/internal/DbEnv.java \ + $(JAVA_SLEEPYCAT)/db/internal/DbLock.java \ + $(JAVA_SLEEPYCAT)/db/internal/DbLogc.java \ + 
$(JAVA_SLEEPYCAT)/db/internal/DbMpoolFile.java \ + $(JAVA_SLEEPYCAT)/db/internal/DbSequence.java \ + $(JAVA_SLEEPYCAT)/db/internal/DbTxn.java \ + $(JAVA_SLEEPYCAT)/db/internal/DbUtil.java \ + $(JAVA_SLEEPYCAT)/db/internal/Dbc.java \ + $(JAVA_SLEEPYCAT)/db/internal/db_java.java \ + $(JAVA_SLEEPYCAT)/db/internal/db_javaJNI.java \ + $(JAVA_SLEEPYCAT)/util/ExceptionUnwrapper.java \ + $(JAVA_SLEEPYCAT)/util/ExceptionWrapper.java \ + $(JAVA_SLEEPYCAT)/util/FastInputStream.java \ + $(JAVA_SLEEPYCAT)/util/FastOutputStream.java \ + $(JAVA_SLEEPYCAT)/util/IOExceptionWrapper.java \ + $(JAVA_SLEEPYCAT)/util/RuntimeExceptionWrapper.java \ + $(JAVA_SLEEPYCAT)/util/UtfOps.java JAVA_EXSRCS=\ + $(JAVA_EXDIR)/collections/access/AccessExample.java \ + $(JAVA_EXDIR)/collections/hello/HelloDatabaseWorld.java \ + $(JAVA_EXDIR)/collections/ship/basic/PartData.java \ + $(JAVA_EXDIR)/collections/ship/basic/PartKey.java \ + $(JAVA_EXDIR)/collections/ship/basic/Sample.java \ + $(JAVA_EXDIR)/collections/ship/basic/SampleDatabase.java \ + $(JAVA_EXDIR)/collections/ship/basic/SampleViews.java \ + $(JAVA_EXDIR)/collections/ship/basic/ShipmentData.java \ + $(JAVA_EXDIR)/collections/ship/basic/ShipmentKey.java \ + $(JAVA_EXDIR)/collections/ship/basic/SupplierData.java \ + $(JAVA_EXDIR)/collections/ship/basic/SupplierKey.java \ + $(JAVA_EXDIR)/collections/ship/basic/Weight.java \ + $(JAVA_EXDIR)/collections/ship/entity/Part.java \ + $(JAVA_EXDIR)/collections/ship/entity/PartData.java \ + $(JAVA_EXDIR)/collections/ship/entity/PartKey.java \ + $(JAVA_EXDIR)/collections/ship/entity/Sample.java \ + $(JAVA_EXDIR)/collections/ship/entity/SampleDatabase.java \ + $(JAVA_EXDIR)/collections/ship/entity/SampleViews.java \ + $(JAVA_EXDIR)/collections/ship/entity/Shipment.java \ + $(JAVA_EXDIR)/collections/ship/entity/ShipmentData.java \ + $(JAVA_EXDIR)/collections/ship/entity/ShipmentKey.java \ + $(JAVA_EXDIR)/collections/ship/entity/Supplier.java \ + $(JAVA_EXDIR)/collections/ship/entity/SupplierData.java \ + $(JAVA_EXDIR)/collections/ship/entity/SupplierKey.java \ + $(JAVA_EXDIR)/collections/ship/entity/Weight.java \ + $(JAVA_EXDIR)/collections/ship/factory/Part.java \ + $(JAVA_EXDIR)/collections/ship/factory/PartKey.java \ + $(JAVA_EXDIR)/collections/ship/factory/Sample.java \ + $(JAVA_EXDIR)/collections/ship/factory/SampleDatabase.java \ + $(JAVA_EXDIR)/collections/ship/factory/SampleViews.java \ + $(JAVA_EXDIR)/collections/ship/factory/Shipment.java \ + $(JAVA_EXDIR)/collections/ship/factory/ShipmentKey.java \ + $(JAVA_EXDIR)/collections/ship/factory/Supplier.java \ + $(JAVA_EXDIR)/collections/ship/factory/SupplierKey.java \ + $(JAVA_EXDIR)/collections/ship/factory/Weight.java \ + $(JAVA_EXDIR)/collections/ship/index/PartData.java \ + $(JAVA_EXDIR)/collections/ship/index/PartKey.java \ + $(JAVA_EXDIR)/collections/ship/index/Sample.java \ + $(JAVA_EXDIR)/collections/ship/index/SampleDatabase.java \ + $(JAVA_EXDIR)/collections/ship/index/SampleViews.java \ + $(JAVA_EXDIR)/collections/ship/index/ShipmentData.java \ + $(JAVA_EXDIR)/collections/ship/index/ShipmentKey.java \ + $(JAVA_EXDIR)/collections/ship/index/SupplierData.java \ + $(JAVA_EXDIR)/collections/ship/index/SupplierKey.java \ + $(JAVA_EXDIR)/collections/ship/index/Weight.java \ + $(JAVA_EXDIR)/collections/ship/marshal/MarshalledEntity.java \ + $(JAVA_EXDIR)/collections/ship/marshal/MarshalledKey.java \ + $(JAVA_EXDIR)/collections/ship/marshal/Part.java \ + $(JAVA_EXDIR)/collections/ship/marshal/PartKey.java \ + $(JAVA_EXDIR)/collections/ship/marshal/Sample.java \ + 
$(JAVA_EXDIR)/collections/ship/marshal/SampleDatabase.java \ + $(JAVA_EXDIR)/collections/ship/marshal/SampleViews.java \ + $(JAVA_EXDIR)/collections/ship/marshal/Shipment.java \ + $(JAVA_EXDIR)/collections/ship/marshal/ShipmentKey.java \ + $(JAVA_EXDIR)/collections/ship/marshal/Supplier.java \ + $(JAVA_EXDIR)/collections/ship/marshal/SupplierKey.java \ + $(JAVA_EXDIR)/collections/ship/marshal/Weight.java \ + $(JAVA_EXDIR)/collections/ship/sentity/Part.java \ + $(JAVA_EXDIR)/collections/ship/sentity/PartKey.java \ + $(JAVA_EXDIR)/collections/ship/sentity/Sample.java \ + $(JAVA_EXDIR)/collections/ship/sentity/SampleDatabase.java \ + $(JAVA_EXDIR)/collections/ship/sentity/SampleViews.java \ + $(JAVA_EXDIR)/collections/ship/sentity/Shipment.java \ + $(JAVA_EXDIR)/collections/ship/sentity/ShipmentKey.java \ + $(JAVA_EXDIR)/collections/ship/sentity/Supplier.java \ + $(JAVA_EXDIR)/collections/ship/sentity/SupplierKey.java \ + $(JAVA_EXDIR)/collections/ship/sentity/Weight.java \ + $(JAVA_EXDIR)/collections/ship/tuple/Part.java \ + $(JAVA_EXDIR)/collections/ship/tuple/PartData.java \ + $(JAVA_EXDIR)/collections/ship/tuple/PartKey.java \ + $(JAVA_EXDIR)/collections/ship/tuple/Sample.java \ + $(JAVA_EXDIR)/collections/ship/tuple/SampleDatabase.java \ + $(JAVA_EXDIR)/collections/ship/tuple/SampleViews.java \ + $(JAVA_EXDIR)/collections/ship/tuple/Shipment.java \ + $(JAVA_EXDIR)/collections/ship/tuple/ShipmentData.java \ + $(JAVA_EXDIR)/collections/ship/tuple/ShipmentKey.java \ + $(JAVA_EXDIR)/collections/ship/tuple/Supplier.java \ + $(JAVA_EXDIR)/collections/ship/tuple/SupplierData.java \ + $(JAVA_EXDIR)/collections/ship/tuple/SupplierKey.java \ + $(JAVA_EXDIR)/collections/ship/tuple/Weight.java \ $(JAVA_EXDIR)/db/AccessExample.java \ $(JAVA_EXDIR)/db/BtRecExample.java \ $(JAVA_EXDIR)/db/BulkAccessExample.java \ $(JAVA_EXDIR)/db/EnvExample.java \ + $(JAVA_EXDIR)/db/GettingStarted/ExampleDatabaseLoad.java \ + $(JAVA_EXDIR)/db/GettingStarted/ExampleDatabaseRead.java \ + $(JAVA_EXDIR)/db/GettingStarted/Inventory.java \ + $(JAVA_EXDIR)/db/GettingStarted/InventoryBinding.java \ + $(JAVA_EXDIR)/db/GettingStarted/ItemNameKeyCreator.java \ + $(JAVA_EXDIR)/db/GettingStarted/MyDbs.java \ + $(JAVA_EXDIR)/db/GettingStarted/Vendor.java \ $(JAVA_EXDIR)/db/LockExample.java \ - $(JAVA_EXDIR)/db/TpcbExample.java \ - $(JAVA_EXDIR)/bdb/access/AccessExample.java \ - $(JAVA_EXDIR)/bdb/helloworld/HelloDatabaseWorld.java \ - $(JAVA_EXDIR)/bdb/shipment/basic/PartKey.java \ - $(JAVA_EXDIR)/bdb/shipment/basic/PartValue.java \ - $(JAVA_EXDIR)/bdb/shipment/basic/Sample.java \ - $(JAVA_EXDIR)/bdb/shipment/basic/SampleDatabase.java \ - $(JAVA_EXDIR)/bdb/shipment/basic/SampleViews.java \ - $(JAVA_EXDIR)/bdb/shipment/basic/ShipmentKey.java \ - $(JAVA_EXDIR)/bdb/shipment/basic/ShipmentValue.java \ - $(JAVA_EXDIR)/bdb/shipment/basic/SupplierKey.java \ - $(JAVA_EXDIR)/bdb/shipment/basic/SupplierValue.java \ - $(JAVA_EXDIR)/bdb/shipment/basic/Weight.java \ - $(JAVA_EXDIR)/bdb/shipment/entity/Part.java \ - $(JAVA_EXDIR)/bdb/shipment/entity/PartKey.java \ - $(JAVA_EXDIR)/bdb/shipment/entity/PartValue.java \ - $(JAVA_EXDIR)/bdb/shipment/entity/Sample.java \ - $(JAVA_EXDIR)/bdb/shipment/entity/SampleDatabase.java \ - $(JAVA_EXDIR)/bdb/shipment/entity/SampleViews.java \ - $(JAVA_EXDIR)/bdb/shipment/entity/Shipment.java \ - $(JAVA_EXDIR)/bdb/shipment/entity/ShipmentKey.java \ - $(JAVA_EXDIR)/bdb/shipment/entity/ShipmentValue.java \ - $(JAVA_EXDIR)/bdb/shipment/entity/Supplier.java \ - $(JAVA_EXDIR)/bdb/shipment/entity/SupplierKey.java \ - 
$(JAVA_EXDIR)/bdb/shipment/entity/SupplierValue.java \ - $(JAVA_EXDIR)/bdb/shipment/entity/Weight.java \ - $(JAVA_EXDIR)/bdb/shipment/tuple/Part.java \ - $(JAVA_EXDIR)/bdb/shipment/tuple/PartKey.java \ - $(JAVA_EXDIR)/bdb/shipment/tuple/PartValue.java \ - $(JAVA_EXDIR)/bdb/shipment/tuple/Sample.java \ - $(JAVA_EXDIR)/bdb/shipment/tuple/SampleDatabase.java \ - $(JAVA_EXDIR)/bdb/shipment/tuple/SampleViews.java \ - $(JAVA_EXDIR)/bdb/shipment/tuple/Shipment.java \ - $(JAVA_EXDIR)/bdb/shipment/tuple/ShipmentKey.java \ - $(JAVA_EXDIR)/bdb/shipment/tuple/ShipmentValue.java \ - $(JAVA_EXDIR)/bdb/shipment/tuple/Supplier.java \ - $(JAVA_EXDIR)/bdb/shipment/tuple/SupplierKey.java \ - $(JAVA_EXDIR)/bdb/shipment/tuple/SupplierValue.java \ - $(JAVA_EXDIR)/bdb/shipment/tuple/Weight.java \ - $(JAVA_EXDIR)/bdb/shipment/sentity/Part.java \ - $(JAVA_EXDIR)/bdb/shipment/sentity/PartKey.java \ - $(JAVA_EXDIR)/bdb/shipment/sentity/Sample.java \ - $(JAVA_EXDIR)/bdb/shipment/sentity/SampleDatabase.java \ - $(JAVA_EXDIR)/bdb/shipment/sentity/SampleViews.java \ - $(JAVA_EXDIR)/bdb/shipment/sentity/Shipment.java \ - $(JAVA_EXDIR)/bdb/shipment/sentity/ShipmentKey.java \ - $(JAVA_EXDIR)/bdb/shipment/sentity/Supplier.java \ - $(JAVA_EXDIR)/bdb/shipment/sentity/SupplierKey.java \ - $(JAVA_EXDIR)/bdb/shipment/sentity/Weight.java \ - $(JAVA_EXDIR)/bdb/shipment/marshal/MarshalledEntity.java \ - $(JAVA_EXDIR)/bdb/shipment/marshal/MarshalledKey.java \ - $(JAVA_EXDIR)/bdb/shipment/marshal/Part.java \ - $(JAVA_EXDIR)/bdb/shipment/marshal/PartKey.java \ - $(JAVA_EXDIR)/bdb/shipment/marshal/Sample.java \ - $(JAVA_EXDIR)/bdb/shipment/marshal/SampleDatabase.java \ - $(JAVA_EXDIR)/bdb/shipment/marshal/SampleViews.java \ - $(JAVA_EXDIR)/bdb/shipment/marshal/Shipment.java \ - $(JAVA_EXDIR)/bdb/shipment/marshal/ShipmentKey.java \ - $(JAVA_EXDIR)/bdb/shipment/marshal/Supplier.java \ - $(JAVA_EXDIR)/bdb/shipment/marshal/SupplierKey.java \ - $(JAVA_EXDIR)/bdb/shipment/marshal/Weight.java \ - $(JAVA_EXDIR)/bdb/shipment/factory/Part.java \ - $(JAVA_EXDIR)/bdb/shipment/factory/PartKey.java \ - $(JAVA_EXDIR)/bdb/shipment/factory/Sample.java \ - $(JAVA_EXDIR)/bdb/shipment/factory/SampleDatabase.java \ - $(JAVA_EXDIR)/bdb/shipment/factory/SampleViews.java \ - $(JAVA_EXDIR)/bdb/shipment/factory/Shipment.java \ - $(JAVA_EXDIR)/bdb/shipment/factory/ShipmentKey.java \ - $(JAVA_EXDIR)/bdb/shipment/factory/Supplier.java \ - $(JAVA_EXDIR)/bdb/shipment/factory/SupplierKey.java \ - $(JAVA_EXDIR)/bdb/shipment/factory/Weight.java + $(JAVA_EXDIR)/db/RPCExample.java \ + $(JAVA_EXDIR)/db/SequenceExample.java \ + $(JAVA_EXDIR)/db/TpcbExample.java TCL_OBJS=\ tcl_compat@o@ tcl_db@o@ tcl_db_pkg@o@ tcl_dbcursor@o@ tcl_env@o@ \ tcl_internal@o@ tcl_lock@o@ tcl_log@o@ tcl_mp@o@ tcl_rep@o@ \ - tcl_txn@o@ tcl_util@o@ + tcl_seq@o@ tcl_txn@o@ tcl_util@o@ RPC_CLIENT_OBJS=\ client@o@ db_server_clnt@o@ db_server_xdr@o@ gen_client@o@ \ @@ -433,16 +483,19 @@ RPC_CXXSRV_OBJS=\ gen_db_server@o@ RPC_JAVASRV_SRCS=\ - $(JAVA_RPCDIR)/DbDispatcher.java \ - $(JAVA_RPCDIR)/DbServer.java \ + $(JAVA_RPCDIR)/AssociateCallbacks.java \ + $(JAVA_RPCDIR)/Dispatcher.java \ $(JAVA_RPCDIR)/FreeList.java \ + $(JAVA_RPCDIR)/JoinCursorAdapter.java \ $(JAVA_RPCDIR)/LocalIterator.java \ $(JAVA_RPCDIR)/RpcDb.java \ $(JAVA_RPCDIR)/RpcDbEnv.java \ $(JAVA_RPCDIR)/RpcDbTxn.java \ $(JAVA_RPCDIR)/RpcDbc.java \ + $(JAVA_RPCDIR)/Server.java \ $(JAVA_RPCDIR)/Timer.java \ - $(JAVA_RPCDIR)/gen/DbServerStub.java \ + $(JAVA_RPCDIR)/Util.java \ + $(JAVA_RPCDIR)/gen/ServerStubs.java \ 
$(JAVA_RPCDIR)/gen/__db_associate_msg.java \ $(JAVA_RPCDIR)/gen/__db_associate_reply.java \ $(JAVA_RPCDIR)/gen/__db_bt_maxkey_msg.java \ @@ -583,8 +636,8 @@ RPC_JAVASRV_SRCS=\ UTIL_PROGS=\ @ADDITIONAL_PROGS@ \ - db_archive db_checkpoint db_deadlock \ - db_dump db_load db_printlog db_recover db_stat db_upgrade db_verify + db_archive db_checkpoint db_deadlock db_dump db_load db_printlog \ + db_recover db_stat db_upgrade db_verify ################################################## # List of files installed into the library directory. @@ -617,7 +670,6 @@ LIB_INSTALL_FILE_LIST=\ $(libj_jarfile) ################################################## -# We're building a standard library or a RPM file hierarchy. # Note: "all" must be the first target in the Makefile. ################################################## all: @BUILD_TARGET@ @@ -631,38 +683,36 @@ library_build: @INSTALL_LIBS@ @ADDITIONAL_LANG@ $(UTIL_PROGS) # Static C library named libdb.a. $(libdb): $(DEF_LIB) - $(rm) -f $@ - test ! -f .libs/$(libdb_version) || \ - $(ln) -s .libs/$(libdb_version) $@ - test -f .libs/$(libdb_version) || \ - (test -f $(libdb_version) && $(ln) -s $(libdb_version) $@) # Real static C library. $(libdb_version): $(C_OBJS) $(ar) cr $@ $(C_OBJS) test ! -f $(ranlib) || $(ranlib) $@ + $(rm) -f $(libdb) + $(ln) -s $(libdb_version) $(libdb) # Shared C library. $(libso_target): $(C_OBJS) $(SOLINK) $(SOFLAGS) $(LDFLAGS) -o $@ $(C_OBJS) $(LIBSO_LIBS) + $(rm) -f $(libdb) + $(ln) -s .libs/$(libdb_version) $(libdb) # Static C++ library named libdb_cxx.a. $(libcxx): $(DEF_LIB_CXX) - $(rm) -f $@ - test ! -f .libs/$(libcxx_version) || \ - $(ln) -s .libs/$(libcxx_version) $@ - test -f .libs/$(libcxx_version) || \ - (test -f $(libcxx_version) && $(ln) -s $(libcxx_version) $@) # Real static C++ library. $(libcxx_version): $(CXX_OBJS) $(C_OBJS) $(ar) cr $@ $(CXX_OBJS) $(C_OBJS) test ! -f $(ranlib) || $(ranlib) $@ + $(rm) -f $(libcxx) + $(ln) -s $(libcxx_version) $(libcxx) # Shared C++ library. $(libxso_target): $(CXX_OBJS) $(C_OBJS) $(XSOLINK) $(SOFLAGS) $(LDFLAGS) \ -o $@ $(CXX_OBJS) $(C_OBJS) $(LIBXSO_LIBS) + $(rm) -f $(libcxx) + $(ln) -s .libs/$(libcxx_version) $(libcxx) # Shared Java library. 
$(libjso_target): $(JAVA_OBJS) $(C_OBJS) @@ -716,11 +766,11 @@ berkeley_db_cxxsvc: $(RPC_CXXSRV_OBJS) util_log@o@ $(DEF_LIB_CXX) $(POSTLINK) $@ berkeley_db_javasvc: $(rpc_jarfile) - echo > $@ "#!/bin/sh" - echo >> $@ CLASSPATH="$(CLASSPATH):$(rpc_jarfile):$(JAVA_RPCDIR)/oncrpc.jar" - echo >> $@ LD_LIBRARY_PATH=.libs - echo >> $@ export CLASSPATH LD_LIBRARY_PATH - echo >> $@ exec java com.sleepycat.db.rpcserver.DbServer \$$@ + echo "#!/bin/sh" > $@ + echo CLASSPATH="$(CLASSPATH):$(rpc_jarfile):$(JAVA_RPCDIR)/oncrpc.jar" >> $@ + echo LD_LIBRARY_PATH=.libs >> $@ + echo export CLASSPATH LD_LIBRARY_PATH >> $@ + echo exec java com.sleepycat.db.rpcserver.Server \$$@ >> $@ chmod +x $@ db_archive: db_archive@o@ util_sig@o@ $(DEF_LIB) @@ -752,9 +802,9 @@ db_load: db_load@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) db_load@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) $(LIBS) $(POSTLINK) $@ -db_printlog: db_printlog@o@ util_sig@o@ $(DEF_LIB) +db_printlog: db_printlog@o@ $(PRINT_OBJS) util_sig@o@ $(DEF_LIB) $(CCLINK) -o $@ $(LDFLAGS) \ - db_printlog@o@ util_sig@o@ $(DEF_LIB) $(LIBS) + db_printlog@o@ $(PRINT_OBJS) util_sig@o@ $(DEF_LIB) $(LIBS) $(POSTLINK) $@ db_recover: db_recover@o@ util_sig@o@ $(DEF_LIB) @@ -845,7 +895,9 @@ uninstall_utilities: $(rm) -f $$i $$i.exe; \ done) -DOCLIST=api_c api_cxx api_tcl images index.html java ref sleepycat utility +DOCLIST=api_c api_cxx api_tcl collections gsg images index.html java ref \ + sleepycat utility + install_docs: @echo "Installing documentation: $(DESTDIR)$(docdir) ..." @test -d $(DESTDIR)$(docdir) || \ @@ -857,35 +909,17 @@ install_docs: uninstall_docs: @cd $(docdir) && $(rm) -rf $(DESTDIR)$(DOCLIST) -################################################## -# RPM, build and install. -################################################## -rpm_build: - @test -f @db_cv_path_rpm_archive@ || \ - (echo "@db_cv_path_rpm_archive@: archive file not found" && false) - @$(rm) -rf BUILD RPMS SOURCES SPECS SRPMS RPM_INSTALL - @$(mkdir) -p BUILD && $(chmod) $(dmode) BUILD - @$(mkdir) -p RPMS/i386 && $(chmod) $(dmode) RPMS RPMS/i386 - @$(mkdir) -p SOURCES && $(chmod) $(dmode) SOURCES - @$(mkdir) -p SPECS && $(chmod) $(dmode) SPECS - @$(mkdir) -p SRPMS && $(chmod) $(dmode) SRPMS - $(cp) @db_cv_path_rpm_archive@ SOURCES/ - $(cp) db.spec SPECS/db.spec - @RPM_BUILD@ --define="`cat rpm-macro-defines`" -ba SPECS/db.spec - -rpm_install: - -RPM_SRPMS=db-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@-1.src.rpm - ################################################## # Remaining standard Makefile targets. 
################################################## CLEAN_LIST=\ - berkeley_db_svc berkeley_db_cxxsvc berkeley_db_javasvc \ - db_dump185 db_perf dbs bench_001 \ - ex_access ex_apprec ex_btrec ex_dbclient ex_env ex_lock ex_mpool \ - ex_repquote ex_thread ex_tpcb excxx_access excxx_btrec excxx_env \ - excxx_lock excxx_mpool excxx_tpcb rpmrc + bench_001 berkeley_db_cxxsvc berkeley_db_javasvc berkeley_db_svc \ + db_dump185 db_perf dbs ex_access ex_apprec ex_btrec ex_dbclient \ + ex_env ex_lock ex_mpool ex_repquote ex_sequence ex_thread \ + ex_tpcb example_database_load example_database_read excxx_access \ + excxx_btrec excxx_env excxx_example_database_load \ + excxx_example_database_read excxx_lock excxx_mpool \ + excxx_sequence excxx_tpcb mostly-clean clean: $(rm) -rf $(C_OBJS) @@ -895,14 +929,16 @@ mostly-clean clean: $(rm) -rf $(JAVA_CLASSTOP) $(JAVA_EXCLASSTOP) $(rm) -rf $(JAVA_RPCCLASSES) $(rpc_jarfile) $(rm) -rf tags *@o@ *.o *.o.lock *.lo core *.core - $(rm) -rf ALL.OUT.* BUILD PARALLEL_TESTDIR.* RPMS RPM_INSTALL - $(rm) -rf RUN_LOG RUNQUEUE SOURCES SPECS SRPMS TESTDIR TESTDIR.A + $(rm) -rf ALL.OUT.* PARALLEL_TESTDIR.* + $(rm) -rf RUN_LOG RUNQUEUE TESTDIR TESTDIR.A $(rm) -rf TEST.LIST logtrack_seen.db tm .libs $(LIB_INSTALL_FILE_LIST) REALCLEAN_LIST=\ Makefile confdefs.h config.cache config.log config.status \ - configure.lineno db.h db.spec db185_int.h db_185.h db_config.h \ - db_cxx.h db_int.h db_int_def.h include.tcl rpm-macro-defines win_db.h + configure.lineno db.h db185_int.h db_185.h db_config.h \ + db_cxx.h db_int.h db_int_def.h include.tcl \ + db_server.h db_server_clnt.c db_server_svc.c db_server_xdr.c \ + gen_db_server.c win_db.h distclean maintainer-clean realclean: clean $(rm) -rf $(REALCLEAN_LIST) @@ -911,7 +947,7 @@ distclean maintainer-clean realclean: clean check depend dvi info obj TAGS: @echo "$@: make target not supported" && true -dist: +dist rpm rpmbuild: @echo "$@: make target not supported" && false ################################################## @@ -945,7 +981,7 @@ DBS_OBJS=\ dbs_yield@o@ dbs: $(DBS_OBJS) $(DEF_LIB) $(CCLINK) -o $@ \ - $(LDFLAGS) $(DBS_OBJS) $(DEF_LIB) @LOAD_LIBS@ $(LIBS) + $(LDFLAGS) $(DBS_OBJS) $(DEF_LIB) $(TEST_LIBS) $(LIBS) $(POSTLINK) $@ db_perf@o@: $(srcdir)/test_perf/db_perf.c @@ -999,13 +1035,13 @@ DBPERF_OBJS=\ db_perf: $(DBPERF_OBJS) $(DEF_LIB) $(CCLINK) -o $@ \ - $(LDFLAGS) $(DBPERF_OBJS) $(DEF_LIB) @LOAD_LIBS@ $(LIBS) + $(LDFLAGS) $(DBPERF_OBJS) $(DEF_LIB) $(TEST_LIBS) $(LIBS) $(POSTLINK) $@ tm@o@: $(srcdir)/mutex/tm.c $(CC) $(CFLAGS) $? tm: tm@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) tm@o@ $(DEF_LIB) @LOAD_LIBS@ $(LIBS) + $(CCLINK) -o $@ $(LDFLAGS) tm@o@ $(DEF_LIB) $(TEST_LIBS) $(LIBS) $(POSTLINK) $@ ################################################## @@ -1032,7 +1068,7 @@ ex_apprec_rec@o@: $(srcdir)/examples_c/ex_apprec/ex_apprec_rec.c EX_APPREC_OBJS=ex_apprec@o@ ex_apprec_auto@o@ ex_apprec_rec@o@ ex_apprec: $(EX_APPREC_OBJS) $(DEF_LIB) $(CCLINK) -o $@ \ - $(LDFLAGS) $(EX_APPREC_OBJS) $(DEF_LIB) @LOAD_LIBS@ $(LIBS) + $(LDFLAGS) $(EX_APPREC_OBJS) $(DEF_LIB) $(TEST_LIBS) $(LIBS) ex_btrec@o@: $(srcdir)/examples_c/ex_btrec.c $(CC) $(CFLAGS) $? @@ -1078,14 +1114,20 @@ EX_RQ_OBJS=\ ex_rq_client@o@ ex_rq_main@o@ ex_rq_master@o@ ex_rq_net@o@ ex_rq_util@o@ ex_repquote: $(EX_RQ_OBJS) $(DEF_LIB) $(CCLINK) -o $@ \ - $(LDFLAGS) $(EX_RQ_OBJS) $(DEF_LIB) @LOAD_LIBS@ $(LIBS) + $(LDFLAGS) $(EX_RQ_OBJS) $(DEF_LIB) $(TEST_LIBS) $(LIBS) + $(POSTLINK) $@ + +ex_sequence@o@: $(srcdir)/examples_c/ex_sequence.c + $(CC) $(CFLAGS) $? 
+ex_sequence: ex_sequence@o@ $(DEF_LIB) + $(CCLINK) -o $@ $(LDFLAGS) ex_sequence@o@ $(DEF_LIB) $(LIBS) $(POSTLINK) $@ ex_thread@o@: $(srcdir)/examples_c/ex_thread.c $(CC) $(CFLAGS) $? ex_thread: ex_thread@o@ $(DEF_LIB) $(CCLINK) -o $@ \ - $(LDFLAGS) ex_thread@o@ $(DEF_LIB) @LOAD_LIBS@ $(LIBS) + $(LDFLAGS) ex_thread@o@ $(DEF_LIB) $(TEST_LIBS) $(LIBS) $(POSTLINK) $@ ex_tpcb@o@: $(srcdir)/examples_c/ex_tpcb.c @@ -1094,6 +1136,26 @@ ex_tpcb: ex_tpcb@o@ $(DEF_LIB) $(CCLINK) -o $@ $(LDFLAGS) ex_tpcb@o@ $(DEF_LIB) $(LIBS) $(POSTLINK) $@ +gettingstarted_common@o@: \ + $(srcdir)/examples_c/getting_started/gettingstarted_common.c + $(CC) -I $(srcdir)/examples_c/getting_started $(CFLAGS) $? +example_database_load@o@: \ + $(srcdir)/examples_c/getting_started/example_database_load.c + $(CC) $(CFLAGS) $? +example_database_read@o@: \ + $(srcdir)/examples_c/getting_started/example_database_read.c + $(CC) $(CFLAGS) $? +example_database_load: example_database_load@o@ gettingstarted_common@o@ \ + $(DEF_LIB) + $(CCLINK) -o $@ $(LDFLAGS) \ + example_database_load@o@ gettingstarted_common@o@ $(DEF_LIB) $(LIBS) + $(POSTLINK) $@ +example_database_read: example_database_read@o@ gettingstarted_common@o@ \ + $(DEF_LIB) + $(CCLINK) -o $@ $(LDFLAGS) \ + example_database_read@o@ gettingstarted_common@o@ $(DEF_LIB) $(LIBS) + $(POSTLINK) $@ + ################################################## # Example programs for C++. ################################################## @@ -1127,12 +1189,37 @@ excxx_mpool: MpoolExample@o@ $(DEF_LIB_CXX) $(CXXLINK) -o $@ $(LDFLAGS) MpoolExample@o@ $(DEF_LIB_CXX) $(LIBS) $(POSTLINK) $@ +SequenceExample@o@: $(srcdir)/examples_cxx/SequenceExample.cpp + $(CXX) $(CXXFLAGS) $? +excxx_sequence: SequenceExample@o@ $(DEF_LIB_CXX) + $(CXXLINK) -o $@ $(LDFLAGS) SequenceExample@o@ $(DEF_LIB_CXX) $(LIBS) + $(POSTLINK) $@ + TpcbExample@o@: $(srcdir)/examples_cxx/TpcbExample.cpp $(CXX) $(CXXFLAGS) $? excxx_tpcb: TpcbExample@o@ $(DEF_LIB_CXX) $(CXXLINK) -o $@ $(LDFLAGS) TpcbExample@o@ $(DEF_LIB_CXX) $(LIBS) $(POSTLINK) $@ +excxx_example_database_load@o@: \ + $(srcdir)/examples_cxx/getting_started/excxx_example_database_load.cpp + $(CXX) -I$(srcdir)/examples_cxx/getting_started $(CXXFLAGS) $? +excxx_example_database_read@o@: \ + $(srcdir)/examples_cxx/getting_started/excxx_example_database_read.cpp + $(CXX) -I$(srcdir)/examples_cxx/getting_started $(CXXFLAGS) $? +MyDb@o@: $(srcdir)/examples_cxx/getting_started/MyDb.cpp + $(CXX) -I$(srcdir)/examples_cxx/getting_started $(CXXFLAGS) $? +excxx_example_database_load: \ + excxx_example_database_load@o@ MyDb@o@ $(DEF_LIB_CXX) + $(CXXLINK) -o $@ $(LDFLAGS) \ + excxx_example_database_load@o@ MyDb@o@ $(DEF_LIB_CXX) $(LIBS) + $(POSTLINK) $@ +excxx_example_database_read: \ + excxx_example_database_read@o@ MyDb@o@ $(DEF_LIB_CXX) + $(CXXLINK) -o $@ $(LDFLAGS) \ + excxx_example_database_read@o@ MyDb@o@ $(DEF_LIB_CXX) $(LIBS) + $(POSTLINK) $@ + ################################################## # C API build rules. ################################################## @@ -1174,8 +1261,12 @@ bt_verify@o@: $(srcdir)/btree/bt_verify.c $(CC) $(CFLAGS) $? btree_auto@o@: $(srcdir)/btree/btree_auto.c $(CC) $(CFLAGS) $? +btree_autop@o@: $(srcdir)/btree/btree_autop.c + $(CC) $(CFLAGS) $? crdel_auto@o@: $(srcdir)/db/crdel_auto.c $(CC) $(CFLAGS) $? +crdel_autop@o@: $(srcdir)/db/crdel_autop.c + $(CC) $(CFLAGS) $? crdel_rec@o@: $(srcdir)/db/crdel_rec.c $(CC) $(CFLAGS) $? crypto@o@: $(srcdir)/crypto/crypto.c @@ -1190,6 +1281,8 @@ db_am@o@: $(srcdir)/db/db_am.c $(CC) $(CFLAGS) $? 
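The ex_sequence and SequenceExample targets added above exercise the sequence API introduced in this release. The following is a minimal sketch based on the documented 4.3 DB_SEQUENCE interface (db_sequence_create, initial_value, open, get, close); the file name "seq.db" and the key "my_sequence" are arbitrary choices for the sketch, and error handling is abbreviated.

#include <stdio.h>
#include <string.h>
#include <db.h>

int
main(void)
{
        DB *dbp;
        DB_SEQUENCE *seq;
        DBT key;
        db_seq_t val;
        int i, ret;

        if ((ret = db_create(&dbp, NULL, 0)) != 0)
                return (1);
        if ((ret = dbp->open(dbp,
            NULL, "seq.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0)
                goto err;

        if ((ret = db_sequence_create(&seq, dbp, 0)) != 0)
                goto err;
        memset(&key, 0, sizeof(key));
        key.data = "my_sequence";
        key.size = (u_int32_t)strlen("my_sequence");
        if ((ret = seq->initial_value(seq, 1)) != 0 ||
            (ret = seq->open(seq, NULL, &key, DB_CREATE)) != 0)
                goto err;

        /* Allocate the next five values, one at a time. */
        for (i = 0; i < 5; i++) {
                if ((ret = seq->get(seq, NULL, 1, &val, 0)) != 0)
                        break;
                printf("next value: %lld\n", (long long)val);
        }
        (void)seq->close(seq, 0);
err:    (void)dbp->close(dbp, 0);
        return (ret == 0 ? 0 : 1);
}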
db_auto@o@: $(srcdir)/db/db_auto.c $(CC) $(CFLAGS) $? +db_autop@o@: $(srcdir)/db/db_autop.c + $(CC) $(CFLAGS) $? db_byteorder@o@: $(srcdir)/common/db_byteorder.c $(CC) $(CFLAGS) $? db_cam@o@: $(srcdir)/db/db_cam.c @@ -1234,10 +1327,16 @@ db_remove@o@: $(srcdir)/db/db_remove.c $(CC) $(CFLAGS) $? db_ret@o@: $(srcdir)/db/db_ret.c $(CC) $(CFLAGS) $? +db_setid@o@: $(srcdir)/db/db_setid.c + $(CC) $(CFLAGS) $? +db_setlsn@o@: $(srcdir)/db/db_setlsn.c + $(CC) $(CFLAGS) $? db_salloc@o@: $(srcdir)/env/db_salloc.c $(CC) $(CFLAGS) $? db_shash@o@: $(srcdir)/env/db_shash.c $(CC) $(CFLAGS) $? +db_stati@o@: $(srcdir)/db/db_stati.c + $(CC) $(CFLAGS) $? db_truncate@o@: $(srcdir)/db/db_truncate.c $(CC) $(CFLAGS) $? db_upg@o@: $(srcdir)/db/db_upg.c @@ -1256,8 +1355,12 @@ dbreg@o@: $(srcdir)/dbreg/dbreg.c $(CC) $(CFLAGS) $? dbreg_auto@o@: $(srcdir)/dbreg/dbreg_auto.c $(CC) $(CFLAGS) $? +dbreg_autop@o@: $(srcdir)/dbreg/dbreg_autop.c + $(CC) $(CFLAGS) $? dbreg_rec@o@: $(srcdir)/dbreg/dbreg_rec.c $(CC) $(CFLAGS) $? +dbreg_stat@o@: $(srcdir)/dbreg/dbreg_stat.c + $(CC) $(CFLAGS) $? dbreg_util@o@: $(srcdir)/dbreg/dbreg_util.c $(CC) $(CFLAGS) $? env_file@o@: $(srcdir)/env/env_file.c @@ -1270,8 +1373,12 @@ env_recover@o@: $(srcdir)/env/env_recover.c $(CC) $(CFLAGS) $? env_region@o@: $(srcdir)/env/env_region.c $(CC) $(CFLAGS) $? +env_stat@o@: $(srcdir)/env/env_stat.c + $(CC) $(CFLAGS) $? fileops_auto@o@: $(srcdir)/fileops/fileops_auto.c $(CC) $(CFLAGS) $? +fileops_autop@o@: $(srcdir)/fileops/fileops_autop.c + $(CC) $(CFLAGS) $? fop_basic@o@: $(srcdir)/fileops/fop_basic.c $(CC) $(CFLAGS) $? fop_rec@o@: $(srcdir)/fileops/fop_rec.c @@ -1282,6 +1389,8 @@ hash@o@: $(srcdir)/hash/hash.c $(CC) $(CFLAGS) $? hash_auto@o@: $(srcdir)/hash/hash_auto.c $(CC) $(CFLAGS) $? +hash_autop@o@: $(srcdir)/hash/hash_autop.c + $(CC) $(CFLAGS) $? hash_conv@o@: $(srcdir)/hash/hash_conv.c $(CC) $(CFLAGS) $? hash_dup@o@: $(srcdir)/hash/hash_dup.c @@ -1316,12 +1425,18 @@ lock@o@: $(srcdir)/lock/lock.c $(CC) $(CFLAGS) $? lock_deadlock@o@:$(srcdir)/lock/lock_deadlock.c $(CC) $(CFLAGS) $? +lock_id@o@:$(srcdir)/lock/lock_id.c + $(CC) $(CFLAGS) $? +lock_list@o@:$(srcdir)/lock/lock_list.c + $(CC) $(CFLAGS) $? lock_method@o@:$(srcdir)/lock/lock_method.c $(CC) $(CFLAGS) $? lock_region@o@:$(srcdir)/lock/lock_region.c $(CC) $(CFLAGS) $? lock_stat@o@:$(srcdir)/lock/lock_stat.c $(CC) $(CFLAGS) $? +lock_timer@o@:$(srcdir)/lock/lock_timer.c + $(CC) $(CFLAGS) $? lock_util@o@:$(srcdir)/lock/lock_util.c $(CC) $(CFLAGS) $? log@o@: $(srcdir)/log/log.c @@ -1336,12 +1451,16 @@ log_method@o@: $(srcdir)/log/log_method.c $(CC) $(CFLAGS) $? log_put@o@: $(srcdir)/log/log_put.c $(CC) $(CFLAGS) $? +log_stat@o@: $(srcdir)/log/log_stat.c + $(CC) $(CFLAGS) $? mp_alloc@o@: $(srcdir)/mp/mp_alloc.c $(CC) $(CFLAGS) $? mp_bh@o@: $(srcdir)/mp/mp_bh.c $(CC) $(CFLAGS) $? mp_fget@o@: $(srcdir)/mp/mp_fget.c $(CC) $(CFLAGS) $? +mp_fmethod@o@: $(srcdir)/mp/mp_fmethod.c + $(CC) $(CFLAGS) $? mp_fopen@o@: $(srcdir)/mp/mp_fopen.c $(CC) $(CFLAGS) $? mp_fput@o@: $(srcdir)/mp/mp_fput.c @@ -1420,12 +1539,16 @@ os_stat@o@: $(srcdir)/@OSDIR@/os_stat.c $(CC) $(CFLAGS) $? os_tmpdir@o@: $(srcdir)/os/os_tmpdir.c $(CC) $(CFLAGS) $? +os_truncate@o@: $(srcdir)/@OSDIR@/os_truncate.c + $(CC) $(CFLAGS) $? os_unlink@o@: $(srcdir)/os/os_unlink.c $(CC) $(CFLAGS) $? qam@o@: $(srcdir)/qam/qam.c $(CC) $(CFLAGS) $? qam_auto@o@: $(srcdir)/qam/qam_auto.c $(CC) $(CFLAGS) $? +qam_autop@o@: $(srcdir)/qam/qam_autop.c + $(CC) $(CFLAGS) $? qam_conv@o@: $(srcdir)/qam/qam_conv.c $(CC) $(CFLAGS) $? 
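The *_autop objects added throughout these rules are the per-subsystem log-record print routines that db_printlog now links via PRINT_OBJS (defined earlier in this Makefile). The sketch below shows only the surrounding machinery such a tool is built on: walking the environment's log with a DB_LOGC cursor. It assumes an existing transactional environment in "home" and reports each record's LSN and size instead of dispatching to the print functions.

#include <stdio.h>
#include <string.h>
#include <db.h>

int
main(void)
{
        DB_ENV *dbenv;
        DB_LOGC *logc;
        DB_LSN lsn;
        DBT rec;
        int ret;

        if ((ret = db_env_create(&dbenv, 0)) != 0)
                return (1);
        if ((ret = dbenv->open(dbenv, "home",
            DB_INIT_LOG | DB_INIT_MPOOL, 0)) != 0)
                goto err;
        if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
                goto err;

        memset(&rec, 0, sizeof(rec));
        /* DB_NEXT on a fresh cursor starts at the first log record. */
        while ((ret = logc->get(logc, &lsn, &rec, DB_NEXT)) == 0)
                printf("[%lu][%lu]: %lu bytes\n", (unsigned long)lsn.file,
                    (unsigned long)lsn.offset, (unsigned long)rec.size);

        (void)logc->close(logc, 0);
err:    (void)dbenv->close(dbenv, 0);
        return (ret == DB_NOTFOUND ? 0 : 1);
}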
qam_files@o@: $(srcdir)/qam/qam_files.c @@ -1444,6 +1567,12 @@ qam_upgrade@o@: $(srcdir)/qam/qam_upgrade.c $(CC) $(CFLAGS) $? qam_verify@o@: $(srcdir)/qam/qam_verify.c $(CC) $(CFLAGS) $? +rep_auto@o@: $(srcdir)/rep/rep_auto.c + $(CC) $(CFLAGS) $? +rep_autop@o@: $(srcdir)/rep/rep_autop.c + $(CC) $(CFLAGS) $? +rep_backup@o@: $(srcdir)/rep/rep_backup.c + $(CC) $(CFLAGS) $? rep_method@o@: $(srcdir)/rep/rep_method.c $(CC) $(CFLAGS) $? rep_record@o@: $(srcdir)/rep/rep_record.c @@ -1452,18 +1581,28 @@ rep_region@o@: $(srcdir)/rep/rep_region.c $(CC) $(CFLAGS) $? rep_stub@o@: $(srcdir)/rep/rep_stub.c $(CC) $(CFLAGS) $? +rep_stat@o@: $(srcdir)/rep/rep_stat.c + $(CC) $(CFLAGS) $? rep_util@o@: $(srcdir)/rep/rep_util.c $(CC) $(CFLAGS) $? rijndael-alg-fst@o@: $(srcdir)/crypto/rijndael/rijndael-alg-fst.c $(CC) $(CFLAGS) $? rijndael-api-fst@o@: $(srcdir)/crypto/rijndael/rijndael-api-fst.c $(CC) $(CFLAGS) $? +seq_stat@o@: $(srcdir)/sequence/seq_stat.c + $(CC) $(CFLAGS) $? +sequence@o@: $(srcdir)/sequence/sequence.c + $(CC) $(CFLAGS) $? sha1@o@: $(srcdir)/hmac/sha1.c $(CC) $(CFLAGS) $? +stat_stub@o@: $(srcdir)/common/stat_stub.c + $(CC) $(CFLAGS) $? txn@o@: $(srcdir)/txn/txn.c $(CC) $(CFLAGS) $? txn_auto@o@: $(srcdir)/txn/txn_auto.c $(CC) $(CFLAGS) $? +txn_autop@o@: $(srcdir)/txn/txn_autop.c + $(CC) $(CFLAGS) $? txn_method@o@: $(srcdir)/txn/txn_method.c $(CC) $(CFLAGS) $? txn_rec@o@: $(srcdir)/txn/txn_rec.c @@ -1510,10 +1649,12 @@ cxx_logc@o@: $(srcdir)/cxx/cxx_logc.cpp $(CXX) $(CXXFLAGS) $? cxx_mpool@o@: $(srcdir)/cxx/cxx_mpool.cpp $(CXX) $(CXXFLAGS) $? -cxx_txn@o@: $(srcdir)/cxx/cxx_txn.cpp - $(CXX) $(CXXFLAGS) $? cxx_multi@o@: $(srcdir)/cxx/cxx_multi.cpp $(CXX) $(CXXFLAGS) $? +cxx_seq@o@: $(srcdir)/cxx/cxx_seq.cpp + $(CXX) $(CXXFLAGS) $? +cxx_txn@o@: $(srcdir)/cxx/cxx_txn.cpp + $(CXX) $(CXXFLAGS) $? ################################################## # Java API build rules. @@ -1544,6 +1685,8 @@ tcl_mp@o@: $(srcdir)/tcl/tcl_mp.c $(CC) $(CFLAGS) $(TCFLAGS) $? tcl_rep@o@: $(srcdir)/tcl/tcl_rep.c $(CC) $(CFLAGS) $(TCFLAGS) $? +tcl_seq@o@: $(srcdir)/tcl/tcl_seq.c + $(CC) $(CFLAGS) $(TCFLAGS) $? tcl_txn@o@: $(srcdir)/tcl/tcl_txn.c $(CC) $(CFLAGS) $(TCFLAGS) $? tcl_util@o@: $(srcdir)/tcl/tcl_util.c @@ -1555,7 +1698,7 @@ tcl_util@o@: $(srcdir)/tcl/tcl_util.c # RPC client files client@o@: $(srcdir)/rpc_client/client.c $(CC) $(CFLAGS) $? -db_server_clnt@o@: $(srcdir)/rpc_client/db_server_clnt.c +db_server_clnt@o@: db_server_clnt.c $(CC) $(CFLAGS) $? gen_client@o@: $(srcdir)/rpc_client/gen_client.c $(CC) $(CFLAGS) $? @@ -1565,13 +1708,13 @@ gen_client_ret@o@: $(srcdir)/rpc_client/gen_client_ret.c # RPC server files db_server_proc@o@: $(srcdir)/rpc_server/c/db_server_proc.c $(CC) $(CFLAGS) $? -db_server_svc@o@: $(srcdir)/rpc_server/c/db_server_svc.c +db_server_svc@o@: db_server_svc.c $(CC) $(CFLAGS) $? db_server_util@o@: $(srcdir)/rpc_server/c/db_server_util.c $(CC) $(CFLAGS) $? -db_server_xdr@o@: $(srcdir)/rpc_server/c/db_server_xdr.c +db_server_xdr@o@: db_server_xdr.c $(CC) $(CFLAGS) $? -gen_db_server@o@: $(srcdir)/rpc_server/c/gen_db_server.c +gen_db_server@o@: gen_db_server.c $(CC) $(CFLAGS) $? db_server_cxxproc@o@: $(srcdir)/rpc_server/cxx/db_server_cxxproc.cpp $(CXX) $(CXXFLAGS) $? @@ -1627,5 +1770,3 @@ snprintf@o@: $(srcdir)/clib/snprintf.c $(CC) $(CFLAGS) $? strerror@o@: $(srcdir)/clib/strerror.c $(CC) $(CFLAGS) $? -vsnprintf@o@: $(srcdir)/clib/vsnprintf.c - $(CC) $(CFLAGS) $? 
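[Editorial note] The Makefile.in hunks above add build rules for the new sequence support (sequence.c, seq_stat.c, cxx_seq.cpp, tcl_seq.c) and its example programs (ex_sequence, SequenceExample), alongside the new getting-started examples. For orientation, the following is a minimal sketch of how an application drives the 4.3 sequence interface; it assumes the documented db_sequence_create()/DB_SEQUENCE methods, and the database name "counter.db" and key "my_seq" are made-up placeholders, not taken from this patch.

/*
 * Hedged sketch of the DB 4.3 sequence interface: store a persistent
 * counter in an ordinary database and allocate the next value from it.
 */
#include <stdio.h>
#include <string.h>
#include <db.h>

int
main(void)
{
	DB *dbp;
	DB_SEQUENCE *seq;
	DBT key;
	db_seq_t value;
	int ret;

	/* The sequence record lives in an ordinary database. */
	if ((ret = db_create(&dbp, NULL, 0)) != 0)
		return (1);
	if ((ret = dbp->open(dbp,
	    NULL, "counter.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0)
		goto err;

	memset(&key, 0, sizeof(DBT));
	key.data = "my_seq";
	key.size = (u_int32_t)strlen("my_seq");

	if ((ret = db_sequence_create(&seq, dbp, 0)) != 0)
		goto err;
	if ((ret = seq->initial_value(seq, 1)) != 0 ||
	    (ret = seq->open(seq, NULL, &key, DB_CREATE)) != 0)
		goto err;

	/* Allocate the next value (delta of 1) from the sequence. */
	if ((ret = seq->get(seq, NULL, 1, &value, 0)) != 0)
		goto err;
	printf("next value: %ld\n", (long)value);

	(void)seq->close(seq, 0);
err:	(void)dbp->close(dbp, 0);
	return (ret == 0 ? 0 : 1);
}

In a transactional environment the NULL transaction handles above would be replaced by a DB_TXN, and seq->set_cachesize() can be used to hand out ranges of values without touching the database on every get.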
diff --git a/db/dist/RELEASE b/db/dist/RELEASE index aec0cdc8a..4de7d27cd 100644 --- a/db/dist/RELEASE +++ b/db/dist/RELEASE @@ -1,8 +1,8 @@ -# $Id: RELEASE,v 11.173 2003/12/03 21:15:38 bostic Exp $ +# $Id: RELEASE,v 11.188 2004/10/14 15:32:29 bostic Exp $ DB_VERSION_MAJOR=4 -DB_VERSION_MINOR=2 -DB_VERSION_PATCH=52 +DB_VERSION_MINOR=3 +DB_VERSION_PATCH=14 DB_VERSION="$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH" DB_VERSION_UNIQUE_NAME=`printf "_%d%03d" $DB_VERSION_MAJOR $DB_VERSION_MINOR` diff --git a/db/dist/aclocal/libtool.ac b/db/dist/aclocal/libtool.ac index 59d0b0938..71dae456a 100644 --- a/db/dist/aclocal/libtool.ac +++ b/db/dist/aclocal/libtool.ac @@ -1,5 +1,5 @@ # libtool.m4 - Configure libtool for the host system. -*-Autoconf-*- -## Copyright 1996, 1997, 1998, 1999, 2000, 2001 +## Copyright 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004 ## Free Software Foundation, Inc. ## Originally by Gordon Matzigkeit , 1996 ## @@ -200,6 +200,8 @@ if test -n "$RANLIB"; then old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" fi +cc_basename=`$echo X"$compiler" | $Xsed -e 's%^.*/%%'` + # Only perform the check for file, if the check method requires it case $deplibs_check_method in file_magic*) @@ -317,7 +319,7 @@ fi # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. -if test "X${CDPATH+set}" = Xset; then CDPATH=:; export CDPATH; fi +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH if test -z "$ECHO"; then if test "X${echo_test_string+set}" != Xset; then @@ -500,7 +502,7 @@ x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*|s390*-*linux*|sparc*-*linux*) x86_64-*linux*) LD="${LD-ld} -m elf_i386" ;; - ppc64-*linux*) + ppc64-*linux*|powerpc64-*linux*) LD="${LD-ld} -m elf32ppclinux" ;; s390x-*linux*) @@ -564,7 +566,8 @@ need_locks="$enable_libtool_lock" # ---------------------------------------------------------------- # Check whether the given compiler option works AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], -[AC_CACHE_CHECK([$1], [$2], +[AC_REQUIRE([LT_AC_PROG_SED]) +AC_CACHE_CHECK([$1], [$2], [$2=no ifelse([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4]) printf "$lt_simple_compile_test_code" > conftest.$ac_ext @@ -640,7 +643,7 @@ AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], AC_MSG_CHECKING([the maximum length of command line arguments]) AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl i=0 - testring="ABCD" + teststring="ABCD" case $build_os in msdosdjgpp*) @@ -669,20 +672,40 @@ AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl lt_cv_sys_max_cmd_len=8192; ;; + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; + ;; + + netbsd* | freebsd* | openbsd* | darwin* ) + # This has been around since 386BSD, at least. Likely further. + if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for *BSD + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + ;; + *) # If test is not a shell built-in, we'll probably end up computing a # maximum length that is only half of the actual maximum length, but # we can't tell. 
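[Editorial note] The RELEASE hunk at the top of this block moves the tree from 4.2.52 to 4.3.14 (the libtool.ac changes that follow are a sync with a newer upstream libtool). Applications that want to confirm at run time that the db.h they compiled against matches the library they linked can use db_version(); the sketch below relies only on that long-standing interface and the DB_VERSION_* macros, nothing introduced by this patch.

/*
 * Cross-check the header the application was compiled against with the
 * library it is running against.
 */
#include <stdio.h>
#include <db.h>

int
main(void)
{
	int major, minor, patch;

	(void)db_version(&major, &minor, &patch);
	printf("header %d.%d.%d, library %d.%d.%d\n",
	    DB_VERSION_MAJOR, DB_VERSION_MINOR, DB_VERSION_PATCH,
	    major, minor, patch);

	if (major != DB_VERSION_MAJOR || minor != DB_VERSION_MINOR) {
		fprintf(stderr, "version mismatch\n");
		return (1);
	}
	return (0);
}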
- while (test "X"`$CONFIG_SHELL [$]0 --fallback-echo "X$testring" 2>/dev/null` \ - = "XX$testring") >/dev/null 2>&1 && - new_result=`expr "X$testring" : ".*" 2>&1` && + SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} + while (test "X"`$SHELL [$]0 --fallback-echo "X$teststring" 2>/dev/null` \ + = "XX$teststring") >/dev/null 2>&1 && + new_result=`expr "X$teststring" : ".*" 2>&1` && lt_cv_sys_max_cmd_len=$new_result && test $i != 17 # 1/2 MB should be enough do i=`expr $i + 1` - testring=$testring$testring + teststring=$teststring$teststring done - testring= + teststring= # Add a significant safety factor because C++ compilers can tack on massive # amounts of additional arguments before passing them to the linker. # It appears as though 1/2 is a usable value. @@ -832,7 +855,7 @@ else lt_cv_dlopen_self=yes ]) ;; - + *) AC_CHECK_FUNC([shl_load], [lt_cv_dlopen="shl_load"], @@ -921,13 +944,6 @@ AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], mkdir out printf "$lt_simple_compile_test_code" > conftest.$ac_ext - # According to Tom Tromey, Ian Lance Taylor reported there are C compilers - # that will create temporary files in the current directory regardless of - # the output directory. Thus, making CWD read-only will cause this test - # to fail, enabling locking or at least warning the user not to do parallel - # builds. - chmod -w . - lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. @@ -951,8 +967,11 @@ AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], fi fi chmod u+w . - $rm conftest* out/* - rmdir out + $rm conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files + $rm out/* && rmdir out cd .. rmdir conftest $rm conftest* @@ -1011,8 +1030,8 @@ AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_MSG_CHECKING([how to hardcode library paths into programs]) _LT_AC_TAGVAR(hardcode_action, $1)= if test -n "$_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)" || \ - test -n "$_LT_AC_TAGVAR(runpath_var $1)" || \ - test "X$_LT_AC_TAGVAR(hardcode_automatic, $1)"="Xyes" ; then + test -n "$_LT_AC_TAGVAR(runpath_var, $1)" || \ + test "X$_LT_AC_TAGVAR(hardcode_automatic, $1)" = "Xyes" ; then # We can hardcode non-existant directories. if test "$_LT_AC_TAGVAR(hardcode_direct, $1)" != no && @@ -1069,7 +1088,7 @@ fi *) AC_MSG_RESULT([no]) ;; - esac + esac fi ])# AC_LIBTOOL_SYS_LIB_STRIP @@ -1082,7 +1101,7 @@ AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER], library_names_spec= libname_spec='lib$name' soname_spec= -shrext=".so" +shrext_cmds=".so" postinstall_cmds= postuninstall_cmds= finish_cmds= @@ -1170,7 +1189,7 @@ aix4* | aix5*) amigaos*) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. 
- finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "(cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a)"; (cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a) || exit 1; done' + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; beos*) @@ -1179,7 +1198,7 @@ beos*) shlibpath_var=LIBRARY_PATH ;; -bsdi4*) +bsdi[[45]]*) version_type=linux need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' @@ -1195,7 +1214,7 @@ bsdi4*) cygwin* | mingw* | pw32*) version_type=windows - shrext=".dll" + shrext_cmds=".dll" need_version=no need_lib_prefix=no @@ -1217,7 +1236,7 @@ cygwin* | mingw* | pw32*) cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' - sys_lib_search_path_spec="/lib /lib/w32api /usr/lib /usr/local/lib" + sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" ;; mingw*) # MinGW DLLs use traditional 'lib' prefix @@ -1256,17 +1275,16 @@ darwin* | rhapsody*) version_type=darwin need_lib_prefix=no need_version=no - # FIXME: Relying on posixy $() will cause problems for - # cross-compilation, but unfortunately the echo tests do not - # yet detect zsh echo's removal of \ escapes. library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' soname_spec='${libname}${release}${major}$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH - shrext='$(test .$module = .yes && echo .so || echo .dylib)' + shrext_cmds='$(test .$module = .yes && echo .so || echo .dylib)' # Apple's gcc prints 'gcc -print-search-dirs' doesn't operate the same. 
- if $CC -v 2>&1 | grep 'Apple' >/dev/null ; then - sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + else + sys_lib_search_path_spec='/lib /usr/lib /usr/local/lib' fi sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; @@ -1284,6 +1302,18 @@ freebsd1*) dynamic_linker=no ;; +kfreebsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + freebsd*) objformat=`test -x /usr/bin/objformat && /usr/bin/objformat || echo aout` version_type=freebsd-$objformat @@ -1332,7 +1362,7 @@ hpux9* | hpux10* | hpux11*) need_version=no case "$host_cpu" in ia64*) - shrext='.so' + shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH @@ -1347,7 +1377,7 @@ hpux9* | hpux10* | hpux11*) sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; hppa*64*) - shrext='.sl' + shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH @@ -1358,7 +1388,7 @@ hpux9* | hpux10* | hpux11*) sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) - shrext='.sl' + shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH @@ -1427,6 +1457,12 @@ linux*) # before this can be enabled. hardcode_into_libs=yes + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`$SED -e 's/[:,\t]/ /g;s/=[^=]*$//;s/=[^= ]* / /g' /etc/ld.so.conf | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + # We used to test for /lib/ld.so.1 and disable shared libraries on # powerpc, because MkLinux only supported shared libraries with the # GNU dynamic linker. 
Since this was broken with cross compilers, @@ -1436,6 +1472,18 @@ linux*) dynamic_linker='GNU/Linux ld.so' ;; +knetbsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + netbsd*) version_type=sunos need_lib_prefix=no @@ -1445,7 +1493,7 @@ netbsd*) finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} ${libname}${shared_ext}' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='NetBSD ld.elf_so' fi @@ -1461,7 +1509,7 @@ newsos6) shlibpath_overrides_runpath=yes ;; -nto-qnx | nto-qnx6*) +nto-qnx*) version_type=linux need_lib_prefix=no need_version=no @@ -1494,7 +1542,7 @@ openbsd*) os2*) libname_spec='$name' - shrext=".dll" + shrext_cmds=".dll" need_lib_prefix=no library_names_spec='$libname${shared_ext} $libname.a' dynamic_linker='OS/2 ld.exe' @@ -1930,7 +1978,7 @@ fi # AC_PROG_LD # ---------- -# find the path to the GNU or non-GNU linker +# find the pathname to the GNU or non-GNU linker AC_DEFUN([AC_PROG_LD], [AC_ARG_WITH([gnu-ld], [AC_HELP_STRING([--with-gnu-ld], @@ -1956,7 +2004,7 @@ if test "$GCC" = yes; then # Accept absolute paths. [[\\/]]* | ?:[[\\/]]*) re_direlt='/[[^/]][[^/]]*/\.\./' - # Canonicalize the path of ld + # Canonicalize the pathname of ld ac_prog=`echo $ac_prog| $SED 's%\\\\%/%g'` while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do ac_prog=`echo $ac_prog| $SED "s%$re_direlt%/%"` @@ -2019,7 +2067,7 @@ AC_DEFUN([AC_PROG_LD_GNU], [AC_REQUIRE([AC_PROG_EGREP])dnl AC_CACHE_CHECK([if the linker ($LD) is GNU ld], lt_cv_prog_gnu_ld, [# I'd rather use --version here, but apparently some GNU ld's only accept -v. -case `"$LD" -v 2>&1 &1 /dev/null; then case $host_cpu in i*86 ) @@ -2144,36 +2197,27 @@ hpux10.20* | hpux11*) ;; irix5* | irix6* | nonstopux*) - case $host_os in - irix5* | nonstopux*) - # this will be overridden with pass_all, but let us keep it just in case - lt_cv_deplibs_check_method="file_magic ELF 32-bit MSB dynamic lib MIPS - version 1" - ;; - *) - case $LD in - *-32|*"-32 ") libmagic=32-bit;; - *-n32|*"-n32 ") libmagic=N32;; - *-64|*"-64 ") libmagic=64-bit;; - *) libmagic=never-match;; - esac - # this will be overridden with pass_all, but let us keep it just in case - lt_cv_deplibs_check_method="file_magic ELF ${libmagic} MSB mips-[[1234]] dynamic lib MIPS - version 1" - ;; + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; esac - lt_cv_file_magic_test_file=`echo /lib${libsuff}/libc.so*` lt_cv_deplibs_check_method=pass_all ;; # This must be Linux ELF. 
linux*) case $host_cpu in - alpha* | hppa* | i*86 | ia64* | m68* | mips | mipsel | powerpc* | sparc* | s390* | sh*) + alpha*|hppa*|i*86|ia64*|m68*|mips*|powerpc*|sparc*|s390*|sh*) lt_cv_deplibs_check_method=pass_all ;; *) # glibc up to 2.1.1 does not perform some relocations on ARM + # this will be overridden with pass_all, but let us keep it just in case lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' ;; esac lt_cv_file_magic_test_file=`echo /lib/libc.so* /lib/libc-*.so` + lt_cv_deplibs_check_method=pass_all ;; netbsd*) @@ -2190,24 +2234,19 @@ newos6*) lt_cv_file_magic_test_file=/usr/lib/libnls.so ;; -nto-qnx | nto-qnx6*) +nto-qnx*) lt_cv_deplibs_check_method=unknown ;; openbsd*) - lt_cv_file_magic_cmd=/usr/bin/file - lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB shared object' + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$' else - lt_cv_deplibs_check_method='file_magic OpenBSD.* shared library' + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' fi ;; osf3* | osf4* | osf5*) - # this will be overridden with pass_all, but let us keep it just in case - lt_cv_deplibs_check_method='file_magic COFF format alpha shared library' - lt_cv_file_magic_test_file=/shlib/libc.so lt_cv_deplibs_check_method=pass_all ;; @@ -2217,7 +2256,6 @@ sco3.2v5*) solaris*) lt_cv_deplibs_check_method=pass_all - lt_cv_file_magic_test_file=/lib/libc.so ;; sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) @@ -2257,7 +2295,7 @@ test -z "$deplibs_check_method" && deplibs_check_method=unknown # AC_PROG_NM # ---------- -# find the path to a BSD-compatible name lister +# find the pathname to a BSD-compatible name lister AC_DEFUN([AC_PROG_NM], [AC_CACHE_CHECK([for BSD-compatible nm], lt_cv_path_NM, [if test -n "$NM"; then @@ -2395,7 +2433,7 @@ AC_DEFUN([AC_LIBTOOL_CXX], AC_DEFUN([_LT_AC_LANG_CXX], [AC_REQUIRE([AC_PROG_CXX]) AC_REQUIRE([AC_PROG_CXXCPP]) -_LT_AC_SHELL_INIT([tagnames=`echo "$tagnames,CXX" | sed 's/^,//'`]) +_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}CXX]) ])# _LT_AC_LANG_CXX @@ -2411,7 +2449,7 @@ AC_DEFUN([AC_LIBTOOL_F77], # --------------- AC_DEFUN([_LT_AC_LANG_F77], [AC_REQUIRE([AC_PROG_F77]) -_LT_AC_SHELL_INIT([tagnames=`echo "$tagnames,F77" | sed 's/^,//'`]) +_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}F77]) ])# _LT_AC_LANG_F77 @@ -2432,7 +2470,7 @@ AC_DEFUN([_LT_AC_LANG_GCJ], [ifdef([AC_PROG_GCJ],[AC_REQUIRE([AC_PROG_GCJ])], [ifdef([A][M_PROG_GCJ],[AC_REQUIRE([A][M_PROG_GCJ])], [AC_REQUIRE([A][C_PROG_GCJ_OR_A][M_PROG_GCJ])])])])])]) -_LT_AC_SHELL_INIT([tagnames=`echo "$tagnames,GCJ" | sed 's/^,//'`]) +_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}GCJ]) ])# _LT_AC_LANG_GCJ @@ -2441,7 +2479,7 @@ _LT_AC_SHELL_INIT([tagnames=`echo "$tagnames,GCJ" | sed 's/^,//'`]) # enable support for Windows resource files AC_DEFUN([AC_LIBTOOL_RC], [AC_REQUIRE([LT_AC_PROG_RC]) -_LT_AC_SHELL_INIT([tagnames=`echo "$tagnames,RC" | sed 's/^,//'`]) +_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}RC]) ])# AC_LIBTOOL_RC @@ -2483,7 +2521,7 @@ if test "$GCC" = no; then fi if test -n "$_LT_AC_TAGVAR(lt_prog_cc_shlib, $1)"; then AC_MSG_WARN([`$CC' requires `$_LT_AC_TAGVAR(lt_prog_cc_shlib, $1)' to build shared libraries]) - if echo "$old_CC $old_CFLAGS " | grep "[[ 
]]$]_LT_AC_TAGVAR(lt_prog_cc_shlib, $1)[[[ ]]" >/dev/null; then : + if echo "$old_CC $old_CFLAGS " | grep "[[ ]]$_LT_AC_TAGVAR(lt_prog_cc_shlib, $1)[[ ]]" >/dev/null; then : else AC_MSG_WARN([add `$_LT_AC_TAGVAR(lt_prog_cc_shlib, $1)' to the CC or CFLAGS env variable and reconfigure]) _LT_AC_TAGVAR(lt_cv_prog_cc_can_build_shared, $1)=no @@ -2510,9 +2548,9 @@ AC_LIBTOOL_PROG_COMPILER_PIC($1) AC_LIBTOOL_PROG_CC_C_O($1) AC_LIBTOOL_SYS_HARD_LINK_LOCKS($1) AC_LIBTOOL_PROG_LD_SHLIBS($1) +AC_LIBTOOL_SYS_DYNAMIC_LINKER($1) AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH($1) AC_LIBTOOL_SYS_LIB_STRIP -AC_LIBTOOL_SYS_DYNAMIC_LINKER($1) AC_LIBTOOL_DLOPEN_SELF($1) # Report which librarie types wil actually be built @@ -2533,39 +2571,11 @@ aix3*) fi ;; -aix4*) +aix4* | aix5*) if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then test "$enable_shared" = yes && enable_static=no fi - ;; - darwin* | rhapsody*) - if $CC -v 2>&1 | grep 'Apple' >/dev/null ; then - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no - case "$host_os" in - rhapsody* | darwin1.[[012]]) - _LT_AC_TAGVAR(allow_undefined_flag, $1)='-undefined suppress' - ;; - *) # Darwin 1.3 on - test -z ${LD_TWOLEVEL_NAMESPACE} && _LT_AC_TAGVAR(allow_undefined_flag, $1)='-flat_namespace -undefined suppress' - ;; - esac - # FIXME: Relying on posixy $() will cause problems for - # cross-compilation, but unfortunately the echo tests do not - # yet detect zsh echo's removal of \ escapes. Also zsh mangles - # `"' quotes if we put them in here... so don't! - output_verbose_link_cmd='echo' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs$compiler_flags -install_name $rpath/$soname $verstring' - _LT_AC_TAGVAR(module_cmds, $1)='$CC -bundle $allow_undefined_flag -o $lib $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs$compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -bundle $allow_undefined_flag -o $lib $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - _LT_AC_TAGVAR(hardcode_direct, $1)=no - _LT_AC_TAGVAR(hardcode_automatic, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='-all_load $convenience' - _LT_AC_TAGVAR(link_all_deplibs, $1)=yes - fi - ;; + ;; esac AC_MSG_RESULT([$enable_shared]) @@ -2700,7 +2710,7 @@ if test "$GXX" = yes; then # linker, instead of GNU ld. If possible, this setting should # overridden to take advantage of the native linker features on # the platform it is being used on. 
- _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' fi # Commands to make compiler produce verbose output that lists @@ -2842,6 +2852,7 @@ case $host_os in esac ;; + cygwin* | mingw* | pw32*) # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, # as there is no search path for DLLs. @@ -2865,44 +2876,68 @@ case $host_os in _LT_AC_TAGVAR(ld_shlibs, $1)=no fi ;; + darwin* | rhapsody*) + case "$host_os" in + rhapsody* | darwin1.[[012]]) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[[012]]) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; + esac + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_AC_TAGVAR(hardcode_direct, $1)=no + _LT_AC_TAGVAR(hardcode_automatic, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='' + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes - darwin* | rhapsody*) - if $CC -v 2>&1 | grep 'Apple' >/dev/null ; then - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no - case "$host_os" in - rhapsody* | darwin1.[[012]]) - _LT_AC_TAGVAR(allow_undefined_flag, $1)='-undefined suppress' - ;; - *) # Darwin 1.3 on - test -z ${LD_TWOLEVEL_NAMESPACE} && _LT_AC_TAGVAR(allow_undefined_flag, $1)='-flat_namespace -undefined suppress' - ;; - esac - lt_int_apple_cc_single_mod=no - output_verbose_link_cmd='echo' - if $CC -dumpspecs 2>&1 | grep 'single_module' >/dev/null ; then - lt_int_apple_cc_single_mod=yes - fi - if test "X$lt_int_apple_cc_single_mod" = Xyes ; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - else - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -r ${wl}-bind_at_load -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + if test "$GXX" = yes ; then + lt_int_apple_cc_single_mod=no + output_verbose_link_cmd='echo' + if $CC -dumpspecs 2>&1 | $EGREP 'single_module' >/dev/null ; then + lt_int_apple_cc_single_mod=yes fi - _LT_AC_TAGVAR(module_cmds, $1)='$CC -bundle ${wl}-bind_at_load $allow_undefined_flag -o $lib $libobjs $deplibs$compiler_flags' - - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's if test "X$lt_int_apple_cc_single_mod" = Xyes ; then - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' else - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e 
"s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -r ${wl}-bind_at_load -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + fi + _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's + if test "X$lt_int_apple_cc_single_mod" = Xyes ; then + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + fi + _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case "$cc_basename" in + xlc*) + output_verbose_link_cmd='echo' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac fi - _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -bundle $allow_undefined_flag -o $lib $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - _LT_AC_TAGVAR(hardcode_direct, $1)=no - _LT_AC_TAGVAR(hardcode_automatic, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported - 
_LT_AC_TAGVAR(whole_archive_flag_spec, $1)='-all_load $convenience' - _LT_AC_TAGVAR(link_all_deplibs, $1)=yes - fi - ;; + ;; dgux*) case $cc_basename in @@ -2928,7 +2963,7 @@ case $host_os in freebsd-elf*) _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no ;; - freebsd*) + freebsd* | kfreebsd*-gnu) # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF # conventions _LT_AC_TAGVAR(ld_shlibs, $1)=yes @@ -2959,7 +2994,7 @@ case $host_os in # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. - output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | egrep "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | grep "[-]L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' ;; *) if test "$GXX" = yes; then @@ -3108,9 +3143,20 @@ case $host_os in icpc) # Intel C++ with_gnu_ld=yes + # version 8.0 and above of icpc choke on multiply defined symbols + # if we add $predep_objects and $postdep_objects, however 7.1 and + # earlier do not add the objects themselves. + case `$CC -V 2>&1` in + *"Version 7."*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 8.0 or newer + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' @@ -3167,6 +3213,20 @@ case $host_os in # Workaround some broken pre-1.5 toolchains output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' ;; + openbsd2*) + # C++ shared libraries are fairly broken + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + openbsd*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' 
+ _LT_AC_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + fi + output_verbose_link_cmd='echo' + ;; osf3*) case $cc_basename in KCC) @@ -3442,9 +3502,9 @@ AC_LIBTOOL_PROG_COMPILER_PIC($1) AC_LIBTOOL_PROG_CC_C_O($1) AC_LIBTOOL_SYS_HARD_LINK_LOCKS($1) AC_LIBTOOL_PROG_LD_SHLIBS($1) +AC_LIBTOOL_SYS_DYNAMIC_LINKER($1) AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH($1) AC_LIBTOOL_SYS_LIB_STRIP -AC_LIBTOOL_SYS_DYNAMIC_LINKER($1) AC_LIBTOOL_DLOPEN_SELF($1) AC_LIBTOOL_CONFIG($1) @@ -3666,7 +3726,7 @@ aix3*) postinstall_cmds='$RANLIB $lib' fi ;; -aix4*) +aix4* | aix5*) test "$enable_shared" = yes && enable_static=no ;; esac @@ -3686,9 +3746,10 @@ AC_LIBTOOL_PROG_COMPILER_PIC($1) AC_LIBTOOL_PROG_CC_C_O($1) AC_LIBTOOL_SYS_HARD_LINK_LOCKS($1) AC_LIBTOOL_PROG_LD_SHLIBS($1) +AC_LIBTOOL_SYS_DYNAMIC_LINKER($1) AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH($1) AC_LIBTOOL_SYS_LIB_STRIP -AC_LIBTOOL_SYS_DYNAMIC_LINKER($1) + AC_LIBTOOL_CONFIG($1) @@ -3740,9 +3801,9 @@ AC_LIBTOOL_PROG_COMPILER_PIC($1) AC_LIBTOOL_PROG_CC_C_O($1) AC_LIBTOOL_SYS_HARD_LINK_LOCKS($1) AC_LIBTOOL_PROG_LD_SHLIBS($1) +AC_LIBTOOL_SYS_DYNAMIC_LINKER($1) AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH($1) AC_LIBTOOL_SYS_LIB_STRIP -AC_LIBTOOL_SYS_DYNAMIC_LINKER($1) AC_LIBTOOL_DLOPEN_SELF($1) AC_LIBTOOL_CONFIG($1) @@ -3807,11 +3868,12 @@ if test -f "$ltmain"; then # without removal of \ escapes. if test -n "${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST - fi + fi # Now quote all the things that may contain metacharacters while being # careful not to overquote the AC_SUBSTed values. We take copies of the # variables and quote the copies for generation of the libtool script. - for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC NM SED SHELL \ + for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC NM \ + SED SHELL STRIP \ libname_spec library_names_spec soname_spec extract_expsyms_cmds \ old_striplib striplib file_magic_cmd finish_cmds finish_eval \ deplibs_check_method reload_flag reload_cmds need_locks \ @@ -3861,7 +3923,7 @@ if test -f "$ltmain"; then _LT_AC_TAGVAR(archive_cmds, $1) | \ _LT_AC_TAGVAR(archive_expsym_cmds, $1) | \ _LT_AC_TAGVAR(module_cmds, $1) | \ - _LT_AC_TAGVAR(module_expsym_cmds, $1) | \ + _LT_AC_TAGVAR(module_expsym_cmds, $1) | \ _LT_AC_TAGVAR(old_archive_from_expsyms_cmds, $1) | \ _LT_AC_TAGVAR(export_symbols_cmds, $1) | \ extract_expsyms_cmds | reload_cmds | finish_cmds | \ @@ -3931,7 +3993,7 @@ Xsed="$SED -e s/^X//" # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. -if test "X\${CDPATH+set}" = Xset; then CDPATH=:; export CDPATH; fi +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH # The names of the tagged configurations supported by this script. available_tags= @@ -3992,7 +4054,7 @@ LN_S=$lt_LN_S NM=$lt_NM # A symbol stripping program -STRIP=$STRIP +STRIP=$lt_STRIP # Used to examine libraries when file_magic_cmd begins "file" MAGIC_CMD=$MAGIC_CMD @@ -4023,7 +4085,7 @@ objext="$ac_objext" libext="$libext" # Shared library suffix (normally ".so"). -shrext='$shrext' +shrext_cmds='$shrext_cmds' # Executable file suffix (normally ""). exeext="$exeext" @@ -4267,7 +4329,10 @@ else # If there is no Makefile yet, we rely on a make rule to execute # `config.status --recheck' to rerun these tests and create the # libtool script then. 
- test -f Makefile && make "$ltmain" + ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` + if test -f "$ltmain_in"; then + test -f Makefile && make "$ltmain" + fi fi ])# AC_LIBTOOL_CONFIG @@ -4340,7 +4405,7 @@ osf*) symcode='[[BCDEGQRST]]' ;; solaris* | sysv5*) - symcode='[[BDT]]' + symcode='[[BDRT]]' ;; sysv4) symcode='[[DFNSTU]]' @@ -4358,7 +4423,7 @@ esac # If we're using GNU nm, then use its standard symbol codes. case `$NM -V 2>&1` in *GNU* | *'with BFD'*) - symcode='[[ABCDGISTW]]' ;; + symcode='[[ABCDGIRSTW]]' ;; esac # Try without a prefix undercore, then with it. @@ -4560,6 +4625,16 @@ AC_MSG_CHECKING([for $compiler option to produce PIC]) ;; esac ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case "$cc_basename" in + xlc*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-qnocommon' + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + ;; + esac + ;; dgux*) case $cc_basename in ec++) @@ -4573,7 +4648,7 @@ AC_MSG_CHECKING([for $compiler option to produce PIC]) ;; esac ;; - freebsd*) + freebsd* | kfreebsd*-gnu) # FreeBSD uses GNU C++ ;; hpux9* | hpux10* | hpux11*) @@ -4624,7 +4699,7 @@ AC_MSG_CHECKING([for $compiler option to produce PIC]) _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' - ;; + ;; cxx) # Compaq C++ # Make sure the PIC flag is empty. It appears that all Alpha @@ -4813,6 +4888,16 @@ AC_MSG_CHECKING([for $compiler option to produce PIC]) _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' fi ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case "$cc_basename" in + xlc*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-qnocommon' + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + ;; + esac + ;; mingw* | pw32* | os2*) # This hack is so that the source file can tell whether it is being @@ -4849,12 +4934,12 @@ AC_MSG_CHECKING([for $compiler option to produce PIC]) linux*) case $CC in - icc|ecc) + icc* | ecc*) _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; - ccc) + ccc*) _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # All Alpha code is PIC. _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' @@ -4984,7 +5069,7 @@ ifelse([$1],[CXX],[ _LT_AC_TAGVAR(link_all_deplibs, $1)=unknown _LT_AC_TAGVAR(hardcode_automatic, $1)=no _LT_AC_TAGVAR(module_cmds, $1)= - _LT_AC_TAGVAR(module_expsym_cmds, $1)= + _LT_AC_TAGVAR(module_expsym_cmds, $1)= _LT_AC_TAGVAR(always_export_symbols, $1)=no _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' # include_expsyms should be a list of space-separated symbols to be *always* @@ -5127,6 +5212,31 @@ EOF _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no ;; + linux*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + tmp_archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_cmds, $1)="$tmp_archive_cmds" + supports_anon_versioning=no + case `$LD -v 2>/dev/null` in + *\ [01].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... 
+ *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + if test $supports_anon_versioning = yes; then + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $output_objdir/$libname.ver~ +cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ +$echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + else + _LT_AC_TAGVAR(archive_expsym_cmds, $1)="$tmp_archive_cmds" + fi + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + *) if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' @@ -5289,7 +5399,7 @@ EOF _LT_AC_TAGVAR(ld_shlibs, $1)=no ;; - bsdi4*) + bsdi[[45]]*) _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic ;; @@ -5303,7 +5413,7 @@ EOF # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. - shrext=".dll" + shrext_cmds=".dll" # FIXME: Setting linknames here is a bad hack. _LT_AC_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | $SED -e '\''s/ -lc$//'\''` -link -dll~linknames=' # The linker will automatically build a .lib file if we build a DLL. @@ -5315,44 +5425,53 @@ EOF ;; darwin* | rhapsody*) - if $CC -v 2>&1 | grep 'Apple' >/dev/null ; then - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no case "$host_os" in - rhapsody* | darwin1.[[012]]) - _LT_AC_TAGVAR(allow_undefined_flag, $1)='-undefined suppress' - ;; - *) # Darwin 1.3 on - test -z ${LD_TWOLEVEL_NAMESPACE} && _LT_AC_TAGVAR(allow_undefined_flag, $1)='-flat_namespace -undefined suppress' - ;; + rhapsody* | darwin1.[[012]]) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[[012]]) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; esac - # FIXME: Relying on posixy $() will cause problems for - # cross-compilation, but unfortunately the echo tests do not - # yet detect zsh echo's removal of \ escapes. Also zsh mangles - # `"' quotes if we put them in here... so don't! 
- lt_int_apple_cc_single_mod=no - output_verbose_link_cmd='echo' - if $CC -dumpspecs 2>&1 | grep 'single_module' >/dev/null ; then - lt_int_apple_cc_single_mod=yes - fi - if test "X$lt_int_apple_cc_single_mod" = Xyes ; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - else - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -r ${wl}-bind_at_load -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - fi - _LT_AC_TAGVAR(module_cmds, $1)='$CC -bundle ${wl}-bind_at_load $allow_undefined_flag -o $lib $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's - if test "X$lt_int_apple_cc_single_mod" = Xyes ; then - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - else - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -r ${wl}-bind_at_load -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - fi - _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -bundle $allow_undefined_flag -o $lib $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no _LT_AC_TAGVAR(hardcode_direct, $1)=no _LT_AC_TAGVAR(hardcode_automatic, $1)=yes _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='-all_load $convenience' + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='' _LT_AC_TAGVAR(link_all_deplibs, $1)=yes - fi + if test "$GCC" = yes ; then + output_verbose_link_cmd='echo' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case "$cc_basename" in + xlc*) + output_verbose_link_cmd='echo' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj 
$allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + fi ;; dgux*) @@ -5385,7 +5504,7 @@ EOF ;; # FreeBSD 3 and greater uses gcc -shared to do shared libraries. - freebsd*) + freebsd* | kfreebsd*-gnu) _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_AC_TAGVAR(hardcode_direct, $1)=yes @@ -5496,6 +5615,7 @@ EOF _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' else @@ -5702,7 +5822,7 @@ x|xyes) AC_MSG_CHECKING([whether -lc should be explicitly linked in]) $rm conftest* printf "$lt_simple_compile_test_code" > conftest.$ac_ext - + if AC_TRY_EVAL(ac_compile) 2>conftest.err; then soname=conftest lib=conftest @@ -5861,7 +5981,7 @@ for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do fi done done -SED=$lt_cv_path_SED ]) +SED=$lt_cv_path_SED AC_MSG_RESULT([$SED]) ]) diff --git a/db/dist/aclocal/mutex.ac b/db/dist/aclocal/mutex.ac index 4f2b0189e..959ed4ebe 100644 --- a/db/dist/aclocal/mutex.ac +++ b/db/dist/aclocal/mutex.ac @@ -1,28 +1,6 @@ -# $Id: mutex.ac,v 11.42 2003/06/16 14:54:39 bostic Exp $ +# $Id: mutex.ac,v 11.46 2004/07/09 16:23:19 bostic Exp $ # POSIX pthreads tests: inter-process safe and intra-process only. -# -# We need to run a test here, because the PTHREAD_PROCESS_SHARED flag compiles -# fine on problematic systems, but won't actually work. This is a problem for -# cross-compilation environments. I think inter-process mutexes are as likely -# to fail in cross-compilation environments as real ones (especially since the -# likely cross-compilation environment is Linux, where inter-process mutexes -# don't currently work -- the latest estimate I've heard is Q1 2002, as part -# of IBM's NGPT package). So: -# -# If checking for inter-process pthreads mutexes: -# If it's local, run a test. -# If it's a cross-compilation, fail. -# -# If the user specified pthreads mutexes and we're checking for intra-process -# mutexes only: -# If it's local, run a test. -# If it's a cross-compilation, run a link-test. 
-# -# So, the thing you can't do here is configure for inter-process POSIX pthread -# mutexes when cross-compiling. Since we're using the GNU/Cygnus toolchain for -# cross-compilation, the target system is likely Linux or *BSD, so we're doing -# the right thing. AC_DEFUN(AM_PTHREADS_SHARED, [ AC_TRY_RUN([ #include @@ -44,7 +22,27 @@ main() { pthread_cond_destroy(&cond) || pthread_condattr_destroy(&condattr) || pthread_mutexattr_destroy(&mutexattr)); -}], [db_cv_mutex="$1"],, [db_cv_mutex="no"])]) +}], [db_cv_mutex="$1"],, +AC_TRY_LINK([ +#include ],[ + pthread_cond_t cond; + pthread_mutex_t mutex; + pthread_condattr_t condattr; + pthread_mutexattr_t mutexattr; + exit ( + pthread_condattr_init(&condattr) || + pthread_condattr_setpshared(&condattr, PTHREAD_PROCESS_SHARED) || + pthread_mutexattr_init(&mutexattr) || + pthread_mutexattr_setpshared(&mutexattr, PTHREAD_PROCESS_SHARED) || + pthread_cond_init(&cond, &condattr) || + pthread_mutex_init(&mutex, &mutexattr) || + pthread_mutex_lock(&mutex) || + pthread_mutex_unlock(&mutex) || + pthread_mutex_destroy(&mutex) || + pthread_cond_destroy(&cond) || + pthread_condattr_destroy(&condattr) || + pthread_mutexattr_destroy(&mutexattr)); +], [db_cv_mutex="$1"]))]) AC_DEFUN(AM_PTHREADS_PRIVATE, [ AC_TRY_RUN([ #include @@ -103,37 +101,27 @@ orig_libs=$LIBS # is already using one type of mutex and doesn't want to mix-and-match (for # example, on Solaris, which has POSIX, UI and LWP mutexes). Second, the # applications POSIX pthreads mutexes don't support inter-process locking, -# but the application wants to use them anyway (for example, current Linux -# and *BSD systems). -# -# If we're on Solaris, we insist that -lthread or -lpthread be used. The -# problem is the Solaris C library has UI/POSIX interface stubs, but they're -# broken, configuring them for inter-process mutexes doesn't return an error, -# but it doesn't work either. Otherwise, we try first without the library -# and then with it: there's some information that SCO/UnixWare/OpenUNIX needs -# this. [#4950] +# but the application wants to use them anyway (for example, some Linux and +# *BSD systems). # # Test for LWP threads before testing for UI/POSIX threads, we prefer them # on Solaris. There's a bug in SunOS 5.7 where applications get pwrite, not # pwrite64, if they load the C library before the appropriate threads library, # e.g., tclsh using dlopen to load the DB library. By using LWP threads we # avoid answering lots of user questions, not to mention the bugs. +# +# Otherwise, test for POSIX threads before UI threads. There are Linux systems +# that support a UI compatibility mode, and applications are more likely to be +# written for POSIX threads than UI threads. +# +# Try and link with a threads library if possible. The problem is the Solaris +# C library has UI/POSIX interface stubs, but they're broken, configuring them +# for inter-process mutexes doesn't return an error, but it doesn't work either. if test "$db_cv_posixmutexes" = yes; then - case "$host_os" in - solaris*) - db_cv_mutex="posix_library_only";; - *) - db_cv_mutex="posix_only";; - esac + db_cv_mutex="posix_only"; fi - if test "$db_cv_uimutexes" = yes; then - case "$host_os" in - solaris*) - db_cv_mutex="ui_library_only";; - *) - db_cv_mutex="ui_only";; - esac + db_cv_mutex="ui_only"; fi # User-specified Win32 mutexes (MinGW build) @@ -155,10 +143,33 @@ AC_TRY_LINK([ ], [db_cv_mutex="Solaris/lwp"]) fi -# UI threads: thr_XXX +# POSIX.1 pthreads: pthread_XXX # -# Try with and without the -lthread library. 
+# If the user specified we use POSIX pthreads mutexes, and we fail to find the +# full interface, try and configure for just intra-process support. +if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "posix_only"; then + LIBS="$LIBS -lpthread" + AM_PTHREADS_SHARED("POSIX/pthreads/library") + LIBS="$orig_libs" +fi +if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "posix_only"; then + AM_PTHREADS_SHARED("POSIX/pthreads") +fi +if test "$db_cv_mutex" = "posix_only"; then + AM_PTHREADS_PRIVATE("POSIX/pthreads/private") +fi +if test "$db_cv_mutex" = "posix_only"; then + LIBS="$LIBS -lpthread" + AM_PTHREADS_PRIVATE("POSIX/pthreads/library/private") + LIBS="$orig_libs" +fi +if test "$db_cv_mutex" = "posix_only"; then + AC_MSG_ERROR([unable to find POSIX 1003.1 mutex interfaces]) +fi + +# UI threads: thr_XXX if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "ui_only"; then +LIBS="$LIBS -lthread" AC_TRY_LINK([ #include #include ],[ @@ -170,11 +181,10 @@ AC_TRY_LINK([ cond_init(&cond, type, NULL) || mutex_lock(&mutex) || mutex_unlock(&mutex)); -], [db_cv_mutex="UI/threads"]) +], [db_cv_mutex="UI/threads/library"]) +LIBS="$orig_libs" fi -if test "$db_cv_mutex" = no -o \ - "$db_cv_mutex" = "ui_only" -o "$db_cv_mutex" = "ui_library_only"; then -LIBS="$LIBS -lthread" +if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "ui_only"; then AC_TRY_LINK([ #include #include ],[ @@ -186,42 +196,12 @@ AC_TRY_LINK([ cond_init(&cond, type, NULL) || mutex_lock(&mutex) || mutex_unlock(&mutex)); -], [db_cv_mutex="UI/threads/library"]) -LIBS="$orig_libs" +], [db_cv_mutex="UI/threads"]) fi -if test "$db_cv_mutex" = "ui_only" -o "$db_cv_mutex" = "ui_library_only"; then +if test "$db_cv_mutex" = "ui_only"; then AC_MSG_ERROR([unable to find UI mutex interfaces]) fi -# POSIX.1 pthreads: pthread_XXX -# -# Try with and without the -lpthread library. If the user specified we use -# POSIX pthreads mutexes, and we fail to find the full interface, try and -# configure for just intra-process support. -if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "posix_only"; then - AM_PTHREADS_SHARED("POSIX/pthreads") -fi -if test "$db_cv_mutex" = no -o \ - "$db_cv_mutex" = "posix_only" -o "$db_cv_mutex" = "posix_library_only"; then - LIBS="$LIBS -lpthread" - AM_PTHREADS_SHARED("POSIX/pthreads/library") - LIBS="$orig_libs" -fi -if test "$db_cv_mutex" = "posix_only"; then - AM_PTHREADS_PRIVATE("POSIX/pthreads/private") -fi -if test "$db_cv_mutex" = "posix_only" -o \ - "$db_cv_mutex" = "posix_library_only"; then - LIBS="$LIBS -lpthread" - AM_PTHREADS_PRIVATE("POSIX/pthreads/library/private") - LIBS="$orig_libs" -fi - -if test "$db_cv_mutex" = "posix_only" -o \ - "$db_cv_mutex" = "posix_library_only"; then - AC_MSG_ERROR([unable to find POSIX 1003.1 mutex interfaces]) -fi - # msemaphore: HPPA only # Try HPPA before general msem test, it needs special alignment. 
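[Editorial note] The mutex.ac changes above rework AM_PTHREADS_SHARED so the PTHREAD_PROCESS_SHARED probe falls back from a run test to a link test when cross-compiling, and reorder the checks so POSIX mutexes are tried (with and without -lpthread) before UI threads. The standalone program below mirrors the probe body from the macro so it can be run outside configure; it is a sketch, not part of the patch, and on systems without working process-shared mutexes it is expected to fail at run time even though it links.

/*
 * Standalone version of the configure probe: can we initialize a
 * PTHREAD_PROCESS_SHARED mutex and condition variable?  Mirrors the
 * AM_PTHREADS_SHARED test body; build with and without -lpthread the
 * same way configure does.
 */
#include <pthread.h>
#include <stdio.h>

int
main(void)
{
	pthread_cond_t cond;
	pthread_mutex_t mutex;
	pthread_condattr_t condattr;
	pthread_mutexattr_t mutexattr;
	int ret;

	ret =
	    pthread_condattr_init(&condattr) ||
	    pthread_condattr_setpshared(&condattr, PTHREAD_PROCESS_SHARED) ||
	    pthread_mutexattr_init(&mutexattr) ||
	    pthread_mutexattr_setpshared(&mutexattr, PTHREAD_PROCESS_SHARED) ||
	    pthread_cond_init(&cond, &condattr) ||
	    pthread_mutex_init(&mutex, &mutexattr) ||
	    pthread_mutex_lock(&mutex) ||
	    pthread_mutex_unlock(&mutex) ||
	    pthread_mutex_destroy(&mutex) ||
	    pthread_cond_destroy(&cond) ||
	    pthread_condattr_destroy(&condattr) ||
	    pthread_mutexattr_destroy(&mutexattr);

	printf("process-shared pthread mutexes: %s\n",
	    ret == 0 ? "usable" : "not usable");
	return (ret == 0 ? 0 : 1);
}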
if test "$db_cv_mutex" = no; then @@ -420,7 +400,7 @@ fi # x86/gcc: FreeBSD, NetBSD, BSD/OS, Linux if test "$db_cv_mutex" = no; then AC_TRY_COMPILE(,[ -#if (defined(i386) || defined(__i386__)) && defined(__GNUC__) +#if (defined(i386) || defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__) exit(0); #else FAIL TO COMPILE/LINK @@ -523,12 +503,16 @@ POSIX/pthreads/private) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" AH_TEMPLATE(HAVE_MUTEX_THREAD_ONLY, [Define to 1 to configure mutexes intra-process only.]);; POSIX/pthreads/library) LIBS="$LIBS -lpthread" + LIBJSO_LIBS="$LIBJSO_LIBS -lpthread" + LIBTSO_LIBS="$LIBTSO_LIBS -lpthread" ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" AC_DEFINE(HAVE_MUTEX_PTHREADS) AH_TEMPLATE(HAVE_MUTEX_PTHREADS, [Define to 1 to use POSIX 1003.1 pthread_XXX mutexes.]);; POSIX/pthreads/library/private) LIBS="$LIBS -lpthread" + LIBJSO_LIBS="$LIBJSO_LIBS -lpthread" + LIBTSO_LIBS="$LIBTSO_LIBS -lpthread" ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" AC_DEFINE(HAVE_MUTEX_PTHREADS) AH_TEMPLATE(HAVE_MUTEX_PTHREADS, diff --git a/db/dist/aclocal/options.ac b/db/dist/aclocal/options.ac index 947a1c06b..b770fc444 100644 --- a/db/dist/aclocal/options.ac +++ b/db/dist/aclocal/options.ac @@ -1,4 +1,4 @@ -# $Id: options.ac,v 11.29 2003/11/05 18:25:41 bostic Exp $ +# $Id: options.ac,v 11.37 2004/06/10 16:38:18 bostic Exp $ # Process user-specified options. AC_DEFUN(AM_OPTIONS_SET, [ @@ -51,6 +51,16 @@ case "$enableval" in yes) AC_MSG_RESULT(no);; esac +AC_MSG_CHECKING(if --disable-statistics option specified) +AC_ARG_ENABLE(statistics, + AC_HELP_STRING([--disable-statistics], + [Do not build statistics support.]),, enableval="yes") +db_cv_build_statistics="$enableval" +case "$enableval" in + no) AC_MSG_RESULT(yes);; +yes) AC_MSG_RESULT(no);; +esac + AC_MSG_CHECKING(if --disable-verify option specified) AC_ARG_ENABLE(verify, AC_HELP_STRING([--disable-verify], @@ -137,6 +147,13 @@ AC_ARG_ENABLE(mingw, [db_cv_mingw="$enable_mingw"], [db_cv_mingw="no"]) AC_MSG_RESULT($db_cv_mingw) +AC_MSG_CHECKING(if --enable-o_direct option specified) +AC_ARG_ENABLE(o_direct, + [AC_HELP_STRING([--enable-o_direct], + [Enable the O_DIRECT flag for direct I/O.])], + [db_cv_o_direct="$enable_o_direct"], [db_cv_o_direct="no"]) +AC_MSG_RESULT($db_cv_o_direct) + AC_MSG_CHECKING(if --enable-posixmutexes option specified) AC_ARG_ENABLE(posixmutexes, [AC_HELP_STRING([--enable-posixmutexes], @@ -161,6 +178,7 @@ if test "$db_cv_smallbuild" = "yes"; then db_cv_build_hash="no" db_cv_build_queue="no" db_cv_build_replication="no" + db_cv_build_statistics="no" db_cv_build_verify="no" fi AC_MSG_RESULT($db_cv_smallbuild) @@ -189,7 +207,7 @@ AC_MSG_RESULT($db_cv_uimutexes) AC_MSG_CHECKING(if --enable-umrw option specified) AC_ARG_ENABLE(umrw, [AC_HELP_STRING([--enable-umrw], - [Mask harmless unitialized memory read/writes.])], + [Mask harmless uninitialized memory read/writes.])], [db_cv_umrw="$enable_umrw"], [db_cv_umrw="no"]) AC_MSG_RESULT($db_cv_umrw) @@ -221,22 +239,6 @@ if test "$with_mutexalign" != "no"; then fi AC_MSG_RESULT($with_mutexalign) -AC_MSG_CHECKING(if --with-rpm=ARCHIVE option specified) -AC_ARG_WITH(rpm, - [AC_HELP_STRING([--with-rpm=ARCHIVE], [Path of RPM archive.])], - [with_rpm="$withval"], [with_rpm="no"]) -if test "$with_rpm" = "no"; then - db_cv_rpm="no" - db_cv_path_rpm_archive="Not-configured-with-rpm=ARCHIVE" -else - if test "$with_rpm" = "yes"; then - AC_MSG_ERROR([--with-rpm requires an archive path argument]) - fi - db_cv_rpm="yes" - 
db_cv_path_rpm_archive="$with_rpm" -fi -AC_MSG_RESULT($with_rpm) - AC_MSG_CHECKING([if --with-tcl=DIR option specified]) AC_ARG_WITH(tcl, [AC_HELP_STRING([--with-tcl=DIR], @@ -271,4 +273,20 @@ if test "$db_cv_test" = "yes"; then if test "$db_cv_tcl" = "no"; then AC_MSG_ERROR([--enable-test requires --enable-tcl]) fi +fi + +# Uniquename excludes C++, Java, RPC. +if test "$db_cv_uniquename" = "yes"; then + if test "$db_cv_rpc" = "yes"; then + AC_MSG_ERROR( + [--with-uniquename is not compatible with --enable-rpc]) + fi + if test "$db_cv_cxx" = "yes"; then + AC_MSG_ERROR( + [--with-uniquename is not compatible with --enable-cxx]) + fi + if test "$db_cv_java" = "yes"; then + AC_MSG_ERROR( + [--with-uniquename is not compatible with --enable-java]) + fi fi]) diff --git a/db/dist/aclocal/programs.ac b/db/dist/aclocal/programs.ac index 7bfa1fa26..db6b4f03e 100644 --- a/db/dist/aclocal/programs.ac +++ b/db/dist/aclocal/programs.ac @@ -1,4 +1,4 @@ -# $Id: programs.ac,v 11.20 2001/09/24 02:09:25 bostic Exp $ +# $Id: programs.ac,v 11.22 2004/06/10 16:38:18 bostic Exp $ # Check for programs used in building/installation. AC_DEFUN(AM_PROGRAMS_SET, [ @@ -18,15 +18,6 @@ if test "$db_cv_path_cp" = missing_cp; then AC_MSG_ERROR([No cp utility found.]) fi -if test "$db_cv_rpm" = "yes"; then - AC_CHECK_TOOL(path_ldconfig, ldconfig, missing_ldconfig) - AC_PATH_PROG(db_cv_path_ldconfig, $path_ldconfig, missing_ldconfig) - if test "$db_cv_path_ldconfig" != missing_ldconfig; then - RPM_POST_INSTALL="%post -p $db_cv_path_ldconfig" - RPM_POST_UNINSTALL="%postun -p $db_cv_path_ldconfig" - fi -fi - AC_CHECK_TOOL(db_cv_path_ln, ln, missing_ln) if test "$db_cv_path_ln" = missing_ln; then AC_MSG_ERROR([No ln utility found.]) @@ -47,10 +38,10 @@ if test "$db_cv_path_rm" = missing_rm; then AC_MSG_ERROR([No rm utility found.]) fi -if test "$db_cv_rpm" = "yes"; then - AC_CHECK_TOOL(db_cv_path_rpm, rpm, missing_rpm) - if test "$db_cv_path_rpm" = missing_rpm; then - AC_MSG_ERROR([No rpm utility found.]) +if test "$db_cv_rpc" = "yes"; then + AC_CHECK_TOOL(db_cv_path_rpcgen, rpcgen, missing_rpcgen) + if test "$db_cv_path_rpcgen" = missing_rpcgen; then + AC_MSG_ERROR([No rpcgen utility found.]) fi fi diff --git a/db/dist/aclocal/rpc.ac b/db/dist/aclocal/rpc.ac new file mode 100644 index 000000000..7d7f4dabe --- /dev/null +++ b/db/dist/aclocal/rpc.ac @@ -0,0 +1,83 @@ +# $Id: rpc.ac,v 11.9 2004/09/27 21:33:48 mjc Exp $ + +# Try and configure RPC support. +AC_DEFUN(AM_RPC_CONFIGURE, [ + AC_DEFINE(HAVE_RPC) + AH_TEMPLATE(HAVE_RPC, [Define to 1 if building RPC client/server.]) + + # We use the target's rpcgen utility because it may be architecture + # specific, for example, 32- or 64-bit specific. + XDR_FILE=$srcdir/../rpc_server/db_server.x + + # Prefer the -C option to rpcgen which generates ANSI C-conformant + # code. + RPCGEN="rpcgen -C" + AC_MSG_CHECKING(["$RPCGEN" build of db_server.h]) + $RPCGEN -h $XDR_FILE > db_server.h 2>/dev/null + if test $? -ne 0; then + AC_MSG_RESULT([no]) + + # Try rpcgen without the -C option. + RPCGEN="rpcgen" + AC_MSG_CHECKING(["$RPCGEN" build of db_server.h]) + $RPCGEN -h $XDR_FILE > db_server.h 2>/dev/null + if test $? -ne 0; then + AC_MSG_RESULT([no]) + AC_MSG_ERROR( + [Unable to build RPC support: $RPCGEN failed.]) + fi + fi + + # Some rpcgen programs generate a set of client stubs called something + # like __db_env_create_4003 and functions on the server to handle the + # request called something like __db_env_create_4003_svc. 
Others + # expect client and server stubs to both be called __db_env_create_4003. + # + # We have to generate code in whichever format rpcgen expects, and the + # only reliable way to do that is to check what is in the db_server.h + # file we just created. + if grep "env_create_[[0-9]]*_svc" db_server.h >/dev/null 2>&1 ; then + sed 's/__SVCSUFFIX__/_svc/' \ + < $srcdir/../rpc_server/c/gen_db_server.c > gen_db_server.c + else + sed 's/__SVCSUFFIX__//' \ + < $srcdir/../rpc_server/c/gen_db_server.c > gen_db_server.c + fi + + AC_MSG_RESULT([yes]) + + $RPCGEN -l $XDR_FILE | + sed -e 's/^#include.*db_server.h.*/#include "db_server.h"/' \ + -e '1,/^#include/s/^#include/#include "db_config.h"\ +&/' > db_server_clnt.c + + $RPCGEN -s tcp $XDR_FILE | + sed -e 's/^#include.*db_server.h.*/#include "db_server.h"/' \ + -e 's/^main *()/__dbsrv_main()/' \ + -e 's/^main *(.*argc.*argv.*)/__dbsrv_main(int argc, char *argv[])/' \ + -e '/^db_rpc_serverprog/,/^}/{' \ + -e 's/return;//' \ + -e 's/^}/__dbsrv_timeout(0);}/' \ + -e '}' \ + -e '1,/^#include/s/^#include/#include "db_config.h"\ +&/' > db_server_svc.c + + $RPCGEN -c $XDR_FILE | + sed -e 's/^#include.*db_server.h.*/#include "db_server.h"/' \ + -e '1,/^#include/s/^#include/#include "db_config.h"\ +&/' > db_server_xdr.c + + RPC_SERVER_H=db_server.h + RPC_CLIENT_OBJS="\$(RPC_CLIENT_OBJS)" + ADDITIONAL_PROGS="berkeley_db_svc $ADDITIONAL_PROGS" + + case "$host_os" in + hpux*) + AC_CHECK_FUNC(svc_run,, + AC_CHECK_LIB(nsl, svc_run, + LIBS="-lnsl $LIBS"; LIBTSO_LIBS="-lnsl $LIBTSO_LIBS"; + LIBJSO_LIBS="-lnsl $LIBJSO_LIBS"));; + solaris*) + AC_CHECK_FUNC(svc_run,, AC_CHECK_LIB(nsl, svc_run));; + esac +]) diff --git a/db/dist/aclocal/sequence.ac b/db/dist/aclocal/sequence.ac new file mode 100644 index 000000000..278b16139 --- /dev/null +++ b/db/dist/aclocal/sequence.ac @@ -0,0 +1,67 @@ +# $Id: sequence.ac,v 1.1 2004/04/01 15:27:30 bostic Exp $ + +# Try and configure sequence support. +AC_DEFUN(AM_SEQUENCE_CONFIGURE, [ + AC_MSG_CHECKING([for 64-bit integral type support for sequences]) + db_cv_build_sequence="yes" + + # Have to be able to cast variables to the "unsigned long long" and + # "long long" types, that's our cast for the printf "%ll[du]" format. + if test "$ac_cv_type_long_long" = "no"; then + db_cv_build_sequence="no" + fi + if test "$ac_cv_type_unsigned_long_long" = "no"; then + db_cv_build_sequence="no" + fi + + # Test to see if we can declare variables of the appropriate size + # and format them. If we're cross-compiling, all we get is a link + # test, which won't test for the appropriate printf format strings. 
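# Illustrative sketch (not part of the patch): how the rpc.ac fragment above
# decides which server-stub naming convention the host rpcgen uses, run by
# hand.  The path to db_server.x and the output file name are assumptions for
# the example only.
RPCGEN="rpcgen -C"
$RPCGEN -h ../rpc_server/db_server.x > db_server.h
if grep "env_create_[0-9]*_svc" db_server.h >/dev/null 2>&1; then
	echo "rpcgen appends a _svc suffix to server stubs"
else
	echo "rpcgen uses the same name for client and server stubs"
fi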
+ if test "$db_cv_build_sequence" = "yes"; then + AC_TRY_RUN([ + main() { + long long l; + unsigned long long u; + char buf[100]; + + buf[0] = 'a'; + l = 9223372036854775807LL; + (void)snprintf(buf, sizeof(buf), "%lld", l); + if (strcmp(buf, "9223372036854775807")) + return (1); + u = 18446744073709551615ULL; + (void)snprintf(buf, sizeof(buf), "%llu", u); + if (strcmp(buf, "18446744073709551615")) + return (1); + return (0); + }],, [db_cv_build_sequence="no"], + AC_TRY_LINK(,[ + long long l; + unsigned long long u; + char buf[100]; + + buf[0] = 'a'; + l = 9223372036854775807LL; + (void)snprintf(buf, sizeof(buf), "%lld", l); + if (strcmp(buf, "9223372036854775807")) + return (1); + u = 18446744073709551615ULL; + (void)snprintf(buf, sizeof(buf), "%llu", u); + if (strcmp(buf, "18446744073709551615")) + return (1); + return (0); + ],, [db_cv_build_sequence="no"])) + fi + if test "$db_cv_build_sequence" = "yes"; then + AC_DEFINE(HAVE_SEQUENCE) + AH_TEMPLATE(HAVE_SEQUENCE, + [Define to 1 if building sequence support.]) + + AC_SUBST(db_seq_decl) + db_seq_decl="typedef int64_t db_seq_t;"; + else + # It still has to compile, but it won't run. + db_seq_decl="typedef int db_seq_t;"; + fi + AC_MSG_RESULT($db_cv_build_sequence) +]) diff --git a/db/dist/aclocal/sosuffix.ac b/db/dist/aclocal/sosuffix.ac index 6970d4706..8864280f1 100644 --- a/db/dist/aclocal/sosuffix.ac +++ b/db/dist/aclocal/sosuffix.ac @@ -1,4 +1,4 @@ -# $Id: sosuffix.ac,v 1.3 2003/04/19 05:11:55 dda Exp $ +# $Id: sosuffix.ac,v 1.4 2004/08/14 20:00:45 dda Exp $ # Determine shared object suffixes. # # Our method is to use the libtool variable $library_names_spec, @@ -18,11 +18,11 @@ AC_DEFUN(_SOSUFFIX_INTERNAL, [ versuffix="" release="" libname=libfoo - eval _SOSUFFIX=\"$shrext\" - if test "X$_SOSUFFIX" = "" ; then + eval _SOSUFFIX=\"$shrext_cmds\" + if test "$_SOSUFFIX" = "" ; then _SOSUFFIX=".so" if test `$LIBTOOL_PROG --config | grep build_libtool_libs | grep no` 2>/dev/null; then - if test "X$_SOSUFFIX_MESSAGE" = "X"; then + if test "$_SOSUFFIX_MESSAGE" = ""; then _SOSUFFIX_MESSAGE=yes AC_MSG_WARN([libtool may not know about this architecture.]) AC_MSG_WARN([assuming $_SUFFIX suffix for dynamic libraries.]) diff --git a/db/dist/aclocal/tcl.ac b/db/dist/aclocal/tcl.ac index 25e3a16d3..d28d36083 100644 --- a/db/dist/aclocal/tcl.ac +++ b/db/dist/aclocal/tcl.ac @@ -1,4 +1,4 @@ -# $Id: tcl.ac,v 11.17 2003/10/13 21:04:04 bostic Exp $ +# $Id: tcl.ac,v 11.18 2004/03/11 20:11:17 bostic Exp $ # The SC_* macros in this file are from the unix/tcl.m4 files in the Tcl # 8.3.0 distribution, with some minor changes. For this reason, license @@ -124,7 +124,6 @@ AC_DEFUN(SC_LOAD_TCLCONFIG, [ # Optional Tcl API. AC_DEFUN(AM_TCL_LOAD, [ -if test "$db_cv_tcl" = "yes"; then if test `$LIBTOOL_PROG --config | grep build_libtool_libs | grep no` 2>/dev/null; then AC_MSG_ERROR([Tcl requires shared libraries]) fi @@ -139,4 +138,4 @@ if test "$db_cv_tcl" = "yes"; then fi INSTALL_LIBS="${INSTALL_LIBS} \$(libtso_target)" -fi]) +]) diff --git a/db/dist/aclocal/types.ac b/db/dist/aclocal/types.ac index fdac504eb..aae40720e 100644 --- a/db/dist/aclocal/types.ac +++ b/db/dist/aclocal/types.ac @@ -1,4 +1,4 @@ -# $Id: types.ac,v 11.12 2003/09/04 23:59:06 bostic Exp $ +# $Id: types.ac,v 11.16 2004/09/15 21:49:17 mjc Exp $ # Check the sizes we know about, and see if any of them match what's needed. 
# @@ -14,6 +14,8 @@ AC_DEFUN(AM_SEARCH_USIZES, [ $1="typedef unsigned short $2;";; "$ac_cv_sizeof_unsigned_long") $1="typedef unsigned long $2;";; + "$ac_cv_sizeof_unsigned_long_long") + $1="typedef unsigned long long $2;";; *) AC_MSG_ERROR([No unsigned $3-byte integral type]);; esac]) @@ -27,6 +29,8 @@ AC_DEFUN(AM_SEARCH_SSIZES, [ $1="typedef short $2;";; "$ac_cv_sizeof_long") $1="typedef long $2;";; + "$ac_cv_sizeof_long_long") + $1="typedef long long $2;";; *) AC_MSG_ERROR([No signed $3-byte integral type]);; esac]) @@ -38,19 +42,41 @@ AC_DEFUN(AM_TYPES, [ # autoconf usually includes. For that reason, we specify a set of includes # for all type checking tests. [#5060] # +# C99 says types should be in ; include if it exists. +# +# Some systems have types in ; include if it exists. +# # IBM's OS/390 and z/OS releases have types in not also found -# in . Include as well, if it exists. -AC_SUBST(inttypes_decl) +# in ; include if it exists. db_includes="#include " +AC_SUBST(inttypes_h_decl) AC_CHECK_HEADER(inttypes.h, [ - inttypes_decl="#include " db_includes="$db_includes -#include "]) +#include " + inttypes_h_decl="#include "]) +AC_SUBST(stdint_h_decl) +AC_CHECK_HEADER(stdint.h, [ + db_includes="$db_includes +#include " + stdint_h_decl="#include "]) +AC_SUBST(stddef_h_decl) +AC_CHECK_HEADER(stddef.h, [ + db_includes="$db_includes +#include " + stddef_h_decl="#include "]) db_includes="$db_includes #include " +# We require off_t and size_t, and we don't try to substitute our own +# if we can't find them. +AC_CHECK_TYPE(off_t,, AC_MSG_ERROR([No off_t type.]), $db_includes) +AC_CHECK_TYPE(size_t,, AC_MSG_ERROR([No size_t type.]), $db_includes) + +# Check for long long and unsigned long long, we only support sequences +# if those types are available. +AC_CHECK_TYPES([long long, unsigned long long],,, $db_includes) + # We need to know the sizes of various objects on this system. -# We don't use the SIZEOF_XXX values created by autoconf. AC_CHECK_SIZEOF(char,, $db_includes) AC_CHECK_SIZEOF(unsigned char,, $db_includes) AC_CHECK_SIZEOF(short,, $db_includes) @@ -59,14 +85,11 @@ AC_CHECK_SIZEOF(int,, $db_includes) AC_CHECK_SIZEOF(unsigned int,, $db_includes) AC_CHECK_SIZEOF(long,, $db_includes) AC_CHECK_SIZEOF(unsigned long,, $db_includes) +AC_CHECK_SIZEOF(long long,, $db_includes) +AC_CHECK_SIZEOF(unsigned long long,, $db_includes) AC_CHECK_SIZEOF(size_t,, $db_includes) AC_CHECK_SIZEOF(char *,, $db_includes) -# We require off_t and size_t, and we don't try to substitute our own -# if we can't find them. -AC_CHECK_TYPE(off_t,, AC_MSG_ERROR([No off_t type.]), $db_includes) -AC_CHECK_TYPE(size_t,, AC_MSG_ERROR([No size_t type.]), $db_includes) - # We look for u_char, u_short, u_int, u_long -- if we can't find them, # we create our own. AC_SUBST(u_char_decl) @@ -85,6 +108,7 @@ AC_SUBST(u_long_decl) AC_CHECK_TYPE(u_long,, [u_long_decl="typedef unsigned long u_long;"], $db_includes) +# We look for fixed-size variants of u_char, u_short, u_int, u_long as well. 
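# Illustrative sketch (not part of the patch): the AC_CHECK_SIZEOF results the
# macros above consume can be previewed by hand; the fixed-size u_intN_t/intN_t
# typedefs fall back to whichever native type matches the requested width.
# Compiler name and file names are assumptions for the example only.
cat > sizes.c <<'EOF'
#include <sys/types.h>
#include <stdio.h>
int main() {
	printf("unsigned int:       %d\n", (int)sizeof(unsigned int));
	printf("unsigned long:      %d\n", (int)sizeof(unsigned long));
	printf("unsigned long long: %d\n", (int)sizeof(unsigned long long));
	printf("char *:             %d\n", (int)sizeof(char *));
	return (0);
}
EOF
cc sizes.c -o sizes && ./sizes
rm -f sizes sizes.c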
AC_SUBST(u_int8_decl) AC_CHECK_TYPE(u_int8_t,, [AM_SEARCH_USIZES(u_int8_decl, u_int8_t, 1)], $db_includes) @@ -105,6 +129,14 @@ AC_SUBST(int32_decl) AC_CHECK_TYPE(int32_t,, [AM_SEARCH_SSIZES(int32_decl, int32_t, 4)], $db_includes) +AC_SUBST(u_int64_decl) +AC_CHECK_TYPE(u_int64_t,, + [AM_SEARCH_SSIZES(u_int64_decl, u_int64_t, 8)], $db_includes) + +AC_SUBST(int64_decl) +AC_CHECK_TYPE(int64_t,, + [AM_SEARCH_SSIZES(int64_decl, int64_t, 8)], $db_includes) + # Check for ssize_t -- if none exists, find a signed integral type that's # the same size as a size_t. AC_SUBST(ssize_t_decl) @@ -112,13 +144,16 @@ AC_CHECK_TYPE(ssize_t,, [AM_SEARCH_SSIZES(ssize_t_decl, ssize_t, $ac_cv_sizeof_size_t)], $db_includes) -# Find the largest integral type. -AC_SUBST(db_align_t_decl) -AC_CHECK_TYPE(unsigned long long, - [db_align_t_decl="typedef unsigned long long db_align_t;"], - [db_align_t_decl="typedef unsigned long db_align_t;"], $db_includes) - -# Find an integral type which is the same size as a pointer. -AC_SUBST(db_alignp_t_decl) -AM_SEARCH_USIZES(db_alignp_t_decl, db_alignp_t, $ac_cv_sizeof_char_p) +# Check for uintmax_t -- if none exists, first the largest unsigned integral +# type available. +AC_SUBST(uintmax_t_decl) +AC_CHECK_TYPE(uintmax_t,, [AC_CHECK_TYPE(unsigned long long, + [uintmax_t_decl="typedef unsigned long long uintmax_t;"], + [uintmax_t_decl="typedef unsigned long uintmax_t;"], $db_includes)]) + +# Check for uintptr_t -- if none exists, find an integral type which is +# the same size as a pointer. +AC_SUBST(uintptr_t_decl) +AC_CHECK_TYPE(uintptr_t,, + [AM_SEARCH_USIZES(uintptr_t_decl, uintptr_t, $ac_cv_sizeof_char_p)]) ]) diff --git a/db/dist/buildrel b/db/dist/buildrel index 9187cc711..76f0fe417 100644 --- a/db/dist/buildrel +++ b/db/dist/buildrel @@ -1,4 +1,4 @@ -# $Id: buildrel,v 1.57 2003/11/28 19:21:02 bostic Exp $ +# $Id: buildrel,v 1.60 2004/10/14 15:32:29 bostic Exp $ # # Build the distribution package. # @@ -17,13 +17,15 @@ echo "Version: $VERSION" # Make sure the source tree is up-to-date cd $D && cvs -q update +# Build auto-generated files. +cd $D/dist && sh s_all + # Build the documentation. cd db.docs && cvs -q update -cd db.docs && sh build ../db.rel clean && sh build ../db.rel |& sed '/.html$/d' -cd db.docs && sh build ../db.rel javadoc +cd db.docs && sh build $D clean && sh build $D |& sed '/.html$/d' +cd je/docs_src && sh build db ../../db -# Generate new support files, commit anything that's changed. -cd $D/dist && sh s_all +# Commit all of the changes. cd $D && cvs -q commit # Copy a development tree into a release tree. diff --git a/db/dist/config.guess b/db/dist/config.guess index d56c46d84..7d0185e01 100755 --- a/db/dist/config.guess +++ b/db/dist/config.guess @@ -1,9 +1,9 @@ #! /bin/sh # Attempt to guess a canonical system name. # Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, -# 2000, 2001, 2002, 2003 Free Software Foundation, Inc. +# 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. -timestamp='2003-08-18' +timestamp='2004-09-07' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by @@ -53,7 +53,7 @@ version="\ GNU config.guess ($timestamp) Originally written by Per Bothner. -Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001 +Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This is free software; see the source for copying conditions. 
There is NO @@ -197,15 +197,21 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. echo "${machine}-${os}${release}" exit 0 ;; + amd64:OpenBSD:*:*) + echo x86_64-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; amiga:OpenBSD:*:*) echo m68k-unknown-openbsd${UNAME_RELEASE} exit 0 ;; - arc:OpenBSD:*:*) - echo mipsel-unknown-openbsd${UNAME_RELEASE} + cats:OpenBSD:*:*) + echo arm-unknown-openbsd${UNAME_RELEASE} exit 0 ;; hp300:OpenBSD:*:*) echo m68k-unknown-openbsd${UNAME_RELEASE} exit 0 ;; + luna88k:OpenBSD:*:*) + echo m88k-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; mac68k:OpenBSD:*:*) echo m68k-unknown-openbsd${UNAME_RELEASE} exit 0 ;; @@ -221,25 +227,33 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in mvmeppc:OpenBSD:*:*) echo powerpc-unknown-openbsd${UNAME_RELEASE} exit 0 ;; - pmax:OpenBSD:*:*) - echo mipsel-unknown-openbsd${UNAME_RELEASE} - exit 0 ;; sgi:OpenBSD:*:*) - echo mipseb-unknown-openbsd${UNAME_RELEASE} + echo mips64-unknown-openbsd${UNAME_RELEASE} exit 0 ;; sun3:OpenBSD:*:*) echo m68k-unknown-openbsd${UNAME_RELEASE} exit 0 ;; - wgrisc:OpenBSD:*:*) - echo mipsel-unknown-openbsd${UNAME_RELEASE} - exit 0 ;; *:OpenBSD:*:*) echo ${UNAME_MACHINE}-unknown-openbsd${UNAME_RELEASE} exit 0 ;; + *:ekkoBSD:*:*) + echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} + exit 0 ;; + macppc:MirBSD:*:*) + echo powerppc-unknown-mirbsd${UNAME_RELEASE} + exit 0 ;; + *:MirBSD:*:*) + echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} + exit 0 ;; alpha:OSF1:*:*) - if test $UNAME_RELEASE = "V4.0"; then + case $UNAME_RELEASE in + *4.0) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` - fi + ;; + *5.*) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` + ;; + esac # According to Compaq, /usr/sbin/psrinfo has been available on # OSF/1 and Tru64 systems produced since 1995. I hope that # covers most systems running today. This code pipes the CPU @@ -277,14 +291,12 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in "EV7.9 (21364A)") UNAME_MACHINE="alphaev79" ;; esac + # A Pn.n version is a patched version. # A Vn.n version is a released version. # A Tn.n version is a released field test version. # A Xn.n version is an unreleased experimental baselevel. # 1.2 uses "1.2" for uname -r. - echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[VTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` - exit 0 ;; - Alpha*:OpenVMS:*:*) - echo alpha-hp-vms + echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` exit 0 ;; Alpha\ *:Windows_NT*:*) # How do we know it's Interix rather than the generic POSIX subsystem? 
@@ -307,6 +319,9 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in *:OS/390:*:*) echo i370-ibm-openedition exit 0 ;; + *:OS400:*:*) + echo powerpc-ibm-os400 + exit 0 ;; arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) echo arm-acorn-riscix${UNAME_RELEASE} exit 0;; @@ -399,6 +414,9 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) echo m68k-unknown-mint${UNAME_RELEASE} exit 0 ;; + m68k:machten:*:*) + echo m68k-apple-machten${UNAME_RELEASE} + exit 0 ;; powerpc:machten:*:*) echo powerpc-apple-machten${UNAME_RELEASE} exit 0 ;; @@ -734,7 +752,7 @@ EOF echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit 0 ;; *:UNICOS/mp:*:*) - echo nv1-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit 0 ;; F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` @@ -742,6 +760,11 @@ EOF FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit 0 ;; + 5000:UNIX_System_V:4.*:*) + FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` + echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit 0 ;; i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} exit 0 ;; @@ -751,22 +774,8 @@ EOF *:BSD/OS:*:*) echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} exit 0 ;; - *:FreeBSD:*:*|*:GNU/FreeBSD:*:*) - # Determine whether the default compiler uses glibc. - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #include - #if __GLIBC__ >= 2 - LIBC=gnu - #else - LIBC= - #endif -EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^LIBC=` - # GNU/FreeBSD systems have a "k" prefix to indicate we are using - # FreeBSD's kernel, but not the complete OS. 
- case ${LIBC} in gnu) kernel_only='k' ;; esac - echo ${UNAME_MACHINE}-unknown-${kernel_only}freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`${LIBC:+-$LIBC} + *:FreeBSD:*:*) + echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` exit 0 ;; i*:CYGWIN*:*) echo ${UNAME_MACHINE}-pc-cygwin @@ -799,8 +808,13 @@ EOF echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit 0 ;; *:GNU:*:*) + # the GNU system echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` exit 0 ;; + *:GNU/*:*:*) + # other systems with GNU libc and userland + echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu + exit 0 ;; i*86:Minix:*:*) echo ${UNAME_MACHINE}-pc-minix exit 0 ;; @@ -810,9 +824,18 @@ EOF cris:Linux:*:*) echo cris-axis-linux-gnu exit 0 ;; + crisv32:Linux:*:*) + echo crisv32-axis-linux-gnu + exit 0 ;; + frv:Linux:*:*) + echo frv-unknown-linux-gnu + exit 0 ;; ia64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit 0 ;; + m32r*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit 0 ;; m68*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit 0 ;; @@ -981,6 +1004,9 @@ EOF i*86:atheos:*:*) echo ${UNAME_MACHINE}-unknown-atheos exit 0 ;; + i*86:syllable:*:*) + echo ${UNAME_MACHINE}-pc-syllable + exit 0 ;; i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.0*:*) echo i386-unknown-lynxos${UNAME_RELEASE} exit 0 ;; @@ -1050,9 +1076,9 @@ EOF M680?0:D-NIX:5.3:*) echo m68k-diab-dnix exit 0 ;; - M68*:*:R3V[567]*:*) + M68*:*:R3V[5678]*:*) test -r /sysV68 && echo 'm68k-motorola-sysv' && exit 0 ;; - 3[34]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0) + 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) OS_REL='' test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` @@ -1150,9 +1176,10 @@ EOF echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} exit 0 ;; *:Darwin:*:*) - case `uname -p` in + UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown + case $UNAME_PROCESSOR in *86) UNAME_PROCESSOR=i686 ;; - powerpc) UNAME_PROCESSOR=powerpc ;; + unknown) UNAME_PROCESSOR=powerpc ;; esac echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} exit 0 ;; @@ -1167,7 +1194,7 @@ EOF *:QNX:*:4*) echo i386-pc-qnx exit 0 ;; - NSR-[DGKLNPTVW]:NONSTOP_KERNEL:*:*) + NSR-?:NONSTOP_KERNEL:*:*) echo nsr-tandem-nsk${UNAME_RELEASE} exit 0 ;; *:NonStop-UX:*:*) @@ -1211,6 +1238,16 @@ EOF SEI:*:*:SEIUX) echo mips-sei-seiux${UNAME_RELEASE} exit 0 ;; + *:DragonFly:*:*) + echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + exit 0 ;; + *:*VMS:*:*) + UNAME_MACHINE=`(uname -p) 2>/dev/null` + case "${UNAME_MACHINE}" in + A*) echo alpha-dec-vms && exit 0 ;; + I*) echo ia64-dec-vms && exit 0 ;; + V*) echo vax-dec-vms && exit 0 ;; + esac esac #echo '(No uname command or uname output not recognized.)' 1>&2 diff --git a/db/dist/config.hin b/db/dist/config.hin index 78fbe74b3..c7ba05ec0 100644 --- a/db/dist/config.hin +++ b/db/dist/config.hin @@ -43,9 +43,15 @@ /* Define to 1 if fcntl/F_SETFD denies child access to file descriptors. */ #undef HAVE_FCNTL_F_SETFD +/* Define to 1 if you have the `fdatasync' function. 
*/ +#undef HAVE_FDATASYNC + /* Define to 1 if allocated filesystem blocks are not zeroed. */ #undef HAVE_FILESYSTEM_NOTZERO +/* Define to 1 if you have the `ftruncate' function. */ +#undef HAVE_FTRUNCATE + /* Define to 1 if you have the `getcwd' function. */ #undef HAVE_GETCWD @@ -70,6 +76,9 @@ /* Define to 1 if you have the `nsl' library (-lnsl). */ #undef HAVE_LIBNSL +/* Define to 1 if the system has the type `long long'. */ +#undef HAVE_LONG_LONG + /* Define to 1 if you have the `memcmp' function. */ #undef HAVE_MEMCMP @@ -217,6 +226,9 @@ /* Define to 1 if you have the `raise' function. */ #undef HAVE_RAISE +/* Define to 1 if you have the `rand' function. */ +#undef HAVE_RAND + /* Define to 1 if building replication support. */ #undef HAVE_REPLICATION @@ -229,12 +241,21 @@ /* Define to 1 if you have the `select' function. */ #undef HAVE_SELECT +/* Define to 1 if building sequence support. */ +#undef HAVE_SEQUENCE + /* Define to 1 if you have the `shmget' function. */ #undef HAVE_SHMGET /* Define to 1 if you have the `snprintf' function. */ #undef HAVE_SNPRINTF +/* Define to 1 if you have the `srand' function. */ +#undef HAVE_SRAND + +/* Define to 1 if building statistics support. */ +#undef HAVE_STATISTICS + /* Define to 1 if you have the header file. */ #undef HAVE_STDINT_H @@ -294,6 +315,9 @@ /* Define to 1 if unlink of file with open file descriptors will fail. */ #undef HAVE_UNLINK_WITH_OPEN_FAILURE +/* Define to 1 if the system has the type `unsigned long long'. */ +#undef HAVE_UNSIGNED_LONG_LONG + /* Define to 1 if building access method verification support. */ #undef HAVE_VERIFY @@ -339,6 +363,9 @@ /* The size of a `long', as computed by sizeof. */ #undef SIZEOF_LONG +/* The size of a `long long', as computed by sizeof. */ +#undef SIZEOF_LONG_LONG + /* The size of a `short', as computed by sizeof. */ #undef SIZEOF_SHORT @@ -354,6 +381,9 @@ /* The size of a `unsigned long', as computed by sizeof. */ #undef SIZEOF_UNSIGNED_LONG +/* The size of a `unsigned long long', as computed by sizeof. */ +#undef SIZEOF_UNSIGNED_LONG_LONG + /* The size of a `unsigned short', as computed by sizeof. */ #undef SIZEOF_UNSIGNED_SHORT @@ -366,7 +396,7 @@ /* Define to 1 if you can safely include both and . */ #undef TIME_WITH_SYS_TIME -/* Define to 1 to mask harmless unitialized memory read/writes. */ +/* Define to 1 to mask harmless uninitialized memory read/writes. */ #undef UMRW /* Number of bits in a file offset, on hosts where this is settable. */ diff --git a/db/dist/config.sub b/db/dist/config.sub index 54efcfc18..edb6b663c 100755 --- a/db/dist/config.sub +++ b/db/dist/config.sub @@ -1,9 +1,9 @@ #! /bin/sh # Configuration validation subroutine script. # Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, -# 2000, 2001, 2002, 2003 Free Software Foundation, Inc. +# 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. -timestamp='2003-08-18' +timestamp='2004-08-29' # This file is (in principle) common to ALL GNU software. # The presence of a machine in this file suggests that SOME GNU software @@ -70,7 +70,7 @@ Report bugs and patches to ." version="\ GNU config.sub ($timestamp) -Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001 +Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO @@ -118,7 +118,8 @@ esac # Here we must recognize all the valid KERNEL-OS combinations. 
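# Usage sketch (not part of the patch): config.guess prints the build triplet,
# and config.sub canonicalizes a user-supplied alias into the CPU-VENDOR-OS
# form matched below.  The config.guess output is illustrative only; the two
# config.sub results follow from the aliases added in this change.
sh ./config.guess         # e.g. x86_64-unknown-linux-gnu
sh ./config.sub os400     # -> powerpc-ibm-os400
sh ./config.sub craynv    # -> craynv-cray-unicosmp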
maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` case $maybe_os in - nto-qnx* | linux-gnu* | linux-dietlibc | kfreebsd*-gnu* | netbsd*-gnu* | storm-chaos* | os2-emx* | rtmk-nova*) + nto-qnx* | linux-gnu* | linux-dietlibc | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | \ + kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | storm-chaos* | os2-emx* | rtmk-nova*) os=-$maybe_os basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` ;; @@ -144,7 +145,7 @@ case $os in -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ - -apple | -axis) + -apple | -axis | -knuth | -cray) os= basic_machine=$1 ;; @@ -236,7 +237,7 @@ case $basic_machine in | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ | i370 | i860 | i960 | ia64 \ | ip2k | iq2000 \ - | m32r | m68000 | m68k | m88k | mcore \ + | m32r | m32rle | m68000 | m68k | m88k | mcore \ | mips | mipsbe | mipseb | mipsel | mipsle \ | mips16 \ | mips64 | mips64el \ @@ -248,6 +249,7 @@ case $basic_machine in | mipsisa32 | mipsisa32el \ | mipsisa32r2 | mipsisa32r2el \ | mipsisa64 | mipsisa64el \ + | mipsisa64r2 | mipsisa64r2el \ | mipsisa64sb1 | mipsisa64sb1el \ | mipsisa64sr71k | mipsisa64sr71kel \ | mipstx39 | mipstx39el \ @@ -298,7 +300,7 @@ case $basic_machine in | avr-* \ | bs2000-* \ | c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \ - | clipper-* | cydra-* \ + | clipper-* | craynv-* | cydra-* \ | d10v-* | d30v-* | dlx-* \ | elxsi-* \ | f30[01]-* | f700-* | fr30-* | frv-* | fx80-* \ @@ -306,7 +308,7 @@ case $basic_machine in | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ | i*86-* | i860-* | i960-* | ia64-* \ | ip2k-* | iq2000-* \ - | m32r-* \ + | m32r-* | m32rle-* \ | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ | m88110-* | m88k-* | mcore-* \ | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ @@ -320,11 +322,13 @@ case $basic_machine in | mipsisa32-* | mipsisa32el-* \ | mipsisa32r2-* | mipsisa32r2el-* \ | mipsisa64-* | mipsisa64el-* \ + | mipsisa64r2-* | mipsisa64r2el-* \ | mipsisa64sb1-* | mipsisa64sb1el-* \ | mipsisa64sr71k-* | mipsisa64sr71kel-* \ | mipstx39-* | mipstx39el-* \ + | mmix-* \ | msp430-* \ - | none-* | np1-* | nv1-* | ns16k-* | ns32k-* \ + | none-* | np1-* | ns16k-* | ns32k-* \ | orion-* \ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \ @@ -360,6 +364,9 @@ case $basic_machine in basic_machine=a29k-amd os=-udi ;; + abacus) + basic_machine=abacus-unknown + ;; adobe68k) basic_machine=m68010-adobe os=-scout @@ -377,6 +384,9 @@ case $basic_machine in amd64) basic_machine=x86_64-pc ;; + amd64-*) + basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; amdahl) basic_machine=580-amdahl os=-sysv @@ -436,12 +446,27 @@ case $basic_machine in basic_machine=j90-cray os=-unicos ;; + craynv) + basic_machine=craynv-cray + os=-unicosmp + ;; + cr16c) + basic_machine=cr16c-unknown + os=-elf + ;; crds | unos) basic_machine=m68k-crds ;; + crisv32 | crisv32-* | etraxfs*) + basic_machine=crisv32-axis + ;; cris | cris-* | etrax*) basic_machine=cris-axis ;; + crx) + basic_machine=crx-unknown + os=-elf + ;; da30 | da30-*) basic_machine=m68k-da30 ;; @@ -642,10 +667,6 @@ case $basic_machine in mips3*) basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown ;; - mmix*) - basic_machine=mmix-knuth - os=-mmixware - ;; monitor) 
basic_machine=m68k-rom68k os=-coff @@ -726,10 +747,6 @@ case $basic_machine in np1) basic_machine=np1-gould ;; - nv1) - basic_machine=nv1-cray - os=-unicosmp - ;; nsr-tandem) basic_machine=nsr-tandem ;; @@ -741,6 +758,10 @@ case $basic_machine in basic_machine=or32-unknown os=-coff ;; + os400) + basic_machine=powerpc-ibm + os=-os400 + ;; OSE68000 | ose68000) basic_machine=m68000-ericsson os=-ose @@ -961,6 +982,10 @@ case $basic_machine in tower | tower-32) basic_machine=m68k-ncr ;; + tpf) + basic_machine=s390x-ibm + os=-tpf + ;; udi29k) basic_machine=a29k-amd os=-udi @@ -1034,6 +1059,9 @@ case $basic_machine in romp) basic_machine=romp-ibm ;; + mmix) + basic_machine=mmix-knuth + ;; rs6000) basic_machine=rs6000-ibm ;; @@ -1129,19 +1157,20 @@ case $os in | -aos* \ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ - | -hiux* | -386bsd* | -netbsd* | -openbsd* | -kfreebsd* | -freebsd* | -riscix* \ - | -lynxos* | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ + | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* | -openbsd* \ + | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ + | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ | -chorusos* | -chorusrdb* \ | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ - | -mingw32* | -linux-gnu* | -uxpv* | -beos* | -mpeix* | -udk* \ + | -mingw32* | -linux-gnu* | -linux-uclibc* | -uxpv* | -beos* | -mpeix* | -udk* \ | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ - | -powermax* | -dnix* | -nx6 | -nx7 | -sei*) + | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly*) # Remember, each alternative MUST END IN *, to match a version number. ;; -qnx*) @@ -1180,6 +1209,9 @@ case $os in -opened*) os=-openedition ;; + -os400*) + os=-os400 + ;; -wince*) os=-wince ;; @@ -1201,6 +1233,9 @@ case $os in -atheos*) os=-atheos ;; + -syllable*) + os=-syllable + ;; -386bsd) os=-bsd ;; @@ -1223,6 +1258,9 @@ case $os in -sinix*) os=-sysv4 ;; + -tpf*) + os=-tpf + ;; -triton*) os=-sysv3 ;; @@ -1339,6 +1377,9 @@ case $basic_machine in *-ibm) os=-aix ;; + *-knuth) + os=-mmixware + ;; *-wec) os=-proelf ;; @@ -1471,9 +1512,15 @@ case $basic_machine in -mvs* | -opened*) vendor=ibm ;; + -os400*) + vendor=ibm + ;; -ptx*) vendor=sequent ;; + -tpf*) + vendor=ibm + ;; -vxsim* | -vxworks* | -windiss*) vendor=wrs ;; diff --git a/db/dist/configure b/db/dist/configure index cca842cdf..2a9cad4cf 100755 --- a/db/dist/configure +++ b/db/dist/configure @@ -1,11 +1,10 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.57 for Berkeley DB 4.2.52. +# Generated by GNU Autoconf 2.59 for Berkeley DB 4.3.14. # # Report bugs to . # -# Copyright 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, 2002 -# Free Software Foundation, Inc. +# Copyright (C) 2003 Free Software Foundation, Inc. # This configure script is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. 
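# Usage sketch (not part of the patch): the configure script below is generated
# output from autoconf 2.59.  As the buildrel change above shows, the
# auto-generated files are rebuilt from the dist sources before a release; the
# source-tree placeholder is an assumption of the example.
cd <source-tree>/dist && sh s_all   # regenerate configure and related files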
## --------------------- ## @@ -22,9 +21,10 @@ if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then elif test -n "${BASH_VERSION+set}" && (set -o posix) >/dev/null 2>&1; then set -o posix fi +DUALCASE=1; export DUALCASE # for MKS sh # Support unset when possible. -if (FOO=FOO; unset FOO) >/dev/null 2>&1; then +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then as_unset=unset else as_unset=false @@ -43,7 +43,7 @@ for as_var in \ LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \ LC_TELEPHONE LC_TIME do - if (set +x; test -n "`(eval $as_var=C; export $as_var) 2>&1`"); then + if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then eval $as_var=C; export $as_var else $as_unset $as_var @@ -220,16 +220,17 @@ rm -f conf$$ conf$$.exe conf$$.file if mkdir -p . 2>/dev/null; then as_mkdir_p=: else + test -d ./-p && rmdir ./-p as_mkdir_p=false fi as_executable_p="test -f" # Sed expression to map a string onto a valid CPP name. -as_tr_cpp="sed y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g" +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. -as_tr_sh="sed y%*+%pp%;s%[^_$as_cr_alnum]%_%g" +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" # IFS @@ -279,7 +280,7 @@ fi # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. -if test "X${CDPATH+set}" = Xset; then CDPATH=:; export CDPATH; fi +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH if test -z "$ECHO"; then if test "X${echo_test_string+set}" != Xset; then @@ -392,9 +393,9 @@ fi -tagnames=`echo "$tagnames,CXX" | sed 's/^,//'` +tagnames=${tagnames+${tagnames},}CXX -tagnames=`echo "$tagnames,F77" | sed 's/^,//'` +tagnames=${tagnames+${tagnames},}F77 # Name of the host. # hostname on some systems (SVR3.2, Linux) returns a bogus exit status, @@ -421,13 +422,13 @@ SHELL=${CONFIG_SHELL-/bin/sh} # Identity of this package. PACKAGE_NAME='Berkeley DB' -PACKAGE_TARNAME='db-4.2.52' -PACKAGE_VERSION='4.2.52' -PACKAGE_STRING='Berkeley DB 4.2.52' +PACKAGE_TARNAME='db-4.3.14' +PACKAGE_VERSION='4.3.14' +PACKAGE_STRING='Berkeley DB 4.3.14' PACKAGE_BUGREPORT='support@sleepycat.com' ac_unique_file="../db/db.c" -ac_default_prefix=/usr/local/BerkeleyDB.4.2 +ac_default_prefix=/usr/local/BerkeleyDB.4.3 # Factoring default headers for most tests. 
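# Usage sketch (not part of the patch): a 4.3 build that exercises the options
# added in this change, using the default installation prefix shown above.  The
# build_unix directory name is the conventional layout and an assumption of the
# example.  Note that --enable-smallbuild now implies --disable-statistics too.
cd build_unix
../dist/configure \
	--prefix=/usr/local/BerkeleyDB.4.3 \
	--disable-statistics \
	--enable-o_direct
make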
ac_includes_default="\ #include @@ -465,7 +466,7 @@ ac_includes_default="\ # include #endif" -ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS build build_cpu build_vendor build_os host host_cpu host_vendor host_os ADDITIONAL_INCS ADDITIONAL_LANG ADDITIONAL_OBJS ADDITIONAL_PROGS BUILD_TARGET CFLAGS CONFIGURATION_ARGS CONFIGURATION_PATH CPPFLAGS CRYPTO_OBJS CXX CXXFLAGS DEFAULT_LIB DEFAULT_LIB_CXX INSTALLER INSTALL_LIBS INSTALL_TARGET JAR JAVACFLAGS LDFLAGS LIBJSO_LIBS LIBSO_LIBS LIBTOOL LIBTSO_LIBS LIBTSO_MODSUFFIX LIBTSO_MODULE LIBXSO_LIBS LOAD_LIBS MAKEFILE_CC MAKEFILE_CCLINK MAKEFILE_CXX MAKEFILE_CXXLINK MAKEFILE_SOLINK MAKEFILE_XSOLINK OSDIR POSTLINK REPLACEMENT_OBJS RPC_CLIENT_OBJS RPM_BUILD RPM_POST_INSTALL RPM_POST_UNINSTALL SOFLAGS db_cv_path_rpm_archive db_int_def o DB_VERSION_MAJOR DB_VERSION_MINOR DB_VERSION_PATCH DB_VERSION_STRING DB_VERSION_UNIQUE_NAME db_cv_path_ar ac_ct_db_cv_path_ar db_cv_path_chmod ac_ct_db_cv_path_chmod db_cv_path_cp ac_ct_db_cv_path_cp path_ldconfig ac_ct_path_ldconfig db_cv_path_ldconfig db_cv_path_ln ac_ct_db_cv_path_ln db_cv_path_mkdir ac_ct_db_cv_path_mkdir path_ranlib ac_ct_path_ranlib db_cv_path_ranlib db_cv_path_rm ac_ct_db_cv_path_rm db_cv_path_rpm ac_ct_db_cv_path_rpm path_sh ac_ct_path_sh db_cv_path_sh path_strip ac_ct_path_strip db_cv_path_strip db_cv_path_kill ac_ct_db_cv_path_kill INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA db_cv_build_type CC ac_ct_CC EXEEXT OBJEXT DB_PROTO1 DB_PROTO2 DB_CONST CCC ac_ct_CCC ac_ct_CXX CXXCPP cxx_have_stdheaders EGREP LN_S ECHO AR ac_ct_AR RANLIB ac_ct_RANLIB STRIP ac_ct_STRIP CPP F77 FFLAGS ac_ct_F77 SOSUFFIX MODSUFFIX JMODSUFFIX JAVAC JAVA uudecode _ACJNI_JAVAC TCFLAGS TCL_BIN_DIR TCL_SRC_DIR TCL_LIB_FILE TCL_TCLSH inttypes_decl u_char_decl u_short_decl u_int_decl u_long_decl u_int8_decl u_int16_decl int16_decl u_int32_decl int32_decl ssize_t_decl db_align_t_decl db_alignp_t_decl LIBOBJS LTLIBOBJS' +ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS build build_cpu build_vendor build_os host host_cpu host_vendor host_os ADDITIONAL_INCS ADDITIONAL_LANG ADDITIONAL_OBJS ADDITIONAL_PROGS BUILD_TARGET CFLAGS CONFIGURATION_ARGS CONFIGURATION_PATH CPPFLAGS CRYPTO_OBJS CXX CXXFLAGS DB_PROTO1 DB_PROTO2 DEFAULT_LIB DEFAULT_LIB_CXX INSTALLER INSTALL_LIBS INSTALL_TARGET JAR JAVACFLAGS LDFLAGS LIBJSO_LIBS LIBSO_LIBS LIBTOOL LIBTSO_LIBS LIBTSO_MODSUFFIX LIBTSO_MODULE LIBXSO_LIBS MAKEFILE_CC MAKEFILE_CCLINK MAKEFILE_CXX MAKEFILE_CXXLINK MAKEFILE_SOLINK MAKEFILE_XSOLINK OSDIR POSTLINK REPLACEMENT_OBJS RPC_CLIENT_OBJS RPC_SERVER_H SOFLAGS TEST_LIBS db_int_def o DB_VERSION_MAJOR DB_VERSION_MINOR DB_VERSION_PATCH DB_VERSION_STRING DB_VERSION_UNIQUE_NAME db_cv_path_ar ac_ct_db_cv_path_ar db_cv_path_chmod ac_ct_db_cv_path_chmod db_cv_path_cp ac_ct_db_cv_path_cp db_cv_path_ln ac_ct_db_cv_path_ln db_cv_path_mkdir ac_ct_db_cv_path_mkdir path_ranlib ac_ct_path_ranlib db_cv_path_ranlib db_cv_path_rm ac_ct_db_cv_path_rm db_cv_path_rpcgen 
ac_ct_db_cv_path_rpcgen path_sh ac_ct_path_sh db_cv_path_sh path_strip ac_ct_path_strip db_cv_path_strip db_cv_path_kill ac_ct_db_cv_path_kill INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA db_cv_build_type CC ac_ct_CC EXEEXT OBJEXT DB_CONST CCC ac_ct_CCC ac_ct_CXX CXXCPP cxx_have_stdheaders EGREP LN_S ECHO AR ac_ct_AR RANLIB ac_ct_RANLIB STRIP ac_ct_STRIP CPP F77 FFLAGS ac_ct_F77 SOSUFFIX MODSUFFIX JMODSUFFIX JAVAC JAVA uudecode _ACJNI_JAVAC inttypes_h_decl stdint_h_decl stddef_h_decl u_char_decl u_short_decl u_int_decl u_long_decl u_int8_decl u_int16_decl int16_decl u_int32_decl int32_decl u_int64_decl int64_decl ssize_t_decl uintmax_t_decl uintptr_t_decl LIBOBJS TCFLAGS TCL_BIN_DIR TCL_SRC_DIR TCL_LIB_FILE TCL_TCLSH db_seq_decl LTLIBOBJS' ac_subst_files='' # Initialize some variables set by options. @@ -824,7 +825,7 @@ done # Be sure to have absolute paths. for ac_var in bindir sbindir libexecdir datadir sysconfdir sharedstatedir \ - localstatedir libdir includedir oldincludedir infodir mandir + localstatedir libdir includedir oldincludedir infodir mandir do eval ac_val=$`echo $ac_var` case $ac_val in @@ -864,10 +865,10 @@ if test -z "$srcdir"; then # Try the directory containing this script, then its parent. ac_confdir=`(dirname "$0") 2>/dev/null || $as_expr X"$0" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$0" : 'X\(//\)[^/]' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| \ - . : '\(.\)' 2>/dev/null || + X"$0" : 'X\(//\)[^/]' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || echo X"$0" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } /^X\(\/\/\)[^/].*/{ s//\1/; q; } @@ -954,7 +955,7 @@ if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures Berkeley DB 4.2.52 to adapt to many kinds of systems. +\`configure' configures Berkeley DB 4.3.14 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -979,9 +980,9 @@ _ACEOF cat <<_ACEOF Installation directories: --prefix=PREFIX install architecture-independent files in PREFIX - [$ac_default_prefix] + [$ac_default_prefix] --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX - [PREFIX] + [PREFIX] By default, \`make install' will install all the files in \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify @@ -1020,7 +1021,7 @@ fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of Berkeley DB 4.2.52:";; + short | recursive ) echo "Configuration of Berkeley DB 4.3.14:";; esac cat <<\_ACEOF @@ -1032,6 +1033,7 @@ Optional Features: --disable-hash Do not build Hash access method. --disable-queue Do not build Queue access method. --disable-replication Do not build database replication support. + --disable-statistics Do not build statistics support. --disable-verify Do not build database verification support. --enable-compat185 Build DB 1.85 compatibility API. --enable-cxx Build C++ API. @@ -1042,13 +1044,14 @@ Optional Features: --enable-dump185 Build db_dump185(1) to dump 1.85 databases. --enable-java Build Java API. --enable-mingw Build Berkeley DB for MinGW. + --enable-o_direct Enable the O_DIRECT flag for direct I/O. --enable-posixmutexes Force use of POSIX standard mutexes. --enable-rpc Build RPC client/server. --enable-smallbuild Build small footprint version of the library. --enable-tcl Build Tcl API. --enable-test Configure to run the test suite. 
--enable-uimutexes Force use of Unix International mutexes. - --enable-umrw Mask harmless unitialized memory read/writes. + --enable-umrw Mask harmless uninitialized memory read/writes. --enable-shared[=PKGS] build shared libraries [default=yes] --enable-static[=PKGS] @@ -1064,7 +1067,6 @@ Optional Packages: --with-mutex=MUTEX Selection of non-standard mutexes. --with-mutexalign=ALIGNMENT Selection of non-standard mutex alignment. - --with-rpm=ARCHIVE Path of RPM archive. --with-tcl=DIR Directory location of tclConfig.sh. --with-uniquename=NAME Build a uniquely named library. --with-gnu-ld assume the C compiler uses GNU ld [default=no] @@ -1124,12 +1126,45 @@ case $srcdir in ac_srcdir=$ac_top_builddir$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_builddir$srcdir ;; esac -# Don't blindly perform a `cd "$ac_dir"/$ac_foo && pwd` since $ac_foo can be -# absolute. -ac_abs_builddir=`cd "$ac_dir" && cd $ac_builddir && pwd` -ac_abs_top_builddir=`cd "$ac_dir" && cd ${ac_top_builddir}. && pwd` -ac_abs_srcdir=`cd "$ac_dir" && cd $ac_srcdir && pwd` -ac_abs_top_srcdir=`cd "$ac_dir" && cd $ac_top_srcdir && pwd` + +# Do not use `cd foo && pwd` to compute absolute paths, because +# the directories may not exist. +case `pwd` in +.) ac_abs_builddir="$ac_dir";; +*) + case "$ac_dir" in + .) ac_abs_builddir=`pwd`;; + [\\/]* | ?:[\\/]* ) ac_abs_builddir="$ac_dir";; + *) ac_abs_builddir=`pwd`/"$ac_dir";; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_top_builddir=${ac_top_builddir}.;; +*) + case ${ac_top_builddir}. in + .) ac_abs_top_builddir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_top_builddir=${ac_top_builddir}.;; + *) ac_abs_top_builddir=$ac_abs_builddir/${ac_top_builddir}.;; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_srcdir=$ac_srcdir;; +*) + case $ac_srcdir in + .) ac_abs_srcdir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_srcdir=$ac_srcdir;; + *) ac_abs_srcdir=$ac_abs_builddir/$ac_srcdir;; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_top_srcdir=$ac_top_srcdir;; +*) + case $ac_top_srcdir in + .) ac_abs_top_srcdir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_top_srcdir=$ac_top_srcdir;; + *) ac_abs_top_srcdir=$ac_abs_builddir/$ac_top_srcdir;; + esac;; +esac cd $ac_dir # Check for guested configure; otherwise get Cygnus style configure. @@ -1140,7 +1175,7 @@ ac_abs_top_srcdir=`cd "$ac_dir" && cd $ac_top_srcdir && pwd` echo $SHELL $ac_srcdir/configure --help=recursive elif test -f $ac_srcdir/configure.ac || - test -f $ac_srcdir/configure.in; then + test -f $ac_srcdir/configure.in; then echo $ac_configure --help else @@ -1153,11 +1188,10 @@ fi test -n "$ac_init_help" && exit 0 if $ac_init_version; then cat <<\_ACEOF -Berkeley DB configure 4.2.52 -generated by GNU Autoconf 2.57 +Berkeley DB configure 4.3.14 +generated by GNU Autoconf 2.59 -Copyright 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, 2002 -Free Software Foundation, Inc. +Copyright (C) 2003 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. _ACEOF @@ -1168,8 +1202,8 @@ cat >&5 <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by Berkeley DB $as_me 4.2.52, which was -generated by GNU Autoconf 2.57. Invocation command line was +It was created by Berkeley DB $as_me 4.3.14, which was +generated by GNU Autoconf 2.59. 
Invocation command line was $ $0 $@ @@ -1246,19 +1280,19 @@ do 2) ac_configure_args1="$ac_configure_args1 '$ac_arg'" if test $ac_must_keep_next = true; then - ac_must_keep_next=false # Got value, back to normal. + ac_must_keep_next=false # Got value, back to normal. else - case $ac_arg in - *=* | --config-cache | -C | -disable-* | --disable-* \ - | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ - | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ - | -with-* | --with-* | -without-* | --without-* | --x) - case "$ac_configure_args0 " in - "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; - esac - ;; - -* ) ac_must_keep_next=true ;; - esac + case $ac_arg in + *=* | --config-cache | -C | -disable-* | --disable-* \ + | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ + | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ + | -with-* | --with-* | -without-* | --without-* | --x) + case "$ac_configure_args0 " in + "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; + esac + ;; + -* ) ac_must_keep_next=true ;; + esac fi ac_configure_args="$ac_configure_args$ac_sep'$ac_arg'" # Get rid of the leading space. @@ -1292,12 +1326,12 @@ _ASBOX case `(ac_space='"'"' '"'"'; set | grep ac_space) 2>&1` in *ac_space=\ *) sed -n \ - "s/'"'"'/'"'"'\\\\'"'"''"'"'/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='"'"'\\2'"'"'/p" + "s/'"'"'/'"'"'\\\\'"'"''"'"'/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='"'"'\\2'"'"'/p" ;; *) sed -n \ - "s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1=\\2/p" + "s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1=\\2/p" ;; esac; } @@ -1326,7 +1360,7 @@ _ASBOX for ac_var in $ac_subst_files do eval ac_val=$`echo $ac_var` - echo "$ac_var='"'"'$ac_val'"'"'" + echo "$ac_var='"'"'$ac_val'"'"'" done | sort echo fi @@ -1345,7 +1379,7 @@ _ASBOX echo "$as_me: caught signal $ac_signal" echo "$as_me: exit $exit_status" } >&5 - rm -f core core.* *.core && + rm -f core *.core && rm -rf conftest* confdefs* conf$$* $ac_clean_files && exit $exit_status ' 0 @@ -1425,7 +1459,7 @@ fi # value. ac_cache_corrupted=false for ac_var in `(set) 2>&1 | - sed -n 's/^ac_env_\([a-zA-Z_0-9]*\)_set=.*/\1/p'`; do + sed -n 's/^ac_env_\([a-zA-Z_0-9]*\)_set=.*/\1/p'`; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val="\$ac_cv_env_${ac_var}_value" @@ -1442,13 +1476,13 @@ echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then - { echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5 + { echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5 echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} - { echo "$as_me:$LINENO: former value: $ac_old_val" >&5 + { echo "$as_me:$LINENO: former value: $ac_old_val" >&5 echo "$as_me: former value: $ac_old_val" >&2;} - { echo "$as_me:$LINENO: current value: $ac_new_val" >&5 + { echo "$as_me:$LINENO: current value: $ac_new_val" >&5 echo "$as_me: current value: $ac_new_val" >&2;} - ac_cache_corrupted=: + ac_cache_corrupted=: fi;; esac # Pass precious variables to config.status. @@ -1662,13 +1696,6 @@ echo "${ECHO_T}no" >&6 - - -# RPM needs the current absolute path. -# RPM needs the list of original arguments, but we don't include the RPM -# option itself. -CONFIGURATION_PATH=${PWD-`pwd`} -CONFIGURATION_ARGS=`echo "$*" | sed -e 's/--with-rpm[^ ]*//'` # Set the default installation location. 
@@ -1677,11 +1704,11 @@ CONFIGURATION_ARGS=`echo "$*" | sed -e 's/--with-rpm[^ ]*//'` DB_VERSION_MAJOR="4" -DB_VERSION_MINOR="2" +DB_VERSION_MINOR="3" -DB_VERSION_PATCH="52" +DB_VERSION_PATCH="14" -DB_VERSION_STRING='"Sleepycat Software: Berkeley DB 4.2.52: (December 3, 2003)"' +DB_VERSION_STRING='"Sleepycat Software: Berkeley DB 4.3.14: (October 14, 2004)"' # Process all options before using them. @@ -1765,6 +1792,23 @@ yes) echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6;; esac +echo "$as_me:$LINENO: checking if --disable-statistics option specified" >&5 +echo $ECHO_N "checking if --disable-statistics option specified... $ECHO_C" >&6 +# Check whether --enable-statistics or --disable-statistics was given. +if test "${enable_statistics+set}" = set; then + enableval="$enable_statistics" + +else + enableval="yes" +fi; +db_cv_build_statistics="$enableval" +case "$enableval" in + no) echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6;; +yes) echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6;; +esac + echo "$as_me:$LINENO: checking if --disable-verify option specified" >&5 echo $ECHO_N "checking if --disable-verify option specified... $ECHO_C" >&6 # Check whether --enable-verify or --disable-verify was given. @@ -1906,6 +1950,18 @@ fi; echo "$as_me:$LINENO: result: $db_cv_mingw" >&5 echo "${ECHO_T}$db_cv_mingw" >&6 +echo "$as_me:$LINENO: checking if --enable-o_direct option specified" >&5 +echo $ECHO_N "checking if --enable-o_direct option specified... $ECHO_C" >&6 +# Check whether --enable-o_direct or --disable-o_direct was given. +if test "${enable_o_direct+set}" = set; then + enableval="$enable_o_direct" + db_cv_o_direct="$enable_o_direct" +else + db_cv_o_direct="no" +fi; +echo "$as_me:$LINENO: result: $db_cv_o_direct" >&5 +echo "${ECHO_T}$db_cv_o_direct" >&6 + echo "$as_me:$LINENO: checking if --enable-posixmutexes option specified" >&5 echo $ECHO_N "checking if --enable-posixmutexes option specified... $ECHO_C" >&6 # Check whether --enable-posixmutexes or --disable-posixmutexes was given. @@ -1944,6 +2000,7 @@ if test "$db_cv_smallbuild" = "yes"; then db_cv_build_hash="no" db_cv_build_queue="no" db_cv_build_replication="no" + db_cv_build_statistics="no" db_cv_build_verify="no" fi echo "$as_me:$LINENO: result: $db_cv_smallbuild" >&5 @@ -2044,31 +2101,6 @@ fi echo "$as_me:$LINENO: result: $with_mutexalign" >&5 echo "${ECHO_T}$with_mutexalign" >&6 -echo "$as_me:$LINENO: checking if --with-rpm=ARCHIVE option specified" >&5 -echo $ECHO_N "checking if --with-rpm=ARCHIVE option specified... $ECHO_C" >&6 - -# Check whether --with-rpm or --without-rpm was given. -if test "${with_rpm+set}" = set; then - withval="$with_rpm" - with_rpm="$withval" -else - with_rpm="no" -fi; -if test "$with_rpm" = "no"; then - db_cv_rpm="no" - db_cv_path_rpm_archive="Not-configured-with-rpm=ARCHIVE" -else - if test "$with_rpm" = "yes"; then - { { echo "$as_me:$LINENO: error: --with-rpm requires an archive path argument" >&5 -echo "$as_me: error: --with-rpm requires an archive path argument" >&2;} - { (exit 1); exit 1; }; } - fi - db_cv_rpm="yes" - db_cv_path_rpm_archive="$with_rpm" -fi -echo "$as_me:$LINENO: result: $with_rpm" >&5 -echo "${ECHO_T}$with_rpm" >&6 - echo "$as_me:$LINENO: checking if --with-tcl=DIR option specified" >&5 echo $ECHO_N "checking if --with-tcl=DIR option specified... 
$ECHO_C" >&6 @@ -2103,7 +2135,7 @@ echo "${ECHO_T}$with_uniquename" >&6 else db_cv_uniquename="yes" if test "$with_uniquename" = "yes"; then - DB_VERSION_UNIQUE_NAME="_4002" + DB_VERSION_UNIQUE_NAME="_4003" else DB_VERSION_UNIQUE_NAME="$with_uniquename" fi @@ -2120,6 +2152,25 @@ echo "$as_me: error: --enable-test requires --enable-tcl" >&2;} fi fi +# Uniquename excludes C++, Java, RPC. +if test "$db_cv_uniquename" = "yes"; then + if test "$db_cv_rpc" = "yes"; then + { { echo "$as_me:$LINENO: error: --with-uniquename is not compatible with --enable-rpc" >&5 +echo "$as_me: error: --with-uniquename is not compatible with --enable-rpc" >&2;} + { (exit 1); exit 1; }; } + fi + if test "$db_cv_cxx" = "yes"; then + { { echo "$as_me:$LINENO: error: --with-uniquename is not compatible with --enable-cxx" >&5 +echo "$as_me: error: --with-uniquename is not compatible with --enable-cxx" >&2;} + { (exit 1); exit 1; }; } + fi + if test "$db_cv_java" = "yes"; then + { { echo "$as_me:$LINENO: error: --with-uniquename is not compatible with --enable-java" >&5 +echo "$as_me: error: --with-uniquename is not compatible with --enable-java" >&2;} + { (exit 1); exit 1; }; } + fi +fi + # Set some #defines based on configuration options. if test "$db_cv_diagnostic" = "yes"; then cat >>confdefs.h <<\_ACEOF @@ -2424,133 +2475,6 @@ echo "$as_me: error: No cp utility found." >&2;} { (exit 1); exit 1; }; } fi -if test "$db_cv_rpm" = "yes"; then - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}ldconfig", so it can be a program name with args. -set dummy ${ac_tool_prefix}ldconfig; ac_word=$2 -echo "$as_me:$LINENO: checking for $ac_word" >&5 -echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 -if test "${ac_cv_prog_path_ldconfig+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - if test -n "$path_ldconfig"; then - ac_cv_prog_path_ldconfig="$path_ldconfig" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_path_ldconfig="${ac_tool_prefix}ldconfig" - echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done -done - -fi -fi -path_ldconfig=$ac_cv_prog_path_ldconfig -if test -n "$path_ldconfig"; then - echo "$as_me:$LINENO: result: $path_ldconfig" >&5 -echo "${ECHO_T}$path_ldconfig" >&6 -else - echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6 -fi - -fi -if test -z "$ac_cv_prog_path_ldconfig"; then - ac_ct_path_ldconfig=$path_ldconfig - # Extract the first word of "ldconfig", so it can be a program name with args. -set dummy ldconfig; ac_word=$2 -echo "$as_me:$LINENO: checking for $ac_word" >&5 -echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 -if test "${ac_cv_prog_ac_ct_path_ldconfig+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - if test -n "$ac_ct_path_ldconfig"; then - ac_cv_prog_ac_ct_path_ldconfig="$ac_ct_path_ldconfig" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_path_ldconfig="ldconfig" - echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done -done - - test -z "$ac_cv_prog_ac_ct_path_ldconfig" && ac_cv_prog_ac_ct_path_ldconfig="missing_ldconfig" -fi -fi -ac_ct_path_ldconfig=$ac_cv_prog_ac_ct_path_ldconfig -if test -n "$ac_ct_path_ldconfig"; then - echo "$as_me:$LINENO: result: $ac_ct_path_ldconfig" >&5 -echo "${ECHO_T}$ac_ct_path_ldconfig" >&6 -else - echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6 -fi - - path_ldconfig=$ac_ct_path_ldconfig -else - path_ldconfig="$ac_cv_prog_path_ldconfig" -fi - - # Extract the first word of "$path_ldconfig", so it can be a program name with args. -set dummy $path_ldconfig; ac_word=$2 -echo "$as_me:$LINENO: checking for $ac_word" >&5 -echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 -if test "${ac_cv_path_db_cv_path_ldconfig+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - case $db_cv_path_ldconfig in - [\\/]* | ?:[\\/]*) - ac_cv_path_db_cv_path_ldconfig="$db_cv_path_ldconfig" # Let the user override the test with a path. - ;; - *) - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_path_db_cv_path_ldconfig="$as_dir/$ac_word$ac_exec_ext" - echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done -done - - test -z "$ac_cv_path_db_cv_path_ldconfig" && ac_cv_path_db_cv_path_ldconfig="missing_ldconfig" - ;; -esac -fi -db_cv_path_ldconfig=$ac_cv_path_db_cv_path_ldconfig - -if test -n "$db_cv_path_ldconfig"; then - echo "$as_me:$LINENO: result: $db_cv_path_ldconfig" >&5 -echo "${ECHO_T}$db_cv_path_ldconfig" >&6 -else - echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6 -fi - - if test "$db_cv_path_ldconfig" != missing_ldconfig; then - RPM_POST_INSTALL="%post -p $db_cv_path_ldconfig" - RPM_POST_UNINSTALL="%postun -p $db_cv_path_ldconfig" - fi -fi - if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}ln", so it can be a program name with args. set dummy ${ac_tool_prefix}ln; ac_word=$2 @@ -2932,17 +2856,17 @@ echo "$as_me: error: No rm utility found." >&2;} { (exit 1); exit 1; }; } fi -if test "$db_cv_rpm" = "yes"; then +if test "$db_cv_rpc" = "yes"; then if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}rpm", so it can be a program name with args. -set dummy ${ac_tool_prefix}rpm; ac_word=$2 + # Extract the first word of "${ac_tool_prefix}rpcgen", so it can be a program name with args. +set dummy ${ac_tool_prefix}rpcgen; ac_word=$2 echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 -if test "${ac_cv_prog_db_cv_path_rpm+set}" = set; then +if test "${ac_cv_prog_db_cv_path_rpcgen+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - if test -n "$db_cv_path_rpm"; then - ac_cv_prog_db_cv_path_rpm="$db_cv_path_rpm" # Let the user override the test. + if test -n "$db_cv_path_rpcgen"; then + ac_cv_prog_db_cv_path_rpcgen="$db_cv_path_rpcgen" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH @@ -2951,7 +2875,7 @@ do test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_db_cv_path_rpm="${ac_tool_prefix}rpm" + ac_cv_prog_db_cv_path_rpcgen="${ac_tool_prefix}rpcgen" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi @@ -2960,27 +2884,27 @@ done fi fi -db_cv_path_rpm=$ac_cv_prog_db_cv_path_rpm -if test -n "$db_cv_path_rpm"; then - echo "$as_me:$LINENO: result: $db_cv_path_rpm" >&5 -echo "${ECHO_T}$db_cv_path_rpm" >&6 +db_cv_path_rpcgen=$ac_cv_prog_db_cv_path_rpcgen +if test -n "$db_cv_path_rpcgen"; then + echo "$as_me:$LINENO: result: $db_cv_path_rpcgen" >&5 +echo "${ECHO_T}$db_cv_path_rpcgen" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi fi -if test -z "$ac_cv_prog_db_cv_path_rpm"; then - ac_ct_db_cv_path_rpm=$db_cv_path_rpm - # Extract the first word of "rpm", so it can be a program name with args. -set dummy rpm; ac_word=$2 +if test -z "$ac_cv_prog_db_cv_path_rpcgen"; then + ac_ct_db_cv_path_rpcgen=$db_cv_path_rpcgen + # Extract the first word of "rpcgen", so it can be a program name with args. +set dummy rpcgen; ac_word=$2 echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 -if test "${ac_cv_prog_ac_ct_db_cv_path_rpm+set}" = set; then +if test "${ac_cv_prog_ac_ct_db_cv_path_rpcgen+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - if test -n "$ac_ct_db_cv_path_rpm"; then - ac_cv_prog_ac_ct_db_cv_path_rpm="$ac_ct_db_cv_path_rpm" # Let the user override the test. + if test -n "$ac_ct_db_cv_path_rpcgen"; then + ac_cv_prog_ac_ct_db_cv_path_rpcgen="$ac_ct_db_cv_path_rpcgen" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH @@ -2989,33 +2913,33 @@ do test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_db_cv_path_rpm="rpm" + ac_cv_prog_ac_ct_db_cv_path_rpcgen="rpcgen" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done - test -z "$ac_cv_prog_ac_ct_db_cv_path_rpm" && ac_cv_prog_ac_ct_db_cv_path_rpm="missing_rpm" + test -z "$ac_cv_prog_ac_ct_db_cv_path_rpcgen" && ac_cv_prog_ac_ct_db_cv_path_rpcgen="missing_rpcgen" fi fi -ac_ct_db_cv_path_rpm=$ac_cv_prog_ac_ct_db_cv_path_rpm -if test -n "$ac_ct_db_cv_path_rpm"; then - echo "$as_me:$LINENO: result: $ac_ct_db_cv_path_rpm" >&5 -echo "${ECHO_T}$ac_ct_db_cv_path_rpm" >&6 +ac_ct_db_cv_path_rpcgen=$ac_cv_prog_ac_ct_db_cv_path_rpcgen +if test -n "$ac_ct_db_cv_path_rpcgen"; then + echo "$as_me:$LINENO: result: $ac_ct_db_cv_path_rpcgen" >&5 +echo "${ECHO_T}$ac_ct_db_cv_path_rpcgen" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi - db_cv_path_rpm=$ac_ct_db_cv_path_rpm + db_cv_path_rpcgen=$ac_ct_db_cv_path_rpcgen else - db_cv_path_rpm="$ac_cv_prog_db_cv_path_rpm" + db_cv_path_rpcgen="$ac_cv_prog_db_cv_path_rpcgen" fi - if test "$db_cv_path_rpm" = missing_rpm; then - { { echo "$as_me:$LINENO: error: No rpm utility found." >&5 -echo "$as_me: error: No rpm utility found." >&2;} + if test "$db_cv_path_rpcgen" = missing_rpcgen; then + { { echo "$as_me:$LINENO: error: No rpcgen utility found." >&5 +echo "$as_me: error: No rpcgen utility found." 
>&2;} { (exit 1); exit 1; }; } fi fi @@ -3374,6 +3298,7 @@ fi # AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag # AFS /usr/afsws/bin/install, which mishandles nonexistent args # SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" +# OS/2's system install, which has a completely different semantic # ./install, which can be erroneously created by make from ./install.sh. echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5 echo $ECHO_N "checking for a BSD-compatible install... $ECHO_C" >&6 @@ -3390,6 +3315,7 @@ do case $as_dir/ in ./ | .// | /cC/* | \ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ + ?:\\/os2\\/install\\/* | ?:\\/OS2\\/INSTALL\\/* | \ /usr/ucb/* ) ;; *) # OSF1 and SCO ODT 3.0 have their own names for install. @@ -3397,20 +3323,20 @@ case $as_dir/ in # by default. for ac_prog in ginstall scoinst install; do for ac_exec_ext in '' $ac_executable_extensions; do - if $as_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then - if test $ac_prog = install && - grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then - # AIX install. It has an incompatible calling convention. - : - elif test $ac_prog = install && - grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then - # program-specific install script used by HP pwplus--don't use. - : - else - ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" - break 3 - fi - fi + if $as_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then + if test $ac_prog = install && + grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # AIX install. It has an incompatible calling convention. + : + elif test $ac_prog = install && + grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # program-specific install script used by HP pwplus--don't use. + : + else + ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" + break 3 + fi + fi done done ;; @@ -3441,31 +3367,12 @@ test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' -# RPM support: change the standard make and install targets -if test "$db_cv_rpm" = "yes"; then - BUILD_TARGET="rpm_build" - - # Check if we are running RPM version 3 or 4. - case "`rpm --version`" in - *version\ 4*) - RPM_BUILD="rpmbuild" - echo "_topdir $CONFIGURATION_PATH" > rpm-macro-defines;; - *version\ 3*) - RPM_BUILD="rpm" - echo "topdir: $CONFIGURATION_PATH" > rpm-macro-defines;; - esac - INSTALL_TARGET="rpm_install" -else - BUILD_TARGET="library_build" - INSTALL_TARGET="library_install" -fi +BUILD_TARGET="library_build" +INSTALL_TARGET="library_install" # This is where we handle stuff that autoconf can't handle: compiler, # preprocessor and load flags, libraries that the standard tests don't -# look for. The default optimization is -O. We would like to set the -# default optimization for systems using gcc to -O2, but we can't. By -# the time we know we're using gcc, it's too late to set optimization -# flags. +# look for. # # There are additional libraries we need for some compiler/architecture # combinations. @@ -3476,7 +3383,6 @@ fi # The makefile CC may be different than the CC used in config testing, # because the makefile CC may be set to use $(LIBTOOL). # -# XXX # Don't override anything if it's already set from the environment. 
optimize_def="-O" case "$host_os" in @@ -3485,19 +3391,14 @@ aix4.3.*|aix5*) CC=${CC-"xlc_r"} CPPFLAGS="$CPPFLAGS -D_THREAD_SAFE" LDFLAGS="$LDFLAGS -Wl,-brtl";; -bsdi3*) optimize_def="-O2" - CC=${CC-"shlicc2"} +bsdi3*) CC=${CC-"shlicc2"} LIBS="$LIBS -lipc";; -bsdi*) optimize_def="-O2";; cygwin*) - optimize_def="-O2" CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE -D_REENTRANT";; freebsd*) - optimize_def="-O2" CPPFLAGS="$CPPFLAGS -D_THREAD_SAFE" LDFLAGS="$LDFLAGS -pthread";; gnu*|k*bsd*-gnu|linux*) - optimize_def="-O2" CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE -D_REENTRANT";; hpux*) CPPFLAGS="$CPPFLAGS -D_REENTRANT";; irix*) optimize_def="-O2" @@ -3664,7 +3565,6 @@ ac_compiler=`set X $ac_compile; echo $2` (exit $ac_status); } cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -3684,8 +3584,8 @@ ac_clean_files="$ac_clean_files a.out a.exe b.out" # Try to create an executable without -o first, disregard a.out. # It will help us diagnose broken compilers, and finding out an intuition # of exeext. -echo "$as_me:$LINENO: checking for C compiler default output" >&5 -echo $ECHO_N "checking for C compiler default output... $ECHO_C" >&6 +echo "$as_me:$LINENO: checking for C compiler default output file name" >&5 +echo $ECHO_N "checking for C compiler default output file name... $ECHO_C" >&6 ac_link_default=`echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` if { (eval echo "$as_me:$LINENO: \"$ac_link_default\"") >&5 (eval $ac_link_default) 2>&5 @@ -3705,23 +3605,23 @@ do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.o | *.obj ) - ;; + ;; conftest.$ac_ext ) - # This is the source file. - ;; + # This is the source file. + ;; [ab].out ) - # We found the default executable, but exeext='' is most - # certainly right. - break;; + # We found the default executable, but exeext='' is most + # certainly right. + break;; *.* ) - ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` - # FIXME: I believe we export ac_cv_exeext for Libtool, - # but it would be cool to find out if it's true. Does anybody - # maintain Libtool? --akim. - export ac_cv_exeext - break;; + ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + # FIXME: I believe we export ac_cv_exeext for Libtool, + # but it would be cool to find out if it's true. Does anybody + # maintain Libtool? --akim. + export ac_cv_exeext + break;; * ) - break;; + break;; esac done else @@ -3795,8 +3695,8 @@ for ac_file in conftest.exe conftest conftest.*; do case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.o | *.obj ) ;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` - export ac_cv_exeext - break;; + export ac_cv_exeext + break;; * ) break;; esac done @@ -3821,7 +3721,6 @@ if test "${ac_cv_objext+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -3872,7 +3771,6 @@ if test "${ac_cv_c_compiler_gnu+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -3892,11 +3790,21 @@ main () _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -3909,7 +3817,7 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_compiler_gnu=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi @@ -3925,7 +3833,6 @@ if test "${ac_cv_prog_cc_g+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -3942,11 +3849,21 @@ main () _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -3959,7 +3876,7 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_prog_cc_g=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi echo "$as_me:$LINENO: result: $ac_cv_prog_cc_g" >&5 echo "${ECHO_T}$ac_cv_prog_cc_g" >&6 @@ -3986,7 +3903,6 @@ else ac_cv_prog_cc_stdc=no ac_save_CC=$CC cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -4014,6 +3930,16 @@ static char *f (char * (*g) (char **, int), char **p, ...) va_end (v); return s; } + +/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has + function prototypes and stuff, but not '\xHH' hex character constants. + These don't provoke an error unfortunately, instead are silently treated + as 'x'. The following induces an error, until -std1 is added to get + proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an + array size at least. It's necessary to write '\x00'==0 to get something + that's true only with -std1. */ +int osf4_cc_array ['\x00' == 0 ? 1 : -1]; + int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; @@ -4040,11 +3966,21 @@ do CC="$ac_save_CC $ac_arg" rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? 
@@ -4057,7 +3993,7 @@ else sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext +rm -f conftest.err conftest.$ac_objext done rm -f conftest.$ac_ext conftest.$ac_objext CC=$ac_save_CC @@ -4085,11 +4021,21 @@ cat >conftest.$ac_ext <<_ACEOF _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -4104,7 +4050,6 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 'void exit (int);' do cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -4122,11 +4067,21 @@ exit (42); _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -4139,9 +4094,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 continue fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -4158,11 +4112,21 @@ exit (42); _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? 
@@ -4174,7 +4138,7 @@ else sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done rm -f conftest* if test -n "$ac_declaration"; then @@ -4188,7 +4152,7 @@ else sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' @@ -4196,10 +4160,19 @@ ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $ ac_compiler_gnu=$ac_cv_c_compiler_gnu -# Checks for compiler characteristics. - - +# Set specific per-compiler flags. +if test "$GCC" = "yes"; then + # We want -O2 if we're using gcc. + CFLAGS="$CFLAGS " + CFLAGS=`echo "$CFLAGS" | sed 's/-O /-O2 /g'` +else + case "$host_os" in + hpux11*) + CPPFLAGS="$CPPFLAGS -mt";; + esac +fi +# Checks for compiler characteristics. DB_PROTO1="#undef __P" # AC_PROG_CC_STDC only sets ac_cv_prog_cc_stdc if the test fails, so @@ -4217,7 +4190,6 @@ if test "${ac_cv_c_const+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -4280,11 +4252,21 @@ main () _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -4297,7 +4279,7 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_c_const=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi echo "$as_me:$LINENO: result: $ac_cv_c_const" >&5 echo "${ECHO_T}$ac_cv_c_const" >&6 @@ -4850,7 +4832,6 @@ if test "${ac_cv_cxx_compiler_gnu+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -4870,11 +4851,21 @@ main () _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? 
@@ -4887,7 +4878,7 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_compiler_gnu=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_cxx_compiler_gnu=$ac_compiler_gnu fi @@ -4903,7 +4894,6 @@ if test "${ac_cv_prog_cxx_g+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -4920,11 +4910,21 @@ main () _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -4937,7 +4937,7 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_prog_cxx_g=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi echo "$as_me:$LINENO: result: $ac_cv_prog_cxx_g" >&5 echo "${ECHO_T}$ac_cv_prog_cxx_g" >&6 @@ -4965,7 +4965,6 @@ for ac_declaration in \ 'void exit (int);' do cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -4983,11 +4982,21 @@ exit (42); _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -5000,9 +5009,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 continue fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -5019,23 +5027,33 @@ exit (42); _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); }; }; then - break -else + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + break +else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done rm -f conftest* if test -n "$ac_declaration"; then @@ -5075,7 +5093,6 @@ do # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -5086,7 +5103,7 @@ cat >>conftest.$ac_ext <<_ACEOF #else # include #endif - Syntax error + Syntax error _ACEOF if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 @@ -5098,6 +5115,7 @@ if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 (exit $ac_status); } >/dev/null; then if test -s conftest.err; then ac_cpp_err=$ac_cxx_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_cxx_werror_flag else ac_cpp_err= fi @@ -5118,7 +5136,6 @@ rm -f conftest.err conftest.$ac_ext # OK, works on sane cases. Now check whether non-existent headers # can be detected and how. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -5136,6 +5153,7 @@ if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 (exit $ac_status); } >/dev/null; then if test -s conftest.err; then ac_cpp_err=$ac_cxx_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_cxx_werror_flag else ac_cpp_err= fi @@ -5182,7 +5200,6 @@ do # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -5193,7 +5210,7 @@ cat >>conftest.$ac_ext <<_ACEOF #else # include #endif - Syntax error + Syntax error _ACEOF if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 @@ -5205,6 +5222,7 @@ if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 (exit $ac_status); } >/dev/null; then if test -s conftest.err; then ac_cpp_err=$ac_cxx_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_cxx_werror_flag else ac_cpp_err= fi @@ -5225,7 +5243,6 @@ rm -f conftest.err conftest.$ac_ext # OK, works on sane cases. Now check whether non-existent headers # can be detected and how. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -5243,6 +5260,7 @@ if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 (exit $ac_status); } >/dev/null; then if test -s conftest.err; then ac_cpp_err=$ac_cxx_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_cxx_werror_flag else ac_cpp_err= fi @@ -5299,7 +5317,6 @@ ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ex ac_compiler_gnu=$ac_cv_cxx_compiler_gnu cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -5317,11 +5334,21 @@ std::ostream *o; return 0; _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -5334,7 +5361,7 @@ sed 's/^/| /' conftest.$ac_ext >&5 db_cv_cxx_have_stdheaders=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' @@ -5542,10 +5569,10 @@ for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do fi done done -SED=$lt_cv_path_SED fi +SED=$lt_cv_path_SED echo "$as_me:$LINENO: result: $SED" >&5 echo "${ECHO_T}$SED" >&6 @@ -5588,7 +5615,7 @@ echo $ECHO_N "checking for ld used by $CC... $ECHO_C" >&6 # Accept absolute paths. [\\/]* | ?:[\\/]*) re_direlt='/[^/][^/]*/\.\./' - # Canonicalize the path of ld + # Canonicalize the pathname of ld ac_prog=`echo $ac_prog| $SED 's%\\\\%/%g'` while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do ac_prog=`echo $ac_prog| $SED "s%$re_direlt%/%"` @@ -5657,7 +5684,7 @@ if test "${lt_cv_prog_gnu_ld+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else # I'd rather use --version here, but apparently some GNU ld's only accept -v. -case `"$LD" -v 2>&1 &1 &5 echo $ECHO_N "checking for BSD-compatible nm... $ECHO_C" >&6 @@ -5772,34 +5808,30 @@ beos*) lt_cv_deplibs_check_method=pass_all ;; -bsdi4*) +bsdi[45]*) lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' lt_cv_file_magic_cmd='/usr/bin/file -L' lt_cv_file_magic_test_file=/shlib/libc.so ;; -cygwin* | mingw* | pw32*) - # win32_libid is a shell function defined in ltmain.sh +cygwin*) + # func_win32_libid is a shell function defined in ltmain.sh lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' - lt_cv_file_magic_cmd='win32_libid' + lt_cv_file_magic_cmd='func_win32_libid' + ;; + +mingw* | pw32*) + # Base MSYS/MinGW do not provide the 'file' command needed by + # func_win32_libid shell function, so use a weaker test based on 'objdump'. + lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' 
+ lt_cv_file_magic_cmd='$OBJDUMP -f' ;; darwin* | rhapsody*) - # this will be overwritten by pass_all, but leave it in just in case - lt_cv_deplibs_check_method='file_magic Mach-O dynamically linked shared library' - lt_cv_file_magic_cmd='/usr/bin/file -L' - case "$host_os" in - rhapsody* | darwin1.[012]) - lt_cv_file_magic_test_file=`/System/Library/Frameworks/System.framework/System` - ;; - *) # Darwin 1.3 on - lt_cv_file_magic_test_file='/usr/lib/libSystem.dylib' - ;; - esac lt_cv_deplibs_check_method=pass_all ;; -freebsd*) +freebsd* | kfreebsd*-gnu) if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then case $host_cpu in i*86 ) @@ -5838,36 +5870,27 @@ hpux10.20* | hpux11*) ;; irix5* | irix6* | nonstopux*) - case $host_os in - irix5* | nonstopux*) - # this will be overridden with pass_all, but let us keep it just in case - lt_cv_deplibs_check_method="file_magic ELF 32-bit MSB dynamic lib MIPS - version 1" - ;; - *) - case $LD in - *-32|*"-32 ") libmagic=32-bit;; - *-n32|*"-n32 ") libmagic=N32;; - *-64|*"-64 ") libmagic=64-bit;; - *) libmagic=never-match;; - esac - # this will be overridden with pass_all, but let us keep it just in case - lt_cv_deplibs_check_method="file_magic ELF ${libmagic} MSB mips-[1234] dynamic lib MIPS - version 1" - ;; + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; esac - lt_cv_file_magic_test_file=`echo /lib${libsuff}/libc.so*` lt_cv_deplibs_check_method=pass_all ;; # This must be Linux ELF. linux*) case $host_cpu in - alpha* | hppa* | i*86 | ia64* | m68* | mips | mipsel | powerpc* | sparc* | s390* | sh*) + alpha*|hppa*|i*86|ia64*|m68*|mips*|powerpc*|sparc*|s390*|sh*) lt_cv_deplibs_check_method=pass_all ;; *) # glibc up to 2.1.1 does not perform some relocations on ARM + # this will be overridden with pass_all, but let us keep it just in case lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' ;; esac lt_cv_file_magic_test_file=`echo /lib/libc.so* /lib/libc-*.so` + lt_cv_deplibs_check_method=pass_all ;; netbsd*) @@ -5884,24 +5907,19 @@ newos6*) lt_cv_file_magic_test_file=/usr/lib/libnls.so ;; -nto-qnx | nto-qnx6*) +nto-qnx*) lt_cv_deplibs_check_method=unknown ;; openbsd*) - lt_cv_file_magic_cmd=/usr/bin/file - lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB shared object' + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' else - lt_cv_deplibs_check_method='file_magic OpenBSD.* shared library' + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' fi ;; osf3* | osf4* | osf5*) - # this will be overridden with pass_all, but let us keep it just in case - lt_cv_deplibs_check_method='file_magic COFF format alpha shared library' - lt_cv_file_magic_test_file=/shlib/libc.so lt_cv_deplibs_check_method=pass_all ;; @@ -5911,7 +5929,6 @@ sco3.2v5*) solaris*) lt_cv_deplibs_check_method=pass_all - lt_cv_file_magic_test_file=/lib/libc.so ;; sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) @@ -5990,7 +6007,7 @@ ia64-*-hpux*) ;; *-*-irix6*) # Find out which ABI we are using. - echo '#line 5993 "configure"' > conftest.$ac_ext + echo '#line 6010 "configure"' > conftest.$ac_ext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>&5 ac_status=$? 
@@ -6039,7 +6056,7 @@ x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*|s390*-*linux*|sparc*-*linux*) x86_64-*linux*) LD="${LD-ld} -m elf_i386" ;; - ppc64-*linux*) + ppc64-*linux*|powerpc64-*linux*) LD="${LD-ld} -m elf32ppclinux" ;; s390x-*linux*) @@ -6087,7 +6104,6 @@ ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $ ac_compiler_gnu=$ac_cv_c_compiler_gnu cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -6104,11 +6120,21 @@ main () _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -6121,7 +6147,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 lt_cv_cc_needs_belf=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' @@ -6170,7 +6197,6 @@ do # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -6181,7 +6207,7 @@ cat >>conftest.$ac_ext <<_ACEOF #else # include #endif - Syntax error + Syntax error _ACEOF if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 @@ -6193,6 +6219,7 @@ if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 (exit $ac_status); } >/dev/null; then if test -s conftest.err; then ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag else ac_cpp_err= fi @@ -6213,7 +6240,6 @@ rm -f conftest.err conftest.$ac_ext # OK, works on sane cases. Now check whether non-existent headers # can be detected and how. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -6231,6 +6257,7 @@ if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 (exit $ac_status); } >/dev/null; then if test -s conftest.err; then ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag else ac_cpp_err= fi @@ -6277,7 +6304,6 @@ do # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. 
*/ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -6288,7 +6314,7 @@ cat >>conftest.$ac_ext <<_ACEOF #else # include #endif - Syntax error + Syntax error _ACEOF if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 @@ -6300,6 +6326,7 @@ if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 (exit $ac_status); } >/dev/null; then if test -s conftest.err; then ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag else ac_cpp_err= fi @@ -6320,7 +6347,6 @@ rm -f conftest.err conftest.$ac_ext # OK, works on sane cases. Now check whether non-existent headers # can be detected and how. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -6338,6 +6364,7 @@ if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 (exit $ac_status); } >/dev/null; then if test -s conftest.err; then ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag else ac_cpp_err= fi @@ -6383,7 +6410,6 @@ if test "${ac_cv_header_stdc+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -6404,11 +6430,21 @@ main () _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -6421,12 +6457,11 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_header_stdc=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -6448,7 +6483,6 @@ fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -6473,7 +6507,6 @@ if test $ac_cv_header_stdc = yes; then : else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -6485,9 +6518,9 @@ cat >>conftest.$ac_ext <<_ACEOF # define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ - (('a' <= (c) && (c) <= 'i') \ - || ('j' <= (c) && (c) <= 'r') \ - || ('s' <= (c) && (c) <= 'z')) + (('a' <= (c) && (c) <= 'i') \ + || ('j' <= (c) && (c) <= 'r') \ + || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ? 
((c) | 0x40) : (c)) #endif @@ -6498,7 +6531,7 @@ main () int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) - || toupper (i) != TOUPPER (i)) + || toupper (i) != TOUPPER (i)) exit(2); exit (0); } @@ -6523,7 +6556,7 @@ sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) ac_cv_header_stdc=no fi -rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi fi @@ -6548,7 +6581,7 @@ fi for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ - inttypes.h stdint.h unistd.h + inttypes.h stdint.h unistd.h do as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` echo "$as_me:$LINENO: checking for $ac_header" >&5 @@ -6557,7 +6590,6 @@ if eval "test \"\${$as_ac_Header+set}\" = set"; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -6569,11 +6601,21 @@ $ac_includes_default _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -6586,7 +6628,7 @@ sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_Header=no" fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 @@ -6617,7 +6659,6 @@ else echo "$as_me:$LINENO: checking $ac_header usability" >&5 echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6 cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -6628,11 +6669,21 @@ $ac_includes_default _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -6645,7 +6696,7 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 echo "${ECHO_T}$ac_header_compiler" >&6 @@ -6653,7 +6704,6 @@ echo "${ECHO_T}$ac_header_compiler" >&6 echo "$as_me:$LINENO: checking $ac_header presence" >&5 echo $ECHO_N "checking $ac_header presence... 
$ECHO_C" >&6 cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -6671,6 +6721,7 @@ if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 (exit $ac_status); } >/dev/null; then if test -s conftest.err; then ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag else ac_cpp_err= fi @@ -6690,32 +6741,31 @@ echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 echo "${ECHO_T}$ac_header_preproc" >&6 # So? What about this header? -case $ac_header_compiler:$ac_header_preproc in - yes:no ) +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in + yes:no: ) { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} - { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 -echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} - ( - cat <<\_ASBOX -## ------------------------------------ ## -## Report this to bug-autoconf@gnu.org. ## -## ------------------------------------ ## -_ASBOX - ) | - sed "s/^/$as_me: WARNING: /" >&2 + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} + ac_header_preproc=yes ;; - no:yes ) + no:yes:* ) { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} - { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 -echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 +echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 +echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 +echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 +echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## ------------------------------------ ## -## Report this to bug-autoconf@gnu.org. ## +## Report this to support@sleepycat.com ## ## ------------------------------------ ## _ASBOX ) | @@ -6727,7 +6777,7 @@ echo $ECHO_N "checking for $ac_header... 
$ECHO_C" >&6 if eval "test \"\${$as_ac_Header+set}\" = set"; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - eval "$as_ac_Header=$ac_header_preproc" + eval "$as_ac_Header=\$ac_header_preproc" fi echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 @@ -6747,7 +6797,7 @@ ac_compile='$F77 -c $FFLAGS conftest.$ac_ext >&5' ac_link='$F77 -o conftest$ac_exeext $FFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_f77_compiler_gnu if test -n "$ac_tool_prefix"; then - for ac_prog in g77 f77 xlf frt pgf77 fl32 af77 fort77 f90 xlf90 pgf90 epcf90 f95 fort xlf95 lf95 g95 + for ac_prog in g77 f77 xlf frt pgf77 fort77 fl32 af77 f90 xlf90 pgf90 epcf90 f95 fort xlf95 ifc efc pgf95 lf95 gfortran do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 @@ -6789,7 +6839,7 @@ fi fi if test -z "$F77"; then ac_ct_F77=$F77 - for ac_prog in g77 f77 xlf frt pgf77 fl32 af77 fort77 f90 xlf90 pgf90 epcf90 f95 fort xlf95 lf95 g95 + for ac_prog in g77 f77 xlf frt pgf77 fort77 fl32 af77 f90 xlf90 pgf90 epcf90 f95 fort xlf95 ifc efc pgf95 lf95 gfortran do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 @@ -6834,7 +6884,7 @@ fi # Provide some information about the compiler. -echo "$as_me:6837:" \ +echo "$as_me:6887:" \ "checking for Fortran 77 compiler version" >&5 ac_compiler=`set X $ac_compile; echo $2` { (eval echo "$as_me:$LINENO: \"$ac_compiler --version &5\"") >&5 @@ -6852,9 +6902,10 @@ ac_compiler=`set X $ac_compile; echo $2` ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } +rm -f a.out # If we don't use `.F' as extension, the preprocessor is not run on the -# input file. +# input file. (Note that this only needs to work for GNU compilers.) ac_save_ext=$ac_ext ac_ext=F echo "$as_me:$LINENO: checking whether we are using the GNU Fortran 77 compiler" >&5 @@ -6872,11 +6923,21 @@ else _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_f77_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -6889,14 +6950,13 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_compiler_gnu=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_f77_compiler_gnu=$ac_compiler_gnu fi echo "$as_me:$LINENO: result: $ac_cv_f77_compiler_gnu" >&5 echo "${ECHO_T}$ac_cv_f77_compiler_gnu" >&6 ac_ext=$ac_save_ext -G77=`test $ac_compiler_gnu = yes && echo yes` ac_test_FFLAGS=${FFLAGS+set} ac_save_FFLAGS=$FFLAGS FFLAGS= @@ -6913,11 +6973,21 @@ cat >conftest.$ac_ext <<_ACEOF _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_f77_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -6930,7 +7000,7 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_prog_f77_g=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi echo "$as_me:$LINENO: result: $ac_cv_prog_f77_g" >&5 @@ -6938,18 +7008,20 @@ echo "${ECHO_T}$ac_cv_prog_f77_g" >&6 if test "$ac_test_FFLAGS" = set; then FFLAGS=$ac_save_FFLAGS elif test $ac_cv_prog_f77_g = yes; then - if test "$G77" = yes; then + if test "x$ac_cv_f77_compiler_gnu" = xyes; then FFLAGS="-g -O2" else FFLAGS="-g" fi else - if test "$G77" = yes; then + if test "x$ac_cv_f77_compiler_gnu" = xyes; then FFLAGS="-O2" else FFLAGS= fi fi + +G77=`test $ac_compiler_gnu = yes && echo yes` ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' @@ -6967,7 +7039,7 @@ if test "${lt_cv_sys_max_cmd_len+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else i=0 - testring="ABCD" + teststring="ABCD" case $build_os in msdosdjgpp*) @@ -6996,20 +7068,40 @@ else lt_cv_sys_max_cmd_len=8192; ;; + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; + ;; + + netbsd* | freebsd* | openbsd* | darwin* ) + # This has been around since 386BSD, at least. Likely further. + if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for *BSD + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + ;; + *) # If test is not a shell built-in, we'll probably end up computing a # maximum length that is only half of the actual maximum length, but # we can't tell. - while (test "X"`$CONFIG_SHELL $0 --fallback-echo "X$testring" 2>/dev/null` \ - = "XX$testring") >/dev/null 2>&1 && - new_result=`expr "X$testring" : ".*" 2>&1` && + SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} + while (test "X"`$SHELL $0 --fallback-echo "X$teststring" 2>/dev/null` \ + = "XX$teststring") >/dev/null 2>&1 && + new_result=`expr "X$teststring" : ".*" 2>&1` && lt_cv_sys_max_cmd_len=$new_result && test $i != 17 # 1/2 MB should be enough do i=`expr $i + 1` - testring=$testring$testring + teststring=$teststring$teststring done - testring= + teststring= # Add a significant safety factor because C++ compilers can tack on massive # amounts of additional arguments before passing them to the linker. # It appears as though 1/2 is a usable value. @@ -7077,7 +7169,7 @@ osf*) symcode='[BCDEGQRST]' ;; solaris* | sysv5*) - symcode='[BDT]' + symcode='[BDRT]' ;; sysv4) symcode='[DFNSTU]' @@ -7095,7 +7187,7 @@ esac # If we're using GNU nm, then use its standard symbol codes. case `$NM -V 2>&1` in *GNU* | *'with BFD'*) - symcode='[ABCDGISTW]' ;; + symcode='[ABCDGIRSTW]' ;; esac # Try without a prefix undercore, then with it. 
@@ -7569,6 +7661,8 @@ if test -n "$RANLIB"; then old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" fi +cc_basename=`$echo X"$compiler" | $Xsed -e 's%^.*/%%'` + # Only perform the check for file, if the check method requires it case $deplibs_check_method in file_magic*) @@ -7825,7 +7919,8 @@ lt_prog_compiler_no_builtin_flag= if test "$GCC" = yes; then lt_prog_compiler_no_builtin_flag=' -fno-builtin' - echo "$as_me:$LINENO: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 + +echo "$as_me:$LINENO: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 echo $ECHO_N "checking if $compiler supports -fno-rtti -fno-exceptions... $ECHO_C" >&6 if test "${lt_cv_prog_compiler_rtti_exceptions+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 @@ -7843,11 +7938,11 @@ else -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:7846: $lt_compile\"" >&5) + (eval echo "\"\$as_me:7941: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 - echo "$as_me:7850: \$? = $ac_status" >&5 + echo "$as_me:7945: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings @@ -7954,6 +8049,16 @@ echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6 lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' fi ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case "$cc_basename" in + xlc*) + lt_prog_compiler_pic='-qnocommon' + lt_prog_compiler_wl='-Wl,' + ;; + esac + ;; mingw* | pw32* | os2*) # This hack is so that the source file can tell whether it is being @@ -7990,12 +8095,12 @@ echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6 linux*) case $CC in - icc|ecc) + icc* | ecc*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-static' ;; - ccc) + ccc*) lt_prog_compiler_wl='-Wl,' # All Alpha code is PIC. lt_prog_compiler_static='-non_shared' @@ -8057,7 +8162,8 @@ echo "${ECHO_T}$lt_prog_compiler_pic" >&6 # Check to make sure the PIC flag actually works. # if test -n "$lt_prog_compiler_pic"; then - echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 + +echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic works... $ECHO_C" >&6 if test "${lt_prog_compiler_pic_works+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 @@ -8075,11 +8181,11 @@ else -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:8078: $lt_compile\"" >&5) + (eval echo "\"\$as_me:8184: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 - echo "$as_me:8082: \$? = $ac_status" >&5 + echo "$as_me:8188: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings @@ -8126,13 +8232,6 @@ else mkdir out printf "$lt_simple_compile_test_code" > conftest.$ac_ext - # According to Tom Tromey, Ian Lance Taylor reported there are C compilers - # that will create temporary files in the current directory regardless of - # the output directory. 
Thus, making CWD read-only will cause this test - # to fail, enabling locking or at least warning the user not to do parallel - # builds. - chmod -w . - lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. @@ -8142,11 +8241,11 @@ else -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:8145: $lt_compile\"" >&5) + (eval echo "\"\$as_me:8244: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 - echo "$as_me:8149: \$? = $ac_status" >&5 + echo "$as_me:8248: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized @@ -8156,8 +8255,11 @@ else fi fi chmod u+w . - $rm conftest* out/* - rmdir out + $rm conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files + $rm out/* && rmdir out cd .. rmdir conftest $rm conftest* @@ -8354,6 +8456,31 @@ EOF hardcode_shlibpath_var=no ;; + linux*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + tmp_archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_cmds="$tmp_archive_cmds" + supports_anon_versioning=no + case `$LD -v 2>/dev/null` in + *\ 01.* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + if test $supports_anon_versioning = yes; then + archive_expsym_cmds='$echo "{ global:" > $output_objdir/$libname.ver~ +cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ +$echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + else + archive_expsym_cmds="$tmp_archive_cmds" + fi + else + ld_shlibs=no + fi + ;; + *) if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' @@ -8482,7 +8609,6 @@ EOF allow_undefined_flag='-berok' # Determine the default libpath from the value encoded in an empty executable. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -8499,11 +8625,21 @@ main () _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? 
@@ -8520,7 +8656,8 @@ else sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" @@ -8533,7 +8670,6 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi else # Determine the default libpath from the value encoded in an empty executable. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -8550,11 +8686,21 @@ main () _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -8571,7 +8717,8 @@ else sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" @@ -8598,7 +8745,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ld_shlibs=no ;; - bsdi4*) + bsdi[45]*) export_dynamic_flag_spec=-rdynamic ;; @@ -8612,7 +8759,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. - shrext=".dll" + shrext_cmds=".dll" # FIXME: Setting linknames here is a bad hack. archive_cmds='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | $SED -e '\''s/ -lc$//'\''` -link -dll~linknames=' # The linker will automatically build a .lib file if we build a DLL. @@ -8624,43 +8771,52 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ;; darwin* | rhapsody*) - if $CC -v 2>&1 | grep 'Apple' >/dev/null ; then - archive_cmds_need_lc=no case "$host_os" in - rhapsody* | darwin1.[012]) - allow_undefined_flag='-undefined suppress' - ;; - *) # Darwin 1.3 on - test -z ${LD_TWOLEVEL_NAMESPACE} && allow_undefined_flag='-flat_namespace -undefined suppress' - ;; + rhapsody* | darwin1.[012]) + allow_undefined_flag='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + allow_undefined_flag='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[012]) + allow_undefined_flag='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + allow_undefined_flag='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; esac - # FIXME: Relying on posixy $() will cause problems for - # cross-compilation, but unfortunately the echo tests do not - # yet detect zsh echo's removal of \ escapes. Also zsh mangles - # `"' quotes if we put them in here... so don't! 
- lt_int_apple_cc_single_mod=no - output_verbose_link_cmd='echo' - if $CC -dumpspecs 2>&1 | grep 'single_module' >/dev/null ; then - lt_int_apple_cc_single_mod=yes - fi - if test "X$lt_int_apple_cc_single_mod" = Xyes ; then - archive_cmds='$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - else - archive_cmds='$CC -r ${wl}-bind_at_load -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - fi - module_cmds='$CC -bundle ${wl}-bind_at_load $allow_undefined_flag -o $lib $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's - if test "X$lt_int_apple_cc_single_mod" = Xyes ; then - archive_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - else - archive_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -r ${wl}-bind_at_load -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - fi - module_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -bundle $allow_undefined_flag -o $lib $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + archive_cmds_need_lc=no hardcode_direct=no hardcode_automatic=yes hardcode_shlibpath_var=unsupported - whole_archive_flag_spec='-all_load $convenience' + whole_archive_flag_spec='' link_all_deplibs=yes + if test "$GCC" = yes ; then + output_verbose_link_cmd='echo' + archive_cmds='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + module_cmds='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's + archive_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case "$cc_basename" in + xlc*) + output_verbose_link_cmd='echo' + archive_cmds='$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + module_cmds='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's + archive_expsym_cmds='sed -e "s,#.*,," -e "s,^[ 
]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + ld_shlibs=no + ;; + esac fi ;; @@ -8694,7 +8850,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ;; # FreeBSD 3 and greater uses gcc -shared to do shared libraries. - freebsd*) + freebsd* | kfreebsd*-gnu) archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes @@ -8805,6 +8961,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi hardcode_shlibpath_var=no if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' hardcode_libdir_flag_spec='${wl}-rpath,$libdir' export_dynamic_flag_spec='${wl}-E' else @@ -9054,78 +9211,12 @@ echo "${ECHO_T}$archive_cmds_need_lc" >&6 ;; esac -echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 -echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6 -hardcode_action= -if test -n "$hardcode_libdir_flag_spec" || \ - test -n "$runpath_var " || \ - test "X$hardcode_automatic"="Xyes" ; then - - # We can hardcode non-existant directories. - if test "$hardcode_direct" != no && - # If the only mechanism to avoid hardcoding is shlibpath_var, we - # have to relink, otherwise we might link with an installed library - # when we should be linking with a yet-to-be-installed one - ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, )" != no && - test "$hardcode_minus_L" != no; then - # Linking always hardcodes the temporary library directory. - hardcode_action=relink - else - # We can link without hardcoding, and we can hardcode nonexisting dirs. - hardcode_action=immediate - fi -else - # We cannot hardcode anything, or else we can only hardcode existing - # directories. - hardcode_action=unsupported -fi -echo "$as_me:$LINENO: result: $hardcode_action" >&5 -echo "${ECHO_T}$hardcode_action" >&6 - -if test "$hardcode_action" = relink; then - # Fast installation is not supported - enable_fast_install=no -elif test "$shlibpath_overrides_runpath" = yes || - test "$enable_shared" = no; then - # Fast installation is not necessary - enable_fast_install=needless -fi - -striplib= -old_striplib= -echo "$as_me:$LINENO: checking whether stripping libraries is possible" >&5 -echo $ECHO_N "checking whether stripping libraries is possible... 
$ECHO_C" >&6 -if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then - test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" - test -z "$striplib" && striplib="$STRIP --strip-unneeded" - echo "$as_me:$LINENO: result: yes" >&5 -echo "${ECHO_T}yes" >&6 -else -# FIXME - insert some real tests, host_os isn't really good enough - case $host_os in - darwin*) - if test -n "$STRIP" ; then - striplib="$STRIP -x" - echo "$as_me:$LINENO: result: yes" >&5 -echo "${ECHO_T}yes" >&6 - else - echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6 -fi - ;; - *) - echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6 - ;; - esac -fi - echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 echo $ECHO_N "checking dynamic linker characteristics... $ECHO_C" >&6 library_names_spec= libname_spec='lib$name' soname_spec= -shrext=".so" +shrext_cmds=".so" postinstall_cmds= postuninstall_cmds= finish_cmds= @@ -9213,7 +9304,7 @@ aix4* | aix5*) amigaos*) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. - finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "(cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a)"; (cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a) || exit 1; done' + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; beos*) @@ -9222,7 +9313,7 @@ beos*) shlibpath_var=LIBRARY_PATH ;; -bsdi4*) +bsdi[45]*) version_type=linux need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' @@ -9238,7 +9329,7 @@ bsdi4*) cygwin* | mingw* | pw32*) version_type=windows - shrext=".dll" + shrext_cmds=".dll" need_version=no need_lib_prefix=no @@ -9260,7 +9351,7 @@ cygwin* | mingw* | pw32*) cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - sys_lib_search_path_spec="/lib /lib/w32api /usr/lib /usr/local/lib" + sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" ;; mingw*) # MinGW DLLs use traditional 'lib' prefix @@ -9299,17 +9390,16 @@ darwin* | rhapsody*) version_type=darwin need_lib_prefix=no need_version=no - # FIXME: Relying on posixy $() will cause problems for - # cross-compilation, but unfortunately the echo tests do not - # yet detect zsh echo's removal of \ escapes. library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' soname_spec='${libname}${release}${major}$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH - shrext='$(test .$module = .yes && echo .so || echo .dylib)' + shrext_cmds='$(test .$module = .yes && echo .so || echo .dylib)' # Apple's gcc prints 'gcc -print-search-dirs' doesn't operate the same. 
- if $CC -v 2>&1 | grep 'Apple' >/dev/null ; then - sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + else + sys_lib_search_path_spec='/lib /usr/lib /usr/local/lib' fi sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; @@ -9327,6 +9417,18 @@ freebsd1*) dynamic_linker=no ;; +kfreebsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + freebsd*) objformat=`test -x /usr/bin/objformat && /usr/bin/objformat || echo aout` version_type=freebsd-$objformat @@ -9375,7 +9477,7 @@ hpux9* | hpux10* | hpux11*) need_version=no case "$host_cpu" in ia64*) - shrext='.so' + shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH @@ -9390,7 +9492,7 @@ hpux9* | hpux10* | hpux11*) sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; hppa*64*) - shrext='.sl' + shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH @@ -9401,7 +9503,7 @@ hpux9* | hpux10* | hpux11*) sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) - shrext='.sl' + shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH @@ -9470,6 +9572,12 @@ linux*) # before this can be enabled. hardcode_into_libs=yes + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`$SED -e 's/:,\t/ /g;s/=^=*$//;s/=^= * / /g' /etc/ld.so.conf | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + # We used to test for /lib/ld.so.1 and disable shared libraries on # powerpc, because MkLinux only supported shared libraries with the # GNU dynamic linker. 
Since this was broken with cross compilers, @@ -9479,6 +9587,18 @@ linux*) dynamic_linker='GNU/Linux ld.so' ;; +knetbsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + netbsd*) version_type=sunos need_lib_prefix=no @@ -9488,7 +9608,7 @@ netbsd*) finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} ${libname}${shared_ext}' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='NetBSD ld.elf_so' fi @@ -9504,7 +9624,7 @@ newsos6) shlibpath_overrides_runpath=yes ;; -nto-qnx | nto-qnx6*) +nto-qnx*) version_type=linux need_lib_prefix=no need_version=no @@ -9537,7 +9657,7 @@ openbsd*) os2*) libname_spec='$name' - shrext=".dll" + shrext_cmds=".dll" need_lib_prefix=no library_names_spec='$libname${shared_ext} $libname.a' dynamic_linker='OS/2 ld.exe' @@ -9635,15 +9755,81 @@ echo "$as_me:$LINENO: result: $dynamic_linker" >&5 echo "${ECHO_T}$dynamic_linker" >&6 test "$dynamic_linker" = no && can_build_shared=no -if test "x$enable_dlopen" != xyes; then - enable_dlopen=unknown - enable_dlopen_self=unknown - enable_dlopen_self_static=unknown -else - lt_cv_dlopen=no - lt_cv_dlopen_libs= - - case $host_os in +echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 +echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6 +hardcode_action= +if test -n "$hardcode_libdir_flag_spec" || \ + test -n "$runpath_var" || \ + test "X$hardcode_automatic" = "Xyes" ; then + + # We can hardcode non-existant directories. + if test "$hardcode_direct" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, )" != no && + test "$hardcode_minus_L" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + hardcode_action=unsupported +fi +echo "$as_me:$LINENO: result: $hardcode_action" >&5 +echo "${ECHO_T}$hardcode_action" >&6 + +if test "$hardcode_action" = relink; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + +striplib= +old_striplib= +echo "$as_me:$LINENO: checking whether stripping libraries is possible" >&5 +echo $ECHO_N "checking whether stripping libraries is possible... 
$ECHO_C" >&6 +if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 + else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + ;; + *) + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 + ;; + esac +fi + +if test "x$enable_dlopen" != xyes; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in beos*) lt_cv_dlopen="load_add_on" lt_cv_dlopen_libs= @@ -9670,7 +9856,6 @@ else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -9694,11 +9879,21 @@ dlopen (); _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -9711,7 +9906,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_dl_dlopen=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 @@ -9735,21 +9931,28 @@ if test "${ac_cv_func_shl_load+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ +/* Define shl_load to an innocuous variant, in case declares shl_load. + For example, HP-UX 11i declares gettimeofday. */ +#define shl_load innocuous_shl_load + /* System header to define __stub macros and hopefully few prototypes, which can conflict with char shl_load (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ + #ifdef __STDC__ # include #else # include #endif + +#undef shl_load + /* Override any gcc2 internal prototype to avoid an error. */ #ifdef __cplusplus extern "C" @@ -9780,11 +9983,21 @@ return f != shl_load; _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! 
-s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -9797,7 +10010,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_func_shl_load=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi echo "$as_me:$LINENO: result: $ac_cv_func_shl_load" >&5 echo "${ECHO_T}$ac_cv_func_shl_load" >&6 @@ -9812,7 +10026,6 @@ else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -9836,11 +10049,21 @@ shl_load (); _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -9853,7 +10076,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_dld_shl_load=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi echo "$as_me:$LINENO: result: $ac_cv_lib_dld_shl_load" >&5 @@ -9867,21 +10091,28 @@ if test "${ac_cv_func_dlopen+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ +/* Define dlopen to an innocuous variant, in case declares dlopen. + For example, HP-UX 11i declares gettimeofday. */ +#define dlopen innocuous_dlopen + /* System header to define __stub macros and hopefully few prototypes, which can conflict with char dlopen (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ + #ifdef __STDC__ # include #else # include #endif + +#undef dlopen + /* Override any gcc2 internal prototype to avoid an error. */ #ifdef __cplusplus extern "C" @@ -9912,11 +10143,21 @@ return f != dlopen; _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? 
@@ -9929,7 +10170,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_func_dlopen=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi echo "$as_me:$LINENO: result: $ac_cv_func_dlopen" >&5 echo "${ECHO_T}$ac_cv_func_dlopen" >&6 @@ -9944,7 +10186,6 @@ else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -9968,11 +10209,21 @@ dlopen (); _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -9985,7 +10236,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_dl_dlopen=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 @@ -10001,7 +10253,6 @@ else ac_check_lib_save_LIBS=$LIBS LIBS="-lsvld $LIBS" cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -10025,11 +10276,21 @@ dlopen (); _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -10042,7 +10303,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_svld_dlopen=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi echo "$as_me:$LINENO: result: $ac_cv_lib_svld_dlopen" >&5 @@ -10058,7 +10320,6 @@ else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -10082,11 +10343,21 @@ dld_link (); _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! 
-s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -10099,7 +10370,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_dld_dld_link=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi echo "$as_me:$LINENO: result: $ac_cv_lib_dld_dld_link" >&5 @@ -10154,7 +10426,7 @@ else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext < conftest.$ac_ext <&1 | grep 'Apple' >/dev/null ; then - archive_cmds_need_lc=no - case "$host_os" in - rhapsody* | darwin1.[012]) - allow_undefined_flag='-undefined suppress' - ;; - *) # Darwin 1.3 on - test -z ${LD_TWOLEVEL_NAMESPACE} && allow_undefined_flag='-flat_namespace -undefined suppress' - ;; - esac - # FIXME: Relying on posixy $() will cause problems for - # cross-compilation, but unfortunately the echo tests do not - # yet detect zsh echo's removal of \ escapes. Also zsh mangles - # `"' quotes if we put them in here... so don't! - output_verbose_link_cmd='echo' - archive_cmds='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs$compiler_flags -install_name $rpath/$soname $verstring' - module_cmds='$CC -bundle $allow_undefined_flag -o $lib $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's - archive_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs$compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - module_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -bundle $allow_undefined_flag -o $lib $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - hardcode_direct=no - hardcode_automatic=yes - hardcode_shlibpath_var=unsupported - whole_archive_flag_spec='-all_load $convenience' - link_all_deplibs=yes - fi ;; esac echo "$as_me:$LINENO: result: $enable_shared" >&5 @@ -10434,7 +10678,8 @@ if test -f "$ltmain"; then # Now quote all the things that may contain metacharacters while being # careful not to overquote the AC_SUBSTed values. We take copies of the # variables and quote the copies for generation of the libtool script. - for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC NM SED SHELL \ + for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC NM \ + SED SHELL STRIP \ libname_spec library_names_spec soname_spec extract_expsyms_cmds \ old_striplib striplib file_magic_cmd finish_cmds finish_eval \ deplibs_check_method reload_flag reload_cmds need_locks \ @@ -10552,7 +10797,7 @@ Xsed="$SED -e s/^X//" # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. -if test "X\${CDPATH+set}" = Xset; then CDPATH=:; export CDPATH; fi +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH # The names of the tagged configurations supported by this script. 
available_tags= @@ -10612,7 +10857,7 @@ LN_S=$lt_LN_S NM=$lt_NM # A symbol stripping program -STRIP=$STRIP +STRIP=$lt_STRIP # Used to examine libraries when file_magic_cmd begins "file" MAGIC_CMD=$MAGIC_CMD @@ -10643,7 +10888,7 @@ objext="$ac_objext" libext="$libext" # Shared library suffix (normally ".so"). -shrext='$shrext' +shrext_cmds='$shrext_cmds' # Executable file suffix (normally ""). exeext="$exeext" @@ -10885,7 +11130,10 @@ else # If there is no Makefile yet, we rely on a make rule to execute # `config.status --recheck' to rerun these tests and create the # libtool script then. - test -f Makefile && make "$ltmain" + ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` + if test -f "$ltmain_in"; then + test -f Makefile && make "$ltmain" + fi fi @@ -11066,7 +11314,7 @@ echo $ECHO_N "checking for ld used by $CC... $ECHO_C" >&6 # Accept absolute paths. [\\/]* | ?:[\\/]*) re_direlt='/[^/][^/]*/\.\./' - # Canonicalize the path of ld + # Canonicalize the pathname of ld ac_prog=`echo $ac_prog| $SED 's%\\\\%/%g'` while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do ac_prog=`echo $ac_prog| $SED "s%$re_direlt%/%"` @@ -11135,7 +11383,7 @@ if test "${lt_cv_prog_gnu_ld+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else # I'd rather use --version here, but apparently some GNU ld's only accept -v. -case `"$LD" -v 2>&1 &1 conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -11305,11 +11552,21 @@ main () _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -11326,7 +11583,8 @@ else sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" @@ -11340,7 +11598,6 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi else # Determine the default libpath from the value encoded in an empty executable. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -11357,11 +11614,21 @@ main () _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -11378,7 +11645,8 @@ else sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" @@ -11405,6 +11673,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi esac ;; + cygwin* | mingw* | pw32*) # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, # as there is no search path for DLLs. @@ -11428,44 +11697,68 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ld_shlibs_CXX=no fi ;; - - darwin* | rhapsody*) - if $CC -v 2>&1 | grep 'Apple' >/dev/null ; then - archive_cmds_need_lc_CXX=no - case "$host_os" in - rhapsody* | darwin1.[012]) - allow_undefined_flag_CXX='-undefined suppress' - ;; - *) # Darwin 1.3 on - test -z ${LD_TWOLEVEL_NAMESPACE} && allow_undefined_flag_CXX='-flat_namespace -undefined suppress' - ;; - esac - lt_int_apple_cc_single_mod=no - output_verbose_link_cmd='echo' - if $CC -dumpspecs 2>&1 | grep 'single_module' >/dev/null ; then - lt_int_apple_cc_single_mod=yes - fi - if test "X$lt_int_apple_cc_single_mod" = Xyes ; then - archive_cmds_CXX='$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - else - archive_cmds_CXX='$CC -r ${wl}-bind_at_load -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + darwin* | rhapsody*) + case "$host_os" in + rhapsody* | darwin1.[012]) + allow_undefined_flag_CXX='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + allow_undefined_flag_CXX='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[012]) + allow_undefined_flag_CXX='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + allow_undefined_flag_CXX='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; + esac + archive_cmds_need_lc_CXX=no + hardcode_direct_CXX=no + hardcode_automatic_CXX=yes + hardcode_shlibpath_var_CXX=unsupported + whole_archive_flag_spec_CXX='' + link_all_deplibs_CXX=yes + + if test "$GXX" = yes ; then + lt_int_apple_cc_single_mod=no + output_verbose_link_cmd='echo' + if $CC -dumpspecs 2>&1 | $EGREP 'single_module' >/dev/null ; then + lt_int_apple_cc_single_mod=yes fi - module_cmds_CXX='$CC -bundle ${wl}-bind_at_load $allow_undefined_flag -o $lib $libobjs $deplibs$compiler_flags' - - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's if test "X$lt_int_apple_cc_single_mod" = Xyes ; then - archive_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + archive_cmds_CXX='$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + else + archive_cmds_CXX='$CC -r 
-keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + fi + module_cmds_CXX='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's + if test "X$lt_int_apple_cc_single_mod" = Xyes ; then + archive_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + archive_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + fi + module_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' else - archive_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -r ${wl}-bind_at_load -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + case "$cc_basename" in + xlc*) + output_verbose_link_cmd='echo' + archive_cmds_CXX='$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + module_cmds_CXX='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's + archive_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + ld_shlibs_CXX=no + ;; + esac fi - module_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -bundle $allow_undefined_flag -o $lib $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - hardcode_direct_CXX=no - hardcode_automatic_CXX=yes - hardcode_shlibpath_var_CXX=unsupported - whole_archive_flag_spec_CXX='-all_load $convenience' - link_all_deplibs_CXX=yes - fi - ;; + ;; dgux*) case $cc_basename in @@ -11491,7 +11784,7 @@ if test -z "$aix_libpath"; then 
aix_libpath="/usr/lib:/lib"; fi freebsd-elf*) archive_cmds_need_lc_CXX=no ;; - freebsd*) + freebsd* | kfreebsd*-gnu) # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF # conventions ld_shlibs_CXX=yes @@ -11522,7 +11815,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. - output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | egrep "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | grep "-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' ;; *) if test "$GXX" = yes; then @@ -11671,9 +11964,20 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi icpc) # Intel C++ with_gnu_ld=yes + # version 8.0 and above of icpc choke on multiply defined symbols + # if we add $predep_objects and $postdep_objects, however 7.1 and + # earlier do not add the objects themselves. + case `$CC -V 2>&1` in + *"Version 7."*) + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 8.0 or newer + archive_cmds_CXX='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac archive_cmds_need_lc_CXX=no - archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' - archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' export_dynamic_flag_spec_CXX='${wl}--export-dynamic' whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive' @@ -11730,6 +12034,20 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi # Workaround some broken pre-1.5 toolchains output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' ;; + openbsd2*) + # C++ shared libraries are fairly broken + ld_shlibs_CXX=no + ;; + openbsd*) + archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' + export_dynamic_flag_spec_CXX='${wl}-E' + whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + fi + output_verbose_link_cmd='echo' + ;; osf3*) case $cc_basename in KCC) @@ -12189,6 +12507,16 @@ echo $ECHO_N "checking for $compiler option to produce PIC... 
$ECHO_C" >&6 ;; esac ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case "$cc_basename" in + xlc*) + lt_prog_compiler_pic_CXX='-qnocommon' + lt_prog_compiler_wl_CXX='-Wl,' + ;; + esac + ;; dgux*) case $cc_basename in ec++) @@ -12202,7 +12530,7 @@ echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6 ;; esac ;; - freebsd*) + freebsd* | kfreebsd*-gnu) # FreeBSD uses GNU C++ ;; hpux9* | hpux10* | hpux11*) @@ -12370,7 +12698,8 @@ echo "${ECHO_T}$lt_prog_compiler_pic_CXX" >&6 # Check to make sure the PIC flag actually works. # if test -n "$lt_prog_compiler_pic_CXX"; then - echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5 + +echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5 echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... $ECHO_C" >&6 if test "${lt_prog_compiler_pic_works_CXX+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 @@ -12388,11 +12717,11 @@ else -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:12391: $lt_compile\"" >&5) + (eval echo "\"\$as_me:12720: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 - echo "$as_me:12395: \$? = $ac_status" >&5 + echo "$as_me:12724: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings @@ -12439,13 +12768,6 @@ else mkdir out printf "$lt_simple_compile_test_code" > conftest.$ac_ext - # According to Tom Tromey, Ian Lance Taylor reported there are C compilers - # that will create temporary files in the current directory regardless of - # the output directory. Thus, making CWD read-only will cause this test - # to fail, enabling locking or at least warning the user not to do parallel - # builds. - chmod -w . - lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. @@ -12455,11 +12777,11 @@ else -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:12458: $lt_compile\"" >&5) + (eval echo "\"\$as_me:12780: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 - echo "$as_me:12462: \$? = $ac_status" >&5 + echo "$as_me:12784: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized @@ -12469,8 +12791,11 @@ else fi fi chmod u+w . - $rm conftest* out/* - rmdir out + $rm conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files + $rm out/* && rmdir out cd .. rmdir conftest $rm conftest* @@ -12598,107 +12923,41 @@ echo "${ECHO_T}$archive_cmds_need_lc_CXX" >&6 ;; esac -echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 -echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6 -hardcode_action_CXX= -if test -n "$hardcode_libdir_flag_spec_CXX" || \ - test -n "$runpath_var CXX" || \ - test "X$hardcode_automatic_CXX"="Xyes" ; then - - # We can hardcode non-existant directories. 
- if test "$hardcode_direct_CXX" != no && - # If the only mechanism to avoid hardcoding is shlibpath_var, we - # have to relink, otherwise we might link with an installed library - # when we should be linking with a yet-to-be-installed one - ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, CXX)" != no && - test "$hardcode_minus_L_CXX" != no; then - # Linking always hardcodes the temporary library directory. - hardcode_action_CXX=relink +echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 +echo $ECHO_N "checking dynamic linker characteristics... $ECHO_C" >&6 +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';' >/dev/null ; then + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` else - # We can link without hardcoding, and we can hardcode nonexisting dirs. - hardcode_action_CXX=immediate + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi else - # We cannot hardcode anything, or else we can only hardcode existing - # directories. - hardcode_action_CXX=unsupported + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" fi -echo "$as_me:$LINENO: result: $hardcode_action_CXX" >&5 -echo "${ECHO_T}$hardcode_action_CXX" >&6 +need_lib_prefix=unknown +hardcode_into_libs=no -if test "$hardcode_action_CXX" = relink; then - # Fast installation is not supported - enable_fast_install=no -elif test "$shlibpath_overrides_runpath" = yes || - test "$enable_shared" = no; then - # Fast installation is not necessary - enable_fast_install=needless -fi - -striplib= -old_striplib= -echo "$as_me:$LINENO: checking whether stripping libraries is possible" >&5 -echo $ECHO_N "checking whether stripping libraries is possible... $ECHO_C" >&6 -if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then - test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" - test -z "$striplib" && striplib="$STRIP --strip-unneeded" - echo "$as_me:$LINENO: result: yes" >&5 -echo "${ECHO_T}yes" >&6 -else -# FIXME - insert some real tests, host_os isn't really good enough - case $host_os in - darwin*) - if test -n "$STRIP" ; then - striplib="$STRIP -x" - echo "$as_me:$LINENO: result: yes" >&5 -echo "${ECHO_T}yes" >&6 - else - echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6 -fi - ;; - *) - echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6 - ;; - esac -fi - -echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 -echo $ECHO_N "checking dynamic linker characteristics... 
$ECHO_C" >&6 -library_names_spec= -libname_spec='lib$name' -soname_spec= -shrext=".so" -postinstall_cmds= -postuninstall_cmds= -finish_cmds= -finish_eval= -shlibpath_var= -shlibpath_overrides_runpath=unknown -version_type=none -dynamic_linker="$host_os ld.so" -sys_lib_dlsearch_path_spec="/lib /usr/lib" -if test "$GCC" = yes; then - sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` - if echo "$sys_lib_search_path_spec" | grep ';' >/dev/null ; then - # if the path contains ";" then we assume it to be the separator - # otherwise default to the standard path separator (i.e. ":") - it is - # assumed that no part of a normal pathname contains ";" but that should - # okay in the real world where ";" in dirpaths is itself problematic. - sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` - else - sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` - fi -else - sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" -fi -need_lib_prefix=unknown -hardcode_into_libs=no - -# when you set need_version to no, make sure it does not cause -set_version -# flags to be left without arguments -need_version=unknown +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown case $host_os in aix3*) @@ -12757,7 +13016,7 @@ aix4* | aix5*) amigaos*) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. - finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "(cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a)"; (cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a) || exit 1; done' + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; beos*) @@ -12766,7 +13025,7 @@ beos*) shlibpath_var=LIBRARY_PATH ;; -bsdi4*) +bsdi[45]*) version_type=linux need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' @@ -12782,7 +13041,7 @@ bsdi4*) cygwin* | mingw* | pw32*) version_type=windows - shrext=".dll" + shrext_cmds=".dll" need_version=no need_lib_prefix=no @@ -12804,7 +13063,7 @@ cygwin* | mingw* | pw32*) cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - sys_lib_search_path_spec="/lib /lib/w32api /usr/lib /usr/local/lib" + sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" ;; mingw*) # MinGW DLLs use traditional 'lib' prefix @@ -12843,17 +13102,16 @@ darwin* | rhapsody*) version_type=darwin need_lib_prefix=no need_version=no - # FIXME: Relying on posixy $() will cause problems for - # cross-compilation, but unfortunately the echo tests do not - # yet detect zsh echo's removal of \ escapes. 
library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' soname_spec='${libname}${release}${major}$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH - shrext='$(test .$module = .yes && echo .so || echo .dylib)' + shrext_cmds='$(test .$module = .yes && echo .so || echo .dylib)' # Apple's gcc prints 'gcc -print-search-dirs' doesn't operate the same. - if $CC -v 2>&1 | grep 'Apple' >/dev/null ; then - sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + else + sys_lib_search_path_spec='/lib /usr/lib /usr/local/lib' fi sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; @@ -12871,6 +13129,18 @@ freebsd1*) dynamic_linker=no ;; +kfreebsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + freebsd*) objformat=`test -x /usr/bin/objformat && /usr/bin/objformat || echo aout` version_type=freebsd-$objformat @@ -12919,7 +13189,7 @@ hpux9* | hpux10* | hpux11*) need_version=no case "$host_cpu" in ia64*) - shrext='.so' + shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH @@ -12934,7 +13204,7 @@ hpux9* | hpux10* | hpux11*) sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; hppa*64*) - shrext='.sl' + shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH @@ -12945,7 +13215,7 @@ hpux9* | hpux10* | hpux11*) sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) - shrext='.sl' + shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH @@ -13014,6 +13284,12 @@ linux*) # before this can be enabled. hardcode_into_libs=yes + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`$SED -e 's/:,\t/ /g;s/=^=*$//;s/=^= * / /g' /etc/ld.so.conf | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + # We used to test for /lib/ld.so.1 and disable shared libraries on # powerpc, because MkLinux only supported shared libraries with the # GNU dynamic linker. 
Since this was broken with cross compilers, @@ -13023,6 +13299,18 @@ linux*) dynamic_linker='GNU/Linux ld.so' ;; +knetbsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + netbsd*) version_type=sunos need_lib_prefix=no @@ -13032,7 +13320,7 @@ netbsd*) finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} ${libname}${shared_ext}' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='NetBSD ld.elf_so' fi @@ -13048,7 +13336,7 @@ newsos6) shlibpath_overrides_runpath=yes ;; -nto-qnx | nto-qnx6*) +nto-qnx*) version_type=linux need_lib_prefix=no need_version=no @@ -13081,7 +13369,7 @@ openbsd*) os2*) libname_spec='$name' - shrext=".dll" + shrext_cmds=".dll" need_lib_prefix=no library_names_spec='$libname${shared_ext} $libname.a' dynamic_linker='OS/2 ld.exe' @@ -13179,6 +13467,72 @@ echo "$as_me:$LINENO: result: $dynamic_linker" >&5 echo "${ECHO_T}$dynamic_linker" >&6 test "$dynamic_linker" = no && can_build_shared=no +echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 +echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6 +hardcode_action_CXX= +if test -n "$hardcode_libdir_flag_spec_CXX" || \ + test -n "$runpath_var_CXX" || \ + test "X$hardcode_automatic_CXX" = "Xyes" ; then + + # We can hardcode non-existant directories. + if test "$hardcode_direct_CXX" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, CXX)" != no && + test "$hardcode_minus_L_CXX" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action_CXX=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action_CXX=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + hardcode_action_CXX=unsupported +fi +echo "$as_me:$LINENO: result: $hardcode_action_CXX" >&5 +echo "${ECHO_T}$hardcode_action_CXX" >&6 + +if test "$hardcode_action_CXX" = relink; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + +striplib= +old_striplib= +echo "$as_me:$LINENO: checking whether stripping libraries is possible" >&5 +echo $ECHO_N "checking whether stripping libraries is possible... 
$ECHO_C" >&6 +if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 + else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + ;; + *) + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 + ;; + esac +fi + if test "x$enable_dlopen" != xyes; then enable_dlopen=unknown enable_dlopen_self=unknown @@ -13214,7 +13568,6 @@ else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -13238,11 +13591,21 @@ dlopen (); _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -13255,7 +13618,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_dl_dlopen=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 @@ -13279,21 +13643,28 @@ if test "${ac_cv_func_shl_load+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ +/* Define shl_load to an innocuous variant, in case declares shl_load. + For example, HP-UX 11i declares gettimeofday. */ +#define shl_load innocuous_shl_load + /* System header to define __stub macros and hopefully few prototypes, which can conflict with char shl_load (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ + #ifdef __STDC__ # include #else # include #endif + +#undef shl_load + /* Override any gcc2 internal prototype to avoid an error. */ #ifdef __cplusplus extern "C" @@ -13324,11 +13695,21 @@ return f != shl_load; _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -13341,7 +13722,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_func_shl_load=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi echo "$as_me:$LINENO: result: $ac_cv_func_shl_load" >&5 echo "${ECHO_T}$ac_cv_func_shl_load" >&6 @@ -13356,7 +13738,6 @@ else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -13380,11 +13761,21 @@ shl_load (); _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -13397,7 +13788,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_dld_shl_load=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi echo "$as_me:$LINENO: result: $ac_cv_lib_dld_shl_load" >&5 @@ -13411,21 +13803,28 @@ if test "${ac_cv_func_dlopen+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ +/* Define dlopen to an innocuous variant, in case declares dlopen. + For example, HP-UX 11i declares gettimeofday. */ +#define dlopen innocuous_dlopen + /* System header to define __stub macros and hopefully few prototypes, which can conflict with char dlopen (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ + #ifdef __STDC__ # include #else # include #endif + +#undef dlopen + /* Override any gcc2 internal prototype to avoid an error. */ #ifdef __cplusplus extern "C" @@ -13456,11 +13855,21 @@ return f != dlopen; _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? 
@@ -13473,7 +13882,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_func_dlopen=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi echo "$as_me:$LINENO: result: $ac_cv_func_dlopen" >&5 echo "${ECHO_T}$ac_cv_func_dlopen" >&6 @@ -13488,7 +13898,6 @@ else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -13512,11 +13921,21 @@ dlopen (); _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -13529,7 +13948,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_dl_dlopen=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 @@ -13545,7 +13965,6 @@ else ac_check_lib_save_LIBS=$LIBS LIBS="-lsvld $LIBS" cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -13569,11 +13988,21 @@ dlopen (); _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -13586,7 +14015,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_svld_dlopen=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi echo "$as_me:$LINENO: result: $ac_cv_lib_svld_dlopen" >&5 @@ -13602,7 +14032,6 @@ else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -13626,11 +14055,21 @@ dld_link (); _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! 
-s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -13643,7 +14082,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_dld_dld_link=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi echo "$as_me:$LINENO: result: $ac_cv_lib_dld_dld_link" >&5 @@ -13698,7 +14138,7 @@ else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext < conftest.$ac_ext <&6 lt_prog_compiler_static_F77='-bnso -bI:/lib/syscalls.exp' fi ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case "$cc_basename" in + xlc*) + lt_prog_compiler_pic_F77='-qnocommon' + lt_prog_compiler_wl_F77='-Wl,' + ;; + esac + ;; mingw* | pw32* | os2*) # This hack is so that the source file can tell whether it is being @@ -14533,12 +14987,12 @@ echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6 linux*) case $CC in - icc|ecc) + icc* | ecc*) lt_prog_compiler_wl_F77='-Wl,' lt_prog_compiler_pic_F77='-KPIC' lt_prog_compiler_static_F77='-static' ;; - ccc) + ccc*) lt_prog_compiler_wl_F77='-Wl,' # All Alpha code is PIC. lt_prog_compiler_static_F77='-non_shared' @@ -14600,7 +15054,8 @@ echo "${ECHO_T}$lt_prog_compiler_pic_F77" >&6 # Check to make sure the PIC flag actually works. # if test -n "$lt_prog_compiler_pic_F77"; then - echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic_F77 works" >&5 + +echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic_F77 works" >&5 echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic_F77 works... $ECHO_C" >&6 if test "${lt_prog_compiler_pic_works_F77+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 @@ -14618,11 +15073,11 @@ else -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:14621: $lt_compile\"" >&5) + (eval echo "\"\$as_me:15076: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 - echo "$as_me:14625: \$? = $ac_status" >&5 + echo "$as_me:15080: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings @@ -14669,13 +15124,6 @@ else mkdir out printf "$lt_simple_compile_test_code" > conftest.$ac_ext - # According to Tom Tromey, Ian Lance Taylor reported there are C compilers - # that will create temporary files in the current directory regardless of - # the output directory. Thus, making CWD read-only will cause this test - # to fail, enabling locking or at least warning the user not to do parallel - # builds. - chmod -w . - lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. @@ -14685,11 +15133,11 @@ else -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:14688: $lt_compile\"" >&5) + (eval echo "\"\$as_me:15136: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? 
cat out/conftest.err >&5 - echo "$as_me:14692: \$? = $ac_status" >&5 + echo "$as_me:15140: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized @@ -14699,8 +15147,11 @@ else fi fi chmod u+w . - $rm conftest* out/* - rmdir out + $rm conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files + $rm out/* && rmdir out cd .. rmdir conftest $rm conftest* @@ -14897,6 +15348,31 @@ EOF hardcode_shlibpath_var_F77=no ;; + linux*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + tmp_archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_cmds_F77="$tmp_archive_cmds" + supports_anon_versioning=no + case `$LD -v 2>/dev/null` in + *\ 01.* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + if test $supports_anon_versioning = yes; then + archive_expsym_cmds_F77='$echo "{ global:" > $output_objdir/$libname.ver~ +cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ +$echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + else + archive_expsym_cmds_F77="$tmp_archive_cmds" + fi + else + ld_shlibs_F77=no + fi + ;; + *) if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' @@ -15031,11 +15507,21 @@ EOF _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_f77_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -15052,7 +15538,8 @@ else sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi hardcode_libdir_flag_spec_F77='${wl}-blibpath:$libdir:'"$aix_libpath" @@ -15071,11 +15558,21 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_f77_werror_flag" + || test ! 
-s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -15092,7 +15589,8 @@ else sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi hardcode_libdir_flag_spec_F77='${wl}-blibpath:$libdir:'"$aix_libpath" @@ -15119,7 +15617,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ld_shlibs_F77=no ;; - bsdi4*) + bsdi[45]*) export_dynamic_flag_spec_F77=-rdynamic ;; @@ -15133,7 +15631,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. - shrext=".dll" + shrext_cmds=".dll" # FIXME: Setting linknames here is a bad hack. archive_cmds_F77='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | $SED -e '\''s/ -lc$//'\''` -link -dll~linknames=' # The linker will automatically build a .lib file if we build a DLL. @@ -15145,45 +15643,54 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ;; darwin* | rhapsody*) - if $CC -v 2>&1 | grep 'Apple' >/dev/null ; then - archive_cmds_need_lc_F77=no case "$host_os" in - rhapsody* | darwin1.[012]) - allow_undefined_flag_F77='-undefined suppress' - ;; - *) # Darwin 1.3 on - test -z ${LD_TWOLEVEL_NAMESPACE} && allow_undefined_flag_F77='-flat_namespace -undefined suppress' - ;; + rhapsody* | darwin1.[012]) + allow_undefined_flag_F77='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + allow_undefined_flag_F77='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[012]) + allow_undefined_flag_F77='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + allow_undefined_flag_F77='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; esac - # FIXME: Relying on posixy $() will cause problems for - # cross-compilation, but unfortunately the echo tests do not - # yet detect zsh echo's removal of \ escapes. Also zsh mangles - # `"' quotes if we put them in here... so don't! 
- lt_int_apple_cc_single_mod=no - output_verbose_link_cmd='echo' - if $CC -dumpspecs 2>&1 | grep 'single_module' >/dev/null ; then - lt_int_apple_cc_single_mod=yes - fi - if test "X$lt_int_apple_cc_single_mod" = Xyes ; then - archive_cmds_F77='$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - else - archive_cmds_F77='$CC -r ${wl}-bind_at_load -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - fi - module_cmds_F77='$CC -bundle ${wl}-bind_at_load $allow_undefined_flag -o $lib $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's - if test "X$lt_int_apple_cc_single_mod" = Xyes ; then - archive_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - else - archive_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -r ${wl}-bind_at_load -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - fi - module_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -bundle $allow_undefined_flag -o $lib $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + archive_cmds_need_lc_F77=no hardcode_direct_F77=no hardcode_automatic_F77=yes hardcode_shlibpath_var_F77=unsupported - whole_archive_flag_spec_F77='-all_load $convenience' + whole_archive_flag_spec_F77='' link_all_deplibs_F77=yes - fi - ;; + if test "$GCC" = yes ; then + output_verbose_link_cmd='echo' + archive_cmds_F77='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + module_cmds_F77='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's + archive_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case "$cc_basename" in + xlc*) + output_verbose_link_cmd='echo' + archive_cmds_F77='$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + module_cmds_F77='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it 
doesn't exist in older darwin ld's + archive_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + ld_shlibs_F77=no + ;; + esac + fi + ;; dgux*) archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' @@ -15215,7 +15722,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ;; # FreeBSD 3 and greater uses gcc -shared to do shared libraries. - freebsd*) + freebsd* | kfreebsd*-gnu) archive_cmds_F77='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' hardcode_libdir_flag_spec_F77='-R$libdir' hardcode_direct_F77=yes @@ -15326,6 +15833,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi hardcode_shlibpath_var_F77=no if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then archive_cmds_F77='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_F77='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' hardcode_libdir_flag_spec_F77='${wl}-rpath,$libdir' export_dynamic_flag_spec_F77='${wl}-E' else @@ -15575,78 +16083,12 @@ echo "${ECHO_T}$archive_cmds_need_lc_F77" >&6 ;; esac -echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 -echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6 -hardcode_action_F77= -if test -n "$hardcode_libdir_flag_spec_F77" || \ - test -n "$runpath_var F77" || \ - test "X$hardcode_automatic_F77"="Xyes" ; then - - # We can hardcode non-existant directories. - if test "$hardcode_direct_F77" != no && - # If the only mechanism to avoid hardcoding is shlibpath_var, we - # have to relink, otherwise we might link with an installed library - # when we should be linking with a yet-to-be-installed one - ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, F77)" != no && - test "$hardcode_minus_L_F77" != no; then - # Linking always hardcodes the temporary library directory. - hardcode_action_F77=relink - else - # We can link without hardcoding, and we can hardcode nonexisting dirs. - hardcode_action_F77=immediate - fi -else - # We cannot hardcode anything, or else we can only hardcode existing - # directories. - hardcode_action_F77=unsupported -fi -echo "$as_me:$LINENO: result: $hardcode_action_F77" >&5 -echo "${ECHO_T}$hardcode_action_F77" >&6 - -if test "$hardcode_action_F77" = relink; then - # Fast installation is not supported - enable_fast_install=no -elif test "$shlibpath_overrides_runpath" = yes || - test "$enable_shared" = no; then - # Fast installation is not necessary - enable_fast_install=needless -fi - -striplib= -old_striplib= -echo "$as_me:$LINENO: checking whether stripping libraries is possible" >&5 -echo $ECHO_N "checking whether stripping libraries is possible... 
$ECHO_C" >&6 -if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then - test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" - test -z "$striplib" && striplib="$STRIP --strip-unneeded" - echo "$as_me:$LINENO: result: yes" >&5 -echo "${ECHO_T}yes" >&6 -else -# FIXME - insert some real tests, host_os isn't really good enough - case $host_os in - darwin*) - if test -n "$STRIP" ; then - striplib="$STRIP -x" - echo "$as_me:$LINENO: result: yes" >&5 -echo "${ECHO_T}yes" >&6 - else - echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6 -fi - ;; - *) - echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6 - ;; - esac -fi - echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 echo $ECHO_N "checking dynamic linker characteristics... $ECHO_C" >&6 library_names_spec= libname_spec='lib$name' soname_spec= -shrext=".so" +shrext_cmds=".so" postinstall_cmds= postuninstall_cmds= finish_cmds= @@ -15734,7 +16176,7 @@ aix4* | aix5*) amigaos*) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. - finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "(cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a)"; (cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a) || exit 1; done' + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; beos*) @@ -15743,7 +16185,7 @@ beos*) shlibpath_var=LIBRARY_PATH ;; -bsdi4*) +bsdi[45]*) version_type=linux need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' @@ -15759,7 +16201,7 @@ bsdi4*) cygwin* | mingw* | pw32*) version_type=windows - shrext=".dll" + shrext_cmds=".dll" need_version=no need_lib_prefix=no @@ -15781,7 +16223,7 @@ cygwin* | mingw* | pw32*) cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - sys_lib_search_path_spec="/lib /lib/w32api /usr/lib /usr/local/lib" + sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" ;; mingw*) # MinGW DLLs use traditional 'lib' prefix @@ -15820,17 +16262,16 @@ darwin* | rhapsody*) version_type=darwin need_lib_prefix=no need_version=no - # FIXME: Relying on posixy $() will cause problems for - # cross-compilation, but unfortunately the echo tests do not - # yet detect zsh echo's removal of \ escapes. library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' soname_spec='${libname}${release}${major}$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH - shrext='$(test .$module = .yes && echo .so || echo .dylib)' + shrext_cmds='$(test .$module = .yes && echo .so || echo .dylib)' # Apple's gcc prints 'gcc -print-search-dirs' doesn't operate the same. 
- if $CC -v 2>&1 | grep 'Apple' >/dev/null ; then - sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + else + sys_lib_search_path_spec='/lib /usr/lib /usr/local/lib' fi sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; @@ -15848,6 +16289,18 @@ freebsd1*) dynamic_linker=no ;; +kfreebsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + freebsd*) objformat=`test -x /usr/bin/objformat && /usr/bin/objformat || echo aout` version_type=freebsd-$objformat @@ -15896,7 +16349,7 @@ hpux9* | hpux10* | hpux11*) need_version=no case "$host_cpu" in ia64*) - shrext='.so' + shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH @@ -15911,7 +16364,7 @@ hpux9* | hpux10* | hpux11*) sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; hppa*64*) - shrext='.sl' + shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH @@ -15922,7 +16375,7 @@ hpux9* | hpux10* | hpux11*) sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) - shrext='.sl' + shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH @@ -15991,6 +16444,12 @@ linux*) # before this can be enabled. hardcode_into_libs=yes + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`$SED -e 's/:,\t/ /g;s/=^=*$//;s/=^= * / /g' /etc/ld.so.conf | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + # We used to test for /lib/ld.so.1 and disable shared libraries on # powerpc, because MkLinux only supported shared libraries with the # GNU dynamic linker. 
Since this was broken with cross compilers, @@ -16000,6 +16459,18 @@ linux*) dynamic_linker='GNU/Linux ld.so' ;; +knetbsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + netbsd*) version_type=sunos need_lib_prefix=no @@ -16009,7 +16480,7 @@ netbsd*) finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} ${libname}${shared_ext}' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='NetBSD ld.elf_so' fi @@ -16025,7 +16496,7 @@ newsos6) shlibpath_overrides_runpath=yes ;; -nto-qnx | nto-qnx6*) +nto-qnx*) version_type=linux need_lib_prefix=no need_version=no @@ -16058,7 +16529,7 @@ openbsd*) os2*) libname_spec='$name' - shrext=".dll" + shrext_cmds=".dll" need_lib_prefix=no library_names_spec='$libname${shared_ext} $libname.a' dynamic_linker='OS/2 ld.exe' @@ -16156,6 +16627,73 @@ echo "$as_me:$LINENO: result: $dynamic_linker" >&5 echo "${ECHO_T}$dynamic_linker" >&6 test "$dynamic_linker" = no && can_build_shared=no +echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 +echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6 +hardcode_action_F77= +if test -n "$hardcode_libdir_flag_spec_F77" || \ + test -n "$runpath_var_F77" || \ + test "X$hardcode_automatic_F77" = "Xyes" ; then + + # We can hardcode non-existant directories. + if test "$hardcode_direct_F77" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, F77)" != no && + test "$hardcode_minus_L_F77" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action_F77=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action_F77=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + hardcode_action_F77=unsupported +fi +echo "$as_me:$LINENO: result: $hardcode_action_F77" >&5 +echo "${ECHO_T}$hardcode_action_F77" >&6 + +if test "$hardcode_action_F77" = relink; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + +striplib= +old_striplib= +echo "$as_me:$LINENO: checking whether stripping libraries is possible" >&5 +echo $ECHO_N "checking whether stripping libraries is possible... 
$ECHO_C" >&6 +if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 + else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + ;; + *) + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 + ;; + esac +fi + + # The else clause should only fire when bootstrapping the # libtool distribution, otherwise you forgot to ship ltmain.sh @@ -16170,7 +16708,8 @@ if test -f "$ltmain"; then # Now quote all the things that may contain metacharacters while being # careful not to overquote the AC_SUBSTed values. We take copies of the # variables and quote the copies for generation of the libtool script. - for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC NM SED SHELL \ + for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC NM \ + SED SHELL STRIP \ libname_spec library_names_spec soname_spec extract_expsyms_cmds \ old_striplib striplib file_magic_cmd finish_cmds finish_eval \ deplibs_check_method reload_flag reload_cmds need_locks \ @@ -16300,7 +16839,7 @@ LN_S=$lt_LN_S NM=$lt_NM # A symbol stripping program -STRIP=$STRIP +STRIP=$lt_STRIP # Used to examine libraries when file_magic_cmd begins "file" MAGIC_CMD=$MAGIC_CMD @@ -16331,7 +16870,7 @@ objext="$ac_objext" libext="$libext" # Shared library suffix (normally ".so"). -shrext='$shrext' +shrext_cmds='$shrext_cmds' # Executable file suffix (normally ""). exeext="$exeext" @@ -16548,7 +17087,10 @@ else # If there is no Makefile yet, we rely on a make rule to execute # `config.status --recheck' to rerun these tests and create the # libtool script then. - test -f Makefile && make "$ltmain" + ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` + if test -f "$ltmain_in"; then + test -f Makefile && make "$ltmain" + fi fi @@ -16611,7 +17153,8 @@ lt_prog_compiler_no_builtin_flag_GCJ= if test "$GCC" = yes; then lt_prog_compiler_no_builtin_flag_GCJ=' -fno-builtin' - echo "$as_me:$LINENO: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 + +echo "$as_me:$LINENO: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 echo $ECHO_N "checking if $compiler supports -fno-rtti -fno-exceptions... $ECHO_C" >&6 if test "${lt_cv_prog_compiler_rtti_exceptions+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 @@ -16629,11 +17172,11 @@ else -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:16632: $lt_compile\"" >&5) + (eval echo "\"\$as_me:17175: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 - echo "$as_me:16636: \$? = $ac_status" >&5 + echo "$as_me:17179: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings @@ -16740,6 +17283,16 @@ echo $ECHO_N "checking for $compiler option to produce PIC... 
$ECHO_C" >&6 lt_prog_compiler_static_GCJ='-bnso -bI:/lib/syscalls.exp' fi ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case "$cc_basename" in + xlc*) + lt_prog_compiler_pic_GCJ='-qnocommon' + lt_prog_compiler_wl_GCJ='-Wl,' + ;; + esac + ;; mingw* | pw32* | os2*) # This hack is so that the source file can tell whether it is being @@ -16776,12 +17329,12 @@ echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6 linux*) case $CC in - icc|ecc) + icc* | ecc*) lt_prog_compiler_wl_GCJ='-Wl,' lt_prog_compiler_pic_GCJ='-KPIC' lt_prog_compiler_static_GCJ='-static' ;; - ccc) + ccc*) lt_prog_compiler_wl_GCJ='-Wl,' # All Alpha code is PIC. lt_prog_compiler_static_GCJ='-non_shared' @@ -16843,7 +17396,8 @@ echo "${ECHO_T}$lt_prog_compiler_pic_GCJ" >&6 # Check to make sure the PIC flag actually works. # if test -n "$lt_prog_compiler_pic_GCJ"; then - echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic_GCJ works" >&5 + +echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic_GCJ works" >&5 echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic_GCJ works... $ECHO_C" >&6 if test "${lt_prog_compiler_pic_works_GCJ+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 @@ -16861,11 +17415,11 @@ else -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:16864: $lt_compile\"" >&5) + (eval echo "\"\$as_me:17418: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 - echo "$as_me:16868: \$? = $ac_status" >&5 + echo "$as_me:17422: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings @@ -16912,13 +17466,6 @@ else mkdir out printf "$lt_simple_compile_test_code" > conftest.$ac_ext - # According to Tom Tromey, Ian Lance Taylor reported there are C compilers - # that will create temporary files in the current directory regardless of - # the output directory. Thus, making CWD read-only will cause this test - # to fail, enabling locking or at least warning the user not to do parallel - # builds. - chmod -w . - lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. @@ -16928,11 +17475,11 @@ else -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:16931: $lt_compile\"" >&5) + (eval echo "\"\$as_me:17478: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 - echo "$as_me:16935: \$? = $ac_status" >&5 + echo "$as_me:17482: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized @@ -16942,8 +17489,11 @@ else fi fi chmod u+w . - $rm conftest* out/* - rmdir out + $rm conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files + $rm out/* && rmdir out cd .. 
rmdir conftest $rm conftest* @@ -17140,6 +17690,31 @@ EOF hardcode_shlibpath_var_GCJ=no ;; + linux*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + tmp_archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_cmds_GCJ="$tmp_archive_cmds" + supports_anon_versioning=no + case `$LD -v 2>/dev/null` in + *\ 01.* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + if test $supports_anon_versioning = yes; then + archive_expsym_cmds_GCJ='$echo "{ global:" > $output_objdir/$libname.ver~ +cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ +$echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + else + archive_expsym_cmds_GCJ="$tmp_archive_cmds" + fi + else + ld_shlibs_GCJ=no + fi + ;; + *) if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' @@ -17268,7 +17843,6 @@ EOF allow_undefined_flag_GCJ='-berok' # Determine the default libpath from the value encoded in an empty executable. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -17285,11 +17859,21 @@ main () _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -17306,7 +17890,8 @@ else sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi hardcode_libdir_flag_spec_GCJ='${wl}-blibpath:$libdir:'"$aix_libpath" @@ -17319,7 +17904,6 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi else # Determine the default libpath from the value encoded in an empty executable. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -17336,11 +17920,21 @@ main () _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -17357,7 +17951,8 @@ else sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi hardcode_libdir_flag_spec_GCJ='${wl}-blibpath:$libdir:'"$aix_libpath" @@ -17384,7 +17979,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ld_shlibs_GCJ=no ;; - bsdi4*) + bsdi[45]*) export_dynamic_flag_spec_GCJ=-rdynamic ;; @@ -17398,7 +17993,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. - shrext=".dll" + shrext_cmds=".dll" # FIXME: Setting linknames here is a bad hack. archive_cmds_GCJ='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | $SED -e '\''s/ -lc$//'\''` -link -dll~linknames=' # The linker will automatically build a .lib file if we build a DLL. @@ -17410,43 +18005,52 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ;; darwin* | rhapsody*) - if $CC -v 2>&1 | grep 'Apple' >/dev/null ; then - archive_cmds_need_lc_GCJ=no case "$host_os" in - rhapsody* | darwin1.[012]) - allow_undefined_flag_GCJ='-undefined suppress' - ;; - *) # Darwin 1.3 on - test -z ${LD_TWOLEVEL_NAMESPACE} && allow_undefined_flag_GCJ='-flat_namespace -undefined suppress' - ;; + rhapsody* | darwin1.[012]) + allow_undefined_flag_GCJ='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + allow_undefined_flag_GCJ='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[012]) + allow_undefined_flag_GCJ='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + allow_undefined_flag_GCJ='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; esac - # FIXME: Relying on posixy $() will cause problems for - # cross-compilation, but unfortunately the echo tests do not - # yet detect zsh echo's removal of \ escapes. Also zsh mangles - # `"' quotes if we put them in here... so don't! 
- lt_int_apple_cc_single_mod=no - output_verbose_link_cmd='echo' - if $CC -dumpspecs 2>&1 | grep 'single_module' >/dev/null ; then - lt_int_apple_cc_single_mod=yes - fi - if test "X$lt_int_apple_cc_single_mod" = Xyes ; then - archive_cmds_GCJ='$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - else - archive_cmds_GCJ='$CC -r ${wl}-bind_at_load -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - fi - module_cmds_GCJ='$CC -bundle ${wl}-bind_at_load $allow_undefined_flag -o $lib $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's - if test "X$lt_int_apple_cc_single_mod" = Xyes ; then - archive_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - else - archive_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -r ${wl}-bind_at_load -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - fi - module_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -bundle $allow_undefined_flag -o $lib $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + archive_cmds_need_lc_GCJ=no hardcode_direct_GCJ=no hardcode_automatic_GCJ=yes hardcode_shlibpath_var_GCJ=unsupported - whole_archive_flag_spec_GCJ='-all_load $convenience' + whole_archive_flag_spec_GCJ='' link_all_deplibs_GCJ=yes + if test "$GCC" = yes ; then + output_verbose_link_cmd='echo' + archive_cmds_GCJ='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + module_cmds_GCJ='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's + archive_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case "$cc_basename" in + xlc*) + output_verbose_link_cmd='echo' + archive_cmds_GCJ='$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + module_cmds_GCJ='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't 
exist in older darwin ld's + archive_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + ld_shlibs_GCJ=no + ;; + esac fi ;; @@ -17480,7 +18084,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ;; # FreeBSD 3 and greater uses gcc -shared to do shared libraries. - freebsd*) + freebsd* | kfreebsd*-gnu) archive_cmds_GCJ='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' hardcode_libdir_flag_spec_GCJ='-R$libdir' hardcode_direct_GCJ=yes @@ -17591,6 +18195,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi hardcode_shlibpath_var_GCJ=no if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then archive_cmds_GCJ='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_GCJ='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' hardcode_libdir_flag_spec_GCJ='${wl}-rpath,$libdir' export_dynamic_flag_spec_GCJ='${wl}-E' else @@ -17840,103 +18445,37 @@ echo "${ECHO_T}$archive_cmds_need_lc_GCJ" >&6 ;; esac -echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 -echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6 -hardcode_action_GCJ= -if test -n "$hardcode_libdir_flag_spec_GCJ" || \ - test -n "$runpath_var GCJ" || \ - test "X$hardcode_automatic_GCJ"="Xyes" ; then - - # We can hardcode non-existant directories. - if test "$hardcode_direct_GCJ" != no && - # If the only mechanism to avoid hardcoding is shlibpath_var, we - # have to relink, otherwise we might link with an installed library - # when we should be linking with a yet-to-be-installed one - ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, GCJ)" != no && - test "$hardcode_minus_L_GCJ" != no; then - # Linking always hardcodes the temporary library directory. - hardcode_action_GCJ=relink +echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 +echo $ECHO_N "checking dynamic linker characteristics... $ECHO_C" >&6 +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';' >/dev/null ; then + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` else - # We can link without hardcoding, and we can hardcode nonexisting dirs. 
- hardcode_action_GCJ=immediate + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi else - # We cannot hardcode anything, or else we can only hardcode existing - # directories. - hardcode_action_GCJ=unsupported + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" fi -echo "$as_me:$LINENO: result: $hardcode_action_GCJ" >&5 -echo "${ECHO_T}$hardcode_action_GCJ" >&6 - -if test "$hardcode_action_GCJ" = relink; then - # Fast installation is not supported - enable_fast_install=no -elif test "$shlibpath_overrides_runpath" = yes || - test "$enable_shared" = no; then - # Fast installation is not necessary - enable_fast_install=needless -fi - -striplib= -old_striplib= -echo "$as_me:$LINENO: checking whether stripping libraries is possible" >&5 -echo $ECHO_N "checking whether stripping libraries is possible... $ECHO_C" >&6 -if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then - test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" - test -z "$striplib" && striplib="$STRIP --strip-unneeded" - echo "$as_me:$LINENO: result: yes" >&5 -echo "${ECHO_T}yes" >&6 -else -# FIXME - insert some real tests, host_os isn't really good enough - case $host_os in - darwin*) - if test -n "$STRIP" ; then - striplib="$STRIP -x" - echo "$as_me:$LINENO: result: yes" >&5 -echo "${ECHO_T}yes" >&6 - else - echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6 -fi - ;; - *) - echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6 - ;; - esac -fi - -echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 -echo $ECHO_N "checking dynamic linker characteristics... $ECHO_C" >&6 -library_names_spec= -libname_spec='lib$name' -soname_spec= -shrext=".so" -postinstall_cmds= -postuninstall_cmds= -finish_cmds= -finish_eval= -shlibpath_var= -shlibpath_overrides_runpath=unknown -version_type=none -dynamic_linker="$host_os ld.so" -sys_lib_dlsearch_path_spec="/lib /usr/lib" -if test "$GCC" = yes; then - sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` - if echo "$sys_lib_search_path_spec" | grep ';' >/dev/null ; then - # if the path contains ";" then we assume it to be the separator - # otherwise default to the standard path separator (i.e. ":") - it is - # assumed that no part of a normal pathname contains ";" but that should - # okay in the real world where ";" in dirpaths is itself problematic. - sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` - else - sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` - fi -else - sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" -fi -need_lib_prefix=unknown -hardcode_into_libs=no +need_lib_prefix=unknown +hardcode_into_libs=no # when you set need_version to no, make sure it does not cause -set_version # flags to be left without arguments @@ -17999,7 +18538,7 @@ aix4* | aix5*) amigaos*) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. 
- finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "(cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a)"; (cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a) || exit 1; done' + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; beos*) @@ -18008,7 +18547,7 @@ beos*) shlibpath_var=LIBRARY_PATH ;; -bsdi4*) +bsdi[45]*) version_type=linux need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' @@ -18024,7 +18563,7 @@ bsdi4*) cygwin* | mingw* | pw32*) version_type=windows - shrext=".dll" + shrext_cmds=".dll" need_version=no need_lib_prefix=no @@ -18046,7 +18585,7 @@ cygwin* | mingw* | pw32*) cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - sys_lib_search_path_spec="/lib /lib/w32api /usr/lib /usr/local/lib" + sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" ;; mingw*) # MinGW DLLs use traditional 'lib' prefix @@ -18085,17 +18624,16 @@ darwin* | rhapsody*) version_type=darwin need_lib_prefix=no need_version=no - # FIXME: Relying on posixy $() will cause problems for - # cross-compilation, but unfortunately the echo tests do not - # yet detect zsh echo's removal of \ escapes. library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' soname_spec='${libname}${release}${major}$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH - shrext='$(test .$module = .yes && echo .so || echo .dylib)' + shrext_cmds='$(test .$module = .yes && echo .so || echo .dylib)' # Apple's gcc prints 'gcc -print-search-dirs' doesn't operate the same. 
- if $CC -v 2>&1 | grep 'Apple' >/dev/null ; then - sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + else + sys_lib_search_path_spec='/lib /usr/lib /usr/local/lib' fi sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; @@ -18113,6 +18651,18 @@ freebsd1*) dynamic_linker=no ;; +kfreebsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + freebsd*) objformat=`test -x /usr/bin/objformat && /usr/bin/objformat || echo aout` version_type=freebsd-$objformat @@ -18161,7 +18711,7 @@ hpux9* | hpux10* | hpux11*) need_version=no case "$host_cpu" in ia64*) - shrext='.so' + shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH @@ -18176,7 +18726,7 @@ hpux9* | hpux10* | hpux11*) sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; hppa*64*) - shrext='.sl' + shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH @@ -18187,7 +18737,7 @@ hpux9* | hpux10* | hpux11*) sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) - shrext='.sl' + shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH @@ -18256,6 +18806,12 @@ linux*) # before this can be enabled. hardcode_into_libs=yes + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`$SED -e 's/:,\t/ /g;s/=^=*$//;s/=^= * / /g' /etc/ld.so.conf | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + # We used to test for /lib/ld.so.1 and disable shared libraries on # powerpc, because MkLinux only supported shared libraries with the # GNU dynamic linker. 
Since this was broken with cross compilers, @@ -18265,6 +18821,18 @@ linux*) dynamic_linker='GNU/Linux ld.so' ;; +knetbsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + netbsd*) version_type=sunos need_lib_prefix=no @@ -18274,7 +18842,7 @@ netbsd*) finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} ${libname}${shared_ext}' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='NetBSD ld.elf_so' fi @@ -18290,7 +18858,7 @@ newsos6) shlibpath_overrides_runpath=yes ;; -nto-qnx | nto-qnx6*) +nto-qnx*) version_type=linux need_lib_prefix=no need_version=no @@ -18323,7 +18891,7 @@ openbsd*) os2*) libname_spec='$name' - shrext=".dll" + shrext_cmds=".dll" need_lib_prefix=no library_names_spec='$libname${shared_ext} $libname.a' dynamic_linker='OS/2 ld.exe' @@ -18421,6 +18989,72 @@ echo "$as_me:$LINENO: result: $dynamic_linker" >&5 echo "${ECHO_T}$dynamic_linker" >&6 test "$dynamic_linker" = no && can_build_shared=no +echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 +echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6 +hardcode_action_GCJ= +if test -n "$hardcode_libdir_flag_spec_GCJ" || \ + test -n "$runpath_var_GCJ" || \ + test "X$hardcode_automatic_GCJ" = "Xyes" ; then + + # We can hardcode non-existant directories. + if test "$hardcode_direct_GCJ" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, GCJ)" != no && + test "$hardcode_minus_L_GCJ" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action_GCJ=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action_GCJ=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + hardcode_action_GCJ=unsupported +fi +echo "$as_me:$LINENO: result: $hardcode_action_GCJ" >&5 +echo "${ECHO_T}$hardcode_action_GCJ" >&6 + +if test "$hardcode_action_GCJ" = relink; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + +striplib= +old_striplib= +echo "$as_me:$LINENO: checking whether stripping libraries is possible" >&5 +echo $ECHO_N "checking whether stripping libraries is possible... 
$ECHO_C" >&6 +if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 + else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + ;; + *) + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 + ;; + esac +fi + if test "x$enable_dlopen" != xyes; then enable_dlopen=unknown enable_dlopen_self=unknown @@ -18456,7 +19090,6 @@ else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -18480,11 +19113,21 @@ dlopen (); _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -18497,7 +19140,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_dl_dlopen=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 @@ -18521,21 +19165,28 @@ if test "${ac_cv_func_shl_load+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ +/* Define shl_load to an innocuous variant, in case declares shl_load. + For example, HP-UX 11i declares gettimeofday. */ +#define shl_load innocuous_shl_load + /* System header to define __stub macros and hopefully few prototypes, which can conflict with char shl_load (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ + #ifdef __STDC__ # include #else # include #endif + +#undef shl_load + /* Override any gcc2 internal prototype to avoid an error. */ #ifdef __cplusplus extern "C" @@ -18566,11 +19217,21 @@ return f != shl_load; _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -18583,7 +19244,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_func_shl_load=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi echo "$as_me:$LINENO: result: $ac_cv_func_shl_load" >&5 echo "${ECHO_T}$ac_cv_func_shl_load" >&6 @@ -18598,7 +19260,6 @@ else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -18622,11 +19283,21 @@ shl_load (); _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -18639,7 +19310,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_dld_shl_load=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi echo "$as_me:$LINENO: result: $ac_cv_lib_dld_shl_load" >&5 @@ -18653,21 +19325,28 @@ if test "${ac_cv_func_dlopen+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ +/* Define dlopen to an innocuous variant, in case declares dlopen. + For example, HP-UX 11i declares gettimeofday. */ +#define dlopen innocuous_dlopen + /* System header to define __stub macros and hopefully few prototypes, which can conflict with char dlopen (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ + #ifdef __STDC__ # include #else # include #endif + +#undef dlopen + /* Override any gcc2 internal prototype to avoid an error. */ #ifdef __cplusplus extern "C" @@ -18698,11 +19377,21 @@ return f != dlopen; _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? 
@@ -18715,7 +19404,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_func_dlopen=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi echo "$as_me:$LINENO: result: $ac_cv_func_dlopen" >&5 echo "${ECHO_T}$ac_cv_func_dlopen" >&6 @@ -18730,7 +19420,6 @@ else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -18754,11 +19443,21 @@ dlopen (); _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -18771,7 +19470,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_dl_dlopen=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 @@ -18787,7 +19487,6 @@ else ac_check_lib_save_LIBS=$LIBS LIBS="-lsvld $LIBS" cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -18811,11 +19510,21 @@ dlopen (); _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -18828,7 +19537,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_svld_dlopen=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi echo "$as_me:$LINENO: result: $ac_cv_lib_svld_dlopen" >&5 @@ -18844,7 +19554,6 @@ else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -18868,11 +19577,21 @@ dld_link (); _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! 
-s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -18885,7 +19604,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_dld_dld_link=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi echo "$as_me:$LINENO: result: $ac_cv_lib_dld_dld_link" >&5 @@ -18940,7 +19660,7 @@ else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext < conftest.$ac_ext <&6 versuffix="" release="" libname=libfoo - eval _SOSUFFIX=\"$shrext\" - if test "X$_SOSUFFIX" = "" ; then + eval _SOSUFFIX=\"$shrext_cmds\" + if test "$_SOSUFFIX" = "" ; then _SOSUFFIX=".so" if test `$LIBTOOL_PROG --config | grep build_libtool_libs | grep no` 2>/dev/null; then - if test "X$_SOSUFFIX_MESSAGE" = "X"; then + if test "$_SOSUFFIX_MESSAGE" = ""; then _SOSUFFIX_MESSAGE=yes { echo "$as_me:$LINENO: WARNING: libtool may not know about this architecture." >&5 echo "$as_me: WARNING: libtool may not know about this architecture." >&2;} @@ -20084,11 +20812,11 @@ echo $ECHO_N "checking MODSUFFIX from libtool... $ECHO_C" >&6 versuffix="" release="" libname=libfoo - eval _SOSUFFIX=\"$shrext\" - if test "X$_SOSUFFIX" = "" ; then + eval _SOSUFFIX=\"$shrext_cmds\" + if test "$_SOSUFFIX" = "" ; then _SOSUFFIX=".so" if test `$LIBTOOL_PROG --config | grep build_libtool_libs | grep no` 2>/dev/null; then - if test "X$_SOSUFFIX_MESSAGE" = "X"; then + if test "$_SOSUFFIX_MESSAGE" = ""; then _SOSUFFIX_MESSAGE=yes { echo "$as_me:$LINENO: WARNING: libtool may not know about this architecture." >&5 echo "$as_me: WARNING: libtool may not know about this architecture." >&2;} @@ -20111,11 +20839,11 @@ echo $ECHO_N "checking JMODSUFFIX from libtool... $ECHO_C" >&6 versuffix="" release="" libname=libfoo - eval _SOSUFFIX=\"$shrext\" - if test "X$_SOSUFFIX" = "" ; then + eval _SOSUFFIX=\"$shrext_cmds\" + if test "$_SOSUFFIX" = "" ; then _SOSUFFIX=".so" if test `$LIBTOOL_PROG --config | grep build_libtool_libs | grep no` 2>/dev/null; then - if test "X$_SOSUFFIX_MESSAGE" = "X"; then + if test "$_SOSUFFIX_MESSAGE" = ""; then _SOSUFFIX_MESSAGE=yes { echo "$as_me:$LINENO: WARNING: libtool may not know about this architecture." >&5 echo "$as_me: WARNING: libtool may not know about this architecture." >&2;} @@ -20147,7 +20875,7 @@ MAKEFILE_CXXLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CXXLINK}" LIBTOOL="\$(SHELL) ./libtool" case "$host_os" in -cygwin*) +cygwin* | mingw*) MAKEFILE_SOLINK="$MAKEFILE_SOLINK -no-undefined" MAKEFILE_XSOLINK="$MAKEFILE_XSOLINK -no-undefined";; esac @@ -20170,6 +20898,17 @@ else enable_static="yes" fi +case "$host_os" in + darwin*) + LIBTSO_MODULE="" + LIBTSO_MODSUFFIX=".dylib" + ;; + *) + LIBTSO_MODULE="-module" + LIBTSO_MODSUFFIX=@MODSUFFIX@ + ;; +esac + # C API. 
if test "$enable_shared" = "no"; then DEFAULT_LIB="\$(libdb_version)" @@ -20309,7 +21048,7 @@ else JAVA_TEST=Test.java CLASS_TEST=Test.class cat << \EOF > $JAVA_TEST -/* #line 20312 "configure" */ +/* #line 21051 "configure" */ public class Test { } EOF @@ -20568,7 +21307,7 @@ EOF if uudecode$EXEEXT Test.uue; then ac_cv_prog_uudecode_base64=yes else - echo "configure: 20571: uudecode had trouble decoding base 64 file 'Test.uue'" >&5 + echo "configure: 21310: uudecode had trouble decoding base 64 file 'Test.uue'" >&5 echo "configure: failed file was:" >&5 cat Test.uue >&5 ac_cv_prog_uudecode_base64=no @@ -20686,7 +21425,7 @@ else JAVA_TEST=Test.java CLASS_TEST=Test.class cat << \EOF > $JAVA_TEST -/* #line 20689 "configure" */ +/* #line 21428 "configure" */ public class Test { } EOF @@ -20723,7 +21462,7 @@ JAVA_TEST=Test.java CLASS_TEST=Test.class TEST=Test cat << \EOF > $JAVA_TEST -/* [#]line 20726 "configure" */ +/* [#]line 21465 "configure" */ public class Test { public static void main (String args[]) { System.exit (0); @@ -20941,242 +21680,268 @@ else PATH_SEPARATOR="/" fi -# Optional RPC client/server. -if test "$db_cv_rpc" = "yes"; then - cat >>confdefs.h <<\_ACEOF -#define HAVE_RPC 1 -_ACEOF - - - - - RPC_CLIENT_OBJS="\$(RPC_CLIENT_OBJS)" - ADDITIONAL_PROGS="berkeley_db_svc $ADDITIONAL_PROGS" - - case "$host_os" in - hpux*) - echo "$as_me:$LINENO: checking for svc_run" >&5 -echo $ECHO_N "checking for svc_run... $ECHO_C" >&6 -if test "${ac_cv_func_svc_run+set}" = set; then +# Checks for include files, structures, C types. +echo "$as_me:$LINENO: checking whether stat file-mode macros are broken" >&5 +echo $ECHO_N "checking whether stat file-mode macros are broken... $ECHO_C" >&6 +if test "${ac_cv_header_stat_broken+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -/* System header to define __stub macros and hopefully few prototypes, - which can conflict with char svc_run (); below. - Prefer to if __STDC__ is defined, since - exists even on freestanding compilers. */ -#ifdef __STDC__ -# include -#else -# include +#include +#include + +#if defined(S_ISBLK) && defined(S_IFDIR) +# if S_ISBLK (S_IFDIR) +You lose. +# endif #endif -/* Override any gcc2 internal prototype to avoid an error. */ -#ifdef __cplusplus -extern "C" -{ + +#if defined(S_ISBLK) && defined(S_IFCHR) +# if S_ISBLK (S_IFCHR) +You lose. +# endif #endif -/* We use char because int might match the return type of a gcc2 - builtin and then its argument prototype would still apply. */ -char svc_run (); -/* The GNU C library defines this for functions which it implements - to always fail with ENOSYS. Some functions are actually named - something starting with __ and the normal name is an alias. */ -#if defined (__stub_svc_run) || defined (__stub___svc_run) -choke me -#else -char (*f) () = svc_run; + +#if defined(S_ISLNK) && defined(S_IFREG) +# if S_ISLNK (S_IFREG) +You lose. +# endif #endif -#ifdef __cplusplus -} + +#if defined(S_ISSOCK) && defined(S_IFREG) +# if S_ISSOCK (S_IFREG) +You lose. 
+# endif #endif +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "You lose" >/dev/null 2>&1; then + ac_cv_header_stat_broken=yes +else + ac_cv_header_stat_broken=no +fi +rm -f conftest* + +fi +echo "$as_me:$LINENO: result: $ac_cv_header_stat_broken" >&5 +echo "${ECHO_T}$ac_cv_header_stat_broken" >&6 +if test $ac_cv_header_stat_broken = yes; then + +cat >>confdefs.h <<\_ACEOF +#define STAT_MACROS_BROKEN 1 +_ACEOF + +fi + +echo "$as_me:$LINENO: checking whether time.h and sys/time.h may both be included" >&5 +echo $ECHO_N "checking whether time.h and sys/time.h may both be included... $ECHO_C" >&6 +if test "${ac_cv_header_time+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +#include +#include + int main () { -return f != svc_run; +if ((struct tm *) 0) +return 0; ; return 0; } _ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_func_svc_run=yes + ac_cv_header_time=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_func_svc_run=no +ac_cv_header_time=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: $ac_cv_func_svc_run" >&5 -echo "${ECHO_T}$ac_cv_func_svc_run" >&6 -if test $ac_cv_func_svc_run = yes; then - : -else - echo "$as_me:$LINENO: checking for svc_run in -lnsl" >&5 -echo $ECHO_N "checking for svc_run in -lnsl... $ECHO_C" >&6 -if test "${ac_cv_lib_nsl_svc_run+set}" = set; then +echo "$as_me:$LINENO: result: $ac_cv_header_time" >&5 +echo "${ECHO_T}$ac_cv_header_time" >&6 +if test $ac_cv_header_time = yes; then + +cat >>confdefs.h <<\_ACEOF +#define TIME_WITH_SYS_TIME 1 +_ACEOF + +fi + + + + + + +ac_header_dirent=no +for ac_hdr in dirent.h sys/ndir.h sys/dir.h ndir.h; do + as_ac_Header=`echo "ac_cv_header_dirent_$ac_hdr" | $as_tr_sh` +echo "$as_me:$LINENO: checking for $ac_hdr that defines DIR" >&5 +echo $ECHO_N "checking for $ac_hdr that defines DIR... $ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lnsl $LIBS" -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ +#include +#include <$ac_hdr> -/* Override any gcc2 internal prototype to avoid an error. 
*/ -#ifdef __cplusplus -extern "C" -#endif -/* We use char because int might match the return type of a gcc2 - builtin and then its argument prototype would still apply. */ -char svc_run (); int main () { -svc_run (); +if ((DIR *) 0) +return 0; ; return 0; } _ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_lib_nsl_svc_run=yes + eval "$as_ac_Header=yes" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_lib_nsl_svc_run=no -fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS +eval "$as_ac_Header=no" fi -echo "$as_me:$LINENO: result: $ac_cv_lib_nsl_svc_run" >&5 -echo "${ECHO_T}$ac_cv_lib_nsl_svc_run" >&6 -if test $ac_cv_lib_nsl_svc_run = yes; then - LIBS="-lnsl $LIBS"; LIBTSO_LIBS="-lnsl $LIBTSO_LIBS"; - LIBJSO_LIBS="-lnsl $LIBJSO_LIBS" +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 +if test `eval echo '${'$as_ac_Header'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_hdr" | $as_tr_cpp` 1 +_ACEOF +ac_header_dirent=$ac_hdr; break fi -;; - solaris*) - echo "$as_me:$LINENO: checking for svc_run" >&5 -echo $ECHO_N "checking for svc_run... $ECHO_C" >&6 -if test "${ac_cv_func_svc_run+set}" = set; then + +done +# Two versions of opendir et al. are in -ldir and -lx on SCO Xenix. +if test $ac_header_dirent = dirent.h; then + echo "$as_me:$LINENO: checking for library containing opendir" >&5 +echo $ECHO_N "checking for library containing opendir... $ECHO_C" >&6 +if test "${ac_cv_search_opendir+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + ac_func_search_save_LIBS=$LIBS +ac_cv_search_opendir=no +cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -/* System header to define __stub macros and hopefully few prototypes, - which can conflict with char svc_run (); below. - Prefer to if __STDC__ is defined, since - exists even on freestanding compilers. */ -#ifdef __STDC__ -# include -#else -# include -#endif + /* Override any gcc2 internal prototype to avoid an error. */ #ifdef __cplusplus extern "C" -{ #endif /* We use char because int might match the return type of a gcc2 builtin and then its argument prototype would still apply. */ -char svc_run (); -/* The GNU C library defines this for functions which it implements - to always fail with ENOSYS. Some functions are actually named - something starting with __ and the normal name is an alias. 
*/ -#if defined (__stub_svc_run) || defined (__stub___svc_run) -choke me -#else -char (*f) () = svc_run; -#endif -#ifdef __cplusplus -} -#endif - +char opendir (); int main () { -return f != svc_run; +opendir (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_func_svc_run=yes + ac_cv_search_opendir="none required" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_func_svc_run=no -fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: $ac_cv_func_svc_run" >&5 -echo "${ECHO_T}$ac_cv_func_svc_run" >&6 -if test $ac_cv_func_svc_run = yes; then - : -else - -echo "$as_me:$LINENO: checking for svc_run in -lnsl" >&5 -echo $ECHO_N "checking for svc_run in -lnsl... $ECHO_C" >&6 -if test "${ac_cv_lib_nsl_svc_run+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lnsl $LIBS" -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +if test "$ac_cv_search_opendir" = no; then + for ac_lib in dir; do + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -21189,1053 +21954,2860 @@ extern "C" #endif /* We use char because int might match the return type of a gcc2 builtin and then its argument prototype would still apply. */ -char svc_run (); +char opendir (); int main () { -svc_run (); +opendir (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_lib_nsl_svc_run=yes + ac_cv_search_opendir="-l$ac_lib" +break else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_lib_nsl_svc_run=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + done fi -echo "$as_me:$LINENO: result: $ac_cv_lib_nsl_svc_run" >&5 -echo "${ECHO_T}$ac_cv_lib_nsl_svc_run" >&6 -if test $ac_cv_lib_nsl_svc_run = yes; then - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBNSL 1 +LIBS=$ac_func_search_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_search_opendir" >&5 +echo "${ECHO_T}$ac_cv_search_opendir" >&6 +if test "$ac_cv_search_opendir" != no; then + test "$ac_cv_search_opendir" = "none required" || LIBS="$ac_cv_search_opendir $LIBS" + +fi + +else + echo "$as_me:$LINENO: checking for library containing opendir" >&5 +echo $ECHO_N "checking for library containing opendir... $ECHO_C" >&6 +if test "${ac_cv_search_opendir+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_func_search_save_LIBS=$LIBS +ac_cv_search_opendir=no +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ _ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ - LIBS="-lnsl $LIBS" +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char opendir (); +int +main () +{ +opendir (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_search_opendir="none required" +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +if test "$ac_cv_search_opendir" = no; then + for ac_lib in x; do + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char opendir (); +int +main () +{ +opendir (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! 
-s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_search_opendir="-l$ac_lib" +break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 fi -;; - esac +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + done fi +LIBS=$ac_func_search_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_search_opendir" >&5 +echo "${ECHO_T}$ac_cv_search_opendir" >&6 +if test "$ac_cv_search_opendir" != no; then + test "$ac_cv_search_opendir" = "none required" || LIBS="$ac_cv_search_opendir $LIBS" -case "$host_os" in - darwin*) - LIBTSO_MODULE="" - LIBTSO_MODSUFFIX=".dylib" - ;; - *) - LIBTSO_MODULE="-module" - LIBTSO_MODSUFFIX=@MODSUFFIX@ - ;; -esac - +fi -if test "$db_cv_tcl" = "yes"; then - if test `$LIBTOOL_PROG --config | grep build_libtool_libs | grep no` 2>/dev/null; then - { { echo "$as_me:$LINENO: error: Tcl requires shared libraries" >&5 -echo "$as_me: error: Tcl requires shared libraries" >&2;} - { (exit 1); exit 1; }; } - fi +fi - if test "${ac_cv_c_tclconfig+set}" = set; then +for ac_header in sys/select.h sys/time.h sys/fcntl.h +do +as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then echo $ECHO_N "(cached) $ECHO_C" >&6 +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 else + # Is the header compilable? +echo "$as_me:$LINENO: checking $ac_header usability" >&5 +echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +#include <$ac_header> +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_header_compiler=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 +ac_header_compiler=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 +echo "${ECHO_T}$ac_header_compiler" >&6 - # First check to see if --with-tclconfig was specified. 
- if test "${with_tclconfig}" != no; then - if test -f "${with_tclconfig}/tclConfig.sh" ; then - ac_cv_c_tclconfig=`(cd ${with_tclconfig}; pwd)` - else - { { echo "$as_me:$LINENO: error: ${with_tclconfig} directory doesn't contain tclConfig.sh" >&5 -echo "$as_me: error: ${with_tclconfig} directory doesn't contain tclConfig.sh" >&2;} - { (exit 1); exit 1; }; } - fi - fi - - # check in a few common install locations - if test x"${ac_cv_c_tclconfig}" = x ; then - for i in `ls -d /usr/local/lib 2>/dev/null` ; do - if test -f "$i/tclConfig.sh" ; then - ac_cv_c_tclconfig=`(cd $i; pwd)` - break - fi - done - fi - - +# Is the header present? +echo "$as_me:$LINENO: checking $ac_header presence" >&5 +echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include <$ac_header> +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes fi +if test -z "$ac_cpp_err"; then + ac_header_preproc=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + ac_header_preproc=no +fi +rm -f conftest.err conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 +echo "${ECHO_T}$ac_header_preproc" >&6 - if test x"${ac_cv_c_tclconfig}" = x ; then - TCL_BIN_DIR="# no Tcl configs found" - { { echo "$as_me:$LINENO: error: can't find Tcl configuration definitions" >&5 -echo "$as_me: error: can't find Tcl configuration definitions" >&2;} - { (exit 1); exit 1; }; } - else - TCL_BIN_DIR=${ac_cv_c_tclconfig} - fi +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in + yes:no: ) + { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 +echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} + ac_header_preproc=yes + ;; + no:yes:* ) + { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 +echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 +echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" 
>&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 +echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 +echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 +echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} + ( + cat <<\_ASBOX +## ------------------------------------ ## +## Report this to support@sleepycat.com ## +## ------------------------------------ ## +_ASBOX + ) | + sed "s/^/$as_me: WARNING: /" >&2 + ;; +esac +echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + eval "$as_ac_Header=\$ac_header_preproc" +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 + +fi +if test `eval echo '${'$as_ac_Header'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + +echo "$as_me:$LINENO: checking for struct stat.st_blksize" >&5 +echo $ECHO_N "checking for struct stat.st_blksize... $ECHO_C" >&6 +if test "${ac_cv_member_struct_stat_st_blksize+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +static struct stat ac_aggr; +if (ac_aggr.st_blksize) +return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_member_struct_stat_st_blksize=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +static struct stat ac_aggr; +if (sizeof ac_aggr.st_blksize) +return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! 
-s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_member_struct_stat_st_blksize=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_member_struct_stat_st_blksize=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_member_struct_stat_st_blksize" >&5 +echo "${ECHO_T}$ac_cv_member_struct_stat_st_blksize" >&6 +if test $ac_cv_member_struct_stat_st_blksize = yes; then + +cat >>confdefs.h <<_ACEOF +#define HAVE_STRUCT_STAT_ST_BLKSIZE 1 +_ACEOF + + +fi + + + +# db.h includes and , not the other default includes +# autoconf usually includes. For that reason, we specify a set of includes +# for all type checking tests. [#5060] +# +# C99 says types should be in ; include if it exists. +# +# Some systems have types in ; include if it exists. +# +# IBM's OS/390 and z/OS releases have types in not also found +# in ; include if it exists. +db_includes="#include " + +if test "${ac_cv_header_inttypes_h+set}" = set; then + echo "$as_me:$LINENO: checking for inttypes.h" >&5 +echo $ECHO_N "checking for inttypes.h... $ECHO_C" >&6 +if test "${ac_cv_header_inttypes_h+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +fi +echo "$as_me:$LINENO: result: $ac_cv_header_inttypes_h" >&5 +echo "${ECHO_T}$ac_cv_header_inttypes_h" >&6 +else + # Is the header compilable? +echo "$as_me:$LINENO: checking inttypes.h usability" >&5 +echo $ECHO_N "checking inttypes.h usability... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +#include +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_header_compiler=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_header_compiler=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 +echo "${ECHO_T}$ac_header_compiler" >&6 + +# Is the header present? +echo "$as_me:$LINENO: checking inttypes.h presence" >&5 +echo $ECHO_N "checking inttypes.h presence... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? 
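
The member probe above (the conftest that declares "static struct stat ac_aggr;" and references ac_aggr.st_blksize) ends up defining HAVE_STRUCT_STAT_ST_BLKSIZE when the member exists. As a rough, hypothetical illustration of how such a macro is normally consumed, not code taken from this patch (the function name and the 8KB fallback are invented for the example):

/* Hypothetical consumer of HAVE_STRUCT_STAT_ST_BLKSIZE. */
#include <sys/types.h>
#include <sys/stat.h>
#include <stdio.h>

static size_t
pick_io_size(const struct stat *sb)
{
#ifdef HAVE_STRUCT_STAT_ST_BLKSIZE
	/* Prefer the filesystem's reported block size when the member exists. */
	if (sb->st_blksize > 0)
		return ((size_t)sb->st_blksize);
#else
	(void)sb;
#endif
	return ((size_t)(8 * 1024));	/* conservative fallback */
}

int
main(void)
{
	struct stat sb;

	if (stat(".", &sb) != 0)
		return (1);
	printf("preferred I/O size: %lu\n", (unsigned long)pick_io_size(&sb));
	return (0);
}
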
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + ac_header_preproc=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_preproc=no +fi +rm -f conftest.err conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 +echo "${ECHO_T}$ac_header_preproc" >&6 + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in + yes:no: ) + { echo "$as_me:$LINENO: WARNING: inttypes.h: accepted by the compiler, rejected by the preprocessor!" >&5 +echo "$as_me: WARNING: inttypes.h: accepted by the compiler, rejected by the preprocessor!" >&2;} + { echo "$as_me:$LINENO: WARNING: inttypes.h: proceeding with the compiler's result" >&5 +echo "$as_me: WARNING: inttypes.h: proceeding with the compiler's result" >&2;} + ac_header_preproc=yes + ;; + no:yes:* ) + { echo "$as_me:$LINENO: WARNING: inttypes.h: present but cannot be compiled" >&5 +echo "$as_me: WARNING: inttypes.h: present but cannot be compiled" >&2;} + { echo "$as_me:$LINENO: WARNING: inttypes.h: check for missing prerequisite headers?" >&5 +echo "$as_me: WARNING: inttypes.h: check for missing prerequisite headers?" >&2;} + { echo "$as_me:$LINENO: WARNING: inttypes.h: see the Autoconf documentation" >&5 +echo "$as_me: WARNING: inttypes.h: see the Autoconf documentation" >&2;} + { echo "$as_me:$LINENO: WARNING: inttypes.h: section \"Present But Cannot Be Compiled\"" >&5 +echo "$as_me: WARNING: inttypes.h: section \"Present But Cannot Be Compiled\"" >&2;} + { echo "$as_me:$LINENO: WARNING: inttypes.h: proceeding with the preprocessor's result" >&5 +echo "$as_me: WARNING: inttypes.h: proceeding with the preprocessor's result" >&2;} + { echo "$as_me:$LINENO: WARNING: inttypes.h: in the future, the compiler will take precedence" >&5 +echo "$as_me: WARNING: inttypes.h: in the future, the compiler will take precedence" >&2;} + ( + cat <<\_ASBOX +## ------------------------------------ ## +## Report this to support@sleepycat.com ## +## ------------------------------------ ## +_ASBOX + ) | + sed "s/^/$as_me: WARNING: /" >&2 + ;; +esac +echo "$as_me:$LINENO: checking for inttypes.h" >&5 +echo $ECHO_N "checking for inttypes.h... $ECHO_C" >&6 +if test "${ac_cv_header_inttypes_h+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_cv_header_inttypes_h=$ac_header_preproc +fi +echo "$as_me:$LINENO: result: $ac_cv_header_inttypes_h" >&5 +echo "${ECHO_T}$ac_cv_header_inttypes_h" >&6 + +fi +if test $ac_cv_header_inttypes_h = yes; then + + db_includes="$db_includes +#include " + inttypes_h_decl="#include " +fi + + + +if test "${ac_cv_header_stdint_h+set}" = set; then + echo "$as_me:$LINENO: checking for stdint.h" >&5 +echo $ECHO_N "checking for stdint.h... $ECHO_C" >&6 +if test "${ac_cv_header_stdint_h+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +fi +echo "$as_me:$LINENO: result: $ac_cv_header_stdint_h" >&5 +echo "${ECHO_T}$ac_cv_header_stdint_h" >&6 +else + # Is the header compilable? +echo "$as_me:$LINENO: checking stdint.h usability" >&5 +echo $ECHO_N "checking stdint.h usability... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. 
*/ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +#include +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_header_compiler=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_header_compiler=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 +echo "${ECHO_T}$ac_header_compiler" >&6 + +# Is the header present? +echo "$as_me:$LINENO: checking stdint.h presence" >&5 +echo $ECHO_N "checking stdint.h presence... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + ac_header_preproc=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_preproc=no +fi +rm -f conftest.err conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 +echo "${ECHO_T}$ac_header_preproc" >&6 + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in + yes:no: ) + { echo "$as_me:$LINENO: WARNING: stdint.h: accepted by the compiler, rejected by the preprocessor!" >&5 +echo "$as_me: WARNING: stdint.h: accepted by the compiler, rejected by the preprocessor!" >&2;} + { echo "$as_me:$LINENO: WARNING: stdint.h: proceeding with the compiler's result" >&5 +echo "$as_me: WARNING: stdint.h: proceeding with the compiler's result" >&2;} + ac_header_preproc=yes + ;; + no:yes:* ) + { echo "$as_me:$LINENO: WARNING: stdint.h: present but cannot be compiled" >&5 +echo "$as_me: WARNING: stdint.h: present but cannot be compiled" >&2;} + { echo "$as_me:$LINENO: WARNING: stdint.h: check for missing prerequisite headers?" >&5 +echo "$as_me: WARNING: stdint.h: check for missing prerequisite headers?" 
>&2;} + { echo "$as_me:$LINENO: WARNING: stdint.h: see the Autoconf documentation" >&5 +echo "$as_me: WARNING: stdint.h: see the Autoconf documentation" >&2;} + { echo "$as_me:$LINENO: WARNING: stdint.h: section \"Present But Cannot Be Compiled\"" >&5 +echo "$as_me: WARNING: stdint.h: section \"Present But Cannot Be Compiled\"" >&2;} + { echo "$as_me:$LINENO: WARNING: stdint.h: proceeding with the preprocessor's result" >&5 +echo "$as_me: WARNING: stdint.h: proceeding with the preprocessor's result" >&2;} + { echo "$as_me:$LINENO: WARNING: stdint.h: in the future, the compiler will take precedence" >&5 +echo "$as_me: WARNING: stdint.h: in the future, the compiler will take precedence" >&2;} + ( + cat <<\_ASBOX +## ------------------------------------ ## +## Report this to support@sleepycat.com ## +## ------------------------------------ ## +_ASBOX + ) | + sed "s/^/$as_me: WARNING: /" >&2 + ;; +esac +echo "$as_me:$LINENO: checking for stdint.h" >&5 +echo $ECHO_N "checking for stdint.h... $ECHO_C" >&6 +if test "${ac_cv_header_stdint_h+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_cv_header_stdint_h=$ac_header_preproc +fi +echo "$as_me:$LINENO: result: $ac_cv_header_stdint_h" >&5 +echo "${ECHO_T}$ac_cv_header_stdint_h" >&6 + +fi +if test $ac_cv_header_stdint_h = yes; then + + db_includes="$db_includes +#include " + stdint_h_decl="#include " +fi + + + +if test "${ac_cv_header_stddef_h+set}" = set; then + echo "$as_me:$LINENO: checking for stddef.h" >&5 +echo $ECHO_N "checking for stddef.h... $ECHO_C" >&6 +if test "${ac_cv_header_stddef_h+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +fi +echo "$as_me:$LINENO: result: $ac_cv_header_stddef_h" >&5 +echo "${ECHO_T}$ac_cv_header_stddef_h" >&6 +else + # Is the header compilable? +echo "$as_me:$LINENO: checking stddef.h usability" >&5 +echo $ECHO_N "checking stddef.h usability... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +#include +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_header_compiler=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_header_compiler=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 +echo "${ECHO_T}$ac_header_compiler" >&6 + +# Is the header present? +echo "$as_me:$LINENO: checking stddef.h presence" >&5 +echo $ECHO_N "checking stddef.h presence... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + ac_header_preproc=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_preproc=no +fi +rm -f conftest.err conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 +echo "${ECHO_T}$ac_header_preproc" >&6 + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in + yes:no: ) + { echo "$as_me:$LINENO: WARNING: stddef.h: accepted by the compiler, rejected by the preprocessor!" >&5 +echo "$as_me: WARNING: stddef.h: accepted by the compiler, rejected by the preprocessor!" >&2;} + { echo "$as_me:$LINENO: WARNING: stddef.h: proceeding with the compiler's result" >&5 +echo "$as_me: WARNING: stddef.h: proceeding with the compiler's result" >&2;} + ac_header_preproc=yes + ;; + no:yes:* ) + { echo "$as_me:$LINENO: WARNING: stddef.h: present but cannot be compiled" >&5 +echo "$as_me: WARNING: stddef.h: present but cannot be compiled" >&2;} + { echo "$as_me:$LINENO: WARNING: stddef.h: check for missing prerequisite headers?" >&5 +echo "$as_me: WARNING: stddef.h: check for missing prerequisite headers?" >&2;} + { echo "$as_me:$LINENO: WARNING: stddef.h: see the Autoconf documentation" >&5 +echo "$as_me: WARNING: stddef.h: see the Autoconf documentation" >&2;} + { echo "$as_me:$LINENO: WARNING: stddef.h: section \"Present But Cannot Be Compiled\"" >&5 +echo "$as_me: WARNING: stddef.h: section \"Present But Cannot Be Compiled\"" >&2;} + { echo "$as_me:$LINENO: WARNING: stddef.h: proceeding with the preprocessor's result" >&5 +echo "$as_me: WARNING: stddef.h: proceeding with the preprocessor's result" >&2;} + { echo "$as_me:$LINENO: WARNING: stddef.h: in the future, the compiler will take precedence" >&5 +echo "$as_me: WARNING: stddef.h: in the future, the compiler will take precedence" >&2;} + ( + cat <<\_ASBOX +## ------------------------------------ ## +## Report this to support@sleepycat.com ## +## ------------------------------------ ## +_ASBOX + ) | + sed "s/^/$as_me: WARNING: /" >&2 + ;; +esac +echo "$as_me:$LINENO: checking for stddef.h" >&5 +echo $ECHO_N "checking for stddef.h... $ECHO_C" >&6 +if test "${ac_cv_header_stddef_h+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_cv_header_stddef_h=$ac_header_preproc +fi +echo "$as_me:$LINENO: result: $ac_cv_header_stddef_h" >&5 +echo "${ECHO_T}$ac_cv_header_stddef_h" >&6 + +fi +if test $ac_cv_header_stddef_h = yes; then + + db_includes="$db_includes +#include " + stddef_h_decl="#include " +fi + + +db_includes="$db_includes +#include " + +# We require off_t and size_t, and we don't try to substitute our own +# if we can't find them. +echo "$as_me:$LINENO: checking for off_t" >&5 +echo $ECHO_N "checking for off_t... $ECHO_C" >&6 +if test "${ac_cv_type_off_t+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
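
The three header checks above assemble the "$db_includes" prologue that every later type probe is compiled against. The angle-bracket header names were stripped from this rendering of the diff, but the cache variables (ac_cv_header_inttypes_h, ac_cv_header_stdint_h, ac_cv_header_stddef_h) show it starts from <sys/types.h> and appends <inttypes.h>, <stdint.h> and <stddef.h> as they are found, presumably finishing with <stdio.h> to match the db.h comment. A hedged C restatement of that prologue, using HAVE_*-style guards purely for illustration:

/* Illustrative equivalent of the "$db_includes" prologue; header names are
 * inferred from the ac_cv_header_* cache variables, and the HAVE_* guards
 * are an assumption used only for this sketch. */
#include <sys/types.h>
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
#ifdef HAVE_STDINT_H
#include <stdint.h>
#endif
#ifdef HAVE_STDDEF_H
#include <stddef.h>
#endif
#include <stdio.h>

int
main(void)
{
	/* With the prologue in place, later probes can test types such as
	 * off_t and size_t against one consistent set of declarations. */
	printf("sizeof(off_t)=%lu sizeof(size_t)=%lu\n",
	    (unsigned long)sizeof(off_t), (unsigned long)sizeof(size_t));
	return (0);
}
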
*/ +$db_includes + +int +main () +{ +if ((off_t *) 0) + return 0; +if (sizeof (off_t)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_type_off_t=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_type_off_t=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_type_off_t" >&5 +echo "${ECHO_T}$ac_cv_type_off_t" >&6 +if test $ac_cv_type_off_t = yes; then + : +else + { { echo "$as_me:$LINENO: error: No off_t type." >&5 +echo "$as_me: error: No off_t type." >&2;} + { (exit 1); exit 1; }; } +fi + +echo "$as_me:$LINENO: checking for size_t" >&5 +echo $ECHO_N "checking for size_t... $ECHO_C" >&6 +if test "${ac_cv_type_size_t+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$db_includes + +int +main () +{ +if ((size_t *) 0) + return 0; +if (sizeof (size_t)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_type_size_t=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_type_size_t=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_type_size_t" >&5 +echo "${ECHO_T}$ac_cv_type_size_t" >&6 +if test $ac_cv_type_size_t = yes; then + : +else + { { echo "$as_me:$LINENO: error: No size_t type." >&5 +echo "$as_me: error: No size_t type." >&2;} + { (exit 1); exit 1; }; } +fi + + +# Check for long long and unsigned long long, we only support sequences +# if those types are available. +echo "$as_me:$LINENO: checking for long long" >&5 +echo $ECHO_N "checking for long long... $ECHO_C" >&6 +if test "${ac_cv_type_long_long+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
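
The off_t and size_t checks above (and the long long checks that follow) all use the same generated idiom: the test program casts a null pointer to the type and takes sizeof the type, both of which are only well formed when the type name is declared, so success or failure of compilation answers "does the type exist". If either off_t or size_t is missing, configure aborts rather than substituting its own definition. A minimal standalone restatement of that probe, assuming only standard headers:

/* Compiles exactly when off_t is declared; mirrors the generated probe. */
#include <sys/types.h>

int
main(void)
{
	if ((off_t *)0)
		return (0);
	if (sizeof(off_t))
		return (0);
	return (0);
}
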
*/ +$db_includes + +int +main () +{ +if ((long long *) 0) + return 0; +if (sizeof (long long)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_type_long_long=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_type_long_long=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_type_long_long" >&5 +echo "${ECHO_T}$ac_cv_type_long_long" >&6 +if test $ac_cv_type_long_long = yes; then + +cat >>confdefs.h <<_ACEOF +#define HAVE_LONG_LONG 1 +_ACEOF + + +fi +echo "$as_me:$LINENO: checking for unsigned long long" >&5 +echo $ECHO_N "checking for unsigned long long... $ECHO_C" >&6 +if test "${ac_cv_type_unsigned_long_long+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$db_includes + +int +main () +{ +if ((unsigned long long *) 0) + return 0; +if (sizeof (unsigned long long)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_type_unsigned_long_long=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_type_unsigned_long_long=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_type_unsigned_long_long" >&5 +echo "${ECHO_T}$ac_cv_type_unsigned_long_long" >&6 +if test $ac_cv_type_unsigned_long_long = yes; then + +cat >>confdefs.h <<_ACEOF +#define HAVE_UNSIGNED_LONG_LONG 1 +_ACEOF + + +fi + + +# We need to know the sizes of various objects on this system. +echo "$as_me:$LINENO: checking for char" >&5 +echo $ECHO_N "checking for char... $ECHO_C" >&6 +if test "${ac_cv_type_char+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
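
The long long and unsigned long long probes above only define HAVE_LONG_LONG and HAVE_UNSIGNED_LONG_LONG; the comment notes that sequence support depends on them because sequences need a 64-bit integer. A hypothetical sketch of how a consumer might key its sequence type off those macros (the typedef names are examples, not identifiers from the patch):

/* Hypothetical selection of a 64-bit sequence type. */
#ifdef HAVE_LONG_LONG
typedef long long example_seq_t;
typedef unsigned long long example_useq_t;
#else
/* Fall back to long for this illustration; a real build without a 64-bit
 * integer type would simply leave sequence support out. */
typedef long example_seq_t;
typedef unsigned long example_useq_t;
#endif

int
main(void)
{
	example_seq_t next = 0;
	example_useq_t delta = 1;

	next += (example_seq_t)delta;
	return ((int)next == 1 ? 0 : 1);
}
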
*/ +$db_includes + +int +main () +{ +if ((char *) 0) + return 0; +if (sizeof (char)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_type_char=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_type_char=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_type_char" >&5 +echo "${ECHO_T}$ac_cv_type_char" >&6 + +echo "$as_me:$LINENO: checking size of char" >&5 +echo $ECHO_N "checking size of char... $ECHO_C" >&6 +if test "${ac_cv_sizeof_char+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test "$ac_cv_type_char" = yes; then + # The cast to unsigned long works around a bug in the HP C Compiler + # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects + # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. + # This bug is HP SR number 8606223364. + if test "$cross_compiling" = yes; then + # Depending upon the size, compute the lo and hi bounds. +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$db_includes + +int +main () +{ +static int test_array [1 - 2 * !(((long) (sizeof (char))) >= 0)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_lo=0 ac_mid=0 + while :; do + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$db_includes + +int +main () +{ +static int test_array [1 - 2 * !(((long) (sizeof (char))) <= $ac_mid)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? 
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_hi=$ac_mid; break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 +ac_lo=`expr $ac_mid + 1` + if test $ac_lo -le $ac_mid; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid + 1` +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + done +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 - echo "$as_me:$LINENO: checking for existence of $TCL_BIN_DIR/tclConfig.sh" >&5 -echo $ECHO_N "checking for existence of $TCL_BIN_DIR/tclConfig.sh... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$db_includes - if test -f "$TCL_BIN_DIR/tclConfig.sh" ; then - echo "$as_me:$LINENO: result: loading" >&5 -echo "${ECHO_T}loading" >&6 - . $TCL_BIN_DIR/tclConfig.sh - else - echo "$as_me:$LINENO: result: file not found" >&5 -echo "${ECHO_T}file not found" >&6 - fi +int +main () +{ +static int test_array [1 - 2 * !(((long) (sizeof (char))) < 0)]; +test_array [0] = 0 - # DB requires at least version 8.4. - if test ${TCL_MAJOR_VERSION} -lt 8 \ - -o ${TCL_MAJOR_VERSION} -eq 8 -a ${TCL_MINOR_VERSION} -lt 4; then - { { echo "$as_me:$LINENO: error: Berkeley DB requires Tcl version 8.4 or better." >&5 -echo "$as_me: error: Berkeley DB requires Tcl version 8.4 or better." >&2;} - { (exit 1); exit 1; }; } - fi + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_hi=-1 ac_mid=-1 + while :; do + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$db_includes - # - # The eval is required to do the TCL_DBGX substitution in the - # TCL_LIB_FILE variable - # - eval TCL_LIB_FILE="${TCL_LIB_FILE}" - eval TCL_LIB_FLAG="${TCL_LIB_FLAG}" - eval "TCL_LIB_SPEC=\"${TCL_LIB_SPEC}\"" +int +main () +{ +static int test_array [1 - 2 * !(((long) (sizeof (char))) >= $ac_mid)]; +test_array [0] = 0 - # - # If the DB Tcl library isn't loaded with the Tcl spec and library - # flags on AIX, the resulting libdb_tcl-X.Y.so.0 will drop core at - # load time. [#4843] Furthermore, with Tcl 8.3, the link flags - # given by the Tcl spec are insufficient for our use. 
[#5779] - # - case "$host_os" in - aix4.[2-9].*) - LIBTSO_LIBS="$LIBTSO_LIBS $TCL_LIB_SPEC $TCL_LIB_FLAG" - LIBTSO_LIBS="$LIBTSO_LIBS -L$TCL_EXEC_PREFIX/lib -ltcl$TCL_VERSION";; - aix*) - LIBTSO_LIBS="$LIBTSO_LIBS $TCL_LIB_SPEC $TCL_LIB_FLAG";; - esac + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_lo=$ac_mid; break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_hi=`expr '(' $ac_mid ')' - 1` + if test $ac_mid -le $ac_hi; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid` +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + done +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 +ac_lo= ac_hi= +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +# Binary search between lo and hi bounds. +while test "x$ac_lo" != "x$ac_hi"; do + ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo` + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$db_includes +int +main () +{ +static int test_array [1 - 2 * !(((long) (sizeof (char))) <= $ac_mid)]; +test_array [0] = 0 + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_hi=$ac_mid +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 +ac_lo=`expr '(' $ac_mid ')' + 1` +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +done +case $ac_lo in +?*) ac_cv_sizeof_char=$ac_lo;; +'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (char), 77 +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute sizeof (char), 77 +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } ;; +esac +else + if test "$cross_compiling" = yes; then + { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot run test program while cross compiling +See \`config.log' for more details." 
>&2;} + { (exit 1); exit 1; }; } +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$db_includes - TCL_TCLSH="${TCL_PREFIX}/bin/tclsh${TCL_VERSION}" +long longval () { return (long) (sizeof (char)); } +unsigned long ulongval () { return (long) (sizeof (char)); } +#include +#include +int +main () +{ + FILE *f = fopen ("conftest.val", "w"); + if (! f) + exit (1); + if (((long) (sizeof (char))) < 0) + { + long i = longval (); + if (i != ((long) (sizeof (char)))) + exit (1); + fprintf (f, "%ld\n", i); + } + else + { + unsigned long i = ulongval (); + if (i != ((long) (sizeof (char)))) + exit (1); + fprintf (f, "%lu\n", i); + } + exit (ferror (f) || fclose (f) != 0); - if test x"$TCL_PREFIX" != x && test -f "$TCL_PREFIX/include/tcl.h"; then - TCFLAGS="-I$TCL_PREFIX/include" - fi + ; + return 0; +} +_ACEOF +rm -f conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_sizeof_char=`cat conftest.val` +else + echo "$as_me: program exited with status $ac_status" >&5 +echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 - INSTALL_LIBS="${INSTALL_LIBS} \$(libtso_target)" +( exit $ac_status ) +{ { echo "$as_me:$LINENO: error: cannot compute sizeof (char), 77 +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute sizeof (char), 77 +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } fi - -# Optional DB 1.85 compatibility API. -if test "$db_cv_compat185" = "yes"; then - ADDITIONAL_INCS="db_185.h $ADDITIONAL_INCS" - - ADDITIONAL_OBJS="db185${o} $ADDITIONAL_OBJS" +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi - -# You can disable pieces of functionality to save space. -# -# Btree is always configured: it is the standard method, and Hash off-page -# duplicates require it. -ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(BTREE_OBJS)" - -# Hash can be disabled. -if test "$db_cv_build_hash" = "yes"; then - cat >>confdefs.h <<\_ACEOF -#define HAVE_HASH 1 +fi +rm -f conftest.val +else + ac_cv_sizeof_char=0 +fi +fi +echo "$as_me:$LINENO: result: $ac_cv_sizeof_char" >&5 +echo "${ECHO_T}$ac_cv_sizeof_char" >&6 +cat >>confdefs.h <<_ACEOF +#define SIZEOF_CHAR $ac_cv_sizeof_char _ACEOF +echo "$as_me:$LINENO: checking for unsigned char" >&5 +echo $ECHO_N "checking for unsigned char... $ECHO_C" >&6 +if test "${ac_cv_type_unsigned_char+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$db_includes - ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(HASH_OBJS)" - if test "$db_cv_build_verify" = "yes"; then - ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(HASH_VRFY_OBJS)" - fi +int +main () +{ +if ((unsigned char *) 0) + return 0; +if (sizeof (unsigned char)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? 
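
The SIZEOF_CHAR machinery above shows the technique used for all of the SIZEOF_* probes that follow: when cross compiling, configure brackets and then binary-searches the size using a compile-time assertion (an array whose size goes negative when the guess is wrong), and when it can run programs it falls back to a test program that writes the value into conftest.val. A minimal restatement of the compile-time half of the trick:

/* Mirrors the generated probe: the array size is 1 when the guess
 * "sizeof(char) <= 1" holds and becomes negative (a compile error) when it
 * does not, so whether this file compiles encodes the answer without ever
 * running code. */
#include <sys/types.h>

int
main(void)
{
	static int test_array[1 - 2 * !(((long)(sizeof(char))) <= 1)];

	test_array[0] = 0;
	return (test_array[0]);
}
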
= $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_type_unsigned_char=yes else - ADDITIONAL_OBJS="$ADDITIONAL_OBJS hash_stub${o}" + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_type_unsigned_char=no fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_type_unsigned_char" >&5 +echo "${ECHO_T}$ac_cv_type_unsigned_char" >&6 -# Queue can be disabled. -if test "$db_cv_build_queue" = "yes"; then - cat >>confdefs.h <<\_ACEOF -#define HAVE_QUEUE 1 +echo "$as_me:$LINENO: checking size of unsigned char" >&5 +echo $ECHO_N "checking size of unsigned char... $ECHO_C" >&6 +if test "${ac_cv_sizeof_unsigned_char+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test "$ac_cv_type_unsigned_char" = yes; then + # The cast to unsigned long works around a bug in the HP C Compiler + # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects + # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. + # This bug is HP SR number 8606223364. + if test "$cross_compiling" = yes; then + # Depending upon the size, compute the lo and hi bounds. +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ _ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$db_includes + +int +main () +{ +static int test_array [1 - 2 * !(((long) (sizeof (unsigned char))) >= 0)]; +test_array [0] = 0 + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_lo=0 ac_mid=0 + while :; do + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$db_includes +int +main () +{ +static int test_array [1 - 2 * !(((long) (sizeof (unsigned char))) <= $ac_mid)]; +test_array [0] = 0 - ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(QUEUE_OBJS)" - if test "$db_cv_build_verify" = "yes"; then - ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(QUEUE_VRFY_OBJS)" - fi + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! 
-s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_hi=$ac_mid; break else - ADDITIONAL_OBJS="$ADDITIONAL_OBJS qam_stub${o}" + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_lo=`expr $ac_mid + 1` + if test $ac_lo -le $ac_mid; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid + 1` fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + done +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# The replication code. -if test "$db_cv_build_replication" = "yes"; then - cat >>confdefs.h <<\_ACEOF -#define HAVE_REPLICATION 1 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ _ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$db_includes + +int +main () +{ +static int test_array [1 - 2 * !(((long) (sizeof (unsigned char))) < 0)]; +test_array [0] = 0 + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_hi=-1 ac_mid=-1 + while :; do + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$db_includes +int +main () +{ +static int test_array [1 - 2 * !(((long) (sizeof (unsigned char))) >= $ac_mid)]; +test_array [0] = 0 - ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(REP_OBJS)" + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + ac_lo=$ac_mid; break else - ADDITIONAL_OBJS="$ADDITIONAL_OBJS rep_stub${o}" + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_hi=`expr '(' $ac_mid ')' - 1` + if test $ac_mid -le $ac_hi; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid` fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + done +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# The verification code. -if test "$db_cv_build_verify" = "yes"; then - cat >>confdefs.h <<\_ACEOF -#define HAVE_VERIFY 1 +ac_lo= ac_hi= +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +# Binary search between lo and hi bounds. +while test "x$ac_lo" != "x$ac_hi"; do + ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo` + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ _ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$db_includes +int +main () +{ +static int test_array [1 - 2 * !(((long) (sizeof (unsigned char))) <= $ac_mid)]; +test_array [0] = 0 - - ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(BTREE_VRFY_OBJS)" + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_hi=$ac_mid else - ADDITIONAL_OBJS="$ADDITIONAL_OBJS db_vrfy_stub${o}" -fi + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# The crypto support. -if test -d "$srcdir/../crypto" -a "$db_cv_build_cryptography" = "yes"; then - cat >>confdefs.h <<\_ACEOF -#define HAVE_CRYPTO 1 +ac_lo=`expr '(' $ac_mid ')' + 1` +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +done +case $ac_lo in +?*) ac_cv_sizeof_unsigned_char=$ac_lo;; +'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned char), 77 +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute sizeof (unsigned char), 77 +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } ;; +esac +else + if test "$cross_compiling" = yes; then + { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot run test program while cross compiling +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ _ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$db_includes +long longval () { return (long) (sizeof (unsigned char)); } +unsigned long ulongval () { return (long) (sizeof (unsigned char)); } +#include +#include +int +main () +{ + FILE *f = fopen ("conftest.val", "w"); + if (! 
f) + exit (1); + if (((long) (sizeof (unsigned char))) < 0) + { + long i = longval (); + if (i != ((long) (sizeof (unsigned char)))) + exit (1); + fprintf (f, "%ld\n", i); + } + else + { + unsigned long i = ulongval (); + if (i != ((long) (sizeof (unsigned char)))) + exit (1); + fprintf (f, "%lu\n", i); + } + exit (ferror (f) || fclose (f) != 0); + ; + return 0; +} +_ACEOF +rm -f conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_sizeof_unsigned_char=`cat conftest.val` +else + echo "$as_me: program exited with status $ac_status" >&5 +echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 - CRYPTO_OBJS="\$(CRYPTO_OBJS)" +( exit $ac_status ) +{ { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned char), 77 +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute sizeof (unsigned char), 77 +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } +fi +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +fi +fi +rm -f conftest.val else - CRYPTO_OBJS="crypto_stub${o}" + ac_cv_sizeof_unsigned_char=0 fi - -# Optional utilities. -if test "$db_cv_dump185" = "yes"; then - ADDITIONAL_PROGS="db_dump185 $ADDITIONAL_PROGS" fi +echo "$as_me:$LINENO: result: $ac_cv_sizeof_unsigned_char" >&5 +echo "${ECHO_T}$ac_cv_sizeof_unsigned_char" >&6 +cat >>confdefs.h <<_ACEOF +#define SIZEOF_UNSIGNED_CHAR $ac_cv_sizeof_unsigned_char +_ACEOF -# Checks for include files, structures, C types. -echo "$as_me:$LINENO: checking whether stat file-mode macros are broken" >&5 -echo $ECHO_N "checking whether stat file-mode macros are broken... $ECHO_C" >&6 -if test "${ac_cv_header_stat_broken+set}" = set; then + +echo "$as_me:$LINENO: checking for short" >&5 +echo $ECHO_N "checking for short... $ECHO_C" >&6 +if test "${ac_cv_type_short+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -#include -#include - -#if defined(S_ISBLK) && defined(S_IFDIR) -# if S_ISBLK (S_IFDIR) -You lose. -# endif -#endif - -#if defined(S_ISBLK) && defined(S_IFCHR) -# if S_ISBLK (S_IFCHR) -You lose. -# endif -#endif - -#if defined(S_ISLNK) && defined(S_IFREG) -# if S_ISLNK (S_IFREG) -You lose. -# endif -#endif - -#if defined(S_ISSOCK) && defined(S_IFREG) -# if S_ISSOCK (S_IFREG) -You lose. -# endif -#endif +$db_includes +int +main () +{ +if ((short *) 0) + return 0; +if (sizeof (short)) + return 0; + ; + return 0; +} _ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "You lose" >/dev/null 2>&1; then - ac_cv_header_stat_broken=yes +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
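
The configure fragments threaded through the surrounding hunks (the "-" lines covering hash, queue, replication, verification and crypto) show the optional-feature pattern: when a subsystem is configured out, a matching stub object (hash_stub, qam_stub, rep_stub, db_vrfy_stub, crypto_stub) is linked in its place so the library keeps the same symbol set and callers get a clean error instead of a link failure. A hypothetical sketch of such a stub; the function name and error value are examples only, not copied from the tree:

/* Hypothetical stub standing in for a subsystem that was configured out. */
#include <errno.h>

int
example_crypto_init(void *env, void *dbenv)
{
	(void)env;
	(void)dbenv;
	/* Feature compiled out: report "not supported" rather than crashing. */
	return (EOPNOTSUPP);
}

int
main(void)
{
	return (example_crypto_init(0, 0) == EOPNOTSUPP ? 0 : 1);
}
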
= $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_type_short=yes else - ac_cv_header_stat_broken=no -fi -rm -f conftest* + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 +ac_cv_type_short=no fi -echo "$as_me:$LINENO: result: $ac_cv_header_stat_broken" >&5 -echo "${ECHO_T}$ac_cv_header_stat_broken" >&6 -if test $ac_cv_header_stat_broken = yes; then - -cat >>confdefs.h <<\_ACEOF -#define STAT_MACROS_BROKEN 1 -_ACEOF - +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi +echo "$as_me:$LINENO: result: $ac_cv_type_short" >&5 +echo "${ECHO_T}$ac_cv_type_short" >&6 -echo "$as_me:$LINENO: checking whether time.h and sys/time.h may both be included" >&5 -echo $ECHO_N "checking whether time.h and sys/time.h may both be included... $ECHO_C" >&6 -if test "${ac_cv_header_time+set}" = set; then +echo "$as_me:$LINENO: checking size of short" >&5 +echo $ECHO_N "checking size of short... $ECHO_C" >&6 +if test "${ac_cv_sizeof_short+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + if test "$ac_cv_type_short" = yes; then + # The cast to unsigned long works around a bug in the HP C Compiler + # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects + # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. + # This bug is HP SR number 8606223364. + if test "$cross_compiling" = yes; then + # Depending upon the size, compute the lo and hi bounds. +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$db_includes + +int +main () +{ +static int test_array [1 - 2 * !(((long) (sizeof (short))) >= 0)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_lo=0 ac_mid=0 + while :; do + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -#include -#include -#include +$db_includes int main () { -if ((struct tm *) 0) -return 0; +static int test_array [1 - 2 * !(((long) (sizeof (short))) <= $ac_mid)]; +test_array [0] = 0 + ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! 
-s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_header_time=yes + ac_hi=$ac_mid; break else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_header_time=no -fi -rm -f conftest.$ac_objext conftest.$ac_ext -fi -echo "$as_me:$LINENO: result: $ac_cv_header_time" >&5 -echo "${ECHO_T}$ac_cv_header_time" >&6 -if test $ac_cv_header_time = yes; then - -cat >>confdefs.h <<\_ACEOF -#define TIME_WITH_SYS_TIME 1 -_ACEOF - +ac_lo=`expr $ac_mid + 1` + if test $ac_lo -le $ac_mid; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid + 1` fi - - - - - - -ac_header_dirent=no -for ac_hdr in dirent.h sys/ndir.h sys/dir.h ndir.h; do - as_ac_Header=`echo "ac_cv_header_dirent_$ac_hdr" | $as_tr_sh` -echo "$as_me:$LINENO: checking for $ac_hdr that defines DIR" >&5 -echo $ECHO_N "checking for $ac_hdr that defines DIR... $ECHO_C" >&6 -if eval "test \"\${$as_ac_Header+set}\" = set"; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + done else - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -#include -#include <$ac_hdr> +$db_includes int main () { -if ((DIR *) 0) -return 0; +static int test_array [1 - 2 * !(((long) (sizeof (short))) < 0)]; +test_array [0] = 0 + ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - eval "$as_ac_Header=yes" -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -eval "$as_ac_Header=no" -fi -rm -f conftest.$ac_objext conftest.$ac_ext -fi -echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 -echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 -if test `eval echo '${'$as_ac_Header'}'` = yes; then - cat >>confdefs.h <<_ACEOF -#define `echo "HAVE_$ac_hdr" | $as_tr_cpp` 1 -_ACEOF - -ac_header_dirent=$ac_hdr; break -fi - -done -# Two versions of opendir et al. are in -ldir and -lx on SCO Xenix. -if test $ac_header_dirent = dirent.h; then - echo "$as_me:$LINENO: checking for library containing opendir" >&5 -echo $ECHO_N "checking for library containing opendir... 
$ECHO_C" >&6 -if test "${ac_cv_search_opendir+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - ac_func_search_save_LIBS=$LIBS -ac_cv_search_opendir=no -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + ac_hi=-1 ac_mid=-1 + while :; do + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ +$db_includes -/* Override any gcc2 internal prototype to avoid an error. */ -#ifdef __cplusplus -extern "C" -#endif -/* We use char because int might match the return type of a gcc2 - builtin and then its argument prototype would still apply. */ -char opendir (); int main () { -opendir (); +static int test_array [1 - 2 * !(((long) (sizeof (short))) >= $ac_mid)]; +test_array [0] = 0 + ; return 0; } _ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_search_opendir="none required" + ac_lo=$ac_mid; break else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 +ac_hi=`expr '(' $ac_mid ')' - 1` + if test $ac_mid -le $ac_hi; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid` fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext -if test "$ac_cv_search_opendir" = no; then - for ac_lib in dir; do - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + done +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_lo= ac_hi= +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +# Binary search between lo and hi bounds. +while test "x$ac_lo" != "x$ac_hi"; do + ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo` + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ +$db_includes -/* Override any gcc2 internal prototype to avoid an error. */ -#ifdef __cplusplus -extern "C" -#endif -/* We use char because int might match the return type of a gcc2 - builtin and then its argument prototype would still apply. */ -char opendir (); int main () { -opendir (); +static int test_array [1 - 2 * !(((long) (sizeof (short))) <= $ac_mid)]; +test_array [0] = 0 + ; return 0; } _ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_search_opendir="-l$ac_lib" -break + ac_hi=$ac_mid else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 +ac_lo=`expr '(' $ac_mid ')' + 1` fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext - done -fi -LIBS=$ac_func_search_save_LIBS -fi -echo "$as_me:$LINENO: result: $ac_cv_search_opendir" >&5 -echo "${ECHO_T}$ac_cv_search_opendir" >&6 -if test "$ac_cv_search_opendir" != no; then - test "$ac_cv_search_opendir" = "none required" || LIBS="$ac_cv_search_opendir $LIBS" - -fi - +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +done +case $ac_lo in +?*) ac_cv_sizeof_short=$ac_lo;; +'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (short), 77 +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute sizeof (short), 77 +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } ;; +esac else - echo "$as_me:$LINENO: checking for library containing opendir" >&5 -echo $ECHO_N "checking for library containing opendir... $ECHO_C" >&6 -if test "${ac_cv_search_opendir+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + if test "$cross_compiling" = yes; then + { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot run test program while cross compiling +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } else - ac_func_search_save_LIBS=$LIBS -ac_cv_search_opendir=no -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ +$db_includes + +long longval () { return (long) (sizeof (short)); } +unsigned long ulongval () { return (long) (sizeof (short)); } +#include +#include +int +main () +{ + + FILE *f = fopen ("conftest.val", "w"); + if (! f) + exit (1); + if (((long) (sizeof (short))) < 0) + { + long i = longval (); + if (i != ((long) (sizeof (short)))) + exit (1); + fprintf (f, "%ld\n", i); + } + else + { + unsigned long i = ulongval (); + if (i != ((long) (sizeof (short)))) + exit (1); + fprintf (f, "%lu\n", i); + } + exit (ferror (f) || fclose (f) != 0); -/* Override any gcc2 internal prototype to avoid an error. */ -#ifdef __cplusplus -extern "C" -#endif -/* We use char because int might match the return type of a gcc2 - builtin and then its argument prototype would still apply. */ -char opendir (); -int -main () -{ -opendir (); ; return 0; } _ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext +rm -f conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 (eval $ac_link) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_search_opendir="none required" + ac_cv_sizeof_short=`cat conftest.val` else - echo "$as_me: failed program was:" >&5 + echo "$as_me: program exited with status $ac_status" >&5 +echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 +( exit $ac_status ) +{ { echo "$as_me:$LINENO: error: cannot compute sizeof (short), 77 +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute sizeof (short), 77 +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext -if test "$ac_cv_search_opendir" = no; then - for ac_lib in x; do - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +fi +fi +rm -f conftest.val +else + ac_cv_sizeof_short=0 +fi +fi +echo "$as_me:$LINENO: result: $ac_cv_sizeof_short" >&5 +echo "${ECHO_T}$ac_cv_sizeof_short" >&6 +cat >>confdefs.h <<_ACEOF +#define SIZEOF_SHORT $ac_cv_sizeof_short +_ACEOF + + +echo "$as_me:$LINENO: checking for unsigned short" >&5 +echo $ECHO_N "checking for unsigned short... $ECHO_C" >&6 +if test "${ac_cv_type_unsigned_short+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ +$db_includes -/* Override any gcc2 internal prototype to avoid an error. */ -#ifdef __cplusplus -extern "C" -#endif -/* We use char because int might match the return type of a gcc2 - builtin and then its argument prototype would still apply. */ -char opendir (); int main () { -opendir (); +if ((unsigned short *) 0) + return 0; +if (sizeof (unsigned short)) + return 0; ; return 0; } _ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_search_opendir="-l$ac_lib" -break + ac_cv_type_unsigned_short=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 +ac_cv_type_unsigned_short=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext - done -fi -LIBS=$ac_func_search_save_LIBS -fi -echo "$as_me:$LINENO: result: $ac_cv_search_opendir" >&5 -echo "${ECHO_T}$ac_cv_search_opendir" >&6 -if test "$ac_cv_search_opendir" != no; then - test "$ac_cv_search_opendir" = "none required" || LIBS="$ac_cv_search_opendir $LIBS" - -fi - +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi +echo "$as_me:$LINENO: result: $ac_cv_type_unsigned_short" >&5 +echo "${ECHO_T}$ac_cv_type_unsigned_short" >&6 - - - -for ac_header in sys/select.h sys/time.h sys/fcntl.h -do -as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` -if eval "test \"\${$as_ac_Header+set}\" = set"; then - echo "$as_me:$LINENO: checking for $ac_header" >&5 -echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 -if eval "test \"\${$as_ac_Header+set}\" = set"; then +echo "$as_me:$LINENO: checking size of unsigned short" >&5 +echo $ECHO_N "checking size of unsigned short... $ECHO_C" >&6 +if test "${ac_cv_sizeof_unsigned_short+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 -fi -echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 -echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 else - # Is the header compilable? -echo "$as_me:$LINENO: checking $ac_header usability" >&5 -echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6 + if test "$ac_cv_type_unsigned_short" = yes; then + # The cast to unsigned long works around a bug in the HP C Compiler + # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects + # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. + # This bug is HP SR number 8606223364. + if test "$cross_compiling" = yes; then + # Depending upon the size, compute the lo and hi bounds. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -$ac_includes_default -#include <$ac_header> +$db_includes + +int +main () +{ +static int test_array [1 - 2 * !(((long) (sizeof (unsigned short))) >= 0)]; +test_array [0] = 0 + + ; + return 0; +} _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_header_compiler=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -ac_header_compiler=no -fi -rm -f conftest.$ac_objext conftest.$ac_ext -echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 -echo "${ECHO_T}$ac_header_compiler" >&6 - -# Is the header present? -echo "$as_me:$LINENO: checking $ac_header presence" >&5 -echo $ECHO_N "checking $ac_header presence... 
$ECHO_C" >&6 -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + ac_lo=0 ac_mid=0 + while :; do + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -#include <$ac_header> +$db_includes + +int +main () +{ +static int test_array [1 - 2 * !(((long) (sizeof (unsigned short))) <= $ac_mid)]; +test_array [0] = 0 + + ; + return 0; +} _ACEOF -if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 - (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } >/dev/null; then - if test -s conftest.err; then - ac_cpp_err=$ac_c_preproc_warn_flag - else - ac_cpp_err= - fi -else - ac_cpp_err=yes -fi -if test -z "$ac_cpp_err"; then - ac_header_preproc=yes + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_hi=$ac_mid; break else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 - ac_header_preproc=no +ac_lo=`expr $ac_mid + 1` + if test $ac_lo -le $ac_mid; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid + 1` fi -rm -f conftest.err conftest.$ac_ext -echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 -echo "${ECHO_T}$ac_header_preproc" >&6 - -# So? What about this header? -case $ac_header_compiler:$ac_header_preproc in - yes:no ) - { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 -echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} - { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 -echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} - ( - cat <<\_ASBOX -## ------------------------------------ ## -## Report this to bug-autoconf@gnu.org. ## -## ------------------------------------ ## -_ASBOX - ) | - sed "s/^/$as_me: WARNING: /" >&2 - ;; - no:yes ) - { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 -echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} - { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 -echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} - { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 -echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} - ( - cat <<\_ASBOX -## ------------------------------------ ## -## Report this to bug-autoconf@gnu.org. ## -## ------------------------------------ ## -_ASBOX - ) | - sed "s/^/$as_me: WARNING: /" >&2 - ;; -esac -echo "$as_me:$LINENO: checking for $ac_header" >&5 -echo $ECHO_N "checking for $ac_header... 
$ECHO_C" >&6 -if eval "test \"\${$as_ac_Header+set}\" = set"; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + done else - eval "$as_ac_Header=$ac_header_preproc" -fi -echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 -echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 - -fi -if test `eval echo '${'$as_ac_Header'}'` = yes; then - cat >>confdefs.h <<_ACEOF -#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF - -fi - -done + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -echo "$as_me:$LINENO: checking for struct stat.st_blksize" >&5 -echo $ECHO_N "checking for struct stat.st_blksize... $ECHO_C" >&6 -if test "${ac_cv_member_struct_stat_st_blksize+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" +cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -$ac_includes_default +$db_includes + int main () { -static struct stat ac_aggr; -if (ac_aggr.st_blksize) -return 0; +static int test_array [1 - 2 * !(((long) (sizeof (unsigned short))) < 0)]; +test_array [0] = 0 + ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_member_struct_stat_st_blksize=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + ac_hi=-1 ac_mid=-1 + while :; do + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -$ac_includes_default +$db_includes + int main () { -static struct stat ac_aggr; -if (sizeof ac_aggr.st_blksize) -return 0; +static int test_array [1 - 2 * !(((long) (sizeof (unsigned short))) >= $ac_mid)]; +test_array [0] = 0 + ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_member_struct_stat_st_blksize=yes + ac_lo=$ac_mid; break else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_member_struct_stat_st_blksize=no -fi -rm -f conftest.$ac_objext conftest.$ac_ext -fi -rm -f conftest.$ac_objext conftest.$ac_ext +ac_hi=`expr '(' $ac_mid ')' - 1` + if test $ac_mid -le $ac_hi; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid` fi -echo "$as_me:$LINENO: result: $ac_cv_member_struct_stat_st_blksize" >&5 -echo "${ECHO_T}$ac_cv_member_struct_stat_st_blksize" >&6 -if test $ac_cv_member_struct_stat_st_blksize = yes; then - -cat >>confdefs.h <<_ACEOF -#define HAVE_STRUCT_STAT_ST_BLKSIZE 1 -_ACEOF - +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + done +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 +ac_lo= ac_hi= fi - - - -# db.h includes and , not the other default includes -# autoconf usually includes. For that reason, we specify a set of includes -# for all type checking tests. [#5060] -# -# IBM's OS/390 and z/OS releases have types in not also found -# in . Include as well, if it exists. - -db_includes="#include " -if test "${ac_cv_header_inttypes_h+set}" = set; then - echo "$as_me:$LINENO: checking for inttypes.h" >&5 -echo $ECHO_N "checking for inttypes.h... $ECHO_C" >&6 -if test "${ac_cv_header_inttypes_h+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: $ac_cv_header_inttypes_h" >&5 -echo "${ECHO_T}$ac_cv_header_inttypes_h" >&6 -else - # Is the header compilable? -echo "$as_me:$LINENO: checking inttypes.h usability" >&5 -echo $ECHO_N "checking inttypes.h usability... $ECHO_C" >&6 -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +# Binary search between lo and hi bounds. +while test "x$ac_lo" != "x$ac_hi"; do + ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo` + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -$ac_includes_default -#include +$db_includes + +int +main () +{ +static int test_array [1 - 2 * !(((long) (sizeof (unsigned short))) <= $ac_mid)]; +test_array [0] = 0 + + ; + return 0; +} _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_header_compiler=yes + ac_hi=$ac_mid else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_header_compiler=no +ac_lo=`expr '(' $ac_mid ')' + 1` fi -rm -f conftest.$ac_objext conftest.$ac_ext -echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 -echo "${ECHO_T}$ac_header_compiler" >&6 - -# Is the header present? 
-echo "$as_me:$LINENO: checking inttypes.h presence" >&5 -echo $ECHO_N "checking inttypes.h presence... $ECHO_C" >&6 -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +done +case $ac_lo in +?*) ac_cv_sizeof_unsigned_short=$ac_lo;; +'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned short), 77 +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute sizeof (unsigned short), 77 +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } ;; +esac +else + if test "$cross_compiling" = yes; then + { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot run test program while cross compiling +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } +else + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -#include +$db_includes + +long longval () { return (long) (sizeof (unsigned short)); } +unsigned long ulongval () { return (long) (sizeof (unsigned short)); } +#include +#include +int +main () +{ + + FILE *f = fopen ("conftest.val", "w"); + if (! f) + exit (1); + if (((long) (sizeof (unsigned short))) < 0) + { + long i = longval (); + if (i != ((long) (sizeof (unsigned short)))) + exit (1); + fprintf (f, "%ld\n", i); + } + else + { + unsigned long i = ulongval (); + if (i != ((long) (sizeof (unsigned short)))) + exit (1); + fprintf (f, "%lu\n", i); + } + exit (ferror (f) || fclose (f) != 0); + + ; + return 0; +} _ACEOF -if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 - (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 +rm -f conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } >/dev/null; then - if test -s conftest.err; then - ac_cpp_err=$ac_c_preproc_warn_flag - else - ac_cpp_err= - fi -else - ac_cpp_err=yes -fi -if test -z "$ac_cpp_err"; then - ac_header_preproc=yes + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_sizeof_unsigned_short=`cat conftest.val` else - echo "$as_me: failed program was:" >&5 + echo "$as_me: program exited with status $ac_status" >&5 +echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 - ac_header_preproc=no +( exit $ac_status ) +{ { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned short), 77 +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute sizeof (unsigned short), 77 +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } fi -rm -f conftest.err conftest.$ac_ext -echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 -echo "${ECHO_T}$ac_header_preproc" >&6 - -# So? What about this header? -case $ac_header_compiler:$ac_header_preproc in - yes:no ) - { echo "$as_me:$LINENO: WARNING: inttypes.h: accepted by the compiler, rejected by the preprocessor!" >&5 -echo "$as_me: WARNING: inttypes.h: accepted by the compiler, rejected by the preprocessor!" 
>&2;} - { echo "$as_me:$LINENO: WARNING: inttypes.h: proceeding with the preprocessor's result" >&5 -echo "$as_me: WARNING: inttypes.h: proceeding with the preprocessor's result" >&2;} - ( - cat <<\_ASBOX -## ------------------------------------ ## -## Report this to bug-autoconf@gnu.org. ## -## ------------------------------------ ## -_ASBOX - ) | - sed "s/^/$as_me: WARNING: /" >&2 - ;; - no:yes ) - { echo "$as_me:$LINENO: WARNING: inttypes.h: present but cannot be compiled" >&5 -echo "$as_me: WARNING: inttypes.h: present but cannot be compiled" >&2;} - { echo "$as_me:$LINENO: WARNING: inttypes.h: check for missing prerequisite headers?" >&5 -echo "$as_me: WARNING: inttypes.h: check for missing prerequisite headers?" >&2;} - { echo "$as_me:$LINENO: WARNING: inttypes.h: proceeding with the preprocessor's result" >&5 -echo "$as_me: WARNING: inttypes.h: proceeding with the preprocessor's result" >&2;} - ( - cat <<\_ASBOX -## ------------------------------------ ## -## Report this to bug-autoconf@gnu.org. ## -## ------------------------------------ ## -_ASBOX - ) | - sed "s/^/$as_me: WARNING: /" >&2 - ;; -esac -echo "$as_me:$LINENO: checking for inttypes.h" >&5 -echo $ECHO_N "checking for inttypes.h... $ECHO_C" >&6 -if test "${ac_cv_header_inttypes_h+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - ac_cv_header_inttypes_h=$ac_header_preproc +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: $ac_cv_header_inttypes_h" >&5 -echo "${ECHO_T}$ac_cv_header_inttypes_h" >&6 - fi -if test $ac_cv_header_inttypes_h = yes; then - - inttypes_decl="#include " - db_includes="$db_includes -#include " +rm -f conftest.val +else + ac_cv_sizeof_unsigned_short=0 fi +fi +echo "$as_me:$LINENO: result: $ac_cv_sizeof_unsigned_short" >&5 +echo "${ECHO_T}$ac_cv_sizeof_unsigned_short" >&6 +cat >>confdefs.h <<_ACEOF +#define SIZEOF_UNSIGNED_SHORT $ac_cv_sizeof_unsigned_short +_ACEOF -db_includes="$db_includes -#include " - -# We need to know the sizes of various objects on this system. -# We don't use the SIZEOF_XXX values created by autoconf. -echo "$as_me:$LINENO: checking for char" >&5 -echo $ECHO_N "checking for char... $ECHO_C" >&6 -if test "${ac_cv_type_char+set}" = set; then +echo "$as_me:$LINENO: checking for int" >&5 +echo $ECHO_N "checking for int... $ECHO_C" >&6 +if test "${ac_cv_type_int+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -22246,9 +24818,9 @@ $db_includes int main () { -if ((char *) 0) +if ((int *) 0) return 0; -if (sizeof (char)) +if (sizeof (int)) return 0; ; return 0; @@ -22256,34 +24828,44 @@ if (sizeof (char)) _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_type_char=yes + ac_cv_type_int=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_type_char=no +ac_cv_type_int=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: $ac_cv_type_char" >&5 -echo "${ECHO_T}$ac_cv_type_char" >&6 +echo "$as_me:$LINENO: result: $ac_cv_type_int" >&5 +echo "${ECHO_T}$ac_cv_type_int" >&6 -echo "$as_me:$LINENO: checking size of char" >&5 -echo $ECHO_N "checking size of char... $ECHO_C" >&6 -if test "${ac_cv_sizeof_char+set}" = set; then +echo "$as_me:$LINENO: checking size of int" >&5 +echo $ECHO_N "checking size of int... $ECHO_C" >&6 +if test "${ac_cv_sizeof_int+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - if test "$ac_cv_type_char" = yes; then + if test "$ac_cv_type_int" = yes; then # The cast to unsigned long works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. @@ -22291,7 +24873,6 @@ else if test "$cross_compiling" = yes; then # Depending upon the size, compute the lo and hi bounds. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -22302,7 +24883,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (char))) >= 0)]; +static int test_array [1 - 2 * !(((long) (sizeof (int))) >= 0)]; test_array [0] = 0 ; @@ -22311,11 +24892,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -22324,7 +24915,6 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ac_lo=0 ac_mid=0 while :; do cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -22335,7 +24925,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (char))) <= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (int))) <= $ac_mid)]; test_array [0] = 0 ; @@ -22344,11 +24934,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? 
@@ -22360,20 +24960,19 @@ else sed 's/^/| /' conftest.$ac_ext >&5 ac_lo=`expr $ac_mid + 1` - if test $ac_lo -le $ac_mid; then - ac_lo= ac_hi= - break - fi - ac_mid=`expr 2 '*' $ac_mid + 1` + if test $ac_lo -le $ac_mid; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid + 1` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -22384,7 +24983,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (char))) < 0)]; +static int test_array [1 - 2 * !(((long) (sizeof (int))) < 0)]; test_array [0] = 0 ; @@ -22393,11 +24992,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -22406,7 +25015,6 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ac_hi=-1 ac_mid=-1 while :; do cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -22417,7 +25025,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (char))) >= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (int))) >= $ac_mid)]; test_array [0] = 0 ; @@ -22426,11 +25034,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -22442,13 +25060,13 @@ else sed 's/^/| /' conftest.$ac_ext >&5 ac_hi=`expr '(' $ac_mid ')' - 1` - if test $ac_mid -le $ac_hi; then - ac_lo= ac_hi= - break - fi - ac_mid=`expr 2 '*' $ac_mid` + if test $ac_mid -le $ac_hi; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done else echo "$as_me: failed program was:" >&5 @@ -22456,14 +25074,13 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_lo= ac_hi= fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext # Binary search between lo and hi bounds. 
while test "x$ac_lo" != "x$ac_hi"; do ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo` cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -22474,7 +25091,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (char))) <= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (int))) <= $ac_mid)]; test_array [0] = 0 ; @@ -22483,11 +25100,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -22500,13 +25127,13 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_lo=`expr '(' $ac_mid ')' + 1` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done case $ac_lo in -?*) ac_cv_sizeof_char=$ac_lo;; -'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (char), 77 +?*) ac_cv_sizeof_int=$ac_lo;; +'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (int), 77 See \`config.log' for more details." >&5 -echo "$as_me: error: cannot compute sizeof (char), 77 +echo "$as_me: error: cannot compute sizeof (int), 77 See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } ;; esac @@ -22519,7 +25146,6 @@ See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -22527,8 +25153,8 @@ cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $db_includes -long longval () { return (long) (sizeof (char)); } -unsigned long ulongval () { return (long) (sizeof (char)); } +long longval () { return (long) (sizeof (int)); } +unsigned long ulongval () { return (long) (sizeof (int)); } #include #include int @@ -22538,17 +25164,17 @@ main () FILE *f = fopen ("conftest.val", "w"); if (! f) exit (1); - if (((long) (sizeof (char))) < 0) + if (((long) (sizeof (int))) < 0) { long i = longval (); - if (i != ((long) (sizeof (char)))) + if (i != ((long) (sizeof (int)))) exit (1); fprintf (f, "%ld\n", i); } else { unsigned long i = ulongval (); - if (i != ((long) (sizeof (char)))) + if (i != ((long) (sizeof (int)))) exit (1); fprintf (f, "%lu\n", i); } @@ -22569,41 +25195,40 @@ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_sizeof_char=`cat conftest.val` + ac_cv_sizeof_int=`cat conftest.val` else echo "$as_me: program exited with status $ac_status" >&5 echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) -{ { echo "$as_me:$LINENO: error: cannot compute sizeof (char), 77 +{ { echo "$as_me:$LINENO: error: cannot compute sizeof (int), 77 See \`config.log' for more details." >&5 -echo "$as_me: error: cannot compute sizeof (char), 77 +echo "$as_me: error: cannot compute sizeof (int), 77 See \`config.log' for more details." 
>&2;} { (exit 1); exit 1; }; } fi -rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi rm -f conftest.val else - ac_cv_sizeof_char=0 + ac_cv_sizeof_int=0 fi fi -echo "$as_me:$LINENO: result: $ac_cv_sizeof_char" >&5 -echo "${ECHO_T}$ac_cv_sizeof_char" >&6 +echo "$as_me:$LINENO: result: $ac_cv_sizeof_int" >&5 +echo "${ECHO_T}$ac_cv_sizeof_int" >&6 cat >>confdefs.h <<_ACEOF -#define SIZEOF_CHAR $ac_cv_sizeof_char +#define SIZEOF_INT $ac_cv_sizeof_int _ACEOF -echo "$as_me:$LINENO: checking for unsigned char" >&5 -echo $ECHO_N "checking for unsigned char... $ECHO_C" >&6 -if test "${ac_cv_type_unsigned_char+set}" = set; then +echo "$as_me:$LINENO: checking for unsigned int" >&5 +echo $ECHO_N "checking for unsigned int... $ECHO_C" >&6 +if test "${ac_cv_type_unsigned_int+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -22614,9 +25239,9 @@ $db_includes int main () { -if ((unsigned char *) 0) +if ((unsigned int *) 0) return 0; -if (sizeof (unsigned char)) +if (sizeof (unsigned int)) return 0; ; return 0; @@ -22624,34 +25249,44 @@ if (sizeof (unsigned char)) _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_type_unsigned_char=yes + ac_cv_type_unsigned_int=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_type_unsigned_char=no +ac_cv_type_unsigned_int=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: $ac_cv_type_unsigned_char" >&5 -echo "${ECHO_T}$ac_cv_type_unsigned_char" >&6 +echo "$as_me:$LINENO: result: $ac_cv_type_unsigned_int" >&5 +echo "${ECHO_T}$ac_cv_type_unsigned_int" >&6 -echo "$as_me:$LINENO: checking size of unsigned char" >&5 -echo $ECHO_N "checking size of unsigned char... $ECHO_C" >&6 -if test "${ac_cv_sizeof_unsigned_char+set}" = set; then +echo "$as_me:$LINENO: checking size of unsigned int" >&5 +echo $ECHO_N "checking size of unsigned int... $ECHO_C" >&6 +if test "${ac_cv_sizeof_unsigned_int+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - if test "$ac_cv_type_unsigned_char" = yes; then + if test "$ac_cv_type_unsigned_int" = yes; then # The cast to unsigned long works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. @@ -22659,7 +25294,6 @@ else if test "$cross_compiling" = yes; then # Depending upon the size, compute the lo and hi bounds. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. 
*/ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -22670,7 +25304,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (unsigned char))) >= 0)]; +static int test_array [1 - 2 * !(((long) (sizeof (unsigned int))) >= 0)]; test_array [0] = 0 ; @@ -22679,11 +25313,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -22692,7 +25336,6 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ac_lo=0 ac_mid=0 while :; do cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -22703,7 +25346,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (unsigned char))) <= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (unsigned int))) <= $ac_mid)]; test_array [0] = 0 ; @@ -22712,11 +25355,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -22728,20 +25381,19 @@ else sed 's/^/| /' conftest.$ac_ext >&5 ac_lo=`expr $ac_mid + 1` - if test $ac_lo -le $ac_mid; then - ac_lo= ac_hi= - break - fi - ac_mid=`expr 2 '*' $ac_mid + 1` + if test $ac_lo -le $ac_mid; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid + 1` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -22752,7 +25404,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (unsigned char))) < 0)]; +static int test_array [1 - 2 * !(((long) (sizeof (unsigned int))) < 0)]; test_array [0] = 0 ; @@ -22761,11 +25413,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! 
-s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -22774,7 +25436,6 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ac_hi=-1 ac_mid=-1 while :; do cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -22785,7 +25446,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (unsigned char))) >= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (unsigned int))) >= $ac_mid)]; test_array [0] = 0 ; @@ -22794,11 +25455,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -22810,13 +25481,13 @@ else sed 's/^/| /' conftest.$ac_ext >&5 ac_hi=`expr '(' $ac_mid ')' - 1` - if test $ac_mid -le $ac_hi; then - ac_lo= ac_hi= - break - fi - ac_mid=`expr 2 '*' $ac_mid` + if test $ac_mid -le $ac_hi; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done else echo "$as_me: failed program was:" >&5 @@ -22824,14 +25495,13 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_lo= ac_hi= fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext # Binary search between lo and hi bounds. while test "x$ac_lo" != "x$ac_hi"; do ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo` cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -22842,7 +25512,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (unsigned char))) <= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (unsigned int))) <= $ac_mid)]; test_array [0] = 0 ; @@ -22851,11 +25521,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? 
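When configure is not cross compiling, the regenerated checks take the other branch visible in these hunks: they compile and run a small program that writes `sizeof (unsigned int)` to conftest.val, and the shell reads that file back into `ac_cv_sizeof_unsigned_int`. A self-contained sketch reconstructed from the hunk; the two `#include` lines whose header names are missing in this copy of the diff are presumably `<stdio.h>` and `<stdlib.h>`, and `<sys/types.h>` again stands in for `$db_includes`:

/*
 * Sketch of the run-time probe: print the size to conftest.val exactly
 * as the generated conftest does.  The negative branch is part of the
 * generic autoconf template and is never taken for sizeof.
 */
#include <sys/types.h>          /* assumed expansion of $db_includes */
#include <stdio.h>              /* presumed; name stripped in this copy */
#include <stdlib.h>             /* presumed; name stripped in this copy */

static long longval (void) { return (long) (sizeof (unsigned int)); }
static unsigned long ulongval (void) { return (long) (sizeof (unsigned int)); }

int
main (void)
{
	FILE *f = fopen ("conftest.val", "w");
	if (! f)
		exit (1);
	if (((long) (sizeof (unsigned int))) < 0) {
		long i = longval ();
		if (i != ((long) (sizeof (unsigned int))))
			exit (1);
		fprintf (f, "%ld\n", i);
	} else {
		unsigned long i = ulongval ();
		if (i != ((long) (sizeof (unsigned int))))
			exit (1);
		fprintf (f, "%lu\n", i);
	}
	exit (ferror (f) || fclose (f) != 0);
}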
@@ -22868,13 +25548,13 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_lo=`expr '(' $ac_mid ')' + 1` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done case $ac_lo in -?*) ac_cv_sizeof_unsigned_char=$ac_lo;; -'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned char), 77 +?*) ac_cv_sizeof_unsigned_int=$ac_lo;; +'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned int), 77 See \`config.log' for more details." >&5 -echo "$as_me: error: cannot compute sizeof (unsigned char), 77 +echo "$as_me: error: cannot compute sizeof (unsigned int), 77 See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } ;; esac @@ -22887,7 +25567,6 @@ See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -22895,8 +25574,8 @@ cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $db_includes -long longval () { return (long) (sizeof (unsigned char)); } -unsigned long ulongval () { return (long) (sizeof (unsigned char)); } +long longval () { return (long) (sizeof (unsigned int)); } +unsigned long ulongval () { return (long) (sizeof (unsigned int)); } #include #include int @@ -22906,17 +25585,17 @@ main () FILE *f = fopen ("conftest.val", "w"); if (! f) exit (1); - if (((long) (sizeof (unsigned char))) < 0) + if (((long) (sizeof (unsigned int))) < 0) { long i = longval (); - if (i != ((long) (sizeof (unsigned char)))) + if (i != ((long) (sizeof (unsigned int)))) exit (1); fprintf (f, "%ld\n", i); } else { unsigned long i = ulongval (); - if (i != ((long) (sizeof (unsigned char)))) + if (i != ((long) (sizeof (unsigned int)))) exit (1); fprintf (f, "%lu\n", i); } @@ -22937,41 +25616,40 @@ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_sizeof_unsigned_char=`cat conftest.val` + ac_cv_sizeof_unsigned_int=`cat conftest.val` else echo "$as_me: program exited with status $ac_status" >&5 echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) -{ { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned char), 77 +{ { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned int), 77 See \`config.log' for more details." >&5 -echo "$as_me: error: cannot compute sizeof (unsigned char), 77 +echo "$as_me: error: cannot compute sizeof (unsigned int), 77 See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } fi -rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi rm -f conftest.val else - ac_cv_sizeof_unsigned_char=0 + ac_cv_sizeof_unsigned_int=0 fi fi -echo "$as_me:$LINENO: result: $ac_cv_sizeof_unsigned_char" >&5 -echo "${ECHO_T}$ac_cv_sizeof_unsigned_char" >&6 +echo "$as_me:$LINENO: result: $ac_cv_sizeof_unsigned_int" >&5 +echo "${ECHO_T}$ac_cv_sizeof_unsigned_int" >&6 cat >>confdefs.h <<_ACEOF -#define SIZEOF_UNSIGNED_CHAR $ac_cv_sizeof_unsigned_char +#define SIZEOF_UNSIGNED_INT $ac_cv_sizeof_unsigned_int _ACEOF -echo "$as_me:$LINENO: checking for short" >&5 -echo $ECHO_N "checking for short... $ECHO_C" >&6 -if test "${ac_cv_type_short+set}" = set; then +echo "$as_me:$LINENO: checking for long" >&5 +echo $ECHO_N "checking for long... 
$ECHO_C" >&6 +if test "${ac_cv_type_long+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -22982,9 +25660,9 @@ $db_includes int main () { -if ((short *) 0) +if ((long *) 0) return 0; -if (sizeof (short)) +if (sizeof (long)) return 0; ; return 0; @@ -22992,34 +25670,44 @@ if (sizeof (short)) _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_type_short=yes + ac_cv_type_long=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_type_short=no +ac_cv_type_long=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: $ac_cv_type_short" >&5 -echo "${ECHO_T}$ac_cv_type_short" >&6 +echo "$as_me:$LINENO: result: $ac_cv_type_long" >&5 +echo "${ECHO_T}$ac_cv_type_long" >&6 -echo "$as_me:$LINENO: checking size of short" >&5 -echo $ECHO_N "checking size of short... $ECHO_C" >&6 -if test "${ac_cv_sizeof_short+set}" = set; then +echo "$as_me:$LINENO: checking size of long" >&5 +echo $ECHO_N "checking size of long... $ECHO_C" >&6 +if test "${ac_cv_sizeof_long+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - if test "$ac_cv_type_short" = yes; then + if test "$ac_cv_type_long" = yes; then # The cast to unsigned long works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. @@ -23027,7 +25715,6 @@ else if test "$cross_compiling" = yes; then # Depending upon the size, compute the lo and hi bounds. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -23038,7 +25725,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (short))) >= 0)]; +static int test_array [1 - 2 * !(((long) (sizeof (long))) >= 0)]; test_array [0] = 0 ; @@ -23047,11 +25734,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? 
@@ -23060,7 +25757,6 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ac_lo=0 ac_mid=0 while :; do cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -23071,7 +25767,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (short))) <= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (long))) <= $ac_mid)]; test_array [0] = 0 ; @@ -23080,11 +25776,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -23096,20 +25802,19 @@ else sed 's/^/| /' conftest.$ac_ext >&5 ac_lo=`expr $ac_mid + 1` - if test $ac_lo -le $ac_mid; then - ac_lo= ac_hi= - break - fi - ac_mid=`expr 2 '*' $ac_mid + 1` + if test $ac_lo -le $ac_mid; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid + 1` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -23120,7 +25825,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (short))) < 0)]; +static int test_array [1 - 2 * !(((long) (sizeof (long))) < 0)]; test_array [0] = 0 ; @@ -23129,11 +25834,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -23142,7 +25857,6 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ac_hi=-1 ac_mid=-1 while :; do cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -23153,7 +25867,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (short))) >= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (long))) >= $ac_mid)]; test_array [0] = 0 ; @@ -23162,11 +25876,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -23178,13 +25902,13 @@ else sed 's/^/| /' conftest.$ac_ext >&5 ac_hi=`expr '(' $ac_mid ')' - 1` - if test $ac_mid -le $ac_hi; then - ac_lo= ac_hi= - break - fi - ac_mid=`expr 2 '*' $ac_mid` + if test $ac_mid -le $ac_hi; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done else echo "$as_me: failed program was:" >&5 @@ -23192,14 +25916,13 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_lo= ac_hi= fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext # Binary search between lo and hi bounds. while test "x$ac_lo" != "x$ac_hi"; do ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo` cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -23210,7 +25933,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (short))) <= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (long))) <= $ac_mid)]; test_array [0] = 0 ; @@ -23219,11 +25942,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -23236,13 +25969,13 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_lo=`expr '(' $ac_mid ')' + 1` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done case $ac_lo in -?*) ac_cv_sizeof_short=$ac_lo;; -'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (short), 77 +?*) ac_cv_sizeof_long=$ac_lo;; +'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (long), 77 See \`config.log' for more details." >&5 -echo "$as_me: error: cannot compute sizeof (short), 77 +echo "$as_me: error: cannot compute sizeof (long), 77 See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } ;; esac @@ -23255,7 +25988,6 @@ See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -23263,8 +25995,8 @@ cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ $db_includes -long longval () { return (long) (sizeof (short)); } -unsigned long ulongval () { return (long) (sizeof (short)); } +long longval () { return (long) (sizeof (long)); } +unsigned long ulongval () { return (long) (sizeof (long)); } #include #include int @@ -23274,17 +26006,17 @@ main () FILE *f = fopen ("conftest.val", "w"); if (! f) exit (1); - if (((long) (sizeof (short))) < 0) + if (((long) (sizeof (long))) < 0) { long i = longval (); - if (i != ((long) (sizeof (short)))) + if (i != ((long) (sizeof (long)))) exit (1); fprintf (f, "%ld\n", i); } else { unsigned long i = ulongval (); - if (i != ((long) (sizeof (short)))) + if (i != ((long) (sizeof (long)))) exit (1); fprintf (f, "%lu\n", i); } @@ -23305,41 +26037,40 @@ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_sizeof_short=`cat conftest.val` + ac_cv_sizeof_long=`cat conftest.val` else echo "$as_me: program exited with status $ac_status" >&5 echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) -{ { echo "$as_me:$LINENO: error: cannot compute sizeof (short), 77 +{ { echo "$as_me:$LINENO: error: cannot compute sizeof (long), 77 See \`config.log' for more details." >&5 -echo "$as_me: error: cannot compute sizeof (short), 77 +echo "$as_me: error: cannot compute sizeof (long), 77 See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } fi -rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi rm -f conftest.val else - ac_cv_sizeof_short=0 + ac_cv_sizeof_long=0 fi fi -echo "$as_me:$LINENO: result: $ac_cv_sizeof_short" >&5 -echo "${ECHO_T}$ac_cv_sizeof_short" >&6 +echo "$as_me:$LINENO: result: $ac_cv_sizeof_long" >&5 +echo "${ECHO_T}$ac_cv_sizeof_long" >&6 cat >>confdefs.h <<_ACEOF -#define SIZEOF_SHORT $ac_cv_sizeof_short +#define SIZEOF_LONG $ac_cv_sizeof_long _ACEOF -echo "$as_me:$LINENO: checking for unsigned short" >&5 -echo $ECHO_N "checking for unsigned short... $ECHO_C" >&6 -if test "${ac_cv_type_unsigned_short+set}" = set; then +echo "$as_me:$LINENO: checking for unsigned long" >&5 +echo $ECHO_N "checking for unsigned long... $ECHO_C" >&6 +if test "${ac_cv_type_unsigned_long+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -23350,9 +26081,9 @@ $db_includes int main () { -if ((unsigned short *) 0) +if ((unsigned long *) 0) return 0; -if (sizeof (unsigned short)) +if (sizeof (unsigned long)) return 0; ; return 0; @@ -23360,34 +26091,44 @@ if (sizeof (unsigned short)) _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_type_unsigned_short=yes + ac_cv_type_unsigned_long=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_type_unsigned_short=no +ac_cv_type_unsigned_long=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: $ac_cv_type_unsigned_short" >&5 -echo "${ECHO_T}$ac_cv_type_unsigned_short" >&6 +echo "$as_me:$LINENO: result: $ac_cv_type_unsigned_long" >&5 +echo "${ECHO_T}$ac_cv_type_unsigned_long" >&6 -echo "$as_me:$LINENO: checking size of unsigned short" >&5 -echo $ECHO_N "checking size of unsigned short... $ECHO_C" >&6 -if test "${ac_cv_sizeof_unsigned_short+set}" = set; then +echo "$as_me:$LINENO: checking size of unsigned long" >&5 +echo $ECHO_N "checking size of unsigned long... $ECHO_C" >&6 +if test "${ac_cv_sizeof_unsigned_long+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - if test "$ac_cv_type_unsigned_short" = yes; then + if test "$ac_cv_type_unsigned_long" = yes; then # The cast to unsigned long works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. @@ -23395,7 +26136,6 @@ else if test "$cross_compiling" = yes; then # Depending upon the size, compute the lo and hi bounds. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -23406,7 +26146,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (unsigned short))) >= 0)]; +static int test_array [1 - 2 * !(((long) (sizeof (unsigned long))) >= 0)]; test_array [0] = 0 ; @@ -23415,11 +26155,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -23428,7 +26178,6 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ac_lo=0 ac_mid=0 while :; do cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -23439,7 +26188,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (unsigned short))) <= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (unsigned long))) <= $ac_mid)]; test_array [0] = 0 ; @@ -23448,11 +26197,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -23464,20 +26223,19 @@ else sed 's/^/| /' conftest.$ac_ext >&5 ac_lo=`expr $ac_mid + 1` - if test $ac_lo -le $ac_mid; then - ac_lo= ac_hi= - break - fi - ac_mid=`expr 2 '*' $ac_mid + 1` + if test $ac_lo -le $ac_mid; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid + 1` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -23488,7 +26246,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (unsigned short))) < 0)]; +static int test_array [1 - 2 * !(((long) (sizeof (unsigned long))) < 0)]; test_array [0] = 0 ; @@ -23497,11 +26255,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -23510,7 +26278,6 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ac_hi=-1 ac_mid=-1 while :; do cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -23521,7 +26288,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (unsigned short))) >= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (unsigned long))) >= $ac_mid)]; test_array [0] = 0 ; @@ -23530,11 +26297,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? 
@@ -23546,13 +26323,13 @@ else sed 's/^/| /' conftest.$ac_ext >&5 ac_hi=`expr '(' $ac_mid ')' - 1` - if test $ac_mid -le $ac_hi; then - ac_lo= ac_hi= - break - fi - ac_mid=`expr 2 '*' $ac_mid` + if test $ac_mid -le $ac_hi; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done else echo "$as_me: failed program was:" >&5 @@ -23560,14 +26337,13 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_lo= ac_hi= fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext # Binary search between lo and hi bounds. while test "x$ac_lo" != "x$ac_hi"; do ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo` cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -23578,7 +26354,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (unsigned short))) <= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (unsigned long))) <= $ac_mid)]; test_array [0] = 0 ; @@ -23587,11 +26363,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -23604,13 +26390,13 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_lo=`expr '(' $ac_mid ')' + 1` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done case $ac_lo in -?*) ac_cv_sizeof_unsigned_short=$ac_lo;; -'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned short), 77 +?*) ac_cv_sizeof_unsigned_long=$ac_lo;; +'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned long), 77 See \`config.log' for more details." >&5 -echo "$as_me: error: cannot compute sizeof (unsigned short), 77 +echo "$as_me: error: cannot compute sizeof (unsigned long), 77 See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } ;; esac @@ -23623,7 +26409,6 @@ See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -23631,8 +26416,8 @@ cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $db_includes -long longval () { return (long) (sizeof (unsigned short)); } -unsigned long ulongval () { return (long) (sizeof (unsigned short)); } +long longval () { return (long) (sizeof (unsigned long)); } +unsigned long ulongval () { return (long) (sizeof (unsigned long)); } #include #include int @@ -23642,17 +26427,17 @@ main () FILE *f = fopen ("conftest.val", "w"); if (! 
f) exit (1); - if (((long) (sizeof (unsigned short))) < 0) + if (((long) (sizeof (unsigned long))) < 0) { long i = longval (); - if (i != ((long) (sizeof (unsigned short)))) + if (i != ((long) (sizeof (unsigned long)))) exit (1); fprintf (f, "%ld\n", i); } else { unsigned long i = ulongval (); - if (i != ((long) (sizeof (unsigned short)))) + if (i != ((long) (sizeof (unsigned long)))) exit (1); fprintf (f, "%lu\n", i); } @@ -23673,41 +26458,40 @@ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_sizeof_unsigned_short=`cat conftest.val` + ac_cv_sizeof_unsigned_long=`cat conftest.val` else echo "$as_me: program exited with status $ac_status" >&5 echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) -{ { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned short), 77 +{ { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned long), 77 See \`config.log' for more details." >&5 -echo "$as_me: error: cannot compute sizeof (unsigned short), 77 +echo "$as_me: error: cannot compute sizeof (unsigned long), 77 See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } fi -rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi rm -f conftest.val else - ac_cv_sizeof_unsigned_short=0 + ac_cv_sizeof_unsigned_long=0 fi fi -echo "$as_me:$LINENO: result: $ac_cv_sizeof_unsigned_short" >&5 -echo "${ECHO_T}$ac_cv_sizeof_unsigned_short" >&6 +echo "$as_me:$LINENO: result: $ac_cv_sizeof_unsigned_long" >&5 +echo "${ECHO_T}$ac_cv_sizeof_unsigned_long" >&6 cat >>confdefs.h <<_ACEOF -#define SIZEOF_UNSIGNED_SHORT $ac_cv_sizeof_unsigned_short +#define SIZEOF_UNSIGNED_LONG $ac_cv_sizeof_unsigned_long _ACEOF -echo "$as_me:$LINENO: checking for int" >&5 -echo $ECHO_N "checking for int... $ECHO_C" >&6 -if test "${ac_cv_type_int+set}" = set; then +echo "$as_me:$LINENO: checking for long long" >&5 +echo $ECHO_N "checking for long long... $ECHO_C" >&6 +if test "${ac_cv_type_long_long+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -23718,9 +26502,9 @@ $db_includes int main () { -if ((int *) 0) +if ((long long *) 0) return 0; -if (sizeof (int)) +if (sizeof (long long)) return 0; ; return 0; @@ -23728,34 +26512,44 @@ if (sizeof (int)) _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_type_int=yes + ac_cv_type_long_long=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_type_int=no +ac_cv_type_long_long=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: $ac_cv_type_int" >&5 -echo "${ECHO_T}$ac_cv_type_int" >&6 +echo "$as_me:$LINENO: result: $ac_cv_type_long_long" >&5 +echo "${ECHO_T}$ac_cv_type_long_long" >&6 -echo "$as_me:$LINENO: checking size of int" >&5 -echo $ECHO_N "checking size of int... $ECHO_C" >&6 -if test "${ac_cv_sizeof_int+set}" = set; then +echo "$as_me:$LINENO: checking size of long long" >&5 +echo $ECHO_N "checking size of long long... $ECHO_C" >&6 +if test "${ac_cv_sizeof_long_long+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - if test "$ac_cv_type_int" = yes; then + if test "$ac_cv_type_long_long" = yes; then # The cast to unsigned long works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. @@ -23763,7 +26557,6 @@ else if test "$cross_compiling" = yes; then # Depending upon the size, compute the lo and hi bounds. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -23774,7 +26567,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (int))) >= 0)]; +static int test_array [1 - 2 * !(((long) (sizeof (long long))) >= 0)]; test_array [0] = 0 ; @@ -23783,11 +26576,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -23796,7 +26599,6 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ac_lo=0 ac_mid=0 while :; do cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -23807,7 +26609,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (int))) <= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (long long))) <= $ac_mid)]; test_array [0] = 0 ; @@ -23816,11 +26618,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -23832,20 +26644,19 @@ else sed 's/^/| /' conftest.$ac_ext >&5 ac_lo=`expr $ac_mid + 1` - if test $ac_lo -le $ac_mid; then - ac_lo= ac_hi= - break - fi - ac_mid=`expr 2 '*' $ac_mid + 1` + if test $ac_lo -le $ac_mid; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid + 1` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -23856,7 +26667,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (int))) < 0)]; +static int test_array [1 - 2 * !(((long) (sizeof (long long))) < 0)]; test_array [0] = 0 ; @@ -23865,11 +26676,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -23878,7 +26699,6 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ac_hi=-1 ac_mid=-1 while :; do cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -23889,7 +26709,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (int))) >= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (long long))) >= $ac_mid)]; test_array [0] = 0 ; @@ -23898,11 +26718,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? 
@@ -23914,13 +26744,13 @@ else sed 's/^/| /' conftest.$ac_ext >&5 ac_hi=`expr '(' $ac_mid ')' - 1` - if test $ac_mid -le $ac_hi; then - ac_lo= ac_hi= - break - fi - ac_mid=`expr 2 '*' $ac_mid` + if test $ac_mid -le $ac_hi; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done else echo "$as_me: failed program was:" >&5 @@ -23928,14 +26758,13 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_lo= ac_hi= fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext # Binary search between lo and hi bounds. while test "x$ac_lo" != "x$ac_hi"; do ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo` cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -23946,7 +26775,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (int))) <= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (long long))) <= $ac_mid)]; test_array [0] = 0 ; @@ -23955,11 +26784,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -23972,13 +26811,13 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_lo=`expr '(' $ac_mid ')' + 1` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done case $ac_lo in -?*) ac_cv_sizeof_int=$ac_lo;; -'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (int), 77 +?*) ac_cv_sizeof_long_long=$ac_lo;; +'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (long long), 77 See \`config.log' for more details." >&5 -echo "$as_me: error: cannot compute sizeof (int), 77 +echo "$as_me: error: cannot compute sizeof (long long), 77 See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } ;; esac @@ -23991,7 +26830,6 @@ See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -23999,8 +26837,8 @@ cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $db_includes -long longval () { return (long) (sizeof (int)); } -unsigned long ulongval () { return (long) (sizeof (int)); } +long longval () { return (long) (sizeof (long long)); } +unsigned long ulongval () { return (long) (sizeof (long long)); } #include #include int @@ -24010,17 +26848,17 @@ main () FILE *f = fopen ("conftest.val", "w"); if (! 
f) exit (1); - if (((long) (sizeof (int))) < 0) + if (((long) (sizeof (long long))) < 0) { long i = longval (); - if (i != ((long) (sizeof (int)))) + if (i != ((long) (sizeof (long long)))) exit (1); fprintf (f, "%ld\n", i); } else { unsigned long i = ulongval (); - if (i != ((long) (sizeof (int)))) + if (i != ((long) (sizeof (long long)))) exit (1); fprintf (f, "%lu\n", i); } @@ -24041,41 +26879,40 @@ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_sizeof_int=`cat conftest.val` + ac_cv_sizeof_long_long=`cat conftest.val` else echo "$as_me: program exited with status $ac_status" >&5 echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) -{ { echo "$as_me:$LINENO: error: cannot compute sizeof (int), 77 +{ { echo "$as_me:$LINENO: error: cannot compute sizeof (long long), 77 See \`config.log' for more details." >&5 -echo "$as_me: error: cannot compute sizeof (int), 77 +echo "$as_me: error: cannot compute sizeof (long long), 77 See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } fi -rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi rm -f conftest.val else - ac_cv_sizeof_int=0 + ac_cv_sizeof_long_long=0 fi fi -echo "$as_me:$LINENO: result: $ac_cv_sizeof_int" >&5 -echo "${ECHO_T}$ac_cv_sizeof_int" >&6 +echo "$as_me:$LINENO: result: $ac_cv_sizeof_long_long" >&5 +echo "${ECHO_T}$ac_cv_sizeof_long_long" >&6 cat >>confdefs.h <<_ACEOF -#define SIZEOF_INT $ac_cv_sizeof_int +#define SIZEOF_LONG_LONG $ac_cv_sizeof_long_long _ACEOF -echo "$as_me:$LINENO: checking for unsigned int" >&5 -echo $ECHO_N "checking for unsigned int... $ECHO_C" >&6 -if test "${ac_cv_type_unsigned_int+set}" = set; then +echo "$as_me:$LINENO: checking for unsigned long long" >&5 +echo $ECHO_N "checking for unsigned long long... $ECHO_C" >&6 +if test "${ac_cv_type_unsigned_long_long+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -24086,9 +26923,9 @@ $db_includes int main () { -if ((unsigned int *) 0) +if ((unsigned long long *) 0) return 0; -if (sizeof (unsigned int)) +if (sizeof (unsigned long long)) return 0; ; return 0; @@ -24096,34 +26933,44 @@ if (sizeof (unsigned int)) _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_type_unsigned_int=yes + ac_cv_type_unsigned_long_long=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_type_unsigned_int=no +ac_cv_type_unsigned_long_long=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: $ac_cv_type_unsigned_int" >&5 -echo "${ECHO_T}$ac_cv_type_unsigned_int" >&6 +echo "$as_me:$LINENO: result: $ac_cv_type_unsigned_long_long" >&5 +echo "${ECHO_T}$ac_cv_type_unsigned_long_long" >&6 -echo "$as_me:$LINENO: checking size of unsigned int" >&5 -echo $ECHO_N "checking size of unsigned int... $ECHO_C" >&6 -if test "${ac_cv_sizeof_unsigned_int+set}" = set; then +echo "$as_me:$LINENO: checking size of unsigned long long" >&5 +echo $ECHO_N "checking size of unsigned long long... $ECHO_C" >&6 +if test "${ac_cv_sizeof_unsigned_long_long+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - if test "$ac_cv_type_unsigned_int" = yes; then + if test "$ac_cv_type_unsigned_long_long" = yes; then # The cast to unsigned long works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. @@ -24131,7 +26978,6 @@ else if test "$cross_compiling" = yes; then # Depending upon the size, compute the lo and hi bounds. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -24142,7 +26988,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (unsigned int))) >= 0)]; +static int test_array [1 - 2 * !(((long) (sizeof (unsigned long long))) >= 0)]; test_array [0] = 0 ; @@ -24151,11 +26997,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -24164,7 +27020,6 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ac_lo=0 ac_mid=0 while :; do cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -24175,7 +27030,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (unsigned int))) <= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (unsigned long long))) <= $ac_mid)]; test_array [0] = 0 ; @@ -24184,11 +27039,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? 
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -24200,20 +27065,19 @@ else sed 's/^/| /' conftest.$ac_ext >&5 ac_lo=`expr $ac_mid + 1` - if test $ac_lo -le $ac_mid; then - ac_lo= ac_hi= - break - fi - ac_mid=`expr 2 '*' $ac_mid + 1` + if test $ac_lo -le $ac_mid; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid + 1` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -24224,7 +27088,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (unsigned int))) < 0)]; +static int test_array [1 - 2 * !(((long) (sizeof (unsigned long long))) < 0)]; test_array [0] = 0 ; @@ -24233,11 +27097,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -24246,7 +27120,6 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ac_hi=-1 ac_mid=-1 while :; do cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -24257,7 +27130,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (unsigned int))) >= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (unsigned long long))) >= $ac_mid)]; test_array [0] = 0 ; @@ -24266,11 +27139,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? 
@@ -24282,13 +27165,13 @@ else sed 's/^/| /' conftest.$ac_ext >&5 ac_hi=`expr '(' $ac_mid ')' - 1` - if test $ac_mid -le $ac_hi; then - ac_lo= ac_hi= - break - fi - ac_mid=`expr 2 '*' $ac_mid` + if test $ac_mid -le $ac_hi; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done else echo "$as_me: failed program was:" >&5 @@ -24296,14 +27179,13 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_lo= ac_hi= fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext # Binary search between lo and hi bounds. while test "x$ac_lo" != "x$ac_hi"; do ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo` cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -24314,7 +27196,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (unsigned int))) <= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (unsigned long long))) <= $ac_mid)]; test_array [0] = 0 ; @@ -24323,11 +27205,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -24340,13 +27232,13 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_lo=`expr '(' $ac_mid ')' + 1` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done case $ac_lo in -?*) ac_cv_sizeof_unsigned_int=$ac_lo;; -'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned int), 77 +?*) ac_cv_sizeof_unsigned_long_long=$ac_lo;; +'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned long long), 77 See \`config.log' for more details." >&5 -echo "$as_me: error: cannot compute sizeof (unsigned int), 77 +echo "$as_me: error: cannot compute sizeof (unsigned long long), 77 See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } ;; esac @@ -24359,7 +27251,6 @@ See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -24367,8 +27258,8 @@ cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $db_includes -long longval () { return (long) (sizeof (unsigned int)); } -unsigned long ulongval () { return (long) (sizeof (unsigned int)); } +long longval () { return (long) (sizeof (unsigned long long)); } +unsigned long ulongval () { return (long) (sizeof (unsigned long long)); } #include #include int @@ -24378,17 +27269,17 @@ main () FILE *f = fopen ("conftest.val", "w"); if (! 
f) exit (1); - if (((long) (sizeof (unsigned int))) < 0) + if (((long) (sizeof (unsigned long long))) < 0) { long i = longval (); - if (i != ((long) (sizeof (unsigned int)))) + if (i != ((long) (sizeof (unsigned long long)))) exit (1); fprintf (f, "%ld\n", i); } else { unsigned long i = ulongval (); - if (i != ((long) (sizeof (unsigned int)))) + if (i != ((long) (sizeof (unsigned long long)))) exit (1); fprintf (f, "%lu\n", i); } @@ -24409,41 +27300,40 @@ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_sizeof_unsigned_int=`cat conftest.val` + ac_cv_sizeof_unsigned_long_long=`cat conftest.val` else echo "$as_me: program exited with status $ac_status" >&5 echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) -{ { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned int), 77 +{ { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned long long), 77 See \`config.log' for more details." >&5 -echo "$as_me: error: cannot compute sizeof (unsigned int), 77 +echo "$as_me: error: cannot compute sizeof (unsigned long long), 77 See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } fi -rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi rm -f conftest.val else - ac_cv_sizeof_unsigned_int=0 + ac_cv_sizeof_unsigned_long_long=0 fi fi -echo "$as_me:$LINENO: result: $ac_cv_sizeof_unsigned_int" >&5 -echo "${ECHO_T}$ac_cv_sizeof_unsigned_int" >&6 +echo "$as_me:$LINENO: result: $ac_cv_sizeof_unsigned_long_long" >&5 +echo "${ECHO_T}$ac_cv_sizeof_unsigned_long_long" >&6 cat >>confdefs.h <<_ACEOF -#define SIZEOF_UNSIGNED_INT $ac_cv_sizeof_unsigned_int +#define SIZEOF_UNSIGNED_LONG_LONG $ac_cv_sizeof_unsigned_long_long _ACEOF -echo "$as_me:$LINENO: checking for long" >&5 -echo $ECHO_N "checking for long... $ECHO_C" >&6 -if test "${ac_cv_type_long+set}" = set; then +echo "$as_me:$LINENO: checking for size_t" >&5 +echo $ECHO_N "checking for size_t... $ECHO_C" >&6 +if test "${ac_cv_type_size_t+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -24454,9 +27344,9 @@ $db_includes int main () { -if ((long *) 0) +if ((size_t *) 0) return 0; -if (sizeof (long)) +if (sizeof (size_t)) return 0; ; return 0; @@ -24464,34 +27354,44 @@ if (sizeof (long)) _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_type_long=yes + ac_cv_type_size_t=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_type_long=no +ac_cv_type_size_t=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: $ac_cv_type_long" >&5 -echo "${ECHO_T}$ac_cv_type_long" >&6 +echo "$as_me:$LINENO: result: $ac_cv_type_size_t" >&5 +echo "${ECHO_T}$ac_cv_type_size_t" >&6 -echo "$as_me:$LINENO: checking size of long" >&5 -echo $ECHO_N "checking size of long... $ECHO_C" >&6 -if test "${ac_cv_sizeof_long+set}" = set; then +echo "$as_me:$LINENO: checking size of size_t" >&5 +echo $ECHO_N "checking size of size_t... $ECHO_C" >&6 +if test "${ac_cv_sizeof_size_t+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - if test "$ac_cv_type_long" = yes; then + if test "$ac_cv_type_size_t" = yes; then # The cast to unsigned long works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. @@ -24499,7 +27399,6 @@ else if test "$cross_compiling" = yes; then # Depending upon the size, compute the lo and hi bounds. cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -24510,7 +27409,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (long))) >= 0)]; +static int test_array [1 - 2 * !(((long) (sizeof (size_t))) >= 0)]; test_array [0] = 0 ; @@ -24519,11 +27418,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -24532,7 +27441,6 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ac_lo=0 ac_mid=0 while :; do cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -24543,7 +27451,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (long))) <= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (size_t))) <= $ac_mid)]; test_array [0] = 0 ; @@ -24552,11 +27460,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? 
@@ -24568,20 +27486,19 @@ else sed 's/^/| /' conftest.$ac_ext >&5 ac_lo=`expr $ac_mid + 1` - if test $ac_lo -le $ac_mid; then - ac_lo= ac_hi= - break - fi - ac_mid=`expr 2 '*' $ac_mid + 1` + if test $ac_lo -le $ac_mid; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid + 1` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -24592,7 +27509,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (long))) < 0)]; +static int test_array [1 - 2 * !(((long) (sizeof (size_t))) < 0)]; test_array [0] = 0 ; @@ -24601,11 +27518,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -24614,7 +27541,6 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ac_hi=-1 ac_mid=-1 while :; do cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -24625,7 +27551,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (long))) >= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (size_t))) >= $ac_mid)]; test_array [0] = 0 ; @@ -24634,11 +27560,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -24650,13 +27586,13 @@ else sed 's/^/| /' conftest.$ac_ext >&5 ac_hi=`expr '(' $ac_mid ')' - 1` - if test $ac_mid -le $ac_hi; then - ac_lo= ac_hi= - break - fi - ac_mid=`expr 2 '*' $ac_mid` + if test $ac_mid -le $ac_hi; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done else echo "$as_me: failed program was:" >&5 @@ -24664,14 +27600,13 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_lo= ac_hi= fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext # Binary search between lo and hi bounds. 
while test "x$ac_lo" != "x$ac_hi"; do ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo` cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -24682,7 +27617,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (long))) <= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (size_t))) <= $ac_mid)]; test_array [0] = 0 ; @@ -24691,11 +27626,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -24708,13 +27653,13 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_lo=`expr '(' $ac_mid ')' + 1` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done case $ac_lo in -?*) ac_cv_sizeof_long=$ac_lo;; -'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (long), 77 +?*) ac_cv_sizeof_size_t=$ac_lo;; +'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (size_t), 77 See \`config.log' for more details." >&5 -echo "$as_me: error: cannot compute sizeof (long), 77 +echo "$as_me: error: cannot compute sizeof (size_t), 77 See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } ;; esac @@ -24727,7 +27672,6 @@ See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -24735,8 +27679,8 @@ cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $db_includes -long longval () { return (long) (sizeof (long)); } -unsigned long ulongval () { return (long) (sizeof (long)); } +long longval () { return (long) (sizeof (size_t)); } +unsigned long ulongval () { return (long) (sizeof (size_t)); } #include #include int @@ -24746,17 +27690,17 @@ main () FILE *f = fopen ("conftest.val", "w"); if (! f) exit (1); - if (((long) (sizeof (long))) < 0) + if (((long) (sizeof (size_t))) < 0) { long i = longval (); - if (i != ((long) (sizeof (long)))) + if (i != ((long) (sizeof (size_t)))) exit (1); fprintf (f, "%ld\n", i); } else { unsigned long i = ulongval (); - if (i != ((long) (sizeof (long)))) + if (i != ((long) (sizeof (size_t)))) exit (1); fprintf (f, "%lu\n", i); } @@ -24777,41 +27721,40 @@ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_sizeof_long=`cat conftest.val` + ac_cv_sizeof_size_t=`cat conftest.val` else echo "$as_me: program exited with status $ac_status" >&5 echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) -{ { echo "$as_me:$LINENO: error: cannot compute sizeof (long), 77 +{ { echo "$as_me:$LINENO: error: cannot compute sizeof (size_t), 77 See \`config.log' for more details." 
>&5 -echo "$as_me: error: cannot compute sizeof (long), 77 +echo "$as_me: error: cannot compute sizeof (size_t), 77 See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } fi -rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi rm -f conftest.val else - ac_cv_sizeof_long=0 + ac_cv_sizeof_size_t=0 fi fi -echo "$as_me:$LINENO: result: $ac_cv_sizeof_long" >&5 -echo "${ECHO_T}$ac_cv_sizeof_long" >&6 +echo "$as_me:$LINENO: result: $ac_cv_sizeof_size_t" >&5 +echo "${ECHO_T}$ac_cv_sizeof_size_t" >&6 cat >>confdefs.h <<_ACEOF -#define SIZEOF_LONG $ac_cv_sizeof_long +#define SIZEOF_SIZE_T $ac_cv_sizeof_size_t _ACEOF -echo "$as_me:$LINENO: checking for unsigned long" >&5 -echo $ECHO_N "checking for unsigned long... $ECHO_C" >&6 -if test "${ac_cv_type_unsigned_long+set}" = set; then +echo "$as_me:$LINENO: checking for char *" >&5 +echo $ECHO_N "checking for char *... $ECHO_C" >&6 +if test "${ac_cv_type_char_p+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -24822,9 +27765,9 @@ $db_includes int main () { -if ((unsigned long *) 0) +if ((char * *) 0) return 0; -if (sizeof (unsigned long)) +if (sizeof (char *)) return 0; ; return 0; @@ -24832,34 +27775,44 @@ if (sizeof (unsigned long)) _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_type_unsigned_long=yes + ac_cv_type_char_p=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_type_unsigned_long=no +ac_cv_type_char_p=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: $ac_cv_type_unsigned_long" >&5 -echo "${ECHO_T}$ac_cv_type_unsigned_long" >&6 +echo "$as_me:$LINENO: result: $ac_cv_type_char_p" >&5 +echo "${ECHO_T}$ac_cv_type_char_p" >&6 -echo "$as_me:$LINENO: checking size of unsigned long" >&5 -echo $ECHO_N "checking size of unsigned long... $ECHO_C" >&6 -if test "${ac_cv_sizeof_unsigned_long+set}" = set; then +echo "$as_me:$LINENO: checking size of char *" >&5 +echo $ECHO_N "checking size of char *... $ECHO_C" >&6 +if test "${ac_cv_sizeof_char_p+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - if test "$ac_cv_type_unsigned_long" = yes; then + if test "$ac_cv_type_char_p" = yes; then # The cast to unsigned long works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. @@ -24867,7 +27820,6 @@ else if test "$cross_compiling" = yes; then # Depending upon the size, compute the lo and hi bounds. 
cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -24878,7 +27830,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (unsigned long))) >= 0)]; +static int test_array [1 - 2 * !(((long) (sizeof (char *))) >= 0)]; test_array [0] = 0 ; @@ -24887,11 +27839,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -24900,7 +27862,6 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ac_lo=0 ac_mid=0 while :; do cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -24911,7 +27872,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (unsigned long))) <= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (char *))) <= $ac_mid)]; test_array [0] = 0 ; @@ -24920,11 +27881,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -24936,20 +27907,19 @@ else sed 's/^/| /' conftest.$ac_ext >&5 ac_lo=`expr $ac_mid + 1` - if test $ac_lo -le $ac_mid; then - ac_lo= ac_hi= - break - fi - ac_mid=`expr 2 '*' $ac_mid + 1` + if test $ac_lo -le $ac_mid; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid + 1` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -24960,7 +27930,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (unsigned long))) < 0)]; +static int test_array [1 - 2 * !(((long) (sizeof (char *))) < 0)]; test_array [0] = 0 ; @@ -24969,11 +27939,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -24982,7 +27962,6 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ac_hi=-1 ac_mid=-1 while :; do cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -24993,7 +27972,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (unsigned long))) >= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (char *))) >= $ac_mid)]; test_array [0] = 0 ; @@ -25002,11 +27981,21 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -25018,13 +28007,13 @@ else sed 's/^/| /' conftest.$ac_ext >&5 ac_hi=`expr '(' $ac_mid ')' - 1` - if test $ac_mid -le $ac_hi; then - ac_lo= ac_hi= - break - fi - ac_mid=`expr 2 '*' $ac_mid` + if test $ac_mid -le $ac_hi; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid` fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done else echo "$as_me: failed program was:" >&5 @@ -25032,14 +28021,13 @@ sed 's/^/| /' conftest.$ac_ext >&5 ac_lo= ac_hi= fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext # Binary search between lo and hi bounds. while test "x$ac_lo" != "x$ac_hi"; do ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo` cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -25050,7 +28038,7 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (unsigned long))) <= $ac_mid)]; +static int test_array [1 - 2 * !(((long) (sizeof (char *))) <= $ac_mid)]; test_array [0] = 0 ; @@ -25059,216 +28047,52 @@ test_array [0] = 0 _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' - { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 - (eval $ac_try) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 - (exit $ac_status); }; }; then - ac_hi=$ac_mid -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -ac_lo=`expr '(' $ac_mid ')' + 1` -fi -rm -f conftest.$ac_objext conftest.$ac_ext -done -case $ac_lo in -?*) ac_cv_sizeof_unsigned_long=$ac_lo;; -'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned long), 77 -See \`config.log' for more details." >&5 -echo "$as_me: error: cannot compute sizeof (unsigned long), 77 -See \`config.log' for more details." >&2;} - { (exit 1); exit 1; }; } ;; -esac -else - if test "$cross_compiling" = yes; then - { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling -See \`config.log' for more details." >&5 -echo "$as_me: error: cannot run test program while cross compiling -See \`config.log' for more details." >&2;} - { (exit 1); exit 1; }; } -else - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -$db_includes - -long longval () { return (long) (sizeof (unsigned long)); } -unsigned long ulongval () { return (long) (sizeof (unsigned long)); } -#include -#include -int -main () -{ - - FILE *f = fopen ("conftest.val", "w"); - if (! f) - exit (1); - if (((long) (sizeof (unsigned long))) < 0) - { - long i = longval (); - if (i != ((long) (sizeof (unsigned long)))) - exit (1); - fprintf (f, "%ld\n", i); - } - else - { - unsigned long i = ulongval (); - if (i != ((long) (sizeof (unsigned long)))) - exit (1); - fprintf (f, "%lu\n", i); - } - exit (ferror (f) || fclose (f) != 0); - - ; - return 0; -} -_ACEOF -rm -f conftest$ac_exeext -if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); }; }; then - ac_cv_sizeof_unsigned_long=`cat conftest.val` -else - echo "$as_me: program exited with status $ac_status" >&5 -echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -( exit $ac_status ) -{ { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned long), 77 -See \`config.log' for more details." >&5 -echo "$as_me: error: cannot compute sizeof (unsigned long), 77 -See \`config.log' for more details." >&2;} - { (exit 1); exit 1; }; } -fi -rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext -fi -fi -rm -f conftest.val -else - ac_cv_sizeof_unsigned_long=0 -fi -fi -echo "$as_me:$LINENO: result: $ac_cv_sizeof_unsigned_long" >&5 -echo "${ECHO_T}$ac_cv_sizeof_unsigned_long" >&6 -cat >>confdefs.h <<_ACEOF -#define SIZEOF_UNSIGNED_LONG $ac_cv_sizeof_unsigned_long -_ACEOF - - -echo "$as_me:$LINENO: checking for size_t" >&5 -echo $ECHO_N "checking for size_t... $ECHO_C" >&6 -if test "${ac_cv_type_size_t+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. 
*/ -$db_includes - -int -main () -{ -if ((size_t *) 0) - return 0; -if (sizeof (size_t)) - return 0; - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_type_size_t=yes + ac_hi=$ac_mid else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_type_size_t=no -fi -rm -f conftest.$ac_objext conftest.$ac_ext +ac_lo=`expr '(' $ac_mid ')' + 1` fi -echo "$as_me:$LINENO: result: $ac_cv_type_size_t" >&5 -echo "${ECHO_T}$ac_cv_type_size_t" >&6 - -echo "$as_me:$LINENO: checking size of size_t" >&5 -echo $ECHO_N "checking size of size_t... $ECHO_C" >&6 -if test "${ac_cv_sizeof_size_t+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - if test "$ac_cv_type_size_t" = yes; then - # The cast to unsigned long works around a bug in the HP C Compiler - # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects - # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. - # This bug is HP SR number 8606223364. - if test "$cross_compiling" = yes; then - # Depending upon the size, compute the lo and hi bounds. -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -$db_includes - -int -main () -{ -static int test_array [1 - 2 * !(((long) (sizeof (size_t))) >= 0)]; -test_array [0] = 0 - - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' - { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 - (eval $ac_try) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); }; }; then - ac_lo=0 ac_mid=0 - while :; do - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +done +case $ac_lo in +?*) ac_cv_sizeof_char_p=$ac_lo;; +'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (char *), 77 +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute sizeof (char *), 77 +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } ;; +esac +else + if test "$cross_compiling" = yes; then + { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot run test program while cross compiling +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } +else + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -25276,48 +28100,86 @@ cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $db_includes +long longval () { return (long) (sizeof (char *)); } +unsigned long ulongval () { return (long) (sizeof (char *)); } +#include +#include int main () { -static int test_array [1 - 2 * !(((long) (sizeof (size_t))) <= $ac_mid)]; -test_array [0] = 0 + + FILE *f = fopen ("conftest.val", "w"); + if (! 
f) + exit (1); + if (((long) (sizeof (char *))) < 0) + { + long i = longval (); + if (i != ((long) (sizeof (char *)))) + exit (1); + fprintf (f, "%ld\n", i); + } + else + { + unsigned long i = ulongval (); + if (i != ((long) (sizeof (char *)))) + exit (1); + fprintf (f, "%lu\n", i); + } + exit (ferror (f) || fclose (f) != 0); ; return 0; } _ACEOF -rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 +rm -f conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_hi=$ac_mid; break + ac_cv_sizeof_char_p=`cat conftest.val` else - echo "$as_me: failed program was:" >&5 + echo "$as_me: program exited with status $ac_status" >&5 +echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_lo=`expr $ac_mid + 1` - if test $ac_lo -le $ac_mid; then - ac_lo= ac_hi= - break - fi - ac_mid=`expr 2 '*' $ac_mid + 1` +( exit $ac_status ) +{ { echo "$as_me:$LINENO: error: cannot compute sizeof (char *), 77 +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute sizeof (char *), 77 +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } fi -rm -f conftest.$ac_objext conftest.$ac_ext - done +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +fi +fi +rm -f conftest.val else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 + ac_cv_sizeof_char_p=0 +fi +fi +echo "$as_me:$LINENO: result: $ac_cv_sizeof_char_p" >&5 +echo "${ECHO_T}$ac_cv_sizeof_char_p" >&6 +cat >>confdefs.h <<_ACEOF +#define SIZEOF_CHAR_P $ac_cv_sizeof_char_p +_ACEOF -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + + +# We look for u_char, u_short, u_int, u_long -- if we can't find them, +# we create our own. + +echo "$as_me:$LINENO: checking for u_char" >&5 +echo $ECHO_N "checking for u_char... $ECHO_C" >&6 +if test "${ac_cv_type_u_char+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -25328,29 +28190,61 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (size_t))) < 0)]; -test_array [0] = 0 - +if ((u_char *) 0) + return 0; +if (sizeof (u_char)) + return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then - ac_hi=-1 ac_mid=-1 - while :; do - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + ac_cv_type_u_char=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_type_u_char=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_type_u_char" >&5 +echo "${ECHO_T}$ac_cv_type_u_char" >&6 +if test $ac_cv_type_u_char = yes; then + : +else + u_char_decl="typedef unsigned char u_char;" +fi + + + +echo "$as_me:$LINENO: checking for u_short" >&5 +echo $ECHO_N "checking for u_short... $ECHO_C" >&6 +if test "${ac_cv_type_u_short+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -25361,53 +28255,61 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (size_t))) >= $ac_mid)]; -test_array [0] = 0 - +if ((u_short *) 0) + return 0; +if (sizeof (u_short)) + return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_lo=$ac_mid; break + ac_cv_type_u_short=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_hi=`expr '(' $ac_mid ')' - 1` - if test $ac_mid -le $ac_hi; then - ac_lo= ac_hi= - break - fi - ac_mid=`expr 2 '*' $ac_mid` +ac_cv_type_u_short=no fi -rm -f conftest.$ac_objext conftest.$ac_ext - done -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -ac_lo= ac_hi= +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -rm -f conftest.$ac_objext conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_cv_type_u_short" >&5 +echo "${ECHO_T}$ac_cv_type_u_short" >&6 +if test $ac_cv_type_u_short = yes; then + : +else + u_short_decl="typedef unsigned short u_short;" fi -rm -f conftest.$ac_objext conftest.$ac_ext -# Binary search between lo and hi bounds. -while test "x$ac_lo" != "x$ac_hi"; do - ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo` + + + +echo "$as_me:$LINENO: checking for u_int" >&5 +echo $ECHO_N "checking for u_int... $ECHO_C" >&6 +if test "${ac_cv_type_u_int+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -25418,52 +28320,61 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (size_t))) <= $ac_mid)]; -test_array [0] = 0 - +if ((u_int *) 0) + return 0; +if (sizeof (u_int)) + return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_hi=$ac_mid + ac_cv_type_u_int=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_lo=`expr '(' $ac_mid ')' + 1` +ac_cv_type_u_int=no fi -rm -f conftest.$ac_objext conftest.$ac_ext -done -case $ac_lo in -?*) ac_cv_sizeof_size_t=$ac_lo;; -'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (size_t), 77 -See \`config.log' for more details." >&5 -echo "$as_me: error: cannot compute sizeof (size_t), 77 -See \`config.log' for more details." >&2;} - { (exit 1); exit 1; }; } ;; -esac +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_type_u_int" >&5 +echo "${ECHO_T}$ac_cv_type_u_int" >&6 +if test $ac_cv_type_u_int = yes; then + : else - if test "$cross_compiling" = yes; then - { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling -See \`config.log' for more details." >&5 -echo "$as_me: error: cannot run test program while cross compiling -See \`config.log' for more details." >&2;} - { (exit 1); exit 1; }; } + u_int_decl="typedef unsigned int u_int;" +fi + + + +echo "$as_me:$LINENO: checking for u_long" >&5 +echo $ECHO_N "checking for u_long... $ECHO_C" >&6 +if test "${ac_cv_type_u_long+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -25471,83 +28382,65 @@ cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $db_includes -long longval () { return (long) (sizeof (size_t)); } -unsigned long ulongval () { return (long) (sizeof (size_t)); } -#include -#include int main () { - - FILE *f = fopen ("conftest.val", "w"); - if (! f) - exit (1); - if (((long) (sizeof (size_t))) < 0) - { - long i = longval (); - if (i != ((long) (sizeof (size_t)))) - exit (1); - fprintf (f, "%ld\n", i); - } - else - { - unsigned long i = ulongval (); - if (i != ((long) (sizeof (size_t)))) - exit (1); - fprintf (f, "%lu\n", i); - } - exit (ferror (f) || fclose (f) != 0); - +if ((u_long *) 0) + return 0; +if (sizeof (u_long)) + return 0; ; return 0; } _ACEOF -rm -f conftest$ac_exeext -if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? 
echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_sizeof_size_t=`cat conftest.val` + ac_cv_type_u_long=yes else - echo "$as_me: program exited with status $ac_status" >&5 -echo "$as_me: failed program was:" >&5 + echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -( exit $ac_status ) -{ { echo "$as_me:$LINENO: error: cannot compute sizeof (size_t), 77 -See \`config.log' for more details." >&5 -echo "$as_me: error: cannot compute sizeof (size_t), 77 -See \`config.log' for more details." >&2;} - { (exit 1); exit 1; }; } -fi -rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +ac_cv_type_u_long=no fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -rm -f conftest.val +echo "$as_me:$LINENO: result: $ac_cv_type_u_long" >&5 +echo "${ECHO_T}$ac_cv_type_u_long" >&6 +if test $ac_cv_type_u_long = yes; then + : else - ac_cv_sizeof_size_t=0 -fi + u_long_decl="typedef unsigned long u_long;" fi -echo "$as_me:$LINENO: result: $ac_cv_sizeof_size_t" >&5 -echo "${ECHO_T}$ac_cv_sizeof_size_t" >&6 -cat >>confdefs.h <<_ACEOF -#define SIZEOF_SIZE_T $ac_cv_sizeof_size_t -_ACEOF -echo "$as_me:$LINENO: checking for char *" >&5 -echo $ECHO_N "checking for char *... $ECHO_C" >&6 -if test "${ac_cv_type_char_p+set}" = set; then +# We look for fixed-size variants of u_char, u_short, u_int, u_long as well. + +echo "$as_me:$LINENO: checking for u_int8_t" >&5 +echo $ECHO_N "checking for u_int8_t... $ECHO_C" >&6 +if test "${ac_cv_type_u_int8_t+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -25558,9 +28451,9 @@ $db_includes int main () { -if ((char * *) 0) +if ((u_int8_t *) 0) return 0; -if (sizeof (char *)) +if (sizeof (u_int8_t)) return 0; ; return 0; @@ -25568,42 +28461,67 @@ if (sizeof (char *)) _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_type_char_p=yes + ac_cv_type_u_int8_t=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_type_char_p=no +ac_cv_type_u_int8_t=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: $ac_cv_type_char_p" >&5 -echo "${ECHO_T}$ac_cv_type_char_p" >&6 +echo "$as_me:$LINENO: result: $ac_cv_type_u_int8_t" >&5 +echo "${ECHO_T}$ac_cv_type_u_int8_t" >&6 +if test $ac_cv_type_u_int8_t = yes; then + : +else -echo "$as_me:$LINENO: checking size of char *" >&5 -echo $ECHO_N "checking size of char *... 
$ECHO_C" >&6 -if test "${ac_cv_sizeof_char_p+set}" = set; then + case "1" in + "$ac_cv_sizeof_unsigned_int") + u_int8_decl="typedef unsigned int u_int8_t;";; + "$ac_cv_sizeof_unsigned_char") + u_int8_decl="typedef unsigned char u_int8_t;";; + "$ac_cv_sizeof_unsigned_short") + u_int8_decl="typedef unsigned short u_int8_t;";; + "$ac_cv_sizeof_unsigned_long") + u_int8_decl="typedef unsigned long u_int8_t;";; + "$ac_cv_sizeof_unsigned_long_long") + u_int8_decl="typedef unsigned long long u_int8_t;";; + *) + { { echo "$as_me:$LINENO: error: No unsigned 1-byte integral type" >&5 +echo "$as_me: error: No unsigned 1-byte integral type" >&2;} + { (exit 1); exit 1; }; };; + esac +fi + + + +echo "$as_me:$LINENO: checking for u_int16_t" >&5 +echo $ECHO_N "checking for u_int16_t... $ECHO_C" >&6 +if test "${ac_cv_type_u_int16_t+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - if test "$ac_cv_type_char_p" = yes; then - # The cast to unsigned long works around a bug in the HP C Compiler - # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects - # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. - # This bug is HP SR number 8606223364. - if test "$cross_compiling" = yes; then - # Depending upon the size, compute the lo and hi bounds. -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -25614,29 +28532,77 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (char *))) >= 0)]; -test_array [0] = 0 - +if ((u_int16_t *) 0) + return 0; +if (sizeof (u_int16_t)) + return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then - ac_lo=0 ac_mid=0 - while :; do - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + ac_cv_type_u_int16_t=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_type_u_int16_t=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_type_u_int16_t" >&5 +echo "${ECHO_T}$ac_cv_type_u_int16_t" >&6 +if test $ac_cv_type_u_int16_t = yes; then + : +else + + case "2" in + "$ac_cv_sizeof_unsigned_int") + u_int16_decl="typedef unsigned int u_int16_t;";; + "$ac_cv_sizeof_unsigned_char") + u_int16_decl="typedef unsigned char u_int16_t;";; + "$ac_cv_sizeof_unsigned_short") + u_int16_decl="typedef unsigned short u_int16_t;";; + "$ac_cv_sizeof_unsigned_long") + u_int16_decl="typedef unsigned long u_int16_t;";; + "$ac_cv_sizeof_unsigned_long_long") + u_int16_decl="typedef unsigned long long u_int16_t;";; + *) + { { echo "$as_me:$LINENO: error: No unsigned 2-byte integral type" >&5 +echo "$as_me: error: No unsigned 2-byte integral type" >&2;} + { (exit 1); exit 1; }; };; + esac +fi + + + +echo "$as_me:$LINENO: checking for int16_t" >&5 +echo $ECHO_N "checking for int16_t... $ECHO_C" >&6 +if test "${ac_cv_type_int16_t+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -25647,45 +28613,77 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (char *))) <= $ac_mid)]; -test_array [0] = 0 - +if ((int16_t *) 0) + return 0; +if (sizeof (int16_t)) + return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then - ac_hi=$ac_mid; break + ac_cv_type_int16_t=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_lo=`expr $ac_mid + 1` - if test $ac_lo -le $ac_mid; then - ac_lo= ac_hi= - break - fi - ac_mid=`expr 2 '*' $ac_mid + 1` +ac_cv_type_int16_t=no fi -rm -f conftest.$ac_objext conftest.$ac_ext - done +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_type_int16_t" >&5 +echo "${ECHO_T}$ac_cv_type_int16_t" >&6 +if test $ac_cv_type_int16_t = yes; then + : else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + case "2" in + "$ac_cv_sizeof_int") + int16_decl="typedef int int16_t;";; + "$ac_cv_sizeof_char") + int16_decl="typedef char int16_t;";; + "$ac_cv_sizeof_short") + int16_decl="typedef short int16_t;";; + "$ac_cv_sizeof_long") + int16_decl="typedef long int16_t;";; + "$ac_cv_sizeof_long_long") + int16_decl="typedef long long int16_t;";; + *) + { { echo "$as_me:$LINENO: error: No signed 2-byte integral type" >&5 +echo "$as_me: error: No signed 2-byte integral type" >&2;} + { (exit 1); exit 1; }; };; + esac +fi + + + +echo "$as_me:$LINENO: checking for u_int32_t" >&5 +echo $ECHO_N "checking for u_int32_t... $ECHO_C" >&6 +if test "${ac_cv_type_u_int32_t+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -25696,29 +28694,77 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (char *))) < 0)]; -test_array [0] = 0 - +if ((u_int32_t *) 0) + return 0; +if (sizeof (u_int32_t)) + return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then - ac_hi=-1 ac_mid=-1 - while :; do - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + ac_cv_type_u_int32_t=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_type_u_int32_t=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_type_u_int32_t" >&5 +echo "${ECHO_T}$ac_cv_type_u_int32_t" >&6 +if test $ac_cv_type_u_int32_t = yes; then + : +else + + case "4" in + "$ac_cv_sizeof_unsigned_int") + u_int32_decl="typedef unsigned int u_int32_t;";; + "$ac_cv_sizeof_unsigned_char") + u_int32_decl="typedef unsigned char u_int32_t;";; + "$ac_cv_sizeof_unsigned_short") + u_int32_decl="typedef unsigned short u_int32_t;";; + "$ac_cv_sizeof_unsigned_long") + u_int32_decl="typedef unsigned long u_int32_t;";; + "$ac_cv_sizeof_unsigned_long_long") + u_int32_decl="typedef unsigned long long u_int32_t;";; + *) + { { echo "$as_me:$LINENO: error: No unsigned 4-byte integral type" >&5 +echo "$as_me: error: No unsigned 4-byte integral type" >&2;} + { (exit 1); exit 1; }; };; + esac +fi + + + +echo "$as_me:$LINENO: checking for int32_t" >&5 +echo $ECHO_N "checking for int32_t... $ECHO_C" >&6 +if test "${ac_cv_type_int32_t+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -25729,53 +28775,77 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (char *))) >= $ac_mid)]; -test_array [0] = 0 - +if ((int32_t *) 0) + return 0; +if (sizeof (int32_t)) + return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then - ac_lo=$ac_mid; break + ac_cv_type_int32_t=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_hi=`expr '(' $ac_mid ')' - 1` - if test $ac_mid -le $ac_hi; then - ac_lo= ac_hi= - break - fi - ac_mid=`expr 2 '*' $ac_mid` +ac_cv_type_int32_t=no fi -rm -f conftest.$ac_objext conftest.$ac_ext - done +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_type_int32_t" >&5 +echo "${ECHO_T}$ac_cv_type_int32_t" >&6 +if test $ac_cv_type_int32_t = yes; then + : else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 -ac_lo= ac_hi= -fi -rm -f conftest.$ac_objext conftest.$ac_ext + case "4" in + "$ac_cv_sizeof_int") + int32_decl="typedef int int32_t;";; + "$ac_cv_sizeof_char") + int32_decl="typedef char int32_t;";; + "$ac_cv_sizeof_short") + int32_decl="typedef short int32_t;";; + "$ac_cv_sizeof_long") + int32_decl="typedef long int32_t;";; + "$ac_cv_sizeof_long_long") + int32_decl="typedef long long int32_t;";; + *) + { { echo "$as_me:$LINENO: error: No signed 4-byte integral type" >&5 +echo "$as_me: error: No signed 4-byte integral type" >&2;} + { (exit 1); exit 1; }; };; + esac fi -rm -f conftest.$ac_objext conftest.$ac_ext -# Binary search between lo and hi bounds. -while test "x$ac_lo" != "x$ac_hi"; do - ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo` + + + +echo "$as_me:$LINENO: checking for u_int64_t" >&5 +echo $ECHO_N "checking for u_int64_t... $ECHO_C" >&6 +if test "${ac_cv_type_u_int64_t+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -25786,52 +28856,77 @@ $db_includes int main () { -static int test_array [1 - 2 * !(((long) (sizeof (char *))) <= $ac_mid)]; -test_array [0] = 0 - +if ((u_int64_t *) 0) + return 0; +if (sizeof (u_int64_t)) + return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_hi=$ac_mid + ac_cv_type_u_int64_t=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_lo=`expr '(' $ac_mid ')' + 1` +ac_cv_type_u_int64_t=no fi -rm -f conftest.$ac_objext conftest.$ac_ext -done -case $ac_lo in -?*) ac_cv_sizeof_char_p=$ac_lo;; -'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (char *), 77 -See \`config.log' for more details." >&5 -echo "$as_me: error: cannot compute sizeof (char *), 77 -See \`config.log' for more details." 
>&2;} - { (exit 1); exit 1; }; } ;; -esac +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_type_u_int64_t" >&5 +echo "${ECHO_T}$ac_cv_type_u_int64_t" >&6 +if test $ac_cv_type_u_int64_t = yes; then + : else - if test "$cross_compiling" = yes; then - { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling -See \`config.log' for more details." >&5 -echo "$as_me: error: cannot run test program while cross compiling -See \`config.log' for more details." >&2;} - { (exit 1); exit 1; }; } + + case "8" in + "$ac_cv_sizeof_int") + u_int64_decl="typedef int u_int64_t;";; + "$ac_cv_sizeof_char") + u_int64_decl="typedef char u_int64_t;";; + "$ac_cv_sizeof_short") + u_int64_decl="typedef short u_int64_t;";; + "$ac_cv_sizeof_long") + u_int64_decl="typedef long u_int64_t;";; + "$ac_cv_sizeof_long_long") + u_int64_decl="typedef long long u_int64_t;";; + *) + { { echo "$as_me:$LINENO: error: No signed 8-byte integral type" >&5 +echo "$as_me: error: No signed 8-byte integral type" >&2;} + { (exit 1); exit 1; }; };; + esac +fi + + + +echo "$as_me:$LINENO: checking for int64_t" >&5 +echo $ECHO_N "checking for int64_t... $ECHO_C" >&6 +if test "${ac_cv_type_int64_t+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -25839,86 +28934,82 @@ cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $db_includes -long longval () { return (long) (sizeof (char *)); } -unsigned long ulongval () { return (long) (sizeof (char *)); } -#include -#include int main () { - - FILE *f = fopen ("conftest.val", "w"); - if (! f) - exit (1); - if (((long) (sizeof (char *))) < 0) - { - long i = longval (); - if (i != ((long) (sizeof (char *)))) - exit (1); - fprintf (f, "%ld\n", i); - } - else - { - unsigned long i = ulongval (); - if (i != ((long) (sizeof (char *)))) - exit (1); - fprintf (f, "%lu\n", i); - } - exit (ferror (f) || fclose (f) != 0); - +if ((int64_t *) 0) + return 0; +if (sizeof (int64_t)) + return 0; ; return 0; } _ACEOF -rm -f conftest$ac_exeext -if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_sizeof_char_p=`cat conftest.val` + ac_cv_type_int64_t=yes else - echo "$as_me: program exited with status $ac_status" >&5 -echo "$as_me: failed program was:" >&5 + echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -( exit $ac_status ) -{ { echo "$as_me:$LINENO: error: cannot compute sizeof (char *), 77 -See \`config.log' for more details." >&5 -echo "$as_me: error: cannot compute sizeof (char *), 77 -See \`config.log' for more details." 
>&2;} - { (exit 1); exit 1; }; } -fi -rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +ac_cv_type_int64_t=no fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -rm -f conftest.val +echo "$as_me:$LINENO: result: $ac_cv_type_int64_t" >&5 +echo "${ECHO_T}$ac_cv_type_int64_t" >&6 +if test $ac_cv_type_int64_t = yes; then + : else - ac_cv_sizeof_char_p=0 -fi + + case "8" in + "$ac_cv_sizeof_int") + int64_decl="typedef int int64_t;";; + "$ac_cv_sizeof_char") + int64_decl="typedef char int64_t;";; + "$ac_cv_sizeof_short") + int64_decl="typedef short int64_t;";; + "$ac_cv_sizeof_long") + int64_decl="typedef long int64_t;";; + "$ac_cv_sizeof_long_long") + int64_decl="typedef long long int64_t;";; + *) + { { echo "$as_me:$LINENO: error: No signed 8-byte integral type" >&5 +echo "$as_me: error: No signed 8-byte integral type" >&2;} + { (exit 1); exit 1; }; };; + esac fi -echo "$as_me:$LINENO: result: $ac_cv_sizeof_char_p" >&5 -echo "${ECHO_T}$ac_cv_sizeof_char_p" >&6 -cat >>confdefs.h <<_ACEOF -#define SIZEOF_CHAR_P $ac_cv_sizeof_char_p -_ACEOF +# Check for ssize_t -- if none exists, find a signed integral type that's +# the same size as a size_t. -# We require off_t and size_t, and we don't try to substitute our own -# if we can't find them. -echo "$as_me:$LINENO: checking for off_t" >&5 -echo $ECHO_N "checking for off_t... $ECHO_C" >&6 -if test "${ac_cv_type_off_t+set}" = set; then +echo "$as_me:$LINENO: checking for ssize_t" >&5 +echo $ECHO_N "checking for ssize_t... $ECHO_C" >&6 +if test "${ac_cv_type_ssize_t+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -25929,9 +29020,9 @@ $db_includes int main () { -if ((off_t *) 0) +if ((ssize_t *) 0) return 0; -if (sizeof (off_t)) +if (sizeof (ssize_t)) return 0; ; return 0; @@ -25939,55 +29030,81 @@ if (sizeof (off_t)) _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_type_off_t=yes + ac_cv_type_ssize_t=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_type_off_t=no +ac_cv_type_ssize_t=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: $ac_cv_type_off_t" >&5 -echo "${ECHO_T}$ac_cv_type_off_t" >&6 -if test $ac_cv_type_off_t = yes; then +echo "$as_me:$LINENO: result: $ac_cv_type_ssize_t" >&5 +echo "${ECHO_T}$ac_cv_type_ssize_t" >&6 +if test $ac_cv_type_ssize_t = yes; then : else - { { echo "$as_me:$LINENO: error: No off_t type." >&5 -echo "$as_me: error: No off_t type." 
>&2;} - { (exit 1); exit 1; }; } + + case "$ac_cv_sizeof_size_t" in + "$ac_cv_sizeof_int") + ssize_t_decl="typedef int ssize_t;";; + "$ac_cv_sizeof_char") + ssize_t_decl="typedef char ssize_t;";; + "$ac_cv_sizeof_short") + ssize_t_decl="typedef short ssize_t;";; + "$ac_cv_sizeof_long") + ssize_t_decl="typedef long ssize_t;";; + "$ac_cv_sizeof_long_long") + ssize_t_decl="typedef long long ssize_t;";; + *) + { { echo "$as_me:$LINENO: error: No signed $ac_cv_sizeof_size_t-byte integral type" >&5 +echo "$as_me: error: No signed $ac_cv_sizeof_size_t-byte integral type" >&2;} + { (exit 1); exit 1; }; };; + esac fi -echo "$as_me:$LINENO: checking for size_t" >&5 -echo $ECHO_N "checking for size_t... $ECHO_C" >&6 -if test "${ac_cv_type_size_t+set}" = set; then + +# Check for uintmax_t -- if none exists, first the largest unsigned integral +# type available. + +echo "$as_me:$LINENO: checking for uintmax_t" >&5 +echo $ECHO_N "checking for uintmax_t... $ECHO_C" >&6 +if test "${ac_cv_type_uintmax_t+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -$db_includes - +$ac_includes_default int main () { -if ((size_t *) 0) +if ((uintmax_t *) 0) return 0; -if (sizeof (size_t)) +if (sizeof (uintmax_t)) return 0; ; return 0; @@ -25995,46 +29112,46 @@ if (sizeof (size_t)) _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_type_size_t=yes + ac_cv_type_uintmax_t=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_type_size_t=no -fi -rm -f conftest.$ac_objext conftest.$ac_ext -fi -echo "$as_me:$LINENO: result: $ac_cv_type_size_t" >&5 -echo "${ECHO_T}$ac_cv_type_size_t" >&6 -if test $ac_cv_type_size_t = yes; then - : -else - { { echo "$as_me:$LINENO: error: No size_t type." >&5 -echo "$as_me: error: No size_t type." >&2;} - { (exit 1); exit 1; }; } -fi - - -# We look for u_char, u_short, u_int, u_long -- if we can't find them, -# we create our own. - -echo "$as_me:$LINENO: checking for u_char" >&5 -echo $ECHO_N "checking for u_char... $ECHO_C" >&6 -if test "${ac_cv_type_u_char+set}" = set; then +ac_cv_type_uintmax_t=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_type_uintmax_t" >&5 +echo "${ECHO_T}$ac_cv_type_uintmax_t" >&6 +if test $ac_cv_type_uintmax_t = yes; then + : +else + echo "$as_me:$LINENO: checking for unsigned long long" >&5 +echo $ECHO_N "checking for unsigned long long... $ECHO_C" >&6 +if test "${ac_cv_type_unsigned_long_long+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. 
*/ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -26045,9 +29162,9 @@ $db_includes int main () { -if ((u_char *) 0) +if ((unsigned long long *) 0) return 0; -if (sizeof (u_char)) +if (sizeof (unsigned long long)) return 0; ; return 0; @@ -26055,55 +29172,67 @@ if (sizeof (u_char)) _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_type_u_char=yes + ac_cv_type_unsigned_long_long=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_type_u_char=no +ac_cv_type_unsigned_long_long=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: $ac_cv_type_u_char" >&5 -echo "${ECHO_T}$ac_cv_type_u_char" >&6 -if test $ac_cv_type_u_char = yes; then - : +echo "$as_me:$LINENO: result: $ac_cv_type_unsigned_long_long" >&5 +echo "${ECHO_T}$ac_cv_type_unsigned_long_long" >&6 +if test $ac_cv_type_unsigned_long_long = yes; then + uintmax_t_decl="typedef unsigned long long uintmax_t;" else - u_char_decl="typedef unsigned char u_char;" + uintmax_t_decl="typedef unsigned long uintmax_t;" fi +fi -echo "$as_me:$LINENO: checking for u_short" >&5 -echo $ECHO_N "checking for u_short... $ECHO_C" >&6 -if test "${ac_cv_type_u_short+set}" = set; then +# Check for uintptr_t -- if none exists, find an integral type which is +# the same size as a pointer. + +echo "$as_me:$LINENO: checking for uintptr_t" >&5 +echo $ECHO_N "checking for uintptr_t... $ECHO_C" >&6 +if test "${ac_cv_type_uintptr_t+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -$db_includes - +$ac_includes_default int main () { -if ((u_short *) 0) +if ((uintptr_t *) 0) return 0; -if (sizeof (u_short)) +if (sizeof (uintptr_t)) return 0; ; return 0; @@ -26111,1228 +29240,1246 @@ if (sizeof (u_short)) _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_type_u_short=yes + ac_cv_type_uintptr_t=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_type_u_short=no +ac_cv_type_uintptr_t=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: $ac_cv_type_u_short" >&5 -echo "${ECHO_T}$ac_cv_type_u_short" >&6 -if test $ac_cv_type_u_short = yes; then +echo "$as_me:$LINENO: result: $ac_cv_type_uintptr_t" >&5 +echo "${ECHO_T}$ac_cv_type_uintptr_t" >&6 +if test $ac_cv_type_uintptr_t = yes; then : else - u_short_decl="typedef unsigned short u_short;" + + case "$ac_cv_sizeof_char_p" in + "$ac_cv_sizeof_unsigned_int") + uintptr_t_decl="typedef unsigned int uintptr_t;";; + "$ac_cv_sizeof_unsigned_char") + uintptr_t_decl="typedef unsigned char uintptr_t;";; + "$ac_cv_sizeof_unsigned_short") + uintptr_t_decl="typedef unsigned short uintptr_t;";; + "$ac_cv_sizeof_unsigned_long") + uintptr_t_decl="typedef unsigned long uintptr_t;";; + "$ac_cv_sizeof_unsigned_long_long") + uintptr_t_decl="typedef unsigned long long uintptr_t;";; + *) + { { echo "$as_me:$LINENO: error: No unsigned $ac_cv_sizeof_char_p-byte integral type" >&5 +echo "$as_me: error: No unsigned $ac_cv_sizeof_char_p-byte integral type" >&2;} + { (exit 1); exit 1; }; };; + esac fi -echo "$as_me:$LINENO: checking for u_int" >&5 -echo $ECHO_N "checking for u_int... $ECHO_C" >&6 -if test "${ac_cv_type_u_int+set}" = set; then +echo "$as_me:$LINENO: checking for ANSI C exit success/failure values" >&5 +echo $ECHO_N "checking for ANSI C exit success/failure values... $ECHO_C" >&6 +if test "${db_cv_exit_defines+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + +cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -$db_includes - +#include int main () { -if ((u_int *) 0) - return 0; -if (sizeof (u_int)) - return 0; +return (EXIT_SUCCESS); ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_type_u_int=yes + db_cv_exit_defines=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_type_u_int=no -fi -rm -f conftest.$ac_objext conftest.$ac_ext +db_cv_exit_defines=no fi -echo "$as_me:$LINENO: result: $ac_cv_type_u_int" >&5 -echo "${ECHO_T}$ac_cv_type_u_int" >&6 -if test $ac_cv_type_u_int = yes; then - : -else - u_int_decl="typedef unsigned int u_int;" +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi +echo "$as_me:$LINENO: result: $db_cv_exit_defines" >&5 +echo "${ECHO_T}$db_cv_exit_defines" >&6 +if test "$db_cv_exit_defines" = "yes"; then + cat >>confdefs.h <<\_ACEOF +#define HAVE_EXIT_SUCCESS 1 +_ACEOF -echo "$as_me:$LINENO: checking for u_long" >&5 -echo $ECHO_N "checking for u_long... $ECHO_C" >&6 -if test "${ac_cv_type_u_long+set}" = set; then +fi + +# Test for various functions/libraries -- do tests that change library values +# first. +# +# The Berkeley DB library calls fdatasync, and it's only available in -lrt on +# Solaris. See if we can find it either without additional libraries or in +# -lrt. If fdatasync is found in -lrt, add -lrt to the Java and Tcl shared +# library link lines. +echo "$as_me:$LINENO: checking for library containing fdatasync" >&5 +echo $ECHO_N "checking for library containing fdatasync... $ECHO_C" >&6 +if test "${ac_cv_search_fdatasync+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + ac_func_search_save_LIBS=$LIBS +ac_cv_search_fdatasync=no +cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -$db_includes +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char fdatasync (); int main () { -if ((u_long *) 0) - return 0; -if (sizeof (u_long)) - return 0; +fdatasync (); ; return 0; } _ACEOF -rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_type_u_long=yes + ac_cv_search_fdatasync="none required" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_type_u_long=no -fi -rm -f conftest.$ac_objext conftest.$ac_ext -fi -echo "$as_me:$LINENO: result: $ac_cv_type_u_long" >&5 -echo "${ECHO_T}$ac_cv_type_u_long" >&6 -if test $ac_cv_type_u_long = yes; then - : -else - u_long_decl="typedef unsigned long u_long;" fi - - - -echo "$as_me:$LINENO: checking for u_int8_t" >&5 -echo $ECHO_N "checking for u_int8_t... 
$ECHO_C" >&6 -if test "${ac_cv_type_u_int8_t+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +if test "$ac_cv_search_fdatasync" = no; then + for ac_lib in rt; do + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -$db_includes +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char fdatasync (); int main () { -if ((u_int8_t *) 0) - return 0; -if (sizeof (u_int8_t)) - return 0; +fdatasync (); ; return 0; } _ACEOF -rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_type_u_int8_t=yes + ac_cv_search_fdatasync="-l$ac_lib" +break else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_type_u_int8_t=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + done fi -echo "$as_me:$LINENO: result: $ac_cv_type_u_int8_t" >&5 -echo "${ECHO_T}$ac_cv_type_u_int8_t" >&6 -if test $ac_cv_type_u_int8_t = yes; then - : -else - - case "1" in - "$ac_cv_sizeof_unsigned_int") - u_int8_decl="typedef unsigned int u_int8_t;";; - "$ac_cv_sizeof_unsigned_char") - u_int8_decl="typedef unsigned char u_int8_t;";; - "$ac_cv_sizeof_unsigned_short") - u_int8_decl="typedef unsigned short u_int8_t;";; - "$ac_cv_sizeof_unsigned_long") - u_int8_decl="typedef unsigned long u_int8_t;";; - *) - { { echo "$as_me:$LINENO: error: No unsigned 1-byte integral type" >&5 -echo "$as_me: error: No unsigned 1-byte integral type" >&2;} - { (exit 1); exit 1; }; };; - esac +LIBS=$ac_func_search_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_search_fdatasync" >&5 +echo "${ECHO_T}$ac_cv_search_fdatasync" >&6 +if test "$ac_cv_search_fdatasync" != no; then + test "$ac_cv_search_fdatasync" = "none required" || LIBS="$ac_cv_search_fdatasync $LIBS" + if test "$ac_cv_search_fdatasync" != "none required" ; then + LIBJSO_LIBS="$LIBJSO_LIBS -lrt"; + LIBTSO_LIBS="$LIBTSO_LIBS -lrt"; + fi fi - -echo "$as_me:$LINENO: checking for u_int16_t" >&5 -echo $ECHO_N "checking for u_int16_t... $ECHO_C" >&6 -if test "${ac_cv_type_u_int16_t+set}" = set; then +# The test and example programs use the sched_yield function, taken from -lrt +# on Solaris. 
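&6 -if test">

The fdatasync search above and the sched_yield search that follows both come down to linking a one-line call and seeing whether -lrt is needed. A minimal standalone sketch of what such a probe is really after, assuming a POSIX system (the probe.c and data.tmp names are illustrative only, not part of the patch):

    /*
     * Sketch only: exercise the two calls the configure probes look for.
     * On Solaris both fdatasync() and sched_yield() live in librt, so this
     * links as "cc probe.c -lrt"; on most Linux and BSD systems plain libc
     * is enough, which is the "none required" result above.
     */
    #include <fcntl.h>
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            int fd;

            if ((fd = open("data.tmp", O_CREAT | O_RDWR, 0600)) == -1) {
                    perror("open");
                    return (1);
            }
            if (write(fd, "x", 1) != 1 || fdatasync(fd) != 0) {
                    perror("fdatasync");    /* flush file data to stable storage */
                    return (1);
            }
            (void)close(fd);

            (void)sched_yield();            /* voluntarily give up the processor */
            return (0);
    }

When the probe only links with -lrt, the fragment above records that by appending -lrt to LIBJSO_LIBS and LIBTSO_LIBS, so the Java and Tcl shared libraries pick up the same dependency.
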
+echo "$as_me:$LINENO: checking for library containing sched_yield" >&5 +echo $ECHO_N "checking for library containing sched_yield... $ECHO_C" >&6 +if test "${ac_cv_search_sched_yield+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + ac_func_search_save_LIBS=$LIBS +ac_cv_search_sched_yield=no +cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -$db_includes +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char sched_yield (); int main () { -if ((u_int16_t *) 0) - return 0; -if (sizeof (u_int16_t)) - return 0; +sched_yield (); ; return 0; } _ACEOF -rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); }; }; then - ac_cv_type_u_int16_t=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -ac_cv_type_u_int16_t=no -fi -rm -f conftest.$ac_objext conftest.$ac_ext -fi -echo "$as_me:$LINENO: result: $ac_cv_type_u_int16_t" >&5 -echo "${ECHO_T}$ac_cv_type_u_int16_t" >&6 -if test $ac_cv_type_u_int16_t = yes; then - : + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_search_sched_yield="none required" else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 - case "2" in - "$ac_cv_sizeof_unsigned_int") - u_int16_decl="typedef unsigned int u_int16_t;";; - "$ac_cv_sizeof_unsigned_char") - u_int16_decl="typedef unsigned char u_int16_t;";; - "$ac_cv_sizeof_unsigned_short") - u_int16_decl="typedef unsigned short u_int16_t;";; - "$ac_cv_sizeof_unsigned_long") - u_int16_decl="typedef unsigned long u_int16_t;";; - *) - { { echo "$as_me:$LINENO: error: No unsigned 2-byte integral type" >&5 -echo "$as_me: error: No unsigned 2-byte integral type" >&2;} - { (exit 1); exit 1; }; };; - esac fi - - - -echo "$as_me:$LINENO: checking for int16_t" >&5 -echo $ECHO_N "checking for int16_t... $ECHO_C" >&6 -if test "${ac_cv_type_int16_t+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +if test "$ac_cv_search_sched_yield" = no; then + for ac_lib in rt; do + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -$db_includes +/* Override any gcc2 internal prototype to avoid an error. 
*/ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char sched_yield (); int main () { -if ((int16_t *) 0) - return 0; -if (sizeof (int16_t)) - return 0; +sched_yield (); ; return 0; } _ACEOF -rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_type_int16_t=yes + ac_cv_search_sched_yield="-l$ac_lib" +break else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_type_int16_t=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + done +fi +LIBS=$ac_func_search_save_LIBS fi -echo "$as_me:$LINENO: result: $ac_cv_type_int16_t" >&5 -echo "${ECHO_T}$ac_cv_type_int16_t" >&6 -if test $ac_cv_type_int16_t = yes; then - : -else +echo "$as_me:$LINENO: result: $ac_cv_search_sched_yield" >&5 +echo "${ECHO_T}$ac_cv_search_sched_yield" >&6 +if test "$ac_cv_search_sched_yield" != no; then + test "$ac_cv_search_sched_yield" = "none required" || LIBS="$ac_cv_search_sched_yield $LIBS" - case "2" in - "$ac_cv_sizeof_int") - int16_decl="typedef int int16_t;";; - "$ac_cv_sizeof_char") - int16_decl="typedef char int16_t;";; - "$ac_cv_sizeof_short") - int16_decl="typedef short int16_t;";; - "$ac_cv_sizeof_long") - int16_decl="typedef long int16_t;";; - *) - { { echo "$as_me:$LINENO: error: No signed 2-byte integral type" >&5 -echo "$as_me: error: No signed 2-byte integral type" >&2;} - { (exit 1); exit 1; }; };; - esac fi - -echo "$as_me:$LINENO: checking for u_int32_t" >&5 -echo $ECHO_N "checking for u_int32_t... $ECHO_C" >&6 -if test "${ac_cv_type_u_int32_t+set}" = set; then +# !!! +# We can't check for pthreads in the same way we did the test for sched_yield +# because the Solaris C library includes pthread interfaces which are not +# inter-process safe. For that reason we always add -lpthread if we find a +# pthread library. +# +# We can't depend on any specific call existing (pthread_create, for example), +# as it may be #defined in an include file -- OSF/1 (Tru64) has this problem. +echo "$as_me:$LINENO: checking for main in -lpthread" >&5 +echo $ECHO_N "checking for main in -lpthread... $ECHO_C" >&6 +if test "${ac_cv_lib_pthread_main+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + ac_check_lib_save_LIBS=$LIBS +LIBS="-lpthread $LIBS" +cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ -$db_includes + int main () { -if ((u_int32_t *) 0) - return 0; -if (sizeof (u_int32_t)) - return 0; +main (); ; return 0; } _ACEOF -rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_type_u_int32_t=yes + ac_cv_lib_pthread_main=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_type_u_int32_t=no +ac_cv_lib_pthread_main=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS fi -echo "$as_me:$LINENO: result: $ac_cv_type_u_int32_t" >&5 -echo "${ECHO_T}$ac_cv_type_u_int32_t" >&6 -if test $ac_cv_type_u_int32_t = yes; then - : -else - - case "4" in - "$ac_cv_sizeof_unsigned_int") - u_int32_decl="typedef unsigned int u_int32_t;";; - "$ac_cv_sizeof_unsigned_char") - u_int32_decl="typedef unsigned char u_int32_t;";; - "$ac_cv_sizeof_unsigned_short") - u_int32_decl="typedef unsigned short u_int32_t;";; - "$ac_cv_sizeof_unsigned_long") - u_int32_decl="typedef unsigned long u_int32_t;";; - *) - { { echo "$as_me:$LINENO: error: No unsigned 4-byte integral type" >&5 -echo "$as_me: error: No unsigned 4-byte integral type" >&2;} - { (exit 1); exit 1; }; };; - esac +echo "$as_me:$LINENO: result: $ac_cv_lib_pthread_main" >&5 +echo "${ECHO_T}$ac_cv_lib_pthread_main" >&6 +if test $ac_cv_lib_pthread_main = yes; then + TEST_LIBS="$TEST_LIBS -lpthread" fi +ac_cv_lib_pthread=ac_cv_lib_pthread_main - -echo "$as_me:$LINENO: checking for int32_t" >&5 -echo $ECHO_N "checking for int32_t... $ECHO_C" >&6 -if test "${ac_cv_type_int32_t+set}" = set; then +# !!! +# We could be more exact about whether these libraries are needed, but don't +# bother -- if they exist, we load them, it's only the test programs anyway. +echo "$as_me:$LINENO: checking for main in -lm" >&5 +echo $ECHO_N "checking for main in -lm... $ECHO_C" >&6 +if test "${ac_cv_lib_m_main+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + ac_check_lib_save_LIBS=$LIBS +LIBS="-lm $LIBS" +cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -$db_includes + int main () { -if ((int32_t *) 0) - return 0; -if (sizeof (int32_t)) - return 0; +main (); ; return 0; } _ACEOF -rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_type_int32_t=yes + ac_cv_lib_m_main=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_type_int32_t=no +ac_cv_lib_m_main=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS fi -echo "$as_me:$LINENO: result: $ac_cv_type_int32_t" >&5 -echo "${ECHO_T}$ac_cv_type_int32_t" >&6 -if test $ac_cv_type_int32_t = yes; then - : -else - - case "4" in - "$ac_cv_sizeof_int") - int32_decl="typedef int int32_t;";; - "$ac_cv_sizeof_char") - int32_decl="typedef char int32_t;";; - "$ac_cv_sizeof_short") - int32_decl="typedef short int32_t;";; - "$ac_cv_sizeof_long") - int32_decl="typedef long int32_t;";; - *) - { { echo "$as_me:$LINENO: error: No signed 4-byte integral type" >&5 -echo "$as_me: error: No signed 4-byte integral type" >&2;} - { (exit 1); exit 1; }; };; - esac +echo "$as_me:$LINENO: result: $ac_cv_lib_m_main" >&5 +echo "${ECHO_T}$ac_cv_lib_m_main" >&6 +if test $ac_cv_lib_m_main = yes; then + TEST_LIBS="$TEST_LIBS -lm" fi +ac_cv_lib_m=ac_cv_lib_m_main - -# Check for ssize_t -- if none exists, find a signed integral type that's -# the same size as a size_t. - -echo "$as_me:$LINENO: checking for ssize_t" >&5 -echo $ECHO_N "checking for ssize_t... $ECHO_C" >&6 -if test "${ac_cv_type_ssize_t+set}" = set; then +echo "$as_me:$LINENO: checking for main in -lsocket" >&5 +echo $ECHO_N "checking for main in -lsocket... $ECHO_C" >&6 +if test "${ac_cv_lib_socket_main+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsocket $LIBS" +cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -$db_includes + int main () { -if ((ssize_t *) 0) - return 0; -if (sizeof (ssize_t)) - return 0; +main (); ; return 0; } _ACEOF -rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_type_ssize_t=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -ac_cv_type_ssize_t=no -fi -rm -f conftest.$ac_objext conftest.$ac_ext -fi -echo "$as_me:$LINENO: result: $ac_cv_type_ssize_t" >&5 -echo "${ECHO_T}$ac_cv_type_ssize_t" >&6 -if test $ac_cv_type_ssize_t = yes; then - : + ac_cv_lib_socket_main=yes else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 - case "$ac_cv_sizeof_size_t" in - "$ac_cv_sizeof_int") - ssize_t_decl="typedef int ssize_t;";; - "$ac_cv_sizeof_char") - ssize_t_decl="typedef char ssize_t;";; - "$ac_cv_sizeof_short") - ssize_t_decl="typedef short ssize_t;";; - "$ac_cv_sizeof_long") - ssize_t_decl="typedef long ssize_t;";; - *) - { { echo "$as_me:$LINENO: error: No signed $ac_cv_sizeof_size_t-byte integral type" >&5 -echo "$as_me: error: No signed $ac_cv_sizeof_size_t-byte integral type" >&2;} - { (exit 1); exit 1; }; };; - esac +ac_cv_lib_socket_main=no fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_socket_main" >&5 +echo "${ECHO_T}$ac_cv_lib_socket_main" >&6 +if test $ac_cv_lib_socket_main = yes; then + TEST_LIBS="$TEST_LIBS -lsocket" +fi +ac_cv_lib_socket=ac_cv_lib_socket_main - -# Find the largest integral type. - -echo "$as_me:$LINENO: checking for unsigned long long" >&5 -echo $ECHO_N "checking for unsigned long long... $ECHO_C" >&6 -if test "${ac_cv_type_unsigned_long_long+set}" = set; then +echo "$as_me:$LINENO: checking for main in -lnsl" >&5 +echo $ECHO_N "checking for main in -lnsl... $ECHO_C" >&6 +if test "${ac_cv_lib_nsl_main+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + ac_check_lib_save_LIBS=$LIBS +LIBS="-lnsl $LIBS" +cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -$db_includes + int main () { -if ((unsigned long long *) 0) - return 0; -if (sizeof (unsigned long long)) - return 0; +main (); ; return 0; } _ACEOF -rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_type_unsigned_long_long=yes + ac_cv_lib_nsl_main=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_type_unsigned_long_long=no +ac_cv_lib_nsl_main=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS fi -echo "$as_me:$LINENO: result: $ac_cv_type_unsigned_long_long" >&5 -echo "${ECHO_T}$ac_cv_type_unsigned_long_long" >&6 -if test $ac_cv_type_unsigned_long_long = yes; then - db_align_t_decl="typedef unsigned long long db_align_t;" -else - db_align_t_decl="typedef unsigned long db_align_t;" +echo "$as_me:$LINENO: result: $ac_cv_lib_nsl_main" >&5 +echo "${ECHO_T}$ac_cv_lib_nsl_main" >&6 +if test $ac_cv_lib_nsl_main = yes; then + TEST_LIBS="$TEST_LIBS -lnsl" fi +ac_cv_lib_nsl=ac_cv_lib_nsl_main -# Find an integral type which is the same size as a pointer. +# Check for mutexes. +# We do this here because it changes $LIBS. - case "$ac_cv_sizeof_char_p" in - "$ac_cv_sizeof_unsigned_int") - db_alignp_t_decl="typedef unsigned int db_alignp_t;";; - "$ac_cv_sizeof_unsigned_char") - db_alignp_t_decl="typedef unsigned char db_alignp_t;";; - "$ac_cv_sizeof_unsigned_short") - db_alignp_t_decl="typedef unsigned short db_alignp_t;";; - "$ac_cv_sizeof_unsigned_long") - db_alignp_t_decl="typedef unsigned long db_alignp_t;";; - *) - { { echo "$as_me:$LINENO: error: No unsigned $ac_cv_sizeof_char_p-byte integral type" >&5 -echo "$as_me: error: No unsigned $ac_cv_sizeof_char_p-byte integral type" >&2;} - { (exit 1); exit 1; }; };; - esac +# Mutexes we don't test for, but want the #defines to exist for +# other ports. -echo "$as_me:$LINENO: checking for ANSI C exit success/failure values" >&5 -echo $ECHO_N "checking for ANSI C exit success/failure values... $ECHO_C" >&6 -if test "${db_cv_exit_defines+set}" = set; then + + + +echo "$as_me:$LINENO: checking for mutexes" >&5 +echo $ECHO_N "checking for mutexes... $ECHO_C" >&6 +if test "${db_cv_mutex+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else +db_cv_mutex=no + +orig_libs=$LIBS + +# User-specified POSIX or UI mutexes. +# +# There are two different reasons to specify mutexes: First, the application +# is already using one type of mutex and doesn't want to mix-and-match (for +# example, on Solaris, which has POSIX, UI and LWP mutexes). Second, the +# applications POSIX pthreads mutexes don't support inter-process locking, +# but the application wants to use them anyway (for example, some Linux and +# *BSD systems). +# +# Test for LWP threads before testing for UI/POSIX threads, we prefer them +# on Solaris. There's a bug in SunOS 5.7 where applications get pwrite, not +# pwrite64, if they load the C library before the appropriate threads library, +# e.g., tclsh using dlopen to load the DB library. By using LWP threads we +# avoid answering lots of user questions, not to mention the bugs. +# +# Otherwise, test for POSIX threads before UI threads. There are Linux systems +# that support a UI compatibility mode, and applications are more likely to be +# written for POSIX threads than UI threads. +# +# Try and link with a threads library if possible. The problem is the Solaris +# C library has UI/POSIX interface stubs, but they're broken, configuring them +# for inter-process mutexes doesn't return an error, but it doesn't work either. 
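&5 (exit">

The "inter-process locking" the comment above is worried about is the PTHREAD_PROCESS_SHARED capability: a mutex placed in memory visible to more than one process, lockable from any of them. A minimal sketch of that usage, assuming PTHREAD_PROCESS_SHARED and MAP_ANON are available (neither is guaranteed everywhere, which is exactly what the probes below have to discover):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int
    main(void)
    {
            pthread_mutex_t *mp;
            pthread_mutexattr_t attr;

            /* Map a region both the parent and a forked child will share. */
            mp = mmap(NULL, sizeof(*mp), PROT_READ | PROT_WRITE,
                MAP_ANON | MAP_SHARED, -1, 0);
            if (mp == MAP_FAILED)
                    return (EXIT_FAILURE);

            if (pthread_mutexattr_init(&attr) != 0 ||
                pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED) != 0 ||
                pthread_mutex_init(mp, &attr) != 0)
                    return (EXIT_FAILURE);  /* no usable inter-process mutexes */

            if (fork() == 0) {              /* child locks the same object */
                    pthread_mutex_lock(mp);
                    printf("child holds the shared mutex\n");
                    pthread_mutex_unlock(mp);
                    _exit(0);
            }
            pthread_mutex_lock(mp);
            printf("parent holds the shared mutex\n");
            pthread_mutex_unlock(mp);
            (void)wait(NULL);

            return (EXIT_SUCCESS);
    }

The probes that follow run essentially this sequence of pthread_* calls under different link lines (with and without -lpthread, and with the setpshared steps dropped for the "private" variants) and record the outcome in db_cv_mutex.
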
+if test "$db_cv_posixmutexes" = yes; then + db_cv_mutex="posix_only"; +fi +if test "$db_cv_uimutexes" = yes; then + db_cv_mutex="ui_only"; +fi + +# User-specified Win32 mutexes (MinGW build) +if test "$db_cv_mingw" = "yes"; then + db_cv_mutex=win32/gcc +fi + +# LWP threads: _lwp_XXX +if test "$db_cv_mutex" = no; then cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -#include + +#include int main () { -return (EXIT_SUCCESS); + + static lwp_mutex_t mi = SHAREDMUTEX; + static lwp_cond_t ci = SHAREDCV; + lwp_mutex_t mutex = mi; + lwp_cond_t cond = ci; + exit ( + _lwp_mutex_lock(&mutex) || + _lwp_mutex_unlock(&mutex)); + ; return 0; } _ACEOF -rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_exit_defines=yes + db_cv_mutex="Solaris/lwp" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -db_cv_exit_defines=no fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: $db_cv_exit_defines" >&5 -echo "${ECHO_T}$db_cv_exit_defines" >&6 -if test "$db_cv_exit_defines" = "yes"; then - cat >>confdefs.h <<\_ACEOF -#define HAVE_EXIT_SUCCESS 1 -_ACEOF - +# POSIX.1 pthreads: pthread_XXX +# +# If the user specified we use POSIX pthreads mutexes, and we fail to find the +# full interface, try and configure for just intra-process support. +if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "posix_only"; then + LIBS="$LIBS -lpthread" -fi - -# Test for various functions/libraries that the test and example programs use: -# sched_yield function -# pthreads, socket and math libraries -echo "$as_me:$LINENO: checking for sched_yield" >&5 -echo $ECHO_N "checking for sched_yield... $ECHO_C" >&6 -if test "${ac_cv_func_sched_yield+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else +if test "$cross_compiling" = yes; then cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -/* System header to define __stub macros and hopefully few prototypes, - which can conflict with char sched_yield (); below. - Prefer to if __STDC__ is defined, since - exists even on freestanding compilers. */ -#ifdef __STDC__ -# include -#else -# include -#endif -/* Override any gcc2 internal prototype to avoid an error. */ -#ifdef __cplusplus -extern "C" -{ -#endif -/* We use char because int might match the return type of a gcc2 - builtin and then its argument prototype would still apply. 
*/ -char sched_yield (); -/* The GNU C library defines this for functions which it implements - to always fail with ENOSYS. Some functions are actually named - something starting with __ and the normal name is an alias. */ -#if defined (__stub_sched_yield) || defined (__stub___sched_yield) -choke me -#else -char (*f) () = sched_yield; -#endif -#ifdef __cplusplus -} -#endif +#include int main () { -return f != sched_yield; + + pthread_cond_t cond; + pthread_mutex_t mutex; + pthread_condattr_t condattr; + pthread_mutexattr_t mutexattr; + exit ( + pthread_condattr_init(&condattr) || + pthread_condattr_setpshared(&condattr, PTHREAD_PROCESS_SHARED) || + pthread_mutexattr_init(&mutexattr) || + pthread_mutexattr_setpshared(&mutexattr, PTHREAD_PROCESS_SHARED) || + pthread_cond_init(&cond, &condattr) || + pthread_mutex_init(&mutex, &mutexattr) || + pthread_mutex_lock(&mutex) || + pthread_mutex_unlock(&mutex) || + pthread_mutex_destroy(&mutex) || + pthread_cond_destroy(&cond) || + pthread_condattr_destroy(&condattr) || + pthread_mutexattr_destroy(&mutexattr)); + ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_func_sched_yield=yes + db_cv_mutex=""POSIX/pthreads/library"" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_func_sched_yield=no -fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: $ac_cv_func_sched_yield" >&5 -echo "${ECHO_T}$ac_cv_func_sched_yield" >&6 -if test $ac_cv_func_sched_yield = yes; then - : -else - echo "$as_me:$LINENO: checking for library containing sched_yield" >&5 -echo $ECHO_N "checking for library containing sched_yield... $ECHO_C" >&6 -if test "${ac_cv_search_sched_yield+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext else - ac_func_search_save_LIBS=$LIBS -ac_cv_search_sched_yield=no -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -/* Override any gcc2 internal prototype to avoid an error. */ -#ifdef __cplusplus -extern "C" -#endif -/* We use char because int might match the return type of a gcc2 - builtin and then its argument prototype would still apply. 
*/ -char sched_yield (); -int -main () -{ -sched_yield (); - ; - return 0; +#include +main() { + pthread_cond_t cond; + pthread_mutex_t mutex; + pthread_condattr_t condattr; + pthread_mutexattr_t mutexattr; + exit ( + pthread_condattr_init(&condattr) || + pthread_condattr_setpshared(&condattr, PTHREAD_PROCESS_SHARED) || + pthread_mutexattr_init(&mutexattr) || + pthread_mutexattr_setpshared(&mutexattr, PTHREAD_PROCESS_SHARED) || + pthread_cond_init(&cond, &condattr) || + pthread_mutex_init(&mutex, &mutexattr) || + pthread_mutex_lock(&mutex) || + pthread_mutex_unlock(&mutex) || + pthread_mutex_destroy(&mutex) || + pthread_cond_destroy(&cond) || + pthread_condattr_destroy(&condattr) || + pthread_mutexattr_destroy(&mutexattr)); } _ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext +rm -f conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 (eval $ac_link) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_search_sched_yield="none required" + db_cv_mutex=""POSIX/pthreads/library"" else - echo "$as_me: failed program was:" >&5 + echo "$as_me: program exited with status $ac_status" >&5 +echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext -if test "$ac_cv_search_sched_yield" = no; then - for ac_lib in rt; do - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +fi + LIBS="$orig_libs" +fi +if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "posix_only"; then + +if test "$cross_compiling" = yes; then + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -/* Override any gcc2 internal prototype to avoid an error. */ -#ifdef __cplusplus -extern "C" -#endif -/* We use char because int might match the return type of a gcc2 - builtin and then its argument prototype would still apply. */ -char sched_yield (); +#include int main () { -sched_yield (); + + pthread_cond_t cond; + pthread_mutex_t mutex; + pthread_condattr_t condattr; + pthread_mutexattr_t mutexattr; + exit ( + pthread_condattr_init(&condattr) || + pthread_condattr_setpshared(&condattr, PTHREAD_PROCESS_SHARED) || + pthread_mutexattr_init(&mutexattr) || + pthread_mutexattr_setpshared(&mutexattr, PTHREAD_PROCESS_SHARED) || + pthread_cond_init(&cond, &condattr) || + pthread_mutex_init(&mutex, &mutexattr) || + pthread_mutex_lock(&mutex) || + pthread_mutex_unlock(&mutex) || + pthread_mutex_destroy(&mutex) || + pthread_cond_destroy(&cond) || + pthread_condattr_destroy(&condattr) || + pthread_mutexattr_destroy(&mutexattr)); + ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! 
-s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_search_sched_yield="-l$ac_lib" -break + db_cv_mutex=""POSIX/pthreads"" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext - done -fi -LIBS=$ac_func_search_save_LIBS -fi -echo "$as_me:$LINENO: result: $ac_cv_search_sched_yield" >&5 -echo "${ECHO_T}$ac_cv_search_sched_yield" >&6 -if test "$ac_cv_search_sched_yield" != no; then - test "$ac_cv_search_sched_yield" = "none required" || LIBS="$ac_cv_search_sched_yield $LIBS" - LOAD_LIBS="$LOAD_LIBS -lrt" -fi - -fi - - -# XXX -# We can't check for pthreads in the same way we did the test for sched_yield -# because the Solaris C library includes pthread interfaces which are not -# thread-safe. For that reason we always add -lpthread if we find a pthread -# library. Also we can't depend on any specific call existing (pthread_create, -# for example), as it may be #defined in an include file -- OSF/1 (Tru64) has -# this problem. -echo "$as_me:$LINENO: checking for main in -lpthread" >&5 -echo $ECHO_N "checking for main in -lpthread... $ECHO_C" >&6 -if test "${ac_cv_lib_pthread_main+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lpthread $LIBS" -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ - -int -main () -{ -main (); - ; - return 0; +#include +main() { + pthread_cond_t cond; + pthread_mutex_t mutex; + pthread_condattr_t condattr; + pthread_mutexattr_t mutexattr; + exit ( + pthread_condattr_init(&condattr) || + pthread_condattr_setpshared(&condattr, PTHREAD_PROCESS_SHARED) || + pthread_mutexattr_init(&mutexattr) || + pthread_mutexattr_setpshared(&mutexattr, PTHREAD_PROCESS_SHARED) || + pthread_cond_init(&cond, &condattr) || + pthread_mutex_init(&mutex, &mutexattr) || + pthread_mutex_lock(&mutex) || + pthread_mutex_unlock(&mutex) || + pthread_mutex_destroy(&mutex) || + pthread_cond_destroy(&cond) || + pthread_condattr_destroy(&condattr) || + pthread_mutexattr_destroy(&mutexattr)); } _ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext +rm -f conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 (eval $ac_link) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_lib_pthread_main=yes + db_cv_mutex=""POSIX/pthreads"" else - echo "$as_me: failed program was:" >&5 + echo "$as_me: program exited with status $ac_status" >&5 +echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_lib_pthread_main=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: $ac_cv_lib_pthread_main" >&5 -echo "${ECHO_T}$ac_cv_lib_pthread_main" >&6 -if test $ac_cv_lib_pthread_main = yes; then - LOAD_LIBS="$LOAD_LIBS -lpthread" fi -ac_cv_lib_pthread=ac_cv_lib_pthread_main - +if test "$db_cv_mutex" = "posix_only"; then -# XXX -# We could be more exact about whether these libraries are needed, but we don't -# bother -- if they exist, we load them. -echo "$as_me:$LINENO: checking for main in -lm" >&5 -echo $ECHO_N "checking for main in -lm... $ECHO_C" >&6 -if test "${ac_cv_lib_m_main+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lm $LIBS" -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" +if test "$cross_compiling" = yes; then + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ - +#include int main () { -main (); + + pthread_cond_t cond; + pthread_mutex_t mutex; + pthread_condattr_t condattr; + pthread_mutexattr_t mutexattr; + exit ( + pthread_condattr_init(&condattr) || + pthread_mutexattr_init(&mutexattr) || + pthread_cond_init(&cond, &condattr) || + pthread_mutex_init(&mutex, &mutexattr) || + pthread_mutex_lock(&mutex) || + pthread_mutex_unlock(&mutex) || + pthread_mutex_destroy(&mutex) || + pthread_cond_destroy(&cond) || + pthread_condattr_destroy(&condattr) || + pthread_mutexattr_destroy(&mutexattr)); + ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_lib_m_main=yes + db_cv_mutex=""POSIX/pthreads/private"" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_lib_m_main=no -fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -echo "$as_me:$LINENO: result: $ac_cv_lib_m_main" >&5 -echo "${ECHO_T}$ac_cv_lib_m_main" >&6 -if test $ac_cv_lib_m_main = yes; then - LOAD_LIBS="$LOAD_LIBS -lm" fi -ac_cv_lib_m=ac_cv_lib_m_main - -echo "$as_me:$LINENO: checking for main in -lsocket" >&5 -echo $ECHO_N "checking for main in -lsocket... 
$ECHO_C" >&6 -if test "${ac_cv_lib_socket_main+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lsocket $LIBS" -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ - -int -main () -{ -main (); - ; - return 0; +#include +main() { + pthread_cond_t cond; + pthread_mutex_t mutex; + pthread_condattr_t condattr; + pthread_mutexattr_t mutexattr; + exit ( + pthread_condattr_init(&condattr) || + pthread_mutexattr_init(&mutexattr) || + pthread_cond_init(&cond, &condattr) || + pthread_mutex_init(&mutex, &mutexattr) || + pthread_mutex_lock(&mutex) || + pthread_mutex_unlock(&mutex) || + pthread_mutex_destroy(&mutex) || + pthread_cond_destroy(&cond) || + pthread_condattr_destroy(&condattr) || + pthread_mutexattr_destroy(&mutexattr)); } _ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext +rm -f conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 (eval $ac_link) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_lib_socket_main=yes + db_cv_mutex=""POSIX/pthreads/private"" else - echo "$as_me: failed program was:" >&5 + echo "$as_me: program exited with status $ac_status" >&5 +echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_lib_socket_main=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: $ac_cv_lib_socket_main" >&5 -echo "${ECHO_T}$ac_cv_lib_socket_main" >&6 -if test $ac_cv_lib_socket_main = yes; then - LOAD_LIBS="$LOAD_LIBS -lsocket" fi -ac_cv_lib_socket=ac_cv_lib_socket_main +if test "$db_cv_mutex" = "posix_only"; then + LIBS="$LIBS -lpthread" -echo "$as_me:$LINENO: checking for main in -lnsl" >&5 -echo $ECHO_N "checking for main in -lnsl... $ECHO_C" >&6 -if test "${ac_cv_lib_nsl_main+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lnsl $LIBS" -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" +if test "$cross_compiling" = yes; then + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ +#include +int +main () +{ + + pthread_cond_t cond; + pthread_mutex_t mutex; + pthread_condattr_t condattr; + pthread_mutexattr_t mutexattr; + exit ( + pthread_condattr_init(&condattr) || + pthread_mutexattr_init(&mutexattr) || + pthread_cond_init(&cond, &condattr) || + pthread_mutex_init(&mutex, &mutexattr) || + pthread_mutex_lock(&mutex) || + pthread_mutex_unlock(&mutex) || + pthread_mutex_destroy(&mutex) || + pthread_cond_destroy(&cond) || + pthread_condattr_destroy(&condattr) || + pthread_mutexattr_destroy(&mutexattr)); -int -main () -{ -main (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_lib_nsl_main=yes + db_cv_mutex=""POSIX/pthreads/library/private"" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -ac_cv_lib_nsl_main=no -fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -echo "$as_me:$LINENO: result: $ac_cv_lib_nsl_main" >&5 -echo "${ECHO_T}$ac_cv_lib_nsl_main" >&6 -if test $ac_cv_lib_nsl_main = yes; then - LOAD_LIBS="$LOAD_LIBS -lnsl" fi -ac_cv_lib_nsl=ac_cv_lib_nsl_main - - -# Check for mutexes. -# We do this here because it changes $LIBS. - - -# Mutexes we don't test for, but want the #defines to exist for -# other ports. - - - - - -echo "$as_me:$LINENO: checking for mutexes" >&5 -echo $ECHO_N "checking for mutexes... $ECHO_C" >&6 -if test "${db_cv_mutex+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext else - -db_cv_mutex=no - -orig_libs=$LIBS - -# User-specified POSIX or UI mutexes. -# -# There are two different reasons to specify mutexes: First, the application -# is already using one type of mutex and doesn't want to mix-and-match (for -# example, on Solaris, which has POSIX, UI and LWP mutexes). Second, the -# applications POSIX pthreads mutexes don't support inter-process locking, -# but the application wants to use them anyway (for example, current Linux -# and *BSD systems). -# -# If we're on Solaris, we insist that -lthread or -lpthread be used. The -# problem is the Solaris C library has UI/POSIX interface stubs, but they're -# broken, configuring them for inter-process mutexes doesn't return an error, -# but it doesn't work either. Otherwise, we try first without the library -# and then with it: there's some information that SCO/UnixWare/OpenUNIX needs -# this. [#4950] -# -# Test for LWP threads before testing for UI/POSIX threads, we prefer them -# on Solaris. There's a bug in SunOS 5.7 where applications get pwrite, not -# pwrite64, if they load the C library before the appropriate threads library, -# e.g., tclsh using dlopen to load the DB library. By using LWP threads we -# avoid answering lots of user questions, not to mention the bugs. -if test "$db_cv_posixmutexes" = yes; then - case "$host_os" in - solaris*) - db_cv_mutex="posix_library_only";; - *) - db_cv_mutex="posix_only";; - esac -fi - -if test "$db_cv_uimutexes" = yes; then - case "$host_os" in - solaris*) - db_cv_mutex="ui_library_only";; - *) - db_cv_mutex="ui_only";; - esac -fi - -# User-specified Win32 mutexes (MinGW build) -if test "$db_cv_mingw" = "yes"; then - db_cv_mutex=win32/gcc -fi - -# LWP threads: _lwp_XXX -if test "$db_cv_mutex" = no; then -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ -#include -int -main () -{ - - static lwp_mutex_t mi = SHAREDMUTEX; - static lwp_cond_t ci = SHAREDCV; - lwp_mutex_t mutex = mi; - lwp_cond_t cond = ci; +#include +main() { + pthread_cond_t cond; + pthread_mutex_t mutex; + pthread_condattr_t condattr; + pthread_mutexattr_t mutexattr; exit ( - _lwp_mutex_lock(&mutex) || - _lwp_mutex_unlock(&mutex)); - - ; - return 0; + pthread_condattr_init(&condattr) || + pthread_mutexattr_init(&mutexattr) || + pthread_cond_init(&cond, &condattr) || + pthread_mutex_init(&mutex, &mutexattr) || + pthread_mutex_lock(&mutex) || + pthread_mutex_unlock(&mutex) || + pthread_mutex_destroy(&mutex) || + pthread_cond_destroy(&cond) || + pthread_condattr_destroy(&condattr) || + pthread_mutexattr_destroy(&mutexattr)); } _ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext +rm -f conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 (eval $ac_link) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex="Solaris/lwp" + db_cv_mutex=""POSIX/pthreads/library/private"" else - echo "$as_me: failed program was:" >&5 + echo "$as_me: program exited with status $ac_status" >&5 +echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +fi + LIBS="$orig_libs" +fi +if test "$db_cv_mutex" = "posix_only"; then + { { echo "$as_me:$LINENO: error: unable to find POSIX 1003.1 mutex interfaces" >&5 +echo "$as_me: error: unable to find POSIX 1003.1 mutex interfaces" >&2;} + { (exit 1); exit 1; }; } fi # UI threads: thr_XXX -# -# Try with and without the -lthread library. if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "ui_only"; then +LIBS="$LIBS -lthread" cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -27360,29 +30507,38 @@ main () _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex="UI/threads" + db_cv_mutex="UI/threads/library" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS="$orig_libs" fi -if test "$db_cv_mutex" = no -o \ - "$db_cv_mutex" = "ui_only" -o "$db_cv_mutex" = "ui_library_only"; then -LIBS="$LIBS -lthread" +if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "ui_only"; then cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -27410,180 +30566,181 @@ main () _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex="UI/threads/library" + db_cv_mutex="UI/threads" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext -LIBS="$orig_libs" +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi -if test "$db_cv_mutex" = "ui_only" -o "$db_cv_mutex" = "ui_library_only"; then +if test "$db_cv_mutex" = "ui_only"; then { { echo "$as_me:$LINENO: error: unable to find UI mutex interfaces" >&5 echo "$as_me: error: unable to find UI mutex interfaces" >&2;} { (exit 1); exit 1; }; } fi -# POSIX.1 pthreads: pthread_XXX -# -# Try with and without the -lpthread library. If the user specified we use -# POSIX pthreads mutexes, and we fail to find the full interface, try and -# configure for just intra-process support. -if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "posix_only"; then - -if test "$cross_compiling" = yes; then - db_cv_mutex="no" -else - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" +# msemaphore: HPPA only +# Try HPPA before general msem test, it needs special alignment. +if test "$db_cv_mutex" = no; then +cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ -#include -main() { - pthread_cond_t cond; - pthread_mutex_t mutex; - pthread_condattr_t condattr; - pthread_mutexattr_t mutexattr; - exit ( - pthread_condattr_init(&condattr) || - pthread_condattr_setpshared(&condattr, PTHREAD_PROCESS_SHARED) || - pthread_mutexattr_init(&mutexattr) || - pthread_mutexattr_setpshared(&mutexattr, PTHREAD_PROCESS_SHARED) || - pthread_cond_init(&cond, &condattr) || - pthread_mutex_init(&mutex, &mutexattr) || - pthread_mutex_lock(&mutex) || - pthread_mutex_unlock(&mutex) || - pthread_mutex_destroy(&mutex) || - pthread_cond_destroy(&cond) || - pthread_condattr_destroy(&condattr) || - pthread_mutexattr_destroy(&mutexattr)); +#include +int +main () +{ + +#if defined(__hppa) + typedef msemaphore tsl_t; + msemaphore x; + msem_init(&x, 0); + msem_lock(&x, 0); + msem_unlock(&x, 0); + exit(0); +#else + FAIL TO COMPILE/LINK +#endif + + ; + return 0; } _ACEOF -rm -f conftest$ac_exeext +rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex=""POSIX/pthreads"" + db_cv_mutex="HP/msem_init" else - echo "$as_me: program exited with status $ac_status" >&5 -echo "$as_me: failed program was:" >&5 + echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext -fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi -if test "$db_cv_mutex" = no -o \ - "$db_cv_mutex" = "posix_only" -o "$db_cv_mutex" = "posix_library_only"; then - LIBS="$LIBS -lpthread" -if test "$cross_compiling" = yes; then - db_cv_mutex="no" -else - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" +# msemaphore: AIX, OSF/1 +if test "$db_cv_mutex" = no; then +cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
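The lines shown as removed at the top of this hunk are the process-shared variant of the pthread probe (its include was stripped as well; again presumably <pthread.h>). What separates a full "POSIX/pthreads" result from the "/private" one is just the pair of *_setpshared() calls. A compact, hypothetical stand-alone check of that capability, not the verbatim conftest program:

/*
 * Hypothetical check for process-shared pthread mutexes -- the feature the
 * removed probe tested via pthread_condattr_setpshared() and
 * pthread_mutexattr_setpshared().
 */
#include <pthread.h>
#include <stdio.h>

int
main(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t mutex;
	int ret;

	ret = pthread_mutexattr_init(&attr) ||
	    pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED) ||
	    pthread_mutex_init(&mutex, &attr);
	printf("process-shared pthread mutexes: %s\n",
	    ret == 0 ? "usable" : "not available");
	return (ret != 0);
}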
*/ -#include -main() { - pthread_cond_t cond; - pthread_mutex_t mutex; - pthread_condattr_t condattr; - pthread_mutexattr_t mutexattr; - exit ( - pthread_condattr_init(&condattr) || - pthread_condattr_setpshared(&condattr, PTHREAD_PROCESS_SHARED) || - pthread_mutexattr_init(&mutexattr) || - pthread_mutexattr_setpshared(&mutexattr, PTHREAD_PROCESS_SHARED) || - pthread_cond_init(&cond, &condattr) || - pthread_mutex_init(&mutex, &mutexattr) || - pthread_mutex_lock(&mutex) || - pthread_mutex_unlock(&mutex) || - pthread_mutex_destroy(&mutex) || - pthread_cond_destroy(&cond) || - pthread_condattr_destroy(&condattr) || - pthread_mutexattr_destroy(&mutexattr)); +#include +#include +int +main () +{ + + typedef msemaphore tsl_t; + msemaphore x; + msem_init(&x, 0); + msem_lock(&x, 0); + msem_unlock(&x, 0); + exit(0); + + ; + return 0; } _ACEOF -rm -f conftest$ac_exeext +rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex=""POSIX/pthreads/library"" + db_cv_mutex="UNIX/msem_init" else - echo "$as_me: program exited with status $ac_status" >&5 -echo "$as_me: failed program was:" >&5 + echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi - LIBS="$orig_libs" -fi -if test "$db_cv_mutex" = "posix_only"; then -if test "$cross_compiling" = yes; then - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" +# ReliantUNIX +if test "$db_cv_mutex" = no; then +LIBS="$LIBS -lmproc" +cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -#include +#include int main () { - pthread_cond_t cond; - pthread_mutex_t mutex; - pthread_condattr_t condattr; - pthread_mutexattr_t mutexattr; - exit ( - pthread_condattr_init(&condattr) || - pthread_mutexattr_init(&mutexattr) || - pthread_cond_init(&cond, &condattr) || - pthread_mutex_init(&mutex, &mutexattr) || - pthread_mutex_lock(&mutex) || - pthread_mutex_unlock(&mutex) || - pthread_mutex_destroy(&mutex) || - pthread_cond_destroy(&cond) || - pthread_condattr_destroy(&condattr) || - pthread_mutexattr_destroy(&mutexattr)); + typedef spinlock_t tsl_t; + spinlock_t x; + initspin(&x, 1); + cspinlock(&x); + spinunlock(&x); ; return 0; @@ -27591,105 +30748,110 @@ main () _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? 
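The msemaphore probes in this hunk (the HPPA-specific one first, then the general AIX/OSF/1 form) also lost their header names; the stripped includes are presumably <sys/types.h> and <sys/mman.h>, where msemaphore and msem_init() are declared. Tidied into a standalone program (stdlib.h added for exit()):

/*
 * Reconstruction of the msemaphore probe; the stripped includes are assumed
 * to be <sys/types.h> and <sys/mman.h>.  It links only on systems that
 * provide the msem_*() calls, which is the point of the test.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <stdlib.h>

int
main(void)
{
	msemaphore x;

	msem_init(&x, 0);
	msem_lock(&x, 0);
	msem_unlock(&x, 0);
	exit(0);
}

The HPPA variant is the same code wrapped in #if defined(__hppa); as the comment in the hunk says, it runs before the generic msem test because HP-UX msemaphores need special alignment.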
= $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex=""POSIX/pthreads/private"" + db_cv_mutex="ReliantUNIX/initspin" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext -else - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS="$orig_libs" +fi + +# SCO: UnixWare has threads in libthread, but OpenServer doesn't. +if test "$db_cv_mutex" = no; then +cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -#include -main() { - pthread_cond_t cond; - pthread_mutex_t mutex; - pthread_condattr_t condattr; - pthread_mutexattr_t mutexattr; - exit ( - pthread_condattr_init(&condattr) || - pthread_mutexattr_init(&mutexattr) || - pthread_cond_init(&cond, &condattr) || - pthread_mutex_init(&mutex, &mutexattr) || - pthread_mutex_lock(&mutex) || - pthread_mutex_unlock(&mutex) || - pthread_mutex_destroy(&mutex) || - pthread_cond_destroy(&cond) || - pthread_condattr_destroy(&condattr) || - pthread_mutexattr_destroy(&mutexattr)); +int +main () +{ + +#if defined(__USLC__) + exit(0); +#else + FAIL TO COMPILE/LINK +#endif + + ; + return 0; } _ACEOF -rm -f conftest$ac_exeext -if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex=""POSIX/pthreads/private"" + db_cv_mutex="SCO/x86/cc-assembly" else - echo "$as_me: program exited with status $ac_status" >&5 -echo "$as_me: failed program was:" >&5 + echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext -fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -if test "$db_cv_mutex" = "posix_only" -o \ - "$db_cv_mutex" = "posix_library_only"; then - LIBS="$LIBS -lpthread" -if test "$cross_compiling" = yes; then - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" +# abilock_t: SGI +if test "$db_cv_mutex" = no; then +cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ -#include +#include int main () { - pthread_cond_t cond; - pthread_mutex_t mutex; - pthread_condattr_t condattr; - pthread_mutexattr_t mutexattr; - exit ( - pthread_condattr_init(&condattr) || - pthread_mutexattr_init(&mutexattr) || - pthread_cond_init(&cond, &condattr) || - pthread_mutex_init(&mutex, &mutexattr) || - pthread_mutex_lock(&mutex) || - pthread_mutex_unlock(&mutex) || - pthread_mutex_destroy(&mutex) || - pthread_cond_destroy(&cond) || - pthread_condattr_destroy(&condattr) || - pthread_mutexattr_destroy(&mutexattr)); + typedef abilock_t tsl_t; + abilock_t x; + init_lock(&x); + acquire_lock(&x); + release_lock(&x); ; return 0; @@ -27697,107 +30859,117 @@ main () _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex=""POSIX/pthreads/library/private"" + db_cv_mutex="SGI/init_lock" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext -else - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi + +# sema_t: Solaris +# The sema_XXX calls do not work on Solaris 5.5. I see no reason to ever +# turn this test on, unless we find some other platform that uses the old +# POSIX.1 interfaces. (I plan to move directly to pthreads on Solaris.) +if test "$db_cv_mutex" = DOESNT_WORK; then +cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -#include -main() { - pthread_cond_t cond; - pthread_mutex_t mutex; - pthread_condattr_t condattr; - pthread_mutexattr_t mutexattr; - exit ( - pthread_condattr_init(&condattr) || - pthread_mutexattr_init(&mutexattr) || - pthread_cond_init(&cond, &condattr) || - pthread_mutex_init(&mutex, &mutexattr) || - pthread_mutex_lock(&mutex) || - pthread_mutex_unlock(&mutex) || - pthread_mutex_destroy(&mutex) || - pthread_cond_destroy(&cond) || - pthread_condattr_destroy(&condattr) || - pthread_mutexattr_destroy(&mutexattr)); +#include +int +main () +{ + + typedef sema_t tsl_t; + sema_t x; + sema_init(&x, 1, USYNC_PROCESS, NULL); + sema_wait(&x); + sema_post(&x); + + ; + return 0; } _ACEOF -rm -f conftest$ac_exeext +rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? 
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex=""POSIX/pthreads/library/private"" + db_cv_mutex="UNIX/sema_init" else - echo "$as_me: program exited with status $ac_status" >&5 -echo "$as_me: failed program was:" >&5 + echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext -fi - LIBS="$orig_libs" -fi - -if test "$db_cv_mutex" = "posix_only" -o \ - "$db_cv_mutex" = "posix_library_only"; then - { { echo "$as_me:$LINENO: error: unable to find POSIX 1003.1 mutex interfaces" >&5 -echo "$as_me: error: unable to find POSIX 1003.1 mutex interfaces" >&2;} - { (exit 1); exit 1; }; } +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi -# msemaphore: HPPA only -# Try HPPA before general msem test, it needs special alignment. +# _lock_try/_lock_clear: Solaris +# On Solaris systems without Pthread or UI mutex interfaces, DB uses the +# undocumented _lock_try _lock_clear function calls instead of either the +# sema_trywait(3T) or sema_wait(3T) function calls. This is because of +# problems in those interfaces in some releases of the Solaris C library. if test "$db_cv_mutex" = no; then cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -#include +#include int main () { -#if defined(__hppa) - typedef msemaphore tsl_t; - msemaphore x; - msem_init(&x, 0); - msem_lock(&x, 0); - msem_unlock(&x, 0); - exit(0); -#else - FAIL TO COMPILE/LINK -#endif + typedef lock_t tsl_t; + lock_t x; + _lock_try(&x); + _lock_clear(&x); ; return 0; @@ -27805,47 +30977,53 @@ main () _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex="HP/msem_init" + db_cv_mutex="Solaris/_lock_try" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi -# msemaphore: AIX, OSF/1 +# _check_lock/_clear_lock: AIX if test "$db_cv_mutex" = no; then cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ -#include -#include +#include int main () { - typedef msemaphore tsl_t; - msemaphore x; - msem_init(&x, 0); - msem_lock(&x, 0); - msem_unlock(&x, 0); - exit(0); + int x; + _check_lock(&x,0,1); + _clear_lock(&x,0); ; return 0; @@ -27853,46 +31031,52 @@ main () _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex="UNIX/msem_init" + db_cv_mutex="AIX/_check_lock" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi -# ReliantUNIX +# _spin_lock_try/_spin_unlock: Apple/Darwin if test "$db_cv_mutex" = no; then -LIBS="$LIBS -lmproc" cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -#include int main () { - typedef spinlock_t tsl_t; - spinlock_t x; - initspin(&x, 1); - cspinlock(&x); - spinunlock(&x); + int x; + _spin_lock_try(&x); + _spin_unlock(&x); ; return 0; @@ -27900,30 +31084,39 @@ main () _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex="ReliantUNIX/initspin" + db_cv_mutex="Darwin/_spin_lock_try" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext -LIBS="$orig_libs" +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi -# SCO: UnixWare has threads in libthread, but OpenServer doesn't. +# Tru64/cc if test "$db_cv_mutex" = no; then cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -27934,7 +31127,7 @@ int main () { -#if defined(__USLC__) +#if defined(__alpha) && defined(__DECC) exit(0); #else FAIL TO COMPILE/LINK @@ -27946,217 +31139,254 @@ main () _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex="SCO/x86/cc-assembly" + db_cv_mutex="Tru64/cc-assembly" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -# abilock_t: SGI +# Alpha/gcc if test "$db_cv_mutex" = no; then cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -#include int main () { - typedef abilock_t tsl_t; - abilock_t x; - init_lock(&x); - acquire_lock(&x); - release_lock(&x); +#if defined(__alpha) && defined(__GNUC__) + exit(0); +#else + FAIL TO COMPILE/LINK +#endif ; return 0; } _ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex="SGI/init_lock" + db_cv_mutex="ALPHA/gcc-assembly" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -# sema_t: Solaris -# The sema_XXX calls do not work on Solaris 5.5. I see no reason to ever -# turn this test on, unless we find some other platform that uses the old -# POSIX.1 interfaces. (I plan to move directly to pthreads on Solaris.) -if test "$db_cv_mutex" = DOESNT_WORK; then +# ARM/gcc: Linux +if test "$db_cv_mutex" = no; then cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -#include int main () { - typedef sema_t tsl_t; - sema_t x; - sema_init(&x, 1, USYNC_PROCESS, NULL); - sema_wait(&x); - sema_post(&x); +#if defined(__arm__) && defined(__GNUC__) + exit(0); +#else + FAIL TO COMPILE/LINK +#endif ; return 0; } _ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex="UNIX/sema_init" + db_cv_mutex="ARM/gcc-assembly" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -# _lock_try/_lock_clear: Solaris -# On Solaris systems without Pthread or UI mutex interfaces, DB uses the -# undocumented _lock_try _lock_clear function calls instead of either the -# sema_trywait(3T) or sema_wait(3T) function calls. This is because of -# problems in those interfaces in some releases of the Solaris C library. +# PaRisc/gcc: HP/UX if test "$db_cv_mutex" = no; then cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -#include int main () { - typedef lock_t tsl_t; - lock_t x; - _lock_try(&x); - _lock_clear(&x); +#if (defined(__hppa) || defined(__hppa__)) && defined(__GNUC__) + exit(0); +#else + FAIL TO COMPILE/LINK +#endif ; return 0; } _ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex="Solaris/_lock_try" + db_cv_mutex="HPPA/gcc-assembly" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -# _check_lock/_clear_lock: AIX +# PPC/gcc: if test "$db_cv_mutex" = no; then cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -#include int main () { - int x; - _check_lock(&x,0,1); - _clear_lock(&x,0); +#if (defined(__powerpc__) || defined(__ppc__)) && defined(__GNUC__) + exit(0); +#else + FAIL TO COMPILE/LINK +#endif ; return 0; } _ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex="AIX/_check_lock" + db_cv_mutex="PPC/gcc-assembly" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -# _spin_lock_try/_spin_unlock: Apple/Darwin +# Sparc/gcc: SunOS, Solaris if test "$db_cv_mutex" = no; then cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -28167,39 +31397,50 @@ int main () { - int x; - _spin_lock_try(&x); - _spin_unlock(&x); +#if defined(__sparc__) && defined(__GNUC__) + exit(0); +#else + FAIL TO COMPILE/LINK +#endif ; return 0; } _ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex="Darwin/_spin_lock_try" + db_cv_mutex="Sparc/gcc-assembly" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -# Tru64/cc +# 68K/gcc: SunOS if test "$db_cv_mutex" = no; then cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -28210,7 +31451,7 @@ int main () { -#if defined(__alpha) && defined(__DECC) +#if (defined(mc68020) || defined(sun3)) && defined(__GNUC__) exit(0); #else FAIL TO COMPILE/LINK @@ -28222,29 +31463,38 @@ main () _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex="Tru64/cc-assembly" + db_cv_mutex="68K/gcc-assembly" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -# Alpha/gcc +# x86/gcc: FreeBSD, NetBSD, BSD/OS, Linux if test "$db_cv_mutex" = no; then cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -28255,7 +31505,7 @@ int main () { -#if defined(__alpha) && defined(__GNUC__) +#if (defined(i386) || defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__) exit(0); #else FAIL TO COMPILE/LINK @@ -28267,29 +31517,38 @@ main () _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex="ALPHA/gcc-assembly" + db_cv_mutex="x86/gcc-assembly" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -# ARM/gcc: Linux +# S390/cc: IBM OS/390 Unix if test "$db_cv_mutex" = no; then cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -28300,7 +31559,7 @@ int main () { -#if defined(__arm__) && defined(__GNUC__) +#if defined(__MVS__) && defined(__IBMC__) exit(0); #else FAIL TO COMPILE/LINK @@ -28312,29 +31571,38 @@ main () _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex="ARM/gcc-assembly" + db_cv_mutex="S390/cc-assembly" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -# PaRisc/gcc: HP/UX +# S390/gcc: Linux if test "$db_cv_mutex" = no; then cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -28345,7 +31613,7 @@ int main () { -#if (defined(__hppa) || defined(__hppa__)) && defined(__GNUC__) +#if defined(__s390__) && defined(__GNUC__) exit(0); #else FAIL TO COMPILE/LINK @@ -28357,29 +31625,38 @@ main () _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex="HPPA/gcc-assembly" + db_cv_mutex="S390/gcc-assembly" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -# PPC/gcc: +# ia86/gcc: Linux if test "$db_cv_mutex" = no; then cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -28390,7 +31667,7 @@ int main () { -#if (defined(__powerpc__) || defined(__ppc__)) && defined(__GNUC__) +#if defined(__ia64) && defined(__GNUC__) exit(0); #else FAIL TO COMPILE/LINK @@ -28402,29 +31679,38 @@ main () _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex="PPC/gcc-assembly" + db_cv_mutex="ia64/gcc-assembly" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi -# Sparc/gcc: SunOS, Solaris +# uts/cc: UTS if test "$db_cv_mutex" = no; then cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. 
*/ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -28435,7 +31721,7 @@ int main () { -#if defined(__sparc__) && defined(__GNUC__) +#if defined(_UTS) exit(0); #else FAIL TO COMPILE/LINK @@ -28447,598 +31733,901 @@ main () _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex="Sparc/gcc-assembly" + db_cv_mutex="UTS/cc-assembly" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -fi -rm -f conftest.$ac_objext conftest.$ac_ext -fi +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi + +# default to UNIX fcntl system call mutexes. +if test "$db_cv_mutex" = no; then + db_cv_mutex="UNIX/fcntl" +fi + +fi +echo "$as_me:$LINENO: result: $db_cv_mutex" >&5 +echo "${ECHO_T}$db_cv_mutex" >&6 + +case "$db_cv_mutex" in +68K/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_68K_GCC_ASSEMBLY 1 +_ACEOF + + +;; +AIX/_check_lock) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_AIX_CHECK_LOCK 1 +_ACEOF + + +;; +Darwin/_spin_lock_try) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_DARWIN_SPIN_LOCK_TRY 1 +_ACEOF + + +;; +ALPHA/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_ALPHA_GCC_ASSEMBLY 1 +_ACEOF + + +;; +ARM/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_ARM_GCC_ASSEMBLY 1 +_ACEOF + + +;; +HP/msem_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_HPPA_MSEM_INIT 1 +_ACEOF + + +;; +HPPA/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_HPPA_GCC_ASSEMBLY 1 +_ACEOF + + +;; +ia64/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_IA64_GCC_ASSEMBLY 1 +_ACEOF + + +;; +POSIX/pthreads) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_PTHREADS 1 +_ACEOF + + +;; +POSIX/pthreads/private) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_PTHREADS 1 +_ACEOF + + + + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_THREAD_ONLY 1 +_ACEOF + + +;; +POSIX/pthreads/library) LIBS="$LIBS -lpthread" + LIBJSO_LIBS="$LIBJSO_LIBS -lpthread" + LIBTSO_LIBS="$LIBTSO_LIBS -lpthread" + ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_PTHREADS 1 +_ACEOF + + +;; +POSIX/pthreads/library/private) + LIBS="$LIBS -lpthread" + LIBJSO_LIBS="$LIBJSO_LIBS -lpthread" + LIBTSO_LIBS="$LIBTSO_LIBS -lpthread" + ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF 
+#define HAVE_MUTEX_PTHREADS 1 +_ACEOF + + + + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_THREAD_ONLY 1 +_ACEOF + + +;; +PPC/gcc-assembly) + ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_PPC_GCC_ASSEMBLY 1 +_ACEOF + + +;; +ReliantUNIX/initspin) LIBS="$LIBS -lmproc" + ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_RELIANTUNIX_INITSPIN 1 +_ACEOF + + +;; +S390/cc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_S390_CC_ASSEMBLY 1 +_ACEOF + + +;; +S390/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_S390_GCC_ASSEMBLY 1 +_ACEOF + + +;; +SCO/x86/cc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_SCO_X86_CC_ASSEMBLY 1 +_ACEOF + + +;; +SGI/init_lock) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_SGI_INIT_LOCK 1 +_ACEOF + + +;; +Solaris/_lock_try) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_SOLARIS_LOCK_TRY 1 +_ACEOF + + +;; +Solaris/lwp) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_SOLARIS_LWP 1 +_ACEOF + + +;; +Sparc/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_SPARC_GCC_ASSEMBLY 1 +_ACEOF + + +;; +Tru64/cc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_TRU64_CC_ASSEMBLY 1 +_ACEOF + + +;; + +UI/threads) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_UI_THREADS 1 +_ACEOF + + +;; +UI/threads/library) LIBS="$LIBS -lthread" + ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_UI_THREADS 1 +_ACEOF + + +;; +UNIX/msem_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_MSEM_INIT 1 +_ACEOF + + +;; +UNIX/sema_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_SEMA_INIT 1 +_ACEOF + + +;; +UTS/cc-assembly) ADDITIONAL_OBJS="uts4.cc${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_UTS_CC_ASSEMBLY 1 +_ACEOF + + +;; +win32) ADDITIONAL_OBJS="mut_win32${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_WIN32 1 +_ACEOF + -# 68K/gcc: SunOS -if test "$db_cv_mutex" = no; then -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" -/* confdefs.h. */ +;; +win32/gcc) ADDITIONAL_OBJS="mut_win32${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_WIN32_GCC 1 _ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ - -int -main () -{ -#if (defined(mc68020) || defined(sun3)) && defined(__GNUC__) - exit(0); -#else - FAIL TO COMPILE/LINK -#endif - ; - return 0; -} +;; +x86/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_X86_GCC_ASSEMBLY 1 _ACEOF -rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' - { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 - (eval $ac_try) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? 
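The case statement spread across these hunks is where the detection result turns into build settings: each branch appends one object (mut_tas, mut_pthread, mut_win32, mut_fcntl, or uts4.cc) to ADDITIONAL_OBJS, optionally adds a library such as -lpthread or -lthread, and appends a HAVE_MUTEX_* define to the configuration header. For a hypothetical build whose probe result is "POSIX/pthreads/library", the header excerpt would look roughly like this:

/*
 * Hypothetical excerpt of the generated configuration header for a
 * "POSIX/pthreads/library" result; on that path mut_pthread is compiled in
 * and -lpthread is added to LIBS, LIBTSO_LIBS and LIBJSO_LIBS.
 */
#define HAVE_MUTEX_PTHREADS 1
#define HAVE_MUTEX_THREADS 1	/* added below for every result except UNIX/fcntl */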
= $ac_status" >&5 - (exit $ac_status); }; }; then - db_cv_mutex="68K/gcc-assembly" -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 -fi -rm -f conftest.$ac_objext conftest.$ac_ext -fi -# x86/gcc: FreeBSD, NetBSD, BSD/OS, Linux -if test "$db_cv_mutex" = no; then -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" -/* confdefs.h. */ +;; +UNIX/fcntl) { echo "$as_me:$LINENO: WARNING: NO FAST MUTEXES FOUND FOR THIS COMPILER/ARCHITECTURE." >&5 +echo "$as_me: WARNING: NO FAST MUTEXES FOUND FOR THIS COMPILER/ARCHITECTURE." >&2;} + ADDITIONAL_OBJS="mut_fcntl${o} $ADDITIONAL_OBJS" + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_FCNTL 1 _ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -int -main () -{ -#if (defined(i386) || defined(__i386__)) && defined(__GNUC__) - exit(0); -#else - FAIL TO COMPILE/LINK -#endif +;; +*) { { echo "$as_me:$LINENO: error: Unknown mutex interface: $db_cv_mutex" >&5 +echo "$as_me: error: Unknown mutex interface: $db_cv_mutex" >&2;} + { (exit 1); exit 1; }; };; +esac - ; - return 0; -} +if test "$db_cv_mutex" != "UNIX/fcntl"; then + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_THREADS 1 _ACEOF -rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' - { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 - (eval $ac_try) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); }; }; then - db_cv_mutex="x86/gcc-assembly" -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 -fi -rm -f conftest.$ac_objext conftest.$ac_ext + + fi -# S390/cc: IBM OS/390 Unix -if test "$db_cv_mutex" = no; then -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ +# There are 3 classes of mutexes: +# +# 1: Mutexes requiring no cleanup, for example, test-and-set mutexes. +# 2: Mutexes that must be destroyed, but which don't hold permanent system +# resources, for example, pthread mutexes on MVS aka OS/390 aka z/OS. +# 3: Mutexes that must be destroyed, even after the process is gone, for +# example, pthread mutexes on QNX and binary semaphores on VxWorks. +# +# DB cannot currently distinguish between #2 and #3 because DB does not know +# if the application is running environment recovery as part of startup and +# does not need to do cleanup, or if the environment is being removed and/or +# recovered in a loop in the application, and so does need to clean up. If +# we get it wrong, we're going to call the mutex destroy routine on a random +# piece of memory, which usually works, but just might drop core. For now, +# we group #2 and #3 into the HAVE_MUTEX_SYSTEM_RESOURCES define, until we +# have a better solution or reason to solve this in a general way -- so far, +# the places we've needed to handle this are few. -int -main () -{ -#if defined(__MVS__) && defined(__IBMC__) - exit(0); -#else - FAIL TO COMPILE/LINK -#endif - ; - return 0; -} +case "$host_os$db_cv_mutex" in +*qnx*POSIX/pthread*|openedition*POSIX/pthread*) + cat >>confdefs.h <<\_ACEOF +#define HAVE_MUTEX_SYSTEM_RESOURCES 1 _ACEOF -rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 - (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' - { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 - (eval $ac_try) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); }; }; then - db_cv_mutex="S390/cc-assembly" -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 +;; +esac -fi -rm -f conftest.$ac_objext conftest.$ac_ext -fi +# Checks for system functions for which we have replacements. +# +# XXX +# The only portable getcwd call is getcwd(char *, size_t), where the +# buffer is non-NULL -- Solaris can't handle a NULL buffer, and they +# deleted getwd(). -# S390/gcc: Linux -if test "$db_cv_mutex" = no; then -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -int -main () -{ -#if defined(__s390__) && defined(__GNUC__) - exit(0); -#else - FAIL TO COMPILE/LINK -#endif - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' - { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 - (eval $ac_try) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); }; }; then - db_cv_mutex="S390/gcc-assembly" -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 -fi -rm -f conftest.$ac_objext conftest.$ac_ext -fi -# ia86/gcc: Linux -if test "$db_cv_mutex" = no; then -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" +for ac_func in getcwd getopt memcmp memcpy memmove raise +do +as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh` +echo "$as_me:$LINENO: checking for $ac_func" >&5 +echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6 +if eval "test \"\${$as_ac_var+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ +/* Define $ac_func to an innocuous variant, in case declares $ac_func. + For example, HP-UX 11i declares gettimeofday. */ +#define $ac_func innocuous_$ac_func -int -main () -{ +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $ac_func (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ -#if defined(__ia64) && defined(__GNUC__) - exit(0); +#ifdef __STDC__ +# include #else - FAIL TO COMPILE/LINK +# include +#endif + +#undef $ac_func + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +{ +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char $ac_func (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. 
*/ +#if defined (__stub_$ac_func) || defined (__stub___$ac_func) +choke me +#else +char (*f) () = $ac_func; +#endif +#ifdef __cplusplus +} #endif +int +main () +{ +return f != $ac_func; ; return 0; } _ACEOF -rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex="ia64/gcc-assembly" + eval "$as_ac_var=yes" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 +eval "$as_ac_var=no" fi -rm -f conftest.$ac_objext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6 +if test `eval echo '${'$as_ac_var'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF -# uts/cc: UTS -if test "$db_cv_mutex" = no; then -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" +else + case $LIBOBJS in + "$ac_func.$ac_objext" | \ + *" $ac_func.$ac_objext" | \ + "$ac_func.$ac_objext "* | \ + *" $ac_func.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS $ac_func.$ac_objext" ;; +esac + +fi +done + + + + + +for ac_func in strcasecmp strdup strerror +do +as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh` +echo "$as_me:$LINENO: checking for $ac_func" >&5 +echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6 +if eval "test \"\${$as_ac_var+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ +/* Define $ac_func to an innocuous variant, in case declares $ac_func. + For example, HP-UX 11i declares gettimeofday. */ +#define $ac_func innocuous_$ac_func -int -main () -{ +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $ac_func (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ -#if defined(_UTS) - exit(0); +#ifdef __STDC__ +# include #else - FAIL TO COMPILE/LINK +# include +#endif + +#undef $ac_func + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +{ +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char $ac_func (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. 
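Each pass of these AC_CHECK_FUNCS-style loops writes the same template with $ac_func substituted; the stripped angle-bracket headers in its comments appear to be <limits.h> and <assert.h>, matching stock autoconf wording. Substituting one concrete function (getcwd, chosen only as an example) makes the trick easier to read -- the program is never run, configure only cares whether it links:

/*
 * What the probe template expands to for ac_func=getcwd (example choice).
 * The bogus "char getcwd ();" declaration avoids pulling in the real
 * prototype; linking succeeds only if the symbol exists in libc, and the
 * __stub macros catch glibc's always-fail ENOSYS stubs.
 */
#define getcwd innocuous_getcwd
#ifdef __STDC__
# include <limits.h>
#else
# include <assert.h>
#endif
#undef getcwd

#ifdef __cplusplus
extern "C" {
#endif
char getcwd ();
#if defined (__stub_getcwd) || defined (__stub___getcwd)
choke me
#else
char (*f) () = getcwd;
#endif
#ifdef __cplusplus
}
#endif

int
main ()
{
	return f != getcwd;
}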
*/ +#if defined (__stub_$ac_func) || defined (__stub___$ac_func) +choke me +#else +char (*f) () = $ac_func; +#endif +#ifdef __cplusplus +} #endif +int +main () +{ +return f != $ac_func; ; return 0; } _ACEOF -rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_mutex="UTS/cc-assembly" + eval "$as_ac_var=yes" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 +eval "$as_ac_var=no" fi -rm -f conftest.$ac_objext conftest.$ac_ext -fi - -# default to UNIX fcntl system call mutexes. -if test "$db_cv_mutex" = no; then - db_cv_mutex="UNIX/fcntl" -fi - +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: $db_cv_mutex" >&5 -echo "${ECHO_T}$db_cv_mutex" >&6 - -case "$db_cv_mutex" in -68K/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_68K_GCC_ASSEMBLY 1 -_ACEOF - - -;; -AIX/_check_lock) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_AIX_CHECK_LOCK 1 -_ACEOF - - -;; -Darwin/_spin_lock_try) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_DARWIN_SPIN_LOCK_TRY 1 -_ACEOF - - -;; -ALPHA/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_ALPHA_GCC_ASSEMBLY 1 -_ACEOF - - -;; -ARM/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_ARM_GCC_ASSEMBLY 1 -_ACEOF - - -;; -HP/msem_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_HPPA_MSEM_INIT 1 -_ACEOF - - -;; -HPPA/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_HPPA_GCC_ASSEMBLY 1 -_ACEOF - - -;; -ia64/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_IA64_GCC_ASSEMBLY 1 -_ACEOF - - -;; -POSIX/pthreads) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_PTHREADS 1 -_ACEOF - - -;; -POSIX/pthreads/private) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_PTHREADS 1 -_ACEOF - - - - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_THREAD_ONLY 1 -_ACEOF - - -;; -POSIX/pthreads/library) LIBS="$LIBS -lpthread" - ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_PTHREADS 1 -_ACEOF - - -;; -POSIX/pthreads/library/private) - LIBS="$LIBS -lpthread" - ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_PTHREADS 1 -_ACEOF - - - - cat 
>>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_THREAD_ONLY 1 +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6 +if test `eval echo '${'$as_ac_var'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF +else + case $LIBOBJS in + "$ac_func.$ac_objext" | \ + *" $ac_func.$ac_objext" | \ + "$ac_func.$ac_objext "* | \ + *" $ac_func.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS $ac_func.$ac_objext" ;; +esac -;; -PPC/gcc-assembly) - ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_PPC_GCC_ASSEMBLY 1 -_ACEOF - +fi +done -;; -ReliantUNIX/initspin) LIBS="$LIBS -lmproc" - ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_RELIANTUNIX_INITSPIN 1 -_ACEOF -;; -S390/cc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_S390_CC_ASSEMBLY 1 -_ACEOF +# Check for system functions we optionally use. -;; -S390/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_S390_GCC_ASSEMBLY 1 -_ACEOF -;; -SCO/x86/cc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_SCO_X86_CC_ASSEMBLY 1 -_ACEOF -;; -SGI/init_lock) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_SGI_INIT_LOCK 1 +for ac_func in _fstati64 clock_gettime directio fdatasync ftruncate getrusage +do +as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh` +echo "$as_me:$LINENO: checking for $ac_func" >&5 +echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6 +if eval "test \"\${$as_ac_var+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ _ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define $ac_func to an innocuous variant, in case declares $ac_func. + For example, HP-UX 11i declares gettimeofday. */ +#define $ac_func innocuous_$ac_func +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $ac_func (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ -;; -Solaris/_lock_try) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_SOLARIS_LOCK_TRY 1 -_ACEOF - +#ifdef __STDC__ +# include +#else +# include +#endif -;; -Solaris/lwp) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_SOLARIS_LWP 1 -_ACEOF +#undef $ac_func +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +{ +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char $ac_func (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. 
*/ +#if defined (__stub_$ac_func) || defined (__stub___$ac_func) +choke me +#else +char (*f) () = $ac_func; +#endif +#ifdef __cplusplus +} +#endif -;; -Sparc/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_SPARC_GCC_ASSEMBLY 1 +int +main () +{ +return f != $ac_func; + ; + return 0; +} _ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + eval "$as_ac_var=yes" +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 - -;; -Tru64/cc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_TRU64_CC_ASSEMBLY 1 +eval "$as_ac_var=no" +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6 +if test `eval echo '${'$as_ac_var'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF +fi +done -;; -UI/threads) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_UI_THREADS 1 -_ACEOF -;; -UI/threads/library) LIBS="$LIBS -lthread" - ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_UI_THREADS 1 -_ACEOF -;; -UNIX/msem_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_MSEM_INIT 1 +for ac_func in gettimeofday getuid pstat_getdynamic rand sched_yield +do +as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh` +echo "$as_me:$LINENO: checking for $ac_func" >&5 +echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6 +if eval "test \"\${$as_ac_var+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ _ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define $ac_func to an innocuous variant, in case declares $ac_func. + For example, HP-UX 11i declares gettimeofday. */ +#define $ac_func innocuous_$ac_func +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $ac_func (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ -;; -UNIX/sema_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_SEMA_INIT 1 -_ACEOF - +#ifdef __STDC__ +# include +#else +# include +#endif -;; -UTS/cc-assembly) ADDITIONAL_OBJS="uts4.cc${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_UTS_CC_ASSEMBLY 1 -_ACEOF +#undef $ac_func +/* Override any gcc2 internal prototype to avoid an error. 
*/ +#ifdef __cplusplus +extern "C" +{ +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char $ac_func (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined (__stub_$ac_func) || defined (__stub___$ac_func) +choke me +#else +char (*f) () = $ac_func; +#endif +#ifdef __cplusplus +} +#endif -;; -win32) ADDITIONAL_OBJS="mut_win32${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_WIN32 1 +int +main () +{ +return f != $ac_func; + ; + return 0; +} _ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + eval "$as_ac_var=yes" +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 - -;; -win32/gcc) ADDITIONAL_OBJS="mut_win32${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_WIN32_GCC 1 +eval "$as_ac_var=no" +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6 +if test `eval echo '${'$as_ac_var'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF +fi +done -;; -x86/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_X86_GCC_ASSEMBLY 1 -_ACEOF -;; -UNIX/fcntl) { echo "$as_me:$LINENO: WARNING: NO FAST MUTEXES FOUND FOR THIS COMPILER/ARCHITECTURE." >&5 -echo "$as_me: WARNING: NO FAST MUTEXES FOUND FOR THIS COMPILER/ARCHITECTURE." >&2;} - ADDITIONAL_OBJS="mut_fcntl${o} $ADDITIONAL_OBJS" - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_FCNTL 1 -_ACEOF -;; -*) { { echo "$as_me:$LINENO: error: Unknown mutex interface: $db_cv_mutex" >&5 -echo "$as_me: error: Unknown mutex interface: $db_cv_mutex" >&2;} - { (exit 1); exit 1; }; };; -esac -if test "$db_cv_mutex" != "UNIX/fcntl"; then - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_THREADS 1 -_ACEOF +for ac_func in select snprintf srand strtoul sysconf vsnprintf yield +do +as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh` +echo "$as_me:$LINENO: checking for $ac_func" >&5 +echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6 +if eval "test \"\${$as_ac_var+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define $ac_func to an innocuous variant, in case declares $ac_func. + For example, HP-UX 11i declares gettimeofday. 
*/ +#define $ac_func innocuous_$ac_func -fi +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $ac_func (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ -# There are 3 classes of mutexes: -# -# 1: Mutexes requiring no cleanup, for example, test-and-set mutexes. -# 2: Mutexes that must be destroyed, but which don't hold permanent system -# resources, for example, pthread mutexes on MVS aka OS/390 aka z/OS. -# 3: Mutexes that must be destroyed, even after the process is gone, for -# example, pthread mutexes on QNX and binary semaphores on VxWorks. -# -# DB cannot currently distinguish between #2 and #3 because DB does not know -# if the application is running environment recovery as part of startup and -# does not need to do cleanup, or if the environment is being removed and/or -# recovered in a loop in the application, and so does need to clean up. If -# we get it wrong, we're going to call the mutex destroy routine on a random -# piece of memory, which usually works, but just might drop core. For now, -# we group #2 and #3 into the HAVE_MUTEX_SYSTEM_RESOURCES define, until we -# have a better solution or reason to solve this in a general way -- so far, -# the places we've needed to handle this are few. +#ifdef __STDC__ +# include +#else +# include +#endif +#undef $ac_func +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +{ +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char $ac_func (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined (__stub_$ac_func) || defined (__stub___$ac_func) +choke me +#else +char (*f) () = $ac_func; +#endif +#ifdef __cplusplus +} +#endif -case "$host_os$db_cv_mutex" in -*qnx*POSIX/pthread*|openedition*POSIX/pthread*) - cat >>confdefs.h <<\_ACEOF -#define HAVE_MUTEX_SYSTEM_RESOURCES 1 +int +main () +{ +return f != $ac_func; + ; + return 0; +} _ACEOF -;; -esac - -# Checks for system functions for which we have replacements. -# -# XXX -# The only portable getcwd call is getcwd(char *, size_t), where the -# buffer is non-NULL -- Solaris can't handle a NULL buffer, and they -# deleted getwd(). +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + eval "$as_ac_var=yes" +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 +eval "$as_ac_var=no" +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6 +if test `eval echo '${'$as_ac_var'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF +fi +done +# Pread/pwrite. +# HP-UX has pread/pwrite, but it doesn't work with largefile support. +# NCR's version of System V R 4.3 has pread/pwrite symbols, but no support. +case "$host_os-$host_vendor" in +hpux*|sysv4.3*-ncr) + { echo "$as_me:$LINENO: WARNING: pread/pwrite interfaces ignored on $host_os-$host_vendor." >&5 +echo "$as_me: WARNING: pread/pwrite interfaces ignored on $host_os-$host_vendor." >&2;};; +*) -for ac_func in getcwd getopt memcmp memcpy memmove raise +for ac_func in pread pwrite do as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh` echo "$as_me:$LINENO: checking for $ac_func" >&5 @@ -29047,21 +32636,28 @@ if eval "test \"\${$as_ac_var+set}\" = set"; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ +/* Define $ac_func to an innocuous variant, in case declares $ac_func. + For example, HP-UX 11i declares gettimeofday. */ +#define $ac_func innocuous_$ac_func + /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $ac_func (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ + #ifdef __STDC__ # include #else # include #endif + +#undef $ac_func + /* Override any gcc2 internal prototype to avoid an error. */ #ifdef __cplusplus extern "C" @@ -29085,328 +32681,596 @@ char (*f) () = $ac_func; int main () { -return f != $ac_func; +return f != $ac_func; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + eval "$as_ac_var=yes" +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +eval "$as_ac_var=no" +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6 +if test `eval echo '${'$as_ac_var'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +fi +done +;; +esac + +# Check for fcntl(2) to deny child process access to file descriptors. +echo "$as_me:$LINENO: checking for fcntl/F_SETFD" >&5 +echo $ECHO_N "checking for fcntl/F_SETFD... 
$ECHO_C" >&6 +if test "${db_cv_fcntl_f_setfd+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +#include +#include +int +main () +{ + + fcntl(1, F_SETFD, 1); + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + db_cv_fcntl_f_setfd=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +db_cv_fcntl_f_setfd=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $db_cv_fcntl_f_setfd" >&5 +echo "${ECHO_T}$db_cv_fcntl_f_setfd" >&6 +if test "$db_cv_fcntl_f_setfd" = "yes"; then + cat >>confdefs.h <<\_ACEOF +#define HAVE_FCNTL_F_SETFD 1 +_ACEOF + + + +fi + +# A/UX has a broken getopt(3). +case "$host_os" in +aux*) case $LIBOBJS in + "getopt.$ac_objext" | \ + *" getopt.$ac_objext" | \ + "getopt.$ac_objext "* | \ + *" getopt.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS getopt.$ac_objext" ;; +esac +;; +esac + +# Linux has a broken O_DIRECT flag, but you can't detect it at configure time. +# Linux and SGI require buffer alignment we may not match, otherwise writes +# will fail. Default to not using the O_DIRECT flag. +if test "$db_cv_o_direct" = "yes"; then + echo "$as_me:$LINENO: checking for open/O_DIRECT" >&5 +echo $ECHO_N "checking for open/O_DIRECT... $ECHO_C" >&6 +if test "${db_cv_open_o_direct+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + + #include + #include +int +main () +{ + + open("a", O_RDONLY | O_DIRECT, 0); + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + db_cv_open_o_direct=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +db_cv_open_o_direct=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $db_cv_open_o_direct" >&5 +echo "${ECHO_T}$db_cv_open_o_direct" >&6 + if test \ + "$db_cv_o_direct" = "yes" -a "$db_cv_open_o_direct" = "yes"; then + cat >>confdefs.h <<\_ACEOF +#define HAVE_O_DIRECT 1 +_ACEOF + + + + fi +fi + +# Check for largefile support. +# Check whether --enable-largefile or --disable-largefile was given. +if test "${enable_largefile+set}" = set; then + enableval="$enable_largefile" + +fi; +if test "$enable_largefile" != no; then + + echo "$as_me:$LINENO: checking for special C compiler options needed for large files" >&5 +echo $ECHO_N "checking for special C compiler options needed for large files... $ECHO_C" >&6 +if test "${ac_cv_sys_largefile_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_cv_sys_largefile_CC=no + if test "$GCC" != yes; then + ac_save_CC=$CC + while :; do + # IRIX 6.2 and later do not support large files by default, + # so use the C compiler's -n32 option if that helps. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include + /* Check that off_t can represent 2**63 - 1 correctly. + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 1 : -1]; +int +main () +{ + ; return 0; } _ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - eval "$as_ac_var=yes" + break else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -eval "$as_ac_var=no" -fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5 -echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6 -if test `eval echo '${'$as_ac_var'}'` = yes; then - cat >>confdefs.h <<_ACEOF -#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1 -_ACEOF - +rm -f conftest.err conftest.$ac_objext + CC="$CC -n32" + rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_sys_largefile_CC=' -n32'; break else - LIBOBJS="$LIBOBJS $ac_func.$ac_objext" -fi -done - - - - - + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 +fi +rm -f conftest.err conftest.$ac_objext + break + done + CC=$ac_save_CC + rm -f conftest.$ac_ext + fi +fi +echo "$as_me:$LINENO: result: $ac_cv_sys_largefile_CC" >&5 +echo "${ECHO_T}$ac_cv_sys_largefile_CC" >&6 + if test "$ac_cv_sys_largefile_CC" != no; then + CC=$CC$ac_cv_sys_largefile_CC + fi -for ac_func in snprintf strcasecmp strdup strerror vsnprintf -do -as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh` -echo "$as_me:$LINENO: checking for $ac_func" >&5 -echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6 -if eval "test \"\${$as_ac_var+set}\" = set"; then + echo "$as_me:$LINENO: checking for _FILE_OFFSET_BITS value needed for large files" >&5 +echo $ECHO_N "checking for _FILE_OFFSET_BITS value needed for large files... $ECHO_C" >&6 +if test "${ac_cv_sys_file_offset_bits+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else + while :; do + ac_cv_sys_file_offset_bits=no cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -/* System header to define __stub macros and hopefully few prototypes, - which can conflict with char $ac_func (); below. - Prefer to if __STDC__ is defined, since - exists even on freestanding compilers. */ -#ifdef __STDC__ -# include -#else -# include -#endif -/* Override any gcc2 internal prototype to avoid an error. */ -#ifdef __cplusplus -extern "C" -{ -#endif -/* We use char because int might match the return type of a gcc2 - builtin and then its argument prototype would still apply. */ -char $ac_func (); -/* The GNU C library defines this for functions which it implements - to always fail with ENOSYS. Some functions are actually named - something starting with __ and the normal name is an alias. */ -#if defined (__stub_$ac_func) || defined (__stub___$ac_func) -choke me -#else -char (*f) () = $ac_func; -#endif -#ifdef __cplusplus -} -#endif - +#include + /* Check that off_t can represent 2**63 - 1 correctly. + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 1 : -1]; int main () { -return f != $ac_func; + ; return 0; } _ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! 
-s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - eval "$as_ac_var=yes" + break else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -eval "$as_ac_var=no" -fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext -fi -echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5 -echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6 -if test `eval echo '${'$as_ac_var'}'` = yes; then - cat >>confdefs.h <<_ACEOF -#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1 -_ACEOF - -else - LIBOBJS="$LIBOBJS $ac_func.$ac_objext" fi -done - - - -# Check for system functions we optionally use. - - - - - - -for ac_func in _fstati64 clock_gettime directio getrusage gettimeofday getuid -do -as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh` -echo "$as_me:$LINENO: checking for $ac_func" >&5 -echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6 -if eval "test \"\${$as_ac_var+set}\" = set"; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -/* System header to define __stub macros and hopefully few prototypes, - which can conflict with char $ac_func (); below. - Prefer to if __STDC__ is defined, since - exists even on freestanding compilers. */ -#ifdef __STDC__ -# include -#else -# include -#endif -/* Override any gcc2 internal prototype to avoid an error. */ -#ifdef __cplusplus -extern "C" +#define _FILE_OFFSET_BITS 64 +#include + /* Check that off_t can represent 2**63 - 1 correctly. + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 1 : -1]; +int +main () { -#endif -/* We use char because int might match the return type of a gcc2 - builtin and then its argument prototype would still apply. */ -char $ac_func (); -/* The GNU C library defines this for functions which it implements - to always fail with ENOSYS. Some functions are actually named - something starting with __ and the normal name is an alias. */ -#if defined (__stub_$ac_func) || defined (__stub___$ac_func) -choke me -#else -char (*f) () = $ac_func; -#endif -#ifdef __cplusplus + + ; + return 0; } -#endif +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_sys_file_offset_bits=64; break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + break +done +fi +echo "$as_me:$LINENO: result: $ac_cv_sys_file_offset_bits" >&5 +echo "${ECHO_T}$ac_cv_sys_file_offset_bits" >&6 +if test "$ac_cv_sys_file_offset_bits" != no; then + +cat >>confdefs.h <<_ACEOF +#define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits +_ACEOF +fi +rm -f conftest* + echo "$as_me:$LINENO: checking for _LARGE_FILES value needed for large files" >&5 +echo $ECHO_N "checking for _LARGE_FILES value needed for large files... $ECHO_C" >&6 +if test "${ac_cv_sys_large_files+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + while :; do + ac_cv_sys_large_files=no + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include + /* Check that off_t can represent 2**63 - 1 correctly. + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 1 : -1]; int main () { -return f != $ac_func; + ; return 0; } _ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - eval "$as_ac_var=yes" + break else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -eval "$as_ac_var=no" -fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext -fi -echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5 -echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6 -if test `eval echo '${'$as_ac_var'}'` = yes; then - cat >>confdefs.h <<_ACEOF -#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1 -_ACEOF - fi -done - - - - - - - -for ac_func in pstat_getdynamic sched_yield select strtoul sysconf yield -do -as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh` -echo "$as_me:$LINENO: checking for $ac_func" >&5 -echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6 -if eval "test \"\${$as_ac_var+set}\" = set"; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -/* System header to define __stub macros and hopefully few prototypes, - which can conflict with char $ac_func (); below. 
- Prefer to if __STDC__ is defined, since - exists even on freestanding compilers. */ -#ifdef __STDC__ -# include -#else -# include -#endif -/* Override any gcc2 internal prototype to avoid an error. */ -#ifdef __cplusplus -extern "C" -{ -#endif -/* We use char because int might match the return type of a gcc2 - builtin and then its argument prototype would still apply. */ -char $ac_func (); -/* The GNU C library defines this for functions which it implements - to always fail with ENOSYS. Some functions are actually named - something starting with __ and the normal name is an alias. */ -#if defined (__stub_$ac_func) || defined (__stub___$ac_func) -choke me -#else -char (*f) () = $ac_func; -#endif -#ifdef __cplusplus -} -#endif - +#define _LARGE_FILES 1 +#include + /* Check that off_t can represent 2**63 - 1 correctly. + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 1 : -1]; int main () { -return f != $ac_func; + ; return 0; } _ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - eval "$as_ac_var=yes" + ac_cv_sys_large_files=1; break else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -eval "$as_ac_var=no" fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + break +done fi -echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5 -echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6 -if test `eval echo '${'$as_ac_var'}'` = yes; then - cat >>confdefs.h <<_ACEOF -#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1 +echo "$as_me:$LINENO: result: $ac_cv_sys_large_files" >&5 +echo "${ECHO_T}$ac_cv_sys_large_files" >&6 +if test "$ac_cv_sys_large_files" != no; then + +cat >>confdefs.h <<_ACEOF +#define _LARGE_FILES $ac_cv_sys_large_files _ACEOF fi -done +rm -f conftest* +fi -# Pread/pwrite. -# HP-UX has pread/pwrite, but it doesn't work with largefile support. -# NCR's version of System V R 4.3 has pread/pwrite symbols, but no support. -case "$host_os-$host_vendor" in -hpux*|sysv4.3*-ncr) - { echo "$as_me:$LINENO: WARNING: pread/pwrite interfaces ignored on $host_os-$host_vendor." >&5 -echo "$as_me: WARNING: pread/pwrite interfaces ignored on $host_os-$host_vendor." >&2;};; -*) +# Figure out how to create shared regions. +# +# First, we look for mmap. +# +# BSD/OS has mlock(2), but it doesn't work until the 4.1 release. 
+# +# Nextstep (version 3.3) apparently supports mmap(2) (the mmap symbol +# is defined in the C library) but does not support munmap(2). Don't +# try to use mmap if we can't find munmap. +# +# Ultrix has mmap(2), but it doesn't work. +mmap_ok=no +case "$host_os" in +bsdi3*|bsdi4.0) + { echo "$as_me:$LINENO: WARNING: mlock(2) interface ignored on $host_os-$host_vendor." >&5 +echo "$as_me: WARNING: mlock(2) interface ignored on $host_os-$host_vendor." >&2;} + mmap_ok=yes -for ac_func in pread pwrite +for ac_func in mmap munmap do as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh` echo "$as_me:$LINENO: checking for $ac_func" >&5 @@ -29415,21 +33279,28 @@ if eval "test \"\${$as_ac_var+set}\" = set"; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ +/* Define $ac_func to an innocuous variant, in case declares $ac_func. + For example, HP-UX 11i declares gettimeofday. */ +#define $ac_func innocuous_$ac_func + /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $ac_func (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ + #ifdef __STDC__ # include #else # include #endif + +#undef $ac_func + /* Override any gcc2 internal prototype to avoid an error. */ #ifdef __cplusplus extern "C" @@ -29460,11 +33331,21 @@ return f != $ac_func; _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? @@ -29477,7 +33358,8 @@ sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_var=no" fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5 echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6 @@ -29486,498 +33368,626 @@ if test `eval echo '${'$as_ac_var'}'` = yes; then #define `echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF +else + mmap_ok=no fi done ;; -esac +ultrix*) + { echo "$as_me:$LINENO: WARNING: mmap(2) interface ignored on $host_os-$host_vendor." >&5 +echo "$as_me: WARNING: mmap(2) interface ignored on $host_os-$host_vendor." >&2;};; +*) + mmap_ok=yes -# Check for fcntl(2) to deny child process access to file descriptors. -echo "$as_me:$LINENO: checking for fcntl/F_SETFD" >&5 -echo $ECHO_N "checking for fcntl/F_SETFD... $ECHO_C" >&6 -if test "${db_cv_fcntl_f_setfd+set}" = set; then + +for ac_func in mlock munlock +do +as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh` +echo "$as_me:$LINENO: checking for $ac_func" >&5 +echo $ECHO_N "checking for $ac_func... 
$ECHO_C" >&6 +if eval "test \"\${$as_ac_var+set}\" = set"; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ +/* Define $ac_func to an innocuous variant, in case declares $ac_func. + For example, HP-UX 11i declares gettimeofday. */ +#define $ac_func innocuous_$ac_func -#include -#include -int -main () -{ - - fcntl(1, F_SETFD, 1); - - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' - { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 - (eval $ac_try) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); }; }; then - db_cv_fcntl_f_setfd=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -db_cv_fcntl_f_setfd=no -fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext -fi -echo "$as_me:$LINENO: result: $db_cv_fcntl_f_setfd" >&5 -echo "${ECHO_T}$db_cv_fcntl_f_setfd" >&6 -if test "$db_cv_fcntl_f_setfd" = "yes"; then - cat >>confdefs.h <<\_ACEOF -#define HAVE_FCNTL_F_SETFD 1 -_ACEOF - - - -fi - -# A/UX has a broken getopt(3). -case "$host_os" in -aux*) LIBOBJS="$LIBOBJS getopt.$ac_objext";; -esac +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $ac_func (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ -# Linux has a broken O_DIRECT flag, but we allow people to override it from -# the command line. -test_host_prw=yes -echo "$as_me:$LINENO: checking for open/O_DIRECT" >&5 -echo $ECHO_N "checking for open/O_DIRECT... $ECHO_C" >&6 -if test "${db_cv_open_o_direct+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else +#ifdef __STDC__ +# include +#else +# include +#endif -cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ +#undef $ac_func + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +{ +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char $ac_func (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined (__stub_$ac_func) || defined (__stub___$ac_func) +choke me +#else +char (*f) () = $ac_func; +#endif +#ifdef __cplusplus +} +#endif -#include -#include int main () { - - open("a", O_RDONLY | O_DIRECT, 0); - +return f != $ac_func; ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? 
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - db_cv_open_o_direct=yes; test_host_prw=no + eval "$as_ac_var=yes" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -db_cv_open_o_direct=no -fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +eval "$as_ac_var=no" fi -echo "$as_me:$LINENO: result: $db_cv_open_o_direct" >&5 -echo "${ECHO_T}$db_cv_open_o_direct" >&6 -if test "$test_host_prw" = "no" -a "$db_cv_open_o_direct" = "yes"; then - case "$host_os" in - linux*) - db_cv_open_o_direct=no; - { echo "$as_me:$LINENO: WARNING: O_DIRECT interface ignored on $host_os-$host_vendor." >&5 -echo "$as_me: WARNING: O_DIRECT interface ignored on $host_os-$host_vendor." >&2;};; - esac +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi -if test "$db_cv_open_o_direct" = "yes"; then - cat >>confdefs.h <<\_ACEOF -#define HAVE_O_DIRECT 1 +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6 +if test `eval echo '${'$as_ac_var'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF - - fi +done -# Check for largefile support. -# Check whether --enable-largefile or --disable-largefile was given. -if test "${enable_largefile+set}" = set; then - enableval="$enable_largefile" -fi; -if test "$enable_largefile" != no; then - echo "$as_me:$LINENO: checking for special C compiler options needed for large files" >&5 -echo $ECHO_N "checking for special C compiler options needed for large files... $ECHO_C" >&6 -if test "${ac_cv_sys_largefile_CC+set}" = set; then +for ac_func in mmap munmap +do +as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh` +echo "$as_me:$LINENO: checking for $ac_func" >&5 +echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6 +if eval "test \"\${$as_ac_var+set}\" = set"; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - ac_cv_sys_largefile_CC=no - if test "$GCC" != yes; then - ac_save_CC=$CC - while :; do - # IRIX 6.2 and later do not support large files by default, - # so use the C compiler's -n32 option if that helps. - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -#include - /* Check that off_t can represent 2**63 - 1 correctly. - We can't simply define LARGE_OFF_T to be 9223372036854775807, - since some C++ compilers masquerading as C compilers - incorrectly reject 9223372036854775807. */ -#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) - int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 - && LARGE_OFF_T % 2147483647 == 1) - ? 1 : -1]; +/* Define $ac_func to an innocuous variant, in case declares $ac_func. + For example, HP-UX 11i declares gettimeofday. */ +#define $ac_func innocuous_$ac_func + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $ac_func (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef $ac_func + +/* Override any gcc2 internal prototype to avoid an error. 
*/ +#ifdef __cplusplus +extern "C" +{ +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char $ac_func (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined (__stub_$ac_func) || defined (__stub___$ac_func) +choke me +#else +char (*f) () = $ac_func; +#endif +#ifdef __cplusplus +} +#endif + int main () { - +return f != $ac_func; ; return 0; } _ACEOF - rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); }; }; then - break -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -fi -rm -f conftest.$ac_objext - CC="$CC -n32" - rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_sys_largefile_CC=' -n32'; break + eval "$as_ac_var=yes" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 +eval "$as_ac_var=no" fi -rm -f conftest.$ac_objext - break - done - CC=$ac_save_CC - rm -f conftest.$ac_ext - fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: $ac_cv_sys_largefile_CC" >&5 -echo "${ECHO_T}$ac_cv_sys_largefile_CC" >&6 - if test "$ac_cv_sys_largefile_CC" != no; then - CC=$CC$ac_cv_sys_largefile_CC - fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6 +if test `eval echo '${'$as_ac_var'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF - echo "$as_me:$LINENO: checking for _FILE_OFFSET_BITS value needed for large files" >&5 -echo $ECHO_N "checking for _FILE_OFFSET_BITS value needed for large files... $ECHO_C" >&6 -if test "${ac_cv_sys_file_offset_bits+set}" = set; then +else + mmap_ok=no +fi +done +;; +esac + +# Second, we look for shmget. +# +# SunOS has the shmget(2) interfaces, but there appears to be a missing +# #include file, so we ignore them. +shmget_ok=no +case "$host_os" in +sunos*) + { echo "$as_me:$LINENO: WARNING: shmget(2) interface ignored on $host_os-$host_vendor." >&5 +echo "$as_me: WARNING: shmget(2) interface ignored on $host_os-$host_vendor." >&2;};; +*) + shmget_ok=yes + +for ac_func in shmget +do +as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh` +echo "$as_me:$LINENO: checking for $ac_func" >&5 +echo $ECHO_N "checking for $ac_func... 
$ECHO_C" >&6 +if eval "test \"\${$as_ac_var+set}\" = set"; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - while :; do - ac_cv_sys_file_offset_bits=no cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -#include - /* Check that off_t can represent 2**63 - 1 correctly. - We can't simply define LARGE_OFF_T to be 9223372036854775807, - since some C++ compilers masquerading as C compilers - incorrectly reject 9223372036854775807. */ -#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) - int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 - && LARGE_OFF_T % 2147483647 == 1) - ? 1 : -1]; -int -main () -{ +/* Define $ac_func to an innocuous variant, in case declares $ac_func. + For example, HP-UX 11i declares gettimeofday. */ +#define $ac_func innocuous_$ac_func - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' - { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 - (eval $ac_try) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); }; }; then - break -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $ac_func (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef $ac_func + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +{ +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char $ac_func (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined (__stub_$ac_func) || defined (__stub___$ac_func) +choke me +#else +char (*f) () = $ac_func; +#endif +#ifdef __cplusplus +} +#endif -fi -rm -f conftest.$ac_objext conftest.$ac_ext - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -#define _FILE_OFFSET_BITS 64 -#include - /* Check that off_t can represent 2**63 - 1 correctly. - We can't simply define LARGE_OFF_T to be 9223372036854775807, - since some C++ compilers masquerading as C compilers - incorrectly reject 9223372036854775807. */ -#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) - int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 - && LARGE_OFF_T % 2147483647 == 1) - ? 1 : -1]; int main () { - +return f != $ac_func; ; return 0; } _ACEOF -rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! 
-s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_sys_file_offset_bits=64; break + eval "$as_ac_var=yes" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 +eval "$as_ac_var=no" +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6 +if test `eval echo '${'$as_ac_var'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +else + shmget_ok=no fi -rm -f conftest.$ac_objext conftest.$ac_ext - break done +;; +esac + +# We require either mmap/munmap(2) or shmget(2). +if test "$mmap_ok" = "no" -a "$shmget_ok" = "no"; then + { echo "$as_me:$LINENO: WARNING: Neither mmap/munmap(2) or shmget(2) library functions." >&5 +echo "$as_me: WARNING: Neither mmap/munmap(2) or shmget(2) library functions." >&2;} fi -echo "$as_me:$LINENO: result: $ac_cv_sys_file_offset_bits" >&5 -echo "${ECHO_T}$ac_cv_sys_file_offset_bits" >&6 -if test "$ac_cv_sys_file_offset_bits" != no; then -cat >>confdefs.h <<_ACEOF -#define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits +# Optional RPC client/server. +if test "$db_cv_rpc" = "yes"; then + + cat >>confdefs.h <<\_ACEOF +#define HAVE_RPC 1 _ACEOF -fi -rm -f conftest* - echo "$as_me:$LINENO: checking for _LARGE_FILES value needed for large files" >&5 -echo $ECHO_N "checking for _LARGE_FILES value needed for large files... $ECHO_C" >&6 -if test "${ac_cv_sys_large_files+set}" = set; then + + + + # We use the target's rpcgen utility because it may be architecture + # specific, for example, 32- or 64-bit specific. + XDR_FILE=$srcdir/../rpc_server/db_server.x + + # Prefer the -C option to rpcgen which generates ANSI C-conformant + # code. + RPCGEN="rpcgen -C" + echo "$as_me:$LINENO: checking \"$RPCGEN\" build of db_server.h" >&5 +echo $ECHO_N "checking \"$RPCGEN\" build of db_server.h... $ECHO_C" >&6 + $RPCGEN -h $XDR_FILE > db_server.h 2>/dev/null + if test $? -ne 0; then + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 + + # Try rpcgen without the -C option. + RPCGEN="rpcgen" + echo "$as_me:$LINENO: checking \"$RPCGEN\" build of db_server.h" >&5 +echo $ECHO_N "checking \"$RPCGEN\" build of db_server.h... $ECHO_C" >&6 + $RPCGEN -h $XDR_FILE > db_server.h 2>/dev/null + if test $? -ne 0; then + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 + { { echo "$as_me:$LINENO: error: Unable to build RPC support: $RPCGEN failed." >&5 +echo "$as_me: error: Unable to build RPC support: $RPCGEN failed." >&2;} + { (exit 1); exit 1; }; } + fi + fi + + # Some rpcgen programs generate a set of client stubs called something + # like __db_env_create_4003 and functions on the server to handle the + # request called something like __db_env_create_4003_svc. Others + # expect client and server stubs to both be called __db_env_create_4003. + # + # We have to generate code in whichever format rpcgen expects, and the + # only reliable way to do that is to check what is in the db_server.h + # file we just created. 
+ if grep "env_create_[0-9]*_svc" db_server.h >/dev/null 2>&1 ; then + sed 's/__SVCSUFFIX__/_svc/' \ + < $srcdir/../rpc_server/c/gen_db_server.c > gen_db_server.c + else + sed 's/__SVCSUFFIX__//' \ + < $srcdir/../rpc_server/c/gen_db_server.c > gen_db_server.c + fi + + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 + + $RPCGEN -l $XDR_FILE | + sed -e 's/^#include.*db_server.h.*/#include "db_server.h"/' \ + -e '1,/^#include/s/^#include/#include "db_config.h"\ +&/' > db_server_clnt.c + + $RPCGEN -s tcp $XDR_FILE | + sed -e 's/^#include.*db_server.h.*/#include "db_server.h"/' \ + -e 's/^main *()/__dbsrv_main()/' \ + -e 's/^main *(.*argc.*argv.*)/__dbsrv_main(int argc, char *argv)/' \ + -e '/^db_rpc_serverprog/,/^}/{' \ + -e 's/return;//' \ + -e 's/^}/__dbsrv_timeout(0);}/' \ + -e '}' \ + -e '1,/^#include/s/^#include/#include "db_config.h"\ +&/' > db_server_svc.c + + $RPCGEN -c $XDR_FILE | + sed -e 's/^#include.*db_server.h.*/#include "db_server.h"/' \ + -e '1,/^#include/s/^#include/#include "db_config.h"\ +&/' > db_server_xdr.c + + RPC_SERVER_H=db_server.h + RPC_CLIENT_OBJS="\$(RPC_CLIENT_OBJS)" + ADDITIONAL_PROGS="berkeley_db_svc $ADDITIONAL_PROGS" + + case "$host_os" in + hpux*) + echo "$as_me:$LINENO: checking for svc_run" >&5 +echo $ECHO_N "checking for svc_run... $ECHO_C" >&6 +if test "${ac_cv_func_svc_run+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - while :; do - ac_cv_sys_large_files=no cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -#include - /* Check that off_t can represent 2**63 - 1 correctly. - We can't simply define LARGE_OFF_T to be 9223372036854775807, - since some C++ compilers masquerading as C compilers - incorrectly reject 9223372036854775807. */ -#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) - int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 - && LARGE_OFF_T % 2147483647 == 1) - ? 1 : -1]; +/* Define svc_run to an innocuous variant, in case declares svc_run. + For example, HP-UX 11i declares gettimeofday. */ +#define svc_run innocuous_svc_run + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char svc_run (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef svc_run + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +{ +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char svc_run (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined (__stub_svc_run) || defined (__stub___svc_run) +choke me +#else +char (*f) () = svc_run; +#endif +#ifdef __cplusplus +} +#endif + int main () { - +return f != svc_run; ; return 0; } _ACEOF -rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - break + ac_cv_func_svc_run=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 +ac_cv_func_svc_run=no fi -rm -f conftest.$ac_objext conftest.$ac_ext - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_func_svc_run" >&5 +echo "${ECHO_T}$ac_cv_func_svc_run" >&6 +if test $ac_cv_func_svc_run = yes; then + : +else + echo "$as_me:$LINENO: checking for svc_run in -lnsl" >&5 +echo $ECHO_N "checking for svc_run in -lnsl... $ECHO_C" >&6 +if test "${ac_cv_lib_nsl_svc_run+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lnsl $LIBS" +cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -#define _LARGE_FILES 1 -#include - /* Check that off_t can represent 2**63 - 1 correctly. - We can't simply define LARGE_OFF_T to be 9223372036854775807, - since some C++ compilers masquerading as C compilers - incorrectly reject 9223372036854775807. */ -#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) - int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 - && LARGE_OFF_T % 2147483647 == 1) - ? 1 : -1]; + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char svc_run (); int main () { - +svc_run (); ; return 0; } _ACEOF -rm -f conftest.$ac_objext -if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest.$ac_objext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then - ac_cv_sys_large_files=1; break + ac_cv_lib_nsl_svc_run=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 +ac_cv_lib_nsl_svc_run=no fi -rm -f conftest.$ac_objext conftest.$ac_ext - break -done -fi -echo "$as_me:$LINENO: result: $ac_cv_sys_large_files" >&5 -echo "${ECHO_T}$ac_cv_sys_large_files" >&6 -if test "$ac_cv_sys_large_files" != no; then - -cat >>confdefs.h <<_ACEOF -#define _LARGE_FILES $ac_cv_sys_large_files -_ACEOF - +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS fi -rm -f conftest* +echo "$as_me:$LINENO: result: $ac_cv_lib_nsl_svc_run" >&5 +echo "${ECHO_T}$ac_cv_lib_nsl_svc_run" >&6 +if test $ac_cv_lib_nsl_svc_run = yes; then + LIBS="-lnsl $LIBS"; LIBTSO_LIBS="-lnsl $LIBTSO_LIBS"; + LIBJSO_LIBS="-lnsl $LIBJSO_LIBS" fi - -# Figure out how to create shared regions. -# -# First, we look for mmap. -# -# BSD/OS has mlock(2), but it doesn't work until the 4.1 release. -# -# Nextstep (version 3.3) apparently supports mmap(2) (the mmap symbol -# is defined in the C library) but does not support munmap(2). Don't -# try to use mmap if we can't find munmap. -# -# Ultrix has mmap(2), but it doesn't work. -mmap_ok=no -case "$host_os" in -bsdi3*|bsdi4.0) - { echo "$as_me:$LINENO: WARNING: mlock(2) interface ignored on $host_os-$host_vendor." >&5 -echo "$as_me: WARNING: mlock(2) interface ignored on $host_os-$host_vendor." >&2;} - mmap_ok=yes - - -for ac_func in mmap munmap -do -as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh` -echo "$as_me:$LINENO: checking for $ac_func" >&5 -echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6 -if eval "test \"\${$as_ac_var+set}\" = set"; then +fi +;; + solaris*) + echo "$as_me:$LINENO: checking for svc_run" >&5 +echo $ECHO_N "checking for svc_run... $ECHO_C" >&6 +if test "${ac_cv_func_svc_run+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ +/* Define svc_run to an innocuous variant, in case declares svc_run. + For example, HP-UX 11i declares gettimeofday. */ +#define svc_run innocuous_svc_run + /* System header to define __stub macros and hopefully few prototypes, - which can conflict with char $ac_func (); below. + which can conflict with char svc_run (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ + #ifdef __STDC__ # include #else # include #endif + +#undef svc_run + /* Override any gcc2 internal prototype to avoid an error. */ #ifdef __cplusplus extern "C" @@ -29985,14 +33995,14 @@ extern "C" #endif /* We use char because int might match the return type of a gcc2 builtin and then its argument prototype would still apply. */ -char $ac_func (); +char svc_run (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. 
*/ -#if defined (__stub_$ac_func) || defined (__stub___$ac_func) +#if defined (__stub_svc_run) || defined (__stub___svc_run) choke me #else -char (*f) () = $ac_func; +char (*f) () = svc_run; #endif #ifdef __cplusplus } @@ -30001,326 +34011,482 @@ char (*f) () = $ac_func; int main () { -return f != $ac_func; +return f != svc_run; ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - eval "$as_ac_var=yes" + ac_cv_func_svc_run=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -eval "$as_ac_var=no" +ac_cv_func_svc_run=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5 -echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6 -if test `eval echo '${'$as_ac_var'}'` = yes; then - cat >>confdefs.h <<_ACEOF -#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1 -_ACEOF - +echo "$as_me:$LINENO: result: $ac_cv_func_svc_run" >&5 +echo "${ECHO_T}$ac_cv_func_svc_run" >&6 +if test $ac_cv_func_svc_run = yes; then + : else - mmap_ok=no -fi -done -;; -ultrix*) - { echo "$as_me:$LINENO: WARNING: mmap(2) interface ignored on $host_os-$host_vendor." >&5 -echo "$as_me: WARNING: mmap(2) interface ignored on $host_os-$host_vendor." >&2;};; -*) - mmap_ok=yes - -for ac_func in mlock munlock -do -as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh` -echo "$as_me:$LINENO: checking for $ac_func" >&5 -echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6 -if eval "test \"\${$as_ac_var+set}\" = set"; then +echo "$as_me:$LINENO: checking for svc_run in -lnsl" >&5 +echo $ECHO_N "checking for svc_run in -lnsl... $ECHO_C" >&6 +if test "${ac_cv_lib_nsl_svc_run+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" + ac_check_lib_save_LIBS=$LIBS +LIBS="-lnsl $LIBS" +cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -/* System header to define __stub macros and hopefully few prototypes, - which can conflict with char $ac_func (); below. - Prefer to if __STDC__ is defined, since - exists even on freestanding compilers. */ -#ifdef __STDC__ -# include -#else -# include -#endif + /* Override any gcc2 internal prototype to avoid an error. */ #ifdef __cplusplus extern "C" -{ #endif /* We use char because int might match the return type of a gcc2 builtin and then its argument prototype would still apply. */ -char $ac_func (); -/* The GNU C library defines this for functions which it implements - to always fail with ENOSYS. Some functions are actually named - something starting with __ and the normal name is an alias. 
*/ -#if defined (__stub_$ac_func) || defined (__stub___$ac_func) -choke me -#else -char (*f) () = $ac_func; -#endif -#ifdef __cplusplus -} -#endif - +char svc_run (); int main () { -return f != $ac_func; +svc_run (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - eval "$as_ac_var=yes" + ac_cv_lib_nsl_svc_run=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -eval "$as_ac_var=no" +ac_cv_lib_nsl_svc_run=no fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS fi -echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5 -echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6 -if test `eval echo '${'$as_ac_var'}'` = yes; then +echo "$as_me:$LINENO: result: $ac_cv_lib_nsl_svc_run" >&5 +echo "${ECHO_T}$ac_cv_lib_nsl_svc_run" >&6 +if test $ac_cv_lib_nsl_svc_run = yes; then cat >>confdefs.h <<_ACEOF -#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1 +#define HAVE_LIBNSL 1 _ACEOF + LIBS="-lnsl $LIBS" + fi -done +fi +;; + esac +fi -for ac_func in mmap munmap -do -as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh` -echo "$as_me:$LINENO: checking for $ac_func" >&5 -echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6 -if eval "test \"\${$as_ac_var+set}\" = set"; then +# Optional Tcl support. +if test "$db_cv_tcl" = "yes"; then + + if test `$LIBTOOL_PROG --config | grep build_libtool_libs | grep no` 2>/dev/null; then + { { echo "$as_me:$LINENO: error: Tcl requires shared libraries" >&5 +echo "$as_me: error: Tcl requires shared libraries" >&2;} + { (exit 1); exit 1; }; } + fi + + + + + if test "${ac_cv_c_tclconfig+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -/* System header to define __stub macros and hopefully few prototypes, - which can conflict with char $ac_func (); below. - Prefer to if __STDC__ is defined, since - exists even on freestanding compilers. */ -#ifdef __STDC__ -# include -#else -# include -#endif -/* Override any gcc2 internal prototype to avoid an error. */ -#ifdef __cplusplus -extern "C" -{ -#endif -/* We use char because int might match the return type of a gcc2 - builtin and then its argument prototype would still apply. */ -char $ac_func (); -/* The GNU C library defines this for functions which it implements - to always fail with ENOSYS. Some functions are actually named - something starting with __ and the normal name is an alias. 
*/ -#if defined (__stub_$ac_func) || defined (__stub___$ac_func) -choke me -#else -char (*f) () = $ac_func; -#endif -#ifdef __cplusplus -} -#endif + + + # First check to see if --with-tclconfig was specified. + if test "${with_tclconfig}" != no; then + if test -f "${with_tclconfig}/tclConfig.sh" ; then + ac_cv_c_tclconfig=`(cd ${with_tclconfig}; pwd)` + else + { { echo "$as_me:$LINENO: error: ${with_tclconfig} directory doesn't contain tclConfig.sh" >&5 +echo "$as_me: error: ${with_tclconfig} directory doesn't contain tclConfig.sh" >&2;} + { (exit 1); exit 1; }; } + fi + fi + + # check in a few common install locations + if test x"${ac_cv_c_tclconfig}" = x ; then + for i in `ls -d /usr/local/lib 2>/dev/null` ; do + if test -f "$i/tclConfig.sh" ; then + ac_cv_c_tclconfig=`(cd $i; pwd)` + break + fi + done + fi + + +fi + + + if test x"${ac_cv_c_tclconfig}" = x ; then + TCL_BIN_DIR="# no Tcl configs found" + { { echo "$as_me:$LINENO: error: can't find Tcl configuration definitions" >&5 +echo "$as_me: error: can't find Tcl configuration definitions" >&2;} + { (exit 1); exit 1; }; } + else + TCL_BIN_DIR=${ac_cv_c_tclconfig} + fi + + + echo "$as_me:$LINENO: checking for existence of $TCL_BIN_DIR/tclConfig.sh" >&5 +echo $ECHO_N "checking for existence of $TCL_BIN_DIR/tclConfig.sh... $ECHO_C" >&6 + + if test -f "$TCL_BIN_DIR/tclConfig.sh" ; then + echo "$as_me:$LINENO: result: loading" >&5 +echo "${ECHO_T}loading" >&6 + . $TCL_BIN_DIR/tclConfig.sh + else + echo "$as_me:$LINENO: result: file not found" >&5 +echo "${ECHO_T}file not found" >&6 + fi + + # DB requires at least version 8.4. + if test ${TCL_MAJOR_VERSION} -lt 8 \ + -o ${TCL_MAJOR_VERSION} -eq 8 -a ${TCL_MINOR_VERSION} -lt 4; then + { { echo "$as_me:$LINENO: error: Berkeley DB requires Tcl version 8.4 or better." >&5 +echo "$as_me: error: Berkeley DB requires Tcl version 8.4 or better." >&2;} + { (exit 1); exit 1; }; } + fi + + # + # The eval is required to do the TCL_DBGX substitution in the + # TCL_LIB_FILE variable + # + eval TCL_LIB_FILE="${TCL_LIB_FILE}" + eval TCL_LIB_FLAG="${TCL_LIB_FLAG}" + eval "TCL_LIB_SPEC=\"${TCL_LIB_SPEC}\"" + + # + # If the DB Tcl library isn't loaded with the Tcl spec and library + # flags on AIX, the resulting libdb_tcl-X.Y.so.0 will drop core at + # load time. [#4843] Furthermore, with Tcl 8.3, the link flags + # given by the Tcl spec are insufficient for our use. [#5779] + # + case "$host_os" in + aix4.[2-9].*) + LIBTSO_LIBS="$LIBTSO_LIBS $TCL_LIB_SPEC $TCL_LIB_FLAG" + LIBTSO_LIBS="$LIBTSO_LIBS -L$TCL_EXEC_PREFIX/lib -ltcl$TCL_VERSION";; + aix*) + LIBTSO_LIBS="$LIBTSO_LIBS $TCL_LIB_SPEC $TCL_LIB_FLAG";; + esac + + + + + + TCL_TCLSH="${TCL_PREFIX}/bin/tclsh${TCL_VERSION}" + + + if test x"$TCL_PREFIX" != x && test -f "$TCL_PREFIX/include/tcl.h"; then + TCFLAGS="-I$TCL_PREFIX/include" + fi + + INSTALL_LIBS="${INSTALL_LIBS} \$(libtso_target)" + +fi + +# Optional sequence code. + + echo "$as_me:$LINENO: checking for 64-bit integral type support for sequences" >&5 +echo $ECHO_N "checking for 64-bit integral type support for sequences... $ECHO_C" >&6 + db_cv_build_sequence="yes" + + # Have to be able to cast variables to the "unsigned long long" and + # "long long" types, that's our cast for the printf "%ll[du]" format. + if test "$ac_cv_type_long_long" = "no"; then + db_cv_build_sequence="no" + fi + if test "$ac_cv_type_unsigned_long_long" = "no"; then + db_cv_build_sequence="no" + fi + + # Test to see if we can declare variables of the appropriate size + # and format them. 
If we're cross-compiling, all we get is a link + # test, which won't test for the appropriate printf format strings. + if test "$db_cv_build_sequence" = "yes"; then + if test "$cross_compiling" = yes; then + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ int main () { -return f != $ac_func; + + long long l; + unsigned long long u; + char buf100; + + buf0 = 'a'; + l = 9223372036854775807LL; + (void)snprintf(buf, sizeof(buf), "%lld", l); + if (strcmp(buf, "9223372036854775807")) + return (1); + u = 18446744073709551615ULL; + (void)snprintf(buf, sizeof(buf), "%llu", u); + if (strcmp(buf, "18446744073709551615")) + return (1); + return (0); + ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 + (eval $ac_link) 2>conftest.er1 ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - eval "$as_ac_var=yes" + : else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -eval "$as_ac_var=no" -fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext -fi -echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5 -echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6 -if test `eval echo '${'$as_ac_var'}'` = yes; then - cat >>confdefs.h <<_ACEOF -#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1 -_ACEOF - -else - mmap_ok=no +db_cv_build_sequence="no" fi -done -;; -esac - -# Second, we look for shmget. -# -# SunOS has the shmget(2) interfaces, but there appears to be a missing -# #include file, so we ignore them. -shmget_ok=no -case "$host_os" in -sunos*) - { echo "$as_me:$LINENO: WARNING: shmget(2) interface ignored on $host_os-$host_vendor." >&5 -echo "$as_me: WARNING: shmget(2) interface ignored on $host_os-$host_vendor." >&2;};; -*) - shmget_ok=yes - -for ac_func in shmget -do -as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh` -echo "$as_me:$LINENO: checking for $ac_func" >&5 -echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6 -if eval "test \"\${$as_ac_var+set}\" = set"; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext else cat >conftest.$ac_ext <<_ACEOF -#line $LINENO "configure" /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -/* System header to define __stub macros and hopefully few prototypes, - which can conflict with char $ac_func (); below. - Prefer to if __STDC__ is defined, since - exists even on freestanding compilers. */ -#ifdef __STDC__ -# include -#else -# include -#endif -/* Override any gcc2 internal prototype to avoid an error. */ -#ifdef __cplusplus -extern "C" -{ -#endif -/* We use char because int might match the return type of a gcc2 - builtin and then its argument prototype would still apply. 
*/ -char $ac_func (); -/* The GNU C library defines this for functions which it implements - to always fail with ENOSYS. Some functions are actually named - something starting with __ and the normal name is an alias. */ -#if defined (__stub_$ac_func) || defined (__stub___$ac_func) -choke me -#else -char (*f) () = $ac_func; -#endif -#ifdef __cplusplus -} -#endif -int -main () -{ -return f != $ac_func; - ; - return 0; -} + main() { + long long l; + unsigned long long u; + char buf[100]; + + buf[0] = 'a'; + l = 9223372036854775807LL; + (void)snprintf(buf, sizeof(buf), "%lld", l); + if (strcmp(buf, "9223372036854775807")) + return (1); + u = 18446744073709551615ULL; + (void)snprintf(buf, sizeof(buf), "%llu", u); + if (strcmp(buf, "18446744073709551615")) + return (1); + return (0); + } _ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext +rm -f conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 (eval $ac_link) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && - { ac_try='test -s conftest$ac_exeext' + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - eval "$as_ac_var=yes" + : else - echo "$as_me: failed program was:" >&5 + echo "$as_me: program exited with status $ac_status" >&5 +echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -eval "$as_ac_var=no" +( exit $ac_status ) +db_cv_build_sequence="no" fi -rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi -echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5 -echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6 -if test `eval echo '${'$as_ac_var'}'` = yes; then - cat >>confdefs.h <<_ACEOF -#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1 + fi + if test "$db_cv_build_sequence" = "yes"; then + cat >>confdefs.h <<\_ACEOF +#define HAVE_SEQUENCE 1 +_ACEOF + + + + + + db_seq_decl="typedef int64_t db_seq_t;"; + else + # It still has to compile, but it won't run. + db_seq_decl="typedef int db_seq_t;"; + fi + echo "$as_me:$LINENO: result: $db_cv_build_sequence" >&5 +echo "${ECHO_T}$db_cv_build_sequence" >&6 + + +# Optional DB 1.85 compatibility API. +if test "$db_cv_compat185" = "yes"; then + ADDITIONAL_INCS="db_185.h $ADDITIONAL_INCS" + + ADDITIONAL_OBJS="db185${o} $ADDITIONAL_OBJS" +fi + +# Optional utilities. +if test "$db_cv_dump185" = "yes"; then + ADDITIONAL_PROGS="db_dump185 $ADDITIONAL_PROGS" +fi + +# You can disable pieces of functionality to save space. +# +# Btree is always configured: it is the standard method, and Hash off-page +# duplicates require it. +ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(BTREE_OBJS)" + +# Hash can be disabled. +if test "$db_cv_build_hash" = "yes"; then + cat >>confdefs.h <<\_ACEOF +#define HAVE_HASH 1 _ACEOF + + + ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(HASH_OBJS)" + if test "$db_cv_build_verify" = "yes"; then + ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(HASH_VRFY_OBJS)" + fi else - shmget_ok=no + ADDITIONAL_OBJS="$ADDITIONAL_OBJS hash_stub${o}" fi -done -;; -esac -# We require either mmap/munmap(2) or shmget(2). -if test "$mmap_ok" = "no" -a "$shmget_ok" = "no"; then - { echo "$as_me:$LINENO: WARNING: Neither mmap/munmap(2) or shmget(2) library functions." >&5 -echo "$as_me: WARNING: Neither mmap/munmap(2) or shmget(2) library functions." >&2;} +# Queue can be disabled. 
+if test "$db_cv_build_queue" = "yes"; then + cat >>confdefs.h <<\_ACEOF +#define HAVE_QUEUE 1 +_ACEOF + + + + ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(QUEUE_OBJS)" + if test "$db_cv_build_verify" = "yes"; then + ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(QUEUE_VRFY_OBJS)" + fi +else + ADDITIONAL_OBJS="$ADDITIONAL_OBJS qam_stub${o}" +fi + +# Replication can be disabled. +if test "$db_cv_build_replication" = "yes"; then + cat >>confdefs.h <<\_ACEOF +#define HAVE_REPLICATION 1 +_ACEOF + + + + ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(REP_OBJS)" +else + ADDITIONAL_OBJS="$ADDITIONAL_OBJS rep_stub${o}" +fi + +# The statistics code can be disabled. +if test "$db_cv_build_statistics" = "yes"; then + cat >>confdefs.h <<\_ACEOF +#define HAVE_STATISTICS 1 +_ACEOF + + + +fi + +# The verification code can be disabled. +if test "$db_cv_build_verify" = "yes"; then + cat >>confdefs.h <<\_ACEOF +#define HAVE_VERIFY 1 +_ACEOF + + + + ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(BTREE_VRFY_OBJS)" +else + ADDITIONAL_OBJS="$ADDITIONAL_OBJS db_vrfy_stub${o}" +fi + +# The crypto code can be disabled. +if test -d "$srcdir/../crypto" -a "$db_cv_build_cryptography" = "yes"; then + cat >>confdefs.h <<\_ACEOF +#define HAVE_CRYPTO 1 +_ACEOF + + + + + CRYPTO_OBJS="\$(CRYPTO_OBJS)" +else + CRYPTO_OBJS="crypto_stub${o}" fi # We need to add the additional object files into the Makefile with the correct @@ -30357,10 +34523,10 @@ fi # to do DB_VERSION_UNIQUE_NAME substitution. if test "$db_cv_uniquename" = "yes"; then CREATE_LIST="$CREATE_LIST - db.h:$srcdir/../dbinc/db.in:$srcdir/../dbinc_auto/rpc_defs.in:$srcdir/../dbinc_auto/ext_def.in:$srcdir/../dbinc_auto/ext_prot.in" + db.h:$srcdir/../dbinc/db.in:$srcdir/../dbinc_auto/ext_def.in:$srcdir/../dbinc_auto/ext_prot.in" else CREATE_LIST="$CREATE_LIST - db.h:$srcdir/../dbinc/db.in:$srcdir/../dbinc_auto/rpc_defs.in:$srcdir/../dbinc_auto/ext_prot.in" + db.h:$srcdir/../dbinc/db.in:$srcdir/../dbinc_auto/ext_prot.in" fi # If configured for unique names, create the db_int_uext.h file (which @@ -30387,10 +34553,6 @@ if test "$db_cv_compat185" = "yes"; then fi fi -if test "$db_cv_rpm" = "yes"; then - CREATE_LIST="$CREATE_LIST db.spec:../dist/db.spec.in" -fi - ac_config_files="$ac_config_files $CREATE_LIST" cat >confcache <<\_ACEOF @@ -30421,13 +34583,13 @@ _ACEOF # `set' does not quote correctly, so add quotes (double-quote # substitution turns \\\\ into \\, and sed turns \\ into \). sed -n \ - "s/'/'\\\\''/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" + "s/'/'\\\\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; *) # `set' quotes correctly as required by POSIX, so do not add quotes. sed -n \ - "s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1=\\2/p" + "s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1=\\2/p" ;; esac; } | @@ -30457,13 +34619,13 @@ test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' # trailing colons and then remove the whole line if VPATH becomes empty # (actually we leave an empty line to preserve line numbers). if test "x$srcdir" = x.; then - ac_vpsub='/^[ ]*VPATH[ ]*=/{ + ac_vpsub='/^[ ]*VPATH[ ]*=/{ s/:*\$(srcdir):*/:/; s/:*\${srcdir}:*/:/; s/:*@srcdir@:*/:/; -s/^\([^=]*=[ ]*\):*/\1/; +s/^\([^=]*=[ ]*\):*/\1/; s/:*$//; -s/^[^=]*=[ ]*$//; +s/^[^=]*=[ ]*$//; }' fi @@ -30474,7 +34636,7 @@ ac_ltlibobjs= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. 
ac_i=`echo "$ac_i" | - sed 's/\$U\././;s/\.o$//;s/\.obj$//'` + sed 's/\$U\././;s/\.o$//;s/\.obj$//'` # 2. Add them. ac_libobjs="$ac_libobjs $ac_i\$U.$ac_objext" ac_ltlibobjs="$ac_ltlibobjs $ac_i"'$U.lo' @@ -30518,9 +34680,10 @@ if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then elif test -n "${BASH_VERSION+set}" && (set -o posix) >/dev/null 2>&1; then set -o posix fi +DUALCASE=1; export DUALCASE # for MKS sh # Support unset when possible. -if (FOO=FOO; unset FOO) >/dev/null 2>&1; then +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then as_unset=unset else as_unset=false @@ -30539,7 +34702,7 @@ for as_var in \ LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \ LC_TELEPHONE LC_TIME do - if (set +x; test -n "`(eval $as_var=C; export $as_var) 2>&1`"); then + if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then eval $as_var=C; export $as_var else $as_unset $as_var @@ -30718,16 +34881,17 @@ rm -f conf$$ conf$$.exe conf$$.file if mkdir -p . 2>/dev/null; then as_mkdir_p=: else + test -d ./-p && rmdir ./-p as_mkdir_p=false fi as_executable_p="test -f" # Sed expression to map a string onto a valid CPP name. -as_tr_cpp="sed y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g" +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. -as_tr_sh="sed y%*+%pp%;s%[^_$as_cr_alnum]%_%g" +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" # IFS @@ -30753,8 +34917,8 @@ _ASBOX } >&5 cat >&5 <<_CSEOF -This file was extended by Berkeley DB $as_me 4.2.52, which was -generated by GNU Autoconf 2.57. Invocation command line was +This file was extended by Berkeley DB $as_me 4.3.14, which was +generated by GNU Autoconf 2.59. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS @@ -30798,9 +34962,9 @@ Usage: $0 [OPTIONS] [FILE]... -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] - instantiate the configuration file FILE + instantiate the configuration file FILE --header=FILE[:TEMPLATE] - instantiate the configuration header FILE + instantiate the configuration header FILE Configuration files: $config_files @@ -30813,12 +34977,11 @@ _ACEOF cat >>$CONFIG_STATUS <<_ACEOF ac_cs_version="\\ -Berkeley DB config.status 4.2.52 -configured by $0, generated by GNU Autoconf 2.57, +Berkeley DB config.status 4.3.14 +configured by $0, generated by GNU Autoconf 2.59, with options \\"`echo "$ac_configure_args" | sed 's/[\\""\`\$]/\\\\&/g'`\\" -Copyright 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001 -Free Software Foundation, Inc. +Copyright (C) 2003 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." 
srcdir=$srcdir @@ -31023,6 +35186,8 @@ s,@CPPFLAGS@,$CPPFLAGS,;t t s,@CRYPTO_OBJS@,$CRYPTO_OBJS,;t t s,@CXX@,$CXX,;t t s,@CXXFLAGS@,$CXXFLAGS,;t t +s,@DB_PROTO1@,$DB_PROTO1,;t t +s,@DB_PROTO2@,$DB_PROTO2,;t t s,@DEFAULT_LIB@,$DEFAULT_LIB,;t t s,@DEFAULT_LIB_CXX@,$DEFAULT_LIB_CXX,;t t s,@INSTALLER@,$INSTALLER,;t t @@ -31038,7 +35203,6 @@ s,@LIBTSO_LIBS@,$LIBTSO_LIBS,;t t s,@LIBTSO_MODSUFFIX@,$LIBTSO_MODSUFFIX,;t t s,@LIBTSO_MODULE@,$LIBTSO_MODULE,;t t s,@LIBXSO_LIBS@,$LIBXSO_LIBS,;t t -s,@LOAD_LIBS@,$LOAD_LIBS,;t t s,@MAKEFILE_CC@,$MAKEFILE_CC,;t t s,@MAKEFILE_CCLINK@,$MAKEFILE_CCLINK,;t t s,@MAKEFILE_CXX@,$MAKEFILE_CXX,;t t @@ -31049,11 +35213,9 @@ s,@OSDIR@,$OSDIR,;t t s,@POSTLINK@,$POSTLINK,;t t s,@REPLACEMENT_OBJS@,$REPLACEMENT_OBJS,;t t s,@RPC_CLIENT_OBJS@,$RPC_CLIENT_OBJS,;t t -s,@RPM_BUILD@,$RPM_BUILD,;t t -s,@RPM_POST_INSTALL@,$RPM_POST_INSTALL,;t t -s,@RPM_POST_UNINSTALL@,$RPM_POST_UNINSTALL,;t t +s,@RPC_SERVER_H@,$RPC_SERVER_H,;t t s,@SOFLAGS@,$SOFLAGS,;t t -s,@db_cv_path_rpm_archive@,$db_cv_path_rpm_archive,;t t +s,@TEST_LIBS@,$TEST_LIBS,;t t s,@db_int_def@,$db_int_def,;t t s,@o@,$o,;t t s,@DB_VERSION_MAJOR@,$DB_VERSION_MAJOR,;t t @@ -31067,9 +35229,6 @@ s,@db_cv_path_chmod@,$db_cv_path_chmod,;t t s,@ac_ct_db_cv_path_chmod@,$ac_ct_db_cv_path_chmod,;t t s,@db_cv_path_cp@,$db_cv_path_cp,;t t s,@ac_ct_db_cv_path_cp@,$ac_ct_db_cv_path_cp,;t t -s,@path_ldconfig@,$path_ldconfig,;t t -s,@ac_ct_path_ldconfig@,$ac_ct_path_ldconfig,;t t -s,@db_cv_path_ldconfig@,$db_cv_path_ldconfig,;t t s,@db_cv_path_ln@,$db_cv_path_ln,;t t s,@ac_ct_db_cv_path_ln@,$ac_ct_db_cv_path_ln,;t t s,@db_cv_path_mkdir@,$db_cv_path_mkdir,;t t @@ -31079,8 +35238,8 @@ s,@ac_ct_path_ranlib@,$ac_ct_path_ranlib,;t t s,@db_cv_path_ranlib@,$db_cv_path_ranlib,;t t s,@db_cv_path_rm@,$db_cv_path_rm,;t t s,@ac_ct_db_cv_path_rm@,$ac_ct_db_cv_path_rm,;t t -s,@db_cv_path_rpm@,$db_cv_path_rpm,;t t -s,@ac_ct_db_cv_path_rpm@,$ac_ct_db_cv_path_rpm,;t t +s,@db_cv_path_rpcgen@,$db_cv_path_rpcgen,;t t +s,@ac_ct_db_cv_path_rpcgen@,$ac_ct_db_cv_path_rpcgen,;t t s,@path_sh@,$path_sh,;t t s,@ac_ct_path_sh@,$ac_ct_path_sh,;t t s,@db_cv_path_sh@,$db_cv_path_sh,;t t @@ -31097,8 +35256,6 @@ s,@CC@,$CC,;t t s,@ac_ct_CC@,$ac_ct_CC,;t t s,@EXEEXT@,$EXEEXT,;t t s,@OBJEXT@,$OBJEXT,;t t -s,@DB_PROTO1@,$DB_PROTO1,;t t -s,@DB_PROTO2@,$DB_PROTO2,;t t s,@DB_CONST@,$DB_CONST,;t t s,@CCC@,$CCC,;t t s,@ac_ct_CCC@,$ac_ct_CCC,;t t @@ -31125,12 +35282,9 @@ s,@JAVAC@,$JAVAC,;t t s,@JAVA@,$JAVA,;t t s,@uudecode@,$uudecode,;t t s,@_ACJNI_JAVAC@,$_ACJNI_JAVAC,;t t -s,@TCFLAGS@,$TCFLAGS,;t t -s,@TCL_BIN_DIR@,$TCL_BIN_DIR,;t t -s,@TCL_SRC_DIR@,$TCL_SRC_DIR,;t t -s,@TCL_LIB_FILE@,$TCL_LIB_FILE,;t t -s,@TCL_TCLSH@,$TCL_TCLSH,;t t -s,@inttypes_decl@,$inttypes_decl,;t t +s,@inttypes_h_decl@,$inttypes_h_decl,;t t +s,@stdint_h_decl@,$stdint_h_decl,;t t +s,@stddef_h_decl@,$stddef_h_decl,;t t s,@u_char_decl@,$u_char_decl,;t t s,@u_short_decl@,$u_short_decl,;t t s,@u_int_decl@,$u_int_decl,;t t @@ -31140,10 +35294,18 @@ s,@u_int16_decl@,$u_int16_decl,;t t s,@int16_decl@,$int16_decl,;t t s,@u_int32_decl@,$u_int32_decl,;t t s,@int32_decl@,$int32_decl,;t t +s,@u_int64_decl@,$u_int64_decl,;t t +s,@int64_decl@,$int64_decl,;t t s,@ssize_t_decl@,$ssize_t_decl,;t t -s,@db_align_t_decl@,$db_align_t_decl,;t t -s,@db_alignp_t_decl@,$db_alignp_t_decl,;t t +s,@uintmax_t_decl@,$uintmax_t_decl,;t t +s,@uintptr_t_decl@,$uintptr_t_decl,;t t s,@LIBOBJS@,$LIBOBJS,;t t +s,@TCFLAGS@,$TCFLAGS,;t t +s,@TCL_BIN_DIR@,$TCL_BIN_DIR,;t t +s,@TCL_SRC_DIR@,$TCL_SRC_DIR,;t t 
+s,@TCL_LIB_FILE@,$TCL_LIB_FILE,;t t +s,@TCL_TCLSH@,$TCL_TCLSH,;t t +s,@db_seq_decl@,$db_seq_decl,;t t s,@LTLIBOBJS@,$LTLIBOBJS,;t t CEOF @@ -31174,9 +35336,9 @@ _ACEOF (echo ':t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b' && cat $tmp/subs.frag) >$tmp/subs-$ac_sed_frag.sed if test -z "$ac_sed_cmds"; then - ac_sed_cmds="sed -f $tmp/subs-$ac_sed_frag.sed" + ac_sed_cmds="sed -f $tmp/subs-$ac_sed_frag.sed" else - ac_sed_cmds="$ac_sed_cmds | sed -f $tmp/subs-$ac_sed_frag.sed" + ac_sed_cmds="$ac_sed_cmds | sed -f $tmp/subs-$ac_sed_frag.sed" fi ac_sed_frag=`expr $ac_sed_frag + 1` ac_beg=$ac_end @@ -31194,21 +35356,21 @@ for ac_file in : $CONFIG_FILES; do test "x$ac_file" = x: && continue # Support "outfile[:infile[:infile...]]", defaulting infile="outfile.in". case $ac_file in - | *:- | *:-:* ) # input from stdin - cat >$tmp/stdin - ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'` - ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;; + cat >$tmp/stdin + ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'` + ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;; *:* ) ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'` - ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;; + ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;; * ) ac_file_in=$ac_file.in ;; esac # Compute @srcdir@, @top_srcdir@, and @INSTALL@ for subdirectories. ac_dir=`(dirname "$ac_file") 2>/dev/null || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$ac_file" : 'X\(//\)[^/]' \| \ - X"$ac_file" : 'X\(//\)$' \| \ - X"$ac_file" : 'X\(/\)' \| \ - . : '\(.\)' 2>/dev/null || + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } /^X\(\/\/\)[^/].*/{ s//\1/; q; } @@ -31224,10 +35386,10 @@ echo X"$ac_file" | as_dirs="$as_dir $as_dirs" as_dir=`(dirname "$as_dir") 2>/dev/null || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| \ - . : '\(.\)' 2>/dev/null || + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } /^X\(\/\/\)[^/].*/{ s//\1/; q; } @@ -31265,12 +35427,45 @@ case $srcdir in ac_srcdir=$ac_top_builddir$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_builddir$srcdir ;; esac -# Don't blindly perform a `cd "$ac_dir"/$ac_foo && pwd` since $ac_foo can be -# absolute. -ac_abs_builddir=`cd "$ac_dir" && cd $ac_builddir && pwd` -ac_abs_top_builddir=`cd "$ac_dir" && cd ${ac_top_builddir}. && pwd` -ac_abs_srcdir=`cd "$ac_dir" && cd $ac_srcdir && pwd` -ac_abs_top_srcdir=`cd "$ac_dir" && cd $ac_top_srcdir && pwd` + +# Do not use `cd foo && pwd` to compute absolute paths, because +# the directories may not exist. +case `pwd` in +.) ac_abs_builddir="$ac_dir";; +*) + case "$ac_dir" in + .) ac_abs_builddir=`pwd`;; + [\\/]* | ?:[\\/]* ) ac_abs_builddir="$ac_dir";; + *) ac_abs_builddir=`pwd`/"$ac_dir";; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_top_builddir=${ac_top_builddir}.;; +*) + case ${ac_top_builddir}. in + .) ac_abs_top_builddir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_top_builddir=${ac_top_builddir}.;; + *) ac_abs_top_builddir=$ac_abs_builddir/${ac_top_builddir}.;; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_srcdir=$ac_srcdir;; +*) + case $ac_srcdir in + .) 
ac_abs_srcdir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_srcdir=$ac_srcdir;; + *) ac_abs_srcdir=$ac_abs_builddir/$ac_srcdir;; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_top_srcdir=$ac_top_srcdir;; +*) + case $ac_top_srcdir in + .) ac_abs_top_srcdir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_top_srcdir=$ac_top_srcdir;; + *) ac_abs_top_srcdir=$ac_abs_builddir/$ac_top_srcdir;; + esac;; +esac case $INSTALL in @@ -31292,7 +35487,7 @@ echo "$as_me: creating $ac_file" >&6;} configure_input="$ac_file. " fi configure_input=$configure_input"Generated from `echo $ac_file_in | - sed 's,.*/,,'` by configure." + sed 's,.*/,,'` by configure." # First look for the input files in the build tree, otherwise in the # src tree. @@ -31301,24 +35496,24 @@ echo "$as_me: creating $ac_file" >&6;} case $f in -) echo $tmp/stdin ;; [\\/$]*) - # Absolute (can't be DOS-style, as IFS=:) - test -f "$f" || { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5 + # Absolute (can't be DOS-style, as IFS=:) + test -f "$f" || { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5 echo "$as_me: error: cannot find input file: $f" >&2;} { (exit 1); exit 1; }; } - echo $f;; + echo "$f";; *) # Relative - if test -f "$f"; then - # Build tree - echo $f - elif test -f "$srcdir/$f"; then - # Source tree - echo $srcdir/$f - else - # /dev/null tree - { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5 + if test -f "$f"; then + # Build tree + echo "$f" + elif test -f "$srcdir/$f"; then + # Source tree + echo "$srcdir/$f" + else + # /dev/null tree + { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5 echo "$as_me: error: cannot find input file: $f" >&2;} { (exit 1); exit 1; }; } - fi;; + fi;; esac done` || { (exit 1); exit 1; } _ACEOF @@ -31360,12 +35555,12 @@ cat >>$CONFIG_STATUS <<\_ACEOF # NAME is the cpp macro being defined and VALUE is the value it is being given. # # ac_d sets the value in "#define NAME VALUE" lines. -ac_dA='s,^\([ ]*\)#\([ ]*define[ ][ ]*\)' -ac_dB='[ ].*$,\1#\2' +ac_dA='s,^\([ ]*\)#\([ ]*define[ ][ ]*\)' +ac_dB='[ ].*$,\1#\2' ac_dC=' ' ac_dD=',;t' # ac_u turns "#undef NAME" without trailing blanks into "#define NAME VALUE". -ac_uA='s,^\([ ]*\)#\([ ]*\)undef\([ ][ ]*\)' +ac_uA='s,^\([ ]*\)#\([ ]*\)undef\([ ][ ]*\)' ac_uB='$,\1#\2define\3' ac_uC=' ' ac_uD=',;t' @@ -31374,11 +35569,11 @@ for ac_file in : $CONFIG_HEADERS; do test "x$ac_file" = x: && continue # Support "outfile[:infile[:infile...]]", defaulting infile="outfile.in". case $ac_file in - | *:- | *:-:* ) # input from stdin - cat >$tmp/stdin - ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'` - ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;; + cat >$tmp/stdin + ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'` + ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;; *:* ) ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'` - ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;; + ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;; * ) ac_file_in=$ac_file.in ;; esac @@ -31392,28 +35587,29 @@ echo "$as_me: creating $ac_file" >&6;} case $f in -) echo $tmp/stdin ;; [\\/$]*) - # Absolute (can't be DOS-style, as IFS=:) - test -f "$f" || { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5 + # Absolute (can't be DOS-style, as IFS=:) + test -f "$f" || { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5 echo "$as_me: error: cannot find input file: $f" >&2;} { (exit 1); exit 1; }; } - echo $f;; + # Do quote $f, to prevent DOS paths from being IFS'd. 
+ echo "$f";; *) # Relative - if test -f "$f"; then - # Build tree - echo $f - elif test -f "$srcdir/$f"; then - # Source tree - echo $srcdir/$f - else - # /dev/null tree - { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5 + if test -f "$f"; then + # Build tree + echo "$f" + elif test -f "$srcdir/$f"; then + # Source tree + echo "$srcdir/$f" + else + # /dev/null tree + { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5 echo "$as_me: error: cannot find input file: $f" >&2;} { (exit 1); exit 1; }; } - fi;; + fi;; esac done` || { (exit 1); exit 1; } # Remove the trailing spaces. - sed 's/[ ]*$//' $ac_file_inputs >$tmp/in + sed 's/[ ]*$//' $ac_file_inputs >$tmp/in _ACEOF @@ -31436,9 +35632,9 @@ s/[\\&,]/\\&/g s,[\\$`],\\&,g t clear : clear -s,^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*\)\(([^)]*)\)[ ]*\(.*\)$,${ac_dA}\1${ac_dB}\1\2${ac_dC}\3${ac_dD},gp +s,^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*\)\(([^)]*)\)[ ]*\(.*\)$,${ac_dA}\1${ac_dB}\1\2${ac_dC}\3${ac_dD},gp t end -s,^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\)$,${ac_dA}\1${ac_dB}\1${ac_dC}\2${ac_dD},gp +s,^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\)$,${ac_dA}\1${ac_dB}\1${ac_dC}\2${ac_dD},gp : end _ACEOF # If some macros were called several times there might be several times @@ -31452,13 +35648,13 @@ rm -f confdef2sed.sed # example, in the case of _POSIX_SOURCE, which is predefined and required # on some systems where configure will not decide to define it. cat >>conftest.undefs <<\_ACEOF -s,^[ ]*#[ ]*undef[ ][ ]*[a-zA-Z_][a-zA-Z_0-9]*,/* & */, +s,^[ ]*#[ ]*undef[ ][ ]*[a-zA-Z_][a-zA-Z_0-9]*,/* & */, _ACEOF # Break up conftest.defines because some shells have a limit on the size # of here documents, and old seds have small limits too (100 cmds). echo ' # Handle all the #define templates only if necessary.' >>$CONFIG_STATUS -echo ' if grep "^[ ]*#[ ]*define" $tmp/in >/dev/null; then' >>$CONFIG_STATUS +echo ' if grep "^[ ]*#[ ]*define" $tmp/in >/dev/null; then' >>$CONFIG_STATUS echo ' # If there are no defines, we may have an empty if/fi' >>$CONFIG_STATUS echo ' :' >>$CONFIG_STATUS rm -f conftest.tail @@ -31467,7 +35663,7 @@ do # Write a limited-size here document to $tmp/defines.sed. echo ' cat >$tmp/defines.sed <>$CONFIG_STATUS # Speed up: don't consider the non `#define' lines. - echo '/^[ ]*#[ ]*define/!b' >>$CONFIG_STATUS + echo '/^[ ]*#[ ]*define/!b' >>$CONFIG_STATUS # Work around the forget-to-reset-the-flag bug. echo 't clr' >>$CONFIG_STATUS echo ': clr' >>$CONFIG_STATUS @@ -31494,7 +35690,7 @@ do # Write a limited-size here document to $tmp/undefs.sed. echo ' cat >$tmp/undefs.sed <>$CONFIG_STATUS # Speed up: don't consider the non `#undef' - echo '/^[ ]*#[ ]*undef/!b' >>$CONFIG_STATUS + echo '/^[ ]*#[ ]*undef/!b' >>$CONFIG_STATUS # Work around the forget-to-reset-the-flag bug. echo 't clr' >>$CONFIG_STATUS echo ': clr' >>$CONFIG_STATUS @@ -31528,10 +35724,10 @@ echo "$as_me: $ac_file is unchanged" >&6;} else ac_dir=`(dirname "$ac_file") 2>/dev/null || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$ac_file" : 'X\(//\)[^/]' \| \ - X"$ac_file" : 'X\(//\)$' \| \ - X"$ac_file" : 'X\(/\)' \| \ - . : '\(.\)' 2>/dev/null || + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| \ + . 
: '\(.\)' 2>/dev/null || echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } /^X\(\/\/\)[^/].*/{ s//\1/; q; } @@ -31547,10 +35743,10 @@ echo X"$ac_file" | as_dirs="$as_dir $as_dirs" as_dir=`(dirname "$as_dir") 2>/dev/null || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| \ - . : '\(.\)' 2>/dev/null || + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } /^X\(\/\/\)[^/].*/{ s//\1/; q; } diff --git a/db/dist/configure.ac b/db/dist/configure.ac index 839b47da8..5c337c590 100644 --- a/db/dist/configure.ac +++ b/db/dist/configure.ac @@ -1,4 +1,4 @@ -# $Id: configure.ac,v 11.198 2003/10/14 20:24:06 mjc Exp $ +# $Id: configure.ac,v 11.219 2004/09/16 01:27:42 bostic Exp $ # Process this file with autoconf to produce a configure script. PACKAGE=db @@ -33,6 +33,8 @@ AC_SUBST(CPPFLAGS) AC_SUBST(CRYPTO_OBJS) AC_SUBST(CXX) AC_SUBST(CXXFLAGS) +AC_SUBST(DB_PROTO1) +AC_SUBST(DB_PROTO2) AC_SUBST(DEFAULT_LIB) AC_SUBST(DEFAULT_LIB_CXX) AC_SUBST(INSTALLER) @@ -49,7 +51,6 @@ AC_SUBST(LIBTSO_LIBS) AC_SUBST(LIBTSO_MODSUFFIX) AC_SUBST(LIBTSO_MODULE) AC_SUBST(LIBXSO_LIBS) -AC_SUBST(LOAD_LIBS) AC_SUBST(MAKEFILE_CC) AC_SUBST(MAKEFILE_CCLINK) AC_SUBST(MAKEFILE_CXX) @@ -61,20 +62,12 @@ AC_SUBST(PATH_SEPARATOR) AC_SUBST(POSTLINK) AC_SUBST(REPLACEMENT_OBJS) AC_SUBST(RPC_CLIENT_OBJS) -AC_SUBST(RPM_BUILD) -AC_SUBST(RPM_POST_INSTALL) -AC_SUBST(RPM_POST_UNINSTALL) +AC_SUBST(RPC_SERVER_H) AC_SUBST(SOFLAGS) -AC_SUBST(db_cv_path_rpm_archive) +AC_SUBST(TEST_LIBS) AC_SUBST(db_int_def) AC_SUBST(o) -# RPM needs the current absolute path. -# RPM needs the list of original arguments, but we don't include the RPM -# option itself. -CONFIGURATION_PATH=${PWD-`pwd`} -CONFIGURATION_ARGS=`echo "$*" | sed -e 's/--with-rpm[[^ ]]*//'` - # Set the default installation location. AC_PREFIX_DEFAULT(/usr/local/BerkeleyDB.__EDIT_DB_VERSION_MAJOR__.__EDIT_DB_VERSION_MINOR__) @@ -111,7 +104,7 @@ fi if test "$db_cv_umrw" = "yes"; then AC_DEFINE(UMRW) AH_TEMPLATE(UMRW, - [Define to 1 to mask harmless unitialized memory read/writes.]) + [Define to 1 to mask harmless uninitialized memory read/writes.]) fi if test "$db_cv_test" = "yes"; then @@ -124,31 +117,12 @@ fi AM_PROGRAMS_SET AC_PROG_INSTALL -# RPM support: change the standard make and install targets -if test "$db_cv_rpm" = "yes"; then - BUILD_TARGET="rpm_build" - - # Check if we are running RPM version 3 or 4. - case "`rpm --version`" in - *version\ 4*) - RPM_BUILD="rpmbuild" - echo "_topdir $CONFIGURATION_PATH" > rpm-macro-defines;; - *version\ 3*) - RPM_BUILD="rpm" - echo "topdir: $CONFIGURATION_PATH" > rpm-macro-defines;; - esac - INSTALL_TARGET="rpm_install" -else - BUILD_TARGET="library_build" - INSTALL_TARGET="library_install" -fi +BUILD_TARGET="library_build" +INSTALL_TARGET="library_install" # This is where we handle stuff that autoconf can't handle: compiler, # preprocessor and load flags, libraries that the standard tests don't -# look for. The default optimization is -O. We would like to set the -# default optimization for systems using gcc to -O2, but we can't. By -# the time we know we're using gcc, it's too late to set optimization -# flags. +# look for. # # There are additional libraries we need for some compiler/architecture # combinations. 
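The configure.ac hunk above keeps the long-standing convention that per-platform compiler defaults never override what the caller exported: assignments such as `CC=${CC-"xlc_r"}` only take effect when CC is unset in the environment. A compact sketch of the pattern, with an invented platform case and an assumed final CFLAGS hookup shown purely for illustration:

    # ${VAR-default} expands to the default only when VAR is unset, so values
    # exported by the user always win over these per-platform defaults.
    optimize_def="-O"
    case "$host_os" in
    exampleos*)                       # illustrative platform, not a real case
        optimize_def="-O2"
        CC=${CC-"vendor_cc"}          # hypothetical vendor compiler
        CPPFLAGS="$CPPFLAGS -D_REENTRANT";;
    esac
    CFLAGS=${CFLAGS-$optimize_def}    # assumed wiring of optimize_def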
@@ -159,7 +133,6 @@ fi # The makefile CC may be different than the CC used in config testing, # because the makefile CC may be set to use $(LIBTOOL). # -# XXX # Don't override anything if it's already set from the environment. optimize_def="-O" case "$host_os" in @@ -168,19 +141,14 @@ aix4.3.*|aix5*) CC=${CC-"xlc_r"} CPPFLAGS="$CPPFLAGS -D_THREAD_SAFE" LDFLAGS="$LDFLAGS -Wl,-brtl";; -bsdi3*) optimize_def="-O2" - CC=${CC-"shlicc2"} +bsdi3*) CC=${CC-"shlicc2"} LIBS="$LIBS -lipc";; -bsdi*) optimize_def="-O2";; cygwin*) - optimize_def="-O2" CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE -D_REENTRANT";; freebsd*) - optimize_def="-O2" CPPFLAGS="$CPPFLAGS -D_THREAD_SAFE" LDFLAGS="$LDFLAGS -pthread";; gnu*|k*bsd*-gnu|linux*) - optimize_def="-O2" CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE -D_REENTRANT";; hpux*) CPPFLAGS="$CPPFLAGS -D_REENTRANT";; irix*) optimize_def="-O2" @@ -222,10 +190,19 @@ AC_SUBST(db_cv_build_type) # for any compiler other than gcc. AC_PROG_CC(cc gcc) -# Checks for compiler characteristics. -AC_SUBST(DB_PROTO1) -AC_SUBST(DB_PROTO2) +# Set specific per-compiler flags. +if test "$GCC" = "yes"; then + # We want -O2 if we're using gcc. + CFLAGS="$CFLAGS " + CFLAGS=`echo "$CFLAGS" | sed 's/-O /-O2 /g'` +else + case "$host_os" in + hpux11*) + CPPFLAGS="$CPPFLAGS -mt";; + esac +fi +# Checks for compiler characteristics. DB_PROTO1="#undef __P" # AC_PROG_CC_STDC only sets ac_cv_prog_cc_stdc if the test fails, so @@ -323,7 +300,7 @@ MAKEFILE_CXXLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CXXLINK}" LIBTOOL="\$(SHELL) ./libtool" case "$host_os" in -cygwin*) +cygwin* | mingw*) MAKEFILE_SOLINK="$MAKEFILE_SOLINK -no-undefined" MAKEFILE_XSOLINK="$MAKEFILE_XSOLINK -no-undefined";; esac @@ -346,6 +323,17 @@ else enable_static="yes" fi +case "$host_os" in + darwin*) + LIBTSO_MODULE="" + LIBTSO_MODSUFFIX=".dylib" + ;; + *) + LIBTSO_MODULE="-module" + LIBTSO_MODSUFFIX=@MODSUFFIX@ + ;; +esac + # C API. if test "$enable_shared" = "no"; then DEFAULT_LIB="\$(libdb_version)" @@ -428,111 +416,6 @@ else PATH_SEPARATOR="/" fi -# Optional RPC client/server. -if test "$db_cv_rpc" = "yes"; then - AC_DEFINE(HAVE_RPC) - AH_TEMPLATE(HAVE_RPC, [Define to 1 if building RPC client/server.]) - - RPC_CLIENT_OBJS="\$(RPC_CLIENT_OBJS)" - ADDITIONAL_PROGS="berkeley_db_svc $ADDITIONAL_PROGS" - - case "$host_os" in - hpux*) - AC_CHECK_FUNC(svc_run,, - AC_CHECK_LIB(nsl, svc_run, - LIBS="-lnsl $LIBS"; LIBTSO_LIBS="-lnsl $LIBTSO_LIBS"; - LIBJSO_LIBS="-lnsl $LIBJSO_LIBS"));; - solaris*) - AC_CHECK_FUNC(svc_run,, AC_CHECK_LIB(nsl, svc_run));; - esac -fi - -case "$host_os" in - darwin*) - LIBTSO_MODULE="" - LIBTSO_MODSUFFIX=".dylib" - ;; - *) - LIBTSO_MODULE="-module" - LIBTSO_MODSUFFIX=@MODSUFFIX@ - ;; -esac - -AM_TCL_LOAD - -# Optional DB 1.85 compatibility API. -if test "$db_cv_compat185" = "yes"; then - ADDITIONAL_INCS="db_185.h $ADDITIONAL_INCS" - - ADDITIONAL_OBJS="db185${o} $ADDITIONAL_OBJS" -fi - -# You can disable pieces of functionality to save space. -# -# Btree is always configured: it is the standard method, and Hash off-page -# duplicates require it. -ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(BTREE_OBJS)" - -# Hash can be disabled. -if test "$db_cv_build_hash" = "yes"; then - AC_DEFINE(HAVE_HASH) - AH_TEMPLATE(HAVE_HASH, [Define to 1 if building Hash access method.]) - ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(HASH_OBJS)" - if test "$db_cv_build_verify" = "yes"; then - ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(HASH_VRFY_OBJS)" - fi -else - ADDITIONAL_OBJS="$ADDITIONAL_OBJS hash_stub${o}" -fi - -# Queue can be disabled. 
-if test "$db_cv_build_queue" = "yes"; then - AC_DEFINE(HAVE_QUEUE) - AH_TEMPLATE(HAVE_QUEUE, [Define to 1 if building Queue access method.]) - ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(QUEUE_OBJS)" - if test "$db_cv_build_verify" = "yes"; then - ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(QUEUE_VRFY_OBJS)" - fi -else - ADDITIONAL_OBJS="$ADDITIONAL_OBJS qam_stub${o}" -fi - -# The replication code. -if test "$db_cv_build_replication" = "yes"; then - AC_DEFINE(HAVE_REPLICATION) - AH_TEMPLATE(HAVE_REPLICATION, - [Define to 1 if building replication support.]) - ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(REP_OBJS)" -else - ADDITIONAL_OBJS="$ADDITIONAL_OBJS rep_stub${o}" -fi - -# The verification code. -if test "$db_cv_build_verify" = "yes"; then - AC_DEFINE(HAVE_VERIFY) - AH_TEMPLATE(HAVE_VERIFY, - [Define to 1 if building access method verification support.]) - ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(BTREE_VRFY_OBJS)" -else - ADDITIONAL_OBJS="$ADDITIONAL_OBJS db_vrfy_stub${o}" -fi - -# The crypto support. -if test -d "$srcdir/../crypto" -a "$db_cv_build_cryptography" = "yes"; then - AC_DEFINE(HAVE_CRYPTO) - AH_TEMPLATE(HAVE_CRYPTO, - [Define to 1 if Berkeley DB release includes strong cryptography.]) - - CRYPTO_OBJS="\$(CRYPTO_OBJS)" -else - CRYPTO_OBJS="crypto_stub${o}" -fi - -# Optional utilities. -if test "$db_cv_dump185" = "yes"; then - ADDITIONAL_PROGS="db_dump185 $ADDITIONAL_PROGS" -fi - # Checks for include files, structures, C types. AC_HEADER_STAT AC_HEADER_TIME @@ -550,27 +433,39 @@ if test "$db_cv_exit_defines" = "yes"; then [Define to 1 if you have EXIT_SUCCESS/EXIT_FAILURE #defines.]) fi -# Test for various functions/libraries that the test and example programs use: -# sched_yield function -# pthreads, socket and math libraries -AC_CHECK_FUNC(sched_yield,, - AC_SEARCH_LIBS(sched_yield, rt, LOAD_LIBS="$LOAD_LIBS -lrt")) - -# XXX +# Test for various functions/libraries -- do tests that change library values +# first. +# +# The Berkeley DB library calls fdatasync, and it's only available in -lrt on +# Solaris. See if we can find it either without additional libraries or in +# -lrt. If fdatasync is found in -lrt, add -lrt to the Java and Tcl shared +# library link lines. +AC_SEARCH_LIBS(fdatasync, rt, [dnl + if test "$ac_cv_search_fdatasync" != "none required" ; then + LIBJSO_LIBS="$LIBJSO_LIBS -lrt"; + LIBTSO_LIBS="$LIBTSO_LIBS -lrt"; + fi]) + +# The test and example programs use the sched_yield function, taken from -lrt +# on Solaris. +AC_SEARCH_LIBS(sched_yield, rt) + +# !!! # We can't check for pthreads in the same way we did the test for sched_yield # because the Solaris C library includes pthread interfaces which are not -# thread-safe. For that reason we always add -lpthread if we find a pthread -# library. Also we can't depend on any specific call existing (pthread_create, -# for example), as it may be #defined in an include file -- OSF/1 (Tru64) has -# this problem. -AC_HAVE_LIBRARY(pthread, LOAD_LIBS="$LOAD_LIBS -lpthread") +# inter-process safe. For that reason we always add -lpthread if we find a +# pthread library. +# +# We can't depend on any specific call existing (pthread_create, for example), +# as it may be #defined in an include file -- OSF/1 (Tru64) has this problem. +AC_HAVE_LIBRARY(pthread, TEST_LIBS="$TEST_LIBS -lpthread") -# XXX -# We could be more exact about whether these libraries are needed, but we don't -# bother -- if they exist, we load them. 
-AC_HAVE_LIBRARY(m, LOAD_LIBS="$LOAD_LIBS -lm") -AC_HAVE_LIBRARY(socket, LOAD_LIBS="$LOAD_LIBS -lsocket") -AC_HAVE_LIBRARY(nsl, LOAD_LIBS="$LOAD_LIBS -lnsl") +# !!! +# We could be more exact about whether these libraries are needed, but don't +# bother -- if they exist, we load them, it's only the test programs anyway. +AC_HAVE_LIBRARY(m, TEST_LIBS="$TEST_LIBS -lm") +AC_HAVE_LIBRARY(socket, TEST_LIBS="$TEST_LIBS -lsocket") +AC_HAVE_LIBRARY(nsl, TEST_LIBS="$TEST_LIBS -lnsl") # Check for mutexes. # We do this here because it changes $LIBS. @@ -583,11 +478,12 @@ AM_DEFINE_MUTEXES # buffer is non-NULL -- Solaris can't handle a NULL buffer, and they # deleted getwd(). AC_REPLACE_FUNCS(getcwd getopt memcmp memcpy memmove raise) -AC_REPLACE_FUNCS(snprintf strcasecmp strdup strerror vsnprintf) +AC_REPLACE_FUNCS(strcasecmp strdup strerror) # Check for system functions we optionally use. -AC_CHECK_FUNCS(_fstati64 clock_gettime directio getrusage gettimeofday getuid) -AC_CHECK_FUNCS(pstat_getdynamic sched_yield select strtoul sysconf yield) +AC_CHECK_FUNCS(_fstati64 clock_gettime directio fdatasync ftruncate getrusage) +AC_CHECK_FUNCS(gettimeofday getuid pstat_getdynamic rand sched_yield) +AC_CHECK_FUNCS(select snprintf srand strtoul sysconf vsnprintf yield) # Pread/pwrite. # HP-UX has pread/pwrite, but it doesn't work with largefile support. @@ -618,26 +514,22 @@ case "$host_os" in aux*) AC_LIBOBJ([getopt]);; esac -# Linux has a broken O_DIRECT flag, but we allow people to override it from -# the command line. -test_host_prw=yes -AC_CACHE_CHECK([for open/O_DIRECT], db_cv_open_o_direct, [ -AC_TRY_LINK([ -#include -#include ], [ - open("a", O_RDONLY | O_DIRECT, 0); -], [db_cv_open_o_direct=yes; test_host_prw=no], [db_cv_open_o_direct=no])]) -if test "$test_host_prw" = "no" -a "$db_cv_open_o_direct" = "yes"; then - case "$host_os" in - linux*) - db_cv_open_o_direct=no; - AC_MSG_WARN( - [O_DIRECT interface ignored on $host_os-$host_vendor.]);; - esac -fi -if test "$db_cv_open_o_direct" = "yes"; then - AC_DEFINE(HAVE_O_DIRECT) - AH_TEMPLATE(HAVE_O_DIRECT, [Define to 1 if you have the O_DIRECT flag.]) +# Linux has a broken O_DIRECT flag, but you can't detect it at configure time. +# Linux and SGI require buffer alignment we may not match, otherwise writes +# will fail. Default to not using the O_DIRECT flag. +if test "$db_cv_o_direct" = "yes"; then + AC_CACHE_CHECK([for open/O_DIRECT], db_cv_open_o_direct, [ + AC_TRY_LINK([ + #include + #include ], [ + open("a", O_RDONLY | O_DIRECT, 0); + ], [db_cv_open_o_direct=yes], [db_cv_open_o_direct=no])]) + if test \ + "$db_cv_o_direct" = "yes" -a "$db_cv_open_o_direct" = "yes"; then + AC_DEFINE(HAVE_O_DIRECT) + AH_TEMPLATE(HAVE_O_DIRECT, + [Define to 1 if you have the O_DIRECT flag.]) + fi fi # Check for largefile support. @@ -686,6 +578,99 @@ if test "$mmap_ok" = "no" -a "$shmget_ok" = "no"; then AC_MSG_WARN([Neither mmap/munmap(2) or shmget(2) library functions.]) fi +# Optional RPC client/server. +if test "$db_cv_rpc" = "yes"; then + AM_RPC_CONFIGURE +fi + +# Optional Tcl support. +if test "$db_cv_tcl" = "yes"; then + AM_TCL_LOAD +fi + +# Optional sequence code. +AM_SEQUENCE_CONFIGURE + +# Optional DB 1.85 compatibility API. +if test "$db_cv_compat185" = "yes"; then + ADDITIONAL_INCS="db_185.h $ADDITIONAL_INCS" + + ADDITIONAL_OBJS="db185${o} $ADDITIONAL_OBJS" +fi + +# Optional utilities. +if test "$db_cv_dump185" = "yes"; then + ADDITIONAL_PROGS="db_dump185 $ADDITIONAL_PROGS" +fi + +# You can disable pieces of functionality to save space. 
+# +# Btree is always configured: it is the standard method, and Hash off-page +# duplicates require it. +ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(BTREE_OBJS)" + +# Hash can be disabled. +if test "$db_cv_build_hash" = "yes"; then + AC_DEFINE(HAVE_HASH) + AH_TEMPLATE(HAVE_HASH, [Define to 1 if building Hash access method.]) + ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(HASH_OBJS)" + if test "$db_cv_build_verify" = "yes"; then + ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(HASH_VRFY_OBJS)" + fi +else + ADDITIONAL_OBJS="$ADDITIONAL_OBJS hash_stub${o}" +fi + +# Queue can be disabled. +if test "$db_cv_build_queue" = "yes"; then + AC_DEFINE(HAVE_QUEUE) + AH_TEMPLATE(HAVE_QUEUE, [Define to 1 if building Queue access method.]) + ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(QUEUE_OBJS)" + if test "$db_cv_build_verify" = "yes"; then + ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(QUEUE_VRFY_OBJS)" + fi +else + ADDITIONAL_OBJS="$ADDITIONAL_OBJS qam_stub${o}" +fi + +# Replication can be disabled. +if test "$db_cv_build_replication" = "yes"; then + AC_DEFINE(HAVE_REPLICATION) + AH_TEMPLATE(HAVE_REPLICATION, + [Define to 1 if building replication support.]) + ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(REP_OBJS)" +else + ADDITIONAL_OBJS="$ADDITIONAL_OBJS rep_stub${o}" +fi + +# The statistics code can be disabled. +if test "$db_cv_build_statistics" = "yes"; then + AC_DEFINE(HAVE_STATISTICS) + AH_TEMPLATE(HAVE_STATISTICS, + [Define to 1 if building statistics support.]) +fi + +# The verification code can be disabled. +if test "$db_cv_build_verify" = "yes"; then + AC_DEFINE(HAVE_VERIFY) + AH_TEMPLATE(HAVE_VERIFY, + [Define to 1 if building access method verification support.]) + ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(BTREE_VRFY_OBJS)" +else + ADDITIONAL_OBJS="$ADDITIONAL_OBJS db_vrfy_stub${o}" +fi + +# The crypto code can be disabled. +if test -d "$srcdir/../crypto" -a "$db_cv_build_cryptography" = "yes"; then + AC_DEFINE(HAVE_CRYPTO) + AH_TEMPLATE(HAVE_CRYPTO, + [Define to 1 if Berkeley DB release includes strong cryptography.]) + + CRYPTO_OBJS="\$(CRYPTO_OBJS)" +else + CRYPTO_OBJS="crypto_stub${o}" +fi + # We need to add the additional object files into the Makefile with the correct # suffix. We can't use $LTLIBOBJS itself, because that variable has $U encoded # in it for automake, and that's not what we want. See SR #7227 for additional @@ -720,10 +705,10 @@ fi # to do DB_VERSION_UNIQUE_NAME substitution. if test "$db_cv_uniquename" = "yes"; then CREATE_LIST="$CREATE_LIST - db.h:$srcdir/../dbinc/db.in:$srcdir/../dbinc_auto/rpc_defs.in:$srcdir/../dbinc_auto/ext_def.in:$srcdir/../dbinc_auto/ext_prot.in" + db.h:$srcdir/../dbinc/db.in:$srcdir/../dbinc_auto/ext_def.in:$srcdir/../dbinc_auto/ext_prot.in" else CREATE_LIST="$CREATE_LIST - db.h:$srcdir/../dbinc/db.in:$srcdir/../dbinc_auto/rpc_defs.in:$srcdir/../dbinc_auto/ext_prot.in" + db.h:$srcdir/../dbinc/db.in:$srcdir/../dbinc_auto/ext_prot.in" fi # If configured for unique names, create the db_int_uext.h file (which @@ -750,9 +735,5 @@ if test "$db_cv_compat185" = "yes"; then fi fi -if test "$db_cv_rpm" = "yes"; then - CREATE_LIST="$CREATE_LIST db.spec:../dist/db.spec.in" -fi - AC_CONFIG_FILES($CREATE_LIST) AC_OUTPUT diff --git a/db/dist/gen_rec.awk b/db/dist/gen_rec.awk index 592c1f39b..8dddc1052 100644 --- a/db/dist/gen_rec.awk +++ b/db/dist/gen_rec.awk @@ -2,10 +2,10 @@ # # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. 
# -# $Id: gen_rec.awk,v 11.87 2003/11/14 05:32:38 ubell Exp $ +# $Id: gen_rec.awk,v 11.108 2004/09/22 18:03:49 bostic Exp $ # # This awk script generates all the log, print, and read routines for the DB @@ -41,21 +41,25 @@ # PGDBT Just like DBT, only we know it stores a page or page # header, so we can byte-swap it (once we write the # byte-swapping code, which doesn't exist yet). +# LOCKS Just like DBT, but uses a print function for locks. BEGIN { if (source_file == "" || header_file == "" || template_file == "") { print "Usage: gen_rec.awk requires three variables to be set:" - print "\tsource_file\t-- the C source file being created" - print "\theader_file\t-- the C #include file being created" + print "\theader_file\t-- the recover #include file being created" + print "\tprint_file\t-- the print source file being created" + print "\tsource_file\t-- the recover source file being created" print "\ttemplate_file\t-- the template file being created" exit } FS="[\t ][\t ]*" CFILE=source_file HFILE=header_file + PFILE=print_file TFILE=template_file dbprivate = 0 + buf_only = 1; } /^[ ]*DBPRIVATE/ { dbprivate = 1 @@ -64,9 +68,17 @@ BEGIN { prefix = $2 num_funcs = 0; - # Start .c file. - printf("/* Do not edit: automatically built by gen_rec.awk. */\n") \ + # Start .c files. + printf("/* Do not edit: automatically built by gen_rec.awk. */\n\n") \ > CFILE + printf("#include \"db_config.h\"\n\n") >> CFILE + printf("/* Do not edit: automatically built by gen_rec.awk. */\n\n") \ + > PFILE + printf("#include \"db_config.h\"\n\n") >> PFILE + if (prefix == "__ham") + printf("#ifdef HAVE_HASH\n") >> PFILE + if (prefix == "__qam") + printf("#ifdef HAVE_QUEUE\n") >> PFILE # Start .h file, make the entire file conditional. printf("/* Do not edit: automatically built by gen_rec.awk. */\n\n") \ @@ -90,8 +102,11 @@ BEGIN { for (i = 2; i < NF; i++) printf("%s ", $i) >> CFILE printf("%s\n", $i) >> CFILE + for (i = 2; i < NF; i++) + printf("%s ", $i) >> PFILE + printf("%s\n", $i) >> PFILE } -/^[ ]*(BEGIN|IGNORED)/ { +/^[ ]*(BEGIN|IGNORED|BEGIN_BUF)/ { if (in_begin) { print "Invalid format: missing END statement" exit @@ -100,21 +115,22 @@ BEGIN { is_dbt = 0; has_dbp = 0; is_uint = 0; - need_log_function = ($1 == "BEGIN"); + need_log_function = ($1 == "BEGIN") || ($1 == "BEGIN_BUF"); + not_buf = ($1 == "BEGIN") || ($1 == "IGNORED"); + if (not_buf) + buf_only = 0; nvars = 0; - # number of locks that the getpgnos functions will return - nlocks = 0; - thisfunc = $2; funcname = sprintf("%s_%s", prefix, $2); - rectype = $3; + if (not_buf) + rectype = $3; funcs[num_funcs] = funcname; ++num_funcs; } -/^[ ]*(DB|ARG|DBT|PGDBT|POINTER|TIME)/ { +/^[ ]*(DB|ARG|DBT|LOCKS|PGDBT|POINTER|TIME)/ { vars[nvars] = $2; types[nvars] = $3; atypes[nvars] = $1; @@ -147,14 +163,18 @@ BEGIN { } # Declare the record type. - printf("#define\tDB_%s\t%d\n", funcname, rectype) >> HFILE + if (not_buf) { + printf("#define\tDB_%s\t%d\n", funcname, rectype) >> HFILE + } # Structure declaration. printf("typedef struct _%s_args {\n", funcname) >> HFILE # Here are the required fields for every structure - printf("\tu_int32_t type;\n\tDB_TXN *txnid;\n") >> HFILE - printf("\tDB_LSN prev_lsn;\n") >>HFILE + if (not_buf) { + printf("\tu_int32_t type;\n\tDB_TXN *txnid;\n") >> HFILE + printf("\tDB_LSN prev_lsn;\n") >>HFILE + } # Here are the specified fields. for (i = 0; i < nvars; i++) { @@ -167,25 +187,22 @@ BEGIN { } printf("} %s_args;\n\n", funcname) >> HFILE - # Output the log, print, read, and getpgnos functions. 
+ # Output the log, print and read functions. if (need_log_function) { log_function(); - - # The getpgnos function calls DB-private (__rep_*) functions, - # so we only generate it for our own logging functions, - # not application-specific ones. - if (dbprivate) { - getpgnos_function(); - } } - print_function(); + if (not_buf) { + print_function(); + } read_function(); # Recovery template - cmd = sprintf(\ + if (not_buf) { + cmd = sprintf(\ "sed -e s/PREF/%s/ -e s/FUNC/%s/ < template/rec_ctemp >> %s", - prefix, thisfunc, TFILE) - system(cmd); + prefix, thisfunc, TFILE) + system(cmd); + } # Done writing stuff, reset and continue. in_begin = 0; @@ -195,80 +212,63 @@ END { # End the conditional for the HFILE printf("#endif\n") >> HFILE; + if (buf_only == 1) + exit + # Print initialization routine; function prototype p[1] = sprintf("int %s_init_print %s%s", prefix, "__P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, ", "db_recops, void *), size_t *));"); p[2] = ""; - proto_format(p); + proto_format(p, PFILE); # Create the routine to call __db_add_recovery(print_fn, id) printf("int\n%s_init_print(dbenv, dtabp, dtabsizep)\n", \ - prefix) >> CFILE; - printf("\tDB_ENV *dbenv;\n") >> CFILE;; - printf("\tint (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *,") >> CFILE; - printf(" db_recops, void *));\n") >> CFILE; - printf("\tsize_t *dtabsizep;\n{\n") >> CFILE; + prefix) >> PFILE; + printf("\tDB_ENV *dbenv;\n") >> PFILE;; + printf("\tint (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *,") >> PFILE; + printf(" db_recops, void *));\n") >> PFILE; + printf("\tsize_t *dtabsizep;\n{\n") >> PFILE; # If application-specific, the user will need a prototype for # __db_add_recovery, since they won't have DB's. if (!dbprivate) { - printf("\tint __db_add_recovery __P((DB_ENV *,\n") >> CFILE; + printf("\tint __db_add_recovery __P((DB_ENV *,\n") >> PFILE; printf(\ -"\t int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *),\n") >> CFILE; - printf("\t size_t *,\n") >> CFILE; +"\t int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *),\n") >> PFILE; + printf("\t size_t *,\n") >> PFILE; printf(\ "\t int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), u_int32_t));\n") \ - >> CFILE; + >> PFILE; } - printf("\tint ret;\n\n") >> CFILE; + printf("\tint ret;\n\n") >> PFILE; for (i = 0; i < num_funcs; i++) { - printf("\tif ((ret = __db_add_recovery(dbenv, ") >> CFILE; - printf("dtabp, dtabsizep,\n") >> CFILE; + printf("\tif ((ret = __db_add_recovery(dbenv, ") >> PFILE; + printf("dtabp, dtabsizep,\n") >> PFILE; printf("\t %s_print, DB_%s)) != 0)\n", \ - funcs[i], funcs[i]) >> CFILE; - printf("\t\treturn (ret);\n") >> CFILE; - } - printf("\treturn (0);\n}\n\n") >> CFILE; - - # We only want to generate *_init_{getpgnos,recover} functions - # if this is a DB-private, rather than application-specific, - # set of recovery functions. Application-specific recovery functions - # should be dispatched using the DB_ENV->set_app_dispatch callback - # rather than a DB dispatch table ("dtab"). + funcs[i], funcs[i]) >> PFILE; + printf("\t\treturn (ret);\n") >> PFILE; + } + printf("\treturn (0);\n}\n") >> PFILE; + if (prefix == "__ham") + printf("#endif /* HAVE_HASH */\n") >> PFILE + if (prefix == "__qam") + printf("#endif /* HAVE_QUEUE */\n") >> PFILE + + # We only want to generate *_init_recover functions if this is a + # DB-private, rather than application-specific, set of recovery + # functions. Application-specific recovery functions should be + # dispatched using the DB_ENV->set_app_dispatch callback rather + # than a DB dispatch table ("dtab"). 
if (!dbprivate) exit - # Page number initialization routine; function prototype - printf("#ifdef HAVE_REPLICATION\n") >> CFILE; - p[1] = sprintf("int %s_init_getpgnos %s%s", prefix, - "__P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, ", - "db_recops, void *), size_t *));"); - p[2] = ""; - proto_format(p); - - # Create the routine to call db_add_recovery(pgno_fn, id) - printf("int\n%s_init_getpgnos(dbenv, dtabp, dtabsizep)\n", \ - prefix) >> CFILE; - printf("\tDB_ENV *dbenv;\n") >> CFILE; - printf("\tint (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *,") >> CFILE; - printf(" db_recops, void *));\n") >> CFILE; - printf("\tsize_t *dtabsizep;\n{\n\tint ret;\n\n") >> CFILE; - for (i = 0; i < num_funcs; i++) { - printf("\tif ((ret = __db_add_recovery(dbenv, ") >> CFILE; - printf("dtabp, dtabsizep,\n") >> CFILE; - printf("\t %s_getpgnos, DB_%s)) != 0)\n", \ - funcs[i], funcs[i]) >> CFILE; - printf("\t\treturn (ret);\n") >> CFILE; - } - printf("\treturn (0);\n}\n#endif /* HAVE_REPLICATION */\n\n") >> CFILE; - # Recover initialization routine p[1] = sprintf("int %s_init_recover %s%s", prefix, "__P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, ", "db_recops, void *), size_t *));"); p[2] = ""; - proto_format(p); + proto_format(p, CFILE); # Create the routine to call db_add_recovery(func, id) printf("int\n%s_init_recover(dbenv, dtabp, dtabsizep)\n", \ @@ -290,34 +290,44 @@ END { function log_function() { # Write the log function; function prototype pi = 1; - p[pi++] = sprintf("int %s_log", funcname); - p[pi++] = " "; - if (has_dbp == 1) { - p[pi++] = "__P((DB *, DB_TXN *, DB_LSN *, u_int32_t"; + if (not_buf) { + p[pi++] = sprintf("int %s_log", funcname); + p[pi++] = " "; + if (has_dbp == 1) { + p[pi++] = "__P((DB *"; + } else { + p[pi++] = "__P((DB_ENV *"; + } + p[pi++] = ", DB_TXN *, DB_LSN *, u_int32_t"; } else { - p[pi++] = "__P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t"; + p[pi++] = sprintf("int %s_buf", funcname); + p[pi++] = " "; + p[pi++] = "__P((u_int8_t *, size_t, size_t *"; } for (i = 0; i < nvars; i++) { if (modes[i] == "DB") continue; p[pi++] = ", "; p[pi++] = sprintf("%s%s%s", - (modes[i] == "DBT" || modes[i] == "PGDBT") ? "const " : "", - types[i], - (modes[i] == "DBT" || modes[i] == "PGDBT") ? " *" : ""); + (modes[i] == "DBT" || modes[i] == "LOCKS" || + modes[i] == "PGDBT") ? "const " : "", types[i], + (modes[i] == "DBT" || modes[i] == "LOCKS" || + modes[i] == "PGDBT") ? 
" *" : ""); } p[pi++] = ""; p[pi++] = "));"; p[pi++] = ""; - proto_format(p); + proto_format(p, CFILE); # Function declaration - if (has_dbp == 1) { + if (not_buf == 1 && has_dbp == 1) { printf("int\n%s_log(dbp, txnid, ret_lsnp, flags", \ funcname) >> CFILE; - } else { + } else if (not_buf == 1) { printf("int\n%s_log(dbenv, txnid, ret_lsnp, flags", \ funcname) >> CFILE; + } else { + printf("int\n%s_buf(buf, max, lenp", funcname) >> CFILE; } for (i = 0; i < nvars; i++) { if (modes[i] == "DB") { @@ -335,16 +345,22 @@ function log_function() { printf(")\n") >> CFILE; # Now print the parameters - if (has_dbp == 1) { - printf("\tDB *dbp;\n") >> CFILE; + if (not_buf == 1) { + if (has_dbp == 1) { + printf("\tDB *dbp;\n") >> CFILE; + } else { + printf("\tDB_ENV *dbenv;\n") >> CFILE; + } + printf("\tDB_TXN *txnid;\n\tDB_LSN *ret_lsnp;\n") >> CFILE; + printf("\tu_int32_t flags;\n") >> CFILE; } else { - printf("\tDB_ENV *dbenv;\n") >> CFILE; + printf("\tu_int8_t *buf;\n") >> CFILE; + printf("\tsize_t max, *lenp;\n") >> CFILE; } - printf("\tDB_TXN *txnid;\n\tDB_LSN *ret_lsnp;\n") >> CFILE; - printf("\tu_int32_t flags;\n") >> CFILE; for (i = 0; i < nvars; i++) { # We just skip for modes == DB. - if (modes[i] == "DBT" || modes[i] == "PGDBT") + if (modes[i] == "DBT" || + modes[i] == "LOCKS" || modes[i] == "PGDBT") printf("\tconst %s *%s;\n", types[i], vars[i]) >> CFILE; else if (modes[i] != "DB") printf("\t%s %s;\n", types[i], vars[i]) >> CFILE; @@ -352,136 +368,185 @@ function log_function() { # Function body and local decls printf("{\n") >> CFILE; - printf("\tDBT logrec;\n") >> CFILE; - if (has_dbp == 1) - printf("\tDB_ENV *dbenv;\n") >> CFILE; - if (dbprivate) - printf("\tDB_TXNLOGREC *lr;\n") >> CFILE; - printf("\tDB_LSN *lsnp, null_lsn;\n") >> CFILE; - printf("\tu_int32_t ") >> CFILE; - if (is_dbt == 1) - printf("zero, ") >> CFILE; - if (is_uint == 1) - printf("uinttmp, ") >> CFILE; - printf("rectype, txn_num;\n") >> CFILE; - printf("\tu_int npad;\n") >> CFILE; + if (not_buf == 1) { + printf("\tDBT logrec;\n") >> CFILE; + if (has_dbp == 1) + printf("\tDB_ENV *dbenv;\n") >> CFILE; + if (dbprivate) + printf("\tDB_TXNLOGREC *lr;\n") >> CFILE; + printf("\tDB_LSN *lsnp, null_lsn, *rlsnp;\n") >> CFILE; + printf("\tu_int32_t ") >> CFILE; + if (is_dbt == 1) + printf("zero, ") >> CFILE; + if (is_uint == 1) + printf("uinttmp, ") >> CFILE; + printf("rectype, txn_num;\n") >> CFILE; + printf("\tu_int npad;\n") >> CFILE; + } else { + if (is_dbt == 1) + printf("\tu_int32_t zero;\n") >> CFILE; + if (is_uint == 1) + printf("\tu_int32_t uinttmp;\n") >> CFILE; + printf("\tu_int8_t *endbuf;\n") >> CFILE; + } printf("\tu_int8_t *bp;\n") >> CFILE; printf("\tint ") >> CFILE; - if (dbprivate) { + if (dbprivate && not_buf == 1) { printf("is_durable, ") >> CFILE; } printf("ret;\n\n") >> CFILE; # Initialization - if (has_dbp == 1) - printf("\tdbenv = dbp->dbenv;\n") >> CFILE; - printf("\trectype = DB_%s;\n", funcname) >> CFILE; - printf("\tnpad = 0;\n\n") >> CFILE; + if (not_buf == 1) { + if (has_dbp == 1) + printf("\tdbenv = dbp->dbenv;\n") >> CFILE; + if (dbprivate) + printf("\tCOMPQUIET(lr, NULL);\n\n") >> CFILE; + printf("\trectype = DB_%s;\n", funcname) >> CFILE; + printf("\tnpad = 0;\n") >> CFILE; + printf("\trlsnp = ret_lsnp;\n\n") >> CFILE; + } + printf("\tret = 0;\n\n") >> CFILE; + + if (not_buf) { + if (dbprivate) { + printf("\tif (LF_ISSET(DB_LOG_NOT_DURABLE)") \ + >> CFILE; + if (has_dbp == 1) { + printf(" ||\n\t ") >> CFILE; + printf("F_ISSET(dbp, DB_AM_NOT_DURABLE)) {\n") \ + >> CFILE; + } else { + printf(") 
{\n") >> CFILE; + printf("\t\tif (txnid == NULL)\n") >> CFILE; + printf("\t\t\treturn (0);\n") >> CFILE; + } + printf("\t\tis_durable = 0;\n") >> CFILE; + printf("\t} else\n") >> CFILE; + printf("\t\tis_durable = 1;\n\n") >> CFILE; + } + printf("\tif (txnid == NULL) {\n") >> CFILE; + printf("\t\ttxn_num = 0;\n") >> CFILE; + printf("\t\tlsnp = &null_lsn;\n") >> CFILE; + printf("\t\tnull_lsn.file = null_lsn.offset = 0;\n") >> CFILE; + printf("\t} else {\n") >> CFILE; + if (dbprivate && funcname != "__db_debug") { + printf(\ + "\t\tif (TAILQ_FIRST(&txnid->kids) != NULL &&\n") >> CFILE; + printf("\t\t (ret = __txn_activekids(") >> CFILE; + printf("dbenv, rectype, txnid)) != 0)\n") >> CFILE; + printf("\t\t\treturn (ret);\n") >> CFILE; + } + printf("\t\t/*\n\t\t * We need to assign begin_lsn while ") \ + >> CFILE; + printf("holding region mutex.\n") >> CFILE; + printf("\t\t * That assignment is done inside the ") >> CFILE; + printf("DbEnv->log_put call,\n\t\t * ") >> CFILE; + printf("so pass in the appropriate memory location to be ") \ + >> CFILE; + printf("filled\n\t\t * in by the log_put code.\n\t\t*/\n") \ + >> CFILE; + printf("\t\tDB_SET_BEGIN_LSNP(txnid, &rlsnp);\n") >> CFILE; + printf("\t\ttxn_num = txnid->txnid;\n") >> CFILE; + printf("\t\tlsnp = &txnid->last_lsn;\n") >> CFILE; + printf("\t}\n\n") >> CFILE; + # Malloc + printf("\tlogrec.size = ") >> CFILE; + printf("sizeof(rectype) + ") >> CFILE; + printf("sizeof(txn_num) + sizeof(DB_LSN)") >> CFILE; + for (i = 0; i < nvars; i++) + printf("\n\t + %s", sizes[i]) >> CFILE; + printf(";\n") >> CFILE + if (dbprivate) { + printf("\tif (CRYPTO_ON(dbenv)) {\n") >> CFILE; + printf("\t\tnpad =\n") >> CFILE; + printf("\t\t ((DB_CIPHER *)dbenv->crypto_handle)") \ + >> CFILE; + printf("->adj_size(logrec.size);\n") >> CFILE; + printf("\t\tlogrec.size += npad;\n\t}\n\n") >> CFILE - if (dbprivate) { - printf("\tis_durable = 1;\n") >> CFILE; - printf("\tif (LF_ISSET(DB_LOG_NOT_DURABLE) ||\n") >> CFILE; - printf("\t F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE)") >> CFILE; - if (has_dbp == 1) { - printf(" ||\n\t ") >> CFILE; - printf("F_ISSET(dbp, DB_AM_NOT_DURABLE)) {\n") >> CFILE; - printf("\t\tif (F_ISSET(dbenv, ") >> CFILE; - printf("DB_ENV_TXN_NOT_DURABLE) && ") >> CFILE; - printf("txnid == NULL)\n") >> CFILE; + printf("\tif (is_durable || txnid == NULL) {\n") \ + >> CFILE; + printf("\t\tif ((ret =\n\t\t __os_malloc(dbenv, ") \ + >> CFILE; + printf("logrec.size, &logrec.data)) != 0)\n") >> CFILE; + printf("\t\t\treturn (ret);\n") >> CFILE; + printf("\t} else {\n") >> CFILE; + write_malloc("\t\t", + "lr", "logrec.size + sizeof(DB_TXNLOGREC)", CFILE) + printf("#ifdef DIAGNOSTIC\n") >> CFILE; + printf("\t\tif ((ret =\n\t\t __os_malloc(dbenv, ") \ + >> CFILE; + printf("logrec.size, &logrec.data)) != 0) {\n") \ + >> CFILE; + printf("\t\t\t__os_free(dbenv, lr);\n") >> CFILE; + printf("\t\t\treturn (ret);\n") >> CFILE; + printf("\t\t}\n") >> CFILE; + printf("#else\n") >> CFILE; + printf("\t\tlogrec.data = lr->data;\n") >> CFILE; + printf("#endif\n") >> CFILE; + printf("\t}\n") >> CFILE; } else { - printf(") {\n") >> CFILE; - printf("\t\tif (txnid == NULL)\n") >> CFILE; + write_malloc("\t", "logrec.data", "logrec.size", CFILE) + printf("\tbp = logrec.data;\n\n") >> CFILE; } - printf("\t\t\treturn (0);\n") >> CFILE; - printf("\t\tis_durable = 0;\n") >> CFILE; - printf("\t}\n") >> CFILE; - } - - printf("\tif (txnid == NULL) {\n") >> CFILE; - printf("\t\ttxn_num = 0;\n") >> CFILE; - printf("\t\tnull_lsn.file = 0;\n") >> CFILE; - printf("\t\tnull_lsn.offset = 0;\n") >> 
CFILE; - printf("\t\tlsnp = &null_lsn;\n") >> CFILE; - printf("\t} else {\n") >> CFILE; - if (dbprivate && funcname != "__db_debug") { - printf(\ - "\t\tif (TAILQ_FIRST(&txnid->kids) != NULL &&\n") >> CFILE; - printf("\t\t (ret = __txn_activekids(") >> CFILE; - printf("dbenv, rectype, txnid)) != 0)\n") >> CFILE; - printf("\t\t\treturn (ret);\n") >> CFILE; - } - printf("\t\ttxn_num = txnid->txnid;\n") >> CFILE; - printf("\t\tlsnp = &txnid->last_lsn;\n") >> CFILE; - printf("\t}\n\n") >> CFILE; - - # Malloc - printf("\tlogrec.size = sizeof(rectype) + ") >> CFILE; - printf("sizeof(txn_num) + sizeof(DB_LSN)") >> CFILE; - for (i = 0; i < nvars; i++) - printf("\n\t + %s", sizes[i]) >> CFILE; - printf(";\n") >> CFILE - if (dbprivate) { - printf("\tif (CRYPTO_ON(dbenv)) {\n") >> CFILE; - printf("\t\tnpad =\n") >> CFILE; - printf("\t\t ((DB_CIPHER *)dbenv->crypto_handle)") >> CFILE; - printf("->adj_size(logrec.size);\n") >> CFILE; - printf("\t\tlogrec.size += npad;\n\t}\n\n") >> CFILE - - printf("\tif (!is_durable && txnid != NULL) {\n") >> CFILE; - write_malloc("\t\t", - "lr", "logrec.size + sizeof(DB_TXNLOGREC)", CFILE) - printf("#ifdef DIAGNOSTIC\n") >> CFILE; - printf("\t\tgoto do_malloc;\n") >> CFILE; - printf("#else\n") >> CFILE; - printf("\t\tlogrec.data = &lr->data;\n") >> CFILE; - printf("#endif\n") >> CFILE; - printf("\t} else {\n") >> CFILE; - printf("#ifdef DIAGNOSTIC\n") >> CFILE; - printf("do_malloc:\n") >> CFILE; - printf("#endif\n") >> CFILE; - printf("\t\tif ((ret =\n\t\t __os_malloc(dbenv, ") >> CFILE; - printf("logrec.size, &logrec.data)) != 0) {\n") >> CFILE; - printf("#ifdef DIAGNOSTIC\n") >> CFILE; - printf("\t\t\tif (!is_durable && txnid != NULL)\n") >> CFILE; - printf("\t\t\t\t(void)__os_free(dbenv, lr);\n") >> CFILE; - printf("#endif\n") >> CFILE; - printf("\t\t\treturn (ret);\n") >> CFILE; - printf("\t\t}\n") >> CFILE; - printf("\t}\n") >> CFILE; - } else - write_malloc("\t", "logrec.data", "logrec.size", CFILE) - - printf("\tif (npad > 0)\n") >> CFILE; - printf("\t\tmemset((u_int8_t *)logrec.data + logrec.size ") >> CFILE; - printf("- npad, 0, npad);\n\n") >> CFILE; - - # Copy args into buffer - printf("\tbp = logrec.data;\n\n") >> CFILE; - printf("\tmemcpy(bp, &rectype, sizeof(rectype));\n") >> CFILE; - printf("\tbp += sizeof(rectype);\n\n") >> CFILE; - printf("\tmemcpy(bp, &txn_num, sizeof(txn_num));\n") >> CFILE; - printf("\tbp += sizeof(txn_num);\n\n") >> CFILE; - printf("\tmemcpy(bp, lsnp, sizeof(DB_LSN));\n") >> CFILE; - printf("\tbp += sizeof(DB_LSN);\n\n") >> CFILE; + printf("\tif (npad > 0)\n") >> CFILE; + printf("\t\tmemset((u_int8_t *)logrec.data + logrec.size ") \ + >> CFILE; + printf("- npad, 0, npad);\n\n") >> CFILE; + printf("\tbp = logrec.data;\n\n") >> CFILE; + + # Copy args into buffer + printf("\tmemcpy(bp, &rectype, sizeof(rectype));\n") >> CFILE; + printf("\tbp += sizeof(rectype);\n\n") >> CFILE; + printf("\tmemcpy(bp, &txn_num, sizeof(txn_num));\n") >> CFILE; + printf("\tbp += sizeof(txn_num);\n\n") >> CFILE; + printf("\tmemcpy(bp, lsnp, sizeof(DB_LSN));\n") >> CFILE; + printf("\tbp += sizeof(DB_LSN);\n\n") >> CFILE; + } else { + printf("\tbp = buf;\n") >> CFILE; + printf("\tendbuf = bp + max;\n\n") >> CFILE + } for (i = 0; i < nvars; i ++) { if (modes[i] == "ARG" || modes[i] == "TIME") { printf("\tuinttmp = (u_int32_t)%s;\n", \ vars[i]) >> CFILE; + if (not_buf == 0) { + printf("\tif (bp + sizeof(uinttmp) > endbuf)\n") \ + >> CFILE; + printf("\t\treturn (ENOMEM);\n") >> CFILE; + } printf("\tmemcpy(bp, &uinttmp, sizeof(uinttmp));\n") \ >> CFILE; 
printf("\tbp += sizeof(uinttmp);\n\n") >> CFILE; - } else if (modes[i] == "DBT" || modes[i] == "PGDBT") { + } else if (modes[i] == "DBT" || \ + modes[i] == "LOCKS" || modes[i] == "PGDBT") { printf("\tif (%s == NULL) {\n", vars[i]) >> CFILE; printf("\t\tzero = 0;\n") >> CFILE; + if (not_buf == 0) { + printf("\t\tif (bp + sizeof(u_int32_t) > endbuf)\n") \ + >> CFILE; + printf("\t\t\treturn (ENOMEM);\n") >> CFILE; + } printf("\t\tmemcpy(bp, &zero, sizeof(u_int32_t));\n") \ >> CFILE; printf("\t\tbp += sizeof(u_int32_t);\n") >> CFILE; printf("\t} else {\n") >> CFILE; + if (not_buf == 0) { + printf("\t\tif (bp + sizeof(%s->size) > endbuf)\n", \ + vars[i]) >> CFILE; + printf("\t\t\treturn (ENOMEM);\n") >> CFILE; + } printf("\t\tmemcpy(bp, &%s->size, ", vars[i]) >> CFILE; printf("sizeof(%s->size));\n", vars[i]) >> CFILE; printf("\t\tbp += sizeof(%s->size);\n", vars[i]) \ >> CFILE; + if (not_buf == 0) { + printf("\t\tif (bp + %s->size > endbuf)\n", \ + vars[i]) >> CFILE; + printf("\t\t\treturn (ENOMEM);\n") >> CFILE; + } printf("\t\tmemcpy(bp, %s->data, %s->size);\n", \ vars[i], vars[i]) >> CFILE; printf("\t\tbp += %s->size;\n\t}\n\n", \ @@ -504,6 +569,11 @@ function log_function() { >> CFILE; printf("\tbp += sizeof(uinttmp);\n\n") >> CFILE; } else { # POINTER + if (not_buf == 0) { + printf("\tif (bp + %s > endbuf)\n", \ + sizes[i]) >> CFILE; + printf("\t\treturn (ENOMEM);\n") >> CFILE; + } printf("\tif (%s != NULL)\n", vars[i]) >> CFILE; printf("\t\tmemcpy(bp, %s, %s);\n", vars[i], \ sizes[i]) >> CFILE; @@ -516,87 +586,96 @@ function log_function() { # Error checking. User code won't have DB_ASSERT available, but # this is a pretty unlikely assertion anyway, so we just leave it out # rather than requiring assert.h. - if (dbprivate) { - printf("\tDB_ASSERT((u_int32_t)") >> CFILE; - printf("(bp - (u_int8_t *)logrec.data) <= logrec.size);\n\n") \ - >> CFILE; - } - + if (not_buf == 1) { + if (dbprivate) { + printf("\tDB_ASSERT((u_int32_t)") >> CFILE; + printf("(bp - (u_int8_t *)logrec.data) ") >> CFILE; + printf("<= logrec.size);\n\n") >> CFILE; + # Save the log record off in the txn's linked list, + # or do log call. + # We didn't call the crypto alignment function when + # we created this log record (because we don't have + # the right header files to find the function), so + # we have to copy the log record to make sure the + # alignment is correct. + printf("\tif (is_durable || txnid == NULL) {\n") \ + >> CFILE; + # Output the log record and update the return LSN. + printf("\t\tif ((ret = __log_put(dbenv, rlsnp,") \ + >> CFILE; + printf("(DBT *)&logrec,\n") >> CFILE; + printf("\t\t flags | DB_LOG_NOCOPY)) == 0") >> CFILE; + printf(" && txnid != NULL) {\n") >> CFILE; + printf("\t\t\ttxnid->last_lsn = *rlsnp;\n") >> CFILE; + + printf("\t\t\tif (rlsnp != ret_lsnp)\n") >> CFILE; + printf("\t\t\t\t *ret_lsnp = *rlsnp;\n") >> CFILE; + printf("\t\t}\n\t} else {\n") >> CFILE; + printf("#ifdef DIAGNOSTIC\n") >> CFILE; + + # Add the debug bit if we are logging a ND record. 
+ printf("\t\t/*\n") >> CFILE; + printf("\t\t * Set the debug bit if we are") >> CFILE; + printf(" going to log non-durable\n") >> CFILE; + printf("\t\t * transactions so they will be ignored") \ + >> CFILE; + printf(" by recovery.\n") >> CFILE; + printf("\t\t */\n") >> CFILE; + printf("\t\tmemcpy(lr->data, logrec.data, ") >> CFILE + printf("logrec.size);\n") >> CFILE; + printf("\t\trectype |= DB_debug_FLAG;\n") >> CFILE; + printf("\t\tmemcpy(") >> CFILE + printf("logrec.data, &rectype, sizeof(rectype));\n\n") \ + >> CFILE; + # Output the log record. + printf("\t\tret = __log_put(dbenv,\n") >> CFILE; + printf("\t\t rlsnp, (DBT *)&logrec, ") >> CFILE; + printf("flags | DB_LOG_NOCOPY);\n") >> CFILE; + printf("#else\n") >> CFILE; + printf("\t\tret = 0;\n") >> CFILE; + printf("#endif\n") >> CFILE; + # Add a ND record to the txn list. + printf("\t\tSTAILQ_INSERT_HEAD(&txnid") >> CFILE; + printf("->logs, lr, links);\n") >> CFILE; + # Update the return LSN. + printf("\t\tLSN_NOT_LOGGED(*ret_lsnp);\n") >> CFILE; + printf("\t}\n\n") >> CFILE; + } else { + printf("\tif ((ret = dbenv->log_put(dbenv, rlsnp,") >> CFILE; + printf(" (DBT *)&logrec,\n") >> CFILE; + printf("\t flags | DB_LOG_NOCOPY)) == 0") >> CFILE; + printf(" && txnid != NULL) {\n") >> CFILE; + + # Update the transactions last_lsn. + printf("\t\ttxnid->last_lsn = *rlsnp;\n") >> CFILE; + printf("\t\tif (rlsnp != ret_lsnp)\n") >> CFILE; + printf("\t\t\t *ret_lsnp = *rlsnp;\n") >> CFILE; + printf("\t}\n") >> CFILE; - # Save the log record off in the txn's linked list, or do log call. - # - # We didn't call the crypto alignment function when we created this - # log record (because we don't have the right header files to find - # the function), so we have to copy the log record to make sure the - # alignment is correct. - if (dbprivate) { - # Add the debug bit if we are logging a ND record. - printf("#ifdef DIAGNOSTIC\n") >> CFILE; - printf("\tif (!is_durable && txnid != NULL) {\n") >> CFILE; - printf("\t\t /*\n") >> CFILE; - printf("\t\t * We set the debug bit if we are going\n") \ - >> CFILE; - printf("\t\t * to log non-durable transactions so\n") >> CFILE; - printf("\t\t * they will be ignored by recovery.\n") >> CFILE; - printf("\t\t */\n") >> CFILE; - printf("\t\tmemcpy(lr->data, logrec.data, logrec.size);\n") \ - >> CFILE; - printf("\t\trectype |= DB_debug_FLAG;\n") >> CFILE; - printf("\t\tmemcpy(logrec.data, &rectype, sizeof(rectype));\n")\ - >> CFILE; - printf("\t}\n") >> CFILE; - printf("#endif\n\n") >> CFILE; - - # Add an ND record to the list. - printf("\tif (!is_durable && txnid != NULL) {\n") >> CFILE; - printf("\t\tret = 0;\n") >> CFILE; - printf("\t\tSTAILQ_INSERT_HEAD(&txnid") >> CFILE; - printf("->logs, lr, links);\n") >> CFILE; - printf("#ifdef DIAGNOSTIC\n") >> CFILE; - printf("\t\tgoto do_put;\n") >> CFILE; - printf("#endif\n") >> CFILE; - printf("\t}") >> CFILE; - # Output the log record. - printf(" else{\n") >> CFILE; - printf("#ifdef DIAGNOSTIC\n") >> CFILE; - printf("do_put:\n") >> CFILE; - printf("#endif\n") >> CFILE; - printf("\t\tret = __log_put(dbenv,\n") >> CFILE; - printf("\t\t ret_lsnp, (DBT *)&logrec, ") >> CFILE; - printf("flags | DB_LOG_NOCOPY);\n") >> CFILE; - - # Update the transactions last_lsn. - printf("\t\tif (ret == 0 && txnid != NULL)\n") >> CFILE; - printf("\t\t\ttxnid->last_lsn = *ret_lsnp;\n") >> CFILE; - printf("\t}\n\n") >> CFILE; - printf("\tif (!is_durable)\n") >> CFILE; - printf("\t\tLSN_NOT_LOGGED(*ret_lsnp);\n") >> CFILE; + } + # If out of disk space log writes may fail. 
If we are debugging + # that print out which records did not make it to disk. + printf("#ifdef LOG_DIAGNOSTIC\n") >> CFILE + printf("\tif (ret != 0)\n") >> CFILE; + printf("\t\t(void)%s_print(dbenv,\n", funcname) >> CFILE; + printf("\t\t (DBT *)&logrec, ret_lsnp, NULL, NULL);\n") \ + >> CFILE + printf("#endif\n\n") >> CFILE + # Free and return + if (dbprivate) { + printf("#ifdef DIAGNOSTIC\n") >> CFILE + write_free("\t", "logrec.data", CFILE) + printf("#else\n") >> CFILE + printf("\tif (is_durable || txnid == NULL)\n") >> CFILE; + write_free("\t\t", "logrec.data", CFILE) + printf("#endif\n") >> CFILE + } else { + write_free("\t", "logrec.data", CFILE) + } } else { - printf("\tret = dbenv->log_put(dbenv, ") >> CFILE; - printf("ret_lsnp, (DBT *)&logrec, flags);\n") >> CFILE; - - # Update the transactions last_lsn. - printf("\tif (ret == 0 && txnid != NULL)\n") >> CFILE; - printf("\t\ttxnid->last_lsn = *ret_lsnp;\n\n") >> CFILE; + printf("\t*lenp = (u_int32_t)(bp - buf);\n\n") >> CFILE } - # If out of disk space log writes may fail. If we are debugging - # that print out which records did not make it to disk. - printf("#ifdef LOG_DIAGNOSTIC\n") >> CFILE - printf("\tif (ret != 0)\n") >> CFILE; - printf("\t\t(void)%s_print(dbenv,\n", funcname) >> CFILE; - printf("\t\t (DBT *)&logrec, ret_lsnp, NULL, NULL);\n") >> CFILE - printf("#endif\n") >> CFILE - - # Free and return - if (dbprivate) { - printf("#ifndef DIAGNOSTIC\n") >> CFILE - printf("\tif (is_durable || txnid == NULL)\n") >> CFILE; - printf("#endif\n") >> CFILE - write_free("\t\t", "logrec.data", CFILE) - } else { - write_free("\t", "logrec.data", CFILE) - } printf("\treturn (ret);\n}\n\n") >> CFILE; } @@ -606,125 +685,143 @@ function print_function() { p[2] = " "; p[3] = "__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));"; p[4] = ""; - proto_format(p); + proto_format(p, PFILE); # Function declaration - printf("int\n%s_print(dbenv, ", funcname) >> CFILE; - printf("dbtp, lsnp, notused2, notused3)\n") >> CFILE; - printf("\tDB_ENV *dbenv;\n") >> CFILE; - printf("\tDBT *dbtp;\n") >> CFILE; - printf("\tDB_LSN *lsnp;\n") >> CFILE; - printf("\tdb_recops notused2;\n\tvoid *notused3;\n{\n") >> CFILE; + printf("int\n%s_print(dbenv, ", funcname) >> PFILE; + printf("dbtp, lsnp, notused2, notused3)\n") >> PFILE; + printf("\tDB_ENV *dbenv;\n") >> PFILE; + printf("\tDBT *dbtp;\n") >> PFILE; + printf("\tDB_LSN *lsnp;\n") >> PFILE; + printf("\tdb_recops notused2;\n\tvoid *notused3;\n{\n") >> PFILE; # Locals - printf("\t%s_args *argp;\n", funcname) >> CFILE; + printf("\t%s_args *argp;\n", funcname) >> PFILE; for (i = 0; i < nvars; i ++) if (modes[i] == "TIME") { - printf("\tstruct tm *lt;\n") >> CFILE + printf("\tstruct tm *lt;\n") >> PFILE + printf("\ttime_t timeval;\n") >> PFILE break; } for (i = 0; i < nvars; i ++) if (modes[i] == "DBT" || modes[i] == "PGDBT") { - printf("\tu_int32_t i;\n") >> CFILE - printf("\tint ch;\n") >> CFILE + printf("\tu_int32_t i;\n") >> PFILE + printf("\tint ch;\n") >> PFILE break; } - printf("\tint ret;\n\n") >> CFILE; + printf("\tint ret;\n\n") >> PFILE; # Get rid of complaints about unused parameters. 
- printf("\tnotused2 = DB_TXN_ABORT;\n\tnotused3 = NULL;\n\n") >> CFILE; + printf("\tnotused2 = DB_TXN_ABORT;\n\tnotused3 = NULL;\n\n") >> PFILE; # Call read routine to initialize structure printf("\tif ((ret = %s_read(dbenv, dbtp->data, &argp)) != 0)\n", \ - funcname) >> CFILE; - printf("\t\treturn (ret);\n") >> CFILE; + funcname) >> PFILE; + printf("\t\treturn (ret);\n") >> PFILE; # Print values in every record printf("\t(void)printf(\n\t \"[%%lu][%%lu]%s%%s: ",\ - funcname) >> CFILE; - printf("rec: %%lu txnid %%lx ") >> CFILE; - printf("prevlsn [%%lu][%%lu]\\n\",\n") >> CFILE; - printf("\t (u_long)lsnp->file,\n") >> CFILE; - printf("\t (u_long)lsnp->offset,\n") >> CFILE; + funcname) >> PFILE; + printf("rec: %%lu txnid %%lx ") >> PFILE; + printf("prevlsn [%%lu][%%lu]\\n\",\n") >> PFILE; + printf("\t (u_long)lsnp->file,\n") >> PFILE; + printf("\t (u_long)lsnp->offset,\n") >> PFILE; printf("\t (argp->type & DB_debug_FLAG) ? \"_debug\" : \"\",\n") \ - >> CFILE; - printf("\t (u_long)argp->type,\n") >> CFILE; - printf("\t (u_long)argp->txnid->txnid,\n") >> CFILE; - printf("\t (u_long)argp->prev_lsn.file,\n") >> CFILE; - printf("\t (u_long)argp->prev_lsn.offset);\n") >> CFILE; + >> PFILE; + printf("\t (u_long)argp->type,\n") >> PFILE; + printf("\t (u_long)argp->txnid->txnid,\n") >> PFILE; + printf("\t (u_long)argp->prev_lsn.file,\n") >> PFILE; + printf("\t (u_long)argp->prev_lsn.offset);\n") >> PFILE; # Now print fields of argp for (i = 0; i < nvars; i ++) { if (modes[i] == "TIME") { - printf("\tlt = localtime((time_t *)&argp->%s);\n", - vars[i]) >> CFILE; + printf("\ttimeval = (time_t)argp->%s;\n", + vars[i]) >> PFILE; + printf("\tlt = localtime(&timeval);\n") >> PFILE; printf("\t(void)printf(\n\t \"\\t%s: ", - vars[i]) >> CFILE; + vars[i]) >> PFILE; } else - printf("\t(void)printf(\"\\t%s: ", vars[i]) >> CFILE; + printf("\t(void)printf(\"\\t%s: ", vars[i]) >> PFILE; if (modes[i] == "DBT" || modes[i] == "PGDBT") { - printf("\");\n") >> CFILE; - printf("\tfor (i = 0; i < ") >> CFILE; - printf("argp->%s.size; i++) {\n", vars[i]) >> CFILE; + printf("\");\n") >> PFILE; + printf("\tfor (i = 0; i < ") >> PFILE; + printf("argp->%s.size; i++) {\n", vars[i]) >> PFILE; printf("\t\tch = ((u_int8_t *)argp->%s.data)[i];\n", \ - vars[i]) >> CFILE; - printf("\t\tprintf(isprint(ch) || ch == 0x0a") >> CFILE; - printf(" ? \"%%c\" : \"%%#x \", ch);\n") >> CFILE; - printf("\t}\n\t(void)printf(\"\\n\");\n") >> CFILE; + vars[i]) >> PFILE; + printf("\t\tprintf(isprint(ch) || ch == 0x0a") >> PFILE; + printf(" ? \"%%c\" : \"%%#x \", ch);\n") >> PFILE; + printf("\t}\n\t(void)printf(\"\\n\");\n") >> PFILE; } else if (types[i] == "DB_LSN *") { printf("[%%%s][%%%s]\\n\",\n", \ - formats[i], formats[i]) >> CFILE; + formats[i], formats[i]) >> PFILE; printf("\t (u_long)argp->%s.file,", \ - vars[i]) >> CFILE; + vars[i]) >> PFILE; printf(" (u_long)argp->%s.offset);\n", \ - vars[i]) >> CFILE; + vars[i]) >> PFILE; } else if (modes[i] == "TIME") { # Time values are displayed in two ways: the standard # string returned by ctime, and in the input format # expected by db_recover -t. 
printf(\ "%%%s (%%.24s, 20%%02lu%%02lu%%02lu%%02lu%%02lu.%%02lu)\\n\",\n", \ - formats[i]) >> CFILE; - printf("\t (long)argp->%s, ", vars[i]) >> CFILE; - printf("ctime((time_t *)&argp->%s),", vars[i]) >> CFILE; - printf("\n\t (u_long)lt->tm_year - 100, ") >> CFILE; - printf("(u_long)lt->tm_mon+1,") >> CFILE; - printf("\n\t (u_long)lt->tm_mday, ") >> CFILE; - printf("(u_long)lt->tm_hour,") >> CFILE; - printf("\n\t (u_long)lt->tm_min, ") >> CFILE; - printf("(u_long)lt->tm_sec);\n") >> CFILE; + formats[i]) >> PFILE; + printf("\t (long)argp->%s, ", vars[i]) >> PFILE; + printf("ctime(&timeval),", vars[i]) >> PFILE; + printf("\n\t (u_long)lt->tm_year - 100, ") >> PFILE; + printf("(u_long)lt->tm_mon+1,") >> PFILE; + printf("\n\t (u_long)lt->tm_mday, ") >> PFILE; + printf("(u_long)lt->tm_hour,") >> PFILE; + printf("\n\t (u_long)lt->tm_min, ") >> PFILE; + printf("(u_long)lt->tm_sec);\n") >> PFILE; + } else if (modes[i] == "LOCKS") { + printf("\\n\");\n") >> PFILE; + printf("\t__lock_list_print(dbenv, &argp->locks);\n") \ + >> PFILE; } else { if (formats[i] == "lx") - printf("0x") >> CFILE; - printf("%%%s\\n\", ", formats[i]) >> CFILE; + printf("0x") >> PFILE; + printf("%%%s\\n\", ", formats[i]) >> PFILE; if (formats[i] == "lx" || formats[i] == "lu") - printf("(u_long)") >> CFILE; + printf("(u_long)") >> PFILE; if (formats[i] == "ld") - printf("(long)") >> CFILE; - printf("argp->%s);\n", vars[i]) >> CFILE; + printf("(long)") >> PFILE; + printf("argp->%s);\n", vars[i]) >> PFILE; } } - printf("\t(void)printf(\"\\n\");\n") >> CFILE; - write_free("\t", "argp", CFILE); - printf("\treturn (0);\n") >> CFILE; - printf("}\n\n") >> CFILE; + printf("\t(void)printf(\"\\n\");\n") >> PFILE; + write_free("\t", "argp", PFILE); + printf("\treturn (0);\n") >> PFILE; + printf("}\n\n") >> PFILE; } function read_function() { # Write the read function; function prototype - p[1] = sprintf("int %s_read __P((DB_ENV *, void *,", funcname); + if (not_buf == 1) + p[1] = sprintf("int %s_read __P((DB_ENV *, void *,", funcname); + else + p[1] = sprintf("int %s_read __P((DB_ENV *, void *, void **,", \ + funcname); p[2] = " "; p[3] = sprintf("%s_args **));", funcname); p[4] = ""; - proto_format(p); + proto_format(p, CFILE); # Function declaration - printf("int\n%s_read(dbenv, recbuf, argpp)\n", funcname) >> CFILE; + if (not_buf == 1) + printf("int\n%s_read(dbenv, recbuf, argpp)\n", funcname) \ + >> CFILE; + else + printf(\ + "int\n%s_read(dbenv, recbuf, nextp, argpp)\n", funcname) \ + >> CFILE; # Now print the parameters printf("\tDB_ENV *dbenv;\n") >> CFILE; printf("\tvoid *recbuf;\n") >> CFILE; + if (not_buf == 0) + printf("\tvoid **nextp;\n") >> CFILE; printf("\t%s_args **argpp;\n", funcname) >> CFILE; # Function body and local decls @@ -742,26 +839,37 @@ function read_function() { printf("\n\tdbenv = NULL;\n") >> CFILE; } - malloc_size = sprintf("sizeof(%s_args) + sizeof(DB_TXN)", funcname) + if (not_buf == 1) { + malloc_size = sprintf("sizeof(%s_args) + sizeof(DB_TXN)", \ + funcname) + } else { + malloc_size = sprintf("sizeof(%s_args)", funcname) + } write_malloc("\t", "argp", malloc_size, CFILE) # Set up the pointers to the txnid. - printf("\targp->txnid = (DB_TXN *)&argp[1];\n\n") >> CFILE; + printf("\tbp = recbuf;\n") >> CFILE; - # First get the record type, prev_lsn, and txnid fields. 
+ if (not_buf == 1) { + printf("\targp->txnid = (DB_TXN *)&argp[1];\n\n") >> CFILE; - printf("\tbp = recbuf;\n") >> CFILE; - printf("\tmemcpy(&argp->type, bp, sizeof(argp->type));\n") >> CFILE; - printf("\tbp += sizeof(argp->type);\n\n") >> CFILE; - printf("\tmemcpy(&argp->txnid->txnid, bp, ") >> CFILE; - printf("sizeof(argp->txnid->txnid));\n") >> CFILE; - printf("\tbp += sizeof(argp->txnid->txnid);\n\n") >> CFILE; - printf("\tmemcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));\n") >> CFILE; - printf("\tbp += sizeof(DB_LSN);\n\n") >> CFILE; + # First get the record type, prev_lsn, and txnid fields. + + printf("\tmemcpy(&argp->type, bp, sizeof(argp->type));\n") \ + >> CFILE; + printf("\tbp += sizeof(argp->type);\n\n") >> CFILE; + printf("\tmemcpy(&argp->txnid->txnid, bp, ") >> CFILE; + printf("sizeof(argp->txnid->txnid));\n") >> CFILE; + printf("\tbp += sizeof(argp->txnid->txnid);\n\n") >> CFILE; + printf("\tmemcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));\n") \ + >> CFILE; + printf("\tbp += sizeof(DB_LSN);\n\n") >> CFILE; + } # Now get rest of data. for (i = 0; i < nvars; i ++) { - if (modes[i] == "DBT" || modes[i] == "PGDBT") { + if (modes[i] == "DBT" || \ + modes[i] == "LOCKS" || modes[i] == "PGDBT") { printf("\tmemset(&argp->%s, 0, sizeof(argp->%s));\n", \ vars[i], vars[i]) >> CFILE; printf("\tmemcpy(&argp->%s.size, ", vars[i]) >> CFILE; @@ -785,119 +893,17 @@ function read_function() { } # Free and return + if (not_buf == 0) + printf("\t*nextp = bp;\n") >> CFILE; printf("\t*argpp = argp;\n") >> CFILE; printf("\treturn (0);\n}\n\n") >> CFILE; } -function getpgnos_function() { - # Write the getpgnos function; function prototype - printf("#ifdef HAVE_REPLICATION\n") >> CFILE; - p[1] = sprintf("int %s_getpgnos", funcname); - p[2] = " "; - p[3] = "__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));"; - p[4] = ""; - proto_format(p); - - # Function declaration - printf("int\n%s_getpgnos(dbenv, ", funcname) >> CFILE; - printf("rec, lsnp, notused1, summary)\n") >> CFILE; - printf("\tDB_ENV *dbenv;\n") >> CFILE; - printf("\tDBT *rec;\n") >> CFILE; - printf("\tDB_LSN *lsnp;\n") >> CFILE; - printf("\tdb_recops notused1;\n") >> CFILE; - printf("\tvoid *summary;\n{\n") >> CFILE; - - # If there are no locks, return this fact. - if (nlocks == 0) { - printf("\tTXN_RECS *t;\n") >> CFILE; - printf("\tint ret;\n") >> CFILE; - printf("\tCOMPQUIET(rec, NULL);\n") >> CFILE; - printf("\tCOMPQUIET(notused1, DB_TXN_ABORT);\n") >> CFILE; - - printf("\n\tt = (TXN_RECS *)summary;\n") >> CFILE; - printf("\n\tif ((ret = __rep_check_alloc(dbenv, ") >> CFILE; - printf("t, 1)) != 0)\n") >> CFILE; - printf("\t\treturn (ret);\n") >> CFILE; - - printf("\n\tt->array[t->npages].flags = LSN_PAGE_NOLOCK;\n") \ - >> CFILE; - printf("\tt->array[t->npages].lsn = *lsnp;\n") >> CFILE; - printf("\tt->array[t->npages].fid = DB_LOGFILEID_INVALID;\n") \ - >> CFILE; - printf("\tmemset(&t->array[t->npages].pgdesc, 0,\n") >> CFILE; - printf("\t sizeof(t->array[t->npages].pgdesc));\n") >> CFILE; - printf("\n\tt->npages++;\n") >> CFILE; - - printf("\n") >> CFILE; - printf("\treturn (0);\n") >> CFILE; - printf("}\n#endif /* HAVE_REPLICATION */\n\n") >> CFILE; - return; - } - - # Locals - printf("\tDB *dbp;\n") >> CFILE; - printf("\tTXN_RECS *t;\n") >> CFILE; - printf("\t%s_args *argp;\n", funcname) >> CFILE; - printf("\tint ret;\n\n") >> CFILE; - - # Shut up compiler. 
- printf("\tCOMPQUIET(notused1, DB_TXN_ABORT);\n\n") >> CFILE; - - printf("\targp = NULL;\n") >> CFILE; - printf("\tt = (TXN_RECS *)summary;\n\n") >> CFILE; - - printf("\tif ((ret = %s_read(dbenv, rec->data, &argp)) != 0)\n", \ - funcname) >> CFILE; - printf("\t\treturn (ret);\n") >> CFILE; - - # Get file ID. - printf("\n\tif ((ret = __dbreg_id_to_db(dbenv,\n\t ") >> CFILE; - printf("argp->txnid, &dbp, argp->fileid, 0)) != 0)\n") >> CFILE; - printf("\t\tgoto err;\n") >> CFILE; - - printf("\n\tif ((ret = __rep_check_alloc(dbenv, t, %d)) != 0)\n", \ - nlocks) >> CFILE; - printf("\t\tgoto err;\n\n") >> CFILE; - - for (i = 1; i <= nlocks; i++) { - if (lock_if_zero[i]) { - indent = "\t"; - } else { - indent = "\t\t"; - printf("\tif (argp->%s != PGNO_INVALID) {\n", \ - lock_pgnos[i]) >> CFILE; - } - printf("%st->array[t->npages].flags = 0;\n", indent) >> CFILE; - printf("%st->array[t->npages].fid = argp->fileid;\n", indent) \ - >> CFILE; - printf("%st->array[t->npages].lsn = *lsnp;\n", indent) >> CFILE; - printf("%st->array[t->npages].pgdesc.pgno = argp->%s;\n", \ - indent, lock_pgnos[i]) >> CFILE; - printf("%st->array[t->npages].pgdesc.type = DB_PAGE_LOCK;\n", \ - indent) >> CFILE; - printf("%smemcpy(t->array[t->npages].pgdesc.fileid, ", indent) \ - >> CFILE; - printf("dbp->fileid,\n%s DB_FILE_ID_LEN);\n", \ - indent, indent) >> CFILE; - printf("%st->npages++;\n", indent) >> CFILE; - if (!lock_if_zero[i]) { - printf("\t}\n") >> CFILE; - } - } - - printf("\nerr:\tif (argp != NULL)\n") >> CFILE; - write_free("\t", "argp", CFILE); - - printf("\treturn (ret);\n") >> CFILE; - - printf("}\n#endif /* HAVE_REPLICATION */\n\n") >> CFILE; -} - # proto_format -- # Pretty-print a function prototype. -function proto_format(p) +function proto_format(p, fp) { - printf("/*\n") >> CFILE; + printf("/*\n") >> fp; s = ""; for (i = 1; i in p; ++i) @@ -905,24 +911,24 @@ function proto_format(p) t = " * PUBLIC: " if (length(s) + length(t) < 80) - printf("%s%s", t, s) >> CFILE; + printf("%s%s", t, s) >> fp; else { split(s, p, "__P"); len = length(t) + length(p[1]); - printf("%s%s", t, p[1]) >> CFILE + printf("%s%s", t, p[1]) >> fp n = split(p[2], comma, ","); comma[1] = "__P" comma[1]; for (i = 1; i <= n; i++) { if (len + length(comma[i]) > 70) { - printf("\n * PUBLIC: ") >> CFILE; + printf("\n * PUBLIC: ") >> fp; len = 0; } - printf("%s%s", comma[i], i == n ? "" : ",") >> CFILE; + printf("%s%s", comma[i], i == n ? "" : ",") >> fp; len += length(comma[i]) + 2; } } - printf("\n */\n") >> CFILE; + printf("\n */\n") >> fp; delete p; } @@ -941,8 +947,8 @@ function write_malloc(tab, ptr, size, file) function write_free(tab, ptr, file) { if (dbprivate) { - print(tab "__os_free(dbenv, " ptr ");\n") >> file + print(tab "__os_free(dbenv, " ptr ");") >> file } else { - print(tab "free(" ptr ");\n") >> file + print(tab "free(" ptr ");") >> file } } diff --git a/db/dist/gen_rpc.awk b/db/dist/gen_rpc.awk index 9e57a98f3..7eee77b3d 100644 --- a/db/dist/gen_rpc.awk +++ b/db/dist/gen_rpc.awk @@ -1,5 +1,5 @@ # -# $Id: gen_rpc.awk,v 11.54 2003/09/04 23:59:03 bostic Exp $ +# $Id: gen_rpc.awk,v 11.58 2004/08/19 20:28:37 mjc Exp $ # Awk script for generating client/server RPC code. 
# # This awk script generates most of the RPC routines for DB client/server @@ -14,7 +14,6 @@ # xidsize -- size of GIDs # client_file -- the C source file being created for client code # ctmpl_file -- the C template file being created for client code -# sed_file -- the sed file created to alter server proc code # server_file -- the C source file being created for server code # stmpl_file -- the C template file being created for server code # xdr_file -- the XDR message file created @@ -23,15 +22,13 @@ BEGIN { if (major == "" || minor == "" || xidsize == "" || client_file == "" || ctmpl_file == "" || - sed_file == "" || server_file == "" || - stmpl_file == "" || xdr_file == "") { + server_file == "" || stmpl_file == "" || xdr_file == "") { print "Usage: gen_rpc.awk requires these variables be set:" print "\tmajor\t-- Major version number" print "\tminor\t-- Minor version number" print "\txidsize\t-- GID size" print "\tclient_file\t-- the client C source file being created" print "\tctmpl_file\t-- the client template file being created" - print "\tsed_file\t-- the sed command file being created" print "\tserver_file\t-- the server C source file being created" print "\tstmpl_file\t-- the server template file being created" print "\txdr_file\t-- the XDR message file being created" @@ -51,32 +48,24 @@ BEGIN { printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \ > SFILE - # Server procedure template and a sed file to massage an existing - # template source file to change args. - # SEDFILE should be same name as PFILE but .c - # + # Server procedure template. PFILE = stmpl_file - SEDFILE = sed_file - printf("") > SEDFILE - printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \ - > PFILE - XFILE = xdr_file printf("/* Do not edit: automatically built by gen_rpc.awk. 
*/\n") \ > XFILE nendlist = 1; } END { - printf("#endif /* HAVE_RPC */\n") >> CFILE - printf("#endif /* HAVE_RPC */\n") >> TFILE - printf("program DB_RPC_SERVERPROG {\n") >> XFILE - printf("\tversion DB_RPC_SERVERVERS {\n") >> XFILE + if (error == 0) { + printf("program DB_RPC_SERVERPROG {\n") >> XFILE + printf("\tversion DB_RPC_SERVERVERS {\n") >> XFILE - for (i = 1; i < nendlist; ++i) - printf("\t\t%s;\n", endlist[i]) >> XFILE + for (i = 1; i < nendlist; ++i) + printf("\t\t%s;\n", endlist[i]) >> XFILE - printf("\t} = %d%03d;\n", major, minor) >> XFILE - printf("} = 351457;\n") >> XFILE + printf("\t} = %d%03d;\n", major, minor) >> XFILE + printf("} = 351457;\n") >> XFILE + } } /^[ ]*BEGIN/ { @@ -205,33 +194,29 @@ END { if (first == 0) { printf("#include \"db_config.h\"\n") >> CFILE printf("\n") >> CFILE - printf("#ifdef HAVE_RPC\n") >> CFILE printf("#ifndef NO_SYSTEM_INCLUDES\n") >> CFILE - printf("#include \n\n") >> CFILE + printf("#include \n") >> CFILE + printf("\n") >> CFILE printf("#include \n") >> CFILE - printf("#include \n") >> CFILE printf("\n") >> CFILE printf("#include \n") >> CFILE printf("#endif\n") >> CFILE printf("\n") >> CFILE + printf("#include \"db_server.h\"\n") >> CFILE + printf("\n") >> CFILE printf("#include \"db_int.h\"\n") >> CFILE printf("#include \"dbinc/txn.h\"\n") >> CFILE - printf("\n") >> CFILE - printf("#include \"dbinc_auto/db_server.h\"\n") >> CFILE printf("#include \"dbinc_auto/rpc_client_ext.h\"\n") >> CFILE printf("\n") >> CFILE printf("#include \"db_config.h\"\n") >> TFILE printf("\n") >> TFILE - printf("#ifdef HAVE_RPC\n") >> TFILE printf("#ifndef NO_SYSTEM_INCLUDES\n") >> TFILE printf("#include \n") >> TFILE - printf("#include \n") >> TFILE printf("\n") >> TFILE printf("#include \n") >> TFILE printf("#endif\n") >> TFILE printf("#include \"db_int.h\"\n") >> TFILE - printf("#include \"dbinc_auto/db_server.h\"\n") >> TFILE printf("#include \"dbinc/txn.h\"\n") >> TFILE printf("\n") >> TFILE @@ -241,13 +226,13 @@ END { printf("#include \n") >> SFILE printf("\n") >> SFILE printf("#include \n") >> SFILE - printf("#include \n") >> SFILE printf("\n") >> SFILE printf("#include \n") >> SFILE printf("#endif\n") >> SFILE printf("\n") >> SFILE + printf("#include \"db_server.h\"\n") >> SFILE + printf("\n") >> SFILE printf("#include \"db_int.h\"\n") >> SFILE - printf("#include \"dbinc_auto/db_server.h\"\n") >> SFILE printf("#include \"dbinc/db_server_int.h\"\n") >> SFILE printf("#include \"dbinc_auto/rpc_server_ext.h\"\n") >> SFILE printf("\n") >> SFILE @@ -262,10 +247,10 @@ END { printf("#include \n") >> PFILE printf("#endif\n") >> PFILE printf("\n") >> PFILE + printf("#include \"db_server.h\"\n") >> PFILE + printf("\n") >> PFILE printf("#include \"db_int.h\"\n") >> PFILE - printf("#include \"dbinc_auto/db_server.h\"\n") >> PFILE printf("#include \"dbinc/db_server_int.h\"\n") >> PFILE - printf("#include \"dbinc_auto/rpc_server_ext.h\"\n") >> PFILE printf("\n") >> PFILE first = 1; @@ -310,6 +295,7 @@ END { # # Spit out PUBLIC prototypes. # + delete p; pi = 1; p[pi++] = sprintf("int __dbcl_%s __P((", name); p[pi++] = ""; @@ -318,9 +304,8 @@ END { p[pi++] = ", "; } p[pi - 1] = ""; - p[pi++] = "));"; - p[pi] = ""; - proto_format(p, 0, CFILE); + p[pi] = "));"; + proto_format(p, CFILE); # # Spit out function name/args. @@ -476,13 +461,9 @@ END { # # First spit out PUBLIC prototypes for server functions. 
# - p[1] = sprintf("__%s_reply *__db_%s_%d%03d __P((__%s_msg *, struct svc_req *));", - name, name, major, minor, name); - p[2] = ""; - proto_format(p, 0, SFILE); - printf("__%s_reply *\n", name) >> SFILE - printf("__db_%s_%d%03d(msg, req)\n", name, major, minor) >> SFILE + printf("__db_%s_%d%03d__SVCSUFFIX__(msg, req)\n", \ + name, major, minor) >> SFILE printf("\t__%s_msg *msg;\n", name) >> SFILE; printf("\tstruct svc_req *req;\n", name) >> SFILE; printf("{\n") >> SFILE @@ -529,7 +510,7 @@ END { sep, args[i], args[i]) >> SFILE } if (rpc_type[i] == "GID") { - printf("%smsg->%s", sep, args[i]) >> SFILE + printf("%s(u_int8_t *)msg->%s", sep, args[i]) >> SFILE } if (rpc_type[i] == "INT") { printf("%smsg->%s", sep, args[i]) >> SFILE @@ -568,16 +549,9 @@ END { # ===================================================== # Generate Procedure Template Server code # - # Produce SED file commands if needed at the same time - # # Spit out comment, prototype, function name and arg list. - # - printf("/^\\/\\* BEGIN __%s_proc/,/^\\/\\* END __%s_proc/c\\\n", \ - name, name) >> SEDFILE - printf("/* BEGIN __%s_proc */\n", name) >> PFILE - printf("/* BEGIN __%s_proc */\\\n", name) >> SEDFILE - + delete p; pi = 1; p[pi++] = sprintf("void __%s_proc __P((", name); p[pi++] = ""; @@ -646,12 +620,9 @@ END { p[pi++] = "));"; } p[pi++] = ""; - proto_format(p, 1, SEDFILE); printf("void\n") >> PFILE - printf("void\\\n") >> SEDFILE printf("__%s_proc(", name) >> PFILE - printf("__%s_proc(", name) >> SEDFILE sep = ""; argcount = 0; for (i = 0; i < nvars; ++i) { @@ -664,27 +635,21 @@ END { continue; if (rpc_type[i] == "ID") { printf("%s%scl_id", sep, args[i]) >> PFILE - printf("%s%scl_id", sep, args[i]) >> SEDFILE } if (rpc_type[i] == "STRING") { printf("%s%s", sep, args[i]) >> PFILE - printf("%s%s", sep, args[i]) >> SEDFILE } if (rpc_type[i] == "GID") { printf("%s%s", sep, args[i]) >> PFILE - printf("%s%s", sep, args[i]) >> SEDFILE } if (rpc_type[i] == "INT") { printf("%s%s", sep, args[i]) >> PFILE - printf("%s%s", sep, args[i]) >> SEDFILE } if (rpc_type[i] == "INTRET") { printf("%s%s", sep, args[i]) >> PFILE - printf("%s%s", sep, args[i]) >> SEDFILE } if (rpc_type[i] == "LIST") { printf("%s%s", sep, args[i]) >> PFILE - printf("%s%s", sep, args[i]) >> SEDFILE argcount++; split_lines(); if (argcount == 0) { @@ -693,11 +658,9 @@ END { sep = ", "; } printf("%s%slen", sep, args[i]) >> PFILE - printf("%s%slen", sep, args[i]) >> SEDFILE } if (rpc_type[i] == "DBT") { printf("%s%sdlen", sep, args[i]) >> PFILE - printf("%s%sdlen", sep, args[i]) >> SEDFILE sep = ", "; argcount++; split_lines(); @@ -707,7 +670,6 @@ END { sep = ", "; } printf("%s%sdoff", sep, args[i]) >> PFILE - printf("%s%sdoff", sep, args[i]) >> SEDFILE argcount++; split_lines(); if (argcount == 0) { @@ -716,7 +678,6 @@ END { sep = ", "; } printf("%s%sulen", sep, args[i]) >> PFILE - printf("%s%sulen", sep, args[i]) >> SEDFILE argcount++; split_lines(); if (argcount == 0) { @@ -725,7 +686,6 @@ END { sep = ", "; } printf("%s%sflags", sep, args[i]) >> PFILE - printf("%s%sflags", sep, args[i]) >> SEDFILE argcount++; split_lines(); if (argcount == 0) { @@ -734,7 +694,6 @@ END { sep = ", "; } printf("%s%sdata", sep, args[i]) >> PFILE - printf("%s%sdata", sep, args[i]) >> SEDFILE argcount++; split_lines(); if (argcount == 0) { @@ -743,18 +702,14 @@ END { sep = ", "; } printf("%s%ssize", sep, args[i]) >> PFILE - printf("%s%ssize", sep, args[i]) >> SEDFILE } sep = ", "; } printf("%sreplyp",sep) >> PFILE - printf("%sreplyp",sep) >> SEDFILE if (xdr_free) { 
printf("%sfreep)\n",sep) >> PFILE - printf("%sfreep)\\\n",sep) >> SEDFILE } else { printf(")\n") >> PFILE - printf(")\\\n") >> SEDFILE } # # Spit out arg types/names; @@ -762,65 +717,44 @@ END { for (i = 0; i < nvars; ++i) { if (rpc_type[i] == "ID") { printf("\tlong %scl_id;\n", args[i]) >> PFILE - printf("\\\tlong %scl_id;\\\n", args[i]) >> SEDFILE } if (rpc_type[i] == "STRING") { printf("\tchar *%s;\n", args[i]) >> PFILE - printf("\\\tchar *%s;\\\n", args[i]) >> SEDFILE } if (rpc_type[i] == "GID") { printf("\tu_int8_t *%s;\n", args[i]) >> PFILE - printf("\\\tu_int8_t *%s;\\\n", args[i]) >> SEDFILE } if (rpc_type[i] == "INT") { printf("\tu_int32_t %s;\n", args[i]) >> PFILE - printf("\\\tu_int32_t %s;\\\n", args[i]) >> SEDFILE } if (rpc_type[i] == "LIST" && list_type[i] == "GID") { printf("\tu_int8_t * %s;\n", args[i]) >> PFILE - printf("\\\tu_int8_t * %s;\\\n", args[i]) >> SEDFILE } if (rpc_type[i] == "LIST" && list_type[i] == "INT") { printf("\tu_int32_t * %s;\n", args[i]) >> PFILE - printf("\\\tu_int32_t * %s;\\\n", \ - args[i]) >> SEDFILE printf("\tu_int32_t %ssize;\n", args[i]) >> PFILE - printf("\\\tu_int32_t %ssize;\\\n", args[i]) >> SEDFILE } if (rpc_type[i] == "LIST" && list_type[i] == "ID") { printf("\tu_int32_t * %s;\n", args[i]) >> PFILE - printf("\\\tu_int32_t * %s;\\\n", args[i]) \ - >> SEDFILE } if (rpc_type[i] == "LIST") { printf("\tu_int32_t %slen;\n", args[i]) >> PFILE - printf("\\\tu_int32_t %slen;\\\n", args[i]) \ - >> SEDFILE } if (rpc_type[i] == "DBT") { printf("\tu_int32_t %sdlen;\n", args[i]) >> PFILE - printf("\\\tu_int32_t %sdlen;\\\n", args[i]) >> SEDFILE printf("\tu_int32_t %sdoff;\n", args[i]) >> PFILE - printf("\\\tu_int32_t %sdoff;\\\n", args[i]) >> SEDFILE printf("\tu_int32_t %sulen;\n", args[i]) >> PFILE - printf("\\\tu_int32_t %sulen;\\\n", args[i]) >> SEDFILE printf("\tu_int32_t %sflags;\n", args[i]) >> PFILE - printf("\\\tu_int32_t %sflags;\\\n", args[i]) >> SEDFILE printf("\tvoid *%sdata;\n", args[i]) >> PFILE - printf("\\\tvoid *%sdata;\\\n", args[i]) >> SEDFILE printf("\tu_int32_t %ssize;\n", args[i]) >> PFILE - printf("\\\tu_int32_t %ssize;\\\n", args[i]) >> SEDFILE } } printf("\t__%s_reply *replyp;\n",name) >> PFILE - printf("\\\t__%s_reply *replyp;\\\n",name) >> SEDFILE if (xdr_free) { printf("\tint * freep;\n") >> PFILE - printf("\\\tint * freep;\\\n") >> SEDFILE } printf("/* END __%s_proc */\n", name) >> PFILE - printf("/* END __%s_proc */\n", name) >> SEDFILE # # Function body @@ -853,6 +787,7 @@ END { # # Spit out PUBLIC prototypes. # + delete p; pi = 1; p[pi++] = sprintf("int __dbcl_%s __P((", name); p[pi++] = ""; @@ -861,9 +796,8 @@ END { p[pi++] = ", "; } p[pi - 1] = ""; - p[pi++] = "));"; - p[pi] = ""; - proto_format(p, 0, CFILE); + p[pi] = "));"; + proto_format(p, CFILE); # # Spit out function name/args. @@ -1106,6 +1040,7 @@ END { # # If we are doing a list, write prototypes # + delete p; pi = 1; p[pi++] = sprintf("int __dbcl_%s_ret __P((", name); p[pi++] = ""; @@ -1113,9 +1048,8 @@ END { p[pi++] = pr_type[i]; p[pi++] = ", "; } - p[pi++] = sprintf("__%s_reply *));", name); - p[pi++] = ""; - proto_format(p, 0, TFILE); + p[pi] = sprintf("__%s_reply *));", name); + proto_format(p, TFILE); printf("int\n") >> TFILE printf("__dbcl_%s_ret(", name) >> TFILE @@ -1206,32 +1140,22 @@ function split_lines() { sub("[ ]$", "", sep) printf("%s\n\t\t", sep) >> PFILE - printf("%s\\\n\\\t\\\t", sep) >> SEDFILE } } # proto_format -- # Pretty-print a function prototype. 
-function proto_format(p, sedfile, OUTPUT) +function proto_format(p, OUTPUT) { - if (sedfile) - printf("/*\\\n") >> OUTPUT; - else - printf("/*\n") >> OUTPUT; + printf("/*\n") >> OUTPUT; s = ""; for (i = 1; i in p; ++i) s = s p[i]; - if (sedfile) - t = "\\ * PUBLIC: " - else - t = " * PUBLIC: " + t = " * PUBLIC: " if (length(s) + length(t) < 80) - if (sedfile) - printf("%s%s", t, s) >> OUTPUT; - else - printf("%s%s", t, s) >> OUTPUT; + printf("%s%s", t, s) >> OUTPUT; else { split(s, p, "__P"); len = length(t) + length(p[1]); @@ -1241,20 +1165,12 @@ function proto_format(p, sedfile, OUTPUT) comma[1] = "__P" comma[1]; for (i = 1; i <= n; i++) { if (len + length(comma[i]) > 75) { - if (sedfile) - printf(\ - "\\\n\\ * PUBLIC: ") >> OUTPUT; - else - printf("\n * PUBLIC: ") >> OUTPUT; + printf("\n * PUBLIC: ") >> OUTPUT; len = 0; } printf("%s%s", comma[i], i == n ? "" : ",") >> OUTPUT; len += length(comma[i]); } } - if (sedfile) - printf("\\\n\\ */\\\n") >> OUTPUT; - else - printf("\n */\n") >> OUTPUT; - delete p; + printf("\n */\n") >> OUTPUT; } diff --git a/db/dist/ltmain.sh b/db/dist/ltmain.sh index 3526e4b9b..c96a96ddd 100644 --- a/db/dist/ltmain.sh +++ b/db/dist/ltmain.sh @@ -1,7 +1,7 @@ # ltmain.sh - Provide generalized library-building support services. # NOTE: Changing this file will not affect anything until you rerun configure. # -# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003 +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004 # Free Software Foundation, Inc. # Originally by Gordon Matzigkeit , 1996 # @@ -24,6 +24,34 @@ # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. +basename="s,^.*/,,g" + +# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh +# is ksh but when the shell is invoked as "sh" and the current value of +# the _XPG environment variable is not equal to 1 (one), the special +# positional parameter $0, within a function call, is the name of the +# function. +progpath="$0" + +# The name of this program: +progname=`echo "$progpath" | $SED $basename` +modename="$progname" + +# Global variables: +EXIT_SUCCESS=0 +EXIT_FAILURE=1 + +PROGRAM=ltmain.sh +PACKAGE=libtool +VERSION=1.5.8 +TIMESTAMP=" (1.1220.2.117 2004/08/04 14:12:05)" + +# See if we are running on zsh, and set the options which allow our +# commands through without removal of \ escapes. +if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi + # Check that we have a working $echo. if test "X$1" = X--no-reexec; then # Discard the --no-reexec flag, and continue. @@ -36,7 +64,7 @@ elif test "X`($echo '\t') 2>/dev/null`" = 'X\t'; then : else # Restart under the correct shell, and then maybe $echo will work. - exec $SHELL "$0" --no-reexec ${1+"$@"} + exec $SHELL "$progpath" --no-reexec ${1+"$@"} fi if test "X$1" = X--fallback-echo; then @@ -45,19 +73,9 @@ if test "X$1" = X--fallback-echo; then cat <&2 $echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2 - exit 1 + exit $EXIT_FAILURE fi # Global variables. @@ -118,10 +137,13 @@ o2lo="s/\\.${objext}\$/.lo/" # Shell function definitions: # This seems to be the best place for them +# func_win32_libid arg +# return the library type of file 'arg' +# # Need a lot of goo to handle *both* DLLs and import libs # Has to be a shell function in order to 'eat' the argument # that is supplied when $file_magic_command is called. 
-win32_libid () { +func_win32_libid () { win32_libid_type="unknown" win32_fileres=`file -L $1 2>/dev/null` case $win32_fileres in @@ -130,7 +152,7 @@ win32_libid () { ;; *ar\ archive*) # could be an import, or static if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | \ - grep -E 'file format pe-i386(.*architecture: i386)?' >/dev/null ; then + $EGREP -e 'file format pe-i386(.*architecture: i386)?' >/dev/null ; then win32_nmres=`eval $NM -f posix -A $1 | \ sed -n -e '1,100{/ I /{x;/import/!{s/^/import/;h;p;};x;};}'` if test "X$win32_nmres" = "Ximport" ; then @@ -140,7 +162,7 @@ win32_libid () { fi fi ;; - *DLL*) + *DLL*) win32_libid_type="x86 DLL" ;; *executable*) # but shell scripts are "executable" too... @@ -154,9 +176,192 @@ win32_libid () { $echo $win32_libid_type } + +# func_infer_tag arg +# Infer tagged configuration to use if any are available and +# if one wasn't chosen via the "--tag" command line option. +# Only attempt this if the compiler in the base compile +# command doesn't match the default compiler. +# arg is usually of the form 'gcc ...' +func_infer_tag () { + if test -n "$available_tags" && test -z "$tagname"; then + CC_quoted= + for arg in $CC; do + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + CC_quoted="$CC_quoted $arg" + done + case $@ in + # Blanks in the command may have been stripped by the calling shell, + # but not from the CC environment variable when configure was run. + " $CC "* | "$CC "* | " `$echo $CC` "* | "`$echo $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$echo $CC_quoted` "* | "`$echo $CC_quoted` "*) ;; + # Blanks at the start of $base_compile will cause this to fail + # if we don't check for them as well. + *) + for z in $available_tags; do + if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then + # Evaluate the configuration. + eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" + CC_quoted= + for arg in $CC; do + # Double-quote args containing other shell metacharacters. + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + CC_quoted="$CC_quoted $arg" + done + case "$@ " in + " $CC "* | "$CC "* | " `$echo $CC` "* | "`$echo $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$echo $CC_quoted` "* | "`$echo $CC_quoted` "*) + # The compiler in the base compile command matches + # the one in the tagged configuration. + # Assume this is the tagged configuration we want. + tagname=$z + break + ;; + esac + fi + done + # If $tagname still isn't set, then no tagged configuration + # was found and let the user know that the "--tag" command + # line option must be used. + if test -z "$tagname"; then + $echo "$modename: unable to infer tagged configuration" + $echo "$modename: specify a tag with \`--tag'" 1>&2 + exit $EXIT_FAILURE +# else +# $echo "$modename: using $tagname tagged configuration" + fi + ;; + esac + fi +} + + +# func_extract_archives gentop oldlib ... +func_extract_archives () { + my_gentop="$1"; shift + my_oldlibs=${1+"$@"} + my_oldobjs="" + my_xlib="" + my_xabs="" + my_xdir="" + my_status="" + + $show "${rm}r $my_gentop" + $run ${rm}r "$my_gentop" + $show "$mkdir $my_gentop" + $run $mkdir "$my_gentop" + my_status=$? + if test "$my_status" -ne 0 && test ! -d "$my_gentop"; then + exit $my_status + fi + + for my_xlib in $my_oldlibs; do + # Extract the objects. 
+ case $my_xlib in + [\\/]* | [A-Za-z]:[\\/]*) my_xabs="$my_xlib" ;; + *) my_xabs=`pwd`"/$my_xlib" ;; + esac + my_xlib=`$echo "X$my_xlib" | $Xsed -e 's%^.*/%%'` + my_xdir="$my_gentop/$my_xlib" + + $show "${rm}r $my_xdir" + $run ${rm}r "$my_xdir" + $show "$mkdir $my_xdir" + $run $mkdir "$my_xdir" + status=$? + if test "$status" -ne 0 && test ! -d "$my_xdir"; then + exit $status + fi + case $host in + *-darwin*) + $show "Extracting $my_xabs" + # Do not bother doing anything if just a dry run + if test -z "$run"; then + darwin_orig_dir=`pwd` + cd $my_xdir || exit $? + darwin_archive=$my_xabs + darwin_curdir=`pwd` + darwin_base_archive=`basename $darwin_archive` + darwin_arches=`lipo -info "$darwin_archive" 2>/dev/null | $EGREP Architectures 2>/dev/null` + if test -n "$darwin_arches"; then + darwin_arches=`echo "$darwin_arches" | $SED -e 's/.*are://'` + darwin_arch= + $show "$darwin_base_archive has multiple architectures $darwin_arches" + for darwin_arch in $darwin_arches ; do + mkdir -p "unfat-$$/${darwin_base_archive}-${darwin_arch}" + lipo -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}" + # Remove the table of contents from the thin files. + $AR -d "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" __.SYMDEF 2>/dev/null || true + $AR -d "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" __.SYMDEF\ SORTED 2>/dev/null || true + cd "unfat-$$/${darwin_base_archive}-${darwin_arch}" + $AR -xo "${darwin_base_archive}" + rm "${darwin_base_archive}" + cd "$darwin_curdir" + done # $darwin_arches + ## Okay now we have a bunch of thin objects, gotta fatten them up :) + darwin_filelist=`find unfat-$$ -type f | xargs basename | sort -u | $NL2SP` + darwin_file= + darwin_files= + for darwin_file in $darwin_filelist; do + darwin_files=`find unfat-$$ -name $darwin_file -print | $NL2SP` + lipo -create -output "$darwin_file" $darwin_files + done # $darwin_filelist + rm -rf unfat-$$ + cd "$darwin_orig_dir" + else + cd $darwin_orig_dir + (cd $my_xdir && $AR x $my_xabs) || exit $? + fi # $darwin_arches + fi # $run + ;; + *) + # We will extract separately just the conflicting names and we will + # no longer touch any unique names. It is faster to leave these + # extract automatically by $AR in one run. + $show "(cd $my_xdir && $AR x $my_xabs)" + $run eval "(cd \$my_xdir && $AR x \$my_xabs)" || exit $? + if ($AR t "$my_xabs" | sort | sort -uc >/dev/null 2>&1); then + : + else + $echo "$modename: warning: object name conflicts; renaming object files" 1>&2 + $echo "$modename: warning: to ensure that they will not overwrite" 1>&2 + $AR t "$my_xabs" | sort | uniq -cd | while read -r count name + do + i=1 + while test "$i" -le "$count" + do + # Put our $i before any first dot (extension) + # Never overwrite any file + name_to="$name" + while test "X$name_to" = "X$name" || test -f "$my_xdir/$name_to" + do + name_to=`$echo "X$name_to" | $Xsed -e "s/\([^.]*\)/\1-$i/"` + done + $show "(cd $my_xdir && $AR xN $i $my_xabs '$name' && $mv '$name' '$name_to')" + $run eval "(cd \$my_xdir && $AR xN $i \$my_xabs '$name' && $mv '$name' '$name_to')" || exit $? 
+ i=`expr $i + 1` + done + done + fi + ;; + esac + my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP` + done + + func_extract_archives_result="$my_oldobjs" +} # End of Shell function definitions ##################################### +# Darwin sucks +eval std_shrext=\"$shrext_cmds\" + # Parse our command line options once, thoroughly. while test "$#" -gt 0 do @@ -176,12 +381,13 @@ do ;; tag) tagname="$arg" + preserve_args="${preserve_args}=$arg" # Check whether tagname contains only valid characters case $tagname in *[!-_A-Za-z0-9,/]*) $echo "$progname: invalid tag name: $tagname" 1>&2 - exit 1 + exit $EXIT_FAILURE ;; esac @@ -191,10 +397,10 @@ do # not specially marked. ;; *) - if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$" < "$0" > /dev/null; then + if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$" < "$progpath" > /dev/null; then taglist="$taglist $tagname" # Evaluate the configuration. - eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$tagname'$/,/^# ### END LIBTOOL TAG CONFIG: '$tagname'$/p' < $0`" + eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$tagname'$/,/^# ### END LIBTOOL TAG CONFIG: '$tagname'$/p' < $progpath`" else $echo "$progname: ignoring unknown tag $tagname" 1>&2 fi @@ -223,21 +429,22 @@ do $echo "Copyright (C) 2003 Free Software Foundation, Inc." $echo "This is free software; see the source for copying conditions. There is NO" $echo "warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." - exit 0 + exit $EXIT_SUCCESS ;; --config) - ${SED} -e '1,/^# ### BEGIN LIBTOOL CONFIG/d' -e '/^# ### END LIBTOOL CONFIG/,$d' $0 + ${SED} -e '1,/^# ### BEGIN LIBTOOL CONFIG/d' -e '/^# ### END LIBTOOL CONFIG/,$d' $progpath # Now print the configurations for the tags. for tagname in $taglist; do - ${SED} -n -e "/^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$/,/^# ### END LIBTOOL TAG CONFIG: $tagname$/p" < "$0" + ${SED} -n -e "/^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$/,/^# ### END LIBTOOL TAG CONFIG: $tagname$/p" < "$progpath" done - exit 0 + exit $EXIT_SUCCESS ;; --debug) $echo "$progname: enabling shell trace mode" set -x + preserve_args="$preserve_args $arg" ;; --dry-run | -n) @@ -256,7 +463,7 @@ do else $echo "disable static libraries" fi - exit 0 + exit $EXIT_SUCCESS ;; --finish) mode="finish" ;; @@ -268,6 +475,7 @@ do --quiet | --silent) show=: + preserve_args="$preserve_args $arg" ;; --tag) prevopt="--tag" prev=tag ;; @@ -275,6 +483,7 @@ do set tag "$optarg" ${1+"$@"} shift prev=tag + preserve_args="$preserve_args --tag" ;; -dlopen) @@ -285,7 +494,7 @@ do -*) $echo "$modename: unrecognized option \`$arg'" 1>&2 $echo "$help" 1>&2 - exit 1 + exit $EXIT_FAILURE ;; *) @@ -298,7 +507,7 @@ done if test -n "$prevopt"; then $echo "$modename: option \`$prevopt' requires an argument" 1>&2 $echo "$help" 1>&2 - exit 1 + exit $EXIT_FAILURE fi # If this variable is set in any of the actions, the command in it @@ -354,7 +563,7 @@ if test -z "$show_help"; then if test -n "$execute_dlfiles" && test "$mode" != execute; then $echo "$modename: unrecognized option \`-dlopen'" 1>&2 $echo "$help" 1>&2 - exit 1 + exit $EXIT_FAILURE fi # Change the help message to a mode-specific one. @@ -369,9 +578,11 @@ if test -z "$show_help"; then # Get the compilation command and the source file. 
base_compile= srcfile="$nonopt" # always keep a non-empty value in "srcfile" + suppress_opt=yes suppress_output= arg_mode=normal libobj= + later= for arg do @@ -394,24 +605,19 @@ if test -z "$show_help"; then -o) if test -n "$libobj" ; then $echo "$modename: you cannot specify \`-o' more than once" 1>&2 - exit 1 + exit $EXIT_FAILURE fi arg_mode=target continue ;; - -static) - build_old_libs=yes + -static | -prefer-pic | -prefer-non-pic) + later="$later $arg" continue ;; - -prefer-pic) - pic_mode=yes - continue - ;; - - -prefer-non-pic) - pic_mode=no + -no-suppress) + suppress_opt=no continue ;; @@ -424,7 +630,7 @@ if test -z "$show_help"; then args=`$echo "X$arg" | $Xsed -e "s/^-Wc,//"` lastarg= save_ifs="$IFS"; IFS=',' - for arg in $args; do + for arg in $args; do IFS="$save_ifs" # Double-quote args containing other shell metacharacters. @@ -474,11 +680,11 @@ if test -z "$show_help"; then case $arg_mode in arg) $echo "$modename: you must specify an argument for -Xcompile" - exit 1 + exit $EXIT_FAILURE ;; target) $echo "$modename: you must specify a target with \`-o'" 1>&2 - exit 1 + exit $EXIT_FAILURE ;; *) # Get the name of the library object. @@ -511,50 +717,30 @@ if test -z "$show_help"; then *.lo) obj=`$echo "X$libobj" | $Xsed -e "$lo2o"` ;; *) $echo "$modename: cannot determine name of library object from \`$libobj'" 1>&2 - exit 1 + exit $EXIT_FAILURE ;; esac - # Infer tagged configuration to use if any are available and - # if one wasn't chosen via the "--tag" command line option. - # Only attempt this if the compiler in the base compile - # command doesn't match the default compiler. - if test -n "$available_tags" && test -z "$tagname"; then - case $base_compile in - # Blanks in the command may have been stripped by the calling shell, - # but not from the CC environment variable when configure was run. - " $CC "* | "$CC "* | " `$echo $CC` "* | "`$echo $CC` "*) ;; - # Blanks at the start of $base_compile will cause this to fail - # if we don't check for them as well. - *) - for z in $available_tags; do - if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$0" > /dev/null; then - # Evaluate the configuration. - eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $0`" - case "$base_compile " in - "$CC "* | " $CC "* | "`$echo $CC` "* | " `$echo $CC` "*) - # The compiler in the base compile command matches - # the one in the tagged configuration. - # Assume this is the tagged configuration we want. - tagname=$z - break - ;; - esac - fi - done - # If $tagname still isn't set, then no tagged configuration - # was found and let the user know that the "--tag" command - # line option must be used. - if test -z "$tagname"; then - $echo "$modename: unable to infer tagged configuration" - $echo "$modename: specify a tag with \`--tag'" 1>&2 - exit 1 -# else -# $echo "$modename: using $tagname tagged configuration" - fi + func_infer_tag $base_compile + + for arg in $later; do + case $arg in + -static) + build_old_libs=yes + continue + ;; + + -prefer-pic) + pic_mode=yes + continue + ;; + + -prefer-non-pic) + pic_mode=no + continue ;; esac - fi + done objname=`$echo "X$obj" | $Xsed -e 's%^.*/%%'` xdir=`$echo "X$obj" | $Xsed -e 's%/[^/]*$%%'` @@ -568,7 +754,7 @@ if test -z "$show_help"; then if test -z "$base_compile"; then $echo "$modename: you must specify a compilation command" 1>&2 $echo "$help" 1>&2 - exit 1 + exit $EXIT_FAILURE fi # Delete any leftover library objects. 
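
The compile-mode hunks above replace the inlined tag-inference block with a call to the new func_infer_tag function and defer the mode flags into $later, so inference sees the untouched compile command. A minimal standalone sketch of that collect-now, act-later idiom (the argument list and surrounding variables are assumed for illustration, not taken from the patch):

    #!/bin/sh
    # Defer mode-changing flags; interpret them only after tag inference.
    later=
    for arg in "$@"; do
      case $arg in
      -static | -prefer-pic | -prefer-non-pic)
        later="$later $arg"          # remember the flag, decide later
        continue
        ;;
      esac
      # ... every other compile argument would be handled right here ...
    done
    # func_infer_tag would run at this point, on the raw command.
    for arg in $later; do
      case $arg in
      -static)          build_old_libs=yes ;;
      -prefer-pic)      pic_mode=yes ;;
      -prefer-non-pic)  pic_mode=no ;;
      esac
    done
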
@@ -579,7 +765,7 @@ if test -z "$show_help"; then fi $run $rm $removelist - trap "$run $rm $removelist; exit 1" 1 2 15 + trap "$run $rm $removelist; exit $EXIT_FAILURE" 1 2 15 # On Cygwin there's no "real" PIC flag so we must build both object types case $host_os in @@ -598,7 +784,7 @@ if test -z "$show_help"; then output_obj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\.[^.]*$%%'`.${objext} lockfile="$output_obj.lock" removelist="$removelist $output_obj $lockfile" - trap "$run $rm $removelist; exit 1" 1 2 15 + trap "$run $rm $removelist; exit $EXIT_FAILURE" 1 2 15 else output_obj= need_locks=no @@ -608,7 +794,7 @@ if test -z "$show_help"; then # Lock this critical section if it is needed # We use this script file to make the link, it avoids creating a new file if test "$need_locks" = yes; then - until $run ln "$0" "$lockfile" 2>/dev/null; do + until $run ln "$progpath" "$lockfile" 2>/dev/null; do $show "Waiting for $lockfile to be removed" sleep 2 done @@ -626,7 +812,7 @@ avoid parallel builds (make -j) in this platform, or get a better compiler." $run $rm $removelist - exit 1 + exit $EXIT_FAILURE fi $echo $srcfile > "$lockfile" fi @@ -681,7 +867,7 @@ EOF if $run eval "$command"; then : else test -n "$output_obj" && $run $rm $removelist - exit 1 + exit $EXIT_FAILURE fi if test "$need_locks" = warn && @@ -701,7 +887,7 @@ avoid parallel builds (make -j) in this platform, or get a better compiler." $run $rm $removelist - exit 1 + exit $EXIT_FAILURE fi # Just move the object if needed, then go on to compile the next one @@ -722,7 +908,9 @@ pic_object='$objdir/$objname' EOF # Allow error messages only from the first compilation. - suppress_output=' >/dev/null 2>&1' + if test "$suppress_opt" = yes; then + suppress_output=' >/dev/null 2>&1' + fi else # No PIC object so indicate it doesn't exist in the libtool # object file. @@ -751,7 +939,7 @@ EOF if $run eval "$command"; then : else $run $rm $removelist - exit 1 + exit $EXIT_FAILURE fi if test "$need_locks" = warn && @@ -771,7 +959,7 @@ avoid parallel builds (make -j) in this platform, or get a better compiler." $run $rm $removelist - exit 1 + exit $EXIT_FAILURE fi # Just move the object if needed @@ -809,7 +997,7 @@ EOF $run $rm "$lockfile" fi - exit 0 + exit $EXIT_SUCCESS ;; # libtool link mode @@ -835,7 +1023,7 @@ EOF ;; esac libtool_args="$nonopt" - base_compile="$nonopt" + base_compile="$nonopt $@" compile_command="$nonopt" finalize_command="$nonopt" @@ -867,6 +1055,7 @@ EOF no_install=no objs= non_pic_objects= + precious_files_regex= prefer_static_libs=no preload=no prev= @@ -880,6 +1069,8 @@ EOF vinfo= vinfo_number=no + func_infer_tag $base_compile + # We need to know -static, to get the right output filenames. for arg do @@ -911,7 +1102,6 @@ EOF # Go through the arguments, transforming them on the way. while test "$#" -gt 0; do arg="$1" - base_compile="$base_compile $arg" shift case $arg in *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") @@ -975,7 +1165,7 @@ EOF export_symbols="$arg" if test ! -f "$arg"; then $echo "$modename: symbol file \`$arg' does not exist" - exit 1 + exit $EXIT_FAILURE fi prev= continue @@ -990,6 +1180,11 @@ EOF prev= continue ;; + precious_regex) + precious_files_regex="$arg" + prev= + continue + ;; release) release="-$arg" prev= @@ -1022,7 +1217,7 @@ EOF test "$pic_object" = none && \ test "$non_pic_object" = none; then $echo "$modename: cannot find name of object for \`$arg'" 1>&2 - exit 1 + exit $EXIT_FAILURE fi # Extract subdirectory from the argument. 
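
The locking hunk above only swaps $0 for $progpath, but the idiom it leans on is easy to miss: ln(1) refuses to overwrite an existing link, so hard-linking the script itself acts as an atomic test-and-set lock around the non-parallel-safe compile. A reduced sketch, with a hypothetical lock name:

    #!/bin/sh
    # Acquire the lock by hard-linking this script to a fixed name;
    # ln fails as long as another process still holds the link.
    lockfile=demo-compile.lock    # hypothetical; libtool derives it from the output object
    until ln "$0" "$lockfile" 2>/dev/null; do
      echo "Waiting for $lockfile to be removed"
      sleep 2
    done
    # ... critical section: the compile itself would run here ...
    rm -f "$lockfile"
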
@@ -1075,7 +1270,7 @@ EOF # Only an error if not doing a dry-run. if test -z "$run"; then $echo "$modename: \`$arg' is not a valid libtool object" 1>&2 - exit 1 + exit $EXIT_FAILURE else # Dry-run case. @@ -1096,7 +1291,7 @@ EOF done else $echo "$modename: link input file \`$save_arg' does not exist" - exit 1 + exit $EXIT_FAILURE fi arg=$save_arg prev= @@ -1108,7 +1303,7 @@ EOF [\\/]* | [A-Za-z]:[\\/]*) ;; *) $echo "$modename: only absolute run-paths are allowed" 1>&2 - exit 1 + exit $EXIT_FAILURE ;; esac if test "$prev" = rpath; then @@ -1148,6 +1343,11 @@ EOF finalize_command="$finalize_command $qarg" continue ;; + shrext) + shrext_cmds="$arg" + prev= + continue + ;; *) eval "$prev=\"\$arg\"" prev= @@ -1196,7 +1396,7 @@ EOF -export-symbols | -export-symbols-regex) if test -n "$export_symbols" || test -n "$export_symbols_regex"; then $echo "$modename: more than one -exported-symbols argument is not allowed" - exit 1 + exit $EXIT_FAILURE fi if test "X$arg" = "X-export-symbols"; then prev=expsyms @@ -1232,7 +1432,7 @@ EOF absdir=`cd "$dir" && pwd` if test -z "$absdir"; then $echo "$modename: cannot determine absolute directory name of \`$dir'" 1>&2 - exit 1 + exit $EXIT_FAILURE fi dir="$absdir" ;; @@ -1287,6 +1487,11 @@ EOF continue ;; + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe) + deplibs="$deplibs $arg" + continue + ;; + -module) module=yes continue @@ -1315,6 +1520,28 @@ EOF continue ;; + ################################################################ + #### Local edit for Sleepycat SR #8705 + #### This case was given to us by Albert Chin, and we expect + #### this to be included in future versions of libtool, + #### though we must verify that before upgrading. + ################################################################ + # Flags for IRIX and Solaris compiler + -64|-mips[0-9]|-xarch=*) + # Unknown arguments in both finalize_command and compile_command need + # to be aesthetically quoted because they are evaled later. + arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + compile_command="$compile_command $arg" + finalize_command="$finalize_command $arg" + compiler_flags="$compiler_flags $arg" + continue + ;; + -shrext) prev=shrext continue @@ -1351,6 +1578,11 @@ EOF -o) prev=output ;; + -precious-files-regex) + prev=precious_regex + continue + ;; + -release) prev=release continue @@ -1373,7 +1605,7 @@ EOF [\\/]* | [A-Za-z]:[\\/]*) ;; *) $echo "$modename: only absolute run-paths are allowed" 1>&2 - exit 1 + exit $EXIT_FAILURE ;; esac case "$xrpath " in @@ -1496,7 +1728,7 @@ EOF test "$pic_object" = none && \ test "$non_pic_object" = none; then $echo "$modename: cannot find name of object for \`$arg'" 1>&2 - exit 1 + exit $EXIT_FAILURE fi # Extract subdirectory from the argument. @@ -1549,7 +1781,7 @@ EOF # Only an error if not doing a dry-run. if test -z "$run"; then $echo "$modename: \`$arg' is not a valid libtool object" 1>&2 - exit 1 + exit $EXIT_FAILURE else # Dry-run case. @@ -1616,48 +1848,7 @@ EOF if test -n "$prev"; then $echo "$modename: the \`$prevarg' option requires an argument" 1>&2 $echo "$help" 1>&2 - exit 1 - fi - - # Infer tagged configuration to use if any are available and - # if one wasn't chosen via the "--tag" command line option. - # Only attempt this if the compiler in the base link - # command doesn't match the default compiler. 
- if test -n "$available_tags" && test -z "$tagname"; then - case $base_compile in - # Blanks in the command may have been stripped by the calling shell, - # but not from the CC environment variable when configure was run. - "$CC "* | " $CC "* | "`$echo $CC` "* | " `$echo $CC` "*) ;; - # Blanks at the start of $base_compile will cause this to fail - # if we don't check for them as well. - *) - for z in $available_tags; do - if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$0" > /dev/null; then - # Evaluate the configuration. - eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $0`" - case $base_compile in - "$CC "* | " $CC "* | "`$echo $CC` "* | " `$echo $CC` "*) - # The compiler in $compile_command matches - # the one in the tagged configuration. - # Assume this is the tagged configuration we want. - tagname=$z - break - ;; - esac - fi - done - # If $tagname still isn't set, then no tagged configuration - # was found and let the user know that the "--tag" command - # line option must be used. - if test -z "$tagname"; then - $echo "$modename: unable to infer tagged configuration" - $echo "$modename: specify a tag with \`--tag'" 1>&2 - exit 1 -# else -# $echo "$modename: using $tagname tagged configuration" - fi - ;; - esac + exit $EXIT_FAILURE fi if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then @@ -1701,7 +1892,7 @@ EOF "") $echo "$modename: you must specify an output file" 1>&2 $echo "$help" 1>&2 - exit 1 + exit $EXIT_FAILURE ;; *.$libext) linkmode=oldlib ;; *.lo | *.$objext) linkmode=obj ;; @@ -1711,7 +1902,7 @@ EOF case $host in *cygwin* | *mingw* | *pw32*) - # don't eliminate duplcations in $postdeps and $predeps + # don't eliminate duplications in $postdeps and $predeps duplicate_compiler_generated_deps=yes ;; *) @@ -1764,7 +1955,7 @@ EOF *.la) ;; *) $echo "$modename: libraries can \`-dlopen' only libtool libraries: $file" 1>&2 - exit 1 + exit $EXIT_FAILURE ;; esac done @@ -1802,6 +1993,15 @@ EOF lib= found=no case $deplib in + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe) + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + fi + continue + ;; -l*) if test "$linkmode" != lib && test "$linkmode" != prog; then $echo "$modename: warning: \`-l' is ignored for archives/objects" 1>&2 @@ -1813,12 +2013,18 @@ EOF fi name=`$echo "X$deplib" | $Xsed -e 's/^-l//'` for searchdir in $newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path; do - # Search the libtool library - lib="$searchdir/lib${name}.la" - if test -f "$lib"; then - found=yes - break - fi + for search_ext in .la $std_shrext .so .a; do + # Search the libtool library + lib="$searchdir/lib${name}${search_ext}" + if test -f "$lib"; then + if test "$search_ext" = ".la"; then + found=yes + else + found=no + fi + break 2 + fi + done done if test "$found" != yes; then # deplib doesn't seem to be a libtool library @@ -1883,11 +2089,11 @@ EOF fi if test "$pass" = scan; then deplibs="$deplib $deplibs" - newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'` else compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" fi + newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'` ;; *) $echo "$modename: warning: \`-L' is ignored for archives/objects" 1>&2 @@ -1915,7 +2121,22 @@ EOF fi case $linkmode in lib) - if test 
"$deplibs_check_method" != pass_all; then + valid_a_lib=no + case $deplibs_check_method in + match_pattern*) + set dummy $deplibs_check_method + match_pattern_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"` + if eval $echo \"$deplib\" 2>/dev/null \ + | $SED 10q \ + | $EGREP "$match_pattern_regex" > /dev/null; then + valid_a_lib=yes + fi + ;; + pass_all) + valid_a_lib=yes + ;; + esac + if test "$valid_a_lib" != yes; then $echo $echo "*** Warning: Trying to link with static lib archive $deplib." $echo "*** I have the capability to make that library automatically link in when" @@ -1966,14 +2187,14 @@ EOF if test "$found" = yes || test -f "$lib"; then : else $echo "$modename: cannot find the library \`$lib'" 1>&2 - exit 1 + exit $EXIT_FAILURE fi # Check to see that this really is a libtool archive. if (${SED} -e '2q' $lib | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then : else $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 - exit 1 + exit $EXIT_FAILURE fi ladir=`$echo "X$lib" | $Xsed -e 's%/[^/]*$%%'` @@ -2009,7 +2230,7 @@ EOF if test -z "$libdir"; then if test -z "$old_library"; then $echo "$modename: cannot find name of link library for \`$lib'" 1>&2 - exit 1 + exit $EXIT_FAILURE fi # It is a libtool convenience library, so add in its objects. convenience="$convenience $ladir/$objdir/$old_library" @@ -2026,12 +2247,12 @@ EOF done elif test "$linkmode" != prog && test "$linkmode" != lib; then $echo "$modename: \`$lib' is not a convenience library" 1>&2 - exit 1 + exit $EXIT_FAILURE fi continue fi # $pass = conv - + # Get the name of the library we link against. linklib= for l in $old_library $library_names; do @@ -2039,16 +2260,18 @@ EOF done if test -z "$linklib"; then $echo "$modename: cannot find name of link library for \`$lib'" 1>&2 - exit 1 + exit $EXIT_FAILURE fi # This library was specified with -dlopen. if test "$pass" = dlopen; then if test -z "$libdir"; then $echo "$modename: cannot -dlopen a convenience library: \`$lib'" 1>&2 - exit 1 + exit $EXIT_FAILURE fi - if test -z "$dlname" || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then + if test -z "$dlname" || + test "$dlopen_support" != yes || + test "$build_libtool_libs" = no; then # If there is no dlname, no dlopen support or we're linking # statically, we need to preload. We also need to preload any # dependent libraries so libltdl's deplib preloader doesn't @@ -2086,10 +2309,17 @@ EOF absdir="$libdir" fi else - dir="$ladir/$objdir" - absdir="$abs_ladir/$objdir" - # Remove this search path later - notinst_path="$notinst_path $abs_ladir" + if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then + dir="$ladir" + absdir="$abs_ladir" + # Remove this search path later + notinst_path="$notinst_path $abs_ladir" + else + dir="$ladir/$objdir" + absdir="$abs_ladir/$objdir" + # Remove this search path later + notinst_path="$notinst_path $abs_ladir" + fi fi # $installed = yes name=`$echo "X$laname" | $Xsed -e 's/\.la$//' -e 's/^lib//'` @@ -2097,7 +2327,7 @@ EOF if test "$pass" = dlpreopen; then if test -z "$libdir"; then $echo "$modename: cannot -dlpreopen a convenience library: \`$lib'" 1>&2 - exit 1 + exit $EXIT_FAILURE fi # Prefer using a static library (so that no silly _DYNAMIC symbols # are required to link). 
@@ -2124,7 +2354,7 @@ EOF continue fi - + if test "$linkmode" = prog && test "$pass" != link; then newlib_search_path="$newlib_search_path $ladir" deplibs="$lib $deplibs" @@ -2211,17 +2441,18 @@ EOF need_relink=yes fi # This is a shared library - - # Warn about portability, can't link against -module's on some systems (darwin) - if test "$shouldnotlink" = yes && test "$pass" = link ; then + + # Warn about portability, can't link against -module's on + # some systems (darwin) + if test "$shouldnotlink" = yes && test "$pass" = link ; then $echo if test "$linkmode" = prog; then $echo "*** Warning: Linking the executable $output against the loadable module" else $echo "*** Warning: Linking the shared library $output against the loadable module" fi - $echo "*** $linklib is not portable!" - fi + $echo "*** $linklib is not portable!" + fi if test "$linkmode" = lib && test "$hardcode_into_libs" = yes; then # Hardcode the library path. @@ -2279,9 +2510,10 @@ EOF else $show "extracting exported symbol list from \`$soname'" save_ifs="$IFS"; IFS='~' - eval cmds=\"$extract_expsyms_cmds\" + cmds=$extract_expsyms_cmds for cmd in $cmds; do IFS="$save_ifs" + eval cmd=\"$cmd\" $show "$cmd" $run eval "$cmd" || exit $? done @@ -2292,9 +2524,10 @@ EOF if test -f "$output_objdir/$newlib"; then :; else $show "generating import library for \`$soname'" save_ifs="$IFS"; IFS='~' - eval cmds=\"$old_archive_from_expsyms_cmds\" + cmds=$old_archive_from_expsyms_cmds for cmd in $cmds; do IFS="$save_ifs" + eval cmd=\"$cmd\" $show "$cmd" $run eval "$cmd" || exit $? done @@ -2317,9 +2550,9 @@ EOF case $host in *-*-sco3.2v5* ) add_dir="-L$dir" ;; *-*-darwin* ) - # if the lib is a module then we can not link against it, someone - # is ignoring the new warnings I added - if /usr/bin/file -L $add 2> /dev/null | grep "bundle" >/dev/null ; then + # if the lib is a module then we can not link against + # it, someone is ignoring the new warnings I added + if /usr/bin/file -L $add 2> /dev/null | $EGREP "bundle" >/dev/null ; then $echo "** Warning, lib $linklib is a module, not a shared library" if test -z "$old_library" ; then $echo @@ -2327,7 +2560,7 @@ EOF $echo "** The link will probably fail, sorry" else add="$dir/$old_library" - fi + fi fi esac elif test "$hardcode_minus_L" = no; then @@ -2352,7 +2585,7 @@ EOF if test -n "$inst_prefix_dir"; then case "$libdir" in [\\/]*) - add_dir="-L$inst_prefix_dir$libdir $add_dir" + add_dir="$add_dir -L$inst_prefix_dir$libdir" ;; esac fi @@ -2369,7 +2602,7 @@ EOF if test "$lib_linked" != yes; then $echo "$modename: configuration error: unsupported hardcode properties" - exit 1 + exit $EXIT_FAILURE fi if test -n "$add_shlibpath"; then @@ -2412,7 +2645,8 @@ EOF esac add="-l$name" elif test "$hardcode_automatic" = yes; then - if test -n "$inst_prefix_dir" && test -f "$inst_prefix_dir$libdir/$linklib" ; then + if test -n "$inst_prefix_dir" && + test -f "$inst_prefix_dir$libdir/$linklib" ; then add="$inst_prefix_dir$libdir/$linklib" else add="$libdir/$linklib" @@ -2424,7 +2658,7 @@ EOF if test -n "$inst_prefix_dir"; then case "$libdir" in [\\/]*) - add_dir="-L$inst_prefix_dir$libdir $add_dir" + add_dir="$add_dir -L$inst_prefix_dir$libdir" ;; esac fi @@ -2492,7 +2726,8 @@ EOF if test "$linkmode" = lib; then if test -n "$dependency_libs" && - { test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes || + { test "$hardcode_into_libs" != yes || + test "$build_old_libs" = yes || test "$link_static" = yes; }; then # Extract -R from dependency_libs temp_deplibs= @@ -2549,7 +2784,7 @@ EOF eval 
libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` if test -z "$libdir"; then $echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2 - exit 1 + exit $EXIT_FAILURE fi if test "$absdir" != "$libdir"; then $echo "$modename: warning: \`$deplib' seems to be moved" 1>&2 @@ -2559,7 +2794,8 @@ EOF depdepl= case $host in *-*-darwin*) - # we do not want to link against static libs, but need to link against shared + # we do not want to link against static libs, + # but need to link against shared eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` if test -n "$deplibrary_names" ; then for tmp in $deplibrary_names ; do @@ -2567,39 +2803,42 @@ EOF done if test -f "$path/$depdepl" ; then depdepl="$path/$depdepl" - fi - newlib_search_path="$newlib_search_path $path" - path="" + fi + # do not add paths which are already there + case " $newlib_search_path " in + *" $path "*) ;; + *) newlib_search_path="$newlib_search_path $path";; + esac fi + path="" ;; *) - path="-L$path" - ;; - esac - + path="-L$path" + ;; + esac ;; - -l*) + -l*) case $host in *-*-darwin*) - # Again, we only want to link against shared libraries - eval tmp_libs=`$echo "X$deplib" | $Xsed -e "s,^\-l,,"` - for tmp in $newlib_search_path ; do - if test -f "$tmp/lib$tmp_libs.dylib" ; then - eval depdepl="$tmp/lib$tmp_libs.dylib" - break - fi - done - path="" + # Again, we only want to link against shared libraries + eval tmp_libs=`$echo "X$deplib" | $Xsed -e "s,^\-l,,"` + for tmp in $newlib_search_path ; do + if test -f "$tmp/lib$tmp_libs.dylib" ; then + eval depdepl="$tmp/lib$tmp_libs.dylib" + break + fi + done + path="" ;; *) continue ;; - esac + esac ;; *) continue ;; esac case " $deplibs " in *" $depdepl "*) ;; - *) deplibs="$deplibs $depdepl" ;; - esac + *) deplibs="$depdepl $deplibs" ;; + esac case " $deplibs " in *" $path "*) ;; *) deplibs="$deplibs $path" ;; @@ -2689,7 +2928,8 @@ EOF eval $var=\"$tmp_libs\" done # for var fi - # Last step: remove runtime libs from dependency_libs (they stay in deplibs) + # Last step: remove runtime libs from dependency_libs + # (they stay in deplibs) tmp_libs= for i in $dependency_libs ; do case " $predeps $postdeps $compiler_lib_search_path " in @@ -2749,19 +2989,19 @@ EOF case $outputname in lib*) name=`$echo "X$outputname" | $Xsed -e 's/\.la$//' -e 's/^lib//'` - eval shared_ext=\"$shrext\" + eval shared_ext=\"$shrext_cmds\" eval libname=\"$libname_spec\" ;; *) if test "$module" = no; then $echo "$modename: libtool library \`$output' must begin with \`lib'" 1>&2 $echo "$help" 1>&2 - exit 1 + exit $EXIT_FAILURE fi if test "$need_lib_prefix" != no; then # Add the "lib" prefix for modules if required name=`$echo "X$outputname" | $Xsed -e 's/\.la$//'` - eval shared_ext=\"$shrext\" + eval shared_ext=\"$shrext_cmds\" eval libname=\"$libname_spec\" else libname=`$echo "X$outputname" | $Xsed -e 's/\.la$//'` @@ -2772,7 +3012,7 @@ EOF if test -n "$objs"; then if test "$deplibs_check_method" != pass_all; then $echo "$modename: cannot build libtool library \`$output' from non-libtool objects on this host:$objs" 2>&1 - exit 1 + exit $EXIT_FAILURE else $echo $echo "*** Warning: Linking the shared library $output against the non-libtool" @@ -2820,13 +3060,13 @@ EOF if test -n "$8"; then $echo "$modename: too many parameters to \`-version-info'" 1>&2 $echo "$help" 1>&2 - exit 1 + exit $EXIT_FAILURE fi # convert absolute version numbers to libtool ages # this retains compatibility with .la files and attempts # to make the code below a bit more comprehensible - + case 
$vinfo_number in yes) number_major="$2" @@ -2870,7 +3110,7 @@ EOF *) $echo "$modename: CURRENT \`$current' is not a nonnegative integer" 1>&2 $echo "$modename: \`$vinfo' is not valid version information" 1>&2 - exit 1 + exit $EXIT_FAILURE ;; esac @@ -2879,7 +3119,7 @@ EOF *) $echo "$modename: REVISION \`$revision' is not a nonnegative integer" 1>&2 $echo "$modename: \`$vinfo' is not valid version information" 1>&2 - exit 1 + exit $EXIT_FAILURE ;; esac @@ -2888,14 +3128,14 @@ EOF *) $echo "$modename: AGE \`$age' is not a nonnegative integer" 1>&2 $echo "$modename: \`$vinfo' is not valid version information" 1>&2 - exit 1 + exit $EXIT_FAILURE ;; esac if test "$age" -gt "$current"; then $echo "$modename: AGE \`$age' is greater than the current interface number \`$current'" 1>&2 $echo "$modename: \`$vinfo' is not valid version information" 1>&2 - exit 1 + exit $EXIT_FAILURE fi # Calculate the version variables. @@ -2912,7 +3152,7 @@ EOF versuffix="$major.$age.$revision" # Darwin ld doesn't like 0 for these options... minor_current=`expr $current + 1` - verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" + verstring="${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision" ;; freebsd-aout) @@ -2984,7 +3224,7 @@ EOF *) $echo "$modename: unknown library version type \`$version_type'" 1>&2 $echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2 - exit 1 + exit $EXIT_FAILURE ;; esac @@ -3038,6 +3278,12 @@ EOF *.$objext) ;; $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*) + if test "X$precious_files_regex" != "X"; then + if echo $p | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 + then + continue + fi + fi removelist="$removelist $p" ;; *) ;; @@ -3517,7 +3763,7 @@ EOF fi # Get the real and link names of the library. - eval shared_ext=\"$shrext\" + eval shared_ext=\"$shrext_cmds\" eval library_names=\"$library_names_spec\" set dummy $library_names realname="$2" @@ -3547,10 +3793,11 @@ EOF $show "generating symbol list for \`$libname.la'" export_symbols="$output_objdir/$libname.exp" $run $rm $export_symbols - eval cmds=\"$export_symbols_cmds\" + cmds=$export_symbols_cmds save_ifs="$IFS"; IFS='~' for cmd in $cmds; do IFS="$save_ifs" + eval cmd=\"$cmd\" if len=`expr "X$cmd" : ".*"` && test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then $show "$cmd" @@ -3580,12 +3827,12 @@ EOF for test_deplib in $deplibs; do case " $convenience " in *" $test_deplib "*) ;; - *) + *) tmp_deplibs="$tmp_deplibs $test_deplib" ;; esac done - deplibs="$tmp_deplibs" + deplibs="$tmp_deplibs" if test -n "$convenience"; then if test -n "$whole_archive_flag_spec"; then @@ -3593,67 +3840,13 @@ EOF eval libobjs=\"\$libobjs $whole_archive_flag_spec\" else gentop="$output_objdir/${outputname}x" - $show "${rm}r $gentop" - $run ${rm}r "$gentop" - $show "$mkdir $gentop" - $run $mkdir "$gentop" - status=$? - if test "$status" -ne 0 && test ! -d "$gentop"; then - exit $status - fi generated="$generated $gentop" - for xlib in $convenience; do - # Extract the objects. - case $xlib in - [\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;; - *) xabs=`pwd`"/$xlib" ;; - esac - xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'` - xdir="$gentop/$xlib" - - $show "${rm}r $xdir" - $run ${rm}r "$xdir" - $show "$mkdir $xdir" - $run $mkdir "$xdir" - status=$? - if test "$status" -ne 0 && test ! 
-d "$xdir"; then - exit $status - fi - # We will extract separately just the conflicting names and we will no - # longer touch any unique names. It is faster to leave these extract - # automatically by $AR in one run. - $show "(cd $xdir && $AR x $xabs)" - $run eval "(cd \$xdir && $AR x \$xabs)" || exit $? - if ($AR t "$xabs" | sort | sort -uc >/dev/null 2>&1); then - : - else - $echo "$modename: warning: object name conflicts; renaming object files" 1>&2 - $echo "$modename: warning: to ensure that they will not overwrite" 1>&2 - $AR t "$xabs" | sort | uniq -cd | while read -r count name - do - i=1 - while test "$i" -le "$count" - do - # Put our $i before any first dot (extension) - # Never overwrite any file - name_to="$name" - while test "X$name_to" = "X$name" || test -f "$xdir/$name_to" - do - name_to=`$echo "X$name_to" | $Xsed -e "s/\([^.]*\)/\1-$i/"` - done - $show "(cd $xdir && $AR xN $i $xabs '$name' && $mv '$name' '$name_to')" - $run eval "(cd \$xdir && $AR xN $i \$xabs '$name' && $mv '$name' '$name_to')" || exit $? - i=`expr $i + 1` - done - done - fi - - libobjs="$libobjs "`find $xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP` - done + func_extract_archives $gentop $convenience + libobjs="$libobjs $func_extract_archives_result" fi fi - + if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then eval flag=\"$thread_safe_flag_spec\" linker_flags="$linker_flags $flag" @@ -3667,19 +3860,23 @@ EOF # Do each of the archive commands. if test "$module" = yes && test -n "$module_cmds" ; then if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then - eval cmds=\"$module_expsym_cmds\" + eval test_cmds=\"$module_expsym_cmds\" + cmds=$module_expsym_cmds else - eval cmds=\"$module_cmds\" + eval test_cmds=\"$module_cmds\" + cmds=$module_cmds fi else if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then - eval cmds=\"$archive_expsym_cmds\" + eval test_cmds=\"$archive_expsym_cmds\" + cmds=$archive_expsym_cmds else - eval cmds=\"$archive_cmds\" + eval test_cmds=\"$archive_cmds\" + cmds=$archive_cmds fi fi - if test "X$skipped_export" != "X:" && len=`expr "X$cmds" : ".*"` && + if test "X$skipped_export" != "X:" && len=`expr "X$test_cmds" : ".*"` && test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then : else @@ -3780,19 +3977,28 @@ EOF # value of $libobjs for piecewise linking. # Do each of the archive commands. + if test "$module" = yes && test -n "$module_cmds" ; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + cmds=$module_expsym_cmds + else + cmds=$module_cmds + fi + else if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then - eval cmds=\"$archive_expsym_cmds\" + cmds=$archive_expsym_cmds else - eval cmds=\"$archive_cmds\" + cmds=$archive_cmds + fi fi # Append the command to remove the reloadable object files # to the just-reset $cmds. - eval cmds=\"\$cmds~$rm $delfiles\" + eval cmds=\"\$cmds~\$rm $delfiles\" fi save_ifs="$IFS"; IFS='~' for cmd in $cmds; do IFS="$save_ifs" + eval cmd=\"$cmd\" $show "$cmd" $run eval "$cmd" || exit $? done @@ -3801,7 +4007,7 @@ EOF # Restore the uninstalled library and exit if test "$mode" = relink; then $run eval '(cd $output_objdir && $rm ${realname}T && $mv $realname ${realname}T && $mv "$realname"U $realname)' || exit $? - exit 0 + exit $EXIT_SUCCESS fi # Create links to the real library. 
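
A change that repeats through these link-mode hunks: the '~'-separated command lists ($archive_cmds, $reload_cmds, and friends) are no longer expanded wholesale with eval before splitting; the raw list is split first and each piece is expanded only when it is about to run, so characters produced by the expansion can no longer disturb the split (a fully expanded copy, $test_cmds, is still built for the length check). A self-contained sketch of that ordering, with made-up commands and an assumed $output:

    #!/bin/sh
    # Split the raw ~-separated list first, expand each command only when it runs.
    output=libdemo.la                                   # assumed value
    cmds='echo linking $output~echo finished $output'   # left unexpanded on purpose
    save_ifs="$IFS"; IFS='~'
    for cmd in $cmds; do
      IFS="$save_ifs"
      eval cmd=\"$cmd\"        # $output is substituted here, after the split
      echo "+ $cmd"            # stands in for $show
      eval "$cmd"              # stands in for $run eval
    done
    IFS="$save_ifs"
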
@@ -3849,7 +4055,7 @@ EOF *.lo) if test -n "$objs$old_deplibs"; then $echo "$modename: cannot build library object \`$output' from non-libtool objects" 1>&2 - exit 1 + exit $EXIT_FAILURE fi libobj="$output" obj=`$echo "X$output" | $Xsed -e "$lo2o"` @@ -3878,64 +4084,10 @@ EOF eval reload_conv_objs=\"\$reload_objs $whole_archive_flag_spec\" else gentop="$output_objdir/${obj}x" - $show "${rm}r $gentop" - $run ${rm}r "$gentop" - $show "$mkdir $gentop" - $run $mkdir "$gentop" - status=$? - if test "$status" -ne 0 && test ! -d "$gentop"; then - exit $status - fi generated="$generated $gentop" - for xlib in $convenience; do - # Extract the objects. - case $xlib in - [\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;; - *) xabs=`pwd`"/$xlib" ;; - esac - xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'` - xdir="$gentop/$xlib" - - $show "${rm}r $xdir" - $run ${rm}r "$xdir" - $show "$mkdir $xdir" - $run $mkdir "$xdir" - status=$? - if test "$status" -ne 0 && test ! -d "$xdir"; then - exit $status - fi - # We will extract separately just the conflicting names and we will no - # longer touch any unique names. It is faster to leave these extract - # automatically by $AR in one run. - $show "(cd $xdir && $AR x $xabs)" - $run eval "(cd \$xdir && $AR x \$xabs)" || exit $? - if ($AR t "$xabs" | sort | sort -uc >/dev/null 2>&1); then - : - else - $echo "$modename: warning: object name conflicts; renaming object files" 1>&2 - $echo "$modename: warning: to ensure that they will not overwrite" 1>&2 - $AR t "$xabs" | sort | uniq -cd | while read -r count name - do - i=1 - while test "$i" -le "$count" - do - # Put our $i before any first dot (extension) - # Never overwrite any file - name_to="$name" - while test "X$name_to" = "X$name" || test -f "$xdir/$name_to" - do - name_to=`$echo "X$name_to" | $Xsed -e "s/\([^.]*\)/\1-$i/"` - done - $show "(cd $xdir && $AR xN $i $xabs '$name' && $mv '$name' '$name_to')" - $run eval "(cd \$xdir && $AR xN $i \$xabs '$name' && $mv '$name' '$name_to')" || exit $? - i=`expr $i + 1` - done - done - fi - - reload_conv_objs="$reload_objs "`find $xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP` - done + func_extract_archives $gentop $convenience + reload_conv_objs="$reload_objs $func_extract_archives_result" fi fi @@ -3943,10 +4095,11 @@ EOF reload_objs="$objs$old_deplibs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}$'/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test output="$obj" - eval cmds=\"$reload_cmds\" + cmds=$reload_cmds save_ifs="$IFS"; IFS='~' for cmd in $cmds; do IFS="$save_ifs" + eval cmd=\"$cmd\" $show "$cmd" $run eval "$cmd" || exit $? done @@ -3959,7 +4112,7 @@ EOF $run ${rm}r $gentop fi - exit 0 + exit $EXIT_SUCCESS fi if test "$build_libtool_libs" != yes; then @@ -3972,17 +4125,18 @@ EOF # accidentally link it into a program. # $show "echo timestamp > $libobj" # $run eval "echo timestamp > $libobj" || exit $? - exit 0 + exit $EXIT_SUCCESS fi if test -n "$pic_flag" || test "$pic_mode" != default; then # Only do commands if we really have different PIC objects. reload_objs="$libobjs $reload_conv_objs" output="$libobj" - eval cmds=\"$reload_cmds\" + cmds=$reload_cmds save_ifs="$IFS"; IFS='~' for cmd in $cmds; do IFS="$save_ifs" + eval cmd=\"$cmd\" $show "$cmd" $run eval "$cmd" || exit $? 
done @@ -3994,7 +4148,7 @@ EOF $run ${rm}r $gentop fi - exit 0 + exit $EXIT_SUCCESS ;; prog) @@ -4312,7 +4466,7 @@ static const void *lt_preloaded_setup() { ;; *) $echo "$modename: unknown suffix for \`$dlsyms'" 1>&2 - exit 1 + exit $EXIT_FAILURE ;; esac else @@ -4400,7 +4554,7 @@ static const void *lt_preloaded_setup() { # Link the executable and exit $show "$link_command" $run eval "$link_command" || exit $? - exit 0 + exit $EXIT_SUCCESS fi if test "$hardcode_action" = relink; then @@ -4455,10 +4609,10 @@ static const void *lt_preloaded_setup() { fi # Quote $echo for shipping. - if test "X$echo" = "X$SHELL $0 --fallback-echo"; then - case $0 in - [\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $0 --fallback-echo";; - *) qecho="$SHELL `pwd`/$0 --fallback-echo";; + if test "X$echo" = "X$SHELL $progpath --fallback-echo"; then + case $progpath in + [\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $progpath --fallback-echo";; + *) qecho="$SHELL `pwd`/$progpath --fallback-echo";; esac qecho=`$echo "X$qecho" | $Xsed -e "$sed_quote_subst"` else @@ -4484,7 +4638,7 @@ static const void *lt_preloaded_setup() { cwrappersource=`$echo ${objdir}/lt-${output}.c` cwrapper=`$echo ${output}.exe` $rm $cwrappersource $cwrapper - trap "$rm $cwrappersource $cwrapper; exit 1" 1 2 15 + trap "$rm $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 cat > $cwrappersource < $output "\ #! $SHELL @@ -4714,7 +4868,7 @@ sed_quote_subst='$sed_quote_subst' # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. -if test \"\${CDPATH+set}\" = set; then CDPATH=:; export CDPATH; fi +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH relink_command=\"$relink_command\" @@ -4793,7 +4947,7 @@ else else $echo \"\$relink_command_output\" >&2 $rm \"\$progdir/\$file\" - exit 1 + exit $EXIT_FAILURE fi fi @@ -4855,20 +5009,20 @@ else esac $echo >> $output "\ \$echo \"\$0: cannot exec \$program \${1+\"\$@\"}\" - exit 1 + exit $EXIT_FAILURE fi else # The program doesn't exist. \$echo \"\$0: error: \$progdir/\$program does not exist\" 1>&2 \$echo \"This script is just a wrapper for \$program.\" 1>&2 $echo \"See the $PACKAGE documentation for more information.\" 1>&2 - exit 1 + exit $EXIT_FAILURE fi fi\ " chmod +x $output fi - exit 0 + exit $EXIT_SUCCESS ;; esac @@ -4891,76 +5045,21 @@ fi\ if test -n "$addlibs"; then gentop="$output_objdir/${outputname}x" - $show "${rm}r $gentop" - $run ${rm}r "$gentop" - $show "$mkdir $gentop" - $run $mkdir "$gentop" - status=$? - if test "$status" -ne 0 && test ! -d "$gentop"; then - exit $status - fi generated="$generated $gentop" - # Add in members from convenience archives. - for xlib in $addlibs; do - # Extract the objects. - case $xlib in - [\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;; - *) xabs=`pwd`"/$xlib" ;; - esac - xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'` - xdir="$gentop/$xlib" - - $show "${rm}r $xdir" - $run ${rm}r "$xdir" - $show "$mkdir $xdir" - $run $mkdir "$xdir" - status=$? - if test "$status" -ne 0 && test ! -d "$xdir"; then - exit $status - fi - # We will extract separately just the conflicting names and we will no - # longer touch any unique names. It is faster to leave these extract - # automatically by $AR in one run. - $show "(cd $xdir && $AR x $xabs)" - $run eval "(cd \$xdir && $AR x \$xabs)" || exit $? 
- if ($AR t "$xabs" | sort | sort -uc >/dev/null 2>&1); then - : - else - $echo "$modename: warning: object name conflicts; renaming object files" 1>&2 - $echo "$modename: warning: to ensure that they will not overwrite" 1>&2 - $AR t "$xabs" | sort | uniq -cd | while read -r count name - do - i=1 - while test "$i" -le "$count" - do - # Put our $i before any first dot (extension) - # Never overwrite any file - name_to="$name" - while test "X$name_to" = "X$name" || test -f "$xdir/$name_to" - do - name_to=`$echo "X$name_to" | $Xsed -e "s/\([^.]*\)/\1-$i/"` - done - $show "(cd $xdir && $AR xN $i $xabs '$name' && $mv '$name' '$name_to')" - $run eval "(cd \$xdir && $AR xN $i \$xabs '$name' && $mv '$name' '$name_to')" || exit $? - i=`expr $i + 1` - done - done - fi - - oldobjs="$oldobjs "`find $xdir -name \*.${objext} -print -o -name \*.lo -print | $NL2SP` - done + func_extract_archives $gentop $addlibs + oldobjs="$oldobjs $func_extract_archives_result" fi # Do each command in the archive commands. if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then - eval cmds=\"$old_archive_from_new_cmds\" + cmds=$old_archive_from_new_cmds else eval cmds=\"$old_archive_cmds\" if len=`expr "X$cmds" : ".*"` && test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then - : + cmds=$old_archive_cmds else # the command line is too long to link in one step, link in parts $echo "using piecewise archive linking..." @@ -4987,7 +5086,7 @@ fi\ for obj in $save_oldobjs do last_oldobj=$obj - done + done for obj in $save_oldobjs do oldobjs="$objlist $obj" @@ -5001,7 +5100,7 @@ fi\ oldobjs=$objlist if test "$obj" = "$last_oldobj" ; then RANLIB=$save_RANLIB - fi + fi test -z "$concat_cmds" || concat_cmds=$concat_cmds~ eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\" objlist= @@ -5012,12 +5111,13 @@ fi\ if test "X$oldobjs" = "X" ; then eval cmds=\"\$concat_cmds\" else - eval cmds=\"\$concat_cmds~$old_archive_cmds\" + eval cmds=\"\$concat_cmds~\$old_archive_cmds\" fi fi fi save_ifs="$IFS"; IFS='~' for cmd in $cmds; do + eval cmd=\"$cmd\" IFS="$save_ifs" $show "$cmd" $run eval "$cmd" || exit $? @@ -5049,8 +5149,12 @@ fi\ fi done # Quote the link command for shipping. - relink_command="(cd `pwd`; $SHELL $0 --mode=relink $libtool_args @inst_prefix_dir@)" + relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"` + if test "$hardcode_automatic" = yes ; then + relink_command= + fi + # Only create the output if not a dry run. 
if test -z "$run"; then @@ -5069,7 +5173,7 @@ fi\ eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` if test -z "$libdir"; then $echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2 - exit 1 + exit $EXIT_FAILURE fi newdependency_libs="$newdependency_libs $libdir/$name" ;; @@ -5083,7 +5187,7 @@ fi\ eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` if test -z "$libdir"; then $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 - exit 1 + exit $EXIT_FAILURE fi newdlfiles="$newdlfiles $libdir/$name" done @@ -5094,11 +5198,30 @@ fi\ eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` if test -z "$libdir"; then $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 - exit 1 + exit $EXIT_FAILURE fi newdlprefiles="$newdlprefiles $libdir/$name" done dlprefiles="$newdlprefiles" + else + newdlfiles= + for lib in $dlfiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; + *) abs=`pwd`"/$lib" ;; + esac + newdlfiles="$newdlfiles $abs" + done + dlfiles="$newdlfiles" + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; + *) abs=`pwd`"/$lib" ;; + esac + newdlprefiles="$newdlprefiles $abs" + done + dlprefiles="$newdlprefiles" fi $rm $output # place dlname in correct position for cygwin @@ -5155,7 +5278,7 @@ relink_command=\"$relink_command\"" $run eval '(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)' || exit $? ;; esac - exit 0 + exit $EXIT_SUCCESS ;; # libtool install mode @@ -5244,13 +5367,13 @@ relink_command=\"$relink_command\"" if test -z "$install_prog"; then $echo "$modename: you must specify an install program" 1>&2 $echo "$help" 1>&2 - exit 1 + exit $EXIT_FAILURE fi if test -n "$prev"; then $echo "$modename: the \`$prev' option requires an argument" 1>&2 $echo "$help" 1>&2 - exit 1 + exit $EXIT_FAILURE fi if test -z "$files"; then @@ -5260,7 +5383,7 @@ relink_command=\"$relink_command\"" $echo "$modename: you must specify a destination" 1>&2 fi $echo "$help" 1>&2 - exit 1 + exit $EXIT_FAILURE fi # Strip any trailing slash from the destination. @@ -5281,7 +5404,7 @@ relink_command=\"$relink_command\"" if test "$#" -gt 2; then $echo "$modename: \`$dest' is not a directory" 1>&2 $echo "$help" 1>&2 - exit 1 + exit $EXIT_FAILURE fi fi case $destdir in @@ -5293,7 +5416,7 @@ relink_command=\"$relink_command\"" *) $echo "$modename: \`$destdir' must be an absolute directory name" 1>&2 $echo "$help" 1>&2 - exit 1 + exit $EXIT_FAILURE ;; esac done @@ -5322,7 +5445,7 @@ relink_command=\"$relink_command\"" else $echo "$modename: \`$file' is not a valid libtool archive" 1>&2 $echo "$help" 1>&2 - exit 1 + exit $EXIT_FAILURE fi library_names= @@ -5364,7 +5487,7 @@ relink_command=\"$relink_command\"" # but it's something to keep an eye on. if test "$inst_prefix_dir" = "$destdir"; then $echo "$modename: error: cannot install \`$file' to a directory not ending in $libdir" 1>&2 - exit 1 + exit $EXIT_FAILURE fi if test -n "$inst_prefix_dir"; then @@ -5379,7 +5502,7 @@ relink_command=\"$relink_command\"" if $run eval "$relink_command"; then : else $echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2 - exit 1 + exit $EXIT_FAILURE fi fi @@ -5414,10 +5537,11 @@ relink_command=\"$relink_command\"" # Do each command in the postinstall commands. lib="$destdir/$realname" - eval cmds=\"$postinstall_cmds\" + cmds=$postinstall_cmds save_ifs="$IFS"; IFS='~' for cmd in $cmds; do IFS="$save_ifs" + eval cmd=\"$cmd\" $show "$cmd" $run eval "$cmd" || exit $? 
done @@ -5457,7 +5581,7 @@ relink_command=\"$relink_command\"" *) $echo "$modename: cannot copy a libtool object to \`$destfile'" 1>&2 $echo "$help" 1>&2 - exit 1 + exit $EXIT_FAILURE ;; esac @@ -5475,7 +5599,7 @@ relink_command=\"$relink_command\"" $show "$install_prog $staticobj $staticdest" $run eval "$install_prog \$staticobj \$staticdest" || exit $? fi - exit 0 + exit $EXIT_SUCCESS ;; *) @@ -5529,7 +5653,7 @@ relink_command=\"$relink_command\"" # Check the variables that should have been set. if test -z "$notinst_deplibs"; then $echo "$modename: invalid libtool wrapper script \`$wrapper'" 1>&2 - exit 1 + exit $EXIT_FAILURE fi finalize=yes @@ -5570,8 +5694,12 @@ relink_command=\"$relink_command\"" tmpdir="/tmp" test -n "$TMPDIR" && tmpdir="$TMPDIR" tmpdir="$tmpdir/libtool-$$" - if $mkdir -p "$tmpdir" && chmod 700 "$tmpdir"; then : + save_umask=`umask` + umask 0077 + if $mkdir "$tmpdir"; then + umask $save_umask else + umask $save_umask $echo "$modename: error: cannot create temporary directory \`$tmpdir'" 1>&2 continue fi @@ -5630,16 +5758,17 @@ relink_command=\"$relink_command\"" $show "$install_prog $file $oldlib" $run eval "$install_prog \$file \$oldlib" || exit $? - if test -n "$stripme" && test -n "$striplib"; then + if test -n "$stripme" && test -n "$old_striplib"; then $show "$old_striplib $oldlib" $run eval "$old_striplib $oldlib" || exit $? fi # Do each command in the postinstall commands. - eval cmds=\"$old_postinstall_cmds\" + cmds=$old_postinstall_cmds save_ifs="$IFS"; IFS='~' for cmd in $cmds; do IFS="$save_ifs" + eval cmd=\"$cmd\" $show "$cmd" $run eval "$cmd" || exit $? done @@ -5653,9 +5782,9 @@ relink_command=\"$relink_command\"" if test -n "$current_libdirs"; then # Maybe just do a dry run. test -n "$run" && current_libdirs=" -n$current_libdirs" - exec_cmd='$SHELL $0 --finish$current_libdirs' + exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs' else - exit 0 + exit $EXIT_SUCCESS fi ;; @@ -5674,10 +5803,11 @@ relink_command=\"$relink_command\"" for libdir in $libdirs; do if test -n "$finish_cmds"; then # Do each command in the finish commands. - eval cmds=\"$finish_cmds\" + cmds=$finish_cmds save_ifs="$IFS"; IFS='~' for cmd in $cmds; do IFS="$save_ifs" + eval cmd=\"$cmd\" $show "$cmd" $run eval "$cmd" || admincmds="$admincmds $cmd" @@ -5694,7 +5824,7 @@ relink_command=\"$relink_command\"" fi # Exit here if they wanted silent mode. - test "$show" = : && exit 0 + test "$show" = : && exit $EXIT_SUCCESS $echo "----------------------------------------------------------------------" $echo "Libraries have been installed in:" @@ -5730,7 +5860,7 @@ relink_command=\"$relink_command\"" $echo "See any operating system documentation about shared libraries for" $echo "more information, such as the ld(1) and ld.so(8) manual pages." $echo "----------------------------------------------------------------------" - exit 0 + exit $EXIT_SUCCESS ;; # libtool execute mode @@ -5742,7 +5872,7 @@ relink_command=\"$relink_command\"" if test -z "$cmd"; then $echo "$modename: you must specify a COMMAND" 1>&2 $echo "$help" - exit 1 + exit $EXIT_FAILURE fi # Handle -dlopen flags immediately. @@ -5750,7 +5880,7 @@ relink_command=\"$relink_command\"" if test ! -f "$file"; then $echo "$modename: \`$file' is not a file" 1>&2 $echo "$help" 1>&2 - exit 1 + exit $EXIT_FAILURE fi dir= @@ -5761,7 +5891,7 @@ relink_command=\"$relink_command\"" else $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 $echo "$help" 1>&2 - exit 1 + exit $EXIT_FAILURE fi # Read the libtool library. 
@@ -5788,7 +5918,7 @@ relink_command=\"$relink_command\"" dir="$dir/$objdir" else $echo "$modename: cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" 1>&2 - exit 1 + exit $EXIT_FAILURE fi ;; @@ -5868,7 +5998,7 @@ relink_command=\"$relink_command\"" $echo "export $shlibpath_var" fi $echo "$cmd$args" - exit 0 + exit $EXIT_SUCCESS fi ;; @@ -5896,7 +6026,7 @@ relink_command=\"$relink_command\"" if test -z "$rm"; then $echo "$modename: you must specify an RM program" 1>&2 $echo "$help" 1>&2 - exit 1 + exit $EXIT_FAILURE fi rmdirs= @@ -5951,10 +6081,11 @@ relink_command=\"$relink_command\"" if test "$mode" = uninstall; then if test -n "$library_names"; then # Do each command in the postuninstall commands. - eval cmds=\"$postuninstall_cmds\" + cmds=$postuninstall_cmds save_ifs="$IFS"; IFS='~' for cmd in $cmds; do IFS="$save_ifs" + eval cmd=\"$cmd\" $show "$cmd" $run eval "$cmd" if test "$?" -ne 0 && test "$rmforce" != yes; then @@ -5966,10 +6097,11 @@ relink_command=\"$relink_command\"" if test -n "$old_library"; then # Do each command in the old_postuninstall commands. - eval cmds=\"$old_postuninstall_cmds\" + cmds=$old_postuninstall_cmds save_ifs="$IFS"; IFS='~' for cmd in $cmds; do IFS="$save_ifs" + eval cmd=\"$cmd\" $show "$cmd" $run eval "$cmd" if test "$?" -ne 0 && test "$rmforce" != yes; then @@ -6008,7 +6140,7 @@ relink_command=\"$relink_command\"" if test "$mode" = clean ; then noexename=$name case $file in - *.exe) + *.exe) file=`$echo $file|${SED} 's,.exe$,,'` noexename=`$echo $name|${SED} 's,.exe$,,'` # $file with .exe has already been added to rmfiles, @@ -6053,20 +6185,20 @@ relink_command=\"$relink_command\"" "") $echo "$modename: you must specify a MODE" 1>&2 $echo "$generic_help" 1>&2 - exit 1 + exit $EXIT_FAILURE ;; esac if test -z "$exec_cmd"; then $echo "$modename: invalid operation mode \`$mode'" 1>&2 $echo "$generic_help" 1>&2 - exit 1 + exit $EXIT_FAILURE fi fi # test -z "$show_help" if test -n "$exec_cmd"; then eval exec $exec_cmd - exit 1 + exit $EXIT_FAILURE fi # We need to display help for each of the modes. @@ -6102,7 +6234,7 @@ MODE-ARGS vary depending on the MODE. Try \`$modename --help --mode=MODE' for a more detailed description of MODE. Report bugs to ." - exit 0 + exit $EXIT_SUCCESS ;; clean) @@ -6214,6 +6346,8 @@ The following components of LINK-COMMAND are treated specially: -no-undefined declare that a library does not refer to external symbols -o OUTPUT-FILE create OUTPUT-FILE from the specified objects -objectlist FILE Use a list of object files found in FILE to specify objects + -precious-files-regex REGEX + don't remove output files matching REGEX -release RELEASE specify package release information -rpath LIBDIR the created library will eventually be installed in LIBDIR -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries @@ -6255,14 +6389,14 @@ Otherwise, only FILE itself is deleted using RM." *) $echo "$modename: invalid operation mode \`$mode'" 1>&2 $echo "$help" 1>&2 - exit 1 + exit $EXIT_FAILURE ;; esac $echo $echo "Try \`$modename --help' for more information about other modes." -exit 0 +exit $EXIT_SUCCESS # The TAGs below are defined such that we never get into a situation # in which we disable both kinds of libraries. 
Given conflicting diff --git a/db/dist/pubdef.in b/db/dist/pubdef.in index 5c028a20b..90a9d719e 100644 --- a/db/dist/pubdef.in +++ b/db/dist/pubdef.in @@ -1,8 +1,8 @@ # Name # D == documentation # I == include file -# C == Java case value (declared and initialized) -# J == Java constant (declared only) +# J == Java constant +# N == wrapped by the Java native layer DB_AFTER D I J DB_AGGRESSIVE D I J DB_ALREADY_ABORTED * I * @@ -20,6 +20,7 @@ DB_AM_DUPSORT * I * DB_AM_ENCRYPT * I * DB_AM_FIXEDLEN * I * DB_AM_INMEM * I * +DB_AM_INORDER * I * DB_AM_IN_RENAME * I * DB_AM_NOT_DURABLE * I * DB_AM_OPEN_CALLED * I * @@ -44,11 +45,12 @@ DB_ARCH_LOG D I J DB_ARCH_REMOVE D I J DB_AUTO_COMMIT D I J DB_BEFORE D I J -DB_BTREE D I C +DB_BTREE D I J DB_BTREEMAGIC * I * DB_BTREEOLDVER * I * DB_BTREEVERSION * I * -DB_CACHED_COUNTS * I J +DB_BUFFER_SMALL D I N +DB_CACHED_COUNTS * I * DB_CDB_ALLDB D I J DB_CHKSUM D I J DB_CONFIG D * * @@ -56,23 +58,26 @@ DB_CONSUME D I J DB_CONSUME_WAIT D I J DB_CREATE D I J DB_CURRENT D I J -DB_CXX_NO_EXCEPTIONS D I J +DB_CXX_NO_EXCEPTIONS D I * DB_DBM_HSEARCH * I * -DB_DBT_APPMALLOC D I * +DB_DBT_APPMALLOC D I N DB_DBT_DUPOK * I * DB_DBT_ISSET * I * DB_DBT_MALLOC D I J DB_DBT_PARTIAL D I J -DB_DBT_REALLOC D I J +DB_DBT_REALLOC D I N DB_DBT_USERMEM D I J +DB_DEGREE_2 D I J DB_DELETED * I * DB_DIRECT D I * DB_DIRECT_DB D I J DB_DIRECT_LOG D I J DB_DIRTY_READ D I J -DB_DONOTINDEX D I C +DB_DONOTINDEX D I J +DB_DSYNC_LOG D I J DB_DUP D I J DB_DUPSORT D I J +DB_DURABLE_UNKNOWN * I * DB_EID_BROADCAST D I J DB_EID_INVALID D I J DB_ENCRYPT D I J @@ -84,9 +89,11 @@ DB_ENV_CREATE * I * DB_ENV_DBLOCAL * I * DB_ENV_DIRECT_DB * I * DB_ENV_DIRECT_LOG * I * +DB_ENV_DSYNC_LOG * I * DB_ENV_FATAL * I * DB_ENV_LOCKDOWN * I * DB_ENV_LOG_AUTOREMOVE * I * +DB_ENV_LOG_INMEMORY * I * DB_ENV_NOLOCKING * I * DB_ENV_NOMMAP * I * DB_ENV_NOPANIC * I * @@ -100,14 +107,12 @@ DB_ENV_SYSTEM_MEM * I * DB_ENV_THREAD * I * DB_ENV_TIME_NOTGRANTED * I * DB_ENV_TXN_NOSYNC * I * -DB_ENV_TXN_NOT_DURABLE * I * DB_ENV_TXN_WRITE_NOSYNC * I * DB_ENV_YIELDCPU * I * DB_EXCL D I J DB_EXTENT * I * DB_FAST_STAT D I J DB_FCNTL_LOCKING * I * -DB_FILEOPEN D I C DB_FILE_ID_LEN * I * DB_FIRST D I J DB_FLUSH D I J @@ -117,7 +122,7 @@ DB_GET_BOTHC * I * DB_GET_BOTH_RANGE D I J DB_GET_RECNO D I J DB_HANDLE_LOCK * I * -DB_HASH D I C +DB_HASH D I J DB_HASHMAGIC * I * DB_HASHOLDVER * I * DB_HASHVERSION * I * @@ -128,17 +133,19 @@ DB_INIT_LOG D I J DB_INIT_MPOOL D I J DB_INIT_REP D I J DB_INIT_TXN D I J +DB_INORDER D I J DB_JOINENV D I J DB_JOIN_ITEM D I J DB_JOIN_NOSORT D I J -DB_KEYEMPTY D I C -DB_KEYEXIST D I C +DB_KEYEMPTY D I J +DB_KEYEXIST D I J DB_KEYFIRST D I J DB_KEYLAST D I J DB_LAST D I J DB_LOCKDOWN D I J DB_LOCKVERSION * I * -DB_LOCK_DEADLOCK D I C +DB_LOCK_ABORT * I * +DB_LOCK_DEADLOCK D I N DB_LOCK_DEFAULT D I J DB_LOCK_DIRTY * I * DB_LOCK_DUMP * I * @@ -150,12 +157,13 @@ DB_LOCK_IREAD D I J DB_LOCK_IWR D I J DB_LOCK_IWRITE D I J DB_LOCK_MAXLOCKS D I J +DB_LOCK_MAXWRITE D I J DB_LOCK_MINLOCKS D I J DB_LOCK_MINWRITE D I J DB_LOCK_NG * I * DB_LOCK_NORUN * I * DB_LOCK_NOTEXIST * I * -DB_LOCK_NOTGRANTED D I C +DB_LOCK_NOTGRANTED D I J DB_LOCK_NOWAIT D I J DB_LOCK_OLDEST D I J DB_LOCK_PUT D I J @@ -182,17 +190,19 @@ DB_LOGMAGIC * I * DB_LOGOLDVER * I * DB_LOGVERSION * I * DB_LOG_AUTOREMOVE D I J +DB_LOG_BUFFER_FULL D I * DB_LOG_CHKPNT * I * DB_LOG_COMMIT * I * DB_LOG_DISK * I * +DB_LOG_INMEMORY D I J DB_LOG_LOCKED * I * DB_LOG_NOCOPY * I * DB_LOG_NOT_DURABLE * I * DB_LOG_PERM * I * +DB_LOG_RESEND * I * DB_LOG_SILENT_ERR * I * 
DB_LOG_WRNOSYNC * I * DB_LSTAT_ABORTED * I * -DB_LSTAT_ERR * I * DB_LSTAT_EXPIRED * I * DB_LSTAT_FREE * I * DB_LSTAT_HELD * I * @@ -205,10 +215,11 @@ DB_MPOOL_CLEAN D I * DB_MPOOL_CREATE D I * DB_MPOOL_DIRTY D I * DB_MPOOL_DISCARD D I * +DB_MPOOL_FREE * I * DB_MPOOL_LAST D I * DB_MPOOL_NEW D I * DB_MPOOL_NOFILE D I J -DB_MPOOL_UNLINK * I * +DB_MPOOL_UNLINK D I J DB_MULTIPLE D I J DB_MULTIPLE_INIT D I * DB_MULTIPLE_KEY D I J @@ -225,23 +236,23 @@ DB_NOMMAP D I J DB_NOORDERCHK D I J DB_NOOVERWRITE D I J DB_NOPANIC D I J -DB_NOSERVER D I C -DB_NOSERVER_HOME D I C -DB_NOSERVER_ID D I C +DB_NOSERVER D I * +DB_NOSERVER_HOME D I J +DB_NOSERVER_ID D I J DB_NOSYNC D I J -DB_NOTFOUND D I C +DB_NOTFOUND D I J DB_NO_AUTO_COMMIT * I * DB_ODDFILESIZE D I * DB_OK_BTREE * I * DB_OK_HASH * I * DB_OK_QUEUE * I * DB_OK_RECNO * I * -DB_OLD_VERSION D I C +DB_OLD_VERSION D I * DB_OPFLAGS_MASK * I * DB_ORDERCHKONLY D I J DB_OVERWRITE D I J DB_PAGE_LOCK * I * -DB_PAGE_NOTFOUND D I C +DB_PAGE_NOTFOUND D I * DB_PANIC_ENVIRONMENT D I J DB_POSITION D I J DB_PREV D I J @@ -258,12 +269,12 @@ DB_PR_RECOVERYTEST * I * DB_QAMMAGIC * I * DB_QAMOLDVER * I * DB_QAMVERSION * I * -DB_QUEUE D I C +DB_QUEUE D I J DB_RDONLY D I J DB_RDWRMASTER * I * -DB_RECNO D I C +DB_RECNO D I J DB_RECNUM D I J -DB_RECORDCOUNT * I J +DB_RECORDCOUNT * I * DB_RECORD_LOCK * I * DB_RECOVER D I J DB_RECOVER_FATAL D I J @@ -274,37 +285,53 @@ DB_RENAMEMAGIC * I * DB_RENUMBER D I J DB_REP_CLIENT D I J DB_REP_CREATE * I * -DB_REP_DUPMASTER D I C -DB_REP_HANDLE_DEAD D I C -DB_REP_HOLDELECTION D I C +DB_REP_DUPMASTER D I J +DB_REP_EGENCHG * I * +DB_REP_HANDLE_DEAD D I N +DB_REP_HOLDELECTION D I J DB_REP_ISPERM D I J -DB_REP_LOGSONLY D I J +DB_REP_LOGREADY * I * DB_REP_MASTER D I J -DB_REP_NEWMASTER D I C -DB_REP_NEWSITE D I C +DB_REP_NEWMASTER D I J +DB_REP_NEWSITE D I J DB_REP_NOBUFFER D I J DB_REP_NOTPERM D I J -DB_REP_OUTDATED D I C +DB_REP_PAGEDONE * I * DB_REP_PERMANENT D I J -DB_REP_UNAVAIL D I J +DB_REP_STARTUPDONE D I J +DB_REP_UNAVAIL D I * DB_REVSPLITOFF D I J DB_RMW D I J DB_RPCCLIENT D I J -DB_RUNRECOVERY D I C +DB_RUNRECOVERY D I N DB_SALVAGE D I J -DB_SECONDARY_BAD D I C +DB_SECONDARY_BAD D I * +DB_SEQUENCE_VERSION * I * +DB_SEQ_DEC D I J +DB_SEQ_INC D I J +DB_SEQ_RANGE_SET * I * +DB_SEQ_WRAP D I J DB_SET D I J +DB_SET_BEGIN_LSNP * I * DB_SET_LOCK_TIMEOUT D I J DB_SET_RANGE D I J DB_SET_RECNO D I J DB_SET_TXN_NOW * I * DB_SET_TXN_TIMEOUT D I J DB_SNAPSHOT D I J +DB_STAT_ALL D I * DB_STAT_CLEAR D I J +DB_STAT_LOCK_CONF D I * +DB_STAT_LOCK_LOCKERS D I * +DB_STAT_LOCK_OBJECTS D I * +DB_STAT_LOCK_PARAMS D I * +DB_STAT_MEMP_HASH D I * +DB_STAT_SUBSYSTEM D I * DB_SURPRISE_KID * I * DB_SWAPBYTES * I * DB_SYSTEM_MEM D I J DB_TEST_ELECTINIT * I * +DB_TEST_ELECTVOTE1 * I * DB_TEST_POSTDESTROY * I * DB_TEST_POSTLOG * I * DB_TEST_POSTLOGMETA * I * @@ -318,39 +345,39 @@ DB_TIMEOUT * I * DB_TIME_NOTGRANTED D I J DB_TRUNCATE D I J DB_TXNVERSION * I * -DB_TXN_ABORT D I C -DB_TXN_APPLY D I C +DB_TXN_ABORT D I J +DB_TXN_APPLY D I J DB_TXN_BACKWARD_ALLOC * I * -DB_TXN_BACKWARD_ROLL D I C +DB_TXN_BACKWARD_ROLL D I J DB_TXN_CKP * I * -DB_TXN_FORWARD_ROLL D I C -DB_TXN_GETPGNOS * I * +DB_TXN_FORWARD_ROLL D I J DB_TXN_NOSYNC D I J DB_TXN_NOT_DURABLE D I J DB_TXN_NOWAIT D I J DB_TXN_OPENFILES * I * DB_TXN_POPENFILES * I * -DB_TXN_PRINT D I C +DB_TXN_PRINT D I J DB_TXN_SYNC D I J DB_TXN_WRITE_NOSYNC D I J DB_UNDO * I * -DB_UNKNOWN D I C +DB_UNKNOWN D I J +DB_UNREF * I * DB_UPDATE_SECONDARY * I * DB_UPGRADE D I J DB_USE_ENVIRON D I J DB_USE_ENVIRON_ROOT D I J 
-DB_VERB_CHKPOINT D I J DB_VERB_DEADLOCK D I J DB_VERB_RECOVERY D I J DB_VERB_REPLICATION D I J DB_VERB_WAITSFOR D I J DB_VERIFY D I J -DB_VERIFY_BAD D I C +DB_VERIFY_BAD D I N DB_VERIFY_FATAL * I * DB_VERSION_MAJOR * I J DB_VERSION_MINOR * I J +DB_VERSION_MISMATCH D I * DB_VERSION_PATCH * I J -DB_VERSION_STRING * I * +DB_VERSION_STRING * I N DB_WRITECURSOR D I J DB_WRITELOCK * I * DB_WRITEOPEN * I * diff --git a/db/dist/s_include b/db/dist/s_include index 44bfce30e..ab058d86d 100755 --- a/db/dist/s_include +++ b/db/dist/s_include @@ -1,5 +1,5 @@ #!/bin/sh - -# $Id: s_include,v 1.19 2002/03/27 04:31:50 bostic Exp $ +# $Id: s_include,v 1.22 2004/04/19 18:27:17 mjc Exp $ # # Build the automatically generated function prototype files. @@ -72,13 +72,18 @@ head defonly _DB_INT_DEF_IN_ > $i_dfile # Process the standard directories, creating per-directory prototype # files and adding to the external prototype and #define files. for i in db btree clib common crypto dbreg env fileops hash hmac \ - lock log mp mutex os qam rep rpc_client rpc_server tcl txn xa; do + lock log mp mutex os qam rep rpc_client rpc_server sequence tcl txn xa; do head "_${i}_ext_h_" > $i_pfile - f="../$i/*.c" - [ $i = os ] && f="$f ../os_win32/*.c" - [ $i = rpc_server ] && f="../$i/c/*.c" - [ $i = crypto ] && f="../$i/*.c ../$i/*/*.c" + if [ $i = os ] ; then + f=`ls ../$i/*.c ../os_win32/*.c` + elif [ $i = rpc_server ] ; then + f=`ls ../$i/c/*.c` + elif [ $i = crypto ] ; then + f=`ls ../$i/*.c ../$i/*/*.c` + else + f=`ls ../$i/*.c` + fi awk -f gen_inc.awk \ -v db_version_unique_name=$DB_VERSION_UNIQUE_NAME \ -v e_dfile=$e_dfile \ @@ -96,7 +101,7 @@ done # Process directories which only add to the external prototype and #define # files. for i in dbm hsearch; do - f="../$i/*.c" + f=`ls ../$i/*.c` awk -f gen_inc.awk \ -v db_version_unique_name=$DB_VERSION_UNIQUE_NAME \ -v e_dfile=$e_dfile \ @@ -105,16 +110,6 @@ for i in dbm hsearch; do -v i_pfile="" $f done -# RPC uses rpcgen to generate a header file; post-process it to add more -# interfaces to the internal #define file. -sed -e '/extern bool_t xdr___/{' \ - -e 's/.* //' \ - -e 's/();//' \ - -e 's/.*/#define & &@DB_VERSION_UNIQUE_NAME@/' \ - -e 'p' \ - -e '}' \ - -e d < ../dbinc_auto/db_server.h >> $i_dfile - # There are a few globals in DB -- add them to the external/internal # #define files. (echo "#define __db_global_values __db_global_values@DB_VERSION_UNIQUE_NAME@"; @@ -141,7 +136,7 @@ cmp $i_dfile $f > /dev/null 2>&1 || head space defonly _DB_EXT_185_DEF_IN_ > $e_dfile head space _DB_EXT_185_PROT_IN_ > $e_pfile -f="../db185/*.c" +f=`ls ../db185/*.c` awk -f gen_inc.awk \ -v db_version_unique_name=$DB_VERSION_UNIQUE_NAME \ -v e_dfile=$e_dfile \ diff --git a/db/dist/s_java b/db/dist/s_java index c262a9397..ae715ccd6 100755 --- a/db/dist/s_java +++ b/db/dist/s_java @@ -1,9 +1,8 @@ #!/bin/sh - -# $Id: s_java,v 11.6 2003/11/07 14:39:26 bostic Exp $ +# $Id: s_java,v 11.8 2004/04/06 20:43:35 mjc Exp $ # # Build the Java files. +sh s_java_stat # Create Java stat methods sh s_java_swig # Create core Java API with SWIG sh s_java_const # Create Java constants -sh s_java_stat # Create Java stat methods -sh s_java_camel # Camel-case the Java API. diff --git a/db/dist/s_java_const b/db/dist/s_java_const index 338bb13ec..644b32324 100755 --- a/db/dist/s_java_const +++ b/db/dist/s_java_const @@ -1,5 +1,5 @@ #!/bin/sh - -# $Id: s_java_const,v 1.25 2003/09/04 23:59:03 bostic Exp $ +# $Id: s_java_const,v 1.27 2004/07/30 14:51:37 mjc Exp $ # # Build the Java files. 
@@ -10,65 +10,26 @@ msgjava="/* DO NOT EDIT: automatically built by dist/s_java_const. */" t=/tmp/__java trap 'rm -f $t; exit 0' 0 1 2 3 13 15 -# Build {debug,release}/DbConstants.java. -for build in debug release ; do - isdebug=`echo $build | sed 's/debug/true/;s/release/false/'` - (echo "$msgjava" && - echo && - echo 'package com.sleepycat.db;' && - echo && - echo 'class DbConstants' && - echo '{' && - for i in `egrep '^DB_.*J$' pubdef.in | awk '{print $1}'`; do \ - egrep -w "^#define[ ]$i|^[ ][ ]*$i" ../dbinc/db.in; \ - done | - sed -e "s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/" \ - -e "s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/" \ - -e "s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/" \ - -e 's/^#define[ ][ ]*//' \ - -e 's/[()=,]/ /g' \ - -e 's/\/\*/ /' | \ - awk '{ print " static final int " $1 " = " $2 ";" }' && - echo && - echo " static final boolean DB_DEBUG = ${isdebug};" - echo '}' && - echo && - echo '// end of DbConstants.java') > $t - - f=../java/src/com/sleepycat/db/$build/DbConstants.java - cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) -done - -# Build Db.java. -f=../java/src/com/sleepycat/db/Db.java -sed '/BEGIN-JAVA-SPECIAL-CONSTANTS/q' < $f > $t -(echo " $msgjava" && - for i in `egrep '^DB_.*C$' pubdef.in | awk '{print $1}'`; do \ - egrep -w "^#define[ ]$i|^[ ][ ]*$i" ../dbinc/db.in; \ - done | - sed -e 's/^#define[ ][ ]*//' \ - -e 's/[()=,]/ /g' | - awk '{ print " public static final int " $1 " = " $2 ";" }') >> $t -(for i in `egrep '^DB_.*J$' pubdef.in | awk '{print $1}'`; do \ - egrep -w "^#define[ ]$i|^[ ][ ]*$i" ../dbinc/db.in; \ - done | - sed -e 's/^#define[ ][ ]*//' \ - -e 's/[()=,]/ /g' | - awk '{ print " public static final int " $1 ";" }') >> $t -sed -n \ - '/END-JAVA-SPECIAL-CONSTANTS/,/BEGIN-JAVA-CONSTANT-INITIALIZATION/p' \ - < $f >> $t -(echo " $msgjava" && +(echo "$msgjava" && + echo && + echo 'package com.sleepycat.db.internal;' && + echo && + echo 'public interface DbConstants' && + echo '{' && for i in `egrep '^DB_.*J$' pubdef.in | awk '{print $1}'`; do \ - egrep -w "^#define[ ]$i|^[ ][ ]*$i" ../dbinc/db.in; \ + egrep -w "^#define[ ]$i|^[ ][ ]*$i" ../dbinc/db.in; \ done | - sed -e 's/^#define[ ][ ]*//' \ + sed -e "s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/" \ + -e "s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/" \ + -e "s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/" \ + -e 's/^#define[ ][ ]*//' \ -e 's/[()=,]/ /g' \ -e 's/\/\*/ /' | \ - awk '{ print " " $1 " = DbConstants." $1 ";" }') >> $t -sed -n '/END-JAVA-CONSTANT-INITIALIZATION/,$p' < $f >> $t + awk '{ print " int " $1 " = " $2 ";" }' && + echo '}' && + echo && + echo '// end of DbConstants.java') > $t -f=../java/src/com/sleepycat/db/Db.java +f=../java/src/com/sleepycat/db/internal/DbConstants.java cmp $t $f > /dev/null 2>&1 || (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) diff --git a/db/dist/s_java_stat b/db/dist/s_java_stat index 893d3dc7e..4eb2ab147 100755 --- a/db/dist/s_java_stat +++ b/db/dist/s_java_stat @@ -1,186 +1,314 @@ #!/bin/sh - -# $Id: s_java_stat,v 1.26 2003/09/04 23:59:04 bostic Exp $ +# $Id: s_java_stat,v 1.33 2004/09/28 19:30:36 mjc Exp $ # # Build the Java files. -msgjava="/* DO NOT EDIT: automatically built by dist/s_java_stat. */" +msgjava="/*- + * DO NOT EDIT: automatically built by dist/s_java_stat. + * + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. 
+ */" + +s=/tmp/__java.sed t=/tmp/__java c=/tmp/__javajnic -h=/tmp/__javajnih -trap 'rm -f $t $c $h; exit 0' 0 1 2 3 13 15 +u1=/tmp/__javautil1 +u2=/tmp/__javautil2 +trap 'rm -f $t $c $u1 $u2; exit 0' 0 1 2 3 13 15 # Script to convert DB C structure declarations into Java declarations. jclass() { - cat ../dbinc/db.in | - sed -n \ - -e "/struct $1 {/,/^}/{" \ - -e "/$1/d" \ - -e '/;/!d' \ - -e '/^}/d' \ - -e '/char[ ]*\*/{' \ - -e "s/^[ ]*[^\*]*\*[ ]*\([^;]*\).*/$2 public String \1;/p"\ - -e 'd' \ - -e '}' \ - -e '/time_t/{' \ - -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/$2 public long \1;/p" \ - -e 'd' \ - -e '}' \ - -e '/DB_LSN[ ]*/{' \ - -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/$2 public DbLsn \1;/p"\ - -e 'd' \ - -e '}' \ - -e '/DB_TXN_ACTIVE[ ]*\*/{' \ - -e "s/^[ ]*[^\*]*\*[ ]*\([^;]*\).*/$2 public Active \1[];/p"\ - -e 'd' \ - -e '}' \ - -e '/u_int8_t[ ]*xid\[/{' \ - -e "s/^[ ]*[^\*]*\*[ ]*\([^;]*\).*/$2 public byte[] xid;/p"\ - -e 'd' \ - -e '}' \ - -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/$2 public int \1;/p" \ - -e '}' + cat > $s <> $c - echo " jobject jobj, struct $1 *statp) {" >> $c - cat ../dbinc/db.in | - sed -n \ - -e "/struct $1 {/,/^}/{" \ - -e "/$1/d" \ - -e '/;/!d' \ - -e '/^}/d' \ - -e '/char[ ]*\*/{' \ - -e "s/^[ ]*[^\*]*\*[ ]*\([^;]*\).*/ JAVADB_STAT_STRING(jnienv, cl, jobj, statp, \1);/p"\ - -e 'd' \ - -e '}' \ - -e '/time_t/{' \ - -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/ JAVADB_STAT_LONG(jnienv, cl, jobj, statp, \1);/p" \ - -e 'd' \ - -e '}' \ - -e '/DB_LSN[ ]*/{' \ - -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/ JAVADB_STAT_LSN(jnienv, cl, jobj, statp, \1);/p"\ - -e 'd' \ - -e '}' \ - -e '/DB_TXN_ACTIVE[ ]*\*/{' \ - -e "s/^[ ]*[^\*]*\*[ ]*\([^;]*\).*/ JAVADB_STAT_ACTIVE(jnienv, cl, jobj, statp, \1);/p"\ - -e 'd' \ - -e '}' \ - -e '/u_int8_t[ ]*xid\[/{' \ - -e "s/^[ ]*[^\*]*\*[ ]*\([^;]*\).*/ JAVADB_STAT_XID(jnienv, cl, jobj, statp, xid);/p"\ - -e 'd' \ - -e '}' \ - -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/ JAVADB_STAT_INT(jnienv, cl, jobj, statp, \1);/p" \ - -e '}' >> $c + fill=__dbj_fill_$1 + j_class=$2 + jni_fieldid_decls $1 $2 + jni_fieldids $1 $2 + cat > $s <> $c + echo " jobject jobj, struct __db_$1 *statp) {" >> $c + sed -n -f $s < ../dbinc/db.in >> $c echo ' return (0);' >> $c echo '}' >> $c } +jni_fieldid_decls() +{ + cat > $s <> $u1 +} + +jni_fieldids() +{ + cat > $s <> $u2 +} + # Script to convert DB C structure declarations into a toString method body jclass_toString() { - echo "/**" - echo " * Provide a string representation of all the fields contained" - echo " * within this class." - echo " *" - echo " * @return The string representation." - echo " */" + cat > $s <> $c - stat_class() { - c_struct=$1 + c_struct=__db_$1 j_class=$2 - fill=$3 + extends=$3 (echo "$msgjava" echo echo 'package com.sleepycat.db;' echo - echo "/**" - echo " * Statistics associated with $j_class generated by" - echo " * DbEnv on request.

" - echo " * The information contained within instances of this" - echo " * class is a snapshot in time, it is not continually updated." - echo " */" - echo "public class $j_class" - echo '{' - jclass $c_struct - echo - jclass_toString $c_struct $j_class - echo '}' - echo "// end of $j_class.java") > $t - jclass_jni $c_struct $fill $c + echo "public class $j_class$extends {" + echo " // no public constructor" + echo " protected $j_class() {}" + jclass $1 + jclass_toString $1 $2 + echo '}') > $t + jclass_jni $1 $2 f=../java/src/com/sleepycat/db/$j_class.java cmp $t $f > /dev/null 2>&1 || (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) } -stat_class __db_bt_stat DbBtreeStat __dbj_fill_bt_stat -stat_class __db_h_stat DbHashStat __dbj_fill_h_stat -stat_class __db_lock_stat DbLockStat __dbj_fill_lock_stat -stat_class __db_log_stat DbLogStat __dbj_fill_log_stat -stat_class __db_mpool_fstat DbMpoolFStat __dbj_fill_mpool_fstat -stat_class __db_mpool_stat DbMpoolStat __dbj_fill_mpool_stat -stat_class __db_qam_stat DbQueueStat __dbj_fill_qam_stat -stat_class __db_rep_stat DbRepStat __dbj_fill_rep_stat +echo "$msgjava" > $c +> $u1 +> $u2 -# Build DbTxnStat.java - special because of embedded Active class -(echo "$msgjava" && - echo && - echo 'package com.sleepycat.db;' && - echo && - echo "/**" - echo " * Statistics associated with DbTxnStat generated by" - echo " * DbEnv on request." - echo " * The information contained within instances of this" - echo " * class is a snapshot in time, it is not continually updated." - echo " */" - echo "public class DbTxnStat" - echo '{' - echo " public static class Active {" - jclass __db_txn_active " " - jclass_toString __db_txn_active Active " " +stat_class bt_stat BtreeStats " extends DatabaseStats" +stat_class h_stat HashStats " extends DatabaseStats" +stat_class lock_stat LockStats +stat_class log_stat LogStats +stat_class mpool_fstat CacheFileStats +stat_class mpool_stat CacheStats +stat_class qam_stat QueueStats " extends DatabaseStats" +stat_class rep_stat ReplicationStats +stat_class seq_stat SequenceStats +# Build TransactionStats.java - special because of embedded Active class +(echo "$msgjava" + echo + echo 'package com.sleepycat.db;' + echo + echo 'import com.sleepycat.db.internal.DbUtil;' + echo + echo "public class TransactionStats" + echo '{' + echo " // no public constructor" + echo " protected TransactionStats() {}" + echo + echo -n " public static class Active {" + echo " // no public constructor" + echo " protected Active() {}" + jclass txn_active " " + jclass_toString txn_active Active " " echo ' };' - jclass __db_txn_stat - jclass_toString __db_txn_stat DbTxnStat + jclass txn_stat + jclass_toString txn_stat TransactionStats echo '}' - echo '// end of DbTxnStat.java') > $t -jclass_jni __db_txn_stat __dbj_fill_txn_stat $c $h -jclass_jni __db_txn_active __dbj_fill_txn_active $c $h -f=../java/src/com/sleepycat/db/DbTxnStat.java + echo '// end of TransactionStats.java') > $t +jclass_jni txn_stat __dbj_fill_txn_stat +jclass_jni txn_active __dbj_fill_txn_active +f=../java/src/com/sleepycat/db/TransactionStats.java cmp $t $f > /dev/null 2>&1 || (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) @@ -188,3 +316,12 @@ mv $c $t f=../libdb_java/java_stat_auto.c cmp $t $f > /dev/null 2>&1 || (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) + +f=../libdb_java/java_util.i +sed '/BEGIN-STAT-FIELD-DECLS/q' < $f > $t +cat $u1 >> $t +sed -n '/END-STAT-FIELD-DECLS/,/BEGIN-STAT-FIELDS/p' < $f >> $t +cat $u2 >> $t +sed -n 
'/END-STAT-FIELDS/,$p' < $f >> $t +cmp $t $f > /dev/null 2>&1 || + (echo "Building $f" && rm -f $f && cp $t $f && chmod 644 $f) diff --git a/db/dist/s_java_swig b/db/dist/s_java_swig index 569a5e2bc..c1e67e316 100755 --- a/db/dist/s_java_swig +++ b/db/dist/s_java_swig @@ -1,11 +1,12 @@ #!/bin/sh - -# $Id: s_java_swig,v 11.6 2003/05/19 01:14:37 bostic Exp $ +# $Id: s_java_swig,v 11.11 2004/09/23 17:31:53 mjc Exp $ # # Run SWIG to generate the Java APIs SWIG=swig SWIG_DIR=../libdb_java SWIG_FILE=$SWIG_DIR/db.i +PACKAGE="com.sleepycat.db.internal" die() { echo "$@" >&2 @@ -20,7 +21,7 @@ for api in java ; do swig_args="" case $api in java) - swig_args="-nodefault -package com.sleepycat.db $args" + swig_args="-nodefault -package $PACKAGE $args" ;; esac @@ -28,9 +29,15 @@ for api in java ; do -o ../libdb_$api/db_${api}_wrap.c $SWIG_FILE || exit $? done +# Skip Java sources if run with "-n" +if [ "x$1" = "x-n" ] ; then + rm -f $SWIG_DIR/*.java + exit 0 +fi + # Fixups for Java JAVA_SRCTOP=../java/src -JAVA_PKGDIR=com/sleepycat/db +JAVA_PKGDIR=com/sleepycat/db/internal JAVA_SRCDIR=$JAVA_SRCTOP/$JAVA_PKGDIR # SWIG 1.3.18 puts the Java files in the same directory as the native code. @@ -38,8 +45,11 @@ cd $SWIG_DIR [ -f Db.java ] || exit 1 for f in *.java ; do + case $f in + SWIGTYPE*) + die "Interface contains unresolved types: $f" + esac rm -f $JAVA_SRCDIR/$f - sed -f $SWIG_DIR/java-post.sed < $f > $JAVA_SRCDIR/$f || exit $? + perl -p $SWIG_DIR/java-post.pl < $f > $JAVA_SRCDIR/$f || exit $? rm -f $f done -cd ../dist diff --git a/db/dist/s_je2db b/db/dist/s_je2db new file mode 100644 index 000000000..aaaa42cf1 --- /dev/null +++ b/db/dist/s_je2db @@ -0,0 +1,88 @@ +#!/bin/sh - + +# The examples must be hand-edited after they are copied: +# - add setInitializeCache(true), setInitializeLocking(true), setType(BTREE) +# - add null databaseName param to openDatabase() and openSecondaryDatabase() +# - remove foreign key configuration and imports + +COPY_EXAMPLES=0 + +JEDIR=$1 +if [ $# -eq 1 ] ; then + DBDIR=.. +else + DBDIR=$2 +fi + +if [ ! -d "$DBDIR/dbinc" -o ! -f "$JEDIR/build.xml" ] ; then + echo >&2 "Usage $0 /path/to/je [ /path/to/db ]" + exit 1 +fi + +JEDIR=$(cd "$JEDIR" ; /bin/pwd) +DBDIR=$(cd "$DBDIR" ; /bin/pwd) + +JESRC="$JEDIR/src" +JETEST="$JEDIR/test" +JEEXAMPLES="$JEDIR/examples" +DBSRC="$DBDIR/java/src" +DBTEST="$DBDIR/test/scr024/src" +DBEXAMPLES="$DBDIR/examples_java/src" +DIRMATCH="com/sleepycat\(/examples\)*/\(\(bind\)\|\(collections\)\|\(util\)\)" + +cd "$JESRC" +for d in `find . -type d | grep -v CVS | grep $DIRMATCH` ; do + #echo "$DBSRC/$d" + mkdir -p "$DBSRC/$d" +done +cd "$JETEST" +for d in `find . -type d | grep -v CVS | grep $DIRMATCH` ; do + #echo "$DBTEST/$d" + mkdir -p "$DBTEST/$d" +done +if [ $COPY_EXAMPLES -eq 1 ] ; then + cd "$JEEXAMPLES" + for d in `find . -type d | grep -v CVS | grep $DIRMATCH` ; do + #echo "$DBEXAMPLES/$d" + mkdir -p "$DBEXAMPLES/$d" + done +fi + +E1='s/com\.sleepycat\.je/com.sleepycat.db/g' +E2='/import com\.sleepycat\.db\.ForeignKeyNullifier/d' +E3='/implements/s/, ForeignKeyNullifier//' +E4='//,//d' +EXCLUDETESTS="\(\(ForeignKeyTest\)\|\(TupleSerialFactoryTest\)\)" + +cd "$JESRC" +for f in `find . -name '*.java' | grep $DIRMATCH` ; do + #echo $DBSRC/$f + sed -e "$E1" -e "$E2" -e "$E3" -e "$E4" < $f > $DBSRC/$f.sed.out + diff -q -I "\$\Id:" $DBSRC/$f $DBSRC/$f.sed.out || \ + mv -f $DBSRC/$f.sed.out $DBSRC/$f + rm -f $DBSRC/$f.sed.out +done + +cd "$JETEST" +for f in `find . 
-name '*.java' | grep $DIRMATCH | grep -v $EXCLUDETESTS` ; do + #echo $DBTEST/$f + sed -e "$E1" < $f > $DBTEST/$f.sed.out + diff -q -I "\$\Id:" $DBTEST/$f $DBTEST/$f.sed.out || \ + mv -f $DBTEST/$f.sed.out $DBTEST/$f + rm -f $DBTEST/$f.sed.out +done +cp -f "com/sleepycat/collections/test/serial/TestSerial.java.original" \ + "$DBTEST/com/sleepycat/collections/test/serial" + +if [ $COPY_EXAMPLES -eq 1 ] ; then + cd "$JEEXAMPLES" + for f in `find . -name '*.java' | grep $DIRMATCH` ; do + #echo $DBEXAMPLES/$f + sed -e "$E1" < $f > $DBEXAMPLES/$f.sed.out + diff -q -I "\$\Id:" $DBEXAMPLES/$f $DBEXAMPLES/$f.sed.out || \ + mv -f $DBEXAMPLES/$f.sed.out $DBEXAMPLES/$f + rm -f $DBEXAMPLES/$f.sed.out + done +fi + +exit 0 diff --git a/db/dist/s_perm b/db/dist/s_perm index 16450873a..8c3a0f746 100755 --- a/db/dist/s_perm +++ b/db/dist/s_perm @@ -1,5 +1,5 @@ #!/bin/sh - -# $Id: s_perm,v 1.29 2003/11/17 21:11:45 bostic Exp $ +# $Id: s_perm,v 1.31 2004/10/05 18:56:58 bostic Exp $ d=.. echo 'Updating Berkeley DB source tree permissions...' @@ -25,7 +25,6 @@ run dist/s_config 555 run dist/s_crypto 555 run dist/s_include 555 run dist/s_java 555 -run dist/s_java_camel 555 run dist/s_java_const 555 run dist/s_java_stat 555 run dist/s_java_swig 555 @@ -40,6 +39,7 @@ run dist/s_vxworks 555 run dist/s_win32 555 run dist/s_win32_dsp 555 run dist/vx_buildcd 555 +run mod_db4/configure 555 run perl/BerkeleyDB/dbinfo 555 run perl/BerkeleyDB/mkpod 555 diff --git a/db/dist/s_recover b/db/dist/s_recover index 331ae623d..b30a08554 100755 --- a/db/dist/s_recover +++ b/db/dist/s_recover @@ -1,30 +1,29 @@ #!/bin/sh - -# $Id: s_recover,v 1.14 2002/03/27 04:31:51 bostic Exp $ +# $Id: s_recover,v 1.17 2004/06/17 17:35:19 bostic Exp $ # # Build the automatically generated logging/recovery files. -tmp=/tmp/__db_a +header=/tmp/__db_a loglist=/tmp/__db_b -source=/tmp/__db_c -header=/tmp/__db_d +print=/tmp/__db_c +source=/tmp/__db_d template=/tmp/__db_e +tmp=/tmp/__db_f -trap 'rm -f $tmp $loglist $source $header $template; exit 1' 1 2 3 13 15 -trap 'rm -f $tmp $loglist $source $header $template; exit 0' 0 +trap 'rm -f /tmp/__db_[abcdef]; exit 1' 1 2 3 13 15 +trap 'rm -f /tmp/__db_[abcdef]; exit 0' 0 -DIR="db dbreg btree hash qam txn" +DIR="db dbreg btree fileops hash qam rep txn" # Check to make sure we haven't duplicated a log record entry, and build # the list of log record types that the test suite uses. for i in $DIR; do - p=none for f in ../$i/*.src; do # Grab the PREFIX; there should only be one per file, and # so it's okay to just take the first. grep '^PREFIX' $f | sed q egrep '^BEGIN[ ]|^IGNORED[ ]|^DEPRECATED[ ]' $f | awk '{print $1 "\t" $2 "\t" $3}' - done done > $loglist grep -v '^PREFIX' $loglist | @@ -40,12 +39,13 @@ cmp $loglist $f > /dev/null 2>&1 || (echo "Building $f" && rm -f $f && cp $loglist $f && chmod 444 $f) # Build DB's recovery routines. 
-for i in db dbreg btree fileops hash qam txn; do +for i in $DIR; do for f in ../$i/*.src; do subsystem=`basename $f .src` awk -f gen_rec.awk \ - -v source_file=$source \ -v header_file=$header \ + -v print_file=$print\ + -v source_file=$source \ -v template_file=$template < $f f=../dbinc_auto/${subsystem}_auto.h @@ -56,6 +56,10 @@ for i in db dbreg btree fileops hash qam txn; do cmp $source $f > /dev/null 2>&1 || (echo "Building $f" && rm -f $f && cp $source $f && chmod 444 $f) + f=../$i/${subsystem}_autop.c + cmp $print $f > /dev/null 2>&1 || + (echo "Building $f" && + rm -f $f && cp $print $f && chmod 444 $f) f=template/rec_${subsystem} cmp $template $f > /dev/null 2>&1 || (echo "Building $f" && diff --git a/db/dist/s_rpc b/db/dist/s_rpc index 6c420eee1..8dada0e3b 100644 --- a/db/dist/s_rpc +++ b/db/dist/s_rpc @@ -1,5 +1,5 @@ #!/bin/sh - -# $Id: s_rpc,v 11.19 2003/07/02 15:27:44 bostic Exp $ +# $Id: s_rpc,v 11.20 2004/03/11 20:11:17 bostic Exp $ # # Build the automatically generated RPC files @@ -13,125 +13,26 @@ trap 'rm -f $t ; exit 1' 1 2 3 13 15 client_file=../rpc_client/gen_client.c ctmpl_file=./template/gen_client_ret -dbinc_file=../dbinc/db.in -defs_file=../dbinc_auto/rpc_defs.in -header_file=../dbinc_auto/db_server.h -proc_file=../rpc_server/c/db_server_proc.c -rpcclnt_file=../rpc_client/db_server_clnt.c -rpcsvc_file=../rpc_server/c/db_server_svc.c -rpcxdr_file=../rpc_server/c/db_server_xdr.c -sed_file=../rpc_server/c/db_server_proc.sed server_file=../rpc_server/c/gen_db_server.c stmpl_file=./template/db_server_proc xdr_file=../rpc_server/db_server.x -# -# NOTE: We do NOT want to remove proc_file. It is what we apply $sed_file -# to, but we do not want to remove it, it does not get built in place. -rm -f $client_file \ - $ctmpl_file \ - $header_file \ - $rpcclnt_file \ - $rpcsvc_file \ - $rpcxdr_file \ - $sed_file \ - $server_file \ - $stmpl_file \ - $xdr_file +rm -f $client_file $ctmpl_file $server_file $stmpl_file $xdr_file # # Generate client/server/XDR code # xidsize=\ -`awk '/^#define/ { if ($2 == "DB_XIDDATASIZE") { print $3 }}' $dbinc_file` +`awk '/^#define/ { if ($2 == "DB_XIDDATASIZE") { print $3 }}' ../dbinc/db.in` awk -f gen_rpc.awk \ - -v major=$DB_VERSION_MAJOR \ - -v minor=$DB_VERSION_MINOR \ - -v xidsize=$xidsize \ -v client_file=$client_file \ -v ctmpl_file=$ctmpl_file \ - -v sed_file=$sed_file \ + -v major=$DB_VERSION_MAJOR \ + -v minor=$DB_VERSION_MINOR \ -v server_file=$server_file \ -v stmpl_file=$stmpl_file \ - -v xdr_file=$xdr_file < ../rpc_server/rpc.src -chmod 444 $client_file $server_file - -# -# Now run rpcgen to generate all our sources from the XDR file -# -rpcgen -h $xdr_file > $header_file -rpcgen -l $xdr_file > $rpcclnt_file -rpcgen -s tcp $xdr_file > $rpcsvc_file -rpcgen -c $xdr_file > $rpcxdr_file + -v xdr_file=$xdr_file \ + -v xidsize=$xidsize < ../rpc_server/rpc.src -# -# Run various server files through sed. 
-# -cat <$t -s/^#include[ ]"db_server.h"/#include "db_config.h"\\ -\\ -\\#ifndef NO_SYSTEM_INCLUDES\\ -\\#include \\ -\\#include / -/^#include /a\\ -\\#endif\\ -\\ -\\#include "db_int.h"\\ -\\#include "dbinc_auto/db_server.h"\\ -\\#include "dbinc/db_server_int.h"\\ -\\#include "dbinc_auto/rpc_server_ext.h" -/^ return;/i\\ -\\ __dbsrv_timeout(0); -s/svc_sendreply(transp, xdr_void,/svc_sendreply(transp, (xdrproc_t)xdr_void,/ -s/svc_getargs(transp, xdr_argument, &argument)/svc_getargs(transp, (xdrproc_t)xdr_argument, (char *)\&argument)/ -s/svc_sendreply(transp, xdr_result, result)/svc_sendreply(transp, (xdrproc_t)xdr_result, result)/ -s/svc_freeargs(transp, xdr_argument, &argument)/svc_freeargs(transp, (xdrproc_t)xdr_argument, (char *)\&argument)/ -s/^main/void __dbsrv_main/ -ENDOFSEDTEXT -sed -f $t $rpcsvc_file > ${rpcsvc_file}.new -mv ${rpcsvc_file}.new $rpcsvc_file - -sed -f $sed_file $proc_file > ${proc_file}.new -mv ${proc_file}.new $proc_file - -# Run rpcgen files through sed to add HAVE_RPC ifdef and appropriate -# includes. -cat <$t -1i\\ -\\#include "db_config.h"\\ -\\ -\\#ifdef HAVE_RPC -/^#include "db_server.h"/c\\ -\\#ifndef NO_SYSTEM_INCLUDES\\ -\\#include \\ -\\ -\\#include \\ -\\#endif\\ -\\ -\\#include "db_int.h"\\ -\\#include "dbinc_auto/db_server.h" -\$a\\ -\\#endif /* HAVE_RPC */ -ENDOFSEDTEXT - -sed -f $t $rpcxdr_file > ${rpcxdr_file}.new -mv ${rpcxdr_file}.new $rpcxdr_file -sed -f $t $rpcclnt_file > ${rpcclnt_file}.new -mv ${rpcclnt_file}.new $rpcclnt_file - -# Copy the DB_RPC SERVER #defines into a separate file so -# they can be part of db.h. -msgc="/* DO NOT EDIT: automatically built by dist/s_rpc. */" -(echo "" && echo "$msgc" && - sed -n -e "/DB_RPC_SERVER/p" $header_file) > $defs_file - -# Fix up the header file: -# Remove the DB_RPC_SERVER #defines. -# Remove the include, it needs to be included earlier -# than that. -sed -e "/DB_RPC_SERVER/d"\ - -e "/^#include.*/d" $header_file > ${header_file}.new -mv ${header_file}.new $header_file - -chmod 444 $header_file $rpcclnt_file $rpcsvc_file $rpcxdr_file +chmod 444 $client_file $server_file diff --git a/db/dist/s_symlink b/db/dist/s_symlink index 17498063b..533eb6b44 100755 --- a/db/dist/s_symlink +++ b/db/dist/s_symlink @@ -1,5 +1,5 @@ #!/bin/sh - -# $Id: s_symlink,v 1.31 2003/07/02 15:27:44 bostic Exp $ +# $Id: s_symlink,v 1.32 2004/04/01 15:10:53 bostic Exp $ echo 'Creating Berkeley DB source tree symbolic links...' @@ -51,6 +51,7 @@ build qam/tags ../dist/tags build rep/tags ../dist/tags build rpc_client/tags ../dist/tags build rpc_server/tags ../dist/tags +build sequence/tags ../dist/tags build tcl/tags ../dist/tags build txn/tags ../dist/tags build xa/tags ../dist/tags diff --git a/db/dist/s_tags b/db/dist/s_tags index 68967eba1..d1c21e5b2 100755 --- a/db/dist/s_tags +++ b/db/dist/s_tags @@ -1,5 +1,5 @@ #!/bin/sh - -# $Id: s_tags,v 1.17 2003/03/18 02:16:32 bostic Exp $ +# $Id: s_tags,v 1.18 2004/04/01 15:11:14 bostic Exp $ # # Build tags files. @@ -29,6 +29,7 @@ files=`echo ../dbinc/*.h \ ../rep/*.[ch] \ ../rpc_client/*.[ch] \ ../rpc_server/c/*.[ch] \ + ../sequence/*.[ch] \ ../tcl/*.[ch] \ ../txn/*.[ch] \ ../xa/*.[ch] \ diff --git a/db/dist/s_test b/db/dist/s_test index 012fa7a09..df0648d56 100755 --- a/db/dist/s_test +++ b/db/dist/s_test @@ -1,5 +1,5 @@ #!/bin/sh - -# $Id: s_test,v 1.28 2003/10/14 17:13:55 sandstro Exp $ +# $Id: s_test,v 1.29 2004/05/13 18:51:43 mjc Exp $ # # Build the Tcl test files. @@ -22,6 +22,7 @@ trap 'rm -f $t; exit 0' 0 1 2 3 13 15 echo "" && \ echo "set src_root @srcdir@/.." 
&& \ echo "set test_path @srcdir@/../test" && \ + echo "set je_root @srcdir@/../../je" && \ echo "" && \ echo "global testdir" && \ echo "set testdir ./TESTDIR" && \ @@ -46,6 +47,7 @@ cmp $t $f > /dev/null 2>&1 || echo "" && \ echo "set src_root .." && \ echo "set test_path ../test" && \ + echo "set je_root ../../je" && \ echo "" && \ echo "global testdir" && \ echo "set testdir ./TESTDIR" && \ diff --git a/db/dist/s_vxworks b/db/dist/s_vxworks index 8802f5e72..e46d6e413 100644 --- a/db/dist/s_vxworks +++ b/db/dist/s_vxworks @@ -1,5 +1,5 @@ #!/bin/sh - -# $Id: s_vxworks,v 1.42 2003/08/27 16:23:33 sue Exp $ +# $Id: s_vxworks,v 1.49 2004/10/07 18:01:56 sue Exp $ # # Build the VxWorks files. @@ -9,10 +9,12 @@ msgc="/* DO NOT EDIT: automatically built by dist/s_vxworks. */" s=/tmp/__db_a t=/tmp/__db_b -vxfilelist=/tmp/__db_c +u=/tmp/__db_c +vxfilelist=/tmp/__db_d +vxsmallfiles=/tmp/__db_e -trap 'rm -f $s $t $vxfilelist ; exit 0' 0 -trap 'rm -f $s $t $vxfilelist ; exit 1' 1 2 3 13 15 +trap 'rm -f $s $t $u $vxfilelist $vxsmallfiles ; exit 0' 0 +trap 'rm -f $s $t $u $vxfilelist $vxsmallfiles ; exit 1' 1 2 3 13 15 # Build the VxWorks automatically generated files. f=../build_vxworks/db.h @@ -27,17 +29,22 @@ i\\ #define __vxworks\\ #endif } +/@inttypes_h_decl@/d +/@stdint_h_decl@/d +/@stddef_h_decl@/d s/@u_int8_decl@/typedef unsigned char u_int8_t;/ -s/@u_int16_decl@/typedef unsigned short u_int16_t;/ -s/@u_int32_decl@/typedef unsigned int u_int32_t;/ -/@inttypes_decl@/d /@int16_decl@/d +s/@u_int16_decl@/typedef unsigned short u_int16_t;/ /@int32_decl@/d +s/@u_int32_decl@/typedef unsigned int u_int32_t;/ +s/@int64_decl@// +s/@u_int64_decl@// /@u_char_decl@/d /@u_short_decl@/d /@u_int_decl@/d /@u_long_decl@/d /@ssize_t_decl@/d +s/@db_seq_decl@/typedef int db_seq_t;/ s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/ s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/ s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/ @@ -49,7 +56,6 @@ s/@DB_PROTO2@/#define __P(protos) protos/ ENDOFSEDTEXT (echo "$msgc" && sed -f $s ../dbinc/db.in && - cat ../dbinc_auto/rpc_defs.in && cat ../dbinc_auto/ext_prot.in) > $t cmp $t $f > /dev/null 2>&1 || (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) @@ -57,8 +63,8 @@ cmp $t $f > /dev/null 2>&1 || f=../build_vxworks/db_int.h cat < $s s/@PATH_SEPARATOR@/\/\\\\\\\\/ -s/@db_align_t_decl@/typedef unsigned long db_align_t;/ -s/@db_alignp_t_decl@/typedef unsigned long db_alignp_t;/ +s/@uintmax_t_decl@/typedef unsigned long uintmax_t;/ +s/@uintptr_t_decl@/typedef unsigned long uintptr_t;/ s/@db_int_def@// ENDOFSEDTEXT (echo "$msgc" && sed -f $s ../dbinc/db_int.in) > $t @@ -70,6 +76,19 @@ f=../build_vxworks/db_config.h cmp $t $f > /dev/null 2>&1 || (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) +f=../build_vxworks/db_config_small.h +(echo "$msgc" && + sed -e "s/__EDIT_DB_VERSION__/$DB_VERSION/" \ + -e "s;^#define.*HAVE_CRYPTO.*1;/* #undef HAVE_CRYPTO */;" \ + -e "s;^#define.*HAVE_HASH.*1;/* #undef HAVE_HASH */;" \ + -e "s;^#define.*HAVE_QUEUE.*1;/* #undef HAVE_QUEUE */;" \ + -e "s;^#define.*HAVE_REPLICATION.*1;/* #undef HAVE_REPLICATION */;" \ + -e "s;^#define.*HAVE_STATISTICS.*1;/* #undef HAVE_STATISTICS */;" \ + -e "s;^#define.*HAVE_VERIFY.*1;/* #undef HAVE_VERIFY */;" \ + vx_config.in) > $t +cmp $t $f > /dev/null 2>&1 || + (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) + # Build a sed script that will change a "standard" DB utility into # VxWorks-compatible code. 
transform() @@ -182,8 +201,16 @@ sed -e '/^$/d' -e '/^[ #]/d' srcfiles.in | egrep -w vx | sed 's/[ ].*//' > $vxfilelist +# Build the list of files VxWorks knows about. +sed -e '/^$/d' -e '/^[ #]/d' srcfiles.in | + egrep -w vxsmall | + sed 's/[ ].*//' > $vxsmallfiles + # Build VxWorks Tornado 2.0 project files for the library itself. for v in 0 2 ; do + # + # Build regular project files + # (cat vx_2.${v}/BerkeleyDB.wpj for i in `cat $vxfilelist`; do o=" FILE_\$(PRJ_DIR)/../$i" @@ -215,120 +242,45 @@ for v in 0 2 ; do echo " userComments" echo "BerkeleyDB" echo "") > $t + # + # Build small lib project files + # + (cat vx_2.${v}/BerkeleyDBsmall.wpj + for i in `cat $vxsmallfiles`; do + o=" FILE_\$(PRJ_DIR)/../$i" + echo "${o}_dependDone" + echo "TRUE" + echo "" + echo + echo "${o}_dependencies" + echo "\$(PRJ_DIR)/db_config.h \\" + echo " \$(PRJ_DIR)/db_int.h \\" + echo " \$(PRJ_DIR)/db.h" + echo "" + echo + echo "${o}_objects" + echo "`basename $i .c`.o" + echo "" + echo + echo "${o}_tool" + echo "C/C++ compiler" + echo "" + echo + done + echo " PROJECT_FILES" + sed -e '$!s/$/ \\/' \ + -e 's/^/$(PRJ_DIR)\/..\//' \ + -e '1!s/^/ /' < $vxsmallfiles + echo "" + echo + echo " userComments" + echo "BerkeleyDB" + echo "") > $u f=../build_vxworks/BerkeleyDB2${v}.wpj cmp $t $f > /dev/null 2>&1 || (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) + f=../build_vxworks/BerkeleyDB2${v}small.wpj + cmp $u $f > /dev/null 2>&1 || + (echo "Building $f" && rm -f $f && cp $u $f && chmod 444 $f) done -# Build VxWorks Tornado 3.1 project files for the utilities. -for i in $PROGRAM_LIST; do - if [ $i = "ex_access" ]; then - target=dbdemo - dir=../examples_c - else - target=$i - dir=../$i - fi - - cp vx_3.1/Makefile.custom $t - f=../build_vxworks/$target/$target/Makefile.custom - cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) - - for j in component.cdf component.wpj; do - # - # Some parts of the component files needs to have the - # name in all capitals. Sigh. - # - z=`echo $target | tr "a-z" "A-Z"` - sed -e "s/__DB_APPLICATION_NAME__/$target/g" \ - -e "s/__DB_CAPAPPL_NAME__/$z/g" < vx_3.1/$j > $t - f=../build_vxworks/$target/$target/$j - cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) - done -done - -# Build VxWorks Tornado 3.1 project files for the library itself. 
-cp vx_3.1/Makefile.custom $t -f=../build_vxworks/BerkeleyDB/Makefile.custom -cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) - -(cat vx_3.1/cdf.1 -echo -n " MODULES" -for i in `cat $vxfilelist`; do - echo " `basename $i .c`.o" -done | sort | sed -e '$!s/$/ \\/' -cat vx_3.1/cdf.2 -for i in `cat $vxfilelist`; do - b="`basename $i .c`.o" - echo "Module $b {" - echo - echo " NAME $b" - echo " SRC_PATH_NAME \$(PRJ_DIR)/../../$i" - echo "}" - echo -done -cat vx_3.1/cdf.3)> $t -f=../build_vxworks/BerkeleyDB/component.cdf -cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) - -(cat vx_3.1/wpj.1 -for i in `cat $vxfilelist`; do - o=" BUILD_PENTIUM2gnu.debug_FILE_\$(PRJ_DIR)/../../$i" - echo "${o}_infoTags" - echo "toolMacro objects" - echo "" - echo - echo "${o}_objects" - echo "`basename $i .c`.o" - echo "" - echo - echo "${o}_toolMacro" - echo "CC" - echo "" - echo -done -cat vx_3.1/wpj.2 -for i in `cat $vxfilelist`; do - o=" BUILD_PENTIUM2gnu.release_FILE_\$(PRJ_DIR)/../../$i" - echo "${o}_infoTags" - echo "toolMacro objects" - echo "" - echo - echo "${o}_objects" - echo "`basename $i .c`.o" - echo "" - echo - echo "${o}_toolMacro" - echo "CC" - echo "" - echo -done -cat vx_3.1/wpj.3 -for i in `cat $vxfilelist`; do - o=" BUILD_PENTIUMgnu.debug_FILE_\$(PRJ_DIR)/../../$i" - echo "${o}_infoTags" - echo "toolMacro objects" - echo "" - echo - echo "${o}_objects" - echo "`basename $i .c`.o" - echo "" - echo - echo "${o}_toolMacro" - echo "CC" - echo "" - echo -done -cat vx_3.1/wpj.4 -sort $vxfilelist | -sed -e 's/^/$(PRJ_DIR)\/..\/..\//' \ - -e '1!s/^/ /' \ - -e '$!s/$/ \\/' -cat vx_3.1/wpj.5) > $t -f=../build_vxworks/BerkeleyDB/component.wpj -cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) diff --git a/db/dist/s_win32 b/db/dist/s_win32 index fd63a8a6a..0e8601ce8 100755 --- a/db/dist/s_win32 +++ b/db/dist/s_win32 @@ -1,5 +1,5 @@ #!/bin/sh - -# $Id: s_win32,v 1.28 2003/05/20 23:37:20 mjc Exp $ +# $Id: s_win32,v 1.36 2004/09/16 20:52:41 mjc Exp $ # # Build Windows/32 include files. @@ -17,15 +17,20 @@ trap 'rm -f $s $t ; exit 1' 1 2 3 13 15 # Build the Win32 automatically generated files. 
f=../build_win32/db.h cat < $s -/@inttypes_decl@/d +/@inttypes_h_decl@/d +/@stdint_h_decl@/d +s/@stddef_h_decl@/#include / s/@u_int8_decl@/typedef unsigned char u_int8_t;/ s/@int16_decl@/typedef short int16_t;/ s/@u_int16_decl@/typedef unsigned short u_int16_t;/ s/@int32_decl@/typedef int int32_t;/ s/@u_int32_decl@/typedef unsigned int u_int32_t;/ +s/@int64_decl@/typedef __int64 int64_t;/ +s/@u_int64_decl@/typedef unsigned __int64 u_int64_t;/ +s/@db_seq_decl@/typedef int64_t db_seq_t;/ /@u_char_decl@/{ i\\ -#if !defined(_WINSOCKAPI_) +#ifndef _WINSOCKAPI_ s/@u_char_decl@/typedef unsigned char u_char;/ } s/@u_short_decl@/typedef unsigned short u_short;/ @@ -37,10 +42,20 @@ s/@u_int_decl@/typedef unsigned int u_int;/ } /@ssize_t_decl@/{ i\\ -#if defined(_WIN64)\\ -typedef __int64 ssize_t;\\ +#ifdef _WIN64\\ +typedef int64_t ssize_t;\\ #else\\ -typedef int ssize_t;\\ +typedef int32_t ssize_t;\\ +#endif + d +} +s/@uintmax_t_decl@/typedef u_int64_t uintmax_t;/ +/@uintptr_t_decl@/{ + i\\ +#ifdef _WIN64\\ +typedef u_int64_t uintptr_t;\\ +#else\\ +typedef u_int32_t uintptr_t;\\ #endif d } @@ -55,32 +70,41 @@ s/@DB_PROTO2@/#define __P(protos) protos/ ENDOFSEDTEXT (echo "$msgc" && sed -f $s ../dbinc/db.in && - cat ../dbinc_auto/rpc_defs.in && cat ../dbinc_auto/ext_prot.in) > $t cmp $t $f > /dev/null 2>&1 || (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) +f=../build_win64/db.h +cmp $t $f > /dev/null 2>&1 || + (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) f=../build_win32/db_cxx.h cat < $s s/@cxx_have_stdheaders@/#define HAVE_CXX_STDHEADERS 1/ ENDOFSEDTEXT (echo "$msgc" && sed -f $s ../dbinc/db_cxx.in) > $t +cmp $t $f > /dev/null 2>&1 || + (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) +f=../build_win64/db_cxx.h cmp $t $f > /dev/null 2>&1 || (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) f=../build_win32/db_int.h cat < $s s/@PATH_SEPARATOR@/\\\\\\\\\/:/ -s/@db_align_t_decl@/typedef unsigned long db_align_t;/ -s/@db_alignp_t_decl@/typedef unsigned long db_alignp_t;/ s/@db_int_def@// ENDOFSEDTEXT (echo "$msgc" && sed -f $s ../dbinc/db_int.in) > $t +cmp $t $f > /dev/null 2>&1 || + (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) +f=../build_win64/db_int.h cmp $t $f > /dev/null 2>&1 || (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) f=../build_win32/db_config.h (echo "$msgc" && sed "s/__EDIT_DB_VERSION__/$DB_VERSION/" win_config.in) > $t +cmp $t $f > /dev/null 2>&1 || + (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) +f=../build_win64/db_config.h cmp $t $f > /dev/null 2>&1 || (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) @@ -96,9 +120,6 @@ cmp $t $f > /dev/null 2>&1 || f=../build_win32/libdb.def (echo $msgw && - echo && - echo \ - "DESCRIPTION 'Berkeley DB $DB_VERSION_MAJOR.$DB_VERSION_MINOR Library'" && echo && echo EXPORTS; a=1 @@ -111,6 +132,9 @@ cmp $t $f > /dev/null 2>&1 || f=../build_win32/win_db.h i=win_db.in +cmp $i $f > /dev/null 2>&1 || + (echo "Building $f" && rm -f $f && cp $i $f && chmod 444 $f) +f=../build_win64/win_db.h cmp $i $f > /dev/null 2>&1 || (echo "Building $f" && rm -f $f && cp $i $f && chmod 444 $f) diff --git a/db/dist/s_win32_dsp b/db/dist/s_win32_dsp index cd9ce00f0..10e0ccc4b 100644 --- a/db/dist/s_win32_dsp +++ b/db/dist/s_win32_dsp @@ -1,11 +1,10 @@ #!/bin/sh - -# $Id: s_win32_dsp,v 1.10 2003/03/24 10:35:21 mjc Exp $ +# $Id: s_win32_dsp,v 1.12 2004/08/20 15:01:06 mjc Exp $ # # Build Windows/32 .dsp files. . 
RELEASE -BUILDDIR=../build_win32 SRCFILES=srcfiles.in create_dsp() @@ -27,7 +26,7 @@ create_dsp() # forward slash, but we need that when feeding that to the # later sed command. set - `echo $srcpath | sed -e 's;\(.*\)/;../\\1 ;' \ - -e 's;../build_win32;.;' \ + -e "s;$BUILDDIR;.;" \ -e 's;/;\\\\\\\\;g'` srcdir="$1" srcfile="$2" @@ -68,49 +67,52 @@ sed -e "s/#.*$//" \ MODULES="`sed -e 's/^[^ ]* //' < $TMPA \ | tr ' ' '\012' | sort | uniq`" -for module in $MODULES +for BUILDDIR in ../build_win32 ../build_win64 do - case "$module" in - dynamic ) - create_dsp db_dll " $module " $TMPA \ - $BUILDDIR/dynamic_dsp.src $BUILDDIR/srcfile_dsp.src - ;; - small ) - create_dsp db_small " $module " $TMPA \ - $BUILDDIR/small_dsp.src $BUILDDIR/srcfile_dsp.src - ;; - static ) - create_dsp db_static " $module " $TMPA \ - $BUILDDIR/static_dsp.src $BUILDDIR/srcfile_dsp.src - ;; - java ) - create_dsp db_java " $module " $TMPA \ - $BUILDDIR/java_dsp.src $BUILDDIR/srcfile_dsp.src - ;; - tcl ) - create_dsp db_tcl " $module " $TMPA \ - $BUILDDIR/tcl_dsp.src $BUILDDIR/srcfile_dsp.src - ;; - testutil ) - create_dsp db_test " $module " $TMPA \ - $BUILDDIR/db_test.src $BUILDDIR/srcfile_dsp.src - ;; - app=* ) - appname=`echo $module | sed -e 's/^app=//'` - if [ -f $BUILDDIR/$appname.src ] ; then - srcname=$BUILDDIR/$appname.src - else - srcname=$BUILDDIR/app_dsp.src - fi - create_dsp $appname " $module " $TMPA \ - $srcname $BUILDDIR/srcfile_dsp.src - ;; - vx ) - ;; - * ) - echo "s_win32_dsp: module name $module in $SRCFILES is unknown type" - ;; - esac + for module in $MODULES + do + case "$module" in + dynamic ) + create_dsp db_dll " $module " $TMPA \ + $BUILDDIR/dynamic_dsp.src $BUILDDIR/srcfile_dsp.src + ;; + small ) + create_dsp db_small " $module " $TMPA \ + $BUILDDIR/small_dsp.src $BUILDDIR/srcfile_dsp.src + ;; + static ) + create_dsp db_static " $module " $TMPA \ + $BUILDDIR/static_dsp.src $BUILDDIR/srcfile_dsp.src + ;; + java ) + create_dsp db_java " $module " $TMPA \ + $BUILDDIR/java_dsp.src $BUILDDIR/srcfile_dsp.src + ;; + tcl ) + create_dsp db_tcl " $module " $TMPA \ + $BUILDDIR/tcl_dsp.src $BUILDDIR/srcfile_dsp.src + ;; + testutil ) + create_dsp db_test " $module " $TMPA \ + $BUILDDIR/db_test.src $BUILDDIR/srcfile_dsp.src + ;; + app=* ) + appname=`echo $module | sed -e 's/^app=//'` + if [ -f $BUILDDIR/$appname.src ] ; then + srcname=$BUILDDIR/$appname.src + else + srcname=$BUILDDIR/app_dsp.src + fi + create_dsp $appname " $module " $TMPA \ + $srcname $BUILDDIR/srcfile_dsp.src + ;; + vx|vxsmall ) + ;; + * ) + echo "s_win32_dsp: module name $module in $SRCFILES is unknown type" + ;; + esac + done done rm -f $TMPA diff --git a/db/dist/srcfiles.in b/db/dist/srcfiles.in index 3eb8733a3..d45dbf8c6 100644 --- a/db/dist/srcfiles.in +++ b/db/dist/srcfiles.in @@ -1,4 +1,4 @@ -# $Id: srcfiles.in,v 1.69 2003/11/21 02:24:47 bostic Exp $ +# $Id: srcfiles.in,v 1.86 2004/09/23 19:54:23 bostic Exp $ # # This is an input file for the s_win32_dsp and s_vxworks scripts. It lists # the source files in the Berkeley DB tree and notes which are used to build @@ -19,25 +19,27 @@ # tcl File is in the Windows tcl DLL (db_tcl.dsp) # testutil File is used for Windows testing (db_test.dsp) # vx File is in the VxWorks library. +# vxsmall File is in the small VxWorks library. 
-btree/bt_compare.c dynamic small static vx -btree/bt_conv.c dynamic small static vx -btree/bt_curadj.c dynamic small static vx -btree/bt_cursor.c dynamic small static vx -btree/bt_delete.c dynamic small static vx -btree/bt_method.c dynamic small static vx -btree/bt_open.c dynamic small static vx -btree/bt_put.c dynamic small static vx -btree/bt_rec.c dynamic small static vx -btree/bt_reclaim.c dynamic small static vx -btree/bt_recno.c dynamic small static vx -btree/bt_rsearch.c dynamic small static vx -btree/bt_search.c dynamic small static vx -btree/bt_split.c dynamic small static vx -btree/bt_stat.c dynamic small static vx -btree/bt_upgrade.c dynamic small static vx +btree/bt_compare.c dynamic small static vx vxsmall +btree/bt_conv.c dynamic small static vx vxsmall +btree/bt_curadj.c dynamic small static vx vxsmall +btree/bt_cursor.c dynamic small static vx vxsmall +btree/bt_delete.c dynamic small static vx vxsmall +btree/bt_method.c dynamic small static vx vxsmall +btree/bt_open.c dynamic small static vx vxsmall +btree/bt_put.c dynamic small static vx vxsmall +btree/bt_rec.c dynamic small static vx vxsmall +btree/bt_reclaim.c dynamic small static vx vxsmall +btree/bt_recno.c dynamic small static vx vxsmall +btree/bt_rsearch.c dynamic small static vx vxsmall +btree/bt_search.c dynamic small static vx vxsmall +btree/bt_split.c dynamic small static vx vxsmall +btree/bt_stat.c dynamic small static vx vxsmall +btree/bt_upgrade.c dynamic small static vx vxsmall btree/bt_verify.c dynamic static vx -btree/btree_auto.c dynamic small static vx +btree/btree_auto.c dynamic small static vx vxsmall +btree/btree_autop.c app=db_printlog build_vxworks/db_archive/db_archive.c build_vxworks/db_checkpoint/db_checkpoint.c build_vxworks/db_deadlock/db_deadlock.c @@ -54,25 +56,24 @@ build_win32/libdb.def dynamic build_win32/libdb.rc dynamic build_win32/libdb_tcl.def tcl clib/getcwd.c -clib/getopt.c vx +clib/getopt.c vx vxsmall clib/memcmp.c clib/memmove.c clib/raise.c -clib/snprintf.c vx -clib/strcasecmp.c dynamic small static vx -clib/strdup.c vx +clib/snprintf.c vx vxsmall +clib/strcasecmp.c dynamic small static vx vxsmall +clib/strdup.c vx vxsmall clib/strerror.c -clib/vsnprintf.c vx -common/crypto_stub.c small -common/db_byteorder.c dynamic small static vx -common/db_err.c dynamic small static vx -common/db_getlong.c dynamic small static vx -common/db_idspace.c dynamic small static vx -common/db_log2.c dynamic small static vx -common/util_arg.c vx -common/util_cache.c dynamic small static vx -common/util_log.c dynamic small static vx -common/util_sig.c dynamic small static vx +common/crypto_stub.c small vxsmall +common/db_byteorder.c dynamic small static vx vxsmall +common/db_err.c dynamic small static vx vxsmall +common/db_getlong.c dynamic small static vx vxsmall +common/db_idspace.c dynamic small static vx vxsmall +common/db_log2.c dynamic small static vx vxsmall +common/util_arg.c vx vxsmall +common/util_cache.c dynamic small static vx vxsmall +common/util_log.c dynamic small static vx vxsmall +common/util_sig.c dynamic small static vx vxsmall crypto/aes_method.c dynamic static vx crypto/crypto.c dynamic static vx crypto/mersenne/mt19937db.c dynamic static vx @@ -87,34 +88,40 @@ cxx/cxx_lock.cpp dynamic small static cxx/cxx_logc.cpp dynamic small static cxx/cxx_mpool.cpp dynamic small static cxx/cxx_multi.cpp dynamic small static +cxx/cxx_seq.cpp dynamic small static cxx/cxx_txn.cpp dynamic small static -db/crdel_auto.c dynamic small static vx -db/crdel_rec.c dynamic small static vx -db/db.c dynamic 
small static vx -db/db_am.c dynamic small static vx -db/db_auto.c dynamic small static vx -db/db_cam.c dynamic small static vx -db/db_conv.c dynamic small static vx -db/db_dispatch.c dynamic small static vx -db/db_dup.c dynamic small static vx -db/db_iface.c dynamic small static vx -db/db_join.c dynamic small static vx -db/db_meta.c dynamic small static vx -db/db_method.c dynamic small static vx -db/db_open.c dynamic small static vx -db/db_overflow.c dynamic small static vx +db/crdel_auto.c dynamic small static vx vxsmall +db/crdel_autop.c app=db_printlog +db/crdel_rec.c dynamic small static vx vxsmall +db/db.c dynamic small static vx vxsmall +db/db_am.c dynamic small static vx vxsmall +db/db_auto.c dynamic small static vx vxsmall +db/db_autop.c app=db_printlog +db/db_cam.c dynamic small static vx vxsmall +db/db_conv.c dynamic small static vx vxsmall +db/db_dispatch.c dynamic small static vx vxsmall +db/db_dup.c dynamic small static vx vxsmall +db/db_iface.c dynamic small static vx vxsmall +db/db_join.c dynamic small static vx vxsmall +db/db_meta.c dynamic small static vx vxsmall +db/db_method.c dynamic small static vx vxsmall +db/db_open.c dynamic small static vx vxsmall +db/db_overflow.c dynamic small static vx vxsmall db/db_ovfl_vrfy.c dynamic static vx -db/db_pr.c dynamic small static vx -db/db_rec.c dynamic small static vx -db/db_reclaim.c dynamic small static vx -db/db_remove.c dynamic small static vx -db/db_rename.c dynamic small static vx -db/db_ret.c dynamic small static vx -db/db_truncate.c dynamic small static vx -db/db_upg.c dynamic small static vx -db/db_upg_opd.c dynamic small static vx +db/db_pr.c dynamic small static vx vxsmall +db/db_rec.c dynamic small static vx vxsmall +db/db_reclaim.c dynamic small static vx vxsmall +db/db_remove.c dynamic small static vx vxsmall +db/db_rename.c dynamic small static vx vxsmall +db/db_ret.c dynamic small static vx vxsmall +db/db_setid.c dynamic small static vx vxsmall +db/db_setlsn.c dynamic small static vx vxsmall +db/db_stati.c dynamic small static vx vxsmall +db/db_truncate.c dynamic small static vx vxsmall +db/db_upg.c dynamic small static vx vxsmall +db/db_upg_opd.c dynamic small static vx vxsmall db/db_vrfy.c dynamic static vx -db/db_vrfy_stub.c small +db/db_vrfy_stub.c small vxsmall db/db_vrfyutil.c dynamic static vx db185/db185.c db_archive/db_archive.c app=db_archive @@ -125,25 +132,32 @@ db_dump185/db_dump185.c db_load/db_load.c app=db_load db_printlog/db_printlog.c app=db_printlog db_recover/db_recover.c app=db_recover +db_server_clnt.c +db_server_svc.c +db_server_xdr.c db_stat/db_stat.c app=db_stat db_upgrade/db_upgrade.c app=db_upgrade db_verify/db_verify.c app=db_verify dbm/dbm.c dynamic static -dbreg/dbreg.c dynamic small static vx -dbreg/dbreg_auto.c dynamic small static vx -dbreg/dbreg_rec.c dynamic small static vx -dbreg/dbreg_util.c dynamic small static vx -env/db_salloc.c dynamic small static vx -env/db_shash.c dynamic small static vx -env/env_file.c dynamic small static vx -env/env_method.c dynamic small static vx -env/env_open.c dynamic small static vx -env/env_recover.c dynamic small static vx -env/env_region.c dynamic small static vx +dbreg/dbreg.c dynamic small static vx vxsmall +dbreg/dbreg_auto.c dynamic small static vx vxsmall +dbreg/dbreg_autop.c app=db_printlog +dbreg/dbreg_rec.c dynamic small static vx vxsmall +dbreg/dbreg_stat.c dynamic small static vx vxsmall +dbreg/dbreg_util.c dynamic small static vx vxsmall +env/db_salloc.c dynamic small static vx vxsmall +env/db_shash.c dynamic small static vx vxsmall 
+env/env_file.c dynamic small static vx vxsmall +env/env_method.c dynamic small static vx vxsmall +env/env_open.c dynamic small static vx vxsmall +env/env_recover.c dynamic small static vx vxsmall +env/env_region.c dynamic small static vx vxsmall +env/env_stat.c dynamic small static vx vxsmall examples_c/bench_001.c examples_c/ex_access.c app=ex_access examples_c/ex_apprec/ex_apprec.c examples_c/ex_apprec/ex_apprec_auto.c +examples_c/ex_apprec/ex_apprec_autop.c examples_c/ex_apprec/ex_apprec_rec.c examples_c/ex_btrec.c app=ex_btrec examples_c/ex_dbclient.c @@ -155,23 +169,34 @@ examples_c/ex_repquote/ex_rq_main.c app=ex_repquote examples_c/ex_repquote/ex_rq_master.c app=ex_repquote examples_c/ex_repquote/ex_rq_net.c app=ex_repquote examples_c/ex_repquote/ex_rq_util.c app=ex_repquote +examples_c/ex_sequence.c examples_c/ex_thread.c examples_c/ex_tpcb.c app=ex_tpcb +examples_c/getting_started/example_database_load.c +examples_c/getting_started/example_database_read.c +examples_c/getting_started/gettingstarted_common.c examples_cxx/AccessExample.cpp app=excxx_access examples_cxx/BtRecExample.cpp app=excxx_btrec examples_cxx/EnvExample.cpp app=excxx_env examples_cxx/LockExample.cpp app=excxx_lock examples_cxx/MpoolExample.cpp app=excxx_mpool +examples_cxx/SequenceExample.cpp examples_cxx/TpcbExample.cpp app=excxx_tpcb -fileops/fileops_auto.c dynamic small static vx -fileops/fop_basic.c dynamic small static vx -fileops/fop_rec.c dynamic small static vx -fileops/fop_util.c dynamic small static vx +examples_cxx/getting_started/MyDb.cpp +examples_cxx/getting_started/excxx_example_database_load.cpp +examples_cxx/getting_started/excxx_example_database_read.cpp +fileops/fileops_auto.c dynamic small static vx vxsmall +fileops/fileops_autop.c app=db_printlog +fileops/fop_basic.c dynamic small static vx vxsmall +fileops/fop_rec.c dynamic small static vx vxsmall +fileops/fop_util.c dynamic small static vx vxsmall +gen_db_server.c hash/hash.c dynamic static vx hash/hash_auto.c dynamic static vx +hash/hash_autop.c app=db_printlog hash/hash_conv.c dynamic static vx hash/hash_dup.c dynamic static vx -hash/hash_func.c dynamic small static vx +hash/hash_func.c dynamic small static vx vxsmall hash/hash_meta.c dynamic static vx hash/hash_method.c dynamic static vx hash/hash_open.c dynamic static vx @@ -179,71 +204,77 @@ hash/hash_page.c dynamic static vx hash/hash_rec.c dynamic static vx hash/hash_reclaim.c dynamic static vx hash/hash_stat.c dynamic static vx -hash/hash_stub.c small +hash/hash_stub.c small vxsmall hash/hash_upgrade.c dynamic static vx hash/hash_verify.c dynamic static vx -hmac/hmac.c dynamic small static vx -hmac/sha1.c dynamic small static vx +hmac/hmac.c dynamic small static vx vxsmall +hmac/sha1.c dynamic small static vx vxsmall hsearch/hsearch.c dynamic static vx libdb_java/db_java_wrap.c java -lock/lock.c dynamic small static vx -lock/lock_deadlock.c dynamic small static vx -lock/lock_method.c dynamic small static vx -lock/lock_region.c dynamic small static vx -lock/lock_stat.c dynamic small static vx -lock/lock_util.c dynamic small static vx -log/log.c dynamic small static vx -log/log_archive.c dynamic small static vx -log/log_compare.c dynamic small static vx -log/log_get.c dynamic small static vx -log/log_method.c dynamic small static vx -log/log_put.c dynamic small static vx -mp/mp_alloc.c dynamic small static vx -mp/mp_bh.c dynamic small static vx -mp/mp_fget.c dynamic small static vx -mp/mp_fopen.c dynamic small static vx -mp/mp_fput.c dynamic small static vx -mp/mp_fset.c dynamic 
small static vx -mp/mp_method.c dynamic small static vx -mp/mp_region.c dynamic small static vx -mp/mp_register.c dynamic small static vx -mp/mp_stat.c dynamic small static vx -mp/mp_sync.c dynamic small static vx -mp/mp_trickle.c dynamic small static vx +lock/lock.c dynamic small static vx vxsmall +lock/lock_deadlock.c dynamic small static vx vxsmall +lock/lock_id.c dynamic small static vx vxsmall +lock/lock_list.c dynamic small static vx vxsmall +lock/lock_method.c dynamic small static vx vxsmall +lock/lock_region.c dynamic small static vx vxsmall +lock/lock_stat.c dynamic small static vx vxsmall +lock/lock_timer.c dynamic small static vx vxsmall +lock/lock_util.c dynamic small static vx vxsmall +log/log.c dynamic small static vx vxsmall +log/log_archive.c dynamic small static vx vxsmall +log/log_compare.c dynamic small static vx vxsmall +log/log_get.c dynamic small static vx vxsmall +log/log_method.c dynamic small static vx vxsmall +log/log_put.c dynamic small static vx vxsmall +log/log_stat.c dynamic small static vx vxsmall +mp/mp_alloc.c dynamic small static vx vxsmall +mp/mp_bh.c dynamic small static vx vxsmall +mp/mp_fget.c dynamic small static vx vxsmall +mp/mp_fmethod.c dynamic small static vx vxsmall +mp/mp_fopen.c dynamic small static vx vxsmall +mp/mp_fput.c dynamic small static vx vxsmall +mp/mp_fset.c dynamic small static vx vxsmall +mp/mp_method.c dynamic small static vx vxsmall +mp/mp_region.c dynamic small static vx vxsmall +mp/mp_register.c dynamic small static vx vxsmall +mp/mp_stat.c dynamic small static vx vxsmall +mp/mp_sync.c dynamic small static vx vxsmall +mp/mp_trickle.c dynamic small static vx vxsmall mutex/mut_fcntl.c mutex/mut_pthread.c -mutex/mut_tas.c vx +mutex/mut_tas.c vx vxsmall mutex/mut_win32.c dynamic small static -mutex/mutex.c dynamic small static vx +mutex/mutex.c dynamic small static vx vxsmall mutex/tm.c os/os_abs.c -os/os_alloc.c dynamic small static vx -os/os_clock.c vx +os/os_alloc.c dynamic small static vx vxsmall +os/os_clock.c vx vxsmall os/os_config.c -os/os_dir.c vx -os/os_errno.c vx -os/os_fid.c vx -os/os_fsync.c vx -os/os_handle.c vx -os/os_id.c dynamic small static vx +os/os_dir.c vx vxsmall +os/os_errno.c vx vxsmall +os/os_fid.c vx vxsmall +os/os_fsync.c vx vxsmall +os/os_handle.c vx vxsmall +os/os_id.c dynamic small static vx vxsmall os/os_map.c -os/os_method.c dynamic small static vx -os/os_oflags.c dynamic small static vx -os/os_open.c vx -os/os_region.c dynamic small static vx -os/os_rename.c vx -os/os_root.c dynamic small static vx -os/os_rpath.c dynamic small static vx -os/os_rw.c vx -os/os_seek.c vx -os/os_sleep.c vx -os/os_spin.c vx -os/os_stat.c vx -os/os_tmpdir.c dynamic small static vx -os/os_unlink.c dynamic small static vx -os_vxworks/os_vx_abs.c vx -os_vxworks/os_vx_config.c vx -os_vxworks/os_vx_map.c vx +os/os_method.c dynamic small static vx vxsmall +os/os_oflags.c dynamic small static vx vxsmall +os/os_open.c vx vxsmall +os/os_region.c dynamic small static vx vxsmall +os/os_rename.c vx vxsmall +os/os_root.c dynamic small static vx vxsmall +os/os_rpath.c dynamic small static vx vxsmall +os/os_rw.c vx vxsmall +os/os_seek.c vx vxsmall +os/os_sleep.c vx vxsmall +os/os_spin.c vx vxsmall +os/os_stat.c vx vxsmall +os/os_tmpdir.c dynamic small static vx vxsmall +os/os_truncate.c vx vxsmall +os/os_unlink.c vx vxsmall +os_vxworks/os_vx_abs.c vx vxsmall +os_vxworks/os_vx_config.c vx vxsmall +os_vxworks/os_vx_map.c vx vxsmall os_win32/os_abs.c dynamic small static os_win32/os_clock.c dynamic small static os_win32/os_config.c 
dynamic small static @@ -260,33 +291,38 @@ os_win32/os_seek.c dynamic small static os_win32/os_sleep.c dynamic small static os_win32/os_spin.c dynamic small static os_win32/os_stat.c dynamic small static +os_win32/os_truncate.c dynamic small static +os_win32/os_unlink.c dynamic small static qam/qam.c dynamic static vx qam/qam_auto.c dynamic static vx +qam/qam_autop.c app=db_printlog qam/qam_conv.c dynamic static vx qam/qam_files.c dynamic static vx qam/qam_method.c dynamic static vx qam/qam_open.c dynamic static vx qam/qam_rec.c dynamic static vx qam/qam_stat.c dynamic static vx -qam/qam_stub.c small +qam/qam_stub.c small vxsmall qam/qam_upgrade.c dynamic static vx qam/qam_verify.c dynamic static vx +rep/rep_auto.c dynamic static vx +rep/rep_autop.c app=db_printlog +rep/rep_backup.c dynamic static vx rep/rep_method.c dynamic static vx rep/rep_record.c dynamic static vx rep/rep_region.c dynamic static vx -rep/rep_stub.c small +rep/rep_stat.c dynamic static vx +rep/rep_stub.c small vxsmall rep/rep_util.c dynamic static vx -rpc_client/client.c vx -rpc_client/db_server_clnt.c vx -rpc_client/gen_client.c vx -rpc_client/gen_client_ret.c vx +rpc_client/client.c +rpc_client/gen_client.c +rpc_client/gen_client_ret.c rpc_server/c/db_server_proc.c -rpc_server/c/db_server_svc.c rpc_server/c/db_server_util.c -rpc_server/c/db_server_xdr.c vx -rpc_server/c/gen_db_server.c rpc_server/cxx/db_server_cxxproc.cpp rpc_server/cxx/db_server_cxxutil.cpp +sequence/seq_stat.c dynamic small static +sequence/sequence.c dynamic small static tcl/tcl_compat.c tcl tcl/tcl_db.c tcl tcl/tcl_db_pkg.c tcl @@ -297,6 +333,7 @@ tcl/tcl_lock.c tcl tcl/tcl_log.c tcl tcl/tcl_mp.c tcl tcl/tcl_rep.c tcl +tcl/tcl_seq.c tcl tcl/tcl_txn.c tcl tcl/tcl_util.c tcl test_perf/db_perf.c app=db_perf @@ -320,14 +357,15 @@ test_perf/perf_trickle.c app=db_perf test_perf/perf_txn.c app=db_perf test_perf/perf_util.c app=db_perf test_perf/perf_vx.c -txn/txn.c dynamic small static vx -txn/txn_auto.c dynamic small static vx -txn/txn_method.c dynamic small static vx -txn/txn_rec.c dynamic small static vx -txn/txn_recover.c dynamic small static vx -txn/txn_region.c dynamic small static vx -txn/txn_stat.c dynamic small static vx -txn/txn_util.c dynamic small static vx -xa/xa.c dynamic small static vx -xa/xa_db.c dynamic small static vx -xa/xa_map.c dynamic small static vx +txn/txn.c dynamic small static vx vxsmall +txn/txn_auto.c dynamic small static vx vxsmall +txn/txn_autop.c app=db_printlog +txn/txn_method.c dynamic small static vx vxsmall +txn/txn_rec.c dynamic small static vx vxsmall +txn/txn_recover.c dynamic small static vx vxsmall +txn/txn_region.c dynamic small static vx vxsmall +txn/txn_stat.c dynamic small static vx vxsmall +txn/txn_util.c dynamic small static vx vxsmall +xa/xa.c dynamic small static vx vxsmall +xa/xa_db.c dynamic small static vx vxsmall +xa/xa_map.c dynamic small static vx vxsmall diff --git a/db/dist/tags b/db/dist/tags index a08477953..1e2cce270 100644 --- a/db/dist/tags +++ b/db/dist/tags @@ -1,16 +1,17 @@ ACQUIRE ../btree/bt_cursor.c /^#define ACQUIRE(dbc, mode, lpgno, lock, fpgno, pag/ ACQUIRE_COUPLE ../btree/bt_cursor.c /^#define ACQUIRE_COUPLE(dbc, mode, lpgno, lock, fpg/ -ACQUIRE_CUR ../btree/bt_cursor.c /^#define ACQUIRE_CUR(dbc, mode, p, ret) { \\$/ -ACQUIRE_CUR_COUPLE ../btree/bt_cursor.c /^#define ACQUIRE_CUR_COUPLE(dbc, mode, p, ret) { / -ACQUIRE_WRITE_LOCK ../btree/bt_cursor.c /^#define ACQUIRE_WRITE_LOCK(dbc, ret) { \\$/ -ACTION ../dbinc/db.in 1965 +ACQUIRE_CUR ../btree/bt_cursor.c /^#define ACQUIRE_CUR(dbc, 
mode, p, ret) do { \\$/ +ACQUIRE_CUR_COUPLE ../btree/bt_cursor.c /^#define ACQUIRE_CUR_COUPLE(dbc, mode, p, ret) do {/ +ACQUIRE_WRITE_LOCK ../btree/bt_cursor.c /^#define ACQUIRE_WRITE_LOCK(dbc, ret) do { \\$/ +ACTION ../dbinc/db.in 2149 ACTIVATE_CTP ../dbinc/db_server_int.h /^#define ACTIVATE_CTP(ctp, id, type) { \\$/ -ALIGN ../dbinc/db_int.in /^#define ALIGN(v, bound) (((v) + (bound) - 1) & ~((/ +ALIGNP_DEC ../dbinc/db_int.in /^#define ALIGNP_DEC(p, bound) \\$/ +ALIGNP_INC ../dbinc/db_int.in /^#define ALIGNP_INC(p, bound) \\$/ ANYSITE ../rep/rep_record.c /^#define ANYSITE(rep)$/ -APPNAME ../dbinc/db_int.in 216 -BACKUP_PREFIX ../db/db.c 994 -BADARG ../clib/getopt.c 61 -BADCH ../clib/getopt.c 59 +APPNAME ../dbinc/db_int.in 288 +BACKUP_PREFIX ../db/db.c 1008 +BADARG ../clib/getopt.c 59 +BADCH ../clib/getopt.c 57 BAD_BLOCK_LENGTH ../crypto/rijndael/rijndael-api-fst.h 64 BAD_CIPHER_INSTANCE ../crypto/rijndael/rijndael-api-fst.h 65 BAD_CIPHER_MODE ../crypto/rijndael/rijndael-api-fst.h 62 @@ -19,16 +20,18 @@ BAD_DATA ../crypto/rijndael/rijndael-api-fst.h 66 BAD_KEY_DIR ../crypto/rijndael/rijndael-api-fst.h 59 BAD_KEY_INSTANCE ../crypto/rijndael/rijndael-api-fst.h 61 BAD_KEY_MAT ../crypto/rijndael/rijndael-api-fst.h 60 -BAD_KILLID ../lock/lock_deadlock.c 42 +BAD_KILLID ../lock/lock_deadlock.c 40 BAD_OTHER ../crypto/rijndael/rijndael-api-fst.h 67 BFMSG ../db185/db185.c 176 BH ../dbinc/mp.h 13 -BH_CALLPGIN ../dbinc/mp.h 303 -BH_DIRTY ../dbinc/mp.h 304 -BH_DIRTY_CREATE ../dbinc/mp.h 305 -BH_DISCARD ../dbinc/mp.h 306 -BH_LOCKED ../dbinc/mp.h 307 -BH_TRASH ../dbinc/mp.h 308 +BH_CALLPGIN ../dbinc/mp.h 331 +BH_DIRTY ../dbinc/mp.h 332 +BH_DIRTY_CREATE ../dbinc/mp.h 333 +BH_DISCARD ../dbinc/mp.h 334 +BH_FREE_FREEMEM ../dbinc/mp.h 318 +BH_FREE_UNLOCKED ../dbinc/mp.h 319 +BH_LOCKED ../dbinc/mp.h 335 +BH_TRASH ../dbinc/mp.h 336 BINTERNAL_PSIZE ../dbinc/db_page.h /^#define BINTERNAL_PSIZE(len) \\$/ BINTERNAL_SIZE ../dbinc/db_page.h /^#define BINTERNAL_SIZE(len) \\$/ BITSPERBLOCK ../crypto/rijndael/rijndael-api-fst.h 56 @@ -82,6 +85,7 @@ CHARKEY ../dbinc/hash.h 92 CHECK_LSN ../dbinc/log.h /^#define CHECK_LSN(redo, cmp, lsn, prev) \\$/ CIPHER_AES ../dbinc/crypto.h 45 CIPHER_ANY ../dbinc/crypto.h 49 +CKPLSN_CMP ../dbinc/log.h 363 CLEAR_BYTE ../dbinc/debug.h 28 CLEAR_MAP ../lock/lock_deadlock.c /^#define CLEAR_MAP(M, N) { \\$/ CLIENT_ONLY ../rep/rep_record.c /^#define CLIENT_ONLY(rep, rp) do { \\$/ @@ -89,7 +93,7 @@ CLOSE_HANDLE ../fileops/fop_util.c /^#define CLOSE_HANDLE(D, F) { \\$/ CLR_MAP ../lock/lock_deadlock.c /^#define CLR_MAP(M, B) ((M)[(B) \/ 32] &= ~(1 << ((B/ COMPQUIET ../dbinc/debug.h /^#define COMPQUIET(n, v) \\$/ CONFLICTS ../dbinc/lock.h /^#define CONFLICTS(T, R, HELD, WANTED) \\$/ -COPY_OBJ ../lock/lock.c /^#define COPY_OBJ(dp, obj) do { \\$/ +COPY_OBJ ../lock/lock_list.c /^#define COPY_OBJ(dp, obj) do { \\$/ COPY_RET_MEM ../dbinc/db_int.in /^#define COPY_RET_MEM(src, dest) \\$/ CRYPTO_ON ../dbinc/db_int.in /^#define CRYPTO_ON(dbenv) ((dbenv)->crypto_handle !/ CT_CURSOR ../dbinc/db_server_int.h 32 @@ -104,428 +108,452 @@ C_EQUAL ../btree/bt_recno.c /^#define C_EQUAL(cp1, cp2) \\$/ C_LESSTHAN ../btree/bt_recno.c /^#define C_LESSTHAN(cp1, cp2) \\$/ C_RECNUM ../dbinc/btree.h 221 C_RENUMBER ../dbinc/btree.h 228 -DATA_INIT_CNT ../env/env_method.c 603 -DB ../dbinc/db.in 97 -DB2JDBENV ../libdb_java/db_java_wrap.c 385 -DBC ../dbinc/db.in 122 -DBC2JDBENV ../libdb_java/db_java_wrap.c 386 -DBC_ACTIVE ../dbinc/db.in 1496 -DBC_COMPENSATE ../dbinc/db.in 1497 -DBC_DIRTY_READ ../dbinc/db.in 1498 -DBC_INTERNAL 
../dbinc/db.in 123 +DATA_INIT_CNT ../env/env_method.c 640 +DB ../dbinc/db.in 127 +DB2JDBENV ../libdb_java/db_java_wrap.c 812 +DBC ../dbinc/db.in 155 +DBC2JDBENV ../libdb_java/db_java_wrap.c 813 +DBC_ACTIVE ../dbinc/db.in 1646 +DBC_COMPENSATE ../dbinc/db.in 1647 +DBC_DEGREE_2 ../dbinc/db.in 1648 +DBC_DIRTY_READ ../dbinc/db.in 1649 +DBC_INTERNAL ../dbinc/db.in 156 DBC_LOGGING ../dbinc/db_int.in /^#define DBC_LOGGING(dbc) \\$/ DBC_METHOD ../cxx/cxx_dbc.cpp /^#define DBC_METHOD(_name, _argspec, _arglist, _ret/ -DBC_MULTIPLE ../dbinc/db.in 1505 -DBC_MULTIPLE_KEY ../dbinc/db.in 1506 -DBC_OPD ../dbinc/db.in 1499 -DBC_OWN_LID ../dbinc/db.in 1507 -DBC_RECOVER ../dbinc/db.in 1500 -DBC_RMW ../dbinc/db.in 1501 -DBC_TRANSIENT ../dbinc/db.in 1502 -DBC_WRITECURSOR ../dbinc/db.in 1503 -DBC_WRITER ../dbinc/db.in 1504 +DBC_MULTIPLE ../dbinc/db.in 1656 +DBC_MULTIPLE_KEY ../dbinc/db.in 1657 +DBC_OPD ../dbinc/db.in 1650 +DBC_OWN_LID ../dbinc/db.in 1658 +DBC_RECOVER ../dbinc/db.in 1651 +DBC_RMW ../dbinc/db.in 1652 +DBC_TRANSIENT ../dbinc/db.in 1653 +DBC_WRITECURSOR ../dbinc/db.in 1654 +DBC_WRITER ../dbinc/db.in 1655 DBENV_LOGGING ../dbinc/db_int.in /^#define DBENV_LOGGING(dbenv) \\$/ DBENV_METHOD ../cxx/cxx_env.cpp /^#define DBENV_METHOD(_name, _argspec, _arglist) / DBENV_METHOD_ERR ../cxx/cxx_env.cpp /^#define DBENV_METHOD_ERR(_name, _argspec, _arglist/ DBENV_METHOD_QUIET ../cxx/cxx_env.cpp /^#define DBENV_METHOD_QUIET(_name, _argspec, _argli/ DBENV_METHOD_VOID ../cxx/cxx_env.cpp /^#define DBENV_METHOD_VOID(_name, _argspec, _arglis/ -DBLOG_FORCE_OPEN ../dbinc/log.h 104 -DBLOG_RECOVER ../dbinc/log.h 103 -DBM ../dbinc/db.in 1902 +DBLOG_FORCE_OPEN ../dbinc/log.h 106 +DBLOG_RECOVER ../dbinc/log.h 105 +DBM ../dbinc/db.in 2086 DBMETASIZE ../dbinc/db_page.h 180 DBMETA_CHKSUM ../dbinc/db_page.h 76 -DBM_INSERT ../dbinc/db.in 1904 -DBM_REPLACE ../dbinc/db.in 1905 -DBM_SUFFIX ../dbinc/db.in 1911 +DBM_INSERT ../dbinc/db.in 2088 +DBM_REPLACE ../dbinc/db.in 2089 +DBM_SUFFIX ../dbinc/db.in 2095 DBREG_CHKPNT ../dbinc/log.h 49 DBREG_CLOSE ../dbinc/log.h 50 DBREG_OPEN ../dbinc/log.h 51 DBREG_RCLOSE ../dbinc/log.h 52 -DBT ../dbinc/db.in 100 -DBTBUFLEN ../db/db_pr.c 756 -DBTCL_DBM ../dbinc/tcl_db.h 21 -DBTCL_NDBM ../dbinc/tcl_db.h 22 -DBTCL_PREP ../dbinc/tcl_db.h 19 +DBREP_OPENFILES ../dbinc/rep.h 255 +DBSEQ_METHOD ../cxx/cxx_seq.cpp /^#define DBSEQ_METHOD(_name, _argspec, _arglist, _d/ +DBT ../dbinc/db.in 130 +DBTBUFLEN ../db/db_pr.c 1122 +DBTCL_DBM ../dbinc/tcl_db.h 22 +DBTCL_NDBM ../dbinc/tcl_db.h 23 +DBTCL_PREP ../dbinc/tcl_db.h 20 DBTXN_METHOD ../cxx/cxx_txn.cpp /^#define DBTXN_METHOD(_name, _delete, _argspec, _ar/ -DBTYPE ../dbinc/db.in 965 +DBTYPE ../dbinc/db.in 1101 DB_ADDSTR ../env/env_open.c /^#define DB_ADDSTR(add) { \\$/ DB_ADD_BIG ../dbinc/db_am.h 25 DB_ADD_DUP ../dbinc/db_am.h 23 -DB_ADD_PAGE ../dbinc/db_am.h 27 DB_AES_CHUNK ../dbinc/crypto.h 68 DB_AES_KEYLEN ../dbinc/crypto.h 67 -DB_AFTER ../dbinc/db.in 985 -DB_AGGRESSIVE ../dbinc/db.in 293 -DB_ALREADY_ABORTED ../dbinc/db.in 1075 -DB_AM_CHKSUM ../dbinc/db.in 1334 -DB_AM_CL_WRITER ../dbinc/db.in 1335 -DB_AM_COMPENSATE ../dbinc/db.in 1336 -DB_AM_CREATED ../dbinc/db.in 1337 -DB_AM_CREATED_MSTR ../dbinc/db.in 1338 -DB_AM_DBM_ERROR ../dbinc/db.in 1339 -DB_AM_DELIMITER ../dbinc/db.in 1340 -DB_AM_DIRTY ../dbinc/db.in 1341 -DB_AM_DISCARD ../dbinc/db.in 1342 -DB_AM_DUP ../dbinc/db.in 1343 -DB_AM_DUPSORT ../dbinc/db.in 1344 -DB_AM_ENCRYPT ../dbinc/db.in 1345 -DB_AM_FIXEDLEN ../dbinc/db.in 1346 -DB_AM_INMEM ../dbinc/db.in 1347 -DB_AM_IN_RENAME ../dbinc/db.in 1348 
-DB_AM_NOT_DURABLE ../dbinc/db.in 1349 -DB_AM_OPEN_CALLED ../dbinc/db.in 1350 -DB_AM_PAD ../dbinc/db.in 1351 -DB_AM_PGDEF ../dbinc/db.in 1352 -DB_AM_RDONLY ../dbinc/db.in 1353 -DB_AM_RECNUM ../dbinc/db.in 1354 -DB_AM_RECOVER ../dbinc/db.in 1355 -DB_AM_RENUMBER ../dbinc/db.in 1356 -DB_AM_REPLICATION ../dbinc/db.in 1357 -DB_AM_REVSPLITOFF ../dbinc/db.in 1358 -DB_AM_SECONDARY ../dbinc/db.in 1359 -DB_AM_SNAPSHOT ../dbinc/db.in 1360 -DB_AM_SUBDB ../dbinc/db.in 1361 -DB_AM_SWAP ../dbinc/db.in 1362 -DB_AM_TXN ../dbinc/db.in 1363 -DB_AM_VERIFYING ../dbinc/db.in 1364 -DB_APPEND ../dbinc/db.in 986 -DB_ARCH_ABS ../dbinc/db.in 467 -DB_ARCH_DATA ../dbinc/db.in 468 -DB_ARCH_LOG ../dbinc/db.in 469 -DB_ARCH_REMOVE ../dbinc/db.in 470 +DB_AFTER ../dbinc/db.in 1123 +DB_AGGRESSIVE ../dbinc/db.in 344 +DB_ALIGN ../dbinc/db_int.in /^#define DB_ALIGN(v, bound) \\$/ +DB_ALREADY_ABORTED ../dbinc/db.in 1215 +DB_AM_CHKSUM ../dbinc/db.in 1483 +DB_AM_CL_WRITER ../dbinc/db.in 1484 +DB_AM_COMPENSATE ../dbinc/db.in 1485 +DB_AM_CREATED ../dbinc/db.in 1486 +DB_AM_CREATED_MSTR ../dbinc/db.in 1487 +DB_AM_DBM_ERROR ../dbinc/db.in 1488 +DB_AM_DELIMITER ../dbinc/db.in 1489 +DB_AM_DIRTY ../dbinc/db.in 1490 +DB_AM_DISCARD ../dbinc/db.in 1491 +DB_AM_DUP ../dbinc/db.in 1492 +DB_AM_DUPSORT ../dbinc/db.in 1493 +DB_AM_ENCRYPT ../dbinc/db.in 1494 +DB_AM_FIXEDLEN ../dbinc/db.in 1495 +DB_AM_INMEM ../dbinc/db.in 1496 +DB_AM_INORDER ../dbinc/db.in 1497 +DB_AM_IN_RENAME ../dbinc/db.in 1498 +DB_AM_NOT_DURABLE ../dbinc/db.in 1499 +DB_AM_OPEN_CALLED ../dbinc/db.in 1500 +DB_AM_PAD ../dbinc/db.in 1501 +DB_AM_PGDEF ../dbinc/db.in 1502 +DB_AM_RDONLY ../dbinc/db.in 1503 +DB_AM_RECNUM ../dbinc/db.in 1504 +DB_AM_RECOVER ../dbinc/db.in 1505 +DB_AM_RENUMBER ../dbinc/db.in 1506 +DB_AM_REPLICATION ../dbinc/db.in 1507 +DB_AM_REVSPLITOFF ../dbinc/db.in 1508 +DB_AM_SECONDARY ../dbinc/db.in 1509 +DB_AM_SNAPSHOT ../dbinc/db.in 1510 +DB_AM_SUBDB ../dbinc/db.in 1511 +DB_AM_SWAP ../dbinc/db.in 1512 +DB_AM_TXN ../dbinc/db.in 1513 +DB_AM_VERIFYING ../dbinc/db.in 1514 +DB_APPEND ../dbinc/db.in 1124 +DB_ARCH_ABS ../dbinc/db.in 520 +DB_ARCH_DATA ../dbinc/db.in 521 +DB_ARCH_LOG ../dbinc/db.in 522 +DB_ARCH_REMOVE ../dbinc/db.in 523 DB_ASSERT ../dbinc/debug.h /^#define DB_ASSERT(e) ((e) ? 
(void)0 : __db_assert(/ -DB_AUTO_COMMIT ../dbinc/db.in 190 -DB_BEFORE ../dbinc/db.in 987 +DB_AUTO_COMMIT ../dbinc/db.in 225 +DB_BEFORE ../dbinc/db.in 1125 DB_BEGIN_SINGLE_THREAD ../dbinc/mutex.h 17 -DB_BTREEMAGIC ../dbinc/db.in 971 -DB_BTREEOLDVER ../dbinc/db.in 970 -DB_BTREEVERSION ../dbinc/db.in 969 -DB_BTREE_STAT ../dbinc/db.in 98 -DB_CACHED_COUNTS ../dbinc/db.in 988 +DB_BTREEMAGIC ../dbinc/db.in 1107 +DB_BTREEOLDVER ../dbinc/db.in 1106 +DB_BTREEVERSION ../dbinc/db.in 1105 +DB_BTREE_STAT ../dbinc/db.in 128 +DB_BUFFER_SMALL ../dbinc/db.in 1187 +DB_CACHED_COUNTS ../dbinc/db.in 1126 DB_CACHESIZE_MIN ../dbinc/mp.h 19 -DB_CACHE_PRIORITY ../dbinc/db.in 588 +DB_CACHE_PRIORITY ../dbinc/db.in 651 DB_CALLBACK_C_INTERCEPT ../cxx/cxx_db.cpp /^#define DB_CALLBACK_C_INTERCEPT(_name, _rettype, _/ -DB_CDB_ALLDB ../dbinc/db.in 242 -DB_CHKSUM ../dbinc/db.in 271 -DB_CIPHER ../dbinc/db.in 99 -DB_COMMIT_FLUSH ../dbinc/log.h 264 -DB_CONSUME ../dbinc/db.in 989 -DB_CONSUME_WAIT ../dbinc/db.in 990 -DB_CREATE ../dbinc/db.in 157 -DB_CURRENT ../dbinc/db.in 991 -DB_CXX_NO_EXCEPTIONS ../dbinc/db.in 158 -DB_CXX_PRIVATE_ENV ../dbinc/cxx_int.h 79 -DB_DBM_HSEARCH ../dbinc/db.in 1896 -DB_DBT_APPMALLOC ../dbinc/db.in 142 -DB_DBT_DUPOK ../dbinc/db.in 148 -DB_DBT_ISSET ../dbinc/db.in 143 -DB_DBT_MALLOC ../dbinc/db.in 144 -DB_DBT_PARTIAL ../dbinc/db.in 145 -DB_DBT_REALLOC ../dbinc/db.in 146 -DB_DBT_USERMEM ../dbinc/db.in 147 -DB_DEF_IOSIZE ../dbinc/db_int.in 65 -DB_DELETED ../dbinc/db.in 1076 -DB_DIRECT ../dbinc/db.in 264 -DB_DIRECT_DB ../dbinc/db.in 243 -DB_DIRECT_LOG ../dbinc/db.in 244 -DB_DIRTY_READ ../dbinc/db.in 191 -DB_DONOTINDEX ../dbinc/db.in 1049 -DB_DUP ../dbinc/db.in 272 -DB_DUPSORT ../dbinc/db.in 273 -DB_EID_BROADCAST ../dbinc/db.in 890 -DB_EID_INVALID ../dbinc/db.in 891 -DB_ENCRYPT ../dbinc/db.in 274 -DB_ENCRYPT_AES ../dbinc/db.in 237 +DB_CDB_ALLDB ../dbinc/db.in 278 +DB_CHKSUM ../dbinc/db.in 314 +DB_CIPHER ../dbinc/db.in 129 +DB_COMMIT_FLUSH ../dbinc/log.h 292 +DB_CONSUME ../dbinc/db.in 1127 +DB_CONSUME_WAIT ../dbinc/db.in 1128 +DB_CREATE ../dbinc/db.in 190 +DB_CURRENT ../dbinc/db.in 1129 +DB_CXX_NO_EXCEPTIONS ../dbinc/db.in 191 +DB_CXX_PRIVATE_ENV ../dbinc/cxx_int.h 75 +DB_DBM_HSEARCH ../dbinc/db.in 2080 +DB_DBT_APPMALLOC ../dbinc/db.in 175 +DB_DBT_DUPOK ../dbinc/db.in 181 +DB_DBT_ISSET ../dbinc/db.in 176 +DB_DBT_MALLOC ../dbinc/db.in 177 +DB_DBT_PARTIAL ../dbinc/db.in 178 +DB_DBT_REALLOC ../dbinc/db.in 179 +DB_DBT_USERMEM ../dbinc/db.in 180 +DB_DEF_IOSIZE ../dbinc/db_int.in 96 +DB_DEGREE_2 ../dbinc/db.in 226 +DB_DELETED ../dbinc/db.in 1216 +DB_DESTRUCTOR ../cxx/cxx_db.cpp /^#define DB_DESTRUCTOR(_name, _argspec, _arglist, _/ +DB_DIRECT ../dbinc/db.in 306 +DB_DIRECT_DB ../dbinc/db.in 279 +DB_DIRECT_LOG ../dbinc/db.in 280 +DB_DIRTY_READ ../dbinc/db.in 227 +DB_DONOTINDEX ../dbinc/db.in 1188 +DB_DSYNC_LOG ../dbinc/db.in 281 +DB_DUP ../dbinc/db.in 315 +DB_DUPSORT ../dbinc/db.in 316 +DB_DURABLE_UNKNOWN ../dbinc/db.in 307 +DB_EID_BROADCAST ../dbinc/db.in 957 +DB_EID_INVALID ../dbinc/db.in 958 +DB_ENCRYPT ../dbinc/db.in 317 +DB_ENCRYPT_AES ../dbinc/db.in 273 DB_ENC_MAGIC ../dbinc/hmac.h 29 DB_END_SINGLE_THREAD ../dbinc/mutex.h 18 -DB_ENV ../dbinc/db.in 101 -DB_ENV_AUTO_COMMIT ../dbinc/db.in 1866 -DB_ENV_CDB ../dbinc/db.in 1867 -DB_ENV_CDB_ALLDB ../dbinc/db.in 1868 -DB_ENV_CREATE ../dbinc/db.in 1869 -DB_ENV_DBLOCAL ../dbinc/db.in 1870 -DB_ENV_DIRECT_DB ../dbinc/db.in 1871 -DB_ENV_DIRECT_LOG ../dbinc/db.in 1872 -DB_ENV_FATAL ../dbinc/db.in 1873 +DB_ENV ../dbinc/db.in 131 +DB_ENV_AUTO_COMMIT ../dbinc/db.in 2049 
+DB_ENV_CDB ../dbinc/db.in 2050 +DB_ENV_CDB_ALLDB ../dbinc/db.in 2051 +DB_ENV_CREATE ../dbinc/db.in 2052 +DB_ENV_DBLOCAL ../dbinc/db.in 2053 +DB_ENV_DIRECT_DB ../dbinc/db.in 2054 +DB_ENV_DIRECT_LOG ../dbinc/db.in 2055 +DB_ENV_DSYNC_LOG ../dbinc/db.in 2056 +DB_ENV_FATAL ../dbinc/db.in 2057 DB_ENV_INTERNAL ../libdb_java/db_java_wrap.c /^#define DB_ENV_INTERNAL(dbenv) ((dbenv)->api2_inte/ -DB_ENV_LOCKDOWN ../dbinc/db.in 1874 -DB_ENV_LOG_AUTOREMOVE ../dbinc/db.in 1875 -DB_ENV_NOLOCKING ../dbinc/db.in 1876 -DB_ENV_NOMMAP ../dbinc/db.in 1877 -DB_ENV_NOPANIC ../dbinc/db.in 1878 -DB_ENV_OPEN_CALLED ../dbinc/db.in 1879 -DB_ENV_OVERWRITE ../dbinc/db.in 1880 -DB_ENV_PRIVATE ../dbinc/db.in 1881 -DB_ENV_REGION_INIT ../dbinc/db.in 1882 -DB_ENV_RPCCLIENT ../dbinc/db.in 1883 -DB_ENV_RPCCLIENT_GIVEN ../dbinc/db.in 1884 -DB_ENV_SYSTEM_MEM ../dbinc/db.in 1885 -DB_ENV_TEST_RECOVERY ../dbinc/debug.h /^#define DB_ENV_TEST_RECOVERY(env, val, ret, name) / -DB_ENV_THREAD ../dbinc/db.in 1886 -DB_ENV_TIME_NOTGRANTED ../dbinc/db.in 1887 -DB_ENV_TXN_NOSYNC ../dbinc/db.in 1888 -DB_ENV_TXN_NOT_DURABLE ../dbinc/db.in 1889 -DB_ENV_TXN_WRITE_NOSYNC ../dbinc/db.in 1890 -DB_ENV_YIELDCPU ../dbinc/db.in 1891 +DB_ENV_LOCKDOWN ../dbinc/db.in 2058 +DB_ENV_LOG_AUTOREMOVE ../dbinc/db.in 2059 +DB_ENV_LOG_INMEMORY ../dbinc/db.in 2060 +DB_ENV_NOLOCKING ../dbinc/db.in 2061 +DB_ENV_NOMMAP ../dbinc/db.in 2062 +DB_ENV_NOPANIC ../dbinc/db.in 2063 +DB_ENV_OPEN_CALLED ../dbinc/db.in 2064 +DB_ENV_OVERWRITE ../dbinc/db.in 2065 +DB_ENV_PRIVATE ../dbinc/db.in 2066 +DB_ENV_REGION_INIT ../dbinc/db.in 2067 +DB_ENV_RPCCLIENT ../dbinc/db.in 2068 +DB_ENV_RPCCLIENT_GIVEN ../dbinc/db.in 2069 +DB_ENV_SYSTEM_MEM ../dbinc/db.in 2070 +DB_ENV_TEST_RECOVERY ../dbinc/debug.h /^#define DB_ENV_TEST_RECOVERY(env, val, ret, name) / +DB_ENV_THREAD ../dbinc/db.in 2071 +DB_ENV_TIME_NOTGRANTED ../dbinc/db.in 2072 +DB_ENV_TXN_NOSYNC ../dbinc/db.in 2073 +DB_ENV_TXN_WRITE_NOSYNC ../dbinc/db.in 2074 +DB_ENV_YIELDCPU ../dbinc/db.in 2075 DB_ERROR ../dbinc/cxx_int.h /^#define DB_ERROR(env, caller, ecode, policy) \\$/ DB_ERROR_DBT ../dbinc/cxx_int.h /^#define DB_ERROR_DBT(env, caller, dbt, policy) \\$/ -DB_EXCL ../dbinc/db.in 223 -DB_EXTENT ../dbinc/db.in 265 -DB_FAST_STAT ../dbinc/db.in 992 -DB_FCNTL_LOCKING ../dbinc/db.in 224 -DB_FCNTL_OFF_GEN ../dbinc/mutex.h 954 -DB_FCNTL_OFF_LOCK ../dbinc/mutex.h 955 -DB_FCNTL_OFF_MPOOL ../dbinc/mutex.h 956 -DB_FH ../dbinc/db.in 124 -DB_FH_NOSYNC ../dbinc/os.h 72 -DB_FH_OPENED ../dbinc/os.h 73 -DB_FH_UNLINK ../dbinc/os.h 74 -DB_FILEOPEN ../dbinc/db.in 1050 -DB_FILE_ID_LEN ../dbinc/db.in 317 -DB_FIRST ../dbinc/db.in 993 -DB_FLUSH ../dbinc/db.in 473 -DB_FORCE ../dbinc/db.in 159 -DB_FTYPE_NOTSET ../dbinc/db_int.in 362 -DB_FTYPE_SET ../dbinc/db_int.in 361 -DB_GET_BOTH ../dbinc/db.in 994 -DB_GET_BOTHC ../dbinc/db.in 995 -DB_GET_BOTH_RANGE ../dbinc/db.in 996 -DB_GET_RECNO ../dbinc/db.in 997 +DB_EXCL ../dbinc/db.in 259 +DB_EXTENT ../dbinc/db.in 308 +DB_FAST_STAT ../dbinc/db.in 1130 +DB_FCNTL_LOCKING ../dbinc/db.in 260 +DB_FCNTL_OFF_GEN ../dbinc/mutex.h 960 +DB_FCNTL_OFF_LOCK ../dbinc/mutex.h 961 +DB_FCNTL_OFF_MPOOL ../dbinc/mutex.h 962 +DB_FH ../dbinc/db.in 157 +DB_FH_NOSYNC ../dbinc/os.h 84 +DB_FH_OPENED ../dbinc/os.h 85 +DB_FH_UNLINK ../dbinc/os.h 86 +DB_FILE_ID_LEN ../dbinc/db.in 369 +DB_FIRST ../dbinc/db.in 1131 +DB_FLUSH ../dbinc/db.in 526 +DB_FORCE ../dbinc/db.in 192 +DB_FTYPE_NOTSET ../dbinc/db_int.in 437 +DB_FTYPE_SET ../dbinc/db_int.in 436 +DB_GET_BOTH ../dbinc/db.in 1132 +DB_GET_BOTHC ../dbinc/db.in 1133 +DB_GET_BOTH_RANGE 
../dbinc/db.in 1134 +DB_GET_RECNO ../dbinc/db.in 1135 DB_GLOBAL ../dbinc/globals.h /^#define DB_GLOBAL(v) __db_global_values.v$/ -DB_GROW_SIZE ../dbinc/log.h 87 -DB_HANDLE_LOCK ../dbinc/db.in 431 -DB_HASHMAGIC ../dbinc/db.in 975 -DB_HASHOLDVER ../dbinc/db.in 974 -DB_HASHVERSION ../dbinc/db.in 973 +DB_GROW_SIZE ../dbinc/log.h 89 +DB_HANDLE_LOCK ../dbinc/db.in 484 +DB_HASHMAGIC ../dbinc/db.in 1111 +DB_HASHOLDVER ../dbinc/db.in 1110 +DB_HASHVERSION ../dbinc/db.in 1109 DB_HASH_DUP ../dbinc/db_page.h 123 DB_HASH_DUPSORT ../dbinc/db_page.h 125 -DB_HASH_STAT ../dbinc/db.in 102 +DB_HASH_STAT ../dbinc/db.in 132 DB_HASH_SUBDB ../dbinc/db_page.h 124 DB_HTONL ../dbinc/db_swap.h /^#define DB_HTONL(p) do { \\$/ DB_ILLEGAL_AFTER_OPEN ../dbinc/db_int.in /^#define DB_ILLEGAL_AFTER_OPEN(dbp, name) \\$/ DB_ILLEGAL_BEFORE_OPEN ../dbinc/db_int.in /^#define DB_ILLEGAL_BEFORE_OPEN(dbp, name) \\$/ DB_ILLEGAL_IN_ENV ../dbinc/db_int.in /^#define DB_ILLEGAL_IN_ENV(dbp, name) \\$/ DB_ILLEGAL_METHOD ../dbinc/db_int.in /^#define DB_ILLEGAL_METHOD(dbp, flags) { \\$/ -DB_INITENV_CDB ../env/env_open.c 96 -DB_INITENV_CDB_ALLDB ../env/env_open.c 97 -DB_INITENV_LOCK ../env/env_open.c 98 -DB_INITENV_LOG ../env/env_open.c 99 -DB_INITENV_MPOOL ../env/env_open.c 100 -DB_INITENV_REP ../env/env_open.c 101 -DB_INITENV_TXN ../env/env_open.c 102 -DB_INITIALIZE_DB_GLOBALS ../env/env_method.c 29 -DB_INIT_CDB ../dbinc/db.in 208 -DB_INIT_LOCK ../dbinc/db.in 209 -DB_INIT_LOG ../dbinc/db.in 210 -DB_INIT_MPOOL ../dbinc/db.in 211 -DB_INIT_REP ../dbinc/db.in 212 -DB_INIT_TXN ../dbinc/db.in 213 +DB_INITENV_CDB ../env/env_open.c 95 +DB_INITENV_CDB_ALLDB ../env/env_open.c 96 +DB_INITENV_LOCK ../env/env_open.c 97 +DB_INITENV_LOG ../env/env_open.c 98 +DB_INITENV_MPOOL ../env/env_open.c 99 +DB_INITENV_REP ../env/env_open.c 100 +DB_INITENV_TXN ../env/env_open.c 101 +DB_INITIALIZE_DB_GLOBALS ../env/env_method.c 31 +DB_INIT_CDB ../dbinc/db.in 244 +DB_INIT_LOCK ../dbinc/db.in 245 +DB_INIT_LOG ../dbinc/db.in 246 +DB_INIT_MPOOL ../dbinc/db.in 247 +DB_INIT_REP ../dbinc/db.in 248 +DB_INIT_TXN ../dbinc/db.in 249 +DB_INORDER ../dbinc/db.in 318 DB_INTERNAL ../libdb_java/db_java_wrap.c /^#define DB_INTERNAL(db) ((db)->api_internal)$/ -DB_IO_READ ../dbinc/os.h 43 -DB_IO_WRITE ../dbinc/os.h 44 +DB_IO_READ ../dbinc/os.h 55 +DB_IO_WRITE ../dbinc/os.h 56 DB_IS_THREADED ../dbinc/db_int.in /^#define DB_IS_THREADED(dbp) \\$/ -DB_IV_BYTES ../dbinc/db_int.in 412 -DB_JOINENV ../dbinc/db.in 214 -DB_JOIN_ITEM ../dbinc/db.in 998 -DB_JOIN_NOSORT ../dbinc/db.in 288 -DB_KEYEMPTY ../dbinc/db.in 1051 -DB_KEYEXIST ../dbinc/db.in 1052 -DB_KEYFIRST ../dbinc/db.in 999 -DB_KEYLAST ../dbinc/db.in 1000 -DB_KEY_RANGE ../dbinc/db.in 126 -DB_LAST ../dbinc/db.in 1001 -DB_LINE ../dbinc/db_int.in 153 -DB_LOCK ../dbinc/db.in 105 -DB_LOCKDOWN ../dbinc/db.in 215 +DB_IV_BYTES ../dbinc/db_int.in 489 +DB_JOINENV ../dbinc/db.in 250 +DB_JOIN_ITEM ../dbinc/db.in 1136 +DB_JOIN_NOSORT ../dbinc/db.in 339 +DB_KEYEMPTY ../dbinc/db.in 1189 +DB_KEYEXIST ../dbinc/db.in 1190 +DB_KEYFIRST ../dbinc/db.in 1137 +DB_KEYLAST ../dbinc/db.in 1138 +DB_KEY_RANGE ../dbinc/db.in 159 +DB_LAST ../dbinc/db.in 1139 +DB_LOCK ../dbinc/db.in 135 +DB_LOCKDOWN ../dbinc/db.in 251 DB_LOCKER_DELETED ../dbinc/lock.h 143 DB_LOCKER_DIRTY ../dbinc/lock.h 144 DB_LOCKER_INABORT ../dbinc/lock.h 145 DB_LOCKER_TIMEOUT ../dbinc/lock.h 146 -DB_LOCKREQ ../dbinc/db.in 106 -DB_LOCKVERSION ../dbinc/db.in 315 -DB_LOCK_CDB_N ../lock/lock_region.c 55 -DB_LOCK_DEADLOCK ../dbinc/db.in 1053 -DB_LOCK_DEFAULT ../dbinc/db.in 324 +DB_LOCKREQ ../dbinc/db.in 
136 +DB_LOCKVERSION ../dbinc/db.in 367 +DB_LOCK_ABORT ../dbinc/db.in 387 +DB_LOCK_CDB_N ../lock/lock_region.c 53 +DB_LOCK_DEADLOCK ../dbinc/db.in 1191 +DB_LOCK_DEFAULT ../dbinc/db.in 376 DB_LOCK_DEFAULT_N ../dbinc/lock.h 13 -DB_LOCK_DOALL ../dbinc/lock.h 196 -DB_LOCK_EXPIRE ../dbinc/db.in 325 -DB_LOCK_FREE ../dbinc/lock.h 197 -DB_LOCK_ILOCK ../dbinc/db.in 103 +DB_LOCK_DOALL ../dbinc/lock.h 200 +DB_LOCK_DOWNGRADE ../dbinc/lock.h 201 +DB_LOCK_EXPIRE ../dbinc/db.in 377 +DB_LOCK_FREE ../dbinc/lock.h 202 +DB_LOCK_ILOCK ../dbinc/db.in 133 DB_LOCK_INVALIDID ../dbinc/lock.h 20 DB_LOCK_MAXID ../dbinc/lock.h 21 -DB_LOCK_MAXLOCKS ../dbinc/db.in 326 -DB_LOCK_MINLOCKS ../dbinc/db.in 327 -DB_LOCK_MINWRITE ../dbinc/db.in 328 -DB_LOCK_NOPROMOTE ../dbinc/lock.h 198 -DB_LOCK_NORUN ../dbinc/db.in 323 -DB_LOCK_NOTEXIST ../dbinc/db.in 1077 -DB_LOCK_NOTGRANTED ../dbinc/db.in 1054 -DB_LOCK_NOWAIT ../dbinc/db.in 334 -DB_LOCK_NOWAITERS ../dbinc/lock.h 200 -DB_LOCK_OLDEST ../dbinc/db.in 329 -DB_LOCK_RANDOM ../dbinc/db.in 330 -DB_LOCK_RECORD ../dbinc/db.in 335 -DB_LOCK_REMOVE ../dbinc/db.in 336 -DB_LOCK_RIW_N ../lock/lock_region.c 36 -DB_LOCK_SET_TIMEOUT ../dbinc/db.in 337 -DB_LOCK_STAT ../dbinc/db.in 104 -DB_LOCK_SWITCH ../dbinc/db.in 338 -DB_LOCK_UNLINK ../dbinc/lock.h 199 -DB_LOCK_UPGRADE ../dbinc/db.in 339 -DB_LOCK_YOUNGEST ../dbinc/db.in 331 +DB_LOCK_MAXLOCKS ../dbinc/db.in 378 +DB_LOCK_MAXWRITE ../dbinc/db.in 379 +DB_LOCK_MINLOCKS ../dbinc/db.in 380 +DB_LOCK_MINWRITE ../dbinc/db.in 381 +DB_LOCK_NOPROMOTE ../dbinc/lock.h 203 +DB_LOCK_NOREGION ../dbinc/lock.h 205 +DB_LOCK_NORUN ../dbinc/db.in 375 +DB_LOCK_NOTEXIST ../dbinc/db.in 1217 +DB_LOCK_NOTGRANTED ../dbinc/db.in 1192 +DB_LOCK_NOWAIT ../dbinc/db.in 388 +DB_LOCK_NOWAITERS ../dbinc/lock.h 206 +DB_LOCK_OLDEST ../dbinc/db.in 382 +DB_LOCK_RANDOM ../dbinc/db.in 383 +DB_LOCK_RECORD ../dbinc/db.in 389 +DB_LOCK_REMOVE ../dbinc/db.in 390 +DB_LOCK_RIW_N ../lock/lock_region.c 34 +DB_LOCK_SET_TIMEOUT ../dbinc/db.in 391 +DB_LOCK_STAT ../dbinc/db.in 134 +DB_LOCK_SWITCH ../dbinc/db.in 392 +DB_LOCK_UNLINK ../dbinc/lock.h 204 +DB_LOCK_UPGRADE ../dbinc/db.in 393 +DB_LOCK_YOUNGEST ../dbinc/db.in 384 DB_LOG ../dbinc/log.h 58 -DB_LOGC ../dbinc/db.in 107 -DB_LOGC_BUF_SIZE ../dbinc/db.in 521 -DB_LOGFILEID_INVALID ../dbinc/db.in 1118 -DB_LOGMAGIC ../dbinc/db.in 464 -DB_LOGOLDVER ../dbinc/db.in 463 -DB_LOGVERSION ../dbinc/db.in 462 -DB_LOG_AUTOREMOVE ../dbinc/db.in 245 -DB_LOG_CHKPNT ../dbinc/db.in 474 -DB_LOG_COMMIT ../dbinc/db.in 475 -DB_LOG_DISK ../dbinc/db.in 533 -DB_LOG_LOCKED ../dbinc/db.in 534 -DB_LOG_NOCOPY ../dbinc/db.in 476 -DB_LOG_NOT_DURABLE ../dbinc/db.in 477 -DB_LOG_PERM ../dbinc/db.in 478 -DB_LOG_SILENT_ERR ../dbinc/db.in 535 -DB_LOG_STAT ../dbinc/db.in 108 -DB_LOG_WRNOSYNC ../dbinc/db.in 479 -DB_LSN ../dbinc/db.in 109 -DB_MAC_KEY ../dbinc/db_int.in 413 +DB_LOGC ../dbinc/db.in 137 +DB_LOGC_BUF_SIZE ../dbinc/db.in 575 +DB_LOGFILEID_INVALID ../dbinc/db.in 1261 +DB_LOGMAGIC ../dbinc/db.in 517 +DB_LOGOLDVER ../dbinc/db.in 516 +DB_LOGVERSION ../dbinc/db.in 515 +DB_LOG_AUTOREMOVE ../dbinc/db.in 282 +DB_LOG_BUFFER_FULL ../dbinc/db.in 1193 +DB_LOG_CHKPNT ../dbinc/db.in 527 +DB_LOG_COMMIT ../dbinc/db.in 528 +DB_LOG_DISK ../dbinc/db.in 587 +DB_LOG_INMEMORY ../dbinc/db.in 283 +DB_LOG_LOCKED ../dbinc/db.in 588 +DB_LOG_NOCOPY ../dbinc/db.in 529 +DB_LOG_NOT_DURABLE ../dbinc/db.in 530 +DB_LOG_PERM ../dbinc/db.in 531 +DB_LOG_RESEND ../dbinc/db.in 532 +DB_LOG_SILENT_ERR ../dbinc/db.in 589 +DB_LOG_STAT ../dbinc/db.in 138 +DB_LOG_WRNOSYNC ../dbinc/db.in 533 +DB_LSN ../dbinc/db.in 139 
+DB_MAC_KEY ../dbinc/db_int.in 490 DB_MAC_MAGIC ../dbinc/hmac.h 28 -DB_MAXMMAPSIZE ../mp/mp_fopen.c 962 -DB_MAX_HANDLES ../dbinc/mutex.h 973 -DB_MAX_PAGES ../dbinc/db.in 79 -DB_MAX_PGSIZE ../dbinc/db_int.in 54 -DB_MAX_RECORDS ../dbinc/db.in 82 +DB_MAXMMAPSIZE ../mp/mp_fopen.c 493 +DB_MAX_HANDLES ../dbinc/mutex.h 979 +DB_MAX_PAGES ../dbinc/db.in 107 +DB_MAX_PGSIZE ../dbinc/db_int.in 85 +DB_MAX_RECORDS ../dbinc/db.in 110 DB_METHOD ../cxx/cxx_db.cpp /^#define DB_METHOD(_name, _argspec, _arglist, _reto/ -DB_METHOD_CHECKED ../cxx/cxx_db.cpp /^#define DB_METHOD_CHECKED(_name, _cleanup, _argspe/ DB_METHOD_QUIET ../cxx/cxx_db.cpp /^#define DB_METHOD_QUIET(_name, _argspec, _arglist)/ DB_METHOD_VOID ../cxx/cxx_db.cpp /^#define DB_METHOD_VOID(_name, _argspec, _arglist) / -DB_MINPAGECACHE ../dbinc/db_int.in 59 -DB_MIN_PGSIZE ../dbinc/db_int.in 53 -DB_MPOOL ../dbinc/db.in 110 -DB_MPOOLFILE ../dbinc/db.in 113 +DB_MINPAGECACHE ../dbinc/db_int.in 90 +DB_MIN_PGSIZE ../dbinc/db_int.in 84 +DB_MPOOL ../dbinc/db.in 140 +DB_MPOOLFILE ../dbinc/db.in 143 DB_MPOOLFILE_METHOD ../cxx/cxx_mpool.cpp /^#define DB_MPOOLFILE_METHOD(_name, _argspec, _argl/ DB_MPOOLFILE_METHOD_VOID ../cxx/cxx_mpool.cpp /^#define DB_MPOOLFILE_METHOD_VOID(_name, _argspec, / -DB_MPOOL_CLEAN ../dbinc/db.in 573 -DB_MPOOL_CREATE ../dbinc/db.in 568 -DB_MPOOL_DIRTY ../dbinc/db.in 574 -DB_MPOOL_DISCARD ../dbinc/db.in 575 -DB_MPOOL_FSTAT ../dbinc/db.in 111 +DB_MPOOL_CLEAN ../dbinc/db.in 635 +DB_MPOOL_CREATE ../dbinc/db.in 630 +DB_MPOOL_DIRTY ../dbinc/db.in 636 +DB_MPOOL_DISCARD ../dbinc/db.in 637 +DB_MPOOL_FREE ../dbinc/db.in 638 +DB_MPOOL_FSTAT ../dbinc/db.in 141 DB_MPOOL_HASH ../dbinc/mp.h 14 -DB_MPOOL_LAST ../dbinc/db.in 569 -DB_MPOOL_NEW ../dbinc/db.in 570 -DB_MPOOL_NOFILE ../dbinc/db.in 578 -DB_MPOOL_STAT ../dbinc/db.in 112 -DB_MPOOL_UNLINK ../dbinc/db.in 579 +DB_MPOOL_LAST ../dbinc/db.in 631 +DB_MPOOL_NEW ../dbinc/db.in 632 +DB_MPOOL_NOFILE ../dbinc/db.in 641 +DB_MPOOL_STAT ../dbinc/db.in 142 +DB_MPOOL_UNLINK ../dbinc/db.in 642 DB_MPREG ../dbinc/mp.h 15 -DB_MULTIPLE ../dbinc/db.in 1030 +DB_MSGBUF_FLUSH ../dbinc/db_int.in /^#define DB_MSGBUF_FLUSH(dbenv, a) do { \\$/ +DB_MSGBUF_INIT ../dbinc/db_int.in /^#define DB_MSGBUF_INIT(a) do { \\$/ +DB_MULTIPLE ../dbinc/db.in 1168 DB_MULTIPLE_INIT ../dbinc/db.in /^#define DB_MULTIPLE_INIT(pointer, dbt) \\$/ -DB_MULTIPLE_KEY ../dbinc/db.in 1031 +DB_MULTIPLE_KEY ../dbinc/db.in 1169 DB_MULTIPLE_KEY_NEXT ../dbinc/db.in /^#define DB_MULTIPLE_KEY_NEXT(pointer, dbt, retkey,/ DB_MULTIPLE_NEXT ../dbinc/db.in /^#define DB_MULTIPLE_NEXT(pointer, dbt, retdata, re/ DB_MULTIPLE_RECNO_NEXT ../dbinc/db.in /^#define DB_MULTIPLE_RECNO_NEXT(pointer, dbt, recno/ -DB_MUTEX ../dbinc/db.in 128 -DB_NEEDSPLIT ../dbinc/db.in 1078 -DB_NEXT ../dbinc/db.in 1002 -DB_NEXT_DUP ../dbinc/db.in 1003 -DB_NEXT_NODUP ../dbinc/db.in 1004 -DB_NODUPDATA ../dbinc/db.in 1005 -DB_NOLOCKING ../dbinc/db.in 246 -DB_NOMMAP ../dbinc/db.in 160 +DB_MUTEX ../dbinc/db.in 161 +DB_NEEDSPLIT ../dbinc/db.in 1218 +DB_NEXT ../dbinc/db.in 1140 +DB_NEXT_DUP ../dbinc/db.in 1141 +DB_NEXT_NODUP ../dbinc/db.in 1142 +DB_NODUPDATA ../dbinc/db.in 1143 +DB_NOLOCKING ../dbinc/db.in 284 +DB_NOMMAP ../dbinc/db.in 193 DB_NONBLOCK ../dbinc/db_int.in /^#define DB_NONBLOCK(C) ((C)->txn != NULL && F_ISSE/ -DB_NOORDERCHK ../dbinc/db.in 294 -DB_NOOVERWRITE ../dbinc/db.in 1006 -DB_NOPANIC ../dbinc/db.in 247 -DB_NOSERVER ../dbinc/db.in 1055 -DB_NOSERVER_HOME ../dbinc/db.in 1056 -DB_NOSERVER_ID ../dbinc/db.in 1057 -DB_NOSYNC ../dbinc/db.in 1007 -DB_NOTFOUND ../dbinc/db.in 1058 
-DB_NO_AUTO_COMMIT ../dbinc/db.in 192 +DB_NOORDERCHK ../dbinc/db.in 345 +DB_NOOVERWRITE ../dbinc/db.in 1144 +DB_NOPANIC ../dbinc/db.in 285 +DB_NOSERVER ../dbinc/db.in 1194 +DB_NOSERVER_HOME ../dbinc/db.in 1195 +DB_NOSERVER_ID ../dbinc/db.in 1196 +DB_NOSYNC ../dbinc/db.in 1145 +DB_NOTFOUND ../dbinc/db.in 1197 +DB_NO_AUTO_COMMIT ../dbinc/db.in 228 DB_NTOHL ../dbinc/db_swap.h /^#define DB_NTOHL(p) do { \\$/ -DB_ODDFILESIZE ../dbinc/db.in 266 -DB_OK_BTREE ../dbinc/db.in 1328 -DB_OK_HASH ../dbinc/db.in 1329 -DB_OK_QUEUE ../dbinc/db.in 1330 -DB_OK_RECNO ../dbinc/db.in 1331 -DB_OLD_VERSION ../dbinc/db.in 1059 -DB_OPFLAGS_MASK ../dbinc/db.in 1023 -DB_OPNOTSUP ../dbinc/db_int.in 183 -DB_ORDERCHKONLY ../dbinc/db.in 295 -DB_OSO_CREATE ../dbinc/os.h 20 -DB_OSO_DIRECT ../dbinc/os.h 21 -DB_OSO_EXCL ../dbinc/os.h 22 -DB_OSO_LOG ../dbinc/os.h 23 -DB_OSO_RDONLY ../dbinc/os.h 24 -DB_OSO_REGION ../dbinc/os.h 25 -DB_OSO_SEQ ../dbinc/os.h 26 -DB_OSO_TEMP ../dbinc/os.h 27 -DB_OSO_TRUNC ../dbinc/os.h 28 -DB_OS_SEEK ../dbinc/os.h 37 +DB_ODDFILESIZE ../dbinc/db.in 309 +DB_OK_BTREE ../dbinc/db.in 1477 +DB_OK_HASH ../dbinc/db.in 1478 +DB_OK_QUEUE ../dbinc/db.in 1479 +DB_OK_RECNO ../dbinc/db.in 1480 +DB_OLD_VERSION ../dbinc/db.in 1198 +DB_OPFLAGS_MASK ../dbinc/db.in 1161 +DB_OPNOTSUP ../dbinc/db_int.in 255 +DB_ORDERCHKONLY ../dbinc/db.in 346 +DB_OSO_CREATE ../dbinc/os.h 31 +DB_OSO_DIRECT ../dbinc/os.h 32 +DB_OSO_DSYNC ../dbinc/os.h 33 +DB_OSO_EXCL ../dbinc/os.h 34 +DB_OSO_LOG ../dbinc/os.h 35 +DB_OSO_RDONLY ../dbinc/os.h 36 +DB_OSO_REGION ../dbinc/os.h 37 +DB_OSO_SEQ ../dbinc/os.h 38 +DB_OSO_TEMP ../dbinc/os.h 39 +DB_OSO_TRUNC ../dbinc/os.h 40 +DB_OS_SEEK ../dbinc/os.h 49 DB_OVERFLOWED_DBT ../dbinc/cxx_int.h /^#define DB_OVERFLOWED_DBT(dbt) \\$/ -DB_OVERWRITE ../dbinc/db.in 248 +DB_OVERWRITE ../dbinc/db.in 286 DB_PAGE_DB_LEN ../dbinc/db_page.h 58 -DB_PAGE_LOCK ../dbinc/db.in 433 -DB_PAGE_NOTFOUND ../dbinc/db.in 1060 +DB_PAGE_LOCK ../dbinc/db.in 486 +DB_PAGE_NOTFOUND ../dbinc/db.in 1199 DB_PAGE_QUEUE_LEN ../dbinc/db_page.h 59 -DB_PANIC_ENVIRONMENT ../dbinc/db.in 249 +DB_PANIC_ENVIRONMENT ../dbinc/db.in 287 +DB_PCT ../dbinc/db_int.in /^#define DB_PCT(v, total) \\$/ +DB_PCT_PG ../dbinc/db_int.in /^#define DB_PCT_PG(v, total, pgsize) \\$/ DB_PKG ../libdb_java/db_java_wrap.c 80 -DB_POSITION ../dbinc/db.in 1008 -DB_PREPLIST ../dbinc/db.in 114 -DB_PREV ../dbinc/db.in 1009 -DB_PREV_NODUP ../dbinc/db.in 1010 -DB_PRINTABLE ../dbinc/db.in 298 -DB_PRIVATE ../dbinc/db.in 216 -DB_PR_PAGE ../dbinc/db.in 296 -DB_PR_RECOVERYTEST ../dbinc/db.in 297 -DB_PUT_ACTION ../db/db_meta.c /^#define DB_PUT_ACTION(dbc, action, lockp) \\$/ -DB_QAMMAGIC ../dbinc/db.in 979 -DB_QAMOLDVER ../dbinc/db.in 978 -DB_QAMVERSION ../dbinc/db.in 977 -DB_QUEUE_STAT ../dbinc/db.in 115 -DB_RDONLY ../dbinc/db.in 161 -DB_RDWRMASTER ../dbinc/db.in 225 -DB_REAL_ERR ../dbinc/debug.h /^#define DB_REAL_ERR(env, error, error_set, stderr_/ -DB_RECNUM ../dbinc/db.in 275 -DB_RECORDCOUNT ../dbinc/db.in 1011 -DB_RECORD_LOCK ../dbinc/db.in 432 -DB_RECOVER ../dbinc/db.in 162 -DB_RECOVER_FATAL ../dbinc/db.in 217 +DB_POSITION ../dbinc/db.in 1146 +DB_PREPLIST ../dbinc/db.in 144 +DB_PREV ../dbinc/db.in 1147 +DB_PREV_NODUP ../dbinc/db.in 1148 +DB_PRINTABLE ../dbinc/db.in 349 +DB_PRIVATE ../dbinc/db.in 252 +DB_PR_PAGE ../dbinc/db.in 347 +DB_PR_RECOVERYTEST ../dbinc/db.in 348 +DB_QAMMAGIC ../dbinc/db.in 1115 +DB_QAMOLDVER ../dbinc/db.in 1114 +DB_QAMVERSION ../dbinc/db.in 1113 +DB_QUEUE_STAT ../dbinc/db.in 145 +DB_RDONLY ../dbinc/db.in 194 +DB_RDWRMASTER ../dbinc/db.in 261 +DB_REAL_ERR 
../dbinc/debug.h /^#define DB_REAL_ERR(env, error, error_set, default/ +DB_REAL_MSG ../dbinc/debug.h /^#define DB_REAL_MSG(env, fmt) { \\$/ +DB_RECNUM ../dbinc/db.in 319 +DB_RECORDCOUNT ../dbinc/db.in 1149 +DB_RECORD_LOCK ../dbinc/db.in 485 +DB_RECOVER ../dbinc/db.in 195 +DB_RECOVER_FATAL ../dbinc/db.in 253 DB_REDO ../dbinc/db.in /^#define DB_REDO(op) ((op) == DB_TXN_FORWARD_ROLL |/ +DB_REGENV_REPLOCKED ../dbinc/region.h 170 +DB_REGENV_TIMEOUT ../dbinc/region.h 172 DB_REGION_ENV ../dbinc/region.h 104 DB_REGION_FMT ../dbinc/region.h 103 -DB_REGION_INIT ../dbinc/db.in 250 -DB_REGION_MAGIC ../dbinc/db.in 1582 +DB_REGION_INIT ../dbinc/db.in 288 +DB_REGION_MAGIC ../dbinc/db.in 1734 DB_REGION_NAME_LENGTH ../dbinc/region.h 105 DB_REGION_PREFIX ../dbinc/region.h 102 DB_REM_BIG ../dbinc/db_am.h 26 DB_REM_DUP ../dbinc/db_am.h 24 -DB_REM_PAGE ../dbinc/db_am.h 28 -DB_RENAMEMAGIC ../dbinc/db.in 967 -DB_RENUMBER ../dbinc/db.in 276 -DB_REP ../dbinc/db.in 116 -DB_REPVERSION ../dbinc/rep.h 173 -DB_REP_CLIENT ../dbinc/db.in 894 -DB_REP_CREATE ../dbinc/db.in 202 -DB_REP_DUPMASTER ../dbinc/db.in 1061 -DB_REP_HANDLE_DEAD ../dbinc/db.in 1062 -DB_REP_HOLDELECTION ../dbinc/db.in 1063 -DB_REP_ISPERM ../dbinc/db.in 1064 -DB_REP_LOGSONLY ../dbinc/db.in 895 -DB_REP_MASTER ../dbinc/db.in 896 -DB_REP_MAX_GAP ../dbinc/rep.h 64 -DB_REP_NEWMASTER ../dbinc/db.in 1065 -DB_REP_NEWSITE ../dbinc/db.in 1066 -DB_REP_NOBUFFER ../dbinc/db.in 309 -DB_REP_NOTPERM ../dbinc/db.in 1067 -DB_REP_OUTDATED ../dbinc/db.in 1068 -DB_REP_PERMANENT ../dbinc/db.in 310 -DB_REP_REQUEST_GAP ../dbinc/rep.h 63 -DB_REP_STAT ../dbinc/db.in 117 -DB_REP_UNAVAIL ../dbinc/db.in 1069 +DB_RENAMEMAGIC ../dbinc/db.in 1103 +DB_RENUMBER ../dbinc/db.in 320 +DB_REP ../dbinc/db.in 146 +DB_REPVERSION ../dbinc/rep.h 268 +DB_REP_CLIENT ../dbinc/db.in 961 +DB_REP_CREATE ../dbinc/db.in 238 +DB_REP_DUPMASTER ../dbinc/db.in 1200 +DB_REP_EGENCHG ../dbinc/db.in 1219 +DB_REP_HANDLE_DEAD ../dbinc/db.in 1201 +DB_REP_HOLDELECTION ../dbinc/db.in 1202 +DB_REP_ISPERM ../dbinc/db.in 1203 +DB_REP_LOGREADY ../dbinc/db.in 1220 +DB_REP_MASTER ../dbinc/db.in 962 +DB_REP_MAX_GAP ../dbinc/rep.h 125 +DB_REP_NEWMASTER ../dbinc/db.in 1204 +DB_REP_NEWSITE ../dbinc/db.in 1205 +DB_REP_NOBUFFER ../dbinc/db.in 361 +DB_REP_NOTPERM ../dbinc/db.in 1206 +DB_REP_PAGEDONE ../dbinc/db.in 1221 +DB_REP_PERMANENT ../dbinc/db.in 362 +DB_REP_REQUEST_GAP ../dbinc/rep.h 124 +DB_REP_STARTUPDONE ../dbinc/db.in 1207 +DB_REP_STAT ../dbinc/db.in 147 +DB_REP_UNAVAIL ../dbinc/db.in 1208 DB_RETOK_DBCDEL ../dbinc/db_int.in /^#define DB_RETOK_DBCDEL(ret) ((ret) == 0 || (ret) / DB_RETOK_DBCGET ../dbinc/db_int.in /^#define DB_RETOK_DBCGET(ret) ((ret) == 0 || (ret) / DB_RETOK_DBCPUT ../dbinc/db_int.in /^#define DB_RETOK_DBCPUT(ret) ((ret) == 0 || (ret) / @@ -536,13 +564,31 @@ DB_RETOK_LGGET ../dbinc/db_int.in /^#define DB_RETOK_LGGET(ret) ((ret) == 0 || ( DB_RETOK_MPGET ../dbinc/db_int.in /^#define DB_RETOK_MPGET(ret) ((ret) == 0 || (ret) =/ DB_RETOK_REPPMSG ../dbinc/db_int.in /^#define DB_RETOK_REPPMSG(ret) ((ret) == 0 || \\$/ DB_RETOK_STD ../dbinc/db_int.in /^#define DB_RETOK_STD(ret) ((ret) == 0)$/ -DB_RETRY ../dbinc/db_int.in 68 -DB_REVSPLITOFF ../dbinc/db.in 277 -DB_RMW ../dbinc/db.in 1032 -DB_RPCCLIENT ../dbinc/db.in 197 -DB_RUNRECOVERY ../dbinc/db.in 1070 -DB_SALVAGE ../dbinc/db.in 299 -DB_SECONDARY_BAD ../dbinc/db.in 1071 +DB_RETRY ../dbinc/os.h 18 +DB_REVSPLITOFF ../dbinc/db.in 321 +DB_RMW ../dbinc/db.in 1170 +DB_RPC2ND_CONCATDATAKEY ../dbinc/db_int.in 511 +DB_RPC2ND_CONCATKEYDATA ../dbinc/db_int.in 510 
+DB_RPC2ND_CONSTANT ../dbinc/db_int.in 514 +DB_RPC2ND_GETNAME ../dbinc/db_int.in 516 +DB_RPC2ND_GETZIP ../dbinc/db_int.in 515 +DB_RPC2ND_MASK ../dbinc/db_int.in 506 +DB_RPC2ND_NOOP ../dbinc/db_int.in 509 +DB_RPC2ND_REVERSECONCAT ../dbinc/db_int.in 512 +DB_RPC2ND_REVERSEDATA ../dbinc/db_int.in 508 +DB_RPC2ND_TRUNCDATA ../dbinc/db_int.in 513 +DB_RPCCLIENT ../dbinc/db.in 233 +DB_RUNRECOVERY ../dbinc/db.in 1209 +DB_SALVAGE ../dbinc/db.in 350 +DB_SECONDARY_BAD ../dbinc/db.in 1210 +DB_SEQUENCE ../dbinc/db.in 148 +DB_SEQUENCE_STAT ../dbinc/db.in 150 +DB_SEQUENCE_VERSION ../dbinc/db.in 1117 +DB_SEQ_DEC ../dbinc/db.in 1033 +DB_SEQ_INC ../dbinc/db.in 1034 +DB_SEQ_RANGE_SET ../dbinc/db.in 1035 +DB_SEQ_RECORD ../dbinc/db.in 149 +DB_SEQ_WRAP ../dbinc/db.in 1036 DB_SERVER_DBFLAGS ../dbinc/db_server_int.h 61 DB_SERVER_DBNOSHARE ../dbinc/db_server_int.h 62 DB_SERVER_ENVFLAGS ../dbinc/db_server_int.h 57 @@ -550,95 +596,107 @@ DB_SERVER_FLAGMASK ../dbinc/db_server_int.h 28 DB_SERVER_IDLETIMEOUT ../dbinc/db_server_int.h 15 DB_SERVER_MAXTIMEOUT ../dbinc/db_server_int.h 14 DB_SERVER_TIMEOUT ../dbinc/db_server_int.h 13 -DB_SET ../dbinc/db.in 1012 +DB_SET ../dbinc/db.in 1150 +DB_SET_BEGIN_LSNP ../dbinc/log.h /^#define DB_SET_BEGIN_LSNP(txn, rlsnp) do { \\$/ DB_SET_CALLBACK ../cxx/cxx_db.cpp /^#define DB_SET_CALLBACK(_cxxname, _name, _cxxargsp/ -DB_SET_LOCK_TIMEOUT ../dbinc/db.in 1013 -DB_SET_RANGE ../dbinc/db.in 1014 -DB_SET_RECNO ../dbinc/db.in 1015 -DB_SET_TXN_NOW ../dbinc/db.in 1016 -DB_SET_TXN_TIMEOUT ../dbinc/db.in 1017 -DB_SNAPSHOT ../dbinc/db.in 278 -DB_STAT_CLEAR ../dbinc/db.in 283 -DB_SURPRISE_KID ../dbinc/db.in 1079 -DB_SWAPBYTES ../dbinc/db.in 1081 -DB_SYSTEM_MEM ../dbinc/db.in 218 -DB_TEST_ELECTINIT ../dbinc/db.in 1854 -DB_TEST_POSTDESTROY ../dbinc/db.in 1855 -DB_TEST_POSTLOG ../dbinc/db.in 1856 -DB_TEST_POSTLOGMETA ../dbinc/db.in 1857 -DB_TEST_POSTOPEN ../dbinc/db.in 1858 -DB_TEST_POSTSYNC ../dbinc/db.in 1859 -DB_TEST_PREDESTROY ../dbinc/db.in 1860 -DB_TEST_PREOPEN ../dbinc/db.in 1861 -DB_TEST_RECOVERY ../dbinc/debug.h /^#define DB_TEST_RECOVERY(dbp, val, ret, name) \\/ -DB_TEST_RECOVERY_LABEL ../dbinc/debug.h 206 -DB_TEST_SUBDB_LOCKS ../dbinc/db.in 1862 -DB_TEST_SUBLOCKS ../dbinc/debug.h /^#define DB_TEST_SUBLOCKS(env, flags) \\$/ -DB_THREAD ../dbinc/db.in 163 -DB_TIMEOUT ../dbinc/db.in 1082 -DB_TIME_NOTGRANTED ../dbinc/db.in 251 -DB_TRAIL ../env/env_open.c 869 -DB_TRUNCATE ../dbinc/db.in 164 -DB_TXN ../dbinc/db.in 118 -DB_TXNHEAD ../dbinc/db_int.in 419 -DB_TXNLIST ../dbinc/db_int.in 420 +DB_SET_LOCK_TIMEOUT ../dbinc/db.in 1151 +DB_SET_RANGE ../dbinc/db.in 1152 +DB_SET_RECNO ../dbinc/db.in 1153 +DB_SET_TXN_NOW ../dbinc/db.in 1154 +DB_SET_TXN_TIMEOUT ../dbinc/db.in 1155 +DB_SNAPSHOT ../dbinc/db.in 322 +DB_STAT_ALL ../dbinc/db.in 327 +DB_STAT_CLEAR ../dbinc/db.in 328 +DB_STAT_LOCK_CONF ../dbinc/db.in 329 +DB_STAT_LOCK_FLAGS ../lock/lock_stat.c 150 +DB_STAT_LOCK_LOCKERS ../dbinc/db.in 330 +DB_STAT_LOCK_OBJECTS ../dbinc/db.in 331 +DB_STAT_LOCK_PARAMS ../dbinc/db.in 332 +DB_STAT_MEMP_FLAGS ../mp/mp_stat.c 289 +DB_STAT_MEMP_HASH ../dbinc/db.in 333 +DB_STAT_SUBSYSTEM ../dbinc/db.in 334 +DB_SURPRISE_KID ../dbinc/db.in 1222 +DB_SWAPBYTES ../dbinc/db.in 1224 +DB_SYSTEM_MEM ../dbinc/db.in 254 +DB_TEST_CHECKPOINT ../dbinc/debug.h /^#define DB_TEST_CHECKPOINT(env, val) \\$/ +DB_TEST_ELECTINIT ../dbinc/db.in 2035 +DB_TEST_ELECTVOTE1 ../dbinc/db.in 2036 +DB_TEST_POSTDESTROY ../dbinc/db.in 2037 +DB_TEST_POSTLOG ../dbinc/db.in 2038 +DB_TEST_POSTLOGMETA ../dbinc/db.in 2039 +DB_TEST_POSTOPEN ../dbinc/db.in 2040 
+DB_TEST_POSTSYNC ../dbinc/db.in 2041 +DB_TEST_PREDESTROY ../dbinc/db.in 2042 +DB_TEST_PREOPEN ../dbinc/db.in 2043 +DB_TEST_RECOVERY ../dbinc/debug.h /^#define DB_TEST_RECOVERY(dbp, val, ret, name) do {/ +DB_TEST_RECOVERY_LABEL ../dbinc/debug.h 248 +DB_TEST_SUBDB_LOCKS ../dbinc/db.in 2044 +DB_TEST_SUBLOCKS ../dbinc/debug.h /^#define DB_TEST_SUBLOCKS(env, flags) do { \\$/ +DB_THREAD ../dbinc/db.in 196 +DB_TIMEOUT ../dbinc/db.in 1225 +DB_TIME_NOTGRANTED ../dbinc/db.in 289 +DB_TRAIL ../env/env_open.c 923 +DB_TRUNCATE ../dbinc/db.in 197 +DB_TXN ../dbinc/db.in 151 +DB_TXNHEAD ../dbinc/db_int.in 523 +DB_TXNLIST ../dbinc/db_int.in 524 DB_TXNLIST_MASK ../dbinc/db_dispatch.h /^#define DB_TXNLIST_MASK(hp, n) (n % hp->nslots)$/ -DB_TXNLIST_MAX_PGNO ../db/db_dispatch.c 1408 +DB_TXNLIST_MAX_PGNO ../db/db_dispatch.c 1468 DB_TXNLOGREC ../dbinc/txn.h 24 -DB_TXNMGR ../dbinc/db.in 121 +DB_TXNMGR ../dbinc/db.in 154 DB_TXNREGION ../dbinc/txn.h 23 -DB_TXNVERSION ../dbinc/db.in 730 -DB_TXN_ACTIVE ../dbinc/db.in 119 -DB_TXN_CKP ../dbinc/db.in 1083 -DB_TXN_NOSYNC ../dbinc/db.in 165 -DB_TXN_NOT_DURABLE ../dbinc/db.in 166 -DB_TXN_NOWAIT ../dbinc/db.in 231 -DB_TXN_STAT ../dbinc/db.in 120 -DB_TXN_SYNC ../dbinc/db.in 232 -DB_TXN_WRITE_NOSYNC ../dbinc/db.in 252 +DB_TXNVERSION ../dbinc/db.in 795 +DB_TXN_ACTIVE ../dbinc/db.in 152 +DB_TXN_CKP ../dbinc/db.in 1226 +DB_TXN_NOSYNC ../dbinc/db.in 198 +DB_TXN_NOT_DURABLE ../dbinc/db.in 199 +DB_TXN_NOWAIT ../dbinc/db.in 267 +DB_TXN_STAT ../dbinc/db.in 153 +DB_TXN_SYNC ../dbinc/db.in 268 +DB_TXN_WRITE_NOSYNC ../dbinc/db.in 294 DB_UNDO ../dbinc/db.in /^#define DB_UNDO(op) ((op) == DB_TXN_ABORT || \\$/ -DB_UPDATE_SECONDARY ../dbinc/db.in 1018 -DB_UPGRADE ../dbinc/db.in 258 -DB_USE_ENVIRON ../dbinc/db.in 167 -DB_USE_ENVIRON_ROOT ../dbinc/db.in 168 -DB_VERB_CHKPOINT ../dbinc/db.in 1606 -DB_VERB_DEADLOCK ../dbinc/db.in 1607 -DB_VERB_RECOVERY ../dbinc/db.in 1608 -DB_VERB_REPLICATION ../dbinc/db.in 1609 -DB_VERB_WAITSFOR ../dbinc/db.in 1610 -DB_VERIFY ../dbinc/db.in 259 -DB_VERIFY_BAD ../dbinc/db.in 1072 -DB_VERIFY_FATAL ../dbinc/db.in 1084 +DB_UNREF ../dbinc/db.in 351 +DB_UPDATE_SECONDARY ../dbinc/db.in 1156 +DB_UPGRADE ../dbinc/db.in 300 +DB_USE_ENVIRON ../dbinc/db.in 200 +DB_USE_ENVIRON_ROOT ../dbinc/db.in 201 +DB_VERB_DEADLOCK ../dbinc/db.in 1764 +DB_VERB_RECOVERY ../dbinc/db.in 1765 +DB_VERB_REPLICATION ../dbinc/db.in 1766 +DB_VERB_WAITSFOR ../dbinc/db.in 1767 +DB_VERIFY ../dbinc/db.in 301 +DB_VERIFY_BAD ../dbinc/db.in 1211 +DB_VERIFY_FATAL ../dbinc/db.in 1227 DB_VERSION_MAJOR ../dbinc/db.in 43 DB_VERSION_MINOR ../dbinc/db.in 44 +DB_VERSION_MISMATCH ../dbinc/db.in 1212 DB_VERSION_PATCH ../dbinc/db.in 45 DB_VERSION_STRING ../dbinc/db.in 46 -DB_WRITECURSOR ../dbinc/db.in 1019 -DB_WRITELOCK ../dbinc/db.in 1020 -DB_WRITEOPEN ../dbinc/db.in 226 -DB_XA_CREATE ../dbinc/db.in 203 -DB_XIDDATASIZE ../dbinc/db.in 853 -DB_YIELDCPU ../dbinc/db.in 253 -DB_debug_FLAG ../dbinc/db.in 505 -DB_user_BEGIN ../dbinc/db.in 504 +DB_WRITECURSOR ../dbinc/db.in 1157 +DB_WRITELOCK ../dbinc/db.in 1158 +DB_WRITEOPEN ../dbinc/db.in 262 +DB_XA_CREATE ../dbinc/db.in 239 +DB_XIDDATASIZE ../dbinc/db.in 920 +DB_YIELDCPU ../dbinc/db.in 295 +DB_debug_FLAG ../dbinc/db.in 559 +DB_user_BEGIN ../dbinc/db.in 558 DCHARHASH ../hash/hash_func.c /^#define DCHARHASH(h, c) ((h) = 0x63c63cd9*(h) + 0x/ -DD_INVALID_ID ../lock/lock_deadlock.c 346 +DD_INVALID_ID ../lock/lock_deadlock.c 370 DEBUG_LREAD ../dbinc/debug.h /^#define DEBUG_LREAD(C, T, O, K, A, F) LOG_OP(C, T,/ DEBUG_LWRITE ../dbinc/debug.h /^#define DEBUG_LWRITE(C, T, O, K, 
A, F) LOG_OP(C, T/ -DEFINE_DB_CLASS ../dbinc/db_cxx.in /^#define DEFINE_DB_CLASS(name) \\$/ DEFMINKEYPAGE ../dbinc/btree.h 53 DEF_MAX_TXNS ../dbinc/txn.h 34 DELOVFL ../dbinc/hash.h 129 DELPAIR ../dbinc/hash.h 127 DIR_DECRYPT ../crypto/rijndael/rijndael-api-fst.h 48 DIR_ENCRYPT ../crypto/rijndael/rijndael-api-fst.h 47 -DISCARD_CUR ../btree/bt_cursor.c /^#define DISCARD_CUR(dbc, ret) { \\$/ +DISCARD_CUR ../btree/bt_cursor.c /^#define DISCARD_CUR(dbc, ret) do { \\$/ DO_PREPLIST ../tcl/tcl_txn.c /^#define DO_PREPLIST(count) \\$/ -DO_TRADE ../txn/txn_util.c 220 +DO_TRADE ../txn/txn_util.c 218 DUP_SIZE ../dbinc/hash.h /^#define DUP_SIZE(len) ((len) + 2 * sizeof(db_indx_/ +Db ../libdb_java/db_java_wrap.c 1737 Db::Db ../cxx/cxx_db.cpp /^Db::Db(DbEnv *env, u_int32_t flags)$/ Db::cleanup ../cxx/cxx_db.cpp /^void Db::cleanup()$/ -Db::close ../cxx/cxx_db.cpp /^int Db::close(u_int32_t flags)$/ Db::error_policy ../cxx/cxx_db.cpp /^int Db::error_policy()$/ Db::errx ../cxx/cxx_db.cpp /^void Db::errx(const char *format, ...)$/ Db::get_app_private ../cxx/cxx_db.cpp /^void *Db::get_app_private() const$/ @@ -648,18 +706,22 @@ Db::get_mpf ../cxx/cxx_db.cpp /^DbMpoolFile *Db::get_mpf()$/ Db::initialize ../cxx/cxx_db.cpp /^int Db::initialize()$/ Db::pget ../cxx/cxx_db.cpp /^int Db::pget(DbTxn *txnid, Dbt *key, Dbt *pkey, Db/ Db::set_app_private ../cxx/cxx_db.cpp /^void Db::set_app_private(void *value)$/ -Db::set_error_stream ../cxx/cxx_db.cpp /^void Db::set_error_stream(__DB_OSTREAMCLASS *error/ +Db::set_error_stream ../cxx/cxx_db.cpp /^void Db::set_error_stream(__DB_STD(ostream) *error/ +Db::set_message_stream ../cxx/cxx_db.cpp /^void Db::set_message_stream(__DB_STD(ostream) *mes/ +Db::set_msgcall ../cxx/cxx_db.cpp /^void Db::set_msgcall(void (*arg)(const DbEnv *, co/ Db::verify ../cxx/cxx_db.cpp /^int Db::verify(const char *name, const char *subdb/ Db::~Db ../cxx/cxx_db.cpp /^Db::~Db()$/ DbDeadlockException::DbDeadlockException ../cxx/cxx_except.cpp /^DbDeadlockException::DbDeadlockException(const cha/ DbDeadlockException::operator = ../cxx/cxx_except.cpp /^&DbDeadlockException::operator =(const DbDeadlockE/ -DbDeadlockException::~DbDeadlockException ../cxx/cxx_except.cpp /^DbDeadlockException::~DbDeadlockException()$/ +DbDeadlockException::~DbDeadlockException ../cxx/cxx_except.cpp /^DbDeadlockException::~DbDeadlockException() throw(/ +DbEnv ../libdb_java/db_java_wrap.c 1740 DbEnv::DbEnv ../cxx/cxx_env.cpp /^DbEnv::DbEnv(u_int32_t flags)$/ DbEnv::_app_dispatch_intercept ../cxx/cxx_env.cpp /^int DbEnv::_app_dispatch_intercept(DB_ENV *env, DB/ DbEnv::_feedback_intercept ../cxx/cxx_env.cpp /^void DbEnv::_feedback_intercept(DB_ENV *env, int o/ DbEnv::_paniccall_intercept ../cxx/cxx_env.cpp /^void DbEnv::_paniccall_intercept(DB_ENV *env, int / DbEnv::_rep_send_intercept ../cxx/cxx_env.cpp /^int DbEnv::_rep_send_intercept(DB_ENV *env, const / -DbEnv::_stream_error_function ../cxx/cxx_env.cpp /^void DbEnv::_stream_error_function(const char *pre/ +DbEnv::_stream_error_function ../cxx/cxx_env.cpp /^void DbEnv::_stream_error_function($/ +DbEnv::_stream_message_function ../cxx/cxx_env.cpp /^void DbEnv::_stream_message_function(const DB_ENV / DbEnv::cleanup ../cxx/cxx_env.cpp /^void DbEnv::cleanup()$/ DbEnv::close ../cxx/cxx_env.cpp /^int DbEnv::close(u_int32_t flags)$/ DbEnv::error_policy ../cxx/cxx_env.cpp /^int DbEnv::error_policy()$/ @@ -669,24 +731,114 @@ DbEnv::remove ../cxx/cxx_env.cpp /^int DbEnv::remove(const char *db_home, u_int3 DbEnv::runtime_error ../cxx/cxx_env.cpp /^void DbEnv::runtime_error(DbEnv *env,$/ 
DbEnv::runtime_error_dbt ../cxx/cxx_env.cpp /^void DbEnv::runtime_error_dbt(DbEnv *env,$/ DbEnv::runtime_error_lock_get ../cxx/cxx_env.cpp /^void DbEnv::runtime_error_lock_get(DbEnv *env,$/ -DbEnv::set_error_stream ../cxx/cxx_env.cpp /^void DbEnv::set_error_stream(__DB_OSTREAMCLASS *st/ +DbEnv::set_error_stream ../cxx/cxx_env.cpp /^void DbEnv::set_error_stream(__DB_STD(ostream) *st/ DbEnv::set_feedback ../cxx/cxx_env.cpp /^int DbEnv::set_feedback(void (*arg)(DbEnv *, int, / +DbEnv::set_message_stream ../cxx/cxx_env.cpp /^void DbEnv::set_message_stream(__DB_STD(ostream) */ +DbEnv::set_paniccall ../cxx/cxx_env.cpp /^int DbEnv::set_paniccall(void (*arg)(DbEnv *, int)/ DbEnv::strerror ../cxx/cxx_env.cpp /^char *DbEnv::strerror(int error)$/ DbEnv::wrap_DB_ENV ../cxx/cxx_env.cpp /^DbEnv *DbEnv::wrap_DB_ENV(DB_ENV *dbenv)$/ DbEnv::~DbEnv ../cxx/cxx_env.cpp /^DbEnv::~DbEnv()$/ +DbEnv_close ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_close(struct DbEnv *self,u_int32_t / +DbEnv_dbremove ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_dbremove(struct DbEnv *self,DB_TXN / +DbEnv_dbrename ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_dbrename(struct DbEnv *self,DB_TXN / +DbEnv_err ../libdb_java/db_java_wrap.c /^void DbEnv_err(struct DbEnv *self,int error,char c/ +DbEnv_errx ../libdb_java/db_java_wrap.c /^void DbEnv_errx(struct DbEnv *self,char const *mes/ +DbEnv_get_cachesize ../libdb_java/db_java_wrap.c /^jlong DbEnv_get_cachesize(struct DbEnv *self){$/ +DbEnv_get_cachesize_ncache ../libdb_java/db_java_wrap.c /^int DbEnv_get_cachesize_ncache(struct DbEnv *self)/ +DbEnv_get_data_dirs ../libdb_java/db_java_wrap.c /^char const **DbEnv_get_data_dirs(struct DbEnv *sel/ +DbEnv_get_encrypt_flags ../libdb_java/db_java_wrap.c /^u_int32_t DbEnv_get_encrypt_flags(struct DbEnv *se/ +DbEnv_get_errpfx ../libdb_java/db_java_wrap.c /^char const *DbEnv_get_errpfx(struct DbEnv *self){$/ +DbEnv_get_flags ../libdb_java/db_java_wrap.c /^u_int32_t DbEnv_get_flags(struct DbEnv *self){$/ +DbEnv_get_home ../libdb_java/db_java_wrap.c /^char const *DbEnv_get_home(struct DbEnv *self){$/ +DbEnv_get_lg_bsize ../libdb_java/db_java_wrap.c /^u_int32_t DbEnv_get_lg_bsize(struct DbEnv *self){$/ +DbEnv_get_lg_dir ../libdb_java/db_java_wrap.c /^char const *DbEnv_get_lg_dir(struct DbEnv *self){$/ +DbEnv_get_lg_max ../libdb_java/db_java_wrap.c /^u_int32_t DbEnv_get_lg_max(struct DbEnv *self){$/ +DbEnv_get_lg_regionmax ../libdb_java/db_java_wrap.c /^u_int32_t DbEnv_get_lg_regionmax(struct DbEnv *sel/ +DbEnv_get_lk_conflicts ../libdb_java/db_java_wrap.c /^struct __db_lk_conflicts DbEnv_get_lk_conflicts(st/ +DbEnv_get_lk_detect ../libdb_java/db_java_wrap.c /^u_int32_t DbEnv_get_lk_detect(struct DbEnv *self){/ +DbEnv_get_lk_max_lockers ../libdb_java/db_java_wrap.c /^u_int32_t DbEnv_get_lk_max_lockers(struct DbEnv *s/ +DbEnv_get_lk_max_locks ../libdb_java/db_java_wrap.c /^u_int32_t DbEnv_get_lk_max_locks(struct DbEnv *sel/ +DbEnv_get_lk_max_objects ../libdb_java/db_java_wrap.c /^u_int32_t DbEnv_get_lk_max_objects(struct DbEnv *s/ +DbEnv_get_mp_mmapsize ../libdb_java/db_java_wrap.c /^size_t DbEnv_get_mp_mmapsize(struct DbEnv *self){$/ +DbEnv_get_open_flags ../libdb_java/db_java_wrap.c /^u_int32_t DbEnv_get_open_flags(struct DbEnv *self)/ +DbEnv_get_rep_limit ../libdb_java/db_java_wrap.c /^jlong DbEnv_get_rep_limit(struct DbEnv *self){$/ +DbEnv_get_shm_key ../libdb_java/db_java_wrap.c /^long DbEnv_get_shm_key(struct DbEnv *self){$/ +DbEnv_get_tas_spins ../libdb_java/db_java_wrap.c /^u_int32_t DbEnv_get_tas_spins(struct DbEnv *self){/ 
+DbEnv_get_timeout ../libdb_java/db_java_wrap.c /^db_timeout_t DbEnv_get_timeout(struct DbEnv *self,/ +DbEnv_get_tmp_dir ../libdb_java/db_java_wrap.c /^char const *DbEnv_get_tmp_dir(struct DbEnv *self){/ +DbEnv_get_tx_max ../libdb_java/db_java_wrap.c /^u_int32_t DbEnv_get_tx_max(struct DbEnv *self){$/ +DbEnv_get_tx_timestamp ../libdb_java/db_java_wrap.c /^time_t DbEnv_get_tx_timestamp(struct DbEnv *self){/ +DbEnv_get_verbose ../libdb_java/db_java_wrap.c /^int_bool DbEnv_get_verbose(struct DbEnv *self,u_in/ DbEnv_get_version_major ../libdb_java/db_java_wrap.c /^int DbEnv_get_version_major(){$/ DbEnv_get_version_minor ../libdb_java/db_java_wrap.c /^int DbEnv_get_version_minor(){$/ DbEnv_get_version_patch ../libdb_java/db_java_wrap.c /^int DbEnv_get_version_patch(){$/ DbEnv_get_version_string ../libdb_java/db_java_wrap.c /^char const *DbEnv_get_version_string(){$/ +DbEnv_lock_detect ../libdb_java/db_java_wrap.c /^int DbEnv_lock_detect(struct DbEnv *self,u_int32_t/ +DbEnv_lock_get ../libdb_java/db_java_wrap.c /^DB_LOCK *DbEnv_lock_get(struct DbEnv *self,u_int32/ +DbEnv_lock_id ../libdb_java/db_java_wrap.c /^u_int32_t DbEnv_lock_id(struct DbEnv *self){$/ +DbEnv_lock_id_free ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_lock_id_free(struct DbEnv *self,u_i/ +DbEnv_lock_put ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_lock_put(struct DbEnv *self,DB_LOCK/ +DbEnv_lock_stat ../libdb_java/db_java_wrap.c /^DB_LOCK_STAT *DbEnv_lock_stat(struct DbEnv *self,u/ +DbEnv_log_archive ../libdb_java/db_java_wrap.c /^char **DbEnv_log_archive(struct DbEnv *self,u_int3/ DbEnv_log_compare ../libdb_java/db_java_wrap.c /^int DbEnv_log_compare(DB_LSN const *lsn0,DB_LSN co/ +DbEnv_log_cursor ../libdb_java/db_java_wrap.c /^DB_LOGC *DbEnv_log_cursor(struct DbEnv *self,u_int/ +DbEnv_log_file ../libdb_java/db_java_wrap.c /^char *DbEnv_log_file(struct DbEnv *self,DB_LSN *ls/ +DbEnv_log_flush ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_log_flush(struct DbEnv *self,DB_LSN/ +DbEnv_log_put ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_log_put(struct DbEnv *self,DB_LSN */ +DbEnv_log_stat ../libdb_java/db_java_wrap.c /^DB_LOG_STAT *DbEnv_log_stat(struct DbEnv *self,u_i/ +DbEnv_memp_fstat ../libdb_java/db_java_wrap.c /^DB_MPOOL_FSTAT **DbEnv_memp_fstat(struct DbEnv *se/ +DbEnv_memp_stat ../libdb_java/db_java_wrap.c /^DB_MPOOL_STAT *DbEnv_memp_stat(struct DbEnv *self,/ +DbEnv_memp_trickle ../libdb_java/db_java_wrap.c /^int DbEnv_memp_trickle(struct DbEnv *self,int perc/ +DbEnv_open ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_open(struct DbEnv *self,char const / +DbEnv_remove ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_remove(struct DbEnv *self,char cons/ +DbEnv_rep_elect ../libdb_java/db_java_wrap.c /^int DbEnv_rep_elect(struct DbEnv *self,int nsites,/ +DbEnv_rep_process_message ../libdb_java/db_java_wrap.c /^int DbEnv_rep_process_message(struct DbEnv *self,D/ +DbEnv_rep_start ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_rep_start(struct DbEnv *self,DBT *c/ +DbEnv_rep_stat ../libdb_java/db_java_wrap.c /^DB_REP_STAT *DbEnv_rep_stat(struct DbEnv *self,u_i/ +DbEnv_set_app_dispatch ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_app_dispatch(struct DbEnv *self/ +DbEnv_set_cachesize ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_cachesize(struct DbEnv *self,jl/ +DbEnv_set_data_dir ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_data_dir(struct DbEnv *self,cha/ +DbEnv_set_encrypt ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_encrypt(struct DbEnv *self,char/ +DbEnv_set_errcall ../libdb_java/db_java_wrap.c /^void 
DbEnv_set_errcall(struct DbEnv *self,void (*d/ +DbEnv_set_errpfx ../libdb_java/db_java_wrap.c /^void DbEnv_set_errpfx(struct DbEnv *self,char cons/ +DbEnv_set_feedback ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_feedback(struct DbEnv *self,voi/ +DbEnv_set_flags ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_flags(struct DbEnv *self,u_int3/ +DbEnv_set_lg_bsize ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_lg_bsize(struct DbEnv *self,u_i/ +DbEnv_set_lg_dir ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_lg_dir(struct DbEnv *self,char / +DbEnv_set_lg_max ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_lg_max(struct DbEnv *self,u_int/ +DbEnv_set_lg_regionmax ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_lg_regionmax(struct DbEnv *self/ +DbEnv_set_lk_conflicts ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_lk_conflicts(struct DbEnv *self/ +DbEnv_set_lk_detect ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_lk_detect(struct DbEnv *self,u_/ +DbEnv_set_lk_max_lockers ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_lk_max_lockers(struct DbEnv *se/ +DbEnv_set_lk_max_locks ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_lk_max_locks(struct DbEnv *self/ +DbEnv_set_lk_max_objects ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_lk_max_objects(struct DbEnv *se/ +DbEnv_set_mp_mmapsize ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_mp_mmapsize(struct DbEnv *self,/ +DbEnv_set_msgcall ../libdb_java/db_java_wrap.c /^void DbEnv_set_msgcall(struct DbEnv *self,void (*d/ +DbEnv_set_paniccall ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_paniccall(struct DbEnv *self,vo/ +DbEnv_set_rep_limit ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_rep_limit(struct DbEnv *self,jl/ +DbEnv_set_rep_transport ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_rep_transport(struct DbEnv *sel/ +DbEnv_set_rpc_server ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_rpc_server(struct DbEnv *self,v/ +DbEnv_set_shm_key ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_shm_key(struct DbEnv *self,long/ +DbEnv_set_tas_spins ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_tas_spins(struct DbEnv *self,u_/ +DbEnv_set_timeout ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_timeout(struct DbEnv *self,db_t/ +DbEnv_set_tmp_dir ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_tmp_dir(struct DbEnv *self,char/ +DbEnv_set_tx_max ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_tx_max(struct DbEnv *self,u_int/ +DbEnv_set_tx_timestamp ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_tx_timestamp(struct DbEnv *self/ +DbEnv_set_verbose ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_set_verbose(struct DbEnv *self,u_in/ DbEnv_strerror ../libdb_java/db_java_wrap.c /^char const *DbEnv_strerror(int error){$/ +DbEnv_txn_begin ../libdb_java/db_java_wrap.c /^DB_TXN *DbEnv_txn_begin(struct DbEnv *self,DB_TXN / +DbEnv_txn_checkpoint ../libdb_java/db_java_wrap.c /^db_ret_t DbEnv_txn_checkpoint(struct DbEnv *self,u/ +DbEnv_txn_recover ../libdb_java/db_java_wrap.c /^DB_PREPLIST *DbEnv_txn_recover(struct DbEnv *self,/ +DbEnv_txn_stat ../libdb_java/db_java_wrap.c /^DB_TXN_STAT *DbEnv_txn_stat(struct DbEnv *self,u_i/ DbException::DbException ../cxx/cxx_except.cpp /^DbException::DbException(int err)$/ +DbException::describe ../cxx/cxx_except.cpp /^void DbException::describe(const char *prefix, con/ DbException::get_env ../cxx/cxx_except.cpp /^DbEnv *DbException::get_env() const$/ DbException::get_errno ../cxx/cxx_except.cpp /^int DbException::get_errno() const$/ DbException::operator = ../cxx/cxx_except.cpp /^DbException 
&DbException::operator = (const DbExce/ DbException::set_env ../cxx/cxx_except.cpp /^void DbException::set_env(DbEnv *env)$/ -DbException::what ../cxx/cxx_except.cpp /^const char *DbException::what() const$/ -DbException::~DbException ../cxx/cxx_except.cpp /^DbException::~DbException()$/ +DbException::what ../cxx/cxx_except.cpp /^const char *DbException::what() const throw()$/ +DbException::~DbException ../cxx/cxx_except.cpp /^DbException::~DbException() throw()$/ +DbLock ../libdb_java/db_java_wrap.c 1741 DbLock::DbLock ../cxx/cxx_lock.cpp /^DbLock::DbLock(DB_LOCK value)$/ DbLock::operator = ../cxx/cxx_lock.cpp /^DbLock &DbLock::operator = (const DbLock &that)$/ DbLockNotGrantedException::DbLockNotGrantedException ../cxx/cxx_except.cpp /^DbLockNotGrantedException::DbLockNotGrantedExcepti/ @@ -697,47 +849,158 @@ DbLockNotGrantedException::get_obj ../cxx/cxx_except.cpp /^const Dbt* DbLockNotG DbLockNotGrantedException::get_op ../cxx/cxx_except.cpp /^db_lockop_t DbLockNotGrantedException::get_op() co/ DbLockNotGrantedException::operator = ../cxx/cxx_except.cpp /^&DbLockNotGrantedException::operator =(const DbLoc/ DbLockNotGrantedException::~DbLockNotGrantedException ../cxx/cxx_except.cpp /^DbLockNotGrantedException::~DbLockNotGrantedExcept/ +DbLogc ../libdb_java/db_java_wrap.c 1742 DbLogc::close ../cxx/cxx_logc.cpp /^int DbLogc::close(u_int32_t _flags)$/ DbLogc::get ../cxx/cxx_logc.cpp /^int DbLogc::get(DbLsn *lsn, Dbt *data, u_int32_t _/ DbLogc::~DbLogc ../cxx/cxx_logc.cpp /^DbLogc::~DbLogc()$/ +DbLogc_close ../libdb_java/db_java_wrap.c /^db_ret_t DbLogc_close(struct DbLogc *self,u_int32_/ +DbLogc_get ../libdb_java/db_java_wrap.c /^int DbLogc_get(struct DbLogc *self,DB_LSN *lsn,DBT/ +DbLsn ../libdb_java/db_java_wrap.c 1743 DbMemoryException::DbMemoryException ../cxx/cxx_except.cpp /^DbMemoryException::DbMemoryException(Dbt *dbt)$/ DbMemoryException::get_dbt ../cxx/cxx_except.cpp /^Dbt *DbMemoryException::get_dbt() const$/ DbMemoryException::operator = ../cxx/cxx_except.cpp /^&DbMemoryException::operator =(const DbMemoryExcep/ -DbMemoryException::~DbMemoryException ../cxx/cxx_except.cpp /^DbMemoryException::~DbMemoryException()$/ +DbMemoryException::~DbMemoryException ../cxx/cxx_except.cpp /^DbMemoryException::~DbMemoryException() throw()$/ +DbMpoolFile ../libdb_java/db_java_wrap.c 1744 DbMpoolFile::DbMpoolFile ../cxx/cxx_mpool.cpp /^DbMpoolFile::DbMpoolFile()$/ DbMpoolFile::close ../cxx/cxx_mpool.cpp /^int DbMpoolFile::close(u_int32_t flags)$/ DbMpoolFile::~DbMpoolFile ../cxx/cxx_mpool.cpp /^DbMpoolFile::~DbMpoolFile()$/ +DbMpoolFile_get_flags ../libdb_java/db_java_wrap.c /^u_int32_t DbMpoolFile_get_flags(struct DbMpoolFile/ +DbMpoolFile_get_maxsize ../libdb_java/db_java_wrap.c /^jlong DbMpoolFile_get_maxsize(struct DbMpoolFile */ +DbMpoolFile_get_priority ../libdb_java/db_java_wrap.c /^DB_CACHE_PRIORITY DbMpoolFile_get_priority(struct / +DbMpoolFile_set_flags ../libdb_java/db_java_wrap.c /^db_ret_t DbMpoolFile_set_flags(struct DbMpoolFile / +DbMpoolFile_set_maxsize ../libdb_java/db_java_wrap.c /^db_ret_t DbMpoolFile_set_maxsize(struct DbMpoolFil/ +DbMpoolFile_set_priority ../libdb_java/db_java_wrap.c /^db_ret_t DbMpoolFile_set_priority(struct DbMpoolFi/ DbMultipleDataIterator::next ../cxx/cxx_multi.cpp /^bool DbMultipleDataIterator::next(Dbt &data)$/ DbMultipleIterator::DbMultipleIterator ../cxx/cxx_multi.cpp /^DbMultipleIterator::DbMultipleIterator(const Dbt &/ DbMultipleKeyDataIterator::next ../cxx/cxx_multi.cpp /^bool DbMultipleKeyDataIterator::next(Dbt &key, Dbt/ 
DbMultipleRecnoDataIterator::next ../cxx/cxx_multi.cpp /^bool DbMultipleRecnoDataIterator::next(db_recno_t / DbRunRecoveryException::DbRunRecoveryException ../cxx/cxx_except.cpp /^DbRunRecoveryException::DbRunRecoveryException(con/ DbRunRecoveryException::operator = ../cxx/cxx_except.cpp /^&DbRunRecoveryException::operator =(const DbRunRec/ -DbRunRecoveryException::~DbRunRecoveryException ../cxx/cxx_except.cpp /^DbRunRecoveryException::~DbRunRecoveryException()$/ +DbRunRecoveryException::~DbRunRecoveryException ../cxx/cxx_except.cpp /^DbRunRecoveryException::~DbRunRecoveryException() / +DbSequence ../libdb_java/db_java_wrap.c 1745 +DbSequence::DbSequence ../cxx/cxx_seq.cpp /^DbSequence::DbSequence(Db *db, u_int32_t flags)$/ +DbSequence::get_key ../cxx/cxx_seq.cpp /^Dbt *DbSequence::get_key()$/ +DbSequence::wrap_DB_SEQUENCE ../cxx/cxx_seq.cpp /^DbSequence *DbSequence::wrap_DB_SEQUENCE(DB_SEQUEN/ +DbSequence::~DbSequence ../cxx/cxx_seq.cpp /^DbSequence::~DbSequence()$/ +DbSequence_close ../libdb_java/db_java_wrap.c /^db_ret_t DbSequence_close(struct DbSequence *self,/ +DbSequence_get ../libdb_java/db_java_wrap.c /^db_seq_t DbSequence_get(struct DbSequence *self,DB/ +DbSequence_get_cachesize ../libdb_java/db_java_wrap.c /^int32_t DbSequence_get_cachesize(struct DbSequence/ +DbSequence_get_db ../libdb_java/db_java_wrap.c /^DB *DbSequence_get_db(struct DbSequence *self){$/ +DbSequence_get_flags ../libdb_java/db_java_wrap.c /^u_int32_t DbSequence_get_flags(struct DbSequence */ +DbSequence_get_key ../libdb_java/db_java_wrap.c /^db_ret_t DbSequence_get_key(struct DbSequence *sel/ +DbSequence_get_range_max ../libdb_java/db_java_wrap.c /^db_seq_t DbSequence_get_range_max(struct DbSequenc/ +DbSequence_get_range_min ../libdb_java/db_java_wrap.c /^db_seq_t DbSequence_get_range_min(struct DbSequenc/ +DbSequence_initial_value ../libdb_java/db_java_wrap.c /^db_ret_t DbSequence_initial_value(struct DbSequenc/ +DbSequence_open ../libdb_java/db_java_wrap.c /^db_ret_t DbSequence_open(struct DbSequence *self,D/ +DbSequence_remove ../libdb_java/db_java_wrap.c /^db_ret_t DbSequence_remove(struct DbSequence *self/ +DbSequence_set_cachesize ../libdb_java/db_java_wrap.c /^db_ret_t DbSequence_set_cachesize(struct DbSequenc/ +DbSequence_set_flags ../libdb_java/db_java_wrap.c /^db_ret_t DbSequence_set_flags(struct DbSequence *s/ +DbSequence_set_range ../libdb_java/db_java_wrap.c /^db_ret_t DbSequence_set_range(struct DbSequence *s/ +DbSequence_stat ../libdb_java/db_java_wrap.c /^DB_SEQUENCE_STAT *DbSequence_stat(struct DbSequenc/ +DbTxn ../libdb_java/db_java_wrap.c 1746 DbTxn::DbTxn ../cxx/cxx_txn.cpp /^DbTxn::DbTxn()$/ DbTxn::~DbTxn ../cxx/cxx_txn.cpp /^DbTxn::~DbTxn()$/ +DbTxn_abort ../libdb_java/db_java_wrap.c /^db_ret_t DbTxn_abort(struct DbTxn *self){$/ +DbTxn_commit ../libdb_java/db_java_wrap.c /^db_ret_t DbTxn_commit(struct DbTxn *self,u_int32_t/ +DbTxn_discard ../libdb_java/db_java_wrap.c /^db_ret_t DbTxn_discard(struct DbTxn *self,u_int32_/ +DbTxn_id ../libdb_java/db_java_wrap.c /^u_int32_t DbTxn_id(struct DbTxn *self){$/ +DbTxn_prepare ../libdb_java/db_java_wrap.c /^db_ret_t DbTxn_prepare(struct DbTxn *self,u_int8_t/ +DbTxn_set_timeout ../libdb_java/db_java_wrap.c /^db_ret_t DbTxn_set_timeout(struct DbTxn *self,db_t/ +Db_associate ../libdb_java/db_java_wrap.c /^db_ret_t Db_associate(struct Db *self,DB_TXN *txni/ +Db_close ../libdb_java/db_java_wrap.c /^int Db_close(struct Db *self,u_int32_t flags){$/ +Db_cursor ../libdb_java/db_java_wrap.c /^DBC *Db_cursor(struct Db *self,DB_TXN *txnid,u_int/ +Db_del 
../libdb_java/db_java_wrap.c /^int Db_del(struct Db *self,DB_TXN *txnid,DBT *key,/ +Db_err ../libdb_java/db_java_wrap.c /^void Db_err(struct Db *self,int error,char const */ +Db_errx ../libdb_java/db_java_wrap.c /^void Db_errx(struct Db *self,char const *message){/ +Db_get ../libdb_java/db_java_wrap.c /^int Db_get(struct Db *self,DB_TXN *txnid,DBT *key,/ +Db_get_bt_minkey ../libdb_java/db_java_wrap.c /^u_int32_t Db_get_bt_minkey(struct Db *self){$/ +Db_get_byteswapped ../libdb_java/db_java_wrap.c /^int_bool Db_get_byteswapped(struct Db *self){$/ +Db_get_cachesize ../libdb_java/db_java_wrap.c /^jlong Db_get_cachesize(struct Db *self){$/ +Db_get_cachesize_ncache ../libdb_java/db_java_wrap.c /^u_int32_t Db_get_cachesize_ncache(struct Db *self)/ +Db_get_dbname ../libdb_java/db_java_wrap.c /^char const *Db_get_dbname(struct Db *self){$/ +Db_get_encrypt_flags ../libdb_java/db_java_wrap.c /^u_int32_t Db_get_encrypt_flags(struct Db *self){$/ +Db_get_errpfx ../libdb_java/db_java_wrap.c /^char const *Db_get_errpfx(struct Db *self){$/ +Db_get_filename ../libdb_java/db_java_wrap.c /^char const *Db_get_filename(struct Db *self){$/ +Db_get_flags ../libdb_java/db_java_wrap.c /^u_int32_t Db_get_flags(struct Db *self){$/ +Db_get_h_ffactor ../libdb_java/db_java_wrap.c /^u_int32_t Db_get_h_ffactor(struct Db *self){$/ +Db_get_h_nelem ../libdb_java/db_java_wrap.c /^u_int32_t Db_get_h_nelem(struct Db *self){$/ +Db_get_lorder ../libdb_java/db_java_wrap.c /^int Db_get_lorder(struct Db *self){$/ +Db_get_mpf ../libdb_java/db_java_wrap.c /^DB_MPOOLFILE *Db_get_mpf(struct Db *self){$/ +Db_get_open_flags ../libdb_java/db_java_wrap.c /^u_int32_t Db_get_open_flags(struct Db *self){$/ +Db_get_pagesize ../libdb_java/db_java_wrap.c /^u_int32_t Db_get_pagesize(struct Db *self){$/ +Db_get_q_extentsize ../libdb_java/db_java_wrap.c /^u_int32_t Db_get_q_extentsize(struct Db *self){$/ +Db_get_re_delim ../libdb_java/db_java_wrap.c /^int Db_get_re_delim(struct Db *self){$/ +Db_get_re_len ../libdb_java/db_java_wrap.c /^u_int32_t Db_get_re_len(struct Db *self){$/ +Db_get_re_pad ../libdb_java/db_java_wrap.c /^int Db_get_re_pad(struct Db *self){$/ +Db_get_re_source ../libdb_java/db_java_wrap.c /^char const *Db_get_re_source(struct Db *self){$/ +Db_get_transactional ../libdb_java/db_java_wrap.c /^int_bool Db_get_transactional(struct Db *self){$/ +Db_get_type ../libdb_java/db_java_wrap.c /^DBTYPE Db_get_type(struct Db *self){$/ +Db_join ../libdb_java/db_java_wrap.c /^DBC *Db_join(struct Db *self,DBC **curslist,u_int3/ +Db_key_range ../libdb_java/db_java_wrap.c /^db_ret_t Db_key_range(struct Db *self,DB_TXN *txni/ +Db_open ../libdb_java/db_java_wrap.c /^db_ret_t Db_open(struct Db *self,DB_TXN *txnid,cha/ +Db_pget ../libdb_java/db_java_wrap.c /^int Db_pget(struct Db *self,DB_TXN *txnid,DBT *key/ +Db_put ../libdb_java/db_java_wrap.c /^int Db_put(struct Db *self,DB_TXN *txnid,DBT *key,/ +Db_remove ../libdb_java/db_java_wrap.c /^db_ret_t Db_remove(struct Db *self,char const *fil/ +Db_rename ../libdb_java/db_java_wrap.c /^db_ret_t Db_rename(struct Db *self,char const *fil/ +Db_set_append_recno ../libdb_java/db_java_wrap.c /^db_ret_t Db_set_append_recno(struct Db *self,int (/ +Db_set_bt_compare ../libdb_java/db_java_wrap.c /^db_ret_t Db_set_bt_compare(struct Db *self,int (*b/ +Db_set_bt_maxkey ../libdb_java/db_java_wrap.c /^db_ret_t Db_set_bt_maxkey(struct Db *self,u_int32_/ +Db_set_bt_minkey ../libdb_java/db_java_wrap.c /^db_ret_t Db_set_bt_minkey(struct Db *self,u_int32_/ +Db_set_bt_prefix ../libdb_java/db_java_wrap.c /^db_ret_t 
Db_set_bt_prefix(struct Db *self,size_t (/ +Db_set_cachesize ../libdb_java/db_java_wrap.c /^db_ret_t Db_set_cachesize(struct Db *self,jlong by/ +Db_set_dup_compare ../libdb_java/db_java_wrap.c /^db_ret_t Db_set_dup_compare(struct Db *self,int (*/ +Db_set_encrypt ../libdb_java/db_java_wrap.c /^db_ret_t Db_set_encrypt(struct Db *self,char const/ +Db_set_errpfx ../libdb_java/db_java_wrap.c /^void Db_set_errpfx(struct Db *self,char const *err/ +Db_set_feedback ../libdb_java/db_java_wrap.c /^db_ret_t Db_set_feedback(struct Db *self,void (*db/ +Db_set_flags ../libdb_java/db_java_wrap.c /^db_ret_t Db_set_flags(struct Db *self,u_int32_t fl/ +Db_set_h_ffactor ../libdb_java/db_java_wrap.c /^db_ret_t Db_set_h_ffactor(struct Db *self,u_int32_/ +Db_set_h_hash ../libdb_java/db_java_wrap.c /^db_ret_t Db_set_h_hash(struct Db *self,u_int32_t (/ +Db_set_h_nelem ../libdb_java/db_java_wrap.c /^db_ret_t Db_set_h_nelem(struct Db *self,u_int32_t / +Db_set_lorder ../libdb_java/db_java_wrap.c /^db_ret_t Db_set_lorder(struct Db *self,int lorder)/ +Db_set_pagesize ../libdb_java/db_java_wrap.c /^db_ret_t Db_set_pagesize(struct Db *self,u_int32_t/ +Db_set_q_extentsize ../libdb_java/db_java_wrap.c /^db_ret_t Db_set_q_extentsize(struct Db *self,u_int/ +Db_set_re_delim ../libdb_java/db_java_wrap.c /^db_ret_t Db_set_re_delim(struct Db *self,int re_de/ +Db_set_re_len ../libdb_java/db_java_wrap.c /^db_ret_t Db_set_re_len(struct Db *self,u_int32_t r/ +Db_set_re_pad ../libdb_java/db_java_wrap.c /^db_ret_t Db_set_re_pad(struct Db *self,int re_pad)/ +Db_set_re_source ../libdb_java/db_java_wrap.c /^db_ret_t Db_set_re_source(struct Db *self,char *so/ +Db_stat ../libdb_java/db_java_wrap.c /^void *Db_stat(struct Db *self,DB_TXN *txnid,u_int3/ +Db_sync ../libdb_java/db_java_wrap.c /^db_ret_t Db_sync(struct Db *self,u_int32_t flags){/ Db_tcl_Init ../tcl/tcl_db_pkg.c /^Db_tcl_Init(interp)$/ +Db_truncate ../libdb_java/db_java_wrap.c /^int Db_truncate(struct Db *self,DB_TXN *txnid,u_in/ +Db_upgrade ../libdb_java/db_java_wrap.c /^db_ret_t Db_upgrade(struct Db *self,char const *fi/ +Db_verify ../libdb_java/db_java_wrap.c /^int_bool Db_verify(struct Db *self,char const *fil/ +Dbc ../libdb_java/db_java_wrap.c 1738 Dbc::get ../cxx/cxx_dbc.cpp /^int Dbc::get(Dbt* key, Dbt *data, u_int32_t _flags/ Dbc::pget ../cxx/cxx_dbc.cpp /^int Dbc::pget(Dbt* key, Dbt *pkey, Dbt *data, u_in/ Dbc::~Dbc ../cxx/cxx_dbc.cpp /^Dbc::~Dbc()$/ +Dbc_close ../libdb_java/db_java_wrap.c /^db_ret_t Dbc_close(struct Dbc *self){$/ +Dbc_count ../libdb_java/db_java_wrap.c /^db_recno_t Dbc_count(struct Dbc *self,u_int32_t fl/ +Dbc_del ../libdb_java/db_java_wrap.c /^int Dbc_del(struct Dbc *self,u_int32_t flags){$/ +Dbc_dup ../libdb_java/db_java_wrap.c /^DBC *Dbc_dup(struct Dbc *self,u_int32_t flags){$/ +Dbc_get ../libdb_java/db_java_wrap.c /^int Dbc_get(struct Dbc *self,DBT *key,DBT *data,u_/ +Dbc_pget ../libdb_java/db_java_wrap.c /^int Dbc_pget(struct Dbc *self,DBT *key,DBT *pkey,D/ +Dbc_put ../libdb_java/db_java_wrap.c /^int Dbc_put(struct Dbc *self,DBT *key,DBT *data,u_/ +Dbt ../libdb_java/db_java_wrap.c 1739 Dbt::Dbt ../cxx/cxx_dbt.cpp /^Dbt::Dbt()$/ Dbt::operator = ../cxx/cxx_dbt.cpp /^Dbt &Dbt::operator = (const Dbt &that)$/ Dbt::~Dbt ../cxx/cxx_dbt.cpp /^Dbt::~Dbt()$/ -EMSG ../clib/getopt.c 63 +EMSG ../clib/getopt.c 61 ENV_ILLEGAL_AFTER_OPEN ../dbinc/db_int.in /^#define ENV_ILLEGAL_AFTER_OPEN(dbenv, name) \\$/ ENV_ILLEGAL_BEFORE_OPEN ../dbinc/db_int.in /^#define ENV_ILLEGAL_BEFORE_OPEN(dbenv, name) \\$/ +ENV_NOT_CONFIGURED ../dbinc/db_int.in /^#define 
ENV_NOT_CONFIGURED(dbenv, handle, i, flags/ ENV_REQUIRES_CONFIG ../dbinc/db_int.in /^#define ENV_REQUIRES_CONFIG(dbenv, handle, i, flag/ EPG ../dbinc/btree.h 50 EPRINT ../dbinc/db_verify.h /^#define EPRINT(x) do { \\$/ FALSE ../crypto/rijndael/rijndael-api-fst.h 55 FAST_HASH ../lock/lock_util.c /^#define FAST_HASH(P) { \\$/ -FLAG_CHECK ../dbinc/tcl_db.h /^#define FLAG_CHECK(flag) \\$/ -FLAG_CHECK2 ../dbinc/tcl_db.h /^#define FLAG_CHECK2(flag,val) \\$/ +FLAG_CHECK ../dbinc/tcl_db.h /^#define FLAG_CHECK(flag) do { \\$/ +FLAG_CHECK2 ../dbinc/tcl_db.h /^#define FLAG_CHECK2(flag, val) do { \\$/ FLD_CLR ../dbinc/db_int.in /^#define FLD_CLR(fld, f) (fld) &= ~(f)$/ FLD_ISSET ../dbinc/db_int.in /^#define FLD_ISSET(fld, f) ((fld) & (f))$/ FLD_SET ../dbinc/db_int.in /^#define FLD_SET(fld, f) (fld) |= (f)$/ -FMAP_ENTRIES ../mp/mp_stat.c 260 -FNAME ../dbinc/db.in 125 +FMAP_ENTRIES ../mp/mp_stat.c 304 +FNAME ../dbinc/db.in 158 +FREE_IF_CHANGED ../rpc_client/gen_client_ret.c /^#define FREE_IF_CHANGED(dbtp, orig) do { \\$/ FREE_IF_NEEDED ../dbinc/db_int.in /^#define FREE_IF_NEEDED(sdbp, dbt) \\$/ F_CLR ../dbinc/db_int.in /^#define F_CLR(p, f) (p)->flags &= ~(f)$/ F_ISSET ../dbinc/db_int.in /^#define F_ISSET(p, f) ((p)->flags & (f))$/ @@ -746,21 +1009,21 @@ GETU32 ../crypto/rijndael/rijndael-alg-fst.c /^#define GETU32(p) SWAP(*((u32 *)( GET_BINTERNAL ../dbinc/db_page.h /^#define GET_BINTERNAL(dbp, pg, indx) \\$/ GET_BKEYDATA ../dbinc/db_page.h /^#define GET_BKEYDATA(dbp, pg, indx) \\$/ GET_BOVERFLOW ../dbinc/db_page.h /^#define GET_BOVERFLOW(dbp, pg, indx) \\$/ -GET_COUNT ../lock/lock.c /^#define GET_COUNT(dp, count) do { \\$/ -GET_ENVLOCK ../fileops/fop_util.c /^#define GET_ENVLOCK(ENV, ID, L) do { \\$/ +GET_COUNT ../lock/lock_list.c /^#define GET_COUNT(dp, count) do { \\$/ +GET_ENVLOCK ../fileops/fop_util.c /^#define GET_ENVLOCK(ENV, ID, L) (0)$/ GET_HANDLE ../mutex/mut_win32.c /^#define GET_HANDLE(mutexp, event) do { \\$/ GET_PAGE ../db/db_upg_opd.c /^#define GET_PAGE(dbp, fhp, pgno, page) { \\$/ -GET_PCOUNT ../lock/lock.c /^#define GET_PCOUNT(dp, count) do { \\$/ -GET_PGNO ../lock/lock.c /^#define GET_PGNO(dp, pgno) do { \\$/ +GET_PCOUNT ../lock/lock_list.c /^#define GET_PCOUNT(dp, count) do { \\$/ +GET_PGNO ../lock/lock_list.c /^#define GET_PGNO(dp, pgno) do { \\$/ GET_RINTERNAL ../dbinc/db_page.h /^#define GET_RINTERNAL(dbp, pg, indx) \\$/ -GET_SIZE ../lock/lock.c /^#define GET_SIZE(dp, size) do { \\$/ -GIGABYTE ../dbinc/db_int.in 42 +GET_SIZE ../lock/lock_list.c /^#define GET_SIZE(dp, size) do { \\$/ +GIGABYTE ../dbinc/db_int.in 73 GLOB_CHAR ../tcl/tcl_internal.c /^#define GLOB_CHAR(c) ((c) == '*' || (c) == '?')$/ GUARD_BYTE ../dbinc/debug.h 29 -HASH4 ../hash/hash_func.c 168 -HASH4a ../hash/hash_func.c 166 -HASH4b ../hash/hash_func.c 167 -HASHC ../hash/hash_func.c 115 +HASH4 ../hash/hash_func.c 166 +HASH4a ../hash/hash_func.c 164 +HASH4b ../hash/hash_func.c 165 +HASHC ../hash/hash_func.c 113 HASHINSERT ../dbinc/db_shash.h /^#define HASHINSERT(begin, ndx, type, field, elt) d/ HASHLOOKUP ../dbinc/db_shash.h /^#define HASHLOOKUP(begin, ndx, type, field, elt, r/ HASHMAGIC ../dbinc/db_185.in 138 @@ -769,13 +1032,13 @@ HASHVERSION ../dbinc/db_185.in 139 HASH_UNUSED1 ../dbinc/hash.h 130 HASH_UNUSED2 ../dbinc/hash.h 131 HDR ../dbinc/log.h 59 -HDR_CRYPTO_SZ ../dbinc/log.h 132 -HDR_NORMAL_SZ ../dbinc/log.h 131 +HDR_CRYPTO_SZ ../dbinc/log.h 134 +HDR_NORMAL_SZ ../dbinc/log.h 133 HKEYDATA_DATA ../dbinc/db_page.h /^#define HKEYDATA_DATA(p) (((u_int8_t *)p) + SSZA(H/ HKEYDATA_PSIZE ../dbinc/db_page.h /^#define 
HKEYDATA_PSIZE(len) \\$/ HKEYDATA_SIZE ../dbinc/db_page.h /^#define HKEYDATA_SIZE(len) \\$/ -HMAC_BLOCK_SIZE ../hmac/hmac.c 28 -HMAC_OUTPUT_SIZE ../hmac/hmac.c 27 +HMAC_BLOCK_SIZE ../hmac/hmac.c 26 +HMAC_OUTPUT_SIZE ../hmac/hmac.c 25 HOFFDUP_PGNO ../dbinc/db_page.h /^#define HOFFDUP_PGNO(p) (((u_int8_t *)p) + SSZ(HO/ HOFFDUP_SIZE ../dbinc/db_page.h 512 HOFFPAGE_PGNO ../dbinc/db_page.h /^#define HOFFPAGE_PGNO(p) (((u_int8_t *)p) + SSZ(HO/ @@ -785,6 +1048,7 @@ HOFFPAGE_TLEN ../dbinc/db_page.h /^#define HOFFPAGE_TLEN(p) (((u_int8_t *)p) + S HOFFSET ../dbinc/db_page.h /^#define HOFFSET(p) (((PAGE *)p)->hf_offset)$/ HPAGE_PTYPE ../dbinc/db_page.h /^#define HPAGE_PTYPE(p) (*(u_int8_t *)p)$/ HPAGE_TYPE ../dbinc/db_page.h /^#define HPAGE_TYPE(dbp, pg, indx) (*P_ENTRY(dbp, p/ +HPUX_MUTEX_PAD ../dbinc/mutex.h 131 H_CONTINUE ../dbinc/hash.h 79 H_DATAINDEX ../dbinc/db_page.h /^#define H_DATAINDEX(indx) ((indx) + 1)$/ H_DELETED ../dbinc/hash.h 80 @@ -804,11 +1068,14 @@ H_OK ../dbinc/hash.h 87 H_PAIRDATA ../dbinc/db_page.h /^#define H_PAIRDATA(dbp, pg, indx) P_ENTRY(dbp, pg,/ H_PAIRKEY ../dbinc/db_page.h /^#define H_PAIRKEY(dbp, pg, indx) P_ENTRY(dbp, pg, / H_PAIRSIZE ../dbinc/db_page.h /^#define H_PAIRSIZE(dbp, pg, psize, indx) \\$/ -ILLEGAL_SIZE ../env/db_salloc.c 171 -INITIAL_DEAD_ALLOC ../lock/lock_deadlock.c 659 +ILLEGAL_SIZE ../env/db_salloc.c 37 +INITIAL_DEAD_ALLOC ../lock/lock_deadlock.c 699 INIT_LSN ../dbinc/db_int.in /^#define INIT_LSN(LSN) do { \\$/ INP_OFFSET ../db/db_vrfy.c /^#define INP_OFFSET(dbp, h, i) \\$/ -INVALID_LSNMSG ../tcl/tcl_internal.c 506 +INT64_FMT ../dbinc/db_int.in 55 +INT64_MAX ../dbinc/db_int.in 51 +INT64_MIN ../dbinc/db_int.in 52 +INVALID_LSNMSG ../tcl/tcl_internal.c 518 INVALID_ORDER ../dbinc/btree.h 59 INVALID_REGION_ID ../dbinc/region.h 107 INVALID_REGION_SEGID ../dbinc/region.h 119 @@ -819,14 +1086,15 @@ ISBIG ../dbinc/hash.h /^#define ISBIG(I, N) (((N) > ((I)->hdr->dbmeta.page/ ISDOT ../clib/getcwd.c /^#define ISDOT(dp) \\$/ ISINTERNAL ../dbinc/btree.h /^#define ISINTERNAL(p) (TYPE(p) == P_IBTREE || TYPE/ ISLEAF ../dbinc/btree.h /^#define ISLEAF(p) (TYPE(p) == P_LBTREE || \\$/ -ISSET_MAP ../lock/lock_deadlock.c /^#define ISSET_MAP(M, N) ((M)[(N) \/ 32] & (1 << (N)/ +ISSET_MAP ../lock/lock_deadlock.c /^#define ISSET_MAP(M, N) ((M)[(N) \/ 32] & (1 << ((N/ IS_AUTO_COMMIT ../dbinc/db_am.h /^#define IS_AUTO_COMMIT(dbenv, txn, flags) \\$/ IS_BTREE_PAGE ../btree/bt_rec.c /^#define IS_BTREE_PAGE(pagep) \\$/ +IS_CLIENT_PGRECOVER ../dbinc/rep.h /^#define IS_CLIENT_PGRECOVER(dbenv) \\$/ IS_CUR_DELETED ../btree/bt_cursor.c /^#define IS_CUR_DELETED(dbc) \\$/ IS_CUR_DUPLICATE ../btree/bt_cursor.c /^#define IS_CUR_DUPLICATE(dbc, orig_pgno, orig_indx/ IS_DELETED ../btree/bt_cursor.c /^#define IS_DELETED(dbp, page, indx) \\$/ IS_DUPLICATE ../btree/bt_cursor.c /^#define IS_DUPLICATE(dbc, i1, i2) \\$/ -IS_ENV_REPLICATED ../dbinc/rep.h /^#define IS_ENV_REPLICATED(E) (!IS_RECOVERING(E) &&/ +IS_ENV_REPLICATED ../dbinc/rep.h /^#define IS_ENV_REPLICATED(E) (REP_ON(E) && \\$/ IS_HELP ../dbinc/tcl_db.h /^#define IS_HELP(s) \\$/ IS_INITIALIZED ../dbinc/db_int.in /^#define IS_INITIALIZED(dbc) ((dbc)->internal->pgno/ IS_INIT_LSN ../dbinc/db_int.in /^#define IS_INIT_LSN(LSN) ((LSN).file == 1 && (LSN)/ @@ -836,216 +1104,227 @@ IS_READONLY ../db/db_iface.c /^#define IS_READONLY(dbp) \\$/ IS_RECOVERING ../dbinc/db_int.in /^#define IS_RECOVERING(dbenv) \\$/ IS_REPLICATED ../dbinc/rep.h /^#define IS_REPLICATED(E, D) \\$/ IS_REP_CLIENT ../dbinc/rep.h /^#define IS_REP_CLIENT(dbenv) \\$/ -IS_REP_LOGSONLY 
../dbinc/rep.h /^#define IS_REP_LOGSONLY(dbenv) \\$/ IS_REP_MASTER ../dbinc/rep.h /^#define IS_REP_MASTER(dbenv) \\$/ -IS_SIMPLE ../rep/rep_record.c /^#define IS_SIMPLE(R) ((R) != DB___txn_regop && (R)/ IS_SUBTRANSACTION ../dbinc/db_int.in /^#define IS_SUBTRANSACTION(txn) \\$/ IS_VALID_PAGESIZE ../dbinc/db_int.in /^#define IS_VALID_PAGESIZE(x) \\$/ IS_VALID_PGNO ../dbinc/db_verify.h /^#define IS_VALID_PGNO(x) ((x) <= vdp->last_pgno)$/ IS_WRITELOCK ../dbinc/lock.h /^#define IS_WRITELOCK(m) \\$/ -IS_XA_TXN ../txn/txn_rec.c /^#define IS_XA_TXN(R) (R->xid.size != 0)$/ IS_ZERO_LSN ../dbinc/db_int.in /^#define IS_ZERO_LSN(LSN) ((LSN).file == 0)$/ -ITEM_BEGIN ../btree/bt_verify.c 603 -ITEM_END ../btree/bt_verify.c 604 JAVADB_STAT_ACTIVE ../libdb_java/db_java_wrap.c /^#define JAVADB_STAT_ACTIVE(jenv, cl, jobj, statp, / -JAVADB_STAT_INT ../libdb_java/db_java_wrap.c /^#define JAVADB_STAT_INT(jenv, cl, jobj, statp, nam/ -JAVADB_STAT_LONG ../libdb_java/db_java_wrap.c /^#define JAVADB_STAT_LONG(jenv, cl, jobj, statp, na/ -JAVADB_STAT_LSN ../libdb_java/db_java_wrap.c /^#define JAVADB_STAT_LSN(jenv, cl, jobj, statp, nam/ -JAVADB_STAT_STRING ../libdb_java/db_java_wrap.c /^#define JAVADB_STAT_STRING(jenv, cl, jobj, statp, / -JAVADB_STAT_XID ../libdb_java/db_java_wrap.c /^#define JAVADB_STAT_XID(jenv, cl, jobj, statp, nam/ -JDBENV ../libdb_java/db_java_wrap.c 384 +JAVADB_STAT_INT ../libdb_java/db_java_wrap.c /^#define JAVADB_STAT_INT(jenv, jobj, fid, statp, na/ +JAVADB_STAT_LONG ../libdb_java/db_java_wrap.c /^#define JAVADB_STAT_LONG(jenv, jobj, fid, statp, n/ +JAVADB_STAT_LSN ../libdb_java/db_java_wrap.c /^#define JAVADB_STAT_LSN(jenv, jobj, fid, statp, na/ +JAVADB_STAT_STRING ../libdb_java/db_java_wrap.c /^#define JAVADB_STAT_STRING(jenv, jobj, fid, statp,/ +JAVADB_STAT_XID ../libdb_java/db_java_wrap.c /^#define JAVADB_STAT_XID(jenv, jobj, fid, statp, na/ +JDBENV ../libdb_java/db_java_wrap.c 811 JOIN_RETRY ../dbinc/db_join.h 27 -Java_com_sleepycat_db_DbUtil_is_1big_1endian ../libdb_java/db_java_wrap.c /^Java_com_sleepycat_db_DbUtil_is_1big_1endian(JNIEn/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1close0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1dbremove ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1dbrename ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1err ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1errx ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1cachesize ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1cachesize_1ncache ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1data_1dirs ../libdb_java/db_java_wrap.c /^JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1encrypt_1flags ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1flags ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1home ../libdb_java/db_java_wrap.c /^JNIEXPORT jstring JNICALL 
Java_com_sleepycat_db_db/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1lg_1bsize ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1lg_1dir ../libdb_java/db_java_wrap.c /^JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1lg_1max ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1lg_1regionmax ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1lk_1conflicts ../libdb_java/db_java_wrap.c /^JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1lk_1detect ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1lk_1max_1lockers ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1lk_1max_1locks ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1lk_1max_1objects ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1mp_1mmapsize ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1open_1flags ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1rep_1limit ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1shm_1key ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1tas_1spins ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1timeout ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1tmp_1dir ../libdb_java/db_java_wrap.c /^JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1tx_1max ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1tx_1timestamp ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1verbose ../libdb_java/db_java_wrap.c /^JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_d/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1version_1major ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1version_1minor ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1version_1patch ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1version_1string ../libdb_java/db_java_wrap.c /^JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1detect ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1get ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL 
Java_com_sleepycat_db_db_1/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1id ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1id_1free ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1put ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1stat ../libdb_java/db_java_wrap.c /^JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1vec ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1log_1archive ../libdb_java/db_java_wrap.c /^JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1log_1compare ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1log_1cursor ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1log_1file ../libdb_java/db_java_wrap.c /^JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1log_1flush ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1log_1put ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1log_1stat ../libdb_java/db_java_wrap.c /^JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1memp_1fstat ../libdb_java/db_java_wrap.c /^JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1memp_1stat ../libdb_java/db_java_wrap.c /^JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1memp_1trickle ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1open0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1remove0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1rep_1elect ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1rep_1process_1message ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1rep_1start ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1rep_1stat ../libdb_java/db_java_wrap.c /^JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1setErrorHandler ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1setFeedbackHandler ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1setPanicHandler ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1app_1dispatch ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1cachesize ../libdb_java/db_java_wrap.c 
/^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1data_1dir ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1encrypt ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1flags ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lg_1bsize ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lg_1dir ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lg_1max ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lg_1regionmax ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lk_1conflicts ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lk_1detect ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lk_1max_1lockers ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lk_1max_1locks ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lk_1max_1objects ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1mp_1mmapsize ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1rep_1limit ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1rep_1transport ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1rpc_1server ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1shm_1key ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1tas_1spins ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1timeout ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1tmp_1dir ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1tx_1max ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1tx_1timestamp0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1verbose ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1strerror ../libdb_java/db_java_wrap.c /^JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1txn_1begin ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL 
Java_com_sleepycat_db_db_1/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1txn_1checkpoint ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1txn_1recover ../libdb_java/db_java_wrap.c /^JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_/ -Java_com_sleepycat_db_db_1javaJNI_DbEnv_1txn_1stat ../libdb_java/db_java_wrap.c /^JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db/ -Java_com_sleepycat_db_db_1javaJNI_DbLogc_1close0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbLogc_1get ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbLsn_1get_1file ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbLsn_1get_1offset ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbMpoolFile_1get_1flags ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbMpoolFile_1get_1maxsize ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1/ -Java_com_sleepycat_db_db_1javaJNI_DbMpoolFile_1get_1priority ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbMpoolFile_1set_1flags ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbMpoolFile_1set_1maxsize ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbMpoolFile_1set_1priority ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbTxn_1abort0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbTxn_1commit0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbTxn_1discard0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbTxn_1id ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbTxn_1prepare ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_DbTxn_1set_1timeout ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1associate ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1close0 ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1cursor ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1/ -Java_com_sleepycat_db_db_1javaJNI_Db_1del ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1err ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1errx ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1_1SWIG_10 ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ 
-Java_com_sleepycat_db_db_1javaJNI_Db_1get_1_1SWIG_11 ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1bt_1minkey ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1byteswapped ../libdb_java/db_java_wrap.c /^JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_d/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1cachesize ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1cachesize_1ncache ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1dbname ../libdb_java/db_java_wrap.c /^JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1encrypt_1flags ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1filename ../libdb_java/db_java_wrap.c /^JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1flags ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1flags_1raw ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1h_1ffactor ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1h_1nelem ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1lorder ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1mpf ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1open_1flags ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1pagesize ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1q_1extentsize ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1re_1delim ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1re_1len ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1re_1pad ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1re_1source ../libdb_java/db_java_wrap.c /^JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1transactional ../libdb_java/db_java_wrap.c /^JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_d/ -Java_com_sleepycat_db_db_1javaJNI_Db_1get_1type ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1join ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1/ -Java_com_sleepycat_db_db_1javaJNI_Db_1key_1range ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1open0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ 
-Java_com_sleepycat_db_db_1javaJNI_Db_1put ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1remove0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1rename0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1setFeedbackHandler ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1set_1append_1recno ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1set_1bt_1compare ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1set_1bt_1maxkey ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1set_1bt_1minkey ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1set_1bt_1prefix ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1set_1cachesize ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1set_1dup_1compare ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1set_1encrypt ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1set_1flags ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1set_1h_1ffactor ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1set_1h_1hash ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1set_1h_1nelem ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1set_1lorder ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1set_1pagesize ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1set_1q_1extentsize ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1set_1re_1delim ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1set_1re_1len ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1set_1re_1pad ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1set_1re_1source ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1stat ../libdb_java/db_java_wrap.c /^JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db/ -Java_com_sleepycat_db_db_1javaJNI_Db_1sync ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1truncate ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ 
-Java_com_sleepycat_db_db_1javaJNI_Db_1upgrade ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Db_1verify0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Dbc_1close0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Dbc_1count ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Dbc_1del ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Dbc_1dup ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1/ -Java_com_sleepycat_db_db_1javaJNI_Dbc_1get_1_1SWIG_10 ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Dbc_1get_1_1SWIG_11 ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_Dbc_1put ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_deleteRef0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_delete_1DbLock ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_delete_1DbLsn ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1j/ -Java_com_sleepycat_db_db_1javaJNI_getDbEnv0 ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1/ -Java_com_sleepycat_db_db_1javaJNI_getDbRef0 ../libdb_java/db_java_wrap.c /^JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db/ -Java_com_sleepycat_db_db_1javaJNI_initDbEnvRef0 ../libdb_java/db_java_wrap.c /^JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db/ -Java_com_sleepycat_db_db_1javaJNI_initDbRef0 ../libdb_java/db_java_wrap.c /^JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db/ -Java_com_sleepycat_db_db_1javaJNI_initialize ../libdb_java/db_java_wrap.c /^Java_com_sleepycat_db_db_1javaJNI_initialize(JNIEn/ -Java_com_sleepycat_db_db_1javaJNI_new_1Db ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1/ -Java_com_sleepycat_db_db_1javaJNI_new_1DbEnv ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1/ -Java_com_sleepycat_db_db_1javaJNI_new_1DbLsn ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1/ -LCK_ALWAYS ../dbinc/db_am.h 90 -LCK_COUPLE ../dbinc/db_am.h 91 -LCK_COUPLE_ALWAYS ../dbinc/db_am.h 92 -LCK_DOWNGRADE ../dbinc/db_am.h 93 -LCK_ROLLBACK ../dbinc/db_am.h 94 +Java_com_sleepycat_db_internal_DbUtil_is_1big_1endian ../libdb_java/db_java_wrap.c /^Java_com_sleepycat_db_internal_DbUtil_is_1big_1end/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1close0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1dbremove ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1dbrename ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1err ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1errx 
../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1cachesize ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_inte/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1cachesize_1ncache ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1data_1dirs ../libdb_java/db_java_wrap.c /^JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1encrypt_1flags ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1errpfx ../libdb_java/db_java_wrap.c /^JNIEXPORT jstring JNICALL Java_com_sleepycat_db_in/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1flags ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1home ../libdb_java/db_java_wrap.c /^JNIEXPORT jstring JNICALL Java_com_sleepycat_db_in/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1lg_1bsize ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1lg_1dir ../libdb_java/db_java_wrap.c /^JNIEXPORT jstring JNICALL Java_com_sleepycat_db_in/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1lg_1max ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1lg_1regionmax ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1lk_1conflicts ../libdb_java/db_java_wrap.c /^JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1lk_1detect ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1lk_1max_1lockers ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1lk_1max_1locks ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1lk_1max_1objects ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1mp_1mmapsize ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_inte/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1open_1flags ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1rep_1limit ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_inte/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1shm_1key ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_inte/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1tas_1spins ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1timeout ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_inte/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1tmp_1dir ../libdb_java/db_java_wrap.c /^JNIEXPORT jstring JNICALL Java_com_sleepycat_db_in/ 
+Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1tx_1max ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1tx_1timestamp ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_inte/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1verbose ../libdb_java/db_java_wrap.c /^JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_i/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1version_1major ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1version_1minor ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1version_1patch ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1version_1string ../libdb_java/db_java_wrap.c /^JNIEXPORT jstring JNICALL Java_com_sleepycat_db_in/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1lock_1detect ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1lock_1get ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_inte/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1lock_1id ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1lock_1id_1free ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1lock_1put ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1lock_1stat ../libdb_java/db_java_wrap.c /^JNIEXPORT jobject JNICALL Java_com_sleepycat_db_in/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1lock_1vec ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1log_1archive ../libdb_java/db_java_wrap.c /^JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1log_1compare ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1log_1cursor ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_inte/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1log_1file ../libdb_java/db_java_wrap.c /^JNIEXPORT jstring JNICALL Java_com_sleepycat_db_in/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1log_1flush ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1log_1put ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1log_1stat ../libdb_java/db_java_wrap.c /^JNIEXPORT jobject JNICALL Java_com_sleepycat_db_in/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1memp_1fstat ../libdb_java/db_java_wrap.c /^JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1memp_1stat ../libdb_java/db_java_wrap.c /^JNIEXPORT jobject JNICALL Java_com_sleepycat_db_in/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1memp_1trickle ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ 
+Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1open ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1remove0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1rep_1elect ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1rep_1process_1message ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1rep_1start ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1rep_1stat ../libdb_java/db_java_wrap.c /^JNIEXPORT jobject JNICALL Java_com_sleepycat_db_in/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1app_1dispatch ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1cachesize ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1data_1dir ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1encrypt ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1errcall ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1errpfx ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1feedback ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1flags ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1lg_1bsize ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1lg_1dir ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1lg_1max ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1lg_1regionmax ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1lk_1conflicts ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1lk_1detect ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1lk_1max_1lockers ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1lk_1max_1locks ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1lk_1max_1objects ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1mp_1mmapsize ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL 
Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1msgcall ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1paniccall ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1rep_1limit ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1rep_1transport ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1rpc_1server ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1shm_1key ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1tas_1spins ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1timeout ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1tmp_1dir ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1tx_1max ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1tx_1timestamp0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1verbose ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1strerror ../libdb_java/db_java_wrap.c /^JNIEXPORT jstring JNICALL Java_com_sleepycat_db_in/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1txn_1begin ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_inte/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1txn_1checkpoint ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1txn_1recover ../libdb_java/db_java_wrap.c /^JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1txn_1stat ../libdb_java/db_java_wrap.c /^JNIEXPORT jobject JNICALL Java_com_sleepycat_db_in/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbLogc_1close0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbLogc_1get ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbMpoolFile_1get_1flags ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbMpoolFile_1get_1maxsize ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_inte/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbMpoolFile_1get_1priority ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbMpoolFile_1set_1flags ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbMpoolFile_1set_1maxsize ../libdb_java/db_java_wrap.c /^JNIEXPORT 
void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbMpoolFile_1set_1priority ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1close0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1get ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_inte/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1get_1cachesize ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1get_1db ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_inte/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1get_1flags ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1get_1key ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1get_1range_1max ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_inte/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1get_1range_1min ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_inte/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1initial_1value ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1open ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1remove0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1set_1cachesize ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1set_1flags ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1set_1range ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1stat ../libdb_java/db_java_wrap.c /^JNIEXPORT jobject JNICALL Java_com_sleepycat_db_in/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbTxn_1abort0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbTxn_1commit0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbTxn_1discard0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbTxn_1id ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbTxn_1prepare ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_DbTxn_1set_1timeout ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1associate ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1close0 ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL 
Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1cursor ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_inte/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1del ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1err ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1errx ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1bt_1minkey ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1byteswapped ../libdb_java/db_java_wrap.c /^JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_i/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1cachesize ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_inte/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1cachesize_1ncache ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1dbname ../libdb_java/db_java_wrap.c /^JNIEXPORT jstring JNICALL Java_com_sleepycat_db_in/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1encrypt_1flags ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1errpfx ../libdb_java/db_java_wrap.c /^JNIEXPORT jstring JNICALL Java_com_sleepycat_db_in/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1filename ../libdb_java/db_java_wrap.c /^JNIEXPORT jstring JNICALL Java_com_sleepycat_db_in/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1flags ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1h_1ffactor ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1h_1nelem ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1lorder ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1mpf ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_inte/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1open_1flags ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1pagesize ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1q_1extentsize ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1re_1delim ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1re_1len ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1re_1pad ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1re_1source 
../libdb_java/db_java_wrap.c /^JNIEXPORT jstring JNICALL Java_com_sleepycat_db_in/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1transactional ../libdb_java/db_java_wrap.c /^JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_i/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1type ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1join ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_inte/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1key_1range ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1open ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1pget ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1put ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1remove0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1rename0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1append_1recno ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1bt_1compare ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1bt_1maxkey ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1bt_1minkey ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1bt_1prefix ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1cachesize ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1dup_1compare ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1encrypt ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1errpfx ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1feedback ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1flags ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1h_1ffactor ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1h_1hash ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1h_1nelem ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1lorder ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ 
+Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1pagesize ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1q_1extentsize ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1re_1delim ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1re_1len ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1re_1pad ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1re_1source ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1stat ../libdb_java/db_java_wrap.c /^JNIEXPORT jobject JNICALL Java_com_sleepycat_db_in/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1sync ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1truncate ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1upgrade ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Db_1verify0 ../libdb_java/db_java_wrap.c /^JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_i/ +Java_com_sleepycat_db_internal_db_1javaJNI_Dbc_1close0 ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Dbc_1count ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Dbc_1del ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Dbc_1dup ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_inte/ +Java_com_sleepycat_db_internal_db_1javaJNI_Dbc_1get ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Dbc_1pget ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_Dbc_1put ../libdb_java/db_java_wrap.c /^JNIEXPORT jint JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_deleteRef0 ../libdb_java/db_java_wrap.c /^Java_com_sleepycat_db_internal_db_1javaJNI_deleteR/ +Java_com_sleepycat_db_internal_db_1javaJNI_delete_1DbLock ../libdb_java/db_java_wrap.c /^JNIEXPORT void JNICALL Java_com_sleepycat_db_inter/ +Java_com_sleepycat_db_internal_db_1javaJNI_getDbEnv0 ../libdb_java/db_java_wrap.c /^Java_com_sleepycat_db_internal_db_1javaJNI_getDbEn/ +Java_com_sleepycat_db_internal_db_1javaJNI_initDbEnvRef0 ../libdb_java/db_java_wrap.c /^Java_com_sleepycat_db_internal_db_1javaJNI_initDbE/ +Java_com_sleepycat_db_internal_db_1javaJNI_initDbRef0 ../libdb_java/db_java_wrap.c /^Java_com_sleepycat_db_internal_db_1javaJNI_initDbR/ +Java_com_sleepycat_db_internal_db_1javaJNI_initialize ../libdb_java/db_java_wrap.c /^Java_com_sleepycat_db_internal_db_1javaJNI_initial/ +Java_com_sleepycat_db_internal_db_1javaJNI_new_1Db ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_inte/ +Java_com_sleepycat_db_internal_db_1javaJNI_new_1DbEnv 
../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_inte/ +Java_com_sleepycat_db_internal_db_1javaJNI_new_1DbSequence ../libdb_java/db_java_wrap.c /^JNIEXPORT jlong JNICALL Java_com_sleepycat_db_inte/ +LASTCKP_CMP ../dbinc/log.h 364 +LCK_ALWAYS ../dbinc/db_am.h 114 +LCK_COUPLE ../dbinc/db_am.h 115 +LCK_COUPLE_ALWAYS ../dbinc/db_am.h 116 +LCK_DOWNGRADE ../dbinc/db_am.h 117 +LCK_ROLLBACK ../dbinc/db_am.h 118 LEAFLEVEL ../dbinc/db_page.h 240 LEN_HDATA ../dbinc/db_page.h /^#define LEN_HDATA(dbp, p, psize, indx) \\$/ LEN_HITEM ../dbinc/db_page.h /^#define LEN_HITEM(dbp, pg, pgsize, indx) \\$/ @@ -1058,33 +1337,29 @@ LFPREFIX ../dbinc/log.h 63 LF_CLR ../dbinc/db_int.in /^#define LF_CLR(f) ((flags) &= ~(f))$/ LF_ISSET ../dbinc/db_int.in /^#define LF_ISSET(f) ((flags) & (f))$/ LF_SET ../dbinc/db_int.in /^#define LF_SET(f) ((flags) |= (f))$/ -LG_BASE_REGION_SIZE ../dbinc/log.h 69 -LG_BSIZE_DEFAULT ../dbinc/log.h 68 -LG_MAINT_SIZE ../dbinc/log.h 248 +LG_BASE_REGION_SIZE ../dbinc/log.h 71 +LG_BSIZE_DEFAULT ../dbinc/log.h 69 +LG_BSIZE_INMEM ../dbinc/log.h 70 +LG_MAINT_SIZE ../dbinc/log.h 277 LG_MAX_DEFAULT ../dbinc/log.h 67 +LG_MAX_INMEM ../dbinc/log.h 68 LIST_EMPTY ../dbinc/queue.h /^#define LIST_EMPTY(head) ((head)->lh_first == NULL/ LIST_ENTRY ../dbinc/queue.h /^#define LIST_ENTRY(type) \\$/ LIST_FIRST ../dbinc/queue.h /^#define LIST_FIRST(head) ((head)->lh_first)$/ LIST_FOREACH ../dbinc/queue.h /^#define LIST_FOREACH(var, head, field) \\$/ LIST_HEAD ../dbinc/queue.h /^#define LIST_HEAD(name, type) \\$/ LIST_HEAD_INITIALIZER ../dbinc/queue.h /^#define LIST_HEAD_INITIALIZER(head) \\$/ -LIST_INCREMENT ../log/log_archive.c 207 +LIST_INCREMENT ../log/log_archive.c 209 LIST_INIT ../dbinc/queue.h /^#define LIST_INIT(head) do { \\$/ LIST_INSERT_AFTER ../dbinc/queue.h /^#define LIST_INSERT_AFTER(listelm, elm, field) do / LIST_INSERT_BEFORE ../dbinc/queue.h /^#define LIST_INSERT_BEFORE(listelm, elm, field) do/ LIST_INSERT_HEAD ../dbinc/queue.h /^#define LIST_INSERT_HEAD(head, elm, field) do { / LIST_NEXT ../dbinc/queue.h /^#define LIST_NEXT(elm, field) ((elm)->field.le_nex/ LIST_REMOVE ../dbinc/queue.h /^#define LIST_REMOVE(elm, field) do { \\$/ -LOAD_ACTUAL_MUTEX_CODE ../mutex/mut_tas.c 24 +LOAD_ACTUAL_MUTEX_CODE ../mutex/mut_tas.c 22 LOCKER_LOCK ../dbinc/lock.h /^#define LOCKER_LOCK(lt, reg, locker, ndx) \\$/ LOCKING_ON ../dbinc/db_int.in /^#define LOCKING_ON(dbenv) ((dbenv)->lk_handle != N/ -LOCKREGION ../dbinc/lock.h /^#define LOCKREGION(dbenv, lt) R_LOCK((dbenv), &(l/ -LOCK_DUMP_ALL ../lock/lock_stat.c 137 -LOCK_DUMP_CONF ../lock/lock_stat.c 132 -LOCK_DUMP_LOCKERS ../lock/lock_stat.c 133 -LOCK_DUMP_MEM ../lock/lock_stat.c 134 -LOCK_DUMP_OBJECTS ../lock/lock_stat.c 135 -LOCK_DUMP_PARAMS ../lock/lock_stat.c 136 +LOCKREGION ../dbinc/lock.h /^#define LOCKREGION(dbenv, lt) R_LOCK((dbenv), &((/ LOCK_INIT ../dbinc/lock.h /^#define LOCK_INIT(lock) ((lock).off = LOCK_INVALI/ LOCK_INVALID ../dbinc/lock.h 27 LOCK_ISSET ../dbinc/lock.h /^#define LOCK_ISSET(lock) ((lock).off != LOCK_INVAL/ @@ -1099,20 +1374,21 @@ LOG ../dbinc/log.h 60 LOGGING_ON ../dbinc/db_int.in /^#define LOGGING_ON(dbenv) ((dbenv)->lg_handle != N/ LOGP ../dbinc/log.h 61 LOG_OP ../dbinc/debug.h /^#define LOG_OP(C, T, O, K, A, F) { \\$/ -LOWER_MASK ../crypto/mersenne/mt19937db.c 42 +LOWER_MASK ../crypto/mersenne/mt19937db.c 41 LSN ../dbinc/db_page.h /^#define LSN(p) (((PAGE *)p)->lsn)$/ LSN_NOT_LOGGED ../dbinc/db_int.in /^#define LSN_NOT_LOGGED(LSN) do { \\$/ -LSN_PAGE_NOLOCK ../dbinc/rep.h 207 -M 
../crypto/mersenne/mt19937db.c 39 -MAKE_STAT_LIST ../dbinc/tcl_db.h /^#define MAKE_STAT_LIST(s,v) \\$/ -MAKE_STAT_LSN ../dbinc/tcl_db.h /^#define MAKE_STAT_LSN(s, lsn) \\$/ -MAKE_STAT_STRLIST ../dbinc/tcl_db.h /^#define MAKE_STAT_STRLIST(s,s1) \\$/ +M ../crypto/mersenne/mt19937db.c 38 +MAKE_STAT_LIST ../dbinc/tcl_db.h /^#define MAKE_STAT_LIST(s, v) do { \\$/ +MAKE_STAT_LSN ../dbinc/tcl_db.h /^#define MAKE_STAT_LSN(s, lsn) do { \\$/ +MAKE_STAT_STRLIST ../dbinc/tcl_db.h /^#define MAKE_STAT_STRLIST(s,s1) do { \\$/ +MAKE_WSTAT_LIST ../dbinc/tcl_db.h /^#define MAKE_WSTAT_LIST(s, v) do { \\$/ MAP_FAILED ../mutex/tm.c 733 MAP_FILE ../mutex/tm.c 736 -MASTER_CHECK ../rep/rep_record.c /^#define MASTER_CHECK(dbenv, eid, rep) \\$/ +MASTER_CHECK ../rep/rep_record.c /^#define MASTER_CHECK(dbenv, eid, rep) do { \\$/ MASTER_ONLY ../rep/rep_record.c /^#define MASTER_ONLY(rep, rp) do { \\$/ -MATRIX_A ../crypto/mersenne/mt19937db.c 40 -MAXARGS ../common/util_arg.c 36 +MASTER_UPDATE ../rep/rep_record.c /^#define MASTER_UPDATE(dbenv, renv) do { \\$/ +MATRIX_A ../crypto/mersenne/mt19937db.c 39 +MAXARGS ../common/util_arg.c 34 MAXBQUALSIZE ../dbinc/xa.h 22 MAXBTREELEVEL ../dbinc/db_page.h 241 MAXGTRIDSIZE ../dbinc/xa.h 21 @@ -1120,17 +1396,17 @@ MAXINFOSIZE ../dbinc/xa.h 46 MAXKB ../crypto/rijndael/rijndael-alg-fst.h 33 MAXKC ../crypto/rijndael/rijndael-alg-fst.h 32 MAXNR ../crypto/rijndael/rijndael-alg-fst.h 34 -MAXPATHLEN ../dbinc/db_int.in 201 -MAX_ID ../dbinc/tcl_db.h 18 +MAXPATHLEN ../dbinc/db_int.in 273 +MAX_ID ../dbinc/tcl_db.h 19 MAX_IV_SIZE ../crypto/rijndael/rijndael-api-fst.h 71 MAX_KEY_SIZE ../crypto/rijndael/rijndael-api-fst.h 70 MAX_LSN ../dbinc/db_int.in /^#define MAX_LSN(LSN) do { \\$/ -MAX_LSN_TO_TEXT ../db/db.c 997 +MAX_LSN_TO_TEXT ../db/db.c 1011 MAX_PAGE_NUMBER ../dbinc/db_185.in 79 MAX_PAGE_OFFSET ../dbinc/db_185.in 81 -MAX_PGNOS ../lock/lock.c 2188 +MAX_PGNOS ../lock/lock_list.c 54 MAX_REC_NUMBER ../dbinc/db_185.in 83 -MEGABYTE ../dbinc/db_int.in 41 +MEGABYTE ../dbinc/db_int.in 72 MINFILL ../dbinc/hash.h 106 MODE_CBC ../crypto/rijndael/rijndael-api-fst.h 50 MODE_CFB1 ../crypto/rijndael/rijndael-api-fst.h 51 @@ -1138,57 +1414,56 @@ MODE_ECB ../crypto/rijndael/rijndael-api-fst.h 49 MPF_ILLEGAL_AFTER_OPEN ../dbinc/mp.h /^#define MPF_ILLEGAL_AFTER_OPEN(dbmfp, name) \\$/ MPF_ILLEGAL_BEFORE_OPEN ../dbinc/mp.h /^#define MPF_ILLEGAL_BEFORE_OPEN(dbmfp, name) \\$/ MPOOL ../dbinc/mp.h 16 -MPOOLFILE ../dbinc/db.in 127 -MPOOL_BASE_DECREMENT ../dbinc/mp.h 186 -MPOOL_DUMP_ALL ../mp/mp_stat.c 264 -MPOOL_DUMP_HASH ../mp/mp_stat.c 262 -MPOOL_DUMP_MEM ../mp/mp_stat.c 263 +MPOOLFILE ../dbinc/db.in 160 +MPOOL_BASE_DECREMENT ../dbinc/mp.h 203 MPOOL_ON ../dbinc/db_int.in /^#define MPOOL_ON(dbenv) ((dbenv)->mp_handle != NU/ -MPOOL_PRI_DEFAULT ../dbinc/mp.h 194 -MPOOL_PRI_DIRTY ../dbinc/mp.h 196 -MPOOL_PRI_HIGH ../dbinc/mp.h 195 -MPOOL_PRI_LOW ../dbinc/mp.h 193 -MPOOL_PRI_VERY_HIGH ../dbinc/mp.h 197 -MPOOL_PRI_VERY_LOW ../dbinc/mp.h 192 -MP_CAN_MMAP ../dbinc/mp.h 281 -MP_DIRECT ../dbinc/mp.h 282 -MP_EXTENT ../dbinc/mp.h 283 -MP_FAKE_DEADFILE ../dbinc/mp.h 284 -MP_FAKE_FILEWRITTEN ../dbinc/mp.h 285 -MP_FAKE_NB ../dbinc/mp.h 286 -MP_FAKE_UOC ../dbinc/mp.h 287 -MP_FILEID_SET ../dbinc/db.in 672 -MP_FLUSH ../dbinc/db.in 673 -MP_NOT_DURABLE ../dbinc/mp.h 288 -MP_OPEN_CALLED ../dbinc/db.in 674 -MP_READONLY ../dbinc/db.in 675 -MP_TEMP ../dbinc/mp.h 289 -MSG ../mutex/mut_fcntl.c 152 -MSG1 ../mutex/mut_pthread.c 24 -MSG2 ../mutex/mut_pthread.c 26 -MSGBUF_LEN ../lock/lock_deadlock.c 802 +MPOOL_PRI_DEFAULT ../dbinc/mp.h 211 
+MPOOL_PRI_DIRTY ../dbinc/mp.h 213 +MPOOL_PRI_HIGH ../dbinc/mp.h 212 +MPOOL_PRI_LOW ../dbinc/mp.h 210 +MPOOL_PRI_VERY_HIGH ../dbinc/mp.h 214 +MPOOL_PRI_VERY_LOW ../dbinc/mp.h 209 +MP_CAN_MMAP ../dbinc/mp.h 302 +MP_DIRECT ../dbinc/mp.h 303 +MP_DURABLE_UNKNOWN ../dbinc/mp.h 304 +MP_EXTENT ../dbinc/mp.h 305 +MP_FAKE_DEADFILE ../dbinc/mp.h 306 +MP_FAKE_FILEWRITTEN ../dbinc/mp.h 307 +MP_FAKE_NB ../dbinc/mp.h 308 +MP_FAKE_UOC ../dbinc/mp.h 309 +MP_FILEID_SET ../dbinc/db.in 735 +MP_FLUSH ../dbinc/db.in 736 +MP_NOT_DURABLE ../dbinc/mp.h 310 +MP_OPEN_CALLED ../dbinc/db.in 737 +MP_READONLY ../dbinc/db.in 738 +MP_TEMP ../dbinc/mp.h 311 +MSG ../mutex/mut_fcntl.c 151 +MSG1 ../mutex/mut_pthread.c 23 +MSG2 ../mutex/mut_pthread.c 25 MSG_SIZE ../dbinc/tcl_db.h 13 -MS_PER_SEC ../dbinc/db_int.in 44 +MS_PER_SEC ../dbinc/db_int.in 75 MT_FILE ../mutex/tm.c 42 MT_FILE_QUIT ../mutex/tm.c 43 MUTEX_ALIGN ../dbinc/mutex.h 82 -MUTEX_ALLOC ../dbinc/mutex.h 831 +MUTEX_ALLOC ../dbinc/mutex.h 830 +MUTEX_CLEAR ../dbinc/mutex.h /^#define MUTEX_CLEAR(mp) { \\$/ MUTEX_DESTROY ../dbinc/mutex.h /^#define MUTEX_DESTROY(x) sema_destroy(x)$/ MUTEX_FIELDS ../dbinc/mutex.h 26 -MUTEX_IGNORE ../dbinc/mutex.h 832 +MUTEX_IGNORE ../dbinc/mutex.h 831 MUTEX_INIT ../dbinc/mutex.h /^#define MUTEX_INIT(x) 0$/ -MUTEX_INITED ../dbinc/mutex.h 833 +MUTEX_INITED ../dbinc/mutex.h 832 MUTEX_LOCK ../dbinc/mutex.h /^#define MUTEX_LOCK(dbenv, mp) \\$/ -MUTEX_LOGICAL_LOCK ../dbinc/mutex.h 834 -MUTEX_MPOOL ../dbinc/mutex.h 835 +MUTEX_LOGICAL_LOCK ../dbinc/mutex.h 833 +MUTEX_MPOOL ../dbinc/mutex.h 834 MUTEX_NO_MALLOC_LOCKS ../dbinc/mutex.h 126 -MUTEX_NO_RECORD ../dbinc/mutex.h 836 -MUTEX_NO_RLOCK ../dbinc/mutex.h 837 +MUTEX_NO_RECORD ../dbinc/mutex.h 835 +MUTEX_NO_RLOCK ../dbinc/mutex.h 836 MUTEX_NO_SHMGET_LOCKS ../dbinc/mutex.h 127 -MUTEX_PAUSE ../dbinc/mutex.h 363 -MUTEX_SELF_BLOCK ../dbinc/mutex.h 838 +MUTEX_PAUSE ../dbinc/mutex.h 364 +MUTEX_SELF_BLOCK ../dbinc/mutex.h 837 MUTEX_SET ../dbinc/mutex.h /^#define MUTEX_SET(x) (!_check_lock(x, 0, 1))$/ -MUTEX_THREAD ../dbinc/mutex.h 839 +MUTEX_SET_TEST ../dbinc/mutex.h 380 +MUTEX_THREAD ../dbinc/mutex.h 838 MUTEX_THREAD_LOCK ../dbinc/mutex.h /^#define MUTEX_THREAD_LOCK(dbenv, mp) \\$/ MUTEX_THREAD_UNLOCK ../dbinc/mutex.h /^#define MUTEX_THREAD_UNLOCK(dbenv, mp) \\$/ MUTEX_UNLOCK ../dbinc/mutex.h /^#define MUTEX_UNLOCK(dbenv, mp) \\$/ @@ -1197,13 +1472,14 @@ MUTEX_WAKEME ../mutex/tm.c 58 M_16_SWAP ../dbinc/db_swap.h /^#define M_16_SWAP(a) { \\$/ M_32_SWAP ../dbinc/db_swap.h /^#define M_32_SWAP(a) { \\$/ Mtm ../mutex/tm.c /^main(argc, argv)$/ -N ../crypto/mersenne/mt19937db.c 38 -NAME_TO_DB ../dbinc/tcl_db.h /^#define NAME_TO_DB(name) (DB *)_NameToPtr((name))$/ -NAME_TO_DBC ../dbinc/tcl_db.h /^#define NAME_TO_DBC(name) (DBC *)_NameToPtr((name)/ -NAME_TO_ENV ../dbinc/tcl_db.h /^#define NAME_TO_ENV(name) (DB_ENV *)_NameToPtr((na/ -NAME_TO_LOCK ../dbinc/tcl_db.h /^#define NAME_TO_LOCK(name) (DB_LOCK *)_NameToPtr((/ -NAME_TO_MP ../dbinc/tcl_db.h /^#define NAME_TO_MP(name) (DB_MPOOLFILE *)_NameToPt/ -NAME_TO_TXN ../dbinc/tcl_db.h /^#define NAME_TO_TXN(name) (DB_TXN *)_NameToPtr((na/ +N ../crypto/mersenne/mt19937db.c 37 +NAME_TO_DB ../dbinc/tcl_db.h /^#define NAME_TO_DB(name) (DB *)_NameToPtr((name))$/ +NAME_TO_DBC ../dbinc/tcl_db.h /^#define NAME_TO_DBC(name) (DBC *)_NameToPtr((name)/ +NAME_TO_ENV ../dbinc/tcl_db.h /^#define NAME_TO_ENV(name) (DB_ENV *)_NameToPtr((na/ +NAME_TO_LOCK ../dbinc/tcl_db.h /^#define NAME_TO_LOCK(name) (DB_LOCK *)_NameToPtr((/ +NAME_TO_MP ../dbinc/tcl_db.h /^#define NAME_TO_MP(name) 
(DB_MPOOLFILE *)_NameToPt/ +NAME_TO_SEQUENCE ../dbinc/tcl_db.h /^#define NAME_TO_SEQUENCE(name) (DB_SEQUENCE *)_Nam/ +NAME_TO_TXN ../dbinc/tcl_db.h /^#define NAME_TO_TXN(name) (DB_TXN *)_NameToPtr((na/ NAMLEN ../clib/getcwd.c /^# define NAMLEN(dirent) strlen((dirent)->d_name)$/ NBUCKET ../dbinc/mp.h /^#define NBUCKET(mc, mf_offset, pgno) \\$/ NCACHE ../dbinc/mp.h /^#define NCACHE(mp, mf_offset, pgno) \\$/ @@ -1213,24 +1489,26 @@ NCACHED30 ../dbinc/db_upgrade.h 152 NDX_INVALID ../dbinc/hash.h 110 NELEM ../libdb_java/db_java_wrap.c /^#define NELEM(x) (sizeof (x) \/ sizeof (x[0]))$/ NEXT_PGNO ../dbinc/db_page.h /^#define NEXT_PGNO(p) (((PAGE *)p)->next_pgno)$/ +NOWAIT_FLAG ../dbinc/db_int.in /^#define NOWAIT_FLAG(txn) \\$/ NUM_ENT ../dbinc/db_page.h /^#define NUM_ENT(p) (((PAGE *)p)->entries)$/ +NewStringObj ../dbinc/tcl_db.h /^#define NewStringObj(a, b) \\$/ OBJECT_LOCK ../dbinc/lock.h /^#define OBJECT_LOCK(lt, reg, obj, ndx) \\$/ OBJ_LINKS_VALID ../dbinc/lock.h /^#define OBJ_LINKS_VALID(L) ((L)->links.stqe_prev !/ -OKFLAGS ../btree/bt_verify.c 37 -OKFLAGS_CDB ../env/env_open.c 87 -OK_CRYPTO_FLAGS ../env/env_method.c 335 -OK_FLAGS ../env/env_method.c 528 -ON_ERROR_RETURN ../dbinc/cxx_int.h 61 -ON_ERROR_THROW ../dbinc/cxx_int.h 60 -ON_ERROR_UNKNOWN ../dbinc/cxx_int.h 62 +OKFLAGS ../db/db_iface.c 1068 +OKFLAGS_CDB ../env/env_open.c 86 +OK_CRYPTO_FLAGS ../env/env_method.c 351 +OK_FLAGS ../env/env_method.c 550 +ON_ERROR_RETURN ../dbinc/cxx_int.h 57 +ON_ERROR_THROW ../dbinc/cxx_int.h 56 +ON_ERROR_UNKNOWN ../dbinc/cxx_int.h 58 OPCODE_OF ../dbinc/hash.h /^#define OPCODE_OF(N) (N & ~PAIR_MASK)$/ OR_MAP ../lock/lock_deadlock.c /^#define OR_MAP(D, S, N) { \\$/ -OS_VMPAGESIZE ../dbinc/region.h 283 +OS_VMPAGESIZE ../dbinc/region.h 292 OS_VMROUNDOFF ../dbinc/region.h /^#define OS_VMROUNDOFF(i) { \\$/ -OVERFLOW_ERROR ../common/db_err.c 333 +OVERFLOW_ERROR ../clib/snprintf.c 122 OV_LEN ../dbinc/db_page.h /^#define OV_LEN(p) (((PAGE *)p)->hf_offset)$/ OV_REF ../dbinc/db_page.h /^#define OV_REF(p) (((PAGE *)p)->entries)$/ -O_ACCMODE ../os/os_oflags.c 50 +O_ACCMODE ../os/os_oflags.c 53 O_INDX ../dbinc/db_page.h 596 PAIR_DATAMASK ../dbinc/hash.h 118 PAIR_DUPMASK ../dbinc/hash.h 119 @@ -1241,24 +1519,24 @@ PAIR_KEYMASK ../dbinc/hash.h 117 PAIR_MASK ../dbinc/hash.h 120 PANIC_CHECK ../dbinc/region.h /^#define PANIC_CHECK(dbenv) \\$/ PANIC_SET ../dbinc/region.h /^#define PANIC_SET(dbenv, onoff) \\$/ -PATH_DOT ../dbinc/db_int.in 203 -PATH_SEPARATOR ../dbinc/db_int.in 205 +PATH_DOT ../dbinc/db_int.in 275 +PATH_SEPARATOR ../dbinc/db_int.in 277 PGNO ../dbinc/db_page.h /^#define PGNO(p) (((PAGE *)p)->pgno)$/ PGNO_BASE_MD ../dbinc/db_page.h 34 PGNO_INVALID ../dbinc/db_page.h 33 POWER_OF_TWO ../dbinc/db_int.in /^#define POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0)$/ -PREPLISTSIZE ../rep/rep_method.c 495 +PREPLISTSIZE ../rep/rep_method.c 512 PREV_PGNO ../dbinc/db_page.h /^#define PREV_PGNO(p) (((PAGE *)p)->prev_pgno)$/ -PTHREAD_UNLOCK_ATTEMPTS ../mutex/mut_pthread.c 55 +PTHREAD_UNLOCK_ATTEMPTS ../mutex/mut_pthread.c 57 PUTOVFL ../dbinc/hash.h 128 PUTPAIR ../dbinc/hash.h 126 PUTU32 ../crypto/rijndael/rijndael-alg-fst.c /^#define PUTU32(ct, st) { *((u32 *)(ct)) = SWAP((st/ -PUT_COUNT ../lock/lock.c /^#define PUT_COUNT(dp, count) do { u_int32_t *ip =/ +PUT_COUNT ../lock/lock_list.c /^#define PUT_COUNT(dp, count) do { u_int32_t *ip = / PUT_HKEYDATA ../dbinc/db_page.h /^#define PUT_HKEYDATA(pe, kd, len, type) { \\$/ PUT_PAGE ../db/db_upg_opd.c /^#define PUT_PAGE(dbp, fhp, pgno, page) { \\$/ -PUT_PCOUNT ../lock/lock.c /^#define 
PUT_PCOUNT(dp, count) do { u_int16_t *ip / -PUT_PGNO ../lock/lock.c /^#define PUT_PGNO(dp, pgno) do { db_pgno_t *ip = (/ -PUT_SIZE ../lock/lock.c /^#define PUT_SIZE(dp, size) do { u_int16_t *ip = (/ +PUT_PCOUNT ../lock/lock_list.c /^#define PUT_PCOUNT(dp, count) do { u_int16_t *ip =/ +PUT_PGNO ../lock/lock_list.c /^#define PUT_PGNO(dp, pgno) do { db_pgno_t *ip = (d/ +PUT_SIZE ../lock/lock_list.c /^#define PUT_SIZE(dp, size) do { u_int16_t *ip = (u/ P_16_COPY ../dbinc/db_swap.h /^#define P_16_COPY(a, b) { \\$/ P_16_SWAP ../dbinc/db_swap.h /^#define P_16_SWAP(a) { \\$/ P_32_COPY ../dbinc/db_swap.h /^#define P_32_COPY(a, b) { \\$/ @@ -1286,12 +1564,12 @@ P_OVFLSPACE ../dbinc/db_page.h /^#define P_OVFLSPACE(dbp, psize, pg) (P_MAXSPACE P_PAGETYPE_MAX ../dbinc/db_page.h 50 P_QAMDATA ../dbinc/db_page.h 48 P_QAMMETA ../dbinc/db_page.h 47 -P_TO_UINT16 ../dbinc/db_int.in /^#define P_TO_UINT16(p) ((u_int16_t)(db_alignp_t)(p/ -P_TO_UINT32 ../dbinc/db_int.in /^#define P_TO_UINT32(p) ((u_int32_t)(db_alignp_t)(p/ -P_TO_ULONG ../dbinc/db_int.in /^#define P_TO_ULONG(p) ((u_long)(db_alignp_t)(p))$/ +P_TO_UINT16 ../dbinc/db_int.in /^#define P_TO_UINT16(p) ((u_int16_t)(uintptr_t)(p))/ +P_TO_UINT32 ../dbinc/db_int.in /^#define P_TO_UINT32(p) ((u_int32_t)(uintptr_t)(p))/ +P_TO_ULONG ../dbinc/db_int.in /^#define P_TO_ULONG(p) ((u_long)(uintptr_t)(p))$/ QAM_AFTER_CURRENT ../dbinc/qam.h /^#define QAM_AFTER_CURRENT(meta, recno) \\$/ QAM_BEFORE_FIRST ../dbinc/qam.h /^#define QAM_BEFORE_FIRST(meta, recno) \\$/ -QAM_EXNAME ../qam/qam_files.c /^#define QAM_EXNAME(Q, I, B, L) \\$/ +QAM_EXNAME ../qam/qam_files.c /^#define QAM_EXNAME(Q, I, B, L) \\$/ QAM_GET_RECORD ../dbinc/qam.h /^#define QAM_GET_RECORD(dbp, page, index) \\$/ QAM_NOT_VALID ../dbinc/qam.h /^#define QAM_NOT_VALID(meta, recno) \\$/ QAM_PAGE_EXTENT ../dbinc/qam.h /^#define QAM_PAGE_EXTENT(dbp, pgno) \\$/ @@ -1304,7 +1582,7 @@ QAM_SETCUR ../dbinc/qam.h 141 QAM_SETFIRST ../dbinc/qam.h 140 QAM_TRUNCATE ../dbinc/qam.h 142 QAM_VALID ../dbinc/qam.h 18 -QDEBUG ../qam/qam.c 613 +QDEBUG ../qam/qam.c 611 QMD_TRACE_ELEM ../dbinc/queue.h /^#define QMD_TRACE_ELEM(elem) do { \\$/ QMD_TRACE_HEAD ../dbinc/queue.h /^#define QMD_TRACE_HEAD(head) do { \\$/ QPAGE_CHKSUM ../dbinc/db_page.h 293 @@ -1321,72 +1599,81 @@ R1 ../hmac/sha1.c /^#define R1(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk(i)+0x/ R2 ../hmac/sha1.c /^#define R2(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0x6ED9EB/ R3 ../hmac/sha1.c /^#define R3(v,w,x,y,z,i) z+=(((w|x)&y)|(w&x))+blk(i/ R4 ../hmac/sha1.c /^#define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1/ -RAND_MAX ../tcl/tcl_util.c 92 RECNO ../dbinc/btree.h 51 -RECNO_OOB ../dbinc/db_int.in 47 -REC_CLOSE ../dbinc/db_am.h 54 +RECNO_OOB ../dbinc/db_int.in 78 +REC_CLOSE ../dbinc/db_am.h 53 +REC_FGET ../dbinc/db_am.h /^#define REC_FGET(mpf, pgno, pagep, cont) \\$/ REC_INTRO ../dbinc/db_am.h /^#define REC_INTRO(func, inc_count) do { \\$/ -REC_NOOP_CLOSE ../dbinc/db_am.h 72 +REC_NOOP_CLOSE ../dbinc/db_am.h 71 REC_NOOP_INTRO ../dbinc/db_am.h /^#define REC_NOOP_INTRO(func) do { \\$/ REC_PRINT ../dbinc/db_am.h /^#define REC_PRINT(func) \\$/ -REGINFO ../dbinc/db_int.in 418 -REGION_CREATE ../dbinc/region.h 218 -REGION_CREATE_OK ../dbinc/region.h 219 +REGINFO ../dbinc/db_int.in 522 +REGION_CREATE ../dbinc/region.h 229 +REGION_CREATE_OK ../dbinc/region.h 230 REGION_ID_ENV ../dbinc/region.h 108 -REGION_JOIN_OK ../dbinc/region.h 220 -REL_ENVLOCK ../fileops/fop_util.c /^#define REL_ENVLOCK(ENV, L) \\$/ -REPDBNAME ../rep/rep_method.c 396 -REP_ALIVE ../dbinc/rep.h 11 -REP_ALIVE_REQ ../dbinc/rep.h 12 
-REP_ALL_REQ ../dbinc/rep.h 13 -REP_DUPMASTER ../dbinc/rep.h 14 -REP_FILE ../dbinc/rep.h 15 -REP_FILE_REQ ../dbinc/rep.h 16 -REP_F_EPHASE1 ../dbinc/rep.h 89 -REP_F_EPHASE2 ../dbinc/rep.h 90 -REP_F_LOGSONLY ../dbinc/rep.h 91 -REP_F_MASTER ../dbinc/rep.h 92 -REP_F_MASTERELECT ../dbinc/rep.h 93 -REP_F_NOARCHIVE ../dbinc/rep.h 94 -REP_F_READY ../dbinc/rep.h 95 -REP_F_RECOVER ../dbinc/rep.h 96 -REP_F_TALLY ../dbinc/rep.h 97 -REP_F_UPGRADE ../dbinc/rep.h 98 -REP_ISCLIENT ../dbinc/rep.h 99 -REP_LOG ../dbinc/rep.h 17 -REP_LOG_MORE ../dbinc/rep.h 18 -REP_LOG_REQ ../dbinc/rep.h 19 -REP_MASTER_REQ ../dbinc/rep.h 20 -REP_NEWCLIENT ../dbinc/rep.h 21 -REP_NEWFILE ../dbinc/rep.h 22 -REP_NEWMASTER ../dbinc/rep.h 23 -REP_NEWSITE ../dbinc/rep.h 24 +REGION_JOIN_OK ../dbinc/region.h 231 +REPDBNAME ../rep/rep_method.c 407 +REPPAGENAME ../rep/rep_method.c 408 +REP_ALIVE ../dbinc/rep.h 13 +REP_ALIVE_REQ ../dbinc/rep.h 14 +REP_ALL_REQ ../dbinc/rep.h 15 +REP_DUPMASTER ../dbinc/rep.h 16 +REP_EGENNAME ../dbinc/rep.h 90 +REP_FILE ../dbinc/rep.h 17 +REP_FILE_FAIL ../dbinc/rep.h 18 +REP_FILE_REQ ../dbinc/rep.h 19 +REP_F_CLIENT ../dbinc/rep.h 167 +REP_F_EPHASE1 ../dbinc/rep.h 168 +REP_F_EPHASE2 ../dbinc/rep.h 169 +REP_F_MASTER ../dbinc/rep.h 170 +REP_F_MASTERELECT ../dbinc/rep.h 171 +REP_F_NOARCHIVE ../dbinc/rep.h 172 +REP_F_READY ../dbinc/rep.h 173 +REP_F_RECOVER_LOG ../dbinc/rep.h 174 +REP_F_RECOVER_MASK ../dbinc/rep.h 187 +REP_F_RECOVER_PAGE ../dbinc/rep.h 175 +REP_F_RECOVER_UPDATE ../dbinc/rep.h 176 +REP_F_RECOVER_VERIFY ../dbinc/rep.h 177 +REP_F_TALLY ../dbinc/rep.h 178 +REP_LOG ../dbinc/rep.h 20 +REP_LOG_MORE ../dbinc/rep.h 21 +REP_LOG_REQ ../dbinc/rep.h 22 +REP_MASTER_REQ ../dbinc/rep.h 23 +REP_NEWCLIENT ../dbinc/rep.h 24 +REP_NEWFILE ../dbinc/rep.h 25 +REP_NEWMASTER ../dbinc/rep.h 26 +REP_NEWSITE ../dbinc/rep.h 27 REP_ON ../dbinc/db_int.in /^#define REP_ON(dbenv) ((dbenv)->rep_handle != NUL/ -REP_PAGE ../dbinc/rep.h 30 -REP_PAGE_REQ ../dbinc/rep.h 31 -REP_PLIST ../dbinc/rep.h 32 -REP_PLIST_REQ ../dbinc/rep.h 33 -REP_VERIFY ../dbinc/rep.h 34 -REP_VERIFY_FAIL ../dbinc/rep.h 35 -REP_VERIFY_REQ ../dbinc/rep.h 36 -REP_VOTE1 ../dbinc/rep.h 37 -REP_VOTE2 ../dbinc/rep.h 38 +REP_PAGE ../dbinc/rep.h 33 +REP_PAGE_FAIL ../dbinc/rep.h 34 +REP_PAGE_MORE ../dbinc/rep.h 35 +REP_PAGE_REQ ../dbinc/rep.h 36 +REP_PRINT_MESSAGE ../dbinc/rep.h /^#define REP_PRINT_MESSAGE(dbenv, eid, rp, str) / +REP_UPDATE ../dbinc/rep.h 37 +REP_UPDATE_REQ ../dbinc/rep.h 38 +REP_VERIFY ../dbinc/rep.h 39 +REP_VERIFY_FAIL ../dbinc/rep.h 40 +REP_VERIFY_REQ ../dbinc/rep.h 41 +REP_VOTE1 ../dbinc/rep.h 42 +REP_VOTE2 ../dbinc/rep.h 43 RESET_RET_MEM ../dbinc/db_int.in /^#define RESET_RET_MEM(dbc) \\$/ +RETRY_CHK ../dbinc/os.h /^#define RETRY_CHK(op, ret) do { \\$/ RET_ERROR ../dbinc/db_185.in 58 -RET_SIZE ../lock/lock.c /^#define RET_SIZE(size, count) ((size) + \\$/ +RET_SIZE ../lock/lock_list.c /^#define RET_SIZE(size, count) ((size) + \\$/ RET_SPECIAL ../dbinc/db_185.in 60 RET_SUCCESS ../dbinc/db_185.in 59 RE_NREC ../dbinc/db_page.h /^#define RE_NREC(p) \\$/ RE_NREC_ADJ ../dbinc/db_page.h /^#define RE_NREC_ADJ(p, adj) \\$/ RE_NREC_SET ../dbinc/db_page.h /^#define RE_NREC_SET(p, num) \\$/ +RINGBUF_LEN ../dbinc/log.h /^#define RINGBUF_LEN(lp, start, end) \\$/ RINTERNAL_PSIZE ../dbinc/db_page.h 650 RINTERNAL_SIZE ../dbinc/db_page.h 648 -RLOCK ../log/log_get.c 26 +RLOCK ../log/log_get.c 25 RMNAMESZ ../dbinc/xa.h 44 RPC_ON ../dbinc/db_int.in /^#define RPC_ON(dbenv) ((dbenv)->cl_handle != NULL/ -RPC_SVC_FG ../rpc_server/c/db_server_svc.c 24 +RPRINT ../dbinc/rep.h 
/^#define RPRINT(e, r, x) do { \\$/ RSMSG ../db185/db185.c 522 -R_ADDR ../dbinc/region.h /^#define R_ADDR(base, offset) \\$/ +R_ADDR ../dbinc/region.h /^#define R_ADDR(dbenv, base, offset) \\$/ R_CURSOR ../dbinc/db_185.in 93 R_DUP ../dbinc/db_185.in 125 R_FIRST ../dbinc/db_185.in 95 @@ -1398,31 +1685,36 @@ R_LOCK ../dbinc/region.h /^#define R_LOCK(dbenv, reginfo) \\$/ R_NEXT ../dbinc/db_185.in 99 R_NOKEY ../dbinc/db_185.in 155 R_NOOVERWRITE ../dbinc/db_185.in 100 -R_OFFSET ../dbinc/region.h /^#define R_OFFSET(base, p) \\$/ +R_OFFSET ../dbinc/region.h /^#define R_OFFSET(dbenv, base, p) \\$/ R_PREV ../dbinc/db_185.in 101 R_RECNOSYNC ../dbinc/db_185.in 103 R_SETCURSOR ../dbinc/db_185.in 102 R_SNAPSHOT ../dbinc/db_185.in 156 R_UNLOCK ../dbinc/region.h /^#define R_UNLOCK(dbenv, reginfo) \\$/ -SALVAGE_HASH ../dbinc/db_verify.h 108 -SALVAGE_IGNORE ../dbinc/db_verify.h 103 -SALVAGE_INVALID ../dbinc/db_verify.h 102 -SALVAGE_LBTREE ../dbinc/db_verify.h 107 -SALVAGE_LDUP ../dbinc/db_verify.h 104 -SALVAGE_LRECNO ../dbinc/db_verify.h 109 -SALVAGE_LRECNODUP ../dbinc/db_verify.h 105 -SALVAGE_OVERFLOW ../dbinc/db_verify.h 106 -SALVAGE_PRINTABLE ../dbinc/db_verify.h 134 -SALVAGE_PRINTFOOTER ../dbinc/db_verify.h 136 -SALVAGE_PRINTHEADER ../dbinc/db_verify.h 135 -SA_SKIPFIRSTKEY ../dbinc/db_verify.h 65 -SERIAL_INIT ../os/os_fid.c 35 +SALVAGE_HASH ../dbinc/db_verify.h 109 +SALVAGE_IGNORE ../dbinc/db_verify.h 104 +SALVAGE_INVALID ../dbinc/db_verify.h 103 +SALVAGE_LBTREE ../dbinc/db_verify.h 108 +SALVAGE_LDUP ../dbinc/db_verify.h 105 +SALVAGE_LRECNO ../dbinc/db_verify.h 110 +SALVAGE_LRECNODUP ../dbinc/db_verify.h 106 +SALVAGE_OVERFLOW ../dbinc/db_verify.h 107 +SALVAGE_PRINTABLE ../dbinc/db_verify.h 135 +SALVAGE_PRINTFOOTER ../dbinc/db_verify.h 137 +SALVAGE_PRINTHEADER ../dbinc/db_verify.h 136 +SA_SKIPFIRSTKEY ../dbinc/db_verify.h 66 +SEQ_ILLEGAL_AFTER_OPEN ../sequence/sequence.c /^#define SEQ_ILLEGAL_AFTER_OPEN(seq, name) \\$/ +SEQ_ILLEGAL_BEFORE_OPEN ../sequence/sequence.c /^#define SEQ_ILLEGAL_BEFORE_OPEN(seq, name) \\$/ +SEQ_OPEN_FLAGS ../sequence/sequence.c 123 +SEQ_SET_FLAGS ../sequence/sequence.c 252 SET_LOG_FLAGS ../txn/txn.c /^#define SET_LOG_FLAGS(dbenv, txnp, lflags) \\$/ SET_MAP ../lock/lock_deadlock.c /^#define SET_MAP(M, B) ((M)[(B) \/ 32] |= (1 << ((B)/ SET_RET_MEM ../dbinc/db_int.in /^#define SET_RET_MEM(dbc, owner) \\$/ SET_TXN ../xa/xa_db.c /^#define SET_TXN(PARAM, LOCAL) { \\$/ -SHA1HANDSOFF ../hmac/sha1.c 76 -SHALLOC_FRAGMENT ../env/db_salloc.c 149 +SHA1HANDSOFF ../hmac/sha1.c 77 +SHALLOC_FRAGMENT ../env/db_salloc.c 192 +SHM_R ../os/os_oflags.c 136 +SHM_W ../os/os_oflags.c 139 SHOBJECT_LOCK ../dbinc/lock.h /^#define SHOBJECT_LOCK(lt, reg, shobj, ndx) \\$/ SH_DBT_PTR ../dbinc/lock.h /^#define SH_DBT_PTR(p) ((void *)(((u_int8_t *)(p)) / SH_LIST_EMPTY ../dbinc/shqueue.h /^#define SH_LIST_EMPTY(head) \\$/ @@ -1483,7 +1775,7 @@ SPLITOLD ../dbinc/hash.h 132 SPL_NRECS ../dbinc/btree.h 69 SSZ ../dbinc/db_int.in /^#define SSZ(name, field) P_TO_UINT16(&(((name *)0/ SSZA ../dbinc/db_int.in /^#define SSZA(name, field) P_TO_UINT16(&(((name *)0/ -STACK_TO_CURSOR ../btree/bt_recno.c /^#define STACK_TO_CURSOR(cp) { \\$/ +STACK_TO_CURSOR ../btree/bt_recno.c /^#define STACK_TO_CURSOR(cp, ret) { \\$/ STAILQ_CONCAT ../dbinc/queue.h /^#define STAILQ_CONCAT(head1, head2) do { \\$/ STAILQ_EMPTY ../dbinc/queue.h /^#define STAILQ_EMPTY(head) ((head)->stqh_first == / STAILQ_ENTRY ../dbinc/queue.h /^#define STAILQ_ENTRY(type) \\$/ @@ -1500,24 +1792,32 @@ STAILQ_NEXT ../dbinc/queue.h /^#define STAILQ_NEXT(elm, field) 
((elm)->field.stq STAILQ_REMOVE ../dbinc/queue.h /^#define STAILQ_REMOVE(head, elm, type, field) do {/ STAILQ_REMOVE_HEAD ../dbinc/queue.h /^#define STAILQ_REMOVE_HEAD(head, field) do { \\$/ STAILQ_REMOVE_HEAD_UNTIL ../dbinc/queue.h /^#define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field)/ -STDERR_FILENO ../common/db_err.c 335 +STAT_FMT ../dbinc/db_int.in /^#define STAT_FMT(msg, fmt, type, v) do { \\$/ +STAT_HEX ../dbinc/db_int.in /^#define STAT_HEX(msg, v) \\$/ +STAT_ISSET ../dbinc/db_int.in /^#define STAT_ISSET(msg, p) \\$/ +STAT_LONG ../dbinc/db_int.in /^#define STAT_LONG(msg, v) \\$/ +STAT_LSN ../dbinc/db_int.in /^#define STAT_LSN(msg, lsnp) \\$/ +STAT_STRING ../dbinc/db_int.in /^#define STAT_STRING(msg, p) do { \\$/ +STAT_ULONG ../dbinc/db_int.in /^#define STAT_ULONG(msg, v) \\$/ +STDERR_FILENO ../clib/snprintf.c 124 STD_LOCKING ../dbinc/db_int.in /^#define STD_LOCKING(dbc) \\$/ STK_CLRDBC ../dbinc/btree.h 75 STK_NOLOCK ../dbinc/btree.h 76 -ST_DUPOK ../dbinc/db_verify.h 51 -ST_DUPSET ../dbinc/db_verify.h 52 -ST_DUPSORT ../dbinc/db_verify.h 53 -ST_IS_RECNO ../dbinc/db_verify.h 54 -ST_OVFL_LEAF ../dbinc/db_verify.h 55 -ST_RECNUM ../dbinc/db_verify.h 56 -ST_RELEN ../dbinc/db_verify.h 57 -ST_TOPLEVEL ../dbinc/db_verify.h 58 +ST_DUPOK ../dbinc/db_verify.h 52 +ST_DUPSET ../dbinc/db_verify.h 53 +ST_DUPSORT ../dbinc/db_verify.h 54 +ST_IS_RECNO ../dbinc/db_verify.h 55 +ST_OVFL_LEAF ../dbinc/db_verify.h 56 +ST_RECNUM ../dbinc/db_verify.h 57 +ST_RELEN ../dbinc/db_verify.h 58 +ST_TOPLEVEL ../dbinc/db_verify.h 59 SWAP ../crypto/rijndael/rijndael-alg-fst.c /^#define SWAP(x) (_lrotl(x, 8) & 0x00ff00ff | _lrot/ SWAP16 ../dbinc/db_swap.h /^#define SWAP16(p) { \\$/ SWAP32 ../dbinc/db_swap.h /^#define SWAP32(p) { \\$/ -SWIG_JavaExceptionCodes ../libdb_java/db_java_wrap.c 30 -SWIG_JavaThrowException ../libdb_java/db_java_wrap.c /^void SWIG_JavaThrowException(JNIEnv *jenv, SWIG_Ja/ +SWIG_JavaExceptionCodes ../libdb_java/db_java_wrap.c 31 +SWIG_JavaThrowException ../libdb_java/db_java_wrap.c /^static void SWIG_JavaThrowException(JNIEnv *jenv, / SWIG_NOINCLUDE ../libdb_java/db_java_wrap.c 75 +SWIG_contract_assert ../libdb_java/db_java_wrap.c /^#define SWIG_contract_assert(nullreturn, expr, msg/ S_APPEND ../dbinc/btree.h 103 S_DELETE ../dbinc/btree.h 117 S_DELNO ../dbinc/btree.h 104 @@ -1527,13 +1827,13 @@ S_EXACT ../dbinc/btree.h 107 S_FIND ../dbinc/btree.h 118 S_FIND_WR ../dbinc/btree.h 119 S_INSERT ../dbinc/btree.h 120 -S_IRGRP ../os/os_oflags.c 83 -S_IROTH ../os/os_oflags.c 89 -S_IRUSR ../os/os_oflags.c 77 +S_IRGRP ../os/os_oflags.c 74 +S_IROTH ../os/os_oflags.c 80 +S_IRUSR ../os/os_oflags.c 68 S_ISDIR ../os/os_stat.c /^#define S_ISDIR(m) (_S_IFDIR & (m))$/ -S_IWGRP ../os/os_oflags.c 86 -S_IWOTH ../os/os_oflags.c 92 -S_IWUSR ../os/os_oflags.c 80 +S_IWGRP ../os/os_oflags.c 77 +S_IWOTH ../os/os_oflags.c 83 +S_IWUSR ../os/os_oflags.c 71 S_KEYFIRST ../dbinc/btree.h 121 S_KEYLAST ../dbinc/btree.h 122 S_PARENT ../dbinc/btree.h 108 @@ -1560,13 +1860,14 @@ TAILQ_LAST ../dbinc/queue.h /^#define TAILQ_LAST(head, headname) \\$/ TAILQ_NEXT ../dbinc/queue.h /^#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_n/ TAILQ_PREV ../dbinc/queue.h /^#define TAILQ_PREV(elm, headname, field) \\$/ TAILQ_REMOVE ../dbinc/queue.h /^#define TAILQ_REMOVE(head, elm, field) do { \\$/ -TCLDB_SENDITEMS ../tcl/tcl_db_pkg.c 3089 -TEMPERING_MASK_B ../crypto/mersenne/mt19937db.c 45 -TEMPERING_MASK_C ../crypto/mersenne/mt19937db.c 46 +TCLDB_SENDITEMS ../tcl/tcl_db_pkg.c 3467 +TEMPERING_MASK_B ../crypto/mersenne/mt19937db.c 44 +TEMPERING_MASK_C 
../crypto/mersenne/mt19937db.c 45 TEMPERING_SHIFT_L ../crypto/mersenne/mt19937db.c /^#define TEMPERING_SHIFT_L(y) (y >> 18)$/ TEMPERING_SHIFT_S ../crypto/mersenne/mt19937db.c /^#define TEMPERING_SHIFT_S(y) (y << 7)$/ TEMPERING_SHIFT_T ../crypto/mersenne/mt19937db.c /^#define TEMPERING_SHIFT_T(y) (y << 15)$/ TEMPERING_SHIFT_U ../crypto/mersenne/mt19937db.c /^#define TEMPERING_SHIFT_U(y) (y >> 11)$/ +TIMESTAMP_CHECK ../rep/rep_util.c /^#define TIMESTAMP_CHECK(dbenv, ts, renv) \\$/ TLOOP ../clib/memmove.c /^#define TLOOP(s) if (t) TLOOP1(s)$/ TLOOP1 ../clib/memmove.c /^#define TLOOP1(s) do { s; } while (--t)$/ TMASYNC ../dbinc/xa.h 90 @@ -1594,40 +1895,41 @@ TM_RESUME ../dbinc/xa.h 117 TRACEBUF ../dbinc/queue.h 193 TRASHIT ../dbinc/queue.h /^#define TRASHIT(x) do {(x) = (void *)-1;} while (0/ TRUE ../crypto/rijndael/rijndael-api-fst.h 53 -TXN2JDBENV ../libdb_java/db_java_wrap.c 387 +TXN2JDBENV ../libdb_java/db_java_wrap.c 814 TXNLIST_NEW ../dbinc/db_dispatch.h 101 -TXN_ABORT ../dbinc/txn.h 145 +TXN_ABORT ../dbinc/txn.h 207 TXN_ABORTED ../dbinc/txn.h 47 TXN_BUBBLE ../db/db_dispatch.c /^#define TXN_BUBBLE(AP, MAX) { \\$/ -TXN_CHILDCOMMIT ../dbinc/db.in 833 -TXN_COMMIT ../dbinc/txn.h 143 +TXN_CHILDCOMMIT ../dbinc/db.in 898 +TXN_COMMIT ../dbinc/txn.h 205 TXN_COMMITTED ../dbinc/txn.h 49 -TXN_COMPENSATE ../dbinc/db.in 834 -TXN_DIRTY_READ ../dbinc/db.in 835 +TXN_COMPENSATE ../dbinc/db.in 899 +TXN_DEADLOCK ../dbinc/db.in 900 +TXN_DEGREE_2 ../dbinc/db.in 901 +TXN_DIRTY_READ ../dbinc/db.in 902 TXN_DTL_COLLECTED ../dbinc/txn.h 51 TXN_DTL_RESTORED ../dbinc/txn.h 52 -TXN_EVENT ../txn/txn_util.c 27 +TXN_EVENT ../txn/txn_util.c 25 TXN_EVENT_T ../dbinc/txn.h 21 -TXN_EXPECTED ../dbinc/txn.h 148 -TXN_IGNORE ../dbinc/txn.h 147 +TXN_EXPECTED ../dbinc/txn.h 209 +TXN_IGNORE ../dbinc/txn.h 208 TXN_INVALID ../dbinc/txn.h 32 TXN_IN_RECOVERY ../dbinc/txn.h 115 -TXN_LOCKTIMEOUT ../dbinc/db.in 836 +TXN_LOCKTIMEOUT ../dbinc/db.in 903 TXN_MAINT_SIZE ../dbinc/txn.h 120 -TXN_MALLOC ../dbinc/db.in 837 +TXN_MALLOC ../dbinc/db.in 904 TXN_MAXIMUM ../dbinc/txn.h 31 TXN_MINIMUM ../dbinc/txn.h 30 -TXN_NOSYNC ../dbinc/db.in 838 -TXN_NOTFOUND ../dbinc/txn.h 146 -TXN_NOWAIT ../dbinc/db.in 839 -TXN_OK ../dbinc/txn.h 142 +TXN_NOSYNC ../dbinc/db.in 905 +TXN_NOWAIT ../dbinc/db.in 906 +TXN_OK ../dbinc/txn.h 204 TXN_ON ../dbinc/db_int.in /^#define TXN_ON(dbenv) ((dbenv)->tx_handle != NULL/ -TXN_PREPARE ../dbinc/txn.h 144 +TXN_PREPARE ../dbinc/txn.h 206 TXN_PREPARED ../dbinc/txn.h 48 -TXN_RESTORED ../dbinc/db.in 840 +TXN_RESTORED ../dbinc/db.in 907 TXN_RUNNING ../dbinc/txn.h 46 -TXN_SYNC ../dbinc/db.in 841 -TXN_UNEXPECTED ../dbinc/txn.h 149 +TXN_SYNC ../dbinc/db.in 908 +TXN_UNEXPECTED ../dbinc/txn.h 210 TXN_XA_ABORTED ../dbinc/txn.h 57 TXN_XA_DEADLOCKED ../dbinc/txn.h 58 TXN_XA_ENDED ../dbinc/txn.h 59 @@ -1637,32 +1939,37 @@ TXN_XA_SUSPENDED ../dbinc/txn.h 62 TYPE ../dbinc/db_page.h /^#define TYPE(p) (((PAGE *)p)->type)$/ TYPE_ERR_PRINT ../dbinc/db_verify.h /^#define TYPE_ERR_PRINT(dbenv, func, pgno, ptype) / T_RESTORED ../db/db_dispatch.c /^#define T_RESTORED(txn) ((txn) != NULL && F_/ -UINT16_T_MAX ../dbinc/db_int.in 38 -UINT32_T_MAX ../dbinc/db_int.in 39 +UINT16_MAX ../dbinc/db_int.in 39 +UINT32_CMP ../lock/lock_list.c /^#define UINT32_CMP(A, B) ((A) == (B) ? 
0 : ((A) > / +UINT32_MAX ../dbinc/db_int.in 42 +UINT64_FMT ../dbinc/db_int.in 56 +UINT64_MAX ../dbinc/db_int.in 53 UMRW_SET ../dbinc/debug.h /^#define UMRW_SET(v) (v) = 0$/ UNLOCKREGION ../dbinc/lock.h /^#define UNLOCKREGION(dbenv, lt) R_UNLOCK((dbenv),/ -UPPER_MASK ../crypto/mersenne/mt19937db.c 41 -UPREFIX ../clib/strerror.c 57 -USEC_PER_MS ../dbinc/db_int.in 45 -VRFY_CHILDINFO ../dbinc/db_int.in 421 -VRFY_DBINFO ../dbinc/db_int.in 422 -VRFY_DUPS_UNSORTED ../dbinc/db_verify.h 181 -VRFY_HAS_DUPS ../dbinc/db_verify.h 182 -VRFY_HAS_DUPSORT ../dbinc/db_verify.h 183 -VRFY_HAS_RECNUMS ../dbinc/db_verify.h 185 -VRFY_HAS_SUBDBS ../dbinc/db_verify.h 184 -VRFY_INCOMPLETE ../dbinc/db_verify.h 186 -VRFY_IS_ALLZEROES ../dbinc/db_verify.h 187 -VRFY_IS_FIXEDLEN ../dbinc/db_verify.h 188 -VRFY_IS_RECNO ../dbinc/db_verify.h 189 -VRFY_IS_RRECNO ../dbinc/db_verify.h 190 -VRFY_LEAFCHAIN_BROKEN ../dbinc/db_verify.h 137 -VRFY_OVFL_LEAFSEEN ../dbinc/db_verify.h 191 -VRFY_PAGEINFO ../dbinc/db_int.in 423 -VRFY_QMETA_SET ../dbinc/db_verify.h 138 -V_DUPLICATE ../dbinc/db_verify.h 202 -V_OVERFLOW ../dbinc/db_verify.h 203 -V_RECNO ../dbinc/db_verify.h 204 +UPPER_MASK ../crypto/mersenne/mt19937db.c 40 +UPREFIX ../clib/strerror.c 55 +USEC_PER_MS ../dbinc/db_int.in 76 +VERIFY_FLAGS ../db/db_vrfy.c 65 +VRFY_CHILDINFO ../dbinc/db_int.in 525 +VRFY_DBINFO ../dbinc/db_int.in 526 +VRFY_DUPS_UNSORTED ../dbinc/db_verify.h 182 +VRFY_HAS_DUPS ../dbinc/db_verify.h 183 +VRFY_HAS_DUPSORT ../dbinc/db_verify.h 184 +VRFY_HAS_RECNUMS ../dbinc/db_verify.h 186 +VRFY_HAS_SUBDBS ../dbinc/db_verify.h 185 +VRFY_INCOMPLETE ../dbinc/db_verify.h 187 +VRFY_IS_ALLZEROES ../dbinc/db_verify.h 188 +VRFY_IS_FIXEDLEN ../dbinc/db_verify.h 189 +VRFY_IS_RECNO ../dbinc/db_verify.h 190 +VRFY_IS_RRECNO ../dbinc/db_verify.h 191 +VRFY_ITEM ../btree/bt_verify.c 507 +VRFY_LEAFCHAIN_BROKEN ../dbinc/db_verify.h 138 +VRFY_OVFL_LEAFSEEN ../dbinc/db_verify.h 192 +VRFY_PAGEINFO ../dbinc/db_int.in 527 +VRFY_QMETA_SET ../dbinc/db_verify.h 139 +V_DUPLICATE ../dbinc/db_verify.h 203 +V_OVERFLOW ../dbinc/db_verify.h 204 +V_RECNO ../dbinc/db_verify.h 205 WRAPPED_CLASS ../dbinc/cxx_int.h /^#define WRAPPED_CLASS(_WRAPPER_CLASS, _IMP_CLASS, / XAER_ASYNC ../dbinc/xa.h 164 XAER_DUPID ../dbinc/xa.h 173 @@ -1672,7 +1979,7 @@ XAER_OUTSIDE ../dbinc/xa.h 174 XAER_PROTO ../dbinc/xa.h 170 XAER_RMERR ../dbinc/xa.h 166 XAER_RMFAIL ../dbinc/xa.h 172 -XA_FLAGS ../xa/xa.c 176 +XA_FLAGS ../xa/xa.c 174 XA_H ../dbinc/xa.h 15 XA_HEURCOM ../dbinc/xa.h 152 XA_HEURHAZ ../dbinc/xa.h 150 @@ -1696,7 +2003,7 @@ XID ../dbinc/xa.h 30 XIDDATASIZE ../dbinc/xa.h 20 ZEROPG_ERR_PRINT ../dbinc/db_verify.h /^#define ZEROPG_ERR_PRINT(dbenv, pgno, str) do { / ZERO_LSN ../dbinc/db_int.in /^#define ZERO_LSN(LSN) do { \\$/ -ZF_LARGE_WRITE ../os/os_rw.c 247 +ZF_LARGE_WRITE ../os/os_rw.c 273 _CXX_INT_H_ ../dbinc/cxx_int.h 11 _CopyObjBytes ../tcl/tcl_internal.c /^_CopyObjBytes(interp, obj, newp, sizep, freep)$/ _DB_185_H_ ../dbinc/db_185.in 39 @@ -1728,10 +2035,10 @@ _DB_VERIFY_H_ ../dbinc/db_verify.h 11 _DbInfoDelete ../tcl/tcl_db.c /^_DbInfoDelete(interp, dbip)$/ _DeleteInfo ../tcl/tcl_internal.c /^_DeleteInfo(p)$/ _EnvInfoDelete ../tcl/tcl_env.c /^_EnvInfoDelete(interp, envip)$/ -_ErrorFunc ../tcl/tcl_internal.c /^_ErrorFunc(pfx, msg)$/ +_ErrorFunc ../tcl/tcl_internal.c /^_ErrorFunc(dbenv, pfx, msg)$/ _ErrorSetup ../tcl/tcl_internal.c /^_ErrorSetup(interp, ret, errmsg)$/ _FOP_H_ ../dbinc/fop.h 11 -_GetFlagsList ../tcl/tcl_internal.c /^_GetFlagsList(interp, flags, func)$/ +_GetFlagsList ../tcl/tcl_internal.c 
/^_GetFlagsList(interp, flags, fnp)$/ _GetGlobPrefix ../tcl/tcl_internal.c /^_GetGlobPrefix(pattern, prefix)$/ _GetLsn ../tcl/tcl_internal.c /^_GetLsn(interp, obj, lsn)$/ _GetThisLock ../tcl/tcl_lock.c /^_GetThisLock(interp, envp, lockid, flag, objp, mod/ @@ -1751,14 +2058,16 @@ _Set3DBTList ../tcl/tcl_internal.c /^_Set3DBTList(interp, list, elem1, is1recno, _SetInfoData ../tcl/tcl_internal.c /^_SetInfoData(p, data)$/ _SetListElem ../tcl/tcl_internal.c /^_SetListElem(interp, list, elem1, e1cnt, elem2, e2/ _SetListElemInt ../tcl/tcl_internal.c /^_SetListElemInt(interp, list, elem1, elem2)$/ +_SetListElemWideInt ../tcl/tcl_internal.c /^_SetListElemWideInt(interp, list, elem1, elem2)$/ _SetListRecnoElem ../tcl/tcl_internal.c /^_SetListRecnoElem(interp, list, elem1, elem2, e2si/ _SetMultiList ../tcl/tcl_internal.c /^_SetMultiList(interp, list, key, data, type, flag)/ _TXN_H_ ../dbinc/txn.h 11 _TxnInfoDelete ../tcl/tcl_txn.c /^_TxnInfoDelete(interp, txnip)$/ __BIT_TYPES_DEFINED__ ../dbinc/db.in 62 -__DBC_INTERNAL ../dbinc/db_int.in 297 -__DB_OSTREAMCLASS ../dbinc/db_cxx.in 57 +__DBC_INTERNAL ../dbinc/db_int.in 372 __DB_OVFL ../env/env_open.c /^#define __DB_OVFL(v, max) \\$/ +__DB_STD ../dbinc/db_cxx.in /^#define __DB_STD(x) std::x$/ +__ENV_LPUT ../dbinc/db_am.h /^#define __ENV_LPUT(dbenv, lock, flags) \\$/ __LPUT ../dbinc/db_am.h /^#define __LPUT(dbc, lock) \\$/ __P ../dbinc/db_185.in /^#define __P(protos) protos \/* ANSI C prototypes */ __P_DUPLICATE ../dbinc/db_page.h 38 @@ -1782,9 +2091,8 @@ __attribute__ ../dbinc/debug.h /^#define __attribute__(s)$/ __bam_30_btreemeta ../btree/bt_upgrade.c /^__bam_30_btreemeta(dbp, real_name, buf)$/ __bam_31_btreemeta ../btree/bt_upgrade.c /^__bam_31_btreemeta(dbp, real_name, flags, fhp, h, / __bam_31_lbtree ../btree/bt_upgrade.c /^__bam_31_lbtree(dbp, real_name, flags, fhp, h, dir/ -__bam_adj_getpgnos ../btree/btree_auto.c /^__bam_adj_getpgnos(dbenv, rec, lsnp, notused1, sum/ __bam_adj_log ../btree/btree_auto.c /^__bam_adj_log(dbp, txnid, ret_lsnp, flags, pgno, l/ -__bam_adj_print ../btree/btree_auto.c /^__bam_adj_print(dbenv, dbtp, lsnp, notused2, notus/ +__bam_adj_print ../btree/btree_autop.c /^__bam_adj_print(dbenv, dbtp, lsnp, notused2, notus/ __bam_adj_read ../btree/btree_auto.c /^__bam_adj_read(dbenv, recbuf, argpp)$/ __bam_adj_recover ../btree/bt_rec.c /^__bam_adj_recover(dbenv, dbtp, lsnp, op, info)$/ __bam_adjindx ../btree/bt_delete.c /^__bam_adjindx(dbc, h, indx, indx_copy, is_insert)$/ @@ -1819,22 +2127,18 @@ __bam_ca_rsplit ../btree/bt_curadj.c /^__bam_ca_rsplit(my_dbc, fpgno, tpgno)$/ __bam_ca_split ../btree/bt_curadj.c /^__bam_ca_split(my_dbc, ppgno, lpgno, rpgno, split_/ __bam_ca_undodup ../btree/bt_curadj.c /^__bam_ca_undodup(dbp, first, fpgno, fi, ti)$/ __bam_ca_undosplit ../btree/bt_curadj.c /^__bam_ca_undosplit(dbp, frompgno, topgno, lpgno, s/ -__bam_cadjust_getpgnos ../btree/btree_auto.c /^__bam_cadjust_getpgnos(dbenv, rec, lsnp, notused1,/ __bam_cadjust_log ../btree/btree_auto.c /^__bam_cadjust_log(dbp, txnid, ret_lsnp, flags, pgn/ -__bam_cadjust_print ../btree/btree_auto.c /^__bam_cadjust_print(dbenv, dbtp, lsnp, notused2, n/ +__bam_cadjust_print ../btree/btree_autop.c /^__bam_cadjust_print(dbenv, dbtp, lsnp, notused2, n/ __bam_cadjust_read ../btree/btree_auto.c /^__bam_cadjust_read(dbenv, recbuf, argpp)$/ __bam_cadjust_recover ../btree/bt_rec.c /^__bam_cadjust_recover(dbenv, dbtp, lsnp, op, info)/ -__bam_cdel_getpgnos ../btree/btree_auto.c /^__bam_cdel_getpgnos(dbenv, rec, lsnp, notused1, su/ __bam_cdel_log ../btree/btree_auto.c 
/^__bam_cdel_log(dbp, txnid, ret_lsnp, flags, pgno, / -__bam_cdel_print ../btree/btree_auto.c /^__bam_cdel_print(dbenv, dbtp, lsnp, notused2, notu/ +__bam_cdel_print ../btree/btree_autop.c /^__bam_cdel_print(dbenv, dbtp, lsnp, notused2, notu/ __bam_cdel_read ../btree/btree_auto.c /^__bam_cdel_read(dbenv, recbuf, argpp)$/ __bam_cdel_recover ../btree/bt_rec.c /^__bam_cdel_recover(dbenv, dbtp, lsnp, op, info)$/ __bam_cmp ../btree/bt_compare.c /^__bam_cmp(dbp, dbt, h, indx, func, cmpp)$/ __bam_copy ../btree/bt_split.c /^__bam_copy(dbp, pp, cp, nxt, stop)$/ -__bam_cprint ../btree/bt_curadj.c /^__bam_cprint(dbc)$/ -__bam_curadj_getpgnos ../btree/btree_auto.c /^__bam_curadj_getpgnos(dbenv, rec, lsnp, notused1, / __bam_curadj_log ../btree/btree_auto.c /^__bam_curadj_log(dbp, txnid, ret_lsnp, flags, mode/ -__bam_curadj_print ../btree/btree_auto.c /^__bam_curadj_print(dbenv, dbtp, lsnp, notused2, no/ +__bam_curadj_print ../btree/btree_autop.c /^__bam_curadj_print(dbenv, dbtp, lsnp, notused2, no/ __bam_curadj_read ../btree/btree_auto.c /^__bam_curadj_read(dbenv, recbuf, argpp)$/ __bam_curadj_recover ../btree/bt_rec.c /^__bam_curadj_recover(dbenv, dbtp, lsnp, op, info)$/ __bam_db_close ../btree/bt_method.c /^__bam_db_close(dbp)$/ @@ -1849,9 +2153,8 @@ __bam_get_prev ../btree/bt_cursor.c /^__bam_get_prev(dbc)$/ __bam_getboth_finddatum ../btree/bt_cursor.c /^__bam_getboth_finddatum(dbc, data, flags)$/ __bam_getbothc ../btree/bt_cursor.c /^__bam_getbothc(dbc, data)$/ __bam_iitem ../btree/bt_put.c /^__bam_iitem(dbc, key, data, op, flags)$/ -__bam_init_getpgnos ../btree/btree_auto.c /^__bam_init_getpgnos(dbenv, dtabp, dtabsizep)$/ __bam_init_meta ../btree/bt_open.c /^__bam_init_meta(dbp, meta, pgno, lsnp)$/ -__bam_init_print ../btree/btree_auto.c /^__bam_init_print(dbenv, dtabp, dtabsizep)$/ +__bam_init_print ../btree/btree_autop.c /^__bam_init_print(dbenv, dtabp, dtabsizep)$/ __bam_init_recover ../btree/btree_auto.c /^__bam_init_recover(dbenv, dtabp, dtabsizep)$/ __bam_isopd ../btree/bt_cursor.c /^__bam_isopd(dbc, pgnop)$/ __bam_key_range ../btree/bt_stat.c /^__bam_key_range(dbc, dbt, kp, flags)$/ @@ -1870,30 +2173,32 @@ __bam_partsize ../btree/bt_put.c /^__bam_partsize(dbp, op, data, h, indx)$/ __bam_pgin ../btree/bt_conv.c /^__bam_pgin(dbenv, dummydbp, pg, pp, cookie)$/ __bam_pgout ../btree/bt_conv.c /^__bam_pgout(dbenv, dummydbp, pg, pp, cookie)$/ __bam_pinsert ../btree/bt_split.c /^__bam_pinsert(dbc, parent, lchild, rchild, space_c/ +__bam_print_cursor ../btree/bt_stat.c /^__bam_print_cursor(dbc)$/ __bam_psplit ../btree/bt_split.c /^__bam_psplit(dbc, cp, lp, rp, splitret)$/ -__bam_rcuradj_getpgnos ../btree/btree_auto.c /^__bam_rcuradj_getpgnos(dbenv, rec, lsnp, notused1,/ __bam_rcuradj_log ../btree/btree_auto.c /^__bam_rcuradj_log(dbp, txnid, ret_lsnp, flags, mod/ -__bam_rcuradj_print ../btree/btree_auto.c /^__bam_rcuradj_print(dbenv, dbtp, lsnp, notused2, n/ +__bam_rcuradj_print ../btree/btree_autop.c /^__bam_rcuradj_print(dbenv, dbtp, lsnp, notused2, n/ __bam_rcuradj_read ../btree/btree_auto.c /^__bam_rcuradj_read(dbenv, recbuf, argpp)$/ __bam_rcuradj_recover ../btree/bt_rec.c /^__bam_rcuradj_recover(dbenv, dbtp, lsnp, op, info)/ __bam_read_root ../btree/bt_open.c /^__bam_read_root(dbp, txn, base_pgno, flags)$/ __bam_reclaim ../btree/bt_reclaim.c /^__bam_reclaim(dbp, txn)$/ -__bam_repl_getpgnos ../btree/btree_auto.c /^__bam_repl_getpgnos(dbenv, rec, lsnp, notused1, su/ +__bam_relink ../btree/bt_delete.c /^__bam_relink(dbc, pagep, new_next)$/ +__bam_relink_log ../btree/btree_auto.c 
/^__bam_relink_log(dbp, txnid, ret_lsnp, flags, pgno/ +__bam_relink_print ../btree/btree_autop.c /^__bam_relink_print(dbenv, dbtp, lsnp, notused2, no/ +__bam_relink_read ../btree/btree_auto.c /^__bam_relink_read(dbenv, recbuf, argpp)$/ +__bam_relink_recover ../btree/bt_rec.c /^__bam_relink_recover(dbenv, dbtp, lsnp, op, info)$/ __bam_repl_log ../btree/btree_auto.c /^__bam_repl_log(dbp, txnid, ret_lsnp, flags, pgno, / -__bam_repl_print ../btree/btree_auto.c /^__bam_repl_print(dbenv, dbtp, lsnp, notused2, notu/ +__bam_repl_print ../btree/btree_autop.c /^__bam_repl_print(dbenv, dbtp, lsnp, notused2, notu/ __bam_repl_read ../btree/btree_auto.c /^__bam_repl_read(dbenv, recbuf, argpp)$/ __bam_repl_recover ../btree/bt_rec.c /^__bam_repl_recover(dbenv, dbtp, lsnp, op, info)$/ __bam_ritem ../btree/bt_put.c /^__bam_ritem(dbc, h, indx, data)$/ __bam_root ../btree/bt_split.c /^__bam_root(dbc, cp)$/ -__bam_root_getpgnos ../btree/btree_auto.c /^__bam_root_getpgnos(dbenv, rec, lsnp, notused1, su/ __bam_root_log ../btree/btree_auto.c /^__bam_root_log(dbp, txnid, ret_lsnp, flags, meta_p/ -__bam_root_print ../btree/btree_auto.c /^__bam_root_print(dbenv, dbtp, lsnp, notused2, notu/ +__bam_root_print ../btree/btree_autop.c /^__bam_root_print(dbenv, dbtp, lsnp, notused2, notu/ __bam_root_read ../btree/btree_auto.c /^__bam_root_read(dbenv, recbuf, argpp)$/ __bam_root_recover ../btree/bt_rec.c /^__bam_root_recover(dbenv, dbtp, lsnp, op, info)$/ __bam_rsearch ../btree/bt_rsearch.c /^__bam_rsearch(dbc, recnop, flags, stop, exactp)$/ -__bam_rsplit_getpgnos ../btree/btree_auto.c /^__bam_rsplit_getpgnos(dbenv, rec, lsnp, notused1, / __bam_rsplit_log ../btree/btree_auto.c /^__bam_rsplit_log(dbp, txnid, ret_lsnp, flags, pgno/ -__bam_rsplit_print ../btree/btree_auto.c /^__bam_rsplit_print(dbenv, dbtp, lsnp, notused2, no/ +__bam_rsplit_print ../btree/btree_autop.c /^__bam_rsplit_print(dbenv, dbtp, lsnp, notused2, no/ __bam_rsplit_read ../btree/btree_auto.c /^__bam_rsplit_read(dbenv, recbuf, argpp)$/ __bam_rsplit_recover ../btree/bt_rec.c /^__bam_rsplit_recover(dbenv, dbtp, lsnp, op, info)$/ __bam_safe_getdata ../btree/bt_verify.c /^__bam_safe_getdata(dbp, h, i, ovflok, dbt, freedbt/ @@ -1906,13 +2211,13 @@ __bam_set_bt_minkey ../btree/bt_method.c /^__bam_set_bt_minkey(dbp, bt_minkey)$/ __bam_set_bt_prefix ../btree/bt_method.c /^__bam_set_bt_prefix(dbp, func)$/ __bam_set_flags ../btree/bt_method.c /^__bam_set_flags(dbp, flagsp)$/ __bam_split ../btree/bt_split.c /^__bam_split(dbc, arg, root_pgnop)$/ -__bam_split_getpgnos ../btree/btree_auto.c /^__bam_split_getpgnos(dbenv, rec, lsnp, notused1, s/ __bam_split_log ../btree/btree_auto.c /^__bam_split_log(dbp, txnid, ret_lsnp, flags, left,/ -__bam_split_print ../btree/btree_auto.c /^__bam_split_print(dbenv, dbtp, lsnp, notused2, not/ +__bam_split_print ../btree/btree_autop.c /^__bam_split_print(dbenv, dbtp, lsnp, notused2, not/ __bam_split_read ../btree/btree_auto.c /^__bam_split_read(dbenv, recbuf, argpp)$/ __bam_split_recover ../btree/bt_rec.c /^__bam_split_recover(dbenv, dbtp, lsnp, op, info)$/ __bam_stat ../btree/bt_stat.c /^__bam_stat(dbc, spp, flags)$/ __bam_stat_callback ../btree/bt_stat.c /^__bam_stat_callback(dbp, h, cookie, putp)$/ +__bam_stat_print ../btree/bt_stat.c /^__bam_stat_print(dbc, flags)$/ __bam_stkgrow ../btree/bt_search.c /^__bam_stkgrow(dbenv, cp)$/ __bam_stkrel ../btree/bt_search.c /^__bam_stkrel(dbc, flags)$/ __bam_total ../btree/bt_rsearch.c /^__bam_total(dbp, h)$/ @@ -1928,17 +2233,16 @@ __bam_vrfy_treeorder ../btree/bt_verify.c 
/^__bam_vrfy_treeorder(dbp, pgno, h, l __bhcmp ../mp/mp_sync.c /^__bhcmp(p1, p2)$/ __build_data ../log/log_archive.c /^__build_data(dbenv, pref, listp)$/ __cmpfunc ../log/log_archive.c /^__cmpfunc(p1, p2)$/ -__crdel_init_getpgnos ../db/crdel_auto.c /^__crdel_init_getpgnos(dbenv, dtabp, dtabsizep)$/ -__crdel_init_print ../db/crdel_auto.c /^__crdel_init_print(dbenv, dtabp, dtabsizep)$/ +__crdel_init_print ../db/crdel_autop.c /^__crdel_init_print(dbenv, dtabp, dtabsizep)$/ __crdel_init_recover ../db/crdel_auto.c /^__crdel_init_recover(dbenv, dtabp, dtabsizep)$/ -__crdel_metasub_getpgnos ../db/crdel_auto.c /^__crdel_metasub_getpgnos(dbenv, rec, lsnp, notused/ __crdel_metasub_log ../db/crdel_auto.c /^__crdel_metasub_log(dbp, txnid, ret_lsnp, flags, p/ -__crdel_metasub_print ../db/crdel_auto.c /^__crdel_metasub_print(dbenv, dbtp, lsnp, notused2,/ +__crdel_metasub_print ../db/crdel_autop.c /^__crdel_metasub_print(dbenv, dbtp, lsnp, notused2,/ __crdel_metasub_read ../db/crdel_auto.c /^__crdel_metasub_read(dbenv, recbuf, argpp)$/ __crdel_metasub_recover ../db/crdel_rec.c /^__crdel_metasub_recover(dbenv, dbtp, lsnp, op, inf/ __crypto_algsetup ../crypto/crypto.c /^__crypto_algsetup(dbenv, db_cipher, alg, do_init)$/ __crypto_dbenv_close ../crypto/crypto.c /^__crypto_dbenv_close(dbenv)$/ __crypto_decrypt_meta ../crypto/crypto.c /^__crypto_decrypt_meta(dbenv, dbp, mbuf, do_metachk/ +__crypto_region_destroy ../crypto/crypto.c /^__crypto_region_destroy(dbenv)$/ __crypto_region_init ../crypto/crypto.c /^__crypto_region_init(dbenv)$/ __crypto_set_passwd ../crypto/crypto.c /^__crypto_set_passwd(dbenv_src, dbenv_dest)$/ __db185_open ../db185/db185.c /^__db185_open(file, oflags, mode, type, openinfo)$/ @@ -1950,9 +2254,8 @@ __db_SHA1Update ../hmac/sha1.c /^__db_SHA1Update(context, data, len)$/ __db_SHAPrintContext ../hmac/sha1.c /^__db_SHAPrintContext(context, msg)$/ __db_add_limbo ../db/db_dispatch.c /^__db_add_limbo(dbenv, info, fileid, pgno, count)$/ __db_add_recovery ../db/db_dispatch.c /^__db_add_recovery(dbenv, dtab, dtabsize, func, ndx/ -__db_addrem_getpgnos ../db/db_auto.c /^__db_addrem_getpgnos(dbenv, rec, lsnp, notused1, s/ __db_addrem_log ../db/db_auto.c /^__db_addrem_log(dbp, txnid, ret_lsnp, flags,$/ -__db_addrem_print ../db/db_auto.c /^__db_addrem_print(dbenv, dbtp, lsnp, notused2, not/ +__db_addrem_print ../db/db_autop.c /^__db_addrem_print(dbenv, dbtp, lsnp, notused2, not/ __db_addrem_read ../db/db_auto.c /^__db_addrem_read(dbenv, recbuf, argpp)$/ __db_addrem_recover ../db/db_rec.c /^__db_addrem_recover(dbenv, dbtp, lsnp, op, info)$/ __db_append_primary ../db/db_am.c /^__db_append_primary(dbc, key, data)$/ @@ -1964,14 +2267,13 @@ __db_associate_arg ../db/db_iface.c /^__db_associate_arg(dbp, sdbp, callback, fl __db_associate_pp ../db/db_iface.c /^__db_associate_pp(dbp, txn, sdbp, callback, flags)/ __db_associate_proc ../rpc_server/c/db_server_proc.c /^__db_associate_proc(dbpcl_id, txnpcl_id, sdbpcl_id/ __db_backup_name ../db/db.c /^__db_backup_name(dbenv, name, txn, backup)$/ -__db_big_getpgnos ../db/db_auto.c /^__db_big_getpgnos(dbenv, rec, lsnp, notused1, summ/ __db_big_log ../db/db_auto.c /^__db_big_log(dbp, txnid, ret_lsnp, flags,$/ -__db_big_print ../db/db_auto.c /^__db_big_print(dbenv, dbtp, lsnp, notused2, notuse/ +__db_big_print ../db/db_autop.c /^__db_big_print(dbenv, dbtp, lsnp, notused2, notuse/ __db_big_read ../db/db_auto.c /^__db_big_read(dbenv, recbuf, argpp)$/ __db_big_recover ../db/db_rec.c /^__db_big_recover(dbenv, dbtp, lsnp, op, info)$/ __db_blockDecrypt 
../crypto/rijndael/rijndael-api-fst.c /^__db_blockDecrypt(cipher, key, input, inputLen, ou/ __db_blockEncrypt ../crypto/rijndael/rijndael-api-fst.c /^__db_blockEncrypt(cipher, key, input, inputLen, ou/ -__db_bmeta ../db/db_pr.c /^__db_bmeta(dbp, fp, h, flags)$/ +__db_bmeta ../db/db_pr.c /^__db_bmeta(dbp, h, flags)$/ __db_bt_maxkey_proc ../rpc_server/c/db_server_proc.c /^__db_bt_maxkey_proc(dbpcl_id, maxkey, replyp)$/ __db_bt_minkey_proc ../rpc_server/c/db_server_proc.c /^__db_bt_minkey_proc(dbpcl_id, minkey, replyp)$/ __db_build_bi ../db/db_upg_opd.c /^__db_build_bi(dbp, fhp, ipage, page, indx, nomemp)/ @@ -2006,22 +2308,20 @@ __db_c_put_arg ../db/db_iface.c /^__db_c_put_arg(dbc, key, data, flags)$/ __db_c_put_pp ../db/db_iface.c /^__db_c_put_pp(dbc, key, data, flags)$/ __db_c_secondary_get_pp ../db/db_cam.c /^__db_c_secondary_get_pp(dbc, skey, data, flags)$/ __db_check_chksum ../hmac/hmac.c /^__db_check_chksum(dbenv, db_cipher, chksum, data, / +__db_check_lsn ../common/db_err.c /^__db_check_lsn(dbenv, lsn, prev)$/ __db_check_txn ../common/db_err.c /^__db_check_txn(dbp, txn, assoc_lid, read_op)$/ __db_chk_meta ../db/db_open.c /^__db_chk_meta(dbenv, dbp, meta, do_metachk)$/ __db_chksum ../hmac/hmac.c /^__db_chksum(data, data_len, mac_key, store)$/ __db_cipherInit ../crypto/rijndael/rijndael-api-fst.c /^__db_cipherInit(cipher, mode, IV)$/ __db_cipherUpdateRounds ../crypto/rijndael/rijndael-api-fst.c /^__db_cipherUpdateRounds(cipher, key, input, inputL/ -__db_cksum_getpgnos ../db/db_auto.c /^__db_cksum_getpgnos(dbenv, rec, lsnp, notused1, su/ __db_cksum_log ../db/db_auto.c /^__db_cksum_log(dbenv, txnid, ret_lsnp, flags)$/ -__db_cksum_print ../db/db_auto.c /^__db_cksum_print(dbenv, dbtp, lsnp, notused2, notu/ +__db_cksum_print ../db/db_autop.c /^__db_cksum_print(dbenv, dbtp, lsnp, notused2, notu/ __db_cksum_read ../db/db_auto.c /^__db_cksum_read(dbenv, recbuf, argpp)$/ __db_cksum_recover ../db/db_rec.c /^__db_cksum_recover(dbenv, dbtp, lsnp, op, info)$/ __db_close ../db/db.c /^__db_close(dbp, txn, flags)$/ __db_close_int ../rpc_server/c/db_server_util.c /^__db_close_int(id, flags)$/ __db_close_pp ../db/db_iface.c /^__db_close_pp(dbp, flags)$/ __db_close_proc ../rpc_server/c/db_server_proc.c /^__db_close_proc(dbpcl_id, flags, replyp)$/ -__db_cprint ../db/db_am.c /^__db_cprint(dbp)$/ -__db_cprint_item ../db/db_am.c /^int __db_cprint_item(dbc)$/ __db_create_proc ../rpc_server/c/db_server_proc.c /^__db_create_proc(dbenvcl_id, flags, replyp)$/ __db_curinval ../db/db_iface.c /^__db_curinval(dbenv)$/ __db_cursor ../db/db_iface.c /^__db_cursor(dbp, txn, dbcp, flags)$/ @@ -2029,55 +2329,55 @@ __db_cursor_arg ../db/db_iface.c /^__db_cursor_arg(dbp, flags)$/ __db_cursor_check ../db/db_truncate.c /^__db_cursor_check(dbp)$/ __db_cursor_int ../db/db_am.c /^__db_cursor_int(dbp, txn, dbtype, root, is_opd, lo/ __db_cursor_pp ../db/db_iface.c /^__db_cursor_pp(dbp, txn, dbcp, flags)$/ -__db_cursor_proc ../rpc_server/c/db_server_proc.c /^__db_cursor_proc(dbpcl_id, txnpcl_id,$/ -__db_db_associate_4002 ../rpc_client/db_server_clnt.c /^__db_db_associate_4002(argp, clnt)$/ -__db_db_bt_maxkey_4002 ../rpc_client/db_server_clnt.c /^__db_db_bt_maxkey_4002(argp, clnt)$/ -__db_db_bt_minkey_4002 ../rpc_client/db_server_clnt.c /^__db_db_bt_minkey_4002(argp, clnt)$/ -__db_db_close_4002 ../rpc_client/db_server_clnt.c /^__db_db_close_4002(argp, clnt)$/ -__db_db_create_4002 ../rpc_client/db_server_clnt.c /^__db_db_create_4002(argp, clnt)$/ -__db_db_cursor_4002 ../rpc_client/db_server_clnt.c /^__db_db_cursor_4002(argp, clnt)$/ 
-__db_db_del_4002 ../rpc_client/db_server_clnt.c /^__db_db_del_4002(argp, clnt)$/ -__db_db_encrypt_4002 ../rpc_client/db_server_clnt.c /^__db_db_encrypt_4002(argp, clnt)$/ -__db_db_extentsize_4002 ../rpc_client/db_server_clnt.c /^__db_db_extentsize_4002(argp, clnt)$/ -__db_db_flags_4002 ../rpc_client/db_server_clnt.c /^__db_db_flags_4002(argp, clnt)$/ -__db_db_get_4002 ../rpc_client/db_server_clnt.c /^__db_db_get_4002(argp, clnt)$/ -__db_db_get_bt_minkey_4002 ../rpc_client/db_server_clnt.c /^__db_db_get_bt_minkey_4002(argp, clnt)$/ -__db_db_get_encrypt_flags_4002 ../rpc_client/db_server_clnt.c /^__db_db_get_encrypt_flags_4002(argp, clnt)$/ -__db_db_get_extentsize_4002 ../rpc_client/db_server_clnt.c /^__db_db_get_extentsize_4002(argp, clnt)$/ -__db_db_get_flags_4002 ../rpc_client/db_server_clnt.c /^__db_db_get_flags_4002(argp, clnt)$/ -__db_db_get_h_ffactor_4002 ../rpc_client/db_server_clnt.c /^__db_db_get_h_ffactor_4002(argp, clnt)$/ -__db_db_get_h_nelem_4002 ../rpc_client/db_server_clnt.c /^__db_db_get_h_nelem_4002(argp, clnt)$/ -__db_db_get_lorder_4002 ../rpc_client/db_server_clnt.c /^__db_db_get_lorder_4002(argp, clnt)$/ -__db_db_get_name_4002 ../rpc_client/db_server_clnt.c /^__db_db_get_name_4002(argp, clnt)$/ -__db_db_get_open_flags_4002 ../rpc_client/db_server_clnt.c /^__db_db_get_open_flags_4002(argp, clnt)$/ -__db_db_get_pagesize_4002 ../rpc_client/db_server_clnt.c /^__db_db_get_pagesize_4002(argp, clnt)$/ -__db_db_get_re_delim_4002 ../rpc_client/db_server_clnt.c /^__db_db_get_re_delim_4002(argp, clnt)$/ -__db_db_get_re_len_4002 ../rpc_client/db_server_clnt.c /^__db_db_get_re_len_4002(argp, clnt)$/ -__db_db_get_re_pad_4002 ../rpc_client/db_server_clnt.c /^__db_db_get_re_pad_4002(argp, clnt)$/ -__db_db_h_ffactor_4002 ../rpc_client/db_server_clnt.c /^__db_db_h_ffactor_4002(argp, clnt)$/ -__db_db_h_nelem_4002 ../rpc_client/db_server_clnt.c /^__db_db_h_nelem_4002(argp, clnt)$/ -__db_db_join_4002 ../rpc_client/db_server_clnt.c /^__db_db_join_4002(argp, clnt)$/ -__db_db_key_range_4002 ../rpc_client/db_server_clnt.c /^__db_db_key_range_4002(argp, clnt)$/ -__db_db_lorder_4002 ../rpc_client/db_server_clnt.c /^__db_db_lorder_4002(argp, clnt)$/ -__db_db_open_4002 ../rpc_client/db_server_clnt.c /^__db_db_open_4002(argp, clnt)$/ -__db_db_pagesize_4002 ../rpc_client/db_server_clnt.c /^__db_db_pagesize_4002(argp, clnt)$/ -__db_db_pget_4002 ../rpc_client/db_server_clnt.c /^__db_db_pget_4002(argp, clnt)$/ -__db_db_put_4002 ../rpc_client/db_server_clnt.c /^__db_db_put_4002(argp, clnt)$/ -__db_db_re_delim_4002 ../rpc_client/db_server_clnt.c /^__db_db_re_delim_4002(argp, clnt)$/ -__db_db_re_len_4002 ../rpc_client/db_server_clnt.c /^__db_db_re_len_4002(argp, clnt)$/ -__db_db_re_pad_4002 ../rpc_client/db_server_clnt.c /^__db_db_re_pad_4002(argp, clnt)$/ -__db_db_remove_4002 ../rpc_client/db_server_clnt.c /^__db_db_remove_4002(argp, clnt)$/ -__db_db_rename_4002 ../rpc_client/db_server_clnt.c /^__db_db_rename_4002(argp, clnt)$/ -__db_db_stat_4002 ../rpc_client/db_server_clnt.c /^__db_db_stat_4002(argp, clnt)$/ -__db_db_sync_4002 ../rpc_client/db_server_clnt.c /^__db_db_sync_4002(argp, clnt)$/ -__db_db_truncate_4002 ../rpc_client/db_server_clnt.c /^__db_db_truncate_4002(argp, clnt)$/ -__db_dbc_close_4002 ../rpc_client/db_server_clnt.c /^__db_dbc_close_4002(argp, clnt)$/ -__db_dbc_count_4002 ../rpc_client/db_server_clnt.c /^__db_dbc_count_4002(argp, clnt)$/ -__db_dbc_del_4002 ../rpc_client/db_server_clnt.c /^__db_dbc_del_4002(argp, clnt)$/ -__db_dbc_dup_4002 ../rpc_client/db_server_clnt.c 
/^__db_dbc_dup_4002(argp, clnt)$/ -__db_dbc_get_4002 ../rpc_client/db_server_clnt.c /^__db_dbc_get_4002(argp, clnt)$/ -__db_dbc_pget_4002 ../rpc_client/db_server_clnt.c /^__db_dbc_pget_4002(argp, clnt)$/ -__db_dbc_put_4002 ../rpc_client/db_server_clnt.c /^__db_dbc_put_4002(argp, clnt)$/ +__db_cursor_proc ../rpc_server/c/db_server_proc.c /^__db_cursor_proc(dbpcl_id, txnpcl_id, flags, reply/ +__db_db_associate_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_associate_4003__SVCSUFFIX__(msg, req)$/ +__db_db_bt_maxkey_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_bt_maxkey_4003__SVCSUFFIX__(msg, req)$/ +__db_db_bt_minkey_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_bt_minkey_4003__SVCSUFFIX__(msg, req)$/ +__db_db_close_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_close_4003__SVCSUFFIX__(msg, req)$/ +__db_db_create_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_create_4003__SVCSUFFIX__(msg, req)$/ +__db_db_cursor_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_cursor_4003__SVCSUFFIX__(msg, req)$/ +__db_db_del_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_del_4003__SVCSUFFIX__(msg, req)$/ +__db_db_encrypt_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_encrypt_4003__SVCSUFFIX__(msg, req)$/ +__db_db_extentsize_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_extentsize_4003__SVCSUFFIX__(msg, req)$/ +__db_db_flags_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_flags_4003__SVCSUFFIX__(msg, req)$/ +__db_db_get_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_get_4003__SVCSUFFIX__(msg, req)$/ +__db_db_get_bt_minkey_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_get_bt_minkey_4003__SVCSUFFIX__(msg, req)$/ +__db_db_get_encrypt_flags_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_get_encrypt_flags_4003__SVCSUFFIX__(msg, r/ +__db_db_get_extentsize_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_get_extentsize_4003__SVCSUFFIX__(msg, req)/ +__db_db_get_flags_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_get_flags_4003__SVCSUFFIX__(msg, req)$/ +__db_db_get_h_ffactor_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_get_h_ffactor_4003__SVCSUFFIX__(msg, req)$/ +__db_db_get_h_nelem_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_get_h_nelem_4003__SVCSUFFIX__(msg, req)$/ +__db_db_get_lorder_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_get_lorder_4003__SVCSUFFIX__(msg, req)$/ +__db_db_get_name_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_get_name_4003__SVCSUFFIX__(msg, req)$/ +__db_db_get_open_flags_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_get_open_flags_4003__SVCSUFFIX__(msg, req)/ +__db_db_get_pagesize_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_get_pagesize_4003__SVCSUFFIX__(msg, req)$/ +__db_db_get_re_delim_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_get_re_delim_4003__SVCSUFFIX__(msg, req)$/ +__db_db_get_re_len_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_get_re_len_4003__SVCSUFFIX__(msg, req)$/ +__db_db_get_re_pad_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_get_re_pad_4003__SVCSUFFIX__(msg, req)$/ +__db_db_h_ffactor_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_h_ffactor_4003__SVCSUFFIX__(msg, req)$/ +__db_db_h_nelem_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_h_nelem_4003__SVCSUFFIX__(msg, req)$/ +__db_db_join_4003__SVCSUFFIX__ 
../rpc_server/c/gen_db_server.c /^__db_db_join_4003__SVCSUFFIX__(msg, req)$/ +__db_db_key_range_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_key_range_4003__SVCSUFFIX__(msg, req)$/ +__db_db_lorder_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_lorder_4003__SVCSUFFIX__(msg, req)$/ +__db_db_open_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_open_4003__SVCSUFFIX__(msg, req)$/ +__db_db_pagesize_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_pagesize_4003__SVCSUFFIX__(msg, req)$/ +__db_db_pget_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_pget_4003__SVCSUFFIX__(msg, req)$/ +__db_db_put_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_put_4003__SVCSUFFIX__(msg, req)$/ +__db_db_re_delim_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_re_delim_4003__SVCSUFFIX__(msg, req)$/ +__db_db_re_len_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_re_len_4003__SVCSUFFIX__(msg, req)$/ +__db_db_re_pad_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_re_pad_4003__SVCSUFFIX__(msg, req)$/ +__db_db_remove_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_remove_4003__SVCSUFFIX__(msg, req)$/ +__db_db_rename_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_rename_4003__SVCSUFFIX__(msg, req)$/ +__db_db_stat_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_stat_4003__SVCSUFFIX__(msg, req)$/ +__db_db_sync_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_sync_4003__SVCSUFFIX__(msg, req)$/ +__db_db_truncate_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_db_truncate_4003__SVCSUFFIX__(msg, req)$/ +__db_dbc_close_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_dbc_close_4003__SVCSUFFIX__(msg, req)$/ +__db_dbc_count_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_dbc_count_4003__SVCSUFFIX__(msg, req)$/ +__db_dbc_del_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_dbc_del_4003__SVCSUFFIX__(msg, req)$/ +__db_dbc_dup_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_dbc_dup_4003__SVCSUFFIX__(msg, req)$/ +__db_dbc_get_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_dbc_get_4003__SVCSUFFIX__(msg, req)$/ +__db_dbc_pget_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_dbc_pget_4003__SVCSUFFIX__(msg, req)$/ +__db_dbc_put_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_dbc_put_4003__SVCSUFFIX__(msg, req)$/ __db_dbenv_mpool ../db/db.c /^__db_dbenv_mpool(dbp, fname, flags)$/ __db_dbenv_setup ../db/db.c /^__db_dbenv_setup(dbp, txn, fname, id, flags)$/ __db_dbm_close ../dbm/dbm.c /^__db_dbm_close()$/ @@ -2089,135 +2389,52 @@ __db_dbm_nextkey ../dbm/dbm.c /^__db_dbm_nextkey(key)$/ __db_dbm_store ../dbm/dbm.c /^__db_dbm_store(key, dat)$/ __db_dbtxn_remove ../db/db_remove.c /^__db_dbtxn_remove(dbp, txn, name)$/ __db_dbtype_to_string ../db/db_pr.c /^__db_dbtype_to_string(type)$/ -__db_debug_getpgnos ../db/db_auto.c /^__db_debug_getpgnos(dbenv, rec, lsnp, notused1, su/ __db_debug_log ../db/db_auto.c /^__db_debug_log(dbenv, txnid, ret_lsnp, flags,$/ -__db_debug_print ../db/db_auto.c /^__db_debug_print(dbenv, dbtp, lsnp, notused2, notu/ +__db_debug_print ../db/db_autop.c /^__db_debug_print(dbenv, dbtp, lsnp, notused2, notu/ __db_debug_read ../db/db_auto.c /^__db_debug_read(dbenv, recbuf, argpp)$/ __db_debug_recover ../db/db_rec.c /^__db_debug_recover(dbenv, dbtp, lsnp, op, info)$/ -__db_default_getpgnos ../db/db_dispatch.c /^__db_default_getpgnos(dbenv, lsnp, summary)$/ __db_del ../db/db_am.c /^__db_del(dbp, txn, key, 
flags)$/ __db_del_arg ../db/db_iface.c /^__db_del_arg(dbp, flags)$/ __db_del_pp ../db/db_iface.c /^__db_del_pp(dbp, txn, key, flags)$/ -__db_del_proc ../rpc_server/c/db_server_proc.c /^__db_del_proc(dbpcl_id, txnpcl_id, keydlen,$/ +__db_del_proc ../rpc_server/c/db_server_proc.c /^__db_del_proc(dbpcl_id, txnpcl_id, keydlen, keydof/ __db_derive_mac ../hmac/hmac.c /^__db_derive_mac(passwd, plen, mac_key)$/ __db_des_destroy ../env/env_region.c /^__db_des_destroy(dbenv, rp, shmem_safe)$/ __db_des_get ../env/env_region.c /^__db_des_get(dbenv, env_infop, infop, rpp)$/ __db_disassociate ../db/db.c /^__db_disassociate(sdbp)$/ __db_dispatch ../db/db_dispatch.c /^__db_dispatch(dbenv, dtab, dtabsize, db, lsnp, red/ __db_ditem ../db/db_dup.c /^__db_ditem(dbc, pagep, indx, nbytes)$/ +__db_dl ../env/env_stat.c /^__db_dl(dbenv, msg, value)$/ +__db_dl_pct ../env/env_stat.c /^__db_dl_pct(dbenv, msg, value, pct, tag)$/ +__db_dlbytes ../env/env_stat.c /^__db_dlbytes(dbenv, msg, gbytes, mbytes, bytes)$/ __db_do_the_limbo ../db/db_dispatch.c /^__db_do_the_limbo(dbenv, ptxn, txn, hp, state)$/ __db_doff ../db/db_overflow.c /^__db_doff(dbc, pgno)$/ -__db_dump ../db/db_pr.c /^__db_dump(dbp, op, name)$/ +__db_dump ../db/db_pr.c /^__db_dump(dbp, subname, callback, handle, pflag, k/ +__db_dump_pp ../db/db_pr.c /^__db_dump_pp(dbp, subname, callback, handle, pflag/ +__db_dumptree ../db/db_pr.c /^__db_dumptree(dbp, op, name)$/ __db_duperr ../db/db_cam.c /^__db_duperr(dbp, flags)$/ __db_e_attach ../env/env_region.c /^__db_e_attach(dbenv, init_flagsp)$/ __db_e_detach ../env/env_region.c /^__db_e_detach(dbenv, destroy)$/ __db_e_remfile ../env/env_region.c /^__db_e_remfile(dbenv)$/ __db_e_remove ../env/env_region.c /^__db_e_remove(dbenv, flags)$/ -__db_e_stat ../env/env_region.c /^__db_e_stat(dbenv, arg_renv, arg_regions, arg_regi/ __db_encrypt_proc ../rpc_server/c/db_server_proc.c /^__db_encrypt_proc(dbpcl_id, passwd, flags, replyp)/ -__db_env_cachesize_4002 ../rpc_client/db_server_clnt.c /^__db_env_cachesize_4002(argp, clnt)$/ -__db_env_close ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_close(struct __db_env *se/ -__db_env_close_4002 ../rpc_client/db_server_clnt.c /^__db_env_close_4002(argp, clnt)$/ +__db_env_cachesize_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_env_cachesize_4003__SVCSUFFIX__(msg, req)$/ +__db_env_close_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_env_close_4003__SVCSUFFIX__(msg, req)$/ __db_env_config ../env/env_method.c /^__db_env_config(dbenv, i, flags)$/ -__db_env_create_4002 ../rpc_client/db_server_clnt.c /^__db_env_create_4002(argp, clnt)$/ -__db_env_dbremove ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_dbremove(struct __db_env / -__db_env_dbremove_4002 ../rpc_client/db_server_clnt.c /^__db_env_dbremove_4002(argp, clnt)$/ -__db_env_dbrename ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_dbrename(struct __db_env / -__db_env_dbrename_4002 ../rpc_client/db_server_clnt.c /^__db_env_dbrename_4002(argp, clnt)$/ -__db_env_encrypt_4002 ../rpc_client/db_server_clnt.c /^__db_env_encrypt_4002(argp, clnt)$/ -__db_env_err ../libdb_java/db_java_wrap.c /^static void __db_env_err(struct __db_env *self,int/ -__db_env_errx ../libdb_java/db_java_wrap.c /^static void __db_env_errx(struct __db_env *self,ch/ -__db_env_flags_4002 ../rpc_client/db_server_clnt.c /^__db_env_flags_4002(argp, clnt)$/ -__db_env_get_cachesize ../libdb_java/db_java_wrap.c /^static jlong __db_env_get_cachesize(struct __db_en/ -__db_env_get_cachesize_4002 ../rpc_client/db_server_clnt.c 
/^__db_env_get_cachesize_4002(argp, clnt)$/ -__db_env_get_cachesize_ncache ../libdb_java/db_java_wrap.c /^static int __db_env_get_cachesize_ncache(struct __/ -__db_env_get_data_dirs ../libdb_java/db_java_wrap.c /^static char const **__db_env_get_data_dirs(struct / -__db_env_get_encrypt_flags ../libdb_java/db_java_wrap.c /^static u_int32_t __db_env_get_encrypt_flags(struct/ -__db_env_get_encrypt_flags_4002 ../rpc_client/db_server_clnt.c /^__db_env_get_encrypt_flags_4002(argp, clnt)$/ -__db_env_get_flags ../libdb_java/db_java_wrap.c /^static u_int32_t __db_env_get_flags(struct __db_en/ -__db_env_get_flags_4002 ../rpc_client/db_server_clnt.c /^__db_env_get_flags_4002(argp, clnt)$/ -__db_env_get_home ../libdb_java/db_java_wrap.c /^static char const *__db_env_get_home(struct __db_e/ -__db_env_get_home_4002 ../rpc_client/db_server_clnt.c /^__db_env_get_home_4002(argp, clnt)$/ -__db_env_get_lg_bsize ../libdb_java/db_java_wrap.c /^static u_int32_t __db_env_get_lg_bsize(struct __db/ -__db_env_get_lg_dir ../libdb_java/db_java_wrap.c /^static char const *__db_env_get_lg_dir(struct __db/ -__db_env_get_lg_max ../libdb_java/db_java_wrap.c /^static u_int32_t __db_env_get_lg_max(struct __db_e/ -__db_env_get_lg_regionmax ../libdb_java/db_java_wrap.c /^static u_int32_t __db_env_get_lg_regionmax(struct / -__db_env_get_lk_conflicts ../libdb_java/db_java_wrap.c /^static struct __db_lk_conflicts __db_env_get_lk_co/ -__db_env_get_lk_detect ../libdb_java/db_java_wrap.c /^static u_int32_t __db_env_get_lk_detect(struct __d/ -__db_env_get_lk_max_lockers ../libdb_java/db_java_wrap.c /^static u_int32_t __db_env_get_lk_max_lockers(struc/ -__db_env_get_lk_max_locks ../libdb_java/db_java_wrap.c /^static u_int32_t __db_env_get_lk_max_locks(struct / -__db_env_get_lk_max_objects ../libdb_java/db_java_wrap.c /^static u_int32_t __db_env_get_lk_max_objects(struc/ -__db_env_get_mp_mmapsize ../libdb_java/db_java_wrap.c /^static size_t __db_env_get_mp_mmapsize(struct __db/ -__db_env_get_open_flags ../libdb_java/db_java_wrap.c /^static u_int32_t __db_env_get_open_flags(struct __/ -__db_env_get_open_flags_4002 ../rpc_client/db_server_clnt.c /^__db_env_get_open_flags_4002(argp, clnt)$/ -__db_env_get_rep_limit ../libdb_java/db_java_wrap.c /^static jlong __db_env_get_rep_limit(struct __db_en/ -__db_env_get_shm_key ../libdb_java/db_java_wrap.c /^static long __db_env_get_shm_key(struct __db_env */ -__db_env_get_tas_spins ../libdb_java/db_java_wrap.c /^static u_int32_t __db_env_get_tas_spins(struct __d/ -__db_env_get_timeout ../libdb_java/db_java_wrap.c /^static db_timeout_t __db_env_get_timeout(struct __/ -__db_env_get_tmp_dir ../libdb_java/db_java_wrap.c /^static char const *__db_env_get_tmp_dir(struct __d/ -__db_env_get_tx_max ../libdb_java/db_java_wrap.c /^static u_int32_t __db_env_get_tx_max(struct __db_e/ -__db_env_get_tx_timestamp ../libdb_java/db_java_wrap.c /^static time_t __db_env_get_tx_timestamp(struct __d/ -__db_env_get_verbose ../libdb_java/db_java_wrap.c /^static int_bool __db_env_get_verbose(struct __db_e/ -__db_env_lock_detect ../libdb_java/db_java_wrap.c /^static int __db_env_lock_detect(struct __db_env *s/ -__db_env_lock_get ../libdb_java/db_java_wrap.c /^static DB_LOCK *__db_env_lock_get(struct __db_env / -__db_env_lock_id ../libdb_java/db_java_wrap.c /^static u_int32_t __db_env_lock_id(struct __db_env / -__db_env_lock_id_free ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_lock_id_free(struct __db_/ -__db_env_lock_put ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_lock_put(struct __db_env / 
-__db_env_lock_stat ../libdb_java/db_java_wrap.c /^static DB_LOCK_STAT *__db_env_lock_stat(struct __d/ -__db_env_log_archive ../libdb_java/db_java_wrap.c /^static char **__db_env_log_archive(struct __db_env/ -__db_env_log_cursor ../libdb_java/db_java_wrap.c /^static DB_LOGC *__db_env_log_cursor(struct __db_en/ -__db_env_log_file ../libdb_java/db_java_wrap.c /^static char *__db_env_log_file(struct __db_env *se/ -__db_env_log_flush ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_log_flush(struct __db_env/ -__db_env_log_put ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_log_put(struct __db_env */ -__db_env_log_stat ../libdb_java/db_java_wrap.c /^static DB_LOG_STAT *__db_env_log_stat(struct __db_/ -__db_env_memp_fstat ../libdb_java/db_java_wrap.c /^static DB_MPOOL_FSTAT **__db_env_memp_fstat(struct/ -__db_env_memp_stat ../libdb_java/db_java_wrap.c /^static DB_MPOOL_STAT *__db_env_memp_stat(struct __/ -__db_env_memp_trickle ../libdb_java/db_java_wrap.c /^static int __db_env_memp_trickle(struct __db_env */ -__db_env_open ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_open(struct __db_env *sel/ -__db_env_open_4002 ../rpc_client/db_server_clnt.c /^__db_env_open_4002(argp, clnt)$/ -__db_env_remove ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_remove(struct __db_env *s/ -__db_env_remove_4002 ../rpc_client/db_server_clnt.c /^__db_env_remove_4002(argp, clnt)$/ -__db_env_rep_elect ../libdb_java/db_java_wrap.c /^static int __db_env_rep_elect(struct __db_env *sel/ -__db_env_rep_process_message ../libdb_java/db_java_wrap.c /^static int __db_env_rep_process_message(struct __d/ -__db_env_rep_start ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_rep_start(struct __db_env/ -__db_env_rep_stat ../libdb_java/db_java_wrap.c /^static DB_REP_STAT *__db_env_rep_stat(struct __db_/ -__db_env_set_app_dispatch ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_app_dispatch(struct _/ -__db_env_set_cachesize ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_cachesize(struct __db/ -__db_env_set_data_dir ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_data_dir(struct __db_/ -__db_env_set_encrypt ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_encrypt(struct __db_e/ -__db_env_set_errcall ../libdb_java/db_java_wrap.c /^static void __db_env_set_errcall(struct __db_env */ -__db_env_set_feedback ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_feedback(struct __db_/ -__db_env_set_flags ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_flags(struct __db_env/ -__db_env_set_lg_bsize ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_lg_bsize(struct __db_/ -__db_env_set_lg_dir ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_lg_dir(struct __db_en/ -__db_env_set_lg_max ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_lg_max(struct __db_en/ -__db_env_set_lg_regionmax ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_lg_regionmax(struct _/ -__db_env_set_lk_conflicts ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_lk_conflicts(struct _/ -__db_env_set_lk_detect ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_lk_detect(struct __db/ -__db_env_set_lk_max_lockers ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_lk_max_lockers(struct/ -__db_env_set_lk_max_locks ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_lk_max_locks(struct _/ -__db_env_set_lk_max_objects ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_lk_max_objects(struct/ 
-__db_env_set_mp_mmapsize ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_mp_mmapsize(struct __/ -__db_env_set_paniccall ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_paniccall(struct __db/ -__db_env_set_rep_limit ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_rep_limit(struct __db/ -__db_env_set_rep_transport ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_rep_transport(struct / -__db_env_set_rpc_server ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_rpc_server(struct __d/ -__db_env_set_shm_key ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_shm_key(struct __db_e/ -__db_env_set_tas_spins ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_tas_spins(struct __db/ -__db_env_set_timeout ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_timeout(struct __db_e/ -__db_env_set_tmp_dir ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_tmp_dir(struct __db_e/ -__db_env_set_tx_max ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_tx_max(struct __db_en/ -__db_env_set_tx_timestamp ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_tx_timestamp(struct _/ -__db_env_set_verbose ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_set_verbose(struct __db_e/ -__db_env_txn_begin ../libdb_java/db_java_wrap.c /^static DB_TXN *__db_env_txn_begin(struct __db_env / -__db_env_txn_checkpoint ../libdb_java/db_java_wrap.c /^static db_ret_t __db_env_txn_checkpoint(struct __d/ -__db_env_txn_recover ../libdb_java/db_java_wrap.c /^static DB_PREPLIST *__db_env_txn_recover(struct __/ -__db_env_txn_stat ../libdb_java/db_java_wrap.c /^static DB_TXN_STAT *__db_env_txn_stat(struct __db_/ +__db_env_create_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_env_create_4003__SVCSUFFIX__(msg, req)$/ +__db_env_dbremove_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_env_dbremove_4003__SVCSUFFIX__(msg, req)$/ +__db_env_dbrename_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_env_dbrename_4003__SVCSUFFIX__(msg, req)$/ +__db_env_encrypt_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_env_encrypt_4003__SVCSUFFIX__(msg, req)$/ +__db_env_flags_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_env_flags_4003__SVCSUFFIX__(msg, req)$/ +__db_env_get_cachesize_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_env_get_cachesize_4003__SVCSUFFIX__(msg, req)/ +__db_env_get_encrypt_flags_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_env_get_encrypt_flags_4003__SVCSUFFIX__(msg, / +__db_env_get_flags_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_env_get_flags_4003__SVCSUFFIX__(msg, req)$/ +__db_env_get_home_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_env_get_home_4003__SVCSUFFIX__(msg, req)$/ +__db_env_get_open_flags_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_env_get_open_flags_4003__SVCSUFFIX__(msg, req/ +__db_env_open_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_env_open_4003__SVCSUFFIX__(msg, req)$/ +__db_env_remove_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_env_remove_4003__SVCSUFFIX__(msg, req)$/ __db_err ../common/db_err.c /^__db_err(const DB_ENV *dbenv, const char *fmt, .../ -__db_err_internal ../libdb_java/db_java_wrap.c /^static void __db_err_internal(struct __db *self,in/ __db_errcall ../common/db_err.c /^__db_errcall(dbenv, error, error_set, fmt, ap)$/ __db_errfile ../common/db_err.c /^__db_errfile(dbenv, error, error_set, fmt, ap)$/ -__db_errx ../libdb_java/db_java_wrap.c /^static void 
__db_errx(struct __db *self,char const/ __db_extentsize_proc ../rpc_server/c/db_server_proc.c /^__db_extentsize_proc(dbpcl_id, extentsize, replyp)/ __db_faultmem ../env/env_region.c /^__db_faultmem(dbenv, addr, size, created)$/ __db_fcchk ../common/db_err.c /^__db_fcchk(dbenv, name, flags, flag1, flag2)$/ @@ -2228,6 +2445,7 @@ __db_fcntl_mutex_lock ../mutex/mut_fcntl.c /^__db_fcntl_mutex_lock(dbenv, mutexp __db_fcntl_mutex_unlock ../mutex/mut_fcntl.c /^__db_fcntl_mutex_unlock(dbenv, mutexp)$/ __db_fd_pp ../db/db_iface.c /^__db_fd_pp(dbp, fdp)$/ __db_ferr ../common/db_err.c /^__db_ferr(dbenv, name, iscombo)$/ +__db_fileid_reset ../db/db_setid.c /^__db_fileid_reset(dbenv, name, passwd)$/ __db_fileinit ../env/env_file.c /^__db_fileinit(dbenv, fhp, size, zerofill)$/ __db_flags_proc ../rpc_server/c/db_server_proc.c /^__db_flags_proc(dbpcl_id, flags, replyp)$/ __db_fnl ../common/db_err.c /^__db_fnl(dbenv, name)$/ @@ -2235,47 +2453,37 @@ __db_free ../db/db_meta.c /^__db_free(dbc, h)$/ __db_generate_iv ../crypto/mersenne/mt19937db.c /^__db_generate_iv(dbenv, iv)$/ __db_genrand ../crypto/mersenne/mt19937db.c /^__db_genrand(dbenv)$/ __db_get ../db/db_iface.c /^__db_get(dbp, txn, key, data, flags)$/ -__db_get__SWIG_0 ../libdb_java/db_java_wrap.c /^static int __db_get__SWIG_0(struct __db *self,DB_T/ __db_get_arg ../db/db_iface.c /^__db_get_arg(dbp, key, data, flags)$/ -__db_get_bt_minkey ../libdb_java/db_java_wrap.c /^static u_int32_t __db_get_bt_minkey(struct __db *s/ __db_get_bt_minkey_proc ../rpc_server/c/db_server_proc.c /^__db_get_bt_minkey_proc(dbpcl_id, replyp)$/ __db_get_byteswapped ../db/db_method.c /^__db_get_byteswapped(dbp, isswapped)$/ __db_get_cachesize ../db/db_method.c /^__db_get_cachesize(dbp, cache_gbytesp, cache_bytes/ -__db_get_cachesize_ncache ../libdb_java/db_java_wrap.c /^static u_int32_t __db_get_cachesize_ncache(struct / __db_get_dbname ../db/db_method.c /^__db_get_dbname(dbp, fnamep, dnamep)$/ __db_get_encrypt_flags ../db/db_method.c /^__db_get_encrypt_flags(dbp, flagsp)$/ __db_get_encrypt_flags_proc ../rpc_server/c/db_server_proc.c /^__db_get_encrypt_flags_proc(dbpcl_id, replyp)$/ -__db_get_env ../db/db_method.c /^__db_get_env(dbp, dbenvp)$/ +__db_get_env ../db/db_method.c /^__db_get_env(dbp)$/ __db_get_errfile ../db/db_method.c /^__db_get_errfile(dbp, errfilep)$/ __db_get_errpfx ../db/db_method.c /^__db_get_errpfx(dbp, errpfxp)$/ __db_get_extentsize_proc ../rpc_server/c/db_server_proc.c /^__db_get_extentsize_proc(dbpcl_id, replyp)$/ -__db_get_filename ../libdb_java/db_java_wrap.c /^static char const *__db_get_filename(struct __db */ __db_get_flags ../db/db_method.c /^__db_get_flags(dbp, flagsp)$/ +__db_get_flags_fn ../db/db_pr.c /^__db_get_flags_fn()$/ __db_get_flags_proc ../rpc_server/c/db_server_proc.c /^__db_get_flags_proc(dbpcl_id, replyp)$/ -__db_get_flags_raw ../libdb_java/db_java_wrap.c /^static u_int32_t __db_get_flags_raw(struct __db *s/ -__db_get_h_ffactor ../libdb_java/db_java_wrap.c /^static u_int32_t __db_get_h_ffactor(struct __db *s/ __db_get_h_ffactor_proc ../rpc_server/c/db_server_proc.c /^__db_get_h_ffactor_proc(dbpcl_id, replyp)$/ -__db_get_h_nelem ../libdb_java/db_java_wrap.c /^static u_int32_t __db_get_h_nelem(struct __db *sel/ __db_get_h_nelem_proc ../rpc_server/c/db_server_proc.c /^__db_get_h_nelem_proc(dbpcl_id, replyp)$/ __db_get_lorder ../db/db_method.c /^__db_get_lorder(dbp, db_lorderp)$/ __db_get_lorder_proc ../rpc_server/c/db_server_proc.c /^__db_get_lorder_proc(dbpcl_id, replyp)$/ -__db_get_mpf ../libdb_java/db_java_wrap.c /^static DB_MPOOLFILE 
*__db_get_mpf(struct __db *sel/ +__db_get_msgfile ../db/db_method.c /^__db_get_msgfile(dbp, msgfilep)$/ __db_get_name_proc ../rpc_server/c/db_server_proc.c /^__db_get_name_proc(dbpcl_id, replyp)$/ __db_get_open_flags ../db/db_open.c /^__db_get_open_flags(dbp, flagsp)$/ __db_get_open_flags_proc ../rpc_server/c/db_server_proc.c /^__db_get_open_flags_proc(dbpcl_id, replyp)$/ __db_get_pagesize ../db/db_method.c /^__db_get_pagesize(dbp, db_pagesizep)$/ __db_get_pagesize_proc ../rpc_server/c/db_server_proc.c /^__db_get_pagesize_proc(dbpcl_id, replyp)$/ __db_get_pp ../db/db_iface.c /^__db_get_pp(dbp, txn, key, data, flags)$/ -__db_get_proc ../rpc_server/c/db_server_proc.c /^__db_get_proc(dbpcl_id, txnpcl_id, keydlen,$/ -__db_get_q_extentsize ../libdb_java/db_java_wrap.c /^static u_int32_t __db_get_q_extentsize(struct __db/ -__db_get_re_delim ../libdb_java/db_java_wrap.c /^static int __db_get_re_delim(struct __db *self){$/ +__db_get_proc ../rpc_server/c/db_server_proc.c /^__db_get_proc(dbpcl_id, txnpcl_id, keydlen, keydof/ __db_get_re_delim_proc ../rpc_server/c/db_server_proc.c /^__db_get_re_delim_proc(dbpcl_id, replyp)$/ -__db_get_re_len ../libdb_java/db_java_wrap.c /^static u_int32_t __db_get_re_len(struct __db *self/ __db_get_re_len_proc ../rpc_server/c/db_server_proc.c /^__db_get_re_len_proc(dbpcl_id, replyp)$/ -__db_get_re_pad ../libdb_java/db_java_wrap.c /^static int __db_get_re_pad(struct __db *self){$/ __db_get_re_pad_proc ../rpc_server/c/db_server_proc.c /^__db_get_re_pad_proc(dbpcl_id, replyp)$/ -__db_get_re_source ../libdb_java/db_java_wrap.c /^static char const *__db_get_re_source(struct __db / -__db_get_transactional ../db/db_method.c /^__db_get_transactional(dbp, istxnp)$/ +__db_get_seq_flags_fn ../sequence/seq_stat.c /^__db_get_seq_flags_fn()$/ +__db_get_transactional ../db/db_method.c /^__db_get_transactional(dbp)$/ __db_get_type ../db/db_method.c /^__db_get_type(dbp, dbtype)$/ __db_getlong ../common/db_getlong.c /^__db_getlong(dbenv, progname, p, min, max, storep)/ __db_getulong ../common/db_getlong.c /^__db_getulong(dbenv, progname, p, min, max, storep/ @@ -2287,19 +2495,17 @@ __db_hashinit ../env/db_shash.c /^__db_hashinit(begin, nelements)$/ __db_hcreate ../hsearch/hsearch.c /^__db_hcreate(nel)$/ __db_hdestroy ../hsearch/hsearch.c /^__db_hdestroy()$/ __db_hmac ../hmac/hmac.c /^__db_hmac(k, data, data_len, mac)$/ -__db_hmeta ../db/db_pr.c /^__db_hmeta(dbp, fp, h, flags)$/ +__db_hmeta ../db/db_pr.c /^__db_hmeta(dbp, h, flags)$/ __db_home ../env/env_open.c /^__db_home(dbenv, db_home, flags)$/ __db_hsearch ../hsearch/hsearch.c /^__db_hsearch(item, action)$/ __db_idcmp ../common/db_idspace.c /^__db_idcmp(a, b)$/ __db_idspace ../common/db_idspace.c /^__db_idspace(inuse, n, minp, maxp)$/ __db_infohead ../dbinc/tcl_db.h 155 __db_init ../db/db_method.c /^__db_init(dbp, flags)$/ -__db_init_getpgnos ../db/db_auto.c /^__db_init_getpgnos(dbenv, dtabp, dtabsizep)$/ __db_init_meta ../db/db_meta.c /^__db_init_meta(dbp, p, pgno, pgtype)$/ -__db_init_print ../db/db_auto.c /^__db_init_print(dbenv, dtabp, dtabsizep)$/ +__db_init_print ../db/db_autop.c /^__db_init_print(dbenv, dtabp, dtabsizep)$/ __db_init_recover ../db/db_auto.c /^__db_init_recover(dbenv, dtabp, dtabsizep)$/ __db_init_subdb ../db/db_open.c /^__db_init_subdb(mdbp, dbp, name, txn)$/ -__db_inmemdbflags ../db/db_pr.c /^__db_inmemdbflags(flags, cookie, callback)$/ __db_is_valid_magicno ../db/db_vrfy.c /^__db_is_valid_magicno(magic, typep)$/ __db_is_valid_pagetype ../db/db_vrfy.c /^__db_is_valid_pagetype(type)$/ __db_isbigendian 
../common/db_byteorder.c /^__db_isbigendian()$/ @@ -2314,11 +2520,10 @@ __db_join_get_pp ../db/db_join.c /^__db_join_get_pp(dbc, key, data, flags)$/ __db_join_getnext ../db/db_join.c /^__db_join_getnext(dbc, key, data, exhausted, opmod/ __db_join_pp ../db/db_iface.c /^__db_join_pp(primary, curslist, dbcp, flags)$/ __db_join_primget ../db/db_join.c /^__db_join_primget(dbp, txn, lockerid, key, data, f/ -__db_join_proc ../rpc_server/c/db_server_proc.c /^__db_join_proc(dbpcl_id, curs, curslen,$/ +__db_join_proc ../rpc_server/c/db_server_proc.c /^__db_join_proc(dbpcl_id, curs, curslen, flags, rep/ __db_join_put ../db/db_join.c /^__db_join_put(dbc, key, data, flags)$/ -__db_key_range ../libdb_java/db_java_wrap.c /^static db_ret_t __db_key_range(struct __db *self,D/ __db_key_range_pp ../db/db_iface.c /^__db_key_range_pp(dbp, txn, key, kr, flags)$/ -__db_key_range_proc ../rpc_server/c/db_server_proc.c /^__db_key_range_proc(dbpcl_id, txnpcl_id, keydlen,$/ +__db_key_range_proc ../rpc_server/c/db_server_proc.c /^__db_key_range_proc(dbpcl_id, txnpcl_id, keydlen, / __db_lastpgno ../db/db_upg.c /^__db_lastpgno(dbp, real_name, fhp, pgno_lastp)$/ __db_lget ../db/db_meta.c /^__db_lget(dbc, action, pgno, mode, lkflags, lockp)/ __db_limbo_bucket ../db/db_dispatch.c /^__db_limbo_bucket(dbenv, txn, elp, state)$/ @@ -2327,38 +2532,35 @@ __db_limbo_move ../db/db_dispatch.c /^__db_limbo_move(dbenv, ptxn, txn, elp)$/ __db_limbo_prepare ../db/db_dispatch.c /^__db_limbo_prepare(dbp, txn, elp)$/ __db_loadme ../db/db_pr.c /^__db_loadme()$/ __db_lock_move ../db/db_dispatch.c /^__db_lock_move(dbenv, fileid, pgno, mode, ptxn, tx/ +__db_lockmode_to_string ../db/db_pr.c /^__db_lockmode_to_string(mode)$/ __db_log2 ../common/db_log2.c /^__db_log2(num)$/ -__db_log_cursor_close ../libdb_java/db_java_wrap.c /^static db_ret_t __db_log_cursor_close(struct __db_/ -__db_log_cursor_get ../libdb_java/db_java_wrap.c /^static int __db_log_cursor_get(struct __db_log_cur/ +__db_log_corrupt ../env/env_recover.c /^__db_log_corrupt(dbenv, lsnp)$/ __db_log_page ../db/db.c /^__db_log_page(dbp, txn, lsn, pgno, page)$/ __db_logmsg ../common/db_err.c /^__db_logmsg(const DB_ENV *dbenv,$/ __db_lorder_proc ../rpc_server/c/db_server_proc.c /^__db_lorder_proc(dbpcl_id, lorder, replyp)$/ __db_lprint ../db/db_meta.c /^__db_lprint(dbc)$/ __db_lput ../db/db_meta.c /^__db_lput(dbc, lockp)$/ __db_lsgenrand ../crypto/mersenne/mt19937db.c /^__db_lsgenrand(seed_array, mt, mtip)$/ -__db_lsn_get_file ../libdb_java/db_java_wrap.c /^static u_int32_t __db_lsn_get_file(struct __db_lsn/ -__db_lsn_get_offset ../libdb_java/db_java_wrap.c /^static u_int32_t __db_lsn_get_offset(struct __db_l/ +__db_lsn_reset ../db/db_setlsn.c /^__db_lsn_reset(dbenv, name, passwd)$/ __db_maintinit ../dbinc/mutex.h /^#define __db_maintinit(a, b, c) __db_shreg_mainti/ __db_makeKey ../crypto/rijndael/rijndael-api-fst.c /^__db_makeKey(key, direction, keyLen, keyMaterial)$/ __db_makecopy ../db/db.c /^__db_makecopy(dbenv, src, dest)$/ __db_map_flags ../db/db_method.c /^__db_map_flags(dbp, inflagsp, outflagsp)$/ -__db_map_rmid ../xa/xa_map.c /^__db_map_rmid(rmid, env)$/ -__db_map_xid ../xa/xa_map.c /^__db_map_xid(env, xid, off)$/ +__db_map_rmid ../xa/xa_map.c /^__db_map_rmid(rmid, dbenv)$/ +__db_map_xid ../xa/xa_map.c /^__db_map_xid(dbenv, xid, off)$/ __db_master_open ../db/db.c /^__db_master_open(subdbp, txn, name, flags, mode, d/ __db_master_update ../db/db.c /^__db_master_update(mdbp, sdbp, txn, subdb, type, a/ -__db_meta ../db/db_pr.c /^__db_meta(dbp, dbmeta, fp, fn, flags)$/ +__db_meta 
../db/db_pr.c /^__db_meta(dbp, dbmeta, fn, flags)$/ __db_meta2pgset ../db/db_vrfy.c /^__db_meta2pgset(dbp, vdp, pgno, flags, pgset)$/ __db_meta_setup ../db/db_open.c /^__db_meta_setup(dbenv, dbp, name, meta, oflags, do/ __db_metaswap ../db/db_conv.c /^__db_metaswap(pg)$/ __db_mi_env ../env/env_method.c /^__db_mi_env(dbenv, name)$/ __db_mi_open ../env/env_method.c /^__db_mi_open(dbenv, name, after)$/ __db_moff ../db/db_overflow.c /^__db_moff(dbp, dbt, pgno, tlen, cmpfunc, cmpp)$/ -__db_mpoolfile_get_flags ../libdb_java/db_java_wrap.c /^static u_int32_t __db_mpoolfile_get_flags(struct _/ -__db_mpoolfile_get_maxsize ../libdb_java/db_java_wrap.c /^static jlong __db_mpoolfile_get_maxsize(struct __d/ -__db_mpoolfile_get_priority ../libdb_java/db_java_wrap.c /^static DB_CACHE_PRIORITY __db_mpoolfile_get_priori/ -__db_mpoolfile_set_flags ../libdb_java/db_java_wrap.c /^static db_ret_t __db_mpoolfile_set_flags(struct __/ -__db_mpoolfile_set_maxsize ../libdb_java/db_java_wrap.c /^static db_ret_t __db_mpoolfile_set_maxsize(struct / -__db_mpoolfile_set_priority ../libdb_java/db_java_wrap.c /^static db_ret_t __db_mpoolfile_set_priority(struct/ +__db_msg ../common/db_err.c /^__db_msg(const DB_ENV *dbenv, const char *fmt, .../ +__db_msgadd ../common/db_err.c /^__db_msgadd(DB_ENV *dbenv, DB_MSGBUF *mbp, const c/ +__db_msgcall ../common/db_err.c /^__db_msgcall(dbenv, fmt, ap)$/ +__db_msgfile ../common/db_err.c /^__db_msgfile(dbenv, fmt, ap)$/ __db_mutex_alloc_int ../mutex/mutex.c /^__db_mutex_alloc_int(dbenv, infop, storep)$/ __db_mutex_destroy ../dbinc/mutex.h /^#define __db_mutex_destroy(a) __db_pthread_mutex_/ __db_mutex_free ../mutex/mutex.c /^__db_mutex_free(dbenv, infop, mutexp)$/ @@ -2383,9 +2585,8 @@ __db_ndbm_store ../dbm/dbm.c /^__db_ndbm_store(dbm, key, data, flags)$/ __db_new ../db/db_meta.c /^__db_new(dbc, type, pagepp)$/ __db_new_file ../db/db_open.c /^__db_new_file(dbp, txn, fhp, name)$/ __db_no_open ../dbm/dbm.c /^__db_no_open()$/ -__db_noop_getpgnos ../db/db_auto.c /^__db_noop_getpgnos(dbenv, rec, lsnp, notused1, sum/ __db_noop_log ../db/db_auto.c /^__db_noop_log(dbp, txnid, ret_lsnp, flags, pgno, p/ -__db_noop_print ../db/db_auto.c /^__db_noop_print(dbenv, dbtp, lsnp, notused2, notus/ +__db_noop_print ../db/db_autop.c /^__db_noop_print(dbenv, dbtp, lsnp, notused2, notus/ __db_noop_read ../db/db_auto.c /^__db_noop_read(dbenv, recbuf, argpp)$/ __db_noop_recover ../db/db_rec.c /^__db_noop_recover(dbenv, dbtp, lsnp, op, info)$/ __db_nosystemmem ../os/os_map.c /^__db_nosystemmem(dbenv)$/ @@ -2395,13 +2596,12 @@ __db_omode ../os/os_oflags.c /^__db_omode(perm)$/ __db_open ../db/db_open.c /^__db_open(dbp, txn, fname, dname, type, flags, mod/ __db_open_arg ../db/db_iface.c /^__db_open_arg(dbp, txn, fname, dname, type, flags)/ __db_open_pp ../db/db_iface.c /^__db_open_pp(dbp, txn, fname, dname, type, flags, / -__db_open_proc ../rpc_server/c/db_server_proc.c /^__db_open_proc(dbpcl_id, txnpcl_id, name,$/ +__db_open_proc ../rpc_server/c/db_server_proc.c /^__db_open_proc(dbpcl_id, txnpcl_id, name, subdb, t/ __db_overwrite ../env/env_file.c /^__db_overwrite(dbenv, path)$/ __db_overwrite_pass ../env/env_file.c /^__db_overwrite_pass(dbenv, path, fhp, mbytes, byte/ __db_ovref ../db/db_overflow.c /^__db_ovref(dbc, pgno, adjust)$/ -__db_ovref_getpgnos ../db/db_auto.c /^__db_ovref_getpgnos(dbenv, rec, lsnp, notused1, su/ __db_ovref_log ../db/db_auto.c /^__db_ovref_log(dbp, txnid, ret_lsnp, flags, pgno, / -__db_ovref_print ../db/db_auto.c /^__db_ovref_print(dbenv, dbtp, lsnp, notused2, notu/ +__db_ovref_print 
../db/db_autop.c /^__db_ovref_print(dbenv, dbtp, lsnp, notused2, notu/ __db_ovref_read ../db/db_auto.c /^__db_ovref_read(dbenv, recbuf, argpp)$/ __db_ovref_recover ../db/db_rec.c /^__db_ovref_recover(dbenv, dbtp, lsnp, op, info)$/ __db_padDecrypt ../crypto/rijndael/rijndael-api-fst.c /^__db_padDecrypt(cipher, key, input, inputOctets, o/ @@ -2413,55 +2613,61 @@ __db_panic ../common/db_err.c /^__db_panic(dbenv, errval)$/ __db_panic_msg ../common/db_err.c /^__db_panic_msg(dbenv)$/ __db_parse ../env/env_open.c /^__db_parse(dbenv, s)$/ __db_partsize ../db/db_cam.c /^__db_partsize(nbytes, data)$/ -__db_pg_alloc_getpgnos ../db/db_auto.c /^__db_pg_alloc_getpgnos(dbenv, rec, lsnp, notused1,/ __db_pg_alloc_log ../db/db_auto.c /^__db_pg_alloc_log(dbp, txnid, ret_lsnp, flags, met/ -__db_pg_alloc_print ../db/db_auto.c /^__db_pg_alloc_print(dbenv, dbtp, lsnp, notused2, n/ +__db_pg_alloc_print ../db/db_autop.c /^__db_pg_alloc_print(dbenv, dbtp, lsnp, notused2, n/ __db_pg_alloc_read ../db/db_auto.c /^__db_pg_alloc_read(dbenv, recbuf, argpp)$/ __db_pg_alloc_recover ../db/db_rec.c /^__db_pg_alloc_recover(dbenv, dbtp, lsnp, op, info)/ -__db_pg_free_getpgnos ../db/db_auto.c /^__db_pg_free_getpgnos(dbenv, rec, lsnp, notused1, / __db_pg_free_log ../db/db_auto.c /^__db_pg_free_log(dbp, txnid, ret_lsnp, flags, pgno/ -__db_pg_free_print ../db/db_auto.c /^__db_pg_free_print(dbenv, dbtp, lsnp, notused2, no/ +__db_pg_free_print ../db/db_autop.c /^__db_pg_free_print(dbenv, dbtp, lsnp, notused2, no/ __db_pg_free_read ../db/db_auto.c /^__db_pg_free_read(dbenv, recbuf, argpp)$/ __db_pg_free_recover ../db/db_rec.c /^__db_pg_free_recover(dbenv, dbtp, lsnp, op, info)$/ __db_pg_free_recover_int ../db/db_rec.c /^__db_pg_free_recover_int(dbenv, argp, file_dbp, ls/ -__db_pg_freedata_getpgnos ../db/db_auto.c /^__db_pg_freedata_getpgnos(dbenv, rec, lsnp, notuse/ __db_pg_freedata_log ../db/db_auto.c /^__db_pg_freedata_log(dbp, txnid, ret_lsnp, flags, / -__db_pg_freedata_print ../db/db_auto.c /^__db_pg_freedata_print(dbenv, dbtp, lsnp, notused2/ +__db_pg_freedata_print ../db/db_autop.c /^__db_pg_freedata_print(dbenv, dbtp, lsnp, notused2/ __db_pg_freedata_read ../db/db_auto.c /^__db_pg_freedata_read(dbenv, recbuf, argpp)$/ __db_pg_freedata_recover ../db/db_rec.c /^__db_pg_freedata_recover(dbenv, dbtp, lsnp, op, in/ -__db_pg_new_getpgnos ../db/db_auto.c /^__db_pg_new_getpgnos(dbenv, rec, lsnp, notused1, s/ +__db_pg_init_log ../db/db_auto.c /^__db_pg_init_log(dbp, txnid, ret_lsnp, flags, pgno/ +__db_pg_init_print ../db/db_autop.c /^__db_pg_init_print(dbenv, dbtp, lsnp, notused2, no/ +__db_pg_init_read ../db/db_auto.c /^__db_pg_init_read(dbenv, recbuf, argpp)$/ +__db_pg_init_recover ../db/db_rec.c /^__db_pg_init_recover(dbenv, dbtp, lsnp, op, info)$/ __db_pg_new_log ../db/db_auto.c /^__db_pg_new_log(dbp, txnid, ret_lsnp, flags, pgno,/ -__db_pg_new_print ../db/db_auto.c /^__db_pg_new_print(dbenv, dbtp, lsnp, notused2, not/ +__db_pg_new_print ../db/db_autop.c /^__db_pg_new_print(dbenv, dbtp, lsnp, notused2, not/ __db_pg_new_read ../db/db_auto.c /^__db_pg_new_read(dbenv, recbuf, argpp)$/ __db_pg_new_recover ../db/db_rec.c /^__db_pg_new_recover(dbenv, dbtp, lsnp, op, info)$/ -__db_pg_prepare_getpgnos ../db/db_auto.c /^__db_pg_prepare_getpgnos(dbenv, rec, lsnp, notused/ __db_pg_prepare_log ../db/db_auto.c /^__db_pg_prepare_log(dbp, txnid, ret_lsnp, flags, p/ -__db_pg_prepare_print ../db/db_auto.c /^__db_pg_prepare_print(dbenv, dbtp, lsnp, notused2,/ +__db_pg_prepare_print ../db/db_autop.c /^__db_pg_prepare_print(dbenv, dbtp, lsnp, 
notused2,/ __db_pg_prepare_read ../db/db_auto.c /^__db_pg_prepare_read(dbenv, recbuf, argpp)$/ __db_pg_prepare_recover ../db/db_rec.c /^__db_pg_prepare_recover(dbenv, dbtp, lsnp, op, inf/ __db_pgerr ../common/db_err.c /^__db_pgerr(dbp, pgno, errval)$/ __db_pget ../db/db_iface.c /^__db_pget(dbp, txn, skey, pkey, data, flags)$/ -__db_pget__SWIG_1 ../libdb_java/db_java_wrap.c /^static int __db_pget__SWIG_1(struct __db *self,DB_/ __db_pget_arg ../db/db_iface.c /^__db_pget_arg(dbp, pkey, flags)$/ __db_pget_pp ../db/db_iface.c /^__db_pget_pp(dbp, txn, skey, pkey, data, flags)$/ -__db_pget_proc ../rpc_server/c/db_server_proc.c /^__db_pget_proc(dbpcl_id, txnpcl_id, skeydlen,$/ +__db_pget_proc ../rpc_server/c/db_server_proc.c /^__db_pget_proc(dbpcl_id, txnpcl_id, skeydlen, skey/ __db_pgfmt ../common/db_err.c /^__db_pgfmt(dbenv, pgno)$/ __db_pgin ../db/db_conv.c /^__db_pgin(dbenv, pg, pp, cookie)$/ __db_pgout ../db/db_conv.c /^__db_pgout(dbenv, pg, pp, cookie)$/ __db_pitem ../db/db_dup.c /^__db_pitem(dbc, pagep, indx, nbytes, hdr, data)$/ __db_poff ../db/db_overflow.c /^__db_poff(dbc, dbt, pgnop)$/ -__db_pr ../db/db_pr.c /^__db_pr(p, len, fp)$/ +__db_pr ../db/db_pr.c /^__db_pr(dbenv, mbp, p, len)$/ __db_pr_callback ../db/db_pr.c /^__db_pr_callback(handle, str_arg)$/ -__db_prdb ../db/db_pr.c /^__db_prdb(dbp, fp)$/ +__db_prdb ../db/db_pr.c /^__db_prdb(dbp, flags)$/ __db_prdbt ../db/db_pr.c /^__db_prdbt(dbtp, checkprint, prefix, handle, callb/ -__db_prflags ../db/db_pr.c /^__db_prflags(flags, fn, vfp)$/ +__db_prflags ../db/db_pr.c /^__db_prflags(dbenv, mbp, flags, fn, prefix, suffix/ __db_prfooter ../db/db_pr.c /^__db_prfooter(handle, callback)$/ __db_prheader ../db/db_pr.c /^__db_prheader(dbp, subname, pflag, keyflag, handle/ -__db_prnpage ../db/db_pr.c /^__db_prnpage(dbp, pgno, fp)$/ -__db_proff ../db/db_pr.c /^__db_proff(vp, fp)$/ -__db_prpage ../db/db_pr.c /^__db_prpage(dbp, h, fp, flags)$/ -__db_prqueue ../qam/qam_method.c /^__db_prqueue(dbp, fp, flags)$/ -__db_prtree ../db/db_pr.c /^__db_prtree(dbp, fp, flags)$/ +__db_print_all ../db/db_stati.c /^__db_print_all(dbp, flags)$/ +__db_print_citem ../db/db_stati.c /^int __db_print_citem(dbc)$/ +__db_print_cursor ../db/db_stati.c /^__db_print_cursor(dbp)$/ +__db_print_fh ../env/env_stat.c /^__db_print_fh(dbenv, fh, flags)$/ +__db_print_fileid ../env/env_stat.c /^__db_print_fileid(dbenv, id, suffix)$/ +__db_print_mutex ../env/env_stat.c /^__db_print_mutex(dbenv, mbp, mutex, suffix, flags)/ +__db_print_reginfo ../env/env_stat.c /^__db_print_reginfo(dbenv, infop, s)$/ +__db_print_stats ../db/db_stati.c /^__db_print_stats(dbp, flags)$/ +__db_prnpage ../db/db_pr.c /^__db_prnpage(dbp, pgno)$/ +__db_proff ../db/db_pr.c /^__db_proff(dbenv, mbp, vp)$/ +__db_prpage ../db/db_pr.c /^__db_prpage(dbp, h, flags)$/ +__db_prqueue ../qam/qam_method.c /^__db_prqueue(dbp, flags)$/ +__db_prtree ../db/db_pr.c /^__db_prtree(dbp, flags)$/ __db_pthread_mutex_destroy ../mutex/mut_pthread.c /^__db_pthread_mutex_destroy(mutexp)$/ __db_pthread_mutex_init ../mutex/mut_pthread.c /^__db_pthread_mutex_init(dbenv, mutexp, flags)$/ __db_pthread_mutex_lock ../mutex/mut_pthread.c /^__db_pthread_mutex_lock(dbenv, mutexp)$/ @@ -2469,8 +2675,8 @@ __db_pthread_mutex_unlock ../mutex/mut_pthread.c /^__db_pthread_mutex_unlock(dbe __db_put ../db/db_am.c /^__db_put(dbp, txn, key, data, flags)$/ __db_put_arg ../db/db_iface.c /^__db_put_arg(dbp, key, data, flags)$/ __db_put_pp ../db/db_iface.c /^__db_put_pp(dbp, txn, key, data, flags)$/ -__db_put_proc ../rpc_server/c/db_server_proc.c 
/^__db_put_proc(dbpcl_id, txnpcl_id, keydlen,$/ -__db_qmeta ../db/db_pr.c /^__db_qmeta(dbp, fp, h, flags)$/ +__db_put_proc ../rpc_server/c/db_server_proc.c /^__db_put_proc(dbpcl_id, txnpcl_id, keydlen, keydof/ +__db_qmeta ../db/db_pr.c /^__db_qmeta(dbp, h, flags)$/ __db_r_attach ../env/env_region.c /^__db_r_attach(dbenv, infop, size)$/ __db_r_detach ../env/env_region.c /^__db_r_detach(dbenv, infop, destroy)$/ __db_rdonly ../db/db_iface.c /^__db_rdonly(dbenv, name)$/ @@ -2482,22 +2688,15 @@ __db_rec_toobig ../common/db_err.c /^__db_rec_toobig(dbenv, data_len, fixed_rec_ __db_reclaim_callback ../db/db_reclaim.c /^__db_reclaim_callback(dbp, p, cookie, putp)$/ __db_refresh ../db/db.c /^__db_refresh(dbp, txn, flags, deferred_closep)$/ __db_region_destroy ../env/env_region.c /^__db_region_destroy(dbenv, infop)$/ -__db_relink ../db/db_dup.c /^__db_relink(dbc, add_rem, pagep, new_next, needloc/ -__db_relink_getpgnos ../db/db_auto.c /^__db_relink_getpgnos(dbenv, rec, lsnp, notused1, s/ -__db_relink_log ../db/db_auto.c /^__db_relink_log(dbp, txnid, ret_lsnp, flags,$/ -__db_relink_print ../db/db_auto.c /^__db_relink_print(dbenv, dbtp, lsnp, notused2, not/ -__db_relink_read ../db/db_auto.c /^__db_relink_read(dbenv, recbuf, argpp)$/ -__db_relink_recover ../db/db_rec.c /^__db_relink_recover(dbenv, dbtp, lsnp, op, info)$/ __db_remove ../db/db_remove.c /^__db_remove(dbp, txn, name, subdb, flags)$/ __db_remove_int ../db/db_remove.c /^__db_remove_int(dbp, txn, name, subdb, flags)$/ __db_remove_pp ../db/db_remove.c /^__db_remove_pp(dbp, name, subdb, flags)$/ -__db_remove_proc ../rpc_server/c/db_server_proc.c /^__db_remove_proc(dbpcl_id, name, subdb,$/ +__db_remove_proc ../rpc_server/c/db_server_proc.c /^__db_remove_proc(dbpcl_id, name, subdb, flags, rep/ __db_rename ../db/db_rename.c /^__db_rename(dbp, txn, name, subdb, newname)$/ __db_rename_int ../db/db_rename.c /^__db_rename_int(dbp, txn, name, subdb, newname)$/ __db_rename_pp ../db/db_rename.c /^__db_rename_pp(dbp, name, subdb, newname, flags)$/ -__db_rename_proc ../rpc_server/c/db_server_proc.c /^__db_rename_proc(dbpcl_id, name, subdb,$/ -__db_rep_enter ../rep/rep_util.c /^__db_rep_enter(dbp, checkgen, return_now)$/ -__db_rep_exit ../rep/rep_util.c /^__db_rep_exit(dbenv)$/ +__db_rename_proc ../rpc_server/c/db_server_proc.c /^__db_rename_proc(dbpcl_id, name, subdb, newname, f/ +__db_rep_enter ../rep/rep_util.c /^__db_rep_enter(dbp, checkgen, checklock, return_no/ __db_ret ../db/db_ret.c /^__db_ret(dbp, h, indx, dbt, memp, memsize)$/ __db_retcopy ../db/db_ret.c /^__db_retcopy(dbenv, dbt, data, len, memp, memsize)/ __db_rijndaelDecrypt ../crypto/rijndael/rijndael-alg-fst.c /^__db_rijndaelDecrypt(rk, Nr, ct, pt)$/ @@ -2506,7 +2705,7 @@ __db_rijndaelEncrypt ../crypto/rijndael/rijndael-alg-fst.c /^__db_rijndaelEncryp __db_rijndaelEncryptRound ../crypto/rijndael/rijndael-alg-fst.c /^__db_rijndaelEncryptRound(rk, Nr, pt, ct)$/ __db_rijndaelKeySetupDec ../crypto/rijndael/rijndael-alg-fst.c /^__db_rijndaelKeySetupDec(rk, cipherKey, keyBits)$/ __db_rijndaelKeySetupEnc ../crypto/rijndael/rijndael-alg-fst.c /^__db_rijndaelKeySetupEnc(rk, cipherKey, keyBits)$/ -__db_rmid_to_env ../xa/xa_map.c /^__db_rmid_to_env(rmid, envp)$/ +__db_rmid_to_env ../xa/xa_map.c /^__db_rmid_to_env(rmid, dbenvp)$/ __db_rpath ../os/os_rpath.c /^__db_rpath(path)$/ __db_s_done ../db/db_cam.c /^__db_s_done(sdbp)$/ __db_s_first ../db/db_cam.c /^__db_s_first(pdbp)$/ @@ -2524,14 +2723,11 @@ __db_salvage_subdbpg ../db/db_vrfy.c /^__db_salvage_subdbpg(dbp, vdp, master, ha __db_salvage_subdbs 
../db/db_vrfy.c /^__db_salvage_subdbs(dbp, vdp, handle, callback, fl/ __db_salvage_unknowns ../db/db_vrfy.c /^__db_salvage_unknowns(dbp, vdp, handle, callback, / __db_secondary_close ../db/db_am.c /^__db_secondary_close(sdbp, flags)$/ +__db_secondary_close_pp ../db/db_iface.c /^__db_secondary_close_pp(dbp, flags)$/ __db_secondary_corrupt ../db/db_join.c /^__db_secondary_corrupt(dbp)$/ __db_secondary_get ../db/db_am.c /^__db_secondary_get(sdbp, txn, skey, data, flags)$/ __db_set_alloc ../db/db_method.c /^__db_set_alloc(dbp, mal_func, real_func, free_func/ __db_set_append_recno ../db/db_method.c /^__db_set_append_recno(dbp, func)$/ -__db_set_bt_compare ../libdb_java/db_java_wrap.c /^static db_ret_t __db_set_bt_compare(struct __db *s/ -__db_set_bt_maxkey ../libdb_java/db_java_wrap.c /^static db_ret_t __db_set_bt_maxkey(struct __db *se/ -__db_set_bt_minkey ../libdb_java/db_java_wrap.c /^static db_ret_t __db_set_bt_minkey(struct __db *se/ -__db_set_bt_prefix ../libdb_java/db_java_wrap.c /^static db_ret_t __db_set_bt_prefix(struct __db *se/ __db_set_cachesize ../db/db_method.c /^__db_set_cachesize(dbp, cache_gbytes, cache_bytes,/ __db_set_dup_compare ../db/db_method.c /^__db_set_dup_compare(dbp, func)$/ __db_set_encrypt ../db/db_method.c /^__db_set_encrypt(dbp, passwd, flags)$/ @@ -2540,35 +2736,32 @@ __db_set_errfile ../db/db_method.c /^__db_set_errfile(dbp, errfile)$/ __db_set_errpfx ../db/db_method.c /^__db_set_errpfx(dbp, errpfx)$/ __db_set_feedback ../db/db_method.c /^__db_set_feedback(dbp, feedback)$/ __db_set_flags ../db/db_method.c /^__db_set_flags(dbp, flags)$/ -__db_set_h_ffactor ../libdb_java/db_java_wrap.c /^static db_ret_t __db_set_h_ffactor(struct __db *se/ -__db_set_h_hash ../libdb_java/db_java_wrap.c /^static db_ret_t __db_set_h_hash(struct __db *self,/ -__db_set_h_nelem ../libdb_java/db_java_wrap.c /^static db_ret_t __db_set_h_nelem(struct __db *self/ __db_set_lorder ../db/db_method.c /^__db_set_lorder(dbp, db_lorder)$/ +__db_set_msgcall ../db/db_method.c /^__db_set_msgcall(dbp, msgcall)$/ +__db_set_msgfile ../db/db_method.c /^__db_set_msgfile(dbp, msgfile)$/ __db_set_pagesize ../db/db_method.c /^__db_set_pagesize(dbp, db_pagesize)$/ __db_set_paniccall ../db/db_method.c /^__db_set_paniccall(dbp, paniccall)$/ -__db_set_q_extentsize ../libdb_java/db_java_wrap.c /^static db_ret_t __db_set_q_extentsize(struct __db / -__db_set_re_delim ../libdb_java/db_java_wrap.c /^static db_ret_t __db_set_re_delim(struct __db *sel/ -__db_set_re_len ../libdb_java/db_java_wrap.c /^static db_ret_t __db_set_re_len(struct __db *self,/ -__db_set_re_pad ../libdb_java/db_java_wrap.c /^static db_ret_t __db_set_re_pad(struct __db *self,/ -__db_set_re_source ../libdb_java/db_java_wrap.c /^static db_ret_t __db_set_re_source(struct __db *se/ __db_sgenrand ../crypto/mersenne/mt19937db.c /^__db_sgenrand(seed, mt, mtip)$/ -__db_shalloc ../env/db_salloc.c /^__db_shalloc(p, len, align, retp)$/ -__db_shalloc_dump ../env/db_salloc.c /^__db_shalloc_dump(addr, fp)$/ -__db_shalloc_free ../env/db_salloc.c /^__db_shalloc_free(regionp, ptr)$/ -__db_shalloc_init ../env/db_salloc.c /^__db_shalloc_init(area, size)$/ +__db_shalloc ../env/db_salloc.c /^__db_shalloc(infop, len, align, retp)$/ +__db_shalloc_free ../env/db_salloc.c /^__db_shalloc_free(infop, ptr)$/ +__db_shalloc_init ../env/db_salloc.c /^__db_shalloc_init(infop, size)$/ __db_shalloc_size ../env/db_salloc.c /^__db_shalloc_size(len, align)$/ +__db_shalloc_sizeof ../env/db_salloc.c /^__db_shalloc_sizeof(ptr)$/ __db_shlocks_clear ../dbinc/mutex.h /^#define 
__db_shlocks_clear(a, b, c) __db_shreg_loc/ __db_shlocks_destroy ../dbinc/mutex.h /^#define __db_shlocks_destroy(a, b) __db_shreg_lock/ +__db_shm_mode ../os/os_oflags.c /^__db_shm_mode(dbenv)$/ __db_shreg_locks_clear ../mutex/mutex.c /^__db_shreg_locks_clear(mutexp, infop, rp)$/ __db_shreg_locks_destroy ../mutex/mutex.c /^__db_shreg_locks_destroy(infop, rp)$/ __db_shreg_locks_record ../mutex/mutex.c /^__db_shreg_locks_record(dbenv, mutexp, infop, rp)$/ __db_shreg_maintinit ../mutex/mutex.c /^__db_shreg_maintinit(infop, addr, size)$/ __db_shreg_mutex_init ../mutex/mutex.c /^__db_shreg_mutex_init(dbenv, mutexp, offset, flags/ -__db_shsizeof ../env/db_salloc.c /^__db_shsizeof(ptr)$/ -__db_stat ../db/db_iface.c /^__db_stat(dbp, spp, flags)$/ -__db_stat_arg ../db/db_iface.c /^__db_stat_arg(dbp, flags)$/ -__db_stat_pp ../db/db_iface.c /^__db_stat_pp(dbp, spp, flags)$/ -__db_stat_proc ../rpc_server/c/db_server_proc.c /^__db_stat_proc(dbpcl_id, flags, replyp, freep)$/ +__db_stat ../db/db_stati.c /^__db_stat(dbp, txn, spp, flags)$/ +__db_stat_arg ../db/db_stati.c /^__db_stat_arg(dbp, flags)$/ +__db_stat_not_built ../env/env_stat.c /^__db_stat_not_built(dbenv)$/ +__db_stat_pp ../db/db_stati.c /^__db_stat_pp(dbp, txn, spp, flags)$/ +__db_stat_print ../db/db_stati.c /^__db_stat_print(dbp, flags)$/ +__db_stat_print_pp ../db/db_stati.c /^__db_stat_print_pp(dbp, flags)$/ +__db_stat_proc ../rpc_server/c/db_server_proc.c /^__db_stat_proc(dbpcl_id, txnpcl_id, flags, replyp,/ __db_strsep ../common/util_arg.c /^__db_strsep(stringp, delim)$/ __db_subdb_remove ../db/db_remove.c /^__db_subdb_remove(dbp, txn, name, subdb)$/ __db_subdb_rename ../db/db_rename.c /^__db_subdb_rename(dbp, txn, name, subdb, newname)$/ @@ -2584,29 +2777,23 @@ __db_testcopy ../db/db.c /^__db_testcopy(dbenv, dbp, name)$/ __db_testdocopy ../db/db.c /^__db_testdocopy(dbenv, name)$/ __db_tmp_open ../env/env_open.c /^__db_tmp_open(dbenv, tmp_oflags, path, fhpp)$/ __db_traverse_big ../db/db_reclaim.c /^__db_traverse_big(dbp, pgno, callback, cookie)$/ -__db_truncate ../db/db_truncate.c /^__db_truncate(dbp, txn, countp, flags)$/ +__db_truncate ../db/db_truncate.c /^__db_truncate(dbp, txn, countp)$/ __db_truncate_callback ../db/db_reclaim.c /^__db_truncate_callback(dbp, p, cookie, putp)$/ __db_truncate_pp ../db/db_truncate.c /^__db_truncate_pp(dbp, txn, countp, flags)$/ -__db_truncate_proc ../rpc_server/c/db_server_proc.c /^__db_truncate_proc(dbpcl_id, txnpcl_id,$/ -__db_txn_abort ../libdb_java/db_java_wrap.c /^static db_ret_t __db_txn_abort(struct __db_txn *se/ -__db_txn_abort_4002 ../rpc_client/db_server_clnt.c /^__db_txn_abort_4002(argp, clnt)$/ +__db_truncate_proc ../rpc_server/c/db_server_proc.c /^__db_truncate_proc(dbpcl_id, txnpcl_id, flags, rep/ +__db_txn_abort_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_txn_abort_4003__SVCSUFFIX__(msg, req)$/ __db_txn_auto_init ../db/db_iface.c /^__db_txn_auto_init(dbenv, txnidp)$/ __db_txn_auto_resolve ../db/db_iface.c /^__db_txn_auto_resolve(dbenv, txn, nosync, ret)$/ -__db_txn_begin_4002 ../rpc_client/db_server_clnt.c /^__db_txn_begin_4002(argp, clnt)$/ -__db_txn_commit ../libdb_java/db_java_wrap.c /^static db_ret_t __db_txn_commit(struct __db_txn *s/ -__db_txn_commit_4002 ../rpc_client/db_server_clnt.c /^__db_txn_commit_4002(argp, clnt)$/ -__db_txn_discard ../libdb_java/db_java_wrap.c /^static db_ret_t __db_txn_discard(struct __db_txn */ -__db_txn_discard_4002 ../rpc_client/db_server_clnt.c /^__db_txn_discard_4002(argp, clnt)$/ -__db_txn_id ../libdb_java/db_java_wrap.c /^static u_int32_t 
__db_txn_id(struct __db_txn *self/ -__db_txn_prepare ../libdb_java/db_java_wrap.c /^static db_ret_t __db_txn_prepare(struct __db_txn */ -__db_txn_prepare_4002 ../rpc_client/db_server_clnt.c /^__db_txn_prepare_4002(argp, clnt)$/ -__db_txn_recover_4002 ../rpc_client/db_server_clnt.c /^__db_txn_recover_4002(argp, clnt)$/ -__db_txn_set_timeout ../libdb_java/db_java_wrap.c /^static db_ret_t __db_txn_set_timeout(struct __db_t/ +__db_txn_begin_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_txn_begin_4003__SVCSUFFIX__(msg, req)$/ +__db_txn_commit_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_txn_commit_4003__SVCSUFFIX__(msg, req)$/ +__db_txn_discard_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_txn_discard_4003__SVCSUFFIX__(msg, req)$/ +__db_txn_prepare_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_txn_prepare_4003__SVCSUFFIX__(msg, req)$/ +__db_txn_recover_4003__SVCSUFFIX__ ../rpc_server/c/gen_db_server.c /^__db_txn_recover_4003__SVCSUFFIX__(msg, req)$/ __db_txnlist_add ../db/db_dispatch.c /^__db_txnlist_add(dbenv, listp, txnid, status, lsn)/ __db_txnlist_ckp ../db/db_dispatch.c /^__db_txnlist_ckp(dbenv, listp, ckp_lsn)$/ __db_txnlist_end ../db/db_dispatch.c /^__db_txnlist_end(dbenv, listp)$/ -__db_txnlist_find ../db/db_dispatch.c /^__db_txnlist_find(dbenv, listp, txnid)$/ -__db_txnlist_find_internal ../db/db_dispatch.c /^__db_txnlist_find_internal(dbenv, listp, type, txn/ +__db_txnlist_find ../db/db_dispatch.c /^__db_txnlist_find(dbenv, listp, txnid, statusp)$/ +__db_txnlist_find_internal ../db/db_dispatch.c /^__db_txnlist_find_internal(dbenv,$/ __db_txnlist_gen ../db/db_dispatch.c /^__db_txnlist_gen(dbenv, listp, incr, min, max)$/ __db_txnlist_init ../db/db_dispatch.c /^__db_txnlist_init(dbenv, low_txn, hi_txn, trunc_ls/ __db_txnlist_lsnadd ../db/db_dispatch.c /^__db_txnlist_lsnadd(dbenv, listp, lsnp, flags)$/ @@ -2618,12 +2805,12 @@ __db_txnlist_update ../db/db_dispatch.c /^__db_txnlist_update(dbenv, listp, txni __db_unknown_flag ../common/db_err.c /^__db_unknown_flag(dbenv, routine, flag)$/ __db_unknown_type ../common/db_err.c /^__db_unknown_type(dbenv, routine, type)$/ __db_unmap_rmid ../xa/xa_map.c /^__db_unmap_rmid(rmid)$/ -__db_unmap_xid ../xa/xa_map.c /^__db_unmap_xid(env, xid, off)$/ +__db_unmap_xid ../xa/xa_map.c /^__db_unmap_xid(dbenv, xid, off)$/ __db_up_ovref ../db/db_upg_opd.c /^__db_up_ovref(dbp, fhp, pgno)$/ __db_upgrade ../db/db_upg.c /^__db_upgrade(dbp, fname, flags)$/ __db_upgrade_pp ../db/db_upg.c /^__db_upgrade_pp(dbp, fname, flags)$/ __db_util_arg ../common/util_arg.c /^__db_util_arg(arg0, str, argcp, argvp)$/ -__db_util_cache ../common/util_cache.c /^__db_util_cache(dbenv, dbp, cachep, resizep)$/ +__db_util_cache ../common/util_cache.c /^__db_util_cache(dbp, cachep, resizep)$/ __db_util_interrupted ../common/util_sig.c /^__db_util_interrupted()$/ __db_util_logset ../common/util_log.c /^__db_util_logset(progname, fname)$/ __db_util_siginit ../common/util_sig.c /^__db_util_siginit()$/ @@ -2657,6 +2844,7 @@ __db_vrfy_pgset ../db/db_vrfyutil.c /^__db_vrfy_pgset(dbenv, pgsize, dbpp)$/ __db_vrfy_pgset_get ../db/db_vrfyutil.c /^__db_vrfy_pgset_get(dbp, pgno, valp)$/ __db_vrfy_pgset_inc ../db/db_vrfyutil.c /^__db_vrfy_pgset_inc(dbp, pgno)$/ __db_vrfy_pgset_next ../db/db_vrfyutil.c /^__db_vrfy_pgset_next(dbc, pgnop)$/ +__db_vrfy_prdbt ../db/db_vrfyutil.c /^__db_vrfy_prdbt(dbtp, checkprint, prefix, handle, / __db_vrfy_putpageinfo ../db/db_vrfyutil.c /^__db_vrfy_putpageinfo(dbenv, vdp, pip)$/ __db_vrfy_struct_feedback ../db/db_vrfy.c 
/^__db_vrfy_struct_feedback(dbp, vdp)$/ __db_vrfy_structure ../db/db_vrfy.c /^__db_vrfy_structure(dbp, vdp, dbname, meta_pgno, f/ @@ -2667,33 +2855,26 @@ __db_win32_mutex_init ../mutex/mut_win32.c /^__db_win32_mutex_init(dbenv, mutexp __db_win32_mutex_lock ../mutex/mut_win32.c /^__db_win32_mutex_lock(dbenv, mutexp)$/ __db_win32_mutex_unlock ../mutex/mut_win32.c /^__db_win32_mutex_unlock(dbenv, mutexp)$/ __db_wrlock_err ../db/db_cam.c /^__db_wrlock_err(dbenv)$/ -__db_xa_close ../xa/xa.c /^__db_xa_close(xa_info, rmid, flags)$/ -__db_xa_commit ../xa/xa.c /^__db_xa_commit(xid, rmid, flags)$/ +__db_xa_close ../xa/xa.c /^__db_xa_close(xa_info, rmid, arg_flags)$/ +__db_xa_commit ../xa/xa.c /^__db_xa_commit(xid, rmid, arg_flags)$/ __db_xa_complete ../xa/xa.c /^__db_xa_complete(handle, retval, rmid, flags)$/ __db_xa_create ../xa/xa_db.c /^__db_xa_create(dbp)$/ __db_xa_end ../xa/xa.c /^__db_xa_end(xid, rmid, flags)$/ -__db_xa_forget ../xa/xa.c /^__db_xa_forget(xid, rmid, flags)$/ -__db_xa_open ../xa/xa.c /^__db_xa_open(xa_info, rmid, flags)$/ -__db_xa_prepare ../xa/xa.c /^__db_xa_prepare(xid, rmid, flags)$/ +__db_xa_forget ../xa/xa.c /^__db_xa_forget(xid, rmid, arg_flags)$/ +__db_xa_open ../xa/xa.c /^__db_xa_open(xa_info, rmid, arg_flags)$/ +__db_xa_prepare ../xa/xa.c /^__db_xa_prepare(xid, rmid, arg_flags)$/ __db_xa_recover ../xa/xa.c /^__db_xa_recover(xids, count, rmid, flags)$/ -__db_xa_rollback ../xa/xa.c /^__db_xa_rollback(xid, rmid, flags)$/ -__db_xa_start ../xa/xa.c /^__db_xa_start(xid, rmid, flags)$/ +__db_xa_rollback ../xa/xa.c /^__db_xa_rollback(xid, rmid, arg_flags)$/ +__db_xa_start ../xa/xa.c /^__db_xa_start(xid, rmid, arg_flags)$/ __db_xid_to_txn ../xa/xa_map.c /^__db_xid_to_txn(dbenv, xid, offp)$/ -__dbc_close ../libdb_java/db_java_wrap.c /^static db_ret_t __dbc_close(struct __dbc *self){$/ __dbc_close_int ../rpc_server/c/db_server_util.c /^__dbc_close_int(dbc_ctp)$/ __dbc_close_proc ../rpc_server/c/db_server_proc.c /^__dbc_close_proc(dbccl_id, replyp)$/ -__dbc_count ../libdb_java/db_java_wrap.c /^static db_recno_t __dbc_count(struct __dbc *self,u/ __dbc_count_proc ../rpc_server/c/db_server_proc.c /^__dbc_count_proc(dbccl_id, flags, replyp)$/ -__dbc_del ../libdb_java/db_java_wrap.c /^static int __dbc_del(struct __dbc *self,u_int32_t / __dbc_del_proc ../rpc_server/c/db_server_proc.c /^__dbc_del_proc(dbccl_id, flags, replyp)$/ -__dbc_dup ../libdb_java/db_java_wrap.c /^static DBC *__dbc_dup(struct __dbc *self,u_int32_t/ __dbc_dup_proc ../rpc_server/c/db_server_proc.c /^__dbc_dup_proc(dbccl_id, flags, replyp)$/ -__dbc_get__SWIG_0 ../libdb_java/db_java_wrap.c /^static int __dbc_get__SWIG_0(struct __dbc *self,DB/ -__dbc_get_proc ../rpc_server/c/db_server_proc.c /^__dbc_get_proc(dbccl_id, keydlen, keydoff,$/ -__dbc_pget__SWIG_1 ../libdb_java/db_java_wrap.c /^static int __dbc_pget__SWIG_1(struct __dbc *self,D/ -__dbc_pget_proc ../rpc_server/c/db_server_proc.c /^__dbc_pget_proc(dbccl_id, skeydlen, skeydoff,$/ -__dbc_put ../libdb_java/db_java_wrap.c /^static int __dbc_put(struct __dbc *self,DBT *key,D/ -__dbc_put_proc ../rpc_server/c/db_server_proc.c /^__dbc_put_proc(dbccl_id, keydlen, keydoff,$/ +__dbc_get_proc ../rpc_server/c/db_server_proc.c /^__dbc_get_proc(dbccl_id, keydlen, keydoff, keyulen/ +__dbc_pget_proc ../rpc_server/c/db_server_proc.c /^__dbc_pget_proc(dbccl_id, skeydlen, skeydoff, skey/ +__dbc_put_proc ../rpc_server/c/db_server_proc.c /^__dbc_put_proc(dbccl_id, keydlen, keydoff, keyulen/ __dbcl_c_destroy ../rpc_client/client.c /^__dbcl_c_destroy(dbc)$/ __dbcl_c_refresh 
../rpc_client/client.c /^__dbcl_c_refresh(dbc)$/ __dbcl_c_setup ../rpc_client/client.c /^__dbcl_c_setup(cl_id, dbp, dbcp)$/ @@ -2760,8 +2941,8 @@ __dbcl_db_remove_ret ../rpc_client/gen_client_ret.c /^__dbcl_db_remove_ret(dbp, __dbcl_db_rename ../rpc_client/gen_client.c /^__dbcl_db_rename(dbp, name, subdb, newname, flags)/ __dbcl_db_rename_ret ../rpc_client/gen_client_ret.c /^__dbcl_db_rename_ret(dbp, name, subdb, newname, fl/ __dbcl_db_set_append_recno ../rpc_client/gen_client.c /^__dbcl_db_set_append_recno(dbp, func0)$/ -__dbcl_db_stat ../rpc_client/gen_client.c /^__dbcl_db_stat(dbp, sp, flags)$/ -__dbcl_db_stat_ret ../rpc_client/gen_client_ret.c /^__dbcl_db_stat_ret(dbp, sp, flags, replyp)$/ +__dbcl_db_stat ../rpc_client/gen_client.c /^__dbcl_db_stat(dbp, txnp, sp, flags)$/ +__dbcl_db_stat_ret ../rpc_client/gen_client_ret.c /^__dbcl_db_stat_ret(dbp, txnp, sp, flags, replyp)$/ __dbcl_db_sync ../rpc_client/gen_client.c /^__dbcl_db_sync(dbp, flags)$/ __dbcl_db_truncate ../rpc_client/gen_client.c /^__dbcl_db_truncate(dbp, txnp, countp, flags)$/ __dbcl_db_truncate_ret ../rpc_client/gen_client_ret.c /^__dbcl_db_truncate_ret(dbp, txnp, countp, flags, r/ @@ -2814,7 +2995,8 @@ __dbcl_get_lk_detect ../rpc_client/gen_client.c /^__dbcl_get_lk_detect(dbenv, de __dbcl_get_lk_max_lockers ../rpc_client/gen_client.c /^__dbcl_get_lk_max_lockers(dbenv, maxp)$/ __dbcl_get_lk_max_locks ../rpc_client/gen_client.c /^__dbcl_get_lk_max_locks(dbenv, maxp)$/ __dbcl_get_lk_max_objects ../rpc_client/gen_client.c /^__dbcl_get_lk_max_objects(dbenv, maxp)$/ -__dbcl_get_mp_maxwrite ../rpc_client/gen_client.c /^__dbcl_get_mp_maxwrite(dbenv, nwritep, nsleepp)$/ +__dbcl_get_mp_max_openfd ../rpc_client/gen_client.c /^__dbcl_get_mp_max_openfd(dbenv, nopenp)$/ +__dbcl_get_mp_max_write ../rpc_client/gen_client.c /^__dbcl_get_mp_max_write(dbenv, nwritep, nsleepp)$/ __dbcl_get_mp_mmapsize ../rpc_client/gen_client.c /^__dbcl_get_mp_mmapsize(dbenv, mmapsizep)$/ __dbcl_get_shm_key ../rpc_client/gen_client.c /^__dbcl_get_shm_key(dbenv, shm_keyp)$/ __dbcl_get_tas_spins ../rpc_client/gen_client.c /^__dbcl_get_tas_spins(dbenv, tas_spinsp)$/ @@ -2864,7 +3046,7 @@ __dbcl_memp_sync ../rpc_client/gen_client.c /^__dbcl_memp_sync(dbenv, lsn)$/ __dbcl_memp_trickle ../rpc_client/gen_client.c /^__dbcl_memp_trickle(dbenv, pct, nwrotep)$/ __dbcl_noserver ../rpc_client/gen_client.c /^__dbcl_noserver(dbenv)$/ __dbcl_refresh ../rpc_client/client.c /^__dbcl_refresh(dbenv)$/ -__dbcl_rep_elect ../rpc_client/gen_client.c /^__dbcl_rep_elect(dbenv, nsites, pri, timeout, idp)/ +__dbcl_rep_elect ../rpc_client/gen_client.c /^__dbcl_rep_elect(dbenv, nsites, nvotes, pri, timeo/ __dbcl_rep_flush ../rpc_client/gen_client.c /^__dbcl_rep_flush(dbenv)$/ __dbcl_rep_get_limit ../rpc_client/gen_client.c /^__dbcl_rep_get_limit(dbenv, mbytesp, bytesp)$/ __dbcl_rep_process_message ../rpc_client/gen_client.c /^__dbcl_rep_process_message(dbenv, rec, control, id/ @@ -2887,7 +3069,8 @@ __dbcl_set_lk_max ../rpc_client/gen_client.c /^__dbcl_set_lk_max(dbenv, max)$/ __dbcl_set_lk_max_lockers ../rpc_client/gen_client.c /^__dbcl_set_lk_max_lockers(dbenv, max)$/ __dbcl_set_lk_max_locks ../rpc_client/gen_client.c /^__dbcl_set_lk_max_locks(dbenv, max)$/ __dbcl_set_lk_max_objects ../rpc_client/gen_client.c /^__dbcl_set_lk_max_objects(dbenv, max)$/ -__dbcl_set_mp_maxwrite ../rpc_client/gen_client.c /^__dbcl_set_mp_maxwrite(dbenv, nwrite, nsleep)$/ +__dbcl_set_mp_max_openfd ../rpc_client/gen_client.c /^__dbcl_set_mp_max_openfd(dbenv, nopen)$/ +__dbcl_set_mp_max_write 
../rpc_client/gen_client.c /^__dbcl_set_mp_max_write(dbenv, nwrite, nsleep)$/ __dbcl_set_mp_mmapsize ../rpc_client/gen_client.c /^__dbcl_set_mp_mmapsize(dbenv, mmapsize)$/ __dbcl_set_shm_key ../rpc_client/gen_client.c /^__dbcl_set_shm_key(dbenv, shm_key)$/ __dbcl_set_tas_spins ../rpc_client/gen_client.c /^__dbcl_set_tas_spins(dbenv, tas_spins)$/ @@ -2931,6 +3114,7 @@ __dbenv_get_errfile ../env/env_method.c /^__dbenv_get_errfile(dbenv, errfilep)$/ __dbenv_get_errpfx ../env/env_method.c /^__dbenv_get_errpfx(dbenv, errpfxp)$/ __dbenv_get_flags ../env/env_method.c /^__dbenv_get_flags(dbenv, flagsp)$/ __dbenv_get_home ../env/env_method.c /^__dbenv_get_home(dbenv, homep)$/ +__dbenv_get_msgfile ../env/env_method.c /^__dbenv_get_msgfile(dbenv, msgfilep)$/ __dbenv_get_open_flags ../env/env_open.c /^__dbenv_get_open_flags(dbenv, flagsp)$/ __dbenv_get_shm_key ../env/env_method.c /^__dbenv_get_shm_key(dbenv, shm_keyp)$/ __dbenv_get_tas_spins ../env/env_method.c /^__dbenv_get_tas_spins(dbenv, tas_spinsp)$/ @@ -2939,6 +3123,8 @@ __dbenv_get_verbose ../env/env_method.c /^__dbenv_get_verbose(dbenv, which, onof __dbenv_init ../env/env_method.c /^__dbenv_init(dbenv)$/ __dbenv_map_flags ../env/env_method.c /^__dbenv_map_flags(dbenv, inflagsp, outflagsp)$/ __dbenv_open ../env/env_open.c /^__dbenv_open(dbenv, db_home, flags, mode)$/ +__dbenv_print_all ../env/env_stat.c /^__dbenv_print_all(dbenv, flags)$/ +__dbenv_print_stats ../env/env_stat.c /^__dbenv_print_stats(dbenv, flags)$/ __dbenv_refresh ../env/env_open.c /^__dbenv_refresh(dbenv, orig_flags, rep_check)$/ __dbenv_remove ../env/env_open.c /^__dbenv_remove(dbenv, db_home, flags)$/ __dbenv_remove_int ../env/env_open.c /^__dbenv_remove_int(dbenv, db_home, flags)$/ @@ -2951,12 +3137,17 @@ __dbenv_set_errfile ../env/env_method.c /^__dbenv_set_errfile(dbenv, errfile)$/ __dbenv_set_errpfx ../env/env_method.c /^__dbenv_set_errpfx(dbenv, errpfx)$/ __dbenv_set_feedback ../env/env_method.c /^__dbenv_set_feedback(dbenv, feedback)$/ __dbenv_set_flags ../env/env_method.c /^__dbenv_set_flags(dbenv, flags, on)$/ +__dbenv_set_intermediate_dir ../env/env_method.c /^__dbenv_set_intermediate_dir(dbenv, mode, flags)$/ +__dbenv_set_msgcall ../env/env_method.c /^__dbenv_set_msgcall(dbenv, msgcall)$/ +__dbenv_set_msgfile ../env/env_method.c /^__dbenv_set_msgfile(dbenv, msgfile)$/ __dbenv_set_paniccall ../env/env_method.c /^__dbenv_set_paniccall(dbenv, paniccall)$/ __dbenv_set_rpc_server_noclnt ../env/env_method.c /^__dbenv_set_rpc_server_noclnt(dbenv, cl, host, tse/ __dbenv_set_shm_key ../env/env_method.c /^__dbenv_set_shm_key(dbenv, shm_key)$/ __dbenv_set_tas_spins ../env/env_method.c /^__dbenv_set_tas_spins(dbenv, tas_spins)$/ __dbenv_set_tmp_dir ../env/env_method.c /^__dbenv_set_tmp_dir(dbenv, dir)$/ __dbenv_set_verbose ../env/env_method.c /^__dbenv_set_verbose(dbenv, which, on)$/ +__dbenv_stat_print ../env/env_stat.c /^__dbenv_stat_print(dbenv, flags)$/ +__dbenv_stat_print_pp ../env/env_stat.c /^__dbenv_stat_print_pp(dbenv, flags)$/ __dbh_am_chk ../db/db_method.c /^__dbh_am_chk(dbp, flags)$/ __dbh_err ../db/db_method.c /^__dbh_err(DB *dbp, int error, const char *fmt, .../ __dbh_errx ../db/db_method.c /^__dbh_errx(DB *dbp, const char *fmt, ...)$/ @@ -2970,24 +3161,26 @@ __dbj_dbt_copyout ../libdb_java/db_java_wrap.c /^static void __dbj_dbt_copyout($ __dbj_dbt_release ../libdb_java/db_java_wrap.c /^static void __dbj_dbt_release($/ __dbj_dup_compare ../libdb_java/db_java_wrap.c /^static int __dbj_dup_compare(DB *db, const DBT *db/ __dbj_env_feedback 
../libdb_java/db_java_wrap.c /^static void __dbj_env_feedback(DB_ENV *dbenv, int / -__dbj_error ../libdb_java/db_java_wrap.c /^static void __dbj_error(const char *prefix, char */ -__dbj_fill_bt_stat ../libdb_java/java_stat_auto.c /^static int __dbj_fill_bt_stat(JNIEnv *jnienv, jcla/ -__dbj_fill_h_stat ../libdb_java/java_stat_auto.c /^static int __dbj_fill_h_stat(JNIEnv *jnienv, jclas/ -__dbj_fill_lock_stat ../libdb_java/java_stat_auto.c /^static int __dbj_fill_lock_stat(JNIEnv *jnienv, jc/ -__dbj_fill_log_stat ../libdb_java/java_stat_auto.c /^static int __dbj_fill_log_stat(JNIEnv *jnienv, jcl/ +__dbj_error ../libdb_java/db_java_wrap.c /^static void __dbj_error(const DB_ENV *dbenv, const/ +__dbj_fill_bt_stat ../libdb_java/java_stat_auto.c /^static int __dbj_fill_bt_stat(JNIEnv *jnienv, $/ +__dbj_fill_h_stat ../libdb_java/java_stat_auto.c /^static int __dbj_fill_h_stat(JNIEnv *jnienv, $/ +__dbj_fill_lock_stat ../libdb_java/java_stat_auto.c /^static int __dbj_fill_lock_stat(JNIEnv *jnienv, $/ +__dbj_fill_log_stat ../libdb_java/java_stat_auto.c /^static int __dbj_fill_log_stat(JNIEnv *jnienv, $/ __dbj_fill_mpool_fstat ../libdb_java/java_stat_auto.c /^static int __dbj_fill_mpool_fstat(JNIEnv *jnienv, / -__dbj_fill_mpool_stat ../libdb_java/java_stat_auto.c /^static int __dbj_fill_mpool_stat(JNIEnv *jnienv, j/ -__dbj_fill_qam_stat ../libdb_java/java_stat_auto.c /^static int __dbj_fill_qam_stat(JNIEnv *jnienv, jcl/ -__dbj_fill_rep_stat ../libdb_java/java_stat_auto.c /^static int __dbj_fill_rep_stat(JNIEnv *jnienv, jcl/ -__dbj_fill_txn_active ../libdb_java/java_stat_auto.c /^static int __dbj_fill_txn_active(JNIEnv *jnienv, j/ -__dbj_fill_txn_stat ../libdb_java/java_stat_auto.c /^static int __dbj_fill_txn_stat(JNIEnv *jnienv, jcl/ +__dbj_fill_mpool_stat ../libdb_java/java_stat_auto.c /^static int __dbj_fill_mpool_stat(JNIEnv *jnienv, $/ +__dbj_fill_qam_stat ../libdb_java/java_stat_auto.c /^static int __dbj_fill_qam_stat(JNIEnv *jnienv, $/ +__dbj_fill_rep_stat ../libdb_java/java_stat_auto.c /^static int __dbj_fill_rep_stat(JNIEnv *jnienv, $/ +__dbj_fill_seq_stat ../libdb_java/java_stat_auto.c /^static int __dbj_fill_seq_stat(JNIEnv *jnienv, $/ +__dbj_fill_txn_active ../libdb_java/java_stat_auto.c /^static int __dbj_fill_txn_active(JNIEnv *jnienv, $/ +__dbj_fill_txn_stat ../libdb_java/java_stat_auto.c /^static int __dbj_fill_txn_stat(JNIEnv *jnienv, $/ __dbj_get_except ../libdb_java/db_java_wrap.c /^static jthrowable __dbj_get_except(JNIEnv *jenv,$/ __dbj_get_jnienv ../libdb_java/db_java_wrap.c /^static JNIEnv *__dbj_get_jnienv(void)$/ __dbj_h_hash ../libdb_java/db_java_wrap.c /^static u_int32_t __dbj_h_hash(DB *db, const void */ +__dbj_message ../libdb_java/db_java_wrap.c /^static void __dbj_message(const DB_ENV *dbenv, con/ __dbj_panic ../libdb_java/db_java_wrap.c /^static void __dbj_panic(DB_ENV *dbenv, int err)$/ __dbj_rep_transport ../libdb_java/db_java_wrap.c /^static int __dbj_rep_transport(DB_ENV *dbenv,$/ __dbj_seckey_create ../libdb_java/db_java_wrap.c /^static int __dbj_seckey_create(DB *db,$/ -__dbj_throw ../libdb_java/db_java_wrap.c /^static int __dbj_throw(JNIEnv *jenv, int err, cons/ +__dbj_throw ../libdb_java/db_java_wrap.c /^static int __dbj_throw(JNIEnv *jenv,$/ __dbj_verify_callback ../libdb_java/db_java_wrap.c /^static int __dbj_verify_callback(void *handle, con/ __dbj_wrap_DB_LSN ../libdb_java/db_java_wrap.c /^static jobject __dbj_wrap_DB_LSN(JNIEnv *jenv, DB_/ __dblist_get ../db/db.c /^__dblist_get(dbenv, adjid)$/ @@ -2995,28 +3188,27 @@ __dbreg_add_dbentry ../dbreg/dbreg_util.c 
/^__dbreg_add_dbentry(dbenv, dblp, dbp __dbreg_assign_id ../dbreg/dbreg.c /^__dbreg_assign_id(dbp, id)$/ __dbreg_check_master ../dbreg/dbreg_util.c /^__dbreg_check_master(dbenv, uid, name)$/ __dbreg_close_files ../dbreg/dbreg_util.c /^__dbreg_close_files(dbenv)$/ -__dbreg_close_id ../dbreg/dbreg.c /^__dbreg_close_id(dbp, txn)$/ +__dbreg_close_id ../dbreg/dbreg.c /^__dbreg_close_id(dbp, txn, op)$/ __dbreg_do_open ../dbreg/dbreg_util.c /^__dbreg_do_open(dbenv,$/ __dbreg_fid_to_fname ../dbreg/dbreg_util.c /^__dbreg_fid_to_fname(dblp, fid, have_lock, fnamep)/ __dbreg_get_id ../dbreg/dbreg.c /^__dbreg_get_id(dbp, txn, idp)$/ __dbreg_get_name ../dbreg/dbreg_util.c /^__dbreg_get_name(dbenv, fid, namep)$/ __dbreg_id_to_db ../dbreg/dbreg_util.c /^__dbreg_id_to_db(dbenv, txn, dbpp, ndx, inc)$/ __dbreg_id_to_db_int ../dbreg/dbreg_util.c /^__dbreg_id_to_db_int(dbenv, txn, dbpp, ndx, inc, t/ -__dbreg_id_to_fname ../dbreg/dbreg_util.c /^__dbreg_id_to_fname(dblp, lid, have_lock, fnamep)$/ -__dbreg_init_getpgnos ../dbreg/dbreg_auto.c /^__dbreg_init_getpgnos(dbenv, dtabp, dtabsizep)$/ -__dbreg_init_print ../dbreg/dbreg_auto.c /^__dbreg_init_print(dbenv, dtabp, dtabsizep)$/ +__dbreg_id_to_fname ../dbreg/dbreg_util.c /^__dbreg_id_to_fname(dblp, id, have_lock, fnamep)$/ +__dbreg_init_print ../dbreg/dbreg_autop.c /^__dbreg_init_print(dbenv, dtabp, dtabsizep)$/ __dbreg_init_recover ../dbreg/dbreg_auto.c /^__dbreg_init_recover(dbenv, dtabp, dtabsizep)$/ __dbreg_lazy_id ../dbreg/dbreg_util.c /^__dbreg_lazy_id(dbp)$/ +__dbreg_log_files ../dbreg/dbreg_util.c /^__dbreg_log_files(dbenv)$/ __dbreg_new_id ../dbreg/dbreg.c /^__dbreg_new_id(dbp, txn)$/ __dbreg_open_file ../dbreg/dbreg_rec.c /^__dbreg_open_file(dbenv, txn, argp, info)$/ -__dbreg_open_files ../dbreg/dbreg_util.c /^__dbreg_open_files(dbenv)$/ -__dbreg_pluck_id ../dbreg/dbreg_util.c /^__dbreg_pluck_id(dbenv, id)$/ -__dbreg_pop_id ../dbreg/dbreg_util.c /^__dbreg_pop_id(dbenv, id)$/ -__dbreg_print_dblist ../dbreg/dbreg_util.c /^__dbreg_print_dblist(dbenv)$/ -__dbreg_push_id ../dbreg/dbreg_util.c /^__dbreg_push_id(dbenv, id)$/ -__dbreg_register_getpgnos ../dbreg/dbreg_auto.c /^__dbreg_register_getpgnos(dbenv, rec, lsnp, notuse/ +__dbreg_pluck_id ../dbreg/dbreg.c /^__dbreg_pluck_id(dbenv, id)$/ +__dbreg_pop_id ../dbreg/dbreg.c /^__dbreg_pop_id(dbenv, id)$/ +__dbreg_print_dblist ../dbreg/dbreg_stat.c /^__dbreg_print_dblist(dbenv, flags)$/ +__dbreg_print_fname ../dbreg/dbreg_stat.c /^__dbreg_print_fname(dbenv, fnp)$/ +__dbreg_push_id ../dbreg/dbreg.c /^__dbreg_push_id(dbenv, id)$/ __dbreg_register_log ../dbreg/dbreg_auto.c /^__dbreg_register_log(dbenv, txnid, ret_lsnp, flags/ -__dbreg_register_print ../dbreg/dbreg_auto.c /^__dbreg_register_print(dbenv, dbtp, lsnp, notused2/ +__dbreg_register_print ../dbreg/dbreg_autop.c /^__dbreg_register_print(dbenv, dbtp, lsnp, notused2/ __dbreg_register_read ../dbreg/dbreg_auto.c /^__dbreg_register_read(dbenv, recbuf, argpp)$/ __dbreg_register_recover ../dbreg/dbreg_rec.c /^__dbreg_register_recover(dbenv, dbtp, lsnp, op, in/ __dbreg_rem_dbentry ../dbreg/dbreg_util.c /^__dbreg_rem_dbentry(dblp, ndx)$/ @@ -3024,7 +3216,6 @@ __dbreg_revoke_id ../dbreg/dbreg.c /^__dbreg_revoke_id(dbp, have_lock, force_id) __dbreg_setup ../dbreg/dbreg.c /^__dbreg_setup(dbp, name, create_txnid)$/ __dbreg_teardown ../dbreg/dbreg.c /^__dbreg_teardown(dbp)$/ __dbsrv_active ../rpc_server/c/db_server_util.c /^__dbsrv_active(ctp)$/ -__dbsrv_main ../rpc_server/c/db_server_svc.c /^void __dbsrv_main()$/ __dbsrv_settimeout ../rpc_server/c/db_server_util.c 
/^__dbsrv_settimeout(ctp, to)$/ __dbsrv_sharedb ../rpc_server/c/db_server_util.c /^__dbsrv_sharedb(db_ctp, name, subdb, type, flags)$/ __dbsrv_shareenv ../rpc_server/c/db_server_util.c /^__dbsrv_shareenv(env_ctp, home, flags)$/ @@ -3036,61 +3227,55 @@ __dd_debug ../lock/lock_deadlock.c /^__dd_debug(dbenv, idmap, bitmap, nlockers, __dd_find ../lock/lock_deadlock.c /^__dd_find(dbenv, bmp, idmap, nlockers, nalloc, dea/ __dd_isolder ../lock/lock_deadlock.c /^__dd_isolder(a, b, lock_max, txn_max)$/ __dd_verify ../lock/lock_deadlock.c /^__dd_verify(idmap, deadmap, tmpmap, origmap, nlock/ -__env_cachesize_proc ../rpc_server/c/db_server_proc.c /^__env_cachesize_proc(dbenvcl_id, gbytes, bytes,$/ +__env_cachesize_proc ../rpc_server/c/db_server_proc.c /^__env_cachesize_proc(dbenvcl_id, gbytes, bytes, nc/ __env_close_proc ../rpc_server/c/db_server_proc.c /^__env_close_proc(dbenvcl_id, flags, replyp)$/ __env_create_proc ../rpc_server/c/db_server_proc.c /^__env_create_proc(timeout, replyp)$/ -__env_dbremove_proc ../rpc_server/c/db_server_proc.c /^__env_dbremove_proc(dbenvcl_id, txnpcl_id, name,$/ -__env_dbrename_proc ../rpc_server/c/db_server_proc.c /^__env_dbrename_proc(dbenvcl_id, txnpcl_id, name,$/ +__env_db_rep_exit ../rep/rep_util.c /^__env_db_rep_exit(dbenv)$/ +__env_dbremove_proc ../rpc_server/c/db_server_proc.c /^__env_dbremove_proc(dbenvcl_id, txnpcl_id, name, s/ +__env_dbrename_proc ../rpc_server/c/db_server_proc.c /^__env_dbrename_proc(dbenvcl_id, txnpcl_id, name, s/ __env_encrypt_proc ../rpc_server/c/db_server_proc.c /^__env_encrypt_proc(dbenvcl_id, passwd, flags, repl/ __env_flags_proc ../rpc_server/c/db_server_proc.c /^__env_flags_proc(dbenvcl_id, flags, onoff, replyp)/ -__env_get_cachesize_proc ../rpc_server/c/db_server_proc.c /^__env_get_cachesize_proc(dbenvcl_id,$/ +__env_get_cachesize_proc ../rpc_server/c/db_server_proc.c /^__env_get_cachesize_proc(dbenvcl_id, replyp)$/ __env_get_encrypt_flags_proc ../rpc_server/c/db_server_proc.c /^__env_get_encrypt_flags_proc(dbenvcl_id, replyp)$/ __env_get_flags_proc ../rpc_server/c/db_server_proc.c /^__env_get_flags_proc(dbenvcl_id, replyp)$/ __env_get_home_proc ../rpc_server/c/db_server_proc.c /^__env_get_home_proc(dbenvcl_id, replyp)$/ __env_get_open_flags_proc ../rpc_server/c/db_server_proc.c /^__env_get_open_flags_proc(dbenvcl_id, replyp)$/ -__env_open_proc ../rpc_server/c/db_server_proc.c /^__env_open_proc(dbenvcl_id, home, flags,$/ +__env_open_proc ../rpc_server/c/db_server_proc.c /^__env_open_proc(dbenvcl_id, home, flags, mode, rep/ __env_openfiles ../env/env_recover.c /^__env_openfiles(dbenv, logc, txninfo,$/ __env_remove_proc ../rpc_server/c/db_server_proc.c /^__env_remove_proc(dbenvcl_id, home, flags, replyp)/ __env_rep_enter ../rep/rep_util.c /^__env_rep_enter(dbenv)$/ -__env_rep_exit ../rep/rep_util.c /^__env_rep_exit(dbenv)$/ __fop_create ../fileops/fop_basic.c /^__fop_create(dbenv, txn, fhpp, name, appname, mode/ -__fop_create_getpgnos ../fileops/fileops_auto.c /^__fop_create_getpgnos(dbenv, rec, lsnp, notused1, / __fop_create_log ../fileops/fileops_auto.c /^__fop_create_log(dbenv, txnid, ret_lsnp, flags,$/ -__fop_create_print ../fileops/fileops_auto.c /^__fop_create_print(dbenv, dbtp, lsnp, notused2, no/ +__fop_create_print ../fileops/fileops_autop.c /^__fop_create_print(dbenv, dbtp, lsnp, notused2, no/ __fop_create_read ../fileops/fileops_auto.c /^__fop_create_read(dbenv, recbuf, argpp)$/ __fop_create_recover ../fileops/fop_rec.c /^__fop_create_recover(dbenv, dbtp, lsnp, op, info)$/ __fop_dbrename ../fileops/fop_util.c 
/^__fop_dbrename(dbp, old, new)$/ __fop_dummy ../fileops/fop_util.c /^__fop_dummy(dbp, txn, old, new, flags)$/ -__fop_file_remove_getpgnos ../fileops/fileops_auto.c /^__fop_file_remove_getpgnos(dbenv, rec, lsnp, notus/ __fop_file_remove_log ../fileops/fileops_auto.c /^__fop_file_remove_log(dbenv, txnid, ret_lsnp, flag/ -__fop_file_remove_print ../fileops/fileops_auto.c /^__fop_file_remove_print(dbenv, dbtp, lsnp, notused/ +__fop_file_remove_print ../fileops/fileops_autop.c /^__fop_file_remove_print(dbenv, dbtp, lsnp, notused/ __fop_file_remove_read ../fileops/fileops_auto.c /^__fop_file_remove_read(dbenv, recbuf, argpp)$/ __fop_file_remove_recover ../fileops/fop_rec.c /^__fop_file_remove_recover(dbenv, dbtp, lsnp, op, i/ __fop_file_setup ../fileops/fop_util.c /^__fop_file_setup(dbp, txn, name, mode, flags, reti/ -__fop_init_getpgnos ../fileops/fileops_auto.c /^__fop_init_getpgnos(dbenv, dtabp, dtabsizep)$/ -__fop_init_print ../fileops/fileops_auto.c /^__fop_init_print(dbenv, dtabp, dtabsizep)$/ +__fop_init_print ../fileops/fileops_autop.c /^__fop_init_print(dbenv, dtabp, dtabsizep)$/ __fop_init_recover ../fileops/fileops_auto.c /^__fop_init_recover(dbenv, dtabp, dtabsizep)$/ -__fop_lock_handle ../fileops/fop_util.c /^__fop_lock_handle(dbenv, dbp, locker, mode, elock,/ +__fop_lock_handle ../fileops/fop_util.c /^__fop_lock_handle(dbenv, dbp, locker, mode, elockp/ __fop_read_meta ../fileops/fop_util.c /^__fop_read_meta(dbenv, name, buf, size, fhp, errok/ __fop_remove ../fileops/fop_basic.c /^__fop_remove(dbenv, txn, fileid, name, appname, fl/ -__fop_remove_getpgnos ../fileops/fileops_auto.c /^__fop_remove_getpgnos(dbenv, rec, lsnp, notused1, / __fop_remove_log ../fileops/fileops_auto.c /^__fop_remove_log(dbenv, txnid, ret_lsnp, flags,$/ -__fop_remove_print ../fileops/fileops_auto.c /^__fop_remove_print(dbenv, dbtp, lsnp, notused2, no/ +__fop_remove_print ../fileops/fileops_autop.c /^__fop_remove_print(dbenv, dbtp, lsnp, notused2, no/ __fop_remove_read ../fileops/fileops_auto.c /^__fop_remove_read(dbenv, recbuf, argpp)$/ __fop_remove_recover ../fileops/fop_rec.c /^__fop_remove_recover(dbenv, dbtp, lsnp, op, info)$/ __fop_remove_setup ../fileops/fop_util.c /^__fop_remove_setup(dbp, txn, name, flags)$/ __fop_rename ../fileops/fop_basic.c /^__fop_rename(dbenv, txn, oldname, newname, fid, ap/ -__fop_rename_getpgnos ../fileops/fileops_auto.c /^__fop_rename_getpgnos(dbenv, rec, lsnp, notused1, / __fop_rename_log ../fileops/fileops_auto.c /^__fop_rename_log(dbenv, txnid, ret_lsnp, flags,$/ -__fop_rename_print ../fileops/fileops_auto.c /^__fop_rename_print(dbenv, dbtp, lsnp, notused2, no/ +__fop_rename_print ../fileops/fileops_autop.c /^__fop_rename_print(dbenv, dbtp, lsnp, notused2, no/ __fop_rename_read ../fileops/fileops_auto.c /^__fop_rename_read(dbenv, recbuf, argpp)$/ __fop_rename_recover ../fileops/fop_rec.c /^__fop_rename_recover(dbenv, dbtp, lsnp, op, info)$/ __fop_set_pgsize ../fileops/fop_util.c /^__fop_set_pgsize(dbp, fhp, name)$/ __fop_subdb_setup ../fileops/fop_util.c /^__fop_subdb_setup(dbp, txn, mname, name, mode, fla/ __fop_write ../fileops/fop_basic.c /^__fop_write(dbenv,$/ -__fop_write_getpgnos ../fileops/fileops_auto.c /^__fop_write_getpgnos(dbenv, rec, lsnp, notused1, s/ __fop_write_log ../fileops/fileops_auto.c /^__fop_write_log(dbenv, txnid, ret_lsnp, flags,$/ -__fop_write_print ../fileops/fileops_auto.c /^__fop_write_print(dbenv, dbtp, lsnp, notused2, not/ +__fop_write_print ../fileops/fileops_autop.c /^__fop_write_print(dbenv, dbtp, lsnp, notused2, not/ __fop_write_read 
../fileops/fileops_auto.c /^__fop_write_read(dbenv, recbuf, argpp)$/ __fop_write_recover ../fileops/fop_rec.c /^__fop_write_recover(dbenv, dbtp, lsnp, op, info)$/ __ham_30_hashmeta ../hash/hash_upgrade.c /^__ham_30_hashmeta(dbp, real_name, obuf)$/ @@ -3116,21 +3301,17 @@ __ham_c_update ../hash/hash.c /^__ham_c_update(dbc, len, add, is_dup)$/ __ham_c_writelock ../hash/hash.c /^__ham_c_writelock(dbc)$/ __ham_call_hash ../hash/hash.c /^__ham_call_hash(dbc, k, len)$/ __ham_check_move ../hash/hash_dup.c /^__ham_check_move(dbc, add_len)$/ -__ham_chgpg_getpgnos ../hash/hash_auto.c /^__ham_chgpg_getpgnos(dbenv, rec, lsnp, notused1, s/ __ham_chgpg_log ../hash/hash_auto.c /^__ham_chgpg_log(dbp, txnid, ret_lsnp, flags, mode,/ -__ham_chgpg_print ../hash/hash_auto.c /^__ham_chgpg_print(dbenv, dbtp, lsnp, notused2, not/ +__ham_chgpg_print ../hash/hash_autop.c /^__ham_chgpg_print(dbenv, dbtp, lsnp, notused2, not/ __ham_chgpg_read ../hash/hash_auto.c /^__ham_chgpg_read(dbenv, recbuf, argpp)$/ __ham_chgpg_recover ../hash/hash_rec.c /^__ham_chgpg_recover(dbenv, dbtp, lsnp, op, info)$/ __ham_copy_item ../hash/hash_page.c /^__ham_copy_item(dbp, src_page, src_ndx, dest_page)/ -__ham_copypage_getpgnos ../hash/hash_auto.c /^__ham_copypage_getpgnos(dbenv, rec, lsnp, notused1/ __ham_copypage_log ../hash/hash_auto.c /^__ham_copypage_log(dbp, txnid, ret_lsnp, flags, pg/ -__ham_copypage_print ../hash/hash_auto.c /^__ham_copypage_print(dbenv, dbtp, lsnp, notused2, / +__ham_copypage_print ../hash/hash_autop.c /^__ham_copypage_print(dbenv, dbtp, lsnp, notused2, / __ham_copypage_read ../hash/hash_auto.c /^__ham_copypage_read(dbenv, recbuf, argpp)$/ __ham_copypage_recover ../hash/hash_rec.c /^__ham_copypage_recover(dbenv, dbtp, lsnp, op, info/ -__ham_cprint ../hash/hash_dup.c /^__ham_cprint(dbc)$/ -__ham_curadj_getpgnos ../hash/hash_auto.c /^__ham_curadj_getpgnos(dbenv, rec, lsnp, notused1, / __ham_curadj_log ../hash/hash_auto.c /^__ham_curadj_log(dbp, txnid, ret_lsnp, flags, pgno/ -__ham_curadj_print ../hash/hash_auto.c /^__ham_curadj_print(dbenv, dbtp, lsnp, notused2, no/ +__ham_curadj_print ../hash/hash_autop.c /^__ham_curadj_print(dbenv, dbtp, lsnp, notused2, no/ __ham_curadj_read ../hash/hash_auto.c /^__ham_curadj_read(dbenv, recbuf, argpp)$/ __ham_curadj_recover ../hash/hash_rec.c /^__ham_curadj_recover(dbenv, dbtp, lsnp, op, info)$/ __ham_db_close ../hash/hash_method.c /^__ham_db_close(dbp)$/ @@ -3153,19 +3334,16 @@ __ham_get_cpage ../hash/hash_page.c /^__ham_get_cpage(dbc, mode)$/ __ham_get_h_ffactor ../hash/hash_method.c /^__ham_get_h_ffactor(dbp, h_ffactorp)$/ __ham_get_h_nelem ../hash/hash_method.c /^__ham_get_h_nelem(dbp, h_nelemp)$/ __ham_get_meta ../hash/hash_meta.c /^__ham_get_meta(dbc)$/ -__ham_groupalloc_getpgnos ../hash/hash_auto.c /^__ham_groupalloc_getpgnos(dbenv, rec, lsnp, notuse/ __ham_groupalloc_log ../hash/hash_auto.c /^__ham_groupalloc_log(dbp, txnid, ret_lsnp, flags, / -__ham_groupalloc_print ../hash/hash_auto.c /^__ham_groupalloc_print(dbenv, dbtp, lsnp, notused2/ +__ham_groupalloc_print ../hash/hash_autop.c /^__ham_groupalloc_print(dbenv, dbtp, lsnp, notused2/ __ham_groupalloc_read ../hash/hash_auto.c /^__ham_groupalloc_read(dbenv, recbuf, argpp)$/ __ham_groupalloc_recover ../hash/hash_rec.c /^__ham_groupalloc_recover(dbenv, dbtp, lsnp, op, in/ __ham_init_dbt ../hash/hash.c /^__ham_init_dbt(dbenv, dbt, size, bufp, sizep)$/ -__ham_init_getpgnos ../hash/hash_auto.c /^__ham_init_getpgnos(dbenv, dtabp, dtabsizep)$/ __ham_init_meta ../hash/hash_open.c /^__ham_init_meta(dbp, meta, pgno, lsnp)$/ 
-__ham_init_print ../hash/hash_auto.c /^__ham_init_print(dbenv, dtabp, dtabsizep)$/ +__ham_init_print ../hash/hash_autop.c /^__ham_init_print(dbenv, dtabp, dtabsizep)$/ __ham_init_recover ../hash/hash_auto.c /^__ham_init_recover(dbenv, dtabp, dtabsizep)$/ -__ham_insdel_getpgnos ../hash/hash_auto.c /^__ham_insdel_getpgnos(dbenv, rec, lsnp, notused1, / __ham_insdel_log ../hash/hash_auto.c /^__ham_insdel_log(dbp, txnid, ret_lsnp, flags,$/ -__ham_insdel_print ../hash/hash_auto.c /^__ham_insdel_print(dbenv, dbtp, lsnp, notused2, no/ +__ham_insdel_print ../hash/hash_autop.c /^__ham_insdel_print(dbenv, dbtp, lsnp, notused2, no/ __ham_insdel_read ../hash/hash_auto.c /^__ham_insdel_read(dbenv, recbuf, argpp)$/ __ham_insdel_recover ../hash/hash_rec.c /^__ham_insdel_recover(dbenv, dbtp, lsnp, op, info)$/ __ham_item ../hash/hash_page.c /^__ham_item(dbc, mode, pgnop)$/ @@ -3180,18 +3358,16 @@ __ham_lookup ../hash/hash.c /^__ham_lookup(dbc, key, sought, mode, pgnop)$/ __ham_make_dup ../hash/hash_dup.c /^__ham_make_dup(dbenv, notdup, duplicate, bufp, siz/ __ham_meta2pgset ../hash/hash_verify.c /^int __ham_meta2pgset(dbp, vdp, hmeta, flags, pgset/ __ham_metachk ../hash/hash_open.c /^__ham_metachk(dbp, name, hashm)$/ -__ham_metagroup_getpgnos ../hash/hash_auto.c /^__ham_metagroup_getpgnos(dbenv, rec, lsnp, notused/ __ham_metagroup_log ../hash/hash_auto.c /^__ham_metagroup_log(dbp, txnid, ret_lsnp, flags, b/ -__ham_metagroup_print ../hash/hash_auto.c /^__ham_metagroup_print(dbenv, dbtp, lsnp, notused2,/ +__ham_metagroup_print ../hash/hash_autop.c /^__ham_metagroup_print(dbenv, dbtp, lsnp, notused2,/ __ham_metagroup_read ../hash/hash_auto.c /^__ham_metagroup_read(dbenv, recbuf, argpp)$/ __ham_metagroup_recover ../hash/hash_rec.c /^__ham_metagroup_recover(dbenv, dbtp, lsnp, op, inf/ __ham_move_offpage ../hash/hash_dup.c /^__ham_move_offpage(dbc, pagep, ndx, pgno)$/ __ham_mswap ../hash/hash_conv.c /^__ham_mswap(pg)$/ __ham_new_file ../hash/hash_open.c /^__ham_new_file(dbp, txn, fhp, name)$/ __ham_new_subdb ../hash/hash_open.c /^__ham_new_subdb(mdbp, dbp, txn)$/ -__ham_newpage_getpgnos ../hash/hash_auto.c /^__ham_newpage_getpgnos(dbenv, rec, lsnp, notused1,/ __ham_newpage_log ../hash/hash_auto.c /^__ham_newpage_log(dbp, txnid, ret_lsnp, flags,$/ -__ham_newpage_print ../hash/hash_auto.c /^__ham_newpage_print(dbenv, dbtp, lsnp, notused2, n/ +__ham_newpage_print ../hash/hash_autop.c /^__ham_newpage_print(dbenv, dbtp, lsnp, notused2, n/ __ham_newpage_read ../hash/hash_auto.c /^__ham_newpage_read(dbenv, recbuf, argpp)$/ __ham_newpage_recover ../hash/hash_rec.c /^__ham_newpage_recover(dbenv, dbtp, lsnp, op, info)/ __ham_next_cpage ../hash/hash_page.c /^__ham_next_cpage(dbc, pgno, dirty)$/ @@ -3200,13 +3376,13 @@ __ham_open ../hash/hash_open.c /^__ham_open(dbp, txn, name, base_pgno, flags)$/ __ham_overwrite ../hash/hash.c /^__ham_overwrite(dbc, nval, flags)$/ __ham_pgin ../hash/hash_conv.c /^__ham_pgin(dbenv, dummydbp, pg, pp, cookie)$/ __ham_pgout ../hash/hash_conv.c /^__ham_pgout(dbenv, dummydbp, pg, pp, cookie)$/ +__ham_print_cursor ../hash/hash_stat.c /^__ham_print_cursor(dbc)$/ __ham_putitem ../hash/hash_page.c /^__ham_putitem(dbp, p, dbt, type)$/ __ham_quick_delete ../hash/hash.c /^__ham_quick_delete(dbc)$/ __ham_reclaim ../hash/hash_reclaim.c /^__ham_reclaim(dbp, txn)$/ __ham_release_meta ../hash/hash_meta.c /^__ham_release_meta(dbc)$/ -__ham_replace_getpgnos ../hash/hash_auto.c /^__ham_replace_getpgnos(dbenv, rec, lsnp, notused1,/ __ham_replace_log ../hash/hash_auto.c /^__ham_replace_log(dbp, txnid, ret_lsnp, 
flags, pgn/ -__ham_replace_print ../hash/hash_auto.c /^__ham_replace_print(dbenv, dbtp, lsnp, notused2, n/ +__ham_replace_print ../hash/hash_autop.c /^__ham_replace_print(dbenv, dbtp, lsnp, notused2, n/ __ham_replace_read ../hash/hash_auto.c /^__ham_replace_read(dbenv, recbuf, argpp)$/ __ham_replace_recover ../hash/hash_rec.c /^__ham_replace_recover(dbenv, dbtp, lsnp, op, info)/ __ham_replpair ../hash/hash_page.c /^__ham_replpair(dbc, dbt, make_dup)$/ @@ -3216,13 +3392,13 @@ __ham_set_h_ffactor ../hash/hash_method.c /^__ham_set_h_ffactor(dbp, h_ffactor)$ __ham_set_h_hash ../hash/hash_method.c /^__ham_set_h_hash(dbp, func)$/ __ham_set_h_nelem ../hash/hash_method.c /^__ham_set_h_nelem(dbp, h_nelem)$/ __ham_split_page ../hash/hash_page.c /^__ham_split_page(dbc, obucket, nbucket)$/ -__ham_splitdata_getpgnos ../hash/hash_auto.c /^__ham_splitdata_getpgnos(dbenv, rec, lsnp, notused/ __ham_splitdata_log ../hash/hash_auto.c /^__ham_splitdata_log(dbp, txnid, ret_lsnp, flags, o/ -__ham_splitdata_print ../hash/hash_auto.c /^__ham_splitdata_print(dbenv, dbtp, lsnp, notused2,/ +__ham_splitdata_print ../hash/hash_autop.c /^__ham_splitdata_print(dbenv, dbtp, lsnp, notused2,/ __ham_splitdata_read ../hash/hash_auto.c /^__ham_splitdata_read(dbenv, recbuf, argpp)$/ __ham_splitdata_recover ../hash/hash_rec.c /^__ham_splitdata_recover(dbenv, dbtp, lsnp, op, inf/ __ham_stat ../hash/hash_stat.c /^__ham_stat(dbc, spp, flags)$/ __ham_stat_callback ../hash/hash_stat.c /^__ham_stat_callback(dbp, pagep, cookie, putp)$/ +__ham_stat_print ../hash/hash_stat.c /^__ham_stat_print(dbc, flags)$/ __ham_test ../hash/hash_func.c /^__ham_test(dbp, key, len)$/ __ham_traverse ../hash/hash_stat.c /^__ham_traverse(dbc, mode, callback, cookie, look_p/ __ham_truncate ../hash/hash_reclaim.c /^__ham_truncate(dbc, countp)$/ @@ -3233,7 +3409,7 @@ __ham_vrfy_item ../hash/hash_verify.c /^__ham_vrfy_item(dbp, vdp, pgno, h, i, fl __ham_vrfy_meta ../hash/hash_verify.c /^__ham_vrfy_meta(dbp, vdp, m, pgno, flags)$/ __ham_vrfy_structure ../hash/hash_verify.c /^__ham_vrfy_structure(dbp, vdp, meta_pgno, flags)$/ __int64 ../libdb_java/db_java_wrap.c 13 -__lock_addfamilylocker ../lock/lock.c /^__lock_addfamilylocker(dbenv, pid, id)$/ +__lock_addfamilylocker ../lock/lock_id.c /^__lock_addfamilylocker(dbenv, pid, id)$/ __lock_cmp ../lock/lock_util.c /^__lock_cmp(dbt, lock_obj)$/ __lock_dbenv_close ../lock/lock_method.c /^__lock_dbenv_close(dbenv)$/ __lock_dbenv_create ../lock/lock_method.c /^__lock_dbenv_create(dbenv)$/ @@ -3241,49 +3417,53 @@ __lock_dbenv_refresh ../lock/lock_region.c /^__lock_dbenv_refresh(dbenv)$/ __lock_detect ../lock/lock_deadlock.c /^__lock_detect(dbenv, atype, abortp)$/ __lock_detect_pp ../lock/lock_deadlock.c /^__lock_detect_pp(dbenv, flags, atype, abortp)$/ __lock_downgrade ../lock/lock.c /^__lock_downgrade(dbenv, lock, new_mode, flags)$/ -__lock_dump_locker ../lock/lock_stat.c /^__lock_dump_locker(lt, lip, fp)$/ -__lock_dump_object ../lock/lock_stat.c /^__lock_dump_object(lt, op, fp)$/ -__lock_dump_region ../lock/lock_stat.c /^__lock_dump_region(dbenv, area, fp)$/ -__lock_expired ../lock/lock.c /^__lock_expired(dbenv, now, timevalp)$/ -__lock_expires ../lock/lock.c /^__lock_expires(dbenv, timevalp, timeout)$/ -__lock_fix_list ../lock/lock.c /^__lock_fix_list(dbenv, list_dbt, nlocks)$/ -__lock_freefamilylocker ../lock/lock.c /^__lock_freefamilylocker(lt, locker)$/ +__lock_dump_locker ../lock/lock_stat.c /^__lock_dump_locker(dbenv, mbp, lt, lip)$/ +__lock_dump_object ../lock/lock_stat.c /^__lock_dump_object(lt, mbp, op)$/ 
+__lock_expired ../lock/lock_timer.c /^__lock_expired(dbenv, now, timevalp)$/ +__lock_expires ../lock/lock_timer.c /^__lock_expires(dbenv, timevalp, timeout)$/ +__lock_fix_list ../lock/lock_list.c /^__lock_fix_list(dbenv, list_dbt, nlocks)$/ +__lock_freefamilylocker ../lock/lock_id.c /^__lock_freefamilylocker(lt, locker)$/ __lock_freelock ../lock/lock.c /^__lock_freelock(lt, lockp, locker, flags)$/ -__lock_freelocker ../lock/lock.c /^__lock_freelocker(lt, region, sh_locker, indx)$/ +__lock_freelocker ../lock/lock_id.c /^__lock_freelocker(lt, region, sh_locker, indx)$/ __lock_get ../lock/lock.c /^__lock_get(dbenv, locker, flags, obj, lock_mode, l/ __lock_get_env_timeout ../lock/lock_method.c /^__lock_get_env_timeout(dbenv, timeoutp, flag)$/ __lock_get_internal ../lock/lock.c /^__lock_get_internal(lt, locker, flags, obj, lock_m/ -__lock_get_list ../lock/lock.c /^__lock_get_list(dbenv, locker, flags, lock_mode, l/ +__lock_get_list ../lock/lock_list.c /^__lock_get_list(dbenv, locker, flags, lock_mode, l/ __lock_get_lk_conflicts ../lock/lock_method.c /^__lock_get_lk_conflicts(dbenv, lk_conflictsp, lk_m/ __lock_get_lk_detect ../lock/lock_method.c /^__lock_get_lk_detect(dbenv, lk_detectp)$/ __lock_get_lk_max_lockers ../lock/lock_method.c /^__lock_get_lk_max_lockers(dbenv, lk_maxp)$/ __lock_get_lk_max_locks ../lock/lock_method.c /^__lock_get_lk_max_locks(dbenv, lk_maxp)$/ __lock_get_lk_max_objects ../lock/lock_method.c /^__lock_get_lk_max_objects(dbenv, lk_maxp)$/ __lock_get_pp ../lock/lock.c /^__lock_get_pp(dbenv, locker, flags, obj, lock_mode/ -__lock_getlocker ../lock/lock.c /^__lock_getlocker(lt, locker, indx, create, retp)$/ +__lock_getlocker ../lock/lock_id.c /^__lock_getlocker(lt, locker, indx, create, retp)$/ __lock_getobj ../lock/lock.c /^__lock_getobj(lt, obj, ndx, create, retp)$/ -__lock_id ../lock/lock.c /^__lock_id(dbenv, idp)$/ -__lock_id_free ../lock/lock.c /^__lock_id_free(dbenv, id)$/ -__lock_id_free_pp ../lock/lock.c /^__lock_id_free_pp(dbenv, id)$/ -__lock_id_pp ../lock/lock.c /^__lock_id_pp(dbenv, idp)$/ -__lock_id_set ../lock/lock_region.c /^__lock_id_set(dbenv, cur_id, max_id)$/ +__lock_id ../lock/lock_id.c /^__lock_id(dbenv, idp)$/ +__lock_id_free ../lock/lock_id.c /^__lock_id_free(dbenv, id)$/ +__lock_id_free_pp ../lock/lock_id.c /^__lock_id_free_pp(dbenv, id)$/ +__lock_id_pp ../lock/lock_id.c /^__lock_id_pp(dbenv, idp)$/ +__lock_id_set ../lock/lock_id.c /^__lock_id_set(dbenv, cur_id, max_id)$/ __lock_inherit_locks ../lock/lock.c /^__lock_inherit_locks(lt, locker, flags)$/ -__lock_inherit_timeout ../lock/lock.c /^__lock_inherit_timeout(dbenv, parent, locker)$/ -__lock_init ../lock/lock_region.c /^__lock_init(dbenv, lt)$/ +__lock_inherit_timeout ../lock/lock_timer.c /^__lock_inherit_timeout(dbenv, parent, locker)$/ __lock_is_parent ../lock/lock.c /^__lock_is_parent(lt, locker, sh_locker)$/ __lock_lhash ../lock/lock_util.c /^__lock_lhash(lock_obj)$/ +__lock_list_print ../lock/lock_list.c /^__lock_list_print(dbenv, list)$/ __lock_locker_cmp ../lock/lock_util.c /^__lock_locker_cmp(locker, sh_locker)$/ __lock_locker_hash ../lock/lock_util.c /^__lock_locker_hash(locker)$/ +__lock_locker_is_parent ../lock/lock.c /^__lock_locker_is_parent(dbenv, locker, child, retp/ +__lock_nomem ../lock/lock_util.c /^__lock_nomem(dbenv, res)$/ __lock_ohash ../lock/lock_util.c /^__lock_ohash(dbt)$/ __lock_open ../lock/lock_region.c /^__lock_open(dbenv)$/ -__lock_printheader ../lock/lock_stat.c /^__lock_printheader(fp)$/ -__lock_printlock ../lock/lock_stat.c /^__lock_printlock(lt, lp, ispgno, fp)$/ 
+__lock_print_all ../lock/lock_stat.c /^__lock_print_all(dbenv, flags)$/ +__lock_print_header ../lock/lock_stat.c /^__lock_print_header(dbenv)$/ +__lock_print_stats ../lock/lock_stat.c /^__lock_print_stats(dbenv, flags)$/ +__lock_printlock ../lock/lock_stat.c /^__lock_printlock(lt, mbp, lp, ispgno)$/ __lock_promote ../lock/lock.c /^__lock_promote(lt, obj, flags)$/ -__lock_put ../lock/lock.c /^__lock_put(dbenv, lock)$/ +__lock_put ../lock/lock.c /^__lock_put(dbenv, lock, flags)$/ __lock_put_internal ../lock/lock.c /^__lock_put_internal(lt, lockp, obj_ndx, flags)$/ __lock_put_nolock ../lock/lock.c /^__lock_put_nolock(dbenv, lock, runp, flags)$/ __lock_put_pp ../lock/lock.c /^__lock_put_pp(dbenv, lock)$/ __lock_region_destroy ../lock/lock_region.c /^__lock_region_destroy(dbenv, infop)$/ +__lock_region_init ../lock/lock_region.c /^__lock_region_init(dbenv, lt)$/ __lock_region_maint ../lock/lock_region.c /^__lock_region_maint(dbenv)$/ __lock_region_size ../lock/lock_region.c /^__lock_region_size(dbenv)$/ __lock_remove_waiter ../lock/lock.c /^__lock_remove_waiter(lt, sh_obj, lockp, status)$/ @@ -3294,18 +3474,20 @@ __lock_set_lk_max ../lock/lock_method.c /^__lock_set_lk_max(dbenv, lk_max)$/ __lock_set_lk_max_lockers ../lock/lock_method.c /^__lock_set_lk_max_lockers(dbenv, lk_max)$/ __lock_set_lk_max_locks ../lock/lock_method.c /^__lock_set_lk_max_locks(dbenv, lk_max)$/ __lock_set_lk_max_objects ../lock/lock_method.c /^__lock_set_lk_max_objects(dbenv, lk_max)$/ -__lock_set_timeout ../lock/lock.c /^__lock_set_timeout(dbenv, locker, timeout, op)$/ -__lock_set_timeout_internal ../lock/lock.c /^__lock_set_timeout_internal(dbenv, locker, timeout/ -__lock_sort_cmp ../lock/lock.c /^__lock_sort_cmp(a, b)$/ +__lock_set_timeout ../lock/lock_timer.c /^__lock_set_timeout(dbenv, locker, timeout, op)$/ +__lock_set_timeout_internal ../lock/lock_timer.c /^__lock_set_timeout_internal(dbenv, locker, timeout/ +__lock_sort_cmp ../lock/lock_list.c /^__lock_sort_cmp(a, b)$/ __lock_stat ../lock/lock_stat.c /^__lock_stat(dbenv, statp, flags)$/ __lock_stat_pp ../lock/lock_stat.c /^__lock_stat_pp(dbenv, statp, flags)$/ +__lock_stat_print ../lock/lock_stat.c /^__lock_stat_print(dbenv, flags)$/ +__lock_stat_print_pp ../lock/lock_stat.c /^__lock_stat_print_pp(dbenv, flags)$/ __lock_trade ../lock/lock.c /^__lock_trade(dbenv, lock, new_locker)$/ __lock_vec ../lock/lock.c /^__lock_vec(dbenv, locker, flags, list, nlist, elis/ __lock_vec_pp ../lock/lock.c /^__lock_vec_pp(dbenv, locker, flags, list, nlist, e/ __log_archive ../log/log_archive.c /^__log_archive(dbenv, listp, flags)$/ __log_archive_pp ../log/log_archive.c /^__log_archive_pp(dbenv, listp, flags)$/ -__log_autoremove ../log/log.c /^__log_autoremove(dbenv)$/ -__log_backup ../env/env_recover.c /^__log_backup(dbenv, logc, max_lsn, start_lsn)$/ +__log_autoremove ../log/log_archive.c /^__log_autoremove(dbenv)$/ +__log_backup ../env/env_recover.c /^__log_backup(dbenv, logc, max_lsn, start_lsn, cmp)/ __log_c_close ../log/log_get.c /^__log_c_close(logc)$/ __log_c_close_pp ../log/log_get.c /^__log_c_close_pp(logc, flags)$/ __log_c_get ../log/log_get.c /^__log_c_get(logc, alsn, dbt, flags)$/ @@ -3318,6 +3500,7 @@ __log_c_io ../log/log_get.c /^__log_c_io(logc, fnum, offset, p, nrp, eofp)$/ __log_c_ondisk ../log/log_get.c /^__log_c_ondisk(logc, lsn, last_lsn, flags, hdr, pp/ __log_c_set_maxrec ../log/log_get.c /^__log_c_set_maxrec(logc, np)$/ __log_c_shortread ../log/log_get.c /^__log_c_shortread(logc, lsn, check_silent)$/ +__log_check_sizes ../log/log_method.c 
/^__log_check_sizes(dbenv, lg_max, lg_bsize)$/ __log_cursor ../log/log_get.c /^__log_cursor(dbenv, logcp)$/ __log_cursor_pp ../log/log_get.c /^__log_cursor_pp(dbenv, logcp, flags)$/ __log_dbenv_create ../log/log_method.c /^__log_dbenv_create(dbenv)$/ @@ -3333,16 +3516,24 @@ __log_flush_commit ../log/log_put.c /^__log_flush_commit(dbenv, lsnp, flags)$/ __log_flush_int ../log/log_put.c /^__log_flush_int(dblp, lsnp, release)$/ __log_flush_pp ../log/log_put.c /^__log_flush_pp(dbenv, lsn)$/ __log_get_cached_ckp_lsn ../log/log.c /^__log_get_cached_ckp_lsn(dbenv, ckp_lsnp)$/ +__log_get_flags ../log/log_method.c /^__log_get_flags(dbenv, flagsp)$/ __log_get_lg_bsize ../log/log_method.c /^__log_get_lg_bsize(dbenv, lg_bsizep)$/ __log_get_lg_dir ../log/log_method.c /^__log_get_lg_dir(dbenv, dirp)$/ __log_get_lg_max ../log/log_method.c /^__log_get_lg_max(dbenv, lg_maxp)$/ __log_get_lg_regionmax ../log/log_method.c /^__log_get_lg_regionmax(dbenv, lg_regionmaxp)$/ __log_init ../log/log.c /^__log_init(dbenv, dblp)$/ +__log_inmem_chkspace ../log/log.c /^__log_inmem_chkspace(dblp, len)$/ +__log_inmem_copyin ../log/log.c /^__log_inmem_copyin(dblp, offset, buf, size)$/ +__log_inmem_copyout ../log/log.c /^__log_inmem_copyout(dblp, offset, buf, size)$/ +__log_inmem_lsnoff ../log/log.c /^__log_inmem_lsnoff(dblp, lsn, offsetp)$/ +__log_inmem_newfile ../log/log.c /^__log_inmem_newfile(dblp, file)$/ __log_is_outdated ../log/log.c /^__log_is_outdated(dbenv, fnum, outdatedp)$/ __log_name ../log/log_put.c /^__log_name(dblp, filenumber, namep, fhpp, flags)$/ -__log_newfh ../log/log_put.c /^__log_newfh(dblp)$/ -__log_newfile ../log/log_put.c /^__log_newfile(dblp, lsnp)$/ +__log_newfh ../log/log_put.c /^__log_newfh(dblp, create)$/ +__log_newfile ../log/log_put.c /^__log_newfile(dblp, lsnp, logfile)$/ __log_open ../log/log.c /^__log_open(dbenv)$/ +__log_print_all ../log/log_stat.c /^__log_print_all(dbenv, flags)$/ +__log_print_stats ../log/log_stat.c /^__log_print_stats(dbenv, flags)$/ __log_put ../log/log_put.c /^__log_put(dbenv, lsnp, udbt, flags)$/ __log_put_next ../log/log_put.c /^__log_put_next(dbenv, lsn, dbt, hdr, old_lsnp)$/ __log_put_pp ../log/log_put.c /^__log_put_pp(dbenv, lsnp, udbt, flags)$/ @@ -3351,12 +3542,15 @@ __log_recover ../log/log.c /^__log_recover(dblp)$/ __log_region_destroy ../log/log.c /^__log_region_destroy(dbenv, infop)$/ __log_region_size ../log/log.c /^__log_region_size(dbenv)$/ __log_rep_put ../log/log_put.c /^__log_rep_put(dbenv, lsnp, rec)$/ +__log_set_flags ../log/log_method.c /^__log_set_flags(dbenv, flags, on)$/ __log_set_lg_bsize ../log/log_method.c /^__log_set_lg_bsize(dbenv, lg_bsize)$/ __log_set_lg_dir ../log/log_method.c /^__log_set_lg_dir(dbenv, dir)$/ __log_set_lg_max ../log/log_method.c /^__log_set_lg_max(dbenv, lg_max)$/ __log_set_lg_regionmax ../log/log_method.c /^__log_set_lg_regionmax(dbenv, lg_regionmax)$/ -__log_stat ../log/log.c /^__log_stat(dbenv, statp, flags)$/ -__log_stat_pp ../log/log.c /^__log_stat_pp(dbenv, statp, flags)$/ +__log_stat ../log/log_stat.c /^__log_stat(dbenv, statp, flags)$/ +__log_stat_pp ../log/log_stat.c /^__log_stat_pp(dbenv, statp, flags)$/ +__log_stat_print ../log/log_stat.c /^__log_stat_print(dbenv, flags)$/ +__log_stat_print_pp ../log/log_stat.c /^__log_stat_print_pp(dbenv, flags)$/ __log_txn_lsn ../log/log_put.c /^__log_txn_lsn(dbenv, lsnp, mbytesp, bytesp)$/ __log_valid ../log/log.c /^__log_valid(dblp, number, set_persist, fhpp, flags/ __log_vtruncate ../log/log.c /^__log_vtruncate(dbenv, lsn, ckplsn, trunclsn)$/ @@ -3365,23 +3559,21 @@ 
__log_zero ../log/log.c /^__log_zero(dbenv, from_lsn, to_lsn)$/ __lsn_diff ../env/env_recover.c /^__lsn_diff(low, high, current, max, is_forward)$/ __memp_alloc ../mp/mp_alloc.c /^__memp_alloc(dbmp, memreg, mfp, len, offsetp, retp/ __memp_bad_buffer ../mp/mp_alloc.c /^__memp_bad_buffer(hp)$/ -__memp_bhfree ../mp/mp_bh.c /^__memp_bhfree(dbmp, hp, bhp, free_mem)$/ +__memp_bhfree ../mp/mp_bh.c /^__memp_bhfree(dbmp, hp, bhp, flags)$/ __memp_bhwrite ../mp/mp_bh.c /^__memp_bhwrite(dbmp, hp, mfp, bhp, open_extents)$/ __memp_check_order ../mp/mp_alloc.c /^__memp_check_order(hp)$/ -__memp_close_flush_files ../mp/mp_sync.c /^__memp_close_flush_files(dbenv, dbmp)$/ +__memp_close_flush_files ../mp/mp_sync.c /^__memp_close_flush_files(dbenv, dbmp, dosync)$/ __memp_dbenv_create ../mp/mp_method.c /^__memp_dbenv_create(dbenv)$/ __memp_dbenv_refresh ../mp/mp_region.c /^__memp_dbenv_refresh(dbenv)$/ -__memp_dump_region ../mp/mp_stat.c /^__memp_dump_region(dbenv, area, fp)$/ -__memp_dumpcache ../mp/mp_stat.c /^__memp_dumpcache(dbenv, dbmp, reginfo, fmap, fp, f/ __memp_fclose ../mp/mp_fopen.c /^__memp_fclose(dbmfp, flags)$/ __memp_fclose_pp ../mp/mp_fopen.c /^__memp_fclose_pp(dbmfp, flags)$/ -__memp_fcreate ../mp/mp_fopen.c /^__memp_fcreate(dbenv, retp)$/ -__memp_fcreate_pp ../mp/mp_fopen.c /^__memp_fcreate_pp(dbenv, retp, flags)$/ +__memp_fcreate ../mp/mp_fmethod.c /^__memp_fcreate(dbenv, retp)$/ +__memp_fcreate_pp ../mp/mp_fmethod.c /^__memp_fcreate_pp(dbenv, retp, flags)$/ __memp_fget ../mp/mp_fget.c /^__memp_fget(dbmfp, pgnoaddr, flags, addrp)$/ __memp_fget_pp ../mp/mp_fget.c /^__memp_fget_pp(dbmfp, pgnoaddr, flags, addrp)$/ -__memp_fn ../mp/mp_fopen.c /^__memp_fn(dbmfp)$/ -__memp_fns ../mp/mp_fopen.c /^__memp_fns(dbmp, mfp)$/ -__memp_fopen ../mp/mp_fopen.c /^__memp_fopen(dbmfp, mfp, path, flags, mode, pagesi/ +__memp_fn ../mp/mp_fmethod.c /^__memp_fn(dbmfp)$/ +__memp_fns ../mp/mp_fmethod.c /^__memp_fns(dbmp, mfp)$/ +__memp_fopen ../mp/mp_fopen.c /^__memp_fopen(dbmfp, mfp, path, flags, mode, pgsize/ __memp_fopen_pp ../mp/mp_fopen.c /^__memp_fopen_pp(dbmfp, path, flags, mode, pagesize/ __memp_fput ../mp/mp_fput.c /^__memp_fput(dbmfp, pgaddr, flags)$/ __memp_fput_pp ../mp/mp_fput.c /^__memp_fput_pp(dbmfp, pgaddr, flags)$/ @@ -3389,45 +3581,57 @@ __memp_fset ../mp/mp_fset.c /^__memp_fset(dbmfp, pgaddr, flags)$/ __memp_fset_pp ../mp/mp_fset.c /^__memp_fset_pp(dbmfp, pgaddr, flags)$/ __memp_fsync ../mp/mp_sync.c /^__memp_fsync(dbmfp)$/ __memp_fsync_pp ../mp/mp_sync.c /^__memp_fsync_pp(dbmfp)$/ +__memp_ftruncate ../mp/mp_method.c /^__memp_ftruncate(dbmfp, pgno, flags)$/ __memp_get_cachesize ../mp/mp_method.c /^__memp_get_cachesize(dbenv, gbytesp, bytesp, ncach/ -__memp_get_clear_len ../mp/mp_fopen.c /^__memp_get_clear_len(dbmfp, clear_lenp)$/ -__memp_get_fileid ../mp/mp_fopen.c /^__memp_get_fileid(dbmfp, fileid)$/ -__memp_get_flags ../mp/mp_fopen.c /^__memp_get_flags(dbmfp, flagsp)$/ -__memp_get_ftype ../mp/mp_fopen.c /^__memp_get_ftype(dbmfp, ftypep)$/ -__memp_get_lsn_offset ../mp/mp_fopen.c /^__memp_get_lsn_offset(dbmfp, lsn_offsetp)$/ -__memp_get_maxsize ../mp/mp_fopen.c /^__memp_get_maxsize(dbmfp, gbytesp, bytesp)$/ -__memp_get_mp_maxwrite ../mp/mp_method.c /^__memp_get_mp_maxwrite(dbenv, maxwritep, maxwrite_/ +__memp_get_clear_len ../mp/mp_fmethod.c /^__memp_get_clear_len(dbmfp, clear_lenp)$/ +__memp_get_fileid ../mp/mp_fmethod.c /^__memp_get_fileid(dbmfp, fileid)$/ +__memp_get_flags ../mp/mp_fmethod.c /^__memp_get_flags(dbmfp, flagsp)$/ +__memp_get_ftype ../mp/mp_fmethod.c /^__memp_get_ftype(dbmfp, 
ftypep)$/ +__memp_get_lsn_offset ../mp/mp_fmethod.c /^__memp_get_lsn_offset(dbmfp, lsn_offsetp)$/ +__memp_get_maxsize ../mp/mp_fmethod.c /^__memp_get_maxsize(dbmfp, gbytesp, bytesp)$/ +__memp_get_mp_max_openfd ../mp/mp_method.c /^__memp_get_mp_max_openfd(dbenv, maxopenfdp)$/ +__memp_get_mp_max_write ../mp/mp_method.c /^__memp_get_mp_max_write(dbenv, maxwritep, maxwrite/ __memp_get_mp_mmapsize ../mp/mp_method.c /^__memp_get_mp_mmapsize(dbenv, mp_mmapsizep)$/ -__memp_get_pgcookie ../mp/mp_fopen.c /^__memp_get_pgcookie(dbmfp, pgcookie)$/ -__memp_get_priority ../mp/mp_fopen.c /^__memp_get_priority(dbmfp, priorityp)$/ +__memp_get_pgcookie ../mp/mp_fmethod.c /^__memp_get_pgcookie(dbmfp, pgcookie)$/ +__memp_get_priority ../mp/mp_fmethod.c /^__memp_get_priority(dbmfp, priorityp)$/ __memp_get_refcnt ../mp/mp_method.c /^__memp_get_refcnt(dbenv, fileid, refp)$/ -__memp_last_pgno ../mp/mp_fopen.c /^__memp_last_pgno(dbmfp, pgnoaddr)$/ +__memp_init ../mp/mp_region.c /^__memp_init(dbenv, dbmp, reginfo_off, htab_buckets/ +__memp_init_config ../mp/mp_region.c /^__memp_init_config(dbenv, mp)$/ +__memp_last_pgno ../mp/mp_fmethod.c /^__memp_last_pgno(dbmfp, pgnoaddr)$/ __memp_mf_discard ../mp/mp_fopen.c /^__memp_mf_discard(dbmp, mfp)$/ -__memp_mf_sync ../mp/mp_fopen.c /^__memp_mf_sync(dbmp, mfp)$/ +__memp_mf_sync ../mp/mp_sync.c /^__memp_mf_sync(dbmp, mfp)$/ __memp_nameop ../mp/mp_method.c /^__memp_nameop(dbenv, fileid, newname, fullold, ful/ __memp_open ../mp/mp_region.c /^__memp_open(dbenv)$/ -__memp_pbh ../mp/mp_stat.c /^__memp_pbh(dbmp, bhp, fmap, fp)$/ __memp_pg ../mp/mp_bh.c /^__memp_pg(dbmfp, bhp, is_pgin)$/ __memp_pgread ../mp/mp_bh.c /^__memp_pgread(dbmfp, mutexp, bhp, can_create)$/ __memp_pgwrite ../mp/mp_bh.c /^__memp_pgwrite(dbenv, dbmfp, hp, bhp)$/ +__memp_print_all ../mp/mp_stat.c /^__memp_print_all(dbenv, flags)$/ +__memp_print_bh ../mp/mp_stat.c /^__memp_print_bh(dbenv, dbmp, bhp, fmap, flags)$/ +__memp_print_hash ../mp/mp_stat.c /^__memp_print_hash(dbenv, dbmp, reginfo, fmap, flag/ +__memp_print_stats ../mp/mp_stat.c /^__memp_print_stats(dbenv, flags)$/ +__memp_region_destroy ../mp/mp_region.c /^__memp_region_destroy(dbenv, infop)$/ +__memp_region_maint ../mp/mp_region.c /^__memp_region_maint(infop)$/ __memp_register ../mp/mp_register.c /^__memp_register(dbenv, ftype, pgin, pgout)$/ __memp_register_pp ../mp/mp_register.c /^__memp_register_pp(dbenv, ftype, pgin, pgout)$/ __memp_reset_lru ../mp/mp_fput.c /^__memp_reset_lru(dbenv, memreg)$/ -__memp_set_cachesize ../mp/mp_method.c /^__memp_set_cachesize(dbenv, gbytes, bytes, ncache)/ -__memp_set_clear_len ../mp/mp_fopen.c /^__memp_set_clear_len(dbmfp, clear_len)$/ -__memp_set_fileid ../mp/mp_fopen.c /^__memp_set_fileid(dbmfp, fileid)$/ -__memp_set_flags ../mp/mp_fopen.c /^__memp_set_flags(dbmfp, flags, onoff)$/ -__memp_set_ftype ../mp/mp_fopen.c /^__memp_set_ftype(dbmfp, ftype)$/ -__memp_set_lsn_offset ../mp/mp_fopen.c /^__memp_set_lsn_offset(dbmfp, lsn_offset)$/ -__memp_set_maxsize ../mp/mp_fopen.c /^__memp_set_maxsize(dbmfp, gbytes, bytes)$/ -__memp_set_mp_maxwrite ../mp/mp_method.c /^__memp_set_mp_maxwrite(dbenv, maxwrite, maxwrite_s/ +__memp_set_cachesize ../mp/mp_method.c /^__memp_set_cachesize(dbenv, gbytes, bytes, arg_nca/ +__memp_set_clear_len ../mp/mp_fmethod.c /^__memp_set_clear_len(dbmfp, clear_len)$/ +__memp_set_fileid ../mp/mp_fmethod.c /^__memp_set_fileid(dbmfp, fileid)$/ +__memp_set_flags ../mp/mp_fmethod.c /^__memp_set_flags(dbmfp, flags, onoff)$/ +__memp_set_ftype ../mp/mp_fmethod.c /^__memp_set_ftype(dbmfp, ftype)$/ 
+__memp_set_lsn_offset ../mp/mp_fmethod.c /^__memp_set_lsn_offset(dbmfp, lsn_offset)$/ +__memp_set_maxsize ../mp/mp_fmethod.c /^__memp_set_maxsize(dbmfp, gbytes, bytes)$/ +__memp_set_mp_max_openfd ../mp/mp_method.c /^__memp_set_mp_max_openfd(dbenv, maxopenfd)$/ +__memp_set_mp_max_write ../mp/mp_method.c /^__memp_set_mp_max_write(dbenv, maxwrite, maxwrite_/ __memp_set_mp_mmapsize ../mp/mp_method.c /^__memp_set_mp_mmapsize(dbenv, mp_mmapsize)$/ -__memp_set_pgcookie ../mp/mp_fopen.c /^__memp_set_pgcookie(dbmfp, pgcookie)$/ -__memp_set_priority ../mp/mp_fopen.c /^__memp_set_priority(dbmfp, priority)$/ +__memp_set_pgcookie ../mp/mp_fmethod.c /^__memp_set_pgcookie(dbmfp, pgcookie)$/ +__memp_set_priority ../mp/mp_fmethod.c /^__memp_set_priority(dbmfp, priority)$/ __memp_stat ../mp/mp_stat.c /^__memp_stat(dbenv, gspp, fspp, flags)$/ -__memp_stat_hash ../mp/mp_stat.c /^__memp_stat_hash(reginfo, mp, dirtyp)$/ +__memp_stat_hash ../mp/mp_stat.c /^__memp_stat_hash(dbenv, reginfo, mp, dirtyp)$/ __memp_stat_pp ../mp/mp_stat.c /^__memp_stat_pp(dbenv, gspp, fspp, flags)$/ -__memp_stat_wait ../mp/mp_stat.c /^__memp_stat_wait(reginfo, mp, mstat, flags)$/ +__memp_stat_print ../mp/mp_stat.c /^__memp_stat_print(dbenv, flags)$/ +__memp_stat_print_pp ../mp/mp_stat.c /^__memp_stat_print_pp(dbenv, flags)$/ +__memp_stat_wait ../mp/mp_stat.c /^__memp_stat_wait(dbenv, reginfo, mp, mstat, flags)/ __memp_sync ../mp/mp_sync.c /^__memp_sync(dbenv, lsnp)$/ __memp_sync_files ../mp/mp_sync.c /^int __memp_sync_files(dbenv, dbmp)$/ __memp_sync_int ../mp/mp_sync.c /^__memp_sync_int(dbenv, dbmfp, trickle_max, op, wro/ @@ -3436,9 +3640,6 @@ __memp_trickle ../mp/mp_trickle.c /^__memp_trickle(dbenv, pct, nwrotep)$/ __memp_trickle_pp ../mp/mp_trickle.c /^__memp_trickle_pp(dbenv, pct, nwrotep)$/ __mp_xxx_fh ../mp/mp_sync.c /^__mp_xxx_fh(dbmfp, fhp)$/ __mpe_fsync ../os/os_fsync.c /^__mpe_fsync(fd)$/ -__mpool_init ../mp/mp_region.c /^__mpool_init(dbenv, dbmp, reginfo_off, htab_bucket/ -__mpool_region_destroy ../mp/mp_region.c /^__mpool_region_destroy(dbenv, infop)$/ -__mpool_region_maint ../mp/mp_region.c /^__mpool_region_maint(infop)$/ __op_rep_enter ../rep/rep_util.c /^__op_rep_enter(dbenv)$/ __op_rep_exit ../rep/rep_util.c /^__op_rep_exit(dbenv)$/ __os_abspath ../os/os_abs.c /^__os_abspath(path)$/ @@ -3457,14 +3658,16 @@ __os_get_errno_ret_zero ../os/os_errno.c /^__os_get_errno_ret_zero()$/ __os_guard ../os/os_alloc.c /^__os_guard(dbenv)$/ __os_have_direct ../os/os_open.c /^__os_have_direct()$/ __os_id ../os/os_id.c /^__os_id(idp)$/ +__os_intermediate_dir ../os/os_open.c /^__os_intermediate_dir(dbenv, name)$/ __os_io ../os/os_rw.c /^__os_io(dbenv, op, fhp, pgno, pagesize, buf, niop)/ __os_ioinfo ../os/os_stat.c /^__os_ioinfo(dbenv, path, fhp, mbytesp, bytesp, ios/ __os_isroot ../os/os_root.c /^__os_isroot()$/ __os_malloc ../os/os_alloc.c /^__os_malloc(dbenv, size, storep)$/ __os_map ../os/os_map.c /^__os_map(dbenv, path, fhp, len, is_region, is_rdon/ __os_mapfile ../os/os_map.c /^__os_mapfile(dbenv, path, fhp, len, is_rdonly, add/ +__os_mkdir ../os/os_open.c /^__os_mkdir(dbenv, name)$/ __os_open ../os/os_open.c /^__os_open(dbenv, name, flags, mode, fhpp)$/ -__os_open_extend ../os/os_open.c /^__os_open_extend(dbenv, name, log_size, page_size,/ +__os_open_extend ../os/os_open.c /^__os_open_extend(dbenv, name, page_size, flags, mo/ __os_openhandle ../os/os_handle.c /^__os_openhandle(dbenv, name, flags, mode, fhpp)$/ __os_physwrite ../os/os_rw.c /^__os_physwrite(dbenv, fhp, addr, len, nwp)$/ __os_pstat_getdynamic ../os/os_spin.c 
/^__os_pstat_getdynamic()$/ @@ -3476,7 +3679,7 @@ __os_r_sysdetach ../os/os_map.c /^__os_r_sysdetach(dbenv, infop, destroy)$/ __os_read ../os/os_rw.c /^__os_read(dbenv, fhp, addr, len, nrp)$/ __os_realloc ../os/os_alloc.c /^__os_realloc(dbenv, size, storep)$/ __os_region_unlink ../os/os_unlink.c /^__os_region_unlink(dbenv, path)$/ -__os_rename ../os/os_rename.c /^__os_rename(dbenv, old, new, flags)$/ +__os_rename ../os/os_rename.c /^__os_rename(dbenv, old, new, silent)$/ __os_seek ../os/os_seek.c /^__os_seek(dbenv, fhp, pgsize, pageno, relative, is/ __os_set_errno ../os/os_errno.c /^__os_set_errno(evalue)$/ __os_shmname ../os/os_open.c /^__os_shmname(dbenv, name, newnamep)$/ @@ -3485,8 +3688,10 @@ __os_spin ../os/os_spin.c /^__os_spin(dbenv)$/ __os_strdup ../os/os_alloc.c /^__os_strdup(dbenv, str, storep)$/ __os_sysconf ../os/os_spin.c /^__os_sysconf()$/ __os_tmpdir ../os/os_tmpdir.c /^__os_tmpdir(dbenv, flags)$/ +__os_truncate ../os/os_truncate.c /^__os_truncate(dbenv, fhp, pgno, pgsize)$/ __os_ufree ../os/os_alloc.c /^__os_ufree(dbenv, ptr)$/ __os_umalloc ../os/os_alloc.c /^__os_umalloc(dbenv, size, storep)$/ +__os_unique_id ../os/os_id.c /^__os_unique_id(dbenv, idp)$/ __os_unlink ../os/os_unlink.c /^__os_unlink(dbenv, path)$/ __os_unmapfile ../os/os_map.c /^__os_unmapfile(dbenv, addr, len)$/ __os_urealloc ../os/os_alloc.c /^__os_urealloc(dbenv, size, storep)$/ @@ -3495,9 +3700,8 @@ __os_yield ../os/os_spin.c /^__os_yield(dbenv, usecs)$/ __os_zerofill ../os/os_rw.c /^__os_zerofill(dbenv, fhp)$/ __qam_31_qammeta ../qam/qam_upgrade.c /^__qam_31_qammeta(dbp, real_name, buf)$/ __qam_32_qammeta ../qam/qam_upgrade.c /^__qam_32_qammeta(dbp, real_name, buf)$/ -__qam_add_getpgnos ../qam/qam_auto.c /^__qam_add_getpgnos(dbenv, rec, lsnp, notused1, sum/ __qam_add_log ../qam/qam_auto.c /^__qam_add_log(dbp, txnid, ret_lsnp, flags, lsn, pg/ -__qam_add_print ../qam/qam_auto.c /^__qam_add_print(dbenv, dbtp, lsnp, notused2, notus/ +__qam_add_print ../qam/qam_autop.c /^__qam_add_print(dbenv, dbtp, lsnp, notused2, notus/ __qam_add_read ../qam/qam_auto.c /^__qam_add_read(dbenv, recbuf, argpp)$/ __qam_add_recover ../qam/qam_rec.c /^__qam_add_recover(dbenv, dbtp, lsnp, op, info)$/ __qam_append ../qam/qam.c /^__qam_append(dbc, key, data)$/ @@ -3512,14 +3716,12 @@ __qam_c_put ../qam/qam.c /^__qam_c_put(dbc, key, data, flags, pgnop)$/ __qam_consume ../qam/qam.c /^__qam_consume(dbc, meta, first)$/ __qam_db_close ../qam/qam_method.c /^__qam_db_close(dbp, flags)$/ __qam_db_create ../qam/qam_method.c /^__qam_db_create(dbp)$/ -__qam_del_getpgnos ../qam/qam_auto.c /^__qam_del_getpgnos(dbenv, rec, lsnp, notused1, sum/ __qam_del_log ../qam/qam_auto.c /^__qam_del_log(dbp, txnid, ret_lsnp, flags, lsn, pg/ -__qam_del_print ../qam/qam_auto.c /^__qam_del_print(dbenv, dbtp, lsnp, notused2, notus/ +__qam_del_print ../qam/qam_autop.c /^__qam_del_print(dbenv, dbtp, lsnp, notused2, notus/ __qam_del_read ../qam/qam_auto.c /^__qam_del_read(dbenv, recbuf, argpp)$/ __qam_del_recover ../qam/qam_rec.c /^__qam_del_recover(dbenv, dbtp, lsnp, op, info)$/ -__qam_delext_getpgnos ../qam/qam_auto.c /^__qam_delext_getpgnos(dbenv, rec, lsnp, notused1, / __qam_delext_log ../qam/qam_auto.c /^__qam_delext_log(dbp, txnid, ret_lsnp, flags, lsn,/ -__qam_delext_print ../qam/qam_auto.c /^__qam_delext_print(dbenv, dbtp, lsnp, notused2, no/ +__qam_delext_print ../qam/qam_autop.c /^__qam_delext_print(dbenv, dbtp, lsnp, notused2, no/ __qam_delext_read ../qam/qam_auto.c /^__qam_delext_read(dbenv, recbuf, argpp)$/ __qam_delext_recover ../qam/qam_rec.c 
/^__qam_delext_recover(dbenv, dbtp, lsnp, op, info)$/ __qam_exid ../qam/qam_files.c /^__qam_exid(dbp, fidp, exnum)$/ @@ -3532,20 +3734,18 @@ __qam_fremove ../qam/qam_files.c /^__qam_fremove(dbp, pgnoaddr)$/ __qam_gen_filelist ../qam/qam_files.c /^__qam_gen_filelist(dbp, filelistp)$/ __qam_get_extentsize ../qam/qam_method.c /^__qam_get_extentsize(dbp, q_extentsizep)$/ __qam_getno ../qam/qam.c /^__qam_getno(dbp, key, rep)$/ -__qam_incfirst_getpgnos ../qam/qam_auto.c /^__qam_incfirst_getpgnos(dbenv, rec, lsnp, notused1/ __qam_incfirst_log ../qam/qam_auto.c /^__qam_incfirst_log(dbp, txnid, ret_lsnp, flags, re/ -__qam_incfirst_print ../qam/qam_auto.c /^__qam_incfirst_print(dbenv, dbtp, lsnp, notused2, / +__qam_incfirst_print ../qam/qam_autop.c /^__qam_incfirst_print(dbenv, dbtp, lsnp, notused2, / __qam_incfirst_read ../qam/qam_auto.c /^__qam_incfirst_read(dbenv, recbuf, argpp)$/ __qam_incfirst_recover ../qam/qam_rec.c /^__qam_incfirst_recover(dbenv, dbtp, lsnp, op, info/ -__qam_init_getpgnos ../qam/qam_auto.c /^__qam_init_getpgnos(dbenv, dtabp, dtabsizep)$/ __qam_init_meta ../qam/qam_open.c /^__qam_init_meta(dbp, meta)$/ -__qam_init_print ../qam/qam_auto.c /^__qam_init_print(dbenv, dtabp, dtabsizep)$/ +__qam_init_print ../qam/qam_autop.c /^__qam_init_print(dbenv, dtabp, dtabsizep)$/ __qam_init_recover ../qam/qam_auto.c /^__qam_init_recover(dbenv, dtabp, dtabsizep)$/ +__qam_map_flags ../qam/qam_method.c /^__qam_map_flags(dbp, inflagsp, outflagsp)$/ __qam_metachk ../qam/qam_open.c /^__qam_metachk(dbp, name, qmeta)$/ __qam_mswap ../qam/qam_conv.c /^__qam_mswap(pg)$/ -__qam_mvptr_getpgnos ../qam/qam_auto.c /^__qam_mvptr_getpgnos(dbenv, rec, lsnp, notused1, s/ __qam_mvptr_log ../qam/qam_auto.c /^__qam_mvptr_log(dbp, txnid, ret_lsnp, flags,$/ -__qam_mvptr_print ../qam/qam_auto.c /^__qam_mvptr_print(dbenv, dbtp, lsnp, notused2, not/ +__qam_mvptr_print ../qam/qam_autop.c /^__qam_mvptr_print(dbenv, dbtp, lsnp, notused2, not/ __qam_mvptr_read ../qam/qam_auto.c /^__qam_mvptr_read(dbenv, recbuf, argpp)$/ __qam_mvptr_recover ../qam/qam_rec.c /^__qam_mvptr_recover(dbenv, dbtp, lsnp, op, info)$/ __qam_nameop ../qam/qam_files.c /^int __qam_nameop(dbp, txn, newname, op)$/ @@ -3554,12 +3754,15 @@ __qam_open ../qam/qam_open.c /^__qam_open(dbp, txn, name, base_pgno, mode, flags __qam_pgin_out ../qam/qam_conv.c /^__qam_pgin_out(dbenv, pg, pp, cookie)$/ __qam_pitem ../qam/qam.c /^__qam_pitem(dbc, pagep, indx, recno, data)$/ __qam_position ../qam/qam.c /^__qam_position(dbc, recnop, mode, exactp)$/ -__qam_remove ../qam/qam_method.c /^__qam_remove(dbp, txn, name, subdb, lsnp)$/ -__qam_rename ../qam/qam_method.c /^__qam_rename(dbp, txn, filename, subdb, newname)$/ +__qam_remove ../qam/qam_method.c /^__qam_remove(dbp, txn, name, subdb)$/ +__qam_rename ../qam/qam_method.c /^__qam_rename(dbp, txn, name, subdb, newname)$/ +__qam_rr ../qam/qam_method.c /^__qam_rr(dbp, txn, name, subdb, newname, op)$/ __qam_salvage ../qam/qam_verify.c /^__qam_salvage(dbp, vdp, pgno, h, handle, callback,/ __qam_set_ext_data ../qam/qam_open.c /^__qam_set_ext_data(dbp, name)$/ __qam_set_extentsize ../qam/qam_method.c /^__qam_set_extentsize(dbp, extentsize)$/ +__qam_set_flags ../qam/qam_method.c /^__qam_set_flags(dbp, flagsp)$/ __qam_stat ../qam/qam_stat.c /^__qam_stat(dbc, spp, flags)$/ +__qam_stat_print ../qam/qam_stat.c /^__qam_stat_print(dbc, flags)$/ __qam_sync ../qam/qam_files.c /^__qam_sync(dbp)$/ __qam_testdocopy ../db/db.c /^__qam_testdocopy(dbp, name)$/ __qam_truncate ../qam/qam.c /^__qam_truncate(dbc, countp)$/ @@ -3567,6 +3770,7 @@ 
__qam_vrfy_data ../qam/qam_verify.c /^__qam_vrfy_data(dbp, vdp, h, pgno, flags)$ __qam_vrfy_meta ../qam/qam_verify.c /^__qam_vrfy_meta(dbp, vdp, meta, pgno, flags)$/ __qam_vrfy_structure ../qam/qam_verify.c /^__qam_vrfy_structure(dbp, vdp, flags)$/ __qam_vrfy_walkqueue ../qam/qam_verify.c /^__qam_vrfy_walkqueue(dbp, vdp, handle, callback, f/ +__queue_pageinfo ../qam/qam_method.c /^__queue_pageinfo(dbp, firstp, lastp, emptyp, prpag/ __ram_add ../btree/bt_recno.c /^__ram_add(dbc, recnop, data, flags, bi_flags)$/ __ram_append ../btree/bt_recno.c /^__ram_append(dbc, key, data)$/ __ram_c_del ../btree/bt_recno.c /^__ram_c_del(dbc)$/ @@ -3593,51 +3797,105 @@ __ram_update ../btree/bt_recno.c /^__ram_update(dbc, recno, can_create)$/ __ram_vrfy_inp ../btree/bt_verify.c /^__ram_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags/ __ram_vrfy_leaf ../btree/bt_verify.c /^__ram_vrfy_leaf(dbp, vdp, h, pgno, flags)$/ __ram_writeback ../btree/bt_recno.c /^__ram_writeback(dbp)$/ +__reg_type ../env/env_stat.c /^__reg_type(t)$/ __rep_abort_prepared ../rep/rep_method.c /^__rep_abort_prepared(dbenv)$/ __rep_apply ../rep/rep_record.c /^__rep_apply(dbenv, rp, rec, ret_lsnp)$/ __rep_bt_cmp ../rep/rep_method.c /^__rep_bt_cmp(dbp, dbt1, dbt2)$/ -__rep_check_alloc ../rep/rep_util.c /^__rep_check_alloc(dbenv, r, n)$/ -__rep_client_dbinit ../rep/rep_method.c /^__rep_client_dbinit(dbenv, startup)$/ +__rep_check_doreq ../rep/rep_record.c /^__rep_check_doreq(dbenv, rep)$/ +__rep_client_dbinit ../rep/rep_method.c /^__rep_client_dbinit(dbenv, startup, which)$/ __rep_cmp_vote ../rep/rep_record.c /^__rep_cmp_vote(dbenv, rep, eidp, lsnp, priority, g/ __rep_cmp_vote2 ../rep/rep_record.c /^__rep_cmp_vote2(dbenv, rep, eid, egen)$/ __rep_collect_txn ../rep/rep_record.c /^__rep_collect_txn(dbenv, lsnp, lc)$/ __rep_dbenv_close ../rep/rep_region.c /^__rep_dbenv_close(dbenv)$/ __rep_dbenv_create ../rep/rep_method.c /^__rep_dbenv_create(dbenv)$/ __rep_dbenv_refresh ../rep/rep_region.c /^__rep_dbenv_refresh(dbenv)$/ +__rep_do_ckp ../rep/rep_record.c /^__rep_do_ckp(dbenv, rec, rp)$/ __rep_dorecovery ../rep/rep_record.c /^__rep_dorecovery(dbenv, lsnp, trunclsnp)$/ -__rep_elect ../rep/rep_method.c /^__rep_elect(dbenv, nsites, priority, timeout, eidp/ +__rep_egen_init ../rep/rep_region.c /^__rep_egen_init(dbenv, rep)$/ +__rep_elect ../rep/rep_method.c /^__rep_elect(dbenv, nsites, nvotes, priority, timeo/ __rep_elect_done ../rep/rep_util.c /^__rep_elect_done(dbenv, rep)$/ -__rep_elect_init ../rep/rep_method.c /^__rep_elect_init(dbenv, lsnp, nsites, priority, be/ +__rep_elect_init ../rep/rep_method.c /^__rep_elect_init(dbenv, lsnp, nsites, nvotes, prio/ __rep_elect_master ../rep/rep_method.c /^__rep_elect_master(dbenv, rep, eidp)$/ +__rep_filedone ../rep/rep_backup.c /^__rep_filedone(dbenv, eid, rep, msgfp, type)$/ +__rep_fileinfo_buf ../rep/rep_auto.c /^__rep_fileinfo_buf(buf, max, lenp,$/ +__rep_fileinfo_read ../rep/rep_auto.c /^__rep_fileinfo_read(dbenv, recbuf, nextp, argpp)$/ +__rep_files_data ../rep/rep_backup.c /^__rep_files_data(dbenv, fp, fileszp, filelenp, fil/ +__rep_files_inmem ../rep/rep_backup.c /^__rep_files_inmem(dbenv, fp, fileszp, filelenp, fi/ +__rep_finfo_alloc ../rep/rep_backup.c /^__rep_finfo_alloc(dbenv, rfpsrc, rfpp)$/ __rep_flush ../rep/rep_method.c /^__rep_flush(dbenv)$/ +__rep_get_fileinfo ../rep/rep_backup.c /^__rep_get_fileinfo(dbenv, file, rfp, uid, filecntp/ __rep_get_gen ../rep/rep_util.c /^__rep_get_gen(dbenv, genp)$/ __rep_get_limit ../rep/rep_method.c /^__rep_get_limit(dbenv, gbytesp, bytesp)$/ +__rep_getnext 
../rep/rep_record.c /^__rep_getnext(dbenv)$/ __rep_grow_sites ../rep/rep_util.c /^__rep_grow_sites(dbenv, nsites)$/ __rep_is_client ../rep/rep_util.c /^__rep_is_client(dbenv)$/ +__rep_lockout ../rep/rep_record.c /^__rep_lockout(dbenv, db_rep, rep)$/ +__rep_log_setup ../rep/rep_backup.c /^__rep_log_setup(dbenv, rep)$/ +__rep_loggap_req ../rep/rep_backup.c /^__rep_loggap_req(dbenv, rep, lsnp, moregap)$/ __rep_lsn_cmp ../rep/rep_record.c /^__rep_lsn_cmp(lsn1, lsn2)$/ +__rep_mpf_open ../rep/rep_backup.c /^__rep_mpf_open(dbenv, mpfp, rfp)$/ __rep_new_master ../rep/rep_util.c /^__rep_new_master(dbenv, cntrl, eid)$/ __rep_newfile ../rep/rep_record.c /^__rep_newfile(dbenv, rc, lsnp)$/ __rep_noarchive ../rep/rep_util.c /^__rep_noarchive(dbenv)$/ __rep_open ../rep/rep_method.c /^__rep_open(dbenv)$/ +__rep_page ../rep/rep_backup.c /^__rep_page(dbenv, eid, rp, rec)$/ +__rep_page_fail ../rep/rep_backup.c /^__rep_page_fail(dbenv, eid, rec)$/ +__rep_page_gap ../rep/rep_backup.c /^__rep_page_gap(dbenv, rep, msgfp, type)$/ +__rep_page_req ../rep/rep_backup.c /^__rep_page_req(dbenv, eid, rec)$/ +__rep_page_sendpages ../rep/rep_backup.c /^__rep_page_sendpages(dbenv, eid, msgfp, mpf, dbp)$/ +__rep_pggap_req ../rep/rep_backup.c /^__rep_pggap_req(dbenv, rep, reqfp, moregap)$/ __rep_preclose ../rep/rep_region.c /^__rep_preclose(dbenv, do_closefiles)$/ +__rep_print_all ../rep/rep_stat.c /^__rep_print_all(dbenv, flags)$/ __rep_print_logmsg ../rep/rep_util.c /^__rep_print_logmsg(dbenv, logdbt, lsnp)$/ __rep_print_message ../rep/rep_util.c /^__rep_print_message(dbenv, eid, rp, str)$/ +__rep_print_stats ../rep/rep_stat.c /^__rep_print_stats(dbenv, flags)$/ __rep_process_message ../rep/rep_record.c /^__rep_process_message(dbenv, control, rec, eidp, r/ +__rep_process_rec ../rep/rep_record.c /^__rep_process_rec(dbenv, rp, rec, typep, ret_lsnp)/ __rep_process_txn ../rep/rep_record.c /^__rep_process_txn(dbenv, rec)$/ +__rep_queue_filedone ../rep/rep_backup.c /^__rep_queue_filedone(dbenv, rep, rfp)$/ __rep_region_destroy ../rep/rep_region.c /^__rep_region_destroy(dbenv)$/ __rep_region_init ../rep/rep_region.c /^__rep_region_init(dbenv)$/ +__rep_remfirst ../rep/rep_record.c /^__rep_remfirst(dbenv, cntrl, rec)$/ +__rep_resend_req ../rep/rep_record.c /^__rep_resend_req(dbenv, eid)$/ __rep_restore_prepared ../rep/rep_method.c /^__rep_restore_prepared(dbenv)$/ -__rep_send_file ../rep/rep_util.c /^__rep_send_file(dbenv, rec, eid)$/ __rep_send_message ../rep/rep_util.c /^__rep_send_message(dbenv, eid, rtype, lsnp, dbtp, / -__rep_send_vote ../rep/rep_util.c /^__rep_send_vote(dbenv, lsnp, nsites, pri, tiebreak/ +__rep_send_vote ../rep/rep_util.c /^__rep_send_vote(dbenv, lsnp, nsites, nvotes, pri, / __rep_set_limit ../rep/rep_method.c /^__rep_set_limit(dbenv, gbytes, bytes)$/ __rep_set_rep_transport ../rep/rep_method.c /^__rep_set_rep_transport(dbenv, eid, f_send)$/ __rep_set_request ../rep/rep_method.c /^__rep_set_request(dbenv, min, max)$/ __rep_start ../rep/rep_method.c /^__rep_start(dbenv, dbt, flags)$/ -__rep_stat ../rep/rep_method.c /^__rep_stat(dbenv, statp, flags)$/ +__rep_stat ../rep/rep_stat.c /^__rep_stat(dbenv, statp, flags)$/ +__rep_stat_pp ../rep/rep_stat.c /^__rep_stat_pp(dbenv, statp, flags)$/ +__rep_stat_print ../rep/rep_stat.c /^__rep_stat_print(dbenv, flags)$/ +__rep_stat_print_pp ../rep/rep_stat.c /^__rep_stat_print_pp(dbenv, flags)$/ __rep_tally ../rep/rep_record.c /^__rep_tally(dbenv, rep, eid, countp, egen, vtoff)$/ -__rep_verify_match ../rep/rep_record.c /^__rep_verify_match(dbenv, rp, savetime)$/ 
+__rep_update_buf ../rep/rep_auto.c /^__rep_update_buf(buf, max, lenp,$/ +__rep_update_read ../rep/rep_auto.c /^__rep_update_read(dbenv, recbuf, nextp, argpp)$/ +__rep_update_req ../rep/rep_backup.c /^__rep_update_req(dbenv, eid)$/ +__rep_update_setup ../rep/rep_backup.c /^__rep_update_setup(dbenv, eid, rp, rec)$/ +__rep_verify_match ../rep/rep_record.c /^__rep_verify_match(dbenv, reclsnp, savetime)$/ __rep_wait ../rep/rep_method.c /^__rep_wait(dbenv, timeout, eidp, flags)$/ +__rep_walk_dir ../rep/rep_backup.c /^__rep_walk_dir(dbenv, dir, fp, fileszp, filelenp, / +__rep_write_egen ../rep/rep_region.c /^__rep_write_egen(dbenv, egen)$/ +__rep_write_page ../rep/rep_backup.c /^__rep_write_page(dbenv, rep, msgfp)$/ +__seq_close ../sequence/sequence.c /^__seq_close(seq, flags)$/ +__seq_get ../sequence/sequence.c /^__seq_get(seq, txn, delta, retp, flags)$/ +__seq_get_cachesize ../sequence/sequence.c /^__seq_get_cachesize(seq, cachesize)$/ +__seq_get_db ../sequence/sequence.c /^__seq_get_db(seq, dbpp)$/ +__seq_get_flags ../sequence/sequence.c /^__seq_get_flags(seq, flagsp)$/ +__seq_get_key ../sequence/sequence.c /^__seq_get_key(seq, key)$/ +__seq_get_range ../sequence/sequence.c /^__seq_get_range(seq, minp, maxp)$/ +__seq_initial_value ../sequence/sequence.c /^__seq_initial_value(seq, value)$/ +__seq_open ../sequence/sequence.c /^__seq_open(seq, txn, keyp, flags)$/ +__seq_print_all ../sequence/seq_stat.c /^__seq_print_all(seq, flags)$/ +__seq_print_stats ../sequence/seq_stat.c /^__seq_print_stats(seq, flags)$/ +__seq_remove ../sequence/sequence.c /^__seq_remove(seq, txn, flags)$/ +__seq_set_cachesize ../sequence/sequence.c /^__seq_set_cachesize(seq, cachesize)$/ +__seq_set_flags ../sequence/sequence.c /^__seq_set_flags(seq, flags)$/ +__seq_set_range ../sequence/sequence.c /^__seq_set_range(seq, min, max)$/ +__seq_stat ../sequence/seq_stat.c /^__seq_stat(seq, spp, flags)$/ +__seq_stat_print ../sequence/seq_stat.c /^__seq_stat_print(seq, flags)$/ +__seq_update ../sequence/sequence.c /^__seq_update(seq, txn, delta, flags)$/ __txn_abort ../txn/txn.c /^__txn_abort(txnp)$/ __txn_abort_pp ../txn/txn.c /^__txn_abort_pp(txnp)$/ __txn_abort_proc ../rpc_server/c/db_server_proc.c /^__txn_abort_proc(txnpcl_id, replyp)$/ @@ -3645,23 +3903,22 @@ __txn_activekids ../txn/txn.c /^__txn_activekids(dbenv, rectype, txnp)$/ __txn_begin ../txn/txn.c /^__txn_begin(dbenv, parent, txnpp, flags)$/ __txn_begin_int ../txn/txn.c /^__txn_begin_int(txn, internal)$/ __txn_begin_pp ../txn/txn.c /^__txn_begin_pp(dbenv, parent, txnpp, flags)$/ -__txn_begin_proc ../rpc_server/c/db_server_proc.c /^__txn_begin_proc(dbenvcl_id, parentcl_id,$/ +__txn_begin_proc ../rpc_server/c/db_server_proc.c /^__txn_begin_proc(dbenvcl_id, parentcl_id, flags, r/ __txn_checkpoint ../txn/txn.c /^__txn_checkpoint(dbenv, kbytes, minutes, flags)$/ __txn_checkpoint_pp ../txn/txn.c /^__txn_checkpoint_pp(dbenv, kbytes, minutes, flags)/ -__txn_child_getpgnos ../txn/txn_auto.c /^__txn_child_getpgnos(dbenv, rec, lsnp, notused1, s/ __txn_child_log ../txn/txn_auto.c /^__txn_child_log(dbenv, txnid, ret_lsnp, flags,$/ -__txn_child_print ../txn/txn_auto.c /^__txn_child_print(dbenv, dbtp, lsnp, notused2, not/ +__txn_child_print ../txn/txn_autop.c /^__txn_child_print(dbenv, dbtp, lsnp, notused2, not/ __txn_child_read ../txn/txn_auto.c /^__txn_child_read(dbenv, recbuf, argpp)$/ __txn_child_recover ../txn/txn_rec.c /^__txn_child_recover(dbenv, dbtp, lsnp, op, info)$/ -__txn_ckp_getpgnos ../txn/txn_auto.c /^__txn_ckp_getpgnos(dbenv, rec, lsnp, notused1, sum/ __txn_ckp_log 
../txn/txn_auto.c /^__txn_ckp_log(dbenv, txnid, ret_lsnp, flags,$/ -__txn_ckp_print ../txn/txn_auto.c /^__txn_ckp_print(dbenv, dbtp, lsnp, notused2, notus/ +__txn_ckp_print ../txn/txn_autop.c /^__txn_ckp_print(dbenv, dbtp, lsnp, notused2, notus/ __txn_ckp_read ../txn/txn_auto.c /^__txn_ckp_read(dbenv, recbuf, argpp)$/ __txn_ckp_recover ../txn/txn_rec.c /^__txn_ckp_recover(dbenv, dbtp, lsnp, op, info)$/ __txn_closeevent ../txn/txn_util.c /^__txn_closeevent(dbenv, txn, dbp)$/ __txn_commit ../txn/txn.c /^__txn_commit(txnp, flags)$/ __txn_commit_pp ../txn/txn.c /^__txn_commit_pp(txnp, flags)$/ __txn_commit_proc ../rpc_server/c/db_server_proc.c /^__txn_commit_proc(txnpcl_id, flags, replyp)$/ +__txn_compare ../txn/txn_stat.c /^__txn_compare(a1, b1)$/ __txn_compensate_begin ../txn/txn.c /^__txn_compensate_begin(dbenv, txnpp)$/ __txn_continue ../txn/txn_recover.c /^__txn_continue(env, txnp, td, off)$/ __txn_dbenv_create ../txn/txn_method.c /^__txn_dbenv_create(dbenv)$/ @@ -3672,38 +3929,39 @@ __txn_discard_proc ../rpc_server/c/db_server_proc.c /^__txn_discard_proc(txnpcl_ __txn_dispatch_undo ../txn/txn.c /^__txn_dispatch_undo(dbenv, txnp, rdbt, key_lsn, tx/ __txn_doevents ../txn/txn_util.c /^__txn_doevents(dbenv, txn, opcode, preprocess)$/ __txn_end ../txn/txn.c /^__txn_end(txnp, is_commit)$/ -__txn_findlastckp ../txn/txn_region.c /^__txn_findlastckp(dbenv, lsnp)$/ +__txn_findlastckp ../txn/txn_region.c /^__txn_findlastckp(dbenv, lsnp, max_lsn)$/ __txn_force_abort ../txn/txn.c /^__txn_force_abort(dbenv, buffer)$/ __txn_get_prepared ../txn/txn_recover.c /^__txn_get_prepared(dbenv, xids, txns, count, retp,/ __txn_get_tx_max ../txn/txn_method.c /^__txn_get_tx_max(dbenv, tx_maxp)$/ __txn_get_tx_timestamp ../txn/txn_method.c /^__txn_get_tx_timestamp(dbenv, timestamp)$/ +__txn_getactive ../txn/txn.c /^__txn_getactive(dbenv, lsnp)$/ __txn_getckp ../txn/txn.c /^__txn_getckp(dbenv, lsnp)$/ __txn_id ../txn/txn.c /^__txn_id(txnp)$/ __txn_id_set ../txn/txn_region.c /^__txn_id_set(dbenv, cur_txnid, max_txnid)$/ __txn_init ../txn/txn_region.c /^__txn_init(dbenv, tmgrp)$/ -__txn_init_getpgnos ../txn/txn_auto.c /^__txn_init_getpgnos(dbenv, dtabp, dtabsizep)$/ -__txn_init_print ../txn/txn_auto.c /^__txn_init_print(dbenv, dtabp, dtabsizep)$/ +__txn_init_print ../txn/txn_autop.c /^__txn_init_print(dbenv, dtabp, dtabsizep)$/ __txn_init_recover ../txn/txn_auto.c /^__txn_init_recover(dbenv, dtabp, dtabsizep)$/ __txn_isvalid ../txn/txn.c /^__txn_isvalid(txnp, tdp, op)$/ __txn_lockevent ../txn/txn_util.c /^__txn_lockevent(dbenv, txn, dbp, lock, locker)$/ __txn_map_gid ../txn/txn_recover.c /^__txn_map_gid(dbenv, gid, tdp, offp)$/ __txn_open ../txn/txn_region.c /^__txn_open(dbenv)$/ +__txn_openfiles ../txn/txn_recover.c /^__txn_openfiles(dbenv, min, force)$/ __txn_preclose ../txn/txn.c /^__txn_preclose(dbenv)$/ __txn_prepare ../txn/txn.c /^__txn_prepare(txnp, gid)$/ __txn_prepare_proc ../rpc_server/c/db_server_proc.c /^__txn_prepare_proc(txnpcl_id, gid, replyp)$/ +__txn_print_all ../txn/txn_stat.c /^__txn_print_all(dbenv, flags)$/ +__txn_print_stats ../txn/txn_stat.c /^__txn_print_stats(dbenv, flags)$/ __txn_recover ../txn/txn_recover.c /^__txn_recover(dbenv, preplist, count, retp, flags)/ __txn_recover_pp ../txn/txn_recover.c /^__txn_recover_pp(dbenv, preplist, count, retp, fla/ -__txn_recover_proc ../rpc_server/c/db_server_proc.c /^__txn_recover_proc(dbenvcl_id, count,$/ -__txn_recycle_getpgnos ../txn/txn_auto.c /^__txn_recycle_getpgnos(dbenv, rec, lsnp, notused1,/ +__txn_recover_proc ../rpc_server/c/db_server_proc.c 
/^__txn_recover_proc(dbenvcl_id, count, flags, reply/ __txn_recycle_log ../txn/txn_auto.c /^__txn_recycle_log(dbenv, txnid, ret_lsnp, flags,$/ -__txn_recycle_print ../txn/txn_auto.c /^__txn_recycle_print(dbenv, dbtp, lsnp, notused2, n/ +__txn_recycle_print ../txn/txn_autop.c /^__txn_recycle_print(dbenv, dbtp, lsnp, notused2, n/ __txn_recycle_read ../txn/txn_auto.c /^__txn_recycle_read(dbenv, recbuf, argpp)$/ __txn_recycle_recover ../txn/txn_rec.c /^__txn_recycle_recover(dbenv, dbtp, lsnp, op, info)/ __txn_region_destroy ../txn/txn_region.c /^__txn_region_destroy(dbenv, infop)$/ __txn_region_size ../txn/txn_region.c /^__txn_region_size(dbenv)$/ -__txn_regop_getpgnos ../txn/txn_auto.c /^__txn_regop_getpgnos(dbenv, rec, lsnp, notused1, s/ __txn_regop_log ../txn/txn_auto.c /^__txn_regop_log(dbenv, txnid, ret_lsnp, flags,$/ -__txn_regop_print ../txn/txn_auto.c /^__txn_regop_print(dbenv, dbtp, lsnp, notused2, not/ +__txn_regop_print ../txn/txn_autop.c /^__txn_regop_print(dbenv, dbtp, lsnp, notused2, not/ __txn_regop_read ../txn/txn_auto.c /^__txn_regop_read(dbenv, recbuf, argpp)$/ __txn_regop_recover ../txn/txn_rec.c /^__txn_regop_recover(dbenv, dbtp, lsnp, op, info)$/ __txn_remevent ../txn/txn_util.c /^__txn_remevent(dbenv, txn, name, fileid)$/ @@ -3711,19 +3969,22 @@ __txn_remlock ../txn/txn_util.c /^__txn_remlock(dbenv, txn, lock, locker)$/ __txn_remrem ../txn/txn_util.c /^__txn_remrem(dbenv, txn, name)$/ __txn_reset ../txn/txn.c /^__txn_reset(dbenv)$/ __txn_restore_txn ../txn/txn_rec.c /^__txn_restore_txn(dbenv, lsnp, argp)$/ +__txn_set_begin_lsnp ../txn/txn.c /^__txn_set_begin_lsnp(txn, rlsnp)$/ __txn_set_timeout ../txn/txn.c /^__txn_set_timeout(txnp, timeout, op)$/ __txn_set_tx_max ../txn/txn_method.c /^__txn_set_tx_max(dbenv, tx_max)$/ __txn_set_tx_timestamp ../txn/txn_method.c /^__txn_set_tx_timestamp(dbenv, timestamp)$/ __txn_stat ../txn/txn_stat.c /^__txn_stat(dbenv, statp, flags)$/ __txn_stat_pp ../txn/txn_stat.c /^__txn_stat_pp(dbenv, statp, flags)$/ +__txn_stat_print ../txn/txn_stat.c /^__txn_stat_print(dbenv, flags)$/ +__txn_stat_print_pp ../txn/txn_stat.c /^__txn_stat_print_pp(dbenv, flags)$/ __txn_undo ../txn/txn.c /^__txn_undo(txnp)$/ __txn_updateckp ../txn/txn.c /^__txn_updateckp(dbenv, lsnp)$/ __txn_xa_begin ../txn/txn.c /^__txn_xa_begin(dbenv, txn)$/ -__txn_xa_regop_getpgnos ../txn/txn_auto.c /^__txn_xa_regop_getpgnos(dbenv, rec, lsnp, notused1/ __txn_xa_regop_log ../txn/txn_auto.c /^__txn_xa_regop_log(dbenv, txnid, ret_lsnp, flags,$/ -__txn_xa_regop_print ../txn/txn_auto.c /^__txn_xa_regop_print(dbenv, dbtp, lsnp, notused2, / +__txn_xa_regop_print ../txn/txn_autop.c /^__txn_xa_regop_print(dbenv, dbtp, lsnp, notused2, / __txn_xa_regop_read ../txn/txn_auto.c /^__txn_xa_regop_read(dbenv, recbuf, argpp)$/ __txn_xa_regop_recover ../txn/txn_rec.c /^__txn_xa_regop_recover(dbenv, dbtp, lsnp, op, info/ +__txn_xid_stats ../txn/txn_stat.c /^__txn_xid_stats(dbenv, mbp, txnp)$/ __ua_memcpy ../os/os_alloc.c /^__ua_memcpy(dst, src, len)$/ __usermem ../log/log_archive.c /^__usermem(dbenv, listp)$/ __vx_fsync ../os/os_fsync.c /^__vx_fsync(fd)$/ @@ -3731,17 +3992,18 @@ __xa_close ../xa/xa_db.c /^__xa_close(dbp, flags)$/ __xa_cursor ../xa/xa_db.c /^__xa_cursor(dbp, txn, dbcp, flags)$/ __xa_del ../xa/xa_db.c /^__xa_del(dbp, txn, key, flags)$/ __xa_get ../xa/xa_db.c /^__xa_get(dbp, txn, key, data, flags)$/ -__xa_get_txn ../xa/xa.c /^__xa_get_txn(env, txnp, do_init)$/ +__xa_get_txn ../xa/xa.c /^__xa_get_txn(dbenv, txnp, do_init)$/ __xa_open ../xa/xa_db.c /^__xa_open(dbp, txn, name, subdb, type, 
flags, mode/ __xa_put ../xa/xa_db.c /^__xa_put(dbp, txn, key, data, flags)$/ -__xa_put_txn ../xa/xa.c /^__xa_put_txn(env, txnp)$/ +__xa_put_txn ../xa/xa.c /^__xa_put_txn(dbenv, txnp)$/ _app_dispatch_intercept_c ../cxx/cxx_env.cpp /^int _app_dispatch_intercept_c(DB_ENV *env, DBT *db/ _debug_check ../tcl/tcl_internal.c /^_debug_check()$/ -_exported ../dbinc/db_cxx.in 150 +_exported ../dbinc/db_cxx.in 132 _feedback_intercept_c ../cxx/cxx_env.cpp /^void _feedback_intercept_c(DB_ENV *env, int opcode/ _paniccall_intercept_c ../cxx/cxx_env.cpp /^void _paniccall_intercept_c(DB_ENV *env, int errva/ _rep_send_intercept_c ../cxx/cxx_env.cpp /^int _rep_send_intercept_c(DB_ENV *env, const DBT */ -_stream_error_function_c ../cxx/cxx_env.cpp /^void _stream_error_function_c(const char *prefix, / +_stream_error_function_c ../cxx/cxx_env.cpp /^void _stream_error_function_c(const DB_ENV *env,$/ +_stream_message_function_c ../cxx/cxx_env.cpp /^void _stream_message_function_c(const DB_ENV *env,/ add_home ../rpc_server/c/db_server_util.c /^add_home(home)$/ add_passwd ../rpc_server/c/db_server_util.c /^add_passwd(passwd)$/ bdb_DbOpen ../tcl/tcl_db_pkg.c /^bdb_DbOpen(interp, objc, objv, ip, dbp)$/ @@ -3753,14 +4015,16 @@ bdb_DbmCommand ../tcl/tcl_compat.c /^bdb_DbmCommand(interp, objc, objv, flag, db bdb_EnvOpen ../tcl/tcl_db_pkg.c /^bdb_EnvOpen(interp, objc, objv, ip, env)$/ bdb_HCommand ../tcl/tcl_compat.c /^bdb_HCommand(interp, objc, objv)$/ bdb_Handles ../tcl/tcl_db_pkg.c /^bdb_Handles(interp, objc, objv)$/ +bdb_MsgType ../tcl/tcl_db_pkg.c /^bdb_MsgType(interp, objc, objv)$/ bdb_NdbmOpen ../tcl/tcl_compat.c /^bdb_NdbmOpen(interp, objc, objv, dbpp)$/ bdb_RandCommand ../tcl/tcl_util.c /^bdb_RandCommand(interp, objc, objv)$/ +bdb_SeqOpen ../tcl/tcl_db_pkg.c /^bdb_SeqOpen(interp, objc, objv, ip, seqp)$/ bdb_Version ../tcl/tcl_db_pkg.c /^bdb_Version(interp, objc, objv)$/ berkdb_Cmd ../tcl/tcl_db_pkg.c /^berkdb_Cmd(notused, interp, objc, objv)$/ blk ../hmac/sha1.c /^#define blk(i) (block->l[i&15] = rol(block->l[(i+1/ blk0 ../hmac/sha1.c /^#define blk0(i) is_bigendian ? 
block->l[i] : \\$/ -bt_compare_fcn_type ../dbinc/db_cxx.in 177 -bt_prefix_fcn_type ../dbinc/db_cxx.in 179 +bt_compare_fcn_type ../dbinc/db_cxx.in 159 +bt_prefix_fcn_type ../dbinc/db_cxx.in 161 ca_recno_arg ../dbinc/btree.h 84 const ../dbinc/db_185.in 54 ct_anyp ../dbinc/db_server_int.h 126 @@ -3784,7 +4048,7 @@ db185_put ../db185/db185.c /^db185_put(db185p, key185, data185, flags)$/ db185_seq ../db185/db185.c /^db185_seq(db185p, key185, data185, flags)$/ db185_sync ../db185/db185.c /^db185_sync(db185p, flags)$/ db_Cmd ../tcl/tcl_db.c /^db_Cmd(clientData, interp, objc, objv)$/ -db_ca_mode ../dbinc/btree.h 315 +db_ca_mode ../dbinc/btree.h 316 db_create ../db/db_method.c /^db_create(dbpp, dbenv, flags)$/ db_env_create ../env/env_method.c /^db_env_create(dbenvpp, flags)$/ db_env_set_func_close ../os/os_method.c /^db_env_set_func_close(func_close)$/ @@ -3793,10 +4057,13 @@ db_env_set_func_dirlist ../os/os_method.c /^db_env_set_func_dirlist(func_dirlist db_env_set_func_exists ../os/os_method.c /^db_env_set_func_exists(func_exists)$/ db_env_set_func_free ../os/os_method.c /^db_env_set_func_free(func_free)$/ db_env_set_func_fsync ../os/os_method.c /^db_env_set_func_fsync(func_fsync)$/ +db_env_set_func_ftruncate ../os/os_method.c /^db_env_set_func_ftruncate(func_ftruncate)$/ db_env_set_func_ioinfo ../os/os_method.c /^db_env_set_func_ioinfo(func_ioinfo)$/ db_env_set_func_malloc ../os/os_method.c /^db_env_set_func_malloc(func_malloc)$/ db_env_set_func_map ../os/os_method.c /^db_env_set_func_map(func_map)$/ db_env_set_func_open ../os/os_method.c /^db_env_set_func_open(func_open)$/ +db_env_set_func_pread ../os/os_method.c /^db_env_set_func_pread(func_pread)$/ +db_env_set_func_pwrite ../os/os_method.c /^db_env_set_func_pwrite(func_pwrite)$/ db_env_set_func_read ../os/os_method.c /^db_env_set_func_read(func_read)$/ db_env_set_func_realloc ../os/os_method.c /^db_env_set_func_realloc(func_realloc)$/ db_env_set_func_rename ../os/os_method.c /^db_env_set_func_rename(func_rename)$/ @@ -3806,24 +4073,23 @@ db_env_set_func_unlink ../os/os_method.c /^db_env_set_func_unlink(func_unlink)$/ db_env_set_func_unmap ../os/os_method.c /^db_env_set_func_unmap(func_unmap)$/ db_env_set_func_write ../os/os_method.c /^db_env_set_func_write(func_write)$/ db_env_set_func_yield ../os/os_method.c /^db_env_set_func_yield(func_yield)$/ -db_errcall_fcn_type ../cxx/cxx_env.cpp 69 -db_free_fcn_type ../dbinc/db_cxx.in 175 +db_free_fcn_type ../dbinc/db_cxx.in 157 db_ham_mode ../dbinc/hash.h 142 -db_indx_t ../dbinc/db.in 78 +db_indx_t ../dbinc/db.in 106 db_limbo_state ../dbinc/db_dispatch.h 113 -db_lockmode_t ../dbinc/db.in 359 -db_lockop_t ../dbinc/db.in 376 -db_malloc_fcn_type ../dbinc/db_cxx.in 171 -db_pgno_t ../dbinc/db.in 77 -db_realloc_fcn_type ../dbinc/db_cxx.in 173 -db_recno_t ../dbinc/db.in 81 -db_recops ../dbinc/db.in 742 -db_ret_t ../libdb_java/db_java_wrap.c 1253 -db_rpc_serverprog_4002 ../rpc_server/c/db_server_svc.c /^db_rpc_serverprog_4002(rqstp, transp)$/ -db_status_t ../dbinc/db.in 393 +db_lockmode_t ../dbinc/db.in 413 +db_lockop_t ../dbinc/db.in 430 +db_malloc_fcn_type ../dbinc/db_cxx.in 153 +db_pgno_t ../dbinc/db.in 105 +db_realloc_fcn_type ../dbinc/db_cxx.in 155 +db_recno_t ../dbinc/db.in 109 +db_recops ../dbinc/db.in 806 +db_ret_t ../libdb_java/db_java_wrap.c 1724 +db_sequence_create ../sequence/sequence.c /^db_sequence_create(seqp, dbp, flags)$/ +db_status_t ../dbinc/db.in 446 db_strerror ../common/db_err.c /^db_strerror(error)$/ db_sync_op ../dbinc/mp.h 37 -db_timeout_t ../dbinc/db.in 84 +db_timeout_t 
../dbinc/db.in 112 db_txnlist_type ../dbinc/db_dispatch.h 51 db_version ../env/env_open.c /^db_version(majverp, minverp, patchp)$/ dbc_Cmd ../tcl/tcl_dbcursor.c /^dbc_Cmd(clientData, interp, objc, objv)$/ @@ -3839,16 +4105,15 @@ dbm_open ../dbinc/db.in /^#define dbm_open(a, b, c) __db_ndbm_open@DB_VERSIO/ dbm_pagfno ../dbinc/db.in /^#define dbm_pagfno(a) __db_ndbm_pagfno@DB_VERSION/ dbm_rdonly ../dbinc/db.in /^#define dbm_rdonly(a) __db_ndbm_rdonly@DB_VERSION/ dbm_store ../dbinc/db.in /^#define dbm_store(a, b, c, d) \\$/ -dbmclose ../dbinc/db.in 1951 +dbmclose ../dbinc/db.in 2135 dbminit ../dbinc/db.in /^#define dbminit(a) __db_dbm_init@DB_VERSION_UNIQUE/ dbopen ../dbinc/db_185.in 167 delete ../dbinc/db.in /^#define delete(a) __db_dbm_delete@DB_VERSION_UNIQU/ -delete___db_lock_u ../libdb_java/db_java_wrap.c /^void delete___db_lock_u(struct __db_lock_u *self){/ -delete___db_lsn ../libdb_java/db_java_wrap.c /^void delete___db_lsn(struct __db_lsn *self){$/ -dirent ../clib/getcwd.c 50 +delete_DbLock ../libdb_java/db_java_wrap.c /^void delete_DbLock(struct DbLock *self){$/ +dirent ../clib/getcwd.c 48 dirfd ../clib/getcwd.c /^#define dirfd(dirp) ((dirp)->dd_fd)$/ dupString ../cxx/cxx_except.cpp /^static char *dupString(const char *s)$/ -dup_compare_fcn_type ../dbinc/db_cxx.in 181 +dup_compare_fcn_type ../dbinc/db_cxx.in 163 env_Cmd ../tcl/tcl_env.c /^env_Cmd(clientData, interp, objc, objv)$/ env_DbRemove ../tcl/tcl_env.c /^env_DbRemove(interp, objc, objv, dbenv)$/ env_DbRename ../tcl/tcl_env.c /^env_DbRename(interp, objc, objv, dbenv)$/ @@ -3860,15 +4125,15 @@ env_GetVerbose ../tcl/tcl_env.c /^env_GetVerbose(interp, objc, objv, dbenv)$/ env_recover ../rpc_server/c/db_server_util.c /^env_recover(progname)$/ exec_proc ../mutex/tm.c /^exec_proc(id, tmpath, typearg)$/ fetch ../dbinc/db.in /^#define fetch(a) __db_dbm_fetch@DB_VERSION_UNIQUE_/ -firstkey ../dbinc/db.in 1956 -fsync ../os/os_fsync.c /^#define fsync(fd) __vx_fsync(fd);$/ +firstkey ../dbinc/db.in 2140 +fsync ../os/os_fsync.c /^#define fsync(fd) __vx_fsync(fd)$/ get_fullhome ../rpc_server/c/db_server_util.c /^get_fullhome(name)$/ get_tableent ../rpc_server/c/db_server_util.c /^get_tableent(id)$/ getcwd ../clib/getcwd.c /^getcwd(pt, size)$/ getopt ../clib/getopt.c /^getopt(nargc, nargv, ostr)$/ -h_hash_fcn_type ../dbinc/db_cxx.in 183 +h_hash_fcn_type ../dbinc/db_cxx.in 165 hcreate ../dbinc/db.in /^#define hcreate(a) __db_hcreate@DB_VERSION_UNIQUE_/ -hdestroy ../dbinc/db.in 1973 +hdestroy ../dbinc/db.in 2157 home_entry ../dbinc/db_server_int.h 40 hsearch ../dbinc/db.in /^#define hsearch(a, b) __db_hsearch@DB_VERSION_UNIQ/ i_anyp ../dbinc/tcl_db.h 123 @@ -3894,53 +4159,58 @@ i_pgno ../dbinc/tcl_db.h 135 i_pgsz ../dbinc/tcl_db.h 138 i_txnp ../dbinc/tcl_db.h 128 indx_t ../dbinc/db_185.in 82 -int_bool ../libdb_java/db_java_wrap.c 1254 +int_bool ../libdb_java/db_java_wrap.c 1725 item ../hsearch/hsearch.c /^ ENTRY item;$/ lock_Cmd ../tcl/tcl_lock.c /^lock_Cmd(clientData, interp, objc, objv)$/ log_compare ../log/log_compare.c /^log_compare(lsn0, lsn1)$/ logc_Cmd ../tcl/tcl_log.c /^logc_Cmd(clientData, interp, objc, objv)$/ -logfile_validity ../dbinc/log.h 288 -m ../dbinc/tcl_db.h 41 +logfile_validity ../dbinc/log.h 376 +m ../dbinc/tcl_db.h 42 map_file ../mutex/tm.c /^map_file(gm_addrp, tm_addrp, lm_addrp, fdp)$/ memcmp ../clib/memcmp.c /^memcmp(s1, s2, n)$/ memcpy ../clib/memmove.c /^memcpy(dst0, src0, length)$/ mp_Cmd ../tcl/tcl_mp.c /^mp_Cmd(clientData, interp, objc, objv)$/ -mu_action ../dbinc/db_int.in 313 +mu_action ../dbinc/db_int.in 388 
mutex_Cmd ../tcl/tcl_util.c /^mutex_Cmd(clientData, interp, objc, objv)$/ ndbm_Cmd ../tcl/tcl_compat.c /^ndbm_Cmd(clientData, interp, objc, objv)$/ -new___db ../libdb_java/db_java_wrap.c /^struct __db *new___db(DB_ENV *dbenv,u_int32_t flag/ -new___db_env ../libdb_java/db_java_wrap.c /^struct __db_env *new___db_env(u_int32_t flags){$/ -new___db_lsn ../libdb_java/db_java_wrap.c /^struct __db_lsn *new___db_lsn(u_int32_t file,u_int/ +new_Db ../libdb_java/db_java_wrap.c /^struct Db *new_Db(DB_ENV *dbenv,u_int32_t flags){$/ +new_DbEnv ../libdb_java/db_java_wrap.c /^struct DbEnv *new_DbEnv(u_int32_t flags){$/ +new_DbSequence ../libdb_java/db_java_wrap.c /^struct DbSequence *new_DbSequence(DB *db,u_int32_t/ new_ct_ent ../rpc_server/c/db_server_util.c /^new_ct_ent(errp)$/ nextkey ../dbinc/db.in /^#define nextkey(a) __db_dbm_nextkey@DB_VERSION_UNI/ onint ../common/util_sig.c /^onint(signo)$/ pg_Cmd ../tcl/tcl_mp.c /^pg_Cmd(clientData, interp, objc, objv)$/ -pgin_fcn_type ../dbinc/db_cxx.in 185 +pgin_fcn_type ../dbinc/db_cxx.in 167 pgno_t ../dbinc/db_185.in 76 -pgout_fcn_type ../dbinc/db_cxx.in 187 +pgout_fcn_type ../dbinc/db_cxx.in 169 +pthread_cond_destroy ../mutex/mut_pthread.c /^#define pthread_cond_destroy(x) 0$/ pthread_cond_signal ../mutex/mut_pthread.c 33 pthread_cond_wait ../mutex/mut_pthread.c 34 pthread_mutex_destroy ../mutex/mut_pthread.c /^#define pthread_mutex_destroy(x) 0$/ -pthread_mutex_lock ../mutex/mut_pthread.c 35 -pthread_mutex_trylock ../mutex/mut_pthread.c 36 -pthread_mutex_unlock ../mutex/mut_pthread.c 37 -pthread_self ../mutex/mut_pthread.c 51 +pthread_mutex_lock ../mutex/mut_pthread.c 36 +pthread_mutex_trylock ../mutex/mut_pthread.c 37 +pthread_mutex_unlock ../mutex/mut_pthread.c 38 +pthread_self ../mutex/mut_pthread.c 54 qam_name_op ../dbinc/qam.h 166 qam_position_mode ../dbinc/qam.h 151 qam_probe_mode ../dbinc/qam.h 157 raise ../clib/raise.c /^raise(s)$/ recno_t ../dbinc/db_185.in 84 -reg_type ../dbinc/region.h 117 +reg_type_t ../dbinc/region.h 117 +repdb_t ../dbinc/rep.h 98 retval ../hsearch/hsearch.c /^static ENTRY retval;$/ -roff_t ../dbinc/db.in 91 +roff_t ../dbinc/db.in 121 rol ../hmac/sha1.c /^#define rol(value, bits) (((value) << (bits)) | ((/ run_locker ../mutex/tm.c /^run_locker(id)$/ run_lthread ../mutex/tm.c /^run_lthread(arg)$/ run_wakeup ../mutex/tm.c /^run_wakeup(id)$/ run_wthread ../mutex/tm.c /^run_wthread(arg)$/ +seq_Cmd ../tcl/tcl_seq.c /^seq_Cmd(clientData, interp, objc, objv)$/ shm_open ../mutex/tm.c 25 shm_unlink ../mutex/tm.c 26 snprintf ../clib/snprintf.c /^snprintf(char *str, size_t n, const char *fmt, .../ +sprintf_overflow ../clib/snprintf.c /^sprintf_overflow()$/ +sprintf_retcharpnt ../clib/snprintf.c /^sprintf_retcharpnt()$/ store ../dbinc/db.in /^#define store(a, b) __db_dbm_store@DB_VERSION_UNIQ/ strcasecmp ../clib/strcasecmp.c /^strcasecmp(s1, s2)$/ strdup ../clib/strdup.c /^strdup(str)$/ @@ -3966,6 +4236,8 @@ tcl_DbcPut ../tcl/tcl_dbcursor.c /^tcl_DbcPut(interp, objc, objv, dbc)$/ tcl_EnvAttr ../tcl/tcl_env.c /^tcl_EnvAttr(interp, objc, objv, dbenv)$/ tcl_EnvGetEncryptFlags ../tcl/tcl_env.c /^tcl_EnvGetEncryptFlags(interp, objc, objv, dbenv)$/ tcl_EnvRemove ../tcl/tcl_env.c /^tcl_EnvRemove(interp, objc, objv, dbenv, envip)$/ +tcl_EnvSetErrfile ../tcl/tcl_env.c /^tcl_EnvSetErrfile(interp, dbenv, ip, errf)$/ +tcl_EnvSetErrpfx ../tcl/tcl_env.c /^tcl_EnvSetErrpfx(interp, dbenv, ip, pfx)$/ tcl_EnvSetFlags ../tcl/tcl_env.c /^tcl_EnvSetFlags(interp, dbenv, which, onoff)$/ tcl_EnvTest ../tcl/tcl_env.c /^tcl_EnvTest(interp, objc, objv, dbenv)$/ 
tcl_EnvVerbose ../tcl/tcl_env.c /^tcl_EnvVerbose(interp, dbenv, which, onoff)$/ @@ -3998,6 +4270,11 @@ tcl_RepProcessMessage ../tcl/tcl_rep.c /^tcl_RepProcessMessage(interp, objc, obj tcl_RepRequest ../tcl/tcl_rep.c /^tcl_RepRequest(interp, objc, objv, dbenv)$/ tcl_RepStart ../tcl/tcl_rep.c /^tcl_RepStart(interp, objc, objv, dbenv)$/ tcl_RepStat ../tcl/tcl_rep.c /^tcl_RepStat(interp, objc, objv, dbenv)$/ +tcl_SeqClose ../tcl/tcl_seq.c /^tcl_SeqClose(interp, objc, objv, seq, ip)$/ +tcl_SeqGet ../tcl/tcl_seq.c /^tcl_SeqGet(interp, objc, objv, seq)$/ +tcl_SeqGetFlags ../tcl/tcl_seq.c /^tcl_SeqGetFlags(interp, objc, objv, seq)$/ +tcl_SeqRemove ../tcl/tcl_seq.c /^tcl_SeqRemove(interp, objc, objv, seq, ip)$/ +tcl_SeqStat ../tcl/tcl_seq.c /^tcl_SeqStat(interp, objc, objv, seq)$/ tcl_Txn ../tcl/tcl_txn.c /^tcl_Txn(interp, objc, objv, envp, envip)$/ tcl_TxnCheckpoint ../tcl/tcl_txn.c /^tcl_TxnCheckpoint(interp, objc, objv, envp)$/ tcl_TxnCommit ../tcl/tcl_txn.c /^tcl_TxnCommit(interp, objc, objv, txnp, txnip)$/ @@ -4010,7 +4287,6 @@ tcl_db_free ../tcl/tcl_db_pkg.c /^tcl_db_free(ptr)$/ tcl_db_malloc ../tcl/tcl_db_pkg.c /^tcl_db_malloc(size)$/ tcl_db_realloc ../tcl/tcl_db_pkg.c /^tcl_db_realloc(ptr, size)$/ tcl_dup_compare ../tcl/tcl_db_pkg.c /^tcl_dup_compare(dbp, dbta, dbtb)$/ -tcl_flag_callback ../tcl/tcl_internal.c /^tcl_flag_callback(flags, fn, vtcbp)$/ tcl_h_hash ../tcl/tcl_db_pkg.c /^tcl_h_hash(dbp, buf, len)$/ tcl_rep_send ../tcl/tcl_db_pkg.c /^tcl_rep_send(dbenv, control, rec, lsnp, eid, flags/ tcl_second_call ../tcl/tcl_db.c /^tcl_second_call(dbp, pkey, data, skey)$/ @@ -4018,154 +4294,17 @@ tm_file_init ../mutex/tm.c /^tm_file_init()$/ tm_mutex_destroy ../mutex/tm.c /^tm_mutex_destroy()$/ tm_mutex_init ../mutex/tm.c /^tm_mutex_init()$/ tm_mutex_stats ../mutex/tm.c /^tm_mutex_stats()$/ -tmpString::tmpString ../cxx/cxx_except.cpp /^tmpString::tmpString(const char *str1,$/ tsl_t ../dbinc/mutex.h 79 txn_Cmd ../tcl/tcl_txn.c /^txn_Cmd(clientData, interp, objc, objv)$/ -txnop_t ../txn/txn.c 98 +txnop_t ../txn/txn.c 96 u16 ../crypto/rijndael/rijndael-alg-fst.h 37 u32 ../crypto/rijndael/rijndael-alg-fst.h 38 u8 ../crypto/rijndael/rijndael-alg-fst.h 36 unmap_file ../mutex/tm.c /^unmap_file(addr, fd)$/ usage ../mutex/tm.c /^usage()$/ -val ../dbinc/tcl_db.h 42 +val ../dbinc/tcl_db.h 43 version_check ../rpc_server/c/db_server_util.c /^version_check()$/ -vsnprintf ../clib/vsnprintf.c /^vsnprintf(str, n, fmt, ap)$/ -wmask ../clib/memmove.c 55 -word ../clib/memmove.c 50 -wsize ../clib/memmove.c 53 -xdr___db_associate_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_associate_msg(xdrs, objp)$/ -xdr___db_associate_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_associate_reply(xdrs, objp)$/ -xdr___db_bt_maxkey_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_bt_maxkey_msg(xdrs, objp)$/ -xdr___db_bt_maxkey_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_bt_maxkey_reply(xdrs, objp)$/ -xdr___db_bt_minkey_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_bt_minkey_msg(xdrs, objp)$/ -xdr___db_bt_minkey_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_bt_minkey_reply(xdrs, objp)$/ -xdr___db_close_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_close_msg(xdrs, objp)$/ -xdr___db_close_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_close_reply(xdrs, objp)$/ -xdr___db_create_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_create_msg(xdrs, objp)$/ -xdr___db_create_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_create_reply(xdrs, objp)$/ -xdr___db_cursor_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_cursor_msg(xdrs, 
objp)$/ -xdr___db_cursor_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_cursor_reply(xdrs, objp)$/ -xdr___db_del_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_del_msg(xdrs, objp)$/ -xdr___db_del_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_del_reply(xdrs, objp)$/ -xdr___db_encrypt_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_encrypt_msg(xdrs, objp)$/ -xdr___db_encrypt_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_encrypt_reply(xdrs, objp)$/ -xdr___db_extentsize_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_extentsize_msg(xdrs, objp)$/ -xdr___db_extentsize_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_extentsize_reply(xdrs, objp)$/ -xdr___db_flags_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_flags_msg(xdrs, objp)$/ -xdr___db_flags_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_flags_reply(xdrs, objp)$/ -xdr___db_get_bt_minkey_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_get_bt_minkey_msg(xdrs, objp)$/ -xdr___db_get_bt_minkey_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_get_bt_minkey_reply(xdrs, objp)$/ -xdr___db_get_encrypt_flags_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_get_encrypt_flags_msg(xdrs, objp)$/ -xdr___db_get_encrypt_flags_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_get_encrypt_flags_reply(xdrs, objp)$/ -xdr___db_get_extentsize_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_get_extentsize_msg(xdrs, objp)$/ -xdr___db_get_extentsize_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_get_extentsize_reply(xdrs, objp)$/ -xdr___db_get_flags_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_get_flags_msg(xdrs, objp)$/ -xdr___db_get_flags_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_get_flags_reply(xdrs, objp)$/ -xdr___db_get_h_ffactor_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_get_h_ffactor_msg(xdrs, objp)$/ -xdr___db_get_h_ffactor_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_get_h_ffactor_reply(xdrs, objp)$/ -xdr___db_get_h_nelem_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_get_h_nelem_msg(xdrs, objp)$/ -xdr___db_get_h_nelem_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_get_h_nelem_reply(xdrs, objp)$/ -xdr___db_get_lorder_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_get_lorder_msg(xdrs, objp)$/ -xdr___db_get_lorder_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_get_lorder_reply(xdrs, objp)$/ -xdr___db_get_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_get_msg(xdrs, objp)$/ -xdr___db_get_name_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_get_name_msg(xdrs, objp)$/ -xdr___db_get_name_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_get_name_reply(xdrs, objp)$/ -xdr___db_get_open_flags_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_get_open_flags_msg(xdrs, objp)$/ -xdr___db_get_open_flags_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_get_open_flags_reply(xdrs, objp)$/ -xdr___db_get_pagesize_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_get_pagesize_msg(xdrs, objp)$/ -xdr___db_get_pagesize_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_get_pagesize_reply(xdrs, objp)$/ -xdr___db_get_re_delim_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_get_re_delim_msg(xdrs, objp)$/ -xdr___db_get_re_delim_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_get_re_delim_reply(xdrs, objp)$/ -xdr___db_get_re_len_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_get_re_len_msg(xdrs, objp)$/ -xdr___db_get_re_len_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_get_re_len_reply(xdrs, objp)$/ -xdr___db_get_re_pad_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_get_re_pad_msg(xdrs, objp)$/ -xdr___db_get_re_pad_reply ../rpc_server/c/db_server_xdr.c 
/^xdr___db_get_re_pad_reply(xdrs, objp)$/ -xdr___db_get_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_get_reply(xdrs, objp)$/ -xdr___db_h_ffactor_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_h_ffactor_msg(xdrs, objp)$/ -xdr___db_h_ffactor_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_h_ffactor_reply(xdrs, objp)$/ -xdr___db_h_nelem_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_h_nelem_msg(xdrs, objp)$/ -xdr___db_h_nelem_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_h_nelem_reply(xdrs, objp)$/ -xdr___db_join_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_join_msg(xdrs, objp)$/ -xdr___db_join_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_join_reply(xdrs, objp)$/ -xdr___db_key_range_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_key_range_msg(xdrs, objp)$/ -xdr___db_key_range_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_key_range_reply(xdrs, objp)$/ -xdr___db_lorder_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_lorder_msg(xdrs, objp)$/ -xdr___db_lorder_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_lorder_reply(xdrs, objp)$/ -xdr___db_open_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_open_msg(xdrs, objp)$/ -xdr___db_open_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_open_reply(xdrs, objp)$/ -xdr___db_pagesize_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_pagesize_msg(xdrs, objp)$/ -xdr___db_pagesize_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_pagesize_reply(xdrs, objp)$/ -xdr___db_pget_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_pget_msg(xdrs, objp)$/ -xdr___db_pget_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_pget_reply(xdrs, objp)$/ -xdr___db_put_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_put_msg(xdrs, objp)$/ -xdr___db_put_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_put_reply(xdrs, objp)$/ -xdr___db_re_delim_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_re_delim_msg(xdrs, objp)$/ -xdr___db_re_delim_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_re_delim_reply(xdrs, objp)$/ -xdr___db_re_len_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_re_len_msg(xdrs, objp)$/ -xdr___db_re_len_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_re_len_reply(xdrs, objp)$/ -xdr___db_re_pad_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_re_pad_msg(xdrs, objp)$/ -xdr___db_re_pad_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_re_pad_reply(xdrs, objp)$/ -xdr___db_remove_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_remove_msg(xdrs, objp)$/ -xdr___db_remove_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_remove_reply(xdrs, objp)$/ -xdr___db_rename_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_rename_msg(xdrs, objp)$/ -xdr___db_rename_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_rename_reply(xdrs, objp)$/ -xdr___db_stat_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_stat_msg(xdrs, objp)$/ -xdr___db_stat_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_stat_reply(xdrs, objp)$/ -xdr___db_sync_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_sync_msg(xdrs, objp)$/ -xdr___db_sync_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_sync_reply(xdrs, objp)$/ -xdr___db_truncate_msg ../rpc_server/c/db_server_xdr.c /^xdr___db_truncate_msg(xdrs, objp)$/ -xdr___db_truncate_reply ../rpc_server/c/db_server_xdr.c /^xdr___db_truncate_reply(xdrs, objp)$/ -xdr___dbc_close_msg ../rpc_server/c/db_server_xdr.c /^xdr___dbc_close_msg(xdrs, objp)$/ -xdr___dbc_close_reply ../rpc_server/c/db_server_xdr.c /^xdr___dbc_close_reply(xdrs, objp)$/ -xdr___dbc_count_msg ../rpc_server/c/db_server_xdr.c /^xdr___dbc_count_msg(xdrs, objp)$/ -xdr___dbc_count_reply ../rpc_server/c/db_server_xdr.c 
/^xdr___dbc_count_reply(xdrs, objp)$/ -xdr___dbc_del_msg ../rpc_server/c/db_server_xdr.c /^xdr___dbc_del_msg(xdrs, objp)$/ -xdr___dbc_del_reply ../rpc_server/c/db_server_xdr.c /^xdr___dbc_del_reply(xdrs, objp)$/ -xdr___dbc_dup_msg ../rpc_server/c/db_server_xdr.c /^xdr___dbc_dup_msg(xdrs, objp)$/ -xdr___dbc_dup_reply ../rpc_server/c/db_server_xdr.c /^xdr___dbc_dup_reply(xdrs, objp)$/ -xdr___dbc_get_msg ../rpc_server/c/db_server_xdr.c /^xdr___dbc_get_msg(xdrs, objp)$/ -xdr___dbc_get_reply ../rpc_server/c/db_server_xdr.c /^xdr___dbc_get_reply(xdrs, objp)$/ -xdr___dbc_pget_msg ../rpc_server/c/db_server_xdr.c /^xdr___dbc_pget_msg(xdrs, objp)$/ -xdr___dbc_pget_reply ../rpc_server/c/db_server_xdr.c /^xdr___dbc_pget_reply(xdrs, objp)$/ -xdr___dbc_put_msg ../rpc_server/c/db_server_xdr.c /^xdr___dbc_put_msg(xdrs, objp)$/ -xdr___dbc_put_reply ../rpc_server/c/db_server_xdr.c /^xdr___dbc_put_reply(xdrs, objp)$/ -xdr___env_cachesize_msg ../rpc_server/c/db_server_xdr.c /^xdr___env_cachesize_msg(xdrs, objp)$/ -xdr___env_cachesize_reply ../rpc_server/c/db_server_xdr.c /^xdr___env_cachesize_reply(xdrs, objp)$/ -xdr___env_close_msg ../rpc_server/c/db_server_xdr.c /^xdr___env_close_msg(xdrs, objp)$/ -xdr___env_close_reply ../rpc_server/c/db_server_xdr.c /^xdr___env_close_reply(xdrs, objp)$/ -xdr___env_create_msg ../rpc_server/c/db_server_xdr.c /^xdr___env_create_msg(xdrs, objp)$/ -xdr___env_create_reply ../rpc_server/c/db_server_xdr.c /^xdr___env_create_reply(xdrs, objp)$/ -xdr___env_dbremove_msg ../rpc_server/c/db_server_xdr.c /^xdr___env_dbremove_msg(xdrs, objp)$/ -xdr___env_dbremove_reply ../rpc_server/c/db_server_xdr.c /^xdr___env_dbremove_reply(xdrs, objp)$/ -xdr___env_dbrename_msg ../rpc_server/c/db_server_xdr.c /^xdr___env_dbrename_msg(xdrs, objp)$/ -xdr___env_dbrename_reply ../rpc_server/c/db_server_xdr.c /^xdr___env_dbrename_reply(xdrs, objp)$/ -xdr___env_encrypt_msg ../rpc_server/c/db_server_xdr.c /^xdr___env_encrypt_msg(xdrs, objp)$/ -xdr___env_encrypt_reply ../rpc_server/c/db_server_xdr.c /^xdr___env_encrypt_reply(xdrs, objp)$/ -xdr___env_flags_msg ../rpc_server/c/db_server_xdr.c /^xdr___env_flags_msg(xdrs, objp)$/ -xdr___env_flags_reply ../rpc_server/c/db_server_xdr.c /^xdr___env_flags_reply(xdrs, objp)$/ -xdr___env_get_cachesize_msg ../rpc_server/c/db_server_xdr.c /^xdr___env_get_cachesize_msg(xdrs, objp)$/ -xdr___env_get_cachesize_reply ../rpc_server/c/db_server_xdr.c /^xdr___env_get_cachesize_reply(xdrs, objp)$/ -xdr___env_get_encrypt_flags_msg ../rpc_server/c/db_server_xdr.c /^xdr___env_get_encrypt_flags_msg(xdrs, objp)$/ -xdr___env_get_encrypt_flags_reply ../rpc_server/c/db_server_xdr.c /^xdr___env_get_encrypt_flags_reply(xdrs, objp)$/ -xdr___env_get_flags_msg ../rpc_server/c/db_server_xdr.c /^xdr___env_get_flags_msg(xdrs, objp)$/ -xdr___env_get_flags_reply ../rpc_server/c/db_server_xdr.c /^xdr___env_get_flags_reply(xdrs, objp)$/ -xdr___env_get_home_msg ../rpc_server/c/db_server_xdr.c /^xdr___env_get_home_msg(xdrs, objp)$/ -xdr___env_get_home_reply ../rpc_server/c/db_server_xdr.c /^xdr___env_get_home_reply(xdrs, objp)$/ -xdr___env_get_open_flags_msg ../rpc_server/c/db_server_xdr.c /^xdr___env_get_open_flags_msg(xdrs, objp)$/ -xdr___env_get_open_flags_reply ../rpc_server/c/db_server_xdr.c /^xdr___env_get_open_flags_reply(xdrs, objp)$/ -xdr___env_open_msg ../rpc_server/c/db_server_xdr.c /^xdr___env_open_msg(xdrs, objp)$/ -xdr___env_open_reply ../rpc_server/c/db_server_xdr.c /^xdr___env_open_reply(xdrs, objp)$/ -xdr___env_remove_msg ../rpc_server/c/db_server_xdr.c /^xdr___env_remove_msg(xdrs, 
objp)$/ -xdr___env_remove_reply ../rpc_server/c/db_server_xdr.c /^xdr___env_remove_reply(xdrs, objp)$/ -xdr___txn_abort_msg ../rpc_server/c/db_server_xdr.c /^xdr___txn_abort_msg(xdrs, objp)$/ -xdr___txn_abort_reply ../rpc_server/c/db_server_xdr.c /^xdr___txn_abort_reply(xdrs, objp)$/ -xdr___txn_begin_msg ../rpc_server/c/db_server_xdr.c /^xdr___txn_begin_msg(xdrs, objp)$/ -xdr___txn_begin_reply ../rpc_server/c/db_server_xdr.c /^xdr___txn_begin_reply(xdrs, objp)$/ -xdr___txn_commit_msg ../rpc_server/c/db_server_xdr.c /^xdr___txn_commit_msg(xdrs, objp)$/ -xdr___txn_commit_reply ../rpc_server/c/db_server_xdr.c /^xdr___txn_commit_reply(xdrs, objp)$/ -xdr___txn_discard_msg ../rpc_server/c/db_server_xdr.c /^xdr___txn_discard_msg(xdrs, objp)$/ -xdr___txn_discard_reply ../rpc_server/c/db_server_xdr.c /^xdr___txn_discard_reply(xdrs, objp)$/ -xdr___txn_prepare_msg ../rpc_server/c/db_server_xdr.c /^xdr___txn_prepare_msg(xdrs, objp)$/ -xdr___txn_prepare_reply ../rpc_server/c/db_server_xdr.c /^xdr___txn_prepare_reply(xdrs, objp)$/ -xdr___txn_recover_msg ../rpc_server/c/db_server_xdr.c /^xdr___txn_recover_msg(xdrs, objp)$/ -xdr___txn_recover_reply ../rpc_server/c/db_server_xdr.c /^xdr___txn_recover_reply(xdrs, objp)$/ +vsnprintf ../clib/snprintf.c /^vsnprintf(str, n, fmt, ap)$/ +wmask ../clib/memmove.c 53 +word ../clib/memmove.c 48 +wsize ../clib/memmove.c 51 diff --git a/db/dist/template/db_server_proc b/db/dist/template/db_server_proc index 84fce4d2e..224c83684 100644 --- a/db/dist/template/db_server_proc +++ b/db/dist/template/db_server_proc @@ -1,4 +1,3 @@ -/* Do not edit: automatically built by gen_rpc.awk. */ #include "db_config.h" #ifndef NO_SYSTEM_INCLUDES @@ -9,10 +8,10 @@ #include #endif +#include "db_server.h" + #include "db_int.h" -#include "dbinc_auto/db_server.h" #include "dbinc/db_server_int.h" -#include "dbinc_auto/rpc_server_ext.h" /* BEGIN __env_get_cachesize_proc */ void @@ -1442,8 +1441,10 @@ __db_rename_proc(dbpcl_id, name, subdb, /* BEGIN __db_stat_proc */ void -__db_stat_proc(dbpcl_id, flags, replyp, freep) +__db_stat_proc(dbpcl_id, txnpcl_id, + flags, replyp, freep) long dbpcl_id; + long txnpcl_id; u_int32_t flags; __db_stat_reply *replyp; int * freep; @@ -1452,9 +1453,13 @@ __db_stat_proc(dbpcl_id, flags, replyp, freep) int ret; DB * dbp; ct_entry *dbp_ctp; + DB_TXN * txnp; + ct_entry *txnp_ctp; ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); dbp = (DB *)dbp_ctp->ct_anyp; + ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN); + txnp = (DB_TXN *)txnp_ctp->ct_anyp; /* * XXX Code goes here diff --git a/db/dist/template/gen_client_ret b/db/dist/template/gen_client_ret index 340f99e0d..3d897e609 100644 --- a/db/dist/template/gen_client_ret +++ b/db/dist/template/gen_client_ret @@ -1,15 +1,12 @@ /* Do not edit: automatically built by gen_rpc.awk. 
*/ #include "db_config.h" -#ifdef HAVE_RPC #ifndef NO_SYSTEM_INCLUDES #include -#include #include #endif #include "db_int.h" -#include "dbinc_auto/db_server.h" #include "dbinc/txn.h" /* @@ -338,14 +335,12 @@ __dbcl_db_open_ret(dbp, txnp, name, subdb, type, flags, mode, replyp) int ret; long db; DBTYPE type; - u_int32_t dbflags; int lorder; if (replyp->status != 0) return (replyp->status); db = replyp->dbcl_id; type = replyp->type; - dbflags = replyp->dbflags; lorder = replyp->lorder; /* @@ -464,12 +459,13 @@ __dbcl_db_rename_ret(dbp, name, subdb, newname, flags, replyp) } /* - * PUBLIC: int __dbcl_db_stat_ret __P((DB *, void *, u_int32_t, + * PUBLIC: int __dbcl_db_stat_ret __P((DB *, DB_TXN *, void *, u_int32_t, * PUBLIC: __db_stat_reply *)); */ int -__dbcl_db_stat_ret(dbp, sp, flags, replyp) +__dbcl_db_stat_ret(dbp, txnp, sp, flags, replyp) DB * dbp; + DB_TXN * txnp; void * sp; u_int32_t flags; __db_stat_reply *replyp; @@ -725,4 +721,3 @@ __dbcl_dbc_put_ret(dbc, key, data, flags, replyp) return (replyp->status); } -#endif /* HAVE_RPC */ diff --git a/db/dist/template/rec_btree b/db/dist/template/rec_btree index 6c954db1a..0b2c96182 100644 --- a/db/dist/template/rec_btree +++ b/db/dist/template/rec_btree @@ -7,319 +7,9 @@ #endif #include "db_int.h" -#include "db_page.h" -#include "bam.h" -#include "log.h" - -/* - * __bam_pg_alloc_recover -- - * Recovery function for pg_alloc. - * - * PUBLIC: int __bam_pg_alloc_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__bam_pg_alloc_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __bam_pg_alloc_args *argp; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - PAGE *pagep; - int cmp_n, cmp_p, modified, ret; - - REC_PRINT(__bam_pg_alloc_print); - REC_INTRO(__bam_pg_alloc_read); - - if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) - if (DB_REDO(op)) { - if ((ret = memp_fget(mpf, - &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - } else { - *lsnp = argp->prev_lsn; - ret = 0; - goto out; - } - - modified = 0; - cmp_n = log_compare(lsnp, &LSN(pagep)); - - /* - * Use this when there is something like "pagelsn" in the argp - * structure. Sometimes, you might need to compare meta-data - * lsn's instead. - * - * cmp_p = log_compare(&LSN(pagep), argp->pagelsn); - */ - if (cmp_p == 0 && DB_REDO(op)) { - /* Need to redo update described. */ - modified = 1; - } else if (cmp_n == 0 && !DB_REDO(op)) { - /* Need to undo update described. */ - modified = 1; - } - if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) - goto out; - - *lsnp = argp->prev_lsn; - ret = 0; - -out: REC_CLOSE; -} - -/* - * __bam_pg_alloc1_recover -- - * Recovery function for pg_alloc1. 
- * - * PUBLIC: int __bam_pg_alloc1_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__bam_pg_alloc1_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __bam_pg_alloc1_args *argp; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - PAGE *pagep; - int cmp_n, cmp_p, modified, ret; - - REC_PRINT(__bam_pg_alloc1_print); - REC_INTRO(__bam_pg_alloc1_read); - - if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) - if (DB_REDO(op)) { - if ((ret = memp_fget(mpf, - &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - } else { - *lsnp = argp->prev_lsn; - ret = 0; - goto out; - } - - modified = 0; - cmp_n = log_compare(lsnp, &LSN(pagep)); - - /* - * Use this when there is something like "pagelsn" in the argp - * structure. Sometimes, you might need to compare meta-data - * lsn's instead. - * - * cmp_p = log_compare(&LSN(pagep), argp->pagelsn); - */ - if (cmp_p == 0 && DB_REDO(op)) { - /* Need to redo update described. */ - modified = 1; - } else if (cmp_n == 0 && !DB_REDO(op)) { - /* Need to undo update described. */ - modified = 1; - } - if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) - goto out; - - *lsnp = argp->prev_lsn; - ret = 0; - -out: REC_CLOSE; -} - -/* - * __bam_pg_free_recover -- - * Recovery function for pg_free. - * - * PUBLIC: int __bam_pg_free_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__bam_pg_free_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __bam_pg_free_args *argp; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - PAGE *pagep; - int cmp_n, cmp_p, modified, ret; - - REC_PRINT(__bam_pg_free_print); - REC_INTRO(__bam_pg_free_read); - - if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) - if (DB_REDO(op)) { - if ((ret = memp_fget(mpf, - &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - } else { - *lsnp = argp->prev_lsn; - ret = 0; - goto out; - } - - modified = 0; - cmp_n = log_compare(lsnp, &LSN(pagep)); - - /* - * Use this when there is something like "pagelsn" in the argp - * structure. Sometimes, you might need to compare meta-data - * lsn's instead. - * - * cmp_p = log_compare(&LSN(pagep), argp->pagelsn); - */ - if (cmp_p == 0 && DB_REDO(op)) { - /* Need to redo update described. */ - modified = 1; - } else if (cmp_n == 0 && !DB_REDO(op)) { - /* Need to undo update described. */ - modified = 1; - } - if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) - goto out; - - *lsnp = argp->prev_lsn; - ret = 0; - -out: REC_CLOSE; -} - -/* - * __bam_pg_free1_recover -- - * Recovery function for pg_free1. - * - * PUBLIC: int __bam_pg_free1_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__bam_pg_free1_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __bam_pg_free1_args *argp; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - PAGE *pagep; - int cmp_n, cmp_p, modified, ret; - - REC_PRINT(__bam_pg_free1_print); - REC_INTRO(__bam_pg_free1_read); - - if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) - if (DB_REDO(op)) { - if ((ret = memp_fget(mpf, - &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - } else { - *lsnp = argp->prev_lsn; - ret = 0; - goto out; - } - - modified = 0; - cmp_n = log_compare(lsnp, &LSN(pagep)); - - /* - * Use this when there is something like "pagelsn" in the argp - * structure. 
Sometimes, you might need to compare meta-data - * lsn's instead. - * - * cmp_p = log_compare(&LSN(pagep), argp->pagelsn); - */ - if (cmp_p == 0 && DB_REDO(op)) { - /* Need to redo update described. */ - modified = 1; - } else if (cmp_n == 0 && !DB_REDO(op)) { - /* Need to undo update described. */ - modified = 1; - } - if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) - goto out; - - *lsnp = argp->prev_lsn; - ret = 0; - -out: REC_CLOSE; -} - -/* - * __bam_split1_recover -- - * Recovery function for split1. - * - * PUBLIC: int __bam_split1_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__bam_split1_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __bam_split1_args *argp; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - PAGE *pagep; - int cmp_n, cmp_p, modified, ret; - - REC_PRINT(__bam_split1_print); - REC_INTRO(__bam_split1_read); - - if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) - if (DB_REDO(op)) { - if ((ret = memp_fget(mpf, - &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - } else { - *lsnp = argp->prev_lsn; - ret = 0; - goto out; - } - - modified = 0; - cmp_n = log_compare(lsnp, &LSN(pagep)); - - /* - * Use this when there is something like "pagelsn" in the argp - * structure. Sometimes, you might need to compare meta-data - * lsn's instead. - * - * cmp_p = log_compare(&LSN(pagep), argp->pagelsn); - */ - if (cmp_p == 0 && DB_REDO(op)) { - /* Need to redo update described. */ - modified = 1; - } else if (cmp_n == 0 && !DB_REDO(op)) { - /* Need to undo update described. */ - modified = 1; - } - if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) - goto out; - - *lsnp = argp->prev_lsn; - ret = 0; - -out: REC_CLOSE; -} +#include "dbinc/db_page.h" +#include "dbinc/__bam.h" +#include "dbinc/log.h" /* * __bam_split_recover -- @@ -344,11 +34,11 @@ __bam_split_recover(dbenv, dbtp, lsnp, op, info) int cmp_n, cmp_p, modified, ret; REC_PRINT(__bam_split_print); - REC_INTRO(__bam_split_read); + REC_INTRO(__bam_split_read, 1); - if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) + if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) if (DB_REDO(op)) { - if ((ret = memp_fget(mpf, + if ((ret = mpf->get(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) goto out; } else { @@ -374,7 +64,7 @@ __bam_split_recover(dbenv, dbtp, lsnp, op, info) /* Need to undo update described. */ modified = 1; } - if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) + if (ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) goto out; *lsnp = argp->prev_lsn; @@ -384,33 +74,33 @@ out: REC_CLOSE; } /* - * __bam_rsplit1_recover -- - * Recovery function for rsplit1. + * __bam_rsplit_recover -- + * Recovery function for rsplit. 
* - * PUBLIC: int __bam_rsplit1_recover + * PUBLIC: int __bam_rsplit_recover * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); */ int -__bam_rsplit1_recover(dbenv, dbtp, lsnp, op, info) +__bam_rsplit_recover(dbenv, dbtp, lsnp, op, info) DB_ENV *dbenv; DBT *dbtp; DB_LSN *lsnp; db_recops op; void *info; { - __bam_rsplit1_args *argp; + __bam_rsplit_args *argp; DB *file_dbp; DBC *dbc; DB_MPOOLFILE *mpf; PAGE *pagep; int cmp_n, cmp_p, modified, ret; - REC_PRINT(__bam_rsplit1_print); - REC_INTRO(__bam_rsplit1_read); + REC_PRINT(__bam_rsplit_print); + REC_INTRO(__bam_rsplit_read, 1); - if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) + if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) if (DB_REDO(op)) { - if ((ret = memp_fget(mpf, + if ((ret = mpf->get(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) goto out; } else { @@ -436,7 +126,7 @@ __bam_rsplit1_recover(dbenv, dbtp, lsnp, op, info) /* Need to undo update described. */ modified = 1; } - if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) + if (ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) goto out; *lsnp = argp->prev_lsn; @@ -446,33 +136,33 @@ out: REC_CLOSE; } /* - * __bam_rsplit_recover -- - * Recovery function for rsplit. + * __bam_adj_recover -- + * Recovery function for adj. * - * PUBLIC: int __bam_rsplit_recover + * PUBLIC: int __bam_adj_recover * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); */ int -__bam_rsplit_recover(dbenv, dbtp, lsnp, op, info) +__bam_adj_recover(dbenv, dbtp, lsnp, op, info) DB_ENV *dbenv; DBT *dbtp; DB_LSN *lsnp; db_recops op; void *info; { - __bam_rsplit_args *argp; + __bam_adj_args *argp; DB *file_dbp; DBC *dbc; DB_MPOOLFILE *mpf; PAGE *pagep; int cmp_n, cmp_p, modified, ret; - REC_PRINT(__bam_rsplit_print); - REC_INTRO(__bam_rsplit_read); + REC_PRINT(__bam_adj_print); + REC_INTRO(__bam_adj_read, 1); - if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) + if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) if (DB_REDO(op)) { - if ((ret = memp_fget(mpf, + if ((ret = mpf->get(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) goto out; } else { @@ -498,7 +188,7 @@ __bam_rsplit_recover(dbenv, dbtp, lsnp, op, info) /* Need to undo update described. */ modified = 1; } - if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) + if (ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) goto out; *lsnp = argp->prev_lsn; @@ -508,33 +198,33 @@ out: REC_CLOSE; } /* - * __bam_adj_recover -- - * Recovery function for adj. + * __bam_cadjust_recover -- + * Recovery function for cadjust. * - * PUBLIC: int __bam_adj_recover + * PUBLIC: int __bam_cadjust_recover * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); */ int -__bam_adj_recover(dbenv, dbtp, lsnp, op, info) +__bam_cadjust_recover(dbenv, dbtp, lsnp, op, info) DB_ENV *dbenv; DBT *dbtp; DB_LSN *lsnp; db_recops op; void *info; { - __bam_adj_args *argp; + __bam_cadjust_args *argp; DB *file_dbp; DBC *dbc; DB_MPOOLFILE *mpf; PAGE *pagep; int cmp_n, cmp_p, modified, ret; - REC_PRINT(__bam_adj_print); - REC_INTRO(__bam_adj_read); + REC_PRINT(__bam_cadjust_print); + REC_INTRO(__bam_cadjust_read, 1); - if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) + if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) if (DB_REDO(op)) { - if ((ret = memp_fget(mpf, + if ((ret = mpf->get(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) goto out; } else { @@ -560,7 +250,7 @@ __bam_adj_recover(dbenv, dbtp, lsnp, op, info) /* Need to undo update described. 
*/ modified = 1; } - if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) + if (ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) goto out; *lsnp = argp->prev_lsn; @@ -570,33 +260,33 @@ out: REC_CLOSE; } /* - * __bam_cadjust_recover -- - * Recovery function for cadjust. + * __bam_cdel_recover -- + * Recovery function for cdel. * - * PUBLIC: int __bam_cadjust_recover + * PUBLIC: int __bam_cdel_recover * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); */ int -__bam_cadjust_recover(dbenv, dbtp, lsnp, op, info) +__bam_cdel_recover(dbenv, dbtp, lsnp, op, info) DB_ENV *dbenv; DBT *dbtp; DB_LSN *lsnp; db_recops op; void *info; { - __bam_cadjust_args *argp; + __bam_cdel_args *argp; DB *file_dbp; DBC *dbc; DB_MPOOLFILE *mpf; PAGE *pagep; int cmp_n, cmp_p, modified, ret; - REC_PRINT(__bam_cadjust_print); - REC_INTRO(__bam_cadjust_read); + REC_PRINT(__bam_cdel_print); + REC_INTRO(__bam_cdel_read, 1); - if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) + if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) if (DB_REDO(op)) { - if ((ret = memp_fget(mpf, + if ((ret = mpf->get(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) goto out; } else { @@ -622,7 +312,7 @@ __bam_cadjust_recover(dbenv, dbtp, lsnp, op, info) /* Need to undo update described. */ modified = 1; } - if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) + if (ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) goto out; *lsnp = argp->prev_lsn; @@ -632,33 +322,33 @@ out: REC_CLOSE; } /* - * __bam_cdel_recover -- - * Recovery function for cdel. + * __bam_repl_recover -- + * Recovery function for repl. * - * PUBLIC: int __bam_cdel_recover + * PUBLIC: int __bam_repl_recover * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); */ int -__bam_cdel_recover(dbenv, dbtp, lsnp, op, info) +__bam_repl_recover(dbenv, dbtp, lsnp, op, info) DB_ENV *dbenv; DBT *dbtp; DB_LSN *lsnp; db_recops op; void *info; { - __bam_cdel_args *argp; + __bam_repl_args *argp; DB *file_dbp; DBC *dbc; DB_MPOOLFILE *mpf; PAGE *pagep; int cmp_n, cmp_p, modified, ret; - REC_PRINT(__bam_cdel_print); - REC_INTRO(__bam_cdel_read); + REC_PRINT(__bam_repl_print); + REC_INTRO(__bam_repl_read, 1); - if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) + if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) if (DB_REDO(op)) { - if ((ret = memp_fget(mpf, + if ((ret = mpf->get(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) goto out; } else { @@ -684,7 +374,7 @@ __bam_cdel_recover(dbenv, dbtp, lsnp, op, info) /* Need to undo update described. */ modified = 1; } - if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) + if (ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) goto out; *lsnp = argp->prev_lsn; @@ -694,33 +384,33 @@ out: REC_CLOSE; } /* - * __bam_repl_recover -- - * Recovery function for repl. + * __bam_root_recover -- + * Recovery function for root. 
* - * PUBLIC: int __bam_repl_recover + * PUBLIC: int __bam_root_recover * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); */ int -__bam_repl_recover(dbenv, dbtp, lsnp, op, info) +__bam_root_recover(dbenv, dbtp, lsnp, op, info) DB_ENV *dbenv; DBT *dbtp; DB_LSN *lsnp; db_recops op; void *info; { - __bam_repl_args *argp; + __bam_root_args *argp; DB *file_dbp; DBC *dbc; DB_MPOOLFILE *mpf; PAGE *pagep; int cmp_n, cmp_p, modified, ret; - REC_PRINT(__bam_repl_print); - REC_INTRO(__bam_repl_read); + REC_PRINT(__bam_root_print); + REC_INTRO(__bam_root_read, 1); - if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) + if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) if (DB_REDO(op)) { - if ((ret = memp_fget(mpf, + if ((ret = mpf->get(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) goto out; } else { @@ -746,7 +436,7 @@ __bam_repl_recover(dbenv, dbtp, lsnp, op, info) /* Need to undo update described. */ modified = 1; } - if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) + if (ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) goto out; *lsnp = argp->prev_lsn; @@ -756,33 +446,33 @@ out: REC_CLOSE; } /* - * __bam_root_recover -- - * Recovery function for root. + * __bam_curadj_recover -- + * Recovery function for curadj. * - * PUBLIC: int __bam_root_recover + * PUBLIC: int __bam_curadj_recover * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); */ int -__bam_root_recover(dbenv, dbtp, lsnp, op, info) +__bam_curadj_recover(dbenv, dbtp, lsnp, op, info) DB_ENV *dbenv; DBT *dbtp; DB_LSN *lsnp; db_recops op; void *info; { - __bam_root_args *argp; + __bam_curadj_args *argp; DB *file_dbp; DBC *dbc; DB_MPOOLFILE *mpf; PAGE *pagep; int cmp_n, cmp_p, modified, ret; - REC_PRINT(__bam_root_print); - REC_INTRO(__bam_root_read); + REC_PRINT(__bam_curadj_print); + REC_INTRO(__bam_curadj_read, 1); - if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) + if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) if (DB_REDO(op)) { - if ((ret = memp_fget(mpf, + if ((ret = mpf->get(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) goto out; } else { @@ -808,7 +498,7 @@ __bam_root_recover(dbenv, dbtp, lsnp, op, info) /* Need to undo update described. */ modified = 1; } - if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) + if (ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) goto out; *lsnp = argp->prev_lsn; @@ -818,33 +508,33 @@ out: REC_CLOSE; } /* - * __bam_curadj_recover -- - * Recovery function for curadj. + * __bam_rcuradj_recover -- + * Recovery function for rcuradj. * - * PUBLIC: int __bam_curadj_recover + * PUBLIC: int __bam_rcuradj_recover * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); */ int -__bam_curadj_recover(dbenv, dbtp, lsnp, op, info) +__bam_rcuradj_recover(dbenv, dbtp, lsnp, op, info) DB_ENV *dbenv; DBT *dbtp; DB_LSN *lsnp; db_recops op; void *info; { - __bam_curadj_args *argp; + __bam_rcuradj_args *argp; DB *file_dbp; DBC *dbc; DB_MPOOLFILE *mpf; PAGE *pagep; int cmp_n, cmp_p, modified, ret; - REC_PRINT(__bam_curadj_print); - REC_INTRO(__bam_curadj_read); + REC_PRINT(__bam_rcuradj_print); + REC_INTRO(__bam_rcuradj_read, 1); - if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) + if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) if (DB_REDO(op)) { - if ((ret = memp_fget(mpf, + if ((ret = mpf->get(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) goto out; } else { @@ -870,7 +560,7 @@ __bam_curadj_recover(dbenv, dbtp, lsnp, op, info) /* Need to undo update described. 
*/ modified = 1; } - if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) + if (ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) goto out; *lsnp = argp->prev_lsn; @@ -880,33 +570,33 @@ out: REC_CLOSE; } /* - * __bam_rcuradj_recover -- - * Recovery function for rcuradj. + * __bam_relink_recover -- + * Recovery function for relink. * - * PUBLIC: int __bam_rcuradj_recover + * PUBLIC: int __bam_relink_recover * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); */ int -__bam_rcuradj_recover(dbenv, dbtp, lsnp, op, info) +__bam_relink_recover(dbenv, dbtp, lsnp, op, info) DB_ENV *dbenv; DBT *dbtp; DB_LSN *lsnp; db_recops op; void *info; { - __bam_rcuradj_args *argp; + __bam_relink_args *argp; DB *file_dbp; DBC *dbc; DB_MPOOLFILE *mpf; PAGE *pagep; int cmp_n, cmp_p, modified, ret; - REC_PRINT(__bam_rcuradj_print); - REC_INTRO(__bam_rcuradj_read); + REC_PRINT(__bam_relink_print); + REC_INTRO(__bam_relink_read, 1); - if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) + if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) if (DB_REDO(op)) { - if ((ret = memp_fget(mpf, + if ((ret = mpf->get(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) goto out; } else { @@ -932,7 +622,7 @@ __bam_rcuradj_recover(dbenv, dbtp, lsnp, op, info) /* Need to undo update described. */ modified = 1; } - if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) + if (ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) goto out; *lsnp = argp->prev_lsn; diff --git a/db/dist/template/rec_db b/db/dist/template/rec_db index d5245d4b5..5c4c8f08d 100644 --- a/db/dist/template/rec_db +++ b/db/dist/template/rec_db @@ -197,68 +197,6 @@ __db_ovref_recover(dbenv, dbtp, lsnp, op, info) out: REC_CLOSE; } -/* - * __db_relink_recover -- - * Recovery function for relink. - * - * PUBLIC: int __db_relink_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__db_relink_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __db_relink_args *argp; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - PAGE *pagep; - int cmp_n, cmp_p, modified, ret; - - REC_PRINT(__db_relink_print); - REC_INTRO(__db_relink_read, 1); - - if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) - if (DB_REDO(op)) { - if ((ret = mpf->get(mpf, - &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - } else { - *lsnp = argp->prev_lsn; - ret = 0; - goto out; - } - - modified = 0; - cmp_n = log_compare(lsnp, &LSN(pagep)); - - /* - * Use this when there is something like "pagelsn" in the argp - * structure. Sometimes, you might need to compare meta-data - * lsn's instead. - * - * cmp_p = log_compare(&LSN(pagep), argp->pagelsn); - */ - if (cmp_p == 0 && DB_REDO(op)) { - /* Need to redo update described. */ - modified = 1; - } else if (cmp_n == 0 && !DB_REDO(op)) { - /* Need to undo update described. */ - modified = 1; - } - if (ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) - goto out; - - *lsnp = argp->prev_lsn; - ret = 0; - -out: REC_CLOSE; -} - /* * __db_debug_recover -- * Recovery function for debug. @@ -755,3 +693,65 @@ __db_pg_new_recover(dbenv, dbtp, lsnp, op, info) out: REC_CLOSE; } +/* + * __db_pg_init_recover -- + * Recovery function for pg_init. 
+ * + * PUBLIC: int __db_pg_init_recover + * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); + */ +int +__db_pg_init_recover(dbenv, dbtp, lsnp, op, info) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops op; + void *info; +{ + __db_pg_init_args *argp; + DB *file_dbp; + DBC *dbc; + DB_MPOOLFILE *mpf; + PAGE *pagep; + int cmp_n, cmp_p, modified, ret; + + REC_PRINT(__db_pg_init_print); + REC_INTRO(__db_pg_init_read, 1); + + if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) + if (DB_REDO(op)) { + if ((ret = mpf->get(mpf, + &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) + goto out; + } else { + *lsnp = argp->prev_lsn; + ret = 0; + goto out; + } + + modified = 0; + cmp_n = log_compare(lsnp, &LSN(pagep)); + + /* + * Use this when there is something like "pagelsn" in the argp + * structure. Sometimes, you might need to compare meta-data + * lsn's instead. + * + * cmp_p = log_compare(&LSN(pagep), argp->pagelsn); + */ + if (cmp_p == 0 && DB_REDO(op)) { + /* Need to redo update described. */ + modified = 1; + } else if (cmp_n == 0 && !DB_REDO(op)) { + /* Need to undo update described. */ + modified = 1; + } + if (ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) + goto out; + + *lsnp = argp->prev_lsn; + ret = 0; + +out: REC_CLOSE; +} + diff --git a/db/dist/vx_2.0/BerkeleyDBsmall.wpj b/db/dist/vx_2.0/BerkeleyDBsmall.wpj new file mode 100644 index 000000000..3c9fd350f --- /dev/null +++ b/db/dist/vx_2.0/BerkeleyDBsmall.wpj @@ -0,0 +1,251 @@ +Document file - DO NOT EDIT + + BUILD_PENTIUM_debug_BUILDRULE +BerkeleyDB20small.out + + + BUILD_PENTIUM_debug_MACRO_AR +ar386 + + + BUILD_PENTIUM_debug_MACRO_ARCHIVE +$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB20small_sim.a + + + BUILD_PENTIUM_debug_MACRO_AS +cc386 + + + BUILD_PENTIUM_debug_MACRO_CC +cc386 + + + BUILD_PENTIUM_debug_MACRO_CFLAGS +-g \ + -mpentium \ + -ansi \ + -nostdinc \ + -DRW_MULTI_THREAD \ + -D_REENTRANT \ + -fvolatile \ + -nostdlib \ + -fno-builtin \ + -fno-defer-pop \ + -I. \ + -I$(WIND_BASE)/target/h \ + -DCPU=PENTIUM \ + -O0 \ + -I$(PRJ_DIR) \ + -I$(PRJ_DIR)/.. \ + -DDIAGNOSTIC \ + -DDEBUG + + + BUILD_PENTIUM_debug_MACRO_CFLAGS_AS +-g \ + -mpentium \ + -ansi \ + -nostdinc \ + -fvolatile \ + -nostdlib \ + -fno-builtin \ + -fno-defer-pop \ + -P \ + -x \ + assembler-with-cpp \ + -I. \ + -I$(WIND_BASE)/target/h \ + -DCPU=PENTIUM + + + BUILD_PENTIUM_debug_MACRO_CPP +cc386 -E -P -xc + + + BUILD_PENTIUM_debug_MACRO_LD +ld386 + + + BUILD_PENTIUM_debug_MACRO_LDFLAGS +-X -N + + + BUILD_PENTIUM_debug_MACRO_LD_PARTIAL_FLAGS +-X -r + + + BUILD_PENTIUM_debug_MACRO_NM +nm386 -g + + + BUILD_PENTIUM_debug_MACRO_OPTION_DEFINE_MACRO +-D + + + BUILD_PENTIUM_debug_MACRO_OPTION_INCLUDE_DIR +-I + + + BUILD_PENTIUM_debug_MACRO_POST_BUILD_RULE + + + + BUILD_PENTIUM_debug_MACRO_PRJ_LIBS + + + + BUILD_PENTIUM_debug_MACRO_SIZE +size386 + + + BUILD_PENTIUM_debug_RO_DEPEND_PATH +{$(WIND_BASE)/target/h/} \ + {$(WIND_BASE)/target/src/} \ + {$(WIND_BASE)/target/config/} + + + BUILD_PENTIUM_debug_TC +::tc_PENTIUMgnu + + + BUILD_PENTIUM_release_BUILDRULE +BerkeleyDB20small.out + + + BUILD_PENTIUM_release_MACRO_AR +ar386 + + + BUILD_PENTIUM_release_MACRO_ARCHIVE +$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB20small_sim.a + + + BUILD_PENTIUM_release_MACRO_AS +cc386 + + + BUILD_PENTIUM_release_MACRO_CC +cc386 + + + BUILD_PENTIUM_release_MACRO_CFLAGS +-mpentium \ + -ansi \ + -nostdinc \ + -DRW_MULTI_THREAD \ + -D_REENTRANT \ + -fvolatile \ + -nostdlib \ + -fno-builtin \ + -fno-defer-pop \ + -I. 
\ + -I$(WIND_BASE)/target/h \ + -DCPU=PENTIUM \ + -O2 \ + -I$(PRJ_DIR) \ + -I$(PRJ_DIR)/.. + + + BUILD_PENTIUM_release_MACRO_CFLAGS_AS +-g \ + -mpentium \ + -ansi \ + -nostdinc \ + -fvolatile \ + -nostdlib \ + -fno-builtin \ + -fno-defer-pop \ + -P \ + -x \ + assembler-with-cpp \ + -I. \ + -I$(WIND_BASE)/target/h \ + -DCPU=PENTIUM + + + BUILD_PENTIUM_release_MACRO_CPP +cc386 -E -P -xc + + + BUILD_PENTIUM_release_MACRO_LD +ld386 + + + BUILD_PENTIUM_release_MACRO_LDDEPS + + + + BUILD_PENTIUM_release_MACRO_LDFLAGS +-X -N + + + BUILD_PENTIUM_release_MACRO_LD_PARTIAL_FLAGS +-X -r + + + BUILD_PENTIUM_release_MACRO_NM +nm386 -g + + + BUILD_PENTIUM_release_MACRO_OPTION_DEFINE_MACRO +-D + + + BUILD_PENTIUM_release_MACRO_OPTION_INCLUDE_DIR +-I + + + BUILD_PENTIUM_release_MACRO_POST_BUILD_RULE + + + + BUILD_PENTIUM_release_MACRO_PRJ_LIBS + + + + BUILD_PENTIUM_release_MACRO_SIZE +size386 + + + BUILD_PENTIUM_release_RO_DEPEND_PATH +{$(WIND_BASE)/target/h/} \ + {$(WIND_BASE)/target/src/} \ + {$(WIND_BASE)/target/config/} + + + BUILD_PENTIUM_release_TC +::tc_PENTIUMgnu + + + BUILD_RULE_BerkeleyDB20small.out + + + + BUILD_RULE_BerkeleyDB20small_sim.out + + + + BUILD_RULE_archive + + + + BUILD_RULE_objects + + + + BUILD__CURRENT +PENTIUM_debug + + + BUILD__LIST +PENTIUM_release PENTIUM_debug + + + CORE_INFO_TYPE +::prj_vxApp + + + CORE_INFO_VERSION +2.0 + + diff --git a/db/dist/vx_2.2/BerkeleyDBsmall.wpj b/db/dist/vx_2.2/BerkeleyDBsmall.wpj new file mode 100644 index 000000000..bfbdadc46 --- /dev/null +++ b/db/dist/vx_2.2/BerkeleyDBsmall.wpj @@ -0,0 +1,310 @@ +Document file - DO NOT EDIT + + BUILD_PENTIUM_debug_BUILDRULE +BerkeleyDB22small.out + + + BUILD_PENTIUM_debug_MACRO_AR +arpentium + + + BUILD_PENTIUM_debug_MACRO_ARCHIVE +$(PRJ_DIR)/PENTIUM_debug/BerkeleyDB22small.a + + + BUILD_PENTIUM_debug_MACRO_AS +ccpentium + + + BUILD_PENTIUM_debug_MACRO_CC +ccpentium + + + BUILD_PENTIUM_debug_MACRO_CC_ARCH_SPEC +-mcpu=pentiumpro -march=pentiumpro + + + BUILD_PENTIUM_debug_MACRO_CFLAGS +-g \ + -mcpu=pentiumpro \ + -march=pentiumpro \ + -ansi \ + -nostdlib \ + -fno-builtin \ + -fno-defer-pop \ + -I. \ + -I$(WIND_BASE)/target/h \ + -DCPU=PENTIUM2 \ + -DTOOL_FAMILY=gnu \ + -DTOOL=gnu \ + -O0 \ + -I$(PRJ_DIR) \ + -I$(PRJ_DIR)/.. \ + -DDIAGNOSTIC \ + -DDEBUG + + + BUILD_PENTIUM_debug_MACRO_CFLAGS_AS +-g \ + -mcpu=pentiumpro \ + -march=pentiumpro \ + -ansi \ + -nostdlib \ + -fno-builtin \ + -fno-defer-pop \ + -P \ + -xassembler-with-cpp \ + -I. 
\ + -I$(WIND_BASE)/target/h \ + -DCPU=PENTIUM2 \ + -DTOOL_FAMILY=gnu \ + -DTOOL=gnu + + + BUILD_PENTIUM_debug_MACRO_CPP +ccpentium -E -P + + + BUILD_PENTIUM_debug_MACRO_HEX_FLAGS + + + + BUILD_PENTIUM_debug_MACRO_LD +ldpentium + + + BUILD_PENTIUM_debug_MACRO_LDFLAGS +-X -N + + + BUILD_PENTIUM_debug_MACRO_LD_PARTIAL +ccpentium -r -nostdlib -Wl,-X + + + BUILD_PENTIUM_debug_MACRO_LD_PARTIAL_FLAGS +-X -r + + + BUILD_PENTIUM_debug_MACRO_NM +nmpentium -g + + + BUILD_PENTIUM_debug_MACRO_OPTION_DEFINE_MACRO +-D + + + BUILD_PENTIUM_debug_MACRO_OPTION_DEPEND +-M -w + + + BUILD_PENTIUM_debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE +-MD + + + BUILD_PENTIUM_debug_MACRO_OPTION_INCLUDE_DIR +-I + + + BUILD_PENTIUM_debug_MACRO_OPTION_LANG_C +-xc + + + BUILD_PENTIUM_debug_MACRO_OPTION_UNDEFINE_MACRO +-U + + + BUILD_PENTIUM_debug_MACRO_POST_BUILD_RULE + + + + BUILD_PENTIUM_debug_MACRO_PRJ_LIBS + + + + BUILD_PENTIUM_debug_MACRO_SIZE +sizepentium + + + BUILD_PENTIUM_debug_MACRO_TOOL_FAMILY +gnu + + + BUILD_PENTIUM_debug_RO_DEPEND_PATH +{$(WIND_BASE)/target/h/} \ + {$(WIND_BASE)/target/src/} \ + {$(WIND_BASE)/target/config/} + + + BUILD_PENTIUM_debug_TC +::tc_PENTIUM2gnu + + + BUILD_PENTIUM_release_BUILDRULE +BerkeleyDB22small.out + + + BUILD_PENTIUM_release_MACRO_AR +arpentium + + + BUILD_PENTIUM_release_MACRO_ARCHIVE +$(PRJ_DIR)/PENTIUM_release/BerkeleyDB22small.a + + + BUILD_PENTIUM_release_MACRO_AS +ccpentium + + + BUILD_PENTIUM_release_MACRO_CC +ccpentium + + + BUILD_PENTIUM_release_MACRO_CC_ARCH_SPEC +-mcpu=pentiumpro -march=pentiumpro + + + BUILD_PENTIUM_release_MACRO_CFLAGS +-g \ + -mcpu=pentiumpro \ + -march=pentiumpro \ + -ansi \ + -nostdlib \ + -fno-builtin \ + -fno-defer-pop \ + -I. \ + -I$(WIND_BASE)/target/h \ + -DCPU=PENTIUM2 \ + -DTOOL_FAMILY=gnu \ + -DTOOL=gnu \ + -O2 \ + -I$(PRJ_DIR) \ + -I$(PRJ_DIR)/.. + + + BUILD_PENTIUM_release_MACRO_CFLAGS_AS +-g \ + -mcpu=pentiumpro \ + -march=pentiumpro \ + -ansi \ + -nostdlib \ + -fno-builtin \ + -fno-defer-pop \ + -P \ + -xassembler-with-cpp \ + -I. 
\ + -I$(WIND_BASE)/target/h \ + -DCPU=PENTIUM2 \ + -DTOOL_FAMILY=gnu \ + -DTOOL=gnu + + + BUILD_PENTIUM_release_MACRO_CPP +ccpentium -E -P + + + BUILD_PENTIUM_release_MACRO_HEX_FLAGS + + + + BUILD_PENTIUM_release_MACRO_LD +ldpentium + + + BUILD_PENTIUM_release_MACRO_LDFLAGS +-X -N + + + BUILD_PENTIUM_release_MACRO_LD_PARTIAL +ccpentium -r -nostdlib -Wl,-X + + + BUILD_PENTIUM_release_MACRO_LD_PARTIAL_FLAGS +-X -r + + + BUILD_PENTIUM_release_MACRO_NM +nmpentium -g + + + BUILD_PENTIUM_release_MACRO_OPTION_DEFINE_MACRO +-D + + + BUILD_PENTIUM_release_MACRO_OPTION_DEPEND +-M -w + + + BUILD_PENTIUM_release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE +-MD + + + BUILD_PENTIUM_release_MACRO_OPTION_INCLUDE_DIR +-I + + + BUILD_PENTIUM_release_MACRO_OPTION_LANG_C +-xc + + + BUILD_PENTIUM_release_MACRO_OPTION_UNDEFINE_MACRO +-U + + + BUILD_PENTIUM_release_MACRO_POST_BUILD_RULE + + + + BUILD_PENTIUM_release_MACRO_PRJ_LIBS + + + + BUILD_PENTIUM_release_MACRO_SIZE +sizepentium + + + BUILD_PENTIUM_release_MACRO_TOOL_FAMILY +gnu + + + BUILD_PENTIUM_release_RO_DEPEND_PATH +{$(WIND_BASE)/target/h/} \ + {$(WIND_BASE)/target/src/} \ + {$(WIND_BASE)/target/config/} + + + BUILD_PENTIUM_release_TC +::tc_PENTIUM2gnu + + + BUILD_RULE_BerkeleyDB22small.out + + + + BUILD_RULE_BerkeleyDB22small.pl + + + + BUILD_RULE_archive + + + + BUILD_RULE_objects + + + + BUILD__CURRENT +PENTIUM_debug + + + BUILD__LIST +PENTIUM_release PENTIUM_debug + + + CORE_INFO_TYPE +::prj_vxApp + + + CORE_INFO_VERSION +2.2 + + diff --git a/db/dist/vx_config.in b/db/dist/vx_config.in index e05d46984..3c0ef3af1 100644 --- a/db/dist/vx_config.in +++ b/db/dist/vx_config.in @@ -53,9 +53,15 @@ /* Define to 1 if fcntl/F_SETFD denies child access to file descriptors. */ /* #undef HAVE_FCNTL_F_SETFD */ +/* Define to 1 if you have the `fdatasync' function. */ +/* #undef HAVE_FDATASYNC */ + /* Define to 1 if allocated filesystem blocks are not zeroed. */ #define HAVE_FILESYSTEM_NOTZERO 1 +/* Define to 1 if you have the `ftruncate' function. */ +/* #undef HAVE_FTRUNCATE */ + /* Define to 1 if you have the `getcwd' function. */ #define HAVE_GETCWD 1 @@ -80,6 +86,9 @@ /* Define to 1 if you have the `nsl' library (-lnsl). */ /* #undef HAVE_LIBNSL */ +/* Define to 1 if the system has the type `long long'. */ +/* #undef HAVE_LONG_LONG */ + /* Define to 1 if you have the `memcmp' function. */ #define HAVE_MEMCMP 1 @@ -227,6 +236,9 @@ /* Define to 1 if you have the `raise' function. */ #define HAVE_RAISE 1 +/* Define to 1 if you have the `rand' function. */ +#define HAVE_RAND 1 + /* Define to 1 if building replication support. */ #define HAVE_REPLICATION 1 @@ -239,12 +251,21 @@ /* Define to 1 if you have the `select' function. */ #define HAVE_SELECT 1 +/* Define to 1 if building sequence support. */ +/* #undef HAVE_SEQUENCE */ + /* Define to 1 if you have the `shmget' function. */ /* #undef HAVE_SHMGET */ /* Define to 1 if you have the `snprintf' function. */ /* #undef HAVE_SNPRINTF */ +/* Define to 1 if you have the `srand' function. */ +#define HAVE_SRAND 1 + +/* Define to 1 if building statistics support. */ +#define HAVE_STATISTICS 1 + /* Define to 1 if you have the header file. */ /* #undef HAVE_STDINT_H */ @@ -304,6 +325,9 @@ /* Define to 1 if unlink of file with open file descriptors will fail. */ #define HAVE_UNLINK_WITH_OPEN_FAILURE 1 +/* Define to 1 if the system has the type `unsigned long long'. */ +/* #undef HAVE_UNSIGNED_LONG_LONG */ + /* Define to 1 if building access method verification support. 
*/ #define HAVE_VERIFY 1 @@ -346,7 +370,7 @@ /* Define to 1 if you can safely include both and . */ /* #undef TIME_WITH_SYS_TIME */ -/* Define to 1 to mask harmless unitialized memory read/writes. */ +/* Define to 1 to mask harmless uninitialized memory read/writes. */ /* #undef UMRW */ /* Number of bits in a file offset, on hosts where this is settable. */ diff --git a/db/dist/vx_setup/LICENSE.TXT b/db/dist/vx_setup/LICENSE.TXT index 463fb66d2..f31971375 100644 --- a/db/dist/vx_setup/LICENSE.TXT +++ b/db/dist/vx_setup/LICENSE.TXT @@ -1,3 +1,3 @@ -Copyright (c) 1996-2003 +Copyright (c) 1996-2004 Sleepycat Software. All rights reserved. See the file LICENSE for redistribution information. diff --git a/db/dist/win_config.in b/db/dist/win_config.in index b4404d2fb..8041f36a7 100644 --- a/db/dist/win_config.in +++ b/db/dist/win_config.in @@ -48,9 +48,15 @@ /* Define to 1 if fcntl/F_SETFD denies child access to file descriptors. */ /* #undef HAVE_FCNTL_F_SETFD */ +/* Define to 1 if you have the `fdatasync' function. */ +/* #undef HAVE_FDATASYNC */ + /* Define to 1 if allocated filesystem blocks are not zeroed. */ #define HAVE_FILESYSTEM_NOTZERO 1 +/* Define to 1 if you have the `ftruncate' function. */ +#define HAVE_FTRUNCATE 1 + /* Define to 1 if you have the `getcwd' function. */ #define HAVE_GETCWD 1 @@ -77,6 +83,9 @@ /* Define to 1 if you have the `nsl' library (-lnsl). */ /* #undef HAVE_LIBNSL */ +/* Define to 1 if the system has the type `long long'. */ +#define HAVE_LONG_LONG 1 + /* Define to 1 if you have the `memcmp' function. */ #define HAVE_MEMCMP 1 @@ -226,6 +235,9 @@ /* Define to 1 if you have the `raise' function. */ #define HAVE_RAISE 1 +/* Define to 1 if you have the `rand' function. */ +#define HAVE_RAND 1 + /* Define to 1 if building replication support. */ #ifndef HAVE_SMALLBUILD #define HAVE_REPLICATION 1 @@ -240,12 +252,21 @@ /* Define to 1 if you have the `select' function. */ /* #undef HAVE_SELECT */ +/* Define to 1 if building sequence support. */ +#define HAVE_SEQUENCE 1 + /* Define to 1 if you have the `shmget' function. */ /* #undef HAVE_SHMGET */ /* Define to 1 if you have the `snprintf' function. */ #define HAVE_SNPRINTF 1 +/* Define to 1 if you have the `srand' function. */ +#define HAVE_SRAND 1 + +/* Define to 1 if building statistics support. */ +#define HAVE_STATISTICS 1 + /* Define to 1 if you have the header file. */ /* #undef HAVE_STDINT_H */ @@ -305,6 +326,9 @@ /* Define to 1 if unlink of file with open file descriptors will fail. */ /* #undef HAVE_UNLINK_WITH_OPEN_FAILURE */ +/* Define to 1 if the system has the type `unsigned long long'. */ +#define HAVE_UNSIGNED_LONG_LONG 1 + /* Define to 1 if building access method verification support. */ #ifndef HAVE_SMALLBUILD #define HAVE_VERIFY 1 @@ -349,7 +373,7 @@ /* Define to 1 if you can safely include both and . */ /* #undef TIME_WITH_SYS_TIME */ -/* Define to 1 to mask harmless unitialized memory read/writes. */ +/* Define to 1 to mask harmless uninitialized memory read/writes. */ /* #undef UMRW */ /* Number of bits in a file offset, on hosts where this is settable. */ @@ -410,6 +434,6 @@ * arguments turning OFF all vendor extensions. Even more unfortunately, if * we do that, it fails to parse windows.h!!!!! So, we define __STDC__ here, * after windows.h comes in. Note: the compiler knows we've defined it, and - * starts enforcing strict ANSI compilance from this point on. + * starts enforcing strict ANSI compliance from this point on. 
*/ #define __STDC__ 1 diff --git a/db/dist/win_db.in b/db/dist/win_db.in index 5f2ecbee1..d623610f4 100644 --- a/db/dist/win_db.in +++ b/db/dist/win_db.in @@ -1,5 +1,5 @@ /*- - * $Id: win_db.in,v 11.1 2003/03/20 15:08:21 bostic Exp $ + * $Id: win_db.in,v 11.4 2004/10/07 13:59:24 carol Exp $ * * The following provides the information necessary to build Berkeley * DB on native Windows, and other Windows environments such as MinGW. @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -57,3 +58,37 @@ extern int getopt(int, char * const *, const char *); #if defined(__cplusplus) } #endif + +#ifdef _UNICODE +#define TO_TSTRING(dbenv, s, ts, ret) do { \ + int __len = strlen(s) + 1; \ + ts = NULL; \ + if ((ret = __os_malloc((dbenv), \ + __len * sizeof (_TCHAR), &(ts))) == 0 && \ + MultiByteToWideChar(CP_UTF8, 0, \ + (s), -1, (ts), __len) == 0) \ + ret = __os_get_errno(); \ + } while (0) + +#define FROM_TSTRING(dbenv, ts, s, ret) { \ + int __len = WideCharToMultiByte(CP_UTF8, 0, ts, -1, \ + NULL, 0, NULL, NULL); \ + s = NULL; \ + if ((ret = __os_malloc((dbenv), __len, &(s))) == 0 && \ + WideCharToMultiByte(CP_UTF8, 0, \ + (ts), -1, (s), __len, NULL, NULL) == 0) \ + ret = __os_get_errno(); \ + } while (0) + +#define FREE_STRING(dbenv, s) do { \ + if ((s) != NULL) { \ + __os_free((dbenv), (s)); \ + (s) = NULL; \ + } \ + } while (0) + +#else +#define TO_TSTRING(dbenv, s, ts, ret) (ret) = 0, (ts) = (_TCHAR *)(s) +#define FROM_TSTRING(dbenv, ts, s, ret) (ret) = 0, (s) = (char *)(ts) +#define FREE_STRING(dbenv, ts) +#endif diff --git a/db/dist/win_exports.in b/db/dist/win_exports.in index bf2b181ab..9087875cc 100644 --- a/db/dist/win_exports.in +++ b/db/dist/win_exports.in @@ -1,8 +1,9 @@ -# $Id: win_exports.in,v 1.28 2003/06/30 21:50:10 mjc Exp $ +# $Id: win_exports.in,v 1.41 2004/10/12 17:44:10 bostic Exp $ # Standard interfaces. db_create db_env_create + db_sequence_create db_strerror db_version db_xa_switch @@ -15,10 +16,13 @@ db_env_set_func_exists db_env_set_func_free db_env_set_func_fsync + db_env_set_func_ftruncate db_env_set_func_ioinfo db_env_set_func_malloc db_env_set_func_map db_env_set_func_open + db_env_set_func_pread + db_env_set_func_pwrite db_env_set_func_read db_env_set_func_realloc db_env_set_func_rename @@ -40,6 +44,8 @@ __db_dbm_init __db_dbm_nextkey __db_dbm_store + __db_get_flags_fn + __db_get_seq_flags_fn __db_hcreate __db_hdestroy __db_hsearch @@ -67,9 +73,7 @@ __ham_func4 __ham_func5 __ham_test - __lock_dump_region __lock_id_set - __memp_dump_region __os_calloc __os_closehandle __os_free @@ -85,42 +89,75 @@ __txn_id_set #These are needed for linking tools or java. 
- __bam_init_print + __bam_adj_read + __bam_cadjust_read + __bam_cdel_read + __bam_curadj_read __bam_pgin __bam_pgout - __crdel_init_print + __bam_rcuradj_read + __bam_relink_read + __bam_repl_read + __bam_root_read + __bam_rsplit_read + __bam_split_read + __crdel_metasub_read + __db_addrem_read + __db_big_read + __db_cksum_read + __db_debug_read __db_dispatch - __db_dump - __db_e_stat + __db_dumptree __db_err + __db_fileid_reset __db_getlong __db_getulong __db_global_values - __db_init_print - __db_inmemdbflags __db_isbigendian + __db_lsn_reset + __db_noop_read __db_omode __db_overwrite + __db_ovref_read + __db_pg_alloc_read + __db_pg_free_read + __db_pg_freedata_read + __db_pg_init_read + __db_pg_new_read + __db_pg_prepare_read __db_pgin __db_pgout __db_pr_callback - __db_prdbt - __db_prfooter - __db_prheader __db_rpath + __db_stat_pp + __db_stat_print_pp __db_util_cache __db_util_interrupted __db_util_logset __db_util_siginit __db_util_sigresend __db_verify_internal - __dbreg_init_print - __fop_init_print + __dbreg_register_read + __fop_create_read + __fop_file_remove_read + __fop_remove_read + __fop_rename_read + __fop_write_read + __ham_chgpg_read + __ham_copypage_read + __ham_curadj_read __ham_get_meta - __ham_init_print + __ham_groupalloc_read + __ham_insdel_read + __ham_metagroup_read + __ham_newpage_read __ham_pgin __ham_pgout __ham_release_meta + __ham_replace_read + __ham_splitdata_read + __lock_list_print + __log_stat_pp __os_clock __os_get_errno __os_id @@ -128,6 +165,15 @@ __os_sleep __os_ufree __os_yield - __qam_init_print + __qam_add_read + __qam_del_read + __qam_delext_read + __qam_incfirst_read + __qam_mvptr_read __qam_pgin_out - __txn_init_print + __rep_stat_print + __txn_child_read + __txn_ckp_read + __txn_recycle_read + __txn_regop_read + __txn_xa_regop_read diff --git a/db/docs/api_c/api_index.html b/db/docs/api_c/api_index.html index 2c922718e..e71239172 100644 --- a/db/docs/api_c/api_index.html +++ b/db/docs/api_c/api_index.html @@ -1,168 +1,201 @@ - - + + -Berkeley DB: Berkeley DB: C API +Berkeley DB: Berkeley DB: C Handle Methods - + -
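The _UNICODE string-conversion macros added to win_db.in above (TO_TSTRING, FROM_TSTRING, FREE_STRING) are easiest to read next to a caller. The sketch below is illustrative only: open_handle is a hypothetical helper, and it assumes the usual Berkeley DB Windows OS-layer context (windows.h/tchar.h and the internal __os_get_errno routine, which this patch also exports) rather than anything shown in this diff.

static int
open_handle(DB_ENV *dbenv, const char *path, HANDLE *hp)
{
	_TCHAR *tpath;
	int ret;

	/* Convert the UTF-8 path; allocates a wide copy when _UNICODE is set. */
	TO_TSTRING(dbenv, path, tpath, ret);
	if (ret != 0)
		return (ret);

	/* Hand the converted name to the TCHAR-based Win32 call. */
	*hp = CreateFile(tpath, GENERIC_READ, FILE_SHARE_READ,
	    NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
	if (*hp == INVALID_HANDLE_VALUE)
		ret = __os_get_errno();

	/* Release the converted copy; a no-op in non-Unicode builds. */
	FREE_STRING(dbenv, tpath);
	return (ret);
}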

Berkeley DB: C API

+

Berkeley DB: C Handle Methods

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
SectionMethodDescription
Database Environmentdb_env_createCreate an environment handle

DB_ENV->closeClose an environment

DB_ENV->dbremoveRemove a database

DB_ENV->dbrenameRename a database

DB_ENV->errError message with error string

DB_ENV->errxError message

DB_ENV->openOpen an environment

DB_ENV->removeRemove an environment

db_strerrorError strings

db_versionReturn version information
Environment ConfigurationDB_ENV->set_app_dispatchConfigure application recovery

DB_ENV->set_allocSet local space allocation functions

DB_ENV->set_data_dirSet the environment data directory

DB_ENV->set_encryptSet the environment cryptographic key

DB_ENV->set_errcallSet error message callback

DB_ENV->set_errfileSet error message FILE

DB_ENV->set_errpfxSet error message prefix

DB_ENV->set_feedbackSet feedback callback

DB_ENV->set_flagsEnvironment configuration

DB_ENV->set_paniccallSet panic callback

DB_ENV->set_rpc_serverEstablish an RPC server connection

DB_ENV->set_shm_keySet system memory shared segment ID

DB_ENV->set_tas_spinsSet the number of test-and-set spins

DB_ENV->set_timeoutSet lock and transaction timeout

DB_ENV->set_tmp_dirSet the environment temporary file directory

DB_ENV->set_verboseSet verbose messages
Database Operationsdb_createCreate a database handle

DB->associateAssociate a secondary index

DB->closeClose a database

DB->delDelete items from a database

DB->errError message with error string

DB->errxError message

DB->fdReturn a file descriptor from a database

DB->get, DB->pgetGet items from a database

DB->get_byteswappedReturn if the underlying database is in host order

DB->get_envReturn a handle for the underlying database environment

DB->get_typeReturn the database type

DB->joinPerform a database join on cursors

DB->key_rangeReturn estimate of key location

DB->openOpen a database

DB->putStore items into a database

DB->removeRemove a database

DB->renameRename a database

DB->statReturn database statistics

DB->syncFlush a database to stable storage

DB->truncateEmpty a database

DB->upgradeUpgrade a database

DB->verifyVerify/salvage a database
Database ConfigurationDB->set_allocSet local space allocation functions

DB->set_cachesizeSet the database cache size

DB->set_dup_compareSet a duplicate comparison function

DB->set_encryptSet the database cryptographic key

DB->set_errcallSet error message callback

DB->set_errfileSet error message FILE

DB->set_errpfxSet error message prefix

DB->set_feedbackSet feedback callback

DB->set_flagsGeneral database configuration

DB->set_lorderSet the database byte order

DB->set_pagesizeSet the underlying database page size

DB->set_paniccallSet panic callback
Btree/Recno ConfigurationDB->set_append_recnoSet record append callback

DB->set_bt_compareSet a Btree comparison function

DB->set_bt_minkeySet the minimum number of keys per Btree page

DB->set_bt_prefixSet a Btree prefix comparison function

DB->set_re_delimSet the variable-length record delimiter

DB->set_re_lenSet the fixed-length record length

DB->set_re_padSet the fixed-length record pad byte

DB->set_re_sourceSet the backing Recno text file
Hash ConfigurationDB->set_h_ffactorSet the Hash table density

DB->set_h_hashSet a hashing function

DB->set_h_nelemSet the Hash table size
Queue ConfigurationDB->set_q_extentsizeSet Queue database extent size
Database Cursor OperationsDB->cursorCreate a cursor handle

DBcursor->c_closeClose a cursor

DBcursor->c_countReturn count of duplicates

DBcursor->c_delDelete by cursor

DBcursor->c_dupDuplicate a cursor

DBcursor->c_get, DBcursor->c_pgetRetrieve by cursor

DBcursor->c_putStore by cursor
Key/Data PairsDBT
Bulk RetrievalDB_MULTIPLE_INIT
Lock SubsystemDB_ENV->set_lk_conflictsSet lock conflicts matrix

DB_ENV->set_lk_detectSet automatic deadlock detection

DB_ENV->set_lk_max_lockersSet maximum number of lockers

DB_ENV->set_lk_max_locksSet maximum number of locks

DB_ENV->set_lk_max_objectsSet maximum number of lock objects

DB_ENV->lock_detectPerform deadlock detection

DB_ENV->lock_getAcquire a lock

DB_ENV->lock_idAcquire a locker ID

DB_ENV->lock_id_freeRelease a locker ID

DB_ENV->lock_putRelease a lock

DB_ENV->lock_statReturn lock subsystem statistics

DB_ENV->lock_vecAcquire/release locks
Log SubsystemDB_ENV->set_lg_bsizeSet log buffer size

DB_ENV->set_lg_dirSet the environment logging directory

DB_ENV->set_lg_maxSet log file size

DB_ENV->set_lg_regionmaxSet logging region size

DB_ENV->log_archiveList log and database files

DB_ENV->log_fileMap Log Sequence Numbers to log files

DB_ENV->log_flushFlush log records

DB_ENV->log_putWrite a log record

DB_ENV->log_statReturn log subsystem statistics
Log Cursor OperationsDB_ENV->log_cursorCreate a log cursor handle

DB_LOGC->closeClose a log cursor

DB_LOGC->getRetrieve a log record
Log Sequence NumbersDB_LSN

log_compareCompare two Log Sequence Numbers
Memory Pool SubsystemDB->mpfReturn the database's memory pool handle

DB_ENV->set_cachesizeSet the environment cache size

DB_ENV->set_mp_mmapsizeSet maximum mapped-in database file size

DB_ENV->memp_registerRegister input/output functions for a file in a memory pool

DB_ENV->memp_statReturn memory pool statistics

DB_ENV->memp_syncFlush pages from a memory pool

DB_ENV->memp_trickleTrickle flush pages from a memory pool
Memory Pool FilesDB_ENV->memp_fcreateCreate a memory pool file handle

DB_MPOOLFILE->closeClose a file in a memory pool

DB_MPOOLFILE->getGet page from a file in a memory pool

DB_MPOOLFILE->openOpen a file in a memory pool

DB_MPOOLFILE->putReturn a page to a memory pool

DB_MPOOLFILE->setSet memory pool page status

DB_MPOOLFILE->syncFlush pages from a file in a memory pool

DB_MPOOLFILE->set_clear_lenSet file page bytes to be cleared

DB_MPOOLFILE->set_fileidSet file unique identifier

DB_MPOOLFILE->set_flagsGeneral memory pool file configuration

DB_MPOOLFILE->set_ftypeSet file type

DB_MPOOLFILE->set_lsn_offsetSet file log-sequence-number offset

DB_MPOOLFILE->set_maxsizeSet the maximum file size

DB_MPOOLFILE->set_pgcookieSet file cookie for pgin/pgout

DB_MPOOLFILE->set_prioritySet the file priority
Transaction SubsystemDB_ENV->set_tx_maxSet maximum number of transactions

DB_ENV->set_tx_timestampSet recovery timestamp

DB_ENV->txn_checkpointCheckpoint the transaction subsystem

DB_ENV->txn_recoverDistributed transaction recovery

DB_ENV->txn_statReturn transaction subsystem statistics
TransactionsDB_ENV->txn_beginBegin a transaction

DB_TXN->abortAbort a transaction

DB_TXN->commitCommit a transaction

DB_TXN->discardDiscard a prepared but not resolved transaction handle

DB_TXN->idReturn a transaction's ID

DB_TXN->preparePrepare a transaction for commit

DB_TXN->set_timeoutSet transaction timeout
ReplicationDB_ENV->set_rep_transportConfigure replication transport

DB_ENV->rep_electHold a replication election

DB_ENV->set_rep_limitLimit data sent in response to a single message

DB_ENV->rep_process_messageProcess a replication message

DB_ENV->rep_startConfigure an environment for replication

DB_ENV->rep_statReplication statistics
Historic InterfacesdbmUNIX Dbm/Ndbm Interfaces

hsearchUNIX Hsearch Interfaces
System ConfigurationRun-time configurationReplace underlying Berkeley DB system interfaces
C Handle MethodsDescription
DBcursor->c_closeClose a cursor
DBcursor->c_countReturn count of duplicates
DBcursor->c_delDelete by cursor
DBcursor->c_dupDuplicate a cursor
DBcursor->c_getRetrieve by cursor
DBcursor->c_pgetRetrieve by cursor
DBcursor->c_putStore by cursor
db_createCreate a database handle
DB->associateAssociate a secondary index
DB->closeClose a database
DB->cursorCreate a cursor handle
DB->delDelete items from a database
DB->errError message with error string
DB->errxError message
DB->fdReturn a file descriptor from a database
DB->getGet items from a database
DB->get_byteswappedReturn if the underlying database is in host order
DB->get_envReturn database environment handle
DB->mpfReturn the database's memory pool handle
DB->get_typeReturn the database type
DB->joinPerform a database join on cursors
DB->key_rangeReturn estimate of key location
DB->openOpen a database
DB->pgetGet items from a database
DB->putStore items into a database
DB->removeRemove a database
DB->renameRename a database
DB->set_allocSet local space allocation functions
DB->set_append_recnoSet record append callback
DB->set_bt_compareSet a Btree comparison function
DB->set_bt_minkeySet the minimum number of keys per Btree page
DB->set_bt_prefixSet a Btree prefix comparison function
DB->set_cachesizeSet the database cache size
DB->set_dup_compareSet a duplicate comparison function
DB->set_encryptSet the database cryptographic key
DB->set_errcallSet error and informational message callback
DB->set_errfileSet error and informational message FILE
DB->set_errpfxSet error message prefix
DB->set_feedbackSet feedback callback
DB->set_flagsGeneral database configuration
DB->set_h_ffactorSet the Hash table density
DB->set_h_hashSet a hashing function
DB->set_h_nelemSet the Hash table size
DB->set_lorderSet the database byte order
DB->set_msgcallSet error and informational message callback
DB->set_msgfileSet error and informational message FILE
DB->set_pagesizeSet the underlying database page size
DB->set_paniccallSet panic callback
DB->set_q_extentsizeSet Queue database extent size
DB->set_re_delimSet the variable-length record delimiter
DB->set_re_lenSet the fixed-length record length
DB->set_re_padSet the fixed-length record pad byte
DB->set_re_sourceSet the backing Recno text file
DB->statDatabase statistics
DB->stat_printDatabase statistics
DB->syncFlush a database to stable storage
DB->truncateEmpty a database
DB->upgradeUpgrade a database
DB->verifyVerify/salvage a database
db_env_createCreate an environment handle
DB_ENV->closeClose an environment
db_strerrorError strings
db_versionReturn version information
DB_ENV->dbremoveRemove a database
DB_ENV->dbrenameRename a database
DB_ENV->errError message with error string
DB_ENV->errxError message
DB_ENV->lock_detectPerform deadlock detection
DB_ENV->lock_getAcquire a lock
DB_ENV->lock_idAcquire a locker ID
DB_ENV->lock_id_freeRelease a locker ID
DB_ENV->lock_putRelease a lock
DB_ENV->lock_statLock subsystem statistics
DB_ENV->lock_stat_printLock subsystem statistics
DB_ENV->lock_vecAcquire/release locks
DB_ENV->log_archiveList log and database files
log_compareCompare two Log Sequence Numbers
DB_ENV->log_cursorCreate a log cursor handle
DB_ENV->log_fileMap Log Sequence Numbers to log files
DB_ENV->log_flushFlush log records
DB_ENV->log_putWrite a log record
DB_ENV->log_statLog subsystem statistics
DB_ENV->log_stat_printLog subsystem statistics
DB_ENV->memp_fcreateCreate a memory pool file handle
DB_ENV->memp_registerRegister input/output functions for a file in a memory pool
DB_ENV->memp_statMemory pool statistics
DB_ENV->memp_stat_printMemory pool statistics
DB_ENV->memp_syncFlush pages from a memory pool
DB_ENV->memp_trickleTrickle flush pages from a memory pool
DB_ENV->openOpen an environment
DB_ENV->removeRemove an environment
DB_ENV->rep_electHold a replication election
DB_ENV->rep_process_messageProcess a replication message
DB_ENV->rep_startConfigure an environment for replication
DB_ENV->rep_statReplication statistics
DB_ENV->rep_stat_printReplication statistics
DB_ENV->set_allocSet local space allocation functions
DB_ENV->set_app_dispatchConfigure application recovery
DB_ENV->set_cachesizeSet the environment cache size
DB_ENV->set_data_dirSet the environment data directory
DB_ENV->set_encryptSet the environment cryptographic key
DB_ENV->set_errcallSet error and informational message callbacks
DB_ENV->set_errfileSet error and informational message FILE
DB_ENV->set_errpfxSet error message prefix
DB_ENV->set_feedbackSet feedback callback
DB_ENV->set_flagsEnvironment configuration
DB_ENV->set_lg_bsizeSet log buffer size
DB_ENV->set_lg_dirSet the environment logging directory
DB_ENV->set_lg_maxSet log file size
DB_ENV->set_lg_regionmaxSet logging region size
DB_ENV->set_lk_conflictsSet lock conflicts matrix
DB_ENV->set_lk_detectSet automatic deadlock detection
DB_ENV->set_lk_max_lockersSet maximum number of lockers
DB_ENV->set_lk_max_locksSet maximum number of locks
DB_ENV->set_lk_max_objectsSet maximum number of lock objects
DB_ENV->memp_set_max_openfdSet maximum number of open file descriptors
DB_ENV->memp_set_max_writeSet maximum number of sequential write operations
DB_ENV->set_mp_mmapsizeSet maximum mapped-in database file size
DB_ENV->set_msgcallSet error and informational message callbacks
DB_ENV->set_msgfileSet error and informational message FILE
DB_ENV->set_paniccallSet panic callback
DB_ENV->set_rep_limitLimit data sent in response to a single message
DB_ENV->set_rep_transportConfigure replication transport
DB_ENV->set_rpc_serverEstablish an RPC server connection
DB_ENV->set_shm_keySet system memory shared segment ID
DB_ENV->set_tas_spinsSet the number of test-and-set spins
DB_ENV->set_timeoutSet lock and transaction timeout
DB_ENV->set_tmp_dirSet the environment temporary file directory
DB_ENV->set_tx_maxSet maximum number of transactions
DB_ENV->set_tx_timestampSet recovery timestamp
DB_ENV->set_verboseSet verbose messages
DB_ENV->stat_printEnvironment statistics
DB_ENV->txn_beginBegin a transaction
DB_ENV->txn_checkpointCheckpoint the transaction subsystem
DB_ENV->txn_recoverDistributed transaction recovery
DB_ENV->txn_statTransaction subsystem statistics
DB_ENV->txn_stat_printTransaction subsystem statistics
DB_LOGC->closeClose a log cursor
DB_LOGC->getRetrieve a log record
DB_MPOOLFILE->closeClose a file in a memory pool
DB_MPOOLFILE->getGet page from a file in a memory pool
DB_MPOOLFILE->openOpen a file in a memory pool
DB_MPOOLFILE->putReturn a page to a memory pool
DB_MPOOLFILE->setSet memory pool page status
DB_MPOOLFILE->set_clear_lenSet file page bytes to be cleared
DB_MPOOLFILE->set_fileidSet file unique identifier
DB_MPOOLFILE->set_flagsGeneral memory pool file configuration
DB_MPOOLFILE->set_ftypeSet file type
DB_MPOOLFILE->set_lsn_offsetSet file log-sequence-number offset
DB_MPOOLFILE->set_maxsizeSet the maximum file size
DB_MPOOLFILE->set_pgcookieSet file cookie for pgin/pgout
DB_MPOOLFILE->set_prioritySet the file priority
DB_MPOOLFILE->syncFlush pages from a file in a memory pool
DB_SEQUENCE->closeClose a sequence
DB_SEQUENCE->getReturn the next sequence element(s)
DB_SEQUENCE->get_cachesizeReturn the sequence cache size
DB_SEQUENCE->get_dbpReturn dbp handle for sequence
DB_SEQUENCE->get_flagsReturn sequence flag values
DB_SEQUENCE->get_keyReturn key for sequence
DB_SEQUENCE->get_rangeReturn the range of a sequence
DB_SEQUENCE->init_valueSet the initial value of a sequence
DB_SEQUENCE->openOpen a sequence
DB_SEQUENCE->removeRemove a sequence
DB_SEQUENCE->set_cachesizeSet the sequence cache size
DB_SEQUENCE->set_flagsSet sequence flag values
DB_SEQUENCE->set_rangeSet the range of a sequence
DB_SEQUENCE->statSequence statistics
DB_TXN->abortAbort a transaction
DB_TXN->commitCommit a transaction
DB_TXN->discardDiscard a prepared but not resolved transaction handle
DB_TXN->idReturn a transaction's ID
DB_TXN->preparePrepare a transaction for commit
DB_TXN->set_timeoutSet transaction timeout
DBTKey/Data pairs
-
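Several of the additions in this patch (the db_sequence_create export, the HAVE_SEQUENCE define, and the DB_SEQUENCE entries in the table above) belong to the new sequence API. As a rough orientation only, a minimal caller might look like the following sketch; next_id and the sequence key name are made up for illustration, and the method signatures are a best-effort reading of the 4.3 interface rather than anything spelled out in this diff.

#include <string.h>
#include <db.h>

/* dbp is an already-open DB handle; error handling is abbreviated. */
int
next_id(DB *dbp, db_seq_t *idp)
{
	DB_SEQUENCE *seq;
	DBT key;
	const char *name = "my_sequence";	/* illustrative sequence key */
	int ret, t_ret;

	if ((ret = db_sequence_create(&seq, dbp, 0)) != 0)
		return (ret);

	memset(&key, 0, sizeof(DBT));
	key.data = (void *)name;
	key.size = (u_int32_t)strlen(name);

	/* Create the sequence record on first use, then fetch one value. */
	if ((ret = seq->open(seq, NULL, &key, DB_CREATE)) == 0)
		ret = seq->get(seq, NULL, 1, idp, 0);

	if ((t_ret = seq->close(seq, 0)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}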

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/c_pindex.html b/db/docs/api_c/c_pindex.html index a0cefbaaa..41219bc50 100644 --- a/db/docs/api_c/c_pindex.html +++ b/db/docs/api_c/c_pindex.html @@ -2,837 +2,604 @@ Berkeley DB: C API Index - +

C API Index

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Building a small memory footprint library --disable-cryptography
Building a small memory footprint library --disable-hash
Configuring Berkeley DB --disable-largefile
Building a small memory footprint library --disable-queue
Building a small memory footprint library --disable-replication
Configuring Berkeley DB --disable-shared
Configuring Berkeley DB --disable-static
Building a small memory footprint library --disable-verify
Configuring Berkeley DB --enable-compat185
Configuring Berkeley DB --enable-cxx
Configuring Berkeley DB --enable-debug
Configuring Berkeley DB --enable-debug_rop
Configuring Berkeley DB --enable-debug_wop
Configuring Berkeley DB --enable-diagnostic
Configuring Berkeley DB --enable-dump185
Configuring Berkeley DB --enable-java
Configuring Berkeley DB --enable-posixmutexes
Configuring Berkeley DB --enable-rpc
Configuring Berkeley DB --enable-smallbuild
Building a small memory footprint library --enable-smallbuild
Configuring Berkeley DB --enable-tcl
Configuring Berkeley DB --enable-test
Configuring Berkeley DB --enable-uimutexes
Configuring Berkeley DB --enable-umrw
Configuring Berkeley DB --with-mutex=MUTEX
Configuring Berkeley DB --with-mutexalign=ALIGNMENT
Configuring Berkeley DB --with-rpm=ARCHIVE
Configuring Berkeley DB --with-tcl=DIR
Configuring Berkeley DB --with-uniquename=NAME
/etc/magic
configuring Berkeley DB 1.85 API compatibility
building a utility to dump Berkeley DB 1.85 databases
Upgrading to release 2.0
Upgrading to release 3.0
Upgrading to release 3.1
Upgrading to release 3.2
Upgrading to release 3.3
Upgrading to release 4.0
Upgrading to release 4.1
Upgrading to release 4.2
Upgrading to release 4.2
Upgrading to release 4.2
Upgrading to release 4.2
Upgrading to release 4.2
Upgrading to release 4.2
Upgrading to release 4.2
Upgrading to release 4.2
Upgrading to release 4.2
Upgrading to release 4.2
Upgrading to release 4.2
selecting an access method
access method FAQ
access method tuning
introduction to the access methods
AIX
data alignment
programmatic APIs
hot backup
BDB
BDB
BDB
introduction to the buffer pool subsystem
turn off system buffering
turn off system buffering for database files
turn off system buffering for log files
building for QNX
building for UNIX
building for UNIX FAQ
building for VxWorks
building for VxWorks AE
building for VxWorks FAQ
building for Win32
building for Windows FAQ
bulk retrieval
selecting a byte order
configuring the C++ API
flushing the database cache
selecting a cache size
introduction to the memory cache subsystem
catastrophic recovery
Patches, Updates and Change logs
database page checksum
closing a cursor
closing a database
database compaction
specifying a Btree comparison function
changing compile or load options
Concurrent Data Store
database environment configuration
configuring Berkeley DB for UNIX systems
salvaging corrupted databases
counting data items for a key
closing a cursor
deleting records with a cursor
duplicating a cursor
retrieving records with a cursor
storing records with a cursor
cursor stability
database cursors
DBT data
DB->associate
DB->close
DB->cursor
DB->del
DB->err
DB->errx
DB->fd
DB->get
DB->get_bt_minkey
DB->get_byteswapped
DB->get_cachesize
DB->get_database
DB->get_encrypt_flags
DB->get_env
DB->get_errfile
DB->get_errpfx
DB->get_file
DB->get_flags
DB->get_h_ffactor
DB->get_h_nelem
DB->get_lorder
DB->get_open_flags
DB->get_pagesize
DB->get_q_extentsize
DB->get_re_delim
DB->get_re_len
DB->get_re_pad
DB->get_re_source
DB->get_transactional
DB->get_type
DB->join
DB->key_range
DB->mpf
DB->open
DB->pget
DB->put
DB->remove
DB->rename
DB->set_alloc
DB->set_append_recno
DB->set_bt_compare
DB->set_bt_minkey
DB->set_bt_prefix
DB->set_cachesize
DB->set_dup_compare
DB->set_encrypt
DB->set_errcall
DB->set_errfile
DB->set_errpfx
DB->set_feedback
DB->set_flags
DB->set_h_ffactor
DB->set_h_hash
DB->set_h_nelem
DB->set_lorder
DB->set_pagesize
DB->set_paniccall
DB->set_q_extentsize
DB->set_re_delim
DB->set_re_len
DB->set_re_pad
DB->set_re_source
DB->stat
DB->sync
DB->truncate
DB->upgrade
DB->verify
DBC
DBcursor->c_close
DBcursor->c_count
DBcursor->c_del
DBcursor->c_dup
DBcursor->c_get
DBcursor->c_pget
DBcursor->c_put
DbEnv::get_cachesize_nocache
dbm/ndbm
DBT
DBT
DBcursor->c_put DB_AFTER
DB->verify DB_AGGRESSIVE
DB->put DB_APPEND
DB_ENV->log_archive DB_ARCH_ABS
DB_ENV->log_archive DB_ARCH_DATA
DB_ENV->log_archive DB_ARCH_LOG
DB_ENV->log_archive DB_ARCH_REMOVE
DB->associate DB_AUTO_COMMIT
DB->del DB_AUTO_COMMIT
DB->get DB_AUTO_COMMIT
DB->open DB_AUTO_COMMIT
DB->put DB_AUTO_COMMIT
DB->truncate DB_AUTO_COMMIT
DB_ENV->dbremove DB_AUTO_COMMIT
DB_ENV->dbrename DB_AUTO_COMMIT
DB_ENV->set_flags DB_AUTO_COMMIT
DBcursor->c_put DB_BEFORE
DB->open DB_BTREE
DB_ENV->set_flags DB_CDB_ALLDB
DB->set_flags DB_CHKSUM
DB_CONFIG
DB->get DB_CONSUME
DB->get DB_CONSUME_WAIT
DB->associate DB_CREATE
db_create
DB->open DB_CREATE
DB_ENV->open DB_CREATE
DB_MPOOLFILE->open DB_CREATE
DBcursor->c_get DB_CURRENT
DBcursor->c_put DB_CURRENT
DB_LOGC->get DB_CURRENT
DB_DBT_APPMALLOC
DBT DB_DBT_MALLOC
DBT DB_DBT_PARTIAL
DBT DB_DBT_REALLOC
DBT DB_DBT_USERMEM
DB_MPOOLFILE->open DB_DIRECT
DB_ENV->set_flags DB_DIRECT_DB
DB_ENV->set_flags DB_DIRECT_LOG
DB->cursor DB_DIRTY_READ
DB->get DB_DIRTY_READ
DB->join DB_DIRTY_READ
DB->open DB_DIRTY_READ
DBcursor->c_get DB_DIRTY_READ
DB_ENV->txn_begin DB_DIRTY_READ
DB_DONOTINDEX
DB->set_flags DB_DUP
DB->set_flags DB_DUPSORT
DB->upgrade DB_DUPSORT
DB_EID_BROADCAST
DB->set_flags DB_ENCRYPT
DB->set_encrypt DB_ENCRYPT_AES
DB_ENV->set_encrypt DB_ENCRYPT_AES
DB_ENV->close
DB_ENV->dbremove
DB_ENV->dbrename
DB_ENV->err
DB_ENV->errx
DB_ENV->get_cachesize
DB_ENV->get_data_dirs
DB_ENV->get_encrypt_flags
DB_ENV->get_errfile
DB_ENV->get_errpfx
DB_ENV->get_flags
DB_ENV->get_home
DB_ENV->get_lg_bsize
DB_ENV->get_lg_dir
DB_ENV->get_lg_max
DB_ENV->get_lg_regionmax
DB_ENV->get_lk_conflicts
DB_ENV->get_lk_detect
DB_ENV->get_lk_max_lockers
DB_ENV->get_lk_max_locks
DB_ENV->get_lk_max_objects
DB_ENV->get_mp_mmapsize
DB_ENV->get_open_flags
DB_ENV->get_rep_limit
DB_ENV->get_shm_key
DB_ENV->get_tas_spins
DB_ENV->get_timeout
DB_ENV->get_tmp_dir
DB_ENV->get_tx_max
DB_ENV->get_tx_timestamp
DB_ENV->get_verbose
DB_ENV->lock_detect
DB_ENV->lock_get
DB_ENV->lock_id
DB_ENV->lock_id_free
DB_ENV->lock_put
DB_ENV->lock_stat
DB_ENV->lock_vec
DB_ENV->log_archive
DB_ENV->log_cursor
DB_ENV->log_file
DB_ENV->log_flush
DB_ENV->log_put
DB_ENV->log_stat
DB_ENV->memp_fcreate
DB_ENV->memp_register
DB_ENV->memp_stat
DB_ENV->memp_sync
DB_ENV->memp_trickle
DB_ENV->open
DB_ENV->remove
DB_ENV->rep_elect
DB_ENV->rep_process_message
DB_ENV->rep_start
DB_ENV->rep_stat
DB_ENV->set_alloc
DB_ENV->set_app_dispatch
DB_ENV->set_cachesize
DB_ENV->set_data_dir
DB_ENV->set_encrypt
DB_ENV->set_errcall
DB_ENV->set_errfile
DB_ENV->set_errpfx
DB_ENV->set_feedback
DB_ENV->set_flags
DB_ENV->set_lg_bsize
DB_ENV->set_lg_dir
DB_ENV->set_lg_max
DB_ENV->set_lg_regionmax
DB_ENV->set_lk_conflicts
DB_ENV->set_lk_detect
DB_ENV->set_lk_max_lockers
DB_ENV->set_lk_max_locks
DB_ENV->set_lk_max_objects
DB_ENV->set_mp_mmapsize
DB_ENV->set_paniccall
DB_ENV->set_rep_limit
DB_ENV->set_rep_transport
DB_ENV->set_rpc_server
DB_ENV->set_shm_key
DB_ENV->set_tas_spins
DB_ENV->set_timeout
DB_ENV->set_tmp_dir
DB_ENV->set_tx_max
DB_ENV->set_tx_timestamp
DB_ENV->set_verbose
DB_ENV->txn_begin
DB_ENV->txn_checkpoint
DB_ENV->txn_recover
DB_ENV->txn_stat
db_env_create
db_env_set_func_close
db_env_set_func_dirfree
db_env_set_func_dirlist
db_env_set_func_exists
db_env_set_func_free
db_env_set_func_fsync
db_env_set_func_ioinfo
db_env_set_func_malloc
db_env_set_func_map
db_env_set_func_open
db_env_set_func_read
db_env_set_func_realloc
db_env_set_func_rename
db_env_set_func_seek
db_env_set_func_sleep
db_env_set_func_unlink
db_env_set_func_unmap
db_env_set_func_write
db_env_set_func_yield
DB->open DB_EXCL
DB->stat DB_FAST_STAT
DBcursor->c_get DB_FIRST
DB_LOGC->get DB_FIRST
DB_ENV->txn_recover DB_FIRST
DB_ENV->log_put DB_FLUSH
DB_ENV->remove DB_FORCE
DB_ENV->txn_checkpoint DB_FORCE
DB->get DB_GET_BOTH
DBcursor->c_get DB_GET_BOTH
DBcursor->c_get DB_GET_BOTH_RANGE
DBcursor->c_get DB_GET_RECNO
DB->open DB_HASH
File naming DB_HOME
File naming db_home
DB_ENV->open DB_INIT_CDB
DB_ENV->open DB_INIT_LOCK
DB_ENV->open DB_INIT_LOG
DB_ENV->open DB_INIT_MPOOL
DB_ENV->open DB_INIT_REP
DB_ENV->open DB_INIT_TXN
DB_ENV->open DB_JOINENV
DB->join DB_JOIN_ITEM
DBcursor->c_get DB_JOIN_ITEM
DB->join DB_JOIN_NOSORT
Error returns to applications DB_KEYEMPTY
Error returns to applications DB_KEYEXIST
DBcursor->c_put DB_KEYFIRST
DBcursor->c_put DB_KEYLAST
DBcursor->c_get DB_LAST
DB_LOGC->get DB_LAST
DB_LOCK
DB_ENV->open DB_LOCKDOWN
DB_LOCK_DEADLOCK
Error returns to applications DB_LOCK_DEADLOCK
DB_ENV->set_lk_detect DB_LOCK_DEFAULT
DB_ENV->lock_detect DB_LOCK_DEFAULT
DB_ENV->set_lk_detect DB_LOCK_EXPIRE
DB_ENV->lock_detect DB_LOCK_EXPIRE
DB_ENV->lock_vec DB_LOCK_GET
DB_ENV->lock_vec DB_LOCK_GET_TIMEOUT
DB_ENV->lock_vec DB_LOCK_IREAD
DB_ENV->lock_vec DB_LOCK_IWR
DB_ENV->lock_vec DB_LOCK_IWRITE
DB_ENV->set_lk_detect DB_LOCK_MAXLOCKS
DB_ENV->lock_detect DB_LOCK_MAXLOCKS
DB_ENV->set_lk_detect DB_LOCK_MINLOCKS
DB_ENV->lock_detect DB_LOCK_MINLOCKS
DB_ENV->set_lk_detect DB_LOCK_MINWRITE
DB_ENV->lock_detect DB_LOCK_MINWRITE
Error returns to applications DB_LOCK_NOTGRANTED
DB_ENV->lock_get DB_LOCK_NOWAIT
DB_ENV->lock_vec DB_LOCK_NOWAIT
DB_ENV->set_lk_detect DB_LOCK_OLDEST
DB_ENV->lock_detect DB_LOCK_OLDEST
DB_ENV->lock_vec DB_LOCK_PUT
DB_ENV->lock_vec DB_LOCK_PUT_ALL
DB_ENV->lock_vec DB_LOCK_PUT_OBJ
DB_ENV->set_lk_detect DB_LOCK_RANDOM
DB_ENV->lock_detect DB_LOCK_RANDOM
DB_ENV->lock_vec DB_LOCK_READ
DB_ENV->lock_vec DB_LOCK_TIMEOUT
DB_ENV->lock_vec DB_LOCK_WRITE
DB_ENV->set_lk_detect DB_LOCK_YOUNGEST
DB_ENV->lock_detect DB_LOCK_YOUNGEST
DB_LOGC
DB_LOGC->close
DB_LOGC->get
DB_ENV->set_flags DB_LOG_AUTOREMOVE
DB_LSN
DB_MPOOLFILE
DB_MPOOLFILE->close
DB_MPOOLFILE->get
DB_MPOOLFILE->get_clear_len
DB_MPOOLFILE->get_fileid
DB_MPOOLFILE->get_flags
DB_MPOOLFILE->get_ftype
DB_MPOOLFILE->get_lsn_offset
DB_MPOOLFILE->get_maxsize
DB_MPOOLFILE->get_pgcookie
DB_MPOOLFILE->get_priority
DB_MPOOLFILE->open
DB_MPOOLFILE->put
DB_MPOOLFILE->set
DB_MPOOLFILE->set_clear_len
DB_MPOOLFILE->set_fileid
DB_MPOOLFILE->set_flags
DB_MPOOLFILE->set_ftype
DB_MPOOLFILE->set_lsn_offset
DB_MPOOLFILE->set_maxsize
DB_MPOOLFILE->set_pgcookie
DB_MPOOLFILE->set_priority
DB_MPOOLFILE->sync
DB_MPOOLFILE->put DB_MPOOL_CLEAN
DB_MPOOLFILE->set DB_MPOOL_CLEAN
DB_MPOOLFILE->get DB_MPOOL_CREATE
DB_MPOOLFILE->put DB_MPOOL_DIRTY
DB_MPOOLFILE->set DB_MPOOL_DIRTY
DB_MPOOLFILE->put DB_MPOOL_DISCARD
DB_MPOOLFILE->set DB_MPOOL_DISCARD
DB_MPOOLFILE->get DB_MPOOL_LAST
DB_MPOOLFILE->get DB_MPOOL_NEW
DB_MPOOLFILE->set_flags DB_MPOOL_NOFILE
DB->get DB_MULTIPLE
DBcursor->c_get DB_MULTIPLE
DBT DB_MULTIPLE_INIT
DBcursor->c_get DB_MULTIPLE_KEY
DBT DB_MULTIPLE_KEY_NEXT
DBT DB_MULTIPLE_NEXT
DBT DB_MULTIPLE_RECNO_NEXT
DBcursor->c_get DB_NEXT
DB_LOGC->get DB_NEXT
DB_ENV->txn_recover DB_NEXT
DBcursor->c_get DB_NEXT_DUP
DBcursor->c_get DB_NEXT_NODUP
DB->put DB_NODUPDATA
DBcursor->c_put DB_NODUPDATA
DB_ENV->set_flags DB_NOLOCKING
DB->open DB_NOMMAP
DB_ENV->set_flags DB_NOMMAP
DB_MPOOLFILE->open DB_NOMMAP
DB->verify DB_NOORDERCHK
DB->put DB_NOOVERWRITE
DB_ENV->set_flags DB_NOPANIC
DB_NOSERVER
DB_ENV->set_rpc_server DB_NOSERVER
DB_ENV->set_rpc_server DB_NOSERVER_HOME
DB_NOSERVER_ID
DB_ENV->set_rpc_server DB_NOSERVER_ID
DB->close DB_NOSYNC
Error returns to applications DB_NOTFOUND
DB_MPOOLFILE->open DB_ODDFILESIZE
DB->upgrade DB_OLD_VERSION
DB->verify DB_ORDERCHKONLY
DB_ENV->set_flags DB_OVERWRITE
DB_PAGE_NOTFOUND
DB_ENV->set_flags DB_PANIC_ENVIRONMENT
DBcursor->c_dup DB_POSITION
DBcursor->c_get DB_PREV
DB_LOGC->get DB_PREV
DBcursor->c_get DB_PREV_NODUP
DB->verify DB_PRINTABLE
DB_MPOOLFILE->set_priority DB_PRIORITY_DEFAULT
DB_MPOOLFILE->set_priority DB_PRIORITY_HIGH
DB_MPOOLFILE->set_priority DB_PRIORITY_LOW
DB_MPOOLFILE->set_priority DB_PRIORITY_VERY_HIGH
DB_MPOOLFILE->set_priority DB_PRIORITY_VERY_LOW
DB_ENV->open DB_PRIVATE
DB->open DB_QUEUE
DB->open DB_RDONLY
DB_MPOOLFILE->open DB_RDONLY
DB->open DB_RECNO
DB->set_flags DB_RECNUM
DB_ENV->open DB_RECOVER
DB_ENV->set_feedback DB_RECOVER
DB_ENV->open DB_RECOVER_FATAL
DB_ENV->set_flags DB_REGION_INIT
DB->set_flags DB_RENUMBER
DB_ENV->rep_start DB_REP_CLIENT
DB_ENV->rep_process_message DB_REP_DUPMASTER
DB_ENV->rep_process_message DB_REP_HOLDELECTION
DB_ENV->rep_process_message DB_REP_ISPERM
DB_ENV->rep_start DB_REP_LOGSONLY
DB_ENV->rep_start DB_REP_MASTER
DB_ENV->rep_process_message DB_REP_NEWMASTER
DB_ENV->rep_process_message DB_REP_NEWSITE
DB_ENV->set_rep_transport DB_REP_NOBUFFER
DB_ENV->rep_process_message DB_REP_NOTPERM
DB_ENV->rep_process_message DB_REP_OUTDATED
DB_ENV->set_rep_transport DB_REP_PERMANENT
DB_REP_UNAVAIL
DB->set_flags DB_REVSPLITOFF
DB->get DB_RMW
DB->join DB_RMW
DBcursor->c_get DB_RMW
db_env_create DB_RPCCLIENT
Error returns to applications DB_RUNRECOVERY
DB->verify DB_SALVAGE
DBcursor->c_get DB_SET
DB_LOGC->get DB_SET
DB_ENV->set_timeout DB_SET_LOCK_TIMEOUT
DB_TXN->set_timeout DB_SET_LOCK_TIMEOUT
DBcursor->c_get DB_SET_RANGE
DB->get DB_SET_RECNO
DBcursor->c_get DB_SET_RECNO
DB_ENV->set_timeout DB_SET_TXN_TIMEOUT
DB_TXN->set_timeout DB_SET_TXN_TIMEOUT
DB->set_flags DB_SNAPSHOT
DB_ENV->lock_stat DB_STAT_CLEAR
DB_ENV->log_stat DB_STAT_CLEAR
DB_ENV->memp_stat DB_STAT_CLEAR
DB_ENV->rep_stat DB_STAT_CLEAR
DB_ENV->txn_stat DB_STAT_CLEAR
db_strerror
DB_ENV->open DB_SYSTEM_MEM
DB->open DB_THREAD
DB_ENV->open DB_THREAD
DB_ENV->set_flags DB_TIME_NOTGRANTED
DB->open DB_TRUNCATE
DB_TXN
DB_TXN->abort
DB_TXN->commit
DB_TXN->discard
DB_TXN->id
DB_TXN->prepare
DB_TXN->set_timeout
DB_ENV->set_app_dispatch DB_TXN_ABORT
DB_ENV->set_app_dispatch DB_TXN_APPLY
DB_ENV->set_app_dispatch DB_TXN_BACKWARD_ROLL
DB_ENV->set_app_dispatch DB_TXN_FORWARD_ROLL
DB_ENV->set_flags DB_TXN_NOSYNC
DB_ENV->txn_begin DB_TXN_NOSYNC
DB_TXN->commit DB_TXN_NOSYNC
DB->set_flags DB_TXN_NOT_DURABLE
DB_ENV->set_flags DB_TXN_NOT_DURABLE
DB_ENV->txn_begin DB_TXN_NOWAIT
DB_ENV->set_app_dispatch DB_TXN_PRINT
DB_ENV->txn_begin DB_TXN_SYNC
DB_TXN->commit DB_TXN_SYNC
DB_ENV->set_flags DB_TXN_WRITE_NOSYNC
DB->open DB_UNKNOWN
DB->set_feedback DB_UPGRADE
DB_ENV->open DB_USE_ENVIRON
DB_ENV->remove DB_USE_ENVIRON
DB_ENV->open DB_USE_ENVIRON_ROOT
DB_ENV->remove DB_USE_ENVIRON_ROOT
DB_ENV->set_verbose DB_VERB_CHKPOINT
DB_ENV->set_verbose DB_VERB_DEADLOCK
DB_ENV->set_verbose DB_VERB_RECOVERY
DB_ENV->set_verbose DB_VERB_REPLICATION
DB_ENV->set_verbose DB_VERB_WAITSFOR
DB->set_feedback DB_VERIFY
DB_VERIFY_BAD
db_version
db_version
DB->cursor DB_WRITECURSOR
db_create DB_XA_CREATE
DB_XIDDATASIZE
DB_ENV->set_flags DB_YIELDCPU
deadlocks
introduction to debugging
debugging applications
degrees of isolation
deleting records
deleting records with a cursor
dirty reads
disk space requirements
Distributed Transactions
DBT dlen
DBT doff
double buffering
duplicate data items
sorted duplicate data items
duplicate data items
duplicating a cursor
turn off database durability
turn off durability in the database environment
emptying a database
database encryption
encryption
turn off access to a database environment
database environment
use environment constants in naming
use environment constants in naming
database environment FAQ
fault database environment in during open
environment variables
introduction to database environments
equality join
error handling
error name space
error returns
selecting a Queue extent size
hot failover
Java API FAQ
Java FAQ
Tcl FAQ
XA FAQ
configuring without large file support
file utility
returning pages to the filesystem
recovery and filesystem operations
remote filesystems
page fill factor
configuring a small memory footprint library
Berkeley DB free-threaded handles
FreeBSD
specifying a database hash
hash table size
hcreate
HP-UX
hsearch
secondary indices
installing Berkeley DB for UNIX systems
interface compatibility
IRIX
degrees of isolation
configuring the Java API
Java API FAQ
Java compatibility
Java configuration
Java FAQ
equality join
key/data pairs
retrieved key/data permanence
database limits
Linux
changing compile or load options
DB_ENV->lock_vec lock
standard lock modes
ignore locking
page-level locking
two-phase locking
locking and non-Berkeley DB applications
locking configuration
Berkeley DB Transactional Data Store locking conventions
Berkeley DB Concurrent Data Store locking conventions
configure locking for Berkeley DB Concurrent Data Store
locking granularity
introduction to the locking subsystem
sizing the locking subsystem
locking without transactions
log file limits
automatic log file removal
log file removal
logging configuration
introduction to the logging subsystem
retrieving Btree records by logical record @number
log_compare
Mac OS X
turn off database file memory mapping
memory pool configuration
introduction to the memory pool subsystem
configuring for MinGW
DB_ENV->lock_vec mode
Berkeley DB library name spaces
file naming
natural join
NFS problems
retrieving Btree records by logical record number
DB_ENV->lock_vec obj
DB_ENV->lock_vec op
opening a database
OSF/1
selecting a page size
ignore database environment panic
partial record storage and retrieval
Patches, Updates and Change logs
Perl
retrieved key/data permanence
task/thread priority
Sleepycat Software's Berkeley DB products
building for QNX
QNX
dirty reads
accessing Btree records by record number
logical record numbers
managing record-based databases
logically renumbering records
Berkeley DB recoverability
renumbering records in Recno databases
repeatable read
introduction to replication
Resource Manager
XA Resource Manager
retrieving records
retrieving records in bulk
retrieving records with a cursor
turn off reverse splits in Btree databases
RPC client
configuring a RPC client/server
introduction to rpc client/server
RPC FAQ
RPC server
RPM
database salvage
SCO
Berkeley DB handle scope
secondary indices
security
disabling shared libraries
shared libraries
signal handling
DBT size
Sleepycat Software
Solaris
source code layout
turn off reverse splits in Btree databases
cursor stability
disabling static libraries
database statistics
storing records
storing records with a cursor
configure for stress testing
SunOS
loading Berkeley DB with Tcl
using Berkeley DB with Tcl
configuring the Tcl API
Tcl API programming notes
Tcl FAQ
temporary files
configuring the test suite
running the test suite
running the test suite under UNIX
running the test suite under Windows
text backing files
pre-loading text files into Recno databases
loading text into databases
dumping/loading text to/from databases
building threaded applications
lock timeouts
transaction timeouts
turn off synchronous transaction commit
turn off synchronous transaction commit
transaction configuration
transaction FAQ
transaction limits
Transaction Manager
administering transaction protected applications
archival in transaction protected applications
checkpoints in transaction protected applications
deadlock detection in transaction protected applications
recovery in transaction protected applications
introduction to the transaction subsystem
transaction throughput
transaction tuning
Transactional Data Store
nested transactions
truncating a database
access method tuning
transaction tuning
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
tutorial
simple tutorial
configuring Berkeley DB with the Tuxedo System
DBT ulen
Ultrix
building for UNIX
building for UNIX FAQ
configuring Berkeley DB for UNIX systems
Patches, Updates and Change logs
upgrading databases
utilities
database verification
building for VxWorks FAQ
VxWorks notes
running the test suite under Windows
building for Windows FAQ
Windows notes
XA FAQ
XA Resource Manager
__db.001
Building a small memory footprint library  --disable-cryptography
Building a small memory footprint library  --disable-hash
Configuring Berkeley DB  --disable-largefile
Building a small memory footprint library  --disable-queue
Building a small memory footprint library  --disable-replication
Configuring Berkeley DB  --disable-shared
Configuring Berkeley DB  --disable-static
Building a small memory footprint library  --disable-statistics
Building a small memory footprint library  --disable-verify
Configuring Berkeley DB  --enable-compat185
Configuring Berkeley DB  --enable-cxx
Configuring Berkeley DB  --enable-debug
Configuring Berkeley DB  --enable-debug_rop
Configuring Berkeley DB  --enable-debug_wop
Configuring Berkeley DB  --enable-diagnostic
Configuring Berkeley DB  --enable-dump185
Configuring Berkeley DB  --enable-java
Configuring Berkeley DB  --enable-posixmutexes
Configuring Berkeley DB  --enable-rpc
Configuring Berkeley DB  --enable-smallbuild
Building a small memory footprint library  --enable-smallbuild
Configuring Berkeley DB  --enable-tcl
Configuring Berkeley DB  --enable-test
Configuring Berkeley DB  --enable-uimutexes
Configuring Berkeley DB  --enable-umrw
Configuring Berkeley DB  --with-mutex=MUTEX
Configuring Berkeley DB  --with-mutexalign=ALIGNMENT
Configuring Berkeley DB  --with-rpm=ARCHIVE
Configuring Berkeley DB  --with-tcl=DIR
Configuring Berkeley DB  --with-uniquename=NAME
 /etc/magic
configuring Berkeley DB  1.85 API compatibility
building a utility to dump Berkeley DB  1.85 databases
selecting an  access method
 access method FAQ
 access method tuning
introduction to the  access methods
 AIX
data  alignment
 Apache
programmatic  APIs
hot  backup
introduction to the  buffer pool subsystem
turn off system  buffering
turn off system  buffering for database files
turn off system  buffering for log files
turn off system  buffering for log files
 building for QNX
 building for UNIX
 building for UNIX FAQ
 building for VxWorks
 building for VxWorks AE
 building for VxWorks FAQ
 building for Win32
 building for Windows FAQ
 bulk retrieval
selecting a  byte order
configuring the  C++ API
flushing the database  cache
selecting a  cache size
introduction to the memory  cache subsystem
 catastrophic recovery
Patches, Updates and  Change logs
database page  checksum
 closing a cursor
 closing a database
database  compaction
specifying a Btree  comparison function
changing  compile or load options
 Concurrent Data Store
database environment  configuration
 configuring Berkeley DB for UNIX systems
salvaging  corrupted databases
 counting data items for a key
closing a  cursor
deleting records with a  cursor
duplicating a  cursor
retrieving records with a  cursor
storing records with a  cursor
 cursor stability
 cursor stability
database  cursors
DBT  data
DBcursor->c_put  DB_AFTER
DB->verify  DB_AGGRESSIVE
DB->put  DB_APPEND
DB_ENV->log_archive  DB_ARCH_ABS
DB_ENV->log_archive  DB_ARCH_DATA
DB_ENV->log_archive  DB_ARCH_LOG
DB_ENV->log_archive  DB_ARCH_REMOVE
DB->associate  DB_AUTO_COMMIT
DB->del  DB_AUTO_COMMIT
DB->get  DB_AUTO_COMMIT
DB->open  DB_AUTO_COMMIT
DB->put  DB_AUTO_COMMIT
DB->truncate  DB_AUTO_COMMIT
DB_ENV->dbremove  DB_AUTO_COMMIT
DB_ENV->dbrename  DB_AUTO_COMMIT
DB_ENV->set_flags  DB_AUTO_COMMIT
DB_SEQUENCE->get  DB_AUTO_COMMIT
DB_SEQUENCE->open  DB_AUTO_COMMIT
DB_SEQUENCE->remove  DB_AUTO_COMMIT
DBcursor->c_put  DB_BEFORE
DB->open  DB_BTREE
 DB_BUFFER_SMALL
DB_ENV->set_flags  DB_CDB_ALLDB
DB->set_flags  DB_CHKSUM
 DB_CONFIG
DB->get  DB_CONSUME
DB->get  DB_CONSUME_WAIT
DB->associate  DB_CREATE
DB->open  DB_CREATE
DB_ENV->open  DB_CREATE
DB_MPOOLFILE->open  DB_CREATE
DB_SEQUENCE->open  DB_CREATE
DBcursor->c_get  DB_CURRENT
DBcursor->c_put  DB_CURRENT
DB_LOGC->get  DB_CURRENT
 DB_DBT_APPMALLOC
DBT  DB_DBT_MALLOC
DBT  DB_DBT_PARTIAL
DBT  DB_DBT_REALLOC
DBT  DB_DBT_USERMEM
DB->cursor  DB_DEGREE_2
DB->get  DB_DEGREE_2
DB->stat  DB_DEGREE_2
DB_ENV->txn_begin  DB_DEGREE_2
DB_MPOOLFILE->open  DB_DIRECT
DB_ENV->set_flags  DB_DIRECT_DB
DB_ENV->set_flags  DB_DIRECT_LOG
DB->cursor  DB_DIRTY_READ
DB->get  DB_DIRTY_READ
DB->join  DB_DIRTY_READ
DB->open  DB_DIRTY_READ
DB->stat  DB_DIRTY_READ
DBcursor->c_get  DB_DIRTY_READ
DB_ENV->txn_begin  DB_DIRTY_READ
 DB_DONOTINDEX
DB_ENV->set_flags  DB_DSYNC_LOG
DB->set_flags  DB_DUP
DB->set_flags  DB_DUPSORT
DB->upgrade  DB_DUPSORT
 DB_EID_BROADCAST
DB->set_flags  DB_ENCRYPT
DB->set_encrypt  DB_ENCRYPT_AES
DB_ENV->set_encrypt  DB_ENCRYPT_AES
DB->open  DB_EXCL
DB_SEQUENCE->open  DB_EXCL
DB->stat  DB_FAST_STAT
DBcursor->c_get  DB_FIRST
DB_LOGC->get  DB_FIRST
DB_ENV->txn_recover  DB_FIRST
DB_ENV->log_put  DB_FLUSH
DB_ENV->remove  DB_FORCE
DB_ENV->txn_checkpoint  DB_FORCE
DB->get  DB_GET_BOTH
DBcursor->c_get  DB_GET_BOTH
DBcursor->c_get  DB_GET_BOTH_RANGE
DBcursor->c_get  DB_GET_RECNO
DB->open  DB_HASH
File naming  DB_HOME
File naming  db_home
DB_ENV->open  DB_INIT_CDB
DB_ENV->open  DB_INIT_LOCK
DB_ENV->open  DB_INIT_LOG
DB_ENV->open  DB_INIT_MPOOL
DB_ENV->open  DB_INIT_REP
DB_ENV->open  DB_INIT_TXN
DB->set_flags  DB_INORDER
DB_ENV->open  DB_JOINENV
DB->join  DB_JOIN_ITEM
DBcursor->c_get  DB_JOIN_ITEM
DB->join  DB_JOIN_NOSORT
Error returns to applications  DB_KEYEMPTY
Error returns to applications  DB_KEYEXIST
DBcursor->c_put  DB_KEYFIRST
DBcursor->c_put  DB_KEYLAST
DBcursor->c_get  DB_LAST
DB_LOGC->get  DB_LAST
DB_ENV->open  DB_LOCKDOWN
 DB_LOCK_DEADLOCK
Error returns to applications  DB_LOCK_DEADLOCK
DB_ENV->set_lk_detect  DB_LOCK_DEFAULT
DB_ENV->lock_detect  DB_LOCK_DEFAULT
DB_ENV->set_lk_detect  DB_LOCK_EXPIRE
DB_ENV->lock_detect  DB_LOCK_EXPIRE
DB_ENV->lock_vec  DB_LOCK_GET
DB_ENV->lock_vec  DB_LOCK_GET_TIMEOUT
DB_ENV->lock_vec  DB_LOCK_IREAD
DB_ENV->lock_vec  DB_LOCK_IWR
DB_ENV->lock_vec  DB_LOCK_IWRITE
DB_ENV->set_lk_detect  DB_LOCK_MAXLOCKS
DB_ENV->lock_detect  DB_LOCK_MAXLOCKS
DB_ENV->set_lk_detect  DB_LOCK_MAXWRITE
DB_ENV->lock_detect  DB_LOCK_MAXWRITE
DB_ENV->set_lk_detect  DB_LOCK_MINLOCKS
DB_ENV->lock_detect  DB_LOCK_MINLOCKS
DB_ENV->set_lk_detect  DB_LOCK_MINWRITE
DB_ENV->lock_detect  DB_LOCK_MINWRITE
Error returns to applications  DB_LOCK_NOTGRANTED
DB_ENV->lock_get  DB_LOCK_NOWAIT
DB_ENV->lock_vec  DB_LOCK_NOWAIT
DB_ENV->set_lk_detect  DB_LOCK_OLDEST
DB_ENV->lock_detect  DB_LOCK_OLDEST
DB_ENV->lock_vec  DB_LOCK_PUT
DB_ENV->lock_vec  DB_LOCK_PUT_ALL
DB_ENV->lock_vec  DB_LOCK_PUT_OBJ
DB_ENV->set_lk_detect  DB_LOCK_RANDOM
DB_ENV->lock_detect  DB_LOCK_RANDOM
DB_ENV->lock_vec  DB_LOCK_READ
DB_ENV->lock_vec  DB_LOCK_TIMEOUT
DB_ENV->lock_vec  DB_LOCK_WRITE
DB_ENV->set_lk_detect  DB_LOCK_YOUNGEST
DB_ENV->lock_detect  DB_LOCK_YOUNGEST
DB_ENV->set_flags  DB_LOG_AUTOREMOVE
DB_ENV->set_flags  DB_LOG_BUFFER_FULL
DB_ENV->set_flags  DB_LOG_INMEMORY
DB_MPOOLFILE->put  DB_MPOOL_CLEAN
DB_MPOOLFILE->set  DB_MPOOL_CLEAN
DB_MPOOLFILE->get  DB_MPOOL_CREATE
DB_MPOOLFILE->put  DB_MPOOL_DIRTY
DB_MPOOLFILE->set  DB_MPOOL_DIRTY
DB_MPOOLFILE->put  DB_MPOOL_DISCARD
DB_MPOOLFILE->set  DB_MPOOL_DISCARD
DB_MPOOLFILE->get  DB_MPOOL_LAST
DB_MPOOLFILE->get  DB_MPOOL_NEW
DB_MPOOLFILE->set_flags  DB_MPOOL_NOFILE
DB_MPOOLFILE->set_flags  DB_MPOOL_UNLINK
DB->get  DB_MULTIPLE
DBcursor->c_get  DB_MULTIPLE
DBT  DB_MULTIPLE_INIT
DBcursor->c_get  DB_MULTIPLE_KEY
DBT  DB_MULTIPLE_KEY_NEXT
DBT  DB_MULTIPLE_NEXT
DBT  DB_MULTIPLE_RECNO_NEXT
DBcursor->c_get  DB_NEXT
DB_LOGC->get  DB_NEXT
DB_ENV->txn_recover  DB_NEXT
DBcursor->c_get  DB_NEXT_DUP
DBcursor->c_get  DB_NEXT_NODUP
DB->put  DB_NODUPDATA
DBcursor->c_put  DB_NODUPDATA
DB_ENV->set_flags  DB_NOLOCKING
DB->open  DB_NOMMAP
DB_ENV->set_flags  DB_NOMMAP
DB_MPOOLFILE->open  DB_NOMMAP
DB->verify  DB_NOORDERCHK
DB->put  DB_NOOVERWRITE
DB_ENV->set_flags  DB_NOPANIC
 DB_NOSERVER
DB_ENV->set_rpc_server  DB_NOSERVER
DB_ENV->set_rpc_server  DB_NOSERVER_HOME
 DB_NOSERVER_ID
DB_ENV->set_rpc_server  DB_NOSERVER_ID
DB->close  DB_NOSYNC
Error returns to applications  DB_NOTFOUND
DB_MPOOLFILE->open  DB_ODDFILESIZE
DB->upgrade  DB_OLD_VERSION
DB->verify  DB_ORDERCHKONLY
DB_ENV->set_flags  DB_OVERWRITE
 DB_PAGE_NOTFOUND
DB_ENV->set_flags  DB_PANIC_ENVIRONMENT
DBcursor->c_dup  DB_POSITION
DBcursor->c_get  DB_PREV
DB_LOGC->get  DB_PREV
DBcursor->c_get  DB_PREV_NODUP
DB->verify  DB_PRINTABLE
DB_MPOOLFILE->set_priority  DB_PRIORITY_DEFAULT
DB_MPOOLFILE->set_priority  DB_PRIORITY_HIGH
DB_MPOOLFILE->set_priority  DB_PRIORITY_LOW
DB_MPOOLFILE->set_priority  DB_PRIORITY_VERY_HIGH
DB_MPOOLFILE->set_priority  DB_PRIORITY_VERY_LOW
DB_ENV->open  DB_PRIVATE
DB->open  DB_QUEUE
DB->open  DB_RDONLY
DB_MPOOLFILE->open  DB_RDONLY
DB->open  DB_RECNO
DB->set_flags  DB_RECNUM
DB_ENV->open  DB_RECOVER
DB_ENV->set_feedback  DB_RECOVER
DB_ENV->open  DB_RECOVER_FATAL
DB_ENV->set_flags  DB_REGION_INIT
DB->set_flags  DB_RENUMBER
DB_ENV->rep_start  DB_REP_CLIENT
DB_ENV->rep_process_message  DB_REP_DUPMASTER
DB_ENV->rep_process_message  DB_REP_HOLDELECTION
DB_ENV->rep_process_message  DB_REP_ISPERM
DB_ENV->rep_start  DB_REP_MASTER
DB_ENV->rep_process_message  DB_REP_NEWMASTER
DB_ENV->rep_process_message  DB_REP_NEWSITE
DB_ENV->set_rep_transport  DB_REP_NOBUFFER
DB_ENV->rep_process_message  DB_REP_NOTPERM
DB_ENV->set_rep_transport  DB_REP_PERMANENT
DB_ENV->rep_process_message  DB_REP_STARTUPDONE
 DB_REP_UNAVAIL
DB->set_flags  DB_REVSPLITOFF
DB->get  DB_RMW
DB->join  DB_RMW
DBcursor->c_get  DB_RMW
db_env_create  DB_RPCCLIENT
Error returns to applications  DB_RUNRECOVERY
DB->verify  DB_SALVAGE
DB_SEQUENCE->set_flags  DB_SEQ_DEC
DB_SEQUENCE->set_flags  DB_SEQ_INC
DB_SEQUENCE->set_flags  DB_SEQ_WRAP
DBcursor->c_get  DB_SET
DB_LOGC->get  DB_SET
DB_ENV->set_timeout  DB_SET_LOCK_TIMEOUT
DB_TXN->set_timeout  DB_SET_LOCK_TIMEOUT
DBcursor->c_get  DB_SET_RANGE
DB->get  DB_SET_RECNO
DBcursor->c_get  DB_SET_RECNO
DB_ENV->set_timeout  DB_SET_TXN_TIMEOUT
DB_TXN->set_timeout  DB_SET_TXN_TIMEOUT
DB->set_flags  DB_SNAPSHOT
DB->stat  DB_STAT_ALL
DB_ENV->stat_print  DB_STAT_ALL
DB_ENV->lock_stat  DB_STAT_ALL
DB_ENV->log_stat  DB_STAT_ALL
DB_ENV->memp_stat  DB_STAT_ALL
DB_ENV->rep_stat  DB_STAT_ALL
DB_ENV->txn_stat  DB_STAT_ALL
DB_ENV->lock_stat  DB_STAT_CLEAR
DB_ENV->log_stat  DB_STAT_CLEAR
DB_ENV->memp_stat  DB_STAT_CLEAR
DB_ENV->rep_stat  DB_STAT_CLEAR
DB_SEQUENCE->stat  DB_STAT_CLEAR
DB_ENV->txn_stat  DB_STAT_CLEAR
DB_ENV->lock_stat  DB_STAT_LOCK_CONF
DB_ENV->lock_stat  DB_STAT_LOCK_LOCKERS
DB_ENV->lock_stat  DB_STAT_LOCK_OBJECTS
DB_ENV->lock_stat  DB_STAT_LOCK_PARAMS
DB_ENV->memp_stat  DB_STAT_MEMP_HASH
DB_ENV->stat_print  DB_STAT_SUBSYSTEM
DB_ENV->open  DB_SYSTEM_MEM
DB->open  DB_THREAD
DB_ENV->open  DB_THREAD
DB_SEQUENCE->open  DB_THREAD
DB_ENV->set_flags  DB_TIME_NOTGRANTED
DB->open  DB_TRUNCATE
DB_ENV->set_app_dispatch  DB_TXN_ABORT
DB_ENV->set_app_dispatch  DB_TXN_APPLY
DB_ENV->set_app_dispatch  DB_TXN_BACKWARD_ROLL
DB_ENV->set_app_dispatch  DB_TXN_FORWARD_ROLL
DB_ENV->set_flags  DB_TXN_NOSYNC
DB_SEQUENCE->get  DB_TXN_NOSYNC
DB_SEQUENCE->remove  DB_TXN_NOSYNC
DB_ENV->txn_begin  DB_TXN_NOSYNC
DB_TXN->commit  DB_TXN_NOSYNC
DB->set_flags  DB_TXN_NOT_DURABLE
DB_ENV->txn_begin  DB_TXN_NOWAIT
DB_ENV->set_app_dispatch  DB_TXN_PRINT
DB_ENV->txn_begin  DB_TXN_SYNC
DB_TXN->commit  DB_TXN_SYNC
DB_ENV->set_flags  DB_TXN_WRITE_NOSYNC
DB->open  DB_UNKNOWN
DB->set_feedback  DB_UPGRADE
DB_ENV->open  DB_USE_ENVIRON
DB_ENV->remove  DB_USE_ENVIRON
DB_ENV->open  DB_USE_ENVIRON_ROOT
DB_ENV->remove  DB_USE_ENVIRON_ROOT
DB_ENV->set_verbose  DB_VERB_DEADLOCK
DB_ENV->set_verbose  DB_VERB_RECOVERY
DB_ENV->set_verbose  DB_VERB_REPLICATION
DB_ENV->set_verbose  DB_VERB_WAITSFOR
DB->set_feedback  DB_VERIFY
 DB_VERIFY_BAD
 DB_VERSION_MISMATCH
DB->cursor  DB_WRITECURSOR
db_create  DB_XA_CREATE
 DB_XIDDATASIZE
DB_ENV->set_flags  DB_YIELDCPU
 deadlocks
introduction to  debugging
 debugging applications
 degree 2 isolation
 degrees of isolation
 deleting records
 deleting records with a cursor
 dirty reads
 disk space requirements
 Distributed Transactions
DBT  dlen
DBT  doff
 double buffering
 duplicate data items
sorted  duplicate data items
 duplicate data items
 duplicating a cursor
turn off database  durability
 emptying a database
database  encryption
 encryption
turn off access to a database  environment
database  environment
use  environment constants in naming
use  environment constants in naming
database  environment FAQ
fault database  environment in during open
 environment variables
introduction to database  environments
 equality join
 error handling
 error name space
 error returns
selecting a Queue  extent size
hot  failover
Java  FAQ
Tcl  FAQ
XA  FAQ
configuring without large  file support
 file utility
returning pages to the  filesystem
recovery and  filesystem operations
remote  filesystems
page  fill factor
configuring a small memory  footprint library
Berkeley DB  free-threaded handles
 FreeBSD
specifying a database  hash
 hash table size
 HP-UX
secondary  indices
 installing Berkeley DB for UNIX systems
 interface compatibility
 IRIX
degrees of  isolation
degree 2  isolation
configuring the  Java API
 Java compatibility
 Java configuration
 Java FAQ
equality  join
 key/data pairs
retrieved  key/data permanence
database  limits
 Linux
changing compile or  load options
DB_ENV->lock_vec  lock
standard  lock modes
ignore  locking
page-level  locking
two-phase  locking
 locking and non-Berkeley DB applications
 locking configuration
Berkeley DB Transactional Data Store  locking conventions
Berkeley DB Concurrent Data Store  locking conventions
configure  locking for Berkeley DB Concurrent Data Store
 locking granularity
introduction to the  locking subsystem
sizing the  locking subsystem
 locking without transactions
 log file limits
automatic  log file removal
 log file removal
 logging configuration
introduction to the  logging subsystem
retrieving Btree records by  logical record @number
in memory  logs
turn off database file  memory mapping
 memory pool configuration
introduction to the  memory pool subsystem
configuring for  MinGW
 mod
DB_ENV->lock_vec  mode
Berkeley DB library  name spaces
file  naming
 natural join
 NFS problems
retrieving Btree records by logical record  number
DB_ENV->lock_vec  obj
DB_ENV->lock_vec  op
 opening a database
 ordered retrieval of records from Queue databases
 OSF/1
selecting a  page size
ignore database environment  panic
 partial record storage and retrieval
 Patches, Updates and Change logs
 Perl
retrieved key/data  permanence
 PHP
task/thread  priority
Sleepycat Software's Berkeley DB  products
building for  QNX
 QNX
dirty  reads
accessing Btree records by  record number
logical  record numbers
managing  record-based databases
logically renumbering  records
Berkeley DB  recoverability
 renumbering records in Recno databases
 repeatable read
introduction to  replication
 Resource Manager
XA  Resource Manager
 retrieving records
 retrieving records in bulk
 retrieving records with a cursor
turn off  reverse splits in Btree databases
 RPC client
configuring a  RPC client/server
introduction to  rpc client/server
 RPC FAQ
 RPC server
database  salvage
 SCO
Berkeley DB handle  scope
 secondary indices
 security
introduction to  sequences
disabling  shared libraries
 shared libraries
 signal handling
DBT  size
 Sleepycat Software
 Solaris
 source code layout
turn off reverse  splits in Btree databases
cursor  stability
cursor  stability
disabling  static libraries
database  statistics
 storing records
 storing records with a cursor
configure for  stress testing
 SunOS
loading Berkeley DB with  Tcl
using Berkeley DB with  Tcl
configuring the  Tcl API
 Tcl API programming notes
 Tcl FAQ
 temporary files
configuring the  test suite
running the  test suite
running the  test suite under UNIX
running the  test suite under Windows
 text backing files
pre-loading  text files into Recno databases
loading  text into databases
dumping/loading  text to/from databases
building  threaded applications
lock  timeouts
transaction  timeouts
turn off synchronous  transaction commit
turn off synchronous  transaction commit
 transaction configuration
 transaction FAQ
 transaction limits
 Transaction Manager
administering  transaction protected applications
archival in  transaction protected applications
checkpoints in  transaction protected applications
deadlock detection in  transaction protected applications
recovery in  transaction protected applications
introduction to the  transaction subsystem
 transaction throughput
 transaction tuning
 Transactional Data Store
nested  transactions
 truncating a database
access method  tuning
transaction  tuning
configuring Berkeley DB with the  Tuxedo System
DBT  ulen
 Ultrix
 Unicode
building for  UNIX
building for  UNIX FAQ
configuring Berkeley DB for  UNIX systems
Patches,  Updates and Change logs
 upgrading databases
 Upgrading to release 2.0
 Upgrading to release 3.0
 Upgrading to release 3.1
 Upgrading to release 3.2
 Upgrading to release 3.3
 Upgrading to release 4.0
 Upgrading to release 4.1
 Upgrading to release 4.2
 Upgrading to release 4.3
 utilities
database  verification
building for  VxWorks FAQ
 VxWorks notes
running the test suite under  Windows
building for  Windows FAQ
 Windows notes
 XA FAQ
 XA Resource Manager
 __db.001

Copyright Sleepycat Software

diff --git a/db/docs/api_c/db_associate.html b/db/docs/api_c/db_associate.html
index eea6970f7..1e2330d1e 100644
--- a/db/docs/api_c/db_associate.html
+++ b/db/docs/api_c/db_associate.html
@@ -1,23 +1,22 @@
Berkeley DB: DB->associate

DB->associate

API -Ref -
+Ref +


@@ -44,29 +43,28 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

callback
-The callback parameter is a callback function that creates a +
+
callback
The callback parameter is a callback function that creates a secondary key from a given primary key and data pair.

The callback parameter may be NULL if both the primary and secondary database handles were opened with the DB_RDONLY flag.

The callback takes four arguments:

-

-

secondary
The secondary parameter is the database handle for the secondary. -

key
The key parameter is a DBT referencing the primary key. -

data
The data parameter is a DBT referencing the primary data +
+
secondary
The secondary parameter is the database handle for the secondary. +
key
The key parameter is a DBT referencing the primary key. +
data
The data parameter is a DBT referencing the primary data item. -

result
The result parameter is a zeroed DBT in which the callback +
result
The result parameter is a zeroed DBT in which the callback function should fill in data and size fields that describe the secondary key.
- +

If the callback function needs to allocate memory for the data field rather than simply pointing into the primary key or datum, the flags field of the returned DBT should be set to DB_DBT_APPMALLOC, which indicates that Berkeley DB should free the memory when it is done with it.

- +

If any key/data pair in the primary yields a null secondary key and should be left out of the secondary index, the callback function may optionally return DB_DONOTINDEX. Otherwise, the callback @@ -80,11 +78,10 @@ iterations and range queries will reflect only the corresponding subset of the database. If this is not desirable, the application should ensure that the callback function is well-defined for all possible values and never returns DB_DONOTINDEX.

-

flags
-The flags parameter must be set to 0 or +
flags
The flags parameter must be set to 0 or the following value: -

-

DB_CREATE
If the secondary database is empty, walk through the primary and create +
+
DB_CREATE
If the secondary database is empty, walk through the primary and create an index to it in the empty secondary. This operation is potentially very expensive.

If the secondary database has been opened in an environment configured @@ -102,16 +99,14 @@ not do any special operation ordering.

In addition, the following flag may be set by bitwise inclusively OR'ing it into the flags parameter: -

-

DB_AUTO_COMMIT
Enclose the DB->associate call within a transaction. If the call succeeds, +
+
DB_AUTO_COMMIT
Enclose the DB->associate call within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, the operation will have made no changes.
-

primary
-The primary parameter should be a database handle for the primary +
primary
The primary parameter should be a database handle for the primary database that is to be indexed. -

secondary
-The secondary parameter should be an open database handle of +
secondary
The secondary parameter should be an open database handle of either a newly created and empty database that is to be used to store a secondary index, or of a database that was previously associated with the same primary and contains a secondary index. Note that it is not @@ -121,8 +116,7 @@ with the DB_THREAD flag it is saf of control after the DB->associate method has returned. Note also that either secondary keys must be unique or the secondary database must be configured with support for duplicate data items. -

txnid
-If the operation is to be transaction-protected, +
txnid
If the operation is to be transaction-protected, (other than by specifying the DB_AUTO_COMMIT flag), the txnid parameter is a transaction handle returned from DB_ENV->txn_begin; otherwise, NULL. @@ -130,12 +124,12 @@ the txnid parameter is a transaction handle returned from
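As a supplement to the parameter descriptions above, here is a minimal sketch (not part of the original page) of a secondary-key callback and the DB->associate call that installs it. The student_record layout, the getname() and setup_index() names, and the already-opened primary (dbp) and secondary (sdbp) handles are assumptions made only for this example.

#include <string.h>
#include <db.h>

struct student_record {			/* assumed primary data layout */
	char student_id[4];
	char last_name[15];
	char first_name[15];
};

/* Build the secondary key (the last name) from a primary key/data pair. */
int
getname(DB *secondary, const DBT *pkey, const DBT *pdata, DBT *skey)
{
	memset(skey, 0, sizeof(DBT));
	skey->data = ((struct student_record *)pdata->data)->last_name;
	skey->size = sizeof(((struct student_record *)pdata->data)->last_name);
	return (0);
}

/* Associate an open, empty secondary with an open primary; DB_CREATE
 * walks the primary and builds the index, as described above. */
int
setup_index(DB *dbp, DB *sdbp)
{
	return (dbp->associate(dbp, NULL, sdbp, getname, DB_CREATE));
}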

Errors

The DB->associate method may fail and return one of the following non-zero errors:

-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

EINVAL
If the secondary database handle has already been associated with this or +
+
EINVAL
If the secondary database handle has already been associated with this or another database handle; the secondary database handle is not open; the primary database has been configured to allow duplicates; or if an invalid flag value or parameter was specified. @@ -149,6 +143,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/api_c/db_class.html b/db/docs/api_c/db_class.html
index dfea794b6..e0a57786d 100644
--- a/db/docs/api_c/db_class.html
+++ b/db/docs/api_c/db_class.html
@@ -1,23 +1,22 @@
Berkeley DB: db_create

db_create

API -Ref -
+Ref +


@@ -55,12 +54,10 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

dbp
-The dbp parameter references the memory into which the returned +
+
dbp
The dbp parameter references the memory into which the returned structure pointer is stored. -

dbenv
-If the dbenv parameter is NULL, the database is standalone; that +
dbenv
If the dbenv parameter is NULL, the database is standalone; that is, it is not part of any Berkeley DB environment.

If the dbenv parameter is not NULL, the database is created within the specified Berkeley DB environment. The database access methods @@ -68,11 +65,10 @@ automatically make calls to the other subsystems in Berkeley DB, based on the enclosing environment. For example, if the environment has been configured to use locking, the access methods will automatically acquire the correct locks when reading and writing pages of the database.

-

flags
-The flags parameter must be set to 0 or +
flags
The flags parameter must be set to 0 or the following value: -

-

DB_XA_CREATE
Instead of creating a standalone database, create a database intended +
+
DB_XA_CREATE
Instead of creating a standalone database, create a database intended to be accessed via applications running under an X/Open conformant Transaction Manager. The database will be opened in the environment specified by the OPENINFO parameter of the GROUPS section of the @@ -83,8 +79,8 @@ Introduction section in the Berkeley DB Reference Guide for more information
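For illustration only, a minimal sketch of creating a standalone database handle with db_create; the create_handle() wrapper and its error-reporting style are assumptions for the example, not part of the original page.

#include <stdio.h>
#include <db.h>

int
create_handle(DB **dbpp)
{
	int ret;

	/* NULL environment: the database handle will be standalone. */
	if ((ret = db_create(dbpp, NULL, 0)) != 0) {
		fprintf(stderr, "db_create: %s\n", db_strerror(ret));
		return (ret);
	}
	return (0);
}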

Errors

The db_create method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -96,6 +92,6 @@ DB

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/api_c/db_close.html b/db/docs/api_c/db_close.html
index a18f1ede4..477730ddd 100644
--- a/db/docs/api_c/db_close.html
+++ b/db/docs/api_c/db_close.html
@@ -1,23 +1,22 @@
Berkeley DB: DB->close

DB->close

API -Ref -
+Ref +


@@ -52,12 +51,11 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
+
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_NOSYNC
Do not flush cached information to disk. The DB_NOSYNC flag is +
+
DB_NOSYNC
Do not flush cached information to disk. The DB_NOSYNC flag is a dangerous option. It should be set only if the application is doing logging (with transactions) so that the database is recoverable after a system or application crash, or if the database is always generated @@ -77,8 +75,8 @@ updated copy.

Errors

The DB->close method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -90,6 +88,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/api_c/db_cursor.html b/db/docs/api_c/db_cursor.html
index 8060b49fc..25ac77850 100644
--- a/db/docs/api_c/db_cursor.html
+++ b/db/docs/api_c/db_cursor.html
@@ -1,23 +1,22 @@
Berkeley DB: DB->cursor

DB->cursor

API -Ref -
+Ref +


@@ -35,23 +34,24 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

cursorp
-The cursorp parameter references memory into which +
+
cursorp
The cursorp parameter references memory into which a pointer to the allocated cursor is copied. -

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_DIRTY_READ
All read operations performed by the cursor may return modified but not +
+
DB_DEGREE_2
This cursor will have degree 2 isolation. This ensures the stability +of the current data item read by this cursor but permits data read +by this cursor to be modified or deleted prior to the commit of +the transaction for this cursor. +
DB_DIRTY_READ
All read operations performed by the cursor may return modified but not yet committed data. Silently ignored if the DB_DIRTY_READ flag was not specified when the underlying database was opened. -

DB_WRITECURSOR
Specify that the cursor will be used to update the database. The +
DB_WRITECURSOR
Specify that the cursor will be used to update the database. The underlying database environment must have been opened using the DB_INIT_CDB flag.
-

txnid
-If the operation is to be transaction-protected, +
txnid
If the operation is to be transaction-protected, the txnid parameter is a transaction handle returned from DB_ENV->txn_begin; otherwise, NULL. To transaction-protect cursor operations, cursors must be opened and closed within the context of a transaction, and the txnid @@ -61,12 +61,12 @@ used.
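A minimal non-transactional sketch of the cursor lifecycle described above, added for illustration; dbp is assumed to be an open DB handle, and DB_DEGREE_2 or DB_DIRTY_READ could be passed instead of 0 under the conditions described above.

#include <stdio.h>
#include <string.h>
#include <db.h>

int
dump_keys(DB *dbp)
{
	DBC *dbcp;
	DBT key, data;
	int ret, t_ret;

	/* Create the cursor; NULL means no enclosing transaction. */
	if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0)
		return (ret);

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));

	/* Walk the database; DB_NOTFOUND marks the end of the traversal. */
	while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0)
		printf("%.*s\n", (int)key.size, (char *)key.data);
	if (ret == DB_NOTFOUND)
		ret = 0;

	if ((t_ret = dbcp->c_close(dbcp)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}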

Errors

The DB->cursor method may fail and return one of the following non-zero errors:

-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -78,6 +78,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/api_c/db_del.html b/db/docs/api_c/db_del.html
index 06a2c4b77..9d5ecd47a 100644
--- a/db/docs/api_c/db_del.html
+++ b/db/docs/api_c/db_del.html
@@ -1,23 +1,22 @@
Berkeley DB: DB->del

DB->del

API -Ref -
+Ref +


@@ -35,8 +34,7 @@ associated with the designated key will be discarded.

When called on a database that has been made into a secondary index using the DB->associate method, the DB->del method deletes the key/data pair from the primary database and all secondary indices.

-

-The DB->del method will return DB_NOTFOUND if the specified key is not in the database. +

The DB->del method will return DB_NOTFOUND if the specified key is not in the database. The DB->del method will return DB_KEYEMPTY if the database is a Queue or Recno database and the specified key exists, but was never explicitly created by the application or was later deleted. @@ -45,19 +43,16 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

key
-The key DBT operated on. -

flags
-The flags parameter must be set to 0 or +
+
key
The key DBT operated on. +
flags
The flags parameter must be set to 0 or the following value: -

-

DB_AUTO_COMMIT
Enclose the DB->del call within a transaction. If the call succeeds, +
+
DB_AUTO_COMMIT
Enclose the DB->del call within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, the operation will have made no changes.
-

txnid
-If the operation is to be transaction-protected, +
txnid
If the operation is to be transaction-protected, (other than by specifying the DB_AUTO_COMMIT flag), the txnid parameter is a transaction handle returned from DB_ENV->txn_begin; otherwise, NULL. @@ -65,24 +60,24 @@ the txnid parameter is a transaction handle returned from
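A short sketch, for illustration, of deleting a single key non-transactionally; the delete_key() wrapper, the nul-terminated key string, and the open handle dbp are assumptions made for the example.

#include <string.h>
#include <db.h>

int
delete_key(DB *dbp, const char *name)
{
	DBT key;
	int ret;

	memset(&key, 0, sizeof(key));
	key.data = (void *)name;
	key.size = (u_int32_t)strlen(name) + 1;

	switch (ret = dbp->del(dbp, NULL, &key, 0)) {
	case 0:
	case DB_NOTFOUND:		/* an absent key is often not an error */
		return (0);
	default:
		dbp->err(dbp, ret, "DB->del: %s", name);
		return (ret);
	}
}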

Errors

The DB->del method may fail and return one of the following non-zero errors:

-

-

DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve +
+
DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve a deadlock. -

DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable +
DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable to grant a lock in the allowed time.
-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

DB_SECONDARY_BAD
A secondary index references a nonexistent primary key. +
+
DB_SECONDARY_BAD
A secondary index references a nonexistent primary key.
-

-

EACCES
An attempt was made to modify a read-only database. +
+
EACCES
An attempt was made to modify a read-only database.
-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -94,6 +89,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/api_c/db_err.html b/db/docs/api_c/db_err.html
index 9c88a243f..4423a1084 100644
--- a/db/docs/api_c/db_err.html
+++ b/db/docs/api_c/db_err.html
@@ -1,24 +1,23 @@
Berkeley DB: DB->err

DB->err

API -Ref -
+Ref +


@@ -32,22 +31,21 @@ DB->errx(DB *db, const char *fmt, ...);
 


Description: DB->err

-

The DB_ENV->err, DB_ENV->errx, DB->err and DB->errx methods provide error-messaging functionality for applications written using the Berkeley DB library.

The DB_ENV->err method constructs an error message consisting of the following elements:

-

-

An optional prefix string
If no error callback function has been set using the +
+
An optional prefix string
If no error callback function has been set using the DB_ENV->set_errcall method, any prefix string specified using the DB_ENV->set_errpfx method, followed by two separating characters: a colon and a <space> character. -

An optional printf-style message
The supplied message fmt, if non-NULL, in which the +
An optional printf-style message
The supplied message fmt, if non-NULL, in which the ANSI C X3.159-1989 (ANSI C) printf function specifies how subsequent parameters are converted for output. -

A separator
Two separating characters: a colon and a <space> character. -

A standard error string
The standard system or Berkeley DB library error string associated with the +
A separator
Two separating characters: a colon and a <space> character. +
A standard error string
The standard system or Berkeley DB library error string associated with the error value, as returned by the db_strerror method.
@@ -60,20 +58,24 @@ parameters: any prefix string specified (see DB_ENV->set_errfile), the error message is written to that output stream.

If none of these output options has been configured, the error message -is written to stderr, the standard -error output stream.

+is written to stderr, the standard error output stream.

+

Parameters

+
+
error
The error parameter is the error value for which the +DB_ENV->err and DB->err methods will display an explanatory +string.
fmt
The fmt parameter is an optional printf-style message to display. +

The DB_ENV->errx and DB->errx methods perform identically to the DB_ENV->err and DB->err methods, except that they do not append the final separator characters and standard error string to the error message.

Parameters

-

-

error
-The error parameter is the error value for which the +
+
error
The error parameter is the error value for which the DB_ENV->err and DB->err methods will display an explanatory string. -

fmt
-The fmt parameter is an optional printf-style message to display. +
fmt
The fmt parameter is an optional printf-style message to display.
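For illustration, a sketch of how DB->err might be used to report a failed operation through whatever error channels have been configured; the put_or_report() wrapper and the already-prepared handle and DBTs are assumptions for the example.

#include <db.h>

/* Store a key/data pair, reporting any failure via DB->err. */
int
put_or_report(DB *dbp, DBT *key, DBT *data)
{
	int ret;

	if ((ret = dbp->put(dbp, NULL, key, data, 0)) != 0)
		/* Any configured prefix, the formatted message, and the
		 * standard error string for ret are combined into one
		 * diagnostic line. */
		dbp->err(dbp, ret, "DB->put failed for key of size %lu",
		    (unsigned long)key->size);
	return (ret);
}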

Class

@@ -84,6 +86,6 @@ The fmt parameter is an optional printf-style message to display.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/api_c/db_fd.html b/db/docs/api_c/db_fd.html
index 78163e0e2..1ecd5c46e 100644
--- a/db/docs/api_c/db_fd.html
+++ b/db/docs/api_c/db_fd.html
@@ -1,23 +1,22 @@
Berkeley DB: DB->fd

DB->fd

API -Ref -
+Ref +


@@ -43,9 +42,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

fdp
-The fdp parameter references memory into which +
+
fdp
The fdp parameter references memory into which the current file descriptor is copied.

@@ -57,6 +55,6 @@ The fdp parameter references memory into which

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/api_c/db_get.html b/db/docs/api_c/db_get.html
index 5e23e79b0..d5eb959b1 100644
--- a/db/docs/api_c/db_get.html
+++ b/db/docs/api_c/db_get.html
@@ -1,23 +1,22 @@
Berkeley DB: DB->get

DB->get

API -Ref -
+Ref +


@@ -33,28 +32,21 @@ DB->pget(DB *db,
 


Description: DB->get

-

The DB->get method retrieves key/data pairs from the database. The -address -and length of the data associated with the specified key are -returned in the structure to which data refers.

+address and length of the data associated with the specified key +are returned in the structure to which data refers.

In the presence of duplicate key values, DB->get will return the first data item for the designated key. Duplicates are sorted by insert order, except where this order has been overridden by cursor operations. Retrieval of duplicates requires the use of cursor operations. See DBcursor->c_get for details.

When called on a database that has been made into a secondary index -using the DB->associate method, the DB->get -and DB->pget methods return -the key from the secondary index and the data item from the primary -database. In addition, the -DB->pget method +using the DB->associate method, the DB->get and +DB->pget methods return the key from the secondary index and the data +item from the primary database. In addition, the DB->pget method returns the key from the primary database. In databases that are not -secondary indices, the -DB->pget method -will always fail.

-

-The DB->get method will return DB_NOTFOUND if the specified key is not in the database. +secondary indices, the DB->pget method will always fail.

+

The DB->get method will return DB_NOTFOUND if the specified key is not in the database. The DB->get method will return DB_KEYEMPTY if the database is a Queue or Recno database and the specified key exists, but was never explicitly created by the application or was later deleted. @@ -63,21 +55,19 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

data
-The data DBT operated on. -

flags
-The flags parameter must be set to 0 or +
+
data
The data DBT operated on. +
flags
The flags parameter must be set to 0 or one of the following values: -

-

DB_CONSUME
Return the record number and data from the available record closest to +
+
DB_CONSUME
Return the record number and data from the available record closest to the head of the queue, and delete the record. The cursor will be positioned on the deleted record. The record number will be returned in key, as described in DBT. The data will be returned in the data parameter. A record is available if it is not deleted and is not currently locked. The underlying database must be of type Queue for DB_CONSUME to be specified. -

DB_CONSUME_WAIT
The DB_CONSUME_WAIT flag is the same as the DB_CONSUME +
DB_CONSUME_WAIT
The DB_CONSUME_WAIT flag is the same as the DB_CONSUME flag, except that if the Queue database is empty, the thread of control will wait until there is data in the queue before returning. The underlying database must be of type Queue for DB_CONSUME_WAIT @@ -87,37 +77,36 @@ with the DB_CONSUME_WAIT flag may return DB_LOCK_NOTGRANTED. This failure, by itself, does not require the enclosing transaction be aborted.

-

DB_GET_BOTH
Retrieve the key/data pair only if both the key and data match the +
DB_GET_BOTH
Retrieve the key/data pair only if both the key and data match the arguments. -

When used with the -DB->pget method -version of this method on a secondary index handle, return the -secondary key/primary key/data tuple only if both the primary and -secondary keys match the arguments. It is an error to use the -DB_GET_BOTH flag with the -DB->get +

When used with the DB->pget method version of this method on a +secondary index handle, return the secondary key/primary key/data tuple +only if both the primary and secondary keys match the arguments. It is +an error to use the DB_GET_BOTH flag with the DB->get version of this method and a secondary index handle.

-

DB_SET_RECNO
Retrieve the specified numbered key/data pair from a database. Upon +
DB_SET_RECNO
Retrieve the specified numbered key/data pair from a database. Upon return, both the key and data items will have been filled in. -

The data field of the specified key -must be a pointer to a logical record number (that is, a db_recno_t). -This record number determines the record to be retrieved. +

The data field of the specified key must be a pointer +to a logical record number (that is, a db_recno_t). This record +number determines the record to be retrieved.

For DB_SET_RECNO to be specified, the underlying database must be -of type Btree, and it must have been created with the DB_RECNUM flag.

+of type Btree, and it must have been created with the DB_RECNUM flag.

In addition, the following flags may be set by bitwise inclusively OR'ing them into the flags parameter: -

-

DB_AUTO_COMMIT
Enclose the DB->get call within a transaction. If the call +
+
DB_AUTO_COMMIT
Enclose the DB->get call within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, the operation will have made no changes. This flag may only be specified with the DB_CONSUME and DB_CONSUME_WAIT flags. -

DB_DIRTY_READ
Read modified but not yet committed data. Silently ignored if the +
DB_DEGREE_2
Perform the get operation with degree 2 isolation. +The read is not repeatable. +
DB_DIRTY_READ
Read modified but not yet committed data. Silently ignored if the DB_DIRTY_READ flag was not specified when the underlying database was opened. -

DB_MULTIPLE
Return multiple data items in the buffer to which the data +
DB_MULTIPLE
Return multiple data items in the buffer to which the data parameter refers.

In the case of Btree or Hash databases, all of the data items associated with the specified key are entered into the buffer. In the case of @@ -130,7 +119,7 @@ least as large as the page size of the underlying database, aligned for unsigned integer access, and be a multiple of 1024 bytes in size. If the buffer size is insufficient, then upon return from the call the size field of the data parameter will have been set to an estimated -buffer size, and the error ENOMEM is returned. (The size is an estimate as the +buffer size, and the error DB_BUFFER_SMALL is returned. (The size is an estimate as the exact size needed may not be known until all entries are read. It is best to initially provide a relatively large buffer, but applications should be prepared to resize the buffer as necessary and repeatedly call @@ -142,7 +131,7 @@ into secondary indices using the DB->ass

See DB_MULTIPLE_INIT for more information.

-

DB_RMW
Acquire write locks instead of read locks when doing the retrieval. +
DB_RMW
Acquire write locks instead of read locks when doing the retrieval. Setting this flag can eliminate deadlock during a read-modify-write cycle by acquiring the write lock during the read part of the cycle so that another thread of control acquiring a read lock for the same item, @@ -152,46 +141,43 @@ Berkeley DB calls in non-transactional operations, the DBT operated on. -

pkey
-The pkey parameter is the return key from the primary database. -

txnid
-If the operation is to be transaction-protected, +
key
The key DBT operated on. +
pkey
The pkey parameter is the return key from the primary database. +
txnid
If the operation is to be transaction-protected, the txnid parameter is a transaction handle returned from DB_ENV->txn_begin; otherwise, NULL.
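A minimal single-item retrieval sketch, for illustration; bulk retrieval with DB_MULTIPLE and the other flags above follow the same call shape. The fetch_one() wrapper, the nul-terminated key string, and the open handle dbp are assumptions for the example.

#include <stdio.h>
#include <string.h>
#include <db.h>

int
fetch_one(DB *dbp, const char *name)
{
	DBT key, data;
	int ret;

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = (void *)name;
	key.size = (u_int32_t)strlen(name) + 1;

	switch (ret = dbp->get(dbp, NULL, &key, &data, 0)) {
	case 0:
		printf("%s -> %.*s\n", name, (int)data.size, (char *)data.data);
		return (0);
	case DB_NOTFOUND:
		printf("%s: not found\n", name);
		return (0);
	default:
		dbp->err(dbp, ret, "DB->get: %s", name);
		return (ret);
	}
}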

Errors

The DB->get method may fail and return one of the following non-zero errors:

-

-

DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve +
+
DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve a deadlock. -

DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable +
DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable to grant a lock in the allowed time.
-

-

DB_LOCK_NOTGRANTED
The DB_CONSUME_WAIT flag was specified, lock or transaction +
+
DB_LOCK_NOTGRANTED
The DB_CONSUME_WAIT flag was specified, lock or transaction timers were configured and the lock could not be granted before the wait-time expired.
-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

DB_SECONDARY_BAD
A secondary index references a nonexistent primary key. +
+
DB_SECONDARY_BAD
A secondary index references a nonexistent primary key.
-

-

EINVAL
If a record number of 0 was specified; +
+
EINVAL
If a record number of 0 was specified; the DB_THREAD flag was specified to the DB->open method and none of the DB_DBT_MALLOC, DB_DBT_REALLOC or DB_DBT_USERMEM flags were set in the DBT; -the DB->pget method -was called with a DB handle that does not refer to a secondary index; or if an +the DB->pget method was called with a DB handle that does not +refer to a secondary index; or if an invalid flag value or parameter was specified.
-

-

ENOMEM
The requested item could not be returned due to insufficient memory. +
+
DB_BUFFER_SMALL
The requested item could not be returned due to an undersized buffer.

Class

@@ -202,6 +188,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/api_c/db_get_byteswapped.html b/db/docs/api_c/db_get_byteswapped.html
index b7a3849a9..daa16f68e 100644
--- a/db/docs/api_c/db_get_byteswapped.html
+++ b/db/docs/api_c/db_get_byteswapped.html
@@ -1,23 +1,22 @@
Berkeley DB: DB->get_byteswapped

DB->get_byteswapped

API -Ref -
+Ref +


@@ -28,13 +27,12 @@ DB->get_byteswapped(DB *db, int *isswapped);
 


Description: DB->get_byteswapped

-

The DB->get_byteswapped method returns -if the underlying database files were created on an architecture of the -same byte order as the current one, -or -if they were not (that is, big-endian on a little-endian machine, or -vice versa). This information may be used to determine whether -application data needs to be adjusted for this architecture or not.

+

The DB->get_byteswapped method returns if the underlying database +files were created on an architecture of the same byte order as the +current one, or if they were not (that is, big-endian on a little-endian +machine, or vice versa). This information may be used to determine +whether application data needs to be adjusted for this architecture or +not.

The DB->get_byteswapped method may not be called before the DB->open method has been called.

The DB->get_byteswapped method @@ -42,9 +40,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

isswapped
-If the underlying database files were created on an architecture of the +
+
isswapped
If the underlying database files were created on an architecture of the same byte order as the current one. 0 is stored into the memory location referenced by isswapped. If the underlying database files were created on an architecture of a different byte order as the current one, @@ -53,8 +50,8 @@ created on an architecture of a different byte order as the current one,

Errors

The DB->get_byteswapped method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called before DB->open was called; or if an +
+
EINVAL
If the method was called before DB->open was called; or if an invalid flag value or parameter was specified.

@@ -66,6 +63,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/api_c/db_get_mpf.html b/db/docs/api_c/db_get_mpf.html
index 6f1d67afd..12f0412c4 100644
--- a/db/docs/api_c/db_get_mpf.html
+++ b/db/docs/api_c/db_get_mpf.html
@@ -1,23 +1,22 @@
Berkeley DB: DB->mpf

DB->mpf

API -Ref -
+Ref +


@@ -27,6 +26,9 @@ DB_MPOOLFILE *DB->mpf
 


Description: DB->mpf

+

The DB->mpf method returns the DB_MPOOLFILE handle associated with the database.

+

The DB->mpf method may be called at any time during the life of the +application.

DB->mpf gives access to the DB_MPOOLFILE associated with a DB object.


@@ -38,6 +40,6 @@ with a DB object.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/api_c/db_get_type.html b/db/docs/api_c/db_get_type.html
index bef9ddfc3..888ca7a4d 100644
--- a/db/docs/api_c/db_get_type.html
+++ b/db/docs/api_c/db_get_type.html
@@ -1,23 +1,22 @@
Berkeley DB: DB->get_type

DB->get_type

API -Ref -
+Ref +


@@ -29,11 +28,10 @@ DB->get_type(DB *db, DBTYPE *type);
 

Description: DB->get_type

The DB->get_type method returns the type of the underlying access -method (and file format). The type value is one of DB_BTREE, -DB_HASH, DB_RECNO, or DB_QUEUE. This -value may be used to determine the type of the database after a return -from DB->open with the type parameter set to -DB_UNKNOWN.

+method (and file format). The type value is one of DB_BTREE, DB_HASH, +DB_RECNO, or DB_QUEUE. This value may be used to determine the type of +the database after a return from DB->open with the type +parameter set to DB_UNKNOWN.

The DB->get_type method may not be called before the DB->open method has been called.

The DB->get_type method @@ -41,16 +39,15 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

type
-The type parameter references memory into which +
+
type
The type parameter references memory into which the type of the underlying access method is copied.
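A small sketch, for illustration, of the DB_UNKNOWN pattern mentioned above: open an existing database without naming its type, then ask the handle what it is. The report_type() wrapper and the assumption that dbp was opened with type DB_UNKNOWN are specific to this example.

#include <stdio.h>
#include <db.h>

int
report_type(DB *dbp)
{
	DBTYPE type;
	int ret;

	/* dbp is assumed to have been opened with type DB_UNKNOWN. */
	if ((ret = dbp->get_type(dbp, &type)) != 0) {
		dbp->err(dbp, ret, "DB->get_type");
		return (ret);
	}
	printf("access method: %s\n",
	    type == DB_BTREE ? "Btree" :
	    type == DB_HASH ? "Hash" :
	    type == DB_RECNO ? "Recno" : "Queue");
	return (0);
}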

Errors

The DB->get_type method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called before DB->open was called; or if an +
+
EINVAL
If the method was called before DB->open was called; or if an invalid flag value or parameter was specified.

@@ -62,6 +59,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/api_c/db_getenv.html b/db/docs/api_c/db_getenv.html
index ec1be31fa..f4824b34a 100644
--- a/db/docs/api_c/db_getenv.html
+++ b/db/docs/api_c/db_getenv.html
@@ -1,46 +1,35 @@
Berkeley DB: DB->get_env

DB->get_env

API -Ref -
+Ref +


 #include <db.h>
 

DB_ENV * -DB->getenv(DB *db); +DB->get_env(DB *db);


Description: DB->get_env

-

The DB->get_env method returns the handle for the database environment underlying the database.

+

The DB->get_env method returns the DB_ENV handle for the database environment underlying the database.

The DB->get_env method may be called at any time during the life of the application.

-

The DB->get_env method -returns a non-zero error value on failure -and 0 on success. -

-

Parameters

-

-

db
-The DB->get_env method returns the -handle for the database environment underlying the database in db. -

Class

DB @@ -50,6 +39,6 @@ handle for the database environment underlying the database in db.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/api_c/db_join.html b/db/docs/api_c/db_join.html
index 93e41577d..f7518d35e 100644
--- a/db/docs/api_c/db_join.html
+++ b/db/docs/api_c/db_join.html
@@ -1,23 +1,22 @@
Berkeley DB: DB->join

DB->join

API -Ref -
+Ref +


@@ -37,32 +36,32 @@ information on how to organize your data to use this functionality, see
 the primary database.

The join cursor supports only the DBcursor->c_get and DBcursor->c_close cursor functions:

-

-

DBcursor->c_get
Iterates over the values associated with the keys to which each item in +
+
DBcursor->c_get
Iterates over the values associated with the keys to which each item in curslist was initialized. Any data value that appears in all items specified by the curslist parameter is then used as a key into the primary, and the key/data pair found in the primary is returned. The flags parameter must be set to 0 or the following value: -

-

DB_JOIN_ITEM
Do not use the data value found in all the cursors as a lookup key for +
+
DB_JOIN_ITEM
Do not use the data value found in all the cursors as a lookup key for the primary, but simply return it in the key parameter instead. The data parameter is left unchanged.
In addition, the following flag may be set by bitwise inclusively OR'ing it into the flags parameter: -

-

DB_DIRTY_READ
Read modified but not yet committed data. Silently ignored if the +
+
DB_DIRTY_READ
Read modified but not yet committed data. Silently ignored if the DB_DIRTY_READ flag was not specified when the underlying database was opened. -

DB_RMW
Acquire write locks instead of read locks when doing the retrieval. +
DB_RMW
Acquire write locks instead of read locks when doing the retrieval. Setting this flag can eliminate deadlock during a read-modify-write cycle by acquiring the write lock during the read part of the cycle so that another thread of control acquiring a read lock for the same item, in its own read-modify-write cycle, will not result in deadlock.
-

DBcursor->c_close
Close the returned cursor and release all resources. (Closing the cursors +
DBcursor->c_close
Close the returned cursor and release all resources. (Closing the cursors in curslist is the responsibility of the caller.)

The DB->join method @@ -70,9 +69,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

curslist
-The curslist parameter contains a NULL terminated array of cursors. +
+
curslist
The curslist parameter contains a NULL terminated array of cursors. Each cursor must have been initialized to refer to the key on which the underlying database should be joined. Typically, this initialization is done by a DBcursor->c_get call with the DB_SET flag specified. Once the @@ -90,14 +88,12 @@ most. By default, DB->join does this sort on behalf of its caller.

For the returned join cursor to be used in a transaction-protected manner, the cursors listed in curslist must have been created within the context of the same transaction.

-

dbcp
-The newly created join cursor is returned in the memory location to which -dbcp refers. -

flags
-The flags parameter must be set to 0 or +
dbcp
The newly created join cursor is returned in the memory location to +which dbcp refers. +
flags
The flags parameter must be set to 0 or the following value: -

-

DB_JOIN_NOSORT
Do not sort the cursors based on the number of data items to which they +
+
DB_JOIN_NOSORT
Do not sort the cursors based on the number of data items to which they refer. If the data are structured so that cursors with many data items also share many common elements, higher performance will result from listing those cursors before cursors with fewer data items; that is, a @@ -109,15 +105,15 @@ DB->join.
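For illustration, a sketch of the join pattern described above: two secondary cursors, each already positioned (for example with DB_SET) on the value to match, combined into a join cursor over the primary. The run_join() wrapper and the handles curs1 and curs2 are assumptions; their positioning is done elsewhere.

#include <string.h>
#include <db.h>

int
run_join(DB *primary, DBC *curs1, DBC *curs2)
{
	DBC *curslist[3], *join_curs;
	DBT key, data;
	int ret, t_ret;

	curslist[0] = curs1;		/* e.g., positioned on color == "blue" */
	curslist[1] = curs2;		/* e.g., positioned on size == "large" */
	curslist[2] = NULL;		/* the array must be NULL-terminated */

	if ((ret = primary->join(primary, curslist, &join_curs, 0)) != 0)
		return (ret);

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));

	/* Each successful c_get returns a primary key/data pair that
	 * satisfies every cursor in curslist. */
	while ((ret = join_curs->c_get(join_curs, &key, &data, 0)) == 0)
		;			/* process the matching record here */
	if (ret == DB_NOTFOUND)
		ret = 0;

	if ((t_ret = join_curs->c_close(join_curs)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}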

Errors

The DB->join method may fail and return one of the following non-zero errors:

-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

DB_SECONDARY_BAD
A secondary index references a nonexistent primary key. +
+
DB_SECONDARY_BAD
A secondary index references a nonexistent primary key.
-

-

EINVAL
If cursor methods other than DBcursor->c_get or DBcursor->c_close were +
+
EINVAL
If cursor methods other than DBcursor->c_get or DBcursor->c_close were called; or if an invalid flag value or parameter was specified.
@@ -130,6 +126,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/api_c/db_key_range.html b/db/docs/api_c/db_key_range.html
index d54adb7f5..a83305475 100644
--- a/db/docs/api_c/db_key_range.html
+++ b/db/docs/api_c/db_key_range.html
@@ -1,23 +1,22 @@
Berkeley DB: DB->key_range

DB->key_range

API -Ref -
+Ref +


@@ -34,7 +33,7 @@ that are less than, equal to, and greater than the specified key.  The
 underlying database must be of type Btree.

The DB->key_range method fills in a structure of type DB_KEY_RANGE. The following data fields are available from the DB_KEY_RANGE structure:

-

+
double less;
A value between 0 and 1, the proportion of keys less than the specified key.
double equal;
A value between 0 and 1, the proportion of keys equal to the specified @@ -51,39 +50,35 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

key
-The key DBT operated on. -

key_range
-The estimates are returned in the key_range parameter, which +
+
key
The key DBT operated on. +
key_range
The estimates are returned in the key_range parameter, which contains three elements of type double: less, equal, and greater. Values are in the range of 0 to 1; for example, if the field less is 0.05, 5% of the keys in the database are less than the key parameter. The value for equal will be zero if there is no matching key, and will be non-zero otherwise. -

txnid
-If the operation is to be transaction-protected, +
txnid
If the operation is to be transaction-protected, the txnid parameter is a transaction handle returned from DB_ENV->txn_begin; otherwise, NULL. The DB->key_range method does not retain the locks it acquires for the life of the transaction, so estimates may not be repeatable. -

flags
-The flags parameter is currently unused, and must be set to 0. +
flags
The flags parameter is currently unused, and must be set to 0.
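For illustration, a sketch that asks where a key falls in a Btree database and prints the three proportions; the where_is() wrapper, the nul-terminated key string, and the open handle dbp are assumptions for the example.

#include <stdio.h>
#include <string.h>
#include <db.h>

int
where_is(DB *dbp, const char *name)
{
	DB_KEY_RANGE range;
	DBT key;
	int ret;

	memset(&key, 0, sizeof(key));
	key.data = (void *)name;
	key.size = (u_int32_t)strlen(name) + 1;

	if ((ret = dbp->key_range(dbp, NULL, &key, &range, 0)) != 0) {
		dbp->err(dbp, ret, "DB->key_range");
		return (ret);
	}
	printf("less %.2f, equal %.2f, greater %.2f\n",
	    range.less, range.equal, range.greater);
	return (0);
}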

Errors

The DB->key_range method may fail and return one of the following non-zero errors:

-

-

DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve +
+
DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve a deadlock. -

DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable +
DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable to grant a lock in the allowed time.
-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

EINVAL
If the underlying database was not of type Btree; or if an +
+
EINVAL
If the underlying database was not of type Btree; or if an invalid flag value or parameter was specified.

@@ -95,6 +90,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/api_c/db_list.html b/db/docs/api_c/db_list.html
index 4a44358f6..46f42f9c8 100644
--- a/db/docs/api_c/db_list.html
+++ b/db/docs/api_c/db_list.html
@@ -1,67 +1,69 @@
Berkeley DB: Berkeley DB: Databases and Related Methods

Berkeley DB: Databases and Related Methods

Databases and Related MethodsDescription
Database OperationsDescription
db_createCreate a database handle
DB->associateAssociate a secondary index
DB->closeClose a database
DB->cursorCreate a cursor handle
DB->delDelete items from a database
DB->errError message with error string
DB->errxError message
DB->fdReturn a file descriptor from a database
DB->getGet items from a database
DB->get, DB->pgetGet items from a database
DB->get_byteswappedReturn if the underlying database is in host order
DB->get_envReturn a handle for the underlying database environment
DB->get_envReturn database environment handle
DB->get_typeReturn the database type
DB->joinPerform a database join on cursors
DB->key_rangeReturn estimate of key location
DB->openOpen a database
DB->pgetGet items from a database
DB->putStore items into a database
DB->removeRemove a database
DB->renameRename a database
DB->stat, DB->stat_printDatabase statistics
DB->syncFlush a database to stable storage
DB->truncateEmpty a database
DB->upgradeUpgrade a database
DB->verifyVerify/salvage a database
Database Configuration
DB->set_allocSet local space allocation functions
DB->set_append_recnoSet record append callback
DB->set_bt_compareSet a Btree comparison function
DB->set_bt_minkeySet the minimum number of keys per Btree page
DB->set_bt_prefixSet a Btree prefix comparison function
DB->set_cachesizeSet the database cache size
DB->set_dup_compareSet a duplicate comparison function
DB->set_encryptSet the database cryptographic key
DB->set_errcallSet error message callback
DB->set_errfileSet error message FILE
DB->set_errcall, DB->set_msgcallSet error and informational message callback
DB->set_errfile, DB->set_msgfileSet error and informational message FILE
DB->set_errpfxSet error message prefix
DB->set_feedbackSet feedback callback
DB->set_flagsGeneral database configuration
DB->set_h_ffactorSet the Hash table density
DB->set_h_hashSet a hashing function
DB->set_h_nelemSet the Hash table size
DB->set_lorderSet the database byte order
DB->set_pagesizeSet the underlying database page size
DB->set_paniccallSet panic callback
DB->set_q_extentsizeSet Queue database extent size
Btree/Recno Configuration
DB->set_append_recnoSet record append callback
DB->set_bt_compareSet a Btree comparison function
DB->set_bt_minkeySet the minimum number of keys per Btree page
DB->set_bt_prefixSet a Btree prefix comparison function
DB->set_re_delimSet the variable-length record delimiter
DB->set_re_lenSet the fixed-length record length
DB->set_re_padSet the fixed-length record pad byte
DB->set_re_sourceSet the backing Recno text file
DB->statReturn database statistics
DB->syncFlush a database to stable storage
DB->truncateEmpty a database
DB->upgradeUpgrade a database
DB->verifyVerify/salvage a database
Hash Configuration
DB->set_h_ffactorSet the Hash table density
DB->set_h_hashSet a hashing function
DB->set_h_nelemSet the Hash table size
Queue Configuration
DB->set_q_extentsizeSet Queue database extent size
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/api_c/db_open.html b/db/docs/api_c/db_open.html
index 811d5f710..b2e3cb3bd 100644
--- a/db/docs/api_c/db_open.html
+++ b/db/docs/api_c/db_open.html
@@ -1,23 +1,22 @@
Berkeley DB: DB->open

DB->open

API -Ref -
+Ref +


@@ -38,10 +37,6 @@ DB->get_transactional(DB *db);
 


Description: DB->open

- - - -

The DB->open method opens the database represented by the file and database parameters for both reading and writing.

The currently supported Berkeley DB file formats (or access @@ -64,9 +59,8 @@ If DB->open fails, the DB->close meth discard the DB handle.

Parameters

-

-

database
-The database parameter is optional, and allows applications to +
+
database
The database parameter is optional, and allows applications to have multiple databases in a single file. Although no database parameter needs to be specified, it is an error to attempt to open a second database in a file that was not initially created using @@ -80,41 +74,40 @@ created by setting both the file and database parameters to NULL. Note that in-memory databases can only ever be shared by sharing the single database handle that created them, in circumstances where doing so is safe.

-

file
-The file parameter is used as the name of an underlying file that +
file
The file parameter is used as the name of an underlying file that will be used to back the database.

In-memory databases never intended to be preserved on disk may be created by setting both the file and database parameters to NULL. Note that in-memory databases can only ever be shared by sharing the single database handle that created them, in circumstances where doing so is safe.

-

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +

On Windows, the file argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters.

+
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_AUTO_COMMIT
Enclose the DB->open call within a transaction. If the call +
+
DB_AUTO_COMMIT
Enclose the DB->open call within a transaction. If the call succeeds, the open operation will be recoverable. If the call fails, no database will have been created. -

DB_CREATE
Create the database. If the database does not already exist and the -DB_CREATE flag is not specified, the DB->open will -fail. -

DB_DIRTY_READ
Support dirty reads; that is, read operations on the database may +
DB_CREATE
Create the database. If the database does not already exist and the +DB_CREATE flag is not specified, the DB->open will fail. +
DB_DIRTY_READ
Support dirty reads; that is, read operations on the database may request the return of modified but not yet committed data. This flag must be specified on all DB handles used to perform dirty reads or database updates, otherwise requests for dirty reads may not be honored and the read may block. -

DB_EXCL
Return an error if the database already exists. The DB_EXCL +
DB_EXCL
Return an error if the database already exists. The DB_EXCL flag is only meaningful when specified with the DB_CREATE flag. -

DB_NOMMAP
Do not map this database into process memory (see the +
DB_NOMMAP
Do not map this database into process memory (see the DB_ENV->set_mp_mmapsize method for further information). -

DB_RDONLY
Open the database for reading only. Any attempt to modify items in the +
DB_RDONLY
Open the database for reading only. Any attempt to modify items in the database will fail, regardless of the actual permissions of any underlying files. -

DB_THREAD
Cause the DB handle returned by DB->open to be -free-threaded; that is, usable by multiple threads within a -single address space. -

DB_TRUNCATE
Physically truncate the underlying file, discarding all previous +
DB_THREAD
Cause the DB handle returned by DB->open to be +free-threaded; that is, concurrently usable by multiple +threads in the address space. +
DB_TRUNCATE
Physically truncate the underlying file, discarding all previous databases it might have held. Underlying filesystem primitives are used to implement this flag. For this reason, it is applicable only to the file and cannot be used to discard databases within a file. @@ -122,31 +115,28 @@ file and cannot be used to discard databases within a file. and it is an error to specify it in a locking or transaction-protected environment.

-

mode
-On UNIX systems or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by -the database open are created with mode mode (as described in chmod(2)) and modified by the process' umask value at the time of creation -(see umask(2)). If mode is 0, the database open will use a default -mode of readable and writable by both owner and group. On Windows -systems, the mode parameter is ignored. The group ownership of created -files is based on the system and directory defaults, and is not further -specified by Berkeley DB. -

txnid
-If the operation is to be transaction-protected, +
mode
On Windows systems, the mode parameter is ignored. +

On UNIX systems or in IEEE/ANSI Std 1003.1 (POSIX) environments, files created by the database open +are created with mode mode (as described in chmod(2)) +and modified by the process' umask value at the time of creation (see +umask(2)). Created files are owned by the process owner; the +group ownership of created files is based on the system and directory +defaults, and is not further specified by Berkeley DB. System shared memory +segments created by the database open are created with mode mode, unmodified +by the process' umask value. If mode is 0, the database open will use a +default mode of readable and writable by both owner and group.

+
txnid
If the operation is to be transaction-protected, (other than by specifying the DB_AUTO_COMMIT flag), the txnid parameter is a transaction handle returned from DB_ENV->txn_begin; otherwise, NULL. Note that transactionally protected operations on a DB handle requires the DB handle itself be transactionally protected during its open. -

type
-The type parameter -is of type DBTYPE, and -must be set to one of DB_BTREE, -DB_HASH, DB_QUEUE, -DB_RECNO, or DB_UNKNOWN. If -type is DB_UNKNOWN, the database must already exist -and DB->open will automatically determine its type. The -DB->get_type method may be used to determine the underlying type of -databases opened using DB_UNKNOWN. +
type
The type parameter is of type DBTYPE, and must be set to one of +DB_BTREE, DB_HASH, DB_QUEUE, +DB_RECNO, or DB_UNKNOWN. If type is +DB_UNKNOWN, the database must already exist and DB->open will +automatically determine its type. The DB->get_type method may be used +to determine the underlying type of databases opened using DB_UNKNOWN.
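As a supplement to the parameter list above, a minimal sketch of a non-transactional open that creates a Btree database if necessary; the open_btree() wrapper and the file name "access.db" are placeholders for the example.

#include <db.h>

int
open_btree(DB **dbpp)
{
	DB *dbp;
	int ret;

	if ((ret = db_create(dbpp, NULL, 0)) != 0)
		return (ret);
	dbp = *dbpp;

	/* Create "access.db" as a Btree if it does not already exist;
	 * 0664 is modified by the process umask on UNIX systems. */
	if ((ret = dbp->open(dbp,
	    NULL, "access.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
		dbp->err(dbp, ret, "DB->open: access.db");
		(void)dbp->close(dbp, 0);
		*dbpp = NULL;
	}
	return (ret);
}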

Environment Variables

If the database was opened within a database environment, the @@ -155,31 +145,31 @@ database environment home.

DB->open is affected by any database directory specified using the DB_ENV->set_data_dir method, or by setting the "set_data_dir" string in the environment's DB_CONFIG file.

-

-

TMPDIR
If the file and dbenv parameters to DB->open are +
+
TMPDIR
If the file and dbenv parameters to DB->open are NULL, the environment variable TMPDIR may be used as a directory in which to create temporary backing files

Errors

The DB->open method may fail and return one of the following non-zero errors:

-

-

DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve +
+
DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve a deadlock. -

DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable +
DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable to grant a lock in the allowed time.
-

-

ENOENT
The file or directory does not exist. +
+
ENOENT
The file or directory does not exist.
-

-

DB_OLD_VERSION
The database cannot be opened without being first upgraded. +
+
DB_OLD_VERSION
The database cannot be opened without being first upgraded.
-

-

EEXIST
DB_CREATE and DB_EXCL were specified and the database exists. +
+
EEXIST
DB_CREATE and DB_EXCL were specified and the database exists.
-

-

EINVAL
If an unknown database type, page size, hash function, pad byte, byte +
+
EINVAL
If an unknown database type, page size, hash function, pad byte, byte order, or a flag value or parameter that is incompatible with the specified database was specified; the DB_THREAD flag was specified and fast mutexes are not @@ -192,11 +182,11 @@ flag or the provided database environment supports transaction processing; or if an invalid flag value or parameter was specified.
-

-

ENOENT
A nonexistent re_source file was specified. +
+
ENOENT
A nonexistent re_source file was specified.
-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.

@@ -204,12 +194,10 @@ unrolled a committed transaction.

The DB->get_database method returns the current filename and database name.

Parameters

-

-

filenamep
-The filenamep parameter references memory into which +
+
filenamep
The filenamep parameter references memory into which a pointer to the current filename is copied. -

dbnamep
-The dbnamep parameter references memory into which +
dbnamep
The dbnamep parameter references memory into which a pointer to the current database name is copied.

The DB->get_database method may be called at any time during the life of the @@ -228,9 +216,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flagsp
-The DB->get_open_flags method returns the +
+
flagsp
The DB->get_open_flags method returns the current open method flags in flagsp.

@@ -248,6 +235,6 @@ application.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/api_c/db_put.html b/db/docs/api_c/db_put.html
index 6cef08bd9..5b8bcb6b6 100644
--- a/db/docs/api_c/db_put.html
+++ b/db/docs/api_c/db_put.html
@@ -1,23 +1,22 @@
Berkeley DB: DB->put

DB->put

API -Ref -
+Ref +


@@ -41,12 +40,11 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

flags
The flags parameter must be set to 0 or one of the following values:

DB_APPEND
Append the key/data pair to the end of the database. For the DB_APPEND flag to be specified, the underlying database must be a Queue or Recno database. The record number allocated to the record is returned in the specified key.
@@ -56,38 +54,33 @@
DB->put operation with the DB_APPEND flag aborts, the record number may be decremented (and later reallocated by a subsequent DB_APPEND operation) by the Recno access method, but will not be decremented or reallocated by the Queue access method.

DB_NODUPDATA
In the case of the Btree and Hash access methods, enter the new key/data pair only if it does not already appear in the database.

The DB_NODUPDATA flag may only be specified if the underlying database has been configured to support sorted duplicates. The DB_NODUPDATA flag may not be specified to the Queue or Recno access methods.

The DB->put method will return DB_KEYEXIST if DB_NODUPDATA is set and the key/data pair already appears in the database.

DB_NOOVERWRITE
Enter the new key/data pair only if the key does not already appear in the database. The DB->put method call with the DB_NOOVERWRITE flag set will fail if the key already exists in the database, even if the database supports duplicates.

The DB->put method will return DB_KEYEXIST if DB_NOOVERWRITE is set and the key already appears in the database.

In addition, the following flag may be set by bitwise inclusively OR'ing it into the flags parameter:

DB_AUTO_COMMIT
Enclose the DB->put call within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, the operation will have made no changes.

data
The data DBT operated on.

key
The key DBT operated on.

txnid
If the operation is to be transaction-protected (other than by specifying the DB_AUTO_COMMIT flag), the txnid parameter is a transaction handle returned from DB_ENV->txn_begin; otherwise, NULL.
@@ -95,29 +88,29 @@
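For example, a minimal sketch of a conditional store using DB_NOOVERWRITE (the helper name and the use of NUL-terminated string keys are illustrative assumptions):

#include <string.h>
#include <db.h>

/* Store a key/data pair only if the key is not already present. */
int
put_if_absent(DB *dbp, DB_TXN *txnid, const char *keystr, const char *datastr)
{
    DBT key, data;
    int ret;

    memset(&key, 0, sizeof(DBT));
    memset(&data, 0, sizeof(DBT));
    key.data = (void *)keystr;
    key.size = (u_int32_t)strlen(keystr) + 1;
    data.data = (void *)datastr;
    data.size = (u_int32_t)strlen(datastr) + 1;

    ret = dbp->put(dbp, txnid, &key, &data, DB_NOOVERWRITE);
    if (ret == DB_KEYEXIST)
        dbp->err(dbp, ret, "key %s is already present", keystr);
    return (ret);
}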

Errors

The DB->put method may fail and return one of the following non-zero errors:

DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve a deadlock.

DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable to grant a lock in the allowed time.

EACCES
An attempt was made to modify a read-only database.

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.

EINVAL
If a record number of 0 was specified; an attempt was made to add a record to a fixed-length database that was too large to fit; an attempt was made to do a partial put; an attempt was made to add a record to a secondary index; or if an invalid flag value or parameter was specified.

ENOSPC
A btree exceeded the maximum btree depth (255).

Class

@@ -128,6 +121,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_remove.html b/db/docs/api_c/db_remove.html index ed4ffb989..97eefa4bb 100644 --- a/db/docs/api_c/db_remove.html +++ b/db/docs/api_c/db_remove.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: DB->remove - + -

DB->remove

API -Ref -
+Ref +


@@ -31,15 +30,18 @@ DB->remove(DB *db,
 

Description: DB->remove

The DB->remove method removes the database specified by the file and database parameters. If no database is specified, the underlying file represented by file is removed, incidentally removing all of the databases it contained.

Applications should never remove databases with open DB handles, or in the case of removing a file, when any database in the file has an open handle. For example, some architectures do not permit the removal of files with open system handles. On these architectures, attempts to remove databases currently in use by any thread of control in the system may fail.

The DB->remove method should not be called if the remove is intended to be transactionally safe; the DB_ENV->dbremove method should be used instead.

The DB->remove method may not be called after calling the DB->open method on any DB handle. If the DB->open method has already been called on a DB handle, close the existing
@@ -51,14 +53,13 @@
returns a non-zero error value on failure and 0 on success.
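A minimal sketch contrasting the two removal paths described above (the file name "my.db" and the helper name are illustrative assumptions):

#include <stddef.h>
#include <db.h>

/*
 * Remove the database file "my.db": through a fresh DB handle when no
 * environment is in use, or transactionally through DB_ENV->dbremove.
 */
int
drop_database(DB_ENV *dbenv)
{
    DB *dbp;
    int ret;

    if (dbenv == NULL) {
        if ((ret = db_create(&dbp, NULL, 0)) != 0)
            return (ret);
        return (dbp->remove(dbp, "my.db", NULL, 0));
    }
    return (dbenv->dbremove(dbenv, NULL, "my.db", NULL, DB_AUTO_COMMIT));
}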

Parameters

database
The database parameter is the database to be removed.

file
The file parameter is the physical file which contains the database(s) to be removed.

On Windows, the file argument will be interpreted as a UTF-8 string, which is equivalent to ASCII for Latin characters.

flags
The flags parameter is currently unused, and must be set to 0.

Environment Variables

If the database was opened within a database environment, the @@ -70,16 +71,12 @@ in the environment's DB_CONFIG file.

Errors

The DB->remove method may fail and return one of the following non-zero errors:

DB_FILEOPEN
An attempt was made to remove the underlying file and a database in the file was currently open.

EINVAL
If DB->remove is called after DB->open was called; or if an invalid flag value or parameter was specified.

ENOENT
The file or directory does not exist.

Class

@@ -90,6 +87,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_rename.html b/db/docs/api_c/db_rename.html index c2830aa2e..5f0ca8095 100644 --- a/db/docs/api_c/db_rename.html +++ b/db/docs/api_c/db_rename.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: DB->rename - + -

DB->rename

API -Ref -
+Ref +


@@ -41,7 +40,10 @@ the database environment, no database in the file may be open when the
DB->rename method is called. In particular, some architectures do not permit renaming files with open handles. On these architectures, attempts to rename databases that are currently in use by any thread of control in the system may fail.

The DB->rename method should not be called if the rename is intended to be transactionally safe; the DB_ENV->dbrename method should be used instead.

The DB->rename method may not be called after calling the DB->open method on any DB handle. If the DB->open method has already been called on a DB handle, close the existing @@ -53,16 +55,14 @@ returns a non-zero error value on failure and 0 on success.

Parameters

database
The database parameter is the database to be renamed.

file
The file parameter is the physical file which contains the database(s) to be renamed.

On Windows, the file argument will be interpreted as a UTF-8 string, which is equivalent to ASCII for Latin characters.

flags
The flags parameter is currently unused, and must be set to 0.

newname
The newname parameter is the new name of the database or file.

Environment Variables

If the database was opened within a database environment, the @@ -74,16 +74,12 @@ in the environment's DB_CONFIG file.

Errors

The DB->rename method may fail and return one of the following non-zero errors:

DB_FILEOPEN
An attempt was made to rename the underlying file and a database in the file was currently open.

EINVAL
If DB->rename is called after DB->open was called; or if an invalid flag value or parameter was specified.

ENOENT
The file or directory does not exist.

Class

@@ -94,6 +90,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_alloc.html b/db/docs/api_c/db_set_alloc.html index 17f25c49a..8b04e1ecd 100644 --- a/db/docs/api_c/db_set_alloc.html +++ b/db/docs/api_c/db_set_alloc.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: DB->set_alloc - + -

DB->set_alloc

API -Ref -
+Ref +


@@ -70,8 +69,8 @@ and 0 on success.
 

Errors

The DB->set_alloc method may fail and return one of the following non-zero errors:

-

-

EINVAL
If called in a database environment; called after DB->open was called; or if an invalid flag value or parameter was specified.
@@ -84,6 +83,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_append_recno.html b/db/docs/api_c/db_set_append_recno.html index fa5a5a7a1..1bf967e1e 100644 --- a/db/docs/api_c/db_set_append_recno.html +++ b/db/docs/api_c/db_set_append_recno.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB->set_append_recno - + -

DB->set_append_recno

API -Ref -
+Ref +


@@ -44,19 +43,18 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

db_append_recno_fcn
The db_append_recno_fcn parameter is a function to call after the record number has been selected but before the data has been stored into the database. The function takes three parameters:

db
The db parameter is the enclosing database handle.

dbt
The dbt parameter is the data DBT to be stored.

recno
The recno parameter is the generated record number.

The called function may modify the data DBT. If the function needs to allocate memory for the data field, the flags field of the returned DBT should be set to DB_DBT_APPMALLOC, which indicates that Berkeley DB should free the memory when it is done with it.

The callback function must return 0 on success and errno or
@@ -65,8 +63,8 @@
a value outside of the Berkeley DB error name space on failure.
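For example, a sketch of an append callback that prepends the generated record number to the stored data and hands the new buffer back to Berkeley DB with DB_DBT_APPMALLOC (the callback name and data layout are illustrative assumptions):

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <db.h>

/* Prepend the generated record number to the data item being stored. */
static int
prepend_recno(DB *dbp, DBT *data, db_recno_t recno)
{
    size_t len;
    char *buf;

    len = sizeof(recno) + data->size;
    if ((buf = malloc(len)) == NULL)
        return (ENOMEM);
    memcpy(buf, &recno, sizeof(recno));
    memcpy(buf + sizeof(recno), data->data, data->size);

    data->data = buf;
    data->size = (u_int32_t)len;
    data->flags = DB_DBT_APPMALLOC;    /* Berkeley DB frees buf when done. */
    return (0);
}

/* Registration, before the database is used for DB_APPEND puts: */
/* dbp->set_append_recno(dbp, prepend_recno); */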

Errors

The DB->set_append_recno method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB->open was called; or if an +
+
EINVAL
If the method was called after DB->open was called; or if an invalid flag value or parameter was specified.

@@ -78,6 +76,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_bt_compare.html b/db/docs/api_c/db_set_bt_compare.html index 4f4a306fb..562432c90 100644 --- a/db/docs/api_c/db_set_bt_compare.html +++ b/db/docs/api_c/db_set_bt_compare.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB->set_bt_compare - + -

DB->set_bt_compare

API -Ref -
+Ref +


@@ -47,15 +46,14 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

bt_compare_fcn
The bt_compare_fcn function is the application-specified Btree comparison function. The comparison function takes three parameters:

db
The db parameter is the enclosing database handle.

dbt1
The dbt1 parameter is the DBT representing the application supplied key.

dbt2
The dbt2 parameter is the DBT representing the current tree's key.

The bt_compare_fcn function must return an integer value less
@@ -75,8 +73,8 @@
the data field refers may be assumed.
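For example, a sketch of a comparison function for keys that each hold one native int (it assumes every key is exactly sizeof(int) bytes; the function name is illustrative):

#include <string.h>
#include <db.h>

/*
 * Compare keys that each hold one native int; memcpy avoids alignment
 * assumptions about the DBT data pointers.
 */
static int
compare_int_keys(DB *dbp, const DBT *dbt1, const DBT *dbt2)
{
    int a, b;

    memcpy(&a, dbt1->data, sizeof(int));
    memcpy(&b, dbt2->data, sizeof(int));
    return (a < b ? -1 : (a > b ? 1 : 0));
}

/* Registration, before DB->open: dbp->set_bt_compare(dbp, compare_int_keys); */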

Errors

The DB->set_bt_compare method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB->open was called; or if an +
+
EINVAL
If the method was called after DB->open was called; or if an invalid flag value or parameter was specified.

@@ -88,6 +86,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_bt_minkey.html b/db/docs/api_c/db_set_bt_minkey.html index e5e6ec22c..773c9b67b 100644 --- a/db/docs/api_c/db_set_bt_minkey.html +++ b/db/docs/api_c/db_set_bt_minkey.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB->set_bt_minkey - + -

DB->set_bt_minkey

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB->get_bt_minkey(DB *db, u_int32_t *bt_minkeyp);
 


Description: DB->set_bt_minkey

-

Set the minimum number of key/data pairs intended to be stored on any single Btree leaf page.

This value is used to determine if key or data items will be stored on @@ -51,16 +49,15 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

bt_minkey
-The bt_minkey parameter is the minimum number of key/data pairs +
+
bt_minkey
The bt_minkey parameter is the minimum number of key/data pairs intended to be stored on any single Btree leaf page.

Errors

The DB->set_bt_minkey method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB->open was called; or if an +
+
EINVAL
If the method was called after DB->open was called; or if an invalid flag value or parameter was specified.

@@ -74,9 +71,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

bt_minkeyp
-The DB->get_bt_minkey method returns the +
+
bt_minkeyp
The DB->get_bt_minkey method returns the minimum number of key/data pairs intended to be stored on any single Btree leaf page in bt_minkeyp.
@@ -89,6 +85,6 @@ leaf page in bt_minkeyp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_bt_prefix.html b/db/docs/api_c/db_set_bt_prefix.html index f4428b32a..a7fc8bb14 100644 --- a/db/docs/api_c/db_set_bt_prefix.html +++ b/db/docs/api_c/db_set_bt_prefix.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB->set_bt_prefix - + -

DB->set_bt_prefix

API -Ref -
+Ref +


@@ -55,14 +54,13 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

bt_prefix_fcn
The bt_prefix_fcn function is the application-specific Btree prefix function. The prefix function takes three parameters:

db
The db parameter is the enclosing database handle.

dbt1
The dbt1 parameter is a DBT representing a database key.

dbt2
The dbt2 parameter is a DBT representing a database key.

The bt_prefix_fcn function must return the number of bytes of the second key parameter that would be required by the Btree key
@@ -78,8 +76,8 @@
which the data field refers may be assumed.
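For example, a sketch of a byte-string prefix function in the spirit of the default behavior (names are illustrative; it assumes plain byte-string keys):

#include <db.h>

/*
 * Return the number of leading bytes of dbt2 needed to distinguish it
 * from dbt1; on a shared prefix, collate the longer key after the shorter.
 */
static size_t
prefix_bytes(DB *dbp, const DBT *dbt1, const DBT *dbt2)
{
    size_t cnt, len;
    const u_int8_t *p1, *p2;

    cnt = 1;
    len = dbt1->size > dbt2->size ? dbt2->size : dbt1->size;
    for (p1 = dbt1->data, p2 = dbt2->data; len-- > 0; ++p1, ++p2, ++cnt)
        if (*p1 != *p2)
            return (cnt);
    if (dbt1->size == dbt2->size)
        return (dbt2->size);
    return ((dbt1->size < dbt2->size ? dbt1->size : dbt2->size) + 1);
}

/* Registration, before DB->open: dbp->set_bt_prefix(dbp, prefix_bytes); */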

Errors

The DB->set_bt_prefix method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB->open was called; or if an +
+
EINVAL
If the method was called after DB->open was called; or if an invalid flag value or parameter was specified.

@@ -91,6 +89,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_cachesize.html b/db/docs/api_c/db_set_cachesize.html index 0d8a2e8ff..279a0cf44 100644 --- a/db/docs/api_c/db_set_cachesize.html +++ b/db/docs/api_c/db_set_cachesize.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: DB->set_cachesize - + -

DB->set_cachesize

API -Ref -
+Ref +


@@ -34,7 +33,6 @@ DB->get_cachesize(DB *db,
 


Description: DB->set_cachesize

-

Set the size of the shared memory buffer pool -- that is, the cache. The cache should be the size of the normal working data set of the application, with some small amount of additional memory for unusual @@ -44,7 +42,7 @@ pages accessed simultaneously, and is usually much larger.)

20KB. Any cache size less than 500MB is automatically increased by 25% to account for buffer pool overhead; cache sizes larger than 500MB are used as specified. The current maximum size of a single cache is 4GB. (All sizes are in powers-of-two, that is, 256KB is 2^18 not 256,000.) For information on tuning the Berkeley DB cache size, see Selecting a cache size.

It is possible to specify caches to Berkeley DB larger than 4GB and/or large @@ -64,19 +62,16 @@ returns a non-zero error value on failure and 0 on success.

Parameters

bytes
The size of the cache is set to gbytes gigabytes plus bytes.

gbytes
The size of the cache is set to gbytes gigabytes plus bytes.

ncache
The ncache parameter is the number of caches to create.
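For example, a sketch configuring a single 64MB cache before DB->open (the size and helper name are illustrative):

#include <db.h>

/* Give a stand-alone database a single 64MB cache; call before DB->open. */
int
configure_cache(DB *dbp)
{
    return (dbp->set_cachesize(dbp, 0, 64 * 1024 * 1024, 1));
}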

Errors

The DB->set_cachesize method may fail and return one of the following non-zero errors:

EINVAL
If the specified cache size was impossibly small; called in a database environment; the method was called after DB->open
@@ -94,15 +89,12 @@
returns a non-zero error value on failure and 0 on success.

Parameters

bytesp
The bytesp parameter references memory into which the additional bytes of memory in the cache is copied.

gbytesp
The gbytesp parameter references memory into which the gigabytes of memory in the cache is copied.

ncachep
The ncachep parameter references memory into which the number of caches is copied.

@@ -114,6 +106,6 @@ The ncachep parameter references memory into which

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_dup_compare.html b/db/docs/api_c/db_set_dup_compare.html index 9f6147c7a..5c2347d14 100644 --- a/db/docs/api_c/db_set_dup_compare.html +++ b/db/docs/api_c/db_set_dup_compare.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB->set_dup_compare - + -

DB->set_dup_compare

API -Ref -
+Ref +


@@ -46,17 +45,16 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

dup_compare_fcn
The dup_compare_fcn function is the application-specified duplicate data item comparison function. The function takes three arguments:

db
The db parameter is the enclosing database handle.

dbt1
The dbt1 parameter is a DBT representing the application supplied data item.

dbt2
The dbt2 parameter is a DBT representing the current tree's data item.

The dup_compare_fcn function must return an integer value less than, equal to, or greater than zero if the first data item parameter @@ -73,8 +71,8 @@ refers may be assumed.

Errors

The DB->set_dup_compare method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -86,6 +84,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_encrypt.html b/db/docs/api_c/db_set_encrypt.html index 94c513e96..7a231c89d 100644 --- a/db/docs/api_c/db_set_encrypt.html +++ b/db/docs/api_c/db_set_encrypt.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: DB->set_encrypt - + -

DB->set_encrypt

API -Ref -
+Ref +


@@ -32,7 +31,6 @@ DB->get_encrypt_flags(DB *db, u_int32_t *flagsp);
 


Description: DB->set_encrypt

-

Set the password used by the Berkeley DB library to perform encryption and decryption.

Because databases opened within Berkeley DB environments use the password @@ -45,30 +43,28 @@ returns a non-zero error value on failure and 0 on success.

Parameters

flags
The flags parameter must be set to 0 or the following value:

DB_ENCRYPT_AES
Use the Rijndael/AES (also known as the Advanced Encryption Standard and Federal Information Processing Standard (FIPS) 197) algorithm for encryption or decryption.

passwd
The passwd parameter is the password used to perform encryption and decryption.
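For example, a sketch enabling AES encryption on a stand-alone database before DB->open (the helper name is illustrative; the password is passed in rather than hard-coded):

#include <db.h>

/* Enable AES encryption for a stand-alone database; call before DB->open. */
int
enable_encryption(DB *dbp, const char *passwd)
{
    int ret;

    if ((ret = dbp->set_encrypt(dbp, passwd, DB_ENCRYPT_AES)) != 0)
        return (ret);
    /* Mark the database itself as encrypted. */
    return (dbp->set_flags(dbp, DB_ENCRYPT));
}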

Errors

The DB->set_encrypt method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after +
+
EINVAL
If the method was called after DB->open was called; or if an invalid flag value or parameter was specified.
-

-

EOPNOTSUPP
Cryptography is not available in this Berkeley DB release. +
+
EOPNOTSUPP
Cryptography is not available in this Berkeley DB release.

Description: DB->get_encrypt_flags

@@ -80,9 +76,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flagsp
-The DB->get_encrypt_flags method returns the +
+
flagsp
The DB->get_encrypt_flags method returns the encryption flags in flagsp.

@@ -94,6 +89,6 @@ encryption flags in flagsp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_errcall.html b/db/docs/api_c/db_set_errcall.html index 0dccad7ad..8865d5016 100644 --- a/db/docs/api_c/db_set_errcall.html +++ b/db/docs/api_c/db_set_errcall.html @@ -1,32 +1,31 @@ - - + + Berkeley DB: DB->set_errcall - + -

DB->set_errcall

API -Ref -
+Ref +


 #include <db.h>
 

void
DB->set_errcall(DB *,
    void (*db_errcall_fcn)(const DB_ENV *dbenv, const char *errpfx, const char *msg));


Description: DB->set_errcall

@@ -40,6 +39,7 @@ In some cases, when an error occurs, Berkeley DB will call db_errcall_fcn with additional error information. It is up to the db_errcall_fcn function to display the error message in an appropriate manner.

+

Setting db_errcall_fcn to NULL unconfigures the callback interface.

Alternatively, you can use the DB->set_errfile or DB_ENV->set_errfile methods to display the additional information via a C library FILE *.

@@ -52,14 +52,14 @@ the DB_ENV->set_errcall method.The DB->set_errcall method may be called at any time during the life of the application.

Parameters

db_errcall_fcn
The db_errcall_fcn parameter is the application-specified error reporting function. The function takes three parameters:

dbenv
The dbenv parameter is the enclosing database environment.

errpfx
The errpfx parameter is the prefix string (as previously set by DB->set_errpfx or DB_ENV->set_errpfx).

msg
The msg parameter is the error message string.
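For example, a sketch of an error callback matching the three-parameter signature shown above (the function name and output format are illustrative):

#include <stdio.h>
#include <db.h>

/* Forward Berkeley DB error messages to stderr with an application tag. */
static void
app_errcall(const DB_ENV *dbenv, const char *errpfx, const char *msg)
{
    fprintf(stderr, "%s: %s\n", errpfx != NULL ? errpfx : "bdb", msg);
}

/* Registration: dbp->set_errcall(dbp, app_errcall); */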

@@ -71,6 +71,6 @@ reporting function. The function takes two parameters:

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_errfile.html b/db/docs/api_c/db_set_errfile.html index d7a61b02e..4840f7d88 100644 --- a/db/docs/api_c/db_set_errfile.html +++ b/db/docs/api_c/db_set_errfile.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: DB->set_errfile - + -

DB->set_errfile

API -Ref -
+Ref +


@@ -32,7 +31,6 @@ DB->get_errfile(DB *db, FILE **errfilep);
 


Description: DB->set_errfile

- When an error occurs in the Berkeley DB library, a Berkeley DB error or an error return value is returned by the interface. In some cases, however, the errno value may be insufficient to completely describe @@ -46,6 +44,7 @@ an additional error message to the specified file reference.

(":") (if a prefix string was previously specified using DB->set_errpfx or DB_ENV->set_errpfx), an error string, and a trailing <newline> character.

+

Setting errfile to NULL unconfigures the interface.

This error logging enhancement does not slow performance or significantly increase application size, and may be run during normal operation as well as during application debugging.

@@ -55,26 +54,15 @@ the DB_ENV->set_errfile method.The DB->set_errfile method may be called at any time during the life of the application.

Parameters

-

-

errfile
-The errfile parameter is a C library FILE * to be used for +
+
errfile
The errfile parameter is a C library FILE * to be used for displaying additional Berkeley DB error information.

Description: DB->get_errfile

The DB->get_errfile method returns the FILE *.

The DB->get_errfile method may be called at any time during the life of the application.

-

The DB->get_errfile method -returns a non-zero error value on failure -and 0 on success. -

-

Parameters

-

-

errfilep
-The DB->get_errfile method returns the -FILE * in errfilep. -

Class

DB @@ -84,6 +72,6 @@ FILE * in errfilep.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_errpfx.html b/db/docs/api_c/db_set_errpfx.html index 6b4808e7e..b793b930a 100644 --- a/db/docs/api_c/db_set_errpfx.html +++ b/db/docs/api_c/db_set_errpfx.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: DB->set_errpfx - + -

DB->set_errpfx

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ void DB->get_errpfx(DB *db, const char **errpfxp);
 


Description: DB->set_errpfx

-

Set the prefix string that appears before error messages issued by Berkeley DB.

The DB->set_errpfx and DB_ENV->set_errpfx methods do not copy the memory to which the errpfx parameter refers; rather, they @@ -45,9 +43,8 @@ the DB_ENV->set_errpfx method.

The DB->set_errpfx method may be called at any time during the life of the application.

Parameters

-

-

errpfx
-The errpfx parameter is the application-specified error prefix +
+
errpfx
The errpfx parameter is the application-specified error prefix for additional error messages.

@@ -60,9 +57,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

errpfxp
-The DB->get_errpfx method returns a reference to the +
+
errpfxp
The DB->get_errpfx method returns a reference to the error prefix in errpfxp.

@@ -74,6 +70,6 @@ error prefix in errpfxp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_feedback.html b/db/docs/api_c/db_set_feedback.html index 2ed6166c8..cb9916754 100644 --- a/db/docs/api_c/db_set_feedback.html +++ b/db/docs/api_c/db_set_feedback.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: DB->set_feedback - + -

DB->set_feedback

API -Ref -
+Ref +


@@ -44,20 +43,19 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

db_feedback_fcn
The db_feedback_fcn parameter is the application-specified feedback function called to report Berkeley DB operation progress. The callback function must take three parameters:

db
The db parameter is a reference to the enclosing database.

opcode
The opcode parameter is an operation code. The opcode parameter may take on any of the following values:

DB_UPGRADE
The underlying database is being upgraded.

DB_VERIFY
The underlying database is being verified.

percent
The percent parameter is the percent of the operation that has been completed, specified as an integer value between 0 and 100.
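For example, a sketch of a feedback function reporting upgrade/verify progress (the function name and output format are illustrative):

#include <stdio.h>
#include <db.h>

/* Report coarse progress while DB->upgrade or DB->verify is running. */
static void
show_progress(DB *dbp, int opcode, int percent)
{
    fprintf(stderr, "%s: %d%% complete\n",
        opcode == DB_UPGRADE ? "upgrade" :
        opcode == DB_VERIFY ? "verify" : "operation", percent);
}

/* Registration: dbp->set_feedback(dbp, show_progress); */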
@@ -70,6 +68,6 @@ been completed, specified as an integer value between 0 and 100.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_flags.html b/db/docs/api_c/db_set_flags.html index ed2e8a836..e8e008d3c 100644 --- a/db/docs/api_c/db_set_flags.html +++ b/db/docs/api_c/db_set_flags.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB->set_flags - + -

DB->set_flags

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB->get_flags(DB *db, u_int32_t *flagsp);
 


Description: DB->set_flags

-

Configure a database. Calling DB->set_flags is additive; there is no way to clear flags.

The DB->set_flags method may not be called after the DB->open method is called. @@ -41,15 +39,14 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
+
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values:

General

The following flags may be specified for any Berkeley DB access method:

-

- -

DB_CHKSUM
Do checksum verification of pages read into the cache from the backing +
+ +
DB_CHKSUM
Do checksum verification of pages read into the cache from the backing filestore. Berkeley DB uses the SHA1 Secure Hash Algorithm if encryption is configured and a general hash algorithm if it is not.

Calling DB->set_flags with the DB_CHKSUM flag only affects the @@ -61,8 +58,8 @@ will be ignored.

If creating additional databases in a file, the checksum behavior specified must be consistent with the existing databases in the file or an error will be returned. - -

DB_ENCRYPT
Encrypt the database using the cryptographic password specified to the + +
DB_ENCRYPT
Encrypt the database using the cryptographic password specified to the DB_ENV->set_encrypt or DB->set_encrypt methods.

Calling DB->set_flags with the DB_ENCRYPT flag only affects the specified DB handle (and any other Berkeley DB handles opened within @@ -78,29 +75,28 @@ be returned.

Encrypted databases are not portable between machines of different byte orders, that is, encrypted databases created on big-endian machines cannot be read on little-endian machines, and vice versa.

- -

DB_TXN_NOT_DURABLE
If set, Berkeley DB will not write log records for this database. This -means that updates of this database exhibit the ACI (atomicity, -consistency, and isolation) properties, but not D (durability); that -is, database integrity will be maintained, but if the application or -system fails, integrity will not persist. The database file must be -verified and/or restored from backup after a failure. In order to -ensure integrity after application shut down, the database handles must -be closed without specifying DB_NOSYNC, or all database -changes must be flushed from the database environment cache using -either the DB_ENV->txn_checkpoint or DB_ENV->memp_sync methods. -All database handles for a single physical file must set -DB_TXN_NOT_DURABLE, including database handles for different -databases in a physical file. + +
DB_TXN_NOT_DURABLE
If set, Berkeley DB will not write log records for this database. This means +that updates of this database exhibit the ACI (atomicity, consistency, +and isolation) properties, but not D (durability); that is, database +integrity will be maintained, but if the application or system fails, +integrity will not persist. The database file must be verified and/or +restored from backup after a failure. In order to ensure integrity +after application shut down, the database handles must be closed without +specifying DB_NOSYNC, or all database changes must be flushed +from the database environment cache using either the +DB_ENV->txn_checkpoint or DB_ENV->memp_sync methods. All database handles for +a single physical file must set DB_TXN_NOT_DURABLE, including +database handles for different databases in a physical file.

Calling DB->set_flags with the DB_TXN_NOT_DURABLE flag only affects the specified DB handle (and any other Berkeley DB handles opened within the scope of that handle).

Btree

The following flags may be specified for the Btree access method:

-

- -

DB_DUP
Permit duplicate data items in the database; that is, insertion when the +
+ +
DB_DUP
Permit duplicate data items in the database; that is, insertion when the key of the key/data pair being inserted already exists in the database will be successful. The ordering of duplicates in the database is determined by the order of insertion, unless the ordering is otherwise @@ -116,14 +112,13 @@ must be the same as the existing database or an error will be returned.

It is an error to specify both DB_DUP and DB_RECNUM.

DB_DUPSORT
Permit duplicate data items in the database; that is, insertion when the key of the key/data pair being inserted already exists in the database will be successful. The ordering of duplicates in the database is determined by the duplicate comparison function. If the application does not specify a comparison function using the DB->set_dup_compare method, a default lexical comparison will be used. It is an error to specify both DB_DUPSORT and DB_RECNUM.

Calling DB->set_flags with the DB_DUPSORT flag affects the database, including all threads of control accessing the database.
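For example, a sketch that configures sorted duplicates together with an application-specific duplicate comparison before DB->open (the ordering rule and the names are illustrative assumptions):

#include <string.h>
#include <db.h>

/* Order duplicate data items by their bytes; shorter items sort first on a tie. */
static int
compare_dups(DB *dbp, const DBT *dbt1, const DBT *dbt2)
{
    size_t len;
    int cmp;

    len = dbt1->size < dbt2->size ? dbt1->size : dbt2->size;
    if ((cmp = memcmp(dbt1->data, dbt2->data, len)) != 0)
        return (cmp);
    return ((int)dbt1->size - (int)dbt2->size);
}

int
configure_sorted_duplicates(DB *dbp)
{
    int ret;

    if ((ret = dbp->set_flags(dbp, DB_DUPSORT)) != 0)
        return (ret);
    return (dbp->set_dup_compare(dbp, compare_dups));
}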

@@ -132,8 +127,8 @@ flag must be the same as the existing database or an error will be returned.

- -

DB_RECNUM
Support retrieval from the Btree using record numbers. For more + +
DB_RECNUM
Support retrieval from the Btree using record numbers. For more information, see the DB_SET_RECNO flag to the DB->get and DBcursor->c_get methods.

Logical record numbers in Btree databases are mutable in the face of @@ -153,8 +148,8 @@ flag must be the same as the existing database or an error will be returned.

- -

DB_REVSPLITOFF
Turn off reverse splitting in the Btree. As pages are emptied in a + +
DB_REVSPLITOFF
Turn off reverse splitting in the Btree. As pages are emptied in a database, the Berkeley DB Btree implementation attempts to coalesce empty pages into higher-level pages in order to keep the database as small as possible and minimize search time. This can hurt performance in applications @@ -170,8 +165,8 @@ the scope of that handle).

Hash

The following flags may be specified for the Hash access method:

-

-

DB_DUP
Permit duplicate data items in the database; that is, insertion when the +
+
DB_DUP
Permit duplicate data items in the database; that is, insertion when the key of the key/data pair being inserted already exists in the database will be successful. The ordering of duplicates in the database is determined by the order of insertion, unless the ordering is otherwise @@ -186,13 +181,12 @@ flag must be the same as the existing database or an error will be returned.

-

DB_DUPSORT
Permit duplicate data items in the database; that is, insertion when the +
DB_DUPSORT
Permit duplicate data items in the database; that is, insertion when the key of the key/data pair being inserted already exists in the database -will be successful. The ordering of duplicates in the database is determined -by the duplicate comparison function. -If the application does not specify a comparison function using the -DB->set_dup_compare method, a default lexical comparison will be -used. +will be successful. The ordering of duplicates in the database is +determined by the duplicate comparison function. If the application +does not specify a comparison function using the +DB->set_dup_compare method, a default lexical comparison will be used. It is an error to specify both DB_DUPSORT and DB_RECNUM.

Calling DB->set_flags with the DB_DUPSORT flag affects the database, including all threads of control accessing the database.

@@ -203,13 +197,30 @@ will be returned.

Queue

-

The following flags may be specified for the Queue access method:

DB_INORDER
The DB_INORDER flag modifies the operation of the DB_CONSUME or DB_CONSUME_WAIT flags to DB->get to return key/data pairs in order. That is, they will always return the key/data item from the head of the queue.

The default behavior of queue databases is optimized for multiple readers, and does not guarantee that records will be retrieved in the order they are added to the queue. Specifically, if a writing thread adds multiple records to an empty queue, reading threads may skip some of the initial records when the next DB->get call returns.

This flag modifies the DB->get call to verify that the record being returned is in fact the head of the queue. This will increase contention and reduce concurrency when there are many reading threads.

Calling DB->set_flags with the DB_INORDER flag only affects the specified DB handle (and any other Berkeley DB handles opened within the scope of that handle).
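For example, a sketch of an in-order consumer, assuming the Queue database was configured with DB_INORDER before DB->open (the helper name is illustrative):

#include <string.h>
#include <db.h>

/* Consume the record at the head of a Queue database opened with DB_INORDER. */
int
consume_head(DB *dbp)
{
    DBT key, data;
    db_recno_t recno;

    memset(&key, 0, sizeof(DBT));
    memset(&data, 0, sizeof(DBT));
    key.data = &recno;
    key.ulen = sizeof(recno);
    key.flags = DB_DBT_USERMEM;

    /* With DB_INORDER set, DB_CONSUME returns records strictly in queue order. */
    return (dbp->get(dbp, NULL, &key, &data, DB_CONSUME));
}

/* Configuration, before DB->open: dbp->set_flags(dbp, DB_INORDER); */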

+

Recno

The following flags may be specified for the Recno access method:

-

- -

DB_RENUMBER
Specifying the DB_RENUMBER flag causes the logical record +
+ +
DB_RENUMBER
Specifying the DB_RENUMBER flag causes the logical record numbers to be mutable, and change as records are added to and deleted from the database. For example, the deletion of record number 4 causes records numbered 5 and greater to be renumbered downward by one. If a @@ -242,8 +253,8 @@ flag must be the same as the existing database or an error will be returned.

- -

DB_SNAPSHOT
This flag specifies that any specified re_source file be read + +
DB_SNAPSHOT
This flag specifies that any specified re_source file be read in its entirety when DB->open is called. If this flag is not specified, the re_source file may be read lazily.

Calling DB->set_flags with the DB_SNAPSHOT flag only affects the @@ -254,8 +265,8 @@ the scope of that handle).

Errors

The DB->set_flags method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -268,9 +279,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flagsp
-The DB->get_flags method returns the +
+
flagsp
The DB->get_flags method returns the current flags in flagsp.

@@ -282,6 +292,6 @@ current flags in flagsp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_h_ffactor.html b/db/docs/api_c/db_set_h_ffactor.html index 187549ba1..c88032d95 100644 --- a/db/docs/api_c/db_set_h_ffactor.html +++ b/db/docs/api_c/db_set_h_ffactor.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB->set_h_ffactor - + -

DB->set_h_ffactor

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB->get_h_ffactor(DB *db, u_int32_t *h_ffactorp);
 


Description: DB->set_h_ffactor

-

Set the desired density within the hash table. If no value is specified, the fill factor will be selected dynamically as pages are filled.

@@ -47,9 +45,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

h_ffactor
-The h_ffactor parameter is the desired density within the hash table. +
+
h_ffactor
The h_ffactor parameter is the desired density within the hash table.

The density is an approximation of the number of keys allowed to accumulate in any one bucket, determining when the hash table grows or shrinks. If you know the average sizes of the keys and data in your @@ -60,8 +57,8 @@ rule computing fill factor is to set it to the following:

Errors

The DB->set_h_ffactor method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB->open was called; or if an +
+
EINVAL
If the method was called after DB->open was called; or if an invalid flag value or parameter was specified.

@@ -74,9 +71,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

h_ffactorp
-The DB->get_h_ffactor method returns the +
+
h_ffactorp
The DB->get_h_ffactor method returns the hash table density in h_ffactorp.

@@ -88,6 +84,6 @@ hash table density in h_ffactorp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_h_hash.html b/db/docs/api_c/db_set_h_hash.html index fa783ba1e..553937716 100644 --- a/db/docs/api_c/db_set_h_hash.html +++ b/db/docs/api_c/db_set_h_hash.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB->set_h_hash - + -

DB->set_h_hash

API -Ref -
+Ref +


@@ -46,20 +45,18 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

h_hash_fcn
The h_hash_fcn parameter is the application-specified hash function.

Application-specified hash functions take a pointer to a byte string and a length as parameters, and return a value of type u_int32_t. The hash function must handle any key values used by the application (possibly including zero-length keys).
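For example, a sketch of an application hash function; it assumes the callback also receives the enclosing DB handle as its first parameter, and the hash itself (a simple multiplicative byte-string hash) is only illustrative:

#include <db.h>

/* A simple multiplicative byte-string hash; handles zero-length keys. */
static u_int32_t
app_hash(DB *dbp, const void *bytes, u_int32_t length)
{
    const u_int8_t *p;
    u_int32_t hash;

    for (p = bytes, hash = 5381; length-- > 0; ++p)
        hash = hash * 33 + *p;
    return (hash);
}

/* Registration, before DB->open: dbp->set_h_hash(dbp, app_hash); */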

Errors

The DB->set_h_hash method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB->open was called; the +
+
EINVAL
If the method was called after DB->open was called; the specified hash function differs from the hash function with which the database was created; or if an invalid flag value or parameter was specified. @@ -73,6 +70,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_h_nelem.html b/db/docs/api_c/db_set_h_nelem.html index 7117f64bd..aa8254738 100644 --- a/db/docs/api_c/db_set_h_nelem.html +++ b/db/docs/api_c/db_set_h_nelem.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB->set_h_nelem - + -

DB->set_h_nelem

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB->get_h_nelem(DB *db, u_int32_t *h_nelemp);
 


Description: DB->set_h_nelem

-

Set an estimate of the final size of the hash table.

In order for the estimate to be used when creating the database, the DB->set_h_ffactor method must also be called. If the estimate @@ -50,16 +48,15 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

h_nelem
-The h_nelem parameter is an estimate of the final size of the +
+
h_nelem
The h_nelem parameter is an estimate of the final size of the hash table.

Errors

The DB->set_h_nelem method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB->open was called; or if an +
+
EINVAL
If the method was called after DB->open was called; or if an invalid flag value or parameter was specified.

@@ -72,9 +69,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

h_nelemp
-The DB->get_h_nelem method returns the +
+
h_nelemp
The DB->get_h_nelem method returns the estimate of the final size of the hash table in h_nelemp.

@@ -86,6 +82,6 @@ estimate of the final size of the hash table in h_nelemp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_lorder.html b/db/docs/api_c/db_set_lorder.html index 05ad69ba2..d3a5b5483 100644 --- a/db/docs/api_c/db_set_lorder.html +++ b/db/docs/api_c/db_set_lorder.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB->set_lorder - + -

DB->set_lorder

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB->get_lorder(DB *db, int *lorderp);
 


Description: DB->set_lorder

-

Set the byte order for integers in the stored database metadata. The host byte order of the machine where the Berkeley DB library was compiled will be used if no byte order is set.

@@ -53,17 +51,16 @@ returns a non-zero error value on failure and 0 on success.

Parameters

lorder
The lorder parameter should represent the byte order as an integer; for example, big endian order is the number 4,321, and little endian order is the number 1,234.
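For example, a one-line sketch selecting big-endian metadata (the helper name is illustrative):

#include <db.h>

/* Store database metadata in big-endian order regardless of the host CPU. */
int
force_big_endian(DB *dbp)
{
    return (dbp->set_lorder(dbp, 4321));    /* 1234 selects little-endian */
}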

Errors

The DB->set_lorder method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB->open was called; or if an +
+
EINVAL
If the method was called after DB->open was called; or if an invalid flag value or parameter was specified.

@@ -77,9 +74,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

lorderp
-The DB->get_lorder method returns the +
+
lorderp
The DB->get_lorder method returns the database byte order in lorderp.

@@ -91,6 +87,6 @@ database byte order in lorderp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_msgcall.html b/db/docs/api_c/db_set_msgcall.html new file mode 100644 index 000000000..2b29efc62 --- /dev/null +++ b/db/docs/api_c/db_set_msgcall.html @@ -0,0 +1,69 @@ + + + + + + + +Berkeley DB: DB->set_msgcall + + + + + + + +
+

DB->set_msgcall

+
+API +Ref
+


+ +

+#include <db.h>
+

+void +DB->set_msgcall(DB *, + void (*db_msgcall_fcn)(const DB_ENV *dbenv, char *msg)); +

+
+

Description: DB->set_msgcall

+

There are interfaces in the Berkeley DB library which either directly output +informational messages or statistical information, or configure the +library to output such messages when performing other operations, for +example, DB_ENV->set_verbose and DB_ENV->stat_print.

+

The DB_ENV->set_msgcall and DB->set_msgcall methods are used to +pass these messages to the application, and Berkeley DB will call +db_msgcall_fcn with each message. It is up to the +db_msgcall_fcn function to display the message in an appropriate +manner.

+

Setting db_msgcall_fcn to NULL unconfigures the callback interface.

+

Alternatively, you can use the DB->set_msgfile or +DB_ENV->set_msgfile methods to display the messages via a C library FILE *.

+

For DB handles opened inside of Berkeley DB environments, calling the +DB->set_msgcall method affects the entire environment and is equivalent to calling +the DB_ENV->set_msgcall method.

+

The DB->set_msgcall method may be called at any time during the life of the +application.

+

Parameters

+
+
db_msgcall_fcn
The db_msgcall_fcn parameter is the application-specified message reporting function. The function takes two parameters:

dbenv
The dbenv parameter is the enclosing database environment.

msg
The msg parameter is the message string.
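For example, a sketch of a message callback using the signature shown in the synopsis above (some releases declare msg as const char *; the function name and output format are illustrative):

#include <stdio.h>
#include <db.h>

/* Print informational and statistical messages produced by the library. */
static void
app_msgcall(const DB_ENV *dbenv, char *msg)
{
    printf("bdb: %s\n", msg);
}

/* Registration: dbp->set_msgcall(dbp, app_msgcall); */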
+
+
+

Class

+DB +

See Also

+Databases and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_c/db_set_msgfile.html b/db/docs/api_c/db_set_msgfile.html new file mode 100644 index 000000000..49c153a85 --- /dev/null +++ b/db/docs/api_c/db_set_msgfile.html @@ -0,0 +1,72 @@ + + + + + + + +Berkeley DB: DB->set_msgfile + + + + + + + +
+

DB->set_msgfile

+
+API +Ref
+


+ +

+#include <db.h>
+

+void +DB->set_msgfile(DB *db, FILE *msgfile); +

+void +DB->get_msgfile(DB *db, FILE **msgfilep); +

+
+

Description: DB->set_msgfile

+

There are interfaces in the Berkeley DB library which either directly output +informational messages or statistical information, or configure the +library to output such messages when performing other operations, for +example, DB_ENV->set_verbose and DB_ENV->stat_print.

+

The DB_ENV->set_msgfile and DB->set_msgfile methods are used to +display these messages for the application. +In this case the message will include a trailing <newline> +character.

+

Setting msgfile to NULL unconfigures the interface.

+

Alternatively, you can use the DB_ENV->set_msgcall or +DB->set_msgcall methods to capture the additional error information +in a way that does not use C library FILE *'s.

+

For DB handles opened inside of Berkeley DB environments, calling the +DB->set_msgfile method affects the entire environment and is equivalent to calling +the DB_ENV->set_msgfile method.

+

The DB->set_msgfile method may be called at any time during the life of the +application.

+

Parameters

+
+
msgfile
The msgfile parameter is a C library FILE * to be used for +displaying messages. +
+
+

Description: DB->get_msgfile

+

The DB->get_msgfile method returns the FILE *.

+

The DB->get_msgfile method may be called at any time during the life of the +application.

+
+

Class

+DB +

See Also

+Databases and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_c/db_set_pagesize.html b/db/docs/api_c/db_set_pagesize.html index 20224ebc0..c802aebd5 100644 --- a/db/docs/api_c/db_set_pagesize.html +++ b/db/docs/api_c/db_set_pagesize.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB->set_pagesize - + -

DB->set_pagesize

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB->get_pagesize(DB *db, u_int32_t *pagesizep);
 


Description: DB->set_pagesize

-

Set the size of the pages used to hold items in the database, in bytes. The minimum page size is 512 bytes, the maximum page size is 64K bytes, and the page size must be a power-of-two. If the page size is not @@ -55,15 +53,14 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

pagesize
-The pagesize parameter sets the database page size. +
+
pagesize
The pagesize parameter sets the database page size.

Errors

The DB->set_pagesize method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB->open was called; or if an +
+
EINVAL
If the method was called after DB->open was called; or if an invalid flag value or parameter was specified.

@@ -76,9 +73,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

pagesizep
-The DB->get_pagesize method returns the +
+
pagesizep
The DB->get_pagesize method returns the page size in pagesizep.

@@ -90,6 +86,6 @@ page size in pagesizep.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_paniccall.html b/db/docs/api_c/db_set_paniccall.html index 68b17bbf8..c0cdce6a5 100644 --- a/db/docs/api_c/db_set_paniccall.html +++ b/db/docs/api_c/db_set_paniccall.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: DB->set_paniccall - + -

DB->set_paniccall

API -Ref -
+Ref +


@@ -49,14 +48,13 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

db_panic_fcn
-The db_panic_fcn parameter is the application-specified function +
+
db_panic_fcn
The db_panic_fcn parameter is the application-specified function called in the case of a database environment panic. The function takes two arguments: -

-

dbenv
The dbenv parameter is the enclosing database environment handle. -

errval
The errval parameter is the error value that would have been +
+
dbenv
The dbenv parameter is the enclosing database environment handle. +
errval
The errval parameter is the error value that would have been returned to the caller if DB_RUNRECOVERY were not going to be returned instead.
@@ -70,6 +68,6 @@ returned instead.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_q_extentsize.html b/db/docs/api_c/db_set_q_extentsize.html index 22b2c2f5f..214e4af00 100644 --- a/db/docs/api_c/db_set_q_extentsize.html +++ b/db/docs/api_c/db_set_q_extentsize.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB->set_q_extentsize - + -

DB->set_q_extentsize

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB->get_q_extentsize(DB *db, u_int32_t *extentsizep);
 


Description: DB->set_q_extentsize

-

Set the size of the extents used to hold pages in a Queue database, specified as a number of pages. Each extent is created as a separate physical file. If no extent size is set, the default behavior is to @@ -50,16 +48,15 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

extentsize
-The extentsize parameter is the number of pages in a Queue database +
+
extentsize
The extentsize parameter is the number of pages in a Queue database extent.

Errors

The DB->set_q_extentsize method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB->open was called; or if an +
+
EINVAL
If the method was called after DB->open was called; or if an invalid flag value or parameter was specified.

@@ -72,9 +69,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

extentsizep
-The DB->get_q_extentsize method returns the +
+
extentsizep
The DB->get_q_extentsize method returns the number of pages in an extent in extentsizep.

@@ -86,6 +82,6 @@ number of pages in an extent in extentsizep.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_re_delim.html b/db/docs/api_c/db_set_re_delim.html index 3174fc9fa..f57719ee0 100644 --- a/db/docs/api_c/db_set_re_delim.html +++ b/db/docs/api_c/db_set_re_delim.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB->set_re_delim - + -

DB->set_re_delim

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB->get_re_delim(DB *db, int *delimp);
 


Description: DB->set_re_delim

-

Set the delimiting byte used to mark the end of a record in the backing source file for the Recno access method.

This byte is used for variable length records if the re_source @@ -50,16 +48,15 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

re_delim
-The re_delim parameter is the delimiting byte used to mark the +
+
re_delim
The re_delim parameter is the delimiting byte used to mark the end of a record.

Errors

The DB->set_re_delim method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB->open was called; or if an +
+
EINVAL
If the method was called after DB->open was called; or if an invalid flag value or parameter was specified.

@@ -72,9 +69,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

delimp
-The DB->get_re_delim method returns the +
+
delimp
The DB->get_re_delim method returns the delimiting byte in delimp.

@@ -86,6 +82,6 @@ delimiting byte in delimp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_re_len.html b/db/docs/api_c/db_set_re_len.html index eb3ab1c16..fa1837bc7 100644 --- a/db/docs/api_c/db_set_re_len.html +++ b/db/docs/api_c/db_set_re_len.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB->set_re_len - + -

DB->set_re_len

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB->get_re_len(DB *db, u_int32_t *re_lenp);
 


Description: DB->set_re_len

-

For the Queue access method, specify that the records are of length re_len. The record length must be sufficiently smaller than the database's page size that at least one record @@ -57,16 +55,15 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

re_len
-The re_len parameter is the length of a Queue or Recno database +
+
re_len
The re_len parameter is the length of a Queue or Recno database record, in bytes.
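For illustration, a minimal sketch of configuring fixed-length Recno records padded with a '.' byte; the 32-byte length, pad character, file name and helper name are assumptions.

#include <db.h>

int
open_fixed_recno(DB **dbpp)
{
	DB *dbp;
	int ret;

	if ((ret = db_create(&dbp, NULL, 0)) != 0)
		return (ret);
	/* Fixed-length records: set the length and pad byte before open. */
	if ((ret = dbp->set_re_len(dbp, 32)) != 0 ||
	    (ret = dbp->set_re_pad(dbp, '.')) != 0 ||
	    (ret = dbp->open(dbp,
	    NULL, "recno.db", NULL, DB_RECNO, DB_CREATE, 0664)) != 0) {
		(void)dbp->close(dbp, 0);
		return (ret);
	}
	*dbpp = dbp;
	return (0);
}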

Errors

The DB->set_re_len method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB->open was called; or if an +
+
EINVAL
If the method was called after DB->open was called; or if an invalid flag value or parameter was specified.

@@ -79,9 +76,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

re_lenp
-The DB->get_re_len method returns the +
+
re_lenp
The DB->get_re_len method returns the record length in re_lenp.

@@ -93,6 +89,6 @@ record length in re_lenp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_re_pad.html b/db/docs/api_c/db_set_re_pad.html index eb325f99b..640e375d2 100644 --- a/db/docs/api_c/db_set_re_pad.html +++ b/db/docs/api_c/db_set_re_pad.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB->set_re_pad - + -

DB->set_re_pad

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB->get_re_pad(DB *db, int *re_padp);
 


Description: DB->set_re_pad

-

Set the padding character for short, fixed-length records for the Queue and Recno access methods.

If no pad character is specified, <space> characters (that @@ -48,16 +46,15 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

re_pad
-The re_pad parameter is the pad character for fixed-length +
+
re_pad
The re_pad parameter is the pad character for fixed-length records for the Queue and Recno access methods.

Errors

The DB->set_re_pad method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB->open was called; or if an +
+
EINVAL
If the method was called after DB->open was called; or if an invalid flag value or parameter was specified.

@@ -70,9 +67,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

re_padp
-The DB->get_re_pad method returns the +
+
re_padp
The DB->get_re_pad method returns the pad character in re_padp.

@@ -84,6 +80,6 @@ pad character in re_padp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_set_re_source.html b/db/docs/api_c/db_set_re_source.html index 919dccee3..d40096da4 100644 --- a/db/docs/api_c/db_set_re_source.html +++ b/db/docs/api_c/db_set_re_source.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB->set_re_source - + -

DB->set_re_source

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB->get_re_source(DB *db, const char **sourcep);
 


Description: DB->set_re_source

-

Set the underlying source file for the Recno access method. The purpose of the source value is to provide fast access and modification to databases that are normally stored as flat text files.

@@ -88,15 +86,16 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

source
-The backing flat text database file for a Recno database. +
+
source
The backing flat text database file for a Recno database. +

On Windows, the source argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters.
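For illustration, a minimal sketch of a Recno database backed by a flat text file, with the record delimiter set explicitly; the file names and helper name are assumptions.

#include <db.h>

int
open_backed_recno(DB **dbpp)
{
	DB *dbp;
	int ret;

	if ((ret = db_create(&dbp, NULL, 0)) != 0)
		return (ret);
	/* Records are newline-delimited lines of the backing file. */
	if ((ret = dbp->set_re_source(dbp, "records.txt")) != 0 ||
	    (ret = dbp->set_re_delim(dbp, '\n')) != 0 ||
	    (ret = dbp->open(dbp,
	    NULL, "recno.db", NULL, DB_RECNO, DB_CREATE, 0664)) != 0) {
		(void)dbp->close(dbp, 0);
		return (ret);
	}
	*dbpp = dbp;
	return (0);
}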

Errors

The DB->set_re_source method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB->open was called; or if an +
+
EINVAL
If the method was called after DB->open was called; or if an invalid flag value or parameter was specified.

@@ -113,6 +112,6 @@ application.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_stat.html b/db/docs/api_c/db_stat.html index 40b76a7a5..4aa4cecb8 100644 --- a/db/docs/api_c/db_stat.html +++ b/db/docs/api_c/db_stat.html @@ -1,37 +1,60 @@ - - + + Berkeley DB: DB->stat - + -

DB->stat

API -Ref -
+Ref +


 #include <db.h>
 

int -DB->stat(DB *db, void *sp, u_int32_t flags); +DB->stat(DB *db, DB_TXN *txnid, void *sp, u_int32_t flags); +

+int +DB->stat_print(DB *db, u_int32_t flags);


Description: DB->stat

-

The DB->stat method creates a statistical structure and -copies a pointer to it into user-specified memory locations. -Specifically, if sp is non-NULL, a pointer to the statistics -for the database are copied into the memory location to which it refers.

+

The DB->stat method creates a statistical structure and copies a +pointer to it into user-specified memory locations. Specifically, if +sp is non-NULL, a pointer to the statistics for the database is +copied into the memory location to which it refers.

+

Parameters

+
+
flags
The flags parameter must be set to 0 or +one of the following values: +
+
DB_DEGREE_2
Database items read during this operation will have degree 2 isolation. +This ensures the stability of the data items read during the stat +operation but permits that data to be modified or deleted by other +transactions prior to the commit of the specified transaction. +
DB_DIRTY_READ
Database items read during this operation may include modified but not +yet committed data. Silently ignored if the DB_DIRTY_READ flag +was not specified when the underlying database was opened. +
DB_FAST_STAT
Return only the values which do not require traversal of the database. +Among other things, this flag makes it possible for applications to +request key and record counts without incurring the performance penalty +of traversing the entire database. +
+
txnid
If the operation is to be transaction-protected, +the txnid parameter is a transaction handle returned from +DB_ENV->txn_begin; otherwise, NULL. +
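For illustration, a minimal sketch of a non-transactional DB_FAST_STAT call against an already-open Btree handle; the helper name and the fields printed are assumptions.

#include <stdio.h>
#include <stdlib.h>
#include <db.h>

int
print_btree_fast_stats(DB *dbp)
{
	DB_BTREE_STAT *sp;
	int ret;

	if ((ret = dbp->stat(dbp, NULL, &sp, DB_FAST_STAT)) != 0)
		return (ret);
	printf("keys: %lu, page size: %lu\n",
	    (unsigned long)sp->bt_nkeys, (unsigned long)sp->bt_pagesize);
	/* The structure was allocated by Berkeley DB; release it. */
	free(sp);
	return (0);
}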

Statistical structures are stored in allocated memory. If application-specific allocation routines have been declared (see DB_ENV->set_alloc for more information), they are used to allocate the memory; otherwise, the @@ -56,10 +79,9 @@ returns a non-zero error value on failure and 0 on success.

Hash Statistics

-

In the case of a Hash database, -the statistics are stored in a structure of type DB_HASH_STAT. The -following fields will be filled in:

-

+

In the case of a Hash database, the statistics are stored in a structure +of type DB_HASH_STAT. The following fields will be filled in:

+
u_int32_t hash_magic;
Magic number that identifies the file as a Hash file. Returned if DB_FAST_STAT is set.
u_int32_t hash_version;
The version of the Hash database. Returned if DB_FAST_STAT is @@ -88,28 +110,39 @@ that did not fit in the main bucket page).
u_int32_t hash_dup_free;
The number of bytes free on duplicate pages.

Btree and Recno Statistics

-

In the case of a Btree or Recno database, -the statistics are stored in a structure of type DB_BTREE_STAT. The -following fields will be filled in:

-

+

In the case of a Btree or Recno database, the statistics are stored in +a structure of type DB_BTREE_STAT. The following fields will be filled +in:

+
u_int32_t bt_magic;
Magic number that identifies the file as a Btree database. Returned if DB_FAST_STAT is set.
u_int32_t bt_version;
The version of the Btree database. Returned if DB_FAST_STAT is set. -
u_int32_t bt_nkeys;
For the Btree Access Method, the number of unique keys in the database. -If DB_FAST_STAT was specified and the database was created with -the DB_RECNUM flag, the count will be exact, otherwise, the -count will be the last saved value unless it has never been calculated, -in which case it will be 0. For the Recno Access Method, the exact -number of records in the database. Returned if DB_FAST_STAT is -set. +
u_int32_t bt_nkeys;
For the Btree Access Method, the number of keys in the database. If +the DB_FAST_STAT flag is not specified or the database was +configured to support record numbers (see DB_RECNUM), the count +will be exact. Otherwise, the count will be the last saved value unless +it has never been calculated, in which case it will be 0. +

For the Recno Access Method, the number of records in the database. If +the database was configured with mutable record numbers (see +DB_RENUMBER), the count will be exact. Otherwise, if the +DB_FAST_STAT flag is specified the count will be exact but will +include deleted and implicitly created records; if the +DB_FAST_STAT flag is not specified, the count will be exact and +will not include deleted or implicitly created records.

+

Returned if DB_FAST_STAT is set.

u_int32_t bt_ndata;
For the Btree Access Method, the number of key/data pairs in the -database. If DB_FAST_STAT was specified the count will be the -last saved value unless it has never been calculated, in which case it -will be 0. For the Recno Access Method, the exact number of records in -the database. If the database has been configured to not renumber -records during deletion, the count of records will only reflect -undeleted records. Returned if DB_FAST_STAT is set. +database. If the DB_FAST_STAT flag is not specified, the count +will be exact. Otherwise, the count will be the last saved value unless +it has never been calculated, in which case it will be 0. +

For the Recno Access Method, the number of records in the database. If +the database was configured with mutable record numbers (see +DB_RENUMBER), the count will be exact. Otherwise, if the +DB_FAST_STAT flag is specified the count will be exact but will +include deleted and implicitly created records; if the +DB_FAST_STAT flag is not specified, the count will be exact and +will not include deleted or implicitly created records.

+

Returned if DB_FAST_STAT is set.

u_int32_t bt_pagesize;
Underlying database page size, in bytes. Returned if DB_FAST_STAT is set.
u_int32_t bt_minkey;
The minimum keys per page. Returned if DB_FAST_STAT is set. @@ -122,6 +155,7 @@ DB_FAST_STAT is set.
u_int32_t bt_leaf_pg;
Number of database leaf pages.
u_int32_t bt_dup_pg;
Number of database duplicate pages.
u_int32_t bt_over_pg;
Number of database overflow pages. +
u_int32_t bt_empty_pg;
Number of empty database pages.
u_int32_t bt_free;
Number of pages on the free list.
u_int32_t bt_int_pgfree;
Number of bytes free in database internal pages.
u_int32_t bt_leaf_pgfree;
Number of bytes free in database leaf pages. @@ -129,10 +163,10 @@ DB_FAST_STAT is set.
u_int32_t bt_over_pgfree;
Number of bytes free in database overflow pages.

Queue Statistics

-

In the case of a Queue database, -the statistics are stored in a structure of type DB_QUEUE_STAT. The -following fields will be filled in:

-

+

In the case of a Queue database, the statistics are stored in a +structure of type DB_QUEUE_STAT. The following fields will be filled +in:

+
u_int32_t qs_magic;
Magic number that identifies the file as a Queue file. Returned if DB_FAST_STAT is set.
u_int32_t qs_version;
The version of the Queue file type. Returned if DB_FAST_STAT @@ -158,38 +192,40 @@ DB_FAST_STAT is set. DB_FAST_STAT is set.
u_int32_t qs_cur_recno;
Next available record number. Returned if DB_FAST_STAT is set.
-

Parameters

-

-

flags
-The flags parameter must be set to 0 or -one of the following values: -

-

DB_FAST_STAT
Return only the values which do not require traversal of the database. -

Among other things, this flag makes it possible for applications to -request key and record counts without incurring the performance penalty -of traversing the entire database. If the underlying database is of -type Recno, or of type Btree and the database was created with the -DB_RECNUM flag, the count of keys will be exact. Otherwise, -the count of keys will be the value saved the last time the database -was traversed, or 0 if no count of keys has ever been made. If the -underlying database is of type Recno, the count of data items will be -exact, otherwise, the count of data items will be the value saved the -last time the database was traversed, or 0 if no count of data items -has ever been done.

-
-

Errors

The DB->stat method may fail and return one of the following non-zero errors:

-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

+

Description: DB->stat_print

+

The DB->stat_print method returns the +database statistical information, as described for the DB->stat method. +The information is printed to a specified output channel (see the +DB_ENV->set_msgfile method for more information), or passed to an +application callback function (see the DB_ENV->set_msgcall method for +more information).

+

The DB->stat_print method may not be called before the DB->open method has been +called.

+

The DB->stat_print method +returns a non-zero error value on failure +and 0 on success. +

+

Parameters

+
+
flags
The flags parameter must be set to 0 or +the following value: +
+
DB_STAT_ALL
Display all available information. +
+
+
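For illustration, a minimal sketch of routing the same information through DB->stat_print on an open handle; the helper name is an assumption.

#include <db.h>

int
dump_db_stats(DB *dbp)
{
	/* Output goes to the channel configured with DB_ENV->set_msgfile,
	 * or to the DB_ENV->set_msgcall callback if one is registered. */
	return (dbp->stat_print(dbp, DB_STAT_ALL));
}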

Class

DB

See Also

@@ -198,6 +234,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_sync.html b/db/docs/api_c/db_sync.html index 40ea2c30b..2b1d3e6b4 100644 --- a/db/docs/api_c/db_sync.html +++ b/db/docs/api_c/db_sync.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB->sync - + -

DB->sync

API -Ref -
+Ref +


@@ -46,19 +45,18 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

flags
-The flags parameter is currently unused, and must be set to 0. +
+
flags
The flags parameter is currently unused, and must be set to 0.

Errors

The DB->sync method may fail and return one of the following non-zero errors:

-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -70,6 +68,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_truncate.html b/db/docs/api_c/db_truncate.html index 735c8c948..1ded675c3 100644 --- a/db/docs/api_c/db_truncate.html +++ b/db/docs/api_c/db_truncate.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB->truncate - + -

DB->truncate

API -Ref -
+Ref +


@@ -30,9 +29,12 @@ DB->truncate(DB *db,
 

Description: DB->truncate

The DB->truncate method empties the database, discarding all records -it contains. -The number of records discarded from the database is returned in -countp.

+it contains. The number of records discarded from the database is +returned in countp.

+

When called on a database configured with secondary indices using the +DB->associate method, the DB->truncate method truncates the primary +database and all secondary indices. A count of the records discarded +from the primary database is returned.

It is an error to call the DB->truncate method on a database with open cursors.

The DB->truncate method @@ -40,20 +42,17 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

countp
-The countp parameter references memory into which +
+
countp
The countp parameter references memory into which the number of records discarded from the database is copied. -

flags
-The flags parameter must be set to 0 or +
flags
The flags parameter must be set to 0 or the following value: -

-

DB_AUTO_COMMIT
Enclose the DB->truncate call within a transaction. If the call succeeds, +
+
DB_AUTO_COMMIT
Enclose the DB->truncate call within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, the operation will have made no changes.
-

txnid
-If the operation is to be transaction-protected, +
txnid
If the operation is to be transaction-protected, (other than by specifying the DB_AUTO_COMMIT flag), the txnid parameter is a transaction handle returned from DB_ENV->txn_begin; otherwise, NULL. @@ -61,14 +60,14 @@ the txnid parameter is a transaction handle returned from
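For illustration, a minimal sketch of emptying an open database under DB_AUTO_COMMIT, assuming a transactional environment; the helper name is an assumption.

#include <stdio.h>
#include <db.h>

int
empty_database(DB *dbp)
{
	u_int32_t count;
	int ret;

	if ((ret = dbp->truncate(dbp, NULL, &count, DB_AUTO_COMMIT)) != 0)
		return (ret);
	printf("discarded %lu records\n", (unsigned long)count);
	return (0);
}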

Errors

The DB->truncate method may fail and return one of the following non-zero errors:

-

-

DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve +
+
DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve a deadlock. -

DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable +
DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable to grant a lock in the allowed time.
-

-

EINVAL
If there are open cursors in the database; or if an +
+
EINVAL
If there are open cursors in the database; or if an invalid flag value or parameter was specified.

@@ -80,6 +79,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_upgrade.html b/db/docs/api_c/db_upgrade.html index 81f340f7a..672e3b641 100644 --- a/db/docs/api_c/db_upgrade.html +++ b/db/docs/api_c/db_upgrade.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB->upgrade - + -

DB->upgrade

API -Ref -
+Ref +


@@ -46,15 +45,13 @@ and 0 on success.
 See the db_upgrade utility source code for an example of using DB->upgrade
in an IEEE/ANSI Std 1003.1 (POSIX) environment.

Parameters

-

-

file
-The file parameter is the physical file containing the databases +
+
file
The file parameter is the physical file containing the databases to be upgraded. -

flags
-The flags parameter must be set to 0 or +
flags
The flags parameter must be set to 0 or the following value: -

-

DB_DUPSORT
This flag is only meaningful when upgrading databases from +
+
DB_DUPSORT
This flag is only meaningful when upgrading databases from releases before the Berkeley DB 3.1 release.

As part of the upgrade from the Berkeley DB 3.0 release to the 3.1 release, the on-disk format of duplicate data items changed. To correctly @@ -87,11 +84,11 @@ in the environment's DB_CONFIG file.
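For illustration, a minimal sketch of upgrading a database file in place; the helper name is an assumption, and a backup of the file should be made first.

#include <db.h>

int
upgrade_file(const char *file)
{
	DB *dbp;
	int ret, t_ret;

	if ((ret = db_create(&dbp, NULL, 0)) != 0)
		return (ret);
	ret = dbp->upgrade(dbp, file, 0);
	if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}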

Errors

The DB->upgrade method may fail and return one of the following non-zero errors:

-

-

DB_OLD_VERSION
The database cannot be upgraded by this version of the Berkeley DB software. +
+
DB_OLD_VERSION
The database cannot be upgraded by this version of the Berkeley DB software.
-

-

EINVAL
If the database is not in the same byte-order as the system; or if an +
+
EINVAL
If the database is not in the same byte-order as the system; or if an invalid flag value or parameter was specified.

@@ -103,6 +100,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/db_verify.html b/db/docs/api_c/db_verify.html index ab3dad1c1..432a48fa3 100644 --- a/db/docs/api_c/db_verify.html +++ b/db/docs/api_c/db_verify.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB->verify - + -

DB->verify

API -Ref -
+Ref +


@@ -44,9 +43,8 @@ called, regardless of its return.

The DB->verify method is the underlying method used by the db_verify utility. See the db_verify utility source code for an example of using DB->verify in an IEEE/ANSI Std 1003.1 (POSIX) environment.

- -

-The DB->verify method will return DB_VERIFY_BAD if a database is + +

The DB->verify method will return DB_VERIFY_BAD if a database is corrupted. When the DB_SALVAGE flag is specified, the DB_VERIFY_BAD return means that all key/data pairs in the file may not have been successfully output. @@ -55,22 +53,19 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

database
-The database parameter is the database in file on which +
+
database
The database parameter is the database in file on which the database checks for btree and duplicate sort order and for hashing are to be performed. See the DB_ORDERCHKONLY flag for more information.

The database parameter must be set to NULL except when the DB_ORDERCHKONLY flag is set.

-

file
-The file parameter is the physical file in which the databases +
file
The file parameter is the physical file in which the databases to be verified are found. -

flags
-The flags parameter must be set to 0 or +
flags
The flags parameter must be set to 0 or the following value: -

-

DB_SALVAGE
Write the key/data pairs from all databases in the file to the file +
+
DB_SALVAGE
Write the key/data pairs from all databases in the file to the file stream named in the outfile parameter. The output format is the same as that specified for the db_dump utility, and can be used as input for the db_load utility. @@ -80,8 +75,8 @@ pairs normally produces less than optimal loads for Btree databases.

In addition, the following flags may be set by bitwise inclusively OR'ing them into the flags parameter:

-

-

DB_AGGRESSIVE
Output all the key/data pairs in the file that can be found. +
+
DB_AGGRESSIVE
Output all the key/data pairs in the file that can be found. By default, DB->verify does not assume corruption. For example, if a key/data pair on a page is marked as deleted, it is not then written to the output file. When DB_AGGRESSIVE is specified, corruption @@ -90,14 +85,14 @@ case, key/data pairs that are corrupted or have been deleted may appear in the output (even if the file being salvaged is in no way corrupt), and the output will almost certainly require editing before being loaded into a database. -

DB_PRINTABLE
When using the DB_SALVAGE flag, if characters in either the key +
DB_PRINTABLE
When using the DB_SALVAGE flag, if characters in either the key or data items are printing characters (as defined by isprint(3)), use printing characters to represent them. This flag permits users to use standard text editors and tools to modify the contents of databases or selectively remove data from salvager output.

Note: different systems may have different notions about what characters are considered printing characters, and databases dumped in this manner may be less portable to external systems.

-

DB_NOORDERCHK
Skip the database checks for btree and duplicate sort order and for +
DB_NOORDERCHK
Skip the database checks for btree and duplicate sort order and for hashing.

The DB->verify method normally verifies that btree keys and duplicate items are correctly sorted, and hash keys are correctly hashed. If the @@ -110,7 +105,7 @@ first perform verification of the file as a whole by using the DB_NOORDERCHK flag, and then individually verify the sort order and hashing function for each database in the file using the DB_ORDERCHKONLY flag.

-

DB_ORDERCHKONLY
Perform the database checks for btree and duplicate sort order and for +
DB_ORDERCHKONLY
Perform the database checks for btree and duplicate sort order and for hashing, skipped by DB_NOORDERCHK.

When this flag is specified, a database parameter should also be specified, indicating the database in the physical file which is to be @@ -118,8 +113,7 @@ checked. This flag is only safe to use on databases that have already successfully been verified using DB->verify with the DB_NOORDERCHK flag set.

-

outfile
-The outfile parameter is an optional file stream to which the +
outfile
The outfile parameter is an optional file stream to which the databases' key/data pairs are written.
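For illustration, a minimal sketch of verifying a file and, if it proves corrupt, salvaging its key/data pairs to a dump file; the output file name dump.txt and helper name are assumptions.

#include <stdio.h>
#include <db.h>

int
verify_or_salvage(const char *file)
{
	DB *dbp;
	FILE *ofp;
	int ret;

	if ((ret = db_create(&dbp, NULL, 0)) != 0)
		return (ret);
	ret = dbp->verify(dbp, file, NULL, NULL, 0);
	/* The handle may not be used again once DB->verify returns. */
	if (ret != DB_VERIFY_BAD)
		return (ret);

	if ((ofp = fopen("dump.txt", "w")) == NULL)
		return (-1);
	if ((ret = db_create(&dbp, NULL, 0)) == 0)
		ret = dbp->verify(dbp, file, NULL, ofp,
		    DB_SALVAGE | DB_PRINTABLE);
	(void)fclose(ofp);
	return (ret);
}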

Environment Variables

@@ -130,13 +124,13 @@ database environment home.

the DB_ENV->set_data_dir method, or by setting the "set_data_dir" string in the environment's DB_CONFIG file.

Errors

-

-

ENOENT
The file or directory does not exist. +
+
ENOENT
The file or directory does not exist.

The DB->verify method may fail and return one of the following non-zero errors:

-

-

EINVAL
If DB->verify was called after DB->open; or if an +
+
EINVAL
If DB->verify was called after DB->open; or if an invalid flag value or parameter was specified.

@@ -148,6 +142,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/dbc_class.html b/db/docs/api_c/dbc_class.html index 16ff16892..e21aa59ee 100644 --- a/db/docs/api_c/dbc_class.html +++ b/db/docs/api_c/dbc_class.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DBC - + -

DBC

API -Ref -
+Ref +


@@ -39,6 +38,6 @@ handle may not be accessed again, regardless of the method's return.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/dbc_close.html b/db/docs/api_c/dbc_close.html index e5508bfa3..a714ef66a 100644 --- a/db/docs/api_c/dbc_close.html +++ b/db/docs/api_c/dbc_close.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DBcursor->c_close - + -

DBcursor->c_close

API -Ref -
+Ref +


@@ -43,14 +42,14 @@ and 0 on success.
 

Errors

The DBcursor->c_close method may fail and return one of the following non-zero errors:

-

-

DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve +
+
DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve a deadlock. -

DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable +
DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable to grant a lock in the allowed time.
-

-

EINVAL
If the cursor is already closed; or if an +
+
EINVAL
If the cursor is already closed; or if an invalid flag value or parameter was specified.

@@ -62,6 +61,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/dbc_count.html b/db/docs/api_c/dbc_count.html index c5ad27375..fff7e2f3d 100644 --- a/db/docs/api_c/dbc_count.html +++ b/db/docs/api_c/dbc_count.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DBcursor->c_count - + -

DBcursor->c_count

API -Ref -
+Ref +


@@ -35,22 +34,20 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

countp
-The countp parameter references memory into which +
+
countp
The countp parameter references memory into which the count of the number of duplicate data items is copied. -

flags
-The flags parameter is currently unused, and must be set to 0. +
flags
The flags parameter is currently unused, and must be set to 0.

Errors

The DBcursor->c_count method may fail and return one of the following non-zero errors:

-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

EINVAL
If the cursor has not been initialized; or if an +
+
EINVAL
If the cursor has not been initialized; or if an invalid flag value or parameter was specified.

@@ -62,6 +59,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/dbc_del.html b/db/docs/api_c/dbc_del.html index 91ec09d8b..a41e932bf 100644 --- a/db/docs/api_c/dbc_del.html +++ b/db/docs/api_c/dbc_del.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DBcursor->c_del - + -

DBcursor->c_del

API -Ref -
+Ref +


@@ -37,42 +36,40 @@ indices.

The cursor position is unchanged after a delete, and subsequent calls to cursor functions expecting the cursor to refer to an existing key will fail.

-

-The DBcursor->c_del method will return DB_KEYEMPTY if the element has already been deleted. +

The DBcursor->c_del method will return DB_KEYEMPTY if the element has already been deleted. Unless otherwise specified, the DBcursor->c_del method returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flags
-The flags parameter is currently unused, and must be set to 0. +
+
flags
The flags parameter is currently unused, and must be set to 0.

Errors

The DBcursor->c_del method may fail and return one of the following non-zero errors:

-

-

DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve +
+
DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve a deadlock. -

DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable +
DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable to grant a lock in the allowed time.
-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

DB_SECONDARY_BAD
A secondary index references a nonexistent primary key. +
+
DB_SECONDARY_BAD
A secondary index references a nonexistent primary key.
-

-

EACCES
An attempt was made to modify a read-only database. +
+
EACCES
An attempt was made to modify a read-only database.
-

-

EINVAL
If the cursor has not been initialized; or if an +
+
EINVAL
If the cursor has not been initialized; or if an invalid flag value or parameter was specified.
-

-

EPERM
Write attempted on read-only cursor when the DB_INIT_CDB flag was +
+
EPERM
Write attempted on read-only cursor when the DB_INIT_CDB flag was specified to DB_ENV->open.

@@ -84,6 +81,6 @@ specified to DB_ENV->open.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/dbc_dup.html b/db/docs/api_c/dbc_dup.html index 0c05aa923..5a7e195a3 100644 --- a/db/docs/api_c/dbc_dup.html +++ b/db/docs/api_c/dbc_dup.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DBcursor->c_dup - + -

DBcursor->c_dup

API -Ref -
+Ref +


@@ -37,27 +36,27 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
+
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_POSITION
The newly created cursor is initialized to refer to the same position -in the database as the original cursor and hold the same locks. If the -DB_POSITION flag is not specified, then the created cursor is -uninitialized and will behave like a cursor newly created using -DB->cursor. +
+
DB_POSITION
The newly created cursor is initialized to refer to the same position +in the database as the original cursor (if any) and hold the same locks +(if any). If the DB_POSITION flag is not specified, or the +original cursor does not hold a database position and locks, the created +cursor is uninitialized and will behave like a cursor newly created +using DB->cursor.
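For illustration, a minimal sketch of duplicating a positioned cursor with DB_POSITION and re-reading the pair through the clone; the helper name is an assumption.

#include <string.h>
#include <db.h>

int
reread_through_clone(DBC *dbc)
{
	DBC *clone;
	DBT key, data;
	int ret, t_ret;

	if ((ret = dbc->c_dup(dbc, &clone, DB_POSITION)) != 0)
		return (ret);

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	/* The clone holds the original position and locks. */
	ret = clone->c_get(clone, &key, &data, DB_CURRENT);

	if ((t_ret = clone->c_close(clone)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}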

Errors

The DBcursor->c_dup method may fail and return one of the following non-zero errors:

-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

EINVAL
If the cursor has not been initialized; or if an +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -69,6 +68,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/dbc_get.html b/db/docs/api_c/dbc_get.html index fe3f4533a..25e0932ef 100644 --- a/db/docs/api_c/dbc_get.html +++ b/db/docs/api_c/dbc_get.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DBcursor->c_get - + -

DBcursor->c_get

API -Ref -
+Ref +


@@ -33,23 +32,17 @@ DBcursor->c_pget(DBC *DBcursor,
 


Description: DBcursor->c_get

-

The DBcursor->c_get method retrieves key/data pairs from the database. The -address and length of the key -are returned in the object to which key refers (except for the -case of the DB_SET flag, in which the key object is -unchanged), and the address -and length of the data are returned in the object to which data -refers.

+address and length of the key are returned in the object to which +key refers (except for the case of the DB_SET flag, in +which the key object is unchanged), and the address and length +of the data are returned in the object to which data refers.

When called on a cursor opened on a database that has been made into a secondary index using the DB->associate method, the DBcursor->c_get and DBcursor->c_pget methods return the key from the secondary index and the -data item from the primary database. In addition, the -DBcursor->c_pget method +data item from the primary database. In addition, the DBcursor->c_pget method returns the key from the primary database. In databases that are not -secondary indices, the -DBcursor->c_pget method -will always fail.

+secondary indices, the DBcursor->c_pget method will always fail.

Modifications to the database during a sequential scan will be reflected in the scan; that is, records inserted behind a cursor will not be returned while records inserted in front of a cursor will be returned.

@@ -63,76 +56,65 @@ and 0 on success.

If DBcursor->c_get fails for any reason, the state of the cursor will be unchanged.

Parameters

-

-

data
-The data DBT operated on. -

flags
-The flags parameter must be set to one of the following values: -

-

DB_CURRENT
Return the key/data pair to which the cursor refers. -

-The DBcursor->c_get method will return DB_KEYEMPTY if DB_CURRENT is set and the cursor key/data pair was deleted. +

+
data
The data DBT operated on. +
flags
The flags parameter must be set to one of the following values: +
+
DB_CURRENT
Return the key/data pair to which the cursor refers. +

The DBcursor->c_get method will return DB_KEYEMPTY if DB_CURRENT is set and the cursor key/data pair was deleted.

-

DB_FIRST
The cursor is set to refer to the first key/data pair of the database, +
DB_FIRST
The cursor is set to refer to the first key/data pair of the database, and that pair is returned. If the first key has duplicate values, the first data item in the set of duplicates is returned.

If the database is a Queue or Recno database, DBcursor->c_get using the DB_FIRST flag will ignore any keys that exist but were never explicitly created by the application, or were created and later deleted.

-

-The DBcursor->c_get method will return DB_NOTFOUND if DB_FIRST is set and the database is empty. +

The DBcursor->c_get method will return DB_NOTFOUND if DB_FIRST is set and the database is empty.

-

DB_GET_BOTH
The DB_GET_BOTH flag is identical to the DB_SET flag, +
DB_GET_BOTH
The DB_GET_BOTH flag is identical to the DB_SET flag, except that both the key and the data parameters must be matched by the key and data item in the database. -

When used with the -DBcursor->c_pget method -version of this method on a secondary index handle, both the -secondary and primary keys must be matched by the secondary and primary -key item in the database. It is an error to use the DB_GET_BOTH -flag with the -DBcursor->c_get -version of this method and a cursor that has been opened on a -secondary index handle.

-

DB_GET_BOTH_RANGE
The DB_GET_BOTH_RANGE flag is identical to the DB_GET_BOTH +

When used with the DBcursor->c_pget method version of this method on a +secondary index handle, both the secondary and primary keys must be +matched by the secondary and primary key item in the database. It is +an error to use the DB_GET_BOTH flag with the DBcursor->c_get +version of this method and a cursor that has been opened on a secondary +index handle.

+
DB_GET_BOTH_RANGE
The DB_GET_BOTH_RANGE flag is identical to the DB_GET_BOTH flag, except that, in the case of any database supporting sorted duplicate sets, the returned key/data pair is the smallest data item greater than or equal to the specified data item (as determined by the comparison function), permitting partial matches and range searches in duplicate data sets. -

DB_GET_RECNO
Return the record number associated with the cursor. The record number +
DB_GET_RECNO
Return the record number associated with the cursor. The record number will be returned in data, as described in DBT. The key parameter is ignored.

For DB_GET_RECNO to be specified, the underlying database must be of type Btree, and it must have been created with the DB_RECNUM flag.

When called on a cursor opened on a database that has been made into a -secondary index, the -DBcursor->c_get and DBcursor->c_pget methods return -the record number of the primary database in data. In addition, -the -DBcursor->c_pget method -returns the record number of the secondary index in pkey. If -either underlying database is not of type Btree or is not created with -the DB_RECNUM flag, the out-of-band record number of 0 is -returned.

-

DB_JOIN_ITEM
Do not use the data value found in all of the cursors as a lookup key for +secondary index, the DBcursor->c_get and DBcursor->c_pget methods return the +record number of the primary database in data. In addition, the +DBcursor->c_pget method returns the record number of the secondary index in +pkey. If either underlying database is not of type Btree or is +not created with the DB_RECNUM flag, the out-of-band record +number of 0 is returned.

+
DB_JOIN_ITEM
Do not use the data value found in all of the cursors as a lookup key for the primary database, but simply return it in the key parameter instead. The data parameter is left unchanged.

For DB_JOIN_ITEM to be specified, the underlying cursor must have been returned from the DB->join method.

-

DB_LAST
The cursor is set to refer to the last key/data pair of the database, +
DB_LAST
The cursor is set to refer to the last key/data pair of the database, and that pair is returned. If the last key has duplicate values, the last data item in the set of duplicates is returned.

If the database is a Queue or Recno database, DBcursor->c_get using the DB_LAST flag will ignore any keys that exist but were never explicitly created by the application, or were created and later deleted.

-

-The DBcursor->c_get method will return DB_NOTFOUND if DB_LAST is set and the database is empty. +

The DBcursor->c_get method will return DB_NOTFOUND if DB_LAST is set and the database is empty.

-

DB_NEXT
If the cursor is not yet initialized, DB_NEXT is identical to +
DB_NEXT
If the cursor is not yet initialized, DB_NEXT is identical to DB_FIRST. Otherwise, the cursor is moved to the next key/data pair of the database, and that pair is returned. In the presence of duplicate key values, the value of the key may not change. @@ -140,29 +122,26 @@ duplicate key values, the value of the key may not change. DB_NEXT flag will skip any keys that exist but were never explicitly created by the application, or those that were created and later deleted.

-

-The DBcursor->c_get method will return DB_NOTFOUND if DB_NEXT is set and the cursor is already on the last record +

The DBcursor->c_get method will return DB_NOTFOUND if DB_NEXT is set and the cursor is already on the last record in the database.

-

DB_NEXT_DUP
If the next key/data pair of the database is a duplicate data record for +
DB_NEXT_DUP
If the next key/data pair of the database is a duplicate data record for the current key/data pair, the cursor is moved to the next key/data pair of the database, and that pair is returned. -

-The DBcursor->c_get method will return DB_NOTFOUND if DB_NEXT_DUP is set and the next key/data pair of the +

The DBcursor->c_get method will return DB_NOTFOUND if DB_NEXT_DUP is set and the next key/data pair of the database is not a duplicate data record for the current key/data pair.

-

DB_NEXT_NODUP
If the cursor is not yet initialized, DB_NEXT_NODUP is identical +
DB_NEXT_NODUP
If the cursor is not yet initialized, DB_NEXT_NODUP is identical to DB_FIRST. Otherwise, the cursor is moved to the next non-duplicate key of the database, and that key/data pair is returned.

If the database is a Queue or Recno database, DBcursor->c_get using the DB_NEXT_NODUP flag will ignore any keys that exist but were never explicitly created by the application, or those that were created and later deleted.

-

-The DBcursor->c_get method will return DB_NOTFOUND if DB_NEXT_NODUP is set and no non-duplicate key/data pairs +

The DBcursor->c_get method will return DB_NOTFOUND if DB_NEXT_NODUP is set and no non-duplicate key/data pairs occur after the cursor position in the database.

-

DB_PREV
If the cursor is not yet initialized, DB_PREV is identical to +
DB_PREV
If the cursor is not yet initialized, DB_PREV is identical to DB_LAST. Otherwise, the cursor is moved to the previous key/data pair of the database, and that pair is returned. In the presence of duplicate key values, the value of the key may not change. @@ -170,25 +149,22 @@ presence of duplicate key values, the value of the key may not change. DB_PREV flag will skip any keys that exist but were never explicitly created by the application, or those that were created and later deleted.

-

-The DBcursor->c_get method will return DB_NOTFOUND if DB_PREV is set and the cursor is already on the first record +

The DBcursor->c_get method will return DB_NOTFOUND if DB_PREV is set and the cursor is already on the first record in the database.

-

DB_PREV_NODUP
If the cursor is not yet initialized, DB_PREV_NODUP is identical +
DB_PREV_NODUP
If the cursor is not yet initialized, DB_PREV_NODUP is identical to DB_LAST. Otherwise, the cursor is moved to the previous non-duplicate key of the database, and that key/data pair is returned.

If the database is a Queue or Recno database, DBcursor->c_get using the DB_PREV_NODUP flag will ignore any keys that exist but were never explicitly created by the application, or those that were created and later deleted.

-

-The DBcursor->c_get method will return DB_NOTFOUND if DB_PREV_NODUP is set and no non-duplicate key/data pairs +

The DBcursor->c_get method will return DB_NOTFOUND if DB_PREV_NODUP is set and no non-duplicate key/data pairs occur before the cursor position in the database.

-

DB_SET
Move the cursor to the specified key/data pair of the database, and +
DB_SET
Move the cursor to the specified key/data pair of the database, and return the datum associated with the given key. -

-The DBcursor->c_get method will return DB_NOTFOUND if DB_SET is set and +

The DBcursor->c_get method will return DB_NOTFOUND if DB_SET is set and no matching keys are found. The DBcursor->c_get method will return DB_KEYEMPTY if DB_SET is set and the database is a Queue or Recno database, and the specified key exists, but was never @@ -196,29 +172,28 @@ explicitly created by the application or was later deleted. In the presence of duplicate key values, DBcursor->c_get will return the first data item for the given key.

-

DB_SET_RANGE
The DB_SET_RANGE flag is identical to the DB_SET flag, +
DB_SET_RANGE
The DB_SET_RANGE flag is identical to the DB_SET flag, except that in the case of the Btree access method, the key is returned as well as the data item and the returned key/data pair is the smallest key greater than or equal to the specified key (as determined by the Btree comparison function), permitting partial key matches and range searches. -

DB_SET_RECNO
Move the cursor to the specific numbered record of the database, and +
DB_SET_RECNO
Move the cursor to the specific numbered record of the database, and return the associated key/data pair. The data field of the -specified key -must be a pointer to a memory location from which a db_recno_t -may be read, as described in DBT. This memory location will be -read to determine the record to be retrieved. +specified key must be a pointer to a memory location from which +a db_recno_t may be read, as described in DBT. This +memory location will be read to determine the record to be retrieved.

For DB_SET_RECNO to be specified, the underlying database must be of type Btree, and it must have been created with the DB_RECNUM flag.

In addition, the following flags may be set by bitwise inclusively OR'ing them into the flags parameter: -

-

DB_DIRTY_READ
Read modified but not yet committed data. Silently ignored if the +
+
DB_DIRTY_READ
Read modified but not yet committed data. Silently ignored if the DB_DIRTY_READ flag was not specified when the underlying database was opened. -

DB_MULTIPLE
Return multiple data items in the data parameter. +
DB_MULTIPLE
Return multiple data items in the data parameter.

In the case of Btree or Hash databases, duplicate data items for the current key, starting at the current cursor position, are entered into the buffer. Subsequent calls with both the DB_NEXT_DUP and @@ -243,7 +218,7 @@ least as large as the page size of the underlying database, aligned for unsigned integer access, and be a multiple of 1024 bytes in size. If the buffer size is insufficient, then upon return from the call the size field of the data parameter will have been set to an estimated -buffer size, and the error ENOMEM is returned. (The size is an estimate as the +buffer size, and the error DB_BUFFER_SMALL is returned. (The size is an estimate as the exact size needed may not be known until all entries are read. It is best to initially provide a relatively large buffer, but applications should be prepared to resize the buffer as necessary and repeatedly call @@ -257,10 +232,10 @@ DB_NEXT_NODUP, DB_SET, DB_SET_RANGE, and DB_SET_RECNO options. The DB_MULTIPLE flag may not be used when accessing databases made into secondary indices using the DB->associate method.

-

DB_MULTIPLE_KEY
Return multiple key and data pairs in the data parameter. +
DB_MULTIPLE_KEY
Return multiple key and data pairs in the data parameter.

Key and data pairs, starting at the current cursor position, are entered into the buffer. Subsequent calls with both the DB_NEXT and -DB_MULTIPLE flags specified will return additional key and data +DB_MULTIPLE_KEY flags specified will return additional key and data pairs or DB_NOTFOUND if there are no additional key and data items to return.

In the case of Btree or Hash databases, @@ -275,7 +250,7 @@ least as large as the page size of the underlying database, aligned for unsigned integer access, and be a multiple of 1024 bytes in size. If the buffer size is insufficient, then upon return from the call the size field of the data parameter will have been set to an estimated -buffer size, and the error ENOMEM is returned. (The size is an estimate as the +buffer size, and the error DB_BUFFER_SMALL is returned. (The size is an estimate as the exact size needed may not be known until all entries are read. It is best to initially provide a relatively large buffer, but applications should be prepared to resize the buffer as necessary and repeatedly call @@ -287,43 +262,40 @@ DB_NEXT_NODUP, DB_SET, DB_SET_RANGE, and DB_SET_RECNO options. The DB_MULTIPLE_KEY flag may not be used when accessing databases made into secondary indices using the DB->associate method.

-

DB_RMW
Acquire write locks instead of read locks when doing the retrieval. +
DB_RMW
Acquire write locks instead of read locks when doing the retrieval. Setting this flag can eliminate deadlock during a read-modify-write cycle by acquiring the write lock during the read part of the cycle so that another thread of control acquiring a read lock for the same item, in its own read-modify-write cycle, will not result in deadlock.
-

key
-The key DBT operated on. -

pkey
-The secondary index key DBT operated on. +
key
The key DBT operated on. +
pkey
The secondary index key DBT operated on.
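For illustration, a minimal sketch of a full forward scan with DB_NEXT, assuming nul-terminated string keys and data; the helper name and output format are assumptions.

#include <stdio.h>
#include <string.h>
#include <db.h>

int
dump_pairs(DB *dbp)
{
	DBC *dbc;
	DBT key, data;
	int ret, t_ret;

	if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
		return (ret);

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	while ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0)
		printf("%.*s : %.*s\n",
		    (int)key.size, (char *)key.data,
		    (int)data.size, (char *)data.data);
	if (ret == DB_NOTFOUND)		/* Normal end of the scan. */
		ret = 0;

	if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}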

Errors

The DBcursor->c_get method may fail and return one of the following non-zero errors:

-

-

DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve +
+
DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve a deadlock. -

DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable +
DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable to grant a lock in the allowed time.
-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

DB_SECONDARY_BAD
A secondary index references a nonexistent primary key. +
+
DB_SECONDARY_BAD
A secondary index references a nonexistent primary key.
-

-

EINVAL
If the DB_CURRENT or DB_NEXT_DUP flags were specified and +
+
EINVAL
If the DB_CURRENT or DB_NEXT_DUP flags were specified and the cursor has not been initialized; -the -DBcursor->c_pget method -was called with a cursor that does not refer to a secondary index; or if an +the DBcursor->c_pget method was called with a cursor that does not refer to a +secondary index; or if an invalid flag value or parameter was specified.
-

-

ENOMEM
The requested item could not be returned due to insufficient memory. +
+
DB_BUFFER_SMALL
The requested item could not be returned due to an undersized buffer.

Class

@@ -334,6 +306,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/dbc_list.html b/db/docs/api_c/dbc_list.html index 002d58956..b3b5056d2 100644 --- a/db/docs/api_c/dbc_list.html +++ b/db/docs/api_c/dbc_list.html @@ -1,12 +1,12 @@ - + Berkeley DB: Berkeley DB: Database Cursors and Related Methods - +

Berkeley DB: Database Cursors and Related Methods

@@ -22,6 +22,6 @@ DBcursor->c_pgetRetrieve by cursor DBcursor->c_putStore by cursor -

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/dbc_put.html b/db/docs/api_c/dbc_put.html index f22845a8d..968c5cd78 100644 --- a/db/docs/api_c/dbc_put.html +++ b/db/docs/api_c/dbc_put.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DBcursor->c_put - + -

DBcursor->c_put

API -Ref -
+Ref +


@@ -38,13 +37,11 @@ unchanged.  If DBcursor->c_put succeeds and an item is inserted into the
 database, the cursor is always positioned to refer to the newly inserted
 item.

Parameters

-

-

data
-The data DBT operated on. -

flags
-The flags parameter must be set to one of the following values: -

-

DB_AFTER
In the case of the Btree and Hash access methods, insert the data +
+
data
The data DBT operated on. +
flags
The flags parameter must be set to one of the following values: +
+
DB_AFTER
In the case of the Btree and Hash access methods, insert the data element as a duplicate element of the key to which the cursor refers. The new element appears immediately after the current cursor position. It is an error to specify DB_AFTER if the underlying Btree or @@ -59,11 +56,10 @@ in the structure to which the key parameter refers. The initial value of the key parameter is ignored. See DB->open for more information.

The DB_AFTER flag may not be specified to the Queue access method.

-

-The DBcursor->c_put method will return DB_NOTFOUND if the current cursor record has already been deleted and the +

The DBcursor->c_put method will return DB_NOTFOUND if the current cursor record has already been deleted and the underlying access method is Hash.

-

DB_BEFORE
In the case of the Btree and Hash access methods, insert the data +
DB_BEFORE
In the case of the Btree and Hash access methods, insert the data element as a duplicate element of the key to which the cursor refers. The new element appears immediately before the current cursor position. It is an error to specify DB_BEFORE if the underlying Btree or @@ -78,17 +74,14 @@ returned in the structure to which the key parameter refers. The initial value of the key parameter is ignored. See DB->open for more information.

The DB_BEFORE flag may not be specified to the Queue access method.

-

-The DBcursor->c_put method will return DB_NOTFOUND if the current cursor record has already been deleted and the underlying +

The DBcursor->c_put method will return DB_NOTFOUND if the current cursor record has already been deleted and the underlying access method is Hash.

-

DB_CURRENT
Overwrite the data of the key/data pair to which the cursor refers with +
DB_CURRENT
Overwrite the data of the key/data pair to which the cursor refers with the specified data item. The key parameter is ignored. -

-The DBcursor->c_put method will return DB_NOTFOUND if the current cursor record has already been deleted and the underlying -access method is Hash. +

The DBcursor->c_put method will return DB_NOTFOUND if the current cursor record has already been deleted.

-

DB_KEYFIRST
In the case of the Btree and Hash access methods, insert the specified +
DB_KEYFIRST
In the case of the Btree and Hash access methods, insert the specified key/data pair into the database.

If the underlying database supports duplicate data items, and if the key already exists in the database and a duplicate sort function has @@ -98,7 +91,7 @@ has been specified, the inserted data item is added as the first of the data items for that key.

The DB_KEYFIRST flag may not be specified to the Queue or Recno access methods.

-

DB_KEYLAST
In the case of the Btree and Hash access methods, insert the specified +
DB_KEYLAST
In the case of the Btree and Hash access methods, insert the specified key/data pair into the database.

If the underlying database supports duplicate data items, and if the key already exists in the database and a duplicate sort function has @@ -108,7 +101,7 @@ function has been specified, the inserted data item is added as the last of the data items for that key.

The DB_KEYLAST flag may not be specified to the Queue or Recno access methods.

-

DB_NODUPDATA
In the case of the Btree and Hash access methods, insert the specified +
DB_NODUPDATA
In the case of the Btree and Hash access methods, insert the specified key/data pair into the database, unless a key/data pair comparing equally to it already exists in the database. If a matching key/data pair already exists in the database, DB_KEYEXIST is returned. @@ -117,27 +110,26 @@ database has been configured to support sorted duplicate data items.

The DB_NODUPDATA flag may not be specified to the Queue or Recno access methods.

-

key
-The key DBT operated on. +
key
The key DBT operated on.
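A brief usage sketch (not part of the original page) ties the flags above together; the handle name, key/data contents, and error handling are illustrative assumptions, not text from this page.

#include <string.h>
#include <db.h>

/* Illustrative sketch: insert one key/data pair through a cursor with
 * DB_KEYFIRST, then overwrite the data item in place with DB_CURRENT.
 * "dbp" is assumed to be an open Btree DB handle. */
int
cursor_put_example(DB *dbp)
{
    DBC *dbcp;
    DBT key, data;
    int ret;

    if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0)
        return (ret);

    memset(&key, 0, sizeof(key));
    memset(&data, 0, sizeof(data));
    key.data = "fruit";
    key.size = sizeof("fruit");
    data.data = "apple";
    data.size = sizeof("apple");

    /* Insert the pair; the cursor is left positioned on it. */
    if ((ret = dbcp->c_put(dbcp, &key, &data, DB_KEYFIRST)) == 0) {
        /* Overwrite the data of the current pair; the key is ignored. */
        data.data = "pear";
        data.size = sizeof("pear");
        ret = dbcp->c_put(dbcp, &key, &data, DB_CURRENT);
    }

    (void)dbcp->c_close(dbcp);
    return (ret);
}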

Errors

The DBcursor->c_put method may fail and return one of the following non-zero errors:

-

-

DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve +
+
DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve a deadlock. -

DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable +
DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable to grant a lock in the allowed time.
-

-

EACCES
An attempt was made to modify a read-only database. +
+
EACCES
An attempt was made to modify a read-only database.
-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

EINVAL
If the DB_AFTER, DB_BEFORE or DB_CURRENT flags +
+
EINVAL
If the DB_AFTER, DB_BEFORE or DB_CURRENT flags were specified and the cursor has not been initialized; the DB_AFTER or DB_BEFORE flags were specified and a duplicate sort function has been specified; @@ -151,8 +143,8 @@ large to fit; an attempt was made to add a record to a secondary index; or if an invalid flag value or parameter was specified.
-

-

EPERM
Write attempted on read-only cursor when the DB_INIT_CDB flag was +
+
EPERM
Write attempted on read-only cursor when the DB_INIT_CDB flag was specified to DB_ENV->open.

@@ -164,6 +156,6 @@ specified to DB_ENV->open.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/dbm.html b/db/docs/api_c/dbm.html index f65a91292..ab510bea1 100644 --- a/db/docs/api_c/dbm.html +++ b/db/docs/api_c/dbm.html @@ -1,23 +1,22 @@ - + Berkeley DB: dbm/ndbm - + -

dbm/ndbm

API -Ref -
+Ref +


@@ -218,6 +217,6 @@ specified for other Berkeley DB and C library or system functions.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/dbt_bulk.html b/db/docs/api_c/dbt_bulk.html index 5d19705a7..ab66bd8dc 100644 --- a/db/docs/api_c/dbt_bulk.html +++ b/db/docs/api_c/dbt_bulk.html @@ -1,39 +1,38 @@ - - + + Berkeley DB: DBT - + -

DBT: Bulk Retrieval

API -Ref -
+Ref +


- +

If either of the DB_MULTIPLE or DB_MULTIPLE_KEY flags were specified to the DB->get or DBcursor->c_get methods, the data DBT returned by those interfaces will refer to a buffer that is filled with data. Access to that data is through the following macros:

-

-

DB_MULTIPLE_INIT
DB_MULTIPLE_INIT(void *pointer, DBT *data);
+
+
DB_MULTIPLE_INIT
DB_MULTIPLE_INIT(void *pointer, DBT *data);

Initialize the retrieval. The pointer parameter is a variable to be initialized. The data parameter is a DBT structure returned from a successful call to DB->get or DBcursor->c_get for which one of the DB_MULTIPLE or DB_MULTIPLE_KEY flags was specified.

-

DB_MULTIPLE_NEXT
DB_MULTIPLE_NEXT(void *pointer, DBT *data, void *retdata, size_t retdlen);
+
DB_MULTIPLE_NEXT
DB_MULTIPLE_NEXT(void *pointer, DBT *data, void *retdata, size_t retdlen);

The data parameter is a DBT structure returned from a successful call to DB->get or DBcursor->c_get for which the DB_MULTIPLE flag was specified. The pointer and @@ -45,7 +44,7 @@ element. When used with the Queue and Recno access methods, retdata will be set to NULL for deleted records. The pointer parameter is set to NULL if there are no more data elements in the returned set.

-

DB_MULTIPLE_KEY_NEXT
DB_MULTIPLE_KEY_NEXT(void *pointer, DBT *data,
+
DB_MULTIPLE_KEY_NEXT
DB_MULTIPLE_KEY_NEXT(void *pointer, DBT *data,
 	void *retkey, size_t retklen, void *retdata, size_t retdlen);

The data parameter is a DBT structure returned from a successful call to DBcursor->c_get for which the DB_MULTIPLE_KEY @@ -58,7 +57,7 @@ refer to the next data element in the returned set, and the retdlen parameter is set to the length, in bytes, of that data element. The pointer parameter is set to NULL if there are no more key/data pairs in the returned set.

-

DB_MULTIPLE_RECNO_NEXT
DB_MULTIPLE_RECNO_NEXT(void *pointer, DBT *data,
+
DB_MULTIPLE_RECNO_NEXT
DB_MULTIPLE_RECNO_NEXT(void *pointer, DBT *data,
 	db_recno_t recno, void * retdata, size_t retdlen);

The data parameter is a DBT structure returned from a successful call to DBcursor->c_get for which the DB_MULTIPLE_KEY @@ -77,6 +76,6 @@ pairs in the returned set.
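The macros are easiest to see in a loop. The following sketch (not part of the original page) assumes an open DB handle and an initialized key; the buffer size and error handling are illustrative.

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <db.h>

/* Illustrative sketch: retrieve all duplicate data items for "key" in a
 * single DB->get call, then walk the returned buffer with the
 * DB_MULTIPLE_* macros. */
int
bulk_get_example(DB *dbp, DBT *key)
{
    DBT data;
    void *p, *retdata;
    size_t retdlen;
    int ret;

    memset(&data, 0, sizeof(data));
    data.data = malloc(1024 * 1024);        /* bulk return buffer */
    data.ulen = 1024 * 1024;
    data.flags = DB_DBT_USERMEM;
    if (data.data == NULL)
        return (ENOMEM);

    if ((ret = dbp->get(dbp, NULL, key, &data, DB_MULTIPLE)) == 0)
        for (DB_MULTIPLE_INIT(p, &data);;) {
            DB_MULTIPLE_NEXT(p, &data, retdata, retdlen);
            if (p == NULL)                  /* no more data elements */
                break;
            /* retdata/retdlen now describe one data element. */
        }

    free(data.data);
    return (ret);
}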


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/dbt_class.html b/db/docs/api_c/dbt_class.html index 89a3771c9..9465c7e5e 100644 --- a/db/docs/api_c/dbt_class.html +++ b/db/docs/api_c/dbt_class.html @@ -1,27 +1,26 @@ - - + + Berkeley DB: DBT - + -

DBT: Key/Data Pairs

API -Ref -
+Ref +


- +

Storage and retrieval for the Berkeley DB access methods are based on key/data pairs. Both key and data items are represented by the DBT data structure. (The name DBT is a mnemonic for data @@ -51,22 +50,22 @@ store into the data structure element a pointer to a byte string of size bytes, and the memory to which the pointer refers will be allocated and managed by Berkeley DB.

The elements of the DBT structure are defined as follows:

-

-

void *data;
A pointer to a byte string. -

u_int32_t size;
The length of data, in bytes. -

u_int32_t ulen;
The size of the user's buffer (to which data refers), in bytes. +
+
void *data;
A pointer to a byte string. +
u_int32_t size;
The length of data, in bytes. +
u_int32_t ulen;
The size of the user's buffer (to which data refers), in bytes. This location is not written by the Berkeley DB functions.

Note that applications can determine the length of a record by setting the ulen field to 0 and checking the return value in the size field. See the DB_DBT_USERMEM flag for more information.

-

u_int32_t dlen;
The length of the partial record being read or written by the application, +
u_int32_t dlen;
The length of the partial record being read or written by the application, in bytes. See the DB_DBT_PARTIAL flag for more information. -

u_int32_t doff;
The offset of the partial record being read or written by the application, +
u_int32_t doff;
The offset of the partial record being read or written by the application, in bytes. See the DB_DBT_PARTIAL flag for more information. -

u_int32_t flags;
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
u_int32_t flags;
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_DBT_MALLOC
When this flag is set, Berkeley DB will allocate memory for the returned key +
+
DB_DBT_MALLOC
When this flag is set, Berkeley DB will allocate memory for the returned key or data item (using malloc(3), or the user-specified malloc function), and return a pointer to it in the data field of the key or data DBT structure. Because any allocated memory becomes the @@ -75,7 +74,7 @@ whether memory was allocated using the returned value of the data field.

It is an error to specify more than one of DB_DBT_MALLOC, DB_DBT_REALLOC, and DB_DBT_USERMEM.

-

DB_DBT_REALLOC
When this flag is set Berkeley DB will allocate memory for the returned key +
DB_DBT_REALLOC
When this flag is set Berkeley DB will allocate memory for the returned key or data item (using realloc(3), or the user-specified realloc function), and return a pointer to it in the data field of the key or data DBT structure. Because any allocated memory becomes the @@ -88,15 +87,16 @@ is that the latter will call realloc(3) instead of instead of the application doing repeated free/malloc calls.

It is an error to specify more than one of DB_DBT_MALLOC, DB_DBT_REALLOC, and DB_DBT_USERMEM.

-

DB_DBT_USERMEM
The data field of the key or data structure must refer to + +
DB_DBT_USERMEM
The data field of the key or data structure must refer to memory that is at least ulen bytes in length. If the length of the requested item is less than or equal to that number of bytes, the item is copied into the memory to which the data field refers. Otherwise, the size field is set to the length needed for the -requested item, and the error ENOMEM is returned. +requested item, and the error DB_BUFFER_SMALL is returned.

It is an error to specify more than one of DB_DBT_MALLOC, DB_DBT_REALLOC, and DB_DBT_USERMEM.

-

DB_DBT_PARTIAL
Do partial retrieval or storage of an item. If the calling application +
DB_DBT_PARTIAL
Do partial retrieval or storage of an item. If the calling application is doing a get, the dlen bytes starting doff bytes from the beginning of the retrieved data record are returned as if they comprised the entire record. If any or all of the specified bytes do @@ -132,6 +132,6 @@ those specified by the put call.
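A short sketch (not part of the original page) of the DB_DBT_USERMEM convention described above; the handle name and buffer sizes are illustrative, and the DB_BUFFER_SMALL return reflects the change above (older releases returned ENOMEM).

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <db.h>

/* Illustrative sketch: the caller owns the return buffer via
 * DB_DBT_USERMEM, growing it if Berkeley DB reports it is too small. */
int
usermem_get_example(DB *dbp, DBT *key)
{
    DBT data;
    void *bigger;
    int ret;

    memset(&data, 0, sizeof(data));
    if ((data.data = malloc(256)) == NULL)
        return (ENOMEM);
    data.ulen = 256;
    data.flags = DB_DBT_USERMEM;

    ret = dbp->get(dbp, NULL, key, &data, 0);
    if (ret == DB_BUFFER_SMALL) {           /* formerly ENOMEM */
        /* data.size holds the length the item actually needs. */
        if ((bigger = realloc(data.data, data.size)) == NULL) {
            free(data.data);
            return (ENOMEM);
        }
        data.data = bigger;
        data.ulen = data.size;
        ret = dbp->get(dbp, NULL, key, &data, 0);
    }

    free(data.data);
    return (ret);
}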


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/dbt_package.html b/db/docs/api_c/dbt_package.html new file mode 100644 index 000000000..788b8bc7d --- /dev/null +++ b/db/docs/api_c/dbt_package.html @@ -0,0 +1,23 @@ + + + + + + +Berkeley DB: DBT and Bulk Get Operations + + + + +

DBT and Bulk Get Operations

+ + + + + + + +
DBT and Bulk Get Operations | Description
DBT | Key/data pairs
DB_MULTIPLE_INIT | Initialize bulk get retrieval
DB_MULTIPLE_NEXT | Next bulk get retrieval
DB_MULTIPLE_KEY_NEXT | Next bulk get retrieval
DB_MULTIPLE_RECNO_NEXT | Next bulk get retrieval
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_c/env_class.html b/db/docs/api_c/env_class.html index 51ab0e4d8..c689ba8fe 100644 --- a/db/docs/api_c/env_class.html +++ b/db/docs/api_c/env_class.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: db_env_create - + -

db_env_create

API -Ref -
+Ref +


@@ -56,8 +55,8 @@ and 0 on success.
 

The flags parameter must be set to 0 or the following value: -

-

DB_RPCCLIENT
Create a client environment to connect to a server. +
+
DB_RPCCLIENT
Create a client environment to connect to a server.

The DB_RPCCLIENT flag indicates to the system that this environment is remote on a server. The use of this flag causes the environment methods to use functions that call a server instead of local functions. @@ -75,6 +74,6 @@ DB_ENV
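As a usage sketch (not part of the original page; error handling and names are illustrative), the handle is typically created, configured, and only then opened:

#include <stdio.h>
#include <db.h>

/* Illustrative sketch: create a DB_ENV handle and report the library
 * version.  Configuration calls would go between db_env_create and
 * DB_ENV->open. */
int
create_env_example(DB_ENV **dbenvp)
{
    DB_ENV *dbenv;
    int ret;

    if ((ret = db_env_create(&dbenv, 0)) != 0) {
        fprintf(stderr, "db_env_create: %s\n", db_strerror(ret));
        return (ret);
    }
    printf("Using %s\n", db_version(NULL, NULL, NULL));

    *dbenvp = dbenv;
    return (0);
}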

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_close.html b/db/docs/api_c/env_close.html index f6ed4b517..16b43b00b 100644 --- a/db/docs/api_c/env_close.html +++ b/db/docs/api_c/env_close.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->close - + -

DB_ENV->close

API -Ref -
+Ref +


@@ -36,11 +35,11 @@ handles must not be closed while database handles remain open, or
 transactions in the environment have not yet been committed or aborted.
 Specifically, this includes DB, DBC, DB_TXN,
 DB_LOGC and DB_MPOOLFILE handles.

-

Where the environment was initialized with the DB_INIT_LOCK flag, -calling DB_ENV->close does not release any locks still held by the -closing process, providing functionality for long-lived locks. -Processes that want to have all their locks -released can do so by issuing the appropriate DB_ENV->lock_vec call.

+

Where the environment was initialized with the DB_INIT_LOCK +flag, calling DB_ENV->close does not release any locks still held +by the closing process, providing functionality for long-lived locks. +Processes that want to have all their locks released can do so by +issuing the appropriate DB_ENV->lock_vec call.

Where the environment was initialized with the DB_INIT_MPOOL flag, calling DB_ENV->close implies calls to DB_MPOOLFILE->close for any remaining open files in the memory pool that were returned to this @@ -67,9 +66,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flags
-The flags parameter is currently unused, and must be set to 0. +
+
flags
The flags parameter is currently unused, and must be set to 0.
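A minimal teardown sketch (not part of the original page); the handle names and ordering follow the description above and are otherwise illustrative.

#include <db.h>

/* Illustrative sketch: close database handles before the environment
 * handle, as the text above requires, then discard the DB_ENV. */
int
shutdown_example(DB_ENV *dbenv, DB *dbp)
{
    int ret, t_ret;

    ret = dbp->close(dbp, 0);

    /* The flags parameter to DB_ENV->close must be 0. */
    if ((t_ret = dbenv->close(dbenv, 0)) != 0 && ret == 0)
        ret = t_ret;

    return (ret);
}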

Class

@@ -80,6 +78,6 @@ The flags parameter is currently unused, and must be set to 0.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_dbremove.html b/db/docs/api_c/env_dbremove.html index 739aa269c..3628f49aa 100644 --- a/db/docs/api_c/env_dbremove.html +++ b/db/docs/api_c/env_dbremove.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->dbremove - + -

DB_ENV->dbremove

API -Ref -
+Ref +


@@ -30,36 +29,34 @@ DB_ENV->dbremove(DB_ENV *dbenv, DB_TXN *txnid,
 

Description: DB_ENV->dbremove

The DB_ENV->dbremove method removes the database specified by the -file and database parameters. If no database is -specified, the underlying file represented by file is removed, -incidentally removing all of the databases it contained.

+file and database parameters. If no database +is specified, the underlying file represented by file is +removed, incidentally removing all of the databases it contained.

Applications should never remove databases with open DB handles, or in the case of removing a file, when any database in the file has an open handle. For example, some architectures do not permit the removal of files with open system handles. On these architectures, attempts to remove databases currently in use by any thread of control in the system -will fail.

+may fail.

The DB_ENV->dbremove method returns a non-zero error value on failure and 0 on success.

Parameters

-

-

database
-The database parameter is the database to be removed. -

file
-The file parameter is the physical file which contains the +
+
database
The database parameter is the database to be removed. +
file
The file parameter is the physical file which contains the database(s) to be removed. -

flags
-The flags parameter must be set to 0 or +

On Windows, the file argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters.

+
flags
The flags parameter must be set to 0 or the following value: -

-

DB_AUTO_COMMIT
Enclose the DB_ENV->dbremove call within a transaction. If the call succeeds, +
+
DB_AUTO_COMMIT
Enclose the DB_ENV->dbremove call within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, the operation will have made no changes.
-

txnid
-If the operation is to be transaction-protected, +
txnid
If the operation is to be transaction-protected, the txnid parameter is a transaction handle returned from DB_ENV->txn_begin; otherwise, NULL.
@@ -73,22 +70,18 @@ in the environment's DB_CONFIG file.

Errors

The DB_ENV->dbremove method may fail and return one of the following non-zero errors:

-

-

DB_FILEOPEN
An attempt was made to remove the underlying file and a database in the -file was currently open. -
-

-

DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve +
+
DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve a deadlock. -

DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable +
DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable to grant a lock in the allowed time.
-

-

EINVAL
If DB_ENV->dbremove called before DB_ENV->open was called; or if an +
+
EINVAL
If DB_ENV->dbremove called before DB_ENV->open was called; or if an invalid flag value or parameter was specified.
-

-

ENOENT
The file or directory does not exist. +
+
ENOENT
The file or directory does not exist.
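A short sketch (not part of the original page) of a transaction-protected removal; the file and database names are placeholders.

#include <db.h>

/* Illustrative sketch: remove one database from a file, enclosing the
 * operation in a transaction via DB_AUTO_COMMIT rather than passing an
 * explicit DB_TXN handle. */
int
dbremove_example(DB_ENV *dbenv)
{
    return (dbenv->dbremove(
        dbenv, NULL, "inventory.db", "stock", DB_AUTO_COMMIT));
}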

Class

@@ -99,6 +92,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_dbrename.html b/db/docs/api_c/env_dbrename.html index 18a22d411..56c92d1ad 100644 --- a/db/docs/api_c/env_dbrename.html +++ b/db/docs/api_c/env_dbrename.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->dbrename - + -

DB_ENV->dbrename

API -Ref -
+Ref +


@@ -40,30 +39,27 @@ the database environment, no database in the file may be open when the
 DB_ENV->dbrename method is called.  In particular, some architectures do
 not permit renaming files with open handles.  On these architectures,
 attempts to rename databases that are currently in use by any thread of
-control in the system will fail.

+control in the system may fail.

The DB_ENV->dbrename method returns a non-zero error value on failure and 0 on success.

Parameters

-

-

database
-The database parameter is the database to be removed. -

file
-The file parameter is the physical file which contains the -database(s) to be removed. -

flags
-The flags parameter must be set to 0 or +
+
database
The database parameter is the database to be renamed. +
file
The file parameter is the physical file which contains the +database(s) to be renamed. +

On Windows, the file argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters.

+
flags
The flags parameter must be set to 0 or the following value: -

-

DB_AUTO_COMMIT
Enclose the DB_ENV->dbrename call within a transaction. If the call succeeds, +
+
DB_AUTO_COMMIT
Enclose the DB_ENV->dbrename call within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, the operation will have made no changes.
-

newname
-The newname parameter is the new name of the database or file. -

txnid
-If the operation is to be transaction-protected, +
newname
The newname parameter is the new name of the database or file. +
txnid
If the operation is to be transaction-protected, the txnid parameter is a transaction handle returned from DB_ENV->txn_begin; otherwise, NULL.
@@ -77,22 +73,18 @@ in the environment's DB_CONFIG file.

Errors

The DB_ENV->dbrename method may fail and return one of the following non-zero errors:

-

-

DB_FILEOPEN
An attempt was made to rename the underlying file and a database in the -file was currently open. -
-

-

DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve +
+
DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve a deadlock. -

DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable +
DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable to grant a lock in the allowed time.
-

-

EINVAL
If DB_ENV->dbrename called before DB_ENV->open was called; or if an +
+
EINVAL
If DB_ENV->dbrename called before DB_ENV->open was called; or if an invalid flag value or parameter was specified.
-

-

ENOENT
The file or directory does not exist. +
+
ENOENT
The file or directory does not exist.
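A short sketch (not part of the original page); the file and database names are placeholders.

#include <db.h>

/* Illustrative sketch: rename the "stock" database inside a file to
 * "stock.old", using DB_AUTO_COMMIT instead of an explicit transaction
 * handle. */
int
dbrename_example(DB_ENV *dbenv)
{
    return (dbenv->dbrename(dbenv, NULL,
        "inventory.db", "stock", "stock.old", DB_AUTO_COMMIT));
}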

Class

@@ -103,6 +95,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_err.html b/db/docs/api_c/env_err.html index 604c10a02..c1353d2d5 100644 --- a/db/docs/api_c/env_err.html +++ b/db/docs/api_c/env_err.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->err - + -

DB_ENV->err

API -Ref -
+Ref +


@@ -31,22 +30,21 @@ DB_ENV->errx(DB_ENV *dbenv, const char *fmt, ...);
 


Description: DB_ENV->err

-

The DB_ENV->err, DB_ENV->errx, DB->err and DB->errx methods provide error-messaging functionality for applications written using the Berkeley DB library.

The DB_ENV->err method constructs an error message consisting of the following elements:

-

-

An optional prefix string
If no error callback function has been set using the +
+
An optional prefix string
If no error callback function has been set using the DB_ENV->set_errcall method, any prefix string specified using the DB_ENV->set_errpfx method, followed by two separating characters: a colon and a <space> character. -

An optional printf-style message
The supplied message fmt, if non-NULL, in which the +
An optional printf-style message
The supplied message fmt, if non-NULL, in which the ANSI C X3.159-1989 (ANSI C) printf function specifies how subsequent parameters are converted for output. -

A separator
Two separating characters: a colon and a <space> character. -

A standard error string
The standard system or Berkeley DB library error string associated with the +
A separator
Two separating characters: a colon and a <space> character. +
A standard error string
The standard system or Berkeley DB library error string associated with the error value, as returned by the db_strerror method.
@@ -59,20 +57,24 @@ parameters: any prefix string specified (see DB_ENV->set_errfile), the error message is written to that output stream.

If none of these output options has been configured, the error message -is written to stderr, the standard -error output stream.

+is written to stderr, the standard error output stream.

+

Parameters

+
+
error
The error parameter is the error value for which the +DB_ENV->err and DB->err methods will display an explanatory +string. +
fmt
The fmt parameter is an optional printf-style message to display. +

The DB_ENV->errx and DB->errx methods perform identically to the DB_ENV->err and DB->err methods, except that they do not append the final separator characters and standard error string to the error message.

Parameters

-

-

error
-The error parameter is the error value for which the +
+
error
The error parameter is the error value for which the DB_ENV->err and DB->err methods will display an explanatory string. -

fmt
-The fmt parameter is an optional printf-style message to display. +
fmt
The fmt parameter is an optional printf-style message to display.
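A brief sketch (not part of the original page) of the err/errx pair in use; the failing operation and message text are illustrative.

#include <db.h>

/* Illustrative sketch: report a failed operation through the
 * environment's configured error channels. */
void
report_example(DB_ENV *dbenv, DB *dbp, DBT *key, DBT *data)
{
    int ret;

    if ((ret = dbp->put(dbp, NULL, key, data, 0)) != 0) {
        /* Prefix, formatted message, and the db_strerror(ret) text. */
        dbenv->err(dbenv, ret, "put of key %.*s failed",
            (int)key->size, (char *)key->data);
        /* Message only; no separator or error string appended. */
        dbenv->errx(dbenv, "giving up");
    }
}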

Class

@@ -83,6 +85,6 @@ The fmt parameter is an optional printf-style message to display.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_list.html b/db/docs/api_c/env_list.html index 82a3237c7..e5a6fac5d 100644 --- a/db/docs/api_c/env_list.html +++ b/db/docs/api_c/env_list.html @@ -1,84 +1,49 @@ - + Berkeley DB: Berkeley DB: Database Environments and Related Methods - +

Berkeley DB: Database Environments and Related Methods

- + - + - - - - - - - - - - - - - - - - - - + + - - - - - + + + + - + - - + + - - - - - - - - - - - - - - - - - -
Database Environments and Related Methods | Description
Database Environment Operations | Description
db_env_create | Create an environment handle
DB_ENV->close | Close an environment
DB_ENV->dbremove | Remove a database
DB_ENV->dbrename | Rename a database
DB_ENV->err | Error message with error string
DB_ENV->errx | Error message
DB_ENV->lock_detect | Perform deadlock detection
DB_ENV->lock_get | Acquire a lock
DB_ENV->lock_id | Acquire a locker ID
DB_ENV->lock_id_free | Release a locker ID
DB_ENV->lock_put | Release a lock
DB_ENV->lock_stat | Return lock subsystem statistics
DB_ENV->lock_vec | Acquire/release locks
DB_ENV->log_archive | List log and database files
DB_ENV->log_cursor | Create a log cursor handle
DB_ENV->log_file | Map Log Sequence Numbers to log files
DB_ENV->log_flush | Flush log records
DB_ENV->log_put | Write a log record
DB_ENV->log_stat | Return log subsystem statistics
DB_ENV->memp_fcreate | Open a file in a memory pool
DB_ENV->memp_register | Register input/output functions for a file in a memory pool
DB_ENV->memp_stat | Return memory pool statistics
DB_ENV->memp_sync | Flush pages from a memory pool
DB_ENV->memp_trickle | Trickle flush pages from a memory pool
DB_ENV->get_home | Return environment's home directory
DB_ENV->get_open_flags | Return the flags with which the environment was opened
DB_ENV->open | Open an environment
DB_ENV->remove | Remove an environment
DB_ENV->rep_elect | Hold a replication election
DB_ENV->rep_process_message | Process a replication message
DB_ENV->rep_start | Configure an environment for replication
DB_ENV->rep_stat | Replication statistics
DB_ENV->set_alloc | Set local space allocation functions
DB_ENV->stat_print | Environment statistics
db_strerror | Error strings
db_version | Return version information
Environment Configuration
DB_ENV->set_app_dispatch | Configure application recovery
DB_ENV->set_cachesize | Set the environment cache size
DB_ENV->set_alloc | Set local space allocation functions
DB_ENV->set_data_dir | Set the environment data directory
DB_ENV->set_encrypt | Set the environment cryptographic key
DB_ENV->set_errcall | Set error message callback
DB_ENV->set_errfile | Set error message FILE
DB_ENV->set_errcall, DB_ENV->set_msgcall | Set error and informational message callbacks
DB_ENV->set_errfile, DB_ENV->set_msgfile | Set error and informational message FILE
DB_ENV->set_errpfx | Set error message prefix
DB_ENV->set_feedback | Set feedback callback
DB_ENV->set_flags | Environment configuration
DB_ENV->set_lg_bsize | Set log buffer size
DB_ENV->set_lg_dir | Set the environment logging directory
DB_ENV->set_lg_max | Set log file size
DB_ENV->set_lg_regionmax | Set logging region size
DB_ENV->set_lk_conflicts | Set lock conflicts matrix
DB_ENV->set_lk_detect | Set automatic deadlock detection
DB_ENV->set_lk_max_lockers | Set maximum number of lockers
DB_ENV->set_lk_max_locks | Set maximum number of locks
DB_ENV->set_lk_max_objects | Set maximum number of lock objects
DB_ENV->set_mp_mmapsize | Set maximum mapped-in database file size
DB_ENV->set_paniccall | Set panic callback
DB_ENV->set_rep_limit | Limit data sent in response to a single message
DB_ENV->set_rep_transport | Configure replication transport
DB_ENV->set_rpc_server | Establish an RPC server connection
DB_ENV->set_shm_key | Set system memory shared segment ID
DB_ENV->set_tas_spins | Set the number of test-and-set spins
DB_ENV->set_timeout | Set lock and transaction timeout
DB_ENV->set_tmp_dir | Set the environment temporary file directory
DB_ENV->set_tx_max | Set maximum number of transactions
DB_ENV->set_tx_timestamp | Set recovery timestamp
DB_ENV->set_verbose | Set verbose messages
DB_ENV->txn_begin | Begin a transaction
DB_ENV->txn_checkpoint | Checkpoint the transaction subsystem
DB_ENV->txn_recover | Distributed transaction recovery
DB_ENV->txn_stat | Return transaction subsystem statistics
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_open.html b/db/docs/api_c/env_open.html index c8c4eb6ae..9d9b9a6ed 100644 --- a/db/docs/api_c/env_open.html +++ b/db/docs/api_c/env_open.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->open - + -

DB_ENV->open

API -Ref -
+Ref +


@@ -34,8 +33,6 @@ DB_ENV->get_open_flags(DB_ENV *dbenv, u_int32_t *flagsp);
 


Description: DB_ENV->open

- -

The DB_ENV->open method opens a Berkeley DB environment. It provides a structure for creating a consistent environment for processes using one or more of the features of Berkeley DB.

@@ -46,16 +43,16 @@ If DB_ENV->open fails, the DB_ENV->close to discard the DB_ENV handle.

Parameters

-

-

db_home
-The db_home parameter is the database environment's home +
+
db_home
The db_home parameter is the database environment's home directory. For more information on db_home, and filename resolution in general, see Berkeley DB File Naming. The environment variable DB_HOME may be used as the path of the database home, as described in Berkeley DB File Naming. -

flags
-The flags parameter specifies the subsystems that are initialized +

On Windows, the db_home argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters.

+
flags
The flags parameter specifies the subsystems that are initialized and how the application's environment affects Berkeley DB file naming, among other things. The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one @@ -63,44 +60,44 @@ or more of the following values:

Because there are a large number of flags that can be specified, they have been grouped together by functionality. The first group of flags indicates which of the Berkeley DB subsystems should be initialized:

-

-

DB_JOINENV
Join an existing environment. This option allows applications to +
+
DB_JOINENV
Join an existing environment. This option allows applications to join an existing environment without knowing which Berkeley DB subsystems the environment supports. -

DB_INIT_CDB
Initialize locking for the Berkeley DB Concurrent Data Store +
DB_INIT_CDB
Initialize locking for the Berkeley DB Concurrent Data Store product. In this mode, Berkeley DB provides multiple reader/single writer access. The only other subsystem that should be specified with the DB_INIT_CDB flag is DB_INIT_MPOOL. -

DB_INIT_LOCK
Initialize the locking subsystem. This subsystem should be used when +
DB_INIT_LOCK
Initialize the locking subsystem. This subsystem should be used when multiple processes or threads are going to be reading and writing a Berkeley DB database, so that they do not interfere with each other. If all threads are accessing the database(s) read-only, locking is unnecessary. When the DB_INIT_LOCK flag is specified, it is usually necessary to run a deadlock detector, as well. See db_deadlock and DB_ENV->lock_detect for more information. -

DB_INIT_LOG
Initialize the logging subsystem. This subsystem should be used when +
DB_INIT_LOG
Initialize the logging subsystem. This subsystem should be used when recovery from application or system failure is necessary. If the log region is being created and log files are already present, the log files are reviewed; subsequent log writes are appended to the end of the log, rather than overwriting current log entries. -

DB_INIT_MPOOL
Initialize the shared memory buffer pool subsystem. This subsystem +
DB_INIT_MPOOL
Initialize the shared memory buffer pool subsystem. This subsystem should be used whenever an application is using any Berkeley DB access method. -

DB_INIT_REP
Initialize the replication subsystem. This subsystem +
DB_INIT_REP
Initialize the replication subsystem. This subsystem should be used whenever an application plans on using replication. The DB_INIT_REP flag requires the DB_INIT_TXN and DB_INIT_LOCK flags also be configured. -

DB_INIT_TXN
Initialize the transaction subsystem. This subsystem should be used +
DB_INIT_TXN
Initialize the transaction subsystem. This subsystem should be used when recovery and atomicity of multiple operations are important. The DB_INIT_TXN flag implies the DB_INIT_LOG flag.

The second group of flags govern what recovery, if any, is performed when the environment is initialized:

-

-

DB_RECOVER
Run normal recovery on this environment before opening it for normal +
+
DB_RECOVER
Run normal recovery on this environment before opening it for normal use. If this flag is set, the DB_CREATE flag must also be set because the regions will be removed and re-created. -

DB_RECOVER_FATAL
Run catastrophic recovery on this environment before opening it for +
DB_RECOVER_FATAL
Run catastrophic recovery on this environment before opening it for normal use. If this flag is set, the DB_CREATE flag must also be set because the regions will be removed and re-created.
@@ -129,14 +126,14 @@ is necessary to ensure that all necessary log files are present before running recovery. For further information, consult db_archive and db_recover.

The third group of flags govern file-naming extensions in the environment:

-

- -

DB_USE_ENVIRON
The Berkeley DB process' environment may be permitted to specify information +
+ +
DB_USE_ENVIRON
The Berkeley DB process' environment may be permitted to specify information to be used when naming files; see Berkeley DB File Naming. Because permitting users to specify which files are used can create security problems, environment information will be used in file naming for all users only if the DB_USE_ENVIRON flag is set. -

DB_USE_ENVIRON_ROOT
The Berkeley DB process' environment may be permitted to specify information +
DB_USE_ENVIRON_ROOT
The Berkeley DB process' environment may be permitted to specify information to be used when naming files; see Berkeley DB File Naming. Because permitting users to specify which files are used can create security problems, if the DB_USE_ENVIRON_ROOT flag is set, environment information will @@ -144,11 +141,11 @@ be used for file naming only for users with appropriate permissions (for example, users with a user-ID of 0 on UNIX systems).

Finally, there are a few additional unrelated flags:

-

-

DB_CREATE
Cause Berkeley DB subsystems to create any underlying files, as necessary. -

DB_LOCKDOWN
Lock shared Berkeley DB environment files and memory-mapped databases into +
+
DB_CREATE
Cause Berkeley DB subsystems to create any underlying files, as necessary. +
DB_LOCKDOWN
Lock shared Berkeley DB environment files and memory-mapped databases into memory. -

DB_PRIVATE
Specify that the environment will only be accessed by a single process +
DB_PRIVATE
Specify that the environment will only be accessed by a single process (although that process may be multithreaded). This flag has two effects on the Berkeley DB environment. First, all underlying data structures are allocated from per-process memory instead of from shared memory that is @@ -160,41 +157,51 @@ corruption and unpredictable behavior. For example, if both a server application and the Berkeley DB utility db_stat are expected to access the environment, the DB_PRIVATE flag should not be specified.

-

DB_SYSTEM_MEM
Allocate memory from system shared memory instead of from memory backed +
DB_SYSTEM_MEM
Allocate memory from system shared memory instead of from memory backed by the filesystem. See Shared Memory Regions for more information. -

DB_THREAD
Cause the DB_ENV handle returned by DB_ENV->open to be -free-threaded; that is, usable by multiple threads within a -single address space. +
DB_THREAD
Cause the DB_ENV handle returned by DB_ENV->open to be +free-threaded; that is, concurrently usable by multiple +threads in the address space. The DB_THREAD flag should be specified +if the DB_ENV handle will be concurrently used by multiple +threads of control or if multiple DB handles, opened within the database +environment, will be used concurrently.
-

mode
-On UNIX systems or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by -Berkeley DB are created with mode mode (as described in chmod(2)) and modified by the process' umask value at the time of creation -(see umask(2)). If mode is 0, Berkeley DB will use a default -mode of readable and writable by both owner and group. On Windows -systems, the mode parameter is ignored. The group ownership of created -files is based on the system and directory defaults, and is not further -specified by Berkeley DB. +
mode
On Windows systems, the mode parameter is ignored. +

On UNIX systems or in IEEE/ANSI Std 1003.1 (POSIX) environments, files created by Berkeley DB +are created with mode mode (as described in chmod(2)) +and modified by the process' umask value at the time of creation (see +umask(2)). Created files are owned by the process owner; the +group ownership of created files is based on the system and directory +defaults, and is not further specified by Berkeley DB. System shared memory +segments created by Berkeley DB are created with mode mode, unmodified +by the process' umask value. If mode is 0, Berkeley DB will use a +default mode of readable and writable by both owner and group.
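A typical transactional open might look like the following sketch (not part of the original page); the home directory, flag choice, and mode are illustrative assumptions.

#include <db.h>

/* Illustrative sketch: open a transactional environment in a home
 * directory, creating files with mode 0 (owner/group default) and
 * running normal recovery first. */
int
open_env_example(DB_ENV *dbenv)
{
    u_int32_t flags;

    flags = DB_CREATE |
        DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN |
        DB_RECOVER;

    return (dbenv->open(dbenv, "/var/myapp/dbhome", flags, 0));
}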

+

Errors

The DB_ENV->open method may fail and return one of the following non-zero errors:

-

-

EAGAIN
The shared memory region was locked and (repeatedly) unavailable. +
+
DB_VERSION_MISMATCH
The version of the Berkeley DB library doesn't match the version that created +the database environment. +
+
+
EAGAIN
The shared memory region was locked and (repeatedly) unavailable.
-

-

EINVAL
If the DB_THREAD flag was specified and fast mutexes are not +
+
EINVAL
If the DB_THREAD flag was specified and fast mutexes are not available for this architecture; The DB_HOME or TMPDIR environment variables were set, but empty; An incorrectly formatted NAME VALUE entry or line was found; or if an invalid flag value or parameter was specified.
-

-

ENOSPC
HP-UX only: a previously created Berkeley DB environment for this process still +
+
ENOSPC
HP-UX only: a previously created Berkeley DB environment for this process still exists.
-

-

ENOENT
The file or directory does not exist. +
+
ENOENT
The file or directory does not exist.

Description: DB_ENV->get_home

@@ -211,9 +218,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flagsp
-The DB_ENV->get_open_flags method returns the +
+
flagsp
The DB_ENV->get_open_flags method returns the open method flags in flagsp.

@@ -225,6 +231,6 @@ open method flags in flagsp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_remove.html b/db/docs/api_c/env_remove.html index 4c9c8dd5c..5de4edc7a 100644 --- a/db/docs/api_c/env_remove.html +++ b/db/docs/api_c/env_remove.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->remove - + -

DB_ENV->remove

API -Ref -
+Ref +


@@ -72,25 +71,25 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

db_home
-The db_home parameter names the database environment to be removed. -

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
+
db_home
The db_home parameter names the database environment to be removed. +

On Windows, the db_home argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters.

+
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_FORCE
If the DB_FORCE flag is set, the environment is removed, regardless +
+
DB_FORCE
If the DB_FORCE flag is set, the environment is removed, regardless of any processes that may still be using it, and no locks are acquired during this process. (Generally, the DB_FORCE flag is specified only when applications were unable to shut down cleanly, and there is a risk that an application may have died holding a Berkeley DB lock.) - -

DB_USE_ENVIRON
The Berkeley DB process' environment may be permitted to specify information + +
DB_USE_ENVIRON
The Berkeley DB process' environment may be permitted to specify information to be used when naming files; see Berkeley DB File Naming. Because permitting users to specify which files are used can create security problems, environment information will be used in file naming for all users only if the DB_USE_ENVIRON flag is set. -

DB_USE_ENVIRON_ROOT
The Berkeley DB process' environment may be permitted to specify information +
DB_USE_ENVIRON_ROOT
The Berkeley DB process' environment may be permitted to specify information to be used when naming files; see Berkeley DB File Naming. Because permitting users to specify which files are used can create security problems, if the DB_USE_ENVIRON_ROOT flag is set, environment information will @@ -101,11 +100,11 @@ example, users with a user-ID of 0 on UNIX systems).

Errors

The DB_ENV->remove method may fail and return one of the following non-zero errors:

-

-

EBUSY
The shared memory region was in use and the force flag was not set. +
+
EBUSY
The shared memory region was in use and the force flag was not set.
-

-

ENOENT
The file or directory does not exist. +
+
ENOENT
The file or directory does not exist.
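A short sketch (not part of the original page); the requirement to use a fresh handle is from the method's description, the rest is illustrative.

#include <db.h>

/* Illustrative sketch: discard an environment's region files once no
 * other process is using them.  A fresh DB_ENV handle is created for
 * the call and may not be accessed afterward, whether or not the call
 * succeeds. */
int
remove_env_example(const char *home)
{
    DB_ENV *dbenv;
    int ret;

    if ((ret = db_env_create(&dbenv, 0)) != 0)
        return (ret);
    return (dbenv->remove(dbenv, home, 0));
}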

Class

@@ -116,6 +115,6 @@ may fail and return one of the following non-zero errors:


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_alloc.html b/db/docs/api_c/env_set_alloc.html index cea238d08..18c00092a 100644 --- a/db/docs/api_c/env_set_alloc.html +++ b/db/docs/api_c/env_set_alloc.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_alloc - + -

DB_ENV->set_alloc

API -Ref -
+Ref +


@@ -68,21 +67,18 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

app_malloc
-The app_malloc parameter is the application-specified malloc +
+
app_malloc
The app_malloc parameter is the application-specified malloc function. -

app_realloc
-The app_realloc parameter is the application-specified realloc +
app_realloc
The app_realloc parameter is the application-specified realloc function. -

app_free
-The app_free parameter is the application-specified free function. +
app_free
The app_free parameter is the application-specified free function.
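A minimal sketch (not part of the original page); passing the C library allocator stands in for whatever wrappers an application actually uses.

#include <stdlib.h>
#include <db.h>

/* Illustrative sketch: route allocations Berkeley DB makes on behalf of
 * the application through the C library allocator.  Custom functions
 * with these same signatures could be substituted. */
int
set_alloc_example(DB_ENV *dbenv)
{
    return (dbenv->set_alloc(dbenv, malloc, realloc, free));
}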

Errors

The DB_ENV->set_alloc method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB_ENV->open was called; or if an +
+
EINVAL
If the method was called after DB_ENV->open was called; or if an invalid flag value or parameter was specified.

@@ -94,6 +90,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_app_dispatch.html b/db/docs/api_c/env_set_app_dispatch.html index 64131e42a..a9c425cef 100644 --- a/db/docs/api_c/env_set_app_dispatch.html +++ b/db/docs/api_c/env_set_app_dispatch.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_app_dispatch - + -

DB_ENV->set_app_dispatch

API -Ref -
+Ref +


@@ -46,26 +45,25 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

tx_recover
-The tx_recover parameter is the application's abort and recovery +
+
tx_recover
The tx_recover parameter is the application's abort and recovery function. The function takes four parameters: -

-

dbenv
The dbenv parameter is the enclosing database environment handle. -

log_rec
The log_rec parameter is a log record. -

lsn
The lsn parameter is a log sequence number. -

op
The op parameter is one of the following values: -

-

DB_TXN_BACKWARD_ROLL
The log is being read backward to determine which transactions have been +
+
dbenv
The dbenv parameter is the enclosing database environment handle. +
log_rec
The log_rec parameter is a log record. +
lsn
The lsn parameter is a log sequence number. +
op
The op parameter is one of the following values: +
+
DB_TXN_BACKWARD_ROLL
The log is being read backward to determine which transactions have been committed and to abort those operations that were not; undo the operation described by the log record. -

DB_TXN_FORWARD_ROLL
The log is being played forward; redo the operation described by the log +
DB_TXN_FORWARD_ROLL
The log is being played forward; redo the operation described by the log record. -

DB_TXN_ABORT
The log is being read backward during a transaction abort; undo the +
DB_TXN_ABORT
The log is being read backward during a transaction abort; undo the operation described by the log record. -

DB_TXN_APPLY
The log is being applied on a replica site; redo the operation +
DB_TXN_APPLY
The log is being applied on a replica site; redo the operation described by the log record. -

DB_TXN_PRINT
The log is being printed for debugging purposes; print the contents of +
DB_TXN_PRINT
The log is being printed for debugging purposes; print the contents of this log record in the desired format.

The DB_TXN_FORWARD_ROLL and DB_TXN_APPLY operations @@ -86,8 +84,8 @@ value outside of the Berkeley DB error name space on failure.
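A skeleton of the callback shape (not part of the original page); the body is intentionally empty and a real application would decode and act on its own log records.

#include <db.h>

/* Illustrative sketch: a do-nothing application dispatch function of
 * the required shape.  A real callback would decode log_rec and undo,
 * redo, or print the application's own log records depending on op. */
int
my_app_dispatch(DB_ENV *dbenv, DBT *log_rec, DB_LSN *lsn, db_recops op)
{
    switch (op) {
    case DB_TXN_ABORT:
    case DB_TXN_BACKWARD_ROLL:
        /* Undo the operation described by log_rec. */
        break;
    case DB_TXN_FORWARD_ROLL:
    case DB_TXN_APPLY:
        /* Redo the operation described by log_rec. */
        break;
    case DB_TXN_PRINT:
        /* Print the record for debugging. */
        break;
    default:
        break;
    }
    return (0);
}

/* Register the callback before DB_ENV->open is called. */
int
set_dispatch_example(DB_ENV *dbenv)
{
    return (dbenv->set_app_dispatch(dbenv, my_app_dispatch));
}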

Errors

The DB_ENV->set_app_dispatch method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB_ENV->open was called; or if an +
+
EINVAL
If the method was called after DB_ENV->open was called; or if an invalid flag value or parameter was specified.

@@ -99,6 +97,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_cachesize.html b/db/docs/api_c/env_set_cachesize.html index e32278ce0..b44edeb56 100644 --- a/db/docs/api_c/env_set_cachesize.html +++ b/db/docs/api_c/env_set_cachesize.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_cachesize - + -

DB_ENV->set_cachesize

API -Ref -
+Ref +


@@ -33,8 +32,6 @@ DB_ENV->get_cachesize(DB_ENV *dbenv,
 


Description: DB_ENV->set_cachesize

- -

Set the size of the shared memory buffer pool -- that is, the cache. The cache should be the size of the normal working data set of the application, with some small amount of additional memory for unusual @@ -44,7 +41,7 @@ pages accessed simultaneously, and is usually much larger.)

20KB. Any cache size less than 500MB is automatically increased by 25% to account for buffer pool overhead; cache sizes larger than 500MB are used as specified. The current maximum size of a single cache is 4GB. -(All sizes are in powers-of-two, that is, 256KB is 2^32 not 256,000.) +(All sizes are in powers-of-two, that is, 256KB is 2^18 not 256,000.) For information on tuning the Berkeley DB cache size, see Selecting a cache size.

It is possible to specify caches to Berkeley DB larger than 4GB and/or large @@ -77,19 +74,16 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

bytes
-The size of the cache is set to gbytes gigabytes plus bytes. -

gbytes
-The size of the cache is set to gbytes gigabytes plus bytes. -

ncache
-The ncache parameter is the number of caches to create. +
+
bytes
The size of the cache is set to gbytes gigabytes plus bytes. +
gbytes
The size of the cache is set to gbytes gigabytes plus bytes. +
ncache
The ncache parameter is the number of caches to create.
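A one-call sketch (not part of the original page); the 64MB figure is an illustrative choice, not a recommendation from this page.

#include <db.h>

/* Illustrative sketch: request a single 64MB cache before the
 * environment is opened (sizes under 500MB may be adjusted upward as
 * described above). */
int
set_cachesize_example(DB_ENV *dbenv)
{
    return (dbenv->set_cachesize(dbenv, 0, 64 * 1024 * 1024, 1));
}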

Errors

The DB_ENV->set_cachesize method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the specified cache size was impossibly small; +
+
EINVAL
If the specified cache size was impossibly small; the method was called after DB_ENV->open was called; or if an @@ -106,15 +100,12 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

bytesp
-The bytesp parameter references memory into which +
+
bytesp
The bytesp parameter references memory into which the additional bytes of memory in the cache is copied. -

gbytesp
-The gbytesp parameter references memory into which +
gbytesp
The gbytesp parameter references memory into which the gigabytes of memory in the cache is copied. -

ncachep
-The ncachep parameter references memory into which +
ncachep
The ncachep parameter references memory into which the number of caches is copied.

@@ -126,6 +117,6 @@ The ncachep parameter references memory into which

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_data_dir.html b/db/docs/api_c/env_set_data_dir.html index 9ae74a723..00876769f 100644 --- a/db/docs/api_c/env_set_data_dir.html +++ b/db/docs/api_c/env_set_data_dir.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_data_dir - + -

DB_ENV->set_data_dir

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_ENV->get_data_dirs(DB_ENV *dbenv, const char ***dirpp);
 


Description: DB_ENV->set_data_dir

-

Set the path of a directory to be used as the location of the access method database files. Paths specified to the DB->open function will be searched relative to this path. Paths set using this method @@ -39,11 +37,10 @@ are additive, and specifying more than one will result in each specified directory being searched for database files. If any directories are specified, created database files will always be created in the first path specified.

-

If no database directories are specified, database files can exist only -in the environment home directory. See Berkeley DB File Naming for more information.

-

For the greatest degree of recoverability from system or application -failure, database files and log files should be located on separate -physical devices.

+

If no database directories are specified, database files must be named +either by absolute paths or relative to the environment home directory. +See Berkeley DB File Naming for more +information.

The database environment's data directories may also be set using the environment's DB_CONFIG file. The syntax of the entry in that file is a single line with the string "set_data_dir", one or more whitespace characters, @@ -63,22 +60,22 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

dir
-The dir parameter is a directory to be used as a location for +
+
dir
The dir parameter is a directory to be used as a location for database files. +

On Windows, the dir argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters.
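A one-call sketch (not part of the original page); the directory name is a placeholder.

#include <db.h>

/* Illustrative sketch: search for (and create) database files in a
 * "data" directory resolved relative to the environment home. */
int
set_data_dir_example(DB_ENV *dbenv)
{
    return (dbenv->set_data_dir(dbenv, "data"));
}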

Errors

The DB_ENV->set_data_dir method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB_ENV->open was called; or if an +
+
EINVAL
If the method was called after DB_ENV->open was called; or if an invalid flag value or parameter was specified.

Description: DB_ENV->get_data_dirs

-

The DB_ENV->get_data_dirs method returns the NULL-terminated -array of directories.

+

The DB_ENV->get_data_dirs method returns the NULL-terminated array of directories.

The DB_ENV->get_data_dirs method may be called at any time during the life of the application.

The DB_ENV->get_data_dirs method @@ -86,11 +83,9 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

dirpp
-The DB_ENV->get_data_dirs method returns a reference to the -NULL-terminated -array of directories in dirpp. +
+
dirpp
The DB_ENV->get_data_dirs method returns a reference to the +NULL-terminated array of directories in dirpp.

Class

@@ -101,6 +96,6 @@ array of directories in dirpp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_encrypt.html b/db/docs/api_c/env_set_encrypt.html index 5f9a44c9f..696b74844 100644 --- a/db/docs/api_c/env_set_encrypt.html +++ b/db/docs/api_c/env_set_encrypt.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_encrypt - + -

DB_ENV->set_encrypt

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_ENV->get_encrypt_flags(DB_ENV *dbenv, u_int32_t *flagsp);
 


Description: DB_ENV->set_encrypt

-

Set the password used by the Berkeley DB library to perform encryption and decryption.

The DB_ENV->set_encrypt method configures a database environment, not only operations @@ -48,30 +46,28 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to 0 or +
+
flags
The flags parameter must be set to 0 or the following value: -

-

DB_ENCRYPT_AES
Use the Rijndael/AES (also known as the Advanced Encryption Standard +
+
DB_ENCRYPT_AES
Use the Rijndael/AES (also known as the Advanced Encryption Standard and Federal Information Processing Standard (FIPS) 197) algorithm for encryption or decryption.
-

passwd
-The passwd parameter is the password used to perform encryption +
passwd
The passwd parameter is the password used to perform encryption and decryption.
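A one-call sketch (not part of the original page); the password literal is a placeholder.

#include <db.h>

/* Illustrative sketch: configure AES encryption for the environment
 * before DB_ENV->open is called. */
int
set_encrypt_example(DB_ENV *dbenv)
{
    return (dbenv->set_encrypt(dbenv, "example passphrase",
        DB_ENCRYPT_AES));
}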

Errors

The DB_ENV->set_encrypt method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after +
+
EINVAL
If the method was called after DB_ENV->open was called; or if an invalid flag value or parameter was specified.
-

-

EOPNOTSUPP
Cryptography is not available in this Berkeley DB release. +
+
EOPNOTSUPP
Cryptography is not available in this Berkeley DB release.

Description: DB_ENV->get_encrypt_flags

@@ -83,9 +79,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flagsp
-The DB_ENV->get_encrypt_flags method returns the +
+
flagsp
The DB_ENV->get_encrypt_flags method returns the encryption flags in flagsp.

@@ -97,6 +92,6 @@ encryption flags in flagsp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_errcall.html b/db/docs/api_c/env_set_errcall.html index 68fd76a63..0114c970f 100644 --- a/db/docs/api_c/env_set_errcall.html +++ b/db/docs/api_c/env_set_errcall.html @@ -1,31 +1,30 @@ - - + + Berkeley DB: DB_ENV->set_errcall - + -

DB_ENV->set_errcall

API -Ref -
+Ref +


 #include <db.h>
 

void -DB_ENV->set_errcall(DB_ENV *dbenv, - void (*db_errcall_fcn)(const char *errpfx, char *msg)); +DB_ENV->set_errcall(DB_ENV *dbenv, void (*db_errcall_fcn) + (const DB_ENV *dbenv, const char *errpfx, const char *msg));


Description: DB_ENV->set_errcall

@@ -39,6 +38,7 @@ In some cases, when an error occurs, Berkeley DB will call db_errcall_fcn with additional error information. It is up to the db_errcall_fcn function to display the error message in an appropriate manner.

+

Setting db_errcall_fcn to NULL unconfigures the callback interface.

Alternatively, you can use the DB->set_errfile or DB_ENV->set_errfile methods to display the additional information via a C library FILE *.

@@ -48,14 +48,14 @@ as during application debugging.

The DB_ENV->set_errcall method may be called at any time during the life of the application.

Parameters

-

-

db_errcall_fcn
-The db_errcall_fcn parameter is the application-specified error -reporting function. The function takes two parameters: -

-

errpfx
The errpfx parameter is the prefix string (as previously set by +
+
db_errcall_fcn
The db_errcall_fcn parameter is the application-specified error +reporting function. The function takes three parameters: +
+
dbenv
The dbenv parameter is the enclosing database environment. +
errpfx
The errpfx parameter is the prefix string (as previously set by DB->set_errpfx or DB_ENV->set_errpfx). -

msg
The msg parameter is the error message string. +
msg
The msg parameter is the error message string.
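A callback sketch (not part of the original page) matching the three-parameter signature shown above; the prefix string and output format are illustrative.

#include <stdio.h>
#include <db.h>

/* Illustrative sketch: an error callback that writes to stderr using
 * the configured prefix, if any. */
void
my_errcall(const DB_ENV *dbenv, const char *errpfx, const char *msg)
{
    fprintf(stderr, "%s: %s\n", errpfx == NULL ? "bdb" : errpfx, msg);
}

void
set_errcall_example(DB_ENV *dbenv)
{
    dbenv->set_errpfx(dbenv, "myapp");
    dbenv->set_errcall(dbenv, my_errcall);
}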

@@ -67,6 +67,6 @@ reporting function. The function takes two parameters:

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_errfile.html b/db/docs/api_c/env_set_errfile.html index b84eeeec8..a56bef79b 100644 --- a/db/docs/api_c/env_set_errfile.html +++ b/db/docs/api_c/env_set_errfile.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_errfile - + -

DB_ENV->set_errfile

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_ENV->get_errfile(DB_ENV *dbenv, FILE **errfilep);
 


Description: DB_ENV->set_errfile

- When an error occurs in the Berkeley DB library, a Berkeley DB error or an error return value is returned by the interface. In some cases, however, the errno value may be insufficient to completely describe @@ -45,32 +43,22 @@ an additional error message to the specified file reference.

(":") (if a prefix string was previously specified using DB->set_errpfx or DB_ENV->set_errpfx), an error string, and a trailing <newline> character.

+

Setting errfile to NULL unconfigures the interface.

This error logging enhancement does not slow performance or significantly increase application size, and may be run during normal operation as well as during application debugging.

The DB_ENV->set_errfile method may be called at any time during the life of the application.

Parameters

-

-

errfile
-The errfile parameter is a C library FILE * to be used for +
+
errfile
The errfile parameter is a C library FILE * to be used for displaying additional Berkeley DB error information.
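A one-call sketch (not part of the original page); the log file path is a placeholder.

#include <stdio.h>
#include <db.h>

/* Illustrative sketch: send additional error messages to a log file,
 * falling back to stderr if it cannot be opened. */
void
set_errfile_example(DB_ENV *dbenv)
{
    FILE *fp = fopen("/var/myapp/db_err.log", "a");

    dbenv->set_errfile(dbenv, fp == NULL ? stderr : fp);
}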

Description: DB_ENV->get_errfile

-

The DB_ENV->get_errfile method returns the FILE *.

+

The DB_ENV->get_errfile method returns the FILE *.

The DB_ENV->get_errfile method may be called at any time during the life of the application.

-

The DB_ENV->get_errfile method -returns a non-zero error value on failure -and 0 on success. -

-

Parameters

-

-

errfilep
-The DB_ENV->get_errfile method returns the -FILE * in errfilep. -

Class

DB_ENV @@ -80,6 +68,6 @@ FILE * in errfilep.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_errpfx.html b/db/docs/api_c/env_set_errpfx.html index f1853d87f..b6b8bca0e 100644 --- a/db/docs/api_c/env_set_errpfx.html +++ b/db/docs/api_c/env_set_errpfx.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_errpfx - + -

DB_ENV->set_errpfx

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_ENV->get_errpfx(DB_ENV *dbenv, const char **errpfxp);
 


Description: DB_ENV->set_errpfx

-

Set the prefix string that appears before error messages issued by Berkeley DB.

The DB->set_errpfx and DB_ENV->set_errpfx methods do not copy the memory to which the errpfx parameter refers; rather, they @@ -42,9 +40,8 @@ closed.

The DB_ENV->set_errpfx method may be called at any time during the life of the application.

Parameters

-

-

errpfx
-The errpfx parameter is the application-specified error prefix +
+
errpfx
The errpfx parameter is the application-specified error prefix for additional error messages.

@@ -57,9 +54,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

errpfxp
-The DB_ENV->get_errpfx method returns a reference to the +
+
errpfxp
The DB_ENV->get_errpfx method returns a reference to the error prefix in errpfxp.

@@ -71,6 +67,6 @@ error prefix in errpfxp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_feedback.html b/db/docs/api_c/env_set_feedback.html index 32c199f2e..1183ec5da 100644 --- a/db/docs/api_c/env_set_feedback.html +++ b/db/docs/api_c/env_set_feedback.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_feedback - + -

DB_ENV->set_feedback

API -Ref -
+Ref +


@@ -45,20 +44,19 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

db_feedback_fcn
-The db_feedback_fcn parameter is the application-specified +
+
db_feedback_fcn
The db_feedback_fcn parameter is the application-specified feedback function called to report Berkeley DB operation progress. The callback function must take three parameters: -

-

dbenv
The dbenv parameter is a reference to the enclosing database +
+
dbenv
The dbenv parameter is a reference to the enclosing database environment. -

opcode
The opcode parameter is an operation code. The opcode +
opcode
The opcode parameter is an operation code. The opcode parameter may take on any of the following values: -

-

DB_RECOVER
The environment is being recovered. +
+
DB_RECOVER
The environment is being recovered.
-

percent
The percent parameter is the percent of the operation that has +
percent
The percent parameter is the percent of the operation that has been completed, specified as an integer value between 0 and 100.
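As a sketch of the callback described above (function name hypothetical; dbenv assumed created with db_env_create), a progress reporter for recovery might look like this:

    #include <stdio.h>
    #include <db.h>

    /* Hypothetical progress callback; DB_RECOVER is the only opcode listed above. */
    static void
    my_feedback(DB_ENV *dbenv, int opcode, int percent)
    {
        if (opcode == DB_RECOVER)
            fprintf(stderr, "recovery %d%% complete\n", percent);
    }

    /* ... after db_env_create(&dbenv, 0) has succeeded ... */
    dbenv->set_feedback(dbenv, my_feedback);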
@@ -71,6 +69,6 @@ been completed, specified as an integer value between 0 and 100.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_flags.html b/db/docs/api_c/env_set_flags.html index 34e6c3858..10f0642fc 100644 --- a/db/docs/api_c/env_set_flags.html +++ b/db/docs/api_c/env_set_flags.html @@ -1,23 +1,22 @@ - + Berkeley DB: DB_ENV->set_flags - + -

DB_ENV->set_flags

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_ENV->get_flags(DB_ENV *dbenv, u_int32_t *flagsp);
 


Description: DB_ENV->set_flags

-

Configure a database environment.

The database environment's flag values may also be set using the environment's DB_CONFIG file. The syntax of the entry in that file is a @@ -46,12 +44,11 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flags
-The flags parameter must be set by bitwise inclusively OR'ing together one or more +
+
flags
The flags parameter must be set by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_AUTO_COMMIT
If set, operations for which no explicit transaction handle was +
+
DB_AUTO_COMMIT
If set, operations for which no explicit transaction handle was specified, and which modify databases in the database environment, will be automatically enclosed within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, @@ -65,8 +62,8 @@ or the flag should be specified in the DB_CONFIG configuration file.

The DB_AUTO_COMMIT flag may be used to configure Berkeley DB at any time during the life of the application.

- -

DB_CDB_ALLDB
If set, Berkeley DB Concurrent Data Store applications will perform locking on an environment-wide + +
DB_CDB_ALLDB
If set, Berkeley DB Concurrent Data Store applications will perform locking on an environment-wide basis rather than on a per-database basis.

Calling DB_ENV->set_flags with the DB_CDB_ALLDB flag only affects the specified DB_ENV handle (and any other Berkeley DB handles opened @@ -77,8 +74,8 @@ or the flag should be specified in the DB_CONFIG configuration file.

The DB_CDB_ALLDB flag may be used to configure Berkeley DB only before the DB_ENV->open method is called.

- -

DB_DIRECT_DB
Turn off system buffering of Berkeley DB database files to avoid double caching. + +
DB_DIRECT_DB
Turn off system buffering of Berkeley DB database files to avoid double caching.

Calling DB_ENV->set_flags with the DB_DIRECT_DB flag only affects the specified DB_ENV handle (and any other Berkeley DB handles opened within the scope of that handle). @@ -88,8 +85,8 @@ or the flag should be specified in the DB_CONFIG configuration file.

The DB_DIRECT_DB flag may be used to configure Berkeley DB at any time during the life of the application.

- -

DB_DIRECT_LOG
Turn off system buffering of Berkeley DB log files to avoid double caching. + +
DB_DIRECT_LOG
Turn off system buffering of Berkeley DB log files to avoid double caching.

Calling DB_ENV->set_flags with the DB_DIRECT_LOG flag only affects the specified DB_ENV handle (and any other Berkeley DB handles opened within the scope of that handle). @@ -99,21 +96,57 @@ or the flag should be specified in the DB_CONFIG configuration file.

The DB_DIRECT_LOG flag may be used to configure Berkeley DB at any time during the life of the application.

- -

DB_LOG_AUTOREMOVE
If set, Berkeley DB will automatically remove log files that are no longer -needed. Automatic log file removal is likely to make catastrophic -recovery impossible. -

Calling DB_ENV->set_flags with the DB_LOG_AUTOREMOVE flag only affects + +

DB_DSYNC_LOG
Configure Berkeley DB to flush log writes to the backing disk before returning +from the write system call, rather than flushing log writes explicitly +in a separate system call. This is only available on some systems (for +example, systems supporting the IEEE/ANSI Std 1003.1 (POSIX) standard O_DSYNC flag, +or systems supporting the Win32 FILE_FLAG_WRITE_THROUGH flag). This +configuration may result in inaccurate file modification times and other +file-level information for Berkeley DB log files. This configuration may +offer a performance increase on some systems and a performance decrease +on others. +

Calling DB_ENV->set_flags with the DB_DSYNC_LOG flag only affects the specified DB_ENV handle (and any other Berkeley DB handles opened within the scope of that handle). For consistent behavior across the environment, all DB_ENV -handles opened in the environment must either set the DB_LOG_AUTOREMOVE flag +handles opened in the environment must either set the DB_DSYNC_LOG flag or the flag should be specified in the DB_CONFIG configuration file.

+

The DB_DSYNC_LOG flag may be used to configure Berkeley DB at any time during +the life of the application.

+ +
DB_LOG_AUTOREMOVE
If set, Berkeley DB will automatically remove log files that are no longer +needed. Automatic log file removal is likely to make catastrophic +recovery impossible. +

Calling DB_ENV->set_flags with the DB_LOG_AUTOREMOVE flag affects the +database environment, including all threads of control accessing the +database environment.

The DB_LOG_AUTOREMOVE flag may be used to configure Berkeley DB at any time during the life of the application.

+ +
DB_LOG_INMEMORY
If set, maintain transaction logs in memory rather than on disk. This +means that transactions exhibit the ACI (atomicity, consistency, and +isolation) properties, but not D (durability); that is, database +integrity will be maintained, but if the application or system fails, +integrity will not persist. All database files must be verified and/or +restored from a replication group master or archival backup after +application or system failure. +

When in-memory logs are configured and no more log buffer space is +available, Berkeley DB methods may return an additional error value, +DB_LOG_BUFFER_FULL. When choosing log buffer and file sizes +for in-memory logs, applications should ensure the in-memory log buffer +size is large enough that no transaction will ever span the entire +buffer, and avoid a state where the in-memory buffer is full and no +space can be freed because a transaction that started in the first log +"file" is still active.

+

Calling DB_ENV->set_flags with the DB_LOG_INMEMORY flag affects the +database environment, including all threads of control accessing the +database environment.

+

The DB_LOG_INMEMORY flag may be used to configure Berkeley DB only before the +DB_ENV->open method is called.

-

DB_NOLOCKING
If set, Berkeley DB will grant all requested mutual exclusion mutexes and +
DB_NOLOCKING
If set, Berkeley DB will grant all requested mutual exclusion mutexes and database locks without regard for their actual availability. This functionality should never be used for purposes other than debugging.

Calling DB_ENV->set_flags with the DB_NOLOCKING flag only affects @@ -123,7 +156,7 @@ within the scope of that handle).

The DB_NOLOCKING flag may be used to configure Berkeley DB at any time during the life of the application.

-

DB_NOMMAP
If set, Berkeley DB will copy read-only database files into the local cache +
DB_NOMMAP
If set, Berkeley DB will copy read-only database files into the local cache instead of potentially mapping them into process memory (see the description of the DB_ENV->set_mp_mmapsize method for further information). @@ -137,7 +170,7 @@ file.

The DB_NOMMAP flag may be used to configure Berkeley DB at any time during the life of the application.

-

DB_NOPANIC
If set, Berkeley DB will ignore any panic state in the database environment. +
DB_NOPANIC
If set, Berkeley DB will ignore any panic state in the database environment. (Database environments in a panic state normally refuse all attempts to call Berkeley DB functions, returning DB_RUNRECOVERY.) This functionality should never be used for purposes other than debugging. @@ -147,7 +180,7 @@ within the scope of that handle).

The DB_NOPANIC flag may be used to configure Berkeley DB at any time during the life of the application.

-

DB_OVERWRITE
Overwrite files stored in encrypted formats before deleting them. Berkeley DB +
DB_OVERWRITE
Overwrite files stored in encrypted formats before deleting them. Berkeley DB overwrites files using alternating 0xff, 0x00 and 0xff byte patterns. For file overwriting to be effective, the underlying file must be stored on a fixed-block filesystem. Systems with journaling or logging filesystems @@ -160,7 +193,7 @@ within the scope of that handle).

The DB_OVERWRITE flag may be used to configure Berkeley DB at any time during the life of the application.

-

DB_PANIC_ENVIRONMENT
If set, Berkeley DB will set the panic state for the database environment. +
DB_PANIC_ENVIRONMENT
If set, Berkeley DB will set the panic state for the database environment. (Database environments in a panic state normally refuse all attempts to call Berkeley DB functions, returning DB_RUNRECOVERY.) This flag may not be specified using the environment's DB_CONFIG file. This @@ -172,7 +205,7 @@ database environment.

The DB_PANIC_ENVIRONMENT flag may be used to configure Berkeley DB at any time during the life of the application.

-

DB_REGION_INIT
In some applications, the expense of page-faulting the underlying shared +
DB_REGION_INIT
In some applications, the expense of page-faulting the underlying shared memory regions can affect performance. (For example, if the page-fault occurs while holding a lock, other lock requests can convoy, and overall throughput may decrease.) If set, Berkeley DB will page-fault shared regions @@ -190,7 +223,7 @@ or the flag should be specified in the DB_CONFIG configuration file.

The DB_REGION_INIT flag may be used to configure Berkeley DB at any time during the life of the application.

-

DB_TIME_NOTGRANTED
If set, database calls timing out based on lock or transaction timeout +
DB_TIME_NOTGRANTED
If set, database calls timing out based on lock or transaction timeout values will return DB_LOCK_NOTGRANTED instead of DB_LOCK_DEADLOCK. This allows applications to distinguish between operations which have @@ -205,7 +238,7 @@ file.

The DB_TIME_NOTGRANTED flag may be used to configure Berkeley DB at any time during the life of the application.

-

DB_TXN_NOSYNC
If set, Berkeley DB will not write or synchronously flush the log on transaction +
DB_TXN_NOSYNC
If set, Berkeley DB will not write or synchronously flush the log on transaction commit. This means that transactions exhibit the ACI (atomicity, consistency, and isolation) properties, but not D (durability); that is, database @@ -224,26 +257,7 @@ or the flag should be specified in the DB_CONFIG configuration file.

The DB_TXN_NOSYNC flag may be used to configure Berkeley DB at any time during the life of the application.

-

DB_TXN_NOT_DURABLE
If set, Berkeley DB will not write log records. This means that -transactions exhibit the ACI (atomicity, consistency, and isolation) -properties, but not D (durability); that is, database integrity will -be maintained, but if the application or system fails, integrity will -not persist. All database files must be verified and/or restored from -backup after a failure. In order to ensure integrity after -application shut down, all database handles must be closed without -specifying DB_NOSYNC, or all database changes must be flushed -from the database environment cache using -either the DB_ENV->txn_checkpoint or DB_ENV->memp_sync methods. -

Calling DB_ENV->set_flags with the DB_TXN_NOT_DURABLE flag only affects -the specified DB_ENV handle (and any other Berkeley DB handles opened -within the scope of that handle). -For consistent behavior across the environment, all DB_ENV -handles opened in the environment must either set the DB_TXN_NOT_DURABLE flag -or the flag should be specified in the DB_CONFIG configuration -file.

The DB_TXN_NOT_DURABLE flag may be used to configure Berkeley DB at any time during -the life of the application.

- -

DB_TXN_WRITE_NOSYNC
If set, Berkeley DB will write, but will not synchronously flush, the log on +
DB_TXN_WRITE_NOSYNC
If set, Berkeley DB will write, but will not synchronously flush, the log on transaction commit. This means that transactions exhibit the ACI (atomicity, consistency, and isolation) properties, but not D (durability); that is, database @@ -261,8 +275,8 @@ or the flag should be specified in the DB_CONFIG configuration file.

The DB_TXN_WRITE_NOSYNC flag may be used to configure Berkeley DB at any time during the life of the application.

- -

DB_YIELDCPU
If set, Berkeley DB will yield the processor immediately after each page or + +
DB_YIELDCPU
If set, Berkeley DB will yield the processor immediately after each page or mutex acquisition. This functionality should never be used for purposes other than stress testing.

Calling DB_ENV->set_flags with the DB_YIELDCPU flag only affects @@ -275,16 +289,14 @@ file.

The DB_YIELDCPU flag may be used to configure Berkeley DB at any time during the life of the application.

-

onoff
-If the onoff parameter is -zero, -the specified flags are cleared; otherwise they are set. +
onoff
If the onoff parameter is zero, the specified flags are cleared; +otherwise they are set.

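A brief sketch of how the flags and the onoff parameter are used together (dbenv assumed created with db_env_create; error handling omitted; the flag choices are illustrative only):

    #include <db.h>

    u_int32_t flags;

    dbenv->set_flags(dbenv, DB_TXN_NOSYNC, 1);      /* set a flag     */
    dbenv->set_flags(dbenv, DB_TXN_NOSYNC, 0);      /* clear it again */

    /* DB_LOG_INMEMORY may only be configured before DB_ENV->open. */
    dbenv->set_flags(dbenv, DB_LOG_INMEMORY, 1);

    /* Read back the currently configured flags. */
    dbenv->get_flags(dbenv, &flags);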
Errors

The DB_ENV->set_flags method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -297,9 +309,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flagsp
-The DB_ENV->get_flags method returns the +
+
flagsp
The DB_ENV->get_flags method returns the configuration flags in flagsp.

@@ -311,6 +322,6 @@ configuration flags in flagsp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_lg_bsize.html b/db/docs/api_c/env_set_lg_bsize.html index 9195f89e0..437ea9739 100644 --- a/db/docs/api_c/env_set_lg_bsize.html +++ b/db/docs/api_c/env_set_lg_bsize.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_lg_bsize - + -

DB_ENV->set_lg_bsize

API -Ref -
+Ref +


@@ -31,16 +30,25 @@ DB_ENV->get_lg_bsize(DB_ENV *dbenv, u_int32_t *lg_bsizep);
 


Description: DB_ENV->set_lg_bsize

- -

Set the size of the in-memory log buffer, in bytes. By default, or if -the value is set to 0, a size of 32K is used. The size of the log file -(see DB_ENV->set_lg_max) must be at least four times the size of -the in-memory log buffer.

-

Log information is stored in-memory until the storage space fills up -or transaction commit forces the information to be flushed to stable -storage. In the presence of long-running transactions or transactions -producing large amounts of data, larger buffer sizes can increase -throughput.

+

Set the size of the in-memory log buffer, in bytes.

+

When the logging subsystem is configured for on-disk logging, the +default size of the in-memory log buffer is 32KB. Log information is +stored in-memory until the storage space fills up or transaction commit +forces the information to be flushed to stable storage. In the presence +of long-running transactions or transactions producing large amounts of +data, larger buffer sizes can increase throughput.

+

When the logging subsystem is configured for in-memory logging, the +default size of the in-memory log buffer is 1MB. Log information is +stored in-memory until the storage space fills up or transaction abort +or commit frees up the memory for new transactions. In the presence of +long-running transactions or transactions producing large amounts of +data, the buffer size must be sufficient to hold all log information +that can accumulate during the longest running transaction. When +choosing log buffer and file sizes for in-memory logs, applications +should ensure the in-memory log buffer size is large enough that no +transaction will ever span the entire buffer, and avoid a state where +the in-memory buffer is full and no space can be freed because a +transaction that started in the first log "file" is still active.

The database environment's log buffer size may also be set using the environment's DB_CONFIG file. The syntax of the entry in that file is a single line with the string "set_lg_bsize", one or more whitespace characters, @@ -61,18 +69,15 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

lg_bsize
-The lg_bsize parameter is the size of the in-memory log buffer, +
+
lg_bsize
The lg_bsize parameter is the size of the in-memory log buffer, in bytes.

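A configuration sketch for the on-disk case (value illustrative; dbenv assumed created with db_env_create and not yet opened; error handling omitted): enlarge the log buffer for a workload with long-running or data-heavy transactions.

    #include <db.h>

    /* Quadruple the default 32KB on-disk log buffer. */
    dbenv->set_lg_bsize(dbenv, 128 * 1024);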
Errors

The DB_ENV->set_lg_bsize method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB_ENV->open was called; -The size of the log file is less than four times the size of the in-memory -log buffer; or if an +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -85,9 +90,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

lg_bsizep
-The DB_ENV->get_lg_bsize method returns the +
+
lg_bsizep
The DB_ENV->get_lg_bsize method returns the size of the log buffer, in bytes in lg_bsizep.

@@ -99,6 +103,6 @@ size of the log buffer, in bytes in lg_bsizep.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_lg_dir.html b/db/docs/api_c/env_set_lg_dir.html index c93872f1f..e186755af 100644 --- a/db/docs/api_c/env_set_lg_dir.html +++ b/db/docs/api_c/env_set_lg_dir.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_lg_dir - + -

DB_ENV->set_lg_dir

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_ENV->get_lg_dir(DB_ENV *dbenv, const char **dirp);
 


Description: DB_ENV->set_lg_dir

-

The path of a directory to be used as the location of logging files. Log files created by the Log Manager subsystem will be created in this directory.

@@ -61,15 +59,16 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

dir
-The dir parameter is the directory used to store the logging files. +
+
dir
The dir parameter is the directory used to store the logging files. +

On Windows, the dir argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters.

Errors

The DB_ENV->set_lg_dir method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB_ENV->open was called; or if an +
+
EINVAL
If the method was called after DB_ENV->open was called; or if an invalid flag value or parameter was specified.

@@ -82,9 +81,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

dirp
-The DB_ENV->get_lg_dir method returns a reference to the +
+
dirp
The DB_ENV->get_lg_dir method returns a reference to the log directory in dirp.

@@ -96,6 +94,6 @@ log directory in dirp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_lg_max.html b/db/docs/api_c/env_set_lg_max.html index b22b0a5bf..64c7e09f4 100644 --- a/db/docs/api_c/env_set_lg_max.html +++ b/db/docs/api_c/env_set_lg_max.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_lg_max - + -

DB_ENV->set_lg_max

API -Ref -
+Ref +


@@ -31,15 +30,25 @@ DB_ENV->get_lg_max(DB_ENV *dbenv, u_int32_t *lg_maxp);
 


Description: DB_ENV->set_lg_max

- -

Set the maximum size of a single file in the log, in bytes. By default, -or if the lg_max parameter is set to 0, a size of 10MB is used. -Because DB_LSN file offsets are unsigned four-byte values, the -set value may not be larger than the maximum unsigned four-byte value. -The size of the log file must be at least four times the size of the -in-memory log buffer (see DB_ENV->set_lg_bsize).

-

See Log File Limits -for more information.

+

Set the maximum size of a single file in the log, in bytes. Because +DB_LSN file offsets are unsigned four-byte values, the set +value may not be larger than the maximum unsigned four-byte value.

+

When the logging subsystem is configured for on-disk logging, the +default size of a log file is 10MB.

+

When the logging subsystem is configured for in-memory logging, the +default size of a log file is 256KB. In addition, the configured log +buffer size must be larger than the log file size. (The logging +subsystem divides memory configured for in-memory log records into +"files", as database environments configured for in-memory log records +may exchange log records with other members of a replication group, and +those members may be configured to store log records on-disk.) When +choosing log buffer and file sizes for in-memory logs, applications +should ensure the in-memory log buffer size is large enough that no +transaction will ever span the entire buffer, and avoid a state where +the in-memory buffer is full and no space can be freed because a +transaction that started in the first log "file" is still active.

+

See Log File Limits for more +information.

The database environment's log file size may also be set using the environment's DB_CONFIG file. The syntax of the entry in that file is a single line with the string "set_lg_max", one or more whitespace characters, @@ -59,15 +68,14 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

lg_max
-The lg_max parameter is the size of a single log file, in bytes. +
+
lg_max
The lg_max parameter is the size of a single log file, in bytes.

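A sketch of the in-memory sizing relationship described above (values illustrative; dbenv assumed created with db_env_create and not yet opened): the log buffer is made several times larger than the log "file" size so that no single transaction can span the entire buffer.

    #include <db.h>

    dbenv->set_flags(dbenv, DB_LOG_INMEMORY, 1);    /* keep log records in memory */
    dbenv->set_lg_bsize(dbenv, 8 * 1024 * 1024);    /* 8MB in-memory log buffer   */
    dbenv->set_lg_max(dbenv, 1024 * 1024);          /* divided into 1MB "files"   */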
Errors

The DB_ENV->set_lg_max method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB_ENV->open was called; +
+
EINVAL
If the method was called after DB_ENV->open was called; the size of the log file is less than four times the size of the in-memory log buffer; The specified log file size was too large; or if an @@ -83,9 +91,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

lg_maxp
-The DB_ENV->get_lg_max method returns the +
+
lg_maxp
The DB_ENV->get_lg_max method returns the maximum log file size in lg_maxp.

@@ -97,6 +104,6 @@ maximum log file size in lg_maxp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_lg_regionmax.html b/db/docs/api_c/env_set_lg_regionmax.html index e6580307d..4bf7328fa 100644 --- a/db/docs/api_c/env_set_lg_regionmax.html +++ b/db/docs/api_c/env_set_lg_regionmax.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_lg_regionmax - + -

DB_ENV->set_lg_regionmax

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_ENV->get_lg_regionmax(DB_ENV *dbenv, u_int32_t *lg_regionmaxp);
 


Description: DB_ENV->set_lg_regionmax

-

Set the size of the underlying logging area of the Berkeley DB environment, in bytes. By default, or if the value is set to 0, the default size is 60KB. The log region is used to store filenames, and so may need to be @@ -57,16 +55,15 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

lg_regionmax
-The lg_regionmax parameter is the size of the logging area in +
+
lg_regionmax
The lg_regionmax parameter is the size of the logging area in the Berkeley DB environment, in bytes.

Errors

The DB_ENV->set_lg_regionmax method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB_ENV->open was called; or if an +
+
EINVAL
If the method was called after DB_ENV->open was called; or if an invalid flag value or parameter was specified.

@@ -79,9 +76,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

lg_regionmaxp
-The DB_ENV->get_lg_regionmax method returns the +
+
lg_regionmaxp
The DB_ENV->get_lg_regionmax method returns the size of the underlying logging subsystem region in lg_regionmaxp.

@@ -93,6 +89,6 @@ size of the underlying logging subsystem region in lg_regionmaxp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_lk_conflicts.html b/db/docs/api_c/env_set_lk_conflicts.html index 340c0997b..cf5728a87 100644 --- a/db/docs/api_c/env_set_lk_conflicts.html +++ b/db/docs/api_c/env_set_lk_conflicts.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_lk_conflicts - + -

DB_ENV->set_lk_conflicts

API -Ref -
+Ref +


@@ -33,7 +32,6 @@ DB_ENV->get_lk_conflicts(DB_ENV *dbenv,
 


Description: DB_ENV->set_lk_conflicts

-

Set the locking conflicts matrix.

If DB_ENV->set_lk_conflicts is never called, a standard conflicts array is used; see Standard Lock @@ -51,27 +49,24 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

conflicts
-The conflicts parameter is the new locking conflicts matrix. -The conflicts parameter -is an nmodes by nmodes array. -A non-0 value for the array element indicates that requested_mode and -held_mode conflict: +
+
conflicts
The conflicts parameter is the new locking conflicts matrix. +The conflicts parameter is an nmodes by nmodes +array. A non-0 value for the array element indicates that +requested_mode and held_mode conflict:
conflicts[requested_mode][held_mode]

The not-granted mode must be represented by 0.

-

nmodes
-The nmodes parameter is the size of the lock conflicts matrix. +
nmodes
The nmodes parameter is the size of the lock conflicts matrix.

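A minimal sketch of a custom matrix (purely illustrative; the standard Berkeley DB matrix is larger): three modes (not-granted, shared, and exclusive), where shared conflicts only with exclusive and exclusive conflicts with both.

    #include <db.h>

    /* conflicts[requested_mode][held_mode]; mode 0 is the not-granted mode. */
    static u_int8_t my_conflicts[3 * 3] = {
        /* held:  none  shared  exclusive          requested:   */
                  0,    0,      0,              /* not granted  */
                  0,    0,      1,              /* shared       */
                  0,    1,      1               /* exclusive    */
    };

    /* dbenv assumed created with db_env_create() and not yet opened. */
    dbenv->set_lk_conflicts(dbenv, my_conflicts, 3);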
Errors

The DB_ENV->set_lk_conflicts method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB_ENV->open was called; or if an +
+
EINVAL
If the method was called after DB_ENV->open was called; or if an invalid flag value or parameter was specified.
-

-

ENOMEM
The conflicts array could not be copied. +
+
ENOMEM
The conflicts array could not be copied.

Description: dbenv_get_lk_conflicts

@@ -83,12 +78,10 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

lk_conflictsp
-The lk_conflictsp parameter references memory into which +
+
lk_conflictsp
The lk_conflictsp parameter references memory into which a pointer to the current conflicts array is copied. -

lk_modesp
-The lk_modesp parameter references memory into which +
lk_modesp
The lk_modesp parameter references memory into which the size of the current conflicts array is copied.

@@ -100,6 +93,6 @@ The lk_modesp parameter references memory into which

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_lk_detect.html b/db/docs/api_c/env_set_lk_detect.html index 21b020719..e7b580475 100644 --- a/db/docs/api_c/env_set_lk_detect.html +++ b/db/docs/api_c/env_set_lk_detect.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_lk_detect - + -

DB_ENV->set_lk_detect

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_ENV->get_lk_detect(DB_ENV *dbenv, u_int32_t *lk_detectp);
 


Description: DB_ENV->set_lk_detect

-

Set if the deadlock detector is to be run whenever a lock conflict occurs, and specify what lock request(s) should be rejected. As transactions acquire locks on behalf of a single locker ID, rejecting a @@ -47,44 +45,37 @@ environment is opened, it will silently overrule configuration done before that time.

The DB_ENV->set_lk_detect method configures a database environment, not only operations performed using the specified DB_ENV handle.

-

The DB_ENV->set_lk_detect method may not be called after the DB_ENV->open method is -called. -If the database environment already exists when -DB_ENV->open is called, the information specified to DB_ENV->set_lk_detect -must be consistent with the existing environment or an error will be -returned. -

+

Although the DB_ENV->set_lk_detect method may be called at any time during the life of +the application, it should normally be called before making calls to the +db_env_create or db_create methods.

The DB_ENV->set_lk_detect method returns a non-zero error value on failure and 0 on success.

Parameters

-

-

detect
-The detect parameter configures the deadlock detector. The +
+
detect
The detect parameter configures the deadlock detector. The specified value must be one of the following list: -

-

DB_LOCK_DEFAULT
Use whatever lock policy was specified when the database environment +
+
DB_LOCK_DEFAULT
Use whatever lock policy was specified when the database environment was created. If no lock policy has yet been specified, set the lock policy to DB_LOCK_RANDOM.
DB_LOCK_EXPIRE
Reject lock requests which have timed out. No other deadlock detection is performed. -
DB_LOCK_MAXLOCKS
Reject the lock request for the locker ID with the greatest number of -locks. -
DB_LOCK_MINLOCKS
Reject the lock request for the locker ID with the fewest number of -locks. -
DB_LOCK_MINWRITE
Reject the lock request for the locker ID with the fewest number of -write locks. -
DB_LOCK_OLDEST
Reject the lock request for the oldest locker ID. +
DB_LOCK_MAXLOCKS
Reject the lock request for the locker ID with the most locks. +
DB_LOCK_MAXWRITE
Reject the lock request for the locker ID with the most write locks. +
DB_LOCK_MINLOCKS
Reject the lock request for the locker ID with the fewest locks. +
DB_LOCK_MINWRITE
Reject the lock request for the locker ID with the fewest write locks. +
DB_LOCK_OLDEST
Reject the lock request for the locker ID with the oldest lock.
DB_LOCK_RANDOM
Reject the lock request for a random locker ID. -
DB_LOCK_YOUNGEST
Reject the lock request for the youngest locker ID. +
DB_LOCK_YOUNGEST
Reject the lock request for the locker ID with the youngest lock.

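A one-line sketch (dbenv assumed created with db_env_create; the policy choice is illustrative): run the detector on every conflict and reject the locker holding the fewest write locks.

    #include <db.h>

    dbenv->set_lk_detect(dbenv, DB_LOCK_MINWRITE);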
Errors

The DB_ENV->set_lk_detect method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB_ENV->open was called; or if an +
+
EINVAL
If the method was called after DB_ENV->open was called; or if an invalid flag value or parameter was specified.

@@ -97,9 +88,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

lk_detectp
-The DB_ENV->get_lk_detect method returns the +
+
lk_detectp
The DB_ENV->get_lk_detect method returns the deadlock detector configuration in lk_detectp.

@@ -111,6 +101,6 @@ deadlock detector configuration in lk_detectp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_lk_max_lockers.html b/db/docs/api_c/env_set_lk_max_lockers.html index 7c25bda8b..0e8c8aeff 100644 --- a/db/docs/api_c/env_set_lk_max_lockers.html +++ b/db/docs/api_c/env_set_lk_max_lockers.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_lk_max_lockers - + -

DB_ENV->set_lk_max_lockers

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_ENV->get_lk_max_lockers(DB_ENV *dbenv, u_int32_t *lk_maxp);
 


Description: DB_ENV->set_lk_max_lockers

-

Set the maximum number of locking entities supported by the Berkeley DB environment. This value is used by DB_ENV->open to estimate how much space to allocate for various lock-table data structures. The @@ -57,16 +55,15 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

max
-The max parameter is the maximum number simultaneous locking +
+
max
The max parameter is the maximum number of simultaneous locking +entities supported by the Berkeley DB environment.


Errors

The DB_ENV->set_lk_max_lockers method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB_ENV->open was called; or if an +
+
EINVAL
If the method was called after DB_ENV->open was called; or if an invalid flag value or parameter was specified.

@@ -79,9 +76,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

lk_maxp
-The DB_ENV->get_lk_max_lockers method returns the +
+
lk_maxp
The DB_ENV->get_lk_max_lockers method returns the maximum number of lockers in lk_maxp.

@@ -93,6 +89,6 @@ maximum number of lockers in lk_maxp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_lk_max_locks.html b/db/docs/api_c/env_set_lk_max_locks.html index 3cffd6741..0115a7de8 100644 --- a/db/docs/api_c/env_set_lk_max_locks.html +++ b/db/docs/api_c/env_set_lk_max_locks.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_lk_max_locks - + -

DB_ENV->set_lk_max_locks

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_ENV->get_lk_max_locks(DB_ENV *dbenv, u_int32_t *lk_maxp);
 


Description: DB_ENV->set_lk_max_locks

-

Set the maximum number of locks supported by the Berkeley DB environment. This value is used by DB_ENV->open to estimate how much space to allocate for various lock-table data structures. The default value is @@ -58,31 +56,29 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

max
-The max parameter is the maximum number of locks supported by +
+
max
The max parameter is the maximum number of locks supported by the Berkeley DB environment.

Errors

The DB_ENV->set_lk_max_locks method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB_ENV->open was called; or if an +
+
EINVAL
If the method was called after DB_ENV->open was called; or if an invalid flag value or parameter was specified.

-

Description: DB_ENV->set_lk_max_locks

-

The DB_ENV->set_lk_max_locks method returns the maximum number of locks.

-

The DB_ENV->set_lk_max_locks method may be called at any time during the life of the +

Description: DB_ENV->get_lk_max_locks

+

The DB_ENV->get_lk_max_locks method returns the maximum number of locks.

+

The DB_ENV->get_lk_max_locks method may be called at any time during the life of the application.

-

The DB_ENV->set_lk_max_locks method +

The DB_ENV->get_lk_max_locks method returns a non-zero error value on failure and 0 on success.

Parameters

-

-

lk_maxp
-The DB_ENV->set_lk_max_locks method returns the +
+
lk_maxp
The DB_ENV->get_lk_max_locks method returns the maximum number of locks in lk_maxp.

@@ -94,6 +90,6 @@ maximum number of locks in lk_maxp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_lk_max_objects.html b/db/docs/api_c/env_set_lk_max_objects.html index 62f0b37ce..edfdad722 100644 --- a/db/docs/api_c/env_set_lk_max_objects.html +++ b/db/docs/api_c/env_set_lk_max_objects.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_lk_max_objects - + -

DB_ENV->set_lk_max_objects

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_ENV->get_lk_max_objects(DB_ENV *dbenv, u_int32_t *lk_maxp);
 


Description: DB_ENV->set_lk_max_objects

-

Set the maximum number of locked objects supported by the Berkeley DB environment. This value is used by DB_ENV->open to estimate how much space to allocate for various lock-table data structures. The @@ -57,16 +55,15 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

max
-The max parameter is the maximum number of locked objects +
+
max
The max parameter is the maximum number of locked objects supported by the Berkeley DB environment.

Errors

The DB_ENV->set_lk_max_objects method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB_ENV->open was called; or if an +
+
EINVAL
If the method was called after DB_ENV->open was called; or if an invalid flag value or parameter was specified.

@@ -79,9 +76,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

lk_maxp
-The DB_ENV->get_lk_max_objects method returns the +
+
lk_maxp
The DB_ENV->get_lk_max_objects method returns the maximum number of locked objects in lk_maxp.

@@ -93,6 +89,6 @@ maximum number of locked objects in lk_maxp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_mp_mmapsize.html b/db/docs/api_c/env_set_mp_mmapsize.html index fc117ac38..b400dd9a9 100644 --- a/db/docs/api_c/env_set_mp_mmapsize.html +++ b/db/docs/api_c/env_set_mp_mmapsize.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_mp_mmapsize - + -

DB_ENV->set_mp_mmapsize

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_ENV->get_mp_mmapsize(DB_ENV *dbenv, size_t *mp_mmapsizep);
 


Description: DB_ENV->set_mp_mmapsize

-

Files that are opened read-only in the pool (and that satisfy a few other criteria) are, by default, mapped into the process address space instead of being copied into the local cache. This can result in @@ -50,9 +48,8 @@ and the size in bytes. Because the DB_CONFIG file is read when the database environment is opened, it will silently overrule configuration done before that time.

-

The DB_ENV->set_mp_mmapsize method configures operations performed using the specified -DB_ENV handle, not all operations performed on the underlying -database environment.

+

The DB_ENV->set_mp_mmapsize method configures a database environment, not only operations +performed using the specified DB_ENV handle.

The DB_ENV->set_mp_mmapsize method may be called at any time during the life of the application.

The DB_ENV->set_mp_mmapsize method @@ -60,16 +57,15 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

mp_mmapsize
-The mp_mmapsize parameter is the maximum file size, in bytes, +
+
mp_mmapsize
The mp_mmapsize parameter is the maximum file size, in bytes, for a file to be mapped into the process address space.

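A short sketch (value illustrative; dbenv assumed created with db_env_create): memory-map only read-only files of 4MB or less, copying larger files into the local cache instead.

    #include <db.h>

    dbenv->set_mp_mmapsize(dbenv, 4 * 1024 * 1024);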
Errors

The DB_ENV->set_mp_mmapsize method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB_ENV->open was called; or if an +
+
EINVAL
If the method was called after DB_ENV->open was called; or if an invalid flag value or parameter was specified.

@@ -82,9 +78,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

mp_mmapsizep
-The DB_ENV->get_mp_mmapsize method returns the +
+
mp_mmapsizep
The DB_ENV->get_mp_mmapsize method returns the maximum file map size in mp_mmapsizep.

@@ -96,6 +91,6 @@ maximum file map size in mp_mmapsizep.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_msgcall.html b/db/docs/api_c/env_set_msgcall.html new file mode 100644 index 000000000..3facc1664 --- /dev/null +++ b/db/docs/api_c/env_set_msgcall.html @@ -0,0 +1,65 @@ + + + + + + +Berkeley DB: DB_ENV->set_msgcall + + + + + + + +
+

DB_ENV->set_msgcall

+
+API +Ref
+


+ +

+#include <db.h>
+

+void +DB_ENV->set_msgcall(DB_ENV *dbenv, + void (*db_msgcall_fcn)(const DB_ENV *dbenv, const char *msg)); +

+
+

Description: DB_ENV->set_msgcall

+

There are interfaces in the Berkeley DB library which either directly output +informational messages or statistical information, or configure the +library to output such messages when performing other operations, for +example, DB_ENV->set_verbose and DB_ENV->stat_print.

+

The DB_ENV->set_msgcall and DB->set_msgcall methods are used to +pass these messages to the application, and Berkeley DB will call +db_msgcall_fcn with each message. It is up to the +db_msgcall_fcn function to display the message in an appropriate +manner.

+

Setting db_msgcall_fcn to NULL unconfigures the callback interface.

+

Alternatively, you can use the DB->set_msgfile or +DB_ENV->set_msgfile methods to display the messages via a C library FILE *.

+

The DB_ENV->set_msgcall method may be called at any time during the life of the +application.

+

Parameters

+
+
db_msgcall_fcn
The db_msgcall_fcn parameter is the application-specified message +reporting function. The function takes two parameters: +
+
dbenv
The dbenv parameter is the enclosing database environment. +
msg
The msg parameter is the message string. +
+
+
+

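As an illustrative sketch (function name hypothetical; dbenv assumed created with db_env_create), a handler for the informational output produced by DB_ENV->stat_print or DB_ENV->set_verbose might look like this:

    #include <stdio.h>
    #include <db.h>

    /* Hypothetical message handler matching the documented signature. */
    static void
    my_msgcall(const DB_ENV *dbenv, const char *msg)
    {
        fprintf(stdout, "db: %s\n", msg);
    }

    /* ... after db_env_create(&dbenv, 0) has succeeded ... */
    dbenv->set_msgcall(dbenv, my_msgcall);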
Class

+DB_ENV +

See Also

+Database Environments and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_c/env_set_msgfile.html b/db/docs/api_c/env_set_msgfile.html new file mode 100644 index 000000000..6a59ba65d --- /dev/null +++ b/db/docs/api_c/env_set_msgfile.html @@ -0,0 +1,68 @@ + + + + + + +Berkeley DB: DB_ENV->set_msgfile + + + + + + + +
+

DB_ENV->set_msgfile

+
+API +Ref
+


+ +

+#include <db.h>
+

+void +DB_ENV->set_msgfile(DB_ENV *dbenv, FILE *msgfile); +

+void +DB_ENV->get_msgfile(DB_ENV *dbenv, FILE **msgfilep); +

+
+

Description: DB_ENV->set_msgfile

+

There are interfaces in the Berkeley DB library which either directly output +informational messages or statistical information, or configure the +library to output such messages when performing other operations, for +example, DB_ENV->set_verbose and DB_ENV->stat_print.

+

The DB_ENV->set_msgfile and DB->set_msgfile methods are used to +display these messages for the application. +In this case the message will include a trailing <newline> +character.

+

Setting msgfile to NULL unconfigures the interface.

+

Alternatively, you can use the DB_ENV->set_msgcall or +DB->set_msgcall methods to capture the additional error information +in a way that does not use C library FILE *'s.

+

The DB_ENV->set_msgfile method may be called at any time during the life of the +application.

+

Parameters

+
+
msgfile
The msgfile parameter is a C library FILE * to be used for +displaying messages. +
+
+

Description: DB_ENV->get_msgfile

+

The DB_ENV->get_msgfile method returns the FILE *.

+

The DB_ENV->get_msgfile method may be called at any time during the life of the +application.

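A usage sketch (file name hypothetical; dbenv assumed created with db_env_create): append statistics and verbose messages to a log file rather than discarding them.

    #include <stdio.h>
    #include <db.h>

    FILE *msgs = fopen("db-messages.txt", "a");    /* hypothetical path */
    if (msgs != NULL)
        dbenv->set_msgfile(dbenv, msgs);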
+
+

Class

+DB_ENV +

See Also

+Database Environments and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_c/env_set_paniccall.html b/db/docs/api_c/env_set_paniccall.html index 1f80b4846..e2dab6d2d 100644 --- a/db/docs/api_c/env_set_paniccall.html +++ b/db/docs/api_c/env_set_paniccall.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_paniccall - + -

DB_ENV->set_paniccall

API -Ref -
+Ref +


@@ -45,14 +44,13 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

db_panic_fcn
-The db_panic_fcn parameter is the application-specified function +
+
db_panic_fcn
The db_panic_fcn parameter is the application-specified function called in the case of a database environment panic. The function takes two arguments: -

-

dbenv
The dbenv parameter is the enclosing database environment handle. -

errval
The errval parameter is the error value that would have been +
+
dbenv
The dbenv parameter is the enclosing database environment handle. +
errval
The errval parameter is the error value that would have been returned to the caller if DB_RUNRECOVERY were not going to be returned instead.
@@ -66,6 +64,6 @@ returned instead.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_rpc_server.html b/db/docs/api_c/env_set_rpc_server.html index e58437174..499833e24 100644 --- a/db/docs/api_c/env_set_rpc_server.html +++ b/db/docs/api_c/env_set_rpc_server.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_rpc_server - + -

DB_ENV->set_rpc_server

API -Ref -
+Ref +


@@ -32,8 +31,7 @@ DB_ENV->set_rpc_server(DB_ENV *dbenv, CLIENT *client, char *host,
 

Establishes a connection for this dbenv to a RPC server.

When the DB_ENV->set_rpc_server method has been called, subsequent calls to Berkeley DB library interfaces may return or throw exceptions encapsulating -DB_NOSERVER, DB_NOSERVER_ID, or -DB_NOSERVER_HOME.

+DB_NOSERVER, DB_NOSERVER_ID, or DB_NOSERVER_HOME.

The DB_ENV->set_rpc_server method configures operations performed using the specified DB_ENV handle, not all operations performed on the underlying database environment.

@@ -45,25 +43,20 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

cl_timeout
- +
+
cl_timeout
The cl_timeout parameter specifies the number of seconds the client should wait for results to come back from the server. Once the timeout has expired on any communication with the server, DB_NOSERVER will be returned. If this value is zero, a default timeout is used. -

client
-If the client channel has been provided by the application then +
client
If the client channel has been provided by the application then Berkeley DB will use it as its connection and the host and cl_timeout fields are ignored. -

host
-The host parameter is the host to which the Berkeley DB server will +
host
The host parameter is the host to which the Berkeley DB server will connect and create a channel for communication. -

flags
-The flags parameter is currently unused, and must be set to 0. - -

sv_timeout
-The sv_timeout parameter specifies the number of seconds the server +
flags
The flags parameter is currently unused, and must be set to 0. + +
sv_timeout
The sv_timeout parameter specifies the number of seconds the server should allow a client connection to remain idle before assuming that the client is gone. Once that timeout has been reached, the server releases all resources associated with that client connection. Subsequent attempts @@ -76,8 +69,8 @@ values. If this value is zero, a default timeout is used.

Errors

The DB_ENV->set_rpc_server method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -89,6 +82,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_shm_key.html b/db/docs/api_c/env_set_shm_key.html index 936029a12..b6a35b59e 100644 --- a/db/docs/api_c/env_set_shm_key.html +++ b/db/docs/api_c/env_set_shm_key.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_shm_key - + -

DB_ENV->set_shm_key

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_ENV->get_shm_key(DB_ENV *dbenv, long *shm_keyp);
 


Description: DB_ENV->set_shm_key

-

Specify a base segment ID for Berkeley DB environment shared memory regions created in system memory on VxWorks or systems supporting X/Open-style shared memory interfaces; for example, UNIX systems supporting @@ -75,16 +73,15 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

shm_key
-The shm_key parameter is the base segment ID for the database +
+
shm_key
The shm_key parameter is the base segment ID for the database environment.

Errors

The DB_ENV->set_shm_key method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB_ENV->open was called; or if an +
+
EINVAL
If the method was called after DB_ENV->open was called; or if an invalid flag value or parameter was specified.

@@ -97,9 +94,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

shm_keyp
-The DB_ENV->get_shm_key method returns the +
+
shm_keyp
The DB_ENV->get_shm_key method returns the base segment ID in shm_keyp.

@@ -111,6 +107,6 @@ base segment ID in shm_keyp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_tas_spins.html b/db/docs/api_c/env_set_tas_spins.html index a27726ef1..84ba6b6df 100644 --- a/db/docs/api_c/env_set_tas_spins.html +++ b/db/docs/api_c/env_set_tas_spins.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_tas_spins - + -

DB_ENV->set_tas_spins

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_ENV->get_tas_spins(DB_ENV *dbenv, u_int32_t *tas_spinsp);
 


Description: DB_ENV->set_tas_spins

-

Specify that test-and-set mutexes should spin tas_spins times without blocking. The value defaults to 1 on uniprocessor systems and to 50 times the number of processors on multiprocessor systems.

@@ -52,16 +50,15 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

tas_spins
-The tas_spins parameter is the number of spins test-and-set +
+
tas_spins
The tas_spins parameter is the number of spins test-and-set mutexes should execute before blocking.

Errors

The DB_ENV->set_tas_spins method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -74,9 +71,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

tas_spinsp
-The DB_ENV->get_tas_spins method returns the +
+
tas_spinsp
The DB_ENV->get_tas_spins method returns the test-and-set spin count in tas_spinsp.

@@ -88,6 +84,6 @@ test-and-set spin count in tas_spinsp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_timeout.html b/db/docs/api_c/env_set_timeout.html index d838bf4ba..7993e4e4f 100644 --- a/db/docs/api_c/env_set_timeout.html +++ b/db/docs/api_c/env_set_timeout.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_timeout - + -

DB_ENV->set_timeout

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_ENV->get_timeout(DB_ENV *dbenv, db_timeout_t *timeoutp, u_int32_t flag);
 


Description: DB_ENV->set_timeout

-

The DB_ENV->set_timeout method sets timeout values for locks or transactions in the database environment.

Timeouts are checked whenever a thread of control blocks on a lock or @@ -56,11 +54,10 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to one of the following values: -

-

DB_SET_LOCK_TIMEOUT
Set the timeout value for locks in this database environment. +
+
flags
The flags parameter must be set to one of the following values: +
+
DB_SET_LOCK_TIMEOUT
Set the timeout value for locks in this database environment.

The database environment's lock timeout value may also be set using the environment's DB_CONFIG file. The syntax of the entry in that file is a single line with the string "set_lock_timeout", one or more whitespace characters, @@ -68,7 +65,7 @@ and the lock timeout value. Because the DB_CONFIG file is read when the database environment is opened, it will silently overrule configuration done before that time.

-

DB_SET_TXN_TIMEOUT
Set the timeout value for transactions in this database environment. +
DB_SET_TXN_TIMEOUT
Set the timeout value for transactions in this database environment.

The database environment's transaction timeout value may also be set using the environment's DB_CONFIG file. The syntax of the entry in that file is a single line with the string "set_txn_timeout", one or more whitespace characters, @@ -77,16 +74,15 @@ Because the DB_CONFIG file is read when the database environment is opened, it will silently overrule configuration done before that time.

-

timeout
-The timeout parameter is the timeout value. It must be specified +
timeout
The timeout parameter is the timeout value. It must be specified as an unsigned 32-bit number of microseconds, limiting the maximum timeout to roughly 71 minutes.

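A short sketch (values illustrative; dbenv assumed created with db_env_create): timeouts are expressed in microseconds, here five seconds for locks and ten seconds for transactions.

    #include <db.h>

    dbenv->set_timeout(dbenv, 5000000, DB_SET_LOCK_TIMEOUT);
    dbenv->set_timeout(dbenv, 10000000, DB_SET_TXN_TIMEOUT);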
Errors

The DB_ENV->set_timeout method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -99,15 +95,13 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flag
-The flags parameter must be set to one of the following values: -

-

DB_SET_LOCK_TIMEOUT
Return the timeout value for locks in this database environment. -

DB_SET_TXN_TIMEOUT
Return the timeout value for transactions in this database environment. +
+
flag
The flags parameter must be set to one of the following values: +
+
DB_SET_LOCK_TIMEOUT
Return the timeout value for locks in this database environment. +
DB_SET_TXN_TIMEOUT
Return the timeout value for transactions in this database environment.
-

timeoutp
-The timeoutp parameter references memory into which +
timeoutp
The timeoutp parameter references memory into which the timeout value of the specified flag parameter is copied.

@@ -119,6 +113,6 @@ The timeoutp parameter references memory into which

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_tmp_dir.html b/db/docs/api_c/env_set_tmp_dir.html index 85ac6a945..11675a5ee 100644 --- a/db/docs/api_c/env_set_tmp_dir.html +++ b/db/docs/api_c/env_set_tmp_dir.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_tmp_dir - + -

DB_ENV->set_tmp_dir

API -Ref -
+Ref +


@@ -31,8 +30,7 @@ DB_ENV->get_tmp_dir(DB_ENV *dbenv, const char **dirp);
 


Description: DB_ENV->set_tmp_dir

- - +

Specify the path of a directory to be used as the location of temporary files. The files created to back in-memory access method databases will be created relative to this path. These temporary files can be quite @@ -78,16 +76,17 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

dir
-The dir parameter is the directory to be used to store temporary +
+
dir
The dir parameter is the directory to be used to store temporary files. +

On Windows, the dir argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters.
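One possible call sequence in C, assuming a DB_ENV handle named dbenv that has been created but not yet opened; the directory /var/tmp and the function name are only illustrative:

#include <stdio.h>
#include <db.h>

/* Sketch: direct temporary files to /var/tmp and read the setting back. */
int
configure_tmp_dir(DB_ENV *dbenv)
{
    const char *dir;
    int ret;

    if ((ret = dbenv->set_tmp_dir(dbenv, "/var/tmp")) != 0)
        return (ret);
    if ((ret = dbenv->get_tmp_dir(dbenv, &dir)) != 0)
        return (ret);
    printf("temporary files: %s\n", dir);
    return (0);
}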

Errors

The DB_ENV->set_tmp_dir method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB_ENV->open was called; or if an +
+
EINVAL
If the method was called after DB_ENV->open was called; or if an invalid flag value or parameter was specified.

@@ -100,9 +99,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

dirp
-The DB_ENV->get_tmp_dir method returns a reference to the +
+
dirp
The DB_ENV->get_tmp_dir method returns a reference to the database environment temporary file directory in dirp.

@@ -114,6 +112,6 @@ database environment temporary file directory in dirp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_tx_max.html b/db/docs/api_c/env_set_tx_max.html index d366037f9..1461744db 100644 --- a/db/docs/api_c/env_set_tx_max.html +++ b/db/docs/api_c/env_set_tx_max.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_tx_max - + -

DB_ENV->set_tx_max

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_ENV->get_tx_max(DB_ENV *dbenv, u_int32_t *tx_maxp);
 


Description: DB_ENV->set_tx_max

-

Configure the Berkeley DB database environment to support at least max active transactions. This value bounds the size of the memory allocated for transactions. Child transactions are counted as active until they @@ -61,17 +59,16 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

max
-The max parameter configures the minimum number of +
+
max
The max parameter configures the minimum number of simultaneously active transactions supported by the Berkeley DB database environment.
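A rough sketch in C, assuming a DB_ENV handle named dbenv that has been created but not yet opened; the value 1000 and the function name are illustrative only:

#include <stdio.h>
#include <db.h>

/* Sketch: ask for room for at least 1000 simultaneously active transactions. */
int
configure_tx_max(DB_ENV *dbenv)
{
    u_int32_t tx_max;
    int ret;

    if ((ret = dbenv->set_tx_max(dbenv, 1000)) != 0)
        return (ret);
    if ((ret = dbenv->get_tx_max(dbenv, &tx_max)) != 0)
        return (ret);
    printf("transaction table sized for %lu\n", (unsigned long)tx_max);
    return (0);
}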

Errors

The DB_ENV->set_tx_max method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DB_ENV->open was called; or if an +
+
EINVAL
If the method was called after DB_ENV->open was called; or if an invalid flag value or parameter was specified.

@@ -84,9 +81,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

tx_maxp
-The DB_ENV->get_tx_max method returns the +
+
tx_maxp
The DB_ENV->get_tx_max method returns the number of active transactions in tx_maxp.

@@ -98,6 +94,6 @@ number of active transactions in tx_maxp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_tx_timestamp.html b/db/docs/api_c/env_set_tx_timestamp.html index 4949cb169..29f5841de 100644 --- a/db/docs/api_c/env_set_tx_timestamp.html +++ b/db/docs/api_c/env_set_tx_timestamp.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_tx_timestamp - + -

DB_ENV->set_tx_timestamp

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_ENV->get_tx_timestamp(DB_ENV *dbenv, time_t *timestampp);
 


Description: DB_ENV->set_tx_timestamp

-

Recover to the time specified by timestamp rather than to the most current possible date.

Once a database environment has been upgraded to a new version of Berkeley DB @@ -48,9 +46,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

timestamp
-The timestamp parameter references the memory location where the +
+
timestamp
The timestamp parameter references the memory location where the recovery timestamp is located.

The timestamp parameter should be the number of seconds since 0 hours, 0 minutes, 0 seconds, January 1, 1970, Coordinated Universal @@ -59,8 +56,8 @@ Time; that is, the Epoch.
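As a hedged illustration, recovery to a point one hour in the past might be requested as follows; the handle is assumed to be created but not yet opened, and DB_RECOVER or DB_RECOVER_FATAL is assumed to be passed to DB_ENV->open afterward:

#include <time.h>
#include <db.h>

/* Sketch: recover to the state of the environment as of one hour ago. */
int
configure_recovery_point(DB_ENV *dbenv)
{
    time_t when;

    when = time(NULL) - 60 * 60;    /* seconds since the Epoch, one hour back */
    return (dbenv->set_tx_timestamp(dbenv, &when));
}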

Errors

The DB_ENV->set_tx_timestamp method may fail and return one of the following non-zero errors:

-

-

EINVAL
If it is not possible to recover to the specified time using the log files +
+
EINVAL
If it is not possible to recover to the specified time using the log files currently present in the environment; or if an invalid flag value or parameter was specified.
@@ -74,9 +71,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

timestampp
-The DB_ENV->get_tx_timestamp method returns the +
+
timestampp
The DB_ENV->get_tx_timestamp method returns the recovery timestamp in timestampp.

@@ -88,6 +84,6 @@ recovery timestamp in timestampp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_set_verbose.html b/db/docs/api_c/env_set_verbose.html index 8a59400ab..7862cdb45 100644 --- a/db/docs/api_c/env_set_verbose.html +++ b/db/docs/api_c/env_set_verbose.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_verbose - + -

DB_ENV->set_verbose

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_ENV->get_verbose(DB_ENV *dbenv, u_int32_t which, int *onoffp);
 


Description: DB_ENV->set_verbose

-

The DB_ENV->set_verbose method turns specific additional informational and debugging messages in the Berkeley DB message output on and off. To see the additional messages, verbose messages must also be configured for @@ -41,7 +39,7 @@ the application. For more information on verbose messages, see the DB_CONFIG file. The syntax of the entry in that file is a single line with the string "set_verbose", one or more whitespace characters, and the method which parameter as a string; for example, -"set_verbose DB_VERB_CHKPOINT". +"set_verbose DB_VERB_RECOVERY". Because the DB_CONFIG file is read when the database environment is opened, it will silently overrule configuration done before that time.

@@ -55,29 +53,26 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

onoff
-If the onoff parameter is set to non-zero, the additional messages are output. -

which
-The which parameter must be set to one of the following values: -

-

DB_VERB_CHKPOINT
Display checkpoint location information when searching the log for -checkpoints. -

DB_VERB_DEADLOCK
Display additional information when doing deadlock detection. -

DB_VERB_RECOVERY
Display additional information when performing recovery. -

DB_VERB_REPLICATION
Display additional information when processing replication messages. +
+
onoff
If the onoff parameter is set to non-zero, the additional +messages are output. +
which
The which parameter must be set to one of the following values: +
+
DB_VERB_DEADLOCK
Display additional information when doing deadlock detection. +
DB_VERB_RECOVERY
Display additional information when performing recovery. +
DB_VERB_REPLICATION
Display additional information when processing replication messages.

Note, to get complete replication logging when debugging replication applications, you must also configure and build the Berkeley DB library with the --enable-diagnostic configuration option as well as call the DB_ENV->set_verbose method.

-

DB_VERB_WAITSFOR
Display the waits-for table when doing deadlock detection. +
DB_VERB_WAITSFOR
Display the waits-for table when doing deadlock detection.
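A small C sketch, assuming an existing DB_ENV handle named dbenv; the choice of DB_VERB_RECOVERY and the function name are illustrative:

#include <stdio.h>
#include <db.h>

/* Sketch: turn on recovery messages and confirm the setting. */
int
configure_verbose(DB_ENV *dbenv)
{
    int onoff, ret;

    if ((ret = dbenv->set_verbose(dbenv, DB_VERB_RECOVERY, 1)) != 0)
        return (ret);
    if ((ret = dbenv->get_verbose(dbenv, DB_VERB_RECOVERY, &onoff)) != 0)
        return (ret);
    printf("DB_VERB_RECOVERY is %s\n", onoff ? "on" : "off");
    return (0);
}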

Errors

The DB_ENV->set_verbose method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -91,12 +86,10 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

which
-The which parameter is the message value for which configuration +
+
which
The which parameter is the message value for which configuration is being checked. -

onoffp
-The onoffp parameter references memory into which +
onoffp
The onoffp parameter references memory into which the configuration of the specified which parameter is copied.

@@ -108,6 +101,6 @@ The onoffp parameter references memory into which

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_stat.html b/db/docs/api_c/env_stat.html new file mode 100644 index 000000000..6f0976174 --- /dev/null +++ b/db/docs/api_c/env_stat.html @@ -0,0 +1,66 @@ + + + + + + +Berkeley DB: DB_ENV->stat_print + + + + + + + +
+

DB_ENV->stat_print

+
+API +Ref
+


+ +

+#include <db.h>
+

+int +DB_ENV->stat_print(DB_ENV *dbenv, u_int32_t flags); +

+
+

Description: DB_ENV->stat_print

+

The DB_ENV->stat_print method returns the +default statistical information. +The information is printed to a specified output channel (see the +DB_ENV->set_msgfile method for more information), or passed to an +application callback function (see the DB_ENV->set_msgcall method for +more information).

+

The DB_ENV->stat_print method may not be called before the DB_ENV->open method has +been called.

+

The DB_ENV->stat_print method +returns a non-zero error value on failure +and 0 on success. +

+

Parameters

+
+
flags
The flags parameter must be set to 0 or +the following value: +
+
DB_STAT_ALL
Display all available information. +
+In addition, the following flag may be set by +bitwise inclusively OR'ing it into the flags parameter: +
+
DB_STAT_SUBSYSTEM
Display information for all configured subsystems. +
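A minimal sketch of a call, assuming an opened DB_ENV handle named dbenv; output goes wherever DB_ENV->set_msgfile or DB_ENV->set_msgcall directed it:

#include <db.h>

/* Sketch: dump everything, including per-subsystem detail. */
int
dump_env_stats(DB_ENV *dbenv)
{
    return (dbenv->stat_print(dbenv, DB_STAT_ALL | DB_STAT_SUBSYSTEM));
}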
+
+
+

Class

+DB_ENV +

See Also

+Database Environments and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_c/env_strerror.html b/db/docs/api_c/env_strerror.html index b61676f3c..d538c2223 100644 --- a/db/docs/api_c/env_strerror.html +++ b/db/docs/api_c/env_strerror.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: db_strerror - + -

db_strerror

API -Ref -
+Ref +


@@ -38,9 +37,8 @@ is returned. See
 Error returns to applications
 for more information.

Parameters

-

-

error
-The error parameter is the error number for which an error message +
+
error
The error parameter is the error number for which an error message string is wanted.
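A short usage sketch; the wrapper name report is illustrative:

#include <stdio.h>
#include <db.h>

/* Sketch: translate a Berkeley DB or system error number into text. */
void
report(const char *what, int ret)
{
    if (ret != 0)
        fprintf(stderr, "%s: %s\n", what, db_strerror(ret));
}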

@@ -52,6 +50,6 @@ string is wanted.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/env_version.html b/db/docs/api_c/env_version.html index 68acc73a6..3ca889a37 100644 --- a/db/docs/api_c/env_version.html +++ b/db/docs/api_c/env_version.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: db_version - + -

db_version

API -Ref -
+Ref +


@@ -28,19 +27,15 @@ db_version(int *major, int *minor, int *patch);
 


Description: db_version

-

The db_version method returns a pointer to a string, suitable for display, containing Berkeley DB version information.

Parameters

-

-

major
-If major is non-NULL, the major +
+
major
If major is non-NULL, the major version of the Berkeley DB release is copied to the memory to which it refers. -

minor
-If minor is non-NULL, the minor version of the Berkeley DB release +
minor
If minor is non-NULL, the minor version of the Berkeley DB release is copied to the memory to which it refers. -

patch
-If patch is non-NULL, the patch version of the Berkeley DB release +
patch
If patch is non-NULL, the patch version of the Berkeley DB release is copied to the memory to which it refers.
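A short sketch that prints both the version string and its numeric components; the function name is illustrative:

#include <stdio.h>
#include <db.h>

/* Sketch: report the library version at startup. */
void
show_version(void)
{
    int major, minor, patch;

    printf("%s\n", db_version(&major, &minor, &patch));
    printf("major=%d minor=%d patch=%d\n", major, minor, patch);
}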

@@ -52,6 +47,6 @@ is copied to the memory to which it refers.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/frame.html b/db/docs/api_c/frame.html new file mode 100644 index 000000000..e01d432fd --- /dev/null +++ b/db/docs/api_c/frame.html @@ -0,0 +1,15 @@ + + + +C API (Version: 4.3.14) + + + + + + + +<meta http-equiv="refresh" content="0;url=api_index.html"> + + + diff --git a/db/docs/api_c/hsearch.html b/db/docs/api_c/hsearch.html index 1eca6d649..2178943e1 100644 --- a/db/docs/api_c/hsearch.html +++ b/db/docs/api_c/hsearch.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: hsearch - + -

hsearch

API -Ref -
+Ref +


@@ -44,7 +43,6 @@ hdestroy(void);
 


Description: hsearch

-

The hsearch functions are intended to provide a high-performance implementation and source code compatibility for applications written to the historic hsearch interface. It is not recommended for any other @@ -68,12 +66,12 @@ and retrieval. The field data is declared to be of type

The hsearch function retrieves key/data pairs from, and stores key/data pairs into the database.

The action parameter must be set to one of two values:

-

-

ENTER
If the key does not already appear in the database, insert the key/data +
+
ENTER
If the key does not already appear in the database, insert the key/data pair into the database. If the key already appears in the database, return a reference to an ENTRY structure which refers to the existing key and its associated data element. -

FIND
Retrieve the specified key/data pair from the database. +
FIND
Retrieve the specified key/data pair from the database.
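A small, self-contained sketch of the ENTER/FIND cycle; the table size, the key and data strings, and the function name are arbitrary:

#include <stdio.h>

#define DB_DBM_HSEARCH 1    /* expose the hsearch compatibility API */
#include <db.h>

/* Sketch: store one key/data pair, then look it up again. */
int
hsearch_demo(void)
{
    ENTRY item, *found;

    if (hcreate(128) == 0)      /* non-zero return means success */
        return (1);

    item.key = "color";
    item.data = "blue";
    (void)hsearch(item, ENTER);

    item.key = "color";
    if ((found = hsearch(item, FIND)) != NULL)
        printf("%s -> %s\n", found->key, (char *)found->data);

    hdestroy();
    return (0);
}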

Compatibility Notes

Historically, hsearch required applications to maintain the keys @@ -99,6 +97,6 @@ system functions.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/lock_class.html b/db/docs/api_c/lock_class.html index eea2d261b..5cb09f9d0 100644 --- a/db/docs/api_c/lock_class.html +++ b/db/docs/api_c/lock_class.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_LOCK - + -

DB_LOCK

API -Ref -
+Ref +


@@ -39,6 +38,6 @@ for a single lock, and has no methods of its own.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/lock_detect.html b/db/docs/api_c/lock_detect.html index 214725102..8d1691d7c 100644 --- a/db/docs/api_c/lock_detect.html +++ b/db/docs/api_c/lock_detect.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->lock_detect - + -

DB_ENV->lock_detect

API -Ref -
+Ref +


@@ -40,35 +39,33 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

aborted
-

atype
-The atype parameter specifies which lock request(s) to reject. +
+
aborted
If the aborted parameter is non-NULL, the memory location to +which it refers will be set to the number of lock requests that were +rejected. +
atype
The atype parameter specifies which lock request(s) to reject. It must be set to one of the following list: -

-

DB_LOCK_DEFAULT
Use whatever lock policy was specified when the database environment +
+
DB_LOCK_DEFAULT
Use whatever lock policy was specified when the database environment was created. If no lock policy has yet been specified, set the lock policy to DB_LOCK_RANDOM.
DB_LOCK_EXPIRE
Reject lock requests which have timed out. No other deadlock detection is performed. -
DB_LOCK_MAXLOCKS
Reject the lock request for the locker ID with the greatest number of -locks. -
DB_LOCK_MINLOCKS
Reject the lock request for the locker ID with the fewest number of -locks. -
DB_LOCK_MINWRITE
Reject the lock request for the locker ID with the fewest number of -write locks. -
DB_LOCK_OLDEST
Reject the lock request for the oldest locker ID. +
DB_LOCK_MAXLOCKS
Reject the lock request for the locker ID with the most locks. +
DB_LOCK_MAXWRITE
Reject the lock request for the locker ID with the most write locks. +
DB_LOCK_MINLOCKS
Reject the lock request for the locker ID with the fewest locks. +
DB_LOCK_MINWRITE
Reject the lock request for the locker ID with the fewest write locks. +
DB_LOCK_OLDEST
Reject the lock request for the locker ID with the oldest lock.
DB_LOCK_RANDOM
Reject the lock request for a random locker ID. -
DB_LOCK_YOUNGEST
Reject the lock request for the youngest locker ID. +
DB_LOCK_YOUNGEST
Reject the lock request for the locker ID with the youngest lock.
-

flags
-The flags parameter is currently unused, and must be set to 0. +
flags
The flags parameter is currently unused, and must be set to 0.
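A minimal sketch of a detection pass, assuming an opened DB_ENV handle named dbenv; the function name is illustrative:

#include <stdio.h>
#include <db.h>

/* Sketch: run one deadlock-detection pass with the environment's default policy. */
int
run_deadlock_detector(DB_ENV *dbenv)
{
    int aborted, ret;

    if ((ret = dbenv->lock_detect(dbenv, 0, DB_LOCK_DEFAULT, &aborted)) != 0)
        return (ret);
    printf("%d lock request(s) rejected\n", aborted);
    return (0);
}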

Errors

The DB_ENV->lock_detect method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -80,6 +77,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/lock_get.html b/db/docs/api_c/lock_get.html index d41232ae8..9bdaaf49c 100644 --- a/db/docs/api_c/lock_get.html +++ b/db/docs/api_c/lock_get.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->lock_get - + -

DB_ENV->lock_get

API -Ref -
+Ref +


@@ -31,28 +30,25 @@ DB_ENV->lock_get(DB_ENV *env, u_int32_t locker,
 

Description: DB_ENV->lock_get

The DB_ENV->lock_get method acquires a lock from the lock table, returning -information about it in -the lock parameter.

+information about it in the lock parameter.

The DB_ENV->lock_get method returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to 0 or +
+
flags
The flags parameter must be set to 0 or the following value: -

-

DB_LOCK_NOWAIT
If a lock cannot be granted because the requested lock conflicts with an +
+
DB_LOCK_NOWAIT
If a lock cannot be granted because the requested lock conflicts with an existing lock, return DB_LOCK_NOTGRANTED immediately instead of waiting for the lock to become available.
-

lock_mode
-The lock_mode parameter is used as an index into the environment's +
lock_mode
The lock_mode parameter is used as an index into the environment's lock conflict matrix. When using the default lock conflict matrix, lock_mode must be set to one of the following values: -

+
DB_LOCK_READ
read (shared)
DB_LOCK_WRITE
write (exclusive)
DB_LOCK_IWRITE
intention to write (shared) @@ -60,11 +56,9 @@ lock conflict matrix. When using the default lock conflict matrix,
DB_LOCK_IWR
intention to read and write (shared)

See DB_ENV->set_lk_conflicts and Standard Lock Modes for more information on the lock conflict matrix.

-

locker
-The locker parameter is an unsigned 32-bit integer quantity. It +
locker
The locker parameter is an unsigned 32-bit integer quantity. It represents the entity requesting the lock. -

object
-The object parameter is an untyped byte string that specifies the +
object
The object parameter is an untyped byte string that specifies the object to be locked. Applications using the locking subsystem directly while also doing locking via the Berkeley DB access methods must take care not to inadvertently lock objects that happen to be equal to the unique file @@ -75,21 +69,21 @@ for more information.
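The following sketch ties this method together with the related locker-ID and lock-release calls described on their own pages; it assumes an opened DB_ENV handle named dbenv, and the object name my-resource is arbitrary:

#include <string.h>
#include <db.h>

/* Sketch: allocate a locker ID, take and drop a write lock, free the ID. */
int
lock_one_object(DB_ENV *dbenv)
{
    DBT obj;
    DB_LOCK lock;
    u_int32_t locker;
    int ret, t_ret;
    char objname[] = "my-resource";

    if ((ret = dbenv->lock_id(dbenv, &locker)) != 0)
        return (ret);

    memset(&obj, 0, sizeof(obj));
    obj.data = objname;
    obj.size = (u_int32_t)strlen(objname);

    if ((ret = dbenv->lock_get(dbenv, locker,
        DB_LOCK_NOWAIT, &obj, DB_LOCK_WRITE, &lock)) == 0)
        ret = dbenv->lock_put(dbenv, &lock);

    if ((t_ret = dbenv->lock_id_free(dbenv, locker)) != 0 && ret == 0)
        ret = t_ret;
    return (ret);
}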

Errors

The DB_ENV->lock_get method may fail and return one of the following non-zero errors:

-

-

DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve +
+
DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve a deadlock. -

DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable +
DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable to grant a lock in the allowed time.
-

-

DB_LOCK_NOTGRANTED
The DB_LOCK_NOWAIT flag or lock timers were configured and the lock could not be granted before the wait-time expired. +
+
DB_LOCK_NOTGRANTED
The DB_LOCK_NOWAIT flag or lock timers were configured and the lock could not be granted before the wait-time expired.
-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.
-

-

ENOMEM
The maximum number of locks has been reached. +
+
ENOMEM
The maximum number of locks has been reached.

Class

@@ -100,6 +94,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/lock_id.html b/db/docs/api_c/lock_id.html index 7d4e078da..1449d5c49 100644 --- a/db/docs/api_c/lock_id.html +++ b/db/docs/api_c/lock_id.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->lock_id - + -

DB_ENV->lock_id

API -Ref -
+Ref +


@@ -28,9 +27,9 @@ DB_ENV->lock_id(DB_ENV *env, u_int32_t *idp);
 


Description: DB_ENV->lock_id

-

The DB_ENV->lock_id method -copies a locker ID, which is guaranteed to be unique in the specified lock -table, into the memory location to which idp refers.

+

The DB_ENV->lock_id method copies a locker ID, which is guaranteed to be +unique in the specified lock table, into the memory location to which +idp refers.

The DB_ENV->lock_id_free method should be called to return the locker ID to the Berkeley DB library when it is no longer needed.

The DB_ENV->lock_id method @@ -38,9 +37,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

idp
-The idp parameter references memory into which +
+
idp
The idp parameter references memory into which the allocated locker ID is copied.

@@ -52,6 +50,6 @@ The idp parameter references memory into which

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/lock_id_free.html b/db/docs/api_c/lock_id_free.html index 64779372d..6014d24bd 100644 --- a/db/docs/api_c/lock_id_free.html +++ b/db/docs/api_c/lock_id_free.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->lock_id_free - + -

DB_ENV->lock_id_free

API -Ref -
+Ref +


@@ -35,15 +34,14 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

id
-The id parameter is the locker id to be freed. +
+
id
The id parameter is the locker ID to be freed.

Errors

The DB_ENV->lock_id_free method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the locker ID is invalid or locks are still held by this locker ID; or if an +
+
EINVAL
If the locker ID is invalid or locks are still held by this locker ID; or if an invalid flag value or parameter was specified.

@@ -55,6 +53,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/lock_list.html b/db/docs/api_c/lock_list.html index 5e7c5f04c..5eace022a 100644 --- a/db/docs/api_c/lock_list.html +++ b/db/docs/api_c/lock_list.html @@ -1,32 +1,32 @@ - + Berkeley DB: Berkeley DB: Locking Subsystem and Related Methods - +

Berkeley DB: Locking Subsystem and Related Methods

- + - - - - - - - - - - - - - + + + + + + + + + + + + +
Locking Subsystem and Related Methods - Description
DB_ENV->set_lk_conflicts - Set lock conflicts matrix
DB_ENV->set_lk_detect - Set automatic deadlock detection
DB_ENV->set_lk_max_lockers - Set maximum number of lockers
DB_ENV->set_lk_max_locks - Set maximum number of locks
DB_ENV->set_lk_max_objects - Set maximum number of lock objects
DB_ENV->set_timeout - Set lock and transaction timeout
DB_ENV->lock_detect - Perform deadlock detection
DB_ENV->lock_get - Acquire a lock
DB_ENV->lock_id - Acquire a locker ID
DB_ENV->lock_id_free - Release a locker ID
DB_ENV->lock_put - Release a lock
DB_ENV->lock_stat - Return lock subsystem statistics
DB_ENV->lock_vec - Acquire/release locks
DB_ENV->lock_detect - Perform deadlock detection
DB_ENV->lock_get - Acquire a lock
DB_ENV->lock_id - Acquire a locker ID
DB_ENV->lock_id_free - Release a locker ID
DB_ENV->lock_put - Release a lock
DB_ENV->lock_stat - Return lock subsystem statistics
DB_ENV->lock_vec - Acquire/release locks
DB_ENV->set_lk_conflicts - Set lock conflicts matrix
DB_ENV->set_lk_detect - Set automatic deadlock detection
DB_ENV->set_lk_max_lockers - Set maximum number of lockers
DB_ENV->set_lk_max_locks - Set maximum number of locks
DB_ENV->set_lk_max_objects - Set maximum number of lock objects
DB_ENV->set_timeout - Set lock and transaction timeout
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/lock_put.html b/db/docs/api_c/lock_put.html index 9912df5d5..5ef6422db 100644 --- a/db/docs/api_c/lock_put.html +++ b/db/docs/api_c/lock_put.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->lock_put - + -

DB_ENV->lock_put

API -Ref -
+Ref +


@@ -34,15 +33,14 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

lock
-The lock parameter is the lock to be released. +
+
lock
The lock parameter is the lock to be released.

Errors

The DB_ENV->lock_put method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -54,6 +52,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/lock_stat.html b/db/docs/api_c/lock_stat.html index 8c86bde6f..74f27988e 100644 --- a/db/docs/api_c/lock_stat.html +++ b/db/docs/api_c/lock_stat.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->lock_stat - + -

DB_ENV->lock_stat

API -Ref -
+Ref +


@@ -25,6 +24,9 @@
 

int DB_ENV->lock_stat(DB_ENV *env, DB_LOCK_STAT **statp, u_int32_t flags); +

+int +DB_ENV->lock_stat_print(DB_ENV *env, u_int32_t flags);


Description: DB_ENV->lock_stat

@@ -40,7 +42,7 @@ responsible for deallocating the memory. To deallocate the memory, free the memory reference; references inside the returned memory need not be individually freed.

The following DB_LOCK_STAT fields will be filled in:

-

+
u_int32_t st_id;
The last allocated locker ID.
u_int32_t st_cur_maxid;
The current maximum unused locker ID.
u_int32_t st_nmodes;
The number of lock modes. @@ -65,7 +67,7 @@ individually freed.

u_int32_t st_ntxntimeouts;
The number of transactions that have timed out. This value is also a component of st_ndeadlocks, the total number of deadlocks detected. -
u_int32_t st_regsize;
The size of the lock region. +
roff_t st_regsize;
The size of the lock region, in bytes.
u_int32_t st_region_wait;
The number of times that a thread of control was forced to wait before obtaining the region lock.
u_int32_t st_region_nowait;
The number of times that a thread of control was able to obtain @@ -76,25 +78,45 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to 0 or +
+
flags
The flags parameter must be set to 0 or the following value: -

-

DB_STAT_CLEAR
Reset statistics after returning their values. +
+
DB_STAT_CLEAR
Reset statistics after returning their values.
-

statp
-The statp parameter references memory into which +
statp
The statp parameter references memory into which a pointer to the allocated statistics structure is copied.

Errors

The DB_ENV->lock_stat method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

+

Description: DB_ENV->lock_stat_print

+

The DB_ENV->lock_stat_print method prints diagnostic information to the output +channel described by the DB_ENV->set_msgfile method.

+

The DB_ENV->lock_stat_print method may not be called before the DB_ENV->open method has +been called.

+

The DB_ENV->lock_stat_print method +returns a non-zero error value on failure +and 0 on success. +

+

Parameters

+
+
flags
The flags parameter must be set by bitwise inclusively OR'ing together one or more +of the following values: +
+
DB_STAT_ALL
Display all available information. +
DB_STAT_LOCK_CONF
Display the lock conflict matrix. +
DB_STAT_LOCK_LOCKERS
Display the lockers within hash chains. +
DB_STAT_LOCK_OBJECTS
Display the lock objects within hash chains. +
DB_STAT_LOCK_PARAMS
Display the locking subsystem parameters. +
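A sketch combining the two calls, assuming an opened DB_ENV handle named dbenv and the default memory allocator (so the statistics structure is released with free):

#include <stdio.h>
#include <stdlib.h>
#include <db.h>

/* Sketch: print two fields by hand, then let the library print its full report. */
int
show_lock_stats(DB_ENV *dbenv)
{
    DB_LOCK_STAT *sp;
    int ret;

    if ((ret = dbenv->lock_stat(dbenv, &sp, 0)) != 0)
        return (ret);
    printf("lock region size: %lu bytes\n", (unsigned long)sp->st_regsize);
    printf("deadlocks detected: %lu\n", (unsigned long)sp->st_ndeadlocks);
    free(sp);

    return (dbenv->lock_stat_print(dbenv, DB_STAT_ALL));
}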
+
+

Class

DB_ENV, DB_LOCK

See Also

@@ -103,6 +125,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/lock_vec.html b/db/docs/api_c/lock_vec.html index acbe1b22d..91df52008 100644 --- a/db/docs/api_c/lock_vec.html +++ b/db/docs/api_c/lock_vec.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->lock_vec - + -

DB_ENV->lock_vec

API -Ref -
+Ref +


@@ -37,34 +36,30 @@ escalation.

If any of the requested locks cannot be acquired, or any of the locks to be released cannot be released, the operations before the failing operation are guaranteed to have completed successfully, and -DB_ENV->lock_vec returns a non-zero value. In addition, if elistp -is not NULL, it is set to point to the DB_LOCKREQ entry that was being -processed when the error occurred.

+DB_ENV->lock_vec returns a non-zero value. In addition, if +elistp is not NULL, it is set to point to the DB_LOCKREQ entry +that was being processed when the error occurred.

Unless otherwise specified, the DB_ENV->lock_vec method returns a non-zero error value on failure and 0 on success.

Parameters

-

-

elistp
-If an error occurs, and the elistp parameter is non-NULL, it +
+
elistp
If an error occurs, and the elistp parameter is non-NULL, it is set to point to the DB_LOCKREQ entry that was being processed when the error occurred. -

flags
-The flags parameter must be set to 0 or +
flags
The flags parameter must be set to 0 or the following value: -

-

DB_LOCK_NOWAIT
If a lock cannot be granted because the requested lock conflicts with +
+
DB_LOCK_NOWAIT
If a lock cannot be granted because the requested lock conflicts with an existing lock, return DB_LOCK_NOTGRANTED immediately instead of waiting for the lock to become available. In this case, if non-NULL, elistp identifies the request that was not granted.
-

locker
-The locker parameter is an unsigned 32-bit integer quantity. It +
locker
The locker parameter is an unsigned 32-bit integer quantity. It represents the entity requesting or releasing the lock. -

list
-The list array provided to DB_ENV->lock_vec is typedef'd as +
list
The list array provided to DB_ENV->lock_vec is typedef'd as DB_LOCKREQ.

To ensure compatibility with future releases of Berkeley DB, all fields of the DB_LOCKREQ structure that are not explicitly set should @@ -72,46 +67,46 @@ be initialized to 0 before the first time the structure is used. Do this by declaring the structure external or static, or by calling memset(3).

A DB_LOCKREQ structure has at least the following fields:

-

-

lockop_t op;
The operation to be performed, which must be set to one of the +
+
lockop_t op;
The operation to be performed, which must be set to one of the following values: -

-

DB_LOCK_GET
Get the lock defined by the values of the mode and obj +
+
DB_LOCK_GET
Get the lock defined by the values of the mode and obj structure fields, for the specified locker. Upon return from DB_ENV->lock_vec, if the lock field is non-NULL, a reference to the acquired lock is stored there. (This reference is invalidated by any call to DB_ENV->lock_vec or DB_ENV->lock_put that releases the lock.) -

DB_LOCK_GET_TIMEOUT
Identical to DB_LOCK_GET except that the value in the timeout +
DB_LOCK_GET_TIMEOUT
Identical to DB_LOCK_GET except that the value in the timeout structure field overrides any previously specified timeout value for this lock. A value of 0 turns off any previously specified timeout. -

DB_LOCK_PUT
The lock to which the lock structure field refers is released. +
DB_LOCK_PUT
The lock to which the lock structure field refers is released. The locker parameter, and mode and obj fields are ignored. -

DB_LOCK_PUT_ALL
All locks held by the specified locker are released. The +
DB_LOCK_PUT_ALL
All locks held by the specified locker are released. The lock, mode, and obj structure fields are ignored. Locks acquired in operations performed by the current call to DB_ENV->lock_vec which appear before the DB_LOCK_PUT_ALL operation are released; those acquired in operations appearing after the DB_LOCK_PUT_ALL operation are not released. -

DB_LOCK_PUT_OBJ
All locks held on obj are released. The locker +
DB_LOCK_PUT_OBJ
All locks held on obj are released. The locker parameter and the lock and mode structure fields are ignored. Locks acquired in operations performed by the current call to DB_ENV->lock_vec that appear before the DB_LOCK_PUT_OBJ operation are released; those acquired in operations appearing after the DB_LOCK_PUT_OBJ operation are not released. -

DB_LOCK_TIMEOUT
Cause the specified locker to timeout immediately. If the +
DB_LOCK_TIMEOUT
Cause the specified locker to timeout immediately. If the database environment has not configured automatic deadlock detection, the transaction will timeout the next time deadlock detection is performed. As transactions acquire locks on behalf of a single locker ID, timing out the locker ID associated with a transaction will time out the transaction itself.
-

DB_LOCK lock;
A lock reference. -

const lockmode_t mode;
The lock mode, used as an index into the environment's lock conflict matrix. +
DB_LOCK lock;
A lock reference. +
const lockmode_t mode;
The lock mode, used as an index into the environment's lock conflict matrix. When using the default lock conflict matrix, mode must be set to one of the following values: -

+
DB_LOCK_READ
read (shared)
DB_LOCK_WRITE
write (exclusive)
DB_LOCK_IWRITE
intention to write (shared) @@ -119,36 +114,35 @@ of the following values:
DB_LOCK_IWR
intention to read and write (shared)

See DB_ENV->set_lk_conflicts and Standard Lock Modes for more information on the lock conflict matrix.

-

const DBT obj;
An untyped byte string that specifies the object to be locked or +
const DBT obj;
An untyped byte string that specifies the object to be locked or released. Applications using the locking subsystem directly while also doing locking via the Berkeley DB access methods must take care not to inadvertently lock objects that happen to be equal to the unique file IDs used to lock files. See Access method locking conventions for more information. -

u_int32_t timeout;
The lock timeout value. +
u_int32_t timeout;
The lock timeout value.
-

nlist
-The nlist parameter specifies the number of elements in the +
nlist
The nlist parameter specifies the number of elements in the list array.
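As a sketch of the batched interface, the DB_LOCK_PUT_ALL operation needs nothing but the op field, which keeps the example independent of how objects were originally named; dbenv and locker are assumed to exist already:

#include <string.h>
#include <db.h>

/* Sketch: release every lock held by a locker in a single call. */
int
release_all_locks(DB_ENV *dbenv, u_int32_t locker)
{
    DB_LOCKREQ req;

    memset(&req, 0, sizeof(req));
    req.op = DB_LOCK_PUT_ALL;   /* lock, mode and obj are ignored */

    return (dbenv->lock_vec(dbenv, locker, 0, &req, 1, NULL));
}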

Errors

The DB_ENV->lock_vec method may fail and return one of the following non-zero errors:

-

-

DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve +
+
DB_LOCK_DEADLOCK
A transactional database environment operation was selected to resolve a deadlock. -

DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable +
DB_LOCK_NOTGRANTED
A Berkeley DB Concurrent Data Store database environment configured for lock timeouts was unable to grant a lock in the allowed time.
-

-

DB_LOCK_NOTGRANTED
The DB_LOCK_NOWAIT flag or lock timers were configured and the lock could not be granted before the wait-time expired. +
+
DB_LOCK_NOTGRANTED
The DB_LOCK_NOWAIT flag or lock timers were configured and the lock could not be granted before the wait-time expired.
-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.
-

-

ENOMEM
The maximum number of locks has been reached. +
+
ENOMEM
The maximum number of locks has been reached.

Class

@@ -159,6 +153,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/log_archive.html b/db/docs/api_c/log_archive.html index 5faf3d1fd..a2397bedf 100644 --- a/db/docs/api_c/log_archive.html +++ b/db/docs/api_c/log_archive.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->log_archive - + -

DB_ENV->log_archive

API -Ref -
+Ref +


@@ -33,8 +32,8 @@ DB_ENV->log_archive(DB_ENV *env, char *(*listp)[], u_int32_t flags);
 files that are no longer in use (for example, that are no longer
 involved in active transactions), and that may safely be archived for
 catastrophic recovery and then removed from the system.  If there are
-no filenames to return,
-the memory location to which listp refers will be set to NULL.

+no filenames to return, the memory location to which listp +refers will be set to NULL.

Arrays of log filenames are stored in allocated memory. If application-specific allocation routines have been declared (see DB_ENV->set_alloc for more information), they are used to allocate the memory; otherwise, the @@ -67,33 +66,29 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
+
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_ARCH_ABS
All pathnames are returned as absolute pathnames, instead of relative +
+
DB_ARCH_ABS
All pathnames are returned as absolute pathnames, instead of relative to the database home directory. -

DB_ARCH_DATA
Return the database files that need to be archived in order to recover +
DB_ARCH_DATA
Return the database files that need to be archived in order to recover the database from catastrophic failure. If any of the database files have not been accessed during the lifetime of the current log files, DB_ENV->log_archive will not include them in this list. It is also possible that some of the files referred to by the log have since been deleted from the system. -

The DB_ARCH_DATA and DB_ARCH_LOG flags are mutually -exclusive.

-

DB_ARCH_LOG
Return all the log filenames, regardless of whether or not they are in +

The DB_ARCH_DATA and DB_ARCH_LOG flags are mutually exclusive.

+
DB_ARCH_LOG
Return all the log filenames, regardless of whether or not they are in use. -

The DB_ARCH_DATA and DB_ARCH_LOG flags are mutually -exclusive.

-

DB_ARCH_REMOVE
Remove log files that are no longer needed; no filenames are returned. +

The DB_ARCH_DATA and DB_ARCH_LOG flags are mutually exclusive.

+
DB_ARCH_REMOVE
Remove log files that are no longer needed; no filenames are returned. Automatic log file removal is likely to make catastrophic recovery impossible.

The DB_ARCH_REMOVE flag may not be specified with any other flag.

-

listp
-The listp parameter references memory into which the allocated +
listp
The listp parameter references memory into which the allocated array of log or database filenames is copied. If there are no filenames to return, the memory location to which listp refers will be set to NULL. @@ -101,8 +96,8 @@ to NULL.
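A sketch of the usual pattern, assuming an opened DB_ENV handle named dbenv and the default allocator; one free releases the whole returned array:

#include <stdio.h>
#include <stdlib.h>
#include <db.h>

/* Sketch: list, as absolute paths, the log files that are no longer in use. */
int
list_unneeded_logs(DB_ENV *dbenv)
{
    char **begin, **list;
    int ret;

    if ((ret = dbenv->log_archive(dbenv, &list, DB_ARCH_ABS)) != 0)
        return (ret);
    if (list != NULL) {
        for (begin = list; *list != NULL; ++list)
            printf("may be archived: %s\n", *list);
        free(begin);
    }
    return (0);
}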

Errors

The DB_ENV->log_archive method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -114,6 +109,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/log_compare.html b/db/docs/api_c/log_compare.html index c2a0decb7..da9b9a97c 100644 --- a/db/docs/api_c/log_compare.html +++ b/db/docs/api_c/log_compare.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: log_compare - + -

log_compare

API -Ref -
+Ref +


@@ -33,13 +32,11 @@ DB_LSN structures,
 returning 0 if they are equal, 1 if lsn0 is greater than
 lsn1, and -1 if lsn0 is less than lsn1.

Parameters

-

-

lsn0
-The lsn0 parameter is one of the +
+
lsn0
The lsn0 parameter is one of the DB_LSN structures to be compared. -

lsn1
-The lsn1 parameter is one of the +
lsn1
The lsn1 parameter is one of the DB_LSN structures to be compared.
@@ -52,6 +49,6 @@ to be compared.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/log_cursor.html b/db/docs/api_c/log_cursor.html index 4a4f47e38..f7e104755 100644 --- a/db/docs/api_c/log_cursor.html +++ b/db/docs/api_c/log_cursor.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->log_cursor - + -

DB_ENV->log_cursor

API -Ref -
+Ref +


@@ -34,18 +33,16 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

cursorp
-The cursorp parameter references memory into which +
+
cursorp
The cursorp parameter references memory into which a pointer to the created log cursor is copied. -

flags
-The flags parameter is currently unused, and must be set to 0. +
flags
The flags parameter is currently unused, and must be set to 0.

Errors

The DB_ENV->log_cursor method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -57,6 +54,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/log_file.html b/db/docs/api_c/log_file.html index 3bac18bfd..f19343cb2 100644 --- a/db/docs/api_c/log_file.html +++ b/db/docs/api_c/log_file.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->log_file - + -

DB_ENV->log_file

API -Ref -
+Ref +


@@ -45,16 +44,13 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

lsn
-The lsn parameter is the +
+
lsn
The lsn parameter is the DB_LSN structure for which a filename is wanted. -

namep
-The namep parameter references memory into which +
namep
The namep parameter references memory into which the name of the file containing the record named by lsn is copied. -

len
-The len parameter is the length of the namep buffer in +
len
The len parameter is the length of the namep buffer in bytes. If namep is too short to hold the filename, DB_ENV->log_file will fail. (Log filenames are normally quite short, on the order of 10 characters.) @@ -62,8 +58,9 @@ on the order of 10 characters.)
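A brief sketch, assuming an opened DB_ENV handle named dbenv and a caller-supplied DB_LSN; the 1024-byte buffer is an arbitrary but comfortable size:

#include <stdio.h>
#include <db.h>

/* Sketch: map an LSN to the name of the log file holding it. */
int
print_log_file_name(DB_ENV *dbenv, DB_LSN *lsn)
{
    char name[1024];
    int ret;

    if ((ret = dbenv->log_file(dbenv, lsn, name, sizeof(name))) != 0)
        return (ret);
    printf("LSN [%lu][%lu] is in %s\n",
        (unsigned long)lsn->file, (unsigned long)lsn->offset, name);
    return (0);
}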

Errors

The DB_ENV->log_file method may fail and return one of the following non-zero errors:

-

-

ENOMEM
The supplied buffer was too small to hold the log filename. +
+
EINVAL
If the supplied buffer was too small to hold the log filename; or if an +invalid flag value or parameter was specified.

Class

@@ -74,6 +71,6 @@ may fail and return one of the following non-zero errors:


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/log_flush.html b/db/docs/api_c/log_flush.html index f7a1c2f16..b4b853432 100644 --- a/db/docs/api_c/log_flush.html +++ b/db/docs/api_c/log_flush.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->log_flush - + -

DB_ENV->log_flush

API -Ref -
+Ref +


@@ -34,17 +33,16 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

lsn
-All log records with DB_LSN values less than or equal to the +
+
lsn
All log records with DB_LSN values less than or equal to the lsn parameter are written to disk. If lsn is NULL, all records in the log are flushed.

Errors

The DB_ENV->log_flush method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -56,6 +54,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/log_list.html b/db/docs/api_c/log_list.html index 98095dbab..173ec69cc 100644 --- a/db/docs/api_c/log_list.html +++ b/db/docs/api_c/log_list.html @@ -1,32 +1,33 @@ - + Berkeley DB: Berkeley DB: Logging Subsystem and Related Methods - +

Berkeley DB: Logging Subsystem and Related Methods

- + - - - - - - - - - - - - - + + + + + + + + + + + + + +
Logging Subsystem and Related Methods - Description
DB_ENV->log_archive - List log and database files
DB_ENV->log_cursor - Create a log cursor handle
DB_ENV->log_file - Map Log Sequence Numbers to log files
DB_ENV->log_flush - Flush log records
DB_ENV->log_put - Write a log record
DB_ENV->set_lg_bsize - Set log buffer size
DB_ENV->set_lg_dir - Set the environment logging directory
DB_ENV->set_lg_max - Set log file size
DB_ENV->set_lg_regionmax - Set logging region size
log_compare - Compare two Log Sequence Numbers
DB_ENV->log_stat - Return log subsystem statistics
DB_LOGC->close - Close a log cursor
DB_LOGC->get - Retrieve a log record
DB_LSN - Log Sequence Numbers
log_compare - Compare two Log Sequence Numbers
DB_ENV->log_archive - List log and database files
DB_ENV->log_cursor - Create a log cursor handle
DB_ENV->log_file - Map Log Sequence Numbers to log files
DB_ENV->log_flush - Flush log records
DB_ENV->log_put - Write a log record
DB_ENV->log_stat - Return log subsystem statistics
DB_ENV->set_lg_bsize - Set log buffer size
DB_ENV->set_lg_dir - Set the environment logging directory
DB_ENV->set_lg_max - Set log file size
DB_ENV->set_lg_regionmax - Set logging region size
DB_LOGC->close - Close a log cursor
DB_LOGC->get - Retrieve a log record
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/log_put.html b/db/docs/api_c/log_put.html index c1f74f3bd..ee4163ffc 100644 --- a/db/docs/api_c/log_put.html +++ b/db/docs/api_c/log_put.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->log_put - + -

DB_ENV->log_put

API -Ref -
+Ref +


@@ -36,9 +35,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

data
-The data parameter is the record to write to the log. +
+
data
The data parameter is the record to write to the log.

The caller is responsible for providing any necessary structure to data. (For example, in a write-ahead logging protocol, the application must understand what part of data is an operation @@ -47,23 +45,21 @@ In addition, most transaction managers will store in data the DB_LSN of the previous log record for the same transaction, to support chaining back through the transaction's log records during undo.)

-

flags
-The flags parameter must be set to 0 or +
flags
The flags parameter must be set to 0 or the following value: -

-

DB_FLUSH
The log is forced to disk after this record is written, guaranteeing +
+
DB_FLUSH
The log is forced to disk after this record is written, guaranteeing that all records with DB_LSN values less than or equal to the one being "put" are on disk before DB_ENV->log_put returns.
-

lsn
-The lsn parameter references memory into which +
lsn
The lsn parameter references memory into which the DB_LSN of the put record is copied.
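A sketch of appending an application record, assuming an opened, logging-enabled DB_ENV handle named dbenv; because DB_FLUSH is used, no separate DB_ENV->log_flush call is needed here:

#include <string.h>
#include <db.h>

/* Sketch: append an application-formatted record and force it to disk. */
int
log_app_record(DB_ENV *dbenv, void *buf, u_int32_t len)
{
    DBT data;
    DB_LSN lsn;

    memset(&data, 0, sizeof(data));
    data.data = buf;
    data.size = len;

    return (dbenv->log_put(dbenv, &lsn, &data, DB_FLUSH));
}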

Errors

The DB_ENV->log_flush method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the record to be logged is larger than the maximum log record; or if an +
+
EINVAL
If the record to be logged is larger than the maximum log record; or if an invalid flag value or parameter was specified.

@@ -75,6 +71,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/log_stat.html b/db/docs/api_c/log_stat.html index d4d53fb96..707930b09 100644 --- a/db/docs/api_c/log_stat.html +++ b/db/docs/api_c/log_stat.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->log_stat - + -

DB_ENV->log_stat

API -Ref -
+Ref +


@@ -25,6 +24,9 @@
 

int DB_ENV->log_stat(DB_ENV *env, DB_LOG_STAT **statp, u_int32_t flags); +

+int +DB_ENV->log_stat_print(DB_ENV *env, u_int32_t flags);


Description: DB_ENV->log_stat

@@ -40,7 +42,7 @@ responsible for deallocating the memory. To deallocate the memory, free the memory reference; references inside the returned memory need not be individually freed.

The following DB_LOG_STAT fields will be filled in:

-

+
u_int32_t st_magic;
The magic number that identifies a file as a log file.
u_int32_t st_version;
The version of the log file type.
int st_mode;
The mode of any created log files. @@ -62,7 +64,7 @@ in-memory log record cache filled up.
u_int32_t st_maxcommitperflush;
The maximum number of commits contained in a single log flush.
u_int32_t st_mincommitperflush;
The minimum number of commits contained in a single log flush that contained a commit. -
u_int32_t st_regsize;
The size of the region. +
roff_t st_regsize;
The size of the region, in bytes.
u_int32_t st_region_wait;
The number of times that a thread of control was forced to wait before obtaining the region lock.
u_int32_t st_region_nowait;
The number of times that a thread of control was able to obtain @@ -73,25 +75,45 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to 0 or +
+
flags
The flags parameter must be set to 0 or the following value: -

-

DB_STAT_CLEAR
Reset statistics after returning their values. +
+
DB_STAT_CLEAR
Reset statistics after returning their values.
-

statp
-The statp parameter references memory into which +
statp
The statp parameter references memory into which a pointer to the allocated statistics structure is copied.

Errors

The DB_ENV->log_stat method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

+

Description: DB_ENV->log_stat_print

+

The DB_ENV->log_stat_print method returns the +logging subsystem statistical information, as described for the DB_ENV->log_stat method. +The information is printed to a specified output channel (see the +DB_ENV->set_msgfile method for more information), or passed to an +application callback function (see the DB_ENV->set_msgcall method for +more information).

+

The DB_ENV->log_stat_print method may not be called before the DB_ENV->open method has +been called.

+

The DB_ENV->log_stat_print method +returns a non-zero error value on failure +and 0 on success. +

+

Parameters

+
+
flags
The flags parameter must be set to 0 or +the following value: +
+
DB_STAT_ALL
Display all available information. +
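A sketch combining both calls, assuming an opened DB_ENV handle named dbenv and the default allocator:

#include <stdio.h>
#include <stdlib.h>
#include <db.h>

/* Sketch: print two logging statistics, then the library's own report. */
int
show_log_stats(DB_ENV *dbenv)
{
    DB_LOG_STAT *sp;
    int ret;

    if ((ret = dbenv->log_stat(dbenv, &sp, 0)) != 0)
        return (ret);
    printf("log file version: %lu\n", (unsigned long)sp->st_version);
    printf("log region size: %lu bytes\n", (unsigned long)sp->st_regsize);
    free(sp);

    return (dbenv->log_stat_print(dbenv, 0));
}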
+
+

Class

DB_ENV, DB_LOGC, DB_LSN

See Also

@@ -100,6 +122,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/logc_class.html b/db/docs/api_c/logc_class.html index f0bd8af30..bd24104a9 100644 --- a/db/docs/api_c/logc_class.html +++ b/db/docs/api_c/logc_class.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_LOGC - + -

DB_LOGC

API -Ref -
+Ref +


@@ -41,6 +40,6 @@ return.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/logc_close.html b/db/docs/api_c/logc_close.html index efb3d9f8a..4b826e8cf 100644 --- a/db/docs/api_c/logc_close.html +++ b/db/docs/api_c/logc_close.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_LOGC->close - + -

DB_LOGC->close

API -Ref -
+Ref +


@@ -36,15 +35,14 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

flags
-The flags parameter is currently unused, and must be set to 0. +
+
flags
The flags parameter is currently unused, and must be set to 0.

Errors

The DB_LOGC->close method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the cursor was previously closed; or if an +
+
EINVAL
If the cursor was previously closed; or if an invalid flag value or parameter was specified.

@@ -56,6 +54,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/logc_get.html b/db/docs/api_c/logc_get.html index 72bc0996b..508420987 100644 --- a/db/docs/api_c/logc_get.html +++ b/db/docs/api_c/logc_get.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_LOGC->get - + -

DB_LOGC->get

API -Ref -
+Ref +


@@ -34,56 +33,49 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

data
-The data field of the data structure is set to the record +
+
data
The data field of the data structure is set to the record retrieved, and the size field indicates the number of bytes in the record. See DBT for a description of other fields in the data structure. The DB_DBT_MALLOC, DB_DBT_REALLOC and DB_DBT_USERMEM flags may be specified for any DBT used for data retrieval. -

flags
-The flags parameter must be set to one of the following values: -

-

DB_CURRENT
Return the log record to which the log currently refers. -

DB_FIRST
The first record from any of the log files found in the log directory +
flags
The flags parameter must be set to one of the following values: +
+
DB_CURRENT
Return the log record to which the log currently refers. +
DB_FIRST
The first record from any of the log files found in the log directory is returned in the data parameter. The lsn parameter is overwritten with the DB_LSN of the record returned. -

-The DB_LOGC->get method will return DB_NOTFOUND if DB_FIRST is set and the log is empty. +

The DB_LOGC->get method will return DB_NOTFOUND if DB_FIRST is set and the log is empty.

-

DB_LAST
The last record in the log is returned in the data parameter. +
DB_LAST
The last record in the log is returned in the data parameter. The lsn parameter is overwritten with the DB_LSN of the record returned. -

-The DB_LOGC->get method will return DB_NOTFOUND if DB_LAST is set and the log is empty. +

The DB_LOGC->get method will return DB_NOTFOUND if DB_LAST is set and the log is empty.

-

DB_NEXT
The current log position is advanced to the next record in the log, and +
DB_NEXT
The current log position is advanced to the next record in the log, and that record is returned in the data parameter. The lsn parameter is overwritten with the DB_LSN of the record returned.

If the cursor has not been initialized via DB_FIRST, DB_LAST, DB_SET, DB_NEXT, or DB_PREV, DB_LOGC->get will return the first record in the log.

-

-The DB_LOGC->get method will return DB_NOTFOUND if DB_NEXT is set and the last log record has already been +

The DB_LOGC->get method will return DB_NOTFOUND if DB_NEXT is set and the last log record has already been returned or the log is empty.

-

DB_PREV
The current log position is advanced to the previous record in the log, +
DB_PREV
The current log position is advanced to the previous record in the log, and that record is returned in the data parameter. The lsn parameter is overwritten with the DB_LSN of the record returned.

If the cursor has not been initialized via DB_FIRST, DB_LAST, DB_SET, DB_NEXT, or DB_PREV, DB_LOGC->get will return the last record in the log.

-

-The DB_LOGC->get method will return DB_NOTFOUND if DB_PREV is set and the first log record has already been +

The DB_LOGC->get method will return DB_NOTFOUND if DB_PREV is set and the first log record has already been returned or the log is empty.

-

DB_SET
Retrieve the record specified by the lsn parameter. +
DB_SET
Retrieve the record specified by the lsn parameter.
-

lsn
-When the flag parameter is set to DB_CURRENT, +
lsn
When the flag parameter is set to DB_CURRENT, DB_FIRST, DB_LAST, DB_NEXT or DB_PREV, the lsn parameter is overwritten with the DB_LSN value of the record retrieved. When flag is set to DB_SET, @@ -93,8 +85,8 @@ be retrieved.
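A sketch of a forward scan over the whole log, assuming an opened DB_ENV handle named dbenv; the cursor comes from DB_ENV->log_cursor and DB_NOTFOUND marks the end of the log:

#include <stdio.h>
#include <string.h>
#include <db.h>

/* Sketch: count the records in the log by walking it with DB_NEXT. */
int
count_log_records(DB_ENV *dbenv)
{
    DBT data;
    DB_LOGC *logc;
    DB_LSN lsn;
    unsigned long count;
    int ret, t_ret;

    if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
        return (ret);

    memset(&data, 0, sizeof(data));
    for (count = 0;
        (ret = logc->get(logc, &lsn, &data, DB_NEXT)) == 0; ++count)
        ;                       /* lsn names the record just returned */
    if (ret == DB_NOTFOUND)     /* walked off the end of the log */
        ret = 0;

    if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
        ret = t_ret;
    printf("%lu log records\n", count);
    return (ret);
}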

Errors

The DB_LOGC->get method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the DB_CURRENT flag was set and the log cursor has not yet +
+
EINVAL
If the DB_CURRENT flag was set and the log cursor has not yet been initialized; the DB_CURRENT, DB_NEXT, or DB_PREV flags were set and the log was opened with the DB_THREAD flag set; @@ -111,6 +103,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/lsn_class.html b/db/docs/api_c/lsn_class.html index 444823f85..b6bce7a51 100644 --- a/db/docs/api_c/lsn_class.html +++ b/db/docs/api_c/lsn_class.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_LSN - + -

DB_LSN

API -Ref -
+Ref +


@@ -40,6 +39,6 @@ the other specifies an offset in the log file.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/memp_fclose.html b/db/docs/api_c/memp_fclose.html index f2481d782..2b3d5a002 100644 --- a/db/docs/api_c/memp_fclose.html +++ b/db/docs/api_c/memp_fclose.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_MPOOLFILE->close - + -

DB_MPOOLFILE->close

API -Ref -
+Ref +


@@ -41,9 +40,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

flags
-The flags parameter is currently unused, and must be set to 0. +
+
flags
The flags parameter is currently unused, and must be set to 0.

Class

@@ -54,6 +52,6 @@ The flags parameter is currently unused, and must be set to 0.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/memp_fcreate.html b/db/docs/api_c/memp_fcreate.html index f7e810e6d..c6aa1c421 100644 --- a/db/docs/api_c/memp_fcreate.html +++ b/db/docs/api_c/memp_fcreate.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->memp_fcreate - + -

DB_ENV->memp_fcreate

API -Ref -
+Ref +


@@ -37,9 +36,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

flags
-The flags parameter is currently unused, and must be set to 0. +
+
flags
The flags parameter is currently unused, and must be set to 0.

Class

@@ -50,6 +48,6 @@ The flags parameter is currently unused, and must be set to 0.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/memp_fget.html b/db/docs/api_c/memp_fget.html index 331dd428f..7318f7e09 100644 --- a/db/docs/api_c/memp_fget.html +++ b/db/docs/api_c/memp_fget.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_MPOOLFILE->get - + -

DB_MPOOLFILE->get

API -Ref -
+Ref +


@@ -37,9 +36,8 @@ DB_MPOOLFILE->get(DB_MPOOLFILE *mpf,
 

Fully or partially created pages have all their bytes set to a nul byte, unless the DB_MPOOLFILE->set_clear_len method was called to specify other behavior before the file was opened.

- -

-The DB_MPOOLFILE->get method + +

The DB_MPOOLFILE->get method will return DB_PAGE_NOTFOUND if the requested page does not exist and DB_MPOOL_CREATE was not set. Unless otherwise specified, the DB_MPOOLFILE->get method @@ -47,28 +45,25 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
+
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_MPOOL_CREATE
If the specified page does not exist, create it. In this case, the +
+
DB_MPOOL_CREATE
If the specified page does not exist, create it. In this case, the pgin method, if specified, is called. -

DB_MPOOL_LAST
Return the last page of the source file, and copy its page number into +
DB_MPOOL_LAST
Return the last page of the source file, and copy its page number into the memory location to which pgnoaddr refers. -

DB_MPOOL_NEW
Create a new page in the file, and copy its page number into the memory +
DB_MPOOL_NEW
Create a new page in the file, and copy its page number into the memory location to which pgnoaddr refers. In this case, the pgin method, if specified, is not called.

The DB_MPOOL_CREATE, DB_MPOOL_LAST, and DB_MPOOL_NEW flags are mutually exclusive.

-

pagep
-The pagep parameter references memory into which +
pagep
The pagep parameter references memory into which a pointer to the returned page is copied. -

pgnoaddr
-If the flags parameter is set to DB_MPOOL_LAST or +
pgnoaddr
If the flags parameter is set to DB_MPOOL_LAST or DB_MPOOL_NEW, the page number of the created page is copied into the memory location to which the pgnoaddr parameter refers. Otherwise, the pgnoaddr parameter is the page to @@ -79,19 +74,19 @@ number 0, not page number 1.

Errors

The DB_MPOOLFILE->get method may fail and return one of the following non-zero errors:

-

-

EAGAIN
The page reference count has overflowed. (This should never happen +
+
EAGAIN
The page reference count has overflowed. (This should never happen unless there is a bug in the application.)
-

-

EINVAL
If the DB_MPOOL_NEW flag was set, and the source file was not +
+
EINVAL
If the DB_MPOOL_NEW flag was set, and the source file was not opened for writing; more than one of DB_MPOOL_CREATE, DB_MPOOL_LAST, and DB_MPOOL_NEW was set; or if an invalid flag value or parameter was specified.
-

-

ENOMEM
The cache is full, and no more pages will fit in the pool. +
+
ENOMEM
The cache is full, and no more pages will fit in the pool.
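Illustration only (not part of the change): a minimal sketch of fetching, and if necessary creating, page 0 of a file already opened in the cache. The fetch_first_page name is hypothetical, and the argument order follows the 4.3-era DB_MPOOLFILE->get prototype shown above.

    #include <db.h>

    /* Fetch (or create) page 0 of a file opened in the memory pool. */
    int
    fetch_first_page(DB_MPOOLFILE *mpf, void **pagep)
    {
        db_pgno_t pgno;

        pgno = 0;        /* Mpool file pages are numbered from 0. */

        /*
         * The returned page is pinned in the cache until it is handed
         * back with DB_MPOOLFILE->put.
         */
        return (mpf->get(mpf, &pgno, DB_MPOOL_CREATE, pagep));
    }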

Class

@@ -102,6 +97,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/memp_fopen.html b/db/docs/api_c/memp_fopen.html index e237f68e1..c41ee9ce0 100644 --- a/db/docs/api_c/memp_fopen.html +++ b/db/docs/api_c/memp_fopen.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_MPOOLFILE->open - + -

DB_MPOOLFILE->open

API -Ref -
+Ref +


@@ -35,43 +34,44 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

file
-The file parameter is the name of the file to be opened. If +
+
file
The file parameter is the name of the file to be opened. If file is NULL, a private temporary file is created that cannot be shared with any other process (although it may be shared with other threads of control in the same process). -

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +

On Windows, the file argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters.

+
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_CREATE
Create any underlying files, as necessary. If the files do not already +
+
DB_CREATE
Create any underlying files, as necessary. If the files do not already exist and the DB_CREATE flag is not specified, the call will fail. - -

DB_DIRECT
If set and supported by the system, turn off system buffering of the + +
DB_DIRECT
If set and supported by the system, turn off system buffering of the file to avoid double caching. -

DB_NOMMAP
Always copy this file into the local cache instead of potentially mapping +
DB_NOMMAP
Always copy this file into the local cache instead of potentially mapping it into process memory (see the description of the DB_ENV->set_mp_mmapsize method for further information). -

DB_ODDFILESIZE
Attempts to open files which are not a multiple of the page size in +
DB_ODDFILESIZE
Attempts to open files which are not a multiple of the page size in length will fail, by default. If the DB_ODDFILESIZE flag is set, any partial page at the end of the file will be ignored and the open will proceed. -

DB_RDONLY
Open any underlying files for reading only. Any attempt to write the file +
DB_RDONLY
Open any underlying files for reading only. Any attempt to write the file using the pool functions will fail, regardless of the actual permissions of the file.
-

mode
-On UNIX systems or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by -DB_MPOOLFILE->open are created with mode mode (as described in chmod(2)) and modified by the process' umask value at the time of creation -(see umask(2)). If mode is 0, DB_MPOOLFILE->open will use a default -mode of readable and writable by both owner and group. On Windows -systems, the mode parameter is ignored. The group ownership of created -files is based on the system and directory defaults, and is not further -specified by Berkeley DB. -

pagesize
-The pagesize parameter is the size, in bytes, of the unit of +
mode
On Windows systems, the mode parameter is ignored. +

On UNIX systems or in IEEE/ANSI Std 1003.1 (POSIX) environments, files created by DB_MPOOLFILE->open +are created with mode mode (as described in chmod(2)) +and modified by the process' umask value at the time of creation (see +umask(2)). Created files are owned by the process owner; the +group ownership of created files is based on the system and directory +defaults, and is not further specified by Berkeley DB. System shared memory +segments created by DB_MPOOLFILE->open are created with mode mode, unmodified +by the process' umask value. If mode is 0, DB_MPOOLFILE->open will use a +default mode of readable and writable by both owner and group.

+
pagesize
The pagesize parameter is the size, in bytes, of the unit of transfer between the application and the cache, although it is not necessarily the unit of transfer between the cache and the underlying filesystem. @@ -79,15 +79,15 @@ filesystem.

Errors

The DB_MPOOLFILE->open method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the file has already been entered into the pool, and the pagesize +
+
EINVAL
If the file has already been entered into the pool, and the pagesize value is not the same as when the file was entered into the pool, or the length of the file is not zero or a multiple of the pagesize; the DB_RDONLY flag was specified for an in-memory pool; or if an invalid flag value or parameter was specified.
-

-

ENOMEM
The maximum number of open files has been reached. +
+
ENOMEM
The maximum number of open files has been reached.
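Illustration only (not part of the change): a sketch that creates a DB_MPOOLFILE handle and opens an underlying file with the parameters described above; the file name and 8KB page size are arbitrary.

    #include <db.h>

    /* Open "my.pool" in the cache with 8KB pages, creating it if needed. */
    int
    open_pool_file(DB_ENV *dbenv, DB_MPOOLFILE **mpfp)
    {
        DB_MPOOLFILE *mpf;
        int ret;

        if ((ret = dbenv->memp_fcreate(dbenv, &mpf, 0)) != 0)
            return (ret);

        /* A mode of 0 selects the default owner/group read-write mode. */
        if ((ret = mpf->open(mpf, "my.pool", DB_CREATE, 0, 8 * 1024)) != 0) {
            (void)mpf->close(mpf, 0);
            return (ret);
        }

        *mpfp = mpf;
        return (0);
    }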

Class

@@ -98,6 +98,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/memp_fput.html b/db/docs/api_c/memp_fput.html index 42fe0dd5c..a8740a0e7 100644 --- a/db/docs/api_c/memp_fput.html +++ b/db/docs/api_c/memp_fput.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_MPOOLFILE->put - + -

DB_MPOOLFILE->put

API -Ref -
+Ref +


@@ -34,34 +33,31 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

pgaddr
-The pgaddr parameter is the address of the page to be +
+
pgaddr
The pgaddr parameter is the address of the page to be returned to the cache. The pgaddr parameter must be an address previously returned by DB_MPOOLFILE->get. -

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_MPOOL_CLEAN
Clear any previously set modification information (that is, don't bother +
+
DB_MPOOL_CLEAN
Clear any previously set modification information (that is, don't bother writing the page back to the source file). -

DB_MPOOL_DIRTY
The page has been modified and must be written to the source file before +
DB_MPOOL_DIRTY
The page has been modified and must be written to the source file before being evicted from the pool. -

DB_MPOOL_DISCARD
The page is unlikely to be useful in the near future, and should be +
DB_MPOOL_DISCARD
The page is unlikely to be useful in the near future, and should be discarded before other pages in the pool.
-

The DB_MPOOL_CLEAN and DB_MPOOL_DIRTY flags are -mutually exclusive.

+

The DB_MPOOL_CLEAN and DB_MPOOL_DIRTY flags are mutually exclusive.

Errors

The DB_MPOOLFILE->put method may fail and return one of the following non-zero errors:

-

-

EACCES
The DB_MPOOL_DIRTY flag was set and the source file was not +
+
EACCES
The DB_MPOOL_DIRTY flag was set and the source file was not opened for writing.
-

-

EINVAL
If the pgaddr parameter does not refer to a page returned by +
+
EINVAL
If the pgaddr parameter does not refer to a page returned by DB_MPOOLFILE->get; more than one of the DB_MPOOL_CLEAN and DB_MPOOL_DIRTY flags was set; or if an @@ -76,6 +72,6 @@ invalid flag value or parameter was specified.
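Illustration only (not part of the change): marking a pinned page dirty and returning it to the cache, assuming the 4.3-era DB_MPOOLFILE->put prototype shown above; touch_and_release is a hypothetical helper.

    #include <db.h>

    /* Modify one byte of a pinned page and hand it back to the cache. */
    int
    touch_and_release(DB_MPOOLFILE *mpf, void *pgaddr)
    {
        /* pgaddr must have been returned by DB_MPOOLFILE->get. */
        *(unsigned char *)pgaddr = 0xff;

        /* DB_MPOOL_DIRTY forces the page to disk before it is evicted. */
        return (mpf->put(mpf, pgaddr, DB_MPOOL_DIRTY));
    }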

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/memp_fset.html b/db/docs/api_c/memp_fset.html index 6c2d8fb17..fb01f08b5 100644 --- a/db/docs/api_c/memp_fset.html +++ b/db/docs/api_c/memp_fset.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_MPOOLFILE->set - + -

DB_MPOOLFILE->set

API -Ref -
+Ref +


@@ -34,30 +33,27 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

pgaddr
-The pgaddr parameter is the address of the page for which +
+
pgaddr
The pgaddr parameter is the address of the page for which +attributes are to be set. The pgaddr parameter must be an address +previously returned by DB_MPOOLFILE->get. -

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_MPOOL_CLEAN
Clear any previously set modification information (that is, don't bother +
+
DB_MPOOL_CLEAN
Clear any previously set modification information (that is, don't bother writing the page back to the source file). -

DB_MPOOL_DIRTY
The page has been modified and must be written to the source file before +
DB_MPOOL_DIRTY
The page has been modified and must be written to the source file before being evicted from the pool. -

DB_MPOOL_DISCARD
The page is unlikely to be useful in the near future, and should be +
DB_MPOOL_DISCARD
The page is unlikely to be useful in the near future, and should be discarded before other pages in the pool.
-

The DB_MPOOL_CLEAN and DB_MPOOL_DIRTY flags are -mutually exclusive.

+

The DB_MPOOL_CLEAN and DB_MPOOL_DIRTY flags are mutually exclusive.

Errors

The DB_MPOOLFILE->set method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -69,6 +65,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/memp_fsync.html b/db/docs/api_c/memp_fsync.html index f9f018111..38c762f95 100644 --- a/db/docs/api_c/memp_fsync.html +++ b/db/docs/api_c/memp_fsync.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_MPOOLFILE->sync - + -

DB_MPOOLFILE->sync

API -Ref -
+Ref +


@@ -46,6 +45,6 @@ and 0 on success.
 

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/memp_list.html b/db/docs/api_c/memp_list.html index 84ad0d1ee..9813e9012 100644 --- a/db/docs/api_c/memp_list.html +++ b/db/docs/api_c/memp_list.html @@ -1,37 +1,41 @@ - + Berkeley DB: Berkeley DB: Memory Pools and Related Methods - +

Berkeley DB: Memory Pools and Related Methods

- + - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + +
Memory Pools and Related MethodsDescription
DB_ENV->set_cachesizeSet the environment cache size
DB_ENV->set_mp_mmapsizeSet maximum mapped-in database file size
DB_ENV->memp_registerRegister input/output functions for a file in a memory pool
DB_ENV->memp_statReturn memory pool statistics
DB_ENV->memp_syncFlush pages from a memory pool
DB_ENV->memp_trickleTrickle flush pages from a memory pool
DB_ENV->memp_fcreateOpen a file in a memory pool
DB_MPOOLFILE->closeClose a file in a memory pool
DB_MPOOLFILE->getGet page from a file in a memory pool
DB_MPOOLFILE->openOpen a file in a memory pool
DB_MPOOLFILE->putReturn a page to a memory pool
DB_MPOOLFILE->setSet memory pool page status
DB_MPOOLFILE->syncFlush pages from a file in a memory pool
DB_MPOOLFILE->set_clear_lenSet file page bytes to be cleared
DB_MPOOLFILE->set_fileidSet file unique identifier
DB_MPOOLFILE->set_ftypeSet file type
DB_MPOOLFILE->set_lsn_offsetSet file log-sequence-number offset
DB_MPOOLFILE->set_pgcookieSet file cookie for pgin/pgout
DB->mpfReturn the database's memory pool handle
DB_ENV->memp_fcreateOpen a file in a memory pool
DB_ENV->memp_registerRegister input/output functions for a file in a memory pool
DB_ENV->memp_set_max_openfdSet the maximum number of open file descriptors
DB_ENV->memp_set_max_writeSet the maximum number of sequential disk writes
DB_ENV->memp_statReturn memory pool statistics
DB_ENV->memp_syncFlush pages from a memory pool
DB_ENV->memp_trickleTrickle flush pages from a memory pool
DB_ENV->set_cachesizeSet the environment cache size
DB_ENV->set_mp_mmapsizeSet maximum mapped-in database file size
DB_MPOOLFILE->closeClose a file in a memory pool
DB_MPOOLFILE->getGet page from a file in a memory pool
DB_MPOOLFILE->openOpen a file in a memory pool
DB_MPOOLFILE->putReturn a page to a memory pool
DB_MPOOLFILE->setSet memory pool page status
DB_MPOOLFILE->set_clear_lenSet file page bytes to be cleared
DB_MPOOLFILE->set_fileidSet file unique identifier
DB_MPOOLFILE->set_flagsGeneral memory pool file configuration
DB_MPOOLFILE->set_ftypeSet file type
DB_MPOOLFILE->set_lsn_offsetSet file log-sequence-number offset
DB_MPOOLFILE->set_pgcookieSet file cookie for pgin/pgout
DB_MPOOLFILE->syncFlush pages from a file in a memory pool
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/memp_maxwrite.html b/db/docs/api_c/memp_maxwrite.html new file mode 100644 index 000000000..70bd71f24 --- /dev/null +++ b/db/docs/api_c/memp_maxwrite.html @@ -0,0 +1,82 @@ + + + + + + +Berkeley DB: DB_ENV->memp_set_max_write + + + + + + + +
+

DB_ENV->memp_set_max_write

+
+API +Ref
+


+ +

+#include <db.h>
+

+int +DB_ENV->memp_set_max_write(DB_ENV *env, int maxwrite, int maxwrite_sleep); +

+int +DB_ENV->memp_get_max_write(DB_ENV *env, int *maxwritep, int *maxwrite_sleepp); +

+
+

Description: DB_ENV->memp_set_max_write

+

The DB_ENV->memp_set_max_write method limits the number of sequential write +operations scheduled by the library when flushing dirty pages from the +cache.

+

The DB_ENV->memp_set_max_write method +returns a non-zero error value on failure +and 0 on success. +

+

Parameters

+
+
maxwrite
The maximum number of sequential write operations scheduled by the +library when flushing dirty pages from the cache. +
maxwrite_sleep
The number of microseconds the thread of control should pause before +scheduling further write operations. +
+

Errors

+

The DB_ENV->memp_set_max_write method +may fail and return one of the following non-zero errors:

+
+
EINVAL
An +invalid flag value or parameter was specified. +
+
+

Description: DB_ENV->memp_get_max_write

+

The DB_ENV->memp_get_max_write method returns the current maximum number of +sequential write operations and microseconds to pause.

+

The DB_ENV->memp_get_max_write method may be called at any time during the life of the +application.

+

The DB_ENV->memp_get_max_write method +returns a non-zero error value on failure +and 0 on success. +

+

Parameters

+
+
maxwritep
The maxwritep parameter references memory into which + the maximum number of sequential write operations is copied. +
maxwrite_sleepp
The maxwrite_sleepp parameter references memory into which + the microseconds to pause before scheduling further write operations is copied. +
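Illustration only (not part of the change): configuring and reading back the write limits, assuming the prototypes shown at the top of this page and the usual handle-method call style; the values 32 and 10000 and the helper name are arbitrary.

    #include <db.h>

    /* Flush dirty pages in bursts of 32 writes, pausing 10000us between. */
    int
    throttle_cache_writes(DB_ENV *dbenv)
    {
        int maxwrite, maxwrite_sleep, ret;

        if ((ret = dbenv->memp_set_max_write(dbenv, 32, 10000)) != 0)
            return (ret);

        /* Read the configuration back. */
        return (dbenv->memp_get_max_write(dbenv, &maxwrite, &maxwrite_sleep));
    }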
+
+

Class

+DB_ENV, DB_MPOOLFILE +

See Also

+Memory Pools and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_c/memp_openfd.html b/db/docs/api_c/memp_openfd.html new file mode 100644 index 000000000..07757095c --- /dev/null +++ b/db/docs/api_c/memp_openfd.html @@ -0,0 +1,77 @@ + + + + + + +Berkeley DB: DB_ENV->memp_set_max_openfd + + + + + + + +
+

DB_ENV->memp_set_max_openfd

+
+API +Ref
+


+ +

+#include <db.h>
+

+int +DB_ENV->memp_set_max_openfd(DB_ENV *env, int maxopenfd); +

+int +DB_ENV->memp_get_max_openfd(DB_ENV *env, int *maxopenfdp); +

+
+

Description: DB_ENV->memp_set_max_openfd

+

The DB_ENV->memp_set_max_openfd method limits the number of file descriptors +the library will open concurrently when flushing dirty pages from the +cache.

+

The DB_ENV->memp_set_max_openfd method +returns a non-zero error value on failure +and 0 on success. +

+

Parameters

+
+
maxopenfd
The maximum number of file descriptors that may be concurrently opened +by the library when flushing dirty pages from the cache. +
+

Errors

+

The DB_ENV->memp_set_max_openfd method +may fail and return one of the following non-zero errors:

+
+
EINVAL
An +invalid flag value or parameter was specified. +
+
+

Description: DB_ENV->memp_get_max_openfd

+

The DB_ENV->memp_get_max_openfd method returns the maximum number of file descriptors open.

+

The DB_ENV->memp_get_max_openfd method may be called at any time during the life of the +application.

+

The DB_ENV->memp_get_max_openfd method +returns a non-zero error value on failure +and 0 on success. +

+

Parameters

+
+
maxopenfdp
The DB_ENV->memp_get_max_openfd method returns the +maximum number of file descriptors open in maxopenfdp. +
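Illustration only (not part of the change), following the prototypes shown at the top of this page; the limit of 16 descriptors and the helper name are arbitrary.

    #include <db.h>

    /* Cap cache flushing at 16 concurrently open file descriptors. */
    int
    limit_flush_fds(DB_ENV *dbenv)
    {
        int maxopenfd, ret;

        if ((ret = dbenv->memp_set_max_openfd(dbenv, 16)) != 0)
            return (ret);

        /* Optionally read the configured limit back. */
        return (dbenv->memp_get_max_openfd(dbenv, &maxopenfd));
    }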
+
+

Class

+DB_ENV, DB_MPOOLFILE +

See Also

+Memory Pools and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_c/memp_register.html b/db/docs/api_c/memp_register.html index 91f83a817..9233d2399 100644 --- a/db/docs/api_c/memp_register.html +++ b/db/docs/api_c/memp_register.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->memp_register - + -

DB_ENV->memp_register

API -Ref -
+Ref +


@@ -60,25 +59,13 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

ftype
-The ftype parameter specifies the type of file for which the +
+
ftype
The ftype parameter specifies the type of file for which the page-in and page-out functions will be called.

The ftype value for a file must be a non-zero positive number less than 128 (0 and negative numbers are reserved for internal use by the Berkeley DB library).

-

pgin_fcn
-The page-in and page-out functions. -

The pgin_fcn and pgout_fcn functions are called with a -reference to the current database environment, the page number being -read or written, a pointer to the page being read or written, and any -parameter pgcookie that was specified to the -DB_MPOOLFILE->set_pgcookie method.

-

The pgin_fcn and pgout_fcn functions should return 0 on -success, and a non-zero value on failure, in which case the shared Berkeley DB -library function calling it will also fail, returning that non-zero -value. The non-zero value should be selected from values outside of the -Berkeley DB library namespace.

+
pgin_fcn
pgout_fcn
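Illustration only (not part of the change): a sketch of a pgin_fcn/pgout_fcn pair and their registration, using the callback parameters described in the text above (environment, page number, page address, pgcookie). The converter bodies, the MY_FTYPE value, and the helper names are hypothetical.

    #include <db.h>

    #define MY_FTYPE 1    /* Application-chosen file type, 1 through 127. */

    /* Hypothetical page-in converter, called after a page is read. */
    static int
    my_pgin(DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie)
    {
        /* Convert the on-disk page image to its in-memory form here. */
        return (0);
    }

    /* Hypothetical page-out converter, called before a page is written. */
    static int
    my_pgout(DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie)
    {
        /* Convert the in-memory page image to its on-disk form here. */
        return (0);
    }

    int
    register_converters(DB_ENV *dbenv, DB_MPOOLFILE *mpf)
    {
        int ret;

        if ((ret = dbenv->memp_register(dbenv,
            MY_FTYPE, my_pgin, my_pgout)) != 0)
            return (ret);

        /* Tag the file so the converters are applied to its pages. */
        return (mpf->set_ftype(mpf, MY_FTYPE));
    }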

Class

@@ -89,6 +76,6 @@ Berkeley DB library namespace.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/memp_set_clear_len.html b/db/docs/api_c/memp_set_clear_len.html index 85a356ca7..9023e6bca 100644 --- a/db/docs/api_c/memp_set_clear_len.html +++ b/db/docs/api_c/memp_set_clear_len.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_MPOOLFILE->set_clear_len - + -

DB_MPOOLFILE->set_clear_len

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_MPOOLFILE->get_clear_len(DB_MPOOLFILE *mpf, u_int32_t *lenp);
 


Description: DB_MPOOLFILE->set_clear_len

-

The DB_MPOOLFILE->set_clear_len method sets the number of initial bytes in a page that should be set to nul when the page is created as a result of the DB_MPOOL_CREATE or DB_MPOOL_NEW flags being @@ -51,27 +49,16 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

len
-The len parameter is the number of initial bytes in a page that +
+
len
The len parameter is the number of initial bytes in a page that should be set to nul when the page is created. A value of 0 results in the entire page being set to nul bytes.

Description: DB_MPOOLFILE->get_clear_len

-

The DB_MPOOLFILE->get_clear_len method returns the bytes to be cleared.

+

The DB_MPOOLFILE->get_clear_len method returns the number of bytes to be cleared.

The DB_MPOOLFILE->get_clear_len method may be called at any time during the life of the application.

-

The DB_MPOOLFILE->get_clear_len method -returns a non-zero error value on failure -and 0 on success. -

-

Parameters

-

-

lenp
-The DB_MPOOLFILE->get_clear_len method returns the -bytes to be cleared in lenp. -

Class

DB_ENV, DB_MPOOLFILE @@ -81,6 +68,6 @@ bytes to be cleared in lenp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/memp_set_fileid.html b/db/docs/api_c/memp_set_fileid.html index 533c2dc27..d4f0306c7 100644 --- a/db/docs/api_c/memp_set_fileid.html +++ b/db/docs/api_c/memp_set_fileid.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_MPOOLFILE->set_fileid - + -

DB_MPOOLFILE->set_fileid

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_MPOOLFILE->get_fileid(DB_MPOOLFILE *mpf, u_int8_t *fileid);
 


Description: DB_MPOOLFILE->set_fileid

-

The DB_MPOOLFILE->set_fileid method specifies a unique identifier for the file. (The shared memory buffer pool functions must be able to uniquely identify files in order that multiple processes wanting to share a file @@ -67,9 +65,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

fileid
-The fileid parameter is the unique identifier for the file. +
+
fileid
The fileid parameter is the unique identifier for the file. Unique file identifiers must be a DB_FILE_ID_LEN length array of bytes.

@@ -91,6 +88,6 @@ application.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/memp_set_flags.html b/db/docs/api_c/memp_set_flags.html index 3c2cff559..34d2f5ab7 100644 --- a/db/docs/api_c/memp_set_flags.html +++ b/db/docs/api_c/memp_set_flags.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_MPOOLFILE->set_flags - + -

DB_MPOOLFILE->set_flags

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_MPOOLFILE->get_flags(DB_MPOOLFILE *mpf, u_int32_t *flagsp);
 


Description: DB_MPOOLFILE->set_flags

-

Configure a file in the cache.

To set the flags for a particular database, call the DB_MPOOLFILE->set_flags method using the DB_MPOOLFILE handle stored in @@ -41,19 +39,20 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flags
-The flags parameter must be set by bitwise inclusively OR'ing together one or more +
+
flags
The flags parameter must be set by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_MPOOL_NOFILE
If set, no backing temporary file will be opened for in-memory +
+
DB_MPOOL_NOFILE
If set, no backing temporary file will be opened for in-memory databases, even if they expand to fill the entire cache. Attempts to create new file pages after the cache has been filled will fail.

The DB_MPOOL_NOFILE flag may be used to configure Berkeley DB at any time during the life of the application.

+
DB_MPOOL_UNLINK
If set, remove the file when the last reference to it is closed. +

The DB_MPOOL_UNLINK flag may be used to configure Berkeley DB at any time during +the life of the application.

-

onoff
-If onoff is +
onoff
If onoff is zero, the specified flags are cleared; otherwise they are set.
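Illustration only (not part of the change): turning on DB_MPOOL_NOFILE for a purely in-memory file; the helper name is hypothetical.

    #include <db.h>

    /* Keep an in-memory file entirely in the cache, with no backing file. */
    int
    make_cache_only(DB_MPOOLFILE *mpf)
    {
        /* The third argument sets (non-zero) or clears (zero) the flags. */
        return (mpf->set_flags(mpf, DB_MPOOL_NOFILE, 1));
    }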
@@ -67,9 +66,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flagsp
-The DB_MPOOLFILE->get_flags method returns the +
+
flagsp
The DB_MPOOLFILE->get_flags method returns the flags in flagsp.

@@ -81,6 +79,6 @@ flags in flagsp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/memp_set_ftype.html b/db/docs/api_c/memp_set_ftype.html index 3039a2464..8e96fc7ad 100644 --- a/db/docs/api_c/memp_set_ftype.html +++ b/db/docs/api_c/memp_set_ftype.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_MPOOLFILE->set_ftype - + -

DB_MPOOLFILE->set_ftype

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_MPOOLFILE->get_ftype(DB_MPOOLFILE *mpf, int *ftypep);
 


Description: DB_MPOOLFILE->set_ftype

-

The DB_MPOOLFILE->set_ftype method specifies a file type for the purposes of input or output processing of the file's pages as they are read from or written to, the backing filesystem store.

@@ -47,28 +45,17 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

ftype
-The ftype parameter sets the file's type for the purposes of input +
+
ftype
The ftype parameter sets the file's type for the purposes of input and output processing. The ftype must be the same as a ftype parameter previously specified to the DB_ENV->memp_register method. (See the DB_ENV->memp_register documentation for more information.)

Description: DB_MPOOLFILE->get_ftype

-

The DB_MPOOLFILE->get_ftype method returns the file type.

+

The DB_MPOOLFILE->get_ftype method returns the file type.

The DB_MPOOLFILE->get_ftype method may be called at any time during the life of the application.

-

The DB_MPOOLFILE->get_ftype method -returns a non-zero error value on failure -and 0 on success. -

-

Parameters

-

-

ftypep
-The DB_MPOOLFILE->get_ftype method returns the -file type in ftypep. -

Class

DB_ENV, DB_MPOOLFILE @@ -78,6 +65,6 @@ file type in ftypep.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/memp_set_lsn_offset.html b/db/docs/api_c/memp_set_lsn_offset.html index 10274765e..cd8c611fe 100644 --- a/db/docs/api_c/memp_set_lsn_offset.html +++ b/db/docs/api_c/memp_set_lsn_offset.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_MPOOLFILE->set_lsn_offset - + -

DB_MPOOLFILE->set_lsn_offset

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_MPOOLFILE->get_lsn_offset(DB_MPOOLFILE *mpf, int32_t *lsn_offsetp);
 


Description: DB_MPOOLFILE->set_lsn_offset

-

The DB_MPOOLFILE->set_lsn_offset method specifies the zero-based byte offset of a log sequence number (DB_LSN) on the file's pages, for the purposes of page-flushing as part of transaction checkpoint. (See the @@ -50,26 +48,15 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

lsn_offset
-The lsn_offset parameter is the zero-based byte offset of the +
+
lsn_offset
The lsn_offset parameter is the zero-based byte offset of the log sequence number on the file's pages.

Description: DB_MPOOLFILE->get_lsn_offset

-

The DB_MPOOLFILE->get_lsn_offset method returns the log sequence number byte offset.

+

The DB_MPOOLFILE->get_lsn_offset method returns the log sequence number byte offset.

The DB_MPOOLFILE->get_lsn_offset method may be called at any time during the life of the application.

-

The DB_MPOOLFILE->get_lsn_offset method -returns a non-zero error value on failure -and 0 on success. -

-

Parameters

-

-

lsn_offsetp
-The DB_MPOOLFILE->get_lsn_offset method returns the -log sequence number byte offset in lsn_offsetp. -

Class

DB_ENV, DB_MPOOLFILE @@ -79,6 +66,6 @@ log sequence number byte offset in lsn_offsetp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/memp_set_maxsize.html b/db/docs/api_c/memp_set_maxsize.html index 950c83fb9..c0fa6af37 100644 --- a/db/docs/api_c/memp_set_maxsize.html +++ b/db/docs/api_c/memp_set_maxsize.html @@ -1,30 +1,29 @@ - - + + Berkeley DB: DB_MPOOLFILE->set_maxsize - + -

DB_MPOOLFILE->set_maxsize

API -Ref -
+Ref +


 #include <db.h>
 

int -DB_MPOOLFILE->set_set_maxsize(DB_MPOOLFILE *mpf, +DB_MPOOLFILE->set_maxsize(DB_MPOOLFILE *mpf, u_int32_t gbytes, u_int32_t bytes);

int @@ -33,11 +32,9 @@ DB_MPOOLFILE->get_maxsize(DB_MPOOLFILE *mpf,


Description: DB_MPOOLFILE->set_maxsize

- -

Set the maximum size for the file to be -gbytes gigabytes plus bytes. -Attempts to allocate new pages in the file after the limit has been -reached will fail.

+

Set the maximum size for the file to be gbytes gigabytes plus +bytes. Attempts to allocate new pages in the file after the +limit has been reached will fail.

To set the maximum file size for a particular database, call the DB_MPOOLFILE->set_maxsize method using the DB_MPOOLFILE handle stored in the mpf field of the DB handle. Attempts to insert @@ -49,12 +46,10 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

bytes
-The maximum size of the file is set to gbytes gigabytes plus +
+
bytes
The maximum size of the file is set to gbytes gigabytes plus bytes. -

gbytes
-The maximum size of the file is set to gbytes gigabytes plus +
gbytes
The maximum size of the file is set to gbytes gigabytes plus bytes.
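Illustration only (not part of the change): limiting a file to 2GB plus 512MB; the helper name and sizes are arbitrary.

    #include <db.h>

    /* Refuse to allocate pages once the file reaches 2GB + 512MB. */
    int
    cap_file_size(DB_MPOOLFILE *mpf)
    {
        return (mpf->set_maxsize(mpf, 2, 512 * 1024 * 1024));
    }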

@@ -66,12 +61,10 @@ and 0 on success.

The DB_MPOOLFILE->get_maxsize method may be called at any time during the life of the application.

Parameters

-

-

bytesp
-The bytesp parameter references memory into which +
+
bytesp
The bytesp parameter references memory into which the additional bytes of memory in the maximum file size is copied. -

gbytesp
-The gbytesp parameter references memory into which +
gbytesp
The gbytesp parameter references memory into which the gigabytes of memory in the maximum file size is copied.

@@ -83,6 +76,6 @@ The gbytesp parameter references memory into which

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/memp_set_pgcookie.html b/db/docs/api_c/memp_set_pgcookie.html index 119632784..de5ceec9f 100644 --- a/db/docs/api_c/memp_set_pgcookie.html +++ b/db/docs/api_c/memp_set_pgcookie.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_MPOOLFILE->set_pgcookie - + -

DB_MPOOLFILE->set_pgcookie

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_MPOOLFILE->get_pgcookie(DB_MPOOLFILE *mpf, DBT *dbt);
 


Description: DB_MPOOLFILE->set_pgcookie

-

The DB_MPOOLFILE->set_pgcookie method specifies a byte string that is provided to the functions registered to do input or output processing of the file's pages as they are read from or written to, the backing filesystem @@ -49,27 +47,16 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

pgcookie
-The pgcookie parameter is a byte string provided to the +
+
pgcookie
The pgcookie parameter is a byte string provided to the functions registered to do input or output processing of the file's pages.

Description: DB_MPOOLFILE->get_pgcookie

-

The DB_MPOOLFILE->get_pgcookie method returns the byte string.

+

The DB_MPOOLFILE->get_pgcookie method returns the byte string.

The DB_MPOOLFILE->get_pgcookie method may be called at any time during the life of the application.

-

The DB_MPOOLFILE->get_pgcookie method -returns a non-zero error value on failure -and 0 on success. -

-

Parameters

-

-

dbt
-The DB_MPOOLFILE->get_pgcookie method returns a reference to the -byte string in dbt. -

Class

DB_ENV, DB_MPOOLFILE @@ -79,6 +66,6 @@ byte string in dbt.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/memp_set_priority.html b/db/docs/api_c/memp_set_priority.html index 12fdbb5db..acfd342c8 100644 --- a/db/docs/api_c/memp_set_priority.html +++ b/db/docs/api_c/memp_set_priority.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_MPOOLFILE->set_priority - + -

DB_MPOOLFILE->set_priority

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_MPOOLFILE->get_priority(DB_MPOOLFILE *mpf, DB_CACHE_PRIORITY *priorityp);
 


Description: DB_MPOOLFILE->set_priority

-

Set the cache priority for pages from the specified file. The priority of a page biases the replacement algorithm to be more or less likely to discard a page when space is needed in the buffer pool. The bias is @@ -48,12 +46,11 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

priority
-The priority parameter must be set to one of the following +
+
priority
The priority parameter must be set to one of the following values: -

-

DB_PRIORITY_VERY_LOW
The lowest priority: pages are the most likely to be discarded. +
+
DB_PRIORITY_VERY_LOW
The lowest priority: pages are the most likely to be discarded.
DB_PRIORITY_LOW
The next lowest priority.
DB_PRIORITY_DEFAULT
The default priority.
DB_PRIORITY_HIGH
The next highest priority. @@ -70,9 +67,8 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

priorityp
-The DB_MPOOLFILE->get_priority method returns the +
+
priorityp
The DB_MPOOLFILE->get_priority method returns the cache priority in priorityp.
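Illustration only (not part of the change): biasing the cache to keep one file's pages resident and reading the setting back; the helper name is hypothetical.

    #include <db.h>

    /* Make this file's pages less likely to be evicted from the cache. */
    int
    prefer_file_pages(DB_MPOOLFILE *mpf)
    {
        DB_CACHE_PRIORITY priority;
        int ret;

        if ((ret = mpf->set_priority(mpf, DB_PRIORITY_HIGH)) != 0)
            return (ret);

        /* Read the current priority back. */
        return (mpf->get_priority(mpf, &priority));
    }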

@@ -84,6 +80,6 @@ cache priority in priorityp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/memp_stat.html b/db/docs/api_c/memp_stat.html index 5ae5c0317..09b32bb6a 100644 --- a/db/docs/api_c/memp_stat.html +++ b/db/docs/api_c/memp_stat.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->memp_stat - + -

DB_ENV->memp_stat

API -Ref -
+Ref +


@@ -26,6 +25,9 @@
 int
 DB_ENV->memp_stat(DB_ENV *env, DB_MPOOL_STAT **gsp,
     DB_MPOOL_FSTAT *(*fsp)[], u_int32_t flags);
+

+int +DB_ENV->memp_stat_print(DB_ENV *env, u_int32_t flags);


Description: DB_ENV->memp_stat

@@ -45,11 +47,11 @@ individually freed.

If gsp is non-NULL, the global statistics for the cache mp are copied into the memory location to which it refers. The following DB_MPOOL_STAT fields will be filled in:

-

+
size_t st_gbytes;
Gigabytes of cache (total cache size is st_gbytes + st_bytes).
size_t st_bytes;
Bytes of cache (total cache size is st_gbytes + st_bytes).
u_int32_t st_ncache;
Number of caches. -
u_int32_t st_regsize;
Individual cache size. +
roff_t st_regsize;
Individual cache size, in bytes.
u_int32_t st_map;
Requested pages mapped into the process' address space (there is no available information about whether or not this request caused disk I/O, although examining the application page fault rate may be helpful). @@ -83,6 +85,15 @@ lock without waiting.
u_int32_t st_alloc_max_buckets;
Maximum number of hash buckets checked during an allocation.
u_int32_t st_alloc_pages;
Number of pages checked during allocation.
u_int32_t st_alloc_max_pages;
Maximum number of pages checked during an allocation. +
u_int32_t st_mmapsize;
Maximum memory-mapped file size. +
u_int32_t st_maxopenfd;
Maximum open file descriptors. +
u_int32_t st_maxwrite;
Maximum sequential buffer writes. +
u_int32_t st_maxwrite_sleep;
Sleep after writing maximum sequential buffers. +
+The flags parameter must be set to 0 or +the following value: +
+
DB_STAT_CLEAR
Reset statistics after returning their values.

If fsp is non-NULL, a pointer to a NULL-terminated variable length array of statistics for individual files, in the cache @@ -92,7 +103,7 @@ set to NULL.

The per-file statistics are stored in structures of type DB_MPOOL_FSTAT. The following DB_MPOOL_FSTAT fields will be filled in for each file in the cache; that is, each element of the array:

-

+
char * file_name;
The name of the file.
size_t st_pagesize;
Page size in bytes.
u_int32_t st_cache_hit;
Requested pages found in the cache. @@ -107,28 +118,44 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to 0 or +
+
flags
The flags parameter must be set to 0 or the following value: -

-

DB_STAT_CLEAR
Reset statistics after returning their values. +
+
DB_STAT_CLEAR
Reset statistics after returning their values.
-

fsp
-The fsp parameter references memory into which +
fsp
The fsp parameter references memory into which a pointer to the allocated per-file statistics structures is copied. -

gsp
-The gsp parameter references memory into which +
gsp
The gsp parameter references memory into which a pointer to the allocated global statistics structure is copied.
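Illustration only (not part of the change): fetching the global cache statistics and printing a few of the fields listed above. Passing NULL for fsp skips the per-file statistics, and the helper name is hypothetical.

    #include <stdio.h>
    #include <stdlib.h>
    #include <db.h>

    /* Print a few of the global cache statistics. */
    int
    show_cache_stats(DB_ENV *dbenv)
    {
        DB_MPOOL_STAT *gsp;
        int ret;

        if ((ret = dbenv->memp_stat(dbenv, &gsp, NULL, 0)) != 0)
            return (ret);

        printf("cache: %lu GB + %lu bytes in %lu caches\n",
            (unsigned long)gsp->st_gbytes, (unsigned long)gsp->st_bytes,
            (unsigned long)gsp->st_ncache);

        /* The statistics structure was allocated on our behalf; free it. */
        free(gsp);
        return (0);
    }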

Errors

The DB_ENV->memp_stat method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

+

Description: DB_ENV->memp_stat_print

+

The DB_ENV->memp_stat_print method prints diagnostic information to the output +channel described by the DB_ENV->set_msgfile method.

+

The DB_ENV->memp_stat_print method may not be called before the DB_ENV->open method has +been called.

+

The DB_ENV->memp_stat_print method +returns a non-zero error value on failure +and 0 on success. +

+

Parameters

+
+
flags
The flags parameter must be set by bitwise inclusively OR'ing together one or more +of the following values: +
+
DB_STAT_ALL
Display all available information. +
DB_STAT_MEMP_HASH
Display the buffers with hash chains. +
+
+

Class

DB_ENV, DB_MPOOLFILE

See Also

@@ -137,6 +164,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/memp_sync.html b/db/docs/api_c/memp_sync.html index 7e2b562a8..494ea172b 100644 --- a/db/docs/api_c/memp_sync.html +++ b/db/docs/api_c/memp_sync.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->memp_sync - + -

DB_ENV->memp_sync

API -Ref -
+Ref +


@@ -44,9 +43,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

lsn
-The purpose of the lsn parameter is to enable a transaction +
+
lsn
The purpose of the lsn parameter is to enable a transaction manager to ensure, as part of a checkpoint, that all pages modified by a certain time have been written to disk.

All modified pages with a log sequence number (DB_LSN) less @@ -56,8 +54,8 @@ NULL, all modified pages in the pool are written to disk.
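Illustration only (not part of the change): flushing every modified page by passing a NULL LSN, as described above; the helper name is hypothetical.

    #include <stddef.h>
    #include <db.h>

    /* Flush all modified pages in the cache to their backing files. */
    int
    flush_whole_cache(DB_ENV *dbenv)
    {
        /* A NULL lsn means "all modified pages", per the text above. */
        return (dbenv->memp_sync(dbenv, NULL));
    }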

Errors

The DB_ENV->memp_sync method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the DB_ENV->memp_sync function was called without logging having been +
+
EINVAL
If the DB_ENV->memp_sync function was called without logging having been initialized in the environment; or if an invalid flag value or parameter was specified.
@@ -70,6 +68,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/memp_trickle.html b/db/docs/api_c/memp_trickle.html index 231fbe93f..1b5abc614 100644 --- a/db/docs/api_c/memp_trickle.html +++ b/db/docs/api_c/memp_trickle.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->memp_trickle - + -

DB_ENV->memp_trickle

API -Ref -
+Ref +


@@ -34,6 +33,25 @@ backing files.

The purpose of the DB_ENV->memp_trickle function is to enable a memory pool manager to ensure that a page is always available for reading in new information without having to wait for a write.

+

The DB_ENV->memp_trickle method +returns a non-zero error value on failure +and 0 on success. +

+

Parameters

+
+
nwrotep
The nwrotep parameter references memory into which + the number of pages written to reach the specified +percentage is copied. +
percent
The percent parameter is the percent of the pages in the cache +that should be clean. +
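Illustration only (not part of the change): asking the pool to keep at least 20% of its pages clean; the percentage and helper name are arbitrary.

    #include <db.h>

    /* Ensure at least 20% of the pages in the cache are clean. */
    int
    trickle_cache(DB_ENV *dbenv, int *nwrotep)
    {
        /* nwrotep receives the number of pages written, if any. */
        return (dbenv->memp_trickle(dbenv, 20, nwrotep));
    }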
+

Errors

+

The DB_ENV->memp_trickle method +may fail and return one of the following non-zero errors:

+
+
EINVAL
An +invalid flag value or parameter was specified. +

Class

DB_ENV, DB_MPOOLFILE @@ -43,6 +61,6 @@ information without having to wait for a write.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/mempfile_class.html b/db/docs/api_c/mempfile_class.html index b175a6878..0a16987a0 100644 --- a/db/docs/api_c/mempfile_class.html +++ b/db/docs/api_c/mempfile_class.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_MPOOLFILE - + -

DB_MPOOLFILE

API -Ref -
+Ref +


@@ -54,6 +53,6 @@ method's return.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/object.html b/db/docs/api_c/object.html new file mode 100644 index 000000000..04dec7b20 --- /dev/null +++ b/db/docs/api_c/object.html @@ -0,0 +1,32 @@ + + + +C API (Version: 4.3.14) + + + + +Home
+All Methods
+Permuted Index
+

+Cursors
+Databases
+Environments
+Key/Data Pairs
+Locking
+Logging
+Memory Pool
+Replication
+Sequences
+Transactions
+

+Dbm/Ndbm
+Hsearch
+

+Reference
+Utilities
+ + diff --git a/db/docs/api_c/pindex.src b/db/docs/api_c/pindex.src index d71b9db97..7f776f8a4 100644 --- a/db/docs/api_c/pindex.src +++ b/db/docs/api_c/pindex.src @@ -1,44 +1,26 @@ -__APIREL__/api_c/db_associate.html__OCT__2 @DB-__GT__associate -__APIREL__/api_c/db_associate.html__OCT__3 @DB_DBT_APPMALLOC -__APIREL__/api_c/db_associate.html__OCT__4 @DB_DONOTINDEX +__APIREL__/api_c/db_associate.html__OCT__2 @DB_DBT_APPMALLOC +__APIREL__/api_c/db_associate.html__OCT__3 @DB_DONOTINDEX __APIREL__/api_c/db_associate.html__OCT__DB_CREATE DB-__GT__associate@DB_CREATE __APIREL__/api_c/db_associate.html__OCT__DB_AUTO_COMMIT DB-__GT__associate@DB_AUTO_COMMIT -__APIREL__/api_c/db_class.html__OCT__2 @db_create __APIREL__/api_c/db_class.html__OCT__DB_XA_CREATE db_create@DB_XA_CREATE -__APIREL__/api_c/db_close.html__OCT__2 @DB-__GT__close __APIREL__/api_c/db_close.html__OCT__DB_NOSYNC DB-__GT__close@DB_NOSYNC -__APIREL__/api_c/db_cursor.html__OCT__2 @DB-__GT__cursor +__APIREL__/api_c/db_cursor.html__OCT__DB_DEGREE_2 DB-__GT__cursor@DB_DEGREE_2 __APIREL__/api_c/db_cursor.html__OCT__DB_DIRTY_READ DB-__GT__cursor@DB_DIRTY_READ __APIREL__/api_c/db_cursor.html__OCT__DB_WRITECURSOR DB-__GT__cursor@DB_WRITECURSOR -__APIREL__/api_c/db_del.html__OCT__2 @DB-__GT__del __APIREL__/api_c/db_del.html__OCT__DB_AUTO_COMMIT DB-__GT__del@DB_AUTO_COMMIT -__APIREL__/api_c/db_err.html__OCT__2 @DB-__GT__err -__APIREL__/api_c/db_err.html__OCT__3 @DB-__GT__errx -__APIREL__/api_c/db_get.html__OCT__2 @DB-__GT__get -__APIREL__/api_c/db_get.html__OCT__3 @DB-__GT__pget __APIREL__/api_c/db_get.html__OCT__DB_CONSUME DB-__GT__get@DB_CONSUME __APIREL__/api_c/db_get.html__OCT__DB_CONSUME_WAIT DB-__GT__get@DB_CONSUME_WAIT __APIREL__/api_c/db_get.html__OCT__DB_GET_BOTH DB-__GT__get@DB_GET_BOTH __APIREL__/api_c/db_get.html__OCT__DB_SET_RECNO DB-__GT__get@DB_SET_RECNO __APIREL__/api_c/db_get.html__OCT__DB_AUTO_COMMIT DB-__GT__get@DB_AUTO_COMMIT +__APIREL__/api_c/db_get.html__OCT__DB_DEGREE_2 DB-__GT__get@DB_DEGREE_2 __APIREL__/api_c/db_get.html__OCT__DB_DIRTY_READ DB-__GT__get@DB_DIRTY_READ __APIREL__/api_c/db_get.html__OCT__DB_MULTIPLE DB-__GT__get@DB_MULTIPLE __APIREL__/api_c/db_get.html__OCT__DB_RMW DB-__GT__get@DB_RMW -__APIREL__/api_c/db_get_byteswapped.html__OCT__2 @DB-__GT__get_byteswapped -__APIREL__/api_c/db_get_mpf.html__OCT__2 @DB-__GT__mpf -__APIREL__/api_c/db_get_type.html__OCT__2 @DB-__GT__get_type -__APIREL__/api_c/db_getenv.html__OCT__2 @DB-__GT__get_env -__APIREL__/api_c/db_join.html__OCT__2 @DB-__GT__join __APIREL__/api_c/db_join.html__OCT__DB_JOIN_ITEM DB-__GT__join@DB_JOIN_ITEM __APIREL__/api_c/db_join.html__OCT__DB_DIRTY_READ DB-__GT__join@DB_DIRTY_READ __APIREL__/api_c/db_join.html__OCT__DB_RMW DB-__GT__join@DB_RMW __APIREL__/api_c/db_join.html__OCT__DB_JOIN_NOSORT DB-__GT__join@DB_JOIN_NOSORT -__APIREL__/api_c/db_key_range.html__OCT__2 @DB-__GT__key_range -__APIREL__/api_c/db_open.html__OCT__2 @DB-__GT__open -__APIREL__/api_c/db_open.html__OCT__3 @DB-__GT__get_file -__APIREL__/api_c/db_open.html__OCT__4 @DB-__GT__get_database -__APIREL__/api_c/db_open.html__OCT__5 @DB-__GT__get_open_flags -__APIREL__/api_c/db_open.html__OCT__6 @DB-__GT__get_transactional __APIREL__/api_c/db_open.html__OCT__DB_AUTO_COMMIT DB-__GT__open@DB_AUTO_COMMIT __APIREL__/api_c/db_open.html__OCT__DB_CREATE DB-__GT__open@DB_CREATE __APIREL__/api_c/db_open.html__OCT__DB_DIRTY_READ DB-__GT__open@DB_DIRTY_READ @@ -52,90 +34,50 @@ __APIREL__/api_c/db_open.html__OCT__DB_HASH DB-__GT__open@DB_HASH __APIREL__/api_c/db_open.html__OCT__DB_QUEUE 
DB-__GT__open@DB_QUEUE __APIREL__/api_c/db_open.html__OCT__DB_RECNO DB-__GT__open@DB_RECNO __APIREL__/api_c/db_open.html__OCT__DB_UNKNOWN DB-__GT__open@DB_UNKNOWN -__APIREL__/api_c/db_put.html__OCT__2 @DB-__GT__put __APIREL__/api_c/db_put.html__OCT__DB_APPEND DB-__GT__put@DB_APPEND __APIREL__/api_c/db_put.html__OCT__DB_NODUPDATA DB-__GT__put@DB_NODUPDATA __APIREL__/api_c/db_put.html__OCT__DB_NOOVERWRITE DB-__GT__put@DB_NOOVERWRITE __APIREL__/api_c/db_put.html__OCT__DB_AUTO_COMMIT DB-__GT__put@DB_AUTO_COMMIT -__APIREL__/api_c/db_remove.html__OCT__2 @DB-__GT__remove -__APIREL__/api_c/db_rename.html__OCT__2 @DB-__GT__rename -__APIREL__/api_c/db_set_append_recno.html__OCT__2 @DB-__GT__set_append_recno -__APIREL__/api_c/db_set_bt_compare.html__OCT__2 @DB-__GT__set_bt_compare -__APIREL__/api_c/db_set_bt_minkey.html__OCT__2 @DB-__GT__set_bt_minkey -__APIREL__/api_c/db_set_bt_minkey.html__OCT__3 @DB-__GT__get_bt_minkey -__APIREL__/api_c/db_set_bt_prefix.html__OCT__2 @DB-__GT__set_bt_prefix -__APIREL__/api_c/db_set_cachesize.html__OCT__2 @DB-__GT__set_cachesize -__APIREL__/api_c/db_set_cachesize.html__OCT__3 @DB-__GT__get_cachesize -__APIREL__/api_c/db_set_dup_compare.html__OCT__2 @DB-__GT__set_dup_compare -__APIREL__/api_c/db_set_encrypt.html__OCT__2 @DB-__GT__set_encrypt -__APIREL__/api_c/db_set_encrypt.html__OCT__3 @DB-__GT__get_encrypt_flags __APIREL__/api_c/db_set_encrypt.html__OCT__DB_ENCRYPT_AES DB-__GT__set_encrypt@DB_ENCRYPT_AES -__APIREL__/api_c/db_set_errcall.html__OCT__2 @DB-__GT__set_errcall -__APIREL__/api_c/db_set_errpfx.html__OCT__2 @DB-__GT__set_errpfx -__APIREL__/api_c/db_set_errpfx.html__OCT__3 @DB-__GT__get_errpfx -__APIREL__/api_c/db_set_feedback.html__OCT__2 @DB-__GT__set_feedback __APIREL__/api_c/db_set_feedback.html__OCT__DB_UPGRADE DB-__GT__set_feedback@DB_UPGRADE __APIREL__/api_c/db_set_feedback.html__OCT__DB_VERIFY DB-__GT__set_feedback@DB_VERIFY -__APIREL__/api_c/db_set_flags.html__OCT__2 @DB-__GT__set_flags -__APIREL__/api_c/db_set_flags.html__OCT__3 @DB-__GT__get_flags -__APIREL__/api_c/db_set_flags.html__OCT__4 database page @checksum +__APIREL__/api_c/db_set_flags.html__OCT__2 database page @checksum __APIREL__/api_c/db_set_flags.html__OCT__DB_CHKSUM DB-__GT__set_flags@DB_CHKSUM -__APIREL__/api_c/db_set_flags.html__OCT__5 database @encryption +__APIREL__/api_c/db_set_flags.html__OCT__3 database @encryption __APIREL__/api_c/db_set_flags.html__OCT__DB_ENCRYPT DB-__GT__set_flags@DB_ENCRYPT -__APIREL__/api_c/db_set_flags.html__OCT__6 turn off database @durability +__APIREL__/api_c/db_set_flags.html__OCT__4 turn off database @durability __APIREL__/api_c/db_set_flags.html__OCT__DB_TXN_NOT_DURABLE DB-__GT__set_flags@DB_TXN_NOT_DURABLE -__APIREL__/api_c/db_set_flags.html__OCT__7 @duplicate data items +__APIREL__/api_c/db_set_flags.html__OCT__5 @duplicate data items __APIREL__/api_c/db_set_flags.html__OCT__DB_DUP DB-__GT__set_flags@DB_DUP -__APIREL__/api_c/db_set_flags.html__OCT__8 sorted @duplicate data items +__APIREL__/api_c/db_set_flags.html__OCT__6 sorted @duplicate data items __APIREL__/api_c/db_set_flags.html__OCT__DB_DUPSORT DB-__GT__set_flags@DB_DUPSORT -__APIREL__/api_c/db_set_flags.html__OCT__9 accessing Btree records by @record number +__APIREL__/api_c/db_set_flags.html__OCT__7 accessing Btree records by @record number __APIREL__/api_c/db_set_flags.html__OCT__DB_RECNUM DB-__GT__set_flags@DB_RECNUM -__APIREL__/api_c/db_set_flags.html__OCT__10 turn off @reverse splits in Btree databases -__APIREL__/api_c/db_set_flags.html__OCT__11 turn off reverse @splits in Btree 
databases +__APIREL__/api_c/db_set_flags.html__OCT__8 turn off @reverse splits in Btree databases +__APIREL__/api_c/db_set_flags.html__OCT__9 turn off reverse @splits in Btree databases __APIREL__/api_c/db_set_flags.html__OCT__DB_REVSPLITOFF DB-__GT__set_flags@DB_REVSPLITOFF __APIREL__/api_c/db_set_flags.html__OCT__DB_DUP DB-__GT__set_flags@DB_DUP __APIREL__/api_c/db_set_flags.html__OCT__DB_DUPSORT DB-__GT__set_flags@DB_DUPSORT -__APIREL__/api_c/db_set_flags.html__OCT__12 @renumbering records in Recno databases +__APIREL__/api_c/db_set_flags.html__OCT__10 @ordered retrieval of records from Queue databases +__APIREL__/api_c/db_set_flags.html__OCT__DB_INORDER DB-__GT__set_flags@DB_INORDER +__APIREL__/api_c/db_set_flags.html__OCT__11 @renumbering records in Recno databases __APIREL__/api_c/db_set_flags.html__OCT__DB_RENUMBER DB-__GT__set_flags@DB_RENUMBER -__APIREL__/api_c/db_set_flags.html__OCT__13 pre-loading @text files into Recno databases +__APIREL__/api_c/db_set_flags.html__OCT__12 pre-loading @text files into Recno databases __APIREL__/api_c/db_set_flags.html__OCT__DB_SNAPSHOT DB-__GT__set_flags@DB_SNAPSHOT -__APIREL__/api_c/db_set_h_ffactor.html__OCT__2 @DB-__GT__set_h_ffactor -__APIREL__/api_c/db_set_h_ffactor.html__OCT__3 @DB-__GT__get_h_ffactor -__APIREL__/api_c/db_set_h_hash.html__OCT__2 @DB-__GT__set_h_hash -__APIREL__/api_c/db_set_h_nelem.html__OCT__2 @DB-__GT__set_h_nelem -__APIREL__/api_c/db_set_h_nelem.html__OCT__3 @DB-__GT__get_h_nelem -__APIREL__/api_c/db_set_lorder.html__OCT__2 @DB-__GT__set_lorder -__APIREL__/api_c/db_set_lorder.html__OCT__3 @DB-__GT__get_lorder -__APIREL__/api_c/db_set_pagesize.html__OCT__2 @DB-__GT__set_pagesize -__APIREL__/api_c/db_set_pagesize.html__OCT__3 @DB-__GT__get_pagesize -__APIREL__/api_c/db_set_paniccall.html__OCT__2 @DB-__GT__set_paniccall -__APIREL__/api_c/db_set_q_extentsize.html__OCT__2 @DB-__GT__set_q_extentsize -__APIREL__/api_c/db_set_q_extentsize.html__OCT__3 @DB-__GT__get_q_extentsize -__APIREL__/api_c/db_set_re_delim.html__OCT__2 @DB-__GT__set_re_delim -__APIREL__/api_c/db_set_re_delim.html__OCT__3 @DB-__GT__get_re_delim -__APIREL__/api_c/db_set_re_len.html__OCT__2 @DB-__GT__set_re_len -__APIREL__/api_c/db_set_re_len.html__OCT__3 @DB-__GT__get_re_len -__APIREL__/api_c/db_set_re_pad.html__OCT__2 @DB-__GT__set_re_pad -__APIREL__/api_c/db_set_re_pad.html__OCT__3 @DB-__GT__get_re_pad -__APIREL__/api_c/db_set_re_source.html__OCT__2 @DB-__GT__set_re_source -__APIREL__/api_c/db_set_re_source.html__OCT__3 @DB-__GT__get_re_source -__APIREL__/api_c/db_stat.html__OCT__2 @DB-__GT__stat +__APIREL__/api_c/db_stat.html__OCT__DB_DEGREE_2 DB-__GT__stat@DB_DEGREE_2 +__APIREL__/api_c/db_stat.html__OCT__DB_DIRTY_READ DB-__GT__stat@DB_DIRTY_READ __APIREL__/api_c/db_stat.html__OCT__DB_FAST_STAT DB-__GT__stat@DB_FAST_STAT -__APIREL__/api_c/db_sync.html__OCT__2 @DB-__GT__sync -__APIREL__/api_c/db_truncate.html__OCT__2 @DB-__GT__truncate +__APIREL__/api_c/db_stat.html__OCT__DB_STAT_ALL DB-__GT__stat@DB_STAT_ALL __APIREL__/api_c/db_truncate.html__OCT__DB_AUTO_COMMIT DB-__GT__truncate@DB_AUTO_COMMIT -__APIREL__/api_c/db_upgrade.html__OCT__2 @DB-__GT__upgrade __APIREL__/api_c/db_upgrade.html__OCT__DB_DUPSORT DB-__GT__upgrade@DB_DUPSORT __APIREL__/api_c/db_upgrade.html__OCT__DB_OLD_VERSION DB-__GT__upgrade@DB_OLD_VERSION -__APIREL__/api_c/db_verify.html__OCT__2 @DB-__GT__verify -__APIREL__/api_c/db_verify.html__OCT__3 @DB_VERIFY_BAD +__APIREL__/api_c/db_verify.html__OCT__2 @DB_VERIFY_BAD __APIREL__/api_c/db_verify.html__OCT__DB_SALVAGE 
DB-__GT__verify@DB_SALVAGE __APIREL__/api_c/db_verify.html__OCT__DB_AGGRESSIVE DB-__GT__verify@DB_AGGRESSIVE __APIREL__/api_c/db_verify.html__OCT__DB_PRINTABLE DB-__GT__verify@DB_PRINTABLE __APIREL__/api_c/db_verify.html__OCT__DB_NOORDERCHK DB-__GT__verify@DB_NOORDERCHK __APIREL__/api_c/db_verify.html__OCT__DB_ORDERCHKONLY DB-__GT__verify@DB_ORDERCHKONLY -__APIREL__/api_c/dbt_class.html__OCT__2 @DBT -__APIREL__/api_c/dbt_class.html__OCT__3 @key/data pairs +__APIREL__/api_c/dbt_class.html__OCT__2 @key/data pairs __APIREL__/api_c/dbt_class.html__OCT__data DBT@data __APIREL__/api_c/dbt_class.html__OCT__size DBT@size __APIREL__/api_c/dbt_class.html__OCT__ulen DBT@ulen @@ -143,26 +85,15 @@ __APIREL__/api_c/dbt_class.html__OCT__dlen DBT@dlen __APIREL__/api_c/dbt_class.html__OCT__doff DBT@doff __APIREL__/api_c/dbt_class.html__OCT__DB_DBT_MALLOC DBT@DB_DBT_MALLOC __APIREL__/api_c/dbt_class.html__OCT__DB_DBT_REALLOC DBT@DB_DBT_REALLOC +__APIREL__/api_c/dbt_class.html__OCT__3 @DB_BUFFER_SMALL __APIREL__/api_c/dbt_class.html__OCT__DB_DBT_USERMEM DBT@DB_DBT_USERMEM __APIREL__/api_c/dbt_class.html__OCT__DB_DBT_PARTIAL DBT@DB_DBT_PARTIAL -__APIREL__/api_c/db_fd.html__OCT__2 @DB-__GT__fd -__APIREL__/api_c/db_set_alloc.html__OCT__2 @DB-__GT__set_alloc -__APIREL__/api_c/db_set_errfile.html__OCT__2 @DB-__GT__set_errfile -__APIREL__/api_c/db_set_errfile.html__OCT__3 @DB-__GT__get_errfile -__APIREL__/api_c/dbt_bulk.html__OCT__2 @DBT -__APIREL__/api_c/dbt_bulk.html__OCT__3 @bulk retrieval +__APIREL__/api_c/dbt_bulk.html__OCT__2 @bulk retrieval __APIREL__/api_c/dbt_bulk.html__OCT__DB_MULTIPLE_INIT DBT@DB_MULTIPLE_INIT __APIREL__/api_c/dbt_bulk.html__OCT__DB_MULTIPLE_NEXT DBT@DB_MULTIPLE_NEXT __APIREL__/api_c/dbt_bulk.html__OCT__DB_MULTIPLE_KEY_NEXT DBT@DB_MULTIPLE_KEY_NEXT __APIREL__/api_c/dbt_bulk.html__OCT__DB_MULTIPLE_RECNO_NEXT DBT@DB_MULTIPLE_RECNO_NEXT -__APIREL__/api_c/dbc_class.html__OCT__2 @DBC -__APIREL__/api_c/dbc_close.html__OCT__2 @DBcursor-__GT__c_close -__APIREL__/api_c/dbc_count.html__OCT__2 @DBcursor-__GT__c_count -__APIREL__/api_c/dbc_del.html__OCT__2 @DBcursor-__GT__c_del -__APIREL__/api_c/dbc_dup.html__OCT__2 @DBcursor-__GT__c_dup __APIREL__/api_c/dbc_dup.html__OCT__DB_POSITION DBcursor-__GT__c_dup@DB_POSITION -__APIREL__/api_c/dbc_get.html__OCT__2 @DBcursor-__GT__c_get -__APIREL__/api_c/dbc_get.html__OCT__3 @DBcursor-__GT__c_pget __APIREL__/api_c/dbc_get.html__OCT__DB_CURRENT DBcursor-__GT__c_get@DB_CURRENT __APIREL__/api_c/dbc_get.html__OCT__DB_FIRST DBcursor-__GT__c_get@DB_FIRST __APIREL__/api_c/dbc_get.html__OCT__DB_GET_BOTH DBcursor-__GT__c_get@DB_GET_BOTH @@ -182,25 +113,15 @@ __APIREL__/api_c/dbc_get.html__OCT__DB_DIRTY_READ DBcursor-__GT__c_get@DB_DIRTY_ __APIREL__/api_c/dbc_get.html__OCT__DB_MULTIPLE DBcursor-__GT__c_get@DB_MULTIPLE __APIREL__/api_c/dbc_get.html__OCT__DB_MULTIPLE_KEY DBcursor-__GT__c_get@DB_MULTIPLE_KEY __APIREL__/api_c/dbc_get.html__OCT__DB_RMW DBcursor-__GT__c_get@DB_RMW -__APIREL__/api_c/dbc_put.html__OCT__2 @DBcursor-__GT__c_put __APIREL__/api_c/dbc_put.html__OCT__DB_AFTER DBcursor-__GT__c_put@DB_AFTER __APIREL__/api_c/dbc_put.html__OCT__DB_BEFORE DBcursor-__GT__c_put@DB_BEFORE __APIREL__/api_c/dbc_put.html__OCT__DB_CURRENT DBcursor-__GT__c_put@DB_CURRENT __APIREL__/api_c/dbc_put.html__OCT__DB_KEYFIRST DBcursor-__GT__c_put@DB_KEYFIRST __APIREL__/api_c/dbc_put.html__OCT__DB_KEYLAST DBcursor-__GT__c_put@DB_KEYLAST __APIREL__/api_c/dbc_put.html__OCT__DB_NODUPDATA DBcursor-__GT__c_put@DB_NODUPDATA -__APIREL__/api_c/env_class.html__OCT__2 @db_env_create 
__APIREL__/api_c/env_class.html__OCT__DB_RPCCLIENT db_env_create@DB_RPCCLIENT -__APIREL__/api_c/env_close.html__OCT__2 @DB_ENV-__GT__close -__APIREL__/api_c/env_dbremove.html__OCT__2 @DB_ENV-__GT__dbremove __APIREL__/api_c/env_dbremove.html__OCT__DB_AUTO_COMMIT DB_ENV-__GT__dbremove@DB_AUTO_COMMIT -__APIREL__/api_c/env_dbrename.html__OCT__2 @DB_ENV-__GT__dbrename __APIREL__/api_c/env_dbrename.html__OCT__DB_AUTO_COMMIT DB_ENV-__GT__dbrename@DB_AUTO_COMMIT -__APIREL__/api_c/env_err.html__OCT__2 @DB_ENV-__GT__err -__APIREL__/api_c/env_err.html__OCT__3 @DB_ENV-__GT__errx -__APIREL__/api_c/env_open.html__OCT__2 @DB_ENV-__GT__open -__APIREL__/api_c/env_open.html__OCT__3 @DB_ENV-__GT__get_home -__APIREL__/api_c/env_open.html__OCT__4 @DB_ENV-__GT__get_open_flags __APIREL__/api_c/env_open.html__OCT__DB_JOINENV DB_ENV-__GT__open@DB_JOINENV __APIREL__/api_c/env_open.html__OCT__DB_INIT_CDB DB_ENV-__GT__open@DB_INIT_CDB __APIREL__/api_c/env_open.html__OCT__DB_INIT_LOCK DB_ENV-__GT__open@DB_INIT_LOCK @@ -210,7 +131,7 @@ __APIREL__/api_c/env_open.html__OCT__DB_INIT_REP DB_ENV-__GT__open@DB_INIT_REP __APIREL__/api_c/env_open.html__OCT__DB_INIT_TXN DB_ENV-__GT__open@DB_INIT_TXN __APIREL__/api_c/env_open.html__OCT__DB_RECOVER DB_ENV-__GT__open@DB_RECOVER __APIREL__/api_c/env_open.html__OCT__DB_RECOVER_FATAL DB_ENV-__GT__open@DB_RECOVER_FATAL -__APIREL__/api_c/env_open.html__OCT__5 use @environment constants in naming +__APIREL__/api_c/env_open.html__OCT__2 use @environment constants in naming __APIREL__/api_c/env_open.html__OCT__DB_USE_ENVIRON DB_ENV-__GT__open@DB_USE_ENVIRON __APIREL__/api_c/env_open.html__OCT__DB_USE_ENVIRON_ROOT DB_ENV-__GT__open@DB_USE_ENVIRON_ROOT __APIREL__/api_c/env_open.html__OCT__DB_CREATE DB_ENV-__GT__open@DB_CREATE @@ -218,41 +139,32 @@ __APIREL__/api_c/env_open.html__OCT__DB_LOCKDOWN DB_ENV-__GT__open@DB_LOCKDOWN __APIREL__/api_c/env_open.html__OCT__DB_PRIVATE DB_ENV-__GT__open@DB_PRIVATE __APIREL__/api_c/env_open.html__OCT__DB_SYSTEM_MEM DB_ENV-__GT__open@DB_SYSTEM_MEM __APIREL__/api_c/env_open.html__OCT__DB_THREAD DB_ENV-__GT__open@DB_THREAD -__APIREL__/api_c/env_remove.html__OCT__2 @DB_ENV-__GT__remove +__APIREL__/api_c/env_open.html__OCT__3 @DB_VERSION_MISMATCH __APIREL__/api_c/env_remove.html__OCT__DB_FORCE DB_ENV-__GT__remove@DB_FORCE -__APIREL__/api_c/env_remove.html__OCT__3 use @environment constants in naming +__APIREL__/api_c/env_remove.html__OCT__2 use @environment constants in naming __APIREL__/api_c/env_remove.html__OCT__DB_USE_ENVIRON DB_ENV-__GT__remove@DB_USE_ENVIRON __APIREL__/api_c/env_remove.html__OCT__DB_USE_ENVIRON_ROOT DB_ENV-__GT__remove@DB_USE_ENVIRON_ROOT -__APIREL__/api_c/env_set_app_dispatch.html__OCT__2 @DB_ENV-__GT__set_app_dispatch __APIREL__/api_c/env_set_app_dispatch.html__OCT__DB_TXN_BACKWARD_ROLL DB_ENV-__GT__set_app_dispatch@DB_TXN_BACKWARD_ROLL __APIREL__/api_c/env_set_app_dispatch.html__OCT__DB_TXN_FORWARD_ROLL DB_ENV-__GT__set_app_dispatch@DB_TXN_FORWARD_ROLL __APIREL__/api_c/env_set_app_dispatch.html__OCT__DB_TXN_ABORT DB_ENV-__GT__set_app_dispatch@DB_TXN_ABORT __APIREL__/api_c/env_set_app_dispatch.html__OCT__DB_TXN_APPLY DB_ENV-__GT__set_app_dispatch@DB_TXN_APPLY __APIREL__/api_c/env_set_app_dispatch.html__OCT__DB_TXN_PRINT DB_ENV-__GT__set_app_dispatch@DB_TXN_PRINT -__APIREL__/api_c/env_set_cachesize.html__OCT__2 @DB_ENV-__GT__set_cachesize -__APIREL__/api_c/env_set_cachesize.html__OCT__3 @DB_ENV-__GT__get_cachesize -__APIREL__/api_c/env_set_cachesize.html__OCT__4 @DbEnv::get_cachesize_nocache 
-__APIREL__/api_c/env_set_data_dir.html__OCT__2 @DB_ENV-__GT__set_data_dir -__APIREL__/api_c/env_set_data_dir.html__OCT__3 @DB_ENV-__GT__get_data_dirs -__APIREL__/api_c/env_set_encrypt.html__OCT__2 @DB_ENV-__GT__set_encrypt -__APIREL__/api_c/env_set_encrypt.html__OCT__3 @DB_ENV-__GT__get_encrypt_flags __APIREL__/api_c/env_set_encrypt.html__OCT__DB_ENCRYPT_AES DB_ENV-__GT__set_encrypt@DB_ENCRYPT_AES -__APIREL__/api_c/env_set_errcall.html__OCT__2 @DB_ENV-__GT__set_errcall -__APIREL__/api_c/env_set_errpfx.html__OCT__2 @DB_ENV-__GT__set_errpfx -__APIREL__/api_c/env_set_errpfx.html__OCT__3 @DB_ENV-__GT__get_errpfx -__APIREL__/api_c/env_set_feedback.html__OCT__2 @DB_ENV-__GT__set_feedback __APIREL__/api_c/env_set_feedback.html__OCT__DB_RECOVER DB_ENV-__GT__set_feedback@DB_RECOVER -__APIREL__/api_c/env_set_flags.html__OCT__2 @DB_ENV-__GT__set_flags -__APIREL__/api_c/env_set_flags.html__OCT__3 @DB_ENV-__GT__get_flags __APIREL__/api_c/env_set_flags.html__OCT__DB_AUTO_COMMIT DB_ENV-__GT__set_flags@DB_AUTO_COMMIT -__APIREL__/api_c/env_set_flags.html__OCT__4 configure @locking for Berkeley DB Concurrent Data Store +__APIREL__/api_c/env_set_flags.html__OCT__2 configure @locking for Berkeley DB Concurrent Data Store __APIREL__/api_c/env_set_flags.html__OCT__DB_CDB_ALLDB DB_ENV-__GT__set_flags@DB_CDB_ALLDB -__APIREL__/api_c/env_set_flags.html__OCT__5 turn off system @buffering for database files +__APIREL__/api_c/env_set_flags.html__OCT__3 turn off system @buffering for database files __APIREL__/api_c/env_set_flags.html__OCT__DB_DIRECT_DB DB_ENV-__GT__set_flags@DB_DIRECT_DB -__APIREL__/api_c/env_set_flags.html__OCT__6 turn off system @buffering for log files +__APIREL__/api_c/env_set_flags.html__OCT__4 turn off system @buffering for log files __APIREL__/api_c/env_set_flags.html__OCT__DB_DIRECT_LOG DB_ENV-__GT__set_flags@DB_DIRECT_LOG -__APIREL__/api_c/env_set_flags.html__OCT__7 automatic @log file removal +__APIREL__/api_c/env_set_flags.html__OCT__5 turn off system @buffering for log files +__APIREL__/api_c/env_set_flags.html__OCT__DB_DSYNC_LOG DB_ENV-__GT__set_flags@DB_DSYNC_LOG +__APIREL__/api_c/env_set_flags.html__OCT__6 automatic @log file removal __APIREL__/api_c/env_set_flags.html__OCT__DB_LOG_AUTOREMOVE DB_ENV-__GT__set_flags@DB_LOG_AUTOREMOVE +__APIREL__/api_c/env_set_flags.html__OCT__7 in memory @logs +__APIREL__/api_c/env_set_flags.html__OCT__DB_LOG_INMEMORY DB_ENV-__GT__set_flags@DB_LOG_INMEMORY +__APIREL__/api_c/env_set_flags.html__OCT__DB_LOG_BUFFER_FULL DB_ENV-__GT__set_flags@DB_LOG_BUFFER_FULL __APIREL__/api_c/env_set_flags.html__OCT__8 ignore @locking __APIREL__/api_c/env_set_flags.html__OCT__DB_NOLOCKING DB_ENV-__GT__set_flags@DB_NOLOCKING __APIREL__/api_c/env_set_flags.html__OCT__9 turn off database file @memory mapping @@ -267,84 +179,51 @@ __APIREL__/api_c/env_set_flags.html__OCT__DB_REGION_INIT DB_ENV-__GT__set_flags@ __APIREL__/api_c/env_set_flags.html__OCT__DB_TIME_NOTGRANTED DB_ENV-__GT__set_flags@DB_TIME_NOTGRANTED __APIREL__/api_c/env_set_flags.html__OCT__13 turn off synchronous @transaction commit __APIREL__/api_c/env_set_flags.html__OCT__DB_TXN_NOSYNC DB_ENV-__GT__set_flags@DB_TXN_NOSYNC -__APIREL__/api_c/env_set_flags.html__OCT__14 turn off @durability in the database environment -__APIREL__/api_c/env_set_flags.html__OCT__DB_TXN_NOT_DURABLE DB_ENV-__GT__set_flags@DB_TXN_NOT_DURABLE -__APIREL__/api_c/env_set_flags.html__OCT__15 turn off synchronous @transaction commit +__APIREL__/api_c/env_set_flags.html__OCT__14 turn off synchronous @transaction commit 
__APIREL__/api_c/env_set_flags.html__OCT__DB_TXN_WRITE_NOSYNC DB_ENV-__GT__set_flags@DB_TXN_WRITE_NOSYNC -__APIREL__/api_c/env_set_flags.html__OCT__16 configure for @stress testing +__APIREL__/api_c/env_set_flags.html__OCT__15 configure for @stress testing __APIREL__/api_c/env_set_flags.html__OCT__DB_YIELDCPU DB_ENV-__GT__set_flags@DB_YIELDCPU -__APIREL__/api_c/env_set_paniccall.html__OCT__2 @DB_ENV-__GT__set_paniccall -__APIREL__/api_c/env_set_rpc_server.html__OCT__2 @DB_ENV-__GT__set_rpc_server __APIREL__/api_c/env_set_rpc_server.html__OCT__DB_NOSERVER DB_ENV-__GT__set_rpc_server@DB_NOSERVER __APIREL__/api_c/env_set_rpc_server.html__OCT__DB_NOSERVER_ID DB_ENV-__GT__set_rpc_server@DB_NOSERVER_ID __APIREL__/api_c/env_set_rpc_server.html__OCT__DB_NOSERVER_HOME DB_ENV-__GT__set_rpc_server@DB_NOSERVER_HOME -__APIREL__/api_c/env_set_rpc_server.html__OCT__3 @DB_NOSERVER -__APIREL__/api_c/env_set_rpc_server.html__OCT__4 @DB_NOSERVER_ID -__APIREL__/api_c/env_set_shm_key.html__OCT__2 @DB_ENV-__GT__set_shm_key -__APIREL__/api_c/env_set_shm_key.html__OCT__3 @DB_ENV-__GT__get_shm_key -__APIREL__/api_c/env_set_tas_spins.html__OCT__2 @DB_ENV-__GT__set_tas_spins -__APIREL__/api_c/env_set_tas_spins.html__OCT__3 @DB_ENV-__GT__get_tas_spins -__APIREL__/api_c/env_set_timeout.html__OCT__2 @DB_ENV-__GT__set_timeout -__APIREL__/api_c/env_set_timeout.html__OCT__3 @DB_ENV-__GT__get_timeout +__APIREL__/api_c/env_set_rpc_server.html__OCT__2 @DB_NOSERVER +__APIREL__/api_c/env_set_rpc_server.html__OCT__3 @DB_NOSERVER_ID __APIREL__/api_c/env_set_timeout.html__OCT__DB_SET_LOCK_TIMEOUT DB_ENV-__GT__set_timeout@DB_SET_LOCK_TIMEOUT __APIREL__/api_c/env_set_timeout.html__OCT__DB_SET_TXN_TIMEOUT DB_ENV-__GT__set_timeout@DB_SET_TXN_TIMEOUT __APIREL__/api_c/env_set_timeout.html__OCT__DB_SET_LOCK_TIMEOUT DB_ENV-__GT__set_timeout@DB_SET_LOCK_TIMEOUT __APIREL__/api_c/env_set_timeout.html__OCT__DB_SET_TXN_TIMEOUT DB_ENV-__GT__set_timeout@DB_SET_TXN_TIMEOUT -__APIREL__/api_c/env_set_tmp_dir.html__OCT__2 @DB_ENV-__GT__set_tmp_dir -__APIREL__/api_c/env_set_tmp_dir.html__OCT__3 @temporary files -__APIREL__/api_c/env_set_tmp_dir.html__OCT__4 @DB_ENV-__GT__get_tmp_dir -__APIREL__/api_c/env_set_verbose.html__OCT__2 @DB_ENV-__GT__set_verbose -__APIREL__/api_c/env_set_verbose.html__OCT__3 @DB_ENV-__GT__get_verbose -__APIREL__/api_c/env_set_verbose.html__OCT__DB_VERB_CHKPOINT DB_ENV-__GT__set_verbose@DB_VERB_CHKPOINT +__APIREL__/api_c/env_set_tmp_dir.html__OCT__2 @temporary files __APIREL__/api_c/env_set_verbose.html__OCT__DB_VERB_DEADLOCK DB_ENV-__GT__set_verbose@DB_VERB_DEADLOCK __APIREL__/api_c/env_set_verbose.html__OCT__DB_VERB_RECOVERY DB_ENV-__GT__set_verbose@DB_VERB_RECOVERY __APIREL__/api_c/env_set_verbose.html__OCT__DB_VERB_REPLICATION DB_ENV-__GT__set_verbose@DB_VERB_REPLICATION __APIREL__/api_c/env_set_verbose.html__OCT__DB_VERB_WAITSFOR DB_ENV-__GT__set_verbose@DB_VERB_WAITSFOR -__APIREL__/api_c/env_strerror.html__OCT__2 @db_strerror -__APIREL__/api_c/env_version.html__OCT__2 @db_version -__APIREL__/api_c/env_version.html__OCT__3 @db_version -__APIREL__/api_c/env_set_alloc.html__OCT__2 @DB_ENV-__GT__set_alloc -__APIREL__/api_c/env_set_errfile.html__OCT__2 @DB_ENV-__GT__set_errfile -__APIREL__/api_c/env_set_errfile.html__OCT__3 @DB_ENV-__GT__get_errfile -__APIREL__/api_c/dbm.html__OCT__2 @dbm/ndbm -__APIREL__/api_c/hsearch.html__OCT__2 @hsearch -__APIREL__/api_c/hsearch.html__OCT__3 @hcreate -__APIREL__/api_c/env_set_lk_conflicts.html__OCT__2 @DB_ENV-__GT__set_lk_conflicts -__APIREL__/api_c/env_set_lk_conflicts.html__OCT__3 
@DB_ENV-__GT__get_lk_conflicts -__APIREL__/api_c/env_set_lk_detect.html__OCT__2 @DB_ENV-__GT__set_lk_detect -__APIREL__/api_c/env_set_lk_detect.html__OCT__3 @DB_ENV-__GT__get_lk_detect +__APIREL__/api_c/env_stat.html__OCT__DB_STAT_ALL DB_ENV-__GT__stat_print@DB_STAT_ALL +__APIREL__/api_c/env_stat.html__OCT__DB_STAT_SUBSYSTEM DB_ENV-__GT__stat_print@DB_STAT_SUBSYSTEM __APIREL__/api_c/env_set_lk_detect.html__OCT__DB_LOCK_DEFAULT DB_ENV-__GT__set_lk_detect@DB_LOCK_DEFAULT __APIREL__/api_c/env_set_lk_detect.html__OCT__DB_LOCK_EXPIRE DB_ENV-__GT__set_lk_detect@DB_LOCK_EXPIRE __APIREL__/api_c/env_set_lk_detect.html__OCT__DB_LOCK_MAXLOCKS DB_ENV-__GT__set_lk_detect@DB_LOCK_MAXLOCKS +__APIREL__/api_c/env_set_lk_detect.html__OCT__DB_LOCK_MAXWRITE DB_ENV-__GT__set_lk_detect@DB_LOCK_MAXWRITE __APIREL__/api_c/env_set_lk_detect.html__OCT__DB_LOCK_MINLOCKS DB_ENV-__GT__set_lk_detect@DB_LOCK_MINLOCKS __APIREL__/api_c/env_set_lk_detect.html__OCT__DB_LOCK_MINWRITE DB_ENV-__GT__set_lk_detect@DB_LOCK_MINWRITE __APIREL__/api_c/env_set_lk_detect.html__OCT__DB_LOCK_OLDEST DB_ENV-__GT__set_lk_detect@DB_LOCK_OLDEST __APIREL__/api_c/env_set_lk_detect.html__OCT__DB_LOCK_RANDOM DB_ENV-__GT__set_lk_detect@DB_LOCK_RANDOM __APIREL__/api_c/env_set_lk_detect.html__OCT__DB_LOCK_YOUNGEST DB_ENV-__GT__set_lk_detect@DB_LOCK_YOUNGEST -__APIREL__/api_c/env_set_lk_max_lockers.html__OCT__2 @DB_ENV-__GT__set_lk_max_lockers -__APIREL__/api_c/env_set_lk_max_lockers.html__OCT__3 @DB_ENV-__GT__get_lk_max_lockers -__APIREL__/api_c/env_set_lk_max_locks.html__OCT__2 @DB_ENV-__GT__set_lk_max_locks -__APIREL__/api_c/env_set_lk_max_locks.html__OCT__3 @DB_ENV-__GT__get_lk_max_locks -__APIREL__/api_c/env_set_lk_max_objects.html__OCT__2 @DB_ENV-__GT__set_lk_max_objects -__APIREL__/api_c/env_set_lk_max_objects.html__OCT__3 @DB_ENV-__GT__get_lk_max_objects -__APIREL__/api_c/lock_class.html__OCT__2 @DB_LOCK -__APIREL__/api_c/lock_detect.html__OCT__2 @DB_ENV-__GT__lock_detect __APIREL__/api_c/lock_detect.html__OCT__DB_LOCK_DEFAULT DB_ENV-__GT__lock_detect@DB_LOCK_DEFAULT __APIREL__/api_c/lock_detect.html__OCT__DB_LOCK_EXPIRE DB_ENV-__GT__lock_detect@DB_LOCK_EXPIRE __APIREL__/api_c/lock_detect.html__OCT__DB_LOCK_MAXLOCKS DB_ENV-__GT__lock_detect@DB_LOCK_MAXLOCKS +__APIREL__/api_c/lock_detect.html__OCT__DB_LOCK_MAXWRITE DB_ENV-__GT__lock_detect@DB_LOCK_MAXWRITE __APIREL__/api_c/lock_detect.html__OCT__DB_LOCK_MINLOCKS DB_ENV-__GT__lock_detect@DB_LOCK_MINLOCKS __APIREL__/api_c/lock_detect.html__OCT__DB_LOCK_MINWRITE DB_ENV-__GT__lock_detect@DB_LOCK_MINWRITE __APIREL__/api_c/lock_detect.html__OCT__DB_LOCK_OLDEST DB_ENV-__GT__lock_detect@DB_LOCK_OLDEST __APIREL__/api_c/lock_detect.html__OCT__DB_LOCK_RANDOM DB_ENV-__GT__lock_detect@DB_LOCK_RANDOM __APIREL__/api_c/lock_detect.html__OCT__DB_LOCK_YOUNGEST DB_ENV-__GT__lock_detect@DB_LOCK_YOUNGEST -__APIREL__/api_c/lock_get.html__OCT__2 @DB_ENV-__GT__lock_get __APIREL__/api_c/lock_get.html__OCT__DB_LOCK_NOWAIT DB_ENV-__GT__lock_get@DB_LOCK_NOWAIT -__APIREL__/api_c/lock_id.html__OCT__2 @DB_ENV-__GT__lock_id -__APIREL__/api_c/lock_id_free.html__OCT__2 @DB_ENV-__GT__lock_id_free -__APIREL__/api_c/lock_put.html__OCT__2 @DB_ENV-__GT__lock_put -__APIREL__/api_c/lock_stat.html__OCT__2 @DB_ENV-__GT__lock_stat __APIREL__/api_c/lock_stat.html__OCT__DB_STAT_CLEAR DB_ENV-__GT__lock_stat@DB_STAT_CLEAR -__APIREL__/api_c/lock_vec.html__OCT__2 @DB_ENV-__GT__lock_vec +__APIREL__/api_c/lock_stat.html__OCT__DB_STAT_ALL DB_ENV-__GT__lock_stat@DB_STAT_ALL +__APIREL__/api_c/lock_stat.html__OCT__DB_STAT_LOCK_CONF 
DB_ENV-__GT__lock_stat@DB_STAT_LOCK_CONF +__APIREL__/api_c/lock_stat.html__OCT__DB_STAT_LOCK_LOCKERS DB_ENV-__GT__lock_stat@DB_STAT_LOCK_LOCKERS +__APIREL__/api_c/lock_stat.html__OCT__DB_STAT_LOCK_OBJECTS DB_ENV-__GT__lock_stat@DB_STAT_LOCK_OBJECTS +__APIREL__/api_c/lock_stat.html__OCT__DB_STAT_LOCK_PARAMS DB_ENV-__GT__lock_stat@DB_STAT_LOCK_PARAMS __APIREL__/api_c/lock_vec.html__OCT__DB_LOCK_NOWAIT DB_ENV-__GT__lock_vec@DB_LOCK_NOWAIT __APIREL__/api_c/lock_vec.html__OCT__op DB_ENV-__GT__lock_vec@op __APIREL__/api_c/lock_vec.html__OCT__DB_LOCK_GET DB_ENV-__GT__lock_vec@DB_LOCK_GET @@ -361,159 +240,89 @@ __APIREL__/api_c/lock_vec.html__OCT__DB_LOCK_IWRITE DB_ENV-__GT__lock_vec@DB_LOC __APIREL__/api_c/lock_vec.html__OCT__DB_LOCK_IREAD DB_ENV-__GT__lock_vec@DB_LOCK_IREAD __APIREL__/api_c/lock_vec.html__OCT__DB_LOCK_IWR DB_ENV-__GT__lock_vec@DB_LOCK_IWR __APIREL__/api_c/lock_vec.html__OCT__obj DB_ENV-__GT__lock_vec@obj -__APIREL__/api_c/env_set_lg_bsize.html__OCT__2 @DB_ENV-__GT__set_lg_bsize -__APIREL__/api_c/env_set_lg_bsize.html__OCT__3 @DB_ENV-__GT__get_lg_bsize -__APIREL__/api_c/env_set_lg_dir.html__OCT__2 @DB_ENV-__GT__set_lg_dir -__APIREL__/api_c/env_set_lg_dir.html__OCT__3 @DB_ENV-__GT__get_lg_dir -__APIREL__/api_c/env_set_lg_max.html__OCT__2 @DB_ENV-__GT__set_lg_max -__APIREL__/api_c/env_set_lg_max.html__OCT__3 @DB_ENV-__GT__get_lg_max -__APIREL__/api_c/env_set_lg_regionmax.html__OCT__2 @DB_ENV-__GT__set_lg_regionmax -__APIREL__/api_c/env_set_lg_regionmax.html__OCT__3 @DB_ENV-__GT__get_lg_regionmax -__APIREL__/api_c/log_archive.html__OCT__2 @DB_ENV-__GT__log_archive __APIREL__/api_c/log_archive.html__OCT__DB_ARCH_ABS DB_ENV-__GT__log_archive@DB_ARCH_ABS __APIREL__/api_c/log_archive.html__OCT__DB_ARCH_DATA DB_ENV-__GT__log_archive@DB_ARCH_DATA __APIREL__/api_c/log_archive.html__OCT__DB_ARCH_LOG DB_ENV-__GT__log_archive@DB_ARCH_LOG __APIREL__/api_c/log_archive.html__OCT__DB_ARCH_REMOVE DB_ENV-__GT__log_archive@DB_ARCH_REMOVE -__APIREL__/api_c/log_compare.html__OCT__2 @log_compare -__APIREL__/api_c/log_cursor.html__OCT__2 @DB_ENV-__GT__log_cursor -__APIREL__/api_c/log_file.html__OCT__2 @DB_ENV-__GT__log_file -__APIREL__/api_c/log_flush.html__OCT__2 @DB_ENV-__GT__log_flush -__APIREL__/api_c/log_put.html__OCT__2 @DB_ENV-__GT__log_put __APIREL__/api_c/log_put.html__OCT__DB_FLUSH DB_ENV-__GT__log_put@DB_FLUSH -__APIREL__/api_c/log_stat.html__OCT__2 @DB_ENV-__GT__log_stat __APIREL__/api_c/log_stat.html__OCT__DB_STAT_CLEAR DB_ENV-__GT__log_stat@DB_STAT_CLEAR -__APIREL__/api_c/logc_class.html__OCT__2 @DB_LOGC -__APIREL__/api_c/logc_close.html__OCT__2 @DB_LOGC-__GT__close -__APIREL__/api_c/logc_get.html__OCT__2 @DB_LOGC-__GT__get +__APIREL__/api_c/log_stat.html__OCT__DB_STAT_ALL DB_ENV-__GT__log_stat@DB_STAT_ALL __APIREL__/api_c/logc_get.html__OCT__DB_CURRENT DB_LOGC-__GT__get@DB_CURRENT __APIREL__/api_c/logc_get.html__OCT__DB_FIRST DB_LOGC-__GT__get@DB_FIRST __APIREL__/api_c/logc_get.html__OCT__DB_LAST DB_LOGC-__GT__get@DB_LAST __APIREL__/api_c/logc_get.html__OCT__DB_NEXT DB_LOGC-__GT__get@DB_NEXT __APIREL__/api_c/logc_get.html__OCT__DB_PREV DB_LOGC-__GT__get@DB_PREV __APIREL__/api_c/logc_get.html__OCT__DB_SET DB_LOGC-__GT__get@DB_SET -__APIREL__/api_c/lsn_class.html__OCT__2 @DB_LSN -__APIREL__/api_c/env_set_mp_mmapsize.html__OCT__2 @DB_ENV-__GT__set_mp_mmapsize -__APIREL__/api_c/env_set_mp_mmapsize.html__OCT__3 @DB_ENV-__GT__get_mp_mmapsize -__APIREL__/api_c/memp_set_flags.html__OCT__2 @DB_MPOOLFILE-__GT__set_flags -__APIREL__/api_c/memp_set_flags.html__OCT__3 @DB_MPOOLFILE-__GT__get_flags 
-__APIREL__/api_c/memp_set_flags.html__OCT__DB_MPOOL_NOFILE DB_MPOOLFILE-__GT__set_flags@DB_MPOOL_NOFILE -__APIREL__/api_c/memp_set_maxsize.html__OCT__2 @DB_MPOOLFILE-__GT__set_maxsize -__APIREL__/api_c/memp_set_maxsize.html__OCT__3 @DB_MPOOLFILE-__GT__get_maxsize -__APIREL__/api_c/memp_set_priority.html__OCT__2 @DB_MPOOLFILE-__GT__set_priority -__APIREL__/api_c/memp_set_priority.html__OCT__3 @DB_MPOOLFILE-__GT__get_priority -__APIREL__/api_c/memp_set_priority.html__OCT__DB_PRIORITY_VERY_LOW DB_MPOOLFILE-__GT__set_priority@DB_PRIORITY_VERY_LOW -__APIREL__/api_c/memp_set_priority.html__OCT__DB_PRIORITY_LOW DB_MPOOLFILE-__GT__set_priority@DB_PRIORITY_LOW -__APIREL__/api_c/memp_set_priority.html__OCT__DB_PRIORITY_DEFAULT DB_MPOOLFILE-__GT__set_priority@DB_PRIORITY_DEFAULT -__APIREL__/api_c/memp_set_priority.html__OCT__DB_PRIORITY_HIGH DB_MPOOLFILE-__GT__set_priority@DB_PRIORITY_HIGH -__APIREL__/api_c/memp_set_priority.html__OCT__DB_PRIORITY_VERY_HIGH DB_MPOOLFILE-__GT__set_priority@DB_PRIORITY_VERY_HIGH -__APIREL__/api_c/memp_stat.html__OCT__2 @DB_ENV-__GT__memp_stat -__APIREL__/api_c/memp_stat.html__OCT__DB_STAT_CLEAR DB_ENV-__GT__memp_stat@DB_STAT_CLEAR -__APIREL__/api_c/memp_sync.html__OCT__2 @DB_ENV-__GT__memp_sync -__APIREL__/api_c/memp_trickle.html__OCT__2 @DB_ENV-__GT__memp_trickle -__APIREL__/api_c/mempfile_class.html__OCT__2 @DB_MPOOLFILE -__APIREL__/api_c/memp_fclose.html__OCT__2 @DB_MPOOLFILE-__GT__close -__APIREL__/api_c/memp_fcreate.html__OCT__2 @DB_ENV-__GT__memp_fcreate -__APIREL__/api_c/memp_fget.html__OCT__2 @DB_MPOOLFILE-__GT__get -__APIREL__/api_c/memp_fget.html__OCT__3 @DB_PAGE_NOTFOUND +__APIREL__/api_c/memp_fget.html__OCT__2 @DB_PAGE_NOTFOUND __APIREL__/api_c/memp_fget.html__OCT__DB_MPOOL_CREATE DB_MPOOLFILE-__GT__get@DB_MPOOL_CREATE __APIREL__/api_c/memp_fget.html__OCT__DB_MPOOL_LAST DB_MPOOLFILE-__GT__get@DB_MPOOL_LAST __APIREL__/api_c/memp_fget.html__OCT__DB_MPOOL_NEW DB_MPOOLFILE-__GT__get@DB_MPOOL_NEW -__APIREL__/api_c/memp_fopen.html__OCT__2 @DB_MPOOLFILE-__GT__open __APIREL__/api_c/memp_fopen.html__OCT__DB_CREATE DB_MPOOLFILE-__GT__open@DB_CREATE -__APIREL__/api_c/memp_fopen.html__OCT__3 turn off system @buffering +__APIREL__/api_c/memp_fopen.html__OCT__2 turn off system @buffering __APIREL__/api_c/memp_fopen.html__OCT__DB_DIRECT DB_MPOOLFILE-__GT__open@DB_DIRECT __APIREL__/api_c/memp_fopen.html__OCT__DB_NOMMAP DB_MPOOLFILE-__GT__open@DB_NOMMAP __APIREL__/api_c/memp_fopen.html__OCT__DB_ODDFILESIZE DB_MPOOLFILE-__GT__open@DB_ODDFILESIZE __APIREL__/api_c/memp_fopen.html__OCT__DB_RDONLY DB_MPOOLFILE-__GT__open@DB_RDONLY -__APIREL__/api_c/memp_fput.html__OCT__2 @DB_MPOOLFILE-__GT__put __APIREL__/api_c/memp_fput.html__OCT__DB_MPOOL_CLEAN DB_MPOOLFILE-__GT__put@DB_MPOOL_CLEAN __APIREL__/api_c/memp_fput.html__OCT__DB_MPOOL_DIRTY DB_MPOOLFILE-__GT__put@DB_MPOOL_DIRTY __APIREL__/api_c/memp_fput.html__OCT__DB_MPOOL_DISCARD DB_MPOOLFILE-__GT__put@DB_MPOOL_DISCARD -__APIREL__/api_c/memp_fset.html__OCT__2 @DB_MPOOLFILE-__GT__set __APIREL__/api_c/memp_fset.html__OCT__DB_MPOOL_CLEAN DB_MPOOLFILE-__GT__set@DB_MPOOL_CLEAN __APIREL__/api_c/memp_fset.html__OCT__DB_MPOOL_DIRTY DB_MPOOLFILE-__GT__set@DB_MPOOL_DIRTY __APIREL__/api_c/memp_fset.html__OCT__DB_MPOOL_DISCARD DB_MPOOLFILE-__GT__set@DB_MPOOL_DISCARD -__APIREL__/api_c/memp_fsync.html__OCT__2 @DB_MPOOLFILE-__GT__sync -__APIREL__/api_c/memp_register.html__OCT__2 @DB_ENV-__GT__memp_register -__APIREL__/api_c/memp_set_clear_len.html__OCT__2 @DB_MPOOLFILE-__GT__set_clear_len -__APIREL__/api_c/memp_set_clear_len.html__OCT__3 
@DB_MPOOLFILE-__GT__get_clear_len -__APIREL__/api_c/memp_set_fileid.html__OCT__2 @DB_MPOOLFILE-__GT__set_fileid -__APIREL__/api_c/memp_set_fileid.html__OCT__3 @DB_MPOOLFILE-__GT__get_fileid -__APIREL__/api_c/memp_set_ftype.html__OCT__2 @DB_MPOOLFILE-__GT__set_ftype -__APIREL__/api_c/memp_set_ftype.html__OCT__3 @DB_MPOOLFILE-__GT__get_ftype -__APIREL__/api_c/memp_set_lsn_offset.html__OCT__2 @DB_MPOOLFILE-__GT__set_lsn_offset -__APIREL__/api_c/memp_set_lsn_offset.html__OCT__3 @DB_MPOOLFILE-__GT__get_lsn_offset -__APIREL__/api_c/memp_set_pgcookie.html__OCT__2 @DB_MPOOLFILE-__GT__set_pgcookie -__APIREL__/api_c/memp_set_pgcookie.html__OCT__3 @DB_MPOOLFILE-__GT__get_pgcookie -__APIREL__/api_c/rep_elect.html__OCT__2 @DB_ENV-__GT__rep_elect -__APIREL__/api_c/rep_elect.html__OCT__3 @DB_REP_UNAVAIL -__APIREL__/api_c/rep_limit.html__OCT__2 @DB_ENV-__GT__set_rep_limit -__APIREL__/api_c/rep_limit.html__OCT__3 @DB_ENV-__GT__get_rep_limit -__APIREL__/api_c/rep_message.html__OCT__2 @DB_ENV-__GT__rep_process_message +__APIREL__/api_c/memp_set_flags.html__OCT__DB_MPOOL_NOFILE DB_MPOOLFILE-__GT__set_flags@DB_MPOOL_NOFILE +__APIREL__/api_c/memp_set_flags.html__OCT__DB_MPOOL_UNLINK DB_MPOOLFILE-__GT__set_flags@DB_MPOOL_UNLINK +__APIREL__/api_c/memp_set_priority.html__OCT__DB_PRIORITY_VERY_LOW DB_MPOOLFILE-__GT__set_priority@DB_PRIORITY_VERY_LOW +__APIREL__/api_c/memp_set_priority.html__OCT__DB_PRIORITY_LOW DB_MPOOLFILE-__GT__set_priority@DB_PRIORITY_LOW +__APIREL__/api_c/memp_set_priority.html__OCT__DB_PRIORITY_DEFAULT DB_MPOOLFILE-__GT__set_priority@DB_PRIORITY_DEFAULT +__APIREL__/api_c/memp_set_priority.html__OCT__DB_PRIORITY_HIGH DB_MPOOLFILE-__GT__set_priority@DB_PRIORITY_HIGH +__APIREL__/api_c/memp_set_priority.html__OCT__DB_PRIORITY_VERY_HIGH DB_MPOOLFILE-__GT__set_priority@DB_PRIORITY_VERY_HIGH +__APIREL__/api_c/memp_stat.html__OCT__DB_STAT_CLEAR DB_ENV-__GT__memp_stat@DB_STAT_CLEAR +__APIREL__/api_c/memp_stat.html__OCT__DB_STAT_CLEAR DB_ENV-__GT__memp_stat@DB_STAT_CLEAR +__APIREL__/api_c/memp_stat.html__OCT__DB_STAT_ALL DB_ENV-__GT__memp_stat@DB_STAT_ALL +__APIREL__/api_c/memp_stat.html__OCT__DB_STAT_MEMP_HASH DB_ENV-__GT__memp_stat@DB_STAT_MEMP_HASH +__APIREL__/api_c/rep_elect.html__OCT__2 @DB_REP_UNAVAIL __APIREL__/api_c/rep_message.html__OCT__DB_REP_DUPMASTER DB_ENV-__GT__rep_process_message@DB_REP_DUPMASTER __APIREL__/api_c/rep_message.html__OCT__DB_REP_HOLDELECTION DB_ENV-__GT__rep_process_message@DB_REP_HOLDELECTION __APIREL__/api_c/rep_message.html__OCT__DB_REP_ISPERM DB_ENV-__GT__rep_process_message@DB_REP_ISPERM __APIREL__/api_c/rep_message.html__OCT__DB_REP_NEWMASTER DB_ENV-__GT__rep_process_message@DB_REP_NEWMASTER __APIREL__/api_c/rep_message.html__OCT__DB_REP_NEWSITE DB_ENV-__GT__rep_process_message@DB_REP_NEWSITE __APIREL__/api_c/rep_message.html__OCT__DB_REP_NOTPERM DB_ENV-__GT__rep_process_message@DB_REP_NOTPERM -__APIREL__/api_c/rep_message.html__OCT__DB_REP_OUTDATED DB_ENV-__GT__rep_process_message@DB_REP_OUTDATED -__APIREL__/api_c/rep_start.html__OCT__2 @DB_ENV-__GT__rep_start +__APIREL__/api_c/rep_message.html__OCT__DB_REP_STARTUPDONE DB_ENV-__GT__rep_process_message@DB_REP_STARTUPDONE __APIREL__/api_c/rep_start.html__OCT__DB_REP_CLIENT DB_ENV-__GT__rep_start@DB_REP_CLIENT -__APIREL__/api_c/rep_start.html__OCT__DB_REP_LOGSONLY DB_ENV-__GT__rep_start@DB_REP_LOGSONLY __APIREL__/api_c/rep_start.html__OCT__DB_REP_MASTER DB_ENV-__GT__rep_start@DB_REP_MASTER -__APIREL__/api_c/rep_stat.html__OCT__2 @DB_ENV-__GT__rep_stat __APIREL__/api_c/rep_stat.html__OCT__DB_STAT_CLEAR 
DB_ENV-__GT__rep_stat@DB_STAT_CLEAR -__APIREL__/api_c/rep_transport.html__OCT__2 @DB_ENV-__GT__set_rep_transport -__APIREL__/api_c/rep_transport.html__OCT__3 @DB_EID_BROADCAST +__APIREL__/api_c/rep_stat.html__OCT__DB_STAT_ALL DB_ENV-__GT__rep_stat@DB_STAT_ALL +__APIREL__/api_c/rep_transport.html__OCT__2 @DB_EID_BROADCAST __APIREL__/api_c/rep_transport.html__OCT__DB_REP_NOBUFFER DB_ENV-__GT__set_rep_transport@DB_REP_NOBUFFER __APIREL__/api_c/rep_transport.html__OCT__DB_REP_PERMANENT DB_ENV-__GT__set_rep_transport@DB_REP_PERMANENT -__APIREL__/api_c/set_func_close.html__OCT__2 @db_env_set_func_close -__APIREL__/api_c/set_func_dirfree.html__OCT__2 @db_env_set_func_dirfree -__APIREL__/api_c/set_func_dirlist.html__OCT__2 @db_env_set_func_dirlist -__APIREL__/api_c/set_func_exists.html__OCT__2 @db_env_set_func_exists -__APIREL__/api_c/set_func_free.html__OCT__2 @db_env_set_func_free -__APIREL__/api_c/set_func_fsync.html__OCT__2 @db_env_set_func_fsync -__APIREL__/api_c/set_func_ioinfo.html__OCT__2 @db_env_set_func_ioinfo -__APIREL__/api_c/set_func_malloc.html__OCT__2 @db_env_set_func_malloc -__APIREL__/api_c/set_func_map.html__OCT__2 @db_env_set_func_map -__APIREL__/api_c/set_func_open.html__OCT__2 @db_env_set_func_open -__APIREL__/api_c/set_func_read.html__OCT__2 @db_env_set_func_read -__APIREL__/api_c/set_func_realloc.html__OCT__2 @db_env_set_func_realloc -__APIREL__/api_c/set_func_rename.html__OCT__2 @db_env_set_func_rename -__APIREL__/api_c/set_func_seek.html__OCT__2 @db_env_set_func_seek -__APIREL__/api_c/set_func_sleep.html__OCT__2 @db_env_set_func_sleep -__APIREL__/api_c/set_func_unlink.html__OCT__2 @db_env_set_func_unlink -__APIREL__/api_c/set_func_unmap.html__OCT__2 @db_env_set_func_unmap -__APIREL__/api_c/set_func_write.html__OCT__2 @db_env_set_func_write -__APIREL__/api_c/set_func_yield.html__OCT__2 @db_env_set_func_yield -__APIREL__/api_c/env_set_tx_max.html__OCT__2 @DB_ENV-__GT__set_tx_max -__APIREL__/api_c/env_set_tx_max.html__OCT__3 @DB_ENV-__GT__get_tx_max -__APIREL__/api_c/env_set_tx_timestamp.html__OCT__2 @DB_ENV-__GT__set_tx_timestamp -__APIREL__/api_c/env_set_tx_timestamp.html__OCT__3 @DB_ENV-__GT__get_tx_timestamp -__APIREL__/api_c/txn_abort.html__OCT__2 @DB_TXN-__GT__abort -__APIREL__/api_c/txn_begin.html__OCT__2 @DB_ENV-__GT__txn_begin +__APIREL__/api_c/seq_get.html__OCT__DB_AUTO_COMMIT DB_SEQUENCE-__GT__get@DB_AUTO_COMMIT +__APIREL__/api_c/seq_get.html__OCT__DB_TXN_NOSYNC DB_SEQUENCE-__GT__get@DB_TXN_NOSYNC +__APIREL__/api_c/seq_open.html__OCT__DB_AUTO_COMMIT DB_SEQUENCE-__GT__open@DB_AUTO_COMMIT +__APIREL__/api_c/seq_open.html__OCT__DB_CREATE DB_SEQUENCE-__GT__open@DB_CREATE +__APIREL__/api_c/seq_open.html__OCT__DB_EXCL DB_SEQUENCE-__GT__open@DB_EXCL +__APIREL__/api_c/seq_open.html__OCT__DB_THREAD DB_SEQUENCE-__GT__open@DB_THREAD +__APIREL__/api_c/seq_set_flags.html__OCT__DB_SEQ_DEC DB_SEQUENCE-__GT__set_flags@DB_SEQ_DEC +__APIREL__/api_c/seq_set_flags.html__OCT__DB_SEQ_INC DB_SEQUENCE-__GT__set_flags@DB_SEQ_INC +__APIREL__/api_c/seq_set_flags.html__OCT__DB_SEQ_WRAP DB_SEQUENCE-__GT__set_flags@DB_SEQ_WRAP +__APIREL__/api_c/seq_stat.html__OCT__DB_STAT_CLEAR DB_SEQUENCE-__GT__stat@DB_STAT_CLEAR +__APIREL__/api_c/seq_stat.html__OCT__DB_STAT_CLEAR DB_SEQUENCE-__GT__stat@DB_STAT_CLEAR +__APIREL__/api_c/seq_remove.html__OCT__DB_AUTO_COMMIT DB_SEQUENCE-__GT__remove@DB_AUTO_COMMIT +__APIREL__/api_c/seq_remove.html__OCT__DB_TXN_NOSYNC DB_SEQUENCE-__GT__remove@DB_TXN_NOSYNC +__APIREL__/api_c/txn_begin.html__OCT__DB_DEGREE_2 DB_ENV-__GT__txn_begin@DB_DEGREE_2 
__APIREL__/api_c/txn_begin.html__OCT__DB_DIRTY_READ DB_ENV-__GT__txn_begin@DB_DIRTY_READ __APIREL__/api_c/txn_begin.html__OCT__DB_TXN_NOSYNC DB_ENV-__GT__txn_begin@DB_TXN_NOSYNC __APIREL__/api_c/txn_begin.html__OCT__DB_TXN_NOWAIT DB_ENV-__GT__txn_begin@DB_TXN_NOWAIT __APIREL__/api_c/txn_begin.html__OCT__DB_TXN_SYNC DB_ENV-__GT__txn_begin@DB_TXN_SYNC -__APIREL__/api_c/txn_checkpoint.html__OCT__2 @DB_ENV-__GT__txn_checkpoint __APIREL__/api_c/txn_checkpoint.html__OCT__DB_FORCE DB_ENV-__GT__txn_checkpoint@DB_FORCE -__APIREL__/api_c/txn_class.html__OCT__2 @DB_TXN -__APIREL__/api_c/txn_commit.html__OCT__2 @DB_TXN-__GT__commit __APIREL__/api_c/txn_commit.html__OCT__DB_TXN_NOSYNC DB_TXN-__GT__commit@DB_TXN_NOSYNC __APIREL__/api_c/txn_commit.html__OCT__DB_TXN_SYNC DB_TXN-__GT__commit@DB_TXN_SYNC -__APIREL__/api_c/txn_discard.html__OCT__2 @DB_TXN-__GT__discard -__APIREL__/api_c/txn_id.html__OCT__2 @DB_TXN-__GT__id -__APIREL__/api_c/txn_prepare.html__OCT__2 @DB_TXN-__GT__prepare -__APIREL__/api_c/txn_prepare.html__OCT__3 @DB_XIDDATASIZE -__APIREL__/api_c/txn_recover.html__OCT__2 @DB_ENV-__GT__txn_recover +__APIREL__/api_c/txn_prepare.html__OCT__2 @DB_XIDDATASIZE __APIREL__/api_c/txn_recover.html__OCT__DB_FIRST DB_ENV-__GT__txn_recover@DB_FIRST __APIREL__/api_c/txn_recover.html__OCT__DB_NEXT DB_ENV-__GT__txn_recover@DB_NEXT -__APIREL__/api_c/txn_set_timeout.html__OCT__2 @DB_TXN-__GT__set_timeout __APIREL__/api_c/txn_set_timeout.html__OCT__DB_SET_LOCK_TIMEOUT DB_TXN-__GT__set_timeout@DB_SET_LOCK_TIMEOUT __APIREL__/api_c/txn_set_timeout.html__OCT__DB_SET_TXN_TIMEOUT DB_TXN-__GT__set_timeout@DB_SET_TXN_TIMEOUT -__APIREL__/api_c/txn_stat.html__OCT__2 @DB_ENV-__GT__txn_stat __APIREL__/api_c/txn_stat.html__OCT__DB_STAT_CLEAR DB_ENV-__GT__txn_stat@DB_STAT_CLEAR +__APIREL__/api_c/txn_stat.html__OCT__DB_STAT_ALL DB_ENV-__GT__txn_stat@DB_STAT_ALL __APIREL__/ref/am/close.html__OCT__2 @closing a database __APIREL__/ref/am/count.html__OCT__2 @counting data items for a key __APIREL__/ref/am/curclose.html__OCT__2 @closing a cursor @@ -629,7 +438,6 @@ __APIREL__/ref/build_unix/intro.html__OCT__4 building for @UNIX __APIREL__/ref/build_unix/intro.html__OCT__5 building for @QNX __APIREL__/ref/build_unix/irix.html__OCT__2 @IRIX __APIREL__/ref/build_unix/linux.html__OCT__2 @Linux -__APIREL__/ref/build_unix/macosx.html__OCT__2 @Mac OS X __APIREL__/ref/build_unix/notes.html__OCT__2 @building for UNIX FAQ __APIREL__/ref/build_unix/notes.html__OCT__3 building for @UNIX FAQ __APIREL__/ref/build_unix/osf1.html__OCT__2 @OSF/1 @@ -640,6 +448,7 @@ __APIREL__/ref/build_unix/small.html__OCT__--disable-cryptography Building a sma __APIREL__/ref/build_unix/small.html__OCT__--disable-hash Building a small memory footprint library@--disable-hash __APIREL__/ref/build_unix/small.html__OCT__--disable-queue Building a small memory footprint library@--disable-queue __APIREL__/ref/build_unix/small.html__OCT__--disable-replication Building a small memory footprint library@--disable-replication +__APIREL__/ref/build_unix/small.html__OCT__--disable-statistics Building a small memory footprint library@--disable-statistics __APIREL__/ref/build_unix/small.html__OCT__--disable-verify Building a small memory footprint library@--disable-verify __APIREL__/ref/build_unix/small.html__OCT__--enable-smallbuild Building a small memory footprint library@--enable-smallbuild __APIREL__/ref/build_unix/solaris.html__OCT__2 @Solaris @@ -658,6 +467,7 @@ __APIREL__/ref/build_win/intro.html__OCT__2 @building for Win32 __APIREL__/ref/build_win/notes.html__OCT__2 
@Windows notes __APIREL__/ref/build_win/test.html__OCT__2 running the @test suite under Windows __APIREL__/ref/build_win/test.html__OCT__3 running the test suite under @Windows +__APIREL__/ref/build_win/unicode.html__OCT__2 @Unicode __APIREL__/ref/cam/intro.html__OCT__2 @Concurrent Data Store __APIREL__/ref/debug/intro.html__OCT__2 introduction to @debugging __APIREL__/ref/debug/common.html__OCT__2 @debugging applications @@ -677,52 +487,17 @@ __APIREL__/ref/env/region.html__OCT__2 @__db.001 __APIREL__/ref/env/remote.html__OCT__2 remote @filesystems __APIREL__/ref/env/remote.html__OCT__3 @NFS problems __APIREL__/ref/env/security.html__OCT__2 @security -__APIREL__/ref/intro/products.html__OCT__2 Sleepycat Software's Berkeley DB @products +__APIREL__/ref/ext/mod.html__OCT__2 @Apache +__APIREL__/ref/ext/mod.html__OCT__3 @mod +__APIREL__/ref/ext/perl.html__OCT__2 @Perl +__APIREL__/ref/ext/php.html__OCT__2 @PHP __APIREL__/ref/install/file.html__OCT__2 @/etc/magic __APIREL__/ref/install/file.html__OCT__3 @file utility -__APIREL__/ref/install/rpm.html__OCT__2 @RPM +__APIREL__/ref/intro/products.html__OCT__2 Sleepycat Software's Berkeley DB @products __APIREL__/ref/java/compat.html__OCT__2 @Java compatibility __APIREL__/ref/java/conf.html__OCT__2 @Java configuration __APIREL__/ref/java/faq.html__OCT__2 Java @FAQ __APIREL__/ref/java/faq.html__OCT__3 @Java FAQ -__APIREL__/ref/bdb/overview.html__OCT__2 @BDB -__APIREL__/ref/bdb/cs_bdb_bind.html__OCT__2 @BDB -__APIREL__/ref/bdb/cs_bdb_collection.html__OCT__2 @BDB -__APIREL__/ref/bdb/faq.html__OCT__2 Java API @FAQ -__APIREL__/ref/bdb/faq.html__OCT__3 @Java API FAQ -__APIREL__/ref/bdb_basic/catalog.html__OCT__2 @tutorial -__APIREL__/ref/bdb_basic/env.html__OCT__2 @tutorial -__APIREL__/ref/bdb_basic/except.html__OCT__2 @tutorial -__APIREL__/ref/bdb_basic/intro.html__OCT__2 @tutorial -__APIREL__/ref/bdb_basic/keyvalue.html__OCT__2 @tutorial -__APIREL__/ref/bdb_basic/main.html__OCT__2 @tutorial -__APIREL__/ref/bdb_basic/read.html__OCT__2 @tutorial -__APIREL__/ref/bdb_basic/stores.html__OCT__2 @tutorial -__APIREL__/ref/bdb_basic/transact.html__OCT__2 @tutorial -__APIREL__/ref/bdb_basic/views.html__OCT__2 @tutorial -__APIREL__/ref/bdb_basic/write.html__OCT__2 @tutorial -__APIREL__/ref/bdb_entity/bindings.html__OCT__2 @tutorial -__APIREL__/ref/bdb_entity/classes.html__OCT__2 @tutorial -__APIREL__/ref/bdb_entity/intro.html__OCT__2 @tutorial -__APIREL__/ref/bdb_entity/read.html__OCT__2 @tutorial -__APIREL__/ref/bdb_entity/views.html__OCT__2 @tutorial -__APIREL__/ref/bdb_index/foreign.html__OCT__2 @tutorial -__APIREL__/ref/bdb_index/intro.html__OCT__2 @tutorial -__APIREL__/ref/bdb_index/read.html__OCT__2 @tutorial -__APIREL__/ref/bdb_index/second.html__OCT__2 @tutorial -__APIREL__/ref/bdb_index/views.html__OCT__2 @tutorial -__APIREL__/ref/bdb_sentity/binding.html__OCT__2 @tutorial -__APIREL__/ref/bdb_sentity/class.html__OCT__2 @tutorial -__APIREL__/ref/bdb_sentity/intro.html__OCT__2 @tutorial -__APIREL__/ref/bdb_sentity/remove.html__OCT__2 @tutorial -__APIREL__/ref/bdb_tuple/extract.html__OCT__2 @tutorial -__APIREL__/ref/bdb_tuple/format.html__OCT__2 @tutorial -__APIREL__/ref/bdb_tuple/intro.html__OCT__2 @tutorial -__APIREL__/ref/bdb_tuple/sorted.html__OCT__2 @tutorial -__APIREL__/ref/bdb_tuple/tbinding.html__OCT__2 @tutorial -__APIREL__/ref/bdb_tuple/tsbinding.html__OCT__2 @tutorial -__APIREL__/ref/bdb_tut/intro.html__OCT__2 @tutorial -__APIREL__/ref/bdb_tut/summary.html__OCT__2 @tutorial __APIREL__/ref/lock/am_conv.html__OCT__2 Berkeley DB Transactional Data 
Store @locking conventions __APIREL__/ref/lock/cam_conv.html__OCT__2 Berkeley DB Concurrent Data Store @locking conventions __APIREL__/ref/lock/config.html__OCT__2 @locking configuration @@ -744,7 +519,6 @@ __APIREL__/ref/mp/intro.html__OCT__2 introduction to the @memory pool subsystem __APIREL__/ref/mp/intro.html__OCT__3 introduction to the memory @cache subsystem __APIREL__/ref/mp/intro.html__OCT__4 introduction to the @buffer pool subsystem __APIREL__/ref/mp/config.html__OCT__2 @memory pool configuration -__APIREL__/ref/perl/intro.html__OCT__2 @Perl __APIREL__/ref/program/appsignals.html__OCT__2 @signal handling __APIREL__/ref/program/compatible.html__OCT__2 @interface compatibility __APIREL__/ref/program/environ.html__OCT__2 @environment variables @@ -767,7 +541,7 @@ __APIREL__/ref/rpc/client.html__OCT__2 @RPC client __APIREL__/ref/rpc/faq.html__OCT__2 @RPC FAQ __APIREL__/ref/rpc/intro.html__OCT__2 introduction to @rpc client/server __APIREL__/ref/rpc/server.html__OCT__2 @RPC server -__APIREL__/ref/simple_tut/intro.html__OCT__2 simple @tutorial +__APIREL__/ref/sequence/intro.html__OCT__2 introduction to @sequences __APIREL__/ref/tcl/intro.html__OCT__2 loading Berkeley DB with @Tcl __APIREL__/ref/tcl/faq.html__OCT__2 Tcl @FAQ __APIREL__/ref/tcl/faq.html__OCT__3 @Tcl FAQ @@ -787,8 +561,12 @@ __APIREL__/ref/transapp/intro.html__OCT__2 @Transactional Data Store __APIREL__/ref/transapp/logfile.html__OCT__2 @log file removal __APIREL__/ref/transapp/nested.html__OCT__2 nested @transactions __APIREL__/ref/transapp/read.html__OCT__2 @repeatable read -__APIREL__/ref/transapp/read.html__OCT__3 dirty @reads -__APIREL__/ref/transapp/read.html__OCT__4 @dirty reads +__APIREL__/ref/transapp/read.html__OCT__3 @cursor stability +__APIREL__/ref/transapp/read.html__OCT__4 cursor @stability +__APIREL__/ref/transapp/read.html__OCT__5 @degree 2 isolation +__APIREL__/ref/transapp/read.html__OCT__6 degree 2 @isolation +__APIREL__/ref/transapp/read.html__OCT__7 dirty @reads +__APIREL__/ref/transapp/read.html__OCT__8 @dirty reads __APIREL__/ref/transapp/reclimit.html__OCT__2 Berkeley DB @recoverability __APIREL__/ref/transapp/recovery.html__OCT__2 recovery in @transaction protected applications __APIREL__/ref/transapp/throughput.html__OCT__2 @transaction throughput @@ -797,24 +575,15 @@ __APIREL__/ref/transapp/tune.html__OCT__3 transaction @tuning __APIREL__/ref/txn/config.html__OCT__2 @transaction configuration __APIREL__/ref/txn/intro.html__OCT__2 introduction to the @transaction subsystem __APIREL__/ref/txn/limits.html__OCT__2 @transaction limits -__APIREL__/ref/upgrade.2.0/intro.html__OCT__2 Upgrading to release @2.0 -__APIREL__/ref/upgrade.3.0/intro.html__OCT__2 Upgrading to release @3.0 -__APIREL__/ref/upgrade.3.1/intro.html__OCT__2 Upgrading to release @3.1 -__APIREL__/ref/upgrade.3.2/intro.html__OCT__2 Upgrading to release @3.2 -__APIREL__/ref/upgrade.3.3/intro.html__OCT__2 Upgrading to release @3.3 -__APIREL__/ref/upgrade.4.0/intro.html__OCT__2 Upgrading to release @4.0 -__APIREL__/ref/upgrade.4.1/intro.html__OCT__2 Upgrading to release @4.1 -__APIREL__/ref/upgrade.4.2/cksum.html__OCT__2 Upgrading to release @4.2 -__APIREL__/ref/upgrade.4.2/client.html__OCT__2 Upgrading to release @4.2 -__APIREL__/ref/upgrade.4.2/del.html__OCT__2 Upgrading to release @4.2 -__APIREL__/ref/upgrade.4.2/intro.html__OCT__2 Upgrading to release @4.2 -__APIREL__/ref/upgrade.4.2/java.html__OCT__2 Upgrading to release @4.2 -__APIREL__/ref/upgrade.4.2/lockng.html__OCT__2 Upgrading to release @4.2 
-__APIREL__/ref/upgrade.4.2/nosync.html__OCT__2 Upgrading to release @4.2 -__APIREL__/ref/upgrade.4.2/priority.html__OCT__2 Upgrading to release @4.2 -__APIREL__/ref/upgrade.4.2/queue.html__OCT__2 Upgrading to release @4.2 -__APIREL__/ref/upgrade.4.2/repinit.html__OCT__2 Upgrading to release @4.2 -__APIREL__/ref/upgrade.4.2/verify.html__OCT__2 Upgrading to release @4.2 +__APIREL__/ref/upgrade.2.0/toc.html__OCT__2 @Upgrading to release 2.0 +__APIREL__/ref/upgrade.3.0/toc.html__OCT__2 @Upgrading to release 3.0 +__APIREL__/ref/upgrade.3.1/toc.html__OCT__2 @Upgrading to release 3.1 +__APIREL__/ref/upgrade.3.2/toc.html__OCT__2 @Upgrading to release 3.2 +__APIREL__/ref/upgrade.3.3/toc.html__OCT__2 @Upgrading to release 3.3 +__APIREL__/ref/upgrade.4.0/toc.html__OCT__2 @Upgrading to release 4.0 +__APIREL__/ref/upgrade.4.1/toc.html__OCT__2 @Upgrading to release 4.1 +__APIREL__/ref/upgrade.4.2/toc.html__OCT__2 @Upgrading to release 4.2 +__APIREL__/ref/upgrade.4.3/toc.html__OCT__2 @Upgrading to release 4.3 __APIREL__/ref/xa/build.html__OCT__2 @Transaction Manager __APIREL__/ref/xa/intro.html__OCT__2 @Distributed Transactions __APIREL__/ref/xa/intro.html__OCT__3 @Resource Manager diff --git a/db/docs/api_c/rep_elect.html b/db/docs/api_c/rep_elect.html index 31ba16945..9e6cf2309 100644 --- a/db/docs/api_c/rep_elect.html +++ b/db/docs/api_c/rep_elect.html @@ -1,31 +1,30 @@ - - + + Berkeley DB: DB_ENV->rep_elect - + -

DB_ENV->rep_elect

API -Ref -
+Ref +


 #include <db.h>
 

int -DB_ENV->rep_elect(DB_ENV *env, int nsites, - int priority, u_int32_t timeout, int *envid); +DB_ENV->rep_elect(DB_ENV *env, int nsites, int nvotes, + int priority, u_int32_t timeout, int *envid, u_int32_t flags);


Description: DB_ENV->rep_elect

@@ -39,41 +38,41 @@ selected master, in accordance with the results of this election.

The thread of control that calls the DB_ENV->rep_elect method must not be the thread of control that processes incoming messages; processing the incoming messages is necessary to successfully complete an election.

-

The DB_ENV->rep_elect method -returns a non-zero error value on failure -and 0 on success. -

Parameters

-

-

envid
-The envid parameter references memory into which +
+
envid
The envid parameter references memory into which the newly elected master's ID is copied. -

nsites
-The nsites parameter indicates the number of environments that +
nsites
The nsites parameter indicates the number of environments that the application believes are in the replication group. This number is used by Berkeley DB to avoid having two masters active simultaneously, even in the case of a network partition. During an election, a new master -cannot be elected unless more than half of nsites agree on -the new master. Thus, in the face of a network partition, the side of -the partition with more than half the environments will elect a new -master and continue, while the environments communicating with fewer -than half the other environments will fail to find a new master. -

priority
-The priority parameter is the priority of this environment. It +cannot be elected unless more than half of nsites agree on the +new master. Thus, in the face of a network partition, the side of the +partition with more than half the environments will elect a new master +and continue, while the environments communicating with fewer than half +the other environments will fail to find a new master. +
nvotes
The nvotes parameter indicates the number of votes required by +the application to successfully elect a new master. It must be a +positive integer, no greater than nsites, or 0 if the election +should use a simple majority of the nsites value as the +requirement. A warning is given if half or fewer votes are required to +win an election as that can potentially lead to multiple masters in the +face of a network partition. +
priority
The priority parameter is the priority of this environment. It must be a positive integer, or 0 if this environment is not permitted to become a master (see Replication environment priorities for more information). -

timeout
-The timeout parameter specifies a timeout period for an election. +
timeout
The timeout parameter specifies a timeout period for an election. If the election has not completed after timeout microseconds, the election will fail. +
flags
The flags parameter is currently unused, and must be set to 0.
- +

Errors

The DB_ENV->rep_elect method may fail and return one of the following non-zero errors:

-

-

DB_REP_UNAVAIL
The replication group was unable to elect a master, or was unable to +
+
DB_REP_UNAVAIL
The replication group was unable to elect a master, or was unable to complete the election in the specified timeout period.
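A minimal usage sketch (not part of the original page), assuming an already opened, replication-configured DB_ENV handle named dbenv and illustrative values for nsites, nvotes, priority, and timeout; it follows the new nsites/nvotes calling sequence shown in the prototype above and handles the DB_REP_UNAVAIL case.

 #include <db.h>

 int
 hold_election(DB_ENV *dbenv)
 {
 	int master_eid, ret;

 	/*
 	 * Five sites believed to be in the group; nvotes == 0 asks for a
 	 * simple majority; priority 100; 2-second (2000000 microsecond)
 	 * timeout; flags must be 0.
 	 */
 	switch (ret = dbenv->rep_elect(
 	    dbenv, 5, 0, 100, 2000000, &master_eid, 0)) {
 	case 0:			/* master_eid holds the winner's ID. */
 		break;
 	case DB_REP_UNAVAIL:	/* No master before the timeout; retry later. */
 		break;
 	default:
 		dbenv->err(dbenv, ret, "DB_ENV->rep_elect");
 		break;
 	}
 	return (ret);
 }

As the page notes, this call must be made from a thread of control other than the one processing incoming replication messages.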

@@ -85,6 +84,6 @@ complete the election in the specified timeout period.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/rep_limit.html b/db/docs/api_c/rep_limit.html index 25f88a091..d42d85429 100644 --- a/db/docs/api_c/rep_limit.html +++ b/db/docs/api_c/rep_limit.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_rep_limit - + -

DB_ENV->set_rep_limit

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DB_ENV->get_rep_limit(DB_ENV *dbenv, u_int32_t *gbytesp, u_int32_t *bytesp);
 


Description: DB_ENV->set_rep_limit

-

The DB_ENV->set_rep_limit method imposes a byte-count limit on the amount of data that will be transmitted from a site in a single call to DB_ENV->rep_process_message method.

The DB_ENV->set_rep_limit method configures a database environment, not only operations @@ -43,12 +41,10 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

bytes
-The gbytes and bytes parameters specify the maximum +
+
bytes
The gbytes and bytes parameters specify the maximum number of bytes that will be sent in a single call to DB_ENV->rep_process_message method. -

gbytes
-The gbytes and bytes parameters specify the maximum +
gbytes
The gbytes and bytes parameters specify the maximum number of bytes that will be sent in a single call to DB_ENV->rep_process_message method.

@@ -60,12 +56,10 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

bytesp
-The bytesp parameter references memory into which +
+
bytesp
The bytesp parameter references memory into which the additional bytes of memory in the current transmit limit is copied. -

gbytesp
-The gbytesp parameter references memory into which +
gbytesp
The gbytesp parameter references memory into which the gigabytes of memory in the current transmit limit is copied.
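A minimal sketch (not part of the original page), assuming dbenv is an open environment handle and that the DB_ENV->set_rep_limit calling sequence mirrors the DB_ENV->get_rep_limit prototype shown above (gbytes first, then bytes).

 #include <db.h>

 int
 limit_rep_transmit(DB_ENV *dbenv)
 {
 	u_int32_t bytes, gbytes;
 	int ret;

 	/* Cap a single rep_process_message response at 10MB (0GB + 10MB). */
 	if ((ret = dbenv->set_rep_limit(dbenv, 0, 10 * 1024 * 1024)) != 0) {
 		dbenv->err(dbenv, ret, "DB_ENV->set_rep_limit");
 		return (ret);
 	}
 	/* Read the configured limit back. */
 	if ((ret = dbenv->get_rep_limit(dbenv, &gbytes, &bytes)) != 0)
 		dbenv->err(dbenv, ret, "DB_ENV->get_rep_limit");
 	return (ret);
 }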

@@ -77,6 +71,6 @@ The gbytesp parameter references memory into which

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/rep_list.html b/db/docs/api_c/rep_list.html index 85285640a..ccdf169d6 100644 --- a/db/docs/api_c/rep_list.html +++ b/db/docs/api_c/rep_list.html @@ -1,25 +1,25 @@ - + Berkeley DB: Berkeley DB: Replication and Related Methods - +

Berkeley DB: Replication and Related Methods

- + - - - - - - + + + + + +
Replication and Related Methods     Description
DB_ENV->set_rep_transport           Configure replication transport
DB_ENV->rep_elect                   Hold a replication election
DB_ENV->set_rep_limit               Limit data sent in response to a single message
DB_ENV->rep_process_message         Process a replication message
DB_ENV->rep_start                   Configure an environment for replication
DB_ENV->rep_stat                    Replication statistics
DB_ENV->rep_elect                   Hold a replication election
DB_ENV->rep_process_message         Process a replication message
DB_ENV->rep_start                   Configure an environment for replication
DB_ENV->rep_stat                    Replication statistics
DB_ENV->set_rep_limit               Limit data sent in response to a single message
DB_ENV->set_rep_transport           Configure replication transport
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/rep_message.html b/db/docs/api_c/rep_message.html index 9de2c00b9..31ed1c375 100644 --- a/db/docs/api_c/rep_message.html +++ b/db/docs/api_c/rep_message.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->rep_process_message - + -

DB_ENV->rep_process_message

API -Ref -
+Ref +


@@ -36,58 +35,49 @@ processed using the same DB_ENV handle.  It is not necessary that
 a single thread of control process all messages, only that all threads
 of control processing messages use the same handle.

The DB_ENV->rep_process_message method has additional return values:

-

-

DB_REP_DUPMASTER
-

-The DB_ENV->rep_process_message method +

+
DB_REP_DUPMASTER
+

The DB_ENV->rep_process_message method will return DB_REP_DUPMASTER if the replication group has more than one master. The application should reconfigure itself as a client by calling the DB_ENV->rep_start method, and then call for an election by calling DB_ENV->rep_elect.

-

DB_REP_HOLDELECTION
-

-The DB_ENV->rep_process_message method +

DB_REP_HOLDELECTION
+

The DB_ENV->rep_process_message method will return DB_REP_HOLDELECTION if an election is needed. The application should call for an election by calling DB_ENV->rep_elect.

-

DB_REP_ISPERM
-

-The DB_ENV->rep_process_message method will return DB_REP_ISPERM if processing this message results in the processing of records +

DB_REP_ISPERM
+

The DB_ENV->rep_process_message method will return DB_REP_ISPERM if processing this message results in the processing of records that are permanent. The maximum LSN of the permanent records stored is returned.

-

DB_REP_NEWMASTER
-

-The DB_ENV->rep_process_message method will return DB_REP_NEWMASTER if a new master has been elected. +

DB_REP_NEWMASTER
+

The DB_ENV->rep_process_message method will return DB_REP_NEWMASTER if a new master has been elected. The envid parameter contains the environment ID of the new master. If the recipient of this error return has been made master, it is the application's responsibility to begin acting as the master environment.

-

DB_REP_NEWSITE
-

-The DB_ENV->rep_process_message method will return DB_REP_NEWSITE if the system received contact information from a new environment. +

DB_REP_NEWSITE
+

The DB_ENV->rep_process_message method will return DB_REP_NEWSITE if the system received contact information from a new environment. The rec parameter contains the opaque data specified in the cdata parameter to the DB_ENV->rep_start. The application should take whatever action is needed to establish a communication channel with this new environment.

-

DB_REP_NOTPERM
-

-The DB_ENV->rep_process_message method will return DB_REP_NOTPERM if a message carrying a DB_REP_PERMANENT flag was processed +

DB_REP_NOTPERM
+

The DB_ENV->rep_process_message method will return DB_REP_NOTPERM if a message carrying a DB_REP_PERMANENT flag was processed successfully, but was not written to disk. The LSN of this record is returned. The application should take whatever action is deemed necessary to retain its recoverability characteristics.

-

DB_REP_OUTDATED
-

-The DB_ENV->rep_process_message method -will return DB_REP_OUTDATED if the current environment's logs are too far out of date with respect -to the master to be automatically synchronized. -The application should copy over a hot backup of the environment, run -recovery, and restart the client. +

DB_REP_STARTUPDONE
+

The DB_ENV->rep_process_message method will return DB_REP_STARTUPDONE if the system detects that a client has completed startup synchronization +and is now processing +live log records received from the master.

Unless otherwise specified, the DB_ENV->rep_process_message method @@ -95,27 +85,23 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

control
-The control parameter should reference a copy of the +
+
control
The control parameter should reference a copy of the control parameter specified by Berkeley DB on the sending environment. -

envid
-The envid parameter should contain the local identifier that +
envid
The envid parameter should contain the local identifier that corresponds to the environment that sent the message to be processed (see Replication environment IDs for more information). -

rec
-The rec parameter should reference a copy of the rec +
rec
The rec parameter should reference a copy of the rec parameter specified by Berkeley DB on the sending environment. -

ret_lsn
-If DB_ENV->rep_process_message method returns DB_REP_NOTPERM then the -ret_lsnp parameter will -contain the log sequence number of this permanent log message that could -not be written to disk. If DB_ENV->rep_process_message method returns -DB_REP_ISPERM then the ret_lsnp parameter will contain largest log sequence number of the -permanent records that are now written to disk as a result of processing -this message. In all other cases the value of ret_lsnp is undefined. +
ret_lsn
If the DB_ENV->rep_process_message method returns DB_REP_NOTPERM then the ret_lsnp +parameter will contain the log sequence number of this permanent log +message that could not be written to disk. If the DB_ENV->rep_process_message method +returns DB_REP_ISPERM then the ret_lsnp parameter will contain +the largest log sequence number of the permanent records that are now +written to disk as a result of processing this message. In all other +cases the value of ret_lsnp is undefined.
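A minimal dispatch sketch (not part of the original page) for the return values listed above. The calling sequence assumed here, with the control and rec DBTs received from the communication layer, a pointer holding the sending site's environment ID, and a DB_LSN receiving the ISPERM/NOTPERM LSN, is an assumption based on the parameters described on this page.

 #include <db.h>

 void
 handle_rep_message(DB_ENV *dbenv, DBT *control, DBT *rec, int eid)
 {
 	DB_LSN lsn;
 	int ret;

 	switch (ret =
 	    dbenv->rep_process_message(dbenv, control, rec, &eid, &lsn)) {
 	case 0:
 		break;
 	case DB_REP_NEWSITE:
 		/* rec holds the new site's cdata; connect to it. */
 		break;
 	case DB_REP_HOLDELECTION:
 		/* Call for an election (DB_ENV->rep_elect). */
 		break;
 	case DB_REP_DUPMASTER:
 		/* Reconfigure as a client, then call for an election. */
 		break;
 	case DB_REP_NEWMASTER:
 		/* eid now identifies the new master. */
 		break;
 	case DB_REP_ISPERM:
 	case DB_REP_NOTPERM:
 		/* lsn holds the LSN described for ret_lsn above. */
 		break;
 	case DB_REP_STARTUPDONE:
 		/* This client has finished startup synchronization. */
 		break;
 	default:
 		dbenv->err(dbenv, ret, "DB_ENV->rep_process_message");
 		break;
 	}
 }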

Class

@@ -126,6 +112,6 @@ this message. In all other cases the value of ret_lsnp is undefined.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/rep_start.html b/db/docs/api_c/rep_start.html index b5c4c08c2..8fe4748af 100644 --- a/db/docs/api_c/rep_start.html +++ b/db/docs/api_c/rep_start.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->rep_start - + -

DB_ENV->rep_start

API -Ref -
+Ref +


@@ -43,26 +42,23 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

cdata
-The cdata parameter is an opaque data item that is sent over +
+
cdata
The cdata parameter is an opaque data item that is sent over the communication infrastructure when the client or master comes online (see Connecting to a new site for more information). If no such information is useful, cdata should be NULL. -

flags
-The flags parameter must be set to one of the following values: -

-

DB_REP_CLIENT
Configure the environment as a replication client. -

DB_REP_LOGSONLY
Configure the environment as a log files-only client. -

DB_REP_MASTER
Configure the environment as a replication master. +
flags
The flags parameter must be set to one of the following values: +
+
DB_REP_CLIENT
Configure the environment as a replication client. +
DB_REP_MASTER
Configure the environment as a replication master.
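A minimal sketch (not part of the original page) that joins the group as a replication client with no connection data; it assumes the usual DB_ENV->rep_start(DB_ENV *, DBT *cdata, u_int32_t flags) calling sequence and an open, replication-configured dbenv handle. A site becoming master makes the same call with DB_REP_MASTER.

 #include <db.h>

 int
 start_as_client(DB_ENV *dbenv)
 {
 	int ret;

 	/* cdata may be NULL when there is no connection data to broadcast. */
 	if ((ret = dbenv->rep_start(dbenv, NULL, DB_REP_CLIENT)) != 0)
 		dbenv->err(dbenv, ret, "DB_ENV->rep_start");
 	return (ret);
 }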

Errors

The DB_ENV->rep_start method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the database environment was not already configured to communicate with +
+
EINVAL
If the database environment was not already configured to communicate with a replication group by a call to DB_ENV->set_rep_transport; the database environment was not already opened; or if an invalid flag value or parameter was specified. @@ -76,6 +72,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/rep_stat.html b/db/docs/api_c/rep_stat.html index 7f073492a..85628dfd8 100644 --- a/db/docs/api_c/rep_stat.html +++ b/db/docs/api_c/rep_stat.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->rep_stat - + -

DB_ENV->rep_stat

API -Ref -
+Ref +


@@ -25,6 +24,9 @@
 

int DB_ENV->rep_stat(DB_ENV *env, DB_REP_STAT **statp, u_int32_t flags); +

+int +DB_ENV->rep_stat_print(DB_ENV *env, u_int32_t flags);


Description: DB_ENV->rep_stat

@@ -40,11 +42,10 @@ responsible for deallocating the memory. To deallocate the memory, free the memory reference; references inside the returned memory need not be individually freed.

The following DB_REP_STAT fields will be filled in:

-

+
u_int32_t st_status;
The current replication mode. Set to DB_REP_MASTER if the environment is a replication master, DB_REP_CLIENT if the -environment is a replication client, DB_REP_LOGSONLY if the -environment is a log-files-only replica, or 0 if replication is not +environment is a replication client, or 0 if replication is not configured.
DB_LSN st_next_lsn;
In replication environments configured as masters, the next LSN expected. In replication environments configured as clients, the next LSN to be used. @@ -54,8 +55,7 @@ being waited for, or 0 if no log records are currently missing.
u_int32_t st_env_id;
The current environment ID.
u_int32_t st_env_priority;
The current environment priority.
u_int32_t st_gen;
The current generation number. -
u_int32_t st_in_recovery;
The site is currently in client recovery. When this field is set, LSN -values are not authoritative. +
u_int32_t st_egen;
The current election generation number.
u_int32_t st_log_duplicated;
The number of duplicate log records received.
u_int32_t st_log_queued;
The number of log records currently queued.
u_int32_t st_log_queued_max;
The maximum number of log records ever queued at once. @@ -72,6 +72,14 @@ values are not authoritative.
u_int32_t st_newsites;
The number of new site messages received.
int st_nsites;
The number of sites believed to be in the replication group.
u_int32_t st_outdated;
The number of outdated conditions detected. +
u_int32_t st_next_pg;
The next page number we expect to receive. +
u_int32_t st_waiting_pg;
The page number of the first page we have after missing pages +being waited for, or 0 if no pages are currently missing. +
u_int32_t st_pg_duplicated;
The number of duplicate pages received. +
u_int32_t st_pg_records;
The number of pages received and stored. +
u_int32_t st_pg_requested;
The number of pages missed and requested from the master. +
u_int32_t st_startup_complete;
The client site has completed its startup procedures and is now +handling live records from the master.
u_int32_t st_txns_applied;
The number of transactions applied.
u_int32_t st_elections;
The number of elections held.
u_int32_t st_elections_won;
The number of elections won. @@ -80,6 +88,7 @@ values are not authoritative.
u_int32_t st_election_gen;
The election generation number.
DB_LSN st_election_lsn;
The maximum LSN of election winner.
u_int32_t st_election_nsites;
The number of sites expected to participate in elections. +
u_int32_t st_election_nvotes;
The number of votes required to complete the election.
u_int32_t st_nthrottles;
Transmission limited. This indicates the number of times that data transmission was stopped to limit the amount of data sent in response to a single call to DB_ENV->rep_process_message. @@ -92,25 +101,45 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to 0 or +
+
flags
The flags parameter must be set to 0 or the following value: -

-

DB_STAT_CLEAR
Reset statistics after returning their values. +
+
DB_STAT_CLEAR
Reset statistics after returning their values.
-

statp
-The statp parameter references memory into which +
statp
The statp parameter references memory into which a pointer to the allocated statistics structure is copied.

Errors

The DB_ENV->rep_stat method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.
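As an illustration, the statistics might be retrieved and released as sketched below (dbenv is assumed to be an open, replication-configured DB_ENV handle, and <stdio.h> and <stdlib.h> are assumed to be included):

    DB_REP_STAT *sp;
    int ret;

    if ((ret = dbenv->rep_stat(dbenv, &sp, 0)) != 0)
        dbenv->err(dbenv, ret, "DB_ENV->rep_stat");
    else {
        printf("environment ID %lu, generation %lu\n",
            (unsigned long)sp->st_env_id, (unsigned long)sp->st_gen);
        /* The structure is a single allocation made by Berkeley DB;
         * freeing the reference releases it. */
        free(sp);
    }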

+

Description: DB_ENV->rep_stat_print

+

The DB_ENV->rep_stat_print method displays the +replication subsystem statistical information, as described for the DB_ENV->rep_stat method. +The information is printed to a specified output channel (see the +DB_ENV->set_msgfile method for more information), or passed to an +application callback function (see the DB_ENV->set_msgcall method for +more information).

+

The DB_ENV->rep_stat_print method may not be called before the DB_ENV->open method has +been called.

+

The DB_ENV->rep_stat_print method +returns a non-zero error value on failure +and 0 on success. +

+

Parameters

+
+
flags
The flags parameter must be set to 0 or +the following value: +
+
DB_STAT_ALL
Display all available information. +
+
+

Class

DB_ENV

See Also

@@ -119,6 +148,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/rep_transport.html b/db/docs/api_c/rep_transport.html index a4a625b65..ccaee5ae7 100644 --- a/db/docs/api_c/rep_transport.html +++ b/db/docs/api_c/rep_transport.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->set_rep_transport - + -

DB_ENV->set_rep_transport

API -Ref -
+Ref +


@@ -43,43 +42,40 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

envid
-The envid parameter is the local environment's ID. It must be +
+
envid
The envid parameter is the local environment's ID. It must be a positive integer and uniquely identify this Berkeley DB database environment (see Replication environment IDs for more information). -

send
-The send callback function is used to transmit data using the +
send
The send callback function is used to transmit data using the replication application's communication infrastructure. The parameters to send are as follows: -

-

dbenv
The dbenv parameter is the enclosing database environment handle. -

control
The control parameter is the first of the two data elements to be +
+
dbenv
The dbenv parameter is the enclosing database environment handle. +
control
The control parameter is the first of the two data elements to be transmitted by the send function. -

rec
The rec parameter is the second of the two data elements to be +
rec
The rec parameter is the second of the two data elements to be transmitted by the send function. -

lsnp
If the type of message to be sent has an LSN associated with it, then -the lsnp parameter -contains the LSN of the record being sent. This LSN can be used to -determine that certain records have been processed successfully by -clients. -

envid
The envid parameter is a positive integer identifier that +
lsnp
If the type of message to be sent has an LSN associated with it, then +the lsnp parameter contains the LSN of the record being sent. +This LSN can be used to determine that certain records have been +processed successfully by clients. +
envid
The envid parameter is a positive integer identifier that specifies the replication environment to which the message should be sent (see Replication environment IDs for more information). - +

The special identifier DB_EID_BROADCAST indicates that a message should be broadcast to every environment in the replication group. The application may use a true broadcast protocol or may send the message in sequence to each machine with which it is in communication. In both cases, the sending site should not be asked to process the message.

-

flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_REP_NOBUFFER
The record being sent should be transmitted immediately and not buffered +
+
DB_REP_NOBUFFER
The record being sent should be transmitted immediately and not buffered or delayed. -

DB_REP_PERMANENT
The record being sent is critical for maintaining database integrity +
DB_REP_PERMANENT
The record being sent is critical for maintaining database integrity (for example, the message includes a transaction commit). The application should take appropriate action to enforce the reliability guarantees it has chosen, such as waiting for acknowledgement from one @@ -99,8 +95,8 @@ error from the send function will be ignored.
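A skeleton of such a callback, written against the parameter list above, might look as follows (a sketch only; app_broadcast and app_send_one are hypothetical stand-ins for the application's own communication layer):

    /* Hypothetical application routines that actually move the bytes. */
    int app_broadcast(const DBT *control, const DBT *rec, u_int32_t flags);
    int app_send_one(int envid, const DBT *control, const DBT *rec, u_int32_t flags);

    int
    app_send(DB_ENV *dbenv, const DBT *control, const DBT *rec,
        const DB_LSN *lsnp, int envid, u_int32_t flags)
    {
        /* Neither dbenv nor lsnp is needed in this trivial sketch. */
        (void)dbenv;
        (void)lsnp;

        if (envid == DB_EID_BROADCAST)
            /* Deliver the two DBTs to every connected site. */
            return (app_broadcast(control, rec, flags));

        /* Deliver the two DBTs to a single site; Berkeley DB only acts on
         * a non-zero return when DB_REP_PERMANENT is set in flags. */
        return (app_send_one(envid, control, rec, flags));
    }

The callback would then be registered, together with the local environment ID, by a call to DB_ENV->set_rep_transport before DB_ENV->rep_start is used.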

Errors

The DB_ENV->set_rep_transport method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -112,6 +108,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/seq_class.html b/db/docs/api_c/seq_class.html new file mode 100644 index 000000000..8d9a64c0f --- /dev/null +++ b/db/docs/api_c/seq_class.html @@ -0,0 +1,77 @@ + + + + + + +Berkeley DB: db_sequence_create + + + + + + + +
+

db_sequence_create

+
+API +Ref
+


+ +

+#include <db.h>
+

+typedef struct __db_sequence DB_SEQUENCE; +

+int +db_sequence_create(DB_SEQUENCE **seq, DB *db, u_int32_t flags); +

+
+

Description: db_sequence_create

+

The DB_SEQUENCE handle is the handle used to manipulate a +sequence object. A sequence object is stored in a record in a +database.

+

DB_SEQUENCE handles are free-threaded if the DB_THREAD +flag is specified to the DB_SEQUENCE->open method when the sequence is opened. +Once the DB_SEQUENCE->close or DB_SEQUENCE->remove methods are called, the +handle may not be accessed again, regardless of the method's return.

+

Each handle opened on a sequence may maintain a separate cache of values +which are returned to the application using the DB_SEQUENCE->get method +either singly or in groups depending on its delta parameter.

+

The +db_sequence_create method creates a +DB_SEQUENCE object that serves as the handle for a sequence. +Calling the DB_SEQUENCE->close or DB_SEQUENCE->remove methods will discard the +handle.

+

The db_sequence_create method +returns a non-zero error value on failure +and 0 on success. +
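As an illustration, a minimal lifecycle for a sequence handle might be sketched as follows (dbp is assumed to be an already-open, non-transactional DB handle; the record key is arbitrary, and <db.h> and <string.h> are assumed):

    int
    use_sequence(DB *dbp)
    {
        DB_SEQUENCE *seq;
        DBT key;
        db_seq_t value;
        int ret;

        if ((ret = db_sequence_create(&seq, dbp, 0)) != 0)
            return (ret);

        memset(&key, 0, sizeof(key));
        key.data = "my_sequence";              /* arbitrary record key */
        key.size = sizeof("my_sequence") - 1;

        /* Create the backing record on first use. */
        if ((ret = seq->open(seq, NULL, &key, DB_CREATE)) == 0)
            /* Take the next value from the sequence. */
            ret = seq->get(seq, NULL, 1, &value, 0);

        (void)seq->close(seq, 0);
        return (ret);
    }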

+

Parameters

+
+
seq
The seq parameter references the memory into which the returned +structure pointer is stored. +
db
The db parameter is an open database handle which holds the +persistent data for the sequence. +
flags
The flags parameter is currently unused, and must be set to 0. +
+

Errors

+

The db_sequence_create method +may fail and return one of the following non-zero errors:

+
+
EINVAL
An +invalid flag value or parameter was specified. +
+
+

Class

+DB_SEQUENCE +

See Also

+Sequences and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_c/seq_close.html b/db/docs/api_c/seq_close.html new file mode 100644 index 000000000..381cc823f --- /dev/null +++ b/db/docs/api_c/seq_close.html @@ -0,0 +1,60 @@ + + + + + + +Berkeley DB: DB_SEQUENCE->close + + + + + + + +
+

DB_SEQUENCE->close

+
+API +Ref
+


+ +

+#include <db.h>
+

+int +DB_SEQUENCE->close(DB_SEQUENCE *seq, u_int32_t flags); +

+
+

Description: DB_SEQUENCE->close

+

The DB_SEQUENCE->close method closes the sequence handle. Any unused cached +values are lost.

+

The DB_SEQUENCE handle may not be accessed again after DB_SEQUENCE->close is +called, regardless of its return.

+

The DB_SEQUENCE->close method +returns a non-zero error value on failure +and 0 on success. +

+

Parameters

+
+
flags
The flags parameter is currently unused, and must be set to 0. +
+

Errors

+

The DB_SEQUENCE->close method +may fail and return one of the following non-zero errors:

+
+
EINVAL
An +invalid flag value or parameter was specified. +
+
+

Class

+DB_SEQUENCE +

See Also

+Sequences and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_c/seq_get.html b/db/docs/api_c/seq_get.html new file mode 100644 index 000000000..9dd68ca72 --- /dev/null +++ b/db/docs/api_c/seq_get.html @@ -0,0 +1,77 @@ + + + + + + +Berkeley DB: DB_SEQUENCE->get + + + + + + + +
+

DB_SEQUENCE->get

+
+API +Ref
+


+ +

+#include <db.h>
+

+int +DB_SEQUENCE->get(DB_SEQUENCE *seq, + DB_TXN *txnid, int32_t delta, db_seq_t *retp, u_int32_t flags); +

+
+

Description: DB_SEQUENCE->get

+

The DB_SEQUENCE->get method returns the next available element in the sequence +and changes the sequence value by delta. The value of +delta must be greater than zero. If there are enough cached +values in the sequence handle then they will be returned. Otherwise the +next value will be fetched from the database and incremented +(decremented) by enough to cover the delta and the next batch +of cached values.

+

If the underlying database handle was opened in a transaction then +either the txnid parameter must be a valid transaction handle or +DB_AUTO_COMMIT must be specified. The txnid handle must be NULL +if the sequence handle was opened with a non-zero cache size.

+

For maximum concurrency, a non-zero cache size should be specified prior +to opening the sequence handle, and DB_AUTO_COMMIT | DB_TXN_NOSYNC should +be specified for each DB_SEQUENCE->get method call.

+

The DB_SEQUENCE->get method will return EINVAL if the record in the database is not a valid sequence record, +or the sequence has overflowed its range. +
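For example, the high-concurrency pattern described above might be sketched as follows (seq is assumed to be a handle opened with a non-zero cache size over a transactional database, and <stdio.h> is assumed):

    db_seq_t id;
    int ret;

    /* Most calls are satisfied from the handle's value cache; when the
     * cache is exhausted, the database update runs in its own transaction
     * and the log is not flushed synchronously. */
    if ((ret = seq->get(seq, NULL, 1, &id, DB_AUTO_COMMIT | DB_TXN_NOSYNC)) != 0)
        fprintf(stderr, "DB_SEQUENCE->get: %s\n", db_strerror(ret));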

+

Parameters

+
+
delta
Specifies the amount to increment or decrement the sequence. +
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +or more of the following values: +
+
DB_AUTO_COMMIT
If the database +must be updated the update will be enclosed in a transaction +and will be recoverable. +
DB_TXN_NOSYNC
If a DB_AUTO_COMMIT +triggers a transaction, do not synchronously flush the log. +
+
retp
retp points to the memory to hold the return value from +the sequence. +
txnid
If the operation is to be transaction-protected, +the txnid parameter is a transaction handle returned from +DB_ENV->txn_begin; otherwise, NULL. +
+
+

Class

+DB_SEQUENCE +

See Also

+Sequences and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_c/seq_init_value.html b/db/docs/api_c/seq_init_value.html new file mode 100644 index 000000000..70473a5b6 --- /dev/null +++ b/db/docs/api_c/seq_init_value.html @@ -0,0 +1,60 @@ + + + + + + +Berkeley DB: DB_SEQUENCE->init_value + + + + + + + +
+

DB_SEQUENCE->init_value

+
+API +Ref
+


+ +

+#include <db.h>
+

+int +DB_SEQUENCE->init_value(DB_SEQUENCE *seq, db_seq_t value); +

+
+

Description: DB_SEQUENCE->init_value

+

Set the initial value for a sequence. This call is only effective when +the sequence is being created.

+

The DB_SEQUENCE->init_value method may not be called after the DB_SEQUENCE->open +method is called.

+

The DB_SEQUENCE->init_value method +returns a non-zero error value on failure +and 0 on success. +

+

Parameters

+
+
value
The initial value to set. +
+

Errors

+

The DB_SEQUENCE->init_value method +may fail and return one of the following non-zero errors:

+
+
EINVAL
An +invalid flag value or parameter was specified. +
+
+

Class

+DB_SEQUENCE +

See Also

+Sequences and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_c/seq_list.html b/db/docs/api_c/seq_list.html new file mode 100644 index 000000000..9820ab888 --- /dev/null +++ b/db/docs/api_c/seq_list.html @@ -0,0 +1,34 @@ + + + + + + +Berkeley DB: Berkeley DB: Sequences and Related Methods + + + + +

Berkeley DB: Sequences and Related Methods

+ + + + + + + + + + + + + + + + + + +
Sequences and Related Methods     Description
db_sequence_create                Create a sequence handle
DB_SEQUENCE->close                Close a sequence
DB_SEQUENCE->get                  Get the next sequence element(s)
DB_SEQUENCE->get_dbp              Return a handle for the underlying sequence database
DB_SEQUENCE->get_cachesize        Return the cache size of a sequence
DB_SEQUENCE->get_flags            Return the flags for a sequence
DB_SEQUENCE->get_range            Return the range for a sequence
DB_SEQUENCE->get_key              Return the key for a sequence
DB_SEQUENCE->init_value           Set the initial value of a sequence
DB_SEQUENCE->open                 Open a sequence
DB_SEQUENCE->remove               Remove a sequence
DB_SEQUENCE->set_cachesize        Set the cache size of a sequence
DB_SEQUENCE->set_flags            Set the flags for a sequence
DB_SEQUENCE->set_range            Set the range for a sequence
DB_SEQUENCE->stat                 Return sequence statistics
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_c/seq_open.html b/db/docs/api_c/seq_open.html new file mode 100644 index 000000000..51cd2d23c --- /dev/null +++ b/db/docs/api_c/seq_open.html @@ -0,0 +1,107 @@ + + + + + + +Berkeley DB: DB_SEQUENCE->open + + + + + + + +
+

DB_SEQUENCE->open

+
+API +Ref
+


+ +

+#include <db.h>
+

+int +DB_SEQUENCE->open(DB_SEQUENCE *seq, DB_TXN *txnid, DBT *key, u_int32_t flags); +

+int +DB_SEQUENCE->get_dbp(DB_SEQUENCE *seq, DB **dbp); +

+int +DB_SEQUENCE->get_key(DB_SEQUENCE *seq, DBT *key); +


+
+

Description: DB_SEQUENCE->open

+

The DB_SEQUENCE->open method opens the sequence represented by the key. +The key must be compatible with the underlying database specified in the +corresponding call to db_sequence_create.
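As an illustration, creating a sequence under an explicit transaction might be sketched as follows (dbenv and dbp are assumed to be open, transaction-enabled handles; the key name is arbitrary, and <db.h> and <string.h> are assumed):

    int
    create_sequence(DB_ENV *dbenv, DB *dbp, DB_SEQUENCE **seqp)
    {
        DB_SEQUENCE *seq;
        DB_TXN *txn;
        DBT key;
        int ret;

        memset(&key, 0, sizeof(key));
        key.data = "order_id_seq";              /* arbitrary record key */
        key.size = sizeof("order_id_seq") - 1;

        if ((ret = db_sequence_create(&seq, dbp, 0)) != 0)
            return (ret);

        /* Protect the creating open with an explicit transaction: if the
         * commit never happens, no sequence record is left behind. */
        if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) == 0) {
            if ((ret = seq->open(seq, txn, &key, DB_CREATE | DB_THREAD)) == 0)
                ret = txn->commit(txn, 0);
            else
                (void)txn->abort(txn);
        }

        if (ret != 0) {
            (void)seq->close(seq, 0);
            return (ret);
        }
        *seqp = seq;
        return (0);
    }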

+

Parameters

+
+
key
The key specifies which record in the database stores +the persistent sequence data. +
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +or more of the following values: +
+
DB_AUTO_COMMIT
Enclose the DB_SEQUENCE->open call within a transaction. If the call +succeeds, the open operation will be recoverable. If the +DB_CREATE flag is specified and the call fails, no sequence will +have been created. +
DB_CREATE
Create the sequence. If the sequence does not already exist and the +DB_CREATE flag is not specified, the DB_SEQUENCE->open will fail. +
DB_EXCL
Return an error if the sequence already exists. The DB_EXCL +flag is only meaningful when specified with the DB_CREATE +flag. +
DB_THREAD
Cause the DB_SEQUENCE handle returned by DB_SEQUENCE->open to be +free-threaded; that is, usable by multiple threads within a +single address space. +
+
txnid
If the operation is to be transaction-protected, +(other than by specifying the DB_AUTO_COMMIT flag), +the txnid parameter is a transaction handle returned from +DB_ENV->txn_begin; otherwise, NULL. Note that transactionally protected operations on a DB_SEQUENCE +handle require the DB_SEQUENCE handle itself be transactionally +protected during its open if the open creates the sequence. +
+
+

Description: DB_SEQUENCE->get_dbp

+

The DB_SEQUENCE->get_dbp method returns the database handle.

+

Parameters

+
+
dbp
The dbp parameter references memory into which +a pointer to the database handle is copied. +
+

The DB_SEQUENCE->get_dbp method may be called at any time during the life of the +application.

+

The DB_SEQUENCE->get_dbp method +returns a non-zero error value on failure +and 0 on success. +

+
+

Description: DB_SEQUENCE->get_key

+

The DB_SEQUENCE->get_key method returns the key for the sequence.

+

Parameters

+
+
key
The key parameter references memory into which +a pointer to the key data is copied. +
+

The DB_SEQUENCE->get_key method may be called at any time during the life of the +application.

+

The DB_SEQUENCE->get_key method +returns a non-zero error value on failure +and 0 on success. +

+
+

Class

+DB_SEQUENCE +

See Also

+Sequences and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_c/seq_remove.html b/db/docs/api_c/seq_remove.html new file mode 100644 index 000000000..922db62eb --- /dev/null +++ b/db/docs/api_c/seq_remove.html @@ -0,0 +1,71 @@ + + + + + + +Berkeley DB: DB_SEQUENCE->remove + + + + + + + +
+

DB_SEQUENCE->remove

+
+API +Ref
+


+ +

+#include <db.h>
+

+int +DB_SEQUENCE->remove(DB_SEQUENCE *seq, DB_TXN *txnid, u_int32_t flags); +

+
+

Description: DB_SEQUENCE->remove

+

The DB_SEQUENCE->remove method removes the sequence from the database. This +method should not be called if there are other open handles on this +sequence.

+

The DB_SEQUENCE handle may not be accessed again after DB_SEQUENCE->remove is +called, regardless of its return.

+

The DB_SEQUENCE->remove method +returns a non-zero error value on failure +and 0 on success. +

+

Parameters

+
+
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +or more of the following values: +
+
DB_AUTO_COMMIT
The remove +will be enclosed in a transaction and will be recoverable. +
DB_TXN_NOSYNC
If a DB_AUTO_COMMIT +triggers a transaction, do not synchronously flush the log. +
+
txnid
If the operation is to be transaction-protected, +the txnid parameter is a transaction handle returned from +DB_ENV->txn_begin; otherwise, NULL. +
+

Errors

+

The DB_SEQUENCE->remove method +may fail and return one of the following non-zero errors:

+
+
EINVAL
An +invalid flag value or parameter was specified. +
+
+

Class

+DB_SEQUENCE +

See Also

+Sequences and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_c/seq_set_cachesize.html b/db/docs/api_c/seq_set_cachesize.html new file mode 100644 index 000000000..13882ba43 --- /dev/null +++ b/db/docs/api_c/seq_set_cachesize.html @@ -0,0 +1,76 @@ + + + + + + +Berkeley DB: DB_SEQUENCE->set_cachesize + + + + + + + +
+

DB_SEQUENCE->set_cachesize

+
+API +Ref
+


+ +

+#include <db.h>
+

+int +DB_SEQUENCE->set_cachesize(DB_SEQUENCE *seq, int32_t size); +

+int +DB_SEQUENCE->get_cachesize(DB_SEQUENCE *seq, int32_t *sizep); +

+
+

Description: DB_SEQUENCE->set_cachesize

+

Configure the number of elements cached by a sequence handle.

+

The DB_SEQUENCE->set_cachesize method may not be called after the +DB_SEQUENCE->open method is called.

+

The DB_SEQUENCE->set_cachesize method +returns a non-zero error value on failure +and 0 on success. +

+

Parameters

+
+
size
The number of elements in the cache. +
+

Errors

+

The DB_SEQUENCE->set_cachesize method +may fail and return one of the following non-zero errors:

+
+
EINVAL
An +invalid flag value or parameter was specified. +
+
+

Description: DB_SEQUENCE->get_cachesize

+

The DB_SEQUENCE->get_cachesize method returns the current cache size.

+

The DB_SEQUENCE->get_cachesize method may be called at any time during the life of the +application.

+

The DB_SEQUENCE->get_cachesize method +returns a non-zero error value on failure +and 0 on success. +

+

Parameters

+
+
sizep
The DB_SEQUENCE->get_cachesize method returns the +current cache size in sizep. +
+
+

Class

+DB_SEQUENCE +

See Also

+Sequences and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_c/seq_set_flags.html b/db/docs/api_c/seq_set_flags.html new file mode 100644 index 000000000..0455ece8b --- /dev/null +++ b/db/docs/api_c/seq_set_flags.html @@ -0,0 +1,89 @@ + + + + + + +Berkeley DB: DB_SEQUENCE->set_flags + + + + + + + +
+

DB_SEQUENCE->set_flags

+
+API +Ref
+


+ +

+#include <db.h>
+

+int +DB_SEQUENCE->set_flags(DB_SEQUENCE *seq, u_int32_t flags); +

+int +DB_SEQUENCE->get_flags(DB_SEQUENCE *seq, u_int32_t *flagsp); +

+
+

Description: DB_SEQUENCE->set_flags

+

Configure a sequence. The flags are only effective when creating a +sequence. Calling DB_SEQUENCE->set_flags is additive; there is no way +to clear flags.

+

The DB_SEQUENCE->set_flags method may not be called after the +DB_SEQUENCE->open method is called.

+

The DB_SEQUENCE->set_flags method +returns a non-zero error value on failure +and 0 on success. +
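For example, a decrementing sequence that wraps within a fixed range might be configured as in the fragment below before the handle is opened (a fragment from a setup routine; seq is assumed to be a handle returned by db_sequence_create that has not yet been opened, and DB_SEQ_DEC and DB_SEQ_WRAP are described under Parameters below):

    int ret;

    /* Count downward and wrap around when the minimum is passed. */
    if ((ret = seq->set_flags(seq, DB_SEQ_DEC | DB_SEQ_WRAP)) != 0)
        return (ret);
    /* Constrain the sequence to the range [1, 1000000]. */
    if ((ret = seq->set_range(seq, 1, 1000000)) != 0)
        return (ret);
    /* Start at the top of the range. */
    if ((ret = seq->init_value(seq, 1000000)) != 0)
        return (ret);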

+

Parameters

+
+
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +or more of the following values: +
+
DB_SEQ_DEC
Specify that the sequence should be decremented. +
+
+
DB_SEQ_INC
Specify that the sequence should be incremented. This is the default. +
+
+
DB_SEQ_WRAP
Specify that the sequence should wrap around when it is incremented +(decremented) past the specified maximum (minimum) value. +
+
+

Errors

+

The DB_SEQUENCE->set_flags method +may fail and return one of the following non-zero errors:

+
+
EINVAL
An +invalid flag value or parameter was specified. +
+
+

Description: DB_SEQUENCE->get_flags

+

The DB_SEQUENCE->get_flags method returns the current flags.

+

The DB_SEQUENCE->get_flags method may be called at any time during the life of the +application.

+

The DB_SEQUENCE->get_flags method +returns a non-zero error value on failure +and 0 on success. +

+

Parameters

+
+
flagsp
The DB_SEQUENCE->get_flags method returns the +current flags in flagsp. +
+
+

Class

+DB_SEQUENCE +

See Also

+Sequences and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_c/seq_set_range.html b/db/docs/api_c/seq_set_range.html new file mode 100644 index 000000000..05aa7637a --- /dev/null +++ b/db/docs/api_c/seq_set_range.html @@ -0,0 +1,79 @@ + + + + + + +Berkeley DB: DB_SEQUENCE->set_range + + + + + + + +
+

DB_SEQUENCE->set_range

+
+API +Ref
+


+ +

+#include <db.h>
+

+int +DB_SEQUENCE->set_range(DB_SEQUENCE *seq, db_seq_t min, db_seq_t max); +

+int +DB_SEQUENCE->get_range(DB_SEQUENCE *seq, db_seq_t *minp, db_seq_t *maxp); +

+
+

Description: DB_SEQUENCE->set_range

+

Configure a sequence range. This call is only effective when the +sequence is being created. The range is limited to a signed 64 bit +integer.

+

The DB_SEQUENCE->set_range method may not be called after the +DB_SEQUENCE->open method is called.

+

The DB_SEQUENCE->set_range method +returns a non-zero error value on failure +and 0 on success. +

+

Parameters

+
+
min
Specifies the minimum value for the sequence. +
max
Specifies the maximum value for the sequence. +
+

Errors

+

The DB_SEQUENCE->set_range method +may fail and return one of the following non-zero errors:

+
+
EINVAL
An +invalid flag value or parameter was specified. +
+
+

Description: DB_SEQUENCE->get_range

+

The DB_SEQUENCE->get_range method returns the range of values in the sequence.

+

The DB_SEQUENCE->get_range method may be called at any time during the life of the +application.

+

The DB_SEQUENCE->get_range method +returns a non-zero error value on failure +and 0 on success. +

+

Parameters

+
+
minp
The DB_SEQUENCE->get_range method returns the minimum value in minp. +
maxp
The DB_SEQUENCE->get_range method returns the maximum value in maxp. +
+
+

Class

+DB_SEQUENCE +

See Also

+Sequences and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_c/seq_stat.html b/db/docs/api_c/seq_stat.html new file mode 100644 index 000000000..144ae8f09 --- /dev/null +++ b/db/docs/api_c/seq_stat.html @@ -0,0 +1,99 @@ + + + + + + +Berkeley DB: DB_SEQUENCE->stat + + + + + + + +
+

DB_SEQUENCE->stat

+
+API +Ref
+


+ +

+#include <db.h>
+

+int +DB_SEQUENCE->stat(DB_SEQUENCE *db, void *sp, u_int32_t flags); +

+int +DB_SEQUENCE->stat_print(DB_SEQUENCE *db, u_int32_t flags); +

+
+

Description: DB_SEQUENCE->stat

+

The DB_SEQUENCE->stat method creates a statistical structure and copies a +pointer to it into user-specified memory locations. Specifically, if +sp is non-NULL, a pointer to the statistics for the sequence is +copied into the memory location to which it refers.

+

Statistical structures are stored in allocated memory. If application-specific allocation +routines have been declared (see DB_ENV->set_alloc for more +information), they are used to allocate the memory; otherwise, the +standard C library malloc(3) is used. The caller is +responsible for deallocating the memory. To deallocate the memory, free +the memory reference; references inside the returned memory need not be +individually freed.

+

In the presence of multiple threads or processes accessing an active +sequence, the information returned by DB_SEQUENCE->stat may be out-of-date.

+

The DB_SEQUENCE->stat method cannot be transaction-protected. For this reason, +it should be called in a thread of control that has no open cursors or +active transactions.
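As an illustration, the persistent and cached values might be displayed as sketched below (seq is assumed to be an open DB_SEQUENCE handle, and <stdio.h> and <stdlib.h> are assumed):

    DB_SEQUENCE_STAT *sp;
    int ret;

    if ((ret = seq->stat(seq, &sp, 0)) != 0)
        fprintf(stderr, "DB_SEQUENCE->stat: %s\n", db_strerror(ret));
    else {
        printf("value in database %ld, last cached value %ld\n",
            (long)sp->st_current, (long)sp->st_last_value);
        /* A single allocation: freeing the structure releases everything. */
        free(sp);
    }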

+

The statistics are stored in a structure of type DB_SEQUENCE_STAT. The +following fields will be filled in:

+
+
u_int32_t st_wait;
The number of times a thread of control was forced to wait on the +handle mutex. +
u_int32_t st_nowait;
The number of times that a thread of control was able to obtain handle +mutex without waiting. +
db_seq_t st_current;
The current value of the sequence in the database. +
db_seq_t st_value;
The current cached value of the sequence. +
db_seq_t st_last_value;
The last cached value of the sequence. +
db_seq_t st_min;
The minimum permitted value of the sequence. +
db_seq_t st_max;
The maximum permitted value of the sequence. +
int32_t st_cache_size;
The number of values that will be cached in this handle. +
u_int32_t st_flags;
The flags value for the sequence. +
+

Parameters

+
+
flags
The flags parameter must be set by bitwise inclusively OR'ing together one or more +of the following values: +
+
DB_STAT_CLEAR
Reset statistics after returning their values. +
+
+

The DB_SEQUENCE->stat_print method +returns a non-zero error value on failure +and 0 on success. +

+
+

Description: DB_SEQUENCE->stat_print

+

The DB_SEQUENCE->stat_print method prints diagnostic information to the output +channel described by the DB_ENV->set_msgfile method.

+

Parameters

+
+
flags
The flags parameter must be set by bitwise inclusively OR'ing together one or more +of the following values: +
+
DB_STAT_CLEAR
Reset statistics after printing their values. +
+
+
+

Class

+DB_SEQUENCE +

See Also

+Sequences and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_c/set_func_close.html b/db/docs/api_c/set_func_close.html index e6d76f3e1..e2b744861 100644 --- a/db/docs/api_c/set_func_close.html +++ b/db/docs/api_c/set_func_close.html @@ -1,23 +1,22 @@ - + Berkeley DB: db_env_set_func_close - + -

db_env_set_func_close

API -Ref -
+Ref +


@@ -42,9 +41,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

func_close
-The func_close parameter is the replacement function. It must conform +
+
func_close
The func_close parameter is the replacement function. It must conform to the standard interface specification.

See Also

@@ -53,6 +51,6 @@ to the standard interface specification.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/set_func_dirfree.html b/db/docs/api_c/set_func_dirfree.html index 30cefb6d1..c6bde8e69 100644 --- a/db/docs/api_c/set_func_dirfree.html +++ b/db/docs/api_c/set_func_dirfree.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: db_env_set_func_dirfree - + -

db_env_set_func_dirfree

API -Ref -
+Ref +


@@ -42,9 +41,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

func_dirfree
-The func_dirfree parameter is a function which frees the memory +
+
func_dirfree
The func_dirfree parameter is a function which frees the memory returned from the db_env_set_func_dirlist function.

The namesp and cnt parameters to this function are the same values as were returned by the db_env_set_func_dirlist @@ -56,6 +54,6 @@ function.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/set_func_dirlist.html b/db/docs/api_c/set_func_dirlist.html index 79eb63b5f..033cbfaed 100644 --- a/db/docs/api_c/set_func_dirlist.html +++ b/db/docs/api_c/set_func_dirlist.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: db_env_set_func_dirlist - + -

db_env_set_func_dirlist

API -Ref -
+Ref +


@@ -42,9 +41,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

func_dirlist
-The func_dirlist parameter is the function which reads through +
+
func_dirlist
The func_dirlist parameter is the function which reads through a directory and returns a list of the files it contains.

The dir parameter to this function is the name of the directory to be searched.

@@ -61,6 +59,6 @@ failure and 0 on success.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/set_func_exists.html b/db/docs/api_c/set_func_exists.html index 472d2d445..8986307e7 100644 --- a/db/docs/api_c/set_func_exists.html +++ b/db/docs/api_c/set_func_exists.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: db_env_set_func_exists - + -

db_env_set_func_exists

API -Ref -
+Ref +


@@ -41,9 +40,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

func_exists
-The func_exists parameter is the function which returns if a +
+
func_exists
The func_exists parameter is the function which returns if a file exists and if it is a file of type directory.

The path parameter to this function is the pathname of the file to be checked.

@@ -58,6 +56,6 @@ failure and 0 on success.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/set_func_free.html b/db/docs/api_c/set_func_free.html index 121a5e3aa..b5cd6b553 100644 --- a/db/docs/api_c/set_func_free.html +++ b/db/docs/api_c/set_func_free.html @@ -1,23 +1,22 @@ - + Berkeley DB: db_env_set_func_free - + -

db_env_set_func_free

API -Ref -
+Ref +


@@ -42,9 +41,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

func_free
-The func_free parameter is the replacement function. It must conform +
+
func_free
The func_free parameter is the replacement function. It must conform to the standard interface specification.

See Also

@@ -53,6 +51,6 @@ to the standard interface specification.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/set_func_fsync.html b/db/docs/api_c/set_func_fsync.html index ecd3fe3b2..62e9fb10c 100644 --- a/db/docs/api_c/set_func_fsync.html +++ b/db/docs/api_c/set_func_fsync.html @@ -1,23 +1,22 @@ - + Berkeley DB: db_env_set_func_fsync - + -

db_env_set_func_fsync

API -Ref -
+Ref +


@@ -42,9 +41,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

func_fsync
-The func_fsync parameter is the replacement function. It must conform +
+
func_fsync
The func_fsync parameter is the replacement function. It must conform to the standard interface specification.

See Also

@@ -53,6 +51,6 @@ to the standard interface specification.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/set_func_ftruncate.html b/db/docs/api_c/set_func_ftruncate.html new file mode 100644 index 000000000..6354c3e97 --- /dev/null +++ b/db/docs/api_c/set_func_ftruncate.html @@ -0,0 +1,58 @@ + + + + + + +Berkeley DB: db_env_set_func_ftruncate + + + + + + + +
+

db_env_set_func_ftruncate

+
+API +Ref
+


+ +

+#include <db.h>
+

+int +db_env_set_func_ftruncate(int (*func_ftruncate)(int fd, off_t offset)); +

+
+

Description: db_env_set_func_ftruncate

+

The Berkeley DB library requires the ability to truncate a file.

+

The db_env_set_func_ftruncate method configures all operations performed by a process and +all of its threads of control, not operations confined to a single +database environment.

+

Although the db_env_set_func_ftruncate method may be called at any time during the life of +the application, it should normally be called before making calls to the +db_env_create or db_create methods.

+

The db_env_set_func_ftruncate method +returns a non-zero error value on failure +and 0 on success. +

+

Parameters

+
+
func_ftruncate
The func_ftruncate parameter is the function which truncates a file. +

The fd parameter is an open file descriptor on the file.

+

The ftruncate function must truncate the file to the byte +length specified by the offset parameter.

+

The func_ftruncate function must return the value of errno on +failure and 0 on success.
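A minimal replacement that simply forwards to the POSIX call might be sketched as follows:

    #include <errno.h>
    #include <unistd.h>

    int
    my_ftruncate(int fd, off_t offset)
    {
        /* Berkeley DB expects errno on failure and 0 on success. */
        return (ftruncate(fd, offset) == 0 ? 0 : errno);
    }

It would normally be installed by calling db_env_set_func_ftruncate(my_ftruncate) before db_env_create or db_create.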

+
+

See Also

+Run-time configuration +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_c/set_func_ioinfo.html b/db/docs/api_c/set_func_ioinfo.html index 3fd24586d..1567b3e96 100644 --- a/db/docs/api_c/set_func_ioinfo.html +++ b/db/docs/api_c/set_func_ioinfo.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: db_env_set_func_ioinfo - + -

db_env_set_func_ioinfo

API -Ref -
+Ref +


@@ -42,9 +41,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

func_ioinfo
-The func_ioinfo parameter is the function which returns the size +
+
func_ioinfo
The func_ioinfo parameter is the function which returns the size and I/O characteristics of a file.

The path parameter is the pathname of the file to be checked, and the fd parameter is an open file descriptor on the file.

@@ -66,6 +64,6 @@ failure and 0 on success.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/set_func_malloc.html b/db/docs/api_c/set_func_malloc.html index abb8d69f6..efd1a1941 100644 --- a/db/docs/api_c/set_func_malloc.html +++ b/db/docs/api_c/set_func_malloc.html @@ -1,23 +1,22 @@ - + Berkeley DB: db_env_set_func_malloc - + -

db_env_set_func_malloc

API -Ref -
+Ref +


@@ -42,9 +41,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

func_malloc
-The func_malloc parameter is the replacement function. It must conform +
+
func_malloc
The func_malloc parameter is the replacement function. It must conform to the standard interface specification.

See Also

@@ -53,6 +51,6 @@ to the standard interface specification.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/set_func_map.html b/db/docs/api_c/set_func_map.html index de9cded75..6833c7339 100644 --- a/db/docs/api_c/set_func_map.html +++ b/db/docs/api_c/set_func_map.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: db_env_set_func_map - + -

db_env_set_func_map

API -Ref -
+Ref +


@@ -42,9 +41,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

func_map
-The func_map parameter is the function which maps a file into +
+
func_map
The func_map parameter is the function which maps a file into memory and creates shared memory regions.

The path parameter is the name of a file.

The is_region parameter will be zero if the intention is to map @@ -70,6 +68,6 @@ failure and 0 on success.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/set_func_open.html b/db/docs/api_c/set_func_open.html index 349e0baa6..1b316162d 100644 --- a/db/docs/api_c/set_func_open.html +++ b/db/docs/api_c/set_func_open.html @@ -1,23 +1,22 @@ - + Berkeley DB: db_env_set_func_open - + -

db_env_set_func_open

API -Ref -
+Ref +


@@ -42,9 +41,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

func_open
-The func_open parameter is the replacement function. It must conform +
+
func_open
The func_open parameter is the replacement function. It must conform to the standard interface specification.

See Also

@@ -53,6 +51,6 @@ to the standard interface specification.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/set_func_pread.html b/db/docs/api_c/set_func_pread.html new file mode 100644 index 000000000..481bb7d8a --- /dev/null +++ b/db/docs/api_c/set_func_pread.html @@ -0,0 +1,56 @@ + + + + + + +Berkeley DB: db_env_set_func_pread + + + + + + + +
+

db_env_set_func_pread

+
+API +Ref
+


+ +

+#include <db.h>
+

+int +db_env_set_func_pread(ssize_t (*func_pread)(int fd, void *buf, size_t nbytes, off_t offset)); +

+
+

Description: db_env_set_func_pread

+

Replace Berkeley DB calls to the IEEE/ANSI Std 1003.1 (POSIX) pread function with +func_pread, which must conform to the standard interface +specification.

+

The db_env_set_func_pread method configures all operations performed by a process and +all of its threads of control, not operations confined to a single +database environment.

+

Although the db_env_set_func_pread method may be called at any time during the life of +the application, it should normally be called before making calls to the +db_env_create or db_create methods.

+

The db_env_set_func_pread method +returns a non-zero error value on failure +and 0 on success. +
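A trivial replacement that forwards to the POSIX call might be sketched as follows:

    #include <unistd.h>

    ssize_t
    my_pread(int fd, void *buf, size_t nbytes, off_t offset)
    {
        /* Forward to the system call; a real replacement might add
         * instrumentation or route the I/O differently. */
        return (pread(fd, buf, nbytes, offset));
    }

It would be installed by calling db_env_set_func_pread(my_pread) before db_env_create or db_create.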

+

Parameters

+
+
func_pread
The func_pread parameter is the replacement function. It must conform +to the standard interface specification. +
+

See Also

+Run-time configuration +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_c/set_func_pwrite.html b/db/docs/api_c/set_func_pwrite.html new file mode 100644 index 000000000..a77d58e41 --- /dev/null +++ b/db/docs/api_c/set_func_pwrite.html @@ -0,0 +1,56 @@ + + + + + + +Berkeley DB: db_env_set_func_pwrite + + + + + + + +
+

db_env_set_func_pwrite

+
+API +Ref
+


+ +

+#include <db.h>
+

+int +db_env_set_func_pwrite(ssize_t (*func_pwrite)(int fd, const void *buf, size_t nbytes, off_t offset)); +

+
+

Description: db_env_set_func_pwrite

+

Replace Berkeley DB calls to the IEEE/ANSI Std 1003.1 (POSIX) pwrite function with +func_pwrite, which must conform to the standard interface +specification.

+

The db_env_set_func_pwrite method configures all operations performed by a process and +all of its threads of control, not operations confined to a single +database environment.

+

Although the db_env_set_func_pwrite method may be called at any time during the life of +the application, it should normally be called before making calls to the +db_env_create or db_create methods.

+

The db_env_set_func_pwrite method +returns a non-zero error value on failure +and 0 on success. +

+

Parameters

+
+
func_pwrite
The func_pwrite parameter is the replacement function. It must conform +to the standard interface specification. +
+

See Also

+Run-time configuration +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_c/set_func_read.html b/db/docs/api_c/set_func_read.html index 55a733849..927b80b26 100644 --- a/db/docs/api_c/set_func_read.html +++ b/db/docs/api_c/set_func_read.html @@ -1,23 +1,22 @@ - + Berkeley DB: db_env_set_func_read - + -

db_env_set_func_read

API -Ref -
+Ref +


@@ -42,9 +41,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

func_read
-The func_read parameter is the replacement function. It must conform +
+
func_read
The func_read parameter is the replacement function. It must conform to the standard interface specification.

See Also

@@ -53,6 +51,6 @@ to the standard interface specification.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/set_func_realloc.html b/db/docs/api_c/set_func_realloc.html index 93d6a9bc7..887467d6e 100644 --- a/db/docs/api_c/set_func_realloc.html +++ b/db/docs/api_c/set_func_realloc.html @@ -1,23 +1,22 @@ - + Berkeley DB: db_env_set_func_realloc - + -

db_env_set_func_realloc

API -Ref -
+Ref +


@@ -42,9 +41,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

func_realloc
-The func_realloc parameter is the replacement function. It must conform +
+
func_realloc
The func_realloc parameter is the replacement function. It must conform to the standard interface specification.

See Also

@@ -53,6 +51,6 @@ to the standard interface specification.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/set_func_rename.html b/db/docs/api_c/set_func_rename.html index dbf57a979..eb0631ab3 100644 --- a/db/docs/api_c/set_func_rename.html +++ b/db/docs/api_c/set_func_rename.html @@ -1,23 +1,22 @@ - + Berkeley DB: db_env_set_func_rename - + -

db_env_set_func_rename

API -Ref -
+Ref +


@@ -42,9 +41,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

func_rename
-The func_rename parameter is the replacement function. It must conform +
+
func_rename
The func_rename parameter is the replacement function. It must conform to the standard interface specification.

See Also

@@ -53,6 +51,6 @@ to the standard interface specification.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/set_func_seek.html b/db/docs/api_c/set_func_seek.html index 7e406ead2..1f6f9fd1c 100644 --- a/db/docs/api_c/set_func_seek.html +++ b/db/docs/api_c/set_func_seek.html @@ -1,31 +1,29 @@ - - + + Berkeley DB: db_env_set_func_seek - + -

db_env_set_func_seek

API -Ref -
+Ref +


 #include <db.h>
 

int -db_env_set_func_seek(int (*func_seek)(int fd, size_t pgsize, - db_pgno_t pageno, u_int32_t relative, int rewind, int whence)); +db_env_set_func_seek(int (*func_seek)(int fd, off_t offset, int whence));


Description: db_env_set_func_seek

@@ -42,16 +40,13 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

func_seek
-The func_seek parameter is the function which seeks to a specific +
+
func_seek
The func_seek parameter is the function which seeks to a specific location in a file.

The fd parameter is an open file descriptor on the file.

The seek function must cause a subsequent read from or write to -the file to occur at a byte offset specified by the calculation:

-
(pgsize * pageno) + relative
-

If rewind is non-zero, the byte offset is treated as a backward -seek, not a forward one.

+the file to occur at the byte offset specified by the offset +parameter.

The whence parameter specifies where in the file the byte offset is relative to, as described by the IEEE/ANSI Std 1003.1 (POSIX) lseek system call.
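With the simplified interface, a replacement can be little more than a wrapper around lseek, as sketched below:

    #include <errno.h>
    #include <unistd.h>

    int
    my_seek(int fd, off_t offset, int whence)
    {
        /* Return errno on failure and 0 on success, as required above. */
        return (lseek(fd, offset, whence) == (off_t)-1 ? errno : 0);
    }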

@@ -64,6 +59,6 @@ failure and 0 on success.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/set_func_sleep.html b/db/docs/api_c/set_func_sleep.html index f1f6d8c92..9a926cc2b 100644 --- a/db/docs/api_c/set_func_sleep.html +++ b/db/docs/api_c/set_func_sleep.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: db_env_set_func_sleep - + -

db_env_set_func_sleep

API -Ref -
+Ref +


@@ -42,9 +41,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

func_sleep
-The func_sleep parameter is the function which seeks to a specific +
+
func_sleep
The func_sleep parameter is the function which suspends the calling thread of control for a specified amount of time.

The seconds and microseconds parameters specify the amount of time to wait until the suspending thread of control should run again.

@@ -60,6 +58,6 @@ failure and 0 on success.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/set_func_unlink.html b/db/docs/api_c/set_func_unlink.html index 8a553cc22..46b6d2f77 100644 --- a/db/docs/api_c/set_func_unlink.html +++ b/db/docs/api_c/set_func_unlink.html @@ -1,23 +1,22 @@ - + Berkeley DB: db_env_set_func_unlink - + -

db_env_set_func_unlink

API -Ref -
+Ref +


@@ -42,9 +41,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

func_unlink
-The func_unlink parameter is the replacement function. It must conform +
+
func_unlink
The func_unlink parameter is the replacement function. It must conform to the standard interface specification.

See Also

@@ -53,6 +51,6 @@ to the standard interface specification.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/set_func_unmap.html b/db/docs/api_c/set_func_unmap.html index 95ccb8961..bab5ce2f1 100644 --- a/db/docs/api_c/set_func_unmap.html +++ b/db/docs/api_c/set_func_unmap.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: db_env_set_func_unmap - + -

db_env_set_func_unmap

API -Ref -
+Ref +


@@ -41,9 +40,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

func_unmap
-The func_unmap parameter is the function which unmaps a file or +
+
func_unmap
The func_unmap parameter is the function which unmaps a file or shared memory region.

The addr parameter is the value returned by the db_env_set_func_map function when the file or region was mapped @@ -59,6 +57,6 @@ failure and 0 on success.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/set_func_write.html b/db/docs/api_c/set_func_write.html index d39e09bd6..25951fba1 100644 --- a/db/docs/api_c/set_func_write.html +++ b/db/docs/api_c/set_func_write.html @@ -1,23 +1,22 @@ - + Berkeley DB: db_env_set_func_write - + -

db_env_set_func_write

API -Ref -
+Ref +


@@ -43,9 +42,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

func_write
-The func_write parameter is the replacement function. It must conform +
+
func_write
The func_write parameter is the replacement function. It must conform to the standard interface specification.

See Also

@@ -54,6 +52,6 @@ to the standard interface specification.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/set_func_yield.html b/db/docs/api_c/set_func_yield.html index 334af4895..665d02384 100644 --- a/db/docs/api_c/set_func_yield.html +++ b/db/docs/api_c/set_func_yield.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: db_env_set_func_yield - + -

db_env_set_func_yield

API -Ref -
+Ref +


@@ -55,9 +54,8 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

func_yield
-The func_yield parameter is the function which yields the processor. +
+
func_yield
The func_yield parameter is the function which yields the processor.

The func_yield function must return the value of errno on failure and 0 on success.

@@ -67,6 +65,6 @@ failure and 0 on success.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/txn_abort.html b/db/docs/api_c/txn_abort.html index b339e3316..78f261f21 100644 --- a/db/docs/api_c/txn_abort.html +++ b/db/docs/api_c/txn_abort.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_TXN->abort - + -

DB_TXN->abort

API -Ref -
+Ref +


@@ -52,6 +51,6 @@ and 0 on success.
 

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/txn_begin.html b/db/docs/api_c/txn_begin.html index 7d57ab0a2..011576552 100644 --- a/db/docs/api_c/txn_begin.html +++ b/db/docs/api_c/txn_begin.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->txn_begin - + -

DB_ENV->txn_begin

API -Ref -
+Ref +


@@ -31,8 +30,7 @@ DB_ENV->txn_begin(DB_ENV *env,
 

Description: DB_ENV->txn_begin

The DB_ENV->txn_begin method creates a new transaction in the environment and copies a pointer to a DB_TXN that uniquely identifies it into -the memory to which tid refers. -Calling the DB_TXN->abort, +the memory to which tid refers. Calling the DB_TXN->abort, DB_TXN->commit or DB_TXN->discard methods will discard the returned handle.

Note: Transactions may only span threads if they do so serially; @@ -51,15 +49,18 @@ returns a non-zero error value on failure and 0 on success.
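For example, a read-mostly operation might run at degree 2 isolation as sketched below (dbenv is assumed to be an open, transaction-enabled DB_ENV handle; do_reads is a hypothetical application routine):

    int do_reads(DB_ENV *, DB_TXN *);   /* hypothetical application work */

    DB_TXN *tid;
    int ret;

    if ((ret = dbenv->txn_begin(dbenv, NULL, &tid, DB_DEGREE_2)) != 0)
        dbenv->err(dbenv, ret, "DB_ENV->txn_begin");
    else if ((ret = do_reads(dbenv, tid)) != 0)
        (void)tid->abort(tid);
    else
        ret = tid->commit(tid, 0);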

Parameters

-

-

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
+
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_DIRTY_READ
All read operations performed by the transaction may read modified but +
+
DB_DEGREE_2
This transaction will have degree 2 isolation. This provides for cursor +stability but not repeatable reads. Data items which have been +previously read by this transaction may be deleted or modified by other +transactions before this transaction completes. +
DB_DIRTY_READ
All read operations performed by the transaction may read modified but not yet committed data. Silently ignored if the DB_DIRTY_READ flag was not specified when the underlying database was opened. -

DB_TXN_NOSYNC
Do not synchronously flush the log when this transaction commits or +
DB_TXN_NOSYNC
Do not synchronously flush the log when this transaction commits or prepares. This means the transaction will exhibit the ACI (atomicity, consistency, and isolation) properties, but not D (durability); that is, database integrity will be maintained but it is possible that this @@ -67,11 +68,11 @@ transaction may be undone during recovery.

This behavior may be set for a Berkeley DB environment using the DB_ENV->set_flags method. Any value specified to this method overrides that setting.

-

DB_TXN_NOWAIT
If a lock is unavailable for any Berkeley DB operation performed in the context +
DB_TXN_NOWAIT
If a lock is unavailable for any Berkeley DB operation performed in the context of this transaction, cause the operation to return DB_LOCK_DEADLOCK immediately instead of blocking on the lock. -

DB_TXN_SYNC
Synchronously flush the log when this transaction commits or prepares. +
DB_TXN_SYNC
Synchronously flush the log when this transaction commits or prepares. This means the transaction will exhibit all of the ACID (atomicity, consistency, isolation, and durability) properties.

This behavior is the default for Berkeley DB environments unless the @@ -79,8 +80,7 @@ consistency, isolation, and durability) properties. DB_ENV->set_flags method. Any value specified to this method overrides that setting.

-

parent
-If the parent parameter is non-NULL, the new transaction will +
parent
If the parent parameter is non-NULL, the new transaction will be a nested transaction, with the transaction indicated by parent as its parent. Transactions may be nested to any level. In the presence of distributed transactions and two-phase commit, only @@ -90,8 +90,8 @@ specified, should be passed as an parameter to
APIRef -

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/txn_checkpoint.html b/db/docs/api_c/txn_checkpoint.html index 397f3a1b3..3f4824628 100644 --- a/db/docs/api_c/txn_checkpoint.html +++ b/db/docs/api_c/txn_checkpoint.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->txn_checkpoint - + -

DB_ENV->txn_checkpoint

API -Ref -
+Ref +


@@ -39,27 +38,24 @@ and 0 on success.
 See the db_checkpoint utility source code for an example of using DB_ENV->txn_checkpoint
 in a IEEE/ANSI Std 1003.1 (POSIX) environment.

Parameters

-

-

flags
-The flags parameter must be set to 0 or +
+
flags
The flags parameter must be set to 0 or the following value: -

-

DB_FORCE
Force a checkpoint record, even if there has been no activity since the +
+
DB_FORCE
Force a checkpoint record, even if there has been no activity since the last checkpoint.
-

kbyte
-If the kbyte parameter is non-zero, a checkpoint will be done +
kbyte
If the kbyte parameter is non-zero, a checkpoint will be done if more than kbyte kilobytes of log data have been written since the last checkpoint. -

min
-If the min parameter is non-zero, a checkpoint will be done if +
min
If the min parameter is non-zero, a checkpoint will be done if more than min minutes have passed since the last checkpoint.

Errors

The DB_ENV->txn_checkpoint method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -71,6 +67,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/txn_class.html b/db/docs/api_c/txn_class.html index 00e05a48e..49e3f1351 100644 --- a/db/docs/api_c/txn_class.html +++ b/db/docs/api_c/txn_class.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_TXN - + -

DB_TXN

API -Ref -
+Ref +


@@ -49,6 +48,6 @@ and DB_TXN->commit.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/txn_commit.html b/db/docs/api_c/txn_commit.html index 0de416e31..43eef9606 100644 --- a/db/docs/api_c/txn_commit.html +++ b/db/docs/api_c/txn_commit.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_TXN->commit - + -

DB_TXN->commit

API -Ref -
+Ref +


@@ -49,12 +48,11 @@ returns a non-zero error value on failure
 and 0 on success.
 

Parameters

-

-

flags
-The flags parameter must be set to 0 or +
+
flags
The flags parameter must be set to 0 or one of the following values: -

-

DB_TXN_NOSYNC
Do not synchronously flush the log. This means the transaction will +
+
DB_TXN_NOSYNC
Do not synchronously flush the log. This means the transaction will exhibit the ACI (atomicity, consistency, and isolation) properties, but not D (durability); that is, database integrity will be maintained, but it is possible that this transaction may be undone during recovery. @@ -62,7 +60,7 @@ it is possible that this transaction may be undone during recovery. DB_ENV->set_flags method or for a single transaction using the DB_ENV->txn_begin method. Any value specified to this method overrides both of those settings.

-

DB_TXN_SYNC
Synchronously flush the log. This means the transaction will exhibit +
DB_TXN_SYNC
Synchronously flush the log. This means the transaction will exhibit all of the ACID (atomicity, consistency, isolation, and durability) properties.

This behavior is the default for Berkeley DB environments unless the
@@ -81,6 +79,6 @@ method overrides both of those settings.
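A sketch of overriding the environment-wide setting for a single commit; the handle names are illustrative and the protected operations are elided:

    #include <db.h>

    /* Sketch: give up durability (but not atomicity, consistency, or
     * isolation) for this one transaction by not flushing the log. */
    int
    commit_nosync_sketch(DB_ENV *dbenv)
    {
        DB_TXN *txn;
        int ret;

        if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
            return (ret);
        /* ... database operations ... */
        return (txn->commit(txn, DB_TXN_NOSYNC));
    }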


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/txn_discard.html b/db/docs/api_c/txn_discard.html index a6fcdaa6c..d37462863 100644 --- a/db/docs/api_c/txn_discard.html +++ b/db/docs/api_c/txn_discard.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_TXN->discard - + -

DB_TXN->discard

API -Ref -
+Ref +


@@ -43,15 +42,14 @@ and 0 on success.
 

After DB_TXN->discard has been called, regardless of its return, the DB_TXN handle may not be accessed again.
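A minimal sketch, assuming the handle was returned by DB_ENV->txn_recover and will be resolved by some other transaction manager process:

    #include <db.h>

    /* Sketch: hand a recovered-but-unresolved transaction back to the
     * environment without resolving it here. */
    int
    discard_sketch(DB_TXN *prepared_txn)
    {
        /* After this call, the handle must not be used again. */
        return (prepared_txn->discard(prepared_txn, 0));
    }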

Parameters

-

-

flags
-The flags parameter is currently unused, and must be set to 0. +
+
flags
The flags parameter is currently unused, and must be set to 0.

Errors

The DB_TXN->discard method may fail and return one of the following non-zero errors:

-

-

EINVAL
If the transaction handle does not refer to a transaction that was +
+
EINVAL
If the transaction handle does not refer to a transaction that was recovered into a prepared but not yet completed state; or if an invalid flag value or parameter was specified.
@@ -64,6 +62,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/txn_id.html b/db/docs/api_c/txn_id.html index 948b885ef..dde7744a5 100644 --- a/db/docs/api_c/txn_id.html +++ b/db/docs/api_c/txn_id.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_TXN->id - + -

DB_TXN->id

API -Ref -
+Ref +


@@ -41,6 +40,6 @@ to the DB_ENV->lock_get or 
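A sketch of passing the returned ID as the locker to DB_ENV->lock_get; the lock object name is an illustrative assumption:

    #include <string.h>
    #include <db.h>

    /* Sketch: use the transaction's ID as the locker when acquiring a
     * lock directly from the lock subsystem. */
    int
    txn_id_lock_sketch(DB_ENV *dbenv, DB_TXN *txn)
    {
        DBT obj;
        DB_LOCK lock;

        memset(&obj, 0, sizeof(obj));
        obj.data = "example-lock-object";        /* illustrative name */
        obj.size = sizeof("example-lock-object") - 1;

        return (dbenv->lock_get(dbenv,
            txn->id(txn), 0, &obj, DB_LOCK_WRITE, &lock));
    }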
APIRef -

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/txn_list.html b/db/docs/api_c/txn_list.html index c7e1a0726..fa4902bad 100644 --- a/db/docs/api_c/txn_list.html +++ b/db/docs/api_c/txn_list.html @@ -1,31 +1,32 @@ - + Berkeley DB: Berkeley DB: Transaction Subsystem and Related Methods - +

Berkeley DB: Transaction Subsystem and Related Methods

- + - - - - - - - - - - - - + + + + + + + + + + + + +
Transaction Subsystem and Related Methods  Description
DB_ENV->set_tx_max  Set maximum number of transactions
DB_ENV->set_tx_timestamp  Set recovery timestamp
DB_ENV->txn_checkpoint  Checkpoint the transaction subsystem
DB_ENV->txn_recover  Distributed transaction recovery
DB_ENV->txn_stat  Return transaction subsystem statistics
DB_ENV->txn_begin  Begin a transaction
DB_TXN->abort  Abort a transaction
DB_TXN->commit  Commit a transaction
DB_TXN->discard  Discard a prepared but not resolved transaction handle
DB_TXN->id  Return a transaction's ID
DB_TXN->prepare  Prepare a transaction for commit
DB_TXN->set_timeout  Set transaction timeout
DB_ENV->set_timeout  Set lock and transaction timeout
DB_ENV->set_tx_max  Set maximum number of transactions
DB_ENV->set_tx_timestamp  Set recovery timestamp
DB_ENV->txn_begin  Begin a transaction
DB_ENV->txn_checkpoint  Checkpoint the transaction subsystem
DB_ENV->txn_recover  Distributed transaction recovery
DB_ENV->txn_stat  Return transaction subsystem statistics
DB_TXN->abort  Abort a transaction
DB_TXN->commit  Commit a transaction
DB_TXN->discard  Discard a prepared but not resolved transaction handle
DB_TXN->id  Return a transaction's ID
DB_TXN->prepare  Prepare a transaction for commit
DB_TXN->set_timeout  Set transaction timeout
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/txn_prepare.html b/db/docs/api_c/txn_prepare.html index ba48aa3fd..b5c59385b 100644 --- a/db/docs/api_c/txn_prepare.html +++ b/db/docs/api_c/txn_prepare.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_TXN->prepare - + -

DB_TXN->prepare

API -Ref -
+Ref +


@@ -28,7 +27,7 @@ DB_TXN->prepare(DB_TXN *tid, u_int8_t gid[DB_XIDDATASIZE]);
 


Description: DB_TXN->prepare

- +

The DB_TXN->prepare method initiates the beginning of a two-phase commit.

In a distributed transaction environment, Berkeley DB can be used as a local transaction manager. In this case, the distributed transaction manager
@@ -38,19 +37,17 @@ return before responding to the distributed transaction manager. Only after the distributed transaction manager receives successful responses from all of its prepare messages should it issue any commit messages.

-

In the case of nested transactions, preparing the parent -causes all unresolved children of the parent transaction to be committed. -Child transactions should never be explicitly prepared. -Their fate will be resolved along with their parent's during -global recovery.

+

In the case of nested transactions, preparing the parent causes all +unresolved children of the parent transaction to be committed. Child +transactions should never be explicitly prepared. Their fate will be +resolved along with their parent's during global recovery.

The DB_TXN->prepare method returns a non-zero error value on failure and 0 on success.

Parameters

-

-

gid
-The gid parameter specifies the global transaction ID by which this +
+
gid
The gid parameter specifies the global transaction ID by which this transaction will be known. This global transaction ID will be returned in calls to DB_ENV->txn_recover, telling the application which global transactions must be resolved.
@@ -64,6 +61,6 @@ transactions must be resolved.
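A sketch of the first commit phase, assuming the application supplies its own globally unique identifier bytes; the copy-and-pad scheme is illustrative only:

    #include <string.h>
    #include <db.h>

    /* Sketch: prepare a transaction under an application-chosen global ID. */
    int
    prepare_sketch(DB_TXN *txn, const u_int8_t *app_id, size_t app_id_len)
    {
        u_int8_t gid[DB_XIDDATASIZE];

        memset(gid, 0, sizeof(gid));
        memcpy(gid, app_id,
            app_id_len < sizeof(gid) ? app_id_len : sizeof(gid));

        /* Commit or abort only after the global decision is known. */
        return (txn->prepare(txn, gid));
    }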

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/txn_recover.html b/db/docs/api_c/txn_recover.html index 26fe245ca..45d6ae0d3 100644 --- a/db/docs/api_c/txn_recover.html +++ b/db/docs/api_c/txn_recover.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->txn_recover - + -

DB_ENV->txn_recover

API -Ref -
+Ref +


@@ -42,7 +41,7 @@ be filled in with a list of transactions that must be resolved by the
 application (committed, aborted or discarded).  The preplist
 parameter is a structure of type DB_PREPLIST; the following DB_PREPLIST
 fields will be filled in:

-

+
DB_TXN * txn;
The transaction handle for the transaction.
u_int8_t gid[DB_XIDDATASIZE];
The global transaction ID for the transaction. The global transaction ID is the one specified when the transaction was prepared. The
@@ -57,21 +56,18 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

count
-The count parameter specifies the number of available entries +
+
count
The count parameter specifies the number of available entries in the passed-in preplist array. The retp parameter returns the number of entries DB_ENV->txn_recover has filled in, in the array. -

flags
-The flags parameter must be set to one of the following values: -

-

DB_FIRST
Begin returning a list of prepared, but not yet resolved transactions. -

DB_NEXT
Continue returning a list of prepared, but not yet resolved transactions, +
flags
The flags parameter must be set to one of the following values: +
+
DB_FIRST
Begin returning a list of prepared, but not yet resolved transactions. +
DB_NEXT
Continue returning a list of prepared, but not yet resolved transactions, starting where the last call to DB_ENV->txn_recover left off.
-

preplist
-The preplist parameter references memory into which +
preplist
The preplist parameter references memory into which the list of transactions to be resolved by the application is copied.
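A sketch of a single recovery pass; the fixed array size and the commit-everything policy are illustrative assumptions, and a real transaction manager would match each gid against its own records and continue with DB_NEXT if the array filled:

    #include <db.h>

    #define PREP_MAX    32        /* illustrative array size */

    int
    recover_sketch(DB_ENV *dbenv)
    {
        DB_PREPLIST preplist[PREP_MAX];
        long count, i;
        int ret;

        if ((ret = dbenv->txn_recover(dbenv,
            preplist, PREP_MAX, &count, DB_FIRST)) != 0)
            return (ret);
        for (i = 0; i < count; i++)
            if ((ret = preplist[i].txn->commit(preplist[i].txn, 0)) != 0)
                return (ret);
        return (0);
    }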

@@ -83,6 +79,6 @@ The preplist parameter references memory into which

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/txn_set_timeout.html b/db/docs/api_c/txn_set_timeout.html index 2f169518e..965a973d9 100644 --- a/db/docs/api_c/txn_set_timeout.html +++ b/db/docs/api_c/txn_set_timeout.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_TXN->set_timeout - + -

DB_TXN->set_timeout

API -Ref -
+Ref +


@@ -31,15 +30,12 @@ DB_TXN->set_timeout(DB_TXN *tid, db_timeout_t timeout, u_int32_t flags);
 

The DB_TXN->set_timeout method sets timeout values for locks or transactions for the specified transaction.

Timeouts are checked whenever a thread of control blocks on a lock or -when deadlock detection is performed. (In the case of -DB_SET_LOCK_TIMEOUT, the lock is one requested explicitly -through the Lock subsystem interfaces. In the case of -DB_SET_TXN_TIMEOUT, the lock is one requested on behalf of a -transaction. In either case, it may be a lock requested by the database -access methods underlying the application.) As timeouts are only -checked when the lock request first blocks or when deadlock detection -is performed, the accuracy of the timeout depends on how often deadlock -detection is performed.

+when deadlock detection is performed. In the case of +DB_SET_LOCK_TIMEOUT, the timeout is for any single lock request. +In the case of DB_SET_TXN_TIMEOUT, the timeout is for the life +of the transaction. As timeouts are only checked when the lock request +first blocks or when deadlock detection is performed, the accuracy of +the timeout depends on how often deadlock detection is performed.

Timeout values may be specified for the database environment as a whole. See DB_ENV->set_timeout for more information.

The DB_TXN->set_timeout method configures operations performed on the underlying
@@ -52,23 +48,21 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to one of the following values: -

-

DB_SET_LOCK_TIMEOUT
Set the timeout value for locks in this transaction. -

DB_SET_TXN_TIMEOUT
Set the timeout value for this transaction. +
+
flags
The flags parameter must be set to one of the following values: +
+
DB_SET_LOCK_TIMEOUT
Set the timeout value for locks in this transaction. +
DB_SET_TXN_TIMEOUT
Set the timeout value for this transaction.
-

timeout
-The timeout parameter is specified as an unsigned 32-bit number +
timeout
The timeout parameter is specified as an unsigned 32-bit number of microseconds, limiting the maximum timeout to roughly 71 minutes. A value of 0 disables timeouts for the transaction.
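A sketch combining both flag values on one transaction; the particular timeout values are arbitrary illustrations:

    #include <db.h>

    /* Sketch: per-transaction timeouts of 5 seconds per lock request and
     * 30 seconds for the transaction as a whole (values in microseconds). */
    int
    timeout_sketch(DB_ENV *dbenv)
    {
        DB_TXN *txn;
        int ret;

        if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
            return (ret);
        if ((ret = txn->set_timeout(txn,
            5 * 1000000, DB_SET_LOCK_TIMEOUT)) != 0 ||
            (ret = txn->set_timeout(txn,
            30 * 1000000, DB_SET_TXN_TIMEOUT)) != 0) {
            (void)txn->abort(txn);
            return (ret);
        }
        /* ... database operations ... */
        return (txn->commit(txn, 0));
    }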

Errors

The DB_TXN->set_timeout method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -80,6 +74,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_c/txn_stat.html b/db/docs/api_c/txn_stat.html index 5f07a3361..a3b130970 100644 --- a/db/docs/api_c/txn_stat.html +++ b/db/docs/api_c/txn_stat.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DB_ENV->txn_stat - + -

DB_ENV->txn_stat

API -Ref -
+Ref +


@@ -25,6 +24,9 @@
 

int DB_ENV->txn_stat(DB_ENV *env, DB_TXN_STAT **statp, u_int32_t flags); +

+int
+DB_ENV->txn_stat_print(DB_ENV *env, u_int32_t flags);


Description: DB_ENV->txn_stat

@@ -40,7 +42,7 @@ responsible for deallocating the memory. To deallocate the memory, free the memory reference; references inside the returned memory need not be individually freed.

The following DB_TXN_STAT fields will be filled in:

-

+
DbLsn st_last_ckp;
The LSN of the last checkpoint.
time_t st_time_ckp;
The time the last completed checkpoint finished (as the number of seconds since the Epoch, returned by the IEEE/ANSI Std 1003.1 (POSIX) time function).
@@ -52,15 +54,15 @@ since the Epoch, returned by the IEEE/ANSI Std 1003.1 (POSIX) time functi
u_int32_t st_naborts;
The number of transactions that have aborted.
u_int32_t st_ncommits;
The number of transactions that have committed.
u_int32_t st_nrestores;
The number of transactions that have been restored. -
u_int32_t st_regsize;
The size of the region. +
roff_t st_regsize;
The size of the region, in bytes.
u_int32_t st_region_wait;
The number of times that a thread of control was forced to wait before obtaining the region lock.
u_int32_t st_region_nowait;
The number of times that a thread of control was able to obtain the region lock without waiting.
DB_TXN_ACTIVE *st_txnarray;
A pointer to an array of st_nactive DB_TXN_ACTIVE structures, -describing the currently active transactions. The following fields of +describing the currently active transactions. The following fields of the DB_TXN_ACTIVE structure will be filled in: -

+
u_int32_t txnid;
The transaction ID of the transaction.
u_int32_t parentid;
The transaction ID of the parent transaction (or 0, if no parent).
DbLsn lsn;
The current log sequence number when the transaction was begun.
@@ -74,25 +76,45 @@ returns a non-zero error value on failure and 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to 0 or +
+
flags
The flags parameter must be set to 0 or the following value: -

-

DB_STAT_CLEAR
Reset statistics after returning their values. +
+
DB_STAT_CLEAR
Reset statistics after returning their values.
-

statp
-The statp parameter references memory into which +
statp
The statp parameter references memory into which a pointer to the allocated statistics structure is copied.
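A sketch of retrieving, printing, and freeing the statistics; the fields printed are a small illustrative subset:

    #include <stdio.h>
    #include <stdlib.h>
    #include <db.h>

    /* Sketch: print a few of the statistics fields and release the
     * structure allocated by the library. */
    int
    txn_stat_sketch(DB_ENV *dbenv)
    {
        DB_TXN_STAT *sp;
        int ret;

        if ((ret = dbenv->txn_stat(dbenv, &sp, 0)) != 0)
            return (ret);
        printf("commits %lu, aborts %lu, active %lu\n",
            (unsigned long)sp->st_ncommits,
            (unsigned long)sp->st_naborts,
            (unsigned long)sp->st_nactive);
        free(sp);
        return (0);
    }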

Errors

The DB_ENV->txn_stat method may fail and return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

+

Description: DB_ENV->txn_stat_print

+

The DB_ENV->txn_stat_print method returns the +transaction subsystem statistical information, as described for the DB_ENV->txn_stat method. +The information is printed to a specified output channel (see the +DB_ENV->set_msgfile method for more information), or passed to an +application callback function (see the DB_ENV->set_msgcall method for +more information).

+

The DB_ENV->txn_stat_print method may not be called before the DB_ENV->open method has +been called.

+

The DB_ENV->txn_stat_print method +returns a non-zero error value on failure +and 0 on success. +

+

Parameters

+
+
flags
The flags parameter must be set to 0 or +the following value: +
+
DB_STAT_ALL
Display all available information. +
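A sketch of routing the output to a file before printing all available information; the file name is an illustrative assumption:

    #include <stdio.h>
    #include <db.h>

    /* Sketch: send the message output to a file, then print everything
     * the transaction subsystem tracks. */
    int
    txn_stat_print_sketch(DB_ENV *dbenv)
    {
        FILE *fp;
        int ret;

        if ((fp = fopen("txn_stats.txt", "w")) == NULL)    /* illustrative path */
            return (-1);
        dbenv->set_msgfile(dbenv, fp);
        ret = dbenv->txn_stat_print(dbenv, DB_STAT_ALL);
        (void)fclose(fp);
        return (ret);
    }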
+
+

Class

DB_ENV, DB_TXN

See Also

@@ -101,6 +123,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/api_index.html b/db/docs/api_cxx/api_index.html index f918538a3..926a807bb 100644 --- a/db/docs/api_cxx/api_index.html +++ b/db/docs/api_cxx/api_index.html @@ -1,178 +1,201 @@ - - + + -Berkeley DB: Berkeley DB: C++ API +Berkeley DB: Berkeley DB: C++ Handle Methods - + -

Berkeley DB: C++ API

+

Berkeley DB: C++ Handle Methods

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
SectionClass/MethodDescription
Database EnvironmentDbEnvCreate an environment handle

DbEnv::closeClose an environment

DbEnv::dbremoveRemove a database

DbEnv::dbrenameRename a database

DbEnv::errError message with error string

DbEnv::errxError message

DbEnv::openOpen an environment

DbEnv::removeRemove an environment

DbEnv::strerrorError strings

DbEnv::versionReturn version information
Environment ConfigurationDbEnv::set_app_dispatchConfigure application recovery

DbEnv::set_allocSet local space allocation functions

DbEnv::set_data_dirSet the environment data directory

DbEnv::set_encryptSet the environment cryptographic key

DbEnv::set_errcallSet error message callback

DbEnv::set_errfileSet error message FILE

DbEnv::set_error_streamSet error message output stream

DbEnv::set_errpfxSet error message prefix

DbEnv::set_feedbackSet feedback callback

DbEnv::set_flagsEnvironment configuration

DbEnv::set_paniccallSet panic callback

DbEnv::set_rpc_serverEstablish an RPC server connection

DbEnv::set_shm_keySet system memory shared segment ID

DbEnv::set_tas_spinsSet the number of test-and-set spins

DbEnv::set_timeoutSet lock and transaction timeout

DbEnv::set_tmp_dirSet the environment temporary file directory

DbEnv::set_verboseSet verbose messages
Database OperationsDbCreate a database handle

Db::associateAssociate a secondary index

Db::closeClose a database

Db::delDelete items from a database

Db::errError message with error string

Db::errxError message

Db::fdReturn a file descriptor from a database

Db::get, Db::pgetGet items from a database

Db::get_byteswappedReturn if the underlying database is in host order

Db::getenvReturn a handle for the underlying database environment

Db::get_typeReturn the database type

Db::joinPerform a database join on cursors

Db::key_rangeReturn estimate of key location

Db::openOpen a database

Db::putStore items into a database

Db::removeRemove a database

Db::renameRename a database

Db::statReturn database statistics

Db::syncFlush a database to stable storage

Db::truncateEmpty a database

Db::upgradeUpgrade a database

Db::verifyVerify/salvage a database
Database ConfigurationDb::set_allocSet local space allocation functions

Db::set_cachesizeSet the database cache size

Db::set_dup_compareSet a duplicate comparison function

Db::set_encryptSet the database cryptographic key

Db::set_errcallSet error message callback

Db::set_errfileSet error message FILE

Db::set_error_streamSet error message output stream

Db::set_errpfxSet error message prefix

Db::set_feedbackSet feedback callback

Db::set_flagsGeneral database configuration

Db::set_lorderSet the database byte order

Db::set_pagesizeSet the underlying database page size

Db::set_paniccallSet panic callback
Btree/Recno ConfigurationDb::set_append_recnoSet record append callback

Db::set_bt_compareSet a Btree comparison function

Db::set_bt_minkeySet the minimum number of keys per Btree page

Db::set_bt_prefixSet a Btree prefix comparison function

Db::set_re_delimSet the variable-length record delimiter

Db::set_re_lenSet the fixed-length record length

Db::set_re_padSet the fixed-length record pad byte

Db::set_re_sourceSet the backing Recno text file
Hash ConfigurationDb::set_h_ffactorSet the Hash table density

Db::set_h_hashSet a hashing function

Db::set_h_nelemSet the Hash table size
Queue ConfigurationDb::set_q_extentsizeSet Queue database extent size
Database Cursor OperationsDbcCursor class

Db::cursorCreate a cursor handle

Dbc::closeClose a cursor

Dbc::countReturn count of duplicates

Dbc::delDelete by cursor

Dbc::dupDuplicate a cursor

Dbc::get, Dbc::pgetRetrieve by cursor

Dbc::putStore by cursor
Key/Data PairsDbt
Bulk RetrievalDbMultipleDataIterator

DbMultipleKeyDataIterator

DbMultipleRecnoDataIterator
Lock SubsystemDbEnv::set_lk_conflictsSet lock conflicts matrix

DbEnv::set_lk_detectSet automatic deadlock detection

DbEnv::set_lk_max_lockersSet maximum number of lockers

DbEnv::set_lk_max_locksSet maximum number of locks

DbEnv::set_lk_max_objectsSet maximum number of lock objects

DbEnv::lock_detectPerform deadlock detection

DbEnv::lock_getAcquire a lock

DbEnv::lock_idAcquire a locker ID

DbEnv::lock_id_freeRelease a locker ID

DbEnv::lock_putRelease a lock

DbEnv::lock_statReturn lock subsystem statistics

DbEnv::lock_vecAcquire/release locks
Log SubsystemDbEnv::set_lg_bsizeSet log buffer size

DbEnv::set_lg_dirSet the environment logging directory

DbEnv::set_lg_maxSet log file size

DbEnv::set_lg_regionmaxSet logging region size

DbEnv::log_archiveList log and database files

DbEnv::log_fileMap Log Sequence Numbers to log files

DbEnv::log_flushFlush log records

DbEnv::log_putWrite a log record

DbEnv::log_statReturn log subsystem statistics
Log Cursor OperationsDbLogcLog cursor class

DbEnv::log_cursorCreate a log cursor handle

DbLogc::closeClose a log cursor

DbLogc::getRetrieve a log record
Log Sequence NumbersDbLsn

DbEnv::log_compareCompare two Log Sequence Numbers
Memory Pool SubsystemDb::get_mpfReturn the database's memory pool handle

DbEnv::set_cachesizeSet the environment cache size

DbEnv::set_mp_mmapsizeSet maximum mapped-in database file size

DbEnv::memp_registerRegister input/output functions for a file in a memory pool

DbEnv::memp_statReturn memory pool statistics

DbEnv::memp_syncFlush pages from a memory pool

DbEnv::memp_trickleTrickle flush pages from a memory pool
Memory Pool FilesDbMpoolFileMemory Pool File class

DbEnv::memp_fcreateCreate a memory pool file handle

DbMpoolFile::closeClose a file in a memory pool

DbMpoolFile::getGet page from a file in a memory pool

DbMpoolFile::openOpen a file in a memory pool

DbMpoolFile::putReturn a page to a memory pool

DbMpoolFile::setSet memory pool page status

DbMpoolFile::syncFlush pages from a file in a memory pool

DbMpoolFile::set_clear_lenSet file page bytes to be cleared

DbMpoolFile::set_fileidSet file unique identifier

DbMpoolFile::set_flagsGeneral memory pool file configuration

DbMpoolFile::set_ftypeSet file type

DbMpoolFile::set_lsn_offsetSet file log-sequence-number offset

DbMpoolFile::set_maxsizeSet the maximum file size

DbMpoolFile::set_pgcookieSet file cookie for pgin/pgout

DbMpoolFile::set_prioritySet the file priority
Transaction SubsystemDbEnv::set_tx_maxSet maximum number of transactions

DbEnv::set_tx_timestampSet recovery timestamp

DbEnv::txn_checkpointCheckpoint the transaction subsystem

DbEnv::txn_recoverDistributed transaction recovery

DbEnv::txn_statReturn transaction subsystem statistics
TransactionsDbTxnTransaction class

DbEnv::txn_beginBegin a transaction

DbTxn::abortAbort a transaction

DbTxn::commitCommit a transaction

DbTxn::discardDiscard a prepared but not resolved transaction handle

DbTxn::idReturn a transaction's ID

DbTxn::preparePrepare a transaction for commit

DbTxn::set_timeoutSet transaction timeout
ReplicationDbEnv::set_rep_transportConfigure replication transport

DbEnv::rep_electHold a replication election

DbEnv::set_rep_limitLimit data sent in response to a single message

DbEnv::rep_process_messageProcess a replication message

DbEnv::rep_startConfigure an environment for replication

DbEnv::rep_statReplication statistics
ExceptionsDbExceptionException Class for Berkeley DB Activity

DbDeadlockExceptionException Class for deadlocks

DbLockNotGrantedExceptionException Class for lock request failures

DbMemoryExceptionException Class for insufficient memory

DbRunRecoveryExceptionException Class for failures requiring recovery
C++ Handle MethodsDescription
Dbc::closeClose a cursor
Dbc::countReturn count of duplicates
Dbc::delDelete by cursor
Dbc::dupDuplicate a cursor
Dbc::getRetrieve by cursor
Dbc::pgetRetrieve by cursor
Dbc::putStore by cursor
DbCreate a database handle
Db::associateAssociate a secondary index
Db::closeClose a database
Db::cursorCreate a cursor handle
Db::delDelete items from a database
Db::errError message with error string
Db::errxError message
Db::fdReturn a file descriptor from a database
Db::getGet items from a database
Db::get_byteswappedReturn if the underlying database is in host order
Db::getenvReturn database environment handle
Db::get_mpfReturn the database's memory pool handle
Db::get_typeReturn the database type
Db::joinPerform a database join on cursors
Db::key_rangeReturn estimate of key location
Db::openOpen a database
Db::pgetGet items from a database
Db::putStore items into a database
Db::removeRemove a database
Db::renameRename a database
Db::set_allocSet local space allocation functions
Db::set_append_recnoSet record append callback
Db::set_bt_compareSet a Btree comparison function
Db::set_bt_minkeySet the minimum number of keys per Btree page
Db::set_bt_prefixSet a Btree prefix comparison function
Db::set_cachesizeSet the database cache size
Db::set_dup_compareSet a duplicate comparison function
Db::set_encryptSet the database cryptographic key
Db::set_errcallSet error and informational message callback
Db::set_errfileSet error and informational message FILE
Db::set_error_streamSet error and informational message output stream
Db::set_errpfxSet error message prefix
Db::set_feedbackSet feedback callback
Db::set_flagsGeneral database configuration
Db::set_h_ffactorSet the Hash table density
Db::set_h_hashSet a hashing function
Db::set_h_nelemSet the Hash table size
Db::set_lorderSet the database byte order
Db::set_message_streamSet error and informational message output stream
Db::set_msgcallSet error and informational message callback
Db::set_msgfileSet error and informational message FILE
Db::set_pagesizeSet the underlying database page size
Db::set_paniccallSet panic callback
Db::set_q_extentsizeSet Queue database extent size
Db::set_re_delimSet the variable-length record delimiter
Db::set_re_lenSet the fixed-length record length
Db::set_re_padSet the fixed-length record pad byte
Db::set_re_sourceSet the backing Recno text file
Db::statDatabase statistics
Db::stat_printDatabase statistics
Db::syncFlush a database to stable storage
Db::truncateEmpty a database
Db::upgradeUpgrade a database
Db::verifyVerify/salvage a database
DbDeadlockExceptionException Class for deadlocks
DbEnvCreate an environment handle
DbEnv::closeClose an environment
DbEnv::strerrorError strings
DbEnv::versionReturn version information
DbEnv::dbremoveRemove a database
DbEnv::dbrenameRename a database
DbEnv::errError message with error string
DbEnv::errxError message
DbEnv::lock_detectPerform deadlock detection
DbEnv::lock_getAcquire a lock
DbEnv::lock_idAcquire a locker ID
DbEnv::lock_id_freeRelease a locker ID
DbEnv::lock_putRelease a lock
DbEnv::lock_statLock subsystem statistics
DbEnv::lock_stat_printLock subsystem statistics
DbEnv::lock_vecAcquire/release locks
DbEnv::log_archiveList log and database files
DbEnv::log_compareCompare two Log Sequence Numbers
DbEnv::log_cursorCreate a log cursor handle
DbEnv::log_fileMap Log Sequence Numbers to log files
DbEnv::log_flushFlush log records
DbEnv::log_putWrite a log record
DbEnv::log_statLog subsystem statistics
DbEnv::log_stat_printLog subsystem statistics
DbEnv::memp_fcreateCreate a memory pool file handle
DbEnv::memp_registerRegister input/output functions for a file in a memory pool
DbEnv::memp_statMemory pool statistics
DbEnv::memp_stat_printMemory pool statistics
DbEnv::memp_syncFlush pages from a memory pool
DbEnv::memp_trickleTrickle flush pages from a memory pool
DbEnv::openOpen an environment
DbEnv::removeRemove an environment
DbEnv::rep_electHold a replication election
DbEnv::rep_process_messageProcess a replication message
DbEnv::rep_startConfigure an environment for replication
DbEnv::rep_statReplication statistics
DbEnv::rep_stat_printReplication statistics
DbEnv::set_allocSet local space allocation functions
DbEnv::set_app_dispatchConfigure application recovery
DbEnv::set_cachesizeSet the environment cache size
DbEnv::set_data_dirSet the environment data directory
DbEnv::set_encryptSet the environment cryptographic key
DbEnv::set_errcallSet error and informational message callbacks
DbEnv::set_errfileSet error and informational message FILE
DbEnv::set_error_streamSet error and informational message output stream
DbEnv::set_errpfxSet error message prefix
DbEnv::set_feedbackSet feedback callback
DbEnv::set_flagsEnvironment configuration
DbEnv::set_lg_bsizeSet log buffer size
DbEnv::set_lg_dirSet the environment logging directory
DbEnv::set_lg_maxSet log file size
DbEnv::set_lg_regionmaxSet logging region size
DbEnv::set_lk_conflictsSet lock conflicts matrix
DbEnv::set_lk_detectSet automatic deadlock detection
DbEnv::set_lk_max_lockersSet maximum number of lockers
DbEnv::set_lk_max_locksSet maximum number of locks
DbEnv::set_lk_max_objectsSet maximum number of lock objects
DbEnv::set_message_streamSet error and informational message output stream
DbEnv::set_max_openfdSet maximum number of open file descriptors
DbEnv::set_max_writeSet maximum number of sequential write operations
DbEnv::set_mp_mmapsizeSet maximum mapped-in database file size
DbEnv::set_msgcallSet error and informational message callbacks
DbEnv::set_msgfileSet error and informational message FILE
DbEnv::set_paniccallSet panic callback
DbEnv::set_rep_limitLimit data sent in response to a single message
DbEnv::set_rep_transportConfigure replication transport
DbEnv::set_rpc_serverEstablish an RPC server connection
DbEnv::set_shm_keySet system memory shared segment ID
DbEnv::set_tas_spinsSet the number of test-and-set spins
DbEnv::set_timeoutSet lock and transaction timeout
DbEnv::set_tmp_dirSet the environment temporary file directory
DbEnv::set_tx_maxSet maximum number of transactions
DbEnv::set_tx_timestampSet recovery timestamp
DbEnv::set_verboseSet verbose messages
DbEnv::stat_printEnvironment statistics
DbEnv::txn_beginBegin a transaction
DbEnv::txn_checkpointCheckpoint the transaction subsystem
DbEnv::txn_recoverDistributed transaction recovery
DbEnv::txn_statTransaction subsystem statistics
DbEnv::txn_stat_printTransaction subsystem statistics
DbExceptionException Class for Berkeley DB Activity
DbLockNotGrantedExceptionException Class for lock request failures
DbLogc::closeClose a log cursor
DbLogc::getRetrieve a log record
DbMemoryExceptionException Class for insufficient memory
DbMpoolFile::closeClose a file in a memory pool
DbMpoolFile::getGet page from a file in a memory pool
DbMpoolFile::openOpen a file in a memory pool
DbMpoolFile::putReturn a page to a memory pool
DbMpoolFile::setSet memory pool page status
DbMpoolFile::set_clear_lenSet file page bytes to be cleared
DbMpoolFile::set_fileidSet file unique identifier
DbMpoolFile::set_flagsGeneral memory pool file configuration
DbMpoolFile::set_ftypeSet file type
DbMpoolFile::set_lsn_offsetSet file log-sequence-number offset
DbMpoolFile::set_maxsizeSet the maximum file size
DbMpoolFile::set_pgcookieSet file cookie for pgin/pgout
DbMpoolFile::set_prioritySet the file priority
DbMpoolFile::syncFlush pages from a file in a memory pool
DbRunRecoveryExceptionException Class for failures requiring recovery
DbSequence::closeClose a sequence
DbSequence::getReturn the next sequence element(s)
DbSequence::get_cachesizeReturn the sequence cache size
DbSequence::get_dbpReturn dbp handle for sequence
DbSequence::get_flagsReturn sequence flag values
DbSequence::get_keyReturn key for sequence
DbSequence::get_rangeReturn the range of a sequence
DbSequence::init_valueSet the initial value of a sequence
DbSequence::openOpen a sequence
DbSequence::removeRemove a sequence
DbSequence::set_cachesizeSet the sequence cache size
DbSequence::set_flagsSet sequence flag values
DbSequence::set_rangeSet the range of a sequence
DbSequence::statSequence statistics
DbTxn::abortAbort a transaction
DbTxn::commitCommit a transaction
DbTxn::discardDiscard a prepared but not resolved transaction handle
DbTxn::idReturn a transaction's ID
DbTxn::preparePrepare a transaction for commit
DbTxn::set_timeoutSet transaction timeout
DbtKey/Data pairs
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/cxx_pindex.html b/db/docs/api_cxx/cxx_pindex.html index 7a46557d0..e054d488f 100644 --- a/db/docs/api_cxx/cxx_pindex.html +++ b/db/docs/api_cxx/cxx_pindex.html @@ -2,817 +2,595 @@ Berkeley DB: C++ API Index - +

C++ API Index

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Building a small memory footprint library --disable-cryptography
Building a small memory footprint library --disable-hash
Configuring Berkeley DB --disable-largefile
Building a small memory footprint library --disable-queue
Building a small memory footprint library --disable-replication
Configuring Berkeley DB --disable-shared
Configuring Berkeley DB --disable-static
Building a small memory footprint library --disable-verify
Configuring Berkeley DB --enable-compat185
Configuring Berkeley DB --enable-cxx
Configuring Berkeley DB --enable-debug
Configuring Berkeley DB --enable-debug_rop
Configuring Berkeley DB --enable-debug_wop
Configuring Berkeley DB --enable-diagnostic
Configuring Berkeley DB --enable-dump185
Configuring Berkeley DB --enable-java
Configuring Berkeley DB --enable-posixmutexes
Configuring Berkeley DB --enable-rpc
Configuring Berkeley DB --enable-smallbuild
Building a small memory footprint library --enable-smallbuild
Configuring Berkeley DB --enable-tcl
Configuring Berkeley DB --enable-test
Configuring Berkeley DB --enable-uimutexes
Configuring Berkeley DB --enable-umrw
Configuring Berkeley DB --with-mutex=MUTEX
Configuring Berkeley DB --with-mutexalign=ALIGNMENT
Configuring Berkeley DB --with-rpm=ARCHIVE
Configuring Berkeley DB --with-tcl=DIR
Configuring Berkeley DB --with-uniquename=NAME
/etc/magic
configuring Berkeley DB 1.85 API compatibility
building a utility to dump Berkeley DB 1.85 databases
Upgrading to release 2.0
Upgrading to release 3.0
Upgrading to release 3.1
Upgrading to release 3.2
Upgrading to release 3.3
Upgrading to release 4.0
Upgrading to release 4.1
Upgrading to release 4.2
selecting an access method
access method FAQ
access method tuning
introduction to the access methods
AIX
data alignment
programmatic APIs
hot backup
BDB
BDB
BDB
introduction to the buffer pool subsystem
turn off system buffering
turn off system buffering for database files
turn off system buffering for log files
building for QNX
building for UNIX
building for UNIX FAQ
building for VxWorks
building for VxWorks AE
building for VxWorks FAQ
building for Win32
building for Windows FAQ
Bulk Retrieval API
selecting a byte order
configuring the C++ API
flushing the database cache
selecting a cache size
introduction to the memory cache subsystem
catastrophic recovery
Patches, Updates and Change logs
database page checksum
closing a cursor
closing a database
database compaction
specifying a Btree comparison function
changing compile or load options
Concurrent Data Store
database environment configuration
configuring Berkeley DB for UNIX systems
salvaging corrupted databases
counting data items for a key
closing a cursor
deleting records with a cursor
duplicating a cursor
retrieving records with a cursor
storing records with a cursor
cursor stability
database cursors
Db
Db::associate
Db::close
Db::cursor
Db::del
Db::err
Db::errx
Db::fd
Db::get
Db::getenv
Db::get_bt_minkey
Db::get_byteswapped
Db::get_cachesize
Db::get_database
Db::get_encrypt_flags
Db::get_errfile
Db::get_errpfx
Db::get_file
Db::get_flags
Db::get_h_ffactor
Db::get_h_nelem
Db::get_lorder
Db::get_mpf
Db::get_open_flags
Db::get_pagesize
Db::get_q_extentsize
Db::get_re_delim
Db::get_re_len
Db::get_re_pad
Db::get_re_source
Db::get_transactional
Db::get_type
Db::join
Db::key_range
Db::open
Db::pget
Db::put
Db::remove
Db::rename
Db::set_alloc
Db::set_append_recno
Db::set_bt_compare
Db::set_bt_minkey
Db::set_bt_prefix
Db::set_cachesize
Db::set_dup_compare
Db::set_encrypt
Db::set_errcall
Db::set_errfile
Db::set_error_stream
Db::set_errpfx
Db::set_feedback
Db::set_flags
Db::set_h_ffactor
Db::set_h_hash
Db::set_h_nelem
Db::set_lorder
Db::set_pagesize
Db::set_paniccall
Db::set_q_extentsize
Db::set_re_delim
Db::set_re_len
Db::set_re_pad
Db::set_re_source
Db::stat
Db::sync
Db::truncate
Db::upgrade
Db::verify
Dbc
Dbc::close
Dbc::count
Dbc::del
Dbc::dup
Dbc::get
Dbc::pget
Dbc::put
DbDeadlockException
DbEnv
DbEnv::close
DbEnv::dbremove
DbEnv::dbrename
DbEnv::err
DbEnv::errx
DbEnv::get_cachesize
DbEnv::get_cachesize_nocache
DbEnv::get_data_dirs
DbEnv::get_encrypt_flags
DbEnv::get_errfile
DbEnv::get_errpfx
DbEnv::get_flags
DbEnv::get_home
DbEnv::get_lg_bsize
DbEnv::get_lg_dir
DbEnv::get_lg_max
DbEnv::get_lg_regionmax
DbEnv::get_lk_conflicts
DbEnv::get_lk_detect
DbEnv::get_lk_max_lockers
DbEnv::get_lk_max_locks
DbEnv::get_lk_max_objects
DbEnv::get_mp_mmapsize
DbEnv::get_open_flags
DbEnv::get_rep_limit
DbEnv::get_shm_key
DbEnv::get_tas_spins
DbEnv::get_timeout
DbEnv::get_tmp_dir
DbEnv::get_tx_max
DbEnv::get_tx_timestamp
DbEnv::get_verbose
DbEnv::lock_detect
DbEnv::lock_get
DbEnv::lock_id
DbEnv::lock_id_free
DbEnv::lock_put
DbEnv::lock_stat
DbEnv::lock_vec
DbEnv::log_archive
DbEnv::log_compare
DbEnv::log_cursor
DbEnv::log_file
DbEnv::log_flush
DbEnv::log_put
DbEnv::log_stat
DbEnv::memp_fcreate
DbEnv::memp_register
DbEnv::memp_stat
DbEnv::memp_sync
DbEnv::memp_trickle
DbEnv::open
DbEnv::remove
DbEnv::rep_elect
DbEnv::rep_process_message
DbEnv::rep_start
DbEnv::rep_stat
DbEnv::set_alloc
DbEnv::set_app_dispatch
DbEnv::set_cachesize
DbEnv::set_data_dir
DbEnv::set_encrypt
DbEnv::set_errcall
DbEnv::set_errfile
DbEnv::set_error_stream
DbEnv::set_errpfx
DbEnv::set_feedback
DbEnv::set_flags
DbEnv::set_lg_bsize
DbEnv::set_lg_dir
DbEnv::set_lg_max
DbEnv::set_lg_regionmax
DbEnv::set_lk_conflicts
DbEnv::set_lk_detect
DbEnv::set_lk_max_lockers
DbEnv::set_lk_max_locks
DbEnv::set_lk_max_objects
DbEnv::set_mp_mmapsize
DbEnv::set_paniccall
DbEnv::set_rep_limit
DbEnv::set_rep_transport
DbEnv::set_rpc_server
DbEnv::set_shm_key
DbEnv::set_tas_spins
DbEnv::set_timeout
DbEnv::set_tmp_dir
DbEnv::set_tx_max
DbEnv::set_tx_timestamp
DbEnv::set_verbose
DbEnv::strerror
DbEnv::txn_begin
DbEnv::txn_checkpoint
DbEnv::txn_recover
DbEnv::txn_stat
DbEnv::version
DbEnv::version
DbException
DbException::get_env
DbException::get_errno
DbException::what
DbLock
DbLockNotGrantedException
DbLogc
DbLogc::close
DbLogc::get
DbLsn
DbMemoryException
DbMpoolFile
DbMpoolFile::close
DbMpoolFile::get
DbMpoolFile::get_clear_len
DbMpoolFile::get_fileid
DbMpoolFile::get_flags
DbMpoolFile::get_ftype
DbMpoolFile::get_lsn_offset
DbMpoolFile::get_maxsize
DbMpoolFile::get_pgcookie
DbMpoolFile::get_priority
DbMpoolFile::open
DbMpoolFile::put
DbMpoolFile::set
DbMpoolFile::set_clear_len
DbMpoolFile::set_fileid
DbMpoolFile::set_flags
DbMpoolFile::set_ftype
DbMpoolFile::set_lsn_offset
DbMpoolFile::set_maxsize
DbMpoolFile::set_pgcookie
DbMpoolFile::set_priority
DbMpoolFile::sync
DbRunRecoveryException
Dbt
DbTxn
DbTxn::abort
DbTxn::commit
DbTxn::discard
DbTxn::id
DbTxn::prepare
DbTxn::set_timeout
Dbc::put DB_AFTER
Db::verify DB_AGGRESSIVE
Db::put DB_APPEND
DbEnv::log_archive DB_ARCH_ABS
DbEnv::log_archive DB_ARCH_DATA
DbEnv::log_archive DB_ARCH_LOG
DbEnv::log_archive DB_ARCH_REMOVE
Db::associate DB_AUTO_COMMIT
Db::del DB_AUTO_COMMIT
Db::get DB_AUTO_COMMIT
Db::open DB_AUTO_COMMIT
Db::put DB_AUTO_COMMIT
Db::truncate DB_AUTO_COMMIT
DbEnv::dbremove DB_AUTO_COMMIT
DbEnv::dbrename DB_AUTO_COMMIT
DbEnv::set_flags DB_AUTO_COMMIT
Dbc::put DB_BEFORE
Db::open DB_BTREE
DbEnv::set_flags DB_CDB_ALLDB
Db::set_flags DB_CHKSUM
DB_CONFIG
Db::get DB_CONSUME
Db::get DB_CONSUME_WAIT
Db::associate DB_CREATE
Db::open DB_CREATE
DbEnv::open DB_CREATE
DbMpoolFile::open DB_CREATE
Dbc::get DB_CURRENT
Dbc::put DB_CURRENT
DbLogc::get DB_CURRENT
Db DB_CXX_NO_EXCEPTIONS
DbEnv DB_CXX_NO_EXCEPTIONS
DB_DBT_APPMALLOC
Dbt DB_DBT_MALLOC
Dbt DB_DBT_PARTIAL
Dbt DB_DBT_REALLOC
Dbt DB_DBT_USERMEM
DbMpoolFile::open DB_DIRECT
DbEnv::set_flags DB_DIRECT_DB
DbEnv::set_flags DB_DIRECT_LOG
Db::cursor DB_DIRTY_READ
Db::get DB_DIRTY_READ
Db::join DB_DIRTY_READ
Db::open DB_DIRTY_READ
Dbc::get DB_DIRTY_READ
DbEnv::txn_begin DB_DIRTY_READ
DB_DONOTINDEX
Db::set_flags DB_DUP
Db::set_flags DB_DUPSORT
Db::upgrade DB_DUPSORT
DB_EID_BROADCAST
Db::set_flags DB_ENCRYPT
Db::set_encrypt DB_ENCRYPT_AES
DbEnv::set_encrypt DB_ENCRYPT_AES
Db::open DB_EXCL
Db::stat DB_FAST_STAT
Dbc::get DB_FIRST
DbLogc::get DB_FIRST
DbEnv::txn_recover DB_FIRST
DbEnv::log_put DB_FLUSH
DbEnv::remove DB_FORCE
DbEnv::txn_checkpoint DB_FORCE
Db::get DB_GET_BOTH
Dbc::get DB_GET_BOTH
Dbc::get DB_GET_BOTH_RANGE
Dbc::get DB_GET_RECNO
Db::open DB_HASH
File naming DB_HOME
File naming db_home
DbEnv::open DB_INIT_CDB
DbEnv::open DB_INIT_LOCK
DbEnv::open DB_INIT_LOG
DbEnv::open DB_INIT_MPOOL
DbEnv::open DB_INIT_REP
DbEnv::open DB_INIT_TXN
DbEnv::open DB_JOINENV
Db::join DB_JOIN_ITEM
Dbc::get DB_JOIN_ITEM
Db::join DB_JOIN_NOSORT
Error returns to applications DB_KEYEMPTY
Error returns to applications DB_KEYEXIST
Dbc::put DB_KEYFIRST
Dbc::put DB_KEYLAST
Dbc::get DB_LAST
DbLogc::get DB_LAST
DbEnv::open DB_LOCKDOWN
DB_LOCK_DEADLOCK
Error returns to applications DB_LOCK_DEADLOCK
DbEnv::set_lk_detect DB_LOCK_DEFAULT
DbEnv::lock_detect DB_LOCK_DEFAULT
DbEnv::set_lk_detect DB_LOCK_EXPIRE
DbEnv::lock_detect DB_LOCK_EXPIRE
DbEnv::lock_vec DB_LOCK_GET
DbEnv::lock_vec DB_LOCK_GET_TIMEOUT
DbEnv::lock_vec DB_LOCK_IREAD
DbEnv::lock_vec DB_LOCK_IWR
DbEnv::lock_vec DB_LOCK_IWRITE
DbEnv::set_lk_detect DB_LOCK_MAXLOCKS
DbEnv::lock_detect DB_LOCK_MAXLOCKS
DbEnv::set_lk_detect DB_LOCK_MINLOCKS
DbEnv::lock_detect DB_LOCK_MINLOCKS
DbEnv::set_lk_detect DB_LOCK_MINWRITE
DbEnv::lock_detect DB_LOCK_MINWRITE
Error returns to applications DB_LOCK_NOTGRANTED
DbEnv::lock_get DB_LOCK_NOWAIT
DbEnv::lock_vec DB_LOCK_NOWAIT
DbEnv::set_lk_detect DB_LOCK_OLDEST
DbEnv::lock_detect DB_LOCK_OLDEST
DbEnv::lock_vec DB_LOCK_PUT
DbEnv::lock_vec DB_LOCK_PUT_ALL
DbEnv::lock_vec DB_LOCK_PUT_OBJ
DbEnv::set_lk_detect DB_LOCK_RANDOM
DbEnv::lock_detect DB_LOCK_RANDOM
DbEnv::lock_vec DB_LOCK_READ
DbEnv::lock_vec DB_LOCK_TIMEOUT
DbEnv::lock_vec DB_LOCK_WRITE
DbEnv::set_lk_detect DB_LOCK_YOUNGEST
DbEnv::lock_detect DB_LOCK_YOUNGEST
DbEnv::set_flags DB_LOG_AUTOREMOVE
DbMpoolFile::put DB_MPOOL_CLEAN
DbMpoolFile::set DB_MPOOL_CLEAN
DbMpoolFile::get DB_MPOOL_CREATE
DbMpoolFile::put DB_MPOOL_DIRTY
DbMpoolFile::set DB_MPOOL_DIRTY
DbMpoolFile::put DB_MPOOL_DISCARD
DbMpoolFile::set DB_MPOOL_DISCARD
DbMpoolFile::get DB_MPOOL_LAST
DbMpoolFile::get DB_MPOOL_NEW
DbMpoolFile::set_flags DB_MPOOL_NOFILE
Db::get DB_MULTIPLE
Dbc::get DB_MULTIPLE
Dbc::get DB_MULTIPLE_KEY
Dbc::get DB_NEXT
DbLogc::get DB_NEXT
DbEnv::txn_recover DB_NEXT
Dbc::get DB_NEXT_DUP
Dbc::get DB_NEXT_NODUP
Db::put DB_NODUPDATA
Dbc::put DB_NODUPDATA
DbEnv::set_flags DB_NOLOCKING
Db::open DB_NOMMAP
DbEnv::set_flags DB_NOMMAP
DbMpoolFile::open DB_NOMMAP
Db::verify DB_NOORDERCHK
Db::put DB_NOOVERWRITE
DbEnv::set_flags DB_NOPANIC
DB_NOSERVER
DbEnv::set_rpc_server DB_NOSERVER
DbEnv::set_rpc_server DB_NOSERVER_HOME
DB_NOSERVER_ID
DbEnv::set_rpc_server DB_NOSERVER_ID
Db::close DB_NOSYNC
Error returns to applications DB_NOTFOUND
DbMpoolFile::open DB_ODDFILESIZE
Db::upgrade DB_OLD_VERSION
Db::verify DB_ORDERCHKONLY
DbEnv::set_flags DB_OVERWRITE
DB_PAGE_NOTFOUND
DbEnv::set_flags DB_PANIC_ENVIRONMENT
Dbc::dup DB_POSITION
Dbc::get DB_PREV
DbLogc::get DB_PREV
Dbc::get DB_PREV_NODUP
Db::verify DB_PRINTABLE
DbMpoolFile::set_priority DB_PRIORITY_DEFAULT
DbMpoolFile::set_priority DB_PRIORITY_HIGH
DbMpoolFile::set_priority DB_PRIORITY_LOW
DbMpoolFile::set_priority DB_PRIORITY_VERY_HIGH
DbMpoolFile::set_priority DB_PRIORITY_VERY_LOW
DbEnv::open DB_PRIVATE
Db::open DB_QUEUE
Db::open DB_RDONLY
DbMpoolFile::open DB_RDONLY
Db::open DB_RECNO
Db::set_flags DB_RECNUM
DbEnv::open DB_RECOVER
DbEnv::set_feedback DB_RECOVER
DbEnv::open DB_RECOVER_FATAL
DbEnv::set_flags DB_REGION_INIT
Db::set_flags DB_RENUMBER
DbEnv::rep_start DB_REP_CLIENT
DbEnv::rep_process_message DB_REP_DUPMASTER
DbEnv::rep_process_message DB_REP_HOLDELECTION
DbEnv::rep_process_message DB_REP_ISPERM
DbEnv::rep_start DB_REP_LOGSONLY
DbEnv::rep_start DB_REP_MASTER
DbEnv::rep_process_message DB_REP_NEWMASTER
DbEnv::rep_process_message DB_REP_NEWSITE
DbEnv::set_rep_transport DB_REP_NOBUFFER
DbEnv::rep_process_message DB_REP_NOTPERM
DbEnv::rep_process_message DB_REP_OUTDATED
DbEnv::set_rep_transport DB_REP_PERMANENT
DB_REP_UNAVAIL
Db::set_flags DB_REVSPLITOFF
Db::get DB_RMW
Db::join DB_RMW
Dbc::get DB_RMW
DbEnv DB_RPCCLIENT
Error returns to applications DB_RUNRECOVERY
Db::verify DB_SALVAGE
Dbc::get DB_SET
DbLogc::get DB_SET
DbEnv::set_timeout DB_SET_LOCK_TIMEOUT
DbTxn::set_timeout DB_SET_LOCK_TIMEOUT
Dbc::get DB_SET_RANGE
Db::get DB_SET_RECNO
Dbc::get DB_SET_RECNO
DbEnv::set_timeout DB_SET_TXN_TIMEOUT
DbTxn::set_timeout DB_SET_TXN_TIMEOUT
Db::set_flags DB_SNAPSHOT
DbEnv::lock_stat DB_STAT_CLEAR
DbEnv::log_stat DB_STAT_CLEAR
DbEnv::memp_stat DB_STAT_CLEAR
DbEnv::rep_stat DB_STAT_CLEAR
DbEnv::txn_stat DB_STAT_CLEAR
DbEnv::open DB_SYSTEM_MEM
Db::open DB_THREAD
DbEnv::open DB_THREAD
DbEnv::set_flags DB_TIME_NOTGRANTED
Db::open DB_TRUNCATE
DbEnv::set_app_dispatch DB_TXN_ABORT
DbEnv::set_app_dispatch DB_TXN_APPLY
DbEnv::set_app_dispatch DB_TXN_BACKWARD_ROLL
DbEnv::set_app_dispatch DB_TXN_FORWARD_ROLL
DbEnv::set_flags DB_TXN_NOSYNC
DbEnv::txn_begin DB_TXN_NOSYNC
DbTxn::commit DB_TXN_NOSYNC
Db::set_flags DB_TXN_NOT_DURABLE
DbEnv::set_flags DB_TXN_NOT_DURABLE
DbEnv::txn_begin DB_TXN_NOWAIT
DbEnv::set_app_dispatch DB_TXN_PRINT
DbEnv::txn_begin DB_TXN_SYNC
DbTxn::commit DB_TXN_SYNC
DbEnv::set_flags DB_TXN_WRITE_NOSYNC
Db::open DB_UNKNOWN
Db::set_feedback DB_UPGRADE
DbEnv::open DB_USE_ENVIRON
DbEnv::remove DB_USE_ENVIRON
DbEnv::open DB_USE_ENVIRON_ROOT
DbEnv::remove DB_USE_ENVIRON_ROOT
DbEnv::set_verbose DB_VERB_CHKPOINT
DbEnv::set_verbose DB_VERB_DEADLOCK
DbEnv::set_verbose DB_VERB_RECOVERY
DbEnv::set_verbose DB_VERB_REPLICATION
DbEnv::set_verbose DB_VERB_WAITSFOR
Db::set_feedback DB_VERIFY
DB_VERIFY_BAD
Db::cursor DB_WRITECURSOR
Db DB_XA_CREATE
DB_XIDDATASIZE
DbEnv::set_flags DB_YIELDCPU
deadlocks
introduction to debugging
debugging applications
degrees of isolation
deleting records
deleting records with a cursor
dirty reads
disk space requirements
Distributed Transactions
double buffering
duplicate data items
sorted duplicate data items
duplicate data items
duplicating a cursor
turn off database durability
turn off durability in the database environment
emptying a database
database encryption
encryption
turn off access to a database environment
database environment
use environment constants in naming
use environment constants in naming
database environment FAQ
fault database environment in during open
environment variables
introduction to database environments
equality join
error handling
error name space
error returns
selecting a Queue extent size
hot failover
Java API FAQ
Java FAQ
Tcl FAQ
XA FAQ
configuring without large file support
file utility
returning pages to the filesystem
recovery and filesystem operations
remote filesystems
page fill factor
configuring a small memory footprint library
Berkeley DB free-threaded handles
FreeBSD
specifying a database hash
hash table size
HP-UX
secondary indices
installing Berkeley DB for UNIX systems
interface compatibility
IRIX
degrees of isolation
configuring the Java API
Java API FAQ
Java compatibility
Java configuration
Java FAQ
equality join
key/data pairs
retrieved key/data permanence
database limits
Linux
changing compile or load options
DbEnv::lock_vec lock
standard lock modes
ignore locking
page-level locking
two-phase locking
locking and non-Berkeley DB applications
locking configuration
Berkeley DB Transactional Data Store locking conventions
Berkeley DB Concurrent Data Store locking conventions
configure locking for Berkeley DB Concurrent Data Store
locking granularity
introduction to the locking subsystem
sizing the locking subsystem
locking without transactions
log file limits
automatic log file removal
log file removal
logging configuration
introduction to the logging subsystem
retrieving Btree records by logical record @number
Mac OS X
turn off database file memory mapping
memory pool configuration
introduction to the memory pool subsystem
configuring for MinGW
DbEnv::lock_vec mode
Berkeley DB library name spaces
file naming
natural join
NFS problems
retrieving Btree records by logical record number
DbEnv::lock_vec obj
DbEnv::lock_vec op
opening a database
OSF/1
selecting a page size
ignore database environment panic
partial record storage and retrieval
Patches, Updates and Change logs
Perl
retrieved key/data permanence
task/thread priority
Sleepycat Software's Berkeley DB products
building for QNX
QNX
dirty reads
accessing Btree records by record number
logical record numbers
managing record-based databases
logically renumbering records
Berkeley DB recoverability
renumbering records in Recno databases
repeatable read
introduction to replication
Resource Manager
XA Resource Manager
retrieving records
retrieving records in bulk
retrieving records with a cursor
turn off reverse splits in Btree databases
RPC client
configuring a RPC client/server
introduction to rpc client/server
RPC FAQ
RPC server
RPM
database salvage
SCO
Berkeley DB handle scope
secondary indices
security
disabling shared libraries
shared libraries
signal handling
Sleepycat Software
Solaris
source code layout
turn off reverse splits in Btree databases
cursor stability
disabling static libraries
database statistics
storing records
storing records with a cursor
configure for stress testing
SunOS
loading Berkeley DB with Tcl
using Berkeley DB with Tcl
configuring the Tcl API
Tcl API programming notes
Tcl FAQ
temporary files
configuring the test suite
running the test suite
running the test suite under UNIX
running the test suite under Windows
text backing files
pre-loading text files into Recno databases
loading text into databases
dumping/loading text to/from databases
building threaded applications
lock timeouts
transaction timeouts
turn off synchronous transaction commit
turn off synchronous transaction commit
transaction configuration
transaction FAQ
transaction limits
Transaction Manager
administering transaction protected applications
archival in transaction protected applications
checkpoints in transaction protected applications
deadlock detection in transaction protected applications
recovery in transaction protected applications
introduction to the transaction subsystem
transaction throughput
transaction tuning
Transactional Data Store
nested transactions
truncating a database
access method tuning
transaction tuning
tutorial
configuring Berkeley DB with the Tuxedo System
Ultrix
building for UNIX
building for UNIX FAQ
configuring Berkeley DB for UNIX systems
Patches, Updates and Change logs
upgrading databases
utilities
database verification
building for VxWorks FAQ
VxWorks notes
running the test suite under Windows
building for Windows FAQ
Windows notes
XA FAQ
XA Resource Manager
__db.001
Building a small memory footprint library  --disable-cryptography
Building a small memory footprint library  --disable-hash
Configuring Berkeley DB  --disable-largefile
Building a small memory footprint library  --disable-queue
Building a small memory footprint library  --disable-replication
Configuring Berkeley DB  --disable-shared
Configuring Berkeley DB  --disable-static
Building a small memory footprint library  --disable-statistics
Building a small memory footprint library  --disable-verify
Configuring Berkeley DB  --enable-compat185
Configuring Berkeley DB  --enable-cxx
Configuring Berkeley DB  --enable-debug
Configuring Berkeley DB  --enable-debug_rop
Configuring Berkeley DB  --enable-debug_wop
Configuring Berkeley DB  --enable-diagnostic
Configuring Berkeley DB  --enable-dump185
Configuring Berkeley DB  --enable-java
Configuring Berkeley DB  --enable-posixmutexes
Configuring Berkeley DB  --enable-rpc
Configuring Berkeley DB  --enable-smallbuild
Building a small memory footprint library  --enable-smallbuild
Configuring Berkeley DB  --enable-tcl
Configuring Berkeley DB  --enable-test
Configuring Berkeley DB  --enable-uimutexes
Configuring Berkeley DB  --enable-umrw
Configuring Berkeley DB  --with-mutex=MUTEX
Configuring Berkeley DB  --with-mutexalign=ALIGNMENT
Configuring Berkeley DB  --with-rpm=ARCHIVE
Configuring Berkeley DB  --with-tcl=DIR
Configuring Berkeley DB  --with-uniquename=NAME
 /etc/magic
configuring Berkeley DB  1.85 API compatibility
building a utility to dump Berkeley DB  1.85 databases
selecting an  access method
 access method FAQ
 access method tuning
introduction to the  access methods
 AIX
data  alignment
 Apache
programmatic  APIs
hot  backup
introduction to the  buffer pool subsystem
turn off system  buffering
turn off system  buffering for database files
turn off system  buffering for log files
turn off system  buffering for log files
 building for QNX
 building for UNIX
 building for UNIX FAQ
 building for VxWorks
 building for VxWorks AE
 building for VxWorks FAQ
 building for Win32
 building for Windows FAQ
selecting a  byte order
configuring the  C++ API
flushing the database  cache
selecting a  cache size
introduction to the memory  cache subsystem
 catastrophic recovery
Patches, Updates and  Change logs
database page  checksum
 closing a cursor
 closing a database
database  compaction
specifying a Btree  comparison function
changing  compile or load options
 Concurrent Data Store
database environment  configuration
 configuring Berkeley DB for UNIX systems
salvaging  corrupted databases
 counting data items for a key
closing a  cursor
deleting records with a  cursor
duplicating a  cursor
retrieving records with a  cursor
storing records with a  cursor
 cursor stability
 cursor stability
database  cursors
Dbc::put  DB_AFTER
Db::verify  DB_AGGRESSIVE
Db::put  DB_APPEND
DbEnv::log_archive  DB_ARCH_ABS
DbEnv::log_archive  DB_ARCH_DATA
DbEnv::log_archive  DB_ARCH_LOG
DbEnv::log_archive  DB_ARCH_REMOVE
Db::associate  DB_AUTO_COMMIT
Db::del  DB_AUTO_COMMIT
Db::get  DB_AUTO_COMMIT
Db::open  DB_AUTO_COMMIT
Db::put  DB_AUTO_COMMIT
Db::truncate  DB_AUTO_COMMIT
DbEnv::dbremove  DB_AUTO_COMMIT
DbEnv::dbrename  DB_AUTO_COMMIT
DbEnv::set_flags  DB_AUTO_COMMIT
DbSequence::get  DB_AUTO_COMMIT
DbSequence::open  DB_AUTO_COMMIT
DbSequence::remove  DB_AUTO_COMMIT
Dbc::put  DB_BEFORE
Db::open  DB_BTREE
DbEnv::set_flags  DB_CDB_ALLDB
Db::set_flags  DB_CHKSUM
 DB_CONFIG
Db::get  DB_CONSUME
Db::get  DB_CONSUME_WAIT
Db::associate  DB_CREATE
Db::open  DB_CREATE
DbEnv::open  DB_CREATE
DbMpoolFile::open  DB_CREATE
DbSequence::open  DB_CREATE
Dbc::get  DB_CURRENT
Dbc::put  DB_CURRENT
DbLogc::get  DB_CURRENT
Db  DB_CXX_NO_EXCEPTIONS
DbEnv  DB_CXX_NO_EXCEPTIONS
 DB_DBT_APPMALLOC
Dbt  DB_DBT_MALLOC
Dbt  DB_DBT_PARTIAL
Dbt  DB_DBT_REALLOC
Dbt  DB_DBT_USERMEM
Db::cursor  DB_DEGREE_2
Db::get  DB_DEGREE_2
Db::stat  DB_DEGREE_2
DbEnv::txn_begin  DB_DEGREE_2
DbMpoolFile::open  DB_DIRECT
DbEnv::set_flags  DB_DIRECT_DB
DbEnv::set_flags  DB_DIRECT_LOG
Db::cursor  DB_DIRTY_READ
Db::get  DB_DIRTY_READ
Db::join  DB_DIRTY_READ
Db::open  DB_DIRTY_READ
Db::stat  DB_DIRTY_READ
Dbc::get  DB_DIRTY_READ
DbEnv::txn_begin  DB_DIRTY_READ
 DB_DONOTINDEX
DbEnv::set_flags  DB_DSYNC_LOG
Db::set_flags  DB_DUP
Db::set_flags  DB_DUPSORT
Db::upgrade  DB_DUPSORT
 DB_EID_BROADCAST
Db::set_flags  DB_ENCRYPT
Db::set_encrypt  DB_ENCRYPT_AES
DbEnv::set_encrypt  DB_ENCRYPT_AES
Db::open  DB_EXCL
DbSequence::open  DB_EXCL
Db::stat  DB_FAST_STAT
Dbc::get  DB_FIRST
DbLogc::get  DB_FIRST
DbEnv::txn_recover  DB_FIRST
DbEnv::log_put  DB_FLUSH
DbEnv::remove  DB_FORCE
DbEnv::txn_checkpoint  DB_FORCE
Db::get  DB_GET_BOTH
Dbc::get  DB_GET_BOTH
Dbc::get  DB_GET_BOTH_RANGE
Dbc::get  DB_GET_RECNO
Db::open  DB_HASH
File naming  DB_HOME
File naming  db_home
DbEnv::open  DB_INIT_CDB
DbEnv::open  DB_INIT_LOCK
DbEnv::open  DB_INIT_LOG
DbEnv::open  DB_INIT_MPOOL
DbEnv::open  DB_INIT_REP
DbEnv::open  DB_INIT_TXN
Db::set_flags  DB_INORDER
DbEnv::open  DB_JOINENV
Db::join  DB_JOIN_ITEM
Dbc::get  DB_JOIN_ITEM
Db::join  DB_JOIN_NOSORT
Error returns to applications  DB_KEYEMPTY
Error returns to applications  DB_KEYEXIST
Dbc::put  DB_KEYFIRST
Dbc::put  DB_KEYLAST
Dbc::get  DB_LAST
DbLogc::get  DB_LAST
DbEnv::open  DB_LOCKDOWN
 DB_LOCK_DEADLOCK
Error returns to applications  DB_LOCK_DEADLOCK
DbEnv::set_lk_detect  DB_LOCK_DEFAULT
DbEnv::lock_detect  DB_LOCK_DEFAULT
DbEnv::set_lk_detect  DB_LOCK_EXPIRE
DbEnv::lock_detect  DB_LOCK_EXPIRE
DbEnv::lock_vec  DB_LOCK_GET
DbEnv::lock_vec  DB_LOCK_GET_TIMEOUT
DbEnv::lock_vec  DB_LOCK_IREAD
DbEnv::lock_vec  DB_LOCK_IWR
DbEnv::lock_vec  DB_LOCK_IWRITE
DbEnv::set_lk_detect  DB_LOCK_MAXLOCKS
DbEnv::lock_detect  DB_LOCK_MAXLOCKS
DbEnv::set_lk_detect  DB_LOCK_MAXWRITE
DbEnv::lock_detect  DB_LOCK_MAXWRITE
DbEnv::set_lk_detect  DB_LOCK_MINLOCKS
DbEnv::lock_detect  DB_LOCK_MINLOCKS
DbEnv::set_lk_detect  DB_LOCK_MINWRITE
DbEnv::lock_detect  DB_LOCK_MINWRITE
Error returns to applications  DB_LOCK_NOTGRANTED
DbEnv::lock_get  DB_LOCK_NOWAIT
DbEnv::lock_vec  DB_LOCK_NOWAIT
DbEnv::set_lk_detect  DB_LOCK_OLDEST
DbEnv::lock_detect  DB_LOCK_OLDEST
DbEnv::lock_vec  DB_LOCK_PUT
DbEnv::lock_vec  DB_LOCK_PUT_ALL
DbEnv::lock_vec  DB_LOCK_PUT_OBJ
DbEnv::set_lk_detect  DB_LOCK_RANDOM
DbEnv::lock_detect  DB_LOCK_RANDOM
DbEnv::lock_vec  DB_LOCK_READ
DbEnv::lock_vec  DB_LOCK_TIMEOUT
DbEnv::lock_vec  DB_LOCK_WRITE
DbEnv::set_lk_detect  DB_LOCK_YOUNGEST
DbEnv::lock_detect  DB_LOCK_YOUNGEST
DbEnv::set_flags  DB_LOG_AUTOREMOVE
DbEnv::set_flags  DB_LOG_BUFFER_FULL
DbEnv::set_flags  DB_LOG_INMEMORY
DbMpoolFile::put  DB_MPOOL_CLEAN
DbMpoolFile::set  DB_MPOOL_CLEAN
DbMpoolFile::get  DB_MPOOL_CREATE
DbMpoolFile::put  DB_MPOOL_DIRTY
DbMpoolFile::set  DB_MPOOL_DIRTY
DbMpoolFile::put  DB_MPOOL_DISCARD
DbMpoolFile::set  DB_MPOOL_DISCARD
DbMpoolFile::get  DB_MPOOL_LAST
DbMpoolFile::get  DB_MPOOL_NEW
DbMpoolFile::set_flags  DB_MPOOL_NOFILE
DbMpoolFile::set_flags  DB_MPOOL_UNLINK
Db::get  DB_MULTIPLE
Dbc::get  DB_MULTIPLE
Dbc::get  DB_MULTIPLE_KEY
Dbc::get  DB_NEXT
DbLogc::get  DB_NEXT
DbEnv::txn_recover  DB_NEXT
Dbc::get  DB_NEXT_DUP
Dbc::get  DB_NEXT_NODUP
Db::put  DB_NODUPDATA
Dbc::put  DB_NODUPDATA
DbEnv::set_flags  DB_NOLOCKING
Db::open  DB_NOMMAP
DbEnv::set_flags  DB_NOMMAP
DbMpoolFile::open  DB_NOMMAP
Db::verify  DB_NOORDERCHK
Db::put  DB_NOOVERWRITE
DbEnv::set_flags  DB_NOPANIC
 DB_NOSERVER
DbEnv::set_rpc_server  DB_NOSERVER
DbEnv::set_rpc_server  DB_NOSERVER_HOME
 DB_NOSERVER_ID
DbEnv::set_rpc_server  DB_NOSERVER_ID
Db::close  DB_NOSYNC
Error returns to applications  DB_NOTFOUND
DbMpoolFile::open  DB_ODDFILESIZE
Db::upgrade  DB_OLD_VERSION
Db::verify  DB_ORDERCHKONLY
DbEnv::set_flags  DB_OVERWRITE
 DB_PAGE_NOTFOUND
DbEnv::set_flags  DB_PANIC_ENVIRONMENT
Dbc::dup  DB_POSITION
Dbc::get  DB_PREV
DbLogc::get  DB_PREV
Dbc::get  DB_PREV_NODUP
Db::verify  DB_PRINTABLE
DbMpoolFile::set_priority  DB_PRIORITY_DEFAULT
DbMpoolFile::set_priority  DB_PRIORITY_HIGH
DbMpoolFile::set_priority  DB_PRIORITY_LOW
DbMpoolFile::set_priority  DB_PRIORITY_VERY_HIGH
DbMpoolFile::set_priority  DB_PRIORITY_VERY_LOW
DbEnv::open  DB_PRIVATE
Db::open  DB_QUEUE
Db::open  DB_RDONLY
DbMpoolFile::open  DB_RDONLY
Db::open  DB_RECNO
Db::set_flags  DB_RECNUM
DbEnv::open  DB_RECOVER
DbEnv::set_feedback  DB_RECOVER
DbEnv::open  DB_RECOVER_FATAL
DbEnv::set_flags  DB_REGION_INIT
Db::set_flags  DB_RENUMBER
DbEnv::rep_start  DB_REP_CLIENT
DbEnv::rep_process_message  DB_REP_DUPMASTER
DbEnv::rep_process_message  DB_REP_HOLDELECTION
DbEnv::rep_process_message  DB_REP_ISPERM
DbEnv::rep_start  DB_REP_MASTER
DbEnv::rep_process_message  DB_REP_NEWMASTER
DbEnv::rep_process_message  DB_REP_NEWSITE
DbEnv::set_rep_transport  DB_REP_NOBUFFER
DbEnv::rep_process_message  DB_REP_NOTPERM
DbEnv::set_rep_transport  DB_REP_PERMANENT
DbEnv::rep_process_message  DB_REP_STARTUPDONE
 DB_REP_UNAVAIL
Db::set_flags  DB_REVSPLITOFF
Db::get  DB_RMW
Db::join  DB_RMW
Dbc::get  DB_RMW
DbEnv  DB_RPCCLIENT
Error returns to applications  DB_RUNRECOVERY
Db::verify  DB_SALVAGE
DbSequence::set_flags  DB_SEQ_DEC
DbSequence::set_flags  DB_SEQ_INC
DbSequence::set_flags  DB_SEQ_WRAP
Dbc::get  DB_SET
DbLogc::get  DB_SET
DbEnv::set_timeout  DB_SET_LOCK_TIMEOUT
DbTxn::set_timeout  DB_SET_LOCK_TIMEOUT
Dbc::get  DB_SET_RANGE
Db::get  DB_SET_RECNO
Dbc::get  DB_SET_RECNO
DbEnv::set_timeout  DB_SET_TXN_TIMEOUT
DbTxn::set_timeout  DB_SET_TXN_TIMEOUT
Db::set_flags  DB_SNAPSHOT
Db::stat  DB_STAT_ALL
DbEnv::stat_print  DB_STAT_ALL
DbEnv::lock_stat  DB_STAT_ALL
DbEnv::log_stat  DB_STAT_ALL
DbEnv::memp_stat  DB_STAT_ALL
DbEnv::rep_stat  DB_STAT_ALL
DbEnv::txn_stat  DB_STAT_ALL
DbEnv::lock_stat  DB_STAT_CLEAR
DbEnv::log_stat  DB_STAT_CLEAR
DbEnv::memp_stat  DB_STAT_CLEAR
DbEnv::rep_stat  DB_STAT_CLEAR
DbSequence::stat  DB_STAT_CLEAR
DbEnv::txn_stat  DB_STAT_CLEAR
DbEnv::lock_stat  DB_STAT_LOCK_CONF
DbEnv::lock_stat  DB_STAT_LOCK_LOCKERS
DbEnv::lock_stat  DB_STAT_LOCK_OBJECTS
DbEnv::lock_stat  DB_STAT_LOCK_PARAMS
DbEnv::memp_stat  DB_STAT_MEMP_HASH
DbEnv::stat_print  DB_STAT_SUBSYSTEM
DbEnv::open  DB_SYSTEM_MEM
Db::open  DB_THREAD
DbEnv::open  DB_THREAD
DbSequence::open  DB_THREAD
DbEnv::set_flags  DB_TIME_NOTGRANTED
Db::open  DB_TRUNCATE
DbEnv::set_app_dispatch  DB_TXN_ABORT
DbEnv::set_app_dispatch  DB_TXN_APPLY
DbEnv::set_app_dispatch  DB_TXN_BACKWARD_ROLL
DbEnv::set_app_dispatch  DB_TXN_FORWARD_ROLL
DbEnv::set_flags  DB_TXN_NOSYNC
DbSequence::get  DB_TXN_NOSYNC
DbSequence::remove  DB_TXN_NOSYNC
DbEnv::txn_begin  DB_TXN_NOSYNC
DbTxn::commit  DB_TXN_NOSYNC
Db::set_flags  DB_TXN_NOT_DURABLE
DbEnv::txn_begin  DB_TXN_NOWAIT
DbEnv::set_app_dispatch  DB_TXN_PRINT
DbEnv::txn_begin  DB_TXN_SYNC
DbTxn::commit  DB_TXN_SYNC
DbEnv::set_flags  DB_TXN_WRITE_NOSYNC
Db::open  DB_UNKNOWN
Db::set_feedback  DB_UPGRADE
DbEnv::open  DB_USE_ENVIRON
DbEnv::remove  DB_USE_ENVIRON
DbEnv::open  DB_USE_ENVIRON_ROOT
DbEnv::remove  DB_USE_ENVIRON_ROOT
DbEnv::set_verbose  DB_VERB_DEADLOCK
DbEnv::set_verbose  DB_VERB_RECOVERY
DbEnv::set_verbose  DB_VERB_REPLICATION
DbEnv::set_verbose  DB_VERB_WAITSFOR
Db::set_feedback  DB_VERIFY
 DB_VERIFY_BAD
 DB_VERSION_MISMATCH
Db::cursor  DB_WRITECURSOR
Db  DB_XA_CREATE
 DB_XIDDATASIZE
DbEnv::set_flags  DB_YIELDCPU
 deadlocks
introduction to  debugging
 debugging applications
 degree 2 isolation
 degrees of isolation
 deleting records
 deleting records with a cursor
 dirty reads
 disk space requirements
 Distributed Transactions
 double buffering
 duplicate data items
sorted  duplicate data items
 duplicate data items
 duplicating a cursor
turn off database  durability
 emptying a database
database  encryption
 encryption
turn off access to a database  environment
database  environment
use  environment constants in naming
use  environment constants in naming
database  environment FAQ
fault database  environment in during open
 environment variables
introduction to database  environments
 equality join
 error handling
 error name space
 error returns
selecting a Queue  extent size
hot  failover
Java  FAQ
Tcl  FAQ
XA  FAQ
configuring without large  file support
 file utility
returning pages to the  filesystem
recovery and  filesystem operations
remote  filesystems
page  fill factor
configuring a small memory  footprint library
Berkeley DB  free-threaded handles
 FreeBSD
specifying a database  hash
 hash table size
 HP-UX
secondary  indices
 installing Berkeley DB for UNIX systems
 interface compatibility
 IRIX
degrees of  isolation
degree 2  isolation
configuring the  Java API
 Java compatibility
 Java configuration
 Java FAQ
equality  join
 key/data pairs
retrieved  key/data permanence
database  limits
 Linux
changing compile or  load options
DbEnv::lock_vec  lock
standard  lock modes
ignore  locking
page-level  locking
two-phase  locking
 locking and non-Berkeley DB applications
 locking configuration
Berkeley DB Transactional Data Store  locking conventions
Berkeley DB Concurrent Data Store  locking conventions
configure  locking for Berkeley DB Concurrent Data Store
 locking granularity
introduction to the  locking subsystem
sizing the  locking subsystem
 locking without transactions
 log file limits
automatic  log file removal
 log file removal
 logging configuration
introduction to the  logging subsystem
retrieving Btree records by  logical record number
in memory  logs
turn off database file  memory mapping
 memory pool configuration
introduction to the  memory pool subsystem
configuring for  MinGW
 mod
DbEnv::lock_vec  mode
Berkeley DB library  name spaces
file  naming
 natural join
 NFS problems
retrieving Btree records by logical record  number
DbEnv::lock_vec  obj
DbEnv::lock_vec  op
 opening a database
 ordered retrieval of records from Queue databases
 OSF/1
selecting a  page size
ignore database environment  panic
 partial record storage and retrieval
 Patches, Updates and Change logs
 Perl
retrieved key/data  permanence
 PHP
task/thread  priority
Sleepycat Software's Berkeley DB  products
building for  QNX
 QNX
dirty  reads
accessing Btree records by  record number
logical  record numbers
managing  record-based databases
logically renumbering  records
Berkeley DB  recoverability
 renumbering records in Recno databases
 repeatable read
introduction to  replication
 Resource Manager
XA  Resource Manager
 retrieving records
 retrieving records in bulk
 retrieving records with a cursor
turn off  reverse splits in Btree databases
 RPC client
configuring an  RPC client/server
introduction to  rpc client/server
 RPC FAQ
 RPC server
database  salvage
 SCO
Berkeley DB handle  scope
 secondary indices
 security
introduction to  sequences
disabling  shared libraries
 shared libraries
 signal handling
 Sleepycat Software
 Solaris
 source code layout
turn off reverse  splits in Btree databases
cursor  stability
cursor  stability
disabling  static libraries
database  statistics
 storing records
 storing records with a cursor
configure for  stress testing
 SunOS
loading Berkeley DB with  Tcl
using Berkeley DB with  Tcl
configuring the  Tcl API
 Tcl API programming notes
 Tcl FAQ
 temporary files
configuring the  test suite
running the  test suite
running the  test suite under UNIX
running the  test suite under Windows
 text backing files
pre-loading  text files into Recno databases
loading  text into databases
dumping/loading  text to/from databases
building  threaded applications
lock  timeouts
transaction  timeouts
turn off synchronous  transaction commit
turn off synchronous  transaction commit
 transaction configuration
 transaction FAQ
 transaction limits
 Transaction Manager
administering  transaction protected applications
archival in  transaction protected applications
checkpoints in  transaction protected applications
deadlock detection in  transaction protected applications
recovery in  transaction protected applications
introduction to the  transaction subsystem
 transaction throughput
 transaction tuning
 Transactional Data Store
nested  transactions
 truncating a database
access method  tuning
transaction  tuning
configuring Berkeley DB with the  Tuxedo System
 Ultrix
 Unicode
building for  UNIX
building for  UNIX FAQ
configuring Berkeley DB for  UNIX systems
Patches,  Updates and Change logs
 upgrading databases
 Upgrading to release 2.0
 Upgrading to release 3.0
 Upgrading to release 3.1
 Upgrading to release 3.2
 Upgrading to release 3.3
 Upgrading to release 4.0
 Upgrading to release 4.1
 Upgrading to release 4.2
 Upgrading to release 4.3
 utilities
database  verification
building for  VxWorks FAQ
 VxWorks notes
running the test suite under  Windows
building for  Windows FAQ
 Windows notes
 XA FAQ
 XA Resource Manager
 __db.001

Copyright Sleepycat Software diff --git a/db/docs/api_cxx/db_associate.html b/db/docs/api_cxx/db_associate.html index 64b560b21..9e145d1ec 100644 --- a/db/docs/api_cxx/db_associate.html +++ b/db/docs/api_cxx/db_associate.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::associate - + -

Db::associate

API -Ref -
+Ref +


@@ -45,29 +44,28 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

callback
-The callback parameter is a callback function that creates a +
+
callback
The callback parameter is a callback function that creates a secondary key from a given primary key and data pair.

The callback parameter may be NULL if both the primary and secondary database handles were opened with the DB_RDONLY flag.

The callback takes four arguments:

-

-

secondary
The secondary parameter is the database handle for the secondary. -

key
The key parameter is a Dbt referencing the primary key. -

data
The data parameter is a Dbt referencing the primary data +
+
secondary
The secondary parameter is the database handle for the secondary. +
key
The key parameter is a Dbt referencing the primary key. +
data
The data parameter is a Dbt referencing the primary data item. -

result
The result parameter is a zeroed Dbt in which the callback +
result
The result parameter is a zeroed Dbt in which the callback function should fill in data and size fields that describe the secondary key.
- +

If the callback function needs to allocate memory for the data field rather than simply pointing into the primary key or datum, the flags field of the returned Dbt should be set to DB_DBT_APPMALLOC, which indicates that Berkeley DB should free the memory when it is done with it.

- +

If any key/data pair in the primary yields a null secondary key and should be left out of the secondary index, the callback function may optionally return DB_DONOTINDEX. Otherwise, the callback @@ -81,11 +79,10 @@ iterations and range queries will reflect only the corresponding subset of the database. If this is not desirable, the application should ensure that the callback function is well-defined for all possible values and never returns DB_DONOTINDEX.

-

flags
-The flags parameter must be set to 0 or +
flags
The flags parameter must be set to 0 or the following value: -

-

DB_CREATE
If the secondary database is empty, walk through the primary and create +
+
DB_CREATE
If the secondary database is empty, walk through the primary and create an index to it in the empty secondary. This operation is potentially very expensive.

If the secondary database has been opened in an environment configured @@ -103,16 +100,14 @@ not do any special operation ordering.

In addition, the following flag may be set by bitwise inclusively OR'ing it into the flags parameter: -

-

DB_AUTO_COMMIT
Enclose the Db::associate call within a transaction. If the call succeeds, +
+
DB_AUTO_COMMIT
Enclose the Db::associate call within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, the operation will have made no changes.
-

primary
-The associate method called should be a method off a database handle for +
primary
The associate method called should be a method off a database handle for the primary database that is to be indexed. -

secondary
-The secondary parameter should be an open database handle of +
secondary
The secondary parameter should be an open database handle of either a newly created and empty database that is to be used to store a secondary index, or of a database that was previously associated with the same primary and contains a secondary index. Note that it is not @@ -122,8 +117,7 @@ with the DB_THREAD flag it is s of control after the Db::associate method has returned. Note also that either secondary keys must be unique or the secondary database must be configured with support for duplicate data items. -

txnid
-If the operation is to be transaction-protected, +
txnid
If the operation is to be transaction-protected, (other than by specifying the DB_AUTO_COMMIT flag), the txnid parameter is a transaction handle returned from DbEnv::txn_begin; otherwise, NULL. @@ -134,12 +128,12 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

EINVAL
If the secondary database handle has already been associated with this or +
+
EINVAL
If the secondary database handle has already been associated with this or another database handle; the secondary database handle is not open; the primary database has been configured to allow duplicates; or if an invalid flag value or parameter was specified. @@ -153,6 +147,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_class.html b/db/docs/api_cxx/db_class.html index 5f8d256e5..6c96ad7f2 100644 --- a/db/docs/api_cxx/db_class.html +++ b/db/docs/api_cxx/db_class.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db - + -
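The Db::associate page above specifies the secondary-key callback contract and the DB_CREATE/DB_AUTO_COMMIT flags. A minimal C++ sketch of that usage follows; the file name, the choice of secondary key, and the helper names are illustrative assumptions, not taken from the patch.

#include <db_cxx.h>

/* Illustrative callback: the secondary key is simply the primary data item
 * (say, an e-mail address). Pointing into the primary datum is permitted,
 * so DB_DBT_APPMALLOC is not needed here. */
int by_data_callback(Db *secondary, const Dbt *pkey, const Dbt *pdata, Dbt *skey)
{
    skey->set_data(pdata->get_data());
    skey->set_size(pdata->get_size());
    return (0);                 /* return DB_DONOTINDEX to skip a pair */
}

/* Hypothetical setup: both handles share the same transactional DbEnv. */
void build_index(Db &primary, Db &secondary)
{
    secondary.set_flags(DB_DUPSORT);   /* secondary keys need not be unique */
    secondary.open(NULL, "by_data.db", NULL, DB_BTREE,
        DB_CREATE | DB_AUTO_COMMIT, 0664);

    /* DB_CREATE walks a non-empty primary to populate an empty secondary;
     * DB_AUTO_COMMIT wraps the call in its own transaction. */
    primary.associate(NULL, &secondary, by_data_callback,
        DB_CREATE | DB_AUTO_COMMIT);
}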

Db

API -Ref -
+Ref +


@@ -68,9 +67,8 @@ the associated const Db object, if there is one.

and C++ language software. It should not be necessary to use these calls in a purely C++ application.

Parameters

-

-

dbenv
-If no dbenv value is specified, the database is standalone; that +
+
dbenv
If no dbenv value is specified, the database is standalone; that is, it is not part of any Berkeley DB environment.

If a dbenv value is specified, the database is created within the specified Berkeley DB environment. The database access methods @@ -78,11 +76,10 @@ automatically make calls to the other subsystems in Berkeley DB based on the enclosing environment. For example, if the environment has been configured to use locking, the access methods will automatically acquire the correct locks when reading and writing pages of the database.

-

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_CXX_NO_EXCEPTIONS
The Berkeley DB C++ API supports two different error behaviors. By default, +
+
DB_CXX_NO_EXCEPTIONS
The Berkeley DB C++ API supports two different error behaviors. By default, whenever an error occurs, an exception is thrown that encapsulates the error information. This generally allows for cleaner logic for transaction processing because a try block can surround a single @@ -91,7 +88,7 @@ exceptions are not thrown; instead, each individual function returns an error code.

If dbenv is not null, this flag is ignored, and the error behavior of the specified environment is used instead.

-

DB_XA_CREATE
Instead of creating a standalone database, create a database intended to +
DB_XA_CREATE
Instead of creating a standalone database, create a database intended to be accessed via applications running under a X/Open conformant Transaction Manager. The database will be opened in the environment specified by the OPENINFO parameter of the GROUPS section of the ubbconfig file. See the @@ -108,6 +105,6 @@ Db

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_close.html b/db/docs/api_cxx/db_close.html index 6671977ca..bbdedab07 100644 --- a/db/docs/api_cxx/db_close.html +++ b/db/docs/api_cxx/db_close.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::close - + -
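The Db constructor page above contrasts the default exception-throwing behavior with DB_CXX_NO_EXCEPTIONS. A small sketch of both styles, using an illustrative standalone database file name:

#include <iostream>
#include <db_cxx.h>

int main()
{
    /* Error-code style: DB_CXX_NO_EXCEPTIONS makes every method return. */
    Db db(NULL, DB_CXX_NO_EXCEPTIONS);
    int ret = db.open(NULL, "standalone.db", NULL, DB_BTREE, DB_CREATE, 0664);
    if (ret != 0)
        std::cerr << "open: " << DbEnv::strerror(ret) << std::endl;
    else
        db.close(0);

    /* Default style: failures throw DbException instead. */
    try {
        Db db2(NULL, 0);
        db2.open(NULL, "standalone.db", NULL, DB_BTREE, 0, 0);
        db2.close(0);
    } catch (DbException &e) {
        std::cerr << "open: " << e.what() << std::endl;
    }
    return (0);
}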

Db::close

API -Ref -
+Ref +


@@ -53,12 +52,11 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
+
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_NOSYNC
Do not flush cached information to disk. The DB_NOSYNC flag is +
+
DB_NOSYNC
Do not flush cached information to disk. The DB_NOSYNC flag is a dangerous option. It should be set only if the application is doing logging (with transactions) so that the database is recoverable after a system or application crash, or if the database is always generated @@ -81,8 +79,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -94,6 +92,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_cursor.html b/db/docs/api_cxx/db_cursor.html index 24439ce59..01f7aad0f 100644 --- a/db/docs/api_cxx/db_cursor.html +++ b/db/docs/api_cxx/db_cursor.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::cursor - + -
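A sketch of the Db::close rule described above: flush by default, and reach for DB_NOSYNC only when transactional logging (or a throwaway database) already guarantees the on-disk state. The helper name is hypothetical.

#include <db_cxx.h>

/* Close a heap-allocated handle. Passing DB_NOSYNC skips the cache flush,
 * which is only safe when the database is recoverable from the log or is
 * temporary; otherwise pass 0 so dirty pages are written out. */
void close_handle(Db *db, bool recoverable)
{
    db->close(recoverable ? DB_NOSYNC : 0);
    delete db;                 /* the handle may not be reused after close */
}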

Db::cursor

API -Ref -
+Ref +


@@ -35,23 +34,24 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

cursorp
-The cursorp parameter references memory into which +
+
cursorp
The cursorp parameter references memory into which a pointer to the allocated cursor is copied. -

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_DIRTY_READ
All read operations performed by the cursor may return modified but not +
+
DB_DEGREE_2
This cursor will have degree 2 isolation. This ensures the stability +of the current data item read by this cursor but permits data read +by this cursor to be modified or deleted prior to the commit of +the transaction for this cursor. +
DB_DIRTY_READ
All read operations performed by the cursor may return modified but not yet committed data. Silently ignored if the DB_DIRTY_READ flag was not specified when the underlying database was opened. -

DB_WRITECURSOR
Specify that the cursor will be used to update the database. The +
DB_WRITECURSOR
Specify that the cursor will be used to update the database. The underlying database environment must have been opened using the DB_INIT_CDB flag.
-

txnid
-If the operation is to be transaction-protected, +
txnid
If the operation is to be transaction-protected, the txnid parameter is a transaction handle returned from DbEnv::txn_begin; otherwise, NULL. To transaction-protect cursor operations, cursors must be opened and closed within the context of a transaction, and the txnid @@ -64,12 +64,12 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -81,6 +81,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_del.html b/db/docs/api_cxx/db_del.html index 63a1733d9..2fd4b1774 100644 --- a/db/docs/api_cxx/db_del.html +++ b/db/docs/api_cxx/db_del.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::del - + -
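The Db::cursor page above adds DB_DEGREE_2 to the cursor flags. A short C++ sketch of opening a cursor and walking the database; the transaction handle may be NULL outside a transactional environment, and the printing is illustrative.

#include <iostream>
#include <string>
#include <db_cxx.h>

/* Iterate over every key/data pair; DB_DEGREE_2 (when a transaction is
 * supplied) keeps only the current item stable, as described above. */
void dump_all(Db &db, DbTxn *txn)
{
    Dbc *dbc;
    db.cursor(txn, &dbc, txn != NULL ? DB_DEGREE_2 : 0);

    Dbt key, data;
    int ret;
    while ((ret = dbc->get(&key, &data, DB_NEXT)) == 0) {
        std::string k((const char *)key.get_data(), key.get_size());
        std::string d((const char *)data.get_data(), data.get_size());
        std::cout << k << " -> " << d << std::endl;
    }
    if (ret != DB_NOTFOUND)
        db.err(ret, "Dbc::get");
    dbc->close();              /* close cursors before resolving the txn */
}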

Db::del

API -Ref -
+Ref +


@@ -35,8 +34,7 @@ associated with the designated key will be discarded.

When called on a database that has been made into a secondary index using the Db::associate method, the Db::del method deletes the key/data pair from the primary database and all secondary indices.

-

-The Db::del method will return DB_NOTFOUND if the specified key is not in the database. +

The Db::del method will return DB_NOTFOUND if the specified key is not in the database. The Db::del method will return DB_KEYEMPTY if the database is a Queue or Recno database and the specified key exists, but was never explicitly created by the application or was later deleted. @@ -46,19 +44,16 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

key
-The key Dbt operated on. -

flags
-The flags parameter must be set to 0 or +
+
key
The key Dbt operated on. +
flags
The flags parameter must be set to 0 or the following value: -

-

DB_AUTO_COMMIT
Enclose the Db::del call within a transaction. If the call succeeds, +
+
DB_AUTO_COMMIT
Enclose the Db::del call within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, the operation will have made no changes.
-

txnid
-If the operation is to be transaction-protected, +
txnid
If the operation is to be transaction-protected, (other than by specifying the DB_AUTO_COMMIT flag), the txnid parameter is a transaction handle returned from DbEnv::txn_begin; otherwise, NULL. @@ -69,18 +64,18 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

DB_SECONDARY_BAD
A secondary index references a nonexistent primary key. +
+
DB_SECONDARY_BAD
A secondary index references a nonexistent primary key.
-

-

EACCES
An attempt was made to modify a read-only database. +
+
EACCES
An attempt was made to modify a read-only database.
-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

If a transactional database environment operation was selected to @@ -100,6 +95,6 @@ throw a DbLockNotGrantedException exc

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_err.html b/db/docs/api_cxx/db_err.html index 86f1b6f33..bccddc7bb 100644 --- a/db/docs/api_cxx/db_err.html +++ b/db/docs/api_cxx/db_err.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: Db::err - + -
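A sketch of the Db::del call documented above, deleting a single NUL-terminated string key; DB_AUTO_COMMIT stands in for an explicit transaction handle, and the key layout is an assumption.

#include <cstring>
#include <db_cxx.h>

/* Delete one key. DB_NOTFOUND and DB_KEYEMPTY are returned, not thrown,
 * so they are checked explicitly even with exceptions enabled. */
int delete_key(Db &db, const char *keystr)
{
    Dbt key((void *)keystr, (u_int32_t)std::strlen(keystr) + 1);
    int ret = db.del(NULL, &key, DB_AUTO_COMMIT);
    if (ret == DB_NOTFOUND)
        db.errx("%s: no such key", keystr);
    return (ret);
}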

Db::err

API -Ref -
+Ref +


@@ -30,22 +29,21 @@ Db::errx(const char *fmt, ...);
 


Description: Db::err

-

The DbEnv::err, DbEnv::errx, Db::err and Db::errx methods provide error-messaging functionality for applications written using the Berkeley DB library.

The DbEnv::err method constructs an error message consisting of the following elements:

-

-

An optional prefix string
If no error callback function has been set using the +
+
An optional prefix string
If no error callback function has been set using the DbEnv::set_errcall method, any prefix string specified using the DbEnv::set_errpfx method, followed by two separating characters: a colon and a <space> character. -

An optional printf-style message
The supplied message fmt, if non-NULL, in which the +
An optional printf-style message
The supplied message fmt, if non-NULL, in which the ANSI C X3.159-1989 (ANSI C) printf function specifies how subsequent parameters are converted for output. -

A separator
Two separating characters: a colon and a <space> character. -

A standard error string
The standard system or Berkeley DB library error string associated with the +
A separator
Two separating characters: a colon and a <space> character. +
A standard error string
The standard system or Berkeley DB library error string associated with the error value, as returned by the DbEnv::strerror method.
@@ -61,20 +59,24 @@ stream.

(see DbEnv::set_error_stream and Db::set_error_stream), the error message is written to that stream.

If none of these output options has been configured, the error message -is written to stderr, the standard -error output stream.

+is written to stderr, the standard error output stream.

+

Parameters

+
+
error
The error parameter is the error value for which the +DbEnv::err and Db::err methods will display a explanatory +string. +
fmt
The fmt parameter is an optional printf-style message to display. +

The DbEnv::errx and Db::errx methods perform identically to the DbEnv::err and Db::err methods, except that they do not append the final separator characters and standard error string to the error message.

Parameters

-

-

error
-The error parameter is the error value for which the +
+
error
The error parameter is the error value for which the DbEnv::err and Db::err methods will display a explanatory string. -

fmt
-The fmt parameter is an optional printf-style message to display. +
fmt
The fmt parameter is an optional printf-style message to display.

Class

@@ -85,6 +87,6 @@ The fmt parameter is an optional printf-style message to display.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_fd.html b/db/docs/api_cxx/db_fd.html index 2c80a7b31..e5bb54c12 100644 --- a/db/docs/api_cxx/db_fd.html +++ b/db/docs/api_cxx/db_fd.html @@ -1,23 +1,22 @@ - + Berkeley DB: Db::fd - + -
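A sketch of the error-messaging elements listed above (prefix, formatted message, separator, error string). The prefix string and file name are illustrative, and the Db handle is assumed to have been created within the environment with DB_CXX_NO_EXCEPTIONS so failures come back as return values.

#include <cstdio>
#include <db_cxx.h>

void open_or_complain(DbEnv &env, Db &db, const char *file)
{
    env.set_errpfx("myapp");      /* "myapp: " prepended to every message */
    env.set_errfile(stderr);      /* route messages to stderr             */

    int ret = db.open(NULL, file, NULL, DB_BTREE, DB_CREATE, 0664);
    if (ret != 0) {
        db.err(ret, "open of %s failed", file);  /* appends error string  */
        db.errx("continuing without %s", file);  /* no trailing string    */
    }
}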

Db::fd

API -Ref -
+Ref +


@@ -44,9 +43,8 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

fdp
-The fdp parameter references memory into which +
+
fdp
The fdp parameter references memory into which the current file descriptor is copied.

@@ -58,6 +56,6 @@ The fdp parameter references memory into which

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_get.html b/db/docs/api_cxx/db_get.html index a7ed44b58..caf3401fb 100644 --- a/db/docs/api_cxx/db_get.html +++ b/db/docs/api_cxx/db_get.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::get - + -

Db::get

API -Ref -
+Ref +


@@ -31,28 +30,21 @@ Db::pget(DbTxn *txnid, Dbt *key, Dbt *pkey, Dbt *data, u_int32_t flags);
 


Description: Db::get

-

The Db::get method retrieves key/data pairs from the database. The -address -and length of the data associated with the specified key are -returned in the structure to which data refers.

+address and length of the data associated with the specified key +are returned in the structure to which data refers.

In the presence of duplicate key values, Db::get will return the first data item for the designated key. Duplicates are sorted by insert order, except where this order has been overridden by cursor operations. Retrieval of duplicates requires the use of cursor operations. See Dbc::get for details.

When called on a database that has been made into a secondary index -using the Db::associate method, the Db::get -and Db::pget methods return -the key from the secondary index and the data item from the primary -database. In addition, the -Db::pget method +using the Db::associate method, the Db::get and +Db::pget methods return the key from the secondary index and the data +item from the primary database. In addition, the Db::pget method returns the key from the primary database. In databases that are not -secondary indices, the -Db::pget method -will always fail.

-

-The Db::get method will return DB_NOTFOUND if the specified key is not in the database. +secondary indices, the Db::pget method will always fail.

+

The Db::get method will return DB_NOTFOUND if the specified key is not in the database. The Db::get method will return DB_KEYEMPTY if the database is a Queue or Recno database and the specified key exists, but was never explicitly created by the application or was later deleted. @@ -62,21 +54,19 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

data
-The data Dbt operated on. -

flags
-The flags parameter must be set to 0 or +
+
data
The data Dbt operated on. +
flags
The flags parameter must be set to 0 or one of the following values: -

-

DB_CONSUME
Return the record number and data from the available record closest to +
+
DB_CONSUME
Return the record number and data from the available record closest to the head of the queue, and delete the record. The cursor will be positioned on the deleted record. The record number will be returned in key, as described in Dbt. The data will be returned in the data parameter. A record is available if it is not deleted and is not currently locked. The underlying database must be of type Queue for DB_CONSUME to be specified. -

DB_CONSUME_WAIT
The DB_CONSUME_WAIT flag is the same as the DB_CONSUME +
DB_CONSUME_WAIT
The DB_CONSUME_WAIT flag is the same as the DB_CONSUME flag, except that if the Queue database is empty, the thread of control will wait until there is data in the queue before returning. The underlying database must be of type Queue for DB_CONSUME_WAIT @@ -87,37 +77,36 @@ may return DB_LOCK_NOT DbLockNotGrantedException exception. This failure, by itself, does not require the enclosing transaction be aborted.

-

DB_GET_BOTH
Retrieve the key/data pair only if both the key and data match the +
DB_GET_BOTH
Retrieve the key/data pair only if both the key and data match the arguments. -

When used with the -Db::pget method -version of this method on a secondary index handle, return the -secondary key/primary key/data tuple only if both the primary and -secondary keys match the arguments. It is an error to use the -DB_GET_BOTH flag with the -Db::get +

When used with the Db::pget method version of this method on a +secondary index handle, return the secondary key/primary key/data tuple +only if both the primary and secondary keys match the arguments. It is +an error to use the DB_GET_BOTH flag with the Db::get version of this method and a secondary index handle.

-

DB_SET_RECNO
Retrieve the specified numbered key/data pair from a database. Upon +
DB_SET_RECNO
Retrieve the specified numbered key/data pair from a database. Upon return, both the key and data items will have been filled in. -

The data field of the specified key -must be a pointer to a logical record number (that is, a db_recno_t). -This record number determines the record to be retrieved. +

The data field of the specified key must be a pointer +to a logical record number (that is, a db_recno_t). This record +number determines the record to be retrieved.

For DB_SET_RECNO to be specified, the underlying database must be -of type Btree, and it must have been created with the DB_RECNUM flag.

+of type Btree, and it must have been created with the DB_RECNUM flag.

In addition, the following flags may be set by bitwise inclusively OR'ing them into the flags parameter: -

-

DB_AUTO_COMMIT
Enclose the Db::get call within a transaction. If the call +
+
DB_AUTO_COMMIT
Enclose the Db::get call within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, the operation will have made no changes. This flag may only be specified with the DB_CONSUME and DB_CONSUME_WAIT flags. -

DB_DIRTY_READ
Read modified but not yet committed data. Silently ignored if the +
DB_DEGREE_2
Perform the get operation with degree 2 isolation. +The read is not repeatable. +
DB_DIRTY_READ
Read modified but not yet committed data. Silently ignored if the DB_DIRTY_READ flag was not specified when the underlying database was opened. -

DB_MULTIPLE
Return multiple data items in the buffer to which the data +
DB_MULTIPLE
Return multiple data items in the buffer to which the data parameter refers.

In the case of Btree or Hash databases, all of the data items associated with the specified key are entered into the buffer. In the case of @@ -130,7 +119,7 @@ least as large as the page size of the underlying database, aligned for unsigned integer access, and be a multiple of 1024 bytes in size. If the buffer size is insufficient, then upon return from the call the size field of the data parameter will have been set to an estimated -buffer size, and the error ENOMEM is returned. (The size is an estimate as the +buffer size, and the error DB_BUFFER_SMALL is returned. (The size is an estimate as the exact size needed may not be known until all entries are read. It is best to initially provide a relatively large buffer, but applications should be prepared to resize the buffer as necessary and repeatedly call @@ -142,7 +131,7 @@ into secondary indices using the Db::asso

See DbMultipleDataIterator for more information.

-

DB_RMW
Acquire write locks instead of read locks when doing the retrieval. +
DB_RMW
Acquire write locks instead of read locks when doing the retrieval. Setting this flag can eliminate deadlock during a read-modify-write cycle by acquiring the write lock during the read part of the cycle so that another thread of control acquiring a read lock for the same item, @@ -152,12 +141,9 @@ Berkeley DB calls in non-transactional operations, the Dbt operated on. -

pkey
-The pkey parameter is the return key from the primary database. -

txnid
-If the operation is to be transaction-protected, +
key
The key Dbt operated on. +
pkey
The pkey parameter is the return key from the primary database. +
txnid
If the operation is to be transaction-protected, the txnid parameter is a transaction handle returned from DbEnv::txn_begin; otherwise, NULL.
@@ -167,20 +153,20 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

DB_SECONDARY_BAD
A secondary index references a nonexistent primary key. +
+
DB_SECONDARY_BAD
A secondary index references a nonexistent primary key.
-

-

EINVAL
If a record number of 0 was specified; +
+
EINVAL
If a record number of 0 was specified; the DB_THREAD flag was specified to the Db::open method and none of the DB_DBT_MALLOC, DB_DBT_REALLOC or DB_DBT_USERMEM flags were set in the Dbt; -the Db::pget method -was called with a Db handle that does not refer to a secondary index; or if an +the Db::pget method was called with a Db handle that does not +refer to a secondary index; or if an invalid flag value or parameter was specified.

If a transactional database environment operation was selected to @@ -196,8 +182,8 @@ timers were configured and the lock could not be granted before the wait-time ex the Db::get method will fail and either return DB_LOCK_NOTGRANTED or throw a DbLockNotGrantedException exception.

-

If the requested item could not be returned due to insufficient memory, the Db::get method will fail and -either return ENOMEM or +

If the requested item could not be returned due to undersized buffer, the Db::get method will fail and +either return DB_BUFFER_SMALL or throw a DbMemoryException exception.


Class

@@ -208,6 +194,6 @@ throw a DbMemoryException exception.


APIRef -

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_get_byteswapped.html b/db/docs/api_cxx/db_get_byteswapped.html index b0a800e47..bc7a6f261 100644 --- a/db/docs/api_cxx/db_get_byteswapped.html +++ b/db/docs/api_cxx/db_get_byteswapped.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::get_byteswapped - + -
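The Db::get page above distinguishes Db::get from Db::pget on secondary indices and renames the too-small-buffer error to DB_BUFFER_SMALL. A short sketch of both lookups; the handles, the key contents, and the assumption that the handles were not opened with DB_THREAD (so default result memory handling suffices) are illustrative.

#include <cstring>
#include <iostream>
#include <db_cxx.h>

void lookup(Db &primary, Db &secondary, const char *keystr)
{
    Dbt key((void *)keystr, (u_int32_t)std::strlen(keystr) + 1);
    Dbt data;

    /* Plain primary lookup; DB_NOTFOUND is returned, never thrown. */
    if (primary.get(NULL, &key, &data, 0) == 0)
        std::cout << "primary: " << data.get_size() << " data bytes\n";

    /* Through a secondary index: pget also returns the primary key. */
    Dbt pkey;
    if (secondary.pget(NULL, &key, &pkey, &data, 0) == 0)
        std::cout << "secondary: primary key of "
                  << pkey.get_size() << " bytes\n";
}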

Db::get_byteswapped

API -Ref -
+Ref +


@@ -28,13 +27,12 @@ Db::get_byteswapped(int *isswapped);
 


Description: Db::get_byteswapped

-

The Db::get_byteswapped method returns -if the underlying database files were created on an architecture of the -same byte order as the current one, -or -if they were not (that is, big-endian on a little-endian machine, or -vice versa). This information may be used to determine whether -application data needs to be adjusted for this architecture or not.

+

The Db::get_byteswapped method returns if the underlying database +files were created on an architecture of the same byte order as the +current one, or if they were not (that is, big-endian on a little-endian +machine, or vice versa). This information may be used to determine +whether application data needs to be adjusted for this architecture or +not.

The Db::get_byteswapped method may not be called before the Db::open method has been called.

The Db::get_byteswapped method @@ -43,9 +41,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

isswapped
-If the underlying database files were created on an architecture of the +
+
isswapped
If the underlying database files were created on an architecture of the same byte order as the current one. 0 is stored into the memory location referenced by isswapped. If the underlying database files were created on an architecture of a different byte order as the current one, @@ -57,8 +54,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called before Db::open was called; or if an +
+
EINVAL
If the method was called before Db::open was called; or if an invalid flag value or parameter was specified.

@@ -70,6 +67,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_get_mpf.html b/db/docs/api_cxx/db_get_mpf.html index 02e026963..e7fcf340a 100644 --- a/db/docs/api_cxx/db_get_mpf.html +++ b/db/docs/api_cxx/db_get_mpf.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::get_mpf - + -

Db::get_mpf

API -Ref -
+Ref +


@@ -28,6 +27,9 @@ Db::get_mpf();
 


Description: Db::get_mpf

+

The Db::get_mpf method returns the Db.get_mpf.

+

The Db::get_mpf method may be called at any time during the life of the +application.

Db::get_mpf gives access to the DbMpoolFile associated with a Db object.


@@ -39,6 +41,6 @@ with a Db object.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_get_type.html b/db/docs/api_cxx/db_get_type.html index 885cea18e..02d27726a 100644 --- a/db/docs/api_cxx/db_get_type.html +++ b/db/docs/api_cxx/db_get_type.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::get_type - + -

Db::get_type

API -Ref -
+Ref +


@@ -29,11 +28,10 @@ Db::get_type(DBTYPE *type);
 

Description: Db::get_type

The Db::get_type method returns the type of the underlying access -method (and file format). The type value is one of DB_BTREE, -DB_HASH, DB_RECNO, or DB_QUEUE. This -value may be used to determine the type of the database after a return -from Db::open with the type parameter set to -DB_UNKNOWN.

+method (and file format). The type value is one of DB_BTREE, DB_HASH, +DB_RECNO, or DB_QUEUE. This value may be used to determine the type of +the database after a return from Db::open with the type +parameter set to DB_UNKNOWN.

The Db::get_type method may not be called before the Db::open method has been called.

The Db::get_type method @@ -42,9 +40,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

type
-The type parameter references memory into which +
+
type
The type parameter references memory into which the type of the underlying access method is copied.

Errors

@@ -53,8 +50,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called before Db::open was called; or if an +
+
EINVAL
If the method was called before Db::open was called; or if an invalid flag value or parameter was specified.

@@ -66,6 +63,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_getenv.html b/db/docs/api_cxx/db_getenv.html index 9547e7603..7b1a66fb4 100644 --- a/db/docs/api_cxx/db_getenv.html +++ b/db/docs/api_cxx/db_getenv.html @@ -1,47 +1,35 @@ - - + + Berkeley DB: Db::getenv - + -
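A sketch combining the two introspection methods documented above, Db::get_byteswapped and Db::get_type; both may only be called after Db::open, here with the access method left as DB_UNKNOWN. The file name is illustrative.

#include <iostream>
#include <db_cxx.h>

void describe(Db &db)
{
    db.open(NULL, "existing.db", NULL, DB_UNKNOWN, DB_RDONLY, 0);

    DBTYPE type;
    db.get_type(&type);           /* DB_BTREE, DB_HASH, DB_RECNO or DB_QUEUE */

    int swapped;
    db.get_byteswapped(&swapped); /* non-zero: file byte order != host's     */

    std::cout << "access method " << (int)type
              << (swapped ? ", byte-swapped" : ", native byte order")
              << std::endl;
}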

Db::getenv

API -Ref -
+Ref +


 #include <db_cxx.h>
 

DbEnv * -Db::getenv(); +Db::get_env();


Description: Db::getenv

-

The Db::getenv method returns the handle for the database environment underlying the database.

+

The Db::getenv method returns the Db.getDbEnv.

The Db::getenv method may be called at any time during the life of the application.

-

The Db::getenv method -either returns a non-zero error value -or throws an exception that encapsulates a non-zero error value on -failure, and returns 0 on success. -

-

Parameters

-

-

db
-The Db::getenv method returns the -handle for the database environment underlying the database in db. -

Class

Db @@ -51,6 +39,6 @@ handle for the database environment underlying the database in db.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_join.html b/db/docs/api_cxx/db_join.html index 440df0939..9ea301192 100644 --- a/db/docs/api_cxx/db_join.html +++ b/db/docs/api_cxx/db_join.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::join - + -

Db::join

API -Ref -
+Ref +


@@ -36,32 +35,32 @@ information on how to organize your data to use this functionality, see
 the primary database.

The join cursor supports only the Dbc::get and dbc_close cursor functions:

-

-

Dbc::get
Iterates over the values associated with the keys to which each item in +
+
Dbc::get
Iterates over the values associated with the keys to which each item in curslist was initialized. Any data value that appears in all items specified by the curslist parameter is then used as a key into the primary, and the key/data pair found in the primary is returned. The flags parameter must be set to 0 or the following value: -

-

DB_JOIN_ITEM
Do not use the data value found in all the cursors as a lookup key for +
+
DB_JOIN_ITEM
Do not use the data value found in all the cursors as a lookup key for the primary, but simply return it in the key parameter instead. The data parameter is left unchanged.
In addition, the following flag may be set by bitwise inclusively OR'ing it into the flags parameter: -

-

DB_DIRTY_READ
Read modified but not yet committed data. Silently ignored if the +
+
DB_DIRTY_READ
Read modified but not yet committed data. Silently ignored if the DB_DIRTY_READ flag was not specified when the underlying database was opened. -

DB_RMW
Acquire write locks instead of read locks when doing the retrieval. +
DB_RMW
Acquire write locks instead of read locks when doing the retrieval. Setting this flag can eliminate deadlock during a read-modify-write cycle by acquiring the write lock during the read part of the cycle so that another thread of control acquiring a read lock for the same item, in its own read-modify-write cycle, will not result in deadlock.
-

Dbc::close
Close the returned cursor and release all resources. (Closing the cursors +
Dbc::close
Close the returned cursor and release all resources. (Closing the cursors in curslist is the responsibility of the caller.)

The Db::join method @@ -70,9 +69,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

curslist
-The curslist parameter contains a NULL terminated array of cursors. +
+
curslist
The curslist parameter contains a NULL terminated array of cursors. Each cursor must have been initialized to refer to the key on which the underlying database should be joined. Typically, this initialization is done by a Dbc::get call with the DB_SET flag specified. Once the @@ -90,14 +88,12 @@ most. By default, Db::join does this sort on behalf of its caller.

For the returned join cursor to be used in a transaction-protected manner, the cursors listed in curslist must have been created within the context of the same transaction.

-

dbcp
-The newly created join cursor is returned in the memory location to which -dbcp refers. -

flags
-The flags parameter must be set to 0 or +
dbcp
The newly created join cursor is returned in the memory location to +which dbcp refers. +
flags
The flags parameter must be set to 0 or the following value: -

-

DB_JOIN_NOSORT
Do not sort the cursors based on the number of data items to which they +
+
DB_JOIN_NOSORT
Do not sort the cursors based on the number of data items to which they refer. If the data are structured so that cursors with many data items also share many common elements, higher performance will result from listing those cursors before cursors with fewer data items; that is, a @@ -112,15 +108,15 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

DB_SECONDARY_BAD
A secondary index references a nonexistent primary key. +
+
DB_SECONDARY_BAD
A secondary index references a nonexistent primary key.
-

-

EINVAL
If cursor methods other than Dbc::get or Dbc::close were +
+
EINVAL
If cursor methods other than Dbc::get or Dbc::close were called; or if an invalid flag value or parameter was specified.
@@ -133,6 +129,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_key_range.html b/db/docs/api_cxx/db_key_range.html index fe0cdcdfb..4544abc77 100644 --- a/db/docs/api_cxx/db_key_range.html +++ b/db/docs/api_cxx/db_key_range.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::key_range - + -
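A sketch of the equality join described on the Db::join page above: one cursor per secondary index, each positioned with DB_SET, then joined against the primary. The index handles and keys are illustrative; error handling is omitted.

#include <db_cxx.h>

void join_matches(Db &primary, Db &by_color, Db &by_size,
    Dbt &color_key, Dbt &size_key)
{
    Dbc *curslist[3];
    Dbt data;

    by_color.cursor(NULL, &curslist[0], 0);
    curslist[0]->get(&color_key, &data, DB_SET);   /* e.g. color == "red"  */

    by_size.cursor(NULL, &curslist[1], 0);
    curslist[1]->get(&size_key, &data, DB_SET);    /* e.g. size == "large" */

    curslist[2] = NULL;                            /* NULL-terminated list */

    Dbc *join_curs;
    primary.join(curslist, &join_curs, 0);

    Dbt key;
    while (join_curs->get(&key, &data, 0) == 0)
        ;                      /* each primary key/data pair matching both */

    join_curs->close();        /* caller still closes curslist[0] and [1]  */
    curslist[0]->close();
    curslist[1]->close();
}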

Db::key_range

API -Ref -
+Ref +


@@ -34,7 +33,7 @@ that are less than, equal to, and greater than the specified key.  The
 underlying database must be of type Btree.

The Db::key_range method fills in a structure of type DB_KEY_RANGE. The following data fields are available from the DB_KEY_RANGE structure:

-

+
double less;
A value between 0 and 1, the proportion of keys less than the specified key.
double equal;
A value between 0 and 1, the proportion of keys equal to the specified @@ -52,23 +51,19 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

key
-The key Dbt operated on. -

key_range
-The estimates are returned in the key_range parameter, which +
+
key
The key Dbt operated on. +
key_range
The estimates are returned in the key_range parameter, which contains three elements of type double: less, equal, and greater. Values are in the range of 0 to 1; for example, if the field less is 0.05, 5% of the keys in the database are less than the key parameter. The value for equal will be zero if there is no matching key, and will be non-zero otherwise. -

txnid
-If the operation is to be transaction-protected, +
txnid
If the operation is to be transaction-protected, the txnid parameter is a transaction handle returned from DbEnv::txn_begin; otherwise, NULL. The Db::key_range method does not retain the locks it acquires for the life of the transaction, so estimates may not be repeatable. -

flags
-The flags parameter is currently unused, and must be set to 0. +
flags
The flags parameter is currently unused, and must be set to 0.

Errors

The Db::key_range method @@ -76,12 +71,12 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

EINVAL
If the underlying database was not of type Btree; or if an +
+
EINVAL
If the underlying database was not of type Btree; or if an invalid flag value or parameter was specified.

If a transactional database environment operation was selected to @@ -101,6 +96,6 @@ throw a DbLockNotGrantedException exc

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_list.html b/db/docs/api_cxx/db_list.html index 4fb11b052..fa587f3b5 100644 --- a/db/docs/api_cxx/db_list.html +++ b/db/docs/api_cxx/db_list.html @@ -1,68 +1,70 @@ - + Berkeley DB: Berkeley DB: Databases and Related Methods - +
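A sketch of Db::key_range as documented above; the underlying database must be a Btree, and the key passed in is illustrative.

#include <iostream>
#include <db_cxx.h>

void where_is(Db &db, Dbt &key)
{
    DB_KEY_RANGE kr;
    if (db.key_range(NULL, &key, &kr, 0) == 0)
        std::cout << kr.less * 100 << "% of keys are smaller, "
                  << kr.equal * 100 << "% match, "
                  << kr.greater * 100 << "% are larger" << std::endl;
}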

Berkeley DB: Databases and Related Methods

- + - + + - - + - - + - + + + + + + - - - - - - - + + + - - - - + + + + + - - - - - + + + + + +
Databases and Related MethodsDescription
Database OperationsDescription
DbCreate a database handle
Db::associateAssociate a secondary index
Db::closeClose a database
Db::cursorCreate a cursor handle
Db::delDelete items from a database
Db::errError message with error string
Db::errxError message
Db::fdReturn a file descriptor from a database
Db::getGet items from a database
Db::get, Db::pgetGet items from a database
Db::get_byteswappedReturn if the underlying database is in host order
Db::getenvReturn a handle for the underlying database environment
Db::get_mpfGet the memory pool handle for a database
Db::getenvReturn database environment handle
Db::get_typeReturn the database type
Db::joinPerform a database join on cursors
Db::key_rangeReturn estimate of key location
Db::openOpen a database
Db::pgetGet items from a database
Db::putStore items into a database
Db::removeRemove a database
Db::renameRename a database
Db::stat, Db::stat_printDatabase statistics
Db::syncFlush a database to stable storage
Db::truncateEmpty a database
Db::upgradeUpgrade a database
Db::verifyVerify/salvage a database
Database Configuration
Db::set_allocSet local space allocation functions
Db::set_append_recnoSet record append callback
Db::set_bt_compareSet a Btree comparison function
Db::set_bt_minkeySet the minimum number of keys per Btree page
Db::set_bt_prefixSet a Btree prefix comparison function
Db::set_cachesizeSet the database cache size
Db::set_dup_compareSet a duplicate comparison function
Db::set_encryptSet the database cryptographic key
Db::set_errcallSet error message callback
Db::set_errfileSet error message FILE
Db::set_error_streamSet error message output stream
Db::set_errcall, Db::set_msgcallSet error and informational message callback
Db::set_errfile, Db::set_msgfileSet error and informational message FILE
Db::set_error_stream, Db::set_message_streamSet error and informational message output stream
Db::set_errpfxSet error message prefix
Db::set_feedbackSet feedback callback
Db::set_flagsGeneral database configuration
Db::set_h_ffactorSet the Hash table density
Db::set_h_hashSet a hashing function
Db::set_h_nelemSet the Hash table size
Db::set_lorderSet the database byte order
Db::set_pagesizeSet the underlying database page size
Db::set_paniccallSet panic callback
Db::set_q_extentsizeSet Queue database extent size
Btree/Recno Configuration
Db::set_append_recnoSet record append callback
Db::set_bt_compareSet a Btree comparison function
Db::set_bt_minkeySet the minimum number of keys per Btree page
Db::set_bt_prefixSet a Btree prefix comparison function
Db::set_re_delimSet the variable-length record delimiter
Db::set_re_lenSet the fixed-length record length
Db::set_re_padSet the fixed-length record pad byte
Db::set_re_sourceSet the backing Recno text file
Db::statReturn database statistics
Db::syncFlush a database to stable storage
Db::truncateEmpty a database
Db::upgradeUpgrade a database
Db::verifyVerify/salvage a database
Hash Configuration
Db::set_h_ffactorSet the Hash table density
Db::set_h_hashSet a hashing function
Db::set_h_nelemSet the Hash table size
Queue Configuration
Db::set_q_extentsizeSet Queue database extent size
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_open.html b/db/docs/api_cxx/db_open.html index d383e660e..865580030 100644 --- a/db/docs/api_cxx/db_open.html +++ b/db/docs/api_cxx/db_open.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::open - + -

Db::open

API -Ref -
+Ref +


@@ -38,10 +37,6 @@ Db::get_transactional()
 


Description: Db::open

- - - -

The Db::open method opens the database represented by the file and database parameters for both reading and writing.

The currently supported Berkeley DB file formats (or access @@ -65,9 +60,8 @@ If Db::open fails, the Db::close method s discard the Db handle.

Parameters

-

-

database
-The database parameter is optional, and allows applications to +
+
database
The database parameter is optional, and allows applications to have multiple databases in a single file. Although no database parameter needs to be specified, it is an error to attempt to open a second database in a file that was not initially created using @@ -81,41 +75,40 @@ created by setting both the file and database parameters to NULL. Note that in-memory databases can only ever be shared by sharing the single database handle that created them, in circumstances where doing so is safe.

-

file
-The file parameter is used as the name of an underlying file that +
file
The file parameter is used as the name of an underlying file that will be used to back the database.

In-memory databases never intended to be preserved on disk may be created by setting both the file and database parameters to NULL. Note that in-memory databases can only ever be shared by sharing the single database handle that created them, in circumstances where doing so is safe.

-

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +

On Windows, the file argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters.

+
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_AUTO_COMMIT
Enclose the Db::open call within a transaction. If the call +
+
DB_AUTO_COMMIT
Enclose the Db::open call within a transaction. If the call succeeds, the open operation will be recoverable. If the call fails, no database will have been created. -

DB_CREATE
Create the database. If the database does not already exist and the -DB_CREATE flag is not specified, the Db::open will -fail. -

DB_DIRTY_READ
Support dirty reads; that is, read operations on the database may +
DB_CREATE
Create the database. If the database does not already exist and the +DB_CREATE flag is not specified, the Db::open will fail. +
DB_DIRTY_READ
Support dirty reads; that is, read operations on the database may request the return of modified but not yet committed data. This flag must be specified on all Db handles used to perform dirty reads or database updates, otherwise requests for dirty reads may not be honored and the read may block. -

DB_EXCL
Return an error if the database already exists. The DB_EXCL +
DB_EXCL
Return an error if the database already exists. The DB_EXCL flag is only meaningful when specified with the DB_CREATE flag. -

DB_NOMMAP
Do not map this database into process memory (see the +
DB_NOMMAP
Do not map this database into process memory (see the DbEnv::set_mp_mmapsize method for further information). -

DB_RDONLY
Open the database for reading only. Any attempt to modify items in the +
DB_RDONLY
Open the database for reading only. Any attempt to modify items in the database will fail, regardless of the actual permissions of any underlying files. -

DB_THREAD
Cause the Db handle returned by Db::open to be -free-threaded; that is, usable by multiple threads within a -single address space. -

DB_TRUNCATE
Physically truncate the underlying file, discarding all previous +
DB_THREAD
Cause the Db handle returned by Db::open to be +free-threaded; that is, concurrently usable by multiple +threads in the address space. +
DB_TRUNCATE
Physically truncate the underlying file, discarding all previous databases it might have held. Underlying filesystem primitives are used to implement this flag. For this reason, it is applicable only to the file and cannot be used to discard databases within a file. @@ -123,31 +116,28 @@ file and cannot be used to discard databases within a file. and it is an error to specify it in a locking or transaction-protected environment.

-

mode
On Windows systems, the mode parameter is ignored.

On UNIX systems or in IEEE/ANSI Std 1003.1 (POSIX) environments, files created by the database open are created with mode mode (as described in chmod(2)) and modified by the process' umask value at the time of creation (see umask(2)). Created files are owned by the process owner; the group ownership of created files is based on the system and directory defaults, and is not further specified by Berkeley DB. System shared memory segments created by the database open are created with mode mode, unmodified by the process' umask value. If mode is 0, the database open will use a default mode of readable and writable by both owner and group.

txnid
If the operation is to be transaction-protected (other than by specifying the DB_AUTO_COMMIT flag), the txnid parameter is a transaction handle returned from DbEnv::txn_begin; otherwise, NULL. Note that transactionally protected operations on a Db handle require the Db handle itself be transactionally protected during its open.

type
The type parameter is of type DBTYPE, and must be set to one of DB_BTREE, DB_HASH, DB_QUEUE, DB_RECNO, or DB_UNKNOWN. If type is DB_UNKNOWN, the database must already exist and Db::open will automatically determine its type. The Db::get_type method may be used to determine the underlying type of databases opened using DB_UNKNOWN.
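A minimal sketch of a typical call (the file name and mode are illustrative; exception handling is omitted):

#include <db_cxx.h>

// Create the database if it does not already exist and open it as a Btree.
Db db(NULL, 0);
db.open(NULL,        /* txnid: not transaction-protected */
    "access.db",     /* file */
    NULL,            /* database within the file */
    DB_BTREE,        /* type */
    DB_CREATE,       /* flags */
    0664);           /* mode */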

Environment Variables

If the database was opened within a database environment, the @@ -156,28 +146,28 @@ database environment home.

Db::open is affected by any database directory specified using the DbEnv::set_data_dir method, or by setting the "set_data_dir" string in the environment's DB_CONFIG file.

-

-

TMPDIR
If the file and dbenv parameters to Db::open are +
+
TMPDIR
If the file and dbenv parameters to Db::open are NULL, the environment variable TMPDIR may be used as a directory in which to create temporary backing files

Errors

-

-

ENOENT
The file or directory does not exist. +
+
ENOENT
The file or directory does not exist.

The Db::open method may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

DB_OLD_VERSION
The database cannot be opened without being first upgraded. +
+
DB_OLD_VERSION
The database cannot be opened without being first upgraded.
-

-

EEXIST
DB_CREATE and DB_EXCL were specified and the database exists. +
+
EEXIST
DB_CREATE and DB_EXCL were specified and the database exists.
-

-

EINVAL
If an unknown database type, page size, hash function, pad byte, byte +
+
EINVAL
If an unknown database type, page size, hash function, pad byte, byte order, or a flag value or parameter that is incompatible with the specified database was specified; the DB_THREAD flag was specified and fast mutexes are not @@ -190,11 +180,11 @@ flag or the provided database environment supports transaction processing; or if an invalid flag value or parameter was specified.
-

-

ENOENT
A nonexistent re_source file was specified. +
+
ENOENT
A nonexistent re_source file was specified.
-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.

If a transactional database environment operation was selected to @@ -210,12 +200,10 @@ throw a DbLockNotGrantedException exc

The Db::get_database method returns the current filename and database name.

Parameters

-

-

filenamep
-The filenamep parameter references memory into which +
+
filenamep
The filenamep parameter references memory into which a pointer to the current filename is copied. -

dbnamep
-The dbnamep parameter references memory into which +
dbnamep
The dbnamep parameter references memory into which a pointer to the current database name is copied.

The Db::get_database method may be called at any time during the life of the @@ -236,9 +224,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flagsp
-The Db::get_open_flags method returns the +
+
flagsp
The Db::get_open_flags method returns the current open method flags in flagsp.

@@ -256,6 +243,6 @@ application.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_put.html b/db/docs/api_cxx/db_put.html index 0aab595ce..f88a2399f 100644 --- a/db/docs/api_cxx/db_put.html +++ b/db/docs/api_cxx/db_put.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::put - + -

Db::put

API -Ref -
+Ref +


@@ -41,12 +40,11 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

flags
-The flags parameter must be set to 0 or +
+
flags
The flags parameter must be set to 0 or one of the following values: -

-

DB_APPEND
Append the key/data pair to the end of the database. For the +
+
DB_APPEND
Append the key/data pair to the end of the database. For the DB_APPEND flag to be specified, the underlying database must be a Queue or Recno database. The record number allocated to the record is returned in the specified key. @@ -56,38 +54,33 @@ Db::put operation with the DB_APPEND flag aborts, the record number may be decremented (and later reallocated by a subsequent DB_APPEND operation) by the Recno access method, but will not be decremented or reallocated by the Queue access method.

-

DB_NODUPDATA
In the case of the Btree and Hash access methods, enter the new key/data +
DB_NODUPDATA
In the case of the Btree and Hash access methods, enter the new key/data pair only if it does not already appear in the database.

The DB_NODUPDATA flag may only be specified if the underlying database has been configured to support sorted duplicates. The DB_NODUPDATA flag may not be specified to the Queue or Recno access methods.

-

-The Db::put method will return DB_KEYEXIST if DB_NODUPDATA is set and the key/data pair already appears +

The Db::put method will return DB_KEYEXIST if DB_NODUPDATA is set and the key/data pair already appears in the database.

-

DB_NOOVERWRITE
Enter the new key/data pair only if the key does not already appear in the +
DB_NOOVERWRITE
Enter the new key/data pair only if the key does not already appear in the database. The Db::put method call with the DB_NOOVERWRITE flag set will fail if the key already exists in the database, even if the database supports duplicates. -

-The Db::put method will return DB_KEYEXIST if DB_NOOVERWRITE is set and the key already appears in the +

The Db::put method will return DB_KEYEXIST if DB_NOOVERWRITE is set and the key already appears in the database.
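For example, a sketch of an insert that must not replace an existing key (the Db handle and the key/data contents are illustrative):

Dbt key((void *)"fruit", 5);
Dbt data((void *)"apple", 5);
// DB_KEYEXIST is returned (not thrown) when the key is already present.
int ret = db.put(NULL, &key, &data, DB_NOOVERWRITE);
if (ret == DB_KEYEXIST) {
    /* The key was already in the database; the existing pair is unchanged. */
}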

In addition, the following flag may be set by bitwise inclusively OR'ing it into the flags parameter: -

-

DB_AUTO_COMMIT
Enclose the Db::put call within a transaction. If the call succeeds, +
+
DB_AUTO_COMMIT
Enclose the Db::put call within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, the operation will have made no changes.
-

data
The data Dbt operated on.
key
The key Dbt operated on.
txnid
If the operation is to be transaction-protected (other than by specifying the DB_AUTO_COMMIT flag), the txnid parameter is a transaction handle returned from DbEnv::txn_begin; otherwise, NULL.

Errors

The Db::put method may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EACCES
An attempt was made to modify a read-only database. +
+
EACCES
An attempt was made to modify a read-only database.
-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

EINVAL
If a record number of 0 was specified; +
+
EINVAL
If a record number of 0 was specified; an attempt was made to add a record to a fixed-length database that was too large to fit; an attempt was made to do a partial put; an attempt was made to add a record to a secondary index; or if an invalid flag value or parameter was specified.
-

-

ENOSPC
A btree exceeded the maximum btree depth (255). +
+
ENOSPC
A btree exceeded the maximum btree depth (255).

If a transactional database environment operation was selected to resolve a deadlock, the Db::put method will fail and @@ -133,6 +126,6 @@ throw a DbLockNotGrantedException exc

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_remove.html b/db/docs/api_cxx/db_remove.html index 476afa154..c5c661807 100644 --- a/db/docs/api_cxx/db_remove.html +++ b/db/docs/api_cxx/db_remove.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: Db::remove - + -

Db::remove

API -Ref -
+Ref +


@@ -30,15 +29,18 @@ Db::remove(const char *file, const char *database, u_int32_t flags);
 

Description: Db::remove

The Db::remove method removes the database specified by the -file and database parameters. If no database is -specified, the underlying file represented by file is removed, -incidentally removing all of the databases it contained.

+file and database parameters. If no database +is specified, the underlying file represented by file is +removed, incidentally removing all of the databases it contained.

Applications should never remove databases with open Db handles, or in the case of removing a file, when any database in the file has an open handle. For example, some architectures do not permit the removal of files with open system handles. On these architectures, attempts to remove databases currently in use by any thread of control in the system -will fail.

+may fail.

+

The Db::remove method should not be called if the remove is intended +to be transactionally safe; the DbEnv::dbremove method should be used +instead.

The Db::remove method may not be called after calling the Db::open method on any Db handle. If the Db::open method has already been called on a Db handle, close the existing @@ -51,14 +53,13 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

database
-The database parameter is the database to be removed. -

file
-The file parameter is the physical file which contains the +
+
database
The database parameter is the database to be removed. +
file
The file parameter is the physical file which contains the database(s) to be removed. -

flags
-The flags parameter is currently unused, and must be set to 0. +

On Windows, the file argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters.

+
flags
The flags parameter is currently unused, and must be set to 0.
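A brief sketch of a standalone, non-transactional removal (the file name is illustrative; the handle must never have been opened):

#include <db_cxx.h>

// Remove every database contained in the file "access.db"; the Db handle
// may not be used again after Db::remove returns.
Db db(NULL, 0);
db.remove("access.db", NULL, 0);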

Environment Variables

If the database was opened within a database environment, the @@ -73,16 +74,12 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

DB_FILEOPEN
An attempt was made to remove the underlying file and a database in the -file was currently open. -
-

-

EINVAL
If Db::remove called after Db::open was called; or if an +
+
EINVAL
If Db::remove was called after Db::open was called; or if an invalid flag value or parameter was specified.
-

-

ENOENT
The file or directory does not exist. +
+
ENOENT
The file or directory does not exist.

Class

@@ -93,6 +90,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_rename.html b/db/docs/api_cxx/db_rename.html index e77a8cfa3..65e6cd92f 100644 --- a/db/docs/api_cxx/db_rename.html +++ b/db/docs/api_cxx/db_rename.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: Db::rename - + -

Db::rename

API -Ref -
+Ref +


@@ -41,7 +40,10 @@ the database environment, no database in the file may be open when the
 Db::rename method is called.  In particular, some architectures do
 not permit renaming files with open handles.  On these architectures,
 attempts to rename databases that are currently in use by any thread of
-control in the system will fail.

+control in the system may fail.

+

The Db::rename method should not be called if the rename is intended +to be transactionally safe; the DbEnv::dbrename method should be used +instead.

The Db::rename method may not be called after calling the Db::open method on any Db handle. If the Db::open method has already been called on a Db handle, close the existing @@ -54,16 +56,14 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

database
-The database parameter is the database to be removed. -

file
-The file parameter is the physical file which contains the -database(s) to be removed. -

flags
-The flags parameter is currently unused, and must be set to 0. -

newname
-The newname parameter is the new name of the database or file. +
+
database
The database parameter is the database to be renamed. +
file
The file parameter is the physical file which contains the +database(s) to be renamed. +

On Windows, the file argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters.

+
flags
The flags parameter is currently unused, and must be set to 0. +
newname
The newname parameter is the new name of the database or file.

Environment Variables

If the database was opened within a database environment, the @@ -78,16 +78,12 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

DB_FILEOPEN
An attempt was made to rename the underlying file and a database in the -file was currently open. -
-

-

EINVAL
If Db::rename called after Db::open was called; or if an +
+
EINVAL
If Db::rename was called after Db::open was called; or if an invalid flag value or parameter was specified.
-

-

ENOENT
The file or directory does not exist. +
+
ENOENT
The file or directory does not exist.

Class

@@ -98,6 +94,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_alloc.html b/db/docs/api_cxx/db_set_alloc.html index 65ead0a7b..4d4b9d068 100644 --- a/db/docs/api_cxx/db_set_alloc.html +++ b/db/docs/api_cxx/db_set_alloc.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: Db::set_alloc - + -

Db::set_alloc

API -Ref -
+Ref +


@@ -79,8 +78,8 @@ may fail and throw
 DbException,
 encapsulating one of the following non-zero errors, or return one of
 the following non-zero errors:

-

-

EINVAL
If called in a database environment; if called after Db::open was called; or if an invalid flag value or parameter was specified.
@@ -93,6 +92,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_append_recno.html b/db/docs/api_cxx/db_set_append_recno.html index 44d906ea8..336acc964 100644 --- a/db/docs/api_cxx/db_set_append_recno.html +++ b/db/docs/api_cxx/db_set_append_recno.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::set_append_recno - + -

Db::set_append_recno

API -Ref -
+Ref +


@@ -45,19 +44,18 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

db_append_recno_fcn
-The db_append_recno_fcn parameter is a function to call after +
+
db_append_recno_fcn
The db_append_recno_fcn parameter is a function to call after the record number has been selected but before the data has been stored into the database. The function takes three parameters: -

-

db
The db parameter is the enclosing database handle. -

dbt
The dbt parameter is the data Dbt to be stored. -

recno
The recno parameter is the generated record number. +
+
db
The db parameter is the enclosing database handle. +
dbt
The dbt parameter is the data Dbt to be stored. +
recno
The recno parameter is the generated record number.
-

The called function may modify the data Dbt. -If the function needs to allocate memory for the data field, the -flags field of the returned Dbt should be set to +

The called function may modify the data Dbt. If the function +needs to allocate memory for the data field, the flags +field of the returned Dbt should be set to DB_DBT_APPMALLOC, which indicates that Berkeley DB should free the memory when it is done with it.
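A sketch of such a callback, assuming the application reserves the first bytes of each record for the record number (all names are hypothetical):

#include <db_cxx.h>
#include <cstring>

// Stamp the allocated record number into the start of the data item.
int stamp_recno(Db *dbp, Dbt *data, db_recno_t recno)
{
    if (data->get_size() >= sizeof(db_recno_t))
        std::memcpy(data->get_data(), &recno, sizeof(db_recno_t));
    return (0);
}

// Registered before Db::open on a Recno or Queue database handle "db":
db.set_append_recno(stamp_recno);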

The callback function must return 0 on success and errno or @@ -69,8 +67,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after Db::open was called; or if an +
+
EINVAL
If the method was called after Db::open was called; or if an invalid flag value or parameter was specified.

@@ -82,6 +80,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_bt_compare.html b/db/docs/api_cxx/db_set_bt_compare.html index bbf8af8ea..82560cda5 100644 --- a/db/docs/api_cxx/db_set_bt_compare.html +++ b/db/docs/api_cxx/db_set_bt_compare.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::set_bt_compare - + -

Db::set_bt_compare

API -Ref -
+Ref +


@@ -50,15 +49,14 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

bt_compare_fcn
-The bt_compare_fcn function is the application-specified Btree +
+
bt_compare_fcn
The bt_compare_fcn function is the application-specified Btree comparison function. The comparison function takes three parameters: -

-

db
The db parameter is the enclosing database handle. -

dbt1
The dbt1 parameter is the Dbt representing the +
+
db
The db parameter is the enclosing database handle. +
dbt1
The dbt1 parameter is the Dbt representing the application supplied key. -

dbt2
The dbt2 parameter is the Dbt representing the +
dbt2
The dbt2 parameter is the Dbt representing the current tree's key.
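As an illustration, assuming keys that each hold a u_int32_t in native byte order (all names are hypothetical):

#include <db_cxx.h>
#include <cstring>

// Order keys numerically rather than as byte strings.
int compare_uint32(Db *dbp, const Dbt *dbt1, const Dbt *dbt2)
{
    u_int32_t a, b;

    // memcpy avoids alignment problems with the raw key memory.
    std::memcpy(&a, dbt1->get_data(), sizeof(u_int32_t));
    std::memcpy(&b, dbt2->get_data(), sizeof(u_int32_t));
    return (a < b ? -1 : (a > b ? 1 : 0));
}

// Configured before the database is created or opened:
db.set_bt_compare(compare_uint32);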

The bt_compare_fcn function must return an integer value less @@ -81,8 +79,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after Db::open was called; or if an +
+
EINVAL
If the method was called after Db::open was called; or if an invalid flag value or parameter was specified.

@@ -94,6 +92,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_bt_minkey.html b/db/docs/api_cxx/db_set_bt_minkey.html index 42ee969d3..bbca728a1 100644 --- a/db/docs/api_cxx/db_set_bt_minkey.html +++ b/db/docs/api_cxx/db_set_bt_minkey.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::set_bt_minkey - + -

Db::set_bt_minkey

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ Db::get_bt_minkey(u_int32_t *bt_minkeyp);
 


Description: Db::set_bt_minkey

-

Set the minimum number of key/data pairs intended to be stored on any single Btree leaf page.

This value is used to determine if key or data items will be stored on @@ -52,9 +50,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

bt_minkey
-The bt_minkey parameter is the minimum number of key/data pairs +
+
bt_minkey
The bt_minkey parameter is the minimum number of key/data pairs intended to be stored on any single Btree leaf page.

Errors

@@ -63,8 +60,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after Db::open was called; or if an +
+
EINVAL
If the method was called after Db::open was called; or if an invalid flag value or parameter was specified.

@@ -79,9 +76,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

bt_minkeyp
-The Db::get_bt_minkey method returns the +
+
bt_minkeyp
The Db::get_bt_minkey method returns the minimum number of key/data pairs intended to be stored on any single Btree leaf page in bt_minkeyp.
@@ -94,6 +90,6 @@ leaf page in bt_minkeyp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_bt_prefix.html b/db/docs/api_cxx/db_set_bt_prefix.html index 9c9df5293..16d27913b 100644 --- a/db/docs/api_cxx/db_set_bt_prefix.html +++ b/db/docs/api_cxx/db_set_bt_prefix.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::set_bt_prefix - + -

Db::set_bt_prefix

API -Ref -
+Ref +


@@ -58,14 +57,13 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

bt_prefix_fcn
-The bt_prefix_fcn function is the application-specific Btree +
+
bt_prefix_fcn
The bt_prefix_fcn function is the application-specific Btree prefix function. The prefix function takes three parameters: -

-

db
The db parameter is the enclosing database handle. -

dbt1
The dbt1 parameter is a Dbt representing a database key. -

dbt2
The dbt2 parameter is a Dbt representing a database key. +
+
db
The db parameter is the enclosing database handle. +
dbt1
The dbt1 parameter is a Dbt representing a database key. +
dbt2
The dbt2 parameter is a Dbt representing a database key.
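A sketch consistent with the return-value contract described next, assuming plain byte-string keys ordered by the default comparison and the size_t-returning callback type declared in db_cxx.h (all names are hypothetical):

#include <db_cxx.h>

size_t prefix_bytes(Db *dbp, const Dbt *dbt1, const Dbt *dbt2)
{
    const u_int8_t *p1 = (const u_int8_t *)dbt1->get_data();
    const u_int8_t *p2 = (const u_int8_t *)dbt2->get_data();
    size_t len = dbt1->get_size() < dbt2->get_size() ?
        dbt1->get_size() : dbt2->get_size();
    size_t cnt;

    // Count bytes up to and including the first differing byte.
    for (cnt = 1; len-- > 0; ++cnt, ++p1, ++p2)
        if (*p1 != *p2)
            return (cnt);
    // One key is a prefix of the other: the longer key needs one more byte.
    if (dbt1->get_size() < dbt2->get_size())
        return (dbt1->get_size() + 1);
    if (dbt2->get_size() < dbt1->get_size())
        return (dbt2->get_size() + 1);
    return (dbt2->get_size());
}

db.set_bt_prefix(prefix_bytes);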

The bt_prefix_fcn function must return the number of bytes of the second key parameter that would be required by the Btree key @@ -84,8 +82,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after Db::open was called; or if an +
+
EINVAL
If the method was called after Db::open was called; or if an invalid flag value or parameter was specified.

@@ -97,6 +95,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_cachesize.html b/db/docs/api_cxx/db_set_cachesize.html index 4670c9bb6..870313e63 100644 --- a/db/docs/api_cxx/db_set_cachesize.html +++ b/db/docs/api_cxx/db_set_cachesize.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: Db::set_cachesize - + -

Db::set_cachesize

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ Db::get_cachesize(u_int32_t *gbytesp, u_int32_t *bytesp, int *ncachep);
 


Description: Db::set_cachesize

-

Set the size of the shared memory buffer pool -- that is, the cache. The cache should be the size of the normal working data set of the application, with some small amount of additional memory for unusual @@ -41,7 +39,7 @@ pages accessed simultaneously, and is usually much larger.)

20KB. Any cache size less than 500MB is automatically increased by 25% to account for buffer pool overhead; cache sizes larger than 500MB are used as specified. The current maximum size of a single cache is 4GB. -(All sizes are in powers-of-two, that is, 256KB is 2^32 not 256,000.) +(All sizes are in powers-of-two, that is, 256KB is 2^18 not 256,000.) For information on tuning the Berkeley DB cache size, see Selecting a cache size.

It is possible to specify caches to Berkeley DB larger than 4GB and/or large @@ -62,13 +60,10 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

bytes
-The size of the cache is set to gbytes gigabytes plus bytes. -

gbytes
-The size of the cache is set to gbytes gigabytes plus bytes. -

ncache
-The ncache parameter is the number of caches to create. +
+
bytes
The size of the cache is set to gbytes gigabytes plus bytes. +
gbytes
The size of the cache is set to gbytes gigabytes plus bytes. +
ncache
The ncache parameter is the number of caches to create.
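For instance, a sketch requesting a single 64MB cache (the handle name is illustrative; the call must precede Db::open):

// 0 gigabytes plus 64MB, in one underlying cache.
db.set_cachesize(0, 64 * 1024 * 1024, 1);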

Errors

The Db::set_cachesize method @@ -76,8 +71,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the specified cache size was impossibly small; +
+
EINVAL
If the specified cache size was impossibly small; called in a database environment; the method was called after Db::open @@ -96,15 +91,12 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

bytesp
-The bytesp parameter references memory into which +
+
bytesp
The bytesp parameter references memory into which the additional bytes of memory in the cache is copied. -

gbytesp
-The gbytesp parameter references memory into which +
gbytesp
The gbytesp parameter references memory into which the gigabytes of memory in the cache is copied. -

ncachep
-The ncachep parameter references memory into which +
ncachep
The ncachep parameter references memory into which the number of caches is copied.

@@ -116,6 +108,6 @@ The ncachep parameter references memory into which

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_dup_compare.html b/db/docs/api_cxx/db_set_dup_compare.html index 8ecdee7a5..f5ec5e4e0 100644 --- a/db/docs/api_cxx/db_set_dup_compare.html +++ b/db/docs/api_cxx/db_set_dup_compare.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::set_dup_compare - + -

Db::set_dup_compare

API -Ref -
+Ref +


@@ -49,17 +48,16 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

dup_compare_fcn
-The dup_compare_fcn function is the application-specified +
+
dup_compare_fcn
The dup_compare_fcn function is the application-specified duplicate data item comparison function. The function takes three arguments: -

-

db
The db parameter is the enclosing database handle. -

dbt1
The dbt1 parameter is a Dbt representing the application -supplied key. -

dbt2
The dbt2 parameter is a Dbt representing the current -tree's key. +
+
db
The db parameter is the enclosing database handle. +
dbt1
The dbt1 parameter is a Dbt representing the application +supplied data item. +
dbt2
The dbt2 parameter is a Dbt representing the current +tree's data item.
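A sketch of a lexical comparison that orders shorter duplicate items first (all names are hypothetical; see the return-value contract described next):

#include <db_cxx.h>
#include <cstring>

int compare_dups(Db *dbp, const Dbt *dbt1, const Dbt *dbt2)
{
    size_t len = dbt1->get_size() < dbt2->get_size() ?
        dbt1->get_size() : dbt2->get_size();
    int cmp = std::memcmp(dbt1->get_data(), dbt2->get_data(), len);

    if (cmp != 0)
        return (cmp);
    // Identical up to the shorter length: the shorter item sorts first.
    return ((int)dbt1->get_size() - (int)dbt2->get_size());
}

// Combined with the DB_DUPSORT flag before Db::open:
db.set_dup_compare(compare_dups);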

The dup_compare_fcn function must return an integer value less than, equal to, or greater than zero if the first data item parameter @@ -79,8 +77,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -92,6 +90,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_encrypt.html b/db/docs/api_cxx/db_set_encrypt.html index a4b7c1a04..2ce6d2904 100644 --- a/db/docs/api_cxx/db_set_encrypt.html +++ b/db/docs/api_cxx/db_set_encrypt.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: Db::set_encrypt - + -

Db::set_encrypt

API -Ref -
+Ref +


@@ -32,7 +31,6 @@ Db::get_encrypt_flags(u_int32_t *flagsp);
 


Description: Db::set_encrypt

-

Set the password used by the Berkeley DB library to perform encryption and decryption.

Because databases opened within Berkeley DB environments use the password @@ -46,17 +44,15 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to 0 or +
+
flags
The flags parameter must be set to 0 or the following value: -

-

DB_ENCRYPT_AES
Use the Rijndael/AES (also known as the Advanced Encryption Standard +
+
DB_ENCRYPT_AES
Use the Rijndael/AES (also known as the Advanced Encryption Standard and Federal Information Processing Standard (FIPS) 197) algorithm for encryption or decryption.
-

passwd
-The passwd parameter is the password used to perform encryption +
passwd
The passwd parameter is the password used to perform encryption and decryption.
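A minimal sketch (the handle name and password are purely illustrative; the call must precede Db::open):

// DB_ENCRYPT_AES selects the Rijndael/AES algorithm described above.
db.set_encrypt("my secret password", DB_ENCRYPT_AES);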

Errors

@@ -65,14 +61,14 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after +
+
EINVAL
If the method was called after Db::open was called; or if an invalid flag value or parameter was specified.
-

-

EOPNOTSUPP
Cryptography is not available in this Berkeley DB release. +
+
EOPNOTSUPP
Cryptography is not available in this Berkeley DB release.

Description: Db::get_encrypt_flags

@@ -85,9 +81,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flagsp
-The Db::get_encrypt_flags method returns the +
+
flagsp
The Db::get_encrypt_flags method returns the encryption flags in flagsp.

@@ -99,6 +94,6 @@ encryption flags in flagsp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_errcall.html b/db/docs/api_cxx/db_set_errcall.html index fec250a12..ad07cf169 100644 --- a/db/docs/api_cxx/db_set_errcall.html +++ b/db/docs/api_cxx/db_set_errcall.html @@ -1,31 +1,30 @@ - - + + Berkeley DB: Db::set_errcall - + -

Db::set_errcall

API -Ref -
+Ref +


 #include <db_cxx.h>
 

-void Db::set_errcall( - void (*db_errcall_fcn)(const char *errpfx, char *msg)); +void Db::set_errcall(void (*db_errcall_fcn) + (const DbEnv *dbenv, const char *errpfx, const char *msg));


Description: Db::set_errcall

@@ -40,6 +39,7 @@ In some cases, when an error occurs, Berkeley DB will call db_errcall_fcn with additional error information. It is up to the db_errcall_fcn function to display the error message in an appropriate manner.

+

Setting db_errcall_fcn to NULL unconfigures the callback interface.

Alternatively, you can use the DbEnv::set_error_stream and Db::set_error_stream methods to display the additional information via an output stream, or the Db::set_errfile or @@ -54,14 +54,14 @@ the DbEnv::set_errcall method.

The Db::set_errcall method may be called at any time during the life of the application.

Parameters

-

-

db_errcall_fcn
The db_errcall_fcn parameter is the application-specified error reporting function. The function takes three parameters:

dbenv
The dbenv parameter is the enclosing database environment.
errpfx
The errpfx parameter is the prefix string (as previously set by Db::set_errpfx or DbEnv::set_errpfx).
msg
The msg parameter is the error message string.
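A sketch of a matching callback that routes messages to stderr (all names are hypothetical):

#include <db_cxx.h>
#include <cstdio>

void report_error(const DbEnv *dbenv, const char *errpfx, const char *msg)
{
    std::fprintf(stderr, "%s: %s\n", errpfx != NULL ? errpfx : "db", msg);
}

db.set_errcall(report_error);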

@@ -73,6 +73,6 @@ reporting function. The function takes two parameters:

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_errfile.html b/db/docs/api_cxx/db_set_errfile.html index c883a6782..15106f39e 100644 --- a/db/docs/api_cxx/db_set_errfile.html +++ b/db/docs/api_cxx/db_set_errfile.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: Db::set_errfile - + -

Db::set_errfile

API -Ref -
+Ref +


@@ -30,7 +29,6 @@ void Db::get_errfile(FILE **errfilep);
 


Description: Db::set_errfile

- When an error occurs in the Berkeley DB library, an exception is thrown or an error return value is returned by the interface. In some cases, however, the errno value may be insufficient to completely @@ -50,6 +48,7 @@ should not mix these approaches.

(":") (if a prefix string was previously specified using Db::set_errpfx or DbEnv::set_errpfx), an error string, and a trailing <newline> character.

+

Setting errfile to NULL unconfigures the interface.

This error logging enhancement does not slow performance or significantly increase application size, and may be run during normal operation as well as during application debugging.

@@ -59,27 +58,15 @@ the DbEnv::set_errfile method.

The Db::set_errfile method may be called at any time during the life of the application.

Parameters

-

-

errfile
-The errfile parameter is a C library FILE * to be used for +
+
errfile
The errfile parameter is a C library FILE * to be used for displaying additional Berkeley DB error information.

Description: Db::get_errfile

-

The Db::get_errfile method returns the FILE *.

+

The Db::get_errfile method returns the FILE *.

The Db::get_errfile method may be called at any time during the life of the application.

-

The Db::get_errfile method -either returns a non-zero error value -or throws an exception that encapsulates a non-zero error value on -failure, and returns 0 on success. -

-

Parameters

-

-

errfilep
-The Db::get_errfile method returns the -FILE * in errfilep. -

Class

Db @@ -89,6 +76,6 @@ FILE * in errfilep.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_error_stream.html b/db/docs/api_cxx/db_set_error_stream.html index 3119487b8..d31c7d40f 100644 --- a/db/docs/api_cxx/db_set_error_stream.html +++ b/db/docs/api_cxx/db_set_error_stream.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: Db::set_error_stream - + -

Db::set_error_stream

API -Ref -
+Ref +


@@ -42,11 +41,13 @@ output an additional error message to the specified stream.

(":") (if a prefix string was previously specified using DbEnv::set_errpfx), an error string, and a trailing <newline> character.

-

Alternatively, you can use the DbEnv::set_errfile method to display -the additional information via a C library FILE *, or the -DbEnv::set_errcall method to capture the additional error information in -a way that does not use either output streams or C library FILE *'s. You -should not mix these approaches.

+

Setting stream to NULL unconfigures the interface.

+

Alternatively, you can use the DbEnv::set_errfile and +Db::set_errfile methods to display the additional information via a C +library FILE *, or the DbEnv::set_errcall and +Db::set_errcall methods to capture the additional error information +in a way that does not use either output streams or C library FILE *'s. +You should not mix these approaches.

This error-logging enhancement does not slow performance or significantly increase application size, and may be run during normal operation as well as during application debugging.

@@ -54,9 +55,8 @@ as during application debugging.

Db::set_error_stream method affects the entire environment and is equivalent to calling the DbEnv::set_error_stream method.

Parameters

-

-

stream
-The stream parameter is the application-specified output stream to +
+
stream
The stream parameter is the application-specified output stream to be used for additional error information.

@@ -68,6 +68,6 @@ be used for additional error information.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_errpfx.html b/db/docs/api_cxx/db_set_errpfx.html index 8801682a8..5d3830b2f 100644 --- a/db/docs/api_cxx/db_set_errpfx.html +++ b/db/docs/api_cxx/db_set_errpfx.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: Db::set_errpfx - + -

Db::set_errpfx

API -Ref -
+Ref +


@@ -30,7 +29,6 @@ void Db::get_errpfx(const char **errpfxp);
 


Description: Db::set_errpfx

-

Set the prefix string that appears before error messages issued by Berkeley DB.

The Db::set_errpfx and DbEnv::set_errpfx methods do not copy the memory to which the errpfx parameter refers; rather, they @@ -44,9 +42,8 @@ the DbEnv::set_errpfx method.

The Db::set_errpfx method may be called at any time during the life of the application.

Parameters

-

-

errpfx
-The errpfx parameter is the application-specified error prefix +
+
errpfx
The errpfx parameter is the application-specified error prefix for additional error messages.

@@ -60,9 +57,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

errpfxp
-The Db::get_errpfx method returns a reference to the +
+
errpfxp
The Db::get_errpfx method returns a reference to the error prefix in errpfxp.

@@ -74,6 +70,6 @@ error prefix in errpfxp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_feedback.html b/db/docs/api_cxx/db_set_feedback.html index 102db9ef0..a8a40e981 100644 --- a/db/docs/api_cxx/db_set_feedback.html +++ b/db/docs/api_cxx/db_set_feedback.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: Db::set_feedback - + -

Db::set_feedback

API -Ref -
+Ref +


@@ -45,20 +44,19 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

db_feedback_fcn
-The db_feedback_fcn parameter is the application-specified +
+
db_feedback_fcn
The db_feedback_fcn parameter is the application-specified feedback function called to report Berkeley DB operation progress. The callback function must take three parameters: -

-

db
The db parameter is a reference to the enclosing database. -

opcode
The opcode parameter is an operation code. The opcode +
+
db
The db parameter is a reference to the enclosing database. +
opcode
The opcode parameter is an operation code. The opcode parameter may take on any of the following values: -

-

DB_UPGRADE
The underlying database is being upgraded. -

DB_VERIFY
The underlying database is being verified. +
+
DB_UPGRADE
The underlying database is being upgraded. +
DB_VERIFY
The underlying database is being verified.
-

percent
The percent parameter is the percent of the operation that has +
percent
The percent parameter is the percent of the operation that has been completed, specified as an integer value between 0 and 100.
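A sketch of such a callback (all names are hypothetical):

#include <db_cxx.h>
#include <cstdio>

void show_progress(Db *dbp, int opcode, int percent)
{
    const char *op =
        opcode == DB_UPGRADE ? "upgrade" :
        opcode == DB_VERIFY ? "verify" : "operation";

    std::printf("%s: %d%% complete\n", op, percent);
}

db.set_feedback(show_progress);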
@@ -71,6 +69,6 @@ been completed, specified as an integer value between 0 and 100.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_flags.html b/db/docs/api_cxx/db_set_flags.html index 1a7b00d16..04a8c32c1 100644 --- a/db/docs/api_cxx/db_set_flags.html +++ b/db/docs/api_cxx/db_set_flags.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::set_flags - + -

Db::set_flags

API -Ref -
+Ref +


@@ -30,7 +29,6 @@ int Db::get_flags(u_int32_t *flagsp);
 


Description: Db::set_flags

-

Configure a database. Calling Db::set_flags is additive; there is no way to clear flags.

The Db::set_flags method may not be called after the Db::open method is called. @@ -41,15 +39,14 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
+
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values:

General

The following flags may be specified for any Berkeley DB access method:

-

- -

DB_CHKSUM
Do checksum verification of pages read into the cache from the backing +
+ +
DB_CHKSUM
Do checksum verification of pages read into the cache from the backing filestore. Berkeley DB uses the SHA1 Secure Hash Algorithm if encryption is configured and a general hash algorithm if it is not.

Calling Db::set_flags with the DB_CHKSUM flag only affects the @@ -61,8 +58,8 @@ will be ignored.

If creating additional databases in a file, the checksum behavior specified must be consistent with the existing databases in the file or an error will be returned. - -

DB_ENCRYPT
Encrypt the database using the cryptographic password specified to the + +
DB_ENCRYPT
Encrypt the database using the cryptographic password specified to the DbEnv::set_encrypt or Db::set_encrypt methods.

Calling Db::set_flags with the DB_ENCRYPT flag only affects the specified Db handle (and any other Berkeley DB handles opened within @@ -78,29 +75,28 @@ be returned.

Encrypted databases are not portable between machines of different byte orders, that is, encrypted databases created on big-endian machines cannot be read on little-endian machines, and vice versa.

- -

DB_TXN_NOT_DURABLE
If set, Berkeley DB will not write log records for this database. This -means that updates of this database exhibit the ACI (atomicity, -consistency, and isolation) properties, but not D (durability); that -is, database integrity will be maintained, but if the application or -system fails, integrity will not persist. The database file must be -verified and/or restored from backup after a failure. In order to -ensure integrity after application shut down, the database handles must -be closed without specifying DB_NOSYNC, or all database -changes must be flushed from the database environment cache using -either the DbEnv::txn_checkpoint or DbEnv::memp_sync methods. -All database handles for a single physical file must set -DB_TXN_NOT_DURABLE, including database handles for different -databases in a physical file. + +
DB_TXN_NOT_DURABLE
If set, Berkeley DB will not write log records for this database. This means +that updates of this database exhibit the ACI (atomicity, consistency, +and isolation) properties, but not D (durability); that is, database +integrity will be maintained, but if the application or system fails, +integrity will not persist. The database file must be verified and/or +restored from backup after a failure. In order to ensure integrity +after application shut down, the database handles must be closed without +specifying DB_NOSYNC, or all database changes must be flushed +from the database environment cache using either the +DbEnv::txn_checkpoint or DbEnv::memp_sync methods. All database handles for +a single physical file must set DB_TXN_NOT_DURABLE, including +database handles for different databases in a physical file.

Calling Db::set_flags with the DB_TXN_NOT_DURABLE flag only affects the specified Db handle (and any other Berkeley DB handles opened within the scope of that handle).
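As a sketch of the calling pattern (the handle and file name are illustrative), flags are OR'd together and must be configured before Db::open:

#include <db_cxx.h>

Db db(NULL, 0);
// Request page checksums for the database about to be created.
db.set_flags(DB_CHKSUM);
db.open(NULL, "data.db", NULL, DB_BTREE, DB_CREATE, 0664);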

Btree

The following flags may be specified for the Btree access method:

-

- -

DB_DUP
Permit duplicate data items in the database; that is, insertion when the +
+ +
DB_DUP
Permit duplicate data items in the database; that is, insertion when the key of the key/data pair being inserted already exists in the database will be successful. The ordering of duplicates in the database is determined by the order of insertion, unless the ordering is otherwise @@ -116,14 +112,13 @@ must be the same as the existing database or an error will be returned.

It is an error to specify both DB_DUP and DB_RECNUM.

- -

DB_DUPSORT
Permit duplicate data items in the database; that is, insertion when the + +
DB_DUPSORT
Permit duplicate data items in the database; that is, insertion when the key of the key/data pair being inserted already exists in the database -will be successful. The ordering of duplicates in the database is determined -by the duplicate comparison function. -If the application does not specify a comparison function using the -Db::set_dup_compare method, a default lexical comparison will be -used. +will be successful. The ordering of duplicates in the database is +determined by the duplicate comparison function. If the application +does not specify a comparison function using the +Db::set_dup_compare method, a default lexical comparison will be used. It is an error to specify both DB_DUPSORT and DB_RECNUM.

Calling Db::set_flags with the DB_DUPSORT flag affects the database, including all threads of control accessing the database.

@@ -132,8 +127,8 @@ flag must be the same as the existing database or an error will be returned.

- -

DB_RECNUM
Support retrieval from the Btree using record numbers. For more + +
DB_RECNUM
Support retrieval from the Btree using record numbers. For more information, see the DB_SET_RECNO flag to the Db::get and Dbc::get methods.

Logical record numbers in Btree databases are mutable in the face of @@ -153,8 +148,8 @@ flag must be the same as the existing database or an error will be returned.

- -

DB_REVSPLITOFF
Turn off reverse splitting in the Btree. As pages are emptied in a + +
DB_REVSPLITOFF
Turn off reverse splitting in the Btree. As pages are emptied in a database, the Berkeley DB Btree implementation attempts to coalesce empty pages into higher-level pages in order to keep the database as small as possible and minimize search time. This can hurt performance in applications @@ -170,8 +165,8 @@ the scope of that handle).

Hash

The following flags may be specified for the Hash access method:

-

-

DB_DUP
Permit duplicate data items in the database; that is, insertion when the +
+
DB_DUP
Permit duplicate data items in the database; that is, insertion when the key of the key/data pair being inserted already exists in the database will be successful. The ordering of duplicates in the database is determined by the order of insertion, unless the ordering is otherwise @@ -186,13 +181,12 @@ flag must be the same as the existing database or an error will be returned.

-

DB_DUPSORT
Permit duplicate data items in the database; that is, insertion when the +
DB_DUPSORT
Permit duplicate data items in the database; that is, insertion when the key of the key/data pair being inserted already exists in the database -will be successful. The ordering of duplicates in the database is determined -by the duplicate comparison function. -If the application does not specify a comparison function using the -Db::set_dup_compare method, a default lexical comparison will be -used. +will be successful. The ordering of duplicates in the database is +determined by the duplicate comparison function. If the application +does not specify a comparison function using the +Db::set_dup_compare method, a default lexical comparison will be used. It is an error to specify both DB_DUPSORT and DB_RECNUM.

Calling Db::set_flags with the DB_DUPSORT flag affects the database, including all threads of control accessing the database.

@@ -203,13 +197,30 @@ will be returned.

Queue

-

There are no additional flags that may be specified for the Queue access -method.

+

The following flags may be specified for the Queue access method:

+
+ +
DB_INORDER
The DB_INORDER flag modifies the operation of the DB_CONSUME or DB_CONSUME_WAIT flags to Db::get to return key/data pairs in order. That is, they will always return the key/data item from the head of the queue.

The default behavior of queue databases is optimized for multiple readers, and does not guarantee that records will be retrieved in the order they are added to the queue. Specifically, if a writing thread adds multiple records to an empty queue, reading threads may skip some of the initial records when the next Db::get call returns.

+

This flag modifies the Db::get call to verify that the record +being returned is in fact the head of the queue. This will increase +contention and reduce concurrency when there are many reading threads.

+

Calling Db::set_flags with the DB_INORDER flag only affects the +specified Db handle (and any other Berkeley DB handles opened within +the scope of that handle).
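A sketch of the configuration and a strictly ordered read (the handle name is illustrative; the flag must be set before the Queue database is opened):

db.set_flags(DB_INORDER);
/* ... Db::open with DB_QUEUE, records appended by writers ... */
Dbt key, data;
// With DB_INORDER, DB_CONSUME always returns the record at the head of the queue.
int ret = db.get(NULL, &key, &data, DB_CONSUME);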

+

Recno

The following flags may be specified for the Recno access method:

-

- -

DB_RENUMBER
Specifying the DB_RENUMBER flag causes the logical record +
+ +
DB_RENUMBER
Specifying the DB_RENUMBER flag causes the logical record numbers to be mutable, and change as records are added to and deleted from the database. For example, the deletion of record number 4 causes records numbered 5 and greater to be renumbered downward by one. If a @@ -242,8 +253,8 @@ flag must be the same as the existing database or an error will be returned.

- -

DB_SNAPSHOT
This flag specifies that any specified re_source file be read + +
DB_SNAPSHOT
This flag specifies that any specified re_source file be read in its entirety when Db::open is called. If this flag is not specified, the re_source file may be read lazily.

Calling Db::set_flags with the DB_SNAPSHOT flag only affects the @@ -257,8 +268,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -272,9 +283,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flagsp
-The Db::get_flags method returns the +
+
flagsp
The Db::get_flags method returns the current flags in flagsp.

@@ -286,6 +296,6 @@ current flags in flagsp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_h_ffactor.html b/db/docs/api_cxx/db_set_h_ffactor.html index 0e9be3d5f..1cc1560eb 100644 --- a/db/docs/api_cxx/db_set_h_ffactor.html +++ b/db/docs/api_cxx/db_set_h_ffactor.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::set_h_ffactor - + -

Db::set_h_ffactor

API -Ref -
+Ref +


@@ -30,7 +29,6 @@ int Db::get_h_ffactor(u_int32_t *h_ffactorp);
 


Description: Db::set_h_ffactor

-

Set the desired density within the hash table. If no value is specified, the fill factor will be selected dynamically as pages are filled.

@@ -47,9 +45,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

h_ffactor
-The h_ffactor parameter is the desired density within the hash table. +
+
h_ffactor
The h_ffactor parameter is the desired density within the hash table.

The density is an approximation of the number of keys allowed to accumulate in any one bucket, determining when the hash table grows or shrinks. If you know the average sizes of the keys and data in your @@ -63,8 +60,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after Db::open was called; or if an +
+
EINVAL
If the method was called after Db::open was called; or if an invalid flag value or parameter was specified.

@@ -78,9 +75,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

h_ffactorp
-The Db::get_h_ffactor method returns the +
+
h_ffactorp
The Db::get_h_ffactor method returns the hash table density in h_ffactorp.

@@ -92,6 +88,6 @@ hash table density in h_ffactorp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_h_hash.html b/db/docs/api_cxx/db_set_h_hash.html index d0c029cf0..ada6bc005 100644 --- a/db/docs/api_cxx/db_set_h_hash.html +++ b/db/docs/api_cxx/db_set_h_hash.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::set_h_hash - + -

Db::set_h_hash

API -Ref -
+Ref +


@@ -50,12 +49,10 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

h_hash_fcn
-The h_hash_fcn parameter is the application-specified hash function. +
+
h_hash_fcn
The h_hash_fcn parameter is the application-specified hash function.

Application-specified hash functions take a pointer to a byte string and -a length as parameters, and return a value of type -u_int32_t. +a length as parameters, and return a value of type u_int32_t. The hash function must handle any key values used by the application (possibly including zero-length keys).

@@ -65,8 +62,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after Db::open was called; the +
+
EINVAL
If the method was called after Db::open was called; the specified hash function differs from the hash function with which the database was created; or if an invalid flag value or parameter was specified. @@ -80,6 +77,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_h_nelem.html b/db/docs/api_cxx/db_set_h_nelem.html index 3f6e4e0d1..6e1361239 100644 --- a/db/docs/api_cxx/db_set_h_nelem.html +++ b/db/docs/api_cxx/db_set_h_nelem.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::set_h_nelem - + -

Db::set_h_nelem

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ Db::get_h_nelem(u_int32_t *h_nelemp);
 


Description: Db::set_h_nelem

-

Set an estimate of the final size of the hash table.

In order for the estimate to be used when creating the database, the Db::set_h_ffactor method must also be called. If the estimate @@ -51,9 +49,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.
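As a sketch only (the element estimate and fill factor below are invented values), the two calls would typically appear together before the database is created:

    #include <db_cxx.h>

    int open_sized_hash(Db &db)
    {
        db.set_h_nelem(100000);  // estimated final number of keys in the table
        db.set_h_ffactor(40);    // desired keys per bucket; without this call the
                                 // element estimate above is not used at create time
        return db.open(NULL, "example.db", NULL, DB_HASH, DB_CREATE, 0);
    }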

Parameters

-

-

h_nelem
-The h_nelem parameter is an estimate of the final size of the +
+
h_nelem
The h_nelem parameter is an estimate of the final size of the hash table.

Errors

@@ -62,8 +59,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after Db::open was called; or if an +
+
EINVAL
If the method was called after Db::open was called; or if an invalid flag value or parameter was specified.

@@ -77,9 +74,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

h_nelemp
-The Db::get_h_nelem method returns the +
+
h_nelemp
The Db::get_h_nelem method returns the estimate of the final size of the hash table in h_nelemp.

@@ -91,6 +87,6 @@ estimate of the final size of the hash table in h_nelemp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_lorder.html b/db/docs/api_cxx/db_set_lorder.html index 05585e6c1..d3356861c 100644 --- a/db/docs/api_cxx/db_set_lorder.html +++ b/db/docs/api_cxx/db_set_lorder.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::set_lorder - + -

Db::set_lorder

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ Db::get_lorder(int *lorderp);
 


Description: Db::set_lorder

-

Set the byte order for integers in the stored database metadata. The host byte order of the machine where the Berkeley DB library was compiled will be used if no byte order is set.

@@ -54,9 +52,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

lorder
-The lorder parameter should represent the byte order as an +
+
lorder
The lorder parameter should represent the byte order as an integer; for example, big endian order is the number 4,321, and little endian order is the number 1,234.
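For illustration (the file name is an assumption), forcing big-endian metadata regardless of the host byte order is a one-line configuration:

    #include <db_cxx.h>

    int open_big_endian(Db &db)
    {
        db.set_lorder(4321);   // 4,321 selects big endian; 1,234 selects little endian
        return db.open(NULL, "example.db", NULL, DB_BTREE, DB_CREATE, 0);
    }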
@@ -66,8 +63,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after Db::open was called; or if an +
+
EINVAL
If the method was called after Db::open was called; or if an invalid flag value or parameter was specified.

@@ -82,9 +79,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

lorderp
-The Db::get_lorder method returns the +
+
lorderp
The Db::get_lorder method returns the database byte order in lorderp.

@@ -96,6 +92,6 @@ database byte order in lorderp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_msg_stream.html b/db/docs/api_cxx/db_set_msg_stream.html new file mode 100644 index 000000000..5d1c08775 --- /dev/null +++ b/db/docs/api_cxx/db_set_msg_stream.html @@ -0,0 +1,64 @@ + + + + + + + +Berkeley DB: Db::set_message_stream + + + + + + + +
+

Db::set_message_stream

+
+API +Ref
+


+ +

+#include <db_cxx.h>
+

+void Db::set_message_stream(class ostream*); +

+
+

Description: Db::set_message_stream

+

There are interfaces in the Berkeley DB library which either directly output +informational messages or statistical information, or configure the +library to output such messages when performing other operations, for +example, DbEnv::set_verbose and DbEnv::stat_print.

+

The DbEnv::set_message_stream and +Db::set_message_stream methods are used to display these messages for +the application. In this case, the message will include a trailing +<newline> character.

+

Setting stream to NULL unconfigures the interface.

+

Alternatively, you can use the DbEnv::set_msgfile and +Db::set_msgfile methods to display the messages via a C library FILE *, +or the DbEnv::set_msgcall and Db::set_msgcall methods to +capture the additional error information in a way that does not use +either output streams or C library FILE *'s. You should not mix these +approaches.

+

For Db handles opened inside of Berkeley DB environments, calling the +Db::set_message_stream method affects the entire environment and is equivalent to calling +the DbEnv::set_message_stream method.
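A minimal sketch, assuming the library was built against the standard C++ iostream headers so that std::cout is an acceptable output stream:

    #include <iostream>
    #include <db_cxx.h>

    void enable_messages(Db &db)
    {
        db.set_message_stream(&std::cout);   // route informational messages to standard output
    }

    void disable_messages(Db &db)
    {
        db.set_message_stream(NULL);         // unconfigure the interface
    }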

+

Parameters

+
+
stream
The stream parameter is the application-specified output stream to +be used for additional message information. +
+
+

Class

+Db +

See Also

+Databases and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_cxx/db_set_msgcall.html b/db/docs/api_cxx/db_set_msgcall.html new file mode 100644 index 000000000..08acee44b --- /dev/null +++ b/db/docs/api_cxx/db_set_msgcall.html @@ -0,0 +1,70 @@ + + + + + + + +Berkeley DB: Db::set_msgcall + + + + + + + +
+

Db::set_msgcall

+
+API +Ref
+


+ +

+#include <db_cxx.h>
+

+void Db::set_msgcall( + void (*db_msgcall_fcn)(const DbEnv *dbenv, char *msg)); +

+
+

Description: Db::set_msgcall

+

There are interfaces in the Berkeley DB library which either directly output +informational messages or statistical information, or configure the +library to output such messages when performing other operations, for +example, DbEnv::set_verbose and DbEnv::stat_print.

+

The DbEnv::set_msgcall and Db::set_msgcall methods are used to +pass these messages to the application, and Berkeley DB will call +db_msgcall_fcn with each message. It is up to the +db_msgcall_fcn function to display the message in an appropriate +manner.

+

Setting db_msgcall_fcn to NULL unconfigures the callback interface.

+

Alternatively, you can use the DbEnv::set_message_stream and +Db::set_message_stream methods to display the messages via an output +stream, or the Db::set_msgfile or DbEnv::set_msgfile methods +to display the messages via a C library FILE *.

+

For Db handles opened inside of Berkeley DB environments, calling the +Db::set_msgcall method affects the entire environment and is equivalent to calling +the DbEnv::set_msgcall method.

+

The Db::set_msgcall method may be called at any time during the life of the +application.
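A sketch of a handler matching the prototype above; sending the text to stderr with an added newline is an assumption about how the application wants to display messages:

    #include <stdio.h>
    #include <db_cxx.h>

    // Application-specified message handler: write each message to stderr.
    static void my_msgcall(const DbEnv *, char *msg)
    {
        fprintf(stderr, "bdb: %s\n", msg);
    }

    void enable_message_callback(Db &db)
    {
        db.set_msgcall(my_msgcall);   // pass NULL later to unconfigure the callback
    }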

+

Parameters

+
+
db_msgcall_fcn
The db_msgcall_fcn parameter is the application-specified message +reporting function. The function takes two parameters: +
+
dbenv
The dbenv parameter is the enclosing database environment. +
msg
The msg parameter is the message string. +
+
+
+

Class

+Db +

See Also

+Databases and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_cxx/db_set_msgfile.html b/db/docs/api_cxx/db_set_msgfile.html new file mode 100644 index 000000000..133ca3623 --- /dev/null +++ b/db/docs/api_cxx/db_set_msgfile.html @@ -0,0 +1,72 @@ + + + + + + + +Berkeley DB: Db::set_msgfile + + + + + + + +
+

Db::set_msgfile

+
+API +Ref
+


+ +

+#include <db_cxx.h>
+

+void Db::set_msgfile(FILE *msgfile); +

+void Db::get_msgfile(FILE **msgfilep); +

+
+

Description: Db::set_msgfile

+

There are interfaces in the Berkeley DB library which either directly output +informational messages or statistical information, or configure the +library to output such messages when performing other operations, for +example, DbEnv::set_verbose and DbEnv::stat_print.

+

The DbEnv::set_msgfile and Db::set_msgfile methods are used to +display these messages for the application. In this case, the message +will include a trailing <newline> character.

+

Setting msgfile to NULL unconfigures the interface.

+

Alternatively, you can use the DbEnv::set_message_stream and +Db::set_message_stream methods to display the messages via an output +stream, or the DbEnv::set_msgcall and Db::set_msgcall methods +to capture the additional error information in a way that does not use +either output streams or C library FILE *'s. You should not mix these +approaches.

+

For Db handles opened inside of Berkeley DB environments, calling the +Db::set_msgfile method affects the entire environment and is equivalent to calling +the DbEnv::set_msgfile method.

+

The Db::set_msgfile method may be called at any time during the life of the +application.

+

Parameters

+
+
msgfile
The msgfile parameter is a C library FILE * to be used for +displaying messages. +
+
+

Description: Db::get_msgfile

+

The Db::get_msgfile method returns the FILE * configured for displaying messages in the msgfilep parameter.

+

The Db::get_msgfile method may be called at any time during the life of the +application.

+
+

Class

+Db +

See Also

+Databases and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_cxx/db_set_pagesize.html b/db/docs/api_cxx/db_set_pagesize.html index d5742d393..059b5f05c 100644 --- a/db/docs/api_cxx/db_set_pagesize.html +++ b/db/docs/api_cxx/db_set_pagesize.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::set_pagesize - + -

Db::set_pagesize

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ Db::get_pagesize(u_int32_t *pagesizep);
 


Description: Db::set_pagesize

-

Set the size of the pages used to hold items in the database, in bytes. The minimum page size is 512 bytes, the maximum page size is 64K bytes, and the page size must be a power-of-two. If the page size is not @@ -56,9 +54,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

pagesize
-The pagesize parameter sets the database page size. +
+
pagesize
The pagesize parameter sets the database page size.

Errors

The Db::set_pagesize method @@ -66,8 +63,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after Db::open was called; or if an +
+
EINVAL
If the method was called after Db::open was called; or if an invalid flag value or parameter was specified.

@@ -81,9 +78,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

pagesizep
-The Db::get_pagesize method returns the +
+
pagesizep
The Db::get_pagesize method returns the page size in pagesizep.

@@ -95,6 +91,6 @@ page size in pagesizep.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_paniccall.html b/db/docs/api_cxx/db_set_paniccall.html index 3cb84ee5f..e060c0bd4 100644 --- a/db/docs/api_cxx/db_set_paniccall.html +++ b/db/docs/api_cxx/db_set_paniccall.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: Db::set_paniccall - + -

Db::set_paniccall

API -Ref -
+Ref +


@@ -51,14 +50,13 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

db_panic_fcn
-The db_panic_fcn parameter is the application-specified function +
+
db_panic_fcn
The db_panic_fcn parameter is the application-specified function called in the case of a database environment panic. The function takes two arguments: -

-

dbenv
The dbenv parameter is the enclosing database environment handle. -

errval
The errval parameter is the error value that would have been +
+
dbenv
The dbenv parameter is the enclosing database environment handle. +
errval
The errval parameter is the error value that would have been returned to the caller if DB_RUNRECOVERY were not going to be returned instead.
@@ -72,6 +70,6 @@ returned instead.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_q_extentsize.html b/db/docs/api_cxx/db_set_q_extentsize.html index e43fa322f..2b15bcbee 100644 --- a/db/docs/api_cxx/db_set_q_extentsize.html +++ b/db/docs/api_cxx/db_set_q_extentsize.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::set_q_extentsize - + -

Db::set_q_extentsize

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ Db::get_q_extentsize(u_int32_t *extentsizep);
 


Description: Db::set_q_extentsize

-

Set the size of the extents used to hold pages in a Queue database, specified as a number of pages. Each extent is created as a separate physical file. If no extent size is set, the default behavior is to @@ -51,9 +49,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

extentsize
-The extentsize parameter is the number of pages in a Queue database +
+
extentsize
The extentsize parameter is the number of pages in a Queue database extent.

Errors

@@ -62,8 +59,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after Db::open was called; or if an +
+
EINVAL
If the method was called after Db::open was called; or if an invalid flag value or parameter was specified.

@@ -77,9 +74,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

extentsizep
-The Db::get_q_extentsize method returns the +
+
extentsizep
The Db::get_q_extentsize method returns the number of pages in an extent in extentsizep.

@@ -91,6 +87,6 @@ number of pages in an extent in extentsizep.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_re_delim.html b/db/docs/api_cxx/db_set_re_delim.html index 86b09d62c..a4514cac1 100644 --- a/db/docs/api_cxx/db_set_re_delim.html +++ b/db/docs/api_cxx/db_set_re_delim.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::set_re_delim - + -

Db::set_re_delim

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ Db::get_re_delim(int *delimp);
 


Description: Db::set_re_delim

-

Set the delimiting byte used to mark the end of a record in the backing source file for the Recno access method.

This byte is used for variable length records if the re_source @@ -51,9 +49,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

re_delim
-The re_delim parameter is the delimiting byte used to mark the +
+
re_delim
The re_delim parameter is the delimiting byte used to mark the end of a record.

Errors

@@ -62,8 +59,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after Db::open was called; or if an +
+
EINVAL
If the method was called after Db::open was called; or if an invalid flag value or parameter was specified.

@@ -77,9 +74,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

delimp
-The Db::get_re_delim method returns the +
+
delimp
The Db::get_re_delim method returns the delimiting byte in delimp.

@@ -91,6 +87,6 @@ delimiting byte in delimp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_re_len.html b/db/docs/api_cxx/db_set_re_len.html index ba3b7dde3..44c9c3f1c 100644 --- a/db/docs/api_cxx/db_set_re_len.html +++ b/db/docs/api_cxx/db_set_re_len.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::set_re_len - + -

Db::set_re_len

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ Db::get_re_len(u_int32_t *re_lenp);
 


Description: Db::set_re_len

-

For the Queue access method, specify that the records are of length re_len. For the Queue access method, the record length must be enough smaller than the database's page size that at least one record @@ -58,9 +56,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

re_len
-The re_len parameter is the length of a Queue or Recno database +
+
re_len
The re_len parameter is the length of a Queue or Recno database record, in bytes.
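As a sketch (record length, pad byte, extent size, and file name are all invented values), a fixed-length Queue might be configured as follows:

    #include <db_cxx.h>

    int open_fixed_length_queue(Db &db)
    {
        db.set_re_len(128);          // every record occupies exactly 128 bytes
        db.set_re_pad('.');          // pad short records with '.' rather than <space>
        db.set_q_extentsize(2048);   // store the queue in 2048-page extent files
        return db.open(NULL, "jobs.db", NULL, DB_QUEUE, DB_CREATE, 0);
    }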

Errors

@@ -69,8 +66,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after Db::open was called; or if an +
+
EINVAL
If the method was called after Db::open was called; or if an invalid flag value or parameter was specified.

@@ -84,9 +81,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

re_lenp
-The Db::get_re_len method returns the +
+
re_lenp
The Db::get_re_len method returns the record length in re_lenp.

@@ -98,6 +94,6 @@ record length in re_lenp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_re_pad.html b/db/docs/api_cxx/db_set_re_pad.html index 7f8ffadbc..83e146e65 100644 --- a/db/docs/api_cxx/db_set_re_pad.html +++ b/db/docs/api_cxx/db_set_re_pad.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::set_re_pad - + -

Db::set_re_pad

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ Db::get_re_pad(int *re_padp);
 


Description: Db::set_re_pad

-

Set the padding character for short, fixed-length records for the Queue and Recno access methods.

If no pad character is specified, <space> characters (that @@ -49,9 +47,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

re_pad
-The re_pad parameter is the pad character for fixed-length +
+
re_pad
The re_pad parameter is the pad character for fixed-length records for the Queue and Recno access methods.

Errors

@@ -60,8 +57,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after Db::open was called; or if an +
+
EINVAL
If the method was called after Db::open was called; or if an invalid flag value or parameter was specified.

@@ -75,9 +72,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

re_padp
-The Db::get_re_pad method returns the +
+
re_padp
The Db::get_re_pad method returns the pad character in re_padp.

@@ -89,6 +85,6 @@ pad character in re_padp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_set_re_source.html b/db/docs/api_cxx/db_set_re_source.html index dccad0489..548a48214 100644 --- a/db/docs/api_cxx/db_set_re_source.html +++ b/db/docs/api_cxx/db_set_re_source.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::set_re_source - + -

Db::set_re_source

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ Db::get_re_source(const char **sourcep);
 


Description: Db::set_re_source

-

Set the underlying source file for the Recno access method. The purpose of the source value is to provide fast access and modification to databases that are normally stored as flat text files.

@@ -89,9 +87,10 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

source
-The backing flat text database file for a Recno database. +
+
source
The backing flat text database file for a Recno database. +

On Windows, the source argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters.
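As a sketch (the path names are assumptions), a Recno database kept in step with a flat text file might be opened and flushed like this:

    #include <db_cxx.h>

    int open_text_backed_recno(Db &db)
    {
        db.set_re_source("/tmp/records.txt");      // flat-text file backing the database
        db.set_flags(DB_RENUMBER | DB_SNAPSHOT);   // mutable record numbers; read the
                                                   // source file in full at open time
        return db.open(NULL, "records.db", NULL, DB_RECNO, DB_CREATE, 0);
    }

    // Flush modified records; with a re_source configured, this also rewrites
    // the backing text file.
    int flush_to_source(Db &db)
    {
        return db.sync(0);
    }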

Errors

The Db::set_re_source method @@ -99,8 +98,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after Db::open was called; or if an +
+
EINVAL
If the method was called after Db::open was called; or if an invalid flag value or parameter was specified.

@@ -117,6 +116,6 @@ application.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_stat.html b/db/docs/api_cxx/db_stat.html index 18ddb4d0c..57cf27199 100644 --- a/db/docs/api_cxx/db_stat.html +++ b/db/docs/api_cxx/db_stat.html @@ -1,37 +1,60 @@ - - + + Berkeley DB: Db::stat - + -

Db::stat

API -Ref -
+Ref +


 #include <db_cxx.h>
 

int -Db::stat(void *sp, u_int32_t flags); +Db::stat(void *sp, DB_TXN *txnid, u_int32_t flags); +

+int +Db::stat_print(u_int32_t flags);


Description: Db::stat

-

The Db::stat method creates a statistical structure and -copies a pointer to it into user-specified memory locations. -Specifically, if sp is non-NULL, a pointer to the statistics -for the database are copied into the memory location to which it refers.

+

The Db::stat method creates a statistical structure and copies a +pointer to it into user-specified memory locations. Specifically, if +sp is non-NULL, a pointer to the statistics for the database are +copied into the memory location to which it refers.

+

Parameters

+
+
flags
The flags parameter must be set to 0 or +one of the following values: +
+
DB_DEGREE_2
Database items read during this operation will have degree 2 isolation. +This ensures the stability of the data items read during the stat +operation but permits that data to be modified or deleted by other +transactions prior to the commit of the specified transaction. +
DB_DIRTY_READ
Database items read during this operation may include modified but not +yet committed data. Silently ignored if the DB_DIRTY_READ flag +was not specified when the underlying database was opened. +
DB_FAST_STAT
Return only the values which do not require traversal of the database. +Among other things, this flag makes it possible for applications to +request key and record counts without incurring the performance penalty +of traversing the entire database. +
+
txnid
If the operation is to be transaction-protected, +the txnid parameter is a transaction handle returned from +DbEnv::txn_begin; otherwise, NULL. +

Statistical structures are stored in allocated memory. If application-specific allocation routines have been declared (see DbEnv::set_alloc for more information), they are used to allocate the memory; otherwise, the @@ -57,10 +80,9 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Hash Statistics

-

In the case of a Hash database, -the statistics are stored in a structure of type DB_HASH_STAT. The -following fields will be filled in:

-

+

In the case of a Hash database, the statistics are stored in a structure +of type DB_HASH_STAT. The following fields will be filled in:

+
u_int32_t hash_magic;
Magic number that identifies the file as a Hash file. Returned if DB_FAST_STAT is set.
u_int32_t hash_version;
The version of the Hash database. Returned if DB_FAST_STAT is @@ -89,28 +111,39 @@ that did not fit in the main bucket page).
u_int32_t hash_dup_free;
The number of bytes free on duplicate pages.

Btree and Recno Statistics

-

In the case of a Btree or Recno database, -the statistics are stored in a structure of type DB_BTREE_STAT. The -following fields will be filled in:

-

+

In the case of a Btree or Recno database, the statistics are stored in +a structure of type DB_BTREE_STAT. The following fields will be filled +in:

+
u_int32_t bt_magic;
Magic number that identifies the file as a Btree database. Returned if DB_FAST_STAT is set.
u_int32_t bt_version;
The version of the Btree database. Returned if DB_FAST_STAT is set. -
u_int32_t bt_nkeys;
For the Btree Access Method, the number of unique keys in the database. -If DB_FAST_STAT was specified and the database was created with -the DB_RECNUM flag, the count will be exact, otherwise, the -count will be the last saved value unless it has never been calculated, -in which case it will be 0. For the Recno Access Method, the exact -number of records in the database. Returned if DB_FAST_STAT is -set. +
u_int32_t bt_nkeys;
For the Btree Access Method, the number of keys in the database. If +the DB_FAST_STAT flag is not specified or the database was +configured to support record numbers (see DB_RECNUM), the count +will be exact. Otherwise, the count will be the last saved value unless +it has never been calculated, in which case it will be 0. +

For the Recno Access Method, the number of records in the database. If +the database was configured with mutable record numbers (see +DB_RENUMBER), the count will be exact. Otherwise, if the +DB_FAST_STAT flag is specified the count will be exact but will +include deleted and implicitly created records; if the +DB_FAST_STAT flag is not specified, the count will be exact and +will not include deleted or implicitly created records.

+

Returned if DB_FAST_STAT is set.

u_int32_t bt_ndata;
For the Btree Access Method, the number of key/data pairs in the -database. If DB_FAST_STAT was specified the count will be the -last saved value unless it has never been calculated, in which case it -will be 0. For the Recno Access Method, the exact number of records in -the database. If the database has been configured to not renumber -records during deletion, the count of records will only reflect -undeleted records. Returned if DB_FAST_STAT is set. +database. If the DB_FAST_STAT flag is not specified, the count +will be exact. Otherwise, the count will be the last saved value unless +it has never been calculated, in which case it will be 0. +

For the Recno Access Method, the number of records in the database. If +the database was configured with mutable record numbers (see +DB_RENUMBER), the count will be exact. Otherwise, if the +DB_FAST_STAT flag is specified the count will be exact but will +include deleted and implicitly created records; if the +DB_FAST_STAT flag is not specified, the count will be exact and +will not include deleted or implicitly created records.

+

Returned if DB_FAST_STAT is set.

u_int32_t bt_pagesize;
Underlying database page size, in bytes. Returned if DB_FAST_STAT is set.
u_int32_t bt_minkey;
The minimum keys per page. Returned if DB_FAST_STAT is set. @@ -123,6 +156,7 @@ DB_FAST_STAT is set.
u_int32_t bt_leaf_pg;
Number of database leaf pages.
u_int32_t bt_dup_pg;
Number of database duplicate pages.
u_int32_t bt_over_pg;
Number of database overflow pages. +
u_int32_t bt_empty_pg;
Number of empty database pages.
u_int32_t bt_free;
Number of pages on the free list.
u_int32_t bt_int_pgfree;
Number of bytes free in database internal pages.
u_int32_t bt_leaf_pgfree;
Number of bytes free in database leaf pages. @@ -130,10 +164,10 @@ DB_FAST_STAT is set.
u_int32_t bt_over_pgfree;
Number of bytes free in database overflow pages.

Queue Statistics

-

In the case of a Queue database, -the statistics are stored in a structure of type DB_QUEUE_STAT. The -following fields will be filled in:

-

+

In the case of a Queue database, the statistics are stored in a +structure of type DB_QUEUE_STAT. The following fields will be filled +in:

+
u_int32_t qs_magic;
Magic number that identifies the file as a Queue file. Returned if DB_FAST_STAT is set.
u_int32_t qs_version;
The version of the Queue file type. Returned if DB_FAST_STAT @@ -159,41 +193,44 @@ DB_FAST_STAT is set. DB_FAST_STAT is set.
u_int32_t qs_cur_recno;
Next available record number. Returned if DB_FAST_STAT is set.
-

Parameters

-

-

flags
-The flags parameter must be set to 0 or -one of the following values: -

-

DB_FAST_STAT
Return only the values which do not require traversal of the database. -

Among other things, this flag makes it possible for applications to -request key and record counts without incurring the performance penalty -of traversing the entire database. If the underlying database is of -type Recno, or of type Btree and the database was created with the -DB_RECNUM flag, the count of keys will be exact. Otherwise, -the count of keys will be the value saved the last time the database -was traversed, or 0 if no count of keys has ever been made. If the -underlying database is of type Recno, the count of data items will be -exact, otherwise, the count of data items will be the value saved the -last time the database was traversed, or 0 if no count of data items -has ever been done.

-
-

Errors

The Db::stat method may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

+

Description: Db::stat_print

+

The Db::stat_print method returns the +database statistical information, as described for the Db::stat method. +The information is printed to a specified output channel (see the +DbEnv::set_msgfile method for more information), or passed to an +application callback function (see the DbEnv::set_msgcall method for +more information).

+

The Db::stat_print method may not be called before the Db::open method has been +called.

+

The Db::stat_print method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+

Parameters

+
+
flags
The flags parameter must be set to 0 or +the following value: +
+
DB_STAT_ALL
Display all available information. +
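As a sketch (writing to the process's standard output is an assumption), statistics can be routed through Db::set_msgfile and then printed; the Db handle must already have been opened:

    #include <stdio.h>
    #include <db_cxx.h>

    int print_db_statistics(Db &db)
    {
        db.set_msgfile(stdout);             // choose the output channel for messages
        return db.stat_print(DB_STAT_ALL);  // display all available information
    }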
+
+

Class

Db

See Also

@@ -202,6 +239,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_sync.html b/db/docs/api_cxx/db_sync.html index 1618c2da6..dc481367d 100644 --- a/db/docs/api_cxx/db_sync.html +++ b/db/docs/api_cxx/db_sync.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::sync - + -

Db::sync

API -Ref -
+Ref +


@@ -47,9 +46,8 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

flags
-The flags parameter is currently unused, and must be set to 0. +
+
flags
The flags parameter is currently unused, and must be set to 0.

Errors

The Db::sync method @@ -57,12 +55,12 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -74,6 +72,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_truncate.html b/db/docs/api_cxx/db_truncate.html index 56b2b32b6..c6e12ee1c 100644 --- a/db/docs/api_cxx/db_truncate.html +++ b/db/docs/api_cxx/db_truncate.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::truncate - + -

Db::truncate

API -Ref -
+Ref +


@@ -29,9 +28,12 @@ Db::truncate(DbTxn *txnid, u_int32_t *countp, u_int32_t flags);
 

Description: Db::truncate

The Db::truncate method empties the database, discarding all records -it contains. -The number of records discarded from the database is returned in -countp.

+it contains. The number of records discarded from the database is +returned in countp.

+

When called on a database configured with secondary indices using the +Db::associate method, the Db::truncate method truncates the primary +database and all secondary indices. A count of the records discarded +from the primary database is returned.

It is an error to call the Db::truncate method on a database with open cursors.
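As a sketch, emptying a database inside a self-contained transaction via DB_AUTO_COMMIT might look like this:

    #include <db_cxx.h>

    // On success, returns 0 and leaves the number of discarded records in count.
    int empty_database(Db &db, u_int32_t &count)
    {
        return db.truncate(NULL, &count, DB_AUTO_COMMIT);
    }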

The Db::truncate method @@ -40,20 +42,17 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

countp
-The countp parameter references memory into which +
+
countp
The countp parameter references memory into which the number of records discarded from the database is copied. -

flags
-The flags parameter must be set to 0 or +
flags
The flags parameter must be set to 0 or the following value: -

-

DB_AUTO_COMMIT
Enclose the Db::truncate call within a transaction. If the call succeeds, +
+
DB_AUTO_COMMIT
Enclose the Db::truncate call within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, the operation will have made no changes.
-

txnid
-If the operation is to be transaction-protected, +
txnid
If the operation is to be transaction-protected, (other than by specifying the DB_AUTO_COMMIT flag), the txnid parameter is a transaction handle returned from DbEnv::txn_begin; otherwise, NULL. @@ -64,8 +63,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If there are open cursors in the database; or if an +
+
EINVAL
If there are open cursors in the database; or if an invalid flag value or parameter was specified.

If a transactional database environment operation was selected to @@ -85,6 +84,6 @@ throw a DbLockNotGrantedException exc

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_upgrade.html b/db/docs/api_cxx/db_upgrade.html index ff409f638..0a3938fd7 100644 --- a/db/docs/api_cxx/db_upgrade.html +++ b/db/docs/api_cxx/db_upgrade.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::upgrade - + -

Db::upgrade

API -Ref -
+Ref +


@@ -44,15 +43,13 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

file
-The file parameter is the physical file containing the databases +
+
file
The file parameter is the physical file containing the databases to be upgraded. -

flags
-The flags parameter must be set to 0 or +
flags
The flags parameter must be set to 0 or the following value: -

-

DB_DUPSORT
This flag is only meaningful when upgrading databases from +
+
DB_DUPSORT
This flag is only meaningful when upgrading databases from releases before the Berkeley DB 3.1 release.

As part of the upgrade from the Berkeley DB 3.0 release to the 3.1 release, the on-disk format of duplicate data items changed. To correctly @@ -88,11 +85,11 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

DB_OLD_VERSION
The database cannot be upgraded by this version of the Berkeley DB software. +
+
DB_OLD_VERSION
The database cannot be upgraded by this version of the Berkeley DB software.
-

-

EINVAL
If the database is not in the same byte-order as the system; or if an +
+
EINVAL
If the database is not in the same byte-order as the system; or if an invalid flag value or parameter was specified.

@@ -104,6 +101,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/db_verify.html b/db/docs/api_cxx/db_verify.html index f0f74d572..ea4670df3 100644 --- a/db/docs/api_cxx/db_verify.html +++ b/db/docs/api_cxx/db_verify.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Db::verify - + -

Db::verify

API -Ref -
+Ref +


@@ -41,9 +40,8 @@ thread of control.

The Db handle may not be accessed again after Db::verify is called, regardless of its return.

- -

-The Db::verify method will return DB_VERIFY_BAD if a database is + +

The Db::verify method will return DB_VERIFY_BAD if a database is corrupted. When the DB_SALVAGE flag is specified, the DB_VERIFY_BAD return means that all key/data pairs in the file may not have been successfully output. @@ -53,22 +51,19 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

database
-The database parameter is the database in file on which +
+
database
The database parameter is the database in file on which the database checks for btree and duplicate sort order and for hashing are to be performed. See the DB_ORDERCHKONLY flag for more information.

The database parameter must be set to NULL except when the DB_ORDERCHKONLY flag is set.

-

file
-The file parameter is the physical file in which the databases +
file
The file parameter is the physical file in which the databases to be verified are found. -

flags
-The flags parameter must be set to 0 or +
flags
The flags parameter must be set to 0 or the following value: -

-

DB_SALVAGE
Write the key/data pairs from all databases in the file to the file +
+
DB_SALVAGE
Write the key/data pairs from all databases in the file to the file stream named in the outfile parameter. The output format is the same as that specified for the db_dump utility, and can be used as input for the db_load utility. @@ -78,8 +73,8 @@ pairs normally produces less than optimal loads for Btree databases.

In addition, the following flags may be set by bitwise inclusively OR'ing them into the flags parameter:

-

-

DB_AGGRESSIVE
Output all the key/data pairs in the file that can be found. +
+
DB_AGGRESSIVE
Output all the key/data pairs in the file that can be found. By default, Db::verify does not assume corruption. For example, if a key/data pair on a page is marked as deleted, it is not then written to the output file. When DB_AGGRESSIVE is specified, corruption @@ -88,14 +83,14 @@ case, key/data pairs that are corrupted or have been deleted may appear in the output (even if the file being salvaged is in no way corrupt), and the output will almost certainly require editing before being loaded into a database. -

DB_PRINTABLE
When using the DB_SALVAGE flag, if characters in either the key +
DB_PRINTABLE
When using the DB_SALVAGE flag, if characters in either the key or data items are printing characters (as defined by isprint(3)), use printing characters to represent them. This flag permits users to use standard text editors and tools to modify the contents of databases or selectively remove data from salvager output.

Note: different systems may have different notions about what characters are considered printing characters, and databases dumped in this manner may be less portable to external systems.

-

DB_NOORDERCHK
Skip the database checks for btree and duplicate sort order and for +
DB_NOORDERCHK
Skip the database checks for btree and duplicate sort order and for hashing.

The Db::verify method normally verifies that btree keys and duplicate items are correctly sorted, and hash keys are correctly hashed. If the @@ -108,7 +103,7 @@ first perform verification of the file as a whole by using the DB_NOORDERCHK flag, and then individually verify the sort order and hashing function for each database in the file using the DB_ORDERCHKONLY flag.

-

DB_ORDERCHKONLY
Perform the database checks for btree and duplicate sort order and for +
DB_ORDERCHKONLY
Perform the database checks for btree and duplicate sort order and for hashing, skipped by DB_NOORDERCHK.

When this flag is specified, a database parameter should also be specified, indicating the database in the physical file which is to be @@ -116,8 +111,7 @@ checked. This flag is only safe to use on databases that have already successfully been verified using Db::verify with the DB_NOORDERCHK flag set.

-

outfile
-The outfile parameter is an optional file stream to which the +
outfile
The outfile parameter is an optional file stream to which the databases' key/data pairs are written.
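As a sketch (the file name is an assumption, and std::cout is usable only if the library was built against the standard iostream headers), a salvage pass over a possibly corrupted file might look like this:

    #include <iostream>
    #include <db_cxx.h>

    // Salvage whatever key/data pairs can be recovered from "broken.db",
    // writing db_dump-compatible output to standard output.
    int salvage_file()
    {
        Db db(NULL, 0);
        // The Db handle may not be accessed again after Db::verify returns.
        return db.verify("broken.db", NULL, &std::cout,
            DB_SALVAGE | DB_AGGRESSIVE | DB_PRINTABLE);
    }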

Environment Variables

@@ -128,16 +122,16 @@ database environment home.

the DbEnv::set_data_dir method, or by setting the "set_data_dir" string in the environment's DB_CONFIG file.

Errors

-

-

ENOENT
The file or directory does not exist. +
+
ENOENT
The file or directory does not exist.

The Db::verify method may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If Db::verify was called after Db::open; or if an +
+
EINVAL
If Db::verify was called after Db::open; or if an invalid flag value or parameter was specified.

@@ -149,6 +143,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/dbc_class.html b/db/docs/api_cxx/dbc_class.html index 6c581bc24..e9502b24b 100644 --- a/db/docs/api_cxx/dbc_class.html +++ b/db/docs/api_cxx/dbc_class.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Dbc - + -

Dbc

API -Ref -
+Ref +


@@ -39,6 +38,6 @@ handle may not be accessed again, regardless of the method's return.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/dbc_close.html b/db/docs/api_cxx/dbc_close.html index 97b660ba4..3136d03ea 100644 --- a/db/docs/api_cxx/dbc_close.html +++ b/db/docs/api_cxx/dbc_close.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Dbc::close - + -

Dbc::close

API -Ref -
+Ref +


@@ -47,8 +46,8 @@ may fail and throw
 DbException,
 encapsulating one of the following non-zero errors, or return one of
 the following non-zero errors:

-

-

EINVAL
If the cursor is already closed; or if an +
+
EINVAL
If the cursor is already closed; or if an invalid flag value or parameter was specified.

If a transactional database environment operation was selected to @@ -68,6 +67,6 @@ throw a DbLockNotGrantedException exc

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/dbc_count.html b/db/docs/api_cxx/dbc_count.html index 85eae93c4..3d081300e 100644 --- a/db/docs/api_cxx/dbc_count.html +++ b/db/docs/api_cxx/dbc_count.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Dbc::count - + -

Dbc::count

API -Ref -
+Ref +


@@ -36,12 +35,10 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

countp
-The countp parameter references memory into which +
+
countp
The countp parameter references memory into which the count of the number of duplicate data items is copied. -

flags
-The flags parameter is currently unused, and must be set to 0. +
flags
The flags parameter is currently unused, and must be set to 0.

Errors

The Dbc::count method @@ -49,12 +46,12 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

EINVAL
If the cursor has not been initialized; or if an +
+
EINVAL
If the cursor has not been initialized; or if an invalid flag value or parameter was specified.

@@ -66,6 +63,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/dbc_del.html b/db/docs/api_cxx/dbc_del.html index 04d87effb..41d47c7eb 100644 --- a/db/docs/api_cxx/dbc_del.html +++ b/db/docs/api_cxx/dbc_del.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Dbc::del - + -

Dbc::del

API -Ref -
+Ref +


@@ -37,17 +36,15 @@ indices.

The cursor position is unchanged after a delete, and subsequent calls to cursor functions expecting the cursor to refer to an existing key will fail.

-

-The Dbc::del method will return DB_KEYEMPTY if the element has already been deleted. +

The Dbc::del method will return DB_KEYEMPTY if the element has already been deleted. Unless otherwise specified, the Dbc::del method either returns a non-zero error value or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flags
-The flags parameter is currently unused, and must be set to 0. +
+
flags
The flags parameter is currently unused, and must be set to 0.

Errors

The Dbc::del method @@ -55,22 +52,22 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

DB_SECONDARY_BAD
A secondary index references a nonexistent primary key. +
+
DB_SECONDARY_BAD
A secondary index references a nonexistent primary key.
-

-

EACCES
An attempt was made to modify a read-only database. +
+
EACCES
An attempt was made to modify a read-only database.
-

-

EINVAL
If the cursor has not been initialized; or if an +
+
EINVAL
If the cursor has not been initialized; or if an invalid flag value or parameter was specified.
-

-

EPERM
Write attempted on read-only cursor when the DB_INIT_CDB flag was +
+
EPERM
Write attempted on read-only cursor when the DB_INIT_CDB flag was specified to DbEnv::open.

If a transactional database environment operation was selected to @@ -90,6 +87,6 @@ throw a DbLockNotGrantedException exc

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/dbc_dup.html b/db/docs/api_cxx/dbc_dup.html index 57584570c..3c1e656eb 100644 --- a/db/docs/api_cxx/dbc_dup.html +++ b/db/docs/api_cxx/dbc_dup.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Dbc::dup - + -

Dbc::dup

API -Ref -
+Ref +


@@ -38,16 +37,16 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
+
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_POSITION
The newly created cursor is initialized to refer to the same position -in the database as the original cursor and hold the same locks. If the -DB_POSITION flag is not specified, then the created cursor is -uninitialized and will behave like a cursor newly created using -Db::cursor. +
+
DB_POSITION
The newly created cursor is initialized to refer to the same position +in the database as the original cursor (if any) and hold the same locks +(if any). If the DB_POSITION flag is not specified, or the +original cursor does not hold a database position and locks, the created +cursor is uninitialized and will behave like a cursor newly created +using Db::cursor.
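As a sketch, cloning a positioned cursor so that one copy can continue scanning while the other keeps its place:

    #include <db_cxx.h>

    // Duplicate dbc at its current position; the caller closes both cursors.
    int clone_cursor(Dbc *dbc, Dbc **clonep)
    {
        return dbc->dup(clonep, DB_POSITION);
    }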

Errors

@@ -56,12 +55,12 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

EINVAL
If the cursor has not been initialized; or if an +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -73,6 +72,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/dbc_get.html b/db/docs/api_cxx/dbc_get.html index 857ddda42..05facbe84 100644 --- a/db/docs/api_cxx/dbc_get.html +++ b/db/docs/api_cxx/dbc_get.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Dbc::get - + -

Dbc::get

API -Ref -
+Ref +


@@ -31,23 +30,17 @@ Dbc::pget(Dbt *key, Dbt *pkey, Dbt *data, u_int32_t flags);
 


Description: Dbc::get

-

The Dbc::get method retrieves key/data pairs from the database. The -address and length of the key -are returned in the object to which key refers (except for the -case of the DB_SET flag, in which the key object is -unchanged), and the address -and length of the data are returned in the object to which data -refers.

+address and length of the key are returned in the object to which +key refers (except for the case of the DB_SET flag, in +which the key object is unchanged), and the address and length +of the data are returned in the object to which data refers.

When called on a cursor opened on a database that has been made into a secondary index using the Db::associate method, the Dbc::get and Dbc::pget methods return the key from the secondary index and the -data item from the primary database. In addition, the -Dbc::pget method +data item from the primary database. In addition, the Dbc::pget method returns the key from the primary database. In databases that are not -secondary indices, the -Dbc::pget method -will always fail.

+secondary indices, the Dbc::pget method will always fail.

Modifications to the database during a sequential scan will be reflected in the scan; that is, records inserted behind a cursor will not be returned while records inserted in front of a cursor will be returned.

@@ -62,76 +55,65 @@ failure, and returns 0 on success.

If Dbc::get fails for any reason, the state of the cursor will be unchanged.
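As a sketch (database setup and error handling are reduced to the DB_NOTFOUND return that ends the loop), a full forward scan with DB_NEXT looks like this:

    #include <db_cxx.h>

    // Walk every key/data pair in the database; returns the number of pairs seen.
    int scan_all(Db &db)
    {
        Dbc *dbc;
        int count = 0;
        db.cursor(NULL, &dbc, 0);                     // open a cursor on the database
        Dbt key, data;
        while (dbc->get(&key, &data, DB_NEXT) == 0)   // DB_NOTFOUND ends the scan
            ++count;
        dbc->close();
        return count;
    }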

Parameters

-

-

data
-The data Dbt operated on. -

flags
-The flags parameter must be set to one of the following values: -

-

DB_CURRENT
Return the key/data pair to which the cursor refers. -

-The Dbc::get method will return DB_KEYEMPTY if DB_CURRENT is set and the cursor key/data pair was deleted. +

+
data
The data Dbt operated on. +
flags
The flags parameter must be set to one of the following values: +
+
DB_CURRENT
Return the key/data pair to which the cursor refers. +

The Dbc::get method will return DB_KEYEMPTY if DB_CURRENT is set and the cursor key/data pair was deleted.

-

DB_FIRST
The cursor is set to refer to the first key/data pair of the database, +
DB_FIRST
The cursor is set to refer to the first key/data pair of the database, and that pair is returned. If the first key has duplicate values, the first data item in the set of duplicates is returned.

If the database is a Queue or Recno database, Dbc::get using the DB_FIRST flag will ignore any keys that exist but were never explicitly created by the application, or were created and later deleted.

-

-The Dbc::get method will return DB_NOTFOUND if DB_FIRST is set and the database is empty. +

The Dbc::get method will return DB_NOTFOUND if DB_FIRST is set and the database is empty.

-

DB_GET_BOTH
The DB_GET_BOTH flag is identical to the DB_SET flag, +
DB_GET_BOTH
The DB_GET_BOTH flag is identical to the DB_SET flag, except that both the key and the data parameters must be matched by the key and data item in the database. -

When used with the -Dbc::pget method -version of this method on a secondary index handle, both the -secondary and primary keys must be matched by the secondary and primary -key item in the database. It is an error to use the DB_GET_BOTH -flag with the -Dbc::get -version of this method and a cursor that has been opened on a -secondary index handle.

-

DB_GET_BOTH_RANGE
The DB_GET_BOTH_RANGE flag is identical to the DB_GET_BOTH +

When used with the Dbc::pget method version of this method on a +secondary index handle, both the secondary and primary keys must be +matched by the secondary and primary key item in the database. It is +an error to use the DB_GET_BOTH flag with the Dbc::get +version of this method and a cursor that has been opened on a secondary +index handle.

+
DB_GET_BOTH_RANGE
The DB_GET_BOTH_RANGE flag is identical to the DB_GET_BOTH flag, except that, in the case of any database supporting sorted duplicate sets, the returned key/data pair is the smallest data item greater than or equal to the specified data item (as determined by the comparison function), permitting partial matches and range searches in duplicate data sets. -

DB_GET_RECNO
Return the record number associated with the cursor. The record number +
DB_GET_RECNO
Return the record number associated with the cursor. The record number will be returned in data, as described in Dbt. The key parameter is ignored.

For DB_GET_RECNO to be specified, the underlying database must be of type Btree, and it must have been created with the DB_RECNUM flag.

When called on a cursor opened on a database that has been made into a -secondary index, the -Dbc::get and Dbc::pget methods return -the record number of the primary database in data. In addition, -the -Dbc::pget method -returns the record number of the secondary index in pkey. If -either underlying database is not of type Btree or is not created with -the DB_RECNUM flag, the out-of-band record number of 0 is -returned.

-

DB_JOIN_ITEM
Do not use the data value found in all of the cursors as a lookup key for +secondary index, the Dbc::get and Dbc::pget methods return the +record number of the primary database in data. In addition, the +Dbc::pget method returns the record number of the secondary index in +pkey. If either underlying database is not of type Btree or is +not created with the DB_RECNUM flag, the out-of-band record +number of 0 is returned.

+
DB_JOIN_ITEM
Do not use the data value found in all of the cursors as a lookup key for the primary database, but simply return it in the key parameter instead. The data parameter is left unchanged.

For DB_JOIN_ITEM to be specified, the underlying cursor must have been returned from the Db::join method.

-

DB_LAST
The cursor is set to refer to the last key/data pair of the database, +
DB_LAST
The cursor is set to refer to the last key/data pair of the database, and that pair is returned. If the last key has duplicate values, the last data item in the set of duplicates is returned.

If the database is a Queue or Recno database, Dbc::get using the DB_LAST flag will ignore any keys that exist but were never explicitly created by the application, or were created and later deleted.

-

-The Dbc::get method will return DB_NOTFOUND if DB_LAST is set and the database is empty. +

The Dbc::get method will return DB_NOTFOUND if DB_LAST is set and the database is empty.

-

DB_NEXT
If the cursor is not yet initialized, DB_NEXT is identical to +
DB_NEXT
If the cursor is not yet initialized, DB_NEXT is identical to DB_FIRST. Otherwise, the cursor is moved to the next key/data pair of the database, and that pair is returned. In the presence of duplicate key values, the value of the key may not change. @@ -139,29 +121,26 @@ duplicate key values, the value of the key may not change. DB_NEXT flag will skip any keys that exist but were never explicitly created by the application, or those that were created and later deleted.

-

-The Dbc::get method will return DB_NOTFOUND if DB_NEXT is set and the cursor is already on the last record +

The Dbc::get method will return DB_NOTFOUND if DB_NEXT is set and the cursor is already on the last record in the database.

-

DB_NEXT_DUP
If the next key/data pair of the database is a duplicate data record for +
DB_NEXT_DUP
If the next key/data pair of the database is a duplicate data record for the current key/data pair, the cursor is moved to the next key/data pair of the database, and that pair is returned. -

-The Dbc::get method will return DB_NOTFOUND if DB_NEXT_DUP is set and the next key/data pair of the +

The Dbc::get method will return DB_NOTFOUND if DB_NEXT_DUP is set and the next key/data pair of the database is not a duplicate data record for the current key/data pair.

-

DB_NEXT_NODUP
If the cursor is not yet initialized, DB_NEXT_NODUP is identical +
DB_NEXT_NODUP
If the cursor is not yet initialized, DB_NEXT_NODUP is identical to DB_FIRST. Otherwise, the cursor is moved to the next non-duplicate key of the database, and that key/data pair is returned.

If the database is a Queue or Recno database, Dbc::get using the DB_NEXT_NODUP flag will ignore any keys that exist but were never explicitly created by the application, or those that were created and later deleted.

-

-The Dbc::get method will return DB_NOTFOUND if DB_NEXT_NODUP is set and no non-duplicate key/data pairs +

The Dbc::get method will return DB_NOTFOUND if DB_NEXT_NODUP is set and no non-duplicate key/data pairs occur after the cursor position in the database.

-

DB_PREV
If the cursor is not yet initialized, DB_PREV is identical to +
DB_PREV
If the cursor is not yet initialized, DB_PREV is identical to DB_LAST. Otherwise, the cursor is moved to the previous key/data pair of the database, and that pair is returned. In the presence of duplicate key values, the value of the key may not change. @@ -169,25 +148,22 @@ presence of duplicate key values, the value of the key may not change. DB_PREV flag will skip any keys that exist but were never explicitly created by the application, or those that were created and later deleted.

-

-The Dbc::get method will return DB_NOTFOUND if DB_PREV is set and the cursor is already on the first record +

The Dbc::get method will return DB_NOTFOUND if DB_PREV is set and the cursor is already on the first record in the database.

-

DB_PREV_NODUP
If the cursor is not yet initialized, DB_PREV_NODUP is identical +
DB_PREV_NODUP
If the cursor is not yet initialized, DB_PREV_NODUP is identical to DB_LAST. Otherwise, the cursor is moved to the previous non-duplicate key of the database, and that key/data pair is returned.

If the database is a Queue or Recno database, Dbc::get using the DB_PREV_NODUP flag will ignore any keys that exist but were never explicitly created by the application, or those that were created and later deleted.

-

-The Dbc::get method will return DB_NOTFOUND if DB_PREV_NODUP is set and no non-duplicate key/data pairs +

The Dbc::get method will return DB_NOTFOUND if DB_PREV_NODUP is set and no non-duplicate key/data pairs occur before the cursor position in the database.

-

DB_SET
Move the cursor to the specified key/data pair of the database, and +
DB_SET
Move the cursor to the specified key/data pair of the database, and return the datum associated with the given key. -

-The Dbc::get method will return DB_NOTFOUND if DB_SET is set and +

The Dbc::get method will return DB_NOTFOUND if DB_SET is set and no matching keys are found. The Dbc::get method will return DB_KEYEMPTY if DB_SET is set and the database is a Queue or Recno database, and the specified key exists, but was never @@ -195,29 +171,28 @@ explicitly created by the application or was later deleted. In the presence of duplicate key values, Dbc::get will return the first data item for the given key.

-

DB_SET_RANGE
The DB_SET_RANGE flag is identical to the DB_SET flag, +
DB_SET_RANGE
The DB_SET_RANGE flag is identical to the DB_SET flag, except that in the case of the Btree access method, the key is returned as well as the data item and the returned key/data pair is the smallest key greater than or equal to the specified key (as determined by the Btree comparison function), permitting partial key matches and range searches. -

DB_SET_RECNO
Move the cursor to the specific numbered record of the database, and +
DB_SET_RECNO
Move the cursor to the specific numbered record of the database, and return the associated key/data pair. The data field of the -specified key -must be a pointer to a memory location from which a db_recno_t -may be read, as described in Dbt. This memory location will be -read to determine the record to be retrieved. +specified key must be a pointer to a memory location from which +a db_recno_t may be read, as described in Dbt. This +memory location will be read to determine the record to be retrieved.

For DB_SET_RECNO to be specified, the underlying database must be of type Btree, and it must have been created with the DB_RECNUM flag.
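To illustrate the most common of the flags above, here is a sketch (dbp is a hypothetical open Btree Db handle; a fragment under those assumptions, not a definitive example) of a full walk with DB_NEXT followed by a range lookup with DB_SET_RANGE:

    Dbc *dbcp;
    dbp->cursor(NULL, &dbcp, 0);

    Dbt key, data;
    int ret;
    // Sequential scan: DB_NEXT on an uninitialized cursor behaves as DB_FIRST.
    while ((ret = dbcp->get(&key, &data, DB_NEXT)) == 0) {
        // key.get_data()/key.get_size() and data.get_data()/data.get_size()
        // describe the current key/data pair.
    }
    // ret is DB_NOTFOUND once the last record has been returned.

    // Range search: smallest key greater than or equal to "m" (Btree only).
    char search[] = "m";
    key.set_data(search);
    key.set_size(sizeof(search) - 1);
    ret = dbcp->get(&key, &data, DB_SET_RANGE);

    dbcp->close();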

In addition, the following flags may be set by bitwise inclusively OR'ing them into the flags parameter: -

-

DB_DIRTY_READ
Read modified but not yet committed data. Silently ignored if the +
+
DB_DIRTY_READ
Read modified but not yet committed data. Silently ignored if the DB_DIRTY_READ flag was not specified when the underlying database was opened. -

DB_MULTIPLE
Return multiple data items in the data parameter. +
DB_MULTIPLE
Return multiple data items in the data parameter.

In the case of Btree or Hash databases, duplicate data items for the current key, starting at the current cursor position, are entered into the buffer. Subsequent calls with both the DB_NEXT_DUP and @@ -242,7 +217,7 @@ least as large as the page size of the underlying database, aligned for unsigned integer access, and be a multiple of 1024 bytes in size. If the buffer size is insufficient, then upon return from the call the size field of the data parameter will have been set to an estimated -buffer size, and the error ENOMEM is returned. (The size is an estimate as the +buffer size, and the error DB_BUFFER_SMALL is returned. (The size is an estimate as the exact size needed may not be known until all entries are read. It is best to initially provide a relatively large buffer, but applications should be prepared to resize the buffer as necessary and repeatedly call @@ -256,10 +231,10 @@ DB_NEXT_NODUP, DB_SET, DB_SET_RANGE, and DB_SET_RECNO options. The DB_MULTIPLE flag may not be used when accessing databases made into secondary indices using the Db::associate method.

-

DB_MULTIPLE_KEY
Return multiple key and data pairs in the data parameter. +
DB_MULTIPLE_KEY
Return multiple key and data pairs in the data parameter.

Key and data pairs, starting at the current cursor position, are entered into the buffer. Subsequent calls with both the DB_NEXT and -DB_MULTIPLE flags specified will return additional key and data +DB_MULTIPLE_KEY flags specified will return additional key and data pairs or DB_NOTFOUND if there are no additional key and data items to return.

In the case of Btree or Hash databases, @@ -274,7 +249,7 @@ least as large as the page size of the underlying database, aligned for unsigned integer access, and be a multiple of 1024 bytes in size. If the buffer size is insufficient, then upon return from the call the size field of the data parameter will have been set to an estimated -buffer size, and the error ENOMEM is returned. (The size is an estimate as the +buffer size, and the error DB_BUFFER_SMALL is returned. (The size is an estimate as the exact size needed may not be known until all entries are read. It is best to initially provide a relatively large buffer, but applications should be prepared to resize the buffer as necessary and repeatedly call @@ -286,16 +261,14 @@ DB_NEXT_NODUP, DB_SET, DB_SET_RANGE, and DB_SET_RECNO options. The DB_MULTIPLE_KEY flag may not be used when accessing databases made into secondary indices using the Db::associate method.

-

DB_RMW
Acquire write locks instead of read locks when doing the retrieval. +
DB_RMW
Acquire write locks instead of read locks when doing the retrieval. Setting this flag can eliminate deadlock during a read-modify-write cycle by acquiring the write lock during the read part of the cycle so that another thread of control acquiring a read lock for the same item, in its own read-modify-write cycle, will not result in deadlock.
-

key
-The key Dbt operated on. -

pkey
-The secondary index key Dbt operated on. +
key
The key Dbt operated on. +
pkey
The secondary index key Dbt operated on.
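The bulk-buffer handling described under DB_MULTIPLE can be sketched as follows (dbp, the key "somekey", and the 1MB starting size are assumptions for illustration; the buffer must be user-owned, aligned, and a multiple of 1024 bytes):

    Dbc *dbcp;
    dbp->cursor(NULL, &dbcp, 0);

    char search[] = "somekey";               // hypothetical key
    Dbt key(search, sizeof(search) - 1);

    u_int32_t ulen = 1024 * 1024;            // arbitrary starting size
    void *buf = malloc(ulen);
    Dbt data;
    data.set_data(buf);
    data.set_ulen(ulen);
    data.set_flags(DB_DBT_USERMEM);

    // Fill the buffer with the duplicate data items for "somekey".
    int ret = dbcp->get(&key, &data, DB_SET | DB_MULTIPLE);
    // If the buffer is too small, DB_BUFFER_SMALL is returned (or a
    // DbMemoryException is thrown) and data.get_size() holds an estimate:
    // reallocate, call set_ulen() with the new size, and retry the call.

    dbcp->close();
    free(buf);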

Errors

The Dbc::get method @@ -303,19 +276,18 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

DB_SECONDARY_BAD
A secondary index references a nonexistent primary key. +
+
DB_SECONDARY_BAD
A secondary index references a nonexistent primary key.
-

-

EINVAL
If the DB_CURRENT or DB_NEXT_DUP flags were specified and +
+
EINVAL
If the DB_CURRENT or DB_NEXT_DUP flags were specified and the cursor has not been initialized; -the -Dbc::pget method -was called with a cursor that does not refer to a secondary index; or if an +the Dbc::pget method was called with a cursor that does not refer to a +secondary index; or if an invalid flag value or parameter was specified.

If a transactional database environment operation was selected to @@ -326,8 +298,8 @@ throw a DbDeadlockException excepti to grant a lock in the allowed time, the Dbc::get method will fail and either return DB_LOCK_NOTGRANTED or throw a DbLockNotGrantedException exception.

-

If the requested item could not be returned due to insufficient memory, the Dbc::get method will fail and -either return ENOMEM or +

If the requested item could not be returned due to undersized buffer, the Dbc::get method will fail and +either return DB_BUFFER_SMALL or throw a DbMemoryException exception.


Class

@@ -338,6 +310,6 @@ throw a DbMemoryException exception.


APIRef -

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/dbc_list.html b/db/docs/api_cxx/dbc_list.html index 5668c167b..af9bd5cfc 100644 --- a/db/docs/api_cxx/dbc_list.html +++ b/db/docs/api_cxx/dbc_list.html @@ -1,12 +1,12 @@ - + Berkeley DB: Berkeley DB: Database Cursors and Related Methods - +

Berkeley DB: Database Cursors and Related Methods

@@ -22,6 +22,6 @@ Dbc::pgetRetrieve by cursor Dbc::putStore by cursor -

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/dbc_put.html b/db/docs/api_cxx/dbc_put.html index ca9eb3729..c561f80c8 100644 --- a/db/docs/api_cxx/dbc_put.html +++ b/db/docs/api_cxx/dbc_put.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Dbc::put - + -

Dbc::put

API -Ref -
+Ref +


@@ -39,13 +38,11 @@ unchanged.  If Dbc::put succeeds and an item is inserted into the
 database, the cursor is always positioned to refer to the newly inserted
 item.

Parameters

-

-

data
-The data Dbt operated on. -

flags
-The flags parameter must be set to one of the following values: -

-

DB_AFTER
In the case of the Btree and Hash access methods, insert the data +
+
data
The data Dbt operated on. +
flags
The flags parameter must be set to one of the following values: +
+
DB_AFTER
In the case of the Btree and Hash access methods, insert the data element as a duplicate element of the key to which the cursor refers. The new element appears immediately after the current cursor position. It is an error to specify DB_AFTER if the underlying Btree or @@ -60,11 +57,10 @@ in the structure to which the key parameter refers. The initial value of the key parameter is ignored. See Db::open for more information.

The DB_AFTER flag may not be specified to the Queue access method.

-

-The Dbc::put method will return DB_NOTFOUND if the current cursor record has already been deleted and the +

The Dbc::put method will return DB_NOTFOUND if the current cursor record has already been deleted and the underlying access method is Hash.

-

DB_BEFORE
In the case of the Btree and Hash access methods, insert the data +
DB_BEFORE
In the case of the Btree and Hash access methods, insert the data element as a duplicate element of the key to which the cursor refers. The new element appears immediately before the current cursor position. It is an error to specify DB_BEFORE if the underlying Btree or @@ -79,17 +75,14 @@ returned in the structure to which the key parameter refers. The initial value of the key parameter is ignored. See Db::open for more information.

The DB_BEFORE flag may not be specified to the Queue access method.

-

-The Dbc::put method will return DB_NOTFOUND if the current cursor record has already been deleted and the underlying +

The Dbc::put method will return DB_NOTFOUND if the current cursor record has already been deleted and the underlying access method is Hash.

-

DB_CURRENT
Overwrite the data of the key/data pair to which the cursor refers with +
DB_CURRENT
Overwrite the data of the key/data pair to which the cursor refers with the specified data item. The key parameter is ignored. -

-The Dbc::put method will return DB_NOTFOUND if the current cursor record has already been deleted and the underlying -access method is Hash. +

The Dbc::put method will return DB_NOTFOUND if the current cursor record has already been deleted.

-

DB_KEYFIRST
In the case of the Btree and Hash access methods, insert the specified +
DB_KEYFIRST
In the case of the Btree and Hash access methods, insert the specified key/data pair into the database.

If the underlying database supports duplicate data items, and if the key already exists in the database and a duplicate sort function has @@ -99,7 +92,7 @@ has been specified, the inserted data item is added as the first of the data items for that key.

The DB_KEYFIRST flag may not be specified to the Queue or Recno access methods.

-

DB_KEYLAST
In the case of the Btree and Hash access methods, insert the specified +
DB_KEYLAST
In the case of the Btree and Hash access methods, insert the specified key/data pair into the database.

If the underlying database supports duplicate data items, and if the key already exists in the database and a duplicate sort function has @@ -109,7 +102,7 @@ function has been specified, the inserted data item is added as the last of the data items for that key.

The DB_KEYLAST flag may not be specified to the Queue or Recno access methods.

-

DB_NODUPDATA
In the case of the Btree and Hash access methods, insert the specified +
DB_NODUPDATA
In the case of the Btree and Hash access methods, insert the specified key/data pair into the database, unless a key/data pair comparing equally to it already exists in the database. If a matching key/data pair already exists in the database, DB_KEYEXIST is returned. @@ -118,8 +111,7 @@ database has been configured to support sorted duplicate data items.

The DB_NODUPDATA flag may not be specified to the Queue or Recno access methods.

-

key
-The key Dbt operated on. +
key
The key Dbt operated on.
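A short sketch of a cursor store (dbp is a hypothetical open Btree handle configured with sorted duplicates; a fragment for illustration only): DB_KEYLAST inserts a new key/data pair, and DB_NODUPDATA reports an exact duplicate via DB_KEYEXIST rather than inserting it again.

    Dbc *dbcp;
    dbp->cursor(NULL, &dbcp, 0);

    char k[] = "fruit", d[] = "apple";       // hypothetical key/data
    Dbt key(k, sizeof(k) - 1);
    Dbt data(d, sizeof(d) - 1);

    // Insert the pair; with sorted duplicates the item is placed in sort order.
    int ret = dbcp->put(&key, &data, DB_KEYLAST);

    // A second, identical insert with DB_NODUPDATA is rejected.
    ret = dbcp->put(&key, &data, DB_NODUPDATA);
    if (ret == DB_KEYEXIST) {
        // The key/data pair was already present.
    }
    dbcp->close();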

Errors

The Dbc::put method @@ -127,15 +119,15 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EACCES
An attempt was made to modify a read-only database. +
+
EACCES
An attempt was made to modify a read-only database.
-

-

DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election +
+
DB_REP_HANDLE_DEAD
The database handle has been invalidated because a replication election unrolled a committed transaction.
-

-

EINVAL
If the DB_AFTER, DB_BEFORE or DB_CURRENT flags +
+
EINVAL
If the DB_AFTER, DB_BEFORE or DB_CURRENT flags were specified and the cursor has not been initialized; the DB_AFTER or DB_BEFORE flags were specified and a duplicate sort function has been specified; @@ -149,8 +141,8 @@ large to fit; an attempt was made to add a record to a secondary index; or if an invalid flag value or parameter was specified.
-

-

EPERM
Write attempted on read-only cursor when the DB_INIT_CDB flag was +
+
EPERM
Write attempted on read-only cursor when the DB_INIT_CDB flag was specified to DbEnv::open.

If a transactional database environment operation was selected to @@ -170,6 +162,6 @@ throw a DbLockNotGrantedException exc

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/dbt_bulk_class.html b/db/docs/api_cxx/dbt_bulk_class.html index 6bc40670c..29852baaf 100644 --- a/db/docs/api_cxx/dbt_bulk_class.html +++ b/db/docs/api_cxx/dbt_bulk_class.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: Bulk Retrieval API - + -

Bulk Retrieval API

API -Ref -
+Ref +


@@ -73,9 +72,8 @@ to any access method.

Db::get or Dbc::get that used the DB_MULTIPLE flag.

Parameters

-

-

dbt
-The dbt parameter is a data Dbt returned by the call to +
+
dbt
The dbt parameter is a data Dbt returned by the call to Db::get or Dbc::get that used the DB_MULTIPLE flag.
@@ -87,9 +85,8 @@ together yield the next data item in the original bulk retrieval buffer.

The DbMultipleDataIterator.next method returns false if no more data are available, and true otherwise.

Parameters

-

-

data
-The data parameter is a Dbt that will be filled in with +
+
data
The data parameter is a Dbt that will be filled in with a reference to a buffer, a size, and an offset that together yield the next data item in the original bulk retrieval buffer.
@@ -102,9 +99,8 @@ belonging to the Btree or Hash access methods.

Db::get or Dbc::get that used the DB_MULTIPLE_KEY flag.

Parameters

-

-

dbt
-The dbt parameter is a data Dbt returned by the call to +
+
dbt
The dbt parameter is a data Dbt returned by the call to Db::get or Dbc::get that used the DB_MULTIPLE_KEY flag.
@@ -119,13 +115,11 @@ available, and true otherwise.

The DbMultipleKeyDataIterator.next method returns false if no more data are available, and true otherwise.

Parameters

-

-

key
-The key parameter will be filled in with a reference to a buffer, +
+
key
The key parameter will be filled in with a reference to a buffer, a size, and an offset that yields the next key item in the original bulk retrieval buffer. -

data
-The data parameter will be filled in with a reference to a buffer, +
data
The data parameter will be filled in with a reference to a buffer, a size, and an offset that yields the next data item in the original bulk retrieval buffer.
@@ -138,9 +132,8 @@ Queue access methods.

Db::get or Dbc::get that used the DB_MULTIPLE_KEY flag.

Parameters

-

-

dbt
-The dbt parameter is a data Dbt returned by the call to +
+
dbt
The dbt parameter is a data Dbt returned by the call to Db::get or Dbc::get that used the DB_MULTIPLE_KEY flag.
@@ -153,13 +146,11 @@ and data item in the original bulk retrieval buffer.

The DbMultipleRecnoDataIterator.next method returns false if no more data are available, and true otherwise.

Parameters

-

-

key
-The key parameter will be filled in with a reference to a +
+
key
The key parameter will be filled in with a reference to a buffer, a size, and an offset that yields the next key item in the original bulk retrieval buffer. -

data
-The data parameter will be filled in with a reference to a +
data
The data parameter will be filled in with a reference to a buffer, a size, and an offset that yields the next data item in the original bulk retrieval buffer.
@@ -167,6 +158,6 @@ original bulk retrieval buffer.
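To tie the iterators above together, here is a sketch (dbp and the 512KB buffer size are assumptions for illustration) that fills a user-supplied buffer with DB_MULTIPLE_KEY and then walks it with DbMultipleKeyDataIterator:

    Dbc *dbcp;
    dbp->cursor(NULL, &dbcp, 0);

    u_int32_t ulen = 512 * 1024;             // arbitrary; a multiple of 1024 bytes
    void *buf = malloc(ulen);
    Dbt key, bulk;
    bulk.set_data(buf);
    bulk.set_ulen(ulen);
    bulk.set_flags(DB_DBT_USERMEM);

    if (dbcp->get(&key, &bulk, DB_FIRST | DB_MULTIPLE_KEY) == 0) {
        DbMultipleKeyDataIterator it(bulk);
        Dbt k, d;
        while (it.next(k, d)) {
            // k and d reference key/data pairs inside the bulk buffer.
        }
    }
    dbcp->close();
    free(buf);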

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/dbt_class.html b/db/docs/api_cxx/dbt_class.html index 3628a3fd7..93e117e9c 100644 --- a/db/docs/api_cxx/dbt_class.html +++ b/db/docs/api_cxx/dbt_class.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: Dbt - + -

Dbt

API -Ref -
+Ref +


@@ -60,7 +59,7 @@ public:
 

Description: Dbt

This information describes the specific details of the Dbt class, used to encode keys and data items in a database.

- +

Key/Data Pairs

Storage and retrieval for the Db access methods are based on key/data pairs. Both key and data items are represented by Dbt @@ -87,8 +86,7 @@ refers will be allocated and managed by Berkeley DB.

multiple threads simultaneously access the same Dbt object using Db API calls, the results are undefined, and may result in a crash. One easy way to avoid problems is to use Dbt objects -that are -constructed as stack variables.

+that are constructed as stack variables.

Each Dbt object has an associated DBT struct, which is used by the underlying implementation of Berkeley DB and its C-language API. The Dbt::get_DBT method returns a pointer to this struct. Given a const @@ -105,12 +103,10 @@ and C++ language software. It should not be necessary to use these calls in a purely C++ application.


Description: Dbt::set_data

-

Set the data array. -

+

Set the data array.

Parameters

-

-

data
-The data parameter is an array of bytes to be used to set the +
+
data
The data parameter is an array of bytes to be used to set the content for the Dbt.
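For example (an illustrative fragment; the key contents are hypothetical), a key built from a C string uses Dbt::set_data together with Dbt::set_size:

    char keystr[] = "fruit";                 // hypothetical key contents
    Dbt key;
    key.set_data(keystr);
    key.set_size(sizeof(keystr) - 1);        // byte count, excluding the trailing NUL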

@@ -124,9 +120,8 @@ Dbt::set_recno_key_data method is called, the data, size and offset fields in the Dbt are implicitly set to hold a byte array representation of the integer key.

Parameters

-

-

recno
-The recno parameter logical record number used to initialize the +
+
recno
The recno parameter is the logical record number used to initialize the data array.

@@ -138,12 +133,10 @@ logical record number.

Set the byte offset into the data array.

The number of bytes offset into the data array determine the portion of the array actually used. This element is accessed using -Dbt::get_offset and Dbt::set_offset. -

+Dbt::get_offset and Dbt::set_offset.

Parameters

-

-

offset
-The offset parameter is the byte offset into the data array. +
+
offset
The offset parameter is the byte offset into the data array.

Description: Dbt::get_offset

@@ -152,9 +145,8 @@ The offset parameter is the byte offset into the data array.

Description: Dbt::set_size

Set the byte size of the data array.

Parameters

-

-

size
-The size parameter is the size of the data array in bytes. +
+
size
The size parameter is the size of the data array in bytes.

Description: Dbt::get_size

@@ -166,9 +158,8 @@ The size parameter is the size of the data array in bytes. the ulen to 0 and checking the return value found in size. See the DB_DBT_USERMEM flag for more information.

Parameters

-

-

ulen
-The ulen parameter the size of the data array in bytes. +
+
ulen
The ulen parameter is the size of the data array in bytes.

Description: Dbt::get_ulen

@@ -179,9 +170,8 @@ The ulen parameter the size of the data array in bytes. application, in bytes. See the DB_DBT_PARTIAL flag for more information.

Parameters

-

-

dlen
-The dlen parameter is the length of the partial record in bytes. +
+
dlen
The dlen parameter is the length of the partial record in bytes.

Description: Dbt::get_dlen

@@ -192,9 +182,8 @@ The dlen parameter is the length of the partial record in bytes. application, in bytes. See the DB_DBT_PARTIAL flag for more information.

Parameters

-

-

doff
-The doff parameter is the offset of the partial record. +
+
doff
The doff parameter is the offset of the partial record.

Description: Dbt::get_doff

@@ -203,40 +192,35 @@ The doff parameter is the offset of the partial record.

Description: Dbt::set_object

Set the object flag value.

Parameters

-

-

flags
-The flags parameter is Dbt flag value. +
+
flags
The flags parameter is Dbt flag value. The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_DBT_MALLOC
-When this flag is set, Berkeley DB will allocate memory for the returned key -or data item -(using malloc(3) or the user-specified malloc method), and -return a pointer to it in the data field of the key or data -Dbt object. Because any allocated memory becomes the +
+
DB_DBT_MALLOC
When this flag is set, Berkeley DB will allocate memory for the returned key +or data item (using malloc(3) or the user-specified malloc +method), and return a pointer to it in the data field of the key +or data Dbt object. Because any allocated memory becomes the responsibility of the calling application, the caller must determine whether memory was allocated using the returned value of the data field.

It is an error to specify more than one of DB_DBT_MALLOC, DB_DBT_REALLOC, and DB_DBT_USERMEM.

-

DB_DBT_REALLOC
-When this flag is set Berkeley DB -will allocate memory for the returned key or data item (using -realloc(3) or the user-specified realloc method), and return -a pointer to it in the data field of the key or data Dbt -object. Because any allocated memory becomes the responsibility of the -calling application, the caller must determine whether memory was -allocated using the returned value of the data field. +
DB_DBT_REALLOC
When this flag is set Berkeley DB will allocate memory for the returned key +or data item (using realloc(3) or the user-specified realloc +method), and return a pointer to it in the data field of the key +or data Dbt object. Because any allocated memory becomes the +responsibility of the calling application, the caller must determine +whether memory was allocated using the returned value of the +data field.

It is an error to specify more than one of DB_DBT_MALLOC, DB_DBT_REALLOC, and DB_DBT_USERMEM.

-

DB_DBT_USERMEM
-The data field of the key or data object must refer to memory +
DB_DBT_USERMEM
The data field of the key or data object must refer to memory that is at least ulen bytes in length. If the length of the requested item is less than or equal to that number of bytes, the item is copied into the memory referred to by the data field. Otherwise, the size field is set to the length needed for the -requested item, and the error ENOMEM is returned. +requested item, and the error DB_BUFFER_SMALL is returned.

It is an error to specify more than one of DB_DBT_MALLOC, DB_DBT_REALLOC, and DB_DBT_USERMEM.

@@ -250,10 +234,10 @@ you are retrieving, you might decrease the memory burden and speed your application by allocating your own byte array and using DB_DBT_USERMEM. Even if you don't know the maximum size, you can use this option and reallocate your array whenever your retrieval API call -returns an ENOMEM error or throws an exception encapsulating an ENOMEM.

-

-

DB_DBT_PARTIAL
-Do partial retrieval or storage of an item. If the calling application +returns an DB_BUFFER_SMALL error or throws an exception +encapsulating an DB_BUFFER_SMALL.

+
+
DB_DBT_PARTIAL
Do partial retrieval or storage of an item. If the calling application is doing a get, the dlen bytes starting doff bytes from the beginning of the retrieved data record are returned as if they comprised the entire record. If any or all of the specified bytes do @@ -292,6 +276,6 @@ bytes would be those specified by the put call.
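The following sketch combines two of the flags above (dbp, the key, and the offsets are assumptions for illustration): DB_DBT_USERMEM supplies a caller-owned result buffer, and DB_DBT_PARTIAL limits the get to a byte range of the stored record.

    char keystr[] = "fruit";
    Dbt key(keystr, sizeof(keystr) - 1);

    char buf[256];                           // caller-owned result buffer
    Dbt data;
    data.set_data(buf);
    data.set_ulen(sizeof(buf));
    data.set_flags(DB_DBT_USERMEM | DB_DBT_PARTIAL);
    data.set_doff(10);                       // start 10 bytes into the record
    data.set_dlen(20);                       // return at most 20 bytes

    // If the requested bytes do not fit in ulen, DB_BUFFER_SMALL is returned
    // (or a DbMemoryException is thrown) and data.get_size() holds the size needed.
    int ret = dbp->get(NULL, &key, &data, 0);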


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/dbt_package.html b/db/docs/api_cxx/dbt_package.html new file mode 100644 index 000000000..07ca6bc02 --- /dev/null +++ b/db/docs/api_cxx/dbt_package.html @@ -0,0 +1,22 @@ + + + + + + +Berkeley DB: DBT and Bulk Get Operations + + + + +

DBT and Bulk Get Operations

+ + + + + + +
DBT and Bulk Get OperationsDescription
DbtKey/data pairs
DbMultipleDataIteratorNext bulk get retrieval
DbMultipleKeyDataIteratorNext bulk get retrieval
DbMultipleRecnoDataIteratorNext bulk get retrieval
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_cxx/deadlock_class.html b/db/docs/api_cxx/deadlock_class.html index cde0e7228..0a161f018 100644 --- a/db/docs/api_cxx/deadlock_class.html +++ b/db/docs/api_cxx/deadlock_class.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbDeadlockException - + -

DbDeadlockException

API -Ref -
+Ref +


@@ -34,10 +33,11 @@ for a lock are deadlocked, when a lock request has timed out, or when a
 lock request would need to block and the transaction has been configured
 to not wait for locks.  One of the threads' transactions is selected for
 termination, and a DbDeadlockException is thrown to that thread.

+

The DbException errno value is set to DB_LOCK_DEADLOCK.
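A common response to this exception is to abort and retry the transaction; a sketch follows (env is a hypothetical open DbEnv handle, do_work() a hypothetical operation, and the retry limit is arbitrary):

    for (int attempt = 0; attempt < 5; ++attempt) {
        DbTxn *txn = NULL;
        try {
            env->txn_begin(NULL, &txn, 0);
            do_work(txn);                    // hypothetical database work
            txn->commit(0);
            break;                           // success
        } catch (DbDeadlockException &) {
            // This transaction was chosen as the deadlock victim:
            // release its locks by aborting, then retry.
            if (txn != NULL)
                txn->abort();
        }
    }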


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_class.html b/db/docs/api_cxx/env_class.html index 2b3a9a5ae..992b673c0 100644 --- a/db/docs/api_cxx/env_class.html +++ b/db/docs/api_cxx/env_class.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv - + -

DbEnv

API -Ref -
+Ref +


@@ -54,15 +53,15 @@ regardless of the method's return.

allocates memory internally; calling the DbEnv::close or DbEnv::remove methods will free that memory.

The following flags value may be specified:

-

-

DB_CXX_NO_EXCEPTIONS
The Berkeley DB C++ API supports two different error behaviors. By default, +
+
DB_CXX_NO_EXCEPTIONS
The Berkeley DB C++ API supports two different error behaviors. By default, whenever an error occurs, an exception is thrown that encapsulates the error information. This generally allows for cleaner logic for transaction processing because a try block can surround a single transaction. However, if DB_CXX_NO_EXCEPTIONS is specified, exceptions are not thrown; instead, each individual function returns an error code. -

DB_RPCCLIENT
Create a client environment to connect to a server. +
DB_RPCCLIENT
Create a client environment to connect to a server.

The DB_RPCCLIENT flag indicates to the system that this environment is remote on a server. The use of this flag causes the environment methods to use functions that call a server instead of local functions. @@ -94,6 +93,6 @@ DbEnv
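As an illustration of the two error behaviors (a sketch; the environment path and flags are assumptions), constructing the handle with DB_CXX_NO_EXCEPTIONS switches the API from throwing DbException to returning error codes:

    // Default behavior: errors are thrown as DbException.
    try {
        DbEnv env(0);
        env.open("/path/to/env", DB_CREATE | DB_INIT_MPOOL, 0);   // hypothetical path
        env.close(0);
    } catch (DbException &e) {
        // e.get_errno() holds the underlying error value.
    }

    // With DB_CXX_NO_EXCEPTIONS, the same calls return error codes instead.
    DbEnv env_rc(DB_CXX_NO_EXCEPTIONS);
    int ret = env_rc.open("/path/to/env", DB_CREATE | DB_INIT_MPOOL, 0);
    if (ret == 0)
        (void)env_rc.close(0);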

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_close.html b/db/docs/api_cxx/env_close.html index 58a0ff45b..ddd22d9fa 100644 --- a/db/docs/api_cxx/env_close.html +++ b/db/docs/api_cxx/env_close.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::close - + -

DbEnv::close

API -Ref -
+Ref +


@@ -35,11 +34,11 @@ handles must not be closed while database handles remain open, or
 transactions in the environment have not yet been committed or aborted.
 Specifically, this includes Db, Dbc, DbTxn,
 DbLogc and DbMpoolFile handles.

-

Where the environment was initialized with the DB_INIT_LOCK flag, -calling DbEnv::close does not release any locks still held by the -closing process, providing functionality for long-lived locks. -Processes that want to have all their locks -released can do so by issuing the appropriate DbEnv::lock_vec call.

+

Where the environment was initialized with the DB_INIT_LOCK +flag, calling DbEnv::close does not release any locks still held +by the closing process, providing functionality for long-lived locks. +Processes that want to have all their locks released can do so by +issuing the appropriate DbEnv::lock_vec call.

Where the environment was initialized with the DB_INIT_MPOOL flag, calling DbEnv::close implies calls to DbMpoolFile::close for any remaining open files in the memory pool that were returned to this @@ -67,9 +66,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flags
-The flags parameter is currently unused, and must be set to 0. +
+
flags
The flags parameter is currently unused, and must be set to 0.
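A minimal sketch of the shutdown order described above (dbcp, dbp, and env are hypothetical open handles): cursors and database handles are closed before the environment.

    dbcp->close();       // close cursors first
    dbp->close(0);       // then database handles
    env->close(0);       // finally the environment; flags must be 0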

Class

@@ -80,6 +78,6 @@ The flags parameter is currently unused, and must be set to 0.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_dbremove.html b/db/docs/api_cxx/env_dbremove.html index 48ea68f03..2d4608445 100644 --- a/db/docs/api_cxx/env_dbremove.html +++ b/db/docs/api_cxx/env_dbremove.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::dbremove - + -

DbEnv::dbremove

API -Ref -
+Ref +


@@ -30,37 +29,35 @@ DbEnv::dbremove(DbTxn *txnid,
 

Description: DbEnv::dbremove

The DbEnv::dbremove method removes the database specified by the -file and database parameters. If no database is -specified, the underlying file represented by file is removed, -incidentally removing all of the databases it contained.

+file and database parameters. If no database +is specified, the underlying file represented by file is +removed, incidentally removing all of the databases it contained.

Applications should never remove databases with open Db handles, or in the case of removing a file, when any database in the file has an open handle. For example, some architectures do not permit the removal of files with open system handles. On these architectures, attempts to remove databases currently in use by any thread of control in the system -will fail.

+may fail.

The DbEnv::dbremove method either returns a non-zero error value or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

database
-The database parameter is the database to be removed. -

file
-The file parameter is the physical file which contains the +
+
database
The database parameter is the database to be removed. +
file
The file parameter is the physical file which contains the database(s) to be removed. -

flags
-The flags parameter must be set to 0 or +

On Windows, the file argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters.

+
flags
The flags parameter must be set to 0 or the following value: -

-

DB_AUTO_COMMIT
Enclose the DbEnv::dbremove call within a transaction. If the call succeeds, +
+
DB_AUTO_COMMIT
Enclose the DbEnv::dbremove call within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, the operation will have made no changes.
-

txnid
-If the operation is to be transaction-protected, +
txnid
If the operation is to be transaction-protected, the txnid parameter is a transaction handle returned from DbEnv::txn_begin; otherwise, NULL.
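For example (an illustrative sketch; env, the file name, and the database name are assumptions), removing a single named database from a file under DB_AUTO_COMMIT:

    // Remove only the "inventory" database from access.db; the file itself
    // remains if it contains other databases.
    env->dbremove(NULL, "access.db", "inventory", DB_AUTO_COMMIT);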
@@ -77,16 +74,12 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

DB_FILEOPEN
An attempt was made to remove the underlying file and a database in the -file was currently open. -
-

-

EINVAL
If DbEnv::dbremove called before DbEnv::open was called; or if an +
+
EINVAL
If DbEnv::dbremove called before DbEnv::open was called; or if an invalid flag value or parameter was specified.
-

-

ENOENT
The file or directory does not exist. +
+
ENOENT
The file or directory does not exist.

If a transactional database environment operation was selected to resolve a deadlock, the DbEnv::dbremove method will fail and @@ -105,6 +98,6 @@ throw a DbLockNotGrantedException exc

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_dbrename.html b/db/docs/api_cxx/env_dbrename.html index ce2c2d019..c8d13a351 100644 --- a/db/docs/api_cxx/env_dbrename.html +++ b/db/docs/api_cxx/env_dbrename.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::dbrename - + -

DbEnv::dbrename

API -Ref -
+Ref +


@@ -40,31 +39,28 @@ the database environment, no database in the file may be open when the
 DbEnv::dbrename method is called.  In particular, some architectures do
 not permit renaming files with open handles.  On these architectures,
 attempts to rename databases that are currently in use by any thread of
-control in the system will fail.

+control in the system may fail.

The DbEnv::dbrename method either returns a non-zero error value or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

database
-The database parameter is the database to be removed. -

file
-The file parameter is the physical file which contains the -database(s) to be removed. -

flags
-The flags parameter must be set to 0 or +
+
database
The database parameter is the database to be renamed. +
file
The file parameter is the physical file which contains the +database(s) to be renamed. +

On Windows, the file argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters.

+
flags
The flags parameter must be set to 0 or the following value: -

-

DB_AUTO_COMMIT
Enclose the DbEnv::dbrename call within a transaction. If the call succeeds, +
+
DB_AUTO_COMMIT
Enclose the DbEnv::dbrename call within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, the operation will have made no changes.
-

newname
-The newname parameter is the new name of the database or file. -

txnid
-If the operation is to be transaction-protected, +
newname
The newname parameter is the new name of the database or file. +
txnid
If the operation is to be transaction-protected, the txnid parameter is a transaction handle returned from DbEnv::txn_begin; otherwise, NULL.
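For example (an illustrative sketch; env and the names are assumptions), renaming a database within its file:

    // Rename the "inventory" database inside access.db to "stock".
    env->dbrename(NULL, "access.db", "inventory", "stock", DB_AUTO_COMMIT);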
@@ -81,16 +77,12 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

DB_FILEOPEN
An attempt was made to rename the underlying file and a database in the -file was currently open. -
-

-

EINVAL
If DbEnv::dbrename called before DbEnv::open was called; or if an +
+
EINVAL
If DbEnv::dbrename called before DbEnv::open was called; or if an invalid flag value or parameter was specified.
-

-

ENOENT
The file or directory does not exist. +
+
ENOENT
The file or directory does not exist.

If a transactional database environment operation was selected to resolve a deadlock, the DbEnv::dbrename method will fail and @@ -109,6 +101,6 @@ throw a DbLockNotGrantedException exc

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_err.html b/db/docs/api_cxx/env_err.html index 076c4ffc6..e066b2130 100644 --- a/db/docs/api_cxx/env_err.html +++ b/db/docs/api_cxx/env_err.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::err - + -

DbEnv::err

API -Ref -
+Ref +


@@ -29,22 +28,21 @@ DbEnv::errx(const char *fmt, ...);
 


Description: DbEnv::err

-

The DbEnv::err, DbEnv::errx, Db::err and Db::errx methods provide error-messaging functionality for applications written using the Berkeley DB library.

The DbEnv::err method constructs an error message consisting of the following elements:

-

-

An optional prefix string
If no error callback function has been set using the +
+
An optional prefix string
If no error callback function has been set using the DbEnv::set_errcall method, any prefix string specified using the DbEnv::set_errpfx method, followed by two separating characters: a colon and a <space> character. -

An optional printf-style message
The supplied message fmt, if non-NULL, in which the +
An optional printf-style message
The supplied message fmt, if non-NULL, in which the ANSI C X3.159-1989 (ANSI C) printf function specifies how subsequent parameters are converted for output. -

A separator
Two separating characters: a colon and a <space> character. -

A standard error string
The standard system or Berkeley DB library error string associated with the +
A separator
Two separating characters: a colon and a <space> character. +
A standard error string
The standard system or Berkeley DB library error string associated with the error value, as returned by the DbEnv::strerror method.
@@ -60,20 +58,24 @@ stream.

(see DbEnv::set_error_stream and Db::set_error_stream), the error message is written to that stream.

If none of these output options has been configured, the error message -is written to stderr, the standard -error output stream.

+is written to stderr, the standard error output stream.

+

Parameters

+
+
error
The error parameter is the error value for which the +DbEnv::err and Db::err methods will display a explanatory +string. +
The error parameter is the error value for which the +DbEnv::err and Db::err methods will display an explanatory +string. +
fmt
The fmt parameter is an optional printf-style message to display. +

The DbEnv::errx and Db::errx methods perform identically to the DbEnv::err and Db::err methods, except that they do not append the final separator characters and standard error string to the error message.

Parameters

-

-

error
-The error parameter is the error value for which the +
+
error
The error parameter is the error value for which the DbEnv::err and Db::err methods will display an explanatory string. -

fmt
-The fmt parameter is an optional printf-style message to display. +
fmt
The fmt parameter is an optional printf-style message to display.
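For example (a sketch; env is a hypothetical open DbEnv handle and the prefix, error value, and messages are assumptions):

    env->set_errpfx("myapp");                // hypothetical prefix
    int err_val = DB_NOTFOUND;               // some error value to report
    // Emits the prefix, the formatted message, a separator, and the
    // standard error string for err_val on the configured error channel.
    env->err(err_val, "get of key %d failed", 42);
    // DbEnv::errx omits the trailing separator and standard error string.
    env->errx("skipping malformed input line %d", 7);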

Class

@@ -84,6 +86,6 @@ The fmt parameter is an optional printf-style message to display.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_list.html b/db/docs/api_cxx/env_list.html index be0b3286b..d038d0a5e 100644 --- a/db/docs/api_cxx/env_list.html +++ b/db/docs/api_cxx/env_list.html @@ -1,82 +1,50 @@ - + Berkeley DB: Berkeley DB: Database Environments and Related Methods - +

Berkeley DB: Database Environments and Related Methods

- + - + + - - - - - - - - - - - - - - - - + + - - - - - + + + + - + - - - + + + - - - - - - - - - - - - - - - - - -
Database Environments and Related MethodsDescription
Database Environment OperationsDescription
DbEnvCreate an environment handle
DbEnv::closeClose an environment
DbEnv::dbremoveRemove a database
DbEnv::dbrenameRename a database
DbEnv::errError message with error string
DbEnv::errxError message
DbEnv::lock_detectPerform deadlock detection
DbEnv::lock_getAcquire a lock
DbEnv::lock_idAcquire a locker ID
DbEnv::lock_id_freeRelease a locker ID
DbEnv::lock_putRelease a lock
DbEnv::lock_statReturn lock subsystem statistics
DbEnv::lock_vecAcquire/release locks
DbEnv::log_archiveList log and database files
DbEnv::log_fileMap Log Sequence Numbers to log files
DbEnv::log_flushFlush log records
DbEnv::log_putWrite a log record
DbEnv::log_statReturn log subsystem statistics
DbEnv::memp_registerRegister input/output functions for a file in a memory pool
DbEnv::memp_statReturn memory pool statistics
DbEnv::memp_syncFlush pages from a memory pool
DbEnv::memp_trickleTrickle flush pages from a memory pool
DbEnv::get_homeReturn environment's home directory
DbEnv::get_open_flagsReturn the flags with which the environment was opened
DbEnv::openOpen an environment
DbEnv::removeRemove an environment
DbEnv::rep_electHold a replication election
DbEnv::rep_process_messageProcess a replication message
DbEnv::rep_startConfigure an environment for replication
DbEnv::rep_statReplication statistics
DbEnv::set_allocSet local space allocation functions
DbEnv::stat_printEnvironment statistics
DbEnv::strerrorError strings
DbEnv::versionReturn version information
Environment Configuration
DbEnv::set_app_dispatchConfigure application recovery
DbEnv::set_cachesizeSet the environment cache size
DbEnv::set_allocSet local space allocation functions
DbEnv::set_data_dirSet the environment data directory
DbEnv::set_encryptSet the environment cryptographic key
DbEnv::set_errcallSet error message callback
DbEnv::set_errfileSet error message FILE
DbEnv::set_error_streamSet error message output stream
DbEnv::set_errcall, DbEnv::set_msgcallSet error and informational message callbacks
DbEnv::set_errfile, DbEnv::set_msgfileSet error and informational message FILE
DbEnv::set_error_stream, DbEnv::set_message_streamSet error and informational message output stream
DbEnv::set_errpfxSet error message prefix
DbEnv::set_feedbackSet feedback callback
DbEnv::set_flagsEnvironment configuration
DbEnv::set_lg_bsizeSet log buffer size
DbEnv::set_lg_dirSet the environment logging directory
DbEnv::set_lg_maxSet log file size
DbEnv::set_lg_regionmaxSet logging region size
DbEnv::set_lk_conflictsSet lock conflicts matrix
DbEnv::set_lk_detectSet automatic deadlock detection
DbEnv::set_lk_max_lockersSet maximum number of lockers
DbEnv::set_lk_max_locksSet maximum number of locks
DbEnv::set_lk_max_objectsSet maximum number of lock objects
DbEnv::set_mp_mmapsizeSet maximum mapped-in database file size
DbEnv::set_paniccallSet panic callback
DbEnv::set_rep_limitLimit data sent in response to a single message
DbEnv::set_rep_transportConfigure replication transport
DbEnv::set_rpc_serverEstablish an RPC server connection
DbEnv::set_shm_keySet system memory shared segment ID
DbEnv::set_tas_spinsSet the number of test-and-set spins
DbEnv::set_timeoutSet lock and transaction timeout
DbEnv::set_tmp_dirSet the environment temporary file directory
DbEnv::set_tx_maxSet maximum number of transactions
DbEnv::set_tx_timestampSet recovery timestamp
DbEnv::set_verboseSet verbose messages
DbEnv::txn_beginBegin a transaction
DbEnv::txn_checkpointCheckpoint the transaction subsystem
DbEnv::txn_recoverDistributed transaction recovery
DbEnv::txn_statReturn transaction subsystem statistics
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_open.html b/db/docs/api_cxx/env_open.html index e0b2f2f0c..3c0615682 100644 --- a/db/docs/api_cxx/env_open.html +++ b/db/docs/api_cxx/env_open.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::open - + -

DbEnv::open

API -Ref -
+Ref +


@@ -34,8 +33,6 @@ DbEnv::get_open_flags(u_int32_t *flagsp);
 


Description: DbEnv::open

- -

The DbEnv::open method opens a Berkeley DB environment. It provides a structure for creating a consistent environment for processes using one or more of the features of Berkeley DB.

@@ -47,16 +44,16 @@ If DbEnv::open fails, the DbEnv::close m to discard the DbEnv handle.

Parameters

-

-

db_home
-The db_home parameter is the database environment's home +
+
db_home
The db_home parameter is the database environment's home directory. For more information on db_home, and filename resolution in general, see Berkeley DB File Naming. The environment variable DB_HOME may be used as the path of the database home, as described in Berkeley DB File Naming. -

flags
-The flags parameter specifies the subsystems that are initialized +

On Windows, the db_home argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters.

+
flags
The flags parameter specifies the subsystems that are initialized and how the application's environment affects Berkeley DB file naming, among other things. The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one @@ -64,44 +61,44 @@ or more of the following values:

Because there are a large number of flags that can be specified, they have been grouped together by functionality. The first group of flags indicates which of the Berkeley DB subsystems should be initialized:

-

-

DB_JOINENV
Join an existing environment. This option allows applications to +
+
DB_JOINENV
Join an existing environment. This option allows applications to join an existing environment without knowing which Berkeley DB subsystems the environment supports. -

DB_INIT_CDB
Initialize locking for the Berkeley DB Concurrent Data Store +
DB_INIT_CDB
Initialize locking for the Berkeley DB Concurrent Data Store product. In this mode, Berkeley DB provides multiple reader/single writer access. The only other subsystem that should be specified with the DB_INIT_CDB flag is DB_INIT_MPOOL. -

DB_INIT_LOCK
Initialize the locking subsystem. This subsystem should be used when +
DB_INIT_LOCK
Initialize the locking subsystem. This subsystem should be used when multiple processes or threads are going to be reading and writing a Berkeley DB database, so that they do not interfere with each other. If all threads are accessing the database(s) read-only, locking is unnecessary. When the DB_INIT_LOCK flag is specified, it is usually necessary to run a deadlock detector, as well. See db_deadlock and DbEnv::lock_detect for more information. -

DB_INIT_LOG
Initialize the logging subsystem. This subsystem should be used when +
DB_INIT_LOG
Initialize the logging subsystem. This subsystem should be used when recovery from application or system failure is necessary. If the log region is being created and log files are already present, the log files are reviewed; subsequent log writes are appended to the end of the log, rather than overwriting current log entries. -

DB_INIT_MPOOL
Initialize the shared memory buffer pool subsystem. This subsystem +
DB_INIT_MPOOL
Initialize the shared memory buffer pool subsystem. This subsystem should be used whenever an application is using any Berkeley DB access method. -

DB_INIT_REP
Initialize the replication subsystem. This subsystem +
DB_INIT_REP
Initialize the replication subsystem. This subsystem should be used whenever an application plans on using replication. The DB_INIT_REP flag requires the DB_INIT_TXN and DB_INIT_LOCK flags also be configured. -

DB_INIT_TXN
Initialize the transaction subsystem. This subsystem should be used +
DB_INIT_TXN
Initialize the transaction subsystem. This subsystem should be used when recovery and atomicity of multiple operations are important. The DB_INIT_TXN flag implies the DB_INIT_LOG flag.

The second group of flags govern what recovery, if any, is performed when the environment is initialized:

-

-

DB_RECOVER
Run normal recovery on this environment before opening it for normal +
+
DB_RECOVER
Run normal recovery on this environment before opening it for normal use. If this flag is set, the DB_CREATE flag must also be set because the regions will be removed and re-created. -

DB_RECOVER_FATAL
Run catastrophic recovery on this environment before opening it for +
DB_RECOVER_FATAL
Run catastrophic recovery on this environment before opening it for normal use. If this flag is set, the DB_CREATE flag must also be set because the regions will be removed and re-created.
@@ -130,14 +127,14 @@ is necessary to ensure that all necessary log files are present before running recovery. For further information, consult db_archive and db_recover.

The third group of flags govern file-naming extensions in the environment:

-

- -

DB_USE_ENVIRON
The Berkeley DB process' environment may be permitted to specify information +
+ +
DB_USE_ENVIRON
The Berkeley DB process' environment may be permitted to specify information to be used when naming files; see Berkeley DB File Naming. Because permitting users to specify which files are used can create security problems, environment information will be used in file naming for all users only if the DB_USE_ENVIRON flag is set. -

DB_USE_ENVIRON_ROOT
The Berkeley DB process' environment may be permitted to specify information +
DB_USE_ENVIRON_ROOT
The Berkeley DB process' environment may be permitted to specify information to be used when naming files; see Berkeley DB File Naming. Because permitting users to specify which files are used can create security problems, if the DB_USE_ENVIRON_ROOT flag is set, environment information will @@ -145,11 +142,11 @@ be used for file naming only for users with appropriate permissions (for example, users with a user-ID of 0 on UNIX systems).

Finally, there are a few additional unrelated flags:

-

-

DB_CREATE
Cause Berkeley DB subsystems to create any underlying files, as necessary. -

DB_LOCKDOWN
Lock shared Berkeley DB environment files and memory-mapped databases into +
+
DB_CREATE
Cause Berkeley DB subsystems to create any underlying files, as necessary. +
DB_LOCKDOWN
Lock shared Berkeley DB environment files and memory-mapped databases into memory. -

DB_PRIVATE
Specify that the environment will only be accessed by a single process +
DB_PRIVATE
Specify that the environment will only be accessed by a single process (although that process may be multithreaded). This flag has two effects on the Berkeley DB environment. First, all underlying data structures are allocated from per-process memory instead of from shared memory that is @@ -161,44 +158,54 @@ corruption and unpredictable behavior. For example, if both a server application and the Berkeley DB utility db_stat are expected to access the environment, the DB_PRIVATE flag should not be specified.

-

DB_SYSTEM_MEM
Allocate memory from system shared memory instead of from memory backed +
DB_SYSTEM_MEM
Allocate memory from system shared memory instead of from memory backed by the filesystem. See Shared Memory Regions for more information. -

DB_THREAD
Cause the DbEnv handle returned by DbEnv::open to be -free-threaded; that is, usable by multiple threads within a -single address space. +
DB_THREAD
Cause the DbEnv handle returned by DbEnv::open to be +free-threaded; that is, concurrently usable by multiple +threads in the address space. The DB_THREAD flag should be specified +if the DbEnv handle will be concurrently used by multiple +threads of control or if multiple DB handles, opened within the database +environment, will be used concurrently.
-

mode
-On UNIX systems or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by -Berkeley DB are created with mode mode (as described in chmod(2)) and modified by the process' umask value at the time of creation -(see umask(2)). If mode is 0, Berkeley DB will use a default -mode of readable and writable by both owner and group. On Windows -systems, the mode parameter is ignored. The group ownership of created -files is based on the system and directory defaults, and is not further -specified by Berkeley DB. +
mode
On Windows systems, the mode parameter is ignored. +

On UNIX systems or in IEEE/ANSI Std 1003.1 (POSIX) environments, files created by Berkeley DB +are created with mode mode (as described in chmod(2)) +and modified by the process' umask value at the time of creation (see +umask(2)). Created files are owned by the process owner; the +group ownership of created files is based on the system and directory +defaults, and is not further specified by Berkeley DB. System shared memory +segments created by Berkeley DB are created with mode mode, unmodified +by the process' umask value. If mode is 0, Berkeley DB will use a +default mode of readable and writable by both owner and group.
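As an illustration of the flags and mode described above, a minimal sketch of opening a transactional environment with recovery and a free-threaded handle might look like the following; the home directory "/var/dbenv" and the DB_INIT_* subsystem flags are assumptions for the example, and a mode of 0 selects the default owner/group read-write permissions.

    #include <db_cxx.h>

    int open_env(DbEnv &env)
    {
        // Create missing files, run recovery, and make the handle usable by
        // multiple threads; DB_INIT_* selects the subsystems (assumed here).
        u_int32_t flags = DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
            DB_INIT_MPOOL | DB_INIT_TXN | DB_RECOVER | DB_THREAD;
        return (env.open("/var/dbenv", flags, 0));
    }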

+

Errors

The DbEnv::open method may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EAGAIN
The shared memory region was locked and (repeatedly) unavailable. +
+
DB_VERSION_MISMATCH
The version of the Berkeley DB library doesn't match the version that created +the database environment. +
+
+
EAGAIN
The shared memory region was locked and (repeatedly) unavailable.
-

-

EINVAL
If the DB_THREAD flag was specified and fast mutexes are not +
+
EINVAL
If the DB_THREAD flag was specified and fast mutexes are not available for this architecture; The DB_HOME or TMPDIR environment variables were set, but empty; An incorrectly formatted NAME VALUE entry or line was found; or if an invalid flag value or parameter was specified.
-

-

ENOSPC
HP-UX only: a previously created Berkeley DB environment for this process still +
+
ENOSPC
HP-UX only: a previously created Berkeley DB environment for this process still exists.
-

-

ENOENT
The file or directory does not exist. +
+
ENOENT
The file or directory does not exist.

Description: DbEnv::get_home

@@ -216,9 +223,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flagsp
-The DbEnv::get_open_flags method returns the +
+
flagsp
The DbEnv::get_open_flags method returns the open method flags in flagsp.

@@ -230,6 +236,6 @@ open method flags in flagsp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_remove.html b/db/docs/api_cxx/env_remove.html index d5c2b7862..fbc9df337 100644 --- a/db/docs/api_cxx/env_remove.html +++ b/db/docs/api_cxx/env_remove.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::remove - + -

DbEnv::remove

API -Ref -
+Ref +


@@ -73,25 +72,25 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

db_home
-The db_home parameter names the database environment to be removed. -

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
+
db_home
The db_home parameter names the database environment to be removed. +

On Windows, the db_home argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters.

+
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_FORCE
If the DB_FORCE flag is set, the environment is removed, regardless +
+
DB_FORCE
If the DB_FORCE flag is set, the environment is removed, regardless of any processes that may still be using it, and no locks are acquired during this process. (Generally, the DB_FORCE flag is specified only when applications were unable to shut down cleanly, and there is a risk that an application may have died holding a Berkeley DB lock.) - -

DB_USE_ENVIRON
The Berkeley DB process' environment may be permitted to specify information + +
DB_USE_ENVIRON
The Berkeley DB process' environment may be permitted to specify information to be used when naming files; see Berkeley DB File Naming. Because permitting users to specify which files are used can create security problems, environment information will be used in file naming for all users only if the DB_USE_ENVIRON flag is set. -

DB_USE_ENVIRON_ROOT
The Berkeley DB process' environment may be permitted to specify information +
DB_USE_ENVIRON_ROOT
The Berkeley DB process' environment may be permitted to specify information to be used when naming files; see Berkeley DB File Naming. Because permitting users to specify which files are used can create security problems, if the DB_USE_ENVIRON_ROOT flag is set, environment information will @@ -105,11 +104,11 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EBUSY
The shared memory region was in use and the force flag was not set. +
+
EBUSY
The shared memory region was in use and the force flag was not set.
-

-

ENOENT
The file or directory does not exist. +
+
ENOENT
The file or directory does not exist.
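As an illustration, a sketch of forcibly removing an environment might look like the following; the home directory is an assumption, and the handle should not be used again after the call.

    #include <db_cxx.h>

    void remove_env()
    {
        DbEnv env(0);
        // DB_FORCE: remove even if other processes may still be using it.
        env.remove("/var/dbenv", DB_FORCE);
        // The DbEnv handle is not reused after remove().
    }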

Class

@@ -120,6 +119,6 @@ the following non-zero errors:


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_alloc.html b/db/docs/api_cxx/env_set_alloc.html index 2965d7f84..c59c88f2d 100644 --- a/db/docs/api_cxx/env_set_alloc.html +++ b/db/docs/api_cxx/env_set_alloc.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_alloc - + -

DbEnv::set_alloc

API -Ref -
+Ref +


@@ -74,15 +73,12 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

app_malloc
-The app_malloc parameter is the application-specified malloc +
+
app_malloc
The app_malloc parameter is the application-specified malloc function. -

app_realloc
-The app_realloc parameter is the application-specified realloc +
app_realloc
The app_realloc parameter is the application-specified realloc function. -

app_free
-The app_free parameter is the application-specified free function. +
app_free
The app_free parameter is the application-specified free function.
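As an illustration, a sketch routing the library's application-visible allocations through the standard C allocator might look like the following (any trio of functions with matching signatures would do); it must be called before DbEnv::open.

    #include <stdlib.h>
    #include <db_cxx.h>

    void configure_alloc(DbEnv &env)
    {
        // Memory the library allocates on the application's behalf will
        // come from these functions.
        env.set_alloc(malloc, realloc, free);
    }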

Errors

The DbEnv::set_alloc method @@ -90,8 +86,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DbEnv::open was called; or if an +
+
EINVAL
If the method was called after DbEnv::open was called; or if an invalid flag value or parameter was specified.

@@ -103,6 +99,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_app_dispatch.html b/db/docs/api_cxx/env_set_app_dispatch.html index ba8b9ec11..a35fac352 100644 --- a/db/docs/api_cxx/env_set_app_dispatch.html +++ b/db/docs/api_cxx/env_set_app_dispatch.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_app_dispatch - + -

DbEnv::set_app_dispatch

API -Ref -
+Ref +


@@ -46,26 +45,25 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

tx_recover
-The tx_recover parameter is the application's abort and recovery +
+
tx_recover
The tx_recover parameter is the application's abort and recovery function. The function takes four parameters: -

-

dbenv
The dbenv parameter is the enclosing database environment handle. -

log_rec
The log_rec parameter is a log record. -

lsn
The lsn parameter is a log sequence number. -

op
The op parameter is one of the following values: -

-

DB_TXN_BACKWARD_ROLL
The log is being read backward to determine which transactions have been +
+
dbenv
The dbenv parameter is the enclosing database environment handle. +
log_rec
The log_rec parameter is a log record. +
lsn
The lsn parameter is a log sequence number. +
op
The op parameter is one of the following values: +
+
DB_TXN_BACKWARD_ROLL
The log is being read backward to determine which transactions have been committed and to abort those operations that were not; undo the operation described by the log record. -

DB_TXN_FORWARD_ROLL
The log is being played forward; redo the operation described by the log +
DB_TXN_FORWARD_ROLL
The log is being played forward; redo the operation described by the log record. -

DB_TXN_ABORT
The log is being read backward during a transaction abort; undo the +
DB_TXN_ABORT
The log is being read backward during a transaction abort; undo the operation described by the log record. -

DB_TXN_APPLY
The log is being applied on a replica site; redo the operation +
DB_TXN_APPLY
The log is being applied on a replica site; redo the operation described by the log record. -

DB_TXN_PRINT
The log is being printed for debugging purposes; print the contents of +
DB_TXN_PRINT
The log is being printed for debugging purposes; print the contents of this log record in the desired format.

The DB_TXN_FORWARD_ROLL and DB_TXN_APPLY operations @@ -89,8 +87,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DbEnv::open was called; or if an +
+
EINVAL
If the method was called after DbEnv::open was called; or if an invalid flag value or parameter was specified.
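As an illustration of the callback described above, a skeleton dispatch function might look like the following; the function name is an assumption, and the per-record undo/redo logic is left as comments.

    #include <db_cxx.h>

    int my_app_dispatch(DbEnv *dbenv, Dbt *log_rec, DbLsn *lsn, db_recops op)
    {
        switch (op) {
        case DB_TXN_ABORT:
        case DB_TXN_BACKWARD_ROLL:
            /* Undo the operation described by log_rec. */
            break;
        case DB_TXN_FORWARD_ROLL:
        case DB_TXN_APPLY:
            /* Redo the operation described by log_rec. */
            break;
        case DB_TXN_PRINT:
            /* Print the record in the desired format. */
            break;
        default:
            break;
        }
        return (0);
    }

    // Configured before DbEnv::open:  env.set_app_dispatch(my_app_dispatch);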

@@ -102,6 +100,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_cachesize.html b/db/docs/api_cxx/env_set_cachesize.html index 2f7e9d684..386f22bc5 100644 --- a/db/docs/api_cxx/env_set_cachesize.html +++ b/db/docs/api_cxx/env_set_cachesize.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_cachesize - + -

DbEnv::set_cachesize

API -Ref -
+Ref +


@@ -31,8 +30,6 @@ DbEnv::get_cachesize(u_int32_t *gbytesp, u_int32_t *bytesp, int *ncachep);
 


Description: DbEnv::set_cachesize

- -

Set the size of the shared memory buffer pool -- that is, the cache. The cache should be the size of the normal working data set of the application, with some small amount of additional memory for unusual @@ -42,7 +39,7 @@ pages accessed simultaneously, and is usually much larger.)

20KB. Any cache size less than 500MB is automatically increased by 25% to account for buffer pool overhead; cache sizes larger than 500MB are used as specified. The current maximum size of a single cache is 4GB. -(All sizes are in powers-of-two, that is, 256KB is 2^32 not 256,000.) +(All sizes are in powers-of-two, that is, 256KB is 2^18 not 256,000.) For information on tuning the Berkeley DB cache size, see Selecting a cache size.

It is possible to specify caches to Berkeley DB larger than 4GB and/or large @@ -76,13 +73,10 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

bytes
-The size of the cache is set to gbytes gigabytes plus bytes. -

gbytes
-The size of the cache is set to gbytes gigabytes plus bytes. -

ncache
-The ncache parameter is the number of caches to create. +
+
bytes
The size of the cache is set to gbytes gigabytes plus bytes. +
gbytes
The size of the cache is set to gbytes gigabytes plus bytes. +
ncache
The ncache parameter is the number of caches to create.
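As an illustration, a sketch configuring a single 64MB cache (an arbitrary example size) and reading the setting back might look like the following; the size must be set before DbEnv::open.

    #include <db_cxx.h>

    void configure_cache(DbEnv &env)
    {
        env.set_cachesize(0, 64 * 1024 * 1024, 1);   // 0GB + 64MB, one cache

        u_int32_t gbytes, bytes;
        int ncache;
        env.get_cachesize(&gbytes, &bytes, &ncache);
    }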

Errors

The DbEnv::set_cachesize method @@ -90,8 +84,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the specified cache size was impossibly small; +
+
EINVAL
If the specified cache size was impossibly small; the method was called after DbEnv::open was called; or if an @@ -109,15 +103,12 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

bytesp
-The bytesp parameter references memory into which +
+
bytesp
The bytesp parameter references memory into which the additional bytes of memory in the cache is copied. -

gbytesp
-The gbytesp parameter references memory into which +
gbytesp
The gbytesp parameter references memory into which the gigabytes of memory in the cache is copied. -

ncachep
-The ncachep parameter references memory into which +
ncachep
The ncachep parameter references memory into which the number of caches is copied.

@@ -129,6 +120,6 @@ The ncachep parameter references memory into which

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_data_dir.html b/db/docs/api_cxx/env_set_data_dir.html index 63dd8106c..8a28661cb 100644 --- a/db/docs/api_cxx/env_set_data_dir.html +++ b/db/docs/api_cxx/env_set_data_dir.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_data_dir - + -

DbEnv::set_data_dir

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbEnv::get_data_dirs(const char ***dirpp);
 


Description: DbEnv::set_data_dir

-

Set the path of a directory to be used as the location of the access method database files. Paths specified to the Db::open function will be searched relative to this path. Paths set using this method @@ -39,11 +37,10 @@ are additive, and specifying more than one will result in each specified directory being searched for database files. If any directories are specified, created database files will always be created in the first path specified.

-

If no database directories are specified, database files can exist only -in the environment home directory. See Berkeley DB File Naming for more information.

-

For the greatest degree of recoverability from system or application -failure, database files and log files should be located on separate -physical devices.

+

If no database directories are specified, database files must be named +either by absolute paths or relative to the environment home directory. +See Berkeley DB File Naming for more +information.

The database environment's data directories may also be set using the environment's DB_CONFIG file. The syntax of the entry in that file is a single line with the string "set_data_dir", one or more whitespace characters, @@ -64,10 +61,11 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

dir
-The dir parameter is a directory to be used as a location for +
+
dir
The dir parameter is a directory to be used as a location for database files. +

On Windows, the dir argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters.
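As an illustration, a sketch pointing database files at a "data" subdirectory of the environment home might look like the following (the directory name is an assumption); the equivalent DB_CONFIG entry would be a line of the form "set_data_dir data".

    #include <db_cxx.h>

    void configure_data_dir(DbEnv &env)
    {
        // Must be configured before DbEnv::open; a relative path is
        // interpreted relative to the environment home directory.
        env.set_data_dir("data");
    }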

Errors

The DbEnv::set_data_dir method @@ -75,14 +73,13 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DbEnv::open was called; or if an +
+
EINVAL
If the method was called after DbEnv::open was called; or if an invalid flag value or parameter was specified.

Description: DbEnv::get_data_dirs

-

The DbEnv::get_data_dirs method returns the NULL-terminated -array of directories.

+

The DbEnv::get_data_dirs method returns the NULL-terminated array of directories.

The DbEnv::get_data_dirs method may be called at any time during the life of the application.

The DbEnv::get_data_dirs method @@ -91,11 +88,9 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

dirpp
-The DbEnv::get_data_dirs method returns a reference to the -NULL-terminated -array of directories in dirpp. +
+
dirpp
The DbEnv::get_data_dirs method returns a reference to the +NULL-terminated array of directories in dirpp.

Class

@@ -106,6 +101,6 @@ array of directories in dirpp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_encrypt.html b/db/docs/api_cxx/env_set_encrypt.html index 0e9e80d84..0c084a154 100644 --- a/db/docs/api_cxx/env_set_encrypt.html +++ b/db/docs/api_cxx/env_set_encrypt.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_encrypt - + -

DbEnv::set_encrypt

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbEnv::get_encrypt_flags(u_int32_t *flagsp);
 


Description: DbEnv::set_encrypt

-

Set the password used by the Berkeley DB library to perform encryption and decryption.

The DbEnv::set_encrypt method configures a database environment, not only operations @@ -49,17 +47,15 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to 0 or +
+
flags
The flags parameter must be set to 0 or the following value: -

-

DB_ENCRYPT_AES
Use the Rijndael/AES (also known as the Advanced Encryption Standard +
+
DB_ENCRYPT_AES
Use the Rijndael/AES (also known as the Advanced Encryption Standard and Federal Information Processing Standard (FIPS) 197) algorithm for encryption or decryption.
-

passwd
-The passwd parameter is the password used to perform encryption +
passwd
The passwd parameter is the password used to perform encryption and decryption.
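As an illustration, a sketch enabling AES encryption for an environment might look like the following; the password is a placeholder, and the call must precede DbEnv::open.

    #include <db_cxx.h>

    void configure_encryption(DbEnv &env)
    {
        env.set_encrypt("example-password", DB_ENCRYPT_AES);
    }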

Errors

@@ -68,14 +64,14 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after +
+
EINVAL
If the method was called after DbEnv::open was called; or if an invalid flag value or parameter was specified.
-

-

EOPNOTSUPP
Cryptography is not available in this Berkeley DB release. +
+
EOPNOTSUPP
Cryptography is not available in this Berkeley DB release.

Description: DbEnv::get_encrypt_flags

@@ -88,9 +84,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flagsp
-The DbEnv::get_encrypt_flags method returns the +
+
flagsp
The DbEnv::get_encrypt_flags method returns the encryption flags in flagsp.

@@ -102,6 +97,6 @@ encryption flags in flagsp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_errcall.html b/db/docs/api_cxx/env_set_errcall.html index 7046d0b36..926483206 100644 --- a/db/docs/api_cxx/env_set_errcall.html +++ b/db/docs/api_cxx/env_set_errcall.html @@ -1,30 +1,29 @@ - - + + Berkeley DB: DbEnv::set_errcall - + -

DbEnv::set_errcall

API -Ref -
+Ref +


 #include <db_cxx.h>
 

-void DbEnv::set_errcall( - void (*db_errcall_fcn)(const char *errpfx, char *msg)); +void DbEnv::set_errcall(void (*db_errcall_fcn) + (const DbEnv *dbenv, const char *errpfx, const char *msg));


Description: DbEnv::set_errcall

@@ -39,6 +38,7 @@ In some cases, when an error occurs, Berkeley DB will call db_errcall_fcn with additional error information. It is up to the db_errcall_fcn function to display the error message in an appropriate manner.

+

Setting db_errcall_fcn to NULL unconfigures the callback interface.

Alternatively, you can use the DbEnv::set_error_stream and Db::set_error_stream methods to display the additional information via an output stream, or the Db::set_errfile or @@ -50,14 +50,14 @@ as during application debugging.

The DbEnv::set_errcall method may be called at any time during the life of the application.

Parameters

-

-

db_errcall_fcn
-The db_errcall_fcn parameter is the application-specified error -reporting function. The function takes two parameters: -

-

errpfx
The errpfx parameter is the prefix string (as previously set by +
+
db_errcall_fcn
The db_errcall_fcn parameter is the application-specified error +reporting function. The function takes three parameters: +
+
dbenv
The dbenv parameter is the enclosing database environment. +
errpfx
The errpfx parameter is the prefix string (as previously set by Db::set_errpfx or DbEnv::set_errpfx). -

msg
The msg parameter is the error message string. +
msg
The msg parameter is the error message string.
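As an illustration of the three-parameter callback described above, a sketch might look like the following; the function name and output format are assumptions.

    #include <stdio.h>
    #include <db_cxx.h>

    void my_errcall(const DbEnv *dbenv, const char *errpfx, const char *msg)
    {
        // errpfx may be NULL if no prefix was configured.
        fprintf(stderr, "%s: %s\n", errpfx != NULL ? errpfx : "db", msg);
    }

    // Registered as:  env.set_errcall(my_errcall);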

@@ -69,6 +69,6 @@ reporting function. The function takes two parameters:

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_errfile.html b/db/docs/api_cxx/env_set_errfile.html index 1b589c4cd..e11c656f5 100644 --- a/db/docs/api_cxx/env_set_errfile.html +++ b/db/docs/api_cxx/env_set_errfile.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_errfile - + -

DbEnv::set_errfile

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbEnv::get_errfile(FILE **errfilep);
 


Description: DbEnv::set_errfile

- When an error occurs in the Berkeley DB library, an exception is thrown or an error return value is returned by the interface. In some cases, however, the errno value may be insufficient to completely @@ -51,33 +49,22 @@ should not mix these approaches.

(":") (if a prefix string was previously specified using Db::set_errpfx or DbEnv::set_errpfx), an error string, and a trailing <newline> character.

+

Setting errfile to NULL unconfigures the interface.

This error logging enhancement does not slow performance or significantly increase application size, and may be run during normal operation as well as during application debugging.

The DbEnv::set_errfile method may be called at any time during the life of the application.

Parameters

-

-

errfile
-The errfile parameter is a C library FILE * to be used for +
+
errfile
The errfile parameter is a C library FILE * to be used for displaying additional Berkeley DB error information.

Description: DbEnv::get_errfile

-

The DbEnv::get_errfile method returns the FILE *.

+

The DbEnv::get_errfile method returns the FILE *.

The DbEnv::get_errfile method may be called at any time during the life of the application.

-

The DbEnv::get_errfile method -either returns a non-zero error value -or throws an exception that encapsulates a non-zero error value on -failure, and returns 0 on success. -

-

Parameters

-

-

errfilep
-The DbEnv::get_errfile method returns the -FILE * in errfilep. -

Class

DbEnv @@ -87,6 +74,6 @@ FILE * in errfilep.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_error_stream.html b/db/docs/api_cxx/env_set_error_stream.html index ad04a7237..82d5f9d5e 100644 --- a/db/docs/api_cxx/env_set_error_stream.html +++ b/db/docs/api_cxx/env_set_error_stream.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_error_stream - + -

DbEnv::set_error_stream

API -Ref -
+Ref +


@@ -41,18 +40,19 @@ output an additional error message to the specified stream.

(":") (if a prefix string was previously specified using DbEnv::set_errpfx), an error string, and a trailing <newline> character.

-

Alternatively, you can use the DbEnv::set_errfile method to display -the additional information via a C library FILE *, or the -DbEnv::set_errcall method to capture the additional error information in -a way that does not use either output streams or C library FILE *'s. You -should not mix these approaches.

+

Setting stream to NULL unconfigures the interface.

+

Alternatively, you can use the DbEnv::set_errfile and +Db::set_errfile methods to display the additional information via a C +library FILE *, or the DbEnv::set_errcall and +Db::set_errcall methods to capture the additional error information +in a way that does not use either output streams or C library FILE *'s. +You should not mix these approaches.

This error-logging enhancement does not slow performance or significantly increase application size, and may be run during normal operation as well as during application debugging.

Parameters

-

-

stream
-The stream parameter is the application-specified output stream to +
+
stream
The stream parameter is the application-specified output stream to be used for additional error information.

@@ -64,6 +64,6 @@ be used for additional error information.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_errpfx.html b/db/docs/api_cxx/env_set_errpfx.html index 8a093c921..2ac87cae1 100644 --- a/db/docs/api_cxx/env_set_errpfx.html +++ b/db/docs/api_cxx/env_set_errpfx.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_errpfx - + -

DbEnv::set_errpfx

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbEnv::get_errpfx(const char **errpfxp);
 


Description: DbEnv::set_errpfx

-

Set the prefix string that appears before error messages issued by Berkeley DB.

The Db::set_errpfx and DbEnv::set_errpfx methods do not copy the memory to which the errpfx parameter refers; rather, they @@ -42,9 +40,8 @@ closed.

The DbEnv::set_errpfx method may be called at any time during the life of the application.

Parameters

-

-

errpfx
-The errpfx parameter is the application-specified error prefix +
+
errpfx
The errpfx parameter is the application-specified error prefix for additional error messages.

@@ -58,9 +55,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

errpfxp
-The DbEnv::get_errpfx method returns a reference to the +
+
errpfxp
The DbEnv::get_errpfx method returns a reference to the error prefix in errpfxp.

@@ -72,6 +68,6 @@ error prefix in errpfxp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_feedback.html b/db/docs/api_cxx/env_set_feedback.html index 4ca4abd94..d573ccb44 100644 --- a/db/docs/api_cxx/env_set_feedback.html +++ b/db/docs/api_cxx/env_set_feedback.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_feedback - + -

DbEnv::set_feedback

API -Ref -
+Ref +


@@ -46,20 +45,19 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

db_feedback_fcn
-The db_feedback_fcn parameter is the application-specified +
+
db_feedback_fcn
The db_feedback_fcn parameter is the application-specified feedback function called to report Berkeley DB operation progress. The callback function must take three parameters: -

-

dbenv
The dbenv parameter is a reference to the enclosing database +
+
dbenv
The dbenv parameter is a reference to the enclosing database environment. -

opcode
The opcode parameter is an operation code. The opcode +
opcode
The opcode parameter is an operation code. The opcode parameter may take on any of the following values: -

-

DB_RECOVER
The environment is being recovered. +
+
DB_RECOVER
The environment is being recovered.
-

percent
The percent parameter is the percent of the operation that has +
percent
The percent parameter is the percent of the operation that has been completed, specified as an integer value between 0 and 100.
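As an illustration of the callback described above, a sketch reporting recovery progress might look like the following; the function name and output format are assumptions.

    #include <stdio.h>
    #include <db_cxx.h>

    void my_feedback(DbEnv *dbenv, int opcode, int percent)
    {
        if (opcode == DB_RECOVER)
            printf("recovery %d%% complete\n", percent);
    }

    // Registered as:  env.set_feedback(my_feedback);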
@@ -72,6 +70,6 @@ been completed, specified as an integer value between 0 and 100.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_flags.html b/db/docs/api_cxx/env_set_flags.html index ea8b18b13..498128d4d 100644 --- a/db/docs/api_cxx/env_set_flags.html +++ b/db/docs/api_cxx/env_set_flags.html @@ -1,23 +1,22 @@ - + Berkeley DB: DbEnv::set_flags - + -

DbEnv::set_flags

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbEnv::get_flags(u_int32_t *flagsp)
 


Description: DbEnv::set_flags

-

Configure a database environment.

The database environment's flag values may also be set using the environment's DB_CONFIG file. The syntax of the entry in that file is a @@ -47,12 +45,11 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flags
-The flags parameter must be set by bitwise inclusively OR'ing together one or more +
+
flags
The flags parameter must be set by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_AUTO_COMMIT
If set, operations for which no explicit transaction handle was +
+
DB_AUTO_COMMIT
If set, operations for which no explicit transaction handle was specified, and which modify databases in the database environment, will be automatically enclosed within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, @@ -66,8 +63,8 @@ or the flag should be specified in the DB_CONFIG configuration file.

The DB_AUTO_COMMIT flag may be used to configure Berkeley DB at any time during the life of the application.

- -

DB_CDB_ALLDB
If set, Berkeley DB Concurrent Data Store applications will perform locking on an environment-wide + +
DB_CDB_ALLDB
If set, Berkeley DB Concurrent Data Store applications will perform locking on an environment-wide basis rather than on a per-database basis.

Calling DbEnv::set_flags with the DB_CDB_ALLDB flag only affects the specified DbEnv handle (and any other Berkeley DB handles opened @@ -78,8 +75,8 @@ or the flag should be specified in the DB_CONFIG configuration file.

The DB_CDB_ALLDB flag may be used to configure Berkeley DB only before the DbEnv::open method is called.

- -

DB_DIRECT_DB
Turn off system buffering of Berkeley DB database files to avoid double caching. + +
DB_DIRECT_DB
Turn off system buffering of Berkeley DB database files to avoid double caching.

Calling DbEnv::set_flags with the DB_DIRECT_DB flag only affects the specified DbEnv handle (and any other Berkeley DB handles opened within the scope of that handle). @@ -89,8 +86,8 @@ or the flag should be specified in the DB_CONFIG configuration file.

The DB_DIRECT_DB flag may be used to configure Berkeley DB at any time during the life of the application.

- -

DB_DIRECT_LOG
Turn off system buffering of Berkeley DB log files to avoid double caching. + +
DB_DIRECT_LOG
Turn off system buffering of Berkeley DB log files to avoid double caching.

Calling DbEnv::set_flags with the DB_DIRECT_LOG flag only affects the specified DbEnv handle (and any other Berkeley DB handles opened within the scope of that handle). @@ -100,21 +97,57 @@ or the flag should be specified in the DB_CONFIG configuration file.

The DB_DIRECT_LOG flag may be used to configure Berkeley DB at any time during the life of the application.

- -

DB_LOG_AUTOREMOVE
If set, Berkeley DB will automatically remove log files that are no longer -needed. Automatic log file removal is likely to make catastrophic -recovery impossible. -

Calling DbEnv::set_flags with the DB_LOG_AUTOREMOVE flag only affects + +

DB_DSYNC_LOG
Configure Berkeley DB to flush log writes to the backing disk before returning +from the write system call, rather than flushing log writes explicitly +in a separate system call. This is only available on some systems (for +example, systems supporting the IEEE/ANSI Std 1003.1 (POSIX) standard O_DSYNC flag, +or systems supporting the Win32 FILE_FLAG_WRITE_THROUGH flag). This +configuration may result in inaccurate file modification times and other +file-level information for Berkeley DB log files. This configuration may +offer a performance increase on some systems and a performance decrease +on others. +

Calling DbEnv::set_flags with the DB_DSYNC_LOG flag only affects the specified DbEnv handle (and any other Berkeley DB handles opened within the scope of that handle). For consistent behavior across the environment, all DbEnv -handles opened in the environment must either set the DB_LOG_AUTOREMOVE flag +handles opened in the environment must either set the DB_DSYNC_LOG flag or the flag should be specified in the DB_CONFIG configuration file.

+

The DB_DSYNC_LOG flag may be used to configure Berkeley DB at any time during +the life of the application.

+ +
DB_LOG_AUTOREMOVE
If set, Berkeley DB will automatically remove log files that are no longer +needed. Automatic log file removal is likely to make catastrophic +recovery impossible. +

Calling DbEnv::set_flags with the DB_LOG_AUTOREMOVE flag affects the +database environment, including all threads of control accessing the +database environment.

The DB_LOG_AUTOREMOVE flag may be used to configure Berkeley DB at any time during the life of the application.

+ +
DB_LOG_INMEMORY
If set, maintain transaction logs in memory rather than on disk. This +means that transactions exhibit the ACI (atomicity, consistency, and +isolation) properties, but not D (durability); that is, database +integrity will be maintained, but if the application or system fails, +integrity will not persist. All database files must be verified and/or +restored from a replication group master or archival backup after +application or system failure. +

When in-memory logs are configured and no more log buffer space is +available, Berkeley DB methods may return an additional error value, +DB_LOG_BUFFER_FULL. When choosing log buffer and file sizes +for in-memory logs, applications should ensure the in-memory log buffer +size is large enough that no transaction will ever span the entire +buffer, and avoid a state where the in-memory buffer is full and no +space can be freed because a transaction that started in the first log +"file" is still active.

+

Calling DbEnv::set_flags with the DB_LOG_INMEMORY flag affects the +database environment, including all threads of control accessing the +database environment.

+

The DB_LOG_INMEMORY flag may be used to configure Berkeley DB only before the +DbEnv::open method is called.

-

DB_NOLOCKING
If set, Berkeley DB will grant all requested mutual exclusion mutexes and +
DB_NOLOCKING
If set, Berkeley DB will grant all requested mutual exclusion mutexes and database locks without regard for their actual availability. This functionality should never be used for purposes other than debugging.

Calling DbEnv::set_flags with the DB_NOLOCKING flag only affects @@ -124,7 +157,7 @@ within the scope of that handle).

The DB_NOLOCKING flag may be used to configure Berkeley DB at any time during the life of the application.

-

DB_NOMMAP
If set, Berkeley DB will copy read-only database files into the local cache +
DB_NOMMAP
If set, Berkeley DB will copy read-only database files into the local cache instead of potentially mapping them into process memory (see the description of the DbEnv::set_mp_mmapsize method for further information). @@ -138,7 +171,7 @@ file.

The DB_NOMMAP flag may be used to configure Berkeley DB at any time during the life of the application.

-

DB_NOPANIC
If set, Berkeley DB will ignore any panic state in the database environment. +
DB_NOPANIC
If set, Berkeley DB will ignore any panic state in the database environment. (Database environments in a panic state normally refuse all attempts to call Berkeley DB functions, returning DB_RUNRECOVERY.) This functionality should never be used for purposes other than debugging. @@ -148,7 +181,7 @@ within the scope of that handle).

The DB_NOPANIC flag may be used to configure Berkeley DB at any time during the life of the application.

-

DB_OVERWRITE
Overwrite files stored in encrypted formats before deleting them. Berkeley DB +
DB_OVERWRITE
Overwrite files stored in encrypted formats before deleting them. Berkeley DB overwrites files using alternating 0xff, 0x00 and 0xff byte patterns. For file overwriting to be effective, the underlying file must be stored on a fixed-block filesystem. Systems with journaling or logging filesystems @@ -161,7 +194,7 @@ within the scope of that handle).

The DB_OVERWRITE flag may be used to configure Berkeley DB at any time during the life of the application.

-

DB_PANIC_ENVIRONMENT
If set, Berkeley DB will set the panic state for the database environment. +
DB_PANIC_ENVIRONMENT
If set, Berkeley DB will set the panic state for the database environment. (Database environments in a panic state normally refuse all attempts to call Berkeley DB functions, returning DB_RUNRECOVERY.) This flag may not be specified using the environment's DB_CONFIG file. This @@ -173,7 +206,7 @@ database environment.

The DB_PANIC_ENVIRONMENT flag may be used to configure Berkeley DB at any time during the life of the application.

-

DB_REGION_INIT
In some applications, the expense of page-faulting the underlying shared +
DB_REGION_INIT
In some applications, the expense of page-faulting the underlying shared memory regions can affect performance. (For example, if the page-fault occurs while holding a lock, other lock requests can convoy, and overall throughput may decrease.) If set, Berkeley DB will page-fault shared regions @@ -191,7 +224,7 @@ or the flag should be specified in the DB_CONFIG configuration file.

The DB_REGION_INIT flag may be used to configure Berkeley DB at any time during the life of the application.

-

DB_TIME_NOTGRANTED
If set, database calls timing out based on lock or transaction timeout +
DB_TIME_NOTGRANTED
If set, database calls timing out based on lock or transaction timeout values will throw a DbLockNotGrantedException exception instead of DbDeadlockException. @@ -207,7 +240,7 @@ file.

The DB_TIME_NOTGRANTED flag may be used to configure Berkeley DB at any time during the life of the application.

-

DB_TXN_NOSYNC
If set, Berkeley DB will not write or synchronously flush the log on transaction +
DB_TXN_NOSYNC
If set, Berkeley DB will not write or synchronously flush the log on transaction commit. This means that transactions exhibit the ACI (atomicity, consistency, and isolation) properties, but not D (durability); that is, database @@ -226,26 +259,7 @@ or the flag should be specified in the DB_CONFIG configuration file.

The DB_TXN_NOSYNC flag may be used to configure Berkeley DB at any time during the life of the application.

-

DB_TXN_NOT_DURABLE
If set, Berkeley DB will not write log records. This means that -transactions exhibit the ACI (atomicity, consistency, and isolation) -properties, but not D (durability); that is, database integrity will -be maintained, but if the application or system fails, integrity will -not persist. All database files must be verified and/or restored from -backup after a failure. In order to ensure integrity after -application shut down, all database handles must be closed without -specifying DB_NOSYNC, or all database changes must be flushed -from the database environment cache using -either the DbEnv::txn_checkpoint or DbEnv::memp_sync methods. -

Calling DbEnv::set_flags with the DB_TXN_NOT_DURABLE flag only affects -the specified DbEnv handle (and any other Berkeley DB handles opened -within the scope of that handle). -For consistent behavior across the environment, all DbEnv -handles opened in the environment must either set the DB_TXN_NOT_DURABLE flag -or the flag should be specified in the DB_CONFIG configuration -file.

The DB_TXN_NOT_DURABLE flag may be used to configure Berkeley DB at any time during -the life of the application.

- -

DB_TXN_WRITE_NOSYNC
If set, Berkeley DB will write, but will not synchronously flush, the log on +
DB_TXN_WRITE_NOSYNC
If set, Berkeley DB will write, but will not synchronously flush, the log on transaction commit. This means that transactions exhibit the ACI (atomicity, consistency, and isolation) properties, but not D (durability); that is, database @@ -263,8 +277,8 @@ or the flag should be specified in the DB_CONFIG configuration file.

The DB_TXN_WRITE_NOSYNC flag may be used to configure Berkeley DB at any time during the life of the application.

- -

DB_YIELDCPU
If set, Berkeley DB will yield the processor immediately after each page or + +
DB_YIELDCPU
If set, Berkeley DB will yield the processor immediately after each page or mutex acquisition. This functionality should never be used for purposes other than stress testing.

Calling DbEnv::set_flags with the DB_YIELDCPU flag only affects @@ -277,10 +291,8 @@ file.

The DB_YIELDCPU flag may be used to configure Berkeley DB at any time during the life of the application.

-

onoff
-If the onoff parameter is -zero, -the specified flags are cleared; otherwise they are set. +
onoff
If the onoff parameter is zero, the specified flags are cleared; +otherwise they are set.
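As an illustration of the flags and the onoff parameter described above, a sketch might look like the following; the particular flag choices are assumptions about an application willing to trade durability for throughput.

    #include <db_cxx.h>

    void configure_flags(DbEnv &env)
    {
        env.set_flags(DB_TXN_NOSYNC, 1);     // may be set at any time
        env.set_flags(DB_LOG_INMEMORY, 1);   // only before DbEnv::open
        env.set_flags(DB_TXN_NOSYNC, 0);     // onoff == 0 clears the flag again
    }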

Errors

The DbEnv::set_flags method @@ -288,8 +300,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -303,9 +315,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flagsp
-The DbEnv::get_flags method returns the +
+
flagsp
The DbEnv::get_flags method returns the configuration flags in flagsp.

@@ -317,6 +328,6 @@ configuration flags in flagsp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_lg_bsize.html b/db/docs/api_cxx/env_set_lg_bsize.html index 018af1026..0dc470af4 100644 --- a/db/docs/api_cxx/env_set_lg_bsize.html +++ b/db/docs/api_cxx/env_set_lg_bsize.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_lg_bsize - + -

DbEnv::set_lg_bsize

API -Ref -
+Ref +


@@ -31,16 +30,25 @@ DbEnv::get_lg_bsize(u_int32_t *lg_bsizep);
 


Description: DbEnv::set_lg_bsize

- -

Set the size of the in-memory log buffer, in bytes. By default, or if -the value is set to 0, a size of 32K is used. The size of the log file -(see DbEnv::set_lg_max) must be at least four times the size of -the in-memory log buffer.

-

Log information is stored in-memory until the storage space fills up -or transaction commit forces the information to be flushed to stable -storage. In the presence of long-running transactions or transactions -producing large amounts of data, larger buffer sizes can increase -throughput.

+

Set the size of the in-memory log buffer, in bytes.

+

When the logging subsystem is configured for on-disk logging, the +default size of the in-memory log buffer is 32KB. Log information is +stored in-memory until the storage space fills up or transaction commit +forces the information to be flushed to stable storage. In the presence +of long-running transactions or transactions producing large amounts of +data, larger buffer sizes can increase throughput.

+

When the logging subsystem is configured for in-memory logging, the +default size of the in-memory log buffer is 1MB. Log information is +stored in-memory until the storage space fills up or transaction abort +or commit frees up the memory for new transactions. In the presence of +long-running transactions or transactions producing large amounts of +data, the buffer size must be sufficient to hold all log information +that can accumulate during the longest running transaction. When +choosing log buffer and file sizes for in-memory logs, applications +should ensure the in-memory log buffer size is large enough that no +transaction will ever span the entire buffer, and avoid a state where +the in-memory buffer is full and no space can be freed because a +transaction that started in the first log "file" is still active.

The database environment's log buffer size may also be set using the environment's DB_CONFIG file. The syntax of the entry in that file is a single line with the string "set_lg_bsize", one or more whitespace characters, @@ -62,9 +70,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

lg_bsize
-The lg_bsize parameter is the size of the in-memory log buffer, +
+
lg_bsize
The lg_bsize parameter is the size of the in-memory log buffer, in bytes.
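As an illustration of the in-memory configuration described above, a sketch might look like the following; the 10MB buffer and 1MB log "file" size are assumptions chosen so the buffer comfortably exceeds both the file size and any single transaction's log volume.

    #include <db_cxx.h>

    void configure_inmemory_logging(DbEnv &env)
    {
        env.set_flags(DB_LOG_INMEMORY, 1);    // only before DbEnv::open
        env.set_lg_bsize(10 * 1024 * 1024);   // 10MB in-memory log buffer
        env.set_lg_max(1024 * 1024);          // 1MB log "files"
    }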

Errors

@@ -73,10 +80,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DbEnv::open was called; -The size of the log file is less than four times the size of the in-memory -log buffer; or if an +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -90,9 +95,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

lg_bsizep
-The DbEnv::get_lg_bsize method returns the +
+
lg_bsizep
The DbEnv::get_lg_bsize method returns the size of the log buffer, in bytes in lg_bsizep.

@@ -104,6 +108,6 @@ size of the log buffer, in bytes in lg_bsizep.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_lg_dir.html b/db/docs/api_cxx/env_set_lg_dir.html index a46d9568f..001e76405 100644 --- a/db/docs/api_cxx/env_set_lg_dir.html +++ b/db/docs/api_cxx/env_set_lg_dir.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_lg_dir - + -

DbEnv::set_lg_dir

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbEnv::get_lg_dir(const char **dirp);
 


Description: DbEnv::set_lg_dir

-

The path of a directory to be used as the location of logging files. Log files created by the Log Manager subsystem will be created in this directory.

@@ -62,9 +60,10 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

dir
-The dir parameter is the directory used to store the logging files. +
+
dir
The dir parameter is the directory used to store the logging files. +

On Windows, the dir argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters.

Errors

The DbEnv::set_lg_dir method @@ -72,8 +71,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DbEnv::open was called; or if an +
+
EINVAL
If the method was called after DbEnv::open was called; or if an invalid flag value or parameter was specified.

@@ -87,9 +86,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

dirp
-The DbEnv::get_lg_dir method returns a reference to the +
+
dirp
The DbEnv::get_lg_dir method returns a reference to the log directory in dirp.

@@ -101,6 +99,6 @@ log directory in dirp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_lg_max.html b/db/docs/api_cxx/env_set_lg_max.html index d692a1edd..8e050a7cf 100644 --- a/db/docs/api_cxx/env_set_lg_max.html +++ b/db/docs/api_cxx/env_set_lg_max.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_lg_max - + -

DbEnv::set_lg_max

API -Ref -
+Ref +


@@ -31,15 +30,25 @@ DbEnv::get_lg_max(u_int32_t *);
 


Description: DbEnv::set_lg_max

- -

Set the maximum size of a single file in the log, in bytes. By default, -or if the lg_max parameter is set to 0, a size of 10MB is used. -Because DbLsn file offsets are unsigned four-byte values, the -set value may not be larger than the maximum unsigned four-byte value. -The size of the log file must be at least four times the size of the -in-memory log buffer (see DbEnv::set_lg_bsize).

-

See Log File Limits -for more information.

+

Set the maximum size of a single file in the log, in bytes. Because +DbLsn file offsets are unsigned four-byte values, the set +value may not be larger than the maximum unsigned four-byte value.

+

When the logging subsystem is configured for on-disk logging, the +default size of a log file is 10MB.

+

When the logging subsystem is configured for in-memory logging, the +default size of a log file is 256KB. In addition, the configured log +buffer size must be larger than the log file size. (The logging +subsystem divides memory configured for in-memory log records into +"files", as database environments configured for in-memory log records +may exchange log records with other members of a replication group, and +those members may be configured to store log records on-disk.) When +choosing log buffer and file sizes for in-memory logs, applications +should ensure the in-memory log buffer size is large enough that no +transaction will ever span the entire buffer, and avoid a state where +the in-memory buffer is full and no space can be freed because a +transaction that started in the first log "file" is still active.

+

See Log File Limits for more +information.

The database environment's log file size may also be set using the environment's DB_CONFIG file. The syntax of the entry in that file is a single line with the string "set_lg_max", one or more whitespace characters, @@ -60,9 +69,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

lg_max
-The lg_max parameter is the size of a single log file, in bytes. +
+
lg_max
The lg_max parameter is the size of a single log file, in bytes.

Errors

The DbEnv::set_lg_max method @@ -70,8 +78,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DbEnv::open was called; +
+
EINVAL
If the method was called after DbEnv::open was called; the size of the log file is less than four times the size of the in-memory log buffer; The specified log file size was too large; or if an @@ -88,9 +96,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

lg_maxp
-The DbEnv::get_lg_max method returns the +
+
lg_maxp
The DbEnv::get_lg_max method returns the maximum log file size in lg_maxp.

@@ -102,6 +109,6 @@ maximum log file size in lg_maxp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_lg_regionmax.html b/db/docs/api_cxx/env_set_lg_regionmax.html index 2d06a5580..5a0f36417 100644 --- a/db/docs/api_cxx/env_set_lg_regionmax.html +++ b/db/docs/api_cxx/env_set_lg_regionmax.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_lg_regionmax - + -

DbEnv::set_lg_regionmax

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbEnv::get_lg_regionmax(u_int32_t *lg_regionmaxp);
 


Description: DbEnv::set_lg_regionmax

-

Set the size of the underlying logging area of the Berkeley DB environment, in bytes. By default, or if the value is set to 0, the default size is 60KB. The log region is used to store filenames, and so may need to be @@ -58,9 +56,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

lg_regionmax
-The lg_regionmax parameter is the size of the logging area in +
+
lg_regionmax
The lg_regionmax parameter is the size of the logging area in the Berkeley DB environment, in bytes.

Errors

@@ -69,8 +66,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DbEnv::open was called; or if an +
+
EINVAL
If the method was called after DbEnv::open was called; or if an invalid flag value or parameter was specified.

@@ -84,9 +81,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

lg_regionmaxp
-The DbEnv::get_lg_regionmax method returns the +
+
lg_regionmaxp
The DbEnv::get_lg_regionmax method returns the size of the underlying logging subsystem region in lg_regionmaxp.

@@ -98,6 +94,6 @@ size of the underlying logging subsystem region in lg_regionmaxp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_lk_conflicts.html b/db/docs/api_cxx/env_set_lk_conflicts.html index 0ab10bee3..e3f8fd6e4 100644 --- a/db/docs/api_cxx/env_set_lk_conflicts.html +++ b/db/docs/api_cxx/env_set_lk_conflicts.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_lk_conflicts - + -

DbEnv::set_lk_conflicts

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbEnv::get_lk_conflicts(const u_int8_t **lk_conflictsp, int *lk_modesp);
 


Description: DbEnv::set_lk_conflicts

-

Set the locking conflicts matrix.

If DbEnv::set_lk_conflicts is never called, a standard conflicts array is used; see Standard Lock @@ -50,17 +48,14 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

conflicts
-The conflicts parameter is the new locking conflicts matrix. -The conflicts parameter -is an nmodes by nmodes array. -A non-0 value for the array element indicates that requested_mode and -held_mode conflict: +
+
conflicts
The conflicts parameter is the new locking conflicts matrix. +The conflicts parameter is an nmodes by nmodes +array. A non-0 value for the array element indicates that +requested_mode and held_mode conflict:
conflicts[requested_mode][held_mode]

The not-granted mode must be represented by 0.

-

nmodes
-The nmodes parameter is the size of the lock conflicts matrix. +
nmodes
The nmodes parameter is the size of the lock conflicts matrix.
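As an illustration, a sketch of a small conflicts matrix might look like the following; the three-mode numbering (0 = not granted, 1 = read, 2 = write) is an assumption for the example.

    #include <db_cxx.h>

    // conflicts[requested_mode][held_mode] != 0 means the modes conflict.
    u_int8_t example_conflicts[] = {
        /*           N  R  W  */
        /* N */      0, 0, 0,
        /* R */      0, 0, 1,
        /* W */      0, 1, 1
    };

    // Configured before DbEnv::open:
    //     env.set_lk_conflicts(example_conflicts, 3);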

Errors

The DbEnv::set_lk_conflicts method @@ -68,13 +63,13 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DbEnv::open was called; or if an +
+
EINVAL
If the method was called after DbEnv::open was called; or if an invalid flag value or parameter was specified.

If the conflicts array could not be copied, the DbEnv::set_lk_conflicts method will fail and either return ENOMEM or -throw a DbMemoryException exception.

+throw a DbMemoryException.


Description: dbenv_get_lk_conflicts

The DbEnv::get_lk_conflicts method returns the current conflicts array.

@@ -86,12 +81,10 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

lk_conflictsp
-The lk_conflictsp parameter references memory into which +
+
lk_conflictsp
The lk_conflictsp parameter references memory into which a pointer to the current conflicts array is copied. -

lk_modesp
-The lk_modesp parameter references memory into which +
lk_modesp
The lk_modesp parameter references memory into which the size of the current conflicts array is copied.

@@ -103,6 +96,6 @@ The lk_modesp parameter references memory into which

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_lk_detect.html b/db/docs/api_cxx/env_set_lk_detect.html index fc866e87a..58db702e9 100644 --- a/db/docs/api_cxx/env_set_lk_detect.html +++ b/db/docs/api_cxx/env_set_lk_detect.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_lk_detect - + -

DbEnv::set_lk_detect

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbEnv::get_lk_detect(u_int32_t *lk_detectp);
 


Description: DbEnv::set_lk_detect

-

Set if the deadlock detector is to be run whenever a lock conflict occurs, and specify what lock request(s) should be rejected. As transactions acquire locks on behalf of a single locker ID, rejecting a @@ -47,38 +45,31 @@ environment is opened, it will silently overrule configuration done before that time.

The DbEnv::set_lk_detect method configures a database environment, not only operations performed using the specified DbEnv handle.

-

The DbEnv::set_lk_detect method may not be called after the DbEnv::open method is -called. -If the database environment already exists when -DbEnv::open is called, the information specified to DbEnv::set_lk_detect -must be consistent with the existing environment or an error will be -returned. -

+

Although the DbEnv::set_lk_detect method may be called at any time during the life of +the application, it should normally be called before making calls to the +db_env_create or db_create methods.

The DbEnv::set_lk_detect method either returns a non-zero error value or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

detect
-The detect parameter configures the deadlock detector. The +
+
detect
The detect parameter configures the deadlock detector. The specified value must be one of the following list: -

-

DB_LOCK_DEFAULT
Use whatever lock policy was specified when the database environment +
+
DB_LOCK_DEFAULT
Use whatever lock policy was specified when the database environment was created. If no lock policy has yet been specified, set the lock policy to DB_LOCK_RANDOM.
DB_LOCK_EXPIRE
Reject lock requests which have timed out. No other deadlock detection is performed. -
DB_LOCK_MAXLOCKS
Reject the lock request for the locker ID with the greatest number of -locks. -
DB_LOCK_MINLOCKS
Reject the lock request for the locker ID with the fewest number of -locks. -
DB_LOCK_MINWRITE
Reject the lock request for the locker ID with the fewest number of -write locks. -
DB_LOCK_OLDEST
Reject the lock request for the oldest locker ID. +
DB_LOCK_MAXLOCKS
Reject the lock request for the locker ID with the most locks. +
DB_LOCK_MAXWRITE
Reject the lock request for the locker ID with the most write locks. +
DB_LOCK_MINLOCKS
Reject the lock request for the locker ID with the fewest locks. +
DB_LOCK_MINWRITE
Reject the lock request for the locker ID with the fewest write locks. +
DB_LOCK_OLDEST
Reject the lock request for the locker ID with the oldest lock.
DB_LOCK_RANDOM
Reject the lock request for a random locker ID. -
DB_LOCK_YOUNGEST
Reject the lock request for the youngest locker ID. +
DB_LOCK_YOUNGEST
Reject the lock request for the locker ID with the youngest lock.
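For illustration (a sketch, not taken from this patch), selecting one of the policies above and reading it back might look like:

#include <db_cxx.h>

void configure_deadlock_detection(DbEnv *dbenv)
{
    // Run the detector on every lock conflict, rejecting the request of
    // the locker holding the fewest write locks.
    dbenv->set_lk_detect(DB_LOCK_MINWRITE);

    u_int32_t policy;
    dbenv->get_lk_detect(&policy);         // policy is now DB_LOCK_MINWRITE
}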

Errors

@@ -87,8 +78,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DbEnv::open was called; or if an +
+
EINVAL
If the method was called after DbEnv::open was called; or if an invalid flag value or parameter was specified.

@@ -102,9 +93,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

lk_detectp
-The DbEnv::get_lk_detect method returns the +
+
lk_detectp
The DbEnv::get_lk_detect method returns the deadlock detector configuration in lk_detectp.

@@ -116,6 +106,6 @@ deadlock detector configuration in lk_detectp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_lk_max_lockers.html b/db/docs/api_cxx/env_set_lk_max_lockers.html index 175e53437..bb2865d55 100644 --- a/db/docs/api_cxx/env_set_lk_max_lockers.html +++ b/db/docs/api_cxx/env_set_lk_max_lockers.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_lk_max_lockers - + -

DbEnv::set_lk_max_lockers

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbEnv::get_lk_max_lockers(u_int32_t *, lk_maxp);
 


Description: DbEnv::set_lk_max_lockers

-

Set the maximum number of locking entities supported by the Berkeley DB environment. This value is used by DbEnv::open to estimate how much space to allocate for various lock-table data structures. The @@ -58,9 +56,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

max
-The max parameter is the maximum number simultaneous locking +
+
max
The max parameter is the maximum number of simultaneous locking entities supported by the Berkeley DB environment.

Errors

@@ -69,8 +66,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DbEnv::open was called; or if an +
+
EINVAL
If the method was called after DbEnv::open was called; or if an invalid flag value or parameter was specified.

@@ -84,9 +81,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

lk_maxp
-The DbEnv::get_lk_max_lockers method returns the +
+
lk_maxp
The DbEnv::get_lk_max_lockers method returns the maximum number of lockers in lk_maxp.

@@ -98,6 +94,6 @@ maximum number of lockers in lk_maxp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_lk_max_locks.html b/db/docs/api_cxx/env_set_lk_max_locks.html index 8e68bc408..76d1afac3 100644 --- a/db/docs/api_cxx/env_set_lk_max_locks.html +++ b/db/docs/api_cxx/env_set_lk_max_locks.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_lk_max_locks - + -

DbEnv::set_lk_max_locks

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbEnv::get_lk_max_locks(u_int32_t *lk_maxp);
 


Description: DbEnv::set_lk_max_locks

-

Set the maximum number of locks supported by the Berkeley DB environment. This value is used by DbEnv::open to estimate how much space to allocate for various lock-table data structures. The default value is @@ -59,9 +57,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

max
-The max parameter is the maximum number of locks supported by +
+
max
The max parameter is the maximum number of locks supported by the Berkeley DB environment.

Errors

@@ -70,24 +67,23 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DbEnv::open was called; or if an +
+
EINVAL
If the method was called after DbEnv::open was called; or if an invalid flag value or parameter was specified.

-

Description: DbEnv::set_lk_max_locks

-

The DbEnv::set_lk_max_locks method returns the maximum number of locks.

-

The DbEnv::set_lk_max_locks method may be called at any time during the life of the +

Description: DbEnv::get_lk_max_locks

+

The DbEnv::get_lk_max_locks method returns the maximum number of locks.

+

The DbEnv::get_lk_max_locks method may be called at any time during the life of the application.

-

The DbEnv::set_lk_max_locks method +

The DbEnv::get_lk_max_locks method either returns a non-zero error value or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

lk_maxp
-The DbEnv::set_lk_max_locks method returns the +
+
lk_maxp
The DbEnv::get_lk_max_locks method returns the maximum number of locks in lk_maxp.

@@ -99,6 +95,6 @@ maximum number of locks in lk_maxp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_lk_max_objects.html b/db/docs/api_cxx/env_set_lk_max_objects.html index cb951600f..0b7e19247 100644 --- a/db/docs/api_cxx/env_set_lk_max_objects.html +++ b/db/docs/api_cxx/env_set_lk_max_objects.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_lk_max_objects - + -

DbEnv::set_lk_max_objects

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbEnv::get_lk_max_objects(u_int32_t *lk_maxp);
 


Description: DbEnv::set_lk_max_objects

-

Set the maximum number of locked objects supported by the Berkeley DB environment. This value is used by DbEnv::open to estimate how much space to allocate for various lock-table data structures. The @@ -58,9 +56,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

max
-The max parameter is the maximum number of locked objects +
+
max
The max parameter is the maximum number of locked objects supported by the Berkeley DB environment.
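A sketch of sizing the lock table before DbEnv::open using the three related methods; the numbers and the helper name are illustrative placeholders only:

#include <db_cxx.h>

void size_lock_tables(DbEnv *dbenv)        // call before DbEnv::open
{
    dbenv->set_lk_max_lockers(2000);       // simultaneous locking entities
    dbenv->set_lk_max_locks(10000);        // simultaneously held locks
    dbenv->set_lk_max_objects(10000);      // simultaneously locked objects
}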

Errors

@@ -69,8 +66,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DbEnv::open was called; or if an +
+
EINVAL
If the method was called after DbEnv::open was called; or if an invalid flag value or parameter was specified.

@@ -84,9 +81,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

lk_maxp
-The DbEnv::get_lk_max_objects method returns the +
+
lk_maxp
The DbEnv::get_lk_max_objects method returns the maximum number of locked objects in lk_maxp.

@@ -98,6 +94,6 @@ maximum number of locked objects in lk_maxp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_mp_mmapsize.html b/db/docs/api_cxx/env_set_mp_mmapsize.html index df7ddb203..fb5014398 100644 --- a/db/docs/api_cxx/env_set_mp_mmapsize.html +++ b/db/docs/api_cxx/env_set_mp_mmapsize.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_mp_mmapsize - + -

DbEnv::set_mp_mmapsize

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbEnv::get_mp_mmapsize(size_t *mp_mmapsizep);
 


Description: DbEnv::set_mp_mmapsize

-

Files that are opened read-only in the pool (and that satisfy a few other criteria) are, by default, mapped into the process address space instead of being copied into the local cache. This can result in @@ -50,9 +48,8 @@ and the size in bytes. Because the DB_CONFIG file is read when the database environment is opened, it will silently overrule configuration done before that time.

-

The DbEnv::set_mp_mmapsize method configures operations performed using the specified -DbEnv handle, not all operations performed on the underlying -database environment.

+

The DbEnv::set_mp_mmapsize method configures a database environment, not only operations +performed using the specified DbEnv handle.

The DbEnv::set_mp_mmapsize method may be called at any time during the life of the application.

The DbEnv::set_mp_mmapsize method @@ -61,9 +58,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

mp_mmapsize
-The mp_mmapsize parameter is the maximum file size, in bytes, +
+
mp_mmapsize
The mp_mmapsize parameter is the maximum file size, in bytes, for a file to be mapped into the process address space.
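A brief, hypothetical sketch of capping the map size at 8MB and reading the value back:

#include <db_cxx.h>

void configure_mmap_limit(DbEnv *dbenv)
{
    // Read-only files larger than this will be copied into the cache
    // rather than mapped into the process address space.
    dbenv->set_mp_mmapsize(8 * 1024 * 1024);

    size_t limit;
    dbenv->get_mp_mmapsize(&limit);        // limit == 8388608
}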

Errors

@@ -72,8 +68,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DbEnv::open was called; or if an +
+
EINVAL
If the method was called after DbEnv::open was called; or if an invalid flag value or parameter was specified.

@@ -87,9 +83,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

mp_mmapsizep
-The DbEnv::get_mp_mmapsize method returns the +
+
mp_mmapsizep
The DbEnv::get_mp_mmapsize method returns the maximum file map size in mp_mmapsizep.

@@ -101,6 +96,6 @@ maximum file map size in mp_mmapsizep.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_msg_stream.html b/db/docs/api_cxx/env_set_msg_stream.html new file mode 100644 index 000000000..cfaf489c7 --- /dev/null +++ b/db/docs/api_cxx/env_set_msg_stream.html @@ -0,0 +1,60 @@ + + + + + + +Berkeley DB: DbEnv::set_message_stream + + + + + + + +
+

DbEnv::set_message_stream

+
+API +Ref
+


+ +

+#include <db_cxx.h>
+

+void DbEnv::set_message_stream(class ostream*); +

+
+

Description: DbEnv::set_message_stream

+

There are interfaces in the Berkeley DB library which either directly output +informational messages or statistical information, or configure the +library to output such messages when performing other operations, for +example, DbEnv::set_verbose and DbEnv::stat_print.

+

The DbEnv::set_message_stream and +Db::set_message_stream methods are used to display these messages for +the application. In this case, the message will include a trailing +<newline> character.

+

Setting stream to NULL unconfigures the interface.

+

Alternatively, you can use the DbEnv::set_msgfile and +Db::set_msgfile methods to display the messages via a C library FILE *, +or the DbEnv::set_msgcall and Db::set_msgcall methods to +capture the messages in a way that does not use +either output streams or C library FILE *'s. You should not mix these +approaches.

+

Parameters

+
+
stream
The stream parameter is the application-specified output stream to +be used for additional message information. +
+
+
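A minimal sketch, assuming the library is built against standard C++ iostreams so that std::cerr can be passed directly; the helper name is hypothetical:

#include <db_cxx.h>
#include <iostream>

void configure_message_stream(DbEnv *dbenv)
{
    // Route informational and statistical messages to standard error.
    dbenv->set_message_stream(&std::cerr);
}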

Class

+DbEnv +

See Also

+Database Environments and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_cxx/env_set_msgcall.html b/db/docs/api_cxx/env_set_msgcall.html new file mode 100644 index 000000000..4869edc0d --- /dev/null +++ b/db/docs/api_cxx/env_set_msgcall.html @@ -0,0 +1,66 @@ + + + + + + +Berkeley DB: DbEnv::set_msgcall + + + + + + + +
+

DbEnv::set_msgcall

+
+API +Ref
+


+ +

+#include <db_cxx.h>
+

+void DbEnv::set_msgcall( + void (*db_msgcall_fcn)(const DbEnv *dbenv, const char *msg)); +

+
+

Description: DbEnv::set_msgcall

+

There are interfaces in the Berkeley DB library which either directly output +informational messages or statistical information, or configure the +library to output such messages when performing other operations, for +example, DbEnv::set_verbose and DbEnv::stat_print.

+

The DbEnv::set_msgcall and Db::set_msgcall methods are used to +pass these messages to the application, and Berkeley DB will call +db_msgcall_fcn with each message. It is up to the +db_msgcall_fcn function to display the message in an appropriate +manner.

+

Setting db_msgcall_fcn to NULL unconfigures the callback interface.

+

Alternatively, you can use the DbEnv::set_message_stream and +Db::set_message_stream methods to display the messages via an output +stream, or the Db::set_msgfile or DbEnv::set_msgfile methods +to display the messages via a C library FILE *.

+

The DbEnv::set_msgcall method may be called at any time during the life of the +application.

+

Parameters

+
+
db_msgcall_fcn
The db_msgcall_fcn parameter is the application-specified message +reporting function. The function takes two parameters: +
+
dbenv
The dbenv parameter is the enclosing database environment. +
msg
The msg parameter is the message string. +
+
+
+
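A hypothetical sketch of a callback that forwards each message to the process's standard error:

#include <db_cxx.h>
#include <cstdio>

static void message_handler(const DbEnv *dbenv, const char *msg)
{
    std::fprintf(stderr, "bdb message: %s\n", msg);
}

void configure_msgcall(DbEnv *dbenv)
{
    dbenv->set_msgcall(message_handler);
}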

Class

+DbEnv +

See Also

+Database Environments and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_cxx/env_set_msgfile.html b/db/docs/api_cxx/env_set_msgfile.html new file mode 100644 index 000000000..9837036c2 --- /dev/null +++ b/db/docs/api_cxx/env_set_msgfile.html @@ -0,0 +1,70 @@ + + + + + + +Berkeley DB: DbEnv::set_msgfile + + + + + + + +
+

DbEnv::set_msgfile

+
+API +Ref
+


+ +

+#include <db_cxx.h>
+

+void +DbEnv::set_msgfile(FILE *msgfile); +

+void +DbEnv::get_msgfile(FILE **msgfilep); +

+
+

Description: DbEnv::set_msgfile

+

There are interfaces in the Berkeley DB library which either directly output +informational messages or statistical information, or configure the +library to output such messages when performing other operations, for +example, DbEnv::set_verbose and DbEnv::stat_print.

+

The DbEnv::set_msgfile and Db::set_msgfile methods are used to +display these messages for the application. In this case, the message +will include a trailing <newline> character.

+

Setting msgfile to NULL unconfigures the interface.

+

Alternatively, you can use the DbEnv::set_message_stream and +Db::set_message_stream methods to display the messages via an output +stream, or the DbEnv::set_msgcall and Db::set_msgcall methods +to capture the messages in a way that does not use +either output streams or C library FILE *'s. You should not mix these +approaches.

+

The DbEnv::set_msgfile method may be called at any time during the life of the +application.

+

Parameters

+
+
msgfile
The msgfile parameter is a C library FILE * to be used for +displaying messages. +
+
+

Description: DbEnv::get_msgfile

+

The DbEnv::get_msgfile method returns the FILE * to be used for displaying messages.

+

The DbEnv::get_msgfile method may be called at any time during the life of the +application.
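A sketch using a hypothetical log file as the message destination:

#include <db_cxx.h>
#include <cstdio>

void configure_msgfile(DbEnv *dbenv)
{
    // The application owns the FILE * and must eventually close it.
    FILE *fp = std::fopen("bdb_messages.log", "a");
    if (fp != NULL)
        dbenv->set_msgfile(fp);

    FILE *current;
    dbenv->get_msgfile(&current);          // current is the configured FILE *
}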

+
+

Class

+DbEnv +

See Also

+Database Environments and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_cxx/env_set_paniccall.html b/db/docs/api_cxx/env_set_paniccall.html index cd817ea1c..b73c658bc 100644 --- a/db/docs/api_cxx/env_set_paniccall.html +++ b/db/docs/api_cxx/env_set_paniccall.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_paniccall - + -

DbEnv::set_paniccall

API -Ref -
+Ref +


@@ -47,14 +46,13 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

db_panic_fcn
-The db_panic_fcn parameter is the application-specified function +
+
db_panic_fcn
The db_panic_fcn parameter is the application-specified function called in the case of a database environment panic. The function takes two arguments: -

-

dbenv
The dbenv parameter is the enclosing database environment handle. -

errval
The errval parameter is the error value that would have been +
+
dbenv
The dbenv parameter is the enclosing database environment handle. +
errval
The errval parameter is the error value that would have been returned to the caller if DB_RUNRECOVERY were not going to be returned instead.
@@ -68,6 +66,6 @@ returned instead.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_rpc_server.html b/db/docs/api_cxx/env_set_rpc_server.html index a0799c460..2273758ec 100644 --- a/db/docs/api_cxx/env_set_rpc_server.html +++ b/db/docs/api_cxx/env_set_rpc_server.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_rpc_server - + -

DbEnv::set_rpc_server

API -Ref -
+Ref +


@@ -32,8 +31,7 @@ DbEnv::set_rpc_server(CLIENT *client, char *host,
 

Establishes a connection for this dbenv to a RPC server.

When the DbEnv::set_rpc_server method has been called, subsequent calls to Berkeley DB library interfaces may return or throw exceptions encapsulating -DB_NOSERVER, DB_NOSERVER_ID, or -DB_NOSERVER_HOME.

+DB_NOSERVER, DB_NOSERVER_ID, or DB_NOSERVER_HOME.

The DbEnv::set_rpc_server method configures operations performed using the specified DbEnv handle, not all operations performed on the underlying database environment.

@@ -46,25 +44,20 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

cl_timeout
- +
+
cl_timeout
The cl_timeout parameter specifies the number of seconds the client should wait for results to come back from the server. Once the timeout has expired on any communication with the server, DB_NOSERVER will be returned. If this value is zero, a default timeout is used. -

client
-If the client channel has been provided by the application then +
client
If the client channel has been provided by the application then Berkeley DB will use it as its connection and the host and cl_timeout fields are ignored. -

host
-The host parameter is the host to which the Berkeley DB server will +
host
The host parameter is the host to which the Berkeley DB server will connect and create a channel for communication. -

flags
-The flags parameter is currently unused, and must be set to 0. - -

sv_timeout
-The sv_timeout parameter specifies the number of seconds the server +
flags
The flags parameter is currently unused, and must be set to 0. + +
sv_timeout
The sv_timeout parameter specifies the number of seconds the server should allow a client connection to remain idle before assuming that the client is gone. Once that timeout has been reached, the server releases all resources associated with that client connection. Subsequent attempts @@ -80,8 +73,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -93,6 +86,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_shm_key.html b/db/docs/api_cxx/env_set_shm_key.html index 61ccdbf8d..424ec2c37 100644 --- a/db/docs/api_cxx/env_set_shm_key.html +++ b/db/docs/api_cxx/env_set_shm_key.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_shm_key - + -

DbEnv::set_shm_key

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbEnv::get_shm_key(long *shm_keyp);
 


Description: DbEnv::set_shm_key

-

Specify a base segment ID for Berkeley DB environment shared memory regions created in system memory on VxWorks or systems supporting X/Open-style shared memory interfaces; for example, UNIX systems supporting @@ -76,9 +74,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

shm_key
-The shm_key parameter is the base segment ID for the database +
+
shm_key
The shm_key parameter is the base segment ID for the database environment.
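A short sketch with an arbitrary, illustrative base segment ID:

#include <db_cxx.h>

void configure_shm_key(DbEnv *dbenv)       // call before DbEnv::open
{
    dbenv->set_shm_key(664);

    long key;
    dbenv->get_shm_key(&key);              // key == 664
}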

Errors

@@ -87,8 +84,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DbEnv::open was called; or if an +
+
EINVAL
If the method was called after DbEnv::open was called; or if an invalid flag value or parameter was specified.

@@ -102,9 +99,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

shm_keyp
-The DbEnv::get_shm_key method returns the +
+
shm_keyp
The DbEnv::get_shm_key method returns the base segment ID in shm_keyp.

@@ -116,6 +112,6 @@ base segment ID in shm_keyp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_tas_spins.html b/db/docs/api_cxx/env_set_tas_spins.html index fd401c467..ca934525d 100644 --- a/db/docs/api_cxx/env_set_tas_spins.html +++ b/db/docs/api_cxx/env_set_tas_spins.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_tas_spins - + -

DbEnv::set_tas_spins

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbEnv::get_tas_spins(u_int32_t *, tas_spinsp);
 


Description: DbEnv::set_tas_spins

-

Specify that test-and-set mutexes should spin tas_spins times without blocking. The value defaults to 1 on uniprocessor systems and to 50 times the number of processors on multiprocessor systems.

@@ -53,9 +51,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

tas_spins
-The tas_spins parameter is the number of spins test-and-set +
+
tas_spins
The tas_spins parameter is the number of spins test-and-set mutexes should execute before blocking.

Errors

@@ -64,8 +61,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -79,9 +76,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

tas_spinsp
-The DbEnv::get_tas_spins method returns the +
+
tas_spinsp
The DbEnv::get_tas_spins method returns the test-and-set spin count in tas_spinsp.

@@ -93,6 +89,6 @@ test-and-set spin count in tas_spinsp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_timeout.html b/db/docs/api_cxx/env_set_timeout.html index f20643d41..07086313a 100644 --- a/db/docs/api_cxx/env_set_timeout.html +++ b/db/docs/api_cxx/env_set_timeout.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_timeout - + -

DbEnv::set_timeout

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbEnv::get_timeout(db_timeout_t *timeoutp, u_int32_t flag);
 


Description: DbEnv::set_timeout

-

The DbEnv::set_timeout method sets timeout values for locks or transactions in the database environment.

Timeouts are checked whenever a thread of control blocks on a lock or @@ -57,11 +55,10 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to one of the following values: -

-

DB_SET_LOCK_TIMEOUT
Set the timeout value for locks in this database environment. +
+
flags
The flags parameter must be set to one of the following values: +
+
DB_SET_LOCK_TIMEOUT
Set the timeout value for locks in this database environment.

The database environment's lock timeout value may also be set using the environment's DB_CONFIG file. The syntax of the entry in that file is a single line with the string "set_lock_timeout", one or more whitespace characters, @@ -69,7 +66,7 @@ and the lock timeout value. Because the DB_CONFIG file is read when the database environment is opened, it will silently overrule configuration done before that time.

-

DB_SET_TXN_TIMEOUT
Set the timeout value for transactions in this database environment. +
DB_SET_TXN_TIMEOUT
Set the timeout value for transactions in this database environment.

The database environment's transaction timeout value may also be set using the environment's DB_CONFIG file. The syntax of the entry in that file is a single line with the string "set_txn_timeout", one or more whitespace characters, @@ -78,8 +75,7 @@ Because the DB_CONFIG file is read when the database environment is opened, it will silently overrule configuration done before that time.

-

timeout
-The timeout parameter is the timeout value. It must be specified +
timeout
The timeout parameter is the timeout value. It must be specified as an unsigned 32-bit number of microseconds, limiting the maximum timeout to roughly 71 minutes.
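A sketch with illustrative timeout values; both are expressed in microseconds:

#include <db_cxx.h>

void configure_timeouts(DbEnv *dbenv)
{
    dbenv->set_timeout(1000000, DB_SET_LOCK_TIMEOUT);   // 1 second for locks
    dbenv->set_timeout(5000000, DB_SET_TXN_TIMEOUT);    // 5 seconds for txns

    db_timeout_t t;
    dbenv->get_timeout(&t, DB_SET_LOCK_TIMEOUT);        // t == 1000000
}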
@@ -89,8 +85,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -104,15 +100,13 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flag
-The flags parameter must be set to one of the following values: -

-

DB_SET_LOCK_TIMEOUT
Return the timeout value for locks in this database environment. -

DB_SET_TXN_TIMEOUT
Return the timeout value for transactions in this database environment. +
+
flag
The flags parameter must be set to one of the following values: +
+
DB_SET_LOCK_TIMEOUT
Return the timeout value for locks in this database environment. +
DB_SET_TXN_TIMEOUT
Return the timeout value for transactions in this database environment.
-

timeoutp
-The timeoutp parameter references memory into which +
timeoutp
The timeoutp parameter references memory into which the timeout value of the specified flag parameter is copied.

@@ -124,6 +118,6 @@ The timeoutp parameter references memory into which

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_tmp_dir.html b/db/docs/api_cxx/env_set_tmp_dir.html index 0714234ce..d5e07a5dd 100644 --- a/db/docs/api_cxx/env_set_tmp_dir.html +++ b/db/docs/api_cxx/env_set_tmp_dir.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_tmp_dir - + -

DbEnv::set_tmp_dir

API -Ref -
+Ref +


@@ -31,8 +30,7 @@ DbEnv::get_tmp_dir(const char **dirp);
 


Description: DbEnv::set_tmp_dir

- - +

Specify the path of a directory to be used as the location of temporary files. The files created to back in-memory access method databases will be created relative to this path. These temporary files can be quite @@ -79,10 +77,11 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

dir
-The dir parameter is the directory to be used to store temporary +
+
dir
The dir parameter is the directory to be used to store temporary files. +

On Windows, the dir argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters.

Errors

The DbEnv::set_tmp_dir method @@ -90,8 +89,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DbEnv::open was called; or if an +
+
EINVAL
If the method was called after DbEnv::open was called; or if an invalid flag value or parameter was specified.

@@ -105,9 +104,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

dirp
-The DbEnv::get_tmp_dir method returns a reference to the +
+
dirp
The DbEnv::get_tmp_dir method returns a reference to the database environment temporary file directory in dirp.

@@ -119,6 +117,6 @@ database environment temporary file directory in dirp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_tx_max.html b/db/docs/api_cxx/env_set_tx_max.html index 6f837ba2e..0ffa87947 100644 --- a/db/docs/api_cxx/env_set_tx_max.html +++ b/db/docs/api_cxx/env_set_tx_max.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_tx_max - + -

DbEnv::set_tx_max

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbEnv::get_tx_max(u_int32_t *tx_maxp);
 


Description: DbEnv::set_tx_max

-

Configure the Berkeley DB database environment to support at least max active transactions. This value bounds the size of the memory allocated for transactions. Child transactions are counted as active until they @@ -62,9 +60,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

max
-The max parameter configures the minimum number of +
+
max
The max parameter configures the minimum number of simultaneously active transactions supported by Berkeley DB database environment.
@@ -74,8 +71,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the method was called after DbEnv::open was called; or if an +
+
EINVAL
If the method was called after DbEnv::open was called; or if an invalid flag value or parameter was specified.

@@ -89,9 +86,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

tx_maxp
-The DbEnv::get_tx_max method returns the +
+
tx_maxp
The DbEnv::get_tx_max method returns the number of active transactions in tx_maxp.

@@ -103,6 +99,6 @@ number of active transactions in tx_maxp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_tx_timestamp.html b/db/docs/api_cxx/env_set_tx_timestamp.html index cce7f78c5..3a7f55bba 100644 --- a/db/docs/api_cxx/env_set_tx_timestamp.html +++ b/db/docs/api_cxx/env_set_tx_timestamp.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_tx_timestamp - + -

DbEnv::set_tx_timestamp

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbEnv::get_tx_timestamp(time_t *timestampp);
 


Description: DbEnv::set_tx_timestamp

-

Recover to the time specified by timestamp rather than to the most current possible date.

Once a database environment has been upgraded to a new version of Berkeley DB @@ -49,9 +47,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

timestamp
-The timestamp parameter references the memory location where the +
+
timestamp
The timestamp parameter references the memory location where the recovery timestamp is located.

The timestamp parameter should be the number of seconds since 0 hours, 0 minutes, 0 seconds, January 1, 1970, Coordinated Universal @@ -63,8 +60,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If it is not possible to recover to the specified time using the log files +
+
EINVAL
If it is not possible to recover to the specified time using the log files currently present in the environment; or if an invalid flag value or parameter was specified.
@@ -79,9 +76,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

timestampp
-The DbEnv::get_tx_timestamp method returns the +
+
timestampp
The DbEnv::get_tx_timestamp method returns the recovery timestamp in timestampp.

@@ -93,6 +89,6 @@ recovery timestamp in timestampp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_set_verbose.html b/db/docs/api_cxx/env_set_verbose.html index 54511666e..7b5533382 100644 --- a/db/docs/api_cxx/env_set_verbose.html +++ b/db/docs/api_cxx/env_set_verbose.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_verbose - + -

DbEnv::set_verbose

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbEnv::get_verbose(u_int32_t which, int *onoffp);
 


Description: DbEnv::set_verbose

-

The DbEnv::set_verbose method turns specific additional informational and debugging messages in the Berkeley DB message output on and off. To see the additional messages, verbose messages must also be configured for @@ -41,7 +39,7 @@ the application. For more information on verbose messages, see the DB_CONFIG file. The syntax of the entry in that file is a single line with the string "set_verbose", one or more whitespace characters, and the method which parameter as a string; for example, -"set_verbose DB_VERB_CHKPOINT". +"set_verbose DB_VERB_RECOVERY". Because the DB_CONFIG file is read when the database environment is opened, it will silently overrule configuration done before that time.

@@ -56,22 +54,19 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

onoff
-If the onoff parameter is set to non-zero, the additional messages are output. -

which
-The which parameter must be set to one of the following values: -

-

DB_VERB_CHKPOINT
Display checkpoint location information when searching the log for -checkpoints. -

DB_VERB_DEADLOCK
Display additional information when doing deadlock detection. -

DB_VERB_RECOVERY
Display additional information when performing recovery. -

DB_VERB_REPLICATION
Display additional information when processing replication messages. +
+
onoff
If the onoff parameter is set to non-zero, the additional +messages are output. +
which
The which parameter must be set to one of the following values: +
+
DB_VERB_DEADLOCK
Display additional information when doing deadlock detection. +
DB_VERB_RECOVERY
Display additional information when performing recovery. +
DB_VERB_REPLICATION
Display additional information when processing replication messages.

Note, to get complete replication logging when debugging replication applications, you must also configure and build the Berkeley DB library with the --enable-diagnostic configuration option as well as call the DbEnv::set_verbose method.

-

DB_VERB_WAITSFOR
Display the waits-for table when doing deadlock detection. +
DB_VERB_WAITSFOR
Display the waits-for table when doing deadlock detection.

Errors

@@ -80,8 +75,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -96,12 +91,10 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

which
-The which parameter is the message value for which configuration +
+
which
The which parameter is the message value for which configuration is being checked. -

onoffp
-The onoffp parameter references memory into which +
onoffp
The onoffp parameter references memory into which the configuration of the specified which parameter is copied.

@@ -113,6 +106,6 @@ The onoffp parameter references memory into which

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_stat.html b/db/docs/api_cxx/env_stat.html new file mode 100644 index 000000000..c9735933a --- /dev/null +++ b/db/docs/api_cxx/env_stat.html @@ -0,0 +1,67 @@ + + + + + + +Berkeley DB: DbEnv::stat_print + + + + + + + +
+

DbEnv::stat_print

+
+API +Ref
+


+ +

+#include <db_cxx.h>
+

+int +DbEnv::stat_print(u_int32_t flags); +

+
+

Description: DbEnv::stat_print

+

The DbEnv::stat_print method prints the +default statistical information. +The information is printed to a specified output channel (see the +DbEnv::set_msgfile method for more information), or passed to an +application callback function (see the DbEnv::set_msgcall method for +more information).

+

The DbEnv::stat_print method may not be called before the DbEnv::open method has +been called.

+

The DbEnv::stat_print method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+

Parameters

+
+
flags
The flags parameter must be set to 0 or +the following value: +
+
DB_STAT_ALL
Display all available information. +
+In addition, the following flag may be set by +bitwise inclusively OR'ing it into the flags parameter: +
+
DB_STAT_SUBSYSTEM
Display information for all configured subsystems. +
+
+
+
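A minimal sketch, assuming the environment is already open and a message channel has been configured:

#include <db_cxx.h>

void print_environment_stats(DbEnv *dbenv)
{
    // Display everything, including per-subsystem statistics, through the
    // channel set with DbEnv::set_msgfile or DbEnv::set_msgcall.
    dbenv->stat_print(DB_STAT_ALL | DB_STAT_SUBSYSTEM);
}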

Class

+DbEnv +

See Also

+Database Environments and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_cxx/env_strerror.html b/db/docs/api_cxx/env_strerror.html index 64e7f5fae..ee37ae593 100644 --- a/db/docs/api_cxx/env_strerror.html +++ b/db/docs/api_cxx/env_strerror.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::strerror - + -

DbEnv::strerror

API -Ref -
+Ref +


@@ -38,9 +37,8 @@ is returned. See
 Error returns to applications
 for more information.

Parameters

-

-

error
-The error parameter is the error number for which an error message +
+
error
The error parameter is the error number for which an error message string is wanted.

@@ -52,6 +50,6 @@ string is wanted.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/env_version.html b/db/docs/api_cxx/env_version.html index 068aab6c6..0a83d266b 100644 --- a/db/docs/api_cxx/env_version.html +++ b/db/docs/api_cxx/env_version.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::version - + -

DbEnv::version

API -Ref -
+Ref +


@@ -28,19 +27,15 @@ DbEnv::version(int *major, int *minor, int *patch);
 


Description: DbEnv::version

-

The DbEnv::version method returns a pointer to a string, suitable for display, containing Berkeley DB version information.

Parameters

-

-

major
-If major is non-NULL, the major +
+
major
If major is non-NULL, the major version of the Berkeley DB release is copied to the memory to which it refers. -

minor
-If minor is non-NULL, the minor version of the Berkeley DB release +
minor
If minor is non-NULL, the minor version of the Berkeley DB release is copied to the memory to which it refers. -

patch
-If patch is non-NULL, the patch version of the Berkeley DB release +
patch
If patch is non-NULL, the patch version of the Berkeley DB release is copied to the memory to which it refers.

@@ -52,6 +47,6 @@ is copied to the memory to which it refers.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/exc_package.html b/db/docs/api_cxx/exc_package.html new file mode 100644 index 000000000..2ff788b56 --- /dev/null +++ b/db/docs/api_cxx/exc_package.html @@ -0,0 +1,23 @@ + + + + + + +Berkeley DB: Exceptions + + + + +

Exceptions

+ + + + + + + +
ExceptionsDescription
DbDeadlockExceptionException Class for deadlocks
DbExceptionException Class for Berkeley DB Activity
DbLockNotGrantedExceptionException Class for lock request failures
DbMemoryExceptionException Class for insufficient memory
DbRunRecoveryExceptionException Class for failures requiring recovery
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_cxx/except_class.html b/db/docs/api_cxx/except_class.html index fca4e6df2..09065dbc0 100644 --- a/db/docs/api_cxx/except_class.html +++ b/db/docs/api_cxx/except_class.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbException - + -

DbException

API -Ref -
+Ref +


@@ -32,9 +31,6 @@ public:
 


Description: DbException

- - -

This information describes the DbException class and how it is used by the various Berkeley DB classes.

Most methods in the Berkeley DB classes return an int, but also throw an @@ -46,11 +42,12 @@ Alternatively, Berkeley DB can be configured to not throw exceptions, and instead have the individual function return an error code, by setting the DB_CXX_NO_EXCEPTIONS for the Db and DbEnv constructors.

-

A DbException object contains an informational string, an errno, -and a reference to the environment from which the exception was -thrown. The errno can be obtained by using DbException::get_errno. -The informational string can be obtained by using DbException::what. -And, the environment can be obtained using DbException::get_env.

+

A DbException object contains an informational string, an errno, and a +reference to the environment from which the exception was thrown. The +errno can be obtained by using DbException::get_errno, and can be +used, in standard cases, to determine the type of the exception. The +informational string can be obtained by using DbException::what. And, +the environment can be obtained using DbException::get_env.

We expect in the future that this class will inherit from the standard class exception, but certain language implementation bugs currently prevent this on some platforms.
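A hypothetical sketch of the usual catch-and-inspect pattern:

#include <db_cxx.h>
#include <iostream>

void try_open(DbEnv *dbenv, const char *home)
{
    try {
        dbenv->open(home, DB_CREATE | DB_INIT_MPOOL, 0);
    } catch (DbException &e) {
        // get_errno() identifies the underlying error; what() is the text.
        std::cerr << "open failed (" << e.get_errno() << "): "
                  << e.what() << std::endl;
    }
}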

@@ -63,6 +60,6 @@ not appear in the database.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/frame.html b/db/docs/api_cxx/frame.html new file mode 100644 index 000000000..e01d432fd --- /dev/null +++ b/db/docs/api_cxx/frame.html @@ -0,0 +1,15 @@ + + + +C API (Version: 4.3.14) + + + + + + + +<meta http-equiv="refresh" content="0;url=api_index.html"> + + + diff --git a/db/docs/api_cxx/lock_class.html b/db/docs/api_cxx/lock_class.html index 622e42ad0..b0c712729 100644 --- a/db/docs/api_cxx/lock_class.html +++ b/db/docs/api_cxx/lock_class.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbLock - + -

DbLock

API -Ref -
+Ref +


@@ -45,6 +44,6 @@ for a single lock, and has no methods of its own.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/lock_detect.html b/db/docs/api_cxx/lock_detect.html index 4530d70e4..ffbeac285 100644 --- a/db/docs/api_cxx/lock_detect.html +++ b/db/docs/api_cxx/lock_detect.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::lock_detect - + -

DbEnv::lock_detect

API -Ref -
+Ref +


@@ -37,29 +36,27 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

aborted
-

atype
-The atype parameter specifies which lock request(s) to reject. +
+
aborted
If the aborted parameter is non-NULL, the memory location to +which it refers will be set to the number of lock requests that were +rejected. +
atype
The atype parameter specifies which lock request(s) to reject. It must be set to one of the following list: -

-

DB_LOCK_DEFAULT
Use whatever lock policy was specified when the database environment +
+
DB_LOCK_DEFAULT
Use whatever lock policy was specified when the database environment was created. If no lock policy has yet been specified, set the lock policy to DB_LOCK_RANDOM.
DB_LOCK_EXPIRE
Reject lock requests which have timed out. No other deadlock detection is performed. -
DB_LOCK_MAXLOCKS
Reject the lock request for the locker ID with the greatest number of -locks. -
DB_LOCK_MINLOCKS
Reject the lock request for the locker ID with the fewest number of -locks. -
DB_LOCK_MINWRITE
Reject the lock request for the locker ID with the fewest number of -write locks. -
DB_LOCK_OLDEST
Reject the lock request for the oldest locker ID. +
DB_LOCK_MAXLOCKS
Reject the lock request for the locker ID with the most locks. +
DB_LOCK_MAXWRITE
Reject the lock request for the locker ID with the most write locks. +
DB_LOCK_MINLOCKS
Reject the lock request for the locker ID with the fewest locks. +
DB_LOCK_MINWRITE
Reject the lock request for the locker ID with the fewest write locks. +
DB_LOCK_OLDEST
Reject the lock request for the locker ID with the oldest lock.
DB_LOCK_RANDOM
Reject the lock request for a random locker ID. -
DB_LOCK_YOUNGEST
Reject the lock request for the youngest locker ID. +
DB_LOCK_YOUNGEST
Reject the lock request for the locker ID with the youngest lock.
-

flags
-The flags parameter is currently unused, and must be set to 0. +
flags
The flags parameter is currently unused, and must be set to 0.
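A minimal sketch of a detector run using the DB_LOCK_DEFAULT policy:

#include <db_cxx.h>

void run_detector(DbEnv *dbenv)
{
    int rejected;
    dbenv->lock_detect(0, DB_LOCK_DEFAULT, &rejected);
    // rejected now holds the number of lock requests that were rejected.
}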

Errors

The DbEnv::lock_detect method @@ -67,8 +64,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -80,6 +77,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/lock_get.html b/db/docs/api_cxx/lock_get.html index c120e0890..8d46f5e88 100644 --- a/db/docs/api_cxx/lock_get.html +++ b/db/docs/api_cxx/lock_get.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::lock_get - + -

DbEnv::lock_get

API -Ref -
+Ref +


@@ -30,30 +29,27 @@ DbEnv::lock_get(u_int32_t locker, u_int32_t flags,
 

Description: DbEnv::lock_get

The DbEnv::lock_get method acquires a lock from the lock table, returning -information about it in -the lock parameter.

+information about it in the lock parameter.

The DbEnv::lock_get method either returns a non-zero error value or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to 0 or +
+
flags
The flags parameter must be set to 0 or the following value: -

-

DB_LOCK_NOWAIT
If a lock cannot be granted because the requested lock conflicts with an +
+
DB_LOCK_NOWAIT
If a lock cannot be granted because the requested lock conflicts with an existing lock, return DB_LOCK_NOTGRANTED or throw a DbLockNotGrantedException immediately instead of waiting for the lock to become available.
-

lock_mode
-The lock_mode parameter is used as an index into the environment's +
lock_mode
The lock_mode parameter is used as an index into the environment's lock conflict matrix. When using the default lock conflict matrix, lock_mode must be set to one of the following values: -

+
DB_LOCK_READ
read (shared)
DB_LOCK_WRITE
write (exclusive)
DB_LOCK_IWRITE
intention to write (shared) @@ -61,11 +57,9 @@ lock conflict matrix. When using the default lock conflict matrix,
DB_LOCK_IWR
intention to read and write (shared)

See DbEnv::set_lk_conflicts and Standard Lock Modes for more information on the lock conflict matrix.

-

locker
-The locker parameter is an unsigned 32-bit integer quantity. It +
locker
The locker parameter is an unsigned 32-bit integer quantity. It represents the entity requesting the lock. -

object
-The object parameter is an untyped byte string that specifies the +
object
The object parameter is an untyped byte string that specifies the object to be locked. Applications using the locking subsystem directly while also doing locking via the Berkeley DB access methods must take care not to inadvertently lock objects that happen to be equal to the unique file @@ -79,8 +73,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

If a transactional database environment operation was selected to @@ -97,7 +91,7 @@ either return DB_LOCK_NOTGRANTED or throw a DbLockNotGrantedException exception.

If the maximum number of locks has been reached, the DbEnv::lock_get method will fail and either return ENOMEM or -throw a DbMemoryException exception.

+throw a DbMemoryException.
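A hypothetical sketch tying DbEnv::lock_id, DbEnv::lock_get, and DbEnv::lock_put together; the object name is an arbitrary placeholder:

#include <db_cxx.h>
#include <cstring>

void lock_example(DbEnv *dbenv)
{
    u_int32_t locker;
    dbenv->lock_id(&locker);               // allocate a locker ID

    const char *name = "example-object";
    Dbt obj((void *)name, (u_int32_t)std::strlen(name));

    DbLock lock;
    dbenv->lock_get(locker, 0, &obj, DB_LOCK_WRITE, &lock);

    dbenv->lock_put(&lock);                // release the lock ...
    dbenv->lock_id_free(locker);           // ... and the locker ID
}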


Class

DbEnv, DbLock @@ -107,6 +101,6 @@ throw a DbMemoryException exception.


APIRef -

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/lock_id.html b/db/docs/api_cxx/lock_id.html index 5b807e625..537f24925 100644 --- a/db/docs/api_cxx/lock_id.html +++ b/db/docs/api_cxx/lock_id.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::lock_id - + -

DbEnv::lock_id

API -Ref -
+Ref +


@@ -28,9 +27,9 @@ DbEnv::lock_id(u_int32_t *idp);
 


Description: DbEnv::lock_id

-

The DbEnv::lock_id method -copies a locker ID, which is guaranteed to be unique in the specified lock -table, into the memory location to which idp refers.

+

The DbEnv::lock_id method copies a locker ID, which is guaranteed to be +unique in the specified lock table, into the memory location to which +idp refers.

The DbEnv::lock_id_free method should be called to return the locker ID to the Berkeley DB library when it is no longer needed.

The DbEnv::lock_id method @@ -39,9 +38,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

idp
-The idp parameter references memory into which +
+
idp
The idp parameter references memory into which the allocated locker ID is copied.

@@ -53,6 +51,6 @@ The idp parameter references memory into which

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/lock_id_free.html b/db/docs/api_cxx/lock_id_free.html index 13b0065b7..34683955f 100644 --- a/db/docs/api_cxx/lock_id_free.html +++ b/db/docs/api_cxx/lock_id_free.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::lock_id_free - + -

DbEnv::lock_id_free

API -Ref -
+Ref +


@@ -36,9 +35,8 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

id
-The id parameter is the locker id to be freed. +
+
id
The id parameter is the locker id to be freed.

Errors

The DbEnv::lock_id_free method @@ -46,8 +44,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the locker ID is invalid or locks are still held by this locker ID; or if an +
+
EINVAL
If the locker ID is invalid or locks are still held by this locker ID; or if an invalid flag value or parameter was specified.

@@ -59,6 +57,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/lock_list.html b/db/docs/api_cxx/lock_list.html index d3b754e73..515ccbcd6 100644 --- a/db/docs/api_cxx/lock_list.html +++ b/db/docs/api_cxx/lock_list.html @@ -1,32 +1,32 @@ - + Berkeley DB: Berkeley DB: Locking Subsystem and Related Methods - +

Berkeley DB: Locking Subsystem and Related Methods

- + - - - - - - - - - - - - - + + + + + + + + + + + + +
Locking Subsystem and Related MethodsDescription
DbEnv::set_lk_conflictsSet lock conflicts matrix
DbEnv::set_lk_detectSet automatic deadlock detection
DbEnv::set_lk_max_lockersSet maximum number of lockers
DbEnv::set_lk_max_locksSet maximum number of locks
DbEnv::set_lk_max_objectsSet maximum number of lock objects
DbEnv::set_timeoutSet lock and transaction timeout
DbEnv::lock_detectPerform deadlock detection
DbEnv::lock_getAcquire a lock
DbEnv::lock_idAcquire a locker ID
DbEnv::lock_id_freeRelease a locker ID
DbEnv::lock_putRelease a lock
DbEnv::lock_statReturn lock subsystem statistics
DbEnv::lock_vecAcquire/release locks
DbEnv::lock_detectPerform deadlock detection
DbEnv::lock_getAcquire a lock
DbEnv::lock_idAcquire a locker ID
DbEnv::lock_id_freeRelease a locker ID
DbEnv::lock_putRelease a lock
DbEnv::lock_statReturn lock subsystem statistics
DbEnv::lock_vecAcquire/release locks
DbEnv::set_lk_conflictsSet lock conflicts matrix
DbEnv::set_lk_detectSet automatic deadlock detection
DbEnv::set_lk_max_lockersSet maximum number of lockers
DbEnv::set_lk_max_locksSet maximum number of locks
DbEnv::set_lk_max_objectsSet maximum number of lock objects
DbEnv::set_timeoutSet lock and transaction timeout
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/lock_put.html b/db/docs/api_cxx/lock_put.html index 25fdaf293..f78e07428 100644 --- a/db/docs/api_cxx/lock_put.html +++ b/db/docs/api_cxx/lock_put.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::lock_put - + -

DbEnv::lock_put

API -Ref -
+Ref +


@@ -35,9 +34,8 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

lock
-The lock parameter is the lock to be released. +
+
lock
The lock parameter is the lock to be released.

Errors

The DbEnv::lock_put method @@ -45,8 +43,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -58,6 +56,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/lock_stat.html b/db/docs/api_cxx/lock_stat.html index f097440f6..47c1fc3a8 100644 --- a/db/docs/api_cxx/lock_stat.html +++ b/db/docs/api_cxx/lock_stat.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::lock_stat - + -

DbEnv::lock_stat

API -Ref -
+Ref +


@@ -25,6 +24,9 @@
 

int DbEnv::lock_stat(DB_LOCK_STAT **statp, u_int32_t flags); +

+int +DbEnv::lock_stat_print(u_int32_t flags);


Description: DbEnv::lock_stat

@@ -40,7 +42,7 @@ responsible for deallocating the memory. To deallocate the memory, free the memory reference; references inside the returned memory need not be individually freed.

The following DB_LOCK_STAT fields will be filled in:

-

+
u_int32_t st_id;
The last allocated locker ID.
u_int32_t st_cur_maxid;
The current maximum unused locker ID.
u_int32_t st_nmodes;
The number of lock modes. @@ -65,7 +67,7 @@ individually freed.

u_int32_t st_ntxntimeouts;
The number of transactions that have timed out. This value is also a component of st_ndeadlocks, the total number of deadlocks detected. -
u_int32_t st_regsize;
The size of the lock region. +
roff_t st_regsize;
The size of the lock region, in bytes.
u_int32_t st_region_wait;
The number of times that a thread of control was forced to wait before obtaining the region lock.
u_int32_t st_region_nowait;
The number of times that a thread of control was able to obtain @@ -77,15 +79,13 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to 0 or +
+
flags
The flags parameter must be set to 0 or the following value: -

-

DB_STAT_CLEAR
Reset statistics after returning their values. +
+
DB_STAT_CLEAR
Reset statistics after returning their values.
-

statp
-The statp parameter references memory into which +
statp
The statp parameter references memory into which a pointer to the allocated statistics structure is copied.

Errors

@@ -94,11 +94,34 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

+

Description: DbEnv::lock_stat_print

+

The DbEnv::lock_stat_print method prints diagnostic information to the output +channel described by the DbEnv::set_msgfile method.

+

The DbEnv::lock_stat_print method may not be called before the DbEnv::open method has +been called.

+

The DbEnv::lock_stat_print method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+

Parameters

+
+
flags
The flags parameter must be set by bitwise inclusively OR'ing together one or more +of the following values: +
+
DB_STAT_ALL
Display all available information. +
DB_STAT_LOCK_CONF
Display the lock conflict matrix. +
DB_STAT_LOCK_LOCKERS
Display the lockers within hash chains. +
DB_STAT_LOCK_OBJECTS
Display the lock objects within hash chains. +
DB_STAT_LOCK_PARAMS
Display the locking subsystem parameters. +
+
+
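A brief sketch of both calls, assuming an open environment; per the text above, the statistics structure is released with a single call to free():

    #include <db_cxx.h>
    #include <cstdio>
    #include <cstdlib>

    // Print a couple of lock-region statistics, then dump everything
    // through the configured message channel (sketch only).
    void show_lock_stats(DbEnv &env)
    {
        DB_LOCK_STAT *sp;
        env.lock_stat(&sp, 0);                      // allocate and fill the structure
        printf("deadlocks: %lu, region size: %lu\n",
            (u_long)sp->st_ndeadlocks, (u_long)sp->st_regsize);
        free(sp);                                   // one free of the whole structure

        env.lock_stat_print(DB_STAT_ALL);           // verbose dump via set_msgfile/set_msgcall
    }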

Class

DbEnv, DbLock

See Also

@@ -107,6 +130,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/lock_vec.html b/db/docs/api_cxx/lock_vec.html index 0b9cf6699..c3ada64bb 100644 --- a/db/docs/api_cxx/lock_vec.html +++ b/db/docs/api_cxx/lock_vec.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::lock_vec - + -

DbEnv::lock_vec

API -Ref -
+Ref +


@@ -37,25 +36,23 @@ escalation.

If any of the requested locks cannot be acquired, or any of the locks to be released cannot be released, the operations before the failing operation are guaranteed to have completed successfully, and -DbEnv::lock_vec returns a non-zero value. In addition, if elistp -is not NULL, it is set to point to the DB_LOCKREQ entry that was being -processed when the error occurred.

+DbEnv::lock_vec returns a non-zero value. In addition, if +elistp is not NULL, it is set to point to the DB_LOCKREQ entry +that was being processed when the error occurred.

Unless otherwise specified, the DbEnv::lock_vec method either returns a non-zero error value or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

elistp
-If an error occurs, and the elistp parameter is non-NULL, it +
+
elistp
If an error occurs, and the elistp parameter is non-NULL, it is set to point to the DB_LOCKREQ entry that was being processed when the error occurred. -

flags
-The flags parameter must be set to 0 or +
flags
The flags parameter must be set to 0 or the following value: -

-

DB_LOCK_NOWAIT
If a lock cannot be granted because the requested lock conflicts with +
+
DB_LOCK_NOWAIT
If a lock cannot be granted because the requested lock conflicts with an existing lock, return DB_LOCK_NOTGRANTED or throw a DbLockNotGrantedException immediately instead of waiting for @@ -64,11 +61,9 @@ the lock to become available. In this case, if non-NULL, exception is thrown, the index of the request that was not granted can be found by calling DbLockNotGrantedException.get_index.
-

locker
-The locker parameter is an unsigned 32-bit integer quantity. It +
locker
The locker parameter is an unsigned 32-bit integer quantity. It represents the entity requesting or releasing the lock. -

list
-The list array provided to DbEnv::lock_vec is typedef'd as +
list
The list array provided to DbEnv::lock_vec is typedef'd as DB_LOCKREQ.

To ensure compatibility with future releases of Berkeley DB, all fields of the DB_LOCKREQ structure that are not explicitly set should @@ -76,46 +71,46 @@ be initialized to 0 before the first time the structure is used. Do this by declaring the structure external or static, or by calling memset(3).

A DB_LOCKREQ structure has at least the following fields:

-

-

lockop_t op;
The operation to be performed, which must be set to one of the +
+
lockop_t op;
The operation to be performed, which must be set to one of the following values: -

-

DB_LOCK_GET
Get the lock defined by the values of the mode and obj +
+
DB_LOCK_GET
Get the lock defined by the values of the mode and obj structure fields, for the specified locker. Upon return from DbEnv::lock_vec, if the lock field is non-NULL, a reference to the acquired lock is stored there. (This reference is invalidated by any call to DbEnv::lock_vec or DbEnv::lock_put that releases the lock.) -

DB_LOCK_GET_TIMEOUT
Identical to DB_LOCK_GET except that the value in the timeout +
DB_LOCK_GET_TIMEOUT
Identical to DB_LOCK_GET except that the value in the timeout structure field overrides any previously specified timeout value for this lock. A value of 0 turns off any previously specified timeout. -

DB_LOCK_PUT
The lock to which the lock structure field refers is released. +
DB_LOCK_PUT
The lock to which the lock structure field refers is released. The locker parameter, and mode and obj fields are ignored. -

DB_LOCK_PUT_ALL
All locks held by the specified locker are released. The +
DB_LOCK_PUT_ALL
All locks held by the specified locker are released. The lock, mode, and obj structure fields are ignored. Locks acquired in operations performed by the current call to DbEnv::lock_vec which appear before the DB_LOCK_PUT_ALL operation are released; those acquired in operations appearing after the DB_LOCK_PUT_ALL operation are not released. -

DB_LOCK_PUT_OBJ
All locks held on obj are released. The locker +
DB_LOCK_PUT_OBJ
All locks held on obj are released. The locker parameter and the lock and mode structure fields are ignored. Locks acquired in operations performed by the current call to DbEnv::lock_vec that appear before the DB_LOCK_PUT_OBJ operation are released; those acquired in operations appearing after the DB_LOCK_PUT_OBJ operation are not released. -

DB_LOCK_TIMEOUT
Cause the specified locker to timeout immediately. If the +
DB_LOCK_TIMEOUT
Cause the specified locker to timeout immediately. If the database environment has not configured automatic deadlock detection, the transaction will timeout the next time deadlock detection is performed. As transactions acquire locks on behalf of a single locker ID, timing out the locker ID associated with a transaction will time out the transaction itself.
-

DB_LOCK lock;
A lock reference. -

const lockmode_t mode;
The lock mode, used as an index into the environment's lock conflict matrix. +
DB_LOCK lock;
A lock reference. +
const lockmode_t mode;
The lock mode, used as an index into the environment's lock conflict matrix. When using the default lock conflict matrix, mode must be set to one of the following values: -

+
DB_LOCK_READ
read (shared)
DB_LOCK_WRITE
write (exclusive)
DB_LOCK_IWRITE
intention to write (shared) @@ -123,16 +118,15 @@ of the following values:
DB_LOCK_IWR
intention to read and write (shared)

See DbEnv::set_lk_conflicts and Standard Lock Modes for more information on the lock conflict matrix.

-

const Dbt obj;
An untyped byte string that specifies the object to be locked or +
const Dbt obj;
An untyped byte string that specifies the object to be locked or released. Applications using the locking subsystem directly while also doing locking via the Berkeley DB access methods must take care not to inadvertently lock objects that happen to be equal to the unique file IDs used to lock files. See Access method locking conventions for more information. -

u_int32_t timeout;
The lock timeout value. +
u_int32_t timeout;
The lock timeout value.
-

nlist
-The nlist parameter specifies the number of elements in the +
nlist
The nlist parameter specifies the number of elements in the list array.
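For illustration, a hedged sketch that releases every lock held by a locker in one DbEnv::lock_vec call; only the op field matters for DB_LOCK_PUT_ALL, so the rest of the DB_LOCKREQ is zeroed as recommended above:

    #include <db_cxx.h>
    #include <cstring>

    // Release all locks held by "locker" with a single request (sketch).
    void release_all(DbEnv &env, u_int32_t locker)
    {
        DB_LOCKREQ req;
        memset(&req, 0, sizeof(req));         // zero unused fields, per the text above
        req.op = DB_LOCK_PUT_ALL;             // lock, mode and obj are ignored

        DB_LOCKREQ *failed;                   // set on error to the failing request
        env.lock_vec(locker, 0, &req, 1, &failed);
    }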

Errors

@@ -141,8 +135,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

If a transactional database environment operation was selected to @@ -159,7 +153,7 @@ either return DB_LOCK_NOTGRANTED or throw a DbLockNotGrantedException exception.

If the maximum number of locks has been reached, the DbEnv::lock_vec method will fail and either return ENOMEM or -throw a DbMemoryException exception.

+throw a DbMemoryException.


Class

DbEnv, DbLock @@ -169,6 +163,6 @@ throw a DbMemoryException exception.


APIRef -

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/lockng_class.html b/db/docs/api_cxx/lockng_class.html index 0e4f5df0a..4fce90b9c 100644 --- a/db/docs/api_cxx/lockng_class.html +++ b/db/docs/api_cxx/lockng_class.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbLockNotGrantedException - + -

DbLockNotGrantedException

API -Ref -
+Ref +


@@ -36,36 +35,41 @@ public:
 

Description: DbLockNotGrantedException

This information describes the DbLockNotGrantedException class and how it is used by the various Db* classes.

-

A DbLockNotGrantedException is thrown when a lock, requested -using the DbEnv::lock_get or DbEnv::lock_vec methods, where the -DB_LOCK_NOWAIT option was specified, is unable to be granted -immediately.

-

The get_op method -returns 0 when DbEnv::lock_get was called, and returns the op -for the failed DB_LOCKREQ when DbEnv::lock_vec was called.

-

The get_mode method -returns the mode parameter when +

A DbLockNotGrantedException is thrown when a lock requested using +the DbEnv::lock_get or DbEnv::lock_vec methods, where the DB_LOCK_NOWAIT +flag or lock timers were configured, could not be granted before the +wait-time expired.

+

Additionally, DbLockNotGrantedException is thrown when a Berkeley DB +Concurrent Data Store database environment configured for lock timeouts +was unable to grant a lock in the allowed time.

+

Additionally, DbLockNotGrantedException is thrown when lock or +transaction timeouts have been configured, a database operation has +timed out, and the DB_TIME_NOTGRANTED configuration flag has +been specified.

+

The DbException errno value is set to DB_LOCKNOTGRANTED.

+

The get_op method returns 0 when DbEnv::lock_get was called, +and returns the op for the failed DB_LOCKREQ when +DbEnv::lock_vec was called.

+

The get_mode method returns the mode parameter when DbEnv::lock_get was called, and returns the mode for the failed DB_LOCKREQ when DbEnv::lock_vec was called.

-

The get_obj method -returns the mode parameter when -returns the object parameter when DbEnv::lock_get was called, and -returns the object for the failed DB_LOCKREQ when +

The get_obj method returns the object parameter when DbEnv::lock_get was called, +and returns the object for the failed DB_LOCKREQ when DbEnv::lock_vec was called. The Dbt pointer may or may not refer to valid memory, depending on whether the Dbt used in the call to the failed DbEnv::lock_get or DbEnv::lock_vec method is still in scope and has not been deleted.

-

The get_lock method -returns NULL when DbEnv::lock_get was called, and returns the -lock in the failed DB_LOCKREQ when DbEnv::lock_vec -was called.

-

The get_index method -returns -1 when DbEnv::lock_get was called, and returns the index of -the failed DB_LOCKREQ when DbEnv::lock_vec was called.

+

The get_lock method returns NULL when DbEnv::lock_get was +called, and returns the lock in the failed DB_LOCKREQ +when DbEnv::lock_vec was called.

+

The get_index method returns -1 when DbEnv::lock_get was +called, and returns the index of the failed DB_LOCKREQ when +DbEnv::lock_vec was called.
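A hedged sketch of the most common way this exception arises, a DB_LOCK_NOWAIT request that conflicts with an existing lock; the helper name and retry policy are illustrative:

    #include <db_cxx.h>
    #include <cstdio>

    // Try for a write lock without blocking; report a conflict (sketch).
    bool try_lock(DbEnv &env, u_int32_t locker, Dbt &obj, DbLock &lock)
    {
        try {
            env.lock_get(locker, DB_LOCK_NOWAIT, &obj, DB_LOCK_WRITE, &lock);
            return true;                           // lock granted
        } catch (DbLockNotGrantedException &e) {
            fprintf(stderr, "lock busy: %s\n", e.what());
            return false;                          // caller retries or backs off
        }
    }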


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/log_archive.html b/db/docs/api_cxx/log_archive.html index ee9b4a4db..0234cbfd6 100644 --- a/db/docs/api_cxx/log_archive.html +++ b/db/docs/api_cxx/log_archive.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::log_archive - + -

DbEnv::log_archive

API -Ref -
+Ref +


@@ -33,8 +32,8 @@ DbEnv::log_archive(char *(*listp)[], u_int32_t flags);
 files that are no longer in use (for example, that are no longer
 involved in active transactions), and that may safely be archived for
 catastrophic recovery and then removed from the system.  If there are
-no filenames to return,
-the memory location to which listp refers will be set to NULL.

+no filenames to return, the memory location to which listp +refers will be set to NULL.

Arrays of log filenames are stored in allocated memory. If application-specific allocation routines have been declared (see DbEnv::set_alloc for more information), they are used to allocate the memory; otherwise, the @@ -65,33 +64,29 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
+
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_ARCH_ABS
All pathnames are returned as absolute pathnames, instead of relative +
+
DB_ARCH_ABS
All pathnames are returned as absolute pathnames, instead of relative to the database home directory. -

DB_ARCH_DATA
Return the database files that need to be archived in order to recover +
DB_ARCH_DATA
Return the database files that need to be archived in order to recover the database from catastrophic failure. If any of the database files have not been accessed during the lifetime of the current log files, DbEnv::log_archive will not include them in this list. It is also possible that some of the files referred to by the log have since been deleted from the system. -

The DB_ARCH_DATA and DB_ARCH_LOG flags are mutually -exclusive.

-

DB_ARCH_LOG
Return all the log filenames, regardless of whether or not they are in +

The DB_ARCH_DATA and DB_ARCH_LOG flags are mutually exclusive.

+
DB_ARCH_LOG
Return all the log filenames, regardless of whether or not they are in use. -

The DB_ARCH_DATA and DB_ARCH_LOG flags are mutually -exclusive.

-

DB_ARCH_REMOVE
Remove log files that are no longer needed; no filenames are returned. +

The DB_ARCH_DATA and DB_ARCH_LOG flags are mutually exclusive.

+
DB_ARCH_REMOVE
Remove log files that are no longer needed; no filenames are returned. Automatic log file removal is likely to make catastrophic recovery impossible.

The DB_ARCH_REMOVE flag may not be specified with any other flag.

-

listp
-The listp parameter references memory into which the allocated +
listp
The listp parameter references memory into which the allocated array of log or database filenames is copied. If there are no filenames to return, the memory location to which listp refers will be set to NULL. @@ -102,8 +97,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -115,6 +110,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/log_compare.html b/db/docs/api_cxx/log_compare.html index 97c6d4294..24f972657 100644 --- a/db/docs/api_cxx/log_compare.html +++ b/db/docs/api_cxx/log_compare.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::log_compare - + -

DbEnv::log_compare

API -Ref -
+Ref +


@@ -33,13 +32,11 @@ DbEnv::log_compare(const DbLsn *lsn0, const DbLsn *lsn1);
 returning 0 if they are equal, 1 if lsn0 is greater than
 lsn1, and -1 if lsn0 is less than lsn1.

Parameters

-

-

lsn0
-The lsn0 parameter is one of the +
+
lsn0
The lsn0 parameter is one of the DbLsn objects to be compared. -

lsn1
-The lsn1 parameter is one of the +
lsn1
The lsn1 parameter is one of the DbLsn objects to be compared.
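A small usage sketch; the helper name is illustrative, and the DbLsn values are assumed to have been obtained from earlier log operations:

    #include <db_cxx.h>

    // Return true if record "a" was written before record "b" (sketch).
    bool written_before(DbEnv &env, const DbLsn &a, const DbLsn &b)
    {
        return env.log_compare(&a, &b) < 0;    // -1: a < b, 0: equal, 1: a > b
    }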
@@ -52,6 +49,6 @@ to be compared.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/log_cursor.html b/db/docs/api_cxx/log_cursor.html index 391cc5bba..037a35e6f 100644 --- a/db/docs/api_cxx/log_cursor.html +++ b/db/docs/api_cxx/log_cursor.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::log_cursor - + -

DbEnv::log_cursor

API -Ref -
+Ref +


@@ -35,12 +34,10 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

cursorp
-The cursorp parameter references memory into which +
+
cursorp
The cursorp parameter references memory into which a pointer to the created log cursor is copied. -

flags
-The flags parameter is currently unused, and must be set to 0. +
flags
The flags parameter is currently unused, and must be set to 0.

Errors

The DbEnv::log_cursor method @@ -48,8 +45,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -61,6 +58,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/log_file.html b/db/docs/api_cxx/log_file.html index 4409adacb..f050b48cd 100644 --- a/db/docs/api_cxx/log_file.html +++ b/db/docs/api_cxx/log_file.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::log_file - + -

DbEnv::log_file

API -Ref -
+Ref +


@@ -45,24 +44,27 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

lsn
-The lsn parameter is the +
+
lsn
The lsn parameter is the DbLsn object for which a filename is wanted. -

namep
-The namep parameter references memory into which +
namep
The namep parameter references memory into which the name of the file containing the record named by lsn is copied. -

len
-The len parameter is the length of the namep buffer in +
len
The len parameter is the length of the namep buffer in bytes. If namep is too short to hold the filename, DbEnv::log_file will fail. (Log filenames are normally quite short, on the order of 10 characters.)
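A hedged sketch mapping an LSN to its log file name; the buffer is sized generously since, as noted above, log filenames are short:

    #include <db_cxx.h>
    #include <cstdio>

    // Print the name of the log file holding the record at "lsn" (sketch).
    void print_log_file(DbEnv &env, const DbLsn &lsn)
    {
        char name[256];                        // ample room; names are ~10 characters
        if (env.log_file(&lsn, name, sizeof(name)) == 0)
            printf("record lives in %s\n", name);
    }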

Errors

-

If the supplied buffer was too small to hold the log filename, the DbEnv::log_file method will fail and -either return ENOMEM or -throw a DbMemoryException exception.

+

The DbEnv::log_file method +may fail and throw +DbException, +encapsulating one of the following non-zero errors, or return one of +the following non-zero errors:

+
+
EINVAL
If supplied buffer was too small to hold the log filename; or if an +invalid flag value or parameter was specified. +

Class

DbEnv, DbLogc, DbLsn @@ -72,6 +74,6 @@ throw a DbMemoryException exception.


APIRef -

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/log_flush.html b/db/docs/api_cxx/log_flush.html index ab7dbccf1..929cc8b03 100644 --- a/db/docs/api_cxx/log_flush.html +++ b/db/docs/api_cxx/log_flush.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::log_flush - + -

DbEnv::log_flush

API -Ref -
+Ref +


@@ -35,9 +34,8 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

lsn
-All log records with DbLsn values less than or equal to the +
+
lsn
All log records with DbLsn values less than or equal to the lsn parameter are written to disk. If lsn is NULL, all records in the log are flushed.
@@ -47,8 +45,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -60,6 +58,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/log_list.html b/db/docs/api_cxx/log_list.html index 7dce46e65..38269d662 100644 --- a/db/docs/api_cxx/log_list.html +++ b/db/docs/api_cxx/log_list.html @@ -1,32 +1,33 @@ - + Berkeley DB: Berkeley DB: Logging Subsystem and Related Methods - +

Berkeley DB: Logging Subsystem and Related Methods

- + - - - - - - - - - - - - - + + + + + + + + + + + + + +
Logging Subsystem and Related Methods | Description
DbEnv::log_archive | List log and database files
DbEnv::log_cursor | Create a log cursor handle
DbEnv::log_file | Map Log Sequence Numbers to log files
DbEnv::log_flush | Flush log records
DbEnv::log_put | Write a log record
DbEnv::set_lg_bsize | Set log buffer size
DbEnv::set_lg_dir | Set the environment logging directory
DbEnv::set_lg_max | Set log file size
DbEnv::set_lg_regionmax | Set logging region size
DbEnv::log_compare | Compare two Log Sequence Numbers
DbEnv::log_stat | Return log subsystem statistics
DbLogc::close | Close a log cursor
DbLogc::get | Retrieve a log record
DbLsn | Log Sequence Numbers
DbEnv::log_compare | Compare two Log Sequence Numbers
DbEnv::log_archive | List log and database files
DbEnv::log_cursor | Create a log cursor handle
DbEnv::log_file | Map Log Sequence Numbers to log files
DbEnv::log_flush | Flush log records
DbEnv::log_put | Write a log record
DbEnv::log_stat | Return log subsystem statistics
DbEnv::set_lg_bsize | Set log buffer size
DbEnv::set_lg_dir | Set the environment logging directory
DbEnv::set_lg_max | Set log file size
DbEnv::set_lg_regionmax | Set logging region size
DbLogc::close | Close a log cursor
DbLogc::get | Retrieve a log record
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/log_put.html b/db/docs/api_cxx/log_put.html index a6904770e..02c076ee0 100644 --- a/db/docs/api_cxx/log_put.html +++ b/db/docs/api_cxx/log_put.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::log_put - + -

DbEnv::log_put

API -Ref -
+Ref +


@@ -36,9 +35,8 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

data
-The data parameter is the record to write to the log. +
+
data
The data parameter is the record to write to the log.

The caller is responsible for providing any necessary structure to data. (For example, in a write-ahead logging protocol, the application must understand what part of data is an operation @@ -47,16 +45,14 @@ In addition, most transaction managers will store in data the DbLsn of the previous log record for the same transaction, to support chaining back through the transaction's log records during undo.)

-

flags
-The flags parameter must be set to 0 or +
flags
The flags parameter must be set to 0 or the following value: -

-

DB_FLUSH
The log is forced to disk after this record is written, guaranteeing +
+
DB_FLUSH
The log is forced to disk after this record is written, guaranteeing that all records with DbLsn values less than or equal to the one being "put" are on disk before DbEnv::log_put returns.
-

lsn
-The lsn parameter references memory into which +
lsn
The lsn parameter references memory into which the DbLsn of the put record is copied.
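An application-level sketch that writes one opaque record and forces it to disk, capturing its LSN; the payload is illustrative:

    #include <db_cxx.h>
    #include <cstring>

    // Append one record to the log and flush it (sketch).
    void append_record(DbEnv &env)
    {
        const char *payload = "example record";
        Dbt data((void *)payload, (u_int32_t)strlen(payload) + 1);

        DbLsn lsn;                             // filled in with the record's LSN
        env.log_put(&lsn, &data, DB_FLUSH);    // DB_FLUSH forces the log to disk
    }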

Errors

@@ -65,8 +61,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the record to be logged is larger than the maximum log record; or if an +
+
EINVAL
If the record to be logged is larger than the maximum log record; or if an invalid flag value or parameter was specified.

@@ -78,6 +74,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/log_stat.html b/db/docs/api_cxx/log_stat.html index 4d8bd1ddb..4ecfb8fe7 100644 --- a/db/docs/api_cxx/log_stat.html +++ b/db/docs/api_cxx/log_stat.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::log_stat - + -

DbEnv::log_stat

API -Ref -
+Ref +


@@ -25,6 +24,9 @@
 

int DbEnv::log_stat(DB_LOG_STAT **statp, u_int32_t flags); +

+int +DbEnv::log_stat_print(u_int32_t flags);


Description: DbEnv::log_stat

@@ -40,7 +42,7 @@ responsible for deallocating the memory. To deallocate the memory, free the memory reference; references inside the returned memory need not be individually freed.

The following DB_LOG_STAT fields will be filled in:

-

+
u_int32_t st_magic;
The magic number that identifies a file as a log file.
u_int32_t st_version;
The version of the log file type.
int st_mode;
The mode of any created log files. @@ -62,7 +64,7 @@ in-memory log record cache filled up.
u_int32_t st_maxcommitperflush;
The maximum number of commits contained in a single log flush.
u_int32_t st_mincommitperflush;
The minimum number of commits contained in a single log flush that contained a commit. -
u_int32_t st_regsize;
The size of the region. +
roff_t st_regsize;
The size of the region, in bytes.
u_int32_t st_region_wait;
The number of times that a thread of control was forced to wait before obtaining the region lock.
u_int32_t st_region_nowait;
The number of times that a thread of control was able to obtain @@ -74,15 +76,13 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to 0 or +
+
flags
The flags parameter must be set to 0 or the following value: -

-

DB_STAT_CLEAR
Reset statistics after returning their values. +
+
DB_STAT_CLEAR
Reset statistics after returning their values.
-

statp
-The statp parameter references memory into which +
statp
The statp parameter references memory into which a pointer to the allocated statistics structure is copied.

Errors

@@ -91,11 +91,34 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

+

Description: DbEnv::log_stat_print

+

The DbEnv::log_stat_print method returns the +logging subsystem statistical information, as described for the DbEnv::log_stat method. +The information is printed to a specified output channel (see the +DbEnv::set_msgfile method for more information), or passed to an +application callback function (see the DbEnv::set_msgcall method for +more information).

+

The DbEnv::log_stat_print method may not be called before the DbEnv::open method has +been called.

+

The DbEnv::log_stat_print method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+

Parameters

+
+
flags
The flags parameter must be set to 0 or +the following value: +
+
DB_STAT_ALL
Display all available information. +
+
+
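The usage pattern mirrors the other *_stat calls; a brief sketch, assuming an open environment:

    #include <db_cxx.h>
    #include <cstdio>
    #include <cstdlib>

    // Report the log version and region size, then free the statistics block (sketch).
    void show_log_stats(DbEnv &env)
    {
        DB_LOG_STAT *sp;
        env.log_stat(&sp, 0);
        printf("log version %lu, region %lu bytes\n",
            (u_long)sp->st_version, (u_long)sp->st_regsize);
        free(sp);

        env.log_stat_print(DB_STAT_ALL);       // or 0 for the default report
    }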

Class

DbEnv, DbLogc, DbLsn

See Also

@@ -104,6 +127,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/logc_class.html b/db/docs/api_cxx/logc_class.html index b456ae521..6f0a2dd52 100644 --- a/db/docs/api_cxx/logc_class.html +++ b/db/docs/api_cxx/logc_class.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbLogc - + -

DbLogc

API -Ref -
+Ref +


@@ -41,6 +40,6 @@ return.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/logc_close.html b/db/docs/api_cxx/logc_close.html index 14d245869..971c93d00 100644 --- a/db/docs/api_cxx/logc_close.html +++ b/db/docs/api_cxx/logc_close.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbLogc::close - + -

DbLogc::close

API -Ref -
+Ref +


@@ -37,9 +36,8 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

flags
-The flags parameter is currently unused, and must be set to 0. +
+
flags
The flags parameter is currently unused, and must be set to 0.

Errors

The DbLogc::close method @@ -47,8 +45,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the cursor was previously closed; or if an +
+
EINVAL
If the cursor was previously closed; or if an invalid flag value or parameter was specified.

@@ -60,6 +58,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/logc_get.html b/db/docs/api_cxx/logc_get.html index 719072f67..e1df0fcd1 100644 --- a/db/docs/api_cxx/logc_get.html +++ b/db/docs/api_cxx/logc_get.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbLogc::get - + -

DbLogc::get

API -Ref -
+Ref +


@@ -35,56 +34,49 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

data
-The data field of the data structure is set to the record +
+
data
The data field of the data structure is set to the record retrieved, and the size field indicates the number of bytes in the record. See Dbt for a description of other fields in the data structure. The DB_DBT_MALLOC, DB_DBT_REALLOC and DB_DBT_USERMEM flags may be specified for any Dbt used for data retrieval. -

flags
-The flags parameter must be set to one of the following values: -

-

DB_CURRENT
Return the log record to which the log currently refers. -

DB_FIRST
The first record from any of the log files found in the log directory +
flags
The flags parameter must be set to one of the following values: +
+
DB_CURRENT
Return the log record to which the log currently refers. +
DB_FIRST
The first record from any of the log files found in the log directory is returned in the data parameter. The lsn parameter is overwritten with the DbLsn of the record returned. -

-The DbLogc::get method will return DB_NOTFOUND if DB_FIRST is set and the log is empty. +

The DbLogc::get method will return DB_NOTFOUND if DB_FIRST is set and the log is empty.

-

DB_LAST
The last record in the log is returned in the data parameter. +
DB_LAST
The last record in the log is returned in the data parameter. The lsn parameter is overwritten with the DbLsn of the record returned. -

-The DbLogc::get method will return DB_NOTFOUND if DB_LAST is set and the log is empty. +

The DbLogc::get method will return DB_NOTFOUND if DB_LAST is set and the log is empty.

-

DB_NEXT
The current log position is advanced to the next record in the log, and +
DB_NEXT
The current log position is advanced to the next record in the log, and that record is returned in the data parameter. The lsn parameter is overwritten with the DbLsn of the record returned.

If the cursor has not been initialized via DB_FIRST, DB_LAST, DB_SET, DB_NEXT, or DB_PREV, DbLogc::get will return the first record in the log.

-

-The DbLogc::get method will return DB_NOTFOUND if DB_NEXT is set and the last log record has already been +

The DbLogc::get method will return DB_NOTFOUND if DB_NEXT is set and the last log record has already been returned or the log is empty.

-

DB_PREV
The current log position is advanced to the previous record in the log, +
DB_PREV
The current log position is advanced to the previous record in the log, and that record is returned in the data parameter. The lsn parameter is overwritten with the DbLsn of the record returned.

If the cursor has not been initialized via DB_FIRST, DB_LAST, DB_SET, DB_NEXT, or DB_PREV, DbLogc::get will return the last record in the log.

-

-The DbLogc::get method will return DB_NOTFOUND if DB_PREV is set and the first log record has already been +

The DbLogc::get method will return DB_NOTFOUND if DB_PREV is set and the first log record has already been returned or the log is empty.

-

DB_SET
Retrieve the record specified by the lsn parameter. +
DB_SET
Retrieve the record specified by the lsn parameter.
-

lsn
-When the flag parameter is set to DB_CURRENT, +
lsn
When the flag parameter is set to DB_CURRENT, DB_FIRST, DB_LAST, DB_NEXT or DB_PREV, the lsn parameter is overwritten with the DbLsn value of the record retrieved. When flag is set to DB_SET, @@ -97,8 +89,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the DB_CURRENT flag was set and the log cursor has not yet +
+
EINVAL
If the DB_CURRENT flag was set and the log cursor has not yet been initialized; the DB_CURRENT, DB_NEXT, or DB_PREV flags were set and the log was opened with the DB_THREAD flag set; @@ -115,6 +107,6 @@ invalid flag value or parameter was specified.
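Putting the cursor flags above together, a hedged sketch that walks the log from the first record to the last, letting the library allocate each record buffer:

    #include <db_cxx.h>
    #include <cstdio>
    #include <cstdlib>

    // Scan every log record with DB_NEXT until DB_NOTFOUND (sketch).
    void walk_log(DbEnv &env)
    {
        DbLogc *logc;
        env.log_cursor(&logc, 0);              // create the cursor

        DbLsn lsn;
        Dbt data;
        data.set_flags(DB_DBT_MALLOC);         // library allocates, caller frees

        int ret;
        while ((ret = logc->get(&lsn, &data, DB_NEXT)) == 0) {
            printf("record of %lu bytes\n", (u_long)data.get_size());
            free(data.get_data());
        }
        // ret == DB_NOTFOUND once the last record has been returned

        logc->close(0);
    }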

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/lsn_class.html b/db/docs/api_cxx/lsn_class.html index d5b25e0e2..ac0f58300 100644 --- a/db/docs/api_cxx/lsn_class.html +++ b/db/docs/api_cxx/lsn_class.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbLsn - + -

DbLsn

API -Ref -
+Ref +


@@ -40,6 +39,6 @@ the other specifies an offset in the log file.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/memp_class.html b/db/docs/api_cxx/memp_class.html index 9f0b78fc3..4277d43f0 100644 --- a/db/docs/api_cxx/memp_class.html +++ b/db/docs/api_cxx/memp_class.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbMemoryException - + -

DbMemoryException

API -Ref -
+Ref +


@@ -36,9 +35,10 @@ it is used by the various Db* classes.

to complete an operation, and there is the possibility of recovering. An example is during a Db::get or Dbc::get operation with the Dbt flags set to DB_DBT_USERMEM.

-

The get_dbt method returns the -Dbt with insufficient memory to complete the operation, causing -the DbMemoryException to be thrown. +

The DbException errno value is set to DB_BUFFER_SMALL or ENOMEM.

+

The get_dbt method returns the Dbt with insufficient +memory to complete the operation, causing the DbMemoryException +to be thrown. The Dbt pointer may or may not refer to valid memory, depending on whether the Dbt used in the call to the failed Berkeley DB method is still in scope and has not been deleted.

@@ -46,6 +46,6 @@ is still in scope and has not been deleted.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/memp_fclose.html b/db/docs/api_cxx/memp_fclose.html index 2e1cec107..cbbfb5c07 100644 --- a/db/docs/api_cxx/memp_fclose.html +++ b/db/docs/api_cxx/memp_fclose.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbMpoolFile::close - + -

DbMpoolFile::close

API -Ref -
+Ref +


@@ -42,9 +41,8 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

flags
-The flags parameter is currently unused, and must be set to 0. +
+
flags
The flags parameter is currently unused, and must be set to 0.

Class

@@ -55,6 +53,6 @@ The flags parameter is currently unused, and must be set to 0.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/memp_fcreate.html b/db/docs/api_cxx/memp_fcreate.html index 927d31af3..32358be57 100644 --- a/db/docs/api_cxx/memp_fcreate.html +++ b/db/docs/api_cxx/memp_fcreate.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::memp_fcreate - + -

DbEnv::memp_fcreate

API -Ref -
+Ref +


@@ -38,9 +37,8 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

flags
-The flags parameter is currently unused, and must be set to 0. +
+
flags
The flags parameter is currently unused, and must be set to 0.

Class

@@ -51,6 +49,6 @@ The flags parameter is currently unused, and must be set to 0.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/memp_fget.html b/db/docs/api_cxx/memp_fget.html index aa8b28626..f233cf13a 100644 --- a/db/docs/api_cxx/memp_fget.html +++ b/db/docs/api_cxx/memp_fget.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbMpoolFile::get - + -

DbMpoolFile::get

API -Ref -
+Ref +


@@ -36,9 +35,8 @@ DbMpoolFile::get(db_pgno_t *pgnoaddr, u_int32_t flags, void **pagep);
 

Fully or partially created pages have all their bytes set to a nul byte, unless the DbMpoolFile::set_clear_len method was called to specify other behavior before the file was opened.

- -

-The DbMpoolFile::get method + +

The DbMpoolFile::get method will either return DB_PAGE_NOTFOUND or throw an exception that encapsulates DB_PAGE_NOTFOUND if the requested page does not exist and DB_MPOOL_CREATE was not set. @@ -48,28 +46,25 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
+
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_MPOOL_CREATE
If the specified page does not exist, create it. In this case, the +
+
DB_MPOOL_CREATE
If the specified page does not exist, create it. In this case, the pgin method, if specified, is called. -

DB_MPOOL_LAST
Return the last page of the source file, and copy its page number into +
DB_MPOOL_LAST
Return the last page of the source file, and copy its page number into the memory location to which pgnoaddr refers. -

DB_MPOOL_NEW
Create a new page in the file, and copy its page number into the memory +
DB_MPOOL_NEW
Create a new page in the file, and copy its page number into the memory location to which pgnoaddr refers. In this case, the pgin method, if specified, is not called.

The DB_MPOOL_CREATE, DB_MPOOL_LAST, and DB_MPOOL_NEW flags are mutually exclusive.

-

pagep
-The pagep parameter references memory into which +
pagep
The pagep parameter references memory into which a pointer to the returned page is copied. -

pgnoaddr
-If the flags parameter is set to DB_MPOOL_LAST or +
pgnoaddr
If the flags parameter is set to DB_MPOOL_LAST or DB_MPOOL_NEW, the page number of the created page is copied into the memory location to which the pgnoaddr parameter refers. Otherwise, the pgnoaddr parameter is the page to @@ -83,12 +78,12 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EAGAIN
The page reference count has overflowed. (This should never happen +
+
EAGAIN
The page reference count has overflowed. (This should never happen unless there is a bug in the application.)
-

-

EINVAL
If the DB_MPOOL_NEW flag was set, and the source file was not +
+
EINVAL
If the DB_MPOOL_NEW flag was set, and the source file was not opened for writing; more than one of DB_MPOOL_CREATE, DB_MPOOL_LAST, and DB_MPOOL_NEW was set; or if an @@ -96,7 +91,7 @@ invalid flag value or parameter was specified.

If the cache is full, and no more pages will fit in the pool, the DbMpoolFile::get method will fail and either return ENOMEM or -throw a DbMemoryException exception.

+throw a DbMemoryException.
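Tying DbEnv::memp_fcreate, DbMpoolFile::open, DbMpoolFile::get, and DbMpoolFile::put together, a hedged sketch; the file name and page size are placeholders:

    #include <db_cxx.h>
    #include <cstring>

    // Create page 0 of a pool file, dirty it, and write it back (sketch).
    void touch_first_page(DbEnv &env)
    {
        DbMpoolFile *mpf;
        env.memp_fcreate(&mpf, 0);                          // allocate the handle
        mpf->open("example.mpool", DB_CREATE, 0, 1024);     // placeholder name and page size

        db_pgno_t pgno = 0;
        void *page;
        mpf->get(&pgno, DB_MPOOL_CREATE, &page);            // create the page if absent

        memset(page, 0, 1024);                              // modify the page in cache
        mpf->put(page, DB_MPOOL_DIRTY);                     // must be written before eviction

        mpf->close(0);
    }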


Class

DbEnv, DbMpoolFile @@ -106,6 +101,6 @@ throw a DbMemoryException exception.


APIRef -

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/memp_fopen.html b/db/docs/api_cxx/memp_fopen.html index 55d1b879a..d0f909583 100644 --- a/db/docs/api_cxx/memp_fopen.html +++ b/db/docs/api_cxx/memp_fopen.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbMpoolFile::open - + -

DbMpoolFile::open

API -Ref -
+Ref +


@@ -35,43 +34,44 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

file
-The file parameter is the name of the file to be opened. If +
+
file
The file parameter is the name of the file to be opened. If file is NULL, a private temporary file is created that cannot be shared with any other process (although it may be shared with other threads of control in the same process). -

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +

On Windows, the file argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters.

+
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_CREATE
Create any underlying files, as necessary. If the files do not already +
+
DB_CREATE
Create any underlying files, as necessary. If the files do not already exist and the DB_CREATE flag is not specified, the call will fail. - -

DB_DIRECT
If set and supported by the system, turn off system buffering of the + +
DB_DIRECT
If set and supported by the system, turn off system buffering of the file to avoid double caching. -

DB_NOMMAP
Always copy this file into the local cache instead of potentially mapping +
DB_NOMMAP
Always copy this file into the local cache instead of potentially mapping it into process memory (see the description of the DbEnv::set_mp_mmapsize method for further information). -

DB_ODDFILESIZE
Attempts to open files which are not a multiple of the page size in +
DB_ODDFILESIZE
Attempts to open files which are not a multiple of the page size in length will fail, by default. If the DB_ODDFILESIZE flag is set, any partial page at the end of the file will be ignored and the open will proceed. -

DB_RDONLY
Open any underlying files for reading only. Any attempt to write the file +
DB_RDONLY
Open any underlying files for reading only. Any attempt to write the file using the pool functions will fail, regardless of the actual permissions of the file.
-

mode
-On UNIX systems or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by -DbMpoolFile::open are created with mode mode (as described in chmod(2)) and modified by the process' umask value at the time of creation -(see umask(2)). If mode is 0, DbMpoolFile::open will use a default -mode of readable and writable by both owner and group. On Windows -systems, the mode parameter is ignored. The group ownership of created -files is based on the system and directory defaults, and is not further -specified by Berkeley DB. -

pagesize
-The pagesize parameter is the size, in bytes, of the unit of +
mode
On Windows systems, the mode parameter is ignored. +

On UNIX systems or in IEEE/ANSI Std 1003.1 (POSIX) environments, files created by DbMpoolFile::open +are created with mode mode (as described in chmod(2)) +and modified by the process' umask value at the time of creation (see +umask(2)). Created files are owned by the process owner; the +group ownership of created files is based on the system and directory +defaults, and is not further specified by Berkeley DB. System shared memory +segments created by DbMpoolFile::open are created with mode mode, unmodified +by the process' umask value. If mode is 0, DbMpoolFile::open will use a +default mode of readable and writable by both owner and group.

+
pagesize
The pagesize parameter is the size, in bytes, of the unit of transfer between the application and the cache, although it is not necessarily the unit of transfer between the cache and the underlying filesystem. @@ -82,8 +82,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the file has already been entered into the pool, and the pagesize +
+
EINVAL
If the file has already been entered into the pool, and the pagesize value is not the same as when the file was entered into the pool, or the length of the file is not zero or a multiple of the pagesize; the DB_RDONLY flag was specified for an in-memory pool; or if an @@ -91,7 +91,7 @@ invalid flag value or parameter was specified.

If the maximum number of open files has been reached, the DbMpoolFile::open method will fail and either return ENOMEM or -throw a DbMemoryException exception.

+throw a DbMemoryException.


Class

DbEnv, DbMpoolFile @@ -101,6 +101,6 @@ throw a DbMemoryException exception.


APIRef -

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/memp_fput.html b/db/docs/api_cxx/memp_fput.html index 693006b95..ff3de88b9 100644 --- a/db/docs/api_cxx/memp_fput.html +++ b/db/docs/api_cxx/memp_fput.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbMpoolFile::put - + -

DbMpoolFile::put

API -Ref -
+Ref +


@@ -35,24 +34,21 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

pgaddr
-The pgaddr parameter is the address of the page to be +
+
pgaddr
The pgaddr parameter is the address of the page to be returned to the cache. The pgaddr parameter must be an address previously returned by DbMpoolFile::get. -

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_MPOOL_CLEAN
Clear any previously set modification information (that is, don't bother +
+
DB_MPOOL_CLEAN
Clear any previously set modification information (that is, don't bother writing the page back to the source file). -

DB_MPOOL_DIRTY
The page has been modified and must be written to the source file before +
DB_MPOOL_DIRTY
The page has been modified and must be written to the source file before being evicted from the pool. -

DB_MPOOL_DISCARD
The page is unlikely to be useful in the near future, and should be +
DB_MPOOL_DISCARD
The page is unlikely to be useful in the near future, and should be discarded before other pages in the pool.
-

The DB_MPOOL_CLEAN and DB_MPOOL_DIRTY flags are -mutually exclusive.

+

The DB_MPOOL_CLEAN and DB_MPOOL_DIRTY flags are mutually exclusive.

Errors

The DbMpoolFile::put method @@ -60,12 +56,12 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EACCES
The DB_MPOOL_DIRTY flag was set and the source file was not +
+
EACCES
The DB_MPOOL_DIRTY flag was set and the source file was not opened for writing.
-

-

EINVAL
If the pgaddr parameter does not refer to a page returned by +
+
EINVAL
If the pgaddr parameter does not refer to a page returned by DbMpoolFile::get; more than one of the DB_MPOOL_CLEAN and DB_MPOOL_DIRTY flags was set; or if an @@ -80,6 +76,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/memp_fset.html b/db/docs/api_cxx/memp_fset.html index f6ffcce7d..a7b2bb498 100644 --- a/db/docs/api_cxx/memp_fset.html +++ b/db/docs/api_cxx/memp_fset.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbMpoolFile::set - + -

DbMpoolFile::set

API -Ref -
+Ref +


@@ -35,24 +34,21 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

pgaddr
-The pgaddr parameter is the address of the page for which +
+
pgaddr
The pgaddr parameter is the address of the page for which attributes are to be set. The pgaddr parameter must be an address previously returned by DbMpoolFile::get. -

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_MPOOL_CLEAN
Clear any previously set modification information (that is, don't bother +
+
DB_MPOOL_CLEAN
Clear any previously set modification information (that is, don't bother writing the page back to the source file). -

DB_MPOOL_DIRTY
The page has been modified and must be written to the source file before +
DB_MPOOL_DIRTY
The page has been modified and must be written to the source file before being evicted from the pool. -

DB_MPOOL_DISCARD
The page is unlikely to be useful in the near future, and should be +
DB_MPOOL_DISCARD
The page is unlikely to be useful in the near future, and should be discarded before other pages in the pool.
-

The DB_MPOOL_CLEAN and DB_MPOOL_DIRTY flags are -mutually exclusive.

+

The DB_MPOOL_CLEAN and DB_MPOOL_DIRTY flags are mutually exclusive.

Errors

The DbMpoolFile::set method @@ -60,8 +56,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -73,6 +69,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/memp_fsync.html b/db/docs/api_cxx/memp_fsync.html index 20f445c5d..db8108215 100644 --- a/db/docs/api_cxx/memp_fsync.html +++ b/db/docs/api_cxx/memp_fsync.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbMpoolFile::sync - + -

DbMpoolFile::sync

API -Ref -
+Ref +


@@ -47,6 +46,6 @@ failure, and returns 0 on success.
 

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/memp_list.html b/db/docs/api_cxx/memp_list.html index e79a351ac..2b7623208 100644 --- a/db/docs/api_cxx/memp_list.html +++ b/db/docs/api_cxx/memp_list.html @@ -1,37 +1,41 @@ - + Berkeley DB: Berkeley DB: Memory Pools and Related Methods - +

Berkeley DB: Memory Pools and Related Methods

- + - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + +
Memory Pools and Related Methods | Description
DbEnv::set_cachesize | Set the environment cache size
DbEnv::set_mp_mmapsize | Set maximum mapped-in database file size
DbEnv::memp_register | Register input/output functions for a file in a memory pool
DbEnv::memp_stat | Return memory pool statistics
DbEnv::memp_sync | Flush pages from a memory pool
DbEnv::memp_trickle | Trickle flush pages from a memory pool
DbEnv::memp_fcreate | Open a file in a memory pool
DbMpoolFile::close | Close a file in a memory pool
DbMpoolFile::get | Get page from a file in a memory pool
DbMpoolFile::open | Open a file in a memory pool
DbMpoolFile::put | Return a page to a memory pool
DbMpoolFile::set | Set memory pool page status
DbMpoolFile::sync | Flush pages from a file in a memory pool
DbMpoolFile::set_clear_len | Set file page bytes to be cleared
DbMpoolFile::set_fileid | Set file unique identifier
DbMpoolFile::set_ftype | Set file type
DbMpoolFile::set_lsn_offset | Set file log-sequence-number offset
DbMpoolFile::set_pgcookie | Set file cookie for pgin/pgout
Db::get_mpf | Return the database's memory pool handle
DbEnv::memp_fcreate | Open a file in a memory pool
DbEnv::memp_register | Register input/output functions for a file in a memory pool
DbEnv::set_max_openfd | Set the maximum number of open file descriptors
DbEnv::set_max_write | Set the maximum number of sequential disk writes
DbEnv::memp_stat | Return memory pool statistics
DbEnv::memp_sync | Flush pages from a memory pool
DbEnv::memp_trickle | Trickle flush pages from a memory pool
DbEnv::set_cachesize | Set the environment cache size
DbEnv::set_mp_mmapsize | Set maximum mapped-in database file size
DbMpoolFile::close | Close a file in a memory pool
DbMpoolFile::get | Get page from a file in a memory pool
DbMpoolFile::open | Open a file in a memory pool
DbMpoolFile::put | Return a page to a memory pool
DbMpoolFile::set | Set memory pool page status
DbMpoolFile::set_clear_len | Set file page bytes to be cleared
DbMpoolFile::set_fileid | Set file unique identifier
DbMpoolFile::set_flags | General memory pool file configuration
DbMpoolFile::set_ftype | Set file type
DbMpoolFile::set_lsn_offset | Set file log-sequence-number offset
DbMpoolFile::set_pgcookie | Set file cookie for pgin/pgout
DbMpoolFile::sync | Flush pages from a file in a memory pool
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/memp_maxwrite.html b/db/docs/api_cxx/memp_maxwrite.html new file mode 100644 index 000000000..76e325e9f --- /dev/null +++ b/db/docs/api_cxx/memp_maxwrite.html @@ -0,0 +1,87 @@ + + + + + + +Berkeley DB: DbEnv::set_max_write + + + + + + + +
+

DbEnv::set_max_write

+
+API +Ref
+


+ +

+#include <db_cxx.h>
+

+int +DbEnv::memp_set_max_write(int maxwrite, int maxwrite_sleep); +

+int +DbEnv::memp_get_max_write(int *maxwritep, int *maxwrite_sleepp); +

+
+

Description: DbEnv::set_max_write

+

The DbEnv::set_max_write method limits the number of sequential write +operations scheduled by the library when flushing dirty pages from the +cache.

+

The DbEnv::set_max_write method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+

Parameters

+
+
maxwrite
The maximum number of sequential write operations scheduled by the +library when flushing dirty pages from the cache. +
maxwrite_sleep
The number of microseconds the thread of control should pause before +scheduling further write operations. +
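A one-line usage sketch following the prototype shown above; the numbers are illustrative only:

    // Limit background cache flushes to 32 pages per burst, pausing 1000
    // microseconds between bursts (illustrative values).
    env.memp_set_max_write(32, 1000);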
+

Errors

+

The DbEnv::set_max_write method +may fail and throw +DbException, +encapsulating one of the following non-zero errors, or return one of +the following non-zero errors:

+
+
EINVAL
An +invalid flag value or parameter was specified. +
+
+

Description: DbEnv::get_max_write

+

The DbEnv::get_max_write method returns the current maximum number of +sequential write operations and microseconds to pause.

+

The DbEnv::get_max_write method may be called at any time during the life of the +application.

+

The DbEnv::get_max_write method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+

Parameters

+
+
maxwritep
The maxwritep parameter references memory into which + the maximum number of sequential write operations is copied. +
maxwrite_sleepp
The maxwrite_sleepp parameter references memory into which + the microseconds to pause before scheduling further write operations is copied. +
+
+

Class

+DbEnv, DbMpoolFile +

See Also

+Memory Pools and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_cxx/memp_openfd.html b/db/docs/api_cxx/memp_openfd.html new file mode 100644 index 000000000..f1fb6f618 --- /dev/null +++ b/db/docs/api_cxx/memp_openfd.html @@ -0,0 +1,82 @@ + + + + + + +Berkeley DB: DbEnv::set_max_openfd + + + + + + + +
+

DbEnv::set_max_openfd

+
+API +Ref
+


+ +

+#include <db_cxx.h>
+

+int +DbEnv::memp_set_max_openfd(int maxopenfd); +

+int +DbEnv::memp_get_max_openfd(int *maxopenfdp); +

+
+

Description: DbEnv::set_max_openfd

+

The DbEnv::set_max_openfd method limits the number of file descriptors +the library will open concurrently when flushing dirty pages from the +cache.

+

The DbEnv::set_max_openfd method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+

Parameters

+
+
maxopenfd
The maximum number of file descriptors that may be concurrently opened +by the library when flushing dirty pages from the cache. +
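A one-line usage sketch following the prototype shown above; the value is illustrative only:

    // Keep at most 16 file descriptors open while flushing dirty pages
    // from the cache (illustrative value).
    env.memp_set_max_openfd(16);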
+

Errors

+

The DbEnv::set_max_openfd method +may fail and throw +DbException, +encapsulating one of the following non-zero errors, or return one of +the following non-zero errors:

+
+
EINVAL
An +invalid flag value or parameter was specified. +
+
+

Description: DbEnv::get_max_openfd

+

The DbEnv::get_max_openfd method returns the maximum number of file descriptors the library will open concurrently when flushing dirty pages from the cache.

+

The DbEnv::get_max_openfd method may be called at any time during the life of the +application.

+

The DbEnv::get_max_openfd method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+

Parameters

+
+
maxopenfdp
The DbEnv::get_max_openfd method returns the +maximum number of file descriptors open in maxopenfdp. +
+
+
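A similar hedged sketch for the file-descriptor limit, again following the synopsis on this page; the limit of 16 descriptors is purely illustrative:

    #include <db_cxx.h>

    int configure_max_openfd(DbEnv *dbenv)
    {
        // Placeholder limit: let the cache-flushing code hold at most
        // 16 file descriptors open at any one time.
        int ret = dbenv->memp_set_max_openfd(16);
        if (ret != 0)
            return (ret);

        int maxopenfd;
        return (dbenv->memp_get_max_openfd(&maxopenfd));
    }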

Class

+DbEnv, DbMpoolFile +

See Also

+Memory Pools and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_cxx/memp_register.html b/db/docs/api_cxx/memp_register.html index ca4a210d9..71d3ca3bf 100644 --- a/db/docs/api_cxx/memp_register.html +++ b/db/docs/api_cxx/memp_register.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::memp_register - + -

DbEnv::memp_register

API -Ref -
+Ref +


@@ -66,25 +65,13 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

ftype
-The ftype parameter specifies the type of file for which the +
+
ftype
The ftype parameter specifies the type of file for which the page-in and page-out functions will be called.

The ftype value for a file must be a non-zero positive number less than 128 (0 and negative numbers are reserved for internal use by the Berkeley DB library).

-

pgin_fcn
-The page-in and page-out functions. -

The pgin_fcn and pgout_fcn functions are called with a -reference to the current database environment, the page number being -read or written, a pointer to the page being read or written, and any -parameter pgcookie that was specified to the -DbMpoolFile::set_pgcookie method.

-

The pgin_fcn and pgout_fcn functions should return 0 on -success, and a non-zero value on failure, in which case the shared Berkeley DB -library function calling it will also fail, returning that non-zero -value. The non-zero value should be selected from values outside of the -Berkeley DB library namespace.

+
pgin_fcn
pgout_fcn
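As a hedged illustration of how the pieces fit together, the sketch below registers no-op page-in and page-out functions under an application-chosen ftype of 1; the callback parameter list follows the description of pgin_fcn and pgout_fcn, but the exact typedef should be confirmed against db_cxx.h for the release in use:

    #include <db_cxx.h>

    // Called with the environment, the page number, a pointer to the
    // page, and the pgcookie set with DbMpoolFile::set_pgcookie;
    // return 0 on success, non-zero on failure.
    static int my_pgin(DbEnv *, db_pgno_t, void *, Dbt *)
    {
        return (0);
    }

    static int my_pgout(DbEnv *, db_pgno_t, void *, Dbt *)
    {
        return (0);
    }

    int register_page_conversion(DbEnv *dbenv)
    {
        // 1 is an arbitrary non-zero ftype below 128, as required above.
        return (dbenv->memp_register(1, my_pgin, my_pgout));
    }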

Class

@@ -95,6 +82,6 @@ Berkeley DB library namespace.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/memp_set_clear_len.html b/db/docs/api_cxx/memp_set_clear_len.html index 89e820474..bceaa46f7 100644 --- a/db/docs/api_cxx/memp_set_clear_len.html +++ b/db/docs/api_cxx/memp_set_clear_len.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbMpoolFile::set_clear_len - + -

DbMpoolFile::set_clear_len

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbMpoolFile::get_clear_len(u_int32_t *lenp);
 


Description: DbMpoolFile::set_clear_len

-

The DbMpoolFile::set_clear_len method sets the number of initial bytes in a page that should be set to nul when the page is created as a result of the DB_MPOOL_CREATE or DB_MPOOL_NEW flags being @@ -52,28 +50,16 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

len
-The len parameter is the number of initial bytes in a page that +
+
len
The len parameter is the number of initial bytes in a page that should be set to nul when the page is created. A value of 0 results in the entire page being set to nul bytes.
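For example (a hedged sketch; mpf stands for a DbMpoolFile handle created elsewhere), a len of 0 asks for newly created pages to be nul-filled in their entirety:

    #include <db_cxx.h>

    int clear_new_pages(DbMpoolFile *mpf)
    {
        // 0 == clear the whole page when it is created with
        // DB_MPOOL_CREATE or DB_MPOOL_NEW.
        int ret = mpf->set_clear_len(0);
        if (ret != 0)
            return (ret);

        u_int32_t len;
        return (mpf->get_clear_len(&len));
    }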

Description: DbMpoolFile::get_clear_len

-

The DbMpoolFile::get_clear_len method returns the bytes to be cleared.

+

The DbMpoolFile::get_clear_len method returns the bytes to be cleared.

The DbMpoolFile::get_clear_len method may be called at any time during the life of the application.

-

The DbMpoolFile::get_clear_len method -either returns a non-zero error value -or throws an exception that encapsulates a non-zero error value on -failure, and returns 0 on success. -

-

Parameters

-

-

lenp
-The DbMpoolFile::get_clear_len method returns the -bytes to be cleared in lenp. -

Class

DbEnv, DbMpoolFile @@ -83,6 +69,6 @@ bytes to be cleared in lenp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/memp_set_fileid.html b/db/docs/api_cxx/memp_set_fileid.html index df08ba7db..de6d47add 100644 --- a/db/docs/api_cxx/memp_set_fileid.html +++ b/db/docs/api_cxx/memp_set_fileid.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbMpoolFile::set_fileid - + -

DbMpoolFile::set_fileid

API -Ref -
+Ref +


@@ -30,7 +29,6 @@ int DbMpoolFile::get_fileid(u_int8_t *fileid);
 


Description: DbMpoolFile::set_fileid

-

The DbMpoolFile::set_fileid method specifies a unique identifier for the file. (The shared memory buffer pool functions must be able to uniquely identify files in order that multiple processes wanting to share a file @@ -67,9 +65,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

fileid
-The fileid parameter is the unique identifier for the file. +
+
fileid
The fileid parameter is the unique identifier for the file. Unique file identifiers must be a DB_FILE_ID_LEN length array of bytes.

@@ -92,6 +89,6 @@ application.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/memp_set_flags.html b/db/docs/api_cxx/memp_set_flags.html index c316a2f5d..3f3fe4490 100644 --- a/db/docs/api_cxx/memp_set_flags.html +++ b/db/docs/api_cxx/memp_set_flags.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbMpoolFile::set_flags - + -

DbMpoolFile::set_flags

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbMpoolFile::get_flags(u_int32_t *flagsp);
 


Description: DbMpoolFile::set_flags

-

Configure a file in the cache.

To set the flags for a particular database, call the DbMpoolFile::set_flags method using the DbMpoolFile handle stored in @@ -42,19 +40,20 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flags
-The flags parameter must be set by bitwise inclusively OR'ing together one or more +
+
flags
The flags parameter must be set by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_MPOOL_NOFILE
If set, no backing temporary file will be opened for in-memory +
+
DB_MPOOL_NOFILE
If set, no backing temporary file will be opened for in-memory databases, even if they expand to fill the entire cache. Attempts to create new file pages after the cache has been filled will fail.

The DB_MPOOL_NOFILE flag may be used to configure Berkeley DB at any time during the life of the application.

+
DB_MPOOL_UNLINK
If set, remove the file when the last reference to it is closed. +

The DB_MPOOL_UNLINK flag may be used to configure Berkeley DB at any time during +the life of the application.

-

onoff
-If onoff is +
onoff
If onoff is false, the specified flags are cleared; otherwise they are set.
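A hedged sketch of setting and clearing one of the flags above; the DbMpoolFile handle and the choice of DB_MPOOL_NOFILE are illustrative, and a non-zero onoff is assumed to behave as described for the onoff parameter:

    #include <db_cxx.h>

    int toggle_nofile(DbMpoolFile *mpf)
    {
        // Set DB_MPOOL_NOFILE: keep the in-memory database entirely in
        // the cache, with no backing temporary file.
        int ret = mpf->set_flags(DB_MPOOL_NOFILE, 1);
        if (ret != 0)
            return (ret);

        u_int32_t flags;
        ret = mpf->get_flags(&flags);    // read the current flags back
        if (ret != 0)
            return (ret);

        // Clear the flag again by passing a zero onoff value.
        return (mpf->set_flags(DB_MPOOL_NOFILE, 0));
    }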
@@ -69,9 +68,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flagsp
-The DbMpoolFile::get_flags method returns the +
+
flagsp
The DbMpoolFile::get_flags method returns the flags in flagsp.

@@ -83,6 +81,6 @@ flags in flagsp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/memp_set_ftype.html b/db/docs/api_cxx/memp_set_ftype.html index 3a54f518e..a628fa3b5 100644 --- a/db/docs/api_cxx/memp_set_ftype.html +++ b/db/docs/api_cxx/memp_set_ftype.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbMpoolFile::set_ftype - + -

DbMpoolFile::set_ftype

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbMpoolFile::get_ftype(int *ftypep);
 


Description: DbMpoolFile::set_ftype

-

The DbMpoolFile::set_ftype method specifies a file type for the purposes of input or output processing of the file's pages as they are read from or written to, the backing filesystem store.

@@ -48,29 +46,17 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

ftype
-The ftype parameter sets the file's type for the purposes of input +
+
ftype
The ftype parameter sets the file's type for the purposes of input and output processing. The ftype must be the same as a ftype parameter previously specified to the DbEnv::memp_register method. (See the DbEnv::memp_register documentation for more information.)

Description: DbMpoolFile::get_ftype

-

The DbMpoolFile::get_ftype method returns the file type.

+

The DbMpoolFile::get_ftype method returns the file type.

The DbMpoolFile::get_ftype method may be called at any time during the life of the application.

-

The DbMpoolFile::get_ftype method -either returns a non-zero error value -or throws an exception that encapsulates a non-zero error value on -failure, and returns 0 on success. -

-

Parameters

-

-

ftypep
-The DbMpoolFile::get_ftype method returns the -file type in ftypep. -

Class

DbEnv, DbMpoolFile @@ -80,6 +66,6 @@ file type in ftypep.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/memp_set_lsn_offset.html b/db/docs/api_cxx/memp_set_lsn_offset.html index 77c6574b4..0e5c34697 100644 --- a/db/docs/api_cxx/memp_set_lsn_offset.html +++ b/db/docs/api_cxx/memp_set_lsn_offset.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbMpoolFile::set_lsn_offset - + -

DbMpoolFile::set_lsn_offset

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbMpoolFile::get_lsn_offset(int32_t *lsn_offsetp);
 


Description: DbMpoolFile::set_lsn_offset

-

The DbMpoolFile::set_lsn_offset method specifies the zero-based byte offset of a log sequence number (DbLsn) on the file's pages, for the purposes of page-flushing as part of transaction checkpoint. (See the @@ -51,27 +49,15 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

lsn_offset
-The lsn_offset parameter is the zero-based byte offset of the +
+
lsn_offset
The lsn_offset parameter is the zero-based byte offset of the log sequence number on the file's pages.

Description: DbMpoolFile::get_lsn_offset

-

The DbMpoolFile::get_lsn_offset method returns the log sequence number byte offset.

+

The DbMpoolFile::get_lsn_offset method returns the log sequence number byte offset.

The DbMpoolFile::get_lsn_offset method may be called at any time during the life of the application.

-

The DbMpoolFile::get_lsn_offset method -either returns a non-zero error value -or throws an exception that encapsulates a non-zero error value on -failure, and returns 0 on success. -

-

Parameters

-

-

lsn_offsetp
-The DbMpoolFile::get_lsn_offset method returns the -log sequence number byte offset in lsn_offsetp. -

Class

DbEnv, DbMpoolFile @@ -81,6 +67,6 @@ log sequence number byte offset in lsn_offsetp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/memp_set_maxsize.html b/db/docs/api_cxx/memp_set_maxsize.html index 0e448ce15..2f2e38c8f 100644 --- a/db/docs/api_cxx/memp_set_maxsize.html +++ b/db/docs/api_cxx/memp_set_maxsize.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbMpoolFile::set_maxsize - + -

DbMpoolFile::set_maxsize

API -Ref -
+Ref +


@@ -31,11 +30,9 @@ DbMpoolFile::get_maxsize(u_int32_t *gbytesp, u_int32_t *bytesp);
 


Description: DbMpoolFile::set_maxsize

- -

Set the maximum size for the file to be -gbytes gigabytes plus bytes. -Attempts to allocate new pages in the file after the limit has been -reached will fail.

+

Set the maximum size for the file to be gbytes gigabytes plus +bytes. Attempts to allocate new pages in the file after the +limit has been reached will fail.

To set the maximum file size for a particular database, call the DbMpoolFile::set_maxsize method using the DbMpoolFile handle stored in the mpf field of the Db handle. Attempts to insert @@ -48,12 +45,10 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

bytes
-The maximum size of the file is set to gbytes gigabytes plus +
+
bytes
The maximum size of the file is set to gbytes gigabytes plus bytes. -

gbytes
-The maximum size of the file is set to gbytes gigabytes plus +
gbytes
The maximum size of the file is set to gbytes gigabytes plus bytes.

@@ -66,12 +61,10 @@ failure, and returns 0 on success.

The DbMpoolFile::get_maxsize method may be called at any time during the life of the application.

Parameters

-

-

bytesp
-The bytesp parameter references memory into which +
+
bytesp
The bytesp parameter references memory into which the additional bytes of memory in the maximum file size is copied. -

gbytesp
-The gbytesp parameter references memory into which +
gbytesp
The gbytesp parameter references memory into which the gigabytes of memory in the maximum file size is copied.
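A hedged sketch combining the two calls, with purely illustrative sizes (1 gigabyte plus 0 bytes):

    #include <db_cxx.h>

    int cap_file_size(DbMpoolFile *mpf)
    {
        // Fail page allocation once the file reaches 1GB + 0 bytes.
        int ret = mpf->set_maxsize(1, 0);
        if (ret != 0)
            return (ret);

        u_int32_t gbytes, bytes;
        return (mpf->get_maxsize(&gbytes, &bytes));
    }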

@@ -83,6 +76,6 @@ The gbytesp parameter references memory into which

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/memp_set_pgcookie.html b/db/docs/api_cxx/memp_set_pgcookie.html index 08c99e77f..03ca2e632 100644 --- a/db/docs/api_cxx/memp_set_pgcookie.html +++ b/db/docs/api_cxx/memp_set_pgcookie.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbMpoolFile::set_pgcookie - + -

DbMpoolFile::set_pgcookie

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbMpoolFile::get_pgcookie(DBT *dbt);
 


Description: DbMpoolFile::set_pgcookie

-

The DbMpoolFile::set_pgcookie method specifies a byte string that is provided to the functions registered to do input or output processing of the file's pages as they are read from or written to, the backing filesystem @@ -50,28 +48,16 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

pgcookie
-The pgcookie parameter is a byte string provided to the +
+
pgcookie
The pgcookie parameter is a byte string provided to the functions registered to do input or output processing of the file's pages.

Description: DbMpoolFile::get_pgcookie

-

The DbMpoolFile::get_pgcookie method returns the byte string.

+

The DbMpoolFile::get_pgcookie method returns the byte string.

The DbMpoolFile::get_pgcookie method may be called at any time during the life of the application.

-

The DbMpoolFile::get_pgcookie method -either returns a non-zero error value -or throws an exception that encapsulates a non-zero error value on -failure, and returns 0 on success. -

-

Parameters

-

-

dbt
-The DbMpoolFile::get_pgcookie method returns a reference to the -byte string in dbt. -

Class

DbEnv, DbMpoolFile @@ -81,6 +67,6 @@ byte string in dbt.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/memp_set_priority.html b/db/docs/api_cxx/memp_set_priority.html index a8f98416f..0ba75f827 100644 --- a/db/docs/api_cxx/memp_set_priority.html +++ b/db/docs/api_cxx/memp_set_priority.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbMpoolFile::set_priority - + -

DbMpoolFile::set_priority

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbMpoolFile::get_priority(DB_CACHE_PRIORITY *priorityp);
 


Description: DbMpoolFile::set_priority

-

Set the cache priority for pages from the specified file. The priority of a page biases the replacement algorithm to be more or less likely to discard a page when space is needed in the buffer pool. The bias is @@ -49,12 +47,11 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

priority
-The priority parameter must be set to one of the following +
+
priority
The priority parameter must be set to one of the following values: -

-

DB_PRIORITY_VERY_LOW
The lowest priority: pages are the most likely to be discarded. +
+
DB_PRIORITY_VERY_LOW
The lowest priority: pages are the most likely to be discarded.
DB_PRIORITY_LOW
The next lowest priority.
DB_PRIORITY_DEFAULT
The default priority.
DB_PRIORITY_HIGH
The next highest priority. @@ -72,9 +69,8 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

priorityp
-The DbMpoolFile::get_priority method returns the +
+
priorityp
The DbMpoolFile::get_priority method returns the cache priority in priorityp.
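A hedged sketch of biasing the replacement algorithm for one file; DB_PRIORITY_LOW is an arbitrary illustrative choice from the list above:

    #include <db_cxx.h>

    int lower_cache_priority(DbMpoolFile *mpf)
    {
        // Make this file's pages more likely to be discarded when the
        // buffer pool needs space.
        int ret = mpf->set_priority(DB_PRIORITY_LOW);
        if (ret != 0)
            return (ret);

        DB_CACHE_PRIORITY priority;
        return (mpf->get_priority(&priority));
    }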

@@ -86,6 +82,6 @@ cache priority in priorityp.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/memp_stat.html b/db/docs/api_cxx/memp_stat.html index e669a4f93..0c7f5c765 100644 --- a/db/docs/api_cxx/memp_stat.html +++ b/db/docs/api_cxx/memp_stat.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::memp_stat - + -

DbEnv::memp_stat

API -Ref -
+Ref +


@@ -26,6 +25,9 @@
 int
 DbEnv::memp_stat(DB_MPOOL_STAT **gsp,
     DB_MPOOL_FSTAT *(*fsp)[], u_int32_t flags);
+

+int +DbEnv::memp_stat_print(u_int32_t flags);


Description: DbEnv::memp_stat

@@ -45,11 +47,11 @@ individually freed.

If gsp is non-NULL, the global statistics for the cache mp are copied into the memory location to which it refers. The following DB_MPOOL_STAT fields will be filled in:

-

+
size_t st_gbytes;
Gigabytes of cache (total cache size is st_gbytes + st_bytes).
size_t st_bytes;
Bytes of cache (total cache size is st_gbytes + st_bytes).
u_int32_t st_ncache;
Number of caches. -
u_int32_t st_regsize;
Individual cache size. +
roff_t st_regsize;
Individual cache size, in bytes.
u_int32_t st_map;
Requested pages mapped into the process' address space (there is no available information about whether or not this request caused disk I/O, although examining the application page fault rate may be helpful). @@ -83,6 +85,15 @@ lock without waiting.
u_int32_t st_alloc_max_buckets;
Maximum number of hash buckets checked during an allocation.
u_int32_t st_alloc_pages;
Number of pages checked during allocation.
u_int32_t st_alloc_max_pages;
Maximum number of pages checked during an allocation. +
u_int32_t st_mmapsize;
Maximum memory-mapped file size. +
u_int32_t st_maxopenfd;
Maximum open file descriptors. +
u_int32_t st_maxwrite;
Maximum sequential buffer writes. +
u_int32_t st_maxwrite_sleep;
Sleep after writing maximum sequential buffers. +
+The flags parameter must be set to 0 or +the following value: +
+
DB_STAT_CLEAR
Reset statistics after returning their values.

If fsp is non-NULL, a pointer to a NULL-terminated variable length array of statistics for individual files, in the cache @@ -92,7 +103,7 @@ set to NULL.

The per-file statistics are stored in structures of type DB_MPOOL_FSTAT. The following DB_MPOOL_FSTAT fields will be filled in for each file in the cache; that is, each element of the array:

-

+
char * file_name;
The name of the file.
size_t st_pagesize;
Page size in bytes.
u_int32_t st_cache_hit;
Requested pages found in the cache. @@ -102,19 +113,21 @@ the cache; that is, each element of the array:

u_int32_t st_page_in;
Pages read into the cache.
u_int32_t st_page_out;
Pages written from the cache to the backing file.
+

The DbEnv::memp_stat method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

Parameters

-

-

flags
-The flags parameter must be set to 0 or +
+
flags
The flags parameter must be set to 0 or the following value: -

-

DB_STAT_CLEAR
Reset statistics after returning their values. +
+
DB_STAT_CLEAR
Reset statistics after returning their values.
-

fsp
-The fsp parameter references memory into which +
fsp
The fsp parameter references memory into which a pointer to the allocated per-file statistics structures is copied. -

gsp
-The gsp parameter references memory into which +
gsp
The gsp parameter references memory into which a pointer to the allocated global statistics structure is copied.

Errors

@@ -123,11 +136,31 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

+

Description: DbEnv::memp_stat_print

+

The DbEnv::memp_stat_print method prints diagnostic information to the output +channel described by the DbEnv::set_msgfile method.

+

The DbEnv::memp_stat_print method may not be called before the DbEnv::open method has +been called.

+

The DbEnv::memp_stat_print method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+

Parameters

+
+
flags
The flags parameter must be set by bitwise inclusively OR'ing together one or more +of the following values: +
+
DB_STAT_ALL
Display all available information. +
DB_STAT_MEMP_HASH
Display the buffers with hash chains. +
+
+
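As a hedged sketch of the two entry points described above: memp_stat is called with a NULL fsp so that only the global statistics are returned, and memp_stat_print then sends the full report to the channel configured with DbEnv::set_msgfile; the free() call follows the note above that the returned structure is allocated for, and must be freed by, the caller:

    #include <stdio.h>
    #include <stdlib.h>
    #include <db_cxx.h>

    int report_cache(DbEnv *dbenv)
    {
        DB_MPOOL_STAT *gsp;

        // Global statistics only; passing NULL skips the per-file array.
        int ret = dbenv->memp_stat(&gsp, NULL, 0);
        if (ret != 0)
            return (ret);
        printf("caches: %lu, total size: %luGB + %lu bytes\n",
            (unsigned long)gsp->st_ncache, (unsigned long)gsp->st_gbytes,
            (unsigned long)gsp->st_bytes);
        free(gsp);

        // Full diagnostic dump, covering everything DB_STAT_ALL includes.
        return (dbenv->memp_stat_print(DB_STAT_ALL));
    }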

Class

DbEnv, DbMpoolFile

See Also

@@ -136,6 +169,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/memp_sync.html b/db/docs/api_cxx/memp_sync.html index bd7c004af..8c103f972 100644 --- a/db/docs/api_cxx/memp_sync.html +++ b/db/docs/api_cxx/memp_sync.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::memp_sync - + -

DbEnv::memp_sync

API -Ref -
+Ref +


@@ -45,9 +44,8 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

lsn
-The purpose of the lsn parameter is to enable a transaction +
+
lsn
The purpose of the lsn parameter is to enable a transaction manager to ensure, as part of a checkpoint, that all pages modified by a certain time have been written to disk.

All modified pages with a log sequence number (DbLsn) less @@ -60,8 +58,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the DbEnv::memp_sync function was called without logging having been +
+
EINVAL
If the DbEnv::memp_sync function was called without logging having been initialized in the environment; or if an invalid flag value or parameter was specified.
@@ -74,6 +72,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/memp_trickle.html b/db/docs/api_cxx/memp_trickle.html index add205158..f29f588ff 100644 --- a/db/docs/api_cxx/memp_trickle.html +++ b/db/docs/api_cxx/memp_trickle.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::memp_trickle - + -

DbEnv::memp_trickle

API -Ref -
+Ref +


@@ -34,6 +33,29 @@ backing files.

The purpose of the DbEnv::memp_trickle function is to enable a memory pool manager to ensure that a page is always available for reading in new information without having to wait for a write.

+

The DbEnv::memp_trickle method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+

Parameters

+
+
nwrotep
The nwrotep parameter references memory into which + the number of pages written to reach the specified +percentage is copied. +
percent
The percent parameter is the percent of the pages in the cache +that should be clean. +
+
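A hedged one-call sketch; the 20 percent target is illustrative only:

    #include <db_cxx.h>

    int trickle_writes(DbEnv *dbenv)
    {
        int nwrote;

        // Ask that at least 20 percent of the cache's pages be clean;
        // nwrote reports how many pages were written to get there.
        return (dbenv->memp_trickle(20, &nwrote));
    }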

Errors

+

The DbEnv::memp_trickle method +may fail and throw +DbException, +encapsulating one of the following non-zero errors, or return one of +the following non-zero errors:

+
+
EINVAL
An +invalid flag value or parameter was specified. +

Class

DbEnv, DbMpoolFile @@ -43,6 +65,6 @@ information without having to wait for a write.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/mempfile_class.html b/db/docs/api_cxx/mempfile_class.html index 929db7d94..7faa44a89 100644 --- a/db/docs/api_cxx/mempfile_class.html +++ b/db/docs/api_cxx/mempfile_class.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbMpoolFile - + -

DbMpoolFile

API -Ref -
+Ref +


@@ -68,6 +67,6 @@ calls in a purely C++ application.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/object.html b/db/docs/api_cxx/object.html new file mode 100644 index 000000000..6484fc53e --- /dev/null +++ b/db/docs/api_cxx/object.html @@ -0,0 +1,33 @@ + + + +C API (Version: 4.3.14) + + + + +Home
+All Methods
+Permuted Index
+

+Cursors
+Databases
+Environments
+Exceptions
+Key/Data Pairs
+Locking
+Logging
+Memory Pool
+Replication
+Sequences
+Transactions
+

+Dbm/Ndbm
+Hsearch
+

+Reference
+Utilities
+ + diff --git a/db/docs/api_cxx/pindex.src b/db/docs/api_cxx/pindex.src index 2246cc9cd..20ca35452 100644 --- a/db/docs/api_cxx/pindex.src +++ b/db/docs/api_cxx/pindex.src @@ -1,45 +1,27 @@ -__APIREL__/api_cxx/db_associate.html__OCT__2 @Db::associate -__APIREL__/api_cxx/db_associate.html__OCT__3 @DB_DBT_APPMALLOC -__APIREL__/api_cxx/db_associate.html__OCT__4 @DB_DONOTINDEX +__APIREL__/api_cxx/db_associate.html__OCT__2 @DB_DBT_APPMALLOC +__APIREL__/api_cxx/db_associate.html__OCT__3 @DB_DONOTINDEX __APIREL__/api_cxx/db_associate.html__OCT__DB_CREATE Db::associate@DB_CREATE __APIREL__/api_cxx/db_associate.html__OCT__DB_AUTO_COMMIT Db::associate@DB_AUTO_COMMIT -__APIREL__/api_cxx/db_class.html__OCT__2 @Db __APIREL__/api_cxx/db_class.html__OCT__DB_CXX_NO_EXCEPTIONS Db@DB_CXX_NO_EXCEPTIONS __APIREL__/api_cxx/db_class.html__OCT__DB_XA_CREATE Db@DB_XA_CREATE -__APIREL__/api_cxx/db_close.html__OCT__2 @Db::close __APIREL__/api_cxx/db_close.html__OCT__DB_NOSYNC Db::close@DB_NOSYNC -__APIREL__/api_cxx/db_cursor.html__OCT__2 @Db::cursor +__APIREL__/api_cxx/db_cursor.html__OCT__DB_DEGREE_2 Db::cursor@DB_DEGREE_2 __APIREL__/api_cxx/db_cursor.html__OCT__DB_DIRTY_READ Db::cursor@DB_DIRTY_READ __APIREL__/api_cxx/db_cursor.html__OCT__DB_WRITECURSOR Db::cursor@DB_WRITECURSOR -__APIREL__/api_cxx/db_del.html__OCT__2 @Db::del __APIREL__/api_cxx/db_del.html__OCT__DB_AUTO_COMMIT Db::del@DB_AUTO_COMMIT -__APIREL__/api_cxx/db_err.html__OCT__2 @Db::err -__APIREL__/api_cxx/db_err.html__OCT__3 @Db::errx -__APIREL__/api_cxx/db_get.html__OCT__2 @Db::get -__APIREL__/api_cxx/db_get.html__OCT__3 @Db::pget __APIREL__/api_cxx/db_get.html__OCT__DB_CONSUME Db::get@DB_CONSUME __APIREL__/api_cxx/db_get.html__OCT__DB_CONSUME_WAIT Db::get@DB_CONSUME_WAIT __APIREL__/api_cxx/db_get.html__OCT__DB_GET_BOTH Db::get@DB_GET_BOTH __APIREL__/api_cxx/db_get.html__OCT__DB_SET_RECNO Db::get@DB_SET_RECNO __APIREL__/api_cxx/db_get.html__OCT__DB_AUTO_COMMIT Db::get@DB_AUTO_COMMIT +__APIREL__/api_cxx/db_get.html__OCT__DB_DEGREE_2 Db::get@DB_DEGREE_2 __APIREL__/api_cxx/db_get.html__OCT__DB_DIRTY_READ Db::get@DB_DIRTY_READ __APIREL__/api_cxx/db_get.html__OCT__DB_MULTIPLE Db::get@DB_MULTIPLE __APIREL__/api_cxx/db_get.html__OCT__DB_RMW Db::get@DB_RMW -__APIREL__/api_cxx/db_get_byteswapped.html__OCT__2 @Db::get_byteswapped -__APIREL__/api_cxx/db_get_mpf.html__OCT__2 @Db::get_mpf -__APIREL__/api_cxx/db_get_type.html__OCT__2 @Db::get_type -__APIREL__/api_cxx/db_getenv.html__OCT__2 @Db::getenv -__APIREL__/api_cxx/db_join.html__OCT__2 @Db::join __APIREL__/api_cxx/db_join.html__OCT__DB_JOIN_ITEM Db::join@DB_JOIN_ITEM __APIREL__/api_cxx/db_join.html__OCT__DB_DIRTY_READ Db::join@DB_DIRTY_READ __APIREL__/api_cxx/db_join.html__OCT__DB_RMW Db::join@DB_RMW __APIREL__/api_cxx/db_join.html__OCT__DB_JOIN_NOSORT Db::join@DB_JOIN_NOSORT -__APIREL__/api_cxx/db_key_range.html__OCT__2 @Db::key_range -__APIREL__/api_cxx/db_open.html__OCT__2 @Db::open -__APIREL__/api_cxx/db_open.html__OCT__3 @Db::get_file -__APIREL__/api_cxx/db_open.html__OCT__4 @Db::get_database -__APIREL__/api_cxx/db_open.html__OCT__5 @Db::get_open_flags -__APIREL__/api_cxx/db_open.html__OCT__6 @Db::get_transactional __APIREL__/api_cxx/db_open.html__OCT__DB_AUTO_COMMIT Db::open@DB_AUTO_COMMIT __APIREL__/api_cxx/db_open.html__OCT__DB_CREATE Db::open@DB_CREATE __APIREL__/api_cxx/db_open.html__OCT__DB_DIRTY_READ Db::open@DB_DIRTY_READ @@ -53,108 +35,55 @@ __APIREL__/api_cxx/db_open.html__OCT__DB_HASH Db::open@DB_HASH __APIREL__/api_cxx/db_open.html__OCT__DB_QUEUE Db::open@DB_QUEUE 
__APIREL__/api_cxx/db_open.html__OCT__DB_RECNO Db::open@DB_RECNO __APIREL__/api_cxx/db_open.html__OCT__DB_UNKNOWN Db::open@DB_UNKNOWN -__APIREL__/api_cxx/db_put.html__OCT__2 @Db::put __APIREL__/api_cxx/db_put.html__OCT__DB_APPEND Db::put@DB_APPEND __APIREL__/api_cxx/db_put.html__OCT__DB_NODUPDATA Db::put@DB_NODUPDATA __APIREL__/api_cxx/db_put.html__OCT__DB_NOOVERWRITE Db::put@DB_NOOVERWRITE __APIREL__/api_cxx/db_put.html__OCT__DB_AUTO_COMMIT Db::put@DB_AUTO_COMMIT -__APIREL__/api_cxx/db_remove.html__OCT__2 @Db::remove -__APIREL__/api_cxx/db_rename.html__OCT__2 @Db::rename -__APIREL__/api_cxx/db_set_append_recno.html__OCT__2 @Db::set_append_recno -__APIREL__/api_cxx/db_set_bt_compare.html__OCT__2 @Db::set_bt_compare -__APIREL__/api_cxx/db_set_bt_minkey.html__OCT__2 @Db::set_bt_minkey -__APIREL__/api_cxx/db_set_bt_minkey.html__OCT__3 @Db::get_bt_minkey -__APIREL__/api_cxx/db_set_bt_prefix.html__OCT__2 @Db::set_bt_prefix -__APIREL__/api_cxx/db_set_cachesize.html__OCT__2 @Db::set_cachesize -__APIREL__/api_cxx/db_set_cachesize.html__OCT__3 @Db::get_cachesize -__APIREL__/api_cxx/db_set_dup_compare.html__OCT__2 @Db::set_dup_compare -__APIREL__/api_cxx/db_set_encrypt.html__OCT__2 @Db::set_encrypt -__APIREL__/api_cxx/db_set_encrypt.html__OCT__3 @Db::get_encrypt_flags __APIREL__/api_cxx/db_set_encrypt.html__OCT__DB_ENCRYPT_AES Db::set_encrypt@DB_ENCRYPT_AES -__APIREL__/api_cxx/db_set_errcall.html__OCT__2 @Db::set_errcall -__APIREL__/api_cxx/db_set_errpfx.html__OCT__2 @Db::set_errpfx -__APIREL__/api_cxx/db_set_errpfx.html__OCT__3 @Db::get_errpfx -__APIREL__/api_cxx/db_set_feedback.html__OCT__2 @Db::set_feedback __APIREL__/api_cxx/db_set_feedback.html__OCT__DB_UPGRADE Db::set_feedback@DB_UPGRADE __APIREL__/api_cxx/db_set_feedback.html__OCT__DB_VERIFY Db::set_feedback@DB_VERIFY -__APIREL__/api_cxx/db_set_flags.html__OCT__2 @Db::set_flags -__APIREL__/api_cxx/db_set_flags.html__OCT__3 @Db::get_flags -__APIREL__/api_cxx/db_set_flags.html__OCT__4 database page @checksum +__APIREL__/api_cxx/db_set_flags.html__OCT__2 database page @checksum __APIREL__/api_cxx/db_set_flags.html__OCT__DB_CHKSUM Db::set_flags@DB_CHKSUM -__APIREL__/api_cxx/db_set_flags.html__OCT__5 database @encryption +__APIREL__/api_cxx/db_set_flags.html__OCT__3 database @encryption __APIREL__/api_cxx/db_set_flags.html__OCT__DB_ENCRYPT Db::set_flags@DB_ENCRYPT -__APIREL__/api_cxx/db_set_flags.html__OCT__6 turn off database @durability +__APIREL__/api_cxx/db_set_flags.html__OCT__4 turn off database @durability __APIREL__/api_cxx/db_set_flags.html__OCT__DB_TXN_NOT_DURABLE Db::set_flags@DB_TXN_NOT_DURABLE -__APIREL__/api_cxx/db_set_flags.html__OCT__7 @duplicate data items +__APIREL__/api_cxx/db_set_flags.html__OCT__5 @duplicate data items __APIREL__/api_cxx/db_set_flags.html__OCT__DB_DUP Db::set_flags@DB_DUP -__APIREL__/api_cxx/db_set_flags.html__OCT__8 sorted @duplicate data items +__APIREL__/api_cxx/db_set_flags.html__OCT__6 sorted @duplicate data items __APIREL__/api_cxx/db_set_flags.html__OCT__DB_DUPSORT Db::set_flags@DB_DUPSORT -__APIREL__/api_cxx/db_set_flags.html__OCT__9 accessing Btree records by @record number +__APIREL__/api_cxx/db_set_flags.html__OCT__7 accessing Btree records by @record number __APIREL__/api_cxx/db_set_flags.html__OCT__DB_RECNUM Db::set_flags@DB_RECNUM -__APIREL__/api_cxx/db_set_flags.html__OCT__10 turn off @reverse splits in Btree databases -__APIREL__/api_cxx/db_set_flags.html__OCT__11 turn off reverse @splits in Btree databases +__APIREL__/api_cxx/db_set_flags.html__OCT__8 turn off @reverse splits in Btree 
databases +__APIREL__/api_cxx/db_set_flags.html__OCT__9 turn off reverse @splits in Btree databases __APIREL__/api_cxx/db_set_flags.html__OCT__DB_REVSPLITOFF Db::set_flags@DB_REVSPLITOFF __APIREL__/api_cxx/db_set_flags.html__OCT__DB_DUP Db::set_flags@DB_DUP __APIREL__/api_cxx/db_set_flags.html__OCT__DB_DUPSORT Db::set_flags@DB_DUPSORT -__APIREL__/api_cxx/db_set_flags.html__OCT__12 @renumbering records in Recno databases +__APIREL__/api_cxx/db_set_flags.html__OCT__10 @ordered retrieval of records from Queue databases +__APIREL__/api_cxx/db_set_flags.html__OCT__DB_INORDER Db::set_flags@DB_INORDER +__APIREL__/api_cxx/db_set_flags.html__OCT__11 @renumbering records in Recno databases __APIREL__/api_cxx/db_set_flags.html__OCT__DB_RENUMBER Db::set_flags@DB_RENUMBER -__APIREL__/api_cxx/db_set_flags.html__OCT__13 pre-loading @text files into Recno databases +__APIREL__/api_cxx/db_set_flags.html__OCT__12 pre-loading @text files into Recno databases __APIREL__/api_cxx/db_set_flags.html__OCT__DB_SNAPSHOT Db::set_flags@DB_SNAPSHOT -__APIREL__/api_cxx/db_set_h_ffactor.html__OCT__2 @Db::set_h_ffactor -__APIREL__/api_cxx/db_set_h_ffactor.html__OCT__3 @Db::get_h_ffactor -__APIREL__/api_cxx/db_set_h_hash.html__OCT__2 @Db::set_h_hash -__APIREL__/api_cxx/db_set_h_nelem.html__OCT__2 @Db::set_h_nelem -__APIREL__/api_cxx/db_set_h_nelem.html__OCT__3 @Db::get_h_nelem -__APIREL__/api_cxx/db_set_lorder.html__OCT__2 @Db::set_lorder -__APIREL__/api_cxx/db_set_lorder.html__OCT__3 @Db::get_lorder -__APIREL__/api_cxx/db_set_pagesize.html__OCT__2 @Db::set_pagesize -__APIREL__/api_cxx/db_set_pagesize.html__OCT__3 @Db::get_pagesize -__APIREL__/api_cxx/db_set_paniccall.html__OCT__2 @Db::set_paniccall -__APIREL__/api_cxx/db_set_q_extentsize.html__OCT__2 @Db::set_q_extentsize -__APIREL__/api_cxx/db_set_q_extentsize.html__OCT__3 @Db::get_q_extentsize -__APIREL__/api_cxx/db_set_re_delim.html__OCT__2 @Db::set_re_delim -__APIREL__/api_cxx/db_set_re_delim.html__OCT__3 @Db::get_re_delim -__APIREL__/api_cxx/db_set_re_len.html__OCT__2 @Db::set_re_len -__APIREL__/api_cxx/db_set_re_len.html__OCT__3 @Db::get_re_len -__APIREL__/api_cxx/db_set_re_pad.html__OCT__2 @Db::set_re_pad -__APIREL__/api_cxx/db_set_re_pad.html__OCT__3 @Db::get_re_pad -__APIREL__/api_cxx/db_set_re_source.html__OCT__2 @Db::set_re_source -__APIREL__/api_cxx/db_set_re_source.html__OCT__3 @Db::get_re_source -__APIREL__/api_cxx/db_stat.html__OCT__2 @Db::stat +__APIREL__/api_cxx/db_stat.html__OCT__DB_DEGREE_2 Db::stat@DB_DEGREE_2 +__APIREL__/api_cxx/db_stat.html__OCT__DB_DIRTY_READ Db::stat@DB_DIRTY_READ __APIREL__/api_cxx/db_stat.html__OCT__DB_FAST_STAT Db::stat@DB_FAST_STAT -__APIREL__/api_cxx/db_sync.html__OCT__2 @Db::sync -__APIREL__/api_cxx/db_truncate.html__OCT__2 @Db::truncate +__APIREL__/api_cxx/db_stat.html__OCT__DB_STAT_ALL Db::stat@DB_STAT_ALL __APIREL__/api_cxx/db_truncate.html__OCT__DB_AUTO_COMMIT Db::truncate@DB_AUTO_COMMIT -__APIREL__/api_cxx/db_upgrade.html__OCT__2 @Db::upgrade __APIREL__/api_cxx/db_upgrade.html__OCT__DB_DUPSORT Db::upgrade@DB_DUPSORT __APIREL__/api_cxx/db_upgrade.html__OCT__DB_OLD_VERSION Db::upgrade@DB_OLD_VERSION -__APIREL__/api_cxx/db_verify.html__OCT__2 @Db::verify -__APIREL__/api_cxx/db_verify.html__OCT__3 @DB_VERIFY_BAD +__APIREL__/api_cxx/db_verify.html__OCT__2 @DB_VERIFY_BAD __APIREL__/api_cxx/db_verify.html__OCT__DB_SALVAGE Db::verify@DB_SALVAGE __APIREL__/api_cxx/db_verify.html__OCT__DB_AGGRESSIVE Db::verify@DB_AGGRESSIVE __APIREL__/api_cxx/db_verify.html__OCT__DB_PRINTABLE Db::verify@DB_PRINTABLE 
__APIREL__/api_cxx/db_verify.html__OCT__DB_NOORDERCHK Db::verify@DB_NOORDERCHK __APIREL__/api_cxx/db_verify.html__OCT__DB_ORDERCHKONLY Db::verify@DB_ORDERCHKONLY -__APIREL__/api_cxx/dbt_class.html__OCT__2 @Dbt -__APIREL__/api_cxx/dbt_class.html__OCT__3 @key/data pairs +__APIREL__/api_cxx/dbt_class.html__OCT__2 @key/data pairs __APIREL__/api_cxx/dbt_class.html__OCT__DB_DBT_MALLOC Dbt@DB_DBT_MALLOC __APIREL__/api_cxx/dbt_class.html__OCT__DB_DBT_REALLOC Dbt@DB_DBT_REALLOC __APIREL__/api_cxx/dbt_class.html__OCT__DB_DBT_USERMEM Dbt@DB_DBT_USERMEM __APIREL__/api_cxx/dbt_class.html__OCT__DB_DBT_PARTIAL Dbt@DB_DBT_PARTIAL -__APIREL__/api_cxx/db_fd.html__OCT__2 @Db::fd -__APIREL__/api_cxx/db_set_alloc.html__OCT__2 @Db::set_alloc -__APIREL__/api_cxx/db_set_errfile.html__OCT__2 @Db::set_errfile -__APIREL__/api_cxx/db_set_errfile.html__OCT__3 @Db::get_errfile -__APIREL__/api_cxx/db_set_error_stream.html__OCT__2 @Db::set_error_stream -__APIREL__/api_cxx/dbt_bulk_class.html__OCT__2 @Bulk Retrieval API -__APIREL__/api_cxx/dbc_class.html__OCT__2 @Dbc -__APIREL__/api_cxx/dbc_close.html__OCT__2 @Dbc::close -__APIREL__/api_cxx/dbc_count.html__OCT__2 @Dbc::count -__APIREL__/api_cxx/dbc_del.html__OCT__2 @Dbc::del -__APIREL__/api_cxx/dbc_dup.html__OCT__2 @Dbc::dup __APIREL__/api_cxx/dbc_dup.html__OCT__DB_POSITION Dbc::dup@DB_POSITION -__APIREL__/api_cxx/dbc_get.html__OCT__2 @Dbc::get -__APIREL__/api_cxx/dbc_get.html__OCT__3 @Dbc::pget __APIREL__/api_cxx/dbc_get.html__OCT__DB_CURRENT Dbc::get@DB_CURRENT __APIREL__/api_cxx/dbc_get.html__OCT__DB_FIRST Dbc::get@DB_FIRST __APIREL__/api_cxx/dbc_get.html__OCT__DB_GET_BOTH Dbc::get@DB_GET_BOTH @@ -174,31 +103,16 @@ __APIREL__/api_cxx/dbc_get.html__OCT__DB_DIRTY_READ Dbc::get@DB_DIRTY_READ __APIREL__/api_cxx/dbc_get.html__OCT__DB_MULTIPLE Dbc::get@DB_MULTIPLE __APIREL__/api_cxx/dbc_get.html__OCT__DB_MULTIPLE_KEY Dbc::get@DB_MULTIPLE_KEY __APIREL__/api_cxx/dbc_get.html__OCT__DB_RMW Dbc::get@DB_RMW -__APIREL__/api_cxx/dbc_put.html__OCT__2 @Dbc::put __APIREL__/api_cxx/dbc_put.html__OCT__DB_AFTER Dbc::put@DB_AFTER __APIREL__/api_cxx/dbc_put.html__OCT__DB_BEFORE Dbc::put@DB_BEFORE __APIREL__/api_cxx/dbc_put.html__OCT__DB_CURRENT Dbc::put@DB_CURRENT __APIREL__/api_cxx/dbc_put.html__OCT__DB_KEYFIRST Dbc::put@DB_KEYFIRST __APIREL__/api_cxx/dbc_put.html__OCT__DB_KEYLAST Dbc::put@DB_KEYLAST __APIREL__/api_cxx/dbc_put.html__OCT__DB_NODUPDATA Dbc::put@DB_NODUPDATA -__APIREL__/api_cxx/except_class.html__OCT__2 @DbException -__APIREL__/api_cxx/except_class.html__OCT__3 @DbException::get_env -__APIREL__/api_cxx/except_class.html__OCT__4 @DbException::get_errno -__APIREL__/api_cxx/except_class.html__OCT__5 @DbException::what -__APIREL__/api_cxx/runrec_class.html__OCT__2 @DbRunRecoveryException -__APIREL__/api_cxx/env_class.html__OCT__2 @DbEnv __APIREL__/api_cxx/env_class.html__OCT__DB_CXX_NO_EXCEPTIONS DbEnv@DB_CXX_NO_EXCEPTIONS __APIREL__/api_cxx/env_class.html__OCT__DB_RPCCLIENT DbEnv@DB_RPCCLIENT -__APIREL__/api_cxx/env_close.html__OCT__2 @DbEnv::close -__APIREL__/api_cxx/env_dbremove.html__OCT__2 @DbEnv::dbremove __APIREL__/api_cxx/env_dbremove.html__OCT__DB_AUTO_COMMIT DbEnv::dbremove@DB_AUTO_COMMIT -__APIREL__/api_cxx/env_dbrename.html__OCT__2 @DbEnv::dbrename __APIREL__/api_cxx/env_dbrename.html__OCT__DB_AUTO_COMMIT DbEnv::dbrename@DB_AUTO_COMMIT -__APIREL__/api_cxx/env_err.html__OCT__2 @DbEnv::err -__APIREL__/api_cxx/env_err.html__OCT__3 @DbEnv::errx -__APIREL__/api_cxx/env_open.html__OCT__2 @DbEnv::open -__APIREL__/api_cxx/env_open.html__OCT__3 @DbEnv::get_home 
-__APIREL__/api_cxx/env_open.html__OCT__4 @DbEnv::get_open_flags __APIREL__/api_cxx/env_open.html__OCT__DB_JOINENV DbEnv::open@DB_JOINENV __APIREL__/api_cxx/env_open.html__OCT__DB_INIT_CDB DbEnv::open@DB_INIT_CDB __APIREL__/api_cxx/env_open.html__OCT__DB_INIT_LOCK DbEnv::open@DB_INIT_LOCK @@ -208,7 +122,7 @@ __APIREL__/api_cxx/env_open.html__OCT__DB_INIT_REP DbEnv::open@DB_INIT_REP __APIREL__/api_cxx/env_open.html__OCT__DB_INIT_TXN DbEnv::open@DB_INIT_TXN __APIREL__/api_cxx/env_open.html__OCT__DB_RECOVER DbEnv::open@DB_RECOVER __APIREL__/api_cxx/env_open.html__OCT__DB_RECOVER_FATAL DbEnv::open@DB_RECOVER_FATAL -__APIREL__/api_cxx/env_open.html__OCT__5 use @environment constants in naming +__APIREL__/api_cxx/env_open.html__OCT__2 use @environment constants in naming __APIREL__/api_cxx/env_open.html__OCT__DB_USE_ENVIRON DbEnv::open@DB_USE_ENVIRON __APIREL__/api_cxx/env_open.html__OCT__DB_USE_ENVIRON_ROOT DbEnv::open@DB_USE_ENVIRON_ROOT __APIREL__/api_cxx/env_open.html__OCT__DB_CREATE DbEnv::open@DB_CREATE @@ -216,41 +130,32 @@ __APIREL__/api_cxx/env_open.html__OCT__DB_LOCKDOWN DbEnv::open@DB_LOCKDOWN __APIREL__/api_cxx/env_open.html__OCT__DB_PRIVATE DbEnv::open@DB_PRIVATE __APIREL__/api_cxx/env_open.html__OCT__DB_SYSTEM_MEM DbEnv::open@DB_SYSTEM_MEM __APIREL__/api_cxx/env_open.html__OCT__DB_THREAD DbEnv::open@DB_THREAD -__APIREL__/api_cxx/env_remove.html__OCT__2 @DbEnv::remove +__APIREL__/api_cxx/env_open.html__OCT__3 @DB_VERSION_MISMATCH __APIREL__/api_cxx/env_remove.html__OCT__DB_FORCE DbEnv::remove@DB_FORCE -__APIREL__/api_cxx/env_remove.html__OCT__3 use @environment constants in naming +__APIREL__/api_cxx/env_remove.html__OCT__2 use @environment constants in naming __APIREL__/api_cxx/env_remove.html__OCT__DB_USE_ENVIRON DbEnv::remove@DB_USE_ENVIRON __APIREL__/api_cxx/env_remove.html__OCT__DB_USE_ENVIRON_ROOT DbEnv::remove@DB_USE_ENVIRON_ROOT -__APIREL__/api_cxx/env_set_app_dispatch.html__OCT__2 @DbEnv::set_app_dispatch __APIREL__/api_cxx/env_set_app_dispatch.html__OCT__DB_TXN_BACKWARD_ROLL DbEnv::set_app_dispatch@DB_TXN_BACKWARD_ROLL __APIREL__/api_cxx/env_set_app_dispatch.html__OCT__DB_TXN_FORWARD_ROLL DbEnv::set_app_dispatch@DB_TXN_FORWARD_ROLL __APIREL__/api_cxx/env_set_app_dispatch.html__OCT__DB_TXN_ABORT DbEnv::set_app_dispatch@DB_TXN_ABORT __APIREL__/api_cxx/env_set_app_dispatch.html__OCT__DB_TXN_APPLY DbEnv::set_app_dispatch@DB_TXN_APPLY __APIREL__/api_cxx/env_set_app_dispatch.html__OCT__DB_TXN_PRINT DbEnv::set_app_dispatch@DB_TXN_PRINT -__APIREL__/api_cxx/env_set_cachesize.html__OCT__2 @DbEnv::set_cachesize -__APIREL__/api_cxx/env_set_cachesize.html__OCT__3 @DbEnv::get_cachesize -__APIREL__/api_cxx/env_set_cachesize.html__OCT__4 @DbEnv::get_cachesize_nocache -__APIREL__/api_cxx/env_set_data_dir.html__OCT__2 @DbEnv::set_data_dir -__APIREL__/api_cxx/env_set_data_dir.html__OCT__3 @DbEnv::get_data_dirs -__APIREL__/api_cxx/env_set_encrypt.html__OCT__2 @DbEnv::set_encrypt -__APIREL__/api_cxx/env_set_encrypt.html__OCT__3 @DbEnv::get_encrypt_flags __APIREL__/api_cxx/env_set_encrypt.html__OCT__DB_ENCRYPT_AES DbEnv::set_encrypt@DB_ENCRYPT_AES -__APIREL__/api_cxx/env_set_errcall.html__OCT__2 @DbEnv::set_errcall -__APIREL__/api_cxx/env_set_errpfx.html__OCT__2 @DbEnv::set_errpfx -__APIREL__/api_cxx/env_set_errpfx.html__OCT__3 @DbEnv::get_errpfx -__APIREL__/api_cxx/env_set_feedback.html__OCT__2 @DbEnv::set_feedback __APIREL__/api_cxx/env_set_feedback.html__OCT__DB_RECOVER DbEnv::set_feedback@DB_RECOVER -__APIREL__/api_cxx/env_set_flags.html__OCT__2 @DbEnv::set_flags 
-__APIREL__/api_cxx/env_set_flags.html__OCT__3 @DbEnv::get_flags __APIREL__/api_cxx/env_set_flags.html__OCT__DB_AUTO_COMMIT DbEnv::set_flags@DB_AUTO_COMMIT -__APIREL__/api_cxx/env_set_flags.html__OCT__4 configure @locking for Berkeley DB Concurrent Data Store +__APIREL__/api_cxx/env_set_flags.html__OCT__2 configure @locking for Berkeley DB Concurrent Data Store __APIREL__/api_cxx/env_set_flags.html__OCT__DB_CDB_ALLDB DbEnv::set_flags@DB_CDB_ALLDB -__APIREL__/api_cxx/env_set_flags.html__OCT__5 turn off system @buffering for database files +__APIREL__/api_cxx/env_set_flags.html__OCT__3 turn off system @buffering for database files __APIREL__/api_cxx/env_set_flags.html__OCT__DB_DIRECT_DB DbEnv::set_flags@DB_DIRECT_DB -__APIREL__/api_cxx/env_set_flags.html__OCT__6 turn off system @buffering for log files +__APIREL__/api_cxx/env_set_flags.html__OCT__4 turn off system @buffering for log files __APIREL__/api_cxx/env_set_flags.html__OCT__DB_DIRECT_LOG DbEnv::set_flags@DB_DIRECT_LOG -__APIREL__/api_cxx/env_set_flags.html__OCT__7 automatic @log file removal +__APIREL__/api_cxx/env_set_flags.html__OCT__5 turn off system @buffering for log files +__APIREL__/api_cxx/env_set_flags.html__OCT__DB_DSYNC_LOG DbEnv::set_flags@DB_DSYNC_LOG +__APIREL__/api_cxx/env_set_flags.html__OCT__6 automatic @log file removal __APIREL__/api_cxx/env_set_flags.html__OCT__DB_LOG_AUTOREMOVE DbEnv::set_flags@DB_LOG_AUTOREMOVE +__APIREL__/api_cxx/env_set_flags.html__OCT__7 in memory @logs +__APIREL__/api_cxx/env_set_flags.html__OCT__DB_LOG_INMEMORY DbEnv::set_flags@DB_LOG_INMEMORY +__APIREL__/api_cxx/env_set_flags.html__OCT__DB_LOG_BUFFER_FULL DbEnv::set_flags@DB_LOG_BUFFER_FULL __APIREL__/api_cxx/env_set_flags.html__OCT__8 ignore @locking __APIREL__/api_cxx/env_set_flags.html__OCT__DB_NOLOCKING DbEnv::set_flags@DB_NOLOCKING __APIREL__/api_cxx/env_set_flags.html__OCT__9 turn off database file @memory mapping @@ -265,82 +170,51 @@ __APIREL__/api_cxx/env_set_flags.html__OCT__DB_REGION_INIT DbEnv::set_flags@DB_R __APIREL__/api_cxx/env_set_flags.html__OCT__DB_TIME_NOTGRANTED DbEnv::set_flags@DB_TIME_NOTGRANTED __APIREL__/api_cxx/env_set_flags.html__OCT__13 turn off synchronous @transaction commit __APIREL__/api_cxx/env_set_flags.html__OCT__DB_TXN_NOSYNC DbEnv::set_flags@DB_TXN_NOSYNC -__APIREL__/api_cxx/env_set_flags.html__OCT__14 turn off @durability in the database environment -__APIREL__/api_cxx/env_set_flags.html__OCT__DB_TXN_NOT_DURABLE DbEnv::set_flags@DB_TXN_NOT_DURABLE -__APIREL__/api_cxx/env_set_flags.html__OCT__15 turn off synchronous @transaction commit +__APIREL__/api_cxx/env_set_flags.html__OCT__14 turn off synchronous @transaction commit __APIREL__/api_cxx/env_set_flags.html__OCT__DB_TXN_WRITE_NOSYNC DbEnv::set_flags@DB_TXN_WRITE_NOSYNC -__APIREL__/api_cxx/env_set_flags.html__OCT__16 configure for @stress testing +__APIREL__/api_cxx/env_set_flags.html__OCT__15 configure for @stress testing __APIREL__/api_cxx/env_set_flags.html__OCT__DB_YIELDCPU DbEnv::set_flags@DB_YIELDCPU -__APIREL__/api_cxx/env_set_paniccall.html__OCT__2 @DbEnv::set_paniccall -__APIREL__/api_cxx/env_set_rpc_server.html__OCT__2 @DbEnv::set_rpc_server __APIREL__/api_cxx/env_set_rpc_server.html__OCT__DB_NOSERVER DbEnv::set_rpc_server@DB_NOSERVER __APIREL__/api_cxx/env_set_rpc_server.html__OCT__DB_NOSERVER_ID DbEnv::set_rpc_server@DB_NOSERVER_ID __APIREL__/api_cxx/env_set_rpc_server.html__OCT__DB_NOSERVER_HOME DbEnv::set_rpc_server@DB_NOSERVER_HOME -__APIREL__/api_cxx/env_set_rpc_server.html__OCT__3 @DB_NOSERVER 
-__APIREL__/api_cxx/env_set_rpc_server.html__OCT__4 @DB_NOSERVER_ID -__APIREL__/api_cxx/env_set_shm_key.html__OCT__2 @DbEnv::set_shm_key -__APIREL__/api_cxx/env_set_shm_key.html__OCT__3 @DbEnv::get_shm_key -__APIREL__/api_cxx/env_set_tas_spins.html__OCT__2 @DbEnv::set_tas_spins -__APIREL__/api_cxx/env_set_tas_spins.html__OCT__3 @DbEnv::get_tas_spins -__APIREL__/api_cxx/env_set_timeout.html__OCT__2 @DbEnv::set_timeout -__APIREL__/api_cxx/env_set_timeout.html__OCT__3 @DbEnv::get_timeout +__APIREL__/api_cxx/env_set_rpc_server.html__OCT__2 @DB_NOSERVER +__APIREL__/api_cxx/env_set_rpc_server.html__OCT__3 @DB_NOSERVER_ID __APIREL__/api_cxx/env_set_timeout.html__OCT__DB_SET_LOCK_TIMEOUT DbEnv::set_timeout@DB_SET_LOCK_TIMEOUT __APIREL__/api_cxx/env_set_timeout.html__OCT__DB_SET_TXN_TIMEOUT DbEnv::set_timeout@DB_SET_TXN_TIMEOUT __APIREL__/api_cxx/env_set_timeout.html__OCT__DB_SET_LOCK_TIMEOUT DbEnv::set_timeout@DB_SET_LOCK_TIMEOUT __APIREL__/api_cxx/env_set_timeout.html__OCT__DB_SET_TXN_TIMEOUT DbEnv::set_timeout@DB_SET_TXN_TIMEOUT -__APIREL__/api_cxx/env_set_tmp_dir.html__OCT__2 @DbEnv::set_tmp_dir -__APIREL__/api_cxx/env_set_tmp_dir.html__OCT__3 @temporary files -__APIREL__/api_cxx/env_set_tmp_dir.html__OCT__4 @DbEnv::get_tmp_dir -__APIREL__/api_cxx/env_set_verbose.html__OCT__2 @DbEnv::set_verbose -__APIREL__/api_cxx/env_set_verbose.html__OCT__3 @DbEnv::get_verbose -__APIREL__/api_cxx/env_set_verbose.html__OCT__DB_VERB_CHKPOINT DbEnv::set_verbose@DB_VERB_CHKPOINT +__APIREL__/api_cxx/env_set_tmp_dir.html__OCT__2 @temporary files __APIREL__/api_cxx/env_set_verbose.html__OCT__DB_VERB_DEADLOCK DbEnv::set_verbose@DB_VERB_DEADLOCK __APIREL__/api_cxx/env_set_verbose.html__OCT__DB_VERB_RECOVERY DbEnv::set_verbose@DB_VERB_RECOVERY __APIREL__/api_cxx/env_set_verbose.html__OCT__DB_VERB_REPLICATION DbEnv::set_verbose@DB_VERB_REPLICATION __APIREL__/api_cxx/env_set_verbose.html__OCT__DB_VERB_WAITSFOR DbEnv::set_verbose@DB_VERB_WAITSFOR -__APIREL__/api_cxx/env_strerror.html__OCT__2 @DbEnv::strerror -__APIREL__/api_cxx/env_version.html__OCT__2 @DbEnv::version -__APIREL__/api_cxx/env_version.html__OCT__3 @DbEnv::version -__APIREL__/api_cxx/env_set_errfile.html__OCT__2 @DbEnv::set_errfile -__APIREL__/api_cxx/env_set_errfile.html__OCT__3 @DbEnv::get_errfile -__APIREL__/api_cxx/env_set_alloc.html__OCT__2 @DbEnv::set_alloc -__APIREL__/api_cxx/env_set_error_stream.html__OCT__2 @DbEnv::set_error_stream -__APIREL__/api_cxx/env_set_lk_conflicts.html__OCT__2 @DbEnv::set_lk_conflicts -__APIREL__/api_cxx/env_set_lk_conflicts.html__OCT__3 @DbEnv::get_lk_conflicts -__APIREL__/api_cxx/env_set_lk_detect.html__OCT__2 @DbEnv::set_lk_detect -__APIREL__/api_cxx/env_set_lk_detect.html__OCT__3 @DbEnv::get_lk_detect +__APIREL__/api_cxx/env_stat.html__OCT__DB_STAT_ALL DbEnv::stat_print@DB_STAT_ALL +__APIREL__/api_cxx/env_stat.html__OCT__DB_STAT_SUBSYSTEM DbEnv::stat_print@DB_STAT_SUBSYSTEM __APIREL__/api_cxx/env_set_lk_detect.html__OCT__DB_LOCK_DEFAULT DbEnv::set_lk_detect@DB_LOCK_DEFAULT __APIREL__/api_cxx/env_set_lk_detect.html__OCT__DB_LOCK_EXPIRE DbEnv::set_lk_detect@DB_LOCK_EXPIRE __APIREL__/api_cxx/env_set_lk_detect.html__OCT__DB_LOCK_MAXLOCKS DbEnv::set_lk_detect@DB_LOCK_MAXLOCKS +__APIREL__/api_cxx/env_set_lk_detect.html__OCT__DB_LOCK_MAXWRITE DbEnv::set_lk_detect@DB_LOCK_MAXWRITE __APIREL__/api_cxx/env_set_lk_detect.html__OCT__DB_LOCK_MINLOCKS DbEnv::set_lk_detect@DB_LOCK_MINLOCKS __APIREL__/api_cxx/env_set_lk_detect.html__OCT__DB_LOCK_MINWRITE DbEnv::set_lk_detect@DB_LOCK_MINWRITE 
__APIREL__/api_cxx/env_set_lk_detect.html__OCT__DB_LOCK_OLDEST DbEnv::set_lk_detect@DB_LOCK_OLDEST __APIREL__/api_cxx/env_set_lk_detect.html__OCT__DB_LOCK_RANDOM DbEnv::set_lk_detect@DB_LOCK_RANDOM __APIREL__/api_cxx/env_set_lk_detect.html__OCT__DB_LOCK_YOUNGEST DbEnv::set_lk_detect@DB_LOCK_YOUNGEST -__APIREL__/api_cxx/env_set_lk_max_lockers.html__OCT__2 @DbEnv::set_lk_max_lockers -__APIREL__/api_cxx/env_set_lk_max_lockers.html__OCT__3 @DbEnv::get_lk_max_lockers -__APIREL__/api_cxx/env_set_lk_max_locks.html__OCT__2 @DbEnv::set_lk_max_locks -__APIREL__/api_cxx/env_set_lk_max_locks.html__OCT__3 @DbEnv::get_lk_max_locks -__APIREL__/api_cxx/env_set_lk_max_objects.html__OCT__2 @DbEnv::set_lk_max_objects -__APIREL__/api_cxx/env_set_lk_max_objects.html__OCT__3 @DbEnv::get_lk_max_objects -__APIREL__/api_cxx/lock_class.html__OCT__2 @DbLock -__APIREL__/api_cxx/lock_detect.html__OCT__2 @DbEnv::lock_detect __APIREL__/api_cxx/lock_detect.html__OCT__DB_LOCK_DEFAULT DbEnv::lock_detect@DB_LOCK_DEFAULT __APIREL__/api_cxx/lock_detect.html__OCT__DB_LOCK_EXPIRE DbEnv::lock_detect@DB_LOCK_EXPIRE __APIREL__/api_cxx/lock_detect.html__OCT__DB_LOCK_MAXLOCKS DbEnv::lock_detect@DB_LOCK_MAXLOCKS +__APIREL__/api_cxx/lock_detect.html__OCT__DB_LOCK_MAXWRITE DbEnv::lock_detect@DB_LOCK_MAXWRITE __APIREL__/api_cxx/lock_detect.html__OCT__DB_LOCK_MINLOCKS DbEnv::lock_detect@DB_LOCK_MINLOCKS __APIREL__/api_cxx/lock_detect.html__OCT__DB_LOCK_MINWRITE DbEnv::lock_detect@DB_LOCK_MINWRITE __APIREL__/api_cxx/lock_detect.html__OCT__DB_LOCK_OLDEST DbEnv::lock_detect@DB_LOCK_OLDEST __APIREL__/api_cxx/lock_detect.html__OCT__DB_LOCK_RANDOM DbEnv::lock_detect@DB_LOCK_RANDOM __APIREL__/api_cxx/lock_detect.html__OCT__DB_LOCK_YOUNGEST DbEnv::lock_detect@DB_LOCK_YOUNGEST -__APIREL__/api_cxx/lock_get.html__OCT__2 @DbEnv::lock_get __APIREL__/api_cxx/lock_get.html__OCT__DB_LOCK_NOWAIT DbEnv::lock_get@DB_LOCK_NOWAIT -__APIREL__/api_cxx/lock_id.html__OCT__2 @DbEnv::lock_id -__APIREL__/api_cxx/lock_id_free.html__OCT__2 @DbEnv::lock_id_free -__APIREL__/api_cxx/lock_put.html__OCT__2 @DbEnv::lock_put -__APIREL__/api_cxx/lock_stat.html__OCT__2 @DbEnv::lock_stat __APIREL__/api_cxx/lock_stat.html__OCT__DB_STAT_CLEAR DbEnv::lock_stat@DB_STAT_CLEAR -__APIREL__/api_cxx/lock_vec.html__OCT__2 @DbEnv::lock_vec +__APIREL__/api_cxx/lock_stat.html__OCT__DB_STAT_ALL DbEnv::lock_stat@DB_STAT_ALL +__APIREL__/api_cxx/lock_stat.html__OCT__DB_STAT_LOCK_CONF DbEnv::lock_stat@DB_STAT_LOCK_CONF +__APIREL__/api_cxx/lock_stat.html__OCT__DB_STAT_LOCK_LOCKERS DbEnv::lock_stat@DB_STAT_LOCK_LOCKERS +__APIREL__/api_cxx/lock_stat.html__OCT__DB_STAT_LOCK_OBJECTS DbEnv::lock_stat@DB_STAT_LOCK_OBJECTS +__APIREL__/api_cxx/lock_stat.html__OCT__DB_STAT_LOCK_PARAMS DbEnv::lock_stat@DB_STAT_LOCK_PARAMS __APIREL__/api_cxx/lock_vec.html__OCT__DB_LOCK_NOWAIT DbEnv::lock_vec@DB_LOCK_NOWAIT __APIREL__/api_cxx/lock_vec.html__OCT__op DbEnv::lock_vec@op __APIREL__/api_cxx/lock_vec.html__OCT__DB_LOCK_GET DbEnv::lock_vec@DB_LOCK_GET @@ -357,140 +231,86 @@ __APIREL__/api_cxx/lock_vec.html__OCT__DB_LOCK_IWRITE DbEnv::lock_vec@DB_LOCK_IW __APIREL__/api_cxx/lock_vec.html__OCT__DB_LOCK_IREAD DbEnv::lock_vec@DB_LOCK_IREAD __APIREL__/api_cxx/lock_vec.html__OCT__DB_LOCK_IWR DbEnv::lock_vec@DB_LOCK_IWR __APIREL__/api_cxx/lock_vec.html__OCT__obj DbEnv::lock_vec@obj -__APIREL__/api_cxx/deadlock_class.html__OCT__2 @DbDeadlockException -__APIREL__/api_cxx/lockng_class.html__OCT__2 @DbLockNotGrantedException -__APIREL__/api_cxx/env_set_lg_bsize.html__OCT__2 @DbEnv::set_lg_bsize 
-__APIREL__/api_cxx/env_set_lg_bsize.html__OCT__3 @DbEnv::get_lg_bsize -__APIREL__/api_cxx/env_set_lg_dir.html__OCT__2 @DbEnv::set_lg_dir -__APIREL__/api_cxx/env_set_lg_dir.html__OCT__3 @DbEnv::get_lg_dir -__APIREL__/api_cxx/env_set_lg_max.html__OCT__2 @DbEnv::set_lg_max -__APIREL__/api_cxx/env_set_lg_max.html__OCT__3 @DbEnv::get_lg_max -__APIREL__/api_cxx/env_set_lg_regionmax.html__OCT__2 @DbEnv::set_lg_regionmax -__APIREL__/api_cxx/env_set_lg_regionmax.html__OCT__3 @DbEnv::get_lg_regionmax -__APIREL__/api_cxx/log_archive.html__OCT__2 @DbEnv::log_archive __APIREL__/api_cxx/log_archive.html__OCT__DB_ARCH_ABS DbEnv::log_archive@DB_ARCH_ABS __APIREL__/api_cxx/log_archive.html__OCT__DB_ARCH_DATA DbEnv::log_archive@DB_ARCH_DATA __APIREL__/api_cxx/log_archive.html__OCT__DB_ARCH_LOG DbEnv::log_archive@DB_ARCH_LOG __APIREL__/api_cxx/log_archive.html__OCT__DB_ARCH_REMOVE DbEnv::log_archive@DB_ARCH_REMOVE -__APIREL__/api_cxx/log_compare.html__OCT__2 @DbEnv::log_compare -__APIREL__/api_cxx/log_cursor.html__OCT__2 @DbEnv::log_cursor -__APIREL__/api_cxx/log_file.html__OCT__2 @DbEnv::log_file -__APIREL__/api_cxx/log_flush.html__OCT__2 @DbEnv::log_flush -__APIREL__/api_cxx/log_put.html__OCT__2 @DbEnv::log_put __APIREL__/api_cxx/log_put.html__OCT__DB_FLUSH DbEnv::log_put@DB_FLUSH -__APIREL__/api_cxx/log_stat.html__OCT__2 @DbEnv::log_stat __APIREL__/api_cxx/log_stat.html__OCT__DB_STAT_CLEAR DbEnv::log_stat@DB_STAT_CLEAR -__APIREL__/api_cxx/logc_class.html__OCT__2 @DbLogc -__APIREL__/api_cxx/logc_close.html__OCT__2 @DbLogc::close -__APIREL__/api_cxx/logc_get.html__OCT__2 @DbLogc::get +__APIREL__/api_cxx/log_stat.html__OCT__DB_STAT_ALL DbEnv::log_stat@DB_STAT_ALL __APIREL__/api_cxx/logc_get.html__OCT__DB_CURRENT DbLogc::get@DB_CURRENT __APIREL__/api_cxx/logc_get.html__OCT__DB_FIRST DbLogc::get@DB_FIRST __APIREL__/api_cxx/logc_get.html__OCT__DB_LAST DbLogc::get@DB_LAST __APIREL__/api_cxx/logc_get.html__OCT__DB_NEXT DbLogc::get@DB_NEXT __APIREL__/api_cxx/logc_get.html__OCT__DB_PREV DbLogc::get@DB_PREV __APIREL__/api_cxx/logc_get.html__OCT__DB_SET DbLogc::get@DB_SET -__APIREL__/api_cxx/lsn_class.html__OCT__2 @DbLsn -__APIREL__/api_cxx/env_set_mp_mmapsize.html__OCT__2 @DbEnv::set_mp_mmapsize -__APIREL__/api_cxx/env_set_mp_mmapsize.html__OCT__3 @DbEnv::get_mp_mmapsize -__APIREL__/api_cxx/memp_set_flags.html__OCT__2 @DbMpoolFile::set_flags -__APIREL__/api_cxx/memp_set_flags.html__OCT__3 @DbMpoolFile::get_flags -__APIREL__/api_cxx/memp_set_flags.html__OCT__DB_MPOOL_NOFILE DbMpoolFile::set_flags@DB_MPOOL_NOFILE -__APIREL__/api_cxx/memp_set_maxsize.html__OCT__2 @DbMpoolFile::set_maxsize -__APIREL__/api_cxx/memp_set_maxsize.html__OCT__3 @DbMpoolFile::get_maxsize -__APIREL__/api_cxx/memp_set_priority.html__OCT__2 @DbMpoolFile::set_priority -__APIREL__/api_cxx/memp_set_priority.html__OCT__3 @DbMpoolFile::get_priority -__APIREL__/api_cxx/memp_set_priority.html__OCT__DB_PRIORITY_VERY_LOW DbMpoolFile::set_priority@DB_PRIORITY_VERY_LOW -__APIREL__/api_cxx/memp_set_priority.html__OCT__DB_PRIORITY_LOW DbMpoolFile::set_priority@DB_PRIORITY_LOW -__APIREL__/api_cxx/memp_set_priority.html__OCT__DB_PRIORITY_DEFAULT DbMpoolFile::set_priority@DB_PRIORITY_DEFAULT -__APIREL__/api_cxx/memp_set_priority.html__OCT__DB_PRIORITY_HIGH DbMpoolFile::set_priority@DB_PRIORITY_HIGH -__APIREL__/api_cxx/memp_set_priority.html__OCT__DB_PRIORITY_VERY_HIGH DbMpoolFile::set_priority@DB_PRIORITY_VERY_HIGH -__APIREL__/api_cxx/memp_stat.html__OCT__2 @DbEnv::memp_stat -__APIREL__/api_cxx/memp_stat.html__OCT__DB_STAT_CLEAR DbEnv::memp_stat@DB_STAT_CLEAR 
-__APIREL__/api_cxx/memp_sync.html__OCT__2 @DbEnv::memp_sync -__APIREL__/api_cxx/memp_trickle.html__OCT__2 @DbEnv::memp_trickle -__APIREL__/api_cxx/mempfile_class.html__OCT__2 @DbMpoolFile -__APIREL__/api_cxx/memp_fclose.html__OCT__2 @DbMpoolFile::close -__APIREL__/api_cxx/memp_fcreate.html__OCT__2 @DbEnv::memp_fcreate -__APIREL__/api_cxx/memp_fget.html__OCT__2 @DbMpoolFile::get -__APIREL__/api_cxx/memp_fget.html__OCT__3 @DB_PAGE_NOTFOUND +__APIREL__/api_cxx/memp_fget.html__OCT__2 @DB_PAGE_NOTFOUND __APIREL__/api_cxx/memp_fget.html__OCT__DB_MPOOL_CREATE DbMpoolFile::get@DB_MPOOL_CREATE __APIREL__/api_cxx/memp_fget.html__OCT__DB_MPOOL_LAST DbMpoolFile::get@DB_MPOOL_LAST __APIREL__/api_cxx/memp_fget.html__OCT__DB_MPOOL_NEW DbMpoolFile::get@DB_MPOOL_NEW -__APIREL__/api_cxx/memp_fopen.html__OCT__2 @DbMpoolFile::open __APIREL__/api_cxx/memp_fopen.html__OCT__DB_CREATE DbMpoolFile::open@DB_CREATE -__APIREL__/api_cxx/memp_fopen.html__OCT__3 turn off system @buffering +__APIREL__/api_cxx/memp_fopen.html__OCT__2 turn off system @buffering __APIREL__/api_cxx/memp_fopen.html__OCT__DB_DIRECT DbMpoolFile::open@DB_DIRECT __APIREL__/api_cxx/memp_fopen.html__OCT__DB_NOMMAP DbMpoolFile::open@DB_NOMMAP __APIREL__/api_cxx/memp_fopen.html__OCT__DB_ODDFILESIZE DbMpoolFile::open@DB_ODDFILESIZE __APIREL__/api_cxx/memp_fopen.html__OCT__DB_RDONLY DbMpoolFile::open@DB_RDONLY -__APIREL__/api_cxx/memp_fput.html__OCT__2 @DbMpoolFile::put __APIREL__/api_cxx/memp_fput.html__OCT__DB_MPOOL_CLEAN DbMpoolFile::put@DB_MPOOL_CLEAN __APIREL__/api_cxx/memp_fput.html__OCT__DB_MPOOL_DIRTY DbMpoolFile::put@DB_MPOOL_DIRTY __APIREL__/api_cxx/memp_fput.html__OCT__DB_MPOOL_DISCARD DbMpoolFile::put@DB_MPOOL_DISCARD -__APIREL__/api_cxx/memp_fset.html__OCT__2 @DbMpoolFile::set __APIREL__/api_cxx/memp_fset.html__OCT__DB_MPOOL_CLEAN DbMpoolFile::set@DB_MPOOL_CLEAN __APIREL__/api_cxx/memp_fset.html__OCT__DB_MPOOL_DIRTY DbMpoolFile::set@DB_MPOOL_DIRTY __APIREL__/api_cxx/memp_fset.html__OCT__DB_MPOOL_DISCARD DbMpoolFile::set@DB_MPOOL_DISCARD -__APIREL__/api_cxx/memp_fsync.html__OCT__2 @DbMpoolFile::sync -__APIREL__/api_cxx/memp_register.html__OCT__2 @DbEnv::memp_register -__APIREL__/api_cxx/memp_set_clear_len.html__OCT__2 @DbMpoolFile::set_clear_len -__APIREL__/api_cxx/memp_set_clear_len.html__OCT__3 @DbMpoolFile::get_clear_len -__APIREL__/api_cxx/memp_set_fileid.html__OCT__2 @DbMpoolFile::set_fileid -__APIREL__/api_cxx/memp_set_fileid.html__OCT__3 @DbMpoolFile::get_fileid -__APIREL__/api_cxx/memp_set_ftype.html__OCT__2 @DbMpoolFile::set_ftype -__APIREL__/api_cxx/memp_set_ftype.html__OCT__3 @DbMpoolFile::get_ftype -__APIREL__/api_cxx/memp_set_lsn_offset.html__OCT__2 @DbMpoolFile::set_lsn_offset -__APIREL__/api_cxx/memp_set_lsn_offset.html__OCT__3 @DbMpoolFile::get_lsn_offset -__APIREL__/api_cxx/memp_set_pgcookie.html__OCT__2 @DbMpoolFile::set_pgcookie -__APIREL__/api_cxx/memp_set_pgcookie.html__OCT__3 @DbMpoolFile::get_pgcookie -__APIREL__/api_cxx/memp_class.html__OCT__2 @DbMemoryException -__APIREL__/api_cxx/rep_elect.html__OCT__2 @DbEnv::rep_elect -__APIREL__/api_cxx/rep_elect.html__OCT__3 @DB_REP_UNAVAIL -__APIREL__/api_cxx/rep_limit.html__OCT__2 @DbEnv::set_rep_limit -__APIREL__/api_cxx/rep_limit.html__OCT__3 @DbEnv::get_rep_limit -__APIREL__/api_cxx/rep_message.html__OCT__2 @DbEnv::rep_process_message +__APIREL__/api_cxx/memp_set_flags.html__OCT__DB_MPOOL_NOFILE DbMpoolFile::set_flags@DB_MPOOL_NOFILE +__APIREL__/api_cxx/memp_set_flags.html__OCT__DB_MPOOL_UNLINK DbMpoolFile::set_flags@DB_MPOOL_UNLINK 
+__APIREL__/api_cxx/memp_set_priority.html__OCT__DB_PRIORITY_VERY_LOW DbMpoolFile::set_priority@DB_PRIORITY_VERY_LOW +__APIREL__/api_cxx/memp_set_priority.html__OCT__DB_PRIORITY_LOW DbMpoolFile::set_priority@DB_PRIORITY_LOW +__APIREL__/api_cxx/memp_set_priority.html__OCT__DB_PRIORITY_DEFAULT DbMpoolFile::set_priority@DB_PRIORITY_DEFAULT +__APIREL__/api_cxx/memp_set_priority.html__OCT__DB_PRIORITY_HIGH DbMpoolFile::set_priority@DB_PRIORITY_HIGH +__APIREL__/api_cxx/memp_set_priority.html__OCT__DB_PRIORITY_VERY_HIGH DbMpoolFile::set_priority@DB_PRIORITY_VERY_HIGH +__APIREL__/api_cxx/memp_stat.html__OCT__DB_STAT_CLEAR DbEnv::memp_stat@DB_STAT_CLEAR +__APIREL__/api_cxx/memp_stat.html__OCT__DB_STAT_CLEAR DbEnv::memp_stat@DB_STAT_CLEAR +__APIREL__/api_cxx/memp_stat.html__OCT__DB_STAT_ALL DbEnv::memp_stat@DB_STAT_ALL +__APIREL__/api_cxx/memp_stat.html__OCT__DB_STAT_MEMP_HASH DbEnv::memp_stat@DB_STAT_MEMP_HASH +__APIREL__/api_cxx/rep_elect.html__OCT__2 @DB_REP_UNAVAIL __APIREL__/api_cxx/rep_message.html__OCT__DB_REP_DUPMASTER DbEnv::rep_process_message@DB_REP_DUPMASTER __APIREL__/api_cxx/rep_message.html__OCT__DB_REP_HOLDELECTION DbEnv::rep_process_message@DB_REP_HOLDELECTION __APIREL__/api_cxx/rep_message.html__OCT__DB_REP_ISPERM DbEnv::rep_process_message@DB_REP_ISPERM __APIREL__/api_cxx/rep_message.html__OCT__DB_REP_NEWMASTER DbEnv::rep_process_message@DB_REP_NEWMASTER __APIREL__/api_cxx/rep_message.html__OCT__DB_REP_NEWSITE DbEnv::rep_process_message@DB_REP_NEWSITE __APIREL__/api_cxx/rep_message.html__OCT__DB_REP_NOTPERM DbEnv::rep_process_message@DB_REP_NOTPERM -__APIREL__/api_cxx/rep_message.html__OCT__DB_REP_OUTDATED DbEnv::rep_process_message@DB_REP_OUTDATED -__APIREL__/api_cxx/rep_start.html__OCT__2 @DbEnv::rep_start +__APIREL__/api_cxx/rep_message.html__OCT__DB_REP_STARTUPDONE DbEnv::rep_process_message@DB_REP_STARTUPDONE __APIREL__/api_cxx/rep_start.html__OCT__DB_REP_CLIENT DbEnv::rep_start@DB_REP_CLIENT -__APIREL__/api_cxx/rep_start.html__OCT__DB_REP_LOGSONLY DbEnv::rep_start@DB_REP_LOGSONLY __APIREL__/api_cxx/rep_start.html__OCT__DB_REP_MASTER DbEnv::rep_start@DB_REP_MASTER -__APIREL__/api_cxx/rep_stat.html__OCT__2 @DbEnv::rep_stat __APIREL__/api_cxx/rep_stat.html__OCT__DB_STAT_CLEAR DbEnv::rep_stat@DB_STAT_CLEAR -__APIREL__/api_cxx/rep_transport.html__OCT__2 @DbEnv::set_rep_transport -__APIREL__/api_cxx/rep_transport.html__OCT__3 @DB_EID_BROADCAST +__APIREL__/api_cxx/rep_stat.html__OCT__DB_STAT_ALL DbEnv::rep_stat@DB_STAT_ALL +__APIREL__/api_cxx/rep_transport.html__OCT__2 @DB_EID_BROADCAST __APIREL__/api_cxx/rep_transport.html__OCT__DB_REP_NOBUFFER DbEnv::set_rep_transport@DB_REP_NOBUFFER __APIREL__/api_cxx/rep_transport.html__OCT__DB_REP_PERMANENT DbEnv::set_rep_transport@DB_REP_PERMANENT -__APIREL__/api_cxx/env_set_tx_max.html__OCT__2 @DbEnv::set_tx_max -__APIREL__/api_cxx/env_set_tx_max.html__OCT__3 @DbEnv::get_tx_max -__APIREL__/api_cxx/env_set_tx_timestamp.html__OCT__2 @DbEnv::set_tx_timestamp -__APIREL__/api_cxx/env_set_tx_timestamp.html__OCT__3 @DbEnv::get_tx_timestamp -__APIREL__/api_cxx/txn_abort.html__OCT__2 @DbTxn::abort -__APIREL__/api_cxx/txn_begin.html__OCT__2 @DbEnv::txn_begin +__APIREL__/api_cxx/seq_get.html__OCT__DB_AUTO_COMMIT DbSequence::get@DB_AUTO_COMMIT +__APIREL__/api_cxx/seq_get.html__OCT__DB_TXN_NOSYNC DbSequence::get@DB_TXN_NOSYNC +__APIREL__/api_cxx/seq_open.html__OCT__DB_AUTO_COMMIT DbSequence::open@DB_AUTO_COMMIT +__APIREL__/api_cxx/seq_open.html__OCT__DB_CREATE DbSequence::open@DB_CREATE +__APIREL__/api_cxx/seq_open.html__OCT__DB_EXCL 
DbSequence::open@DB_EXCL +__APIREL__/api_cxx/seq_open.html__OCT__DB_THREAD DbSequence::open@DB_THREAD +__APIREL__/api_cxx/seq_set_flags.html__OCT__DB_SEQ_DEC DbSequence::set_flags@DB_SEQ_DEC +__APIREL__/api_cxx/seq_set_flags.html__OCT__DB_SEQ_INC DbSequence::set_flags@DB_SEQ_INC +__APIREL__/api_cxx/seq_set_flags.html__OCT__DB_SEQ_WRAP DbSequence::set_flags@DB_SEQ_WRAP +__APIREL__/api_cxx/seq_stat.html__OCT__DB_STAT_CLEAR DbSequence::stat@DB_STAT_CLEAR +__APIREL__/api_cxx/seq_stat.html__OCT__DB_STAT_CLEAR DbSequence::stat@DB_STAT_CLEAR +__APIREL__/api_cxx/seq_remove.html__OCT__DB_AUTO_COMMIT DbSequence::remove@DB_AUTO_COMMIT +__APIREL__/api_cxx/seq_remove.html__OCT__DB_TXN_NOSYNC DbSequence::remove@DB_TXN_NOSYNC +__APIREL__/api_cxx/txn_begin.html__OCT__DB_DEGREE_2 DbEnv::txn_begin@DB_DEGREE_2 __APIREL__/api_cxx/txn_begin.html__OCT__DB_DIRTY_READ DbEnv::txn_begin@DB_DIRTY_READ __APIREL__/api_cxx/txn_begin.html__OCT__DB_TXN_NOSYNC DbEnv::txn_begin@DB_TXN_NOSYNC __APIREL__/api_cxx/txn_begin.html__OCT__DB_TXN_NOWAIT DbEnv::txn_begin@DB_TXN_NOWAIT __APIREL__/api_cxx/txn_begin.html__OCT__DB_TXN_SYNC DbEnv::txn_begin@DB_TXN_SYNC -__APIREL__/api_cxx/txn_checkpoint.html__OCT__2 @DbEnv::txn_checkpoint __APIREL__/api_cxx/txn_checkpoint.html__OCT__DB_FORCE DbEnv::txn_checkpoint@DB_FORCE -__APIREL__/api_cxx/txn_class.html__OCT__2 @DbTxn -__APIREL__/api_cxx/txn_commit.html__OCT__2 @DbTxn::commit __APIREL__/api_cxx/txn_commit.html__OCT__DB_TXN_NOSYNC DbTxn::commit@DB_TXN_NOSYNC __APIREL__/api_cxx/txn_commit.html__OCT__DB_TXN_SYNC DbTxn::commit@DB_TXN_SYNC -__APIREL__/api_cxx/txn_discard.html__OCT__2 @DbTxn::discard -__APIREL__/api_cxx/txn_id.html__OCT__2 @DbTxn::id -__APIREL__/api_cxx/txn_prepare.html__OCT__2 @DbTxn::prepare -__APIREL__/api_cxx/txn_prepare.html__OCT__3 @DB_XIDDATASIZE -__APIREL__/api_cxx/txn_recover.html__OCT__2 @DbEnv::txn_recover +__APIREL__/api_cxx/txn_prepare.html__OCT__2 @DB_XIDDATASIZE __APIREL__/api_cxx/txn_recover.html__OCT__DB_FIRST DbEnv::txn_recover@DB_FIRST __APIREL__/api_cxx/txn_recover.html__OCT__DB_NEXT DbEnv::txn_recover@DB_NEXT -__APIREL__/api_cxx/txn_set_timeout.html__OCT__2 @DbTxn::set_timeout __APIREL__/api_cxx/txn_set_timeout.html__OCT__DB_SET_LOCK_TIMEOUT DbTxn::set_timeout@DB_SET_LOCK_TIMEOUT __APIREL__/api_cxx/txn_set_timeout.html__OCT__DB_SET_TXN_TIMEOUT DbTxn::set_timeout@DB_SET_TXN_TIMEOUT -__APIREL__/api_cxx/txn_stat.html__OCT__2 @DbEnv::txn_stat __APIREL__/api_cxx/txn_stat.html__OCT__DB_STAT_CLEAR DbEnv::txn_stat@DB_STAT_CLEAR +__APIREL__/api_cxx/txn_stat.html__OCT__DB_STAT_ALL DbEnv::txn_stat@DB_STAT_ALL diff --git a/db/docs/api_cxx/rep_elect.html b/db/docs/api_cxx/rep_elect.html index eda1a9ff3..ee50c2c19 100644 --- a/db/docs/api_cxx/rep_elect.html +++ b/db/docs/api_cxx/rep_elect.html @@ -1,31 +1,30 @@ - - + + Berkeley DB: DbEnv::rep_elect - + -

DbEnv::rep_elect

API -Ref -
+Ref +


 #include <db_cxx.h>
 

int -DbEnv::rep_elect(int nsites, - int priority, u_int32_t timeout, int *envid); +DbEnv::rep_elect(int nsites, int nvotes, + int priority, u_int32_t timeout, int *envid, u_int32_t flags);


Description: DbEnv::rep_elect

@@ -39,45 +38,44 @@ selected master, in accordance with the results of this election.

The thread of control that calls the DbEnv::rep_elect method must not be the thread of control that processes incoming messages; processing the incoming messages is necessary to successfully complete an election.

-

The DbEnv::rep_elect method -either returns a non-zero error value -or throws an exception that encapsulates a non-zero error value on -failure, and returns 0 on success. -

Parameters

-

-

envid
-The envid parameter references memory into which +
+
envid
The envid parameter references memory into which the newly elected master's ID is copied. -

nsites
-The nsites parameter indicates the number of environments that +
nsites
The nsites parameter indicates the number of environments that the application believes are in the replication group. This number is used by Berkeley DB to avoid having two masters active simultaneously, even in the case of a network partition. During an election, a new master -cannot be elected unless more than half of nsites agree on -the new master. Thus, in the face of a network partition, the side of -the partition with more than half the environments will elect a new -master and continue, while the environments communicating with fewer -than half the other environments will fail to find a new master. -

priority
-The priority parameter is the priority of this environment. It +cannot be elected unless more than half of nsites agree on the +new master. Thus, in the face of a network partition, the side of the +partition with more than half the environments will elect a new master +and continue, while the environments communicating with fewer than half +the other environments will fail to find a new master. +
nvotes
The nvotes parameter indicates the number of votes required by +the application to successfully elect a new master. It must be a +positive integer, no greater than nsites, or 0 if the election +should use a simple majority of the nsites value as the +requirement. A warning is given if half or fewer votes are required to +win an election as that can potentially lead to multiple masters in the +face of a network partition. +
priority
The priority parameter is the priority of this environment. It must be a positive integer, or 0 if this environment is not permitted to become a master (see Replication environment priorities for more information). -

timeout
-The timeout parameter specifies a timeout period for an election. +
timeout
The timeout parameter specifies a timeout period for an election. If the election has not completed after timeout microseconds, the election will fail. +
flags
The flags parameter is currently unused, and must be set to 0.
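As a rough, illustrative sketch only (not part of this page): assuming an open, replication-configured DbEnv pointer named dbenv in a five-site group, an election using the signature shown above might look like this.

    int master_eid;
    /* nsites = 5, nvotes = 0 (simple majority), priority = 100,
     * timeout = 2000000 microseconds, flags must be 0. */
    int ret = dbenv->rep_elect(5, 0, 100, 2000000, &master_eid, 0);
    if (ret == DB_REP_UNAVAIL) {
        /* No master was elected before the timeout; retry or back off.
         * (With C++ exceptions enabled this may instead be thrown,
         * encapsulated in a DbException.) */
    }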
- +

Errors

The DbEnv::rep_elect method may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

DB_REP_UNAVAIL
The replication group was unable to elect a master, or was unable to +
+
DB_REP_UNAVAIL
The replication group was unable to elect a master, or was unable to complete the election in the specified timeout period.

@@ -89,6 +87,6 @@ complete the election in the specified timeout period.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/rep_limit.html b/db/docs/api_cxx/rep_limit.html index f058f9d10..62dcd713f 100644 --- a/db/docs/api_cxx/rep_limit.html +++ b/db/docs/api_cxx/rep_limit.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_rep_limit - + -

DbEnv::set_rep_limit

API -Ref -
+Ref +


@@ -31,7 +30,6 @@ DbEnv::get_rep_limit(u_int32_t *gbytesp, u_int32_t *bytesp);
 


Description: DbEnv::set_rep_limit

-

The DbEnv::set_rep_limit method imposes a byte-count limit on the amount of data that will be transmitted from a site in a single call to DbEnv::rep_process_message method.

The DbEnv::set_rep_limit method configures a database environment, not only operations @@ -44,12 +42,10 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

bytes
-The gbytes and bytes parameters specify the maximum +
+
bytes
The gbytes and bytes parameters specify the maximum number of bytes that will be sent in a single call to DbEnv::rep_process_message method. -

gbytes
-The gbytes and bytes parameters specify the maximum +
gbytes
The gbytes and bytes parameters specify the maximum number of bytes that will be sent in a single call to DbEnv::rep_process_message method.

@@ -62,12 +58,10 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

bytesp
-The bytesp parameter references memory into which +
+
bytesp
The bytesp parameter references memory into which the additional bytes of memory in the current transmit limit is copied. -

gbytesp
-The gbytesp parameter references memory into which +
gbytesp
The gbytesp parameter references memory into which the gigabytes of memory in the current transmit limit is copied.
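A minimal usage sketch, assuming an open DbEnv pointer named dbenv (illustrative only, not taken from this page):

    /* Send at most 512KB from this site in response to a single
     * DbEnv::rep_process_message call. */
    dbenv->set_rep_limit(0, 512 * 1024);

    /* Read the current limit back. */
    u_int32_t gbytes, bytes;
    dbenv->get_rep_limit(&gbytes, &bytes);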

@@ -79,6 +73,6 @@ The gbytesp parameter references memory into which

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/rep_list.html b/db/docs/api_cxx/rep_list.html index 21c9cb0bf..e24bbc562 100644 --- a/db/docs/api_cxx/rep_list.html +++ b/db/docs/api_cxx/rep_list.html @@ -1,25 +1,25 @@ - + Berkeley DB: Berkeley DB: Replication and Related Methods - +

Berkeley DB: Replication and Related Methods

- + - - - - - - + + + + + +
Replication and Related MethodsDescription
DbEnv::set_rep_transportConfigure replication transport
DbEnv::rep_electHold a replication election
DbEnv::set_rep_limitLimit data sent in response to a single message
DbEnv::rep_process_messageProcess a replication message
DbEnv::rep_startConfigure an environment for replication
DbEnv::rep_statReplication statistics
DbEnv::rep_electHold a replication election
DbEnv::rep_process_messageProcess a replication message
DbEnv::rep_startConfigure an environment for replication
DbEnv::rep_statReplication statistics
DbEnv::set_rep_limitLimit data sent in response to a single message
DbEnv::set_rep_transportConfigure replication transport
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/rep_message.html b/db/docs/api_cxx/rep_message.html index d2f2c7409..ef8085881 100644 --- a/db/docs/api_cxx/rep_message.html +++ b/db/docs/api_cxx/rep_message.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::rep_process_message - + -

DbEnv::rep_process_message

API -Ref -
+Ref +


@@ -35,61 +34,51 @@ processed using the same DbEnv handle.
 a single thread of control process all messages, only that all threads
 of control processing messages use the same handle.

The DbEnv::rep_process_message method has additional return values:

-

-

DB_REP_DUPMASTER
-

-The DbEnv::rep_process_message method +

+
DB_REP_DUPMASTER
+

The DbEnv::rep_process_message method will either return DB_REP_DUPMASTER or throw an exception that encapsulates DB_REP_DUPMASTER if the replication group has more than one master. The application should reconfigure itself as a client by calling the DbEnv::rep_start method, and then call for an election by calling DbEnv::rep_elect.

-

DB_REP_HOLDELECTION
-

-The DbEnv::rep_process_message method +

DB_REP_HOLDELECTION
+

The DbEnv::rep_process_message method will either return DB_REP_HOLDELECTION or throw an exception that encapsulates DB_REP_HOLDELECTION if an election is needed. The application should call for an election by calling DbEnv::rep_elect.

-

DB_REP_ISPERM
-

-The DbEnv::rep_process_message method will return DB_REP_ISPERM if processing this message results in the processing of records +

DB_REP_ISPERM
+

The DbEnv::rep_process_message method will return DB_REP_ISPERM if processing this message results in the processing of records that are permanent. The maximum LSN of the permanent records stored is returned.

-

DB_REP_NEWMASTER
-

-The DbEnv::rep_process_message method will return DB_REP_NEWMASTER if a new master has been elected. +

DB_REP_NEWMASTER
+

The DbEnv::rep_process_message method will return DB_REP_NEWMASTER if a new master has been elected. The envid parameter contains the environment ID of the new master. If the recipient of this error return has been made master, it is the application's responsibility to begin acting as the master environment.

-

DB_REP_NEWSITE
-

-The DbEnv::rep_process_message method will return DB_REP_NEWSITE if the system received contact information from a new environment. +

DB_REP_NEWSITE
+

The DbEnv::rep_process_message method will return DB_REP_NEWSITE if the system received contact information from a new environment. The rec parameter contains the opaque data specified in the cdata parameter to the DbEnv::rep_start. The application should take whatever action is needed to establish a communication channel with this new environment.

-

DB_REP_NOTPERM
-

-The DbEnv::rep_process_message method will return DB_REP_NOTPERM if a message carrying a DB_REP_PERMANENT flag was processed +

DB_REP_NOTPERM
+

The DbEnv::rep_process_message method will return DB_REP_NOTPERM if a message carrying a DB_REP_PERMANENT flag was processed successfully, but was not written to disk. The LSN of this record is returned. The application should take whatever action is deemed necessary to retain its recoverability characteristics.

-

DB_REP_OUTDATED
-

-The DbEnv::rep_process_message method -will either return DB_REP_OUTDATED or -throw an exception that encapsulates DB_REP_OUTDATED if the current environment's logs are too far out of date with respect -to the master to be automatically synchronized. -The application should copy over a hot backup of the environment, run -recovery, and restart the client. +

DB_REP_STARTUPDONE
+

The DbEnv::rep_process_message method will return DB_REP_STARTUPDONE if the system detects that a client completed startup synchronization. +The client application knows that this client is now processing +live log records received from the master.
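To make the dispatch concrete, here is a hedged sketch (not part of the original page); ret is assumed to hold the value returned by a DbEnv::rep_process_message call made with the control, rec, envid, and ret_lsn parameters described below.

    switch (ret) {
    case 0:
        break;                  /* message handled, nothing further to do */
    case DB_REP_NEWSITE:
        /* rec holds the new site's cdata; establish a connection to it. */
        break;
    case DB_REP_HOLDELECTION:
        /* Call DbEnv::rep_elect to choose a new master. */
        break;
    case DB_REP_NEWMASTER:
        /* envid now identifies the new master; if it is this site,
         * begin acting as the master environment. */
        break;
    case DB_REP_ISPERM:
    case DB_REP_NOTPERM:
        /* ret_lsn is meaningful; apply the application's durability policy. */
        break;
    case DB_REP_DUPMASTER:
        /* Reconfigure as a client with DbEnv::rep_start, then call for
         * an election with DbEnv::rep_elect. */
        break;
    case DB_REP_STARTUPDONE:
        /* This client has finished startup synchronization. */
        break;
    default:
        break;                  /* other non-zero values are error returns */
    }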

Unless otherwise specified, the DbEnv::rep_process_message method @@ -98,27 +87,23 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

control
-The control parameter should reference a copy of the +
+
control
The control parameter should reference a copy of the control parameter specified by Berkeley DB on the sending environment. -

envid
-The envid parameter should contain the local identifier that +
envid
The envid parameter should contain the local identifier that corresponds to the environment that sent the message to be processed (see Replication environment IDs for more information). -

rec
-The rec parameter should reference a copy of the rec +
rec
The rec parameter should reference a copy of the rec parameter specified by Berkeley DB on the sending environment. -

ret_lsn
-If DbEnv::rep_process_message method returns DB_REP_NOTPERM then the -ret_lsnp parameter will -contain the log sequence number of this permanent log message that could -not be written to disk. If DbEnv::rep_process_message method returns -DB_REP_ISPERM then the ret_lsnp parameter will contain largest log sequence number of the -permanent records that are now written to disk as a result of processing -this message. In all other cases the value of ret_lsnp is undefined. +
ret_lsn
If the DbEnv::rep_process_message method returns DB_REP_NOTPERM then the ret_lsnp +parameter will contain the log sequence number of this permanent log +message that could not be written to disk. If the DbEnv::rep_process_message method +returns DB_REP_ISPERM then the ret_lsnp parameter will contain +the largest log sequence number of the permanent records that are now +written to disk as a result of processing this message. In all other +cases the value of ret_lsnp is undefined.

Class

@@ -129,6 +114,6 @@ this message. In all other cases the value of ret_lsnp is undefined.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/rep_start.html b/db/docs/api_cxx/rep_start.html index d5ad2119b..22f4766ca 100644 --- a/db/docs/api_cxx/rep_start.html +++ b/db/docs/api_cxx/rep_start.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::rep_start - + -

DbEnv::rep_start

API -Ref -
+Ref +


@@ -44,19 +43,16 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

cdata
-The cdata parameter is an opaque data item that is sent over +
+
cdata
The cdata parameter is an opaque data item that is sent over the communication infrastructure when the client or master comes online (see Connecting to a new site for more information). If no such information is useful, cdata should be NULL. -

flags
-The flags parameter must be set to one of the following values: -

-

DB_REP_CLIENT
Configure the environment as a replication client. -

DB_REP_LOGSONLY
Configure the environment as a log files-only client. -

DB_REP_MASTER
Configure the environment as a replication master. +
flags
The flags parameter must be set to one of the following values: +
+
DB_REP_CLIENT
Configure the environment as a replication client. +
DB_REP_MASTER
Configure the environment as a replication master.
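An illustrative sketch (not from the original page), assuming an open DbEnv pointer named dbenv and a hypothetical NUL-terminated address string local_addr used as the opaque cdata (string.h is assumed for strlen):

    Dbt cdata;
    cdata.set_data((void *)local_addr);
    cdata.set_size((u_int32_t)strlen(local_addr) + 1);

    /* Join the replication group as a client. */
    dbenv->rep_start(&cdata, DB_REP_CLIENT);

    /* If this site later wins an election, reconfigure it as the master. */
    dbenv->rep_start(NULL, DB_REP_MASTER);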

Errors

@@ -65,8 +61,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the database environment was not already configured to communicate with +
+
EINVAL
If the database environment was not already configured to communicate with a replication group by a call to DbEnv::set_rep_transport; the database environment was not already opened; or if an invalid flag value or parameter was specified. @@ -80,6 +76,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/rep_stat.html b/db/docs/api_cxx/rep_stat.html index c79f68019..20f6c7fac 100644 --- a/db/docs/api_cxx/rep_stat.html +++ b/db/docs/api_cxx/rep_stat.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::rep_stat - + -

DbEnv::rep_stat

API -Ref -
+Ref +


@@ -25,6 +24,9 @@
 

int DbEnv::rep_stat(DB_REP_STAT **statp, u_int32_t flags); +

+int +DbEnv::rep_stat_print(u_int32_t flags);


Description: DbEnv::rep_stat

@@ -40,11 +42,10 @@ responsible for deallocating the memory. To deallocate the memory, free the memory reference; references inside the returned memory need not be individually freed.

The following DB_REP_STAT fields will be filled in:

-

+
u_int32_t st_status;
The current replication mode. Set to DB_REP_MASTER if the environment is a replication master, DB_REP_CLIENT if the -environment is a replication client, DB_REP_LOGSONLY if the -environment is a log-files-only replica, or 0 if replication is not +environment is a replication client, or 0 if replication is not configured.
DB_LSN st_next_lsn;
In replication environments configured as masters, the next LSN expected. In replication environments configured as clients, the next LSN to be used. @@ -54,8 +55,7 @@ being waited for, or 0 if no log records are currently missing.
u_int32_t st_env_id;
The current environment ID.
u_int32_t st_env_priority;
The current environment priority.
u_int32_t st_gen;
The current generation number. -
u_int32_t st_in_recovery;
The site is currently in client recovery. When this field is set, LSN -values are not authoritative. +
u_int32_t st_egen;
The current election generation number.
u_int32_t st_log_duplicated;
The number of duplicate log records received.
u_int32_t st_log_queued;
The number of log records currently queued.
u_int32_t st_log_queued_max;
The maximum number of log records ever queued at once. @@ -72,6 +72,14 @@ values are not authoritative.
u_int32_t st_newsites;
The number of new site messages received.
int st_nsites;
The number of sites believed to be in the replication group.
u_int32_t st_outdated;
The number of outdated conditions detected. +
u_int32_t st_next_pg;
The next page number we expect to receive. +
u_int32_t st_waiting_pg;
The page number of the first page we have after missing pages +being waited for, or 0 if no pages are currently missing. +
u_int32_t st_pg_duplicated;
The number of duplicate pages received. +
u_int32_t st_pg_records;
The number of pages received and stored. +
u_int32_t st_pg_requested;
The number of pages missed and requested from the master. +
u_int32_t st_startup_complete;
The client site has completed its startup procedures and is now +handling live records from the master.
u_int32_t st_txns_applied;
The number of transactions applied.
u_int32_t st_elections;
The number of elections held.
u_int32_t st_elections_won;
The number of elections won. @@ -80,6 +88,7 @@ values are not authoritative.
u_int32_t st_election_gen;
The election generation number.
DB_LSN st_election_lsn;
The maximum LSN of election winner.
u_int32_t st_election_nsites;
The number of sites expected to participate in elections. +
u_int32_t st_election_nvotes;
The number of votes required to complete the election.
u_int32_t st_nthrottles;
Transmission limited. This indicates the number of times that data transmission was stopped to limit the amount of data sent in response to a single call to DbEnv::rep_process_message. @@ -93,15 +102,13 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to 0 or +
+
flags
The flags parameter must be set to 0 or the following value: -

-

DB_STAT_CLEAR
Reset statistics after returning their values. +
+
DB_STAT_CLEAR
Reset statistics after returning their values.
-

statp
-The statp parameter references memory into which +
statp
The statp parameter references memory into which a pointer to the allocated statistics structure is copied.
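For illustration only (assuming an open DbEnv pointer named dbenv and the standard C headers):

    DB_REP_STAT *sp;
    dbenv->rep_stat(&sp, 0);
    printf("generation %lu, log records queued %lu\n",
        (unsigned long)sp->st_gen, (unsigned long)sp->st_log_queued);
    free(sp);   /* allocated with malloc(3) unless DbEnv::set_alloc was used */

    /* Alternatively, have the library format the same information. */
    dbenv->rep_stat_print(DB_STAT_ALL);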

Errors

@@ -110,11 +117,34 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

+

Description: DbEnv::rep_stat_print

+

The DbEnv::rep_stat_print method returns the +replication subsystem statistical information, as described for the DbEnv::rep_stat method. +The information is printed to a specified output channel (see the +DbEnv::set_msgfile method for more information), or passed to an +application callback function (see the DbEnv::set_msgcall method for +more information).

+

The DbEnv::rep_stat_print method may not be called before the DbEnv::open method has +been called.

+

The DbEnv::rep_stat_print method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+

Parameters

+
+
flags
The flags parameter must be set to 0 or +the following value: +
+
DB_STAT_ALL
Display all available information. +
+
+

Class

DbEnv

See Also

@@ -123,6 +153,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/rep_transport.html b/db/docs/api_cxx/rep_transport.html index 4ba4e5f46..06a0fff2a 100644 --- a/db/docs/api_cxx/rep_transport.html +++ b/db/docs/api_cxx/rep_transport.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::set_rep_transport - + -

DbEnv::set_rep_transport

API -Ref -
+Ref +


@@ -44,43 +43,40 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

envid
-The envid parameter is the local environment's ID. It must be +
+
envid
The envid parameter is the local environment's ID. It must be a positive integer and uniquely identify this Berkeley DB database environment (see Replication environment IDs for more information). -

send
-The send callback function is used to transmit data using the +
send
The send callback function is used to transmit data using the replication application's communication infrastructure. The parameters to send are as follows: -

-

dbenv
The dbenv parameter is the enclosing database environment handle. -

control
The control parameter is the first of the two data elements to be +
+
dbenv
The dbenv parameter is the enclosing database environment handle. +
control
The control parameter is the first of the two data elements to be transmitted by the send function. -

rec
The rec parameter is the second of the two data elements to be +
rec
The rec parameter is the second of the two data elements to be transmitted by the send function. -

lsnp
If the type of message to be sent has an LSN associated with it, then -the lsnp parameter -contains the LSN of the record being sent. This LSN can be used to -determine that certain records have been processed successfully by -clients. -

envid
The envid parameter is a positive integer identifier that +
lsnp
If the type of message to be sent has an LSN associated with it, then +the lsnp parameter contains the LSN of the record being sent. +This LSN can be used to determine that certain records have been +processed successfully by clients. +
envid
The envid parameter is a positive integer identifier that specifies the replication environment to which the message should be sent (see Replication environment IDs for more information). - +

The special identifier DB_EID_BROADCAST indicates that a message should be broadcast to every environment in the replication group. The application may use a true broadcast protocol or may send the message in sequence to each machine with which it is in communication. In both cases, the sending site should not be asked to process the message.

-

flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_REP_NOBUFFER
The record being sent should be transmitted immediately and not buffered +
+
DB_REP_NOBUFFER
The record being sent should be transmitted immediately and not buffered or delayed. -

DB_REP_PERMANENT
The record being sent is critical for maintaining database integrity +
DB_REP_PERMANENT
The record being sent is critical for maintaining database integrity (for example, the message includes a transaction commit). The application should take appropriate action to enforce the reliability guarantees it has chosen, such as waiting for acknowledgement from one @@ -103,8 +99,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -116,6 +112,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/runrec_class.html b/db/docs/api_cxx/runrec_class.html index 11b464440..ddbf03eb0 100644 --- a/db/docs/api_cxx/runrec_class.html +++ b/db/docs/api_cxx/runrec_class.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbRunRecoveryException - + -

DbRunRecoveryException

API -Ref -
+Ref +


@@ -33,12 +32,13 @@ how it is used by the various Berkeley DB classes.

down the application and run recovery (for example, if Berkeley DB is unable to allocate heap memory). When a fatal error occurs in Berkeley DB, methods will throw a DbRunRecoveryException, at which point all -subsequent database calls will also fail in the same way. When this +subsequent Berkeley DB calls will also fail in the same way. When this occurs, recovery should be performed.

+

The DbException errno value is set to DB_RUNRECOVERY.
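A brief, hypothetical sketch of catching the exception (db, key, and data are assumed to be an open Db handle and two Dbt objects; exceptions are left enabled on the handle and the standard C headers are assumed):

    try {
        db->put(NULL, &key, &data, 0);
    } catch (DbRunRecoveryException &e) {
        /* Every subsequent Berkeley DB call will fail the same way:
         * close the handles, run recovery (for example, reopen the
         * environment with DB_RECOVER), and restart. */
        fprintf(stderr, "fatal error: %s\n", e.what());
    }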


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/seq_class.html b/db/docs/api_cxx/seq_class.html new file mode 100644 index 000000000..22f3e7c3a --- /dev/null +++ b/db/docs/api_cxx/seq_class.html @@ -0,0 +1,71 @@ + + + + + + +Berkeley DB: DbSequence + + + + + + + +
+

DbSequence

+
+API +Ref
+


+ +

+#include <db_cxx.h>
+

+class DbSequence { +public: + DbSequence(Db *db, u_int32_t flags); + ~DbSequence(); +

+ DB_SEQUENCE *DbSequence::get_DB(); + const DB *DbSequence::get_const_DB() const; + static DbSequence *DbSequence::get_DbSequence(DB *db); + static const DbSequence *DbSequence::get_const_DbSequence(const DB *db); + ... +}; +

+
+

Description: DbSequence

+

The DbSequence handle is the handle used to manipulate a +sequence object. A sequence object is stored in a record in a +database.

+

DbSequence handles are free-threaded if the DB_THREAD +flag is specified to the DbSequence::open method when the sequence is opened. +Once the DbSequence::close or DbSequence::remove methods are called, the +handle may not be accessed again, regardless of the method's return.

+

Each handle opened on a sequence may maintain a separate cache of values +which are returned to the application using the DbSequence::get method +either singly or in groups depending on its delta parameter.

+

The +constructor creates a +DbSequence object that serves as the handle for a sequence. +Calling the DbSequence::close or DbSequence::remove methods will discard the +handle.

+

Parameters

+
+
db
The db parameter is an open database handle which holds the +persistent data for the sequence. +
flags
The flags parameter is currently unused, and must be set to 0. +
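A small, illustrative sketch (db is assumed to be an already-open Db handle that stores the sequence record):

    DbSequence seq(db, 0);      /* flags must be 0 */
    /* ... configure the handle, open it, call DbSequence::get ... */
    seq.close(0);               /* the handle may not be used afterward */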
+
+

Class

+DbSequence +

See Also

+Sequences and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_cxx/seq_close.html b/db/docs/api_cxx/seq_close.html new file mode 100644 index 000000000..43fe881b5 --- /dev/null +++ b/db/docs/api_cxx/seq_close.html @@ -0,0 +1,64 @@ + + + + + + +Berkeley DB: DbSequence::close + + + + + + + +
+

DbSequence::close

+
+API +Ref
+


+ +

+#include <db_cxx.h>
+

+int +DbSequence::close(u_int32_t flags); +

+
+

Description: DbSequence::close

+

The DbSequence::close method closes the sequence handle. Any unused cached +values are lost.

+

The DbSequence handle may not be accessed again after DbSequence::close is +called, regardless of its return.

+

The DbSequence::close method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+

Parameters

+
+
flags
The flags parameter is currently unused, and must be set to 0. +
+

Errors

+

The DbSequence::close method +may fail and throw +DbException, +encapsulating one of the following non-zero errors, or return one of +the following non-zero errors:

+
+
EINVAL
An +invalid flag value or parameter was specified. +
+
+

Class

+DbSequence +

See Also

+Sequences and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_cxx/seq_get.html b/db/docs/api_cxx/seq_get.html new file mode 100644 index 000000000..1d1c83d4c --- /dev/null +++ b/db/docs/api_cxx/seq_get.html @@ -0,0 +1,76 @@ + + + + + + +Berkeley DB: DbSequence::get + + + + + + + +
+

DbSequence::get

+
+API +Ref
+


+ +

+#include <db_cxx.h>
+

+int +DbSequence::get(DbTxn *txnid, int32_t delta, db_seq_t *retp, u_int32_t flags); +

+
+

Description: DbSequence::get

+

The DbSequence::get method returns the next available element in the sequence +and changes the sequence value by delta. The value of +delta must be greater than zero. If there are enough cached +values in the sequence handle then they will be returned. Otherwise the +next value will be fetched from the database and incremented +(decremented) by enough to cover the delta and the next batch +of cached values.

+

If the underlying database handle was opened in a transaction then +either the txnid parameter must be a valid transaction handle or +DB_AUTO_COMMIT must be specified. The txnid handle must be NULL +if the sequence handle was opened with a non-zero cache size.

+

For maximum concurrency, a non-zero cache size should be specified prior +to opening the sequence handle and DB_AUTO_COMMIT | DB_TXN_NOSYNC should +be specified on each DbSequence::get method call.

+

The DbSequence::get method will return EINVAL if the record in the database is not a valid sequence record, +or the sequence has overflowed its range. +

+

Parameters

+
+
delta
Specifies the amount to increment or decrement the sequence. +
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +or more of the following values: +
+
DB_AUTO_COMMIT
If the database +must be updated the update will be enclosed in a transaction +and will be recoverable. +
DB_TXN_NOSYNC
If a DB_AUTO_COMMIT +triggers a transaction, do not synchronously flush the log. +
+
retp
retp points to the memory to hold the return value from +the sequence. +
txnid
If the operation is to be transaction-protected, +the txnid parameter is a transaction handle returned from +DbEnv::txn_begin; otherwise, NULL. +
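A hedged usage sketch (seq is assumed to be a DbSequence handle opened with a non-zero cache size over a transactional database):

    db_seq_t next_id;
    /* Take one value; let the library wrap any needed database update in
     * its own transaction without a synchronous log flush, as suggested
     * above. */
    seq.get(NULL, 1, &next_id, DB_AUTO_COMMIT | DB_TXN_NOSYNC);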
+
+

Class

+DbSequence +

See Also

+Sequences and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_cxx/seq_init_value.html b/db/docs/api_cxx/seq_init_value.html new file mode 100644 index 000000000..a98c4b2a4 --- /dev/null +++ b/db/docs/api_cxx/seq_init_value.html @@ -0,0 +1,64 @@ + + + + + + +Berkeley DB: DbSequence::init_value + + + + + + + +
+

DbSequence::init_value

+
+API +Ref
+


+ +

+#include <db_cxx.h>
+

+int +DbSequence::init_value(db_seq_t value); +

+
+

Description: DbSequence::init_value

+

Set the initial value for a sequence. This call is only effective when +the sequence is being created.

+

The DbSequence::init_value method may not be called after the DbSequence::open +method is called.

+

The DbSequence::init_value method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+

Parameters

+
+
value
The initial value to set. +
+

Errors

+

The DbSequence::init_value method +may fail and throw +DbException, +encapsulating one of the following non-zero errors, or return one of +the following non-zero errors:

+
+
EINVAL
An +invalid flag value or parameter was specified. +
+
+

Class

+DbSequence +

See Also

+Sequences and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_cxx/seq_list.html b/db/docs/api_cxx/seq_list.html new file mode 100644 index 000000000..754e9c4b3 --- /dev/null +++ b/db/docs/api_cxx/seq_list.html @@ -0,0 +1,34 @@ + + + + + + +Berkeley DB: Berkeley DB: Sequences and Related Methods + + + + +

Berkeley DB: Sequences and Related Methods

+ + + + + + + + + + + + + + + + + + +
Sequences and Related MethodsDescription
DbSequenceCreate a sequence handle
DbSequence::closeClose a sequence
DbSequence::getGet the next sequence element(s)
DbSequence::get_dbpReturn a handle for the underlying sequence database
DbSequence::get_cachesizeReturn the cache size of a sequence
DbSequence::get_flagsReturn the flags for a sequence
DbSequence::get_rangeReturn the range for a sequence
DbSequence::get_keyReturn the key for a sequence
DbSequence::init_valueSet the initial value of a sequence
DbSequence::openOpen a sequence
DbSequence::removeRemove a sequence
DbSequence::set_cachesizeSet the cache size of a sequence
DbSequence::set_flagsSet the flags for a sequence
DbSequence::set_rangeSet the range for a sequence
DbSequence::statReturn sequence statistics
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_cxx/seq_open.html b/db/docs/api_cxx/seq_open.html new file mode 100644 index 000000000..7f70b2d69 --- /dev/null +++ b/db/docs/api_cxx/seq_open.html @@ -0,0 +1,108 @@ + + + + + + +Berkeley DB: DbSequence::open + + + + + + + +
+

DbSequence::open

+
+API +Ref
+


+ +

+#include <db_cxx.h>
+

+int +DbSequence::open(DbTxn *txnid, Dbt *key, u_int32_t flags); +

+int +DbSequence::get_dbp(Db **dbp); +

+int +DbSequence::get_key(Dbt *key); +

+
+

Description: DbSequence::open

+

The DbSequence::open method opens the sequence represented by the key. +The key must be compatible with the underlying database specified in the +corresponding call to db_sequence_create.

+

Parameters

+
+
key
The key specifies which record in the database stores +the persistent sequence data. +
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +or more of the following values: +
+
DB_AUTO_COMMIT
Enclose the DbSequence::open call within a transaction. If the call +succeeds, the open operation will be recoverable. If the +DB_CREATE flag is specified and the call fails, no sequence will +have been created. +
DB_CREATE
Create the sequence. If the sequence does not already exist and the +DB_CREATE flag is not specified, the DbSequence::open will fail. +
DB_EXCL
Return an error if the sequence already exists. The DB_EXCL +flag is only meaningful when specified with the DB_CREATE +flag. +
DB_THREAD
Cause the DbSequence handle returned by DbSequence::open to be +free-threaded; that is, usable by multiple threads within a +single address space. +
+
txnid
If the operation is to be transaction-protected, +(other than by specifying the DB_AUTO_COMMIT flag), +the txnid parameter is a transaction handle returned from +DbEnv::txn_begin; otherwise, NULL. Note that transactionally protected operations on a DbSequence +handle require the DbSequence handle itself be transactionally +protected during its open if the open creates the sequence. +
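For illustration, a sketch that creates and opens a sequence under a hypothetical key named "order_id" (db is assumed to be an open, transactional Db handle):

    DbSequence seq(db, 0);
    seq.set_cachesize(1000);            /* cache 1000 values in this handle */
    seq.init_value(1);                  /* only effective if the open
                                         * creates the sequence */

    Dbt key((void *)"order_id", sizeof("order_id"));
    seq.open(NULL, &key, DB_AUTO_COMMIT | DB_CREATE | DB_THREAD);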
+
+

Description: DbSequence::get_dbp

+

The DbSequence::get_dbp method returns the database handle.

+

Parameters

+
+
dbp
The dbp parameter references memory into which +a pointer to the database handle is copied. +
+

The DbSequence::get_dbp method may be called at any time during the life of the +application.

+

The DbSequence::get_dbp method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+
+

Description: DbSequence::get_key

+

The DbSequence::get_key method returns the key for the sequence.

+

Parameters

+
+
key
The key parameter references memory into which +a pointer to the key data is copied. +
+

The DbSequence::get_key method may be called at any time during the life of the +application.

+

The DbSequence::get_key method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+
+

Class

+DbSequence +

See Also

+Sequences and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_cxx/seq_remove.html b/db/docs/api_cxx/seq_remove.html new file mode 100644 index 000000000..f953d5427 --- /dev/null +++ b/db/docs/api_cxx/seq_remove.html @@ -0,0 +1,75 @@ + + + + + + +Berkeley DB: DbSequence::remove + + + + + + + +
+

DbSequence::remove

+
+API +Ref
+


+ +

+#include <db_cxx.h>
+

+int +DbSequence::remove(DbTxn *txnid, u_int32_t flags); +

+
+

Description: DbSequence::remove

+

The DbSequence::remove method removes the sequence from the database. This +method should not be called if there are other open handles on this +sequence.

+

The DbSequence handle may not be accessed again after DbSequence::remove is +called, regardless of its return.

+

The DbSequence::remove method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+

Parameters

+
+
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +or more of the following values: +
+
DB_AUTO_COMMIT
The remove +will be enclosed in a transaction and will be recoverable. +
DB_TXN_NOSYNC
If a DB_AUTO_COMMIT +triggers a transaction, do not synchronously flush the log. +
+
txnid
If the operation is to be transaction-protected, +the txnid parameter is a transaction handle returned from +DbEnv::txn_begin; otherwise, NULL. +
+

Errors

+

The DbSequence::remove method +may fail and throw +DbException, +encapsulating one of the following non-zero errors, or return one of +the following non-zero errors:

+
+
EINVAL
An +invalid flag value or parameter was specified. +
+
+

Class

+DbSequence +

See Also

+Sequences and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_cxx/seq_set_cachesize.html b/db/docs/api_cxx/seq_set_cachesize.html new file mode 100644 index 000000000..d26648d2a --- /dev/null +++ b/db/docs/api_cxx/seq_set_cachesize.html @@ -0,0 +1,80 @@ + + + + + + +Berkeley DB: DbSequence::set_cachesize + + + + + + + +
+

DbSequence::set_cachesize

+
+API +Ref
+


+ +

+#include <db_cxx.h>
+

+int +DbSequence::set_cachesize(int32_t size); +

+int DbSequence::get_cachesize(int32_t *sizep); +

+
+

Description: DbSequence::set_cachesize

+

Configure the number of elements cached by a sequence handle.

+

The DbSequence::set_cachesize method may not be called after the +DbSequence::open method is called.

+

The DbSequence::set_cachesize method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+

Parameters

+
+
size
The number of elements in the cache. +
+

Errors

+

The DbSequence::set_cachesize method +may fail and throw +DbException, +encapsulating one of the following non-zero errors, or return one of +the following non-zero errors:

+
+
EINVAL
An +invalid flag value or parameter was specified. +
+
+

Description: DbSequence::get_cachesize

+

The DbSequence::get_cachesize method returns the current cache size.

+

The DbSequence::get_cachesize method may be called at any time during the life of the +application.

+

The DbSequence::get_cachesize method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+

Parameters

+
+
sizep
The DbSequence::get_cachesize method returns the +current cache size in sizep. +
+
+

Class

+DbSequence +

See Also

+Sequences and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_cxx/seq_set_flags.html b/db/docs/api_cxx/seq_set_flags.html new file mode 100644 index 000000000..b0889a035 --- /dev/null +++ b/db/docs/api_cxx/seq_set_flags.html @@ -0,0 +1,93 @@ + + + + + + +Berkeley DB: DbSequence::set_flags + + + + + + + +
+

DbSequence::set_flags

+
+API +Ref
+


+ +

+#include <db_cxx.h>
+

+int +DbSequence::set_flags(u_int32_t flags); +

+int DbSequence::get_flags(u_int32_t *flagsp); +

+
+

Description: DbSequence::set_flags

+

Configure a sequence. The flags are only effective when creating a +sequence. Calling DbSequence::set_flags is additive; there is no way +to clear flags.

+

The DbSequence::set_flags method may not be called after the +DbSequence::open method is called.

+

The DbSequence::set_flags method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+

Parameters

+
+
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +or more of the following values: +
+
DB_SEQ_DEC
Specify that the sequence should be decremented. +
+
+
DB_SEQ_INC
Specify that the sequence should be incremented. This is the default. +
+
+
DB_SEQ_WRAP
Specify that the sequence should wrap around when it is incremented +(decremented) past the specified maximum (minimum) value. +
+
+

Errors

+

The DbSequence::set_flags method +may fail and throw +DbException, +encapsulating one of the following non-zero errors, or return one of +the following non-zero errors:

+
+
EINVAL
An +invalid flag value or parameter was specified. +
+
+

Description: DbSequence::get_flags

+

The DbSequence::get_flags method returns the current flags.

+

The DbSequence::get_flags method may be called at any time during the life of the +application.

+

The DbSequence::get_flags method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+

Parameters

+
+
flagsp
The DbSequence::get_flags method returns the +current flags in flagsp. +
+
+

Class

+DbSequence +

See Also

+Sequences and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_cxx/seq_set_range.html b/db/docs/api_cxx/seq_set_range.html new file mode 100644 index 000000000..a33ed26c4 --- /dev/null +++ b/db/docs/api_cxx/seq_set_range.html @@ -0,0 +1,83 @@ + + + + + + +Berkeley DB: DbSequence::set_range + + + + + + + +
+

DbSequence::set_range

+
+API +Ref
+


+ +

+#include <db_cxx.h>
+

+int +DbSequence::set_range(db_seq_t min, db_seq_t max); +

+int DbSequence::get_range(db_seq_t *minp, db_seq_t *maxp); +

+
+

Description: DbSequence::set_range

+

Configure a sequence range. This call is only effective when the +sequence is being created. The range is limited to a signed 64 bit +integer.

+

The DbSequence::set_range method may not be called after the +DbSequence::open method is called.

+

The DbSequence::set_range method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+

Parameters

+
+
min
Specifies the minimum value for the sequence. +
max
Specifies the maximum value for the sequence. +
+

Errors

+

The DbSequence::set_range method +may fail and throw +DbException, +encapsulating one of the following non-zero errors, or return one of +the following non-zero errors:

+
+
EINVAL
An +invalid flag value or parameter was specified. +
+
+

Description: DbSequence::get_range

+

The DbSequence::get_range method returns the range of values in the sequence.

+

The DbSequence::get_range method may be called at any time during the life of the +application.

+

The DbSequence::get_range method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+

Parameters

+
+
minp
The DbSequence::get_range method returns the minimum value in minp. +
maxp
The DbSequence::get_range method returns the maximum value in maxp. +
+
+

Class

+DbSequence +

See Also

+Sequences and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_cxx/seq_stat.html b/db/docs/api_cxx/seq_stat.html new file mode 100644 index 000000000..6fc65d462 --- /dev/null +++ b/db/docs/api_cxx/seq_stat.html @@ -0,0 +1,100 @@ + + + + + + +Berkeley DB: DbSequence::stat + + + + + + + +
+

DbSequence::stat

+
+API +Ref
+


+ +

+#include <db_cxx.h>
+

+int +DbSequence::stat(void *sp, u_int32_t flags); +

+int +DbSequence::stat_print(u_int32_t flags); +

+
+

Description: DbSequence::stat

+

The DbSequence::stat method creates a statistical structure and copies a +pointer to it into user-specified memory locations. Specifically, if +sp is non-NULL, a pointer to the statistics for the sequence is +copied into the memory location to which it refers.

+

Statistical structures are stored in allocated memory. If application-specific allocation +routines have been declared (see DbEnv::set_alloc for more +information), they are used to allocate the memory; otherwise, the +standard C library malloc(3) is used. The caller is +responsible for deallocating the memory. To deallocate the memory, free +the memory reference; references inside the returned memory need not be +individually freed.

+

In the presence of multiple threads or processes accessing an active +sequence, the information returned by DbSequence::stat may be out-of-date.

+

The DbSequence::stat method cannot be transaction-protected. For this reason, +it should be called in a thread of control that has no open cursors or +active transactions.

+

The statistics are stored in a structure of type DB_SEQUENCE_STAT. The +following fields will be filled in:

+
+
u_int32_t st_wait;
The number of times a thread of control was forced to wait on the +handle mutex. +
u_int32_t st_nowait;
The number of times that a thread of control was able to obtain handle +mutex without waiting. +
db_seq_t st_current;
The current value of the sequence in the database. +
db_seq_t st_value;
The current cached value of the sequence. +
db_seq_t st_last_value;
The last cached value of the sequence. +
db_seq_t st_min;
The minimum permitted value of the sequence. +
db_seq_t st_max;
The maximum permitted value of the sequence. +
int32_t st_cache_size;
The number of values that will be cached in this handle. +
u_int32_t st_flags;
The flags value for the sequence. +
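An illustrative sketch of reading these fields (seq is assumed to be an open DbSequence handle; standard C headers are assumed):

    DB_SEQUENCE_STAT *sp;
    seq.stat(&sp, 0);
    printf("current value %lld, cached per handle %ld\n",
        (long long)sp->st_current, (long)sp->st_cache_size);
    free(sp);       /* see DbEnv::set_alloc for custom allocators */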
+

Parameters

+
+
flags
The flags parameter must be set by bitwise inclusively OR'ing together one or more +of the following values: +
+
DB_STAT_CLEAR
Reset statistics after printing their values. +
+
+

The DbSequence::stat_print method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+
+

Description: DbSequence::stat_print

+

The DbSequence::stat_print method prints diagnostic information to the output +channel described by the DbEnv::set_msgfile method.

+

Parameters

+
+
flags
The flags parameter must be set by bitwise inclusively OR'ing together one or more +of the following values: +
+
DB_STAT_CLEAR
Reset statistics after printing their values. +
+
+
+

Class

+DbSequence +

See Also

+Sequences and Related Methods +
+

+APIRef +
+

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/api_cxx/txn_abort.html b/db/docs/api_cxx/txn_abort.html index 4475a6f80..4cf6b01b3 100644 --- a/db/docs/api_cxx/txn_abort.html +++ b/db/docs/api_cxx/txn_abort.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbTxn::abort - + -

DbTxn::abort

API -Ref -
+Ref +


@@ -53,6 +52,6 @@ failure, and returns 0 on success.
 

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/txn_begin.html b/db/docs/api_cxx/txn_begin.html index b4e384ab5..1e8e22199 100644 --- a/db/docs/api_cxx/txn_begin.html +++ b/db/docs/api_cxx/txn_begin.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::txn_begin - + -

DbEnv::txn_begin

API -Ref -
+Ref +


@@ -30,8 +29,7 @@ DbEnv::txn_begin(DbTxn *parent, DbTxn **tid, u_int32_t flags);
 

Description: DbEnv::txn_begin

The DbEnv::txn_begin method creates a new transaction in the environment and copies a pointer to a DbTxn that uniquely identifies it into -the memory to which tid refers. -Calling the DbTxn::abort, +the memory to which tid refers. Calling the DbTxn::abort, DbTxn::commit or DbTxn::discard methods will discard the returned handle.

Note: Transactions may only span threads if they do so serially; @@ -51,15 +49,18 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one +
+
flags
The flags parameter must be set to 0 or by bitwise inclusively OR'ing together one or more of the following values: -

-

DB_DIRTY_READ
All read operations performed by the transaction may read modified but +
+
DB_DEGREE_2
This transaction will have degree 2 isolation. This provides for cursor +stability but not repeatable reads. Data items which have been +previously read by this transaction may be deleted or modified by other +transactions before this transaction completes. +
DB_DIRTY_READ
All read operations performed by the transaction may read modified but not yet committed data. Silently ignored if the DB_DIRTY_READ flag was not specified when the underlying database was opened. -

DB_TXN_NOSYNC
Do not synchronously flush the log when this transaction commits or +
DB_TXN_NOSYNC
Do not synchronously flush the log when this transaction commits or prepares. This means the transaction will exhibit the ACI (atomicity, consistency, and isolation) properties, but not D (durability); that is, database integrity will be maintained but it is possible that this @@ -67,11 +68,11 @@ transaction may be undone during recovery.

This behavior may be set for a Berkeley DB environment using the DbEnv::set_flags method. Any value specified to this method overrides that setting.

-

DB_TXN_NOWAIT
If a lock is unavailable for any Berkeley DB operation performed in the context +
DB_TXN_NOWAIT
If a lock is unavailable for any Berkeley DB operation performed in the context of this transaction, cause the operation to return DB_LOCK_DEADLOCK or throw a DbDeadlockException immediately instead of blocking on the lock. -

DB_TXN_SYNC
Synchronously flush the log when this transaction commits or prepares. +
DB_TXN_SYNC
Synchronously flush the log when this transaction commits or prepares. This means the transaction will exhibit all of the ACID (atomicity, consistency, isolation, and durability) properties.

This behavior is the default for Berkeley DB environments unless the @@ -79,8 +80,7 @@ consistency, isolation, and durability) properties. DbEnv::set_flags method. Any value specified to this method overrides that setting.

-

parent
-If the parent parameter is non-NULL, the new transaction will +
parent
If the parent parameter is non-NULL, the new transaction will be a nested transaction, with the transaction indicated by parent as its parent. Transactions may be nested to any level. In the presence of distributed transactions and two-phase commit, only @@ -90,7 +90,7 @@ specified, should be passed as an parameter to DbMemoryException exception.

+throw a DbMemoryException.
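To make the flag combinations above concrete, here is a minimal sketch assuming an already opened, transactional DbEnv named env; do_work() stands in for the application's reads and writes and is not part of the API.

    #include <db_cxx.h>

    int do_work(DbTxn *txn);   // hypothetical application reads/writes

    int run_in_txn(DbEnv &env)
    {
        DbTxn *txn = NULL;

        // Degree 2 isolation, plus an immediate DB_LOCK_DEADLOCK /
        // DbDeadlockException instead of blocking on unavailable locks.
        env.txn_begin(NULL, &txn, DB_DEGREE_2 | DB_TXN_NOWAIT);

        try {
            do_work(txn);
            return txn->commit(0);   // the handle is invalid after commit
        } catch (DbException &e) {
            txn->abort();            // likewise invalid after abort
            return e.get_errno();
        }
    }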


Class

DbEnv, DbTxn @@ -100,6 +100,6 @@ throw a DbMemoryException exception.


APIRef -

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/txn_checkpoint.html b/db/docs/api_cxx/txn_checkpoint.html index 615710189..c3b019178 100644 --- a/db/docs/api_cxx/txn_checkpoint.html +++ b/db/docs/api_cxx/txn_checkpoint.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::txn_checkpoint - + -

DbEnv::txn_checkpoint

API -Ref -
+Ref +


@@ -36,20 +35,17 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

flags
-The flags parameter must be set to 0 or +
+
flags
The flags parameter must be set to 0 or the following value: -

-

DB_FORCE
Force a checkpoint record, even if there has been no activity since the +
+
DB_FORCE
Force a checkpoint record, even if there has been no activity since the last checkpoint.
-

kbyte
-If the kbyte parameter is non-zero, a checkpoint will be done +
kbyte
If the kbyte parameter is non-zero, a checkpoint will be done if more than kbyte kilobytes of log data have been written since the last checkpoint. -

min
-If the min parameter is non-zero, a checkpoint will be done if +
min
If the min parameter is non-zero, a checkpoint will be done if more than min minutes have passed since the last checkpoint.
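For example, a housekeeping thread might call the method periodically with both thresholds set, so a checkpoint is only written when enough work has accumulated. The numbers below are illustrative, and env is assumed to be an open, transactional DbEnv.

    #include <db_cxx.h>

    // Checkpoint only if at least 512KB of log or 5 minutes have accumulated
    // since the last checkpoint; DB_FORCE would checkpoint unconditionally.
    void maybe_checkpoint(DbEnv &env)
    {
        env.txn_checkpoint(512 /* kbyte */, 5 /* min */, 0);
    }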

Errors

@@ -58,8 +54,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -71,6 +67,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/txn_class.html b/db/docs/api_cxx/txn_class.html index f7ca97bdc..39877f7c3 100644 --- a/db/docs/api_cxx/txn_class.html +++ b/db/docs/api_cxx/txn_class.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbTxn - + -

DbTxn

API -Ref -
+Ref +


@@ -71,6 +70,6 @@ calls in a purely C++ application.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/txn_commit.html b/db/docs/api_cxx/txn_commit.html index ed56f1fa4..0f6814d93 100644 --- a/db/docs/api_cxx/txn_commit.html +++ b/db/docs/api_cxx/txn_commit.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbTxn::commit - + -

DbTxn::commit

API -Ref -
+Ref +


@@ -50,12 +49,11 @@ or throws an exception that encapsulates a non-zero error value on
 failure, and returns 0 on success.
 

Parameters

-

-

flags
-The flags parameter must be set to 0 or +
+
flags
The flags parameter must be set to 0 or one of the following values: -

-

DB_TXN_NOSYNC
Do not synchronously flush the log. This means the transaction will +
+
DB_TXN_NOSYNC
Do not synchronously flush the log. This means the transaction will exhibit the ACI (atomicity, consistency, and isolation) properties, but not D (durability); that is, database integrity will be maintained, but it is possible that this transaction may be undone during recovery. @@ -63,7 +61,7 @@ it is possible that this transaction may be undone during recovery. DbEnv::set_flags method or for a single transaction using the DbEnv::txn_begin method. Any value specified to this method overrides both of those settings.

-

DB_TXN_SYNC
Synchronously flush the log. This means the transaction will exhibit +
DB_TXN_SYNC
Synchronously flush the log. This means the transaction will exhibit all of the ACID (atomicity, consistency, isolation, and durability) properties.

This behavior is the default for Berkeley DB environments unless the @@ -82,6 +80,6 @@ method overrides both of those settings.
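A short sketch of the per-commit durability trade-off, assuming a DbTxn handle obtained as described under DbEnv::txn_begin; whether the relaxed guarantee is acceptable is an application decision.

    #include <db_cxx.h>

    // Commit without flushing the log: atomicity, consistency and isolation
    // are preserved, but the transaction may be undone if the system fails
    // before the log reaches disk. Use DB_TXN_SYNC (or 0, the default in a
    // default environment) when full durability is required.
    int fast_commit(DbTxn *txn)
    {
        return txn->commit(DB_TXN_NOSYNC);
    }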


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/txn_discard.html b/db/docs/api_cxx/txn_discard.html index 7928a8f48..2f2adf63b 100644 --- a/db/docs/api_cxx/txn_discard.html +++ b/db/docs/api_cxx/txn_discard.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbTxn::discard - + -

DbTxn::discard

API -Ref -
+Ref +


@@ -44,9 +43,8 @@ failure, and returns 0 on success.
 

After DbTxn::discard has been called, regardless of its return, the DbTxn handle may not be accessed again.

Parameters

-

-

flags
-The flags parameter is currently unused, and must be set to 0. +
+
flags
The flags parameter is currently unused, and must be set to 0.

Errors

The DbTxn::discard method @@ -54,8 +52,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
If the transaction handle does not refer to a transaction that was +
+
EINVAL
If the transaction handle does not refer to a transaction that was recovered into a prepared but not yet completed state; or if an invalid flag value or parameter was specified.
@@ -68,6 +66,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/txn_id.html b/db/docs/api_cxx/txn_id.html index cb5005d48..c2c53a4a7 100644 --- a/db/docs/api_cxx/txn_id.html +++ b/db/docs/api_cxx/txn_id.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbTxn::id - + -

DbTxn::id

API -Ref -
+Ref +


@@ -41,6 +40,6 @@ to the DbEnv::lock_get or 
APIRef -

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/txn_list.html b/db/docs/api_cxx/txn_list.html index 6ca223e55..ca707207e 100644 --- a/db/docs/api_cxx/txn_list.html +++ b/db/docs/api_cxx/txn_list.html @@ -1,31 +1,32 @@ - + Berkeley DB: Berkeley DB: Transaction Subsystem and Related Methods - +

Berkeley DB: Transaction Subsystem and Related Methods

- + - - - - - - - - - - - - + + + + + + + + + + + + +
Transaction Subsystem and Related MethodsDescription
DbEnv::set_tx_maxSet maximum number of transactions
DbEnv::set_tx_timestampSet recovery timestamp
DbEnv::txn_checkpointCheckpoint the transaction subsystem
DbEnv::txn_recoverDistributed transaction recovery
DbEnv::txn_statReturn transaction subsystem statistics
DbEnv::txn_beginBegin a transaction
DbTxn::abortAbort a transaction
DbTxn::commitCommit a transaction
DbTxn::discardDiscard a prepared but not resolved transaction handle
DbTxn::idReturn a transaction's ID
DbTxn::preparePrepare a transaction for commit
DbTxn::set_timeoutSet transaction timeout
DbEnv::set_timeoutSet lock and transaction timeout
DbEnv::set_tx_maxSet maximum number of transactions
DbEnv::set_tx_timestampSet recovery timestamp
DbEnv::txn_beginBegin a transaction
DbEnv::txn_checkpointCheckpoint the transaction subsystem
DbEnv::txn_recoverDistributed transaction recovery
DbEnv::txn_statReturn transaction subsystem statistics
DbTxn::abortAbort a transaction
DbTxn::commitCommit a transaction
DbTxn::discardDiscard a prepared but not resolved transaction handle
DbTxn::idReturn a transaction's ID
DbTxn::preparePrepare a transaction for commit
DbTxn::set_timeoutSet transaction timeout
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/txn_prepare.html b/db/docs/api_cxx/txn_prepare.html index 222402bfc..330c222f4 100644 --- a/db/docs/api_cxx/txn_prepare.html +++ b/db/docs/api_cxx/txn_prepare.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbTxn::prepare - + -

DbTxn::prepare

API -Ref -
+Ref +


@@ -28,7 +27,7 @@ DbTxn::prepare(u_int8_t gid[DB_XIDDATASIZE]);
 


Description: DbTxn::prepare

- +

The DbTxn::prepare method initiates the beginning of a two-phase commit.

In a distributed transaction environment, Berkeley DB can be used as a local transaction manager. In this case, the distributed transaction manager @@ -38,20 +37,18 @@ return before responding to the distributed transaction manager. Only after the distributed transaction manager receives successful responses from all of its prepare messages should it issue any commit messages.

-

In the case of nested transactions, preparing the parent -causes all unresolved children of the parent transaction to be committed. -Child transactions should never be explicitly prepared. -Their fate will be resolved along with their parent's during -global recovery.

+

In the case of nested transactions, preparing the parent causes all +unresolved children of the parent transaction to be committed. Child +transactions should never be explicitly prepared. Their fate will be +resolved along with their parent's during global recovery.

The DbTxn::prepare method either returns a non-zero error value or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

gid
-The gid parameter specifies the global transaction ID by which this +
+
gid
The gid parameter specifies the global transaction ID by which this transaction will be known. This global transaction ID will be returned in calls to DbEnv::txn_recover, telling the application which global transactions must be resolved. @@ -65,6 +62,6 @@ transactions must be resolved.
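A minimal sketch of the local side of phase one, assuming the distributed transaction manager hands the application an opaque global ID of at most DB_XIDDATASIZE bytes; the helper name and the zero-padding are illustrative.

    #include <db_cxx.h>
    #include <cstring>

    // Record the transaction as prepared under the given global ID; the
    // distributed transaction manager later decides commit or abort.
    int prepare_branch(DbTxn *txn, const u_int8_t *global_id, size_t len)
    {
        u_int8_t gid[DB_XIDDATASIZE];

        std::memset(gid, 0, sizeof(gid));
        std::memcpy(gid, global_id, len < sizeof(gid) ? len : sizeof(gid));
        return txn->prepare(gid);
    }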

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/txn_recover.html b/db/docs/api_cxx/txn_recover.html index 342bf0f4a..bc46a4bca 100644 --- a/db/docs/api_cxx/txn_recover.html +++ b/db/docs/api_cxx/txn_recover.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::txn_recover - + -

DbEnv::txn_recover

API -Ref -
+Ref +


@@ -42,7 +41,7 @@ be filled in with a list of transactions that must be resolved by the
 application (committed, aborted or discarded).  The preplist
 parameter is a structure of type DB_PREPLIST; the following DB_PREPLIST
 fields will be filled in:

-

+
DB_TXN * txn;
The transaction handle for the transaction.
u_int8_t gid[DB_XIDDATASIZE];
The global transaction ID for the transaction. The global transaction ID is the one specified when the transaction was prepared. The @@ -58,21 +57,18 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

count
-The count parameter specifies the number of available entries +
+
count
The count parameter specifies the number of available entries in the passed-in preplist array. The retp parameter returns the number of entries DbEnv::txn_recover has filled in, in the array. -

flags
-The flags parameter must be set to one of the following values: -

-

DB_FIRST
Begin returning a list of prepared, but not yet resolved transactions. -

DB_NEXT
Continue returning a list of prepared, but not yet resolved transactions, +
flags
The flags parameter must be set to one of the following values: +
+
DB_FIRST
Begin returning a list of prepared, but not yet resolved transactions. +
DB_NEXT
Continue returning a list of prepared, but not yet resolved transactions, starting where the last call to DbEnv::txn_recover left off.
-

preplist
-The preplist parameter references memory into which +
preplist
The preplist parameter references memory into which the list of transactions to be resolved by the application is copied.
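The loop below sketches how an application might drain and resolve the list at recovery time. The 32-entry batch, the resolve_from_gid() decision function, and the use of the C DB_TXN handle methods carried in DB_PREPLIST are assumptions for illustration, not requirements of the interface.

    #include <db_cxx.h>

    // Hypothetical: ask the distributed transaction manager whether the
    // global transaction with this ID committed.
    bool resolve_from_gid(const u_int8_t gid[DB_XIDDATASIZE]);

    void resolve_prepared(DbEnv &env)
    {
        DB_PREPLIST prep[32];
        long count = 0;
        u_int32_t flags = DB_FIRST;          // DB_NEXT on subsequent calls

        do {
            env.txn_recover(prep, 32, &count, flags);
            for (long i = 0; i < count; i++) {
                if (resolve_from_gid(prep[i].gid))
                    prep[i].txn->commit(prep[i].txn, 0);
                else
                    prep[i].txn->abort(prep[i].txn);
            }
            flags = DB_NEXT;
        } while (count == 32);
    }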

@@ -84,6 +80,6 @@ The preplist parameter references memory into which

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/txn_set_timeout.html b/db/docs/api_cxx/txn_set_timeout.html index 1279f8ee2..c7fe7e9e3 100644 --- a/db/docs/api_cxx/txn_set_timeout.html +++ b/db/docs/api_cxx/txn_set_timeout.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbTxn::set_timeout - + -

DbTxn::set_timeout

API -Ref -
+Ref +


@@ -31,15 +30,12 @@ DbTxn::set_timeout(db_timeout_t timeout, u_int32_t flags);
 

The DbTxn::set_timeout method sets timeout values for locks or transactions for the specified transaction.

Timeouts are checked whenever a thread of control blocks on a lock or -when deadlock detection is performed. (In the case of -DB_SET_LOCK_TIMEOUT, the lock is one requested explicitly -through the Lock subsystem interfaces. In the case of -DB_SET_TXN_TIMEOUT, the lock is one requested on behalf of a -transaction. In either case, it may be a lock requested by the database -access methods underlying the application.) As timeouts are only -checked when the lock request first blocks or when deadlock detection -is performed, the accuracy of the timeout depends on how often deadlock -detection is performed.

+when deadlock detection is performed. In the case of +DB_SET_LOCK_TIMEOUT, the timeout is for any single lock request. +In the case of DB_SET_TXN_TIMEOUT, the timeout is for the life +of the transaction. As timeouts are only checked when the lock request +first blocks or when deadlock detection is performed, the accuracy of +the timeout depends on how often deadlock detection is performed.

Timeout values may be specified for the database environment as a whole. See DbEnv::set_timeout for more information.

The DbTxn::set_timeout method configures operations performed on the underlying @@ -53,15 +49,13 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to one of the following values: -

-

DB_SET_LOCK_TIMEOUT
Set the timeout value for locks in this transaction. -

DB_SET_TXN_TIMEOUT
Set the timeout value for this transaction. +
+
flags
The flags parameter must be set to one of the following values: +
+
DB_SET_LOCK_TIMEOUT
Set the timeout value for locks in this transaction. +
DB_SET_TXN_TIMEOUT
Set the timeout value for this transaction.
-

timeout
-The timeout parameter is specified as an unsigned 32-bit number +
timeout
The timeout parameter is specified as an unsigned 32-bit number of microseconds, limiting the maximum timeout to roughly 71 minutes. A value of 0 disables timeouts for the transaction.
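For instance, to bound both individual lock waits and the overall life of one transaction (values in microseconds and purely illustrative):

    #include <db_cxx.h>

    // Give up on any single lock request after 1 second, and on the whole
    // transaction after 30 seconds; both are only checked when a lock
    // request blocks or when deadlock detection runs.
    void bound_txn(DbTxn *txn)
    {
        txn->set_timeout(1000000,  DB_SET_LOCK_TIMEOUT);
        txn->set_timeout(30000000, DB_SET_TXN_TIMEOUT);
    }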
@@ -71,8 +65,8 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

@@ -84,6 +78,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_cxx/txn_stat.html b/db/docs/api_cxx/txn_stat.html index d4bef3d85..acea16775 100644 --- a/db/docs/api_cxx/txn_stat.html +++ b/db/docs/api_cxx/txn_stat.html @@ -1,23 +1,22 @@ - - + + Berkeley DB: DbEnv::txn_stat - + -

DbEnv::txn_stat

API -Ref -
+Ref +


@@ -25,6 +24,9 @@
 

int DbEnv::txn_stat(DB_TXN_STAT **statp, u_int32_t flags); +

+int +DbEnv::txn_stat_print(u_int32_t flags);


Description: DbEnv::txn_stat

@@ -40,7 +42,7 @@ responsible for deallocating the memory. To deallocate the memory, free the memory reference; references inside the returned memory need not be individually freed.

The following DB_TXN_STAT fields will be filled in:

-

+
DbLsn st_last_ckp;
The LSN of the last checkpoint.
time_t st_time_ckp;
The time the last completed checkpoint finished (as the number of seconds since the Epoch, returned by the IEEE/ANSI Std 1003.1 (POSIX) time function). @@ -52,15 +54,15 @@ since the Epoch, returned by the IEEE/ANSI Std 1003.1 (POSIX) time functi
u_int32_t st_naborts;
The number of transactions that have aborted.
u_int32_t st_ncommits;
The number of transactions that have committed.
u_int32_t st_nrestores;
The number of transactions that have been restored. -
u_int32_t st_regsize;
The size of the region. +
roff_t st_regsize;
The size of the region, in bytes.
u_int32_t st_region_wait;
The number of times that a thread of control was forced to wait before obtaining the region lock.
u_int32_t st_region_nowait;
The number of times that a thread of control was able to obtain the region lock without waiting.
DB_TXN_ACTIVE *st_txnarray;
A pointer to an array of st_nactive DB_TXN_ACTIVE structures, -describing the currently active transactions. The following fields of +describing the currently active transactions. The following fields of the DB_TXN_ACTIVE structure will be filled in: -

+
u_int32_t txnid;
The transaction ID of the transaction.
u_int32_t parentid;
The transaction ID of the parent transaction (or 0, if no parent).
DbLsn lsn;
The current log sequence number when the transaction was begun. @@ -75,15 +77,13 @@ or throws an exception that encapsulates a non-zero error value on failure, and returns 0 on success.

Parameters

-

-

flags
-The flags parameter must be set to 0 or +
+
flags
The flags parameter must be set to 0 or the following value: -

-

DB_STAT_CLEAR
Reset statistics after returning their values. +
+
DB_STAT_CLEAR
Reset statistics after returning their values.
-

statp
-The statp parameter references memory into which +
statp
The statp parameter references memory into which a pointer to the allocated statistics structure is copied.

Errors

@@ -92,11 +92,34 @@ may fail and throw DbException, encapsulating one of the following non-zero errors, or return one of the following non-zero errors:

-

-

EINVAL
An +
+
EINVAL
An invalid flag value or parameter was specified.

+

Description: DbEnv::txn_stat_print

+

The DbEnv::txn_stat_print method returns the +transaction subsystem statistical information, as described for the DbEnv::txn_stat method. +The information is printed to a specified output channel (see the +DbEnv::set_msgfile method for more information), or passed to an +application callback function (see the DbEnv::set_msgcall method for +more information).

+

The DbEnv::txn_stat_print method may not be called before the DbEnv::open method has +been called.

+

The DbEnv::txn_stat_print method +either returns a non-zero error value +or throws an exception that encapsulates a non-zero error value on +failure, and returns 0 on success. +

+

Parameters

+
+
flags
The flags parameter must be set to 0 or +the following value: +
+
DB_STAT_ALL
Display all available information. +
+
+
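A sketch tying the two calls together, assuming an open transactional DbEnv named env; the fields sampled are a small subset chosen for illustration, and free() is used as described above for allocated statistics.

    #include <db_cxx.h>
    #include <cstdlib>
    #include <iostream>

    void report_txns(DbEnv &env)
    {
        DB_TXN_STAT *sp = NULL;

        env.txn_stat(&sp, 0);
        std::cout << sp->st_nactive << " active, "
                  << sp->st_ncommits << " committed, "
                  << sp->st_naborts << " aborted" << std::endl;

        // The DB_TXN_ACTIVE array lives inside the same allocation.
        for (u_int32_t i = 0; i < sp->st_nactive; i++)
            std::cout << "  active txn " << sp->st_txnarray[i].txnid << std::endl;
        free(sp);

        // Full formatted report to the channel set with DbEnv::set_msgfile.
        env.txn_stat_print(DB_STAT_ALL);
    }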

Class

DbEnv, DbTxn

See Also

@@ -105,6 +128,6 @@ invalid flag value or parameter was specified.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/db_close.html b/db/docs/api_tcl/db_close.html index 37218fb33..bffb7b3c3 100644 --- a/db/docs/api_tcl/db_close.html +++ b/db/docs/api_tcl/db_close.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: db close - + -

db close

API -Ref -
+Ref +


db close
@@ -31,8 +30,8 @@ any underlying files.  Because key/data pairs are cached in memory, failing
 to sync the file with the db close or db sync command may
 result in inconsistent or lost information.

The options are as follows:

-

-

-nosync
Do not flush cached information to disk. +
+
-nosync
Do not flush cached information to disk.

The -nosync flag is a dangerous option. It should only be set if the application is doing logging (with transactions) so that the database is recoverable after a system or application crash, or if the database is @@ -56,6 +55,6 @@ is thrown.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/db_count.html b/db/docs/api_tcl/db_count.html index 4224c7a49..f90469ef0 100644 --- a/db/docs/api_tcl/db_count.html +++ b/db/docs/api_tcl/db_count.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: db count - + -

db count

API -Ref -
+Ref +


db count key
@@ -34,6 +33,6 @@ Tcl error is thrown.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/db_cursor.html b/db/docs/api_tcl/db_cursor.html index bc96bcd55..d406641e0 100644 --- a/db/docs/api_tcl/db_cursor.html +++ b/db/docs/api_tcl/db_cursor.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: db cursor - + -

db cursor

API -Ref -
+Ref +


db cursor
@@ -31,8 +30,8 @@ where X is an integer starting at 0 (for example, db0.c0 and db0.c1).
 It is through this Tcl command that the script accesses the cursor
 methods.

The options are as follows:

-

-

-txn txnid
If the operation is to be +
+
-txn txnid
If the operation is to be transaction-protected, the txnid parameter is a transaction handle returned from env txn. @@ -42,6 +41,6 @@ the txnid parameter is a transaction handle returned from

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/db_del.html b/db/docs/api_tcl/db_del.html index 3aacca5d3..a89fb935a 100644 --- a/db/docs/api_tcl/db_del.html +++ b/db/docs/api_tcl/db_del.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: db del - + -

db del

API -Ref -
+Ref +


db del
@@ -32,15 +31,15 @@
 

In the presence of duplicate key values, all records associated with the designated key will be discarded.

The options are as follows:

-

-

-auto_commit
Enclose the call within a transaction. If the call succeeds, changes +
+
-auto_commit
Enclose the call within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, the operation will have made no changes. -

-glob
The specified key is a wildcard pattern, and all keys matching that +
-glob
The specified key is a wildcard pattern, and all keys matching that pattern are discarded from the database. The pattern is a simple wildcard, any characters after the wildcard character are ignored. This option only works on databases using the Btree access method. -

-txn txnid
If the operation is to be +
-txn txnid
If the operation is to be transaction-protected (other than by specifying the -auto_commit flag), the txnid parameter is a transaction handle returned from env txn. @@ -51,6 +50,6 @@ is thrown.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/db_get.html b/db/docs/api_tcl/db_get.html index 8b5610c76..e10870e93 100644 --- a/db/docs/api_tcl/db_get.html +++ b/db/docs/api_tcl/db_get.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: db get - + -

db get

API -Ref -
+Ref +


db get
@@ -44,23 +43,23 @@ db get
 duplicate items.  Duplicates are sorted by insert order except where this
 order has been overridden by cursor operations.

The options are as follows:

-

-

-auto_commit
Enclose the call within a transaction. If the call succeeds, changes +
+
-auto_commit
Enclose the call within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, the operation will have made no changes. This option may only be specified with the -consume or -consume_wait flags. -

-consume
Return the record number and data from the available record closest to +
-consume
Return the record number and data from the available record closest to the head of the queue, and delete the record. The cursor will be positioned on the deleted record. A record is available if it is not deleted and is not currently locked. The underlying database must be of type Queue for -consume to be specified. -

-consume_wait
The same as the -consume flag except that if the Queue database +
-consume_wait
The same as the -consume flag except that if the Queue database is empty, the thread of control will wait until there is data in the queue before returning. The underlying database must be of type Queue for -consume_wait to be specified. -

-get_both key data
Retrieve the key/data pair only if both the key and data match the +
-get_both key data
Retrieve the key/data pair only if both the key and data match the arguments. -

-glob
Return all keys matching the given key, where the key is a simple +
-glob
Return all keys matching the given key, where the key is a simple wildcard pattern. Where it is used, it replaces the use of the key with the given pattern of a set of keys. Any characters after the wildcard character are ignored. For example, in a database of last names, the @@ -69,15 +68,15 @@ database, and the command "db0 get -glob Jo*" will return both "Jones" and "Johnson" from the database. The command "db0 get -glob *" will return all of the key/data pairs in the database. This option only works on databases using the Btree access method. -

-partial {doff dlen}
The dlen bytes starting doff bytes from the beginning +
-partial {doff dlen}
The dlen bytes starting doff bytes from the beginning of the retrieved data record are returned as if they comprised the entire record. If any or all of the specified bytes do not exist in the record, the command is successful and any existing bytes are returned. -

-recno
Retrieve the specified numbered key/data pair from a database. For +
-recno
Retrieve the specified numbered key/data pair from a database. For -recno to be specified, the specified key must be a record number; and the underlying database must be of type Recno or Queue, or of type Btree that was created with the -recnum option. -

-rmw
Acquire write locks instead of read locks when doing the retrieval. +
-rmw
Acquire write locks instead of read locks when doing the retrieval. Setting this flag may decrease the likelihood of deadlock during a read-modify-write cycle by immediately acquiring the write lock during the read part of the cycle so that another thread of control acquiring a @@ -87,7 +86,7 @@ result in deadlock. interface calls in nontransactional environments, the -rmw argument to the db get call is only meaningful in the presence of transactions.

-

-txn txnid
If the operation is to be +
-txn txnid
If the operation is to be transaction-protected, the txnid parameter is a transaction handle returned from env txn. @@ -103,6 +102,6 @@ is thrown.
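The -rmw option corresponds to the DB_RMW flag in the underlying library; for readers using the C++ API, a sketch of the read-modify-write cycle it is intended for follows. The handle names and the integer record layout are illustrative.

    #include <db_cxx.h>
    #include <cstring>

    // Read a counter under a write lock, then update it in the same
    // transaction, avoiding the read-to-write lock upgrade that commonly
    // deadlocks read-modify-write cycles.
    int bump_counter(Db &db, DbTxn *txn, const char *name)
    {
        int value = 0;
        Dbt key((void *)name, (u_int32_t)std::strlen(name));
        Dbt data(&value, sizeof(value));
        data.set_ulen(sizeof(value));
        data.set_flags(DB_DBT_USERMEM);

        int ret = db.get(txn, &key, &data, DB_RMW);   // write lock up front
        if (ret != 0 && ret != DB_NOTFOUND)
            return ret;

        value++;
        data.set_size(sizeof(value));
        return db.put(txn, &key, &data, 0);
    }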


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/db_get_join.html b/db/docs/api_tcl/db_get_join.html index 3b8c04043..f15024428 100644 --- a/db/docs/api_tcl/db_get_join.html +++ b/db/docs/api_tcl/db_get_join.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: db get_join - + -

db get_join

API -Ref -
+Ref +


db get_join
@@ -33,8 +32,8 @@ join the specified keys and returns a list of joined {key data} pairs.
 See Equality join for more information on
 the underlying requirements for joining.

The options are as follows:

-

-

-txn txnid
If the operation is to be +
+
-txn txnid
If the operation is to be transaction-protected, the txnid parameter is a transaction handle returned from env txn. @@ -44,6 +43,6 @@ the txnid parameter is a transaction handle returned from

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/db_get_type.html b/db/docs/api_tcl/db_get_type.html index fa01c3249..25a19ef2b 100644 --- a/db/docs/api_tcl/db_get_type.html +++ b/db/docs/api_tcl/db_get_type.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: db get_type - + -

db get_type

API -Ref -
+Ref +


db get_type
@@ -31,6 +30,6 @@ returning one of "btree", "hash", "queue" or "recno".


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/db_is_byteswapped.html b/db/docs/api_tcl/db_is_byteswapped.html index d73927c51..c884a98b3 100644 --- a/db/docs/api_tcl/db_is_byteswapped.html +++ b/db/docs/api_tcl/db_is_byteswapped.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: db is_byteswapped - + -

db is_byteswapped

API -Ref -
+Ref +


db is_byteswapped
@@ -35,6 +34,6 @@ or not.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/db_join.html b/db/docs/api_tcl/db_join.html index bd1e7bc6b..686914dcb 100644 --- a/db/docs/api_tcl/db_join.html +++ b/db/docs/api_tcl/db_join.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: db join - + -

db join

API -Ref -
+Ref +


db join
@@ -45,6 +44,6 @@ been created within the same transaction.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/db_open.html b/db/docs/api_tcl/db_open.html index 3d273ab44..dfd3828b5 100644 --- a/db/docs/api_tcl/db_open.html +++ b/db/docs/api_tcl/db_open.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: berkdb open - + -

berkdb open

API -Ref -
+Ref +


berkdb open
@@ -59,22 +58,22 @@ The returned database handle is bound to a Tcl command of the form
 db1).  It is through this Tcl command that the script accesses the
 database methods.

The options are as follows:

-

-

-auto_commit
Enclose the call within a transaction. If the call succeeds, changes +
+
-auto_commit
Enclose the call within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, the operation will have made no changes. -

-btree
Open/create a database of type Btree. The Btree format +
-btree
Open/create a database of type Btree. The Btree format is a representation of a sorted, balanced tree structure. -

-hash
Open/create a database of type Hash. The Hash format is +
-hash
Open/create a database of type Hash. The Hash format is an extensible, dynamic hashing scheme. -

-queue
Open/create a database of type Queue. The Queue format +
-queue
Open/create a database of type Queue. The Queue format supports fast access to fixed-length records accessed by sequentially or logical record number. -

-recno
Open/create a database of type Recno. The Recno format +
-recno
Open/create a database of type Recno. The Recno format supports fixed- or variable-length records, accessed sequentially or by logical record number, and optionally retrieved from a flat text file. -

-unknown
The database is of an unknown type, and must already exist. -

-cachesize {gbytes bytes ncache}
Set the size of the database's shared memory buffer pool (that is, the +
-unknown
The database is of an unknown type, and must already exist. +
-cachesize {gbytes bytes ncache}
Set the size of the database's shared memory buffer pool (that is, the cache), to gbytes gigabytes plus bytes. The cache should be the size of the normal working data set of the application, with some small amount of additional memory for unusual situations. @@ -96,35 +95,35 @@ separate pieces of memory.

Because databases opened within Berkeley DB environments use the cache specified to the environment, it is an error to attempt to set a cache in a database created within an environment.

-

-create
Create any underlying files, as necessary. If the files do not already +
-create
Create any underlying files, as necessary. If the files do not already exist and the -create argument is not specified, the call will fail. -

-delim delim
Set the delimiting byte used to mark the end of a record in the backing +
-delim delim
Set the delimiting byte used to mark the end of a record in the backing source file for the Recno access method.

This byte is used for variable length records if the -source argument file is specified. If the -source argument file is specified and no delimiting byte was specified, <newline> characters (that is, ASCII 0x0a) are interpreted as end-of-record markers.

-

-dup
Permit duplicate data items in the tree, that is, insertion when the +
-dup
Permit duplicate data items in the tree, that is, insertion when the key of the key/data pair being inserted already exists in the tree will be successful. The ordering of duplicates in the tree is determined by the order of insertion unless the ordering is otherwise specified by use of a cursor or a duplicate comparison function.

error to specify both -dup and -recnum.

-

-dupsort
Sort duplicates within a set of data items. A default lexical +
-dupsort
Sort duplicates within a set of data items. A default lexical comparison will be used. Specifying that duplicates are to be sorted changes the behavior of the db put operation as well as the dbc put operation when the -keyfirst, -keylast and -current options are specified. -

-encrypt
Specify the database in an environment should be encrypted with the +
-encrypt
Specify the database in an environment should be encrypted with the same password that is being used in the environment. -

-encryptaes passwd
Specify the database should be encrypted with the given password +
-encryptaes passwd
Specify the database should be encrypted with the given password using the Rijndael/AES (also known as the Advanced Encryption Standard and Federal Information Processing Standard (FIPS) 197) algorithm. -

-encryptany passwd
Specify the already existing database should be opened +
-encryptany passwd
Specify the already existing database should be opened with the given password. This option is used if the database is known to be encrypted, but the specific algorithm used is not known. -

-env env
If no -env argument is given, the database is standalone; that +
-env env
If no -env argument is given, the database is standalone; that is, it is not part of any Berkeley DB environment.

If a -env argument is given, the database is created within the specified Berkeley DB environment. The database access methods automatically @@ -132,7 +131,7 @@ make calls to the other subsystems in Berkeley DB, based on the enclosing environment. For example, if the environment has been configured to use locking, the access methods will automatically acquire the correct locks when reading and writing pages of the database.

-

-errfile filename

When an error occurs in the Berkeley DB library, a Berkeley DB error or an error +

-errfile filename

When an error occurs in the Berkeley DB library, a Berkeley DB error or an error return value is returned by the function. In some cases, however, the errno value may be insufficient to completely describe the cause of the error especially during initial application debugging.

@@ -152,17 +151,17 @@ as during application debugging.

For database handles opened inside of Berkeley DB environments, specifying the -errfile argument affects the entire environment and is equivalent to specifying the same argument to the berkdb env command.

-

-excl
Return an error if the database already exists. -

-extent size
Set the size of the extents of the Queue database; the size is specified +
-excl
Return an error if the database already exists. +
-extent size
Set the size of the extents of the Queue database; the size is specified as the number of pages in an extent. Each extent is created as a separate physical file. If no extent size is set, the default behavior is to create only a single underlying database file.

For information on tuning the extent size, see Selecting a extent size.

-

-ffactor density
Set the desired density within the hash table. +
-ffactor density
Set the desired density within the hash table.

The density is an approximation of the number of keys allowed to accumulate in any one bucket -

-len len
For the Queue access method, specify that the records are of length +
-len len
For the Queue access method, specify that the records are of length len.

For the Recno access method, specify that the records are fixed-length, not byte-delimited, and are of length len.

@@ -172,32 +171,32 @@ information).

Any attempt to insert records into the database that are greater than len bytes long will cause the call to fail immediately and return an error.

-

-mode mode

On UNIX systems, or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by the access methods +

-mode mode

On UNIX systems, or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by the access methods are created with mode mode (as described in chmod(2)) and modified by the process' umask value at the time of creation (see umask(2)). The group ownership of created files is based on the system and directory defaults, and is not further specified by Berkeley DB. If mode is 0, files are created readable and writable by both owner and group. On Windows systems, the mode argument is ignored.

-

-nelem size
Set an estimate of the final size of the hash table. +
-nelem size
Set an estimate of the final size of the hash table.

If not set or set too low, hash tables will still expand gracefully as keys are entered, although a slight performance degradation may be noticed.

-

-pad pad
Set the padding character for short, fixed-length records for the Queue +
-pad pad
Set the padding character for short, fixed-length records for the Queue and Recno access methods.

If no pad character is specified, <space> characters (that is, ASCII 0x20) are used for padding.

-

-pagesize pagesize
Set the size of the pages used to hold items in the database, in bytes. +
-pagesize pagesize
Set the size of the pages used to hold items in the database, in bytes. The minimum page size is 512 bytes, and the maximum page size is 64K bytes. If the page size is not explicitly set, one is selected based on the underlying filesystem I/O block size. The automatically selected size has a lower limit of 512 bytes and an upper limit of 16K bytes.

For information on tuning the Berkeley DB page size, see Selecting a page size.

-

-rdonly
Open the database for reading only. Any attempt to modify items in the +
-rdonly
Open the database for reading only. Any attempt to modify items in the database will fail, regardless of the actual permissions of any underlying files. -

-recnum
Support retrieval from the Btree using record numbers. +
-recnum
Support retrieval from the Btree using record numbers.

Logical record numbers in Btree databases are mutable in the face of record insertion or deletion. See the -renumber argument for further discussion.

@@ -208,7 +207,7 @@ deletions, effectively single-threading the tree for those operations. Specifying -recnum can result in serious performance degradation for some applications and data sets.

It is an error to specify both -dup and -recnum.

-

-renumber
Specifying the -renumber argument causes the logical record +
-renumber
Specifying the -renumber argument causes the logical record numbers to be mutable, and change as records are added to and deleted from the database. For example, the deletion of record number 4 causes records numbered 5 and greater to be renumbered downward by one. If a cursor was @@ -231,10 +230,10 @@ logical record, continuing to refer to the same record as it did before.

For these reasons, concurrent access to a Recno database with the -renumber flag specified may be largely meaningless, although it is supported.

-

-snapshot
This argument specifies that any specified -source file be read +
-snapshot
This argument specifies that any specified -source file be read in its entirety when the database is opened. If this argument is not specified, the -source file may be read lazily. -

-source file
Set the underlying source file for the Recno access method. The purpose +
-source file
Set the underlying source file for the Recno access method. The purpose of the -source file is to provide fast access and modification to databases that are normally stored as flat text files.

If the -source argument is give, it specifies an underlying flat @@ -279,20 +278,20 @@ they will be silently discarded.

used to specify databases that are read-only for Berkeley DB applications, and that are either generated on the fly by software tools, or modified using a different mechanism such as a text editor.

-

-truncate
Physically truncate the underlying file, discarding all previous databases +
-truncate
Physically truncate the underlying file, discarding all previous databases it might have held. Underlying filesystem primitives are used to implement this flag. For this reason, it is only applicable to the physical file and cannot be used to discard databases within a file.

The -truncate argument cannot be transaction-protected, and it is an error to specify it in a transaction-protected environment.

-

-txn txnid
If the operation is to be +
-txn txnid
If the operation is to be transaction-protected (other than by specifying the -auto_commit flag), the txnid parameter is a transaction handle returned from env txn. -

--
Mark the end of the command arguments. -

file
The name of a single physical file on disk that will be used to back the +
--
Mark the end of the command arguments. +
file
The name of a single physical file on disk that will be used to back the database. -

database
The database argument allows applications to have multiple +
database
The database argument allows applications to have multiple databases inside of a single physical file. This is useful when the databases are both numerous and reasonably small, in order to avoid creating a large number of underlying files. It is an error to attempt @@ -309,6 +308,6 @@ file for more information.
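For readers coming from the C++ API, the same ideas (a private cache for standalone databases, an explicit page size, and several databases inside one physical file) look roughly as follows; the file and database names and the sizes are illustrative, and a transactional environment is assumed when env is non-NULL.

    #include <db_cxx.h>

    // Create a Btree database named "inventory" inside the physical file
    // data.db, with an 8KB page size; a standalone handle gets a private
    // 10MB, single-segment cache (a database inside an environment uses
    // the environment's cache instead).
    Db *open_subdb(DbEnv *env)
    {
        Db *db = new Db(env, 0);
        u_int32_t flags = DB_CREATE;

        if (env != NULL)
            flags |= DB_AUTO_COMMIT;                    // transactional create
        else
            db->set_cachesize(0, 10 * 1024 * 1024, 1);  // standalone cache
        db->set_pagesize(8 * 1024);

        db->open(NULL, "data.db", "inventory", DB_BTREE, flags, 0);
        return db;
    }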


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/db_put.html b/db/docs/api_tcl/db_put.html index 48b5e245b..17cd05c4d 100644 --- a/db/docs/api_tcl/db_put.html +++ b/db/docs/api_tcl/db_put.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: db put - + -

db put

API -Ref -
+Ref +


db put
@@ -38,17 +37,17 @@ db put
 

The db put command stores the specified key/data pair into the database.

The options are as follows:

-

-

-auto_commit
Enclose the call within a transaction. If the call succeeds, changes +
+
-auto_commit
Enclose the call within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, the operation will have made no changes. -

-append
Append the data item to the end of the database. For the -append +
-append
Append the data item to the end of the database. For the -append option to be specified, the underlying database must be a Queue or Recno database. The record number allocated to the record is returned on success. -

-nooverwrite
Enter the new key/data pair only if the key does not already appear in +
-nooverwrite
Enter the new key/data pair only if the key does not already appear in the database. -

-partial {doff dlen}

The dlen bytes starting doff bytes from the beginning +

-partial {doff dlen}

The dlen bytes starting doff bytes from the beginning of the specified key's data record are replaced by the data specified by the data and size structure elements. If dlen is smaller than the length of the supplied data, the record will grow; if @@ -57,11 +56,11 @@ will shrink. If the specified bytes do not exist, the record will be extended using nul bytes as necessary, and the db put call will succeed.

It is an error to attempt a partial put using the db put command in a database that supports duplicate records. Partial puts in databases supporting -duplicate records must be done using a dbc put command.

+duplicate records must be done using a dbc put command.

It is an error to attempt a partial put with differing dlen and supplied data length values in Queue or Recno databases with fixed-length records.

-

-txn txnid
If the operation is to be +
-txn txnid
If the operation is to be transaction-protected (other than by specifying the -auto_commit flag), the txnid parameter is a transaction handle returned from env txn. @@ -77,6 +76,6 @@ types, the key is interpreted by Tcl as a byte array.
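The -partial option corresponds to setting DB_DBT_PARTIAL on the data Dbt in the C++ API; a sketch follows, with the key, offset, and patch bytes chosen purely for illustration.

    #include <db_cxx.h>
    #include <cstring>

    // Overwrite dlen bytes starting doff bytes into an existing record;
    // bytes outside that window are untouched, and the record grows or
    // shrinks if the supplied data length differs from dlen.
    int patch_record(Db &db, DbTxn *txn, const char *keystr)
    {
        const char patch[4] = { 'A', 'B', 'C', 'D' };

        Dbt key((void *)keystr, (u_int32_t)std::strlen(keystr));
        Dbt data((void *)patch, sizeof(patch));
        data.set_doff(8);                 // start replacing at byte 8
        data.set_dlen(sizeof(patch));     // replace exactly 4 bytes
        data.set_flags(DB_DBT_PARTIAL);

        return db.put(txn, &key, &data, 0);
    }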


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/db_remove.html b/db/docs/api_tcl/db_remove.html index 2282179ec..a7d8a79e5 100644 --- a/db/docs/api_tcl/db_remove.html +++ b/db/docs/api_tcl/db_remove.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: berkdb dbremove - + -

berkdb dbremove

API -Ref -
+Ref +


berkdb dbremove
@@ -38,18 +37,18 @@ removing all databases that it contained.

No reference count of database use is maintained by Berkeley DB. Applications should not remove databases that are currently in use.

The options are as follows:

-

-

-encrypt
Specify the database in an environment is encrypted with the +
+
-encrypt
Specify the database in an environment is encrypted with the same password that is being used in the environment. -

-encryptaes passwd
Specify the database is encrypted with the given password +
-encryptaes passwd
Specify the database is encrypted with the given password using the Rijndael/AES (also known as the Advanced Encryption Standard and Federal Information Processing Standard (FIPS) 197) algorithm. -

-encryptany passwd
Specify the already existing database is encrypted +
-encryptany passwd
Specify the already existing database is encrypted with the given password. This option is used if the database is known to be encrypted, but the specific algorithm used is not known. -

-env env
If a -env argument is given, the database in the specified Berkeley DB +
-env env
If a -env argument is given, the database in the specified Berkeley DB environment is removed. -

--
Mark the end of the command arguments. +
--
Mark the end of the command arguments.

The berkdb dbremove command returns 0 on success, and in the case of error, a Tcl error is thrown.

@@ -57,6 +56,6 @@ is thrown.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/db_rename.html b/db/docs/api_tcl/db_rename.html index f161437a6..13a2aa2a4 100644 --- a/db/docs/api_tcl/db_rename.html +++ b/db/docs/api_tcl/db_rename.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: berkdb dbrename - + -

berkdb dbrename

API -Ref -
+Ref +


berkdb rename
@@ -39,18 +38,18 @@ the physical file represented by file is renamed.

No reference count of database use is maintained by Berkeley DB. Applications should not rename databases that are currently in use.

The options are as follows:

-

-

-encrypt
Specify the database in an environment is encrypted with the +
+
-encrypt
Specify the database in an environment is encrypted with the same password that is being used in the environment. -

-encryptaes passwd
Specify the database is encrypted with the given password +
-encryptaes passwd
Specify the database is encrypted with the given password using the Rijndael/AES (also known as the Advanced Encryption Standard and Federal Information Processing Standard (FIPS) 197) algorithm. -

-encryptany passwd
Specify the already existing database is encrypted +
-encryptany passwd
Specify the already existing database is encrypted with the given password. This option is used if the database is known to be encrypted, but the specific algorithm used is not known. -

-env env
If a -env argument is given, the database in the specified Berkeley DB +
-env env
If a -env argument is given, the database in the specified Berkeley DB environment is renamed. -

--
Mark the end of the command arguments. +
--
Mark the end of the command arguments.

The berkdb dbrename command returns 0 on success, and in the case of error, a Tcl error is thrown.

@@ -58,6 +57,6 @@ is thrown.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/db_stat.html b/db/docs/api_tcl/db_stat.html index e85362a68..e9bc39211 100644 --- a/db/docs/api_tcl/db_stat.html +++ b/db/docs/api_tcl/db_stat.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: db stat - + -

db stat

API -Ref -
+Ref +


db stat
@@ -28,8 +27,8 @@
 

The db stat command returns a list of name/value pairs comprising the statistics of the database.

The options are as follows:

-

-

-faststat
Return only that information which does not require a traversal +
+
-faststat
Return only that information which does not require a traversal of the database.

In the case of error, a Tcl error is thrown.

@@ -37,6 +36,6 @@ of the database.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/db_sync.html b/db/docs/api_tcl/db_sync.html index 49cbe142e..77d46b25b 100644 --- a/db/docs/api_tcl/db_sync.html +++ b/db/docs/api_tcl/db_sync.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: db sync - + -

db sync

API -Ref -
+Ref +


db sync
@@ -33,6 +32,6 @@ is thrown.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/db_truncate.html b/db/docs/api_tcl/db_truncate.html index b2fd10bb4..1366e68db 100644 --- a/db/docs/api_tcl/db_truncate.html +++ b/db/docs/api_tcl/db_truncate.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: db truncate - + -

db truncate

API -Ref -
+Ref +


db truncate
@@ -28,11 +27,11 @@
 

Description(db truncate)

Empties the database, discarding all records it contains.

The options are as follows:

-

-

-auto_commit
Enclose the call within a transaction. If the call succeeds, changes +
+
-auto_commit
Enclose the call within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, the operation will have made no changes. -

-txn txnid
If the operation is to be +
-txn txnid
If the operation is to be transaction-protected (other than by specifying the -auto_commit flag), the txnid parameter is a transaction handle returned from env txn. @@ -44,6 +43,6 @@ from the database on success.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/dbc_close.html b/db/docs/api_tcl/dbc_close.html index 4591cdfa0..5c78090a5 100644 --- a/db/docs/api_tcl/dbc_close.html +++ b/db/docs/api_tcl/dbc_close.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: db close - + -

dbc close

API -Ref -
+Ref +


dbc close
@@ -33,6 +32,6 @@ is thrown.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/dbc_del.html b/db/docs/api_tcl/dbc_del.html index b2593d20c..c433f816e 100644 --- a/db/docs/api_tcl/dbc_del.html +++ b/db/docs/api_tcl/dbc_del.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: db del - + -

dbc del

API -Ref -
+Ref +


dbc del
@@ -35,6 +34,6 @@ is thrown.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/dbc_dup.html b/db/docs/api_tcl/dbc_dup.html index 2fdfb9b89..20710e4ed 100644 --- a/db/docs/api_tcl/dbc_dup.html +++ b/db/docs/api_tcl/dbc_dup.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: db dup - + -

dbc dup

API -Ref -
+Ref +


dbc dup
@@ -30,8 +29,8 @@ that uses the same transaction and locker ID as the original cursor. This
 is useful when an application is using locking and requires two or more
 cursors in the same thread of control.

The options are as follows:

-

-

-position
The newly created cursor is initialized to refer to the same position +
+
-position
The newly created cursor is initialized to refer to the same position in the database as the original cursor and hold the same locks. If the -position flag is not specified, the created cursor is uninitialized and will behave like a cursor newly created using the @@ -43,6 +42,6 @@ is thrown.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/dbc_get.html b/db/docs/api_tcl/dbc_get.html index 083e0e8fc..27e9db054 100644 --- a/db/docs/api_tcl/dbc_get.html +++ b/db/docs/api_tcl/dbc_get.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: db get - + -

dbc get

API -Ref -
+Ref +


dbc get
@@ -54,25 +53,25 @@ the case of the -get_recno option, dbc get returns a list
 of the record number.  In the case of the -join_item option,
 dbc get returns a list containing the joined key.

The options are as follows:

-

-

-current
Return the key/data pair to which the cursor currently refers. +
+
-current
Return the key/data pair to which the cursor currently refers.

If the cursor key/data pair was deleted, dbc get will return an empty list.

-

-first
The cursor is set to refer to the first key/data pair of the database, and +
-first
The cursor is set to refer to the first key/data pair of the database, and that pair is returned. In the presence of duplicate key values, the first data item in the set of duplicates is returned.

If the database is a Queue or Recno database, dbc get using the -first option will skip any keys that exist but were never explicitly created by the application, or were created and later deleted.

If the database is empty, dbc get will return an empty list.

-

-last
The cursor is set to refer to the last key/data pair of the database, and +
-last
The cursor is set to refer to the last key/data pair of the database, and that pair is returned. In the presence of duplicate key values, the last data item in the set of duplicates is returned.

If the database is a Queue or Recno database, dbc get using the -last option will skip any keys that exist but were never explicitly created by the application, or were created and later deleted.

If the database is empty, dbc get will return an empty list.

-

-next
If the cursor is not yet initialized, the -next option is +
-next
If the cursor is not yet initialized, the -next option is identical to -first.

Otherwise, the cursor is moved to the next key/data pair of the database, and that pair is returned. In the presence of duplicate key values, the @@ -82,17 +81,17 @@ value of the key may not change.

explicitly created by the application, or were created and later deleted.

If the cursor is already on the last record in the database, dbc get will return an empty list.

-

-nextdup
If the next key/data pair of the database is a duplicate record for the +
-nextdup
If the next key/data pair of the database is a duplicate record for the current key/data pair, the cursor is moved to the next key/data pair of the database, and that pair is returned. Otherwise, dbc get will return an empty list. -

-nextnodup
If the cursor is not yet initialized, the -nextnodup option is +
-nextnodup
If the cursor is not yet initialized, the -nextnodup option is identical to -first.

Otherwise, the cursor is moved to the next non-duplicate key/data pair of the database, and that pair is returned.

If no non-duplicate key/data pairs occur after the cursor position in the database, dbc get will return an empty list.

-

-prev
If the cursor is not yet initialized, -prev is identical to +
-prev
If the cursor is not yet initialized, -prev is identical to -last.

Otherwise, the cursor is moved to the previous key/data pair of the database, and that pair is returned. In the presence of duplicate key @@ -102,13 +101,13 @@ values, the value of the key may not change.

created by the application, or were created and later deleted.

If the cursor is already on the first record in the database, dbc get will return an empty list.

-

-prevnodup
If the cursor is not yet initialized, the -prevnodup option is +
-prevnodup
If the cursor is not yet initialized, the -prevnodup option is identical to -last.

Otherwise, the cursor is moved to the previous non-duplicate key/data pair of the database, and that pair is returned.

If no non-duplicate key/data pairs occur before the cursor position in the database, dbc get will return an empty list.

-

-set
Move the cursor to the specified key/data pair of the database, and return +
-set
Move the cursor to the specified key/data pair of the database, and return the datum associated with the given key.

In the presence of duplicate key values, dbc get will return the first data item for the given key.

@@ -116,38 +115,38 @@ first data item for the given key.

but was never explicitly created by the application or was later deleted, dbc get will return an empty list.

If no matching keys are found, dbc get will return an empty list.

-

-set_range
The -set_range option is identical to the -set option, +
-set_range
The -set_range option is identical to the -set option, except that the key is returned as well as the data item, and, in the case of the Btree access method, the returned key/data pair is the smallest key greater than or equal to the specified key (as determined by the comparison function), permitting partial key matches and range searches. -

-get_both
The -get_both option is identical to the -set option, +
-get_both
The -get_both option is identical to the -set option, except that both the key and the data arguments must be matched by the key and data item in the database.

For -get_both to be specified, the underlying database must be of type Btree or Hash.

-

-set_recno
Move the cursor to the specific numbered record of the database, and +
-set_recno
Move the cursor to the specific numbered record of the database, and return the associated key/data pair. The key must be a record number.

For the -set_recno option to be specified, the underlying database must be of type Btree, and it must have been created with the -recnum option.

-

-get_recno
Return a list of the record number associated with the current cursor +
-get_recno
Return a list of the record number associated with the current cursor position. No key argument should be specified.

For -get_recno to be specified, the underlying database must be of type Btree, and it must have been created with the -recnum option.

-

-join_item
Do not use the data value found in all the cursors as a lookup key for +
-join_item
Do not use the data value found in all the cursors as a lookup key for the primary database, but simply return it in the key parameter instead. The data parameter is left unchanged.

For -join_item to be specified, the cursor must have been created by the db join command.

-

-partial {offset length}
The dlen bytes starting doff bytes from the beginning +
-partial {offset length}
The dlen bytes starting doff bytes from the beginning of the retrieved data record are returned as if they comprised the entire record. If any or all of the specified bytes do not exist in the record, the command is successful and any existing bytes are returned. -

-rmw
Acquire write locks instead of read locks when doing the retrieval. Setting +
-rmw
Acquire write locks instead of read locks when doing the retrieval. Setting this flag may decrease the likelihood of deadlock during a read-modify-write cycle by immediately acquiring the write lock during the read part of the cycle so that another thread of control acquiring a read lock for the same @@ -164,6 +163,6 @@ exist an empty list is returned.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/dbc_put.html b/db/docs/api_tcl/dbc_put.html index d00b5a12a..91a421aef 100644 --- a/db/docs/api_tcl/dbc_put.html +++ b/db/docs/api_tcl/dbc_put.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: dbc put - + -

dbc put

API -Ref -
+Ref +


dbc put
@@ -36,8 +35,8 @@ dbc put
 

Description(dbc put)

The dbc put command stores the specified key/data pair into the database. One of the following options must be specified:

-

-

-after
In the case of the Btree and Hash access methods, insert the data element +
+
-after
In the case of the Btree and Hash access methods, insert the data element as a duplicate element of the key to which the cursor refers. The new element appears immediately after the current cursor position. It is an error to specify -after if the underlying Btree or Hash database @@ -55,7 +54,7 @@ the key parameter is ignored. See berkdb open for more information.

If the current cursor record has already been deleted, and the underlying access method is Hash, dbc put will throw a Tcl error. If the underlying access method is Btree or Recno, the operation will succeed.

-

-before
In the case of the Btree and Hash access methods, insert the data element +
-before
In the case of the Btree and Hash access methods, insert the data element as a duplicate element of the key to which the cursor refers. The new element appears immediately before the current cursor position. It is an error to specify -before if the underlying Btree or Hash database @@ -73,7 +72,7 @@ key parameter is ignored. See berkdb open for more information.

If the current cursor record has already been deleted and the underlying access method is Hash, dbc put will throw a Tcl error. If the underlying access method is Btree or Recno, the operation will succeed.

-

-current
Overwrite the data of the key/data pair to which the cursor refers with +
-current
Overwrite the data of the key/data pair to which the cursor refers with the specified data item. No key argument should be specified.

If the -dupsort option was specified to berkdb open and the data item of the key/data pair to which the cursor refers does not @@ -83,7 +82,7 @@ error.

access method is Hash, dbc put will throw a Tcl error. If the underlying access method is Btree, Queue, or Recno, the operation will succeed.

-

-keyfirst
In the case of the Btree and Hash access methods, insert the specified +
-keyfirst
In the case of the Btree and Hash access methods, insert the specified key/data pair into the database.

If the key already exists in the database, and the -dupsort option was specified to berkdb open, the inserted data item is added in its @@ -92,7 +91,7 @@ sorted location. If the key already exists in the database, and the as the first of the data items for that key.

The -keyfirst option may not be specified to the Queue or Recno access methods.

-

-keylast
In the case of the Btree and Hash access methods, insert the specified +
-keylast
In the case of the Btree and Hash access methods, insert the specified key/data pair into the database.

If the key already exists in the database, and the -dupsort option was specified to berkdb open, the inserted data item is added in its @@ -101,16 +100,17 @@ sorted location. If the key already exists in the database, and the as the last of the data items for that key.

The -keylast option may not be specified to the Queue or Recno access methods.

-

-partial {doff dlen}

The dlen bytes starting doff bytes from the beginning +

-partial {doff dlen}

The dlen bytes starting doff bytes from the beginning of the specified key's data record are replaced by the data specified by the data and size structure elements. If dlen is smaller than the length of the supplied data, the record will grow; if dlen is larger than the length of the supplied data, the record will shrink. If the specified bytes do not exist, the record will be extended using nul bytes as necessary, and the dbc put call will succeed. +

It is an error to attempt a partial put using the dbc put command in a database that supports duplicate records. Partial puts in databases supporting -duplicate records must be done using a dbc put command.

+duplicate records must be done using a dbc put command.

It is an error to attempt a partial put with differing dlen and supplied data length values in Queue or Recno databases with fixed-length records.

@@ -129,6 +129,6 @@ is thrown.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/env_close.html b/db/docs/api_tcl/env_close.html index 1f381b887..e14d885a4 100644 --- a/db/docs/api_tcl/env_close.html +++ b/db/docs/api_tcl/env_close.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: env close - + -

env close

API -Ref -
+Ref +


env close
@@ -39,6 +38,6 @@ is thrown.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/env_dbremove.html b/db/docs/api_tcl/env_dbremove.html index 0679c5747..384f83437 100644 --- a/db/docs/api_tcl/env_dbremove.html +++ b/db/docs/api_tcl/env_dbremove.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: env dbremove - + -

env dbremove

API -Ref -
+Ref +


env dbremove
@@ -30,11 +29,11 @@
 

Description(env dbremove)

Remove the Berkeley DB database file.

The options are as follows:

-

-

-auto_commit
Enclose the call within a transaction. If the call succeeds, changes +
+
-auto_commit
Enclose the call within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, the operation will have made no changes. -

-txn txnid
If the operation is to be +
-txn txnid
If the operation is to be transaction-protected (other than by specifying the -auto_commit flag), the txnid parameter is a transaction handle returned from env txn. @@ -45,6 +44,6 @@ is thrown.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/env_dbrename.html b/db/docs/api_tcl/env_dbrename.html index fc9d8aa12..44de8f61a 100644 --- a/db/docs/api_tcl/env_dbrename.html +++ b/db/docs/api_tcl/env_dbrename.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: env dbrename - + -

env dbrename

API -Ref -
+Ref +


env dbrename
@@ -31,11 +30,11 @@
 

Description(env dbrename)

Rename the Berkeley DB database file to newname.

The options are as follows:

-

-

-auto_commit
Enclose the call within a transaction. If the call succeeds, changes +
+
-auto_commit
Enclose the call within a transaction. If the call succeeds, changes made by the operation will be recoverable. If the call fails, the operation will have made no changes. -

-txn txnid
If the operation is to be +
-txn txnid
If the operation is to be transaction-protected (other than by specifying the -auto_commit flag), the txnid parameter is a transaction handle returned from env txn. @@ -46,6 +45,6 @@ is thrown.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/env_open.html b/db/docs/api_tcl/env_open.html index d73f785c3..6377c1e04 100644 --- a/db/docs/api_tcl/env_open.html +++ b/db/docs/api_tcl/env_open.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: berkdb env - + -

berkdb env

API -Ref -
+Ref +


berkdb env
@@ -51,8 +50,8 @@ accesses the environment methods.  The command automatically initializes
 the Shared Memory Buffer Pool subsystem.  This subsystem is used
 whenever the application is using any Berkeley DB access method.

The options are as follows:

-

-

-cachesize {gbytes bytes ncache}
Set the size of the database's shared memory buffer pool (that is, the +
+
-cachesize {gbytes bytes ncache}
Set the size of the database's shared memory buffer pool (that is, the cache), to gbytes gigabytes plus bytes. The cache should be the size of the normal working data set of the application, with some small amount of additional memory for unusual situations. @@ -71,16 +70,16 @@ cache will be allocated contiguously in memory. If it is greater than separate pieces of memory.

For information on tuning the Berkeley DB cache size, see Selecting a cache size.

-

-create
Cause Berkeley DB subsystems to create any underlying files, as necessary. -

-data_dir dirname
Specify the environment's data directory as described in +
-create
Cause Berkeley DB subsystems to create any underlying files, as necessary. +
-data_dir dirname
Specify the environment's data directory as described in Berkeley DB File Naming. -

-encryptaes passwd
Specify the database should be encrypted with the given password +
-encryptaes passwd
Specify the database should be encrypted with the given password using the Rijndael/AES (also known as the Advanced Encryption Standard and Federal Information Processing Standard (FIPS) 197) algorithm. -

-encryptany passwd
Specify the already existing environment should be opened +
-encryptany passwd
Specify the already existing environment should be opened with the given password. This option is used if the environment is known to be encrypted, but the specific algorithm used is not known. -

-errfile filename

When an error occurs in the Berkeley DB library, a Berkeley DB error or an error +

-errfile filename

When an error occurs in the Berkeley DB library, a Berkeley DB error or an error return value is returned by the function. In some cases, however, the errno value may be insufficient to completely describe the cause of the error especially during initial application debugging.

@@ -95,18 +94,18 @@ character.

This error-logging enhancement does not slow performance or significantly increase application size, and may be run during normal operation as well as during application debugging.

-

-home directory
The -home argument is described in +
-home directory
The -home argument is described in Berkeley DB File Naming. -

-log_dir dirname
Specify the environment's logging file directory as described in +
-log_dir dirname
Specify the environment's logging file directory as described in Berkeley DB File Naming. -

-mode mode

On UNIX systems, or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by Berkeley DB +

-mode mode

On UNIX systems, or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by Berkeley DB are created with mode mode (as described in chmod(2)) and modified by the process' umask value at the time of creation (see umask(2)). The group ownership of created files is based on the system and directory defaults, and is not further specified by Berkeley DB. If mode is 0, files are created readable and writable by both owner and group. On Windows systems, the mode argument is ignored.

-

-private
Specify that the environment will only be accessed by a single process +
-private
Specify that the environment will only be accessed by a single process (although that process may be multithreaded). This flag has two effects on the Berkeley DB environment. First, all underlying data structures are allocated from per-process memory instead of from shared memory that is @@ -117,22 +116,22 @@ accessing the environment, as it is likely to cause database corruption and unpredictable behavior. For example, if both a server application and the Berkeley DB utility db_stat will access the environment, the -private option should not be specified.

-

-recover
Run normal recovery on this environment before opening it for normal use. +
-recover
Run normal recovery on this environment before opening it for normal use. If this flag is set, the -create option must also be set because the regions will be removed and re-created. -

-recover_fatal
Run catastrophic recovery on this environment before opening it for +
-recover_fatal
Run catastrophic recovery on this environment before opening it for normal use. If this flag is set, the -create option must also be set since the regions will be removed and re-created. -

-shm_key key
Specify a base segment ID for Berkeley DB environment shared memory regions +
-shm_key key
Specify a base segment ID for Berkeley DB environment shared memory regions created in system memory on systems supporting X/Open-style shared memory interfaces, for example, UNIX systems supporting shmget(2) and related System V IPC interfaces. See Shared Memory Regions for more information. -

-system_mem
Allocate memory from system shared memory instead of memory backed by the +
-system_mem
Allocate memory from system shared memory instead of memory backed by the filesystem. See Shared Memory Regions for more information. -

-tmp_dir dirname
Specify the environment's tmp directory, as described in +
-tmp_dir dirname
Specify the environment's tmp directory, as described in Berkeley DB File Naming. -

-txn [nosync]
Initialize the Transaction subsystem. This subsystem is used when +
-txn [nosync]
Initialize the Transaction subsystem. This subsystem is used when recovery and atomicity of multiple operations are important. The -txn option implies the initialization of the logging and locking subsystems as well. @@ -146,17 +145,17 @@ redone.

The number of transactions that are potentially at risk is governed by how often the log is checkpointed (see db_checkpoint for more information) and how many log updates can fit on a single log page.

-

-txn_max max
Set the maximum number of simultaneous transactions that are supported +
-txn_max max
Set the maximum number of simultaneous transactions that are supported by the environment, which bounds the size of backing files. When there are more than the specified number of concurrent transactions, calls to env txn will fail (until some active transactions complete). -

-use_environ
The Berkeley DB process' environment may be permitted to specify information +
-use_environ
The Berkeley DB process' environment may be permitted to specify information to be used when naming files; see Berkeley DB File Naming. Because permitting users to specify which files are used can create security problems, environment information will be used in file naming for all users only if the -use_environ flag is set. -

-use_environ_root
The Berkeley DB process' environment may be permitted to specify information +
-use_environ_root
The Berkeley DB process' environment may be permitted to specify information to be used when naming files; see Berkeley DB File Naming. As permitting users to specify which files are used can create security @@ -171,6 +170,6 @@ systems).

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/env_remove.html b/db/docs/api_tcl/env_remove.html index 62c5c77e4..6fa71f2d5 100644 --- a/db/docs/api_tcl/env_remove.html +++ b/db/docs/api_tcl/env_remove.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: berkdb envremove - + -

berkdb envremove

API -Ref -
+Ref +


berkdb envremove
@@ -33,28 +32,28 @@
 

Description(berkdb envremove)

Remove a Berkeley DB environment.

The options are as follows:

-

-

-data_dir dirname
Specify the environment's data directory, as described in +
+
-data_dir dirname
Specify the environment's data directory, as described in Berkeley DB File Naming. -

-force
If there are processes that have called berkdb env without calling +
-force
If there are processes that have called berkdb env without calling env close (that is, there are processes currently using the environment), berkdb envremove will fail without further action, unless the -force flag is set, in which case berkdb envremove will attempt to remove the environment regardless of any processes still using it. -

-home directory
The -home argument is described in +
-home directory
The -home argument is described in Berkeley DB File Naming. -

-log_dir dirname
Specify the environment's log directory, as described in +
-log_dir dirname
Specify the environment's log directory, as described in Berkeley DB File Naming. -

-tmp_dir dirname
Specify the environment's tmp directory, as described in +
-tmp_dir dirname
Specify the environment's tmp directory, as described in Berkeley DB File Naming. -

-use_environ
The Berkeley DB process' environment may be permitted to specify information +
-use_environ
The Berkeley DB process' environment may be permitted to specify information to be used when naming files; see Berkeley DB File Naming. Because permitting users to specify which files are used can create security problems, environment information will be used in file naming for all users only if the -use_environ flag is set. -

-use_environ_root
The Berkeley DB process' environment may be permitted to specify information +
-use_environ_root
The Berkeley DB process' environment may be permitted to specify information to be used when naming files; see Berkeley DB File Naming. As permitting users to specify which files are used can create security @@ -69,6 +68,6 @@ is thrown.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/tcl_index.html b/db/docs/api_tcl/tcl_index.html index 95ad48fa3..7b6968040 100644 --- a/db/docs/api_tcl/tcl_index.html +++ b/db/docs/api_tcl/tcl_index.html @@ -1,16 +1,16 @@ - + Berkeley DB: Tcl API - +

Tcl API

- + @@ -49,6 +49,6 @@
Tcl CommandDescription
berkdb envCreate an environment handle


berkdb versionReturn version information
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/txn.html b/db/docs/api_tcl/txn.html index 448d8fafe..187e3a02a 100644 --- a/db/docs/api_tcl/txn.html +++ b/db/docs/api_tcl/txn.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: env txn - + -

env txn

API -Ref -
+Ref +


env txn
@@ -34,21 +33,21 @@ X is an integer starting at 0 (for example, env0.txn0 and env0.txn1).
 It is through this Tcl command that the script accesses the transaction
 methods.

The options are as follows:

-

-

-nosync
Do not synchronously flush the log when this transaction commits or +
+
-nosync
Do not synchronously flush the log when this transaction commits or prepares. This means the transaction will exhibit the ACI (atomicity, consistency, and isolation) properties, but not D (durability); that is, database integrity will be maintained, but it is possible that this transaction may be undone during recovery instead of being redone.

This behavior may be set for an entire Berkeley DB environment as part of the berkdb env call.

-

-nowait
If a lock is unavailable for any Berkeley DB operation performed in the context +
-nowait
If a lock is unavailable for any Berkeley DB operation performed in the context of this transaction, throw a Tcl error immediately instead of blocking on the lock. -

-parent txnid
Create the new transaction as a nested transaction, with the specified +
-parent txnid
Create the new transaction as a nested transaction, with the specified transaction indicated as its parent. Transactions may be nested to any level. -

-sync
Synchronously flush the log when this transaction commits or prepares. +
-sync
Synchronously flush the log when this transaction commits or prepares. This means the transaction will exhibit all of the ACID (atomicity, consistency, isolation, and durability) properties.

This behavior is the default for Berkeley DB environments unless the @@ -60,6 +59,6 @@ consistency, isolation, and durability) properties.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/txn_abort.html b/db/docs/api_tcl/txn_abort.html index 21fb840f2..5e69b2fff 100644 --- a/db/docs/api_tcl/txn_abort.html +++ b/db/docs/api_tcl/txn_abort.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: txn abort - + -

txn abort

API -Ref -
+Ref +


txn abort
@@ -42,6 +41,6 @@ is thrown.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/txn_checkpoint.html b/db/docs/api_tcl/txn_checkpoint.html index 5219b9097..5ef717e3e 100644 --- a/db/docs/api_tcl/txn_checkpoint.html +++ b/db/docs/api_tcl/txn_checkpoint.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: env txn_checkpoint - + -

env txn_checkpoint

API -Ref -
+Ref +


env txn_checkpoint
@@ -29,11 +28,11 @@
 

Description(env txn_checkpoint)

The env txn_checkpoint command writes a checkpoint.

The options are as follows:

-

-

-force
The checkpoint will occur regardless of activity level. -

-kbyte kb
The checkpoint will occur only if at least the specified number +
+
-force
The checkpoint will occur regardless of activity level. +
-kbyte kb
The checkpoint will occur only if at least the specified number of kilobytes of log data has been written since the last checkpoint. -

-min minutes
The checkpoint will occur only if at least the specified number +
-min minutes
The checkpoint will occur only if at least the specified number of minutes has passed since the last checkpoint.

In the case of error, a Tcl error is thrown.

@@ -41,6 +40,6 @@ of minutes has passed since the last checkpoint.

APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/txn_commit.html b/db/docs/api_tcl/txn_commit.html index 91c7afdd4..9ca610084 100644 --- a/db/docs/api_tcl/txn_commit.html +++ b/db/docs/api_tcl/txn_commit.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: txn commit - + -

txn commit

API -Ref -
+Ref +


txn commit
@@ -41,15 +40,15 @@ will be aborted.

If the -nosync option is not specified, a commit log record is written and flushed to disk, as are all previously written log records.

The options are as follows:

-

-

-nosync
Do not synchronously flush the log. This means the transaction will +
+
-nosync
Do not synchronously flush the log. This means the transaction will exhibit the ACI (atomicity, consistency, and isolation) properties, but not D (durability); that is, database integrity will be maintained, but it is possible that this transaction may be undone during recovery instead of being redone.

This behavior may be set for an entire Berkeley DB environment as part of the berkdb env call.

-

-sync
Synchronously flush the log. This means the transaction will exhibit +
-sync
Synchronously flush the log. This means the transaction will exhibit all of the ACID (atomicity, consistency, isolation and durability) properties.

This behavior is the default for Berkeley DB environments unless the @@ -66,6 +65,6 @@ is thrown.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/api_tcl/version.html b/db/docs/api_tcl/version.html index a3c989582..ce61504ff 100644 --- a/db/docs/api_tcl/version.html +++ b/db/docs/api_tcl/version.html @@ -1,24 +1,23 @@ - - + + Berkeley DB: berkdb version - + -

berkdb version

API -Ref -
+Ref +


berkdb version
@@ -28,14 +27,14 @@
 

Return a list of the form {major minor patch} for the major, minor and patch levels of the underlying Berkeley DB release.

The options are as follows:

-

-

-string
Return a string with formatted Berkeley DB version information. +
+
-string
Return a string with formatted Berkeley DB version information.

In the case of error, a Tcl error is thrown.


APIRef
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/collections/tutorial/BasicProgram.html b/db/docs/collections/tutorial/BasicProgram.html new file mode 100644 index 000000000..470128711 --- /dev/null +++ b/db/docs/collections/tutorial/BasicProgram.html @@ -0,0 +1,457 @@ + + + + + + Chapter 2.  + The Basic Program + + + + + + + + + +

+
+
+
+
+

Chapter 2.  + The Basic Program +

+
+
+
+
+ +

+ The Basic example is a minimal implementation of the shipment + program. It writes and reads the part, supplier and shipment + databases. +

+

+ The complete source of the final version of the example program + is included in the Berkeley DB distribution. +

+
+
+
+
+

+ Defining Serialized Key and Value Classes +

+
+
+
+
+

+ The key and value classes for each type of shipment record — + Parts, Suppliers and Shipments — are defined as ordinary Java + classes. In this example the serialized form of the key and value + objects is stored directly in the database. Therefore these classes + must implement the standard Java java.io.Serializable interface. A + compact form of Java serialization is used that does not duplicate + the class description in each record. Instead the class + descriptions are stored in the class catalog store, which is + described in the next section. But in all other respects, standard + Java serialization is used. +

+

+ An important point is that instances of these classes are passed + and returned by value, not by reference, when they are stored and + retrieved from the database. This means that changing a key or + value object does not automatically change the database. The object + must be explicitly stored in the database after changing it. To + emphasize this point the key and value classes defined here have no + field setter methods. Setter methods can be defined, but it is + important to remember that calling a setter method will not cause + the change to be stored in the database. How to store and retrieve + objects in the database will be described later. +

+
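For example, the following minimal sketch (not part of the original page) shows that a modified value only reaches the database when it is stored again; the partMap name is an assumption standing in for a java.util.Map view over the part store, which later sections of this tutorial show how to obtain.

import java.util.Map;

public class UpdatePartExample
{
    // Reading returns a copy of the stored value; the database is unchanged until
    // the new value object is stored again with put().
    public static void rename(Map partMap, PartKey key, String newName)
    {
        PartData old = (PartData) partMap.get(key);
        PartData updated = new PartData(newName, old.getColor(),
                                        old.getWeight(), old.getCity());
        partMap.put(key, updated);
    }
}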

+ Each key and value class contains a toString method that is used + to output the contents of the object in the example program. This + is meant for illustration only and is not required for database + objects in general. +

+

+ Notice that the key and value classes defined below do not + contain any references to com.sleepycat packages. An + important characteristic of these classes is that they are + independent of the database. Therefore, they may be easily used in + other contexts and may be defined in a way that is compatible with + other tools and libraries. +

+

+ The PartKey class contains only the Part's Number field. +

+

+ Note that PartKey (as well as SupplierKey below) + contains only a single String field. Instead of defining a specific + class for each type of key, the String class by itself could have + been used. Specific key classes were used to illustrate strong + typing and for consistency in the example. The use of a plain + String as an index key is illustrated in the next example program. + It is up to the developer to use either primitive Java classes such + as String and Integer, or strongly typed classes. When + there is the possibility that fields will be added later to a key + or value, a specific class should be used. + +

+ +
import java.io.Serializable;
+
+public class PartKey implements Serializable
+{
+    private String number;
+
+    public PartKey(String number) {
+        this.number = number;
+    }
+
+    public final String getNumber() {
+        return number;
+    }
+
+    public String toString() {
+        return "[PartKey: number=" + number + ']';
+    }
+} 
+

+ The PartData class contains the Part's Name, Color, + Weight and City fields. +

+ +
import java.io.Serializable;
+
+public class PartData implements Serializable
+{
+    private String name;
+    private String color;
+    private Weight weight;
+    private String city;
+
+    public PartData(String name, String color, Weight weight, String city)
+    {
+        this.name = name;
+        this.color = color;
+        this.weight = weight;
+        this.city = city;
+    }
+
+    public final String getName()
+    {
+        return name;
+    }
+
+    public final String getColor()
+    {
+        return color;
+    }
+
+    public final Weight getWeight()
+    {
+        return weight;
+    }
+
+    public final String getCity()
+    {
+        return city;
+    }
+
+    public String toString()
+    {
+        return "[PartData: name=" + name +
+               " color=" + color +
+               " weight=" + weight +
+               " city=" + city + ']';
+    }
+} 
+

+ The Weight class is also defined here, and is used as the + type of the Part's Weight field. Just as in standard Java + serialization, nothing special is needed to store nested objects as + long as they are all Serializable. +

+ +
import java.io.Serializable;
+
+public class Weight implements Serializable
+{
+    public final static String GRAMS = "grams";
+    public final static String OUNCES = "ounces";
+
+    private double amount;
+    private String units;
+
+    public Weight(double amount, String units)
+    {
+        this.amount = amount;
+        this.units = units;
+    }
+
+    public final double getAmount()
+    {
+        return amount;
+    }
+
+    public final String getUnits()
+    {
+        return units;
+    }
+
+    public String toString()
+    {
+        return "[" + amount + ' ' + units + ']';
+    }
+} 
+

+ The SupplierKey class contains the Supplier's Number + field. +

+ +
import java.io.Serializable;
+
+public class SupplierKey implements Serializable
+{
+    private String number;
+
+    public SupplierKey(String number)
+    {
+        this.number = number;
+    }
+
+    public final String getNumber()
+    {
+        return number;
+    }
+
+    public String toString()
+    {
+        return "[SupplierKey: number=" + number + ']';
+    }
+} 
+

+ The SupplierData class contains the Supplier's Name, + Status and City fields. +

+ +
import java.io.Serializable;
+
+public class SupplierData implements Serializable
+{
+    private String name;
+    private int status;
+    private String city;
+
+    public SupplierData(String name, int status, String city)
+    {
+        this.name = name;
+        this.status = status;
+        this.city = city;
+    }
+
+    public final String getName()
+    {
+        return name;
+    }
+
+    public final int getStatus()
+    {
+        return status;
+    }
+
+    public final String getCity()
+    {
+        return city;
+    }
+
+    public String toString()
+    {
+        return "[SupplierData: name=" + name +
+               " status=" + status +
+               " city=" + city + ']';
+    }
+}
+	
+

+ The ShipmentKey class contains the keys of both the Part + and Supplier. +

+ +
import java.io.Serializable;
+
+public class ShipmentKey implements Serializable
+{
+    private String partNumber;
+    private String supplierNumber;
+
+    public ShipmentKey(String partNumber, String supplierNumber)
+    {
+        this.partNumber = partNumber;
+        this.supplierNumber = supplierNumber;
+    }
+
+    public final String getPartNumber()
+    {
+        return partNumber;
+    }
+
+    public final String getSupplierNumber()
+    {
+        return supplierNumber;
+    }
+
+    public String toString()
+    {
+        return "[ShipmentKey: supplier=" + supplierNumber +
+                " part=" + partNumber + ']';
+    }
+} 
+

+ The ShipmentData class contains only the Shipment's + Quantity field. Like PartKey and SupplierKey, + ShipmentData contains only a single primitive field. + Therefore the Integer class could have been used instead of + defining a specific value class. +

+ +
import java.io.Serializable;
+
+public class ShipmentData implements Serializable
+{
+    private int quantity;
+
+    public ShipmentData(int quantity)
+    {
+        this.quantity = quantity;
+    }
+
+    public final int getQuantity()
+    {
+        return quantity;
+    }
+
+    public String toString()
+    {
+        return "[ShipmentData: quantity=" + quantity + ']';
+    }
+} 
+
+
+ + + diff --git a/db/docs/collections/tutorial/BerkeleyDB-Java-Collections.pdf b/db/docs/collections/tutorial/BerkeleyDB-Java-Collections.pdf new file mode 100644 index 000000000..9435e5470 Binary files /dev/null and b/db/docs/collections/tutorial/BerkeleyDB-Java-Collections.pdf differ diff --git a/db/docs/collections/tutorial/Entity.html b/db/docs/collections/tutorial/Entity.html new file mode 100644 index 000000000..7937d8c22 --- /dev/null +++ b/db/docs/collections/tutorial/Entity.html @@ -0,0 +1,359 @@ + + + + + + Chapter 4.  + Using Entity Classes + + + + + + + + + + +
+
+
+
+

Chapter 4.  + Using Entity Classes +

+
+
+
+
+ +

+ In the prior examples, the keys and values of each store were + represented using separate classes. For example, a PartKey + and a PartData class were used. Many times it is desirable + to have a single class representing both the key and the value, for + example, a Part class. +

+

+ Such a combined key and value class is called an entity + class and is used along with an entity binding. Entity + bindings combine a key and a value into an entity when reading a + record from a collection, and split an entity into a key and a + value when writing a record to a collection. Entity bindings are + used in place of value bindings, and entity objects are used with + collections in place of value objects. +

+

+ Some reasons for using entities are: +

+
+
    +
  • +

    + When the key is a property of an entity object representing the + record as a whole, the object's identity and concept are often + clearer than with key and value objects that are disjoint. +

    +
  • +
  • +

    + A single entity object per record is often more convenient to + use than two objects. +

    +
  • +
+
+

+ Of course, instead of using an entity binding, you could simply + create the entity yourself after reading the key and value from a + collection, and split the entity into a key and value yourself + before writing it to a collection. But this would detract from the + convenience of using the Java collections API. It is convenient + to obtain a Part object directly from + Map.get + + and to add a Part object using + Set.add. + Collections having entity bindings can be used naturally without + combining and splitting objects each time a collection method is + called; however, an entity binding class must be defined by the + application. +

+
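As a rough illustration (the partMap and partSet names are assumptions for collection views over the part store, similar to the views built later in this tutorial), an entity is read and written as a single object:

import java.util.Map;
import java.util.Set;

public class EntityUsageExample
{
    public static void demo(Map partMap, Set partSet)
    {
        // Reading: the entity binding combines the stored key and value into one Part.
        Part part = (Part) partMap.get(new PartKey("P1"));
        System.out.println(part);

        // Writing: the entity binding splits the Part back into a key and a value.
        partSet.add(new Part("P2", "Bolt", "Green",
                             new Weight(17.0, Weight.GRAMS), "Paris"));
    }
}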

+ In addition to showing how to use entity bindings, this example + illustrates a key feature of all bindings: Bindings are independent + of database storage parameters and formats. Compare this example to + the prior Index example and you'll see that the Sample and + SampleViews classes have been changed to use entity + bindings, but the SampleDatabase class was not changed at + all. In fact, the Entity program and the Index program can be used + interchangeably to access the same physical database files. This + demonstrates that bindings are only a "view" onto the physical + stored data. +

+

+ Warning: When using multiple bindings for the same + database, it is the application's responsibility to ensure that the + same format is used for all bindings. For example, a serial binding + and a tuple binding cannot be used to access the same records. +

+

+ The complete source of the final version of the example program + is included in the Berkeley DB distribution. +

+
+
+
+
+

+ Defining Entity Classes +

+
+
+
+
+

+ As described in the prior section, entity classes are + combined key/value classes that are managed by entity bindings. In + this example the Part, Supplier and Shipment + classes are entity classes. These classes contain fields that are a + union of the fields of the key and value classes that were defined + earlier for each store. +

+

+ In general, entity classes may be defined in any way desired by + the application. The entity binding, which is also defined by the + application, is responsible for mapping between key/value objects + and entity objects. +

+
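A minimal sketch of such an entity binding for the Part class is shown below. It assumes the com.sleepycat.bind.serial.SerialSerialBinding helper class; check the exact package and method signatures against the release you are using.

import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.SerialSerialBinding;

public class PartBinding extends SerialSerialBinding
{
    public PartBinding(ClassCatalog classCatalog, Class keyClass, Class dataClass)
    {
        super(classCatalog, keyClass, dataClass);
    }

    // Combine the deserialized key and value objects into a single entity.
    public Object entryToObject(Object keyInput, Object dataInput)
    {
        PartKey key = (PartKey) keyInput;
        PartData data = (PartData) dataInput;
        return new Part(key.getNumber(), data.getName(), data.getColor(),
                        data.getWeight(), data.getCity());
    }

    // Split the entity back into the key object to be serialized.
    public Object objectToKey(Object object)
    {
        Part part = (Part) object;
        return new PartKey(part.getNumber());
    }

    // Split the entity back into the value object to be serialized.
    public Object objectToData(Object object)
    {
        Part part = (Part) object;
        return new PartData(part.getName(), part.getColor(),
                            part.getWeight(), part.getCity());
    }
}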

+ The Part, Supplier and Shipment + entity classes are + defined below. +

+

+ An important difference between the entity classes defined here + and the key and value classes defined earlier is that the entity + classes are not serializable (do not implement the + Serializable + + interface). This is because the entity classes are not directly + stored. The entity binding decomposes an entity object into key and + value objects, and only the key and value objects are serialized + for storage. +

+

+ One advantage of using entities can already be seen in the + toString() method of the classes below. These return debugging + output for the combined key and value, and will be used later to + create a listing of the database that is more readable than in the + prior examples. +

+ +
public class Part
+{
+    private String number;
+    private String name;
+    private String color;
+    private Weight weight;
+    private String city;
+
+    public Part(String number, String name, String color, Weight weight,
+                String city)
+    {
+        this.number = number;
+        this.name = name;
+        this.color = color;
+        this.weight = weight;
+        this.city = city;
+    }
+
+    public final String getNumber()
+    {
+        return number;
+    }
+
+    public final String getName()
+    {
+        return name;
+    }
+
+    public final String getColor()
+    {
+        return color;
+    }
+
+    public final Weight getWeight()
+    {
+        return weight;
+    }
+
+    public final String getCity()
+    {
+        return city;
+    }
+
+    public String toString()
+    {
+        return "Part: number=" + number +
+               " name=" + name +
+               " color=" + color +
+               " weight=" + weight +
+               " city=" + city + '.';
+    }
+} 
+ +
public class Supplier
+{
+    private String number;
+    private String name;
+    private int status;
+    private String city;
+
+    public Supplier(String number, String name, int status, String city)
+    {
+        this.number = number;
+        this.name = name;
+        this.status = status;
+        this.city = city;
+    }
+
+    public final String getNumber()
+    {
+        return number;
+    }
+
+    public final String getName()
+    {
+        return name;
+    }
+
+    public final int getStatus()
+    {
+        return status;
+    }
+
+    public final String getCity()
+    {
+        return city;
+    }
+
+    public String toString()
+    {
+        return "Supplier: number=" + number +
+               " name=" + name +
+               " status=" + status +
+               " city=" + city + '.';
+    }
+}  
+ +
public class Shipment
+{
+    private String partNumber;
+    private String supplierNumber;
+    private int quantity;
+
+    public Shipment(String partNumber, String supplierNumber, int quantity)
+    {
+        this.partNumber = partNumber;
+        this.supplierNumber = supplierNumber;
+        this.quantity = quantity;
+    }
+
+    public final String getPartNumber()
+    {
+        return partNumber;
+    }
+
+    public final String getSupplierNumber()
+    {
+        return supplierNumber;
+    }
+
+    public final int getQuantity()
+    {
+        return quantity;
+    }
+
+    public String toString()
+    {
+        return "Shipment: part=" + partNumber +
+                " supplier=" + supplierNumber +
+                " quantity=" + quantity + '.';
+    }
+}  
+
+
+ + + diff --git a/db/docs/collections/tutorial/SerializableEntity.html b/db/docs/collections/tutorial/SerializableEntity.html new file mode 100644 index 000000000..9f63bab4e --- /dev/null +++ b/db/docs/collections/tutorial/SerializableEntity.html @@ -0,0 +1,348 @@ + + + + + + Chapter 6.  + Using Serializable Entities + + + + + + + + + + +
+
+
+
+

Chapter 6.  + Using Serializable Entities +

+
+
+
+
+ +

+ In the prior examples that used entities (the Entity and Tuple examples) you + may have noticed the redundancy between the serializable value + classes and the entity classes. An entity class by definition + contains all properties of the value class as well as all + properties of the key class. +

+

+ When using serializable values it is possible to remove this + redundancy by changing the entity class in two ways: +

+
+
    +
  • +

    + Make the entity class serializable, so it can be used in place + of the value class. +

    +
  • +
  • +

    + Make the key fields transient, so they are not redundantly + stored in the record. +

    +
  • +
+
+

+ The modified entity class can then serve double-duty: It can be + serialized and stored as the record value, and it can be used as + the entity class as usual along with the Java collections API. The + PartData, SupplierData and ShipmentData + classes can then be removed. +

+

+ Transient fields are defined in Java as fields that are not + stored in the serialized form of an object. Therefore, when an + object is deserialized the transient fields must be explicitly + initialized. Since the entity binding is responsible for creating + entity objects, it is the natural place to initialize the transient + key fields. +

+
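For example, a binding along the following lines restores the transient key after the serialized value has been read. This is a sketch assuming the com.sleepycat.bind.serial.TupleSerialBinding helper and the tuple keys used in this example; verify the class and method names against your release.

import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.TupleSerialBinding;
import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;

public class PartBinding extends TupleSerialBinding
{
    public PartBinding(ClassCatalog classCatalog, Class dataClass)
    {
        super(classCatalog, dataClass);
    }

    // The deserialized value is already a Part; only its transient key must be set.
    public Object entryToObject(TupleInput keyInput, Object dataInput)
    {
        String number = keyInput.readString();
        Part part = (Part) dataInput;
        part.setKey(number);
        return part;
    }

    // Write the entity's key field as a tuple.
    public void objectToKey(Object object, TupleOutput output)
    {
        Part part = (Part) object;
        output.writeString(part.getNumber());
    }

    // The entity itself is serialized as the record value; the transient key is omitted.
    public Object objectToData(Object object)
    {
        return object;
    }
}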

+ Note that it is not strictly necessary to make the key fields of + a serializable entity class transient. If this is not done, the key + will simply be stored redundantly in the record's value. This extra + storage may or may not be acceptable to an application. But since + we are using tuple keys and an entity binding class must be + implemented anyway to extract the key from the entity, it is + sensible to use transient key fields to reduce the record size. Of + course there may be a reason that transient fields are not desired; + for example, if an application wants to serialize the entity + objects for other purposes, then using transient fields should be + avoided. +

+

+ The complete source of the final version of the example program + is included in the Berkeley DB distribution. +

+
+
+
+
+

+ Using Transient Fields in an Entity Class +

+
+
+
+
+

+ The entity classes in this example are redefined such that they + can be used both as serializable value classes and as entity + classes. Compared to the prior example there are three changes to + the Part, Supplier and Shipment entity + classes: +

+
+
    +
  • +

    + Each class now implements the Serializable + interface. +

    +
  • +
  • +

    + The key fields in each class are declared as transient. +

    +
  • +
  • +

    + A package-private setKey() method is added to each class + for initializing the transient key fields. This method will be + called from the entity bindings. +

    +
  • +
+
+ +
import java.io.Serializable;
+...
+public class Part implements Serializable
+{
+    private transient String number;
+    private String name;
+    private String color;
+    private Weight weight;
+    private String city;
+
+    public Part(String number, String name, String color, Weight weight,
+                String city)
+    {
+        this.number = number;
+        this.name = name;
+        this.color = color;
+        this.weight = weight;
+        this.city = city;
+    }
+
+    final void setKey(String number)
+    {
+        this.number = number;
+    }
+
+    public final String getNumber()
+    {
+        return number;
+    }
+
+    public final String getName()
+    {
+        return name;
+    }
+
+    public final String getColor()
+    {
+        return color;
+    }
+
+    public final Weight getWeight()
+    {
+        return weight;
+    }
+
+    public final String getCity()
+    {
+        return city;
+    }
+
+    public String toString()
+    {
+        return "Part: number=" + number +
+               " name=" + name +
+               " color=" + color +
+               " weight=" + weight +
+               " city=" + city + '.';
+    }
+}
+...
+public class Supplier implements Serializable
+{
+    private transient String number;
+    private String name;
+    private int status;
+    private String city;
+
+    public Supplier(String number, String name, int status, String city)
+    {
+        this.number = number;
+        this.name = name;
+        this.status = status;
+        this.city = city;
+    }
+
+    void setKey(String number)
+    {
+        this.number = number;
+    }
+
+    public final String getNumber()
+    {
+        return number;
+    }
+
+    public final String getName()
+    {
+        return name;
+    }
+
+    public final int getStatus()
+    {
+        return status;
+    }
+
+    public final String getCity()
+    {
+        return city;
+    }
+
+    public String toString()
+    {
+        return "Supplier: number=" + number +
+               " name=" + name +
+               " status=" + status +
+               " city=" + city + '.';
+    }
+}
+...
+public class Shipment implements Serializable
+{
+    private transient String partNumber;
+    private transient String supplierNumber;
+    private int quantity;
+
+    public Shipment(String partNumber, String supplierNumber, int quantity)
+    {
+        this.partNumber = partNumber;
+        this.supplierNumber = supplierNumber;
+        this.quantity = quantity;
+    }
+
+    void setKey(String partNumber, String supplierNumber)
+    {
+        this.partNumber = partNumber;
+        this.supplierNumber = supplierNumber;
+    } 
+
+    public final String getPartNumber()
+    {
+        return partNumber;
+    }
+
+    public final String getSupplierNumber()
+    {
+        return supplierNumber;
+    }
+
+    public final int getQuantity()
+    {
+        return quantity;
+    }
+
+    public String toString()
+    {
+        return "Shipment: part=" + partNumber +
+                " supplier=" + supplierNumber +
+                " quantity=" + quantity + '.';
+    }
+}
+	
+
+
+ + + diff --git a/db/docs/collections/tutorial/SerializedObjectStorage.html b/db/docs/collections/tutorial/SerializedObjectStorage.html new file mode 100644 index 000000000..bb088dae7 --- /dev/null +++ b/db/docs/collections/tutorial/SerializedObjectStorage.html @@ -0,0 +1,84 @@ + + + + + + + Serialized Object Storage + + + + + + + + + +
+
+
+
+

+ Serialized Object Storage +

+
+
+
+
+

+ Serialization of an object graph includes class information as + well as instance information. If more than one instance of the same + class is serialized as separate serialization operations then the + class information exists more than once. To eliminate this + inefficiency the + StoredClassCatalog + + class will store the class format for all database records stored + using a + SerialBinding. + Refer to the + ship sample code for examples (the class + SampleDatabase in + examples_java/src/com/sleepycat/examples/collections/ship/basic/SampleDatabase.java + is a good place to start). +

+
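The wiring looks roughly like the following sketch. The catalogDb handle is an assumption (opening the environment and databases is omitted), and the class names assume the com.sleepycat.bind.serial and com.sleepycat.db packages; check them against your release.

import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.SerialBinding;
import com.sleepycat.bind.serial.StoredClassCatalog;
import com.sleepycat.db.Database;
import com.sleepycat.db.DatabaseException;

public class CatalogExample
{
    // One catalog database stores the class descriptions for all serial formats,
    // so each record carries only a compact reference to its class.
    public static SerialBinding makePartDataBinding(Database catalogDb)
        throws DatabaseException
    {
        ClassCatalog catalog = new StoredClassCatalog(catalogDb);
        return new SerialBinding(catalog, PartData.class);
    }
}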
+ + + diff --git a/db/docs/collections/tutorial/Summary.html b/db/docs/collections/tutorial/Summary.html new file mode 100644 index 000000000..efa436a60 --- /dev/null +++ b/db/docs/collections/tutorial/Summary.html @@ -0,0 +1,193 @@ + + + + + + Chapter 7.  + Summary + + + + + + + + + + +
+
+
+
+

Chapter 7.  + Summary +

+
+
+
+
+

+ In summary, the Sleepycat Java Collections API tutorial has + demonstrated how to create different types of bindings, as well as + how to use the basic facilities of the Sleepycat Java Collections API: + the environment, databases, secondary indices, collections, and + transactions. The final approach illustrated by the last example + program, Serializable Entity, uses tuple keys and serial entity + values. Hopefully it is clear that any type of object-to-data + binding may be implemented by an application and used along with + standard Java collections. +

+

+ The following table summarizes the differences between the + examples in the tutorial. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Example                      | Key    | Value  | Entity | Comments
The Basic Program            | Serial | Serial | No     | The shipment program
Using Secondary Indices      | Serial | Serial | No     | Secondary indices
Using Entity Classes         | Serial | Serial | Yes    | Combining the key and value in a single object
Using Tuples                 | Tuple  | Serial | Yes    | Compact ordered keys
Using Serializable Entities  | Tuple  | Serial | Yes    | One serializable class for entities and values
+
+

+ Having completed this tutorial, you may want to explore how other types of + bindings can be implemented. The bindings shown in this tutorial + are all external bindings, meaning that the data classes + themselves contain none of the binding implementation. It is also + possible to implement internal bindings, where the data + classes implement the binding. +

+

+ Internal bindings are called marshalled bindings in the + Sleepycat Java Collections API, and in this model each data class + implements a marshalling interface. A single external binding class + that understands the marshalling interface is used to call the + internal bindings of each data object, and therefore the overall + model and API is unchanged. To learn about marshalled bindings, see + the + + + + marshal and factory examples that + came with your DB distribution (you can find them in + + <INSTALL_DIR>examples_java/src/com/sleepycat/examples/collections/ship + where <INSTALL_DIR> is the location where you + unpacked your DB distribution). + + + These examples continue building on + the example programs used in the tutorial. The Marshal program is + the next program following the Serializable Entity program, and the + Factory program follows the Marshal program. The source code + comments in these examples explain their differences. +

+
+ + + diff --git a/db/docs/collections/tutorial/Tuple.html b/db/docs/collections/tutorial/Tuple.html new file mode 100644 index 000000000..956358b8d --- /dev/null +++ b/db/docs/collections/tutorial/Tuple.html @@ -0,0 +1,212 @@ + + + + + + Chapter 5.  + Using Tuples + + + + + + + + + + +
+
+
+
+

Chapter 5.  + Using Tuples +

+
+
+
+
+ +

+ Sleepycat Java Collections API tuples are sequences of + primitive Java data types, for example, integers and strings. The + tuple format is a binary format for tuples that can be used + to store keys and/or values. +

+

+ Tuples are useful as keys because they have a meaningful sort + order, while serialized objects do not. This is because the binary + data for a tuple is written in such a way that its raw byte + ordering provides a useful sort order. For example, strings in + tuples are written with a null terminator rather than with a + leading length. +

+

+ Tuples are useful as keys or values when reducing the + record size to a minimum is important. A tuple is significantly + smaller than an equivalent serialized object. However, unlike + serialized objects, tuples cannot contain complex data types and + are not easily extended except by adding fields at the end of the + tuple. +

+

+ Whenever a tuple format is used, except when the key or value + class is a Java primitive wrapper class, a tuple binding class must + be implemented to map between the Java object and the tuple fields. + Because of this extra requirement, and because tuples are not + easily extended, a useful technique shown in this example is to use + tuples for keys and serialized objects for values. This provides + compact ordered keys but still allows arbitrary Java objects as + values, and avoids implementing a tuple binding for each value + class. +

+

+ Compare this example to the prior Entity example and you'll see + that the Sample class has not changed. When changing a + database format, while new bindings are needed to map key and value + objects to the new format, the application using the objects often + does not need to be modified. +

+

+ The complete source of the final version of the example program + is included in the Berkeley DB distribution. +

+
+
+
+
+

+ Using the Tuple Format +

+
+
+
+
+

+ Tuples are sequences of primitive Java values that can be + written to, and read from, the raw data bytes of a stored record. + The primitive values are written or read one at a time in sequence, + using the Sleepycat Java Collections API + TupleInput + + and + TupleOutput + + classes. These classes are very similar to the standard Java + DataInput + + and + DataOutput + + interfaces. The primary difference is the binary format of the + data, which is designed for sorting in the case of tuples. +

+

+ For example, to read and write a tuple containing two string + values, the following code snippets could be used. +

+ +
import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+...
+TupleInput input;
+TupleOutput output;
+...
+String partNumber = input.readString();
+String supplierNumber = input.readString();
+...
+output.writeString(partNumber);
+output.writeString(supplierNumber);  
+

+ Since a tuple is defined as an ordered sequence, reading and + writing order must match. If the wrong data type is read (an + integer instead of a string, for example), an exception may be thrown + or, at a minimum, invalid data will be read. +

+

+ When the tuple format is used, bindings and key creators must + read and write tuples using the tuple API as shown above. This will + be illustrated in the next two sections. +

+
+
+ + + diff --git a/db/docs/collections/tutorial/UsingCollectionsAPI.html b/db/docs/collections/tutorial/UsingCollectionsAPI.html new file mode 100644 index 000000000..a37c3de5c --- /dev/null +++ b/db/docs/collections/tutorial/UsingCollectionsAPI.html @@ -0,0 +1,637 @@ + + + + + + + Using the Sleepycat Java Collections API + + + + + + + + + + +
+
+
+
+

+ Using the Sleepycat Java Collections API +

+
+
+
+
+

+ An + + Environment + + manages the resources for one or more data stores. A + + Database + + object + represents a single database and is created via a method on the + environment object. + + SecondaryDatabase + + objects represent an index associated with a primary database. + + + An access method must be chosen for each database and secondary + database. + + + Primary and secondary databases are then used to create stored + collection objects, as described in + + Using Stored Collections + . +

+
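+ As a hypothetical sketch (not one of the example programs), an environment and a database might be opened as follows; homeDirectory, the database file name and the configuration shown are illustrative only. +
+ +
+import java.io.File;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseType;
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.EnvironmentConfig;
+...
+EnvironmentConfig envConfig = new EnvironmentConfig();
+envConfig.setTransactional(true);
+envConfig.setAllowCreate(true);
+envConfig.setInitializeCache(true);
+envConfig.setInitializeLocking(true);
+envConfig.setInitializeLogging(true);
+Environment env = new Environment(new File(homeDirectory), envConfig);
+
+DatabaseConfig dbConfig = new DatabaseConfig();
+dbConfig.setTransactional(true);
+dbConfig.setAllowCreate(true);
+dbConfig.setType(DatabaseType.BTREE);
+Database partDb = env.openDatabase(null, "part.db", null, dbConfig);
+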
+
+
+
+

+ Using Transactions +

+
+
+
+
+

+ Once you have an environment, one or more databases, and one or + more stored collections, you are ready to access (read and write) + stored data. For a transactional environment, a transaction must be + started before accessing data, and must be committed or aborted + after access is complete. The Sleepycat Java Collections API provides several + ways of managing transactions. +

+

+ The recommended technique is to use the + TransactionRunner + + class along with your own implementation of the + TransactionWorker + + interface. + TransactionRunner + + will call your + TransactionWorker + + implementation class to perform the data access or work of the + transaction. This technique has the following benefits: +

+
+
    +
  • +

    + Transaction exceptions will be handled transparently and + retries will be performed when deadlocks are detected. +

    +
  • +
  • +

    + The transaction will automatically be committed if your + TransactionWorker.doWork() + + method returns normally, or will be + aborted if doWork() throws an exception. +

    +
  • +
  • +

    + TransactionRunner can be used for non-transactional + environments as well, allowing you to write your application + independently of the environment. +

    +
  • +
+
+
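+ For example, this pattern might be used as follows (a minimal sketch, not one of the example programs; env is assumed to be an open transactional Environment). +
+ +
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+...
+TransactionRunner runner = new TransactionRunner(env);
+runner.run(new TransactionWorker()
+{
+    public void doWork()
+        throws Exception
+    {
+        // read and write stored collections here; the transaction is
+        // committed when doWork() returns normally, aborted if it
+        // throws, and retried automatically when a deadlock is detected
+    }
+});
+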

+ If you don't want to use + TransactionRunner, + the alternative is to use the + CurrentTransaction + + class. +

+
+
    +
  1. +

    + Obtain a CurrentTransaction instance by calling the + CurrentTransaction.getInstance + + method. The instance returned + can be used by all threads in a program. +

    +
  2. +
  3. +

    + Use + CurrentTransaction.beginTransaction(), + CurrentTransaction.commitTransaction() + + and + CurrentTransaction.abortTransaction() + + to directly begin, commit and abort transactions. +

    +
  4. +
+
+
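+ A minimal sketch of this technique follows; env is assumed to be a transactional Environment, the null argument is assumed to request a default transaction configuration, and the deadlock handling and retries described below are omitted. +
+ +
+import com.sleepycat.collections.CurrentTransaction;
+...
+CurrentTransaction currentTxn = CurrentTransaction.getInstance(env);
+currentTxn.beginTransaction(null);
+try
+{
+    // read and write stored collections here
+    currentTxn.commitTransaction();
+}
+catch (Exception e)
+{
+    currentTxn.abortTransaction();
+    throw e;
+}
+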

+ If you choose to use CurrentTransaction directly you must handle + the + + DeadlockException + + exception and perform retries yourself. Also note that + CurrentTransaction may only be used in a transactional + environment. +

+

+ The Sleepycat Java Collections API supports nested transactions. If + TransactionRunner.run(com.sleepycat.collections.TransactionWorker) + + or + CurrentTransaction.beginTransaction() + , + is called while another transaction is active, a child transaction + is created. When + TransactionRunner.run(com.sleepycat.collections.TransactionWorker) + + returns, or when + CurrentTransaction.commitTransaction() + + or + CurrentTransaction.abortTransaction() + + is called, the parent transaction becomes active again. Note that + because only one transaction is active per-thread, it is impossible + to accidentally use a parent transaction while a child transaction + is active. +

+

+ The Sleepycat Java Collections API supports transaction auto-commit. + If no transaction is active and a write operation is requested for + a transactional database, auto-commit is used automatically. +

+

+ The Sleepycat Java Collections API also supports transaction + dirty-read via the + StoredCollections + + class. When dirty-read is enabled for a collection, data will be + read that has been modified by another transaction but not + committed. Using dirty-read can improve concurrency since reading + will not wait for other transactions to complete. For a + non-transactional container, dirty-read has no effect. See + StoredCollections + + for how to create a dirty-read collection. +

+
+
+
+
+
+

+ Transaction Rollback +

+
+
+
+
+

+ When a transaction is aborted (or rolled back) the application + is responsible for discarding references to any data objects that + were modified during the transaction. Since the Sleepycat Java Collections + API treats data by value, not by reference, neither the data + objects nor the Sleepycat Java Collections API objects contain status + information indicating whether the data objects are 1- in sync with + the database, 2- dirty (contain changes that have not been written + to the database), 3- stale (were read previously but have become + out of sync with changes made to the database), or 4- contain + changes that cannot be committed because of an aborted + transaction. +

+

+ For example, a given data object will reflect the current state + of the database after reading it within a transaction. If the + object is then modified it will be out of sync with the database. + When the modified object is written to the database it will then be + in sync again. But if the transaction is aborted the object will + then be out of sync with the database. References to objects for aborted + transactions + should no longer be used. When these objects are needed later they + should be read fresh from the database. +

+

+ When an existing stored object is to be updated, special care + should be taken to read the data, then modify it, and then write it + to the database, all within a single transaction. If a stale data + object (an object that was read previously but has since been + changed in the database) is modified and then written to the + database, database changes may be overwritten unintentionally. +

+

+ When an application enforces rules about concurrent access to + specific data objects or all data objects, the rules described here + can be relaxed. For example, if the application knows that a + certain object is only modified in one place, it may be able to + reliably keep a current copy of that object. In that case, it is + not necessary to reread the object before updating it. That said, + if arbitrary concurrent access is to be supported, the safest + approach is to always read data before modifying it within a single + transaction. +

+

+ Similar concerns apply to using data that may have become stale. + If the application depends on current data, it should be read fresh + from the database just before it is used. +

+
+
+
+
+
+

Selecting Access Methods

+
+
+
+
+

+ For each data store and secondary index, you must choose one of the + access methods in the table below. + + The access method determines not only whether sorted keys or duplicate + keys are supported, but also what types of collection views may be used + and what restrictions are imposed on the collection views. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Access MethodOrderedDuplicatesRecord NumbersDatabase TypeDatabaseConfig Method
+ BTREE-UNIQUE + + Yes + + No + + No + + BTREE + + None +
+ BTREE-DUP + + Yes + + Yes, Unsorted + + No + + BTREE + + setUnsortedDuplicates +
+ BTREE-DUPSORT + + Yes + + Yes, Sorted + + No + + BTREE + + setSortedDuplicates +
+ BTREE-RECNUM + + Yes + + No + + Yes, Renumbered + + BTREE + + setBtreeRecordNumbers +
+ HASH-UNIQUE + + No + + No + + No + + HASH + + None +
+ HASH-DUP + + No + + Yes, Unsorted + + No + + HASH + + setUnsortedDuplicates +
+ HASH-DUPSORT + + No + + Yes, Sorted + + No + + HASH + + setSortedDuplicates +
+ QUEUE + + Yes + + No + + Yes, Fixed + + QUEUE + + None +
+ RECNO + + Yes + + No + + Yes, Fixed + + RECNO + + None +
+ RECNO-RENUMBER + + Yes + + No + + Yes, Renumbered + + RECNO + + setRenumbering +
+
+
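+ For example, the BTREE-DUPSORT access method in the table above might be selected with a DatabaseConfig such as the following (a sketch only; the file name is illustrative and env is assumed to be an open Environment). +
+ +
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseType;
+...
+DatabaseConfig dupConfig = new DatabaseConfig();
+dupConfig.setTransactional(true);
+dupConfig.setAllowCreate(true);
+dupConfig.setType(DatabaseType.BTREE);      // Database Type column
+dupConfig.setSortedDuplicates(true);        // DatabaseConfig Method column
+Database db = env.openDatabase(null, "sample.db", null, dupConfig);
+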

+ Please see + Available Access Methods in + the Berkeley DB Programmer's Reference Guide + for more information on access method configuration. +

+
+
+
+
+
+

+ Access Method Restrictions +

+
+
+
+
+

+ The restrictions imposed by the access method on the database + model are: +

+
+
    +
  • +

    + If keys are ordered then data may be enumerated in key order and + key ranges may be used to form subsets of a data store. The + SortedMap and SortedSet + interfaces are supported for collections with ordered keys. +

    +
  • +
  • +

    + If duplicates are allowed then more than one value may be + associated with the same key. This means that the data store cannot + be strictly considered a map — it is really a multi-map. See + + Using Stored Collections + + for implications on the use of the collection interfaces. +

    +
  • +
  • +

    + If duplicate keys are allowed for a data store then the data + store may not have secondary indices. +

    +
  • +
  • +

    + For secondary indices with duplicates, the duplicates must be + sorted. This restriction is imposed by the Sleepycat Java + Collections API. +

    +
  • +
  • +

    + With sorted duplicates, all values for the same key must be + distinct. +

    +
  • +
  • +

    + If duplicates are unsorted, then values for the same key are not + required to be distinct. +

    +
  • +
  • +

    + If record number keys are used, the number of records is + limited to the maximum value of an unsigned 32-bit integer. +

    +
  • +
  • +

    + If record number keys are renumbered, then standard List + add/remove behavior is supported but concurrency/performance is + reduced. +

    +
  • +
+
+

+ See + + Using Stored Collections + + for more information on how access methods impact the use of stored + collections. +

+
+
+ + + diff --git a/db/docs/collections/tutorial/UsingSecondaries.html b/db/docs/collections/tutorial/UsingSecondaries.html new file mode 100644 index 000000000..c6c147c72 --- /dev/null +++ b/db/docs/collections/tutorial/UsingSecondaries.html @@ -0,0 +1,457 @@ + + + + + + Chapter 3.  + Using Secondary Indices + + + + + + + + + + +
+
+
+
+

Chapter 3.  + Using Secondary Indices +

+
+
+
+
+ +

+ In the Basic example, each store has a single primary + key. The Index example extends the Basic example to add the use of + secondary keys. +

+

+ The complete source of the final version of the example program + is included in the Berkeley DB distribution. +

+
+
+
+
+

+ Opening Secondary Key Indices +

+
+
+
+
+

+ Secondary indices or secondary databases are used + to access a primary database by a key other than the primary key. + Recall that the Supplier Number field is the primary key of the + Supplier database. In this section, the Supplier City field will be + used as a secondary lookup key. Given a city value, we would like + to be able to find the Suppliers in that city. Note that more than + one Supplier may be in the same city. +

+

+ Both primary and secondary databases contain key-value records. + The key of an index record is the secondary key, and its value is + the key of the associated record in the primary database. When lookups by + secondary key are performed, the associated record in the primary + database is transparently retrieved by its primary key and returned + to the caller. +

+

+ Secondary indices are maintained automatically when index key + fields (the City field in this case) are added, modified or removed + in the records of the primary database. However, the application + must implement a + + SecondaryKeyCreator + + that extracts the index key from the database record. +

+

+ It is useful to contrast opening a secondary index with opening + a primary database (as described earlier in + + Opening and Closing Databases + ). +

+
+
    +
  • +

    + A primary database may be associated with one or more secondary + indices. A secondary index is always associated with exactly one + primary database. +

    +
  • +
  • +

    + For a secondary index, a + + SecondaryKeyCreator + + must be implemented by the application to extract the index key + from the record of its associated primary database. +

    +
  • +
  • +

    + A primary database is represented by a + + Database + + object and a secondary index is represented by a + + SecondaryDatabase + + object. The + + SecondaryDatabase + + class extends the + + Database + + class. +

    +
  • +
  • +

    + When a + + SecondaryDatabase + + is created it is associated with a primary + + Database + + object and a + + + SecondaryKeyCreator. + +

    +
  • +
+
+

+ The SampleDatabase class is extended to open the + Supplier-by-City secondary key index. +

+ +
import com.sleepycat.bind.serial.SerialSerialKeyCreator;
+import com.sleepycat.db.SecondaryConfig;
+import com.sleepycat.db.SecondaryDatabase;
+...
+public class SampleDatabase
+{
+    ...
+    private static final String SUPPLIER_CITY_INDEX = "supplier_city_index";
+    ...
+    private SecondaryDatabase supplierByCityDb;
+    ...
+    public SampleDatabase(String homeDirectory)
+        throws DatabaseException, FileNotFoundException
+    {
+        ...
+        SecondaryConfig secConfig = new SecondaryConfig();
+        secConfig.setTransactional(true);
+        secConfig.setAllowCreate(true);
+        secConfig.setType(DatabaseType.BTREE);
+        secConfig.setSortedDuplicates(true);
+
+        secConfig.setKeyCreator(
+            new SupplierByCityKeyCreator(javaCatalog,
+                                         SupplierKey.class,
+                                         SupplierData.class,
+                                         String.class));
+
+        supplierByCityDb = env.openSecondaryDatabase(null, 
+                                                     SUPPLIER_CITY_INDEX,
+                                                     null,
+                                                     supplierDb,
+                                                     secConfig);
+    ...
+    }
+} 
+

+ A + + SecondaryConfig + + object is used to configure the secondary database. The + + SecondaryConfig + + class extends the + + DatabaseConfig + + class, and most steps for configuring a secondary database are the + same as for configuring a primary database. The main difference in + the example above is that the + SecondaryConfig.setSortedDuplicates() method is called to + allow duplicate index keys. This is how more than one Supplier may + be in the same City. If this property is not specified, the default is + that the index keys of all records must be unique. +

+

+ For a primary database, duplicate keys are not normally used + since a primary database with duplicate keys may not have any + associated secondary indices. If primary database keys are not + unique, there is no way for a secondary key to reference a specific + record in the primary database. +

+

+ Note that setSortedDuplicates() and not + setUnsortedDuplicates() was called. Sorted + duplicates are always used for indices rather than unsorted duplicates, + since sorting enables optimized equality joins. +

+

+ Opening a secondary key index requires creating a + + + SecondaryKeyCreator. + + The SupplierByCityKeyCreator class implements the + + SecondaryKeyCreator + + interface and will be defined below. +

+

+ The + + SecondaryDatabase + + object is opened last. If you compare the + openSecondaryDatabase() and openDatabase() methods you'll + notice only two differences: +

+
+
    +
  • +

    + openSecondaryDatabase() has an extra parameter for + specifying the associated primary database. The primary database is + supplierDb in this case. +

    +
  • +
  • +

    + The last parameter of openSecondaryDatabase() is a + SecondaryConfig instead of a DatabaseConfig. +

    +
  • +
+
+

+ How to use the secondary index to access records will be shown + in a later section. +

+

+ The application-defined SupplierByCityKeyCreator class is + shown below. It was used above to configure the secondary + database. +

+ +
public class SampleDatabase
+{
+...
+    private static class SupplierByCityKeyCreator
+        extends SerialSerialKeyCreator
+    {
+        private SupplierByCityKeyCreator(StoredClassCatalog catalog,
+                                         Class primaryKeyClass,
+                                         Class valueClass,
+                                         Class indexKeyClass)
+        {
+            super(catalog, primaryKeyClass, valueClass, indexKeyClass);
+        }
+
+        public Object createSecondaryKey(Object primaryKeyInput,
+                                         Object valueInput)
+        {
+            SupplierData supplierData = (SupplierData) valueInput;
+            return supplierData.getCity();
+        }
+    }
+...
+} 
+

+ In general, a key creator class must implement the + + SecondaryKeyCreator + + interface. This interface has methods that operate on the record + data as raw bytes. In practice, it is easiest to use an abstract + base class that performs the conversion of record data to and from + the format defined for the database's key and value. The base class + implements the + + SecondaryKeyCreator + + interface and has abstract methods that must be implemented in turn + by the application. +

+

+ In this example the + SerialSerialKeyCreator + + base class is used because the database record uses the serial + format for both its key and its value. The abstract methods of this + class have key and value parameters of type + Object + + which are automatically converted to and from the raw record data + by the base class. +

+

+ To perform the conversions properly, the key creator must be + aware of all three formats involved: the key format of the primary + database record, the value format of the primary database record, + and the key format of the index record. The + SerialSerialKeyCreator + + constructor is given the base classes for these three formats as + parameters. +

+

+ The SerialSerialKeyCreator.createSecondaryKey method is + given the key and value of the primary database record as + parameters, and it returns the key of the index record. In this + example, the index key is a field in the primary database record + value. Since the record value is known to be a SupplierData + object, it is cast to that class and the city field is + returned. +

+

+ Note that the primaryKeyInput parameter is not used in + the example. This parameter is needed only when an index key is + derived from the key of the primary database record. Normally an + index key is derived only from the primary database record value, + but it may be derived from the key, value or both. +

+

+ The following getter methods return the secondary database + object for use by other classes in the example program. The + secondary database object is used to create Java collections for + accessing records via their secondary keys. +

+ +
public class SampleDatabase
+{
+    ...
+    public final SecondaryDatabase getSupplierByCityDatabase()
+    {
+        return supplierByCityDb;
+    }
+    ...
+} 
+

+ The following statement closes the secondary database. +

+ +
public class SampleDatabase
+{
+    ...
+    public void close()
+        throws DatabaseException {
+
+        supplierByCityDb.close();
+        partDb.close();
+        supplierDb.close();
+        shipmentDb.close();
+        javaCatalog.close();
+        env.close();
+    }
+    ...
+} 
+

+ Secondary databases must be closed before closing their + associated primary database. +

+
+
+ + + diff --git a/db/docs/collections/tutorial/UsingStoredCollections.html b/db/docs/collections/tutorial/UsingStoredCollections.html new file mode 100644 index 000000000..415974b09 --- /dev/null +++ b/db/docs/collections/tutorial/UsingStoredCollections.html @@ -0,0 +1,804 @@ + + + + + + + Using Stored Collections + + + + + + + + + + +
+
+
+
+

+ Using Stored Collections +

+
+
+
+
+

+ When a stored collection is created it is based on either a + + Database + + or a + + + SecondaryDatabase. + + When a database is used, the primary key of the database is used as + the collection key. When a secondary database is used, the index + key is used as the collection key. Indexed collections can be used + for reading elements and removing elements but not for adding or + updating elements. +

+
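+ For example, a collection keyed by a secondary index key might be created as sketched below, reusing names from the Index example (the SampleDatabase db, its getSupplierByCityDatabase() and getClassCatalog() getters, and the supplierValueBinding); the city key binding shown is an assumption, not code from the example program. +
+ +
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.collections.StoredMap;
+...
+EntryBinding cityKeyBinding =
+    new SerialBinding(db.getClassCatalog(), String.class);
+StoredMap suppliersByCity =
+    new StoredMap(db.getSupplierByCityDatabase(),
+                  cityKeyBinding, supplierValueBinding, true);
+// reading and removing entries is allowed; adding or updating entries
+// through this indexed collection is not
+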
+
+
+
+

+ Stored Collection and Access Methods +

+
+
+
+
+

+ The use of stored collections is constrained in certain respects as + described below. + + Most of these restrictions have to do with + List + + interfaces; for + Map + + interfaces, almost all access modes are fully supported since the + Berkeley DB model is map-like. + +

+
+ +
+
+
+
+
+
+

+ Stored Collections Versus Standard Java Collections +

+
+
+
+
+

+ Stored collections have the following differences with the + standard Java collection interfaces. Some of these are interface + contract violations. +

+

+ The Java collections interface does not support duplicate keys + (multi-maps or multi-sets). When the access method allows duplicate + keys, the collection interfaces are defined as follows. +

+
+
    +
  • +

    + Map.entrySet() + + may contain multiple + Map.Entry + + objects with the same key. +

    +
  • +
  • +

    + Map.keySet() + + always contains unique keys; it does not contain duplicates. +

    +
  • +
  • +

    + Map.values() + + contains all values including the values + associated with duplicate keys. +

    +
  • +
  • +

    + Map.put() + + appends a duplicate if the key already exists rather than replacing + the existing value, and always returns null. +

    +
  • +
  • +

    + Map.remove() + + removes all duplicates for the specified key. +

    +
  • +
  • +

    + Map.get() + + returns the first duplicate for the specified key. +

    +
  • +
  • +

    + StoredMap.duplicates() + + is an additional method for returning the values for a given key as a + Collection. +

    +
  • +
+
+
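+ For example, when the access method allows duplicate keys, all of the values stored under a single key might be retrieved as follows (a sketch; the variable names are illustrative). +
+ +
+import java.util.Collection;
+import com.sleepycat.collections.StoredMap;
+...
+StoredMap storedMap;    // a map whose access method allows duplicate keys
+Object key;
+...
+Collection valuesForKey = storedMap.duplicates(key);
+// valuesForKey contains every value associated with key, not just the
+// first duplicate returned by Map.get()
+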

+ Other differences are: +

+
+
    +
  • +

    + All iterators for stored collections must be explicitly closed with + StoredIterator.close(). + The static method + StoredIterator.close() + + allows calling close + for all iterators without harm to iterators that are not from + stored collections, and also avoids casting. If a stored iterator + is not closed, unpredictable behavior including process death may + result. A usage sketch is shown after this list. +

    +
  • +
  • +

    + Collection.size() and Map.size() always throw + UnsupportedOperationException. + This is because the number of + records in a database cannot be determined reliably or + cheaply. +

    +
  • +
  • +

    + Because the size() method cannot be used, the bulk operation + methods of standard Java collections cannot be passed stored + collections as parameters, since the implementations rely on + size(). However, the bulk operation methods of stored collections + can be passed standard Java collections as parameters. + storedCollection.addAll(standardCollection) is allowed + while standardCollection.addAll(storedCollection) is + not allowed. This restriction applies to the standard + collection constructors that take a Collection parameter (copy + constructors), the Map.putAll() method, and the following + Collection methods: addAll(), containsAll(), removeAll() and + retainAll(). +

    +
  • +
  • +

    + The ListIterator.nextIndex() method + returns Integer.MAX_VALUE + for stored lists when positioned at the end of the list, rather + than returning the list size as specified by the ListIterator + interface. Again, this is because the database size is not + available. +

    +
  • +
  • +

    + Comparator + + objects cannot be used and the + SortedMap.comparator() + + and + SortedSet.comparator() + + methods always return null. The + Comparable + + interface is not supported. However, Comparators that operate on + byte arrays may be specified using + + + DatabaseConfig.setBtreeComparator. + +

    +
  • +
  • +

    + The + Object.equals() + + method is not used to determine whether a key + or value is contained in a collection, to locate a value by key, + etc. Instead the byte array representation of the keys and values + are used. However, the equals() method is called for each + key and value when comparing two collections for equality. It is + the responsibility of the application to make sure that the + equals() method returns true if and only if the byte array + representations of the two objects are equal. Normally this occurs + naturally since the byte array representation is derived from the + object's fields. +

    +
  • +
+
+
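+ As noted in the first point above, every stored iterator must be closed; a typical pattern is sketched below (the storedMap variable is illustrative). +
+ +
+import java.util.Iterator;
+import com.sleepycat.collections.StoredIterator;
+...
+Iterator i = storedMap.values().iterator();
+try
+{
+    while (i.hasNext())
+    {
+        Object value = i.next();
+        // process the value here
+    }
+}
+finally
+{
+    // the static form closes stored iterators and is harmless for others
+    StoredIterator.close(i);
+}
+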
+
+
+
+
+

+ Other Stored Collection Characteristics +

+
+
+
+
+

+ The following characteristics of stored collections are + extensions of the definitions in the + java.util + + package. These differences do not violate the Java + collections interface contract. +

+
+
    +
  • +

    + All stored collections are thread safe (can be used by multiple + threads concurrently) + + whenever the Berkeley DB Concurrent Data Store or + Transactional Data Store environment is used. + + Locking is handled by the Berkeley DB + environment. To access a collection from multiple threads, creation + of synchronized collections using the + Collections + + class is not necessary + + except when using the Data Store environment. + + Iterators, however, should always be used only by a single thread. +

    +
  • +
  • +

    + All stored collections may be read-only if desired by passing + false for the writeAllowed parameter of their constructor. Creation + of immutable collections using the + Collections + + class is not necessary. +

    +
  • +
  • +

    + A stored collection is partially read-only if a secondary + index is used. Specifically, values may be removed but may not be + added or updated. The following methods will throw + UnsupportedOperationException + + when an index is used: + Collection.add(), + List.set(), + ListIterator.set() + + and + Map.Entry.setValue(). +

    +
  • +
  • +

    + SortedMap.entrySet() + + and + SortedMap.keySet() + + return a + SortedSet, + not just a + Set + + as specified in the Java collections interface. This allows using the + SortedSet + + methods on the returned collection. +

    +
  • +
  • +

    + SortedMap.values() + + returns a + SortedSet, + not just a + Collection, + whenever the keys of the map can be derived from the values using + an entity binding. Note that the sorted set returned is not really + a set if duplicates are allowed, since it is technically a + collection; however, the + SortedSet + + methods (for example, subSet()), can still be used. +

    +
  • +
  • +

    + For + SortedSet + + and + SortedMap + + views, additional subSet() and subMap() methods are provided that + allow control over whether keys are treated as inclusive or + exclusive values in the key range. +

    +
  • +
  • +

    + Keys and values are stored by value, not by reference. This is + because objects that are added to collections are converted to byte + arrays (by bindings) and stored in the database. When they are + retrieved from the collection they are read from the database and + converted from byte arrays to objects. Therefore, the object + reference added to a collection will not be the same as the + reference later retrieved from the collection. +

    +
  • +
  • +

    + A runtime exception, + RuntimeExceptionWrapper, + is thrown whenever database exceptions occur which are not runtime + exceptions. The + RuntimeExceptionWrapper.getCause() + + method can be called to get the underlying exception. +

    +
  • +
  • +

    + All iterators for stored collections implement the + ListIterator + + interface as well as the + Iterator + + interface. This is to allow use of the + ListIterator.hasPrevious() + + and + ListIterator.previous() + + methods, which work for all collections + since Berkeley DB provides bidirectional cursors. +

    +
  • +
  • +

    + All stored collections have a + StoredCollection.iterator(boolean) + + method that allows creating + a read-only iterator for a writable collection. For the standard + Collection.iterator() + + method, the iterator is read-only only + when the collection is read-only. + + Read-only iterators are important + for using the Berkeley DB Concurrent Data Store environment, since + only one write cursor may be open at a time. +

    +
  • +
  • +

    + Iterator stability for stored collections is greater than the + iterator stability defined by the Java collections interfaces. + Stored iterator stability is the same as the cursor stability + defined by Berkeley DB. +

    +
  • +
  • +

    + When an entity binding is used, updating (setting) a value is + not allowed if the key in the entity is not equal to the original + key. For example, calling + Map.put() + + is not allowed when the key parameter is not equal to the key of + the entity parameter. + Map.put(), + List.set(), + ListIterator.set(), + and + Map.Entry.setValue() + + will throw + IllegalArgumentException + + in this situation. +

    +
  • +
  • +

    + Adding and removing items from stored lists is not allowed for + sublists. This is simply an unimplemented feature and may be + changed in the future. Currently for sublists the following + methods throw + UnsupportedOperationException: + List.add(), + List.remove(), + ListIterator.add() + + and + ListIterator.remove(). +

    +
  • +
  • +

    + The + + + StoredList.append(java.lang.Object) + + and + + + StoredMap.append(java.lang.Object) + + extension methods allow + adding a new record with an automatically assigned key. + + Record number assignment by the database itself is supported + for QUEUE, RECNO and RECNO-RENUMBER databases. + + An application-defined + PrimaryKeyAssigner + + is used to assign the key value. +

    +
  • +
+
+
+
+
+
+
+

+ Why Java Collections for Berkeley DB +

+
+
+
+
+

+ The Java collections interface was chosen as the best Java API + for DB given these requirements: +

+
+
    +
  1. +

    + Provide the Java developer with an API that is as familiar and + easy to use as possible. +

    +
  2. +
  3. +

    + Provide access to all, or a large majority, of the features of + the underlying Berkeley DB storage system. +

    +
  4. +
  5. +

    + Compared to the DB API, provide a higher-level API + that is oriented toward Java developers. +

    +
  6. +
  7. +

    + For ease of use, support object-to-data bindings, per-thread + transactions, and some traditional database features such as + foreign keys. +

    +
  8. +
  9. +

    + Provide a thin layer that can be thoroughly tested and which + does not significantly impact the reliability and performance of + DB. +

    +
  10. +
+
+

+ Admittedly there are several things about the Java Collections + API that don't quite fit with DB or with any transactional + database, and therefore there are some new rules for applying the + Java Collections API. However, these disadvantages are considered + to be smaller than the disadvantages of the alternatives: +

+
+
    +
  • +

    + A new API not based on the Java Collections API could have been + designed that maps well to DB but is higher-level. + However, this would require designing an entirely new model. The + exceptions for using the Java Collections API are considered easier + to learn than a whole new model. A new model would also require a + long design stabilization period before being as complete and + understandable as either the Java Collections API or the DB + API. +

    +
  • +
  • +

    + The ODMG API or another object persistence API could have been + implemented on top of DB. However, an object persistence + implementation would add much code and require a long stabilization + period. And while it may work well for applications that require + object persistence, it would probably never perform well enough for + many other applications. +

    +
  • +
+
+
+
+ + + diff --git a/db/docs/collections/tutorial/addingdatabaseitems.html b/db/docs/collections/tutorial/addingdatabaseitems.html new file mode 100644 index 000000000..4e1ff66d5 --- /dev/null +++ b/db/docs/collections/tutorial/addingdatabaseitems.html @@ -0,0 +1,230 @@ + + + + + + + Adding Database Items + + + + + + + + + + +
+
+
+
+

+ Adding Database Items +

+
+
+
+
+

+ Adding (as well as updating, removing, and deleting) information + in the database is accomplished via the standard Java collections + API. In the example, the + Map.put + + method is used to add objects. All standard Java methods for + modifying a collection may be used with the Sleepycat Java Collections + API. +

+

+ The PopulateDatabase.doWork() method calls private methods + for adding objects to each of the three database stores. It is + called via the + TransactionRunner + + class and was outlined in the previous section. +

+ +
import java.util.Map;
+import com.sleepycat.collections.TransactionWorker;
+...
+public class Sample
+{
+    ...
+    private SampleViews views;
+    ...
+    private class PopulateDatabase implements TransactionWorker
+    {
+        public void doWork()
+            throws Exception
+        {
+            addSuppliers();
+            addParts();
+            addShipments();
+        }
+    }
+    ...
+
+    private void addSuppliers()
+    {
+    }
+
+    private void addParts()
+    {
+    }
+
+    private void addShipments()
+    {
+    }
+} 
+

+ The addSuppliers(), addParts() and addShipments() + methods add objects to the Suppliers, Parts and Shipments stores. + The + Map + + for each store is obtained from the SampleViews object. +

+ +
    private void addSuppliers()
+    {
+        Map suppliers = views.getSupplierMap();
+        if (suppliers.isEmpty())
+        {
+            System.out.println("Adding Suppliers");
+            suppliers.put(new SupplierKey("S1"),
+                          new SupplierData("Smith", 20, "London"));
+            suppliers.put(new SupplierKey("S2"),
+                          new SupplierData("Jones", 10, "Paris"));
+            suppliers.put(new SupplierKey("S3"),
+                          new SupplierData("Blake", 30, "Paris"));
+            suppliers.put(new SupplierKey("S4"),
+                          new SupplierData("Clark", 20, "London"));
+            suppliers.put(new SupplierKey("S5"),
+                          new SupplierData("Adams", 30, "Athens"));
+        }
+    }
+
+    private void addParts()
+    {
+        Map parts = views.getPartMap();
+        if (parts.isEmpty())
+        {
+            System.out.println("Adding Parts");
+            parts.put(new PartKey("P1"),
+                      new PartData("Nut", "Red",
+                                    new Weight(12.0, Weight.GRAMS),
+                                    "London"));
+            parts.put(new PartKey("P2"),
+                      new PartData("Bolt", "Green",
+                                    new Weight(17.0, Weight.GRAMS),
+                                    "Paris"));
+            parts.put(new PartKey("P3"),
+                      new PartData("Screw", "Blue",
+                                    new Weight(17.0, Weight.GRAMS),
+                                    "Rome"));
+            parts.put(new PartKey("P4"),
+                      new PartData("Screw", "Red",
+                                    new Weight(14.0, Weight.GRAMS),
+                                    "London"));
+            parts.put(new PartKey("P5"),
+                      new PartData("Cam", "Blue",
+                                    new Weight(12.0, Weight.GRAMS),
+                                    "Paris"));
+            parts.put(new PartKey("P6"),
+                      new PartData("Cog", "Red",
+                                    new Weight(19.0, Weight.GRAMS),
+                                    "London"));
+        }
+    }
+
+    private void addShipments()
+    {
+        Map shipments = views.getShipmentMap();
+        if (shipments.isEmpty())
+        {
+            System.out.println("Adding Shipments");
+            shipments.put(new ShipmentKey("P1", "S1"),
+                          new ShipmentData(300));
+            shipments.put(new ShipmentKey("P2", "S1"),
+                          new ShipmentData(200));
+            shipments.put(new ShipmentKey("P3", "S1"),
+                          new ShipmentData(400));
+            shipments.put(new ShipmentKey("P4", "S1"),
+                          new ShipmentData(200));
+            shipments.put(new ShipmentKey("P5", "S1"),
+                          new ShipmentData(100));
+            shipments.put(new ShipmentKey("P6", "S1"),
+                          new ShipmentData(100));
+            shipments.put(new ShipmentKey("P1", "S2"),
+                          new ShipmentData(300));
+            shipments.put(new ShipmentKey("P2", "S2"),
+                          new ShipmentData(400));
+            shipments.put(new ShipmentKey("P2", "S3"),
+                          new ShipmentData(200));
+            shipments.put(new ShipmentKey("P2", "S4"),
+                          new ShipmentData(200));
+            shipments.put(new ShipmentKey("P4", "S4"),
+                          new ShipmentData(300));
+            shipments.put(new ShipmentKey("P5", "S4"),
+                          new ShipmentData(400));
+        }
+    } 
+}
+

+ The key and value classes used above were defined in the + + Defining Serialized Key and Value Classes + . +

+

+ In each method above, objects are added only if the map is not + empty. This is a simple way of allowing the example program to be + run repeatedly. In real-life applications another technique — + checking the + Map.containsKey + + method, for example — might be used. +

+
+ + + diff --git a/db/docs/collections/tutorial/collectionOverview.html b/db/docs/collections/tutorial/collectionOverview.html new file mode 100644 index 000000000..c01feb18d --- /dev/null +++ b/db/docs/collections/tutorial/collectionOverview.html @@ -0,0 +1,473 @@ + + + + + + Appendix A.  + API Notes and Details + + + + + + + + + + +
+
+
+
+

Appendix A.  + API Notes and Details +

+
+
+
+
+

+ This appendix contains information useful to the collections programmer + that is too detailed to easily fit into the format of a tutorial. + Specifically, this appendix contains the following information: +

+ +
+
+
+
+

+ Using Data Bindings +

+
+
+
+
+

+ Data bindings determine how keys and values are represented as + stored data (byte arrays) in the database, and how stored data is + converted to and from Java objects. +

+

+ The selection of data bindings is, in general, independent of + the selection of + access methods and + collection views. In other + words, any binding can be used with any + access method or + collection. + + One exception to this rule is described under + Record Number Bindings + below. + +

+
+

Note

+

+ In this document, bindings are described in the + context of their use for stored data in a database. However, + bindings may also be used independently of a database to operate on + an arbitrary byte array. This allows using bindings when data is to + be written to a file or sent over a network, for example. +

+
+
+
+
+
+

+ Selecting Binding Formats +

+
+
+
+
+

+ For the key and value of each stored collection, you may select + one of the following types of bindings. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Binding FormatOrderedDescription
+ SerialBinding + No + The data is stored using a compact form of Java serialization, + where the class descriptions are stored separately in a catalog + database. Arbitrary Java objects are supported. +
+ TupleBinding + Yes + The data is stored using a series of fixed length primitive + values or zero terminated character arrays (strings). Class/type + evolution is not supported. +
+ RecordNumberBinding + Yes + The data is a 32-bit integer stored in a platform-dependent format. +
Custom binding formatUser-defined + The data storage format and ordering is determined by the + custom binding implementation. +
+
+

+ As shown in the table above, the tuple format supports built-in ordering + (without specifying a custom comparator), while the serial format does + not. This means that when a specific key order is needed, tuples should + be used instead of serial data. Alternatively, a custom BTree comparator should be + specified using + DatabaseConfig.setBtreeComparator(). Note that + a custom BTree comparator will usually execute more slowly than the + default byte-by-byte comparison. This makes using tuples an attractive + option, since they provide ordering along with optimal performance. +

+

+ The tuple binding uses less space and executes faster than the + serial binding. But once a tuple is written to a database, the + order of fields in the tuple may not be changed and fields may not + be deleted. The only type evolution allowed is the addition of + fields at the end of the tuple, and this must be explicitly + supported by the custom binding implementation. +

+

+ The serial binding supports the full generality of Java + serialization including type evolution. But serialized data can + only be accessed by Java applications, its size is larger, and its + bindings are slower to execute. +

+
+
+
+
+
+

Record Number Bindings

+
+
+
+
+

+ Any use of an access method with record number keys, and therefore any + use of a stored list view, requires using + RecordNumberBinding + + as the key binding. Since Berkeley DB stores record number keys using + a platform-dependent byte order, + RecordNumberBinding + + is needed to store record numbers properly. See + logical record numbers in + the Berkeley DB Programmer's Reference Guide + for more information on storing DB record numbers. +

+
+

Note

+

+ You may not use + RecordNumberBinding + + except with record number keys, as determined by the access + method. Using + RecordNumberBinding + + in other cases will create a database that is not portable + between platforms. When constructing the stored collection, + the Sleepycat Java Collections API will throw an + IllegalArgumentException + + in such cases. +

+
+
+
+
+
+
+

+ Selecting Data Bindings +

+
+
+
+
+

+ There are two types of binding interfaces. Simple entry bindings + implement the + EntryBinding + + interface and can be used for key or value objects. Entity bindings + implement the + EntityBinding + + interface and are used for combined key and value objects called + entities. +

+

+ Simple entry bindings map between the key or value data stored + by Berkeley DB and a key or value object. This is a simple + one-to-one mapping. +

+

+ Simple entry bindings are easy to implement and in some cases + require no coding. For example, a + SerialBinding + + can be used for keys or values without writing any additional + code. A tuple binding for a single-item tuple can also be used without + writing any code; see the + TupleBinding.getPrimitiveBinding + + method. +

+
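+ For example, ready-made single-item tuple bindings for String and the primitive wrapper classes might be obtained as follows (a sketch; the variable names are illustrative). +
+ +
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+...
+EntryBinding stringKeyBinding =
+    TupleBinding.getPrimitiveBinding(String.class);
+EntryBinding integerValueBinding =
+    TupleBinding.getPrimitiveBinding(Integer.class);
+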

+ Entity bindings must divide an entity object into its key and + value data, and then combine the key and value data to re-create + the entity object. This is a two-to-one mapping. +

+

+ Entity bindings are useful when a stored application object + naturally has its primary key as a property, which is very common. + For example, an Employee object would naturally have an + EmployeeNumber property (its primary key) and an entity binding + would then be needed. Of course, entity bindings are more complex + to implement, especially if their key and data formats are + different. +

+

+ Note that even when an entity binding is used a key binding is + also usually needed. For example, a key binding is used to create + key objects that are passed to the + Map.get() + + method. A key object is passed to this method even though it may + return an entity that also contains the key. +

+
+
+
+
+
+

+ Implementing Bindings +

+
+
+
+
+

+ There are two ways to implement bindings. The first way is to + create a binding class that implements one of the two binding + interfaces, + EntryBinding + + or + EntityBinding. + For tuple bindings and serial bindings there are a number of + abstract classes that make this easier. For example, you can extend + TupleBinding + + to implement a simple binding for a tuple key or value. Abstract + classes are also provided for entity bindings and are named after + the format names of the key and value. For example, you can extend + TupleSerialBinding + + to implement an entity binding with a tuple key and serial + value. +

+
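+ For example, a simple tuple binding for a key class with a single String field might be sketched as follows (modeled on the Tuple example's key bindings; the PartKey class and its getNumber() accessor are assumed). +
+ +
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+...
+public class PartKeyBinding extends TupleBinding
+{
+    // Convert the stored tuple data to a PartKey object.
+    public Object entryToObject(TupleInput input)
+    {
+        String number = input.readString();
+        return new PartKey(number);
+    }
+
+    // Convert a PartKey object to tuple data.
+    public void objectToEntry(Object object, TupleOutput output)
+    {
+        PartKey key = (PartKey) object;
+        output.writeString(key.getNumber());
+    }
+} 
+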

+ Another way to implement bindings is with marshalling + interfaces. These are interfaces which perform the binding + operations and are implemented by the key, value or entity classes + themselves. With marshalling you use a binding which calls the + marshalling interface and you implement the marshalling interface + for each key, value or entity class. For example, you can use + TupleMarshalledBinding + + along with key or value classes that implement the + MarshalledTupleEntry + + interface. +

+
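+ A marshalled key class might be sketched as follows (an illustration only; a public no-argument constructor is assumed here so that the binding can instantiate the class before unmarshalling it). +
+ +
+import com.sleepycat.bind.tuple.MarshalledTupleEntry;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+...
+public class PartKey implements MarshalledTupleEntry
+{
+    private String number;
+
+    public PartKey()
+    {
+        // no-argument constructor used by the marshalled binding
+    }
+
+    public PartKey(String number)
+    {
+        this.number = number;
+    }
+
+    // Write this object's fields to the tuple.
+    public void marshalEntry(TupleOutput dataOutput)
+    {
+        dataOutput.writeString(number);
+    }
+
+    // Read this object's fields from the tuple.
+    public void unmarshalEntry(TupleInput dataInput)
+    {
+        number = dataInput.readString();
+    }
+} 
+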
+
+
+
+
+

+ Using Bindings +

+
+
+
+
+

+ Bindings are specified whenever a stored collection is created. + A key binding must be specified for map, key set and entry set + views. A value binding or entity binding must be specified for map, + value set and entry set views. +

+

+ Any number of bindings may be created for the same stored data. + This allows multiple views over the same data. For example, a tuple + might be bound to an array of values or to a class with properties + for each object. +

+

+ It is important to be careful of bindings that only use a subset + of the stored data. This can be useful to simplify a view or to + hide information that should not be accessible. However, if you + write records using these bindings you may create stored data that + is invalid from the application's point of view. It is up to the + application to guard against this by creating a read-only + collection when such bindings are used. +

+
+
+
+
+
+

+ Secondary Key Creators +

+
+
+
+
+

+ Secondary Key Creators are needed whenever database indices are + used. For each secondary index + + + (SecondaryDatabase) + + a key creator is used to derive index key data from key/value data. + Key creators are objects whose classes implement the + + SecondaryKeyCreator + + interface. +

+

+ Like bindings, key creators may be implemented using a separate + key creator class or using a marshalling interface. Abstract key + creator classes and marshalling interfaces are provided in the + com.sleepycat.bind.tuple and com.sleepycat.bind.serial + packages. +

+

+ Unlike bindings, key creators fundamentally operate on key and + value data, not necessarily on the objects derived from the data by + bindings. In this sense key creators are a part of a database + definition, and may be independent of the various bindings that may + be used to view data in a database. However, key creators are not + prohibited from using higher level objects produced by bindings, + and doing so may be convenient for some applications. For example, + marshalling interfaces, which are defined for objects produced by + bindings, are a convenient way to define key creators. +

+
+
+
+ + + diff --git a/db/docs/collections/tutorial/collectionswithentities.html b/db/docs/collections/tutorial/collectionswithentities.html new file mode 100644 index 000000000..74f5774c8 --- /dev/null +++ b/db/docs/collections/tutorial/collectionswithentities.html @@ -0,0 +1,164 @@ + + + + + + + Creating Collections with Entity Bindings + + + + + + + + + + +
+
+
+
+

+ Creating Collections with Entity Bindings +

+
+
+
+
+

+ Stored map objects are created in this example in the same way + as in prior examples, but using entity bindings in place of value + bindings. All value objects passed and returned to the Java + collections API are then actually entity objects (Part, + Supplier and Shipment). The application no longer + deals directly with plain value objects (PartData, + SupplierData and ShipmentData). +

+

+ Since the partValueBinding, supplierValueBinding + and shipmentValueBinding were defined as entity bindings in + the prior section, there are no source code changes necessary for + creating the stored map objects. +

+ +
public class SampleViews
+{
+    ...
+    public SampleViews(SampleDatabase db)
+    {
+        ...
+        partMap =
+            new StoredMap(db.getPartDatabase(),
+                          partKeyBinding, partValueBinding, true);
+        supplierMap =
+            new StoredMap(db.getSupplierDatabase(),
+                          supplierKeyBinding, supplierValueBinding, true);
+        shipmentMap =
+            new StoredMap(db.getShipmentDatabase(),
+                          shipmentKeyBinding, shipmentValueBinding, true);
+      ...
+    } 
+

+ Specifying an + EntityBinding + + will select a different + StoredMap + + constructor, but the syntax is the same. In general, an entity + binding may be used anywhere that a value binding is used. +

+

+ The following getter methods are defined for use by other + classes in the example program. Instead of returning the map's + entry set + (Map.entrySet), + the map's value set + (Map.values) + is returned. The entry set was convenient in prior examples because + it allowed enumerating all key/value pairs in the collection. Since + an entity contains the key and the value, enumerating the value set + can now be used more conveniently for the same purpose. +

+ +
import com.sleepycat.collections.StoredValueSet;
+...
+public class SampleViews
+{
+    ...
+    public StoredValueSet getPartSet()
+    {
+        return (StoredValueSet) partMap.values();
+    }
+
+    public StoredValueSet getSupplierSet()
+    {
+        return (StoredValueSet) supplierMap.values();
+    }
+
+    public StoredValueSet getShipmentSet()
+    {
+        return (StoredValueSet) shipmentMap.values();
+    }
+    ...
+} 
+

+ Notice that the collection returned by the + StoredMap.values + + method is actually a + StoredValueSet + + and not just a + Collection + + as defined by the + Map.values + + interface. As long as duplicate keys are not allowed, this + collection will behave as a true set and will disallow the addition + of duplicates, etc. +

+
+ + + diff --git a/db/docs/collections/tutorial/createbindingscollections.html b/db/docs/collections/tutorial/createbindingscollections.html new file mode 100644 index 000000000..aa68637dd --- /dev/null +++ b/db/docs/collections/tutorial/createbindingscollections.html @@ -0,0 +1,283 @@ + + + + + + + Creating Bindings and Collections + + + + + + + + + + +
+
+
+
+

+ Creating Bindings and Collections +

+
+
+
+
+

+ Bindings translate between stored records and Java objects. + In this example, Java serialization bindings are used. Serial + bindings are the simplest type of bindings because no mapping of + fields or type conversion is needed. Tuple bindings — which are + more difficult to create than serial bindings but have some + advantages — will be introduced later in the Tuple example + program. +

+

+ Standard Java collections are used to access records in a + database. Stored collections use bindings transparently to convert + the records to objects when they are retrieved from the collection, + and to convert the objects to records when they are stored in the + collection. +

+

+ An important characteristic of stored collections is that they + do not perform object caching. Every time an object is + accessed via a collection it will be added to or retrieved from the + database, and the bindings will be invoked to convert the data. + Objects are therefore always passed and returned by value, not by + reference. Because Berkeley DB is an embedded database, efficient + caching of stored raw record data is performed by the database library. +

+

+ The SampleViews class is used to create the bindings and + collections. This class is separate from the SampleDatabase + class to illustrate the idea that a single set of stored data can + be accessed via multiple bindings and collections, or views. + The skeleton for the SampleViews class follows. +

+ +
import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.ClassCatalog;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.collections.StoredEntrySet;
+import com.sleepycat.collections.StoredMap;
+...
+
+public class SampleViews
+{
+    private StoredMap partMap;
+    private StoredMap supplierMap;
+    private StoredMap shipmentMap;
+
+    ...
+    public SampleViews(SampleDatabase db)
+    {
+    }
+} 
+

+ A + StoredMap + + field is used for each database. The StoredMap class implements the + standard Java + Map + + interface, which has methods for obtaining a + Set + + of keys, a + Collection + + of values, or a + Set + + of + Map.Entry + + key/value pairs. Because databases contain key/value pairs, any + Berkeley DB database may be represented as a Java map. +

+

+ The following statements create the key and data bindings using + the + SerialBinding + + class. +

+ +
    public SampleViews(SampleDatabase db)
+    {
+        ClassCatalog catalog = db.getClassCatalog();
+        EntryBinding partKeyBinding =
+            new SerialBinding(catalog, PartKey.class);
+        EntryBinding partValueBinding =
+            new SerialBinding(catalog, PartData.class);
+        EntryBinding supplierKeyBinding =
+            new SerialBinding(catalog, SupplierKey.class);
+        EntryBinding supplierValueBinding =
+            new SerialBinding(catalog, SupplierData.class);
+        EntryBinding shipmentKeyBinding =
+            new SerialBinding(catalog, ShipmentKey.class);
+        EntryBinding shipmentValueBinding =
+            new SerialBinding(catalog, ShipmentData.class);
+        ...
+    } 
+

+ The first parameter of the + SerialBinding + + constructor is the class catalog, and is used to store the class + descriptions of the serialized objects. +

+

+ The second parameter is the base class for the serialized + objects and is used for type checking of the stored objects. If + null or Object.class is specified, then any Java + class is allowed. Otherwise, all objects stored in that format must + be instances of the specified class or derived from the specified + class. In the example, specific classes are used to enable strong + type checking. +

+

+ The following statements create standard Java maps using the + StoredMap + + class. +

+ +
    public SampleViews(SampleDatabase db)
+    {
+        ...
+        partMap =
+            new StoredMap(db.getPartDatabase(),
+                          partKeyBinding, partValueBinding, true);
+        supplierMap =
+            new StoredMap(db.getSupplierDatabase(),
+                          supplierKeyBinding, supplierValueBinding, true);
+        shipmentMap =
+            new StoredMap(db.getShipmentDatabase(),
+                          shipmentKeyBinding, shipmentValueBinding, true);
+    ...
+    } 
+

+ The first parameter of the + StoredMap + + constructor is the database. In a StoredMap, the database keys (the primary + keys) are used as the map keys. The Index + example shows how to use secondary index keys as map keys. +

+

+ The second and third parameters are the key and value bindings + to use when storing and retrieving objects via the map. +

+

+ The fourth and last parameter specifies whether changes will be + allowed via the collection. If false is passed, the collection will + be read-only. +

+
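+
+ Once constructed, the stored maps behave like ordinary java.util.Map objects and the bindings perform the record conversion transparently. The following brief sketch (not part of the original example program; in the example such operations run within a transaction) stores and retrieves one Part record: +
+
+import java.util.Map;
+...
+    Map partMap = views.getPartMap();
+    PartKey key = new PartKey("P1");
+    PartData value = new PartData("Nut", "Red",
+                                  new Weight(12.0, Weight.GRAMS), "London");
+    partMap.put(key, value);                       // writes a Part record
+    PartData stored = (PartData) partMap.get(key); // reads it back
+    System.out.println(stored.getName());          // prints "Nut"
+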

+ The following getter methods return the stored maps for use by + other classes in the example program. Convenience methods for + returning entry sets are also included. +

+ +
public class SampleViews
+{
+    ...
+    public final StoredMap getPartMap()
+    {
+        return partMap;
+    }
+
+    public final StoredMap getSupplierMap()
+    {
+        return supplierMap;
+    }
+
+    public final StoredMap getShipmentMap()
+    {
+        return shipmentMap;
+    }
+
+    public final StoredEntrySet getPartEntrySet()
+    {
+        return (StoredEntrySet) partMap.entrySet();
+    }
+
+    public final StoredEntrySet getSupplierEntrySet()
+    {
+        return (StoredEntrySet) supplierMap.entrySet();
+    }
+
+    public final StoredEntrySet getShipmentEntrySet()
+    {
+        return (StoredEntrySet) shipmentMap.entrySet();
+    }
+    ...
+} 
+

+ Note that StoredMap and StoredEntrySet are returned rather than + just returning Map and Set. Since StoredMap implements the Map + interface and StoredEntrySet implements the Set interface, you may + ask why Map and Set were not returned directly. +

+

+ StoredMap, StoredEntrySet, + and other stored collection classes + have a small number of extra methods beyond those in the Java + collection interfaces. The stored collection types are therefore + returned to avoid casting when using the extended methods. + Normally, however, only a Map or Set is needed, and may be used as + follows. +

+ +
    SampleDatabase sd = new SampleDatabase("/home");
+    SampleViews views = new SampleViews(sd);
+    Map partMap = views.getPartMap();
+    Set supplierEntries = views.getSupplierEntrySet(); 
+
+ + + diff --git a/db/docs/collections/tutorial/creatingentitybindings.html b/db/docs/collections/tutorial/creatingentitybindings.html new file mode 100644 index 000000000..adf98ba17 --- /dev/null +++ b/db/docs/collections/tutorial/creatingentitybindings.html @@ -0,0 +1,268 @@ + + + + + + + Creating Entity Bindings + + + + + + + + + + +
+
+
+
+

+ Creating Entity Bindings +

+
+
+
+
+

+ Entity bindings are similar to ordinary bindings in that + they convert between Java objects and the stored data format of + keys and values. In addition, entity bindings map between key/value + pairs and entity objects. An ordinary binding is a one-to-one + mapping, while an entity binding is a two-to-one mapping. +

+

+ The partValueBinding, supplierValueBinding and + shipmentValueBinding bindings are created below as entity + bindings rather than as serial bindings, as they were in the prior examples. +

+ +
import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.serial.SerialSerialBinding;
+...
+
+public class SampleViews
+{
+    ...
+    public SampleViews(SampleDatabase db)
+    {
+        ClassCatalog catalog = db.getClassCatalog();
+        SerialBinding partKeyBinding =
+            new SerialBinding(catalog, PartKey.class);
+        EntityBinding partValueBinding =
+            new PartBinding(catalog, PartKey.class, PartData.class);
+        SerialBinding supplierKeyBinding =
+            new SerialBinding(catalog, SupplierKey.class);
+        EntityBinding supplierValueBinding =
+            new SupplierBinding(catalog, SupplierKey.class,
+                                SupplierData.class);
+        SerialBinding shipmentKeyBinding =
+            new SerialBinding(catalog, ShipmentKey.class);
+        EntityBinding shipmentValueBinding =
+            new ShipmentBinding(catalog, ShipmentKey.class,
+                                ShipmentData.class);
+        SerialBinding cityKeyBinding =
+            new SerialBinding(catalog, String.class);
+        ...
+    }
+} 
+

+ The entity bindings will be used in the next section to + construct stored map objects. +

+

+ The PartBinding class is defined below. +

+ +
public class SampleViews
+{
+    ...
+    private static class PartBinding extends SerialSerialBinding {
+        private PartBinding(ClassCatalog classCatalog,
+                            Class keyClass,
+                            Class dataClass)
+        {
+            super(classCatalog, keyClass, dataClass);
+        }
+
+        public Object entryToObject(Object keyInput, Object dataInput)
+        {
+            PartKey key = (PartKey) keyInput;
+            PartData data = (PartData) dataInput;
+            return new Part(key.getNumber(), data.getName(), data.getColor(),
+                            data.getWeight(), data.getCity());
+        }
+
+        public Object objectToKey(Object object)
+        {
+            Part part = (Part) object;
+            return new PartKey(part.getNumber());
+        }
+
+        public Object objectToData(Object object)
+        {
+            Part part = (Part) object;
+            return new PartData(part.getName(), part.getColor(),
+                                 part.getWeight(), part.getCity());
+        }
+    }
+    ...
+} 
+

+ In general, an entity binding is any class that implements the + EntityBinding + + interface, just as an ordinary binding is any class that implements + the + EntryBinding + + interface. In the prior examples the built-in + SerialBinding + + class (which implements + EntryBinding) + was used and no application-defined binding classes were needed. +

+

+ In this example, application-defined binding classes are used + that extend the + SerialSerialBinding + + abstract base class. This base class implements + EntityBinding + + and provides the conversions between key/value bytes and key/value + objects, just as the + SerialBinding + + class does. The application-defined entity class implements the + abstract methods defined in the base class that map between + key/value objects and entity objects. +

+

+ Three abstract methods are implemented for each entity binding. + The entryToObject() method takes as input the key and data + objects, which have been deserialized automatically by the base + class. As output, it returns the combined Part entity. +

+

+ The objectToKey() and objectToData() methods take an + entity object as input. As output they return the part key or data + object that is extracted from the entity object. The key or data + will then be serialized automatically by the base class. +

+

+ The SupplierBinding and ShipmentBinding classes + are very similar to the PartBinding class. +

+ +
public class SampleViews
+{
+    ...
+    private static class SupplierBinding extends SerialSerialBinding {
+        private SupplierBinding(ClassCatalog classCatalog,
+                                Class keyClass,
+                                Class dataClass)
+        {
+            super(classCatalog, keyClass, dataClass);
+        }
+
+        public Object entryToObject(Object keyInput, Object dataInput)
+        {
+            SupplierKey key = (SupplierKey) keyInput;
+            SupplierData data = (SupplierData) dataInput;
+            return new Supplier(key.getNumber(), data.getName(),
+                                data.getStatus(), data.getCity());
+        }
+
+        public Object objectToKey(Object object)
+        {
+            Supplier supplier = (Supplier) object;
+            return new SupplierKey(supplier.getNumber());
+        }
+
+        public Object objectToData(Object object)
+        {
+            Supplier supplier = (Supplier) object;
+            return new SupplierData(supplier.getName(), supplier.getStatus(),
+                                     supplier.getCity());
+        }
+    }
+
+    private static class ShipmentBinding extends SerialSerialBinding {
+        private ShipmentBinding(ClassCatalog classCatalog,
+                                Class keyClass,
+                                Class dataClass)
+        {
+            super(classCatalog, keyClass, dataClass);
+        }
+
+        public Object entryToObject(Object keyInput, Object dataInput)
+        {
+            ShipmentKey key = (ShipmentKey) keyInput;
+            ShipmentData data = (ShipmentData) dataInput;
+            return new Shipment(key.getPartNumber(), key.getSupplierNumber(),
+                                data.getQuantity());
+        }
+
+        public Object objectToKey(Object object)
+        {
+            Shipment shipment = (Shipment) object;
+            return new ShipmentKey(shipment.getPartNumber(),
+                                   shipment.getSupplierNumber());
+        }
+
+        public Object objectToData(Object object)
+        {
+            Shipment shipment = (Shipment) object;
+            return new ShipmentData(shipment.getQuantity());
+        }
+    }
+    ...
+} 
+
+ + + diff --git a/db/docs/collections/tutorial/developing.html b/db/docs/collections/tutorial/developing.html new file mode 100644 index 000000000..0f13dfe00 --- /dev/null +++ b/db/docs/collections/tutorial/developing.html @@ -0,0 +1,216 @@ + + + + + + Developing a Sleepycat Collections Application + + + + + + + + + +
+
+
+
+

Developing a Sleepycat Collections Application

+
+
+
+
+

+ There are several important choices to make when developing an + application using the Sleepycat Java Collections API. +

+
+
    +
  1. +

    + Choose the Berkeley DB Environment +

    +

    + Depending on your application's concurrency and transactional + requirements, you may choose one of the three Berkeley DB + Environments: Data Store, Concurrent Data Store, or + Transactional Data Store. For details on creating and + configuring the environment, see the + Berkeley DB Programmer's Reference Guide. +

    +
  2. +
  3. +

    + Choose the Berkeley DB Access Method +

    +

    + For each Berkeley DB datastore, you may choose from any of the + four Berkeley DB access methods — BTREE, HASH, RECNO, or + QUEUE + + (DatabaseType.BTREE, + DatabaseType.HASH, + DatabaseType.RECNO, + or + DatabaseType.QUEUE.) + + — and a number of other database options. Your choice + depends on several factors such as whether you need ordered + keys, unique keys, record number access, and so forth. For more + information on access methods, see the + Berkeley DB Programmer's Reference Guide. +

    +
  4. +
  5. +

    + Choose the Format for Keys and Values +

    +

    + For each database you may choose a binding format for the keys + and values. For example, the tuple format is useful for keys + because it has a deterministic sort order. The serial format is + useful for values if you want to store arbitrary Java objects. In + some cases a custom format may be appropriate. For details on + choosing a binding format see + + Using Data Bindings + . +

    +
  6. +
  7. +

    + Choose the Binding for Keys and Values +

    +

    + With the serial data format you do not have to create a binding + for each Java class that is stored since Java serialization is + used. But for other formats a binding must be defined that + translates between stored byte arrays and Java objects. For details + see + + Using Data Bindings + . +

    +
  8. +
  9. +

    + Choose Secondary Indices +

    +

    + Any database that has unique keys may have any number of + secondary indices. A secondary index has keys that are derived from + data values in the primary database. This allows lookup and + iteration of objects in the database by its index keys. + + + + For each index you must define how the index keys are derived from the data + values using a + + + SecondaryKeyCreator. + + For details see the + + + + SecondaryDatabase, + + + + SecondaryConfig + + and + + SecondaryKeyCreator + + classes. +

    +
  10. +
  11. +

    + Choose the Collection Interface for each Database +

    +

    + The standard Java Collection interfaces are used for accessing + databases and secondary indices. The Map and Set interfaces may be + used for any type of database. The Iterator interface is used + through the Set interfaces. For more information on the collection + interfaces see + + Using Stored Collections + . +

    +
  12. +
+
+

+ Any number of bindings and collections may be created for the + same database. This allows multiple views of the same stored data. + For example, a data store may be viewed as a Map of keys to values, + a Set of keys, or a Collection of values. String values, for + example, may be used with the built-in binding to the String class, + or with a custom binding to another class that represents the + string values differently. +

+

+ It is sometimes desirable to use a Java class that encapsulates + both a data key and a data value. For example, a Part object might + contain both the part number (key) and the part name (value). Using + the Sleepycat Java Collections API this type of object is called an + "entity". An entity binding is used to translate between the Java + object and the stored data key and value. Entity bindings may be + used with all Collection types. +

+

+ Please be aware that the provided Sleepycat Java Collections API collection classes + do not conform completely to the interface contracts + defined in the java.util package. For example, all + iterators must be explicitly closed and the size() + method is not available. The differences between the Sleepycat Java + Collections API collections and the standard Java collections are + documented in + + Stored Collections Versus Standard Java Collections + . +

+
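+
+ For example (a sketch with hypothetical names, not taken from the example program), an iterator obtained from a stored collection is a StoredIterator that holds an open cursor and should be closed in a finally block: +
+
+import com.sleepycat.collections.StoredIterator;
+import java.util.Iterator;
+...
+    Iterator i = myStoredMap.values().iterator();
+    try
+    {
+        while (i.hasNext())
+        {
+            Object value = i.next();
+            // process the value
+        }
+    }
+    finally
+    {
+        // release the cursor held by the stored iterator
+        ((StoredIterator) i).close();
+    }
+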
+ + + diff --git a/db/docs/collections/tutorial/entitieswithcollections.html b/db/docs/collections/tutorial/entitieswithcollections.html new file mode 100644 index 000000000..f66cc541a --- /dev/null +++ b/db/docs/collections/tutorial/entitieswithcollections.html @@ -0,0 +1,252 @@ + + + + + + + Using Entities with Collections + + + + + + + + + + +
+
+
+
+

+ Using Entities with Collections +

+
+
+
+
+

+ In this example entity objects, rather than key and value + objects, are used for adding and enumerating the records in a + collection. Because fewer classes and objects are involved, adding + and enumerating is done more conveniently and more simply than in + the prior examples. +

+

+ For adding and iterating entities, the collection of entities + returned by + Map.values + + is used. In general, when using an entity binding, all Java + collection methods that are passed or returned a value object will + be passed or returned an entity object instead. +

+

+ The Sample class has been changed in this example to add + objects using the + Set.add + + method rather than the + Map.put + + method that was used in the prior examples. Entity objects are + constructed and passed to + Set.add. +

+ +
import java.util.Set;
+...
+public class Sample
+{
+    ...
+    private void addSuppliers()
+    {
+        Set suppliers = views.getSupplierSet();
+        if (suppliers.isEmpty())
+        {
+            System.out.println("Adding Suppliers");
+            suppliers.add(new Supplier("S1", "Smith", 20, "London"));
+            suppliers.add(new Supplier("S2", "Jones", 10, "Paris"));
+            suppliers.add(new Supplier("S3", "Blake", 30, "Paris"));
+            suppliers.add(new Supplier("S4", "Clark", 20, "London"));
+            suppliers.add(new Supplier("S5", "Adams", 30, "Athens"));
+        }
+    }
+
+    private void addParts()
+    {
+        Set parts = views.getPartSet();
+        if (parts.isEmpty())
+        {
+            System.out.println("Adding Parts");
+            parts.add(new Part("P1", "Nut", "Red",
+                      new Weight(12.0, Weight.GRAMS), "London"));
+            parts.add(new Part("P2", "Bolt", "Green",
+                      new Weight(17.0, Weight.GRAMS), "Paris"));
+            parts.add(new Part("P3", "Screw", "Blue",
+                      new Weight(17.0, Weight.GRAMS), "Rome"));
+            parts.add(new Part("P4", "Screw", "Red",
+                      new Weight(14.0, Weight.GRAMS), "London"));
+            parts.add(new Part("P5", "Cam", "Blue",
+                      new Weight(12.0, Weight.GRAMS), "Paris"));
+            parts.add(new Part("P6", "Cog", "Red",
+                      new Weight(19.0, Weight.GRAMS), "London"));
+        }
+    }
+
+    private void addShipments()
+    {
+        Set shipments = views.getShipmentSet();
+        if (shipments.isEmpty())
+        {
+            System.out.println("Adding Shipments");
+            shipments.add(new Shipment("P1", "S1", 300));
+            shipments.add(new Shipment("P2", "S1", 200));
+            shipments.add(new Shipment("P3", "S1", 400));
+            shipments.add(new Shipment("P4", "S1", 200));
+            shipments.add(new Shipment("P5", "S1", 100));
+            shipments.add(new Shipment("P6", "S1", 100));
+            shipments.add(new Shipment("P1", "S2", 300));
+            shipments.add(new Shipment("P2", "S2", 400));
+            shipments.add(new Shipment("P2", "S3", 200));
+            shipments.add(new Shipment("P2", "S4", 200));
+            shipments.add(new Shipment("P4", "S4", 300));
+            shipments.add(new Shipment("P5", "S4", 400));
+        }
+    } 
+

+ Instead of printing the key/value pairs by iterating over the + Map.entrySet + + as done in the prior example, this example + iterates over the entities in the + Map.values + + collection. +

+ +
import com.sleepycat.collections.StoredIterator;
+import java.util.Iterator;
+import java.util.Set;
+...
+public class Sample
+{
+    ...
+    private class PrintDatabase implements TransactionWorker
+    {
+        public void doWork()
+            throws Exception
+        {
+            printValues("Parts",
+                         views.getPartSet().iterator());
+            printValues("Suppliers",
+                         views.getSupplierSet().iterator());
+            printValues("Suppliers for City Paris",
+                         views.getSupplierByCityMap().duplicates(
+                                            "Paris").iterator());
+            printValues("Shipments",
+                         views.getShipmentSet().iterator());
+            printValues("Shipments for Part P1",
+                         views.getShipmentByPartMap().duplicates(
+                                            new PartKey("P1")).iterator());
+            printValues("Shipments for Supplier S1",
+                         views.getShipmentBySupplierMap().duplicates(
+                                            new SupplierKey("S1")).iterator());
+        }
+    }
+    ...
+} 
+
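+
+ The printValues helper is not shown in this section. A minimal sketch of what it might look like follows; it assumes, as the output below suggests, that the entity classes implement toString(), and it relies on the StoredIterator and Iterator imports in the listing above: +
+
+    private void printValues(String label, Iterator iterator)
+    {
+        System.out.println("\n--- " + label + " ---");
+        try
+        {
+            while (iterator.hasNext())
+            {
+                System.out.println(iterator.next().toString());
+            }
+        }
+        finally
+        {
+            // close the cursor held by the stored iterator
+            if (iterator instanceof StoredIterator)
+            {
+                ((StoredIterator) iterator).close();
+            }
+        }
+    }
+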

+ The output of the example program is shown below. +

+
Adding Suppliers
+Adding Parts
+Adding Shipments
+
+--- Parts ---
+Part: number=P1 name=Nut color=Red weight=[12.0 grams] city=London
+Part: number=P2 name=Bolt color=Green weight=[17.0 grams] city=Paris
+Part: number=P3 name=Screw color=Blue weight=[17.0 grams] city=Rome
+Part: number=P4 name=Screw color=Red weight=[14.0 grams] city=London
+Part: number=P5 name=Cam color=Blue weight=[12.0 grams] city=Paris
+Part: number=P6 name=Cog color=Red weight=[19.0 grams] city=London
+
+--- Suppliers ---
+Supplier: number=S1 name=Smith status=20 city=London
+Supplier: number=S2 name=Jones status=10 city=Paris
+Supplier: number=S3 name=Blake status=30 city=Paris
+Supplier: number=S4 name=Clark status=20 city=London
+Supplier: number=S5 name=Adams status=30 city=Athens
+
+--- Suppliers for City Paris ---
+Supplier: number=S2 name=Jones status=10 city=Paris
+Supplier: number=S3 name=Blake status=30 city=Paris
+
+--- Shipments ---
+Shipment: part=P1 supplier=S1 quantity=300
+Shipment: part=P1 supplier=S2 quantity=300
+Shipment: part=P2 supplier=S1 quantity=200
+Shipment: part=P2 supplier=S2 quantity=400
+Shipment: part=P2 supplier=S3 quantity=200
+Shipment: part=P2 supplier=S4 quantity=200
+Shipment: part=P3 supplier=S1 quantity=400
+Shipment: part=P4 supplier=S1 quantity=200
+Shipment: part=P4 supplier=S4 quantity=300
+Shipment: part=P5 supplier=S1 quantity=100
+Shipment: part=P5 supplier=S4 quantity=400
+Shipment: part=P6 supplier=S1 quantity=100
+
+--- Shipments for Part P1 ---
+Shipment: part=P1 supplier=S1 quantity=300
+Shipment: part=P1 supplier=S2 quantity=300
+
+--- Shipments for Supplier S1 ---
+Shipment: part=P1 supplier=S1 quantity=300
+Shipment: part=P2 supplier=S1 quantity=200
+Shipment: part=P3 supplier=S1 quantity=400
+Shipment: part=P4 supplier=S1 quantity=200
+Shipment: part=P5 supplier=S1 quantity=100
+Shipment: part=P6 supplier=S1 quantity=100 
+
+ + + diff --git a/db/docs/collections/tutorial/gettingStarted.css b/db/docs/collections/tutorial/gettingStarted.css new file mode 100644 index 000000000..c1b4c86b7 --- /dev/null +++ b/db/docs/collections/tutorial/gettingStarted.css @@ -0,0 +1,41 @@ +body { width: 45em; + margin-left: 3em; + font-family: Arial, Helvetica, sans-serif; + font-size: 11pt; + } + +h2.title { margin-left: -1em; + font-family: Verdana, serif; + font-size: 16pt; + } + +h3.title { font-family: Verdana, serif; + font-size: 14pt; + } + +pre.programlisting { + font-family: monospace; + background-color: #eae8e9; +} + +div.navheader { font-size: 9pt; + width: 60em; + margin-left: -2em; + } + +div.navheader table tr td { font-size: 9pt; } + +div.navfooter { font-size: 9pt; + width: 60em; + margin-left: -2em; + } +div.navfooter table tr td { font-size: 9pt; } + +span.emphasis { font-style: italic; font-size: 9pt;} + +div.appendix div.informaltable { font-size: 9pt; } +div.appendix div.informaltable td { vertical-align: top; } +div.appendix div.informaltable p { margin-top: .25em; } +div.appendix div.informaltable p { margin-bottom: .25em; } + + diff --git a/db/docs/collections/tutorial/handlingexceptions.html b/db/docs/collections/tutorial/handlingexceptions.html new file mode 100644 index 000000000..47f477c7a --- /dev/null +++ b/db/docs/collections/tutorial/handlingexceptions.html @@ -0,0 +1,215 @@ + + + + + + + Handling Exceptions + + + + + + + + + + +
+
+
+
+

+ Handling Exceptions +

+
+
+
+
+

+ Exception handling was illustrated previously in + + Implementing the Main Program + + and + + Using Transactions + . + This section describes exception handling in a Sleepycat Java Collections API application in + more detail. +

+

+ There are two exceptions that must be treated specially: + + RunRecoveryException + + and + + + DeadlockException. + +

+

+ + RunRecoveryException + + is thrown when the only solution is to shut down the application + and run recovery. All applications must catch this exception and + follow the recovery procedure. +

+

+ When + + DeadlockException + + is thrown, the application should normally retry the operation. If + a deadlock continues to occur for some maximum number of retries, + the application should give up and try again later or take other + corrective actions. The Sleepycat Java Collections API provides two APIs + for transaction execution. +

+
+ +
+
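+
+ One of these APIs is the TransactionRunner class discussed below, which retries deadlocked transactions automatically. A minimal usage sketch (the worker body and retry limit shown are illustrative, not part of the example program): +
+
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+...
+    TransactionRunner runner = new TransactionRunner(db.getEnvironment());
+    runner.setMaxRetries(3);  // give up after three deadlock retries
+    runner.run(new TransactionWorker()
+    {
+        public void doWork() throws Exception
+        {
+            // access stored collections here; the transaction is committed
+            // if doWork() returns normally and aborted if it throws
+        }
+    });
+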

+ When using the + TransactionRunner + + class there are two other considerations. +

+
+
    +
  • +

    + First, if the application-defined + TransactionWorker.doWork + + method throws an exception, the + transaction will automatically be aborted; otherwise, the + transaction will automatically be committed. Applications should + design their transaction processing with this in mind. +

    +
  • +
  • +

    + Second, please be aware that + TransactionRunner.run + + unwraps exceptions in order to discover whether a nested exception is a + + + DeadlockException. + + This is particularly important since all Berkeley DB exceptions + that occur while calling a stored collection method are wrapped + with a + RuntimeExceptionWrapper. + This wrapping is necessary because Berkeley DB exceptions are + checked exceptions, and the Java collections API does not allow + such exceptions to be thrown. +

    +
  • +
+
+

+ When calling + TransactionRunner.run, + the unwrapped (nested) exception will be unwrapped and thrown + automatically. If you are not using + TransactionRunner + + or if you are handling exceptions directly for some other reason, + use the + ExceptionUnwrapper.unwrap + + method to get the nested exception. For example, this can be used + to discover that an exception is a + + RunRecoveryException + + as shown below. +

+ +
import com.sleepycat.db.RunRecoveryException;
+import com.sleepycat.util.ExceptionUnwrapper;
+...
+    catch (Exception e)
+    {
+        e = ExceptionUnwrapper.unwrap(e);
+        if (e instanceof RunRecoveryException)
+        {
+            // follow recovery procedure
+        }
+    } 
+
+ + + diff --git a/db/docs/collections/tutorial/implementingmain.html b/db/docs/collections/tutorial/implementingmain.html new file mode 100644 index 000000000..aee811e8d --- /dev/null +++ b/db/docs/collections/tutorial/implementingmain.html @@ -0,0 +1,251 @@ + + + + + + + Implementing the Main Program + + + + + + + + + + +
+
+
+
+

+ Implementing the Main Program +

+
+
+
+
+

+ The main program opens the environment and databases, stores and retrieves + objects within a transaction, and finally closes the environment + and databases. This section describes the main program shell, and the + next section describes how to run transactions for storing and + retrieving objects. +

+

+ The Sample class contains the main program. The skeleton + for the Sample class follows. +

+ +
import com.sleepycat.db.DatabaseException;
+import java.io.FileNotFoundException;
+
+public class Sample
+{
+    private SampleDatabase db;
+    private SampleViews views;
+
+    public static void main(String[] args)
+    {
+    }
+
+    private Sample(String homeDir)
+        throws DatabaseException, FileNotFoundException
+    {
+    }
+
+    private void close()
+        throws DatabaseException
+    {
+    }
+
+    private void run()
+        throws Exception
+    {
+    }
+} 
+

+ The main program uses the SampleDatabase and + SampleViews classes that were described in the preceding + sections. The main method will create an instance of the + Sample class, and call its run() and close() + methods. +

+

+ The following statements parse the program's command line + arguments. +

+ +
    public static void main(String[] args)
+    {
+        System.out.println("\nRunning sample: " + Sample.class);
+        String homeDir = "./tmp";
+        for (int i = 0; i < args.length; i += 1)
+        {
+            String arg = args[i];
+            if (args[i].equals("-h") && i < args.length - 1)
+            {
+                i += 1;
+                homeDir = args[i];
+            }
+            else
+            {
+                System.err.println("Usage:\n java " + 
+                                   Sample.class.getName() +
+                                  "\n  [-h <home-directory>]");
+                System.exit(2);
+            }
+        }
+        ...
+    } 
+

+ The usage command is: +

+
java com.sleepycat.examples.bdb.shipment.basic.Sample
+     [-h <home-directory> ] 
+

+ The -h command is used to set the homeDir + variable, which will later be passed to the SampleDatabase() + constructor. Normally all Berkeley DB programs should provide a way + to configure their database environment home directory. +

+

+ The default for the home directory is ./tmp — the tmp + subdirectory of the current directory where the sample is run. The + home directory must exist before running the sample. To re-create + the sample database from scratch, delete all files in the home + directory before running the sample. +

+

+ The home directory was described previously in + + Opening and Closing the Database Environment + . +

+

+ Of course, the command line arguments shown are only examples + and a real-life application may use different techniques for + configuring these options. +

+

+ The following statements create an instance of the Sample + class and call its run() and close() methods. +

+ +
    public static void main(String[] args)
+    {
+        ...
+        Sample sample = null;
+        try
+        {
+            sample = new Sample(homeDir);
+            sample.run();
+        }
+        catch (Exception e)
+        {
+            e.printStackTrace();
+        }
+        finally
+        {
+            if (sample != null)
+            {
+                try
+                {
+                    sample.close();
+                }
+                catch (Exception e)
+                {
+                    System.err.println("Exception during database close:");
+                    e.printStackTrace();
+                }
+            }
+        }
+    } 
+

+ The Sample() constructor will open the environment and + databases, and the run() method will run transactions for + storing and retrieving objects. If either of these throws an + exception, then the program was unable to run and should normally + terminate. (Transaction retries are handled at a lower level and + will be described later.) The first catch statement handles + such exceptions. +

+

+ The finally statement is used to call the close() + method since an attempt should always be made to close the environment and + databases + cleanly. If an exception is thrown during close and a prior + exception occurred above, then the exception during close is likely + a side effect of the prior exception. +

+

+ The Sample() constructor creates the SampleDatabase + and SampleViews objects. +

+ +
    private Sample(String homeDir)
+        throws DatabaseException, FileNotFoundException
+    {
+        db = new SampleDatabase(homeDir);
+        views = new SampleViews(db);
+    } 
+

+ Recall that creating the SampleDatabase object will open + the environment and all databases. +

+

+ To close the database the Sample.close() method simply + calls SampleDatabase.close(). +

+ +
     private void close()
+        throws DatabaseException
+    {
+        db.close();
+    } 
+

+ The run() method is described in the next section. +

+
+ + + diff --git a/db/docs/collections/tutorial/index.html b/db/docs/collections/tutorial/index.html new file mode 100644 index 000000000..7bb297e8c --- /dev/null +++ b/db/docs/collections/tutorial/index.html @@ -0,0 +1,525 @@ + + + + + + Berkeley DB Collections Tutorial + + + + + + + +
+
+
+
+

Berkeley DB Collections Tutorial

+
+
+
+

+ Legal Notice +

+

+ This documentation is distributed under the terms of the Sleepycat + public license. You may review the terms of this license at: + http://www.sleepycat.com/download/oslicense.html + +

+

+ Sleepycat Software, Berkeley DB, Berkeley DB XML and the Sleepycat logo + are trademarks or service marks of Sleepycat Software, Inc. All rights to + these marks are reserved. No third-party use is permitted without the + express prior written consent of Sleepycat Software, Inc. +

+

+ Java™ + + and all Java-based marks + are trademarks or registered trademarks + of Sun Microsystems, Inc, in the United States and other + countries. + +

+

+ To obtain a copy of this document's original source code, please write + to . +

+
+
+
+

9/22/2004

+
+
+
+
+
+
+

+ Table of Contents +

+
+
+ + Preface + +
+
+
+
+ + Conventions Used in this Book + +
+
+
+
+ + 1. + Introduction + + +
+
+
+
+ + Features + +
+
+ + Developing a Sleepycat Collections Application + +
+
+ + Tutorial Introduction + +
+
+
+
+ + 2. + The Basic Program + + +
+
+
+
+ + + Defining Serialized Key and Value Classes + + +
+
+ + + Opening and Closing the Database Environment + + +
+
+ + + Opening and Closing the Class Catalog + + +
+
+ + + Opening and Closing Databases + + +
+
+ + + Creating Bindings and Collections + + +
+
+ + + Implementing the Main Program + + +
+
+ + + Using Transactions + + +
+
+ + + Adding Database Items + + +
+
+ + + Retrieving Database Items + + +
+
+ + + Handling Exceptions + + +
+
+
+
+ + 3. + Using Secondary Indices + + +
+
+
+
+ + + Opening Secondary Key Indices + + +
+
+ + + + More Secondary Key Indices + + +
+
+ + + Creating Indexed Collections + + +
+
+ + + Retrieving Items by Index Key + + +
+
+
+
+ + 4. + Using Entity Classes + + +
+
+
+
+ + + Defining Entity Classes + + +
+
+ + + Creating Entity Bindings + + +
+
+ + + Creating Collections with Entity Bindings + + +
+
+ + + Using Entities with Collections + + +
+
+
+
+ + 5. + Using Tuples + + +
+
+
+
+ + + Using the Tuple Format + + +
+
+ + + Using Tuples with Key Creators + + +
+
+ + + Creating Tuple Key Bindings + + +
+
+ + +Creating Tuple-Serial Entity Bindings + + +
+
+ + + Using Sorted Collections + + +
+
+
+
+ + 6. + Using Serializable Entities + + +
+
+
+
+ + + Using Transient Fields in an Entity Class + + +
+
+ + + Using Transient Fields in an Entity Binding + + +
+
+ + + Removing the Redundant Value Classes + + +
+
+
+
+ + 7. + Summary + + +
+
+ + A. + API Notes and Details + + +
+
+
+
+ + + Using Data Bindings + + +
+
+
+
+ + + Selecting Binding Formats + + +
+
+ + Record Number Bindings + +
+
+ + + Selecting Data Bindings + + +
+
+ + + Implementing Bindings + + +
+
+ + + Using Bindings + + +
+
+ + + Secondary Key Creators + + +
+
+
+
+ + + Using the Sleepycat Java Collections API + + +
+
+
+
+ + + Using Transactions + + +
+
+ + + Transaction Rollback + + +
+
+ + Selecting Access Methods + +
+
+ + + Access Method Restrictions + + +
+
+
+
+ + + Using Stored Collections + + +
+
+
+
+ + + Stored Collection and Access Methods + + +
+
+ + + Stored Collections Versus Standard Java Collections + + +
+
+ + + Other Stored Collection Characteristics + + +
+
+ + + Why Java Collections for Berkeley DB + + +
+
+
+
+ + + Serialized Object Storage + + +
+
+
+
+
+
+ + + diff --git a/db/docs/collections/tutorial/indexedcollections.html b/db/docs/collections/tutorial/indexedcollections.html new file mode 100644 index 000000000..9be8495ac --- /dev/null +++ b/db/docs/collections/tutorial/indexedcollections.html @@ -0,0 +1,248 @@ + + + + + + + Creating Indexed Collections + + + + + + + + + + +
+
+
+
+

+ Creating Indexed Collections +

+
+
+
+
+

+ In the prior Basic example, bindings and Java collections were + created for accessing databases via their primary keys. In this + example, bindings and collections are added for accessing the same + databases via their index keys. As in the prior example, serial + bindings and the Java + Map + + class are used. +

+

+ When a map is created from a + + + SecondaryDatabase, + + the keys of the map will be the index keys. However, the values of + the map will be the values of the primary database associated with + the index. This is how index keys can be used to access the values + in a primary database. +

+

+ For example, the Supplier's City field is an index key that can + be used to access the Supplier database. When a map is created + using the supplierByCityDb() method, the key to the map will be the + City field, a + String + + object. When + Map.get + + is called passing the City as the key parameter, a + SupplierData + object will be returned. +

+
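+
+ For example (a sketch, not part of the original example program, using the supplierByCityMap created below), get() returns one matching value, while the StoredMap.duplicates() method returns all values stored under an index key: +
+
+import java.util.Collection;
+...
+    StoredMap supplierByCityMap = views.getSupplierByCityMap();
+    // one SupplierData value whose Supplier record has the City "Paris"
+    SupplierData data = (SupplierData) supplierByCityMap.get("Paris");
+    // all SupplierData values whose Supplier records have the City "Paris"
+    Collection parisSuppliers = supplierByCityMap.duplicates("Paris");
+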

+ The SampleViews class is extended to create an index key + binding for the Supplier's City field and three Java maps based on + the three indices created in the prior section. +

+ +
import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.collections.StoredEntrySet;
+import com.sleepycat.collections.StoredMap;
+...
+
+public class SampleViews
+{
+    ...
+    private StoredMap supplierByCityMap;
+    private StoredMap shipmentByPartMap;
+    private StoredMap shipmentBySupplierMap;
+    ...
+
+    public SampleViews(SampleDatabase db)
+    {
+        ClassCatalog catalog = db.getClassCatalog();
+        ...
+        EntryBinding cityKeyBinding =
+            new SerialBinding(catalog, String.class);
+        ...
+        supplierByCityMap =
+            new StoredMap(db.getSupplierByCityDatabase(),
+                          cityKeyBinding, supplierValueBinding, true);
+        shipmentByPartMap =
+            new StoredMap(db.getShipmentByPartDatabase(),
+                          partKeyBinding, shipmentValueBinding, true);
+        shipmentBySupplierMap =
+            new StoredMap(db.getShipmentBySupplierDatabase(),
+                          supplierKeyBinding, shipmentValueBinding, true); 
+    ...
+    }
+} 
+

+ In general, the indexed maps are created here in the same way as + the unindexed maps were created in the Basic example. The + differences are: +

+
+
    +
  • +

    + The first parameter of the + StoredMap + + constructor is a + + SecondaryDatabase + + rather than a + + + Database. + +

    +
  • +
  • +

    + The second parameter is the index key binding rather than the + primary key binding. +

    +
  • +
+
+

+ For the supplierByCityMap, the cityKeyBinding must + first be created. This binding was not created in the Basic example + because the City field is not a primary key. +

+

+ Like the bindings created earlier for keys and values, the + cityKeyBinding is a + SerialBinding. + Unlike the bindings created earlier, it is an example of creating a + binding for a built-in Java class, + String, + instead of an application-defined class. Any serializable class may + be used. +

+

+ For the shipmentByPartMap and + shipmentBySupplierMap, the partKeyBinding and + supplierKeyBinding are used. These were created in the Basic + example and used as the primary key bindings for the partMap + and supplierMap. +

+

+ The value bindings — supplierValueBinding and + shipmentValueBinding — were also created in the Basic + example. +

+

+ This illustrates that bindings and formats may and should be + reused where appropriate for creating maps and other + collections. +

+

+ The following getter methods return the stored maps for use by + other classes in the example program. Convenience methods for + returning entry sets are also included. +

+ +
public class SampleViews
+{
+    ...
+    public final StoredMap getShipmentByPartMap()
+    {
+        return shipmentByPartMap;
+    }
+
+    public final StoredMap getShipmentBySupplierMap()
+    {
+        return shipmentBySupplierMap;
+    }
+
+    public final StoredMap getSupplierByCityMap()
+    {
+        return supplierByCityMap;
+    }
+
+    public final StoredEntrySet getShipmentByPartEntrySet()
+    {
+        return (StoredEntrySet) shipmentByPartMap.entrySet();
+    }
+
+    public final StoredEntrySet getShipmentBySupplierEntrySet()
+    {
+        return (StoredEntrySet) shipmentBySupplierMap.entrySet();
+    }
+
+    public final StoredEntrySet getSupplierByCityEntrySet()
+    {
+        return (StoredEntrySet) supplierByCityMap.entrySet();
+    }
+    ...
+} 
+
+ + + diff --git a/db/docs/collections/tutorial/intro.html b/db/docs/collections/tutorial/intro.html new file mode 100644 index 000000000..5e2a061b7 --- /dev/null +++ b/db/docs/collections/tutorial/intro.html @@ -0,0 +1,220 @@ + + + + + + Chapter 1.  + Introduction + + + + + + + + + + +
+
+
+
+

Chapter 1.  + Introduction +

+
+
+
+
+
+

+ Table of Contents +

+
+
+ + Features + +
+
+ + Developing a Sleepycat Collections Application + +
+
+ + Tutorial Introduction + +
+
+
+

+ The Sleepycat Java Collections API is a Java framework that extends + the well known + Java Collections + design pattern such that collections can now be + stored, updated and queried in a transactional manner. The Sleepycat Java + Collections API is a layer on top of DB. +

+

+ Together the Sleepycat Java Collections API and Berkeley DB provide an + embedded data management solution with all the benefits of a full + transactional storage and the simplicity of a well known Java API. + Java programmers who need fast, scalable, transactional data + management for their projects can quickly adopt and deploy the Sleepycat + Java Collections API with confidence. +

+

+ This framework was first known as + Greybird DB + written by Mark Hayes. Sleepycat Software has collaborated with Mark to + permanently incorporate his excellent work into our distribution + and support it as an ongoing part of Berkeley DB and Berkeley DB Java + Edition. The repository of source code that remains at Sourceforge at version 0.9.0 is + considered the last version before incorporation and will remain + intact but will not be updated to reflect changes made as part of + Berkeley DB or Berkeley DB Java Edition. +

+
+
+
+
+

Features

+
+
+
+
+

+ Berkeley DB has always provided a Java API which can be roughly + described as a map and cursor interface, where the keys and values + are represented as byte arrays. This API is a Java (JNI) interface + to the C API and it closely modeled the Berkeley DB C API's + interface. + + + + The Sleepycat Java Collections API is a layer on top of + + that + thin JNI mapping of the C API to Berkeley DB. + + + It adds significant new functionality in several ways. +

+
+
    +
  • +

    + An implementation of the Java Collections interfaces (Map, + SortedMap, Set, SortedSet, + List + and Iterator) is provided. +

    +
  • +
  • +

    + Transactions are supported using the conventional Java + transaction-per-thread model, where the current transaction is + implicitly associated with the current thread. +

    +
  • +
  • +

    + Transaction runner utilities are provided that automatically + perform transaction retry and exception handling. +

    +
  • +
  • +

    + Keys and values are represented as Java objects rather than + byte arrays. Bindings are used to map between Java objects and the + stored byte arrays. +

    +
  • +
  • +

    + The tuple data format is provided as the simplest data + representation, and is useful for keys as well as simple compact + values. +

    +
  • +
  • +

    + The serial data format is provided for storing arbitrary Java + objects without writing custom binding code. Java serialization is + extended to store the class descriptions separately, making the + data records much more compact than with standard Java + serialization. +

    +
  • +
  • +

    + Custom data formats and bindings can be easily added. XML data + format and XML bindings could easily be created using this feature, + for example. +

    +
  • +
  • +

    + The Sleepycat Java Collections API insulates the application + from minor differences in the use of the Berkeley DB Data Store, + Concurrent Data Store, and Transactional Data Store products. + This allows for development with one and deployment with another + without significant changes to code. +

    +
  • +
+
+

+ Note that the Sleepycat Java Collections API does not support caching + of programming language objects nor does it keep track of their stored + status. This is in contrast to "persistent object" approaches such + as those defined by + ODMG + and JDO + (JSR 12). + Such approaches have benefits but also require sophisticated object + caching. For simplicity the Sleepycat Java Collections API treats data + objects by value, not by reference, and does not perform object + caching of any kind. Since the Sleepycat Java Collections API is a thin + layer, its reliability and performance characteristics are roughly + equivalent to those of Berkeley DB, and database tuning is + accomplished in the same way as for any Berkeley DB database. +

+
+
+ + + diff --git a/db/docs/collections/tutorial/openclasscatalog.html b/db/docs/collections/tutorial/openclasscatalog.html new file mode 100644 index 000000000..fac34445b --- /dev/null +++ b/db/docs/collections/tutorial/openclasscatalog.html @@ -0,0 +1,208 @@ + + + + + + + Opening and Closing the Class Catalog + + + + + + + + + + +
+
+
+
+

+ Opening and Closing the Class Catalog +

+
+
+
+
+

+ This section describes how to open and close the Java class + catalog. The class catalog is a specialized database store that + contains the Java class descriptions of the serialized objects that + are stored in the database. The class descriptions are stored in + the catalog rather than storing them redundantly in each database + record. A single class catalog per environment must be opened + whenever serialized objects will be stored in the database. +

+

+ The SampleDatabase class is extended to open and close + the class catalog. The following additional imports and class + members are needed. +

+ +
import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseType;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.EnvironmentConfig;
+import java.io.File;
+import java.io.FileNotFoundException;
+
+...
+
+public class SampleDatabase
+{
+    private Environment env;
+    private static final String CLASS_CATALOG = "java_class_catalog";
+    ...
+    private StoredClassCatalog javaCatalog;
+    ...
+} 
+

+ While the class catalog is itself a database, it contains + metadata for other databases and is therefore treated specially by + the Sleepycat Java Collections API. The + StoredClassCatalog + + class encapsulates the catalog store and implements this special + behavior. +

+

+ The following statements open the class catalog by creating a + Database and a StoredClassCatalog object. The catalog + database is created if it doesn't already exist. +

+ +
    public SampleDatabase(String homeDirectory)
+        throws DatabaseException, FileNotFoundException
+    {
+        ...
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setType(DatabaseType.BTREE);
+
+        Database catalogDb = env.openDatabase(null, CLASS_CATALOG, null, 
+                                              dbConfig);
+
+        javaCatalog = new StoredClassCatalog(catalogDb);
+        ...
+    }
+    ...
+    public final StoredClassCatalog getClassCatalog() {
+        return javaCatalog;
+    } 
+

+ The + + DatabaseConfig + + class is used to specify configuration parameters when opening a + database. The first configuration option specified — + setTransactional() — is set to true to create a transactional + database. While non-transactional databases can also be created, + the examples in this tutorial use transactional databases. +

+

+ setAllowCreate() is set to true to specify + that the database will be created if it doesn't already exist. If + this parameter is not specified, an exception will be thrown if the + database does not already exist. +

+

+ setType() identifies the database storage + type or access method. For opening a catalog database, the + BTREE type is required. BTREE is the + most commonly used database type and in this tutorial is used for all + databases. +

+

+ The first parameter of the openDatabase() method is an + optional transaction that is used for creating a new database. If + null is passed, auto-commit is used when creating a database. +

+

+ The second and third parameters of openDatabase() + specify the filename and database (sub-file) name of the database. The + database name is optional and is null in this example. +

+

+ The last parameter of openDatabase() specifies the database + configuration object. +

+

+ Lastly, the StoredClassCatalog object is created to manage the + information in the class catalog database. The + StoredClassCatalog object will be used in the sections + following for creating serial bindings. +

+

+ The getClassCatalog method returns the catalog object for + use by other classes in the example program. +

+

+ When the environment is closed, the class catalog is closed + also. +

+ +
    public void close()
+        throws DatabaseException
+    {
+        javaCatalog.close();
+        env.close();
+    } 
+

+ The StoredClassCatalog.close() method simply closes the + underlying class catalog database and in fact the + + Database.close() + + method may be called instead, if desired. The catalog database, and + all other databases, must be closed before closing the + environment. +

+
+ + + diff --git a/db/docs/collections/tutorial/opendatabases.html b/db/docs/collections/tutorial/opendatabases.html new file mode 100644 index 000000000..3a4913dea --- /dev/null +++ b/db/docs/collections/tutorial/opendatabases.html @@ -0,0 +1,170 @@ + + + + + + + Opening and Closing Databases + + + + + + + + + + +
+
+
+
+

+ Opening and Closing Databases +

+
+
+
+
+

+ This section describes how to open and close the Part, Supplier + and Shipment databases. A database is a collection of + records, each of which has a key and a value. The keys and values + are stored in a selected format, which defines the syntax of the + stored data. Two examples of formats are Java serialization format + and tuple format. In a given database, all keys have the same + format and all values have the same format. +

+

+ The SampleDatabase class is extended to open and close + the three databases. The following additional class members are + needed. +

+ +
public class SampleDatabase
+{
+    ...
+    private static final String SUPPLIER_STORE = "supplier_store";
+    private static final String PART_STORE = "part_store";
+    private static final String SHIPMENT_STORE = "shipment_store";
+    ...
+    private Database supplierDb;
+    private Database partDb;
+    private Database shipmentDb;
+    ...
+} 
+

+ For each database there is a database name constant and a + Database object. +

+

+ The following statements open the three databases by + constructing a Database object. +

+ +
    public SampleDatabase(String homeDirectory)
+        throws DatabaseException, FileNotFoundException
+    {
+        ...
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setType(DatabaseType.BTREE);
+        ...
+        partDb = env.openDatabase(null, PART_STORE, null, dbConfig);
+        supplierDb = env.openDatabase(null, SUPPLIER_STORE, null, dbConfig);
+        shipmentDb = env.openDatabase(null, SHIPMENT_STORE, null, dbConfig);
+        ...
+    }
+	
+

+ The database configuration object that was used previously for + opening the catalog database is reused for opening the three + databases above. The databases are created if they don't already + exist. The parameters of the openDatabase() method were + described earlier when the class catalog database was opened. +

+

+ The following statements close the three databases. +

+ +
    public void close()
+        throws DatabaseException
+    {
+        partDb.close();
+        supplierDb.close();
+        shipmentDb.close();
+        javaCatalog.close();
+        env.close();
+    }
+	
+

+ All databases, including the catalog database, must be closed + before closing the environment. +

+

+ The following getter methods return the databases for use by + other classes in the example program. +

+ +
public class SampleDatabase
+{
+    ...
+    public final Database getPartDatabase()
+    {
+        return partDb;
+    }
+
+    public final Database getSupplierDatabase()
+    {
+        return supplierDb;
+    }
+
+    public final Database getShipmentDatabase()
+    {
+        return shipmentDb;
+    }
+    ...
+}
+
+ + + diff --git a/db/docs/collections/tutorial/opendbenvironment.html b/db/docs/collections/tutorial/opendbenvironment.html new file mode 100644 index 000000000..e0db5d3ea --- /dev/null +++ b/db/docs/collections/tutorial/opendbenvironment.html @@ -0,0 +1,200 @@ + + + + + + + Opening and Closing the Database Environment + + + + + + + + + + +
+
+
+
+

+ Opening and Closing the Database Environment +

+
+
+
+
+

+ This section of the tutorial describes how to open and close the + database environment. The database environment manages resources + (for example, memory, locks and transactions) for any number of + databases. A single environment instance is normally used for all + databases. +

+

+ The SampleDatabase class is used to open and close the + environment. It will also be used in following sections to open and + close the class catalog and other databases. Its constructor is + used to open the environment and its close() method is used + to close the environment. The skeleton for the + SampleDatabase class follows. +

+ +
import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.EnvironmentConfig;
+import java.io.File;
+import java.io.FileNotFoundException;
+
+public class SampleDatabase
+{
+    private Environment env;
+
+    public SampleDatabase(String homeDirectory)
+        throws DatabaseException, FileNotFoundException
+    {
+    }
+
+    public void close()
+        throws DatabaseException
+    {
+    }
+} 
+

+ The first thing to notice is that the Environment class is in + the + + com.sleepycat.db + package, not the com.sleepycat.collections + package. The + + com.sleepycat.db + package contains all core Berkeley DB + functionality. The com.sleepycat.collections package contains + extended functionality that is based on the Java Collections API. + The collections package is layered on top of the + + com.sleepycat.db + package. Both packages are needed to create a complete application + based on the Sleepycat Java Collections API. +

+

+ The following statements create an + + Environment + + object. +

+ +
public SampleDatabase(String homeDirectory)
+        throws DatabaseException, FileNotFoundException
+    {
+        System.out.println("Opening environment in: " + homeDirectory);
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        envConfig.setInitializeCache(true);
+        envConfig.setInitializeLocking(true);
+
+        env = new Environment(new File(homeDirectory), envConfig);
+    } 
+

+ The + + EnvironmentConfig + + class is used to specify environment configuration parameters. The + first configuration option specified — setTransactional() — + is set to true to create an environment where transactional (and + non-transactional) databases may be opened. While non-transactional + environments can also be created, the examples in this tutorial use + a transactional environment. +

+

+ setAllowCreate() is set to true to specify + that the environment's files will be created if they don't already + exist. If this parameter is not specified, an exception will be + thrown if the environment does not already exist. A similar + parameter will be used later to cause databases to be created if + they don't exist. +

+

+ When an Environment object is constructed, a home + directory and the environment configuration object are specified. + The home directory is the location of the environment's log files + that store all database information. +

+

+ The following statement closes the environment. The environment + should always be closed when database work is completed to free + allocated resources and to avoid having to run recovery later. + Closing the environment does not automatically close databases, so + databases should be closed explicitly before closing the + environment. +

+ +
    public void close()
+        throws DatabaseException
+    {
+        env.close();
+    } 
+
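+ As a minimal sketch of the closing order described above (the partDb field is a hypothetical placeholder for a database opened later in the tutorial), a close() method might release each database before closing the environment: +
+
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Environment;
+
+public class CloseOrderSketch
+{
+    private Environment env;
+    private Database partDb; // hypothetical database opened elsewhere
+
+    public void close()
+        throws DatabaseException
+    {
+        // Close the databases first, then the environment that manages
+        // their resources.
+        if (partDb != null) {
+            partDb.close();
+        }
+        env.close();
+    }
+}
+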

+ The following getter method returns the environment for use by + other classes in the example program. The environment is used for + opening databases and running transactions. +

+ +
public class SampleDatabase
+{
+    ...
+    public final Environment getEnvironment()
+    {
+        return env;
+    }
+    ...
+} 
+
+ + + diff --git a/db/docs/collections/tutorial/openingforeignkeys.html b/db/docs/collections/tutorial/openingforeignkeys.html new file mode 100644 index 000000000..217236eac --- /dev/null +++ b/db/docs/collections/tutorial/openingforeignkeys.html @@ -0,0 +1,237 @@ + + + + + + + + More Secondary Key Indices + + + + + + + + + + +
+
+
+
+

+ + More Secondary Key Indices +

+
+
+
+
+

+ This section builds on the prior section describing secondary key indices. + Two more secondary key indices are defined for indexing the Shipment record + by PartNumber and by SupplierNumber. +

+

+ The SampleDatabase class is extended to open the + Shipment-by-Part and Shipment-by-Supplier secondary key + indices. +

+ +
import com.sleepycat.bind.serial.SerialSerialKeyCreator;
+import com.sleepycat.db.SecondaryConfig;
+import com.sleepycat.db.SecondaryDatabase;
+...
+public class SampleDatabase
+{
+    ...
+    private static final String SHIPMENT_PART_INDEX = "shipment_part_index";
+    private static final String SHIPMENT_SUPPLIER_INDEX = 
+        "shipment_supplier_index";
+    ...
+    private SecondaryDatabase shipmentByPartDb;
+    private SecondaryDatabase shipmentBySupplierDb;
+    ...
+    public SampleDatabase(String homeDirectory)
+        throws DatabaseException, FileNotFoundException
+    {
+        ...
+        SecondaryConfig secConfig = new SecondaryConfig();
+        secConfig.setTransactional(true);
+        secConfig.setAllowCreate(true);
+        secConfig.setType(DatabaseType.BTREE);
+        secConfig.setSortedDuplicates(true);
+        ...
+        secConfig.setKeyCreator(
+            new ShipmentByPartKeyCreator(javaCatalog,
+                                         ShipmentKey.class,
+                                         ShipmentData.class,
+                                         PartKey.class));
+        shipmentByPartDb = env.openSecondaryDatabase(null, 
+                                                     SHIPMENT_PART_INDEX,
+                                                     null,
+                                                     shipmentDb,
+                                                     secConfig);
+
+        secConfig.setKeyCreator(
+            new ShipmentBySupplierKeyCreator(javaCatalog,
+                                             ShipmentKey.class,
+                                             ShipmentData.class,
+                                             SupplierKey.class));
+        shipmentBySupplierDb = env.openSecondaryDatabase(null,
+                                                     SHIPMENT_SUPPLIER_INDEX,
+                                                     null,
+                                                     shipmentDb,
+                                                     secConfig);
+    ...
+    }
+} 
+

+ The statements in this example are very similar to the statements used in + the previous section for opening a secondary index. +

+

+ The application-defined ShipmentByPartKeyCreator + and ShipmentBySupplierKeyCreator classes are shown below. They + were used above to configure the secondary database objects. +

+ +
public class SampleDatabase
+{
+...
+    private static class ShipmentByPartKeyCreator
+        extends SerialSerialKeyCreator
+    {
+        private ShipmentByPartKeyCreator(StoredClassCatalog catalog,
+                                         Class primaryKeyClass,
+                                         Class valueClass,
+                                         Class indexKeyClass)
+        {
+            super(catalog, primaryKeyClass, valueClass, indexKeyClass);
+        }
+
+        public Object createSecondaryKey(Object primaryKeyInput,
+                                         Object valueInput)
+        {
+            ShipmentKey shipmentKey = (ShipmentKey) primaryKeyInput;
+            return new PartKey(shipmentKey.getPartNumber());
+        }
+    }
+
+    private static class ShipmentBySupplierKeyCreator
+        extends SerialSerialKeyCreator
+    {
+        private ShipmentBySupplierKeyCreator(StoredClassCatalog catalog,
+                                             Class primaryKeyClass,
+                                             Class valueClass,
+                                             Class indexKeyClass)
+        {
+            super(catalog, primaryKeyClass, valueClass, indexKeyClass);
+        }
+
+        public Object createSecondaryKey(Object primaryKeyInput,
+                                         Object valueInput)
+        {
+            ShipmentKey shipmentKey = (ShipmentKey) primaryKeyInput;
+            return new SupplierKey(shipmentKey.getSupplierNumber());
+        }
+    }
+    ...
+} 
+

+ The key creator classes above are almost identical to the one + defined in the previous section for use with a secondary index. The + index key fields are different, of course, but the interesting + difference is that the index keys are extracted from the key, not + the value, of the Shipment record. This illustrates that an index + key may be derived from the primary database record key, value, or + both. +
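+ For illustration only, the following sketch shows a key creator that derives its index key from both the primary key and the value; the ShipmentByPartAndQuantityKeyCreator and PartQuantityKey classes are hypothetical and are not part of the tutorial program. +
+
+import com.sleepycat.bind.serial.SerialSerialKeyCreator;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import java.io.Serializable;
+
+public class BothKeyAndValueSketch
+{
+    // Hypothetical index key holding a field taken from the primary key
+    // and a field taken from the value.
+    private static class PartQuantityKey implements Serializable
+    {
+        private String partNumber;
+        private int quantity;
+
+        private PartQuantityKey(String partNumber, int quantity)
+        {
+            this.partNumber = partNumber;
+            this.quantity = quantity;
+        }
+    }
+
+    private static class ShipmentByPartAndQuantityKeyCreator
+        extends SerialSerialKeyCreator
+    {
+        private ShipmentByPartAndQuantityKeyCreator(StoredClassCatalog catalog,
+                                                    Class primaryKeyClass,
+                                                    Class valueClass,
+                                                    Class indexKeyClass)
+        {
+            super(catalog, primaryKeyClass, valueClass, indexKeyClass);
+        }
+
+        public Object createSecondaryKey(Object primaryKeyInput,
+                                         Object valueInput)
+        {
+            // The part number comes from the Shipment key and the quantity
+            // comes from the Shipment value, so the index key uses both.
+            ShipmentKey shipmentKey = (ShipmentKey) primaryKeyInput;
+            ShipmentData shipmentData = (ShipmentData) valueInput;
+            return new PartQuantityKey(shipmentKey.getPartNumber(),
+                                       shipmentData.getQuantity());
+        }
+    }
+}
+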

+

+ The following getter methods return the secondary database + objects for use by other classes in the example program. +

+ +
public class SampleDatabase
+{
+    ...
+    public final SecondaryDatabase getShipmentByPartDatabase()
+    {
+        return shipmentByPartDb;
+    }
+
+    public final SecondaryDatabase getShipmentBySupplierDatabase()
+    {
+        return shipmentBySupplierDb;
+    }
+    ...
+} 
+

+ The following statements close the secondary databases. +

+ +
public class SampleDatabase
+{
+    ...
+    public void close()
+        throws DatabaseException {
+
+        supplierByCityDb.close();
+        shipmentByPartDb.close();
+        shipmentBySupplierDb.close();
+        partDb.close();
+        supplierDb.close();
+        shipmentDb.close();
+        javaCatalog.close();
+        env.close();
+    }
+    ...
+} 
+

+ Secondary databases must be closed before closing their + associated primary database. +

+
+ + + diff --git a/db/docs/collections/tutorial/preface.html b/db/docs/collections/tutorial/preface.html new file mode 100644 index 000000000..f501252cd --- /dev/null +++ b/db/docs/collections/tutorial/preface.html @@ -0,0 +1,129 @@ + + + + + + Preface + + + + + + + + + +
+
+
+
+

Preface

+
+
+
+
+
+

+ Table of Contents +

+
+
+ + Conventions Used in this Book + +
+
+
+

+ Welcome to the Berkeley DB (DB) Collections API. This document provides a tutorial that introduces the collections API. Its goal is to help you become proficient with this API quickly. As such, this document is intended for Java developers and senior software architects who are looking for transactionally-protected backing of their Java collections. No prior experience with Sleepycat technologies is expected or required. +

+
+
+
+
+

Conventions Used in this Book

+
+
+
+
+

+ The following typographical conventions are used in this manual: +

+

+ Class names are represented in monospaced font, as are method + names. For example: "The Environment.openDatabase() method + returns a Database class object." +

+

+ Variable or non-literal text is presented in italics. For example: "Go to your + DB_INSTALLATION_HOME directory." +

+

+ Program examples are displayed in a monospaced font on a shaded background. For example: +

+
import com.sleepycat.db.Environment;
+import com.sleepycat.db.EnvironmentConfig;
+import java.io.File;
+
+...
+
+// Open the environment. Allow it to be created if it does not already exist.
+Environment myDbEnvironment;
+

+ In some situations, programming examples are updated from one chapter to the next. When this occurs, the new code is presented in monospaced bold font. For example: +

+
import com.sleepycat.db.Environment;
+import com.sleepycat.db.EnvironmentConfig;
+import java.io.File;
+
+...
+
+// Open the environment. Allow it to be created if it does not already exist.
+Environment myDbEnv;
+EnvironmentConfig envConfig = new EnvironmentConfig();
+envConfig.setAllowCreate(true);
+myDbEnv = new Environment(new File("/export/dbEnv"), envConfig); 
+
+
+ + + diff --git a/db/docs/collections/tutorial/removingredundantvalueclasses.html b/db/docs/collections/tutorial/removingredundantvalueclasses.html new file mode 100644 index 000000000..828b177b0 --- /dev/null +++ b/db/docs/collections/tutorial/removingredundantvalueclasses.html @@ -0,0 +1,130 @@ + + + + + + + Removing the Redundant Value Classes + + + + + + + + + + +
+
+
+
+

+ Removing the Redundant Value Classes +

+
+
+
+
+

+ The PartData, SupplierData and ShipmentData + classes have been removed in this example, and the Part, + Supplier and Shipment entity classes are used in + their place. +

+

+ The serial formats are created with the entity classes. +

+ +
public class SampleDatabase
+{
+    ...
+    public SampleDatabase(String homeDirectory)
+        throws DatabaseException, FileNotFoundException
+    {
+        ...
+        secConfig.setKeyCreator(new SupplierByCityKeyCreator(javaCatalog,
+                                                             Supplier.class));
+        ...
+        secConfig.setKeyCreator(new ShipmentByPartKeyCreator(javaCatalog,
+                                                             Shipment.class));
+        ...
+        secConfig.setKeyCreator(new ShipmentBySupplierKeyCreator(javaCatalog,
+                                                             Shipment.class));
+        ...
+    }
+} 
+

+ The index key creator uses the entity class as well. +

+ +
public class SampleDatabase
+{
+    ...
+
+    private static class SupplierByCityKeyCreator
+        extends TupleSerialKeyCreator
+    {
+        private SupplierByCityKeyCreator(ClassCatalog catalog,
+                                         Class valueClass)
+        {
+            super(catalog, valueClass);
+        }
+
+        public boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                          Object valueInput,
+                                          TupleOutput indexKeyOutput)
+        {
+            Supplier supplier = (Supplier) valueInput;
+            String city = supplier.getCity();
+            if (city != null) {
+                indexKeyOutput.writeString(supplier.getCity());
+                return true;
+            } else {
+                return false;
+            }
+        }
+    }
+} 
+
+ + + diff --git a/db/docs/collections/tutorial/retrievingbyindexkey.html b/db/docs/collections/tutorial/retrievingbyindexkey.html new file mode 100644 index 000000000..b4f731587 --- /dev/null +++ b/db/docs/collections/tutorial/retrievingbyindexkey.html @@ -0,0 +1,287 @@ + + + + + + + Retrieving Items by Index Key + + + + + + + + + + +
+
+
+
+

+ Retrieving Items by Index Key +

+
+
+
+
+

+ Retrieving information via database index keys can be + accomplished using the standard Java collections API, using a + collection created from a + + SecondaryDatabase + + rather than a + + + Database. + + However, the standard Java API does not support duplicate keys: more + than one element in a collection having the same key. All three + indices created in the prior section have duplicate keys because of + the nature of the city, part number and supplier number index keys. + More than one supplier may be in the same city, and more than one + shipment may have the same part number or supplier number. This + section describes how to use extended methods for stored + collections to return all values for a given key. +

+

+ Using the standard Java collections API, the + Map.get + + method for a stored collection with duplicate keys will return only + the first value for a given key. To obtain all values for a given + key, the + StoredMap.duplicates + + method may be called. This returns a + Collection + + of values for the given key. If duplicate keys are not allowed, the + returned collection will have at most one value. If the key is not + present in the map, an empty collection is returned. +

+

+ The Sample class is extended to retrieve duplicates for + specific index keys that are present in the database. +

+ +
import com.sleepycat.collections.StoredIterator;
+import java.util.Iterator;
+...
+public class Sample
+{
+    ...
+    private SampleViews views;
+    ...
+    private class PrintDatabase implements TransactionWorker
+    {
+        public void doWork()
+            throws Exception
+        {
+            printEntries("Parts",
+                          views.getPartEntrySet().iterator());
+            printEntries("Suppliers",
+                          views.getSupplierEntrySet().iterator());
+            printValues("Suppliers for City Paris",
+                         views.getSupplierByCityMap().duplicates(
+                                            "Paris").iterator());
+            printEntries("Shipments",
+                          views.getShipmentEntrySet().iterator());
+            printValues("Shipments for Part P1",
+                         views.getShipmentByPartMap().duplicates(
+                                            new PartKey("P1")).iterator());
+            printValues("Shipments for Supplier S1",
+                         views.getShipmentBySupplierMap().duplicates(
+                                            new
+                                            SupplierKey("S1")).iterator());
+        }
+    }
+
+    private void printValues(String label, Iterator iterator)
+    {
+        System.out.println("\n--- " + label + " ---");
+        try
+        {
+            while (iterator.hasNext())
+            {
+                System.out.println(iterator.next().toString());
+            }
+        }
+        finally
+        {
+            StoredIterator.close(iterator);
+        }
+    }
+    ...
+} 
+

+ The StoredMap.duplicates method is called, passing the desired key. The returned value is a standard Java Collection containing the values for the specified key. A standard Java Iterator is then obtained for this collection, and all values returned by that iterator are printed. +

+

+ Another technique for retrieving duplicates is to use the + collection returned by + Map.entrySet. + When duplicate keys are present, a + Map.Entry + + object will be present in this collection for each duplicate. This + collection can then be iterated or a subset can be created from it, + all using the standard Java collection API. +
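+ A minimal sketch of this technique follows (the printAllEntries method is hypothetical); it simply walks the entry set of a stored map, so a key with duplicates is printed once for each of its values. +
+
+import com.sleepycat.collections.StoredIterator;
+import java.util.Iterator;
+import java.util.Map;
+
+public class EntrySetSketch
+{
+    // Prints every Map.Entry in a stored map. When the underlying database
+    // allows duplicates, the same key appears once per duplicate value.
+    public static void printAllEntries(Map storedMap)
+    {
+        Iterator iterator = storedMap.entrySet().iterator();
+        try {
+            while (iterator.hasNext()) {
+                Map.Entry entry = (Map.Entry) iterator.next();
+                System.out.println(entry.getKey() + " -> " + entry.getValue());
+            }
+        } finally {
+            // Stored iterators must always be closed.
+            StoredIterator.close(iterator);
+        }
+    }
+}
+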

+

+ Note that we did not discuss how duplicate keys can be explicitly added or removed in a collection. For index keys, the addition and deletion of duplicate keys happens automatically when records containing the index key are added, updated, or removed. +

+

+ While not shown in the example program, it is also possible to create a store with duplicate keys in the same way as an index with duplicate keys, by calling the DatabaseConfig.setSortedDuplicates() method. In that case, calling Map.put will add duplicate keys. To remove all duplicate keys, call Map.remove. To remove a specific duplicate key, call StoredMap.duplicates and then call Collection.remove using the returned collection. Duplicate values may also be added to this collection using Collection.add. +
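+ The following sketch (not part of the tutorial program; the method and parameter names are placeholders) walks through these operations on a StoredMap whose underlying store was configured with sorted duplicates. In a transactional environment these calls would normally be made from within a TransactionWorker. +
+
+import com.sleepycat.collections.StoredMap;
+import java.util.Collection;
+
+public class DuplicateStoreSketch
+{
+    public static void duplicateOperations(StoredMap storedMap,
+                                           Object key,
+                                           Object firstValue,
+                                           Object secondValue)
+    {
+        // With sorted duplicates configured, put() adds another value for
+        // the key instead of replacing the existing one.
+        storedMap.put(key, firstValue);
+        storedMap.put(key, secondValue);
+
+        // duplicates() returns a collection of the values for the key;
+        // remove() and add() on that collection modify the store.
+        Collection values = storedMap.duplicates(key);
+        values.remove(firstValue);
+        values.add(firstValue);
+
+        // remove() on the map deletes the key and all of its duplicates.
+        storedMap.remove(key);
+    }
+}
+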

+

+ The output of the example program is shown below. +

+
Adding Suppliers
+Adding Parts
+Adding Shipments
+
+--- Parts ---
+PartKey: number=P1
+PartData: name=Nut color=Red weight=[12.0 grams] city=London
+PartKey: number=P2
+PartData: name=Bolt color=Green weight=[17.0 grams] city=Paris
+PartKey: number=P3
+PartData: name=Screw color=Blue weight=[17.0 grams] city=Rome
+PartKey: number=P4
+PartData: name=Screw color=Red weight=[14.0 grams] city=London
+PartKey: number=P5
+PartData: name=Cam color=Blue weight=[12.0 grams] city=Paris
+PartKey: number=P6
+PartData: name=Cog color=Red weight=[19.0 grams] city=London
+
+--- Suppliers ---
+SupplierKey: number=S1
+SupplierData: name=Smith status=20 city=London
+SupplierKey: number=S2
+SupplierData: name=Jones status=10 city=Paris
+SupplierKey: number=S3
+SupplierData: name=Blake status=30 city=Paris
+SupplierKey: number=S4
+SupplierData: name=Clark status=20 city=London
+SupplierKey: number=S5
+SupplierData: name=Adams status=30 city=Athens
+
+--- Suppliers for City Paris ---
+SupplierData: name=Jones status=10 city=Paris
+SupplierData: name=Blake status=30 city=Paris
+
+--- Shipments ---
+ShipmentKey: supplier=S1 part=P1
+ShipmentData: quantity=300
+ShipmentKey: supplier=S2 part=P1
+ShipmentData: quantity=300
+ShipmentKey: supplier=S1 part=P2
+ShipmentData: quantity=200
+ShipmentKey: supplier=S2 part=P2
+ShipmentData: quantity=400
+ShipmentKey: supplier=S3 part=P2
+ShipmentData: quantity=200
+ShipmentKey: supplier=S4 part=P2
+ShipmentData: quantity=200
+ShipmentKey: supplier=S1 part=P3
+ShipmentData: quantity=400
+ShipmentKey: supplier=S1 part=P4
+ShipmentData: quantity=200
+ShipmentKey: supplier=S4 part=P4
+ShipmentData: quantity=300
+ShipmentKey: supplier=S1 part=P5
+ShipmentData: quantity=100
+ShipmentKey: supplier=S4 part=P5
+ShipmentData: quantity=400
+ShipmentKey: supplier=S1 part=P6
+ShipmentData: quantity=100 
+
+--- Shipments for Part P1 ---
+ShipmentData: quantity=300
+ShipmentData: quantity=300
+
+--- Shipments for Supplier S1 ---
+ShipmentData: quantity=300
+ShipmentData: quantity=200
+ShipmentData: quantity=400
+ShipmentData: quantity=200
+ShipmentData: quantity=100
+ShipmentData: quantity=100 
+
+ + + diff --git a/db/docs/collections/tutorial/retrievingdatabaseitems.html b/db/docs/collections/tutorial/retrievingdatabaseitems.html new file mode 100644 index 000000000..4b07a0f8c --- /dev/null +++ b/db/docs/collections/tutorial/retrievingdatabaseitems.html @@ -0,0 +1,249 @@ + + + + + + + Retrieving Database Items + + + + + + + + + + +
+
+
+
+

+ Retrieving Database Items +

+
+
+
+
+

+ Retrieving information from the database is accomplished via the + standard Java collections API. In the example, the + Set.iterator + + method is used to iterate all + Map.Entry + + objects for each store. All standard Java methods for retrieving + objects from a collection may be used with the Sleepycat Java Collections + API. +

+

+ The PrintDatabase.doWork() method calls + printEntries() + to print the map entries for each database store. It is called via + the + TransactionRunner + + class and was outlined in the previous section. +

+ +
import com.sleepycat.collections.StoredIterator;
+import java.util.Iterator;
+...
+public class Sample
+{
+    ...
+    private SampleViews views;
+    ...
+    private class PrintDatabase implements TransactionWorker
+    {
+        public void doWork()
+            throws Exception
+        {
+            printEntries("Parts",
+                          views.getPartEntrySet().iterator());
+            printEntries("Suppliers",
+                          views.getSupplierEntrySet().iterator());
+            printEntries("Shipments",
+                          views.getShipmentEntrySet().iterator());
+        }
+    }
+    ...
+
+    private void printEntries(String label, Iterator iterator)
+    {
+    }
+    ...
+} 
+

+ The + Set + + of + Map.Entry + + objects for each store is obtained from the SampleViews + object. This set can also be obtained by calling the + Map.entrySet + + method of a stored map. +

+

+ The printEntries() method prints the map entries for any stored map. The Object.toString method of each key and value is called to obtain a printable representation of each object. +

+ +
    private void printEntries(String label, Iterator iterator)
+    {
+        System.out.println("\n--- " + label + " ---");
+        try
+        {
+            while (iterator.hasNext())
+            {
+                Map.Entry entry = (Map.Entry) iterator.next();
+                System.out.println(entry.getKey().toString());
+                System.out.println(entry.getValue().toString());
+            }
+        }
+        finally
+        {
+            StoredIterator.close(iterator);
+        }
+    } 
+

+ It is very important that all iterators for stored collections + are explicitly closed. To ensure they are closed, a finally + clause should be used as shown above. If the iterator is not + closed, the underlying Berkeley DB cursor is not closed either and + the store may become unusable. +

+

+ If the iterator is cast to + StoredIterator + + then its + StoredIterator.close() + + method can be called. Or, as shown above, the static + StoredIterator.close() + + method can be called + to avoid casting. The static form of this method can be called + safely for any + Iterator. + If an iterator for a non-stored collection is passed, it is simply + ignored. +
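+ A small sketch of both forms follows; the closeIterator method is hypothetical. +
+
+import com.sleepycat.collections.StoredIterator;
+import java.util.Iterator;
+
+public class IteratorCloseSketch
+{
+    // Uses the instance close() method when the iterator is known to come
+    // from a stored collection, and otherwise the static close() method,
+    // which silently ignores iterators from non-stored collections.
+    public static void closeIterator(Iterator iterator)
+    {
+        if (iterator instanceof StoredIterator) {
+            ((StoredIterator) iterator).close();
+        } else {
+            StoredIterator.close(iterator);
+        }
+    }
+}
+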

+

+ This is one of a small number of behavioral differences between + standard Java collections and stored collections. For a complete + list see + + Using Stored Collections + . +

+

+ The output of the example program is shown below. +

+
Adding Suppliers
+Adding Parts
+Adding Shipments
+
+--- Parts ---
+PartKey: number=P1
+PartData: name=Nut color=Red weight=[12.0 grams] city=London
+PartKey: number=P2
+PartData: name=Bolt color=Green weight=[17.0 grams] city=Paris
+PartKey: number=P3
+PartData: name=Screw color=Blue weight=[17.0 grams] city=Rome
+PartKey: number=P4
+PartData: name=Screw color=Red weight=[14.0 grams] city=London
+PartKey: number=P5
+PartData: name=Cam color=Blue weight=[12.0 grams] city=Paris
+PartKey: number=P6
+PartData: name=Cog color=Red weight=[19.0 grams] city=London
+
+--- Suppliers ---
+SupplierKey: number=S1
+SupplierData: name=Smith status=20 city=London
+SupplierKey: number=S2
+SupplierData: name=Jones status=10 city=Paris
+SupplierKey: number=S3
+SupplierData: name=Blake status=30 city=Paris
+SupplierKey: number=S4
+SupplierData: name=Clark status=20 city=London
+SupplierKey: number=S5
+SupplierData: name=Adams status=30 city=Athens
+
+--- Shipments ---
+ShipmentKey: supplier=S1 part=P1
+ShipmentData: quantity=300
+ShipmentKey: supplier=S2 part=P1
+ShipmentData: quantity=300
+ShipmentKey: supplier=S1 part=P2
+ShipmentData: quantity=200
+ShipmentKey: supplier=S2 part=P2
+ShipmentData: quantity=400
+ShipmentKey: supplier=S3 part=P2
+ShipmentData: quantity=200
+ShipmentKey: supplier=S4 part=P2
+ShipmentData: quantity=200
+ShipmentKey: supplier=S1 part=P3
+ShipmentData: quantity=400
+ShipmentKey: supplier=S1 part=P4
+ShipmentData: quantity=200
+ShipmentKey: supplier=S4 part=P4
+ShipmentData: quantity=300
+ShipmentKey: supplier=S1 part=P5
+ShipmentData: quantity=100
+ShipmentKey: supplier=S4 part=P5
+ShipmentData: quantity=400
+ShipmentKey: supplier=S1 part=P6
+ShipmentData: quantity=100 
+
+ + + diff --git a/db/docs/collections/tutorial/sortedcollections.html b/db/docs/collections/tutorial/sortedcollections.html new file mode 100644 index 000000000..13ba7467c --- /dev/null +++ b/db/docs/collections/tutorial/sortedcollections.html @@ -0,0 +1,149 @@ + + + + + + + Using Sorted Collections + + + + + + + + + + +
+
+
+
+

+ Using Sorted Collections +

+
+
+
+
+

+ In general, no changes to the prior example are necessary to use + collections having tuple keys. Iteration of elements in a stored + collection will be ordered by the sort order of the tuples. +

+

+ In addition to using the tuple format, the + DatabaseType.BTREE + + access method must be used when creating the database. + DatabaseType.BTREE + + is used for the databases in all examples. The + DatabaseType.HASH + + access method does not support sorted keys. +
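+ For reference, a database configuration for sorted access might look like the following minimal sketch (the sortedStoreConfig method is hypothetical; similar calls appear in the tutorial's SampleDatabase constructor): +
+
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseType;
+
+public class BtreeConfigSketch
+{
+    public static DatabaseConfig sortedStoreConfig()
+    {
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        // BTREE keeps records ordered by key; HASH does not provide the
+        // sorted iteration shown in this section.
+        dbConfig.setType(DatabaseType.BTREE);
+        return dbConfig;
+    }
+}
+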

+

+ Although not shown in the example, all methods of the + SortedMap + + and + SortedSet + + interfaces may be used with sorted collections. For example, + submaps and subsets may be created. +
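+ For example, a range of parts could be printed from a sorted stored map with a sketch like the following (the printPartRange method is hypothetical; partMap is assumed to be the sorted stored map created from the part database in this tutorial): +
+
+import com.sleepycat.collections.StoredIterator;
+import com.sleepycat.collections.StoredSortedMap;
+import java.util.Iterator;
+import java.util.SortedMap;
+
+public class SubMapSketch
+{
+    // Prints the part records whose keys fall in the range [P2, P4).
+    public static void printPartRange(StoredSortedMap partMap)
+    {
+        SortedMap range = partMap.subMap(new PartKey("P2"), new PartKey("P4"));
+        Iterator iterator = range.values().iterator();
+        try {
+            while (iterator.hasNext()) {
+                System.out.println(iterator.next().toString());
+            }
+        } finally {
+            StoredIterator.close(iterator);
+        }
+    }
+}
+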

+

+ The output of the example program shows that records are sorted + by key value. +

+
Adding Suppliers
+Adding Parts
+Adding Shipments
+
+--- Parts ---
+Part: number=P1 name=Nut color=Red weight=[12.0 grams] city=London
+Part: number=P2 name=Bolt color=Green weight=[17.0 grams] city=Paris
+Part: number=P3 name=Screw color=Blue weight=[17.0 grams] city=Rome
+Part: number=P4 name=Screw color=Red weight=[14.0 grams] city=London
+Part: number=P5 name=Cam color=Blue weight=[12.0 grams] city=Paris
+Part: number=P6 name=Cog color=Red weight=[19.0 grams] city=London
+
+--- Suppliers ---
+Supplier: number=S1 name=Smith status=20 city=London
+Supplier: number=S2 name=Jones status=10 city=Paris
+Supplier: number=S3 name=Blake status=30 city=Paris
+Supplier: number=S4 name=Clark status=20 city=London
+Supplier: number=S5 name=Adams status=30 city=Athens
+
+--- Suppliers for City Paris ---
+Supplier: number=S2 name=Jones status=10 city=Paris
+Supplier: number=S3 name=Blake status=30 city=Paris
+
+--- Shipments ---
+Shipment: part=P1 supplier=S1 quantity=300
+Shipment: part=P1 supplier=S2 quantity=300
+Shipment: part=P2 supplier=S1 quantity=200
+Shipment: part=P2 supplier=S2 quantity=400
+Shipment: part=P2 supplier=S3 quantity=200
+Shipment: part=P2 supplier=S4 quantity=200
+Shipment: part=P3 supplier=S1 quantity=400
+Shipment: part=P4 supplier=S1 quantity=200
+Shipment: part=P4 supplier=S4 quantity=300
+Shipment: part=P5 supplier=S1 quantity=100
+Shipment: part=P5 supplier=S4 quantity=400
+Shipment: part=P6 supplier=S1 quantity=100
+
+--- Shipments for Part P1 ---
+Shipment: part=P1 supplier=S1 quantity=300
+Shipment: part=P1 supplier=S2 quantity=300
+
+--- Shipments for Supplier S1 ---
+Shipment: part=P1 supplier=S1 quantity=300
+Shipment: part=P2 supplier=S1 quantity=200
+Shipment: part=P3 supplier=S1 quantity=400
+Shipment: part=P4 supplier=S1 quantity=200
+Shipment: part=P5 supplier=S1 quantity=100
+Shipment: part=P6 supplier=S1 quantity=100 
+
+ + + diff --git a/db/docs/collections/tutorial/transientfieldsinbinding.html b/db/docs/collections/tutorial/transientfieldsinbinding.html new file mode 100644 index 000000000..add369ec3 --- /dev/null +++ b/db/docs/collections/tutorial/transientfieldsinbinding.html @@ -0,0 +1,177 @@ + + + + + + + Using Transient Fields in an Entity Binding + + + + + + + + + + +
+
+
+
+

+ Using Transient Fields in an Entity Binding +

+
+
+
+
+

+ The entity bindings from the prior example have been changed in + this example to use the entity object both as a value object and an + entity object. +

+

+ Before, the entryToObject() method combined the + deserialized value object with the key fields to create a new + entity object. Now, this method uses the deserialized object + directly as an entity, and initializes its key using the fields + read from the key tuple. +

+

+ Before, the objectToData() method constructed a new value + object using information in the entity. Now it simply returns the + entity. Nothing needs to be changed in the entity, since the + transient key fields won't be serialized. +

+ +
import com.sleepycat.bind.serial.ClassCatalog;
+...
+public class SampleViews
+{
+    ...
+    private static class PartBinding extends TupleSerialBinding
+    {
+        private PartBinding(ClassCatalog classCatalog, Class dataClass)
+        {
+            super(classCatalog, dataClass);
+        }
+
+        public Object entryToObject(TupleInput keyInput, Object dataInput)
+        {
+            String number = keyInput.readString();
+            Part part = (Part) dataInput;
+            part.setKey(number);
+            return part;
+        }
+
+        public void objectToKey(Object object, TupleOutput output)
+        {
+            Part part = (Part) object;
+            output.writeString(part.getNumber());
+        }
+
+        public Object objectToData(Object object)
+        {
+            return object;
+        }
+    }
+
+    private static class SupplierBinding extends TupleSerialBinding
+    {
+        private SupplierBinding(ClassCatalog classCatalog, Class dataClass)
+        {
+            super(classCatalog, dataClass);
+        }
+
+        public Object entryToObject(TupleInput keyInput, Object dataInput)
+        {
+            String number = keyInput.readString();
+            Supplier supplier = (Supplier) dataInput;
+            supplier.setKey(number);
+            return supplier;
+        }
+
+        public void objectToKey(Object object, TupleOutput output)
+        {
+            Supplier supplier = (Supplier) object;
+            output.writeString(supplier.getNumber());
+        }
+
+        public Object objectToData(Object object)
+        {
+            return object;
+        }
+    }
+
+    private static class ShipmentBinding extends TupleSerialBinding
+    {
+        private ShipmentBinding(ClassCatalog classCatalog, Class dataClass)
+        {
+            super(classCatalog, dataClass);
+        }
+
+        public Object entryToObject(TupleInput keyInput, Object dataInput)
+        {
+            String partNumber = keyInput.readString();
+            String supplierNumber = keyInput.readString();
+            Shipment shipment = (Shipment) dataInput;
+            shipment.setKey(partNumber, supplierNumber);
+            return shipment;
+        }
+
+        public void objectToKey(Object object, TupleOutput output)
+        {
+            Shipment shipment = (Shipment) object;
+            output.writeString(shipment.getPartNumber());
+            output.writeString(shipment.getSupplierNumber());
+        }
+
+        public Object objectToData(Object object)
+        {
+            return object;
+        }
+    }
+} 
+
+ + + diff --git a/db/docs/collections/tutorial/tuple-serialentitybindings.html b/db/docs/collections/tutorial/tuple-serialentitybindings.html new file mode 100644 index 000000000..649e3770c --- /dev/null +++ b/db/docs/collections/tutorial/tuple-serialentitybindings.html @@ -0,0 +1,197 @@ + + + + + + +Creating Tuple-Serial Entity Bindings + + + + + + + + + + +
+
+
+
+

+Creating Tuple-Serial Entity Bindings +

+
+
+
+
+

+In the prior example serial keys and serial values were used, +and the +SerialSerialBinding + +base class was used for entity bindings. In this example, tuple +keys and serial values are used and therefore the +TupleSerialBinding + +base class is used for entity bindings. +

+

+As with any entity binding, a key and value are converted to an entity in the TupleSerialBinding.entryToObject method, and an entity is converted back to a key and value in the TupleSerialBinding.objectToKey and TupleSerialBinding.objectToData methods. But since keys are stored as tuples, not as serialized objects, key fields are read and written using the TupleInput and TupleOutput parameters. +

+

+The SampleViews class contains the modified entity +binding classes that were defined in the prior example: +PartBinding, SupplierBinding and +ShipmentBinding. +

+ +
import com.sleepycat.bind.serial.TupleSerialBinding;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+...
+public class SampleViews
+{
+    ...
+    private static class PartBinding extends TupleSerialBinding 
+    {
+        private PartBinding(ClassCatalog classCatalog, Class dataClass)
+        {
+            super(classCatalog, dataClass);
+        }
+        public Object entryToObject(TupleInput keyInput, Object dataInput)
+        {
+            String number = keyInput.readString();
+            PartData data = (PartData) dataInput;
+            return new Part(number, data.getName(), data.getColor(),
+                            data.getWeight(), data.getCity());
+        }
+        public void objectToKey(Object object, TupleOutput output)
+        {
+            Part part = (Part) object;
+            output.writeString(part.getNumber());
+        }
+        public Object objectToData(Object object)
+        {
+            Part part = (Part) object;
+            return new PartData(part.getName(), part.getColor(),
+                                 part.getWeight(), part.getCity());
+        }
+    }
+    ...
+    private static class SupplierBinding extends TupleSerialBinding
+    {
+        private SupplierBinding(ClassCatalog classCatalog, Class dataClass)
+        {
+            super(classCatalog, dataClass);
+        }
+        public Object entryToObject(TupleInput keyInput, Object dataInput)
+        {
+            String number = keyInput.readString();
+            SupplierData data = (SupplierData) dataInput;
+            return new Supplier(number, data.getName(),
+                                data.getStatus(), data.getCity());
+        }
+        public void objectToKey(Object object, TupleOutput output)
+        {
+            Supplier supplier = (Supplier) object;
+            output.writeString(supplier.getNumber());
+        }
+        public Object objectToData(Object object)
+        {
+            Supplier supplier = (Supplier) object;
+            return new SupplierData(supplier.getName(), supplier.getStatus(),
+                                     supplier.getCity());
+        }
+    }
+    ...
+    private static class ShipmentBinding extends TupleSerialBinding
+    {
+        private ShipmentBinding(ClassCatalog classCatalog, Class dataClass)
+        {
+            super(classCatalog, dataClass);
+        }
+        public Object entryToObject(TupleInput keyInput, Object dataInput)
+        {
+            String partNumber = keyInput.readString();
+            String supplierNumber = keyInput.readString();
+            ShipmentData data = (ShipmentData) dataInput;
+            return new Shipment(partNumber, supplierNumber,
+                                data.getQuantity());
+        }
+        public void objectToKey(Object object, TupleOutput output)
+        {
+            Shipment shipment = (Shipment) object;
+            output.writeString(shipment.getPartNumber());
+            output.writeString(shipment.getSupplierNumber());
+        }
+        public Object objectToData(Object object)
+        {
+            Shipment shipment = (Shipment) object;
+            return new ShipmentData(shipment.getQuantity());
+        }
+    }
+    ...
+} 
+
+ + + diff --git a/db/docs/collections/tutorial/tuplekeybindings.html b/db/docs/collections/tutorial/tuplekeybindings.html new file mode 100644 index 000000000..3a3b80633 --- /dev/null +++ b/db/docs/collections/tutorial/tuplekeybindings.html @@ -0,0 +1,220 @@ + + + + + + + Creating Tuple Key Bindings + + + + + + + + + + +
+
+
+
+

+ Creating Tuple Key Bindings +

+
+
+
+
+

+ Serial bindings were used in prior examples as key bindings, and + keys were stored as serialized objects. In this example, a tuple + binding is used for each key since keys will be stored as tuples. + Because keys are no longer stored as serialized objects, the + PartKey, SupplierKey and ShipmentKey classes + no longer implement the + Serializable + + interface (this is the only change to these classes and is not + shown below). +

+

+ For the Part key, Supplier key, + and Shipment key, the + SampleViews class was changed in this example to create a + custom + TupleBinding + + instead of a + SerialBinding. + The custom tuple key binding classes are defined further below. +

+ +
import com.sleepycat.bind.tuple.TupleBinding;
+...
+public class SampleViews
+{
+    ...
+    public SampleViews(SampleDatabase db)
+    {
+        ...
+        ClassCatalog catalog = db.getClassCatalog();
+        EntryBinding partKeyBinding =
+            new PartKeyBinding();
+        EntityBinding partDataBinding =
+            new PartBinding(catalog, PartData.class);
+        EntryBinding supplierKeyBinding =
+            new SupplierKeyBinding();
+        EntityBinding supplierDataBinding =
+            new SupplierBinding(catalog, SupplierData.class);
+        EntryBinding shipmentKeyBinding =
+            new ShipmentKeyBinding();
+        EntityBinding shipmentDataBinding =
+            new ShipmentBinding(catalog, ShipmentData.class);
+        EntryBinding cityKeyBinding =
+            TupleBinding.getPrimitiveBinding(String.class);
+        ...
+    }
+} 
+

+ For the City key, however, a custom binding class is not needed because the key class is String, one of the simple Java types for which built-in bindings are available. For these types, a tuple binding may be created using the TupleBinding.getPrimitiveBinding static method. +

+

+ The custom key binding classes, PartKeyBinding, + SupplierKeyBinding and ShipmentKeyBinding, are + defined by extending the + TupleBinding + + class. The + TupleBinding + + abstract class implements the + EntryBinding + + interface, and is used for one-to-one bindings between tuples and + objects. Each binding class implements two methods for converting + between tuples and objects. Tuple fields are read using the + TupleInput + + parameter and written using the + TupleOutput + + parameter. +

+ +
import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+...
+public class SampleViews
+{
+...
+
+    private static class PartKeyBinding extends TupleBinding
+    {
+        private PartKeyBinding()
+        {
+        }
+
+        public Object entryToObject(TupleInput input)
+        {
+            String number = input.readString();
+            return new PartKey(number);
+        }
+
+        public void objectToEntry(Object object, TupleOutput output)
+        {
+            PartKey key = (PartKey) object;
+            output.writeString(key.getNumber());
+        }
+    }
+    ...
+    private static class SupplierKeyBinding extends TupleBinding
+    {
+        private SupplierKeyBinding()
+        {
+        }
+
+        public Object entryToObject(TupleInput input)
+        {
+            String number = input.readString();
+            return new SupplierKey(number);
+        }
+
+        public void objectToEntry(Object object, TupleOutput output)
+        {
+            SupplierKey key = (SupplierKey) object;
+            output.writeString(key.getNumber());
+        }
+    }
+    ...
+    private static class ShipmentKeyBinding extends TupleBinding
+    {
+        private ShipmentKeyBinding()
+        {
+        }
+
+        public Object entryToObject(TupleInput input)
+        {
+            String partNumber = input.readString();
+            String supplierNumber = input.readString();
+            return new ShipmentKey(partNumber, supplierNumber);
+        }
+
+        public void objectToEntry(Object object, TupleOutput output)
+        {
+            ShipmentKey key = (ShipmentKey) object;
+            output.writeString(key.getPartNumber());
+            output.writeString(key.getSupplierNumber());
+        }
+    }
+    ...
+} 
+
+ + + diff --git a/db/docs/collections/tutorial/tupleswithkeycreators.html b/db/docs/collections/tutorial/tupleswithkeycreators.html new file mode 100644 index 000000000..57cb10fcc --- /dev/null +++ b/db/docs/collections/tutorial/tupleswithkeycreators.html @@ -0,0 +1,206 @@ + + + + + + + Using Tuples with Key Creators + + + + + + + + + + +
+
+
+
+

+ Using Tuples with Key Creators +

+
+
+
+
+

+ Key creators were used in prior examples to extract index keys from value objects. The keys were returned as deserialized key objects, since the serial format was used for keys. In this example, the tuple format is used for keys and the key creators return keys by writing information to a tuple. The differences between this example and the prior example are described below. +

+
+ +
+

+ In addition to writing key tuples, the + ShipmentByPartKeyCreator and + ShipmentBySupplierKeyCreator classes also read the key tuple + of the primary key. This is because they extract the index key from + fields in the Shipment's primary key. Instead of calling getter + methods on the ShipmentKey object, as in prior examples, + these methods call + TupleInput.readString. + The ShipmentKey consists of two string fields that are read + in sequence. +

+

+ The modified key creators are shown below: + SupplierByCityKeyCreator, + ShipmentByPartKeyCreator + and ShipmentBySupplierKeyCreator. +

+ +
import com.sleepycat.bind.serial.TupleSerialKeyCreator;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+...
+public class SampleDatabase
+{
+    ...
+    private static class SupplierByCityKeyCreator
+        extends TupleSerialKeyCreator
+    {
+        private SupplierByCityKeyCreator(StoredClassCatalog catalog,
+                                         Class valueClass)
+        {
+            super(catalog, valueClass);
+        }
+
+        public boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                          Object valueInput,
+                                          TupleOutput indexKeyOutput)
+        {
+            SupplierData supplierData = (SupplierData) valueInput;
+            String city = supplierData.getCity();
+            if (city != null) {
+                indexKeyOutput.writeString(supplierData.getCity());
+                return true;
+            } else {
+                return false;
+            }
+        }
+    }
+
+    private static class ShipmentByPartKeyCreator
+        extends TupleSerialKeyCreator
+    {
+        private ShipmentByPartKeyCreator(StoredClassCatalog catalog,
+                                         Class valueClass)
+        {
+            super(catalog, valueClass);
+        }
+
+        public boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                          Object valueInput,
+                                          TupleOutput indexKeyOutput)
+        {
+            String partNumber = primaryKeyInput.readString();
+            // don't bother reading the supplierNumber
+            indexKeyOutput.writeString(partNumber);
+            return true;
+        }
+    }
+
+    private static class ShipmentBySupplierKeyCreator
+        extends TupleSerialKeyCreator
+    {
+        private ShipmentBySupplierKeyCreator(StoredClassCatalog catalog,
+                                             Class valueClass)
+        {
+            super(catalog, valueClass);
+        }
+
+        public boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                          Object valueInput,
+                                          TupleOutput indexKeyOutput)
+        {
+            primaryKeyInput.readString(); // skip the partNumber
+            String supplierNumber = primaryKeyInput.readString();
+            indexKeyOutput.writeString(supplierNumber);
+            return true;
+        }
+    }
+    ...
+}
+	
+
+ + + diff --git a/db/docs/collections/tutorial/tutorialintroduction.html b/db/docs/collections/tutorial/tutorialintroduction.html new file mode 100644 index 000000000..6709e3961 --- /dev/null +++ b/db/docs/collections/tutorial/tutorialintroduction.html @@ -0,0 +1,407 @@ + + + + + + Tutorial Introduction + + + + + + + + + +
+
+
+
+

Tutorial Introduction

+
+
+
+
+

+ Most of the remainder of this document is a tutorial that illustrates usage of the Sleepycat Java Collections API. The tutorial builds a shipment database, a familiar example from classic database texts. +

+

+ The examples illustrate the following concepts of the Sleepycat Java + Collections API: +

+
+
    +
  • +

    + Object-to-data bindings +

    +
  • +
  • +

    + The database environment +

    +
  • +
  • +

    + Databases that contain key/value records +

    +
  • +
  • +

    + Secondary index databases that contain index keys +

    +
  • +
  • +

    + Java collections for accessing databases and + indices +

    +
  • +
  • +

    + Transactions used to commit or undo database + changes +

    +
  • +
+
+

+ The examples build on each other, but at the same time the + source code for each example stands alone. +

+ +

+ The shipment database consists of three database stores: the + part store, the supplier store, and the shipment store. Each store + contains a number of records, and each record consists of a key and + a value. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
StoreKeyValue
PartPart NumberName, Color, Weight, City
SupplierSupplier NumberName, Status, City
ShipmentPart Number, Supplier NumberQuantity
+
+

+ In the example programs, Java classes containing the fields + above are defined for the key and value of each store: + PartKey, + PartData, + SupplierKey, + SupplierData, + ShipmentKey and ShipmentData. In + addition, because the Part's Weight field is itself composed of two + fields — the weight value and the unit of measure — it is + represented by a separate Weight class. These classes will + be defined in the first example program. +
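+ As a rough sketch of the kind of plain serializable value classes the example defines (the actual definitions appear in the first example program and may differ in detail), the composite Weight class might look like this: +
+
+import java.io.Serializable;
+
+public class Weight implements Serializable
+{
+    private double amount;  // the weight value, for example 12.0
+    private String units;   // the unit of measure, for example "grams"
+
+    public Weight(double amount, String units)
+    {
+        this.amount = amount;
+        this.units = units;
+    }
+
+    public double getAmount()
+    {
+        return amount;
+    }
+
+    public String getUnits()
+    {
+        return units;
+    }
+}
+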

+

+ In general the Sleepycat Java Collections API uses bindings to + describe how Java objects are stored. A binding defines the stored + data syntax and the mapping between a Java object and the stored + data. The example programs show how to create different types of + bindings, and explains the characteristics of each type. +

+

+ The following tables show the record values that are used in + all the example programs in the tutorial. + +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NumberNameColorWeightCity
P1NutRed12.0 gramsLondon
P2BoltGreen17.0 gramsParis
P3ScrewBlue17.0 gramsRome
P4ScrewRed14.0 gramsLondon
P5CamBlue12.0 gramsParis
P6CogRed19.0 gramsLondon
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NumberNameStatusCity
S1Smith20London
S2Jones10Paris
S3Blake30Paris
S4Clark20London
S5Adams30Athens
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Part NumberSupplier NumberQuantity
P1S1300
P1S2300
P2S1200
P2S2400
P2S3200
P2S4200
P3S1400
P4S1200
P4S4300
P5S1100
P5S4400
P6S1100
+
+
+ + + diff --git a/db/docs/collections/tutorial/usingtransactions.html b/db/docs/collections/tutorial/usingtransactions.html new file mode 100644 index 000000000..311189c69 --- /dev/null +++ b/db/docs/collections/tutorial/usingtransactions.html @@ -0,0 +1,221 @@ + + + + + + + Using Transactions + + + + + + + + + + +
+
+
+
+

+ Using Transactions +

+
+
+
+
+

+ DB transactional applications have standard transactional characteristics: recoverability, atomicity and integrity (sometimes referred to generically as the ACID properties). The Sleepycat Java Collections API provides these transactional capabilities using a transaction-per-thread model. Once a transaction is begun, it is implicitly associated with the current thread until it is committed or aborted. This model is used for the following reasons. +

+
+
    +
  • +

    + The transaction-per-thread model is commonly used in other Java + APIs such as J2EE. +

    +
  • +
  • +

    + Since the Java collections API is used for data access, there + is no way to pass a transaction object to methods such + as + Map.put. +

    +
  • +
+
+

+ The Sleepycat Java Collections API provides two transaction APIs. The + lower-level API is the + CurrentTransaction + + class. It provides a way to get the transaction for the current + thread, and to begin, commit and abort transactions. It also + provides access to the Berkeley DB core API + + Transaction + + object. With + CurrentTransaction, + just as in the + + com.sleepycat.db + API, the application is responsible + for beginning, committing and aborting transactions, and for + handling deadlock exceptions and retrying operations. This API may + be needed for some applications, but it is not used in the + example. +
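+ For readers who do want the lower-level API, a minimal sketch of explicit transaction handling with CurrentTransaction follows; the runInTransaction method and its Runnable parameter are placeholders, and deadlock retries are omitted for brevity. +
+
+import com.sleepycat.collections.CurrentTransaction;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Environment;
+
+public class CurrentTransactionSketch
+{
+    // Begins a transaction for the current thread, runs the work and
+    // commits; any failure aborts the transaction instead.
+    public static void runInTransaction(Environment env, Runnable work)
+        throws DatabaseException
+    {
+        CurrentTransaction currentTxn = CurrentTransaction.getInstance(env);
+        currentTxn.beginTransaction(null);
+        boolean committed = false;
+        try {
+            work.run();
+            currentTxn.commitTransaction();
+            committed = true;
+        } finally {
+            if (!committed) {
+                // A real application would also catch DeadlockException and
+                // retry the whole unit of work.
+                currentTxn.abortTransaction();
+            }
+        }
+    }
+}
+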

+

+ The example uses the higher-level TransactionRunner and TransactionWorker APIs, which are built on top of CurrentTransaction. TransactionRunner.run() automatically begins a transaction and then calls the TransactionWorker.doWork() method, which is implemented by the application. +

+

+ The TransactionRunner.run() method automatically detects + deadlock exceptions and performs retries by repeatedly calling the + TransactionWorker.doWork() method until the operation succeeds + or the maximum retry count is reached. If the maximum retry count + is reached or if another exception (other than + + + DeadlockException) + + is thrown by TransactionWorker.doWork(), then the transaction + will be automatically aborted. Otherwise, the transaction will be + automatically committed. +

+

+ Using this high-level API, if TransactionRunner.run() + throws an exception, the application can assume that the operation + failed and the transaction was aborted; otherwise, when an + exception is not thrown, the application can assume the operation + succeeded and the transaction was committed. +

+

+ The Sample.run() method creates a TransactionRunner + object and calls its run() method. +

+ +
import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+...
+public class Sample
+{
+    private SampleDatabase db;
+    ...
+    private void run()
+        throws Exception
+    {
+        TransactionRunner runner = new TransactionRunner(db.getEnvironment());
+        runner.run(new PopulateDatabase());
+        runner.run(new PrintDatabase());
+    }
+    ...
+    private class PopulateDatabase implements TransactionWorker
+    {
+        public void doWork()
+            throws Exception
+        {
+        }
+    }
+
+    private class PrintDatabase implements TransactionWorker
+    {
+        public void doWork()
+            throws Exception
+        {
+        }
+    }
+} 
+

+ The run() method is called by main() and was outlined + in the previous section. It first creates a + TransactionRunner, passing the database environment to its + constructor. +

+

+ It then calls TransactionRunner.run() to execute two + transactions, passing instances of the application-defined + PopulateDatabase and + PrintDatabase nested classes. + These classes implement the TransactionWorker.doWork() method + and will be fully described in the next two sections. +

+

+ For each call to TransactionRunner.run(), a separate + transaction will be performed. The use of two transactions in the + example — one for populating the database and another for printing + its contents — is arbitrary. A real-life application should be + designed to create transactions for each group of operations that + should have ACID properties, while also + taking into account the impact of transactions on performance. +

+

+ The advantage of using TransactionRunner is that deadlock + retries and transaction begin, commit and abort are handled + automatically. However, a TransactionWorker class must be + implemented for each type of transaction. If desired, anonymous + inner classes can be used to implement the TransactionWorker + interface. +
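+ For instance, a small unit of work might be run with an anonymous inner class along the following lines (the runOnce method is hypothetical; the environment would be the one returned by SampleDatabase.getEnvironment()): +
+
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.db.Environment;
+
+public class AnonymousWorkerSketch
+{
+    public static void runOnce(Environment env)
+        throws Exception
+    {
+        TransactionRunner runner = new TransactionRunner(env);
+        // The worker is defined inline; doWork() runs inside a transaction
+        // that TransactionRunner begins and then commits or aborts.
+        runner.run(new TransactionWorker() {
+            public void doWork()
+                throws Exception
+            {
+                // Transactional reads and writes would go here.
+            }
+        });
+    }
+}
+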

+
+ + + diff --git a/db/docs/gsg/C/BerkeleyDB-Core-C-GSG.pdf b/db/docs/gsg/C/BerkeleyDB-Core-C-GSG.pdf new file mode 100644 index 000000000..7bb4ed0f4 Binary files /dev/null and b/db/docs/gsg/C/BerkeleyDB-Core-C-GSG.pdf differ diff --git a/db/docs/gsg/C/CoreCursorUsage.html b/db/docs/gsg/C/CoreCursorUsage.html new file mode 100644 index 000000000..009f0ab58 --- /dev/null +++ b/db/docs/gsg/C/CoreCursorUsage.html @@ -0,0 +1,360 @@ + + + + + + Cursor Example + + + + + + + + + +
+
+
+
+

Cursor Example

+
+
+
+
+

+ In + Database Usage Example + + we wrote an + application that loaded two databases with + vendor and inventory information. In this example, we will write an + application to display all of the items in the inventory database. As a + part of showing any given inventory item, we will look up the vendor who + can provide the item and show the vendor's contact information. +

+

+ Specifically, the + example_database_read + + application does the following: +

+
+
    +
  1. +

+ Opens the inventory and vendor databases that were created by our example_database_load application. See example_database_load for information on how that application creates the databases and writes data to them. +

    +
  2. +
  3. +

    Obtains a cursor from the inventory database.

    +
  4. +
  5. +

    + Steps through the inventory database, displaying + each record as it goes. +

    +
  6. +
  7. +

    + Gets the name of the vendor for that inventory item from the + inventory record. +

    +
  8. +
  9. +

    + Uses the vendor name to look up the vendor record in the vendor + database. +

    +
  10. +
  11. +

    Displays the vendor record.

    +
  12. +
+
+

+ Remember that you can find the complete implementation of this application + in: +

+
DB_INSTALL/examples_c/getting_started
+

+ where DB_INSTALL is the location where you + placed your DB distribution. +

+
+ +

+ Example 4.1 example_database_read +

+

+ To begin, we include the necessary header files and perform our + forward declarations. +

+ +
/* File: example_database_read.c */
+/* gettingstarted_common.h includes db.h for us */
+#include "gettingstarted_common.h"
+
+/* Forward declarations */
+char * show_inventory_item(void *);
+int show_all_records(STOCK_DBS *);
+int show_records(STOCK_DBS *, char *);
+int show_vendor_record(char *, DB *); 
+

+ Next we write our main() function. Note that it is somewhat more complicated than it strictly needs to be here, because we will be extending it in the next chapter to perform inventory item lookups. +

+ +
/*
+ * Displays all inventory items and the associated vendor record.
+ */
+int
+main(int argc, char *argv[])
+{
+    STOCK_DBS my_stock;
+    int ret;
+
+    /* Initialize the STOCK_DBS struct */
+    initialize_stockdbs(&my_stock);
+                                                                                                                                
+    /*
+     * Parse the command line arguments here and determine
+     * the location of the database files. This step is
+     * omitted for brevity.
+     */
+
+    /*
+     * Identify the files that will hold our databases
+     * This function uses information obtained from the
+     * command line to identify the directory in which
+     * the database files reside.
+     */
+    set_db_filenames(&my_stock);
+                                                                                                                                
+    /* Open all databases */
+    ret = databases_setup(&my_stock, "example_database_read", stderr);
+    if (ret != 0) {
+        fprintf(stderr, "Error opening databases\n");
+        databases_close(&my_stock);
+        return (ret);
+    }
+                                                                                                                                
+    ret = show_all_records(&my_stock);
+
+    /* close our databases */
+    databases_close(&my_stock);
+    return (ret);
+} 
+

+ Next we need to write the show_all_records() + function. This + function takes a STOCK_DBS structure and displays all + of the inventory records found in the inventory database. Once it shows + the inventory record, it retrieves the vendor's name from that record + and uses it to look up and display the appropriate vendor record: +

+ +
int show_all_records(STOCK_DBS *my_stock)
+{
+    DBC *cursorp;
+    DBT key, data;
+    char *the_vendor;
+    int exit_value, ret;
+                                                                                                                                  
+    /* Initialize our DBTs. */
+    memset(&key, 0, sizeof(DBT));
+    memset(&data, 0, sizeof(DBT));
+                                                                                                                                  
+    /* Get a cursor to the itemname db */
+    my_stock->inventory_dbp->cursor(my_stock->inventory_dbp, NULL,
+      &cursorp, 0);
+                                                                                                                                  
+    /*
+     * Iterate over the inventory database, from the first record
+     * to the last, displaying each in turn.
+     */
+    exit_value = 0;
+    while ((ret =
+      cursorp->c_get(cursorp, &key, &data, DB_NEXT))
+      == 0)
+    {
+        the_vendor = show_inventory_item(data.data);
+        ret = show_vendor_record(the_vendor, my_stock->vendor_dbp);
+        if (ret) {
+            exit_value = ret;
+            break;
+        }
+    }
+                                                                                                                                  
+    /* Close the cursor */
+    cursorp->c_close(cursorp);
+    return(exit_value);
+} 
+

+ The show_inventory_item() function simply extracts the + inventory information from the record data and displays it. It then + returns the vendor's name. Note that in order to extract the inventory + information, we have to unpack it from the data buffer. How we do this + is entirely dependent on how we packed the buffer in the first + place. For more information, see the + load_inventory_database() function implementation + in + example_database_load. + +

+ +
/*
+ * Shows an inventory item.
+ */
+char *
+show_inventory_item(void *vBuf)
+{
+    float price;
+    int buf_pos, quantity;
+    char *category, *name, *sku, *vendor_name;
+    char *buf = (char *)vBuf;
+                                                                                                                                  
+    /* Get the price.  */
+    price = *((float *)buf);
+    buf_pos = sizeof(float);
+                                                                                                                                  
+    /* Get the quantity. */
+    quantity = *((int *)(buf + buf_pos));
+    buf_pos += sizeof(int);
+                                                                                                                                  
+    /* Get the inventory item's name */
+    name = buf + buf_pos;
+    buf_pos += strlen(name) + 1;
+                                                                                                                                  
+    /* Get the inventory item's sku */
+    sku = buf + buf_pos;
+    buf_pos += strlen(sku) + 1;
+                                                                                                                                  
+    /* 
+     * Get the category (fruits, vegetables, desserts) that this 
+     * item belongs to.
+     */
+    category = buf + buf_pos;
+    buf_pos += strlen(category) + 1;
+                                                                                                                                  
+    /* Get the vendor's name */
+    vendor_name = buf + buf_pos;
+                                                                                                                                  
+    /* Display all this information */
+    printf("name: %s\n", name);
+    printf("\tSKU: %s\n", sku);
+    printf("\tCategory: %s\n", category);
+    printf("\tPrice: %.2f\n", price);
+    printf("\tQuantity: %i\n", quantity);
+    printf("\tVendor:\n");
+                                                                                                                                  
+    /* Return the vendor's name */
+    return(vendor_name);
+} 
+

+ Having returned the vendor's name, we can now use it to look up and + display the appropriate vendor record. In this case we do not need to use a + cursor to display the vendor record. Using a cursor here would complicate our + code slightly for no real gain. Instead, we simply perform a + get() directly against the vendor database. +

+ +
/*
+ * Shows a vendor record. Each vendor record is an instance of
+ * a vendor structure. See load_vendor_database() in
+ * example_database_load for how this structure was originally
+ * put into the database.
+ */
+int
+show_vendor_record(char *vendor_name, DB *vendor_dbp)
+{
+    DBT key, data;
+    VENDOR my_vendor;
+    int ret;
+                                                                                                                                  
+    /* Zero our DBTs */
+    memset(&key, 0, sizeof(DBT));
+    memset(&data, 0, sizeof(DBT));
+                                                                                                                                  
+    /* Set the search key to the vendor's name */
+    key.data = vendor_name;
+    key.size = strlen(vendor_name) + 1;
+
+    /*
+     * Make sure we use the memory we set aside for the VENDOR
+     * structure rather than the memory that DB allocates.
+     * Some systems may require structures to be aligned in memory
+     * in a specific way, and DB may not get it right.
+     */
+                                                                                                                                
+    data.data = &my_vendor;
+    data.ulen = sizeof(VENDOR);
+    data.flags = DB_DBT_USERMEM;
+                                                                                                                                  
+    /* Get the record */
+    ret = vendor_dbp->get(vendor_dbp, 0, &key, &data, 0);
+    if (ret != 0) {
+        vendor_dbp->err(vendor_dbp, ret, "Error searching for vendor: '%s'",
+          vendor_name);
+        return(ret);
+    } else {
+        printf("\t\t%s\n", my_vendor.name);
+        printf("\t\t%s\n", my_vendor.street);
+        printf("\t\t%s, %s\n", my_vendor.city, my_vendor.state);
+        printf("\t\t%s\n\n", my_vendor.zipcode);
+        printf("\t\t%s\n\n", my_vendor.phone_number);
+        printf("\t\tContact: %s\n", my_vendor.sales_rep);
+        printf("\t\t%s\n", my_vendor.sales_rep_phone);
+    }
+    return(0);
+} 
+
+

+ That completes the implementation of + example_database_read. In the next chapter, we + will extend this application to make use of a secondary database so that + we can query the inventory database for a specific inventory item. +

+
+ + + diff --git a/db/docs/gsg/C/CoreDBAdmin.html b/db/docs/gsg/C/CoreDBAdmin.html new file mode 100644 index 000000000..bf34be035 --- /dev/null +++ b/db/docs/gsg/C/CoreDBAdmin.html @@ -0,0 +1,149 @@ + + + + + + Administrative Methods + + + + + + + + + +
+
+
+
+

Administrative Methods

+
+
+
+
+

+ The following + DB + + + + methods may be useful to you when managing DB databases: +

+
+
    +
  • +

    + DB->get_open_flags() + +

    +

    + Returns the current open flags. It is an error to use this method on + an unopened database. +

    + +
    #include <db.h>
    +...
    +DB *dbp;
    +u_int32_t open_flags;
    +
    +/* Database open and subsequent operations omitted for clarity */
    +
    +dbp->get_open_flags(dbp, &open_flags); 
    +
  • +
  • +

    + DB->remove() + +

    +

    + Removes the specified database. If no value is given for the + database parameter, then the entire file + referenced by this method is removed. +

    +

    + Never remove a database that has handles opened for it. Never remove a file that + contains databases with opened handles. +

    + +
    #include <db.h>
    +...
    +DB *dbp;
    +
    +/* Database handle creation omitted for clarity. Note that you may not
    + * call this method on a handle that has already been opened. */
    +
    +dbp->remove(dbp,                   /* Database pointer */
    +            "mydb.db",             /* Database file to remove */
    +            NULL,                  /* Database to remove. This is
    +                                    * NULL so the entire file is
    +                                    * removed.  */
    +           0);                     /* Flags. None used. */
    +
  • +
  • +

    + DB->rename() + +

    +

    + Renames the specified database. If no value is given for the + database parameter, then the entire file + referenced by this method is renamed. +

    +

    + Never rename a database that has handles opened for it. Never rename a file that + contains databases with opened handles. +

    + +
    #include <db.h>
    +...
    +DB *dbp;
    +
    +/* Database handle creation omitted for clarity. Note that you may not
    + * call this method on a handle that has already been opened. */
    +
    +dbp->rename(dbp,                    /* Database pointer */
    +             "mydb.db",             /* Database file to rename */
    +             NULL,                  /* Database to rename. This is
    +                                     * NULL so the entire file is
    +                                     * renamed.  */
    +            "newdb.db",             /* New database file name */
    +            0);                     /* Flags. None used. */
    +
  • +
+
+ + + +
+ + + diff --git a/db/docs/gsg/C/CoreDbUsage.html b/db/docs/gsg/C/CoreDbUsage.html new file mode 100644 index 000000000..754fd02d9 --- /dev/null +++ b/db/docs/gsg/C/CoreDbUsage.html @@ -0,0 +1,299 @@ + + + + + + Database Example + + + + + + + + + +
+
+
+
+

Database Example

+
+
+
+
+

+ Throughout this book we will build a couple of applications that load + and retrieve inventory data from DB databases. While we are not yet ready to + begin reading from or writing to our databases, we can at least create + some important structures and functions that we will use to manage our + databases. +

+

+ Note that subsequent examples in this book will build on this code to + perform the more interesting work of writing to and reading from the + databases. +

+

+ Note that you can find the complete implementation of these functions + in: +

+
DB_INSTALL/examples_c/getting_started
+

+ where DB_INSTALL is the location where you + placed your DB distribution. +

+
+ +

+ Example 2.1 The stock_db Structure +

+

+ To begin, we create a structure that we will use to hold all our + database pointers and database names: +

+ +
/* File: gettingstarted_common.h */
+#include <db.h>
+
+typedef struct stock_dbs {
+    DB *inventory_dbp; /* Database containing inventory information */
+    DB *vendor_dbp;    /* Database containing vendor information */
+
+    char *db_home_dir;       /* Directory containing the database files */
+    char *inventory_db_name; /* Name of the inventory database */
+    char *vendor_db_name;    /* Name of the vendor database */
+} STOCK_DBS;
+
+/* Function prototypes */
+int databases_setup(STOCK_DBS *, const char *, FILE *);
+int databases_close(STOCK_DBS *);
+void initialize_stockdbs(STOCK_DBS *);
+int open_database(DB **, const char *, const char *,
+    FILE *);
+void set_db_filenames(STOCK_DBS *my_stock); 
+
+
+ +

+ Example 2.2 The stock_db Utility Functions +

+

+ Before continuing, we want some utility functions that we use to + make sure the stock_db structure is in a sane state before using it. + The first is a simple function that initializes all the structure's + pointers to a useful default. The second is more interesting + in that it is used to place a + common path on all our database names so that we can explicitly + identify where all the database files should reside. +

+ +
/* File: gettingstarted_common.c */
+#include "gettingstarted_common.h"
+
+/* Initializes the STOCK_DBS struct.*/
+void
+initialize_stockdbs(STOCK_DBS *my_stock)
+{
+    my_stock->db_home_dir = DEFAULT_HOMEDIR;
+    my_stock->inventory_dbp = NULL;
+    my_stock->vendor_dbp = NULL;
+
+    my_stock->inventory_db_name = NULL;
+    my_stock->vendor_db_name = NULL;
+}
+
+/* Identify all the files that will hold our databases. */
+void
+set_db_filenames(STOCK_DBS *my_stock)
+{
+    size_t size;
+
+    /* Create the Inventory DB file name */
+    size = strlen(my_stock->db_home_dir) + strlen(INVENTORYDB) + 1;
+    my_stock->inventory_db_name = malloc(size);
+    snprintf(my_stock->inventory_db_name, size, "%s%s",
+      my_stock->db_home_dir, INVENTORYDB);
+
+    /* Create the Vendor DB file name */
+    size = strlen(my_stock->db_home_dir) + strlen(VENDORDB) + 1;
+    my_stock->vendor_db_name = malloc(size);
+    snprintf(my_stock->vendor_db_name, size, "%s%s",
+      my_stock->db_home_dir, VENDORDB);
+} 
+
+
+ +

+ Example 2.3 open_database() Function +

+

+ We are opening multiple databases, and we are + opening those databases using identical flags and error reporting + settings. It is therefore worthwhile to create a function that + performs this operation for us: +

+ +
/* File: gettingstarted_common.c */
+    
+/* Opens a database */
+int
+open_database(DB **dbpp,       /* The DB handle that we are opening */
+    const char *file_name,     /* The file in which the db lives */
+    const char *program_name,  /* Name of the program calling this 
+                                * function */
+    FILE *error_file_pointer)  /* File where we want error messages sent */
+{
+    DB *dbp;    /* For convenience */
+    u_int32_t open_flags;
+    int ret;
+
+    /* Initialize the DB handle */
+    ret = db_create(&dbp, NULL, 0);
+    if (ret != 0) {
+        fprintf(error_file_pointer, "%s: %s\n", program_name,
+                db_strerror(ret));
+        return(ret);
+    }
+
+    /* Point to the memory malloc'd by db_create() */
+    *dbpp = dbp;
+                                                                                                                               
+    /* Set up error handling for this database */
+    dbp->set_errfile(dbp, error_file_pointer);
+    dbp->set_errpfx(dbp, program_name);
+
+    /* Set the open flags */
+    open_flags = DB_CREATE;
+
+    /* Now open the database */
+    ret = dbp->open(dbp,        /* Pointer to the database */
+                    NULL,       /* Txn pointer */
+                    file_name,  /* File name */
+                    NULL,       /* Logical db name (unneeded) */
+                    DB_BTREE,   /* Database type (using btree) */
+                    open_flags, /* Open flags */
+                    0);         /* File mode. Using defaults */
+    if (ret != 0) {
+        dbp->err(dbp, ret, "Database '%s' open failed.", file_name);
+        return(ret);
+    }
+                                                                                                                               
+    return (0);
+}
+
+
+ +

+ Example 2.4 The databases_setup() Function +

+

+ Now that we have our open_database() function, + we can use it to open a database. We now create a simple function + that will open all our databases for us. +

+ +
/* opens all databases */
+int
+databases_setup(STOCK_DBS *my_stock, const char *program_name,
+  FILE *error_file_pointer)
+{
+    int ret;
+
+    /* Open the vendor database */
+    ret = open_database(&(my_stock->vendor_dbp),
+      my_stock->vendor_db_name,
+      program_name, error_file_pointer);
+    if (ret != 0)
+        /*
+         * Error reporting is handled in open_database() so just return
+         * the return code here.
+         */
+        return (ret);
+
+    /* Open the inventory database */
+    ret = open_database(&(my_stock->inventory_dbp),
+      my_stock->inventory_db_name,
+      program_name, error_file_pointer);
+    if (ret != 0)
+        /*
+         * Error reporting is handled in open_database() so just return
+         * the return code here.
+         */
+        return (ret);
+
+    printf("databases opened successfully\n");
+    return (0);
+}
+
+
+ +

+ Example 2.5 The databases_close() Function +

+

+ Finally, it is useful to have a function that can close all our databases for us: +

+ +
/* Closes all the databases. */
+int
+databases_close(STOCK_DBS *my_stock)
+{
+    int ret;
+    /*
+     * Note that closing a database automatically flushes its cached data
+     * to disk, so no sync is required here.
+     */
+
+    if (my_stock->inventory_dbp != NULL) {
+        ret = my_stock->inventory_dbp->close(my_stock->inventory_dbp, 0);
+        if (ret != 0)
+            fprintf(stderr, "Inventory database close failed: %s\n",
+              db_strerror(ret));
+    }
+
+    if (my_stock->vendor_dbp != NULL) {
+        ret = my_stock->vendor_dbp->close(my_stock->vendor_dbp, 0);
+        if (ret != 0)
+            fprintf(stderr, "Vendor database close failed: %s\n",
+              db_strerror(ret));
+    }
+
+    printf("databases closed.\n");
+    return (0);
+} 
+
+
+ + + diff --git a/db/docs/gsg/C/CoreEnvUsage.html b/db/docs/gsg/C/CoreEnvUsage.html new file mode 100644 index 000000000..20a1ae32b --- /dev/null +++ b/db/docs/gsg/C/CoreEnvUsage.html @@ -0,0 +1,156 @@ + + + + + + Managing Databases in Environments + + + + + + + + + +
+
+
+
+

Managing Databases in Environments

+
+
+
+
+

+ In Environments, we introduced + environments. While environments are not used in the examples built in this book, + they are so commonly used by DB applications that we show their + basic usage here, if only for completeness. +

+

+ To use an environment, you must first + create the environment handle using db_env_create(), and then + open it. At open time, you must identify the directory in + which it resides. This directory must exist prior to the open attempt. + You can also identify open properties, such as whether the environment can be + created if it does not already exist. +

+

+ For example, to + create an environment handle and + open an environment: +

+ +
#include <db.h>
+...
+DB_ENV *myEnv;            /* Env structure handle */
+DB *dbp;                  /* DB structure handle */
+u_int32_t db_flags;       /* database open flags */
+u_int32_t env_flags;      /* env open flags */
+int ret;                  /* function return value */
+
+/* 
+   Create an environment object and initialize it for error
+   reporting. 
+*/
+ret = db_env_create(&myEnv, 0);
+if (ret != 0) {
+    fprintf(stderr, "Error creating env handle: %s\n", db_strerror(ret));
+    return -1;
+}
+
+/* Open the environment. */
+env_flags = DB_CREATE;   /* If the environment does not exist,
+                          * create it. */
+
+ret = myEnv->open(myEnv,   /* DB_ENV ptr */
+  "/export1/testEnv",      /* env home directory */
+  env_flags,               /* Open flags */
+  0);                      /* File mode (default) */
+if (ret != 0) {
+    fprintf(stderr, "Environment open failed: %s", db_strerror(ret));
+    return -1;
+} 
+

+ Once an environment is opened, you can open databases in it. Note that by default databases + are stored in the environment's home directory, or relative to that directory if you + provide a relative path in the database's file name: +

+ +
/* 
+ * Initialize the DB structure. Pass the pointer
+ * to the environment in which this DB is opened.
+ */
+ret = db_create(&dbp, myEnv, 0);
+if (ret != 0) {
+  /* Error handling goes here */
+}
+
+/* Database open flags */
+db_flags = DB_CREATE;    /* If the database does not exist, 
+                          * create it.*/
+
+/* open the database */
+ret = dbp->open(dbp,        /* DB structure pointer */
+                NULL,       /* Transaction pointer */
+                "my_db.db", /* On-disk file that holds the database. */
+                NULL,       /* Optional logical database name */
+                DB_BTREE,   /* Database access method */
+                db_flags,   /* Open flags */
+                0);         /* File mode (using defaults) */
+if (ret != 0) {
+  /* Error handling goes here */
+}
+

+ When you are done with an environment, you must close it. Before you close an environment, + make sure you close any opened databases. +

+ +
/* 
+* Close the database and environment
+*/
+
+if (dbp != NULL) {
+    dbp->close(dbp, 0);
+}
+
+if (myEnv != NULL) {
+    myEnv->close(myEnv, 0);
+} 
+
+ + + diff --git a/db/docs/gsg/C/Cursors.html b/db/docs/gsg/C/Cursors.html new file mode 100644 index 000000000..0f8b696e9 --- /dev/null +++ b/db/docs/gsg/C/Cursors.html @@ -0,0 +1,180 @@ + + + + + + Chapter 4. Using Cursors + + + + + + + + + +
+
+
+
+

Chapter 4. Using Cursors

+
+
+
+
+ +

+ Cursors provide a mechanism by which you can iterate over the records in a + database. Using cursors, you can get, put, and delete database records. If + a database allows duplicate records, then cursors are + the easiest way to access anything + other than the first record for a given key. +

+

+ This chapter introduces cursors. It explains how to open and close them, how + to use them to modify databases, and how to use them with duplicate records. +

+
+
+
+
+

Opening and Closing Cursors

+
+
+
+
+

+ Cursors are managed using the + DBC structure. + + To use a cursor, you must open it using the + DB->cursor() + + method. +

+

For example:

+ +
#include <db.h>
+
+...
+
+DB *my_database;
+DBC *cursorp;
+
+/* Database open omitted for clarity */
+
+/* Get a cursor */
+my_database->cursor(my_database, NULL, &cursorp, 0); 
+

+ When you are done with the cursor, you should close it. To close a + cursor, call the + DBC->c_close() + + method. Note that closing your database while cursors are still open + within the scope of the DB handle, especially if those cursors are + writing to the database, can have unpredictable results. Always + close your cursors before closing your database. +

+ +
#include <db.h>
+
+...
+
+DB *my_database;
+DBC *cursorp;
+
+/* Database and cursor open omitted for clarity */
+
+if (cursorp != NULL) 
+    cursorp->c_close(cursorp); 
+
+if (my_database != NULL) 
+    my_database->close(my_database, 0); 
+
+
+ + + diff --git a/db/docs/gsg/C/DB.html b/db/docs/gsg/C/DB.html new file mode 100644 index 000000000..4c56004b5 --- /dev/null +++ b/db/docs/gsg/C/DB.html @@ -0,0 +1,192 @@ + + + + + + Chapter 2. Databases + + + + + + + + + +
+
+
+
+

Chapter 2. Databases

+
+
+
+
+
+

+ Table of Contents +

+
+
+ + Opening Databases + +
+
+ + Closing Databases + +
+
+ + Database Open Flags + +
+
+ + Administrative Methods + +
+
+ + Error Reporting Functions + +
+
+ + Managing Databases in Environments + +
+
+ + Database Example + +
+
+
+

In Berkeley DB, a database is a collection of records. Records, + in turn, consist of two parts: key and data. That is, records consist of + key/data pairings. +

+

+ Conceptually, you can think of a + database + as containing a two-column table where column 1 contains a key and column 2 + contains data. Both the key and the data are managed using + DBT + structures + (see Database Records for details on this + structure). + So, fundamentally, using a DB + database + involves putting, getting, and deleting database records, which in turn involves efficiently + managing information + contained in + DBT + structures. + The next several chapters of this book are dedicated to those activities. +

+
+
+
+
+

Opening Databases

+
+
+
+
+

+ To open a database, you must first use the db_create() function to + initialize a DB handle. + Once you have initialized the DB + handle, you use its open() method to open the database. +

+

+ Note that by default, DB does not create databases if they do not already exist. + To override this behavior, specify the + DB_CREATE flag on the + open() method. +

+

+ The following code fragment illustrates a database open: + +

+ +
#include <db.h> 
+
+...
+
+DB *dbp;           /* DB structure handle */
+u_int32_t flags;   /* database open flags */
+int ret;           /* function return value */
+
+/* Initialize the structure. This
+ * database is not opened in an environment, 
+ * so the environment pointer is NULL. */
+ret = db_create(&dbp, NULL, 0);
+if (ret != 0) {
+  /* Error handling goes here */
+}
+
+/* Database open flags */
+flags = DB_CREATE;    /* If the database does not exist, 
+                       * create it.*/
+
+/* open the database */
+ret = dbp->open(dbp,        /* DB structure pointer */
+                NULL,       /* Transaction pointer */
+                "my_db.db", /* On-disk file that holds the database. */
+                NULL,       /* Optional logical database name */
+                DB_BTREE,   /* Database access method */
+                flags,      /* Open flags */
+                0);         /* File mode (using defaults) */
+if (ret != 0) {
+  /* Error handling goes here */
+}
+
+
+ + + diff --git a/db/docs/gsg/C/DBEntry.html b/db/docs/gsg/C/DBEntry.html new file mode 100644 index 000000000..3adf78171 --- /dev/null +++ b/db/docs/gsg/C/DBEntry.html @@ -0,0 +1,232 @@ + + + + + + Chapter 3. Database Records + + + + + + + + + +
+
+
+
+

Chapter 3. Database Records

+
+
+
+
+ +

+ DB records contain two parts — a key and some data. Both the key + and its corresponding data are + encapsulated in + + DBT structures. + + Therefore, to access a DB record, you need two such + structures, + one for the key and + one for the data. +

+

+ DBT structures provide a void * + field that you use to point to your data, and another field that identifies + the data length. They can therefore be used to store anything from simple + primitive data to complex structures so long as the information you want to + store resides in a single contiguous block of memory. +

+

+ This chapter describes + DBT + + usage. It also + introduces storing and retrieving key/value pairs from a database. +

+
+
+
+
+

Using Database Records

+
+
+
+
+

+ Each database record is comprised of two + + DBT structures + + — one for the key and another for the data. + + +

+

+ To store a database record where the key and/or the data are primitive + data (int, float, and so forth), + or where the key and/or the data contain an array, we need only to point + to the memory location where that data resides and identify its + length. For example: +

+ +
#include <db.h>
+#include <string.h>
+
+...
+
+DBT key, data;
+float money = 122.45;
+char *description = "Grocery bill.";
+
+/* Zero out the DBTs before using them. */
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+
+key.data = &money;
+key.size = sizeof(float);
+
+data.data = description;
+data.size = strlen(description) + 1; 
+

+ To retrieve the record, simply assign the void * returned in the + DBT + + to the appropriate variable. +

+

+ Note that in the following example we do not allow DB to assign the + memory for the retrieval of the money value. The reason is that some + systems may require float values to have a specific alignment, and the + memory as returned by + DB + + may not be properly aligned (the same problem may exist for structures + on some systems). We tell DB to use our memory instead of its + own by specifying the DB_DBT_USERMEM flag. Be aware that + when we do this, we must also identify how much user memory is available + through the use of the ulen field. +

+ +
#include <db.h>
+#include <string.h>
+
+...
+
+float money;
+DBT key, data;
+char *description;
+
+/* Initialize the DBTs */
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+
+key.data = &money;
+key.ulen = sizeof(float);
+key.flags = DB_DBT_USERMEM;
+
+/* Database retrieval code goes here */
+
+/* 
+ * Money is set into the memory that we supplied.
+ */
+description = data.data;
+
+
+ + + diff --git a/db/docs/gsg/C/DBOpenFlags.html b/db/docs/gsg/C/DBOpenFlags.html new file mode 100644 index 000000000..20d6ee251 --- /dev/null +++ b/db/docs/gsg/C/DBOpenFlags.html @@ -0,0 +1,118 @@ + + + + + + Database Open Flags + + + + + + + + + +
+
+
+
+

Database Open Flags

+
+
+
+
+

+ The following are the flags that you may want to use at database open time. + Note that this list is not exhaustive — it includes only those flags likely + to be of interest for introductory, single-threaded + database applications. For a complete list of the flags available to you, see the + Berkeley DB C API Reference Guide. + +

+
+

Note

+

+ To specify more than one flag on the call to + DB->open(), + + you must bitwise inclusively OR them together: +

+ +
u_int32_t open_flags = DB_CREATE | DB_EXCL;
+
+
+
    +
  • +

    + DB_CREATE +

    +

    + If the database does not currently exist, create it. By default, the database open + fails if the database does not already exist. +

    +
  • +
  • +

    + DB_EXCL +

    +

    + Exclusive database creation. Causes the database open to fail if the database + already exists. This flag is only meaningful when used with + DB_CREATE. +

    +
  • +
  • +

    + DB_RDONLY +

    +

    + Open the database for read operations only. Causes any subsequent database write + operations to fail. A short example using this flag follows this list. +

    +
  • +
  • +

    + DB_TRUNCATE +

    +

    + Physically truncate (empty) the on-disk file that contains the database. + Causes DB to delete all databases physically contained in that file. +

    +
  • +
+
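+ For instance, to open a database that must already exist and that you only + intend to read, you might open it with DB_RDONLY, as in the + following minimal sketch (the file name my_db.db is simply a placeholder): +

+ +
+#include <db.h>
+...
+DB *dbp;
+int ret;
+
+/* Handle creation with db_create() omitted for clarity */
+
+ret = dbp->open(dbp,        /* DB structure pointer */
+                NULL,       /* Transaction pointer */
+                "my_db.db", /* On-disk file that holds the database. */
+                NULL,       /* Optional logical database name */
+                DB_BTREE,   /* Database access method */
+                DB_RDONLY,  /* Open flags: read-only access */
+                0);         /* File mode (using defaults) */
+if (ret != 0) {
+    /* Error handling goes here. Any later attempt to write
+     * through this handle will also fail. */
+}
+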
+
+ + + diff --git a/db/docs/gsg/C/DbUsage.html b/db/docs/gsg/C/DbUsage.html new file mode 100644 index 000000000..b2e81244c --- /dev/null +++ b/db/docs/gsg/C/DbUsage.html @@ -0,0 +1,478 @@ + + + + + + Database Usage Example + + + + + + + + + +
+
+
+
+

Database Usage Example

+
+
+
+
+

+ In Database Example we created several + functions that will open and close the databases that we will use for + our inventory application. We now make use of those functions to load inventory data into + the two databases that we use for this application. +

+

+ Again, remember that you can find the complete implementation for these functions + in: +

+
DB_INSTALL/examples_c/getting_started
+

+ where DB_INSTALL is the location where you + placed your DB distribution. +

+
+ +

+ Example 3.1 VENDOR Structure +

+

+ We want to store data related to an inventory system. There are two + types of information that we want to manage: inventory data and related + vendor contact information. To manage this information, we could + create a structure for each type of data, but to illustrate + storing mixed data without a structure we refrain from creating one + for the inventory data. +

+

+ For the vendor data, we add the VENDOR structure to the same file as holds + our STOCK_DBS structure. Note that the VENDOR structure uses + fixed-length fields. This is not necessary and in fact could + represent a waste of resources if the number of vendors stored in + our database scales to very large numbers. However, for simplicity we use + fixed-length fields anyway, especially + given that our sample data contains so few vendor records. +

+

+ Note that for the inventory data, we will store the data by + marshalling it into a buffer, described below. +

+ +
/* File: gettingstarted_common.h */
+#include <db.h>
+
+...
+                                                                                                                                     
+typedef struct vendor {
+    char name[MAXFIELD];             /* Vendor name */
+    char street[MAXFIELD];           /* Street name and number */
+    char city[MAXFIELD];             /* City */
+    char state[3];                   /* Two-letter US state code */
+    char zipcode[6];                 /* US zipcode */
+    char phone_number[13];           /* Vendor phone number */
+    char sales_rep[MAXFIELD];        /* Name of sales representative */
+    char sales_rep_phone[MAXFIELD];  /* Sales rep's phone number */
+} VENDOR; 
+
+
+ +

+ Example 3.2 example_database_load +

+

+ Our initial sample application will load database information from + several flat files. To save space, we won't show all the details of + this example program. However, as always you can find the complete + implementation for this program here: +

+
DB_INSTALL/examples_c/getting_started
+

+ where DB_INSTALL is the location where you + placed your DB distribution. +

+

+ We begin with the normal include directives and forward declarations: +

+ +
/* example_database_load.c */
+#include "gettingstarted_common.h"
+                                                                                                                                
+                                                                                                                                
+/* Forward declarations */
+int load_vendors_database(STOCK_DBS, char *);
+int pack_string(char *, char *, int);
+int load_inventory_database(STOCK_DBS, char *); 
+

+ Next we begin our main() function with the variable + declarations and command line parsing that is normal for most command + line applications: +

+ +
/*
+ * Loads the contents of vendors.txt and inventory.txt into
+ * Berkeley DB databases. 
+ */
+int
+main(int argc, char *argv[])
+{
+    STOCK_DBS my_stock;
+    int ret, size;
+    char *basename, *inventory_file, *vendor_file;
+
+    /* Initialize the STOCK_DBS struct */
+    initialize_stockdbs(&my_stock);
+
+   /* 
+    * Initialize the base path. This path is used to 
+    * identify the location of the flat-text data
+    * input files.
+    */
+    basename = "./";
+
+    /* 
+     * Parse the command line arguments here and determine 
+     * the location of the flat text files containing the 
+     * inventory data here. This step is omitted for clarity.
+     */
+                                                                                                                               
+    /* 
+     * Identify the files that will hold our databases 
+     * This function uses information obtained from the
+     * command line to identify the directory in which
+     * the database files reside.
+     */
+    set_db_filenames(&my_stock);
+                                                                                                                                
+    /* Find our input files */
+    size = strlen(basename) + strlen(INVENTORY_FILE) + 1;
+    inventory_file = malloc(size);
+    snprintf(inventory_file, size, "%s%s", basename, INVENTORY_FILE);
+                                                                                                                                
+    size = strlen(basename) + strlen(VENDORS_FILE) + 1;
+    vendor_file = malloc(size);
+    snprintf(vendor_file, size, "%s%s", basename, VENDORS_FILE);
+                                                                                                                                
+    /* Open all databases */
+    ret = databases_setup(&my_stock, "example_database_load", stderr);
+    if (ret != 0) {
+            fprintf(stderr, "Error opening databases\n");
+            databases_close(&my_stock);
+            return (ret);
+    }
+                                                                                                                                
+    ret = load_vendors_database(my_stock, vendor_file);
+    if (ret != 0) {
+        fprintf(stderr, "Error loading vendors database.\n");
+        databases_close(&my_stock);
+        return (ret);
+    }
+    ret = load_inventory_database(my_stock, inventory_file);
+    if (ret != 0) {
+        fprintf(stderr, "Error loading inventory database.\n");
+        databases_close(&my_stock);
+        return (ret);
+    }
+
+    /* close our environment and databases */
+    databases_close(&my_stock);
+                                                                                                                                
+    printf("Done loading databases.\n");
+    return (0);
+}
+

+ Notice that there is not a lot to this function because we have pushed + off all the database activity to other places. In particular our + databases are all opened and configured in + databases_setup() which we implemented in + The databases_setup() Function. +

+

+ Next we show the implementation of + load_vendors_database(). We load this data by + scanning (line by line) the contents of the + vendors.txt into a VENDOR structure. Once we have a + line scanned into the structure, we can store that structure into our + vendors database. +

+

+ Note that we use the vendor's name as the key here. In doing so, we + assume that the vendor's name is unique in our database. If it was not, + we would either have to select a different key, or architect our + application such that it could cope with multiple vendor records with + the same name. +

+ +
/*
+ * Loads the contents of the vendors.txt file into
+ * a database.
+ */
+int
+load_vendors_database(STOCK_DBS my_stock, char *vendor_file)
+{
+    DBT key, data;
+    FILE *ifp;
+    VENDOR my_vendor;
+    char buf[MAXLINE];
+                                                                                                                               
+    /* Open the vendor file for read access */
+    ifp = fopen(vendor_file, "r");
+    if (ifp == NULL) {
+        fprintf(stderr, "Error opening file '%s'\n", vendor_file);
+        return(-1);
+    }
+                                                                                                                               
+    /* Iterate over the vendor file */
+    while(fgets(buf, MAXLINE, ifp) != NULL) {
+        /* zero out the structure */
+        memset(&my_vendor, 0, sizeof(VENDOR));
+        /* Zero out the DBTs */
+        memset(&key, 0, sizeof(DBT));
+        memset(&data, 0, sizeof(DBT));
+
+        /*
+         * Scan the line into the structure.
+         * Convenient, but not particularly safe.
+         * In a real program, there would be a lot more
+         * defensive code here.
+         */
+        sscanf(buf,
+          "%20[^#]#%20[^#]#%20[^#]#%3[^#]#%6[^#]#%13[^#]#%20[^#]#%20[^\n]",
+          my_vendor.name, my_vendor.street,
+          my_vendor.city, my_vendor.state,
+          my_vendor.zipcode, my_vendor.phone_number,
+          my_vendor.sales_rep, my_vendor.sales_rep_phone);
+                                                                                                                               
+        /* 
+         * Now that we have our structure we can load it 
+         * into the database. 
+         */
+                                                                                                                               
+        /* Set up the database record's key */
+        key.data = my_vendor.name;
+        key.size = strlen(my_vendor.name) + 1;
+                                                                                                                               
+        /* Set up the database record's data */
+        data.data = &my_vendor;
+        data.size = sizeof(my_vendor);
+                                                                                                                               
+        /*
+         * Note that given the way we built our struct, there are extra
+         * bytes in it. Essentially we're using fixed-width fields with
+         * the unused portion of some fields padded with zeros. This
+         * is the easiest thing to do, but it does result in a bloated
+         * database. Look at load_inventory_data() for an example of how
+         * to avoid this.
+         */
+                                                                                                                               
+        /* Put the data into the database.
+         * Omitting error handling for clarity.
+         */
+        my_stock.vendor_dbp->put(my_stock.vendor_dbp, 0, &key, &data, 0);
+    } /* end vendors database while loop */
+                                                                                                                               
+    /* Close the vendor.txt file */
+    fclose(ifp);
+    return(0);
+} 
+

+ Finally, we need to write the + load_inventory_database() function. We made this function a + bit more complicated than is necessary by avoiding the use of a + structure to manage the data. Instead, we manually pack all our inventory + data into a single block of memory, and store that data in the + database. +

+

+ While this complicates our code somewhat, this approach allows us to + use the smallest amount of space possible for the data that we want to + store. The result is that our cache can be smaller than it might + otherwise be and our database will take less space on disk than if we used + a structure with fixed-length fields. +

+

+ For a trivial dataset such as what we use for these examples, these + resource savings are negligible. But if we were storing hundreds of + millions of records, then the cost savings may become significant. +

+

+ Before we actually implement our inventory loading function, it is useful + to create a simple utility function that copies a character array into a + buffer at a designated offset: +

+ +
/*
+ * Simple little convenience function that takes a buffer, a string,
+ * and an offset and copies that string into the buffer at the
+ * appropriate location. Used to ensure that all our strings
+ * are contained in a single contiguous chunk of memory.
+ */
+int
+pack_string(char *buffer, char *string, int start_pos)
+{
+    int string_size = strlen(string) + 1;
+
+    memcpy(buffer+start_pos, string, string_size);
+                                                                                                                               
+    return(start_pos + string_size);
+} 
+

+ That done, we can now load the inventory database: +

+ +
/*
+ * Loads the contents of the inventory.txt file into
+ * a database.
+ */
+int
+load_inventory_database(STOCK_DBS my_stock, char *inventory_file)
+{
+    DBT key, data;
+    char buf[MAXLINE];
+    char *databuf;
+    int bufLen, dataLen;
+    FILE *ifp;
+                                                                                                                               
+    /*
+     * Rather than lining everything up nicely in a struct, we're being
+     * deliberately a bit sloppy here. This function illustrates how to
+     * store mixed data that might be obtained from various locations
+     * in your application.
+     */
+    float price;
+    int quantity;
+    char category[MAXFIELD], name[MAXFIELD];
+    char vendor[MAXFIELD], sku[MAXFIELD];
+
+    /* Load the inventory database */
+    ifp = fopen(inventory_file, "r");
+    if (ifp == NULL) {
+        fprintf(stderr, "Error opening file '%s'\n", inventory_file);
+        return(-1);
+    }
+                                                                                                                               
+    /* Get our buffer. MAXDATABUF is some suitably large number */
+    databuf = malloc(MAXDATABUF);
+
+    /* 
+     * Read the inventory.txt file line by line, saving each line off to the
+     * database as we go.
+     */
+    while(fgets(buf, MAXLINE, ifp) != NULL) {
+        /*
+         * Scan the line into the appropriate buffers and variables.
+         * Convenient, but not particularly safe. In a real
+         * program, there would be a lot more defensive code here.
+         */
+        sscanf(buf,
+          "%20[^#]#%20[^#]#%f#%i#%20[^#]#%20[^\n]",
+          name, sku, &price, &quantity, category, vendor);
+                                                                                                                               
+        /*
+         * Now pack it into a single contiguous memory location for
+         * storage.
+         */
+        memset(databuf, 0, MAXDATABUF);
+        bufLen = 0;
+        dataLen = 0;
+                                                                                                                               
+        /* 
+         * We first store the fixed-length elements. This makes our code to
+         * retrieve this data from the database a little bit easier.
+         */
+
+        /* First discover how long the data element is. */
+        dataLen = sizeof(float);
+        /* Then copy it to our buffer */
+        memcpy(databuf, &price, dataLen);
+        /* 
+         * Then figure out how much data is actually in our buffer.
+         * We repeat this pattern for all the data we want to store.
+         */
+        bufLen += dataLen;
+                                                                                                                               
+        /* Rinse, lather, repeat. */
+        dataLen = sizeof(int);
+        memcpy(databuf + bufLen, &quantity, dataLen);
+        bufLen += dataLen;
+                                                                                                                               
+        bufLen = pack_string(databuf, name, bufLen);
+        bufLen = pack_string(databuf, sku, bufLen);
+        bufLen = pack_string(databuf, category, bufLen);
+        bufLen = pack_string(databuf, vendor, bufLen);
+
+        /* 
+         * Now actually save the contents of the buffer off 
+         * to our database. 
+         */
+
+        /* Zero out the DBTs */
+        memset(&key, 0, sizeof(DBT));
+        memset(&data, 0, sizeof(DBT));
+                                                                                                                               
+        /* 
+         * The key is the item's SKU. This is a unique value, so we need not
+         * support duplicates for this database. 
+         */
+        key.data = sku;
+        key.size = strlen(sku) + 1;
+                                                                                                                               
+        /* The data is the information that we packed into databuf. */
+        data.data = databuf;
+        data.size = bufLen;
+                                                                                                                               
+        /* Put the data into the database */
+        my_stock.inventory_dbp->put(my_stock.inventory_dbp, 0, &key, &data, 0);
+    } /* end inventory database while loop */
+                                                                                                                               
+    /* Cleanup */
+    fclose(ifp);
+    if (databuf != NULL)
+        free(databuf);
+                                                                                                                               
+    return(0);
+} 
+

+ In the next chapter we provide an example that shows how to read + the inventory and vendor databases. +

+
+
+ + + diff --git a/db/docs/gsg/C/DeleteEntryWCursor.html b/db/docs/gsg/C/DeleteEntryWCursor.html new file mode 100644 index 000000000..9e58c5c79 --- /dev/null +++ b/db/docs/gsg/C/DeleteEntryWCursor.html @@ -0,0 +1,109 @@ + + + + + + Deleting Records Using Cursors + + + + + + + + + +
+
+
+
+

Deleting Records Using Cursors

+
+
+
+
+

+ + To delete a record using a cursor, simply position the cursor to the + record that you want to delete and then call + + + + + DBC->c_del(). + + + +

+

For example:

+ +
#include <db.h>
+#include <string.h>
+
+...
+
+DB *dbp;
+DBC *cursorp;
+DBT key, data;
+char *key1str = "My first string";
+int ret;
+
+/* Database open omitted */
+
+/* Get the cursor */
+dbp->cursor(dbp, NULL, &cursorp, 0);
+
+/* Initialize our DBTs, then set up the search key. */
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+
+key.data = key1str;
+key.size = strlen(key1str) + 1;
+                                                                                                                               
+/* Iterate over the records that match the key, deleting each in turn. */
+while ((ret = cursorp->c_get(cursorp, &key,
+               &data, DB_SET)) == 0) {
+    cursorp->c_del(cursorp, 0);
+}
+
+/* Cursors must be closed */
+if (cursorp != NULL)
+    cursorp->c_close(cursorp); 
+
+if (dbp != NULL)
+    dbp->close(dbp, 0);
+
+ + + diff --git a/db/docs/gsg/C/Positioning.html b/db/docs/gsg/C/Positioning.html new file mode 100644 index 000000000..d97eb32f7 --- /dev/null +++ b/db/docs/gsg/C/Positioning.html @@ -0,0 +1,574 @@ + + + + + + Getting Records Using the Cursor + + + + + + + + + +
+
+
+
+

Getting Records Using the Cursor

+
+
+
+
+

+ To iterate over database records, from the first record to + the last, simply open the cursor and then use the + + DBC->c_get() + + method. + Note that you need to supply the + DB_NEXT flag to this method. + For example: +

+ +
#include <db.h>
+#include <string.h>
+
+...
+
+DB *my_database;
+DBC *cursorp;
+DBT key, data;
+int ret;
+
+/* Database open omitted for clarity */
+
+/* Get a cursor */
+my_database->cursor(my_database, NULL, &cursorp, 0); 
+
+/* Initialize our DBTs. */
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+                                                                                                                               
+/* Iterate over the database, retrieving each record in turn. */
+while ((ret = cursorp->c_get(cursorp, &key, &data, DB_NEXT)) == 0) {
+        /* Do interesting things with the DBTs here. */
+}
+if (ret != DB_NOTFOUND) {
+        /* Error handling goes here */
+}
+
+/* Cursors must be closed */
+if (cursorp != NULL) 
+    cursorp->c_close(cursorp); 
+
+if (my_database != NULL) 
+    my_database->close(my_database, 0);
+

+ To iterate over the database from the last record to the first, use + DB_PREV instead of DB_NEXT: +

+ +
#include <db.h>
+#include <string.h>
+
+...
+
+DB *my_database;
+DBC *cursorp;
+DBT key, data;
+int ret;
+
+/* Database open omitted for clarity */
+
+/* Get a cursor */
+my_database->cursor(my_database, NULL, &cursorp, 0); 
+
+/* Initialize our DBTs. */
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+                                                                                                                               
+/* Iterate over the database, retrieving each record in turn. */
+while ((ret = cursorp->c_get(cursorp, &key,
+      &data, DB_PREV)) == 0) {
+        /* Do interesting things with the DBTs here. */
+}
+if (ret != DB_NOTFOUND) {
+        /* Error handling goes here */
+}
+
+/* Cursors must be closed */
+if (cursorp != NULL) 
+    cursorp->c_close(cursorp); 
+
+if (my_database != NULL)
+    my_database->close(my_database, 0);
+
+
+
+
+

Searching for Records

+
+
+
+
+

+ You can use cursors to search for database records. You can search based + on just a key, or you can search based on both the key and the data. + You can also perform partial matches if your database supports sorted + duplicate sets. In all cases, the key and data parameters of these + methods are filled with the key and data values of the database record + to which the cursor is positioned as a result of the search. +

+

+ Also, if the search fails, then the cursor's state is left unchanged + and + DB_NOTFOUND + is returned. +

+

+ To use a cursor to search for a record, use + DBC->c_get(). + When you use this method, you can provide the following flags: +

+
+

Note

+

+ Notice in the following list that the cursor flags use the + keyword SET when the cursor examines just the key + portion of the records (in this case, the cursor is set to the + record whose key matches the value provided to the cursor). + Moreover, when the cursor uses the keyword GET, + then the cursor is positioned to both the key + and the data values provided to the cursor. +

+

+ Regardless of the keyword you use to get a record with a cursor, the + cursor's key and data + DBTs + + are filled with the data retrieved from the record to which the + cursor is positioned. +

+
+
+
    +
  • +

    + + DB_SET +

    +

    + Moves the cursor to the first record in the database with + the specified key. +

    +
  • +
  • +

    + + DB_SET_RANGE +

    +

    + Identical to + DB_SET + unless you are using the BTree access method. In this case, the cursor + moves + to the first record in the database whose + key is greater than or equal to the specified key. This comparison + is determined by the + comparison function + that you provide for the database. If no + comparison function + is provided, then the default lexicographical sorting is used. + A short sketch using this flag appears at the end of this section. +

    +

    + For example, suppose you have database records that use the + following + + strings + as keys: +

    +
    Alabama
    +Alaska
    +Arizona
    +

    + Then providing a search key of Alaska moves the + cursor to the second key noted above. Providing a key of + Al moves the cursor to the first key (Alabama), providing + a search key of Alas moves the cursor to the second key + (Alaska), and providing a key of Ar moves the + cursor to the last key (Arizona). +

    +
  • +
  • +

    + + DB_GET_BOTH +

    +

    + Moves the cursor to the first record in the database that uses + the specified key and data. +

    +
  • +
  • +

    + + DB_GET_BOTH_RANGE +

    +

    + Moves the cursor to the first record in the database whose key is + greater than or equal to the specified key. If the database supports + duplicate records, then on matching the key, the cursor is moved to + the duplicate record with the smallest data that is greater than or + equal to the specified data. +

    +

    + For example, + + suppose your database uses BTree + and it has + database records that use the following key/data pairs: +

    +
    Alabama/Athens
    +Alabama/Florence
    +Alaska/Anchorage
    +Alaska/Fairbanks
    +Arizona/Avondale
    +Arizona/Florence 
    +

    then providing:

    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    a search key of ...and a search data of ...moves the cursor to ...
    AlFlAlabama/Florence
    ArFlArizona/Florence
    AlFaAlaska/Fairbanks
    AlAAlabama/Athens
    +
    +
  • +
+
+

+ For example, assuming a database containing sorted duplicate records of + U.S. States/U.S. Cities key/data pairs (both as + strings), + the following code fragment can be used to position the cursor + to any record in the database and print its key/data values: +

+ +
#include <db.h>
+#include <string.h>
+
+...
+
+DBC *cursorp;
+DBT key, data;
+DB *dbp;
+int ret;
+char *search_data = "Fa";
+char *search_key = "Al";
+
+/* database open omitted for clarity */
+
+/* Get a cursor */
+dbp->cursor(dbp, NULL, &cursorp, 0);
+
+/* Initialize the DBTs, then set up our search key and data */
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+
+key.data = search_key;
+key.size = strlen(search_key) + 1;
+data.data = search_data;
+data.size = strlen(search_data) + 1;
+
+/*
+ * Position the cursor to the first record in the database whose
+ * key and data begin with the correct strings.
+ */
+ret = cursorp->c_get(cursorp, &key, &data, DB_GET_BOTH_RANGE);
+if (!ret) {
+    /* Do something with the data */
+} else {
+    /* Error handling goes here */
+}
+
+/* Close the cursor */
+if (cursorp != NULL)
+    cursorp->c_close(cursorp);
+
+/* Close the database */
+if (dbp != NULL)
+    dbp->close(dbp, 0); 
+
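+ The DB_SET_RANGE flag described above is used in the same way, except that + only the key needs to be provided. The following minimal sketch reuses the + Alabama/Alaska/Arizona keys shown earlier in this section; given the partial + key Alas, the cursor is positioned to Alaska: +

+ +
+#include <db.h>
+#include <string.h>
+
+...
+
+DB *dbp;
+DBC *cursorp;
+DBT key, data;
+int ret;
+char *search_key = "Alas";
+
+/* Database open omitted for clarity */
+
+/* Get a cursor */
+dbp->cursor(dbp, NULL, &cursorp, 0);
+
+/* Initialize the DBTs, then set up the search key */
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+key.data = search_key;
+key.size = strlen(search_key) + 1;
+
+/* Position the cursor to the first record whose key is >= "Alas" */
+ret = cursorp->c_get(cursorp, &key, &data, DB_SET_RANGE);
+if (!ret) {
+    /* With the keys shown above, key.data now refers to "Alaska" */
+    printf("key: %s\n", (char *)key.data);
+} else {
+    /* Error handling goes here */
+}
+
+/* Close the cursor */
+if (cursorp != NULL)
+    cursorp->c_close(cursorp);
+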
+
+
+
+
+

Working with Duplicate Records

+
+
+
+
+

+ A record is a duplicate of another record if the two records share the + same key. For duplicate records, only the data portion of the record is unique. +

+

+ Duplicate records are supported only for the BTree or Hash access methods. + For information on configuring your database to use duplicate records, + see Allowing Duplicate Records. +

+

+ If your database supports duplicate records, then it can potentially + contain multiple records that share the same key. + + + + By default, normal database + get operations will only return the first such record in a set + of duplicate records. Typically, subsequent duplicate records are + accessed using a cursor. + + + The following + + DBC->c_get() flags + + are interesting when working with databases that support duplicate records: +

+
+
    +
  • +

    + + + DB_NEXT, + DB_PREV + +

    +

    + Returns the next/previous record in the database, regardless of + whether it is a duplicate of the current record. For an example of + using these flags, see Getting Records Using the Cursor. +

    +
  • +
  • +

    + + DB_GET_BOTH_RANGE +

    +

    + Useful for seeking the cursor to a specific record, regardless of + whether it is a duplicate record. See Searching for Records for more + information. +

    +
  • +
  • +

    + + + DB_NEXT_NODUP, + DB_PREV_NODUP + +

    +

    + Gets the next/previous non-duplicate record in the database. This + allows you to skip over all the duplicates in a set of duplicate + records. If you call + + + c_get() + + with DB_PREV_NODUP, + + then the cursor is positioned to the last record for the previous + key in the database. For example, if you have the following records + in your database: +

    +
    Alabama/Athens
    +Alabama/Florence
    +Alaska/Anchorage
    +Alaska/Fairbanks
    +Arizona/Avondale
    +Arizona/Florence
    +

    + and your cursor is positioned to Alaska/Fairbanks, + and you then call + + + c_get() + + with DB_PREV_NODUP, + + then the cursor is positioned to Alabama/Florence. Similarly, if + you call + + + c_get() + + with DB_NEXT_NODUP, + + + then the cursor is positioned to the first record corresponding to + the next key in the database. +

    +

    + If there is no next/previous key in the database, then + DB_NOTFOUND + is returned, and the cursor is left unchanged. A second code fragment at + the end of this section shows DB_NEXT_NODUP in use. +

    +
  • +
  • +

    + + DB_NEXT_DUP +

    +

    + + Gets the + + next + record that shares the current key. If the + cursor is positioned at the last record in the duplicate set and + you call + + + DBC->c_get() + + with DB_NEXT_DUP, + + + then + + DB_NOTFOUND + is returned and the cursor is left unchanged. + +

    +
  • +
+
+

+ For example, the following code fragment positions a cursor to a key + + + + and displays it and all its + duplicates. + + +

+ +
#include <db.h>
+#include <string.h>
+
+...
+
+DB *dbp;
+DBC *cursorp;
+DBT key, data;
+int ret;
+char *search_key = "Al";
+
+/* database open omitted for clarity */
+
+/* Get a cursor */
+dbp->cursor(dbp, NULL, &cursorp, 0);
+
+/* Initialize and set up our DBTs */
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+key.data = search_key;
+key.size = strlen(search_key) + 1;
+
+/*
+ * Position the cursor to the first record in the database whose
+ * key and data begin with the correct strings.
+ */
+ret = cursorp->c_get(cursorp, &key, &data, DB_SET);
+while (ret != DB_NOTFOUND) {
+    printf("key: %s, data: %s\n", (char *)key.data, (char *)data.data);
+    ret = cursorp->c_get(cursorp, &key, &data, DB_NEXT_DUP);
+}
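+
+/*
+ * A minimal sketch (not part of the original example): the
+ * DB_NEXT_NODUP flag described above can be used with the same loop
+ * structure to visit just one record per key, skipping the rest of
+ * each duplicate set.
+ */
+ret = cursorp->c_get(cursorp, &key, &data, DB_FIRST);
+while (ret == 0) {
+    /* Display the first record found for this key */
+    printf("key: %s, data: %s\n", (char *)key.data, (char *)data.data);
+
+    /* Skip any remaining duplicates and move to the next key */
+    ret = cursorp->c_get(cursorp, &key, &data, DB_NEXT_NODUP);
+}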
+
+/* Close the cursor */
+if (cursorp != NULL)
+    cursorp->c_close(cursorp);
+
+/* Close the database */
+if (dbp != NULL)
+    dbp->close(dbp, 0); 
+
+
+ + + diff --git a/db/docs/gsg/C/PutEntryWCursor.html b/db/docs/gsg/C/PutEntryWCursor.html new file mode 100644 index 000000000..367b58b15 --- /dev/null +++ b/db/docs/gsg/C/PutEntryWCursor.html @@ -0,0 +1,224 @@ + + + + + + Putting Records Using Cursors + + + + + + + + + +
+
+
+
+

Putting Records Using Cursors

+
+
+
+
+

+ You can use cursors to put records into the database. DB's behavior + when putting records into the database differs depending on the flags + that you use when writing the record, on the access method that you are + using, and on whether your database supports sorted duplicates. +

+

+ Note that when putting records to the database using a cursor, the + cursor is positioned at the record you inserted. Also, you cannot + transactionally protect a put that is performed using a cursor; + if you want to transactionally protect your database writes, + put records using the database handle directly. +

+

+ You use + DBC->c_put() + + + to put (write) records to the database. You can use the following flags + with this method: +

+
+
    +
  • +

    + + DB_NODUPDATA +

    +

    + If the provided key already exists + in the database, then this method returns + DB_KEYEXIST. +

    +

    + If the key does not exist, then the order that the record is put into the database + is determined by the + + + insertion order in use by the database. If a comparison + function has been provided to the database, the record is + inserted in its sorted location. Otherwise (assuming BTree), + lexicographical sorting is used, with + shorter items collating before longer items. + +

    +

    + This flag can only be used for the BTree and Hash access methods, + and only if the database has been configured to support sorted + duplicate data items (DB_DUPSORT was specified at + database creation time). +

    +

    + This flag cannot be used with the Queue or Recno access methods. +

    +

    + For more information on duplicate records, see + Allowing Duplicate Records. +

    +
  • +
  • +

    + + DB_KEYFIRST +

    +

    + For databases that do not support duplicates, this method behaves + + + exactly the same as if a default insertion was performed. + + If the database supports duplicate records, + + + and a duplicate sort function has been specified, the + inserted data item is added in its sorted location. If + the key already exists in the database and no duplicate + sort function has been specified, the inserted data item + is added as the first of the data items for that key. + +

    +
  • +
  • +

    + + DB_KEYLAST +

    +

    + Behaves exactly as if + DB_KEYFIRST + + was used, except that if the key already exists in the database and no + duplicate sort function has been specified, the + inserted data item is added as the last of the data + items for that key. +

    +
  • +
+
+

For example:

+ +
#include <db.h>
+#include <string.h>
+
+...
+
+DB *dbp;
+DBC *cursorp;
+DBT data1, data2, data3;
+DBT key1, key2;
+char *key1str = "My first string";
+char *data1str = "My first data";
+char *key2str = "A second string";
+char *data2str = "My second data";
+char *data3str = "My third data";
+int ret;
+
+/* Initialize and set up our DBTs */
+memset(&key1, 0, sizeof(DBT));
+memset(&key2, 0, sizeof(DBT));
+memset(&data1, 0, sizeof(DBT));
+memset(&data2, 0, sizeof(DBT));
+memset(&data3, 0, sizeof(DBT));
+key1.data = key1str;
+key1.size = strlen(key1str) + 1;
+data1.data = data1str;
+data1.size = strlen(data1str) + 1;
+
+key2.data = key2str;
+key2.size = strlen(key2str) + 1;
+data2.data = data2str;
+data2.size = strlen(data2str) + 1;
+data3.data = data3str;
+data3.size = strlen(data3str) + 1;
+
+/* Database open omitted */
+
+/* Get the cursor */
+dbp->cursor(dbp, NULL, &cursorp, 0);
+
+/* 
+ * Assuming an empty database, this first put places
+ * "My first string"/"My first data" in the first 
+ * position in the database
+ */
+ret = cursorp->c_put(cursorp, &key1, 
+  &data1, DB_KEYFIRST); 
+
+/*
+ * This put places "A second string"/"My second data" in the
+ * database according to how its key sorts against the key
+ * used for the currently existing database record. Most likely
+ * this record would appear first in the database.
+ */
+ret = cursorp->c_put(cursorp, &key2, 
+  &data2, DB_KEYFIRST); /* Added according to sort order */
+
+/*
+ * If duplicates are not allowed, the currently existing record that 
+ * uses "key2" is overwritten with the data provided on this put.
+ * That is, the record "A second string"/"My second data" becomes
+ * "A second string"/"My third data"
+ *
+ * If duplicates are allowed, then "My third data" is placed in the
+ * duplicates list according to how it sorts against "My second data".
+ */
+ret = cursorp->c_put(cursorp, &key2, 
+  &data3, DB_KEYFIRST); /* If duplicates are not allowed, record 
+                         * is overwritten with new data. Otherwise, 
+                         * the record is added to the beginning of 
+                         * the duplicates list.
+                         */ 
+
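+
+ Continuing the fragment above, and assuming the database was created with DB_DUPSORT, the following minimal sketch (not part of the original example) shows how the DB_NODUPDATA flag described earlier reports an attempt to insert a key/data pair that is already present:
+
+/*
+ * "A second string"/"My third data" was inserted above, so attempting
+ * to insert the same key/data pair again with DB_NODUPDATA returns
+ * DB_KEYEXIST.
+ */
+ret = cursorp->c_put(cursorp, &key2, &data3, DB_NODUPDATA);
+if (ret == DB_KEYEXIST) {
+    /* The key/data pair already exists; nothing was added */
+} else if (ret != 0) {
+    /* Error handling goes here */
+}
+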
+ + + diff --git a/db/docs/gsg/C/ReplacingEntryWCursor.html b/db/docs/gsg/C/ReplacingEntryWCursor.html new file mode 100644 index 000000000..a1b89224f --- /dev/null +++ b/db/docs/gsg/C/ReplacingEntryWCursor.html @@ -0,0 +1,137 @@ + + + + + + Replacing Records Using Cursors + + + + + + + + + +
+
+
+
+

Replacing Records Using Cursors

+
+
+
+
+

+ You replace the data for a database record by using + + + + + + DBC->c_put() + + with the DB_CURRENT flag. + + +

+ +
#include <db.h>
+#include <string.h>
+
+...
+
+DB *dbp;
+DBC *cursorp;
+DBT key, data;
+char *key1str = "My first string";
+char *replacement_data = "replace me";
+int ret;
+
+/* Initialize our DBTs. */
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+
+/* Set up our key DBT */
+key.data = key1str;
+key.size = strlen(key1str) + 1;
+
+/* Database open omitted */
+
+/* Get the cursor */
+dbp->cursor(dbp, NULL, &cursorp, 0);
+                                                                                                                               
+/* Position the cursor */
+ret = cursorp->c_get(cursorp, &key, &data, DB_SET);
+if (ret == 0) {
+    data.data = replacement_data;
+    data.size = strlen(replacement_data) + 1;
+    cursorp->c_put(cursorp, &key, &data, DB_CURRENT);
+}
+
+/* Cursors must be closed */
+if (cursorp != NULL) 
+    cursorp->c_close(cursorp); 
+
+if (dbp != NULL)
+    dbp->close(dbp, 0);
+

+ Note that you cannot change a record's key using this method; the key + parameter is always ignored when you replace a record. +

+

+ When replacing the data portion of a record, if you are replacing a + record that is a member of a sorted duplicates set, then the replacement + will be successful only if the new record sorts identically to the old + record. This means that if you are using the default + lexicographic sort, the replacement will fail unless the new data is + identical to the old, because any other data would violate the + sort order. However, if you + provide a custom sort routine that, for example, sorts based on just a + few bytes out of the data item, then potentially you can perform + a direct replacement and still not violate the restrictions described + here. +

+

+ Under these circumstances, if + + you want to replace the data contained by a duplicate record, + + and you are not using a custom sort routine, then + + delete the record and create a new record with the desired key and data. +

+
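+
+ As an illustration only, the following minimal sketch shows one way to perform that delete-and-re-add sequence. It assumes the handles and DBTs from the earlier fragment, with the cursor still positioned (via DB_SET) to the duplicate record that you want to replace:
+
+/* Assumes cursorp is still positioned to the duplicate record */
+ret = cursorp->c_del(cursorp, 0);
+if (ret == 0) {
+    /* Re-add the record under the same key with the new data */
+    data.data = replacement_data;
+    data.size = strlen(replacement_data) + 1;
+    ret = dbp->put(dbp, NULL, &key, &data, 0);
+}
+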
+ + + diff --git a/db/docs/gsg/C/accessmethods.html b/db/docs/gsg/C/accessmethods.html new file mode 100644 index 000000000..02469bb95 --- /dev/null +++ b/db/docs/gsg/C/accessmethods.html @@ -0,0 +1,281 @@ + + + + + + Access Methods + + + + + + + + + +
+
+
+
+

Access Methods

+
+
+
+
+

+ While this manual will focus primarily on the BTree access method, it is + still useful to briefly describe all of the access methods that DB + makes available. +

+

+ Note that an access method can be selected only when the database is + created. Once selected, actual API usage is generally + identical across all access methods. That is, while some + exceptions exist, mechanically you interact with the library in the same + way regardless of which access method you have selected. +

+

+ The access method that you should choose is gated first by what you want + to use as a key, and then secondly by the performance that you see + for a given access method. +

+

+ The following are the available access methods: +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Access MethodDescription
BTree +

+ Data is stored in a sorted, balanced tree structure. + Both the key and the data for BTree records can be + arbitrarily complex. That is, they can contain single values + such as an integer or a string, or complex types such as a + structure. Also, although not the default + behavior, it is possible for two records to + use keys that compare as equal. When this occurs, the + records are considered to be duplicates of one another. +

+
Hash +

+ Data is stored in an extended linear hash table. Like + BTree, the key and the data used for Hash records can be + arbitrarily complex. Also, like BTree, duplicate + records are optionally supported. +

+
Queue +

+ Data is stored in a queue as fixed-length records. Each + record uses a logical record number as its key. This access + method is designed for fast inserts at the tail of the + queue, and it has a special operation that deletes and + returns a record from the head of the queue. +

+

+ This access method is unusual in that it provides record + level locking. This can provide + beneficial performance improvements in applications + requiring concurrent access to the queue. +

+
Recno +

+ Data is stored in either fixed or variable-length records. + Like Queue, Recno records use logical record numbers as keys. +

+
+
+
+
+
+
+

Selecting Access Methods

+
+
+
+
+

+ To select an access method, you should first consider what you want + to use as a key for your database records. If you want to use + arbitrary data (even strings), then you should use either BTree or + Hash. If you want to use logical record numbers (essentially + integers) then you should use Queue or Recno. +

+

+ Once you have made this decision, you must choose between either + BTree or Hash, or Queue or Recno. This decision is described next. +

+
+
+
+
+
+

Choosing between BTree and Hash

+
+
+
+
+

+ For small working datasets that fit entirely in memory, there is no + difference between BTree and Hash. Both will perform just as well + as the other. In this situation, you might just as well use BTree, + if for no other reason than the majority of DB applications use + BTree. +

+

+ Note that the main concern here is your + working dataset, not your entire dataset. Many applications maintain + large amounts of information but only need to access some small + portion of that data with any frequency. So what you want to + consider is the data that you will routinely use, not the sum total + of all the data managed by your application. +

+

+ However, as your working dataset grows to the point + where you cannot fit it all into memory, then you need to take more + care when choosing your access method. Specifically, choose: +

+
+
    +
  • +

    + BTree if your keys have some locality of reference. That is, + if they sort well and you can expect that a query for a + given key will likely be followed by a query for one of its + neighbors. +

    +
  • +
  • +

    + Hash if your dataset is extremely large. For any given + access method, DB must maintain a certain amount of internal + information. However, the amount of information that DB + must maintain for BTree is much greater than for Hash. The + result is that as your dataset grows, this internal + information can dominate the cache to the point where there + is relatively little space left for application data. + As a result, BTree can be forced to perform disk I/O much more + frequently than would Hash given the same amount of data. +

    +

+ Moreover, if your dataset becomes so large that DB will + almost certainly have to perform disk I/O to satisfy a + random request, then Hash will definitely outperform BTree + because it has fewer internal records to search through than + does BTree. +

    +
  • +
+
+
+
+
+
+
+

Choosing between Queue and Recno

+
+
+
+
+

+ Queue or Recno are used when the application wants to use logical + record numbers for the primary database key. Logical record numbers + are essentially integers that uniquely identify the database + record. They can be either mutable or fixed, where a mutable record + number is one that might change as database records are stored or + deleted. Fixed logical record numbers never change regardless of + what database operations are performed. +

+

+ When deciding between Queue and Recno, choose: +

+
+
    +
  • +

    + Queue if your application requires high degrees of + concurrency. Queue provides record-level locking (as opposed + to the page-level locking that the other access methods + use), and this can result in significantly faster throughput + for highly concurrent applications. +

    +

    + Note, however, that Queue provides support only for fixed + length records. So if the size of the data that you want to + store varies widely from record to record, you should + probably choose an access method other than Queue. +

    +
  • +
  • +

    + Recno if you want mutable record numbers. Queue is only + capable of providing fixed record numbers. Also, Recno + provides support for databases whose permanent storage is a + flat text file. This is useful for applications looking for + fast, temporary storage while the data is being read or + modified. +

    +
  • +
+
+
+
+ + + diff --git a/db/docs/gsg/C/btree.html b/db/docs/gsg/C/btree.html new file mode 100644 index 000000000..eabed1e6c --- /dev/null +++ b/db/docs/gsg/C/btree.html @@ -0,0 +1,572 @@ + + + + + + BTree Configuration + + + + + + + + +
+
+
+
+

BTree Configuration

+
+
+
+
+

+ In going through the previous chapters in this book, you may notice that + we touch on some topics that are specific to BTree, but we do not cover + those topics in any real detail. In this section, we will discuss + configuration issues that are unique to BTree. +

+

+ Specifically, in this section we describe: +

+
+
    +
  • +

    + Allowing duplicate records. +

    +
  • +
  • +

    + Setting comparator callbacks. +

    +
  • +
+
+
+
+
+
+

Allowing Duplicate Records

+
+
+
+
+

+ BTree databases can contain duplicate records. One record is + considered to be a duplicate of another when both records use keys + that compare as equal to one another. +

+

+ By default, keys are compared using a lexicographical comparison, + with shorter keys collating before longer keys. + You can override this default using the + DB->set_bt_compare() + + + method. See the next section for details. +

+

+ By default, DB databases do not allow duplicate records. As a + result, any attempt to write a record that uses a key equal to a + previously existing record results in the previously existing record + being overwritten by the new record. +

+

+ Allowing duplicate records is useful if you have a database that + contains records keyed by a commonly occurring piece of information. + It is frequently necessary to allow duplicate records for secondary + databases. +

+

+ For example, suppose your primary database contained records related + to automobiles. You might in this case want to be able to find all + the automobiles in the database that are of a particular color, so + you would index on the color of the automobile. However, for any + given color there will probably be multiple automobiles. Because the + color is the secondary key, multiple secondary + database records will share the same key, and so the secondary + database must support duplicate records. +

+
+
+
+
+

Sorted Duplicates

+
+
+
+
+

+ Duplicate records can be stored in sorted or unsorted order. + You can cause DB to automatically sort your duplicate + records by + + specifying the DB_DUPSORT flag at + database creation time. + + +

+

+ If sorted duplicates are supported, then the + + sorting function specified on + DB->set_dup_compare() + + + + is used to determine the location of the duplicate record in its + duplicate set. If no such function is provided, then the default + lexicographical comparison is used. +

+
+
+
+
+
+

Unsorted Duplicates

+
+
+
+
+

+ For performance reasons, BTrees should always contain sorted + records. (BTrees containing unsorted entries must potentially + spend a great deal more time locating an entry than does a BTree + that contains sorted entries). That said, DB provides support + for suppressing automatic sorting of duplicate records because it may be that + your application is inserting records that are already in a + sorted order. +

+

+ That is, if the database is configured to support unsorted + duplicates, then the assumption is that your application + will manually perform the sorting. In this event, + expect to pay a significant performance penalty. Any time you + place records into the database in a sort order not known to + DB, you will pay a performance penalty. +

+

+ That said, this is how DB behaves when inserting records + into a database that supports non-sorted duplicates: +

+
+
    +
  • +

+ If your application simply adds a duplicate record using + DB->put(), + + + then the record is inserted at the end of its duplicate set. +

    +
  • +
  • +

    + If a cursor is used to put the duplicate record to the database, + then the new record is placed in the duplicate set according to the + flags that are provided on the + DBC->c_put() + + method. The relevant flags are: +

    +
    +
      +
    • +

      + DB_AFTER + +

      +

      + The data + + provided on the call to + DBC->c_put() + + + is placed into the database + as a duplicate record. The key used for this operation is + the key used for the record to which the cursor currently + refers. Any key provided on the call + + + to + DBC->c_put() + + + + is therefore ignored. +

      +

      + The duplicate record is inserted into the database + immediately after the cursor's current position in the + database. +

      +

      + This flag is ignored if sorted duplicates are supported for + the database. +

      +
    • +
    • +

      + DB_BEFORE + +

      +

      + Behaves the same as + DB_AFTER + + except that the new record is inserted immediately before + the cursor's current location in the database. +

      +
    • +
    • +

      + DB_KEYFIRST + +

      +

      + If the key + + provided on the call to + DBC->c_put() + + + already exists in the + database, and the database is configured to use duplicates + without sorting, then the new record is inserted as the first entry + in the appropriate duplicates list. +

      +
    • +
    • +

      + DB_KEYLAST + +

      +

      + Behaves identically to + DB_KEYFIRST + + except that the new duplicate record is inserted as the last + record in the duplicates list. +

      +
    • +
    +
    +
  • +
+
+
+
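+
+ For example, the following fragment is a minimal sketch of inserting an unsorted duplicate with the DB_AFTER flag described above. It is not part of the original text; it assumes open DB and cursor handles named dbp and cursorp, the usual db.h and string.h includes, a database created with DB_DUP (not DB_DUPSORT), and purely illustrative key and data values:
+
+DBT key, data, dup_data;
+char *keystr = "existing key";
+char *dupstr = "new duplicate data";
+int ret;
+
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+memset(&dup_data, 0, sizeof(DBT));
+
+/* Position the cursor to the record we want the duplicate to follow */
+key.data = keystr;
+key.size = strlen(keystr) + 1;
+ret = cursorp->c_get(cursorp, &key, &data, DB_SET);
+if (ret == 0) {
+    /*
+     * The key parameter is ignored for DB_AFTER; the new data is
+     * inserted immediately after the cursor's current position.
+     */
+    dup_data.data = dupstr;
+    dup_data.size = strlen(dupstr) + 1;
+    ret = cursorp->c_put(cursorp, &key, &dup_data, DB_AFTER);
+}
+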
+
+
+
+

Configuring a Database to Support Duplicates

+
+
+
+
+

+ Duplicates support can only be configured + at database creation time. You do this by specifying the appropriate + + flags to + DB->set_flags() + + + + before the database is opened for the first time. +

+

+ The + flags + + that you can use are: +

+
+
    +
  • +

    + DB_DUP + +

    +

    + The database supports non-sorted duplicate records. +

    +
  • +
  • +

    + DB_DUPSORT + +

    +

    + The database supports sorted duplicate records. +

    +
  • +
+
+

+ The following code fragment illustrates how to configure a database + to support sorted duplicate records: +

+ +
#include <db.h>
+...
+
+DB *dbp;
+FILE *error_file_pointer;
+int ret;
+char *program_name = "my_prog";
+char *file_name = "mydb.db";
+
+/* Variable assignments omitted for brevity */
+
+/* Initialize the DB handle */
+ret = db_create(&dbp, NULL, 0);
+if (ret != 0) {
+    fprintf(error_file_pointer, "%s: %s\n", program_name,
+        db_strerror(ret));
+    return(ret);
+}
+
+/* Set up error handling for this database */
+dbp->set_errfile(dbp, error_file_pointer);
+dbp->set_errpfx(dbp, program_name);
+                                                                                                                                  
+/*
+ * Configure the database for sorted duplicates
+ */
+ret = dbp->set_flags(dbp, DB_DUPSORT);
+if (ret != 0) {
+    dbp->err(dbp, ret, "Attempt to set DUPSORT flag failed.");
+    dbp->close(dbp, 0);
+    return(ret);
+}
+                                                                                                                                  
+/* Now open the database */
+ret = dbp->open(dbp,        /* Pointer to the database */
+                NULL,       /* Txn pointer */
+                file_name,  /* File name */
+                NULL,       /* Logical db name (unneeded) */
+                DB_BTREE,   /* Database type (using btree) */
+                DB_CREATE,  /* Open flags */
+                0);         /* File mode. Using defaults */
+if (ret != 0) {
+    dbp->err(dbp, ret, "Database '%s' open failed.", file_name);
+    dbp->close(dbp, 0);
+    return(ret);
+} 
+
+
+
+
+
+
+

Setting Comparison Functions

+
+
+
+
+

+ By default, DB uses a lexicographical comparison function where + shorter records collate before longer records. For the majority of + cases, this comparison works well and you do not need to manage + it in any way. +

+

+ However, in some situations your application's performance can + benefit from setting a custom comparison routine. You can do this + either for database keys, or for the data if your + database supports sorted duplicate records. +

+

+ Some of the reasons why you may want to provide a custom sorting + function are: +

+
+
    +
  • +

    + Your database is keyed using strings and you want to provide + some sort of language-sensitive ordering to that data. Doing + so can help increase the locality of reference that allows + your database to perform at its best. +

    +
  • +
  • +

    + You are using a little-endian system (such as x86) and you + are using integers as your database's keys. Berkeley DB + stores keys as byte strings and little-endian integers + do not sort well when viewed as byte strings. There are + several solutions to this problem, one being to provide a + custom comparison function. See + http://www.sleepycat.com/docs/ref/am_misc/faq.html + for more information. +

    +
  • +
  • +

+ You do not want the entire key to participate in the + comparison, for whatever reason. In + this case, you may want to provide a custom comparison + function so that only the relevant bytes are examined. +

    +
  • +
+
+
+
+
+
+

+ Creating Comparison Functions + +

+
+
+
+
+

+ You set a BTree's key + + comparison function + + + using + DB->set_bt_compare(). + + + You can also set a BTree's duplicate data comparison function using + DB->set_dup_compare(). + + + +

+

+ + You cannot use these methods after the database has been opened. + Also, if + + + the database already exists when it is opened, the + + function + + + provided to these methods must be the same as + that historically used to create the database or corruption can + occur. +

+

+ The value that you provide to the set_bt_compare() method + is a pointer to a function that has the following signature: +

+
int (*function)(DB *db, const DBT *key1, const DBT *key2)
+

+ This function must return an integer value less than, equal to, + or greater than 0. If key1 is considered to be greater than + key2, then the function must return a value that is greater than + 0. If the two are equal, then the function must return 0, and if + the first key is less than the second then the function must return + a negative value. +

+

+ The function that you provide to set_dup_compare() + works in exactly the same way, except that the + DBT + + parameters hold record data items instead of keys. +

+

+ For example, a routine that sorts integer + keys in the database might look like this: + + + +

+ +
int
+compare_int(DB *dbp, const DBT *a, const DBT *b)
+{
+    int ai, bi;
+
+    /* 
+     * Returns: 
+     * < 0 if a < b 
+     * = 0 if a = b 
+     * > 0 if a > b 
+     */ 
+    memcpy(&ai, a->data, sizeof(int)); 
+    memcpy(&bi, b->data, sizeof(int)); 
+    return (ai - bi); 
+} 
+

+ Note that the data must first be copied into memory that is + appropriately aligned, as Berkeley DB does not guarantee any kind of + alignment of the underlying data, including for comparison routines. + When writing comparison routines, remember that databases created on + machines of different architectures may have different integer byte + orders, for which your code may need to compensate. +

+

+ To cause DB to use this comparison function: +

+ +
#include <db.h>
+#include <stdio.h>
+#include <string.h>
+
+...
+                                                                                                                                      
+DB *dbp;
+int ret;
+                                                                                                                                      
+/* Create a database */
+ret = db_create(&dbp, NULL, 0);
+if (ret != 0) {
+        fprintf(stderr, "%s: %s\n", "my_program",
+          db_strerror(ret));
+        return(-1);
+}
+                                                                                                                                      
+/* Set up the btree comparison function for this database */
+dbp->set_bt_compare(dbp, compare_int);
+
+/* Database open call follows sometime after this. */ 
+
+
+
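+
+ If the database also uses sorted duplicates, a duplicate data comparison function can be registered in the same way. The following minimal sketch (an illustration, not part of the original example) reuses the compare_int() routine shown above to sort duplicate data items, and assumes DB_DUPSORT is set before the database is opened:
+
+/* Configure the database for sorted duplicates */
+dbp->set_flags(dbp, DB_DUPSORT);
+
+/* Sort duplicate data items as integers using compare_int() */
+dbp->set_dup_compare(dbp, compare_int);
+
+/* Database open call follows sometime after this. */
+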
+ + + diff --git a/db/docs/gsg/C/cachesize.html b/db/docs/gsg/C/cachesize.html new file mode 100644 index 000000000..21faf2439 --- /dev/null +++ b/db/docs/gsg/C/cachesize.html @@ -0,0 +1,98 @@ + + + + + + Selecting the Cache Size + + + + + + + + + +
+
+
+
+

Selecting the Cache Size

+
+
+
+
+

+ Cache size is important to your application because if it is set too + small, your application's performance will suffer from too + much disk I/O. On the other hand, if your cache is too large, then your + application will use more memory than it actually needs. + Moreover, if your application uses too much memory, then on most + operating systems this can result in your application being swapped out + of memory, resulting in extremely poor performance. +

+

+ You select your cache size using either + + DB->set_cachesize(), or + DB_ENV->set_cachesize(), + + + + depending on whether you are using a database environment or not. Your + cache size must be a power of 2, but it is otherwise limited only by + available memory and performance considerations. +

+

+ Selecting a cache size is something of an art, but fortunately it is + selected at database (or environment) open time, so it can be easily + tuned to your application's data requirements as they change over time. + The best way to determine how large your cache needs to be is to put + your application into a production environment and watch to see how much + disk I/O is occurring. If your application is going to disk quite a lot + to retrieve database records, then you should increase the size of your + cache (provided that you have enough memory to do so). +

+

+ You can use the db_stat command line utility with the + -m option to gauge the effectiveness of your cache. + In particular, the number of pages found in the cache is shown, along + with a percentage value. The closer to 100% that you can get, the + better. If this value drops too low, and you are experiencing + performance problems, then you should consider increasing the size of + your cache, assuming you have memory to support it. +

+
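+
+ For example, the following fragment is a minimal sketch of setting a 64 megabyte cache on a database handle before the database is opened. The specific size is purely illustrative; choose a value appropriate to your working dataset:
+
+DB *dbp;
+int ret;
+
+/* db_create() call omitted for clarity */
+
+/* Request a 64MB cache contained in a single cache region */
+ret = dbp->set_cachesize(dbp,
+                         0,                /* Gigabytes of cache */
+                         64 * 1024 * 1024, /* Bytes of cache */
+                         1);               /* Number of caches */
+if (ret != 0) {
+    /* Error handling goes here */
+}
+
+/* Database open call follows sometime after this. */
+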
+ + + diff --git a/db/docs/gsg/C/concepts.html b/db/docs/gsg/C/concepts.html new file mode 100644 index 000000000..8404051db --- /dev/null +++ b/db/docs/gsg/C/concepts.html @@ -0,0 +1,168 @@ + + + + + + Berkeley DB Concepts + + + + + + + + + +
+
+
+
+

Berkeley DB Concepts

+
+
+
+
+

+ Before continuing, it is useful to describe some of the larger concepts + that you will encounter when building a DB application. +

+

+ Conceptually, DB databases contain records. + Logically each record represents a single entry in the database. + Each such record contains two pieces of information: a key and a data item. + This manual will on occasion describe a record's + key or a record's data when it is + necessary to speak to one or the other portion of a database + record. +

+

+ Because of the key/data pairing used for DB databases, they are + sometimes thought of as a two-column table. However, data (and + sometimes keys, depending on the access method) can hold arbitrarily + complex data. Frequently, C structures and other such mechanisms are + stored in the record. This effectively turns a 2-column table + into a table with n columns, where + n-1 of those columns are provided by the structure's + fields. +

+

+ Note that a DB database is very much like a table in a relational + database system in that most DB applications use more than one + database (just as most relational databases use more than one table). +

+

+ Unlike relational systems, however, a DB database contains a single + collection of records organized according to a given access method + (BTree, Queue, Hash, and so forth). In a relational database system, + the underlying access method is generally hidden from you. +

+

+ In any case, frequently DB + applications are designed so that a single database stores a specific + type of data (just as in a relational database system, a single table + holds entries containing a specific set of fields). Because most applications + are required to manage multiple kinds of data, a DB application will + often use multiple databases. +

+

+ For example, consider an accounting application. This kind of an + application may manage data based on bank accounts, checking + accounts, stocks, bonds, loans, and so forth. An accounting application + will also have to manage information about people, banking institutions, + customer accounts, and so on. In a traditional relational database, all + of these different kinds of information would be stored and managed + using a (probably very) complex series of tables. In a DB + application, all of this information would instead be divided out and + managed using multiple databases. +

+

+ DB applications can efficiently use multiple databases using an + optional mechanism called an environment. + For more information, see Environments. +

+

+ You interact with most DB APIs using special structures that + contain pointers to functions. These callbacks are + called methods because they look so much like a + method on a C++ class. The variable that you use to access these + methods is often referred to as a + handle. For example, to use a database you will + obtain a handle to that database. +

+

+ Retrieving a record from a database is sometimes called + getting the record because the method that you use + to retrieve the records is called get(). + Similarly, storing database records is sometimes called + putting the record because you use the + put() method to do this. +

+

+ When you store, or put, a record to a database using its handle, the + record is stored according to whatever sort order is in use by the + database. Sorting is mostly performed based on the key, but sometimes + the data is considered too. If you put a record using a key that already + exists in the database, then the existing record is replaced with the + new data. However, if the database supports + duplicate records (that is, records with identical keys but + different data), then that new record is stored as a duplicate record and + any existing records are not overwritten. +

+

+ If a database supports duplicate records, then you can use a database + handle to retrieve only the first record in a set of duplicate records. +

+

+ In addition to using a database handle, you can also read and write data using a + special mechanism called a cursor. Cursors are + essentially iterators that you can use to walk over the records in a + database. You can use cursors to iterate over a database from the first + record to the last, and from the last to the first. You can also use + cursors to seek to a record. In the event that a database supports + duplicate records, cursors are the only way you can access all the + records in a set of duplicates. +

+

+ Finally, DB provides a special kind of a database called a + secondary database. Secondary databases serve as an + index into normal databases (called primary databases to distinguish them + from secondaries). Secondary databases are interesting because DB + records can hold complex data types, but seeking to a given record is + performed only based on that record's key. If you want to be able to + seek to a record based on some piece of information that is not the key, + then you can enable this through the use of secondary databases. +

+
+ + + diff --git a/db/docs/gsg/C/coredbclose.html b/db/docs/gsg/C/coredbclose.html new file mode 100644 index 000000000..55f99ffb3 --- /dev/null +++ b/db/docs/gsg/C/coredbclose.html @@ -0,0 +1,106 @@ + + + + + + Closing Databases + + + + + + + + + +
+
+
+
+

Closing Databases

+
+
+
+
+

+ Once you are done using the database, you must close it. You use the + DB->close() + + method to do this. +

+

+ Closing a database causes it to become unusable until it is opened + again. Note that you should make sure that any open cursors are closed + before closing your database. Active cursors during a database + close can cause unexpected results, especially if any of those cursors are + writing to the database. You should always make sure that all your + database accesses have completed before closing your database. +

+

+ Cursors are described in Using Cursors later in this manual. +

+

+ Be aware that when you close the last open handle + for a database, then by default its cache is flushed to disk. + This means that any information that has + been modified in the cache is guaranteed to be written to disk when the + last handle is closed. You can manually perform this operation using + the + DB->sync() + + + method, but for normal shutdown operations it is not necessary. + For more information about syncing your cache, see + Data Persistence. +

+

The following code fragment illustrates a database close:

+ +
#include <db.h>
+...
+DB *dbp;           /* DB struct handle */
+...
+
+/*
+ * Database open and access operations
+ * happen here.
+ */
+
+...
+
+/* When we're done with the database, close it. */
+if (dbp != NULL)
+    dbp->close(dbp, 0); 
+
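+
+ If you need to flush modified records to disk without closing the handle, you can call DB->sync() directly. A minimal sketch, assuming the same dbp handle as above:
+
+/* Flush any cached database writes to disk */
+if (dbp != NULL)
+    dbp->sync(dbp, 0);
+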
+ + + diff --git a/db/docs/gsg/C/coreindexusage.html b/db/docs/gsg/C/coreindexusage.html new file mode 100644 index 000000000..ae95233a2 --- /dev/null +++ b/db/docs/gsg/C/coreindexusage.html @@ -0,0 +1,728 @@ + + + + + + Secondary Database Example + + + + + + + + + +
+
+
+
+

Secondary Database Example

+
+
+
+
+

+ In previous chapters in this book, we built applications that load + and display several DB databases. In this example, we will extend those + examples to use secondary databases. Specifically: +

+
+ +
+
+
+
+
+

Secondary Databases with + example_database_load + +

+
+
+
+
+

+ example_database_load + + uses several utility + functions to open and close its databases. In order to cause + example_database_load + + to maintain an index of + inventory item names, all we really need to do is update the utility + functions to: +

+
+
    +
  1. +

    + Create a new database to be used as a secondary database. +

    +
  2. +
  3. +

    + Associate our new database to the inventory primary + database. +

    +
  4. +
  5. +

    + Close the secondary database when we close the rest of our + databases. +

    +
  6. +
+
+

+ We also need a function that can create our secondary keys for us. +

+

+ Because DB maintains secondary databases for us, once this work + is done we need not make any other changes to + example_database_load. + + + Therefore, we can limit + all our work to the code found in gettingstarted_common.h + and gettingstarted_common.c. + +

+

+ Remember that you can find the complete implementation of these functions + in: +

+
DB_INSTALL/examples_c/getting_started
+

+ where DB_INSTALL is the location where you + placed your DB distribution. +

+

+ To begin, we need to update the stock_dbs + structure to accommodate the additional database. We defined this + structure in gettingstarted_common.h. We can + limit our update to this file to just that structure definition: +

+

+ Remember that new code is in bold. +

+ +
/* file: gettingstarted_common.h */
+#include <db.h>
+
+typedef struct stock_dbs {
+    DB *inventory_dbp; /* Database containing inventory information */
+    DB *vendor_dbp;    /* Database containing vendor information */
+    DB *itemname_sdbp; /* Index based on the item name */
+                                                                                                                                
+    char *db_home_dir;       /* Directory containing the database files */
+    char *itemname_db_name;  /* Itemname secondary database */
+    char *inventory_db_name; /* Name of the inventory database */
+    char *vendor_db_name;    /* Name of the vendor database */
+} STOCK_DBS;
+
+/* Function prototypes */
+int databases_setup(STOCK_DBS *, const char *, FILE *);
+int databases_close(STOCK_DBS *);
+void initialize_stockdbs(STOCK_DBS *);
+int open_database(DB **, const char *, const char *, FILE *, int);
+void set_db_filenames(STOCK_DBS *my_stock);
+
+
+

+ Because we updated our stock_dbs structure, we need to update our + stock_dbs utility functions + (The stock_db Utility Functions) + accordingly. The updates are trivial and so we won't show them here + in the interest of space. You can find their complete implementation + in the gettingstarted_common.c file + accompanying this example in your DB distribution. +

+

+ More importantly, however, we need to go to + gettingstarted_common.c + and create our secondary key extractor function. When we + store our inventory items, we place the item name in the buffer + immediately after a float and an + int, so retrieving the string from the buffer is + fairly easy to do: +

+ +
/* file: gettingstarted_common.c */
+#include "gettingstarted_common.h"
+                                                                                                                                  
+/*
+ * Used to extract an inventory item's name from an
+ * inventory database record. This function is used to create
+ * keys for secondary database records.
+ */
+int
+get_item_name(DB *dbp, const DBT *pkey, const DBT *pdata, DBT *skey)
+{
+    int offset;
+                                                                                                                                  
+    /*
+     * First, obtain the buffer location where we placed the
+     * item's name. In this example, the item's name is located
+     * in the primary data. It is the first string in the
+     * buffer after the price (a float) and the quantity (an int).
+     *
+     * See load_inventory_database() in example_database_load.c
+     * for how we marshalled the inventory information into the
+     * data DBT.
+     */
+    offset = sizeof(float) + sizeof(int);
+                                                                                                                                  
+    /* Check to make sure there's data */
+    if (pdata->size < offset)
+        return (-1); /* Returning non-zero means that the
+                      * secondary record is not created/updated.
+                      */
+                                                                                                                                  
+    /* Now set the secondary key's data to be the item name */
+    memset(skey, 0, sizeof(DBT));
+    skey->data = pdata->data + offset;
+    skey->size = strlen(skey->data) + 1;
+                                                                                                                                  
+    return (0);
+} 
+

+ Having completed that function, we need to update + set_db_filenames() and + initialize_stockdbs() to handle the + new secondary databases that our application will now use. + These functions were originally introduced in + The stock_db Utility Functions. + +

+ +
+/* Initializes the STOCK_DBS struct.*/
+void
+initialize_stockdbs(STOCK_DBS *my_stock)
+{
+    my_stock->db_home_dir = DEFAULT_HOMEDIR;
+    my_stock->inventory_dbp = NULL;
+    my_stock->vendor_dbp = NULL;
+    my_stock->itemname_sdbp = NULL;
+
+    my_stock->inventory_db_name = NULL;
+    my_stock->vendor_db_name = NULL;
+    my_stock->itemname_db_name = NULL;
+}
+
+/* Identify all the files that will hold our databases. */
+void
+set_db_filenames(STOCK_DBS *my_stock)
+{
+    size_t size;
+
+    /* Create the Inventory DB file name */
+    size = strlen(my_stock->db_home_dir) + strlen(INVENTORYDB) + 1;
+    my_stock->inventory_db_name = malloc(size);
+    snprintf(my_stock->inventory_db_name, size, "%s%s",
+      my_stock->db_home_dir, INVENTORYDB);
+    
+    /* Create the Vendor DB file name */
+    size = strlen(my_stock->db_home_dir) + strlen(VENDORDB) + 1;
+    my_stock->vendor_db_name = malloc(size);
+    snprintf(my_stock->vendor_db_name, size, "%s%s",
+      my_stock->db_home_dir, VENDORDB);
+
+    /* Create the itemname DB file name */
+    size = strlen(my_stock->db_home_dir) + strlen(ITEMNAMEDB) + 1;
+    my_stock->itemname_db_name = malloc(size);
+    snprintf(my_stock->itemname_db_name, size, "%s%s",
+      my_stock->db_home_dir, ITEMNAMEDB);
+} 
+

+ We also need to update + open_database() (as described in + open_database() Function) + to take special actions if we are + opening a secondary database. Unlike our primary databases, we want to + support sorted duplicates for our secondary database. This is because we + are indexing based on an item's name, and item names are + shared by multiple inventory records. As a result, every key in the secondary + database (an item name) will be used by multiple records (pointers to + records in our primary database). We allow this by configuring our + secondary database to support duplicate records. Further, because + BTrees perform best when their records are sorted, we go ahead and + configure our secondary database for sorted duplicates. +

+

+ To do this, we add a parameter to the function that indicates whether we are + opening a secondary database, and we add in the few lines of code + necessary to set the sorted duplicates flags. +

+ +
/* Opens a database */
+int
+open_database(DB **dbpp,       /* The DB handle that we are opening */
+    const char *file_name,     /* The file in which the db lives */
+    const char *program_name,  /* Name of the program calling this
+                                * function */
+    FILE *error_file_pointer,
+    int is_secondary)
+{
+    DB *dbp;    /* For convenience */
+    u_int32_t open_flags;
+    int ret;
+
+    /* Initialize the DB handle */
+    ret = db_create(&dbp, NULL, 0);
+    if (ret != 0) {
+        fprintf(error_file_pointer, "%s: %s\n", program_name,
+                db_strerror(ret));
+        return (ret);
+    }
+
+    /* Point to the memory malloc'd by db_create() */
+    *dbpp = dbp;
+
+    /* Set up error handling for this database */
+    dbp->set_errfile(dbp, error_file_pointer);
+    dbp->set_errpfx(dbp, program_name);
+
+    /*
+     * If this is a secondary database, then we want to allow
+     * sorted duplicates.
+     */
+    if (is_secondary) {
+        ret = dbp->set_flags(dbp, DB_DUPSORT);
+        if (ret != 0) {
+            dbp->err(dbp, ret,
+              "Attempt to set DUPSORT flag failed on '%s'.", file_name);
+            return (ret);
+        }
+    }
+
+    /* Set the open flags */
+    open_flags = DB_CREATE;
+
+    /* Now open the database */
+    ret = dbp->open(dbp,        /* Pointer to the database */
+                    NULL,       /* Txn pointer */
+                    file_name,  /* File name */
+                    NULL,       /* Logical db name (unneeded) */
+                    DB_BTREE,   /* Database type (using btree) */
+                    open_flags, /* Open flags */
+                    0);         /* File mode. Using defaults */
+    if (ret != 0) {
+        dbp->err(dbp, ret, "Database '%s' open failed.", file_name);
+        return (ret);
+    }
+
+    return (ret);
+}   
+

+ That done, we can now update databases_setup() + (see The databases_setup() Function) to create + and open our secondary database. To do this, we have to add a flag to + each call to open_database() that indicates whether + the database is a secondary. We also have to associate our secondary + database with the inventory database (the primary). +

+

+ Note that we do not anywhere in this example show the definition of + PRIMARY_DB and SECONDARY_DB. See + gettingstarted_common.h in your DB examples + directory for those definitions (they are just 0 and + 1, respectively). +

+ +
/* opens all databases */
+int
+databases_setup(STOCK_DBS *my_stock, const char *program_name,
+  FILE *error_file_pointer)
+{
+    int ret;
+
+    /* Open the vendor database */
+    ret = open_database(&(my_stock->vendor_dbp),
+      my_stock->vendor_db_name,
+      program_name, error_file_pointer,
+      PRIMARY_DB);
+    if (ret != 0)
+        /*
+         * Error reporting is handled in open_database() so just return
+         * the return code here.
+         */
+        return (ret);
+
+    /* Open the inventory database */
+    ret = open_database(&(my_stock->inventory_dbp),
+      my_stock->inventory_db_name,
+      program_name, error_file_pointer,
+      PRIMARY_DB);
+    if (ret != 0)
+        /*
+         * Error reporting is handled in open_database() so just return
+         * the return code here.
+         */
+        return (ret);
+
+    /*
+     * Open the itemname secondary database. This is used to
+     * index the product names found in the inventory
+     * database.
+     */
+    ret = open_database(&(my_stock->itemname_sdbp),
+      my_stock->itemname_db_name,
+      program_name, error_file_pointer,
+      SECONDARY_DB);
+    if (ret != 0)
+        /*
+         * Error reporting is handled in open_database() so just return
+         * the return code here.
+         */
+        return (ret);
+                                                                                                                                
+    /*
+     * Associate the itemname db with its primary db
+     * (inventory db).
+     */
+     my_stock->inventory_dbp->associate(
+       my_stock->inventory_dbp,    /* Primary db */
+       NULL,                       /* txn id */
+       my_stock->itemname_sdbp,    /* Secondary db */
+       get_item_name,              /* Secondary key extractor */
+       0);                         /* Flags */
+     
+
+    printf("databases opened successfully\n");
+    return (0);
+}
+

+ Finally, we need to update databases_close() + (The databases_close() Function) + to close our + new secondary database. Note that we are careful to close the secondary + before the primary, even though the database close routine is single + threaded. +

+ +
/* Closes all the databases and secondary databases. */
+int
+databases_close(STOCK_DBS *my_stock)
+{
+    int ret;
+    /*
+     * Note that closing a database automatically flushes its cached data
+     * to disk, so no sync is required here.
+     */
+
+    if (my_stock->itemname_sdbp != NULL) {
+        ret = my_stock->itemname_sdbp->close(my_stock->itemname_sdbp, 0);
+        if (ret != 0)
+            fprintf(stderr, "Itemname database close failed: %s\n",
+              db_strerror(ret));
+    }
+
+    if (my_stock->inventory_dbp != NULL) {
+        ret = my_stock->inventory_dbp->close(my_stock->inventory_dbp, 0);
+        if (ret != 0)
+            fprintf(stderr, "Inventory database close failed: %s\n",
+              db_strerror(ret));
+    }
+
+    if (my_stock->vendor_dbp != NULL) {
+        ret = my_stock->vendor_dbp->close(my_stock->vendor_dbp, 0);
+        if (ret != 0)
+            fprintf(stderr, "Vendor database close failed: %s\n",
+              db_strerror(ret));
+    }
+
+    printf("databases closed.\n");
+    return (0);
+} 
+


+

+ That completes our update to + example_database_load. + + Now when this program is called, it will automatically index inventory + items based on their names. We can then query for those items using the + new index. We show how to do that in the next section. +

+
+
+
+
+
+

Secondary Databases with + example_database_read + +

+
+
+
+
+

+ In Cursor Example we + wrote an application that displays every inventory item in the + Inventory database. In this section, we will update that example to + allow us to search for and display an inventory item given a + specific name. To do this, we will make use of the secondary + database that + example_database_load + + now creates. +

+

+ Because we manage all our database open and close activities in + databases_setup() and + databases_close(), + the update to + example_database_read + is relatively modest. We need only add a command line parameter on + which we can specify the item name, and we will need a new function + in which we will perform the query and display the results. +

+

+ To begin, we add a single forward declaration to the application, + and update our usage function slightly: +

+ +
/* File: example_database_read.c */
+/* gettingstarted_common.h includes db.h for us */
+#include "gettingstarted_common.h"
+                                                                                                                                  
+/* Forward declarations */
+char * show_inventory_item(void *);
+int show_all_records(STOCK_DBS *);
+int show_records(STOCK_DBS *, char *);
+int show_vendor_record(char *, DB *); 
+

+ Next, we update main() to + + accept the new command line switch. + We also need a new variable to contain the item's name. +

+ +
/*
+ * Searches for an inventory item based on that item's name. The search is
+ * performed using the item name secondary database. Displays all
+ * inventory items that use the specified name, as well as the vendor
+ * associated with that inventory item.
+ *
+ * If no item name is provided, then all inventory items are displayed.
+ */
+int
+main(int argc, char *argv[])
+{
+    STOCK_DBS my_stock;
+    int ret;
+    char *itemname;
+                                                                                                                                         
+    /* Initialize the STOCK_DBS struct */
+    initialize_stockdbs(&my_stock);
+                                                                                                                                         
+    itemname = NULL;
+    /*
+     * Parse the command line arguments here and determine
+     * the location of the database files as well as the
+     * inventory item we want displayed, if any. This step is
+     * omitted for brevity.
+     */
+                                                                                                                                         
+    /*
+     * Identify the files that will hold our databases
+     * This function uses information obtained from the
+     * command line to identify the directory in which
+     * the database files reside.
+     */
+    set_db_filenames(&my_stock);
+                                                                                                                                         
+    /* Open all databases */
+    ret = databases_setup(&my_stock, "example_database_read", stderr);
+    if (ret != 0) {
+        fprintf(stderr, "Error opening databases\n");
+        databases_close(&my_stock);
+        return (ret);
+    }
+

+ The final update to the main() entails a little bit + of logic to determine whether we want to display all available inventory + items, or just the ones that match a name provided on the + -i command line parameter. +

+ +
    /* 
+     * Show either a single item or all items, depending
+     * on whether itemname is set to a value.
+     */
+    if (itemname == NULL)
+        ret = show_all_records(&my_stock);
+    else
+        ret = show_records(&my_stock, itemname);
+                                                                                                                                  
+    /* Close our databases */
+    databases_close(&my_stock);
+    return (ret);
+} 
+

+ The only other thing that we need to add to the application is the + implementation of the + show_records() + + function. +

+
+

Note

+

+ In the interest of space, we refrain from showing the other + functions used by this application. For their implementation, please + see Cursor Example. + Alternatively, you can see the entire implementation of this + application + in: +

+
DB_INSTALL/examples_c/getting_started
+

+ where DB_INSTALL is the location where you + placed your DB distribution. +

+
+ +
/*
+ * Search for an inventory item given its name (using the inventory item
+ * secondary database) and display that record and any duplicates that may
+ * exist.
+ */
+int
+show_records(STOCK_DBS *my_stock, char *itemname)
+{
+    DBC *itemname_cursorp;
+    DBT key, data;
+    char *the_vendor;
+    int ret, exit_value;
+                                                                                                                                  
+    /* Initialize our DBTs. */
+    memset(&key, 0, sizeof(DBT));
+    memset(&data, 0, sizeof(DBT));
+                                                                                                                                  
+    /* Get a cursor to the itemname db */
+    my_stock->itemname_sdbp->cursor(my_stock->itemname_sdbp, 0,
+      &itemname_cursorp, 0);
+                                                                                                                                  
+    /*
+     * Get the search key. This is the name on the inventory
+     * record that we want to examine.
+     */
+    key.data = itemname;
+    key.size = strlen(itemname) + 1;
+                                                                                                                                  
+    /*
+     * Position our cursor to the first record in the secondary
+     * database that has the appropriate key.
+     */
+    exit_value = 0;
+    ret = itemname_cursorp->c_get(itemname_cursorp, &key, &data, DB_SET);
+    if (!ret) {
+        do {
+            /*
+             * Show the inventory record and the vendor responsible
+             * for this inventory item.
+             */
+            the_vendor = show_inventory_item(data.data);
+            ret = show_vendor_record(the_vendor, my_stock->vendor_dbp);
+            if (ret) {
+                exit_value = ret;
+                break;
+            }
+            /*
+             * Our secondary allows duplicates, so we need to loop over
+             * the next duplicate records and show them all. This is done
+             * because an inventory item's name is not a unique value.
+             */
+        } while(itemname_cursorp->c_get(itemname_cursorp, &key, &data,
+            DB_NEXT_DUP) == 0);
+    } else {
+        printf("No records found for '%s'\n", itemname);
+    }
+                                                                                                                                  
+    /* Close the cursor */
+    itemname_cursorp->c_close(itemname_cursorp);
+                                                                                                                                  
+    return (exit_value);
+} 
+

+ This completes our update to example_database_read. Using this update, you can now search for and show all inventory items that match a particular name. For example:

+
    example_database_read -i "Zulu Nut"
+
+
+ + + diff --git a/db/docs/gsg/C/cstructs.html b/db/docs/gsg/C/cstructs.html new file mode 100644 index 000000000..9b02832ff --- /dev/null +++ b/db/docs/gsg/C/cstructs.html @@ -0,0 +1,324 @@ + + + + + + Using C Structures with DB + + + + + + + + + +
+
+
+
+

Using C Structures with DB

+
+
+
+
+

+ Storing data in structures is a handy way to pack varied types of + information into each database record. DB databases are sometimes + thought of as a two column table where column 1 is the key and column 2 is + the data. By using structures, you can effectively turn this table into + n columns where n-1 columns + are contained in the structure. +

+

+ So long as a C structure contains no fields that are pointers, you can safely store and retrieve it in the same way as you would any primitive datatype. The following code fragment illustrates this:

+ +
#include <db.h>
+#include <string.h>
+
+typedef struct my_struct {
+    int id;
+    char familiar_name[MAXLINE]; /* Some suitably large value */
+    char surname[MAXLINE];
+} MY_STRUCT;
+
+...
+
+DBT key, data;
+DB *my_database;
+MY_STRUCT user;
+char *fname = "David";
+char *sname = "Rider";
+
+/* Database open omitted for clarity */
+
+user.id = 1;
+strncpy(user.familiar_name, fname, strlen(fname)+1);
+strncpy(user.surname, sname, strlen(sname)+1);
+
+/* Zero out the DBTs before using them. */
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+
+key.data = &(user.id);
+key.size = sizeof(int);
+
+data.data = &user;
+data.size = sizeof(MY_STRUCT); 
+
+my_database->put(my_database, NULL, &key, &data, DB_NOOVERWRITE);
+

+ To retrieve the structure, make sure you supply your own memory. The reason is that, as with some numeric types, many systems require structures to be aligned in a specific way. Because the memory that DB provides may not be aligned properly, for safest results simply use your own memory:

+ +
#include <db.h>
+#include <string.h>
+
+...
+
+DBT key, data;
+DB *my_database;
+MY_STRUCT user;
+
+/* Database open omitted for clarity */
+
+/* Zero out the DBTs before using them. */
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+
+/* Initialize the structure */
+memset(&user, 0, sizeof(MY_STRUCT));
+user.id = 1;
+
+key.data = &user.id;
+key.size = sizeof(int);
+
+/* Use our memory to retrieve the structure */
+data.data = &user;
+data.ulen = sizeof(MY_STRUCT); 
+data.flags = DB_DBT_USERMEM;
+
+my_database->get(my_database, NULL, &key, &data, 0);
+
+printf("Familiar name: %s\n", user.familiar_name);
+printf("Surname: %s\n", user.surname); 
+

+ Be aware that while this is the easiest way to manage structures stored in DB databases, this approach causes your database to be larger than is strictly necessary. Each structure stored in the database is of a fixed size, so you see no space savings from storing a (for example) 5 character surname versus a 20 character surname.

+

+ For a simple example such as this, the padding stored with each record + is probably not critical. However, if you are storing structures that + contain a very large number of character arrays, or if you are simply + storing millions of records, then you may want to avoid this approach. + The wasted space in each record will only serve to make your databases + larger than need be, which will in turn require a larger cache and more + disk I/O than you would ordinarily need. +

+

+ An alternative approach is described next. +

+
+
+
+
+

C Structures with Pointers

+
+
+
+
+

+ It is often necessary for C structures to use fields that are pointers to dynamically allocated memory. This is particularly true if you want to store character strings (or arrays of any kind, for that matter) and you want to avoid the overhead caused by predesignating the size of the array.

+

+ When storing structures like these, you need to make sure that all of the data pointed to and contained by the structure is lined up in a single contiguous block of memory. Remember that DB stores data located at a specific address and of a particular size. If your structure includes fields that point to dynamically allocated memory, then the data that you want to store can be located in different, not necessarily contiguous, locations on the heap.

+

+ The easiest way to solve this problem is to pack your data + into a single memory location and then store the data in that location. + (This process is sometimes called marshalling the + data.) + For example: +

+ +
#include <db.h>
+#include <string.h>
+#include <stdlib.h>
+
+typedef struct my_struct {
+    int id;
+    char *familiar_name;
+    char *surname;
+} MY_STRUCT;
+
+...
+
+DBT key, data;
+DB *my_database;
+MY_STRUCT user;
+int buffsize, bufflen;
+char fname[ ] = "Pete";
+char sname[10];
+char *databuff;
+
+strncpy(sname, "Oar", strlen("Oar")+1);
+
+/* Database open omitted for clarity */
+
+user.id = 1;
+user.familiar_name = fname;
+user.surname = sname;
+
+/* Some of the structure's data is on the stack, and 
+ * some is on the heap. To store this structure's data, we
+ * need to marshall it -- pack it all into a single location 
+ * in memory.
+ */
+
+/* Get the buffer */
+buffsize = sizeof(int) + 
+  (strlen(user.familiar_name) + strlen(user.surname) + 2);
+databuff = malloc(buffsize);
+memset(databuff, 0, buffsize);
+
+/* copy everything to the buffer */
+memcpy(databuff, &(user.id), sizeof(int));
+bufflen = sizeof(int);
+
+memcpy(databuff + bufflen, user.familiar_name, 
+  strlen(user.familiar_name) + 1);
+bufflen += strlen(user.familiar_name) + 1;
+
+memcpy(databuff + bufflen, user.surname, 
+  strlen(user.surname) + 1);
+bufflen += strlen(user.surname) + 1;
+
+/* Now store it */
+
+/* Zero out the DBTs before using them. */
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+
+key.data = &(user.id);
+key.size = sizeof(int);
+
+data.data = databuff;
+data.size = bufflen;
+
+my_database->put(my_database, NULL, &key, &data, DB_NOOVERWRITE);
+/* sname is a stack array, so it must not be passed to free() */
+free(databuff);
+

+ To retrieve the stored structure: +

+ +
#include <db.h>
+#include <string.h>
+#include <stdlib.h>
+
+typedef struct my_struct {
+    char *familiar_name;
+    char *surname;
+    int id;
+} MY_STRUCT;
+
+...
+
+int id;
+DBT key, data;
+DB *my_database;
+MY_STRUCT user;
+char *buffer;
+
+/* Database open omitted for clarity */
+
+
+/* Zero out the DBTs before using them. */
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+
+id = 1;
+key.data = &id;
+key.size = sizeof(int);
+
+my_database->get(my_database, NULL, &key, &data, 0);
+
+/* 
+ * Some compilers won't allow pointer arithmetic on void *'s,
+ * so use a char * instead.
+ */
+buffer = data.data;
+
+user.id = *((int *)data.data);
+user.familiar_name = buffer + sizeof(int);
+user.surname = buffer + sizeof(int) + strlen(user.familiar_name) + 1; 
+
+
+ + + diff --git a/db/docs/gsg/C/databaseLimits.html b/db/docs/gsg/C/databaseLimits.html new file mode 100644 index 000000000..15c23a81c --- /dev/null +++ b/db/docs/gsg/C/databaseLimits.html @@ -0,0 +1,81 @@ + + + + + + Database Limits and Portability + + + + + + + + + +
+
+
+
+

Database Limits and Portability

+
+
+
+
+

+ Berkeley DB provides support for managing everything from very small + databases that fit entirely in memory, to extremely large databases + holding millions of records and terabytes of data. DB databases can + store up to 256 terabytes of data. Individual record keys or record + data can store up to 4 gigabytes of data. +

+

+ DB's databases store data in a binary format that is portable across platforms, even those of differing endian-ness. Be aware, however, that portability aside, some performance issues can crop up if you are using a little-endian architecture. See Setting Comparison Functions for more information.
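+ As a brief illustration only (see Setting Comparison Functions for the authoritative discussion), a custom comparison callback for a BTree database whose keys are native integers might look something like the following sketch. It assumes the three-argument callback form believed to be used by this release, registered with DB->set_bt_compare() before the database is opened:
+
+#include <db.h>
+#include <string.h>
+
+...
+
+/* Compare two keys that are assumed to hold native ints */
+int
+compare_int(DB *dbp, const DBT *a, const DBT *b)
+{
+    int ai, bi;
+
+    /* Copy out of the DBTs to avoid alignment problems */
+    memcpy(&ai, a->data, sizeof(int));
+    memcpy(&bi, b->data, sizeof(int));
+    return (ai > bi ? 1 : (ai < bi ? -1 : 0));
+}
+
+...
+
+/* Register the callback before the database is opened */
+dbp->set_bt_compare(dbp, compare_int);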

+

+ Also, DB's databases and data structures are designed for concurrent access: they are thread-safe, and they share well across multiple processes. That said, in order to allow multiple processes to share databases and the cache, DB makes use of mechanisms that do not work well on network-shared drives (NFS or Windows network shares, for example). For this reason, you cannot place your DB databases and environments on network-mounted drives.

+
+ + + diff --git a/db/docs/gsg/C/dbErrorReporting.html b/db/docs/gsg/C/dbErrorReporting.html new file mode 100644 index 000000000..af315e34b --- /dev/null +++ b/db/docs/gsg/C/dbErrorReporting.html @@ -0,0 +1,204 @@ + + + + + + Error Reporting Functions + + + + + + + + + +
+
+
+
+

Error Reporting Functions

+
+
+
+
+

+ To simplify error reporting and handling, the DB structure offers several useful methods:

+
+
    +
  • +

    + set_errcall() + +

    +

    + Defines the function that is called when an error message is + issued by DB. The error prefix and message are passed to + this callback. It is up to the application to display this + information correctly. +

    +
  • +
  • +

    + set_errfile() +

    +

    + Sets the C library FILE * to be used for + displaying error messages issued by the DB library. +

    +
  • +
  • +

    + set_errpfx() + +

    +

    + Sets the prefix used for any error messages issued by the DB library.

    +
  • +
  • +

    + err() +

    +

    + Issues an error message. The error message is sent to the + callback function as defined by set_errcall. + If that method has not been used, then the error message is sent to the + file defined by + set_errfile(). + + If none of these methods have been used, then the error message is sent to + standard error. +

    +

    + The error message consists of the prefix string (as defined by set_errpfx()), an optional printf-style formatted message, the error message, and a trailing newline.

    +
  • +
  • +

    + errx() +

    +

    + Behaves identically to err() except + that the DB message text associated with the supplied error + value is not appended to the error string. +

    +
  • +
+
+
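+ For example, a minimal sketch of routing error messages to a FILE * using set_errfile() (assuming a DB handle named dbp that has already been created):
+
+#include <db.h>
+#include <stdio.h>
+
+...
+
+DB *dbp;   /* Assume db_create() has already succeeded */
+
+/* Send all error messages issued through this handle to stderr */
+dbp->set_errfile(dbp, stderr);
+dbp->set_errpfx(dbp, "my_example_program");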

+ In addition, you can use the db_strerror() + function to directly return the error string that corresponds to a + particular error number. +

+

+ For example, to send all error messages for a given database handle + to a callback for handling, first create your callback. Do something like this: +

+ +
/* 
+ * Function called to handle any database error messages
+ * issued by DB. 
+ */
+void
+my_error_handler(const char *error_prefix, char *msg)
+{
+  /* 
+   * Put your code to handle the error prefix and error
+   * message here. Note that one or both of these parameters
+   * may be NULL depending on how the error message is issued
+   * and how the DB handle is configured.
+   */
+} 
+

+ And then register the callback as follows: +

+ +
#include <db.h>
+#include <stdio.h>
+
+...
+
+DB *dbp;
+int ret;
+
+/*
+ * Create a database and initialize it for error
+ * reporting.
+ */
+ret = db_create(&dbp, NULL, 0);
+if (ret != 0) {
+        fprintf(stderr, "%s: %s\n", "my_program",
+          db_strerror(ret));
+        return(ret);
+}
+
+/* Set up error handling for this database */
+dbp->set_errcall(dbp, my_error_handler);
+dbp->set_errpfx(dbp, "my_example_program"); 
+

+ And to issue an error message: +

+ +
ret = dbp->open(dbp, 
+                NULL,
+                "mydb.db", 
+                NULL,
+                DB_BTREE,
+                DB_CREATE,
+                0);
+if (ret != 0) {
+    dbp->err(dbp, ret,
+      "Database open failed: %s", "mydb.db");
+    return(ret);
+}
+ + + +
+ + + diff --git a/db/docs/gsg/C/dbconfig.html b/db/docs/gsg/C/dbconfig.html new file mode 100644 index 000000000..2a682291d --- /dev/null +++ b/db/docs/gsg/C/dbconfig.html @@ -0,0 +1,401 @@ + + + + + + Chapter 6. Database Configuration + + + + + + + + + +
+
+
+
+

Chapter 6. Database Configuration

+
+
+
+
+
+

+ Table of Contents +

+
+
+ + Setting the Page Size + +
+
+
+
+ + Overflow Pages + +
+
+ + Locking + +
+
+ + IO Efficiency + +
+
+ + Page Sizing Advice + +
+
+
+
+ + Selecting the Cache Size + +
+
+ + BTree Configuration + +
+
+
+
+ + Allowing Duplicate Records + +
+
+ + Setting Comparison Functions + +
+
+
+
+
+

+ This chapter describes some of the database and cache configuration issues + that you need to consider when building your DB database. + In most cases, there is very little that you need to do in terms of + managing your databases. However, there are configuration issues that you + need to be concerned with, and these are largely dependent on the access + method that you are choosing for your database. +

+

+ The examples and descriptions throughout this document have mostly focused + on the BTree access method. This is because the majority of DB + applications use BTree. For this reason, where configuration issues are + dependent on the type of access method in use, this chapter will focus on + BTree only. For configuration descriptions surrounding the other access + methods, see the Berkeley DB Programmer's Tutorial and Reference + Guide. +

+
+
+
+
+

Setting the Page Size

+
+
+
+
+

+ Internally, DB stores database entries on pages. Page sizes are + important because they can affect your application's performance. +

+

+ DB pages can be between 512 bytes and 64K bytes in size. The size + that you select must be a power of 2. You set your database's + page size using + DB->set_pagesize(). + + +

+

+ Note that a database's page size can only be selected at database + creation time. +
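+ For example, the following sketch (assuming a DB handle named dbp that has been created but not yet opened) requests 16KB pages:
+
+#include <db.h>
+
+...
+
+DB *dbp;   /* Assume db_create() has already succeeded */
+int ret;
+
+/*
+ * Request 16KB pages. This must be done before the database is
+ * created by the DB->open() call; afterwards the page size is fixed.
+ */
+ret = dbp->set_pagesize(dbp, 16 * 1024);
+if (ret != 0) {
+  /* Error handling goes here */
+}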

+

+ When selecting a page size, you should consider the following issues: +

+
+
    +
  • +

    + Overflow pages. +

    +
  • +
  • +

    + Locking +

    +
  • +
  • +

    + Disk I/O. +

    +
  • +
+
+

+ These topics are discussed next. +

+
+
+
+
+

Overflow Pages

+
+
+
+
+

+ Overflow pages are used to hold a key or data item + that cannot fit on a single page. You do not have to do anything to + cause overflow pages to be created, other than to store data that is + too large for your database's page size. Also, the only way you can + prevent overflow pages from being created is to be sure to select a + page size that is large enough to hold your database entries. +

+

+ Because overflow pages exist outside of the normal database + structure, their use is expensive from a performance + perspective. If you select too small of a page size, then your + database will be forced to use an excessive number of overflow + pages. This will significantly harm your application's performance. +

+

+ For this reason, you want to select a page size that is at + least large enough to hold multiple entries given the expected + average size of your database entries. In BTree's case, for best + results select a page size that can hold at least 4 such entries. +

+

+ You can see how many overflow pages your database is using by calling the DB->stat() method, or by examining your database using the db_stat command line utility.
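+ For example, a sketch of retrieving the overflow page count from an open BTree database handle might look like this (it assumes that in this release DB->stat() takes a transaction argument, and that DB allocates the DB_BTREE_STAT structure for you):
+
+#include <db.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+...
+
+DB *dbp;             /* Assume an open BTree database handle */
+DB_BTREE_STAT *sp;
+int ret;
+
+ret = dbp->stat(dbp, NULL, &sp, 0);
+if (ret == 0) {
+    printf("Overflow pages in use: %lu\n", (unsigned long)sp->bt_over_pg);
+    free(sp);   /* The statistics structure was allocated by DB */
+}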

+
+
+
+
+
+

Locking

+
+
+
+
+

+ Locking and multi-threaded access to DB databases is built into the product. However, in order to enable the locking subsystem and to provide efficient sharing of the cache between databases, you must use an environment. Environments and multi-threaded access are not fully described in this manual (see the Berkeley DB Programmer's Reference Manual for information). However, we provide some information on sizing your pages in a multi-threaded/multi-process environment in the interest of providing a complete discussion of the topic.

+

+ If your application is multi-threaded, or if your databases are accessed by more than one process at a time, then page size can influence your application's performance. The reason is that for most access methods (Queue is the exception), DB implements page-level locking. This means that the finest locking granularity is the page, not the record.

+

+ In most cases, database pages contain multiple database + records. Further, in order to provide safe access to multiple + threads or processes, DB performs locking on pages as entries on + those pages are read or written. +

+

+ As the size of your page increases relative to the size of your database entries, the number of entries held on any given page also increases. The result is that the chances of two or more readers and/or writers wanting to access entries on any given page also increase.

+

+ When two or more threads and/or processes want to manage data on a + page, lock contention occurs. Lock contention is resolved by one + thread (or process) waiting for another thread to give up its lock. + It is this waiting activity that is harmful to your application's + performance. +

+

+ It is possible to select a page size that is so large that your + application will spend excessive, and noticeable, amounts of time + resolving lock contention. Note that this scenario is particularly + likely to occur as the amount of concurrency built into your + application increases. +

+

+ On the other hand, if you select too small a page size, then that will only make your tree deeper, which can also cause performance penalties. The trick, therefore, is to select a reasonable page size (one that will hold a sizeable number of records) and then reduce the page size if you notice lock contention.

+

+ You can examine the number of lock conflicts and deadlocks occurring in your application by reviewing your database environment's lock statistics. Either use the DB_ENV->lock_stat() method, or use the db_stat command line utility. The number of locks that could not be obtained due to conflicts is held in the lock statistics structure's st_nconflicts field.
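+ As a sketch (assuming an open environment handle named envp, and that DB allocates the statistics structure for you):
+
+#include <db.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+...
+
+DB_ENV *envp;        /* Assume an open environment handle */
+DB_LOCK_STAT *lsp;
+int ret;
+
+ret = envp->lock_stat(envp, &lsp, 0);
+if (ret == 0) {
+    printf("Lock requests not granted due to conflicts: %lu\n",
+      (unsigned long)lsp->st_nconflicts);
+    free(lsp);   /* The statistics structure was allocated by DB */
+}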

+
+
+
+
+
+

IO Efficiency

+
+
+
+
+

+ Page size can affect how efficient DB is at moving data to and from disk. For some applications, especially those for which the in-memory cache cannot be large enough to hold the entire working dataset, I/O efficiency can significantly impact application performance.

+

+ Most operating systems use an internal block size to determine how much + data to move to and from disk for a single I/O operation. This block + size is usually equal to the filesystem's block size. For optimal + disk I/O efficiency, you should select a database page size that is + equal to the operating system's I/O block size. +
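+ If you are unsure of your filesystem's block size, one way to discover it on POSIX-style systems (shown here only as a sketch; the directory name is an example) is to examine the st_blksize value reported by stat(2):
+
+#include <sys/stat.h>
+#include <stdio.h>
+
+...
+
+struct stat sb;
+
+/* Query the filesystem that will hold the database files */
+if (stat("/path/to/my/database/directory", &sb) == 0)
+    printf("Preferred I/O block size: %ld bytes\n", (long)sb.st_blksize);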

+

+ Essentially, DB performs data transfers based on the database + page size. That is, it moves data to and from disk a page at a time. + For this reason, if the page size does not match the I/O block size, + then the operating system can introduce inefficiencies in how it + responds to DB's I/O requests. +

+

+ For example, suppose your page size is smaller than your operating system block size. In this case, when DB writes a page to disk it is writing just a portion of a logical filesystem page. Any time an application writes just a portion of a logical filesystem page, the operating system brings in the real filesystem page, merges in the portion written by the application, and then writes the filesystem page back to disk. The net result is significantly more disk I/O than if the application had simply selected a page size that was equal to the underlying filesystem block size.

+

+ Alternatively, if you select a page size that is larger than the + underlying filesystem block size, then the operating system may have + to read more data than is necessary to fulfill a read request. + Further, on some operating systems, requesting a single database + page may result in the operating system reading enough filesystem + blocks to satisfy the operating system's criteria for read-ahead. In + this case, the operating system will be reading significantly more + data from disk than is actually required to fulfill DB's read + request. +

+
+

Note

+

+ While transactions are not discussed in this manual, a page size other than your filesystem's block size can affect transactional guarantees. The reason is that page sizes larger than the filesystem's block size cause DB to write pages in block-size increments. As a result, it is possible for a partial page to be written as the result of a transactional commit. For more information, see http://www.sleepycat.com/docs/ref/transapp/reclimit.html.

+
+
+
+
+
+
+

Page Sizing Advice

+
+
+
+
+

+ Page sizing can be confusing at first, so here are some general + guidelines that you can use to select your page size. +

+

+ In general, and given no other considerations, a page size that is equal + to your filesystem block size is the ideal situation. +

+

+ If your data is designed such that 4 database entries cannot fit on a + single page (assuming BTree), then grow your page size to accommodate + your data. Once you've abandoned matching your filesystem's block + size, the general rule is that larger page sizes are better. +

+

+ The exception to this rule is if you have a great deal of + concurrency occurring in your application. In this case, the closer + you can match your page size to the ideal size needed for your + application's data, the better. Doing so will allow you to avoid + unnecessary contention for page locks. +

+
+
+
+ + + diff --git a/db/docs/gsg/C/environments.html b/db/docs/gsg/C/environments.html new file mode 100644 index 000000000..7223b5047 --- /dev/null +++ b/db/docs/gsg/C/environments.html @@ -0,0 +1,150 @@ + + + + + + Environments + + + + + + + + + +
+
+
+
+

Environments

+
+
+
+
+

+ This manual is meant as an introduction to the Berkeley DB library. Consequently, it describes how to build a very simple, single-threaded application, and it omits a great many powerful aspects of the DB database engine that are not required by simple applications. One of these is important enough that it warrants a brief overview here: environments.

+

+ While environments are frequently not used by applications running in embedded environments where every byte counts, they will be used by virtually any other DB application requiring anything beyond the bare minimum functionality. An environment is essentially an encapsulation of one or more databases: you open an environment and then you open databases in that environment. When you do so, the databases are created in, and located relative to, the environment's home directory.
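+ As a sketch of that basic pattern (the environment home directory shown here is only an example; environments are covered fully in the Berkeley DB Programmer's Reference Guide):
+
+#include <db.h>
+
+...
+
+DB_ENV *envp;
+DB *dbp;
+int ret;
+
+/* Create and open the environment, enabling the shared memory cache */
+ret = db_env_create(&envp, 0);
+if (ret != 0) {
+  /* Error handling goes here */
+}
+ret = envp->open(envp, "/export/dbenv", DB_CREATE | DB_INIT_MPOOL, 0);
+if (ret != 0) {
+  /* Error handling goes here */
+}
+
+/* Open a database relative to the environment's home directory */
+ret = db_create(&dbp, envp, 0);
+if (ret != 0) {
+  /* Error handling goes here */
+}
+ret = dbp->open(dbp, NULL, "my_db.db", NULL, DB_BTREE, DB_CREATE, 0);
+if (ret != 0) {
+  /* Error handling goes here */
+}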

+

+ Environments offer a great many features that a stand-alone DB + database cannot offer: +

+
+
    +
  • +

    + Multi-database files. +

    +

    + It is possible in DB to store multiple databases in a single physical file on disk. This is desirable for applications that open more than a handful of databases. However, in order to have more than one database contained in a single physical file, your application must use an environment.

    +
  • +
  • +

    + Multi-thread and multi-process support +

    +

    + When you use an environment, resources such as the in-memory + cache and locks can be shared by all of the databases opened in the + environment. The environment allows you to enable + subsystems that are designed to allow multiple threads and/or + processes to access DB databases. For example, you use an + environment to enable the concurrent data store (CDS), the + locking subsystem, and/or the shared memory buffer pool. +

    +
  • +
  • +

    + Transactional processing +

    +

    + DB offers a transactional subsystem that allows for full + ACID-protection of your database writes. You use environments to + enable the transactional subsystem, and then subsequently to obtain + transaction IDs. +

    +
  • +
  • +

    + High availability (replication) support +

    +

    + DB offers a replication subsystem that enables + single-master database replication with multiple read-only + copies of the replicated data. You use environments to enable + and then manage this subsystem. +

    +
  • +
  • +

    + Logging subsystem +

    +

    + DB offers write-ahead logging for applications that want to obtain a high degree of recoverability in the face of an application or system crash. Once enabled, the logging subsystem allows the application to perform two kinds of recovery ("normal" and "catastrophic") through the use of the information contained in the log files.

    +
  • +
+
+

+ All of these topics are described in the Berkeley DB + Programmer's Reference Guide. +

+
+ + + diff --git a/db/docs/gsg/C/gettingStarted.css b/db/docs/gsg/C/gettingStarted.css new file mode 100644 index 000000000..c1b4c86b7 --- /dev/null +++ b/db/docs/gsg/C/gettingStarted.css @@ -0,0 +1,41 @@ +body { width: 45em; + margin-left: 3em; + font-family: Arial, Helvetica, sans-serif; + font-size: 11pt; + } + +h2.title { margin-left: -1em; + font-family: Verdana, serif; + font-size: 16pt; + } + +h3.title { font-family: Verdana, serif; + font-size: 14pt; + } + +pre.programlisting { + font-family: monospace; + background-color: #eae8e9; +} + +div.navheader { font-size: 9pt; + width: 60em; + margin-left: -2em; + } + +div.navheader table tr td { font-size: 9pt; } + +div.navfooter { font-size: 9pt; + width: 60em; + margin-left: -2em; + } +div.navfooter table tr td { font-size: 9pt; } + +span.emphasis { font-style: italic; font-size: 9pt;} + +div.appendix div.informaltable { font-size: 9pt; } +div.appendix div.informaltable td { vertical-align: top; } +div.appendix div.informaltable p { margin-top: .25em; } +div.appendix div.informaltable p { margin-bottom: .25em; } + + diff --git a/db/docs/gsg/C/gettingit.html b/db/docs/gsg/C/gettingit.html new file mode 100644 index 000000000..c722aefce --- /dev/null +++ b/db/docs/gsg/C/gettingit.html @@ -0,0 +1,77 @@ + + + + + + Getting and Using DB + + + + + + + + + +
+
+
+
+

Getting and Using DB

+
+
+
+
+

+ You can obtain DB by visiting the Sleepycat download page: + http://www.sleepycat.com/download/index.shtml. +

+

+ To install DB, untar or unzip the distribution to the directory of + your choice. You will then need to build the product binaries. + For information on building DB, see + DB_INSTALL/docs/index.html, + where DB_INSTALL is the directory where you unpacked + DB. On that page, you will find links to platform-specific build + instructions. +

+

+ That page also contains links to more documentation for DB. In + particular, you will find links for the Berkeley DB + Programmer's Tutorial and Reference Guide as + well as the API reference documentation. +

+
+ + + diff --git a/db/docs/gsg/C/index.html b/db/docs/gsg/C/index.html new file mode 100644 index 000000000..18e5b7920 --- /dev/null +++ b/db/docs/gsg/C/index.html @@ -0,0 +1,477 @@ + + + + + + Getting Started with Berkeley DB + + + + + + + +
+
+
+
+

Getting Started with Berkeley DB

+
+
+
+

+ Legal Notice +

+

+ This documentation is distributed under the terms of the Sleepycat + public license. You may review the terms of this license at: + http://www.sleepycat.com/download/oslicense.html +

+

+ Sleepycat Software, Berkeley DB, Berkeley DB XML and the Sleepycat logo + are trademarks or service marks of Sleepycat Software, Inc. All rights to + these marks are reserved. No third-party use is permitted without the + express prior written consent of Sleepycat Software, Inc. +

+

+ To obtain a copy of this document's original source code, please write + to . +

+
+
+
+

9/22/2004

+
+
+
+
+
+
+

+ Table of Contents +

+
+
+ + Preface + +
+
+
+
+ + Conventions Used in this Book + +
+
+
+
+ + 1. Introduction to Berkeley DB + +
+
+
+
+ + About This Manual + +
+
+ + Berkeley DB Concepts + +
+
+ + Access Methods + +
+
+
+
+ + Selecting Access Methods + +
+
+ + Choosing between BTree and Hash + +
+
+ + Choosing between Queue and Recno + +
+
+
+
+ + Database Limits and Portability + +
+
+ + Environments + +
+
+ + Error Returns + +
+
+ + Getting and Using DB + +
+
+
+
+ + 2. Databases + +
+
+
+
+ + Opening Databases + +
+
+ + Closing Databases + +
+
+ + Database Open Flags + +
+
+ + Administrative Methods + +
+
+ + Error Reporting Functions + +
+
+ + Managing Databases in Environments + +
+
+ + Database Example + +
+
+
+
+ + 3. Database Records + +
+
+
+
+ + Using Database Records + +
+
+ + Reading and Writing Database Records + +
+
+
+
+ + Writing Records to the Database + +
+
+ + Getting Records from the Database + +
+
+ + Deleting Records + +
+
+ + Data Persistence + +
+
+
+
+ + Using C Structures with DB + +
+
+
+
+ + C Structures with Pointers + +
+
+
+
+ + Database Usage Example + +
+
+
+
+ + 4. Using Cursors + +
+
+
+
+ + Opening and Closing Cursors + +
+
+ + Getting Records Using the Cursor + +
+
+
+
+ + Searching for Records + +
+
+ + Working with Duplicate Records + +
+
+
+
+ + Putting Records Using Cursors + +
+
+ + Deleting Records Using Cursors + +
+
+ + Replacing Records Using Cursors + +
+
+ + Cursor Example + +
+
+
+
+ + 5. Secondary Databases + +
+
+
+
+ + Opening and Closing Secondary Databases + +
+
+ + Implementing Key + + Extractors + + +
+
+ + Reading Secondary Databases + +
+
+ + Deleting Secondary Database Records + +
+
+ + + + Using Cursors with Secondary Databases + + +
+
+ + Database Joins + +
+
+
+
+ + Using Join Cursors + +
+
+
+
+ + Secondary Database Example + +
+
+
+
+ + Secondary Databases with + example_database_load + + + +
+
+ + Secondary Databases with + example_database_read + + + +
+
+
+
+
+
+ + 6. Database Configuration + +
+
+
+
+ + Setting the Page Size + +
+
+
+
+ + Overflow Pages + +
+
+ + Locking + +
+
+ + IO Efficiency + +
+
+ + Page Sizing Advice + +
+
+
+
+ + Selecting the Cache Size + +
+
+ + BTree Configuration + +
+
+
+
+ + Allowing Duplicate Records + +
+
+ + Setting Comparison Functions + +
+
+
+
+
+
+
+ +
+ + + diff --git a/db/docs/gsg/C/indexes.html b/db/docs/gsg/C/indexes.html new file mode 100644 index 000000000..f1900d121 --- /dev/null +++ b/db/docs/gsg/C/indexes.html @@ -0,0 +1,357 @@ + + + + + + Chapter 5. Secondary Databases + + + + + + + + + +
+
+
+
+

Chapter 5. Secondary Databases

+
+
+
+
+ +

+ Usually you find database records by means of the record's key. However, the key that you use for your record will not always contain the information required to provide you with rapid access to the data that you want to retrieve. For example, suppose your database contains records related to users. The key might be a string that is some unique identifier for the person, such as a user ID. Each record's data, however, would likely contain a complex object containing details about people such as names, addresses, phone numbers, and so forth. While your application may frequently want to query a person by user ID (that is, by the information stored in the key), it may also on occasion want to locate people by, say, their name.

+

+ Rather than iterate through all of the records in your database, examining each in turn for a given person's name, you can create an index based on names and then just search that index for the name that you want. You can do this using secondary databases. In DB, the database that contains your data is called a primary database. A database that provides an alternative set of keys to access that data is called a secondary database. In a secondary database, the keys are your alternative (or secondary) index, and the data corresponds to a primary record's key.

+

+ You create a secondary database by creating the database, opening it, and + then associating the database with + the primary database (that is, the database for which + you are creating the index). As a part of associating + the secondary database to the primary, you must provide a callback that is + used to create the secondary database keys. Typically this callback creates + a key based on data found in the primary database record's key or data. +

+

+ Once opened, DB manages secondary databases for you. Adding or deleting + records in your primary database causes DB to update the secondary as + necessary. Further, changing a record's data in the primary database may cause + DB to modify a record in the secondary, depending on whether the change + forces a modification of a key in the secondary database. +

+

+ Note that you cannot write directly to a secondary database. Any attempt to write to a secondary database results in a non-zero status return. To change the data referenced by a secondary record, modify the primary database instead. The exception to this rule is that delete operations are allowed on the secondary database. See Deleting Secondary Database Records for more information.

+
+

Note

+

+ Secondary database records are updated/created by DB only if the key creator callback function returns 0. If a value other than 0 is returned, then DB will not add the key to the secondary database, and in the event of a record update it will remove any existing key. Note that the callback can use either DB_DONOTINDEX or some error code outside of DB's name space to indicate that the entry should not be indexed.

+

+ See Implementing Key Extractors for more information.

+
+

+ When you read a record from a secondary database, DB automatically returns the data, and optionally the key, from the corresponding record in the primary database.

+
+
+
+
+

Opening and Closing Secondary Databases

+
+
+
+
+

+ You manage secondary database opens and closes in the same way as you would any normal database. The only differences are that:

+
+
    +
  • +

    + You must associate the secondary to a primary database using + DB->associate(). + +

    +
  • +
  • +

    + When closing your databases, it is a good idea to make sure you + close your secondaries before closing your primaries. This is + particularly true if your database closes are not single + threaded. +

    +
  • +
+
+

+ When you associate a secondary to a primary database, you must provide a + callback that is used to generate the secondary's keys. These + callbacks are described in the next section. +

+

+ For example, to open a secondary database and associate it to a primary + database: +

+ +
#include <db.h>
+                                                                                                                                     
+...
+                                                                                                                                     
+DB *dbp, *sdbp;    /* Primary and secondary DB handles */
+u_int32_t flags;   /* Primary database open flags */
+int ret;           /* Function return value */
+                                                                                                                                     
+/* Primary */
+ret = db_create(&dbp, NULL, 0);
+if (ret != 0) {
+  /* Error handling goes here */
+}
+                                                                                                                                     
+/* Secondary */
+ret = db_create(&sdbp, NULL, 0);
+if (ret != 0) {
+  /* Error handling goes here */
+}
+
+/* Usually we want to support duplicates for secondary databases */
+ret = sdbp->set_flags(sdbp, DB_DUPSORT);
+if (ret != 0) {
+  /* Error handling goes here */
+}
+
+
+/* Database open flags */
+flags = DB_CREATE;    /* If the database does not exist,
+                       * create it.*/
+
+/* open the primary database */
+ret = dbp->open(dbp,        /* DB structure pointer */
+                NULL,       /* Transaction pointer */
+                "my_db.db", /* On-disk file that holds the database. */
+                NULL,       /* Optional logical database name */
+                DB_BTREE,   /* Database access method */
+                flags,      /* Open flags */
+                0);         /* File mode (using defaults) */
+if (ret != 0) {
+  /* Error handling goes here */
+}
+
+/* open the secondary database */
+ret = sdbp->open(sdbp,          /* DB structure pointer */
+                 NULL,          /* Transaction pointer */
+                 "my_secdb.db", /* On-disk file that holds the database. */
+                 NULL,          /* Optional logical database name */
+                 DB_BTREE,      /* Database access method */
+                 flags,         /* Open flags */
+                 0);            /* File mode (using defaults) */
+if (ret != 0) {
+  /* Error handling goes here */
+}
+
+/* Now associate the secondary to the primary */
+dbp->associate(dbp,            /* Primary database */
+               NULL,           /* TXN id */
+               sdbp,           /* Secondary database */
+               get_sales_rep,  /* Callback used for key creation. Not 
+                                * defined in this example. See the next
+                                * section. */
+               0);              /* Flags */
+

+ Closing the primary and secondary databases is accomplished exactly as you + would for any database: +

+ +
/* Close the secondary before the primary */
+if (sdbp != NULL)
+    sdbp->close(sdbp, 0);
+if (dbp != NULL)
+    dbp->close(dbp, 0); 
+
+
+ + + diff --git a/db/docs/gsg/C/introduction.html b/db/docs/gsg/C/introduction.html new file mode 100644 index 000000000..a5b0d073f --- /dev/null +++ b/db/docs/gsg/C/introduction.html @@ -0,0 +1,229 @@ + + + + + + Chapter 1. Introduction to Berkeley DB + + + + + + + + + +
+
+
+
+

Chapter 1. Introduction to Berkeley DB

+
+
+
+
+
+

+ Table of Contents +

+
+
+ + About This Manual + +
+
+ + Berkeley DB Concepts + +
+
+ + Access Methods + +
+
+
+
+ + Selecting Access Methods + +
+
+ + Choosing between BTree and Hash + +
+
+ + Choosing between Queue and Recno + +
+
+
+
+ + Database Limits and Portability + +
+
+ + Environments + +
+
+ + Error Returns + +
+
+ + Getting and Using DB + +
+
+
+

+ Welcome to Sleepycat's Berkeley DB (DB). DB is a general-purpose embedded + database engine that is capable of providing a wealth of data management services. + It is designed from the ground up for high-throughput applications requiring + in-process, bullet-proof management of mission-critical data. DB can + gracefully scale from managing a few bytes to terabytes of data. For the most + part, DB is limited only by your system's available physical resources. +

+

+ Because DB is an embedded database engine, it is extremely fast. You compile + and link it into your application in the same way as you would any + third-party library. This means that DB runs in the same process space + as does your application, allowing you to avoid the high cost of + interprocess communications incurred by stand-alone database servers. +

+

+ To further improve performance, DB offers an in-memory cache designed to + provide rapid access to your most frequently used data. Once configured, + cache usage is transparent. It requires very little attention on the part + of the application developer. +

+

+ Beyond raw speed, DB is also extremely configurable. It provides several + different ways of organizing your data in its databases. Known as + access methods, each such data organization mechanism + provides different characteristics that are appropriate for different data + management profiles. (Note that this manual focuses almost entirely on the + BTree access method as this is the access method used by the vast majority + of DB applications). +

+

+ To further improve its configurability, DB offers many different + subsystems, each of which can be used to extend DB's capabilities. For + example, many applications require write-protection of their data so + as to ensure that data is never left in an inconsistent state for any + reason (such as software bugs or hardware failures). For those + applications, a transaction subsystem can be enabled and used to + transactionally protect database writes. +

+

+ The list of operating systems on which DB is available is too long to + detail here. Suffice to say that it is available on all major commercial + operating systems, as well as on many embedded platforms. +

+

+ Finally, DB is available in a wealth of programming languages. Sleepycat + officially supports DB in C, C++, and Java, but the library is also + available in many other languages, especially scripting languages such as + Perl and Python. +

+
+

Note

+

+ Before going any further, it is important to mention that DB is not + a relational database (although you could use it to build a relational + database). Out of the box, DB does not provide higher-level features + such as triggers, or a high-level query language such as SQL. + Instead, DB provides just those minimal + APIs required to store and retrieve your data as + efficiently as possible. +

+ + + +
+
+
+
+
+

About This Manual

+
+
+
+
+

+ This manual introduces DB. As such, this book does not examine + intermediate or advanced features such as threaded library usage or + transactional usage. Instead, this manual provides a step-by-step + introduction to DB's basic concepts and library usage. +

+

+ Specifically, this manual introduces DB environments, databases, + database records, and storage and retrieval of database records. This + book also introduces cursors and their usage, and it describes + secondary databases. +

+

+ For the most part, this manual focuses on the BTree access method. A + chapter is given at the end of this manual that describes some of the + concepts involving BTree usage, such as duplicate record management and comparison + routines. +

+

+ Examples are given throughout this book that are designed to illustrate + API usage. At the end of each chapter, a complete example is given that + is designed to reinforce the concepts covered in that chapter. In + addition to being presented in this book, these final programs are also + available in the DB software distribution. You can find them in +

+
DB_INSTALL/examples_c/getting_started
+

+ where DB_INSTALL is the + location where you placed your DB distribution. +

+

+ This book uses the C programming language for its examples. Note that versions of this book exist for the C++ and Java languages as well.

+
+
+ + + diff --git a/db/docs/gsg/C/joins.html b/db/docs/gsg/C/joins.html new file mode 100644 index 000000000..42df5b1b8 --- /dev/null +++ b/db/docs/gsg/C/joins.html @@ -0,0 +1,304 @@ + + + + + + Database Joins + + + + + + + + + +
+
+
+
+

Database Joins

+
+
+
+
+

+ If you have two or more secondary databases associated with a primary + database, then you can retrieve primary records based on the union of + multiple secondary entries. You do this using a + + join cursor. +

+

+ Throughout this document we have presented a structure that stores information on grocery vendors. That structure is fairly simple with a limited number of data members, few of which would be interesting from a query perspective. But suppose, instead, that we were storing information on something with many more queryable characteristics, such as an automobile. In that case, you may be storing information such as color, number of doors, fuel mileage, automobile type, number of passengers, make, model, and year, to name just a few.

+

+ In this case, you would still likely be using some unique value to key your primary entries (in the United States, the automobile's VIN would be ideal for this purpose). You would then create a structure that identifies all the characteristics of the automobiles in your inventory.

+

+ To query this data, you might then create multiple secondary databases, one for each of the characteristics that you want to query. For example, you might create a secondary for color, another for number of doors, another for number of passengers, and so forth. Of course, you will need a unique key extractor function for each such secondary database. You do all of this using the concepts and techniques described throughout this chapter.

+

+ Once you have created this primary database and all interesting + secondaries, what you have is the ability to retrieve automobile records + based on a single characteristic. You can, for example, find all the + automobiles that are red. Or you can find all the automobiles that have + four doors. Or all the automobiles that are minivans. +

+

+ The next most natural step, then, is to form compound queries, or joins. For example, you might want to find all the automobiles that are red, and that were built by Toyota, and that are minivans. You can do this using a join cursor.

+
+
+
+
+

Using Join Cursors

+
+
+
+
+

+ To use a join cursor: +

+
+
    +
  • +

    + Open two or more cursors for secondary databases that are associated with the same primary database.

    +
  • +
  • +

    + Position each such cursor to the secondary key + value in which you are interested. For example, to build on + the previous description, the cursor for the color + database is positioned to the red records + while the cursor for the model database is positioned to the + minivan records, and the cursor for the + make database is positioned to Toyota. +

    +
  • +
  • +

    + Create an array of cursors, and place in it each + of the cursors that are participating in your join query. +

    +
  • +
  • +

    + Obtain a join cursor. You do this using the DB->join() method. You must pass this method the array of secondary cursors that you opened and positioned in the previous steps.

    +
  • +
  • +

    + Iterate over the set of matching records until the return code is not 0.

    +
  • +
  • +

    + Close your join cursor.

    +
  • +
  • +

    + If you are done with them, close all your cursors. +

    +
  • +
+
+

+ For example: + +

+ +
#include <db.h>
+#include <string.h>
+
+...
+
+DB *automotiveDB;
+DB *automotiveColorDB;
+DB *automotiveMakeDB;
+DB *automotiveTypeDB;
+DBC *color_curs, *make_curs, *type_curs, *join_curs;
+DBC *carray[4];
+DBT key, data;
+int ret;
+
+char *the_color = "red";
+char *the_type = "minivan";
+char *the_make = "Toyota";
+
+/* Database and secondary database opens omitted for brevity.
+ * Assume a primary database handle:
+ *   automotiveDB
+ * Assume 3 secondary database handles:
+ *   automotiveColorDB  -- secondary database based on automobile color
+ *   automotiveMakeDB  -- secondary database based on the manufacturer
+ *   automotiveTypeDB  -- secondary database based on automobile type
+ */
+
+/* initialize pointers and structures */
+color_curs = NULL;
+make_curs = NULL;
+type_curs = NULL;
+join_curs = NULL;
+
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+
+/* open the cursors */
+if (( ret =
+    automotiveColorDB->cursor(automotiveColorDB, NULL, 
+      &color_curs, 0)) != 0) {
+        /* Error handling goes here */
+}
+
+if (( ret =
+    automotiveMakeDB->cursor(automotiveMakeDB, NULL, 
+      &make_curs, 0)) != 0) {
+        /* Error handling goes here */
+}
+
+if (( ret =
+    automotiveTypeDB->cursor(automotiveTypeDB, NULL, 
+      &type_curs, 0)) != 0) {
+        /* Error handling goes here */
+}
+
+/* Position the cursors */
+key.data = the_color;
+key.size = strlen(the_color) + 1;
+if ((ret = color_curs->c_get(color_curs, &key, &data, DB_SET)) != 0) {
+    /* Error handling goes here */
+}
+
+key.data = the_make;
+key.size = strlen(the_make) + 1;
+if ((ret = make_curs->c_get(make_curs, &key, &data, DB_SET)) != 0) {
+    /* Error handling goes here */
+}
+
+key.data = the_type;
+key.size = strlen(the_type) + 1;
+if ((ret = type_curs->c_get(type_curs, &key, &data, DB_SET)) != 0) {
+    /* Error handling goes here */
+}
+
+/* Set up the cursor array */
+carray[0] = color_curs;
+carray[1] = make_curs;
+carray[2] = type_curs;
+carray[3] = NULL;        /* The cursor array must be NULL-terminated */
+
+/* Create the join */
+if ((ret = automotiveDB->join(automotiveDB, carray, &join_curs, 0)) != 0) {
+    /* Error handling goes here */
+}
+
+/* Iterate using the join cursor */
+while ((ret = join_curs->c_get(join_curs, &key, &data, 0)) == 0) {
+    /* Do interesting things with the key and data */
+}
+
+/*
+ * If we exited the loop because we ran out of records,
+ * then it has completed successfully.
+ */
+if (ret == DB_NOTFOUND) {
+    /* 
+     * Close all our cursors and databases as is appropriate,  and 
+     * then exit with a normal exit status (0). 
+     */
+} 
+
+
+ + + diff --git a/db/docs/gsg/C/keyCreator.html b/db/docs/gsg/C/keyCreator.html new file mode 100644 index 000000000..906f4d832 --- /dev/null +++ b/db/docs/gsg/C/keyCreator.html @@ -0,0 +1,149 @@ + + + + + + Implementing Key + + Extractors + + + + + + + + + + +
+
+
+
+

Implementing Key + + Extractors +

+
+
+
+
+

+ You must provide every secondary database with a callback that creates keys from primary records. You identify this callback when you associate your secondary database to your primary.

+

+ You can create keys using whatever data you want. Typically you will + base your key on some information found in a record's data, but you + can also use information found in the primary record's key. How you build + your keys is entirely dependent upon the nature of the index that you + want to maintain. +

+

+ You implement a key extractor by writing a function that extracts + the necessary information from a primary record's key or data. + This function must conform to a specific prototype, and it must be + provided as a callback to the associate() + method. +

+

+ For example, suppose your primary database records contain data that + uses the following structure: +

+ +
typedef struct vendor {
+    char name[MAXFIELD];             /* Vendor name */
+    char street[MAXFIELD];           /* Street name and number */
+    char city[MAXFIELD];             /* City */
+    char state[3];                   /* Two-letter US state code */
+    char zipcode[6];                 /* US zipcode */
+    char phone_number[13];           /* Vendor phone number */
+    char sales_rep[MAXFIELD];        /* Name of sales representative */
+    char sales_rep_phone[MAXFIELD];  /* Sales rep's phone number */
+} VENDOR; 
+

+ Further suppose that you want to be able to query your primary database + based on the name of a sales representative. Then you would write a + function that looks like this: +

+ +
#include <db.h>
+#include <string.h>
+
+...
+
+int
+get_sales_rep(DB *sdbp,          /* secondary db handle */
+              const DBT *pkey,   /* primary db record's key */
+              const DBT *pdata,  /* primary db record's data */
+              DBT *skey)         /* secondary db record's key */
+{
+    VENDOR *vendor;
+
+    /* First, extract the structure contained in the primary's data */
+    vendor = pdata->data;
+
+    /* Now set the secondary key's data to be the representative's name */
+    memset(skey, 0, sizeof(DBT));
+    skey->data = vendor->sales_rep;
+    skey->size = strlen(vendor->sales_rep) + 1;
+
+    /* Return 0 to indicate that the record can be created/updated. */
+    return (0);
+} 
+

+ In order to use this function, you provide it to the associate() method after the primary and secondary databases have been created and opened:

+ +
dbp->associate(dbp,            /* Primary database */
+               NULL,           /* TXN id */
+               sdbp,           /* Secondary database */
+               get_sales_rep,  /* Callback used for key creation. */
+               0);             /* Flags */
+
+ + + diff --git a/db/docs/gsg/C/preface.html b/db/docs/gsg/C/preface.html new file mode 100644 index 000000000..d83f0f9a1 --- /dev/null +++ b/db/docs/gsg/C/preface.html @@ -0,0 +1,138 @@ + + + + + + Preface + + + + + + + + + +
+
+
+
+

Preface

+
+
+
+
+
+

+ Table of Contents +

+
+
+ + Conventions Used in this Book + +
+
+
+

+ Welcome to Berkeley DB (DB). This document introduces + + DB, version 4.3. + It is intended + to provide a rapid introduction to the DB API set and related concepts. The goal of this document is + to provide you with an efficient mechanism + with which you can evaluate DB against your project's technical requirements. As such, this document is + intended for + C developers and senior software architects who are + looking for an in-process data management solution. No prior experience with Sleepycat technologies is + expected or required. +

+
+
+
+
+

Conventions Used in this Book

+
+
+
+
+

+ The following typographical conventions are used within this manual:

+

+ Structure names are represented in monospaced font, as are method + names. For example: "DB->open() is a method + on a DB handle." +

+

+ Variable or non-literal text is presented in italics. For example: "Go to your + DB_INSTALL directory." +

+

+ Program examples are displayed in a monospaced font on a shaded background. + For example: +

+
/* File: gettingstarted_common.h */
+typedef struct stock_dbs {
+    DB *inventory_dbp; /* Database containing inventory information */
+    DB *vendor_dbp;    /* Database containing vendor information */
+
+    char *db_home_dir;       /* Directory containing the database files */
+    char *inventory_db_name; /* Name of the inventory database */
+    char *vendor_db_name;    /* Name of the vendor database */
+} STOCK_DBS; 
+

+ In some situations, programming examples are updated from one chapter to the next. When + this occurs, the new code is presented in monospaced bold font. For example: +

+
typedef struct stock_dbs {
+    DB *inventory_dbp; /* Database containing inventory information */
+    DB *vendor_dbp;    /* Database containing vendor information */
+    DB *itemname_sdbp; /* Index based on the item name index */
+    char *db_home_dir;       /* Directory containing the database files */
+    char *itemname_db_name;  /* Itemname secondary database */
+    char *inventory_db_name; /* Name of the inventory database */
+    char *vendor_db_name;    /* Name of the vendor database */
+} STOCK_DBS; 
+
+

Note

+

+ Finally, notes of interest are represented using a note block such + as this. +

+
+
+
+ + + diff --git a/db/docs/gsg/C/readSecondary.html b/db/docs/gsg/C/readSecondary.html new file mode 100644 index 000000000..8933dccd7 --- /dev/null +++ b/db/docs/gsg/C/readSecondary.html @@ -0,0 +1,140 @@ + + + + + + Reading Secondary Databases + + + + + + + + + +
+
+
+
+

Reading Secondary Databases

+
+
+
+
+

+ As with a primary database, you can read records from your secondary database either by using the DB->get() or DB->pget() methods, or by using a cursor on the secondary database. The main difference between reading secondary and primary databases is that when you read a secondary database record, the secondary record's data is not returned to you. Instead, the primary key and data corresponding to the secondary key are returned to you.

+

+ For example, assuming your secondary database contains keys related + to a person's full name: +

+ +
#include <db.h>
+#include <string.h>
+
+...
+
+DB *my_secondary_database;
+DBT key; /* Used for the search key */
+DBT pkey, pdata; /* Used to return the primary key and data */
+char *search_name = "John Doe";
+
+/* Primary and secondary database opens omitted for brevity */
+                                                                                                                                     
+/* Zero out the DBTs before using them. */
+memset(&key, 0, sizeof(DBT));
+memset(&pkey, 0, sizeof(DBT));
+memset(&pdata, 0, sizeof(DBT));
+                                                                                                                                     
+key.data = search_name;
+key.size = strlen(search_name) + 1;
+                                                                                                                                     
+/* Returns the key from the secondary database, and the data from the 
+ * associated primary database entry.
+ */
+my_secondary_database->get(my_secondary_database, NULL, 
+  &key, &pdata, 0);
+
+/* Returns the key from the secondary database, and the key and data 
+ * from the associated primary database entry.
+ */
+my_secondary_database->pget(my_secondary_database, NULL, 
+  &key, &pkey, &pdata, 0); 
+

+ Note that, just like a primary database, if your secondary database supports duplicate records then DB->get() and DB->pget() only return the first record found in a matching duplicates set. If you want to see all the records related to a specific secondary key, then use a cursor opened on the secondary database. Cursors are described in Using Cursors.

+
+ + + diff --git a/db/docs/gsg/C/secondaryCursor.html b/db/docs/gsg/C/secondaryCursor.html new file mode 100644 index 000000000..5e3ab92d9 --- /dev/null +++ b/db/docs/gsg/C/secondaryCursor.html @@ -0,0 +1,158 @@ + + + + + + + + Using Cursors with Secondary Databases + + + + + + + + + + +
+
+
+
+

+ + Using Cursors with Secondary Databases +

+
+
+
+
+

+ Just like cursors on a primary database, you can use cursors on a secondary database to iterate over its records. You can also use them to search for specific records, to seek to the first or last record in the database, to get the next duplicate record, and so forth. For a complete description of cursors and their capabilities, see Using Cursors.

+

+ However, when you use cursors with secondary databases:

+
+
    +
  • +

    + Any data returned is the data contained on the primary database + record referenced by the secondary record. +

    +
  • +
  • +

+ You cannot use DB_GET_BOTH and related flags with DB->c_get() and a secondary database. Instead, you must use DB->c_pget(). In that case, the primary and secondary keys given on the call to DB->c_pget() must match the secondary key and associated primary record key in order for that primary record to be returned as a result of the call. (A short sketch of this appears after the example below.)

    +
  • +
+
+

+ For example, suppose you are using the databases, classes, and key extractors described in Implementing Key Extractors. Then the following searches for a person's name in the secondary database, and deletes all secondary and primary records that use that name.

+ +
#include <db.h>
+#include <string.h>
+
+...
+                                                                                                                                     
+DB *sdbp;          /* Secondary DB handle */
+DBC *cursorp;      /* Cursor */
+DBT key, data;     /* DBTs used for the delete */
+char *search_name = "John Doe"; /* Name to delete */
+                                                                                                                                     
+/* Primary and secondary database opens omitted for brevity. */
+
+/* Get a cursor on the secondary database */
+sdbp->cursor(sdbp, NULL, &cursorp, 0);
+
+/*
+ * Zero out the DBT before using it.
+ */
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+
+key.data = search_name;
+key.size = strlen(search_name) + 1;
+
+ 
+/* Position the cursor */
+while (cursorp->c_get(cursorp, &key, &data, DB_SET) == 0)
+    cursorp->c_del(cursorp, 0); 
+
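+ As noted in the list above, DB_GET_BOTH-style lookups against a secondary database go through DBC->c_pget(). The following minimal sketch reuses the cursorp handle and search_name key from the example above; the primary key value shown is purely hypothetical and error handling is omitted:
+
+DBT skey, pkey, pdata;
+char *primary_key = "some primary key"; /* Hypothetical primary key value */
+int ret;
+
+memset(&skey, 0, sizeof(DBT));
+memset(&pkey, 0, sizeof(DBT));
+memset(&pdata, 0, sizeof(DBT));
+
+/* Secondary key we are matching on */
+skey.data = search_name;
+skey.size = strlen(search_name) + 1;
+
+/* Primary key that must also match */
+pkey.data = primary_key;
+pkey.size = strlen(primary_key) + 1;
+
+/* The primary data is returned only if both the secondary key and the
+ * primary key identify an existing secondary/primary pairing.
+ */
+ret = cursorp->c_pget(cursorp, &skey, &pkey, &pdata, DB_GET_BOTH);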
+ + + diff --git a/db/docs/gsg/C/secondaryDelete.html b/db/docs/gsg/C/secondaryDelete.html new file mode 100644 index 000000000..a62f28b34 --- /dev/null +++ b/db/docs/gsg/C/secondaryDelete.html @@ -0,0 +1,210 @@ + + + + + + Deleting Secondary Database Records + + + + + + + + + +
+
+
+
+

Deleting Secondary Database Records

+
+
+
+
+

+ In general, you will not modify a secondary database directly. In order to modify a secondary database, you should modify the primary database and simply allow DB to manage the secondary modifications for you.

+

+ However, as a convenience, you can delete a secondary database record directly. Doing so causes the associated primary key/data pair to be deleted. This in turn causes DB to delete all secondary database records that reference the primary record.

+

+ You can use the DB->del() method to delete a secondary database record. Note that if your secondary database contains duplicate records, then deleting a record from the set of duplicates causes all of the duplicates to be deleted as well.

+
+

Note

+

+ You can delete a secondary database record using the previously + described mechanism only if: +

+
+
    +
  • +

+ the secondary key extractor function returns 0 (see Implementing Key Extractors for information on this callback).

    +
  • +
  • +

    + the primary database is opened for write access. +

    +
  • +
+
+

+ If either of these conditions is not met, then no delete operations can be performed on the secondary database.

+
+

For example:

+ +
#include <db.h>
+#include <string.h>
+                                                                                                                                     
+...
+                                                                                                                                     
+DB *dbp, *sdbp;    /* Primary and secondary DB handles */
+DBT key;           /* DBTs used for the delete */
+int ret;           /* Function return value */
+char *search_name = "John Doe"; /* Name to delete */
+
+/* Primary */
+ret = db_create(&dbp, NULL, 0);
+if (ret != 0) {
+  /* Error handling goes here */
+}
+                                                                                                                                     
+/* Secondary */
+ret = db_create(&sdbp, NULL, 0);
+if (ret != 0) {
+  /* Error handling goes here */
+}
+
+/* Usually we want to support duplicates for secondary databases */
+ret = sdbp->set_flags(sdbp, DB_DUPSORT);
+if (ret != 0) {
+  /* Error handling goes here */
+}
+
+/* open the primary database */
+ret = dbp->open(dbp,        /* DB structure pointer */
+                NULL,       /* Transaction pointer */
+                "my_db.db", /* On-disk file that holds the database.
+                             * Required. */
+                NULL,       /* Optional logical database name */
+                DB_BTREE,   /* Database access method */
+                0,          /* Open flags */
+                0);         /* File mode (using defaults) */
+if (ret != 0) {
+  /* Error handling goes here */
+}
+
+/* open the secondary database */
+ret = sdbp->open(sdbp,          /* DB structure pointer */
+                 NULL,          /* Transaction pointer */
+                 "my_secdb.db", /* On-disk file that holds the database.
+                                 * Required. */
+                 NULL,          /* Optional logical database name */
+                 DB_BTREE,      /* Database access method */
+                 0,             /* Open flags */
+                 0);            /* File mode (using defaults) */
+if (ret != 0) {
+  /* Error handling goes here */
+}
+
+/* Now associate the secondary to the primary */
+dbp->associate(dbp,            /* Primary database */
+               NULL,           /* TXN id */
+               sdbp,           /* Secondary database */
+               get_sales_rep,  /* Callback used for key creation. */
+               0);             /* Flags */
+
+/*
+ * Zero out the DBT before using it.
+ */
+memset(&key, 0, sizeof(DBT));
+
+key.data = search_name;
+key.size = strlen(search_name) + 1;
+
+/* Now delete the secondary record. This causes the associated primary
+ * record to be deleted. If any other secondary databases have secondary
+ * records referring to the deleted primary record, then those secondary
+ * records are also deleted.
+ */
+ sdbp->del(sdbp, NULL, &key, 0); 
+
+ + + diff --git a/db/docs/gsg/C/usingDbt.html b/db/docs/gsg/C/usingDbt.html new file mode 100644 index 000000000..549b39ad6 --- /dev/null +++ b/db/docs/gsg/C/usingDbt.html @@ -0,0 +1,397 @@ + + + + + + Reading and Writing Database Records + + + + + + + + + +
+
+
+
+

Reading and Writing Database Records

+
+
+
+
+

+ When reading and writing database records, be aware that there are some slight differences in behavior depending on whether your database supports duplicate records. Two or more database records are considered to be duplicates of one another if they share the same key. The collection of records sharing the same key is called a duplicates set. In DB, a given key is stored only once for a single duplicates set.

+

+ By default, DB databases do + not support duplicate records. Where duplicate records are supported, + cursors (see below) are typically used + to access all of the records in the duplicates set. +

+
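+ Duplicate support must be configured before the database is opened. The following minimal sketch (error handling omitted) shows how a database might be set up for sorted duplicates using DB->set_flags(); the handle and file names are illustrative only:
+
+#include <db.h>
+
+...
+
+DB *my_database;
+
+/* Create the database handle */
+db_create(&my_database, NULL, 0);
+
+/* Allow sorted duplicate records. This must be set before the database
+ * is opened, and must match the flags used when the database was created.
+ */
+my_database->set_flags(my_database, DB_DUPSORT);
+
+/* Now open the database */
+my_database->open(my_database, NULL, "my_db.db", NULL, DB_BTREE,
+                  DB_CREATE, 0);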

+ DB provides two basic mechanisms for the storage and retrieval of database + key/data pairs: +

+
+
    +
  • +

+ The DB->put() and DB->get() methods provide the easiest access for all non-duplicate records in the database. These methods are described in this section.

    +
  • +
  • +

    Cursors provide several methods for putting and getting database + records. Cursors and their database access methods are described in + Using Cursors.

    +
  • +
+
+
+
+
+
+

Writing Records to the Database

+
+
+
+
+

+ Records are stored in the database using whatever organization is + required by the access method that you have selected. In some cases (such as + BTree), records are stored in a sort order that you may want to define + (see Setting Comparison Functions for more information). +

+

+ In any case, the mechanics of putting and getting database records do not + change once you have selected your access method, configured your + sorting routines (if any), and opened your database. From your + code's perspective, a simple database put and get is largely the + same no matter what access method you are using. +

+

+ You use DB->put() to put, or write, a database record. This method requires you to provide the record's key and data in the form of a pair of DBT structures. You can also provide one or more flags that control DB's behavior for the database write.

+

+ Of the flags available to this method, DB_NOOVERWRITE + may be interesting to you. This flag disallows overwriting (replacing) + an existing record in the database. If the provided key already exists + in the database, then this method returns DB_KEYEXIST even if + the database supports duplicates. +

+

+ For example: +

+ +
#include <db.h>
+#include <string.h>
+
+...
+
+char *description = "Grocery bill.";
+DBT key, data;
+DB *my_database;
+int ret;
+float money;
+
+/* Database open omitted for clarity */
+
+money = 122.45;
+
+/* Zero out the DBTs before using them. */
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+
+key.data = &money;
+key.size = sizeof(float);
+
+data.data = description;
+data.size = strlen(description) +1; 
+
+ret = my_database->put(my_database, NULL, &key, &data, DB_NOOVERWRITE);
+if (ret == DB_KEYEXIST) {
+    my_database->err(my_database, ret, 
+      "Put failed because key %f already exists", money);
+}
+
+
+
+
+
+

Getting Records from the Database

+
+
+
+
+

+ You can use the + DB->get() + + method to retrieve database records. Note that if your + database supports duplicate records, then by default this method will only + return the first record in a duplicate set. For this reason, if your + database supports duplicates, the common solution is to use a cursor to retrieve + records from it. Cursors are described in Using Cursors. +

+

+ (You can also retrieve a set of duplicate records using a bulk get. + To do this, you use the DB_MULTIPLE flag on the + call to + DB->get(). + + + For more information, see the DB Programmer's Reference Guide). +
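+ A minimal sketch of such a bulk get follows, assuming a hypothetical string-keyed database that supports duplicates (error handling omitted). The DB_MULTIPLE_INIT and DB_MULTIPLE_NEXT macros are used to walk the returned buffer:
+
+#include <db.h>
+#include <stdlib.h>
+#include <string.h>
+
+...
+
+DB *my_database;
+DBT key, data;
+void *p, *retdata;
+u_int32_t retdlen;
+char *search_name = "John Doe";
+int ret;
+
+/* Database open omitted for clarity */
+
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+
+key.data = search_name;
+key.size = strlen(search_name) + 1;
+
+/* The bulk buffer is supplied by the application (DB_DBT_USERMEM) and
+ * must be large enough to hold at least a page's worth of records.
+ */
+data.ulen = 1024 * 1024;
+data.data = malloc(data.ulen);
+data.flags = DB_DBT_USERMEM;
+
+ret = my_database->get(my_database, NULL, &key, &data, DB_MULTIPLE);
+if (ret == 0) {
+    for (DB_MULTIPLE_INIT(p, &data);;) {
+        DB_MULTIPLE_NEXT(p, &data, retdata, retdlen);
+        if (p == NULL)
+            break;
+        /* retdata points to one data item of length retdlen */
+    }
+}
+free(data.data);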

+

+ By default, DB->get() returns the first record found whose key matches the key provided on the call to this method. If your database supports duplicate records, you can change this behavior slightly by supplying the DB_GET_BOTH flag. This flag causes DB->get() to return the first record that matches the provided key and data.

+

+ If the specified key and/or data does not exist in the database, this + method returns DB_NOTFOUND. +

+ +
#include <db.h>
+#include <string.h>
+
+...
+
+DBT key, data;
+DB *my_database;
+float money;
+char *description;
+
+/* Database open omitted for clarity */
+
+money = 122.45;
+
+/* Zero out the DBTs before using them. */
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+
+/* 
+ * Use our own memory to retrieve the float.
+ * For data alignment purposes.
+ */
+key.data = &money;
+key.ulen = sizeof(float);
+key.flags = DB_DBT_USERMEM;
+
+my_database->get(my_database, NULL, &key, &data, 0);
+
+/* 
+ * Money is set into the memory that we supplied.
+ */
+description = data.data;
+

+ Note that in this example, the data.size field would be automatically set to the size of the retrieved data.
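+ The DB_GET_BOTH flag described above is not used in the previous example. The following minimal sketch shows how it might be used against a hypothetical database that supports duplicates and stores string keys and data (error handling omitted):
+
+#include <db.h>
+#include <string.h>
+
+...
+
+DBT key, data;
+DB *my_database;
+char *search_name = "John Doe";
+char *search_data = "503 555 5555";
+int ret;
+
+/* Database open omitted for clarity */
+
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+
+key.data = search_name;
+key.size = strlen(search_name) + 1;
+
+/* For DB_GET_BOTH, the data DBT is also an input: only a record whose
+ * key and data both match is returned.
+ */
+data.data = search_data;
+data.size = strlen(search_data) + 1;
+
+ret = my_database->get(my_database, NULL, &key, &data, DB_GET_BOTH);
+if (ret == DB_NOTFOUND) {
+    /* No record with exactly this key/data pairing exists */
+}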

+
+
+
+
+
+

Deleting Records

+
+
+
+
+

+ You can use the DB->del() method to delete a record from the database. If your database supports duplicate records, then all records associated with the provided key are deleted. To delete just one record from a list of duplicates, use a cursor. Cursors are described in Using Cursors.

+

+ You can also delete every record in the database by using DB->truncate().

+

For example:

+ +
#include <db.h>
+#include <string.h>
+
+...
+
+DBT key;
+DB *my_database;
+float money = 122.45;
+
+/* Database open omitted for clarity */
+
+/* Zero out the DBTs before using them. */
+memset(&key, 0, sizeof(DBT));
+
+key.data = &money;
+key.size = sizeof(float);
+
+my_database->del(my_database, NULL, &key, 0);
+
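+ A minimal sketch of the DB->truncate() call mentioned above (error handling omitted):
+
+#include <db.h>
+
+...
+
+DB *my_database;
+u_int32_t count;
+
+/* Database open omitted for clarity */
+
+/* Discard every record in the database. On success, count holds the
+ * number of records that were discarded.
+ */
+my_database->truncate(my_database, NULL, &count, 0);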
+
+
+
+
+

Data Persistence

+
+
+
+
+

+ When you perform a database modification, your modification is made + in the in-memory cache. This means that your data modifications + are not necessarily written to disk, and so your data may not appear + in the database after an application restart. +

+

+ Note that as a normal part of closing a database, its cache is + written to disk. However, in the event of an application or system + failure, there is no guarantee that your databases will close + cleanly. In this event, it is possible for you to lose data. Under + extremely rare circumstances, it is also possible for you to + experience database corruption. +

+

+ Therefore, if you care about whether your data persists across application runs, and to guard against the rare possibility of database corruption, you should use transactions to protect your database modifications. Every time you commit a transaction, DB ensures that the data will not be lost due to application or system failure. For information on transactions, see the Berkeley DB Programmer's Tutorial and Reference Guide.

+

+ If you do not want to use transactions, then the assumption is that + your data is of a nature that it need not exist the next time your + application starts. You may want this if, for example, you are using + DB to cache data relevant only to the current application + runtime. +

+

+ If, however, you are not using transactions for some reason and you still want some guarantee that your database modifications are persistent, then you should periodically call DB->sync(). Syncs cause the entire contents of your in-memory cache to be written to disk. As such, they are quite expensive and you should use them sparingly.

+

+ Remember that by default a sync is performed any time a non-transactional database is closed cleanly. (You can override this behavior by specifying DB_NOSYNC on the call to DB->close().) That said, you can manually run a sync at any time by calling DB->sync().
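+ A minimal sketch of both calls follows; whether skipping the final sync with DB_NOSYNC is acceptable depends entirely on your application's durability requirements:
+
+#include <db.h>
+
+...
+
+DB *my_database;
+
+/* Database open and writes omitted for clarity */
+
+/* Flush any cached modifications to disk now. */
+my_database->sync(my_database, 0);
+
+/* ... additional writes ... */
+
+/* Close without the implicit final sync. Only do this if losing the
+ * most recent unflushed changes is acceptable.
+ */
+my_database->close(my_database, DB_NOSYNC);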

+
+

Note

+

+ If your application or system crashes and you are not using transactions, then you should either discard and recreate your databases, or verify them. You can verify a database using DB->verify(). If your databases do not verify cleanly, use the db_dump command to salvage as much of the database as is possible. Use either the -R or -r command line options to control how aggressive db_dump should be when salvaging your databases.
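+ A minimal sketch of a verification pass (error handling simplified). The handle passed to DB->verify() may not be used again after the call, so a handle is created just for this purpose; the file name is illustrative only:
+
+#include <db.h>
+
+...
+
+DB *dbp;
+int ret;
+
+/* Create a handle used only for verification */
+ret = db_create(&dbp, NULL, 0);
+if (ret != 0) {
+  /* Error handling goes here */
+}
+
+/* Verify the database file. The handle may not be used again after
+ * this call, whether or not verification succeeds.
+ */
+ret = dbp->verify(dbp, "my_db.db", NULL, NULL, 0);
+if (ret != 0) {
+  /* The database did not verify cleanly. Consider salvaging its
+   * contents with db_dump -r or -R.
+   */
+}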

+
+
+
+ + + diff --git a/db/docs/gsg/CXX/BerkeleyDB-Core-Cxx-GSG.pdf b/db/docs/gsg/CXX/BerkeleyDB-Core-Cxx-GSG.pdf new file mode 100644 index 000000000..c01f86255 Binary files /dev/null and b/db/docs/gsg/CXX/BerkeleyDB-Core-Cxx-GSG.pdf differ diff --git a/db/docs/gsg/CXX/CoreCursorUsage.html b/db/docs/gsg/CXX/CoreCursorUsage.html new file mode 100644 index 000000000..fa406ab07 --- /dev/null +++ b/db/docs/gsg/CXX/CoreCursorUsage.html @@ -0,0 +1,293 @@ + + + + + + Cursor Example + + + + + + + + + +
+
+
+
+

Cursor Example

+
+
+
+
+

+ In Database Usage Example we wrote an application that loaded two databases with vendor and inventory information. In this example, we will write an application to display all of the items in the inventory database. As a part of showing any given inventory item, we will look up the vendor who can provide the item and show the vendor's contact information.

+

+ Specifically, the excxx_example_database_read application does the following:

+
+
    +
  1. +

+ Opens the inventory and vendor databases that were created by our excxx_example_database_load application. See excxx_example_database_load for information on how that application creates the databases and writes data to them.

    +
  2. +
  3. +

    Obtains a cursor from the inventory database.

    +
  4. +
  5. +

    + Steps through the inventory database, displaying + each record as it goes. +

    +
  6. +
  7. +

    + Gets the name of the vendor for that inventory item from the + inventory record. +

    +
  8. +
  9. +

    + Uses the vendor name to look up the vendor record in the vendor + database. +

    +
  10. +
  11. +

    Displays the vendor record.

    +
  12. +
+
+

+ Remember that you can find the complete implementation of this application + in: +

+
DB_INSTALL/examples_cxx/getting_started
+

+ where DB_INSTALL is the location where you + placed your DB distribution. +

+
+ +

+ Example 4.1 excxx_example_database_read +

+

+ To begin, we include the necessary header files and perform our + forward declarations. We also write our usage() + function. +

+ +
// File: excxx_example_database_read.cpp
+#include <iostream>
+#include <fstream>
+#include <cstdlib>
+                                                                                                                                    
+#include "MyDb.hpp"
+#include "gettingStartedCommon.hpp"
+
+// Forward declarations
+int show_all_records(MyDb &inventoryDB, MyDb &vendorDB);
+int show_vendor(MyDb &vendorDB, const char *vendor); 
+

+ Next we write our main() function. Note that it is somewhat more complicated than it needs to be here, because we will be extending it in the next chapter to perform inventory item lookups.

+ +
// Displays all inventory items and the associated vendor record.
+int
+main (int argc, char *argv[])
+{
+    // Initialize the path to the database files
+    std::string databaseHome("./");
+
+    // Database names
+    std::string vDbName("vendordb.db");
+    std::string iDbName("inventorydb.db");
+
+    // Parse the command line arguments
+    // Omitted for brevity
+
+    try
+    {
+        // Open all databases.
+        MyDb inventoryDB(databaseHome, iDbName);
+        MyDb vendorDB(databaseHome, vDbName);
+
+        show_all_records(inventoryDB, vendorDB);
+    } catch(DbException &e) {
+        std::cerr << "Error reading databases. " << std::endl;
+        std::cerr << e.what() << std::endl;
+        return(e.get_errno());
+    } catch(std::exception &e) {
+        std::cerr << "Error reading databases. " << std::endl;
+        std::cerr << e.what() << std::endl;
+        return(-1);
+    }
+
+    return(0);
+} // End main 
+

+ Next we need to write the show_all_records() + function. This function displays all + of the inventory records found in the inventory database. Once it shows + the inventory record, it retrieves the vendor's name from that record + and uses it to look up and display the appropriate vendor record: +

+ +
// Shows all the records in the inventory database.
+// For each inventory record shown, the appropriate
+// vendor record is also displayed.
+int
+show_all_records(MyDb &inventoryDB, MyDb &vendorDB)
+{
+    // Get a cursor to the inventory db
+    Dbc *cursorp;
+    try {
+        inventoryDB.getDb().cursor(NULL, &cursorp, 0);
+
+        // Iterate over the inventory database, from the first record
+        // to the last, displaying each in turn
+        Dbt key, data;
+        int ret;
+        while ((ret = cursorp->get(&key, &data, DB_NEXT)) == 0 )
+        {
+            InventoryData inventoryItem(data.get_data());
+            inventoryItem.show();
+
+            show_vendor(vendorDB, inventoryItem.getVendor().c_str());
+        }
+    } catch(DbException &e) {
+        inventoryDB.getDb().err(e.get_errno(), "Error in show_all_records");
+        cursorp->close();
+        throw e;
+    } catch(std::exception &e) {
+        cursorp->close();
+        throw e;
+    }
+
+    cursorp->close();
+    return (0);
+} 
+

+ Note that the InventoryData class that we use here + is described in + InventoryData Class. +

+

+ Having displayed the inventory record, we now want to display the corresponding vendor record. In this case we do not need to use a cursor; using one here would complicate our code slightly for no real gain. Instead, we simply perform a get() directly against the vendor database.

+ +
// Shows a vendor record. Each vendor record is an instance of
+// a vendor structure. See loadVendorDB() in
+// excxx_example_database_load for how this structure was originally
+// put into the database.
+int
+show_vendor(MyDb &vendorDB, const char *vendor)
+{
+    Dbt data;
+    VENDOR my_vendor;
+
+    try {
+        // Set the search key to the vendor's name
+        // vendor is explicitly cast to char * to stop a compiler
+        // complaint.
+        Dbt key((char *)vendor, strlen(vendor) + 1);
+
+        // Make sure we use the memory we set aside for the VENDOR
+        // structure rather than the memory that DB allocates.
+        // Some systems may require structures to be aligned in memory
+        // in a specific way, and DB may not get it right.
+
+        data.set_data(&my_vendor);
+        data.set_ulen(sizeof(VENDOR));
+        data.set_flags(DB_DBT_USERMEM);
+
+        // Get the record
+        vendorDB.getDb().get(NULL, &key, &data, 0);
+        std::cout << "        " << my_vendor.street << "\n"
+                  << "        " << my_vendor.city << ", "
+                  << my_vendor.state << "\n"
+                  << "        " << my_vendor.zipcode << "\n"
+                  << "        " << my_vendor.phone_number << "\n"
+                  << "        Contact: " << my_vendor.sales_rep << "\n"
+                  << "                 " << my_vendor.sales_rep_phone
+                  << std::endl;
+
+    } catch(DbException &e) {
+        vendorDB.getDb().err(e.get_errno(), "Error in show_vendor");
+        throw e;
+    } catch(std::exception &e) {
+        throw e;
+    }
+    return (0);
+} 
+
+

+ That completes the implementation of + excxx_example_database_read(). In the next chapter, we + will extend this application to make use of a secondary database so that + we can query the inventory database for a specific inventory item. +

+
+ + + diff --git a/db/docs/gsg/CXX/CoreDBAdmin.html b/db/docs/gsg/CXX/CoreDBAdmin.html new file mode 100644 index 000000000..c7cd1f8cf --- /dev/null +++ b/db/docs/gsg/CXX/CoreDBAdmin.html @@ -0,0 +1,147 @@ + + + + + + Administrative Methods + + + + + + + + + +
+
+
+
+

Administrative Methods

+
+
+
+
+

+ The following Db methods may be useful to you when managing DB databases:

+
+
    +
  • +

    + + Db::get_open_flags() +

    +

    + Returns the current open flags. It is an error to use this method on + an unopened database. +

    + +
    #include <db_cxx.h>
    +...
    +Db db(NULL, 0);
    +u_int32_t open_flags;
    +
    +// Database open and subsequent operations omitted for clarity
    +
    +db.get_open_flags(&open_flags); 
    +
  • +
  • +

    + + Db::remove() +

    +

    + Removes the specified database. If no value is given for the + database parameter, then the entire file + referenced by this method is removed. +

    +

    + Never remove a database that has handles opened for it. Never remove a file that + contains databases with opened handles. +

    + +
    #include <db_cxx.h>
    +...
    +Db db(NULL, 0);
    +
    +// Database open and subsequent operations omitted for clarity
    +
    +db.remove("mydb.db",             // Database file to remove 
    +          NULL,                  // Database to remove. This is
    +                                 // NULL so the entire file is
    +                                 // removed.  
    +         0);                     // Flags. None used.
    +
  • +
  • +

    + + Db::rename() +

    +

    + Renames the specified database. If no value is given for the + database parameter, then the entire file + referenced by this method is renamed. +

    +

    + Never rename a database that has handles opened for it. Never rename a file that + contains databases with opened handles. +

    + +
    #include <db_cxx.h>
    +...
    +Db db(NULL, 0);
    +
    +// Database open and subsequent operations omitted for clarity
    +
    +db.rename("mydb.db",             // Database file to rename
    +          NULL,                  // Database to rename. This is
    +                                 // NULL so the entire file is
    +                                 // renamed. 
    +         "newdb.db",             // New database file name
    +         0);                     // Flags. None used.
    +
  • +
+
+ + + +
+ + + diff --git a/db/docs/gsg/CXX/CoreDbCXXUsage.html b/db/docs/gsg/CXX/CoreDbCXXUsage.html new file mode 100644 index 000000000..c21905040 --- /dev/null +++ b/db/docs/gsg/CXX/CoreDbCXXUsage.html @@ -0,0 +1,191 @@ + + + + + + Database Example + + + + + + + + + +
+
+
+
+

Database Example

+
+
+
+
+

+ Throughout this book we will build a couple of applications that load + and retrieve inventory data from DB databases. While we are not yet ready to + begin reading from or writing to our databases, we can at least create + the class that we will use to manage our databases. +

+

+ Note that subsequent examples in this book will build on this code to + perform the more interesting work of writing to and reading from the + databases. +

+

+ Note that you can find the complete implementation of these functions + in: +

+
DB_INSTALL/examples_cxx/getting_started
+

+ where DB_INSTALL is the location where you + placed your DB distribution. +

+
+ +

+ Example 2.1 MyDb Class +

+

+ To manage our database open and close activities, we encapsulate them in the MyDb class. There are several good reasons to do this, the most important being that we can ensure our databases are closed by putting that activity in the MyDb class destructor.

+

+ To begin, we create our class definition: +

+ +
// File: MyDb.hpp
+#include <db_cxx.h>
+                                                                                                                                    
+class MyDb
+{
+public:
+    // Constructor requires a path to the database,
+    // and a database name.
+    MyDb(std::string &path, std::string &dbName);
+                                                                                                                                    
+    // Our destructor just calls our private close method.
+    ~MyDb() { close(); }
+                                                                                                                                    
+    inline Db &getDb() {return db_;}
+                                                                                                                                    
+private:
+    Db db_;
+    std::string dbFileName_;
+    u_int32_t cFlags_;
+                                                                                                                                    
+    // Make sure the default constructor is private
+    // We don't want it used.
+    MyDb() : db_(NULL, 0) {}
+                                                                                                                                    
+    // We put our database close activity here.
+    // This is called from our destructor. In
+    // a more complicated example, we might want
+    // to make this method public, but a private
+    // method is more appropriate for this example.
+    void close();
+}; 
+

+ Next we need the implementation for the constructor: +

+ +
// File: MyDb.cpp
+#include "MyDb.hpp"
+
+// Class constructor. Requires a path to the location
+// where the database is located, and a database name
+MyDb::MyDb(std::string &path, std::string &dbName)
+    : db_(NULL, 0),               // Instantiate Db object
+      dbFileName_(path + dbName), // Database file name
+      cFlags_(DB_CREATE)          // If the database doesn't yet exist,
+                                  // allow it to be created.
+{
+    try
+    {
+        // Redirect debugging information to std::cerr
+        db_.set_error_stream(&std::cerr);
+                                                                                                                                    
+        // Open the database
+        db_.open(NULL, dbFileName_.c_str(), NULL, DB_BTREE, cFlags_, 0);
+    }
+    // DbException is not a subclass of std::exception, so we
+    // need to catch them both.
+    catch(DbException &e)
+    {
+        std::cerr << "Error opening database: " << dbFileName_ << "\n";
+        std::cerr << e.what() << std::endl;
+    }
+    catch(std::exception &e)
+    {
+        std::cerr << "Error opening database: " << dbFileName_ << "\n";
+        std::cerr << e.what() << std::endl;
+    }
+}
+

+ And then we need the implementation for the + close() method: + +

+ +
// Private member used to close a database. Called from the class
+// destructor.
+void
+MyDb::close()
+{
+    // Close the db
+    try
+    {
+        db_.close(0);
+        std::cout << "Database " << dbFileName_
+                  << " is closed." << std::endl;
+    }
+    catch(DbException &e)
+    {
+        std::cerr << "Error closing database: " << dbFileName_ << "\n";
+        std::cerr << e.what() << std::endl;
+    }
+    catch(std::exception &e)
+    {
+        std::cerr << "Error closing database: " << dbFileName_ << "\n";
+        std::cerr << e.what() << std::endl;
+    }
+} 
+
+
+ + + diff --git a/db/docs/gsg/CXX/CoreEnvUsage.html b/db/docs/gsg/CXX/CoreEnvUsage.html new file mode 100644 index 000000000..8d1268545 --- /dev/null +++ b/db/docs/gsg/CXX/CoreEnvUsage.html @@ -0,0 +1,165 @@ + + + + + + Managing Databases in Environments + + + + + + + + + +
+
+
+
+

Managing Databases in Environments

+
+
+
+
+

+ In Environments, we introduced + environments. While environments are not used in the example built in this book, + they are so commonly used for a wide class of DB applications that it is + necessary to show their basic usage, if only from a completeness perspective. +

+

+ To use an environment, you must first open it. At open time, you must identify the directory in which it resides. This directory must exist prior to the open attempt. You can also identify open properties, such as whether the environment can be created if it does not already exist.

+

+ For example, to + create an environment handle and + open an environment: +

+ +
#include <db_cxx.h>
+...
+u_int32_t env_flags = DB_CREATE;  // If the environment does not
+                                  // exist, create it.
+std::string envHome("/export1/testEnv");
+DbEnv myEnv(0);
+
+try {
+    myEnv.open(envHome.c_str(), env_flags, 0);
+} catch(DbException &e) {
+    std::cerr << "Error opening database environment: "
+              << envHome << std::endl;
+    std::cerr << e.what() << std::endl;
+    exit( -1 );
+} catch(std::exception &e) {
+    std::cerr << "Error opening database environment: "
+              << envHome << std::endl;
+    std::cerr << e.what() << std::endl;
+    exit( -1 );
+} 
+

+ Once an environment is opened, you can open databases in it. Note that by default databases + are stored in the environment's home directory, or relative to that directory if you + provide any sort of a path in the database's file name: +

+ +
#include <db_cxx.h>
+...
+u_int32_t env_flags = DB_CREATE;  // If the environment does not
+                                  // exist, create it.
+u_int32_t db_flags = DB_CREATE;   // If the database does not
+                                  // exist, create it.
+std::string envHome("/export1/testEnv");
+std::string dbName("mydb.db");
+DbEnv myEnv(0);
+Db *myDb;
+
+try {
+    myEnv.open(envHome.c_str(), env_flags, 0);
+    myDb = new Db(&myEnv, 0);
+    myDb->open(NULL,
+               dbName.c_str(),
+               NULL,
+               DB_BTREE,
+               db_flags,
+               0);
+} catch(DbException &e) {
+    std::cerr << "Error opening database environment: "
+              << envHome 
+              << " and database "
+              << dbName << std::endl;
+    std::cerr << e.what() << std::endl;
+    exit( -1 );
+} catch(std::exception &e) {
+    std::cerr << "Error opening database environment: "
+              << envHome 
+              << " and database "
+              << dbName << std::endl;
+    std::cerr << e.what() << std::endl;
+    exit( -1 );
+} 
+

+ When you are done with an environment, you must close it. Before you close an environment, + make sure you close any opened databases. +

+ +
try {
+    if (myDb != NULL) {
+        myDb->close(0);
+    }
+    myEnv.close(0);
+    
+} catch(DbException &e) {
+    std::cerr << "Error closing database environment: "
+              << envHome 
+              << " or database "
+              << dbName << std::endl;
+    std::cerr << e.what() << std::endl;
+    exit( -1 );
+} catch(std::exception &e) {
+    std::cerr << "Error closing database environment: "
+              << envHome 
+              << " or database "
+              << dbName << std::endl;
+    std::cerr << e.what() << std::endl;
+    exit( -1 );
+} 
+
+ + + diff --git a/db/docs/gsg/CXX/Cursors.html b/db/docs/gsg/CXX/Cursors.html new file mode 100644 index 000000000..7b657c03e --- /dev/null +++ b/db/docs/gsg/CXX/Cursors.html @@ -0,0 +1,179 @@ + + + + + + Chapter 4. Using Cursors + + + + + + + + + +
+
+
+
+

Chapter 4. Using Cursors

+
+
+
+
+ +

+ Cursors provide a mechanism by which you can iterate over the records in a database. Using cursors, you can get, put, and delete database records. If a database allows duplicate records, then cursors are the easiest way to access anything other than the first record for a given key.

+

+ This chapter introduces cursors. It explains how to open and close them, how + to use them to modify databases, and how to use them with duplicate records. +

+
+
+
+
+

Opening and Closing Cursors

+
+
+
+
+

+ Cursors are managed using the Dbc class. To use a cursor, you must open it using the Db::cursor() method.

+

For example:

+ +
#include <db_cxx.h>
+
+...
+
+Dbc *cursorp;
+Db my_database(NULL, 0);
+
+// Database open omitted for clarity
+
+// Get a cursor
+my_database.cursor(NULL, &cursorp, 0); 
+

+ When you are done with the cursor, you should close it. To close a cursor, call the Dbc::close() method. Note that closing your database while cursors are still open within the scope of the Db handle, especially if those cursors are writing to the database, can have unpredictable results. Always close your cursors before closing your database.

+ +
#include <db_cxx.h>
+
+...
+
+Dbc *cursorp;
+Db my_database(NULL, 0);
+
+// Database and cursor open omitted for clarity
+
+if (cursorp != NULL) 
+    cursorp->close(); 
+
+my_database.close(0);
+
+
+ + + diff --git a/db/docs/gsg/CXX/DB.html b/db/docs/gsg/CXX/DB.html new file mode 100644 index 000000000..ec569c8ba --- /dev/null +++ b/db/docs/gsg/CXX/DB.html @@ -0,0 +1,182 @@ + + + + + + Chapter 2. Databases + + + + + + + + + +
+
+
+
+

Chapter 2. Databases

+
+
+
+
+
+

+ Table of Contents +

+
+
+ + Opening Databases + +
+
+ + Closing Databases + +
+
+ + Database Open Flags + +
+
+ + Administrative Methods + +
+
+ + Error Reporting Functions + +
+
+ + Managing Databases in Environments + +
+
+ + Database Example + +
+
+
+

In Berkeley DB, a database is a collection of records. Records, + in turn, consist of two parts: key and data. That is, records consist of + key/data pairings. +

+

+ Conceptually, you can think of a database as containing a two-column table where column 1 contains a key and column 2 contains data. Both the key and the data are managed using Dbt class instances (see Database Records for details on this class). So, fundamentally, using a DB database involves putting, getting, and deleting database records, which in turn involves efficiently managing information encapsulated by Dbt objects. The next several chapters of this book are dedicated to those activities.

+
+
+
+
+

Opening Databases

+
+
+
+
+

+ You open a database by instantiating a Db object + and then calling its open() method. +

+

+ Note that by default, DB does not create databases if they do not already exist. + To override this behavior, specify the + DB_CREATE flag on the + open() method. +

+

+ The following code fragment illustrates a database open: + +

+ +
#include <db_cxx.h>
+
+...
+
+Db db(NULL, 0);               // Instantiate the Db object
+
+u_int32_t oFlags = DB_CREATE; // Open flags;
+
+try {
+    // Open the database
+    db.open(NULL,                // Transaction pointer 
+            "my_db.db",          // Database file name 
+            NULL,                // Optional logical database name
+            DB_BTREE,            // Database access method
+            oFlags,              // Open flags
+            0);                  // File mode (using defaults)
+// DbException is not subclassed from std::exception, so
+// need to catch both of these.
+} catch(DbException &e) {
+    // Error handling code goes here    
+} catch(std::exception &e) {
+    // Error handling code goes here
+} 
+
+
+ + + diff --git a/db/docs/gsg/CXX/DBEntry.html b/db/docs/gsg/CXX/DBEntry.html new file mode 100644 index 000000000..08fee38fe --- /dev/null +++ b/db/docs/gsg/CXX/DBEntry.html @@ -0,0 +1,191 @@ + + + + + + Chapter 3. Database Records + + + + + + + + + +
+
+
+
+

Chapter 3. Database Records

+
+
+
+
+ +

+ DB records contain two parts — a key and some data. Both the key and its corresponding data are encapsulated in Dbt class objects. Therefore, to access a DB record, you need two such objects, one for the key and one for the data.

+

+ Dbt objects provide a void * + data member that you use to point to your data, and another member that identifies + the data length. They can therefore be used to store anything from simple + primitive data to complex class objects so long as the information you want to + store resides in a single contiguous block of memory. +

+

+ This chapter describes Dbt usage. It also introduces storing and retrieving key/value pairs from a database.

+
+
+
+
+

Using Database Records

+
+
+
+
+

+ Each database record comprises two Dbt objects — one for the key and another for the data.

+ +
#include <db_cxx.h>
+#include <string.h>
+
+...
+
+float money = 122.45;
+char *description = "Grocery bill.";
+
+Dbt key(&money, sizeof(float));
+Dbt data(description, strlen(description)+1); 
+

+ Note that in the following example we do not allow DB to assign the memory for the retrieval of the money value. The reason is that some systems may require float values to have a specific alignment, and the memory returned by Db may not be properly aligned (the same problem may exist for structures on some systems). We tell DB to use our memory instead of its own by specifying the DB_DBT_USERMEM flag. Be aware that when we do this, we must also identify how much user memory is available through the use of the ulen field.

+ +
#include <db_cxx.h>
+#include <string.h>
+
+...
+
+Dbt key, data;
+float money;
+char *description;
+
+key.set_data(&money);
+key.set_ulen(sizeof(float));
+key.set_flags(DB_DBT_USERMEM);
+
+// Database retrieval code goes here
+
+// Money is set into the memory that we supplied.
+description = (char *)data.get_data();
+
+
+ + + diff --git a/db/docs/gsg/CXX/DBOpenFlags.html b/db/docs/gsg/CXX/DBOpenFlags.html new file mode 100644 index 000000000..b091635a6 --- /dev/null +++ b/db/docs/gsg/CXX/DBOpenFlags.html @@ -0,0 +1,118 @@ + + + + + + Database Open Flags + + + + + + + + + +
+
+
+
+

Database Open Flags

+
+
+
+
+

+ The following are the flags that you may want to use at database open time. + Note that this list is not exhaustive — it includes only those flags likely + to be of interest for introductory, single-threaded + database applications. For a complete list of the flags available to you, see the + + Berkeley DB C++ API Reference Guide. +

+
+

Note

+

+ To specify more than one flag on the call to Db::open(), you must bitwise inclusively OR them together:

+ +
u_int32_t open_flags = DB_CREATE | DB_EXCL;
+
+
+
    +
  • +

    + DB_CREATE +

    +

    + If the database does not currently exist, create it. By default, the database open + fails if the database does not already exist. +

    +
  • +
  • +

    + DB_EXCL +

    +

    + Exclusive database creation. Causes the database open to fail if the database + already exists. This flag is only meaningful when used with + DB_CREATE. +

    +
  • +
  • +

    + DB_RDONLY +

    +

    + Open the database for read operations only. Causes any subsequent database write + operations to fail. +

    +
  • +
  • +

    + DB_TRUNCATE +

    +

    + Physically truncate (empty) the on-disk file that contains the database. + Causes DB to delete all databases physically contained in that file. +

    +
  • +
+
+
+ + + diff --git a/db/docs/gsg/CXX/DbCXXUsage.html b/db/docs/gsg/CXX/DbCXXUsage.html new file mode 100644 index 000000000..894a34089 --- /dev/null +++ b/db/docs/gsg/CXX/DbCXXUsage.html @@ -0,0 +1,530 @@ + + + + + + Database Usage Example + + + + + + + + + +
+
+
+
+

Database Usage Example

+
+
+
+
+

+ In Database Example we created + a class that opens and closes a database for us. + We now make use of that class to load inventory data into + two databases that we will use for our inventory system. +

+

+ Again, remember that you can find the complete implementation for these functions + in: +

+
DB_INSTALL/examples_cxx/getting_started
+

+ where DB_INSTALL is the location where you + placed your DB distribution. +

+
+ +

+ Example 3.1 VENDOR Structure +

+

+ We want to store data related to an inventory system. There are two + types of information that we want to manage: inventory data and related + vendor contact information. To manage this information, we could + have created a structure for each type of data, but to illustrate + storing mixed data without a structure we refrain from creating one + for the inventory data. +

+

+ We now show the definition of the VENDOR structure. + Note that the VENDOR structure uses fixed-length fields. + This is not necessary and in fact could + represent a waste of resources if the number of vendors stored in + our database scales to very large numbers. However, for simplicity we use + fixed-length fields anyway, especially + given that our sample data contains so few vendor records. +

+ +
// File: gettingStartedCommon.hpp
+#define MAXFIELD 20
+typedef struct vendor {
+    char name[MAXFIELD];             // Vendor name
+    char street[MAXFIELD];           // Street name and number
+    char city[MAXFIELD];             // City
+    char state[3];                   // Two-character US state code
+    char zipcode[6];                 // US zipcode
+    char phone_number[13];           // Vendor phone number
+    char sales_rep[MAXFIELD];        // Name of sales representative
+    char sales_rep_phone[MAXFIELD];  // Sales rep's phone number
+} VENDOR;
+
+
+ +

+ Example 3.2 InventoryData Class +

+

+ In order to manage our actual inventory data, we create a class that encapsulates the data that we want to store for each inventory record. Beyond simple data encapsulation, this class is also capable of marshalling the inventory data into a single contiguous buffer for the purpose of storing that data in a DB database.

+

+ We also provide two constructors for this class. The default + constructor simply initializes all our data members for us. A second + constructor is also provided that is capable of populating our data + members from a void *. This second constructor is + not really needed until the next chapter where we show how to read + data from the databases, but we include it here for the purpose of + completeness anyway. +

+

+ To simplify things a bit, we include the entire implementation for this + class in gettingStartedCommon.hpp along with + our VENDOR structure definition. +

+

+ To begin, we create the public getter and setter methods that we + use with our class' private members. We also show the implementation + of the method that we use to initialize all our private members. +

+ +
class InventoryData
+{
+public:
+    inline void setPrice(double price) {price_ = price;}
+    inline void setQuantity(long quantity) {quantity_ = quantity;}
+    inline void setCategory(std::string &category) {category_ = category;}
+    inline void setName(std::string &name) {name_ = name;}
+    inline void setVendor(std::string &vendor) {vendor_ = vendor;}
+    inline void setSKU(std::string &sku) {sku_ = sku;}
+                                                                                                                                    
+    inline double& getPrice() {return(price_);}
+    inline long& getQuantity() {return(quantity_);}
+    inline std::string& getCategory() {return(category_);}
+    inline std::string& getName() {return(name_);}
+    inline std::string& getVendor() {return(vendor_);}
+    inline std::string& getSKU() {return(sku_);}
+                                                                                                                                    
+    // Initialize our data members
+    void clear()
+    {
+        price_ = 0.0;
+        quantity_ = 0;
+        category_.clear();
+        name_.clear();
+        vendor_.clear();
+        sku_.clear();
+    } 
+

+ Next we implement our constructors. The default constructor simply calls clear(). The second constructor takes a void * as an argument, which it then uses to initialize the data members. Note, again, that we will not actually use this second constructor in this chapter, but we show it here just to be complete.

+ +
    // Default constructor
+    InventoryData() { clear(); }
+                                                                                                                                    
+    // Constructor from a void *
+    // For use with the data returned from a bdb get
+    InventoryData(void *buffer)
+    {
+        char *buf = (char *)buffer;
+                                                                                                                                    
+        price_ = *((double *)buf);
+        bufLen_ = sizeof(double);
+                                                                                                                                    
+        quantity_ = *((long *)(buf + bufLen_));
+        bufLen_ += sizeof(long);
+                                                                                                                                    
+        name_ = buf + bufLen_;
+        bufLen_ += name_.size() + 1;
+                                                                                                                                    
+        sku_ = buf + bufLen_;
+        bufLen_ += sku_.size() + 1;
+                                                                                                                                    
+        category_ = buf + bufLen_;
+        bufLen_ += category_.size() + 1;
+                                                                                                                                    
+        vendor_ = buf + bufLen_;
+        bufLen_ += vendor_.size() + 1;
+    } 
+

+ Next we provide a couple of methods for returning the class' buffer and + the size of the buffer. These are used for actually storing the class' + data in a DB database. +

+ +
    // Marshals this class's data members into a single
+    // contiguous memory location for the purpose of storing
+    // the data in a database.
+    char *
+    getBuffer()
+    {
+        // Zero out the buffer
+        memset(databuf_, 0, 500);
+        // Now pack the data into a single contiguous memory location for
+        // storage.
+        bufLen_ = 0;
+        int dataLen = 0;
+                                                                                                                                    
+        dataLen = sizeof(double);
+        memcpy(databuf_, &price_, dataLen);
+        bufLen_ += dataLen;
+                                                                                                                                    
+        dataLen = sizeof(long);
+        memcpy(databuf_ + bufLen_, &quantity_, dataLen);
+        bufLen_ += dataLen;
+                                                                                                                                    
+        packString(databuf_, name_);
+        packString(databuf_, sku_);
+        packString(databuf_, category_);
+        packString(databuf_, vendor_);
+                                                                                                                                    
+        return (databuf_);
+    }
+                                                                                                                                    
+    // Returns the size of the buffer. Used for storing
+    // the buffer in a database.
+    inline int getBufferSize() { return (bufLen_); } 
+

+ Our last public method is a utility method that we use to get the class + to show itself. +

+ +
     // Utility function used to show the contents of this class
+    void
+    show() {
+        std::cout << "\nName:         " << name_ << std::endl;
+        std::cout << "    SKU:        " << sku_ << std::endl;
+        std::cout << "    Price:      " << price_ << std::endl;
+        std::cout << "    Quantity:   " << quantity_ << std::endl;
+        std::cout << "    Category:   " << category_ << std::endl;
+        std::cout << "    Vendor:     " << vendor_ << std::endl;
+    } 
+

+ Finally, we provide a private method that is used to help us pack data + into our buffer, and we declare our private data members. +

+ +
private:
+
+    // Utility function that appends a char * to the end of
+    // the buffer.
+    void
+    packString(char *buffer, std::string &theString)
+    {
+        int string_size = theString.size() + 1;
+        memcpy(buffer+bufLen_, theString.c_str(), string_size);
+        bufLen_ += string_size;
+    }
+
+    // Data members
+    std::string category_, name_, vendor_, sku_;
+    double price_;
+    long quantity_;
+    int bufLen_;
+    char databuf_[500];
+}; 
+
+
+ +

+ Example 3.3 excxx_example_database_load +

+

+ Our initial sample application loads database information from + several flat files. To save space, we won't show all the details of + this example program. However, as always you can find the complete + implementation for this program here: +

+
DB_INSTALL/examples_cxx/getting_started
+

+ where DB_INSTALL is the location where you + placed your DB distribution. +

+

+ We begin with the normal include directives and forward declarations: +

+ +
// File: excxx_example_database_load.cpp
+#include <iostream>
+#include <fstream>
+#include <cstdlib>
+                                                                                                                                    
+#include "MyDb.hpp"
+#include "gettingStartedCommon.hpp" 
+
+// Forward declarations
+void loadVendorDB(MyDb&, std::string&);
+void loadInventoryDB(MyDb&, std::string&);
+
+

+ Next we begin our main() function with the variable + declarations and command line parsing that is normal for most command + line applications: +

+ +
// Loads the contents of vendors.txt and inventory.txt into
+// Berkeley DB databases. 
+int
+main(int argc, char *argv[])
+{
+    // Initialize the path to the database files
+    std::string basename("./");
+    std::string databaseHome("./");
+                                                                                                                                    
+    // Database names
+    std::string vDbName("vendordb.db");
+    std::string iDbName("inventorydb.db");
+
+    // Parse the command line arguments here and determine 
+    // the location of the flat text files containing the 
+    // inventory data here. This step is omitted for clarity.
+
+    //  Identify the full name for our input files, which should
+    //  also include some path information.
+    std::string inventoryFile = basename + "inventory.txt";
+    std::string vendorFile = basename + "vendors.txt";
+                                                                                                                                    
+    try
+    {
+        // Open all databases.
+        MyDb inventoryDB(databaseHome, iDbName);
+        MyDb vendorDB(databaseHome, vDbName);
+                                                                                                                                    
+        // Load the vendor database
+        loadVendorDB(vendorDB, vendorFile);
+
+        // Load the inventory database
+        loadInventoryDB(inventoryDB, inventoryFile);
+    } catch(DbException &e) {
+        std::cerr << "Error loading databases. " << std::endl;
+        std::cerr << e.what() << std::endl;
+        return(e.get_errno());
+    } catch(std::exception &e) {
+        std::cerr << "Error loading databases. " << std::endl;
+        std::cerr << e.what() << std::endl;
+        return(-1);
+    }
+                                                                                                                                    
+    return(0);
+} // End main 
+

+ Note that we do not explicitly close our databases here. This is because + the databases are encapsulated in MyDb class + objects, and those objects are on the stack. When they go out of scope, + their destructors will cause the database close to occur. +

+

+ Notice that there is not a lot to this function because we have pushed + off all the database activity to other places. +

+

+ Next we show the implementation of + loadVendorDB(). We load this data by + scanning (line by line) the contents of the + vendors.txt file into a VENDOR structure. Once we have a + line scanned into the structure, we can store that structure into our + vendors database. +

+

+ Note that we use the vendor's name as the key here. In doing so, we assume that the vendor's name is unique in our database. If it were not, we would either have to select a different key, or architect our application such that it could cope with multiple vendor records with the same name.

+ +
// Loads the contents of the vendors.txt file into a database
+void
+loadVendorDB(MyDb &vendorDB, std::string &vendorFile)
+{
+    std::ifstream inFile(vendorFile.c_str(), std::ios::in);
+    if ( !inFile )
+    {
+        std::cerr << "Could not open file '" << vendorFile
+                  << "'. Giving up." << std::endl;
+        throw std::exception();
+    }
+                                                                                                                                    
+    VENDOR my_vendor;
+    while (!inFile.eof())
+    {
+        std::string stringBuf;
+        std::getline(inFile, stringBuf);
+        memset(&my_vendor, 0, sizeof(VENDOR));
+                                                                                                                                    
+        // Scan the line into the structure.
+        // Convenient, but not particularly safe.
+        // In a real program, there would be a lot more
+        // defensive code here.
+        sscanf(stringBuf.c_str(),
+          "%20[^#]#%20[^#]#%20[^#]#%3[^#]#%6[^#]#%13[^#]#%20[^#]#%20[^\n]",
+          my_vendor.name, my_vendor.street,
+          my_vendor.city, my_vendor.state,
+          my_vendor.zipcode, my_vendor.phone_number,
+          my_vendor.sales_rep, my_vendor.sales_rep_phone);
+                                                                                                                                    
+        Dbt key(my_vendor.name, strlen(my_vendor.name) + 1);
+        Dbt data(&my_vendor, sizeof(VENDOR));
+                                                                                                                                    
+        vendorDB.getDb().put(NULL, &key, &data, 0);
+    }
+    inFile.close();
+} 
+

+ Finally, we need to write the + loadInventoryDB() function. To load the inventory information, + we read in each line of the inventory.txt file, obtain each field from + it, then we load this data into an InventoryData + instance. +

+

+ To help us obtain the various fields from each line of input, we also create a simple helper function that locates the position of the first field delimiter (a pound (#) sign) in a line of input.

+

+ Note that we could have simply decided to store our inventory data in a + structure very much like the VENDOR structure that we use above. + However, by storing this data in the + InventoryData class, which identifies the size of + the data that it contains, + we can use the smallest amount of space possible for the data that we + are storing. The result is that our cache can be smaller than it might + otherwise be and our database will take less space on disk than if we used + a structure with fixed-length fields. +

+

+ For a trivial dataset such as what we use for these examples, these + resource savings are negligible. But if we were storing hundreds of + millions of records, then the cost savings may become significant. +

+ +
// Used to locate the first pound sign (a field delimiter)
+// in the input string.
+int
+getNextPound(std::string &theString, std::string &substring)
+{
+    int pos = theString.find("#");
+    substring.assign(theString, 0, pos);
+    theString.assign(theString, pos + 1, theString.size());
+    return (pos);
+}
+                                                                                                                                    
+                                                                                                                                    
+// Loads the contents of the inventory.txt file into a database
+void
+loadInventoryDB(MyDb &inventoryDB, std::string &inventoryFile)
+{
+    InventoryData inventoryData;
+    std::string substring;
+    int nextPound;
+                                                                                                                                    
+    std::ifstream inFile(inventoryFile.c_str(), std::ios::in);
+    if (!inFile)
+    {
+        std::cerr << "Could not open file '" << inventoryFile
+                  << "'. Giving up." << std::endl;
+        throw std::exception();
+    }
+                                                                                                                                    
+    while (!inFile.eof())
+    {
+        inventoryData.clear();
+        std::string stringBuf;
+        std::getline(inFile, stringBuf);
+                                                                                                                                    
+        // Now parse the line
+        if (!stringBuf.empty())
+        {
+            nextPound = getNextPound(stringBuf, substring);
+            inventoryData.setName(substring);
+                                                                                                                                    
+            nextPound = getNextPound(stringBuf, substring);
+            inventoryData.setSKU(substring);
+                                                                                                                                    
+            nextPound = getNextPound(stringBuf, substring);
+            inventoryData.setPrice(strtod(substring.c_str(), 0));
+                                                                                                                                    
+            nextPound = getNextPound(stringBuf, substring);
+            inventoryData.setQuantity(strtol(substring.c_str(), 0, 10));
+                                                                                                                                    
+            nextPound = getNextPound(stringBuf, substring);
+            inventoryData.setCategory(substring);
+                                                                                                                                    
+            nextPound = getNextPound(stringBuf, substring);
+            inventoryData.setVendor(substring);
+                                                                                                                                    
+            void *buff = (void *)inventoryData.getSKU().c_str();
+            int size = inventoryData.getSKU().size()+1;
+            Dbt key(buff, size);
+                                                                                                                                    
+            buff = inventoryData.getBuffer();
+            size = inventoryData.getBufferSize();
+            Dbt data(buff, size);
+                                                                                                                                    
+            inventoryDB.getDb().put(NULL, &key, &data, 0);
+        }
+    }
+    inFile.close();
+} 
+

+ In the next chapter we provide an example that shows how to read + the inventory and vendor databases. +

+
+
+ + + diff --git a/db/docs/gsg/CXX/DeleteEntryWCursor.html b/db/docs/gsg/CXX/DeleteEntryWCursor.html new file mode 100644 index 000000000..3a07d1326 --- /dev/null +++ b/db/docs/gsg/CXX/DeleteEntryWCursor.html @@ -0,0 +1,110 @@ + + + + + + Deleting Records Using Cursors + + + + + + + + + +
+
+
+
+

Deleting Records Using Cursors

+
+
+
+
+

+ + To delete a record using a cursor, simply position the cursor to the + record that you want to delete and then call + + + + + + Dbc::del(). + + +

+

For example:

+ +
#include <db_cxx.h>
+#include <string.h>
+
+...
+
+char *key1str = "My first string";
+Db my_database(NULL, 0);
+Dbc *cursorp;
+
+try {
+    // Database open omitted 
+
+    // Get the cursor
+    my_database.cursor(NULL, &cursorp, 0);
+
+    // Set up our DBTs
+    Dbt key(key1str, strlen(key1str) + 1);
+    Dbt data;
+
+    // Iterate over the database, deleting each record in turn. 
+    int ret;
+    while ((ret = cursorp->get(&key, &data, 
+                                  DB_SET)) == 0) {
+        cursorp->del(0);
+    }
+
+} catch(DbException &e) {
+    my_database.err(e.get_errno(), "Error!");
+} catch(std::exception &e) {
+    my_database.errx("Error! %s", e.what());
+}
+
+// Cursors must be closed
+if (cursorp != NULL)
+    cursorp->close(); 
+
+my_database.close(0);
+
+ + + diff --git a/db/docs/gsg/CXX/Positioning.html b/db/docs/gsg/CXX/Positioning.html new file mode 100644 index 000000000..fe14c6eb0 --- /dev/null +++ b/db/docs/gsg/CXX/Positioning.html @@ -0,0 +1,582 @@ + + + + + + Getting Records Using the Cursor + + + + + + + + + +
+
+
+
+

Getting Records Using the Cursor

+
+
+
+
+

+ To iterate over database records, from the first record to + the last, simply open the cursor and then use the + + + Dbc::get() + method. + Note that you need to supply the + DB_NEXT flag to this method. + For example: +

+ +
#include <db_cxx.h>
+
+...
+
+Db my_database(NULL, 0);
+Dbc *cursorp;
+
+try {
+    // Database open omitted for clarity
+
+    // Get a cursor
+    my_database.cursor(NULL, &cursorp, 0); 
+
+    Dbt key, data;
+    int ret;
+
+    // Iterate over the database, retrieving each record in turn.
+    while ((ret = cursorp->get(&key, &data, DB_NEXT)) == 0) {
+        // Do interesting things with the Dbts here.
+    }
+    if (ret != DB_NOTFOUND) {
+        // ret should be DB_NOTFOUND upon exiting the loop.
+        // Dbc::get() will by default throw an exception if any
+        // significant errors occur, so by default this if block
+        // can never be reached. 
+    }
+} catch(DbException &e) {
+        my_database.err(e.get_errno(), "Error!");
+} catch(std::exception &e) {
+        my_database.errx("Error! %s", e.what());
+}
+
+// Cursors must be closed
+if (cursorp != NULL) 
+    cursorp->close(); 
+
+my_database.close(0);
+

+ To iterate over the database from the last record to the first, use + DB_PREV instead of DB_NEXT: +

+ +
#include <db_cxx.h>
+
+...
+
+Db my_database(NULL, 0);
+Dbc *cursorp;
+
+try {
+    // Database open omitted for clarity
+
+    // Get a cursor
+    my_database.cursor(NULL, &cursorp, 0); 
+
+    Dbt key, data;
+    int ret;
+    // Iterate over the database, retrieving each record in turn.
+    while ((ret = cursorp->get(&key, &data, DB_PREV)) == 0) {
+        // Do interesting things with the Dbts here.
+    }
+    if (ret != DB_NOTFOUND) {
+        // ret should be DB_NOTFOUND upon exiting the loop.
+        // Dbc::get() will by default throw an exception if any
+        // significant errors occur, so by default this if block
+        // can never be reached. 
+    }
+} catch(DbException &e) {
+        my_database.err(e.get_errno(), "Error!");
+} catch(std::exception &e) {
+        my_database.errx("Error! %s", e.what());
+}
+
+// Cursors must be closed
+if (cursorp != NULL) 
+    cursorp->close(); 
+
+my_database.close(0);
+
+
+
+
+

Searching for Records

+
+
+
+
+

+ You can use cursors to search for database records. You can search based + on just a key, or you can search based on both the key and the data. + You can also perform partial matches if your database supports sorted + duplicate sets. In all cases, the key and data parameters of these + methods are filled with the key and data values of the database record + to which the cursor is positioned as a result of the search. +

+

+ Also, if the search fails, then the cursor's state is left unchanged and DB_NOTFOUND is returned.

+

+ To use a cursor to search for a record, use Dbc::get(). When you use this method, you can provide the following flags:

+
+

Note

+

+ Notice in the following list that the cursor flags use the keyword SET when the cursor examines just the key portion of the records (in this case, the cursor is set to the record whose key matches the value provided to the cursor). When a flag uses the keyword GET, the cursor is positioned using both the key and the data values provided to the cursor.

+

+ Regardless of the keyword you use to get a record with a cursor, the + cursor's key and data + + Dbts + are filled with the data retrieved from the record to which the + cursor is positioned. +

+
+
+
    +
  • +

    + + DB_SET +

    +

    + Moves the cursor to the first record in the database with + the specified key. +

    +
  • +
  • +

    + + DB_SET_RANGE +

    +

+ Identical to DB_SET unless you are using the BTree access method. In that case, the cursor moves to the first record in the database whose key is greater than or equal to the specified key. This comparison is determined by the comparison function that you provide for the database. If no comparison function is provided, then the default lexicographical sorting is used. (A short sketch using this flag follows this list.)

    +

    + For example, suppose you have database records that use the + following + Strings + + as keys: +

    +
    Alabama
    +Alaska
    +Arizona
    +

    + Then providing a search key of Alaska moves the + cursor to the second key noted above. Providing a key of + Al moves the cursor to the first key (Alabama), providing + a search key of Alas moves the cursor to the second key + (Alaska), and providing a key of Ar moves the + cursor to the last key (Arizona). +

    +
  • +
  • +

    + + DB_GET_BOTH +

    +

    + Moves the cursor to the first record in the database that uses + the specified key and data. +

    +
  • +
  • +

    + + DB_GET_BOTH_RANGE +

    +

    + Moves the cursor to the first record in the database whose key is + greater than or equal to the specified key. If the database supports + duplicate records, then on matching the key, the cursor is moved to + the duplicate record with the smallest data that is greater than or + equal to the specified data. +

    +

    + For example, + + suppose your database uses BTree + and it has + database records that use the following key/data pairs: +

    +
    Alabama/Athens
    +Alabama/Florence
    +Alaska/Anchorage
    +Alaska/Fairbanks
    +Arizona/Avondale
    +Arizona/Florence 
    +

    then providing:

    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    a search key of ...    and a search data of ...    moves the cursor to ...
    Al                      Fl                          Alabama/Florence
    Ar                      Fl                          Arizona/Florence
    Al                      Fa                          Alaska/Fairbanks
    Al                      A                           Alabama/Athens
    +
    +
  • +
+
+
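+ As a quick key-only illustration, the following minimal sketch (not part of the original example programs; it assumes a database keyed by the state names listed above and reuses the my_database and cursorp naming used throughout this chapter) uses DB_SET_RANGE to position the cursor at the first key greater than or equal to a partial key:
+
+#include <db_cxx.h>
+#include <string.h>
+#include <iostream>
+
+...
+
+try {
+    // Database and cursor open omitted for clarity
+
+    // A partial key. With DB_SET_RANGE the cursor lands on the first
+    // key greater than or equal to this value ("Alaska" in the sample
+    // data above).
+    char *search_key = "Alas";
+
+    Dbt key(search_key, strlen(search_key) + 1);
+    Dbt data;
+
+    int ret = cursorp->get(&key, &data, DB_SET_RANGE);
+    if (ret == 0) {
+        std::cout << "key: " << (char *)key.get_data() << std::endl;
+    }
+} catch(DbException &e) {
+    my_database.err(e.get_errno(), "Error!");
+}
+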

+ For example, assuming a database containing sorted duplicate records of + U.S. States/U.S Cities key/data pairs (both as + Strings), + + then the following code fragment can be used to position the cursor + to any record in the database and print its key/data values: + +

+ +
#include <db_cxx.h>
+#include <string.h>
+
+...
+
+Db my_database(NULL, 0);
+Dbc *cursorp;
+
+try {
+    // database open omitted for clarity
+
+    // Get a cursor
+    my_database.cursor(NULL, &cursorp, 0);
+
+    // Search criteria
+    char *search_key = "Al";
+    char *search_data = "Fa";
+
+    // Set up our DBTs
+    Dbt key(search_key, strlen(search_key) + 1);
+    Dbt data(search_data, strlen(search_data) + 1);
+
+    // Position the cursor to the first record in the database whose
+    // key and data begin with the correct strings.
+    int ret = cursorp->get(&key, &data, DB_GET_BOTH_RANGE);
+    if (!ret) {
+        // Do something with the data
+    }
+} catch(DbException &e) {
+        my_database.err(e.get_errno(), "Error!");
+} catch(std::exception &e) {
+        my_database.errx("Error! %s", e.what());
+}
+
+// Close the cursor
+if (cursorp != NULL)
+    cursorp->close();
+
+// Close the database
+my_database.close(0); 
+
+
+
+
+
+

Working with Duplicate Records

+
+
+
+
+

+ A record is a duplicate of another record if the two records share the + same key. For duplicate records, only the data portion of the record is unique. +

+

+ Duplicate records are supported only for the BTree or Hash access methods. + For information on configuring your database to use duplicate records, + see Allowing Duplicate Records. +

+

+ If your database supports duplicate records, then it can potentially + contain multiple records that share the same key. + + + + By default, normal database + get operations will only return the first such record in a set + of duplicate records. Typically, subsequent duplicate records are + accessed using a cursor. + + + The following + + + Dbc::get() flags + are interesting when working with databases that support duplicate records: +

+
+
    +
  • +

    + + + DB_NEXT, + DB_PREV + +

    +

    + Shows the next/previous record in the database, regardless of + whether it is a duplicate of the current record. For an example of + using these methods, see Getting Records Using the Cursor. +

    +
  • +
  • +

    + + DB_GET_BOTH_RANGE +

    +

    + Useful for seeking the cursor to a specific record, regardless of + whether it is a duplicate record. See Searching for Records for more + information. +

    +
  • +
  • +

    + + + DB_NEXT_NODUP, + DB_PREV_NODUP + +

    +

    + Gets the next/previous non-duplicate record in the database. This + allows you to skip over all the duplicates in a set of duplicate + records. If you call + + + + Dbc::get() + with DB_PREV_NODUP, + + then the cursor is positioned to the last record for the previous + key in the database. For example, if you have the following records + in your database: +

    +
    Alabama/Athens
    +Alabama/Florence
    +Alaska/Anchorage
    +Alaska/Fairbanks
    +Arizona/Avondale
    +Arizona/Florence
    +

    + and your cursor is positioned to Alaska/Fairbanks, + and you then call + + + + Dbc::get() + with DB_PREV_NODUP, + + then the cursor is positioned to Alabama/Florence. Similarly, if + you call + + + + Dbc::get() + with DB_NEXT_NODUP, + + + then the cursor is positioned to the first record corresponding to + the next key in the database. +

    +

    + If there is no next/previous key in the database, then + + DB_NOTFOUND + is returned, and the cursor is left unchanged. +

    +
  • +
  • +

    + + DB_NEXT_DUP +

    +

    + + Gets the + + next + record that shares the current key. If the + cursor is positioned at the last record in the duplicate set and + you call + + + + Dbc::get() + with DB_NEXT_DUP, + + + then + + DB_NOTFOUND + is returned and the cursor is left unchanged. + +

    +
  • +
+
+
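+ The DB_NEXT_NODUP flag described above can be used in a simple loop to visit just one record per key. The following is a minimal sketch (not taken from the example programs; it reuses the my_database and cursorp naming used throughout this chapter):
+
+#include <db_cxx.h>
+#include <iostream>
+
+...
+
+try {
+    // Database and cursor open omitted for clarity
+
+    Dbt key, data;
+    int ret;
+
+    // Visit the first record for each key, skipping over any
+    // additional duplicates in each duplicate set.
+    while ((ret = cursorp->get(&key, &data, DB_NEXT_NODUP)) == 0) {
+        std::cout << "key: " << (char *)key.get_data() << std::endl;
+    }
+} catch(DbException &e) {
+    my_database.err(e.get_errno(), "Error!");
+}
+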

+ For example, the following code fragment positions a cursor to a key + + + + and displays it and all its + duplicates. + + +

+ +
#include <db_cxx.h>
+#include <string.h>
+
+...
+
+char *search_key = "Al";
+
+Db my_database(NULL, 0);
+Dbc *cursorp;
+
+try {
+    // database open omitted for clarity
+
+    // Get a cursor
+    my_database.cursor(NULL, &cursorp, 0);
+
+    // Set up our DBTs
+    Dbt key(search_key, strlen(search_key) + 1);
+    Dbt data;
+
+    // Position the cursor to the first record in the database whose
+    // key and data begin with the correct strings.
+    int ret = cursorp->get(&key, &data, DB_SET);
+    while (ret != DB_NOTFOUND) {
+        std::cout << "key: " << (char *)key.get_data() 
+                  << "data: " << (char *)data.get_data()<< std::endl;
+        ret = cursorp->get(&key, &data, DB_NEXT_DUP);
+    }
+} catch(DbException &e) {
+        my_database.err(e.get_errno(), "Error!");
+} catch(std::exception &e) {
+        my_database.errx("Error! %s", e.what());
+}
+
+// Close the cursor
+if (cursorp != NULL)
+    cursorp->close();
+
+// Close the database
+my_database.close(0); 
+
+
+ + + diff --git a/db/docs/gsg/CXX/PutEntryWCursor.html b/db/docs/gsg/CXX/PutEntryWCursor.html new file mode 100644 index 000000000..4a4ff126f --- /dev/null +++ b/db/docs/gsg/CXX/PutEntryWCursor.html @@ -0,0 +1,222 @@ + + + + + + Putting Records Using Cursors + + + + + + + + + +
+
+
+
+

Putting Records Using Cursors

+
+
+
+
+

+ You can use cursors to put records into the database. DB's behavior + when putting records into the database differs depending on the flags + that you use when writing the record, on the access method that you are + using, and on whether your database supports sorted duplicates. +

+

+ Note that when putting records to the database using a cursor, the cursor is positioned at the record you inserted. Also, you cannot transactionally protect a put that is performed using a cursor; if you want to transactionally protect your database writes, put records using the database handle directly.

+

+ You use + + Dbc::put() + + to put (write) records to the database. You can use the following flags + with this method: +

+
+
    +
  • +

    + + DB_NODUPDATA +

    +

    + If the provided key already exists + in the database, then this method returns + DB_KEYEXIST. +

    +

    + If the key does not exist, then the order that the record is put into the database + is determined by the + + + insertion order in use by the database. If a comparison + function has been provided to the database, the record is + inserted in its sorted location. Otherwise (assuming BTree), + lexicographical sorting is used, with + shorter items collating before longer items. + +

    +

    + This flag can only be used for the BTree and Hash access methods, + and only if the database has been configured to support sorted + duplicate data items (DB_DUPSORT was specified at + database creation time). +

    +

    + This flag cannot be used with the Queue or Recno access methods. +

    +

    + For more information on duplicate records, see + Allowing Duplicate Records. +

    +
  • +
  • +

    + + DB_KEYFIRST +

    +

    + For databases that do not support duplicates, this method behaves + + + exactly the same as if a default insertion was performed. + + If the database supports duplicate records, + + + and a duplicate sort function has been specified, the + inserted data item is added in its sorted location. If + the key already exists in the database and no duplicate + sort function has been specified, the inserted data item + is added as the first of the data items for that key. + +

    +
  • +
  • +

    + + DB_KEYLAST +

    +

    + Behaves exactly as if + DB_KEYFIRST + + was used, except that if the key already exists in the database and no + duplicate sort function has been specified, the + inserted data item is added as the last of the data + items for that key. +

    +
  • +
+
+

For example:

+ +
#include <db_cxx.h>
+#include <string.h>
+
+...
+
+char *key1str = "My first string";
+char *data1str = "My first data";
+char *key2str = "A second string";
+char *data2str = "My second data";
+char *data3str = "My third data";
+
+
+Db my_database(NULL, 0);
+Dbc *cursorp;
+
+try {
+    // Set up our DBTs
+    Dbt key1(key1str, strlen(key1str) + 1);
+    Dbt data1(data1str, strlen(data1str) + 1);
+
+    Dbt key2(key2str, strlen(key2str) + 1);
+    Dbt data2(data2str, strlen(data2str) + 1);
+    Dbt data3(data3str, strlen(data3str) + 1);
+
+    // Database open omitted
+
+    // Get the cursor
+    my_database.cursor(NULL, &cursorp, 0);
+
+    // Assuming an empty database, this first put places
+    // "My first string"/"My first data" in the first 
+    // position in the database
+    int ret = cursorp->put(&key1, &data1, DB_KEYFIRST); 
+
+    // This put places "A second string"/"My second data" in the
+    // database according to how its key sorts against the key
+    // used for the currently existing database record. Most likely
+    // this record would appear first in the database.
+    ret = cursorp->put(&key2, &data2, 
+            DB_KEYFIRST); /* Added according to sort order */
+
+    // If duplicates are not allowed, the currently existing record that 
+    // uses "key2" is overwritten with the data provided on this put.
+    // That is, the record "A second string"/"My second data" becomes
+    // "A second string"/"My third data"
+    // 
+    // If duplicates are allowed, then "My third data" is placed in the
+    // duplicates list according to how it sorts against "My second data".
+    ret = cursorp->put(&key2, &data3, 
+            DB_KEYFIRST); // If duplicates are not allowed, record 
+                          // is overwritten with new data. Otherwise, 
+                          // the record is added to the beginning of 
+                          // the duplicates list.
+} catch(DbException &e) {
+        my_database.err(e.get_errno(), "Error!");
+} catch(std::exception &e) {
+        my_database.errx("Error! %s", e.what());
+}
+
+// Cursors must be closed
+if (cursorp != NULL) 
+    cursorp->close(); 
+
+my_database.close(0);
+
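+ The fragment above uses DB_KEYFIRST for every put. If the database was created with DB_DUPSORT and you want to reject an exact key/data pair that is already present, DB_NODUPDATA can be used instead; per the description above, the put then returns DB_KEYEXIST for an existing pair. A minimal, hypothetical sketch (reusing the cursorp, ret, key2, and data2 variables from the fragment above):
+
+    // Attempt the insert only if this exact key/data pair is not
+    // already present. Requires a database created with DB_DUPSORT.
+    ret = cursorp->put(&key2, &data2, DB_NODUPDATA);
+    if (ret == DB_KEYEXIST) {
+        // The key/data pair already exists; nothing was written.
+    }
+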
+ + + diff --git a/db/docs/gsg/CXX/ReplacingEntryWCursor.html b/db/docs/gsg/CXX/ReplacingEntryWCursor.html new file mode 100644 index 000000000..6bbd080d5 --- /dev/null +++ b/db/docs/gsg/CXX/ReplacingEntryWCursor.html @@ -0,0 +1,138 @@ + + + + + + Replacing Records Using Cursors + + + + + + + + + +
+
+
+
+

Replacing Records Using Cursors

+
+
+
+
+

+ You replace the data for a database record by using + + + + + + + Dbc::put() + with the DB_CURRENT flag. + + +

+ +
#include <db_cxx.h>
+#include <string.h>
+
+...
+
+Db my_database(NULL, 0);
+Dbc *cursorp;
+
+int ret;
+char *key1str = "My first string";
+char *replacement_data = "replace me";
+
+try {
+    // Database open omitted
+
+    // Get the cursor
+    my_database.cursor(NULL, &cursorp, 0);
+
+    // Set up our DBTs 
+    Dbt key(key1str, strlen(key1str) + 1);
+    Dbt data;
+
+    // Position the cursor
+    ret = cursorp->get(&key, &data, DB_SET);
+    if (ret == 0) {
+        data.set_data(replacement_data);
+        data.set_size(strlen(replacement_data) + 1);
+        cursorp->put(&key, &data, DB_CURRENT);
+    }
+} catch(DbException &e) {
+        my_database.err(e.get_errno(), "Error!");
+} catch(std::exception &e) {
+        my_database.errx("Error! %s", e.what());
+}
+
+// Cursors must be closed
+if (cursorp != NULL)
+    cursorp->close(); 
+
+my_database.close(0);
+

+ Note that you cannot change a record's key using this method; the key + parameter is always ignored when you replace a record. +

+

+ When replacing the data portion of a record that is a member of a sorted duplicates set, the replacement will be successful only if the new record sorts identically to the old record. This means that if you are using the default lexicographic sort, replacing such a record with different data will fail because it violates the sort order. However, if you provide a custom sort routine that, for example, sorts based on just a few bytes out of the data item, then you can potentially perform a direct replacement and still not violate the restrictions described here.

+

+ Under these circumstances, if + + you want to replace the data contained by a duplicate record, + + and you are not using a custom sort routine, then + + delete the record and create a new record with the desired key and data. +

+
+ + + diff --git a/db/docs/gsg/CXX/accessmethods.html b/db/docs/gsg/CXX/accessmethods.html new file mode 100644 index 000000000..02469bb95 --- /dev/null +++ b/db/docs/gsg/CXX/accessmethods.html @@ -0,0 +1,281 @@ + + + + + + Access Methods + + + + + + + + + +
+
+
+
+

Access Methods

+
+
+
+
+

+ While this manual will focus primarily on the BTree access method, it is + still useful to briefly describe all of the access methods that DB + makes available. +

+

+ Note that an access method can be selected only when the database is + created. Once selected, actual API usage is generally + identical across all access methods. That is, while some + exceptions exist, mechanically you interact with the library in the same + way regardless of which access method you have selected. +

+

+ The access method that you should choose is gated first by what you want to use as a key, and second by the performance that you see for a given access method.

+

+ The following are the available access methods: +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Access Method        Description
BTree +

+ Data is stored in a sorted, balanced tree structure. + Both the key and the data for BTree records can be + arbitrarily complex. That is, they can contain single values + such as an integer or a string, or complex types such as a + structure. Also, although not the default + behavior, it is possible for two records to + use keys that compare as equals. When this occurs, the + records are considered to be duplicates of one another. +

+
Hash +

+ Data is stored in an extended linear hash table. Like BTree, the key and the data used for Hash records can be arbitrarily complex. Also, like BTree, duplicate records are optionally supported.

+
Queue +

+ Data is stored in a queue as fixed-length records. Each + record uses a logical record number as its key. This access + method is designed for fast inserts at the tail of the + queue, and it has a special operation that deletes and + returns a record from the head of the queue. +

+

+ This access method is unusual in that it provides record-level locking. This can yield beneficial performance improvements in applications requiring concurrent access to the queue.

+
Recno +

+ Data is stored in either fixed or variable-length records. + Like Queue, Recno records use logical record numbers as keys. +

+
+
+
+
+
+
+

Selecting Access Methods

+
+
+
+
+

+ To select an access method, you should first consider what you want to use as a key for your database records. If you want to use arbitrary data (even strings), then you should use either BTree or Hash. If you want to use logical record numbers (essentially integers), then you should use Queue or Recno.

+

+ Once you have made this decision, you must choose either between BTree and Hash, or between Queue and Recno. This decision is described next.

+
+
+
+
+
+

Choosing between BTree and Hash

+
+
+
+
+

+ For small working datasets that fit entirely in memory, there is no + difference between BTree and Hash. Both will perform just as well + as the other. In this situation, you might just as well use BTree, + if for no other reason than the majority of DB applications use + BTree. +

+

+ Note that the main concern here is your + working dataset, not your entire dataset. Many applications maintain + large amounts of information but only need to access some small + portion of that data with any frequency. So what you want to + consider is the data that you will routinely use, not the sum total + of all the data managed by your application. +

+

+ However, as your working dataset grows to the point + where you cannot fit it all into memory, then you need to take more + care when choosing your access method. Specifically, choose: +

+
+
    +
  • +

    + BTree if your keys have some locality of reference. That is, + if they sort well and you can expect that a query for a + given key will likely be followed by a query for one of its + neighbors. +

    +
  • +
  • +

    + Hash if your dataset is extremely large. For any given + access method, DB must maintain a certain amount of internal + information. However, the amount of information that DB + must maintain for BTree is much greater than for Hash. The + result is that as your dataset grows, this internal + information can dominate the cache to the point where there + is relatively little space left for application data. + As a result, BTree can be forced to perform disk I/O much more + frequently than would Hash given the same amount of data. +

    +

+ Moreover, if your dataset becomes so large that DB will almost certainly have to perform disk I/O to satisfy a random request, then Hash will definitely outperform BTree because it has fewer internal records to search through than does BTree.

    +
  • +
+
+
+
+
+
+
+

Choosing between Queue and Recno

+
+
+
+
+

+ Queue or Recno are used when the application wants to use logical + record numbers for the primary database key. Logical record numbers + are essentially integers that uniquely identify the database + record. They can be either mutable or fixed, where a mutable record + number is one that might change as database records are stored or + deleted. Fixed logical record numbers never change regardless of + what database operations are performed. +

+

+ When deciding between Queue and Recno, choose: +

+
+
    +
  • +

    + Queue if your application requires high degrees of + concurrency. Queue provides record-level locking (as opposed + to the page-level locking that the other access methods + use), and this can result in significantly faster throughput + for highly concurrent applications. +

    +

    + Note, however, that Queue provides support only for fixed + length records. So if the size of the data that you want to + store varies widely from record to record, you should + probably choose an access method other than Queue. +

    +
  • +
  • +

    + Recno if you want mutable record numbers. Queue is only + capable of providing fixed record numbers. Also, Recno + provides support for databases whose permanent storage is a + flat text file. This is useful for applications looking for + fast, temporary storage while the data is being read or + modified. +

    +
  • +
+
+
+
+ + + diff --git a/db/docs/gsg/CXX/btree.html b/db/docs/gsg/CXX/btree.html new file mode 100644 index 000000000..d90e38e07 --- /dev/null +++ b/db/docs/gsg/CXX/btree.html @@ -0,0 +1,547 @@ + + + + + + BTree Configuration + + + + + + + + +
+
+
+
+

BTree Configuration

+
+
+
+
+

+ In going through the previous chapters in this book, you may notice that + we touch on some topics that are specific to BTree, but we do not cover + those topics in any real detail. In this section, we will discuss + configuration issues that are unique to BTree. +

+

+ Specifically, in this section we describe: +

+
+
    +
  • +

    + Allowing duplicate records. +

    +
  • +
  • +

    + Setting comparator callbacks. +

    +
  • +
+
+
+
+
+
+

Allowing Duplicate Records

+
+
+
+
+

+ BTree databases can contain duplicate records. One record is + considered to be a duplicate of another when both records use keys + that compare as equal to one another. +

+

+ By default, keys are compared using a lexicographical comparison, with shorter keys collating before longer keys. You can override this default using the Db::set_bt_compare() method. See the next section for details.

+

+ By default, DB databases do not allow duplicate records. As a + result, any attempt to write a record that uses a key equal to a + previously existing record results in the previously existing record + being overwritten by the new record. +

+

+ Allowing duplicate records is useful if you have a database that + contains records keyed by a commonly occurring piece of information. + It is frequently necessary to allow duplicate records for secondary + databases. +

+

+ For example, suppose your primary database contained records related to automobiles. You might in this case want to be able to find all the automobiles in the database that are of a particular color, so you would index on the color of the automobile. However, for any given color there will probably be multiple automobiles. Since the color is used as the secondary database's key, multiple secondary database records will share the same key, and so the secondary database must support duplicate records.

+
+
+
+
+

Sorted Duplicates

+
+
+
+
+

+ Duplicate records can be stored in sorted or unsorted order. + You can cause DB to automatically sort your duplicate + records by + + specifying the DB_DUPSORT flag at + database creation time. + + +

+

+ If sorted duplicates are supported, then the + + sorting function specified on + + Db::set_dup_compare() + + + is used to determine the location of the duplicate record in its + duplicate set. If no such function is provided, then the default + lexicographical comparison is used. +

+
+
+
+
+
+

Unsorted Duplicates

+
+
+
+
+

+ For performance reasons, BTrees should always contain sorted records. (BTrees containing unsorted entries must potentially spend a great deal more time locating an entry than do BTrees that contain sorted entries.) That said, DB provides support for suppressing the automatic sorting of duplicate records because it may be that your application is inserting records that are already in sorted order.

+

+ That is, if the database is configured to support unsorted duplicates, then the assumption is that your application will manually perform the sorting. In this event, expect to pay a significant performance penalty; any time you place records into the database in a sort order not known to DB, you will pay that penalty.

+

+ That said, this is how DB behaves when inserting records + into a database that supports non-sorted duplicates: +

+
+
    +
  • +

    + If your application simply adds a duplicate record using + + Db::put(), + + then the record is inserted at the end of its sorted duplicate set. +

    +
  • +
  • +

    + If a cursor is used to put the duplicate record to the database, + then the new record is placed in the duplicate set according to the + flags that are provided on the + + Dbc::put() + method. The relevant flags are: +

    +
    +
      +
    • +

      + DB_AFTER + +

      +

      + The data + + provided on the call to + + Dbc::put() + + is placed into the database + as a duplicate record. The key used for this operation is + the key used for the record to which the cursor currently + refers. Any key provided on the call + + + to + + Dbc::put() + + + is therefore ignored. +

      +

      + The duplicate record is inserted into the database + immediately after the cursor's current position in the + database. +

      +

      + This flag is ignored if sorted duplicates are supported for + the database. +

      +
    • +
    • +

      + DB_BEFORE + +

      +

      + Behaves the same as + DB_AFTER + + except that the new record is inserted immediately before + the cursor's current location in the database. +

      +
    • +
    • +

      + DB_KEYFIRST + +

      +

      + If the key + + provided on the call to + + Dbc::put() + + already exists in the + database, and the database is configured to use duplicates + without sorting, then the new record is inserted as the first entry + in the appropriate duplicates list. +

      +
    • +
    • +

      + DB_KEYLAST + +

      +

      + Behaves identically to + DB_KEYFIRST + + except that the new duplicate record is inserted as the last + record in the duplicates list. +

      +
    • +
    +
    +
  • +
+
+
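+ Using the DB_AFTER flag described above, a new duplicate can be inserted immediately after the cursor's current position. The following is a minimal sketch (not taken from the example programs; it assumes a database created with DB_DUP, that is, unsorted duplicates, and reuses the my_database and cursorp naming used elsewhere in this manual):
+
+#include <db_cxx.h>
+#include <string.h>
+
+...
+
+char *keystr = "existing key";
+char *dupstr = "new duplicate data";
+
+try {
+    // Database (created with DB_DUP) and cursor open omitted for clarity
+
+    Dbt key(keystr, strlen(keystr) + 1);
+    Dbt data;
+
+    // Position the cursor on the record that the new duplicate
+    // should follow.
+    int ret = cursorp->get(&key, &data, DB_SET);
+    if (ret == 0) {
+        // With DB_AFTER the key is taken from the cursor's current
+        // record, so only the data item provided here matters.
+        Dbt dupdata(dupstr, strlen(dupstr) + 1);
+        cursorp->put(&key, &dupdata, DB_AFTER);
+    }
+} catch(DbException &e) {
+    my_database.err(e.get_errno(), "Error!");
+}
+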
+
+
+
+
+

Configuring a Database to Support Duplicates

+
+
+
+
+

+ Support for duplicates can only be configured at database creation time. You do this by specifying the appropriate flags to Db::set_flags() before the database is opened for the first time.

+

+ The + flags + + that you can use are: +

+
+
    +
  • +

    + DB_DUP + +

    +

    + The database supports non-sorted duplicate records. +

    +
  • +
  • +

    + DB_DUPSORT + +

    +

    + The database supports sorted duplicate records. +

    +
  • +
+
+

+ The following code fragment illustrates how to configure a database + to support sorted duplicate records: +

+ +
#include <db_cxx.h>
+...
+
+Db db(NULL, 0);
+const char *file_name = "myd.db";
+
+try {
+    // Configure the database for sorted duplicates
+    db.set_flags(DB_DUPSORT);
+
+    // Now open the database
+    db.open(NULL,       // Txn pointer
+            file_name,  // File name
+            NULL,       // Logical db name (unneeded)
+            DB_BTREE,   // Database type (using btree)
+            DB_CREATE,  // Open flags
+            0);         // File mode. Using defaults
+} catch(DbException &e) {
+    db.err(e.get_errno(), "Database '%s' open failed.", file_name);
+} catch(std::exception &e) {
+    db.errx("Error opening database: %s : %s\n", file_name, e.what());
+} 
+
+...
+
+try {
+    db.close(0);
+} catch(DbException &e) {
+    db.err(e.get_errno(), "Database '%s' close failed.", file_name);
+} catch(std::exception &e) {
+    db.errx("Error closing database: %s : %s\n", file_name, e.what());
+} 
+
+
+
+
+
+
+
+
+

Setting Comparison Functions

+
+
+
+
+

+ By default, DB uses a lexicographical comparison function where + shorter records collate before longer records. For the majority of + cases, this comparison works well and you do not need to manage + it in any way. +

+

+ However, in some situations your application's performance can + benefit from setting a custom comparison routine. You can do this + either for database keys, or for the data if your + database supports sorted duplicate records. +

+

+ Some of the reasons why you may want to provide a custom sorting + function are: +

+
+
    +
  • +

    + Your database is keyed using strings and you want to provide + some sort of language-sensitive ordering to that data. Doing + so can help increase the locality of reference that allows + your database to perform at its best. +

    +
  • +
  • +

    + You are using a little-endian system (such as x86) and you + are using integers as your database's keys. Berkeley DB + stores keys as byte strings and little-endian integers + do not sort well when viewed as byte strings. There are + several solutions to this problem, one being to provide a + custom comparison function. See + http://www.sleepycat.com/docs/ref/am_misc/faq.html + for more information. +

    +
  • +
  • +

+ You do not want the entire key to participate in the comparison, for whatever reason. In this case, you may want to provide a custom comparison function so that only the relevant bytes are examined.

    +
  • +
+
+
+
+
+
+

+ Creating Comparison Functions + +

+
+
+
+
+

+ You set a BTree's key + + comparison function + + + using + + Db::set_bt_compare(). + + You can also set a BTree's duplicate data comparison function using + + Db::set_dup_compare(). + + +

+

+ + You cannot use these methods after the database has been opened. + Also, if + + + the database already exists when it is opened, the + + function + + + provided to these methods must be the same as + that historically used to create the database or corruption can + occur. +

+

+ The value that you provide to the set_bt_compare() method + is a pointer to a function that has the following signature: +

+
int (*function)(Db *db, const Dbt *key1, const Dbt *key2)
+

+ This function must return an integer value less than, equal to, + or greater than 0. If key1 is considered to be greater than + key2, then the function must return a value that is greater than + 0. If the two are equal, then the function must return 0, and if + the first key is less than the second then the function must return + a negative value. +

+

+ The function that you provide to set_dup_compare() + works in exactly the same way, except that the + + Dbt + parameters hold record data items instead of keys. +

+

+ For example, a routine that can be used to sort integer keys in the database is:

+ +
int
+compare_int(Db *dbp, const Dbt *a, const Dbt *b)
+{
+    int ai, bi;
+
+    // Returns: 
+    // < 0 if a < b 
+    // = 0 if a = b 
+    // > 0 if a > b 
+    memcpy(&ai, a->get_data(), sizeof(int)); 
+    memcpy(&bi, b->get_data(), sizeof(int)); 
+    return (ai - bi); 
+} 
+

+ Note that the data must first be copied into memory that is + appropriately aligned, as Berkeley DB does not guarantee any kind of + alignment of the underlying data, including for comparison routines. + When writing comparison routines, remember that databases created on + machines of different architectures may have different integer byte + orders, for which your code may need to compensate. +

+

+ To cause DB to use this comparison function: +

+ +
#include <db_cxx.h>
+#include <string.h>
+
+...
+                                                                                                                                      
+Db db(NULL, 0);
+
+// Set up the btree comparison function for this database
+db.set_bt_compare(compare_int);
+
+// Database open call follows sometime after this.
+
+
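+ If the database also uses sorted duplicates, a routine of the same form can be registered for the duplicate data items with Db::set_dup_compare(). A minimal sketch (assuming the compare_int() routine shown above and integer data items):
+
+#include <db_cxx.h>
+
+...
+
+Db db(NULL, 0);
+
+// Sort both the integer keys and the integer duplicate data items.
+db.set_flags(DB_DUPSORT);
+db.set_bt_compare(compare_int);
+db.set_dup_compare(compare_int);
+
+// Database open call follows sometime after this.
+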
+
+ + + diff --git a/db/docs/gsg/CXX/cachesize.html b/db/docs/gsg/CXX/cachesize.html new file mode 100644 index 000000000..8b95477a8 --- /dev/null +++ b/db/docs/gsg/CXX/cachesize.html @@ -0,0 +1,98 @@ + + + + + + Selecting the Cache Size + + + + + + + + + +
+
+
+
+

Selecting the Cache Size

+
+
+
+
+

+ Cache size is important to your application because if it is set too small, your application's performance will suffer from too much disk I/O. On the other hand, if your cache is too large, then your application will use more memory than it actually needs. Moreover, if your application uses too much memory, then on most operating systems it can be swapped out of memory, resulting in extremely poor performance.

+

+ You select your cache size using either Db::set_cachesize() or DbEnv::set_cachesize(), depending on whether you are using a database environment. Your cache size must be a power of 2, but it is otherwise limited only by available memory and performance considerations.

+
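+ For example, the following minimal sketch (illustrative values only) gives a database handle a 64 megabyte cache in a single cache region; the call must be made before the database is opened:
+
+#include <db_cxx.h>
+
+...
+
+Db db(NULL, 0);
+
+// Request a cache of 0 gigabytes plus 64 * 1024 * 1024 bytes,
+// split across a single cache region.
+db.set_cachesize(0, 64 * 1024 * 1024, 1);
+
+// Database open call follows sometime after this.
+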

+ Selecting a cache size is something of an art, but fortunately it is + selected at database (or environment) open time, so it can be easily + tuned to your application's data requirements as they change over time. + The best way to determine how large your cache needs to be is to put + your application into a production environment and watch to see how much + disk I/O is occurring. If your application is going to disk quite a lot + to retrieve database records, then you should increase the size of your + cache (provided that you have enough memory to do so). +

+

+ You can use the db_stat command line utility with the + -m option to gauge the effectiveness of your cache. + In particular, the number of pages found in the cache is shown, along + with a percentage value. The closer to 100% that you can get, the + better. If this value drops too low, and you are experiencing + performance problems, then you should consider increasing the size of + your cache, assuming you have memory to support it. +

+
+ + + diff --git a/db/docs/gsg/CXX/concepts.html b/db/docs/gsg/CXX/concepts.html new file mode 100644 index 000000000..8404051db --- /dev/null +++ b/db/docs/gsg/CXX/concepts.html @@ -0,0 +1,168 @@ + + + + + + Berkeley DB Concepts + + + + + + + + + +
+
+
+
+

Berkeley DB Concepts

+
+
+
+
+

+ Before continuing, it is useful to describe some of the larger concepts + that you will encounter when building a DB application. +

+

+ Conceptually, DB databases contain records. Logically each record represents a single entry in the database. Each such record contains two pieces of information: a key and a data item. This manual will on occasion describe a record's key or a record's data when it is necessary to speak to one or the other portion of a database record.

+

+ Because of the key/data pairing used for DB databases, they are + sometimes thought of as a two-column table. However, data (and + sometimes keys, depending on the access method) can hold arbitrarily + complex data. Frequently, C structures and other such mechanisms are + stored in the record. This effectively turns a 2-column table + into a table with n columns, where + n-1 of those columns are provided by the structure's + fields. +

+

+ Note that a DB database is very much like a table in a relational + database system in that most DB applications use more than one + database (just as most relational databases use more than one table). +

+

+ Unlike relational systems, however, a DB database contains a single + collection of records organized according to a given access method + (BTree, Queue, Hash, and so forth). In a relational database system, + the underlying access method is generally hidden from you. +

+

+ In any case, frequently DB + applications are designed so that a single database stores a specific + type of data (just as in a relational database system, a single table + holds entries containing a specific set of fields). Because most applications + are required to manage multiple kinds of data, a DB application will + often use multiple databases. +

+

+ For example, consider an accounting application. This kind of an + application may manage data based on bank accounts, checking + accounts, stocks, bonds, loans, and so forth. An accounting application + will also have to manage information about people, banking institutions, + customer accounts, and so on. In a traditional relational database, all + of these different kinds of information would be stored and managed + using a (probably very) complex series of tables. In a DB + application, all of this information would instead be divided out and + managed using multiple databases. +

+

+ DB applications can efficiently use multiple databases using an + optional mechanism called an environment. + For more information, see Environments. +

+

+ You interact with most DB APIs using special structures that + contain pointers to functions. These callbacks are + called methods because they look so much like a + method on a C++ class. The variable that you use to access these + methods is often referred to as a + handle. For example, to use a database you will + obtain a handle to that database. +

+

+ Retrieving a record from a database is sometimes called + getting the record because the method that you use + to retrieve the records is called get(). + Similarly, storing database records is sometimes called + putting the record because you use the + put() method to do this. +

+

+ When you store, or put, a record to a database using its handle, the + record is stored according to whatever sort order is in use by the + database. Sorting is mostly performed based on the key, but sometimes + the data is considered too. If you put a record using a key that already + exists in the database, then the existing record is replaced with the + new data. However, if the database supports + duplicate records (that is, records with identical keys but + different data), then that new record is stored as a duplicate record and + any existing records are not overwritten. +

+

+ If a database supports duplicate records, then you can use a database + handle to retrieve only the first record in a set of duplicate records. +

+

+ In addition to using a database handle, you can also read and write data using a + special mechanism called a cursor. Cursors are + essentially iterators that you can use to walk over the records in a + database. You can use cursors to iterate over a database from the first + record to the last, and from the last to the first. You can also use + cursors to seek to a record. In the event that a database supports + duplicate records, cursors are the only way you can access all the + records in a set of duplicates. +

+

+ Finally, DB provides a special kind of database called a secondary database. Secondary databases serve as an index into normal databases (called primary databases to distinguish them from secondaries). Secondary databases are interesting because DB records can hold complex data types, but seeking to a given record is performed only based on that record's key. If you want to be able to seek to a record based on some piece of information that is not the key, then you can enable this through the use of secondary databases.

+
+ + + diff --git a/db/docs/gsg/CXX/coreExceptions.html b/db/docs/gsg/CXX/coreExceptions.html new file mode 100644 index 000000000..4b0644d87 --- /dev/null +++ b/db/docs/gsg/CXX/coreExceptions.html @@ -0,0 +1,118 @@ + + + + + + Exception Handling + + + + + + + + + +
+
+
+
+

Exception Handling

+
+
+
+
+

+ Before continuing, it is useful to spend a few moments on exception + handling in DB with the + C++ API. +

+

+ By default, most + + DB methods throw + DbException + + in the event of a serious error. + + + However, be aware that + DbException does not inherit from + std::exception so your try + blocks should catch both types of exceptions. For example: + + +

+ +
#include <db_cxx.h>
+    ...
+try 
+{
+    // DB and other code goes here
+}
+catch(DbException &e)
+{
+  // DB error handling goes here
+}
+catch(std::exception &e)
+{
+    // All other error handling goes here
+} 
+

+ You can obtain the DB error number for a + DbException + + by using + + DbException::get_errno(). + You can also obtain the informational message associated with that error + number using DbException::what(). + + +

+

+ If for some reason you do not want to manage + DbException objects in your + try blocks, you can configure DB to suppress them + by setting DB_CXX_NO_EXCEPTIONS for your database and + environment handles. In this event, you must manage your DB error + conditions using the integer value returned by all DB methods. Be + aware that this manual assumes that you want to manage your error + conditions using DbException objects. For + information on managing error conditions using the integer return + values, see Getting Started with Berkeley DB for C. +

+
+ + + diff --git a/db/docs/gsg/CXX/coredbclose.html b/db/docs/gsg/CXX/coredbclose.html new file mode 100644 index 000000000..2f88f3123 --- /dev/null +++ b/db/docs/gsg/CXX/coredbclose.html @@ -0,0 +1,109 @@ + + + + + + Closing Databases + + + + + + + + + +
+
+
+
+

Closing Databases

+
+
+
+
+

+ Once you are done using the database, you must close it. You use the + + Db::close() + method to do this. +

+

+ Closing a database causes it to become unusable until it is opened + again. Note that you should make sure that any open cursors are closed + before closing your database. Active cursors during a database + close can cause unexpected results, especially if any of those cursors are + writing to the database. You should always make sure that all your + database accesses have completed before closing your database. +

+

+ Cursors are described in Using Cursors later in this manual. +

+

+ Be aware that when you close the last open handle for a database, its cache is by default flushed to disk. This means that any information that has been modified in the cache is guaranteed to be written to disk when the last handle is closed. You can manually perform this operation using the Db::sync() method, but for normal shutdown operations it is not necessary. For more information about syncing your cache, see Data Persistence.

+

The following code fragment illustrates a database close:

+ +
#include <db_cxx.h>
+
+...
+
+Db db(NULL, 0);
+
+ // Database open and access operations happen here.
+
+try {
+    // Close the database
+    db.close(0);
+// DbException is not subclassed from std::exception, so
+// need to catch both of these.
+} catch(DbException &e) {
+    // Error handling code goes here    
+} catch(std::exception &e) {
+    // Error handling code goes here
+} 
+
+ + + diff --git a/db/docs/gsg/CXX/coreindexusage.html b/db/docs/gsg/CXX/coreindexusage.html new file mode 100644 index 000000000..496bd3726 --- /dev/null +++ b/db/docs/gsg/CXX/coreindexusage.html @@ -0,0 +1,560 @@ + + + + + + Secondary Database Example + + + + + + + + + +
+
+
+
+

Secondary Database Example

+
+
+
+
+

+ In previous chapters in this book, we built applications that load + and display several DB databases. In this example, we will extend those + examples to use secondary databases. Specifically: +

+
+ +
+
+
+
+
+

Secondary Databases with + + excxx_example_database_load +

+
+
+
+
+

+ In order to update excxx_example_database_load + to maintain an index of inventory item names, all we really need + to do is: +

+
+
    +
  1. +

    + Create a new database to be used as a secondary database. +

    +
  2. +
  3. +

    + Associate our new database to the inventory primary + database. +

    +
  4. +
+
+

+ We also need a function that can create our secondary keys for us. +

+

+ Because DB maintains secondary databases for us, once this work is done we need not make any other changes to excxx_example_database_load.

+

+ Remember that you can find the complete implementation of these functions + in: +

+
DB_INSTALL/examples_cxx/getting_started
+

+ where DB_INSTALL is the location where you + placed your DB distribution. +

+

+ To begin, we go to gettingStartedCommon.hpp and + we write our secondary key extractor function. This is a fairly + trivial function to write because we have already done most of the + work when we wrote the InventoryData class. + Recall that when we wrote that class, we provided a constructor that + accepts a pointer to a buffer and unpacks the contents of the buffer + for us (see InventoryData Class + for the implementation). We now make use of that constructor. +

+ +
// File: gettingStartedCommon.hpp
+// Forward declarations
+class Db;
+class Dbt;
+
+// Used to extract an inventory item's name from an
+// inventory database record. This function is used to create
+// keys for secondary database records.
+int
+get_item_name(Db *dbp, const Dbt *pkey, const Dbt *pdata, Dbt *skey)
+{
+    InventoryData id(pdata->get_data());
+    const char *itemname = id.getName().c_str();
+
+    // If these don't match, then there was a problem with
+    // the buffer contained in pdata, or there's a programming
+    // error in how the buffer is marshalled/unmarshalled.
+    // This should never happen!
+    if ((u_int32_t)id.getBufferSize() != pdata->get_size()) {
+        dbp->errx("get_item_name: buffer sizes do not match!");
+        // When we return non-zero, the index record is not
+        // added/updated.
+        return (-1);
+    }
+    // Now set the secondary key's data to be the item name 
+
+    skey->set_data((void *)itemname);
+    skey->set_size(strlen(itemname) + 1);
+
+    return (0);
+}; 
+

+ Having written our key extractor callback, we now need to make + a trivial update to our MyDb implementation. + Because an item name is used by multiple inventory records, we need our + secondary database to support sorted duplicates. We therefore must + update MyDb to handle this detail. +

+

+ The MyDb class definition changes to add a + boolean to the constructor (remember that new code is in + bold): +

+ +
// File: MyDb.hpp
+#include <db_cxx.h>
+
+class MyDb
+{
+public:
+    // Constructor requires a path to the database,
+    // and a database name.
+    MyDb(std::string &path, std::string &dbName,
+         bool isSecondary = false);
+                                                                                                                                    
+    // Our destructor just calls our private close method.
+    ~MyDb() { close(); }
+                                                                                                                                    
+    inline Db &getDb() {return db_;}
+                                                                                                                                    
+private:
+    Db db_;
+    std::string dbFileName_;
+    u_int32_t cFlags_;
+                                                                                                                                    
+    // Make sure the default constructor is private
+    // We don't want it used.
+    MyDb() : db_(0, 0) {}
+                                                                                                                                    
+    // We put our database close activity here.
+    // This is called from our destructor. In
+    // a more complicated example, we might want
+    // to make this method public, but a private
+    // method is more appropriate for this example.
+    void close();
+}; 
+

+ And the implementation changes slightly to take advantage of the new boolean. Note that to save space, we show only the constructor, which is where the code actually changes:

+ +
// File: MyDb.cpp
+#include "MyDb.hpp"
+
+// Class constructor. Requires a path to the location
+// where the database is located, and a database name
+MyDb::MyDb(std::string &path, std::string &dbName,
+           bool isSecondary)
+    : db_(NULL, 0),               // Instantiate Db object
+      dbFileName_(path + dbName), // Database file name
+      cFlags_(DB_CREATE)          // If the database doesn't yet exist,
+                                  // allow it to be created.
+{
+    try
+    {
+        // Redirect debugging information to std::cerr
+        db_.set_error_stream(&std::cerr);
+                                                                                                                                    
+        // If this is a secondary database, support
+        // sorted duplicates
+        if (isSecondary)
+            db_.set_flags(DB_DUPSORT);
+                                                                                                                                    
+        // Open the database
+        db_.open(NULL, dbFileName_.c_str(), NULL, DB_BTREE, cFlags_, 0);
+    }
+    // DbException is not a subclass of std::exception, so we
+    // need to catch them both.
+    catch(DbException &e)
+    {
+        std::cerr << "Error opening database: " << dbFileName_ << "\n";
+        std::cerr << e.what() << std::endl;
+    }
+    catch(std::exception &e)
+    {
+        std::cerr << "Error opening database: " << dbFileName_ << "\n";
+        std::cerr << e.what() << std::endl;
+    }
+} 
+

+ That done, we can now update + excxx_example_database_load to open our new secondary + database and associate it to the inventory database. +

+

+ To save space, we do not show the entire implementation for this program here. Instead, we show just the main() function, which is where all our modifications occur. To see the rest of the implementation for this program, see excxx_example_database_load.

+ +
// Loads the contents of vendors.txt and inventory.txt into
+// Berkeley DB databases.
+int
+main(int argc, char *argv[])
+{
+    // Initialize the path to the database files
+    std::string basename("./");
+    std::string databaseHome("./");
+                                                                                                                                         
+    // Database names
+    std::string vDbName("vendordb.db");
+    std::string iDbName("inventorydb.db");
+    std::string itemSDbName("itemname.sdb");
+                                                                                                                                         
+    // Parse the command line arguments here and determine
+    // the location of the flat text files containing the
+    // inventory data here. This step is omitted for clarity.
+                                                                                                                                         
+    //  Identify the full name for our input files, which should
+    //  also include some path information.
+    std::string inventoryFile = basename + "inventory.txt";
+    std::string vendorFile = basename + "vendors.txt";
+                                                                                                                                         
+    try
+    {
+        // Open all databases.
+        MyDb inventoryDB(databaseHome, iDbName);
+        MyDb vendorDB(databaseHome, vDbName);
+        MyDb itemnameSDB(databaseHome, itemSDbName, true);
+
+        // Associate the primary and the secondary
+        inventoryDB.getDb().associate(NULL,
+                                      &(itemnameSDB.getDb()),
+                                      get_item_name,
+                                      0);
+
+        // Load the vendor database
+        loadVendorDB(vendorDB, vendorFile);
+                                                                                                                                         
+        // Load the inventory database
+        loadInventoryDB(inventoryDB, inventoryFile);
+    } catch(DbException &e) {
+        std::cerr << "Error loading databases. " << std::endl;
+        std::cerr << e.what() << std::endl;
+        return(e.get_errno());
+    } catch(std::exception &e) {
+        std::cerr << "Error loading databases. " << std::endl;
+        std::cerr << e.what() << std::endl;
+        return(-1);
+    }
+                                                                                                                                         
+    return(0);
+} // End main 
+

+ Note that the order in which we instantiate our MyDb class instances is important. In general, you want to close a secondary database before closing the primary database with which it is associated. This is particularly true for multi-threaded or multi-process applications in which the database closes are not single-threaded. Even so, it is a good habit to adopt, even for simple applications such as this one. Here, we ensure that the databases are closed in the desired order by opening the secondary database last. This works because our MyDb objects are on the stack, and therefore the last one opened is the first one closed, as the following sketch illustrates.

+
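+ The following minimal sketch is not part of the example program; the names used here are hypothetical. It simply illustrates the ordering guarantee we are relying on:
+
+#include <string>
+#include "MyDb.hpp"
+
+void
+open_in_safe_order()
+{
+    std::string home("./");
+    std::string primaryName("inventorydb.db");
+    std::string secondaryName("itemname.sdb");
+
+    MyDb primaryDB(home, primaryName);               // constructed (opened) first
+    MyDb secondaryDB(home, secondaryName, true);     // constructed (opened) last
+
+    // Database operations would happen here.
+
+}   // Destructors run in reverse order of construction, so
+    // secondaryDB (the secondary) closes before primaryDB.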

+ That completes our update to + + excxx_example_database_load. + Now when this program is called, it will automatically index inventory + items based on their names. We can then query for those items using the + new index. We show how to do that in the next section. +

+
+
+
+
+
+

Secondary Databases with + + excxx_example_database_read +

+
+
+
+
+

+ In Cursor Example we + wrote an application that displays every inventory item in the + Inventory database. In this section, we will update that example to + allow us to search for and display an inventory item given a + specific name. To do this, we will make use of the secondary + database that + + excxx_example_database_load + now creates. +

+

+ The update to excxx_example_database_read is relatively modest. We need to open the new secondary database in exactly the same way as we do for excxx_example_database_load. We also need to add a command line parameter on which we can specify the item name, and we will need a new function in which we perform the query and display the results.

+

+ To begin, we add a single forward declaration to the application, + and update our usage function slightly: +

+ +
// File: excxx_example_database_read.cpp
+#include <iostream>
+#include <fstream>
+#include <cstdlib>
+                                                                                                                                         
+#include "MyDb.hpp"
+#include "gettingStartedCommon.hpp"
+                                                                                                                                         
+// Forward declarations
+int show_all_records(MyDb &inventoryDB, MyDb &vendorDB);
+int show_item(MyDb &itemnameSDB, MyDb &vendorDB, std::string &itemName);
+int show_vendor(MyDb &vendorDB, const char *vendor); 
+

+ Next, we update main() to + open the new secondary database and + accept the new command line switch. + We also need a new variable to contain the item's name. +

+

+ The final update to main() entails a little bit of logic to determine whether we want to display all available inventory items, or just the ones that match a name provided on the -i command line parameter.

+ +
// Displays all inventory items and the associated vendor record.
+int
+main (int argc, char *argv[])
+{
+    // Initialize the path to the database files
+    std::string databaseHome("./");
+    std::string itemName;
+                                                                                                                                         
+    // Database names
+    std::string vDbName("vendordb.db");
+    std::string iDbName("inventorydb.db");
+    std::string itemSDbName("itemname.sdb");
+                                                                                                                                         
+    // Parse the command line arguments
+    // Omitted for brevity
+                                                                                                                                         
+    try
+    {
+        // Open all databases.
+        MyDb inventoryDB(databaseHome, iDbName);
+        MyDb vendorDB(databaseHome, vDbName);
+        MyDb itemnameSDB(databaseHome, itemSDbName, true);
+                                                                                                                                    
+        // Associate the secondary to the primary
+        inventoryDB.getDb().associate(NULL,
+                                      &(itemnameSDB.getDb()),
+                                      get_item_name,
+                                      0);
+
+        if (itemName.empty())
+        {
+            show_all_records(inventoryDB, vendorDB);
+        } else {
+            show_item(itemnameSDB, vendorDB, itemName);
+        }
+    } catch(DbException &e) {
+        std::cerr << "Error reading databases. " << std::endl;
+        std::cerr << e.what() << std::endl;
+        return(e.get_errno());
+    } catch(std::exception &e) {
+        std::cerr << "Error reading databases. " << std::endl;
+        std::cerr << e.what() << std::endl;
+        return(-1);
+    }
+                                                                                                                                         
+    return(0);
+} // End main 
+

+ The only other thing that we need to add to the application is the + implementation of the + + show_item() + function. +

+
+

Note

+

+ In the interest of space, we refrain from showing the other + functions used by this application. For their implementation, please + see Cursor Example. + Alternatively, you can see the entire implementation of this + application + in: +

+
DB_INSTALL/examples_cxx/getting_started
+

+ where DB_INSTALL is the location where you + placed your DB distribution. +

+
+ +
// Shows the records in the inventory database that
+// have a specific item name. For each inventory record
+// shown, the appropriate vendor record is also displayed.
+int
+show_item(MyDb &itemnameSDB, MyDb &vendorDB, std::string &itemName)
+{
+    // Get a cursor to the itemname secondary db
+    Dbc *cursorp;
+                                                                                                                                    
+    try {
+        itemnameSDB.getDb().cursor(NULL, &cursorp, 0);
+                                                                                                                                    
+        // Get the search key. This is the name on the inventory
+        // record that we want to examine.
+        std::cout << "Looking for " << itemName << std::endl;
+        Dbt key((void *)itemName.c_str(), itemName.length() + 1);
+        Dbt data;
+                                                                                                                                    
+        // Position the cursor to the first record in the secondary
+        // database that has the appropriate key.
+        int ret = cursorp->get(&key, &data, DB_SET);
+        if (!ret) {
+            do {
+                InventoryData inventoryItem(data.get_data());
+                inventoryItem.show();
+                                                                                                                                    
+                show_vendor(vendorDB, inventoryItem.getVendor().c_str());
+                                                                                                                                    
+            } while(cursorp->get(&key, &data, DB_NEXT_DUP) == 0);
+        } else {
+            std::cerr << "No records found for '" << itemName
+                      << "'" << std::endl;
+        }
+    } catch(DbException &e) {
+        itemnameSDB.getDb().err(e.get_errno(), "Error in show_item");
+        cursorp->close();
+        throw e;
+    } catch(std::exception &e) {
+        itemnameSDB.getDb().errx("Error in show_item: %s", e.what());
+        cursorp->close();
+        throw e;
+    }
+                                                                                                                                    
+    cursorp->close();
+    return (0);
+}
+
+
+

+ This completes our update to + + excxx_example_database_read. + + Using this update, you + can now search for and show all inventory items that match a particular + name. For example: +

+
    excxx_example_database_read -i "Zulu Nut"
+
+
diff --git a/db/docs/gsg/CXX/databaseLimits.html b/db/docs/gsg/CXX/databaseLimits.html
new file mode 100644
index 000000000..15c23a81c
--- /dev/null
+++ b/db/docs/gsg/CXX/databaseLimits.html
@@ -0,0 +1,81 @@
+
+
+
+

Database Limits and Portability

+
+
+
+
+

+ Berkeley DB provides support for managing everything from very small databases that fit entirely in memory to extremely large databases holding millions of records and terabytes of data. A DB database can store up to 256 terabytes of data, and an individual record key or data item can be up to 4 gigabytes in size.

+

+ DB's databases store data in a binary format that is portable across platforms, even platforms of differing endian-ness. Be aware, however, that portability aside, some performance issues can crop up if you are using a little-endian architecture. See Setting Comparison Functions for more information.

+

+ Also, DB's databases and data structures are designed for concurrent access: they are thread-safe, and they share well across multiple processes. That said, in order to allow multiple processes to share databases and the cache, DB makes use of mechanisms that do not work well on network-shared drives (NFS or Windows network shares, for example). For this reason, you cannot place your DB databases and environments on network-mounted drives.

+
diff --git a/db/docs/gsg/CXX/dbErrorReporting.html b/db/docs/gsg/CXX/dbErrorReporting.html
new file mode 100644
index 000000000..f90fe6bf8
--- /dev/null
+++ b/db/docs/gsg/CXX/dbErrorReporting.html
@@ -0,0 +1,211 @@
+
+
+
+

Error Reporting Functions

+
+
+
+
+

+ To simplify error reporting and handling, the + + Db class + + offers several useful methods. + + + + +

+
+
    +
  • +

    + set_error_stream() + +

    +

    + Sets the + C++ ostream + + to be used for displaying error messages issued by the DB library. +

    +
  • +
  • +

    + set_errcall() + +

    +

    + Defines the function that is called when an error message is + issued by DB. The error prefix and message are passed to + this callback. It is up to the application to display this + information correctly. +

    +
  • +
  • +

    + set_errfile() +

    +

    + Sets the C library FILE * to be used for + displaying error messages issued by the DB library. +

    +
  • +
  • +

    + set_errpfx() + +

    +

    + Sets the prefix used for any error messages issued by the DB library.

    +
  • +
  • +

    + err() +

    +

    + Issues an error message. The error message is sent to the + callback function as defined by set_errcall. + If that method has not been used, then the error message is sent to the + file defined by + + + set_errfile() or set_error_stream(). + + If none of these methods have been used, then the error message is sent to + standard error. +

    +

    + The error message consists of the prefix string (as defined by set_errpfx()), an optional printf-style formatted message, the error message, and a trailing newline.

    +
  • +
  • +

    + errx() +

    +

    + Behaves identically to err() except + that the DB message text associated with the supplied error + value is not appended to the error string. +

    +
  • +
+
+

+ In addition, you can use the db_strerror() + function to directly return the error string that corresponds to a + particular error number. +

+
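+ For instance, as a minimal sketch (this helper is not part of the example programs), db_strerror() can be used to turn a DB error return into a printable message:
+
+#include <db_cxx.h>
+#include <iostream>
+
+void
+report_db_error(int ret)
+{
+    // db_strerror() maps a DB (or errno) error value to its
+    // corresponding message string.
+    std::cerr << "Operation failed: " << db_strerror(ret) << std::endl;
+}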

+ For example, to send all error messages for a given database handle + to a callback for handling, first create your callback. Do something like this: +

+ +
/* 
+ * Function called to handle any database error messages
+ * issued by DB. 
+ */
+void
+my_error_handler(const char *error_prefix, char *msg)
+{
+  /* 
+   * Put your code to handle the error prefix and error
+   * message here. Note that one or both of these parameters
+   * may be NULL depending on how the error message is issued
+   * and how the DB handle is configured.
+   */
+} 
+

+ And then register the callback as follows: +

+ +
#include <db_cxx.h>
+...
+
+Db db(NULL, 0);
+std::string dbFileName("my_db.db");
+
+try
+{
+    // Set up error handling for this database
+    db.set_errcall(my_error_handler);
+    db.set_errpfx("my_example_program"); 
+

+ And to issue an error message: +

+ +
    // Open the database
+    db.open(NULL, dbFileName.c_str(), NULL, DB_BTREE, DB_CREATE, 0);
+}
+    // Must catch both DbException and std::exception
+    catch(DbException &e)
+    {
+        db.err(e.get_errno(), "Database open failed %s", 
+            dbFileName.c_str());
+        throw e;
+    }
+    catch(std::exception &e)
+    {
+        // No DB error number available, so use errx
+        db.errx("Error opening database: %s", e.what());
+        throw e;
+    } 
+ + + +
diff --git a/db/docs/gsg/CXX/dbconfig.html b/db/docs/gsg/CXX/dbconfig.html
new file mode 100644
index 000000000..3fb57e91f
--- /dev/null
+++ b/db/docs/gsg/CXX/dbconfig.html
@@ -0,0 +1,401 @@
+
+
+
+

Chapter 6. Database Configuration

+
+
+
+
+
+

+ Table of Contents +

+
+
+ + Setting the Page Size + +
+
+
+
+ + Overflow Pages + +
+
+ + Locking + +
+
+ + IO Efficiency + +
+
+ + Page Sizing Advice + +
+
+
+
+ + Selecting the Cache Size + +
+
+ + BTree Configuration + +
+
+
+
+ + Allowing Duplicate Records + +
+
+ + Setting Comparison Functions + +
+
+
+
+
+

+ This chapter describes some of the database and cache configuration issues + that you need to consider when building your DB database. + In most cases, there is very little that you need to do in terms of + managing your databases. However, there are configuration issues that you + need to be concerned with, and these are largely dependent on the access + method that you are choosing for your database. +

+

+ The examples and descriptions throughout this document have mostly focused + on the BTree access method. This is because the majority of DB + applications use BTree. For this reason, where configuration issues are + dependent on the type of access method in use, this chapter will focus on + BTree only. For configuration descriptions surrounding the other access + methods, see the Berkeley DB Programmer's Tutorial and Reference + Guide. +

+
+
+
+
+

Setting the Page Size

+
+
+
+
+

+ Internally, DB stores database entries on pages. Page sizes are + important because they can affect your application's performance. +

+

+ DB pages can be between 512 bytes and 64K bytes in size. The size + that you select must be a power of 2. You set your database's + page size using + + Db::set_pagesize(). + +

+
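+ As a minimal sketch (the file name and page size here are just illustrative values), the page size is set on the handle before the database is created:
+
+#include <db_cxx.h>
+
+Db db(NULL, 0);
+
+// Must be a power of 2 between 512 bytes and 64K bytes, and must be
+// set before the database is created.
+db.set_pagesize(4096);
+
+db.open(NULL, "my_db.db", NULL, DB_BTREE, DB_CREATE, 0);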

+ Note that a database's page size can only be selected at database + creation time. +

+

+ When selecting a page size, you should consider the following issues: +

+
+
    +
  • +

    + Overflow pages. +

    +
  • +
  • +

    + Locking +

    +
  • +
  • +

    + Disk I/O. +

    +
  • +
+
+

+ These topics are discussed next. +

+
+
+
+
+

Overflow Pages

+
+
+
+
+

+ Overflow pages are used to hold a key or data item + that cannot fit on a single page. You do not have to do anything to + cause overflow pages to be created, other than to store data that is + too large for your database's page size. Also, the only way you can + prevent overflow pages from being created is to be sure to select a + page size that is large enough to hold your database entries. +

+

+ Because overflow pages exist outside of the normal database structure, their use is expensive from a performance perspective. If you select too small a page size, then your database will be forced to use an excessive number of overflow pages, which will significantly harm your application's performance.

+

+ For this reason, you want to select a page size that is at + least large enough to hold multiple entries given the expected + average size of your database entries. In BTree's case, for best + results select a page size that can hold at least 4 such entries. +

+

+ You can see how many overflow pages your database is using by + + using the + + Db::stat() + method, + + + + or by examining your database using the + db_stat command line utility. +

+
+
+
+
+
+

Locking

+
+
+
+
+

+ Locking and multi-threaded access to DB databases are built into the product. However, in order to enable the locking subsystem and to provide efficient sharing of the cache between databases, you must use an environment. Environments and multi-threaded access are not fully described in this manual (see the Berkeley DB Programmer's Reference Manual for information); however, we provide some information on sizing your pages in a multi-threaded/multi-process environment in the interest of providing a complete discussion of the topic.

+

+ If your application is multi-threaded, or if your databases are accessed by more than one process at a time, then page size can influence your application's performance. The reason is that for most access methods (Queue is the exception), DB implements page-level locking. This means that the finest locking granularity is the page, not the record.

+

+ In most cases, database pages contain multiple database + records. Further, in order to provide safe access to multiple + threads or processes, DB performs locking on pages as entries on + those pages are read or written. +

+

+ As the size of your page increases relative to the size of your database entries, the number of entries held on any given page also increases. The result is that the chances of two or more readers and/or writers wanting to access entries on any given page also increase.

+

+ When two or more threads and/or processes want to manage data on a + page, lock contention occurs. Lock contention is resolved by one + thread (or process) waiting for another thread to give up its lock. + It is this waiting activity that is harmful to your application's + performance. +

+

+ It is possible to select a page size that is so large that your + application will spend excessive, and noticeable, amounts of time + resolving lock contention. Note that this scenario is particularly + likely to occur as the amount of concurrency built into your + application increases. +

+

+ On the other hand, if you select too small a page size, then that will only make your tree deeper, which can also cause performance penalties. The trick, therefore, is to select a reasonable page size (one that will hold a sizeable number of records) and then reduce the page size if you notice lock contention.

+

+ You can examine the number of lock conflicts and deadlocks occurring in your application by examining your database environment's lock statistics. Either use the DbEnv::lock_stat() method, or use the db_stat command line utility. The number of locks that could not be obtained due to conflicts is held in the lock statistics' st_nconflicts field.

+
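+ A minimal sketch (not part of the example programs) of the programmatic approach follows; it assumes an already-opened environment handle and the st_nconflicts field named above:
+
+#include <db_cxx.h>
+#include <stdlib.h>
+#include <iostream>
+
+void
+print_lock_conflicts(DbEnv &env)
+{
+    DB_LOCK_STAT *lockStats = NULL;
+
+    // DB allocates the statistics structure; the caller frees it.
+    env.lock_stat(&lockStats, 0);
+    std::cout << "Lock requests not granted due to conflicts: "
+              << lockStats->st_nconflicts << std::endl;
+    free(lockStats);
+}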
+
+
+
+
+

IO Efficiency

+
+
+
+
+

+ Page size can affect how efficient DB is at moving data to and from disk. For some applications, especially those for which the in-memory cache cannot be large enough to hold the entire working dataset, IO efficiency can significantly impact application performance.

+

+ Most operating systems use an internal block size to determine how much + data to move to and from disk for a single I/O operation. This block + size is usually equal to the filesystem's block size. For optimal + disk I/O efficiency, you should select a database page size that is + equal to the operating system's I/O block size. +

+
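+ As a minimal, POSIX-specific sketch (not from the text; the helper name and paths are illustrative), the filesystem's preferred block size can be discovered with stat(2) and used as the page size:
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <db_cxx.h>
+
+int
+open_with_fs_block_size(Db &db, const char *dbHome, const char *dbFile)
+{
+    struct stat sb;
+
+    // st_blksize is the filesystem's preferred I/O block size; on most
+    // systems it is already a power of 2, as DB requires.
+    if (stat(dbHome, &sb) != 0)
+        return (-1);
+
+    db.set_pagesize((u_int32_t)sb.st_blksize);
+    return (db.open(NULL, dbFile, NULL, DB_BTREE, DB_CREATE, 0));
+}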

+ Essentially, DB performs data transfers based on the database + page size. That is, it moves data to and from disk a page at a time. + For this reason, if the page size does not match the I/O block size, + then the operating system can introduce inefficiencies in how it + responds to DB's I/O requests. +

+

+ For example, suppose your page size is smaller than your operating system's block size. In this case, when DB writes a page to disk it is writing just a portion of a logical filesystem page. Any time an application writes just a portion of a logical filesystem page, the operating system must bring in the real filesystem page, merge in the portion written by the application, and then write the filesystem page back to disk. The net result is significantly more disk I/O than if the application had simply selected a page size that was equal to the underlying filesystem block size.

+

+ Alternatively, if you select a page size that is larger than the + underlying filesystem block size, then the operating system may have + to read more data than is necessary to fulfill a read request. + Further, on some operating systems, requesting a single database + page may result in the operating system reading enough filesystem + blocks to satisfy the operating system's criteria for read-ahead. In + this case, the operating system will be reading significantly more + data from disk than is actually required to fulfill DB's read + request. +

+
+

Note

+

+ While transactions are not discussed in this manual, a page size other than your filesystem's block size can affect transactional guarantees. The reason is that a page size larger than the filesystem's block size causes DB to write pages in block-size increments. As a result, it is possible for a partial page to be written as the result of a transactional commit. For more information, see http://www.sleepycat.com/docs/ref/transapp/reclimit.html.

+
+
+
+
+
+
+

Page Sizing Advice

+
+
+
+
+

+ Page sizing can be confusing at first, so here are some general + guidelines that you can use to select your page size. +

+

+ In general, and given no other considerations, a page size that is equal + to your filesystem block size is the ideal situation. +

+

+ If your data is designed such that 4 database entries cannot fit on a + single page (assuming BTree), then grow your page size to accommodate + your data. Once you've abandoned matching your filesystem's block + size, the general rule is that larger page sizes are better. +

+

+ The exception to this rule is if you have a great deal of + concurrency occurring in your application. In this case, the closer + you can match your page size to the ideal size needed for your + application's data, the better. Doing so will allow you to avoid + unnecessary contention for page locks. +

+
+
+
diff --git a/db/docs/gsg/CXX/environments.html b/db/docs/gsg/CXX/environments.html
new file mode 100644
index 000000000..fa1493eb4
--- /dev/null
+++ b/db/docs/gsg/CXX/environments.html
@@ -0,0 +1,150 @@
+
+
+
+

Environments

+
+
+
+
+

+ This manual is meant as an introduction to the Berkeley DB library, and so it describes how to build a very simple, single-threaded application. Consequently, this manual omits a great many powerful aspects of the DB database engine that are not required by simple applications. One of these is important enough that it warrants a brief overview here: environments.

+

+ While environments are frequently not used by applications running in embedded environments where every byte counts, they will be used by virtually any other DB application requiring anything more than the bare minimum functionality. An environment is essentially an encapsulation of one or more databases: you open an environment and then you open databases in that environment. When you do so, the databases are created/located in a location relative to the environment's home directory.

+
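+ As a minimal sketch (the home directory and flags here are illustrative assumptions only), an application opens the environment first and then opens its databases relative to it:
+
+#include <db_cxx.h>
+
+// Exception handling omitted for brevity.
+DbEnv env(0);
+env.open("./dbenv",                      // Environment home directory
+         DB_CREATE | DB_INIT_MPOOL,      // Create it if needed; share the cache
+         0);
+
+Db db(&env, 0);                          // This handle belongs to the environment
+db.open(NULL, "my_db.db", NULL, DB_BTREE, DB_CREATE, 0);
+
+// ... database operations ...
+
+db.close(0);
+env.close(0);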

+ Environments offer a great many features that a stand-alone DB + database cannot offer: +

+
+
    +
  • +

    + Multi-database files. +

    +

    + It is possible in DB to contain multiple databases in a single physical file on disk. This is desirable for applications that open more than a handful of databases. However, in order to have more than one database contained in a single physical file, your application must use an environment.

    +
  • +
  • +

    + Multi-thread and multi-process support +

    +

    + When you use an environment, resources such as the in-memory + cache and locks can be shared by all of the databases opened in the + environment. The environment allows you to enable + subsystems that are designed to allow multiple threads and/or + processes to access DB databases. For example, you use an + environment to enable the concurrent data store (CDS), the + locking subsystem, and/or the shared memory buffer pool. +

    +
  • +
  • +

    + Transactional processing +

    +

    + DB offers a transactional subsystem that allows for full + ACID-protection of your database writes. You use environments to + enable the transactional subsystem, and then subsequently to obtain + transaction IDs. +

    +
  • +
  • +

    + High availability (replication) support +

    +

    + DB offers a replication subsystem that enables + single-master database replication with multiple read-only + copies of the replicated data. You use environments to enable + and then manage this subsystem. +

    +
  • +
  • +

    + Logging subsystem +

    +

    + DB offers write-ahead logging for applications that want to + obtain a high-degree of recoverability in the face of an + application or system crash. Once enabled, the logging subsystem + allows the application to perform two kinds of recovery + ("normal" and "catastrophic") through the use of the information + contained in the log files. +

    +
  • +
+
+

+ All of these topics are described in the Berkeley DB + Programmer's Reference Guide. +

+
diff --git a/db/docs/gsg/CXX/gettingStarted.css b/db/docs/gsg/CXX/gettingStarted.css
new file mode 100644
index 000000000..c1b4c86b7
--- /dev/null
+++ b/db/docs/gsg/CXX/gettingStarted.css
@@ -0,0 +1,41 @@
+body { width: 45em;
+       margin-left: 3em;
+       font-family: Arial, Helvetica, sans-serif;
+       font-size: 11pt;
+     }
+
+h2.title { margin-left: -1em;
+           font-family: Verdana, serif;
+           font-size: 16pt;
+         }
+
+h3.title { font-family: Verdana, serif;
+           font-size: 14pt;
+         }
+
+pre.programlisting {
+    font-family: monospace;
+    background-color: #eae8e9;
+}
+
+div.navheader { font-size: 9pt;
+                width: 60em;
+                margin-left: -2em;
+              }
+
+div.navheader table tr td { font-size: 9pt; }
+
+div.navfooter { font-size: 9pt;
+                width: 60em;
+                margin-left: -2em;
+              }
+div.navfooter table tr td { font-size: 9pt; }
+
+span.emphasis { font-style: italic; font-size: 9pt;}
+
+div.appendix div.informaltable { font-size: 9pt; }
+div.appendix div.informaltable td { vertical-align: top; }
+div.appendix div.informaltable p { margin-top: .25em; }
+div.appendix div.informaltable p { margin-bottom: .25em; }
+
diff --git a/db/docs/gsg/CXX/gettingit.html b/db/docs/gsg/CXX/gettingit.html
new file mode 100644
index 000000000..c722aefce
--- /dev/null
+++ b/db/docs/gsg/CXX/gettingit.html
@@ -0,0 +1,77 @@
+
+
+
+

Getting and Using DB

+
+
+
+
+

+ You can obtain DB by visiting the Sleepycat download page: + http://www.sleepycat.com/download/index.shtml. +

+

+ To install DB, untar or unzip the distribution to the directory of + your choice. You will then need to build the product binaries. + For information on building DB, see + DB_INSTALL/docs/index.html, + where DB_INSTALL is the directory where you unpacked + DB. On that page, you will find links to platform-specific build + instructions. +

+

+ That page also contains links to more documentation for DB. In + particular, you will find links for the Berkeley DB + Programmer's Tutorial and Reference Guide as + well as the API reference documentation. +

+
diff --git a/db/docs/gsg/CXX/index.html b/db/docs/gsg/CXX/index.html
new file mode 100644
index 000000000..e90f04122
--- /dev/null
+++ b/db/docs/gsg/CXX/index.html
@@ -0,0 +1,465 @@
+
+
+
+

Getting Started with Berkeley DB

+
+
+
+

+ Legal Notice +

+

+ This documentation is distributed under the terms of the Sleepycat + public license. You may review the terms of this license at: + http://www.sleepycat.com/download/oslicense.html +

+

+ Sleepycat Software, Berkeley DB, Berkeley DB XML and the Sleepycat logo + are trademarks or service marks of Sleepycat Software, Inc. All rights to + these marks are reserved. No third-party use is permitted without the + express prior written consent of Sleepycat Software, Inc. +

+

+ To obtain a copy of this document's original source code, please write + to . +

+
+
+
+

9/22/2004

+
+
+
+
+
+
+

+ Table of Contents +

+
+
+ + Preface + +
+
+
+
+ + Conventions Used in this Book + +
+
+
+
+ + 1. Introduction to Berkeley DB + +
+
+
+
+ + About This Manual + +
+
+ + Berkeley DB Concepts + +
+
+ + Access Methods + +
+
+
+
+ + Selecting Access Methods + +
+
+ + Choosing between BTree and Hash + +
+
+ + Choosing between Queue and Recno + +
+
+
+
+ + Database Limits and Portability + +
+
+ + Environments + +
+
+ + Exception Handling + +
+
+ + Error Returns + +
+
+ + Getting and Using DB + +
+
+
+
+ + 2. Databases + +
+
+
+
+ + Opening Databases + +
+
+ + Closing Databases + +
+
+ + Database Open Flags + +
+
+ + Administrative Methods + +
+
+ + Error Reporting Functions + +
+
+ + Managing Databases in Environments + +
+
+ + Database Example + +
+
+
+
+ + 3. Database Records + +
+
+
+
+ + Using Database Records + +
+
+ + Reading and Writing Database Records + +
+
+
+
+ + Writing Records to the Database + +
+
+ + Getting Records from the Database + +
+
+ + Deleting Records + +
+
+ + Data Persistence + +
+
+
+
+ + Database Usage Example + +
+
+
+
+ + 4. Using Cursors + +
+
+
+
+ + Opening and Closing Cursors + +
+
+ + Getting Records Using the Cursor + +
+
+
+
+ + Searching for Records + +
+
+ + Working with Duplicate Records + +
+
+
+
+ + Putting Records Using Cursors + +
+
+ + Deleting Records Using Cursors + +
+
+ + Replacing Records Using Cursors + +
+
+ + Cursor Example + +
+
+
+
+ + 5. Secondary Databases + +
+
+
+
+ + Opening and Closing Secondary Databases + +
+
+ + Implementing Key + + Extractors + + +
+
+ + Reading Secondary Databases + +
+
+ + Deleting Secondary Database Records + +
+
+ + + + Using Cursors with Secondary Databases + + +
+
+ + Database Joins + +
+
+
+
+ + Using Join Cursors + +
+
+
+
+ + Secondary Database Example + +
+
+
+
+ + Secondary Databases with + + excxx_example_database_load + + +
+
+ + Secondary Databases with + + excxx_example_database_read + + +
+
+
+
+
+
+ + 6. Database Configuration + +
+
+
+
+ + Setting the Page Size + +
+
+
+
+ + Overflow Pages + +
+
+ + Locking + +
+
+ + IO Efficiency + +
+
+ + Page Sizing Advice + +
+
+
+
+ + Selecting the Cache Size + +
+
+ + BTree Configuration + +
+
+
+
+ + Allowing Duplicate Records + +
+
+ + Setting Comparison Functions + +
+
+
+
+
+
+
+
+

+ List of Examples +

+
+
2.1. MyDb Class
+
3.1. VENDOR Structure
+
3.2. InventoryData Class
+
3.3. excxx_example_database_load
+
4.1. excxx_example_database_read
+
+
+
diff --git a/db/docs/gsg/CXX/indexes.html b/db/docs/gsg/CXX/indexes.html
new file mode 100644
index 000000000..947ee63f3
--- /dev/null
+++ b/db/docs/gsg/CXX/indexes.html
@@ -0,0 +1,327 @@
+
+
+
+

Chapter 5. Secondary Databases

+
+
+
+
+ +

+ Usually you find database records by means of the record's key. However, the key that you use for your record will not always contain the information required to provide you with rapid access to the data that you want to retrieve. For example, suppose your database contains records related to users. The key might be a string that is some unique identifier for the person, such as a user ID. Each record's data, however, would likely contain a complex object containing details about people such as names, addresses, phone numbers, and so forth. While your application may frequently want to query a person by user ID (that is, by the information stored in the key), it may also on occasion want to locate people by, say, their name.

+

+ Rather than iterate through all of the records in your database, examining each in turn for a given person's name, you can create an index based on names and then just search that index for the name that you want. You can do this using secondary databases. In DB, the database that contains your data is called a primary database. A database that provides an alternative set of keys to access that data is called a secondary database. In a secondary database, the keys are your alternative (or secondary) index, and the data corresponds to a primary record's key.

+

+ You create a secondary database by creating the database, opening it, and + then associating the database with + the primary database (that is, the database for which + you are creating the index). As a part of associating + the secondary database to the primary, you must provide a callback that is + used to create the secondary database keys. Typically this callback creates + a key based on data found in the primary database record's key or data. +

+

+ Once opened, DB manages secondary databases for you. Adding or deleting + records in your primary database causes DB to update the secondary as + necessary. Further, changing a record's data in the primary database may cause + DB to modify a record in the secondary, depending on whether the change + forces a modification of a key in the secondary database. +

+

+ Note that you cannot write directly to a secondary database. Any attempt to write to a secondary database results in a non-zero status return. To change the data referenced by a secondary record, modify the primary database instead. The exception to this rule is that delete operations are allowed on the secondary database. See Deleting Secondary Database Records for more information.

+
+

Note

+

+ + Secondary database records are updated/created by DB + only if the + + key creator callback function + returns + + 0. + If + + a value other than 0 + is returned, then DB will not add the key to the secondary database, and + in the event of a record update it will remove any existing key. + + Note that the callback can use either + DB_DONOTINDEX or some error code outside of DB's + name space to indicate that the entry should not be indexed. + +

+

+ See Implementing Key + + Extractors + for more + + information. + +

+
+
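+ For example, as a sketch only (this function is not part of the example programs), a key creator can decline to index an individual record by returning DB_DONOTINDEX:
+
+#include <db_cxx.h>
+#include <string.h>
+
+int
+get_optional_key(Db *sdbp, const Dbt *pkey, const Dbt *pdata, Dbt *skey)
+{
+    // For this sketch we assume the record's data is simply a
+    // NUL-terminated string.
+    const char *field = (const char *)pdata->get_data();
+
+    if (field == NULL || field[0] == '\0')
+        return (DB_DONOTINDEX);    // Skip this record; do not index it.
+
+    skey->set_data((void *)field);
+    skey->set_size((u_int32_t)strlen(field) + 1);
+    return (0);
+}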

+ When you read a record from a secondary database, DB automatically + returns + + the data and optionally the key + from the corresponding record in the primary database. + +

+
+
+
+
+

Opening and Closing Secondary Databases

+
+
+
+
+

+ You manage secondary database opens and closes in the same way as you + would any normal database. The only difference is that: +

+
+
    +
  • +

    + You must associate the secondary to a primary database using + + Db::associate(). +

    +
  • +
  • +

    + When closing your databases, it is a good idea to make sure you + close your secondaries before closing your primaries. This is + particularly true if your database closes are not single + threaded. +

    +
  • +
+
+

+ When you associate a secondary to a primary database, you must provide a + callback that is used to generate the secondary's keys. These + callbacks are described in the next section. +

+

+ For example, to open a secondary database and associate it to a primary + database: +

+ +
#include <db_cxx.h>
+
+...
+                                                                                                                                     
+Db my_database(NULL, 0); // Primary
+Db my_index(NULL, 0);    // Secondary
+
+// Open the primary
+my_database.open(NULL,       // Transaction pointer
+                 "my_db.db", // On-disk file that holds the database.
+                NULL,        // Optional logical database name
+                DB_BTREE,    // Database access method
+                DB_CREATE,   // Open flags
+                0);          // File mode (using defaults)
+
+// Setup the secondary to use sorted duplicates.
+// This is often desirable for secondary databases.
+my_index.set_flags(DB_DUPSORT);
+
+// Open the secondary
+my_index.open(NULL,              // Transaction pointer
+              "my_secondary.db", // On-disk file that holds the database.
+              NULL,              // Optional logical database name
+              DB_BTREE,          // Database access method
+              DB_CREATE,         // Open flags.
+              0);                // File mode (using defaults)
+
+
+// Now associate the primary and the secondary
+my_database.associate(NULL,          // Txn id
+                      &my_index,     // Associated secondary database
+                      get_sales_rep, // Callback used for key extraction.
+                                     // This is described in the next
+                                     // section.
+                      0);            // Flags 
+

+ Closing the primary and secondary databases is accomplished exactly as you + would for any database: +

+ +
// Close the secondary before the primary
+my_index.close(0);
+my_database.close(0);
+
+
diff --git a/db/docs/gsg/CXX/introduction.html b/db/docs/gsg/CXX/introduction.html
new file mode 100644
index 000000000..1ea18433f
--- /dev/null
+++ b/db/docs/gsg/CXX/introduction.html
@@ -0,0 +1,234 @@
+
+
+
+

Chapter 1. Introduction to Berkeley DB

+
+
+
+
+
+

+ Table of Contents +

+
+
+ + About This Manual + +
+
+ + Berkeley DB Concepts + +
+
+ + Access Methods + +
+
+
+
+ + Selecting Access Methods + +
+
+ + Choosing between BTree and Hash + +
+
+ + Choosing between Queue and Recno + +
+
+
+
+ + Database Limits and Portability + +
+
+ + Environments + +
+
+ + Exception Handling + +
+
+ + Error Returns + +
+
+ + Getting and Using DB + +
+
+
+

+ Welcome to Sleepycat's Berkeley DB (DB). DB is a general-purpose embedded + database engine that is capable of providing a wealth of data management services. + It is designed from the ground up for high-throughput applications requiring + in-process, bullet-proof management of mission-critical data. DB can + gracefully scale from managing a few bytes to terabytes of data. For the most + part, DB is limited only by your system's available physical resources. +

+

+ Because DB is an embedded database engine, it is extremely fast. You compile + and link it into your application in the same way as you would any + third-party library. This means that DB runs in the same process space + as does your application, allowing you to avoid the high cost of + interprocess communications incurred by stand-alone database servers. +

+

+ To further improve performance, DB offers an in-memory cache designed to + provide rapid access to your most frequently used data. Once configured, + cache usage is transparent. It requires very little attention on the part + of the application developer. +

+

+ Beyond raw speed, DB is also extremely configurable. It provides several + different ways of organizing your data in its databases. Known as + access methods, each such data organization mechanism + provides different characteristics that are appropriate for different data + management profiles. (Note that this manual focuses almost entirely on the + BTree access method as this is the access method used by the vast majority + of DB applications). +

+

+ To further improve its configurability, DB offers many different + subsystems, each of which can be used to extend DB's capabilities. For + example, many applications require write-protection of their data so + as to ensure that data is never left in an inconsistent state for any + reason (such as software bugs or hardware failures). For those + applications, a transaction subsystem can be enabled and used to + transactionally protect database writes. +

+

+ The list of operating systems on which DB is available is too long to + detail here. Suffice to say that it is available on all major commercial + operating systems, as well as on many embedded platforms. +

+

+ Finally, DB is available in a wealth of programming languages. Sleepycat + officially supports DB in C, C++, and Java, but the library is also + available in many other languages, especially scripting languages such as + Perl and Python. +

+
+

Note

+

+ Before going any further, it is important to mention that DB is not + a relational database (although you could use it to build a relational + database). Out of the box, DB does not provide higher-level features + such as triggers, or a high-level query language such as SQL. + Instead, DB provides just those minimal + APIs required to store and retrieve your data as + efficiently as possible. +

+ + + +
+
+
+
+
+

About This Manual

+
+
+
+
+

+ This manual introduces DB. As such, this book does not examine + intermediate or advanced features such as threaded library usage or + transactional usage. Instead, this manual provides a step-by-step + introduction to DB's basic concepts and library usage. +

+

+ Specifically, this manual introduces DB environments, databases, + database records, and storage and retrieval of database records. This + book also introduces cursors and their usage, and it describes + secondary databases. +

+

+ For the most part, this manual focuses on the BTree access method. A + chapter is given at the end of this manual that describes some of the + concepts involving BTree usage, such as duplicate record management and comparison + routines. +

+

+ Examples are given throughout this book that are designed to illustrate + API usage. At the end of each chapter, a complete example is given that + is designed to reinforce the concepts covered in that chapter. In + addition to being presented in this book, these final programs are also + available in the DB software distribution. You can find them in +

+
DB_INSTALL/examples_cxx/getting_started
+

+ where DB_INSTALL is the + location where you placed your DB distribution. +

+

+ This book uses the C++ programming languages for its examples. + Note that versions of this book exist for the C and Java languages as + well. +

+
+
diff --git a/db/docs/gsg/CXX/joins.html b/db/docs/gsg/CXX/joins.html
new file mode 100644
index 000000000..a8cba860c
--- /dev/null
+++ b/db/docs/gsg/CXX/joins.html
@@ -0,0 +1,282 @@
+
+
+
+

Database Joins

+
+
+
+
+

+ If you have two or more secondary databases associated with a primary database, then you can retrieve primary records based on the intersection of multiple secondary entries. You do this using a join cursor.

+

+ Throughout this document we have presented a + + structure + that stores + + information on grocery + + vendors. + That + + structure + is fairly simple with a limited + number of data members, few of which would be interesting from a query + perspective. But suppose, instead, that we were storing + information on something with many more queryable characteristics, such + as an automobile. In that case, you may be storing information such as + color, number of doors, fuel mileage, automobile type, number of + passengers, make, model, and year, to name just a few. +

+

+ In this case, you would still likely be using some unique value to key your + primary entries (in the United States, the automobile's VIN would be + ideal for this purpose). You would then create a + + structure + that identifies + all the characteristics of the automobiles in your inventory. + + +

+

+ To query this data, you might then create multiple secondary databases, + one for each of the characteristics that you want to query. For + example, you might create a secondary for color, another for number of + doors, another for number of passengers, and so forth. Of course, you + will need a unique + + key extractor function + for each such secondary database. You do + all of this using the concepts and techniques described throughout this + chapter. +

+

+ Once you have created this primary database and all interesting + secondaries, what you have is the ability to retrieve automobile records + based on a single characteristic. You can, for example, find all the + automobiles that are red. Or you can find all the automobiles that have + four doors. Or all the automobiles that are minivans. +

+

+ The next most natural step, then, is to form compound queries, or joins. + For example, you might want to find all the automobiles that are red, + and that were built by Toyota, and that are minivans. You can do this + using a + + join cursor. + +

+
+
+
+
+

Using Join Cursors

+
+
+
+
+

+ To use a join cursor: +

+
+
    +
  • +

    + Open two or more + + cursors + + for + secondary databases that are associated with + the same primary database. +

    +
  • +
  • +

    + Position each such cursor to the secondary key + value in which you are interested. For example, to build on + the previous description, the cursor for the color + database is positioned to the red records + while the cursor for the model database is positioned to the + minivan records, and the cursor for the + make database is positioned to Toyota. +

    +
  • +
  • +

    + Create an array of cursors, and place in it each + of the cursors that are participating in your join query. +

    +
  • +
  • +

    + + Obtain a join cursor. You do this using the + + + Db::join() + method. You must pass this method the array of secondary cursors that you + opened and positioned in the previous steps. +

    +
  • +
  • +

    + Iterate over the set of matching records + + until + + the return code is not 0. +

    +
  • +
  • +

    + Close your join cursor.

    +
  • +
  • +

    + If you are done with them, close all of your secondary cursors.

    +
  • +
+
+

+ For example: + + + +

+ +
#include <db_cxx.h>
+#include <string.h>
+
+...
+
+// Exception handling omitted
+
+int ret;
+
+Db automotiveDB(NULL, 0);
+Db automotiveColorDB(NULL, 0);
+Db automotiveMakeDB(NULL, 0);
+Db automotiveTypeDB(NULL, 0);
+
+// Database and secondary database opens omitted for brevity.
+// Assume a primary database:
+//   automotiveDB
+// Assume 3 secondary databases:
+//   automotiveColorDB  -- secondary database based on automobile color
+//   automotiveMakeDB  -- secondary database based on the manufacturer
+//   automotiveTypeDB  -- secondary database based on automobile type
+
+// Position the cursors
+Dbc *color_curs;
+automotiveColorDB.cursor(NULL, &color_curs, 0);
+char *the_color = "red";
+Dbt key(the_color, strlen(the_color) + 1);
+Dbt data;
+if ((ret = color_curs->get(&key, &data, DB_SET)) != 0) {
+    // Error handling goes here
+}
+
+Dbc *make_curs;
+automotiveMakeDB.cursor(NULL, &make_curs, 0);
+char *the_make = "Toyota";
+key.set_data(the_make);
+key.set_size(strlen(the_make) + 1);
+if ((ret = make_curs->get(&key, &data, DB_SET)) != 0) {
+    // Error handling goes here
+}
+
+Dbc *type_curs; 
+automotiveTypeDB.cursor(NULL, &type_curs, 0);
+char *the_type = "minivan";
+key.set_data(the_type);
+key.set_size(strlen(the_type) + 1);
+if ((ret = type_curs->get(&key, &data, DB_SET)) != 0) {
+    // Error handling goes here
+}
+
+// Set up the cursor array
+Dbc *carray[3];
+carray[0] = color_curs;
+carray[1] = make_curs;
+carray[2] = type_curs;
+
+// Create the join
+Dbc *join_curs;
+if ((ret = automotiveDB.join(carray, &join_curs, 0)) != 0) {
+    // Error handling goes here
+}
+
+// Iterate using the join cursor
+while ((ret = join_curs->get(&key, &data, 0)) == 0) {
+    // Do interesting things with the key and data
+}
+
+// If we exited the loop because we ran out of records,
+// then it has completed successfully.
+if (ret == DB_NOTFOUND) {
+     // Close all our cursors and databases as is appropriate,  and 
+     // then exit with a normal exit status (0). 
+} 
+
+
diff --git a/db/docs/gsg/CXX/keyCreator.html b/db/docs/gsg/CXX/keyCreator.html
new file mode 100644
index 000000000..604a7befc
--- /dev/null
+++ b/db/docs/gsg/CXX/keyCreator.html
@@ -0,0 +1,147 @@
+
+
+
+

Implementing Key + + Extractors +

+
+
+
+
+

+ You must provide every secondary database with a callback function that creates keys from primary records. You identify this function when you associate your secondary database with your primary.

+

+ You can create keys using whatever data you want. Typically you will + base your key on some information found in a record's data, but you + can also use information found in the primary record's key. How you build + your keys is entirely dependent upon the nature of the index that you + want to maintain. +

+

+ You implement a key extractor by writing a function that extracts + the necessary information from a primary record's key or data. + This function must conform to a specific prototype, and it must be + provided as a callback to the associate() + method. +

+

+ For example, suppose your primary database records contain data that + uses the following structure: +

+ +
typedef struct vendor {
+    char name[MAXFIELD];             /* Vendor name */
+    char street[MAXFIELD];           /* Street name and number */
+    char city[MAXFIELD];             /* City */
+    char state[3];                   /* Two-digit US state code */
+    char zipcode[6];                 /* US zipcode */
+    char phone_number[13];           /* Vendor phone number */
+    char sales_rep[MAXFIELD];        /* Name of sales representative */
+    char sales_rep_phone[MAXFIELD];  /* Sales rep's phone number */
+} VENDOR; 
+

+ Further suppose that you want to be able to query your primary database + based on the name of a sales representative. Then you would write a + function that looks like this: +

+ +
#include <db_cxx.h>
+
+...
+
+int
+get_sales_rep(Db *sdbp,          // secondary db handle
+              const Dbt *pkey,   // primary db record's key
+              const Dbt *pdata,  // primary db record's data
+              Dbt *skey)         // secondary db record's key
+{
+    VENDOR *vendor;
+
+    // First, extract the structure contained in the primary's data
+    vendor = (VENDOR *)pdata->get_data();
+
+    // Now set the secondary key's data to be the representative's name
+    skey->set_data(vendor->sales_rep);
+    skey->set_size(strlen(vendor->sales_rep) + 1);
+
+    // Return 0 to indicate that the record can be created/updated.
+    return (0);
+} 
+

+ In order to use this function, you provide it on the + associate() method after the primary and + secondary databases have been created and opened: +

+ +
db.associate(NULL,           // TXN id
+             &sdb,           // Secondary database
+             get_sales_rep,      // Callback used for key creation.
+             0);                 // Flags
+
+ + + diff --git a/db/docs/gsg/CXX/preface.html b/db/docs/gsg/CXX/preface.html new file mode 100644 index 000000000..6f88298ab --- /dev/null +++ b/db/docs/gsg/CXX/preface.html @@ -0,0 +1,144 @@ + + + + + + Preface + + + + + + + + + +
+
+
+
+

Preface

+
+
+
+
+
+

+ Table of Contents +

+
+
+ + Conventions Used in this Book + +
+
+
+

+ Welcome to Berkeley DB (DB). This document introduces + + DB, version 4.3. + It is intended + to provide a rapid introduction to the DB API set and related concepts. The goal of this document is + to provide you with an efficient mechanism + with which you can evaluate DB against your project's technical requirements. As such, this document is + intended for C++ + developers and senior software architects who are + looking for an in-process data management solution. No prior experience with Sleepycat technologies is + expected or required. +

+
+
+
+
+

Conventions Used in this Book

+
+
+
+
+

The following typographical conventions are used within this manual:

+

+ Class names are represented in monospaced font, as are method + names. For example: + + + + + + "Db::open() is a + Db class method." +

+

+ Variable or non-literal text is presented in italics. For example: "Go to your + DB_INSTALL directory." +

+

+ Program examples are displayed in a monospaced font on a shaded background. + For example: +

+
typedef struct vendor {
+    char name[MAXFIELD];             // Vendor name
+    char street[MAXFIELD];           // Street name and number
+    char city[MAXFIELD];             // City
+    char state[3];                   // Two-letter US state code
+    char zipcode[6];                 // US zipcode
+    char phone_number[13];           // Vendor phone number
+} VENDOR; 
+

+ In some situations, programming examples are updated from one chapter to the next. When + this occurs, the new code is presented in monospaced bold font. For example: +

+
typedef struct vendor {
+    char name[MAXFIELD];             // Vendor name
+    char street[MAXFIELD];           // Street name and number
+    char city[MAXFIELD];             // City
+    char state[3];                   // Two-letter US state code
+    char zipcode[6];                 // US zipcode
+    char phone_number[13];           // Vendor phone number
+    char sales_rep[MAXFIELD];        // Name of sales representative
+    char sales_rep_phone[MAXFIELD];  // Sales rep's phone number 
+} VENDOR; 
+
+

Note

+

+ Finally, notes of interest are represented using a note block such + as this. +

+
+
+
+ + + diff --git a/db/docs/gsg/CXX/readSecondary.html b/db/docs/gsg/CXX/readSecondary.html new file mode 100644 index 000000000..06c55521b --- /dev/null +++ b/db/docs/gsg/CXX/readSecondary.html @@ -0,0 +1,133 @@ + + + + + + Reading Secondary Databases + + + + + + + + + +
+
+
+
+

Reading Secondary Databases

+
+
+
+
+

+ Like a primary database, you can read records from your secondary + database either by using the + + + + Db::get() + or + + Db::pget() + methods, + + or by using + + a cursor on the secondary database. + + The main difference between reading secondary and primary databases is that when + you read a secondary database record, the secondary record's data is not + returned to you. Instead, the primary key and data corresponding to the + secondary key are returned to you. +

+

+ For example, assuming your secondary database contains keys related + to a person's full name: +

+ +
#include <db_cxx.h>
+#include <string.h>
+
+...
+
+
+// The string to search for
+char *search_name = "John Doe";
+
+// Instantiate our Dbt's
+Dbt key(search_name, strlen(search_name) + 1);
+Dbt pkey, pdata; // Primary key and data
+
+                                                                                                                                     
+Db my_secondary_database(NULL, 0);
+// Primary and secondary database opens omitted for brevity
+                                                                                                                                     
+// Returns the key from the secondary database, and the data from the 
+// associated primary database entry.
+my_secondary_database.get(NULL, &key, &pdata, 0);
+
+// Returns the key from the secondary database, and the key and data 
+// from the associated primary database entry.
+my_secondary_database.pget(NULL, &key, &pkey, &pdata, 0);
+

+ Note that, just like + + a primary database, + + if your secondary database supports duplicate records then + + + + Db::get() + and + + Db::pget() + + only return the first record found in a matching duplicates set. If you + want to see all the records related to a specific secondary key, then use a + + + cursor opened on the secondary database. Cursors are described in + Using Cursors. + +

+
+ + + diff --git a/db/docs/gsg/CXX/secondaryCursor.html b/db/docs/gsg/CXX/secondaryCursor.html new file mode 100644 index 000000000..221efcc82 --- /dev/null +++ b/db/docs/gsg/CXX/secondaryCursor.html @@ -0,0 +1,154 @@ + + + + + + + + Using Cursors with Secondary Databases + + + + + + + + + + +
+
+
+
+

+ + Using Cursors with Secondary Databases +

+
+
+
+
+

+ Just like cursors on a primary database, you can use + + cursors on secondary databases + to iterate over the records in a secondary database. Like + + + cursors used with primary databases, + + you can also use + + cursors with secondary databases + to search for specific records in a database, to seek to the first + or last record in the database, to get the next duplicate record, + + and so forth. For a complete description on cursors and their capabilities, see + Using Cursors. +

+

+ However, when you use + + cursors with secondary databases: +

+
+
    +
  • +

    + Any data returned is the data contained on the primary database + record referenced by the secondary record. +

    +
  • +
  • +

    You cannot use DB_GET_BOTH and related flags with Dbc::get() and a secondary database. Instead, you must use Dbc::pget(). Also, in that case the primary and secondary key given on the call to Dbc::pget() must match the secondary key and associated primary record key in order for that primary record to be returned as a result of the call. (A short sketch using Dbc::pget() follows the example below.)

    +
  • +
+
+

For example, suppose you are using the databases and key extractor function described in Implementing Key Extractors. Then the following searches for a person's name in the secondary database, and deletes all secondary and primary records that use that name.

+ +
#include <db_cxx.h>
+                                                                                                                                     
+...
+                                                                                                                                     
+Db my_database(NULL, 0);
+Db my_index(NULL, 0);
+
+
+// Get a cursor on the secondary database
+Dbc *cursorp;
+my_index.cursor(NULL, &cursorp, 0);
+
+
+// Name to delete
+char *search_name = "John Doe"; 
+
+// Instantiate Dbts as normal
+Dbt key(search_name, strlen(search_name) + 1);
+Dbt data;
+
+ 
+// Position the cursor
+while (cursorp->get(&key, &data, DB_SET) == 0)
+    cursorp->del(0); 
+
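The restriction on DB_GET_BOTH noted above means that Dbc::pget() is how you obtain the primary key through a secondary cursor. The following fragment is a hedged sketch, not part of the original example; it assumes the my_index cursor (cursorp) and search_name from the code above, used before the delete loop runs:

Dbt skey(search_name, strlen(search_name) + 1);  // Secondary key to look up
Dbt pkey, pdata;                                 // Receive primary key/data

// Position the secondary cursor and retrieve the associated primary record.
if (cursorp->pget(&skey, &pkey, &pdata, DB_SET) == 0) {
    // pkey holds the primary record's key; pdata holds its data.
}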
+ + + diff --git a/db/docs/gsg/CXX/secondaryDelete.html b/db/docs/gsg/CXX/secondaryDelete.html new file mode 100644 index 000000000..711896c9c --- /dev/null +++ b/db/docs/gsg/CXX/secondaryDelete.html @@ -0,0 +1,181 @@ + + + + + + Deleting Secondary Database Records + + + + + + + + + +
+
+
+
+

Deleting Secondary Database Records

+
+
+
+
+

+ In general, you + + will + not modify a secondary database directly. In + order to modify a secondary database, you should modify the primary + database and simply allow DB to manage the secondary modifications for you. +

+

+ However, as a convenience, you can delete a + + secondary database + record directly. Doing so causes the associated primary key/data pair to be deleted. + This in turn causes DB to delete all + + secondary database + records that reference the primary record. +

+

+ You can use the + + + Db::del() + method to delete a secondary database record. + + + + Note that if your + secondary database + + contains duplicate records, then deleting a record from the set of + duplicates causes all of the duplicates to be deleted as well. + + +

+
+

Note

+

+ You can delete a secondary database record using the previously + described mechanism only if: +

+
+
    +
  • +

    the secondary key extractor function returns 0 (see Implementing Key Extractors for information on this callback).

    +
  • +
  • +

    + the primary database is opened for write access. +

    +
  • +
+
+

If either of these conditions is not met, then no delete operations can be performed on the secondary database.

+
+

For example:

+ +
#include <db_cxx.h>
+#include <string.h>
+
+...
+                                                                                                                                     
+Db my_database(NULL, 0); // Primary
+Db my_index(NULL, 0);    // Secondary
+
+// Open the primary
+my_database.open(NULL,       // Transaction pointer
+                 "my_db.db", // On-disk file that holds the database.
+                NULL,        // Optional logical database name
+                DB_BTREE,    // Database access method
+                DB_CREATE,   // Open flags
+                0);          // File mode (using defaults)
+
+// Set up the secondary to use sorted duplicates.
+// This is often desirable for secondary databases.
+my_index.set_flags(DB_DUPSORT);
+
+// Open the secondary
+my_index.open(NULL,              // Transaction pointer
+              "my_secondary.db", // On-disk file that holds the database.
+              NULL,              // Optional logical database name
+              DB_BTREE,          // Database access method
+              DB_CREATE,         // Open flags.
+              0);                // File mode (using defaults)
+
+
+// Now associate the primary and the secondary
+my_database.associate(NULL,          // Txn id
+                      &my_index,     // Associated secondary database
+                      get_sales_rep, // Callback used for key extraction.
+                      0);            // Flags 
+
+// Name to delete
+char *search_name = "John Doe";
+
+// Get a search key
+Dbt key(search_name, strlen(search_name) + 1);
+                      
+// Now delete the secondary record. This causes the associated primary
+// record to be deleted. If any other secondary databases have secondary
+// records referring to the deleted primary record, then those secondary
+// records are also deleted.
+my_index.del(NULL, &key, 0); 
+
+ + + diff --git a/db/docs/gsg/CXX/usingDbt.html b/db/docs/gsg/CXX/usingDbt.html new file mode 100644 index 000000000..ace8fd22d --- /dev/null +++ b/db/docs/gsg/CXX/usingDbt.html @@ -0,0 +1,370 @@ + + + + + + Reading and Writing Database Records + + + + + + + + + +
+
+
+
+

Reading and Writing Database Records

+
+
+
+
+

+ When reading and writing database records, be aware that there are some + slight differences in behavior depending on whether your database supports duplicate + records. Two or more database records are considered to be duplicates of + one another if they share the same key. The collection of records + sharing the same key are called a duplicates set. + + + In DB, a given key is stored only once for a single duplicates set. + +

+

+ By default, DB databases do + not support duplicate records. Where duplicate records are supported, + cursors (see below) are typically used + to access all of the records in the duplicates set. +

+

+ DB provides two basic mechanisms for the storage and retrieval of database + key/data pairs: +

+
+
    +
  • +

    + The + + + Db::put() + and + + + Db::get() + methods provide the easiest access for all non-duplicate records in the database. + These methods are described in this section. +

    +
  • +
  • +

    Cursors provide several methods for putting and getting database + records. Cursors and their database access methods are described in + Using Cursors.

    +
  • +
+
+
+
+
+
+

Writing Records to the Database

+
+
+
+
+

+ Records are stored in the database using whatever organization is + required by the access method that you have selected. In some cases (such as + BTree), records are stored in a sort order that you may want to define + (see Setting Comparison Functions for more information). +

+

+ In any case, the mechanics of putting and getting database records do not + change once you have selected your access method, configured your + sorting routines (if any), and opened your database. From your + code's perspective, a simple database put and get is largely the + same no matter what access method you are using. +

+

+ You use + + Db::put() + to put, or write, a database record. This method requires you to provide + the record's key and data in the form of a pair of + + Dbt objects. + You can also provide one or more flags that control DB's behavior + for the database write. +

+

+ Of the flags available to this method, DB_NOOVERWRITE + may be interesting to you. This flag disallows overwriting (replacing) + an existing record in the database. If the provided key already exists + in the database, then this method returns DB_KEYEXIST even if + the database supports duplicates. +

+

+ For example: +

+ +
#include <db_cxx.h>
+#include <string.h>
+
+...
+
+char *description = "Grocery bill.";
+float money = 122.45;
+
+Db my_database(NULL, 0);
+// Database open omitted for clarity
+
+Dbt key(&money, sizeof(float));
+Dbt data(description, strlen(description) + 1);
+
+int ret = my_database.put(NULL, &key, &data, DB_NOOVERWRITE);
+if (ret == DB_KEYEXIST) {
+    my_database.err(ret, "Put failed because key %f already exists", money);
+}
+
+
+
+
+
+

Getting Records from the Database

+
+
+
+
+

+ You can use the + + Db::get() + method to retrieve database records. Note that if your + database supports duplicate records, then by default this method will only + return the first record in a duplicate set. For this reason, if your + database supports duplicates, the common solution is to use a cursor to retrieve + records from it. Cursors are described in Using Cursors. +

+

+ (You can also retrieve a set of duplicate records using a bulk get. + To do this, you use the DB_MULTIPLE flag on the + call to + + Db::get(). + + For more information, see the DB Programmer's Reference Guide). +

+
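As a hedged illustration of that bulk get (this sketch is not part of the original guide), the fragment below assumes a my_database handle opened as in the other examples in this section and a float key that has duplicate data items; the duplicates are returned in a single user-supplied buffer and walked with DbMultipleDataIterator:

#include <db_cxx.h>
#include <string.h>

...

float money = 122.45;
char buffer[4 * 1024];        // Must be at least a database page in size

Dbt key(&money, sizeof(float));
Dbt bulk_data;
bulk_data.set_data(buffer);
bulk_data.set_ulen(sizeof(buffer));
bulk_data.set_flags(DB_DBT_USERMEM);

// Retrieve all duplicate data items for this key in one call.
if (my_database.get(NULL, &key, &bulk_data, DB_MULTIPLE) == 0) {
    DbMultipleDataIterator iter(bulk_data);
    Dbt one_item;
    while (iter.next(one_item)) {
        // one_item.get_data() points at a single duplicate data item.
    }
}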

By default, Db::get() returns the first record found whose key matches the key provided on the call to this method. If your database supports duplicate records, you can change this behavior slightly by supplying the DB_GET_BOTH flag. This flag causes Db::get() to return the first record that matches the provided key and data.

+

+ If the specified key and/or data does not exist in the database, this + method returns DB_NOTFOUND. +

+ +
#include <db_cxx.h>
+#include <string.h>
+
+...
+
+float money;
+char *description;
+
+Db my_database(NULL, 0);
+// Database open omitted for clarity 
+
+money = 122.45;
+
+Dbt key, data;
+// Use our own memory to retrieve the float.
+// For data alignment purposes.
+key.set_data(&money);
+key.set_ulen(sizeof(float));
+key.set_flags(DB_DBT_USERMEM);
+
+my_database.get(NULL, &key, &data, 0);
+
+// Money is set into the memory that we supplied.
+description = (char *)data.get_data();
+

Note that in this example, the size field of the data Dbt (returned by Dbt::get_size()) is automatically set to the size of the retrieved data.

+
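The DB_GET_BOTH flag described above requires only a small change to the previous example. This is a hedged sketch, not part of the original guide; it assumes my_database is open and was loaded as shown in the put example earlier in this section:

float money = 122.45;
char *description = "Grocery bill.";

Dbt key(&money, sizeof(float));
Dbt data(description, strlen(description) + 1);

// Both the key and the data must match an existing record.
int ret = my_database.get(NULL, &key, &data, DB_GET_BOTH);
if (ret == DB_NOTFOUND) {
    // No record with exactly this key and data exists in the database.
}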
+
+
+
+
+

Deleting Records

+
+
+
+
+

+ + You can use the + + + Db::del() + method to delete a record from the database. If your database supports + duplicate records, then all records associated with the provided key are + deleted. To delete just one record from a list of duplicates, use a + cursor. Cursors are described in Using Cursors. + +

+

+ You can also delete every record in the database by using + + + Db::truncate(). +

+

For example:

+ +
#include <db_cxx.h>
+
+...
+
+Db my_database(NULL, 0);
+// Database open omitted for clarity
+
+float money = 122.45;
+Dbt key(&money, sizeof(float));
+
+my_database.del(NULL, &key, 0);
+
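A hedged sketch of the Db::truncate() alternative mentioned above (not part of the original guide); my_database is assumed to be the open handle from the previous example:

u_int32_t count = 0;
my_database.truncate(NULL,     // Transaction pointer
                     &count,   // Receives the number of discarded records
                     0);       // Flags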
+
+
+
+
+

Data Persistence

+
+
+
+
+

+ When you perform a database modification, your modification is made + in the in-memory cache. This means that your data modifications + are not necessarily written to disk, and so your data may not appear + in the database after an application restart. +

+

+ Note that as a normal part of closing a database, its cache is + written to disk. However, in the event of an application or system + failure, there is no guarantee that your databases will close + cleanly. In this event, it is possible for you to lose data. Under + extremely rare circumstances, it is also possible for you to + experience database corruption. +

+

+ Therefore, if you care about whether your data persists across + application runs, and to guard against the rare possibility of + database corruption, you should use transactions to protect your + database modifications. Every time you commit a transaction, DB + ensures that the data will not be lost due to application or + system failure. + + + For information on transactions, see the Berkeley DB + Programmer's Tutorial and Reference Guide. + +

+

+ If you do not want to use transactions, then the assumption is that + your data is of a nature that it need not exist the next time your + application starts. You may want this if, for example, you are using + DB to cache data relevant only to the current application + runtime. +

+

+ If, however, you are not using transactions for some reason and you + still want some guarantee that your database modifications are + persistent, then you should periodically + + + call Db::sync(). + Syncs cause the entire contents of your in-memory cache to be written to disk. As + such, they are quite expensive and you should use them sparingly. +

+

+ Remember that by default a sync is performed any time a non-transactional + database is closed cleanly. (You can override this behavior by + specifying + DB_NOSYNC + + on the call to + + Db::close().) + + + That said, you can manually run a sync by calling + + + Db::sync(). + + +

+
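The following lines are a hedged sketch of the two calls just described (they are not part of the original guide); my_database is assumed to be an open, non-transactional handle:

// Flush the in-memory cache to disk without closing the handle.
my_database.sync(0);

// Alternatively, skip the implicit sync when closing the database.
my_database.close(DB_NOSYNC);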
+

Note

+

+ If your application or system crashes and you are not using + transactions, then you should either discard and recreate your + databases, or verify them. You can verify a database using + + Db::verify(). + + If your databases do not verify cleanly, use the + db_dump command to salvage as much of the + database as is possible. Use either the -R or + -r command line options to control how + aggressive db_dump should be when salvaging + your databases. +

+
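A hedged sketch of the verification step described in the note above (not part of the original guide). It assumes a database file named my_db.db and relies on the default C++ behavior of throwing DbException on errors; the handle used for verification must not be reused afterward:

#include <db_cxx.h>

...

Db verify_handle(NULL, 0);
try {
    verify_handle.verify("my_db.db",  // File to verify
                         NULL,        // Verify all databases in the file
                         NULL,        // No dump output stream
                         0);          // Flags
    // The database verified cleanly.
} catch (DbException &e) {
    // Verification failed; salvage the data with db_dump -R or -r
    // as described above.
}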
+
+
+ + + diff --git a/db/docs/gsg/JAVA/BerkeleyDB-Core-JAVA-GSG.pdf b/db/docs/gsg/JAVA/BerkeleyDB-Core-JAVA-GSG.pdf new file mode 100644 index 000000000..f6fabaa34 Binary files /dev/null and b/db/docs/gsg/JAVA/BerkeleyDB-Core-JAVA-GSG.pdf differ diff --git a/db/docs/gsg/JAVA/CoreEnvUsage.html b/db/docs/gsg/JAVA/CoreEnvUsage.html new file mode 100644 index 000000000..4759e5523 --- /dev/null +++ b/db/docs/gsg/JAVA/CoreEnvUsage.html @@ -0,0 +1,158 @@ + + + + + + Managing Databases in Environments + + + + + + + + + +
+
+
+
+

Managing Databases in Environments

+
+
+
+
+

+ In Environments, we introduced + environments. While environments are not used in the example built in this book, + they are so commonly used for a wide class of DB applications that it is + necessary to show their basic usage, if only from a completeness perspective. +

+

+ To use an environment, you must first + + open it. At open time, you must identify the directory in + which it resides. This directory must exist prior to the open attempt. + You can also identify open properties, such as whether the environment can be + created if it does not already exist. +

+

+ For example, to + create an environment handle and + open an environment: +

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.EnvironmentConfig;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+
+...
+
+Environment myEnv = null;
+File envHome = new File("/export1/testEnv");
+try {
+    EnvironmentConfig envConf = new EnvironmentConfig();
+    envConf.setAllowCreate(true);
+
+    myEnv = new Environment(envHome, envConf);
+} catch (DatabaseException de) {
+    // Exception handling goes here
+} catch (FileNotFoundException fnfe) {
+    // Exception handling goes here
+} 
+

+ Once an environment is opened, you can open databases in it. Note that by default databases + are stored in the environment's home directory, or relative to that directory if you + provide any sort of a path in the database's file name: +

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseType;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.EnvironmentConfig;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+
+...
+
+Environment myEnv = null;
+Database myDb = null;
+File envHome = new File("/export1/testEnv");
+String dbFileName = new String("mydb.db");
+
+try {
+    EnvironmentConfig envConf = new EnvironmentConfig();
+    envConf.setAllowCreate(true);
+    DatabaseConfig dbConfig = new DatabaseConfig();
+    dbConfig.setAllowCreate(true);
+    dbConfig.setType(DatabaseType.BTREE);
+
+    myEnv = new Environment(envHome, envConf);
+    myDb = myEnv.openDatabase(null, dbFileName, null, dbConfig);
+} catch (DatabaseException de) {
+    // Exception handling goes here
+} catch (FileNotFoundException fnfe) {
+    // Exception handling goes here
+} 
+

+ When you are done with an environment, you must close it. Before you close an environment, + make sure you close any opened databases. +

+ +
finally {
+    try {
+        if (myDb != null) {
+            myDb.close();
+        }
+
+        if (myEnv != null) {
+            myEnv.close();
+        }
+    } catch (DatabaseException de) {
+        // Exception handling goes here
+    }
+} 
+
+ + + diff --git a/db/docs/gsg/JAVA/CoreJavaUsage.html b/db/docs/gsg/JAVA/CoreJavaUsage.html new file mode 100644 index 000000000..84a24586b --- /dev/null +++ b/db/docs/gsg/JAVA/CoreJavaUsage.html @@ -0,0 +1,183 @@ + + + + + + Database Example + + + + + + + + + +
+
+
+
+

Database Example

+
+
+
+
+

+ Throughout this book we will build a couple of applications that load + and retrieve inventory data from DB databases. While we are not yet ready to + begin reading from or writing to our databases, we can at least create + the class that we will use to manage our databases. +

+

+ Note that subsequent examples in this book will build on this code to + perform the more interesting work of writing to and reading from the + databases. +

+

+ Note that you can find the complete implementation of these functions + in: +

+
DB_INSTALL/examples_java/src/com/sleepycat/examples/db/GettingStarted
+

+ where DB_INSTALL is the location where you + placed your DB distribution. +

+
+ +

+ Example 2.1 MyDbs Class +

+

To manage our database open and close activities, we encapsulate them in the MyDbs class. There are several good reasons to do this, the most important being that we can ensure our databases are closed by placing that activity in the MyDbs class's close() method.

+

+ To begin, we import some needed classes: +

+ +
// File: MyDbs.java
+package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.DatabaseType;
+
+import java.io.FileNotFoundException; 
+

And then we write our class declaration and provide some necessary private data members:

+ +
public class MyDbs {
+
+    // The databases that our application uses
+    private Database vendorDb = null;
+    private Database inventoryDb = null;
+
+    private String vendordb = "VendorDB.db";
+    private String inventorydb = "InventoryDB.db";
+
+    // Our constructor does nothing
+    public MyDbs() {} 
+

+ Next we need a setup() method. This is where + we configure and open our databases. +

+ +
    // The setup() method opens all our databases
+    // for us.
+    public void setup(String databasesHome)
+        throws DatabaseException {
+
+        DatabaseConfig myDbConfig = new DatabaseConfig();
+
+        myDbConfig.setErrorStream(System.err);
+        myDbConfig.setErrorPrefix("MyDbs");
+        myDbConfig.setType(DatabaseType.BTREE);
+        myDbConfig.setAllowCreate(true);
+
+        // Now open, or create and open, our databases
+        // Open the vendors and inventory databases
+        try {
+            vendordb = databasesHome + "/" + vendordb;
+            vendorDb = new Database(vendordb,
+                                    null,
+                                    myDbConfig);
+
+            inventorydb = databasesHome + "/" + inventorydb;
+            inventoryDb = new Database(inventorydb,
+                                        null,
+                                        myDbConfig);
+        } catch(FileNotFoundException fnfe) {
+            System.err.println("MyDbs: " + fnfe.toString());
+            System.exit(-1);
+        }
+    } 
+

+ Finally, we provide some getter methods, and our close() method. +

+ +
   // getter methods
+    public Database getVendorDB() {
+        return vendorDb;
+    }
+
+    public Database getInventoryDB() {
+        return inventoryDb;
+    }
+
+    // Close the databases
+    public void close() {
+        try {
+            if (vendorDb != null) {
+                vendorDb.close();
+            }
+
+            if (inventoryDb != null) {
+                inventoryDb.close();
+            }
+        } catch(DatabaseException dbe) {
+            System.err.println("Error closing MyDbs: " +
+                                dbe.toString());
+            System.exit(-1);
+        }
+    }
+} 
+
+
+ + + diff --git a/db/docs/gsg/JAVA/Cursors.html b/db/docs/gsg/JAVA/Cursors.html new file mode 100644 index 000000000..1b8079556 --- /dev/null +++ b/db/docs/gsg/JAVA/Cursors.html @@ -0,0 +1,194 @@ + + + + + + Chapter 4. Using Cursors + + + + + + + + + +
+
+
+
+

Chapter 4. Using Cursors

+
+
+
+
+ +

+ Cursors provide a mechanism by which you can iterate over the records in a + database. Using cursors, you can get, put, and delete database records. If + a database allows duplicate records, then cursors are + + + + the easiest way that you can access anything + other than the first record for a given key. +

+

+ This chapter introduces cursors. It explains how to open and close them, how + to use them to modify databases, and how to use them with duplicate records. +

+
+
+
+
+

Opening and Closing Cursors

+
+
+
+
+

+ To use a cursor, you must open it using the Database.openCursor() + method. When you open a + cursor, you can optionally pass it a CursorConfig + object to set cursor properties. + +

+

For example:

+ +
package com.sleepycat.examples.db.GettingStarted;
+    
+import com.sleepycat.db.Cursor;
+import com.sleepycat.db.CursorConfig;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseException;
+
+import java.io.FileNotFoundException;
+
+...
+Database myDatabase = null;
+Cursor myCursor = null;
+
+try {
+    myDatabase = new Database("myDB", null, null);
+
+    myCursor = myDatabase.openCursor(null, null);
+} catch (FileNotFoundException fnfe) {
+    // Exception handling goes here ...
+} catch (DatabaseException dbe) {
+    // Exception handling goes here ...
+}
+

+ To close the cursor, call the Cursor.close() + method. Note that if you close a database that has cursors open in it, + then it will complain and close any open cursors for you. For best results, close your cursors from within a + finally block. + +

+ +
package com.sleepycat.examples.db.GettingStarted;
+    
+import com.sleepycat.db.Cursor;
+import com.sleepycat.db.Database;
+
+...
+try {
+    ...
+} catch ... {
+} finally {
+    try {
+        if (myCursor != null) {
+            myCursor.close();
+        }
+
+        if (myDatabase != null) {
+            myDatabase.close();
+        }
+    } catch(DatabaseException dbe) {
+        System.err.println("Error in close: " + dbe.toString());
+    }
+} 
+
+
+ + + diff --git a/db/docs/gsg/JAVA/DB.html b/db/docs/gsg/JAVA/DB.html new file mode 100644 index 000000000..b3d8837bb --- /dev/null +++ b/db/docs/gsg/JAVA/DB.html @@ -0,0 +1,182 @@ + + + + + + Chapter 2. Databases + + + + + + + + + +
+
+
+
+

Chapter 2. Databases

+
+
+
+
+
+

+ Table of Contents +

+
+
+ + Opening Databases + +
+
+ + Closing Databases + +
+
+ + Database Properties + +
+
+ + Administrative Methods + +
+
+ + Error Reporting Functions + +
+
+ + Managing Databases in Environments + +
+
+ + Database Example + +
+
+
+

In Berkeley DB, a database is a collection of records. Records, + in turn, consist of two parts: key and data. That is, records consist of + key/data pairings. +

+

+ Conceptually, you can think of a + Database + + as containing a two-column table where column 1 contains a key and column 2 + contains data. Both the key and the data are managed using + DatabaseEntry + + + class instances + + (see Database Records for details on this + class + ). + So, fundamentally, using a DB + Database + + involves putting, getting, and deleting database records, which in turns involves efficiently + managing information + encapsulated by + + + DatabaseEntry + + + + objects. + + The next several chapters of this book are dedicated to those activities. +

+
+
+
+
+

Opening Databases

+
+
+
+
+

+ You open a database by instantiating a Database + object. +

+

+ Note that by default, DB does not create databases if they do not already exist. + To override this behavior, set the creation property to true. +

+

+ The following code fragment illustrates a database open: + +

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+
+import java.io.FileNotFoundException;
+...
+
+Database myDatabase = null;
+
+...
+
+try {
+    // Open the database. Create it if it does not already exist.
+    DatabaseConfig dbConfig = new DatabaseConfig();
+    dbConfig.setAllowCreate(true);
+    myDatabase = new Database ("sampleDatabase.db",
+                               null, 
+                               dbConfig); 
+} catch (DatabaseException dbe) {
+    // Exception handling goes here
+} catch (FileNotFoundException fnfe) {
+    // Exception handling goes here
+}
+
+
+ + + diff --git a/db/docs/gsg/JAVA/DBAdmin.html b/db/docs/gsg/JAVA/DBAdmin.html new file mode 100644 index 000000000..c0b03b9b9 --- /dev/null +++ b/db/docs/gsg/JAVA/DBAdmin.html @@ -0,0 +1,120 @@ + + + + + + Administrative Methods + + + + + + + + + +
+
+
+
+

Administrative Methods

+
+
+
+
+

+ The Database class provides methods that are useful + for manipulating databases. These methods are: +

+
+
    +
  • +

    + Database.getDatabaseName() +

    +

    Returns the database's name.

    + +
    String dbName = myDatabase.getDatabaseName();
    +
  • +
  • +

    + Database.truncate() +

    +

    + Deletes every record in the database and optionally returns the + number of records that were deleted. Note that it is much less + expensive to truncate a database without counting the number of + records deleted than it is to truncate and count. +

    + +
    int numDiscarded = 
    +    myDatabase.truncate(null,
    +                        true); // If true, then the number of
    +                               // records deleted are counted.
    +System.out.println("Discarded " + numDiscarded +
    +                " records from database " + myDatabase.getDatabaseName()); 
    +
  • +
  • +

    + Database.rename() +

    +

    + Renames the specified database. If no value is given for the + database parameter, then the entire file + referenced by this method is renamed. +

    +

    + Never rename a database that has handles opened for it. Never rename a file that + contains databases with opened handles. +

    + +
    import java.io.FileNotFoundException;
    +...
    +myDatabase.close();
    +try {
    +    myDatabase.rename("mydb.db",     // Database file to rename
    +                      null,          // Database to rename. Not used so
    +                                     // the entire file is renamed.
    +                      "newdb.db",    // New name to use.
    +                      null);         // DatabaseConfig object. 
    +                                     // None provided.
    +} catch (FileNotFoundException fnfe) {
    +    // Exception handling goes here
    +}
    +
  • +
+
+
+ + + diff --git a/db/docs/gsg/JAVA/DBEntry.html b/db/docs/gsg/JAVA/DBEntry.html new file mode 100644 index 000000000..46237dfa9 --- /dev/null +++ b/db/docs/gsg/JAVA/DBEntry.html @@ -0,0 +1,250 @@ + + + + + + Chapter 3. Database Records + + + + + + + + + +
+
+
+
+

Chapter 3. Database Records

+
+
+
+
+ +

+ DB records contain two parts — a key and some data. Both the key + and its corresponding data are + encapsulated in + DatabaseEntry class objects. + + + Therefore, to access a DB record, you need two such + + objects, one for the key and + one for the data. +

+

+ DatabaseEntry can hold any kind of data from simple + Java primitive types to complex Java objects so long as that data can be + represented as a Java byte array. Note that due to + performance considerations, you should not use Java serialization to convert + a Java object to a byte array. +

+

+ This chapter describes how you can convert both Java primitives and Java + class objects into and out of byte arrays. It also + introduces storing and retrieving key/value pairs from a database. In + addition, this chapter describes how you can use comparators to influence + how DB sorts its database records. +

+
+
+
+
+

Using Database Records

+
+
+
+
+

+ Each database record is comprised of two + DatabaseEntry objects + + + — one for the key and another for the data. + + The key and data information is stored in + DatabaseEntry objects as byte + arrays. Therefore, using DatabaseEntry instances + is mostly an exercise in efficiently moving your keys and your data in + and out of byte arrays. +

+

+ For example, to store a database record where both the key and the + data are Java String objects, you instantiate a + pair of DatabaseEntry objects: +

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.db.DatabaseEntry;
+
+...
+
+String aKey = "key";
+String aData = "data";
+
+try {
+    DatabaseEntry theKey = new DatabaseEntry(aKey.getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry(aData.getBytes("UTF-8"));
+} catch (Exception e) {
+    // Exception handling goes here
+}
+
+    // Storing the record is described later in this chapter 
+
+

Note

+

+ Notice that we specify UTF-8 when we retrieve the + byte array from our String + object. Without parameters, String.getBytes() uses the + Java system's default encoding. You should never use a system's default + encoding when storing data in a database because the encoding can change. +

+
+

+ When the record is retrieved from the database, the method that you + use to perform this operation populates two DatabaseEntry + instances for you, one for the key and another for the data. Assuming Java + String objects, you retrieve your data from the + DatabaseEntry as follows: +

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.db.DatabaseEntry;
+
+...
+
+// theKey and theData are DatabaseEntry objects. Database
+// retrieval is described later in this chapter. For now, 
+// we assume some database get method has populated these
+// objects for us.
+
+// Use DatabaseEntry.getData() to retrieve the encapsulated Java
+// byte array.
+
+byte[] myKey = theKey.getData();
+byte[] myData = theData.getData();
+
+String key = new String(myKey);
+String data = new String(myData); 
+

+ There are a large number of mechanisms that you can use to move data in + and out of byte arrays. To help you with this + activity, DB provides the bind APIs. These APIs allow you to + efficiently store both primitive data types and complex objects in + byte arrays. +

+
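As a hedged sketch of the bind APIs mentioned above (not part of the original guide), the fragment below stores a Java int in a DatabaseEntry using the com.sleepycat.bind.tuple.IntegerBinding helper, assuming that class is available in your DB Java distribution:

package com.sleepycat.examples.db.GettingStarted;

import com.sleepycat.bind.tuple.IntegerBinding;
import com.sleepycat.db.DatabaseEntry;

...

DatabaseEntry theData = new DatabaseEntry();

// Pack an int into the entry's byte array.
IntegerBinding.intToEntry(42, theData);

// Later, unpack it again.
int quantity = IntegerBinding.entryToInt(theData);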

+ The next section describes basic database put and get operations. A + basic understanding of database access is useful when describing database + storage of more complex data such as is supported by the bind APIs. Basic + bind API usage is then described in Using the BIND APIs. +

+
+
+ + + diff --git a/db/docs/gsg/JAVA/DeleteEntryWCursor.html b/db/docs/gsg/JAVA/DeleteEntryWCursor.html new file mode 100644 index 000000000..78630680e --- /dev/null +++ b/db/docs/gsg/JAVA/DeleteEntryWCursor.html @@ -0,0 +1,111 @@ + + + + + + Deleting Records Using Cursors + + + + + + + + + +
+
+
+
+

Deleting Records Using Cursors

+
+
+
+
+

To delete a record using a cursor, simply position the cursor to the record that you want to delete and then call Cursor.delete().

+

For example:

+ +
package com.sleepycat.examples.db.GettingStarted;
+    
+import com.sleepycat.db.Cursor;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.LockMode;
+import com.sleepycat.db.OperationStatus; 
+
+...
+
+Cursor cursor = null;
+Database myDatabase = null;
+try {
+    ...
+    // Database open omitted for brevity
+    ...
+    // Create DatabaseEntry objects
+    // searchKey is some String.
+    DatabaseEntry theKey = new DatabaseEntry(searchKey.getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry();
+
+    // Open a cursor using a database handle
+    cursor = myDatabase.openCursor(null, null);
+
+    // Position the cursor. Ignoring the return value for clarity
+    OperationStatus retVal = cursor.getSearchKey(theKey, theData, 
+                                                 LockMode.DEFAULT);
+    
+    // Count the number of records using the given key. If there is only
+    // one, delete that record.
+    if (cursor.count() == 1) {
+            System.out.println("Deleting " + 
+                               new String(theKey.getData()) + "|" +
+                               new String(theData.getData()));
+            cursor.delete();
+    }
+} catch (Exception e) {
+    // Exception handling goes here
+} finally {
+   // Make sure to close the cursor
+   cursor.close();
+}
+
+ + + diff --git a/db/docs/gsg/JAVA/Positioning.html b/db/docs/gsg/JAVA/Positioning.html new file mode 100644 index 000000000..a42ac98f9 --- /dev/null +++ b/db/docs/gsg/JAVA/Positioning.html @@ -0,0 +1,599 @@ + + + + + + Getting Records Using the Cursor + + + + + + + + + +
+
+
+
+

Getting Records Using the Cursor

+
+
+
+
+

+ To iterate over database records, from the first record to + the last, simply open the cursor and then use the + Cursor.getNext() + + + method. + + For example: +

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Cursor;
+import com.sleepycat.db.LockMode;  
+import com.sleepycat.db.OperationStatus; 
+
+...
+
+Cursor cursor = null;
+try {
+    ...
+    Database myDatabase = null;
+    // Database open omitted for brevity
+    ...
+
+    // Open the cursor. 
+    cursor = myDatabase.openCursor(null, null);
+
+    // Cursors need a pair of DatabaseEntry objects to operate. These hold
+    // the key and data found at any given position in the database.
+    DatabaseEntry foundKey = new DatabaseEntry();
+    DatabaseEntry foundData = new DatabaseEntry();
+
+    // To iterate, just call getNext() until the last database record has been 
+    // read. All cursor operations return an OperationStatus, so just read 
+    // until we no longer see OperationStatus.SUCCESS
+    while (cursor.getNext(foundKey, foundData, LockMode.DEFAULT) ==
+        OperationStatus.SUCCESS) {
+        // getData() on the DatabaseEntry objects returns the byte array
+        // held by that object. We use this to get a String value. If the
+        // DatabaseEntry held a byte array representation of some other data
+        // type (such as a complex object) then this operation would look 
+        // considerably different.
+        String keyString = new String(foundKey.getData());
+        String dataString = new String(foundData.getData());
+        System.out.println("Key | Data : " + keyString + " | " + 
+                       dataString + "");
+    }
+} catch (DatabaseException de) {
+    System.err.println("Error accessing database." + de);
+} finally {
+    // Cursors must be closed.
+    cursor.close();
+}
+

+ To iterate over the database from the last record to the first, + instantiate the cursor, and then + use Cursor.getPrev() until you read the first record in + the database. For example: +

+ +
package com.sleepycat.examples.db.GettingStarted;
+    
+import com.sleepycat.db.Cursor;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.LockMode;  
+import com.sleepycat.db.OperationStatus; 
+
+...
+
+Cursor cursor = null;
+Database myDatabase = null;
+try {
+    ...
+    // Database open omitted for brevity
+    ...
+
+    // Open the cursor. 
+    cursor = myDatabase.openCursor(null, null);
+
+    // Get the DatabaseEntry objects that the cursor will use.
+    DatabaseEntry foundKey = new DatabaseEntry();
+    DatabaseEntry foundData = new DatabaseEntry();
+
+    // Iterate from the last record to the first in the database
+    while (cursor.getPrev(foundKey, foundData, LockMode.DEFAULT) == 
+        OperationStatus.SUCCESS) {
+
+        String theKey = new String(foundKey.getData());
+        String theData = new String(foundData.getData());
+        System.out.println("Key | Data : " +  theKey + " | " + theData + "");
+    }
+} catch (DatabaseException de) {
+    System.err.println("Error accessing database." + de);
+} finally {
+    // Cursors must be closed.
+    cursor.close();
+}
+
+
+
+
+

Searching for Records

+
+
+
+
+

+ You can use cursors to search for database records. You can search based + on just a key, or you can search based on both the key and the data. + You can also perform partial matches if your database supports sorted + duplicate sets. In all cases, the key and data parameters of these + methods are filled with the key and data values of the database record + to which the cursor is positioned as a result of the search. +

+

+ Also, if the search fails, then cursor's state is left unchanged + and + OperationStatus.NOTFOUND + + is returned. + + +

+

+ The following Cursor methods allow you to + perform database searches: +

+
+
    +
  • +

    + Cursor.getSearchKey() + +

    +

    + Moves the cursor to the first record in the database with + the specified key. +

    +
  • +
  • +

    + Cursor.getSearchKeyRange() + +

    +

    Identical to Cursor.getSearchKey() unless you are using the BTree access method. In this case, the cursor moves to the first record in the database whose key is greater than or equal to the specified key. This comparison is determined by the comparator that you provide for the database. If no comparator is provided, then the default lexicographical sorting is used.

    +

    + For example, suppose you have database records that use the + following + Strings + + as keys: +

    +
    Alabama
    +Alaska
    +Arizona
    +

    + Then providing a search key of Alaska moves the + cursor to the second key noted above. Providing a key of + Al moves the cursor to the first key (Alabama), providing + a search key of Alas moves the cursor to the second key + (Alaska), and providing a key of Ar moves the + cursor to the last key (Arizona). +

    +
  • +
  • +

    + Cursor.getSearchBoth() + +

    +

    + Moves the cursor to the first record in the database that uses + the specified key and data. +

    +
  • +
  • +

    + Cursor.getSearchBothRange() + +

    +

    + Moves the cursor to the first record in the database whose key is + greater than or equal to the specified key. If the database supports + duplicate records, then on matching the key, the cursor is moved to + the duplicate record with the smallest data that is greater than or + equal to the specified data. +

    +

    + For example, + + suppose your database uses BTree + and it has + database records that use the following key/data pairs: +

    +
    Alabama/Athens
    +Alabama/Florence
    +Alaska/Anchorage
    +Alaska/Fairbanks
    +Arizona/Avondale
    +Arizona/Florence 
    +

    then providing:

    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    a search key of ...and a search data of ...moves the cursor to ...
    AlFlAlabama/Florence
    ArFlArizona/Florence
    AlFaAlaska/Fairbanks
    AlAAlabama/Athens
    +
    +
  • +
+
+

+ For example, assuming a database containing sorted duplicate records of + U.S. States/U.S Cities key/data pairs (both as + Strings), + + then the following code fragment can be used to position the cursor + to any record in the database and print its key/data values: + +

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.db.Cursor;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.LockMode;
+import com.sleepycat.db.OperationStatus; 
+
+...
+  
+// For this example, hard code the search key and data
+String searchKey = "Al";
+String searchData = "Fa";
+
+Cursor cursor = null;
+Database myDatabase = null;
+try {
+    ...
+    // Database open omitted for brevity
+    ...
+
+    // Open the cursor. 
+    cursor = myDatabase.openCursor(null, null);
+
+    DatabaseEntry theKey = 
+         new DatabaseEntry(searchKey.getBytes("UTF-8"));
+    DatabaseEntry theData = 
+         new DatabaseEntry(searchData.getBytes("UTF-8"));
+
+    // Open a cursor using a database handle
+    cursor = myDatabase.openCursor(null, null);
+
+    // Perform the search
+    OperationStatus retVal = cursor.getSearchBothRange(theKey, theData, 
+                                                       LockMode.DEFAULT);
+    // NOTFOUND is returned if a record cannot be found whose key begins 
+    // with the search key AND whose data begins with the search data.
+    if (retVal == OperationStatus.NOTFOUND) {
+        System.out.println(searchKey + "/" + searchData + 
+                           " not matched in database " + 
+                           myDatabase.getDatabaseName());
+    } else {
+        // Upon completing a search, the key and data DatabaseEntry 
+        // parameters for getSearchBothRange() are populated with the 
+        // key/data values of the found record.
+        String foundKey = new String(theKey.getData());
+        String foundData = new String(theData.getData());
+        System.out.println("Found record " + foundKey + "/" + foundData + 
+                           "for search key/data: " + searchKey + 
+                           "/" + searchData);
+    }
+
+} catch (Exception e) {
+    // Exception handling goes here
+} finally {
+   // Make sure to close the cursor
+   cursor.close();
+}
+
+
+
+
+
+

Working with Duplicate Records

+
+
+
+
+

+ A record is a duplicate of another record if the two records share the + same key. For duplicate records, only the data portion of the record is unique. +

+

+ Duplicate records are supported only for the BTree or Hash access methods. + For information on configuring your database to use duplicate records, + see Allowing Duplicate Records. +

+

+ If your database supports duplicate records, then it can potentially + contain multiple records that share the same key. + + + + By default, normal database + get operations will only return the first such record in a set + of duplicate records. Typically, subsequent duplicate records are + accessed using a cursor. + + + The following + Cursor methods + + + are interesting when working with databases that support duplicate records: +

+
+
    +
  • +

    + + Cursor.getNext(), + Cursor.getPrev() + + +

    +

    + Shows the next/previous record in the database, regardless of + whether it is a duplicate of the current record. For an example of + using these methods, see Getting Records Using the Cursor. +

    +
  • +
  • +

    + Cursor.getSearchBothRange() + +

    +

    + Useful for seeking the cursor to a specific record, regardless of + whether it is a duplicate record. See Searching for Records for more + information. +

    +
  • +
  • +

    + + Cursor.getNextNoDup(), + Cursor.getPrevNoDup() + + +

    +

    + Gets the next/previous non-duplicate record in the database. This + allows you to skip over all the duplicates in a set of duplicate + records. If you call + Cursor.getPrevNoDup(), + + then the cursor is positioned to the last record for the previous + key in the database. For example, if you have the following records + in your database: +

    +
    Alabama/Athens
    +Alabama/Florence
    +Alaska/Anchorage
    +Alaska/Fairbanks
    +Arizona/Avondale
    +Arizona/Florence
    +

    + and your cursor is positioned to Alaska/Fairbanks, + and you then call + Cursor.getPrevNoDup(), + + then the cursor is positioned to Alabama/Florence. Similarly, if + you call + Cursor.getNextNoDup(), + + + then the cursor is positioned to the first record corresponding to + the next key in the database. +

    +

    + If there is no next/previous key in the database, then + OperationStatus.NOTFOUND + + is returned, and the cursor is left unchanged. +

    +
  • +
  • +

    Cursor.getNextDup(), Cursor.getPrevDup()

    +

    Gets the next/previous record that shares the current key. If the cursor is positioned at the last record in the duplicate set and you call Cursor.getNextDup(), then OperationStatus.NOTFOUND is returned and the cursor is left unchanged. Likewise, if you call Cursor.getPrevDup() and the cursor is positioned at the first record in the duplicate set, then OperationStatus.NOTFOUND is returned and the cursor is left unchanged.

    +
  • +
  • +

    + Cursor.count() +

    +

    Returns the total number of records that share the current key.

    +
  • +
+
+

+ For example, the following code fragment positions a cursor to a key + + + + and displays it and all its + duplicates. + + Note that the following code fragment assumes that the database contains + only String objects for the keys and data. +

+ +
package com.sleepycat.examples.db.GettingStarted;
+      
+import com.sleepycat.db.Cursor;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.LockMode;
+import com.sleepycat.db.OperationStatus; 
+
+...
+  
+Cursor cursor = null;
+Database myDatabase = null;
+try {
+    ...
+    // Database open omitted for brevity
+    ...
+
+    // Create DatabaseEntry objects
+    // searchKey is some String.
+    DatabaseEntry theKey = new DatabaseEntry(searchKey.getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry();
+
+    // Open a cursor using a database handle
+    cursor = myDatabase.openCursor(null, null);
+
+    // Position the cursor
+    // Ignoring the return value for clarity
+    OperationStatus retVal = cursor.getSearchKey(theKey, theData, 
+                                                 LockMode.DEFAULT);
+    
+    // Count the number of duplicates. If the count is greater than 1, 
+    // print the duplicates.
+    if (cursor.count() > 1) {
+        while (retVal == OperationStatus.SUCCESS) {
+            String keyString = new String(theKey.getData());
+            String dataString = new String(theData.getData());
+            System.out.println("Key | Data : " +  keyString + " | " + 
+                               dataString + "");
+   
+            retVal = cursor.getNextDup(theKey, theData, LockMode.DEFAULT);
+        }
+    }
+} catch (Exception e) {
+    // Exception handling goes here
+} finally {
+   // Make sure to close the cursor
+   cursor.close();
+}
+
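As a hedged follow-on to the example above (not part of the original guide), the fragment below uses Cursor.getNextNoDup() to skip the remainder of the current duplicates set; it would sit inside the try block of the example above, after the duplicate-printing loop, and reuses its cursor, theKey, theData, and retVal variables:

// Jump past the rest of the current duplicates set to the first
// record for the next key in the database.
retVal = cursor.getNextNoDup(theKey, theData, LockMode.DEFAULT);
if (retVal == OperationStatus.SUCCESS) {
    System.out.println("Next key: " + new String(theKey.getData()) +
                       " | " + new String(theData.getData()));
}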
+
+ + + diff --git a/db/docs/gsg/JAVA/PutEntryWCursor.html b/db/docs/gsg/JAVA/PutEntryWCursor.html new file mode 100644 index 000000000..d4cada443 --- /dev/null +++ b/db/docs/gsg/JAVA/PutEntryWCursor.html @@ -0,0 +1,209 @@ + + + + + + Putting Records Using Cursors + + + + + + + + + +
+
+
+
+

Putting Records Using Cursors

+
+
+
+
+

+ You can use cursors to put records into the database. DB's behavior + when putting records into the database differs depending on the flags + that you use when writing the record, on the access method that you are + using, and on whether your database supports sorted duplicates. +

+

Note that when putting records to the database using a cursor, the cursor is positioned at the record you inserted. Also, you cannot transactionally protect a put that is performed using a cursor; if you want to transactionally protect your database writes, put records using the database handle directly.

+
+
    +
  • +

    + Cursor.putNoDupData() + +

    +

    + If the provided key already exists + in the database, then this method returns + OperationStatus.KEYEXIST. +

    +

    + If the key does not exist, then the order that the record is put into the database + is determined by the + + + insertion order in use by the database. If a comparison + function has been provided to the database, the record is + inserted in its sorted location. Otherwise (assuming BTree), + lexicographical sorting is used, with + shorter items collating before longer items. + +

    +

    + This flag can only be used for the BTree and Hash access methods, + and only if the database has been configured to support sorted + duplicate data items (DB_DUPSORT was specified at + database creation time). +

    +

    + This flag cannot be used with the Queue or Recno access methods. +

    +

    + For more information on duplicate records, see + Allowing Duplicate Records. +

    +
  • +
  • +

    + Cursor.putNoOverwrite() +

    +

    If the provided key already exists in the database, then this method returns OperationStatus.KEYEXIST. (A short sketch using this method appears after the example below.)

    +

    + If the key does not exist, then the order that the record is put into the database + is determined by the BTree (key) comparator in use by the database. +

    +
  • +
  • +

    + Cursor.putKeyFirst() + +

    +

    + For databases that do not support duplicates, this method behaves + + + exactly the same as if a default insertion was performed. + + If the database supports duplicate records, + + + and a duplicate sort function has been specified, the + inserted data item is added in its sorted location. If + the key already exists in the database and no duplicate + sort function has been specified, the inserted data item + is added as the first of the data items for that key. + +

    +
  • +
  • +

    + Cursor.putKeyLast() + +

    +

    + Behaves exactly as if + + Cursor.putKeyFirst() + was used, except that if the key already exists in the database and no + duplicate sort function has been specified, the + inserted data item is added as the last of the data + items for that key. +

    +
  • +
+
+

For example:

+ +
package com.sleepycat.examples.db.GettingStarted;
+    
+import com.sleepycat.db.Cursor;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.OperationStatus; 
+
+...
+  
+// Create the data to put into the database
+String key1str = "My first string";
+String data1str = "My first data";
+String key2str = "My second string";
+String data2str = "My second data";
+String data3str = "My third data";
+  
+Cursor cursor = null;
+Database myDatabase = null;
+try {
+    ...
+    // Database open omitted for brevity
+    ...
+
+    DatabaseEntry key1 = new DatabaseEntry(key1str.getBytes("UTF-8"));
+    DatabaseEntry data1 = new DatabaseEntry(data1str.getBytes("UTF-8"));
+    DatabaseEntry key2 = new DatabaseEntry(key2str.getBytes("UTF-8"));
+    DatabaseEntry data2 = new DatabaseEntry(data2str.getBytes("UTF-8"));
+    DatabaseEntry data3 = new DatabaseEntry(data3str.getBytes("UTF-8"));
+
+    // Open a cursor using a database handle
+    cursor = myDatabase.openCursor(null, null);
+
+    // Assuming an empty database.
+
+    OperationStatus retVal = cursor.put(key1, data1); // SUCCESS
+    retVal = cursor.put(key2, data2); // SUCCESS
+    retVal = cursor.put(key2, data3); // SUCCESS if dups allowed, 
+                                      // KEYEXIST if not.    
+                                              
+} catch (Exception e) {
+    // Exception handling goes here
+} finally {
+   // Make sure to close the cursor
+   cursor.close();
+}
+
+ + + diff --git a/db/docs/gsg/JAVA/ReplacingEntryWCursor.html b/db/docs/gsg/JAVA/ReplacingEntryWCursor.html new file mode 100644 index 000000000..29cff39a6 --- /dev/null +++ b/db/docs/gsg/JAVA/ReplacingEntryWCursor.html @@ -0,0 +1,132 @@ + + + + + + Replacing Records Using Cursors + + + + + + + + + +
+
+
+
+

Replacing Records Using Cursors

+
+
+
+
+

+ You replace the data for a database record by using + + + + Cursor.putCurrent(). + + + + +

+ +
import com.sleepycat.db.Cursor;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.LockMode;
+import com.sleepycat.db.OperationStatus; 
+
+...
+Cursor cursor = null;
+Database myDatabase = null;
+try {
+    ...
+    // Database open omitted for brevity
+    ...
+    // Create DatabaseEntry objects
+    // searchKey is some String.
+    DatabaseEntry theKey = new DatabaseEntry(searchKey.getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry();
+
+    // Open a cursor using a database handle
+    cursor = myDatabase.openCursor(null, null);
+
+    // Position the cursor. Ignoring the return value for clarity
+    OperationStatus retVal = cursor.getSearchKey(theKey, theData, 
+                                                 LockMode.DEFAULT);
+    
+    // Replacement data
+    String replaceStr = "My replacement string";
+    DatabaseEntry replacementData = 
+        new DatabaseEntry(replaceStr.getBytes("UTF-8"));
+    cursor.putCurrent(replacementData);
+} catch (Exception e) {
+    // Exception handling goes here
+} finally {
+   // Make sure to close the cursor
+   cursor.close();
+}
+

+ Note that you cannot change a record's key using this method; + putCurrent() replaces only the data portion of the record. +

+

+ When replacing the data portion of a record, if you are replacing a + record that is a member of a sorted duplicates set, then the replacement + will be successful only if the new record sorts identically to the old + record. This means that under the default + lexicographic sort, any replacement that actually changes the data will fail because it violates the + sort order. However, if you + provide a custom sort routine that, for example, sorts based on just a + few bytes out of the data item, then potentially you can perform + a direct replacement and still not violate the restrictions described + here. +

+

+ Under these circumstances, if + + you want to replace the data contained by a duplicate record, + + and you are not using a custom sort routine, then + + delete the record and create a new record with the desired key and data. +

+
+ + + diff --git a/db/docs/gsg/JAVA/accessmethods.html b/db/docs/gsg/JAVA/accessmethods.html new file mode 100644 index 000000000..02469bb95 --- /dev/null +++ b/db/docs/gsg/JAVA/accessmethods.html @@ -0,0 +1,281 @@ + + + + + + Access Methods + + + + + + + + + +
+
+
+
+

Access Methods

+
+
+
+
+

+ While this manual will focus primarily on the BTree access method, it is + still useful to briefly describe all of the access methods that DB + makes available. +

+

+ Note that an access method can be selected only when the database is + created. Once selected, actual API usage is generally + identical across all access methods. That is, while some + exceptions exist, mechanically you interact with the library in the same + way regardless of which access method you have selected. +

+

+ The access method that you should choose is gated first by what you want + to use as a key, and then secondly by the performance that you see + for a given access method. +
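+ For illustration, the following minimal sketch shows an access method being selected at creation time. It follows the same DatabaseConfig and DatabaseType pattern used by the examples later in this guide; the database file name shown here is hypothetical:
+package com.sleepycat.examples.db.GettingStarted;
+
+import java.io.FileNotFoundException;
+
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.DatabaseType;
+
+...
+
+Database myDatabase = null;
+try {
+    DatabaseConfig myDbConfig = new DatabaseConfig();
+    myDbConfig.setAllowCreate(true);
+
+    // The access method is fixed when the database is first created.
+    // DatabaseType.HASH, DatabaseType.QUEUE, or DatabaseType.RECNO
+    // could be used here instead to select a different access method.
+    myDbConfig.setType(DatabaseType.BTREE);
+
+    // "accessDemo.db" is a hypothetical file name used for illustration.
+    myDatabase = new Database("accessDemo.db", null, myDbConfig);
+} catch (DatabaseException dbe) {
+    // Exception handling goes here
+} catch (FileNotFoundException fnfe) {
+    // Exception handling goes here
+}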

+

+ The following are the available access methods: +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Access MethodDescription
BTree +

+ Data is stored in a sorted, balanced tree structure. + Both the key and the data for BTree records can be + arbitrarily complex. That is, they can contain single values + such as an integer or a string, or complex types such as a + structure. Also, although not the default + behavior, it is possible for two records to + use keys that compare as equals. When this occurs, the + records are considered to be duplicates of one another. +

+
Hash +

+ Data is stored in an extended linear hash table. Like + BTree, the key and the data used for Hash records can be + arbitrarily complex. Also, like BTree, duplicate + records are optionally supported. +

+
Queue +

+ Data is stored in a queue as fixed-length records. Each + record uses a logical record number as its key. This access + method is designed for fast inserts at the tail of the + queue, and it has a special operation that deletes and + returns a record from the head of the queue. +

+

+ This access method is unusual in that it provides record + level locking. This can provide + beneficial performance improvements in applications + requiring concurrent access to the queue. +

+
Recno +

+ Data is stored in either fixed or variable-length records. + Like Queue, Recno records use logical record numbers as keys. +

+
+
+
+
+
+
+

Selecting Access Methods

+
+
+
+
+

+ To select an access method, you should first consider what you want + to use as a key for your database records. If you want to use + arbitrary data (even strings), then you should use either BTree or + Hash. If you want to use logical record numbers (essentially + integers), then you should use Queue or Recno. +

+

+ Once you have made this decision, you must choose between either + BTree or Hash, or Queue or Recno. This decision is described next. +

+
+
+
+
+
+

Choosing between BTree and Hash

+
+
+
+
+

+ For small working datasets that fit entirely in memory, there is no + difference between BTree and Hash. Both will perform just as well + as the other. In this situation, you might just as well use BTree, + if for no other reason than the majority of DB applications use + BTree. +

+

+ Note that the main concern here is your + working dataset, not your entire dataset. Many applications maintain + large amounts of information but only need to access some small + portion of that data with any frequency. So what you want to + consider is the data that you will routinely use, not the sum total + of all the data managed by your application. +

+

+ However, as your working dataset grows to the point + where you cannot fit it all into memory, then you need to take more + care when choosing your access method. Specifically, choose: +

+
+
    +
  • +

    + BTree if your keys have some locality of reference. That is, + if they sort well and you can expect that a query for a + given key will likely be followed by a query for one of its + neighbors. +

    +
  • +
  • +

    + Hash if your dataset is extremely large. For any given + access method, DB must maintain a certain amount of internal + information. However, the amount of information that DB + must maintain for BTree is much greater than for Hash. The + result is that as your dataset grows, this internal + information can dominate the cache to the point where there + is relatively little space left for application data. + As a result, BTree can be forced to perform disk I/O much more + frequently than would Hash given the same amount of data. +

    +

    + Moreover, if your dataset becomes so large that DB will + almost certainly have to perform disk I/O to satisfy a + random request, then Hash will definitely outperform BTree + because it has fewer internal records to search through than + does BTree. +

    +
  • +
+
+
+
+
+
+
+

Choosing between Queue and Recno

+
+
+
+
+

+ Queue and Recno are used when the application wants to use logical + record numbers for the primary database key. Logical record numbers + are essentially integers that uniquely identify the database + record. They can be either mutable or fixed, where a mutable record + number is one that might change as database records are stored or + deleted. Fixed logical record numbers never change regardless of + what database operations are performed. +

+

+ When deciding between Queue and Recno, choose: +

+
+
    +
  • +

    + Queue if your application requires high degrees of + concurrency. Queue provides record-level locking (as opposed + to the page-level locking that the other access methods + use), and this can result in significantly faster throughput + for highly concurrent applications. +

    +

    + Note, however, that Queue provides support only for fixed + length records. So if the size of the data that you want to + store varies widely from record to record, you should + probably choose an access method other than Queue. +

    +
  • +
  • +

    + Recno if you want mutable record numbers. Queue is only + capable of providing fixed record numbers. Also, Recno + provides support for databases whose permanent storage is a + flat text file. This is useful for applications looking for + fast, temporary storage while the data is being read or + modified. +

    +
  • +
+
+
+
+ + + diff --git a/db/docs/gsg/JAVA/bindAPI.html b/db/docs/gsg/JAVA/bindAPI.html new file mode 100644 index 000000000..ac872849a --- /dev/null +++ b/db/docs/gsg/JAVA/bindAPI.html @@ -0,0 +1,760 @@ + + + + + + Using the BIND APIs + + + + + + + + + +
+
+
+
+

Using the BIND APIs

+
+
+
+
+

Except for Java String and boolean types, efficiently moving data in + and out of Java byte arrays for storage in a database can be a nontrivial + operation. To help you with this problem, DB provides the Bind APIs. + While these APIs are described in detail in the + Sleepycat Java Collections Tutorial (see + http://www.sleepycat.com/docs/ref/toc.html), + this section provides a brief introduction to using the Bind APIs with:

+
+
    +
  • +

    Single field numerical and string objects

    +

    Use this if you want to store a single numerical or string object, + such as Long, Double, or + String.

    +
  • +
  • +

    Complex objects that implement Java serialization.

    +

    Use this if you are storing objects that implement + Serializable and if you do not want to sort on + this information.

    +
  • +
  • +

    Non-serialized complex objects.

    +

    If you are storing objects that do not implement serialization, + you can create your own custom tuple bindings. Note that you should + use custom tuple bindings even if your objects are serializable, if + you want to sort on that data.

    +
  • +
+
+
+
+
+
+

Numerical and String Objects

+
+
+
+
+

You can use the Bind APIs to store primitive data in a DatabaseEntry + object. That is, you can store a single field containing one of the following types:

+
+
    +
  • +

    + String +

    +
  • +
  • +

    + Character +

    +
  • +
  • +

    + Boolean +

    +
  • +
  • +

    + Byte +

    +
  • +
  • +

    + Short +

    +
  • +
  • +

    + Integer +

    +
  • +
  • +

    + Long +

    +
  • +
  • +

    + Float +

    +
  • +
  • +

    + Double +

    +
  • +
+
+

+ To store primitive data using the Bind APIs: +

+
+
    +
  1. +

    Create an EntryBinding object.

    +

    When you do this, you use TupleBinding.getPrimitiveBinding() + to return the binding that you use for the conversion.

    +
  2. +
  3. +

    Use the EntryBinding object to place + the numerical object on the DatabaseEntry.

    +
  4. +
+
+

Once the data is stored in the DatabaseEntry, you can put it to + the database in whatever manner you wish. For example:

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseEntry;
+
+...
+
+Database myDatabase = null;
+// Database open omitted for clarity.
+
+// Need a key for the put.
+try {
+    String aKey = "myLong";
+    DatabaseEntry theKey = new DatabaseEntry(aKey.getBytes("UTF-8"));    
+
+    // Now build the DatabaseEntry using a TupleBinding
+    Long myLong = new Long(123456789l);
+    DatabaseEntry theData = new DatabaseEntry();
+    EntryBinding myBinding = TupleBinding.getPrimitiveBinding(Long.class);
+    myBinding.objectToEntry(myLong, theData);
+
+    // Now store it
+    myDatabase.put(null, theKey, theData);
+} catch (Exception e) {
+    // Exception handling goes here
+}
+

Retrieval from the DatabaseEntry object is + performed in much the same way:

+ +
package com.sleepycat.examples.db.GettingStarted;
+      
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.LockMode;
+import com.sleepycat.db.OperationStatus;
+
+...
+
+Database myDatabase = null;
+// Database open omitted for clarity
+
+try {
+    // Need a key for the get
+    String aKey = "myLong";
+    DatabaseEntry theKey = new DatabaseEntry(aKey.getBytes("UTF-8"));
+    
+    // Need a DatabaseEntry to hold the associated data.
+    DatabaseEntry theData = new DatabaseEntry();
+
+    // Bindings need only be created once for a given scope
+    EntryBinding myBinding = TupleBinding.getPrimitiveBinding(Long.class);
+
+    // Get it
+    OperationStatus retVal = myDatabase.get(null, theKey, theData, 
+                                            LockMode.DEFAULT);
+    String retKey = null;
+    if (retVal == OperationStatus.SUCCESS) {
+        // Recreate the data.
+        // Use the binding to convert the byte array contained in theData
+        // to a Long type.
+        Long theLong = (Long) myBinding.entryToObject(theData);
+        retKey = new String(theKey.getData());
+        System.out.println("For key: '" + retKey + "' found Long: '" + 
+                            theLong + "'.");
+    } else {
+        System.out.println("No record found for key '" + retKey + "'.");
+    }
+} catch (Exception e) {
+    // Exception handling goes here
+} 
+
+
+
+
+
+

Serializable Complex Objects

+
+
+
+
+

Frequently your application requires you to store and manage + objects for your record data and/or keys. You may need to do this if you + are caching objects created by another process. You may also want to do + this if you want to store multiple data values on a record. When used + with just primitive data, or with objects containing a single data member, + DB database records effectively represent a single row in a two-column table. + By storing a complex object in the record, you can turn each record into + a single row in an n-column table, where + n is the number of data members contained by the + stored object(s).

+

In order to store objects in a + DB database, you must convert them to and from a byte array. + The first instinct for many Java programmers is to do this using Java + serialization. While this is functionally a correct solution, the result + is poor performance because this causes the class information + to be stored on every such database record. This information can be quite large + and it is redundant — the class information does not vary for serialized objects of the same type. +

+

+ In other words, directly using serialization to place your objects into byte + arrays means that you will be storing a great deal of unnecessary information in + your database, which ultimately leads to larger databases and more expensive disk + I/O. +

+

The easiest way for you to solve this problem is to use the Bind + APIs to perform the serialization for you. Doing so causes the extra + object information to be saved off to a unique Database + dedicated for that purpose. This means that you do not have to duplicate + that information on each record in the Database + that your application is using to store its information.

+

+ Note that when you use the Bind APIs to perform serialization, you still + receive all the benefits of serialization. You can still use arbitrarily + complex object graphs, and you still receive built-in class evolution + through the serialVersionUID (SUID) scheme. All of the Java + serialization rules apply without modification. For example, you can + implement Externalizable instead of Serializable. +

+
+
+
+
+

Usage Caveats

+
+
+
+
+

Before using the Bind APIs to perform serialization, you may + want to consider writing your own custom tuple bindings. Specifically, + avoid serialization if: +

+
+
    +
  • +

    You need to sort based on the objects you are storing. + The sort order is meaningless for the byte arrays that you + obtain through serialization. Consequently, you should not use serialization for keys if you + care about their sort order. You should + also not use serialization for record data if your + Database supports duplicate records and you care about sort order.

    +
  • +
  • +

    + You want to minimize the size of your byte arrays. Even when using the Bind APIs to perform the + serialization the resulting byte array may be larger than necessary. You can achieve + more compact results by building your own custom tuple binding. +

    +
  • +
  • +

    + You want to optimize for speed. In general, custom tuple bindings are faster than serialization at + moving data in and out of byte arrays. +

    +
  • +
+
+

+ For information on building your own custom tuple binding, see Custom Tuple Bindings. +

+
+
+
+
+
+

Serializing Objects

+
+
+
+
+

To serialize and store a serializable complex object using the + Bind APIs:

+
+
    +
  1. +

    Implement the class whose instances you want to store. + Note that this class must implement java.io.Serializable. +

    +
  2. +
  3. +

    Open (create) your databases. You need two. The first is the + database that you use to store your data. The second is used to + store the class information.

    +
  4. +
  5. +

    Instantiate a class catalog. You do this with + com.sleepycat.bind.serial.StoredClassCatalog, + and at that time you must provide a handle to an open database + that is used to store the class information.

    +
  6. +
  7. +

    Create an entry binding that uses com.sleepycat.bind.serial.SerialBinding.

    +
  8. +
  9. +

    Instantiate an instance of the object that you want to + store, and place it in a DatabaseEntry + using the entry binding that you created in the previous step.

    +
  10. +
+
+

+ For example, suppose you want to store a long, double, and a + String as a record's data. Then you might create a class that + looks something like this: +

+ +
package com.sleepycat.examples.db.GettingStarted;    
+
+import java.io.Serializable;
+
+public class MyData implements Serializable {
+    private long longData;
+    private double doubleData;
+    private String description;
+
+    MyData() {
+        longData = 0;
+        doubleData = 0.0;
+        description = null;
+    }
+
+    public void setLong(long data) {
+        longData = data;
+    }
+
+    public void setDouble(double data) {
+        doubleData = data;
+    }
+
+    public void setDescription(String data) {
+        description = data;
+    }
+
+    public long getLong() {
+        return longData;
+    }
+
+    public double getDouble() {
+        return doubleData;
+    }
+
+    public String getDescription() {
+        return description;
+    }
+}
+

You can then store instances of this class as follows:

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.DatabaseType;
+...
+
+// The key data.
+String aKey = "myData";
+
+// The data data
+MyData data2Store = new MyData();
+data2Store.setLong(123456789l);
+data2Store.setDouble(1234.9876543);
+data2Store.setDescription("A test instance of this class");
+
+try {
+    // Open the database that you will use to store your data
+    DatabaseConfig myDbConfig = new DatabaseConfig();
+    myDbConfig.setAllowCreate(true);
+    myDbConfig.setSortedDuplicates(true);
+    myDbConfig.setType(DatabaseType.BTREE);
+    Database myDatabase = new Database("myDb", null, myDbConfig);
+
+    // Open the database that you use to store your class information.
+    // The db used to store class information does not require duplicates
+    // support.
+    myDbConfig.setSortedDuplicates(false);
+    Database myClassDb = new Database("classDb", null, myDbConfig); 
+
+    // Instantiate the class catalog
+    StoredClassCatalog classCatalog = new StoredClassCatalog(myClassDb);
+
+    // Create the binding
+    EntryBinding dataBinding = new SerialBinding(classCatalog, 
+                                                 MyData.class);
+
+    // Create the DatabaseEntry for the key
+    DatabaseEntry theKey = new DatabaseEntry(aKey.getBytes("UTF-8"));
+
+    // Create the DatabaseEntry for the data. Use the EntryBinding object
+    // that was just created to populate the DatabaseEntry
+    DatabaseEntry theData = new DatabaseEntry();
+    dataBinding.objectToEntry(data2Store, theData);
+
+    // Put it as normal
+    myDatabase.put(null, theKey, theData);
+    
+    // Database and environment close omitted for brevity 
+} catch (Exception e) {
+    // Exception handling goes here
+}
+
+
+
+
+
+

Deserializing Objects

+
+
+
+
+

Once an object is stored in the database, you can retrieve the + MyData objects from the retrieved + DatabaseEntry using the Bind APIs in much the + same way as is described above. For example:

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.DatabaseType;
+import com.sleepycat.db.LockMode;
+
+...
+
+// The key data.
+String aKey = "myData";
+
+try {
+    // Open the database that stores your data
+    DatabaseConfig myDbConfig = new DatabaseConfig();
+    myDbConfig.setAllowCreate(false);
+    myDbConfig.setType(DatabaseType.BTREE);
+    Database myDatabase = new Database("myDb", null, myDbConfig);
+
+    // Open the database that stores your class information.
+    Database myClassDb = new Database("classDb", null, myDbConfig); 
+
+    // Instantiate the class catalog
+    StoredClassCatalog classCatalog = new StoredClassCatalog(myClassDb);
+
+    // Create the binding
+    EntryBinding dataBinding = new SerialBinding(classCatalog, 
+                                                 MyData.class);
+
+    // Create DatabaseEntry objects for the key and data
+    DatabaseEntry theKey = new DatabaseEntry(aKey.getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry();
+
+    // Do the get as normal
+    myDatabase.get(null, theKey, theData, LockMode.DEFAULT);
+
+    // Recreate the MyData object from the retrieved DatabaseEntry using
+    // the EntryBinding created above
+    MyData retrievedData = (MyData) dataBinding.entryToObject(theData);
+ 
+    // Database and environment close omitted for brevity
+} catch (Exception e) {
+    // Exception handling goes here
+}
+
+
+
+
+
+
+

Custom Tuple Bindings

+
+
+
+
+

+ If you want to store complex objects in your database, then you can use + tuple bindings to do this. While they are more work to write and + maintain than if you were to use serialization, the + byte array conversion is faster. In addition, custom + tuple bindings should allow you to create byte arrays + that are smaller than those created by serialization. Custom tuple + bindings also allow you to optimize your BTree comparisons, whereas + serialization does not. +

+

+ For information on using serialization to store complex objects, see + Serializable Complex Objects. +

+

To store complex objects using a custom tuple binding:

+
+
    +
  1. +

    Implement the class whose instances you want to store. + Note that you do not have to implement serialization.

    +
  2. +
  3. +

    Implement the com.sleepycat.bind.tuple.TupleBinding + interface.

    +
  4. +
  5. +

    Open (create) your database. Unlike serialization, you only + need one.

    +
  6. +
  7. +

    Create an entry binding that uses the tuple binding that you + implemented in step 2.

    +
  8. +
  9. +

    Instantiate an instance of the object that you want to store, + and place it in a DatabaseEntry using the + entry binding that you created in the previous step.

    +
  10. +
+
+

+ For example, suppose you want your keys to be instances of the + following class: +

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+public class MyData2 {
+    private long longData;
+    private Double doubleData;
+    private String description;
+
+    public MyData2() {
+        longData = 0;
+        doubleData = new Double(0.0);
+        description = "";
+    }
+
+    public void setLong(long data) {
+        longData = data;
+    }
+
+    public void setDouble(Double data) {
+        doubleData = data;
+    }
+
+    public void setString(String data) {
+        description = data;
+    }
+
+    public long getLong() {
+        return longData;
+    }
+
+    public Double getDouble() {
+        return doubleData;
+    }
+
+    public String getString() {
+        return description;
+    }
+} 
+

In this case, you need to write a tuple binding for the + MyData2 class. When you do this, you must + implement the TupleBinding.objectToEntry() + and TupleBinding.entryToObject() abstract methods. + Remember the following as you implement these methods:

+
+
    +
  • +

    You use TupleBinding.objectToEntry() to convert + objects to byte arrays. You use + com.sleepycat.bind.tuple.TupleOutput to write + primitive data types to the byte array. Note that + TupleOutput provides methods that allow + you to work with numerical types (long, + double, int, and so forth) and + not the corresponding java.lang numerical + classes.

    +
  • +
  • +

    The order that you write data to the byte + array in TupleBinding.objectToEntry() is the order that + it appears in the array. So given the MyData2 + class as an example, if you write description, + doubleData, and then longData, + then the resulting byte array will contain these data elements in + that order. This means that your records will sort based on the + value of the description data member and then + the doubleData member, and so forth. If you + prefer to sort based on, say, the longData data + member, write it to the byte array first.

    +
  • +
  • +

    You use TupleBinding.entryToObject() to convert + the byte array back into an instance of your + original class. You use com.sleepycat.bind.tuple.TupleInput + to get data from the byte array.

    +
  • +
  • +

    The order that you read data from the byte + array must be exactly the same as the order in which it was written.

    +
  • +
+
+

For example:

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+public class MyTupleBinding extends TupleBinding {
+
+    // Write a MyData2 object to a TupleOutput
+    public void objectToEntry(Object object, TupleOutput to) {
+
+        MyData2 myData = (MyData2)object;
+
+        // Write the data to the TupleOutput (a DatabaseEntry).
+        // Order is important. The first data written will be
+        // the first bytes used by the default comparison routines.
+        to.writeDouble(myData.getDouble().doubleValue());
+        to.writeLong(myData.getLong());
+        to.writeString(myData.getString());
+    }
+
+    // Convert a TupleInput to a MyData2 object
+    public Object entryToObject(TupleInput ti) {
+
+        // Data must be read in the same order that it was
+        // originally written.
+        Double theDouble = new Double(ti.readDouble());
+        long theLong = ti.readLong();
+        String theString = ti.readString();
+
+        MyData2 myData = new MyData2();
+        myData.setDouble(theDouble);
+        myData.setLong(theLong);
+        myData.setString(theString);
+
+        return myData;
+    }
+} 
+

In order to use the tuple binding, instantiate the binding and + then use:

+
+
    +
  • +

    MyTupleBinding.objectToEntry() to + convert a MyData2 object to a DatabaseEntry.

    +
  • +
  • +

    MyTupleBinding.entryToObject() to convert + a DatabaseEntry to a MyData2 + object.

    +
  • +
+
+

For example:

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.db.DatabaseEntry;
+ 
+...
+
+TupleBinding keyBinding = new MyTupleBinding();
+
+MyData2 theKeyData = new MyData2();
+theKeyData.setLong(123456789l);
+theKeyData.setDouble(new Double(12345.6789));
+theKeyData.setString("My key data");
+
+DatabaseEntry myKey = new DatabaseEntry();
+
+try {
+    // Store theKeyData in the DatabaseEntry
+    keyBinding.objectToEntry(theKeyData, myKey);
+
+    ...
+    // Database put and get activity omitted for clarity
+    ...
+
+    // Retrieve the key data
+    theKeyData = (MyData2) keyBinding.entryToObject(myKey);
+} catch (Exception e) {
+    // Exception handling goes here
+}
+
+
+ + + diff --git a/db/docs/gsg/JAVA/btree.html b/db/docs/gsg/JAVA/btree.html new file mode 100644 index 000000000..5deb0fa36 --- /dev/null +++ b/db/docs/gsg/JAVA/btree.html @@ -0,0 +1,540 @@ + + + + + + BTree Configuration + + + + + + + + +
+
+
+
+

BTree Configuration

+
+
+
+
+

+ In going through the previous chapters in this book, you may notice that + we touch on some topics that are specific to BTree, but we do not cover + those topics in any real detail. In this section, we will discuss + configuration issues that are unique to BTree. +

+

+ Specifically, in this section we describe: +

+
+
    +
  • +

    + Allowing duplicate records. +

    +
  • +
  • +

    + Setting comparator callbacks. +

    +
  • +
+
+
+
+
+
+

Allowing Duplicate Records

+
+
+
+
+

+ BTree databases can contain duplicate records. One record is + considered to be a duplicate of another when both records use keys + that compare as equal to one another. +

+

+ By default, keys are compared using a lexicographical comparison, + with shorter keys collating before longer keys. + You can override this default using the + + + DatabaseConfig.setBtreeComparator() + method. See the next section for details. +

+

+ By default, DB databases do not allow duplicate records. As a + result, any attempt to write a record that uses a key equal to a + previously existing record results in the previously existing record + being overwritten by the new record. +

+

+ Allowing duplicate records is useful if you have a database that + contains records keyed by a commonly occurring piece of information. + It is frequently necessary to allow duplicate records for secondary + databases. +

+

+ For example, suppose your primary database contained records related + to automobiles. You might in this case want to be able to find all + the automobiles in the database that are of a particular color, so + you would index on the color of the automobile. However, for any + given color there will probably be multiple automobiles. Since the + index is the secondary key, this means that multiple secondary + database records will share the same key, and so the secondary + database must support duplicate records. +

+
+
+
+
+

Sorted Duplicates

+
+
+
+
+

+ Duplicate records can be stored in sorted or unsorted order. + You can cause DB to automatically sort your duplicate + records by + + + setting DatabaseConfig.setSortedDuplicates() + to true. Note that this property must be + set prior to database creation time and it cannot be changed + afterwards. + +

+

+ If sorted duplicates are supported, then the + + + java.util.Comparator implementation + identified to + DatabaseConfig.setDuplicateComparator() + + is used to determine the location of the duplicate record in its + duplicate set. If no such function is provided, then the default + lexicographical comparison is used. +

+
+
+
+
+
+

Unsorted Duplicates

+
+
+
+
+

+ For performance reasons, BTrees should always contain sorted + records. (BTrees containing unsorted entries must potentially + spend a great deal more time locating an entry than does a BTree + that contains sorted entries). That said, DB provides support + for suppressing automatic sorting of duplicate records because it may be that + your application is inserting records that are already in a + sorted order. +

+

+ That is, if the database is configured to support unsorted + duplicates, then the assumption is that your application + will manually perform the sorting. Any time you + place records into the database in a sort order not known to + DB, you will pay a significant performance penalty. +

+

+ Here is how DB behaves when inserting records + into a database that supports unsorted duplicates (a brief configuration and usage sketch follows this list): +

+
+
    +
  • +

    + If your application simply adds a duplicate record using + + + Database.put(), + then the record is inserted at the end of its sorted duplicate set. +

    +
  • +
  • +

    + If a cursor is used to put the duplicate record to the database, + then the new record is placed in the duplicate set according to the + actual method used to perform the put. The relevant methods + are: +

    +
    +
      +
    • +

      + + Cursor.putAfter() +

      +

      + The data + + is placed into the database + as a duplicate record. The key used for this operation is + the key used for the record to which the cursor currently + refers. Any key provided on the call + + + + is therefore ignored. +

      +

      + The duplicate record is inserted into the database + immediately after the cursor's current position in the + database. +

      +
    • +
    • +

      + + Cursor.putBefore() +

      +

      + Behaves the same as + + Cursor.putAfter() + except that the new record is inserted immediately before + the cursor's current location in the database. +

      +
    • +
    • +

      + + Cursor.putKeyFirst() +

      +

      + If the key + + already exists in the + database, and the database is configured to use duplicates + without sorting, then the new record is inserted as the first entry + in the appropriate duplicates list. +

      +
    • +
    • +

      + + Cursor.putKeyLast() +

      +

      + Behaves identically to + + Cursor.putKeyFirst() + except that the new duplicate record is inserted as the last + record in the duplicates list. +

      +
    • +
    +
    +
  • +
+
+
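+ The following minimal sketch illustrates configuring a database for unsorted duplicates and then inserting a duplicate with a cursor. It is a sketch only: the exact signatures of DatabaseConfig.setUnsortedDuplicates() and Cursor.putAfter() are assumed to match the usage shown, and the file name and key/data values are hypothetical:
+package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.db.Cursor;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.DatabaseType;
+import com.sleepycat.db.OperationStatus;
+
+...
+
+Cursor cursor = null;
+try {
+    // Configure the database for unsorted duplicates before it is
+    // first created. (Assumes setUnsortedDuplicates() takes a boolean.)
+    DatabaseConfig myDbConfig = new DatabaseConfig();
+    myDbConfig.setAllowCreate(true);
+    myDbConfig.setType(DatabaseType.BTREE);
+    myDbConfig.setUnsortedDuplicates(true);
+
+    // "myDupsDb.db" is a hypothetical file name used for illustration.
+    Database myDatabase = new Database("myDupsDb.db", null, myDbConfig);
+
+    DatabaseEntry theKey = new DatabaseEntry("aKey".getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry("first".getBytes("UTF-8"));
+    DatabaseEntry dupData = new DatabaseEntry("second".getBytes("UTF-8"));
+
+    cursor = myDatabase.openCursor(null, null);
+
+    // The first put positions the cursor on the newly inserted record.
+    cursor.put(theKey, theData);
+
+    // Insert the duplicate immediately after the cursor's current
+    // position. The key passed to putAfter() is ignored; the cursor's
+    // current key is used.
+    OperationStatus retVal = cursor.putAfter(theKey, dupData);
+} catch (Exception e) {
+    // Exception handling goes here
+} finally {
+   // Make sure to close the cursor
+   cursor.close();
+}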
+
+
+
+
+

Configuring a Database to Support Duplicates

+
+
+
+
+

+ Duplicates support can only be configured + at database creation time. You do this by specifying the appropriate + + + DatabaseConfig method + + before the database is opened for the first time. +

+

+ The + + methods + that you can use are: +

+
+
    +
  • +

    + + DatabaseConfig.setUnsortedDuplicates() +

    +

    + The database supports non-sorted duplicate records. +

    +
  • +
  • +

    + + DatabaseConfig.setSortedDuplicates() +

    +

    + The database supports sorted duplicate records. +

    +
  • +
+
+

+ The following code fragment illustrates how to configure a database + to support sorted duplicate records: +

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import java.io.FileNotFoundException;
+
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.DatabaseType;
+
+...
+
+Database myDb = null;
+
+try {
+    // Typical configuration settings
+    DatabaseConfig myDbConfig = new DatabaseConfig();
+    myDbConfig.setType(DatabaseType.BTREE);
+    myDbConfig.setAllowCreate(true);
+
+    // Configure for sorted duplicates
+    myDbConfig.setSortedDuplicates(true);
+
+   // Open the database
+   myDb = new Database("mydb.db", null, myDbConfig);
+} catch(DatabaseException dbe) {
+    System.err.println("MyDbs: " + dbe.toString());
+    System.exit(-1);
+} catch(FileNotFoundException fnfe) {
+    System.err.println("MyDbs: " + fnfe.toString());
+    System.exit(-1);
+} 
+
+
+
+
+
+
+

Setting Comparison Functions

+
+
+
+
+

+ By default, DB uses a lexicographical comparison function where + shorter records collate before longer records. For the majority of + cases, this comparison works well and you do not need to manage + it in any way. +

+

+ However, in some situations your application's performance can + benefit from setting a custom comparison routine. You can do this + either for database keys, or for the data if your + database supports sorted duplicate records. +

+

+ Some of the reasons why you may want to provide a custom sorting + function are: +

+
+
    +
  • +

    + Your database is keyed using strings and you want to provide + some sort of language-sensitive ordering to that data. Doing + so can help increase the locality of reference that allows + your database to perform at its best. +

    +
  • +
  • +

    + You are using a little-endian system (such as x86) and you + are using integers as your database's keys. Berkeley DB + stores keys as byte strings and little-endian integers + do not sort well when viewed as byte strings. There are + several solutions to this problem, one being to provide a + custom comparison function. See + http://www.sleepycat.com/docs/ref/am_misc/faq.html + for more information. +

    +
  • +
  • +

    You do not want the entire key to participate in the + comparison, for whatever reason. In + this case, you may want to provide a custom comparison + function so that only the relevant bytes are examined.

    +
  • +
+
+
+
+
+
+

+ + Creating Java Comparators +

+
+
+
+
+

+ You set a BTree's key + + + comparator + + using + + + DatabaseConfig.setBtreeComparator(). + You can also set a BTree's duplicate data comparison function using + + + DatabaseConfig.setDuplicateComparator(). + +

+

+ + + If + + the database already exists when it is opened, the + + + comparator + + provided to these methods must be the same as + that historically used to create the database or corruption can + occur. +

+

+ You override the default comparison function by providing a Java + Comparator class to the database. + The Java Comparator interface requires you to implement the + Comparator.compare() method + (see http://java.sun.com/j2se/1.4.2/docs/api/java/util/Comparator.html for details). +

+

+ DB hands your Comparator.compare() method + the byte arrays that you stored in the database. If + you know how your data is organized in the byte + array, then you can write a comparison routine that directly examines + the contents of the arrays. Otherwise, you have to reconstruct your + original objects, and then perform the comparison. +

+

+ For example, suppose you want to perform unicode lexical comparisons + instead of UTF-8 byte-by-byte comparisons. Then you could provide a + comparator that uses String.compareTo(), + which performs a Unicode comparison of two strings (note that for + single-byte roman characters, Unicode comparison and UTF-8 + byte-by-byte comparisons are identical – this is something you + would only want to do if you were using multibyte unicode characters + with DB). In this case, your comparator would look like the + following: +

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import java.util.Comparator;
+
+public class MyDataComparator implements Comparator {
+
+    public MyDataComparator() {}
+
+    public int compare(Object d1, Object d2) {
+
+        byte[] b1 = (byte[])d1;
+        byte[] b2 = (byte[])d2;
+
+        String s1 = new String(b1);
+        String s2 = new String(b2);
+        return s1.compareTo(s2);
+    }
+} 
+

+ To use this comparator: +

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import java.io.FileNotFoundException;
+import java.util.Comparator;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseException;
+
+...
+
+Database myDatabase = null;
+try {
+    // Get the database configuration object
+    DatabaseConfig myDbConfig = new DatabaseConfig();
+    myDbConfig.setAllowCreate(true);
+
+    // Set the duplicate comparator class
+    MyDataComparator mdc = new MyDataComparator();
+    myDbConfig.setDuplicateComparator(mdc);
+
+    // Open the database that you will use to store your data
+    myDbConfig.setSortedDuplicates(true);
+    myDatabase = new Database("myDb", null, myDbConfig);
+} catch (DatabaseException dbe) {
+    // Exception handling goes here
+} catch (FileNotFoundException fnfe) {
+    // Exception handling goes here
+}
+
+
+
+ + + diff --git a/db/docs/gsg/JAVA/cachesize.html b/db/docs/gsg/JAVA/cachesize.html new file mode 100644 index 000000000..ace996d76 --- /dev/null +++ b/db/docs/gsg/JAVA/cachesize.html @@ -0,0 +1,98 @@ + + + + + + Selecting the Cache Size + + + + + + + + + +
+
+
+
+

Selecting the Cache Size

+
+
+
+
+

+ Cache size is important to your application because if it is set to too + small a value, your application's performance will suffer from too + much disk I/O. On the other hand, if your cache is too large, then your + application will use more memory than it actually needs. + Moreover, if your application uses too much memory, then on most + operating systems this can result in your application being swapped out + of memory, resulting in extremely poor performance. +

+

+ You select your cache size using either + + + + DatabaseConfig.setCacheSize(), or + EnvironmentConfig.setCacheSize(), + + depending on whether you are using a database environment or not. Your + cache size must be a power of 2, but it is otherwise limited only by + available memory and performance considerations. +
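+ For illustration, a minimal sketch of setting the cache size on a DatabaseConfig before the database is opened. The size argument is assumed to be in bytes, and 64 MB is an arbitrary example value:
+package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.db.DatabaseConfig;
+
+...
+
+// Request a 64 MB cache (a power of 2) before the database is opened.
+// The size is assumed to be given in bytes.
+DatabaseConfig myDbConfig = new DatabaseConfig();
+myDbConfig.setAllowCreate(true);
+myDbConfig.setCacheSize(64 * 1024 * 1024);
+
+// Database open (as shown elsewhere in this guide) omitted for brevity.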

+

+ Selecting a cache size is something of an art, but fortunately it is + selected at database (or environment) open time, so it can be easily + tuned to your application's data requirements as they change over time. + The best way to determine how large your cache needs to be is to put + your application into a production environment and watch to see how much + disk I/O is occurring. If your application is going to disk quite a lot + to retrieve database records, then you should increase the size of your + cache (provided that you have enough memory to do so). +

+

+ You can use the db_stat command line utility with the + -m option to gauge the effectiveness of your cache. + In particular, the number of pages found in the cache is shown, along + with a percentage value. The closer to 100% that you can get, the + better. If this value drops too low, and you are experiencing + performance problems, then you should consider increasing the size of + your cache, assuming you have memory to support it. +

+
+ + + diff --git a/db/docs/gsg/JAVA/concepts.html b/db/docs/gsg/JAVA/concepts.html new file mode 100644 index 000000000..8404051db --- /dev/null +++ b/db/docs/gsg/JAVA/concepts.html @@ -0,0 +1,168 @@ + + + + + + Berkeley DB Concepts + + + + + + + + + +
+
+
+
+

Berkeley DB Concepts

+
+
+
+
+

+ Before continuing, it is useful to describe some of the larger concepts + that you will encounter when building a DB application. +

+

+ Conceptually, DB databases contain records. + Logically each record represents a single entry in the database. + Each such record contains two pieces of information: a key and a data. + This manual will on occasion describe a record's + key or a record's data when it is + necessary to speak to one or the other portion of a database + record. +

+

+ Because of the key/data pairing used for DB databases, they are + sometimes thought of as a two-column table. However, data (and + sometimes keys, depending on the access method) can hold arbitrarily + complex data. Frequently, C structures and other such mechanisms are + stored in the record. This effectively turns a 2-column table + into a table with n columns, where + n-1 of those columns are provided by the structure's + fields. +

+

+ Note that a DB database is very much like a table in a relational + database system in that most DB applications use more than one + database (just as most relational databases use more than one table). +

+

+ Unlike relational systems, however, a DB database contains a single + collection of records organized according to a given access method + (BTree, Queue, Hash, and so forth). In a relational database system, + the underlying access method is generally hidden from you. +

+

+ In any case, frequently DB + applications are designed so that a single database stores a specific + type of data (just as in a relational database system, a single table + holds entries containing a specific set of fields). Because most applications + are required to manage multiple kinds of data, a DB application will + often use multiple databases. +

+

+ For example, consider an accounting application. This kind of an + application may manage data based on bank accounts, checking + accounts, stocks, bonds, loans, and so forth. An accounting application + will also have to manage information about people, banking institutions, + customer accounts, and so on. In a traditional relational database, all + of these different kinds of information would be stored and managed + using a (probably very) complex series of tables. In a DB + application, all of this information would instead be divided out and + managed using multiple databases. +

+

+ DB applications can efficiently use multiple databases using an + optional mechanism called an environment. + For more information, see Environments. +

+

+ You interact with most DB APIs using special structures that + contain pointers to functions. These callbacks are + called methods because they look so much like a + method on a C++ class. The variable that you use to access these + methods is often referred to as a + handle. For example, to use a database you will + obtain a handle to that database. +

+

+ Retrieving a record from a database is sometimes called + getting the record because the method that you use + to retrieve the records is called get(). + Similarly, storing database records is sometimes called + putting the record because you use the + put() method to do this. +

+

+ When you store, or put, a record to a database using its handle, the + record is stored according to whatever sort order is in use by the + database. Sorting is mostly performed based on the key, but sometimes + the data is considered too. If you put a record using a key that already + exists in the database, then the existing record is replaced with the + new data. However, if the database supports + duplicate records (that is, records with identical keys but + different data), then that new record is stored as a duplicate record and + any existing records are not overwritten. +

+

+ If a database supports duplicate records, then you can use a database + handle to retrieve only the first record in a set of duplicate records. +

+

+ In addition to using a database handle, you can also read and write data using a + special mechanism called a cursor. Cursors are + essentially iterators that you can use to walk over the records in a + database. You can use cursors to iterate over a database from the first + record to the last, and from the last to the first. You can also use + cursors to seek to a record. In the event that a database supports + duplicate records, cursors are the only way you can access all the + records in a set of duplicates. +

+

+ Finally, DB provides a special kind of a database called a + secondary database. Secondary databases serve as an + index into normal databases (called primary databases to distinguish them + from secondaries). Secondary databases are interesting because DB + records can hold complex data types, but seeking to a given record is + performed only based on that record's key. If you want to be able to + seek to a record based on some piece of information that is not the key, + then you can do so through the use of secondary databases. +

+
+ + + diff --git a/db/docs/gsg/JAVA/coreExceptions.html b/db/docs/gsg/JAVA/coreExceptions.html new file mode 100644 index 000000000..f3b8ae4b2 --- /dev/null +++ b/db/docs/gsg/JAVA/coreExceptions.html @@ -0,0 +1,102 @@ + + + + + + Exception Handling + + + + + + + + + +
+
+
+
+

Exception Handling

+
+
+
+
+

+ Before continuing, it is useful to spend a few moments on exception + handling in DB applications written in Java. +

+

+ + Most + DB methods throw + + DatabaseException + in the event of a serious error. + + + + So your DB code must either catch this exception or declare it + to be throwable. Be aware that DatabaseException extends + java.lang.Exception. For example: + +

+ +
import com.sleepycat.db.DatabaseException;
+
+    ...
+try 
+{
+    // DB and other code goes here
+}
+catch(DatabaseException e)
+{
+  // DB error handling goes here
+} 
+

+ You can obtain the DB error number for a + + DatabaseException + by using + + + DatabaseException.getErrno(). + You can also obtain any error message associated with that error + using DatabaseException.getMessage(). + +
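+ For example, a catch block might report both pieces of information as follows (a minimal sketch):
+import com.sleepycat.db.DatabaseException;
+
+...
+
+try {
+    // DB and other code goes here
+} catch (DatabaseException e) {
+    // Report the DB error number and any associated message.
+    System.err.println("DB error " + e.getErrno() + ": " + e.getMessage());
+}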

+
+ + + diff --git a/db/docs/gsg/JAVA/coredbclose.html b/db/docs/gsg/JAVA/coredbclose.html new file mode 100644 index 000000000..29908b785 --- /dev/null +++ b/db/docs/gsg/JAVA/coredbclose.html @@ -0,0 +1,103 @@ + + + + + + Closing Databases + + + + + + + + + +
+
+
+
+

Closing Databases

+
+
+
+
+

+ Once you are done using the database, you must close it. You use the + Database.close() + method to do this. +

+

+ Closing a database causes it to become unusable until it is opened + again. Note that you should make sure that any open cursors are closed + before closing your database. Active cursors during a database + close can cause unexpected results, especially if any of those cursors are + writing to the database. You should always make sure that all your + database accesses have completed before closing your database. +

+

+ Cursors are described in Using Cursors later in this manual. +

+

+ Be aware that when you close the last open handle + for a database, then by default its cache is flushed to disk. + This means that any information that has + been modified in the cache is guaranteed to be written to disk when the + last handle is closed. You can manually perform this operation using + the + + + Database.sync() + method, but for normal shutdown operations it is not necessary. + For more information about syncing your cache, see + Data Persistence. +
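+ If you do want to flush the cache without closing the handle, a minimal sketch might look like the following (it assumes myDatabase is an open Database handle):
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseException;
+
+...
+
+try {
+    if (myDatabase != null) {
+        // Flush any modified pages for this database from the cache
+        // to disk without closing the handle.
+        myDatabase.sync();
+    }
+} catch (DatabaseException dbe) {
+    // Exception handling goes here
+}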

+

The following code fragment illustrates a database close:

+ +
import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Database;
+
+...
+
+try {
+        if (myDatabase != null) {
+            myDatabase.close();
+        }
+} catch (DatabaseException dbe) {
+    // Exception handling goes here
+} 
+
+ + + diff --git a/db/docs/gsg/JAVA/cursorJavaUsage.html b/db/docs/gsg/JAVA/cursorJavaUsage.html new file mode 100644 index 000000000..59ea0e5ed --- /dev/null +++ b/db/docs/gsg/JAVA/cursorJavaUsage.html @@ -0,0 +1,278 @@ + + + + + + Cursor Example + + + + + + + + + +
+
+
+
+

Cursor Example

+
+
+
+
+

In Database Usage Example we wrote an + application that loaded two Database objects with vendor + and inventory information. In this example, we will use those databases to + display all of the items in the inventory database. As a part of showing + any given inventory item, we will look up the vendor who can provide the + item and show the vendor's contact information.

+

To do this, we create the ExampleDatabaseRead + application. This application reads and displays all inventory records by:

+
+
    +
  1. +

    Opening the inventory, vendor, and + class catalog Database objects. We do this using the + MyDbs class. See Stored Class Catalog Management with MyDbs + for a description of this class.

    +
  2. +
  3. +

    Obtaining a cursor from the inventory Database.

    +
  4. +
  5. +

    Steps through the Database, displaying + each record as it goes.

    +
  6. +
  7. +

    To display the Inventory record, the custom tuple binding that + we created in InventoryBinding.java is used.

    +
  8. +
  9. +

    Database.get() is used to obtain the vendor that corresponds to + the inventory item.

    +
  10. +
  11. +

    A serial binding is used to convert the + DatabaseEntry returned + by the get() to a Vendor object.

    +
  12. +
  13. +

    The contents of the Vendor object are displayed.

    +
  14. +
+
+

We implemented the Vendor class in Vendor.java. We implemented the + Inventory class in Inventory.java.

+

The full implementation of ExampleDatabaseRead + can be found in: +

+
DB_INSTALL/examples_java/src/com/sleepycat/examples/db/GettingStarted
+

+ where DB_INSTALL is the location where you + placed your DB distribution. +

+
+ +

+ Example 4.1 ExampleDatabaseRead.java +

+

To begin, we import the necessary classes:

+ +
// file ExampleDatabaseRead.java
+package com.sleepycat.examples.db.GettingStarted;
+
+import java.io.File;
+import java.io.IOException;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.db.Cursor;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.LockMode;
+import com.sleepycat.db.OperationStatus;
+

Next we declare our class and set up some global variables. Note a + MyDbs object is instantiated here. We can do + this because its constructor never throws an exception. See Database Example for its implementation + details.

+ +
public class ExampleDatabaseRead {
+
+    private static String myDbsPath = "./";
+
+    // Encapsulates the database environment and databases.
+    private static MyDbs myDbs = new MyDbs();
+
+    private static TupleBinding inventoryBinding;
+    private static EntryBinding vendorBinding; 
+

+ Next we create the ExampleDatabaseRead.usage() and + ExampleDatabaseRead.main() methods. + We perform almost all of our exception handling from ExampleDatabaseRead.main(), and so we + must catch DatabaseException because the com.sleepycat.db.* + APIs throw them. +

+ +
   private static void usage() {
+        System.out.println("ExampleDatabaseRead [-h <env directory>]" +
+                           "[-s <item to locate>]");
+        System.exit(-1);
+    }
+
+    public static void main(String args[]) {
+        ExampleDatabaseRead edr = new ExampleDatabaseRead();
+        try {
+            edr.run(args);
+        } catch (DatabaseException dbe) {
+            System.err.println("ExampleDatabaseRead: " + dbe.toString());
+            dbe.printStackTrace();
+        } finally {
+            myDbs.close();
+        }
+        System.out.println("All done.");
+    }
+

In ExampleDatabaseRead.run(), we call MyDbs.setup() to + open our databases. Then we create the bindings that we need for using our data objects with + DatabaseEntry objects. +

+ +
    private void run(String args[])
+        throws DatabaseException {
+        // Parse the arguments list
+        parseArgs(args);
+
+        myDbs.setup(myDbsPath);
+
+        // Setup our bindings.
+        inventoryBinding = new InventoryBinding();
+        vendorBinding =
+             new SerialBinding(myDbs.getClassCatalog(),
+                               Vendor.class);
+
+        showAllInventory();
+    }
+

Now we write the loop that displays the Inventory + records. We do this by opening a cursor on the inventory database and + iterating over all its contents, displaying each as we go.

+ +
    private void showAllInventory() 
+        throws DatabaseException {
+        // Get a cursor
+        Cursor cursor = myDbs.getInventoryDB().openCursor(null, null);
+
+        // DatabaseEntry objects used for reading records
+        DatabaseEntry foundKey = new DatabaseEntry();
+        DatabaseEntry foundData = new DatabaseEntry();
+                                                                                                                                       
+        try { // always want to make sure the cursor gets closed
+            while (cursor.getNext(foundKey, foundData,
+                        LockMode.DEFAULT) == OperationStatus.SUCCESS) {
+                Inventory theInventory =
+                    (Inventory)inventoryBinding.entryToObject(foundData);
+                displayInventoryRecord(foundKey, theInventory);
+            }
+        } catch (Exception e) {
+            System.err.println("Error on inventory cursor:");
+            System.err.println(e.toString());
+            e.printStackTrace();
+        } finally {
+            cursor.close();
+        }
+    } 
+

We use ExampleDatabaseRead.displayInventoryRecord() + to actually show the record. This + method first displays all the relevant information from the retrieved + Inventory object. It then uses the vendor database to retrieve and + display the vendor. Because the vendor database is keyed by vendor name, + and because each inventory object contains this key, it is trivial to + retrieve the appropriate vendor record.

+ +
   private void displayInventoryRecord(DatabaseEntry theKey,
+                                        Inventory theInventory)
+        throws DatabaseException {
+
+        String theSKU = new String(theKey.getData());
+        System.out.println(theSKU + ":");
+        System.out.println("\t " + theInventory.getItemName());
+        System.out.println("\t " + theInventory.getCategory());
+        System.out.println("\t " + theInventory.getVendor());
+        System.out.println("\t\tNumber in stock: " +
+            theInventory.getVendorInventory());
+        System.out.println("\t\tPrice per unit:  " +
+            theInventory.getVendorPrice());
+        System.out.println("\t\tContact: ");
+
+        DatabaseEntry searchKey = null;
+        try {
+            searchKey =
+                new DatabaseEntry(theInventory.getVendor().getBytes("UTF-8"));
+        } catch (IOException willNeverOccur) {}
+        DatabaseEntry foundVendor = new DatabaseEntry();
+
+        if (myDbs.getVendorDB().get(null, searchKey, foundVendor,
+                LockMode.DEFAULT) != OperationStatus.SUCCESS) {
+            System.out.println("Could not find vendor: " +
+                theInventory.getVendor() + ".");
+            System.exit(-1);
+        } else {
+            Vendor theVendor =
+                (Vendor)vendorBinding.entryToObject(foundVendor);
+            System.out.println("\t\t " + theVendor.getAddress());
+            System.out.println("\t\t " + theVendor.getCity() + ", " +
+                theVendor.getState() + " " + theVendor.getZipcode());
+            System.out.println("\t\t Business Phone: " +
+                theVendor.getBusinessPhoneNumber());
+            System.out.println("\t\t Sales Rep: " +
+                                theVendor.getRepName());
+            System.out.println("\t\t            " +
+                theVendor.getRepPhoneNumber());
+       }
+    }
+

The remainder of this application provides a utility method used + to parse the command line options. From the perspective of this + document, this is relatively uninteresting. You can see how this is + implemented by looking at: +

+
DB_INSTALL/examples_java/src/com/sleepycat/examples/db/GettingStarted
+

+ where DB_INSTALL is the location where you + placed your DB distribution. +

+
+
+ + + diff --git a/db/docs/gsg/JAVA/databaseLimits.html b/db/docs/gsg/JAVA/databaseLimits.html new file mode 100644 index 000000000..15c23a81c --- /dev/null +++ b/db/docs/gsg/JAVA/databaseLimits.html @@ -0,0 +1,81 @@ + + + + + + Database Limits and Portability + + + + + + + + + +
+
+
+
+

Database Limits and Portability

+
+
+
+
+

+ Berkeley DB provides support for managing everything from very small + databases that fit entirely in memory, to extremely large databases + holding millions of records and terabytes of data. DB databases can + store up to 256 terabytes of data. Individual record keys or record + data can store up to 4 gigabytes of data. +

+

+ DB's databases store data in a binary format that is portable across platforms, even those of differing endianness. Be aware, however, that portability aside, some performance issues can crop up if you are using a little-endian architecture. See Setting Comparison Functions for more information.

+

+ Also, DB's databases and data structures are designed for concurrent + access — they are thread-safe, and they share well across multiple + processes. That said, in order to allow multiple processes to share + databases and the cache, DB makes use of mechanisms that do not work + well on network-shared drives (NFS or Windows networks shares, for + example). For this reason, you cannot place your DB databases and + environments on network-mounted drives. +

+
+ + + diff --git a/db/docs/gsg/JAVA/dbErrorReporting.html b/db/docs/gsg/JAVA/dbErrorReporting.html new file mode 100644 index 000000000..d36831daf --- /dev/null +++ b/db/docs/gsg/JAVA/dbErrorReporting.html @@ -0,0 +1,147 @@ + + + + + + Error Reporting Functions + + + + + + + + + +
+
+
+
+

Error Reporting Functions

+
+
+
+
+

+ To simplify error reporting and handling, the DatabaseConfig class offers several useful methods.

+
+
    +
  • +

    + + DatabaseConfig.setErrorStream() +

    +

    + Sets the + + Java OutputStream + to be used for displaying error messages issued by the DB library. +

    +
  • +
  • +

    + + DatabaseConfig.setMessageHandler() +

    +

    + Defines the message handler that is called when an error message is + issued by DB. The error prefix and message are passed to + this callback. It is up to the application to display this + information correctly. +

    +

    + Note that the message handler must be an implementation of the + com.sleepycat.db.MessageHandler + interface. +

    +
  • +
  • +

    + + DatabaseConfig.setErrorPrefix() +

    +

    + Sets the prefix used for any error messages issued by the DB library.

    +
  • +
+
+

+ For example, to send all your error messages to a particular message + handler, first implement the handler: +

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.MessageHandler;
+
+public class MyMessageHandler implements MessageHandler  {
+
+    // Our constructor does nothing
+    public MyMessageHandler() {}
+
+    public void message(Environment dbenv, String message)
+    {
+        // Put your special message handling code here
+    }
+
+}
+

+ And then set up your database to use the message handler by identifying + it on the database's DatabaseConfig object: +

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.db.DatabaseConfig;
+
+...
+
+DatabaseConfig myDbConfig = new DatabaseConfig();
+MyMessageHandler mmh = new MyMessageHandler();
+myDbConfig.setMessageHandler(mmh); 
+
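+ If you prefer to send error output to a stream instead of a callback, use the other DatabaseConfig methods listed above. The following is only an illustrative sketch; it assumes that setErrorStream() accepts a standard Java output stream such as System.err and that setErrorPrefix() takes the prefix string described above:
+
+package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.db.DatabaseConfig;
+
+...
+
+DatabaseConfig myDbConfig = new DatabaseConfig();
+
+// Send DB error messages to standard error, and prefix them so that
+// they are easy to pick out of the application's other output.
+// (Assumes the setter signatures described in the list above.)
+myDbConfig.setErrorStream(System.err);
+myDbConfig.setErrorPrefix("GettingStarted");
+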
+ + + diff --git a/db/docs/gsg/JAVA/db_config.html b/db/docs/gsg/JAVA/db_config.html new file mode 100644 index 000000000..e94fb3eab --- /dev/null +++ b/db/docs/gsg/JAVA/db_config.html @@ -0,0 +1,175 @@ + + + + + + Database Properties + + + + + + + + + +
+
+
+
+

Database Properties

+
+
+
+
+

You can set database properties using the DatabaseConfig + class. For each of the properties that you can set, there is a + corresponding getter method. Also, you can always retrieve the + DatabaseConfig object used by your database using + the Database.getConfig() method.

+

+ There are a large number of properties that you can set using this + class (see the javadoc for a complete listing). From the perspective of + this manual, some of the more interesting properties are: +

+
+
    +
  • +

    + DatabaseConfig.setAllowCreate() +

    +

    If true, the database is created when it is + opened. If false, the database open fails if the database does not + exist. This property has no meaning if the database currently exists. + Default is false.

    +
  • +
  • +

    + DatabaseConfig.setBtreeComparator() +

    +

    Sets the class that is used to compare the keys found on two + database records. This class is used to determine the sort order for + two records in the database. For more information, see + + Setting Comparison Functions. +

    +
  • +
  • +

    + DatabaseConfig.setDuplicateComparator() +

    +

    + Sets the class that is used to compare two duplicate records in + the database. For more information, see + + Setting Comparison Functions. +

    +
  • +
  • +

    + DatabaseConfig.setSortedDuplicates() +

    +

    If true, duplicate records are allowed in the + database. If this value is false, then putting a duplicate record into the database + results in the replacement of the old record with the new record. + Note that this property can be set only at database creation time. Default is false. +

    +
  • +
  • +

    + DatabaseConfig.setExclusiveCreate() +

    +

    If true, the database open fails if the + database currently exists. That is, the open must result in the + creation of a new database. Default is false.

    +
  • +
  • +

    + DatabaseConfig.setReadOnly() +

    +

    If true, the database is opened for read activities only. + Default is false.

    +
  • +
  • +

    + DatabaseConfig.setTruncate() +

    +

    If true, the database is truncated; that is, it is emptied of all + content. +

    +
  • +
  • +

    + DatabaseConfig.setType() +

    +

    Identifies the type of database that you want to create. This + manual will exclusively use DatabaseType.BTREE. +

    +
  • +
+
+

+ In addition to these, there are also methods that allow you to + control the IO stream used for error reporting purposes. + These are described later in this manual. +

+

For example:

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.DatabaseType;
+
+import java.io.FileNotFoundException;
+
+...
+Database myDatabase = null;
+try {
+    DatabaseConfig dbConfig = new DatabaseConfig();
+    dbConfig.setAllowCreate(true);
+    dbConfig.setSortedDuplicates(true);
+    dbConfig.setType(DatabaseType.BTREE);
+    myDatabase = new Database("sampleDatabase.db",
+                              null,
+                              dbConfig); 
+} catch (DatabaseException dbe) {
+    // Exception handling goes here.
+} catch (FileNotFoundException fnfe) {
+    // Exception handling goes here
+}
+
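+ Because every setter has a corresponding getter, you can also examine the properties that an open database is actually using. A minimal sketch, assuming getter names that mirror the setters shown above:
+
+// Continues the example above; assumes the caller handles DatabaseException.
+// Retrieve the configuration in use by the open database and report
+// a couple of its properties.
+DatabaseConfig inUseConfig = myDatabase.getConfig();
+System.out.println("Sorted duplicates allowed: " +
+                   inUseConfig.getSortedDuplicates());
+System.out.println("Read only: " + inUseConfig.getReadOnly());
+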
+ + + diff --git a/db/docs/gsg/JAVA/dbconfig.html b/db/docs/gsg/JAVA/dbconfig.html new file mode 100644 index 000000000..51454bca2 --- /dev/null +++ b/db/docs/gsg/JAVA/dbconfig.html @@ -0,0 +1,399 @@ + + + + + + Chapter 6. Database Configuration + + + + + + + + + +
+
+
+
+

Chapter 6. Database Configuration

+
+
+
+
+
+

+ Table of Contents +

+
+
+ + Setting the Page Size + +
+
+
+
+ + Overflow Pages + +
+
+ + Locking + +
+
+ + IO Efficiency + +
+
+ + Page Sizing Advice + +
+
+
+
+ + Selecting the Cache Size + +
+
+ + BTree Configuration + +
+
+
+
+ + Allowing Duplicate Records + +
+
+ + Setting Comparison Functions + +
+
+
+
+
+

+ This chapter describes some of the database and cache configuration issues + that you need to consider when building your DB database. + In most cases, there is very little that you need to do in terms of + managing your databases. However, there are configuration issues that you + need to be concerned with, and these are largely dependent on the access + method that you are choosing for your database. +

+

+ The examples and descriptions throughout this document have mostly focused + on the BTree access method. This is because the majority of DB + applications use BTree. For this reason, where configuration issues are + dependent on the type of access method in use, this chapter will focus on + BTree only. For configuration descriptions surrounding the other access + methods, see the Berkeley DB Programmer's Tutorial and Reference + Guide. +

+
+
+
+
+

Setting the Page Size

+
+
+
+
+

+ Internally, DB stores database entries on pages. Page sizes are + important because they can affect your application's performance. +

+

+ DB pages can be between 512 bytes and 64K bytes in size. The size that you select must be a power of 2. You set your database's page size using DatabaseConfig.setPageSize().

+

+ Note that a database's page size can only be selected at database + creation time. +

+
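+ For example, the following sketch extends the DatabaseConfig usage shown earlier in this manual; it assumes that setPageSize() takes the page size in bytes:
+
+DatabaseConfig dbConfig = new DatabaseConfig();
+dbConfig.setAllowCreate(true);
+dbConfig.setType(DatabaseType.BTREE);
+
+// Use 8KB pages. The value must be a power of 2 between 512 bytes and
+// 64K bytes, and it only takes effect when the database is created.
+dbConfig.setPageSize(8 * 1024);
+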

+ When selecting a page size, you should consider the following issues: +

+
+
    +
  • +

    + Overflow pages. +

    +
  • +
  • +

    + Locking +

    +
  • +
  • +

    + Disk I/O. +

    +
  • +
+
+

+ These topics are discussed next. +

+
+
+
+
+

Overflow Pages

+
+
+
+
+

+ Overflow pages are used to hold a key or data item + that cannot fit on a single page. You do not have to do anything to + cause overflow pages to be created, other than to store data that is + too large for your database's page size. Also, the only way you can + prevent overflow pages from being created is to be sure to select a + page size that is large enough to hold your database entries. +

+

+ Because overflow pages exist outside of the normal database structure, their use is expensive from a performance perspective. If you select too small a page size, then your database will be forced to use an excessive number of overflow pages. This will significantly harm your application's performance.

+

+ For this reason, you want to select a page size that is at + least large enough to hold multiple entries given the expected + average size of your database entries. In BTree's case, for best + results select a page size that can hold at least 4 such entries. +

+

+ You can see how many overflow pages your database is using by obtaining a DatabaseStats object using the Database.getStats() method, or by examining your database using the db_stat command line utility.

+
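+ For example, the following sketch prints the statistics for an open database. The exact getStats() signature varies by release; this assumes a form that accepts a transaction and a statistics configuration, both of which may be null here, and it simply prints the statistics object rather than relying on any particular accessor:
+
+try {
+    // Gather statistics for the open database and print them.
+    // The output includes the overflow page count, among other figures.
+    // (Assumes com.sleepycat.db.DatabaseStats in addition to the
+    // imports shown earlier.)
+    DatabaseStats stats = myDatabase.getStats(null, null);
+    System.out.println(stats);
+} catch (DatabaseException dbe) {
+    // Exception handling goes here.
+}
+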
+
+
+
+
+

Locking

+
+
+
+
+

+ Locking and multi-threaded access to DB databases is built into the product. However, in order to enable the locking subsystem and in order to provide efficient sharing of the cache between databases, you must use an environment. Environments and multi-threaded access are not fully described in this manual (see the Berkeley DB Programmer's Reference Guide for information). However, we provide some information on sizing your pages in a multi-threaded/multi-process environment in the interest of providing a complete discussion of the topic.

+

+ If your application is multi-threaded, or if your databases are accessed by more than one process at a time, then page size can influence your application's performance. The reason is that for most access methods (Queue is the exception), DB implements page-level locking. This means that the finest locking granularity is at the page, not at the record.

+

+ In most cases, database pages contain multiple database + records. Further, in order to provide safe access to multiple + threads or processes, DB performs locking on pages as entries on + those pages are read or written. +

+

+ As the size of your page increases relative to the size of your database entries, the number of entries held on any given page also increases. As a result, the chances of two or more readers and/or writers wanting to access entries on the same page also increase.

+

+ When two or more threads and/or processes want to manage data on a + page, lock contention occurs. Lock contention is resolved by one + thread (or process) waiting for another thread to give up its lock. + It is this waiting activity that is harmful to your application's + performance. +

+

+ It is possible to select a page size that is so large that your + application will spend excessive, and noticeable, amounts of time + resolving lock contention. Note that this scenario is particularly + likely to occur as the amount of concurrency built into your + application increases. +

+

+ On the other hand, if you select too small a page size, then that will only make your tree deeper, which carries its own performance penalties. The trick, therefore, is to select a reasonable page size (one that will hold a sizeable number of records) and then reduce the page size if you notice lock contention.

+

+ You can examine the number of lock conflicts and deadlocks occurring in your application by examining your database environment's lock statistics, either programmatically or by using the db_stat command line utility. The number of locks that could not be obtained due to conflicts is held in the lock statistic's st_nconflicts field.

+
+
+
+
+
+

IO Efficiency

+
+
+
+
+

+ Page size can affect how efficient DB is at moving data to and + from disk. For some applications, especially those for which the + in-memory cache can not be large enough to hold the entire working + dataset, IO efficiency can significantly impact application performance. +

+

+ Most operating systems use an internal block size to determine how much + data to move to and from disk for a single I/O operation. This block + size is usually equal to the filesystem's block size. For optimal + disk I/O efficiency, you should select a database page size that is + equal to the operating system's I/O block size. +

+

+ Essentially, DB performs data transfers based on the database + page size. That is, it moves data to and from disk a page at a time. + For this reason, if the page size does not match the I/O block size, + then the operating system can introduce inefficiencies in how it + responds to DB's I/O requests. +

+

+ For example, suppose your page size is smaller than your operating system block size. In this case, when DB writes a page to disk it is writing just a portion of a logical filesystem page. Any time an application writes just a portion of a logical filesystem page, the operating system brings in the real filesystem page, overwrites the portion of the page not written by the application, then writes the filesystem page back to disk. The net result is significantly more disk I/O than if the application had simply selected a page size that was equal to the underlying filesystem block size.

+

+ Alternatively, if you select a page size that is larger than the + underlying filesystem block size, then the operating system may have + to read more data than is necessary to fulfill a read request. + Further, on some operating systems, requesting a single database + page may result in the operating system reading enough filesystem + blocks to satisfy the operating system's criteria for read-ahead. In + this case, the operating system will be reading significantly more + data from disk than is actually required to fulfill DB's read + request. +

+
+

Note

+

+ While transactions are not discussed in this manual, a page size other than your filesystem's block size can affect transactional guarantees. The reason is that a page size larger than the filesystem's block size causes DB to write pages in block-size increments. As a result, it is possible for a partial page to be written as the result of a transactional commit. For more information, see http://www.sleepycat.com/docs/ref/transapp/reclimit.html.

+
+
+
+
+
+
+

Page Sizing Advice

+
+
+
+
+

+ Page sizing can be confusing at first, so here are some general + guidelines that you can use to select your page size. +

+

+ In general, and given no other considerations, a page size that is equal + to your filesystem block size is the ideal situation. +

+

+ If your data is designed such that 4 database entries cannot fit on a + single page (assuming BTree), then grow your page size to accommodate + your data. Once you've abandoned matching your filesystem's block + size, the general rule is that larger page sizes are better. +

+

+ The exception to this rule is if you have a great deal of + concurrency occurring in your application. In this case, the closer + you can match your page size to the ideal size needed for your + application's data, the better. Doing so will allow you to avoid + unnecessary contention for page locks. +

+
+
+
+ + + diff --git a/db/docs/gsg/JAVA/dbtJavaUsage.html b/db/docs/gsg/JAVA/dbtJavaUsage.html new file mode 100644 index 000000000..191aa5252 --- /dev/null +++ b/db/docs/gsg/JAVA/dbtJavaUsage.html @@ -0,0 +1,703 @@ + + + + + + Database Usage Example + + + + + + + + + +
+
+
+
+

Database Usage Example

+
+
+
+
+

+ In MyDbs Class we created + a class that opens and closes databases for us. + We now make use of that class to load inventory data into + two databases that we will use for our inventory system. +

+

+ Again, remember that you can find the complete implementation for these functions + in: +

+
DB_INSTALL/examples_java/src/com/sleepycat/examples/db/GettingStarted
+

+ where DB_INSTALL is the location where you + placed your DB distribution. +

+

Note that in this example, we are going to save two types of + information. First there are a series of inventory records that identify + information about some food items (fruits, vegetables, and desserts). + These records identify particulars about each item such as the vendor that + the item can be obtained from, how much the vendor has in stock, the price + per unit, and so forth.

+

+ We also want to manage vendor contact information, such as the + vendor's address and phone number, the sales representative's name + and his phone number, and so forth. +

+
+ +

+ Example 3.1 Inventory.java +

+

+ All Inventory data is encapsulated in an instance of the following + class. Note that because this class is not serializable, we need a + custom tuple binding in order to place it on a DatabaseEntry + object. Because the TupleInput and + TupleOutput classes used by custom tuple bindings + support Java numerical types and not Java numerical classes, we use + int and float here instead of the + corresponding Integer and Float + classes. + +

+ +
// File Inventory.java
+package com.sleepycat.examples.db.GettingStarted;
+
+public class Inventory {
+
+    private String sku;
+    private String itemName;
+    private String category;
+    private String vendor;
+    private int vendorInventory;
+    private float vendorPrice;
+
+    public void setSku(String data) {
+            sku = data;
+    }
+
+    public void setItemName(String data) {
+            itemName = data;
+    }
+
+    public void setCategory(String data) {
+            category = data;
+    }
+
+    public void setVendorInventory(int data) {
+            vendorInventory = data;
+    }
+
+    public void setVendor(String data) {
+            vendor = data;
+    }
+
+    public void setVendorPrice(float data) {
+            vendorPrice = data;
+    }
+
+    public String getSku() { return sku; }
+    public String getItemName() { return itemName; }
+    public String getCategory() { return category; }
+    public int getVendorInventory() { return vendorInventory; }
+    public String getVendor() { return vendor; }
+    public float getVendorPrice() { return vendorPrice; }
+
+} 
+
+
+ +

+ Example 3.2 Vendor.java +

+

+ The data for vendor records are stored in instances of the following + class. Notice that we are using serialization with this class for no + other reason than to demonstrate serializing a class instance. +

+ +
// File Vendor.java
+package com.sleepycat.examples.db.GettingStarted;
+
+import java.io.Serializable;
+
+public class Vendor implements Serializable {
+
+    private String repName;
+    private String address;
+    private String city;
+    private String state;
+    private String zipcode;
+    private String bizPhoneNumber;
+    private String repPhoneNumber;
+    private String vendor;
+
+    public void setRepName(String data) {
+        repName = data;
+    }
+
+    public void setAddress(String data) {
+        address = data;
+    }
+
+    public void setCity(String data) {
+        city = data;
+    }
+
+    public void setState(String data) {
+        state = data;
+    }
+
+    public void setZipcode(String data) {
+        zipcode = data;
+    }
+
+    public void setBusinessPhoneNumber(String data) {
+        bizPhoneNumber = data;
+    }
+
+    public void setRepPhoneNumber(String data) {
+        repPhoneNumber = data;
+    }
+
+    public void setVendorName(String data) {
+        vendor = data;
+    }
+
+    ...
+    // Corresponding getter methods omitted for brevity.
+    // See examples/com/sleepycat/examples/je/gettingStarted/
+    //   examples/Vendor.java
+    // for a complete implementation of this class.
+
+} 
+
+

+ Because we will not be using serialization to convert our + Inventory objects to a DatabaseEntry + object, we need a custom tuple binding: +

+
+ +

+ Example 3.3 InventoryBinding.java +

+ +
// File InventoryBinding.java
+package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+public class InventoryBinding extends TupleBinding {
+
+    // Implement this abstract method. Used to convert
+    // a DatabaseEntry to an Inventory object.
+    public Object entryToObject(TupleInput ti) {
+
+        String sku = ti.readString();
+        String itemName = ti.readString();
+        String category = ti.readString();
+        String vendor = ti.readString();
+        int vendorInventory = ti.readInt();
+        float vendorPrice = ti.readFloat();
+
+        Inventory inventory = new Inventory();
+        inventory.setSku(sku);
+        inventory.setItemName(itemName);
+        inventory.setCategory(category);
+        inventory.setVendor(vendor);
+        inventory.setVendorInventory(vendorInventory);
+        inventory.setVendorPrice(vendorPrice);
+
+        return inventory;
+    }
+
+    // Implement this abstract method. Used to convert a
+    // Inventory object to a DatabaseEntry object.
+    public void objectToEntry(Object object, TupleOutput to) {
+
+        Inventory inventory = (Inventory)object;
+
+        to.writeString(inventory.getSku());
+        to.writeString(inventory.getItemName());
+        to.writeString(inventory.getCategory());
+        to.writeString(inventory.getVendor());
+        to.writeInt(inventory.getVendorInventory());
+        to.writeFloat(inventory.getVendorPrice());
+    }
+} 
+
+

+ In order to store the data identified above, we write the + ExampleDatabaseLoad application. This application + loads the inventory and vendor databases for you. +

+

+ Inventory information is stored in a Database + dedicated for that purpose. The key for each such record is a product + SKU. The inventory data stored in this database are objects of the + Inventory class (see Inventory.java for more information). + ExampleDatabaseLoad loads the inventory database + as follows: +

+
+
    +
  1. +

    + Reads the inventory data from a flat text file prepared in + advance for this purpose. +

    +
  2. +
  3. +

    + Uses java.lang.String to create a key based + on the item's SKU. +

    +
  4. +
  5. +

    Uses an Inventory class instance for the + record data. This object is stored on a DatabaseEntry + object using InventoryBinding, a custom tuple + binding that we implemented above.

    +
  6. +
  7. +

    Saves each record to the inventory database.

    +
  8. +
+
+

Vendor information is also stored in a Database + dedicated for that purpose. The vendor data stored in this database are objects of the + Vendor class (see Vendor.java for more information). To load this + Database, ExampleDatabaseLoad + does the following:

+
+
    +
  1. +

    Reads the vendor data from a flat text file prepared in advance + for this purpose.

    +
  2. +
  3. +

    Uses the vendor's name as the record's key.

    +
  4. +
  5. +

    Uses a Vendor class instance for the + record data. This object is stored on a DatabaseEntry + object using com.sleepycat.bind.serial.SerialBinding.

    +
  6. +
+
+
+ +

+ Example 3.4 Stored Class Catalog Management with MyDbs +

+

+ Before we can write ExampleDatabaseLoad, we need to update + MyDbs.java to support the class catalogs that we need for this application. +

+

+ To do this, we start by importing an additional class to support stored class catalogs: +

+ +
// File: MyDbs.java
+package com.sleepycat.examples.db.GettingStarted;
+                                                                                                                                  
+import com.sleepycat.bind.serial.StoredClassCatalog;
+
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.DatabaseType;
+
+import java.io.FileNotFoundException; 
+

+ We also need to add two additional private data members to this class. One + supports the database used for the class catalog, and the other is used as a + handle for the class catalog itself. +

+ +
public class MyDbs {
+
+    // The databases that our application uses
+    private Database vendorDb = null;
+    private Database inventoryDb = null;
+    private Database classCatalogDb = null;
+
+    // Needed for object serialization
+    private StoredClassCatalog classCatalog;
+
+    private String vendordb = "VendorDB.db";
+    private String inventorydb = "InventoryDB.db";
+    private String classcatalogdb = "ClassCatalogDB.db";
+
+    // Our constructor does nothing
+    public MyDbs() {} 
+

+ Next we need to update the MyDbs.setup() method + to open the class catalog database and create the class catalog. +

+ +
    // The setup() method opens all our databases
+    // for us.
+    public void setup(String databasesHome)
+        throws DatabaseException {
+
+        DatabaseConfig myDbConfig = new DatabaseConfig();
+
+        ...
+        // Database configuration omitted for brevity
+        ...
+
+        // Now open, or create and open, our databases
+        // Open the vendors and inventory databases
+        try {
+            vendordb = databasesHome + "/" + vendordb;
+            vendorDb = new Database(vendordb,
+                                    null,
+                                    myDbConfig);
+
+            inventorydb = databasesHome + "/" + inventorydb;
+            inventoryDb = new Database(inventorydb,
+                                        null,
+                                        myDbConfig);
+
+            // Open the class catalog db. This is used to
+            // optimize class serialization.
+            classcatalogdb = databasesHome + "/" + classcatalogdb;
+            classCatalogDb = new Database(classcatalogdb,
+                                          null,
+                                          myDbConfig); 
+
+        } catch(FileNotFoundException fnfe) {
+            System.err.println("MyDbs: " + fnfe.toString());
+            System.exit(-1);
+        }
+    } 
+

+ Next we need a getter method to return the class catalog. Note that we do not provide a getter for the catalog database itself; our application has no need for that.

+

+ We must also update our close() method to close the class catalog database.

+ +
   // getter methods
+    public Database getVendorDB() {
+        return vendorDb;
+    }
+
+    public Database getInventoryDB() {
+        return inventoryDb;
+    }
+
+    public StoredClassCatalog getClassCatalog() {
+        return classCatalog;
+    }
+
+

+ Finally, we need our close() method: +

+ +
+
+    // Close the databases
+    public void close() {
+        try {
+            if (vendorDb != null) {
+                vendorDb.close();
+            }
+
+            if (inventoryDb != null) {
+                inventoryDb.close();
+            }
+
+            if (classCatalogDb != null) {
+                classCatalogDb.close();
+            }
+        } catch(DatabaseException dbe) {
+            System.err.println("Error closing MyDbs: " +
+                                dbe.toString());
+            System.exit(-1);
+        }
+    }
+} 
+
+

+ So far we have identified the data that we want to store in our + databases and how we will convert that data in and out of + DatabaseEntry objects for database storage. We + have also updated MyDbs to manage our databases + for us. Now we write ExampleDatabaseLoad to + actually put the inventory and vendor data into their respective + databases. Because of the work that we have done so far, this + application is actually fairly simple to write. +

+
+ +

+ Example 3.5 ExampleDatabaseLoad.java +

+

First we need the usual series of import statements:

+ +
// File: ExampleDatabaseLoad.java
+package com.sleepycat.examples.db.GettingStarted;
+                                                                                                                                       
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.DatabaseException; 
+

Next comes the class declaration and the private data members that + we need for this class. Most of these are setting up default values for + the program.

+

Note that two DatabaseEntry objects are instantiated here. We will reuse these for every database operation that this program performs. Also a MyDbs object is instantiated here. We can do this because its constructor never throws an exception. See Stored Class Catalog Management with MyDbs for its implementation details.

+

Finally, the inventory.txt and vendors.txt files can be found in the GettingStarted examples directory along with the classes described in this extended example.

+ +
public class ExampleDatabaseLoad {
+
+    private static String myDbsPath = "./";
+    private static File inventoryFile = new File("./inventory.txt");
+    private static File vendorsFile = new File("./vendors.txt");
+
+    // DatabaseEntries used for loading records
+    private static DatabaseEntry theKey = new DatabaseEntry();
+    private static DatabaseEntry theData = new DatabaseEntry();
+
+    // Encapsulates the databases.
+    private static MyDbs myDbs = new MyDbs(); 
+

+ Next comes the usage() and + main() methods. Notice the exception handling + in the main() method. This is the only place in the application where we + catch exceptions. For this reason, we must catch + DatabaseException which is thrown by the + com.sleepycat.db.* classes. +

+

Also notice the call to MyDbs.close() + in the finally block. This is the only place in the + application where MyDbs.close() is called. + MyDbs.close() is responsible for closing + all open Database + handles for you.

+ +
    private static void usage() {
+        System.out.println("ExampleDatabaseLoad [-h <database home>]");
+        System.out.println("      [-s <selections file>] [-v <vendors file>]");
+        System.exit(-1);
+    }
+
+    public static void main(String args[]) {
+        ExampleDatabaseLoad edl = new ExampleDatabaseLoad();
+        try {
+            edl.run(args);
+        } catch (DatabaseException dbe) {
+            System.err.println("ExampleDatabaseLoad: " + dbe.toString());
+            dbe.printStackTrace();
+        } catch (Exception e) {
+            System.out.println("Exception: " + e.toString());
+            e.printStackTrace();
+        } finally {
+            myDbs.close();
+        }
+        System.out.println("All done.");
+    } 
+

Next we write the ExampleDatabaseLoad.run() method. This method is responsible for initializing all objects. Because our environment and databases are all opened using the MyDbs.setup() method, the ExampleDatabaseLoad.run() method is only responsible for calling MyDbs.setup() and then calling the ExampleDatabaseLoad methods that actually load the databases.

+ +
    private void run(String args[]) throws DatabaseException {
+        // Parse the arguments list
+        parseArgs(args);
+
+        myDbs.setup(myDbsPath); // path to the environment home
+
+        System.out.println("loading vendors db.");
+        loadVendorsDb();
+        System.out.println("loading inventory db.");
+        loadInventoryDb();
+    } 
+

This next method loads the vendor database. This method + uses serialization to convert the Vendor object + to a DatabaseEntry object.

+ +
   private void loadVendorsDb() 
+            throws DatabaseException {
+
+        // loadFile opens a flat-text file that contains our data
+        // and loads it into a list for us to work with. The integer
+        // parameter represents the number of fields expected in the
+        // file.
+        ArrayList vendors = loadFile(vendorsFile, 8);
+
+        // Now load the data into the database. The vendor's name is the
+        // key, and the data is a Vendor class object.
+
+        // Need a serial binding for the data
+        EntryBinding dataBinding =
+            new SerialBinding(myDbs.getClassCatalog(), Vendor.class);
+
+        for (int i = 0; i < vendors.size(); i++) {
+            String[] sArray = (String[])vendors.get(i);
+            Vendor theVendor = new Vendor();
+            theVendor.setVendorName(sArray[0]);
+            theVendor.setAddress(sArray[1]);
+            theVendor.setCity(sArray[2]);
+            theVendor.setState(sArray[3]);
+            theVendor.setZipcode(sArray[4]);
+            theVendor.setBusinessPhoneNumber(sArray[5]);
+            theVendor.setRepName(sArray[6]);
+            theVendor.setRepPhoneNumber(sArray[7]);
+
+            // The key is the vendor's name.
+            // ASSUMES THE VENDOR'S NAME IS UNIQUE!
+            String vendorName = theVendor.getVendorName();
+            try {
+                theKey = new DatabaseEntry(vendorName.getBytes("UTF-8"));
+            } catch (IOException willNeverOccur) {}
+
+            // Convert the Vendor object to a DatabaseEntry object
+            // using our SerialBinding
+            dataBinding.objectToEntry(theVendor, theData);
+
+            // Put it in the database.
+            myDbs.getVendorDB().put(null, theKey, theData);
+        }
+    } 
+

Now load the inventory database. This method uses our + custom tuple binding (see InventoryBinding.java) to convert the Inventory + object to a DatabaseEntry object.

+ +
    private void loadInventoryDb() 
+        throws DatabaseException {
+
+        // loadFile opens a flat-text file that contains our data
+        // and loads it into a list for us to work with. The integer
+        // parameter represents the number of fields expected in the
+        // file.
+        ArrayList inventoryArray = loadFile(inventoryFile, 6);
+
+        // Now load the data into the database. The item's sku is the
+        // key, and the data is an Inventory class object.
+
+        // Need a tuple binding for the Inventory class.
+        TupleBinding inventoryBinding = new InventoryBinding();
+
+        for (int i = 0; i < inventoryArray.size(); i++) {
+            String[] sArray = (String[])inventoryArray.get(i);
+            String sku = sArray[1];
+            try {
+                theKey = new DatabaseEntry(sku.getBytes("UTF-8"));
+            } catch (IOException willNeverOccur) {}
+
+            Inventory theInventory = new Inventory();
+            theInventory.setItemName(sArray[0]);
+            theInventory.setSku(sArray[1]);
+            Float price = new Float(sArray[2]);
+            theInventory.setVendorPrice(price.floatValue());
+            Integer vInventory = new Integer(sArray[3]);
+            theInventory.setVendorInventory(vInventory.intValue());
+            theInventory.setCategory(sArray[4]);
+            theInventory.setVendor(sArray[5]);
+
+            // Place the Inventory object on the DatabaseEntry object using
+            // the tuple binding we implemented in
+            // InventoryBinding.java
+            inventoryBinding.objectToEntry(theInventory, theData);
+
+            // Put it in the database.
+            myDbs.getInventoryDB().put(null, theKey, theData);
+        }
+    }
+

+ The remainder of this application provides utility methods to + read a flat text file into an array of strings and parse the + command line options: +

+ +
    private static void parseArgs(String args[]) {
+        // Implementation omitted for brevity.
+    }
+
+    private ArrayList loadFile(File theFile, int numFields) {
+        ArrayList records = new ArrayList();
+        // Implementation omitted for brevity.
+        return records;
+    }
+
+    protected ExampleDatabaseLoad() {}
+} 
+

+ From the perspective of this document, these + things are relatively uninteresting. You can see how they are + implemented by looking at ExampleDatabaseLoad.java + in: +

+
DB_INSTALL/examples_java/src/com/sleepycat/examples/db/GettingStarted
+

+ where DB_INSTALL is the location where you + placed your DB distribution. +

+
+
+ + + diff --git a/db/docs/gsg/JAVA/environments.html b/db/docs/gsg/JAVA/environments.html new file mode 100644 index 000000000..fa1493eb4 --- /dev/null +++ b/db/docs/gsg/JAVA/environments.html @@ -0,0 +1,150 @@ + + + + + + Environments + + + + + + + + + +
+
+
+
+

Environments

+
+
+
+
+

+ This manual is meant as an introduction to the Berkeley DB library. Consequently, it describes how to build a very simple, single-threaded application, and it omits a great many powerful aspects of the DB database engine that are not required by simple applications. One of these is important enough that it warrants a brief overview here: environments.

+

+ While environments are frequently not used by applications running in embedded environments where every byte counts, they will be used by virtually any other DB application requiring anything other than the bare minimum functionality. An environment is essentially an encapsulation of one or more databases: you open an environment and then open databases in that environment. When you do so, the databases are created or located relative to the environment's home directory.

+

+ Environments offer a great many features that a stand-alone DB + database cannot offer: +

+
+
    +
  • +

    + Multi-database files. +

    +

    + DB makes it possible to store multiple databases in a single physical file on disk. This is desirable for applications that open more than a handful of databases. However, in order to have more than one database contained in a single physical file, your application must use an environment.

    +
  • +
  • +

    + Multi-thread and multi-process support +

    +

    + When you use an environment, resources such as the in-memory + cache and locks can be shared by all of the databases opened in the + environment. The environment allows you to enable + subsystems that are designed to allow multiple threads and/or + processes to access DB databases. For example, you use an + environment to enable the concurrent data store (CDS), the + locking subsystem, and/or the shared memory buffer pool. +

    +
  • +
  • +

    + Transactional processing +

    +

    + DB offers a transactional subsystem that allows for full + ACID-protection of your database writes. You use environments to + enable the transactional subsystem, and then subsequently to obtain + transaction IDs. +

    +
  • +
  • +

    + High availability (replication) support +

    +

    + DB offers a replication subsystem that enables + single-master database replication with multiple read-only + copies of the replicated data. You use environments to enable + and then manage this subsystem. +

    +
  • +
  • +

    + Logging subsystem +

    +

    + DB offers write-ahead logging for applications that want to obtain a high degree of recoverability in the face of an application or system crash. Once enabled, the logging subsystem allows the application to perform two kinds of recovery ("normal" and "catastrophic") through the use of the information contained in the log files.

    +
  • +
+
+

+ All of these topics are described in the Berkeley DB + Programmer's Reference Guide. +

+
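+ Although environments are beyond the scope of this manual, the basic usage pattern is straightforward: configure and open an Environment, then open your databases through it. The following is only an illustrative sketch; it assumes the com.sleepycat.db Environment and EnvironmentConfig classes with the setters and the openDatabase() signature shown here (consult the javadoc for the exact API), and the environment home directory /export/dbEnv is a made-up path:
+
+package com.sleepycat.examples.db.GettingStarted;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.DatabaseType;
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.EnvironmentConfig;
+
+...
+
+Environment myEnv = null;
+Database myDb = null;
+try {
+    // Open (or create) the environment, enabling the shared cache.
+    EnvironmentConfig envConfig = new EnvironmentConfig();
+    envConfig.setAllowCreate(true);
+    envConfig.setInitializeCache(true);
+    myEnv = new Environment(new File("/export/dbEnv"), envConfig);
+
+    // Open a database relative to the environment's home directory.
+    DatabaseConfig dbConfig = new DatabaseConfig();
+    dbConfig.setAllowCreate(true);
+    dbConfig.setType(DatabaseType.BTREE);
+    myDb = myEnv.openDatabase(null, "sampleDatabase.db", null, dbConfig);
+} catch (DatabaseException de) {
+    // Exception handling goes here.
+} catch (FileNotFoundException fnfe) {
+    // Exception handling goes here.
+}
+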
+ + + diff --git a/db/docs/gsg/JAVA/gettingStarted.css b/db/docs/gsg/JAVA/gettingStarted.css new file mode 100644 index 000000000..c1b4c86b7 --- /dev/null +++ b/db/docs/gsg/JAVA/gettingStarted.css @@ -0,0 +1,41 @@ +body { width: 45em; + margin-left: 3em; + font-family: Arial, Helvetica, sans-serif; + font-size: 11pt; + } + +h2.title { margin-left: -1em; + font-family: Verdana, serif; + font-size: 16pt; + } + +h3.title { font-family: Verdana, serif; + font-size: 14pt; + } + +pre.programlisting { + font-family: monospace; + background-color: #eae8e9; +} + +div.navheader { font-size: 9pt; + width: 60em; + margin-left: -2em; + } + +div.navheader table tr td { font-size: 9pt; } + +div.navfooter { font-size: 9pt; + width: 60em; + margin-left: -2em; + } +div.navfooter table tr td { font-size: 9pt; } + +span.emphasis { font-style: italic; font-size: 9pt;} + +div.appendix div.informaltable { font-size: 9pt; } +div.appendix div.informaltable td { vertical-align: top; } +div.appendix div.informaltable p { margin-top: .25em; } +div.appendix div.informaltable p { margin-bottom: .25em; } + + diff --git a/db/docs/gsg/JAVA/gettingit.html b/db/docs/gsg/JAVA/gettingit.html new file mode 100644 index 000000000..c722aefce --- /dev/null +++ b/db/docs/gsg/JAVA/gettingit.html @@ -0,0 +1,77 @@ + + + + + + Getting and Using DB + + + + + + + + + +
+
+
+
+

Getting and Using DB

+
+
+
+
+

+ You can obtain DB by visiting the Sleepycat download page: + http://www.sleepycat.com/download/index.shtml. +

+

+ To install DB, untar or unzip the distribution to the directory of + your choice. You will then need to build the product binaries. + For information on building DB, see + DB_INSTALL/docs/index.html, + where DB_INSTALL is the directory where you unpacked + DB. On that page, you will find links to platform-specific build + instructions. +

+

+ That page also contains links to more documentation for DB. In + particular, you will find links for the Berkeley DB + Programmer's Tutorial and Reference Guide as + well as the API reference documentation. +

+
+ + + diff --git a/db/docs/gsg/JAVA/index.html b/db/docs/gsg/JAVA/index.html new file mode 100644 index 000000000..19c6ddde8 --- /dev/null +++ b/db/docs/gsg/JAVA/index.html @@ -0,0 +1,503 @@ + + + + + + Getting Started with Berkeley DB + + + + + + + +
+
+
+
+

Getting Started with Berkeley DB

+
+
+
+

+ Legal Notice +

+

+ This documentation is distributed under the terms of the Sleepycat + public license. You may review the terms of this license at: + http://www.sleepycat.com/download/oslicense.html +

+

+ Sleepycat Software, Berkeley DB, Berkeley DB XML and the Sleepycat logo + are trademarks or service marks of Sleepycat Software, Inc. All rights to + these marks are reserved. No third-party use is permitted without the + express prior written consent of Sleepycat Software, Inc. +

+

+ Java™ and all Java-based marks are a trademark + or registered trademark of Sun Microsystems, + Inc, in the United States and other countries. +

+

+ To obtain a copy of this document's original source code, please write + to . +

+
+
+
+

9/22/2004

+
+
+
+
+
+
+

+ Table of Contents +

+
+
+ + Preface + +
+
+
+
+ + Conventions Used in this Book + +
+
+
+
+ + 1. Introduction to Berkeley DB + +
+
+
+
+ + About This Manual + +
+
+ + Berkeley DB Concepts + +
+
+ + Access Methods + +
+
+
+
+ + Selecting Access Methods + +
+
+ + Choosing between BTree and Hash + +
+
+ + Choosing between Queue and Recno + +
+
+
+
+ + Database Limits and Portability + +
+
+ + Environments + +
+
+ + Exception Handling + +
+
+ + Error Returns + +
+
+ + Getting and Using DB + +
+
+
+
+ + 2. Databases + +
+
+
+
+ + Opening Databases + +
+
+ + Closing Databases + +
+
+ + Database Properties + +
+
+ + Administrative Methods + +
+
+ + Error Reporting Functions + +
+
+ + Managing Databases in Environments + +
+
+ + Database Example + +
+
+
+
+ + 3. Database Records + +
+
+
+
+ + Using Database Records + +
+
+ + Reading and Writing Database Records + +
+
+
+
+ + Writing Records to the Database + +
+
+ + Getting Records from the Database + +
+
+ + Deleting Records + +
+
+ + Data Persistence + +
+
+
+
+ + Using the BIND APIs + +
+
+
+
+ + Numerical and String Objects + +
+
+ + Serializeable Complex Objects + +
+
+ + Custom Tuple Bindings + +
+
+
+
+ + Database Usage Example + +
+
+
+
+ + 4. Using Cursors + +
+
+
+
+ + Opening and Closing Cursors + +
+
+ + Getting Records Using the Cursor + +
+
+
+
+ + Searching for Records + +
+
+ + Working with Duplicate Records + +
+
+
+
+ + Putting Records Using Cursors + +
+
+ + Deleting Records Using Cursors + +
+
+ + Replacing Records Using Cursors + +
+
+ + Cursor Example + +
+
+
+
+ + 5. Secondary Databases + +
+
+
+
+ + Opening and Closing Secondary Databases + +
+
+ + Implementing Key + Creators + + + +
+
+ + Secondary Database Properties + +
+
+ + Reading Secondary Databases + +
+
+ + Deleting Secondary Database Records + +
+
+ + + Using Secondary Cursors + + + +
+
+ + Database Joins + +
+
+
+
+ + Using Join Cursors + +
+
+ + JoinCursor Properties + +
+
+
+
+ + Secondary Database Example + +
+
+
+
+ + Opening Secondary Databases with MyDbs + +
+
+ + Using Secondary Databases with ExampleDatabaseRead + +
+
+
+
+
+
+ + 6. Database Configuration + +
+
+
+
+ + Setting the Page Size + +
+
+
+
+ + Overflow Pages + +
+
+ + Locking + +
+
+ + IO Efficiency + +
+
+ + Page Sizing Advice + +
+
+
+
+ + Selecting the Cache Size + +
+
+ + BTree Configuration + +
+
+
+
+ + Allowing Duplicate Records + +
+
+ + Setting Comparison Functions + +
+
+
+
+
+
+
+ +
+ + + diff --git a/db/docs/gsg/JAVA/indexes.html b/db/docs/gsg/JAVA/indexes.html new file mode 100644 index 000000000..4c2af490c --- /dev/null +++ b/db/docs/gsg/JAVA/indexes.html @@ -0,0 +1,379 @@ + + + + + + Chapter 5. Secondary Databases + + + + + + + + + +
+
+
+
+

Chapter 5. Secondary Databases

+
+
+
+
+ +

+ Usually you find database records by means of the record's key. However, the key that you use for your record will not always contain the information required to provide you with rapid access to the data that you want to retrieve. For example, suppose your Database contains records related to users. The key might be a string that is some unique identifier for the person, such as a user ID. Each record's data, however, would likely contain a complex object containing details about people such as names, addresses, phone numbers, and so forth. While your application may frequently want to query a person by user ID (that is, by the information stored in the key), it may also on occasion want to locate people by, say, their name.

+

+ Rather than iterate through all of the records in your database, examining each in turn for a given person's name, you create indexes based on names and then just search that index for the name that you want. You can do this using secondary databases. In DB, the Database that contains your data is called a primary database. A database that provides an alternative set of keys to access that data is called a secondary database. In a secondary database, the keys are your alternative (or secondary) index, and the data corresponds to a primary record's key.

+

+ You create a secondary database by using a SecondaryConfig + class object to identify an implementation of a + SecondaryKeyCreator + class object that is used to create keys based on data found in the primary + database. You then pass this SecondaryConfig + object to the SecondaryDatabase constructor. +

+

+ Once opened, DB manages secondary databases for you. Adding or deleting + records in your primary database causes DB to update the secondary as + necessary. Further, changing a record's data in the primary database may cause + DB to modify a record in the secondary, depending on whether the change + forces a modification of a key in the secondary database. +

+

+ Note that you cannot write directly to a secondary database. To change the data referenced by a SecondaryDatabase record, modify the primary database instead. The exception to this rule is that delete operations are allowed on the SecondaryDatabase object. See Deleting Secondary Database Records for more information.

+
+

Note

+

+ + Secondary database records are updated/created by DB + only if the + SecondaryKeyCreator.createSecondaryKey() method + + returns + true. + + If + false + + is returned, then DB will not add the key to the secondary database, and + in the event of a record update it will remove any existing key. + + + +

+

+ See Implementing Key + Creators + + for more + information on this interface and method. + + +

+
+

+ When you read a record from a secondary database, DB automatically + returns + + the data and optionally the key + from the corresponding record in the primary database. + +

+
+
+
+
+

Opening and Closing Secondary Databases

+
+
+
+
+

+ You manage secondary database opens and closes using the + + + SecondaryDatabase constructor. + + Just as is the case with primary databases, you must provide + + + the SecondaryDatabase() constructor + + with the database's + name and, optionally, other properties such as whether duplicate + records are allowed, or whether the secondary database can be created on + open. In addition, you must also provide: +

+
+
    +
  • +

    A handle to the primary database that this secondary database is + indexing. Note that this means that secondary databases are maintained + only for the specified Database handle. If you + open the same Database multiple times for write + (such as might occur when opening a database for read-only and read-write in the same application), + then you should open the SecondaryDatabase for + each such Database handle.

    +
  • +
  • +

    A SecondaryConfig object that provides + properties specific to a secondary database. The most important of + these is used to identify the key creator for the database. The key + creator is responsible for generating keys for the secondary database. + See Secondary Database Properties for details.

    +
  • +
+
+

So to open (create) a secondary database, you:

+
+
    +
  1. +

    Open your primary database.

    +
  2. +
  3. +

    Instantiate your key creator.

    +
  4. +
  5. +

    Instantiate your SecondaryConfig object.

    +
  6. +
  7. +

    Set your key creator object on your SecondaryConfig + object.

    +
  8. +
  9. +

    Open your secondary database, specifying your primary database + and your SecondaryConfig at that time.

    +
  10. +
+
+

For example:

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseType;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.SecondaryDatabase;
+import com.sleepycat.db.SecondaryConfig;
+
+import java.io.FileNotFoundException;
+
+...
+
+DatabaseConfig myDbConfig = new DatabaseConfig();
+myDbConfig.setAllowCreate(true);
+myDbConfig.setType(DatabaseType.BTREE);
+
+SecondaryConfig mySecConfig = new SecondaryConfig();
+mySecConfig.setAllowCreate(true);
+mySecConfig.setType(DatabaseType.BTREE);
+// Duplicates are frequently required for secondary databases.
+mySecConfig.setSortedDuplicates(true);
+
+// Open the primary
+Database myDb = null;
+SecondaryDatabase mySecDb = null;
+try {
+    String dbName = "myPrimaryDatabase";
+
+    myDb = new Database(dbName, null, myDbConfig);
+
+    // A fake tuple binding that is not actually implemented anywhere.
+    // The tuple binding is dependent on the data in use.
+    // Tuple bindings are described earlier in this manual.
+    TupleBinding myTupleBinding = new MyTupleBinding();
+
+    // Instantiate the key creator used by the secondary.
+    // Key creators are described in the next section.
+    FullNameKeyCreator keyCreator = new FullNameKeyCreator(myTupleBinding);
+
+    // Set the key creator on the secondary's configuration object.
+    mySecConfig.setKeyCreator(keyCreator);
+
+    // Perform the actual open
+    String secDbName = "mySecondaryDatabase";
+    mySecDb = new SecondaryDatabase(secDbName, null, myDb, mySecConfig); 
+} catch (DatabaseException de) {
+    // Exception handling goes here ...
+} catch (FileNotFoundException fnfe) {
+    // Exception handling goes here ...
+}
+

To close a secondary database, call its close() method. Note that + for best results, you should close all the secondary databases associated + with a primary database before closing the primary.

+

For example:

+ +
try {
+    if (mySecDb != null) {
+        mySecDb.close();
+    }
+
+    if (myDb != null) {
+        myDb.close(); 
+    }
+} catch (DatabaseException dbe) {
+    // Exception handling goes here
+}
+
+
+ + + diff --git a/db/docs/gsg/JAVA/introduction.html b/db/docs/gsg/JAVA/introduction.html new file mode 100644 index 000000000..f9ec3edec --- /dev/null +++ b/db/docs/gsg/JAVA/introduction.html @@ -0,0 +1,234 @@ + + + + + + Chapter 1. Introduction to Berkeley DB + + + + + + + + + +
+
+
+
+

Chapter 1. Introduction to Berkeley DB

+
+
+
+
+
+

+ Table of Contents +

+
+
+ + About This Manual + +
+
+ + Berkeley DB Concepts + +
+
+ + Access Methods + +
+
+
+
+ + Selecting Access Methods + +
+
+ + Choosing between BTree and Hash + +
+
+ + Choosing between Queue and Recno + +
+
+
+
+ + Database Limits and Portability + +
+
+ + Environments + +
+
+ + Exception Handling + +
+
+ + Error Returns + +
+
+ + Getting and Using DB + +
+
+
+

+ Welcome to Sleepycat's Berkeley DB (DB). DB is a general-purpose embedded + database engine that is capable of providing a wealth of data management services. + It is designed from the ground up for high-throughput applications requiring + in-process, bullet-proof management of mission-critical data. DB can + gracefully scale from managing a few bytes to terabytes of data. For the most + part, DB is limited only by your system's available physical resources. +

+

+ Because DB is an embedded database engine, it is extremely fast. You compile + and link it into your application in the same way as you would any + third-party library. This means that DB runs in the same process space + as does your application, allowing you to avoid the high cost of + interprocess communications incurred by stand-alone database servers. +

+

+ To further improve performance, DB offers an in-memory cache designed to + provide rapid access to your most frequently used data. Once configured, + cache usage is transparent. It requires very little attention on the part + of the application developer. +

+

+ Beyond raw speed, DB is also extremely configurable. It provides several + different ways of organizing your data in its databases. Known as + access methods, each such data organization mechanism + provides different characteristics that are appropriate for different data + management profiles. (Note that this manual focuses almost entirely on the + BTree access method as this is the access method used by the vast majority + of DB applications). +

+

+ To further improve its configurability, DB offers many different + subsystems, each of which can be used to extend DB's capabilities. For + example, many applications require write-protection of their data so + as to ensure that data is never left in an inconsistent state for any + reason (such as software bugs or hardware failures). For those + applications, a transaction subsystem can be enabled and used to + transactionally protect database writes. +

+

+ The list of operating systems on which DB is available is too long to detail here. Suffice it to say that it is available on all major commercial operating systems, as well as on many embedded platforms.

+

+ Finally, DB is available in a wealth of programming languages. Sleepycat + officially supports DB in C, C++, and Java, but the library is also + available in many other languages, especially scripting languages such as + Perl and Python. +

+
+

Note

+

+ Before going any further, it is important to mention that DB is not + a relational database (although you could use it to build a relational + database). Out of the box, DB does not provide higher-level features + such as triggers, or a high-level query language such as SQL. + Instead, DB provides just those minimal + APIs required to store and retrieve your data as + efficiently as possible. +

+ + + +
+
+
+
+
+

About This Manual

+
+
+
+
+

+ This manual introduces DB. As such, this book does not examine + intermediate or advanced features such as threaded library usage or + transactional usage. Instead, this manual provides a step-by-step + introduction to DB's basic concepts and library usage. +

+

+ Specifically, this manual introduces DB environments, databases, + database records, and storage and retrieval of database records. This + book also introduces cursors and their usage, and it describes + secondary databases. +

+

+ For the most part, this manual focuses on the BTree access method. A + chapter is given at the end of this manual that describes some of the + concepts involving BTree usage, such as duplicate record management and comparison + routines. +

+

+ Examples are given throughout this book that are designed to illustrate + API usage. At the end of each chapter, a complete example is given that + is designed to reinforce the concepts covered in that chapter. In + addition to being presented in this book, these final programs are also + available in the DB software distribution. You can find them in +

+
DB_INSTALL/examples_java/src/com/sleepycat/examples/db/GettingStarted
+

+ where DB_INSTALL is the + location where you placed your DB distribution. +

+

+ This book uses the Java programming language for its examples. Note that versions of this book exist for the C and C++ languages as well.

+
+
+ + + diff --git a/db/docs/gsg/JAVA/javaindexusage.html b/db/docs/gsg/JAVA/javaindexusage.html new file mode 100644 index 000000000..08c269405 --- /dev/null +++ b/db/docs/gsg/JAVA/javaindexusage.html @@ -0,0 +1,498 @@ + + + + + + Secondary Database Example + + + + + + + + + +
+
+
+
+

Secondary Database Example

+
+
+
+
+

In previous chapters in this book, we built applications that load + and display several DB databases. In this example, we will extend those + examples to use secondary databases. Specifically:

+
+ +
+

+ Before we can use a secondary database, we must implement a class to extract secondary keys for us. + We use ItemNameKeyCreator for this purpose. +

+
+ +

+ Example 5.1 ItemNameKeyCreator.java +

+

+ This class assumes the primary database + uses Inventory objects for the record data. The + Inventory class is described in Inventory.java.

+

In our key creator class, we make use of a custom tuple binding + called InventoryBinding. This class is described in InventoryBinding.java.

+

You can find InventoryBinding.java in:

+
DB_INSTALL/examples_java/src/com/sleepycat/examples/db/GettingStarted
+

+ where DB_INSTALL is the location where you + placed your DB distribution. +

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.SecondaryDatabase;
+import com.sleepycat.db.SecondaryKeyCreator;
+import com.sleepycat.bind.tuple.TupleBinding;
+
+import java.io.IOException;
+
+public class ItemNameKeyCreator implements SecondaryKeyCreator {
+
+    private TupleBinding theBinding;
+
+    // Use the constructor to set the tuple binding
+    ItemNameKeyCreator(TupleBinding binding) {
+        theBinding = binding;
+    }
+
+    // Abstract method that we must implement
+    public boolean createSecondaryKey(SecondaryDatabase secDb,
+        DatabaseEntry keyEntry,    // From the primary
+        DatabaseEntry dataEntry,   // From the primary
+        DatabaseEntry resultEntry) // set the key data on this.
+        throws DatabaseException {
+
+        if (dataEntry == null) {
+            throw new DatabaseException("Missing primary record data " +
+                "in key creator.");
+        }
+
+        try {
+            // Convert dataEntry to an Inventory object
+            Inventory inventoryItem = 
+                (Inventory) theBinding.entryToObject(dataEntry);
+            // Get the item name and use that as the key
+            String theItem = inventoryItem.getItemName();
+            resultEntry.setData(theItem.getBytes("UTF-8"));
+        } catch (IOException willNeverOccur) {}
+
+        return true;
+    }
+} 
+
+

+ Now that we have a key creator, we can use it to generate keys for a + secondary database. We will now extend MyDbs + to manage a secondary database, and to use + ItemNameKeyCreator to generate keys for that + secondary database. +

+
+
+
+
+

Opening Secondary Databases with MyDbs

+
+
+
+
+

In Stored Class Catalog Management with MyDbs we built + MyDbs as an example of a class that + encapsulates + Database opens and closes. We will now extend + that class to manage a SecondaryDatabase.

+
+ +

+ Example 5.2 SecondaryDatabase Management with MyDbs +

+

+ We start by importing two additional classes needed to support secondary databases. + We also add a global variable to use as a handle for our secondary database. +

+ +
// File MyDbs.java
+package com.sleepycat.examples.db.GettingStarted;
+                                                                                                                                       
+import java.io.FileNotFoundException;
+                                                                                                                                       
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.DatabaseType;
+import com.sleepycat.db.SecondaryConfig;
+import com.sleepycat.db.SecondaryDatabase;
+
+public class MyDbs {
+
+    // The databases that our application uses
+    private Database vendorDb = null;
+    private Database inventoryDb = null;
+    private Database classCatalogDb = null;
+    private SecondaryDatabase itemNameIndexDb = null;
+
+    private String vendordb = "VendorDB.db";
+    private String inventorydb = "InventoryDB.db";
+    private String classcatalogdb = "ClassCatalogDB.db";
+    private String itemnameindexdb = "ItemNameIndexDB.db";
+
+    // Needed for object serialization
+    private StoredClassCatalog classCatalog;
+
+    // Our constructor does nothing
+    public MyDbs() {} 
+

+ Next we update the MyDbs.setup() method to open the + secondary database. As a part of this, we have to pass an + ItemNameKeyCreator object on the call to open the secondary + database. Also, in order to instantiate ItemNameKeyCreator, we need an + InventoryBinding object (we described this class in + InventoryBinding.java). + We do all this work together inside of MyDbs.setup(). +

+ +
    public void setup(String databasesHome)
+        throws DatabaseException {
+        DatabaseConfig myDbConfig = new DatabaseConfig();
+        SecondaryConfig mySecConfig = new SecondaryConfig();
+
+        myDbConfig.setErrorStream(System.err);
+        mySecConfig.setErrorStream(System.err);
+        myDbConfig.setErrorPrefix("MyDbs");
+        mySecConfig.setErrorPrefix("MyDbs");
+        myDbConfig.setType(DatabaseType.BTREE);
+        mySecConfig.setType(DatabaseType.BTREE);
+        myDbConfig.setAllowCreate(true);
+        mySecConfig.setAllowCreate(true);
+
+        // Now open, or create and open, our databases
+        // Open the vendors and inventory databases
+        try {
+            vendordb = databasesHome + "/" + vendordb;
+            vendorDb = new Database(vendordb,
+                                    null,
+                                    myDbConfig);
+
+            inventorydb = databasesHome + "/" + inventorydb;
+            inventoryDb = new Database(inventorydb,
+                                        null,
+                                        myDbConfig);
+
+            // Open the class catalog db. This is used to
+            // optimize class serialization.
+            classcatalogdb = databasesHome + "/" + classcatalogdb;
+            classCatalogDb = new Database(classcatalogdb,
+                                          null,
+                                          myDbConfig);
+        } catch(FileNotFoundException fnfe) {
+            System.err.println("MyDbs: " + fnfe.toString());
+            System.exit(-1);
+        }
+
+        // Create our class catalog
+        classCatalog = new StoredClassCatalog(classCatalogDb);
+
+        // Need a tuple binding for the Inventory class.
+        // We use the InventoryBinding class
+        // that we implemented for this purpose.
+        TupleBinding inventoryBinding = new InventoryBinding();
+
+        // Open the secondary database. We use this to create a
+        // secondary index for the inventory database
+
+        // We want to maintain an index for the inventory entries based
+        // on the item name. So, instantiate the appropriate key creator
+        // and open a secondary database.
+        ItemNameKeyCreator keyCreator =
+            new ItemNameKeyCreator(new InventoryBinding());
+
+        // Set up additional secondary properties
+        // Need to allow duplicates for our secondary database
+        mySecConfig.setSortedDuplicates(true);
+        mySecConfig.setAllowPopulate(true); // Allow autopopulate
+        mySecConfig.setKeyCreator(keyCreator);
+        // Now open it
+        try {
+            itemnameindexdb = databasesHome + "/" + itemnameindexdb;
+            itemNameIndexDb = new SecondaryDatabase(itemnameindexdb,
+                                                    null,
+                                                    inventoryDb,
+                                                    mySecConfig);
+        } catch(FileNotFoundException fnfe) {
+            System.err.println("MyDbs: " + fnfe.toString());
+            System.exit(-1);
+        }
+    }
+    
+

+ Next we need an additional getter method for returning the secondary database. +

+ +
    public SecondaryDatabase getNameIndexDB() {
+        return itemNameIndexDb;
+    } 
+

Finally, we need to update the MyDbs.close() + method to close the new secondary database. We want to make sure that + the secondary is closed before the primaries. While + this is not necessary for this example because our + closes are single-threaded, it is still a good habit to adopt.

+ +
    public void close() {
+        try {
+            if (itemNameIndexDb != null) {
+                itemNameIndexDb.close();
+            }
+
+            if (vendorDb != null) {
+                vendorDb.close();
+            }
+
+            if (inventoryDb != null) {
+                inventoryDb.close();
+            }
+
+            if (classCatalogDb != null) {
+                classCatalogDb.close();
+            }
+
+        } catch(DatabaseException dbe) {
+            System.err.println("Error closing MyDbs: " +
+                                dbe.toString());
+            System.exit(-1);
+        }
+    }
+} 
+

That completes our update to MyDbs. You + can find the complete class implementation in: +

+
DB_INSTALL/examples_java/src/com/sleepycat/examples/db/GettingStarted
+

+ where DB_INSTALL is the location where you + placed your DB distribution. +

+
+
+
+
+
+
+

Using Secondary Databases with ExampleDatabaseRead

+
+
+
+
+

Because we performed all our secondary database configuration management in + MyDbs, we do not need to modify ExampleDatabaseLoad at all in + order to create our secondary indices. When ExampleDatabaseLoad calls + MyDbs.setup(), all of the necessary work is performed for us. +

+

+ However, we still need to take advantage of the new secondary indices. We do this by updating + ExampleDatabaseRead to allow us to query for an inventory record based on its name. + Remember that the primary key for an inventory record is the item's SKU. The item's name is contained in the + Inventory object that is stored as each record's data in the inventory database. But + our new secondary index now allows us to easily query based on the item's name. +

+

+ For this update, we modify + ExampleDatabaseRead to accept a new command line switch, + -s, whose argument is the name of an inventory item. + If the switch is present on the command line call to + ExampleDatabaseRead, then the application will + use the secondary database to look up and display all the inventory + records with that item name. Note that we use a SecondaryCursor + to seek to the item name key and then display all matching records. +

+

Remember that you can find ExampleDatabaseRead.java in:

+
DB_INSTALL/examples_java/src/com/sleepycat/examples/db/GettingStarted
+

+ where DB_INSTALL is the location where you + placed your DB distribution. +

+
+ +

+ Example 5.3 SecondaryDatabase usage with ExampleDatabaseRead +

+

+ First we need to import an additional class in order to use the secondary cursor: +

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import java.io.IOException;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.db.Cursor;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.LockMode;
+import com.sleepycat.db.OperationStatus;
+import com.sleepycat.db.SecondaryCursor; 
+

Next we add a single global variable:

+ +
    public class ExampleDatabaseRead {
+
+    private static String myDbsPath = "./";
+
+    // Encapsulates the database environment and databases.
+    private static MyDbs myDbs = new MyDbs();
+
+    private static TupleBinding inventoryBinding;
+    private static EntryBinding vendorBinding;
+
+    // The item to locate if the -s switch is used
+    private static String locateItem; 
+

Next we update ExampleDatabaseRead.run() to + check to see if the locateItem global variable has a + value. If it does, then we show just those records related to the item + name passed on the -s switch.

+ +
    private void run(String args[]) 
+        throws DatabaseException {
+        // Parse the arguments list
+        parseArgs(args);
+
+        myDbs.setup(myDbsPath);
+
+        // Setup our bindings.
+        inventoryBinding = new InventoryBinding();
+        vendorBinding =
+             new SerialBinding(myDbs.getClassCatalog(),
+                               Vendor.class);
+
+        if (locateItem != null) {
+            showItem();
+        } else {
+            showAllInventory();
+        }
+    } 
+

+ Finally, we need to implement ExampleDatabaseRead.showItem(). + This is a fairly simple method that opens a secondary cursor, + and then displays every primary record that is related to the secondary + key identified by the locateItem global variable. +

+ +
    private void showItem() throws DatabaseException {
+        SecondaryCursor secCursor = null;
+        try {
+            // searchKey is the key that we want to find in the
+            // secondary db.
+            DatabaseEntry searchKey =
+                new DatabaseEntry(locateItem.getBytes("UTF-8"));
+
+            // foundKey and foundData are populated from the primary
+            // entry that is associated with the secondary db key.
+            DatabaseEntry foundKey = new DatabaseEntry();
+            DatabaseEntry foundData = new DatabaseEntry();
+
+            // open a secondary cursor
+            secCursor =
+                myDbs.getNameIndexDB().openSecondaryCursor(null, null);
+
+            // Search for the secondary database entry.
+            OperationStatus retVal =
+                secCursor.getSearchKey(searchKey, foundKey,
+                    foundData, LockMode.DEFAULT);
+
+            // Display the entry, if one is found. Repeat until no more
+            // secondary duplicate entries are found
+            while(retVal == OperationStatus.SUCCESS) {
+                Inventory theInventory =
+                    (Inventory)inventoryBinding.entryToObject(foundData);
+                displayInventoryRecord(foundKey, theInventory);
+                retVal = secCursor.getNextDup(searchKey, foundKey,
+                    foundData, LockMode.DEFAULT);
+            }
+        } catch (Exception e) {
+            System.err.println("Error on inventory secondary cursor:");
+            System.err.println(e.toString());
+            e.printStackTrace();
+        } finally {
+            if (secCursor != null) {
+                secCursor.close();
+            }
+        }
+
+    }
+

The only other thing left to do is to update + ExampleDatabaseRead.parseArgs() to support the -s command + line switch. To see how this is done, see + ExampleDatabaseRead.java in: +

+
DB_INSTALL/examples_java/src/com/sleepycat/examples/db/GettingStarted
+

+ where DB_INSTALL is the location where you + placed your DB distribution. +

+
+
+
+ + + diff --git a/db/docs/gsg/JAVA/joins.html b/db/docs/gsg/JAVA/joins.html new file mode 100644 index 000000000..a22619540 --- /dev/null +++ b/db/docs/gsg/JAVA/joins.html @@ -0,0 +1,366 @@ + + + + + + Database Joins + + + + + + + + + +
+
+
+
+

Database Joins

+
+
+
+
+

+ If you have two or more secondary databases associated with a primary database, then you can retrieve primary records based on the intersection of multiple secondary entries. You do this using a JoinCursor.

+

+ Throughout this document we have presented a class that stores inventory information on grocery items. That class is fairly simple with a limited number of data members, few of which would be interesting from a query perspective. But suppose, instead, that we were storing information on something with many more queryable characteristics, such as an automobile. In that case, you may be storing information such as color, number of doors, fuel mileage, automobile type, number of passengers, make, model, and year, to name just a few.

+

+ In this case, you would still likely be using some unique value to key your primary entries (in the United States, the automobile's VIN would be ideal for this purpose). You would then create a class that identifies all the characteristics of the automobiles in your inventory. You would also have to create some mechanism by which you would move instances of this class in and out of Java byte arrays. We described the concepts and mechanisms by which you can perform these activities in Database Records.

+

+ To query this data, you might then create multiple secondary databases, + one for each of the characteristics that you want to query. For + example, you might create a secondary for color, another for number of + doors, another for number of passengers, and so forth. Of course, you + will need a unique + key creator + + for each such secondary database. You do + all of this using the concepts and techniques described throughout this + chapter. +

+

+ Once you have created this primary database and all interesting + secondaries, what you have is the ability to retrieve automobile records + based on a single characteristic. You can, for example, find all the + automobiles that are red. Or you can find all the automobiles that have + four doors. Or all the automobiles that are minivans. +

+

+ The next most natural step, then, is to form compound queries, or joins. + For example, you might want to find all the automobiles that are red, + and that were built by Toyota, and that are minivans. You can do this + using a + JoinCursor class instance. + + +

+
+
+
+
+

Using Join Cursors

+
+
+
+
+

+ To use a join cursor: +

+
+
    +
  • +

    + Open two or more secondary cursors. These cursors must be for secondary databases that are associated with the same primary database.

    +
  • +
  • +

    + Position each such cursor to the secondary key value in which you are interested. For example, to build on the previous description, the cursor for the color database is positioned to the red records, the cursor for the type database is positioned to the minivan records, and the cursor for the make database is positioned to Toyota.

    +
  • +
  • +

    + Create an array of secondary cursors, and place in it each + of the cursors that are participating in your join query. +

    +
  • +
  • +

    + + Obtain a join cursor. You do this using the + Database.join() + + + method. You must pass this method the array of secondary cursors that you + opened and positioned in the previous steps. +

    +
  • +
  • +

    + Iterate over the set of matching records + using JoinCursor.getNext() + until + OperationStatus is not SUCCESS. + +

    +
  • +
  • +

    + Close your join cursor. +

    +
  • +
  • +

    + If you are done with them, close all your secondary cursors. +

    +
  • +
+
+

+ For example: + +

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.JoinCursor;
+import com.sleepycat.db.LockMode;
+import com.sleepycat.db.OperationStatus;
+import com.sleepycat.db.SecondaryCursor;
+import com.sleepycat.db.SecondaryDatabase;
+
+...
+
+// Database and secondary database opens omitted for brevity.
+// Assume a primary database handle:
+//   automotiveDB
+// Assume 3 secondary database handles:
+//   automotiveColorDB  -- index based on automobile color
+//   automotiveTypeDB  -- index based on automobile type
+//   automotiveMakeDB   -- index based on the manufacturer
+Database automotiveDB = null;
+SecondaryDatabase automotiveColorDB = null;
+SecondaryDatabase automotiveTypeDB = null;
+SecondaryDatabase automotiveMakeDB = null;
+
+// Query strings:
+String theColor = "red";
+String theType = "minivan";
+String theMake = "Toyota";
+
+// Secondary cursors used for the query:
+SecondaryCursor colorSecCursor = null;
+SecondaryCursor typeSecCursor = null;
+SecondaryCursor makeSecCursor = null;
+
+// The join cursor
+JoinCursor joinCursor = null;
+
+// These are needed for our queries
+DatabaseEntry foundKey = new DatabaseEntry();
+DatabaseEntry foundData = new DatabaseEntry();
+
+// All cursor operations are enclosed in a try block to ensure that they
+// get closed in the event of an exception.
+
+try {
+    // Database entries used for the query:
+    DatabaseEntry color = new DatabaseEntry(theColor.getBytes("UTF-8"));
+    DatabaseEntry type = new DatabaseEntry(theType.getBytes("UTF-8"));
+    DatabaseEntry make = new DatabaseEntry(theMake.getBytes("UTF-8"));
+
+    colorSecCursor = automotiveColorDB.openSecondaryCursor(null, null); 
+    typeSecCursor = automotiveTypeDB.openSecondaryCursor(null, null); 
+    makeSecCursor = automotiveMakeDB.openSecondaryCursor(null, null); 
+
+    // Position all our secondary cursors to our query values.
+    OperationStatus colorRet = 
+        colorSecCursor.getSearchKey(color, foundData, LockMode.DEFAULT);
+    OperationStatus typeRet = 
+        typeSecCursor.getSearchKey(type, foundData, LockMode.DEFAULT);
+    OperationStatus makeRet = 
+        makeSecCursor.getSearchKey(make, foundData, LockMode.DEFAULT);
+
+    // If all our searches returned successfully, we can proceed
+    if (colorRet == OperationStatus.SUCCESS &&
+        typeRet == OperationStatus.SUCCESS &&
+        makeRet == OperationStatus.SUCCESS) {
+
+        // Get a secondary cursor array and populate it with our
+        // positioned cursors
+        SecondaryCursor[] cursorArray = {colorSecCursor,
+                                         typeSecCursor, 
+                                         makeSecCursor};
+
+        // Create the join cursor
+        joinCursor = automotiveDB.join(cursorArray, null);
+
+        // Now iterate over the results, handling each in turn
+        while (joinCursor.getNext(foundKey, foundData, LockMode.DEFAULT) ==
+                        OperationStatus.SUCCESS) {
+
+            // Do something with the key and data retrieved in
+            // foundKey and foundData
+        }
+    }
+} catch (DatabaseException dbe) {
+    // Error reporting goes here
+} catch (Exception e) {
+    // Error reporting goes here
+} finally {
+    try {
+        // Make sure to close out all our cursors
+        if (colorSecCursor != null) {
+            colorSecCursor.close();
+        }
+        if (typeSecCursor != null) {
+            typeSecCursor.close();
+        }
+        if (makeSecCursor != null) {
+            makeSecCursor.close();
+        }
+        if (joinCursor != null) {
+            joinCursor.close();
+        }
+    } catch (DatabaseException dbe) {
+        // Error reporting goes here
+    }
+} 
+
+
+
+
+
+

JoinCursor Properties

+
+
+
+
+

+ You can set JoinCursor properties using the + JoinConfig class. Currently there is just one property that you can + set: +

+
+
    +
  • +

    + JoinConfig.setNoSort() +

    +

    + Specifies whether automatic sorting of input cursors is disabled. The cursors are sorted from the + one that refers to the least number of data items to the one that refers to the most. +

    +

    + If the data is structured so that cursors with many data items also share many common elements, + higher performance will result from listing those cursors before cursors with fewer data + items. Turning off sorting permits applications to specify cursors in the proper order given this + scenario. +

    +

    + Default value is false (automatic cursor sorting is performed). +

    +

    + For example: +

    + +
    // All database and environments omitted
    +JoinConfig config = new JoinConfig();
    +config.setNoSort(true);
    +JoinCursor joinCursor = myDb.join(cursorArray, config); 
    +
  • +
+
+
+
+ + + diff --git a/db/docs/gsg/JAVA/keyCreator.html b/db/docs/gsg/JAVA/keyCreator.html new file mode 100644 index 000000000..5eae91890 --- /dev/null +++ b/db/docs/gsg/JAVA/keyCreator.html @@ -0,0 +1,244 @@ + + + + + + Implementing Key + Creators + + + + + + + + + + + +
+
+
+
+

Implementing Key + Creators + +

+
+
+
+
+

+ You must provide every secondary database with a + class + + that creates keys from primary records. You identify this + class + + + + using the SecondaryConfig.setKeyCreator() + method. + + +

+

+ You can create keys using whatever data you want. Typically you will + base your key on some information found in a record's data, but you + can also use information found in the primary record's key. How you build + your keys is entirely dependent upon the nature of the index that you + want to maintain. +

+

+ You implement a key creator by writing a class that implements the + SecondaryKeyCreator interface. This interface + requires you to implement the SecondaryKeyCreator.createSecondaryKey() + method. +

+

+ One thing to remember when implementing this method is that you will + need a way to extract the necessary information from the data + DatabaseEntry and/or the key + DatabaseEntry that are provided on calls to this + method. If you are using complex objects, then you are probably using the + Bind APIs to perform this conversion. The easiest thing to do is to + instantiate the EntryBinding or + TupleBinding that you need to perform the + conversion, and then provide this to your key creator's constructor. + The Bind APIs are introduced in Using the BIND APIs. +

+

+ Also, SecondaryKeyCreator.createSecondaryKey() returns a + boolean. A return value of false indicates that + no secondary key exists, and therefore no record should be added to the secondary database for that primary record. + If a record already exists in the secondary database, it is deleted. +

+

+ For example, suppose your primary database uses the following class + for its record data: +

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+public class PersonData {
+    private String userID;
+    private String surname;
+    private String familiarName;
+
+    public PersonData(String userID, String surname, String familiarName) {
+        this.userID = userID;
+        this.surname = surname;
+        this.familiarName = familiarName;
+    }
+
+    public String getUserID() {
+        return userID;
+    }
+
+    public String getSurname() {
+        return surname;
+    }
+
+    public String getFamiliarName() {
+        return familiarName;
+    }
+} 
+

+ Also, suppose that you have created a custom tuple binding, + PersonDataBinding, that you use to convert + PersonData objects to and from + DatabaseEntry objects. (Custom tuple bindings are + described in Custom Tuple Bindings.) +

+

+ Finally, suppose you want a secondary database that is keyed based + on the person's full name. +

+

+ Then in this case you might create a key creator as follows: +

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.db.SecondaryKeyCreator;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.SecondaryDatabase;
+
+import java.io.IOException;
+
+public class FullNameKeyCreator implements SecondaryKeyCreator {
+ 
+    private TupleBinding theBinding;
+
+    public FullNameKeyCreator(TupleBinding theBinding1) {
+            theBinding = theBinding1;
+    }
+
+    public boolean createSecondaryKey(SecondaryDatabase secDb,
+                                      DatabaseEntry keyEntry, 
+                                      DatabaseEntry dataEntry,
+                                      DatabaseEntry resultEntry) {
+
+        // If the dataEntry parameter is null, then we can
+        // not create the key
+        if (dataEntry == null) {
+            return false;
+        } else {           // Create the key
+            try {
+                PersonData pd = 
+                    (PersonData) theBinding.entryToObject(dataEntry);
+                String fullName = pd.getFamiliarName() + " " + 
+                    pd.getSurname();
+                resultEntry.setData(fullName.getBytes("UTF-8"));
+            } catch (IOException willNeverOccur) {}
+        }
+        return true;
+    }
+} 
+

Finally, you use this key creator as follows:

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.bind.tuple.TupleBinding;
+
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.DatabaseType;
+import com.sleepycat.db.SecondaryDatabase;
+import com.sleepycat.db.SecondaryConfig;
+
+import java.io.FileNotFoundException;
+
+...
+Database myDb = null;
+SecondaryDatabase mySecDb = null;
+try {
+    // Primary database open omitted for brevity
+...
+
+    TupleBinding myDataBinding = new MyTupleBinding();
+    FullNameKeyCreator fnkc = new FullNameKeyCreator(myDataBinding);
+
+    SecondaryConfig mySecConfig = new SecondaryConfig();
+    mySecConfig.setKeyCreator(fnkc);
+    mySecConfig.setType(DatabaseType.BTREE);
+
+    //Perform the actual open
+    String secDbName = "mySecondaryDatabase";
+    mySecDb = new SecondaryDatabase(secDbName, null, myDb, mySecConfig);
+} catch (DatabaseException de) {
+    // Exception handling goes here
+} catch (FileNotFoundException fnfe) {
+    // Exception handling goes here
+} finally {
+    try {
+        if (mySecDb != null) {
+            mySecDb.close();
+        }
+
+        if (myDb != null) {
+            myDb.close(); 
+        }
+    } catch (DatabaseException dbe) {
+        // Exception handling goes here
+    }
+}
+
+ + + diff --git a/db/docs/gsg/JAVA/preface.html b/db/docs/gsg/JAVA/preface.html new file mode 100644 index 000000000..56de76890 --- /dev/null +++ b/db/docs/gsg/JAVA/preface.html @@ -0,0 +1,142 @@ + + + + + + Preface + + + + + + + + + +
+
+
+
+

Preface

+
+
+
+
+
+

+ Table of Contents +

+
+
+ + Conventions Used in this Book + +
+
+
+

+ Welcome to Berkeley DB (DB). This document introduces + + DB, version 4.3. + It is intended + to provide a rapid introduction to the DB API set and related concepts. The goal of this document is + to provide you with an efficient mechanism + with which you can evaluate DB against your project's technical requirements. As such, this document is + intended for Java + developers and senior software architects who are + looking for an in-process data management solution. No prior experience with Sleepycat technologies is + expected or required. +

+
+
+
+
+

Conventions Used in this Book

+
+
+
+
+

+ The following typographical conventions are used within this manual:

+

+ Class names are represented in monospaced font, as are method + names. For example: + + + + "The Database() + constructor returns a Database class object." + + +

+

+ Variable or non-literal text is presented in italics. For example: "Go to your + DB_INSTALL directory." +

+

+ Program examples are displayed in a monospaced font on a shaded background. + For example: +

+
import com.sleepycat.db.DatabaseConfig;
+
+...
+
+// Allow the database to be created.
+DatabaseConfig myDbConfig = new DatabaseConfig();
+myDbConfig.setAllowCreate(true);
+

+ In some situations, programming examples are updated from one chapter to the next. When + this occurs, the new code is presented in monospaced bold font. For example: +

+
import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+
+...
+
+// Allow the database to be created.
+DatabaseConfig myDbConfig = new DatabaseConfig();
+myDbConfig.setAllowCreate(true);
+Database myDb = new Database("mydb.db", null, myDbConfig); 
+
+

Note

+

+ Finally, notes of interest are represented using a note block such + as this. +

+
+
+
+ + + diff --git a/db/docs/gsg/JAVA/readSecondary.html b/db/docs/gsg/JAVA/readSecondary.html new file mode 100644 index 000000000..be2c6b1b1 --- /dev/null +++ b/db/docs/gsg/JAVA/readSecondary.html @@ -0,0 +1,124 @@ + + + + + + Reading Secondary Databases + + + + + + + + + +
+
+
+
+

Reading Secondary Databases

+
+
+
+
+

+ As with a primary database, you can read records from your secondary database either by using the SecondaryDatabase.get() method or by using a SecondaryCursor. The main difference between reading secondary and primary databases is that when you read a secondary database record, the secondary record's data is not returned to you. Instead, the primary key and data corresponding to the secondary key are returned to you.

+

+ For example, assuming your secondary database contains keys related + to a person's full name: +

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.LockMode;
+import com.sleepycat.db.OperationStatus;
+import com.sleepycat.db.SecondaryDatabase;
+
+...
+SecondaryDatabase mySecondaryDatabase = null;
+try {
+    // Omitting all database opens
+    ...
+
+    String searchName = "John Doe";
+    DatabaseEntry searchKey = 
+        new DatabaseEntry(searchName.getBytes("UTF-8"));
+    DatabaseEntry primaryKey = new DatabaseEntry();
+    DatabaseEntry primaryData = new DatabaseEntry();
+
+    // Get the primary key and data for the user 'John Doe'.
+    OperationStatus retVal = mySecondaryDatabase.get(null, searchKey, 
+                                                     primaryKey, 
+                                                     primaryData, 
+                                                     LockMode.DEFAULT); 
+} catch (Exception e) {
+    // Exception handling goes here
+}
+

+ Note that, just like Database.get(), if your secondary database supports duplicate records then SecondaryDatabase.get() only returns the first record found in a matching duplicates set. If you want to see all the records related to a specific secondary key, then use a SecondaryCursor (described in Using Secondary Cursors).

+
+ + + diff --git a/db/docs/gsg/JAVA/returns.html b/db/docs/gsg/JAVA/returns.html new file mode 100644 index 000000000..46433419f --- /dev/null +++ b/db/docs/gsg/JAVA/returns.html @@ -0,0 +1,81 @@ + + + + + + Error Returns + + + + + + + + + +
+
+
+
+

Error Returns

+
+
+
+
+

+ In addition to exceptions, the + + DB interfaces always return a value of 0 on success. If the + operation does not succeed for any reason, the return value will be + non-zero. +

+

+ If a system error occurred (for example, DB ran out of disk + space, or permission to access a file was denied, or an illegal argument + was specified to one of the interfaces), DB returns an + errno + value. All of the possible values of errno are greater than 0. +

+

+ If the operation did not fail due to a system error, but was not + successful either, DB returns a special error value. For + example, if you tried to retrieve data from the database and the + record for which you are searching does not exist, DB would return + DB_NOTFOUND, a special error value that means the requested + key does not appear in the database. All of the possible special error + values are less than 0. +

+
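+ In the Java API used throughout this guide, these return values typically surface either as OperationStatus constants (for expected conditions such as a missing key) or through the errno value carried by a DatabaseException. The following sketch assumes an already opened Database handle named myDatabase and simply illustrates checking both:
+
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.LockMode;
+import com.sleepycat.db.OperationStatus;
+
+...
+
+try {
+    DatabaseEntry theKey = new DatabaseEntry("noSuchKey".getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry();
+
+    OperationStatus status =
+        myDatabase.get(null, theKey, theData, LockMode.DEFAULT);
+    if (status == OperationStatus.NOTFOUND) {
+        // The requested key does not appear in the database. This is the
+        // special (negative) DB_NOTFOUND return described above.
+        System.out.println("Record not found.");
+    }
+} catch (DatabaseException de) {
+    // A system error. getErrno() reports the underlying (positive)
+    // errno value described above.
+    System.err.println("Error " + de.getErrno() + ": " + de.toString());
+} catch (Exception e) {
+    // Handles the encoding exception thrown by getBytes(), and so forth.
+    e.printStackTrace();
+}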
+ + + diff --git a/db/docs/gsg/JAVA/secondaryCursor.html b/db/docs/gsg/JAVA/secondaryCursor.html new file mode 100644 index 000000000..390769d67 --- /dev/null +++ b/db/docs/gsg/JAVA/secondaryCursor.html @@ -0,0 +1,158 @@ + + + + + + + Using Secondary Cursors + + + + + + + + + + + +
+
+
+
+

+ Using Secondary Cursors + +

+
+
+
+
+

+ Just like cursors on a primary database, you can use + secondary cursors + + to iterate over the records in a secondary database. Like + + normal cursors, + + + you can also use + secondary cursors + + to search for specific records in a database, to seek to the first + or last record in the database, to get the next duplicate record, + + and so forth. For a complete description on cursors and their capabilities, see + Using Cursors. +

+

+ However, when you use + secondary cursors: + +

+
+
    +
  • +

    + Any data returned is the data contained on the primary database + record referenced by the secondary record. +

    +
  • +
  • +

    + SecondaryCursor.getSearchBoth() and + related methods do not search based on a key/data pair. Instead, you + search based on a secondary key and a primary key. The data returned + is the primary data that most closely matches the two keys provided + for the search. +

    +
  • +
+
+

+ For example, suppose you are using the databases, classes, and key + creators + + described in Implementing Key + Creators + + . + Then the following searches for a person's + name in the secondary database, and deletes all secondary and primary + records that use that name. + +

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.LockMode;
+import com.sleepycat.db.OperationStatus;
+import com.sleepycat.db.SecondaryDatabase;
+import com.sleepycat.db.SecondaryCursor;
+  
+...
+try {
+    SecondaryDatabase mySecondaryDatabase = null;
+    // Database opens omitted for brevity
+    ...
+
+    String secondaryName = "John Doe";
+    DatabaseEntry secondaryKey = 
+        new DatabaseEntry(secondaryName.getBytes("UTF-8"));
+
+    DatabaseEntry foundData = new DatabaseEntry();
+
+    SecondaryCursor mySecCursor = 
+        mySecondaryDatabase.openSecondaryCursor(null, null);
+
+    OperationStatus retVal = mySecCursor.getSearchKey(secondaryKey, 
+                                                  foundData, 
+                                                  LockMode.DEFAULT);
+    while (retVal == OperationStatus.SUCCESS) {
+        mySecCursor.delete();
+        retVal = mySecCursor.getNextDup(secondaryKey, 
+                                        foundData, 
+                                        LockMode.DEFAULT);
+    } 
+} catch (Exception e) {
+    // Exception handling goes here
+}
+
+ + + diff --git a/db/docs/gsg/JAVA/secondaryDelete.html b/db/docs/gsg/JAVA/secondaryDelete.html new file mode 100644 index 000000000..e69ee6dd5 --- /dev/null +++ b/db/docs/gsg/JAVA/secondaryDelete.html @@ -0,0 +1,158 @@ + + + + + + Deleting Secondary Database Records + + + + + + + + + +
+
+
+
+

Deleting Secondary Database Records

+
+
+
+
+

+ In general, you will not modify a secondary database directly. In order to modify a secondary database, you should modify the primary database and simply allow DB to manage the secondary modifications for you.

+

+ However, as a convenience, you can delete a + SecondaryDatabase + + record directly. Doing so causes the associated primary key/data pair to be deleted. + This in turn causes DB to delete all + SecondaryDatabase + + records that reference the primary record. +

+

+ You can use the + SecondaryDatabase.delete() + + + method to delete a secondary database record. + + + + Note that if your + + SecondaryDatabase + contains duplicate records, then deleting a record from the set of + duplicates causes all of the duplicates to be deleted as well. + + +

+
+

Note

+

SecondaryDatabase.delete() causes the previously described delete operations to occur only if:

+
+
    +
  • +

    + the + SecondaryKeyCreator.createSecondaryKey() method + + + returns + true + + (see Implementing Key + Creators + + for information on this + interface and method). + +

    +
  • +
  • +

    + the primary database is opened for write access. +

    +
  • +
+
+

+ If either of these conditions is not met, then no delete operations can be performed on the secondary database.

+
+

For example:

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.OperationStatus;
+import com.sleepycat.db.SecondaryDatabase;
+
+...
+try {
+    SecondaryDatabase mySecondaryDatabase = null;
+    // Omitting all database opens
+    ...
+
+    String searchName = "John Doe";
+    DatabaseEntry searchKey = 
+        new DatabaseEntry(searchName.getBytes("UTF-8"));
+
+    // Delete the first secondary record that uses "John Doe" as
+    // a key. This causes the primary record referenced by this secondary
+    // record to be deleted.
+    OperationStatus retVal = mySecondaryDatabase.delete(null, searchKey);
+} catch (Exception e) {
+    // Exception handling goes here
+}
+
+ + + diff --git a/db/docs/gsg/JAVA/secondaryProps.html b/db/docs/gsg/JAVA/secondaryProps.html new file mode 100644 index 000000000..b8f119d19 --- /dev/null +++ b/db/docs/gsg/JAVA/secondaryProps.html @@ -0,0 +1,92 @@ + + + + + + Secondary Database Properties + + + + + + + + + +
+
+
+
+

Secondary Database Properties

+
+
+
+
+

Secondary databases accept SecondaryConfig + objects. SecondaryConfig is a subclass of DatabaseConfig, + so it can manage all of the same properties as does DatabaseConfig. + See Database Properties for more information.

+

+ In addition to the DatabaseConfig properties, SecondaryConfig also allows you to manage the following properties (a brief configuration sketch follows this list):

+
+
    +
  • +

    + SecondaryConfig.setAllowPopulate() +

    +

    If true, the secondary database can be autopopulated. This means + that on open, if the secondary database is empty then the primary + database is read in its entirety and additions/modifications to the + secondary's records occur automatically.

    +
  • +
  • +

    + SecondaryConfig.setKeyCreator() +

    +

    Identifies the key creator object to be used for secondary key + creation. See Implementing Key + Creators + + + for more information.

    +
  • +
+
+
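+ The following minimal sketch shows both properties being set before a secondary database is opened. The database name is a placeholder, and myDb and myKeyCreator are assumed to be an already opened primary Database and an instance of your SecondaryKeyCreator implementation (see Implementing Key Creators):
+
+import com.sleepycat.db.DatabaseType;
+import com.sleepycat.db.SecondaryConfig;
+import com.sleepycat.db.SecondaryDatabase;
+
+...
+
+SecondaryDatabase mySecDb = null;
+try {
+    SecondaryConfig mySecConfig = new SecondaryConfig();
+    mySecConfig.setAllowCreate(true);
+    mySecConfig.setType(DatabaseType.BTREE);
+
+    // Populate the secondary from the primary's existing records on open.
+    mySecConfig.setAllowPopulate(true);
+
+    // Identify the key creator used to build the secondary keys.
+    mySecConfig.setKeyCreator(myKeyCreator);
+
+    mySecDb = new SecondaryDatabase("mySecondaryDatabase", null,
+                                    myDb, mySecConfig);
+} catch (Exception e) {
+    // Exception handling goes here
+}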
+ + + diff --git a/db/docs/gsg/JAVA/usingDbt.html b/db/docs/gsg/JAVA/usingDbt.html new file mode 100644 index 000000000..dec0c9c32 --- /dev/null +++ b/db/docs/gsg/JAVA/usingDbt.html @@ -0,0 +1,421 @@ + + + + + + Reading and Writing Database Records + + + + + + + + + +
+
+
+
+

Reading and Writing Database Records

+
+
+
+
+

+ When reading and writing database records, be aware that there are some slight differences in behavior depending on whether your database supports duplicate records. Two or more database records are considered to be duplicates of one another if they share the same key. The collection of records sharing the same key is called a duplicates set. In DB, a given key is stored only once for a single duplicates set.

+

+ By default, DB databases do + not support duplicate records. Where duplicate records are supported, + cursors (see below) are typically used + to access all of the records in the duplicates set. +

+
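+ Duplicate support is requested on the configuration object before the database is created; a minimal sketch (the database name is a placeholder) might look like this:
+
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseType;
+
+...
+
+Database myDatabase = null;
+try {
+    DatabaseConfig dbConfig = new DatabaseConfig();
+    dbConfig.setAllowCreate(true);
+    dbConfig.setType(DatabaseType.BTREE);
+    // Request support for sorted duplicate records. This must be set
+    // before the database is created.
+    dbConfig.setSortedDuplicates(true);
+
+    myDatabase = new Database("myDuplicatesDb.db", null, dbConfig);
+} catch (Exception e) {
+    // Exception handling goes here
+}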

+ DB provides two basic mechanisms for the storage and retrieval of database + key/data pairs: +

+
+
    +
  • +

    + The + Database.put() + + + and + Database.get() + + + methods provide the easiest access for all non-duplicate records in the database. + These methods are described in this section. +

    +
  • +
  • +

    Cursors provide several methods for putting and getting database + records. Cursors and their database access methods are described in + Using Cursors.

    +
  • +
+
+
+
+
+
+

Writing Records to the Database

+
+
+
+
+

+ Records are stored in the database using whatever organization is + required by the access method that you have selected. In some cases (such as + BTree), records are stored in a sort order that you may want to define + (see Setting Comparison Functions for more information). +

+

+ In any case, the mechanics of putting and getting database records do not + change once you have selected your access method, configured your + sorting routines (if any), and opened your database. From your + code's perspective, a simple database put and get is largely the + same no matter what access method you are using. +

+

You can use the following methods to put database records:

+
+
    +
  • +

    + Database.put() +

    +

    + Puts a database record into the database. If your database does not + support duplicate records, and if the provided key already exists in + the database, then the currently existing record is replaced with + the new data. +

    +
  • +
  • +

    + Database.putNoOverwrite() +

    +

    + Disallows overwriting (replacing) an existing record in the + database. If the provided key already exists in the database, + then this method returns + OperationStatus.KEYEXIST even if + the database supports duplicates. +

    +
  • +
  • +

    + Database.putNoDupData() +

    +

    + Puts a database record into the database. If the provided key and data already exist in the database (that is, if you are attempting to put a record that compares equally to an existing record), then this returns OperationStatus.KEYEXIST.

    +
  • +
+
+

+ When you put database records, you provide both the key and the data as + DatabaseEntry objects. This means you must + convert your key and data into a Java byte array. For + example: +

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.Database;
+
+...
+
+// Database opens omitted for clarity.
+// Databases must NOT be opened read-only.
+
+String aKey = "myFirstKey";
+String aData = "myFirstData";
+
+try {
+    DatabaseEntry theKey = new DatabaseEntry(aKey.getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry(aData.getBytes("UTF-8"));
+    myDatabase.put(null, theKey, theData);
+} catch (Exception e) {
+    // Exception handling goes here
+} 
+
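+ The putNoOverwrite() method described in the list above works the same way, except that it reports, through its OperationStatus return value, whether the key was already present. A brief sketch, reusing the myDatabase handle assumed in the previous example:
+
+import com.sleepycat.db.OperationStatus;
+
+...
+
+String aKey = "myFirstKey";
+String aData = "myReplacementData";
+
+try {
+    DatabaseEntry theKey = new DatabaseEntry(aKey.getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry(aData.getBytes("UTF-8"));
+
+    // Refuse to overwrite an existing record for this key.
+    OperationStatus status =
+        myDatabase.putNoOverwrite(null, theKey, theData);
+    if (status == OperationStatus.KEYEXIST) {
+        System.out.println("Key already present; record not replaced.");
+    }
+} catch (Exception e) {
+    // Exception handling goes here
+}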
+
+
+
+
+

Getting Records from the Database

+
+
+
+
+

+ The Database class provides several + methods that you can use to retrieve database records. Note that if your + database supports duplicate records, then these methods will only ever + return the first record in a duplicate set. For this reason, if your + database supports duplicates, you should use a cursor to retrieve + records from it. Cursors are described in Using Cursors. +

+

+ You can use either of the following methods to retrieve records from the database: +

+
+
    +
  • +

    + Database.get() +

    +

    Retrieves the record whose key matches the key provided to the method. If no record exists that uses the provided key, then OperationStatus.NOTFOUND is returned.

    +
  • +
  • +

    + Database.getSearchBoth() +

    +

    Retrieves the record whose key matches both the key and the data provided to the method. If no record exists that uses the provided key and data, then OperationStatus.NOTFOUND is returned.

    +
  • +
+
+

Both the key and data for a database record are returned as + DatabaseEntry objects. These objects are + passed as parameter values to the Database.get() method. +

+

In order to retrieve your data once Database.get() + has completed, you must retrieve the byte array stored + in the DatabaseEntry and then convert that + byte array back to the + appropriate datatype. For example:

+ +
package com.sleepycat.examples.db.GettingStarted;
+      
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.LockMode;
+import com.sleepycat.db.OperationStatus;
+
+...
+
+Database myDatabase = null;
+// Database opens omitted for clarity.
+// Database may be opened read-only.  
+  
+String aKey = "myFirstKey";
+
+try {
+    // Create a pair of DatabaseEntry objects. theKey
+    // is used to perform the search. theData is used
+    // to store the data returned by the get() operation.
+    DatabaseEntry theKey = new DatabaseEntry(aKey.getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry();
+    
+    // Perform the get.
+    if (myDatabase.get(null, theKey, theData, LockMode.DEFAULT) ==
+        OperationStatus.SUCCESS) {
+
+        // Recreate the data String.
+        byte[] retData = theData.getData();
+        String foundData = new String(retData);
+        System.out.println("For key: '" + aKey + "' found data: '" + 
+                            foundData + "'.");
+    } else {
+        System.out.println("No record found for key '" + aKey + "'.");
+    } 
+} catch (Exception e) {
+    // Exception handling goes here
+}
+
+
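+ The getSearchBoth() method described in the list above works similarly, except that you also supply the data you expect the record to hold. A brief sketch, again reusing the myDatabase handle and the key and data stored earlier:
+
+try {
+    DatabaseEntry theKey = new DatabaseEntry(aKey.getBytes("UTF-8"));
+    // getSearchBoth() requires the expected data as well as the key.
+    DatabaseEntry theData =
+        new DatabaseEntry("myFirstData".getBytes("UTF-8"));
+
+    OperationStatus status =
+        myDatabase.getSearchBoth(null, theKey, theData, LockMode.DEFAULT);
+    if (status == OperationStatus.NOTFOUND) {
+        System.out.println("No record matches that key/data pair.");
+    }
+} catch (Exception e) {
+    // Exception handling goes here
+}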
+
+
+
+

Deleting Records

+
+
+
+
+

+ + You can use the + Database.delete() + + + method to delete a record from the database. If your database supports + duplicate records, then all records associated with the provided key are + deleted. To delete just one record from a list of duplicates, use a + cursor. Cursors are described in Using Cursors. + +

+

+ You can also delete every record in the database by using + Database.truncate(). + + +

+

For example:

+ +
package com.sleepycat.examples.db.GettingStarted;
+
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.Database;
+
+...
+
+Database myDatabase = null;
+// Database opens omitted for clarity.
+// Database can NOT be opened read-only.  
+  
+try {
+    String aKey = "myFirstKey";
+    DatabaseEntry theKey = new DatabaseEntry(aKey.getBytes("UTF-8"));
+    
+    // Perform the deletion. All records that use this key are
+    // deleted.
+    myDatabase.delete(null, theKey); 
+} catch (Exception e) {
+    // Exception handling goes here
+}
+
+
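+ Database.truncate(), mentioned above, removes every record in a single call. A minimal sketch, reusing the myDatabase handle from the previous example; the second argument indicates whether DB should count the discarded records:
+
+try {
+    // Discard every record in the database. Passing false means we do
+    // not need a count of the records that were removed.
+    myDatabase.truncate(null, false);
+} catch (Exception e) {
+    // Exception handling goes here
+}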
+
+
+
+

Data Persistence

+
+
+
+
+

+ When you perform a database modification, your modification is made + in the in-memory cache. This means that your data modifications + are not necessarily written to disk, and so your data may not appear + in the database after an application restart. +

+

+ Note that as a normal part of closing a database, its cache is + written to disk. However, in the event of an application or system + failure, there is no guarantee that your databases will close + cleanly. In this event, it is possible for you to lose data. Under + extremely rare circumstances, it is also possible for you to + experience database corruption. +

+

+ Therefore, if you care about whether your data persists across + application runs, and to guard against the rare possibility of + database corruption, you should use transactions to protect your + database modifications. Every time you commit a transaction, DB + ensures that the data will not be lost due to application or + system failure. + + + For information on transactions, see the Berkeley DB + Programmer's Tutorial and Reference Guide. + +

+

+ If you do not want to use transactions, then the assumption is that + your data is of a nature that it need not exist the next time your + application starts. You may want this if, for example, you are using + DB to cache data relevant only to the current application + runtime. +

+

+      If, however, you are not using transactions for some reason and you
+      still want some guarantee that your database modifications are
+      persistent, then you should periodically sync your database's
+      in-memory cache to disk by calling Database.sync().
+      Syncs cause the entire contents of your in-memory cache to be written
+      to disk. As such, they are quite expensive and you should use them
+      sparingly.

+

+      Remember that by default a sync is performed any time a
+      non-transactional database is closed cleanly. (You can override this
+      behavior by specifying true on the call to Database.close().) That
+      said, you can manually run a sync by calling Database.sync().
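A minimal sketch of a manual sync, assuming myDatabase was opened as in the
earlier examples:

import com.sleepycat.db.Database;

...

Database myDatabase = null;
// Database opens omitted for clarity.

try {
    // Flush any cached modifications for this database to disk.
    myDatabase.sync();
} catch (Exception e) {
    // Exception handling goes here
}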

+
+

Note

+

+ If your application or system crashes and you are not using + transactions, then you should either discard and recreate your + databases, or verify them. You can verify a database using + + + Database.verify(). + If your databases do not verify cleanly, use the + db_dump command to salvage as much of the + database as is possible. Use either the -R or + -r command line options to control how + aggressive db_dump should be when salvaging + your databases. +

+
+
+
+ + + diff --git a/db/docs/index.html b/db/docs/index.html index f9ecf1835..e31ad4a5a 100644 --- a/db/docs/index.html +++ b/db/docs/index.html @@ -1,9 +1,9 @@ - + -Berkeley DB (Version: 4.2.52) +Berkeley DB (Version: 4.3.14) - + @@ -22,13 +22,17 @@ Building Berkeley DB - C API
- C API Permuted Index
+ C API
+ C Permuted Index
+ C Getting Started Guide: PDF, HTML

- C++ API
- C++ API Permuted Index
+ C++ API
+ C++ Permuted Index
+ C++ Getting Started Guide: PDF, HTML

Javadoc
+ Java Getting Started Guide: PDF, HTML
+ Java Collections Tutorial: PDF, HTML

Tcl API
@@ -36,7 +40,7 @@ Building for VxWorks

Building for Win32

- Upgrading Applications to the 4.2 release
+ Upgrading Applications to the 4.3 release
@@ -53,7 +57,7 @@ Contacting Sleepycat Software
Sleepycat Software Home Page
Sleepycat Software Product List
- Release Patches and Change Logs
+ Release Patches and Change Logs
License,  Legal Notices
@@ -62,8 +66,8 @@

-Version 4.2.52, December 3, 2003
-Copyright 1997-2003 Sleepycat Software, Inc. All Rights Reserved +Version 4.3.14, October 14, 2004
+Copyright 1997-2004 Sleepycat Software, Inc. All Rights Reserved

diff --git a/db/docs/java/allclasses-frame.html b/db/docs/java/allclasses-frame.html index 44d0539c5..762772b19 100644 --- a/db/docs/java/allclasses-frame.html +++ b/db/docs/java/allclasses-frame.html @@ -1,261 +1,278 @@ - + - + All Classes (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + - - + + All Classes
- +
- diff --git a/db/docs/java/allclasses-noframe.html b/db/docs/java/allclasses-noframe.html index 543a095ba..9f045913a 100644 --- a/db/docs/java/allclasses-noframe.html +++ b/db/docs/java/allclasses-noframe.html @@ -1,261 +1,278 @@ - + - + All Classes (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + - - + + All Classes
-
ByteArrayBinding +BooleanBinding +
+BtreePrefixCalculator +
+BtreeStats +
+ByteArrayBinding +
+ByteBinding +
+CacheFile +
+CacheFilePriority +
+CacheFileStats +
+CacheStats +
+CharacterBinding
-ByteArrayFormat +CheckpointConfig
-ClassCatalog +ClassCatalog
-CurrentTransaction +CurrentTransaction
-DataBinding +Cursor
-DataBuffer +CursorConfig
-DataCursor +Database
-DataDb +DatabaseConfig
-DataFormat +DatabaseEntry
-DataIndex +DatabaseException
-DataStore +DatabaseStats
-DataThang +DatabaseType
-DataType +DeadlockException
-DataView +DoubleBinding
-Db +EntityBinding
-DbAppDispatch +EntryBinding
-DbAppendRecno +Environment
-DbBtreeCompare +EnvironmentConfig
-DbBtreePrefix +ErrorHandler
-DbBtreeStat +ExceptionUnwrapper
-Dbc +ExceptionWrapper
-DbClient +FastInputStream
-DbDeadlockException +FastOutputStream
-DbDupCompare +FeedbackHandler
-DbEnv +FloatBinding
-DbEnv.RepProcessMessage +HashStats
-DbEnvFeedback +Hasher
-DbEnvFeedbackHandler +IOExceptionWrapper
-DbErrcall +IntegerBinding
-DbErrorHandler +JoinConfig
-DbException +JoinCursor
-DbFeedback +KeyRange
-DbFeedbackHandler +Lock
-DbHash +LockDetectMode
-DbHashStat +LockMode
-DbKeyRange +LockNotGrantedException
-DbLock +LockOperation
-DbLockNotGrantedException +LockRequest
-DbLockRequest +LockRequestMode
-DbLockStat +LockStats
-DbLogc +LogCursor
-DbLogStat +LogRecordHandler
-DbLsn +LogSequenceNumber
-DbMemoryException +LogStats
-DbMpoolFile +LongBinding
-DbMpoolFStat +MapEntryParameter
-DbMpoolStat +MarshalledTupleEntry
-DbMultipleDataIterator +MarshalledTupleKeyEntity
-DbMultipleKeyDataIterator +MemoryException
-DbMultipleRecnoDataIterator +MessageHandler
-DbPanicHandler +MultipleDataEntry
-DbPreplist +MultipleEntry
-DbQueueStat +MultipleKeyDataEntry
-DbRepStat +MultipleRecnoDataEntry
-DbRepTransport +OperationStatus
-DbRunRecoveryException +PanicHandler
-DbSecondaryKeyCreate +PreparedTransaction
-Dbt +PrimaryKeyAssigner
-DbTxn +QueueStats
-DbTxnStat +RecordNumberAppender
-DbTxnStat.Active +RecordNumberBinding
-EntityBinding +RecoveryOperation
-ExceptionUnwrapper +ReplicationHandleDeadException
-ExceptionWrapper +ReplicationStats
-FastInputStream +ReplicationStatus
-FastOutputStream +ReplicationTransport
-ForeignKeyIndex +RunRecoveryException
-IntegrityConstraintException +RuntimeExceptionWrapper
-IOExceptionWrapper +SecondaryConfig
-KeyExtractor +SecondaryCursor
-KeyRangeException +SecondaryDatabase
-MapEntry +SecondaryKeyCreator
-MarshalledTupleData +Sequence
-MarshalledTupleKeyEntity +SequenceConfig
-PrimaryKeyAssigner +SequenceStats
-RecordNumberBinding +SerialBinding
-RecordNumberFormat +SerialInput
-RuntimeExceptionWrapper +SerialOutput
-SerialBinding +SerialSerialBinding
-SerialFormat +SerialSerialKeyCreator
-SerialInput +ShortBinding
-SerialOutput +StatsConfig
-SerialSerialBinding +StoredClassCatalog
-SerialSerialKeyExtractor +StoredCollection
-SimpleBuffer +StoredCollections
-StoredClassCatalog +StoredContainer
-StoredCollection +StoredEntrySet
-StoredCollections +StoredIterator
-StoredContainer +StoredKeySet
-StoredEntrySet +StoredList
-StoredIterator +StoredMap
-StoredKeySet +StoredSortedEntrySet
-StoredList +StoredSortedKeySet
-StoredMap +StoredSortedMap
-StoredSortedEntrySet +StoredSortedValueSet
-StoredSortedKeySet +StoredValueSet
-StoredSortedMap +StringBinding
-StoredSortedValueSet +Transaction
-StoredValueSet +TransactionConfig
-TimeUnits +TransactionRunner
-TransactionRunner +TransactionStats
-TransactionWorker +TransactionWorker
-TupleBinding +TupleBinding
-TupleFormat +TupleInput
-TupleInput +TupleInputBinding
-TupleInputBinding +TupleMarshalledBinding
-TupleMarshalledBinding +TupleOutput
-TupleOutput +TupleSerialBinding
-TupleSerialBinding +TupleSerialFactory
-TupleSerialDbFactory +TupleSerialKeyCreator
-TupleSerialKeyExtractor +TupleSerialMarshalledBinding
-TupleSerialMarshalledBinding +TupleSerialMarshalledKeyCreator
-TupleSerialMarshalledKeyExtractor +TupleTupleBinding
-TupleTupleBinding +TupleTupleKeyCreator
-TupleTupleKeyExtractor +TupleTupleMarshalledBinding
-TupleTupleMarshalledBinding +TupleTupleMarshalledKeyCreator
-TupleTupleMarshalledKeyExtractor +UtfOps
-UtfOps +VerifyConfig
+
- diff --git a/db/docs/java/com/sleepycat/bind/ByteArrayBinding.html b/db/docs/java/com/sleepycat/bind/ByteArrayBinding.html new file mode 100644 index 000000000..b39b316ca --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/ByteArrayBinding.html @@ -0,0 +1,296 @@ + + + + + + +ByteArrayBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + +
ByteArrayBinding +BooleanBinding +
+BtreePrefixCalculator +
+BtreeStats +
+ByteArrayBinding +
+ByteBinding +
+CacheFile +
+CacheFilePriority +
+CacheFileStats +
+CacheStats +
+CharacterBinding
-ByteArrayFormat +CheckpointConfig
-ClassCatalog +ClassCatalog
-CurrentTransaction +CurrentTransaction
-DataBinding +Cursor
-DataBuffer +CursorConfig
-DataCursor +Database
-DataDb +DatabaseConfig
-DataFormat +DatabaseEntry
-DataIndex +DatabaseException
-DataStore +DatabaseStats
-DataThang +DatabaseType
-DataType +DeadlockException
-DataView +DoubleBinding
-Db +EntityBinding
-DbAppDispatch +EntryBinding
-DbAppendRecno +Environment
-DbBtreeCompare +EnvironmentConfig
-DbBtreePrefix +ErrorHandler
-DbBtreeStat +ExceptionUnwrapper
-Dbc +ExceptionWrapper
-DbClient +FastInputStream
-DbDeadlockException +FastOutputStream
-DbDupCompare +FeedbackHandler
-DbEnv +FloatBinding
-DbEnv.RepProcessMessage +HashStats
-DbEnvFeedback +Hasher
-DbEnvFeedbackHandler +IOExceptionWrapper
-DbErrcall +IntegerBinding
-DbErrorHandler +JoinConfig
-DbException +JoinCursor
-DbFeedback +KeyRange
-DbFeedbackHandler +Lock
-DbHash +LockDetectMode
-DbHashStat +LockMode
-DbKeyRange +LockNotGrantedException
-DbLock +LockOperation
-DbLockNotGrantedException +LockRequest
-DbLockRequest +LockRequestMode
-DbLockStat +LockStats
-DbLogc +LogCursor
-DbLogStat +LogRecordHandler
-DbLsn +LogSequenceNumber
-DbMemoryException +LogStats
-DbMpoolFile +LongBinding
-DbMpoolFStat +MapEntryParameter
-DbMpoolStat +MarshalledTupleEntry
-DbMultipleDataIterator +MarshalledTupleKeyEntity
-DbMultipleKeyDataIterator +MemoryException
-DbMultipleRecnoDataIterator +MessageHandler
-DbPanicHandler +MultipleDataEntry
-DbPreplist +MultipleEntry
-DbQueueStat +MultipleKeyDataEntry
-DbRepStat +MultipleRecnoDataEntry
-DbRepTransport +OperationStatus
-DbRunRecoveryException +PanicHandler
-DbSecondaryKeyCreate +PreparedTransaction
-Dbt +PrimaryKeyAssigner
-DbTxn +QueueStats
-DbTxnStat +RecordNumberAppender
-DbTxnStat.Active +RecordNumberBinding
-EntityBinding +RecoveryOperation
-ExceptionUnwrapper +ReplicationHandleDeadException
-ExceptionWrapper +ReplicationStats
-FastInputStream +ReplicationStatus
-FastOutputStream +ReplicationTransport
-ForeignKeyIndex +RunRecoveryException
-IntegrityConstraintException +RuntimeExceptionWrapper
-IOExceptionWrapper +SecondaryConfig
-KeyExtractor +SecondaryCursor
-KeyRangeException +SecondaryDatabase
-MapEntry +SecondaryKeyCreator
-MarshalledTupleData +Sequence
-MarshalledTupleKeyEntity +SequenceConfig
-PrimaryKeyAssigner +SequenceStats
-RecordNumberBinding +SerialBinding
-RecordNumberFormat +SerialInput
-RuntimeExceptionWrapper +SerialOutput
-SerialBinding +SerialSerialBinding
-SerialFormat +SerialSerialKeyCreator
-SerialInput +ShortBinding
-SerialOutput +StatsConfig
-SerialSerialBinding +StoredClassCatalog
-SerialSerialKeyExtractor +StoredCollection
-SimpleBuffer +StoredCollections
-StoredClassCatalog +StoredContainer
-StoredCollection +StoredEntrySet
-StoredCollections +StoredIterator
-StoredContainer +StoredKeySet
-StoredEntrySet +StoredList
-StoredIterator +StoredMap
-StoredKeySet +StoredSortedEntrySet
-StoredList +StoredSortedKeySet
-StoredMap +StoredSortedMap
-StoredSortedEntrySet +StoredSortedValueSet
-StoredSortedKeySet +StoredValueSet
-StoredSortedMap +StringBinding
-StoredSortedValueSet +Transaction
-StoredValueSet +TransactionConfig
-TimeUnits +TransactionRunner
-TransactionRunner +TransactionStats
-TransactionWorker +TransactionWorker
-TupleBinding +TupleBinding
-TupleFormat +TupleInput
-TupleInput +TupleInputBinding
-TupleInputBinding +TupleMarshalledBinding
-TupleMarshalledBinding +TupleOutput
-TupleOutput +TupleSerialBinding
-TupleSerialBinding +TupleSerialFactory
-TupleSerialDbFactory +TupleSerialKeyCreator
-TupleSerialKeyExtractor +TupleSerialMarshalledBinding
-TupleSerialMarshalledBinding +TupleSerialMarshalledKeyCreator
-TupleSerialMarshalledKeyExtractor +TupleTupleBinding
-TupleTupleBinding +TupleTupleKeyCreator
-TupleTupleKeyExtractor +TupleTupleMarshalledBinding
-TupleTupleMarshalledBinding +TupleTupleMarshalledKeyCreator
-TupleTupleMarshalledKeyExtractor +UtfOps
-UtfOps +VerifyConfig
+ + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +


+ +

+ +com.sleepycat.bind +
+Class ByteArrayBinding

+
+java.lang.Object
+  extended bycom.sleepycat.bind.ByteArrayBinding
+
+
+
All Implemented Interfaces:
EntryBinding
+
+
+
+
public class ByteArrayBinding
extends Object
implements EntryBinding
+ +

+A pass-through EntryBinding that uses the entry's byte array as + the key or data object. +
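As a brief illustration, and using only the constructor and methods documented
on this page, a ByteArrayBinding simply passes the byte array through:

import com.sleepycat.bind.ByteArrayBinding;
import com.sleepycat.bind.EntryBinding;
import com.sleepycat.db.DatabaseEntry;

...

EntryBinding binding = new ByteArrayBinding();
DatabaseEntry entry = new DatabaseEntry();
byte[] original = { 1, 2, 3 };

binding.objectToEntry(original, entry);            // entry now wraps the bytes
byte[] restored = (byte[]) binding.entryToObject(entry);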

+ +

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
ByteArrayBinding() + +
+          Creates a byte array binding.
+  + + + + + + + + + + + + + + + +
+Method Summary
+ ObjectentryToObject(DatabaseEntry entry) + +
+          Converts an entry buffer into an Object.
+ voidobjectToEntry(Object object, + DatabaseEntry entry) + +
+          Converts an Object into an entry buffer.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+ByteArrayBinding

+
+public ByteArrayBinding()
+
+
Creates a byte array binding. +

+

+ + + + + + + + +
+Method Detail
+ +

+entryToObject

+
+public Object entryToObject(DatabaseEntry entry)
+
+
Description copied from interface: EntryBinding
+
Converts an entry buffer into an Object. +

+

+
Specified by:
entryToObject in interface EntryBinding
+
+
+
Parameters:
entry - is the source entry buffer. +
Returns:
the resulting Object.
+
+
+
+ +

+objectToEntry

+
+public void objectToEntry(Object object,
+                          DatabaseEntry entry)
+
+
Description copied from interface: EntryBinding
+
Converts an Object into an entry buffer. +

+

+
Specified by:
objectToEntry in interface EntryBinding
+
+
+
Parameters:
object - is the source Object.
entry - is the destination entry buffer.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/EntityBinding.html b/db/docs/java/com/sleepycat/bind/EntityBinding.html new file mode 100644 index 000000000..a0e5e1041 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/EntityBinding.html @@ -0,0 +1,270 @@ + + + + + + +EntityBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.bind +
+Interface EntityBinding

+
+
All Known Implementing Classes:
SerialSerialBinding, TupleSerialBinding, TupleTupleBinding
+
+
+
+
public interface EntityBinding
+ +

+A binding between a key-value entry pair and an entity object. +
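As a brief illustration, a minimal sketch of implementing this interface for a
hypothetical Person entity; the Person class and the raw-byte encoding are
assumptions made only for this example.

import com.sleepycat.bind.EntityBinding;
import com.sleepycat.db.DatabaseEntry;

// Hypothetical entity type used only for illustration.
class Person {
    String name;    // stored as the key
    String email;   // stored as the data
    Person(String name, String email) { this.name = name; this.email = email; }
}

// Maps a Person to and from a key/data entry pair.
class PersonBinding implements EntityBinding {
    public Object entryToObject(DatabaseEntry key, DatabaseEntry data) {
        return new Person(new String(key.getData()), new String(data.getData()));
    }
    public void objectToKey(Object object, DatabaseEntry key) {
        key.setData(((Person) object).name.getBytes());
    }
    public void objectToData(Object object, DatabaseEntry data) {
        data.setData(((Person) object).email.getBytes());
    }
}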

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ ObjectentryToObject(DatabaseEntry key, + DatabaseEntry data) + +
+          Converts key and data entry buffers into an entity Object.
+ voidobjectToData(Object object, + DatabaseEntry data) + +
+          Extracts the data entry from an entity Object.
+ voidobjectToKey(Object object, + DatabaseEntry key) + +
+          Extracts the key entry from an entity Object.
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+entryToObject

+
+public Object entryToObject(DatabaseEntry key,
+                            DatabaseEntry data)
+
+
Converts key and data entry buffers into an entity Object. +

+

+
Parameters:
key - is the source key entry.
data - is the source data entry. +
Returns:
the resulting Object.
+
+
+
+ +

+objectToKey

+
+public void objectToKey(Object object,
+                        DatabaseEntry key)
+
+
Extracts the key entry from an entity Object. +

+

+
Parameters:
object - is the source Object.
key - is the destination entry buffer.
+
+
+
+ +

+objectToData

+
+public void objectToData(Object object,
+                         DatabaseEntry data)
+
+
Extracts the data entry from an entity Object. +

+

+
Parameters:
object - is the source Object.
data - is the destination entry buffer.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/EntryBinding.html b/db/docs/java/com/sleepycat/bind/EntryBinding.html new file mode 100644 index 000000000..387986375 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/EntryBinding.html @@ -0,0 +1,245 @@ + + + + + + +EntryBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.bind +
+Interface EntryBinding

+
+
All Known Implementing Classes:
ByteArrayBinding, RecordNumberBinding, SerialBinding, TupleBinding, TupleInputBinding
+
+
+
+
public interface EntryBinding
+ +

+A binding between a key or data entry and a key or data object. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ ObjectentryToObject(DatabaseEntry entry) + +
+          Converts an entry buffer into an Object.
+ voidobjectToEntry(Object object, + DatabaseEntry entry) + +
+          Converts an Object into an entry buffer.
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+entryToObject

+
+public Object entryToObject(DatabaseEntry entry)
+
+
Converts an entry buffer into an Object. +

+

+
Parameters:
entry - is the source entry buffer. +
Returns:
the resulting Object.
+
+
+
+ +

+objectToEntry

+
+public void objectToEntry(Object object,
+                          DatabaseEntry entry)
+
+
Converts an Object into an entry buffer. +

+

+
Parameters:
object - is the source Object.
entry - is the destination entry buffer.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/RecordNumberBinding.html b/db/docs/java/com/sleepycat/bind/RecordNumberBinding.html new file mode 100644 index 000000000..205881972 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/RecordNumberBinding.html @@ -0,0 +1,354 @@ + + + + + + +RecordNumberBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.bind +
+Class RecordNumberBinding

+
+java.lang.Object
+  extended bycom.sleepycat.bind.RecordNumberBinding
+
+
+
All Implemented Interfaces:
EntryBinding
+
+
+
+
public class RecordNumberBinding
extends Object
implements EntryBinding
+ +

+An EntryBinding that treats a record number key entry as a + Long key object. + +

Record numbers are returned as Long objects, although on + input any Number object may be used.
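As a brief illustration, using only the static and instance methods documented
on this page:

import com.sleepycat.bind.RecordNumberBinding;
import com.sleepycat.db.DatabaseEntry;

...

DatabaseEntry keyEntry = new DatabaseEntry();

// Store record number 42 in the entry, then read it back.
RecordNumberBinding.recordNumberToEntry(42, keyEntry);
long recno = RecordNumberBinding.entryToRecordNumber(keyEntry);

// The instance methods work with Long objects instead of primitives.
RecordNumberBinding binding = new RecordNumberBinding();
Long recnoObject = (Long) binding.entryToObject(keyEntry);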

+

+ +

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
RecordNumberBinding() + +
+          Creates a record number binding.
+  + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ ObjectentryToObject(DatabaseEntry entry) + +
+          Converts an entry buffer into an Object.
+static longentryToRecordNumber(DatabaseEntry entry) + +
+          Utility method for use by bindings to translate an entry buffer to a + record number integer.
+ voidobjectToEntry(Object object, + DatabaseEntry entry) + +
+          Converts an Object into an entry buffer.
+static voidrecordNumberToEntry(long recordNumber, + DatabaseEntry entry) + +
+          Utility method for use by bindings to translate a record number integer + to an entry buffer.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+RecordNumberBinding

+
+public RecordNumberBinding()
+
+
Creates a record number binding. +

+

+ + + + + + + + +
+Method Detail
+ +

+entryToObject

+
+public Object entryToObject(DatabaseEntry entry)
+
+
Description copied from interface: EntryBinding
+
Converts an entry buffer into an Object. +

+

+
Specified by:
entryToObject in interface EntryBinding
+
+
+
Parameters:
entry - is the source entry buffer. +
Returns:
the resulting Object.
+
+
+
+ +

+objectToEntry

+
+public void objectToEntry(Object object,
+                          DatabaseEntry entry)
+
+
Description copied from interface: EntryBinding
+
Converts an Object into an entry buffer. +

+

+
Specified by:
objectToEntry in interface EntryBinding
+
+
+
Parameters:
object - is the source Object.
entry - is the destination entry buffer.
+
+
+
+ +

+entryToRecordNumber

+
+public static long entryToRecordNumber(DatabaseEntry entry)
+
+
Utility method for use by bindings to translate an entry buffer to a + record number integer. +

+

+
+
+
+
Parameters:
entry - the entry buffer. +
Returns:
the record number.
+
+
+
+ +

+recordNumberToEntry

+
+public static void recordNumberToEntry(long recordNumber,
+                                       DatabaseEntry entry)
+
+
Utility method for use by bindings to translate a record number integer + to an entry buffer. +

+

+
+
+
+
Parameters:
recordNumber - the record number.
entry - the entry buffer to hold the record number.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/class-use/ByteArrayBinding.html b/db/docs/java/com/sleepycat/bind/class-use/ByteArrayBinding.html new file mode 100644 index 000000000..6b01a7bcb --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/class-use/ByteArrayBinding.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.bind.ByteArrayBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.ByteArrayBinding

+
+No usage of com.sleepycat.bind.ByteArrayBinding +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/class-use/EntityBinding.html b/db/docs/java/com/sleepycat/bind/class-use/EntityBinding.html new file mode 100644 index 000000000..f7665ce1f --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/class-use/EntityBinding.html @@ -0,0 +1,321 @@ + + + + + + +Uses of Interface com.sleepycat.bind.EntityBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Interface
com.sleepycat.bind.EntityBinding

+
+ + + + + + + + + + + + + + + + + +
+Packages that use EntityBinding
com.sleepycat.bind.serialBindings that use Java serialization. 
com.sleepycat.bind.tupleBindings that use sequences of primitive fields, or tuples. 
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
+  +

+ + + + + +
+Uses of EntityBinding in com.sleepycat.bind.serial
+  +

+ + + + + + + + + + + + + + + + + +
Classes in com.sleepycat.bind.serial that implement EntityBinding
+ classSerialSerialBinding + +
+          An abstract EntityBinding that treats an entity's key entry and + data entry as serialized objects.
+ classTupleSerialBinding + +
+          An abstract EntityBinding that treats an entity's key entry as + a tuple and its data entry as a serialized object.
+ classTupleSerialMarshalledBinding + +
+          A concrete TupleSerialBinding that delegates to the + MarshalledTupleKeyEntity interface of the entity class.
+  +

+ + + + + +
+Uses of EntityBinding in com.sleepycat.bind.tuple
+  +

+ + + + + + + + + + + + + +
Classes in com.sleepycat.bind.tuple that implement EntityBinding
+ classTupleTupleBinding + +
+          An abstract EntityBinding that treats an entity's key entry and + data entry as tuples.
+ classTupleTupleMarshalledBinding + +
+          A concrete TupleTupleBinding that delegates to the + MarshalledTupleEntry and + MarshalledTupleKeyEntity interfaces of the entity class.
+  +

+ + + + + +
+Uses of EntityBinding in com.sleepycat.collections
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Constructors in com.sleepycat.collections with parameters of type EntityBinding
StoredValueSet(Database database, + EntityBinding valueEntityBinding, + boolean writeAllowed) + +
+          Creates a value set entity view of a Database.
StoredSortedValueSet(Database database, + EntityBinding valueEntityBinding, + boolean writeAllowed) + +
+          Creates a sorted value set entity view of a Database.
StoredSortedMap(Database database, + EntryBinding keyBinding, + EntityBinding valueEntityBinding, + boolean writeAllowed) + +
+          Creates a sorted map entity view of a Database.
StoredSortedMap(Database database, + EntryBinding keyBinding, + EntityBinding valueEntityBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a sorted map entity view of a Database with a PrimaryKeyAssigner.
StoredMap(Database database, + EntryBinding keyBinding, + EntityBinding valueEntityBinding, + boolean writeAllowed) + +
+          Creates a map entity view of a Database.
StoredMap(Database database, + EntryBinding keyBinding, + EntityBinding valueEntityBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a map entity view of a Database with a PrimaryKeyAssigner.
StoredList(Database database, + EntityBinding valueEntityBinding, + boolean writeAllowed) + +
+          Creates a list entity view of a Database.
StoredList(Database database, + EntityBinding valueEntityBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a list entity view of a Database with a PrimaryKeyAssigner.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/class-use/EntryBinding.html b/db/docs/java/com/sleepycat/bind/class-use/EntryBinding.html new file mode 100644 index 000000000..705a298f9 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/class-use/EntryBinding.html @@ -0,0 +1,475 @@ + + + + + + +Uses of Interface com.sleepycat.bind.EntryBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Interface
com.sleepycat.bind.EntryBinding

+
+ + + + + + + + + + + + + + + + + + + + + +
+Packages that use EntryBinding
com.sleepycat.bindBindings between database entries and Java objects
+[reference guide]
com.sleepycat.bind.serialBindings that use Java serialization. 
com.sleepycat.bind.tupleBindings that use sequences of primitive fields, or tuples. 
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
+  +

+ + + + + +
+Uses of EntryBinding in com.sleepycat.bind
+  +

+ + + + + + + + + + + + + +
Classes in com.sleepycat.bind that implement EntryBinding
+ classByteArrayBinding + +
+          A pass-through EntryBinding that uses the entry's byte array as + the key or data object.
+ classRecordNumberBinding + +
+          An EntryBinding that treats a record number key entry as a + Long key object.
+  +

+ + + + + +
+Uses of EntryBinding in com.sleepycat.bind.serial
+  +

+ + + + + + + + + +
Classes in com.sleepycat.bind.serial that implement EntryBinding
+ classSerialBinding + +
+          A concrete EntryBinding that treats a key or data entry as + a serialized object.
+  +

+ + + + + +
+Uses of EntryBinding in com.sleepycat.bind.tuple
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Classes in com.sleepycat.bind.tuple that implement EntryBinding
+ classBooleanBinding + +
+          A concrete TupleBinding for a Boolean primitive + wrapper or a boolean primitive.
+ classByteBinding + +
+          A concrete TupleBinding for a Byte primitive + wrapper or a byte primitive.
+ classCharacterBinding + +
+          A concrete TupleBinding for a Character primitive + wrapper or a char primitive.
+ classDoubleBinding + +
+          A concrete TupleBinding for a Double primitive + wrapper or a double primitive.
+ classFloatBinding + +
+          A concrete TupleBinding for a Float primitive + wrapper or a float primitive.
+ classIntegerBinding + +
+          A concrete TupleBinding for a Integer primitive + wrapper or an int primitive.
+ classLongBinding + +
+          A concrete TupleBinding for a Long primitive + wrapper or a long primitive.
+ classShortBinding + +
+          A concrete TupleBinding for a Short primitive + wrapper or a short primitive.
+ classStringBinding + +
+          A concrete TupleBinding for a simple String value.
+ classTupleBinding + +
+          An abstract EntryBinding that treats a key or data entry as a + tuple; it includes predefined bindings for Java primitive types.
+ classTupleInputBinding + +
+          A concrete EntryBinding that uses the TupleInput + object as the key or data object.
+ classTupleMarshalledBinding + +
+          A concrete TupleBinding that delegates to the + MarshalledTupleEntry interface of the data or key object.
+  +

+ + + + + +
+Uses of EntryBinding in com.sleepycat.collections
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Constructors in com.sleepycat.collections with parameters of type EntryBinding
StoredValueSet(Database database, + EntryBinding valueBinding, + boolean writeAllowed) + +
+          Creates a value set view of a Database.
StoredSortedMap(Database database, + EntryBinding keyBinding, + EntryBinding valueBinding, + boolean writeAllowed) + +
+          Creates a sorted map view of a Database.
StoredSortedMap(Database database, + EntryBinding keyBinding, + EntryBinding valueBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a sorted map view of a Database with a PrimaryKeyAssigner.
StoredSortedMap(Database database, + EntryBinding keyBinding, + EntityBinding valueEntityBinding, + boolean writeAllowed) + +
+          Creates a sorted map entity view of a Database.
StoredSortedMap(Database database, + EntryBinding keyBinding, + EntityBinding valueEntityBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a sorted map entity view of a Database with a PrimaryKeyAssigner.
StoredSortedKeySet(Database database, + EntryBinding keyBinding, + boolean writeAllowed) + +
+          Creates a sorted key set view of a Database.
StoredMap(Database database, + EntryBinding keyBinding, + EntryBinding valueBinding, + boolean writeAllowed) + +
+          Creates a map view of a Database.
StoredMap(Database database, + EntryBinding keyBinding, + EntryBinding valueBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a map view of a Database with a PrimaryKeyAssigner.
StoredMap(Database database, + EntryBinding keyBinding, + EntityBinding valueEntityBinding, + boolean writeAllowed) + +
+          Creates a map entity view of a Database.
StoredMap(Database database, + EntryBinding keyBinding, + EntityBinding valueEntityBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a map entity view of a Database with a PrimaryKeyAssigner.
StoredList(Database database, + EntryBinding valueBinding, + boolean writeAllowed) + +
+          Creates a list view of a Database.
StoredList(Database database, + EntryBinding valueBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a list view of a Database with a PrimaryKeyAssigner.
StoredKeySet(Database database, + EntryBinding keyBinding, + boolean writeAllowed) + +
+          Creates a key set view of a Database.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/class-use/RecordNumberBinding.html b/db/docs/java/com/sleepycat/bind/class-use/RecordNumberBinding.html new file mode 100644 index 000000000..fb296d56d --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/class-use/RecordNumberBinding.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.bind.RecordNumberBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.RecordNumberBinding

+
+No usage of com.sleepycat.bind.RecordNumberBinding +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/package-frame.html b/db/docs/java/com/sleepycat/bind/package-frame.html new file mode 100644 index 000000000..a4b62dda4 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/package-frame.html @@ -0,0 +1,47 @@ + + + + + + +com.sleepycat.bind (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + +com.sleepycat.bind + + + + +
+Interfaces  + +
+EntityBinding +
+EntryBinding
+ + + + + + +
+Classes  + +
+ByteArrayBinding +
+RecordNumberBinding
+ + + + diff --git a/db/docs/java/com/sleepycat/bind/package-summary.html b/db/docs/java/com/sleepycat/bind/package-summary.html new file mode 100644 index 000000000..c88087a98 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/package-summary.html @@ -0,0 +1,189 @@ + + + + + + +com.sleepycat.bind (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+

+Package com.sleepycat.bind +

+Bindings between database entries and Java objects
+[reference guide]. +

+See: +
+          Description +

+ + + + + + + + + + + + + +
+Interface Summary
EntityBindingA binding between a key-value entry pair and an entity object.
EntryBindingA binding between a key or data entry and a key or data object.
+  + +

+ + + + + + + + + + + + + +
+Class Summary
ByteArrayBindingA pass-through EntryBinding that uses the entry's byte array as + the key or data object.
RecordNumberBindingAn EntryBinding that treats a record number key entry as a + Long key object.
+  + +

+

+Package com.sleepycat.bind Description +

+ +

+Bindings between database entries and Java objects
+[reference guide]. +

+ +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/package-tree.html b/db/docs/java/com/sleepycat/bind/package-tree.html new file mode 100644 index 000000000..d339a4e76 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/package-tree.html @@ -0,0 +1,152 @@ + + + + + + +com.sleepycat.bind Class Hierarchy (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Hierarchy For Package com.sleepycat.bind +

+
+
+
Package Hierarchies:
All Packages
+
+

+Class Hierarchy +

+ +

+Interface Hierarchy +

+ +
+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/package-use.html b/db/docs/java/com/sleepycat/bind/package-use.html new file mode 100644 index 000000000..55d74aa79 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/package-use.html @@ -0,0 +1,239 @@ + + + + + + +Uses of Package com.sleepycat.bind (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Package
com.sleepycat.bind

+
+ + + + + + + + + + + + + + + + + + + + + +
+Packages that use com.sleepycat.bind
com.sleepycat.bindBindings between database entries and Java objects
+[reference guide]
com.sleepycat.bind.serialBindings that use Java serialization. 
com.sleepycat.bind.tupleBindings that use sequences of primitive fields, or tuples. 
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
+  +

+ + + + + + + + +
+Classes in com.sleepycat.bind used by com.sleepycat.bind
EntryBinding + +
+          A binding between a key or data entry and a key or data object.
+  +

+ + + + + + + + + + + +
+Classes in com.sleepycat.bind used by com.sleepycat.bind.serial
EntityBinding + +
+          A binding between a key-value entry pair and an entity object.
EntryBinding + +
+          A binding between a key or data entry and a key or data object.
+  +

+ + + + + + + + + + + +
+Classes in com.sleepycat.bind used by com.sleepycat.bind.tuple
EntityBinding + +
+          A binding between a key-value entry pair and an entity object.
EntryBinding + +
+          A binding between a key or data entry and a key or data object.
+  +

+ + + + + + + + + + + +
+Classes in com.sleepycat.bind used by com.sleepycat.collections
EntityBinding + +
+          A binding between a key-value entry pair and an entity object.
EntryBinding + +
+          A binding between a key or data entry and a key or data object.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/serial/ClassCatalog.html b/db/docs/java/com/sleepycat/bind/serial/ClassCatalog.html new file mode 100644 index 000000000..7ede4e684 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/serial/ClassCatalog.html @@ -0,0 +1,302 @@ + + + + + + +ClassCatalog (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.bind.serial +
+Interface ClassCatalog

+
+
All Known Implementing Classes:
StoredClassCatalog
+
+
+
+
public interface ClassCatalog
+ +

+A catalog of class description information for use during object + serialization. + +

A catalog is used to store class descriptions separately from serialized + objects, to avoid redundantly storing information with each object. + When serialized objects are stored in a database, a StoredClassCatalog should be used.

+ +

This information is used for serialization of class descriptors or + java.io.ObjectStreamClass objects, each of which represents a unique class + format. For each unique format, a unique class ID is assigned by the + catalog. The class ID can then be used in the serialization stream in place + of the full class information. When used with SerialInput and + SerialOutput or any of the serial bindings, the use of the catalog + is transparent to the application.

+

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ voidclose() + +
+          Close a catalog database and release any cached resources.
+ ObjectStreamClassgetClassFormat(byte[] classID) + +
+          Return the ObjectStreamClass for the given class ID.
+ byte[]getClassID(ObjectStreamClass classDesc) + +
+          Return the class ID for the current version of the given class + description.
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+close

+
+public void close()
+           throws DatabaseException
+
+
Close a catalog database and release any cached resources. +

+

+ +
Throws: +
DatabaseException
+
+
+
+ +

+getClassID

+
+public byte[] getClassID(ObjectStreamClass classDesc)
+                  throws DatabaseException,
+                         ClassNotFoundException
+
+
Return the class ID for the current version of the given class + description. + This is used for storing in serialization streams in place of a full + class descriptor, since it is much more compact. To get back the + ObjectStreamClass for a class ID, call getClassFormat(byte[]). + This function causes a new class ID to be assigned if the class + description has changed. +

+

+
Parameters:
classDesc - The class description for which to return the + class ID. +
Returns:
The class ID for the current version of the class. +
Throws: +
DatabaseException +
ClassNotFoundException
+
+
+
+ +

+getClassFormat

+
+public ObjectStreamClass getClassFormat(byte[] classID)
+                                 throws DatabaseException,
+                                        ClassNotFoundException
+
+
Return the ObjectStreamClass for the given class ID. This may or may + not be the current class format, depending on whether the class has + changed since the class ID was generated. +

+

+
Parameters:
classID - The class ID for which to return the class format. +
Returns:
The class format for the given class ID, which may or may not + represent the current version of the class. +
Throws: +
DatabaseException +
ClassNotFoundException
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/serial/SerialBinding.html b/db/docs/java/com/sleepycat/bind/serial/SerialBinding.html new file mode 100644 index 000000000..f0fc2d8d4 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/serial/SerialBinding.html @@ -0,0 +1,338 @@ + + + + + + +SerialBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.bind.serial +
+Class SerialBinding

+
+java.lang.Object
+  extended bycom.sleepycat.bind.serial.SerialBinding
+
+
+
All Implemented Interfaces:
EntryBinding
+
+
+
+
public class SerialBinding
extends Object
implements EntryBinding
+ +

+A concrete EntryBinding that treats a key or data entry as + a serialized object. + +

This binding stores objects in serialized object format. The + deserialized objects are returned by the binding, and their + Class must implement the Serializable + interface.
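As a brief illustration, a usage sketch. The StoredClassCatalog(Database)
constructor used here to obtain a ClassCatalog is an assumption; see the
StoredClassCatalog page for the definitive API.

import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.SerialBinding;
import com.sleepycat.bind.serial.StoredClassCatalog;
import com.sleepycat.db.Database;
import com.sleepycat.db.DatabaseEntry;

...

Database catalogDb = null;   // a database reserved for class descriptions
// Database opens omitted for clarity.

try {
    // Assumed constructor: StoredClassCatalog(Database).
    ClassCatalog catalog = new StoredClassCatalog(catalogDb);

    // Bind serialized String objects; any Serializable class may be used.
    SerialBinding binding = new SerialBinding(catalog, String.class);

    DatabaseEntry dataEntry = new DatabaseEntry();
    binding.objectToEntry("hello", dataEntry);                    // serialize
    String restored = (String) binding.entryToObject(dataEntry);  // deserialize
} catch (Exception e) {
    // Exception handling goes here
}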

+

+ +

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
SerialBinding(ClassCatalog classCatalog, + Class baseClass) + +
+          Creates a serial binding.
+  + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ ObjectentryToObject(DatabaseEntry entry) + +
+          Deserialize an object from an entry buffer.
+ ClassgetBaseClass() + +
+          Returns the base class for this binding.
+ voidobjectToEntry(Object object, + DatabaseEntry entry) + +
+          Serialize an object into an entry buffer.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+SerialBinding

+
+public SerialBinding(ClassCatalog classCatalog,
+                     Class baseClass)
+
+
Creates a serial binding. +

+

Parameters:
classCatalog - is the catalog to hold shared class information and + for a database should be a StoredClassCatalog.
baseClass - is the base class for serialized objects stored using + this binding -- all objects using this binding must be an instance of + this class.
+ + + + + + + + +
+Method Detail
+ +

+getBaseClass

+
+public final Class getBaseClass()
+
+
Returns the base class for this binding. +

+

+
+
+
+ +
Returns:
the base class for this binding.
+
+
+
+ +

+entryToObject

+
+public Object entryToObject(DatabaseEntry entry)
+
+
Deserialize an object from an entry buffer. May only be called for data + that was serialized using objectToEntry(java.lang.Object, com.sleepycat.db.DatabaseEntry), since the fixed + serialization header is assumed to not be included in the input data. + SerialInput is used to deserialize the object. +

+

+
Specified by:
entryToObject in interface EntryBinding
+
+
+
Parameters:
entry - is the input serialized entry. +
Returns:
the output deserialized object.
+
+
+
+ +

+objectToEntry

+
+public void objectToEntry(Object object,
+                          DatabaseEntry entry)
+
+
Serialize an object into an entry buffer. The fixed serialization + header is not included in the output data to save space, and therefore + to deserialize the data the complementary entryToObject(com.sleepycat.db.DatabaseEntry) method + must be used. SerialOutput is used to serialize the object. +

+

+
Specified by:
objectToEntry in interface EntryBinding
+
+
+
Parameters:
object - is the input deserialized object.
entry - is the output serialized entry. +
Throws: +
IllegalArgumentException - if the object is not an instance of the + base class for this binding.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/serial/SerialInput.html b/db/docs/java/com/sleepycat/bind/serial/SerialInput.html new file mode 100644 index 000000000..057dbd6a8 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/serial/SerialInput.html @@ -0,0 +1,303 @@ + + + + + + +SerialInput (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.bind.serial +
+Class SerialInput

+
+java.lang.Object
+  extended byjava.io.InputStream
+      extended byjava.io.ObjectInputStream
+          extended bycom.sleepycat.bind.serial.SerialInput
+
+
+
All Implemented Interfaces:
DataInput, ObjectInput, ObjectStreamConstants
+
+
+
+
public class SerialInput
extends ObjectInputStream
+ +

+A specialized ObjectInputStream that gets class description + information from a ClassCatalog. It is used by + SerialBinding. + +

This class is used instead of an ObjectInputStream, which it + extends, to read an object stream written by the SerialOutput class. + For reading objects from a database, normally one of the serial binding + classes is used. SerialInput is used when an ObjectInputStream is needed along with compact storage. A ClassCatalog must be supplied, however, to store shared class + descriptions.

+

+ +

+


+ +

+ + + + + + + +
+Nested Class Summary
+ + + + + + + +
Nested classes inherited from class java.io.ObjectInputStream
ObjectInputStream.GetField
+  + + + + + + + +
+Field Summary
+ + + + + + + +
Fields inherited from interface java.io.ObjectStreamConstants
baseWireHandle, PROTOCOL_VERSION_1, PROTOCOL_VERSION_2, SC_BLOCK_DATA, SC_EXTERNALIZABLE, SC_SERIALIZABLE, SC_WRITE_METHOD, STREAM_MAGIC, STREAM_VERSION, SUBCLASS_IMPLEMENTATION_PERMISSION, SUBSTITUTION_PERMISSION, TC_ARRAY, TC_BASE, TC_BLOCKDATA, TC_BLOCKDATALONG, TC_CLASS, TC_CLASSDESC, TC_ENDBLOCKDATA, TC_EXCEPTION, TC_LONGSTRING, TC_MAX, TC_NULL, TC_OBJECT, TC_PROXYCLASSDESC, TC_REFERENCE, TC_RESET, TC_STRING
+  + + + + + + + + + + +
+Constructor Summary
SerialInput(InputStream in, + ClassCatalog classCatalog) + +
+          Creates a serial input stream.
+  + + + + + + + + + + +
Methods inherited from class java.io.ObjectInputStream
available, close, defaultReadObject, read, read, readBoolean, readByte, readChar, readDouble, readFields, readFloat, readFully, readFully, readInt, readLine, readLong, readObject, readShort, readUnshared, readUnsignedByte, readUnsignedShort, readUTF, registerValidation, skipBytes
+ + + + + + + +
Methods inherited from class java.io.InputStream
mark, markSupported, read, reset, skip
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+ + + + + + + +
Methods inherited from interface java.io.ObjectInput
read, skip
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+SerialInput

+
+public SerialInput(InputStream in,
+                   ClassCatalog classCatalog)
+            throws IOException
+
+
Creates a serial input stream. +

+

Parameters:
in - is the input stream from which compact serialized objects will + be read.
classCatalog - is the catalog containing the class descriptions + for the serialized objects.
+ + + + +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/serial/SerialOutput.html b/db/docs/java/com/sleepycat/bind/serial/SerialOutput.html new file mode 100644 index 000000000..aa6a5b29f --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/serial/SerialOutput.html @@ -0,0 +1,330 @@ + + + + + + +SerialOutput (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.bind.serial +
+Class SerialOutput

+
+java.lang.Object
+  extended byjava.io.OutputStream
+      extended byjava.io.ObjectOutputStream
+          extended bycom.sleepycat.bind.serial.SerialOutput
+
+
+
All Implemented Interfaces:
DataOutput, ObjectOutput, ObjectStreamConstants
+
+
+
+
public class SerialOutput
extends ObjectOutputStream
+ +

+A specialized ObjectOutputStream that stores class description + information in a ClassCatalog. It is used by + SerialBinding. + +

This class is used instead of an ObjectOutputStream, which it + extends, to write a compact object stream. For writing objects to a + database, normally one of the serial binding classes is used. SerialOutput is used when an ObjectOutputStream is needed along + with compact storage. A ClassCatalog must be supplied, however, to + store shared class descriptions.

+ +

The ClassCatalog is used to store class definitions rather than + embedding these into the stream. Instead, a class format identifier is + embedded into the stream. This identifier is then used by SerialInput to load the class format to deserialize the object.
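As a brief illustration, a round trip through SerialOutput and SerialInput
using an in-memory stream; the catalog setup is omitted, and the
writeObject(), readObject(), and close() calls are inherited from the java.io
stream classes listed on this page.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.SerialInput;
import com.sleepycat.bind.serial.SerialOutput;

...

ClassCatalog catalog = null;   // e.g. a StoredClassCatalog; opens omitted

try {
    // Write a compact serialized stream; class descriptions go to the catalog.
    ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
    SerialOutput out = new SerialOutput(bytesOut, catalog);
    out.writeObject("hello");
    out.close();

    // Read the object back, resolving class descriptions from the same catalog.
    SerialInput in =
        new SerialInput(new ByteArrayInputStream(bytesOut.toByteArray()), catalog);
    String restored = (String) in.readObject();
    in.close();
} catch (Exception e) {
    // Exception handling goes here
}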

+

+ +

+


+ +

+ + + + + + + +
+Nested Class Summary
+ + + + + + + +
Nested classes inherited from class java.io.ObjectOutputStream
ObjectOutputStream.PutField
+  + + + + + + + +
+Field Summary
+ + + + + + + +
Fields inherited from interface java.io.ObjectStreamConstants
baseWireHandle, PROTOCOL_VERSION_1, PROTOCOL_VERSION_2, SC_BLOCK_DATA, SC_EXTERNALIZABLE, SC_SERIALIZABLE, SC_WRITE_METHOD, STREAM_MAGIC, STREAM_VERSION, SUBCLASS_IMPLEMENTATION_PERMISSION, SUBSTITUTION_PERMISSION, TC_ARRAY, TC_BASE, TC_BLOCKDATA, TC_BLOCKDATALONG, TC_CLASS, TC_CLASSDESC, TC_ENDBLOCKDATA, TC_EXCEPTION, TC_LONGSTRING, TC_MAX, TC_NULL, TC_OBJECT, TC_PROXYCLASSDESC, TC_REFERENCE, TC_RESET, TC_STRING
+  + + + + + + + + + + +
+Constructor Summary
SerialOutput(OutputStream out, + ClassCatalog classCatalog) + +
+          Creates a serial output stream.
+  + + + + + + + + + + + +
+Method Summary
+static byte[]getStreamHeader() + +
+          Returns the fixed stream header used for all serialized streams in + PROTOCOL_VERSION_2 format.
+ + + + + + + +
Methods inherited from class java.io.ObjectOutputStream
close, defaultWriteObject, flush, putFields, reset, useProtocolVersion, write, write, write, writeBoolean, writeByte, writeBytes, writeChar, writeChars, writeDouble, writeFields, writeFloat, writeInt, writeLong, writeObject, writeShort, writeUnshared, writeUTF
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+SerialOutput

+
+public SerialOutput(OutputStream out,
+                    ClassCatalog classCatalog)
+             throws IOException
+
+
Creates a serial output stream. +

+

Parameters:
out - is the output stream to which the compact serialized objects + will be written.
classCatalog - is the catalog to which the class descriptions for + the serialized objects will be written.
+ + + + + + + + +
+Method Detail
+ +

+getStreamHeader

+
+public static byte[] getStreamHeader()
+
+
Returns the fixed stream header used for all serialized streams in + PROTOCOL_VERSION_2 format. To save space this header can be removed from + serialized streams before storage and inserted before deserializing. + SerialOutput always uses PROTOCOL_VERSION_2 serialization format + to guarantee that this header is fixed. SerialBinding removes + this header from serialized streams automatically.

+

+ +
Returns:
the fixed stream header.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/serial/SerialSerialBinding.html b/db/docs/java/com/sleepycat/bind/serial/SerialSerialBinding.html new file mode 100644 index 000000000..804028681 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/serial/SerialSerialBinding.html @@ -0,0 +1,435 @@ + + + + + + +SerialSerialBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.bind.serial +
+Class SerialSerialBinding

+
+java.lang.Object
+  extended bycom.sleepycat.bind.serial.SerialSerialBinding
+
+
+
All Implemented Interfaces:
EntityBinding
+
+
+
+
public abstract class SerialSerialBinding
extends Object
implements EntityBinding
+ +

+An abstract EntityBinding that treats an entity's key entry and + data entry as serialized objects. + +

This class takes care of serializing and deserializing the key and + data entry automatically. Its three abstract methods must be implemented by + a concrete subclass to convert the deserialized objects to/from an entity + object.
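As a brief illustration, a minimal concrete subclass for a hypothetical
PartEntity class; the entity class and its fields are assumptions made only
for this example.

import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.SerialSerialBinding;

// Hypothetical entity type used only for illustration.
class PartEntity {
    String partNumber;    // the key
    String description;   // the data
    PartEntity(String partNumber, String description) {
        this.partNumber = partNumber;
        this.description = description;
    }
}

// The three abstract methods map between the deserialized key/data objects
// and the entity object.
class PartBinding extends SerialSerialBinding {
    PartBinding(ClassCatalog catalog) {
        super(catalog, String.class, String.class);
    }
    public Object entryToObject(Object keyInput, Object dataInput) {
        return new PartEntity((String) keyInput, (String) dataInput);
    }
    public Object objectToKey(Object object) {
        return ((PartEntity) object).partNumber;
    }
    public Object objectToData(Object object) {
        return ((PartEntity) object).description;
    }
}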

+ +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + +
+Constructor Summary
SerialSerialBinding(ClassCatalog classCatalog, + Class keyClass, + Class dataClass) + +
+          Creates a serial-serial entity binding.
SerialSerialBinding(SerialBinding keyBinding, + SerialBinding dataBinding) + +
+          Creates a serial-serial entity binding.
+  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ ObjectentryToObject(DatabaseEntry key, + DatabaseEntry data) + +
+          Converts key and data entry buffers into an entity Object.
+abstract  ObjectentryToObject(Object keyInput, + Object dataInput) + +
+          Constructs an entity object from deserialized key and data objects.
+abstract  ObjectobjectToData(Object object) + +
+          Extracts a data object from an entity object.
+ voidobjectToData(Object object, + DatabaseEntry data) + +
+          Extracts the data entry from an entity Object.
+abstract  ObjectobjectToKey(Object object) + +
+          Extracts a key object from an entity object.
+ voidobjectToKey(Object object, + DatabaseEntry key) + +
+          Extracts the key entry from an entity Object.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+SerialSerialBinding

+
+public SerialSerialBinding(ClassCatalog classCatalog,
+                           Class keyClass,
+                           Class dataClass)
+
+
Creates a serial-serial entity binding. +

+

Parameters:
classCatalog - is the catalog to hold shared class information and + for a database should be a StoredClassCatalog.
keyClass - is the key base class.
dataClass - is the data base class.
+
+ +

+SerialSerialBinding

+
+public SerialSerialBinding(SerialBinding keyBinding,
+                           SerialBinding dataBinding)
+
+
Creates a serial-serial entity binding. +

+

Parameters:
keyBinding - is the key binding.
dataBinding - is the data binding.
+ + + + + + + + +
+Method Detail
+ +

+entryToObject

+
+public Object entryToObject(DatabaseEntry key,
+                            DatabaseEntry data)
+
+
Description copied from interface: EntityBinding
+
Converts key and data entry buffers into an entity Object. +

+

+
Specified by:
entryToObject in interface EntityBinding
+
+
+
Parameters:
key - is the source key entry.
data - is the source data entry. +
Returns:
the resulting Object.
+
+
+
+ +

+objectToKey

+
+public void objectToKey(Object object,
+                        DatabaseEntry key)
+
+
Description copied from interface: EntityBinding
+
Extracts the key entry from an entity Object. +

+

+
Specified by:
objectToKey in interface EntityBinding
+
+
+
Parameters:
object - is the source Object.
key - is the destination entry buffer.
+
+
+
+ +

+objectToData

+
+public void objectToData(Object object,
+                         DatabaseEntry data)
+
+
Description copied from interface: EntityBinding
+
Extracts the data entry from an entity Object. +

+

+
Specified by:
objectToData in interface EntityBinding
+
+
+
Parameters:
object - is the source Object.
data - is the destination entry buffer.
+
+
+
+ +

+entryToObject

+
+public abstract Object entryToObject(Object keyInput,
+                                     Object dataInput)
+
+
Constructs an entity object from deserialized key and data objects. +

+

+
+
+
+
Parameters:
keyInput - is the deserialized key object.
dataInput - is the deserialized data object. +
Returns:
the entity object constructed from the key and data.
+
+
+
+ +

+objectToKey

+
+public abstract Object objectToKey(Object object)
+
+
Extracts a key object from an entity object. +

+

+
+
+
+
Parameters:
object - is the entity object. +
Returns:
the deserialized key object.
+
+
+
+ +

+objectToData

+
+public abstract Object objectToData(Object object)
+
+
Extracts a data object from an entity object. +

+

+
+
+
+
Parameters:
object - is the entity object. +
Returns:
the deserialized data object.
+
+
+ +
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/bind/serial/SerialSerialKeyCreator.html b/db/docs/java/com/sleepycat/bind/serial/SerialSerialKeyCreator.html
new file mode 100644
index 000000000..1d8986a5a
--- /dev/null
+++ b/db/docs/java/com/sleepycat/bind/serial/SerialSerialKeyCreator.html
@@ -0,0 +1,411 @@

SerialSerialKeyCreator (Sleepycat Software, Inc. - Berkeley DB Java API)

+ +com.sleepycat.bind.serial +
+Class SerialSerialKeyCreator

+
+java.lang.Object
+  extended bycom.sleepycat.bind.serial.SerialSerialKeyCreator
+
+
+
All Implemented Interfaces:
SecondaryKeyCreator
+
+
+
+
public abstract class SerialSerialKeyCreator
extends Object
implements SecondaryKeyCreator
+ +

+An abstract key creator that uses a serial key and a serial data entry. This class takes care of serializing and deserializing the key and data entry automatically. The createSecondaryKey(Object, Object) abstract method must be implemented by a concrete subclass to create the index key from these objects.
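As an illustration only, the sketch below shows a concrete subclass. PartKey, PartData and SupplierKey are hypothetical serializable classes, and getSupplier() is an assumed accessor.

import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.SerialSerialKeyCreator;

public class PartSupplierKeyCreator extends SerialSerialKeyCreator {

    public PartSupplierKeyCreator(ClassCatalog catalog) {
        super(catalog, PartKey.class, PartData.class, SupplierKey.class);
    }

    // Build the index key from the deserialized primary key and data objects,
    // or return null when the record has no index key.
    public Object createSecondaryKey(Object primaryKey, Object data) {
        String supplier = ((PartData) data).getSupplier();
        return (supplier != null) ? new SupplierKey(supplier) : null;
    }
}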

+

+ +

+


+ +

Constructor Summary
SerialSerialKeyCreator(ClassCatalog classCatalog, Class primaryKeyClass, Class dataClass, Class indexKeyClass)
          Creates a serial-serial key creator.
SerialSerialKeyCreator(SerialBinding primaryKeyBinding, SerialBinding dataBinding, SerialBinding indexKeyBinding)
          Creates a serial-serial key creator.
Method Summary
 abstract Object createSecondaryKey(Object primaryKey, Object data)
          Creates the index key object from primary key and entry objects.
 boolean createSecondaryKey(SecondaryDatabase db, DatabaseEntry primaryKeyEntry, DatabaseEntry dataEntry, DatabaseEntry indexKeyEntry)
          Creates a secondary key entry, given a primary key and data entry.
 Object nullifyForeignKey(Object data)
          Clears the index key in a data object.
 boolean nullifyForeignKey(SecondaryDatabase db, DatabaseEntry dataEntry)
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+SerialSerialKeyCreator

+
+public SerialSerialKeyCreator(ClassCatalog classCatalog,
+                              Class primaryKeyClass,
+                              Class dataClass,
+                              Class indexKeyClass)
+
+
Creates a serial-serial key creator. +

+

Parameters:
classCatalog - is the catalog to hold shared class information; for a database it should be a StoredClassCatalog.
primaryKeyClass - is the primary key base class.
dataClass - is the data base class.
indexKeyClass - is the index key base class.
+
+ +

+SerialSerialKeyCreator

+
+public SerialSerialKeyCreator(SerialBinding primaryKeyBinding,
+                              SerialBinding dataBinding,
+                              SerialBinding indexKeyBinding)
+
+
Creates a serial-serial key creator.

+

Parameters:
primaryKeyBinding - is the primary key binding.
dataBinding - is the data binding.
indexKeyBinding - is the index key binding.
+ + + + + + + + +
+Method Detail
+ +

+createSecondaryKey

+
+public boolean createSecondaryKey(SecondaryDatabase db,
+                                  DatabaseEntry primaryKeyEntry,
+                                  DatabaseEntry dataEntry,
+                                  DatabaseEntry indexKeyEntry)
+                           throws DatabaseException
+
+
Description copied from interface: SecondaryKeyCreator
+
Creates a secondary key entry, given a primary key and data entry. +

+

+

+
Specified by:
createSecondaryKey in interface SecondaryKeyCreator
+
+
+
Parameters:
db - the database to which the secondary key will be added. +

primaryKeyEntry - the primary key entry. This parameter must not be modified + by this method. +

dataEntry - the primary data entry. This parameter must not be modified + by this method. +

indexKeyEntry - the secondary key created by this method. +

+

Returns:
true if a key was created, or false to indicate that the key is + not present. +

+

Throws: +
DatabaseException - if an error occurs attempting to create the + secondary key.
+
+
+
+ +

+nullifyForeignKey

+
+public boolean nullifyForeignKey(SecondaryDatabase db,
+                                 DatabaseEntry dataEntry)
+                          throws DatabaseException
+
+
+
+
+
+ +
Throws: +
DatabaseException
+
+
+
+ +

+createSecondaryKey

+
+public abstract Object createSecondaryKey(Object primaryKey,
+                                          Object data)
+
+
Creates the index key object from primary key and entry objects. +

+

+
+
+
+
Parameters:
primaryKey - is the deserialized source primary key entry, or + null if no primary key entry is used to construct the index key.
data - is the deserialized source data entry, or null if no + data entry is used to construct the index key. +
Returns:
the destination index key object, or null to indicate that + the key is not present.
+
+
+
+ +

+nullifyForeignKey

+
+public Object nullifyForeignKey(Object data)
+
+
Clears the index key in a data object. + +

On entry the data parameter contains the index key to be cleared. It + should be changed by this method such that createSecondaryKey(com.sleepycat.db.SecondaryDatabase, com.sleepycat.db.DatabaseEntry, com.sleepycat.db.DatabaseEntry, com.sleepycat.db.DatabaseEntry) + will return false. Other fields in the data object should remain + unchanged.

+

+

+
+
+
+
Parameters:
data - is the source and destination data object. +
Returns:
the destination data object, or null to indicate that the + key is not present and no change is necessary. The data returned may + be the same object passed as the data parameter or a newly created + object.
+
+
+ +
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/bind/serial/StoredClassCatalog.html b/db/docs/java/com/sleepycat/bind/serial/StoredClassCatalog.html
new file mode 100644
index 000000000..23c433f18
--- /dev/null
+++ b/db/docs/java/com/sleepycat/bind/serial/StoredClassCatalog.html
@@ -0,0 +1,354 @@

StoredClassCatalog (Sleepycat Software, Inc. - Berkeley DB Java API)

+ +com.sleepycat.bind.serial +
+Class StoredClassCatalog

+
+java.lang.Object
+  extended bycom.sleepycat.bind.serial.StoredClassCatalog
+
+
+
All Implemented Interfaces:
ClassCatalog
+
+
+
+
public class StoredClassCatalog
extends Object
implements ClassCatalog
+ +

+A ClassCatalog that is stored in a Database. + +

A single StoredClassCatalog object is normally used along with a set of databases that store serialized objects.
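A minimal sketch of opening a catalog database and creating the catalog follows, assuming the com.sleepycat.db classes documented in this release; the file name is illustrative and error handling is omitted.

import com.sleepycat.bind.serial.StoredClassCatalog;
import com.sleepycat.db.Database;
import com.sleepycat.db.DatabaseConfig;
import com.sleepycat.db.DatabaseType;

public class CatalogSetup {
    public static StoredClassCatalog openCatalog() throws Exception {
        DatabaseConfig config = new DatabaseConfig();
        config.setAllowCreate(true);
        // The catalog must be a BTREE database that does not allow duplicates.
        config.setType(DatabaseType.BTREE);
        Database catalogDb = new Database("classcatalog.db", null, config);
        return new StoredClassCatalog(catalogDb);
    }
}

The returned catalog instance would then be shared by all SerialBinding objects used with the application's databases.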

+

+ +

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
StoredClassCatalog(Database database) + +
+          Creates a catalog based on a given database.
Method Summary
 void close()
          Close a catalog database and release any cached resources.
 ObjectStreamClass getClassFormat(byte[] classID)
          Return the ObjectStreamClass for the given class ID.
 byte[] getClassID(ObjectStreamClass classFormat)
          Return the class ID for the current version of the given class description.
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+StoredClassCatalog

+
+public StoredClassCatalog(Database database)
+                   throws DatabaseException,
+                          IllegalArgumentException
+
+
Creates a catalog based on a given database. To save resources, only a + single catalog object should be used for each unique catalog database. +

+

Parameters:
database - an open database to use as the class catalog. It must + be a BTREE database and must not allow duplicates. +
Throws: +
DatabaseException - if an error occurs accessing the database. +
IllegalArgumentException - if the database is not a BTREE database or if it is configured to allow duplicates.
+ + + + + + + + +
+Method Detail
+ +

+close

+
+public void close()
+           throws DatabaseException
+
+
Description copied from interface: ClassCatalog
+
Close a catalog database and release any cached resources. +

+

+
Specified by:
close in interface ClassCatalog
+
+
+ +
Throws: +
DatabaseException
+
+
+
+ +

+getClassID

+
+public byte[] getClassID(ObjectStreamClass classFormat)
+                  throws DatabaseException,
+                         ClassNotFoundException
+
+
Description copied from interface: ClassCatalog
+
Return the class ID for the current version of the given class + description. + This is used for storing in serialization streams in place of a full + class descriptor, since it is much more compact. To get back the + ObjectStreamClass for a class ID, call ClassCatalog.getClassFormat(byte[]). + This function causes a new class ID to be assigned if the class + description has changed. +

+

+
Specified by:
getClassID in interface ClassCatalog
+
+
+
Parameters:
classFormat - The class description for which to return the + class ID. +
Returns:
The class ID for the current version of the class. +
Throws: +
DatabaseException +
ClassNotFoundException
+
+
+
+ +

+getClassFormat

+
+public ObjectStreamClass getClassFormat(byte[] classID)
+                                 throws DatabaseException,
+                                        ClassNotFoundException
+
+
Description copied from interface: ClassCatalog
+
Return the ObjectStreamClass for the given class ID. This may or may + not be the current class format, depending on whether the class has + changed since the class ID was generated. +

+

+
Specified by:
getClassFormat in interface ClassCatalog
+
+
+
Parameters:
classID - The class ID for which to return the class format. +
Returns:
The class format for the given class ID, which may or may not + represent the current version of the class. +
Throws: +
DatabaseException +
ClassNotFoundException
+
+
+ +
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/bind/serial/TupleSerialBinding.html b/db/docs/java/com/sleepycat/bind/serial/TupleSerialBinding.html
new file mode 100644
index 000000000..a9a48fd2d
--- /dev/null
+++ b/db/docs/java/com/sleepycat/bind/serial/TupleSerialBinding.html
@@ -0,0 +1,437 @@

TupleSerialBinding (Sleepycat Software, Inc. - Berkeley DB Java API)

+ +com.sleepycat.bind.serial +
+Class TupleSerialBinding

+
+java.lang.Object
+  extended bycom.sleepycat.bind.serial.TupleSerialBinding
+
+
+
All Implemented Interfaces:
EntityBinding
+
+
+
Direct Known Subclasses:
TupleSerialMarshalledBinding
+
+
+
+
public abstract class TupleSerialBinding
extends Object
implements EntityBinding
+ +

+An abstract EntityBinding that treats an entity's key entry as + a tuple and its data entry as a serialized object. + +

This class takes care of serializing and deserializing the data entry, + and converting the key entry to/from TupleInput and TupleOutput objects. Its three abstract methods must be implemented by a + concrete subclass to convert these objects to/from an entity object.
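A minimal sketch of a concrete subclass follows; PartData and PartEntity are hypothetical classes, with the part number stored in the tuple key and the remaining fields in the serialized data entry.

import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.TupleSerialBinding;
import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;

public class PartBinding extends TupleSerialBinding {

    public PartBinding(ClassCatalog catalog) {
        super(catalog, PartData.class);
    }

    // Combine the tuple key fields and the deserialized data object into an entity.
    public Object entryToObject(TupleInput keyInput, Object dataInput) {
        String number = keyInput.readString();
        PartData data = (PartData) dataInput;
        return new PartEntity(number, data.getName());
    }

    // Write the entity's key fields to the key tuple.
    public void objectToKey(Object object, TupleOutput keyOutput) {
        keyOutput.writeString(((PartEntity) object).getNumber());
    }

    // Return the data portion of the entity for serialization.
    public Object objectToData(Object object) {
        return new PartData(((PartEntity) object).getName());
    }
}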

+ +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + +
+Constructor Summary
TupleSerialBinding(ClassCatalog classCatalog, + Class baseClass) + +
+          Creates a tuple-serial entity binding.
TupleSerialBinding(SerialBinding dataBinding) + +
+          Creates a tuple-serial entity binding.
Method Summary
 Object entryToObject(DatabaseEntry key, DatabaseEntry data)
          Converts key and data entry buffers into an entity Object.
 abstract Object entryToObject(TupleInput keyInput, Object dataInput)
          Constructs an entity object from TupleInput key entry and deserialized data entry objects.
 abstract Object objectToData(Object object)
          Extracts a data object from an entity object.
 void objectToData(Object object, DatabaseEntry data)
          Extracts the data entry from an entity Object.
 void objectToKey(Object object, DatabaseEntry key)
          Extracts the key entry from an entity Object.
 abstract void objectToKey(Object object, TupleOutput keyOutput)
          Extracts a key tuple from an entity object.
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+TupleSerialBinding

+
+public TupleSerialBinding(ClassCatalog classCatalog,
+                          Class baseClass)
+
+
Creates a tuple-serial entity binding. +

+

Parameters:
classCatalog - is the catalog to hold shared class information; for a database it should be a StoredClassCatalog.
baseClass - is the base class.
+
+ +

+TupleSerialBinding

+
+public TupleSerialBinding(SerialBinding dataBinding)
+
+
Creates a tuple-serial entity binding. +

+

Parameters:
dataBinding - is the data binding.
+ + + + + + + + +
+Method Detail
+ +

+entryToObject

+
+public Object entryToObject(DatabaseEntry key,
+                            DatabaseEntry data)
+
+
Description copied from interface: EntityBinding
+
Converts key and data entry buffers into an entity Object. +

+

+
Specified by:
entryToObject in interface EntityBinding
+
+
+
Parameters:
key - is the source key entry.
data - is the source data entry. +
Returns:
the resulting Object.
+
+
+
+ +

+objectToKey

+
+public void objectToKey(Object object,
+                        DatabaseEntry key)
+
+
Description copied from interface: EntityBinding
+
Extracts the key entry from an entity Object. +

+

+
Specified by:
objectToKey in interface EntityBinding
+
+
+
Parameters:
object - is the source Object.
key - is the destination entry buffer.
+
+
+
+ +

+objectToData

+
+public void objectToData(Object object,
+                         DatabaseEntry data)
+
+
Description copied from interface: EntityBinding
+
Extracts the data entry from an entity Object. +

+

+
Specified by:
objectToData in interface EntityBinding
+
+
+
Parameters:
object - is the source Object.
data - is the destination entry buffer.
+
+
+
+ +

+entryToObject

+
+public abstract Object entryToObject(TupleInput keyInput,
+                                     Object dataInput)
+
+
Constructs an entity object from TupleInput key entry and + deserialized data entry objects. +

+

+
+
+
+
Parameters:
keyInput - is the TupleInput key entry object.
dataInput - is the deserialized data entry object. +
Returns:
the entity object constructed from the key and data.
+
+
+
+ +

+objectToKey

+
+public abstract void objectToKey(Object object,
+                                 TupleOutput keyOutput)
+
+
Extracts a key tuple from an entity object. +

+

+
+
+
+
Parameters:
object - is the entity object.
keyOutput - is the TupleOutput to which the key should be + written.
+
+
+
+ +

+objectToData

+
+public abstract Object objectToData(Object object)
+
+
Extracts a data object from an entity object. +

+

+
+
+
+
Parameters:
object - is the entity object. +
Returns:
the deserialized data object.
+
+
+ +
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/bind/serial/TupleSerialKeyCreator.html b/db/docs/java/com/sleepycat/bind/serial/TupleSerialKeyCreator.html
new file mode 100644
index 000000000..a54ea1ac2
--- /dev/null
+++ b/db/docs/java/com/sleepycat/bind/serial/TupleSerialKeyCreator.html
@@ -0,0 +1,415 @@

TupleSerialKeyCreator (Sleepycat Software, Inc. - Berkeley DB Java API)

+ +com.sleepycat.bind.serial +
+Class TupleSerialKeyCreator

+
+java.lang.Object
+  extended bycom.sleepycat.bind.serial.TupleSerialKeyCreator
+
+
+
All Implemented Interfaces:
SecondaryKeyCreator
+
+
+
Direct Known Subclasses:
TupleSerialMarshalledKeyCreator
+
+
+
+
public abstract class TupleSerialKeyCreator
extends Object
implements SecondaryKeyCreator
+ +

+An abstract key creator that uses a tuple key and a serial data entry. This class takes care of serializing and deserializing the data entry, and converting the key entry to/from TupleInput and TupleOutput objects. The createSecondaryKey(TupleInput, Object, TupleOutput) abstract method must be implemented by a concrete subclass to create the index key from these objects.
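A minimal sketch of a concrete subclass, assuming a hypothetical PartData class with an optional supplier field:

import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.TupleSerialKeyCreator;
import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;

public class SupplierKeyCreator extends TupleSerialKeyCreator {

    public SupplierKeyCreator(ClassCatalog catalog) {
        super(catalog, PartData.class);
    }

    // Write the index key to the output tuple, or return false when it is absent.
    public boolean createSecondaryKey(TupleInput primaryKeyInput,
                                      Object dataInput,
                                      TupleOutput indexKeyOutput) {
        String supplier = ((PartData) dataInput).getSupplier();
        if (supplier == null) {
            return false;
        }
        indexKeyOutput.writeString(supplier);
        return true;
    }
}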

+

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + +
+Constructor Summary
TupleSerialKeyCreator(ClassCatalog classCatalog, + Class dataClass) + +
+          Creates a tuple-serial key creator.
TupleSerialKeyCreator(SerialBinding dataBinding) + +
+          Creates a tuple-serial key creator.
Method Summary
 boolean createSecondaryKey(SecondaryDatabase db, DatabaseEntry primaryKeyEntry, DatabaseEntry dataEntry, DatabaseEntry indexKeyEntry)
          Creates a secondary key entry, given a primary key and data entry.
 abstract boolean createSecondaryKey(TupleInput primaryKeyInput, Object dataInput, TupleOutput indexKeyOutput)
          Creates the index key entry from primary key tuple entry and deserialized data entry.
 Object nullifyForeignKey(Object data)
          Clears the index key in the deserialized data entry.
 boolean nullifyForeignKey(SecondaryDatabase db, DatabaseEntry dataEntry)
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+TupleSerialKeyCreator

+
+public TupleSerialKeyCreator(ClassCatalog classCatalog,
+                             Class dataClass)
+
+
Creates a tuple-serial key creator. +

+

Parameters:
classCatalog - is the catalog to hold shared class information; for a database it should be a StoredClassCatalog.
dataClass - is the data base class.
+
+ +

+TupleSerialKeyCreator

+
+public TupleSerialKeyCreator(SerialBinding dataBinding)
+
+
Creates a tuple-serial key creator. +

+

Parameters:
dataBinding - is the data binding.
+ + + + + + + + +
+Method Detail
+ +

+createSecondaryKey

+
+public boolean createSecondaryKey(SecondaryDatabase db,
+                                  DatabaseEntry primaryKeyEntry,
+                                  DatabaseEntry dataEntry,
+                                  DatabaseEntry indexKeyEntry)
+                           throws DatabaseException
+
+
Description copied from interface: SecondaryKeyCreator
+
Creates a secondary key entry, given a primary key and data entry. +

+

+

+
Specified by:
createSecondaryKey in interface SecondaryKeyCreator
+
+
+
Parameters:
db - the database to which the secondary key will be added. +

primaryKeyEntry - the primary key entry. This parameter must not be modified + by this method. +

dataEntry - the primary data entry. This parameter must not be modified + by this method. +

indexKeyEntry - the secondary key created by this method. +

+

Returns:
true if a key was created, or false to indicate that the key is + not present. +

+

Throws: +
DatabaseException - if an error occurs attempting to create the + secondary key.
+
+
+
+ +

+nullifyForeignKey

+
+public boolean nullifyForeignKey(SecondaryDatabase db,
+                                 DatabaseEntry dataEntry)
+                          throws DatabaseException
+
+
+
+
+
+ +
Throws: +
DatabaseException
+
+
+
+ +

+createSecondaryKey

+
+public abstract boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                           Object dataInput,
+                                           TupleOutput indexKeyOutput)
+
+
Creates the index key entry from primary key tuple entry and + deserialized data entry. +

+

+
+
+
+
Parameters:
primaryKeyInput - is the TupleInput for the primary key + entry, or null if no primary key entry is used to construct the index + key.
dataInput - is the deserialized data entry, or null if no data + entry is used to construct the index key.
indexKeyOutput - is the destination index key tuple. For index + keys which are optionally present, no tuple entry should be output to + indicate that the key is not present or null. +
Returns:
true if a key was created, or false to indicate that the key is + not present.
+
+
+
+ +

+nullifyForeignKey

+
+public Object nullifyForeignKey(Object data)
+
+
Clears the index key in the deserialized data entry. + +

On entry the data parameter contains the index key to be cleared. It + should be changed by this method such that createSecondaryKey(com.sleepycat.db.SecondaryDatabase, com.sleepycat.db.DatabaseEntry, com.sleepycat.db.DatabaseEntry, com.sleepycat.db.DatabaseEntry) + will return false. Other fields in the data object should remain + unchanged.
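For example, an override along these lines (added to the SupplierKeyCreator sketch shown earlier) would support foreign key nullification; PartData and its supplier accessors are hypothetical:

    // Clear the index key field and return the modified data object,
    // or null when the key is not present and no change is needed.
    public Object nullifyForeignKey(Object data) {
        PartData partData = (PartData) data;
        if (partData.getSupplier() != null) {
            partData.setSupplier(null);
            return partData;
        }
        return null;
    }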

+

+

+
+
+
+
Parameters:
data - is the source and destination deserialized data + entry. +
Returns:
the destination data object, or null to indicate that the + key is not present and no change is necessary. The data returned may + be the same object passed as the data parameter or a newly created + object.
+
+
+ +
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/bind/serial/TupleSerialMarshalledBinding.html b/db/docs/java/com/sleepycat/bind/serial/TupleSerialMarshalledBinding.html
new file mode 100644
index 000000000..108d40e2f
--- /dev/null
+++ b/db/docs/java/com/sleepycat/bind/serial/TupleSerialMarshalledBinding.html
@@ -0,0 +1,370 @@

TupleSerialMarshalledBinding (Sleepycat Software, Inc. - Berkeley DB Java API)

+ +com.sleepycat.bind.serial +
+Class TupleSerialMarshalledBinding

+
+java.lang.Object
+  extended bycom.sleepycat.bind.serial.TupleSerialBinding
+      extended bycom.sleepycat.bind.serial.TupleSerialMarshalledBinding
+
+
+
All Implemented Interfaces:
EntityBinding
+
+
+
+
public class TupleSerialMarshalledBinding
extends TupleSerialBinding
+ +

+A concrete TupleSerialBinding that delegates to the + MarshalledTupleKeyEntity interface of the entity class. + +

The MarshalledTupleKeyEntity interface must be implemented by the + entity class to convert between the key/data entry and entity object.

+ +

The binding is "tricky" in that it uses the entity class for both the + stored data entry and the combined entity object. To do this, the entity's + key field(s) are transient and are set by the binding after the data object + has been deserialized. This avoids the use of a "data" class completely. +
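A sketch of a hypothetical entity class written for this binding follows. It assumes the MarshalledTupleKeyEntity methods from the tuple package of this release; the Part class, its fields, and the "supplier" key name are all illustrative.

import java.io.Serializable;
import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity;
import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;

public class Part implements MarshalledTupleKeyEntity, Serializable {

    private transient String number;   // primary key field, not serialized with the data
    private String name;
    private String supplier;           // optional secondary key field

    public Part(String number, String name, String supplier) {
        this.number = number;
        this.name = name;
        this.supplier = supplier;
    }

    // Write the primary key fields to the key tuple.
    public void marshalPrimaryKey(TupleOutput keyOutput) {
        keyOutput.writeString(number);
    }

    // Restore the transient key fields after the data object has been deserialized.
    public void unmarshalPrimaryKey(TupleInput keyInput) {
        number = keyInput.readString();
    }

    // Write the named index key, returning false when it is not present.
    public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) {
        if ("supplier".equals(keyName) && supplier != null) {
            keyOutput.writeString(supplier);
            return true;
        }
        return false;
    }

    // Clear the named foreign key field so that the index entry is removed.
    public boolean nullifyForeignKey(String keyName) {
        if ("supplier".equals(keyName) && supplier != null) {
            supplier = null;
            return true;
        }
        return false;
    }
}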

+

+ +

+

+
See Also:
MarshalledTupleKeyEntity
+
+ +

+ + + + + + + + + + + + + + + + + + + +
+Constructor Summary
TupleSerialMarshalledBinding(ClassCatalog classCatalog, + Class baseClass) + +
+          Creates a tuple-serial marshalled binding object.
TupleSerialMarshalledBinding(SerialBinding dataBinding) + +
+          Creates a tuple-serial marshalled binding object.
Method Summary
 Object entryToObject(TupleInput tupleInput, Object javaInput)
          Constructs an entity object from TupleInput key entry and deserialized data entry objects.
 Object objectToData(Object object)
          Extracts a data object from an entity object.
 void objectToKey(Object object, TupleOutput output)
          Extracts a key tuple from an entity object.
Methods inherited from class com.sleepycat.bind.serial.TupleSerialBinding
entryToObject, objectToData, objectToKey
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+TupleSerialMarshalledBinding

+
+public TupleSerialMarshalledBinding(ClassCatalog classCatalog,
+                                    Class baseClass)
+
+
Creates a tuple-serial marshalled binding object. +

+

Parameters:
classCatalog - is the catalog to hold shared class information; for a database it should be a StoredClassCatalog.
baseClass - is the base class for serialized objects stored using + this binding -- all objects using this binding must be an instance of + this class.
+
+ +

+TupleSerialMarshalledBinding

+
+public TupleSerialMarshalledBinding(SerialBinding dataBinding)
+
+
Creates a tuple-serial marshalled binding object. +

+

Parameters:
dataBinding - is the binding used for serializing and deserializing + the entity object.
+ + + + + + + + +
+Method Detail
+ +

+entryToObject

+
+public Object entryToObject(TupleInput tupleInput,
+                            Object javaInput)
+
+
Description copied from class: TupleSerialBinding
+
Constructs an entity object from TupleInput key entry and + deserialized data entry objects. +

+

+
Specified by:
entryToObject in class TupleSerialBinding
+
+
+
Parameters:
tupleInput - is the TupleInput key entry object.
javaInput - is the deserialized data entry object. +
Returns:
the entity object constructed from the key and data.
+
+
+
+ +

+objectToKey

+
+public void objectToKey(Object object,
+                        TupleOutput output)
+
+
Description copied from class: TupleSerialBinding
+
Extracts a key tuple from an entity object. +

+

+
Specified by:
objectToKey in class TupleSerialBinding
+
+
+
Parameters:
object - is the entity object.
output - is the TupleOutput to which the key should be + written.
+
+
+
+ +

+objectToData

+
+public Object objectToData(Object object)
+
+
Description copied from class: TupleSerialBinding
+
Extracts a data object from an entity object. +

+

+
Specified by:
objectToData in class TupleSerialBinding
+
+
+
Parameters:
object - is the entity object. +
Returns:
the deserialized data object.
+
+
+ +
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/bind/serial/TupleSerialMarshalledKeyCreator.html b/db/docs/java/com/sleepycat/bind/serial/TupleSerialMarshalledKeyCreator.html
new file mode 100644
index 000000000..2eddcf4f2
--- /dev/null
+++ b/db/docs/java/com/sleepycat/bind/serial/TupleSerialMarshalledKeyCreator.html
@@ -0,0 +1,330 @@

TupleSerialMarshalledKeyCreator (Sleepycat Software, Inc. - Berkeley DB Java API)

+ +com.sleepycat.bind.serial +
+Class TupleSerialMarshalledKeyCreator

+
+java.lang.Object
+  extended bycom.sleepycat.bind.serial.TupleSerialKeyCreator
+      extended bycom.sleepycat.bind.serial.TupleSerialMarshalledKeyCreator
+
+
+
All Implemented Interfaces:
SecondaryKeyCreator
+
+
+
+
public class TupleSerialMarshalledKeyCreator
extends TupleSerialKeyCreator
+ +

+A concrete key creator that works in conjunction with a TupleSerialMarshalledBinding. This key creator works by calling the + methods of the MarshalledTupleKeyEntity interface to create and + clear the index key fields. +
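A brief usage sketch, reusing the hypothetical Part entity class and "supplier" key name from the TupleSerialMarshalledBinding page:

import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.TupleSerialMarshalledBinding;
import com.sleepycat.bind.serial.TupleSerialMarshalledKeyCreator;

public class SupplierIndexSetup {
    public static TupleSerialMarshalledKeyCreator supplierKeyCreator(ClassCatalog catalog) {
        // The key name is passed through to MarshalledTupleKeyEntity.marshalSecondaryKey().
        TupleSerialMarshalledBinding binding =
            new TupleSerialMarshalledBinding(catalog, Part.class);
        return new TupleSerialMarshalledKeyCreator(binding, "supplier");
    }
}

The returned key creator would typically be supplied when configuring the secondary database that holds the index.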

+ +

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
TupleSerialMarshalledKeyCreator(TupleSerialMarshalledBinding binding, + String keyName) + +
+          Creates a tuple-serial marshalled key creator.
Method Summary
 boolean createSecondaryKey(TupleInput primaryKeyInput, Object dataInput, TupleOutput indexKeyOutput)
          Creates the index key entry from primary key tuple entry and deserialized data entry.
 Object nullifyForeignKey(Object dataInput)
          Clears the index key in the deserialized data entry.
Methods inherited from class com.sleepycat.bind.serial.TupleSerialKeyCreator
createSecondaryKey, nullifyForeignKey
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+TupleSerialMarshalledKeyCreator

+
+public TupleSerialMarshalledKeyCreator(TupleSerialMarshalledBinding binding,
+                                       String keyName)
+
+
Creates a tuple-serial marshalled key creator. +

+

Parameters:
binding - is the binding used for the tuple-serial entity.
keyName - is the key name passed to the MarshalledTupleKeyEntity.marshalSecondaryKey(java.lang.String, com.sleepycat.bind.tuple.TupleOutput) method to identify the + index key.
+ + + + + + + + +
+Method Detail
+ +

+createSecondaryKey

+
+public boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                  Object dataInput,
+                                  TupleOutput indexKeyOutput)
+
+
Description copied from class: TupleSerialKeyCreator
+
Creates the index key entry from primary key tuple entry and + deserialized data entry. +

+

+
Specified by:
createSecondaryKey in class TupleSerialKeyCreator
+
+
+
Parameters:
primaryKeyInput - is the TupleInput for the primary key + entry, or null if no primary key entry is used to construct the index + key.
dataInput - is the deserialized data entry, or null if no data + entry is used to construct the index key.
indexKeyOutput - is the destination index key tuple. For index + keys which are optionally present, no tuple entry should be output to + indicate that the key is not present or null. +
Returns:
true if a key was created, or false to indicate that the key is + not present.
+
+
+
+ +

+nullifyForeignKey

+
+public Object nullifyForeignKey(Object dataInput)
+
+
Description copied from class: TupleSerialKeyCreator
+
Clears the index key in the deserialized data entry. + +

On entry the data parameter contains the index key to be cleared. It + should be changed by this method such that TupleSerialKeyCreator.createSecondaryKey(com.sleepycat.db.SecondaryDatabase, com.sleepycat.db.DatabaseEntry, com.sleepycat.db.DatabaseEntry, com.sleepycat.db.DatabaseEntry) + will return false. Other fields in the data object should remain + unchanged.

+

+

+
Overrides:
nullifyForeignKey in class TupleSerialKeyCreator
+
+
+
Parameters:
dataInput - is the source and destination deserialized data + entry. +
Returns:
the destination data object, or null to indicate that the + key is not present and no change is necessary. The data returned may + be the same object passed as the data parameter or a newly created + object.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/serial/class-use/ClassCatalog.html b/db/docs/java/com/sleepycat/bind/serial/class-use/ClassCatalog.html new file mode 100644 index 000000000..147f150fb --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/serial/class-use/ClassCatalog.html @@ -0,0 +1,283 @@ + + + + + + +Uses of Interface com.sleepycat.bind.serial.ClassCatalog (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Interface
com.sleepycat.bind.serial.ClassCatalog

+
+ + + + + + + + + + + + + +
+Packages that use ClassCatalog
com.sleepycat.bind.serialBindings that use Java serialization. 
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
+  +

+ + + + + +
+Uses of ClassCatalog in com.sleepycat.bind.serial
+  +

+ + + + + + + + + +
Classes in com.sleepycat.bind.serial that implement ClassCatalog
+ classStoredClassCatalog + +
+          A ClassCatalog that is stored in a Database.
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Constructors in com.sleepycat.bind.serial with parameters of type ClassCatalog
TupleSerialMarshalledBinding(ClassCatalog classCatalog, + Class baseClass) + +
+          Creates a tuple-serial marshalled binding object.
TupleSerialKeyCreator(ClassCatalog classCatalog, + Class dataClass) + +
+          Creates a tuple-serial key creator.
TupleSerialBinding(ClassCatalog classCatalog, + Class baseClass) + +
+          Creates a tuple-serial entity binding.
SerialSerialKeyCreator(ClassCatalog classCatalog, + Class primaryKeyClass, + Class dataClass, + Class indexKeyClass) + +
+          Creates a serial-serial key creator.
SerialSerialBinding(ClassCatalog classCatalog, + Class keyClass, + Class dataClass) + +
+          Creates a serial-serial entity binding.
SerialOutput(OutputStream out, + ClassCatalog classCatalog) + +
+          Creates a serial output stream.
SerialInput(InputStream in, + ClassCatalog classCatalog) + +
+          Creates a serial input stream.
SerialBinding(ClassCatalog classCatalog, + Class baseClass) + +
+          Creates a serial binding.
+  +

+ + + + + +
+Uses of ClassCatalog in com.sleepycat.collections
+  +

+ + + + + + + + + +
Methods in com.sleepycat.collections that return ClassCatalog
+ ClassCatalogTupleSerialFactory.getCatalog() + +
+          Returns the class catalog associated with this factory.
+  +

+ + + + + + + + +
Constructors in com.sleepycat.collections with parameters of type ClassCatalog
TupleSerialFactory(ClassCatalog catalog) + +
+          Creates a tuple-serial factory for given environment and class catalog.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/serial/class-use/SerialBinding.html b/db/docs/java/com/sleepycat/bind/serial/class-use/SerialBinding.html new file mode 100644 index 000000000..9e8c3c58c --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/serial/class-use/SerialBinding.html @@ -0,0 +1,197 @@ + + + + + + +Uses of Class com.sleepycat.bind.serial.SerialBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.serial.SerialBinding

+
+ + + + + + + + + +
+Packages that use SerialBinding
com.sleepycat.bind.serialBindings that use Java serialization. 
+  +

+ + + + + +
+Uses of SerialBinding in com.sleepycat.bind.serial
+  +

+ + + + + + + + + + + + + + + + + + + + +
Constructors in com.sleepycat.bind.serial with parameters of type SerialBinding
TupleSerialMarshalledBinding(SerialBinding dataBinding) + +
+          Creates a tuple-serial marshalled binding object.
TupleSerialKeyCreator(SerialBinding dataBinding) + +
+          Creates a tuple-serial key creator.
TupleSerialBinding(SerialBinding dataBinding) + +
+          Creates a tuple-serial entity binding.
SerialSerialKeyCreator(SerialBinding primaryKeyBinding, + SerialBinding dataBinding, + SerialBinding indexKeyBinding) + +
+          Creates a serial-serial key creator.
SerialSerialBinding(SerialBinding keyBinding, + SerialBinding dataBinding) + +
+          Creates a serial-serial entity binding.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/serial/class-use/SerialInput.html b/db/docs/java/com/sleepycat/bind/serial/class-use/SerialInput.html new file mode 100644 index 000000000..bae02e2aa --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/serial/class-use/SerialInput.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.bind.serial.SerialInput (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.serial.SerialInput

+
+No usage of com.sleepycat.bind.serial.SerialInput +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/serial/class-use/SerialOutput.html b/db/docs/java/com/sleepycat/bind/serial/class-use/SerialOutput.html new file mode 100644 index 000000000..f854bdcdb --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/serial/class-use/SerialOutput.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.bind.serial.SerialOutput (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.serial.SerialOutput

+
+No usage of com.sleepycat.bind.serial.SerialOutput +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/serial/class-use/SerialSerialBinding.html b/db/docs/java/com/sleepycat/bind/serial/class-use/SerialSerialBinding.html new file mode 100644 index 000000000..b310aecad --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/serial/class-use/SerialSerialBinding.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.bind.serial.SerialSerialBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.serial.SerialSerialBinding

+
+No usage of com.sleepycat.bind.serial.SerialSerialBinding +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/serial/class-use/SerialSerialKeyCreator.html b/db/docs/java/com/sleepycat/bind/serial/class-use/SerialSerialKeyCreator.html new file mode 100644 index 000000000..52cb7cd26 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/serial/class-use/SerialSerialKeyCreator.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.bind.serial.SerialSerialKeyCreator (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.serial.SerialSerialKeyCreator

+
+No usage of com.sleepycat.bind.serial.SerialSerialKeyCreator +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/serial/class-use/StoredClassCatalog.html b/db/docs/java/com/sleepycat/bind/serial/class-use/StoredClassCatalog.html new file mode 100644 index 000000000..8b9e17fa6 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/serial/class-use/StoredClassCatalog.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.bind.serial.StoredClassCatalog (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.serial.StoredClassCatalog

+
+No usage of com.sleepycat.bind.serial.StoredClassCatalog +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialBinding.html b/db/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialBinding.html new file mode 100644 index 000000000..6dace7ccb --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialBinding.html @@ -0,0 +1,173 @@ + + + + + + +Uses of Class com.sleepycat.bind.serial.TupleSerialBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.serial.TupleSerialBinding

+
+ + + + + + + + + +
+Packages that use TupleSerialBinding
com.sleepycat.bind.serialBindings that use Java serialization. 
+  +

+ + + + + +
+Uses of TupleSerialBinding in com.sleepycat.bind.serial
+  +

+ + + + + + + + + +
Subclasses of TupleSerialBinding in com.sleepycat.bind.serial
+ classTupleSerialMarshalledBinding + +
+          A concrete TupleSerialBinding that delegates to the + MarshalledTupleKeyEntity interface of the entity class.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialKeyCreator.html b/db/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialKeyCreator.html new file mode 100644 index 000000000..fe1e39c7c --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialKeyCreator.html @@ -0,0 +1,172 @@ + + + + + + +Uses of Class com.sleepycat.bind.serial.TupleSerialKeyCreator (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.serial.TupleSerialKeyCreator

+
+ + + + + + + + + +
+Packages that use TupleSerialKeyCreator
com.sleepycat.bind.serialBindings that use Java serialization. 
+  +

+ + + + + +
+Uses of TupleSerialKeyCreator in com.sleepycat.bind.serial
+  +

+ + + + + + + + + +
Subclasses of TupleSerialKeyCreator in com.sleepycat.bind.serial
+ classTupleSerialMarshalledKeyCreator + +
+          A concrete key creator that works in conjunction with a TupleSerialMarshalledBinding.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialMarshalledBinding.html b/db/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialMarshalledBinding.html new file mode 100644 index 000000000..f972def97 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialMarshalledBinding.html @@ -0,0 +1,171 @@ + + + + + + +Uses of Class com.sleepycat.bind.serial.TupleSerialMarshalledBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.serial.TupleSerialMarshalledBinding

+
+ + + + + + + + + +
+Packages that use TupleSerialMarshalledBinding
com.sleepycat.bind.serialBindings that use Java serialization. 
+  +

+ + + + + +
+Uses of TupleSerialMarshalledBinding in com.sleepycat.bind.serial
+  +

+ + + + + + + + +
Constructors in com.sleepycat.bind.serial with parameters of type TupleSerialMarshalledBinding
TupleSerialMarshalledKeyCreator(TupleSerialMarshalledBinding binding, + String keyName) + +
+          Creates a tuple-serial marshalled key creator.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialMarshalledKeyCreator.html b/db/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialMarshalledKeyCreator.html new file mode 100644 index 000000000..9c49ea711 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialMarshalledKeyCreator.html @@ -0,0 +1,175 @@ + + + + + + +Uses of Class com.sleepycat.bind.serial.TupleSerialMarshalledKeyCreator (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.serial.TupleSerialMarshalledKeyCreator

+
+ + + + + + + + + +
+Packages that use TupleSerialMarshalledKeyCreator
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
+  +

+ + + + + +
+Uses of TupleSerialMarshalledKeyCreator in com.sleepycat.collections
+  +

+ + + + + + + + + +
Methods in com.sleepycat.collections that return TupleSerialMarshalledKeyCreator
+ TupleSerialMarshalledKeyCreatorTupleSerialFactory.getKeyCreator(Class valueBaseClass, + String keyName) + +
+          Creates a SecondaryKeyCreator object for use in configuring + a SecondaryDatabase.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/serial/package-frame.html b/db/docs/java/com/sleepycat/bind/serial/package-frame.html new file mode 100644 index 000000000..869556b18 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/serial/package-frame.html @@ -0,0 +1,61 @@ + + + + + + +com.sleepycat.bind.serial (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + +com.sleepycat.bind.serial + + + + +
+Interfaces  + +
+ClassCatalog
+ + + + + + +
+Classes  + +
+SerialBinding +
+SerialInput +
+SerialOutput +
+SerialSerialBinding +
+SerialSerialKeyCreator +
+StoredClassCatalog +
+TupleSerialBinding +
+TupleSerialKeyCreator +
+TupleSerialMarshalledBinding +
+TupleSerialMarshalledKeyCreator
+ + + + diff --git a/db/docs/java/com/sleepycat/bind/serial/package-summary.html b/db/docs/java/com/sleepycat/bind/serial/package-summary.html new file mode 100644 index 000000000..cf9a4b7c6 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/serial/package-summary.html @@ -0,0 +1,220 @@ + + + + + + +com.sleepycat.bind.serial (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+

+Package com.sleepycat.bind.serial +

+Bindings that use Java serialization. +

+See: +
+          Description +

+ + + + + + + + + +
+Interface Summary
ClassCatalog - A catalog of class description information for use during object serialization.
+  + +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Class Summary
SerialBinding - A concrete EntryBinding that treats a key or data entry as a serialized object.
SerialInput - A specialized ObjectInputStream that gets class description information from a ClassCatalog.
SerialOutput - A specialized ObjectOutputStream that stores class description information in a ClassCatalog.
SerialSerialBinding - An abstract EntityBinding that treats an entity's key entry and data entry as serialized objects.
SerialSerialKeyCreator - An abstract key creator that uses a serial key and a serial data entry.
StoredClassCatalog - A ClassCatalog that is stored in a Database.
TupleSerialBinding - An abstract EntityBinding that treats an entity's key entry as a tuple and its data entry as a serialized object.
TupleSerialKeyCreator - An abstract key creator that uses a tuple key and a serial data entry.
TupleSerialMarshalledBinding - A concrete TupleSerialBinding that delegates to the MarshalledTupleKeyEntity interface of the entity class.
TupleSerialMarshalledKeyCreator - A concrete key creator that works in conjunction with a TupleSerialMarshalledBinding.
+  + +

+

+Package com.sleepycat.bind.serial Description +

+ +

+Bindings that use Java serialization. +

+ +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/serial/package-tree.html b/db/docs/java/com/sleepycat/bind/serial/package-tree.html new file mode 100644 index 000000000..9c4bd948d --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/serial/package-tree.html @@ -0,0 +1,170 @@ + + + + + + +com.sleepycat.bind.serial Class Hierarchy (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Hierarchy For Package com.sleepycat.bind.serial +

+
+
+
Package Hierarchies:
All Packages
+
+

+Class Hierarchy +

+ +

+Interface Hierarchy +

+ +
+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/serial/package-use.html b/db/docs/java/com/sleepycat/bind/serial/package-use.html new file mode 100644 index 000000000..1b9912443 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/serial/package-use.html @@ -0,0 +1,217 @@ + + + + + + +Uses of Package com.sleepycat.bind.serial (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Package
com.sleepycat.bind.serial

+
+ + + + + + + + + + + + + +
+Packages that use com.sleepycat.bind.serial
com.sleepycat.bind.serialBindings that use Java serialization. 
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
+  +

+ + + + + + + + + + + + + + + + + + + + +
+Classes in com.sleepycat.bind.serial used by com.sleepycat.bind.serial
ClassCatalog + +
+          A catalog of class description information for use during object + serialization.
SerialBinding + +
+          A concrete EntryBinding that treats a key or data entry as + a serialized object.
TupleSerialBinding + +
+          An abstract EntityBinding that treats an entity's key entry as + a tuple and its data entry as a serialized object.
TupleSerialKeyCreator + +
+          An abstract key creator that uses a tuple key and a serial data entry.
TupleSerialMarshalledBinding + +
+          A concrete TupleSerialBinding that delegates to the + MarshalledTupleKeyEntity interface of the entity class.
+  +

+ + + + + + + + + + + +
+Classes in com.sleepycat.bind.serial used by com.sleepycat.collections
ClassCatalog + +
+          A catalog of class description information for use during object + serialization.
TupleSerialMarshalledKeyCreator + +
+          A concrete key creator that works in conjunction with a TupleSerialMarshalledBinding.
+  +

+


Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/bind/tuple/BooleanBinding.html b/db/docs/java/com/sleepycat/bind/tuple/BooleanBinding.html
new file mode 100644
index 000000000..def86d470
--- /dev/null
+++ b/db/docs/java/com/sleepycat/bind/tuple/BooleanBinding.html
@@ -0,0 +1,387 @@

BooleanBinding (Sleepycat Software, Inc. - Berkeley DB Java API)

+ +com.sleepycat.bind.tuple +
+Class BooleanBinding

+
+java.lang.Object
+  extended bycom.sleepycat.bind.tuple.TupleBinding
+      extended bycom.sleepycat.bind.tuple.BooleanBinding
+
+
+
All Implemented Interfaces:
EntryBinding
+
+
+
+
public class BooleanBinding
extends TupleBinding
+ +

A concrete TupleBinding for a Boolean primitive wrapper or a boolean primitive.

There are two ways to use this class:

  1. When using the com.sleepycat.db package directly, the static methods in this class can be used to convert between primitive values and DatabaseEntry objects.
  2. When using the com.sleepycat.collections package, an instance of this class can be used with any stored collection. The easiest way to obtain a binding instance is with the TupleBinding.getPrimitiveBinding(java.lang.Class) method.
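A brief sketch of both uses (the DatabaseEntry round trip and the primitive binding lookup); the wrapping class and variable names are illustrative:

import com.sleepycat.bind.tuple.BooleanBinding;
import com.sleepycat.bind.tuple.TupleBinding;
import com.sleepycat.db.DatabaseEntry;

public class BooleanBindingExample {
    public static void main(String[] args) {
        // 1. Static methods, for use with com.sleepycat.db directly.
        DatabaseEntry entry = new DatabaseEntry();
        BooleanBinding.booleanToEntry(true, entry);
        boolean value = BooleanBinding.entryToBoolean(entry);

        // 2. A binding instance, for use with stored collections.
        TupleBinding binding = TupleBinding.getPrimitiveBinding(Boolean.class);

        System.out.println(value + " " + binding.getClass().getName());
    }
}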

+ +

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
BooleanBinding() + +
+           
Method Summary
 static void booleanToEntry(boolean val, DatabaseEntry entry)
          Converts a simple boolean value into an entry buffer.
 static boolean entryToBoolean(DatabaseEntry entry)
          Converts an entry buffer into a simple boolean value.
 Object entryToObject(TupleInput input)
          Constructs a key or data object from a TupleInput entry.
 void objectToEntry(Object object, DatabaseEntry entry)
          Converts an Object into an entry buffer.
 void objectToEntry(Object object, TupleOutput output)
          Converts a key or data object to a tuple entry.
Methods inherited from class com.sleepycat.bind.tuple.TupleBinding
entryToInput, entryToObject, getPrimitiveBinding, inputToEntry, newOutput, newOutput, outputToEntry
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+BooleanBinding

+
+public BooleanBinding()
+
+
+ + + + + + + + +
+Method Detail
+ +

+entryToObject

+
+public Object entryToObject(TupleInput input)
+
+
Description copied from class: TupleBinding
+
Constructs a key or data object from a TupleInput entry. +

+

+
Specified by:
entryToObject in class TupleBinding
+
+
+
Parameters:
input - is the tuple key or data entry. +
Returns:
the key or data object constructed from the entry.
+
+
+
+ +

+objectToEntry

+
+public void objectToEntry(Object object,
+                          TupleOutput output)
+
+
Description copied from class: TupleBinding
+
Converts a key or data object to a tuple entry. +

+

+
Specified by:
objectToEntry in class TupleBinding
+
+
+
Parameters:
object - is the key or data object.
output - is the tuple entry to which the key or data should be + written.
+
+
+
+ +

+objectToEntry

+
+public void objectToEntry(Object object,
+                          DatabaseEntry entry)
+
+
Description copied from interface: EntryBinding
+
Converts an Object into an entry buffer.

+

+
Specified by:
objectToEntry in interface EntryBinding
Overrides:
objectToEntry in class TupleBinding
+
+
+
+
+
+
+ +

+entryToBoolean

+
+public static boolean entryToBoolean(DatabaseEntry entry)
+
+
Converts an entry buffer into a simple boolean value. +

+

+
Parameters:
entry - is the source entry buffer. +
Returns:
the resulting value.
+
+
+
+ +

+booleanToEntry

+
+public static void booleanToEntry(boolean val,
+                                  DatabaseEntry entry)
+
+
Converts a simple boolean value into an entry buffer. +

+

+
Parameters:
val - is the source value.
entry - is the destination entry buffer.
+
+
+ +
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/bind/tuple/ByteBinding.html b/db/docs/java/com/sleepycat/bind/tuple/ByteBinding.html
new file mode 100644
index 000000000..72555a518
--- /dev/null
+++ b/db/docs/java/com/sleepycat/bind/tuple/ByteBinding.html
@@ -0,0 +1,387 @@

ByteBinding (Sleepycat Software, Inc. - Berkeley DB Java API)

+ +com.sleepycat.bind.tuple +
+Class ByteBinding

+
+java.lang.Object
+  extended bycom.sleepycat.bind.tuple.TupleBinding
+      extended bycom.sleepycat.bind.tuple.ByteBinding
+
+
+
All Implemented Interfaces:
EntryBinding
+
+
+
+
public class ByteBinding
extends TupleBinding
+ +

A concrete TupleBinding for a Byte primitive wrapper or a byte primitive.

There are two ways to use this class:

  1. When using the com.sleepycat.db package directly, the static methods in this class can be used to convert between primitive values and DatabaseEntry objects.
  2. When using the com.sleepycat.collections package, an instance of this class can be used with any stored collection. The easiest way to obtain a binding instance is with the TupleBinding.getPrimitiveBinding(java.lang.Class) method.
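The same two patterns apply here; a brief sketch with illustrative names:

import com.sleepycat.bind.tuple.ByteBinding;
import com.sleepycat.bind.tuple.TupleBinding;
import com.sleepycat.db.DatabaseEntry;

public class ByteBindingExample {
    public static void main(String[] args) {
        // Static conversion into and out of a DatabaseEntry.
        DatabaseEntry entry = new DatabaseEntry();
        ByteBinding.byteToEntry((byte) 7, entry);
        byte value = ByteBinding.entryToByte(entry);

        // Binding instance for use with stored collections.
        TupleBinding binding = TupleBinding.getPrimitiveBinding(Byte.class);

        System.out.println(value + " " + binding.getClass().getName());
    }
}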

+ +

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
ByteBinding() + +
+           
Method Summary
 static void byteToEntry(byte val, DatabaseEntry entry)
          Converts a simple byte value into an entry buffer.
 static byte entryToByte(DatabaseEntry entry)
          Converts an entry buffer into a simple byte value.
 Object entryToObject(TupleInput input)
          Constructs a key or data object from a TupleInput entry.
 void objectToEntry(Object object, DatabaseEntry entry)
          Converts an Object into an entry buffer.
 void objectToEntry(Object object, TupleOutput output)
          Converts a key or data object to a tuple entry.
Methods inherited from class com.sleepycat.bind.tuple.TupleBinding
entryToInput, entryToObject, getPrimitiveBinding, inputToEntry, newOutput, newOutput, outputToEntry
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+ByteBinding

+
+public ByteBinding()
+
+
+ + + + + + + + +
+Method Detail
+ +

+entryToObject

+
+public Object entryToObject(TupleInput input)
+
+
Description copied from class: TupleBinding
+
Constructs a key or data object from a TupleInput entry. +

+

+
Specified by:
entryToObject in class TupleBinding
+
+
+
Parameters:
input - is the tuple key or data entry. +
Returns:
the key or data object constructed from the entry.
+
+
+
+ +

+objectToEntry

+
+public void objectToEntry(Object object,
+                          TupleOutput output)
+
+
Description copied from class: TupleBinding
+
Converts a key or data object to a tuple entry. +

+

+
Specified by:
objectToEntry in class TupleBinding
+
+
+
Parameters:
object - is the key or data object.
output - is the tuple entry to which the key or data should be + written.
+
+
+
+ +

+objectToEntry

+
+public void objectToEntry(Object object,
+                          DatabaseEntry entry)
+
+
Description copied from interface: EntryBinding
+
Converts an Object into a entry buffer. +

+

+
Specified by:
objectToEntry in interface EntryBinding
Overrides:
objectToEntry in class TupleBinding
+
+
+
+
+
+
+ +

+entryToByte

+
+public static byte entryToByte(DatabaseEntry entry)
+
+
Converts an entry buffer into a simple byte value. +

+

+
Parameters:
entry - is the source entry buffer. +
Returns:
the resulting value.
+
+
+
+ +

+byteToEntry

+
+public static void byteToEntry(byte val,
+                               DatabaseEntry entry)
+
+
Converts a simple byte value into an entry buffer. +

+

+
Parameters:
val - is the source value.
entry - is the destination entry buffer.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/bind/tuple/CharacterBinding.html b/db/docs/java/com/sleepycat/bind/tuple/CharacterBinding.html
new file mode 100644
index 000000000..2b267484f
--- /dev/null
+++ b/db/docs/java/com/sleepycat/bind/tuple/CharacterBinding.html
@@ -0,0 +1,387 @@
CharacterBinding (Sleepycat Software, Inc. - Berkeley DB Java API)

com.sleepycat.bind.tuple
Class CharacterBinding

java.lang.Object
  extended by com.sleepycat.bind.tuple.TupleBinding
    extended by com.sleepycat.bind.tuple.CharacterBinding

All Implemented Interfaces: EntryBinding

public class CharacterBinding extends TupleBinding

A concrete TupleBinding for a Character primitive wrapper or a char primitive.

There are two ways to use this class:
  1. When using the com.sleepycat.db package directly, the static methods in this
     class can be used to convert between primitive values and DatabaseEntry objects.
  2. When using the com.sleepycat.collections package, an instance of this class can
     be used with any stored collection. The easiest way to obtain a binding instance
     is with the TupleBinding.getPrimitiveBinding(java.lang.Class) method.

Constructor Summary
  CharacterBinding()

Methods inherited from class com.sleepycat.bind.tuple.TupleBinding:
  entryToInput, entryToObject, getPrimitiveBinding, inputToEntry, newOutput, newOutput, outputToEntry
Methods inherited from class java.lang.Object:
  equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Method Detail

entryToObject
  public Object entryToObject(TupleInput input)
  Description copied from class TupleBinding: Constructs a key or data object from a TupleInput entry.
  Specified by: entryToObject in class TupleBinding
  Parameters: input - is the tuple key or data entry.
  Returns: the key or data object constructed from the entry.

objectToEntry
  public void objectToEntry(Object object, TupleOutput output)
  Description copied from class TupleBinding: Converts a key or data object to a tuple entry.
  Specified by: objectToEntry in class TupleBinding
  Parameters: object - is the key or data object.
              output - is the tuple entry to which the key or data should be written.

objectToEntry
  public void objectToEntry(Object object, DatabaseEntry entry)
  Description copied from interface EntryBinding: Converts an Object into an entry buffer.
  Specified by: objectToEntry in interface EntryBinding
  Overrides: objectToEntry in class TupleBinding

entryToChar
  public static char entryToChar(DatabaseEntry entry)
  Converts an entry buffer into a simple char value.
  Parameters: entry - is the source entry buffer.
  Returns: the resulting value.

charToEntry
  public static void charToEntry(char val, DatabaseEntry entry)
  Converts a simple char value into an entry buffer.
  Parameters: val - is the source value.
              entry - is the destination entry buffer.

Berkeley DB version 4.3.14
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/bind/tuple/DoubleBinding.html b/db/docs/java/com/sleepycat/bind/tuple/DoubleBinding.html
new file mode 100644
index 000000000..bee8645be
--- /dev/null
+++ b/db/docs/java/com/sleepycat/bind/tuple/DoubleBinding.html
@@ -0,0 +1,387 @@
DoubleBinding (Sleepycat Software, Inc. - Berkeley DB Java API)

com.sleepycat.bind.tuple
Class DoubleBinding

java.lang.Object
  extended by com.sleepycat.bind.tuple.TupleBinding
    extended by com.sleepycat.bind.tuple.DoubleBinding

All Implemented Interfaces: EntryBinding

public class DoubleBinding extends TupleBinding

A concrete TupleBinding for a Double primitive wrapper or a double primitive.

There are two ways to use this class:
  1. When using the com.sleepycat.db package directly, the static methods in this
     class can be used to convert between primitive values and DatabaseEntry objects.
  2. When using the com.sleepycat.collections package, an instance of this class can
     be used with any stored collection. The easiest way to obtain a binding instance
     is with the TupleBinding.getPrimitiveBinding(java.lang.Class) method.
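As a quick illustration of the first usage style, the sketch below round-trips a double through a DatabaseEntry with the static helpers documented further down this page. The example class name is hypothetical, and the comment about key ordering only restates the caveat given in the TupleInput class notes later in this package's documentation:

    import com.sleepycat.bind.tuple.DoubleBinding;
    import com.sleepycat.db.DatabaseEntry;

    public class DoubleBindingExample {
        public static void main(String[] args) {
            // Round-trip a double through an entry buffer.
            DatabaseEntry entry = new DatabaseEntry();
            DoubleBinding.doubleToEntry(3.14, entry);
            double copy = DoubleBinding.entryToDouble(entry);
            System.out.println(copy);   // prints "3.14"

            // Note (from the TupleInput class notes): the stored IEEE 754 form
            // orders non-negative values correctly, but negative doubles used as
            // keys need a custom comparator to sort in numeric order.
        }
    }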
Constructor Summary
  DoubleBinding()

Methods inherited from class com.sleepycat.bind.tuple.TupleBinding:
  entryToInput, entryToObject, getPrimitiveBinding, inputToEntry, newOutput, newOutput, outputToEntry
Methods inherited from class java.lang.Object:
  equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Method Detail

entryToObject
  public Object entryToObject(TupleInput input)
  Description copied from class TupleBinding: Constructs a key or data object from a TupleInput entry.
  Specified by: entryToObject in class TupleBinding
  Parameters: input - is the tuple key or data entry.
  Returns: the key or data object constructed from the entry.

objectToEntry
  public void objectToEntry(Object object, TupleOutput output)
  Description copied from class TupleBinding: Converts a key or data object to a tuple entry.
  Specified by: objectToEntry in class TupleBinding
  Parameters: object - is the key or data object.
              output - is the tuple entry to which the key or data should be written.

objectToEntry
  public void objectToEntry(Object object, DatabaseEntry entry)
  Description copied from interface EntryBinding: Converts an Object into an entry buffer.
  Specified by: objectToEntry in interface EntryBinding
  Overrides: objectToEntry in class TupleBinding

entryToDouble
  public static double entryToDouble(DatabaseEntry entry)
  Converts an entry buffer into a simple double value.
  Parameters: entry - is the source entry buffer.
  Returns: the resulting value.

doubleToEntry
  public static void doubleToEntry(double val, DatabaseEntry entry)
  Converts a simple double value into an entry buffer.
  Parameters: val - is the source value.
              entry - is the destination entry buffer.

Berkeley DB version 4.3.14
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/bind/tuple/FloatBinding.html b/db/docs/java/com/sleepycat/bind/tuple/FloatBinding.html
new file mode 100644
index 000000000..435e3808f
--- /dev/null
+++ b/db/docs/java/com/sleepycat/bind/tuple/FloatBinding.html
@@ -0,0 +1,387 @@
FloatBinding (Sleepycat Software, Inc. - Berkeley DB Java API)

com.sleepycat.bind.tuple
Class FloatBinding

java.lang.Object
  extended by com.sleepycat.bind.tuple.TupleBinding
    extended by com.sleepycat.bind.tuple.FloatBinding

All Implemented Interfaces: EntryBinding

public class FloatBinding extends TupleBinding

A concrete TupleBinding for a Float primitive wrapper or a float primitive.

There are two ways to use this class:
  1. When using the com.sleepycat.db package directly, the static methods in this
     class can be used to convert between primitive values and DatabaseEntry objects.
  2. When using the com.sleepycat.collections package, an instance of this class can
     be used with any stored collection. The easiest way to obtain a binding instance
     is with the TupleBinding.getPrimitiveBinding(java.lang.Class) method.

Constructor Summary
  FloatBinding()

Methods inherited from class com.sleepycat.bind.tuple.TupleBinding:
  entryToInput, entryToObject, getPrimitiveBinding, inputToEntry, newOutput, newOutput, outputToEntry
Methods inherited from class java.lang.Object:
  equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Method Detail

entryToObject
  public Object entryToObject(TupleInput input)
  Description copied from class TupleBinding: Constructs a key or data object from a TupleInput entry.
  Specified by: entryToObject in class TupleBinding
  Parameters: input - is the tuple key or data entry.
  Returns: the key or data object constructed from the entry.

objectToEntry
  public void objectToEntry(Object object, TupleOutput output)
  Description copied from class TupleBinding: Converts a key or data object to a tuple entry.
  Specified by: objectToEntry in class TupleBinding
  Parameters: object - is the key or data object.
              output - is the tuple entry to which the key or data should be written.

objectToEntry
  public void objectToEntry(Object object, DatabaseEntry entry)
  Description copied from interface EntryBinding: Converts an Object into an entry buffer.
  Specified by: objectToEntry in interface EntryBinding
  Overrides: objectToEntry in class TupleBinding

entryToFloat
  public static float entryToFloat(DatabaseEntry entry)
  Converts an entry buffer into a simple float value.
  Parameters: entry - is the source entry buffer.
  Returns: the resulting value.

floatToEntry
  public static void floatToEntry(float val, DatabaseEntry entry)
  Converts a simple float value into an entry buffer.
  Parameters: val - is the source value.
              entry - is the destination entry buffer.

Berkeley DB version 4.3.14
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/bind/tuple/IntegerBinding.html b/db/docs/java/com/sleepycat/bind/tuple/IntegerBinding.html
new file mode 100644
index 000000000..d370c0ed9
--- /dev/null
+++ b/db/docs/java/com/sleepycat/bind/tuple/IntegerBinding.html
@@ -0,0 +1,387 @@
IntegerBinding (Sleepycat Software, Inc. - Berkeley DB Java API)

com.sleepycat.bind.tuple
Class IntegerBinding

java.lang.Object
  extended by com.sleepycat.bind.tuple.TupleBinding
    extended by com.sleepycat.bind.tuple.IntegerBinding

All Implemented Interfaces: EntryBinding

public class IntegerBinding extends TupleBinding

A concrete TupleBinding for an Integer primitive wrapper or an int primitive.

There are two ways to use this class:
  1. When using the com.sleepycat.db package directly, the static methods in this
     class can be used to convert between primitive values and DatabaseEntry objects.
  2. When using the com.sleepycat.collections package, an instance of this class can
     be used with any stored collection. The easiest way to obtain a binding instance
     is with the TupleBinding.getPrimitiveBinding(java.lang.Class) method.
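A minimal sketch of the second usage style, obtaining the built-in Integer binding via TupleBinding.getPrimitiveBinding as described above. The example class name is illustrative, and the cast reflects the pre-generics API documented on these pages:

    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.db.DatabaseEntry;

    public class IntegerBindingExample {
        public static void main(String[] args) {
            // Obtain the built-in binding for Integer.
            TupleBinding binding = TupleBinding.getPrimitiveBinding(Integer.class);

            // Convert an Integer to an entry buffer and back again.
            DatabaseEntry entry = new DatabaseEntry();
            binding.objectToEntry(new Integer(1234), entry);
            Integer copy = (Integer) binding.entryToObject(entry);
            System.out.println(copy);   // prints "1234"
        }
    }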
Constructor Summary
  IntegerBinding()

Methods inherited from class com.sleepycat.bind.tuple.TupleBinding:
  entryToInput, entryToObject, getPrimitiveBinding, inputToEntry, newOutput, newOutput, outputToEntry
Methods inherited from class java.lang.Object:
  equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Method Detail

entryToObject
  public Object entryToObject(TupleInput input)
  Description copied from class TupleBinding: Constructs a key or data object from a TupleInput entry.
  Specified by: entryToObject in class TupleBinding
  Parameters: input - is the tuple key or data entry.
  Returns: the key or data object constructed from the entry.

objectToEntry
  public void objectToEntry(Object object, TupleOutput output)
  Description copied from class TupleBinding: Converts a key or data object to a tuple entry.
  Specified by: objectToEntry in class TupleBinding
  Parameters: object - is the key or data object.
              output - is the tuple entry to which the key or data should be written.

objectToEntry
  public void objectToEntry(Object object, DatabaseEntry entry)
  Description copied from interface EntryBinding: Converts an Object into an entry buffer.
  Specified by: objectToEntry in interface EntryBinding
  Overrides: objectToEntry in class TupleBinding

entryToInt
  public static int entryToInt(DatabaseEntry entry)
  Converts an entry buffer into a simple int value.
  Parameters: entry - is the source entry buffer.
  Returns: the resulting value.

intToEntry
  public static void intToEntry(int val, DatabaseEntry entry)
  Converts a simple int value into an entry buffer.
  Parameters: val - is the source value.
              entry - is the destination entry buffer.

Berkeley DB version 4.3.14
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/bind/tuple/LongBinding.html b/db/docs/java/com/sleepycat/bind/tuple/LongBinding.html
new file mode 100644
index 000000000..86ec3597e
--- /dev/null
+++ b/db/docs/java/com/sleepycat/bind/tuple/LongBinding.html
@@ -0,0 +1,387 @@
LongBinding (Sleepycat Software, Inc. - Berkeley DB Java API)

com.sleepycat.bind.tuple
Class LongBinding

java.lang.Object
  extended by com.sleepycat.bind.tuple.TupleBinding
    extended by com.sleepycat.bind.tuple.LongBinding

All Implemented Interfaces: EntryBinding

public class LongBinding extends TupleBinding

A concrete TupleBinding for a Long primitive wrapper or a long primitive.

There are two ways to use this class:
  1. When using the com.sleepycat.db package directly, the static methods in this
     class can be used to convert between primitive values and DatabaseEntry objects.
  2. When using the com.sleepycat.collections package, an instance of this class can
     be used with any stored collection. The easiest way to obtain a binding instance
     is with the TupleBinding.getPrimitiveBinding(java.lang.Class) method.

Constructor Summary
  LongBinding()

Methods inherited from class com.sleepycat.bind.tuple.TupleBinding:
  entryToInput, entryToObject, getPrimitiveBinding, inputToEntry, newOutput, newOutput, outputToEntry
Methods inherited from class java.lang.Object:
  equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Method Detail

entryToObject
  public Object entryToObject(TupleInput input)
  Description copied from class TupleBinding: Constructs a key or data object from a TupleInput entry.
  Specified by: entryToObject in class TupleBinding
  Parameters: input - is the tuple key or data entry.
  Returns: the key or data object constructed from the entry.

objectToEntry
  public void objectToEntry(Object object, TupleOutput output)
  Description copied from class TupleBinding: Converts a key or data object to a tuple entry.
  Specified by: objectToEntry in class TupleBinding
  Parameters: object - is the key or data object.
              output - is the tuple entry to which the key or data should be written.

objectToEntry
  public void objectToEntry(Object object, DatabaseEntry entry)
  Description copied from interface EntryBinding: Converts an Object into an entry buffer.
  Specified by: objectToEntry in interface EntryBinding
  Overrides: objectToEntry in class TupleBinding

entryToLong
  public static long entryToLong(DatabaseEntry entry)
  Converts an entry buffer into a simple long value.
  Parameters: entry - is the source entry buffer.
  Returns: the resulting value.

longToEntry
  public static void longToEntry(long val, DatabaseEntry entry)
  Converts a simple long value into an entry buffer.
  Parameters: val - is the source value.
              entry - is the destination entry buffer.

Berkeley DB version 4.3.14
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/bind/tuple/MarshalledTupleEntry.html b/db/docs/java/com/sleepycat/bind/tuple/MarshalledTupleEntry.html
new file mode 100644
index 000000000..334590985
--- /dev/null
+++ b/db/docs/java/com/sleepycat/bind/tuple/MarshalledTupleEntry.html
@@ -0,0 +1,252 @@
MarshalledTupleEntry (Sleepycat Software, Inc. - Berkeley DB Java API)

com.sleepycat.bind.tuple
Interface MarshalledTupleEntry

public interface MarshalledTupleEntry

A marshalling interface implemented by key, data or entity classes that are
represented as tuples.

Key classes implement this interface to marshal their key entry. Data or entity
classes implement this interface to marshal their data entry. Implementations of
this interface must have a public no-arguments constructor so that they can be
instantiated by a binding, prior to calling the
unmarshalEntry(com.sleepycat.bind.tuple.TupleInput) method.

Note that implementing this interface is not necessary when the object is a Java
simple type, for example: String, Integer, etc. These types can be used with
built-in bindings returned by TupleBinding.getPrimitiveBinding(java.lang.Class).

See Also: TupleTupleMarshalledBinding
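A hedged sketch of what an implementing data class might look like. PartData and its fields are hypothetical, chosen only to illustrate the marshalEntry/unmarshalEntry contract described above, including the required public no-arguments constructor:

    import com.sleepycat.bind.tuple.MarshalledTupleEntry;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    public class PartData implements MarshalledTupleEntry {
        private String description;
        private double weight;

        // Required so a binding can instantiate the class before unmarshalling.
        public PartData() {
        }

        public PartData(String description, double weight) {
            this.description = description;
            this.weight = weight;
        }

        // Write the data fields to the tuple in a fixed order.
        public void marshalEntry(TupleOutput dataOutput) {
            dataOutput.writeString(description);
            dataOutput.writeDouble(weight);
        }

        // Read the fields back in the same order they were written.
        public void unmarshalEntry(TupleInput dataInput) {
            description = dataInput.readString();
            weight = dataInput.readDouble();
        }
    }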
Method Detail

marshalEntry
  public void marshalEntry(TupleOutput dataOutput)
  Construct the key or data tuple entry from the key or data object.
  Parameters: dataOutput - is the output tuple.

unmarshalEntry
  public void unmarshalEntry(TupleInput dataInput)
  Construct the key or data object from the key or data tuple entry.
  Parameters: dataInput - is the input tuple.

Berkeley DB version 4.3.14
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/bind/tuple/MarshalledTupleKeyEntity.html b/db/docs/java/com/sleepycat/bind/tuple/MarshalledTupleKeyEntity.html
new file mode 100644
index 000000000..30c701698
--- /dev/null
+++ b/db/docs/java/com/sleepycat/bind/tuple/MarshalledTupleKeyEntity.html
@@ -0,0 +1,305 @@
MarshalledTupleKeyEntity (Sleepycat Software, Inc. - Berkeley DB Java API)

com.sleepycat.bind.tuple
Interface MarshalledTupleKeyEntity

public interface MarshalledTupleKeyEntity

A marshalling interface implemented by entity classes that represent keys as
tuples. Since MarshalledTupleKeyEntity objects are instantiated using Java
deserialization, no particular constructor is required by classes that implement
this interface.

Note that a marshalled tuple key extractor is somewhat less efficient than a
non-marshalled key tuple extractor because more conversions are needed. A
marshalled key extractor must convert the entry to an object in order to extract
the key fields, while an unmarshalled key extractor does not.

See Also: TupleTupleMarshalledBinding, TupleSerialMarshalledBinding
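A hedged sketch of an implementing entity. The Part class, its fields, and the "supplier" key name are hypothetical and serve only to show how the four methods of this interface cooperate:

    import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    public class Part implements MarshalledTupleKeyEntity {
        private String number;     // primary key
        private String supplier;   // optional secondary key, may be null

        public void marshalPrimaryKey(TupleOutput keyOutput) {
            keyOutput.writeString(number);
        }

        public void unmarshalPrimaryKey(TupleInput keyInput) {
            number = keyInput.readString();
        }

        public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) {
            if ("supplier".equals(keyName) && supplier != null) {
                keyOutput.writeString(supplier);
                return true;    // a key was created
            }
            return false;       // key not present
        }

        public boolean nullifyForeignKey(String keyName) {
            if ("supplier".equals(keyName) && supplier != null) {
                supplier = null;    // marshalSecondaryKey will now return false
                return true;
            }
            return false;           // key not present, no change necessary
        }
    }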
Method Detail

marshalPrimaryKey
  public void marshalPrimaryKey(TupleOutput keyOutput)
  Extracts the entity's primary key and writes it to the key output.
  Parameters: keyOutput - is the output tuple.

unmarshalPrimaryKey
  public void unmarshalPrimaryKey(TupleInput keyInput)
  Completes construction of the entity by setting its primary key from the stored
  primary key.
  Parameters: keyInput - is the input tuple.

marshalSecondaryKey
  public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput)
  Extracts the entity's secondary key and writes it to the key output.
  Parameters: keyName - identifies the secondary key.
              keyOutput - is the output tuple.
  Returns: true if a key was created, or false to indicate that the key is not present.

nullifyForeignKey
  public boolean nullifyForeignKey(String keyName)
  Clears the entity's secondary key fields for the given key name.
  The specified index key should be changed by this method such that
  marshalSecondaryKey(java.lang.String, com.sleepycat.bind.tuple.TupleOutput) for
  the same key name will return false. Other fields in the data object should
  remain unchanged.
  Parameters: keyName - identifies the secondary key.
  Returns: true if the key was cleared, or false to indicate that the key is not
  present and no change is necessary.

Berkeley DB version 4.3.14
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/bind/tuple/ShortBinding.html b/db/docs/java/com/sleepycat/bind/tuple/ShortBinding.html
new file mode 100644
index 000000000..f32c697ae
--- /dev/null
+++ b/db/docs/java/com/sleepycat/bind/tuple/ShortBinding.html
@@ -0,0 +1,387 @@
ShortBinding (Sleepycat Software, Inc. - Berkeley DB Java API)

com.sleepycat.bind.tuple
Class ShortBinding

java.lang.Object
  extended by com.sleepycat.bind.tuple.TupleBinding
    extended by com.sleepycat.bind.tuple.ShortBinding

All Implemented Interfaces: EntryBinding

public class ShortBinding extends TupleBinding

A concrete TupleBinding for a Short primitive wrapper or a short primitive.

There are two ways to use this class:
  1. When using the com.sleepycat.db package directly, the static methods in this
     class can be used to convert between primitive values and DatabaseEntry objects.
  2. When using the com.sleepycat.collections package, an instance of this class can
     be used with any stored collection. The easiest way to obtain a binding instance
     is with the TupleBinding.getPrimitiveBinding(java.lang.Class) method.

Constructor Summary
  ShortBinding()

Methods inherited from class com.sleepycat.bind.tuple.TupleBinding:
  entryToInput, entryToObject, getPrimitiveBinding, inputToEntry, newOutput, newOutput, outputToEntry
Methods inherited from class java.lang.Object:
  equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Method Detail

entryToObject
  public Object entryToObject(TupleInput input)
  Description copied from class TupleBinding: Constructs a key or data object from a TupleInput entry.
  Specified by: entryToObject in class TupleBinding
  Parameters: input - is the tuple key or data entry.
  Returns: the key or data object constructed from the entry.

objectToEntry
  public void objectToEntry(Object object, TupleOutput output)
  Description copied from class TupleBinding: Converts a key or data object to a tuple entry.
  Specified by: objectToEntry in class TupleBinding
  Parameters: object - is the key or data object.
              output - is the tuple entry to which the key or data should be written.

objectToEntry
  public void objectToEntry(Object object, DatabaseEntry entry)
  Description copied from interface EntryBinding: Converts an Object into an entry buffer.
  Specified by: objectToEntry in interface EntryBinding
  Overrides: objectToEntry in class TupleBinding

entryToShort
  public static short entryToShort(DatabaseEntry entry)
  Converts an entry buffer into a simple short value.
  Parameters: entry - is the source entry buffer.
  Returns: the resulting value.

shortToEntry
  public static void shortToEntry(short val, DatabaseEntry entry)
  Converts a simple short value into an entry buffer.
  Parameters: val - is the source value.
              entry - is the destination entry buffer.

Berkeley DB version 4.3.14
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/bind/tuple/StringBinding.html b/db/docs/java/com/sleepycat/bind/tuple/StringBinding.html
new file mode 100644
index 000000000..e50267059
--- /dev/null
+++ b/db/docs/java/com/sleepycat/bind/tuple/StringBinding.html
@@ -0,0 +1,386 @@
StringBinding (Sleepycat Software, Inc. - Berkeley DB Java API)

com.sleepycat.bind.tuple
Class StringBinding

java.lang.Object
  extended by com.sleepycat.bind.tuple.TupleBinding
    extended by com.sleepycat.bind.tuple.StringBinding

All Implemented Interfaces: EntryBinding

public class StringBinding extends TupleBinding

A concrete TupleBinding for a simple String value.

There are two ways to use this class:
  1. When using the com.sleepycat.db package directly, the static methods in this
     class can be used to convert between primitive values and DatabaseEntry objects.
  2. When using the com.sleepycat.collections package, an instance of this class can
     be used with any stored collection. The easiest way to obtain a binding instance
     is with the TupleBinding.getPrimitiveBinding(java.lang.Class) method.
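The same static-helper pattern shown for the numeric bindings applies to String values, which are commonly used as database keys. A small sketch, assuming only the methods documented below; the class name is illustrative:

    import com.sleepycat.bind.tuple.StringBinding;
    import com.sleepycat.db.DatabaseEntry;

    public class StringBindingExample {
        public static void main(String[] args) {
            // Build a key entry from a String and convert it back.
            DatabaseEntry key = new DatabaseEntry();
            StringBinding.stringToEntry("part#1138", key);
            String copy = StringBinding.entryToString(key);
            System.out.println(copy);   // prints "part#1138"
        }
    }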
Constructor Summary
  StringBinding()

Methods inherited from class com.sleepycat.bind.tuple.TupleBinding:
  entryToInput, entryToObject, getPrimitiveBinding, inputToEntry, newOutput, newOutput, outputToEntry
Methods inherited from class java.lang.Object:
  equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Method Detail

entryToObject
  public Object entryToObject(TupleInput input)
  Description copied from class TupleBinding: Constructs a key or data object from a TupleInput entry.
  Specified by: entryToObject in class TupleBinding
  Parameters: input - is the tuple key or data entry.
  Returns: the key or data object constructed from the entry.

objectToEntry
  public void objectToEntry(Object object, TupleOutput output)
  Description copied from class TupleBinding: Converts a key or data object to a tuple entry.
  Specified by: objectToEntry in class TupleBinding
  Parameters: object - is the key or data object.
              output - is the tuple entry to which the key or data should be written.

objectToEntry
  public void objectToEntry(Object object, DatabaseEntry entry)
  Description copied from interface EntryBinding: Converts an Object into an entry buffer.
  Specified by: objectToEntry in interface EntryBinding
  Overrides: objectToEntry in class TupleBinding

entryToString
  public static String entryToString(DatabaseEntry entry)
  Converts an entry buffer into a simple String value.
  Parameters: entry - is the source entry buffer.
  Returns: the resulting value.

stringToEntry
  public static void stringToEntry(String val, DatabaseEntry entry)
  Converts a simple String value into an entry buffer.
  Parameters: val - is the source value.
              entry - is the destination entry buffer.

Berkeley DB version 4.3.14
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/bind/tuple/TupleBinding.html b/db/docs/java/com/sleepycat/bind/tuple/TupleBinding.html
new file mode 100644
index 000000000..918669d99
--- /dev/null
+++ b/db/docs/java/com/sleepycat/bind/tuple/TupleBinding.html
@@ -0,0 +1,537 @@
TupleBinding (Sleepycat Software, Inc. - Berkeley DB Java API)

com.sleepycat.bind.tuple
Class TupleBinding

java.lang.Object
  extended by com.sleepycat.bind.tuple.TupleBinding

All Implemented Interfaces: EntryBinding

Direct Known Subclasses:
  BooleanBinding, ByteBinding, CharacterBinding, DoubleBinding, FloatBinding,
  IntegerBinding, LongBinding, ShortBinding, StringBinding, TupleMarshalledBinding

public abstract class TupleBinding extends Object implements EntryBinding

An abstract EntryBinding that treats a key or data entry as a tuple; it includes
predefined bindings for Java primitive types.

This class takes care of converting the entries to/from TupleInput and TupleOutput
objects. Its two abstract methods must be implemented by a concrete subclass to
convert between tuples and key or data objects.

For key or data entries which are Java primitive classes (String, Integer, etc.)
getPrimitiveBinding(java.lang.Class) may be used to return a built-in tuple
binding. A custom tuple binding for these types is not needed.
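A minimal sketch of a concrete subclass implementing the two abstract methods. PartKey and PartKeyBinding are hypothetical names used only to illustrate the pattern; the pre-generics API works in terms of Object, hence the cast:

    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    // Hypothetical key class used only for illustration.
    class PartKey {
        String number;
        int revision;

        PartKey(String number, int revision) {
            this.number = number;
            this.revision = revision;
        }
    }

    // Concrete binding: read and write the fields in the same fixed order.
    class PartKeyBinding extends TupleBinding {
        public Object entryToObject(TupleInput input) {
            String number = input.readString();
            int revision = input.readInt();
            return new PartKey(number, revision);
        }

        public void objectToEntry(Object object, TupleOutput output) {
            PartKey key = (PartKey) object;
            output.writeString(key.number);
            output.writeInt(key.revision);
        }
    }

The inherited objectToEntry(Object, DatabaseEntry) and entryToObject(DatabaseEntry) methods documented below then route through these two tuple methods, so the subclass never touches the entry buffer directly.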
Constructor Summary
  TupleBinding() - Creates a tuple binding.

Methods inherited from class java.lang.Object:
  equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Method Detail

entryToObject
  public Object entryToObject(DatabaseEntry entry)
  Description copied from interface EntryBinding: Converts an entry buffer into an Object.
  Specified by: entryToObject in interface EntryBinding
  Parameters: entry - is the source entry buffer.
  Returns: the resulting Object.

objectToEntry
  public void objectToEntry(Object object, DatabaseEntry entry)
  Description copied from interface EntryBinding: Converts an Object into an entry buffer.
  Specified by: objectToEntry in interface EntryBinding
  Parameters: object - is the source Object.
              entry - is the destination entry buffer.

newOutput
  public static TupleOutput newOutput()
  Utility method for use by bindings to create a tuple output object.
  Returns: a new tuple output object.

newOutput
  public static TupleOutput newOutput(byte[] buffer)
  Utility method for use by bindings to create a tuple output object with a
  specific starting size.
  Returns: a new tuple output object.

outputToEntry
  public static void outputToEntry(TupleOutput output, DatabaseEntry entry)
  Utility method to set the data in an entry buffer to the data in a tuple output object.
  Parameters: output - is the source tuple output object.
              entry - is the destination entry buffer.

inputToEntry
  public static void inputToEntry(TupleInput input, DatabaseEntry entry)
  Utility method to set the data in an entry buffer to the data in a tuple input object.
  Parameters: input - is the source tuple input object.
              entry - is the destination entry buffer.

entryToInput
  public static TupleInput entryToInput(DatabaseEntry entry)
  Utility method to create a new tuple input object for reading the data from a
  given buffer. If an existing input is reused, it is reset before returning it.
  Parameters: entry - is the source entry buffer.
  Returns: the new tuple input object.

entryToObject
  public abstract Object entryToObject(TupleInput input)
  Constructs a key or data object from a TupleInput entry.
  Parameters: input - is the tuple key or data entry.
  Returns: the key or data object constructed from the entry.

objectToEntry
  public abstract void objectToEntry(Object object, TupleOutput output)
  Converts a key or data object to a tuple entry.
  Parameters: object - is the key or data object.
              output - is the tuple entry to which the key or data should be written.

getPrimitiveBinding
  public static TupleBinding getPrimitiveBinding(Class cls)
  Creates a tuple binding for a primitive Java class. The following Java classes
  are supported: String, Character, Boolean, Byte, Short, Integer, Long, Float,
  Double.
  Parameters: cls - is the primitive Java class.
  Returns: a new binding for the primitive class, or null if the cls parameter is
  not one of the supported classes.

Berkeley DB version 4.3.14
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/bind/tuple/TupleInput.html b/db/docs/java/com/sleepycat/bind/tuple/TupleInput.html
new file mode 100644
index 000000000..2f703054f
--- /dev/null
+++ b/db/docs/java/com/sleepycat/bind/tuple/TupleInput.html
@@ -0,0 +1,861 @@
TupleInput (Sleepycat Software, Inc. - Berkeley DB Java API)

com.sleepycat.bind.tuple
Class TupleInput

java.lang.Object
  extended by java.io.InputStream
    extended by com.sleepycat.util.FastInputStream
      extended by com.sleepycat.bind.tuple.TupleInput

public class TupleInput extends FastInputStream

An InputStream with DataInput-like methods for reading tuple fields. It is used
by TupleBinding.

This class has many methods that have the same signatures as methods in the
DataInput interface. The reason this class does not implement DataInput is
because it would break the interface contract for those methods because of data
format differences.

Signed numbers are stored in the buffer in MSB (most significant byte first)
order with their sign bit (high-order bit) inverted to cause negative numbers to
be sorted first when comparing values as unsigned byte arrays, as done in a
database. Unsigned numbers, including characters, are stored in MSB order with
no change to their sign bit.

Strings and character arrays are stored either as a fixed length array of
unicode characters, where the length must be known by the application, or as a
null-terminated UTF byte array.

  - Null strings are UTF encoded as { 0xFF }, which is not allowed in a standard
    UTF encoding. This allows null strings, as distinct from empty or zero length
    strings, to be represented in a tuple. Using the default comparator, null
    strings will be ordered last.
  - Zero (0x0000) character values are UTF encoded as non-zero values, and
    therefore embedded zeros in the string are supported. The sequence
    { 0xC0, 0x80 } is used to encode a zero character. This UTF encoding is the
    same one used by native Java UTF libraries. However, this encoding of zero
    does impact the lexicographical ordering, and zeros will not be sorted first
    (the natural order) or last. For all character values other than zero, the
    default UTF byte ordering is the same as the Unicode lexicographical
    character ordering.

Floats and doubles are stored in standard Java integer-bit representation
(IEEE 754). Non-negative numbers are correctly ordered by numeric value.
However, negative numbers are not correctly ordered; therefore, if you use
negative floating point numbers in a key, you'll need to implement and configure
a custom comparator to get correct numeric ordering.
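A small round-trip sketch that pairs a TupleOutput writer with a TupleInput reader using the TupleBinding utility methods documented in this package; the class name and field values are illustrative only:

    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;
    import com.sleepycat.db.DatabaseEntry;

    public class TupleRoundTrip {
        public static void main(String[] args) {
            // Write a few fields in tuple format.
            TupleOutput out = TupleBinding.newOutput();
            out.writeString("widget");   // null-terminated UTF
            out.writeInt(42);            // MSB order, sign bit inverted
            out.writeBoolean(true);

            // Move the written bytes into an entry buffer.
            DatabaseEntry entry = new DatabaseEntry();
            TupleBinding.outputToEntry(out, entry);

            // Read the fields back in the same order they were written.
            TupleInput in = TupleBinding.entryToInput(entry);
            String name = in.readString();
            int quantity = in.readInt();
            boolean inStock = in.readBoolean();
            System.out.println(name + " " + quantity + " " + inStock);
        }
    }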
Constructor Summary
  TupleInput(byte[] buffer)
  TupleInput(byte[] buffer, int offset, int length)
  TupleInput(TupleOutput output)

Method Summary
  readBoolean(), readByte(), readBytes(char[] chars), readBytes(int length),
  readChar(), readChars(char[] chars), readChars(int length), readDouble(),
  readFloat(), readInt(), readLong(), readShort(), readString(),
  readString(char[] chars), readString(int length), readUnsignedByte(),
  readUnsignedInt(), readUnsignedShort()

Methods inherited from class com.sleepycat.util.FastInputStream:
  available, getBufferBytes, getBufferLength, getBufferOffset, mark,
  markSupported, read, read, read, readFast, readFast, readFast, reset, skip
Methods inherited from class java.io.InputStream:
  close
Methods inherited from class java.lang.Object:
  equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Constructor Detail

TupleInput
  public TupleInput(byte[] buffer)
  Creates a tuple input object for reading a byte array of tuple data. A reference
  to the byte array will be kept by this object (it will not be copied) and
  therefore the byte array should not be modified while this object is in use.
  Parameters: buffer - is the byte array to be read and should contain data in
  tuple format.

TupleInput
  public TupleInput(byte[] buffer, int offset, int length)
  Creates a tuple input object for reading a byte array of tuple data at a given
  offset for a given length. A reference to the byte array will be kept by this
  object (it will not be copied) and therefore the byte array should not be
  modified while this object is in use.
  Parameters: buffer - is the byte array to be read and should contain data in
              tuple format.
              offset - is the byte offset at which to begin reading.
              length - is the number of bytes to be read.

TupleInput
  public TupleInput(TupleOutput output)
  Creates a tuple input object from the data contained in a tuple output object.
  A reference to the tuple output's byte array will be kept by this object (it
  will not be copied) and therefore the tuple output object should not be
  modified while this object is in use.
  Parameters: output - is the tuple output object containing the data to be read.

Method Detail

readString
  public final String readString() throws IndexOutOfBoundsException, IllegalArgumentException
  Reads a null-terminated UTF string from the data buffer and converts the data
  from UTF to Unicode. Reads values that were written using TupleOutput.writeString(String).
  Returns: the converted string.
  Throws: IndexOutOfBoundsException - if no null terminating byte is found in the buffer.
          IllegalArgumentException - if malformed UTF data is encountered.

readChar
  public final char readChar() throws IndexOutOfBoundsException
  Reads a char (two byte) unsigned value from the buffer. Reads values that were
  written using TupleOutput.writeChar(int).
  Returns: the value read from the buffer.
  Throws: IndexOutOfBoundsException - if not enough bytes are available in the buffer.

readBoolean
  public final boolean readBoolean() throws IndexOutOfBoundsException
  Reads a boolean (one byte) unsigned value from the buffer and returns true if
  it is non-zero and false if it is zero. Reads values that were written using
  TupleOutput.writeBoolean(boolean).
  Returns: the value read from the buffer.
  Throws: IndexOutOfBoundsException - if not enough bytes are available in the buffer.

readByte
  public final byte readByte() throws IndexOutOfBoundsException
  Reads a signed byte (one byte) value from the buffer. Reads values that were
  written using TupleOutput.writeByte(int).
  Returns: the value read from the buffer.
  Throws: IndexOutOfBoundsException - if not enough bytes are available in the buffer.

readShort
  public final short readShort() throws IndexOutOfBoundsException
  Reads a signed short (two byte) value from the buffer. Reads values that were
  written using TupleOutput.writeShort(int).
  Returns: the value read from the buffer.
  Throws: IndexOutOfBoundsException - if not enough bytes are available in the buffer.

readInt
  public final int readInt() throws IndexOutOfBoundsException
  Reads a signed int (four byte) value from the buffer. Reads values that were
  written using TupleOutput.writeInt(int).
  Returns: the value read from the buffer.
  Throws: IndexOutOfBoundsException - if not enough bytes are available in the buffer.

readLong
  public final long readLong() throws IndexOutOfBoundsException
  Reads a signed long (eight byte) value from the buffer. Reads values that were
  written using TupleOutput.writeLong(long).
  Returns: the value read from the buffer.
  Throws: IndexOutOfBoundsException - if not enough bytes are available in the buffer.

readFloat
  public final float readFloat() throws IndexOutOfBoundsException
  Reads a signed float (four byte) value from the buffer. Reads values that were
  written using TupleOutput.writeFloat(float). Float.intBitsToFloat is used to
  convert the signed int value.
  Returns: the value read from the buffer.
  Throws: IndexOutOfBoundsException - if not enough bytes are available in the buffer.

readDouble
  public final double readDouble() throws IndexOutOfBoundsException
  Reads a signed double (eight byte) value from the buffer. Reads values that
  were written using TupleOutput.writeDouble(double). Double.longBitsToDouble is
  used to convert the signed long value.
  Returns: the value read from the buffer.
  Throws: IndexOutOfBoundsException - if not enough bytes are available in the buffer.

readUnsignedByte
  public final int readUnsignedByte() throws IndexOutOfBoundsException
  Reads an unsigned byte (one byte) value from the buffer. Reads values that were
  written using TupleOutput.writeUnsignedByte(int).
  Returns: the value read from the buffer.
  Throws: IndexOutOfBoundsException - if not enough bytes are available in the buffer.

readUnsignedShort
  public final int readUnsignedShort() throws IndexOutOfBoundsException
  Reads an unsigned short (two byte) value from the buffer. Reads values that
  were written using TupleOutput.writeUnsignedShort(int).
  Returns: the value read from the buffer.
  Throws: IndexOutOfBoundsException - if not enough bytes are available in the buffer.

readUnsignedInt
  public final long readUnsignedInt() throws IndexOutOfBoundsException
  Reads an unsigned int (four byte) value from the buffer. Reads values that were
  written using TupleOutput.writeUnsignedInt(long).
  Returns: the value read from the buffer.
  Throws: IndexOutOfBoundsException - if not enough bytes are available in the buffer.

readBytes
  public final String readBytes(int length) throws IndexOutOfBoundsException
  Reads the specified number of bytes from the buffer, converting each unsigned
  byte value to a character of the resulting string. Reads values that were
  written using TupleOutput.writeBytes(java.lang.String). Only characters with
  values below 0x100 may be read using this method.
  Parameters: length - is the number of bytes to be read.
  Returns: the value read from the buffer.
  Throws: IndexOutOfBoundsException - if not enough bytes are available in the buffer.

readChars
  public final String readChars(int length) throws IndexOutOfBoundsException
  Reads the specified number of characters from the buffer, converting each two
  byte unsigned value to a character of the resulting string. Reads values that
  were written using TupleOutput.writeChars(java.lang.String).
  Parameters: length - is the number of characters to be read.
  Returns: the value read from the buffer.
  Throws: IndexOutOfBoundsException - if not enough bytes are available in the buffer.

readBytes
  public final void readBytes(char[] chars) throws IndexOutOfBoundsException
  Reads the specified number of bytes from the buffer, converting each unsigned
  byte value to a character of the resulting array. Reads values that were
  written using TupleOutput.writeBytes(java.lang.String). Only characters with
  values below 0x100 may be read using this method.
  Parameters: chars - is the array to receive the data and whose length is used
  to determine the number of bytes to be read.
  Throws: IndexOutOfBoundsException - if not enough bytes are available in the buffer.

readChars
  public final void readChars(char[] chars) throws IndexOutOfBoundsException
  Reads the specified number of characters from the buffer, converting each two
  byte unsigned value to a character of the resulting array. Reads values that
  were written using TupleOutput.writeChars(java.lang.String).
  Parameters: chars - is the array to receive the data and whose length is used
  to determine the number of characters to be read.
  Throws: IndexOutOfBoundsException - if not enough bytes are available in the buffer.

readString
  public final String readString(int length)
                          throws IndexOutOfBoundsException,
                                 IllegalArgumentException
+
+
Reads the specified number of UTF characters from the data + buffer and converts the data from UTF to Unicode. + Reads values that were written using TupleOutput.writeString(char[]). +

+

+
Parameters:
length - is the number of characters to be read. +
Returns:
the converted string. +
Throws: +
IndexOutOfBoundsException - if no null terminating byte is found + in the buffer. +
IllegalArgumentException - if malformed UTF data is encountered.
+
+
+
+ +

+readString

+
+public final void readString(char[] chars)
+                      throws IndexOutOfBoundsException,
+                             IllegalArgumentException
+
+
Reads the specified number of UTF characters from the data + buffer and converts the data from UTF to Unicode. + Reads values that were written using TupleOutput.writeString(char[]). +

+

+
Parameters:
chars - is the array to receive the data and whose length is used + to determine the number of characters to be read. +
Returns:
the converted string. +
Throws: +
IndexOutOfBoundsException - if no null terminating byte is found + in the buffer. +
IllegalArgumentException - if malformed UTF data is encountered.
+
+
+ +
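
Taken together, the read methods above mirror the write methods documented for TupleOutput. A minimal round-trip sketch, using only the constructors and methods documented on these two pages (the example values and class name are invented):

import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;

public class TupleReadExample {
    public static void main(String[] args) {
        // Write a few fields; the field order is the only "schema".
        TupleOutput out = new TupleOutput();
        out.writeString("part-0042");   // null-terminated UTF
        out.writeInt(17);               // signed int, MSB first
        out.writeBoolean(true);         // one byte, non-zero means true
        out.writeDouble(3.25);          // Double.doubleToLongBits format

        // Read the fields back in exactly the order they were written.
        TupleInput in = new TupleInput(out);   // TupleInput(TupleOutput) constructor
        String name = in.readString();
        int qty = in.readInt();
        boolean active = in.readBoolean();
        double weight = in.readDouble();
        System.out.println(name + " " + qty + " " + active + " " + weight);
    }
}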
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/TupleInputBinding.html b/db/docs/java/com/sleepycat/bind/tuple/TupleInputBinding.html new file mode 100644 index 000000000..3fc57bcbf --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/TupleInputBinding.html @@ -0,0 +1,300 @@ + + + + + + +TupleInputBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.bind.tuple +
+Class TupleInputBinding

+
+java.lang.Object
+  extended bycom.sleepycat.bind.tuple.TupleInputBinding
+
+
+
All Implemented Interfaces:
EntryBinding
+
+
+
+
public class TupleInputBinding
extends Object
implements EntryBinding
+ +

+A concrete EntryBinding that uses the TupleInput + object as the key or data object. + + A concrete tuple binding for key or data entries which are TupleInput objects. This binding is used when tuples themselves are the + objects, rather than using application defined objects. A TupleInput + must always be used. To convert a TupleOutput to a TupleInput, use the TupleInput.TupleInput(TupleOutput) constructor. +
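
A minimal usage sketch, assuming the DatabaseEntry class from com.sleepycat.db referenced elsewhere in these pages (the field values are invented):

import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleInputBinding;
import com.sleepycat.bind.tuple.TupleOutput;
import com.sleepycat.db.DatabaseEntry;

public class TupleInputBindingExample {
    public static void main(String[] args) {
        TupleInputBinding binding = new TupleInputBinding();

        // Build a tuple and wrap it as a TupleInput, the object this binding uses.
        TupleOutput out = new TupleOutput();
        out.writeString("en").writeInt(7);
        TupleInput object = new TupleInput(out);   // TupleInput(TupleOutput) constructor

        // Object to entry buffer, then entry buffer back to object.
        DatabaseEntry entry = new DatabaseEntry();
        binding.objectToEntry(object, entry);
        TupleInput copy = (TupleInput) binding.entryToObject(entry);
        System.out.println(copy.readString() + " " + copy.readInt());
    }
}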

+ +

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
TupleInputBinding() + +
+          Creates a tuple input binding.
+  + + + + + + + + + + + + + + + +
+Method Summary
+ ObjectentryToObject(DatabaseEntry entry) + +
+          Converts an entry buffer into an Object.
+ voidobjectToEntry(Object object, + DatabaseEntry entry) + +
+          Converts an Object into an entry buffer.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+TupleInputBinding

+
+public TupleInputBinding()
+
+
Creates a tuple input binding. +

+

+ + + + + + + + +
+Method Detail
+ +

+entryToObject

+
+public Object entryToObject(DatabaseEntry entry)
+
+
Description copied from interface: EntryBinding
+
Converts an entry buffer into an Object. +

+

+
Specified by:
entryToObject in interface EntryBinding
+
+
+
Parameters:
entry - is the source entry buffer. +
Returns:
the resulting Object.
+
+
+
+ +

+objectToEntry

+
+public void objectToEntry(Object object,
+                          DatabaseEntry entry)
+
+
Description copied from interface: EntryBinding
+
Converts an Object into an entry buffer. +

+

+
Specified by:
objectToEntry in interface EntryBinding
+
+
+
Parameters:
object - is the source Object.
entry - is the destination entry buffer.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/TupleMarshalledBinding.html b/db/docs/java/com/sleepycat/bind/tuple/TupleMarshalledBinding.html new file mode 100644 index 000000000..b00184131 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/TupleMarshalledBinding.html @@ -0,0 +1,314 @@ + + + + + + +TupleMarshalledBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.bind.tuple +
+Class TupleMarshalledBinding

+
+java.lang.Object
+  extended bycom.sleepycat.bind.tuple.TupleBinding
+      extended bycom.sleepycat.bind.tuple.TupleMarshalledBinding
+
+
+
All Implemented Interfaces:
EntryBinding
+
+
+
+
public class TupleMarshalledBinding
extends TupleBinding
+ +

+A concrete TupleBinding that delegates to the + MarshalledTupleEntry interface of the data or key object. + +

This class works by calling the methods of the MarshalledTupleEntry interface, which must be implemented by the key or + data class, to convert between the key or data entry and the object.
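
A minimal sketch of such a key or data class (the class and field names are invented; the MarshalledTupleEntry method names are assumed to be marshalEntry and unmarshalEntry, as referenced elsewhere in these pages):

import com.sleepycat.bind.tuple.MarshalledTupleEntry;
import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;

public class PartData implements MarshalledTupleEntry {
    private String supplier;
    private int quantity;

    public PartData() {}   // public no-arguments constructor, required by the binding

    // Write this object's fields to the data tuple.
    public void marshalEntry(TupleOutput dataOutput) {
        dataOutput.writeString(supplier).writeInt(quantity);
    }

    // Read this object's fields back from the data tuple, in the same order.
    public void unmarshalEntry(TupleInput dataInput) {
        supplier = dataInput.readString();
        quantity = dataInput.readInt();
    }

    // The binding itself is then simply:
    //   TupleBinding binding = new TupleMarshalledBinding(PartData.class);
}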

+

+ +

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
TupleMarshalledBinding(Class cls) + +
+          Creates a tuple marshalled binding object.
+  + + + + + + + + + + + + + + + +
+Method Summary
+ ObjectentryToObject(TupleInput input) + +
+          Constructs a key or data object from a TupleInput entry.
+ voidobjectToEntry(Object object, + TupleOutput output) + +
+          Converts a key or data object to a tuple entry.
+ + + + + + + +
Methods inherited from class com.sleepycat.bind.tuple.TupleBinding
entryToInput, entryToObject, getPrimitiveBinding, inputToEntry, newOutput, newOutput, objectToEntry, outputToEntry
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+TupleMarshalledBinding

+
+public TupleMarshalledBinding(Class cls)
+
+
Creates a tuple marshalled binding object. + +

The given class is used to instantiate key or data objects using + Class.forName(java.lang.String), and therefore must be a public class and have a + public no-arguments constructor. It must also implement the MarshalledTupleEntry interface.

+

+

Parameters:
cls - is the class of the key or data objects.
+ + + + + + + + +
+Method Detail
+ +

+entryToObject

+
+public Object entryToObject(TupleInput input)
+
+
Description copied from class: TupleBinding
+
Constructs a key or data object from a TupleInput entry. +

+

+
Specified by:
entryToObject in class TupleBinding
+
+
+
Parameters:
input - is the tuple key or data entry. +
Returns:
the key or data object constructed from the entry.
+
+
+
+ +

+objectToEntry

+
+public void objectToEntry(Object object,
+                          TupleOutput output)
+
+
Description copied from class: TupleBinding
+
Converts a key or data object to a tuple entry. +

+

+
Specified by:
objectToEntry in class TupleBinding
+
+
+
Parameters:
object - is the key or data object.
output - is the tuple entry to which the key or data should be + written.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/TupleOutput.html b/db/docs/java/com/sleepycat/bind/tuple/TupleOutput.html new file mode 100644 index 000000000..2806515fd --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/TupleOutput.html @@ -0,0 +1,759 @@ + + + + + + +TupleOutput (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.bind.tuple +
+Class TupleOutput

+
+java.lang.Object
+  extended byjava.io.OutputStream
+      extended bycom.sleepycat.util.FastOutputStream
+          extended bycom.sleepycat.bind.tuple.TupleOutput
+
+
+
+
public class TupleOutput
extends FastOutputStream
+ +

+An OutputStream with DataOutput-like methods for + writing tuple fields. It is used by TupleBinding. + +

This class has many methods with the same signatures as methods in + the DataOutput interface. This class does not + implement DataOutput because doing so would break the interface + contract for those methods, owing to data format differences.

+ +

Signed numbers are stored in the buffer in MSB (most significant byte + first) order with their sign bit (high-order bit) inverted to cause negative + numbers to be sorted first when comparing values as unsigned byte arrays, + as done in a database. Unsigned numbers, including characters, are stored + in MSB order with no change to their sign bit.

+ +

Strings and character arrays are stored either as a fixed length array of + unicode characters, where the length must be known by the application, or as + a null-terminated UTF byte array.

+
    +
  • Null strings are UTF encoded as { 0xFF }, which is not allowed in a + standard UTF encoding. This allows null strings, as distinct from empty or + zero length strings, to be represented in a tuple. Using the default + comparator, null strings will be ordered last.
  • +
  • Zero (0x0000) character values are UTF encoded as non-zero values, and + therefore embedded zeros in the string are supported. The sequence { 0xC0, + 0x80 } is used to encode a zero character. This UTF encoding is the same + one used by native Java UTF libraries. However, this encoding of zero does + impact the lexicographical ordering, and zeros will not be sorted first (the + natural order) or last. For all character values other than zero, the + default UTF byte ordering is the same as the Unicode lexicographical + character ordering.
  • +
+ +

Floats and doubles are stored in standard Java integer-bit representation + (IEEE 754). Non-negative numbers are correctly ordered by numeric value. + However, negative numbers are not correctly ordered; therefore, if you use + negative floating point numbers in a key, you'll need to implement and + configure a custom comparator to get correct numeric ordering.
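
A small sketch illustrating the sort-order property described above; the expected byte values in the comments follow from the format description (MSB order, inverted sign bit) rather than from any particular release:

import com.sleepycat.bind.tuple.TupleOutput;

public class TupleSortOrderExample {
    public static void main(String[] args) {
        // writeInt stores four bytes MSB-first with the sign bit inverted,
        // so -1 sorts before +1 when the keys are compared as unsigned bytes:
        //   writeInt(-1) -> 7f ff ff ff
        //   writeInt(1)  -> 80 00 00 01
        System.out.println(hex(new TupleOutput().writeInt(-1).toByteArray()));
        System.out.println(hex(new TupleOutput().writeInt(1).toByteArray()));
    }

    private static String hex(byte[] bytes) {
        StringBuffer buf = new StringBuffer();
        for (int i = 0; i < bytes.length; i++) {
            String h = Integer.toHexString(bytes[i] & 0xff);
            if (h.length() == 1) {
                buf.append('0');
            }
            buf.append(h).append(' ');
        }
        return buf.toString();
    }
}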

+

+ +

+


+ +

+ + + + + + + + + + +
+Field Summary
+ + + + + + + +
Fields inherited from class com.sleepycat.util.FastOutputStream
DEFAULT_BUMP_SIZE, DEFAULT_INIT_SIZE
+  + + + + + + + + + + + + + +
+Constructor Summary
TupleOutput() + +
+          Creates a tuple output object for writing a byte array of tuple data.
TupleOutput(byte[] buffer) + +
+          Creates a tuple output object for writing a byte array of tuple data, + using a given buffer.
+  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ TupleOutputwriteBoolean(boolean val) + +
+          Writes a boolean (one byte) unsigned value to the buffer, writing one + if the value is true and zero if it is false.
+ TupleOutputwriteByte(int val) + +
+          Writes a signed byte (one byte) value to the buffer.
+ TupleOutputwriteBytes(char[] chars) + +
+          Writes the specified bytes to the buffer, converting each character to + an unsigned byte value.
+ TupleOutputwriteBytes(String val) + +
+          Writes the specified bytes to the buffer, converting each character to + an unsigned byte value.
+ TupleOutputwriteChar(int val) + +
+          Writes a char (two byte) unsigned value to the buffer.
+ TupleOutputwriteChars(char[] chars) + +
+          Writes the specified characters to the buffer, converting each character + to a two byte unsigned value.
+ TupleOutputwriteChars(String val) + +
+          Writes the specified characters to the buffer, converting each character + to a two byte unsigned value.
+ TupleOutputwriteDouble(double val) + +
+          Writes a signed double (eight byte) value to the buffer.
+ TupleOutputwriteFloat(float val) + +
+          Writes a signed float (four byte) value to the buffer.
+ TupleOutputwriteInt(int val) + +
+          Writes a signed int (four byte) value to the buffer.
+ TupleOutputwriteLong(long val) + +
+          Writes a signed long (eight byte) value to the buffer.
+ TupleOutputwriteShort(int val) + +
+          Writes a signed short (two byte) value to the buffer.
+ TupleOutputwriteString(char[] chars) + +
+          Writes the specified characters to the buffer, converting each character + to UTF format.
+ TupleOutputwriteString(String val) + +
+          Writes the specified characters to the buffer, converting each character + to UTF format, and adding a null terminator byte.
+ TupleOutputwriteUnsignedByte(int val) + +
+          Writes an unsigned byte (one byte) value to the buffer.
+ TupleOutputwriteUnsignedInt(long val) + +
+          Writes an unsigned int (four byte) value to the buffer.
+ TupleOutputwriteUnsignedShort(int val) + +
+          Writes an unsigned short (two byte) value to the buffer.
+ + + + + + + +
Methods inherited from class com.sleepycat.util.FastOutputStream
addSize, getBufferBytes, getBufferLength, getBufferOffset, makeSpace, reset, size, toByteArray, toByteArray, toString, toString, write, write, write, writeFast, writeFast, writeFast, writeTo
+ + + + + + + +
Methods inherited from class java.io.OutputStream
close, flush
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+TupleOutput

+
+public TupleOutput()
+
+
Creates a tuple output object for writing a byte array of tuple data. +

+

+
+ +

+TupleOutput

+
+public TupleOutput(byte[] buffer)
+
+
Creates a tuple output object for writing a byte array of tuple data, + using a given buffer. A new buffer will be allocated only if the number + of bytes needed is greater than the length of this buffer. A reference + to the byte array will be kept by this object and therefore the byte + array should not be modified while this object is in use. +

+

Parameters:
buffer - is the byte array to use as the buffer.
+ + + + + + + + +
+Method Detail
+ +

+writeBytes

+
+public final TupleOutput writeBytes(String val)
+
+
Writes the specified bytes to the buffer, converting each character to + an unsigned byte value. + Writes values that can be read using TupleInput.readBytes(int). + Only characters with values below 0x100 may be written using this + method, since the high-order 8 bits of all characters are discarded. +

+

+
Parameters:
val - is the string containing the values to be written. +
Returns:
this tuple output object. +
Throws: +
NullPointerException - if the val parameter is null.
+
+
+
+ +

+writeChars

+
+public final TupleOutput writeChars(String val)
+
+
Writes the specified characters to the buffer, converting each character + to a two byte unsigned value. + Writes values that can be read using TupleInput.readChars(int). +

+

+
Parameters:
val - is the string containing the characters to be written. +
Returns:
this tuple output object. +
Throws: +
NullPointerException - if the val parameter is null.
+
+
+
+ +

+writeString

+
+public final TupleOutput writeString(String val)
+
+
Writes the specified characters to the buffer, converting each character + to UTF format, and adding a null terminator byte. + Note that zero (0x0000) character values are encoded as non-zero values + and a null String parameter is encoded as 0xFF. + Writes values that can be read using TupleInput.readString(). +

+

+
Parameters:
val - is the string containing the characters to be written. +
Returns:
this tuple output object.
+
+
+
+ +

+writeChar

+
+public final TupleOutput writeChar(int val)
+
+
Writes a char (two byte) unsigned value to the buffer. + Writes values that can be read using TupleInput.readChar(). +

+

+
Parameters:
val - is the value to write to the buffer. +
Returns:
this tuple output object.
+
+
+
+ +

+writeBoolean

+
+public final TupleOutput writeBoolean(boolean val)
+
+
Writes a boolean (one byte) unsigned value to the buffer, writing one + if the value is true and zero if it is false. + Writes values that can be read using TupleInput.readBoolean(). +

+

+
Parameters:
val - is the value to write to the buffer. +
Returns:
this tuple output object.
+
+
+
+ +

+writeByte

+
+public final TupleOutput writeByte(int val)
+
+
Writes a signed byte (one byte) value to the buffer. + Writes values that can be read using TupleInput.readByte(). +

+

+
Parameters:
val - is the value to write to the buffer. +
Returns:
this tuple output object.
+
+
+
+ +

+writeShort

+
+public final TupleOutput writeShort(int val)
+
+
Writes a signed short (two byte) value to the buffer. + Writes values that can be read using TupleInput.readShort(). +

+

+
Parameters:
val - is the value to write to the buffer. +
Returns:
this tuple output object.
+
+
+
+ +

+writeInt

+
+public final TupleOutput writeInt(int val)
+
+
Writes a signed int (four byte) value to the buffer. + Writes values that can be read using TupleInput.readInt(). +

+

+
Parameters:
val - is the value to write to the buffer. +
Returns:
this tuple output object.
+
+
+
+ +

+writeLong

+
+public final TupleOutput writeLong(long val)
+
+
Writes a signed long (eight byte) value to the buffer. + Writes values that can be read using TupleInput.readLong(). +

+

+
Parameters:
val - is the value to write to the buffer. +
Returns:
this tuple output object.
+
+
+
+ +

+writeFloat

+
+public final TupleOutput writeFloat(float val)
+
+
Writes a signed float (four byte) value to the buffer. + Writes values that can be read using TupleInput.readFloat(). + Float.floatToIntBits is used to convert the signed float + value. +

+

+
Parameters:
val - is the value to write to the buffer. +
Returns:
this tuple output object.
+
+
+
+ +

+writeDouble

+
+public final TupleOutput writeDouble(double val)
+
+
Writes a signed double (eight byte) value to the buffer. + Writes values that can be read using TupleInput.readDouble(). + Double.doubleToLongBits is used to convert the signed + double value. +

+

+
Parameters:
val - is the value to write to the buffer. +
Returns:
this tuple output object.
+
+
+
+ +

+writeBytes

+
+public final TupleOutput writeBytes(char[] chars)
+
+
Writes the specified bytes to the buffer, converting each character to + an unsigned byte value. + Writes values that can be read using TupleInput.readBytes(int). + Only characters with values below 0x100 may be written using this + method, since the high-order 8 bits of all characters are discarded. +

+

+
Parameters:
chars - is the array of values to be written. +
Returns:
this tuple output object. +
Throws: +
NullPointerException - if the chars parameter is null.
+
+
+
+ +

+writeChars

+
+public final TupleOutput writeChars(char[] chars)
+
+
Writes the specified characters to the buffer, converting each character + to a two byte unsigned value. + Writes values that can be read using TupleInput.readChars(int). +

+

+
Parameters:
chars - is the array of characters to be written. +
Returns:
this tuple output object. +
Throws: +
NullPointerException - if the chars parameter is null.
+
+
+
+ +

+writeString

+
+public final TupleOutput writeString(char[] chars)
+
+
Writes the specified characters to the buffer, converting each character + to UTF format. + Note that zero (0x0000) character values are encoded as non-zero values. + Writes values that can be read using TupleInput.readString(int) + or TupleInput.readString(char[]). +

+

+
Parameters:
chars - is the array of characters to be written. +
Returns:
this tuple output object. +
Throws: +
NullPointerException - if the chars parameter is null.
+
+
+
+ +

+writeUnsignedByte

+
+public final TupleOutput writeUnsignedByte(int val)
+
+
Writes an unsigned byte (one byte) value to the buffer. + Writes values that can be read using TupleInput.readUnsignedByte(). +

+

+
Parameters:
val - is the value to write to the buffer. +
Returns:
this tuple output object.
+
+
+
+ +

+writeUnsignedShort

+
+public final TupleOutput writeUnsignedShort(int val)
+
+
Writes an unsigned short (two byte) value to the buffer. + Writes values that can be read using TupleInput.readUnsignedShort(). +

+

+
Parameters:
val - is the value to write to the buffer. +
Returns:
this tuple output object.
+
+
+
+ +

+writeUnsignedInt

+
+public final TupleOutput writeUnsignedInt(long val)
+
+
Writes an unsigned int (four byte) value to the buffer. + Writes values that can be read using TupleInput.readUnsignedInt(). +

+

+
Parameters:
val - is the value to write to the buffer. +
Returns:
this tuple output object.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/TupleTupleBinding.html b/db/docs/java/com/sleepycat/bind/tuple/TupleTupleBinding.html new file mode 100644 index 000000000..2701e1250 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/TupleTupleBinding.html @@ -0,0 +1,420 @@ + + + + + + +TupleTupleBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.bind.tuple +
+Class TupleTupleBinding

+
+java.lang.Object
+  extended bycom.sleepycat.bind.tuple.TupleTupleBinding
+
+
+
All Implemented Interfaces:
EntityBinding
+
+
+
Direct Known Subclasses:
TupleTupleMarshalledBinding
+
+
+
+
public abstract class TupleTupleBinding
extends Object
implements EntityBinding
+ +

+An abstract EntityBinding that treats an entity's key entry and + data entry as tuples. + +

This class takes care of converting the entries to/from TupleInput and TupleOutput objects. Its three abstract methods + must be implemented by a concrete subclass to convert between tuples and + entity objects.
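
A minimal concrete subclass sketch (the entity class and its fields are invented; the key tuple is assumed to hold a part number and the data tuple a supplier and quantity):

import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;
import com.sleepycat.bind.tuple.TupleTupleBinding;

public class PartTupleTupleBinding extends TupleTupleBinding {

    // Hypothetical entity class, included only to keep the sketch self-contained.
    public static class Part {
        final String number;
        final String supplier;
        final int quantity;
        Part(String number, String supplier, int quantity) {
            this.number = number;
            this.supplier = supplier;
            this.quantity = quantity;
        }
    }

    public Object entryToObject(TupleInput keyInput, TupleInput dataInput) {
        // Read the key tuple and the data tuple in the order they were written.
        String number = keyInput.readString();
        String supplier = dataInput.readString();
        int quantity = dataInput.readInt();
        return new Part(number, supplier, quantity);
    }

    public void objectToKey(Object object, TupleOutput output) {
        output.writeString(((Part) object).number);
    }

    public void objectToData(Object object, TupleOutput output) {
        Part part = (Part) object;
        output.writeString(part.supplier).writeInt(part.quantity);
    }
}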

+ +

+ +

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
TupleTupleBinding() + +
+          Creates a tuple-tuple entity binding.
+  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ ObjectentryToObject(DatabaseEntry key, + DatabaseEntry data) + +
+          Converts key and data entry buffers into an entity Object.
+abstract  ObjectentryToObject(TupleInput keyInput, + TupleInput dataInput) + +
+          Constructs an entity object from TupleInput key and data + entries.
+ voidobjectToData(Object object, + DatabaseEntry data) + +
+          Extracts the data entry from an entity Object.
+abstract  voidobjectToData(Object object, + TupleOutput output) + +
+          Extracts a data tuple from an entity object.
+ voidobjectToKey(Object object, + DatabaseEntry key) + +
+          Extracts the key entry from an entity Object.
+abstract  voidobjectToKey(Object object, + TupleOutput output) + +
+          Extracts a key tuple from an entity object.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+TupleTupleBinding

+
+public TupleTupleBinding()
+
+
Creates a tuple-tuple entity binding. +

+

+ + + + + + + + +
+Method Detail
+ +

+entryToObject

+
+public Object entryToObject(DatabaseEntry key,
+                            DatabaseEntry data)
+
+
Description copied from interface: EntityBinding
+
Converts key and data entry buffers into an entity Object. +

+

+
Specified by:
entryToObject in interface EntityBinding
+
+
+
Parameters:
key - is the source key entry.
data - is the source data entry. +
Returns:
the resulting Object.
+
+
+
+ +

+objectToKey

+
+public void objectToKey(Object object,
+                        DatabaseEntry key)
+
+
Description copied from interface: EntityBinding
+
Extracts the key entry from an entity Object. +

+

+
Specified by:
objectToKey in interface EntityBinding
+
+
+
Parameters:
object - is the source Object.
key - is the destination entry buffer.
+
+
+
+ +

+objectToData

+
+public void objectToData(Object object,
+                         DatabaseEntry data)
+
+
Description copied from interface: EntityBinding
+
Extracts the data entry from an entity Object. +

+

+
Specified by:
objectToData in interface EntityBinding
+
+
+
Parameters:
object - is the source Object.
data - is the destination entry buffer.
+
+
+
+ +

+entryToObject

+
+public abstract Object entryToObject(TupleInput keyInput,
+                                     TupleInput dataInput)
+
+
Constructs an entity object from TupleInput key and data + entries. +

+

+
+
+
+
Parameters:
keyInput - is the TupleInput key entry object.
dataInput - is the TupleInput data entry object. +
Returns:
the entity object constructed from the key and data.
+
+
+
+ +

+objectToKey

+
+public abstract void objectToKey(Object object,
+                                 TupleOutput output)
+
+
Extracts a key tuple from an entity object. +

+

+
+
+
+
Parameters:
object - is the entity object.
output - is the TupleOutput to which the key should be + written.
+
+
+
+ +

+objectToData

+
+public abstract void objectToData(Object object,
+                                  TupleOutput output)
+
+
Extracts a data tuple from an entity object. +

+

+
+
+
+
Parameters:
object - is the entity object.
output - is the TupleOutput to which the data should be + written.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/TupleTupleKeyCreator.html b/db/docs/java/com/sleepycat/bind/tuple/TupleTupleKeyCreator.html new file mode 100644 index 000000000..a3d59e12d --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/TupleTupleKeyCreator.html @@ -0,0 +1,383 @@ + + + + + + +TupleTupleKeyCreator (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.bind.tuple +
+Class TupleTupleKeyCreator

+
+java.lang.Object
+  extended bycom.sleepycat.bind.tuple.TupleTupleKeyCreator
+
+
+
All Implemented Interfaces:
SecondaryKeyCreator
+
+
+
Direct Known Subclasses:
TupleTupleMarshalledKeyCreator
+
+
+
+
public abstract class TupleTupleKeyCreator
extends Object
implements SecondaryKeyCreator
+ +

+An abstract key creator that uses a tuple key and a tuple data entry. This + class takes care of converting the key and data entry to/from TupleInput and TupleOutput objects. +
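
A minimal concrete subclass sketch (the tuple layout is invented: the data tuple is assumed to hold a supplier string followed by a quantity, and the supplier is the secondary key):

import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;
import com.sleepycat.bind.tuple.TupleTupleKeyCreator;

public class SupplierKeyCreator extends TupleTupleKeyCreator {

    public boolean createSecondaryKey(TupleInput primaryKeyInput,
                                      TupleInput dataInput,
                                      TupleOutput indexKeyOutput) {
        String supplier = dataInput.readString();
        if (supplier == null) {
            return false;                       // no index key for this record
        }
        indexKeyOutput.writeString(supplier);
        return true;
    }

    public boolean nullifyForeignKey(TupleInput dataInput, TupleOutput dataOutput) {
        // Copy the data tuple, replacing the supplier field with a null string
        // so that createSecondaryKey will return false afterwards.
        dataInput.readString();                 // discard the old supplier
        int quantity = dataInput.readInt();
        dataOutput.writeString((String) null).writeInt(quantity);
        return true;
    }
}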

+ +

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
TupleTupleKeyCreator() + +
+          Creates a tuple-tuple key creator.
+  + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ booleancreateSecondaryKey(SecondaryDatabase db, + DatabaseEntry primaryKeyEntry, + DatabaseEntry dataEntry, + DatabaseEntry indexKeyEntry) + +
+          Creates a secondary key entry, given a primary key and data entry.
+abstract  booleancreateSecondaryKey(TupleInput primaryKeyInput, + TupleInput dataInput, + TupleOutput indexKeyOutput) + +
+          Creates the index key from primary key tuple and data tuple.
+ booleannullifyForeignKey(SecondaryDatabase db, + DatabaseEntry dataEntry) + +
+           
+ booleannullifyForeignKey(TupleInput dataInput, + TupleOutput dataOutput) + +
+          Clears the index key in the tuple data entry.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+TupleTupleKeyCreator

+
+public TupleTupleKeyCreator()
+
+
Creates a tuple-tuple key creator. +

+

+ + + + + + + + +
+Method Detail
+ +

+createSecondaryKey

+
+public boolean createSecondaryKey(SecondaryDatabase db,
+                                  DatabaseEntry primaryKeyEntry,
+                                  DatabaseEntry dataEntry,
+                                  DatabaseEntry indexKeyEntry)
+                           throws DatabaseException
+
+
Description copied from interface: SecondaryKeyCreator
+
Creates a secondary key entry, given a primary key and data entry. +

+

+

+
Specified by:
createSecondaryKey in interface SecondaryKeyCreator
+
+
+
Parameters:
db - the database to which the secondary key will be added. +

primaryKeyEntry - the primary key entry. This parameter must not be modified + by this method. +

dataEntry - the primary data entry. This parameter must not be modified + by this method. +

indexKeyEntry - the secondary key created by this method. +

+

Returns:
true if a key was created, or false to indicate that the key is + not present. +

+

Throws: +
DatabaseException - if an error occurs attempting to create the + secondary key.
+
+
+
+ +

+nullifyForeignKey

+
+public boolean nullifyForeignKey(SecondaryDatabase db,
+                                 DatabaseEntry dataEntry)
+                          throws DatabaseException
+
+
+
+
+
+ +
Throws: +
DatabaseException
+
+
+
+ +

+createSecondaryKey

+
+public abstract boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                           TupleInput dataInput,
+                                           TupleOutput indexKeyOutput)
+
+
Creates the index key from primary key tuple and data tuple. +

+

+
+
+
+
Parameters:
primaryKeyInput - is the TupleInput for the primary key + entry.
dataInput - is the TupleInput for the data entry.
indexKeyOutput - is the destination index key tuple. +
Returns:
true if a key was created, or false to indicate that the key is + not present.
+
+
+
+ +

+nullifyForeignKey

+
+public boolean nullifyForeignKey(TupleInput dataInput,
+                                 TupleOutput dataOutput)
+
+
Clears the index key in the tuple data entry. The dataInput should be + read and then written to the dataOutput, clearing the index key in the + process. + +

The secondary key should be output or removed by this method such + that createSecondaryKey(com.sleepycat.db.SecondaryDatabase, com.sleepycat.db.DatabaseEntry, com.sleepycat.db.DatabaseEntry, com.sleepycat.db.DatabaseEntry) will return false. Other fields in the + data object should remain unchanged.

+

+

+
+
+
+
Parameters:
dataInput - is the TupleInput for the data entry.
dataOutput - is the destination TupleOutput. +
Returns:
true if the key was cleared, or false to indicate that the key + is not present and no change is necessary.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/TupleTupleMarshalledBinding.html b/db/docs/java/com/sleepycat/bind/tuple/TupleTupleMarshalledBinding.html new file mode 100644 index 000000000..adf696770 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/TupleTupleMarshalledBinding.html @@ -0,0 +1,351 @@ + + + + + + +TupleTupleMarshalledBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.bind.tuple +
+Class TupleTupleMarshalledBinding

+
+java.lang.Object
+  extended bycom.sleepycat.bind.tuple.TupleTupleBinding
+      extended bycom.sleepycat.bind.tuple.TupleTupleMarshalledBinding
+
+
+
All Implemented Interfaces:
EntityBinding
+
+
+
+
public class TupleTupleMarshalledBinding
extends TupleTupleBinding
+ +

+A concrete TupleTupleBinding that delegates to the + MarshalledTupleEntry and + MarshalledTupleKeyEntity interfaces of the entity class. + +

This class calls the methods of the MarshalledTupleEntry + interface to convert between the data entry and entity object. It calls the + methods of the MarshalledTupleKeyEntity interface to convert between + the key entry and the entity object. These two interfaces must both be + implemented by the entity class.
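
A sketch of such an entity class; the field names are invented, and the MarshalledTupleKeyEntity method signatures beyond unmarshalPrimaryKey and marshalSecondaryKey (which are referenced elsewhere in these pages) are assumptions:

import com.sleepycat.bind.tuple.MarshalledTupleEntry;
import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity;
import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;

public class PartEntity implements MarshalledTupleEntry, MarshalledTupleKeyEntity {
    private String partNumber;   // primary key field
    private String supplier;     // indexed by a "supplier" secondary key
    private int quantity;

    public PartEntity() {}       // public no-arguments constructor, required

    // MarshalledTupleEntry: the data entry.
    public void marshalEntry(TupleOutput dataOutput) {
        dataOutput.writeString(supplier).writeInt(quantity);
    }
    public void unmarshalEntry(TupleInput dataInput) {
        supplier = dataInput.readString();
        quantity = dataInput.readInt();
    }

    // MarshalledTupleKeyEntity: the primary key and any secondary keys.
    public void marshalPrimaryKey(TupleOutput keyOutput) {
        keyOutput.writeString(partNumber);
    }
    public void unmarshalPrimaryKey(TupleInput keyInput) {
        partNumber = keyInput.readString();
    }
    public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) {
        if ("supplier".equals(keyName) && supplier != null) {
            keyOutput.writeString(supplier);
            return true;
        }
        return false;
    }
    public boolean nullifyForeignKey(String keyName) {
        if ("supplier".equals(keyName)) {
            supplier = null;
            return true;
        }
        return false;
    }

    // The binding is then:  new TupleTupleMarshalledBinding(PartEntity.class)
}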

+

+ +

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
TupleTupleMarshalledBinding(Class cls) + +
+          Creates a tuple-tuple marshalled binding object.
+  + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ ObjectentryToObject(TupleInput keyInput, + TupleInput dataInput) + +
+          Constructs an entity object from TupleInput key and data + entries.
+ voidobjectToData(Object object, + TupleOutput output) + +
+          Extracts a data tuple from an entity object.
+ voidobjectToKey(Object object, + TupleOutput output) + +
+          Extracts a key tuple from an entity object.
+ + + + + + + +
Methods inherited from class com.sleepycat.bind.tuple.TupleTupleBinding
entryToObject, objectToData, objectToKey
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+TupleTupleMarshalledBinding

+
+public TupleTupleMarshalledBinding(Class cls)
+
+
Creates a tuple-tuple marshalled binding object. + +

The given class is used to instantiate entity objects using + Class.forName(java.lang.String), and therefore must be a public class and have a + public no-arguments constructor. It must also implement the MarshalledTupleEntry and MarshalledTupleKeyEntity + interfaces.

+

+

Parameters:
cls - is the class of the entity objects.
+ + + + + + + + +
+Method Detail
+ +

+entryToObject

+
+public Object entryToObject(TupleInput keyInput,
+                            TupleInput dataInput)
+
+
Description copied from class: TupleTupleBinding
+
Constructs an entity object from TupleInput key and data + entries. +

+

+
Specified by:
entryToObject in class TupleTupleBinding
+
+
+
Parameters:
keyInput - is the TupleInput key entry object.
dataInput - is the TupleInput data entry object. +
Returns:
the entity object constructed from the key and data.
+
+
+
+ +

+objectToKey

+
+public void objectToKey(Object object,
+                        TupleOutput output)
+
+
Description copied from class: TupleTupleBinding
+
Extracts a key tuple from an entity object. +

+

+
Specified by:
objectToKey in class TupleTupleBinding
+
+
+
Parameters:
object - is the entity object.
output - is the TupleOutput to which the key should be + written.
+
+
+
+ +

+objectToData

+
+public void objectToData(Object object,
+                         TupleOutput output)
+
+
Description copied from class: TupleTupleBinding
+
Extracts a data tuple from an entity object. +

+

+
Specified by:
objectToData in class TupleTupleBinding
+
+
+
Parameters:
object - is the entity object.
output - is the TupleOutput to which the data should be + written.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/TupleTupleMarshalledKeyCreator.html b/db/docs/java/com/sleepycat/bind/tuple/TupleTupleMarshalledKeyCreator.html new file mode 100644 index 000000000..075ad02ab --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/TupleTupleMarshalledKeyCreator.html @@ -0,0 +1,329 @@ + + + + + + +TupleTupleMarshalledKeyCreator (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.bind.tuple +
+Class TupleTupleMarshalledKeyCreator

+
+java.lang.Object
+  extended bycom.sleepycat.bind.tuple.TupleTupleKeyCreator
+      extended bycom.sleepycat.bind.tuple.TupleTupleMarshalledKeyCreator
+
+
+
All Implemented Interfaces:
SecondaryKeyCreator
+
+
+
+
public class TupleTupleMarshalledKeyCreator
extends TupleTupleKeyCreator
+ +

+A concrete key creator that works in conjunction with a TupleTupleMarshalledBinding. This key creator works by calling the + methods of the MarshalledTupleKeyEntity interface to create and + clear the index key. + +

Note that a marshalled tuple key creator is somewhat less efficient + than a non-marshalled tuple key creator because more conversions are + needed. A marshalled key creator must convert the entry to an object in + order to create the key, while an unmarshalled key creator does not.
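
A short usage sketch, reusing the hypothetical PartEntity from the TupleTupleMarshalledBinding sketch above ("supplier" is the key name passed to marshalSecondaryKey):

import com.sleepycat.bind.tuple.TupleTupleMarshalledBinding;
import com.sleepycat.bind.tuple.TupleTupleMarshalledKeyCreator;
import com.sleepycat.db.SecondaryKeyCreator;

public class SupplierKeyCreatorSetup {
    public static SecondaryKeyCreator create() {
        TupleTupleMarshalledBinding binding =
            new TupleTupleMarshalledBinding(PartEntity.class);
        // The key creator marshals the entity and asks it for the "supplier" key.
        return new TupleTupleMarshalledKeyCreator(binding, "supplier");
    }
}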

+

+ +

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
TupleTupleMarshalledKeyCreator(TupleTupleMarshalledBinding binding, + String keyName) + +
+          Creates a tuple-tuple marshalled key creator.
+  + + + + + + + + + + + + + + + +
+Method Summary
+ booleancreateSecondaryKey(TupleInput primaryKeyInput, + TupleInput dataInput, + TupleOutput indexKeyOutput) + +
+          Creates the index key from primary key tuple and data tuple.
+ booleannullifyForeignKey(TupleInput dataInput, + TupleOutput dataOutput) + +
+          Clears the index key in the tuple data entry.
+ + + + + + + +
Methods inherited from class com.sleepycat.bind.tuple.TupleTupleKeyCreator
createSecondaryKey, nullifyForeignKey
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+TupleTupleMarshalledKeyCreator

+
+public TupleTupleMarshalledKeyCreator(TupleTupleMarshalledBinding binding,
+                                      String keyName)
+
+
Creates a tuple-tuple marshalled key creator. +

+

Parameters:
binding - is the binding used for the tuple-tuple entity.
keyName - is the key name passed to the MarshalledTupleKeyEntity.marshalSecondaryKey(java.lang.String, com.sleepycat.bind.tuple.TupleOutput) method to identify the + index key.
+ + + + + + + + +
+Method Detail
+ +

+createSecondaryKey

+
+public boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                  TupleInput dataInput,
+                                  TupleOutput indexKeyOutput)
+
+
Description copied from class: TupleTupleKeyCreator
+
Creates the index key from primary key tuple and data tuple. +

+

+
Specified by:
createSecondaryKey in class TupleTupleKeyCreator
+
+
+
Parameters:
primaryKeyInput - is the TupleInput for the primary key + entry.
dataInput - is the TupleInput for the data entry.
indexKeyOutput - is the destination index key tuple. +
Returns:
true if a key was created, or false to indicate that the key is + not present.
+
+
+
+ +

+nullifyForeignKey

+
+public boolean nullifyForeignKey(TupleInput dataInput,
+                                 TupleOutput dataOutput)
+
+
Description copied from class: TupleTupleKeyCreator
+
Clears the index key in the tuple data entry. The dataInput should be + read and then written to the dataOutput, clearing the index key in the + process. + +

The secondary key should be output or removed by this method such + that TupleTupleKeyCreator.createSecondaryKey(com.sleepycat.db.SecondaryDatabase, com.sleepycat.db.DatabaseEntry, com.sleepycat.db.DatabaseEntry, com.sleepycat.db.DatabaseEntry) will return false. Other fields in the + data object should remain unchanged.

+

+

+
Overrides:
nullifyForeignKey in class TupleTupleKeyCreator
+
+
+
Parameters:
dataInput - is the TupleInput for the data entry.
dataOutput - is the destination TupleOutput. +
Returns:
true if the key was cleared, or false to indicate that the key + is not present and no change is necessary.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/class-use/BooleanBinding.html b/db/docs/java/com/sleepycat/bind/tuple/class-use/BooleanBinding.html new file mode 100644 index 000000000..36cb9fbf6 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/class-use/BooleanBinding.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.bind.tuple.BooleanBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.tuple.BooleanBinding

+
+No usage of com.sleepycat.bind.tuple.BooleanBinding +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/class-use/ByteBinding.html b/db/docs/java/com/sleepycat/bind/tuple/class-use/ByteBinding.html new file mode 100644 index 000000000..b38aa1fb4 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/class-use/ByteBinding.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.bind.tuple.ByteBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.tuple.ByteBinding

+
+No usage of com.sleepycat.bind.tuple.ByteBinding +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/class-use/CharacterBinding.html b/db/docs/java/com/sleepycat/bind/tuple/class-use/CharacterBinding.html new file mode 100644 index 000000000..bceea221f --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/class-use/CharacterBinding.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.bind.tuple.CharacterBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.tuple.CharacterBinding

+
+No usage of com.sleepycat.bind.tuple.CharacterBinding +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/class-use/DoubleBinding.html b/db/docs/java/com/sleepycat/bind/tuple/class-use/DoubleBinding.html new file mode 100644 index 000000000..055259042 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/class-use/DoubleBinding.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.bind.tuple.DoubleBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.tuple.DoubleBinding

+
+No usage of com.sleepycat.bind.tuple.DoubleBinding +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/class-use/FloatBinding.html b/db/docs/java/com/sleepycat/bind/tuple/class-use/FloatBinding.html new file mode 100644 index 000000000..c1c598b55 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/class-use/FloatBinding.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.bind.tuple.FloatBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.tuple.FloatBinding

+
+No usage of com.sleepycat.bind.tuple.FloatBinding +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/class-use/IntegerBinding.html b/db/docs/java/com/sleepycat/bind/tuple/class-use/IntegerBinding.html new file mode 100644 index 000000000..d0eb22221 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/class-use/IntegerBinding.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.bind.tuple.IntegerBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.tuple.IntegerBinding

+
+No usage of com.sleepycat.bind.tuple.IntegerBinding +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/class-use/LongBinding.html b/db/docs/java/com/sleepycat/bind/tuple/class-use/LongBinding.html new file mode 100644 index 000000000..3bf8cd8db --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/class-use/LongBinding.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.bind.tuple.LongBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.tuple.LongBinding

+
+No usage of com.sleepycat.bind.tuple.LongBinding +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/class-use/MarshalledTupleEntry.html b/db/docs/java/com/sleepycat/bind/tuple/class-use/MarshalledTupleEntry.html new file mode 100644 index 000000000..75baa0289 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/class-use/MarshalledTupleEntry.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Interface com.sleepycat.bind.tuple.MarshalledTupleEntry (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Interface
com.sleepycat.bind.tuple.MarshalledTupleEntry

+
+No usage of com.sleepycat.bind.tuple.MarshalledTupleEntry +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/class-use/MarshalledTupleKeyEntity.html b/db/docs/java/com/sleepycat/bind/tuple/class-use/MarshalledTupleKeyEntity.html new file mode 100644 index 000000000..f621d81be --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/class-use/MarshalledTupleKeyEntity.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Interface com.sleepycat.bind.tuple.MarshalledTupleKeyEntity (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Interface
com.sleepycat.bind.tuple.MarshalledTupleKeyEntity

+
+No usage of com.sleepycat.bind.tuple.MarshalledTupleKeyEntity +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/class-use/ShortBinding.html b/db/docs/java/com/sleepycat/bind/tuple/class-use/ShortBinding.html new file mode 100644 index 000000000..415b35c53 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/class-use/ShortBinding.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.bind.tuple.ShortBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.tuple.ShortBinding

+
+No usage of com.sleepycat.bind.tuple.ShortBinding +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/class-use/StringBinding.html b/db/docs/java/com/sleepycat/bind/tuple/class-use/StringBinding.html new file mode 100644 index 000000000..16914bd8d --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/class-use/StringBinding.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.bind.tuple.StringBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.tuple.StringBinding

+
+No usage of com.sleepycat.bind.tuple.StringBinding +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleBinding.html b/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleBinding.html new file mode 100644 index 000000000..1c29ef2ce --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleBinding.html @@ -0,0 +1,269 @@ + + + + + + +Uses of Class com.sleepycat.bind.tuple.TupleBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.tuple.TupleBinding

+
+ + + + + + + + + +
+Packages that use TupleBinding
com.sleepycat.bind.tupleBindings that use sequences of primitive fields, or tuples. 
+  +

+ + + + + +
+Uses of TupleBinding in com.sleepycat.bind.tuple
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Subclasses of TupleBinding in com.sleepycat.bind.tuple
+ classBooleanBinding + +
+          A concrete TupleBinding for a Boolean primitive + wrapper or a boolean primitive.
+ classByteBinding + +
+          A concrete TupleBinding for a Byte primitive + wrapper or a byte primitive.
+ classCharacterBinding + +
+          A concrete TupleBinding for a Character primitive + wrapper or a char primitive.
+ classDoubleBinding + +
+          A concrete TupleBinding for a Double primitive + wrapper or a double primitive.
+ classFloatBinding + +
+          A concrete TupleBinding for a Float primitive + wrapper or a float primitive.
+ classIntegerBinding + +
+          A concrete TupleBinding for a Integer primitive + wrapper or an int primitive.
+ classLongBinding + +
+          A concrete TupleBinding for a Long primitive + wrapper or a long primitive.
+ classShortBinding + +
+          A concrete TupleBinding for a Short primitive + wrapper or a short primitive.
+ classStringBinding + +
+          A concrete TupleBinding for a simple String value.
+ classTupleMarshalledBinding + +
+          A concrete TupleBinding that delegates to the + MarshalledTupleEntry interface of the data or key object.
+  +

+ + + + + + + + + +
Methods in com.sleepycat.bind.tuple that return TupleBinding
+static TupleBindingTupleBinding.getPrimitiveBinding(Class cls) + +
+          Creates a tuple binding for a primitive Java class.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleInput.html b/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleInput.html new file mode 100644 index 000000000..e518b3956 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleInput.html @@ -0,0 +1,414 @@ + + + + + + +Uses of Class com.sleepycat.bind.tuple.TupleInput (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.tuple.TupleInput

+
+ + + + + + + + + + + + + +
+Packages that use TupleInput
com.sleepycat.bind.serialBindings that use Java serialization. 
com.sleepycat.bind.tupleBindings that use sequences of primitive fields, or tuples. 
+  +

+ + + + + +
+Uses of TupleInput in com.sleepycat.bind.serial
+  +

+ + + + + + + + + + + + + + + + + + + + + +
Methods in com.sleepycat.bind.serial with parameters of type TupleInput
+ booleanTupleSerialMarshalledKeyCreator.createSecondaryKey(TupleInput primaryKeyInput, + Object dataInput, + TupleOutput indexKeyOutput) + +
+           
+ ObjectTupleSerialMarshalledBinding.entryToObject(TupleInput tupleInput, + Object javaInput) + +
+           
+abstract  booleanTupleSerialKeyCreator.createSecondaryKey(TupleInput primaryKeyInput, + Object dataInput, + TupleOutput indexKeyOutput) + +
+          Creates the index key entry from primary key tuple entry and + deserialized data entry.
+abstract  ObjectTupleSerialBinding.entryToObject(TupleInput keyInput, + Object dataInput) + +
+          Constructs an entity object from TupleInput key entry and + deserialized data entry objects.
+  +

+ + + + + +
+Uses of TupleInput in com.sleepycat.bind.tuple
+  +

+ + + + + + + + + +
Methods in com.sleepycat.bind.tuple that return TupleInput
+static TupleInputTupleBinding.entryToInput(DatabaseEntry entry) + +
+          Utility method to create a new tuple input object for reading the data + from a given buffer.
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Methods in com.sleepycat.bind.tuple with parameters of type TupleInput
+ booleanTupleTupleMarshalledKeyCreator.createSecondaryKey(TupleInput primaryKeyInput, + TupleInput dataInput, + TupleOutput indexKeyOutput) + +
+           
+ booleanTupleTupleMarshalledKeyCreator.nullifyForeignKey(TupleInput dataInput, + TupleOutput dataOutput) + +
+           
+ ObjectTupleTupleMarshalledBinding.entryToObject(TupleInput keyInput, + TupleInput dataInput) + +
+           
+abstract  booleanTupleTupleKeyCreator.createSecondaryKey(TupleInput primaryKeyInput, + TupleInput dataInput, + TupleOutput indexKeyOutput) + +
+          Creates the index key from primary key tuple and data tuple.
+ booleanTupleTupleKeyCreator.nullifyForeignKey(TupleInput dataInput, + TupleOutput dataOutput) + +
+          Clears the index key in the tuple data entry.
+abstract  ObjectTupleTupleBinding.entryToObject(TupleInput keyInput, + TupleInput dataInput) + +
+          Constructs an entity object from TupleInput key and data + entries.
+ ObjectTupleMarshalledBinding.entryToObject(TupleInput input) + +
+           
+static voidTupleBinding.inputToEntry(TupleInput input, + DatabaseEntry entry) + +
+          Utility method to set the data in an entry buffer to the data in a tuple + input object.
+abstract  ObjectTupleBinding.entryToObject(TupleInput input) + +
+          Constructs a key or data object from a TupleInput entry.
+ ObjectStringBinding.entryToObject(TupleInput input) + +
+           
+ ObjectShortBinding.entryToObject(TupleInput input) + +
+           
+ voidMarshalledTupleKeyEntity.unmarshalPrimaryKey(TupleInput keyInput) + +
+          Completes construction of the entity by setting its primary key from the + stored primary key.
+ voidMarshalledTupleEntry.unmarshalEntry(TupleInput dataInput) + +
+          Constructs the key or data object from the key or data tuple entry.
+ ObjectLongBinding.entryToObject(TupleInput input) + +
+           
+ ObjectIntegerBinding.entryToObject(TupleInput input) + +
+           
+ ObjectFloatBinding.entryToObject(TupleInput input) + +
+           
+ ObjectDoubleBinding.entryToObject(TupleInput input) + +
+           
+ ObjectCharacterBinding.entryToObject(TupleInput input) + +
+           
+ ObjectByteBinding.entryToObject(TupleInput input) + +
+           
+ ObjectBooleanBinding.entryToObject(TupleInput input) + +
+           
+  +
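As an illustration of the createSecondaryKey signature listed above, a minimal sketch of a TupleTupleKeyCreator subclass; the record layout (a name string as the first data field) and the ability to subclass with the default constructor are assumptions, not part of the patch.

// Hypothetical key creator: the data tuple is assumed to begin with a
// String name; the secondary index key is that name alone.
import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;
import com.sleepycat.bind.tuple.TupleTupleKeyCreator;

public class NameKeyCreator extends TupleTupleKeyCreator {

    public boolean createSecondaryKey(TupleInput primaryKeyInput,
                                      TupleInput dataInput,
                                      TupleOutput indexKeyOutput) {
        String name = dataInput.readString(); // assumed record layout
        indexKeyOutput.writeString(name);
        return true; // an index key was produced for this record
    }
}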

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleInputBinding.html b/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleInputBinding.html new file mode 100644 index 000000000..71016f561 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleInputBinding.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.bind.tuple.TupleInputBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.tuple.TupleInputBinding

+
+No usage of com.sleepycat.bind.tuple.TupleInputBinding +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleMarshalledBinding.html b/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleMarshalledBinding.html new file mode 100644 index 000000000..4f270836c --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleMarshalledBinding.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.bind.tuple.TupleMarshalledBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.tuple.TupleMarshalledBinding

+
+No usage of com.sleepycat.bind.tuple.TupleMarshalledBinding +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleOutput.html b/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleOutput.html new file mode 100644 index 000000000..b63c50264 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleOutput.html @@ -0,0 +1,615 @@ + + + + + + +Uses of Class com.sleepycat.bind.tuple.TupleOutput (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.tuple.TupleOutput

+
+ + + + + + + + + + + + + +
+Packages that use TupleOutput
com.sleepycat.bind.serialBindings that use Java serialization. 
com.sleepycat.bind.tupleBindings that use sequences of primitive fields, or tuples. 
+  +

+ + + + + +
+Uses of TupleOutput in com.sleepycat.bind.serial
+  +

+ + + + + + + + + + + + + + + + + + + + + +
Methods in com.sleepycat.bind.serial with parameters of type TupleOutput
+ booleanTupleSerialMarshalledKeyCreator.createSecondaryKey(TupleInput primaryKeyInput, + Object dataInput, + TupleOutput indexKeyOutput) + +
+           
+ voidTupleSerialMarshalledBinding.objectToKey(Object object, + TupleOutput output) + +
+           
+abstract  booleanTupleSerialKeyCreator.createSecondaryKey(TupleInput primaryKeyInput, + Object dataInput, + TupleOutput indexKeyOutput) + +
+          Creates the index key entry from primary key tuple entry and + deserialized data entry.
+abstract  voidTupleSerialBinding.objectToKey(Object object, + TupleOutput keyOutput) + +
+          Extracts a key tuple from an entity object.
+  +

+ + + + + +
+Uses of TupleOutput in com.sleepycat.bind.tuple
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Methods in com.sleepycat.bind.tuple that return TupleOutput
+ TupleOutputTupleOutput.writeBytes(String val) + +
+          Writes the specified bytes to the buffer, converting each character to + an unsigned byte value.
+ TupleOutputTupleOutput.writeChars(String val) + +
+          Writes the specified characters to the buffer, converting each character + to a two byte unsigned value.
+ TupleOutputTupleOutput.writeString(String val) + +
+          Writes the specified characters to the buffer, converting each character + to UTF format, and adding a null terminator byte.
+ TupleOutputTupleOutput.writeChar(int val) + +
+          Writes a char (two byte) unsigned value to the buffer.
+ TupleOutputTupleOutput.writeBoolean(boolean val) + +
+          Writes a boolean (one byte) unsigned value to the buffer, writing one + if the value is true and zero if it is false.
+ TupleOutputTupleOutput.writeByte(int val) + +
+          Writes a signed byte (one byte) value to the buffer.
+ TupleOutputTupleOutput.writeShort(int val) + +
+          Writes a signed short (two byte) value to the buffer.
+ TupleOutputTupleOutput.writeInt(int val) + +
+          Writes a signed int (four byte) value to the buffer.
+ TupleOutputTupleOutput.writeLong(long val) + +
+          Writes a signed long (eight byte) value to the buffer.
+ TupleOutputTupleOutput.writeFloat(float val) + +
+          Writes a signed float (four byte) value to the buffer.
+ TupleOutputTupleOutput.writeDouble(double val) + +
+          Writes a signed double (eight byte) value to the buffer.
+ TupleOutputTupleOutput.writeBytes(char[] chars) + +
+          Writes the specified bytes to the buffer, converting each character to + an unsigned byte value.
+ TupleOutputTupleOutput.writeChars(char[] chars) + +
+          Writes the specified characters to the buffer, converting each character + to a two byte unsigned value.
+ TupleOutputTupleOutput.writeString(char[] chars) + +
+          Writes the specified characters to the buffer, converting each character + to UTF format.
+ TupleOutputTupleOutput.writeUnsignedByte(int val) + +
+          Writes an unsigned byte (one byte) value to the buffer.
+ TupleOutputTupleOutput.writeUnsignedShort(int val) + +
+          Writes an unsigned short (two byte) value to the buffer.
+ TupleOutputTupleOutput.writeUnsignedInt(long val) + +
+          Writes an unsigned int (four byte) value to the buffer.
+static TupleOutputTupleBinding.newOutput() + +
+          Utility method for use by bindings to create a tuple output object.
+static TupleOutputTupleBinding.newOutput(byte[] buffer) + +
+          Utility method for use by bindings to create a tuple output object + with a specific starting size.
+  +
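A minimal sketch tying the chained write methods above to the static TupleBinding helpers; the example program itself is hypothetical.

// Write two fields with chained TupleOutput calls, move them into a
// DatabaseEntry, then read them back in the same order with TupleInput.
import com.sleepycat.bind.tuple.TupleBinding;
import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;
import com.sleepycat.db.DatabaseEntry;

public class TupleRoundTripSketch {
    public static void main(String[] args) {
        TupleOutput out = TupleBinding.newOutput();
        out.writeString("alice").writeInt(30);   // write methods return the TupleOutput

        DatabaseEntry entry = new DatabaseEntry();
        TupleBinding.outputToEntry(out, entry);  // copy the tuple bytes into the entry

        TupleInput in = TupleBinding.entryToInput(entry);
        System.out.println(in.readString());     // alice
        System.out.println(in.readInt());        // 30
    }
}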

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Methods in com.sleepycat.bind.tuple with parameters of type TupleOutput
+ booleanTupleTupleMarshalledKeyCreator.createSecondaryKey(TupleInput primaryKeyInput, + TupleInput dataInput, + TupleOutput indexKeyOutput) + +
+           
+ booleanTupleTupleMarshalledKeyCreator.nullifyForeignKey(TupleInput dataInput, + TupleOutput dataOutput) + +
+           
+ voidTupleTupleMarshalledBinding.objectToKey(Object object, + TupleOutput output) + +
+           
+ voidTupleTupleMarshalledBinding.objectToData(Object object, + TupleOutput output) + +
+           
+abstract  booleanTupleTupleKeyCreator.createSecondaryKey(TupleInput primaryKeyInput, + TupleInput dataInput, + TupleOutput indexKeyOutput) + +
+          Creates the index key from primary key tuple and data tuple.
+ booleanTupleTupleKeyCreator.nullifyForeignKey(TupleInput dataInput, + TupleOutput dataOutput) + +
+          Clears the index key in the tuple data entry.
+abstract  voidTupleTupleBinding.objectToKey(Object object, + TupleOutput output) + +
+          Extracts a key tuple from an entity object.
+abstract  voidTupleTupleBinding.objectToData(Object object, + TupleOutput output) + +
+          Extracts a data tuple from an entity object.
+ voidTupleMarshalledBinding.objectToEntry(Object object, + TupleOutput output) + +
+           
+static voidTupleBinding.outputToEntry(TupleOutput output, + DatabaseEntry entry) + +
+          Utility method to set the data in an entry buffer to the data in a tuple + output object.
+abstract  voidTupleBinding.objectToEntry(Object object, + TupleOutput output) + +
+          Converts a key or data object to a tuple entry.
+ voidStringBinding.objectToEntry(Object object, + TupleOutput output) + +
+           
+ voidShortBinding.objectToEntry(Object object, + TupleOutput output) + +
+           
+ voidMarshalledTupleKeyEntity.marshalPrimaryKey(TupleOutput keyOutput) + +
+          Extracts the entity's primary key and writes it to the key output.
+ booleanMarshalledTupleKeyEntity.marshalSecondaryKey(String keyName, + TupleOutput keyOutput) + +
+          Extracts the entity's secondary key and writes it to the key output.
+ voidMarshalledTupleEntry.marshalEntry(TupleOutput dataOutput) + +
+          Constructs the key or data tuple entry from the key or data object.
+ voidLongBinding.objectToEntry(Object object, + TupleOutput output) + +
+           
+ voidIntegerBinding.objectToEntry(Object object, + TupleOutput output) + +
+           
+ voidFloatBinding.objectToEntry(Object object, + TupleOutput output) + +
+           
+ voidDoubleBinding.objectToEntry(Object object, + TupleOutput output) + +
+           
+ voidCharacterBinding.objectToEntry(Object object, + TupleOutput output) + +
+           
+ voidByteBinding.objectToEntry(Object object, + TupleOutput output) + +
+           
+ voidBooleanBinding.objectToEntry(Object object, + TupleOutput output) + +
+           
+  +

+ + + + + + + + +
Constructors in com.sleepycat.bind.tuple with parameters of type TupleOutput
TupleInput(TupleOutput output) + +
+          Creates a tuple input object from the data contained in a tuple output + object.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleBinding.html b/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleBinding.html new file mode 100644 index 000000000..a266bbdcf --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleBinding.html @@ -0,0 +1,174 @@ + + + + + + +Uses of Class com.sleepycat.bind.tuple.TupleTupleBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.tuple.TupleTupleBinding

+
+ + + + + + + + + +
+Packages that use TupleTupleBinding
com.sleepycat.bind.tupleBindings that use sequences of primitive fields, or tuples. 
+  +

+ + + + + +
+Uses of TupleTupleBinding in com.sleepycat.bind.tuple
+  +

+ + + + + + + + + +
Subclasses of TupleTupleBinding in com.sleepycat.bind.tuple
+ classTupleTupleMarshalledBinding + +
+          A concrete TupleTupleBinding that delegates to the + MarshalledTupleEntry and + MarshalledTupleKeyEntity interfaces of the entity class.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleKeyCreator.html b/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleKeyCreator.html new file mode 100644 index 000000000..283744adf --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleKeyCreator.html @@ -0,0 +1,172 @@ + + + + + + +Uses of Class com.sleepycat.bind.tuple.TupleTupleKeyCreator (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.tuple.TupleTupleKeyCreator

+
+ + + + + + + + + +
+Packages that use TupleTupleKeyCreator
com.sleepycat.bind.tupleBindings that use sequences of primitive fields, or tuples. 
+  +

+ + + + + +
+Uses of TupleTupleKeyCreator in com.sleepycat.bind.tuple
+  +

+ + + + + + + + + +
Subclasses of TupleTupleKeyCreator in com.sleepycat.bind.tuple
+ classTupleTupleMarshalledKeyCreator + +
+          A concrete key creator that works in conjunction with a TupleTupleMarshalledBinding.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleMarshalledBinding.html b/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleMarshalledBinding.html new file mode 100644 index 000000000..8298add63 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleMarshalledBinding.html @@ -0,0 +1,171 @@ + + + + + + +Uses of Class com.sleepycat.bind.tuple.TupleTupleMarshalledBinding (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.tuple.TupleTupleMarshalledBinding

+
+ + + + + + + + + +
+Packages that use TupleTupleMarshalledBinding
com.sleepycat.bind.tupleBindings that use sequences of primitive fields, or tuples. 
+  +

+ + + + + +
+Uses of TupleTupleMarshalledBinding in com.sleepycat.bind.tuple
+  +

+ + + + + + + + +
Constructors in com.sleepycat.bind.tuple with parameters of type TupleTupleMarshalledBinding
TupleTupleMarshalledKeyCreator(TupleTupleMarshalledBinding binding, + String keyName) + +
+          Creates a tuple-tuple marshalled key creator.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleMarshalledKeyCreator.html b/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleMarshalledKeyCreator.html new file mode 100644 index 000000000..e8d36859e --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleMarshalledKeyCreator.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.bind.tuple.TupleTupleMarshalledKeyCreator (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.bind.tuple.TupleTupleMarshalledKeyCreator

+
+No usage of com.sleepycat.bind.tuple.TupleTupleMarshalledKeyCreator +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/package-frame.html b/db/docs/java/com/sleepycat/bind/tuple/package-frame.html new file mode 100644 index 000000000..00f1f3b60 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/package-frame.html @@ -0,0 +1,79 @@ + + + + + + +com.sleepycat.bind.tuple (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + +com.sleepycat.bind.tuple + + + + +
+Interfaces  + +
+MarshalledTupleEntry +
+MarshalledTupleKeyEntity
+ + + + + + +
+Classes  + +
+BooleanBinding +
+ByteBinding +
+CharacterBinding +
+DoubleBinding +
+FloatBinding +
+IntegerBinding +
+LongBinding +
+ShortBinding +
+StringBinding +
+TupleBinding +
+TupleInput +
+TupleInputBinding +
+TupleMarshalledBinding +
+TupleOutput +
+TupleTupleBinding +
+TupleTupleKeyCreator +
+TupleTupleMarshalledBinding +
+TupleTupleMarshalledKeyCreator
+ + + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/package-summary.html b/db/docs/java/com/sleepycat/bind/tuple/package-summary.html new file mode 100644 index 000000000..dcd79b90b --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/package-summary.html @@ -0,0 +1,267 @@ + + + + + + +com.sleepycat.bind.tuple (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+

+Package com.sleepycat.bind.tuple +

+Bindings that use sequences of primitive fields, or tuples. +

+See: +
+          Description +

+ + + + + + + + + + + + + +
+Interface Summary
MarshalledTupleEntryA marshalling interface implemented by key, data or entity classes that + are represented as tuples.
MarshalledTupleKeyEntityA marshalling interface implemented by entity classes that represent keys as + tuples.
+  + +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Class Summary
BooleanBindingA concrete TupleBinding for a Boolean primitive + wrapper or a boolean primitive.
ByteBindingA concrete TupleBinding for a Byte primitive + wrapper or a byte primitive.
CharacterBindingA concrete TupleBinding for a Character primitive + wrapper or a char primitive.
DoubleBindingA concrete TupleBinding for a Double primitive + wrapper or a double primitive.
FloatBindingA concrete TupleBinding for a Float primitive + wrapper or a float primitive.
IntegerBindingA concrete TupleBinding for an Integer primitive + wrapper or an int primitive.
LongBindingA concrete TupleBinding for a Long primitive + wrapper or a long primitive.
ShortBindingA concrete TupleBinding for a Short primitive + wrapper or a short primitive.
StringBindingA concrete TupleBinding for a simple String value.
TupleBindingAn abstract EntryBinding that treats a key or data entry as a + tuple; it includes predefined bindings for Java primitive types.
TupleInputAn InputStream with DataInput-like methods for + reading tuple fields.
TupleInputBindingA concrete EntryBinding that uses the TupleInput + object as the key or data object.
TupleMarshalledBindingA concrete TupleBinding that delegates to the + MarshalledTupleEntry interface of the data or key object.
TupleOutputAn OutputStream with DataOutput-like methods for + writing tuple fields.
TupleTupleBindingAn abstract EntityBinding that treats an entity's key entry and + data entry as tuples.
TupleTupleKeyCreatorAn abstract key creator that uses a tuple key and a tuple data entry.
TupleTupleMarshalledBindingA concrete TupleTupleBinding that delegates to the + MarshalledTupleEntry and + MarshalledTupleKeyEntity interfaces of the entity class.
TupleTupleMarshalledKeyCreatorA concrete key creator that works in conjunction with a TupleTupleMarshalledBinding.
+  + +

+

+Package com.sleepycat.bind.tuple Description +

+ +

+Bindings that use sequences of primitive fields, or tuples. +

+ +
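For illustration, a minimal sketch of a custom TupleBinding as summarized in the class table above, assuming a hypothetical two-field Person value; fields must be read in the same order they were written.

// Hypothetical entity and binding, shown only to illustrate the
// entryToObject/objectToEntry pair that concrete TupleBindings implement.
import com.sleepycat.bind.tuple.TupleBinding;
import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;

public class PersonBinding extends TupleBinding {

    public static class Person {
        String name;
        int age;
    }

    public Object entryToObject(TupleInput input) {
        Person p = new Person();
        p.name = input.readString(); // read in the same order as written
        p.age = input.readInt();
        return p;
    }

    public void objectToEntry(Object object, TupleOutput output) {
        Person p = (Person) object;
        output.writeString(p.name);
        output.writeInt(p.age);
    }
}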

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/package-tree.html b/db/docs/java/com/sleepycat/bind/tuple/package-tree.html new file mode 100644 index 000000000..0a9582015 --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/package-tree.html @@ -0,0 +1,168 @@ + + + + + + +com.sleepycat.bind.tuple Class Hierarchy (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Hierarchy For Package com.sleepycat.bind.tuple +

+
+
+
Package Hierarchies:
All Packages
+
+

+Class Hierarchy +

+ +

+Interface Hierarchy +

+ +
+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/bind/tuple/package-use.html b/db/docs/java/com/sleepycat/bind/tuple/package-use.html new file mode 100644 index 000000000..38473be0d --- /dev/null +++ b/db/docs/java/com/sleepycat/bind/tuple/package-use.html @@ -0,0 +1,225 @@ + + + + + + +Uses of Package com.sleepycat.bind.tuple (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Package
com.sleepycat.bind.tuple

+
+ + + + + + + + + + + + + +
+Packages that use com.sleepycat.bind.tuple
com.sleepycat.bind.serialBindings that use Java serialization. 
com.sleepycat.bind.tupleBindings that use sequences of primitive fields, or tuples. 
+  +

+ + + + + + + + + + + +
+Classes in com.sleepycat.bind.tuple used by com.sleepycat.bind.serial
TupleInput + +
+          An InputStream with DataInput-like methods for + reading tuple fields.
TupleOutput + +
+          An OutputStream with DataOutput-like methods for + writing tuple fields.
+  +

+ + + + + + + + + + + + + + + + + + + + + + + +
+Classes in com.sleepycat.bind.tuple used by com.sleepycat.bind.tuple
TupleBinding + +
+          An abstract EntryBinding that treats a key or data entry as a + tuple; it includes predefined bindings for Java primitive types.
TupleInput + +
+          An InputStream with DataInput-like methods for + reading tuple fields.
TupleOutput + +
+          An OutputStream with DataOutput-like methods for + writing tuple fields.
TupleTupleBinding + +
+          An abstract EntityBinding that treats an entity's key entry and + data entry as tuples.
TupleTupleKeyCreator + +
+          An abstract key creator that uses a tuple key and a tuple data entry.
TupleTupleMarshalledBinding + +
+          A concrete TupleTupleBinding that delegates to the + MarshalledTupleEntry and + MarshalledTupleKeyEntity interfaces of the entity class.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/CurrentTransaction.html b/db/docs/java/com/sleepycat/collections/CurrentTransaction.html new file mode 100644 index 000000000..42eb357b0 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/CurrentTransaction.html @@ -0,0 +1,387 @@ + + + + + + +CurrentTransaction (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.collections +
+Class CurrentTransaction

+
+java.lang.Object
+  extended bycom.sleepycat.collections.CurrentTransaction
+
+
+
+
public class CurrentTransaction
extends Object
+ +

+Provides access to the current transaction for the current thread within the + context of a Berkeley DB environment. This class provides explicit + transaction control beyond that provided by the TransactionRunner + class. However, both methods of transaction control manage per-thread + transactions. +

+ +
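A minimal sketch of this explicit per-thread control, assuming an already-open transactional Environment named env; the surrounding method is hypothetical.

// Begin, then commit or abort, the per-thread transaction for "env".
import com.sleepycat.collections.CurrentTransaction;
import com.sleepycat.db.DatabaseException;
import com.sleepycat.db.Environment;

public class CurrentTransactionSketch {
    static void runInTransaction(Environment env) throws DatabaseException {
        CurrentTransaction ct = CurrentTransaction.getInstance(env);
        ct.beginTransaction(null); // null selects the default TransactionConfig
        boolean ok = false;
        try {
            // ... read and write through stored collections here ...
            ok = true;
        } finally {
            if (ok) {
                ct.commitTransaction();
            } else {
                ct.abortTransaction(); // roll back if the work failed
            }
        }
    }
}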

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ TransactionabortTransaction() + +
+          Aborts the transaction that is active for the current thread for this + environment and makes the parent transaction (if any) the current + transaction.
+ TransactionbeginTransaction(TransactionConfig config) + +
+          Begins a new transaction for this environment and associates it with + the current thread.
+ TransactioncommitTransaction() + +
+          Commits the transaction that is active for the current thread for this + environment and makes the parent transaction (if any) the current + transaction.
+ EnvironmentgetEnvironment() + +
+          Returns the underlying Berkeley DB environment.
+static CurrentTransactiongetInstance(Environment env) + +
+          Gets the CurrentTransaction accessor for a specified Berkeley DB + environment.
+ TransactiongetTransaction() + +
+          Returns the transaction associated with the current thread for this + environment, or null if no transaction is active.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+getInstance

+
+public static CurrentTransaction getInstance(Environment env)
+
+
Gets the CurrentTransaction accessor for a specified Berkeley DB + environment. This method always returns the same reference when called + more than once with the same environment parameter. +

+

+
Parameters:
env - is an open Berkeley DB environment. +
Returns:
the CurrentTransaction accessor for the given environment, or + null if the environment is not transactional.
+
+
+
+ +

+getEnvironment

+
+public final Environment getEnvironment()
+
+
Returns the underlying Berkeley DB environment. +

+

+
+
+
+
+ +

+getTransaction

+
+public final Transaction getTransaction()
+
+
Returns the transaction associated with the current thread for this + environment, or null if no transaction is active. +

+

+
+
+
+
+ +

+beginTransaction

+
+public final Transaction beginTransaction(TransactionConfig config)
+                                   throws DatabaseException
+
+
Begins a new transaction for this environment and associates it with + the current thread. If a transaction is already active for this + environment and thread, a nested transaction will be created. +

+

+
Parameters:
config - the transaction configuration used for calling + Environment.beginTransaction(com.sleepycat.db.Transaction, com.sleepycat.db.TransactionConfig), or null to use the default + configuration. +
Returns:
the new transaction. +
Throws: +
DatabaseException - if the transaction cannot be started, in which + case any existing transaction is not affected. +
IllegalStateException - if a transaction is already active and + nested transactions are not supported by the environment.
+
+
+
+ +

+commitTransaction

+
+public final Transaction commitTransaction()
+                                    throws DatabaseException,
+                                           IllegalStateException
+
+
Commits the transaction that is active for the current thread for this + environment and makes the parent transaction (if any) the current + transaction. +

+

+ +
Returns:
the parent transaction or null if the committed transaction was + not nested. +
Throws: +
DatabaseException - if an error occurs committing the transaction. + The transaction will still be closed and the parent transaction will + become the current transaction. +
IllegalStateException - if no transaction is active for the + current thread for this environment.
+
+
+
+ +

+abortTransaction

+
+public final Transaction abortTransaction()
+                                   throws DatabaseException,
+                                          IllegalStateException
+
+
Aborts the transaction that is active for the current thread for this + environment and makes the parent transaction (if any) the current + transaction. +

+

+ +
Returns:
the parent transaction or null if the aborted transaction was + not nested. +
Throws: +
DatabaseException - if an error occurs aborting the transaction. + The transaction will still be closed and the parent transaction will + become the current transaction. +
IllegalStateException - if no transaction is active for the + current thread for this environment.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/MapEntryParameter.html b/db/docs/java/com/sleepycat/collections/MapEntryParameter.html new file mode 100644 index 000000000..b2fa064af --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/MapEntryParameter.html @@ -0,0 +1,408 @@ + + + + + + +MapEntryParameter (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.collections +
+Class MapEntryParameter

+
+java.lang.Object
+  extended bycom.sleepycat.collections.MapEntryParameter
+
+
+
All Implemented Interfaces:
Map.Entry
+
+
+
+
public class MapEntryParameter
extends Object
implements Map.Entry
+ +

+A simple Map.Entry implementation that can be used as an + input parameter. Since a MapEntryParameter is not obtained + from a map, it is not attached to any map in particular. To emphasize that + changing this object does not change the map, the setValue(java.lang.Object) method + always throws UnsupportedOperationException. + +

Warning: Use of this class violates the Java Collections + interface contract, which states that Map.Entry objects + should only be obtained from Map.entrySet() sets, while this + class allows constructing them directly. However, it is useful for + performing operations on an entry set such as add(), contains(), etc. For + restrictions see getValue() and setValue(java.lang.Object).

+

+ +
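A minimal sketch of the input-parameter usage described above, shown against a plain java.util.HashMap for simplicity; the example program itself is hypothetical.

// Use MapEntryParameter as an input-only Map.Entry, for example to test
// membership in a map's entry set without looking the key up first.
import java.util.HashMap;
import java.util.Map;
import com.sleepycat.collections.MapEntryParameter;

public class MapEntryParameterSketch {
    public static void main(String[] args) {
        Map map = new HashMap();
        map.put("color", "red");

        Map.Entry probe = new MapEntryParameter("color", "red");
        System.out.println(map.entrySet().contains(probe)); // true
        System.out.println(probe.getKey() + "=" + probe.getValue());
        // probe.setValue("blue") would throw UnsupportedOperationException.
    }
}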

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
MapEntryParameter(Object key, + Object value) + +
+          Creates a map entry with a given key and value.
+  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ booleanequals(Object other) + +
+          Compares this entry to a given entry as specified by Map.Entry.equals(java.lang.Object).
+ ObjectgetKey() + +
+          Returns the key of this entry.
+ ObjectgetValue() + +
+          Returns the value of this entry.
+ inthashCode() + +
+          Computes a hash code as specified by Map.Entry.hashCode().
+ ObjectsetValue(Object newValue) + +
+          Always throws UnsupportedOperationException since this + object is not attached to a map.
+ StringtoString() + +
+          Converts the entry to a string representation for debugging.
+ + + + + + + +
Methods inherited from class java.lang.Object
getClass, notify, notifyAll, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+MapEntryParameter

+
+public MapEntryParameter(Object key,
+                         Object value)
+
+
Creates a map entry with a given key and value. +

+

Parameters:
key - is the key to use.
value - is the value to use.
+ + + + + + + + +
+Method Detail
+ +

+hashCode

+
+public int hashCode()
+
+
Computes a hash code as specified by Map.Entry.hashCode(). +

+

+
Specified by:
hashCode in interface Map.Entry
+
+
+ +
Returns:
the computed hash code.
+
+
+
+ +

+equals

+
+public boolean equals(Object other)
+
+
Compares this entry to a given entry as specified by Map.Entry.equals(java.lang.Object). +

+

+
Specified by:
equals in interface Map.Entry
+
+
+ +
Returns:
true if this entry is equal to the given entry.
+
+
+
+ +

+getKey

+
+public final Object getKey()
+
+
Returns the key of this entry. +

+

+
Specified by:
getKey in interface Map.Entry
+
+
+ +
Returns:
the key of this entry.
+
+
+
+ +

+getValue

+
+public final Object getValue()
+
+
Returns the value of this entry. Note that this will be the value + passed to the constructor or the last value passed to setValue(java.lang.Object). + It will not reflect changes made to a Map. +

+

+
Specified by:
getValue in interface Map.Entry
+
+
+ +
Returns:
the value of this entry.
+
+
+
+ +

+setValue

+
+public Object setValue(Object newValue)
+
+
Always throws UnsupportedOperationException since this + object is not attached to a map. +

+

+
Specified by:
setValue in interface Map.Entry
+
+
+
+
+
+
+ +

+toString

+
+public String toString()
+
+
Converts the entry to a string representation for debugging. +

+

+
+
+
+ +
Returns:
the string representation.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/PrimaryKeyAssigner.html b/db/docs/java/com/sleepycat/collections/PrimaryKeyAssigner.html new file mode 100644 index 000000000..cb25f980b --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/PrimaryKeyAssigner.html @@ -0,0 +1,224 @@ + + + + + + +PrimaryKeyAssigner (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.collections +
+Interface PrimaryKeyAssigner

+
+
+
public interface PrimaryKeyAssigner
+ +

+An interface implemented to assign new primary key values. + An implementation of this interface is passed to the StoredMap + or StoredSortedMap constructor to assign primary keys for that + store. Key assignment occurs when StoredMap.append() is called. +

+ +
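A minimal sketch of an assigner, using only the TupleOutput helpers documented in this API; the integer-sequence key layout and the in-memory counter are assumptions.

// Hypothetical assigner: writes a monotonically increasing int as the new key.
import com.sleepycat.bind.tuple.TupleBinding;
import com.sleepycat.bind.tuple.TupleOutput;
import com.sleepycat.collections.PrimaryKeyAssigner;
import com.sleepycat.db.DatabaseEntry;
import com.sleepycat.db.DatabaseException;

public class SequenceKeyAssigner implements PrimaryKeyAssigner {

    private int next = 1; // in-memory counter; a real assigner would persist this

    public synchronized void assignKey(DatabaseEntry keyData)
        throws DatabaseException {
        TupleOutput out = TupleBinding.newOutput();
        out.writeInt(next++);
        TupleBinding.outputToEntry(out, keyData); // fill the caller's key buffer
    }
}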

+


+ +

+ + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ voidassignKey(DatabaseEntry keyData) + +
+          Assigns a new primary key value into the given data buffer.
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+assignKey

+
+public void assignKey(DatabaseEntry keyData)
+               throws DatabaseException
+
+
Assigns a new primary key value into the given data buffer. +

+

+ +
Throws: +
DatabaseException
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/StoredCollection.html b/db/docs/java/com/sleepycat/collections/StoredCollection.html new file mode 100644 index 000000000..5f3baa744 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/StoredCollection.html @@ -0,0 +1,665 @@ + + + + + + +StoredCollection (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.collections +
+Class StoredCollection

+
+java.lang.Object
+  extended bycom.sleepycat.collections.StoredContainer
+      extended bycom.sleepycat.collections.StoredCollection
+
+
+
All Implemented Interfaces:
Cloneable, Collection
+
+
+
Direct Known Subclasses:
StoredEntrySet, StoredKeySet, StoredList, StoredValueSet
+
+
+
+
public abstract class StoredCollection
extends StoredContainer
implements Collection
+ +

+An abstract base class for all stored collections. This class, and its + base class StoredContainer, provide implementations of most methods + in the Collection interface. Other methods, such as Collection.add(java.lang.Object) + and Collection.remove(java.lang.Object), are provided by concrete classes that extend this + class. + +

Note that this class does not conform to the standard Java + collections interface in the following ways:

+ + +

In addition, this class provides the following methods for stored + collections only. Note that the use of these methods is not compatible with + the standard Java collections interface.

+ +

+ +
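A minimal sketch of the stored-collections-only iterator method, assuming StoredIterator exposes a close() method that releases the underlying cursor; the surrounding method is hypothetical.

// Iterate a stored collection read-only and close the cursor-backed
// iterator promptly in a finally block.
import com.sleepycat.collections.StoredCollection;
import com.sleepycat.collections.StoredIterator;

public class StoredIterationSketch {
    static void printAll(StoredCollection collection) {
        StoredIterator i = collection.iterator(false); // read-only iterator
        try {
            while (i.hasNext()) {
                System.out.println(i.next());
            }
        } finally {
            i.close(); // assumed to release the underlying cursor
        }
    }
}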

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ booleanaddAll(Collection coll) + +
+          Adds all of the elements in the specified collection to this collection + (optional operation).
+ booleancontainsAll(Collection coll) + +
+          Returns true if this collection contains all of the elements in the + specified collection.
+ booleanequals(Object other) + +
+          Compares the specified object with this collection for equality.
+ inthashCode() + +
+           
+ Iteratoriterator() + +
+          Returns an iterator over the elements in this collection.
+ StoredIteratoriterator(boolean writeAllowed) + +
+          Returns a read or read-write iterator over the elements in this + collection.
+ StoredIteratorjoin(StoredContainer[] indices, + Object[] indexKeys, + JoinConfig joinConfig) + +
+          Returns an iterator representing an equality join of the indices and + index key values specified.
+ booleanremoveAll(Collection coll) + +
+          Removes all this collection's elements that are also contained in the + specified collection (optional operation).
+ booleanretainAll(Collection coll) + +
+          Retains only the elements in this collection that are contained in the + specified collection (optional operation).
+ Object[]toArray() + +
+          Returns an array of all the elements in this collection.
+ Object[]toArray(Object[] a) + +
+          Returns an array of all the elements in this collection whose runtime + type is that of the specified array.
+ ListtoList() + +
+          Returns a copy of this collection as an ArrayList.
+ StringtoString() + +
+          Converts the collection to a string representation for debugging.
+ + + + + + + +
Methods inherited from class com.sleepycat.collections.StoredContainer
areDuplicatesAllowed, areDuplicatesOrdered, areKeysRenumbered, clear, isDirtyRead, isDirtyReadAllowed, isEmpty, isOrdered, isSecondary, isTransactional, isWriteAllowed, size
+ + + + + + + +
Methods inherited from class java.lang.Object
getClass, notify, notifyAll, wait, wait, wait
+ + + + + + + +
Methods inherited from interface java.util.Collection
add, clear, contains, isEmpty, remove, size
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+iterator

+
+public Iterator iterator()
+
+
Returns an iterator over the elements in this collection. + The iterator will be read-only if the collection is read-only. + This method conforms to the Collection.iterator() interface. +

+

+
Specified by:
iterator in interface Collection
+
+
+ +
Returns:
a StoredIterator for this collection. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
See Also:
StoredContainer.isWriteAllowed()
+
+
+
+ +

+iterator

+
+public StoredIterator iterator(boolean writeAllowed)
+
+
Returns a read or read-write iterator over the elements in this + collection. + This method does not exist in the standard Collection interface. +

+

+
+
+
+
Parameters:
writeAllowed - is true to open a read-write iterator or false to + open a read-only iterator. If the collection is read-only the iterator + will always be read-only. +
Returns:
a StoredIterator for this collection. +
Throws: +
IllegalStateException - if writeAllowed is true but the collection + is read-only. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
See Also:
StoredContainer.isWriteAllowed()
+
+
+
+ +

+toArray

+
+public Object[] toArray()
+
+
Returns an array of all the elements in this collection. + This method conforms to the Collection.toArray() interface. +

+

+
Specified by:
toArray in interface Collection
+
+
+ +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+toArray

+
+public Object[] toArray(Object[] a)
+
+
Returns an array of all the elements in this collection whose runtime + type is that of the specified array. + This method conforms to the Collection.toArray(Object[]) + interface. +

+

+
Specified by:
toArray in interface Collection
+
+
+ +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+containsAll

+
+public boolean containsAll(Collection coll)
+
+
Returns true if this collection contains all of the elements in the + specified collection. + This method conforms to the Collection.containsAll(java.util.Collection) interface. +

+

+
Specified by:
containsAll in interface Collection
+
+
+ +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+addAll

+
+public boolean addAll(Collection coll)
+
+
Adds all of the elements in the specified collection to this collection + (optional operation). + This method calls the Collection.add(Object) method of the concrete + collection class, which may or may not be supported. + This method conforms to the Collection.addAll(java.util.Collection) interface. +

+

+
Specified by:
addAll in interface Collection
+
+
+ +
Throws: +
UnsupportedOperationException - if the collection is read-only, or + if the collection is indexed, or if the add method is not supported by + the concrete collection. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+removeAll

+
+public boolean removeAll(Collection coll)
+
+
Removes all this collection's elements that are also contained in the + specified collection (optional operation). + This method conforms to the Collection.removeAll(java.util.Collection) interface. +

+

+
Specified by:
removeAll in interface Collection
+
+
+ +
Throws: +
UnsupportedOperationException - if the collection is read-only. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+retainAll

+
+public boolean retainAll(Collection coll)
+
+
Retains only the elements in this collection that are contained in the + specified collection (optional operation). + This method conforms to the Collection.retainAll(java.util.Collection) interface. +

+

+
Specified by:
retainAll in interface Collection
+
+
+ +
Throws: +
UnsupportedOperationException - if the collection is read-only. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+equals

+
+public boolean equals(Object other)
+
+
Compares the specified object with this collection for equality. + A value comparison is performed by this method and the stored values + are compared rather than calling the equals() method of each element. + This method conforms to the Collection.equals(java.lang.Object) interface. +

+

+
Specified by:
equals in interface Collection
+
+
+ +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+hashCode

+
+public int hashCode()
+
+
+
Specified by:
hashCode in interface Collection
+
+
+
+
+
+
+ +

+toList

+
+public List toList()
+
+
Returns a copy of this collection as an ArrayList. This is the same as + toArray() but returns a collection instead of an array. +

+

+
+
+
+ +
Returns:
an ArrayList containing a copy of all elements in this + collection. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+toString

+
+public String toString()
+
+
Converts the collection to a string representation for debugging. + WARNING: All elements will be converted to strings and returned and + therefore the returned string may be very large. +

+

+
+
+
+ +
Returns:
the string representation. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+join

+
+public StoredIterator join(StoredContainer[] indices,
+                           Object[] indexKeys,
+                           JoinConfig joinConfig)
+
+
Returns an iterator representing an equality join of the indices and + index key values specified. + This method does not exist in the standard Collection interface. + +

The returned iterator supports only the two methods: hasNext() and + next(). All other methods will throw UnsupportedOperationException.

+

+

+
+
+
+
Parameters:
indices - is an array of indices with elements corresponding to + those in the indexKeys array.
indexKeys - is an array of index key values identifying the + elements to be selected.
joinConfig - is the join configuration, or null to use the + default configuration. +
Returns:
an iterator over the elements in this collection that match + all specified index key values. +
Throws: +
IllegalArgumentException - if this collection is indexed or if a + given index does not have the same store as this collection. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/StoredCollections.html b/db/docs/java/com/sleepycat/collections/StoredCollections.html new file mode 100644 index 000000000..6a18b4c0d --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/StoredCollections.html @@ -0,0 +1,407 @@ + + + + + + +StoredCollections (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.collections +
+Class StoredCollections

+
+java.lang.Object
+  extended bycom.sleepycat.collections.StoredCollections
+
+
+
+
public class StoredCollections
extends Object
+ +

+This class consists exclusively of static methods that operate on or return + stored collections. It contains methods for changing certain properties of a + collection. Because collection properties are immutable, these methods + always return a new collection reference. This allows stored collections to + be used safely by multiple threads. Note that creating the new collection + reference is not expensive and creates only two new objects. + +

When a collection is created with a particular property, all collections + and iterators derived from that collection will inherit the property. For + example, if a dirty-read Map is created then calls to subMap(), values(), + entrySet(), and keySet() will create dirty-read collections also.

+ +

Dirty-Read Method names beginning with dirtyRead create a new + dirty-read container from a given stored container. When dirty-read is + enabled, data will be read that has been modified by another transaction but + not committed. Using dirty-read can improve concurrency since reading will + not wait for other transactions to complete. For a non-transactional + container (when StoredContainer.isTransactional() returns false), + dirty-read has no effect. If StoredContainer.isDirtyReadAllowed() + returns false, dirty-read also has no effect. If dirty-read is enabled + (and allowed) for a container, StoredContainer.isDirtyRead() will + return true. Dirty-read is disabled by default for a container.

+

+ +
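A minimal sketch of deriving a dirty-read view as described above, assuming an existing stored SortedMap backed by a database that allows dirty reads; the surrounding method is hypothetical.

// Derive a dirty-read view for a lookup that must not wait on other
// transactions; the original map is left unchanged.
import java.util.SortedMap;
import com.sleepycat.collections.StoredCollections;

public class DirtyReadSketch {
    static Object lookupWithoutLocking(SortedMap storedSortedMap, Object key) {
        SortedMap dirtyView = StoredCollections.dirtyReadSortedMap(storedSortedMap);
        return dirtyView.get(key); // may observe uncommitted data where allowed
    }
}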

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+static CollectiondirtyReadCollection(Collection storedCollection) + +
+          Creates a dirty-read collection from a given stored collection.
+static ListdirtyReadList(List storedList) + +
+          Creates a dirty-read list from a given stored list.
+static MapdirtyReadMap(Map storedMap) + +
+          Creates a dirty-read map from a given stored map.
+static SetdirtyReadSet(Set storedSet) + +
+          Creates a dirty-read set from a given stored set.
+static SortedMapdirtyReadSortedMap(SortedMap storedSortedMap) + +
+          Creates a dirty-read sorted map from a given stored sorted map.
+static SortedSetdirtyReadSortedSet(SortedSet storedSortedSet) + +
+          Creates a dirty-read sorted set from a given stored sorted set.
+static Iteratoriterator(Iterator storedIterator) + +
+          Clones a stored iterator preserving its current position.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+dirtyReadCollection

+
+public static Collection dirtyReadCollection(Collection storedCollection)
+
+
Creates a dirty-read collection from a given stored collection. +

+

+
Parameters:
storedCollection - the base collection. +
Returns:
the dirty-read collection. +
Throws: +
ClassCastException - if the given container is not a + StoredContainer.
+
+
+
+ +

+dirtyReadList

+
+public static List dirtyReadList(List storedList)
+
+
Creates a dirty-read list from a given stored list. +

+

+
Parameters:
storedList - the base list. +
Returns:
the dirty-read list. +
Throws: +
ClassCastException - if the given container is not a + StoredContainer.
+
+
+
+ +

+dirtyReadMap

+
+public static Map dirtyReadMap(Map storedMap)
+
+
Creates a dirty-read map from a given stored map. +

+

+
Parameters:
storedMap - the base map. +
Returns:
the dirty-read map. +
Throws: +
ClassCastException - if the given container is not a + StoredContainer.
+
+
+
+ +

+dirtyReadSet

+
+public static Set dirtyReadSet(Set storedSet)
+
+
Creates a dirty-read set from a given stored set. +

+

+
Parameters:
storedSet - the base set. +
Returns:
the dirty-read set. +
Throws: +
ClassCastException - if the given container is not a + StoredContainer.
+
+
+
+ +

+dirtyReadSortedMap

+
+public static SortedMap dirtyReadSortedMap(SortedMap storedSortedMap)
+
+
Creates a dirty-read sorted map from a given stored sorted map. +

+

+
Parameters:
storedSortedMap - the base map. +
Returns:
the dirty-read map. +
Throws: +
ClassCastException - if the given container is not a + StoredContainer.
+
+
+
+ +

+dirtyReadSortedSet

+
+public static SortedSet dirtyReadSortedSet(SortedSet storedSortedSet)
+
+
Creates a dirty-read sorted set from a given stored sorted set. +

+

+
Parameters:
storedSortedSet - the base set. +
Returns:
the dirty-read set. +
Throws: +
ClassCastException - if the given container is not a + StoredContainer.
+
+
+
+ +

+iterator

+
+public static Iterator iterator(Iterator storedIterator)
+
+
Clones a stored iterator preserving its current position. +

+

+
Parameters:
storedIterator - an iterator to clone. +
Returns:
a new StoredIterator having the same position as the + given iterator. +
Throws: +
ClassCastException - if the given iterator is not a + StoredIterator.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/StoredContainer.html b/db/docs/java/com/sleepycat/collections/StoredContainer.html new file mode 100644 index 000000000..5e4c8f1c5 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/StoredContainer.html @@ -0,0 +1,599 @@ + + + + + + +StoredContainer (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.collections +
+Class StoredContainer

+
+java.lang.Object
+  extended bycom.sleepycat.collections.StoredContainer
+
+
+
All Implemented Interfaces:
Cloneable
+
+
+
Direct Known Subclasses:
StoredCollection, StoredMap
+
+
+
+
public abstract class StoredContainer
extends Object
implements Cloneable
+ +

+An abstract base class for all stored collections and maps. This class + provides implementations of methods that are common to the Collection and the Map interfaces, namely + clear(), isEmpty() and size(). + +

Note that this class does not conform to the standard Java + collections interface in the following ways:

+ + +

In addition, this class provides the following methods for stored + collections only. Note that the use of these methods is not compatible with + the standard Java collections interface.

+ +

+ +
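A minimal sketch that checks the property accessors summarized below before using a container; any StoredMap or StoredCollection can be passed, since both extend StoredContainer. The method is hypothetical.

// Inspect stored-container properties before deciding how to use it.
import com.sleepycat.collections.StoredContainer;

public class ContainerInfoSketch {
    static void describe(StoredContainer container) {
        System.out.println("transactional: " + container.isTransactional());
        System.out.println("ordered keys:  " + container.isOrdered());
        System.out.println("duplicates:    " + container.areDuplicatesAllowed());
        System.out.println("writable:      " + container.isWriteAllowed());
        // container.size() always throws UnsupportedOperationException;
        // use isEmpty() or iterate instead.
    }
}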

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ booleanareDuplicatesAllowed() + +
+          Returns whether duplicate keys are allowed in this container.
+ booleanareDuplicatesOrdered() + +
+          Returns whether duplicate keys are allowed and sorted by element value.
+ booleanareKeysRenumbered() + +
+          Returns whether keys are renumbered when insertions and deletions occur.
+ voidclear() + +
+          Removes all mappings or elements from this map or collection (optional + operation).
+ booleanisDirtyRead() + +
+          Returns whether dirty-read is enabled for this container.
+ booleanisDirtyReadAllowed() + +
+          Returns whether dirty-read is allowed for this container.
+ booleanisEmpty() + +
+          Returns true if this map or collection contains no mappings or elements.
+ booleanisOrdered() + +
+          Returns whether keys are ordered in this container.
+ booleanisSecondary() + +
+          Returns whether this container is a view on a secondary database rather + than directly on a primary database.
+ booleanisTransactional() + +
+          Returns whether the databases underlying this container are + transactional.
+ booleanisWriteAllowed() + +
+          Returns true if this is a read-write container or false if this is a + read-only container.
+ intsize() + +
+          Always throws UnsupportedOperationException.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+isWriteAllowed

+
+public final boolean isWriteAllowed()
+
+
Returns true if this is a read-write container or false if this is a + read-only container. + This method does not exist in the standard Map or + Collection interfaces. +

+

+
+
+
+ +
Returns:
whether write is allowed.
+
+
+
+ +

+isDirtyReadAllowed

+
+public final boolean isDirtyReadAllowed()
+
+
Returns whether dirty-read is allowed for this container. + For the JE product, dirty-read is always allowed; for the DB product, + dirty-read is allowed if it was configured for the underlying database + for this container. + Even when dirty-read is allowed it must specifically be enabled by + calling one of the StoredCollections methods. + This method does not exist in the standard Map or + Collection interfaces. +

+

+
+
+
+ +
Returns:
whether dirty-read is allowed.
+
+
+
+ +

+isDirtyRead

+
+public final boolean isDirtyRead()
+
+
Returns whether dirty-read is enabled for this container. + If dirty-read is enabled, data will be read that is modified but not + committed. + Dirty-read is disabled by default. + This method always returns false if isDirtyReadAllowed() returns + false. + This method does not exist in the standard Map or + Collection interfaces. +

+

+
+
+
+ +
Returns:
whether dirty-read is enabled.
+
+
+
+ +

+isTransactional

+
+public final boolean isTransactional()
+
+
Returns whether the databases underlying this container are + transactional. + Even in a transactional environment, a database will be transactional + only if it was opened within a transaction or if the auto-commit option + was specified when it was opened. + This method does not exist in the standard Map or + Collection interfaces. +

+

+
+
+
+ +
Returns:
whether the database is transactional.
+
+
+
+ +

+areDuplicatesAllowed

+
+public final boolean areDuplicatesAllowed()
+
+
Returns whether duplicate keys are allowed in this container. + Duplicates are optionally allowed for HASH and BTREE databases. + This method does not exist in the standard Map or + Collection interfaces. +

+

+
+
+
+ +
Returns:
whether duplicates are allowed.
+
+
+
+ +

+areDuplicatesOrdered

+
+public final boolean areDuplicatesOrdered()
+
+
Returns whether duplicate keys are allowed and sorted by element value. + Duplicates are optionally sorted for HASH and BTREE databases. + This method does not exist in the standard Map or + Collection interfaces. +

+

+
+
+
+ +
Returns:
whether duplicates are ordered.
+
+
+
+ +

+areKeysRenumbered

+
+public final boolean areKeysRenumbered()
+
+
Returns whether keys are renumbered when insertions and deletions occur. + Keys are optionally renumbered for RECNO databases. + This method does not exist in the standard Map or + Collection interfaces. +

+

+
+
+
+ +
Returns:
whether keys are renumbered.
+
+
+
+ +

+isOrdered

+
+public final boolean isOrdered()
+
+
Returns whether keys are ordered in this container. + Keys are ordered for BTREE, RECNO and QUEUE databases. + This method does not exist in the standard Map or + Collection interfaces. +

+

+
+
+
+ +
Returns:
whether keys are ordered.
+
+
+
+ +

+isSecondary

+
+public final boolean isSecondary()
+
+
Returns whether this container is a view on a secondary database rather + than directly on a primary database. + This method does not exist in the standard Map or + Collection interfaces. +

+

+
+
+
+ +
Returns:
whether the view is for a secondary database.
+
+
+
+ +

+size

+
+public int size()
+
+
Always throws UnsupportedOperationException. The size of a database + cannot be obtained reliably or inexpensively. + This method therefore violates the Collection.size() and + Map.size() interfaces. +

+

+
+
+
+ +
Returns:
always throws an exception. +
Throws: +
UnsupportedOperationException - unconditionally.
+
+
+
+ +
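Because size() always throws, callers that only need an emptiness test should use isEmpty() (described below) instead. A minimal sketch, assuming map is an existing stored map view:

    if (map.isEmpty()) {    // isEmpty() is supported even though size() is not
        System.out.println("no records");
    }
    // map.size() would throw UnsupportedOperationException here.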

+isEmpty

+
+public boolean isEmpty()
+
+
Returns true if this map or collection contains no mappings or elements. + This method conforms to the Collection.isEmpty() and + Map.isEmpty() interfaces. +

+

+
+
+
+ +
Returns:
whether the container is empty. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+clear

+
+public void clear()
+
+
Removes all mappings or elements from this map or collection (optional + operation). + This method conforms to the Collection.clear() and + Map.clear() interfaces. +

+

+
+
+
+ +
Throws: +
UnsupportedOperationException - if the container is read-only. +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+ +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/StoredEntrySet.html b/db/docs/java/com/sleepycat/collections/StoredEntrySet.html new file mode 100644 index 000000000..e6c88f205 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/StoredEntrySet.html @@ -0,0 +1,384 @@ + + + + + + +StoredEntrySet (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ +com.sleepycat.collections +
+Class StoredEntrySet

+
+java.lang.Object
+  extended bycom.sleepycat.collections.StoredContainer
+      extended bycom.sleepycat.collections.StoredCollection
+          extended bycom.sleepycat.collections.StoredEntrySet
+
+
+
All Implemented Interfaces:
Cloneable, Collection, Set
+
+
+
Direct Known Subclasses:
StoredSortedEntrySet
+
+
+
+
public class StoredEntrySet
extends StoredCollection
implements Set
+ +

+The Set returned by Map.entrySet(). This class may not be instantiated + directly. Contrary to what is stated by Map.entrySet() this class + does support the add(java.lang.Object) and StoredCollection.addAll(java.util.Collection) methods. + +

The Map.Entry.setValue(java.lang.Object) method of the Map.Entry objects + that are returned by this class and its iterators behaves just as the StoredIterator.set(java.lang.Object) method does.
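For illustration, a sketch of updating values through the entry set; map is assumed to be an existing writable StoredMap whose values are String objects (the variable names are not taken from this page):

    Iterator i = map.entrySet().iterator();
    try {
        while (i.hasNext()) {
            Map.Entry entry = (Map.Entry) i.next();
            entry.setValue("updated");   // writes through to the database,
                                         // just as StoredIterator.set() would
        }
    } finally {
        StoredIterator.close(i);         // stored iterators must always be closed
    }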

+ +

Note that this class does not conform to the standard Java + collections interface in the following ways:

+ +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ booleanadd(Object mapEntry) + +
+          Adds the specified element to this set if it is not already present + (optional operation).
+ booleancontains(Object mapEntry) + +
+          Returns true if this set contains the specified element.
+ booleanremove(Object mapEntry) + +
+          Removes the specified element from this set if it is present (optional + operation).
+ StringtoString() + +
+          Converts the collection to a string representation for debugging.
+ + + + + + + +
Methods inherited from class com.sleepycat.collections.StoredCollection
addAll, containsAll, equals, hashCode, iterator, iterator, join, removeAll, retainAll, toArray, toArray, toList
+ + + + + + + +
Methods inherited from class com.sleepycat.collections.StoredContainer
areDuplicatesAllowed, areDuplicatesOrdered, areKeysRenumbered, clear, isDirtyRead, isDirtyReadAllowed, isEmpty, isOrdered, isSecondary, isTransactional, isWriteAllowed, size
+ + + + + + + +
Methods inherited from class java.lang.Object
getClass, notify, notifyAll, wait, wait, wait
+ + + + + + + +
Methods inherited from interface java.util.Set
addAll, clear, containsAll, equals, hashCode, isEmpty, iterator, removeAll, retainAll, size, toArray, toArray
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+add

+
+public boolean add(Object mapEntry)
+
+
Adds the specified element to this set if it is not already present + (optional operation). + This method conforms to the Set.add(java.lang.Object) interface. +

+

+
Specified by:
add in interface Set
+
+
+
Parameters:
mapEntry - must be a Map.Entry instance. +
Returns:
true if the key-value pair was added to the set (and was not + previously present). +
Throws: +
UnsupportedOperationException - if the collection is read-only. +
ClassCastException - if the mapEntry is not a Map.Entry instance. +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+remove

+
+public boolean remove(Object mapEntry)
+
+
Removes the specified element from this set if it is present (optional + operation). + This method conforms to the Set.remove(java.lang.Object) interface. +

+

+
Specified by:
remove in interface Set
+
+
+
Parameters:
mapEntry - is a Map.Entry instance to be removed. +
Returns:
true if the key-value pair was removed from the set, or false if + the mapEntry is not a Map.Entry instance or is not + present in the set. +
Throws: +
UnsupportedOperationException - if the collection is read-only. +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+contains

+
+public boolean contains(Object mapEntry)
+
+
Returns true if this set contains the specified element. + This method conforms to the Set.contains(java.lang.Object) interface. +

+

+
Specified by:
contains in interface Set
+
+
+
Parameters:
mapEntry - is a Map.Entry instance to be checked. +
Returns:
true if the key-value pair is present in the set, or false if + the mapEntry is not a Map.Entry instance or is not + present in the set. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+toString

+
+public String toString()
+
+
Description copied from class: StoredCollection
+
Converts the collection to a string representation for debugging. + WARNING: All elements will be converted to strings and returned and + therefore the returned string may be very large. +

+

+
Overrides:
toString in class StoredCollection
+
+
+ +
Returns:
the string representation.
+
+
+ +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/StoredIterator.html b/db/docs/java/com/sleepycat/collections/StoredIterator.html new file mode 100644 index 000000000..eb3c0743b --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/StoredIterator.html @@ -0,0 +1,714 @@ + + + + + + +StoredIterator (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ +com.sleepycat.collections +
+Class StoredIterator

+
+java.lang.Object
+  extended bycom.sleepycat.collections.StoredIterator
+
+
+
All Implemented Interfaces:
Cloneable, Iterator, ListIterator
+
+
+
+
public class StoredIterator
extends Object
implements ListIterator, Cloneable
+ +

+The Iterator returned by all stored collections. + +

While in general this class conforms to the Iterator interface, + it is important to note that all iterators for stored collections must be + explicitly closed with close(). The static method close(java.util.Iterator) allows calling close for all iterators without + harm to iterators that are not from stored collections, and also avoids + casting. If a stored iterator is not closed, unpredictable behavior + including process death may result.
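A minimal sketch of the required close pattern, assuming coll is any stored collection (the variable name is illustrative):

    Iterator i = coll.iterator();
    try {
        while (i.hasNext()) {
            Object o = i.next();
            // ... use o ...
        }
    } finally {
        StoredIterator.close(i);   // safe even if i were not a stored iterator
    }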

+ +

This class implements the Iterator interface for all stored + iterators. It also implements ListIterator because some list + iterator methods apply to all stored iterators, for example, previous() and hasPrevious(). Other list iterator methods are always + supported for lists, but for other types of collections are only supported + under certain conditions. See nextIndex(), previousIndex(), + add(java.lang.Object) and set(java.lang.Object) for details.

+ +

In addition, this class provides the following methods for stored + collection iterators only. Note that the use of these methods is not + compatible with the standard Java collections interface.

+ +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ voidadd(Object value) + +
+          Inserts the specified element into the list or inserts a duplicate into + other types of collections (optional operation).
+ voidclose() + +
+          Closes this iterator.
+static voidclose(Iterator i) + +
+          Closes the given iterator using close() if it is a StoredIterator.
+ intcount() + +
+          Returns the number of elements having the same key value as the key + value of the element last returned by next() or previous().
+ StoredCollectiongetCollection() + +
+          Returns the collection associated with this iterator.
+ booleanhasNext() + +
+          Returns true if this iterator has more elements when traversing in the + forward direction.
+ booleanhasPrevious() + +
+          Returns true if this iterator has more elements when traversing in the + reverse direction.
+ booleanisReadModifyWrite() + +
+          Returns whether write-locks will be obtained when reading with this + cursor.
+ Objectnext() + +
+          Returns the next element in the iteration.
+ intnextIndex() + +
+          Returns the index of the element that would be returned by a subsequent + call to next.
+ Objectprevious() + +
+          Returns the previous element in the iteration.
+ intpreviousIndex() + +
+          Returns the index of the element that would be returned by a subsequent + call to previous.
+ voidremove() + +
+          Removes the last element that was returned by next or previous (optional + operation).
+ voidset(Object value) + +
+          Replaces the last element returned by next or previous with the + specified element (optional operation).
+ voidsetReadModifyWrite(boolean lockForWrite) + +
+          Changes whether write-locks will be obtained when reading with this + cursor.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+close

+
+public static void close(Iterator i)
+
+
Closes the given iterator using close() if it is a StoredIterator. If the given iterator is not a StoredIterator, + this method does nothing. +

+

+
+
+
+
Parameters:
i - is the iterator to close. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+isReadModifyWrite

+
+public final boolean isReadModifyWrite()
+
+
Returns whether write-locks will be obtained when reading with this + cursor. + Obtaining write-locks can prevent deadlocks when reading and then + modifying data. +

+

+
+
+
+ +
Returns:
the write-lock setting.
+
+
+
+ +

+setReadModifyWrite

+
+public void setReadModifyWrite(boolean lockForWrite)
+
+
Changes whether write-locks will be obtained when reading with this + cursor. + Obtaining write-locks can prevent deadlocks when reading and then + modifying data. +

+

+
+
+
+
Parameters:
lockForWrite - the write-lock setting.
+
+
+
+ +
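A sketch of a read-modify-write loop under these assumptions: coll is a writable stored collection in a transactional environment, and the iterator returned is a StoredIterator as described above:

    StoredIterator i = (StoredIterator) coll.iterator();
    try {
        i.setReadModifyWrite(true);    // obtain write locks while reading
        while (i.hasNext()) {
            Object value = i.next();
            // ... apply application-specific changes to value here ...
            i.set(value);              // write the element back in place
        }
    } finally {
        i.close();
    }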

+hasNext

+
+public boolean hasNext()
+
+
Returns true if this iterator has more elements when traversing in the + forward direction. False is returned if the iterator has been closed. + This method conforms to the Iterator.hasNext() interface. +

+

+
Specified by:
hasNext in interface ListIterator
+
+
+ +
Returns:
whether next() will succeed. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+hasPrevious

+
+public boolean hasPrevious()
+
+
Returns true if this iterator has more elements when traversing in the + reverse direction. It returns false if the iterator has been closed. + This method conforms to the ListIterator.hasPrevious() interface. +

+

+
Specified by:
hasPrevious in interface ListIterator
+
+
+ +
Returns:
whether previous() will succeed. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+next

+
+public Object next()
+
+
Returns the next element in the iteration. + This method conforms to the Iterator.next() interface. +

+

+
Specified by:
next in interface ListIterator
+
+
+ +
Returns:
the next element. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+previous

+
+public Object previous()
+
+
Returns the previous element in the iteration. + This method conforms to the ListIterator.previous() interface. +

+

+
Specified by:
previous in interface ListIterator
+
+
+ +
Returns:
the previous element. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+nextIndex

+
+public int nextIndex()
+
+
Returns the index of the element that would be returned by a subsequent + call to next. + This method conforms to the ListIterator.nextIndex() interface + except that it returns Integer.MAX_VALUE for stored lists when + positioned at the end of the list, rather than returning the list size + as specified by the ListIterator interface. This is because the database + size is not available. +

+

+
Specified by:
nextIndex in interface ListIterator
+
+
+ +
Returns:
the next index. +
Throws: +
UnsupportedOperationException - if this iterator's collection does + not use record number keys. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+previousIndex

+
+public int previousIndex()
+
+
Returns the index of the element that would be returned by a subsequent + call to previous. + This method conforms to the ListIterator.previousIndex() + interface. +

+

+
Specified by:
previousIndex in interface ListIterator
+
+
+ +
Returns:
the previous index. +
Throws: +
UnsupportedOperationException - if this iterator's collection does + not use record number keys. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+set

+
+public void set(Object value)
+
+
Replaces the last element returned by next or previous with the + specified element (optional operation). + This method conforms to the ListIterator.set(java.lang.Object) interface. +

+

+
Specified by:
set in interface ListIterator
+
+
+
Parameters:
value - the new value. +
Throws: +
UnsupportedOperationException - if the collection is a StoredKeySet (the set returned by Map.keySet()), or if + duplicates are sorted since this would change the iterator position, or + if the collection is indexed, or if the collection is read-only. +
IllegalArgumentException - if an entity value binding is used and + the primary key of the value given is different than the existing stored + primary key. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+remove

+
+public void remove()
+
+
Removes the last element that was returned by next or previous (optional + operation). + This method conforms to the ListIterator.remove() interface except + that when the collection is a list and the RECNO-RENUMBER access method + is not used, list indices will not be renumbered. +

+

+
Specified by:
remove in interface ListIterator
+
+
+ +
Throws: +
UnsupportedOperationException - if the collection is a sublist, or + if the collection is read-only. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+add

+
+public void add(Object value)
+
+
Inserts the specified element into the list or inserts a duplicate into + other types of collections (optional operation). + This method conforms to the ListIterator.add(java.lang.Object) interface when + the collection is a list and the RECNO-RENUMBER access method is used. + Otherwise, this method may only be called when duplicates are allowed. + If duplicates are unsorted, the new value will be inserted in the same + manner as list elements. + If duplicates are sorted, the new value will be inserted in sort order. +

+

+
Specified by:
add in interface ListIterator
+
+
+
Parameters:
value - the new value. +
Throws: +
UnsupportedOperationException - if the collection is a sublist, or + if the collection is indexed, or if the collection is read-only, or if + the collection is a list and the RECNO-RENUMBER access method was not + used, or if the collection is not a list and duplicates are not allowed. +
IllegalStateException - if the collection is empty and is not a + list with RECNO-RENUMBER access. +
IllegalArgumentException - if a duplicate value is being added + that already exists and duplicates are sorted. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+count

+
+public int count()
+
+
Returns the number of elements having the same key value as the key + value of the element last returned by next() or previous(). If no + duplicates are allowed, 1 is always returned. +

+

+
+
+
+ +
Returns:
the number of duplicates. +
Throws: +
IllegalStateException - if next() or previous() has not been + called for this iterator, or if remove() or add() were called after + the last call to next() or previous().
+
+
+
+ +

+close

+
+public void close()
+
+
Closes this iterator. + This method does not exist in the standard Iterator or ListIterator interfaces. + +

After being closed, only the hasNext() and hasPrevious() methods may be called, and these will return false. close() may be called again and will do nothing. If other + methods are called, a NullPointerException will generally be + thrown.

+

+

+
+
+
+ +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+getCollection

+
+public final StoredCollection getCollection()
+
+
Returns the collection associated with this iterator. + This method does not exist in the standard Iterator or ListIterator interfaces. +

+

+
+
+
+ +
Returns:
the collection associated with this iterator.
+
+
+ +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/StoredKeySet.html b/db/docs/java/com/sleepycat/collections/StoredKeySet.html new file mode 100644 index 000000000..967c02559 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/StoredKeySet.html @@ -0,0 +1,395 @@ + + + + + + +StoredKeySet (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ +com.sleepycat.collections +
+Class StoredKeySet

+
+java.lang.Object
+  extended bycom.sleepycat.collections.StoredContainer
+      extended bycom.sleepycat.collections.StoredCollection
+          extended bycom.sleepycat.collections.StoredKeySet
+
+
+
All Implemented Interfaces:
Cloneable, Collection, Set
+
+
+
Direct Known Subclasses:
StoredSortedKeySet
+
+
+
+
public class StoredKeySet
extends StoredCollection
implements Set
+ +

+The Set returned by Map.keySet(), which can also be constructed directly + if a Map is not needed. + Since this collection is a set, it contains only one element for each key, + even when duplicates are allowed. Key set iterators are therefore + particularly useful for enumerating the unique keys of a store or index that + allows duplicates. + +

Note that this class does not conform to the standard Java + collections interface in the following ways:

+ +

+ +

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
StoredKeySet(Database database, + EntryBinding keyBinding, + boolean writeAllowed) + +
+          Creates a key set view of a Database.
+  + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ booleanadd(Object key) + +
+          Adds the specified key to this set if it is not already present + (optional operation).
+ booleancontains(Object key) + +
+          Returns true if this set contains the specified key.
+ booleanremove(Object key) + +
+          Removes the specified key from this set if it is present (optional + operation).
+ + + + + + + +
Methods inherited from class com.sleepycat.collections.StoredCollection
addAll, containsAll, equals, hashCode, iterator, iterator, join, removeAll, retainAll, toArray, toArray, toList, toString
+ + + + + + + +
Methods inherited from class com.sleepycat.collections.StoredContainer
areDuplicatesAllowed, areDuplicatesOrdered, areKeysRenumbered, clear, isDirtyRead, isDirtyReadAllowed, isEmpty, isOrdered, isSecondary, isTransactional, isWriteAllowed, size
+ + + + + + + +
Methods inherited from class java.lang.Object
getClass, notify, notifyAll, wait, wait, wait
+ + + + + + + +
Methods inherited from interface java.util.Set
addAll, clear, containsAll, equals, hashCode, isEmpty, iterator, removeAll, retainAll, size, toArray, toArray
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+StoredKeySet

+
+public StoredKeySet(Database database,
+                    EntryBinding keyBinding,
+                    boolean writeAllowed)
+
+
Creates a key set view of a Database. +

+

Parameters:
database - is the Database underlying the new collection.
keyBinding - is the binding used to translate between key buffers + and key objects.
writeAllowed - is true to create a read-write collection or false + to create a read-only collection. +
Throws: +
IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
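A usage sketch; db is assumed to be an already-open primary Database whose keys are bound as String objects by keyBinding (neither variable is defined on this page):

    Set keys = new StoredKeySet(db, keyBinding, true /*writeAllowed*/);
    keys.add("new-key");                          // stores an empty value under this key
    boolean present = keys.contains("new-key");   // true after the add above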
+ + + + + + + + +
+Method Detail
+ +

+add

+
+public boolean add(Object key)
+
+
Adds the specified key to this set if it is not already present + (optional operation). + When a key is added the value in the underlying data store will be + empty. + This method conforms to the Set.add(java.lang.Object) interface. +

+

+
Specified by:
add in interface Set
+
+
+ +
Throws: +
UnsupportedOperationException - if the collection is indexed, or + if the collection is read-only. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+remove

+
+public boolean remove(Object key)
+
+
Removes the specified key from this set if it is present (optional + operation). + If duplicates are allowed, this method removes all duplicates for the + given key. + This method conforms to the Set.remove(java.lang.Object) interface. +

+

+
Specified by:
remove in interface Set
+
+
+ +
Throws: +
UnsupportedOperationException - if the collection is read-only. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+contains

+
+public boolean contains(Object key)
+
+
Returns true if this set contains the specified key. + This method conforms to the Set.contains(java.lang.Object) interface. +

+

+
Specified by:
contains in interface Set
+
+
+ +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+ +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/StoredList.html b/db/docs/java/com/sleepycat/collections/StoredList.html new file mode 100644 index 000000000..2bebd23de --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/StoredList.html @@ -0,0 +1,895 @@ + + + + + + +StoredList (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ +com.sleepycat.collections +
+Class StoredList

+
+java.lang.Object
+  extended bycom.sleepycat.collections.StoredContainer
+      extended bycom.sleepycat.collections.StoredCollection
+          extended bycom.sleepycat.collections.StoredList
+
+
+
All Implemented Interfaces:
Cloneable, Collection, List
+
+
+
+
public class StoredList
extends StoredCollection
implements List
+ +

+A List view of a Database. + +

For all stored lists the keys of the underlying Database + must have record number format, and therefore the store or index must be a + RECNO, RECNO-RENUMBER, QUEUE, or BTREE-RECNUM database. Only RECNO-RENUMBER + allows true list behavior where record numbers are renumbered following the + position of an element that is added or removed. For the other access + methods (RECNO, QUEUE, and BTREE-RECNUM), stored Lists are most useful as + read-only collections where record numbers are not required to be + sequential.
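As an illustration of read-only access, a sketch that assumes db is an already-open RECNO database and valueBinding is an EntryBinding for its record values (neither is defined on this page):

    List records = new StoredList(db, valueBinding, false /*writeAllowed*/);
    ListIterator i = records.listIterator();
    try {
        while (i.hasNext()) {
            int index = i.nextIndex();   // derived from the record number
            Object value = i.next();
            System.out.println(index + ": " + value);
        }
    } finally {
        StoredIterator.close(i);
    }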

+ +

Note that this class does not conform to the standard Java + collections interface in the following ways:

+ + +

In addition to the standard List methods, this class provides the + following methods for stored lists only. Note that the use of these methods + is not compatible with the standard Java collections interface.

+ +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + +
+Constructor Summary
StoredList(Database database, + EntityBinding valueEntityBinding, + boolean writeAllowed) + +
+          Creates a list entity view of a Database.
StoredList(Database database, + EntityBinding valueEntityBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a list entity view of a Database with a PrimaryKeyAssigner.
StoredList(Database database, + EntryBinding valueBinding, + boolean writeAllowed) + +
+          Creates a list view of a Database.
StoredList(Database database, + EntryBinding valueBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a list view of a Database with a PrimaryKeyAssigner.
+  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ voidadd(int index, + Object value) + +
+          Inserts the specified element at the specified position in this list + (optional operation).
+ booleanadd(Object value) + +
+          Appends the specified element to the end of this list (optional + operation).
+ booleanaddAll(int index, + Collection coll) + +
+          Inserts all of the elements in the specified collection into this list + at the specified position (optional operation).
+ intappend(Object value) + +
+          Appends a given value returning the newly assigned index.
+ booleancontains(Object value) + +
+          Returns true if this list contains the specified element.
+ booleanequals(Object other) + +
+          Compares the specified object with this list for equality.
+ Objectget(int index) + +
+          Returns the element at the specified position in this list.
+ inthashCode() + +
+           
+ intindexOf(Object value) + +
+          Returns the index in this list of the first occurrence of the specified + element, or -1 if this list does not contain this element.
+ intlastIndexOf(Object value) + +
+          Returns the index in this list of the last occurrence of the specified + element, or -1 if this list does not contain this element.
+ ListIteratorlistIterator() + +
+          Returns a list iterator of the elements in this list (in proper + sequence).
+ ListIteratorlistIterator(int index) + +
+          Returns a list iterator of the elements in this list (in proper + sequence), starting at the specified position in this list.
+ Objectremove(int index) + +
+          Removes the element at the specified position in this list (optional + operation).
+ booleanremove(Object value) + +
+          Removes the first occurrence in this list of the specified element + (optional operation).
+ Objectset(int index, + Object value) + +
+          Replaces the element at the specified position in this list with the + specified element (optional operation).
+ ListsubList(int fromIndex, + int toIndex) + +
+          Returns a view of the portion of this list between the specified + fromIndex, inclusive, and toIndex, exclusive.
+ + + + + + + +
Methods inherited from class com.sleepycat.collections.StoredCollection
addAll, containsAll, iterator, iterator, join, removeAll, retainAll, toArray, toArray, toList, toString
+ + + + + + + +
Methods inherited from class com.sleepycat.collections.StoredContainer
areDuplicatesAllowed, areDuplicatesOrdered, areKeysRenumbered, clear, isDirtyRead, isDirtyReadAllowed, isEmpty, isOrdered, isSecondary, isTransactional, isWriteAllowed, size
+ + + + + + + +
Methods inherited from class java.lang.Object
getClass, notify, notifyAll, wait, wait, wait
+ + + + + + + +
Methods inherited from interface java.util.List
addAll, clear, containsAll, isEmpty, iterator, removeAll, retainAll, size, toArray, toArray
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+StoredList

+
+public StoredList(Database database,
+                  EntryBinding valueBinding,
+                  boolean writeAllowed)
+
+
Creates a list view of a Database. +

+

Parameters:
database - is the Database underlying the new collection.
valueBinding - is the binding used to translate between value + buffers and value objects.
writeAllowed - is true to create a read-write collection or false + to create a read-only collection. +
Throws: +
IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+ +

+StoredList

+
+public StoredList(Database database,
+                  EntityBinding valueEntityBinding,
+                  boolean writeAllowed)
+
+
Creates a list entity view of a Database. +

+

Parameters:
database - is the Database underlying the new collection.
valueEntityBinding - is the binding used to translate between + key/value buffers and entity value objects.
writeAllowed - is true to create a read-write collection or false + to create a read-only collection. +
Throws: +
IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+ +

+StoredList

+
+public StoredList(Database database,
+                  EntryBinding valueBinding,
+                  PrimaryKeyAssigner keyAssigner)
+
+
Creates a list view of a Database with a PrimaryKeyAssigner. Writing is allowed for the created list. +

+

Parameters:
database - is the Database underlying the new collection.
valueBinding - is the binding used to translate between value + buffers and value objects.
keyAssigner - is used by the add(int, java.lang.Object) and append(java.lang.Object) + methods to assign primary keys. +
Throws: +
IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+ +

+StoredList

+
+public StoredList(Database database,
+                  EntityBinding valueEntityBinding,
+                  PrimaryKeyAssigner keyAssigner)
+
+
Creates a list entity view of a Database with a PrimaryKeyAssigner. Writing is allowed for the created list. +

+

Parameters:
database - is the Database underlying the new collection.
valueEntityBinding - is the binding used to translate between + key/value buffers and entity value objects.
keyAssigner - is used by the add(int, java.lang.Object) and append(java.lang.Object) + methods to assign primary keys. +
Throws: +
IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+ + + + + + + + +
+Method Detail
+ +

+add

+
+public void add(int index,
+                Object value)
+
+
Inserts the specified element at the specified position in this list + (optional operation). + This method conforms to the List.add(int, Object) interface. +

+

+
Specified by:
add in interface List
+
+
+ +
Throws: +
UnsupportedOperationException - if the collection is a sublist, or + if the collection is indexed, or if the collection is read-only, or if + the RECNO-RENUMBER access method was not used. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+add

+
+public boolean add(Object value)
+
+
Appends the specified element to the end of this list (optional + operation). + This method conforms to the List.add(Object) interface. +

+

+
Specified by:
add in interface List
+
+
+ +
Throws: +
UnsupportedOperationException - if the collection is a sublist, or + if the collection is indexed, or if the collection is read-only, or if + the RECNO-RENUMBER access method was not used. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+append

+
+public int append(Object value)
+
+
Appends a given value, returning the newly assigned index. + If a PrimaryKeyAssigner is associated + with the Store for this list, it will be used to assign the returned + index. Otherwise the Store must be a QUEUE or RECNO database, and the + next available record number is assigned as the index. This method does + not exist in the standard List interface. +

+

+
+
+
+
Parameters:
value - the value to be appended. +
Returns:
the assigned index. +
Throws: +
UnsupportedOperationException - if the collection is indexed, or + if the collection is read-only, or if the Store has no PrimaryKeyAssigner and is not a QUEUE or + RECNO database. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+addAll

+
+public boolean addAll(int index,
+                      Collection coll)
+
+
Inserts all of the elements in the specified collection into this list + at the specified position (optional operation). + This method conforms to the List.addAll(int, Collection) + interface. +

+

+
Specified by:
addAll in interface List
+
+
+ +
Throws: +
UnsupportedOperationException - if the collection is a sublist, or + if the collection is indexed, or if the collection is read-only, or if + the RECNO-RENUMBER access method was not used. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+contains

+
+public boolean contains(Object value)
+
+
Returns true if this list contains the specified element. + This method conforms to the List.contains(java.lang.Object) interface. +

+

+
Specified by:
contains in interface List
+
+
+ +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+get

+
+public Object get(int index)
+
+
Returns the element at the specified position in this list. + This method conforms to the List.get(int) interface. +

+

+
Specified by:
get in interface List
+
+
+ +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+indexOf

+
+public int indexOf(Object value)
+
+
Returns the index in this list of the first occurrence of the specified + element, or -1 if this list does not contain this element. + This method conforms to the List.indexOf(java.lang.Object) interface. +

+

+
Specified by:
indexOf in interface List
+
+
+ +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+lastIndexOf

+
+public int lastIndexOf(Object value)
+
+
Returns the index in this list of the last occurrence of the specified + element, or -1 if this list does not contain this element. + This method conforms to the List.lastIndexOf(java.lang.Object) interface. +

+

+
Specified by:
lastIndexOf in interface List
+
+
+ +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+listIterator

+
+public ListIterator listIterator()
+
+
Returns a list iterator of the elements in this list (in proper + sequence). + The iterator will be read-only if the collection is read-only. + This method conforms to the List.listIterator() interface. +

+

+
Specified by:
listIterator in interface List
+
+
+ +
Returns:
a StoredIterator for this collection. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
See Also:
StoredContainer.isWriteAllowed()
+
+
+
+ +

+listIterator

+
+public ListIterator listIterator(int index)
+
+
Returns a list iterator of the elements in this list (in proper + sequence), starting at the specified position in this list. + The iterator will be read-only if the collection is read-only. + This method conforms to the List.listIterator(int) interface. +

+

+
Specified by:
listIterator in interface List
+
+
+ +
Returns:
a StoredIterator for this collection. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
See Also:
StoredContainer.isWriteAllowed()
+
+
+
+ +

+remove

+
+public Object remove(int index)
+
+
Removes the element at the specified position in this list (optional + operation). + This method conforms to the List.remove(int) interface. +

+

+
Specified by:
remove in interface List
+
+
+ +
Throws: +
UnsupportedOperationException - if the collection is a sublist, or + if the collection is read-only. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+remove

+
+public boolean remove(Object value)
+
+
Removes the first occurrence in this list of the specified element + (optional operation). + This method conforms to the List.remove(Object) interface. +

+

+
Specified by:
remove in interface List
+
+
+ +
Throws: +
UnsupportedOperationException - if the collection is a sublist, or + if the collection is read-only. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+set

+
+public Object set(int index,
+                  Object value)
+
+
Replaces the element at the specified position in this list with the + specified element (optional operation). + This method conforms to the List.set(int, java.lang.Object) interface. +

+

+
Specified by:
set in interface List
+
+
+ +
Throws: +
UnsupportedOperationException - if the collection is indexed, or + if the collection is read-only. +
IllegalArgumentException - if an entity value binding is used and + the primary key of the value given is different than the existing stored + primary key. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+subList

+
+public List subList(int fromIndex,
+                    int toIndex)
+
+
Returns a view of the portion of this list between the specified + fromIndex, inclusive, and toIndex, exclusive. + Note that add() and remove() may not be called for the returned sublist. + This method conforms to the List.subList(int, int) interface. +

+

+
Specified by:
subList in interface List
+
+
+ +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+equals

+
+public boolean equals(Object other)
+
+
Compares the specified object with this list for equality. + A value comparison is performed by this method and the stored values + are compared rather than calling the equals() method of each element. + This method conforms to the List.equals(java.lang.Object) interface. +

+

+
Specified by:
equals in interface List
Overrides:
equals in class StoredCollection
+
+
+ +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+hashCode

+
+public int hashCode()
+
+
+
Specified by:
hashCode in interface List
Overrides:
hashCode in class StoredCollection
+
+
+
+
+
+ +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/StoredMap.html b/db/docs/java/com/sleepycat/collections/StoredMap.html new file mode 100644 index 000000000..4e2162eac --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/StoredMap.html @@ -0,0 +1,837 @@ + + + + + + +StoredMap (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ +com.sleepycat.collections +
+Class StoredMap

+
+java.lang.Object
+  extended bycom.sleepycat.collections.StoredContainer
+      extended bycom.sleepycat.collections.StoredMap
+
+
+
All Implemented Interfaces:
Cloneable, Map
+
+
+
Direct Known Subclasses:
StoredSortedMap
+
+
+
+
public class StoredMap
extends StoredContainer
implements Map
+ +

+A Map view of a Database. + +

Note that this class does not conform to the standard Java + collections interface in the following ways:

+ + +

In addition to the standard Map methods, this class provides the + following methods for stored maps only. Note that the use of these methods + is not compatible with the standard Java collections interface.

+ +

+ +

+


+ +

+ + + + + + + +
+Nested Class Summary
+ + + + + + + +
Nested classes inherited from class java.util.Map
Map.Entry
+  + + + + + + + + + + + + + + + + + + + + + + +
+Constructor Summary
StoredMap(Database database, + EntryBinding keyBinding, + EntityBinding valueEntityBinding, + boolean writeAllowed) + +
+          Creates a map entity view of a Database.
StoredMap(Database database, + EntryBinding keyBinding, + EntityBinding valueEntityBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a map entity view of a Database with a PrimaryKeyAssigner.
StoredMap(Database database, + EntryBinding keyBinding, + EntryBinding valueBinding, + boolean writeAllowed) + +
+          Creates a map view of a Database.
StoredMap(Database database, + EntryBinding keyBinding, + EntryBinding valueBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a map view of a Database with a PrimaryKeyAssigner.
+  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ Objectappend(Object value) + +
+          Appends a given value returning the newly assigned key.
+ booleancontainsKey(Object key) + +
+          Returns true if this map contains the specified key.
+ booleancontainsValue(Object value) + +
+          Returns true if this map contains the specified value.
+ Collectionduplicates(Object key) + +
+          Returns a new collection containing the values mapped to the given key + in this map.
+ SetentrySet() + +
+          Returns a set view of the mappings contained in this map.
+ booleanequals(Object other) + +
+          Compares the specified object with this map for equality.
+ Objectget(Object key) + +
+          Returns the value to which this map maps the specified key.
+ inthashCode() + +
+           
+ SetkeySet() + +
+          Returns a set view of the keys contained in this map.
+ Objectput(Object key, + Object value) + +
+          Associates the specified value with the specified key in this map + (optional operation).
+ voidputAll(Map map) + +
+          Copies all of the mappings from the specified map to this map (optional + operation).
+ Objectremove(Object key) + +
+          Removes the mapping for this key from this map if present (optional + operation).
+ StringtoString() + +
+          Converts the map to a string representation for debugging.
+ Collectionvalues() + +
+          Returns a collection view of the values contained in this map.
+ + + + + + + +
Methods inherited from class com.sleepycat.collections.StoredContainer
areDuplicatesAllowed, areDuplicatesOrdered, areKeysRenumbered, clear, isDirtyRead, isDirtyReadAllowed, isEmpty, isOrdered, isSecondary, isTransactional, isWriteAllowed, size
+ + + + + + + +
Methods inherited from class java.lang.Object
getClass, notify, notifyAll, wait, wait, wait
+ + + + + + + +
Methods inherited from interface java.util.Map
clear, isEmpty, size
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+StoredMap

+
+public StoredMap(Database database,
+                 EntryBinding keyBinding,
+                 EntryBinding valueBinding,
+                 boolean writeAllowed)
+
+
Creates a map view of a Database. +

+

Parameters:
database - is the Database underlying the new collection.
keyBinding - is the binding used to translate between key buffers + and key objects.
valueBinding - is the binding used to translate between value + buffers and value objects.
writeAllowed - is true to create a read-write collection or false + to create a read-only collection. +
Throws: +
IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid. +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
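A minimal self-contained sketch of creating and using a map view with this constructor. It assumes an already-open Database handle from the DB product (com.sleepycat.db) and the primitive tuple bindings from com.sleepycat.bind.tuple; the class and variable names below are illustrative and not part of this page:

    import java.util.Map;
    import com.sleepycat.bind.EntryBinding;
    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.collections.StoredMap;
    import com.sleepycat.db.Database;

    class StoredMapUsage {
        static Map createView(Database db) {
            EntryBinding keyBinding = TupleBinding.getPrimitiveBinding(String.class);
            EntryBinding valueBinding = TupleBinding.getPrimitiveBinding(Long.class);
            Map map = new StoredMap(db, keyBinding, valueBinding, true /*writeAllowed*/);
            map.put("hits", new Long(42));         // written straight through to the database
            System.out.println(map.get("hits"));   // read back through the bindings
            return map;
        }
    }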
+
+ +

+StoredMap

+
+public StoredMap(Database database,
+                 EntryBinding keyBinding,
+                 EntryBinding valueBinding,
+                 PrimaryKeyAssigner keyAssigner)
+
+
Creates a map view of a Database with a PrimaryKeyAssigner. Writing is allowed for the created map. +

+

Parameters:
database - is the Database underlying the new collection.
keyBinding - is the binding used to translate between key buffers + and key objects.
valueBinding - is the binding used to translate between value + buffers and value objects.
keyAssigner - is used by the append(java.lang.Object) method to assign + primary keys. +
Throws: +
IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid. +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+ +

+StoredMap

+
+public StoredMap(Database database,
+                 EntryBinding keyBinding,
+                 EntityBinding valueEntityBinding,
+                 boolean writeAllowed)
+
+
Creates a map entity view of a Database. +

+

Parameters:
database - is the Database underlying the new collection.
keyBinding - is the binding used to translate between key buffers + and key objects.
valueEntityBinding - is the binding used to translate between + key/value buffers and entity value objects.
writeAllowed - is true to create a read-write collection or false + to create a read-only collection. +
Throws: +
IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid. +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+ +

+StoredMap

+
+public StoredMap(Database database,
+                 EntryBinding keyBinding,
+                 EntityBinding valueEntityBinding,
+                 PrimaryKeyAssigner keyAssigner)
+
+
Creates a map entity view of a Database with a PrimaryKeyAssigner. Writing is allowed for the created map. +

+

Parameters:
database - is the Database underlying the new collection.
keyBinding - is the binding used to translate between key buffers + and key objects.
valueEntityBinding - is the binding used to translate between + key/value buffers and entity value objects.
keyAssigner - is used by the append(java.lang.Object) method to assign + primary keys. +
Throws: +
IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid. +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+ + + + + + + + +
+Method Detail
+ +

+get

+
+public Object get(Object key)
+
+
Returns the value to which this map maps the specified key. If + duplicates are allowed, this method returns the first duplicate, in the + order in which duplicates are configured, that maps to the specified + key. + + This method conforms to the Map.get(java.lang.Object) interface. +

+

+
Specified by:
get in interface Map
+
+
+ +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+put

+
+public Object put(Object key,
+                  Object value)
+
+
Associates the specified value with the specified key in this map + (optional operation). If duplicates are allowed and the specified key + is already mapped to a value, this method appends the new duplicate + after the existing duplicates. This method conforms to the Map.put(java.lang.Object, java.lang.Object) interface. + +

The key parameter may be null if an entity binding is used and the + key will be derived from the value (entity) parameter. If an entity + binding is used and the key parameter is non-null, then the key + parameter must be equal to the key derived from the value parameter.

+

+

+
Specified by:
put in interface Map
+
+
+ +
Returns:
the previous value associated with specified key, or null if + there was no mapping for the key or if duplicates are allowed. +
Throws: +
UnsupportedOperationException - if the collection is indexed, or + if the collection is read-only. +
IllegalArgumentException - if an entity value binding is used and + the primary key of the value given is different than the existing stored + primary key. +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+append

+
+public Object append(Object value)
+
+
Appends a given value, returning the newly assigned key. If a PrimaryKeyAssigner is associated with the Store for this map, it will be + used to assign the returned key. Otherwise the Store must be a QUEUE + or RECNO database, and the next available record number is assigned as + the key. This method does not exist in the standard Map + interface. +

+

+
+
+
+
Parameters:
value - the value to be appended. +
Returns:
the assigned key. +
Throws: +
UnsupportedOperationException - if the collection is indexed, or + if the collection is read-only, or if the Store has no PrimaryKeyAssigner and is not a QUEUE or RECNO database. +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
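For example (a sketch; map is assumed to be a writable StoredMap on a QUEUE or RECNO database, or one created with a PrimaryKeyAssigner, and value is a placeholder):

    Object assignedKey = map.append(value);   // stored under the next available record number
    System.out.println("stored under key " + assignedKey);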
+
+
+
+ +

+remove

+
+public Object remove(Object key)
+
+
Removes the mapping for this key from this map if present (optional + operation). If duplicates are allowed, this method removes all + duplicates for the given key. This method conforms to the Map.remove(java.lang.Object) interface. +

+

+
Specified by:
remove in interface Map
+
+
+ +
Throws: +
UnsupportedOperationException - if the collection is read-only. +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+containsKey

+
+public boolean containsKey(Object key)
+
+
Returns true if this map contains the specified key. This method + conforms to the Map.containsKey(java.lang.Object) interface. +

+

+
Specified by:
containsKey in interface Map
+
+
+ +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+containsValue

+
+public boolean containsValue(Object value)
+
+
Returns true if this map contains the specified value. When an entity + binding is used, this method returns whether the map contains the + primary key and value mapping of the entity. This method conforms to + the Map.containsValue(java.lang.Object) interface. +

+

+
Specified by:
containsValue in interface Map
+
+
+ +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+putAll

+
+public void putAll(Map map)
+
+
Copies all of the mappings from the specified map to this map (optional + operation). When duplicates are allowed, the mappings in the specified + map are effectively appended to the existing mappings in this map; that + is, no previously existing mappings in this map are replaced. This + method conforms to the Map.putAll(java.util.Map) interface. +

+

+
Specified by:
putAll in interface Map
+
+
+ +
Throws: +
UnsupportedOperationException - if the collection is read-only, or + if the collection is indexed. +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+keySet

+
+public Set keySet()
+
+
Returns a set view of the keys contained in this map. A SortedSet is returned if the map is ordered. The returned + collection will be read-only if the map is read-only. This method + conforms to the Map.keySet() interface. +

+

+
Specified by:
keySet in interface Map
+
+
+ +
Returns:
a StoredKeySet or a StoredSortedKeySet for this + map. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
See Also:
StoredContainer.isOrdered(), +StoredContainer.isWriteAllowed()
+
+
+
+ +

+entrySet

+
+public Set entrySet()
+
+
Returns a set view of the mappings contained in this map. A SortedSet is returned if the map is ordered. The returned + collection will be read-only if the map is read-only. This method + conforms to the Map.entrySet() interface. +

+

+
Specified by:
entrySet in interface Map
+
+
+ +
Returns:
a StoredEntrySet or a StoredSortedEntrySet for + this map. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
See Also:
StoredContainer.isOrdered(), +StoredContainer.isWriteAllowed()
+
+
+
+ +

+values

+
+public Collection values()
+
+
Returns a collection view of the values contained in this map. A SortedSet is returned if the map is ordered and the + value/entity binding can be used to derive the map's key from its + value/entity object. The returned collection will be read-only if the + map is read-only. This method conforms to the Map.values() + interface. +

+

+
Specified by:
values in interface Map
+
+
+ +
Returns:
a StoredValueSet or a StoredSortedValueSet for + this map. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
See Also:
StoredContainer.isOrdered(), +StoredContainer.isWriteAllowed()
+
+
+
+ +

+duplicates

+
+public Collection duplicates(Object key)
+
+
Returns a new collection containing the values mapped to the given key + in this map. This collection's iterator() method is particularly useful + for iterating over the duplicates for a given key, since this is not + supported by the standard Map interface. This method does not exist in + the standard Map interface. + +

If no mapping for the given key is present, an empty collection is + returned. If duplicates are not allowed, at most a single value will be + in the collection returned. If duplicates are allowed, the returned + collection's add() method may be used to add values for the given + key.

+

+

+
+
+
+
Parameters:
key - is the key for which values are to be returned. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
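A sketch of working with duplicates; map is assumed to allow duplicates and to be writable, and someKey and anotherValue are placeholders:

    Collection dups = map.duplicates("someKey");
    dups.add(anotherValue);                 // appends one more duplicate under "someKey"
    Iterator i = dups.iterator();
    try {
        while (i.hasNext()) {
            System.out.println(i.next());   // every value stored under "someKey"
        }
    } finally {
        StoredIterator.close(i);
    }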
+
+
+
+ +

+equals

+
+public boolean equals(Object other)
+
+
Compares the specified object with this map for equality. A value + comparison is performed by this method and the stored values are + compared rather than calling the equals() method of each element. This + method conforms to the Map.equals(java.lang.Object) interface. +

+

+
Specified by:
equals in interface Map
+
+
+ +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+hashCode

+
+public int hashCode()
+
+
+
Specified by:
hashCode in interface Map
+
+
+
+
+
+
+ +

+toString

+
+public String toString()
+
+
Converts the map to a string representation for debugging. WARNING: All + mappings will be converted to strings and returned and therefore the + returned string may be very large. +

+

+
+
+
+ +
Returns:
the string representation. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+ +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/StoredSortedEntrySet.html b/db/docs/java/com/sleepycat/collections/StoredSortedEntrySet.html new file mode 100644 index 000000000..8262701c7 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/StoredSortedEntrySet.html @@ -0,0 +1,555 @@ + + + + + + +StoredSortedEntrySet (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ +com.sleepycat.collections +
+Class StoredSortedEntrySet

+
+java.lang.Object
+  extended bycom.sleepycat.collections.StoredContainer
+      extended bycom.sleepycat.collections.StoredCollection
+          extended bycom.sleepycat.collections.StoredEntrySet
+              extended bycom.sleepycat.collections.StoredSortedEntrySet
+
+
+
All Implemented Interfaces:
Cloneable, Collection, Set, SortedSet
+
+
+
+
public class StoredSortedEntrySet
extends StoredEntrySet
implements SortedSet
+ +

+The SortedSet returned by Map.entrySet(). This class may not be + instantiated directly. Contrary to what is stated by Map.entrySet() + this class does support the StoredEntrySet.add(java.lang.Object) and StoredCollection.addAll(java.util.Collection) methods. + +

The Map.Entry.setValue(java.lang.Object) method of the Map.Entry objects + that are returned by this class and its iterators behaves just as the StoredIterator.set(java.lang.Object) method does.

+ +

Note that this class does not conform to the standard Java + collections interface in the following ways:

+ + +

In addition to the standard SortedSet methods, this class provides the + following methods for stored sorted sets only. Note that the use of these + methods is not compatible with the standard Java collections interface.

+ +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ Comparatorcomparator() + +
+          Returns null since comparators are not supported.
+ Objectfirst() + +
+          Returns the first (lowest) element currently in this sorted set.
+ SortedSetheadSet(Object toMapEntry) + +
+          Returns a view of the portion of this sorted set whose elements are + strictly less than toMapEntry.
+ SortedSetheadSet(Object toMapEntry, + boolean toInclusive) + +
+          Returns a view of the portion of this sorted set whose elements are + strictly less than toMapEntry, optionally including toMapEntry.
+ Objectlast() + +
+          Returns the last (highest) element currently in this sorted set.
+ SortedSetsubSet(Object fromMapEntry, + boolean fromInclusive, + Object toMapEntry, + boolean toInclusive) + +
+          Returns a view of the portion of this sorted set whose elements are + strictly greater than fromMapEntry and strictly less than toMapEntry, + optionally including fromMapEntry and toMapEntry.
+ SortedSetsubSet(Object fromMapEntry, + Object toMapEntry) + +
+          Returns a view of the portion of this sorted set whose elements range + from fromMapEntry, inclusive, to toMapEntry, exclusive.
+ SortedSettailSet(Object fromMapEntry) + +
+          Returns a view of the portion of this sorted set whose elements are + greater than or equal to fromMapEntry.
+ SortedSettailSet(Object fromMapEntry, + boolean fromInclusive) + +
+          Returns a view of the portion of this sorted set whose elements are + strictly greater than fromMapEntry, optionally including fromMapEntry.
+ + + + + + + +
Methods inherited from class com.sleepycat.collections.StoredEntrySet
add, contains, remove, toString
+ + + + + + + +
Methods inherited from class com.sleepycat.collections.StoredCollection
addAll, containsAll, equals, hashCode, iterator, iterator, join, removeAll, retainAll, toArray, toArray, toList
+ + + + + + + +
Methods inherited from class com.sleepycat.collections.StoredContainer
areDuplicatesAllowed, areDuplicatesOrdered, areKeysRenumbered, clear, isDirtyRead, isDirtyReadAllowed, isEmpty, isOrdered, isSecondary, isTransactional, isWriteAllowed, size
+ + + + + + + +
Methods inherited from class java.lang.Object
getClass, notify, notifyAll, wait, wait, wait
+ + + + + + + +
Methods inherited from interface java.util.Set
add, addAll, clear, contains, containsAll, equals, hashCode, isEmpty, iterator, remove, removeAll, retainAll, size, toArray, toArray
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+comparator

+
+public Comparator comparator()
+
+
Returns null since comparators are not supported. The natural ordering + of a stored collection is data byte order, whether the data classes + implement the Comparable interface or not. + This method does not conform to the SortedSet.comparator() + interface. +

+

+
Specified by:
comparator in interface SortedSet
+
+
+ +
Returns:
null.
+
+
+
+ +

+first

+
+public Object first()
+
+
Returns the first (lowest) element currently in this sorted set. + This method conforms to the SortedSet.first() interface. +

+

+
Specified by:
first in interface SortedSet
+
+
+ +
Returns:
the first element. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+last

+
+public Object last()
+
+
Returns the last (highest) element currently in this sorted set. + This method conforms to the SortedSet.last() interface. +

+

+
Specified by:
last in interface SortedSet
+
+
+ +
Returns:
the last element. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+headSet

+
+public SortedSet headSet(Object toMapEntry)
+
+
Returns a view of the portion of this sorted set whose elements are + strictly less than toMapEntry. + This method conforms to the SortedSet.headSet(java.lang.Object) interface. +

+

+
Specified by:
headSet in interface SortedSet
+
+
+
Parameters:
toMapEntry - the upper bound. +
Returns:
the subset. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+headSet

+
+public SortedSet headSet(Object toMapEntry,
+                         boolean toInclusive)
+
+
Returns a view of the portion of this sorted set whose elements are + strictly less than toMapEntry, optionally including toMapEntry. + This method does not exist in the standard SortedSet interface. +

+

+
+
+
+
Parameters:
toMapEntry - is the upper bound.
toInclusive - is true to include toMapEntry. +
Returns:
the subset. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+tailSet

+
+public SortedSet tailSet(Object fromMapEntry)
+
+
Returns a view of the portion of this sorted set whose elements are + greater than or equal to fromMapEntry. + This method conforms to the SortedSet.tailSet(java.lang.Object) interface. +

+

+
Specified by:
tailSet in interface SortedSet
+
+
+
Parameters:
fromMapEntry - is the lower bound. +
Returns:
the subset. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+tailSet

+
+public SortedSet tailSet(Object fromMapEntry,
+                         boolean fromInclusive)
+
+
Returns a view of the portion of this sorted set whose elements are + strictly greater than fromMapEntry, optionally including fromMapEntry. + This method does not exist in the standard SortedSet interface. +

+

+
+
+
+
Parameters:
fromMapEntry - is the lower bound.
fromInclusive - is true to include fromMapEntry. +
Returns:
the subset. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+subSet

+
+public SortedSet subSet(Object fromMapEntry,
+                        Object toMapEntry)
+
+
Returns a view of the portion of this sorted set whose elements range + from fromMapEntry, inclusive, to toMapEntry, exclusive. + This method conforms to the SortedSet.subSet(java.lang.Object, java.lang.Object) interface. +

+

+
Specified by:
subSet in interface SortedSet
+
+
+
Parameters:
fromMapEntry - is the lower bound.
toMapEntry - is the upper bound. +
Returns:
the subset. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+subSet

+
+public SortedSet subSet(Object fromMapEntry,
+                        boolean fromInclusive,
+                        Object toMapEntry,
+                        boolean toInclusive)
+
+
Returns a view of the portion of this sorted set whose elements are + strictly greater than fromMapEntry and strictly less than toMapEntry, + optionally including fromMapEntry and toMapEntry. + This method does not exist in the standard SortedSet interface. +

+

+
+
+
+
Parameters:
fromMapEntry - is the lower bound.
fromInclusive - is true to include fromMapEntry.
toMapEntry - is the upper bound.
toInclusive - is true to include toMapEntry. +
Returns:
the subset. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+ +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/StoredSortedKeySet.html b/db/docs/java/com/sleepycat/collections/StoredSortedKeySet.html new file mode 100644 index 000000000..823acf675 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/StoredSortedKeySet.html @@ -0,0 +1,594 @@ + + + + + + +StoredSortedKeySet (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.collections +
+Class StoredSortedKeySet

+
+java.lang.Object
+  extended bycom.sleepycat.collections.StoredContainer
+      extended bycom.sleepycat.collections.StoredCollection
+          extended bycom.sleepycat.collections.StoredKeySet
+              extended bycom.sleepycat.collections.StoredSortedKeySet
+
+
+
All Implemented Interfaces:
Cloneable, Collection, Set, SortedSet
+
+
+
+
public class StoredSortedKeySet
extends StoredKeySet
implements SortedSet
+ +

+The SortedSet returned by Map.keySet() and which can also be constructed + directly if a Map is not needed. + Since this collection is a set it only contains one element for each key, + even when duplicates are allowed. Key set iterators are therefore + particularly useful for enumerating the unique keys of a store or index that + allows duplicates. + +

Note that this class does not conform to the standard Java + collections interface in the following ways:

+ + +

In addition to the standard SortedSet methods, this class provides the + following methods for stored sorted sets only. Note that the use of these + methods is not compatible with the standard Java collections interface.

+ +

+ +

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
StoredSortedKeySet(Database database, + EntryBinding keyBinding, + boolean writeAllowed) + +
+          Creates a sorted key set view of a Database.
+Method Summary
+ Comparatorcomparator() + +
+          Returns null since comparators are not supported.
+ Objectfirst() + +
+          Returns the first (lowest) element currently in this sorted set.
+ SortedSetheadSet(Object toKey) + +
+          Returns a view of the portion of this sorted set whose elements are + strictly less than toKey.
+ SortedSetheadSet(Object toKey, + boolean toInclusive) + +
+          Returns a view of the portion of this sorted set whose elements are + strictly less than toKey, optionally including toKey.
+ Objectlast() + +
+          Returns the last (highest) element currently in this sorted set.
+ SortedSetsubSet(Object fromKey, + boolean fromInclusive, + Object toKey, + boolean toInclusive) + +
+          Returns a view of the portion of this sorted set whose elements are + strictly greater than fromKey and strictly less than toKey, + optionally including fromKey and toKey.
+ SortedSetsubSet(Object fromKey, + Object toKey) + +
+          Returns a view of the portion of this sorted set whose elements range + from fromKey, inclusive, to toKey, exclusive.
+ SortedSettailSet(Object fromKey) + +
+          Returns a view of the portion of this sorted set whose elements are + greater than or equal to fromKey.
+ SortedSettailSet(Object fromKey, + boolean fromInclusive) + +
+          Returns a view of the portion of this sorted set whose elements are + strictly greater than fromKey, optionally including fromKey.
+ + + + + + + +
Methods inherited from class com.sleepycat.collections.StoredKeySet
add, contains, remove
+ + + + + + + +
Methods inherited from class com.sleepycat.collections.StoredCollection
addAll, containsAll, equals, hashCode, iterator, iterator, join, removeAll, retainAll, toArray, toArray, toList, toString
+ + + + + + + +
Methods inherited from class com.sleepycat.collections.StoredContainer
areDuplicatesAllowed, areDuplicatesOrdered, areKeysRenumbered, clear, isDirtyRead, isDirtyReadAllowed, isEmpty, isOrdered, isSecondary, isTransactional, isWriteAllowed, size
+ + + + + + + +
Methods inherited from class java.lang.Object
getClass, notify, notifyAll, wait, wait, wait
+ + + + + + + +
Methods inherited from interface java.util.Set
add, addAll, clear, contains, containsAll, equals, hashCode, isEmpty, iterator, remove, removeAll, retainAll, size, toArray, toArray
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+StoredSortedKeySet

+
+public StoredSortedKeySet(Database database,
+                          EntryBinding keyBinding,
+                          boolean writeAllowed)
+
+
Creates a sorted key set view of a Database. +

+

Parameters:
database - is the Database underlying the new collection.
keyBinding - is the binding used to translate between key buffers + and key objects.
writeAllowed - is true to create a read-write collection or false + to create a read-only collection. +
Throws: +
IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid. +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
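For illustration, a minimal sketch of this constructor with an assumed tuple string key binding; db and the key values are placeholders, not part of the signature above:

    // Sketch: db is an already opened Database whose keys are tuple-encoded Strings.
    EntryBinding keyBinding = new StringBinding();                  // com.sleepycat.bind.tuple
    SortedSet keys = new StoredSortedKeySet(db, keyBinding, false); // read-only key set
    SortedSet range = keys.subSet("apple", true, "orange", false);  // inclusive from, exclusive to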
+ + + + + + + + +
+Method Detail
+ +

+comparator

+
+public Comparator comparator()
+
+
Returns null since comparators are not supported. The natural ordering + of a stored collection is data byte order, whether the data classes + implement the Comparable interface or not. + This method does not conform to the SortedSet.comparator() + interface. +

+

+
Specified by:
comparator in interface SortedSet
+
+
+ +
Returns:
null.
+
+
+
+ +

+first

+
+public Object first()
+
+
Returns the first (lowest) element currently in this sorted set. + This method conforms to the SortedSet.first() interface. +

+

+
Specified by:
first in interface SortedSet
+
+
+ +
Returns:
the first element. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+last

+
+public Object last()
+
+
Returns the last (highest) element currently in this sorted set. + This method conforms to the SortedSet.last() interface. +

+

+
Specified by:
last in interface SortedSet
+
+
+ +
Returns:
the last element. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+headSet

+
+public SortedSet headSet(Object toKey)
+
+
Returns a view of the portion of this sorted set whose elements are + strictly less than toKey. + This method conforms to the SortedSet.headSet(java.lang.Object) interface. +

+

+
Specified by:
headSet in interface SortedSet
+
+
+
Parameters:
toKey - is the upper bound. +
Returns:
the subset. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+headSet

+
+public SortedSet headSet(Object toKey,
+                         boolean toInclusive)
+
+
Returns a view of the portion of this sorted set whose elements are + strictly less than toKey, optionally including toKey. + This method does not exist in the standard SortedSet interface. +

+

+
+
+
+
Parameters:
toKey - is the upper bound.
toInclusive - is true to include toKey. +
Returns:
the subset. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+tailSet

+
+public SortedSet tailSet(Object fromKey)
+
+
Returns a view of the portion of this sorted set whose elements are + greater than or equal to fromKey. + This method conforms to the SortedSet.tailSet(java.lang.Object) interface. +

+

+
Specified by:
tailSet in interface SortedSet
+
+
+
Parameters:
fromKey - is the lower bound. +
Returns:
the subset. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+tailSet

+
+public SortedSet tailSet(Object fromKey,
+                         boolean fromInclusive)
+
+
Returns a view of the portion of this sorted set whose elements are + strictly greater than fromKey, optionally including fromKey. + This method does not exist in the standard SortedSet interface. +

+

+
+
+
+
Parameters:
fromKey - is the lower bound.
fromInclusive - is true to include fromKey. +
Returns:
the subset. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+subSet

+
+public SortedSet subSet(Object fromKey,
+                        Object toKey)
+
+
Returns a view of the portion of this sorted set whose elements range + from fromKey, inclusive, to toKey, exclusive. + This method conforms to the SortedSet.subSet(java.lang.Object, java.lang.Object) interface. +

+

+
Specified by:
subSet in interface SortedSet
+
+
+
Parameters:
fromKey - is the lower bound.
toKey - is the upper bound. +
Returns:
the subset. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+subSet

+
+public SortedSet subSet(Object fromKey,
+                        boolean fromInclusive,
+                        Object toKey,
+                        boolean toInclusive)
+
+
Returns a view of the portion of this sorted set whose elements are + strictly greater than fromKey and strictly less than toKey, + optionally including fromKey and toKey. + This method does not exist in the standard SortedSet interface. +

+

+
+
+
+
Parameters:
fromKey - is the lower bound.
fromInclusive - is true to include fromKey.
toKey - is the upper bound.
toInclusive - is true to include toKey. +
Returns:
the subset. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+ +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/StoredSortedMap.html b/db/docs/java/com/sleepycat/collections/StoredSortedMap.html new file mode 100644 index 000000000..cca1af3a5 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/StoredSortedMap.html @@ -0,0 +1,686 @@ + + + + + + +StoredSortedMap (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.collections +
+Class StoredSortedMap

+
+java.lang.Object
+  extended bycom.sleepycat.collections.StoredContainer
+      extended bycom.sleepycat.collections.StoredMap
+          extended bycom.sleepycat.collections.StoredSortedMap
+
+
+
All Implemented Interfaces:
Cloneable, Map, SortedMap
+
+
+
+
public class StoredSortedMap
extends StoredMap
implements SortedMap
+ +

+A SortedMap view of a Database. + +

Note that this class does not conform to the standard Java + collections interface in the following ways:

+ + +

In addition to the standard SortedMap methods, this class provides the + following methods for stored sorted maps only. Note that the use of these + methods is not compatible with the standard Java collections interface.

+ +

+ +

+


+ +

+ + + + + + + +
+Nested Class Summary
+ + + + + + + +
Nested classes inherited from class java.util.Map
Map.Entry
+Constructor Summary
StoredSortedMap(Database database, + EntryBinding keyBinding, + EntityBinding valueEntityBinding, + boolean writeAllowed) + +
+          Creates a sorted map entity view of a Database.
StoredSortedMap(Database database, + EntryBinding keyBinding, + EntityBinding valueEntityBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a sorted map entity view of a Database with a PrimaryKeyAssigner.
StoredSortedMap(Database database, + EntryBinding keyBinding, + EntryBinding valueBinding, + boolean writeAllowed) + +
+          Creates a sorted map view of a Database.
StoredSortedMap(Database database, + EntryBinding keyBinding, + EntryBinding valueBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a sorted map view of a Database with a PrimaryKeyAssigner.
+Method Summary
+ Comparatorcomparator() + +
+          Returns null since comparators are not supported.
+ ObjectfirstKey() + +
+          Returns the first (lowest) key currently in this sorted map.
+ SortedMapheadMap(Object toKey) + +
+          Returns a view of the portion of this sorted map whose keys are strictly less than toKey.
+ SortedMapheadMap(Object toKey, + boolean toInclusive) + +
+          Returns a view of the portion of this sorted map whose elements are + strictly less than toKey, optionally including toKey.
+ ObjectlastKey() + +
+          Returns the last (highest) key currently in this sorted map.
+ SortedMapsubMap(Object fromKey, + boolean fromInclusive, + Object toKey, + boolean toInclusive) + +
+          Returns a view of the portion of this sorted map whose elements are + strictly greater than fromKey and strictly less than toKey, + optionally including fromKey and toKey.
+ SortedMapsubMap(Object fromKey, + Object toKey) + +
+          Returns a view of the portion of this sorted map whose elements range + from fromKey, inclusive, to toKey, exclusive.
+ SortedMaptailMap(Object fromKey) + +
+          Returns a view of the portion of this sorted map whose elements are + greater than or equal to fromKey.
+ SortedMaptailMap(Object fromKey, + boolean fromInclusive) + +
+          Returns a view of the portion of this sorted map whose elements are + strictly greater than fromKey, optionally including fromKey.
+ + + + + + + +
Methods inherited from class com.sleepycat.collections.StoredMap
append, containsKey, containsValue, duplicates, entrySet, equals, get, hashCode, keySet, put, putAll, remove, toString, values
+ + + + + + + +
Methods inherited from class com.sleepycat.collections.StoredContainer
areDuplicatesAllowed, areDuplicatesOrdered, areKeysRenumbered, clear, isDirtyRead, isDirtyReadAllowed, isEmpty, isOrdered, isSecondary, isTransactional, isWriteAllowed, size
+ + + + + + + +
Methods inherited from class java.lang.Object
getClass, notify, notifyAll, wait, wait, wait
+ + + + + + + +
Methods inherited from interface java.util.Map
clear, containsKey, containsValue, entrySet, equals, get, hashCode, isEmpty, keySet, put, putAll, remove, size, values
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+StoredSortedMap

+
+public StoredSortedMap(Database database,
+                       EntryBinding keyBinding,
+                       EntryBinding valueBinding,
+                       boolean writeAllowed)
+
+
Creates a sorted map view of a Database. +

+

Parameters:
database - is the Database underlying the new collection.
keyBinding - is the binding used to translate between key buffers + and key objects.
valueBinding - is the binding used to translate between value + buffers and value objects.
writeAllowed - is true to create a read-write collection or false + to create a read-only collection. +
Throws: +
IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid. +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
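A hedged usage sketch of this constructor; StringBinding, SerialBinding, catalog, MyValue and myValue are illustrative assumptions rather than part of the documentation above:

    // Sketch: catalog is an open StoredClassCatalog; MyValue is any Serializable value class.
    StoredSortedMap map = new StoredSortedMap(db,
                                              new StringBinding(),
                                              new SerialBinding(catalog, MyValue.class),
                                              true);
    map.put("k1", myValue);                              // allowed because writeAllowed == true
    SortedMap head = map.headMap("m");                   // standard SortedMap view
    SortedMap range = map.subMap("a", true, "m", true);  // extended inclusive-bound form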
+
+ +

+StoredSortedMap

+
+public StoredSortedMap(Database database,
+                       EntryBinding keyBinding,
+                       EntryBinding valueBinding,
+                       PrimaryKeyAssigner keyAssigner)
+
+
Creates a sorted map view of a Database with a PrimaryKeyAssigner. Writing is allowed for the created map. +

+

Parameters:
database - is the Database underlying the new collection.
keyBinding - is the binding used to translate between key buffers + and key objects.
valueBinding - is the binding used to translate between value + buffers and value objects.
keyAssigner - is used by the StoredMap.append(java.lang.Object) method to assign + primary keys. +
Throws: +
IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid. +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
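A sketch of the append() pattern this constructor enables; keyAssigner, the bindings and value are assumptions, and the reading that append() returns the newly assigned key is an assumption based on the StoredMap.append reference above:

    // Sketch: keyAssigner is an application-supplied PrimaryKeyAssigner implementation.
    StoredSortedMap map = new StoredSortedMap(db, keyBinding, valueBinding, keyAssigner);
    Object newKey = map.append(value);   // the assigner chooses the next primary key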
+
+ +

+StoredSortedMap

+
+public StoredSortedMap(Database database,
+                       EntryBinding keyBinding,
+                       EntityBinding valueEntityBinding,
+                       boolean writeAllowed)
+
+
Creates a sorted map entity view of a Database. +

+

Parameters:
database - is the Database underlying the new collection.
keyBinding - is the binding used to translate between key buffers + and key objects.
valueEntityBinding - is the binding used to translate between + key/value buffers and entity value objects.
writeAllowed - is true to create a read-write collection or false + to create a read-only collection. +
Throws: +
IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid. +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+ +

+StoredSortedMap

+
+public StoredSortedMap(Database database,
+                       EntryBinding keyBinding,
+                       EntityBinding valueEntityBinding,
+                       PrimaryKeyAssigner keyAssigner)
+
+
Creates a sorted map entity view of a Database with a PrimaryKeyAssigner. Writing is allowed for the created map. +

+

Parameters:
database - is the Database underlying the new collection.
keyBinding - is the binding used to translate between key buffers + and key objects.
valueEntityBinding - is the binding used to translate between + key/value buffers and entity value objects.
keyAssigner - is used by the StoredMap.append(java.lang.Object) method to assign + primary keys. +
Throws: +
IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid. +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+ + + + + + + + +
+Method Detail
+ +

+comparator

+
+public Comparator comparator()
+
+
Returns null since comparators are not supported. The natural ordering + of a stored collection is data byte order, whether the data classes + implement the Comparable interface or not. + This method does not conform to the SortedMap.comparator() + interface. +

+

+
Specified by:
comparator in interface SortedMap
+
+
+ +
Returns:
null.
+
+
+
+ +

+firstKey

+
+public Object firstKey()
+
+
Returns the first (lowest) key currently in this sorted map. + This method conforms to the SortedMap.firstKey() interface. +

+

+
Specified by:
firstKey in interface SortedMap
+
+
+ +
Returns:
the first key. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+lastKey

+
+public Object lastKey()
+
+
Returns the last (highest) key currently in this sorted map. This method conforms to the SortedMap.lastKey() interface.

+

+
Specified by:
lastKey in interface SortedMap
+
+
+ +
Returns:
the last key. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+headMap

+
+public SortedMap headMap(Object toKey)
+
+
Returns a view of the portion of this sorted map whose keys are strictly less than toKey. This method conforms to the SortedMap.headMap(java.lang.Object) interface.

+

+
Specified by:
headMap in interface SortedMap
+
+
+
Parameters:
toKey - is the upper bound. +
Returns:
the submap. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+headMap

+
+public SortedMap headMap(Object toKey,
+                         boolean toInclusive)
+
+
Returns a view of the portion of this sorted map whose elements are + strictly less than toKey, optionally including toKey. + This method does not exist in the standard SortedMap interface. +

+

+
+
+
+
Parameters:
toKey - is the upper bound.
toInclusive - is true to include toKey. +
Returns:
the submap. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+tailMap

+
+public SortedMap tailMap(Object fromKey)
+
+
Returns a view of the portion of this sorted map whose elements are + greater than or equal to fromKey. + This method conforms to the SortedMap.tailMap(java.lang.Object) interface. +

+

+
Specified by:
tailMap in interface SortedMap
+
+
+
Parameters:
fromKey - is the lower bound. +
Returns:
the submap. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+tailMap

+
+public SortedMap tailMap(Object fromKey,
+                         boolean fromInclusive)
+
+
Returns a view of the portion of this sorted map whose elements are + strictly greater than fromKey, optionally including fromKey. + This method does not exist in the standard SortedMap interface. +

+

+
+
+
+
Parameters:
fromKey - is the lower bound.
fromInclusive - is true to include fromKey. +
Returns:
the submap. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+subMap

+
+public SortedMap subMap(Object fromKey,
+                        Object toKey)
+
+
Returns a view of the portion of this sorted map whose elements range + from fromKey, inclusive, to toKey, exclusive. + This method conforms to the SortedMap.subMap(java.lang.Object, java.lang.Object) interface. +

+

+
Specified by:
subMap in interface SortedMap
+
+
+
Parameters:
fromKey - is the lower bound.
toKey - is the upper bound. +
Returns:
the submap. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+subMap

+
+public SortedMap subMap(Object fromKey,
+                        boolean fromInclusive,
+                        Object toKey,
+                        boolean toInclusive)
+
+
Returns a view of the portion of this sorted map whose elements are + strictly greater than fromKey and strictly less than toKey, + optionally including fromKey and toKey. + This method does not exist in the standard SortedMap interface. +

+

+
+
+
+
Parameters:
fromKey - is the lower bound.
fromInclusive - is true to include fromKey.
toKey - is the upper bound.
toInclusive - is true to include toKey. +
Returns:
the submap. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+ +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/StoredSortedValueSet.html b/db/docs/java/com/sleepycat/collections/StoredSortedValueSet.html new file mode 100644 index 000000000..f43bc0900 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/StoredSortedValueSet.html @@ -0,0 +1,593 @@ + + + + + + +StoredSortedValueSet (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.collections +
+Class StoredSortedValueSet

+
+java.lang.Object
+  extended bycom.sleepycat.collections.StoredContainer
+      extended bycom.sleepycat.collections.StoredCollection
+          extended bycom.sleepycat.collections.StoredValueSet
+              extended bycom.sleepycat.collections.StoredSortedValueSet
+
+
+
All Implemented Interfaces:
Cloneable, Collection, Set, SortedSet
+
+
+
+
public class StoredSortedValueSet
extends StoredValueSet
implements SortedSet
+ +

+The SortedSet returned by Map.values() and which can also be constructed + directly if a Map is not needed. + Although this collection is a set it may contain duplicate values. Only if + an entity value binding is used are all elements guaranteed to be unique. + +

Note that this class does not conform to the standard Java + collections interface in the following ways:

+ + +

In addition to the standard SortedSet methods, this class provides the + following methods for stored sorted value sets only. Note that the use of + these methods is not compatible with the standard Java collections + interface.

+ +

+ +

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
StoredSortedValueSet(Database database, + EntityBinding valueEntityBinding, + boolean writeAllowed) + +
+          Creates a sorted value set entity view of a Database.
+Method Summary
+ Comparatorcomparator() + +
+          Returns null since comparators are not supported.
+ Objectfirst() + +
+          Returns the first (lowest) element currently in this sorted set.
+ SortedSetheadSet(Object toValue) + +
+          Returns a view of the portion of this sorted set whose elements are + strictly less than toValue.
+ SortedSetheadSet(Object toValue, + boolean toInclusive) + +
+          Returns a view of the portion of this sorted set whose elements are + strictly less than toValue, optionally including toValue.
+ Objectlast() + +
+          Returns the last (highest) element currently in this sorted set.
+ SortedSetsubSet(Object fromValue, + boolean fromInclusive, + Object toValue, + boolean toInclusive) + +
+          Returns a view of the portion of this sorted set whose elements are + strictly greater than fromValue and strictly less than toValue, + optionally including fromValue and toValue.
+ SortedSetsubSet(Object fromValue, + Object toValue) + +
+          Returns a view of the portion of this sorted set whose elements range + from fromValue, inclusive, to toValue, exclusive.
+ SortedSettailSet(Object fromValue) + +
+          Returns a view of the portion of this sorted set whose elements are + greater than or equal to fromValue.
+ SortedSettailSet(Object fromValue, + boolean fromInclusive) + +
+          Returns a view of the portion of this sorted set whose elements are + strictly greater than fromValue, optionally including fromValue.
+ + + + + + + +
Methods inherited from class com.sleepycat.collections.StoredValueSet
add, contains, remove, size
+ + + + + + + +
Methods inherited from class com.sleepycat.collections.StoredCollection
addAll, containsAll, equals, hashCode, iterator, iterator, join, removeAll, retainAll, toArray, toArray, toList, toString
+ + + + + + + +
Methods inherited from class com.sleepycat.collections.StoredContainer
areDuplicatesAllowed, areDuplicatesOrdered, areKeysRenumbered, clear, isDirtyRead, isDirtyReadAllowed, isEmpty, isOrdered, isSecondary, isTransactional, isWriteAllowed
+ + + + + + + +
Methods inherited from class java.lang.Object
getClass, notify, notifyAll, wait, wait, wait
+ + + + + + + +
Methods inherited from interface java.util.Set
add, addAll, clear, contains, containsAll, equals, hashCode, isEmpty, iterator, remove, removeAll, retainAll, size, toArray, toArray
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+StoredSortedValueSet

+
+public StoredSortedValueSet(Database database,
+                            EntityBinding valueEntityBinding,
+                            boolean writeAllowed)
+
+
Creates a sorted value set entity view of a Database. +

+

Parameters:
database - is the Database underlying the new collection.
valueEntityBinding - is the binding used to translate between + key/value buffers and entity value objects.
writeAllowed - is true to create a read-write collection or false + to create a read-only collection. +
Throws: +
IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid. +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
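As a usage sketch only; partBinding, db and somePart are assumptions:

    // Sketch: partBinding is an EntityBinding mapping key/value buffers to entity objects.
    SortedSet parts = new StoredSortedValueSet(db, partBinding, false);
    Object lowest = parts.first();
    SortedSet tail = parts.tailSet(somePart, false);   // strictly greater than somePart (extension method)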
+ + + + + + + + +
+Method Detail
+ +

+comparator

+
+public Comparator comparator()
+
+
Returns null since comparators are not supported. The natural ordering + of a stored collection is data byte order, whether the data classes + implement the Comparable interface or not. + This method does not conform to the SortedSet.comparator() + interface. +

+

+
Specified by:
comparator in interface SortedSet
+
+
+ +
Returns:
null.
+
+
+
+ +

+first

+
+public Object first()
+
+
Returns the first (lowest) element currently in this sorted set. + This method conforms to the SortedSet.first() interface. +

+

+
Specified by:
first in interface SortedSet
+
+
+ +
Returns:
the first element. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+last

+
+public Object last()
+
+
Returns the last (highest) element currently in this sorted set. + This method conforms to the SortedSet.last() interface. +

+

+
Specified by:
last in interface SortedSet
+
+
+ +
Returns:
the last element. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+headSet

+
+public SortedSet headSet(Object toValue)
+
+
Returns a view of the portion of this sorted set whose elements are + strictly less than toValue. + This method conforms to the SortedSet.headSet(java.lang.Object) interface. +

+

+
Specified by:
headSet in interface SortedSet
+
+
+
Parameters:
toValue - the upper bound. +
Returns:
the subset. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+headSet

+
+public SortedSet headSet(Object toValue,
+                         boolean toInclusive)
+
+
Returns a view of the portion of this sorted set whose elements are + strictly less than toValue, optionally including toValue. + This method does not exist in the standard SortedSet interface. +

+

+
+
+
+
Parameters:
toValue - is the upper bound.
toInclusive - is true to include toValue. +
Returns:
the subset. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+tailSet

+
+public SortedSet tailSet(Object fromValue)
+
+
Returns a view of the portion of this sorted set whose elements are + greater than or equal to fromValue. + This method conforms to the SortedSet.tailSet(java.lang.Object) interface. +

+

+
Specified by:
tailSet in interface SortedSet
+
+
+
Parameters:
fromValue - is the lower bound. +
Returns:
the subset. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+tailSet

+
+public SortedSet tailSet(Object fromValue,
+                         boolean fromInclusive)
+
+
Returns a view of the portion of this sorted set whose elements are + strictly greater than fromValue, optionally including fromValue. + This method does not exist in the standard SortedSet interface. +

+

+
+
+
+
Parameters:
fromValue - is the lower bound.
fromInclusive - is true to include fromValue. +
Returns:
the subset. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+subSet

+
+public SortedSet subSet(Object fromValue,
+                        Object toValue)
+
+
Returns a view of the portion of this sorted set whose elements range + from fromValue, inclusive, to toValue, exclusive. + This method conforms to the SortedSet.subSet(java.lang.Object, java.lang.Object) interface. +

+

+
Specified by:
subSet in interface SortedSet
+
+
+
Parameters:
fromValue - is the lower bound.
toValue - is the upper bound. +
Returns:
the subset. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+
+ +

+subSet

+
+public SortedSet subSet(Object fromValue,
+                        boolean fromInclusive,
+                        Object toValue,
+                        boolean toInclusive)
+
+
Returns a view of the portion of this sorted set whose elements are + strictly greater than fromValue and strictly less than toValue, + optionally including fromValue and toValue. + This method does not exist in the standard SortedSet interface. +

+

+
+
+
+
Parameters:
fromValue - is the lower bound.
fromInclusive - is true to include fromValue.
toValue - is the upper bound.
toInclusive - is true to include toValue. +
Returns:
the subset. +
Throws: +
RuntimeExceptionWrapper - if a DatabaseException is thrown.
+
+
+ +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/StoredValueSet.html b/db/docs/java/com/sleepycat/collections/StoredValueSet.html new file mode 100644 index 000000000..902fa0ac1 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/StoredValueSet.html @@ -0,0 +1,449 @@ + + + + + + +StoredValueSet (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.collections +
+Class StoredValueSet

+
+java.lang.Object
+  extended bycom.sleepycat.collections.StoredContainer
+      extended bycom.sleepycat.collections.StoredCollection
+          extended bycom.sleepycat.collections.StoredValueSet
+
+
+
All Implemented Interfaces:
Cloneable, Collection, Set
+
+
+
Direct Known Subclasses:
StoredSortedValueSet
+
+
+
+
public class StoredValueSet
extends StoredCollection
implements Set
+ +

+The Set returned by Map.values() and Map.duplicates(), and which can also be + constructed directly if a Map is not needed. + Although this collection is a set it may contain duplicate values. Only if + an entity value binding is used are all elements guaranteed to be unique. + +

Note that this class does not conform to the standard Java + collections interface in the following ways:

+ +

+ +
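For illustration only (map and someKey are assumptions), the two documented ways of obtaining such a set are Map.values() and Map.duplicates():

    // Sketch: map is a StoredMap; the collection returned by values() is a StoredValueSet.
    Set values = (Set) map.values();
    Collection valuesForOneKey = map.duplicates(someKey);   // values stored under a single key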

+


+ +

+ + + + + + + + + + + + + + + + + + + +
+Constructor Summary
StoredValueSet(Database database, + EntityBinding valueEntityBinding, + boolean writeAllowed) + +
+          Creates a value set entity view of a Database.
StoredValueSet(Database database, + EntryBinding valueBinding, + boolean writeAllowed) + +
+          Creates a value set view of a Database.
+Method Summary
+ booleanadd(Object entity) + +
+          Adds the specified entity to this set if it is not already present + (optional operation).
+ booleancontains(Object value) + +
+          Returns true if this set contains the specified element.
+ booleanremove(Object value) + +
+          Removes the specified value from this set if it is present (optional + operation).
+ intsize() + +
+          Always throws UnsupportedOperationException.
+ + + + + + + +
Methods inherited from class com.sleepycat.collections.StoredCollection
addAll, containsAll, equals, hashCode, iterator, iterator, join, removeAll, retainAll, toArray, toArray, toList, toString
+ + + + + + + +
Methods inherited from class com.sleepycat.collections.StoredContainer
areDuplicatesAllowed, areDuplicatesOrdered, areKeysRenumbered, clear, isDirtyRead, isDirtyReadAllowed, isEmpty, isOrdered, isSecondary, isTransactional, isWriteAllowed
+ + + + + + + +
Methods inherited from class java.lang.Object
getClass, notify, notifyAll, wait, wait, wait
+ + + + + + + +
Methods inherited from interface java.util.Set
addAll, clear, containsAll, equals, hashCode, isEmpty, iterator, removeAll, retainAll, toArray, toArray
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+StoredValueSet

+
+public StoredValueSet(Database database,
+                      EntryBinding valueBinding,
+                      boolean writeAllowed)
+
+
Creates a value set view of a Database. +

+

Parameters:
database - is the Database underlying the new collection.
valueBinding - is the binding used to translate between value + buffers and value objects.
writeAllowed - is true to create a read-write collection or false + to create a read-only collection. +
Throws: +
IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+ +

+StoredValueSet

+
+public StoredValueSet(Database database,
+                      EntityBinding valueEntityBinding,
+                      boolean writeAllowed)
+
+
Creates a value set entity view of a Database. +

+

Parameters:
database - is the Database underlying the new collection.
valueEntityBinding - is the binding used to translate between + key/value buffers and entity value objects.
writeAllowed - is true to create a read-write collection or false + to create a read-only collection. +
Throws: +
IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+ + + + + + + + +
+Method Detail
+ +

+add

+
+public boolean add(Object entity)
+
+
Adds the specified entity to this set if it is not already present + (optional operation). + This method conforms to the Set.add(java.lang.Object) interface. +

+

+
Specified by:
add in interface Set
+
+
+
Parameters:
entity - is the entity to be added. +
Returns:
true if the entity was added, that is the key-value pair + represented by the entity was not previously present in the collection. +
Throws: +
UnsupportedOperationException - if the collection is read-only, + if the collection is indexed, or if an entity binding is not used. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
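A short sketch of the entity-binding case described above; db, partEntityBinding and somePartEntity are assumptions:

    // Sketch: valid only because the set is created with an EntityBinding and writeAllowed == true.
    StoredValueSet parts = new StoredValueSet(db, partEntityBinding, true);
    boolean added = parts.add(somePartEntity);   // false if the key/value pair was already present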
+
+
+
+ +

+contains

+
+public boolean contains(Object value)
+
+
Returns true if this set contains the specified element. + This method conforms to the Set.contains(java.lang.Object) + interface. +

+

+
Specified by:
contains in interface Set
+
+
+
Parameters:
value - the value to check. +
Returns:
whether the set contains the given value.
+
+
+
+ +

+remove

+
+public boolean remove(Object value)
+
+
Removes the specified value from this set if it is present (optional operation). If an entity binding is used, the key-value pair represented by the given entity is removed. If an entity binding is not used, the first occurrence of a key-value pair with the given value is removed. This method conforms to the Set.remove(java.lang.Object) interface.

+

+
Specified by:
remove in interface Set
+
+
+ +
Throws: +
UnsupportedOperationException - if the collection is read-only. +
RuntimeExceptionWrapper - if a DatabaseException is + thrown.
+
+
+
+ +

+size

+
+public int size()
+
+
Description copied from class: StoredContainer
+
Always throws UnsupportedOperationException. The size of a database + cannot be obtained reliably or inexpensively. + This method therefore violates the Collection.size() and + Map.size() interfaces. +

+

+
Specified by:
size in interface Set
Overrides:
size in class StoredContainer
+
+
+ +
Returns:
always throws an exception.
+
+
+ +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/TransactionRunner.html b/db/docs/java/com/sleepycat/collections/TransactionRunner.html new file mode 100644 index 000000000..d9e0c8aca --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/TransactionRunner.html @@ -0,0 +1,501 @@ + + + + + + +TransactionRunner (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.collections +
+Class TransactionRunner

+
+java.lang.Object
+  extended bycom.sleepycat.collections.TransactionRunner
+
+
+
+
public class TransactionRunner
extends Object
+ +

+Starts a transaction, calls TransactionWorker.doWork(), and handles + transaction retry and exceptions. +

+ +
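A minimal sketch of the documented calling pattern; env is an assumed open transactional Environment and map an assumed transactional stored collection:

    TransactionRunner runner = new TransactionRunner(env);
    runner.run(new TransactionWorker() {
        public void doWork() throws Exception {
            map.put("one", "1");   // all access here shares one transaction and is
            map.put("two", "2");   // retried automatically on DeadlockException
        }
    });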

+


+ +

+ + + + + + + + + + + + + + +
+Field Summary
+static intDEFAULT_MAX_RETRIES + +
+          The default maximum number of retries.
+  + + + + + + + + + + + + + +
+Constructor Summary
TransactionRunner(Environment env) + +
+          Creates a transaction runner for a given Berkeley DB environment.
TransactionRunner(Environment env, + int maxRetries, + TransactionConfig config) + +
+          Creates a transaction runner for a given Berkeley DB environment and + with a given number of maximum retries.
+Method Summary
+ booleangetAllowNestedTransactions() + +
+          Returns whether nested transactions will be created if + run() is called when a transaction is already active for + the current thread.
+ intgetMaxRetries() + +
+          Returns the maximum number of retries that will be performed when + deadlocks are detected.
+ TransactionConfiggetTransactionConfig() + +
+          Returns the transaction configuration used for calling + Environment.beginTransaction(com.sleepycat.db.Transaction, com.sleepycat.db.TransactionConfig).
+ voidrun(TransactionWorker worker) + +
+          Calls the TransactionWorker.doWork() method and, for transactional + environments, begins and ends a transaction.
+ voidsetAllowNestedTransactions(boolean allowNestedTxn) + +
+          Changes whether nested transactions will be created if + run() is called when a transaction is already active for + the current thread.
+ voidsetMaxRetries(int maxRetries) + +
+          Changes the maximum number of retries that will be performed when + deadlocks are detected.
+ voidsetTransactionConfig(TransactionConfig config) + +
+          Changes the transaction configuration used for calling + Environment.beginTransaction(com.sleepycat.db.Transaction, com.sleepycat.db.TransactionConfig).
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + +
+Field Detail
+ +

+DEFAULT_MAX_RETRIES

+
+public static final int DEFAULT_MAX_RETRIES
+
+
The default maximum number of retries. +

+

+
See Also:
Constant Field Values
+
+ + + + + + + + +
+Constructor Detail
+ +

+TransactionRunner

+
+public TransactionRunner(Environment env)
+
+
Creates a transaction runner for a given Berkeley DB environment. + The default maximum number of retries (DEFAULT_MAX_RETRIES) and + a null (default) TransactionConfig will be used. +

+

Parameters:
env - is the environment for running transactions.
+
+ +

+TransactionRunner

+
+public TransactionRunner(Environment env,
+                         int maxRetries,
+                         TransactionConfig config)
+
+
Creates a transaction runner for a given Berkeley DB environment and + with a given number of maximum retries. +

+

Parameters:
env - is the environment for running transactions.
maxRetries - is the maximum number of retries that will be + performed when deadlocks are detected.
config - the transaction configuration used for calling + Environment.beginTransaction(com.sleepycat.db.Transaction, com.sleepycat.db.TransactionConfig), or null to use the default + configuration. The configuration object is not cloned, and + any modifications to it will impact subsequent transactions.
+ + + + + + + + +
+Method Detail
+ +

+getMaxRetries

+
+public int getMaxRetries()
+
+
Returns the maximum number of retries that will be performed when + deadlocks are detected. +

+

+
+
+
+
+ +

+setMaxRetries

+
+public void setMaxRetries(int maxRetries)
+
+
Changes the maximum number of retries that will be performed when + deadlocks are detected. + Calling this method does not impact transactions already running. +

+

+
+
+
+
+ +

+getAllowNestedTransactions

+
+public boolean getAllowNestedTransactions()
+
+
Returns whether nested transactions will be created if + run() is called when a transaction is already active for + the current thread. + By default this property is false. +

+

+
+
+
+
+ +

+setAllowNestedTransactions

+
+public void setAllowNestedTransactions(boolean allowNestedTxn)
+
+
Changes whether nested transactions will be created if + run() is called when a transaction is already active for + the current thread. + Calling this method does not impact transactions already running. +

+

+
+
+
+
+ +

+getTransactionConfig

+
+public TransactionConfig getTransactionConfig()
+
+
Returns the transaction configuration used for calling + Environment.beginTransaction(com.sleepycat.db.Transaction, com.sleepycat.db.TransactionConfig). + +

If this property is null, the default configuration is used. The + configuration object is not cloned, and any modifications to it will + impact subsequent transactions.

+

+

+ +
Returns:
the transaction configuration.
+
+
+
+ +

+setTransactionConfig

+
+public void setTransactionConfig(TransactionConfig config)
+
+
Changes the transaction configuration used for calling + Environment.beginTransaction(com.sleepycat.db.Transaction, com.sleepycat.db.TransactionConfig). + +

If this property is null, the default configuration is used. The + configuration object is not cloned, and any modifications to it will + impact subsequent transactions.

+

+

+
Parameters:
config - the transaction configuration.
+
+
+
+ +

+run

+
+public void run(TransactionWorker worker)
+         throws DatabaseException,
+                Exception
+
+
Calls the TransactionWorker.doWork() method and, for transactional + environments, begins and ends a transaction. If the environment given + is non-transactional, a transaction will not be used but the doWork() + method will still be called. + +

In a transactional environment, a new transaction is started before + calling doWork(). This will start a nested transaction if one is + already active. If DeadlockException is thrown by doWork(), the + transaction will be aborted and the process will be repeated up to the + maximum number of retries specified. If another exception is thrown by + doWork() or the maximum number of retries has occurred, the transaction + will be aborted and the exception will be rethrown by this method. If + no exception is thrown by doWork(), the transaction will be committed. + This method will not attempt to commit or abort a transaction if it has + already been committed or aborted by doWork().

+

+

+ +
Throws: +
DeadlockException - when it is thrown by doWork() and the + maximum number of retries has occurred. The transaction will have been + aborted by this method. +
Exception - when any other exception is thrown by doWork(). The + exception will first be unwrapped by calling ExceptionUnwrapper.unwrap(java.lang.Exception). The transaction will have been aborted by + this method. +
DatabaseException
+
+
+ +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/TransactionWorker.html b/db/docs/java/com/sleepycat/collections/TransactionWorker.html new file mode 100644 index 000000000..c4258e8b5 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/TransactionWorker.html @@ -0,0 +1,223 @@ + + + + + + +TransactionWorker (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.collections +
+Interface TransactionWorker

+
+
+
public interface TransactionWorker
+ +

+The interface implemented to perform the work within a transaction. + To run a transaction, an instance of this interface is passed to the + TransactionRunner.run(com.sleepycat.collections.TransactionWorker) method. +

+ +
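As a sketch of a standalone implementation (the class name and map are illustrative; see the TransactionRunner page above for how it is invoked):

    // Sketch: a reusable worker passed to TransactionRunner.run().
    class LoadWorker implements TransactionWorker {
        private final Map map;                   // a transactional StoredMap, assumed
        LoadWorker(Map map) { this.map = map; }
        public void doWork() throws Exception {
            map.put("one", "1");                 // throwing here aborts the transaction
            map.put("two", "2");
        }
    }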

+


+ +

+ + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ voiddoWork() + +
+          Perform the work for a single transaction.
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+doWork

+
+public void doWork()
+            throws Exception
+
+
Perform the work for a single transaction. +

+

+ +
Throws: +
Exception
See Also:
TransactionRunner.run(com.sleepycat.collections.TransactionWorker)
+
+
+ +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/TupleSerialFactory.html b/db/docs/java/com/sleepycat/collections/TupleSerialFactory.html new file mode 100644 index 000000000..bdd7bb889 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/TupleSerialFactory.html @@ -0,0 +1,354 @@ + + + + + + +TupleSerialFactory (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.collections +
+Class TupleSerialFactory

+
+java.lang.Object
+  extended bycom.sleepycat.collections.TupleSerialFactory
+
+
+
+
public class TupleSerialFactory
extends Object
+ +

+Creates stored collections having tuple keys and serialized entity values. + The entity classes must implement the java.io.Serializable and + MarshalledTupleKeyEntity interfaces. The key classes must either implement + the MarshalledTupleEntry interface or be one of the Java primitive type + classes. Underlying binding objects are created automatically. +

+ +
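A hedged sketch of the factory in use; catalog, partDb and Part are assumptions (Part must implement MarshalledTupleKeyEntity and java.io.Serializable, as stated above):

    TupleSerialFactory factory = new TupleSerialFactory(catalog);
    StoredSortedMap partMap = factory.newSortedMap(partDb, String.class, Part.class, true);
    SecondaryKeyCreator bySupplier = factory.getKeyCreator(Part.class, "supplier");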

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
TupleSerialFactory(ClassCatalog catalog) + +
+          Creates a tuple-serial factory for given environment and class catalog.
+Method Summary
+ ClassCataloggetCatalog() + +
+          Returns the class catalog associated with this factory.
+ TupleSerialMarshalledKeyCreatorgetKeyCreator(Class valueBaseClass, + String keyName) + +
+          Creates a SecondaryKeyCreator object for use in configuring + a SecondaryDatabase.
+ StoredMapnewMap(Database db, + Class keyClass, + Class valueBaseClass, + boolean writeAllowed) + +
+          Creates a map from a previously opened Database object.
+ StoredSortedMapnewSortedMap(Database db, + Class keyClass, + Class valueBaseClass, + boolean writeAllowed) + +
+          Creates a sorted map from a previously opened Database object.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+TupleSerialFactory

+
+public TupleSerialFactory(ClassCatalog catalog)
+
+
Creates a tuple-serial factory for given environment and class catalog. +

+

+ + + + + + + + +
+Method Detail
+ +

+getCatalog

+
+public final ClassCatalog getCatalog()
+
+
Returns the class catalog associated with this factory. +

+

+
+
+
+
+ +

+newMap

+
+public StoredMap newMap(Database db,
+                        Class keyClass,
+                        Class valueBaseClass,
+                        boolean writeAllowed)
+
+
Creates a map from a previously opened Database object. +

+

+
Parameters:
db - the previously opened Database object.
keyClass - is the class used for map keys. It must implement the + MarshalledTupleEntry interface or be + one of the Java primitive type classes.
valueBaseClass - the base class of the entity values for this + store. It must implement the MarshalledTupleKeyEntity interface.
writeAllowed - is true to create a read-write collection or false + to create a read-only collection.
+
+
+
+ +

+newSortedMap

+
+public StoredSortedMap newSortedMap(Database db,
+                                    Class keyClass,
+                                    Class valueBaseClass,
+                                    boolean writeAllowed)
+
+
Creates a sorted map from a previously opened Database object. +

+

+
Parameters:
db - the previously opened Database object.
keyClass - is the class used for map keys. It must implement the + MarshalledTupleEntry interface or be + one of the Java primitive type classes.
valueBaseClass - the base class of the entity values for this + store. It must implement the MarshalledTupleKeyEntity interface.
writeAllowed - is true to create a read-write collection or false + to create a read-only collection.
+
+
+
+ +

getKeyCreator

public TupleSerialMarshalledKeyCreator getKeyCreator(Class valueBaseClass,
                                                     String keyName)

    Creates a SecondaryKeyCreator object for use in configuring a
    SecondaryDatabase.  The returned object implements the
    SecondaryKeyCreator interface.

    Parameters:
        valueBaseClass - the base class of the entity values for this store.
            It must implement the MarshalledTupleKeyEntity interface.
        keyName - the key name passed to the
            MarshalledTupleKeyEntity.marshalSecondaryKey(java.lang.String,
            com.sleepycat.bind.tuple.TupleOutput) method to identify the
            secondary key.
+
+
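Taken together, the methods above are typically used like this: a ClassCatalog is opened once, handed to the factory, and the factory then builds collection views over each entity database. The sketch below is minimal and hedged: the ClassCatalog import path (com.sleepycat.bind.serial) is an assumption, String.class is used as one of the supported "Java primitive type" key classes, and the entity class is supplied by the caller.

```java
import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.collections.StoredSortedMap;
import com.sleepycat.collections.TupleSerialFactory;
import com.sleepycat.db.Database;

public class TupleSerialFactoryExample {

    // Builds a read-write sorted map view over an already-open Database.
    // valueBaseClass is the application's entity class and must implement
    // MarshalledTupleKeyEntity; String keys stand in for one of the
    // supported "Java primitive type classes".
    static StoredSortedMap openEntityMap(Database db,
                                         ClassCatalog catalog,
                                         Class valueBaseClass) {
        TupleSerialFactory factory = new TupleSerialFactory(catalog);
        // getKeyCreator could be used in the same way to configure a
        // secondary index for this entity store.
        return factory.newSortedMap(db, String.class, valueBaseClass, true);
    }
}
```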
+ +
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/collections/class-use/CurrentTransaction.html b/db/docs/java/com/sleepycat/collections/class-use/CurrentTransaction.html
new file mode 100644
index 000000000..0b9e46400
--- /dev/null
+++ b/db/docs/java/com/sleepycat/collections/class-use/CurrentTransaction.html
@@ -0,0 +1,174 @@

Uses of Class com.sleepycat.collections.CurrentTransaction (Sleepycat Software, Inc. - Berkeley DB Java API)
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.collections.CurrentTransaction

+
+ + + + + + + + + +
+Packages that use CurrentTransaction
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
+  +

+ + + + + +
+Uses of CurrentTransaction in com.sleepycat.collections
+  +

+ + + + + + + + + +
Methods in com.sleepycat.collections that return CurrentTransaction
+static CurrentTransactionCurrentTransaction.getInstance(Environment env) + +
+          Gets the CurrentTransaction accessor for a specified Berkeley DB + environment.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/class-use/MapEntryParameter.html b/db/docs/java/com/sleepycat/collections/class-use/MapEntryParameter.html new file mode 100644 index 000000000..9fbdd92f1 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/class-use/MapEntryParameter.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.collections.MapEntryParameter (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.collections.MapEntryParameter

+
+No usage of com.sleepycat.collections.MapEntryParameter +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/class-use/PrimaryKeyAssigner.html b/db/docs/java/com/sleepycat/collections/class-use/PrimaryKeyAssigner.html new file mode 100644 index 000000000..2caba65fe --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/class-use/PrimaryKeyAssigner.html @@ -0,0 +1,217 @@ + + + + + + +Uses of Interface com.sleepycat.collections.PrimaryKeyAssigner (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Interface
com.sleepycat.collections.PrimaryKeyAssigner

+
+ + + + + + + + + +
+Packages that use PrimaryKeyAssigner
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
+  +

+ + + + + +
+Uses of PrimaryKeyAssigner in com.sleepycat.collections
+  +

+ + + + + + + + + + + + + + + + + + + + + + + +
Constructors in com.sleepycat.collections with parameters of type PrimaryKeyAssigner
StoredSortedMap(Database database, + EntryBinding keyBinding, + EntryBinding valueBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a sorted map view of a Database with a PrimaryKeyAssigner.
StoredSortedMap(Database database, + EntryBinding keyBinding, + EntityBinding valueEntityBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a sorted map entity view of a Database with a PrimaryKeyAssigner.
StoredMap(Database database, + EntryBinding keyBinding, + EntryBinding valueBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a map view of a Database with a PrimaryKeyAssigner.
StoredMap(Database database, + EntryBinding keyBinding, + EntityBinding valueEntityBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a map entity view of a Database with a PrimaryKeyAssigner.
StoredList(Database database, + EntryBinding valueBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a list view of a Database with a PrimaryKeyAssigner.
StoredList(Database database, + EntityBinding valueEntityBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a list entity view of a Database with a PrimaryKeyAssigner.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/class-use/StoredCollection.html b/db/docs/java/com/sleepycat/collections/class-use/StoredCollection.html new file mode 100644 index 000000000..4397acbd6 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/class-use/StoredCollection.html @@ -0,0 +1,241 @@ + + + + + + +Uses of Class com.sleepycat.collections.StoredCollection (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.collections.StoredCollection

+
+ + + + + + + + + +
+Packages that use StoredCollection
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
+  +

+ + + + + +
+Uses of StoredCollection in com.sleepycat.collections
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Subclasses of StoredCollection in com.sleepycat.collections
+ classStoredEntrySet + +
+          The Set returned by Map.entrySet().
+ classStoredKeySet + +
+          The Set returned by Map.keySet() and which can also be constructed directly + if a Map is not needed.
+ classStoredList + +
+          A List view of a Database.
+ classStoredSortedEntrySet + +
+          The SortedSet returned by Map.entrySet().
+ classStoredSortedKeySet + +
+          The SortedSet returned by Map.keySet() and which can also be constructed + directly if a Map is not needed.
+ classStoredSortedValueSet + +
+          The SortedSet returned by Map.values() and which can also be constructed + directly if a Map is not needed.
+ classStoredValueSet + +
+          The Set returned by Map.values() and Map.duplicates(), and which can also be + constructed directly if a Map is not needed.
+  +

+ + + + + + + + + +
Methods in com.sleepycat.collections that return StoredCollection
+ StoredCollectionStoredIterator.getCollection() + +
+          Returns the collection associated with this iterator.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/class-use/StoredCollections.html b/db/docs/java/com/sleepycat/collections/class-use/StoredCollections.html new file mode 100644 index 000000000..d9cf5f8e4 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/class-use/StoredCollections.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.collections.StoredCollections (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.collections.StoredCollections

+
+No usage of com.sleepycat.collections.StoredCollections +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/class-use/StoredContainer.html b/db/docs/java/com/sleepycat/collections/class-use/StoredContainer.html new file mode 100644 index 000000000..5c658e749 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/class-use/StoredContainer.html @@ -0,0 +1,268 @@ + + + + + + +Uses of Class com.sleepycat.collections.StoredContainer (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.collections.StoredContainer

+
+ + + + + + + + + +
+Packages that use StoredContainer
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
+  +

+ + + + + +
+Uses of StoredContainer in com.sleepycat.collections
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Subclasses of StoredContainer in com.sleepycat.collections
+ classStoredCollection + +
+          A abstract base class for all stored collections.
+ classStoredEntrySet + +
+          The Set returned by Map.entrySet().
+ classStoredKeySet + +
+          The Set returned by Map.keySet() and which can also be constructed directly + if a Map is not needed.
+ classStoredList + +
+          A List view of a Database.
+ classStoredMap + +
+          A Map view of a Database.
+ classStoredSortedEntrySet + +
+          The SortedSet returned by Map.entrySet().
+ classStoredSortedKeySet + +
+          The SortedSet returned by Map.keySet() and which can also be constructed + directly if a Map is not needed.
+ classStoredSortedMap + +
+          A SortedMap view of a Database.
+ classStoredSortedValueSet + +
+          The SortedSet returned by Map.values() and which can also be constructed + directly if a Map is not needed.
+ classStoredValueSet + +
+          The Set returned by Map.values() and Map.duplicates(), and which can also be + constructed directly if a Map is not needed.
+  +

+ + + + + + + + + +
Methods in com.sleepycat.collections with parameters of type StoredContainer
+ StoredIteratorStoredCollection.join(StoredContainer[] indices, + Object[] indexKeys, + JoinConfig joinConfig) + +
+          Returns an iterator representing an equality join of the indices and + index key values specified.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/class-use/StoredEntrySet.html b/db/docs/java/com/sleepycat/collections/class-use/StoredEntrySet.html new file mode 100644 index 000000000..62a2a0f68 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/class-use/StoredEntrySet.html @@ -0,0 +1,173 @@ + + + + + + +Uses of Class com.sleepycat.collections.StoredEntrySet (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.collections.StoredEntrySet

+
+ + + + + + + + + +
+Packages that use StoredEntrySet
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
+  +

+ + + + + +
+Uses of StoredEntrySet in com.sleepycat.collections
+  +

+ + + + + + + + + +
Subclasses of StoredEntrySet in com.sleepycat.collections
+ classStoredSortedEntrySet + +
+          The SortedSet returned by Map.entrySet().
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/class-use/StoredIterator.html b/db/docs/java/com/sleepycat/collections/class-use/StoredIterator.html new file mode 100644 index 000000000..7bbbac676 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/class-use/StoredIterator.html @@ -0,0 +1,185 @@ + + + + + + +Uses of Class com.sleepycat.collections.StoredIterator (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.collections.StoredIterator

+
+ + + + + + + + + +
+Packages that use StoredIterator
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
+  +

+ + + + + +
+Uses of StoredIterator in com.sleepycat.collections
+  +

+ + + + + + + + + + + + + +
Methods in com.sleepycat.collections that return StoredIterator
+ StoredIteratorStoredCollection.iterator(boolean writeAllowed) + +
+          Returns a read or read-write iterator over the elements in this + collection.
+ StoredIteratorStoredCollection.join(StoredContainer[] indices, + Object[] indexKeys, + JoinConfig joinConfig) + +
+          Returns an iterator representing an equality join of the indices and + index key values specified.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/class-use/StoredKeySet.html b/db/docs/java/com/sleepycat/collections/class-use/StoredKeySet.html new file mode 100644 index 000000000..2e2c13863 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/class-use/StoredKeySet.html @@ -0,0 +1,174 @@ + + + + + + +Uses of Class com.sleepycat.collections.StoredKeySet (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.collections.StoredKeySet

+
+ + + + + + + + + +
+Packages that use StoredKeySet
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
+  +

+ + + + + +
+Uses of StoredKeySet in com.sleepycat.collections
+  +

+ + + + + + + + + +
Subclasses of StoredKeySet in com.sleepycat.collections
+ classStoredSortedKeySet + +
+          The SortedSet returned by Map.keySet() and which can also be constructed + directly if a Map is not needed.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/class-use/StoredList.html b/db/docs/java/com/sleepycat/collections/class-use/StoredList.html new file mode 100644 index 000000000..d30124310 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/class-use/StoredList.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.collections.StoredList (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.collections.StoredList

+
+No usage of com.sleepycat.collections.StoredList +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/class-use/StoredMap.html b/db/docs/java/com/sleepycat/collections/class-use/StoredMap.html new file mode 100644 index 000000000..245a8b17f --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/class-use/StoredMap.html @@ -0,0 +1,192 @@ + + + + + + +Uses of Class com.sleepycat.collections.StoredMap (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.collections.StoredMap

+
+ + + + + + + + + +
+Packages that use StoredMap
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
+  +

+ + + + + +
+Uses of StoredMap in com.sleepycat.collections
+  +

+ + + + + + + + + +
Subclasses of StoredMap in com.sleepycat.collections
+ classStoredSortedMap + +
+          A SortedMap view of a Database.
+  +

+ + + + + + + + + +
Methods in com.sleepycat.collections that return StoredMap
+ StoredMapTupleSerialFactory.newMap(Database db, + Class keyClass, + Class valueBaseClass, + boolean writeAllowed) + +
+          Creates a map from a previously opened Database object.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/class-use/StoredSortedEntrySet.html b/db/docs/java/com/sleepycat/collections/class-use/StoredSortedEntrySet.html new file mode 100644 index 000000000..9e2b11441 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/class-use/StoredSortedEntrySet.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.collections.StoredSortedEntrySet (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.collections.StoredSortedEntrySet

+
+No usage of com.sleepycat.collections.StoredSortedEntrySet +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/class-use/StoredSortedKeySet.html b/db/docs/java/com/sleepycat/collections/class-use/StoredSortedKeySet.html new file mode 100644 index 000000000..b1e86dfec --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/class-use/StoredSortedKeySet.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.collections.StoredSortedKeySet (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.collections.StoredSortedKeySet

+
+No usage of com.sleepycat.collections.StoredSortedKeySet +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/class-use/StoredSortedMap.html b/db/docs/java/com/sleepycat/collections/class-use/StoredSortedMap.html new file mode 100644 index 000000000..fbf03070f --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/class-use/StoredSortedMap.html @@ -0,0 +1,176 @@ + + + + + + +Uses of Class com.sleepycat.collections.StoredSortedMap (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.collections.StoredSortedMap

+
+ + + + + + + + + +
+Packages that use StoredSortedMap
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
+  +

+ + + + + +
+Uses of StoredSortedMap in com.sleepycat.collections
+  +

+ + + + + + + + + +
Methods in com.sleepycat.collections that return StoredSortedMap
+ StoredSortedMapTupleSerialFactory.newSortedMap(Database db, + Class keyClass, + Class valueBaseClass, + boolean writeAllowed) + +
+          Creates a sorted map from a previously opened Database object.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/class-use/StoredSortedValueSet.html b/db/docs/java/com/sleepycat/collections/class-use/StoredSortedValueSet.html new file mode 100644 index 000000000..e56ed71bf --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/class-use/StoredSortedValueSet.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.collections.StoredSortedValueSet (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.collections.StoredSortedValueSet

+
+No usage of com.sleepycat.collections.StoredSortedValueSet +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/class-use/StoredValueSet.html b/db/docs/java/com/sleepycat/collections/class-use/StoredValueSet.html new file mode 100644 index 000000000..f89acd155 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/class-use/StoredValueSet.html @@ -0,0 +1,174 @@ + + + + + + +Uses of Class com.sleepycat.collections.StoredValueSet (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.collections.StoredValueSet

+
+ + + + + + + + + +
+Packages that use StoredValueSet
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
+  +

+ + + + + +
+Uses of StoredValueSet in com.sleepycat.collections
+  +

+ + + + + + + + + +
Subclasses of StoredValueSet in com.sleepycat.collections
+ classStoredSortedValueSet + +
+          The SortedSet returned by Map.values() and which can also be constructed + directly if a Map is not needed.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/class-use/TransactionRunner.html b/db/docs/java/com/sleepycat/collections/class-use/TransactionRunner.html new file mode 100644 index 000000000..5934803d2 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/class-use/TransactionRunner.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.collections.TransactionRunner (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.collections.TransactionRunner

+
+No usage of com.sleepycat.collections.TransactionRunner +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/class-use/TransactionWorker.html b/db/docs/java/com/sleepycat/collections/class-use/TransactionWorker.html new file mode 100644 index 000000000..31309de91 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/class-use/TransactionWorker.html @@ -0,0 +1,174 @@ + + + + + + +Uses of Interface com.sleepycat.collections.TransactionWorker (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Interface
com.sleepycat.collections.TransactionWorker

+
+ + + + + + + + + +
+Packages that use TransactionWorker
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
+  +

+ + + + + +
+Uses of TransactionWorker in com.sleepycat.collections
+  +

+ + + + + + + + + +
Methods in com.sleepycat.collections with parameters of type TransactionWorker
+ voidTransactionRunner.run(TransactionWorker worker) + +
+          Calls the doWork() method and, for transactional + environments, begins and ends a transaction.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/class-use/TupleSerialFactory.html b/db/docs/java/com/sleepycat/collections/class-use/TupleSerialFactory.html new file mode 100644 index 000000000..a003dc1dc --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/class-use/TupleSerialFactory.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.collections.TupleSerialFactory (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.collections.TupleSerialFactory

+
+No usage of com.sleepycat.collections.TupleSerialFactory +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/package-frame.html b/db/docs/java/com/sleepycat/collections/package-frame.html new file mode 100644 index 000000000..fcc3b750e --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/package-frame.html @@ -0,0 +1,77 @@ + + + + + + +com.sleepycat.collections (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + +com.sleepycat.collections + + + + +
+Interfaces  + +
+PrimaryKeyAssigner +
+TransactionWorker
+ + + + + + +
+Classes  + +
+CurrentTransaction +
+MapEntryParameter +
+StoredCollection +
+StoredCollections +
+StoredContainer +
+StoredEntrySet +
+StoredIterator +
+StoredKeySet +
+StoredList +
+StoredMap +
+StoredSortedEntrySet +
+StoredSortedKeySet +
+StoredSortedMap +
+StoredSortedValueSet +
+StoredValueSet +
+TransactionRunner +
+TupleSerialFactory
+ + + + diff --git a/db/docs/java/com/sleepycat/collections/package-summary.html b/db/docs/java/com/sleepycat/collections/package-summary.html new file mode 100644 index 000000000..991e17570 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/package-summary.html @@ -0,0 +1,257 @@ + + + + + + +com.sleepycat.collections (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+

+Package com.sleepycat.collections +

+Data access based on the standard Java collections API
+[reference guide]. +

+See: +
+          Description +

Interface Summary

 PrimaryKeyAssigner     An interface implemented to assign new primary key values.
 TransactionWorker      The interface implemented to perform the work within a transaction.

Class Summary

 CurrentTransaction     Provides access to the current transaction for the current thread
                        within the context of a Berkeley DB environment.
 MapEntryParameter      A simple Map.Entry implementation that can be used as an input
                        parameter.
 StoredCollection       An abstract base class for all stored collections.
 StoredCollections      This class consists exclusively of static methods that operate on
                        or return stored collections.
 StoredContainer        An abstract base class for all stored collections and maps.
 StoredEntrySet         The Set returned by Map.entrySet().
 StoredIterator         The Iterator returned by all stored collections.
 StoredKeySet           The Set returned by Map.keySet() and which can also be constructed
                        directly if a Map is not needed.
 StoredList             A List view of a Database.
 StoredMap              A Map view of a Database.
 StoredSortedEntrySet   The SortedSet returned by Map.entrySet().
 StoredSortedKeySet     The SortedSet returned by Map.keySet() and which can also be
                        constructed directly if a Map is not needed.
 StoredSortedMap        A SortedMap view of a Database.
 StoredSortedValueSet   The SortedSet returned by Map.values() and which can also be
                        constructed directly if a Map is not needed.
 StoredValueSet         The Set returned by Map.values() and Map.duplicates(), and which
                        can also be constructed directly if a Map is not needed.
 TransactionRunner      Starts a transaction, calls TransactionWorker.doWork(), and handles
                        transaction retry and exceptions.
 TupleSerialFactory     Creates stored collections having tuple keys and serialized entity
                        values.

+

Package com.sleepycat.collections Description

Data access based on the standard Java collections API
[reference guide].

Examples can be found in je/examples/com/sleepycat/examples/collections.
Build and run directions are in the installation notes.

+ +
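The class summary above mentions TransactionRunner and TransactionWorker; the sketch below shows how they are typically combined. It is a minimal, hypothetical example: the TransactionRunner(Environment) constructor and the doWork() signature are assumptions based on the summaries above, and writeSomething() is a placeholder for application work against stored collections.

```java
import com.sleepycat.collections.TransactionRunner;
import com.sleepycat.collections.TransactionWorker;
import com.sleepycat.db.Environment;

public class TransactionRunnerExample {

    static void runInTransaction(Environment env) throws Exception {
        TransactionRunner runner = new TransactionRunner(env);

        // run() begins a transaction (in transactional environments), calls
        // doWork(), and commits; it handles retry and exceptions as
        // described in the class summary above.
        runner.run(new TransactionWorker() {
            public void doWork() throws Exception {
                writeSomething();   // placeholder for stored-collection updates
            }
        });
    }

    // Hypothetical application method standing in for real collection work.
    static void writeSomething() { }
}
```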

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/package-tree.html b/db/docs/java/com/sleepycat/collections/package-tree.html new file mode 100644 index 000000000..bb7ac7b49 --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/package-tree.html @@ -0,0 +1,175 @@ + + + + + + +com.sleepycat.collections Class Hierarchy (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Hierarchy For Package com.sleepycat.collections +

+
+
+
Package Hierarchies:
All Packages
+
+

+Class Hierarchy +

+ +

+Interface Hierarchy +

+ +
+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/collections/package-use.html b/db/docs/java/com/sleepycat/collections/package-use.html new file mode 100644 index 000000000..f5e1cd35b --- /dev/null +++ b/db/docs/java/com/sleepycat/collections/package-use.html @@ -0,0 +1,226 @@ + + + + + + +Uses of Package com.sleepycat.collections (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Package
com.sleepycat.collections

+
+ + + + + + + + + +
+Packages that use com.sleepycat.collections
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Classes in com.sleepycat.collections used by com.sleepycat.collections
CurrentTransaction + +
+          Provides access to the current transaction for the current thread within the + context of a Berkeley DB environment.
PrimaryKeyAssigner + +
+          An interface implemented to assign new primary key values.
StoredCollection + +
+          A abstract base class for all stored collections.
StoredContainer + +
+          A abstract base class for all stored collections and maps.
StoredEntrySet + +
+          The Set returned by Map.entrySet().
StoredIterator + +
+          The Iterator returned by all stored collections.
StoredKeySet + +
+          The Set returned by Map.keySet() and which can also be constructed directly + if a Map is not needed.
StoredMap + +
+          A Map view of a Database.
StoredSortedMap + +
+          A SortedMap view of a Database.
StoredValueSet + +
+          The Set returned by Map.values() and Map.duplicates(), and which can also be + constructed directly if a Map is not needed.
TransactionWorker + +
+          The interface implemented to perform the work within a transaction.
+  +

+


Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/db/BtreePrefixCalculator.html b/db/docs/java/com/sleepycat/db/BtreePrefixCalculator.html
new file mode 100644
index 000000000..490436849
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/BtreePrefixCalculator.html
@@ -0,0 +1,223 @@

BtreePrefixCalculator (Sleepycat Software, Inc. - Berkeley DB Java API)
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

com.sleepycat.db
Interface BtreePrefixCalculator

public interface BtreePrefixCalculator

An interface specifying how Btree prefixes should be calculated.

+ +

+


+ +

Method Summary

 int   prefix(Database db, DatabaseEntry dbt1, DatabaseEntry dbt2)
           The application-specific Btree prefix callback.

Method Detail

prefix

public int prefix(Database db,
                  DatabaseEntry dbt1,
                  DatabaseEntry dbt2)

    The application-specific Btree prefix callback.

    Parameters:
        db - The enclosing database handle.
        dbt1 - A database entry representing a database key.
        dbt2 - A database entry representing a database key.
+
+
+ +
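For reference, a Btree prefix callback conventionally returns the number of initial bytes of the second (greater) key needed to distinguish it from the first. The sketch below is a minimal byte-wise example, not the library's built-in default; how the calculator is registered (for example via a DatabaseConfig setter) is an assumption and not documented on this page.

```java
import com.sleepycat.db.BtreePrefixCalculator;
import com.sleepycat.db.Database;
import com.sleepycat.db.DatabaseEntry;

// A byte-wise prefix calculator: returns how many leading bytes of dbt2
// are needed to tell it apart from dbt1.
public class ByteArrayPrefixCalculator implements BtreePrefixCalculator {
    public int prefix(Database db, DatabaseEntry dbt1, DatabaseEntry dbt2) {
        byte[] a = dbt1.getData();
        byte[] b = dbt2.getData();
        int limit = Math.min(dbt1.getSize(), dbt2.getSize());
        for (int i = 0; i < limit; i++) {
            if (a[dbt1.getOffset() + i] != b[dbt2.getOffset() + i]) {
                return i + 1;       // include the first differing byte
            }
        }
        return limit + 1;           // keys share a full prefix; one more byte
    }
}
```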
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/db/BtreeStats.html b/db/docs/java/com/sleepycat/db/BtreeStats.html
new file mode 100644
index 000000000..4c7122d65
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/BtreeStats.html
@@ -0,0 +1,740 @@

BtreeStats (Sleepycat Software, Inc. - Berkeley DB Java API)
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

com.sleepycat.db
Class BtreeStats

java.lang.Object
  extended by com.sleepycat.db.DatabaseStats
      extended by com.sleepycat.db.BtreeStats

public class BtreeStats
extends DatabaseStats

The BtreeStats object is used to return Btree or Recno database statistics.

+ +

+


+ +

Method Summary

 int      getDupPages()
              The number of database duplicate pages.
 int      getDupPagesFree()
              The number of bytes free in database duplicate pages.
 int      getEmptyPages()
              The number of empty database pages.
 int      getFree()
              The number of pages on the free list.
 int      getIntPages()
              The number of database internal pages.
 int      getIntPagesFree()
              The number of bytes free in database internal pages.
 int      getLeafPages()
              The number of database leaf pages.
 int      getLeafPagesFree()
              The number of bytes free in database leaf pages.
 int      getLevels()
              The number of levels in the database.
 int      getMagic()
              The magic number that identifies the file as a Btree database.
 int      getMaxKey()
              The maximum keys per page.
 int      getMetaFlags()
              The metadata flags.
 int      getMinKey()
              The minimum keys per page.
 int      getNumData()
              The number of key/data pairs or records in the database.
 int      getNumKeys()
              The number of keys or records in the database.
 int      getOverPages()
              The number of database overflow pages.
 int      getOverPagesFree()
              The number of bytes free in database overflow pages.
 int      getPageSize()
              The underlying database page size, in bytes.
 int      getReLen()
              The length of fixed-length records.
 int      getRePad()
              The padding byte value for fixed-length records.
 int      getVersion()
              The version of the Btree database.
 String   toString()
              For convenience, the BtreeStats class has a toString method
              that lists all the data fields.

Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, wait, wait, wait

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+getMagic

+
+public int getMagic()
+
+
The magic number that identifies the file as a Btree database. +

+

+
+
+
+
+ +

+getVersion

+
+public int getVersion()
+
+
The version of the Btree database. +

+

+
+
+
+
+ +

+getMetaFlags

+
+public int getMetaFlags()
+
+
The metadata flags. +

+

+
+
+
+
+ +

+getNumKeys

+
+public int getNumKeys()
+
+
The number of keys or records in the database. +

+ For the Btree Access Method, the number of keys in the database. If + the Database.getStats call was not configured by the + StatsConfig.setFast method or the database was configured + to support retrieval by record number, the count will be exact. + Otherwise, the count will be the last saved value unless it has + never been calculated, in which case it will be 0. +

+ For the Recno Access Method, the number of records in the database. + If the database was configured with mutable record numbers the count + will be exact. Otherwise, if the Database.getStats call + was configured by the StatsConfig.setFast method, the + count will be exact but will include deleted records; if the + Database.getStats call was not configured by the + StatsConfig.setFast method, the count will be exact and + will not include deleted records. +

+

+
+
+
+
+ +

+getNumData

+
+public int getNumData()
+
+
The number of key/data pairs or records in the database. +

+ For the Btree Access Method, the number of key/data pairs in the + database. If the Database.getStats call was not + configured by the StatsConfig.setFast method, the count + will be exact. Otherwise, the count will be the last saved value + unless it has never been calculated, in which case it will be 0. +

+ For the Recno Access Method, the number of records in the database. + If the database was configured with mutable record numbers, the + count will be exact. Otherwise, if the Database.getStats + call was configured by the StatsConfig.setFast method, the + count will be exact but will include deleted records; if the + Database.getStats call was not configured by the + StatsConfig.setFast method, the count will be exact and + will not include deleted records. +

+

+
+
+
+
+ +

+getPageSize

+
+public int getPageSize()
+
+
The underlying database page size, in bytes. +

+

+
+
+
+
+ +

+getMaxKey

+
+public int getMaxKey()
+
+
The maximum keys per page. +

+

+
+
+
+
+ +

+getMinKey

+
+public int getMinKey()
+
+
The minimum keys per page. +

+

+
+
+
+
+ +

+getReLen

+
+public int getReLen()
+
+
The length of fixed-length records. +

+

+
+
+
+
+ +

+getRePad

+
+public int getRePad()
+
+
The padding byte value for fixed-length records. +

+

+
+
+
+
+ +

+getLevels

+
+public int getLevels()
+
+
The number of levels in the database. +

+The information is only included if the Database.getStats call +was not configured by the StatsConfig.setFast method. +

+

+
+
+
+
+ +

+getIntPages

+
+public int getIntPages()
+
+
The number of database internal pages. +

+The information is only included if the Database.getStats call +was not configured by the StatsConfig.setFast method. +

+

+
+
+
+
+ +

+getLeafPages

+
+public int getLeafPages()
+
+
The number of database leaf pages. +

+The information is only included if the Database.getStats call +was not configured by the StatsConfig.setFast method. +

+

+
+
+
+
+ +

+getDupPages

+
+public int getDupPages()
+
+
The number of database duplicate pages. +

+The information is only included if the Database.getStats call +was not configured by the StatsConfig.setFast method. +

+

+
+
+
+
+ +

+getOverPages

+
+public int getOverPages()
+
+
The number of database overflow pages. +

+The information is only included if the Database.getStats call +was not configured by the StatsConfig.setFast method. +

+

+
+
+
+
+ +

+getEmptyPages

+
+public int getEmptyPages()
+
+
The number of empty database pages. +

+The information is only included if the Database.getStats call +was not configured by the StatsConfig.setFast method. +

+

+
+
+
+
+ +

+getFree

+
+public int getFree()
+
+
The number of pages on the free list. +

+The information is only included if the Database.getStats call +was not configured by the StatsConfig.setFast method. +

+

+
+
+
+
+ +

+getIntPagesFree

+
+public int getIntPagesFree()
+
+
The number of bytes free in database internal pages. +

+The information is only included if the Database.getStats call +was not configured by the StatsConfig.setFast method. +

+

+
+
+
+
+ +

+getLeafPagesFree

+
+public int getLeafPagesFree()
+
+
The number of bytes free in database leaf pages. +

+The information is only included if the Database.getStats call +was not configured by the StatsConfig.setFast method. +

+

+
+
+
+
+ +

+getDupPagesFree

+
+public int getDupPagesFree()
+
+
The number of bytes free in database duplicate pages. +

+The information is only included if the Database.getStats call +was not configured by the StatsConfig.setFast method. +

+

+
+
+
+
+ +

+getOverPagesFree

+
+public int getOverPagesFree()
+
+
The number of bytes free in database overflow pages. +

+The information is only included if the Database.getStats call +was not configured by the StatsConfig.setFast method. +

+

+
+
+
+
+ +

+toString

+
+public String toString()
+
+
For convenience, the BtreeStats class has a toString method + that lists all the data fields. +

+

+
+
+
+ +
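As the getNumKeys and getNumData entries above note, the accuracy of several fields depends on whether the statistics call was configured with StatsConfig.setFast. The sketch below is a minimal example of retrieving these statistics; the Database.getStats(Transaction, StatsConfig) signature and the cast to BtreeStats are assumptions based on the method names referenced on this page.

```java
import com.sleepycat.db.BtreeStats;
import com.sleepycat.db.Database;
import com.sleepycat.db.StatsConfig;

public class BtreeStatsExample {

    static void printBtreeStats(Database db) throws Exception {
        StatsConfig config = new StatsConfig();
        config.setFast(false);          // walk the database for exact counts

        // null means no explicit transaction is used for the stats call
        // (assumed signature, see lead-in above).
        BtreeStats stats = (BtreeStats) db.getStats(null, config);

        System.out.println("keys=" + stats.getNumKeys()
                           + " data=" + stats.getNumData()
                           + " levels=" + stats.getLevels()
                           + " pagesize=" + stats.getPageSize());
    }
}
```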
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/db/CacheFile.html b/db/docs/java/com/sleepycat/db/CacheFile.html
new file mode 100644
index 000000000..6d74e80f5
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/CacheFile.html
@@ -0,0 +1,475 @@

CacheFile (Sleepycat Software, Inc. - Berkeley DB Java API)
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

com.sleepycat.db
Class CacheFile

java.lang.Object
  extended by com.sleepycat.db.CacheFile

public class CacheFile
extends Object

This class allows applications to modify settings for a Database using the
Database.getCacheFile method.

+ +
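The sketch below shows how these per-file cache settings are typically adjusted. It is a minimal, hedged example: Database.getCacheFile() is assumed to be the accessor referenced above, and the chosen priority is only illustrative.

```java
import com.sleepycat.db.CacheFile;
import com.sleepycat.db.CacheFilePriority;
import com.sleepycat.db.Database;
import com.sleepycat.db.DatabaseException;

public class CacheFileExample {

    static void favorInCache(Database db) throws DatabaseException {
        // Per-file cache settings for an already-open database.
        CacheFile cacheFile = db.getCacheFile();

        // Bias the buffer pool toward keeping this file's pages cached.
        cacheFile.setPriority(CacheFilePriority.VERY_HIGH);

        // Report any configured cap on the backing file (0 means no limit).
        System.out.println("max file size: " + cacheFile.getMaximumSize());
    }
}
```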

+


+ +

Method Summary

 long                getMaximumSize()
                         Return the maximum size for the file backing the database, or 0
                         if no maximum file size has been configured.
 boolean             getNoFile()
                         Return if the opening of backing temporary files for in-memory
                         databases has been disallowed.
 CacheFilePriority   getPriority()
                         Return the cache priority for pages from the specified file.
 boolean             getUnlink()
                         Return if the file will be removed when the last reference to it
                         is closed.
 void                setMaximumSize(long bytes)
                         Set the maximum size for the file backing the database.
 void                setNoFile(boolean onoff)
                         Disallow opening backing temporary files for in-memory databases,
                         even if they expand to fill the entire cache.
 void                setPriority(CacheFilePriority priority)
                         Set the cache priority for pages from the specified file.
 void                setUnlink(boolean onoff)
                         Remove the file when the last reference to it is closed.

Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+getPriority

+
+public CacheFilePriority getPriority()
+                              throws DatabaseException
+
+
Return the cache priority for pages from the specified file. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
The cache priority for pages from the specified file. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+setPriority

+
+public void setPriority(CacheFilePriority priority)
+                 throws DatabaseException
+
+
Set the +cache priority for pages from the specified file. +

The priority of a page biases the replacement algorithm to be more + or less likely to discard a page when space is needed in the buffer + pool. The bias is temporary, and pages will eventually be discarded + if they are not referenced again. Setting the priority is only + advisory, and does not guarantee pages will be treated in a specific + way. +

+This method may be called at any time during the life of the application. +

+

+

+
Parameters:
priority - The cache priority for pages from the specified file. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getMaximumSize

+
+public long getMaximumSize()
+                    throws DatabaseException
+
+
Return the maximum size for the file backing the database, or 0 if + no maximum file size has been configured. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
The maximum size for the file backing the database, or 0 if + no maximum file size has been configured. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+setMaximumSize

+
+public void setMaximumSize(long bytes)
+                    throws DatabaseException
+
+
Set the +maximum size for the file backing the database. +

Attempts to allocate new pages in the file after the limit has been + reached will fail. +

+This method may be called at any time during the life of the application. +

+

+

+
Parameters:
bytes - The maximum size for the file backing the database. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getNoFile

+
+public boolean getNoFile()
+                  throws DatabaseException
+
+
Return if the opening of backing temporary files for in-memory + databases has been disallowed. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
If the opening of backing temporary files for in-memory + databases has been disallowed. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+setNoFile

+
+public void setNoFile(boolean onoff)
+               throws DatabaseException
+
+
Disallow opening backing temporary files for in-memory + databases, even if they expand to fill the entire cache. +

Attempts to create new file pages after the cache has been filled + will fail. +

+This method may be called at any time during the life of the application. +

+

+

+
Parameters:
onoff - If true, +disallow opening backing temporary files for in-memory + databases, even if they expand to fill the entire cache. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getUnlink

+
+public boolean getUnlink()
+                  throws DatabaseException
+
+
Return if the file will be removed when the last reference to it is + closed. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
If the file will be removed when the last reference to it is + closed. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+setUnlink

+
+public void setUnlink(boolean onoff)
+               throws DatabaseException
+
+
Remove the file when the last reference to it is closed. +

+This method may be called at any time during the life of the application. +

+

+

+
Parameters:
onoff - If true, +remove the file when the last reference to it is closed. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+ +
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/db/CacheFilePriority.html b/db/docs/java/com/sleepycat/db/CacheFilePriority.html
new file mode 100644
index 000000000..5b3d187e9
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/CacheFilePriority.html
@@ -0,0 +1,310 @@

CacheFilePriority (Sleepycat Software, Inc. - Berkeley DB Java API)
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class CacheFilePriority

+
+java.lang.Object
+  extended bycom.sleepycat.db.CacheFilePriority
+
+
+
+
public final class CacheFilePriority
extends Object
+ +

+Priorities that can be assigned to files in the cache. +

+ +

+


+ +

Field Summary

 static CacheFilePriority   DEFAULT
                                The default priority.
 static CacheFilePriority   HIGH
                                The second highest priority.
 static CacheFilePriority   LOW
                                The second lowest priority.
 static CacheFilePriority   VERY_HIGH
                                The highest priority: pages are the least likely to be discarded.
 static CacheFilePriority   VERY_LOW
                                The lowest priority: pages are the most likely to be discarded.

Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

+ + + + + + + + +
+Field Detail
+ +

+DEFAULT

+
+public static final CacheFilePriority DEFAULT
+
+
The default priority. +

+

+
+
+
+ +

+HIGH

+
+public static final CacheFilePriority HIGH
+
+
The second highest priority. +

+

+
+
+
+ +

+LOW

+
+public static final CacheFilePriority LOW
+
+
The second lowest priority. +

+

+
+
+
+ +

+VERY_HIGH

+
+public static final CacheFilePriority VERY_HIGH
+
+
The highest priority: pages are the least likely to be discarded. +

+

+
+
+
+ +

+VERY_LOW

+
+public static final CacheFilePriority VERY_LOW
+
+
The lowest priority: pages are the most likely to be discarded. +

+

+
+
+ + + + + + + +
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/db/CacheFileStats.html b/db/docs/java/com/sleepycat/db/CacheFileStats.html
new file mode 100644
index 000000000..9f84c714f
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/CacheFileStats.html
@@ -0,0 +1,401 @@

CacheFileStats (Sleepycat Software, Inc. - Berkeley DB Java API)
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

com.sleepycat.db
Class CacheFileStats

java.lang.Object
  extended by com.sleepycat.db.CacheFileStats

public class CacheFileStats
extends Object

Statistics for a file in the cache.

+ +

+


+ +

Method Summary

 int      getCacheHit()
              Requested pages found in the cache.
 int      getCacheMiss()
              Requested pages not found in the cache.
 String   getFileName()
              The name of the file.
 int      getMap()
              Requested pages mapped into the process' address space.
 int      getPageCreate()
              Pages created in the cache.
 int      getPageIn()
              Pages read into the cache.
 int      getPageOut()
              Pages written from the cache to the backing file.
 int      getPageSize()
              Page size in bytes.
 String   toString()
              For convenience, the CacheFileStats class has a toString method
              that lists all the data fields.

Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, wait, wait, wait

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+getFileName

+
+public String getFileName()
+
+
The name of the file. +

+

+
+
+
+
+ +

+getPageSize

+
+public int getPageSize()
+
+
Page size in bytes. +

+

+
+
+
+
+ +

+getMap

+
+public int getMap()
+
+
Requested pages mapped into the process' address space. +

+

+
+
+
+
+ +

+getCacheHit

+
+public int getCacheHit()
+
+
Requested pages found in the cache. +

+

+
+
+
+
+ +

+getCacheMiss

+
+public int getCacheMiss()
+
+
Requested pages not found in the cache. +

+

+
+
+
+
+ +

+getPageCreate

+
+public int getPageCreate()
+
+
Pages created in the cache. +

+

+
+
+
+
+ +

+getPageIn

+
+public int getPageIn()
+
+
Pages read into the cache. +

+

+
+
+
+
+ +

+getPageOut

+
+public int getPageOut()
+
+
Pages written from the cache to the backing file. +

+

+
+
+
+
+ +

+toString

+
+public String toString()
+
+
For convenience, the CacheFileStats class has a toString method + that lists all the data fields. +

+

+
+
+
+ +
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/db/CacheStats.html b/db/docs/java/com/sleepycat/db/CacheStats.html
new file mode 100644
index 000000000..efaa7a0bc
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/CacheStats.html
@@ -0,0 +1,965 @@

CacheStats (Sleepycat Software, Inc. - Berkeley DB Java API)
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

com.sleepycat.db
Class CacheStats

java.lang.Object
  extended by com.sleepycat.db.CacheStats

public class CacheStats
extends Object

Cache statistics for a database environment.

+ +
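The sketch below computes a simple hit rate from these counters. It is a minimal example; Environment.getCacheStats(StatsConfig) is assumed to be the accessor for this statistics object (only the getter names used below are documented on this page), and passing null requests the default statistics configuration.

```java
import com.sleepycat.db.CacheStats;
import com.sleepycat.db.Environment;

public class CacheStatsExample {

    static void printHitRate(Environment env) throws Exception {
        // Assumed accessor; null requests the default StatsConfig.
        CacheStats stats = env.getCacheStats(null);

        long hits = stats.getCacheHit();
        long misses = stats.getCacheMiss();
        long total = hits + misses;
        double rate = (total == 0) ? 0.0 : (double) hits / total;

        System.out.println("cache size = " + stats.getGbytes() + " GB + "
                           + stats.getBytes() + " bytes, hit rate = " + rate);
    }
}
```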

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ intgetAlloc() + +
+          Number of page allocations.
+ intgetAllocBuckets() + +
+          Number of hash buckets checked during allocation.
+ intgetAllocMaxBuckets() + +
+          Maximum number of hash buckets checked during an allocation.
+ intgetAllocMaxPages() + +
+          Maximum number of pages checked during an allocation.
+ intgetAllocPages() + +
+          Number of pages checked during allocation.
+ intgetBytes() + +
+          Bytes of cache (total cache size is st_gbytes + st_bytes).
+ intgetCacheHit() + +
+          Requested pages found in the cache.
+ intgetCacheMiss() + +
+          Requested pages not found in the cache.
+ intgetGbytes() + +
+          Gigabytes of cache (total cache size is st_gbytes + st_bytes).
+ intgetHashBuckets() + +
+          Number of hash buckets in buffer hash table.
+ intgetHashExamined() + +
+          Total number of hash elements traversed during hash table lookups.
+ intgetHashLongest() + +
+          The longest chain ever encountered in buffer hash table lookups.
+ intgetHashMaxWait() + +
+          The maximum number of times any hash bucket lock was waited for by + a thread of control.
+ intgetHashNowait() + +
+          The number of times that a thread of control was able to obtain a + hash bucket lock without waiting.
+ intgetHashSearches() + +
+          Total number of buffer hash table lookups.
+ intgetHashWait() + +
+          The number of times that a thread of control was forced to wait + before obtaining a hash bucket lock.
+ intgetMap() + +
+          Requested pages mapped into the process' address space (there is no + available information about whether or not this request caused disk I/O, + although examining the application page fault rate may be helpful).
+ intgetMaxOpenfd() + +
+          Maximum number of open file descriptors.
+ intgetMaxWrite() + +
+          The maximum number of sequential write operations scheduled by the library + when flushing dirty pages from the cache.
+ intgetMaxWriteSleep() + +
+          The number of microseconds the thread of control should pause before + scheduling further write operations.
+ intgetMmapSize() + +
+          Maximum file size for mmap.
+ intgetNumCache() + +
+          Number of caches.
+ intgetPageClean() + +
+          Clean pages currently in the cache.
+ intgetPageCreate() + +
+          Pages created in the cache.
+ intgetPageDirty() + +
+          Dirty pages currently in the cache.
+ intgetPageIn() + +
+          Pages read into the cache.
+ intgetPageOut() + +
+          Pages written from the cache to the backing file.
+ intgetPages() + +
+          Pages in the cache.
+ intgetPageTrickle() + +
+          Dirty pages written using Environment.trickleCacheWrite.
+ intgetRegionNowait() + +
+          The number of times that a thread of control was able to obtain a + region lock without waiting.
+ intgetRegionWait() + +
+          The number of times that a thread of control was forced to wait + before obtaining a region lock.
+ intgetRegSize() + +
+          Individual cache size.
+ intgetRoEvict() + +
+          Clean pages forced from the cache.
+ intgetRwEvict() + +
+          Dirty pages forced from the cache.
+ StringtoString() + +
+          For convenience, the CacheStats class has a toString method that + lists all the data fields.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+getGbytes

+
+public int getGbytes()
+
+
Gigabytes of cache (total cache size is st_gbytes + st_bytes). +

+

+
+
+
+
+ +

+getBytes

+
+public int getBytes()
+
+
Bytes of cache (total cache size is st_gbytes + st_bytes). +

+

+
+
+
+
+ +

+getNumCache

+
+public int getNumCache()
+
+
Number of caches. +

+

+
+
+
+
+ +

+getRegSize

+
+public int getRegSize()
+
+
Individual cache size. +

+

+
+
+
+
+ +

+getMap

+
+public int getMap()
+
+
Requested pages mapped into the process' address space (there is no + available information about whether or not this request caused disk I/O, + although examining the application page fault rate may be helpful). +

+

+
+
+
+
+ +

+getMmapSize

+
+public int getMmapSize()
+
+
Maximum file size for mmap. +

+

+
+
+
+
+ +

+getMaxOpenfd

+
+public int getMaxOpenfd()
+
+
Maximum number of open file descriptors. +

+

+
+
+
+
+ +

+getMaxWrite

+
+public int getMaxWrite()
+
+
The maximum number of sequential write operations scheduled by the library + when flushing dirty pages from the cache. +

+

+
+
+
+
+ +

+getMaxWriteSleep

+
+public int getMaxWriteSleep()
+
+
The number of microseconds the thread of control should pause before + scheduling further write operations. +

+

+
+
+
+
+ +

+getCacheHit

+
+public int getCacheHit()
+
+
Requested pages found in the cache. +

+

+
+
+
+
+ +

+getCacheMiss

+
+public int getCacheMiss()
+
+
Requested pages not found in the cache. +

+

+
+
+
+
+ +

+getPageCreate

+
+public int getPageCreate()
+
+
Pages created in the cache. +

+

+
+
+
+
+ +

+getPageIn

+
+public int getPageIn()
+
+
Pages read into the cache. +

+

+
+
+
+
+ +

+getPageOut

+
+public int getPageOut()
+
+
Pages written from the cache to the backing file. +

+

+
+
+
+
+ +

+getRoEvict

+
+public int getRoEvict()
+
+
Clean pages forced from the cache. +

+

+
+
+
+
+ +

+getRwEvict

+
+public int getRwEvict()
+
+
Dirty pages forced from the cache. +

+

+
+
+
+
+ +

+getPageTrickle

+
+public int getPageTrickle()
+
+
Dirty pages written using Environment.trickleCacheWrite. +

+

+
+
+
+
+ +

+getPages

+
+public int getPages()
+
+
Pages in the cache. +

+

+
+
+
+
+ +

+getPageClean

+
+public int getPageClean()
+
+
Clean pages currently in the cache. +

+

+
+
+
+
+ +

+getPageDirty

+
+public int getPageDirty()
+
+
Dirty pages currently in the cache. +

+

+
+
+
+
+ +

+getHashBuckets

+
+public int getHashBuckets()
+
+
Number of hash buckets in buffer hash table. +

+

+
+
+
+
+ +

+getHashSearches

+
+public int getHashSearches()
+
+
Total number of buffer hash table lookups. +

+

+
+
+
+
+ +

+getHashLongest

+
+public int getHashLongest()
+
+
The longest chain ever encountered in buffer hash table lookups. +

+

+
+
+
+
+ +

+getHashExamined

+
+public int getHashExamined()
+
+
Total number of hash elements traversed during hash table lookups. +

+

+
+
+
+
+ +

+getHashNowait

+
+public int getHashNowait()
+
+
The number of times that a thread of control was able to obtain a + hash bucket lock without waiting. +

+

+
+
+
+
+ +

+getHashWait

+
+public int getHashWait()
+
+
The number of times that a thread of control was forced to wait + before obtaining a hash bucket lock. +

+

+
+
+
+
+ +

+getHashMaxWait

+
+public int getHashMaxWait()
+
+
The maximum number of times any hash bucket lock was waited for by + a thread of control. +

+

+
+
+
+
+ +

+getRegionNowait

+
+public int getRegionNowait()
+
+
The number of times that a thread of control was able to obtain a + region lock without waiting. +

+

+
+
+
+
+ +

+getRegionWait

+
+public int getRegionWait()
+
+
The number of times that a thread of control was forced to wait + before obtaining a region lock. +

+

+
+
+
+
+ +

+getAlloc

+
+public int getAlloc()
+
+
Number of page allocations. +

+

+
+
+
+
+ +

+getAllocBuckets

+
+public int getAllocBuckets()
+
+
Number of hash buckets checked during allocation. +

+

+
+
+
+
+ +

+getAllocMaxBuckets

+
+public int getAllocMaxBuckets()
+
+
Maximum number of hash buckets checked during an allocation. +

+

+
+
+
+
+ +

+getAllocPages

+
+public int getAllocPages()
+
+
Number of pages checked during allocation. +

+

+
+
+
+
+ +

+getAllocMaxPages

+
+public int getAllocMaxPages()
+
+
Maximum number of pages checked during an allocation. +

+

+
+
+
+
+ +

+toString

+
+public String toString()
+
+
For convenience, the CacheStats class has a toString method that + lists all the data fields. +
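+The following is a minimal sketch of reading these statistics; it assumes an open
+Environment handle named env and that the snapshot is obtained through
+Environment.getCacheStats (the retrieval method itself is not documented on this page).
+
+    CacheStats stats = env.getCacheStats(null);        // null: default statistics options (assumed)
+    int gets = stats.getCacheHit() + stats.getCacheMiss();
+    double hitRate = (gets == 0) ? 0.0 : (double) stats.getCacheHit() / gets;
+    System.out.println(stats);                         // toString() lists all the data fields
+    System.out.println("cache hit rate: " + hitRate);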

+

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/CheckpointConfig.html b/db/docs/java/com/sleepycat/db/CheckpointConfig.html new file mode 100644 index 000000000..e6ad60cd4 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/CheckpointConfig.html @@ -0,0 +1,422 @@ + + + + + + +CheckpointConfig (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class CheckpointConfig

+
+java.lang.Object
+  extended by com.sleepycat.db.CheckpointConfig
+
+
+
+
public class CheckpointConfig
extends Object
+ +

+Specifies the attributes of an application-invoked checkpoint operation. +
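+For example, a sketch of requesting a checkpoint with explicit thresholds (env is
+an assumed open Environment handle; Environment.checkpoint is the call referenced
+under the DEFAULT field below):
+
+    CheckpointConfig config = new CheckpointConfig();
+    config.setKBytes(512);     // checkpoint if more than 512KB of log data has been written
+    config.setMinutes(10);     // or if more than 10 minutes have passed
+    env.checkpoint(config);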

+ +

+


+ +

+ + + + + + + + + + + + + + +
+Field Summary
+static CheckpointConfig DEFAULT + +
+          Default configuration used if null is passed to + Environment.checkpoint.
+  + + + + + + + + + + +
+Constructor Summary
CheckpointConfig() + +
+          An instance created using the default constructor is initialized + with the system's default settings.
+  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ boolean getForce() + +
+          Return the configuration of the checkpoint force option.
+ int getKBytes() + +
+          Return the checkpoint log data threshold, in kilobytes.
+ int getMinutes() + +
+          Return the checkpoint time threshold, in minutes.
+ void setForce(boolean force) + +
+          Configure the checkpoint force option.
+ void setKBytes(int kbytes) + +
+          Configure the checkpoint log data threshold, in kilobytes.
+ void setMinutes(int minutes) + +
+          Configure the checkpoint time threshold, in minutes.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + +
+Field Detail
+ +

+DEFAULT

+
+public static final CheckpointConfig DEFAULT
+
+
Default configuration used if null is passed to + Environment.checkpoint. +

+

+
+
+ + + + + + + + +
+Constructor Detail
+ +

+CheckpointConfig

+
+public CheckpointConfig()
+
+
An instance created using the default constructor is initialized + with the system's default settings. +

+

+ + + + + + + + +
+Method Detail
+ +

+setKBytes

+
+public void setKBytes(int kbytes)
+
+
Configure the checkpoint log data threshold, in kilobytes. +

+ The default is 0 for this class and the database environment. +

+

+

+
Parameters:
kbytes - A checkpoint will be performed if more than kbytes kilobytes of log data have + been written since the last checkpoint.
+
+
+
+ +

+getKBytes

+
+public int getKBytes()
+
+
Return the checkpoint log data threshold, in kilobytes. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
The checkpoint log data threshold, in kilobytes.
+
+
+
+ +

+setMinutes

+
+public void setMinutes(int minutes)
+
+
Configure the checkpoint time threshold, in minutes. +

+ The default is 0 for this class and the database environment. +

+

+

+
Parameters:
minutes - A checkpoint is performed if more than the specified number of minutes have passed since + the last checkpoint.
+
+
+
+ +

+getMinutes

+
+public int getMinutes()
+
+
Return the checkpoint time threshold, in minutes. +

+

+

+ +
Returns:
The checkpoint time threshold, in minutes.
+
+
+
+ +

+setForce

+
+public void setForce(boolean force)
+
+
Configure the checkpoint force option. +

+ The default is false for this class and the database environment. +

+

+

+
Parameters:
force - If set to true, force a checkpoint, even if there has been no + activity since the last checkpoint.
+
+
+
+ +

+getForce

+
+public boolean getForce()
+
+
Return the configuration of the checkpoint force option. +

+

+

+ +
Returns:
The configuration of the checkpoint force option.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/Cursor.html b/db/docs/java/com/sleepycat/db/Cursor.html new file mode 100644 index 000000000..c7ace9c03 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/Cursor.html @@ -0,0 +1,1605 @@ + + + + + + +Cursor (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class Cursor

+
+java.lang.Object
+  extended by com.sleepycat.db.Cursor
+
+
+
Direct Known Subclasses:
SecondaryCursor
+
+
+
+
public class Cursor
extends Object
+ +

+A database cursor. Cursors are used for operating on collections of +records, for iterating over a database, and for saving handles to +individual records, so that they can be modified after they have been +read. +

+Cursors may be used by multiple threads, but only serially. That is, +the application must serialize access to the handle. +

+If the cursor is to be used to perform operations on behalf of a +transaction, the cursor must be opened and closed within the context of +that single transaction. +

+Once the cursor close method has been called, the handle may not be +accessed again, regardless of the close method's success or failure. +

+To obtain a cursor with default attributes: +

+    Cursor cursor = myDatabase.openCursor(txn, null);
+
+To customize the attributes of a cursor, use a CursorConfig object. +
+    CursorConfig config = new CursorConfig();
+    config.setDirtyRead(true);
+    Cursor cursor = myDatabase.openCursor(txn, config);
+
+

+Modifications to the database during a sequential scan will be reflected +in the scan; that is, records inserted behind a cursor will not be +returned while records inserted in front of a cursor will be returned. +In Queue and Recno databases, missing entries (that is, entries that +were never explicitly created or that were created and then deleted) +will be ignored during a sequential scan. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ void close() + +
+          Discard the cursor.
+ int count() + +
+          Return a count of the number of data items for the key to which the + cursor refers.
+ OperationStatus delete() + +
+          Delete the key/data pair to which the cursor refers.
+ Cursor dup(boolean samePosition) + +
+          Return a new cursor with the same transaction and locker ID as the + original cursor.
+ CursorConfig getConfig() + +
+          Return this cursor's configuration.
+ OperationStatus getCurrent(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Returns the key/data pair to which the cursor refers.
+ Database getDatabase() + +
+          Return the Database handle associated with this Cursor.
+ OperationStatus getFirst(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the first key/data pair of the database, and return +that pair.
+ OperationStatus getLast(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the last key/data pair of the database, and return +that pair.
+ OperationStatus getNext(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the next key/data pair and return that pair.
+ OperationStatus getNextDup(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          If the next key/data pair of the database is a duplicate data record for +the current key/data pair, move the cursor to the next key/data pair +of the database and return that pair.
+ OperationStatus getNextNoDup(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the next non-duplicate key/data pair and return +that pair.
+ OperationStatus getPrev(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the previous key/data pair and return that pair.
+ OperationStatus getPrevDup(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          If the previous key/data pair of the database is a duplicate data record +for the current key/data pair, move the cursor to the previous key/data +pair of the database and return that pair.
+ OperationStatus getPrevNoDup(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the previous non-duplicate key/data pair and return +that pair.
+ OperationStatus getRecordNumber(DatabaseEntry data, + LockMode lockMode) + +
+          Return the record number associated with the cursor.
+ OperationStatus getSearchBoth(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the specified key/data pair, where both the key and +data items must match.
+ OperationStatus getSearchBothRange(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the specified key and closest matching data item of the +database.
+ OperationStatus getSearchKey(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the given key of the database, and return the datum +associated with the given key.
+ OperationStatus getSearchKeyRange(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the closest matching key of the database, and return +the data item associated with the matching key.
+ OperationStatus getSearchRecordNumber(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the specific numbered record of the database, and +return the associated key/data pair.
+ OperationStatus put(DatabaseEntry key, + DatabaseEntry data) + +
+          Store a key/data pair into the database.
+ OperationStatus putAfter(DatabaseEntry data) + +
+          Store a key/data pair into the database.
+ OperationStatus putBefore(DatabaseEntry data) + +
+          Store a key/data pair into the database.
+ OperationStatus putCurrent(DatabaseEntry data) + +
+          Store a key/data pair into the database.
+ OperationStatus putKeyFirst(DatabaseEntry key, + DatabaseEntry data) + +
+          Store a key/data pair into the database.
+ OperationStatus putKeyLast(DatabaseEntry key, + DatabaseEntry data) + +
+          Store a key/data pair into the database.
+ OperationStatus putNoDupData(DatabaseEntry key, + DatabaseEntry data) + +
+          Store a key/data pair into the database.
+ OperationStatus putNoOverwrite(DatabaseEntry key, + DatabaseEntry data) + +
+          Store a key/data pair into the database.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+getDatabase

+
+public Database getDatabase()
+
+
Return the Database handle associated with this Cursor. +

+

+

+ +
Returns:
The Database handle associated with this Cursor. +

+
+
+
+ +

+getConfig

+
+public CursorConfig getConfig()
+
+
Return this cursor's configuration. +

+ This may differ from the configuration used to open this object if + the cursor existed previously. +

+

+

+ +
Returns:
This cursor's configuration. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+close

+
+public void close()
+           throws DatabaseException
+
+
Discard the cursor. +

+ This method may throw a DeadlockException, signaling the enclosing + transaction should be aborted. If the application is already + intending to abort the transaction, this error can be ignored, and + the application should proceed. +

+ The cursor handle may not be used again after this method has been + called, regardless of the method's success or failure. +

+

+

+

+ +
Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+count

+
+public int count()
+          throws DatabaseException
+
+
Return a count of the number of data items for the key to which the + cursor refers. +

+

+

+ +
Returns:
A count of the number of data items for the key to which the cursor + refers. +

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

DatabaseException - if a failure occurs.
+
+
+
+ +

+dup

+
+public Cursor dup(boolean samePosition)
+           throws DatabaseException
+
+
Return a new cursor with the same transaction and locker ID as the + original cursor. +

+ This is useful when an application is using locking and requires two + or more cursors in the same thread of control. +

+

+

+
Parameters:
samePosition - If true, the newly created cursor is initialized to refer to the + same position in the database as the original cursor (if any) and + hold the same locks (if any). If false, or the original cursor does + not hold a database position and locks, the returned cursor is + uninitialized and will behave like a newly created cursor. +

+

Returns:
A new cursor with the same transaction and locker ID as the original + cursor. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+delete

+
+public OperationStatus delete()
+                       throws DatabaseException
+
+
Delete the key/data pair to which the cursor refers. +

+ When called on a cursor opened on a database that has been made into a + secondary index, this method deletes the key/data pair from the primary database + and all secondary indices. +

+ The cursor position is unchanged after a delete, and subsequent calls +to cursor functions expecting the cursor to refer to an existing key +will fail. +

+

+

+

+ +
Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

DatabaseException - if a failure occurs.
+
+
+
+ +

+put

+
+public OperationStatus put(DatabaseEntry key,
+                           DatabaseEntry data)
+                    throws DatabaseException
+
+
Store a key/data pair into the database. +

+If the put method succeeds, the cursor is always positioned to refer to +the newly inserted item. If the put method fails for any reason, the +state of the cursor will be unchanged. +

+If the key already appears in the database and duplicates are supported, +the new data value is inserted at the correct sorted location. If the +key already appears in the database and duplicates are not supported, +the existing key/data pair will be replaced. +

+

+

+
Parameters:
key - the key DatabaseEntry operated on. +

data - the data DatabaseEntry stored. +

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs.
+
+
+
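+For example, a minimal sketch of a single insertion through an open cursor (the key
+and data values are placeholders):
+
+    DatabaseEntry key = new DatabaseEntry("alpha".getBytes());
+    DatabaseEntry data = new DatabaseEntry("one".getBytes());
+    OperationStatus status = cursor.put(key, data);
+    // On success the cursor is now positioned on the newly inserted item.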
+ +

+putNoOverwrite

+
+public OperationStatus putNoOverwrite(DatabaseEntry key,
+                                      DatabaseEntry data)
+                               throws DatabaseException
+
+
Store a key/data pair into the database. +

+If the putNoOverwrite method succeeds, the cursor is always positioned to refer to +the newly inserted item. If the putNoOverwrite method fails for any reason, the +state of the cursor will be unchanged. +

+If the key already appears in the database, putNoOverwrite will return +OperationStatus.KEYEXIST. +

+

+

+
Parameters:
key - the key DatabaseEntry operated on. +

data - the data DatabaseEntry stored. +

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs.
+
+
+
+ +

+putNoDupData

+
+public OperationStatus putNoDupData(DatabaseEntry key,
+                                    DatabaseEntry data)
+                             throws DatabaseException
+
+
Store a key/data pair into the database. +

+If the putNoDupData method succeeds, the cursor is always positioned to refer to +the newly inserted item. If the putNoDupData method fails for any reason, the +state of the cursor will be unchanged. +

+In the case of the Btree and Hash access methods, insert +the specified key/data pair into the database, unless a key/data pair +comparing equally to it already exists in the database. If a matching +key/data pair already exists in the database, OperationStatus.KEYEXIST is returned. +

+This method may only be called if the underlying database has been +configured to support sorted duplicate data items. +

+This method may not be called for the Queue or Recno access methods. +

+

+

+
Parameters:
key - the key DatabaseEntry operated on. +

data - the data DatabaseEntry stored. +

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs.
+
+
+
+ +

+putCurrent

+
+public OperationStatus putCurrent(DatabaseEntry data)
+                           throws DatabaseException
+
+
Store a key/data pair into the database. +

+If the putCurrent method succeeds, the cursor is always positioned to refer to +the newly inserted item. If the putCurrent method fails for any reason, the +state of the cursor will be unchanged. +

+Overwrite the data of the key/data pair to which the cursor refers with +the specified data item. This method will fail and throw an exception +if the cursor currently refers to an already-deleted key/data pair. +

+

+

+
Parameters:
data - the data DatabaseEntry stored. +

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs.
+
+
+
+ +

+putAfter

+
+public OperationStatus putAfter(DatabaseEntry data)
+                         throws DatabaseException
+
+
Store a key/data pair into the database. +

+If the putAfter method succeeds, the cursor is always positioned to refer to +the newly inserted item. If the putAfter method fails for any reason, the +state of the cursor will be unchanged. +

+In the case of the Btree and Hash access methods, insert the data +element as a duplicate element of the key to which the cursor refers. +The new element appears immediately +after +the current cursor position. It is an error to call this method if the +underlying Btree or Hash database does not support duplicate data items. +The key parameter is ignored. +

+In the case of the Hash access method, the putAfter method will fail and +throw an exception if the current cursor record has already been deleted. +

+In the case of the Recno access method, it is an error to call this +method if the underlying Recno database was not configured to have +mutable record numbers. A new key is created, all records after the +inserted item are automatically renumbered, and the key of the new +record is returned in the key parameter. The initial value of the key +parameter is ignored. +

+The putAfter method may not be called for the Queue access method. +

+

+

+
Parameters:
data - the data DatabaseEntry stored. +

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs.
+
+
+
+ +

+putBefore

+
+public OperationStatus putBefore(DatabaseEntry data)
+                          throws DatabaseException
+
+
Store a key/data pair into the database. +

+If the putBefore method succeeds, the cursor is always positioned to refer to +the newly inserted item. If the putBefore method fails for any reason, the +state of the cursor will be unchanged. +

+In the case of the Btree and Hash access methods, insert the data +element as a duplicate element of the key to which the cursor refers. +The new element appears immediately +before +the current cursor position. It is an error to call this method if the +underlying Btree or Hash database does not support duplicate data items. +The key parameter is ignored. +

+In the case of the Hash access method, the putBefore method will fail and +throw an exception if the current cursor record has already been deleted. +

+In the case of the Recno access method, it is an error to call this +method if the underlying Recno database was not configured to have +mutable record numbers. A new key is created, all records after the +inserted item are automatically renumbered, and the key of the new +record is returned in the key parameter. The initial value of the key +parameter is ignored. +

+The putBefore method may not be called for the Queue access method. +

+

+

+
Parameters:
data - the data DatabaseEntry stored. +

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs.
+
+
+
+ +

+putKeyFirst

+
+public OperationStatus putKeyFirst(DatabaseEntry key,
+                                   DatabaseEntry data)
+                            throws DatabaseException
+
+
Store a key/data pair into the database. +

+If the putKeyFirst method succeeds, the cursor is always positioned to refer to +the newly inserted item. If the putKeyFirst method fails for any reason, the +state of the cursor will be unchanged. +

+In the case of the Btree and Hash access methods, insert the specified +key/data pair into the database. +

+If the underlying database supports duplicate data items, and if the +key already exists in the database and a duplicate sort function has +been specified, the inserted data item is added in its sorted location. +If the key already exists in the database and no duplicate sort function +has been specified, the inserted data item is added as the +first +of the data items for that key. +

+The putKeyFirst method may not be called for the Queue or Recno access methods. +

+

+

+
Parameters:
key - the key DatabaseEntry operated on. +

data - the data DatabaseEntry stored. +

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs.
+
+
+
+ +

+putKeyLast

+
+public OperationStatus putKeyLast(DatabaseEntry key,
+                                  DatabaseEntry data)
+                           throws DatabaseException
+
+
Store a key/data pair into the database. +

+If the putKeyLast method succeeds, the cursor is always positioned to refer to +the newly inserted item. If the putKeyLast method fails for any reason, the +state of the cursor will be unchanged. +

+In the case of the Btree and Hash access methods, insert the specified +key/data pair into the database. +

+If the underlying database supports duplicate data items, and if the +key already exists in the database and a duplicate sort function has +been specified, the inserted data item is added in its sorted location. +If the key already exists in the database and no duplicate sort function +has been specified, the inserted data item is added as the +last +of the data items for that key. +

+The putKeyLast method may not be called for the Queue or Recno access methods. +

+

+

+
Parameters:
key - the key DatabaseEntry operated on. +

data - the data DatabaseEntry stored. +

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs.
+
+
+
+ +

+getCurrent

+
+public OperationStatus getCurrent(DatabaseEntry key,
+                                  DatabaseEntry data,
+                                  LockMode lockMode)
+                           throws DatabaseException
+
+
Returns the key/data pair to which the cursor refers. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the data +returned as output. Multiple results can be retrieved by passing an object +that is a subclass of MultipleEntry, otherwise its byte array does not +need to be initialized by the caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.KEYEMPTY if the key/data pair at the cursor +position has been deleted; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +

+getFirst

+
+public OperationStatus getFirst(DatabaseEntry key,
+                                DatabaseEntry data,
+                                LockMode lockMode)
+                         throws DatabaseException
+
+
Move the cursor to the first key/data pair of the database, and return +that pair. If the first key has duplicate values, the first data item +in the set of duplicates is returned. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the data +returned as output. Multiple results can be retrieved by passing an object +that is a subclass of MultipleEntry, otherwise its byte array does not +need to be initialized by the caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +

+getLast

+
+public OperationStatus getLast(DatabaseEntry key,
+                               DatabaseEntry data,
+                               LockMode lockMode)
+                        throws DatabaseException
+
+
Move the cursor to the last key/data pair of the database, and return +that pair. If the last key has duplicate values, the last data item in +the set of duplicates is returned. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the data +returned as output. Its byte array does not need to be initialized by the +caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +

+getNext

+
+public OperationStatus getNext(DatabaseEntry key,
+                               DatabaseEntry data,
+                               LockMode lockMode)
+                        throws DatabaseException
+
+
Move the cursor to the next key/data pair and return that pair. If +the matching key has duplicate values, the first data item in the set +of duplicates is returned. +

+If the cursor is not yet initialized, move the cursor to the first +key/data pair of the database, and return that pair. Otherwise, the +cursor is moved to the next key/data pair of the database, and that pair +is returned. In the presence of duplicate key values, the value of the +key may not change. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the data +returned as output. Multiple results can be retrieved by passing an object +that is a subclass of MultipleEntry, otherwise its byte array does not +need to be initialized by the caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
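+For example, a sketch of a full sequential scan with getNext, using the myDatabase
+handle from the class example above and null for the default lock mode:
+
+    Cursor cursor = myDatabase.openCursor(null, null);
+    try {
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        while (cursor.getNext(key, data, null) == OperationStatus.SUCCESS) {
+            // process key.getData() and data.getData()
+        }
+    } finally {
+        cursor.close();
+    }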
+ +

+getNextDup

+
+public OperationStatus getNextDup(DatabaseEntry key,
+                                  DatabaseEntry data,
+                                  LockMode lockMode)
+                           throws DatabaseException
+
+
If the next key/data pair of the database is a duplicate data record for +the current key/data pair, move the cursor to the next key/data pair +of the database and return that pair. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the data +returned as output. Multiple results can be retrieved by passing an object +that is a subclass of MultipleEntry, otherwise its byte array does not +need to be initialized by the caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +

+getNextNoDup

+
+public OperationStatus getNextNoDup(DatabaseEntry key,
+                                    DatabaseEntry data,
+                                    LockMode lockMode)
+                             throws DatabaseException
+
+
Move the cursor to the next non-duplicate key/data pair and return +that pair. If the matching key has duplicate values, the first data +item in the set of duplicates is returned. +

+If the cursor is not yet initialized, move the cursor to the first +key/data pair of the database, and return that pair. Otherwise, the +cursor is moved to the next non-duplicate key of the database, and that +key/data pair is returned. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the data +returned as output. Multiple results can be retrieved by passing an object +that is a subclass of MultipleEntry, otherwise its byte array does not +need to be initialized by the caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +

+getPrev

+
+public OperationStatus getPrev(DatabaseEntry key,
+                               DatabaseEntry data,
+                               LockMode lockMode)
+                        throws DatabaseException
+
+
Move the cursor to the previous key/data pair and return that pair. +If the matching key has duplicate values, the last data item in the set +of duplicates is returned. +

+If the cursor is not yet initialized, move the cursor to the last +key/data pair of the database, and return that pair. Otherwise, the +cursor is moved to the previous key/data pair of the database, and that +pair is returned. In the presence of duplicate key values, the value of +the key may not change. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the data +returned as output. Its byte array does not need to be initialized by the +caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +

+getPrevDup

+
+public OperationStatus getPrevDup(DatabaseEntry key,
+                                  DatabaseEntry data,
+                                  LockMode lockMode)
+                           throws DatabaseException
+
+
If the previous key/data pair of the database is a duplicate data record +for the current key/data pair, move the cursor to the previous key/data +pair of the database and return that pair. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the data +returned as output. Its byte array does not need to be initialized by the +caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +

+getPrevNoDup

+
+public OperationStatus getPrevNoDup(DatabaseEntry key,
+                                    DatabaseEntry data,
+                                    LockMode lockMode)
+                             throws DatabaseException
+
+
Move the cursor to the previous non-duplicate key/data pair and return +that pair. If the matching key has duplicate values, the last data item +in the set of duplicates is returned. +

+If the cursor is not yet initialized, move the cursor to the last +key/data pair of the database, and return that pair. Otherwise, the +cursor is moved to the previous non-duplicate key of the database, and +that key/data pair is returned. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the data +returned as output. Its byte array does not need to be initialized by the +caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +

+getSearchKey

+
+public OperationStatus getSearchKey(DatabaseEntry key,
+                                    DatabaseEntry data,
+                                    LockMode lockMode)
+                             throws DatabaseException
+
+
Move the cursor to the given key of the database, and return the datum +associated with the given key. If the matching key has duplicate +values, the first data item in the set of duplicates is returned. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the key +used as input. It must be initialized with a non-null byte array by the +caller.
data - the data +returned as output. Multiple results can be retrieved by passing an object +that is a subclass of MultipleEntry, otherwise its byte array does not +need to be initialized by the caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +

+getSearchKeyRange

+
+public OperationStatus getSearchKeyRange(DatabaseEntry key,
+                                         DatabaseEntry data,
+                                         LockMode lockMode)
+                                  throws DatabaseException
+
+
Move the cursor to the closest matching key of the database, and return +the data item associated with the matching key. If the matching key has +duplicate values, the first data item in the set of duplicates is returned. +

+The returned key/data pair is for the smallest key greater than or equal +to the specified key (as determined by the key comparison function), +permitting partial key matches and range searches. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the key +used as input and returned as output. It must be initialized with a non-null +byte array by the caller.
data - the data +returned as output. Multiple results can be retrieved by passing an object +that is a subclass of MultipleEntry, otherwise its byte array does not +need to be initialized by the caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
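+For example, a sketch of a range scan that starts at the smallest key greater than
+or equal to a given value (string keys are an assumption of the example):
+
+    DatabaseEntry key = new DatabaseEntry("m".getBytes());
+    DatabaseEntry data = new DatabaseEntry();
+    OperationStatus status = cursor.getSearchKeyRange(key, data, null);
+    while (status == OperationStatus.SUCCESS) {
+        // key now holds the matched (or next greater) key
+        status = cursor.getNext(key, data, null);
+    }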
+ +

+getSearchBoth

+
+public OperationStatus getSearchBoth(DatabaseEntry key,
+                                     DatabaseEntry data,
+                                     LockMode lockMode)
+                              throws DatabaseException
+
+
Move the cursor to the specified key/data pair, where both the key and +data items must match. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the key +used as input. It must be initialized with a non-null byte array by the +caller.
data - the data +used as input. It must be initialized with a non-null byte array by the +caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +

+getSearchBothRange

+
+public OperationStatus getSearchBothRange(DatabaseEntry key,
+                                          DatabaseEntry data,
+                                          LockMode lockMode)
+                                   throws DatabaseException
+
+
Move the cursor to the specified key and closest matching data item of the +database. +

+In the case of any database supporting sorted duplicate sets, the returned +key/data pair is for the smallest data item greater than or equal to the +specified data item (as determined by the duplicate comparison function), +permitting partial matches and range searches in duplicate data sets. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the key +used as input and returned as output. It must be initialized with a non-null +byte array by the caller.
data - the data +used as input and returned as output. It must be initialized with a non-null +byte array by the caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +

+getRecordNumber

+
+public OperationStatus getRecordNumber(DatabaseEntry data,
+                                       LockMode lockMode)
+                                throws DatabaseException
+
+
Return the record number associated with the cursor. The record number +will be returned in the data parameter. +

+For this method to be called, the underlying database must be of type +Btree, and it must have been configured to support record numbers. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
data - the data +returned as output. Its byte array does not need to be initialized by the +caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +

+getSearchRecordNumber

+
+public OperationStatus getSearchRecordNumber(DatabaseEntry key,
+                                             DatabaseEntry data,
+                                             LockMode lockMode)
+                                      throws DatabaseException
+
+
Move the cursor to the specific numbered record of the database, and +return the associated key/data pair. +

+The data field of the specified key must be a byte array containing a +record number, as described in DatabaseEntry. This determines +the record to be retrieved. +

+For this method to be called, the underlying database must be of type +Btree, and it must have been configured to support record numbers. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the data +returned as output. Multiple results can be retrieved by passing an object +that is a subclass of MultipleEntry, otherwise its byte array does not +need to be initialized by the caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/CursorConfig.html b/db/docs/java/com/sleepycat/db/CursorConfig.html new file mode 100644 index 000000000..b1ce9830f --- /dev/null +++ b/db/docs/java/com/sleepycat/db/CursorConfig.html @@ -0,0 +1,520 @@ + + + + + + +CursorConfig (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class CursorConfig

+
+java.lang.Object
+  extended by com.sleepycat.db.CursorConfig
+
+
+
All Implemented Interfaces:
Cloneable
+
+
+
+
public class CursorConfig
extends Object
implements Cloneable
+ +

+Specifies the attributes of a database cursor. An instance created with the +default constructor is initialized with the system's default settings. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+Field Summary
+static CursorConfig DEFAULT + +
+          Default configuration used if null is passed to methods that create a + cursor.
+static CursorConfig DEGREE_2 + +
+          A convenience instance to configure a cursor for degree 2 isolation.
+static CursorConfig DIRTY_READ + +
+          A convenience instance to configure read operations performed by the + cursor to return modified but not yet committed data.
+static CursorConfig WRITECURSOR + +
+          A convenience instance to specify the Concurrent Data Store environment + cursor will be used to update the database.
+  + + + + + + + + + + +
+Constructor Summary
CursorConfig() + +
+          An instance created using the default constructor is initialized with + the system's default settings.
+  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ boolean getDegree2() + +
+          Return if the cursor is configured for degree 2 isolation.
+ boolean getDirtyRead() + +
+          Return if read operations performed by the cursor are configured to + return modified but not yet committed data.
+ boolean getWriteCursor() + +
+          Return if the Concurrent Data Store environment cursor will be used to + update the database.
+ void setDegree2(boolean degree2) + +
+          Configure the cursor for degree 2 isolation.
+ void setDirtyRead(boolean dirtyRead) + +
+          Configure read operations performed by the cursor to return modified + but not yet committed data.
+ void setWriteCursor(boolean writeCursor) + +
+          Specify the Concurrent Data Store environment cursor will be used to + update the database.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + +
+Field Detail
+ +

+DEFAULT

+
+public static final CursorConfig DEFAULT
+
+
Default configuration used if null is passed to methods that create a + cursor. +

+

+
+
+
+ +

+DIRTY_READ

+
+public static final CursorConfig DIRTY_READ
+
+
A convenience instance to configure read operations performed by the + cursor to return modified but not yet committed data. +

+

+
+
+
+ +

+DEGREE_2

+
+public static final CursorConfig DEGREE_2
+
+
A convenience instance to configure a cursor for degree 2 isolation. +

+ This ensures the stability of the current data item read by this + cursor but permits data read by this cursor to be modified or + deleted prior to the commit of the transaction for this cursor. +

+

+
+
+
+ +

+WRITECURSOR

+
+public static final CursorConfig WRITECURSOR
+
+
A convenience instance to specify the Concurrent Data Store environment + cursor will be used to update the database. +

+ The underlying Berkeley DB database environment must have been + configured as a Concurrent Data Store environment. +

+

+
+
+ + + + + + + + +
+Constructor Detail
+ +

+CursorConfig

+
+public CursorConfig()
+
+
An instance created using the default constructor is initialized with + the system's default settings. +

+

+ + + + + + + + +
+Method Detail
+ +

+setDirtyRead

+
+public void setDirtyRead(boolean dirtyRead)
+
+
Configure read operations performed by the cursor to return modified + but not yet committed data. +

+

+

+
+
+
+
Parameters:
dirtyRead - If true, configure read operations performed by the cursor to return + modified but not yet committed data.
+
+
+
+ +

+getDirtyRead

+
+public boolean getDirtyRead()
+
+
Return if read operations performed by the cursor are configured to + return modified but not yet committed data. +

+

+

+
+
+
+ +
Returns:
If read operations performed by the cursor are configured to return + modified but not yet committed data.
+
+
+
+ +

+setDegree2

+
+public void setDegree2(boolean degree2)
+
+
Configure the cursor for degree 2 isolation. +

+ This ensures the stability of the current data item read by this + cursor but permits data read by this cursor to be modified or + deleted prior to the commit of the transaction for this cursor. +

+

+

+
+
+
+
Parameters:
degree2 - If true, configure the cursor for degree 2 isolation.
+
+
+
+ +

+getDegree2

+
+public boolean getDegree2()
+
+
Return if the cursor is configured for degree 2 isolation. +

+

+

+
+
+
+ +
Returns:
If the cursor is configured for degree 2 isolation.
+
+
+
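+For example, a sketch of opening a degree 2 cursor (myDatabase and txn are assumed
+open Database and Transaction handles); the DEGREE_2 convenience instance above can
+be passed directly instead of building a configuration object:
+
+    CursorConfig config = new CursorConfig();
+    config.setDegree2(true);
+    Cursor cursor = myDatabase.openCursor(txn, config);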
+ +

+setWriteCursor

+
+public void setWriteCursor(boolean writeCursor)
+
+
Specify the Concurrent Data Store environment cursor will be used to + update the database. +

+

+

+
+
+
+
Parameters:
writeCursor - If true, specify the Concurrent Data Store environment cursor will be + used to update the database.
+
+
+
+ +

+getWriteCursor

+
+public boolean getWriteCursor()
+
+
Return if the Concurrent Data Store environment cursor will be used to + update the database. +

+

+

+
+
+
+ +
Returns:
If the Concurrent Data Store environment cursor will be used to update + the database.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/Database.html b/db/docs/java/com/sleepycat/db/Database.html new file mode 100644 index 000000000..2d30e55cc --- /dev/null +++ b/db/docs/java/com/sleepycat/db/Database.html @@ -0,0 +1,1671 @@ + + + + + + +Database (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class Database

+
+java.lang.Object
+  extended by com.sleepycat.db.Database
+
+
+
Direct Known Subclasses:
SecondaryDatabase
+
+
+
+
public class Database
extends Object
+ +

+A database handle. +

+Database attributes are specified in the DatabaseConfig class. +

+Database handles are free-threaded unless opened in an environment +that is not free-threaded. +

+To open an existing database with default attributes: +

+    Environment env = new Environment(home, null);
+    Database myDatabase = env.openDatabase(null, "mydatabase", null);
+
+To create a transactional database that supports duplicates: +
+    DatabaseConfig dbConfig = new DatabaseConfig();
+    dbConfig.setTransactional(true);
+    dbConfig.setAllowCreate(true);
+    dbConfig.setSortedDuplicates(true);
+    Database newlyCreateDb = env.openDatabase(txn, "mydatabase", dbConfig);
+
+
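+To store and then retrieve a single record with such a handle (a sketch; the key and
+data values are placeholders, and null is used for the transaction and lock mode):
+
+    DatabaseEntry key = new DatabaseEntry("fruit".getBytes());
+    DatabaseEntry data = new DatabaseEntry("apple".getBytes());
+    myDatabase.put(null, key, data);
+    DatabaseEntry found = new DatabaseEntry();
+    if (myDatabase.get(null, key, found, null) == OperationStatus.SUCCESS)
+        System.out.println(new String(found.getData()));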

+ +

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
Database(String fileName, + String databaseName, + DatabaseConfig config) + +
+          Open a database.
+  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
OperationStatus append(Transaction txn, DatabaseEntry key, DatabaseEntry data)
          Append the key/data pair to the end of the database.
void close()
          Flush any cached database information to disk and discard the database handle.
void close(boolean noSync)
          Flush any cached database information to disk and discard the database handle.
OperationStatus consume(Transaction txn, DatabaseEntry key, DatabaseEntry data, boolean wait)
          Return the record number and data from the available record closest to the head of the queue, and delete the record.
OperationStatus delete(Transaction txn, DatabaseEntry key)
          Remove key/data pairs from the database.
OperationStatus get(Transaction txn, DatabaseEntry key, DatabaseEntry data, LockMode lockMode)
          Retrieves the key/data pair with the given key.
CacheFile getCacheFile()
          Return the handle for the cache file underlying the database.
DatabaseConfig getConfig()
          Return this Database object's configuration.
String getDatabaseFile()
          Return the database's underlying file name.
String getDatabaseName()
          Return the database name.
Environment getEnvironment()
          Return the Environment handle for the database environment underlying the Database.
KeyRange getKeyRange(Transaction txn, DatabaseEntry key)
          Return an estimate of the proportion of keys in the database less than, equal to, and greater than the specified key.
OperationStatus getSearchBoth(Transaction txn, DatabaseEntry key, DatabaseEntry data, LockMode lockMode)
          Retrieves the key/data pair with the given key and data value, that is, both the key and data items must match.
OperationStatus getSearchRecordNumber(Transaction txn, DatabaseEntry key, DatabaseEntry data, LockMode lockMode)
          Retrieves the key/data pair associated with the specific numbered record of the database.
DatabaseStats getStats(Transaction txn, StatsConfig statsConfig)
          Return database statistics.
JoinCursor join(Cursor[] cursors, JoinConfig config)
          Creates a specialized join cursor for use in performing equality or natural joins on secondary indices.
Cursor openCursor(Transaction txn, CursorConfig cursorConfig)
          Return a cursor into the database.
Sequence openSequence(Transaction txn, DatabaseEntry key, SequenceConfig sequenceConfig)
          Open a sequence in the database.
OperationStatus put(Transaction txn, DatabaseEntry key, DatabaseEntry data)
          Store the key/data pair into the database.
OperationStatus putNoDupData(Transaction txn, DatabaseEntry key, DatabaseEntry data)
          Store the key/data pair into the database if it does not already appear in the database.
OperationStatus putNoOverwrite(Transaction txn, DatabaseEntry key, DatabaseEntry data)
          Store the key/data pair into the database if the key does not already appear in the database.
static void remove(String fileName, String databaseName, DatabaseConfig config)
          Remove a database.
void removeSequence(Transaction txn, DatabaseEntry key, SequenceConfig sequenceConfig)
          Remove the sequence from the database.
static void rename(String fileName, String oldDatabaseName, String newDatabaseName, DatabaseConfig config)
          Rename a database.
void setConfig(DatabaseConfig config)
          Change the settings in an existing database handle.
void sync()
          Flush any cached information to disk.
int truncate(Transaction txn, boolean returnCount)
          Empty the database, discarding all records it contains.
static void upgrade(String fileName, DatabaseConfig config)
          Upgrade all of the databases included in the specified file.
boolean verify(String fileName, String databaseName, PrintStream dumpStream, VerifyConfig config)
          Return if all of the databases in a file are uncorrupted.
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+Constructor Detail
+ +

+Database

+
+public Database(String fileName,
+                String databaseName,
+                DatabaseConfig config)
+         throws DatabaseException,
+                FileNotFoundException
+
+
Open a database. +

+The database is represented by the file and database parameters. +

+The currently supported database file formats (or access +methods) are Btree, Hash, Queue, and Recno. The Btree format is a +representation of a sorted, balanced tree structure. The Hash format +is an extensible, dynamic hashing scheme. The Queue format supports +fast access to fixed-length records accessed sequentially or by logical +record number. The Recno format supports fixed- or variable-length +records, accessed sequentially or by logical record number, and +optionally backed by a flat text file. +

+Storage and retrieval are based on key/data pairs; see DatabaseEntry +for more information. +

+Opening a database is a relatively expensive operation, and maintaining +a set of open databases will normally be preferable to repeatedly +opening and closing the database for each new query. +

+In-memory databases never intended to be preserved on disk may be +created by setting both the fileName and databaseName parameters to +null. Note that in-memory databases can only ever be shared by sharing +the single database handle that created them, in circumstances where +doing so is safe. The environment variable TMPDIR may +be used as a directory in which to create temporary backing files. +

+

+

Parameters:
fileName - The name of an underlying file that will be used to back the database. +On Windows platforms, this argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters. +

databaseName - An optional parameter that allows applications to have multiple +databases in a single file. Although no databaseName parameter needs +to be specified, it is an error to attempt to open a second database in +a physical file that was not initially created using a databaseName +parameter. Further, the databaseName parameter is not supported by the +Queue format. +

config - The database open attributes. If null, default attributes are used.
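A minimal sketch of opening a stand-alone database (no environment) with this constructor; the file name and attributes are illustrative, and error handling is omitted:

    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setAllowCreate(true);
    dbConfig.setType(DatabaseType.BTREE);
    Database db = new Database("my.db", null, dbConfig);
    // ... key/data operations on db ...
    db.close();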
+Method Detail
+ +

+delete

+
+public OperationStatus delete(Transaction txn,
+                              DatabaseEntry key)
+                       throws DatabaseException
+
+
Remove key/data pairs from the database. +

+ The key/data pair associated with the specified key is discarded + from the database. In the presence of duplicate key values, all + records associated with the designated key will be discarded. +

+ The key/data pair is also deleted from any associated secondary + databases. +

+

+

+
Parameters:
txn - For a transactional database, an explicit transaction may be specified, or null +may be specified to use auto-commit. For a non-transactional database, null +must be specified. +

key - the key DatabaseEntry operated on. +

+

Returns:
The method will return OperationStatus.NOTFOUND if the + specified key is not found in the database; + The method will return OperationStatus.KEYEMPTY if the + database is a Queue or Recno database and the specified key exists, + but was never explicitly created by the application or was later + deleted; + otherwise the method will return OperationStatus.SUCCESS. +

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

DatabaseException - if a failure occurs.
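For example, a sketch of deleting a key with auto-commit, assuming an open transactional Database handle db (hypothetical):

    DatabaseEntry key = new DatabaseEntry("obsolete-key".getBytes());
    OperationStatus status = db.delete(null, key);
    if (status == OperationStatus.NOTFOUND)
        System.out.println("key was not present");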
+
+
+
+ +

+get

+
+public OperationStatus get(Transaction txn,
+                           DatabaseEntry key,
+                           DatabaseEntry data,
+                           LockMode lockMode)
+                    throws DatabaseException
+
+
Retrieves the key/data pair with the given key. If the matching key has +duplicate values, the first data item in the set of duplicates is returned. +Retrieval of duplicates requires the use of Cursor operations. +

+

+

+
Parameters:
txn - For a transactional database, an explicit transaction may be specified to +transaction-protect the operation, or null may be specified to perform the +operation without transaction protection. For a non-transactional database, +null must be specified. +

key - the key +used as input. It must be initialized with a non-null byte array by the +caller. +

+

data - the data +returned as output. Its byte array does not need to be initialized by the +caller. +

lockMode - the locking attributes; if null, default attributes are used. +

+

Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs.
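A sketch of a simple lookup, assuming an open Database handle db; the offset and size accessors describe the valid portion of the returned byte array:

    DatabaseEntry key = new DatabaseEntry("some-key".getBytes());
    DatabaseEntry data = new DatabaseEntry();
    if (db.get(null, key, data, null) == OperationStatus.SUCCESS) {
        // Only the bytes between offset and offset+size are valid.
        String value = new String(data.getData(), data.getOffset(), data.getSize());
        System.out.println(value);
    }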
+
+
+
+ +

+getSearchBoth

+
+public OperationStatus getSearchBoth(Transaction txn,
+                                     DatabaseEntry key,
+                                     DatabaseEntry data,
+                                     LockMode lockMode)
+                              throws DatabaseException
+
+
Retrieves the key/data pair with the given key and data value, that is, both +the key and data items must match. +

+

+

+
Parameters:
txn - For a transactional database, an explicit transaction may be specified to +transaction-protect the operation, or null may be specified to perform the +operation without transaction protection. For a non-transactional database, +null must be specified.
key - the key +used as input. It must be initialized with a non-null byte array by the +caller.
data - the data +used as input. It must be initialized with a non-null byte array by the +caller. +

lockMode - the locking attributes; if null, default attributes are used. +

+

Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs.
+
+
+
+ +

+put

+
+public OperationStatus put(Transaction txn,
+                           DatabaseEntry key,
+                           DatabaseEntry data)
+                    throws DatabaseException
+
+

+Store the key/data pair into the database. +

+If the key already appears in the database and duplicates are not +configured, the existing key/data pair will be replaced. If the key +already appears in the database and sorted duplicates are configured, +the new data value is inserted at the correct sorted location. +If the key already appears in the database and unsorted duplicates are +configured, the new data value is appended at the end of the duplicate +set. +

+

+

+
Parameters:
txn - For a transactional database, an explicit transaction may be specified, or null +may be specified to use auto-commit. For a non-transactional database, null +must be specified. +

key - the key DatabaseEntry operated on. +

data - the data DatabaseEntry stored. +

+

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

DatabaseException - if a failure occurs.
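A sketch of storing one pair with auto-commit, assuming an open Database handle db:

    DatabaseEntry key = new DatabaseEntry("fruit".getBytes());
    DatabaseEntry data = new DatabaseEntry("apple".getBytes());
    // A plain put either adds the pair or replaces/extends an existing one,
    // so SUCCESS is the expected status unless an exception is thrown.
    OperationStatus status = db.put(null, key, data);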
+
+
+
+ +

+append

+
+public OperationStatus append(Transaction txn,
+                              DatabaseEntry key,
+                              DatabaseEntry data)
+                       throws DatabaseException
+
+

+Append the key/data pair to the end of the database. +

+The underlying database must be a Queue or Recno database. The record +number allocated to the record is returned in the key parameter. +

There is a minor behavioral difference between the Recno and Queue access methods for this method. If a transaction enclosing this method aborts, the record number may be decremented (and later reallocated by a subsequent operation) in the Recno access method, but will not be decremented or reallocated in the Queue access method.

+It may be useful to modify the stored data based on the generated key. +If a callback function is specified using DatabaseConfig.setRecordNumberAppender, it will be called after the record number has +been selected, but before the data has been stored. +

+

+

+
Parameters:
txn - For a transactional database, an explicit transaction may be specified, or null +may be specified to use auto-commit. For a non-transactional database, null +must be specified. +

key - the key DatabaseEntry operated on. +

data - the data DatabaseEntry stored. +

+

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

DatabaseException - if a failure occurs.
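A sketch of appending to a Recno or Queue database, assuming an open handle db; reading the allocated record number back through DatabaseEntry.getRecordNumber is an assumption of this sketch:

    DatabaseEntry key = new DatabaseEntry();   // receives the new record number
    DatabaseEntry data = new DatabaseEntry("new record".getBytes());
    if (db.append(null, key, data) == OperationStatus.SUCCESS)
        System.out.println("allocated record " + key.getRecordNumber());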
+
+
+
+ +

+putNoOverwrite

+
+public OperationStatus putNoOverwrite(Transaction txn,
+                                      DatabaseEntry key,
+                                      DatabaseEntry data)
+                               throws DatabaseException
+
+

+Store the key/data pair into the database if the key does not already +appear in the database. +

+This method will fail if the key already exists in the database, even +if the database supports duplicates. +

+

+

+
Parameters:
txn - For a transactional database, an explicit transaction may be specified, or null +may be specified to use auto-commit. For a non-transactional database, null +must be specified. +

key - the key DatabaseEntry operated on. +

data - the data DatabaseEntry stored. +

+

Returns:
If the key already appears in the database, this method will return +OperationStatus.KEYEXIST. +

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

DatabaseException - if a failure occurs.
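A sketch of an insert-if-absent, assuming an open Database handle db:

    DatabaseEntry key = new DatabaseEntry("user:42".getBytes());
    DatabaseEntry data = new DatabaseEntry("initial value".getBytes());
    if (db.putNoOverwrite(null, key, data) == OperationStatus.KEYEXIST)
        System.out.println("key already present; existing data left unchanged");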
+
+
+
+ +

+putNoDupData

+
+public OperationStatus putNoDupData(Transaction txn,
+                                    DatabaseEntry key,
+                                    DatabaseEntry data)
+                             throws DatabaseException
+
+

+Store the key/data pair into the database if it does not already appear +in the database. +

This method may only be called if the underlying database has been configured to support sorted duplicates. (It may not be used with the Queue or Recno access methods.)

+

+

+
Parameters:
txn - For a transactional database, an explicit transaction may be specified, or null +may be specified to use auto-commit. For a non-transactional database, null +must be specified. +

key - the key DatabaseEntry operated on. +

data - the data DatabaseEntry stored. +

+

Returns:
If the key/data pair already appears in the database, this method will +return OperationStatus.KEYEXIST. +

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

DatabaseException - if a failure occurs.
+
+
+
+ +

+getDatabaseName

+
+public String getDatabaseName()
+                       throws DatabaseException
+
+
Return the database name. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
The database name. +
Throws: +
DatabaseException
+
+
+
+ +

+getConfig

+
+public DatabaseConfig getConfig()
+                         throws DatabaseException
+
+
Return this Database object's configuration. +

+ This may differ from the configuration used to open this object if + the database existed previously. +

+

+

+ +
Returns:
This Database object's configuration. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getEnvironment

+
+public Environment getEnvironment()
+                           throws DatabaseException
+
+
Return the Environment handle for the database environment + underlying the Database. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
The Environment handle for the database environment + underlying the Database. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+close

+
+public void close()
+           throws DatabaseException
+
+
Flush any cached database information to disk and discard the database +handle. +

+The database handle should not be closed while any other handle that +refers to it is not yet closed; for example, database handles should not +be closed while cursor handles into the database remain open, or +transactions that include operations on the database have not yet been +committed or aborted. Specifically, this includes Cursor and +Transaction handles. +

+Because key/data pairs are cached in memory, failing to sync the file +with the Database.close or Database.sync methods +may result in inconsistent or lost information. +

+When multiple threads are using the Database handle +concurrently, only a single thread may call this method. +

+The database handle may not be accessed again after this method is +called, regardless of the method's success or failure. +

+When called on a database that is the primary database for a secondary +index, the primary database should be closed only after all secondary +indices which reference it have been closed. +

+

+

+

+ +
Throws: +
DatabaseException - if a failure occurs.
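A sketch of the shutdown ordering described above; the cursor, transaction, secondary database, and environment handles are hypothetical:

    cursor.close();        // close cursors opened against the database first
    txn.commit();          // resolve transactions that touched the database
    secondaryDb.close();   // close secondary indices before their primary
    db.close();            // then the primary database handle
    env.close();           // finally, the environment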
+
+
+
+ +

+close

+
+public void close(boolean noSync)
+           throws DatabaseException
+
+
Flush any cached database information to disk and discard the database +handle. +

+The database handle should not be closed while any other handle that +refers to it is not yet closed; for example, database handles should not +be closed while cursor handles into the database remain open, or +transactions that include operations on the database have not yet been +committed or aborted. Specifically, this includes Cursor and +Transaction handles. +

+Because key/data pairs are cached in memory, failing to sync the file +with the Database.close or Database.sync methods +may result in inconsistent or lost information. +

+When multiple threads are using the Database handle +concurrently, only a single thread may call this method. +

+The database handle may not be accessed again after this method is +called, regardless of the method's success or failure. +

+When called on a database that is the primary database for a secondary +index, the primary database should be closed only after all secondary +indices which reference it have been closed. +

+

+
Parameters:
noSync - Do not flush cached information to disk. The noSync parameter is a +dangerous option. It should be set only if the application is doing +logging (with transactions) so that the database is recoverable after a +system or application crash, or if the database is always generated from +scratch after any system or application crash. + +It is important to understand that flushing cached information to disk +only minimizes the window of opportunity for corrupted data. +

+ +Although unlikely, it is possible for database corruption to happen if +a system or application crash occurs while writing data to the database. +To ensure that database corruption never occurs, applications must +either: use transactions and logging with automatic recovery; use +logging and application-specific recovery; or edit a copy of the +database, and once all applications using the database have successfully +called this method, atomically replace the original database with the +updated copy. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+openCursor

+
+public Cursor openCursor(Transaction txn,
+                         CursorConfig cursorConfig)
+                  throws DatabaseException
+
+
Return a cursor into the database. +

+

+

+
Parameters:
txn - To use a cursor for writing to a transactional database, an explicit +transaction must be specified. For read-only access to a transactional +database, the transaction may be null. For a non-transactional database, the +transaction must be null. +

+To transaction-protect cursor operations, cursors must be opened and +closed within the context of a transaction, and the txn parameter +specifies the transaction context in which the cursor will be used. +

cursorConfig - The cursor attributes. If null, default attributes are used. +

+

Returns:
A database cursor. +

+

+

Throws: +
DatabaseException - if a failure occurs.
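A sketch of iterating over every key/data pair with a cursor, assuming an open non-transactional Database handle db:

    Cursor cursor = db.openCursor(null, null);
    try {
        DatabaseEntry key = new DatabaseEntry();
        DatabaseEntry data = new DatabaseEntry();
        while (cursor.getNext(key, data, null) == OperationStatus.SUCCESS) {
            // process one key/data pair
        }
    } finally {
        cursor.close();
    }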
+
+
+
+ +

+openSequence

+
+public Sequence openSequence(Transaction txn,
+                             DatabaseEntry key,
+                             SequenceConfig sequenceConfig)
+                      throws DatabaseException
+
+
Open a sequence in the database. +

+

+

+
Parameters:
txn - For a transactional database, an explicit transaction may be specified, or null +may be specified to use auto-commit. For a non-transactional database, null +must be specified. +

key - The key DatabaseEntry of the sequence. +

sequenceConfig - The sequence attributes. If null, default attributes are used. +

+

Returns:
A sequence handle. +

+

+

Throws: +
DatabaseException - if a failure occurs.
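A sketch of creating and drawing from a sequence, assuming an open Database handle db; the key name, the SequenceConfig.setAllowCreate attribute, and the Sequence.get(txn, delta) call are assumptions of this sketch:

    SequenceConfig seqConfig = new SequenceConfig();
    seqConfig.setAllowCreate(true);
    Sequence seq = db.openSequence(null,
        new DatabaseEntry("order-id-sequence".getBytes()), seqConfig);
    long nextId = seq.get(null, 1);   // allocate one value
    seq.close();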
+
+
+
+ +

+removeSequence

+
+public void removeSequence(Transaction txn,
+                           DatabaseEntry key,
+                           SequenceConfig sequenceConfig)
+                    throws DatabaseException
+
+
Remove the sequence from the database. This method should not be called if + there are open handles on this sequence. +

+

+

+
Parameters:
txn - For a transactional database, an explicit transaction may be specified, or null +may be specified to use auto-commit. For a non-transactional database, null +must be specified. +

key - The key DatabaseEntry of the sequence. +

sequenceConfig - The sequence attributes. If null, default attributes are used. +
Throws: +
DatabaseException
+
+
+
+ +

+getStats

+
+public DatabaseStats getStats(Transaction txn,
+                              StatsConfig statsConfig)
+                       throws DatabaseException
+
+
Return database statistics. +

+ If this method has not been configured to avoid expensive operations + (using the StatsConfig.setFast method), it will access + some of or all the pages in the database, incurring a severe + performance penalty as well as possibly flushing the underlying + buffer pool. +

+ In the presence of multiple threads or processes accessing an active + database, the information returned by this method may be out-of-date. +

+ If the database was not opened read-only and this method was not + configured using the StatsConfig.setFast method, cached + key and record numbers will be updated after the statistical + information has been gathered. +

+

+

+
Parameters:
txn - For a transactional database, an explicit transaction may be specified to +transaction-protect the operation, or null may be specified to perform the +operation without transaction protection. For a non-transactional database, +null must be specified. +

statsConfig - The statistics returned; if null, default statistics are returned. +

+

Returns:
Database statistics. +

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

DatabaseException - if a failure occurs.
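A sketch of gathering inexpensive statistics, assuming an open Database handle db:

    StatsConfig statsConfig = new StatsConfig();
    statsConfig.setFast(true);        // avoid walking the entire database
    DatabaseStats stats = db.getStats(null, statsConfig);
    System.out.println(stats);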
+
+
+
+ +

+truncate

+
+public int truncate(Transaction txn,
+                    boolean returnCount)
+             throws DatabaseException
+
+
Empty the database, discarding all records it contains. +

+ When called on a database configured with secondary indices, this + method truncates the primary database and all secondary indices. If + configured to return a count of the records discarded, the returned + count is the count of records discarded from the primary database. +

+ It is an error to call this method on a database with open cursors. +

+

+

+
Parameters:
txn - For a transactional database, an explicit transaction may be specified, or null +may be specified to use auto-commit. For a non-transactional database, null +must be specified. +

returnCount - If true, count and return the number of records discarded. +

+

Returns:
The number of records discarded, or -1 if returnCount is false. +

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

DatabaseException - if a failure occurs.
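A sketch of emptying a database with auto-commit and reporting the count, assuming an open Database handle db with no open cursors:

    int discarded = db.truncate(null, true);
    System.out.println("discarded " + discarded + " records");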
+
+
+
+ +

+join

+
+public JoinCursor join(Cursor[] cursors,
+                       JoinConfig config)
+                throws DatabaseException
+
+
Creates a specialized join cursor for use in performing equality or + natural joins on secondary indices. +

+ Each cursor in the cursors array must have been + initialized to refer to the key on which the underlying database should + be joined. Typically, this initialization is done by calling + Cursor.getSearchKey. +

+ Once the cursors have been passed to this method, they should not be + accessed or modified until the newly created join cursor has been + closed, or else inconsistent results may be returned. However, the + position of the cursors will not be changed by this method or by the + methods of the join cursor. +

+

+

+
Parameters:
cursors - an array of cursors associated with this primary + database. +

config - The join attributes. If null, default attributes are used. +

+

Returns:
a specialized cursor that returns the results of the equality join + operation. +

+

+

Throws: +
DatabaseException - if a failure occurs.
See Also:
JoinCursor
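A sketch of a two-way equality join, assuming SecondaryDatabase handles sdb1 and sdb2 associated with db and secondary key entries k1 and k2; the openSecondaryCursor calls are assumptions of this sketch:

    SecondaryCursor c1 = sdb1.openSecondaryCursor(null, null);
    SecondaryCursor c2 = sdb2.openSecondaryCursor(null, null);
    c1.getSearchKey(k1, new DatabaseEntry(), null);
    c2.getSearchKey(k2, new DatabaseEntry(), null);
    JoinCursor joinCursor = db.join(new Cursor[] { c1, c2 }, null);
    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();
    while (joinCursor.getNext(key, data, null) == OperationStatus.SUCCESS) {
        // primary records whose secondary keys match both k1 and k2
    }
    joinCursor.close();
    c1.close();
    c2.close();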
+
+
+
+ +

+getDatabaseFile

+
+public String getDatabaseFile()
+                       throws DatabaseException
+
+
Return the database's underlying file name. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
The database's underlying file name. +
Throws: +
DatabaseException
+
+
+
+ +

+setConfig

+
+public void setConfig(DatabaseConfig config)
+               throws DatabaseException
+
+
Change the settings in an existing database handle. +

+

+

+
Parameters:
config - The database attributes. If null, default attributes are used.

+

+

Throws: +
IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs.
+
+
+
+ +

+getCacheFile

+
+public CacheFile getCacheFile()
+                       throws DatabaseException
+
+
Return the handle for the cache file underlying the database. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
The handle for the cache file underlying the database. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+consume

+
+public OperationStatus consume(Transaction txn,
+                               DatabaseEntry key,
+                               DatabaseEntry data,
+                               boolean wait)
+                        throws DatabaseException
+
+
Return the record number and data from the available record closest to +the head of the queue, and delete the record. The record number will be +returned in the key parameter, and the data will be returned +in the data parameter. A record is available if it is not +deleted and is not currently locked. The underlying database must be +of type Queue for this method to be called. +

+

+

+
Parameters:
txn - For a transactional database, an explicit transaction may be specified to +transaction-protect the operation, or null may be specified to perform the +operation without transaction protection. For a non-transactional database, +null must be specified.
key - the key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the data +returned as output. Its byte array does not need to be initialized by the +caller.
wait - if there is no record available, this parameter determines whether the +method waits for one to become available, or returns immediately with +status NOTFOUND. +

+

Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs.
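A sketch of draining the available records from a Queue database without blocking, assuming an open handle db:

    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();
    while (db.consume(null, key, data, false) == OperationStatus.SUCCESS) {
        // process the record; it has already been removed from the queue
    }
    // OperationStatus.NOTFOUND here means the queue is currently empty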
+
+
+
+ +

+getKeyRange

+
+public KeyRange getKeyRange(Transaction txn,
+                            DatabaseEntry key)
+                     throws DatabaseException
+
+
Return an estimate of the proportion of keys in the database less + than, equal to, and greater than the specified key. +

+ The underlying database must be of type Btree. +

+ This method does not retain the locks it acquires for the life of + the transaction, so estimates are not repeatable. +

+

+

+
Parameters:
key - The key DatabaseEntry being compared. +

txn - For a transactional database, an explicit transaction may be specified to +transaction-protect the operation, or null may be specified to perform the +operation without transaction protection. For a non-transactional database, +null must be specified. +

+

Returns:
An estimate of the proportion of keys in the database less than, + equal to, and greater than the specified key. +

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

DatabaseException - if a failure occurs.
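A sketch of estimating where a key falls in a Btree database, assuming an open handle db; the less/equal/greater fields are assumed to mirror the underlying DB_KEY_RANGE structure:

    KeyRange range = db.getKeyRange(null, new DatabaseEntry("m".getBytes()));
    System.out.println("less: " + range.less
        + "  equal: " + range.equal
        + "  greater: " + range.greater);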
+
+
+
+ +

+getSearchRecordNumber

+
+public OperationStatus getSearchRecordNumber(Transaction txn,
+                                             DatabaseEntry key,
+                                             DatabaseEntry data,
+                                             LockMode lockMode)
+                                      throws DatabaseException
+
+
Retrieves the key/data pair associated with the specific numbered record of the database. +

+The data field of the specified key must be a byte array containing a +record number, as described in DatabaseEntry. This determines +the record to be retrieved. +

+For this method to be called, the underlying database must be of type +Btree, and it must have been configured to support record numbers. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the data +returned as output. Multiple results can be retrieved by passing an object +that is a subclass of MultipleEntry, otherwise its byte array does not +need to be initialized by the caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +

+remove

+
+public static void remove(String fileName,
+                          String databaseName,
+                          DatabaseConfig config)
+                   throws DatabaseException,
+                          FileNotFoundException
+
+

+Remove a database. +

+If no database is specified, the underlying file specified is removed. +

+Applications should never remove databases with open Database +handles, or in the case of removing a file, when any database in the +file has an open handle. For example, some architectures do not permit +the removal of files with open system handles. On these architectures, +attempts to remove databases currently in use by any thread of control +in the system may fail. +

+If the database was opened within a database environment, the +environment variable DB_HOME may be used as the path of the database +environment home. +

+This method is affected by any database directory specified with +EnvironmentConfig.addDataDir, or by setting the "set_data_dir" +string in the database environment's DB_CONFIG file. +

+The Database handle may not be accessed +again after this method is called, regardless of this method's success +or failure. +

+

+

+
Parameters:
fileName - The physical file which contains the database to be removed. +On Windows platforms, this argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters. +

databaseName - The database to be removed. +

config - The database remove attributes. If null, default attributes are used. +

+

+

Throws: +
DatabaseException - if a failure occurs. +
FileNotFoundException
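A sketch of the two forms of removal; the file and database names are illustrative, and no handles on them may be open:

    // Remove one named database from a file...
    Database.remove("my.db", "mydatabase", null);
    // ...or remove the underlying file when no database name is given.
    Database.remove("obsolete.db", null, null);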
+
+
+
+ +

+rename

+
+public static void rename(String fileName,
+                          String oldDatabaseName,
+                          String newDatabaseName,
+                          DatabaseConfig config)
+                   throws DatabaseException,
+                          FileNotFoundException
+
+

+Rename a database. +

+If no database name is specified, the underlying file specified is +renamed, incidentally renaming all of the databases it contains. +

+Applications should never rename databases that are currently in use. +If an underlying file is being renamed and logging is currently enabled +in the database environment, no database in the file may be open when +this method is called. In particular, some architectures do not permit +renaming files with open handles. On these architectures, attempts to +rename databases that are currently in use by any thread of control in +the system may fail. +

+If the database was opened within a database environment, the +environment variable DB_HOME may be used as the path of the database +environment home. +

+This method is affected by any database directory specified with +EnvironmentConfig.addDataDir, or by setting the "set_data_dir" +string in the database environment's DB_CONFIG file. +

+The Database handle may not be accessed +again after this method is called, regardless of this method's success +or failure. +

+

+

+
Parameters:
fileName - The physical file which contains the database to be renamed. +On Windows platforms, this argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters. +

oldDatabaseName - The database to be renamed. +

newDatabaseName - The new name of the database or file. +

config - The database rename attributes. If null, default attributes are used. +

+

+

Throws: +
DatabaseException - if a failure occurs. +
FileNotFoundException
+
+
+
+ +

+sync

+
+public void sync()
+          throws DatabaseException
+
+
Flush any cached information to disk. +

+ If the database is in memory only, this method has no effect and + will always succeed. +

+ It is important to understand that flushing cached information to + disk only minimizes the window of opportunity for corrupted data. +

+ Although unlikely, it is possible for database corruption to happen + if a system or application crash occurs while writing data to the + database. To ensure that database corruption never occurs, + applications must either: use transactions and logging with + automatic recovery; use logging and application-specific recovery; + or edit a copy of the database, and once all applications using the + database have successfully closed the copy of the database, + atomically replace the original database with the updated copy. +

+

+

+

+ +
Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+upgrade

+
+public static void upgrade(String fileName,
+                           DatabaseConfig config)
+                    throws DatabaseException,
+                           FileNotFoundException
+
+
Upgrade all of the databases included in the specified file. +

+ If no upgrade is necessary, always returns success. +

+ + Database upgrades are done in place and are destructive. For example, + if pages need to be allocated and no disk space is available, the + database may be left corrupted. Backups should be made before databases + are upgraded. + +

+ + The following information is only meaningful when upgrading databases + from releases before the Berkeley DB 3.1 release: + +

+ As part of the upgrade from the Berkeley DB 3.0 release to the 3.1 + release, the on-disk format of duplicate data items changed. To + correctly upgrade the format requires applications to specify + whether duplicate data items in the database are sorted or not. + Configuring the database object to support sorted duplicates by the + DatabaseConfig.setSortedDuplicates method informs this + method that the duplicates are sorted; otherwise they are assumed + to be unsorted. Incorrectly specifying this configuration + information may lead to database corruption. +

+ Further, because this method upgrades a physical file (including all + the databases it contains), it is not possible to use this method + to upgrade files in which some of the databases it includes have + sorted duplicate data items, and some of the databases it includes + have unsorted duplicate data items. If the file does not have more + than a single database, if the databases do not support duplicate + data items, or if all of the databases that support duplicate data + items support the same style of duplicates (either sorted or + unsorted), this method will work correctly as long as the duplicate + configuration is correctly specified. Otherwise, the file cannot + be upgraded using this method; it must be upgraded manually by + dumping and reloading the databases. +

+ Unlike all other database operations, upgrades may only be done on + a system with the same byte-order as the database. +

+

+

+
Parameters:
fileName - The physical file containing the databases to be upgraded. +

+

+

Throws: +
DatabaseException - if a failure occurs. +
FileNotFoundException
+
+
+
+ +

+verify

+
+public boolean verify(String fileName,
+                      String databaseName,
+                      PrintStream dumpStream,
+                      VerifyConfig config)
+               throws DatabaseException,
+                      FileNotFoundException
+
+
Return if all of the databases in a file are uncorrupted. +

+ This method optionally outputs the databases' key/data pairs to a + file stream. +

This method does not perform any locking, even in database environments that are configured with a locking subsystem. As such, it should only be used on files that are not being modified by another thread of control.

+ This method may not be called after the database is opened. +

+ If the database was opened within a database environment, the +environment variable DB_HOME may be used as the path of the database +environment home. +

+This method is affected by any database directory specified with +EnvironmentConfig.addDataDir, or by setting the "set_data_dir" +string in the database environment's DB_CONFIG file. +

+ The Database handle may not be accessed +again after this method is called, regardless of this method's success +or failure. +

+

+

+
Parameters:
fileName - The physical file in which the databases to be verified are found. +

databaseName - The database in the file on which the database checks for btree and duplicate sort order and for hashing are to be performed. This parameter should be set to null except when the operation has been configured by VerifyConfig.setOrderCheckOnly.

dumpStream - An optional file stream to which the databases' key/data pairs are written. This parameter should be set to null except when the operation has been configured by VerifyConfig.setSalvage.

config - The VerifyConfig attributes. If null, default attributes are used. +

+

Returns:
True, if all of the databases in the file are uncorrupted. If this + method returns false, and the operation was configured by + VerifyConfig.setSalvage, all of the key/data pairs in the + file may not have been successfully output. +

+

+

Throws: +
DatabaseException - if a failure occurs. +
FileNotFoundException
+
+
+ +
+Berkeley DB
version 4.3.14
+
+ + + +
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/db/DatabaseConfig.html b/db/docs/java/com/sleepycat/db/DatabaseConfig.html
new file mode 100644
index 000000000..41521bf22
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/DatabaseConfig.html
@@ -0,0 +1,3445 @@
DatabaseConfig (Sleepycat Software, Inc. - Berkeley DB Java API)
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class DatabaseConfig

+
+java.lang.Object
+  extended by com.sleepycat.db.DatabaseConfig
+
+
+
All Implemented Interfaces:
Cloneable
+
+
+
Direct Known Subclasses:
SecondaryConfig
+
+
+
+
public class DatabaseConfig
extends Object
implements Cloneable
+ +

+Specify the attributes of a database. +

+ +

+


+ +

+Field Summary
static DatabaseConfig DEFAULT
+           
+Constructor Summary
DatabaseConfig() + +
+          An instance created using the default constructor is initialized with + the system's default settings.
+Method Summary
+ booleangetAllowCreate() + +
+          Return if the Environment.openDatabase method is configured + to create the database if it does not already exist.
+ ComparatorgetBtreeComparator() + +
+          Return the Comparator used to compare keys in a Btree.
+ intgetBtreeMinKey() + +
+          Return the minimum number of key/data pairs intended to be stored + on any single Btree leaf page.
+ BtreePrefixCalculatorgetBtreePrefixCalculator() + +
+          Return the Btree prefix callback.
+ booleangetBtreeRecordNumbers() + +
+          Return if the Btree is configured to support retrieval by record number.
+ intgetByteOrder() + +
+          Return the database byte order; a byte order of 4,321 indicates a + big endian order, and a byte order of 1,234 indicates a little + endian order.
+ booleangetByteSwapped() + +
+          Return if the underlying database files were created on an architecture + of the same byte order as the current one.
+ intgetCacheCount() + +
+          Return the number of shared memory buffer pools, that is, the number + of caches.
+ longgetCacheSize() + +
+          Return the size of the shared memory buffer pool, that is, the cache.
+ booleangetChecksum() + +
+          Return if the database environment is configured to do checksum + verification of pages read into the cache from the backing + filestore.
+ booleangetDirtyRead() + +
+          Return if the database is configured to support dirty reads.
+ ComparatorgetDuplicateComparator() + +
+          Return the duplicate data item comparison callback.
+ booleangetEncrypted() + +
+          Return if the database has been configured to perform encryption.
+ ErrorHandlergetErrorHandler() + +
+          Return the function to be called if an error occurs.
+ StringgetErrorPrefix() + +
+          Return the prefix string that appears before error messages.
+ OutputStreamgetErrorStream() + +
+          Return the OutputStream for displaying error messages.
+ booleangetExclusiveCreate() + +
+          Return if the Environment.openDatabase method is configured + to fail if the database already exists.
+ FeedbackHandlergetFeedbackHandler() + +
+          Return the object's methods to be called to provide feedback.
+ HashergetHasher() + +
+          Return the database-specific hash function.
+ intgetHashFillFactor() + +
+          Return the hash table density.
+ intgetHashNumElements() + +
+          Return the estimate of the final size of the hash table.
+ MessageHandlergetMessageHandler() + +
+          Return the function to be called with an informational message.
+ OutputStreamgetMessageStream() + +
+          Return the OutputStream for displaying informational messages.
+ longgetMode() + +
+          Return the mode used to create files.
+ booleangetNoMMap() + +
+          Return if the library is configured to not map this database into + memory.
+ intgetPageSize() + +
+          Return the size of the pages used to hold items in the database, in bytes.
+ PanicHandlergetPanicHandler() + +
+          Return the function to be called if the database environment panics.
+ intgetQueueExtentSize() + +
+          Return the size of the extents used to hold pages in a Queue database, + specified as a number of pages.
+ booleangetQueueInOrder() + +
+          Return if the Database.consume method is configured to return + key/data pairs in order, always returning the key/data item from the + head of the queue.
+ booleangetReadOnly() + +
+          Return if the database is configured in read-only mode.
+ intgetRecordDelimiter() + +
+          Return the delimiting byte used to mark the end of a record in the + backing source file for the Recno access method.
+ intgetRecordLength() + +
+          Return the database record length, in bytes.
+ RecordNumberAppendergetRecordNumberAppender() + +
+          Return the function to call after the record number has been + selected but before the data has been stored into the database.
+ intgetRecordPad() + +
+          Return the padding character for short, fixed-length records for the + Queue and Recno access methods.
+ FilegetRecordSource() + +
+          Return the name of an underlying flat text database file that is + read to initialize a transient record number index.
+ booleangetRenumbering() + +
+          Return if the logical record numbers are mutable, and change as + records are added to and deleted from the database.
+ booleangetReverseSplitOff() + +
+          Return if the Btree has been configured to not do reverse splits.
+ booleangetSnapshot() + +
+          Return if the any specified backing source file will be read in its + entirety when the database is opened.
+ booleangetSortedDuplicates() + +
+          Return if the database is configured to support sorted duplicate data + items.
+ booleangetTransactional() + +
+          Return if the database open is enclosed within a transaction.
+ booleangetTransactionNotDurable() + +
+          Return if the database environment is configured to not write log + records for this database.
+ booleangetTruncate() + +
+          Return if the database has been configured to be physically truncated + by truncating the underlying file, discarding all previous databases + it might have held.
+ DatabaseTypegetType() + +
+          Return the type of the database.
+ booleangetUnsortedDuplicates() + +
+          Return if the database is configured to support duplicate data items.
+ booleangetXACreate() + +
+          Return if the database has been configured to be accessed via applications + running under an X/Open conformant Transaction Manager.
+ voidsetAllowCreate(boolean allowCreate) + +
+          Configure the Environment.openDatabase method to create + the database if it does not already exist.
+ voidsetBtreeComparator(Comparator btreeComparator) + +
+          By default, a byte by byte lexicographic comparison is used for + btree keys.
+ voidsetBtreeMinKey(int btMinKey) + +
+          Set the minimum number of key/data pairs intended to be stored on any + single Btree leaf page.
+ voidsetBtreePrefixCalculator(BtreePrefixCalculator btreePrefixCalculator) + +
+          Set the Btree prefix callback.
+ voidsetBtreeRecordNumbers(boolean btreeRecordNumbers) + +
+          Configure the Btree to support retrieval by record number.
+ voidsetByteOrder(int byteOrder) + +
+          Set the byte order for integers in the stored database metadata.
+ voidsetCacheCount(int cacheCount) + +
+          Set the number of shared memory buffer pools, that is, the number of +caches.
+ voidsetCacheSize(long cacheSize) + +
+          Set the size of the shared memory buffer pool, that is, the size of the +cache.
+ voidsetChecksum(boolean checksum) + +
+          Configure the database environment to do checksum verification of + pages read into the cache from the backing filestore.
+ voidsetDirtyRead(boolean dirtyRead) + +
+          Configure the database to support dirty reads.
+ voidsetDuplicateComparator(Comparator duplicateComparator) + +
+          Set the duplicate data item comparison callback.
+ voidsetEncrypted(String password) + +
+          Set the password used to perform encryption and decryption.
+ voidsetErrorHandler(ErrorHandler errorHandler) + +
+          Set the function to be called if an error occurs.
+ voidsetErrorPrefix(String errorPrefix) + +
+          Set the prefix string that appears before error messages.
+ voidsetErrorStream(OutputStream errorStream) + +
+          Set an OutputStream for displaying error messages.
+ voidsetExclusiveCreate(boolean exclusiveCreate) + +
+          Configure the Environment.openDatabase method to fail if + the database already exists.
+ voidsetFeedbackHandler(FeedbackHandler feedbackHandler) + +
+          Set an object whose methods are called to provide feedback.
+ voidsetHasher(Hasher hasher) + +
+          Set a database-specific hash function.
+ voidsetHashFillFactor(int hashFillFactor) + +
+          Set the desired density within the hash table.
+ voidsetHashNumElements(int hashNumElements) + +
+          Set an estimate of the final size of the hash table.
+ voidsetMessageHandler(MessageHandler messageHandler) + +
+          Set a function to be called with an informational message.
+ voidsetMessageStream(OutputStream messageStream) + +
+          Set an OutputStream for displaying informational messages.
+ voidsetMode(int mode) + +
+          On UNIX systems or in IEEE/ANSI Std 1003.1 (POSIX) environments, files created by the database open are created with the specified mode (as described in the chmod(2) manual page) and modified by the process' umask value at the time of creation (see the umask(2) manual page).
+ voidsetNoMMap(boolean noMMap) + +
+          Configure the library to not map this database into memory.
+ voidsetPageSize(int pageSize) + +
+          Set the size of the pages used to hold items in the database, in bytes.
+ voidsetPanicHandler(PanicHandler panicHandler) + +
+          Set the function to be called if the database environment panics.
+ voidsetQueueExtentSize(int queueExtentSize) + +
+          Set the size of the extents used to hold pages in a Queue database, + specified as a number of pages.
+ voidsetQueueInOrder(boolean queueInOrder) + +
+          Configure Database.consume to return key/data pairs in + order, always returning the key/data item from the head of the + queue.
+ voidsetReadOnly(boolean readOnly) + +
+          Configure the database in read-only mode.
+ voidsetRecordDelimiter(int recordDelimiter) + +
+          Set the delimiting byte used to mark the end of a record in the backing + source file for the Recno access method.
+ voidsetRecordLength(int recordLength) + +
+          Specify the database record length, in bytes.
+ voidsetRecordNumberAppender(RecordNumberAppender recnoAppender) + +
+          Configure Database.append to call the function after the + record number has been selected but before the data has been stored + into the database.
+ voidsetRecordPad(int recordPad) + +
+          Set the padding character for short, fixed-length records for the Queue + and Recno access methods.
+ voidsetRecordSource(File recordSource) + +
+          Set the underlying source file for the Recno access method.
+ voidsetRenumbering(boolean renumbering) + +
+          Configure the logical record numbers to be mutable, and change as + records are added to and deleted from the database.
+ voidsetReverseSplitOff(boolean reverseSplitOff) + +
+          Configure the Btree to not do reverse splits.
+ voidsetSnapshot(boolean snapshot) + +
+          Specify that any specified backing source file be read in its entirety + when the database is opened.
+ voidsetSortedDuplicates(boolean sortedDuplicates) + +
+          Configure the database to support sorted, duplicate data items.
+ voidsetTransactional(boolean transactional) + +
+          Enclose the database open within a transaction.
+ voidsetTransactionNotDurable(boolean transactionNotDurable) + +
+          Configure the database environment to not write log records for this + database.
+ voidsetTruncate(boolean truncate) + +
+          Configure the database to be physically truncated by truncating the + underlying file, discarding all previous databases it might have + held.
+ voidsetType(DatabaseType type) + +
+          Configure the type of the database.
+ voidsetUnsortedDuplicates(boolean unsortedDuplicates) + +
+          Configure the database to support unsorted duplicate data items.
+ voidsetXACreate(boolean xaCreate) + +
+          Configure the database to be accessed via applications running under + an X/Open conformant Transaction Manager.
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+Field Detail
+ +

+DEFAULT

+
+public static final DatabaseConfig DEFAULT
+
+
+
+
+Constructor Detail
+ +

+DatabaseConfig

+
+public DatabaseConfig()
+
+
An instance created using the default constructor is initialized with + the system's default settings. +

+

+Method Detail
+ +

+setAllowCreate

+
+public void setAllowCreate(boolean allowCreate)
+
+
Configure the Environment.openDatabase method to create + the database if it does not already exist. +

+

+

+
+
+
+
Parameters:
allowCreate - If true, configure the Environment.openDatabase method to + create the database if it does not already exist.
+
+
+
+ +

+getAllowCreate

+
+public boolean getAllowCreate()
+
+
Return if the Environment.openDatabase method is configured + to create the database if it does not already exist. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the Environment.openDatabase method is configured + to create the database if it does not already exist.
+
+
+
+ +

+setExclusiveCreate

+
+public void setExclusiveCreate(boolean exclusiveCreate)
+
+
Configure the Environment.openDatabase method to fail if + the database already exists. +

+ The exclusiveCreate mode is only meaningful if specified with the + allowCreate mode. +

+

+

+
+
+
+
Parameters:
exclusiveCreate - If true, configure the Environment.openDatabase method to + fail if the database already exists.
+
+
+
+ +

+getExclusiveCreate

+
+public boolean getExclusiveCreate()
+
+
Return if the Environment.openDatabase method is configured + to fail if the database already exists. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the Environment.openDatabase method is configured + to fail if the database already exists.
+
+
+
+ +

+getSortedDuplicates

+
+public boolean getSortedDuplicates()
+
+
Return if the database is configured to support sorted duplicate data + items. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database is configured to support sorted duplicate data + items.
+
+
+
+ +

+setReadOnly

+
+public void setReadOnly(boolean readOnly)
+
+
Configure the database in read-only mode. +

+ Any attempt to modify items in the database will fail, regardless + of the actual permissions of any underlying files. +

+

+

+
+
+
+
Parameters:
readOnly - If true, configure the database in read-only mode.
+
+
+
+ +

+getReadOnly

+
+public boolean getReadOnly()
+
+
Return if the database is configured in read-only mode. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database is configured in read-only mode.
+
+
+
+ +

+setSortedDuplicates

+
+public void setSortedDuplicates(boolean sortedDuplicates)
+
+
Configure the database to support sorted, duplicate data items. +

+ Insertion when the key of the key/data pair being inserted already + exists in the database will be successful. The ordering of + duplicates in the database is determined by the duplicate comparison + function. +

+ If the application does not specify a duplicate data item comparison + function, a default lexical comparison will be used. +

+ If a primary database is to be associated with one or more secondary + databases, it may not be configured for duplicates. +

+ A Btree that supports duplicate data items cannot also be configured + for retrieval by record number. +

+ Calling this method affects the database, including all threads of +control accessing the database. +

+ If the database already exists when the database is opened, any database configuration specified by this method must be the same as the existing database or an error will be returned.

+

+

+
+
+
+
Parameters:
sortedDuplicates - If true, configure the database to support duplicate data items. + A value of false is illegal to this method, that is, once set, the +configuration cannot be cleared.
+
+
+
+ +
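+ A brief sketch of requesting sorted duplicates when a database is created (the environment handle env and the file name are illustrative assumptions):
+
+     DatabaseConfig config = new DatabaseConfig();
+     config.setAllowCreate(true);
+     config.setSortedDuplicates(true);   // duplicates ordered by the duplicate comparison function
+     Database db = env.openDatabase(null, "index.db", null, config);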

+setTransactional

+
+public void setTransactional(boolean transactional)
+
+
Enclose the database open within a transaction. +

+ If the call succeeds, the open operation will be recoverable. If + the call fails, no database will have been created. +

+ All future operations on this database, which are not explicitly enclosed in a transaction by the application, will be enclosed in a transaction within the library.

+

+

+
+
+
+
Parameters:
transactional - If true, enclose the database open within a transaction.
+
+
+
+ +

+getTransactional

+
+public boolean getTransactional()
+
+
Return if the database open is enclosed within a transaction. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database open is enclosed within a transaction.
+
+
+
+ +

+setBtreeComparator

+
+public void setBtreeComparator(Comparator btreeComparator)
+
+
By default, a byte by byte lexicographic comparison is used for + btree keys. To customize the comparison, supply a different + Comparator. +

+ The compare method is passed DatabaseEntry + objects representing keys that are stored in the database. If you know + how your data is organized in the byte array, then you can write a + comparison routine that directly examines the contents of the + arrays. Otherwise, you have to reconstruct your original objects, + and then perform the comparison. +

+

+
+
+
+
+
+
+
+ +
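+ As a sketch only, and assuming (per the description above) that the comparator receives DatabaseEntry objects, a reverse-order key comparator might look like the following; the variable config is a DatabaseConfig that has not yet been used to open the database, and the reverse ordering is purely illustrative:
+
+     Comparator reverseKeys = new Comparator() {
+         public int compare(Object o1, Object o2) {
+             byte[] a = ((DatabaseEntry) o1).getData();
+             byte[] b = ((DatabaseEntry) o2).getData();
+             // Byte-by-byte unsigned comparison, negated to reverse the default order.
+             int len = Math.min(a.length, b.length);
+             for (int i = 0; i < len; i++) {
+                 int diff = (a[i] & 0xff) - (b[i] & 0xff);
+                 if (diff != 0)
+                     return -diff;
+             }
+             return -(a.length - b.length);
+         }
+     };
+     config.setBtreeComparator(reverseKeys);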

+getBtreeComparator

+
+public Comparator getBtreeComparator()
+
+
Return the Comparator used to compare keys in a Btree. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The Comparator used to compare keys in a Btree.
+
+
+
+ +

+setBtreeMinKey

+
+public void setBtreeMinKey(int btMinKey)
+
+
Set the minimum number of key/data pairs intended to be stored on any + single Btree leaf page. +

+ This value is used to determine if key or data items will be stored + on overflow pages instead of Btree leaf pages. The value must be + at least 2; if the value is not explicitly set, a value of 2 is used. +

+ This method configures a database, not only operations performed using +the specified Database handle. +

+ This method may not be called after the database is opened. +If the database already exists when it is opened, +the information specified to this method will be ignored. +

+

+

+
+
+
+
Parameters:
btMinKey - The minimum number of key/data pairs intended to be stored on any + single Btree leaf page.
+
+
+
+ +

+getBtreeMinKey

+
+public int getBtreeMinKey()
+
+
Return the minimum number of key/data pairs intended to be stored + on any single Btree leaf page. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The minimum number of key/data pairs intended to be stored + on any single Btree leaf page.
+
+
+
+ +

+setByteOrder

+
+public void setByteOrder(int byteOrder)
+
+
Set the byte order for integers in the stored database metadata. +

+ The host byte order of the machine where the process is running will + be used if no byte order is set. +

+ + The access methods provide no guarantees about the byte ordering of the + application data stored in the database, and applications are + responsible for maintaining any necessary ordering. + +

+ This method configures a database, not only operations performed using +the specified Database handle. +

+ This method may not be called after the database is opened. +If the database already exists when it is opened, +the information specified to this method will be ignored. + If creating additional databases in a single physical file, information + specified to this method will be ignored and the byte order of the + existing databases will be used. +

+

+

+
+
+
+
Parameters:
byteOrder - The byte order as an integer; for example, big endian order is the + number 4,321, and little endian order is the number 1,234.
+
+
+
+ +

+getByteOrder

+
+public int getByteOrder()
+
+
Return the database byte order; a byte order of 4,321 indicates a + big endian order, and a byte order of 1,234 indicates a little + endian order. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The database byte order; a byte order of 4,321 indicates a + big endian order, and a byte order of 1,234 indicates a little + endian order.
+
+
+
+ +

+getByteSwapped

+
+public boolean getByteSwapped()
+
+
Return if the underlying database files were created on an architecture + of the same byte order as the current one. +

+ This information may be used to determine whether application data + needs to be adjusted for this architecture or not. +

+ This method may not be called before the +database has been opened. +

+

+

+
+
+
+ +
Returns:
Return false if the underlying database files were created on an + architecture of the same byte order as the current one, and true if + they were not (that is, big-endian on a little-endian machine, or + vice versa).
+
+
+
+ +
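+ A small sketch of the two byte-order calls together (obtaining the opened handle's configuration through Database.getConfig is an assumption, not something documented on this page):
+
+     config.setByteOrder(4321);   // request big-endian metadata at creation time
+     Database db = env.openDatabase(null, "data.db", null, config);
+     if (db.getConfig().getByteSwapped()) {
+         // The file was created on a machine of the opposite byte order;
+         // application data may need its own byte-order handling.
+     }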

+setBtreePrefixCalculator

+
+public void setBtreePrefixCalculator(BtreePrefixCalculator btreePrefixCalculator)
+
+
Set the Btree prefix callback. The prefix callback is used to determine + the amount by which keys stored on the Btree internal pages can be + safely truncated without losing their uniqueness. See the + Btree prefix + comparison section of the Berkeley DB Reference Guide for more + details about how this works. The usefulness of this is data-dependent, + but can produce significantly reduced tree sizes and search times in + some data sets. +

+ If no prefix callback or key comparison callback is specified by the + application, a default lexical comparison function is used to calculate + prefixes. If no prefix callback is specified and a key comparison + callback is specified, no prefix function is used. It is an error to + specify a prefix function without also specifying a Btree key comparison + function. +

+

+
+
+
+
+
+
+
+ +

+getBtreePrefixCalculator

+
+public BtreePrefixCalculator getBtreePrefixCalculator()
+
+
Return the Btree prefix callback. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The Btree prefix callback.
+
+
+
+ +

+setCacheSize

+
+public void setCacheSize(long cacheSize)
+
+
Set the size of the shared memory buffer pool, that is, the size of the +cache. +

+The cache should be the size of the normal working data set of the +application, with some small amount of additional memory for unusual +situations. (Note: the working set is not the same as the number of +pages accessed simultaneously, and is usually much larger.) +

+The default cache size is 256KB, and may not be specified as less than +20KB. Any cache size less than 500MB is automatically increased by 25% +to account for buffer pool overhead; cache sizes larger than 500MB are +used as specified. The current maximum size of a single cache is 4GB. +(All sizes are in powers-of-two, that is, 256KB is 2^18 not 256,000.) +

+Because databases opened within database environments use the cache +specified to the environment, it is an error to attempt to configure a +cache size in a database created within an environment. +

+This method may not be called after the database is opened. +


+

+

+
+
+
+
Parameters:
cacheSize - The size of the shared memory buffer pool, that is, the size of the +cache. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getCacheSize

+
+public long getCacheSize()
+
+
Return the size of the shared memory buffer pool, that is, the cache. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The size of the shared memory buffer pool, that is, the cache.
+
+
+
+ +

+setCacheCount

+
+public void setCacheCount(int cacheCount)
+
+
Set the number of shared memory buffer pools, that is, the number of +caches. +

+It is possible to specify caches larger than 4GB and/or large enough that they cannot be allocated contiguously on some architectures. For example, some releases of Solaris limit the amount of memory that may be allocated contiguously by a process. This method allows applications to break the cache up into a number of equally sized, separate pieces of memory.

+

+Because databases opened within database environments use the cache +specified to the environment, it is an error to attempt to configure +multiple caches in a database created within an environment. +

+This method may not be called after the database is opened. +


+

+

+
+
+
+
Parameters:
cacheCount - The number of shared memory buffer pools, that is, the number of caches. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getCacheCount

+
+public int getCacheCount()
+
+
Return the number of shared memory buffer pools, that is, the number + of caches. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The number of shared memory buffer pools, that is, the number + of caches.
+
+
+
+ +
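+ A sketch of carving the cache of a stand-alone database into several pieces; the sizes are illustrative, and a database opened inside an environment uses the environment's cache instead:
+
+     DatabaseConfig config = new DatabaseConfig();
+     config.setAllowCreate(true);
+     config.setCacheSize(2L * 1024 * 1024 * 1024);   // 2GB of cache in total
+     config.setCacheCount(4);                        // split into 4 separate regions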

+setChecksum

+
+public void setChecksum(boolean checksum)
+
+
Configure the database environment to do checksum verification of + pages read into the cache from the backing filestore. +

+ Berkeley DB uses the SHA1 Secure Hash Algorithm if encryption is + also configured for this database, and a general hash algorithm if + it is not. +

+ Calling this method only affects the specified Database handle +(and any other library handles opened within the scope of that handle). +

+ If the database already exists when the database is opened, any database configuration specified by this method must be the same as the existing database. If creating additional databases in a file, the checksum behavior specified must be consistent with the existing databases in the file or an error will be returned.

+

+

+
+
+
+
Parameters:
checksum - If true, configure the database environment to do checksum verification + of pages read into the cache from the backing filestore. + A value of false is illegal to this method, that is, once set, the +configuration cannot be cleared.
+
+
+
+ +

+getChecksum

+
+public boolean getChecksum()
+
+
Return if the database environment is configured to do checksum + verification of pages read into the cache from the backing + filestore. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment is configured to do checksum + verification of pages read into the cache from the backing + filestore.
+
+
+
+ +

+setDirtyRead

+
+public void setDirtyRead(boolean dirtyRead)
+
+
Configure the database to support dirty reads. +

+ Read operations on the database may request the return of modified + but not yet committed data. This flag must be specified on all + Database handles used to perform dirty reads or database + updates, otherwise requests for dirty reads may not be honored and + the read may block. +

+

+

+
+
+
+
Parameters:
dirtyRead - If true, configure the database to support dirty reads.
+
+
+
+ +

+getDirtyRead

+
+public boolean getDirtyRead()
+
+
Return if the database is configured to support dirty reads. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database is configured to support dirty reads.
+
+
+
+ +

+setDuplicateComparator

+
+public void setDuplicateComparator(Comparator duplicateComparator)
+
+
Set the duplicate data item comparison callback. The comparison + function is called whenever it is necessary to compare a data item + specified by the application with a data item currently stored in the + database. This comparator is only used if + DatabaseConfig.setSortedDuplicates is also configured. +

+ If no comparison function is specified, the data items are compared + lexically, with shorter data items collating before longer data items. +

+

+

+
+
+
+
Parameters:
duplicateComparator - the comparison callback for duplicate data items.
+
+
+
+ +

+getDuplicateComparator

+
+public Comparator getDuplicateComparator()
+
+
Return the duplicate data item comparison callback. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The duplicate data item comparison callback.
+
+
+
+ +

+setEncrypted

+
+public void setEncrypted(String password)
+
+
Set the password used to perform encryption and decryption. +

+ Because databases opened within environments use the password + specified to the environment, it is an error to attempt to set a + password in a database created within an environment. +

+ Berkeley DB uses the Rijndael/AES (also known as the Advanced + Encryption Standard and Federal Information Processing + Standard (FIPS) 197) algorithm for encryption or decryption. +

+

+
+
+
+
+
+
+
+ +

+getEncrypted

+
+public boolean getEncrypted()
+
+
Return if the database has been configured to perform encryption. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database has been configured to perform encryption.
+
+
+
+ +
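+ A minimal sketch combining encryption with page checksums for a stand-alone database; the password literal is illustrative only:
+
+     DatabaseConfig config = new DatabaseConfig();
+     config.setAllowCreate(true);
+     config.setEncrypted("illustrative-password");   // AES encryption; not legal for databases created within an environment
+     config.setChecksum(true);                       // SHA1 checksums are used once encryption is configured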

+setErrorHandler

+
+public void setErrorHandler(ErrorHandler errorHandler)
+
+
Set the function to be called if an error occurs. +

+When an error occurs in the Berkeley DB library, an exception is thrown. +In some cases, however, the error information returned to the +application may be insufficient to completely describe the cause of the +error, especially during initial application debugging. +

+The EnvironmentConfig.setErrorHandler and DatabaseConfig.setErrorHandler methods are used to enhance the mechanism for reporting error messages to the application. In some cases, when an error occurs, Berkeley DB will invoke the ErrorHandler object's error method. It is up to this method to display the error message in an appropriate manner.

+Alternatively, applications can use EnvironmentConfig.setErrorStream and DatabaseConfig.setErrorStream to +display the additional information via an output stream. Applications +should not mix these approaches. +

+This error-logging enhancement does not slow performance or significantly +increase application size, and may be run during normal operation as well +as during application debugging. +

+For Database handles opened inside of database environments, +calling this method affects the entire environment and is equivalent to +calling EnvironmentConfig.setErrorHandler. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
errorHandler - The function to be called if an error occurs.
+
+
+
+ +

+getErrorHandler

+
+public ErrorHandler getErrorHandler()
+
+
Return the function to be called if an error occurs. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The function to be called if an error occurs.
+
+
+
+ +

+setErrorPrefix

+
+public void setErrorPrefix(String errorPrefix)
+
+
Set the prefix string that appears before error messages. +

+For Database handles opened inside of database environments, +calling this method affects the entire environment and is equivalent to +calling EnvironmentConfig.setErrorPrefix. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
errorPrefix - The prefix string that appears before error messages.
+
+
+
+ +

+getErrorPrefix

+
+public String getErrorPrefix()
+
+
Return the prefix string that appears before error messages. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The prefix string that appears before error messages.
+
+
+
+ +

+setErrorStream

+
+public void setErrorStream(OutputStream errorStream)
+
+
Set an OutputStream for displaying error messages. +

+When an error occurs in the Berkeley DB library, an exception is thrown. +In some cases, however, the error information returned to the +application may be insufficient to completely describe the cause of the +error, especially during initial application debugging. +

+The EnvironmentConfig.setErrorStream and DatabaseConfig.setErrorStream methods are used to enhance the mechanism for reporting error messages to the application by setting an OutputStream to be used for displaying additional Berkeley DB error messages. In some cases, when an error occurs, Berkeley DB will output an additional error message to the specified stream.

+The error message will consist of the prefix string and a colon +(":") (if a prefix string was previously specified using +EnvironmentConfig.setErrorPrefix or DatabaseConfig.setErrorPrefix), an error string, and a trailing newline character. +

+Setting errorStream to null unconfigures the interface. +

+Alternatively, applications can use EnvironmentConfig.setErrorHandler and DatabaseConfig.setErrorHandler to capture +the additional error information in a way that does not use output +streams. Applications should not mix these approaches. +

+This error-logging enhancement does not slow performance or significantly +increase application size, and may be run during normal operation as well +as during application debugging. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
errorStream - The application-specified OutputStream for error messages.
+
+
+
+ +

+getErrorStream

+
+public OutputStream getErrorStream()
+
+
Return the OutputStream used for displaying error messages.

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The OutputStream used for displaying error messages.
+
+
+
+ +
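+ For example, routing the additional error text to standard error with an application prefix might look like the following sketch; System.err is an ordinary java.io.PrintStream, which is an OutputStream:
+
+     DatabaseConfig config = new DatabaseConfig();
+     config.setErrorPrefix("inventory");   // messages appear as "inventory: <error text>"
+     config.setErrorStream(System.err);    // write additional Berkeley DB error messages to stderr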

+setFeedbackHandler

+
+public void setFeedbackHandler(FeedbackHandler feedbackHandler)
+
+
Set an object whose methods are called to provide feedback. +

+Some operations performed by the Berkeley DB library can take +non-trivial amounts of time. This method can be used by applications +to monitor progress within these operations. When an operation is +likely to take a long time, Berkeley DB will call the object's methods +with progress information. +

+It is up to the object's methods to display this information in an +appropriate manner. +

+This method configures only operations performed using a single Environment handle.

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
feedbackHandler - An object whose methods are called to provide feedback.
+
+
+
+ +

+getFeedbackHandler

+
+public FeedbackHandler getFeedbackHandler()
+
+
Return the object whose methods are called to provide feedback.

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The object whose methods are called to provide feedback.
+
+
+
+ +

+setHashFillFactor

+
+public void setHashFillFactor(int hashFillFactor)
+
+
Set the desired density within the hash table. +

+ If no value is specified, the fill factor will be selected dynamically + as pages are filled. +

+ The density is an approximation of the number of keys allowed to accumulate in any one bucket, determining when the hash table grows or shrinks. If you know the average sizes of the keys and data in your data set, setting the fill factor can enhance performance. A reasonable rule for computing the fill factor is to set it to the following:

+        (pagesize - 32) / (average_key_size + average_data_size + 8)
+    
+

+ This method configures a database, not only operations performed using +the specified Database handle. +

+ This method may not be called after the database is opened. +If the database already exists when it is opened, +the information specified to this method will be ignored. +

+

+

+
+
+
+
Parameters:
hashFillFactor - The desired density within the hash table.
+
+
+
+ +
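+ Applying the rule above to, say, a 4096-byte page with 16-byte keys and 100-byte data items (all numbers illustrative) gives roughly (4096 - 32) / (16 + 100 + 8) = 32. A sketch, assuming DatabaseType.HASH is the Hash type constant, might be:
+
+     DatabaseConfig config = new DatabaseConfig();
+     config.setAllowCreate(true);
+     config.setType(DatabaseType.HASH);                        // a Hash database
+     config.setHashFillFactor((4096 - 32) / (16 + 100 + 8));   // about 32 keys per bucket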

+getHashFillFactor

+
+public int getHashFillFactor()
+
+
Return the hash table density. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The hash table density.
+
+
+
+ +

+setHasher

+
+public void setHasher(Hasher hasher)
+
+
Set a database-specific hash function. +

+ If no hash function is specified, a default hash function is used. + Because no hash function performs equally well on all possible data, + the user may find that the built-in hash function performs poorly + with a particular data set. +

+ This method configures operations performed using the specified +Database object, not all operations performed on the underlying +database. +

+ This method may not be called after the database is opened. +If the database already exists when it is opened, +the information specified to this method must be the same as that +historically used to create the database or corruption can occur. +

+

+

+
+
+
+
Parameters:
hasher - A database-specific hash function.
+
+
+
+ +

+getHasher

+
+public Hasher getHasher()
+
+
Return the database-specific hash function. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The database-specific hash function.
+
+
+
+ +

+setHashNumElements

+
+public void setHashNumElements(int hashNumElements)
+
+
Set an estimate of the final size of the hash table. +

+ In order for the estimate to be used when creating the database, the DatabaseConfig.setHashFillFactor method must also be called. If the estimate or fill factor is not set or is set too low, hash tables will still expand gracefully as keys are entered, although a slight performance degradation may be noticed.

+ This method configures a database, not only operations performed using +the specified Database handle. +

+ This method may not be called after the database is opened. +If the database already exists when it is opened, +the information specified to this method will be ignored. +

+

+

+
+
+
+
Parameters:
hashNumElements - An estimate of the final size of the hash table.
+
+
+
+ +

+getHashNumElements

+
+public int getHashNumElements()
+
+
Return the estimate of the final size of the hash table. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The estimate of the final size of the hash table.
+
+
+
+ +

+setMessageHandler

+
+public void setMessageHandler(MessageHandler messageHandler)
+
+
Set a function to be called with an informational message. +

+There are interfaces in the Berkeley DB library which either directly +output informational messages or statistical information, or configure +the library to output such messages when performing other operations, +EnvironmentConfig.setVerboseDeadlock for example. +

+The EnvironmentConfig.setMessageHandler and +DatabaseConfig.setMessageHandler methods are used to display +these messages for the application. +

+Setting messageHandler to null unconfigures the interface. +

+Alternatively, you can use EnvironmentConfig.setMessageStream and DatabaseConfig.setMessageStream to send the additional information directly to an output stream. You should not mix these approaches.

+For Database handles opened inside of database environments, +calling this method affects the entire environment and is equivalent to +calling EnvironmentConfig.setMessageHandler. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
messageHandler - The application-specified function for informational messages.
+
+
+
+ +

+getMessageHandler

+
+public MessageHandler getMessageHandler()
+
+
Return the function to be called with an informational message. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The function to be called with an informational message.
+
+
+
+ +

+setMessageStream

+
+public void setMessageStream(OutputStream messageStream)
+
+
Set an OutputStream for displaying informational messages. +

+There are interfaces in the Berkeley DB library which either directly +output informational messages or statistical information, or configure +the library to output such messages when performing other operations, +EnvironmentConfig.setVerboseDeadlock for example. +

+The EnvironmentConfig.setMessageStream and +DatabaseConfig.setMessageStream methods are used to display +these messages for the application. In this case, the message will +include a trailing newline character. +

+Setting messageStream to null unconfigures the interface. +

+Alternatively, you can use EnvironmentConfig.setMessageHandler +and DatabaseConfig.setMessageHandler to capture the additional +information in a way that does not use output streams. You should not +mix these approaches. +

+For Database handles opened inside of database environments, +calling this method affects the entire environment and is equivalent to +calling EnvironmentConfig.setMessageStream. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
messageStream - The application-specified OutputStream for informational messages.
+
+
+
+ +

+getMessageStream

+
+public OutputStream getMessageStream()
+
+
Return the OutputStream used for displaying informational messages.

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The OutputStream used for displaying informational messages.
+
+
+
+ +

+setMode

+
+public void setMode(int mode)
+
+
On UNIX systems or in IEEE/ANSI Std 1003.1 (POSIX) environments, files + created by the database open are created with mode mode + (as described in the chmod(2) manual page) and modified + by the process' umask value at the time of creation (see the + umask(2) manual page). Created files are owned by the + process owner; the group ownership of created files is based on the + system and directory defaults, and is not further specified by Berkeley + DB. System shared memory segments created by the database open are + created with mode mode, unmodified by the process' umask + value. If mode is 0, the database open will use a default + mode of readable and writable by both owner and group. +

+ On Windows systems, the mode parameter is ignored. +

+

+

+
+
+
+
Parameters:
mode - the mode used to create files
+
+
+
+ +

+getMode

+
+public long getMode()
+
+
Return the mode used to create files. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The mode used to create files.
+
+
+
+ +

+setNoMMap

+
+public void setNoMMap(boolean noMMap)
+
+
Configure the library to not map this database into memory. +

+

+

+
+
+
+
Parameters:
noMMap - If true, configure the library to not map this database into memory.
+
+
+
+ +

+getNoMMap

+
+public boolean getNoMMap()
+
+
Return if the library is configured to not map this database into + memory. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the library is configured to not map this database into + memory.
+
+
+
+ +

+setPageSize

+
+public void setPageSize(int pageSize)
+
+
Set the size of the pages used to hold items in the database, in bytes. +

+ The minimum page size is 512 bytes, the maximum page size is 64K bytes, + and the page size must be a power-of-two. If the page size is not + explicitly set, one is selected based on the underlying filesystem I/O + block size. The automatically selected size has a lower limit of 512 + bytes and an upper limit of 16K bytes. +

+ This method configures a database, not only operations performed using +the specified Database handle. +

+ This method may not be called after the database is opened. +If the database already exists when it is opened, +the information specified to this method will be ignored. + If creating additional databases in a file, the page size specified must + be consistent with the existing databases in the file or an error will + be returned. +

+

+

+
+
+
+
Parameters:
pageSize - The size of the pages used to hold items in the database, in bytes.
+
+
+
+ +

+getPageSize

+
+public int getPageSize()
+
+
Return the size of the pages used to hold items in the database, in bytes. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The size of the pages used to hold items in the database, in bytes.
+
+
+
+ +
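+ A short sketch tying the page-layout settings together before the database is created; the 4KB page size and the minimum-key value are examples only:
+
+     DatabaseConfig config = new DatabaseConfig();
+     config.setAllowCreate(true);
+     config.setPageSize(4096);    // power of two between 512 bytes and 64KB
+     config.setBtreeMinKey(4);    // aim for at least 4 key/data pairs per Btree leaf page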

+setPanicHandler

+
+public void setPanicHandler(PanicHandler panicHandler)
+
+
Set the function to be called if the database environment panics. +

+Errors can occur in the Berkeley DB library where the only solution is +to shut down the application and run recovery (for example, if Berkeley +DB is unable to allocate heap memory). In such cases, the Berkeley DB +methods will throw a RunRecoveryException. It is often easier +to simply exit the application when such errors occur rather than +gracefully return up the stack. This method specifies a function to be +called when RunRecoveryException is about to be thrown from a +Berkeley DB method. +

+For Database handles opened inside of database environments, +calling this method affects the entire environment and is equivalent to +calling EnvironmentConfig.setPanicHandler. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
panicHandler - The function to be called if the database environment panics.
+
+
+
+ +

+getPanicHandler

+
+public PanicHandler getPanicHandler()
+
+
Return the function to be called if the database environment panics. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The function to be called if the database environment panics.
+
+
+
+ +

+setQueueExtentSize

+
+public void setQueueExtentSize(int queueExtentSize)
+
+
Set the size of the extents used to hold pages in a Queue database, + specified as a number of pages. +

+ Each extent is created as a separate physical file. If no extent + size is set, the default behavior is to create only a single + underlying database file. +

+ This method configures a database, not only operations performed using +the specified Database handle. +

+ This method may not be called after the database is opened. +If the database already exists when it is opened, +the information specified to this method will be ignored. +

+

+

+
+
+
+
Parameters:
queueExtentSize - The number of pages in a Queue database extent.
+
+
+
+ +

+getQueueExtentSize

+
+public int getQueueExtentSize()
+
+
Return the size of the extents used to hold pages in a Queue database, + specified as a number of pages. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The size of the extents used to hold pages in a Queue database, + specified as a number of pages.
+
+
+
+ +

+setQueueInOrder

+
+public void setQueueInOrder(boolean queueInOrder)
+
+
Configure Database.consume to return key/data pairs in + order, always returning the key/data item from the head of the + queue. +

+ The default behavior of queue databases is optimized for multiple readers, and does not guarantee that records will be retrieved in the order they are added to the queue. Specifically, if a writing thread adds multiple records to an empty queue, reading threads may skip some of the initial records when the next call to retrieve a key/data pair returns.

+ This flag configures the Database.consume method to verify + that the record being returned is in fact the head of the queue. + This will increase contention and reduce concurrency when there are + many reading threads. +

+ Calling this method only affects the specified Database handle +(and any other library handles opened within the scope of that handle). +

+

+

+
+
+
+
Parameters:
queueInOrder - If true, configure the Database.consume method to return + key/data pairs in order, always returning the key/data item from the + head of the queue. + A value of false is illegal to this method, that is, once set, the +configuration cannot be cleared.
+
+
+
+ +

+getQueueInOrder

+
+public boolean getQueueInOrder()
+
+
Return if the Database.consume method is configured to return + key/data pairs in order, always returning the key/data item from the + head of the queue. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the Database.consume method is configured to return + key/data pairs in order, always returning the key/data item from the + head of the queue.
+
+
+
+ +

+setRecordNumberAppender

+
+public void setRecordNumberAppender(RecordNumberAppender recnoAppender)
+
+
Configure Database.append to call the function after the + record number has been selected but before the data has been stored + into the database. +

+ This method configures operations performed using the specified +Database object, not all operations performed on the underlying +database. +

+ This method may not be called after the database is opened. +

+

+

+
+
+
+
Parameters:
recnoAppender - The function to call after the record number has been selected but + before the data has been stored into the database.
+
+
+
+ +

+getRecordNumberAppender

+
+public RecordNumberAppender getRecordNumberAppender()
+
+
Return the function to call after the record number has been + selected but before the data has been stored into the database. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The function to call after the record number has been + selected but before the data has been stored into the database.
+
+
+
+ +

+setRecordDelimiter

+
+public void setRecordDelimiter(int recordDelimiter)
+
+
Set the delimiting byte used to mark the end of a record in the backing + source file for the Recno access method. +

+ This byte is used for variable length records if a backing source + file is specified. If a backing source file is specified and no + delimiting byte was specified, newline characters (that is, ASCII + 0x0a) are interpreted as end-of-record markers. +

+ This method configures a database, not only operations performed using +the specified Database handle. +

+ This method may not be called after the database is opened. +If the database already exists when it is opened, +the information specified to this method will be ignored. +

+

+

+
+
+
+
Parameters:
recordDelimiter - The delimiting byte used to mark the end of a record in the backing + source file for the Recno access method.
+
+
+
+ +

+getRecordDelimiter

+
+public int getRecordDelimiter()
+
+
Return the delimiting byte used to mark the end of a record in the + backing source file for the Recno access method. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The delimiting byte used to mark the end of a record in the + backing source file for the Recno access method.
+
+
+
+ +

+setRecordLength

+
+public void setRecordLength(int recordLength)
+
+
Specify the database record length, in bytes. +

+ For the Queue access method, specify the record length. For the + Queue access method, the record length must be enough smaller than + the database's page size that at least one record plus the database + page's metadata information can fit on each database page. +

+ For the Recno access method, specify that the records are fixed-length (rather than byte-delimited) and set the record length.

+ Any records added to the database that are less than the specified + length are automatically padded (see + DatabaseConfig.setRecordPad for more information). +

+ Any attempt to insert records into the database that are greater + than the specified length will cause the call to fail. +

+ This method configures a database, not only operations performed using +the specified Database handle. +

+ This method may not be called after the database is opened. +If the database already exists when it is opened, +the information specified to this method will be ignored. +

+

+

+
+
+
+
Parameters:
recordLength - The database record length, in bytes.
+
+
+
+ +

+getRecordLength

+
+public int getRecordLength()
+
+
Return the database record length, in bytes. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The database record length, in bytes.
+
+
+
+ +

+setBtreeRecordNumbers

+
+public void setBtreeRecordNumbers(boolean btreeRecordNumbers)
+
+
Configure the Btree to support retrieval by record number. +

+ Logical record numbers in Btree databases are mutable in the face of + record insertion or deletion. +

+ Maintaining record counts within a Btree introduces a serious point + of contention, namely the page locations where the record counts are + stored. In addition, the entire database must be locked during both + insertions and deletions, effectively single-threading the database + for those operations. Configuring a Btree for retrieval by record + number can result in serious performance degradation for some + applications and data sets. +

+ Retrieval by record number may not be configured for a Btree that also + supports duplicate data items. +

+ Calling this method affects the database, including all threads of +control accessing the database. +

+ If the database already exists when the database is opened, any database configuration specified by this method must be the same as the existing database or an error will be returned.

+

+

+
+
+
+
Parameters:
btreeRecordNumbers - If true, configure the Btree to support retrieval by record number. + A value of false is illegal to this method, that is, once set, the +configuration cannot be cleared.
+
+
+
+ +

+getBtreeRecordNumbers

+
+public boolean getBtreeRecordNumbers()
+
+
Return if the Btree is configured to support retrieval by record number. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the Btree is configured to support retrieval by record number.
+
+
+
+ +

+setRecordPad

+
+public void setRecordPad(int recordPad)
+
+
Set the padding character for short, fixed-length records for the Queue + and Recno access methods. +

+ If no pad character is specified, "space" characters (that is, ASCII + 0x20) are used for padding. +

+ This method configures a database, not only operations performed using +the specified Database handle. +

+ This method may not be called after the database is opened. +If the database already exists when it is opened, +the information specified to this method will be ignored. +

+

+

+
+
+
+
Parameters:
recordPad - The padding character for short, fixed-length records for the Queue + and Recno access methods. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getRecordPad

+
+public int getRecordPad()
+
+
Return the padding character for short, fixed-length records for the + Queue and Recno access methods. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The padding character for short, fixed-length records for the + Queue and Recno access methods.
+
+
+
+ +
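+ Putting the Queue-related settings together, a sketch of creating a fixed-length queue might read as follows; DatabaseType.QUEUE and all of the sizes are assumptions or illustrative values:
+
+     DatabaseConfig config = new DatabaseConfig();
+     config.setAllowCreate(true);
+     config.setType(DatabaseType.QUEUE);
+     config.setRecordLength(128);        // fixed 128-byte records
+     config.setRecordPad(' ');           // pad short records with spaces (the default)
+     config.setQueueExtentSize(1024);    // each extent file holds 1024 pages
+     config.setQueueInOrder(true);       // Database.consume always returns the head of the queue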

+setRecordSource

+
+public void setRecordSource(File recordSource)
+
+
Set the underlying source file for the Recno access method. +

+ The purpose of the source file is to provide fast access and + modification to databases that are normally stored as flat text + files. +

+ The recordSource parameter specifies an underlying flat text + database file that is read to initialize a transient record number + index. In the case of variable length records, the records are + separated, as specified by the + DatabaseConfig.setRecordDelimiter method. For example, + standard UNIX byte stream files can be interpreted as a sequence of + variable length records separated by newline characters (that is, ASCII + 0x0a). +

+ In addition, when cached data would normally be written back to the + underlying database file (for example, the Database.close + or Database.sync methods are called), the in-memory copy + of the database will be written back to the source file. +

+ By default, the backing source file is read lazily; that is, records + are not read from the file until they are requested by the application. + + If multiple processes (not threads) are accessing a Recno database + concurrently, and are either inserting or deleting records, the backing + source file must be read in its entirety before more than a single + process accesses the database, and only that process should specify + the backing source file as part of opening the database. See the + DatabaseConfig.setSnapshot method for more information. + +

+ + Reading and writing the backing source file cannot be + transaction-protected because it involves filesystem operations that + are not part of the Database transaction methodology. For + this reason, if a temporary database is used to hold the records, + it is possible to lose the contents of the source file, for example, + if the system crashes at the right instant. If a file is used to + hold the database, normal database recovery on that file can be used + to prevent information loss, although it is still possible that the + contents of the source file will be lost if the system crashes. + +

+ The source file must already exist (but may be zero-length) when + the database is opened. +

+ It is not an error to specify a read-only source file when creating + a database, nor is it an error to modify the resulting database. + However, any attempt to write the changes to the backing source file + using either the Database.sync or Database.close + methods will fail, of course. Specify the noSync argument to the + Database.close method to stop it from attempting to write + the changes to the backing file; instead, they will be silently + discarded. +

+ For all of the previous reasons, the source file is generally used + to specify databases that are read-only for Berkeley DB + applications; and that are either generated on the fly by software + tools or modified using a different mechanism -- for example, a text + editor. +

+ This method configures operations performed using the specified +Database object, not all operations performed on the underlying +database. +

+ This method may not be called after the database is opened. +If the database already exists when it is opened, +the information specified to this method must be the same as that +historically used to create the database or corruption can occur. +

+

+

+
+
+
+
Parameters:
recordSource - The name of an underlying flat text database file that is read to + initialize a transient record number index. In the case of variable + length records, the records are separated, as specified by the + DatabaseConfig.setRecordDelimiter method. For example, + standard UNIX byte stream files can be interpreted as a sequence of + variable length records separated by newline characters (that is, ASCII + 0x0a). +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +
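+ A sketch of a Recno database backed by a flat text file with one record per line; the file name is illustrative and DatabaseType.RECNO is an assumption about the type constant:
+
+     DatabaseConfig config = new DatabaseConfig();
+     config.setAllowCreate(true);
+     config.setType(DatabaseType.RECNO);
+     config.setRecordSource(new java.io.File("addresses.txt"));   // flat text backing file
+     config.setRecordDelimiter('\n');    // newline-terminated, variable-length records
+     config.setSnapshot(true);           // read the whole source file when the database is opened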

+getRecordSource

+
+public File getRecordSource()
+
+
Return the name of an underlying flat text database file that is + read to initialize a transient record number index. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The name of an underlying flat text database file that is + read to initialize a transient record number index.
+
+
+
+ +

+setRenumbering

+
+public void setRenumbering(boolean renumbering)
+
+
Configure the logical record numbers to be mutable, and change as + records are added to and deleted from the database. +

+ For example, the deletion of record number 4 causes records numbered + 5 and greater to be renumbered downward by one. If a cursor was + positioned to record number 4 before the deletion, it will refer to + the new record number 4, if any such record exists, after the + deletion. If a cursor was positioned after record number 4 before + the deletion, it will be shifted downward one logical record, + continuing to refer to the same record as it did before. +

+ Creating new records will cause the creation of multiple records if + the record number is more than one greater than the largest record + currently in the database. For example, creating record 28, when + record 25 was previously the last record in the database, will + create records 26 and 27 as well as 28. Attempts to retrieve + records that were created in this manner will result in an error + return of OperationStatus.KEYEMPTY. +

+ If a created record is not at the end of the database, all records + following the new record will be automatically renumbered upward by one. + For example, the creation of a new record numbered 8 causes records + numbered 8 and greater to be renumbered upward by one. If a cursor was + positioned to record number 8 or greater before the insertion, it will + be shifted upward one logical record, continuing to refer to the same + record as it did before. +

+ For these reasons, concurrent access to a Recno database configured + with mutable record numbers may be largely meaningless, although it + is supported. +

+ Calling this method affects the database, including all threads of +control accessing the database. +

+ If the database already exists when the database is opened, any database configuration specified by this method must be the same as the existing database or an error will be returned.

+

+

+
+
+
+
Parameters:
renumbering - If true, configure the logical record numbers to be mutable, and + change as records are added to and deleted from the database. + A value of false is illegal to this method, that is, once set, the +configuration cannot be cleared.
+
+
+
+ +

+getRenumbering

+
+public boolean getRenumbering()
+
+
Return if the logical record numbers are mutable, and change as + records are added to and deleted from the database. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the logical record numbers are mutable, and change as + records are added to and deleted from the database.
+
+
+
+ +

+setReverseSplitOff

+
+public void setReverseSplitOff(boolean reverseSplitOff)
+
+
Configure the Btree to not do reverse splits. +

+ As pages are emptied in a database, the Btree implementation + attempts to coalesce empty pages into higher-level pages in order + to keep the database as small as possible and minimize search time. + This can hurt performance in applications with cyclical data + demands; that is, applications where the database grows and shrinks + repeatedly. For example, because Berkeley DB does page-level locking, + the maximum level of concurrency in a database of two pages is far + smaller than that in a database of 100 pages, so a database that has + shrunk to a minimal size can cause severe deadlocking when a new + cycle of data insertion begins. +

+ Calling this method only affects the specified Database handle +(and any other library handles opened within the scope of that handle). +

+

+

+
+
+
+
Parameters:
reverseSplitOff - If true, configure the Btree to not do reverse splits. + A value of false is illegal to this method, that is, once set, the +configuration cannot be cleared.
+
+
+
+ +

+getReverseSplitOff

+
+public boolean getReverseSplitOff()
+
+
Return if the Btree has been configured to not do reverse splits. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the Btree has been configured to not do reverse splits.
+
+
+
+ +

+setUnsortedDuplicates

+
+public void setUnsortedDuplicates(boolean unsortedDuplicates)
+
+
Configure the database to support unsorted duplicate data items. +

+ Insertion when the key of the key/data pair being inserted already + exists in the database will be successful. The ordering of + duplicates in the database is determined by the order of insertion, + unless the ordering is otherwise specified by use of a database + cursor operation. +

+ If a primary database is to be associated with one or more secondary + databases, it may not be configured for duplicates. +

+ Sorted duplicates are preferred to unsorted duplicates for + performance reasons. Unsorted duplicates should only be used by + applications wanting to order duplicate data items manually. +

+ Calling this method affects the database, including all threads of +control accessing the database. +

+ If the database already exists when the database is opened, any database configuration specified by this method must be the same as the existing database or an error will be returned.

+

+

+
+
+
+
Parameters:
unsortedDuplicates - If true, configure the database to support unsorted duplicate data items. + A value of false is illegal to this method, that is, once set, the +configuration cannot be cleared.
+
+
+
+ +

+getUnsortedDuplicates

+
+public boolean getUnsortedDuplicates()
+
+
Return if the database is configured to support duplicate data items. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database is configured to support duplicate data items.
+
+
+
+ +

+setSnapshot

+
+public void setSnapshot(boolean snapshot)
+
+
Specify that any specified backing source file be read in its entirety + when the database is opened. +

+ If this flag is not specified, the backing source file may be read + lazily. +

+ Calling this method only affects the specified Database handle +(and any other library handles opened within the scope of that handle). +

+

+

+
+
+
+
Parameters:
snapshot - If true, any specified backing source file will be read in its entirety + when the database is opened. + A value of false is illegal to this method, that is, once set, the +configuration cannot be cleared.
+
+
+
+ +

+getSnapshot

+
+public boolean getSnapshot()
+
+
Return if any specified backing source file will be read in its entirety when the database is opened.

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If any specified backing source file will be read in its entirety when the database is opened.
+
+
+
+ +

+setTransactionNotDurable

+
+public void setTransactionNotDurable(boolean transactionNotDurable)
+
+
Configure the database environment to not write log records for this + database. +

+ This means that updates of this database exhibit the ACI (atomicity, + consistency, and isolation) properties, but not D (durability); that + is, database integrity will be maintained, but if the application + or system fails, integrity will not persist. The database file must + be verified and/or restored from backup after a failure. In order + to ensure integrity after application shut down, the database + must be flushed to disk before the database handles are closed, + or all + database changes must be flushed from the database environment cache + using Environment.checkpoint. +

+ All database handles for a single physical file must call this method, + including database handles for different databases in a physical file. +

+ Calling this method only affects the specified Database handle +(and any other library handles opened within the scope of that handle). +

+

+

+
+
+
+
Parameters:
transactionNotDurable - If true, configure the database environment to not write log records + for this database. + A value of false is illegal to this method, that is, once set, the +configuration cannot be cleared.
+
+
+
+ +

+getTransactionNotDurable

+
+public boolean getTransactionNotDurable()
+
+
Return if the database environment is configured to not write log + records for this database. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment is configured to not write log + records for this database.
+
+
+
+ +

+setTruncate

+
+public void setTruncate(boolean truncate)
+
+
Configure the database to be physically truncated by truncating the + underlying file, discarding all previous databases it might have + held. +

+ Underlying filesystem primitives are used to implement this + configuration. For this reason, it is applicable only to a physical + file and cannot be used to discard databases within a file. +

+ This configuration option cannot be lock or transaction-protected, and + it is an error to specify it in a locking or transaction-protected + database environment. +

+

+

+
+
+
+
Parameters:
truncate - If true, configure the database to be physically truncated by truncating + the underlying file, discarding all previous databases it might have + held.
+
+
+
+ +

+getTruncate

+
+public boolean getTruncate()
+
+
Return if the database has been configured to be physically truncated + by truncating the underlying file, discarding all previous databases + it might have held. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database has been configured to be physically truncated + by truncating the underlying file, discarding all previous databases + it might have held.
+
+
+
+ +

+setType

+
+public void setType(DatabaseType type)
+
+
Configure the type of the database. +

+ If the type is DB_UNKNOWN, the database must already exist.

+

+

+
+
+
+
Parameters:
type - The type of the database.
+
+
+
+ +

+getType

+
+public DatabaseType getType()
+
+
Return the type of the database. +

+ This method may be used to determine the type of the database after + opening it. +

+ This method may not be called before the +database has been opened. +

+

+

+
+
+
+ +
Returns:
The type of the database.
+
+
+
+ +

+setXACreate

+
+public void setXACreate(boolean xaCreate)
+
+
Configure the database to be accessed via applications running under + an X/Open conformant Transaction Manager. +

+ The database will be opened in the environment specified by the + OPENINFO parameter of the GROUPS section of the ubbconfig file. +

+

+

+
+
+
+
Parameters:
xaCreate - If true, configure the database to be accessed via applications + running under an X/Open conformant Transaction Manager.
+
+
+
+ +

+getXACreate

+
+public boolean getXACreate()
+
+
Return if the database has been configured to be accessed via applications + running under an X/Open conformant Transaction Manager. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database has been configured to be accessed via applications + running under an X/Open conformant Transaction Manager.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/DatabaseEntry.html b/db/docs/java/com/sleepycat/db/DatabaseEntry.html new file mode 100644 index 000000000..9861aa647 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/DatabaseEntry.html @@ -0,0 +1,956 @@ + + + + + + +DatabaseEntry (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class DatabaseEntry

+
+java.lang.Object
+  extended bycom.sleepycat.db.DatabaseEntry
+
+
+
Direct Known Subclasses:
MultipleEntry
+
+
+
+
public class DatabaseEntry
extends Object
+ +

+Encodes database key and data items as a byte array. +

+Storage and retrieval for the Database and Cursor methods +are based on key/data pairs. Both key and data items are represented by +DatabaseEntry objects. Key and data byte arrays may refer to arrays of zero +length up to arrays of essentially unlimited length. +

+The DatabaseEntry class provides simple access to an underlying object whose +elements can be examined or changed. DatabaseEntry objects can be +subclassed, providing a way to associate with it additional data or +references to other structures. +

+Access to DatabaseEntry objects is not re-entrant. In particular, if +multiple threads simultaneously access the same DatabaseEntry object using +Database or Cursor methods, the results are undefined. +

+DatabaseEntry objects may be used in conjunction with the object mapping +support provided in the com.sleepycat.bind package. +

+

Input and Output Parameters

+

+DatabaseEntry objects are used for both input data (when writing to a +database or specifying a search parameter) and output data (when reading +from a database). For certain methods, one parameter may be an input +parameter and another may be an output parameter. For example, the +Database.get(com.sleepycat.db.Transaction, com.sleepycat.db.DatabaseEntry, com.sleepycat.db.DatabaseEntry, com.sleepycat.db.LockMode) method has an input key parameter and an output data +parameter. The documentation for each method describes whether its +parameters are input or output parameters. +

+For DatabaseEntry input parameters, the caller is responsible for +initializing the data array of the DatabaseEntry. For DatabaseEntry output +parameters, the method called will initialize the data array. +

+Also note that for DatabaseEntry output parameters, the method called will +always allocate a new byte array. The byte array specified by the caller +will not be used. Therefore, after calling a method that returns output +parameters, the caller can safely keep a reference to the byte array return +by getData() without danger that the array will be overwritten in a +subsequent call. +
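To make the input/output distinction concrete, here is a minimal sketch of a read, assuming an open Database handle db and an arbitrary key value: the key entry is initialized by the caller (input), while the data entry's byte array is allocated by the library (output).

    import com.sleepycat.db.*;

    class GetSketch {
        // Sketch of input vs. output DatabaseEntry parameters.
        // Assumes "db" is an open Database handle; the key bytes are arbitrary.
        static void read(Database db) throws DatabaseException {
            // Input parameter: the caller initializes the data array.
            DatabaseEntry key = new DatabaseEntry("some-key".getBytes());

            // Output parameter: the method will allocate a new byte array.
            DatabaseEntry data = new DatabaseEntry();

            OperationStatus status = db.get(null, key, data, LockMode.DEFAULT);
            if (status == OperationStatus.SUCCESS) {
                // Safe to keep a reference: the array was newly allocated.
                byte[] value = data.getData();
                System.out.println("read " + data.getSize() + " bytes");
            }
        }
    }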

+

Offset and Size Properties

+

+By default the Offset property is zero and the Size property is the length +of the byte array. However, to allow for optimizations involving the +partial use of a byte array, the Offset and Size may be set to non-default +values. +

+For DatabaseEntry output parameters, the Size will always be set to the +length of the byte array and the Offset will always be set to zero. +

+However, for DatabaseEntry input parameters the Offset and Size are set to +non-default values by the built-in tuple and serial bindings. For example, +with a tuple or serial binding the byte array is grown dynamically as data +is output, and the Size is set to the number of bytes actually used. For a +serial binding, the Offset is set to a non-zero value in order to implement +an optimization having to do with the serialization stream header. +

+Therefore, for output DatabaseEntry parameters the application can assume +that the Offset is zero and the Size is the length of the byte array. +However, for input DatabaseEntry parameters the application should not make +this assumption. In general, it is safest for the application to always +honor the Size and Offset properties, rather than assuming they have default +values. +

+

Partial Offset and Length Properties

+

+By default the specified data (byte array, offset and size) corresponds to +the full stored key or data item. Optionally, the Partial property can be +set to true, and the PartialOffset and PartialLength properties are used to +specify the portion of the key or data item to be read or written. For +details, see the setPartial(int,int,boolean) method. +

+Note that the Partial properties are set only by the caller. They will +never be set by a Database or Cursor method, nor will they ever be set by +bindings. Therefore, the application can assume that the Partial properties +are not set, unless the application itself sets them explicitly. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + +
+Constructor Summary
DatabaseEntry() + +
+          Construct a DatabaseEntry with null data.
DatabaseEntry(byte[] data) + +
+          Construct a DatabaseEntry with a given byte array.
DatabaseEntry(byte[] data, + int offset, + int size) + +
+          Constructs a DatabaseEntry with a given byte array, offset and size.
+  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ byte[]getData() + +
+          Return the byte array.
+ intgetOffset() + +
+          Return the byte offset into the data array.
+ booleangetPartial() + +
+          Return whether this DatabaseEntry is configured to read or write partial + records.
+ intgetPartialLength() + +
+          Return the byte length of the partial record being read or written by + the application, in bytes.
+ intgetPartialOffset() + +
+          Return the offset of the partial record being read or written by the + application, in bytes.
+ intgetRecordNumber() + +
+          Return the record number encoded in this entry's buffer.
+ booleangetReuseBuffer() + +
+          Return whether the entry is configured to reuse the buffer.
+ intgetSize() + +
+          Return the byte size of the data array.
+ booleangetUserBuffer() + +
+          Return whether the buffer in this entry is owned by the + application.
+ intgetUserBufferLength() + +
+          Return the length of the application's buffer.
+ voidsetData(byte[] data) + +
+          Sets the byte array.
+ voidsetData(byte[] data, + int offset, + int size) + +
+          Sets the byte array, offset and size.
+ voidsetOffset(int offset) + +
+          Set the byte offset into the data array.
+ voidsetPartial(boolean partial) + +
+          Configure this DatabaseEntry to read or write partial records.
+ voidsetPartial(int doff, + int dlen, + boolean partial) + +
+          Configures this DatabaseEntry to read or write partial records.
+ voidsetPartialLength(int dlen) + +
+          Set the byte length of the partial record being read or written by + the application, in bytes.
+ voidsetPartialOffset(int doff) + +
+          Set the offset of the partial record being read or written by the + application, in bytes.
+ voidsetRecordNumber(int recno) + +
+          Initialize the entry from a logical record number.
+ voidsetReuseBuffer(boolean reuse) + +
+          Configures the entry to try to reuse the buffer before allocating a new + one.
+ voidsetSize(int size) + +
+          Set the byte size of the data array.
+ voidsetUserBuffer(int length, + boolean usermem) + +
+          Configures the entry with an application-owned buffer.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+DatabaseEntry

+
+public DatabaseEntry()
+
+
Construct a DatabaseEntry with null data. The offset and size are set to + zero. +

+

+
+ +

+DatabaseEntry

+
+public DatabaseEntry(byte[] data)
+
+
Construct a DatabaseEntry with a given byte array. The offset is + set to zero; the size is set to the length of the array, or to zero if + null is passed. +

+

+

Parameters:
data - Byte array wrapped by the DatabaseEntry.
+
+ +

+DatabaseEntry

+
+public DatabaseEntry(byte[] data,
+                     int offset,
+                     int size)
+
+
Constructs a DatabaseEntry with a given byte array, offset and size. +

+

+

Parameters:
data - Byte array wrapped by the DatabaseEntry.
offset - Offset of the first byte in the byte array to be included.
size - Number of bytes in the byte array to be included.
+ + + + + + + + +
+Method Detail
+ +

+getData

+
+public byte[] getData()
+
+
Return the byte array. +

+ For a DatabaseEntry that is used as an output parameter, the byte + array will always be a newly allocated array. The byte array specified + by the caller will not be used and may be null. +

+

+

+ +
Returns:
The byte array.
+
+
+
+ +

+setData

+
+public void setData(byte[] data)
+
+
Sets the byte array. The offset is set to zero; the size is set to the + length of the array, or to zero if null is passed. +

+

+

+
Parameters:
data - Byte array wrapped by the DatabaseEntry.
+
+
+
+ +

+setData

+
+public void setData(byte[] data,
+                    int offset,
+                    int size)
+
+
Sets the byte array, offset and size. +

+

+

+
Parameters:
data - Byte array wrapped by the DatabaseEntry.
offset - Offset of the first byte in the byte array to be included.
size - Number of bytes in the byte array to be included.
+
+
+
+ +

+setPartial

+
+public void setPartial(int doff,
+                       int dlen,
+                       boolean partial)
+
+
Configures this DatabaseEntry to read or write partial records. +

+ Do partial retrieval or storage of an item. If the calling + application is doing a retrieval, length bytes specified by + dlen, starting at the offset set by doff bytes from + the beginning of the retrieved data record are returned as if they + comprised the entire record. If any or all of the specified bytes do + not exist in the record, the get is successful, and any existing bytes + are returned. +

+ For example, if the data portion of a retrieved record was 100 bytes, + and a partial retrieval was done using a DatabaseEntry having a partial + length of 20 and a partial offset of 85, the retrieval would succeed and + the retrieved data would be the last 15 bytes of the record. +

+ If the calling application is storing an item, length bytes specified + by dlen, starting at the offset set by doff + bytes from the beginning of the specified key's data item are replaced + by the data specified by the DatabaseEntry. If the partial length is + smaller than the data, the record will grow; if the partial length is + larger than the data, the record will shrink. If the specified bytes do + not exist, the record will be extended using nul bytes as necessary, and + the store will succeed. +

+ It is an error to specify a partial key when performing a put + operation of any kind. +

+ It is an error to attempt a partial store using the Database.put method in a database that supports duplicate records. Partial + stores in databases supporting duplicate records must be done using a + cursor method. +

+ Note that the Partial properties are set only by the caller. They + will never be set by a Database or Cursor method. +

+

+

+
Parameters:
doff - The offset of the partial record being read or written by the + application, in bytes. +

dlen - The byte length of the partial record being read or written by the + application, in bytes. +

partial - Whether this DatabaseEntry is configured to read or write partial + records.
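As a hedged sketch of the 100-byte example above (assuming an open Database handle db and a key entry identifying a stored record), a partial read with offset 85 and length 20 returns at most the last 15 bytes of the record:

    import com.sleepycat.db.*;

    class PartialGetSketch {
        // Sketch of the partial-retrieval example: for a 100-byte record,
        // doff 85 / dlen 20 yields the last 15 bytes.
        // Assumes "db" is open and "key" identifies a stored record.
        static byte[] tail(Database db, DatabaseEntry key) throws DatabaseException {
            DatabaseEntry data = new DatabaseEntry();
            data.setPartial(85, 20, true);              // doff, dlen, partial
            if (db.get(null, key, data, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
                return data.getData();                  // at most 15 bytes here
            }
            return null;
        }
    }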
+
+
+
+ +

+getPartialLength

+
+public int getPartialLength()
+
+
Return the byte length of the partial record being read or written by + the application, in bytes. +

+ Note that the Partial properties are set only by the caller. They + will never be set by a Database or Cursor method. +

+

+

+ +
Returns:
The byte length of the partial record being read or written by the + application, in bytes. +

See Also:
setPartial(int,int,boolean)
+
+
+
+ +

+setPartialLength

+
+public void setPartialLength(int dlen)
+
+
Set the byte length of the partial record being read or written by + the application, in bytes. +

+ Note that the Partial properties are set only by the caller. They + will never be set by a Database or Cursor method. +

+

+

+
Parameters:
dlen - The byte length of the partial record being read or written by the + application, in bytes. +
 
See Also:
setPartial(int,int,boolean)
+
+
+
+ +

+getPartialOffset

+
+public int getPartialOffset()
+
+
Return the offset of the partial record being read or written by the + application, in bytes. +

+ Note that the Partial properties are set only by the caller. They + will never be set by a Database or Cursor method. +

+

+

+ +
Returns:
The offset of the partial record being read or written by the + application, in bytes. +

See Also:
setPartial(int,int,boolean)
+
+
+
+ +

+setPartialOffset

+
+public void setPartialOffset(int doff)
+
+
Set the offset of the partial record being read or written by the + application, in bytes. +

+ Note that the Partial properties are set only by the caller. They + will never be set by a Database or Cursor method. +

+

+

+
Parameters:
doff - The offset of the partial record being read or written by the + application, in bytes. +

See Also:
setPartial(int,int,boolean)
+
+
+
+ +

+getPartial

+
+public boolean getPartial()
+
+
Return whether this DatabaseEntry is configured to read or write partial + records. +

+ Note that the Partial properties are set only by the caller. They + will never be set by a Database or Cursor method. +

+

+

+ +
Returns:
Whether this DatabaseEntry is configured to read or write partial + records. +

See Also:
setPartial(int,int,boolean)
+
+
+
+ +

+setPartial

+
+public void setPartial(boolean partial)
+
+
Configure this DatabaseEntry to read or write partial records. +

+ Note that the Partial properties are set only by the caller. They + will never be set by a Database or Cursor method. +

+

+

+
Parameters:
partial - Whether this DatabaseEntry is configured to read or write partial + records. +

See Also:
setPartial(int,int,boolean)
+
+
+
+ +

+getOffset

+
+public int getOffset()
+
+
Return the byte offset into the data array. +

+ For a DatabaseEntry that is used as an output parameter, the offset + will always be zero. +

+

+

+ +
Returns:
Offset of the first byte in the byte array to be included.
+
+
+
+ +

+setOffset

+
+public void setOffset(int offset)
+
+
Set the byte offset into the data array. +

+

+

+
Parameters:
offset - Offset of the first byte in the byte array to be included.
+
+
+
+ +

+getSize

+
+public int getSize()
+
+
Return the byte size of the data array. +

+ For a DatabaseEntry that is used as an output parameter, the size + will always be the length of the data array. +

+

+

+ +
Returns:
Number of bytes in the byte array to be included.
+
+
+
+ +

+setSize

+
+public void setSize(int size)
+
+
Set the byte size of the data array. +

+

+

+
Parameters:
size - Number of bytes in the byte array to be included.
+
+
+
+ +

+getRecordNumber

+
+public int getRecordNumber()
+
+
Return the record number encoded in this entry's buffer. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
The record number encoded in this entry's buffer.
+
+
+
+ +

+setRecordNumber

+
+public void setRecordNumber(int recno)
+
+
Initialize the entry from a logical record number. Record numbers + are integer keys starting at 1. When this method is called the data, + size and offset fields are implicitly set to hold a byte array + representation of the integer key. +

+

+
Parameters:
recno - the record number to be encoded
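A minimal sketch of using a record-number key, assuming db was opened with the Recno or Queue access method (an assumption, since this page does not show the open):

    import com.sleepycat.db.*;

    class RecnoGetSketch {
        // Sketch: look up logical record number 1 in a Recno or Queue database.
        static byte[] first(Database db) throws DatabaseException {
            DatabaseEntry key = new DatabaseEntry();
            key.setRecordNumber(1);                     // record numbers start at 1
            DatabaseEntry data = new DatabaseEntry();
            if (db.get(null, key, data, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
                return data.getData();
            }
            return null;
        }
    }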
+
+
+
+ +

+setReuseBuffer

+
+public void setReuseBuffer(boolean reuse)
+
+
Configures the entry to try to reuse the buffer before allocating a new + one. +

+

+

+
Parameters:
reuse - whether to reuse the buffer
+
+
+
+ +

+getReuseBuffer

+
+public boolean getReuseBuffer()
+
+
Return whether the entry is configured to reuse the buffer. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
Whether the entry is configured to reuse the buffer.
+
+
+
+ +

+setUserBuffer

+
+public void setUserBuffer(int length,
+                          boolean usermem)
+
+
Configures the entry with an application-owned buffer. +

+ The data field of the entry must refer to a buffer that is + at least length bytes in length. +

+ If the length of the requested item is less than or equal to that number + of bytes, the item is copied into the memory to which the + data field refers. Otherwise, the size field + is set to the length needed for the requested item, and a + MemoryException is thrown. +

+ Applications can determine the length of a record by setting + length to 0 and calling DatabaseEntry.getSize + on the return value. +

+

+

+
Parameters:
length - the length of the buffer +

usermem - whether the buffer is owned by the application
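The length-probing technique described above can be sketched as follows, assuming an open Database handle db and a key identifying a stored record: an application-owned zero-length buffer forces a MemoryException, after which the entry's size holds the length needed for the item.

    import com.sleepycat.db.*;

    class RecordLengthSketch {
        // Sketch: probe a record's stored length without copying it.
        static int storedLength(Database db, DatabaseEntry key) throws DatabaseException {
            DatabaseEntry data = new DatabaseEntry(new byte[0]);
            data.setUserBuffer(0, true);                // application-owned, zero-length buffer
            try {
                db.get(null, key, data, LockMode.DEFAULT);
                return data.getSize();                  // item fit in zero bytes (empty record)
            } catch (MemoryException e) {
                return data.getSize();                  // size was set to the length needed
            }
        }
    }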
+
+
+
+ +

+getUserBuffer

+
+public boolean getUserBuffer()
+
+
Return whether the buffer in this entry is owned by the + application. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
Whether the buffer in this entry is owned by the + application.
+
+
+
+ +

+getUserBufferLength

+
+public int getUserBufferLength()
+
+
Return the length of the application's buffer. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
The length of the application's buffer.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/DatabaseException.html b/db/docs/java/com/sleepycat/db/DatabaseException.html new file mode 100644 index 000000000..8a13a2252 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/DatabaseException.html @@ -0,0 +1,285 @@ + + + + + + +DatabaseException (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class DatabaseException

+
+java.lang.Object
+  extended by java.lang.Throwable
+      extended by java.lang.Exception
+          extended by com.sleepycat.db.DatabaseException
+
+
+
All Implemented Interfaces:
Serializable
+
+
+
Direct Known Subclasses:
DeadlockException, MemoryException, ReplicationHandleDeadException, RunRecoveryException
+
+
+
+
public class DatabaseException
extends Exception
+ +

+The root of all database exceptions. +

+Note that in some cases, certain methods return status values without issuing +an exception. This occurs in situations that are not normally considered an +error, but when some informational status is returned. For example, +Database.get returns OperationStatus.NOTFOUND when a +requested key does not appear in the database. +
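A minimal sketch of the distinction drawn above, assuming an open Database handle db and an initialized key: a missing key arrives as a status value, while genuine failures arrive as a DatabaseException whose errno can be inspected.

    import com.sleepycat.db.*;

    class ErrorHandlingSketch {
        // Sketch: status values vs. thrown DatabaseException.
        static void lookup(Database db, DatabaseEntry key) {
            DatabaseEntry data = new DatabaseEntry();
            try {
                OperationStatus status = db.get(null, key, data, LockMode.DEFAULT);
                if (status == OperationStatus.NOTFOUND) {
                    System.out.println("key not present (not an error)");
                }
            } catch (DatabaseException e) {
                // getErrno exposes the underlying system or C API error number
                System.err.println("failure, errno=" + e.getErrno() + ": " + e);
            }
        }
    }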

+ +

+

+
See Also:
Serialized Form
+
+ +

+ + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ EnvironmentgetEnvironment() + +
+          Return the environment in which the exception occurred.
+ intgetErrno() + +
+          Return the system or C API error number that caused the exception.
+ + + + + + + +
Methods inherited from class java.lang.Throwable
fillInStackTrace, getCause, getLocalizedMessage, getMessage, getStackTrace, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+getEnvironment

+
+public Environment getEnvironment()
+
+
Return the environment in which the exception occurred. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
The environment in which the exception occurred.
+
+
+
+ +

+getErrno

+
+public int getErrno()
+
+
Return the system or C API error number that caused the exception. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
The system or C API error number that caused the exception.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/DatabaseStats.html b/db/docs/java/com/sleepycat/db/DatabaseStats.html new file mode 100644 index 000000000..7f5be50a7 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/DatabaseStats.html @@ -0,0 +1,200 @@ + + + + + + +DatabaseStats (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class DatabaseStats

+
+java.lang.Object
+  extended by com.sleepycat.db.DatabaseStats
+
+
+
Direct Known Subclasses:
BtreeStats, HashStats, QueueStats
+
+
+
+
public abstract class DatabaseStats
extends Object
+ +

+Statistics for a single database. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + +


+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/DatabaseType.html b/db/docs/java/com/sleepycat/db/DatabaseType.html new file mode 100644 index 000000000..8c527cf58 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/DatabaseType.html @@ -0,0 +1,316 @@ + + + + + + +DatabaseType (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class DatabaseType

+
+java.lang.Object
+  extended by com.sleepycat.db.DatabaseType
+
+
+
+
public final class DatabaseType
extends Object
+ +

+Database types. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Field Summary
+static DatabaseTypeBTREE + +
+          The database is a Btree.
+static DatabaseTypeHASH + +
+          The database is a Hash.
+static DatabaseTypeQUEUE + +
+          The database is a Queue.
+static DatabaseTypeRECNO + +
+          The database is a Recno.
+static DatabaseTypeUNKNOWN + +
+          The database type is unknown.
+  + + + + + + + + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + +
+Field Detail
+ +

+BTREE

+
+public static final DatabaseType BTREE
+
+
The database is a Btree. The Btree format is a representation of a + sorted, balanced tree structure. +

+

+
+
+
+ +

+HASH

+
+public static final DatabaseType HASH
+
+
The database is a Hash. The Hash format is an extensible, dynamic + hashing scheme. +

+

+
+
+
+ +

+QUEUE

+
+public static final DatabaseType QUEUE
+
+
The database is a Queue. The Queue format supports fast access to + fixed-length records accessed sequentially or by logical record + number. +

+

+
+
+
+ +

+RECNO

+
+public static final DatabaseType RECNO
+
+
The database is a Recno. The Recno format supports fixed- or + variable-length records, accessed sequentially or by logical + record number, and optionally backed by a flat text file. +

+

+
+
+
+ +

+UNKNOWN

+
+public static final DatabaseType UNKNOWN
+
+
The database type is unknown. +

+

+
+
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/DeadlockException.html b/db/docs/java/com/sleepycat/db/DeadlockException.html new file mode 100644 index 000000000..c63c4a610 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/DeadlockException.html @@ -0,0 +1,230 @@ + + + + + + +DeadlockException (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class DeadlockException

+
+java.lang.Object
+  extended by java.lang.Throwable
+      extended by java.lang.Exception
+          extended by com.sleepycat.db.DatabaseException
+              extended by com.sleepycat.db.DeadlockException
+
+
+
All Implemented Interfaces:
Serializable
+
+
+
Direct Known Subclasses:
LockNotGrantedException
+
+
+
+
public class DeadlockException
extends DatabaseException
+ +

+DeadlockException is thrown to a thread of control when multiple threads +competing for a lock are +deadlocked, when a lock request has timed out +or when a lock request would need to block and the transaction has been +configured to not wait for locks. +
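The usual response to this exception is to abort the losing transaction and retry the unit of work, sketched below under the assumption that env and db are open and transactional and that a retry limit of five is acceptable; error cleanup for non-deadlock failures is elided.

    import com.sleepycat.db.*;

    class DeadlockRetrySketch {
        // Sketch: abort the losing transaction on DeadlockException and retry.
        static void putWithRetry(Environment env, Database db,
                                 DatabaseEntry key, DatabaseEntry data)
                throws DatabaseException {
            DeadlockException lastDeadlock = null;
            for (int attempt = 0; attempt < 5; attempt++) {   // retry limit is arbitrary
                Transaction txn = env.beginTransaction(null, null);
                try {
                    db.put(txn, key, data);
                    txn.commit();
                    return;
                } catch (DeadlockException e) {
                    lastDeadlock = e;
                    txn.abort();                              // release locks, then retry
                }
            }
            throw lastDeadlock;                               // still deadlocking after retries
        }
    }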

+ +

+

+
See Also:
Serialized Form
+
+ +

+ + + + + + + + + + + + + + + + + + + +
Methods inherited from class com.sleepycat.db.DatabaseException
getEnvironment, getErrno
+ + + + + + + +
Methods inherited from class java.lang.Throwable
fillInStackTrace, getCause, getLocalizedMessage, getMessage, getStackTrace, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
+  +

+ + + + + + + + + + +


+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/Environment.html b/db/docs/java/com/sleepycat/db/Environment.html new file mode 100644 index 000000000..fd31c03ea --- /dev/null +++ b/db/docs/java/com/sleepycat/db/Environment.html @@ -0,0 +1,1893 @@ + + + + + + +Environment (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class Environment

+
+java.lang.Object
+  extended by com.sleepycat.db.Environment
+
+
+
+
public class Environment
extends Object
+ +

+A database environment. Environments include support for some or +all of caching, locking, logging and transactions. +

+To open an existing environment with default attributes the application +may use a default environment configuration object or null: +

+

+    // Open an environment handle with default attributes.
+    Environment env = new Environment(home, new EnvironmentConfig());
+
+

+or +

+

+    Environment env = new Environment(home, null);
+
+

+Note that many Environment objects may access a single environment. +

+To create an environment or customize attributes, the application should +customize the configuration class. For example: +

+

+    EnvironmentConfig envConfig = new EnvironmentConfig();
+    envConfig.setTransactional(true);
+    envConfig.setAllowCreate(true);
+    envConfig.setCacheSize(1000000);
+    

+ Environment newlyCreatedEnv = new Environment(home, envConfig); +

+

+Environment handles are free-threaded unless +EnvironmentConfig.setThreaded is called to disable this before +the environment is opened. +

+An environment handle is an Environment instance. More than +one Environment instance may be created for the same physical directory, +which is the same as saying that more than one Environment handle may +be open at one time for a given environment. +

+The Environment handle should not be closed while any other handle +remains open that is using it as a reference (for example, +Database or Transaction). Once Environment.close +is called, this object may not be accessed again, regardless of +whether or not it throws an exception. +
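A minimal sketch of that close ordering, assuming only a home directory and a hypothetical existing database file "a.db": database handles are closed before the Environment handle that opened them.

    import java.io.File;
    import com.sleepycat.db.*;

    class CloseOrderSketch {
        // Sketch: close databases before the environment.
        static void run(File home) throws Exception {
            Environment env = new Environment(home, null);
            Database db = null;
            try {
                db = env.openDatabase(null, "a.db", null, null);
                // ... use db ...
            } finally {
                if (db != null) {
                    db.close();      // close database handles first
                }
                env.close();         // then the environment
            }
        }
    }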

+ +

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
Environment(File envHome, + EnvironmentConfig envConfig) + +
+          Create a database environment handle.
+  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ TransactionbeginTransaction(Transaction parent, + TransactionConfig txnConfig) + +
+          Create a new transaction in the database environment.
+ voidcheckpoint(CheckpointConfig checkpointConfig) + +
+          Synchronously checkpoint the database environment.
+ voidclose() + +
+          Close the database environment, freeing any allocated resources and + closing any underlying subsystems.
+ intcreateLockerID() + +
+          Allocate a locker ID.
+ intdetectDeadlocks(LockDetectMode mode) + +
+          Run one iteration of the deadlock detector.
+ intelectReplicationMaster(int nsites, + int nvotes, + int priority, + int timeout) + +
+          Hold an election for the master of a replication group.
+ voidfreeLockerID(int id) + +
+          Free a locker ID.
+ File[]getArchiveDatabases() + +
+          Return the names of the database files that need to be archived in + order to recover the database from catastrophic failure.
+ File[]getArchiveLogFiles(boolean includeInUse) + +
+          Return the names of all of the log files that are no longer in use.
+ CacheFileStats[]getCacheFileStats(StatsConfig config) + +
+          Return the database environment's per-file memory pool (that is, the + buffer cache) statistics.
+ CacheStatsgetCacheStats(StatsConfig config) + +
+           
+ EnvironmentConfiggetConfig() + +
+          Return this object's configuration.
+ FilegetHome() + +
+          Return the database environment's home directory.
+ LockgetLock(int locker, + boolean noWait, + DatabaseEntry object, + LockRequestMode mode) + +
+          Acquire a lock from the lock table.
+ LockStatsgetLockStats(StatsConfig config) + +
+          Return the database environment's locking statistics.
+ StringgetLogFileName(LogSequenceNumber lsn) + +
+          Return the name of the log file that contains the log record + specified by a LogSequenceNumber object.
+ LogStatsgetLogStats(StatsConfig config) + +
+          Return the database environment's logging statistics.
+ ReplicationStatsgetReplicationStats(StatsConfig config) + +
+          Return the database environment's replication statistics.
+ TransactionStatsgetTransactionStats(StatsConfig config) + +
+          Return the database environment's transactional statistics.
+static intgetVersionMajor() + +
+          Return the release major number.
+static intgetVersionMinor() + +
+          Return the release minor number.
+static intgetVersionPatch() + +
+          Return the release patch number.
+static StringgetVersionString() + +
+          Return the release version information, suitable for display.
+ voidlockVector(int locker, + boolean noWait, + LockRequest[] list) + +
+          Atomically obtain and release one or more locks from the lock table.
+ voidlogFlush(LogSequenceNumber lsn) + +
+          Flush log records to stable storage.
+ LogSequenceNumberlogPut(DatabaseEntry data, + boolean flush) + +
+          Append a record to the log.
+ DatabaseopenDatabase(Transaction txn, + String fileName, + String databaseName, + DatabaseConfig config) + +
+          Open a database.
+ LogCursoropenLogCursor() + +
+          Return a log cursor.
+ SecondaryDatabaseopenSecondaryDatabase(Transaction txn, + String fileName, + String databaseName, + Database primaryDatabase, + SecondaryConfig config) + +
+          Open a database.
+ voidpanic(boolean onoff) + +
+          Set the panic state for the database environment.
+ ReplicationStatusprocessReplicationMessage(DatabaseEntry control, + DatabaseEntry rec, + int envid) + +
+          Process an incoming replication message sent by a member of the + replication group to the local database environment.
+ voidputLock(Lock lock) + +
+          Release a lock.
+ PreparedTransaction[]recover(int count, + boolean continued) + +
+          Return a list of prepared but not yet resolved transactions.
+static voidremove(File home, + boolean force, + EnvironmentConfig config) + +
+          Destroy a database environment.
+ voidremoveDatabase(Transaction txn, + String fileName, + String databaseName) + +
+           +Remove a database.
+ voidremoveOldLogFiles() + +
+          Remove log files that are no longer needed.
+ voidrenameDatabase(Transaction txn, + String fileName, + String oldDatabaseName, + String newDatabaseName) + +
+           +Rename a database.
+ voidsetConfig(EnvironmentConfig config) + +
+          Change the settings in an existing environment handle.
+ voidstartReplication(DatabaseEntry cdata, + boolean master) + +
+          Configure the database environment as a client or master in a group + of replicated database environments.
+ inttrickleCacheWrite(int percent) + +
+          Ensure that a specified percent of the pages in the shared memory + pool are clean, by writing dirty pages to their backing files.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+Environment

+
+public Environment(File envHome,
+                   EnvironmentConfig envConfig)
+            throws DatabaseException,
+                   FileNotFoundException
+
+
Create a database environment handle. +

+

+

Parameters:
envHome - The database environment's home directory. + The environment variable DB_HOME may be used as + the path of the database home. + For more information on envHome and filename + resolution in general, see + File Naming. +

envConfig - The database environment attributes. If null, default attributes are used. +

+

+

Throws: +
IllegalArgumentException - if an invalid parameter was specified. +

+

+

DatabaseException - if a failure occurs. +
FileNotFoundException
+ + + + + + + + +
+Method Detail
+ +

+getHome

+
+public File getHome()
+             throws DatabaseException
+
+
Return the database environment's home directory. +

+

+

+ +
Returns:
The database environment's home directory. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getConfig

+
+public EnvironmentConfig getConfig()
+                            throws DatabaseException
+
+
Return this object's configuration. +

+

+

+ +
Returns:
This object's configuration. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+beginTransaction

+
+public Transaction beginTransaction(Transaction parent,
+                                    TransactionConfig txnConfig)
+                             throws DatabaseException
+
+
Create a new transaction in the database environment. +

+ Transactions may only span threads if they do so serially; that is, + each transaction must be active in only a single thread of control + at a time. +

+ This restriction holds for parents of nested transactions as well; + no two children may be concurrently active in more than one thread + of control at any one time. +

+ Cursors may not span transactions; that is, each cursor must be opened + and closed within a single transaction. +

+ A parent transaction may not issue any Berkeley DB operations -- + except for Environment.beginTransaction, + Transaction.abort and Transaction.commit -- + while it has active child transactions (child transactions that have + not yet been committed or aborted). +

+

+

+
Parameters:
parent - If the parent parameter is non-null, the new transaction will be a + nested transaction, with the transaction indicated by parent as its + parent. Transactions may be nested to any level. In the presence + of distributed transactions and two-phase commit, only the parental + transaction, that is a transaction without a parent specified, + should be passed as an parameter to Transaction.prepare. +

txnConfig - The transaction attributes. If null, default attributes are used. +

+

Returns:
The newly created transaction's handle. +

+

+

Throws: +
DatabaseException - if a failure occurs.
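A minimal sketch of a nested transaction, assuming env was opened with transactions enabled: the child is resolved before the parent performs further work or is itself resolved, in line with the restrictions above.

    import com.sleepycat.db.*;

    class NestedTxnSketch {
        // Sketch: parent/child transaction resolution order.
        static void run(Environment env) throws DatabaseException {
            Transaction parent = env.beginTransaction(null, null);
            boolean ok = false;
            try {
                Transaction child = env.beginTransaction(parent, null);
                // ... operations protected by the child transaction ...
                child.commit();                    // resolve the child first
                ok = true;
            } finally {
                if (ok) {
                    parent.commit();
                } else {
                    parent.abort();
                }
            }
        }
    }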
+
+
+
+ +

+checkpoint

+
+public void checkpoint(CheckpointConfig checkpointConfig)
+                throws DatabaseException
+
+
Synchronously checkpoint the database environment. +

+

+

+

+
Parameters:
checkpointConfig - The checkpoint attributes. If null, default attributes are used. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getLockStats

+
+public LockStats getLockStats(StatsConfig config)
+                       throws DatabaseException
+
+
Return the database environment's locking statistics. +

+

+

+
Parameters:
config - The locking statistics attributes. If null, default attributes are used. +

+

Returns:
The database environment's locking statistics. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getTransactionStats

+
+public TransactionStats getTransactionStats(StatsConfig config)
+                                     throws DatabaseException
+
+
Return the database environment's transactional statistics. +

+

+

+
Parameters:
config - The transactional statistics attributes. If null, default attributes are used. +

+

Returns:
The database environment's transactional statistics. +

+

+

Throws: +
DatabaseException - if a failure occurs.
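A minimal sketch of fetching these statistics with default attributes (a null StatsConfig, as the descriptions above allow), assuming env was opened with locking and transactions enabled:

    import com.sleepycat.db.*;

    class StatsSketch {
        // Sketch: dump locking and transactional statistics.
        static void dump(Environment env) throws DatabaseException {
            LockStats lockStats = env.getLockStats(null);
            TransactionStats txnStats = env.getTransactionStats(null);
            System.out.println(lockStats);
            System.out.println(txnStats);
        }
    }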
+
+
+
+ +

+close

+
+public void close()
+           throws DatabaseException
+
+
Close the database environment, freeing any allocated resources and + closing any underlying subsystems. +

+ The Environment handle should not be closed while any other + handle that refers to it is not yet closed; for example, database + environment handles must not be closed while database handles remain + open, or transactions in the environment have not yet been committed + or aborted. Specifically, this includes Database, + Cursor, Transaction, and LogCursor + handles. +

+ Where the environment was initialized with a locking subsystem, + closing the environment does not release any locks still held by the + closing process, providing functionality for long-lived locks. +

+ Where the environment was initialized with a transaction subsystem, + closing the environment aborts any unresolved transactions. + Applications should not depend on this behavior for transactions + involving databases; all such transactions should be explicitly + resolved. The problem with depending on this semantic is that + aborting an unresolved transaction involving database operations + requires a database handle. Because the database handles should + have been closed before closing the environment, it will not be + possible to abort the transaction, and recovery will have to be run + on the database environment before further operations are done. +

+ Where log cursors were created, closing the environment does not + imply closing those cursors. +

+ In multithreaded applications, only a single thread may call this + method. +

+ After this method has been called, regardless of its return, the + Environment handle may not be accessed again. +

+

+

+

+ +
Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+remove

+
+public static void remove(File home,
+                          boolean force,
+                          EnvironmentConfig config)
+                   throws DatabaseException,
+                          FileNotFoundException
+
+
Destroy a database environment. +

+ If the environment is not in use, the environment regions, including + any backing files, are removed. Any log or database files and the + environment directory itself are not removed. +

+ If there are processes currently using the database environment, + this method will fail without further action (unless the force + argument is true, in which case the environment will be removed, + regardless of any processes still using it). +

+ The result of attempting to forcibly destroy the environment when + it is in use is unspecified. Processes using an environment often + maintain open file descriptors for shared regions within it. On + UNIX systems, the environment removal will usually succeed, and + processes that have already joined the region will continue to run + in that region without change. However, processes attempting to + join the environment will either fail or create new regions. On + other systems in which the unlink system call will fail if any + process has an open file descriptor for the file (for example + Windows/NT), the region removal will fail. +

+ Calling this method should not be necessary for most applications + because the environment is cleaned up as part of normal + database recovery procedures. However, applications may want to call + this method as part of application shut down to free up system + resources. For example, if system shared memory was used to back + the database environment, it may be useful to call this method in + order to release system shared memory segments that have been + allocated. Or, on architectures in which mutexes require allocation + of underlying system resources, it may be useful to call + this method in order to release those resources. Alternatively, if + recovery is not required because no database state is maintained + across failures, and no system resources need to be released, it is + possible to clean up an environment by simply removing all the + Berkeley DB files in the database environment's directories. +

+ In multithreaded applications, only a single thread may call this + method. +

+ After this method has been called, regardless of its return, the + Environment handle may not be accessed again. +

+

+

+
Parameters:
home - The database environment to be removed. + On Windows platforms, this argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters. +

force - The environment is removed, regardless of any processes that may + still be using it, and no locks are acquired during this process. + (Generally, the force argument is specified only when applications + were unable to shut down cleanly, and there is a risk that an + application may have died holding a Berkeley DB mutex or lock.) +

+

+

Throws: +
DatabaseException - if a failure occurs. +
FileNotFoundException
+
+
+
+ +

+setConfig

+
+public void setConfig(EnvironmentConfig config)
+               throws DatabaseException
+
+
Change the settings in an existing environment handle. +

+

+

+
Parameters:
config - The database environment attributes. If null, default attributes are used. +

+

+

Throws: +
IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs.
+
+
+
+ +

+openDatabase

+
+public Database openDatabase(Transaction txn,
+                             String fileName,
+                             String databaseName,
+                             DatabaseConfig config)
+                      throws DatabaseException,
+                             FileNotFoundException
+
+
Open a database. +

+The database is represented by the file and database parameters. +

+The currently supported database file formats (or access +methods) are Btree, Hash, Queue, and Recno. The Btree format is a +representation of a sorted, balanced tree structure. The Hash format +is an extensible, dynamic hashing scheme. The Queue format supports +fast access to fixed-length records accessed sequentially or by logical +record number. The Recno format supports fixed- or variable-length +records, accessed sequentially or by logical record number, and +optionally backed by a flat text file. +

+Storage and retrieval are based on key/data pairs; see DatabaseEntry +for more information. +

+Opening a database is a relatively expensive operation, and maintaining +a set of open databases will normally be preferable to repeatedly +opening and closing the database for each new query. +

+In-memory databases never intended to be preserved on disk may be +created by setting both the fileName and databaseName parameters to +null. Note that in-memory databases can only ever be shared by sharing +the single database handle that created them, in circumstances where +doing so is safe. The environment variable TMPDIR may +be used as a directory in which to create temporary backing files. +

+

+

+
Parameters:
txn - For a transactional database, an explicit transaction may be specified, or null +may be specified to use auto-commit. For a non-transactional database, null +must be specified. +Note that transactionally protected operations on a Database handle +require that the Database handle itself be transactionally protected +during its open, either with a non-null transaction handle, or by calling +DatabaseConfig.setTransactional on the configuration object. +

fileName - The name of an underlying file that will be used to back the database. +On Windows platforms, this argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters. +

databaseName - An optional parameter that allows applications to have multiple +databases in a single file. Although no databaseName parameter needs +to be specified, it is an error to attempt to open a second database in +a physical file that was not initially created using a databaseName +parameter. Further, the databaseName parameter is not supported by the +Queue format. +

config - The database open attributes. If null, default attributes are used. +
Throws: +
DatabaseException +
FileNotFoundException
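A minimal sketch of an open, assuming env was opened with transactions enabled and using a hypothetical file name: the database is created if absent, as a transactional Btree, with the open auto-committed.

    import com.sleepycat.db.*;

    class OpenDatabaseSketch {
        // Sketch: open (and create if necessary) a transactional Btree database.
        static Database open(Environment env) throws Exception {
            DatabaseConfig config = new DatabaseConfig();
            config.setType(DatabaseType.BTREE);
            config.setAllowCreate(true);
            config.setTransactional(true);
            // null transaction: the open itself is auto-committed
            return env.openDatabase(null, "a.db", null, config);
        }
    }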
+
+
+
+ +

+openSecondaryDatabase

+
+public SecondaryDatabase openSecondaryDatabase(Transaction txn,
+                                               String fileName,
+                                               String databaseName,
+                                               Database primaryDatabase,
+                                               SecondaryConfig config)
+                                        throws DatabaseException,
+                                               FileNotFoundException
+
+
Open a database. +

+The database is represented by the file and database parameters. +

+The currently supported database file formats (or access +methods) are Btree, Hash, Queue, and Recno. The Btree format is a +representation of a sorted, balanced tree structure. The Hash format +is an extensible, dynamic hashing scheme. The Queue format supports +fast access to fixed-length records accessed sequentially or by logical +record number. The Recno format supports fixed- or variable-length +records, accessed sequentially or by logical record number, and +optionally backed by a flat text file. +

+Storage and retrieval are based on key/data pairs; see DatabaseEntry +for more information. +

+Opening a database is a relatively expensive operation, and maintaining +a set of open databases will normally be preferable to repeatedly +opening and closing the database for each new query. +

+In-memory databases never intended to be preserved on disk may be +created by setting both the fileName and databaseName parameters to +null. Note that in-memory databases can only ever be shared by sharing +the single database handle that created them, in circumstances where +doing so is safe. The environment variable TMPDIR may +be used as a directory in which to create temporary backing files. +

+

+

+
Parameters:
txn - For a transactional database, an explicit transaction may be specified, or null +may be specified to use auto-commit. For a non-transactional database, null +must be specified. +Note that transactionally protected operations on a Database handle +require that the Database handle itself be transactionally protected +during its open, either with a non-null transaction handle, or by calling +DatabaseConfig.setTransactional on the configuration object. +

fileName - The name of an underlying file that will be used to back the database. +On Windows platforms, this argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters. +

databaseName - An optional parameter that allows applications to have multiple +databases in a single file. Although no databaseName parameter needs +to be specified, it is an error to attempt to open a second database in +a physical file that was not initially created using a databaseName +parameter. Further, the databaseName parameter is not supported by the +Queue format. +

primaryDatabase - a database handle for the primary database that is to be indexed. +

config - The secondary database open attributes. If null, default attributes are used. +
Throws: +
DatabaseException +
FileNotFoundException
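A hedged sketch of opening a secondary database, assuming an open primary Database handle, a hypothetical index file name, and the SecondaryConfig.setKeyCreator / SecondaryKeyCreator callback from elsewhere in this API; the key derivation shown (first byte of the primary data) is purely illustrative.

    import com.sleepycat.db.*;

    class SecondarySketch {
        // Sketch: open a secondary database indexing the first byte of each record.
        static SecondaryDatabase openIndex(Environment env, Database primary)
                throws Exception {
            SecondaryConfig config = new SecondaryConfig();
            config.setAllowCreate(true);
            config.setKeyCreator(new SecondaryKeyCreator() {
                public boolean createSecondaryKey(SecondaryDatabase secondary,
                                                  DatabaseEntry key,
                                                  DatabaseEntry data,
                                                  DatabaseEntry result)
                        throws DatabaseException {
                    // derive the secondary key from the primary data item
                    result.setData(new byte[] { data.getData()[0] });
                    return true;
                }
            });
            return env.openSecondaryDatabase(null, "index.db", null, primary, config);
        }
    }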
+
+
+
+ +

+removeDatabase

+
+public void removeDatabase(Transaction txn,
+                           String fileName,
+                           String databaseName)
+                    throws DatabaseException,
+                           FileNotFoundException
+
+

+Remove a database. +

+If no database is specified, the underlying file specified is removed. +

+Applications should never remove databases with open Database +handles, or in the case of removing a file, when any database in the +file has an open handle. For example, some architectures do not permit +the removal of files with open system handles. On these architectures, +attempts to remove databases currently in use by any thread of control +in the system may fail. +

+The +environment variable DB_HOME may be used as the path of the database +environment home. +

+This method is affected by any database directory specified with +EnvironmentConfig.addDataDir, or by setting the "set_data_dir" +string in the database environment's DB_CONFIG file. +

+The Database handle may not be accessed +again after this method is called, regardless of this method's success +or failure. +

+

+

+
Parameters:
txn - For a transactional database, an explicit transaction may be specified, or null +may be specified to use auto-commit. For a non-transactional database, null +must be specified. +

fileName - The physical file which contains the database to be removed. +On Windows platforms, this argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters. +

databaseName - The database to be removed. +

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

DatabaseException - if a failure occurs. +
FileNotFoundException
+
+
+
+ +

+renameDatabase

+
+public void renameDatabase(Transaction txn,
+                           String fileName,
+                           String oldDatabaseName,
+                           String newDatabaseName)
+                    throws DatabaseException,
+                           FileNotFoundException
+
+

+Rename a database. +

+If no database name is specified, the underlying file specified is +renamed, incidentally renaming all of the databases it contains. +

+Applications should never rename databases that are currently in use. +If an underlying file is being renamed and logging is currently enabled +in the database environment, no database in the file may be open when +this method is called. In particular, some architectures do not permit +renaming files with open handles. On these architectures, attempts to +rename databases that are currently in use by any thread of control in +the system may fail. +

+The +environment variable DB_HOME may be used as the path of the database +environment home. +

+This method is affected by any database directory specified with +EnvironmentConfig.addDataDir, or by setting the "set_data_dir" +string in the database environment's DB_CONFIG file. +

+The Database handle may not be accessed +again after this method is called, regardless of this method's success +or failure. +

+

+

+
Parameters:
txn - For a transactional database, an explicit transaction may be specified, or null +may be specified to use auto-commit. For a non-transactional database, null +must be specified. +

fileName - The physical file which contains the database to be renamed. +On Windows platforms, this argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters. +

oldDatabaseName - The database to be renamed. +

newDatabaseName - The new name of the database or file. +

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

DatabaseException - if a failure occurs. +
FileNotFoundException
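A minimal sketch of both administrative calls, using hypothetical file and database names and assuming no handles for them remain open, per the warnings above: passing a null database name to renameDatabase renames the underlying file itself.

    import com.sleepycat.db.*;

    class AdminSketch {
        // Sketch: remove one named database from a file, then rename another file.
        static void run(Environment env) throws Exception {
            env.removeDatabase(null, "data.db", "old-index");
            env.renameDatabase(null, "data-2004.db", null, "data-archive.db");
        }
    }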
+
+
+
+ +

+trickleCacheWrite

+
+public int trickleCacheWrite(int percent)
+                      throws DatabaseException
+
+
Ensure that a specified percent of the pages in the shared memory + pool are clean, by writing dirty pages to their backing files. +

+ The purpose of this method is to enable a memory pool manager to ensure + that a page is always available for reading in new information + without having to wait for a write. +

+

+

+
Parameters:
percent - The percent of the pages in the cache that should be clean. +

+

Returns:
The number of pages that were written to reach the specified + percentage. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+detectDeadlocks

+
+public int detectDeadlocks(LockDetectMode mode)
+                    throws DatabaseException
+
+
Run one iteration of the deadlock detector. +

+ The deadlock detector traverses the lock table and marks one of the + participating lock requesters for rejection in each deadlock it finds. +

+

+

+
Parameters:
mode - Which lock request(s) to reject. +

+

Returns:
The number of lock requests that were rejected. +

+

+

Throws: +
DatabaseException - if a failure occurs.
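A minimal sketch of one detector pass, assuming the locking subsystem is enabled and that LockDetectMode.DEFAULT (the environment's configured rejection policy) is the desired mode:

    import com.sleepycat.db.*;

    class DetectorSketch {
        // Sketch: run one iteration of the deadlock detector.
        static void runOnce(Environment env) throws DatabaseException {
            int rejected = env.detectDeadlocks(LockDetectMode.DEFAULT);
            System.out.println(rejected + " lock request(s) rejected");
        }
    }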
+
+
+
+ +

+getLock

+
+public Lock getLock(int locker,
+                    boolean noWait,
+                    DatabaseEntry object,
+                    LockRequestMode mode)
+             throws DatabaseException
+
+
Acquire a lock from the lock table. +

+

+

+
Parameters:
locker - An unsigned 32-bit integer quantity representing the entity + requesting the lock. +

mode - The lock mode. +

noWait - If a lock cannot be granted because the requested lock conflicts + with an existing lock, throw a LockNotGrantedException + immediately instead of waiting for the lock to become available. +

object - An untyped byte string that specifies the object to be locked. + Applications using the locking subsystem directly while also doing + locking via the Berkeley DB access methods must take care not to + inadvertently lock objects that happen to be equal to the unique + file IDs used to lock files. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+putLock

+
+public void putLock(Lock lock)
+             throws DatabaseException
+
+
Release a lock. +

+

+

+
Parameters:
lock - The lock to be released. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+createLockerID

+
+public int createLockerID()
+                   throws DatabaseException
+
+
Allocate a locker ID. +

+ The locker ID is guaranteed to be unique for the database environment. +

+ Call Environment.freeLockerID to return the locker ID to + the environment when it is no longer needed. +

+

+

+ +
Returns:
A locker ID. +
Throws: +
DatabaseException
+
+
+
+ +

+freeLockerID

+
+public void freeLockerID(int id)
+                  throws DatabaseException
+
+
Free a locker ID. +

+

+

+
Parameters:
id - The locker id to be freed. +

+

+

Throws: +
DatabaseException - if a failure occurs.
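Tying the four locker calls above together, here is a minimal sketch of direct lock-table use, assuming env was opened with the locking subsystem enabled; the locked byte string is an arbitrary application-chosen name.

    import com.sleepycat.db.*;

    class LockTableSketch {
        // Sketch: allocate a locker ID, take and release a write lock, free the ID.
        static void lockOnce(Environment env) throws DatabaseException {
            int locker = env.createLockerID();
            DatabaseEntry object = new DatabaseEntry("my-resource".getBytes());
            try {
                Lock lock = env.getLock(locker, false, object, LockRequestMode.WRITE);
                try {
                    // ... critical section protected by the lock ...
                } finally {
                    env.putLock(lock);
                }
            } finally {
                env.freeLockerID(locker);
            }
        }
    }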
+
+
+
+ +

+lockVector

+
+public void lockVector(int locker,
+                       boolean noWait,
+                       LockRequest[] list)
+                throws DatabaseException
+
+
Atomically obtain and release one or more locks from the lock table. + This method is intended to support acquisition or trading of + multiple locks under one lock table semaphore, as is needed for lock + coupling or in multigranularity locking for lock escalation. +

+ If any of the requested locks cannot be acquired, or any of the locks to + be released cannot be released, the operations before the failing + operation are guaranteed to have completed successfully, and + the method throws an exception. +

+

+

+
Parameters:
noWait - If a lock cannot be granted because the requested lock conflicts + with an existing lock, throw a LockNotGrantedException + immediately instead of waiting for the lock to become available. + The index of the request that was not granted will be returned by + LockNotGrantedException.getIndex. +

locker - An unsigned 32-bit integer quantity representing the entity + requesting the lock. +

list - An array of LockRequest objects, listing the requested lock + operations. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+openLogCursor

+
+public LogCursor openLogCursor()
+                        throws DatabaseException
+
+
Return a log cursor. +

+

+

+ +
Returns:
A log cursor. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getLogFileName

+
+public String getLogFileName(LogSequenceNumber lsn)
+                      throws DatabaseException
+
+
Return the name of the log file that contains the log record + specified by a LogSequenceNumber object. +

+ This mapping of LogSequenceNumber objects to files is needed for + database administration. For example, a transaction manager + typically records the earliest LogSequenceNumber object needed for + restart, and the database administrator may want to archive log + files to tape when they contain only log records before the earliest + one needed for restart. +

+

+

+
Parameters:
lsn - The LogSequenceNumber object for which a filename is wanted. +

+

Returns:
The name of the log file that contains the log record specified by a + LogSequenceNumber object. +

+

+

Throws: +
IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs.
+
+
+
+ +

+electReplicationMaster

+
+public int electReplicationMaster(int nsites,
+                                  int nvotes,
+                                  int priority,
+                                  int timeout)
+                           throws DatabaseException
+
+
Hold an election for the master of a replication group. +

+ If the election is successful, the new master's ID may be the ID of the + previous master, or the ID of the current environment. The application + is responsible for adjusting its usage of the other environments in the + replication group, including directing all database updates to the newly + selected master, in accordance with the results of this election. +

+ The thread of control that calls this method must not be the thread + of control that processes incoming messages; processing the incoming + messages is necessary to successfully complete an election. +

+

+

+
Parameters:
nsites - The number of environments that the application believes are in the + replication group. This number is used by Berkeley DB to avoid + having two masters active simultaneously, even in the case of a + network partition. During an election, a new master cannot be + elected unless more than half of nsites agree on the new master. + Thus, in the face of a network partition, the side of the partition + with more than half the environments will elect a new master and + continue, while the environments communicating with fewer than half + the other environments will fail to find a new master. +

nvotes - The number of votes required by the application to successfully + elect a new master. It must be a positive integer, no greater than + nsites, or 0 if the election should use a simple majority of the + nsites value as the requirement. A warning is given if half or + fewer votes are required to win an election as that can potentially + lead to multiple masters in the face of a network partition. +

priority - The priority of this environment. It must be a positive integer, + or 0 if this environment is not permitted to become a master. +

timeout - A timeout period for an election. If the election has not completed + after timeout microseconds, the election will fail. +

+

Returns:
The newly elected master's ID. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +
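+ A hedged sketch of holding an election from a replication-aware application follows; the site count, priority and timeout values are illustrative only, and localEnvId is a hypothetical variable holding this environment's own ID: +
+    // Ask the replication group to elect a master (sketch).
+    int nsites = 5;          // environments believed to be in the group
+    int nvotes = 0;          // 0 means: require a simple majority of nsites
+    int priority = 100;      // this environment's priority
+    int timeout = 2000000;   // two seconds, expressed in microseconds
+    int masterId = myEnvironment.electReplicationMaster(nsites, nvotes, priority, timeout);
+    if (masterId == localEnvId) {
+        // This environment won the election; direct all updates here.
+    }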

+processReplicationMessage

+
+public ReplicationStatus processReplicationMessage(DatabaseEntry control,
+                                                   DatabaseEntry rec,
+                                                   int envid)
+                                            throws DatabaseException
+
+
Process an incoming replication message sent by a member of the + replication group to the local database environment. +

+ For implementation reasons, all incoming replication messages must + be processed using the same Environment handle. It is not + required that a single thread of control process all messages, only + that all threads of control processing messages use the same handle. +

+

+

+
Parameters:
control - A copy of the control parameter specified by Berkeley DB on the + sending environment. +

envid - The local identifier that corresponds to the environment that sent + the message to be processed. +

rec - A copy of the rec parameter specified by Berkeley DB on the sending + environment. +

+

Returns:
A ReplicationStatus object. +
Throws: +
DatabaseException
+
+
+
+ +
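+ The sketch below shows the shape of a message-processing loop; receiveFromNetwork is a hypothetical helper standing in for the application's transport, filling the control and rec entries and returning the sender's environment ID. Every thread running this loop must use the same Environment handle, as noted above. +
+    // Process incoming replication messages on a shared handle (sketch).
+    DatabaseEntry control = new DatabaseEntry();
+    DatabaseEntry rec = new DatabaseEntry();
+    for (;;) {
+        int senderId = receiveFromNetwork(control, rec);  // hypothetical transport helper
+        ReplicationStatus status =
+            myEnvironment.processReplicationMessage(control, rec, senderId);
+        // Inspect status here, for example to react to a newly elected master.
+    }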

+startReplication

+
+public void startReplication(DatabaseEntry cdata,
+                             boolean master)
+                      throws DatabaseException
+
+
Configure the database environment as a client or master in a group + of replicated database environments. Replication master + environments are the only database environments where replicated + databases may be modified. Replication client environments are + read-only as long as they are clients. Replication client + environments may be upgraded to be replication master environments + in the case that the current master fails or there is no master + present. +

+ The enclosing database environment must already have been configured + to send replication messages by calling EnvironmentConfig.setReplicationTransport. +

+

+

+
Parameters:
cdata - An opaque data item that is sent over the communication infrastructure + when the client or master comes online. If no such information is + useful, cdata should be null. +

master - Configure the environment as a replication master. If false, the + environment will be configured as a replication client. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +
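+ A minimal sketch of bringing a site online as the initial master, assuming the environment was opened with EnvironmentConfig.setReplicationTransport already configured as described above: +
+    // Join the replication group as the initial master (sketch).
+    DatabaseEntry cdata = null;              // no application-specific data to broadcast
+    myEnvironment.startReplication(cdata, true /* master */);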

+getCacheStats

+
+public CacheStats getCacheStats(StatsConfig config)
+                         throws DatabaseException
+
+
Return the database environment's memory pool (that is, the buffer + cache) statistics. +
+ +
Throws: +
DatabaseException
+
+
+
+ +

+getCacheFileStats

+
+public CacheFileStats[] getCacheFileStats(StatsConfig config)
+                                   throws DatabaseException
+
+
Return the database environment's per-file memory pool (that is, the + buffer cache) statistics. +

+

+

+
Parameters:
config - The statistics attributes. If null, default attributes are used. +

+

Returns:
The database environment's per-file memory pool (that is, the buffer + cache) statistics. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getLogStats

+
+public LogStats getLogStats(StatsConfig config)
+                     throws DatabaseException
+
+
Return the database environment's logging statistics. +

+

+

+
Parameters:
config - The statistics attributes. If null, default attributes are used. +

+

Returns:
The database environment's logging statistics. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +
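+ As the parameter descriptions above note, passing null selects the default statistics attributes. The sketch below simply prints the logging and per-file cache statistics using their string forms: +
+    // Dump logging and per-file cache statistics with default attributes (sketch).
+    LogStats logStats = myEnvironment.getLogStats(null);
+    System.out.println(logStats);
+    CacheFileStats[] fileStats = myEnvironment.getCacheFileStats(null);
+    for (int i = 0; fileStats != null && i < fileStats.length; i++)
+        System.out.println(fileStats[i]);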

+getReplicationStats

+
+public ReplicationStats getReplicationStats(StatsConfig config)
+                                     throws DatabaseException
+
+
Return the database environment's replication statistics. +

+

+

+
Parameters:
config - The statistics attributes. If null, default attributes are used. +

+

Returns:
The database environment's replication statistics. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+logFlush

+
+public void logFlush(LogSequenceNumber lsn)
+              throws DatabaseException
+
+
Flush log records to stable storage. +

+

+

+
Parameters:
lsn - All log records with LogSequenceNumber values less than or equal to + the lsn parameter are written to stable storage. If lsn is null, + all records in the log are flushed. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+logPut

+
+public LogSequenceNumber logPut(DatabaseEntry data,
+                                boolean flush)
+                         throws DatabaseException
+
+
Append a record to the log. +

+

+

+
Parameters:
data - The record to append to the log. +

+ The caller is responsible for providing any necessary structure to + data. (For example, in a write-ahead logging protocol, the + application must understand what part of data is an operation code, + what part is redo information, and what part is undo information. + In addition, most transaction managers will store in data the + LogSequenceNumber of the previous log record for the same + transaction, to support chaining back through the transaction's log + records during undo.) +

flush - The log is forced to disk after this record is written, guaranteeing + that all records with LogSequenceNumber values less than or equal + to the one being "put" are on disk before this method returns. +

+

Returns:
The LogSequenceNumber of the put record. +
Throws: +
DatabaseException
+
+
+
+ +
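+ A sketch of appending an application-formatted record and forcing it to disk; the record's internal layout (operation code, redo/undo data, back-pointer) remains the application's responsibility, as described above: +
+    // Append an application-defined record and flush the log (sketch).
+    byte[] payload = "application-defined record".getBytes();
+    DatabaseEntry logRecord = new DatabaseEntry(payload);
+    LogSequenceNumber lsn = myEnvironment.logPut(logRecord, true /* flush */);
+    // An equivalent explicit flush of everything up to and including lsn:
+    // myEnvironment.logFlush(lsn);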

+getArchiveLogFiles

+
+public File[] getArchiveLogFiles(boolean includeInUse)
+                          throws DatabaseException
+
+
Return the names of all of the log files that are no longer in use. +

+ This method returns the names of all of the log files that are no + longer in use (for example, that are no longer involved in active + transactions), and that may safely be archived for catastrophic + recovery and then removed from the system. If there are no + filenames to return, the method returns null. +

+ Log cursor handles (returned by Environment.openLogCursor) may have open file descriptors for log files in the + database environment. Also, the Berkeley DB interfaces to the + database environment logging subsystem (for example, + Environment.logPut and Transaction.abort) may + allocate log cursors and have open file descriptors for log files + as well. On operating systems where filesystem-related system calls + (for example, rename and unlink on Windows/NT) can fail if a process + has an open file descriptor for the affected file, attempting to + move or remove the log files listed by this method may fail. All + Berkeley DB internal use of log cursors operates on active log files + only and, furthermore, is short-lived in nature. So, an application + seeing such a failure should be restructured to close any open log + cursors it may have, and otherwise to retry the operation until it + succeeds. (Although the latter is not likely to be necessary; it + is hard to imagine a reason to move or rename a log file in which + transactions are being logged or aborted.) +

+

+

+
Parameters:
includeInUse - Return all the log filenames, regardless of whether or not they are + in use. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +
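+ A sketch of a simple archival pass using this method together with getArchiveDatabases below; copyToArchive is a hypothetical helper standing in for the application's backup mechanism: +
+    // Archive log files that are no longer in use (sketch).
+    File[] oldLogs = myEnvironment.getArchiveLogFiles(false);
+    if (oldLogs != null)
+        for (int i = 0; i < oldLogs.length; i++)
+            copyToArchive(oldLogs[i]);       // hypothetical backup helper
+    // Database files needed to recover from catastrophic failure.
+    File[] dbFiles = myEnvironment.getArchiveDatabases();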

+getArchiveDatabases

+
+public File[] getArchiveDatabases()
+                           throws DatabaseException
+
+
Return the names of the database files that need to be archived in + order to recover the database from catastrophic failure. +

+ If any of the database files have not been accessed during the + lifetime of the current log files, their names will not be included + in this list. It is also possible that some of the files referred + to by the log have since been deleted from the system. +

+

+

+

+ +
Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+removeOldLogFiles

+
+public void removeOldLogFiles()
+                       throws DatabaseException
+
+
Remove log files that are no longer needed. +

+ Automatic log file removal is likely to make catastrophic recovery + impossible. +

+

+

+

+ +
Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+recover

+
+public PreparedTransaction[] recover(int count,
+                                     boolean continued)
+                              throws DatabaseException
+
+
Return a list of prepared but not yet resolved transactions. +

+ This method should only be called after the environment has been + recovered. Because database environment state must be preserved + between recovery and the application calling this method, + applications must either call this method using the same environment + handle used when recovery is done, or the database environment must + not be configured using the EnvironmentConfig.setPrivate + method. +

+ This method returns a list of transactions that must be resolved by + the application (either committed, aborted or discarded). The + return value is an array of PreparedTransaction objects. +

+ The application must call Transaction.abort, + Transaction.commit or Transaction.discard on + each returned Transaction handle before starting any new + operations. +

+

+

+
Parameters:
count - The maximum number of transactions to return. +

continued - If false, begin returning a list of prepared, but not yet resolved + transactions. If true, continue returning a list of prepared + transactions, starting where the last call to this method left off. +

+

Returns:
A list of prepared but not yet resolved transactions. +
Throws: +
DatabaseException
+
+
+
+ +
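+ The sketch below resolves transactions left prepared at the last failure; it assumes PreparedTransaction exposes a getTransaction() accessor, which should be treated as an assumption, and it simply aborts everything for brevity: +
+    // Resolve prepared-but-unresolved transactions after recovery (sketch).
+    PreparedTransaction[] prepared = myEnvironment.recover(10, false);
+    while (prepared != null && prepared.length > 0) {
+        for (int i = 0; i < prepared.length; i++)
+            prepared[i].getTransaction().abort();   // assumed accessor
+        prepared = myEnvironment.recover(10, true); // continue where the last call left off
+    }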

+panic

+
+public void panic(boolean onoff)
+           throws DatabaseException
+
+
Set the panic state for the database environment. + Database environments in a panic state normally refuse all attempts to + call library functions, throwing a RunRecoveryException. +

+ This method configures a database environment, including all threads +of control accessing the database environment, not only the operations +performed using a specified Environment handle. +

+ This method may be called at any time during the life of the application. +

+

+

+
Parameters:
onoff - If true, set the panic state for the database environment. +
Throws: +
DatabaseException
+
+
+
+ +

+getVersionString

+
+public static String getVersionString()
+
+
Return the release version information, suitable for display. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
The release version information, suitable for display.
+
+
+
+ +

+getVersionMajor

+
+public static int getVersionMajor()
+
+
Return the release major number. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
The release major number.
+
+
+
+ +

+getVersionMinor

+
+public static int getVersionMinor()
+
+
Return the release minor number. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
The release minor number.
+
+
+
+ +

+getVersionPatch

+
+public static int getVersionPatch()
+
+
Return the release patch number. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
The release patch number.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/EnvironmentConfig.html b/db/docs/java/com/sleepycat/db/EnvironmentConfig.html new file mode 100644 index 000000000..29f197800 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/EnvironmentConfig.html @@ -0,0 +1,5278 @@ + + + + + + +EnvironmentConfig (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class EnvironmentConfig

+
+java.lang.Object
+  extended bycom.sleepycat.db.EnvironmentConfig
+
+
+
All Implemented Interfaces:
Cloneable
+
+
+
+
public class EnvironmentConfig
extends Object
implements Cloneable
+ +

+Specifies the attributes of an environment. +

+To change the default settings for a database environment, an application +creates a configuration object, customizes settings and uses it for +environment construction. The set methods of this class validate the +configuration values when the method is invoked. An +IllegalArgumentException is thrown if the value is not valid for that +attribute. +

+All commonly used environment attributes have convenience setter/getter +methods defined in this class. For example, to change the default +transaction timeout setting for an environment, the application should +do the following: +

+

+    // customize an environment configuration
+    EnvironmentConfig envConfig = new EnvironmentConfig();
+    envConfig.setTxnTimeout(10000);  // will throw if timeout value is invalid
+    

+ // Open the environment. + Environment myEnvironment = new Environment(home, envConfig); +

+

+Additional parameters are described in the example.properties file found at +the top level of the distribution package. These additional parameters +will not be needed by most applications. This category of +properties can be specified for the EnvironmentConfig object through a +Properties object read by EnvironmentConfig(Properties), or +individually through EnvironmentConfig.setConfigParam(). +

+For example, an application can change the default btree node size with: +

+    envConfig.setConfigParam("je.nodeMaxEntries", "256");
+
+

+Environment configuration follows this order of precedence: +

    +
  1. Configuration parameters specified in <environment +home>/je.properties take first precedence. +
  2. Configuration parameters set in the EnvironmentConfig object used +at Environment construction are next. +
  3. Any configuration parameters not set by the application are set to +system defaults, described in the example.properties file. +
+

+An EnvironmentConfig can be used to specify both mutable and immutable +environment properties. Immutable properties may be specified when the +first Environment handle (instance) is opened for a given physical +environment. When more handles are opened for the same environment, the +following rules apply: +

+

    +
  1. Immutable properties must equal the original values specified when +constructing an Environment handle for an already open environment. When a +mismatch occurs, an exception is thrown. +
  2. Mutable properties are ignored when constructing an Environment +handle for an already open environment. +
+

+After an Environment has been constructed, its mutable properties may +be changed using +Environment.setConfig(com.sleepycat.db.EnvironmentConfig). +

+ +

+


+ +

+ + + + + + + + + + + + + + +
+Field Summary
+static EnvironmentConfigDEFAULT + +
+           
+  + + + + + + + + + + +
+Constructor Summary
EnvironmentConfig() + +
+          Create an EnvironmentConfig initialized with the system default settings.
+  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ voidaddDataDir(String dataDir) + +
+          Set the path of a directory to be used as the location of the access + method database files.
+ booleangetAllowCreate() + +
+          Return if the database environment is configured to create any + underlying files, as necessary.
+ intgetCacheCount() + +
+          Return the number of shared memory buffer pools, that is, the number + of caches.
+ longgetCacheSize() + +
+          Return the size of the shared memory buffer pool, that is, the cache.
+ booleangetCDBLockAllDatabases() + +
+          Return if the Concurrent Data Store applications are configured to + perform locking on an environment-wide basis rather than on a + per-database basis.
+ String[]getDataDirs() + +
+          Return the array of data directories.
+ booleangetDirectDatabaseIO() + +
+          Return if the database environment has been configured to not buffer + database files.
+ booleangetDirectLogIO() + +
+          Return if the database environment has been configured to not buffer + log files.
+ booleangetDsyncLog() + +
+          Return if the database environment has been configured to flush log + writes to the backing disk before returning from the write system + call.
+ booleangetEncrypted() + +
+          Return if the database environment has been configured to perform + encryption.
+ ErrorHandlergetErrorHandler() + +
+          Return the function to be called if an error occurs.
+ StringgetErrorPrefix() + +
+          Return the prefix string that appears before error messages.
+ OutputStreamgetErrorStream() + +
+          Return the OutputStream used for displaying error messages.
+ FeedbackHandlergetFeedbackHandler() + +
+          Return the object whose methods are called to provide feedback.
+ booleangetInitializeCache() + +
+          Return if the database environment is configured with a shared + memory buffer pool.
+ booleangetInitializeCDB() + +
+          Return if the database environment is configured for the Concurrent + Data Store product.
+ booleangetInitializeLocking() + +
+          Return if the database environment is configured for locking.
+ booleangetInitializeLogging() + +
+          Return if the database environment is configured for logging.
+ booleangetInitializeRegions() + +
+          Return if the database environment has been configured to page-fault + shared regions into memory when initially creating or joining a + database environment.
+ booleangetInitializeReplication() + +
+          Return if the database environment is configured for replication.
+ booleangetJoinEnvironment() + +
+          Return if the handle is configured to join an existing environment.
+ byte[][]getLockConflicts() + +
+          Return the locking conflicts matrix.
+ LockDetectModegetLockDetectMode() + +
+          Return if the deadlock detector is configured to run whenever a lock + conflict occurs.
+ booleangetLockDown() + +
+          Return if the database environment is configured to lock shared + environment files and memory-mapped databases into memory.
+ longgetLockTimeout() + +
+          Return the database environment lock timeout value, in microseconds; + a timeout of 0 means no timeout is set.
+ booleangetLogAutoRemove() + +
+          Return if the system has been configured to automatically remove log + files that are no longer needed.
+ intgetLogBufferSize() + +
+          Return the size of the in-memory log buffer, in bytes.
+ FilegetLogDirectory() + +
+          Return the path of a directory to be used as the location of logging files.
+ booleangetLogInMemory() + +
+          Return if the database environment is configured to maintain transaction logs + in memory rather than on disk.
+ LogRecordHandlergetLogRecordHandler() + +
+          Return the handler for application-specific log records.
+ intgetLogRegionSize() + +
+          Return the size of the underlying logging subsystem region.
+ intgetMaxLockers() + +
+          Return the maximum number of lockers.
+ intgetMaxLockObjects() + +
+          Return the maximum number of locked objects.
+ intgetMaxLocks() + +
+          Return the maximum number of locks.
+ intgetMaxLogFileSize() + +
+          Return the maximum size of a single file in the log, in bytes.
+ MessageHandlergetMessageHandler() + +
+          Return the function to be called with an informational message.
+ OutputStreamgetMessageStream() + +
+          Return the OutputStream used for displaying informational messages.
+ longgetMMapSize() + +
+          Return the maximum file size, in bytes, for a file to be mapped into + the process address space.
+ longgetMode() + +
+          Return the mode to use when creating underlying files and shared + memory segments.
+ booleangetNoLocking() + +
+          Return if the system has been configured to grant all requested mutual + exclusion mutexes and database locks without regard for their actual + availability.
+ booleangetNoMMap() + +
+          Return if the system has been configured to copy read-only database files + into the local cache instead of potentially mapping them into process + memory.
+ booleangetNoPanic() + +
+          Return if the system has been configured to ignore any panic state in + the database environment.
+ booleangetOverwrite() + +
+          Return if the system has been configured to overwrite files stored in + encrypted formats before deleting them.
+ PanicHandlergetPanicHandler() + +
+          Return the function to be called if the database environment panics.
+ booleangetPrivate() + +
+          Return if the database environment is configured to only be accessed + by a single process.
+ booleangetReadOnly() + +
+          Return whether the environment handle is opened read-only.
+ longgetReplicationLimit() + +
+          Return the transmit limit in bytes for a single call to + Environment.processReplicationMessage.
+ ReplicationTransportgetReplicationTransport() + +
+          Return the replication callback function used to transmit data using + the replication application's communication infrastructure.
+ booleangetRunFatalRecovery() + +
+          Return if the handle is configured to run catastrophic recovery on + the database environment before opening it for use.
+ booleangetRunRecovery() + +
+          Return if the handle is configured to run normal recovery on the + database environment before opening it for use.
+ longgetSegmentId() + +
+          Return the base segment ID.
+ booleangetSystemMemory() + +
+          Return if the database environment is configured to allocate memory + from system shared memory instead of from memory backed by the + filesystem.
+ StringgetTemporaryDirectory() + +
+          Return the path of a directory to be used as the location of + temporary files.
+ intgetTestAndSetSpins() + +
+          Return the number of times test-and-set mutexes should spin before + blocking.
+ booleangetThreaded() + +
+          Return if the handle is configured to be free-threaded.
+ booleangetTransactional() + +
+          Return if the database environment is configured for transactions.
+ intgetTxnMaxActive() + +
+          Return the minimum number of simultaneously active transactions + supported by the database environment.
+ booleangetTxnNoSync() + +
+          Return if the system has been configured to not write or synchronously + flush the log on transaction commit.
+ booleangetTxnNotDurable() + +
+          Return if the system has been configured to not write log records.
+ longgetTxnTimeout() + +
+          Return the database environment transaction timeout value, in + microseconds; a timeout of 0 means no timeout is set.
+ DategetTxnTimestamp() + +
+          Return the time to which recovery will be done, or 0 if recovery will + be done to the most current possible date.
+ booleangetTxnWriteNoSync() + +
+          Return if the system has been configured to write, but not synchronously + flush, the log on transaction commit.
+ booleangetUseEnvironment() + +
+          Return if the database environment is configured to accept information + from the process environment when naming files.
+ booleangetUseEnvironmentRoot() + +
+          Return if the database environment is configured to accept information + from the process environment when naming files if the process has + appropriate permissions.
+ booleangetVerboseDeadlock() + +
+          Return if the database environment is configured to display + additional information when doing deadlock detection.
+ booleangetVerboseRecovery() + +
+          Return if the database environment is configured to display + additional information when performing recovery.
+ booleangetVerboseReplication() + +
+          Return if the database environment is configured to display + additional information when processing replication messages.
+ booleangetVerboseWaitsFor() + +
+          Return if the database environment is configured to display the + waits-for table when doing deadlock detection.
+ booleangetYieldCPU() + +
+          Return if the system has been configured to yield the processor + immediately after each page or mutex acquisition.
+ voidsetAllowCreate(boolean allowCreate) + +
+          Configure the database environment to create any underlying files, + as necessary.
+ voidsetCacheCount(int cacheCount) + +
+          Set the number of shared memory buffer pools, that is, the number of +caches.
+ voidsetCacheSize(long cacheSize) + +
+          Set the size of the shared memory buffer pool, that is, the size of the +cache.
+ voidsetCDBLockAllDatabases(boolean cdbLockAllDatabases) + +
+          Configure Concurrent Data Store applications to perform locking on + an environment-wide basis rather than on a per-database basis.
+ voidsetDirectDatabaseIO(boolean directDatabaseIO) + +
+          Configure the database environment to not buffer database files.
+ voidsetDirectLogIO(boolean directLogIO) + +
+          Configure the database environment to not buffer log files.
+ voidsetDsyncLog(boolean dsyncLog) + +
+          Configure the database environment to flush log writes to the + backing disk before returning from the write system call, rather + than flushing log writes explicitly in a separate system call.
+ voidsetEncrypted(String password) + +
+          Set the password used to perform encryption and decryption.
+ voidsetErrorHandler(ErrorHandler errorHandler) + +
+          Set the function to be called if an error occurs.
+ voidsetErrorPrefix(String errorPrefix) + +
+          Set the prefix string that appears before error messages.
+ voidsetErrorStream(OutputStream errorStream) + +
+          Set an OutputStream for displaying error messages.
+ voidsetFeedbackHandler(FeedbackHandler feedbackHandler) + +
+          Set an object whose methods are called to provide feedback.
+ voidsetInitializeCache(boolean initializeCache) + +
+          Configure a shared memory buffer pool in the database environment.
+ voidsetInitializeCDB(boolean initializeCDB) + +
+          Configure the database environment for the Concurrent Data Store + product.
+ voidsetInitializeLocking(boolean initializeLocking) + +
+          Configure the database environment for locking.
+ voidsetInitializeLogging(boolean initializeLogging) + +
+          Configure the database environment for logging.
+ voidsetInitializeRegions(boolean initializeRegions) + +
+          Configure the database environment to page-fault shared regions into + memory when initially creating or joining a database environment.
+ voidsetInitializeReplication(boolean initializeReplication) + +
+          Configure the database environment for replication.
+ voidsetJoinEnvironment(boolean joinEnvironment) + +
+          Configure the handle to join an existing environment.
+ voidsetLockConflicts(byte[][] lockConflicts) + +
+          Configure the locking conflicts matrix.
+ voidsetLockDetectMode(LockDetectMode lockDetectMode) + +
+          Configure if the deadlock detector is to be run whenever a lock + conflict occurs.
+ voidsetLockDown(boolean lockDown) + +
+          Configure the database environment to lock shared environment files + and memory-mapped databases into memory.
+ voidsetLockTimeout(long lockTimeout) + +
+          Set the timeout value for the database environment +locks.
+ voidsetLogAutoRemove(boolean logAutoRemove) + +
+          Configure the system to automatically remove log files that are no + longer needed.
+ voidsetLogBufferSize(int logBufferSize) + +
+          Set the size of the in-memory log buffer, in bytes.
+ voidsetLogDirectory(File logDirectory) + +
+          Set the path of a directory to be used as the location of logging files.
+ voidsetLogInMemory(boolean inmemory) + +
+          If set, maintain transaction logs in memory rather than on disk.
+ voidsetLogRecordHandler(LogRecordHandler logRecordHandler) + +
+          Set a function to process application-specific log records.
+ voidsetLogRegionSize(int logRegionSize) + +
+          Set the size of the underlying logging area of the database + environment, in bytes.
+ voidsetMaxLockers(int maxLockers) + +
+          Set the maximum number of locking entities supported by the database + environment.
+ voidsetMaxLockObjects(int maxLockObjects) + +
+          Set the maximum number of locked objects supported by the database + environment.
+ voidsetMaxLocks(int maxLocks) + +
+          Set the maximum number of locks supported by the database + environment.
+ voidsetMaxLogFileSize(int maxLogFileSize) + +
+          Set the maximum size of a single file in the log, in bytes.
+ voidsetMessageHandler(MessageHandler messageHandler) + +
+          Set a function to be called with an informational message.
+ voidsetMessageStream(OutputStream messageStream) + +
+          Set an OutputStream for displaying informational messages.
+ voidsetMMapSize(long mmapSize) + +
+          Set the maximum file size, in bytes, for a file to be mapped into + the process address space.
+ voidsetMode(int mode) + +
+          Configure the database environment to use a specific mode when + creating underlying files and shared memory segments.
+ voidsetNoLocking(boolean noLocking) + +
+          Configure the system to grant all requested mutual exclusion mutexes + and database locks without regard for their actual availability.
+ voidsetNoMMap(boolean noMMap) + +
+          Configure the system to copy read-only database files into the local + cache instead of potentially mapping them into process memory.
+ voidsetNoPanic(boolean noPanic) + +
+          Configure the system to ignore any panic state in the database + environment.
+ voidsetOverwrite(boolean overwrite) + +
+          Configure the system to overwrite files stored in encrypted formats + before deleting them.
+ voidsetPanicHandler(PanicHandler panicHandler) + +
+          Set the function to be called if the database environment panics.
+ voidsetPrivate(boolean isPrivate) + +
+          Configure the database environment to only be accessed by a single + process (although that process may be multithreaded).
+ voidsetReadOnly(boolean readOnly) + +
+          Configure the environment handle to be opened read-only.
+ voidsetReplicationLimit(long replicationLimit) + +
+          Impose a byte-count limit on the amount of data that will be + transmitted from a site in a single call to Environment.processReplicationMessage.
+ voidsetReplicationTransport(int envid, + ReplicationTransport replicationTransport) + +
+          Initialize the communication infrastructure for a database environment + participating in a replicated application.
+ voidsetRPCServer(String rpcServer, + long rpcClientTimeout, + long rpcServerTimeout) + +
+          Establish a connection to a RPC server for this database environment.
+ voidsetRunFatalRecovery(boolean runFatalRecovery) + +
+          Configure to run catastrophic recovery on this environment before opening it for +normal use.
+ voidsetRunRecovery(boolean runRecovery) + +
+          Configure to run normal recovery on this environment before opening it for +normal use.
+ voidsetSegmentId(long segmentId) + +
+          Specify a base segment ID for database environment shared memory + regions created in system memory on VxWorks or systems supporting + X/Open-style shared memory interfaces; for example, UNIX systems + supporting shmget and related System V IPC interfaces.
+ voidsetSystemMemory(boolean systemMemory) + +
+          Configure the database environment to allocate memory from system + shared memory instead of from memory backed by the filesystem.
+ voidsetTemporaryDirectory(String temporaryDirectory) + +
+          Set the path of a directory to be used as the location of temporary + files.
+ voidsetTestAndSetSpins(int testAndSetSpins) + +
+          Set the number of times test-and-set mutexes should spin before + blocking.
+ voidsetThreaded(boolean threaded) + +
+          Configure the handle to be free-threaded; that is, usable + by multiple threads within a single address space.
+ voidsetTransactional(boolean transactional) + +
+          Configure the database environment for transactions.
+ voidsetTxnMaxActive(int txnMaxActive) + +
+          Configure the database environment to support at least txnMaxActive + active transactions.
+ voidsetTxnNoSync(boolean txnNoSync) + +
+          Configure the system to not write or synchronously flush the log + on transaction commit.
+ voidsetTxnNotDurable(boolean txnNotDurable) + +
+          Configure the system to not write log records.
+ voidsetTxnTimeout(long txnTimeout) + +
+          Set the timeout value for the database environment +transactions.
+ voidsetTxnTimestamp(Date txnTimestamp) + +
+          Recover to the specified time rather than to the most current + possible date.
+ voidsetTxnWriteNoSync(boolean txnWriteNoSync) + +
+          Configure the system to write, but not synchronously flush, the log on + transaction commit.
+ voidsetUseEnvironment(boolean useEnvironment) + +
+          Configure the database environment to accept information from the + process environment when naming files, regardless of the status of + the process.
+ voidsetUseEnvironmentRoot(boolean useEnvironmentRoot) + +
+          Configure the database environment to accept information from the + process environment when naming files, if the process has + appropriate permissions (for example, users with a user-ID of 0 on + UNIX systems).
+ voidsetVerboseDeadlock(boolean verboseDeadlock) + +
+          Display additional information when doing deadlock detection.
+ voidsetVerboseRecovery(boolean verboseRecovery) + +
+          Display additional information when performing recovery.
+ voidsetVerboseReplication(boolean verboseReplication) + +
+          Display additional information when processing replication messages.
+ voidsetVerboseWaitsFor(boolean verboseWaitsFor) + +
+          Display the waits-for table when doing deadlock detection.
+ voidsetYieldCPU(boolean yieldCPU) + +
+          Configure the system to yield the processor immediately after each + page or mutex acquisition.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + +
+Field Detail
+ +

+DEFAULT

+
+public static final EnvironmentConfig DEFAULT
+
+
+
+
+ + + + + + + + +
+Constructor Detail
+ +

+EnvironmentConfig

+
+public EnvironmentConfig()
+
+
Create an EnvironmentConfig initialized with the system default settings. +

+

+ + + + + + + + +
+Method Detail
+ +

+setTransactional

+
+public void setTransactional(boolean transactional)
+
+
Configure the database environment for transactions. +

+ This configuration option should be used when transactional guarantees + such as atomicity of multiple operations and durability are important. +

+

+

+
+
+
+
Parameters:
transactional - If true, configure the database environment for transactions.
+
+
+
+ +

+getTransactional

+
+public boolean getTransactional()
+
+
Return if the database environment is configured for transactions. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment is configured for transactions.
+
+
+
+ +
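+ As an illustration of the subsystems a transactional application typically enables together, a configuration might look like the following sketch (home is the environment home directory, as in the class example above): +
+    // Configure a transactional database environment (sketch).
+    EnvironmentConfig config = new EnvironmentConfig();
+    config.setAllowCreate(true);
+    config.setInitializeCache(true);
+    config.setInitializeLocking(true);
+    config.setInitializeLogging(true);
+    config.setTransactional(true);
+    Environment env = new Environment(home, config);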

+setAllowCreate

+
+public void setAllowCreate(boolean allowCreate)
+
+
Configure the database environment to create any underlying files, + as necessary. +

+

+

+
+
+
+
Parameters:
allowCreate - If true, configure the database environment to create any underlying + files, as necessary.
+
+
+
+ +

+getAllowCreate

+
+public boolean getAllowCreate()
+
+
Return if the database environment is configured to create any + underlying files, as necessary. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment is configured to create any + underlying files, as necessary.
+
+
+
+ +

+setCacheSize

+
+public void setCacheSize(long cacheSize)
+
+
Set the size of the shared memory buffer pool, that is, the size of the +cache. +

+The cache should be the size of the normal working data set of the +application, with some small amount of additional memory for unusual +situations. (Note: the working set is not the same as the number of +pages accessed simultaneously, and is usually much larger.) +

+The default cache size is 256KB, and may not be specified as less than +20KB. Any cache size less than 500MB is automatically increased by 25% +to account for buffer pool overhead; cache sizes larger than 500MB are +used as specified. The current maximum size of a single cache is 4GB. +(All sizes are in powers-of-two, that is, 256KB is 2^18 not 256,000.) +

+The database environment's cache size may also be set using the environment's +DB_CONFIG file. The syntax of the entry in that file is a single line +with the string "set_cachesize", one or more whitespace characters, and the cache size specified in three parts: the gigabytes of cache, the +additional bytes of cache, and the number of caches, also separated by +whitespace characters. For example, "set_cachesize 2 524288000 3" would +create a 2.5GB logical cache, split between three physical caches. +Because the DB_CONFIG file is read when the database environment is +opened, it will silently overrule configuration done before that time. +

+This method configures a database environment, including all threads +of control accessing the database environment, not only the operations +performed using a specified Environment handle. +

+This method may not be called after the +environment has been opened. +If joining an existing database environment, any +information specified to this method will be ignored. +


+

+

+
+
+
+
Parameters:
cacheSize - The size of the shared memory buffer pool, that is, the size of the +cache. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getCacheSize

+
+public long getCacheSize()
+
+
Return the size of the shared memory buffer pool, that is, the cache. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The size of the shared memory buffer pool, that is, the cache.
+
+
+
+ +

+setCacheCount

+
+public void setCacheCount(int cacheCount)
+
+
Set the number of shared memory buffer pools, that is, the number of +caches. +

+It is possible to specify caches larger than 4GB and/or large enough +that they cannot be allocated contiguously on some architectures. For +example, some releases of Solaris limit the amount of memory that may +be allocated contiguously by a process. This method allows applications +to break the cache up into a number of equally sized, separate +pieces of memory. +

+

+The database environment's cache size may also be set using the environment's +DB_CONFIG file. The syntax of the entry in that file is a single line +with the string "set_cachesize", one or more whitespace characters, and the cache size specified in three parts: the gigabytes of cache, the +additional bytes of cache, and the number of caches, also separated by +whitespace characters. For example, "set_cachesize 2 524288000 3" would +create a 2.5GB logical cache, split between three physical caches. +Because the DB_CONFIG file is read when the database environment is +opened, it will silently overrule configuration done before that time. +

+This method configures a database environment, including all threads +of control accessing the database environment, not only the operations +performed using a specified Environment handle. +

+This method may not be called after the +environment has been opened. +If joining an existing database environment, any +information specified to this method will be ignored. +


+

+

+
+
+
+
Parameters:
cacheCount - The number of shared memory buffer pools, that is, the number of caches. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +
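+ For example, the 2.5GB, three-cache configuration shown for DB_CONFIG above can equally be requested through this API; note that the size argument is a byte count, so 2GB plus 500MB is written out explicitly: +
+    // A 2.5GB logical cache split across three physical caches (sketch).
+    envConfig.setCacheSize((2L * 1024 * 1024 * 1024) + 524288000L);
+    envConfig.setCacheCount(3);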

+getCacheCount

+
+public int getCacheCount()
+
+
Return the number of shared memory buffer pools, that is, the number + of caches. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The number of shared memory buffer pools, that is, the number + of caches.
+
+
+
+ +

+setCDBLockAllDatabases

+
+public void setCDBLockAllDatabases(boolean cdbLockAllDatabases)
+
+
Configure Concurrent Data Store applications to perform locking on + an environment-wide basis rather than on a per-database basis. +

+ This method only affects the specified Environment handle (and +any other library handles opened within the scope of that handle). +For consistent behavior across the environment, all Environment +handles opened in the database environment must either call this method +or the configuration should be specified in the database environment's +DB_CONFIG configuration file. +

+ This method may not be called after the +environment has been opened. +

+

+

+
+
+
+
Parameters:
cdbLockAllDatabases - If true, configure Concurrent Data Store applications to perform + locking on an environment-wide basis rather than on a per-database + basis.
+
+
+
+ +

+getCDBLockAllDatabases

+
+public boolean getCDBLockAllDatabases()
+
+
Return if the Concurrent Data Store applications are configured to + perform locking on an environment-wide basis rather than on a + per-database basis. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the Concurrent Data Store applications are configured to + perform locking on an environment-wide basis rather than on a + per-database basis.
+
+
+
+ +

+addDataDir

+
+public void addDataDir(String dataDir)
+
+
Set the path of a directory to be used as the location of the access + method database files. +

+ Paths specified to Environment.openDatabase and + Environment.openSecondaryDatabase will be searched + relative to this path. Paths set using this method are additive, and + specifying more than one will result in each specified directory + being searched for database files. If any directories are + specified, created database files will always be created in the + first path specified. +

+ If no database directories are specified, database files must be named + either by absolute paths or relative to the environment home directory. +

+ The database environment's data directories may also be set using the environment's +DB_CONFIG file. The syntax of the entry in that file is a single line +with the string "set_data_dir", one or more whitespace characters, and the directory name. +

+ This method configures only operations performed using a single +Environment handle, not an entire database environment. +

+ This method may not be called after the +environment has been opened. +If joining an existing database environment, the +information specified to this method must be consistent with the +existing environment or corruption can occur. +

+

+

+
+
+
+
Parameters:
dataDir - A directory to be used as a location for database files. + On Windows platforms, this argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +
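+ For example, database files can be spread across two directories relative to the environment home (the directory names below are purely illustrative); new databases will be created in the first directory listed: +
+    // Search data1 and data2 for databases; create new ones in data1 (sketch).
+    envConfig.addDataDir("data1");
+    envConfig.addDataDir("data2");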

+getDataDirs

+
+public String[] getDataDirs()
+
+
Return the array of data directories. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The array of data directories.
+
+
+
+ +

+setDirectDatabaseIO

+
+public void setDirectDatabaseIO(boolean directDatabaseIO)
+
+
Configure the database environment to not buffer database files. +

+ This is intended to avoid double caching. +

+ This method only affects the specified Environment handle (and +any other library handles opened within the scope of that handle). +For consistent behavior across the environment, all Environment +handles opened in the database environment must either call this method +or the configuration should be specified in the database environment's +DB_CONFIG configuration file. +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
directDatabaseIO - If true, configure the database environment to not buffer database files.
+
+
+
+ +

+getDirectDatabaseIO

+
+public boolean getDirectDatabaseIO()
+
+
Return if the database environment has been configured to not buffer + database files. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment has been configured to not buffer + database files.
+
+
+
+ +

+setDirectLogIO

+
+public void setDirectLogIO(boolean directLogIO)
+
+
Configure the database environment to not buffer log files. +

+ This is intended to avoid double caching. +

+ This method only affects the specified Environment handle (and +any other library handles opened within the scope of that handle). +For consistent behavior across the environment, all Environment +handles opened in the database environment must either call this method +or the configuration should be specified in the database environment's +DB_CONFIG configuration file. +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
directLogIO - If true, configure the database environment to not buffer log files.
+
+
+
+ +

+getDirectLogIO

+
+public boolean getDirectLogIO()
+
+
Return if the database environment has been configured to not buffer + log files. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment has been configured to not buffer + log files.
+
+
+
+ +

+setDsyncLog

+
+public void setDsyncLog(boolean dsyncLog)
+
+
Configure the database environment to flush log writes to the + backing disk before returning from the write system call, rather + than flushing log writes explicitly in a separate system call. +

+ This configuration is only available on some systems (for example, + systems supporting the POSIX standard O_DSYNC flag, or systems + supporting the Win32 FILE_FLAG_WRITE_THROUGH flag). This + configuration may result in inaccurate file modification times and + other file-level information for Berkeley DB log files. This + configuration may offer a performance increase on some systems and + a performance decrease on others. +

+ This method only affects the specified Environment handle (and +any other library handles opened within the scope of that handle). +For consistent behavior across the environment, all Environment +handles opened in the database environment must either call this method +or the configuration should be specified in the database environment's +DB_CONFIG configuration file. +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
dsyncLog - If true, configure the database environment to flush log writes to + the backing disk before returning from the write system call, rather + than flushing log writes explicitly in a separate system call.
+
+
+
+ +

+getDsyncLog

+
+public boolean getDsyncLog()
+
+
Return if the database environment has been configured to flush log + writes to the backing disk before returning from the write system + call. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment has been configured to flush log + writes to the backing disk before returning from the write system + call.
+
+
+
+ +

+setEncrypted

+
+public void setEncrypted(String password)
+
+
Set the password used to perform encryption and decryption. +

+ Berkeley DB uses the Rijndael/AES (also known as the Advanced + Encryption Standard and Federal Information Processing + Standard (FIPS) 197) algorithm for encryption or decryption. +

+

+
+
+
+
+
+
Parameters:
password - The password used to perform encryption and decryption.
+ +
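+ A one-line sketch of enabling Rijndael/AES encryption for the environment; the passphrase below is a placeholder: +
+    // Set the password used for encryption and decryption (sketch).
+    envConfig.setEncrypted("example-passphrase");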

+getEncrypted

+
+public boolean getEncrypted()
+
+
Return if the database environment has been configured to perform + encryption. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment has been configured to perform + encryption.
+
+
+
+ +

+setErrorHandler

+
+public void setErrorHandler(ErrorHandler errorHandler)
+
+
Set the function to be called if an error occurs. +

+When an error occurs in the Berkeley DB library, an exception is thrown. +In some cases, however, the error information returned to the +application may be insufficient to completely describe the cause of the +error, especially during initial application debugging. +

+The EnvironmentConfig.setErrorHandler and DatabaseConfig.setErrorHandler methods are used to enhance the mechanism for reporting +error messages to the application. In some cases, when an error occurs, +Berkeley DB will invoke the ErrorHandler's object error method. It is +up to this method to display the error message in an appropriate manner. +

+Alternatively, applications can use EnvironmentConfig.setErrorStream and DatabaseConfig.setErrorStream to +display the additional information via an output stream. Applications +should not mix these approaches. +

+This error-logging enhancement does not slow performance or significantly +increase application size, and may be run during normal operation as well +as during application debugging. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
errorHandler - The function to be called if an error occurs.
+
+
+
+ +
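+ The sketch below routes Berkeley DB error messages into the application's own logging. It assumes the ErrorHandler interface declares a single error(Environment, String, String) method; treat that signature as an assumption: +
+    // Forward library error messages to standard error (sketch).
+    envConfig.setErrorHandler(new ErrorHandler() {
+        public void error(Environment environment, String errorPrefix, String message) {
+            System.err.println((errorPrefix == null ? "" : errorPrefix + ": ") + message);
+        }
+    });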

+getErrorHandler

+
+public ErrorHandler getErrorHandler()
+
+
Return the function to be called if an error occurs. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The function to be called if an error occurs.
+
+
+
+ +

+setErrorPrefix

+
+public void setErrorPrefix(String errorPrefix)
+
+
Set the prefix string that appears before error messages. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
errorPrefix - The prefix string that appears before error messages.
+
+
+
+ +

+getErrorPrefix

+
+public String getErrorPrefix()
+
+
Return the prefix string that appears before error messages. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The prefix string that appears before error messages.
+
+
+
+ +

+setErrorStream

+
+public void setErrorStream(OutputStream errorStream)
+
+
Set an OutputStream for displaying error messages. +

+When an error occurs in the Berkeley DB library, an exception is thrown. +In some cases, however, the error information returned to the +application may be insufficient to completely describe the cause of the +error, especially during initial application debugging. +

+The EnvironmentConfig.setErrorStream and +DatabaseConfig.setErrorStream methods are used to enhance +the mechanism for reporting error messages to the application by setting +an OutputStream to be used for displaying additional Berkeley DB error +messages. In some cases, when an error occurs, Berkeley DB will output +an additional error message to the specified stream. +

+The error message will consist of the prefix string and a colon +(":") (if a prefix string was previously specified using +EnvironmentConfig.setErrorPrefix or DatabaseConfig.setErrorPrefix), an error string, and a trailing newline character. +

+Setting errorStream to null unconfigures the interface. +

+Alternatively, applications can use EnvironmentConfig.setErrorHandler and DatabaseConfig.setErrorHandler to capture +the additional error information in a way that does not use output +streams. Applications should not mix these approaches. +

+This error-logging enhancement does not slow performance or significantly +increase application size, and may be run during normal operation as well +as during application debugging. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
errorStream - The application-specified OutputStream for error messages.
+
+
+
+ +

+getErrorStream

+
+public OutputStream getErrorStream()
+
+
Return the OutputStream used for displaying error messages. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The OutputStream used for displaying error messages.
+
+
+
+ +

+setFeedbackHandler

+
+public void setFeedbackHandler(FeedbackHandler feedbackHandler)
+
+
Set an object whose methods are called to provide feedback. +

+Some operations performed by the Berkeley DB library can take +non-trivial amounts of time. This method can be used by applications +to monitor progress within these operations. When an operation is +likely to take a long time, Berkeley DB will call the object's methods +with progress information. +

+It is up to the object's methods to display this information in an +appropriate manner. +

+This method configures only operations performed using a single +Environment handle. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
feedbackHandler - An object whose methods are called to provide feedback.
+
+
+
+ +

+getFeedbackHandler

+
+public FeedbackHandler getFeedbackHandler()
+
+
Return the object whose methods are called to provide feedback. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The object whose methods are called to provide feedback.
+
+
+
+ +

+setInitializeCache

+
+public void setInitializeCache(boolean initializeCache)
+
+
Configure a shared memory buffer pool in the database environment. +

+ This subsystem should be used whenever an application is using any + Berkeley DB access method. +

+

+

+
+
+
+
Parameters:
initializeCache - If true, configure a shared memory buffer pool in the database + environment.
+
+
+
+ +

+getInitializeCache

+
+public boolean getInitializeCache()
+
+
Return if the database environment is configured with a shared + memory buffer pool. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment is configured with a shared + memory buffer pool.
+
+
+
+ +

+setInitializeCDB

+
+public void setInitializeCDB(boolean initializeCDB)
+
+
Configure the database environment for the Concurrent Data Store + product. +

+ In this mode, Berkeley DB provides multiple reader/single writer access. + The only other subsystem that should be specified for this handle is a + cache. +

+

+

+
+
+
+
Parameters:
initializeCDB - If true, configure the database environment for the Concurrent Data + Store product.
+
+
+
+ +

+getInitializeCDB

+
+public boolean getInitializeCDB()
+
+
Return if the database environment is configured for the Concurrent + Data Store product. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment is configured for the Concurrent + Data Store product.
+
+
+
+ +
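+As a hedged illustration of the Concurrent Data Store configuration described
+above (a cache plus CDB, and no other subsystems), an application might write:
+
+    EnvironmentConfig config = new EnvironmentConfig();
+    config.setInitializeCache(true);  // shared memory buffer pool
+    config.setInitializeCDB(true);    // multiple reader/single writer access
+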

+setInitializeLocking

+
+public void setInitializeLocking(boolean initializeLocking)
+
+
Configure the database environment for locking. +

+ Locking should be used when multiple processes or threads are going + to be reading and writing a database, so they do not interfere with + each other. If all threads are accessing the database(s) read-only, + locking is unnecessary. When locking is configured, it is usually + necessary to run a deadlock detector, as well. +

+

+

+
+
+
+
Parameters:
initializeLocking - If true, configure the database environment for locking.
+
+
+
+ +

+getInitializeLocking

+
+public boolean getInitializeLocking()
+
+
Return if the database environment is configured for locking. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment is configured for locking.
+
+
+
+ +

+setInitializeLogging

+
+public void setInitializeLogging(boolean initializeLogging)
+
+
Configure the database environment for logging. +

+ Logging should be used when recovery from application or system + failure is necessary. If the log region is being created and log + files are already present, the log files are reviewed; subsequent + log writes are appended to the end of the log, rather than overwriting + current log entries. +

+

+

+
+
+
+
Parameters:
initializeLogging - If true, configure the database environment for logging.
+
+
+
+ +

+getInitializeLogging

+
+public boolean getInitializeLogging()
+
+
Return if the database environment is configured for logging. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment is configured for logging.
+
+
+
+ +
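+The subsystem flags above are normally combined when the environment is opened.
+The following sketch is illustrative only; it assumes the Environment(File,
+EnvironmentConfig) constructor and the setAllowCreate and setTransactional
+setters documented elsewhere in this API:
+
+    // assumes: import com.sleepycat.db.*; import java.io.File;
+    EnvironmentConfig config = new EnvironmentConfig();
+    config.setAllowCreate(true);        // create the environment if it does not exist
+    config.setInitializeCache(true);    // shared memory buffer pool
+    config.setInitializeLocking(true);  // locking, for concurrent readers and writers
+    config.setInitializeLogging(true);  // write-ahead logging, for recovery
+    config.setTransactional(true);      // transaction subsystem
+    Environment env = new Environment(new File("/path/to/envhome"), config);  // placeholder path
+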

+setInitializeRegions

+
+public void setInitializeRegions(boolean initializeRegions)
+
+
Configure the database environment to page-fault shared regions into + memory when initially creating or joining a database environment. +

+ In some applications, the expense of page-faulting the underlying + shared memory regions can affect performance. For example, if the + page-fault occurs while holding a lock, other lock requests can + convoy, and overall throughput may decrease. This method + configures Berkeley DB to page-fault shared regions into memory when + initially creating or joining a database environment. In addition, + Berkeley DB will write the shared regions when creating an + environment, forcing the underlying virtual memory and filesystems + to instantiate both the necessary memory and the necessary disk + space. This can also avoid out-of-disk space failures later on. +

+ This method only affects the specified Environment handle (and +any other library handles opened within the scope of that handle). +For consistent behavior across the environment, all Environment +handles opened in the database environment must either call this method +or the configuration should be specified in the database environment's +DB_CONFIG configuration file. +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
initializeRegions - If true, configure the database environment to page-fault shared + regions into memory when initially creating or joining a database + environment.
+
+
+
+ +

+getInitializeRegions

+
+public boolean getInitializeRegions()
+
+
Return if the database environment has been configured to page-fault + shared regions into memory when initially creating or joining a + database environment. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment has been configured to page-fault + shared regions into memory when initially creating or joining a + database environment.
+
+
+
+ +

+setInitializeReplication

+
+public void setInitializeReplication(boolean initializeReplication)
+
+
Configure the database environment for replication. +

+ Replication requires both locking and transactions. +

+

+

+
+
+
+
Parameters:
initializeReplication - If true, configure the database environment for replication.
+
+
+
+ +

+getInitializeReplication

+
+public boolean getInitializeReplication()
+
+
Return if the database environment is configured for replication. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment is configured for replication.
+
+
+
+ +

+setJoinEnvironment

+
+public void setJoinEnvironment(boolean joinEnvironment)
+
+
Configure the handle to join an existing environment. +

+ This option allows applications to join an existing environment + without knowing which subsystems the environment supports. +

+

+

+
+
+
+
Parameters:
joinEnvironment - If true, configure the handle to join an existing environment.
+
+
+
+ +

+getJoinEnvironment

+
+public boolean getJoinEnvironment()
+
+
Return if the handle is configured to join an existing environment. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the handle is configured to join an existing environment.
+
+
+
+ +
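+A short sketch of joining an existing environment without naming its subsystems,
+as described above; the Environment(File, EnvironmentConfig) constructor and the
+environment-home path are assumptions for illustration:
+
+    EnvironmentConfig config = new EnvironmentConfig();
+    config.setJoinEnvironment(true);  // adopt whatever subsystems the environment already uses
+    Environment env = new Environment(new File("/path/to/envhome"), config);
+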

+setLockConflicts

+
+public void setLockConflicts(byte[][] lockConflicts)
+
+
Configure the locking conflicts matrix. +

+ If the locking conflicts matrix is never configured, a standard + conflicts array is used. +

+ This method configures a database environment, including all threads +of control accessing the database environment, not only the operations +performed using a specified Environment handle. +

+ This method may not be called after the +environment has been opened. +If joining an existing database environment, any +information specified to this method will be ignored. +

+

+

+
+
+
+
Parameters:
lockConflicts - The locking conflicts matrix. A non-0 value for an array element + indicates the requested_mode and held_mode conflict: +
+        lockConflicts[requested_mode][held_mode]
+    
+

+ The not-granted mode must be represented by 0. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getLockConflicts

+
+public byte[][] getLockConflicts()
+
+
Return the locking conflicts matrix. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The locking conflicts matrix.
+
+
+
+ +
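+A hedged sketch of a conflicts matrix for a hypothetical three-mode locker
+(0 = not granted, 1 = read, 2 = write; the mode numbering is illustrative and
+not defined by this API). A non-zero entry means the requested mode conflicts
+with the held mode:
+
+    //                       held:  NG  RD  WR
+    byte[][] conflicts = {
+        /* requested NG */        {  0,  0,  0 },
+        /* requested RD */        {  0,  0,  1 },  // a read waits for a held write
+        /* requested WR */        {  0,  1,  1 },  // a write waits for any held lock
+    };
+    EnvironmentConfig config = new EnvironmentConfig();
+    config.setLockConflicts(conflicts);
+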

+setLockDetectMode

+
+public void setLockDetectMode(LockDetectMode lockDetectMode)
+
+
Configure if the deadlock detector is to be run whenever a lock + conflict occurs. +

+ The database environment's deadlock detector configuration may also be set using the environment's +DB_CONFIG file. The syntax of the entry in that file is a single line +with the string "set_lk_detect", one or more whitespace characters, and the method detect parameter as a string; for example, + "set_lk_detect DB_LOCK_OLDEST". +Because the DB_CONFIG file is read when the database environment is +opened, it will silently overrule configuration done before that time. +

+ This method configures a database environment, including all threads +of control accessing the database environment, not only the operations +performed using a specified Environment handle. +

+ Although the method may be called at any time during the life of the +application, it should normally be called before opening the database +environment. +

+

+

+
+
+
+
Parameters:
lockDetectMode - The lock request(s) to be rejected. As transactions acquire locks + on behalf of a single locker ID, rejecting a lock request associated + with a transaction normally requires the transaction be aborted. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getLockDetectMode

+
+public LockDetectMode getLockDetectMode()
+
+
Return if the deadlock detector is configured to run whenever a lock + conflict occurs. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the deadlock detector is configured to run whenever a lock + conflict occurs.
+
+
+
+ +
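+For example, to run the deadlock detector on every conflict and reject the
+oldest lock request (a sketch; it assumes the LockDetectMode.OLDEST constant,
+which corresponds to the "DB_LOCK_OLDEST" string shown above):
+
+    // assumes: import com.sleepycat.db.LockDetectMode; config is an EnvironmentConfig
+    config.setLockDetectMode(LockDetectMode.OLDEST);
+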

+setLockDown

+
+public void setLockDown(boolean lockDown)
+
+
Configure the database environment to lock shared environment files + and memory-mapped databases into memory. +

+

+

+
+
+
+
Parameters:
lockDown - If true, configure the database environment to lock shared + environment files and memory-mapped databases into memory.
+
+
+
+ +

+getLockDown

+
+public boolean getLockDown()
+
+
Return if the database environment is configured to lock shared + environment files and memory-mapped databases into memory. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment is configured to lock shared + environment files and memory-mapped databases into memory.
+
+
+
+ +

+setLockTimeout

+
+public void setLockTimeout(long lockTimeout)
+
+
Set the timeout value for the database environment +locks. +

+Lock timeouts are checked whenever a thread of control blocks on a lock +or when deadlock detection is performed. The lock may have been +requested explicitly through the Lock subsystem interfaces, or it may +be a lock requested by the database access methods underlying the +application. +As timeouts are only checked when the lock request first blocks or when +deadlock detection is performed, the accuracy of the timeout depends on +how often deadlock detection is performed. +

+Timeout values specified for the database environment may be overridden +on a +per-lock basis by Environment.lockVector. +

+This method configures a database environment, including all threads +of control accessing the database environment, not only the operations +performed using a specified Environment handle. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
lockTimeout - The timeout value, specified as an unsigned 32-bit number of +microseconds, limiting the maximum timeout to roughly 71 minutes. +

+

+

Throws: +
IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs.
+
+
+
+ +

+getLockTimeout

+
+public long getLockTimeout()
+
+
Return the database environment lock timeout value, in microseconds; + a timeout of 0 means no timeout is set. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The database environment lock timeout value, in microseconds; + a timeout of 0 means no timeout is set.
+
+
+
+ +
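+Because the timeout is an unsigned 32-bit count of microseconds, a five-second
+lock timeout would be written as follows (sketch only; config is an
+EnvironmentConfig as in the earlier sketches):
+
+    config.setLockTimeout(5000000L);  // 5,000,000 microseconds = 5 seconds
+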

+setLogAutoRemove

+
+public void setLogAutoRemove(boolean logAutoRemove)
+
+
Configure the system to automatically remove log files that are no + longer needed. +

+ Automatic log file removal is likely to make catastrophic recovery + impossible. +

+ This method configures a database environment, including all threads +of control accessing the database environment, not only the operations +performed using a specified Environment handle. +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
logAutoRemove - If true, configure the system to automatically remove log files that + are no longer needed.
+
+
+
+ +

+getLogAutoRemove

+
+public boolean getLogAutoRemove()
+
+
Return if the system has been configured to automatically remove log + files that are no longer needed. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the system has been configured to automatically remove log + files that are no longer needed.
+
+
+
+ +

+setLogInMemory

+
+public void setLogInMemory(boolean inmemory)
+
+
If set, maintain transaction logs in memory rather than on disk. This means + that transactions exhibit the ACI (atomicity, consistency, and isolation) + properties, but not D (durability); that is, database integrity will be + maintained, but if the application or system fails, integrity will not + persist. All database files must be verified and/or restored from a + replication group master or archival backup after application or system + failure. +

+ When in-memory logs are configured and no more log buffer space is + available, Berkeley DB methods will throw a DatabaseException. + When choosing log buffer and file sizes for in-memory logs, applications + should ensure the in-memory log buffer size is large enough that no + transaction will ever span the entire buffer, and avoid a state where the + in-memory buffer is full and no space can be freed because a transaction + that started in the first log "file" is still active. +

+ This method configures a database environment, including all threads +of control accessing the database environment, not only the operations +performed using a specified Environment handle. +

+ This method may not be called after the +environment has been opened. +If joining an existing database environment, any +information specified to this method will be ignored. +

+

+

+
+
+
+
Parameters:
inmemory - If true, maintain transaction logs in memory rather than on disk.
+
+
+
+ +

+getLogInMemory

+
+public boolean getLogInMemory()
+
+
Return if the database environment is configured to maintain transaction logs + in memory rather than on disk. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment is configured to maintain transaction logs + in memory rather than on disk.
+
+
+
+ +
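+A hedged sketch of the in-memory logging configuration described above; the
+buffer size is an arbitrary example and must be chosen so that no single
+transaction can ever span the whole buffer (config is an EnvironmentConfig):
+
+    config.setLogInMemory(true);                // ACI but not D: no log files on disk
+    config.setLogBufferSize(10 * 1024 * 1024);  // for example, a 10MB in-memory log buffer
+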

+setLogRecordHandler

+
+public void setLogRecordHandler(LogRecordHandler logRecordHandler)
+
+
Set a function to process application-specific log records. +

+ This method configures only operations performed using a single +Environment handle, not an entire database environment. +

+ This method may not be called after the +environment has been opened. +If joining an existing database environment, the +information specified to this method must be consistent with the +existing environment or corruption can occur. +

+

+

+
+
+
+
Parameters:
logRecordHandler - The handler for application-specific log records. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getLogRecordHandler

+
+public LogRecordHandler getLogRecordHandler()
+
+
Return the handler for application-specific log records. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The handler for application-specific log records.
+
+
+
+ +

+setMaxLocks

+
+public void setMaxLocks(int maxLocks)
+
+
Set the maximum number of locks supported by the database + environment. +

+ This value is used during environment creation to estimate how much + space to allocate for various lock-table data structures. The + default value is 1000 locks. +

+ The database environment's maximum number of locks may also be set using the environment's +DB_CONFIG file. The syntax of the entry in that file is a single line +with the string "set_lk_max_locks", one or more whitespace characters, and the number of locks. +Because the DB_CONFIG file is read when the database environment is +opened, it will silently overrule configuration done before that time. +

+ This method configures a database environment, including all threads +of control accessing the database environment, not only the operations +performed using a specified Environment handle. +

+ This method may not be called after the +environment has been opened. +If joining an existing database environment, any +information specified to this method will be ignored. +

+

+

+
+
+
+
Parameters:
maxLocks - The maximum number of locks supported by the database environment. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getMaxLocks

+
+public int getMaxLocks()
+
+
Return the maximum number of locks. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The maximum number of locks.
+
+
+
+ +

+setMaxLockers

+
+public void setMaxLockers(int maxLockers)
+
+
Set the maximum number of locking entities supported by the database + environment. +

+ This value is used during environment creation to estimate how much + space to allocate for various lock-table data structures. The default + value is 1000 lockers. +

+ The database environment's maximum number of lockers may also be set using the environment's +DB_CONFIG file. The syntax of the entry in that file is a single line +with the string "set_lk_max_lockers", one or more whitespace characters, and the number of lockers. +Because the DB_CONFIG file is read when the database environment is +opened, it will silently overrule configuration done before that time. +

+ This method configures a database environment, including all threads +of control accessing the database environment, not only the operations +performed using a specified Environment handle. +

+ This method may not be called after the +environment has been opened. +If joining an existing database environment, any +information specified to this method will be ignored. +

+

+

+
+
+
+
Parameters:
maxLockers - The maximum number simultaneous locking entities supported by the + database environment. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getMaxLockers

+
+public int getMaxLockers()
+
+
Return the maximum number of lockers. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The maximum number of lockers.
+
+
+
+ +

+setMaxLockObjects

+
+public void setMaxLockObjects(int maxLockObjects)
+
+
Set the maximum number of locked objects supported by the database + environment. +

+ This value is used during environment creation to estimate how much + space to allocate for various lock-table data structures. The default + value is 1000 objects. +

+ The database environment's maximum number of objects may also be set using the environment's +DB_CONFIG file. The syntax of the entry in that file is a single line +with the string "set_lk_max_objects", one or more whitespace characters, and the number of objects. +Because the DB_CONFIG file is read when the database environment is +opened, it will silently overrule configuration done before that time. +

+ This method configures a database environment, including all threads +of control accessing the database environment, not only the operations +performed using a specified Environment handle. +

+ This method may not be called after the +environment has been opened. +If joining an existing database environment, any +information specified to this method will be ignored. +

+

+

+
+
+
+
Parameters:
maxLockObjects - The maximum number of locked objects supported by the database + environment. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getMaxLockObjects

+
+public int getMaxLockObjects()
+
+
Return the maximum number of locked objects. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The maximum number of locked objects.
+
+
+
+ +
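+The three lock-table limits above are usually raised together when the defaults
+prove too small. An illustrative sizing (the numbers are placeholders, not
+recommendations; config is an EnvironmentConfig):
+
+    config.setMaxLocks(10000);        // default is 1000 locks
+    config.setMaxLockers(2000);       // default is 1000 lockers
+    config.setMaxLockObjects(10000);  // default is 1000 lock objects
+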

+setMaxLogFileSize

+
+public void setMaxLogFileSize(int maxLogFileSize)
+
+
Set the maximum size of a single file in the log, in bytes. +

+ By default, or if the maxLogFileSize parameter is set to 0, a size + of 10MB is used. If no size is specified by the application, the + size last specified for the database region will be used, or if no + database region previously existed, the default will be used. + Because LogSequenceNumber file offsets are unsigned four-byte + values, the set value may not be larger than the maximum unsigned + four-byte value. +

+ The database environment's log file size may also be set using the environment's +DB_CONFIG file. The syntax of the entry in that file is a single line +with the string "set_lg_max", one or more whitespace characters, and the size in bytes. +Because the DB_CONFIG file is read when the database environment is +opened, it will silently overrule configuration done before that time. +

+ This method configures a database environment, including all threads +of control accessing the database environment, not only the operations +performed using a specified Environment handle. +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
maxLogFileSize - The maximum size of a single file in the log, in bytes. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getMaxLogFileSize

+
+public int getMaxLogFileSize()
+
+
Return the maximum size of a single file in the log, in bytes. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The maximum size of a single file in the log, in bytes.
+
+
+
+ +

+setLogBufferSize

+
+public void setLogBufferSize(int logBufferSize)
+
+
Set the size of the in-memory log buffer, in bytes. +

+ Log information is stored in-memory until the storage space fills up + or transaction commit forces the information to be flushed to stable + storage. In the presence of long-running transactions or transactions + producing large amounts of data, larger buffer sizes can increase + throughput. +

+ By default, or if the value is set to 0, a size of 32K is used. +

+ The database environment's log buffer size may also be set using the environment's +DB_CONFIG file. The syntax of the entry in that file is a single line +with the string "set_lg_bsize", one or more whitespace characters, and the size in bytes. +Because the DB_CONFIG file is read when the database environment is +opened, it will silently overrule configuration done before that time. +

+ This method configures a database environment, including all threads +of control accessing the database environment, not only the operations +performed using a specified Environment handle. +

+ This method may not be called after the +environment has been opened. +If joining an existing database environment, any +information specified to this method will be ignored. +

+

+

+
+
+
+
Parameters:
logBufferSize - The size of the in-memory log buffer, in bytes. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getLogBufferSize

+
+public int getLogBufferSize()
+
+
Return the size of the in-memory log buffer, in bytes. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The size of the in-memory log buffer, in bytes.
+
+
+
+ +
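+A sketch combining the two log-size settings above (the values are examples
+only; config is an EnvironmentConfig):
+
+    config.setMaxLogFileSize(20 * 1024 * 1024);  // 20MB log files instead of the 10MB default
+    config.setLogBufferSize(1024 * 1024);        // 1MB in-memory log buffer instead of 32KB
+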

+setLogDirectory

+
+public void setLogDirectory(File logDirectory)
+
+
Set the path of a directory to be used as the location of logging files. +

+ Log files created by the Log Manager subsystem will be created in this + directory. If no logging directory is specified, log files are + created in the environment home directory. +

+ For the greatest degree of recoverability from system or application + failure, database files and log files should be located on separate + physical devices. +

+ The database environment's logging directory may also be set using the environment's +DB_CONFIG file. The syntax of the entry in that file is a single line +with the string "set_lg_dir", one or more whitespace characters, and the directory name. +Because the DB_CONFIG file is read when the database environment is +opened, it will silently overrule configuration done before that time. +

+ This method configures only operations performed using a single +Environment handle, not an entire database environment. +

+ This method may not be called after the +environment has been opened. +If joining an existing database environment, the +information specified to this method must be consistent with the +existing environment or corruption can occur. +

+

+

+
+
+
+
Parameters:
logDirectory - The directory used to store the logging files. + On Windows platforms, this argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getLogDirectory

+
+public File getLogDirectory()
+
+
Return the path of a directory to be used as the location of logging files. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The path of a directory to be used as the location of logging files.
+
+
+
+ +
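+For example, to keep log files on a device separate from the database files
+(the directory path is a placeholder; setLogDirectory takes a java.io.File as
+shown above):
+
+    // assumes: import java.io.File; config is an EnvironmentConfig
+    config.setLogDirectory(new File("/separate/disk/dblogs"));
+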

+setLogRegionSize

+
+public void setLogRegionSize(int logRegionSize)
+
+
Set the size of the underlying logging area of the database + environment, in bytes. +

+ By default, or if the value is set to 0, the default size is 60KB. + The log region is used to store filenames, and so may need to be + increased in size if a large number of files will be opened and + registered with the specified database environment's log manager. +

+ The database environment's log region size may also be set using the environment's +DB_CONFIG file. The syntax of the entry in that file is a single line +with the string "set_lg_regionmax", one or more whitespace characters, and the size in bytes. +Because the DB_CONFIG file is read when the database environment is +opened, it will silently overrule configuration done before that time. +

+ This method configures a database environment, including all threads +of control accessing the database environment, not only the operations +performed using a specified Environment handle. +

+ This method may not be called after the +environment has been opened. +If joining an existing database environment, any +information specified to this method will be ignored. +

+

+

+
+
+
+
Parameters:
logRegionSize - The size of the logging area in the database environment, in bytes. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getLogRegionSize

+
+public int getLogRegionSize()
+
+
Return the size of the underlying logging subsystem region. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The size of the underlying logging subsystem region.
+
+
+
+ +

+setMessageHandler

+
+public void setMessageHandler(MessageHandler messageHandler)
+
+
Set a function to be called with an informational message. +

+There are interfaces in the Berkeley DB library which either directly +output informational messages or statistical information, or configure +the library to output such messages when performing other operations, +EnvironmentConfig.setVerboseDeadlock for example. +

+The EnvironmentConfig.setMessageHandler and +DatabaseConfig.setMessageHandler methods are used to display +these messages for the application. +

+Setting messageHandler to null unconfigures the interface. +

+Alternatively, you can use EnvironmentConfig.setMessageStream +and DatabaseConfig.setMessageStream to send the additional +information directly to an output streams. You should not mix these +approaches. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
messageHandler - The application-specified function for informational messages.
+
+
+
+ +

+getMessageHandler

+
+public MessageHandler getMessageHandler()
+
+
Return the function to be called with an informational message. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The function to be called with an informational message.
+
+
+
+ +

+setMessageStream

+
+public void setMessageStream(OutputStream messageStream)
+
+
Set an OutputStream for displaying informational messages. +

+There are interfaces in the Berkeley DB library which either directly +output informational messages or statistical information, or configure +the library to output such messages when performing other operations, +EnvironmentConfig.setVerboseDeadlock for example. +

+The EnvironmentConfig.setMessageStream and +DatabaseConfig.setMessageStream methods are used to display +these messages for the application. In this case, the message will +include a trailing newline character. +

+Setting messageStream to null unconfigures the interface. +

+Alternatively, you can use EnvironmentConfig.setMessageHandler +and DatabaseConfig.setMessageHandler to capture the additional +information in a way that does not use output streams. You should not +mix these approaches. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
messageStream - The application-specified OutputStream for informational messages.
+
+
+
+ +

+getMessageStream

+
+public OutputStream getMessageStream()
+
+
Return the OutputStream for displaying informational messages. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The OutputStream for displaying informational messages.
+
+
+
+ +

+setMMapSize

+
+public void setMMapSize(long mmapSize)
+
+
Set the maximum file size, in bytes, for a file to be mapped into + the process address space. +

+ If no value is specified, it defaults to 10MB. +

+ Files that are opened read-only in the pool (and that satisfy a few + other criteria) are, by default, mapped into the process address space + instead of being copied into the local cache. This can result in + better-than-usual performance because available virtual memory is + normally much larger than the local cache, and page faults are faster + than page copying on many systems. However, it can cause resource + starvation in the presence of limited virtual memory, and it can result + in immense process sizes in the presence of large databases. +

+

+

+
+
+
+
Parameters:
mmapSize - The maximum file size, in bytes, for a file to be mapped into the + process address space. +

+ This method configures only operations performed using a single +Environment handle, not an entire database environment. +

+ This method may be called at any time during the life of the application. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getMMapSize

+
+public long getMMapSize()
+
+
Return the maximum file size, in bytes, for a file to be mapped into + the process address space. +

+

+

+
+
+
+ +
Returns:
The maximum file size, in bytes, for a file to be mapped into the + process address space. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+setMode

+
+public void setMode(int mode)
+
+
Configure the database environment to use a specific mode when + creating underlying files and shared memory segments. +

+ On UNIX systems or in POSIX environments, files created in the + database environment are created with the specified mode (as + modified by the process' umask value at the time of creation). +

+ On UNIX systems or in POSIX environments, system shared memory + segments created by the library are created with the specified + mode, unmodified by the process' umask value. +

+ If the mode is 0, the library will use a default mode of readable and + writable by both owner and group. +

+ Created files are owned by the process owner; the group ownership + of created files is based on the system and directory defaults, + and is not further specified by the library. +

+

+

+
+
+
+
Parameters:
mode - The mode to use when creating underlying files and shared memory + segments.
+
+
+
+ +

+getMode

+
+public long getMode()
+
+
Return the mode to use when creating underlying files and shared + memory segments. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The mode to use when creating underlying files and shared + memory segments.
+
+
+
+ +
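+Because the mode is applied like a UNIX permission (as modified by the process'
+umask for files), it is conventionally written in octal; a sketch matching the
+default described above (config is an EnvironmentConfig):
+
+    config.setMode(0660);  // octal: readable and writable by owner and group
+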

+setNoLocking

+
+public void setNoLocking(boolean noLocking)
+
+
Configure the system to grant all requested mutual exclusion mutexes + and database locks without regard for their actual availability. +

+ This functionality should never be used for purposes other than + debugging. +

+ This method only affects the specified Environment handle (and +any other library handles opened within the scope of that handle). +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
noLocking - If true, configure the system to grant all requested mutual exclusion + mutexes and database locks without regard for their actual availability.
+
+
+
+ +

+getNoLocking

+
+public boolean getNoLocking()
+
+
Return if the system has been configured to grant all requested mutual + exclusion mutexes and database locks without regard for their actual + availability. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the system has been configured to grant all requested mutual + exclusion mutexes and database locks without regard for their actual + availability.
+
+
+
+ +

+setNoMMap

+
+public void setNoMMap(boolean noMMap)
+
+
Configure the system to copy read-only database files into the local + cache instead of potentially mapping them into process memory. +

+ This method only affects the specified Environment handle (and +any other library handles opened within the scope of that handle). +For consistent behavior across the environment, all Environment +handles opened in the database environment must either call this method +or the configuration should be specified in the database environment's +DB_CONFIG configuration file. +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
noMMap - If true, configure the system to copy read-only database files into + the local cache instead of potentially mapping them into process memory.
+
+
+
+ +

+getNoMMap

+
+public boolean getNoMMap()
+
+
Return if the system has been configured to copy read-only database files + into the local cache instead of potentially mapping them into process + memory. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the system has been configured to copy read-only database files + into the local cache instead of potentially mapping them into process + memory.
+
+
+
+ +

+setNoPanic

+
+public void setNoPanic(boolean noPanic)
+
+
Configure the system to ignore any panic state in the database + environment. +

+ Database environments in a panic state normally refuse all attempts to + call Berkeley DB functions, throwing RunRecoveryException. + This functionality should never be used for purposes other than + debugging. +

+ This method only affects the specified Environment handle (and +any other library handles opened within the scope of that handle). +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
noPanic - If true, configure the system to ignore any panic state in the + database environment.
+
+
+
+ +

+getNoPanic

+
+public boolean getNoPanic()
+
+
Return if the system has been configured to ignore any panic state in + the database environment. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the system has been configured to ignore any panic state in + the database environment.
+
+
+
+ +

+setOverwrite

+
+public void setOverwrite(boolean overwrite)
+
+
Configure the system to overwrite files stored in encrypted formats + before deleting them. +

+ Berkeley DB overwrites files using alternating 0xff, 0x00 and 0xff + byte patterns. For file overwriting to be effective, the underlying + file must be stored on a fixed-block filesystem. Systems with + journaling or logging filesystems will require operating system + support and probably modification of the Berkeley DB sources. +

+ This method only affects the specified Environment handle (and +any other library handles opened within the scope of that handle). +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
overwrite - If true, configure the system to overwrite files stored in encrypted + formats before deleting them.
+
+
+
+ +

+getOverwrite

+
+public boolean getOverwrite()
+
+
Return if the system has been configured to overwrite files stored in + encrypted formats before deleting them. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the system has been configured to overwrite files stored in + encrypted formats before deleting them.
+
+
+
+ +

+setPanicHandler

+
+public void setPanicHandler(PanicHandler panicHandler)
+
+
Set the function to be called if the database environment panics. +

+Errors can occur in the Berkeley DB library where the only solution is +to shut down the application and run recovery (for example, if Berkeley +DB is unable to allocate heap memory). In such cases, the Berkeley DB +methods will throw a RunRecoveryException. It is often easier +to simply exit the application when such errors occur rather than +gracefully return up the stack. This method specifies a function to be +called when RunRecoveryException is about to be thrown from a +Berkeley DB method. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
panicHandler - The function to be called if the database environment panics.
+
+
+
+ +

+getPanicHandler

+
+public PanicHandler getPanicHandler()
+
+
Return the function to be called if the database environment panics. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The function to be called if the database environment panics.
+
+
+
+ +

+setPrivate

+
+public void setPrivate(boolean isPrivate)
+
+
Configure the database environment to only be accessed by a single + process (although that process may be multithreaded). +

+ This has two effects on the database environment. First, all + underlying data structures are allocated from per-process memory + instead of from shared memory that is potentially accessible to more + than a single process. Second, mutexes are only configured to work + between threads. +

+ This flag should not be specified if more than a single process is + accessing the environment because it is likely to cause database + corruption and unpredictable behavior. For example, if both a + server application and a Berkeley DB utility are expected to + access the environment, the database environment should not be + configured as private. +

+

+

+
+
+
+
Parameters:
isPrivate - If true, configure the database environment to only be accessed by + a single process.
+
+
+
+ +

+getPrivate

+
+public boolean getPrivate()
+
+
Return if the database environment is configured to only be accessed + by a single process. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment is configured to only be accessed + by a single process.
+
+
+
+ +

+setReadOnly

+
+public void setReadOnly(boolean readOnly)
+
+
Configure the environment handle to be opened read-only. +

+

+
+
+
+
Parameters:
readOnly - whether the environment should be opened read-only
+
+
+
+ +

+getReadOnly

+
+public boolean getReadOnly()
+
+
Return if the environment handle is opened read-only. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the environment handle is opened read-only.
+
+
+
+ +

+setReplicationLimit

+
+public void setReplicationLimit(long replicationLimit)
+
+
Impose a byte-count limit on the amount of data that will be + transmitted from a site in a single call to Environment.processReplicationMessage. +

+ This method configures a database environment, including all threads +of control accessing the database environment, not only the operations +performed using a specified Environment handle. +

+ This method may not be called before the database environment is opened. +

+

+

+
+
+
+
Parameters:
replicationLimit - The maximum number of bytes that will be sent in a single call to + Environment.processReplicationMessage. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getReplicationLimit

+
+public long getReplicationLimit()
+
+
Return the transmit limit in bytes for a single call to + Environment.processReplicationMessage. +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The transmit limit in bytes for a single call to Environment.processReplicationMessage. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+setReplicationTransport

+
+public void setReplicationTransport(int envid,
+                                    ReplicationTransport replicationTransport)
+
+
Initialize the communication infrastructure for a database environment + participating in a replicated application. +

+ This method configures only operations performed using a single +Environment handle, not an entire database environment. +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
envid - The local environment's ID. It must be a positive integer and + uniquely identify this Berkeley DB database environment. +

replicationTransport - The callback function used to transmit data using the replication + application's communication infrastructure.
+
+
+
+ +

+getReplicationTransport

+
+public ReplicationTransport getReplicationTransport()
+
+
Return the replication callback function used to transmit data using + the replication application's communication infrastructure. +

+

+

+
+
+
+ +
Returns:
The replication callback function used to transmit data using the + replication application's communication infrastructure.
+
+
+
+ +

+setRunFatalRecovery

+
+public void setRunFatalRecovery(boolean runFatalRecovery)
+
+
Configure to run catastrophic recovery on this environment before opening it for +normal use. +

+A standard part of the recovery process is to remove the existing +database environment and create a new one. Applications running +recovery must be prepared to re-create the environment because +underlying shared regions will be removed and re-created. +

+If the thread of control performing recovery does not specify the +correct database environment initialization information (for example, +the correct memory pool cache size), the result can be an application +running in an environment with incorrect cache and other subsystem +sizes. For this reason, the thread of control performing recovery +should specify correct configuration information before recovering the +environment; or it should remove the environment after recovery is +completed, leaving creation of a correctly sized environment to a +subsequent call. +

+All recovery processing must be single-threaded; that is, only a single +thread of control may perform recovery or access a database environment +while recovery is being performed. Because it is not an error to run +recovery for an environment for which no recovery is required, it is +reasonable programming practice for the thread of control responsible +for performing recovery and creating the environment to always specify +recovery during startup. +

+Recovery returns successfully even if no log files exist, so it is +necessary to ensure that all necessary log files are present before +running recovery. +

+

+

+
+
+
+
Parameters:
runFatalRecovery - If true, configure to run catastrophic recovery on this environment +before opening it for normal use.
+
+
+
+ +

+getRunFatalRecovery

+
+public boolean getRunFatalRecovery()
+
+
Return if the handle is configured to run catastrophic recovery on + the database environment before opening it for use. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the handle is configured to run catastrophic recovery on + the database environment before opening it for use.
+
+
+
+ +

+setRunRecovery

+
+public void setRunRecovery(boolean runRecovery)
+
+
Configure to run normal recovery on this environment before opening it for +normal use. +

+A standard part of the recovery process is to remove the existing +database environment and create a new one. Applications running +recovery must be prepared to re-create the environment because +underlying shared regions will be removed and re-created. +

+If the thread of control performing recovery does not specify the +correct database environment initialization information (for example, +the correct memory pool cache size), the result can be an application +running in an environment with incorrect cache and other subsystem +sizes. For this reason, the thread of control performing recovery +should specify correct configuration information before recovering the +environment; or it should remove the environment after recovery is +completed, leaving creation of a correctly sized environment to a +subsequent call. +

+All recovery processing must be single-threaded; that is, only a single +thread of control may perform recovery or access a database environment +while recovery is being performed. Because it is not an error to run +recovery for an environment for which no recovery is required, it is +reasonable programming practice for the thread of control responsible +for performing recovery and creating the environment to always specify +recovery during startup. +

+Recovery returns successfully even if no log files exist, so it is +necessary to ensure that all necessary log files are present before +running recovery. +

+

+

+
+
+
+
Parameters:
runRecovery - If true, configure to run normal recovery on this environment +before opening it for normal use.
+
+
+
+ +

+getRunRecovery

+
+public boolean getRunRecovery()
+
+
Return if the handle is configured to run normal recovery on the + database environment before opening it for use. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the handle is configured to run normal recovery on the + database environment before opening it for use.
+
+
+
+ +
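+A hedged sketch of the startup pattern described above, in which the thread of
+control that creates the environment always requests normal recovery; it assumes
+the Environment(File, EnvironmentConfig) constructor and the setAllowCreate and
+setTransactional setters documented elsewhere in this API:
+
+    EnvironmentConfig config = new EnvironmentConfig();
+    config.setAllowCreate(true);
+    config.setTransactional(true);      // recovery requires logging and transactions
+    config.setInitializeCache(true);
+    config.setInitializeLocking(true);
+    config.setInitializeLogging(true);
+    config.setRunRecovery(true);        // harmless when no recovery is actually needed
+    Environment env = new Environment(new File("/path/to/envhome"), config);  // placeholder path
+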

+setSystemMemory

+
+public void setSystemMemory(boolean systemMemory)
+
+
Configure the database environment to allocate memory from system + shared memory instead of from memory backed by the filesystem. +

+

+

+
+
+
+
Parameters:
systemMemory - If true, configure the database environment to allocate memory from + system shared memory instead of from memory backed by the filesystem.
+
+
+
+ +

+getSystemMemory

+
+public boolean getSystemMemory()
+
+
Return if the database environment is configured to allocate memory + from system shared memory instead of from memory backed by the + filesystem. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment is configured to allocate memory + from system shared memory instead of from memory backed by the + filesystem.
+
+
+
+ +

+setRPCServer

+
+public void setRPCServer(String rpcServer,
+                         long rpcClientTimeout,
+                         long rpcServerTimeout)
+
+
Establish a connection to an RPC server for this database environment. +

+ After this method is called, subsequent calls to Berkeley DB library + interfaces may throw exceptions encapsulating DB_NOSERVER, + DB_NOSERVER_ID or DB_NOSERVER_HOME. +

+ This method configures only operations performed using a single +Environment handle, not an entire database environment. +

+ This method may not be called after the +environment has been opened. +

+

+

+
+
+
+
Parameters:
rpcServer - The host to which the client will connect and create a channel for + communication. +

rpcClientTimeout - The number of seconds the client should wait for results to come + back from the server. Once the timeout has expired on any + communication with the server, DatabaseException + encapsulating DB_NOSERVER will be thrown. If this value is zero, a + default timeout is used. +

rpcServerTimeout - The number of seconds the server should allow a client connection + to remain idle before assuming that the client is gone. Once that + timeout has been reached, the server releases all resources + associated with that client connection. Subsequent attempts by that + client to communicate with the server result in an error return, + indicating that an invalid identifier has been given to the server. + This value can be considered a hint to the server. The server may + alter this value based on its own policies or allowed values. If + this value is zero, a default timeout is used.
+
+
+
+ +

+setSegmentId

+
+public void setSegmentId(long segmentId)
+
+
Specify a base segment ID for database environment shared memory + regions created in system memory on VxWorks or systems supporting + X/Open-style shared memory interfaces; for example, UNIX systems + supporting shmget and related System V IPC interfaces. +

+ This base segment ID will be used when database environment shared + memory regions are first created. It will be incremented a small + integer value each time a new shared memory region is created; that + is, if the base ID is 35, the first shared memory region created + will have a segment ID of 35, and the next one will have a segment + ID between 36 and 40 or so. A database environment always creates + a master shared memory region; an additional shared memory region + for each of the subsystems supported by the environment (Locking, + Logging, Memory Pool and Transaction); plus an additional shared + memory region for each additional memory pool cache that is + supported. Already existing regions with the same segment IDs will + be removed. +

+ The intent behind this method is two-fold: without it, applications + have no way to ensure that two Berkeley DB applications don't + attempt to use the same segment IDs when creating different database + environments. In addition, by using the same segment IDs each time + the environment is created, previously created segments will be + removed, and the set of segments on the system will not grow without + bound. + The database environment's base segment ID may also be set using the environment's +DB_CONFIG file. The syntax of the entry in that file is a single line +with the string "set_shm_key", one or more whitespace characters, and the ID. +Because the DB_CONFIG file is read when the database environment is +opened, it will silently overrule configuration done before that time. +

+ This method configures only operations performed using a single +Environment handle, not an entire database environment. +

+ This method may not be called after the +environment has been opened. +If joining an existing database environment, the +information specified to this method must be consistent with the +existing environment or corruption can occur. +

+

+

+
+
+
+
Parameters:
segmentId - The base segment ID for the database environment. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getSegmentId

+
+public long getSegmentId()
+
+
Return the base segment ID. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The base segment ID.
+
+
+
+ +

+setTemporaryDirectory

+
+public void setTemporaryDirectory(String temporaryDirectory)
+
+
Set the path of a directory to be used as the location of temporary + files. +

+ The files created to back in-memory access method databases will be + created relative to this path. These temporary files can be quite + large, depending on the size of the database. +

+ If no directory is specified, the following alternatives are checked + in the specified order. The first existing directory path is used + for all temporary files. +

    +
  1. The value of the environment variable TMPDIR. +
  2. The value of the environment variable TEMP. +
  3. The value of the environment variable TMP. +
  4. The value of the environment variable TempFolder. +
  5. The value returned by the GetTempPath interface. +
  6. The directory /var/tmp. +
  7. The directory /usr/tmp. +
  8. The directory /temp. +
  9. The directory /tmp. +
  10. The directory C:/temp. +
  11. The directory C:/tmp. + +

    + Note: the environment variables are only checked if the database + environment has been configured with one of + EnvironmentConfig.setUseEnvironment or + EnvironmentConfig.setUseEnvironmentRoot. +

    + Note: the GetTempPath interface is only checked on Win/32 platforms. +

    + The database environment's temporary file directory may also be set using the environment's +DB_CONFIG file. The syntax of the entry in that file is a single line +with the string "set_tmp_dir", one or more whitespace characters, and the directory name. +Because the DB_CONFIG file is read when the database environment is +opened, it will silently overrule configuration done before that time. +

    + This method configures only operations performed using a single +Environment handle, not an entire database environment. +

    + This method may not be called after the +environment has been opened. +If joining an existing database environment, the +information specified to this method must be consistent with the +existing environment or corruption can occur. +

    +

    +

    +
    +
    +
    +
    Parameters:
    temporaryDirectory - The directory to be used to store temporary files. + On Windows platforms, this argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters. +

    +

    +

    Throws: +
    DatabaseException - if a failure occurs.
    +
    +
+
+ +

+getTemporaryDirectory

+
+public String getTemporaryDirectory()
+
+
Return the path of a directory to be used as the location of + temporary files. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The path of a directory to be used as the location of + temporary files.
+
+
+
+ +

+setTestAndSetSpins

+
+public void setTestAndSetSpins(int testAndSetSpins)
+
+
Set the number of times test-and-set mutexes should spin before + blocking. +

+ The value defaults to 1 on uniprocessor systems and to 50 times the + number of processors on multiprocessor systems. +

+ This method configures only operations performed using a single +Environment handle, not an entire database environment. +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
testAndSetSpins - The number of times test-and-set mutexes should spin before blocking.
+
+
+
+ +

+getTestAndSetSpins

+
+public int getTestAndSetSpins()
+
+
Return the number of times test-and-set mutexes should spin before + blocking. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The number of times test-and-set mutexes should spin before + blocking.
+
+
+
+ +

+setThreaded

+
+public void setThreaded(boolean threaded)
+
+
Configure the handle to be free-threaded; that is, usable + by multiple threads within a single address space. +

+ This is the default; threading is always assumed in Java, so no special + configuration is required. +

+

+

+
+
+
+
Parameters:
threaded - If true, configure the handle to be free-threaded.
+
+
+
+ +

+getThreaded

+
+public boolean getThreaded()
+
+
Return if the handle is configured to be free-threaded. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the handle is configured to be free-threaded.
+
+
+
+ +

+setTxnNoSync

+
+public void setTxnNoSync(boolean txnNoSync)
+
+
Configure the system to not write or synchronously flush the log + on transaction commit. +

+ This means that transactions exhibit the ACI (atomicity, consistency, + and isolation) properties, but not D (durability); that is, database + integrity will be maintained, but if the application or system fails, + it is possible some number of the most recently committed transactions + may be undone during recovery. The number of transactions at risk is + governed by how many log updates can fit into the log buffer, how often + the operating system flushes dirty buffers to disk, and how often the + log is checkpointed. +

+ This method only affects the specified Environment handle (and +any other library handles opened within the scope of that handle). +For consistent behavior across the environment, all Environment +handles opened in the database environment must either call this method +or the configuration should be specified in the database environment's +DB_CONFIG configuration file. +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
txnNoSync - If true, configure the system to not write or synchronously flush + the log on transaction commit.
+
+
+
+ +

+getTxnNoSync

+
+public boolean getTxnNoSync()
+
+
Return if the system has been configured to not write or synchronously + flush the log on transaction commit. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the system has been configured to not write or synchronously + flush the log on transaction commit.
+
+
+
+ +

+setTxnNotDurable

+
+public void setTxnNotDurable(boolean txnNotDurable)
+
+
Configure the system to not write log records. +

+ This means that transactions exhibit the ACI (atomicity, consistency, and isolation) properties, but not D (durability); that is, database integrity will be maintained while the application runs, but it will not persist across an application or system failure. All database files must be verified and/or restored from backup after a failure. To ensure integrity after the application shuts down, all database handles must be closed without specifying noSync, or all database changes must be flushed from the database environment cache using Environment.checkpoint.

+ This method only affects the specified Environment handle (and +any other library handles opened within the scope of that handle). +For consistent behavior across the environment, all Environment +handles opened in the database environment must either call this method +or the configuration should be specified in the database environment's +DB_CONFIG configuration file. +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
txnNotDurable - If true, configure the system to not write log records.
+
+
+
+ +

+getTxnNotDurable

+
+public boolean getTxnNotDurable()
+
+
Return if the system has been configured to not write log records. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the system has been configured to not write log records.
+
+
+
+ +

+setTxnMaxActive

+
+public void setTxnMaxActive(int txnMaxActive)
+
+
Configure the database environment to support at least txnMaxActive + active transactions. +

+ This value bounds the size of the memory allocated for transactions. + Child transactions are counted as active until they either commit + or abort. +

+ When all of the memory available in the database environment for transactions is in use, calls to Environment.beginTransaction will fail (until some active transactions complete). If this method is never called, the database environment is configured to support at least 20 active transactions.

+ The database environment's number of active transactions may also be set using the environment's +DB_CONFIG file. The syntax of the entry in that file is a single line +with the string "set_tx_max", one or more whitespace characters, and the number of transactions. +Because the DB_CONFIG file is read when the database environment is +opened, it will silently overrule configuration done before that time. +

+ This method configures a database environment, including all threads +of control accessing the database environment, not only the operations +performed using a specified Environment handle. +

+ This method may not be called after the +environment has been opened. +If joining an existing database environment, any +information specified to this method will be ignored. +

+

+

+
+
+
+
Parameters:
txnMaxActive - The minimum number of simultaneously active transactions supported + by the database environment. +

+

+

Throws: +
DatabaseException - if a failure occurs.
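For example, both configuration routes described above, side by side (the Java names assume EnvironmentConfig; the DB_CONFIG syntax is quoted from the text):

    // In code, before the environment is opened:
    EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setTxnMaxActive(200);   // support at least 200 simultaneously active transactions

    // Or, equivalently, a single line in the environment's DB_CONFIG file
    // (which silently overrules the call above when the environment is opened):
    //   set_tx_max 200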
+
+
+
+ +

+getTxnMaxActive

+
+public int getTxnMaxActive()
+
+
Return the minimum number of simultaneously active transactions + supported by the database environment. +

+

+

+
+
+
+ +
Returns:
The minimum number of simultaneously active transactions supported + by the database environment. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+setTxnTimeout

+
+public void setTxnTimeout(long txnTimeout)
+
+
Set the timeout value for the database environment +transactions. +

+Transaction timeouts are checked whenever a thread of control blocks on +a lock or when deadlock detection is performed. The lock is one +requested on behalf of a transaction, normally by the database access +methods underlying the application. +As timeouts are only checked when the lock request first blocks or when +deadlock detection is performed, the accuracy of the timeout depends on +how often deadlock detection is performed. +

+Timeout values specified for the database environment may be overridden +on a +per-transaction basis by Transaction.setTxnTimeout. +

+This method configures a database environment, including all threads +of control accessing the database environment, not only the operations +performed using a specified Environment handle. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
txnTimeout - The timeout value, specified as an unsigned 32-bit number of +microseconds, limiting the maximum timeout to roughly 71 minutes. +

+

+

Throws: +
IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs.
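For example, a sketch assuming EnvironmentConfig; the per-transaction override is the Transaction.setTxnTimeout method named above:

    EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setTxnTimeout(1000000L);   // 1,000,000 microseconds = 1 second
    // Individual transactions may later override this value:
    //   txn.setTxnTimeout(250000L);     // 250 milliseconds for one transaction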
+
+
+
+ +

+getTxnTimeout

+
+public long getTxnTimeout()
+
+
Return the database environment transaction timeout value, in + microseconds; a timeout of 0 means no timeout is set. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The database environment transaction timeout value, in + microseconds; a timeout of 0 means no timeout is set.
+
+
+
+ +

+setTxnTimestamp

+
+public void setTxnTimestamp(Date txnTimestamp)
+
+
Recover to the specified time rather than to the most current + possible date. +

+ Once a database environment has been upgraded to a new version of + Berkeley DB involving a log format change, it is no longer possible + to recover to a specific time before that upgrade. +

+ This method configures only operations performed using a single Environment handle, not an entire database environment.

+ This method may not be called after the +environment has been opened. +

+

+

+
+
+
+
Parameters:
txnTimestamp - The recovery timestamp. + Only the seconds (not the milliseconds) of the timestamp are used. +

+

+

Throws: +
DatabaseException - if a failure occurs.
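For example, a sketch of point-in-time recovery configuration; the example instant and the EnvironmentConfig class are assumptions, and only whole seconds of the Date are honored:

    java.util.Date recoveryPoint = new java.util.Date(1096934400000L); // hypothetical instant
    EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setTxnTimestamp(recoveryPoint);
    // Recovery must still be requested when the environment is opened;
    // that configuration is outside the scope of this sketch.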
+
+
+
+ +

+getTxnTimestamp

+
+public Date getTxnTimestamp()
+
+
Return the time to which recovery will be done, or 0 if recovery will + be done to the most current possible date. +

+

+

+
+
+
+ +
Returns:
The time to which recovery will be done, or 0 if recovery will be + done to the most current possible date. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+setTxnWriteNoSync

+
+public void setTxnWriteNoSync(boolean txnWriteNoSync)
+
+
Configure the system to write, but not synchronously flush, the log on + transaction commit. +

+ This means that transactions exhibit the ACI (atomicity, consistency, + and isolation) properties, but not D (durability); that is, database + integrity will be maintained, but if the system fails, it is possible + some number of the most recently committed transactions may be undone + during recovery. The number of transactions at risk is governed by how + often the system flushes dirty buffers to disk and how often the log is + checkpointed. +

+ This method only affects the specified Environment handle (and +any other library handles opened within the scope of that handle). +For consistent behavior across the environment, all Environment +handles opened in the database environment must either call this method +or the configuration should be specified in the database environment's +DB_CONFIG configuration file. +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
txnWriteNoSync - If true, configure the system to write, but not synchronously flush, + the log on transaction commit.
+
+
+
+ +

+getTxnWriteNoSync

+
+public boolean getTxnWriteNoSync()
+
+
Return if the system has been configured to write, but not synchronously + flush, the log on transaction commit. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the system has been configured to write, but not synchronously + flush, the log on transaction commit.
+
+
+
+ +

+setUseEnvironment

+
+public void setUseEnvironment(boolean useEnvironment)
+
+
Configure the database environment to accept information from the + process environment when naming files, regardless of the status of + the process. +

+ Because permitting users to specify which files are used can create + security problems, environment information will be used in file + naming for all users only if configured to do so. +

+

+

+
+
+
+
Parameters:
useEnvironment - If true, configure the database environment to accept information + from the process environment when naming files.
+
+
+
+ +

+getUseEnvironment

+
+public boolean getUseEnvironment()
+
+
Return if the database environment is configured to accept information + from the process environment when naming files. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment is configured to accept information + from the process environment when naming files.
+
+
+
+ +

+setUseEnvironmentRoot

+
+public void setUseEnvironmentRoot(boolean useEnvironmentRoot)
+
+
Configure the database environment to accept information from the + process environment when naming files, if the process has + appropriate permissions (for example, users with a user-ID of 0 on + UNIX systems). +

+ Because permitting users to specify which files are used can create + security problems, environment information will be used in file + naming for all users only if configured to do so. +

+

+

+
+
+
+
Parameters:
useEnvironmentRoot - If true, configure the database environment to accept information + from the process environment when naming files if the process has + appropriate permissions.
+
+
+
+ +

+getUseEnvironmentRoot

+
+public boolean getUseEnvironmentRoot()
+
+
Return if the database environment is configured to accept information + from the process environment when naming files if the process has + appropriate permissions. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment is configured to accept information + from the process environment when naming files if the process has + appropriate permissions.
+
+
+
+ +

+setVerboseDeadlock

+
+public void setVerboseDeadlock(boolean verboseDeadlock)
+
+
Display additional information when doing deadlock detection. +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
verboseDeadlock - If true, display additional information when doing deadlock + detection.
+
+
+
+ +

+getVerboseDeadlock

+
+public boolean getVerboseDeadlock()
+
+
Return if the database environment is configured to display + additional information when doing deadlock detection. +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment is configured to display additional + information when doing deadlock detection.
+
+
+
+ +

+setVerboseRecovery

+
+public void setVerboseRecovery(boolean verboseRecovery)
+
+
Display additional information when performing recovery. +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
verboseRecovery - If true, display additional information when performing recovery.
+
+
+
+ +

+getVerboseRecovery

+
+public boolean getVerboseRecovery()
+
+
Return if the database environment is configured to display + additional information when performing recovery. +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment is configured to display additional + information when performing recovery.
+
+
+
+ +

+setVerboseReplication

+
+public void setVerboseReplication(boolean verboseReplication)
+
+
Display additional information when processing replication messages. +

+ Note that to get complete replication logging when debugging replication applications, you must configure and build the Berkeley DB library with the --enable-diagnostic configuration option in addition to calling this method.

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
verboseReplication - If true, display additional information when processing replication + messages.
+
+
+
+ +

+getVerboseReplication

+
+public boolean getVerboseReplication()
+
+
Return if the database environment is configured to display + additional information when processing replication messages. +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment is configured to display additional + information when processing replication messages.
+
+
+
+ +

+setVerboseWaitsFor

+
+public void setVerboseWaitsFor(boolean verboseWaitsFor)
+
+
Display the waits-for table when doing deadlock detection. +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
verboseWaitsFor - If true, display the waits-for table when doing deadlock detection.
+
+
+
+ +

+getVerboseWaitsFor

+
+public boolean getVerboseWaitsFor()
+
+
Return if the database environment is configured to display the + waits-for table when doing deadlock detection. +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the database environment is configured to display the waits-for + table when doing deadlock detection.
+
+
+
+ +

+setYieldCPU

+
+public void setYieldCPU(boolean yieldCPU)
+
+
Configure the system to yield the processor immediately after each + page or mutex acquisition. +

+ This functionality should never be used for purposes other than + stress testing. +

+ This method only affects the specified Environment handle (and +any other library handles opened within the scope of that handle). +For consistent behavior across the environment, all Environment +handles opened in the database environment must either call this method +or the configuration should be specified in the database environment's +DB_CONFIG configuration file. +

+ This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
yieldCPU - If true, configure the system to yield the processor immediately + after each page or mutex acquisition.
+
+
+
+ +

+getYieldCPU

+
+public boolean getYieldCPU()
+
+
Return if the system has been configured to yield the processor + immediately after each page or mutex acquisition. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the system has been configured to yield the processor + immediately after each page or mutex acquisition.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/ErrorHandler.html b/db/docs/java/com/sleepycat/db/ErrorHandler.html new file mode 100644 index 000000000..7a127a109 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/ErrorHandler.html @@ -0,0 +1,242 @@ + + + + + + +ErrorHandler (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Interface ErrorHandler

+
+
+
public interface ErrorHandler
+ +

+An interface specifying a callback function to be called when an error +occurs in the Berkeley DB library. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ voiderror(Environment environment, + String errpfx, + String msg) + +
+          A callback function to be called when an error occurs in the + Berkeley DB library.
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+error

+
+public void error(Environment environment,
+                  String errpfx,
+                  String msg)
+
+
A callback function to be called when an error occurs in the + Berkeley DB library. +

+ When an error occurs in the Berkeley DB library, an exception is + thrown. In some cases, however, the exception may be insufficient + to completely describe the cause of the error, especially during + initial application debugging. +

+ The EnvironmentConfig.setErrorHandler and DatabaseConfig.setErrorHandler methods are used to enhance the mechanism for reporting error messages to the application. In some cases, when an error occurs, Berkeley DB will invoke the ErrorHandler object's error method. It is up to this method to display the error message in an appropriate manner.

+

+

+
Parameters:
environment - The enclosing database environment handle. +

errpfx - The prefix string, as previously configured by + EnvironmentConfig.setErrorPrefix or + DatabaseConfig.setErrorPrefix. +

msg - An error message string.
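For example, a minimal implementation that routes messages to standard error (the setErrorHandler installation line is an assumed usage of the methods named above):

    import com.sleepycat.db.Environment;
    import com.sleepycat.db.ErrorHandler;

    public class StderrErrorHandler implements ErrorHandler {
        public void error(Environment environment, String errpfx, String msg) {
            // Prefix the message when a prefix has been configured.
            System.err.println(errpfx != null ? errpfx + ": " + msg : msg);
        }
    }

    // Installed through the configuration objects, e.g.:
    //   envConfig.setErrorHandler(new StderrErrorHandler());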
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/FeedbackHandler.html b/db/docs/java/com/sleepycat/db/FeedbackHandler.html new file mode 100644 index 000000000..c6d7e833c --- /dev/null +++ b/db/docs/java/com/sleepycat/db/FeedbackHandler.html @@ -0,0 +1,284 @@ + + + + + + +FeedbackHandler (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Interface FeedbackHandler

+
+
+
public interface FeedbackHandler
+ +

+An interface specifying a function to be called to provide feedback. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ voidrecoveryFeedback(Environment environment, + int percent) + +
+          A function called with progress information when the database environment is being recovered.
+ voidupgradeFeedback(Database database, + int percent) + +
+          A function called with progress information when the database is being upgraded.
+ voidverifyFeedback(Database database, + int percent) + +
+          A function called with progress information when the database is being verified.
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+recoveryFeedback

+
+public void recoveryFeedback(Environment environment,
+                             int percent)
+
+
A function called with progress information when the database environment is being recovered. +

+It is up to this function to display this information in an appropriate +manner. +

+

+

+
Parameters:
environment - A reference to the enclosing database environment. +

percent - The percent of the operation completed, specified as an integer value +between 0 and 100.
+
+
+
+ +

+upgradeFeedback

+
+public void upgradeFeedback(Database database,
+                            int percent)
+
+
A function called with progress information when the database is being upgraded. +

+It is up to this function to display this information in an appropriate +manner. +

+

+

+
Parameters:
database - A reference to the enclosing database. +

percent - The percent of the operation completed, specified as an integer value +between 0 and 100.
+
+
+
+ +

+verifyFeedback

+
+public void verifyFeedback(Database database,
+                           int percent)
+
+
A function called with progress information when the database is being verified. +

+It is up to this function to display this information in an appropriate +manner. +

+

+

+
Parameters:
database - A reference to the enclosing database. +

percent - The percent of the operation completed, specified as an integer value +between 0 and 100.
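For example, a minimal implementation that prints progress for all three callbacks:

    import com.sleepycat.db.Database;
    import com.sleepycat.db.Environment;
    import com.sleepycat.db.FeedbackHandler;

    public class ConsoleFeedbackHandler implements FeedbackHandler {
        public void recoveryFeedback(Environment environment, int percent) {
            System.out.println("recovery: " + percent + "% complete");
        }
        public void upgradeFeedback(Database database, int percent) {
            System.out.println("upgrade: " + percent + "% complete");
        }
        public void verifyFeedback(Database database, int percent) {
            System.out.println("verify: " + percent + "% complete");
        }
    }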
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/HashStats.html b/db/docs/java/com/sleepycat/db/HashStats.html new file mode 100644 index 000000000..2227612f1 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/HashStats.html @@ -0,0 +1,604 @@ + + + + + + +HashStats (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class HashStats

+
+java.lang.Object
+  extended bycom.sleepycat.db.DatabaseStats
+      extended bycom.sleepycat.db.HashStats
+
+
+
+
public class HashStats
extends DatabaseStats
+ +

+The HashStats object is used to return Hash database statistics. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ intgetBFree() + +
+          The number of bytes free on bucket pages.
+ intgetBigBFree() + +
+          The number of bytes free on big item pages.
+ intgetBigPages() + +
+          The number of big key/data pages.
+ intgetBuckets() + +
+          The number of hash buckets.
+ intgetDup() + +
+          The number of duplicate pages.
+ intgetDupFree() + +
+          The number of bytes free on duplicate pages.
+ intgetFfactor() + +
+          The desired fill factor specified at database-creation time.
+ intgetFree() + +
+          The number of pages on the free list.
+ intgetMagic() + +
+          The magic number that identifies the file as a Hash file.
+ intgetMetaFlags() + +
+          The metadata flags.
+ intgetNumData() + +
+          The number of key/data pairs in the database.
+ intgetNumKeys() + +
+          The number of unique keys in the database.
+ intgetOverflows() + +
+          The number of overflow pages.
+ intgetOvflFree() + +
+          The number of bytes free on overflow pages.
+ intgetPageSize() + +
+          The underlying Hash database page (and bucket) size, in bytes.
+ intgetVersion() + +
+          The version of the Hash database.
+ StringtoString() + +
+          For convenience, the HashStats class has a toString method + that lists all the data fields.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+getMagic

+
+public int getMagic()
+
+
The magic number that identifies the file as a Hash file. +

+

+
+
+
+
+ +

+getVersion

+
+public int getVersion()
+
+
The version of the Hash database. +

+

+
+
+
+
+ +

+getMetaFlags

+
+public int getMetaFlags()
+
+
The metadata flags. +

+

+
+
+
+
+ +

+getNumKeys

+
+public int getNumKeys()
+
+
The number of unique keys in the database. +

+ If the Database.getStats call was configured by the + StatsConfig.setFast method, the count will be the last + saved value unless it has never been calculated, in which case it + will be 0. +

+

+
+
+
+
+ +

+getNumData

+
+public int getNumData()
+
+
The number of key/data pairs in the database. +

+ If the Database.getStats call was configured by the + StatsConfig.setFast method, the count will be the last + saved value unless it has never been calculated, in which case it + will be 0. +

+

+
+
+
+
+ +

+getPageSize

+
+public int getPageSize()
+
+
The underlying Hash database page (and bucket) size, in bytes. +

+

+
+
+
+
+ +

+getFfactor

+
+public int getFfactor()
+
+
The desired fill factor specified at database-creation time. +

+

+
+
+
+
+ +

+getBuckets

+
+public int getBuckets()
+
+
The number of hash buckets.

+

+
+
+
+
+ +

+getFree

+
+public int getFree()
+
+
The number of pages on the free list. +

+The information is only included if the Database.getStats call +was not configured by the StatsConfig.setFast method. +

+

+
+
+
+
+ +

+getBFree

+
+public int getBFree()
+
+
The number of bytes free on bucket pages. +

+The information is only included if the Database.getStats call +was not configured by the StatsConfig.setFast method. +

+

+
+
+
+
+ +

+getBigPages

+
+public int getBigPages()
+
+
The number of big key/data pages. +

+The information is only included if the Database.getStats call +was not configured by the StatsConfig.setFast method. +

+

+
+
+
+
+ +

+getBigBFree

+
+public int getBigBFree()
+
+
The number of bytes free on big item pages. +

+The information is only included if the Database.getStats call +was not configured by the StatsConfig.setFast method. +

+

+
+
+
+
+ +

+getOverflows

+
+public int getOverflows()
+
+
The number of overflow pages. +

+The information is only included if the Database.getStats call +was not configured by the StatsConfig.setFast method. +

+

+
+
+
+
+ +

+getOvflFree

+
+public int getOvflFree()
+
+
The number of bytes free on overflow pages. +

+The information is only included if the Database.getStats call +was not configured by the StatsConfig.setFast method. +

+

+
+
+
+
+ +

+getDup

+
+public int getDup()
+
+
The number of duplicate pages. +

+The information is only included if the Database.getStats call +was not configured by the StatsConfig.setFast method. +

+

+
+
+
+
+ +

+getDupFree

+
+public int getDupFree()
+
+
The number of bytes free on duplicate pages. +

+The information is only included if the Database.getStats call +was not configured by the StatsConfig.setFast method. +

+

+
+
+
+
+ +

+toString

+
+public String toString()
+
+
For convenience, the HashStats class has a toString method + that lists all the data fields. +
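For example, a usage sketch: the getters and StatsConfig.setFast come from this page, while the StatsConfig construction, the Database.getStats call shape, the cast, and the myHashDatabase handle are assumptions.

    StatsConfig statsConfig = new StatsConfig();
    statsConfig.setFast(false);   // include the per-page counts described above
    HashStats stats = (HashStats) myHashDatabase.getStats(null, statsConfig); // assumed call shape
    System.out.println("keys=" + stats.getNumKeys()
        + " data items=" + stats.getNumData()
        + " buckets=" + stats.getBuckets());
    System.out.println(stats);    // toString lists every field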

+

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/Hasher.html b/db/docs/java/com/sleepycat/db/Hasher.html new file mode 100644 index 000000000..2c27b79c1 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/Hasher.html @@ -0,0 +1,228 @@ + + + + + + +Hasher (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Interface Hasher

+
+
+
public interface Hasher
+ +

+An application-specified, database hash function. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ inthash(Database db, + byte[] data, + int len) + +
+          An application-specified, database-specific hash function.
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+hash

+
+public int hash(Database db,
+                byte[] data,
+                int len)
+
+
An application-specified, database-specific hash function. +

+ The hash function must handle any key values used by the application + (possibly including zero-length keys). +

+

+

+
Parameters:
db - The enclosing database handle.
data - The byte string to be hashed.
len - The length of the byte string in bytes. +

+

Returns:
The hash value of the byte string.
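For example, a minimal FNV-1a-style implementation (the setHasher installation line is an assumed name on the database configuration):

    import com.sleepycat.db.Database;
    import com.sleepycat.db.Hasher;

    public class FnvHasher implements Hasher {
        public int hash(Database db, byte[] data, int len) {
            int h = 0x811c9dc5;            // 32-bit FNV offset basis
            for (int i = 0; i < len; i++) {
                h ^= (data[i] & 0xff);
                h *= 0x01000193;           // 32-bit FNV prime
            }
            return h;
        }
    }

    // Installed when the database is created, e.g. (assumed setter):
    //   dbConfig.setHasher(new FnvHasher());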
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/JoinConfig.html b/db/docs/java/com/sleepycat/db/JoinConfig.html new file mode 100644 index 000000000..9f3fc50b7 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/JoinConfig.html @@ -0,0 +1,360 @@ + + + + + + +JoinConfig (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class JoinConfig

+
+java.lang.Object
+  extended bycom.sleepycat.db.JoinConfig
+
+
+
All Implemented Interfaces:
Cloneable
+
+
+
+
public class JoinConfig
extends Object
implements Cloneable
+ +

+The configuration properties of a JoinCursor. +The join cursor configuration is specified when calling Database.join. +

+To create a configuration object with default attributes: +

+    JoinConfig config = new JoinConfig();
+
+To set custom attributes: +
+    JoinConfig config = new JoinConfig();
+    config.setNoSort(true);
+
+

+

+ +

+

+
See Also:
Database.join, +JoinCursor
+
+ +

+ + + + + + + + + + + + + + +
+Field Summary
+static JoinConfigDEFAULT + +
+          Default configuration used if null is passed to Database.join
+  + + + + + + + + + + +
+Constructor Summary
JoinConfig() + +
+          Creates an instance with the system's default settings.
+  + + + + + + + + + + + + + + + +
+Method Summary
+ booleangetNoSort() + +
+          Returns whether automatic sorting of the input cursors is disabled.
+ voidsetNoSort(boolean noSort) + +
+          Specifies whether automatic sorting of the input cursors is disabled.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + +
+Field Detail
+ +

+DEFAULT

+
+public static final JoinConfig DEFAULT
+
+
Default configuration used if null is passed to Database.join +

+

+
+
+ + + + + + + + +
+Constructor Detail
+ +

+JoinConfig

+
+public JoinConfig()
+
+
Creates an instance with the system's default settings. +

+

+ + + + + + + + +
+Method Detail
+ +

+setNoSort

+
+public void setNoSort(boolean noSort)
+
+
Specifies whether automatic sorting of the input cursors is disabled. +

+ Joined values are retrieved by doing a sequential iteration over the + first cursor in the cursor array, and a nested iteration over each + following cursor in the order they are specified in the array. This + requires database traversals to search for the current datum in all the + cursors after the first. For this reason, the best join performance + normally results from sorting the cursors from the one that refers to + the least number of data items to the one that refers to the most. + Unless this method is called with true, Database.join does + this sort on behalf of its caller. +

+ If the data are structured so that cursors with many data items also + share many common elements, higher performance will result from listing + those cursors before cursors with fewer data items; that is, a sort + order other than the default. Calling this method permits applications + to perform join optimization prior to calling + Database.join. +

+

+

+
+
+
+
Parameters:
noSort - whether automatic sorting of the input cursors is disabled. +

See Also:
Database.join
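For example, a sketch of supplying a pre-sorted cursor array; primaryDb and myPresortedCursors are hypothetical handles:

    JoinConfig config = new JoinConfig();
    config.setNoSort(true);   // the caller has already ordered the cursors
    JoinCursor joinCursor = primaryDb.join(myPresortedCursors, config);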
+
+
+
+ +

+getNoSort

+
+public boolean getNoSort()
+
+
Returns whether automatic sorting of the input cursors is disabled. +

+

+

+
+
+
+ +
Returns:
whether automatic sorting of the input cursors is disabled. +

See Also:
setNoSort(boolean)
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/JoinCursor.html b/db/docs/java/com/sleepycat/db/JoinCursor.html new file mode 100644 index 000000000..393a1fa32 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/JoinCursor.html @@ -0,0 +1,438 @@ + + + + + + +JoinCursor (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class JoinCursor

+
+java.lang.Object
+  extended bycom.sleepycat.db.JoinCursor
+
+
+
+
public class JoinCursor
extends Object
+ +

+A specialized join cursor for use in performing equality or natural joins on +secondary indices. +

+A join cursor is returned when calling Database.join. +

+To open a join cursor using two secondary cursors: +

+    Transaction txn = ...
+    Database primaryDb = ...
+    SecondaryDatabase secondaryDb1 = ...
+    SecondaryDatabase secondaryDb2 = ...
+    

+ SecondaryCursor cursor1 = null; + SecondaryCursor cursor2 = null; + JoinCursor joinCursor = null; + try { + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); +

+ cursor1 = secondaryDb1.openSecondaryCursor(txn, null); + cursor2 = secondaryDb2.openSecondaryCursor(txn, null); +

+ key.setData(...); // initialize key for secondary index 1 + OperationStatus status1 = + cursor1.getSearchKey(key, data, LockMode.DEFAULT); + key.setData(...); // initialize key for secondary index 2 + OperationStatus status2 = + cursor2.getSearchKey(key, data, LockMode.DEFAULT); +

+ if (status1 == OperationStatus.SUCCESS && + status2 == OperationStatus.SUCCESS) { +

+ SecondaryCursor[] cursors = {cursor1, cursor2}; + joinCursor = primaryDb.join(cursors, null); +

+ while (true) { + OperationStatus joinStatus = joinCursor.getNext(key, data, + LockMode.DEFAULT); + if (joinStatus == OperationStatus.SUCCESS) { + // Do something with the key and data. + } else { + break; + } + } + } + } finally { + if (cursor1 != null) { + cursor1.close(); + } + if (cursor2 != null) { + cursor2.close(); + } + if (joinCursor != null) { + joinCursor.close(); + } + } +

+

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ voidclose() + +
+          Closes the cursors that have been opened by this join cursor.
+ JoinConfiggetConfig() + +
+          Returns this object's configuration.
+ DatabasegetDatabase() + +
+          Returns the primary database handle associated with this cursor.
+ OperationStatusgetNext(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Returns the next primary key and data resulting from the join operation.
+ OperationStatusgetNext(DatabaseEntry key, + LockMode lockMode) + +
+          Returns the next primary key resulting from the join operation.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+close

+
+public void close()
+           throws DatabaseException
+
+
Closes the cursors that have been opened by this join cursor. +

+ The cursors passed to Database.join are not closed + by this method, and should be closed by the caller. +

+

+

+

+ +
Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getDatabase

+
+public Database getDatabase()
+
+
Returns the primary database handle associated with this cursor. +

+

+

+ +
Returns:
the primary database handle associated with this cursor.
+
+
+
+ +

+getConfig

+
+public JoinConfig getConfig()
+
+
Returns this object's configuration. +

+

+

+ +
Returns:
this object's configuration.
+
+
+
+ +

+getNext

+
+public OperationStatus getNext(DatabaseEntry key,
+                               LockMode lockMode)
+                        throws DatabaseException
+
+
Returns the next primary key resulting from the join operation. +

+An entry is returned by the join cursor for each primary key/data pair having +all secondary key values that were specified using the array of secondary +cursors passed to Database.join. +

+

+

+
Parameters:
key - the primary key +returned as output. Its byte array does not need to be initialized by the +caller. +

lockMode - the locking attributes; if null, default attributes are used. +

+

Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +

+

Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs.
+
+
+
+ +

+getNext

+
+public OperationStatus getNext(DatabaseEntry key,
+                               DatabaseEntry data,
+                               LockMode lockMode)
+                        throws DatabaseException
+
+
Returns the next primary key and data resulting from the join operation. +

+An entry is returned by the join cursor for each primary key/data pair having +all secondary key values that were specified using the array of secondary +cursors passed to Database.join. +

+

+

+
Parameters:
key - the primary key +returned as output. Its byte array does not need to be initialized by the +caller. +

data - the primary data +returned as output. Its byte array does not need to be initialized by the +caller. +

lockMode - the locking attributes; if null, default attributes are used. +

+

Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +

+

Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/KeyRange.html b/db/docs/java/com/sleepycat/db/KeyRange.html new file mode 100644 index 000000000..47045db35 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/KeyRange.html @@ -0,0 +1,307 @@ + + + + + + +KeyRange (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class KeyRange

+
+java.lang.Object
+  extended bycom.sleepycat.db.KeyRange
+
+
+
+
public class KeyRange
extends Object
+ +

+An object that returns status from the Database.getKeyRange method. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + +
+Field Summary
+ doubleequal + +
+          Zero if there is no matching key, and non-zero otherwise.
+ doublegreater + +
+          A value between 0 and 1, the proportion of keys greater than the + specified key.
+ doubleless + +
+          A value between 0 and 1, the proportion of keys less than the specified + key.
+  + + + + + + + + + + +
+Constructor Summary
KeyRange() + +
+           
+  + + + + + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + +
+Field Detail
+ +

+equal

+
+public double equal
+
+
Zero if there is no matching key, and non-zero otherwise. +

+

+
+
+
+ +

+greater

+
+public double greater
+
+
A value between 0 and 1, the proportion of keys greater than the + specified key. +

+ For example, if the value is 0.05, 5% of the keys in the database + are greater than the specified key. +

+

+
+
+
+ +

+less

+
+public double less
+
+
A value between 0 and 1, the proportion of keys less than the specified + key. +

+ For example, if the value is 0.05, 5% of the keys in the database + are less than the specified key. +
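For example, a reading of the three fields; the Database.getKeyRange call shape and the myDatabase/searchKey names are assumptions, not taken from this page:

    KeyRange range = myDatabase.getKeyRange(null, searchKey);   // assumed signature
    if (range.equal != 0) {
        System.out.println((range.less * 100) + "% of keys sort below the key, "
            + (range.greater * 100) + "% above it.");
    }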

+

+
+
+ + + + + + + + +
+Constructor Detail
+ +

+KeyRange

+
+public KeyRange()
+
+
+ + + + +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/Lock.html b/db/docs/java/com/sleepycat/db/Lock.html new file mode 100644 index 000000000..7c4b7fe51 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/Lock.html @@ -0,0 +1,199 @@ + + + + + + +Lock (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class Lock

+
+java.lang.Object
+  extended bycom.sleepycat.db.Lock
+
+
+
+
public final class Lock
extends Object
+ +

+The locking interfaces for the database environment are methods of the +Environment handle. The Lock object is the handle for +a single lock, and has no methods of its own. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + +


+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/LockDetectMode.html b/db/docs/java/com/sleepycat/db/LockDetectMode.html new file mode 100644 index 000000000..c721db5c5 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/LockDetectMode.html @@ -0,0 +1,414 @@ + + + + + + +LockDetectMode (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class LockDetectMode

+
+java.lang.Object
+  extended bycom.sleepycat.db.LockDetectMode
+
+
+
+
public final class LockDetectMode
extends Object
+ +

+Deadlock detection modes. +
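For example, a sketch of selecting a mode at environment-configuration time (the setLockDetectMode setter name on EnvironmentConfig is an assumption):

    EnvironmentConfig envConfig = new EnvironmentConfig();
    // Run automatic detection, rejecting the youngest lock on any cycle found.
    envConfig.setLockDetectMode(LockDetectMode.YOUNGEST);   // assumed setter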

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Field Summary
+static LockDetectModeDEFAULT + +
+          Use whatever lock policy was specified when the database environment + was created.
+static LockDetectModeEXPIRE + +
+          Reject lock requests which have timed out.
+static LockDetectModeMAXLOCKS + +
+          Reject the lock request for the locker ID with the most locks.
+static LockDetectModeMAXWRITE + +
+          Reject the lock request for the locker ID with the most write locks.
+static LockDetectModeMINLOCKS + +
+          Reject the lock request for the locker ID with the fewest locks.
+static LockDetectModeMINWRITE + +
+          Reject the lock request for the locker ID with the fewest write locks.
+static LockDetectModeNONE + +
+          Turn off deadlock detection.
+static LockDetectModeOLDEST + +
+          Reject the lock request for the locker ID with the oldest lock.
+static LockDetectModeRANDOM + +
+          Reject the lock request for a random locker ID.
+static LockDetectModeYOUNGEST + +
+          Reject the lock request for the locker ID with the youngest lock.
+  + + + + + + + + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + +
+Field Detail
+ +

+NONE

+
+public static final LockDetectMode NONE
+
+
Turn off deadlock detection. +

+

+
+
+
+ +

+DEFAULT

+
+public static final LockDetectMode DEFAULT
+
+
Use whatever lock policy was specified when the database environment + was created. If no lock policy has yet been specified, set the lock + policy to DB_LOCK_RANDOM. +

+

+
+
+
+ +

+EXPIRE

+
+public static final LockDetectMode EXPIRE
+
+
Reject lock requests which have timed out. No other deadlock detection + is performed. +

+

+
+
+
+ +

+MAXLOCKS

+
+public static final LockDetectMode MAXLOCKS
+
+
Reject the lock request for the locker ID with the most locks. +

+

+
+
+
+ +

+MAXWRITE

+
+public static final LockDetectMode MAXWRITE
+
+
Reject the lock request for the locker ID with the most write locks. +

+

+
+
+
+ +

+MINLOCKS

+
+public static final LockDetectMode MINLOCKS
+
+
Reject the lock request for the locker ID with the fewest locks. +

+

+
+
+
+ +

+MINWRITE

+
+public static final LockDetectMode MINWRITE
+
+
Reject the lock request for the locker ID with the fewest write locks. +

+

+
+
+
+ +

+OLDEST

+
+public static final LockDetectMode OLDEST
+
+
Reject the lock request for the locker ID with the oldest lock. +

+

+
+
+
+ +

+RANDOM

+
+public static final LockDetectMode RANDOM
+
+
Reject the lock request for a random locker ID. +

+

+
+
+
+ +

+YOUNGEST

+
+public static final LockDetectMode YOUNGEST
+
+
Reject the lock request for the locker ID with the youngest lock. +

+

+
+
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/LockMode.html b/db/docs/java/com/sleepycat/db/LockMode.html new file mode 100644 index 000000000..87ec8fe20 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/LockMode.html @@ -0,0 +1,301 @@ + + + + + + +LockMode (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class LockMode

+
+java.lang.Object
+  extended bycom.sleepycat.db.LockMode
+
+
+
+
public final class LockMode
extends Object
+ +

+Locking modes for database operations. Locking modes are required +parameters for operations that retrieve data or modify the database. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+Field Summary
+static LockModeDEFAULT + +
+          Acquire read locks for read operations and write locks for write + operations.
+static LockModeDEGREE_2 + +
+          Degree 2 isolation provides for cursor stability but not repeatable + reads.
+static LockModeDIRTY_READ + +
+          Read modified but not yet committed data.
+static LockModeRMW + +
+          Acquire write locks instead of read locks when doing the retrieval.
+  + + + + + + + + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + +
+Field Detail
+ +

+DEFAULT

+
+public static final LockMode DEFAULT
+
+
Acquire read locks for read operations and write locks for write + operations. +

+

+
+
+
+ +

+DIRTY_READ

+
+public static final LockMode DIRTY_READ
+
+
Read modified but not yet committed data. +

+

+
+
+
+ +

+RMW

+
+public static final LockMode RMW
+
+
Acquire write locks instead of read locks when doing the retrieval. + Setting this flag can eliminate deadlock during a read-modify-write + cycle by acquiring the write lock during the read part of the cycle + so that another thread of control acquiring a read lock for the same + item, in its own read-modify-write cycle, will not result in deadlock. +
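For example, a read-modify-write sketch; myDatabase, txn, key, and data are hypothetical, and the get/put signatures are assumptions about the wider API:

    OperationStatus status = myDatabase.get(txn, key, data, LockMode.RMW);  // assumed signature
    if (status == OperationStatus.SUCCESS) {
        // ... modify the data entry in place ...
        myDatabase.put(txn, key, data);                                     // assumed signature
    }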

+

+
+
+
+ +

+DEGREE_2

+
+public static final LockMode DEGREE_2
+
+
Degree 2 isolation provides for cursor stability but not repeatable + reads. Data items which have been previously read by this cursor or + transaction may be deleted or modified by other transactions + before the cursor is closed or the transaction completes. +

+

+
+
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/LockNotGrantedException.html b/db/docs/java/com/sleepycat/db/LockNotGrantedException.html new file mode 100644 index 000000000..360aa492c --- /dev/null +++ b/db/docs/java/com/sleepycat/db/LockNotGrantedException.html @@ -0,0 +1,366 @@ + + + + + + +LockNotGrantedException (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class LockNotGrantedException

+
+java.lang.Object
+  extended byjava.lang.Throwable
+      extended byjava.lang.Exception
+          extended bycom.sleepycat.db.DatabaseException
+              extended bycom.sleepycat.db.DeadlockException
+                  extended bycom.sleepycat.db.LockNotGrantedException
+
+
+
All Implemented Interfaces:
Serializable
+
+
+
+
public class LockNotGrantedException
extends DeadlockException
+ +

+A LockNotGrantedException is thrown when a lock requested using the Environment.getLock or Environment.lockVector methods, with the noWait flag or lock timers configured, could not be granted before the wait-time expired.

+Additionally, LockNotGrantedException is thrown when a Concurrent Data +Store database environment configured for lock timeouts was unable to +grant a lock in the allowed time. +

+Additionally, LockNotGrantedException is thrown when lock or transaction +timeouts have been configured and a database operation has timed out. +
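For example, a sketch of handling the exception around a batched lock call; the lockVector arguments are assumptions, and only the exception handling is the point here:

    try {
        myEnvironment.lockVector(lockerId, true /* noWait */, requests);    // assumed signature
    } catch (LockNotGrantedException notGranted) {
        int failed = notGranted.getIndex();   // index of the failed LockRequest, or -1 for getLock
        // back off, retry, or abort the enclosing operation
    } catch (DatabaseException other) {
        throw new RuntimeException(other);    // unrelated failure
    }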

+ +

+

+
See Also:
Serialized Form
+
+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ intgetIndex() + +
+          Returns -1 when Environment.getLock was called, and + returns the index of the failed LockRequest when Environment.lockVector was called.
+ LockgetLock() + +
+          Returns null when Environment.getLock was called, and + returns the lock in the failed LockRequest when Environment.lockVector was called.
+ intgetMode() + +
+          Returns the mode parameter when Environment.getLock was + called, and returns the mode for the failed LockRequest when + Environment.lockVector was called.
+ DatabaseEntrygetObj() + +
+          Returns the object parameter when Environment.getLock was + called, and returns the object for the failed LockRequest when + Environment.lockVector was called.
+ intgetOp() + +
+          Returns 0 when Environment.getLock was called, and returns + the op parameter for the failed LockRequest when Environment.lockVector was called.
+ + + + + + + +
Methods inherited from class com.sleepycat.db.DatabaseException
getEnvironment, getErrno
+ + + + + + + +
Methods inherited from class java.lang.Throwable
fillInStackTrace, getCause, getLocalizedMessage, getMessage, getStackTrace, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+getIndex

+
+public int getIndex()
+
+
Returns -1 when Environment.getLock was called, and + returns the index of the failed LockRequest when Environment.lockVector was called. +

+

+
+
+
+
+ +

+getLock

+
+public Lock getLock()
+
+
Returns null when Environment.getLock was called, and + returns the lock in the failed LockRequest when Environment.lockVector was called. +

+

+
+
+
+
+ +

+getMode

+
+public int getMode()
+
+
Returns the mode parameter when Environment.getLock was + called, and returns the mode for the failed LockRequest when + Environment.lockVector was called. +

+

+
+
+
+
+ +

+getObj

+
+public DatabaseEntry getObj()
+
+
Returns the object parameter when Environment.getLock was + called, and returns the object for the failed LockRequest when + Environment.lockVector was called. +

+

+
+
+
+
+ +

+getOp

+
+public int getOp()
+
+
Returns 0 when Environment.getLock was called, and returns + the op parameter for the failed LockRequest when Environment.lockVector was called. +

+

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/LockOperation.html b/db/docs/java/com/sleepycat/db/LockOperation.html new file mode 100644 index 000000000..760f2de84 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/LockOperation.html @@ -0,0 +1,354 @@ + + + + + + +LockOperation (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class LockOperation

+
+java.lang.Object
+  extended bycom.sleepycat.db.LockOperation
+
+
+
+
public final class LockOperation
extends Object
+ +

+Operations that can be performed on locks. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Field Summary
+static LockOperationGET + +
+          Get the lock defined by the values of the mode and obj fields, for + the specified locker.
+static LockOperationGET_TIMEOUT + +
+          Identical to LockOperation GET except that the value in the timeout + field overrides any previously specified timeout value for this + lock.
+static LockOperationPUT + +
+          The lock to which the lock field refers is released.
+static LockOperationPUT_ALL + +
+          All locks held by the specified locker are released.
+static LockOperationPUT_OBJ + +
+          All locks held on obj are released.
+static LockOperationTIMEOUT + +
+          Cause the specified locker to timeout immediately.
+  + + + + + + + + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + +
+Field Detail
+ +

+GET

+
+public static final LockOperation GET
+
+
Get the lock defined by the values of the mode and obj fields, for + the specified locker. Upon return from Environment.lockVector, if the lock field is non-null, a reference to the + acquired lock is stored there. (This reference is invalidated by + any call to Environment.lockVector or Environment.putLock that releases the lock.) +

+

+
+
+
+ +

+GET_TIMEOUT

+
+public static final LockOperation GET_TIMEOUT
+
+
Identical to LockOperation GET except that the value in the timeout + field overrides any previously specified timeout value for this + lock. A value of 0 turns off any previously specified timeout. +

+

+
+
+
+ +

+PUT

+
+public static final LockOperation PUT
+
+
The lock to which the lock field refers is released. The locker, + mode and obj fields are ignored. +

+

+
+
+
+ +

+PUT_ALL

+
+public static final LockOperation PUT_ALL
+
+
All locks held by the specified locker are released. The lock, + mode, and obj fields are ignored. Locks acquired in operations + performed by the current call to Environment.lockVector + which appear before the PUT_ALL operation are released; those + acquired in operations appearing after the PUT_ALL operation are not + released. +

+

+
+
+
+ +

+PUT_OBJ

+
+public static final LockOperation PUT_OBJ
+
+
All locks held on obj are released. The locker parameter and the lock and mode fields are ignored. Locks acquired in operations performed by the current call to Environment.lockVector that appear before the PUT_OBJ operation are released; those acquired in operations appearing after the PUT_OBJ operation are not released.

+

+
+
+
+ +

+TIMEOUT

+
+public static final LockOperation TIMEOUT
+
+
Cause the specified locker to timeout immediately. If the database + environment has not configured automatic deadlock detection, the + transaction will timeout the next time deadlock detection is + performed. As transactions acquire locks on behalf of a single + locker ID, timing out the locker ID associated with a transaction + will time out the transaction itself. +

+

+
+
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/LockRequest.html b/db/docs/java/com/sleepycat/db/LockRequest.html new file mode 100644 index 000000000..72c7719bd --- /dev/null +++ b/db/docs/java/com/sleepycat/db/LockRequest.html @@ -0,0 +1,497 @@ + + + + + + +LockRequest (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class LockRequest

+
+java.lang.Object
+  extended bycom.sleepycat.db.LockRequest
+
+
+
+
public class LockRequest
extends Object
+ +

+The LockRequest object is used to encapsulate a single lock request. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + +
+Constructor Summary
LockRequest(LockOperation op, + LockRequestMode mode, + DatabaseEntry obj, + Lock lock) + +
+          Construct a LockRequest with the specified operation, mode and lock, + for the specified object.
LockRequest(LockOperation op, + LockRequestMode mode, + DatabaseEntry obj, + Lock lock, + int timeout) + +
+          Construct a LockRequest with the specified operation, mode, lock and + timeout for the specified object.
+  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ LockgetLock() + +
+          Return the lock reference.
+ LockRequestModegetMode() + +
+          Return the lock mode.
+ DatabaseEntrygetObj() + +
+          Return the lock object.
+ LockOperationgetOp() + +
+          Return the lock operation.
+ intgetTimeout() + +
+          Return the lock timeout value.
+ voidsetLock(Lock lock) + +
+          Set the lock reference.
+ voidsetMode(LockRequestMode mode) + +
+          Set the lock mode.
+ voidsetObj(DatabaseEntry obj) + +
+          Set the lock object.
+ voidsetOp(LockOperation op) + +
+          Set the operation.
+ voidsetTimeout(int timeout) + +
+          Set the lock timeout value.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+LockRequest

+
+public LockRequest(LockOperation op,
+                   LockRequestMode mode,
+                   DatabaseEntry obj,
+                   Lock lock)
+
+
Construct a LockRequest with the specified operation, mode and lock, + for the specified object. +

+

+

Parameters:
lock - The lock type for the object. +

mode - The permissions mode for the object. +

obj - The object being locked. +

op - The operation being performed.
+
+ +

+LockRequest

+
+public LockRequest(LockOperation op,
+                   LockRequestMode mode,
+                   DatabaseEntry obj,
+                   Lock lock,
+                   int timeout)
+
+
Construct a LockRequest with the specified operation, mode, lock and + timeout for the specified object. +

+

+

Parameters:
lock - The lock type for the object. +

mode - The permissions mode for the object. +

obj - The object being locked. +

op - The operation being performed. +

timeout - The timeout value for the lock.
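As a hedged illustration of the constructor documented above (the object bytes are made up, and passing null for the fields that PUT_OBJ ignores is an assumption), a request that releases the locks held on a single object could be built as follows:

    // Build a PUT_OBJ request; the lock and mode fields are ignored for this
    // operation, so null is assumed to be acceptable for them.
    DatabaseEntry obj = new DatabaseEntry("account-42".getBytes());
    LockRequest release = new LockRequest(LockOperation.PUT_OBJ, null, obj, null);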
+ + + + + + + + +
+Method Detail
+ +

+setLock

+
+public void setLock(Lock lock)
+
+
Set the lock reference. +

+

+

+
Parameters:
lock - The lock reference.
+
+
+
+ +

+setMode

+
+public void setMode(LockRequestMode mode)
+
+
Set the lock mode. +

+

+

+
Parameters:
mode - the lock mode.
+
+
+
+ +

+setObj

+
+public void setObj(DatabaseEntry obj)
+
+
Set the lock object. +

+

+

+
Parameters:
obj - The lock object.
+
+
+
+ +

+setOp

+
+public void setOp(LockOperation op)
+
+
Set the operation. +

+

+

+
Parameters:
op - The operation.
+
+
+
+ +

+setTimeout

+
+public void setTimeout(int timeout)
+
+
Set the lock timeout value. +

+

+

+
Parameters:
timeout - The lock timeout value.
+
+
+
+ +

+getLock

+
+public Lock getLock()
+
+
Return the lock reference. +

+

+
+
+
+
+ +

+getMode

+
+public LockRequestMode getMode()
+
+
Return the lock mode. +

+

+
+
+
+
+ +

+getObj

+
+public DatabaseEntry getObj()
+
+
Return the lock object. +

+

+
+
+
+
+ +

+getOp

+
+public LockOperation getOp()
+
+
Return the lock operation. +

+

+
+
+
+
+ +

+getTimeout

+
+public int getTimeout()
+
+
Return the lock timeout value. +

+

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/LockRequestMode.html b/db/docs/java/com/sleepycat/db/LockRequestMode.html new file mode 100644 index 000000000..05cbe4d6e --- /dev/null +++ b/db/docs/java/com/sleepycat/db/LockRequestMode.html @@ -0,0 +1,344 @@ + + + + + + +LockRequestMode (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class LockRequestMode

+
+java.lang.Object
+  extended bycom.sleepycat.db.LockRequestMode
+
+
+
+
public final class LockRequestMode
extends Object
+ +

+When using the default lock conflict matrix, the LockRequestMode class +defines the set of possible lock modes. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Field Summary
+static LockRequestModeIREAD + +
+          Intention to read (shared).
+static LockRequestModeIWR + +
+          Intention to read and write (shared).
+static LockRequestModeIWRITE + +
+          Intention to write (shared).
+static LockRequestModeREAD + +
+          Read (shared).
+static LockRequestModeWRITE + +
+          Write (exclusive).
+  + + + + + + + + + + +
+Constructor Summary
LockRequestMode(String operationName, + int flag) + +
+          Construct a custom lock request mode.
+  + + + + + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + +
+Field Detail
+ +

+READ

+
+public static final LockRequestMode READ
+
+
Read (shared). +

+

+
+
+
+ +

+WRITE

+
+public static final LockRequestMode WRITE
+
+
Write (exclusive). +

+

+
+
+
+ +

+IWRITE

+
+public static final LockRequestMode IWRITE
+
+
Intention to write (shared). +

+

+
+
+
+ +

+IREAD

+
+public static final LockRequestMode IREAD
+
+
Intention to read (shared). +

+

+
+
+
+ +

+IWR

+
+public static final LockRequestMode IWR
+
+
Intention to read and write (shared). +

+

+
+
+ + + + + + + + +
+Constructor Detail
+ +

+LockRequestMode

+
+public LockRequestMode(String operationName,
+                       int flag)
+
+
Construct a custom lock request mode. +

+

+

Parameters:
operationName - Name used to display the mode +

flag - Flag value used as an index into the lock conflict matrix
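For example, a hypothetical additional mode for an application-supplied conflict matrix might be declared as below; the name and index are illustrative only and must match the matrix the application actually configures.

    // Index 6 is assumed to be this mode's row/column in the custom conflict matrix.
    static final LockRequestMode UPGRADE = new LockRequestMode("UPGRADE", 6);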
+ + + + +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/LockStats.html b/db/docs/java/com/sleepycat/db/LockStats.html new file mode 100644 index 000000000..791906ac5 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/LockStats.html @@ -0,0 +1,742 @@ + + + + + + +LockStats (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class LockStats

+
+java.lang.Object
+  extended bycom.sleepycat.db.LockStats
+
+
+
+
public class LockStats
extends Object
+ +

+Lock statistics for a database environment. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ intgetCurMaxId() + +
+          The current maximum unused locker ID.
+ intgetId() + +
+          The last allocated locker ID.
+ intgetLockTimeout() + +
+          Lock timeout value.
+ intgetMaxLockers() + +
+          The maximum number of lockers possible.
+ intgetMaxLocks() + +
+          The maximum number of locks possible.
+ intgetMaxNlockers() + +
+          The maximum number of lockers at any one time.
+ intgetMaxNlocks() + +
+          The maximum number of locks at any one time.
+ intgetMaxNobjects() + +
+          The maximum number of lock objects at any one time.
+ intgetMaxObjects() + +
+          The maximum number of lock objects possible.
+ intgetNobjects() + +
+          The number of current lock objects.
+ intgetNumConflicts() + +
+          The total number of locks not immediately available due to conflicts.
+ intgetNumDeadlocks() + +
+          The number of deadlocks.
+ intgetNumLockers() + +
+          The number of current lockers.
+ intgetNumLocks() + +
+          The number of current locks.
+ intgetNumLockTimeouts() + +
+          The number of lock requests that have timed out.
+ intgetNumModes() + +
+          The number of lock modes.
+ intgetNumNowaits() + +
+          The total number of lock requests failing because DB_LOCK_NOWAIT was + set.
+ intgetNumReleases() + +
+          The total number of locks released.
+ intgetNumRequests() + +
+          The total number of locks requested.
+ intgetNumTxnTimeouts() + +
+          The number of transactions that have timed out.
+ intgetRegionNowait() + +
+          The number of times that a thread of control was able to obtain the + region lock without waiting.
+ intgetRegionWait() + +
+          The number of times that a thread of control was forced to wait + before obtaining the region lock.
+ intgetRegSize() + +
+          The size of the lock region.
+ intgetTxnTimeout() + +
+          Transaction timeout value.
+ StringtoString() + +
+          For convenience, the LockStats class has a toString method + that lists all the data fields.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+getId

+
+public int getId()
+
+
The last allocated locker ID. +

+

+
+
+
+
+ +

+getCurMaxId

+
+public int getCurMaxId()
+
+
+
+
+
+
+ +

+getMaxLocks

+
+public int getMaxLocks()
+
+
The maximum number of locks possible. +

+

+
+
+
+
+ +

+getMaxLockers

+
+public int getMaxLockers()
+
+
The maximum number of lockers possible. +

+

+
+
+
+
+ +

+getMaxObjects

+
+public int getMaxObjects()
+
+
The maximum number of lock objects possible. +

+

+
+
+
+
+ +

+getNumModes

+
+public int getNumModes()
+
+
The number of lock modes. +

+

+
+
+
+
+ +

+getNumLocks

+
+public int getNumLocks()
+
+
The number of current locks. +

+

+
+
+
+
+ +

+getMaxNlocks

+
+public int getMaxNlocks()
+
+
The maximum number of locks at any one time. +

+

+
+
+
+
+ +

+getNumLockers

+
+public int getNumLockers()
+
+
The number of current lockers. +

+

+
+
+
+
+ +

+getMaxNlockers

+
+public int getMaxNlockers()
+
+
The maximum number of lockers at any one time. +

+

+
+
+
+
+ +

+getNobjects

+
+public int getNobjects()
+
+
The number of current lock objects. +

+

+
+
+
+
+ +

+getMaxNobjects

+
+public int getMaxNobjects()
+
+
The maximum number of lock objects at any one time. +

+

+
+
+
+
+ +

+getNumConflicts

+
+public int getNumConflicts()
+
+
The total number of locks not immediately available due to conflicts. +

+

+
+
+
+
+ +

+getNumRequests

+
+public int getNumRequests()
+
+
The total number of locks requested. +

+

+
+
+
+
+ +

+getNumReleases

+
+public int getNumReleases()
+
+
The total number of locks released. +

+

+
+
+
+
+ +

+getNumNowaits

+
+public int getNumNowaits()
+
+
The total number of lock requests failing because DB_LOCK_NOWAIT was + set. +

+

+
+
+
+
+ +

+getNumDeadlocks

+
+public int getNumDeadlocks()
+
+
The number of deadlocks. +

+

+
+
+
+
+ +

+getLockTimeout

+
+public int getLockTimeout()
+
+
Lock timeout value. +

+

+
+
+
+
+ +

+getNumLockTimeouts

+
+public int getNumLockTimeouts()
+
+
The number of lock requests that have timed out. +

+

+
+
+
+
+ +

+getTxnTimeout

+
+public int getTxnTimeout()
+
+
Transaction timeout value. +

+

+
+
+
+
+ +

+getNumTxnTimeouts

+
+public int getNumTxnTimeouts()
+
+
The number of transactions that have timed out. This value is also + a component of the value returned by LockStats.getNumDeadlocks, the + total number of deadlocks detected. +

+

+
+
+
+
+ +

+getRegionWait

+
+public int getRegionWait()
+
+
The number of times that a thread of control was forced to wait + before obtaining the region lock. +

+

+
+
+
+
+ +

+getRegionNowait

+
+public int getRegionNowait()
+
+
The number of times that a thread of control was able to obtain the + region lock without waiting. +

+

+
+
+
+
+ +

+getRegSize

+
+public int getRegSize()
+
+
The size of the lock region. +

+

+
+
+
+
+ +

+toString

+
+public String toString()
+
+
For convenience, the LockStats class has a toString method + that lists all the data fields. +

+

+
+
+
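A hedged sketch of reading these statistics: the Environment.getLockStats accessor and the use of null to request the default StatsConfig are assumptions, with env an already-open Environment.

    // Print a few lock-table figures; getLockStats(StatsConfig) is assumed here.
    LockStats stats = env.getLockStats(null);
    System.out.println("lockers:   " + stats.getNumLockers());
    System.out.println("locks:     " + stats.getNumLocks());
    System.out.println("deadlocks: " + stats.getNumDeadlocks());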
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/LogCursor.html b/db/docs/java/com/sleepycat/db/LogCursor.html new file mode 100644 index 000000000..65dc80b66 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/LogCursor.html @@ -0,0 +1,463 @@ + + + + + + +LogCursor (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class LogCursor

+
+java.lang.Object
+  extended bycom.sleepycat.db.LogCursor
+
+
+
+
public class LogCursor
extends Object
+ +

+The LogCursor object is the handle for a cursor into the log files, +supporting sequential access to the records stored in log files. +

+The handle is not free-threaded. Once the LogCursor.close +method is called, the handle may not be accessed again, regardless of +that method's success or failure. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ voidclose() + +
+          Close the log cursor.
+ OperationStatusgetCurrent(LogSequenceNumber lsn, + DatabaseEntry data) + +
+          Return the LogSequenceNumber and log record to which the log cursor + currently refers.
+ OperationStatusgetFirst(LogSequenceNumber lsn, + DatabaseEntry data) + +
+          Return the first LogSequenceNumber and log record.
+ OperationStatusgetLast(LogSequenceNumber lsn, + DatabaseEntry data) + +
+          Return the last LogSequenceNumber and log record.
+ OperationStatusgetNext(LogSequenceNumber lsn, + DatabaseEntry data) + +
+          Return the next LogSequenceNumber and log record.
+ OperationStatusgetPrev(LogSequenceNumber lsn, + DatabaseEntry data) + +
+          Return the previous LogSequenceNumber and log record.
+ OperationStatusset(LogSequenceNumber lsn, + DatabaseEntry data) + +
+          Return a specific log record.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+close

+
+public void close()
+           throws DatabaseException
+
+
Close the log cursor. +

+ The log cursor may not be used again after this method has been + called, regardless of the method's success or failure. +

+

+

+

+ +
Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getCurrent

+
+public OperationStatus getCurrent(LogSequenceNumber lsn,
+                                  DatabaseEntry data)
+                           throws DatabaseException
+
+
Return the LogSequenceNumber and log record to which the log cursor + currently refers. +

+

+

+
Parameters:
lsn - The returned LogSequenceNumber. +

data - The returned log record. The data field is set to the record + retrieved, and the size field indicates the number of bytes in + the record. +

+

Returns:
The status of the operation. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getNext

+
+public OperationStatus getNext(LogSequenceNumber lsn,
+                               DatabaseEntry data)
+                        throws DatabaseException
+
+
Return the next LogSequenceNumber and log record. +

+ The current log position is advanced to the next record in the log, + and its LogSequenceNumber and data are returned. If the cursor has + not been initialized, the first available log record in the log will + be returned. +

+

+

+
Parameters:
lsn - The returned LogSequenceNumber. +

data - The returned log record. +

+

Returns:
The status of the operation; a return of NOTFOUND indicates the last + log record has already been returned or the log is empty. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getFirst

+
+public OperationStatus getFirst(LogSequenceNumber lsn,
+                                DatabaseEntry data)
+                         throws DatabaseException
+
+
Return the first LogSequenceNumber and log record. +

+ The current log position is set to the first record in the log, + and its LogSequenceNumber and data are returned. +

+

+

+
Parameters:
lsn - The returned LogSequenceNumber. +

data - The returned log record. +

+

Returns:
The status of the operation; a return of NOTFOUND indicates the log + is empty. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getLast

+
+public OperationStatus getLast(LogSequenceNumber lsn,
+                               DatabaseEntry data)
+                        throws DatabaseException
+
+
Return the last LogSequenceNumber and log record. +

+ The current log position is set to the last record in the log, + and its LogSequenceNumber and data are returned. +

+

+

+
Parameters:
lsn - The returned LogSequenceNumber. +

data - The returned log record. +

+

Returns:
The status of the operation; a return of NOTFOUND indicates the log + is empty. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+getPrev

+
+public OperationStatus getPrev(LogSequenceNumber lsn,
+                               DatabaseEntry data)
+                        throws DatabaseException
+
+
Return the previous LogSequenceNumber and log record. +

+ The current log position is advanced to the previous record in the log, + and its LogSequenceNumber and data are returned. If the cursor has + not been initialized, the last available log record in the log will + be returned. +

+

+

+
Parameters:
lsn - The returned LogSequenceNumber. +

data - The returned log record. +

+

Returns:
The status of the operation; a return of NOTFOUND indicates the first + log record has already been returned or the log is empty. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+set

+
+public OperationStatus set(LogSequenceNumber lsn,
+                           DatabaseEntry data)
+                    throws DatabaseException
+
+
Return a specific log record. +

+ The current log position is set to the specified record in the log, + and its data is returned. +

+

+

+
Parameters:
lsn - The specified LogSequenceNumber. +

data - The returned log record. +

+

Returns:
The status of the operation. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+ +
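A hedged sketch of walking the log with this class; the Environment.openLogCursor accessor name is an assumption, and env is an open, logging-enabled Environment.

    LogCursor logCursor = env.openLogCursor();    // assumed accessor name
    try {
        LogSequenceNumber lsn = new LogSequenceNumber();
        DatabaseEntry record = new DatabaseEntry();
        // Advance until NOTFOUND signals the end of the log.
        while (logCursor.getNext(lsn, record) == OperationStatus.SUCCESS) {
            System.out.println("[" + lsn.getFile() + "/" + lsn.getOffset()
                + "] " + record.getSize() + " bytes");
        }
    } finally {
        logCursor.close();
    }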
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/LogRecordHandler.html b/db/docs/java/com/sleepycat/db/LogRecordHandler.html new file mode 100644 index 000000000..bc0c5c568 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/LogRecordHandler.html @@ -0,0 +1,222 @@ + + + + + + +LogRecordHandler (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Interface LogRecordHandler

+
+
+
public interface LogRecordHandler
+ +

+A function to process application-specific log records. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ inthandleLogRecord(Environment environment, + DatabaseEntry logRecord, + LogSequenceNumber lsn, + RecoveryOperation operation) + +
+           
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+handleLogRecord

+
+public int handleLogRecord(Environment environment,
+                           DatabaseEntry logRecord,
+                           LogSequenceNumber lsn,
+                           RecoveryOperation operation)
+
+
+
+
+
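Because handleLogRecord is left undocumented above, the following is only a hedged sketch of an implementation: the meaning of the int return value is an assumption (0 is taken to mean success), and installing the handler through an EnvironmentConfig setter is not shown.

    LogRecordHandler handler = new LogRecordHandler() {
        public int handleLogRecord(Environment environment,
                                   DatabaseEntry logRecord,
                                   LogSequenceNumber lsn,
                                   RecoveryOperation operation) {
            if (operation == RecoveryOperation.PRINT) {
                // Dump application-specific records when the log is being printed.
                System.out.println("[" + lsn.getFile() + "/" + lsn.getOffset()
                    + "] " + logRecord.getSize() + " bytes");
            }
            return 0;    // assumed success code
        }
    };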
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/LogSequenceNumber.html b/db/docs/java/com/sleepycat/db/LogSequenceNumber.html new file mode 100644 index 000000000..a248485e2 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/LogSequenceNumber.html @@ -0,0 +1,340 @@ + + + + + + +LogSequenceNumber (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class LogSequenceNumber

+
+java.lang.Object
+  extended bycom.sleepycat.db.LogSequenceNumber
+
+
+
+
public class LogSequenceNumber
extends Object
+ +

+The LogSequenceNumber object is a log sequence number which +specifies a unique location in a log file. A LogSequenceNumber consists +of two unsigned 32-bit integers -- one specifies the log file number, +and the other specifies the offset in the log file. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + +
+Constructor Summary
LogSequenceNumber() + +
+          Construct an uninitialized LogSequenceNumber.
LogSequenceNumber(int file, + int offset) + +
+          Construct a LogSequenceNumber with the specified file and offset.
+  + + + + + + + + + + + + + + + + + + + +
+Method Summary
+static intcompare(LogSequenceNumber lsn0, + LogSequenceNumber lsn1) + +
+          Compare two LogSequenceNumber objects.
+ intgetFile() + +
+          Return the file number component of the LogSequenceNumber.
+ intgetOffset() + +
+          Return the file offset component of the LogSequenceNumber.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+LogSequenceNumber

+
+public LogSequenceNumber(int file,
+                         int offset)
+
+
Construct a LogSequenceNumber with the specified file and offset. +

+

+

Parameters:
file - The log file number. +

offset - The log file offset.
+
+ +

+LogSequenceNumber

+
+public LogSequenceNumber()
+
+
Construct an uninitialized LogSequenceNumber. +

+

+ + + + + + + + +
+Method Detail
+ +

+getFile

+
+public int getFile()
+
+
Return the file number component of the LogSequenceNumber. +

+

+

+ +
Returns:
The file number component of the LogSequenceNumber.
+
+
+
+ +

+getOffset

+
+public int getOffset()
+
+
Return the file offset component of the LogSequenceNumber. +

+

+

+ +
Returns:
The file offset component of the LogSequenceNumber.
+
+
+
+ +

+compare

+
+public static int compare(LogSequenceNumber lsn0,
+                          LogSequenceNumber lsn1)
+
+
Compare two LogSequenceNumber objects. +

+ This method returns 0 if the two LogSequenceNumber objects are + equal, 1 if lsn0 is greater than lsn1, and -1 if lsn0 is less than + lsn1. +

+

+

+
Parameters:
lsn0 - One of the LogSequenceNumber objects to be compared. +

lsn1 - One of the LogSequenceNumber objects to be compared. +

+

Returns:
0 if the two LogSequenceNumber objects are equal, 1 if lsn0 is + greater than lsn1, and -1 if lsn0 is less than lsn1.
+
+
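For example (the file and offset values are illustrative):

    LogSequenceNumber a = new LogSequenceNumber(12, 1024);
    LogSequenceNumber b = new LogSequenceNumber(12, 4096);
    if (LogSequenceNumber.compare(a, b) < 0) {
        // a precedes b in the log
    }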
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/LogStats.html b/db/docs/java/com/sleepycat/db/LogStats.html new file mode 100644 index 000000000..7b08e1fe1 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/LogStats.html @@ -0,0 +1,684 @@ + + + + + + +LogStats (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class LogStats

+
+java.lang.Object
+  extended bycom.sleepycat.db.LogStats
+
+
+
+
public class LogStats
extends Object
+ +

+Log statistics for a database environment. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ intgetCurFile() + +
+          The current log file number.
+ intgetCurOffset() + +
+          The byte offset in the current log file.
+ intgetDiskFile() + +
+          The log file number of the last record known to be on disk.
+ intgetDiskOffset() + +
+          The byte offset of the last record known to be on disk.
+ intgetLgBSize() + +
+          The in-memory log record cache size.
+ intgetLgSize() + +
+          The current log file size.
+ intgetMagic() + +
+          The magic number that identifies a file as a log file.
+ intgetMaxCommitperflush() + +
+          The maximum number of commits contained in a single log flush.
+ intgetMinCommitperflush() + +
+          The minimum number of commits contained in a single log flush that + contained at least one commit.
+ intgetMode() + +
+          The mode of any created log files.
+ intgetRegionNowait() + +
+          The number of times that a thread of control was able to obtain the + region lock without waiting.
+ intgetRegionWait() + +
+          The number of times that a thread of control was forced to wait + before obtaining the region lock.
+ intgetRegSize() + +
+          The size of the region.
+ intgetSCount() + +
+          The number of times the log has been flushed to disk.
+ intgetVersion() + +
+          The version of the log file type.
+ intgetWBytes() + +
+          The number of bytes over and above LogStats.getWMbytes written to this log.
+ intgetWcBytes() + +
+          The number of bytes over and above LogStats.getWcMbytes + written to this log since the last checkpoint.
+ intgetWcMbytes() + +
+          The number of megabytes written to this log since the last checkpoint.
+ intgetWCount() + +
+          The number of times the log has been written to disk.
+ intgetWCountFill() + +
+          The number of times the log has been written to disk because the + in-memory log record cache filled up.
+ intgetWMbytes() + +
+          The number of megabytes written to this log.
+ StringtoString() + +
+          For convenience, the LogStats class has a toString method that lists + all the data fields.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+getMagic

+
+public int getMagic()
+
+
The magic number that identifies a file as a log file. +

+

+
+
+
+
+ +

+getVersion

+
+public int getVersion()
+
+
The version of the log file type. +

+

+
+
+
+
+ +

+getMode

+
+public int getMode()
+
+
The mode of any created log files. +

+

+
+
+
+
+ +

+getLgBSize

+
+public int getLgBSize()
+
+
The in-memory log record cache size. +

+

+
+
+
+
+ +

+getLgSize

+
+public int getLgSize()
+
+
The current log file size. +

+

+
+
+
+
+ +

+getWBytes

+
+public int getWBytes()
+
+
The number of bytes over and above LogStats.getWMbytes written to this log. +

+

+
+
+
+
+ +

+getWMbytes

+
+public int getWMbytes()
+
+
The number of megabytes written to this log. +

+

+
+
+
+
+ +

+getWcBytes

+
+public int getWcBytes()
+
+
The number of bytes over and above LogStats.getWcMbytes + written to this log since the last checkpoint. +

+

+
+
+
+
+ +

+getWcMbytes

+
+public int getWcMbytes()
+
+
The number of megabytes written to this log since the last checkpoint. +

+

+
+
+
+
+ +

+getWCount

+
+public int getWCount()
+
+
The number of times the log has been written to disk. +

+

+
+
+
+
+ +

+getWCountFill

+
+public int getWCountFill()
+
+
The number of times the log has been written to disk because the + in-memory log record cache filled up. +

+

+
+
+
+
+ +

+getSCount

+
+public int getSCount()
+
+
The number of times the log has been flushed to disk. +

+

+
+
+
+
+ +

+getRegionWait

+
+public int getRegionWait()
+
+
The number of times that a thread of control was forced to wait + before obtaining the region lock. +

+

+
+
+
+
+ +

+getRegionNowait

+
+public int getRegionNowait()
+
+
The number of times that a thread of control was able to obtain the + region lock without waiting. +

+

+
+
+
+
+ +

+getCurFile

+
+public int getCurFile()
+
+
The current log file number. +

+

+
+
+
+
+ +

+getCurOffset

+
+public int getCurOffset()
+
+
The byte offset in the current log file. +

+

+
+
+
+
+ +

+getDiskFile

+
+public int getDiskFile()
+
+
The log file number of the last record known to be on disk. +

+

+
+
+
+
+ +

+getDiskOffset

+
+public int getDiskOffset()
+
+
The byte offset of the last record known to be on disk. +

+

+
+
+
+
+ +

+getRegSize

+
+public int getRegSize()
+
+
The size of the region. +

+

+
+
+
+
+ +

+getMaxCommitperflush

+
+public int getMaxCommitperflush()
+
+
The maximum number of commits contained in a single log flush. +

+

+
+
+
+
+ +

+getMinCommitperflush

+
+public int getMinCommitperflush()
+
+
The minimum number of commits contained in a single log flush that + contained at least one commit. +

+

+
+
+
+
+ +

+toString

+
+public String toString()
+
+
For convenience, the LogStats class has a toString method that lists + all the data fields. +

+

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/MemoryException.html b/db/docs/java/com/sleepycat/db/MemoryException.html new file mode 100644 index 000000000..90f24ac58 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/MemoryException.html @@ -0,0 +1,268 @@ + + + + + + +MemoryException (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class MemoryException

+
+java.lang.Object
+  extended byjava.lang.Throwable
+      extended byjava.lang.Exception
+          extended bycom.sleepycat.db.DatabaseException
+              extended bycom.sleepycat.db.MemoryException
+
+
+
All Implemented Interfaces:
Serializable
+
+
+
+
public class MemoryException
extends DatabaseException
+ +

+This exception is thrown when a DatabaseEntry +passed to a Database or Cursor method is not large +enough to hold a value being returned. This only applies to +DatabaseEntry objects configured with the +DatabaseEntry.setUserBuffer method. +In a Java Virtual Machine, there are usually separate heaps for memory +allocated by native code and for objects allocated in Java code. If the +Java heap is exhausted, the JVM will throw an +OutOfMemoryError, so you may see that exception +rather than this one. +

+ +

+

+
See Also:
Serialized Form
+
+ +

+ + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ DatabaseEntrygetDatabaseEntry() + +
+          Returns the DatabaseEntry object with insufficient memory + to complete the operation.
+ + + + + + + +
Methods inherited from class com.sleepycat.db.DatabaseException
getEnvironment, getErrno
+ + + + + + + +
Methods inherited from class java.lang.Throwable
fillInStackTrace, getCause, getLocalizedMessage, getMessage, getStackTrace, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+getDatabaseEntry

+
+public DatabaseEntry getDatabaseEntry()
+
+
Returns the DatabaseEntry object with insufficient memory + to complete the operation. +

+

+
+
+
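A hedged sketch of recovering from this exception by retrying with a larger application-owned buffer; db, the key entry, and the 256-byte initial size are assumptions, and MemoryException is taken to be reported by Database.get when the entry was configured with setUserBuffer.

    DatabaseEntry data = new DatabaseEntry(new byte[256]);
    data.setUserBuffer(256, true);
    try {
        db.get(null, key, data, null);
    } catch (MemoryException e) {
        // The size field of the failing entry holds the length that is needed.
        int needed = e.getDatabaseEntry().getSize();
        data.setData(new byte[needed]);
        data.setUserBuffer(needed, true);
        db.get(null, key, data, null);
    }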
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/MessageHandler.html b/db/docs/java/com/sleepycat/db/MessageHandler.html new file mode 100644 index 000000000..8ea2de541 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/MessageHandler.html @@ -0,0 +1,232 @@ + + + + + + +MessageHandler (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Interface MessageHandler

+
+
+
public interface MessageHandler
+ +

+An interface specifying a callback function to be called to display +informational messages. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ voidmessage(Environment environment, + String message) + +
+          A callback function to be called to display informational messages.
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+message

+
+public void message(Environment environment,
+                    String message)
+
+
A callback function to be called to display informational messages. +

+ Some interfaces in the Berkeley DB library directly output informational + messages or statistical information, or configure the library to output + such messages when performing other operations + (EnvironmentConfig.setVerboseDeadlock, for example). +

+ The EnvironmentConfig.setMessageHandler and + DatabaseConfig.setMessageHandler methods are used to + display these messages for the application. +

+

+

+
Parameters:
environment - The enclosing database environment handle. +

message - An informational message string.
+
+
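A minimal sketch of wiring up a handler via EnvironmentConfig.setMessageHandler (mentioned above); the destination stream is just an example.

    EnvironmentConfig config = new EnvironmentConfig();
    config.setMessageHandler(new MessageHandler() {
        public void message(Environment environment, String message) {
            // Route library messages into the application's own logging.
            System.err.println("BDB: " + message);
        }
    });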
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/MultipleDataEntry.html b/db/docs/java/com/sleepycat/db/MultipleDataEntry.html new file mode 100644 index 000000000..75308ec49 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/MultipleDataEntry.html @@ -0,0 +1,334 @@ + + + + + + +MultipleDataEntry (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class MultipleDataEntry

+
+java.lang.Object
+  extended bycom.sleepycat.db.DatabaseEntry
+      extended bycom.sleepycat.db.MultipleEntry
+          extended bycom.sleepycat.db.MultipleDataEntry
+
+
+
+
public class MultipleDataEntry
extends MultipleEntry
+ +

+A DatabaseEntry that holds multiple data items returned by a single +Database or Cursor get call. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + +
+Constructor Summary
MultipleDataEntry() + +
+          Construct an entry with no data.
MultipleDataEntry(byte[] data) + +
+          Construct an entry with a given byte array.
MultipleDataEntry(byte[] data, + int offset, + int size) + +
+          Constructs a DatabaseEntry with a given byte array, offset and size.
+  + + + + + + + + + + + +
+Method Summary
+ booleannext(DatabaseEntry data) + +
+          Get the next data element in the returned set.
+ + + + + + + +
Methods inherited from class com.sleepycat.db.MultipleEntry
setUserBuffer
+ + + + + + + +
Methods inherited from class com.sleepycat.db.DatabaseEntry
getData, getOffset, getPartial, getPartialLength, getPartialOffset, getRecordNumber, getReuseBuffer, getSize, getUserBuffer, getUserBufferLength, setData, setData, setOffset, setPartial, setPartial, setPartialLength, setPartialOffset, setRecordNumber, setReuseBuffer, setSize
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+MultipleDataEntry

+
+public MultipleDataEntry()
+
+
Construct an entry with no data. The object must be configured + before use with the MultipleEntry.setUserBuffer method. +

+

+
+ +

+MultipleDataEntry

+
+public MultipleDataEntry(byte[] data)
+
+
Construct an entry with a given byte array. The offset is + set to zero; the size is set to the length of the array. If null + is passed, the object must be configured before use with the + MultipleEntry.setUserBuffer method. +

+

+

Parameters:
data - Byte array wrapped by the entry.
+
+ +

+MultipleDataEntry

+
+public MultipleDataEntry(byte[] data,
+                         int offset,
+                         int size)
+
+
Constructs a DatabaseEntry with a given byte array, offset and size. +

+

+

Parameters:
data - Byte array wrapped by the DatabaseEntry.
offset - Offset of the first byte in the byte array to be included.
size - Number of bytes in the byte array to be included.
+ + + + + + + + +
+Method Detail
+ +

+next

+
+public boolean next(DatabaseEntry data)
+
+
Get the next data element in the returned set. This method may only + be called after a successful call to a Database or + Cursor get method with this object as the data parameter. +

+ When used with the Queue and Recno access methods, + data.getData() will return null for deleted + records. +

+

+

+
Parameters:
data - an entry that is set to refer to the next data element in the returned + set. +

+

Returns:
indicates whether a value was found. A return of false + indicates that the end of the set was reached.
+
+
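A hedged sketch of draining the buffer after a bulk get; the buffer size is arbitrary, and the preceding Database or Cursor get call (with 'results' as the data parameter, as the description above requires) is only indicated by a comment.

    MultipleDataEntry results = new MultipleDataEntry(new byte[1024 * 1024]);
    results.setUserBuffer(1024 * 1024, true);
    // ... perform a Database or Cursor get with 'results' as the data parameter ...
    DatabaseEntry item = new DatabaseEntry();
    while (results.next(item)) {
        // item now refers to the next data element packed into the buffer
        System.out.println(item.getSize() + " bytes");
    }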
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/MultipleEntry.html b/db/docs/java/com/sleepycat/db/MultipleEntry.html new file mode 100644 index 000000000..15e96c010 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/MultipleEntry.html @@ -0,0 +1,273 @@ + + + + + + +MultipleEntry (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class MultipleEntry

+
+java.lang.Object
+  extended bycom.sleepycat.db.DatabaseEntry
+      extended bycom.sleepycat.db.MultipleEntry
+
+
+
Direct Known Subclasses:
MultipleDataEntry, MultipleKeyDataEntry, MultipleRecnoDataEntry
+
+
+
+
public abstract class MultipleEntry
extends DatabaseEntry
+ +

+An abstract class representing a DatabaseEntry that holds multiple results +returned by a single Cursor get method. Use one of the concrete +subclasses depending on whether you need data items only +(MultipleDataEntry), key/data pairs (MultipleKeyDataEntry), or +record number/data pairs (MultipleRecnoDataEntry). +

+

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ voidsetUserBuffer(int length, + boolean usermem) + +
+          Configures the entry with an application-owned buffer.
+ + + + + + + +
Methods inherited from class com.sleepycat.db.DatabaseEntry
getData, getOffset, getPartial, getPartialLength, getPartialOffset, getRecordNumber, getReuseBuffer, getSize, getUserBuffer, getUserBufferLength, setData, setData, setOffset, setPartial, setPartial, setPartialLength, setPartialOffset, setRecordNumber, setReuseBuffer, setSize
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+setUserBuffer

+
+public void setUserBuffer(int length,
+                          boolean usermem)
+
+
Description copied from class: DatabaseEntry
+
Configures the entry with an application-owned buffer. +

+ The data field of the entry must refer to a buffer that is + at least length bytes in length. +

+ If the length of the requested item is less than or equal to that number + of bytes, the item is copied into the memory to which the + data field refers. Otherwise, the size field + is set to the length needed for the requested item, and a + MemoryException is thrown. +

+ Applications can determine the length of a record by setting + length to 0 and calling DatabaseEntry.getSize + on the return value. +

+

+

+
Overrides:
setUserBuffer in class DatabaseEntry
+
+
+
Parameters:
length - the length of the buffer +

usermem - whether the buffer is owned by the application
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/MultipleKeyDataEntry.html b/db/docs/java/com/sleepycat/db/MultipleKeyDataEntry.html new file mode 100644 index 000000000..1da5e7a17 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/MultipleKeyDataEntry.html @@ -0,0 +1,334 @@ + + + + + + +MultipleKeyDataEntry (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class MultipleKeyDataEntry

+
+java.lang.Object
+  extended bycom.sleepycat.db.DatabaseEntry
+      extended bycom.sleepycat.db.MultipleEntry
+          extended bycom.sleepycat.db.MultipleKeyDataEntry
+
+
+
+
public class MultipleKeyDataEntry
extends MultipleEntry
+ +

+A DatabaseEntry that holds multiple key/data pairs returned by a single +Database or Cursor get call. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + +
+Constructor Summary
MultipleKeyDataEntry() + +
+          Construct an entry with no data.
MultipleKeyDataEntry(byte[] data) + +
+          Construct an entry with a given byte array.
MultipleKeyDataEntry(byte[] data, + int offset, + int size) + +
+          Constructs a DatabaseEntry with a given byte array, offset and size.
+  + + + + + + + + + + + +
+Method Summary
+ booleannext(DatabaseEntry key, + DatabaseEntry data) + +
+          Get the next key/data pair in the returned set.
+ + + + + + + +
Methods inherited from class com.sleepycat.db.MultipleEntry
setUserBuffer
+ + + + + + + +
Methods inherited from class com.sleepycat.db.DatabaseEntry
getData, getOffset, getPartial, getPartialLength, getPartialOffset, getRecordNumber, getReuseBuffer, getSize, getUserBuffer, getUserBufferLength, setData, setData, setOffset, setPartial, setPartial, setPartialLength, setPartialOffset, setRecordNumber, setReuseBuffer, setSize
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+MultipleKeyDataEntry

+
+public MultipleKeyDataEntry()
+
+
Construct an entry with no data. The object must be configured + before use with the MultipleEntry.setUserBuffer method. +

+

+
+ +

+MultipleKeyDataEntry

+
+public MultipleKeyDataEntry(byte[] data)
+
+
Construct an entry with a given byte array. The offset is + set to zero; the size is set to the length of the array. If null + is passed, the object must be configured before use with the + MultipleEntry.setUserBuffer method. +

+

+

Parameters:
data - Byte array wrapped by the entry.
+
+ +

+MultipleKeyDataEntry

+
+public MultipleKeyDataEntry(byte[] data,
+                            int offset,
+                            int size)
+
+
Constructs a DatabaseEntry with a given byte array, offset and size. +

+

+

Parameters:
data - Byte array wrapped by the DatabaseEntry.
offset - Offset of the first byte in the byte array to be included.
size - Number of bytes in the byte array to be included.
+ + + + + + + + +
+Method Detail
+ +

+next

+
+public boolean next(DatabaseEntry key,
+                    DatabaseEntry data)
+
+
Get the next key/data pair in the returned set. This method may only + be called after a successful call to a Database or + Cursor get method with this object as the data parameter. +

+

+

+
Parameters:
key - an entry that is set to refer to the next key element in the returned + set. +

data - an entry that is set to refer to the next data element in the returned + set. +

+

Returns:
indicates whether a value was found. A return of false + indicates that the end of the set was reached.
+
+
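The same pattern, hedged in the same way as the MultipleDataEntry sketch (arbitrary buffer size, bulk get call elided), applied to key/data pairs:

    MultipleKeyDataEntry pairs = new MultipleKeyDataEntry(new byte[1024 * 1024]);
    pairs.setUserBuffer(1024 * 1024, true);
    // ... perform a Database or Cursor get with 'pairs' as the data parameter ...
    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();
    while (pairs.next(key, data)) {
        // process one key/data pair per iteration
    }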
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/MultipleRecnoDataEntry.html b/db/docs/java/com/sleepycat/db/MultipleRecnoDataEntry.html new file mode 100644 index 000000000..d3601c7f8 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/MultipleRecnoDataEntry.html @@ -0,0 +1,338 @@ + + + + + + +MultipleRecnoDataEntry (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class MultipleRecnoDataEntry

+
+java.lang.Object
+  extended bycom.sleepycat.db.DatabaseEntry
+      extended bycom.sleepycat.db.MultipleEntry
+          extended bycom.sleepycat.db.MultipleRecnoDataEntry
+
+
+
+
public class MultipleRecnoDataEntry
extends MultipleEntry
+ +

+A DatabaseEntry that holds multiple record number/data pairs returned by a +single Database or Cursor get call. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + +
+Constructor Summary
MultipleRecnoDataEntry() + +
+          Construct an entry with no data.
MultipleRecnoDataEntry(byte[] data) + +
+          Construct an entry with a given byte array.
MultipleRecnoDataEntry(byte[] data, + int offset, + int size) + +
+          Constructs a DatabaseEntry with a given byte array, offset and size.
+  + + + + + + + + + + + +
+Method Summary
+ booleannext(DatabaseEntry recno, + DatabaseEntry data) + +
+          Get the next record number/data pair in the returned set.
+ + + + + + + +
Methods inherited from class com.sleepycat.db.MultipleEntry
setUserBuffer
+ + + + + + + +
Methods inherited from class com.sleepycat.db.DatabaseEntry
getData, getOffset, getPartial, getPartialLength, getPartialOffset, getRecordNumber, getReuseBuffer, getSize, getUserBuffer, getUserBufferLength, setData, setData, setOffset, setPartial, setPartial, setPartialLength, setPartialOffset, setRecordNumber, setReuseBuffer, setSize
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+MultipleRecnoDataEntry

+
+public MultipleRecnoDataEntry()
+
+
Construct an entry with no data. The object must be configured + before use with the MultipleEntry.setUserBuffer method. +

+

+
+ +

+MultipleRecnoDataEntry

+
+public MultipleRecnoDataEntry(byte[] data)
+
+
Construct an entry with a given byte array. The offset is + set to zero; the size is set to the length of the array. If null + is passed, the object must be configured before use with the + MultipleEntry.setUserBuffer method. +

+

+

Parameters:
data - Byte array wrapped by the entry.
+
+ +

+MultipleRecnoDataEntry

+
+public MultipleRecnoDataEntry(byte[] data,
+                              int offset,
+                              int size)
+
+
Constructs a DatabaseEntry with a given byte array, offset and size. +

+

+

Parameters:
data - Byte array wrapped by the DatabaseEntry.
offset - Offset of the first byte in the byte array to be included.
size - Number of bytes in the byte array to be included.
+ + + + + + + + +
+Method Detail
+ +

+next

+
+public boolean next(DatabaseEntry recno,
+                    DatabaseEntry data)
+
+
Get the next record number/data pair in the returned set. This method + may only be called after a successful call to a Database or + Cursor get method with this object as the data parameter. +

+ When used with the Queue and Recno access methods, + data.getData() will return null for deleted + records. +

+

+

+
Parameters:
recno - an entry that is set to refer to the next record number in the returned + set. +

data - an entry that is set to refer to the next data element in the returned + set. +

+

Returns:
indicates whether a value was found. A return of false + indicates that the end of the set was reached.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/OperationStatus.html b/db/docs/java/com/sleepycat/db/OperationStatus.html new file mode 100644 index 000000000..dbb524b5d --- /dev/null +++ b/db/docs/java/com/sleepycat/db/OperationStatus.html @@ -0,0 +1,294 @@ + + + + + + +OperationStatus (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class OperationStatus

+
+java.lang.Object
+  extended bycom.sleepycat.db.OperationStatus
+
+
+
+
public final class OperationStatus
extends Object
+ +

+Status values from database operations. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+Field Summary
+static OperationStatusKEYEMPTY + +
+          The cursor operation was unsuccessful because the current record + was deleted.
+static OperationStatusKEYEXIST + +
+          The operation to insert data was configured to not allow overwrite + and the key already exists in the database.
+static OperationStatusNOTFOUND + +
+          The requested key/data pair was not found.
+static OperationStatusSUCCESS + +
+          The operation was successful.
+  + + + + + + + + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + +
+Field Detail
+ +

+SUCCESS

+
+public static final OperationStatus SUCCESS
+
+
The operation was successful. +

+

+
+
+
+ +

+KEYEXIST

+
+public static final OperationStatus KEYEXIST
+
+
The operation to insert data was configured to not allow overwrite + and the key already exists in the database. +

+

+
+
+
+ +

+KEYEMPTY

+
+public static final OperationStatus KEYEMPTY
+
+
The cursor operation was unsuccessful because the current record + was deleted. +

+

+
+
+
+ +

+NOTFOUND

+
+public static final OperationStatus NOTFOUND
+
+
The requested key/data pair was not found. +

+

+
+
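A short sketch of branching on these values; db, key, and data are assumed to be open and initialized handles.

    OperationStatus status = db.get(null, key, data, null);
    if (status == OperationStatus.SUCCESS) {
        // use data
    } else if (status == OperationStatus.NOTFOUND) {
        // the key is not present in the database
    }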
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/PanicHandler.html b/db/docs/java/com/sleepycat/db/PanicHandler.html new file mode 100644 index 000000000..213cdbd79 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/PanicHandler.html @@ -0,0 +1,235 @@ + + + + + + +PanicHandler (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Interface PanicHandler

+
+
+
public interface PanicHandler
+ +

+An interface specifying a function to be called if the database +environment panics. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ voidpanic(Environment environment, + DatabaseException e) + +
+          A function to be called if the database environment panics.
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+panic

+
+public void panic(Environment environment,
+                  DatabaseException e)
+
+
A function to be called if the database environment panics. +

+ Errors can occur in the Berkeley DB library where the only solution + is to shut down the application and run recovery (for example, if + Berkeley DB is unable to allocate heap memory). In such cases, the + Berkeley DB methods will throw a RunRecoveryException. +

+ It is often easier to simply exit the application when such errors + occur rather than gracefully return up the stack. The panic + callback function is called when + RunRecoveryException is about to be thrown from a + Berkeley DB method. +

+

+

+
Parameters:
environment - The enclosing database environment handle. +

e - The DatabaseException that would have been thrown to + the calling method.
+
+
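A hedged sketch of installing a handler; EnvironmentConfig.setPanicHandler is assumed to be the configuration point, and exiting is only one possible policy.

    EnvironmentConfig config = new EnvironmentConfig();
    config.setPanicHandler(new PanicHandler() {
        public void panic(Environment environment, DatabaseException e) {
            System.err.println("Environment panic: " + e);
            System.exit(1);    // run recovery on the next start-up
        }
    });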
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/PreparedTransaction.html b/db/docs/java/com/sleepycat/db/PreparedTransaction.html new file mode 100644 index 000000000..3ed2dd515 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/PreparedTransaction.html @@ -0,0 +1,261 @@ + + + + + + +PreparedTransaction (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class PreparedTransaction

+
+java.lang.Object
+  extended bycom.sleepycat.db.PreparedTransaction
+
+
+
+
public class PreparedTransaction
extends Object
+ +

+The PreparedTransaction object is used to encapsulate a single prepared, +but not yet resolved, transaction. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ byte[]getGID() + +
+          Return the global transaction ID for the transaction.
+ TransactiongetTransaction() + +
+          Return the transaction handle for the transaction.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+getGID

+
+public byte[] getGID()
+
+
Return the global transaction ID for the transaction. +

+ The global transaction ID is the one specified when the transaction + was originally prepared. The application is responsible for ensuring + uniqueness among global transaction IDs. +

+

+

+ +
Returns:
The global transaction ID for the transaction.
+
+
+
+ +

+getTransaction

+
+public Transaction getTransaction()
+
+
Return the transaction handle for the transaction. +

+

+

+ +
Returns:
The transaction handle for the transaction.
+
+
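A hedged recovery sketch: the Environment.recover signature and arguments below are assumptions, and the decision to commit or abort would come from the application's global transaction manager.

    // recover(count, continued) is assumed to return the prepared-but-unresolved
    // transactions after the environment has been recovered.
    PreparedTransaction[] pending = env.recover(100, false);
    for (int i = 0; i < pending.length; i++) {
        byte[] gid = pending[i].getGID();
        // Look up 'gid' with the global transaction manager, then resolve:
        pending[i].getTransaction().abort();    // or commit()
    }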
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/QueueStats.html b/db/docs/java/com/sleepycat/db/QueueStats.html new file mode 100644 index 000000000..dcda75049 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/QueueStats.html @@ -0,0 +1,523 @@ + + + + + + +QueueStats (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class QueueStats

+
+java.lang.Object
+  extended bycom.sleepycat.db.DatabaseStats
+      extended bycom.sleepycat.db.QueueStats
+
+
+
+
public class QueueStats
extends DatabaseStats
+ +

+The QueueStats object is used to return Queue database statistics. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ intgetCurRecno() + +
+          The next available record number.
+ intgetExtentSize() + +
+          The underlying database extent size, in pages.
+ intgetFirstRecno() + +
+          The first undeleted record in the database.
+ intgetMagic() + +
+          The magic number that identifies the file as a Queue file.
+ intgetMetaFlags() + +
+          The metadata flags.
+ intgetNumData() + +
+          The number of records in the database.
+ intgetNumKeys() + +
+          The number of records in the database.
+ intgetPages() + +
+          The number of pages in the database.
+ intgetPagesFree() + +
+          The number of bytes free in database pages.
+ intgetPageSize() + +
+          The underlying database page size, in bytes.
+ intgetReLen() + +
+          The length of the records.
+ intgetRePad() + +
+          The padding byte value for the records.
+ intgetVersion() + +
+          The version of the Queue database.
+ StringtoString() + +
+          For convenience, the QueueStats class has a toString method + that lists all the data fields.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+getMagic

+
+public int getMagic()
+
+
The magic number that identifies the file as a Queue file. +

+

+
+
+
+
+ +

+getVersion

+
+public int getVersion()
+
+
The version of the Queue database. +

+

+
+
+
+
+ +

+getMetaFlags

+
+public int getMetaFlags()
+
+
The metadata flags. +

+

+
+
+
+
+ +

+getNumKeys

+
+public int getNumKeys()
+
+
The number of records in the database. +

+ If the Database.getStats call was configured by the + StatsConfig.setFast method, the count will be the last + saved value unless it has never been calculated, in which case it + will be 0. +

+

+
+
+
+
+ +

+getNumData

+
+public int getNumData()
+
+
The number of records in the database. +

+ If the Database.getStats call was configured by the + StatsConfig.setFast method, the count will be the last + saved value unless it has never been calculated, in which case it + will be 0. +

+

+
+
+
+
+ +

+getPageSize

+
+public int getPageSize()
+
+
The underlying database page size, in bytes. +

+

+
+
+
+
+ +

+getExtentSize

+
+public int getExtentSize()
+
+
The underlying database extent size, in pages. +

+

+
+
+
+
+ +

+getPages

+
+public int getPages()
+
+
The number of pages in the database. +

+The information is only included if the Database.getStats call +was not configured by the StatsConfig.setFast method. +

+

+
+
+
+
+ +

+getReLen

+
+public int getReLen()
+
+
The length of the records. +

+

+
+
+
+
+ +

+getRePad

+
+public int getRePad()
+
+
The padding byte value for the records. +

+

+
+
+
+
+ +

+getPagesFree

+
+public int getPagesFree()
+
+
The number of bytes free in database pages. +

+The information is only included if the Database.getStats call +was not configured by the StatsConfig.setFast method. +

+

+
+
+
+
+ +

+getFirstRecno

+
+public int getFirstRecno()
+
+
The first undeleted record in the database. +

+

+
+
+
+
+ +

+getCurRecno

+
+public int getCurRecno()
+
+
The next available record number. +

+

+
+
+
+
+ +

+toString

+
+public String toString()
+
+
For convenience, the QueueStats class has a toString method + that lists all the data fields. +

+

+
+
+
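A hedged sketch of obtaining these statistics; the Database.getStats signature and the cast from the returned DatabaseStats are assumptions, with db an open Queue database.

    StatsConfig fast = new StatsConfig();
    fast.setFast(true);                               // skip the full database walk
    QueueStats stats = (QueueStats) db.getStats(null, fast);
    System.out.println("records:       " + stats.getNumData());
    System.out.println("record length: " + stats.getReLen());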
+ +
diff --git a/db/docs/java/com/sleepycat/db/RecordNumberAppender.html b/db/docs/java/com/sleepycat/db/RecordNumberAppender.html
new file mode 100644
index 000000000..d00eb2b58
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/RecordNumberAppender.html
@@ -0,0 +1,242 @@
RecordNumberAppender (Sleepycat Software, Inc. - Berkeley DB Java API)

+ +com.sleepycat.db +
+Interface RecordNumberAppender

+
+
+
public interface RecordNumberAppender
+ +

+An interface specifying a callback function that modifies stored data +based on a generated key. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + +
+Method Summary
 void appendRecordNumber(Database db, DatabaseEntry data, int recno)
+          A callback function to modify the stored database based on the + generated key.
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+appendRecordNumber

+
+public void appendRecordNumber(Database db,
+                               DatabaseEntry data,
+                               int recno)
+                        throws DatabaseException
+
+
A callback function to modify the stored database based on the + generated key. +

+ When storing records using Database.append it may be + useful to modify the stored data based on the generated key. + This function will be called after the record number has been + selected, but before the data has been stored. +

+ The callback function may modify the data DatabaseEntry. +

 The callback function must throw a DatabaseException object to encapsulate the error on failure. That object will be thrown to the caller of Database.append.

+

+

+
Parameters:
db - The enclosing database handle. +

data - The data to be stored. +

recno - The generated record number. +
Throws: +
DatabaseException
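 A brief illustrative sketch of an implementation follows. The interface and the DatabaseEntry accessors are taken from the text above; the class name and the 4-byte big-endian prefix layout are assumptions made for the example.

    public class RecnoPrefixAppender implements RecordNumberAppender {
        // Prefix the stored data with the generated record number.
        public void appendRecordNumber(Database db, DatabaseEntry data, int recno)
            throws DatabaseException {
            byte[] old = data.getData();
            int oldLen = (old == null) ? 0 : old.length;
            byte[] out = new byte[4 + oldLen];
            out[0] = (byte) (recno >>> 24);
            out[1] = (byte) (recno >>> 16);
            out[2] = (byte) (recno >>> 8);
            out[3] = (byte) recno;
            if (old != null)
                System.arraycopy(old, 0, out, 4, oldLen);
            data.setData(out);   // the modified entry is what gets stored
        }
    }

 Such an appender would then be registered through DatabaseConfig.setRecordNumberAppender before opening the database.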
+
+
+ +
diff --git a/db/docs/java/com/sleepycat/db/RecoveryOperation.html b/db/docs/java/com/sleepycat/db/RecoveryOperation.html
new file mode 100644
index 000000000..f9b84b788
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/RecoveryOperation.html
@@ -0,0 +1,370 @@
RecoveryOperation (Sleepycat Software, Inc. - Berkeley DB Java API)

+ +com.sleepycat.db +
+Class RecoveryOperation

+
+java.lang.Object
  extended by com.sleepycat.db.RecoveryOperation
+
+
+
+
public final class RecoveryOperation
extends Object
+ +

+The recovery operation being performed when LogRecordHandler.handleLogRecord is called. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Field Summary
static RecoveryOperation ABORT
          The log is being read backward during a transaction abort; undo the operation described by the log record.
static RecoveryOperation APPLY
          The log is being applied on a replica site; redo the operation described by the log record.
static RecoveryOperation BACKWARD_ROLL
          The log is being read backward to determine which transactions have been committed and to abort those operations that were not; undo the operation described by the log record.
static RecoveryOperation FORWARD_ROLL
          The log is being played forward; redo the operation described by the log record.
static RecoveryOperation PRINT
          The log is being printed for debugging purposes; print the contents of this log record in the desired format.

Method Summary
static RecoveryOperation fromFlag(int flag)
+          Internal: this is public only so it can be called from an internal + package.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + +
+Field Detail
+ +

+BACKWARD_ROLL

+
+public static final RecoveryOperation BACKWARD_ROLL
+
+
The log is being read backward to determine which transactions have + been committed and to abort those operations that were not; undo the + operation described by the log record. +

+

+
+
+
+ +

+FORWARD_ROLL

+
+public static final RecoveryOperation FORWARD_ROLL
+
+
The log is being played forward; redo the operation described by the log + record. +

+ The FORWARD_ROLL and APPLY operations frequently imply the same actions, + redoing changes that appear in the log record, although if a recovery + function is to be used on a replication client where reads may be taking + place concurrently with the processing of incoming messages, APPLY + operations should also perform appropriate locking. +

+

+
+
+
+ +

+ABORT

+
+public static final RecoveryOperation ABORT
+
+
The log is being read backward during a transaction abort; undo the + operation described by the log record. +

+

+
+
+
+ +

+APPLY

+
+public static final RecoveryOperation APPLY
+
+
The log is being applied on a replica site; redo the operation + described by the log record. +

+ The FORWARD_ROLL and APPLY operations frequently imply the same actions, + redoing changes that appear in the log record, although if a recovery + function is to be used on a replication client where reads may be taking + place concurrently with the processing of incoming messages, APPLY + operations should also perform appropriate locking. +

+

+
+
+
+ +

+PRINT

+
+public static final RecoveryOperation PRINT
+
+
The log is being printed for debugging purposes; print the contents of + this log record in the desired format. +
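 As a small illustration (not part of the API), a log-record handler might dispatch on these constants. The helper below is a hypothetical example; it only compares against the fields documented above.

    // Hypothetical helper: map a RecoveryOperation to the action it calls for.
    static String describe(RecoveryOperation op) {
        if (op == RecoveryOperation.BACKWARD_ROLL) return "undo (backward roll)";
        if (op == RecoveryOperation.FORWARD_ROLL)  return "redo (forward roll)";
        if (op == RecoveryOperation.ABORT)         return "undo (transaction abort)";
        if (op == RecoveryOperation.APPLY)         return "redo (replica apply)";
        if (op == RecoveryOperation.PRINT)         return "print the log record";
        return "unknown operation";
    }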

+

+
+
+ + + + + + + + + + + +
+Method Detail
+ +

+fromFlag

+
+public static RecoveryOperation fromFlag(int flag)
+
+
Internal: this is public only so it can be called from an internal + package. +

+

+
Parameters:
flag - the internal flag value to be wrapped in a RecoveryOperation object
+
+
+ +
diff --git a/db/docs/java/com/sleepycat/db/ReplicationHandleDeadException.html b/db/docs/java/com/sleepycat/db/ReplicationHandleDeadException.html
new file mode 100644
index 000000000..25be0b68e
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/ReplicationHandleDeadException.html
@@ -0,0 +1,230 @@
ReplicationHandleDeadException (Sleepycat Software, Inc. - Berkeley DB Java API)

+ +com.sleepycat.db +
+Class ReplicationHandleDeadException

+
+java.lang.Object
  extended by java.lang.Throwable
      extended by java.lang.Exception
          extended by com.sleepycat.db.DatabaseException
              extended by com.sleepycat.db.ReplicationHandleDeadException
+
+
+
All Implemented Interfaces:
Serializable
+
+
+
+
public class ReplicationHandleDeadException
extends DatabaseException
+ +

Thrown when a database handle has been invalidated because a replication election unrolled a committed transaction. When this happens the handle can no longer be used, and the application must close it using the Database.close method and open a new handle. This exception is thrown when a client unrolls a transaction in order to synchronize with a new master. If the application were permitted to keep using the original handle, the handle might attempt to access nonexistent resources.
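 A hedged sketch of the recommended recovery action follows. The environment handle env, the DatabaseConfig dbConfig, the key and data entries, and the database name are illustrative assumptions, and the openDatabase and get signatures are assumed to match the rest of this API.

    DatabaseEntry key = new DatabaseEntry("akey".getBytes());
    DatabaseEntry data = new DatabaseEntry();
    Database db = env.openDatabase(null, "quote.db", null, dbConfig);
    try {
        db.get(null, key, data, null);
    } catch (ReplicationHandleDeadException e) {
        db.close();                                              // the dead handle must be discarded
        db = env.openDatabase(null, "quote.db", null, dbConfig); // open a fresh handle
    }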

+ +

+

+
See Also:
Serialized Form
+
+ +

+ + + + + + + + + + + + + + + + + + + +
Methods inherited from class com.sleepycat.db.DatabaseException
getEnvironment, getErrno
+ + + + + + + +
Methods inherited from class java.lang.Throwable
fillInStackTrace, getCause, getLocalizedMessage, getMessage, getStackTrace, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
+  +

+ + + + + + + + + + +


diff --git a/db/docs/java/com/sleepycat/db/ReplicationStats.html b/db/docs/java/com/sleepycat/db/ReplicationStats.html
new file mode 100644
index 000000000..2870c71dd
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/ReplicationStats.html
@@ -0,0 +1,1149 @@
ReplicationStats (Sleepycat Software, Inc. - Berkeley DB Java API)

+ +com.sleepycat.db +
+Class ReplicationStats

+
+java.lang.Object
  extended by com.sleepycat.db.ReplicationStats
+
+
+
+
public class ReplicationStats
extends Object
+ +

+Replication statistics for a database environment. +
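 A short usage sketch, not taken from this page: replication statistics are read from an open environment handle (here called env), assuming an Environment.getReplicationStats accessor analogous to the other statistics calls; if the accessor differs, only the retrieval line changes.

    // Hedged sketch: print a few replication statistics for an open environment.
    ReplicationStats repStats = env.getReplicationStats(new StatsConfig());  // accessor name assumed
    System.out.println("current master: " + repStats.getMaster());
    System.out.println("messages sent:  " + repStats.getMsgsSent());
    System.out.println("group size:     " + repStats.getNumSites());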

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
int getDupmasters()
          The number of duplicate master conditions detected.
int getEgen()
          The current election generation number.
int getElectionCurWinner()
          The election winner.
int getElectionGen()
          The election generation number.
LogSequenceNumber getElectionLsn()
          The maximum LSN of the election winner.
int getElectionNumSites()
          The number of sites expected to participate in elections.
int getElectionNumVotes()
          The number of votes required to complete the election.
int getElectionPriority()
          The election priority.
int getElections()
          The number of elections held.
int getElectionStatus()
          The current election phase (0 if no election is in progress).
int getElectionsWon()
          The number of elections won.
int getElectionTiebreaker()
          The election tiebreaker value.
int getElectionVotes()
          The votes received this election round.
int getEnvId()
          The current environment ID.
int getEnvPriority()
          The current environment priority.
int getGen()
          The current generation number.
int getLogDuplicated()
          The number of duplicate log records received.
int getLogQueued()
          The number of log records currently queued.
int getLogQueuedMax()
          The maximum number of log records ever queued at once.
int getLogQueuedTotal()
          The total number of log records queued.
int getLogRecords()
          The number of log records received and appended to the log.
int getLogRequested()
          The number of log records missed and requested.
int getMaster()
          The current master environment ID.
int getMasterChanges()
          The number of times the master has changed.
int getMsgsBadgen()
          The number of messages received with a bad generation number.
int getMsgsProcessed()
          The number of messages received and processed.
int getMsgsRecover()
          The number of messages ignored due to pending recovery.
int getMsgsSendFailures()
          The number of failed message sends.
int getMsgsSent()
          The number of messages sent.
int getNewsites()
          The number of new site messages received.
LogSequenceNumber getNextLsn()
          In replication environments configured as masters, the next LSN expected.
int getNextPages()
          The next page number we expect to receive.
int getNumSites()
          The number of sites believed to be in the replication group.
int getNumThrottles()
          Transmission limited.
int getOutdated()
          The number of outdated conditions detected.
int getPagesDuplicated()
          The number of duplicate pages received.
int getPagesRecords()
          The number of pages received and stored.
int getPagesRequested()
          The number of pages missed and requested from the master.
int getStartupComplete()
          The client site has completed its startup procedures and is now handling live records from the master.
int getStatus()
          The current replication mode.
int getTxnsApplied()
          The number of transactions applied.
LogSequenceNumber getWaitingLsn()
          The LSN of the first log record we have after missing log records being waited for, or 0 if no log records are currently missing.
int getWaitingPages()
          The page number of the first page we have after missing pages being waited for, or 0 if no pages are currently missing.
String toString()
          For convenience, the ReplicationStats class has a toString method that lists all the data fields.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+getStatus

+
+public int getStatus()
+
+
The current replication mode. Set to DB_REP_MASTER if the environment + is a replication master, DB_REP_CLIENT if the environment is a + replication client, or 0 if replication is not configured. +

+

+
+
+
+
+ +

+getNextLsn

+
+public LogSequenceNumber getNextLsn()
+
+
In replication environments configured as masters, the next LSN + expected. In replication environments configured as clients, the next + LSN to be used. +

+

+
+
+
+
+ +

+getWaitingLsn

+
+public LogSequenceNumber getWaitingLsn()
+
+
The LSN of the first log record we have after missing log records + being waited for, or 0 if no log records are currently missing. +

+

+
+
+
+
+ +

+getNextPages

+
+public int getNextPages()
+
+
The next page number we expect to receive. +

+

+
+
+
+
+ +

+getWaitingPages

+
+public int getWaitingPages()
+
+
The page number of the first page we have after missing pages being + waited for, or 0 if no pages are currently missing. +

+

+
+
+
+
+ +

+getDupmasters

+
+public int getDupmasters()
+
+
The number of duplicate master conditions detected. +

+

+
+
+
+
+ +

+getEnvId

+
+public int getEnvId()
+
+
The current environment ID. +

+

+
+
+
+
+ +

+getEnvPriority

+
+public int getEnvPriority()
+
+
The current environment priority. +

+

+
+
+
+
+ +

+getGen

+
+public int getGen()
+
+
The current generation number. +

+

+
+
+
+
+ +

+getEgen

+
+public int getEgen()
+
+
The current election generation number. +

+

+
+
+
+
+ +

+getLogDuplicated

+
+public int getLogDuplicated()
+
+
The number of duplicate log records received. +

+

+
+
+
+
+ +

+getLogQueued

+
+public int getLogQueued()
+
+
The number of log records currently queued. +

+

+
+
+
+
+ +

+getLogQueuedMax

+
+public int getLogQueuedMax()
+
+
The maximum number of log records ever queued at once. +

+

+
+
+
+
+ +

+getLogQueuedTotal

+
+public int getLogQueuedTotal()
+
+
The total number of log records queued. +

+

+
+
+
+
+ +

+getLogRecords

+
+public int getLogRecords()
+
+
The number of log records received and appended to the log. +

+

+
+
+
+
+ +

+getLogRequested

+
+public int getLogRequested()
+
+
The number of log records missed and requested. +

+

+
+
+
+
+ +

+getMaster

+
+public int getMaster()
+
+
The current master environment ID. +

+

+
+
+
+
+ +

+getMasterChanges

+
+public int getMasterChanges()
+
+
The number of times the master has changed. +

+

+
+
+
+
+ +

+getMsgsBadgen

+
+public int getMsgsBadgen()
+
+
The number of messages received with a bad generation number. +

+

+
+
+
+
+ +

+getMsgsProcessed

+
+public int getMsgsProcessed()
+
+
The number of messages received and processed. +

+

+
+
+
+
+ +

+getMsgsRecover

+
+public int getMsgsRecover()
+
+
The number of messages ignored due to pending recovery. +

+

+
+
+
+
+ +

+getMsgsSendFailures

+
+public int getMsgsSendFailures()
+
+
The number of failed message sends. +

+

+
+
+
+
+ +

+getMsgsSent

+
+public int getMsgsSent()
+
+
The number of messages sent. +

+

+
+
+
+
+ +

+getNewsites

+
+public int getNewsites()
+
+
The number of new site messages received. +

+

+
+
+
+
+ +

+getNumSites

+
+public int getNumSites()
+
+
The number of sites believed to be in the replication group. +

+

+
+
+
+
+ +

+getNumThrottles

+
+public int getNumThrottles()
+
+
Transmission limited. This indicates the number of times data + transmission was stopped to limit the amount of data sent in response to + a single call to Environment.processReplicationMessage. +

+

+
+
+
+
+ +

+getOutdated

+
+public int getOutdated()
+
+
The number of outdated conditions detected. +

+

+
+
+
+
+ +

+getPagesDuplicated

+
+public int getPagesDuplicated()
+
+
The number of duplicate pages received. +

+

+
+
+
+
+ +

+getPagesRecords

+
+public int getPagesRecords()
+
+
The number of pages received and stored. +

+

+
+
+
+
+ +

+getPagesRequested

+
+public int getPagesRequested()
+
+
The number of pages missed and requested from the master. +

+

+
+
+
+
+ +

+getStartupComplete

+
+public int getStartupComplete()
+
+
The client site has completed its startup procedures and is now + handling live records from the master. +

+

+
+
+
+
+ +

+getTxnsApplied

+
+public int getTxnsApplied()
+
+
The number of transactions applied. +

+

+
+
+
+
+ +

+getElections

+
+public int getElections()
+
+
The number of elections held. +

+

+
+
+
+
+ +

+getElectionsWon

+
+public int getElectionsWon()
+
+
The number of elections won. +

+

+
+
+
+
+ +

+getElectionCurWinner

+
+public int getElectionCurWinner()
+
+
The election winner. +

+

+
+
+
+
+ +

+getElectionGen

+
+public int getElectionGen()
+
+
The election generation number. +

+

+
+
+
+
+ +

+getElectionLsn

+
+public LogSequenceNumber getElectionLsn()
+
+
The maximum LSN of the election winner.

+

+
+
+
+
+ +

+getElectionNumSites

+
+public int getElectionNumSites()
+
+
The number of sites expected to participate in elections.

+

+
+
+
+
+ +

+getElectionNumVotes

+
+public int getElectionNumVotes()
+
+
The number of votes required to complete the election. +

+

+
+
+
+
+ +

+getElectionPriority

+
+public int getElectionPriority()
+
+
The election priority. +

+

+
+
+
+
+ +

+getElectionStatus

+
+public int getElectionStatus()
+
+
The current election phase (0 if no election is in progress). +

+

+
+
+
+
+ +

+getElectionTiebreaker

+
+public int getElectionTiebreaker()
+
+
The election tiebreaker value. +

+

+
+
+
+
+ +

+getElectionVotes

+
+public int getElectionVotes()
+
+
The votes received this election round. +

+

+
+
+
+
+ +

+toString

+
+public String toString()
+
+
For convenience, the ReplicationStats class has a toString method + that lists all the data fields. +

+

+
+
+
+ +
diff --git a/db/docs/java/com/sleepycat/db/ReplicationStatus.html b/db/docs/java/com/sleepycat/db/ReplicationStatus.html
new file mode 100644
index 000000000..451a11b60
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/ReplicationStatus.html
@@ -0,0 +1,482 @@
ReplicationStatus (Sleepycat Software, Inc. - Berkeley DB Java API)

+ +com.sleepycat.db +
+Class ReplicationStatus

+
+java.lang.Object
  extended by com.sleepycat.db.ReplicationStatus
+
+
+
+
public final class ReplicationStatus
extends Object
+ +

+The return status from processing a replication message. +
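 As a hedged illustration of how these flags are typically consumed: the object status below is assumed to be the ReplicationStatus returned by Environment.processReplicationMessage for an incoming message; only methods documented on this page are used.

    // Sketch: act on the status returned after processing an incoming message.
    if (status.isNewMaster()) {
        int masterId = status.getEnvID();          // begin treating this ID as the master
    } else if (status.isHoldElection() || status.isDupMaster()) {
        // call Environment.electReplicationMaster to choose a new master
    } else if (status.isNewSite()) {
        DatabaseEntry cdata = status.getCData();   // contact information for the new site
    } else if (status.isNotPermanent()) {
        LogSequenceNumber lsn = status.getLSN();   // record was not made durable
    }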

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
DatabaseEntry getCData()
          Whenever the system receives contact information from a new environment, a copy of the opaque data specified in the cdata parameter to Environment.startReplication is available from the getCData method.
int getEnvID()
          Whenever a new master is elected, the environment ID of the new master is available from the getEnvID method.
LogSequenceNumber getLSN()
          Whenever processing a message results in the processing of records that are permanent, or a message carrying a DB_REP_PERMANENT flag was processed successfully but was not written to disk, the LSN of the record is available from the getLSN method.
boolean isDupMaster()
          The replication group has more than one master.
boolean isHoldElection()
          An election is needed.
boolean isNewMaster()
          A new master has been elected.
boolean isNewSite()
          The system received contact information from a new environment.
boolean isNotPermanent()
          A message carrying a DB_REP_PERMANENT flag was processed successfully, but was not written to disk.
boolean isPermanent()
          Processing this message resulted in the processing of records that are permanent.
boolean isStartupDone()
          The client completed startup synchronization.
boolean isSuccess()
          The operation succeeded.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+isSuccess

+
+public boolean isSuccess()
+
+
The operation succeeded. +

+

+
+
+
+
+ +

+isDupMaster

+
+public boolean isDupMaster()
+
+
The replication group has more than one master. The application + should reconfigure itself as a client by calling the + Environment.startReplication method, and then call for an + election by calling Environment.electReplicationMaster. +

+

+
+
+
+
+ +

+isHoldElection

+
+public boolean isHoldElection()
+
+
An election is needed. The application should call for an election + by calling Environment.electReplicationMaster. +

+

+
+
+
+
+ +

+isPermanent

+
+public boolean isPermanent()
+
+
Processing this message resulted in the processing of records that + are permanent. The maximum LSN of the permanent records stored is + available from the getLSN method. +

+

+
+
+
+
+ +

+isNewMaster

+
+public boolean isNewMaster()
+
+
A new master has been elected. The environment ID of the new master + is available from the getEnvID method. If the recipient of this error + return has been made master, it is the application's responsibility to + begin acting as the master environment. +

+

+
+
+
+
+ +

+isNewSite

+
+public boolean isNewSite()
+
+
The system received contact information from a new environment. A copy of the opaque data specified in the cdata parameter to Environment.startReplication is available from the getCData method. The application should take whatever action is needed to establish a communication channel with this new environment.

+

+
+
+
+
+ +

+isNotPermanent

+
+public boolean isNotPermanent()
+
+
A message carrying a DB_REP_PERMANENT flag was processed successfully, + but was not written to disk. The LSN of this record is available from + the getLSN method. The application should take whatever action is + deemed necessary to retain its recoverability characteristics. +

+

+
+
+
+
+ +

+isStartupDone

+
+public boolean isStartupDone()
+
+
The client completed startup synchronization. The client application + knows this client is now processing live log records received from the + master. +

+

+
+
+
+
+ +

+getCData

+
+public DatabaseEntry getCData()
+
+
Whenever the system receives contact information from a new environment, a copy of the opaque data specified in the cdata parameter to Environment.startReplication is available from the getCData method. The application should take whatever action is needed to establish a communication channel with this new environment.

+

+
+
+
+
+ +

+getEnvID

+
+public int getEnvID()
+
+
Whenever a new master is elected, the environment ID of the new master + is available from the getEnvID method. If the recipient of this error + return has been made master, it is the application's responsibility to + begin acting as the master environment. +

+

+
+
+
+
+ +

+getLSN

+
+public LogSequenceNumber getLSN()
+
+
Whenever processing a message results in the processing of records that are permanent, or a message carrying a DB_REP_PERMANENT flag was processed successfully but was not written to disk, the LSN of the record is available from the getLSN method. The application should take whatever action is deemed necessary to retain its recoverability characteristics.

+

+
+
+
+ +
diff --git a/db/docs/java/com/sleepycat/db/ReplicationTransport.html b/db/docs/java/com/sleepycat/db/ReplicationTransport.html
new file mode 100644
index 000000000..172739b53
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/ReplicationTransport.html
@@ -0,0 +1,325 @@
ReplicationTransport (Sleepycat Software, Inc. - Berkeley DB Java API)

+ +com.sleepycat.db +
+Interface ReplicationTransport

+
+
+
public interface ReplicationTransport
+ +

+An interface specifying a replication transmit function, which sends +information to other members of the replication group. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + +
+Field Summary
static int EID_BROADCAST
          A message that should be broadcast to every environment in the replication group.
static int EID_INVALID
          An invalid environment ID, which may be used to initialize environment ID variables that are subsequently checked for validity.

Method Summary
 int send(Environment environment, DatabaseEntry control, DatabaseEntry rec, LogSequenceNumber lsn, int envid, boolean noBuffer, boolean permanent)
+          The callback used when Berkeley DB needs to transmit a replication + message.
+  +

+ + + + + + + + +
+Field Detail
+ +

+EID_BROADCAST

+
+public static final int EID_BROADCAST
+
+
A message that should be broadcast to every environment in the + replication group. The application may use a true broadcast protocol or + may send the message in sequence to each machine with which it is in + communication. In both cases, the sending site should not be asked to + process the message. +

+

+
See Also:
Constant Field Values
+
+
+ +

+EID_INVALID

+
+public static final int EID_INVALID
+
+
An invalid environment ID, which may be used to initialize environment ID variables that are subsequently checked for validity.

+

+
See Also:
Constant Field Values
+
+ + + + + + + + + + + +
+Method Detail
+ +

+send

+
+public int send(Environment environment,
+                DatabaseEntry control,
+                DatabaseEntry rec,
+                LogSequenceNumber lsn,
+                int envid,
+                boolean noBuffer,
+                boolean permanent)
+         throws DatabaseException
+
+
The callback used when Berkeley DB needs to transmit a replication message. This method must not call back down into Berkeley DB. It must return 0 on success and non-zero on failure. If the transmission fails and the message being sent is necessary to maintain database integrity, and the local log is not configured for synchronous flushing, the local log will be flushed; otherwise, any error from the function will be ignored.

+

+

+
Parameters:
environment - The enclosing database environment handle. +

control - The first of the two data elements to be transmitted. +

rec - The second of the two data elements to be transmitted. +

lsn - If the type of message to be sent has an LSN associated with it, + then the lsn contains the LSN of the record being sent. This LSN + can be used to determine that certain records have been processed + successfully by clients. +

envid - A positive integer identifier that specifies the replication + environment to which the message should be sent. +

+ The value DB_EID_BROADCAST indicates that a message should be + broadcast to every environment in the replication group. The + application may use a true broadcast protocol or may send the + message in sequence to each machine with which it is in + communication. In both cases, the sending site should not be asked + to process the message. +

noBuffer - The record being sent should be transmitted immediately and not buffered + or delayed. +

permanent - The record being sent is critical for maintaining database integrity + (for example, the message includes a transaction commit). The + application should take appropriate action to enforce the reliability + guarantees it has chosen, such as waiting for acknowledgement from one + or more clients. +

+

+

Throws: +
DatabaseException - if a failure occurs.
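 The sketch below implements this interface using the signature documented above. The MessageChannel helper and its broadcast/sendTo methods stand in for the application's own networking layer and are purely hypothetical.

    // Hypothetical application networking layer (an assumption for this sketch).
    public interface MessageChannel {
        void broadcast(byte[] control, byte[] rec) throws Exception;
        void sendTo(int envid, byte[] control, byte[] rec) throws Exception;
    }

    public class SimpleTransport implements ReplicationTransport {
        private final MessageChannel channel;

        public SimpleTransport(MessageChannel channel) { this.channel = channel; }

        public int send(Environment environment, DatabaseEntry control,
                        DatabaseEntry rec, LogSequenceNumber lsn, int envid,
                        boolean noBuffer, boolean permanent) throws DatabaseException {
            try {
                if (envid == EID_BROADCAST)
                    channel.broadcast(control.getData(), rec.getData());
                else
                    channel.sendTo(envid, control.getData(), rec.getData());
                return 0;        // success
            } catch (Exception e) {
                return 1;        // non-zero: the message was not transmitted
            }
        }
    }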
+
+
+ +
diff --git a/db/docs/java/com/sleepycat/db/RunRecoveryException.html b/db/docs/java/com/sleepycat/db/RunRecoveryException.html
new file mode 100644
index 000000000..1899b1697
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/RunRecoveryException.html
@@ -0,0 +1,228 @@
RunRecoveryException (Sleepycat Software, Inc. - Berkeley DB Java API)

+ +com.sleepycat.db +
+Class RunRecoveryException

+
+java.lang.Object
  extended by java.lang.Throwable
      extended by java.lang.Exception
          extended by com.sleepycat.db.DatabaseException
              extended by com.sleepycat.db.RunRecoveryException
+
+
+
All Implemented Interfaces:
Serializable
+
+
+
+
public class RunRecoveryException
extends DatabaseException
+ +

Thrown when the database environment needs to be recovered.

Errors can occur where the only solution is to shut down the application and run recovery. When a fatal error occurs, this exception will be thrown, and all subsequent calls will also fail in the same way. When this happens, recovery should be performed.
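 A hedged sketch of one way to react follows. The handles db and env, the entries key and data, the home directory variable, and the EnvironmentConfig.setRunRecovery accessor are all assumptions made for illustration.

    try {
        db.put(null, key, data);
    } catch (RunRecoveryException e) {
        db.close();
        env.close();                            // discard every handle for this environment
        EnvironmentConfig envConfig = new EnvironmentConfig();
        envConfig.setRunRecovery(true);         // assumed accessor: request recovery on open
        env = new Environment(home, envConfig); // reopen once recovery has run
    }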

+ +

+

+
See Also:
Serialized Form
+
+ +

+ + + + + + + + + + + + + + + + + + + +
Methods inherited from class com.sleepycat.db.DatabaseException
getEnvironment, getErrno
+ + + + + + + +
Methods inherited from class java.lang.Throwable
fillInStackTrace, getCause, getLocalizedMessage, getMessage, getStackTrace, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
+  +

+ + + + + + + + + + +


diff --git a/db/docs/java/com/sleepycat/db/SecondaryConfig.html b/db/docs/java/com/sleepycat/db/SecondaryConfig.html
new file mode 100644
index 000000000..5fdcf8364
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/SecondaryConfig.html
@@ -0,0 +1,443 @@
SecondaryConfig (Sleepycat Software, Inc. - Berkeley DB Java API)

+ +com.sleepycat.db +
+Class SecondaryConfig

+
+java.lang.Object
  extended by com.sleepycat.db.DatabaseConfig
      extended by com.sleepycat.db.SecondaryConfig
+
+
+
All Implemented Interfaces:
Cloneable
+
+
+
+
public class SecondaryConfig
extends DatabaseConfig
implements Cloneable
+ +

+The configuration properties of a SecondaryDatabase extend +those of a primary Database. +The secondary database configuration is specified when calling Environment.openSecondaryDatabase. +

+To create a configuration object with default attributes: +

+    SecondaryConfig config = new SecondaryConfig();
+
+To set custom attributes: +
+    SecondaryConfig config = new SecondaryConfig();
+    config.setAllowCreate(true);
+    config.setAllowPopulate(true);
+
+

+


+

+NOTE: There are two situations where the use of secondary databases without +transactions requires special consideration. When using a transactional +database or when doing read operations only, this note does not apply. +

    +
  • If the secondary database is configured to disallow duplicates, an error can occur while the secondary is being updated because the secondary key value of the record being added is already present in the database. A DatabaseException will be thrown in this situation.
  • If a foreign key constraint is configured with the delete action ABORT (the default setting), a DatabaseException will be thrown if an attempt is made to delete a referenced foreign key.
+In both cases, the operation will be partially complete because the primary +database record will have already been updated or deleted. In the presence +of transactions, the exception will cause the transaction to abort. Without +transactions, it is the responsibility of the caller to handle the results +of the incomplete update or to take steps to prevent this situation from +happening in the first place. +

+ +

+

+ +

+

+
See Also:
Environment.openSecondaryDatabase, +SecondaryDatabase
+
+ +

+ + + + + + + + + + + + + + +
+Field Summary
static SecondaryConfig DEFAULT
+           
+  + + + + + + + + + + +
+Constructor Summary
SecondaryConfig() + +
+          Creates an instance with the system's default settings.
+  + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
 boolean getAllowPopulate()
          Returns whether automatic population of the secondary is allowed.
 SecondaryKeyCreator getKeyCreator()
          Returns the user-supplied object used for creating secondary keys.
 void setAllowPopulate(boolean allowPopulate)
          Specifies whether automatic population of the secondary is allowed.
 void setKeyCreator(SecondaryKeyCreator keyCreator)
+          Specifies the user-supplied object used for creating secondary keys.
+ + + + + + + +
Methods inherited from class com.sleepycat.db.DatabaseConfig
getAllowCreate, getBtreeComparator, getBtreeMinKey, getBtreePrefixCalculator, getBtreeRecordNumbers, getByteOrder, getByteSwapped, getCacheCount, getCacheSize, getChecksum, getDirtyRead, getDuplicateComparator, getEncrypted, getErrorHandler, getErrorPrefix, getErrorStream, getExclusiveCreate, getFeedbackHandler, getHasher, getHashFillFactor, getHashNumElements, getMessageHandler, getMessageStream, getMode, getNoMMap, getPageSize, getPanicHandler, getQueueExtentSize, getQueueInOrder, getReadOnly, getRecordDelimiter, getRecordLength, getRecordNumberAppender, getRecordPad, getRecordSource, getRenumbering, getReverseSplitOff, getSnapshot, getSortedDuplicates, getTransactional, getTransactionNotDurable, getTruncate, getType, getUnsortedDuplicates, getXACreate, setAllowCreate, setBtreeComparator, setBtreeMinKey, setBtreePrefixCalculator, setBtreeRecordNumbers, setByteOrder, setCacheCount, setCacheSize, setChecksum, setDirtyRead, setDuplicateComparator, setEncrypted, setErrorHandler, setErrorPrefix, setErrorStream, setExclusiveCreate, setFeedbackHandler, setHasher, setHashFillFactor, setHashNumElements, setMessageHandler, setMessageStream, setMode, setNoMMap, setPageSize, setPanicHandler, setQueueExtentSize, setQueueInOrder, setReadOnly, setRecordDelimiter, setRecordLength, setRecordNumberAppender, setRecordPad, setRecordSource, setRenumbering, setReverseSplitOff, setSnapshot, setSortedDuplicates, setTransactional, setTransactionNotDurable, setTruncate, setType, setUnsortedDuplicates, setXACreate
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + +
+Field Detail
+ +

+DEFAULT

+
+public static final SecondaryConfig DEFAULT
+
+
+
+
+ + + + + + + + +
+Constructor Detail
+ +

+SecondaryConfig

+
+public SecondaryConfig()
+
+
Creates an instance with the system's default settings. +

+

+ + + + + + + + +
+Method Detail
+ +

+setKeyCreator

+
+public void setKeyCreator(SecondaryKeyCreator keyCreator)
+
+
Specifies the user-supplied object used for creating secondary keys. +

+ Unless the primary database is read-only, a key creator is required + when opening a secondary database. +

+

+

+
+
+
+
Parameters:
keyCreator - the user-supplied object used for creating secondary + keys.
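 For illustration, a key creator might extract the secondary key from a fixed offset of the primary data. The createSecondaryKey signature below follows the common SecondaryKeyCreator pattern and should be treated as an assumption, as should the class name and the 4-byte field layout.

    public class FirstFieldKeyCreator implements SecondaryKeyCreator {
        // Use the first four bytes of the primary data as the secondary key.
        public boolean createSecondaryKey(SecondaryDatabase secondary,
                                          DatabaseEntry key,
                                          DatabaseEntry data,
                                          DatabaseEntry result)
            throws DatabaseException {
            byte[] primaryData = data.getData();
            if (primaryData == null || primaryData.length < 4)
                return false;                 // no secondary key for this record
            byte[] secKey = new byte[4];
            System.arraycopy(primaryData, 0, secKey, 0, 4);
            result.setData(secKey);
            return true;
        }
    }

 The creator would then be registered with config.setKeyCreator(new FirstFieldKeyCreator()) before opening the secondary database.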
+
+
+
+ +

+getKeyCreator

+
+public SecondaryKeyCreator getKeyCreator()
+
+
Returns the user-supplied object used for creating secondary keys. +

+

+

+
+
+
+ +
Returns:
the user-supplied object used for creating secondary keys. +

See Also:
setKeyCreator(com.sleepycat.db.SecondaryKeyCreator)
+
+
+
+ +

+setAllowPopulate

+
+public void setAllowPopulate(boolean allowPopulate)
+
+
Specifies whether automatic population of the secondary is allowed. +

+ If automatic population is allowed, when the secondary database is + opened it is checked to see if it is empty. If it is empty, the + primary database is read in its entirety and keys are added to the + secondary database using the information read from the primary. +

+ If this property is set to true and the environment is transactional, + the secondary database must be opened within a transaction. +

+

+

+
+
+
+
Parameters:
allowPopulate - whether automatic population of the secondary is + allowed.
+
+
+
+ +

+getAllowPopulate

+
+public boolean getAllowPopulate()
+
+
Returns whether automatic population of the secondary is allowed. If + setAllowPopulate(boolean) has not been called, this method returns + false. +

+

+

+
+
+
+ +
Returns:
whether automatic population of the secondary is allowed. +

See Also:
setAllowPopulate(boolean)
+
+
+ +
diff --git a/db/docs/java/com/sleepycat/db/SecondaryCursor.html b/db/docs/java/com/sleepycat/db/SecondaryCursor.html
new file mode 100644
index 000000000..03e6ba5f2
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/SecondaryCursor.html
@@ -0,0 +1,1176 @@
SecondaryCursor (Sleepycat Software, Inc. - Berkeley DB Java API)

+ +com.sleepycat.db +
+Class SecondaryCursor

+
+java.lang.Object
  extended by com.sleepycat.db.Cursor
      extended by com.sleepycat.db.SecondaryCursor
+
+
+
+
public class SecondaryCursor
extends Cursor
+ +

+A database cursor for a secondary database. Cursors are not thread safe +and the application is responsible for coordinating any multithreaded +access to a single cursor object. +

+Secondary cursors are returned by +SecondaryDatabase.openCursor and +SecondaryDatabase.openSecondaryCursor. The distinguishing characteristics +of a secondary cursor are: +

    +
  • Direct calls to put() methods on a secondary cursor are +prohibited. +
  • The Cursor.delete() method of a secondary cursor will delete the primary record as well as all of its associated secondary records. 
  • Calls to all get methods will return the data from the associated +primary database. +
  • Additional get method signatures are provided to return the primary key +in an additional pKey parameter. +
  • Calls to dup(boolean) will return a SecondaryCursor. +
  • The dupSecondary(boolean) method is provided to return a SecondaryCursor that doesn't require casting. +
+

+To obtain a secondary cursor with default attributes: +

+    SecondaryCursor cursor = myDb.openSecondaryCursor(txn, null);
+
+To customize the attributes of a cursor, use a CursorConfig object. +
+    CursorConfig config = new CursorConfig();
+    config.setDirtyRead(true);
+    SecondaryCursor cursor = myDb.openSecondaryCursor(txn, config);
+
+
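 A short iteration sketch follows (mySecDb is an assumed open SecondaryDatabase handle). It uses the pKey-returning getNext signature documented below, with a null lock mode so that default locking attributes are used.

    DatabaseEntry secKey  = new DatabaseEntry();
    DatabaseEntry priKey  = new DatabaseEntry();
    DatabaseEntry priData = new DatabaseEntry();
    SecondaryCursor cursor = mySecDb.openSecondaryCursor(null, null);
    try {
        while (cursor.getNext(secKey, priKey, priData, null)
                == OperationStatus.SUCCESS) {
            // secKey holds the secondary key; priKey/priData hold the primary record
        }
    } finally {
        cursor.close();
    }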

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
 Cursor dup(boolean samePosition)
          Returns a new SecondaryCursor for the same transaction as the original cursor.
 SecondaryCursor dupSecondary(boolean samePosition)
          Returns a new copy of the cursor as a SecondaryCursor.
 OperationStatus getCurrent(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode)
          Returns the key/data pair to which the cursor refers.
 OperationStatus getFirst(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode)
          Move the cursor to the first key/data pair of the database, and return that pair.
 OperationStatus getLast(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode)
          Move the cursor to the last key/data pair of the database, and return that pair.
 OperationStatus getNext(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode)
          Move the cursor to the next key/data pair and return that pair.
 OperationStatus getNextDup(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode)
          If the next key/data pair of the database is a duplicate data record for the current key/data pair, move the cursor to the next key/data pair of the database and return that pair.
 OperationStatus getNextNoDup(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode)
          Move the cursor to the next non-duplicate key/data pair and return that pair.
 OperationStatus getPrev(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode)
          Move the cursor to the previous key/data pair and return that pair.
 OperationStatus getPrevDup(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode)
          If the previous key/data pair of the database is a duplicate data record for the current key/data pair, move the cursor to the previous key/data pair of the database and return that pair.
 OperationStatus getPrevNoDup(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode)
          Move the cursor to the previous non-duplicate key/data pair and return that pair.
 OperationStatus getRecordNumber(DatabaseEntry secondaryRecno, DatabaseEntry primaryRecno, LockMode lockMode)
          Return the record number associated with the cursor.
 OperationStatus getSearchBoth(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode)
          Move the cursor to the specified secondary and primary key, where both the primary and secondary key items must match.
 OperationStatus getSearchBothRange(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode)
          Move the cursor to the specified secondary key and closest matching primary key of the database.
 OperationStatus getSearchKey(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode)
          Move the cursor to the given key of the database, and return the datum associated with the given key.
 OperationStatus getSearchKeyRange(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode)
          Move the cursor to the closest matching key of the database, and return the data item associated with the matching key.
 OperationStatus getSearchRecordNumber(DatabaseEntry secondaryRecno, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode)
          Move the cursor to the specific numbered record of the database, and return the associated key/data pair.
 SecondaryDatabase getSecondaryDatabase()
          Return the SecondaryDatabase handle associated with this Cursor.
+ + + + + + + +
Methods inherited from class com.sleepycat.db.Cursor
close, count, delete, getConfig, getCurrent, getDatabase, getFirst, getLast, getNext, getNextDup, getNextNoDup, getPrev, getPrevDup, getPrevNoDup, getRecordNumber, getSearchBoth, getSearchBothRange, getSearchKey, getSearchKeyRange, getSearchRecordNumber, put, putAfter, putBefore, putCurrent, putKeyFirst, putKeyLast, putNoDupData, putNoOverwrite
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+dup

+
+public Cursor dup(boolean samePosition)
+           throws DatabaseException
+
+
Returns a new SecondaryCursor for the same transaction as + the original cursor. +

+

+
Overrides:
dup in class Cursor
+
+
+
Parameters:
samePosition - If true, the newly created cursor is initialized to refer to the + same position in the database as the original cursor (if any) and + hold the same locks (if any). If false, or the original cursor does + not hold a database position and locks, the returned cursor is + uninitialized and will behave like a newly created cursor. +

+

Returns:
A new cursor with the same transaction and locker ID as the original + cursor. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+dupSecondary

+
+public SecondaryCursor dupSecondary(boolean samePosition)
+                             throws DatabaseException
+
+
Returns a new copy of the cursor as a SecondaryCursor. +

+ Calling this method is the equivalent of calling dup(boolean) and + casting the result to SecondaryCursor. +

+

+

+ +
Throws: +
DatabaseException
See Also:
dup(boolean)
+
+
+
+ +

+getCurrent

+
+public OperationStatus getCurrent(DatabaseEntry key,
+                                  DatabaseEntry pKey,
+                                  DatabaseEntry data,
+                                  LockMode lockMode)
+                           throws DatabaseException
+
+
Returns the key/data pair to which the cursor refers. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the secondary key +returned as output. Its byte array does not need to be initialized by the +caller.
pKey - the primary key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the primary data +returned as output. Multiple results can be retrieved by passing an object +that is a subclass of MultipleEntry, otherwise its byte array does not +need to be initialized by the caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.KEYEMPTY if the key/data pair at the cursor position has been deleted; otherwise, OperationStatus.SUCCESS.
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +

+getFirst

+
+public OperationStatus getFirst(DatabaseEntry key,
+                                DatabaseEntry pKey,
+                                DatabaseEntry data,
+                                LockMode lockMode)
+                         throws DatabaseException
+
+
Move the cursor to the first key/data pair of the database, and return +that pair. If the first key has duplicate values, the first data item +in the set of duplicates is returned. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the secondary key +returned as output. Its byte array does not need to be initialized by the +caller.
pKey - the primary key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the primary data +returned as output. Multiple results can be retrieved by passing an object +that is a subclass of MultipleEntry, otherwise its byte array does not +need to be initialized by the caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +

+getLast

+
+public OperationStatus getLast(DatabaseEntry key,
+                               DatabaseEntry pKey,
+                               DatabaseEntry data,
+                               LockMode lockMode)
+                        throws DatabaseException
+
+
Move the cursor to the last key/data pair of the database, and return +that pair. If the last key has duplicate values, the last data item in +the set of duplicates is returned. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the secondary key +returned as output. Its byte array does not need to be initialized by the +caller.
pKey - the primary key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the primary data +returned as output. Its byte array does not need to be initialized by the +caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +

+getNext

+
+public OperationStatus getNext(DatabaseEntry key,
+                               DatabaseEntry pKey,
+                               DatabaseEntry data,
+                               LockMode lockMode)
+                        throws DatabaseException
+
+
Move the cursor to the next key/data pair and return that pair. If +the matching key has duplicate values, the first data item in the set +of duplicates is returned. +

+If the cursor is not yet initialized, move the cursor to the first +key/data pair of the database, and return that pair. Otherwise, the +cursor is moved to the next key/data pair of the database, and that pair +is returned. In the presence of duplicate key values, the value of the +key may not change. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the secondary key +returned as output. Its byte array does not need to be initialized by the +caller.
pKey - the primary key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the primary data +returned as output. Multiple results can be retrieved by passing an object +that is a subclass of MultipleEntry, otherwise its byte array does not +need to be initialized by the caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +

+getNextDup

+
+public OperationStatus getNextDup(DatabaseEntry key,
+                                  DatabaseEntry pKey,
+                                  DatabaseEntry data,
+                                  LockMode lockMode)
+                           throws DatabaseException
+
+
If the next key/data pair of the database is a duplicate data record for +the current key/data pair, move the cursor to the next key/data pair +of the database and return that pair. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the secondary key +returned as output. Its byte array does not need to be initialized by the +caller.
pKey - the primary key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the primary data +returned as output. Multiple results can be retrieved by passing an object +that is a subclass of MultipleEntry, otherwise its byte array does not +need to be initialized by the caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +

+getNextNoDup

+
+public OperationStatus getNextNoDup(DatabaseEntry key,
+                                    DatabaseEntry pKey,
+                                    DatabaseEntry data,
+                                    LockMode lockMode)
+                             throws DatabaseException
+
+
Move the cursor to the next non-duplicate key/data pair and return +that pair. If the matching key has duplicate values, the first data +item in the set of duplicates is returned. +

+If the cursor is not yet initialized, move the cursor to the first +key/data pair of the database, and return that pair. Otherwise, the +cursor is moved to the next non-duplicate key of the database, and that +key/data pair is returned. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the secondary key +returned as output. Its byte array does not need to be initialized by the +caller.
pKey - the primary key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the primary data +returned as output. Multiple results can be retrieved by passing an object +that is a subclass of MultipleEntry, otherwise its byte array does not +need to be initialized by the caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +

+getPrev

+
+public OperationStatus getPrev(DatabaseEntry key,
+                               DatabaseEntry pKey,
+                               DatabaseEntry data,
+                               LockMode lockMode)
+                        throws DatabaseException
+
+
Move the cursor to the previous key/data pair and return that pair. +If the matching key has duplicate values, the last data item in the set +of duplicates is returned. +

+If the cursor is not yet initialized, move the cursor to the last +key/data pair of the database, and return that pair. Otherwise, the +cursor is moved to the previous key/data pair of the database, and that +pair is returned. In the presence of duplicate key values, the value of +the key may not change. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the secondary key +returned as output. Its byte array does not need to be initialized by the +caller.
pKey - the primary key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the primary data +returned as output. Its byte array does not need to be initialized by the +caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +

+getPrevDup

+
+public OperationStatus getPrevDup(DatabaseEntry key,
+                                  DatabaseEntry pKey,
+                                  DatabaseEntry data,
+                                  LockMode lockMode)
+                           throws DatabaseException
+
+
If the previous key/data pair of the database is a duplicate data record +for the current key/data pair, move the cursor to the previous key/data +pair of the database and return that pair. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the secondary key +returned as output. Its byte array does not need to be initialized by the +caller.
pKey - the primary key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the primary data +returned as output. Its byte array does not need to be initialized by the +caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +

+getPrevNoDup

+
+public OperationStatus getPrevNoDup(DatabaseEntry key,
+                                    DatabaseEntry pKey,
+                                    DatabaseEntry data,
+                                    LockMode lockMode)
+                             throws DatabaseException
+
+
Move the cursor to the previous non-duplicate key/data pair and return +that pair. If the matching key has duplicate values, the last data item +in the set of duplicates is returned. +

+If the cursor is not yet initialized, move the cursor to the last +key/data pair of the database, and return that pair. Otherwise, the +cursor is moved to the previous non-duplicate key of the database, and +that key/data pair is returned. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the secondary key +returned as output. Its byte array does not need to be initialized by the +caller.
pKey - the primary key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the primary data +returned as output. Its byte array does not need to be initialized by the +caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +

+getSearchKey

+
+public OperationStatus getSearchKey(DatabaseEntry key,
+                                    DatabaseEntry pKey,
+                                    DatabaseEntry data,
+                                    LockMode lockMode)
+                             throws DatabaseException
+
+
Move the cursor to the given key of the database, and return the datum +associated with the given key. If the matching key has duplicate +values, the first data item in the set of duplicates is returned. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the secondary key +used as input. It must be initialized with a non-null byte array by the +caller.
pKey - the primary key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the primary data +returned as output. Multiple results can be retrieved by passing an object +that is a subclass of MultipleEntry, otherwise its byte array does not +need to be initialized by the caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +
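For example, a sketch of an exact lookup through the cursor, assuming an already-open SecondaryCursor named secCursor and a DatabaseEntry(byte[]) constructor (the literal key is illustrative):

    // The input key must carry a non-null byte array.
    DatabaseEntry secKey = new DatabaseEntry("smith".getBytes());
    DatabaseEntry priKey = new DatabaseEntry();
    DatabaseEntry priData = new DatabaseEntry();
    if (secCursor.getSearchKey(secKey, priKey, priData, null) ==
        OperationStatus.SUCCESS) {
        // priKey and priData describe the first duplicate for "smith".
    }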

+getSearchKeyRange

+
+public OperationStatus getSearchKeyRange(DatabaseEntry key,
+                                         DatabaseEntry pKey,
+                                         DatabaseEntry data,
+                                         LockMode lockMode)
+                                  throws DatabaseException
+
+
Move the cursor to the closest matching key of the database, and return +the data item associated with the matching key. If the matching key has +duplicate values, the first data item in the set of duplicates is returned. +

+The returned key/data pair is for the smallest key greater than or equal +to the specified key (as determined by the key comparison function), +permitting partial key matches and range searches. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the secondary key +used as input and returned as output. It must be initialized with a non-null +byte array by the caller.
pKey - the primary key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the primary data +returned as output. Multiple results can be retrieved by passing an object +that is a subclass of MultipleEntry, otherwise its byte array does not +need to be initialized by the caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +
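For example, a sketch of a prefix-style range search, assuming an already-open SecondaryCursor named secCursor (the prefix is illustrative):

    // Position at the smallest secondary key greater than or equal to "sm".
    DatabaseEntry secKey = new DatabaseEntry("sm".getBytes());
    DatabaseEntry priKey = new DatabaseEntry();
    DatabaseEntry priData = new DatabaseEntry();
    OperationStatus status =
        secCursor.getSearchKeyRange(secKey, priKey, priData, null);
    if (status == OperationStatus.SUCCESS) {
        // secKey now holds the key actually found; check it against the prefix.
        boolean prefixMatch = new String(secKey.getData()).startsWith("sm");
    }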

+getSearchBoth

+
+public OperationStatus getSearchBoth(DatabaseEntry key,
+                                     DatabaseEntry pKey,
+                                     DatabaseEntry data,
+                                     LockMode lockMode)
+                              throws DatabaseException
+
+
Move the cursor to the specified secondary and primary key, where both +the primary and secondary key items must match. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the secondary key +used as input. It must be initialized with a non-null byte array by the +caller.
pKey - the primary key +used as input. It must be initialized with a non-null byte array by the +caller.
data - the primary data +returned as output. Its byte array does not need to be initialized by the +caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +
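For example, a sketch that checks whether a specific secondary/primary association exists, assuming an already-open SecondaryCursor named secCursor (the literal keys are illustrative):

    DatabaseEntry secKey = new DatabaseEntry("smith".getBytes());
    DatabaseEntry priKey = new DatabaseEntry("employee-42".getBytes());
    DatabaseEntry priData = new DatabaseEntry();
    // SUCCESS means the secondary key "smith" indexes primary key "employee-42".
    boolean exists = secCursor.getSearchBoth(secKey, priKey, priData, null) ==
        OperationStatus.SUCCESS;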

+getSearchBothRange

+
+public OperationStatus getSearchBothRange(DatabaseEntry key,
+                                          DatabaseEntry pKey,
+                                          DatabaseEntry data,
+                                          LockMode lockMode)
+                                   throws DatabaseException
+
+
Move the cursor to the specified secondary key and closest matching primary +key of the database. +

+In the case of any database supporting sorted duplicate sets, the returned +key/data pair is for the smallest primary key greater than or equal to the +specified primary key (as determined by the key comparison function), +permitting partial matches and range searches in duplicate data sets. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
key - the secondary key +used as input and returned as output. It must be initialized with a non-null +byte array by the caller.
pKey - the primary key +used as input and returned as output. It must be initialized with a non-null +byte array by the caller.
data - the primary data +returned as output. Its byte array does not need to be initialized by the +caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +
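For example, a sketch of a range search within one duplicate set, assuming an already-open SecondaryCursor named secCursor over a database with sorted duplicates (the literal keys are illustrative):

    DatabaseEntry secKey = new DatabaseEntry("smith".getBytes());
    DatabaseEntry priKey = new DatabaseEntry("employee-40".getBytes());
    DatabaseEntry priData = new DatabaseEntry();
    if (secCursor.getSearchBothRange(secKey, priKey, priData, null) ==
        OperationStatus.SUCCESS) {
        // priKey now holds the smallest primary key at or after "employee-40"
        // within the duplicates for "smith".
    }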

+getSecondaryDatabase

+
+public SecondaryDatabase getSecondaryDatabase()
+
+
Return the SecondaryDatabase handle associated with this Cursor. +

+

+

+ +
Returns:
The SecondaryDatabase handle associated with this Cursor. +

+
+
+
+ +

+getRecordNumber

+
+public OperationStatus getRecordNumber(DatabaseEntry secondaryRecno,
+                                       DatabaseEntry primaryRecno,
+                                       LockMode lockMode)
+                                throws DatabaseException
+
+
Return the record number associated with the cursor. The record number +will be returned in the data parameter. +

+For this method to be called, the underlying database must be of type +Btree, and it must have been configured to support record numbers. +

+When called on a cursor opened on a database that has been made into a +secondary index, the method returns the record numbers of both the +secondary and primary databases. If either underlying database is not of +type Btree or is not configured with record numbers, the out-of-band +record number of 0 is returned. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
secondaryRecno - the secondary record number +returned as output. Its byte array does not need to be initialized by the +caller.
primaryRecno - the primary record number +returned as output. Its byte array does not need to be initialized by the +caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+
+ +
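For example, a sketch that reads the record numbers at the current cursor position, assuming an already-open SecondaryCursor named secCursor whose underlying databases are Btrees configured with record numbers:

    DatabaseEntry secRecno = new DatabaseEntry();
    DatabaseEntry priRecno = new DatabaseEntry();
    if (secCursor.getRecordNumber(secRecno, priRecno, null) ==
        OperationStatus.SUCCESS) {
        // Each entry's byte array encodes a record number as described in
        // DatabaseEntry; the out-of-band value 0 means no record number.
    }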

+getSearchRecordNumber

+
+public OperationStatus getSearchRecordNumber(DatabaseEntry secondaryRecno,
+                                             DatabaseEntry pKey,
+                                             DatabaseEntry data,
+                                             LockMode lockMode)
+                                      throws DatabaseException
+
+
Move the cursor to the specific numbered record of the database, and +return the associated key/data pair. +

+The data field of the specified key must be a byte array containing a +record number, as described in DatabaseEntry. This determines +the record to be retrieved. +

+For this method to be called, the underlying database must be of type +Btree, and it must have been configured to support record numbers. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
secondaryRecno - the secondary record number +used as input. It must be initialized with a non-null byte array by the +caller.
pKey - the primary key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the primary data +returned as output. Multiple results can be retrieved by passing an object +that is a subclass of MultipleEntry, otherwise its byte array does not +need to be initialized by the caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+ +
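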
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/SecondaryDatabase.html b/db/docs/java/com/sleepycat/db/SecondaryDatabase.html new file mode 100644 index 000000000..70f482578 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/SecondaryDatabase.html @@ -0,0 +1,617 @@ + + + + + + +SecondaryDatabase (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class SecondaryDatabase

+
+java.lang.Object
+  extended bycom.sleepycat.db.Database
+      extended bycom.sleepycat.db.SecondaryDatabase
+
+
+
+
public class SecondaryDatabase
extends Database
+ +

+A secondary database handle. +

+Secondary databases are opened with Environment.openSecondaryDatabase and are +always associated with a single primary database. The distinguishing +characteristics of a secondary database are: +

    +
  • Records are automatically added to a secondary database when records are +added, modified and deleted in the primary database. Direct calls to +put() methods on a secondary database are prohibited.
  • +
  • The delete method of a secondary database will delete +the primary record as well as all its associated secondary records.
  • +
  • Calls to all get() methods will return the data from the +associated primary database.
  • +
  • Additional get() method signatures are provided to return +the primary key in an additional pKey parameter.
  • +
  • Calls to openCursor will return a SecondaryCursor, which itself has get() methods that return +the data of the primary database and additional get() method +signatures for returning the primary key.
  • +
  • The openSecondaryCursor method is provided +to return a SecondaryCursor that doesn't require casting.
  • +
+

+

+Before opening or creating a secondary database you must implement the +SecondaryKeyCreator interface. For example: +

+    class MyKeyCreator implements SecondaryKeyCreator {
+        public boolean createSecondaryKey(SecondaryDatabase secondary,
+                                        DatabaseEntry key,
+                                        DatabaseEntry data,
+                                        DatabaseEntry result)
+                throws DatabaseException {
+           //
+           // DO HERE: Extract the secondary key from the primary key and data,
+           // and set the secondary key into the result parameter.
+           //
+            return true;
+        }
+    }
+
+

+

+To create a secondary database that supports duplicates: +

+    Database primaryDb; // The primary database must already be open.
+    SecondaryConfig secConfig = new SecondaryConfig();
+    secConfig.setAllowCreate(true);
+    secConfig.setSortedDuplicates(true);
+    secConfig.setKeyCreator(new MyKeyCreator());
+    SecondaryDatabase newDb = env.openSecondaryDatabase(txn, "foo",
+        primaryDb, secConfig);
+
+

+If a primary database is to be associated with one or more secondary +databases, it may not be configured for duplicates. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
SecondaryDatabase(String fileName, + String databaseName, + Database primaryDatabase, + SecondaryConfig config) + +
+          Open a database.
+  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ OperationStatusget(Transaction txn, + DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Retrieves the key/data pair with the given key.
+ DatabasegetPrimaryDatabase() + +
+          Returns the primary database associated with this secondary database.
+ OperationStatusgetSearchBoth(Transaction txn, + DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Retrieves the key/data pair with the specified secondary and primary key, that +is, both the primary and secondary key items must match.
+ OperationStatusgetSearchRecordNumber(Transaction txn, + DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Retrieves the key/data pair associated with the specific numbered record of the database.
+ SecondaryConfiggetSecondaryConfig() + +
+          Returns a copy of the secondary configuration of this database.
+ SecondaryCursoropenSecondaryCursor(Transaction txn, + CursorConfig cursorConfig) + +
+          Obtain a cursor on a database, returning a SecondaryCursor.
+ + + + + + + +
Methods inherited from class com.sleepycat.db.Database
append, close, close, consume, delete, get, getCacheFile, getConfig, getDatabaseFile, getDatabaseName, getEnvironment, getKeyRange, getSearchBoth, getSearchRecordNumber, getStats, join, openCursor, openSequence, put, putNoDupData, putNoOverwrite, remove, removeSequence, rename, setConfig, sync, truncate, upgrade, verify
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+SecondaryDatabase

+
+public SecondaryDatabase(String fileName,
+                         String databaseName,
+                         Database primaryDatabase,
+                         SecondaryConfig config)
+                  throws DatabaseException,
+                         FileNotFoundException
+
+
Open a database. +

+The database is represented by the file and database parameters. +

+The currently supported database file formats (or access +methods) are Btree, Hash, Queue, and Recno. The Btree format is a +representation of a sorted, balanced tree structure. The Hash format +is an extensible, dynamic hashing scheme. The Queue format supports +fast access to fixed-length records accessed sequentially or by logical +record number. The Recno format supports fixed- or variable-length +records, accessed sequentially or by logical record number, and +optionally backed by a flat text file. +

+Storage and retrieval are based on key/data pairs; see DatabaseEntry +for more information. +

+Opening a database is a relatively expensive operation, and maintaining +a set of open databases will normally be preferable to repeatedly +opening and closing the database for each new query. +

+In-memory databases never intended to be preserved on disk may be +created by setting both the fileName and databaseName parameters to +null. Note that in-memory databases can only ever be shared by sharing +the single database handle that created them, in circumstances where +doing so is safe. The environment variable TMPDIR may +be used as a directory in which to create temporary backing files. +

+

+

Parameters:
fileName - The name of an underlying file that will be used to back the database. +On Windows platforms, this argument will be interpreted as a UTF-8 +string, which is equivalent to ASCII for Latin characters. +

databaseName - An optional parameter that allows applications to have multiple +databases in a single file. Although no databaseName parameter needs +to be specified, it is an error to attempt to open a second database in +a physical file that was not initially created using a databaseName +parameter. Further, the databaseName parameter is not supported by the +Queue format. +

primaryDatabase - a database handle for the primary database that is to be indexed. +

config - The secondary database open attributes. If null, default attributes are used.
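For example, a sketch of opening a secondary index through this constructor, assuming the primary database is already open and reusing the MyKeyCreator class shown above (the file name is illustrative):

    Database primaryDb; // The primary database must already be open.
    SecondaryConfig secConfig = new SecondaryConfig();
    secConfig.setAllowCreate(true);
    secConfig.setSortedDuplicates(true);
    secConfig.setKeyCreator(new MyKeyCreator());
    SecondaryDatabase secDb =
        new SecondaryDatabase("index.db", null, primaryDb, secConfig);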
+ + + + + + + + +
+Method Detail
+ +

+getSecondaryConfig

+
+public SecondaryConfig getSecondaryConfig()
+                                   throws DatabaseException
+
+
Returns a copy of the secondary configuration of this database. +

+

+

+ +
Returns:
a copy of the secondary configuration of this database. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+get

+
+public OperationStatus get(Transaction txn,
+                           DatabaseEntry key,
+                           DatabaseEntry pKey,
+                           DatabaseEntry data,
+                           LockMode lockMode)
+                    throws DatabaseException
+
+
Retrieves the key/data pair with the given key. If the matching key has +duplicate values, the first data item in the set of duplicates is returned. +Retrieval of duplicates requires the use of Cursor operations. +

+

+

+
Parameters:
txn - For a transactional database, an explicit transaction may be specified to +transaction-protect the operation, or null may be specified to perform the +operation without transaction protection. For a non-transactional database, +null must be specified. +

key - the secondary key +used as input. It must be initialized with a non-null byte array by the +caller. +

pKey - the primary key +returned as output. Its byte array does not need to be initialized by the +caller. +

data - the primary data +returned as output. Its byte array does not need to be initialized by the +caller. +

lockMode - the locking attributes; if null, default attributes are used. +

+

Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs.
+
+
+
+ +
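For example, a sketch of a non-transactional lookup through the secondary index, assuming an already-open SecondaryDatabase handle named secDb and a DatabaseEntry(byte[]) constructor (the literal key is illustrative):

    // Look up "smith" in the secondary index; the returned data comes
    // from the primary database, and priKey identifies the primary record.
    DatabaseEntry secKey = new DatabaseEntry("smith".getBytes());
    DatabaseEntry priKey = new DatabaseEntry();
    DatabaseEntry priData = new DatabaseEntry();
    OperationStatus status = secDb.get(null, secKey, priKey, priData, null);
    if (status == OperationStatus.SUCCESS) {
        System.out.println("primary key: " + new String(priKey.getData()));
    }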

+getSearchBoth

+
+public OperationStatus getSearchBoth(Transaction txn,
+                                     DatabaseEntry key,
+                                     DatabaseEntry pKey,
+                                     DatabaseEntry data,
+                                     LockMode lockMode)
+                              throws DatabaseException
+
+
Retrieves the key/data pair with the specified secondary and primary key, that +is, both the primary and secondary key items must match. +

+

+

+
Parameters:
txn - For a transactional database, an explicit transaction may be specified to +transaction-protect the operation, or null may be specified to perform the +operation without transaction protection. For a non-transactional database, +null must be specified.
key - the secondary key +used as input. It must be initialized with a non-null byte array by the +caller.
pKey - the primary key +used as input. It must be initialized with a non-null byte array by the +caller.
data - the primary data +returned as output. Its byte array does not need to be initialized by the +caller. +

lockMode - the locking attributes; if null, default attributes are used. +

+

Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +

+

+

Throws: +
DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs.
+
+
+
+ +

+getPrimaryDatabase

+
+public Database getPrimaryDatabase()
+                            throws DatabaseException
+
+
Returns the primary database associated with this secondary database. +

+

+

+ +
Returns:
the primary database associated with this secondary database. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+openSecondaryCursor

+
+public SecondaryCursor openSecondaryCursor(Transaction txn,
+                                           CursorConfig cursorConfig)
+                                    throws DatabaseException
+
+
Obtain a cursor on a database, returning a SecondaryCursor. + Calling this method is the equivalent of calling Database.openCursor(com.sleepycat.db.Transaction, com.sleepycat.db.CursorConfig) and + casting the result to SecondaryCursor. +

+

+

+
Parameters:
txn - To use a cursor for writing to a transactional database, an explicit +transaction must be specified. For read-only access to a transactional +database, the transaction may be null. For a non-transactional database, the +transaction must be null. +

+To transaction-protect cursor operations, cursors must be opened and +closed within the context of a transaction, and the txn parameter +specifies the transaction context in which the cursor will be used. +

cursorConfig - The cursor attributes. If null, default attributes are used. +

+

Returns:
A secondary database cursor. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +
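For example, a sketch that opens a typed cursor, scans the index, and closes the cursor; it assumes a non-transactional SecondaryDatabase named secDb, the getNext form documented earlier for this class, and the inherited Cursor close() method:

    SecondaryCursor secCursor = null;
    try {
        secCursor = secDb.openSecondaryCursor(null, null);
        DatabaseEntry secKey = new DatabaseEntry();
        DatabaseEntry priKey = new DatabaseEntry();
        DatabaseEntry priData = new DatabaseEntry();
        // Visit each secondary entry together with its primary record.
        while (secCursor.getNext(secKey, priKey, priData, null) ==
               OperationStatus.SUCCESS) {
            // process secKey / priKey / priData here
        }
    } finally {
        if (secCursor != null)
            secCursor.close();
    }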

+getSearchRecordNumber

+
+public OperationStatus getSearchRecordNumber(Transaction txn,
+                                             DatabaseEntry key,
+                                             DatabaseEntry pKey,
+                                             DatabaseEntry data,
+                                             LockMode lockMode)
+                                      throws DatabaseException
+
+
Retrieves the key/data pair associated with the specific numbered record of the database. +

+The data field of the specified key must be a byte array containing a +record number, as described in DatabaseEntry. This determines +the record to be retrieved. +

+For this method to be called, the underlying database must be of type +Btree, and it must have been configured to support record numbers. +

+If this method fails for any reason, the position of the cursor will be +unchanged. +

+

+
Parameters:
txn - For a transactional database, an explicit transaction may be specified to +transaction-protect the operation, or null may be specified to perform the +operation without transaction protection. For a non-transactional database, +null must be specified.
key - the secondary record number +used as input. It must be initialized with a non-null byte array by the +caller.
pKey - the primary key +returned as output. Its byte array does not need to be initialized by the +caller.
data - the primary data +returned as output. Multiple results can be retrieved by passing an object +that is a subclass of MultipleEntry, otherwise its byte array does not +need to be initialized by the caller.
lockMode - the locking attributes; if null, default attributes are used. +
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is +found; otherwise, OperationStatus.SUCCESS. +
Throws: +
NullPointerException - if a DatabaseEntry parameter is null or +does not contain a required non-null byte array. +

+

DeadlockException - if the operation was selected to resolve a +deadlock. +

+

IllegalArgumentException - if an invalid parameter was specified. +

+

DatabaseException - if a failure occurs. +

+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/SecondaryKeyCreator.html b/db/docs/java/com/sleepycat/db/SecondaryKeyCreator.html new file mode 100644 index 000000000..f8c47910d --- /dev/null +++ b/db/docs/java/com/sleepycat/db/SecondaryKeyCreator.html @@ -0,0 +1,274 @@ + + + + + + +SecondaryKeyCreator (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Interface SecondaryKeyCreator

+
+
All Known Implementing Classes:
SerialSerialKeyCreator, TupleSerialKeyCreator, TupleTupleKeyCreator
+
+
+
+
public interface SecondaryKeyCreator
+ +

+An interface specifying how secondary keys for a +SecondaryDatabase are created. +

+A secondary key may be derived from the primary key, primary data, or a +combination of the primary key and data. For secondary keys that are +optional, the key creator method may return false and the key/data pair will +not be indexed. To ensure the integrity of a secondary database the key +creator method must always return the same result for a given set of input +parameters. +

+The key creator object is specified by calling +SecondaryConfig.setKeyCreator. +The secondary database configuration is specified when calling +Environment.openSecondaryDatabase. +

+For example: +

+    class MyKeyCreator implements SecondaryKeyCreator {
+        public boolean createSecondaryKey(SecondaryDatabase secondary,
+                                            DatabaseEntry key,
+                                            DatabaseEntry data,
+                                            DatabaseEntry result)
+                throws DatabaseException {
+            //
+            // DO HERE: Extract the secondary key from the primary key and data,
+            // and set the secondary key into the result parameter.
+            //
+            return true;
+        }
+    }
+    ...
+    SecondaryConfig secConfig = new SecondaryConfig();
+    secConfig.setKeyCreator(new MyKeyCreator());
+
+

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ booleancreateSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry result) + +
+          Creates a secondary key entry, given a primary key and data entry.
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+createSecondaryKey

+
+public boolean createSecondaryKey(SecondaryDatabase secondary,
+                                  DatabaseEntry key,
+                                  DatabaseEntry data,
+                                  DatabaseEntry result)
+                           throws DatabaseException
+
+
Creates a secondary key entry, given a primary key and data entry. +

+

+

+
Parameters:
secondary - the database to which the secondary key will be added. +

key - the primary key entry. This parameter must not be modified + by this method. +

data - the primary data entry. This parameter must not be modified + by this method. +

result - the secondary key created by this method. +

+

Returns:
true if a key was created, or false to indicate that the key is + not present. +

+

Throws: +
DatabaseException - if an error occurs attempting to create the + secondary key.
+
+
+ +
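For example, a sketch of a concrete key creator that indexes a fixed-length field assumed to sit at the start of the primary data (the 16-byte layout is illustrative, and the entry's bytes are assumed to begin at offset 0):

    class LastNameKeyCreator implements SecondaryKeyCreator {
        public boolean createSecondaryKey(SecondaryDatabase secondary,
                                          DatabaseEntry key,
                                          DatabaseEntry data,
                                          DatabaseEntry result)
                throws DatabaseException {
            // Illustrative record layout: the first 16 bytes of the primary
            // data hold the last-name field to be indexed.
            byte[] lastName = new byte[16];
            System.arraycopy(data.getData(), 0, lastName, 0, 16);
            result.setData(lastName);
            return true;  // returning false would leave the record unindexed
        }
    }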
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/Sequence.html b/db/docs/java/com/sleepycat/db/Sequence.html new file mode 100644 index 000000000..50e1d82ff --- /dev/null +++ b/db/docs/java/com/sleepycat/db/Sequence.html @@ -0,0 +1,372 @@ + + + + + + +Sequence (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class Sequence

+
+java.lang.Object
+  extended bycom.sleepycat.db.Sequence
+
+
+
+
public class Sequence
extends Object
+ +

+A Sequence handle is used to manipulate a sequence record in a database. +Sequence handles are opened using the Database.openSequence method. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ voidclose() + +
+          Close a sequence.
+ longget(Transaction txn, + int delta) + +
+          Return the next available element in the sequence and change the sequence + value by delta.
+ DatabasegetDatabase() + +
+          Return the Database handle associated with this sequence.
+ DatabaseEntrygetKey() + +
+          Return the DatabaseEntry used to open this sequence.
+ SequenceStatsgetStats(StatsConfig config) + +
+          Return statistical information about the sequence.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+close

+
+public void close()
+           throws DatabaseException
+
+
Close a sequence. Any unused cached values are lost. +

+ The sequence handle may not be used again after this method has been + called, regardless of the method's success or failure. +

+

+

+

+ +
Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+get

+
+public long get(Transaction txn,
+                int delta)
+         throws DatabaseException
+
+
Return the next available element in the sequence and change the sequence + value by delta. The value of delta must be + greater than zero. If there are enough cached values in the sequence + handle then they will be returned. Otherwise the next value will be + fetched from the database and incremented (decremented) by enough to cover + the delta and the next batch of cached values. +

+ The txn handle must be null if the sequence handle was opened + with a non-zero cache size. +

+ For maximum concurrency, a non-zero cache size should be specified prior to + opening the sequence handle, the txn handle should be + null, and SequenceConfig.setAutoCommitNoSync should + be called to disable log flushes. +

+

+

+
Parameters:
txn - For a transactional database, an explicit transaction may be specified, or null +may be specified to use auto-commit. For a non-transactional database, null +must be specified. +

delta - the amount by which to increment or decrement the sequence +

+

Returns:
the next available element in the sequence +
Throws: +
DatabaseException
+
+
+
+ +
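For example, a sketch that draws values from a sequence, assuming an already-open Database named db and an openSequence(txn, key, config) form of the Database.openSequence method mentioned above (the key name and cache size are illustrative):

    DatabaseEntry seqKey = new DatabaseEntry("order-ids".getBytes());
    SequenceConfig seqConfig = new SequenceConfig();
    seqConfig.setAllowCreate(true);
    seqConfig.setCacheSize(1000);        // get() then requires a null txn
    Sequence seq = db.openSequence(null, seqKey, seqConfig);
    try {
        long next = seq.get(null, 1);    // next element, advance by one
        long first = seq.get(null, 100); // first element of a block of 100
        System.out.println(next + " " + first);
    } finally {
        seq.close();                     // unused cached values are lost
    }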

+getDatabase

+
+public Database getDatabase()
+                     throws DatabaseException
+
+
Return the Database handle associated with this sequence. +

+

+

+ +
Returns:
The Database handle associated with this sequence. +
Throws: +
DatabaseException
+
+
+
+ +

+getKey

+
+public DatabaseEntry getKey()
+                     throws DatabaseException
+
+
Return the DatabaseEntry used to open this sequence. +

+

+

+ +
Returns:
The DatabaseEntry used to open this sequence. +
Throws: +
DatabaseException
+
+
+
+ +

+getStats

+
+public SequenceStats getStats(StatsConfig config)
+                       throws DatabaseException
+
+
Return statistical information about the sequence. +

+ In the presence of multiple threads or processes accessing an active + sequence, the information returned by this method may be out-of-date. +

+ The getStats method cannot be transaction-protected. For this reason, it + should be called in a thread of control that has no open cursors or active + transactions. +

+

+

+
Parameters:
config - The statistics attributes; if null, default attributes are used. +

+

Returns:
Sequence statistics. +
Throws: +
DatabaseException
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/SequenceConfig.html b/db/docs/java/com/sleepycat/db/SequenceConfig.html new file mode 100644 index 000000000..d9921f04a --- /dev/null +++ b/db/docs/java/com/sleepycat/db/SequenceConfig.html @@ -0,0 +1,779 @@ + + + + + + +SequenceConfig (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class SequenceConfig

+
+java.lang.Object
+  extended bycom.sleepycat.db.SequenceConfig
+
+
+
All Implemented Interfaces:
Cloneable
+
+
+
+
public class SequenceConfig
extends Object
implements Cloneable
+ +

+Specify the attributes of a sequence. +

+ +

+


+ +

+ + + + + + + + + + + + + + +
+Field Summary
+static SequenceConfigDEFAULT + +
+           
+  + + + + + + + + + + +
+Constructor Summary
SequenceConfig() + +
+          An instance created using the default constructor is initialized with + the system's default settings.
+  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ booleangetAllowCreate() + +
+          Return if the Database.openSequence method is configured + to create the sequence if it does not already exist.
+ booleangetAutoCommitNoSync() + +
+          Return if the auto-commit operations on the sequence are configured to not + flush the transaction log.
+ intgetCacheSize() + +
+          Return the number of elements cached by a sequence handle.
+ booleangetDecrement() + +
+          Return if the sequence is configured to decrement.
+ booleangetExclusiveCreate() + +
+          Return if the Database.openSequence method is configured to + fail if the database already exists.
+ longgetInitialValue() + +
+          Return the initial value for a sequence.
+ longgetRangeMax() + +
+          Return the maximum value for the sequence.
+ longgetRangeMin() + +
+          Return the minimum value for the sequence.
+ booleangetWrap() + +
+          Return if the sequence will wrap around when it is incremented + (decremented) past the specified maximum (minimum) value.
+ voidsetAllowCreate(boolean allowCreate) + +
+          Configure the Database.openSequence method to + create the sequence if it does not already exist.
+ voidsetAutoCommitNoSync(boolean autoCommitNoSync) + +
+          Configure auto-commit operations on the sequence to not flush + the transaction log.
+ voidsetCacheSize(int cacheSize) + +
+          Configure the number of elements cached by a sequence handle.
+ voidsetDecrement(boolean decrement) + +
+          Specify that the sequence should be decremented.
+ voidsetExclusiveCreate(boolean exclusiveCreate) + +
+          Configure the Database.openSequence method to + fail if the database already exists.
+ voidsetInitialValue(long initialValue) + +
+          Set the initial value for a sequence.
+ voidsetRange(long min, + long max) + +
+          Configure a sequence range.
+ voidsetWrap(boolean wrap) + +
+          Specify that the sequence should wrap around when it is + incremented (decremented) past the specified maximum (minimum) value.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + +
+Field Detail
+ +

+DEFAULT

+
+public static final SequenceConfig DEFAULT
+
+
+
+
+ + + + + + + + +
+Constructor Detail
+ +

+SequenceConfig

+
+public SequenceConfig()
+
+
An instance created using the default constructor is initialized with + the system's default settings. +

+

+ + + + + + + + +
+Method Detail
+ +

+setAllowCreate

+
+public void setAllowCreate(boolean allowCreate)
+
+
Configure the Database.openSequence method to + create the sequence if it does not already exist. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
allowCreate - If true, +configure the Database.openSequence method to + create the sequence if it does not already exist.
+
+
+
+ +

+getAllowCreate

+
+public boolean getAllowCreate()
+
+
Return if the Database.openSequence method is configured + to create the sequence if it does not already exist. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the Database.openSequence method is configured + to create the sequence if it does not already exist.
+
+
+
+ +

+setAutoCommitNoSync

+
+public void setAutoCommitNoSync(boolean autoCommitNoSync)
+
+
Configure auto-commit operations on the sequence to not flush + the transaction log. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
autoCommitNoSync - If true, +configure auto-commit operations on the sequence to not flush + the transaction log.
+
+
+
+ +

+getAutoCommitNoSync

+
+public boolean getAutoCommitNoSync()
+
+
Return if the auto-commit operations on the sequence are configured to not + flush the transaction log. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the auto-commit operations on the sequence are configured to not + flush the transaction log.
+
+
+
+ +

+setCacheSize

+
+public void setCacheSize(int cacheSize)
+
+
Configure the number of elements cached by a sequence handle. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
cacheSize - The number of elements cached by a sequence handle.
+
+
+
+ +

+getCacheSize

+
+public int getCacheSize()
+
+
Return the number of elements cached by a sequence handle. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The number of elements cached by a sequence handle.
+
+
+
+ +

+setDecrement

+
+public void setDecrement(boolean decrement)
+
+
Specify that the sequence should be decremented. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
decrement - If true, +specify that the sequence should be decremented.
+
+
+
+ +

+getDecrement

+
+public boolean getDecrement()
+
+
Return if the sequence is configured to decrement. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the sequence is configured to decrement.
+
+
+
+ +

+setExclusiveCreate

+
+public void setExclusiveCreate(boolean exclusiveCreate)
+
+
Configure the Database.openSequence method to + fail if the database already exists. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
exclusiveCreate - If true, +configure the Database.openSequence method to + fail if the database already exists.
+
+
+
+ +

+getExclusiveCreate

+
+public boolean getExclusiveCreate()
+
+
Return if the Database.openSequence method is configured to + fail if the database already exists. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the Database.openSequence method is configured to + fail if the database already exists.
+
+
+
+ +

+setInitialValue

+
+public void setInitialValue(long initialValue)
+
+
Set the initial value for a sequence. +

This call is only + effective when the sequence is being created. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
initialValue - The initial value for the sequence.
+
+
+
+ +

+getInitialValue

+
+public long getInitialValue()
+
+
Return the initial value for a sequence. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The initial value for a sequence.
+
+
+
+ +

+setRange

+
+public void setRange(long min,
+                     long max)
+
+
Configure a sequence range. This call is only effective when the + sequence is being created. +

+

+

+
+
+
+
Parameters:
min - The minimum value for the sequence.
max - The maximum value for the sequence.
+
+
+
+ +
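For example, a sketch of a configuration combining a range with the other creation-time attributes described on this page (the numeric bounds are illustrative):

    SequenceConfig config = new SequenceConfig();
    config.setAllowCreate(true);   // create the sequence if necessary
    config.setInitialValue(1);     // only effective at creation time
    config.setRange(1, 1000000);   // likewise only effective at creation
    config.setWrap(true);          // wrap past the maximum back to the minimum
    config.setCacheSize(100);      // cache 100 elements per handle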

+getRangeMin

+
+public long getRangeMin()
+
+
Return the minimum value for the sequence. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The minimum value for the sequence.
+
+
+
+ +

+getRangeMax

+
+public long getRangeMax()
+
+
Return the maximum value for the sequence. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
The maximum value for the sequence.
+
+
+
+ +

+setWrap

+
+public void setWrap(boolean wrap)
+
+
Specify that the sequence should wrap around when it is + incremented (decremented) past the specified maximum (minimum) value. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+
Parameters:
wrap - If true, +specify that the sequence should wrap around when it is + incremented (decremented) past the specified maximum (minimum) value.
+
+
+
+ +

+getWrap

+
+public boolean getWrap()
+
+
Return if the sequence will wrap around when it is incremented + (decremented) past the specified maximum (minimum) value. +

+This method may be called at any time during the life of the application. +

+

+

+
+
+
+ +
Returns:
If the sequence will wrap around when it is incremented + (decremented) past the specified maximum (minimum) value.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/SequenceStats.html b/db/docs/java/com/sleepycat/db/SequenceStats.html new file mode 100644 index 000000000..dec708d1f --- /dev/null +++ b/db/docs/java/com/sleepycat/db/SequenceStats.html @@ -0,0 +1,403 @@ + + + + + + +SequenceStats (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class SequenceStats

+
+java.lang.Object
+  extended bycom.sleepycat.db.SequenceStats
+
+
+
+
public class SequenceStats
extends Object
+ +

+A SequenceStats object is used to return sequence statistics. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ intgetCacheSize() + +
+          The number of values that will be cached in this handle.
+ longgetCurrent() + +
+          The current value of the sequence in the database.
+ intgetFlags() + +
+          The flags value for the sequence.
+ longgetLastValue() + +
+          The last cached value of the sequence.
+ longgetMax() + +
+          The maximum permitted value of the sequence.
+ longgetMin() + +
+          The minimum permitted value of the sequence.
+ intgetNowait() + +
+          The number of times that a thread of control was able to obtain handle + mutex without waiting.
+ longgetValue() + +
+          The current cached value of the sequence.
+ intgetWait() + +
+          The number of times a thread of control was forced to wait on the + handle mutex.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+getWait

+
+public int getWait()
+
+
The number of times a thread of control was forced to wait on the + handle mutex. +

+

+
+
+
+
+ +

+getNowait

+
+public int getNowait()
+
+
The number of times that a thread of control was able to obtain handle + mutex without waiting. +

+

+
+
+
+
+ +

+getCurrent

+
+public long getCurrent()
+
+
The current value of the sequence in the database. +

+

+
+
+
+
+ +

+getValue

+
+public long getValue()
+
+
The current cached value of the sequence. +

+

+
+
+
+
+ +

+getLastValue

+
+public long getLastValue()
+
+
The last cached value of the sequence. +

+

+
+
+
+
+ +

+getMin

+
+public long getMin()
+
+
The minimum permitted value of the sequence. +

+

+
+
+
+
+ +

+getMax

+
+public long getMax()
+
+
The maximum permitted value of the sequence. +

+

+
+
+
+
+ +

+getCacheSize

+
+public int getCacheSize()
+
+
The number of values that will be cached in this handle. +

+

+
+
+
+
+ +

+getFlags

+
+public int getFlags()
+
+
The flags value for the sequence. +

+

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/StatsConfig.html b/db/docs/java/com/sleepycat/db/StatsConfig.html new file mode 100644 index 000000000..6c0c78e01 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/StatsConfig.html @@ -0,0 +1,392 @@ + + + + + + +StatsConfig (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class StatsConfig

+
+java.lang.Object
+  extended bycom.sleepycat.db.StatsConfig
+
+
+
+
public class StatsConfig
extends Object
+ +

+Specifies the attributes of a statistics retrieval operation. +

+ +

+


+ +

+ + + + + + + + + + + + + + +
+Field Summary
+static StatsConfigDEFAULT + +
+           
+  + + + + + + + + + + +
+Constructor Summary
StatsConfig() + +
+          An instance created using the default constructor is initialized + with the system's default settings.
+  + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ booleangetClear() + +
+          Return if the statistics operation is configured to reset + statistics after they are returned.
+ booleangetFast() + +
+          Return if the statistics operation is configured to return only the + values which do not require expensive actions.
+ voidsetClear(boolean clear) + +
+          Configure the statistics operation to reset statistics after they + are returned.
+ voidsetFast(boolean fast) + +
+          Configure the statistics operation to return only the values which + do not incur some performance penalty.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + +
+Field Detail
+ +

+DEFAULT

+
+public static final StatsConfig DEFAULT
+
+
+
+
+ + + + + + + + +
+Constructor Detail
+ +

+StatsConfig

+
+public StatsConfig()
+
+
An instance created using the default constructor is initialized + with the system's default settings. +

+

+ + + + + + + + +
+Method Detail
+ +

+setFast

+
+public void setFast(boolean fast)
+
+
Configure the statistics operation to return only the values which + do not incur some performance penalty. +

+ The default value is false. +

+ For example, skip stats that require a traversal of the database or + in-memory tree, or which lock down the lock table for a period of + time. +

+ Among other things, this flag makes it possible for applications to + request key and record counts without incurring the performance + penalty of traversing the entire database. If the underlying + database is of type Recno, or of type Btree and the database was + configured to support retrieval by record number, the count of keys + will be exact. Otherwise, the count of keys will be the value saved + the last time the database was traversed, or 0 if no count of keys + has ever been made. If the underlying database is of type Recno, + the count of data items will be exact, otherwise, the count of data + items will be the value saved the last time the database was + traversed, or 0 if no count of data items has ever been done. +

+

+

+
Parameters:
fast - If set to true, configure the statistics operation to return only + the values which do not incur some performance penalty.
+
+
+
+ +
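For example, a sketch combining the fast and clear attributes in a sequence statistics request, assuming an already-open Sequence handle named seq (see Sequence.getStats):

    StatsConfig statsConfig = new StatsConfig();
    statsConfig.setFast(true);   // skip values that are expensive to compute
    statsConfig.setClear(true);  // reset the counters once reported
    SequenceStats stats = seq.getStats(statsConfig);
    System.out.println("current value: " + stats.getCurrent()
        + ", last cached value: " + stats.getLastValue());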

+getFast

+
+public boolean getFast()
+
+
Return if the statistics operation is configured to return only the + values which do not require expensive actions. +

+

+

+ +
Returns:
If the statistics operation is configured to return only the values + which do not require expensive actions.
+
+
+
+ +

+setClear

+
+public void setClear(boolean clear)
+
+
Configure the statistics operation to reset statistics after they + are returned. The default value is false. +

+

+

+
Parameters:
clear - If set to true, configure the statistics operation to reset + statistics after they are returned.
+
+
+
+ +

+getClear

+
+public boolean getClear()
+
+
Return if the statistics operation is configured to reset + statistics after they are returned. +

+

+

+ +
Returns:
If the statistics operation is configured to reset statistics after + they are returned.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/Transaction.html b/db/docs/java/com/sleepycat/db/Transaction.html new file mode 100644 index 000000000..eb72441db --- /dev/null +++ b/db/docs/java/com/sleepycat/db/Transaction.html @@ -0,0 +1,632 @@ + + + + + + +Transaction (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class Transaction

+
+java.lang.Object
+  extended bycom.sleepycat.db.Transaction
+
+
+
+
public class Transaction
extends Object
+ +

+The Transaction object is the handle for a transaction. Methods off the +transaction handle are used to configure, abort and commit the +transaction. Transaction handles are provided to other Berkeley DB +methods in order to transactionally protect those operations. +

+Transaction handles are not free-threaded; transaction handles may +be used by multiple threads, but only serially, that is, the application +must serialize access to the handle. Once the +Transaction.abort, Transaction.commit or +Transaction.discard +methods are called, the handle may +not be accessed again, regardless of the success or failure of the method. +In addition, parent transactions may not issue any Berkeley DB operations +while they have active child transactions (child transactions that have +not yet been committed or aborted) except for Environment.beginTransaction, Transaction.abort and Transaction.commit. +

+To obtain a transaction with default attributes: +

+    Transaction txn = myEnvironment.beginTransaction(null, null);
+
+To customize the attributes of a transaction: +
+    TransactionConfig config = new TransactionConfig();
+    config.setDirtyRead(true);
+    Transaction txn = myEnvironment.beginTransaction(null, config);
+
+

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ voidabort() + +
+          Cause an abnormal termination of the transaction.
+ voidcommit() + +
+          End the transaction.
+ voidcommitNoSync() + +
+          End the transaction, not committing synchronously.
+ voidcommitSync() + +
+          End the transaction, committing synchronously.
+ voiddiscard() + +
+          Free up all the per-process resources associated with the specified + Transaction handle, neither committing nor aborting the + transaction.
+ intgetId() + +
+          Return the transaction's unique ID.
+ voidprepare(byte[] gid) + +
+          Initiate the beginning of a two-phase commit.
+ voidsetLockTimeout(long timeOut) + +
+          Configure the lock request timeout value for the transaction.
+ voidsetTxnTimeout(long timeOut) + +
+          Configure the timeout value for the transaction lifetime.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+getId

+
+public int getId()
+          throws DatabaseException
+
+
Return the transaction's unique ID. +

+ Locking calls made on behalf of this transaction should use the + value returned from this method as the locker parameter to the + Environment.getLock or Environment.lockVector + calls. +

+

+

+ +
Returns:
The transaction's unique ID. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+abort

+
+public void abort()
+           throws DatabaseException
+
+
Cause an abnormal termination of the transaction. +

+ The log is played backward, and any necessary undo operations are done. + Before Transaction.abort returns, any locks held by the transaction will + have been released. +

+ In the case of nested transactions, aborting a parent transaction + causes all children (unresolved or not) of the parent transaction + to be aborted. +

+ All cursors opened within the transaction must be closed before the + transaction is aborted. +

+ After Transaction.abort has been called, regardless of its return, the + Transaction handle may not be accessed again. +

+

+

+

+ +
Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+commit

+
+public void commit()
+            throws DatabaseException
+
+
End the transaction. If the environment is configured for synchronous +commit, the transaction will be committed synchronously to stable +storage before the call returns. This means the transaction will exhibit +all of the ACID (atomicity, consistency, isolation, and durability) +properties. +

+If the environment is not configured for synchronous commit, the commit +will not necessarily have been committed to stable storage before the +call returns. This means the transaction will exhibit the ACI (atomicity, +consistency, and isolation) properties, but not D (durability); that is, +database integrity will be maintained, but it is possible this transaction +may be undone during recovery. +

+In the case of nested transactions, if the transaction is a parent +transaction, committing the parent transaction causes all unresolved +children of the parent to be committed. In the case of nested +transactions, if the transaction is a child transaction, its locks are +not released, but are acquired by its parent. Although the commit of the +child transaction will succeed, the actual resolution of the child +transaction is postponed until the parent transaction is committed or +aborted; that is, if its parent transaction commits, it will be +committed; and if its parent transaction aborts, it will be aborted. +

+All cursors opened within the transaction must be closed before the +transaction is committed. +

+After this method returns the Transaction handle may not be +accessed again, regardless of the method's success or failure. If the +method encounters an error, the transaction and all child transactions +of the transaction will have been aborted when the call returns. +

+

+

+

+ +
Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+commitSync

+
+public void commitSync()
+                throws DatabaseException
+
+
End the transaction, committing synchronously. This means the +transaction will exhibit all of the ACID (atomicity, consistency, +isolation, and durability) properties. +

+This behavior is the default for database environments unless otherwise +configured using the EnvironmentConfig.setTxnNoSync method. This +behavior may also be set for a single transaction using the +Environment.beginTransaction method. Calling this +method overrides both of those settings. +

+In the case of nested transactions, if the transaction is a parent +transaction, committing the parent transaction causes all unresolved +children of the parent to be committed. In the case of nested +transactions, if the transaction is a child transaction, its locks are +not released, but are acquired by its parent. Although the commit of the +child transaction will succeed, the actual resolution of the child +transaction is postponed until the parent transaction is committed or +aborted; that is, if its parent transaction commits, it will be +committed; and if its parent transaction aborts, it will be aborted. +

+All cursors opened within the transaction must be closed before the +transaction is committed. +

+After this method returns the Transaction handle may not be +accessed again, regardless of the method's success or failure. If the +method encounters an error, the transaction and all child transactions +of the transaction will have been aborted when the call returns. +

+

+

+

+ +
Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+commitNoSync

+
+public void commitNoSync()
+                  throws DatabaseException
+
+
End the transaction, not committing synchronously. This means the +transaction will exhibit the ACI (atomicity, consistency, and isolation) +properties, but not D (durability); that is, database integrity will be +maintained, but it is possible this transaction may be undone during +recovery. +

+This behavior may be set for a database environment using the +EnvironmentConfig.setTxnNoSync method or for a single transaction +using the Environment.beginTransaction method. Calling this +method overrides both of those settings. +

+In the case of nested transactions, if the transaction is a parent +transaction, committing the parent transaction causes all unresolved +children of the parent to be committed. In the case of nested +transactions, if the transaction is a child transaction, its locks are +not released, but are acquired by its parent. Although the commit of the +child transaction will succeed, the actual resolution of the child +transaction is postponed until the parent transaction is committed or +aborted; that is, if its parent transaction commits, it will be +committed; and if its parent transaction aborts, it will be aborted. +

+All cursors opened within the transaction must be closed before the +transaction is committed. +

+After this method returns the Transaction handle may not be +accessed again, regardless of the method's success or failure. If the +method encounters an error, the transaction and all child transactions +of the transaction will have been aborted when the call returns. +

+

+

+

+ +
Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+setTxnTimeout

+
+public void setTxnTimeout(long timeOut)
+                   throws DatabaseException
+
+
Configure the timeout value for the transaction lifetime. +

+ If the transaction runs longer than this time, the transaction may + throw DatabaseException. +

+ Timeouts are checked whenever a thread of control blocks on a lock + or when deadlock detection is performed. For this reason, the + accuracy of the timeout depends on how often deadlock detection is + performed. +

+

+

+
Parameters:
timeOut - The timeout value for the transaction lifetime, in microseconds. As + the value is an unsigned 32-bit number of microseconds, the maximum + timeout is roughly 71 minutes. A value of 0 disables timeouts for + the transaction. +

+ This method may be called at any time during the life of the application. +

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+setLockTimeout

+
+public void setLockTimeout(long timeOut)
+                    throws DatabaseException
+
+
Configure the lock request timeout value for the transaction. +

+ If a lock request cannot be granted in this time, the transaction + may throw DatabaseException. +

+ Timeouts are checked whenever a thread of control blocks on a lock + or when deadlock detection is performed. For this reason, the + accuracy of the timeout depends on how often deadlock detection is + performed. +

+

+

+
Parameters:
timeOut - The lock request timeout value for the transaction, in microseconds. + As the value is an unsigned 32-bit number of microseconds, the maximum + timeout is roughly 71 minutes. A value of 0 disables timeouts for the + transaction. +

+ This method may be called at any time during the life of the application. +
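+ For example (the values are illustrative), both timeouts are expressed + in microseconds: +
+    Transaction txn = myEnvironment.beginTransaction(null, null);
+    // Abandon the transaction if it runs longer than 10 seconds ...
+    txn.setTxnTimeout(10 * 1000000L);
+    // ... and individual lock requests after 1 second.
+    txn.setLockTimeout(1 * 1000000L);
+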

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+discard

+
+public void discard()
+             throws DatabaseException
+
+
Free up all the per-process resources associated with the specified + Transaction handle, neither committing nor aborting the + transaction. This call may be used only after calls to + Environment.recover when there are multiple global + transaction managers recovering transactions in a single database + environment. Any transactions returned by Environment.recover that are not handled by the current global transaction + manager should be discarded using this method. +

+ The Transaction handle may not be accessed again after this + method has been called, regardless of the method's success or failure. +
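+ A sketch of the intended usage; it assumes Environment.recover returns + an array of PreparedTransaction objects exposing getTransaction() and + getGID() (check the Environment documentation for the exact signature), + and ownedByThisManager is an application-supplied test: +
+    PreparedTransaction[] prepared = myEnvironment.recover(100, false);
+    for (int i = 0; i < prepared.length; i++) {
+        Transaction txn = prepared[i].getTransaction();
+        if (ownedByThisManager(prepared[i].getGID()))
+            txn.commit();     // or txn.abort(), as appropriate
+        else
+            txn.discard();    // another global manager will resolve it
+    }
+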

+

+

+

+ +
Throws: +
DatabaseException - if a failure occurs.
+
+
+
+ +

+prepare

+
+public void prepare(byte[] gid)
+             throws DatabaseException
+
+
Initiate the beginning of a two-phase commit. +

+ In a distributed transaction environment, Berkeley DB can be used + as a local transaction manager. In this case, the distributed + transaction manager must send prepare messages to each + local manager. The local manager must then issue a + Transaction.prepare call and await its successful return + before responding to the distributed transaction manager. Only + after the distributed transaction manager receives successful + responses from all of its prepare messages should it issue + any commit messages. +

+ In the case of nested transactions, preparing the parent causes all + unresolved children of the parent transaction to be committed. + Child transactions should never be explicitly prepared. Their fate + will be resolved along with their parent's during global recovery. +

+

+

+
Parameters:
gid - The global transaction ID by which this transaction will be known. + This global transaction ID will be returned in calls to + the Environment.recover method, telling the application which + global transactions must be resolved. The gid parameter must be sized + at least DB_XIDDATASIZE (currently 128) bytes; only the first + DB_XIDDATASIZE bytes are used. +
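+ For example (the contents of the global ID are application-defined; + myGlobalIdBytes is a placeholder for a value unique to this transaction): +
+    byte[] gid = new byte[128];    // at least DB_XIDDATASIZE bytes
+    System.arraycopy(myGlobalIdBytes, 0, gid, 0, myGlobalIdBytes.length);
+    txn.prepare(gid);
+    // Await the distributed transaction manager's decision, then
+    // call txn.commit() or txn.abort() accordingly.
+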

+

+

Throws: +
DatabaseException - if a failure occurs.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/TransactionConfig.html b/db/docs/java/com/sleepycat/db/TransactionConfig.html new file mode 100644 index 000000000..e6c72e24a --- /dev/null +++ b/db/docs/java/com/sleepycat/db/TransactionConfig.html @@ -0,0 +1,579 @@ + + + + + + +TransactionConfig (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class TransactionConfig

+
+java.lang.Object
+  extended bycom.sleepycat.db.TransactionConfig
+
+
+
All Implemented Interfaces:
Cloneable
+
+
+
+
public class TransactionConfig
extends Object
implements Cloneable
+ +

+Specifies the attributes of a database environment transaction. +

+ +

+


+ +

+ + + + + + + + + + + + + + +
+Field Summary
+static TransactionConfigDEFAULT + +
+           
+  + + + + + + + + + + +
+Constructor Summary
TransactionConfig() + +
+          An instance created using the default constructor is initialized + with the system's default settings.
+  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ booleangetDegree2() + +
+          Return if the transaction has been configured to have degree 2 isolation.
+ booleangetDirtyRead() + +
+          Return if the transaction is configured to perform dirty reads.
+ booleangetNoSync() + +
+          Return if the transaction is configured to not write or synchronously + flush the log when it commits.
+ booleangetNoWait() + +
+          Return if the transaction is configured to not wait if a lock + request cannot be immediately granted.
+ booleangetSync() + +
+          Return if the transaction is configured to write and synchronously + flush the log when it commits.
+ voidsetDegree2(boolean degree2) + +
+          Configure this transaction to have degree 2 isolation.
+ voidsetDirtyRead(boolean dirtyRead) + +
+          Configure the transaction to perform dirty reads.
+ voidsetNoSync(boolean txnNoSync) + +
+          Configure the transaction to not write or synchronously flush the log + when it commits.
+ voidsetNoWait(boolean noWait) + +
+          Configure the transaction to not wait if a lock request cannot be + immediately granted.
+ voidsetSync(boolean txnSync) + +
+          Configure the transaction to write and synchronously flush the log + when it commits.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + +
+Field Detail
+ +

+DEFAULT

+
+public static final TransactionConfig DEFAULT
+
+
+
+
+ + + + + + + + +
+Constructor Detail
+ +

+TransactionConfig

+
+public TransactionConfig()
+
+
An instance created using the default constructor is initialized + with the system's default settings. +

+

+ + + + + + + + +
+Method Detail
+ +

+setDegree2

+
+public void setDegree2(boolean degree2)
+
+
Configure this transaction to have degree 2 isolation. This provides + for cursor stability but not repeatable reads. Data items which have + been previously read by this transaction may be deleted or modified by + other transactions before this transaction completes. +

+ The default is false for this class and the database environment. +

+

+
+
+
+
+
+
+
+ +

+getDegree2

+
+public boolean getDegree2()
+
+
Return if the transaction has been configured to have degree 2 isolation. +

+

+

+
+
+
+ +
Returns:
If the transaction has been configured to have degree 2 isolation.
+
+
+
+ +

+setSync

+
+public void setSync(boolean txnSync)
+
+
Configure the transaction to write and synchronously flush the log + when it commits. +

+ This behavior may be set for a database environment using the + Environment.setMutableConfig method. Any value specified to this + method overrides that setting. +

+ The default is false for this class and true for the database + environment. +

+ If true is passed to both setSync and setNoSync, setSync will take + precedence. +

+

+

+
+
+
+
Parameters:
txnSync - If true, transactions exhibit all the ACID (atomicity, consistency, + isolation, and durability) properties.
+
+
+
+ +

+getSync

+
+public boolean getSync()
+
+
Return if the transaction is configured to write and synchronously + flush the log when it commits. +

+

+

+
+
+
+ +
Returns:
If the transaction is configured to write and synchronously flush + the log when it commits.
+
+
+
+ +

+setNoSync

+
+public void setNoSync(boolean txnNoSync)
+
+
Configure the transaction to not write or synchronously flush the log + when it commits. +

+ This behavior may be set for a database environment using the + Environment.setMutableConfig method. Any value specified to this method + overrides that setting. +

+ The default is false for this class and the database environment. +

+

+

+
+
+
+
Parameters:
txnNoSync - If true, transactions exhibit the ACI (atomicity, consistency, and + isolation) properties, but not D (durability); that is, database + integrity will be maintained, but if the application or system + fails, it is possible some number of the most recently committed + transactions may be undone during recovery. The number of + transactions at risk is governed by how many log updates can fit + into the log buffer, how often the operating system flushes dirty + buffers to disk, and how often the log is checkpointed.
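+ For example, to trade durability for commit throughput on a single + transaction: +
+    TransactionConfig config = new TransactionConfig();
+    config.setNoSync(true);
+    Transaction txn = myEnvironment.beginTransaction(null, config);
+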
+
+
+
+ +

+getNoSync

+
+public boolean getNoSync()
+
+
Return if the transaction is configured to not write or synchronously + flush the log when it commits. +

+

+

+
+
+
+ +
Returns:
If the transaction is configured to not write or synchronously flush + the log when it commits.
+
+
+
+ +

+setNoWait

+
+public void setNoWait(boolean noWait)
+
+
Configure the transaction to not wait if a lock request cannot be + immediately granted. +

+ The default is false for this class and the database environment. +

+

+

+
+
+
+
Parameters:
noWait - If true, transactions will not wait if a lock request cannot be + immediately granted, instead DeadlockException will be thrown.
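+ For example (a sketch; myDatabase and the key and data entries are + placeholders, and the enclosing method is assumed to declare + throws DatabaseException), a no-wait transaction must be prepared to + handle DeadlockException from any operation that requests a lock: +
+    TransactionConfig config = new TransactionConfig();
+    config.setNoWait(true);
+    Transaction txn = myEnvironment.beginTransaction(null, config);
+    try {
+        myDatabase.put(txn, key, data);
+        txn.commit();
+    } catch (DeadlockException lockNotGranted) {
+        // The lock was not immediately available; give up rather than block.
+        txn.abort();
+    }
+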
+
+
+
+ +

+getNoWait

+
+public boolean getNoWait()
+
+
Return if the transaction is configured to not wait if a lock + request cannot be immediately granted. +

+

+

+
+
+
+ +
Returns:
If the transaction is configured to not wait if a lock request + cannot be immediately granted.
+
+
+
+ +

+setDirtyRead

+
+public void setDirtyRead(boolean dirtyRead)
+
+
Configure the transaction to perform dirty reads. +

+ The default is false for this class and the database environment. +

+

+

+
+
+
+
Parameters:
dirtyRead - If true, all read operations performed by the transaction may read + modified but not yet committed data.
+
+
+
+ +

+getDirtyRead

+
+public boolean getDirtyRead()
+
+
Return if the transaction is configured to perform dirty reads. +

+

+

+
+
+
+ +
Returns:
If the transaction is configured to perform dirty reads.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/TransactionStats.Active.html b/db/docs/java/com/sleepycat/db/TransactionStats.Active.html new file mode 100644 index 000000000..fe758600f --- /dev/null +++ b/db/docs/java/com/sleepycat/db/TransactionStats.Active.html @@ -0,0 +1,322 @@ + + + + + + +TransactionStats.Active (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class TransactionStats.Active

+
+java.lang.Object
+  extended bycom.sleepycat.db.TransactionStats.Active
+
+
+
Enclosing class:
TransactionStats
+
+
+
+
public class TransactionStats.Active
extends Object
+ +

+The Active class represents an active transaction. +

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ LogSequenceNumbergetLsn() + +
+          The log sequence number of the transaction's first log record.
+ intgetParentId() + +
+          The transaction ID of the parent transaction (or 0, if no parent).
+ intgetTxnId() + +
+          The transaction ID of the transaction.
+ intgetXaStatus() + +
+          If the transaction is an XA transaction, the status of the + transaction, otherwise 0.
+ byte[]getXId() + +
+          If the transaction is an XA transaction, the XA global + transaction ID.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+getTxnId

+
+public int getTxnId()
+
+
The transaction ID of the transaction. +

+

+
+
+
+
+ +

+getParentId

+
+public int getParentId()
+
+
The transaction ID of the parent transaction (or 0, if no parent). +

+

+
+
+
+
+ +

+getLsn

+
+public LogSequenceNumber getLsn()
+
+
The log sequence number of the transaction's first log record. +

+

+
+
+
+
+ +

+getXaStatus

+
+public int getXaStatus()
+
+
If the transaction is an XA transaction, the status of the + transaction, otherwise 0. +

+

+
+
+
+
+ +

+getXId

+
+public byte[] getXId()
+
+
If the transaction is an XA transaction, the XA global + transaction ID. +

+

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/TransactionStats.html b/db/docs/java/com/sleepycat/db/TransactionStats.html new file mode 100644 index 000000000..424a02368 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/TransactionStats.html @@ -0,0 +1,550 @@ + + + + + + +TransactionStats (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class TransactionStats

+
+java.lang.Object
+  extended bycom.sleepycat.db.TransactionStats
+
+
+
+
public class TransactionStats
extends Object
+ +

+Transaction statistics for a database environment. +
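+For example (this sketch assumes the statistics are obtained through
+Environment.getTransactionStats, with null selecting the default
+StatsConfig): +
+    TransactionStats stats = myEnvironment.getTransactionStats(null);
+    System.out.println("active: " + stats.getNactive()
+        + ", committed: " + stats.getNumCommits()
+        + ", aborted: " + stats.getNaborts());
+    TransactionStats.Active[] active = stats.getTxnarray();
+    for (int i = 0; i < active.length; i++)
+        System.out.println("txn " + active[i].getTxnId()
+            + " began at LSN " + active[i].getLsn());
+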

+ +

+


+ +

+ + + + + + + + + + + +
+Nested Class Summary
+ classTransactionStats.Active + +
+          The Active class represents an active transaction.
+  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ LogSequenceNumbergetLastCkp() + +
+          The LSN of the last checkpoint.
+ intgetLastTxnId() + +
+          The last transaction ID allocated.
+ intgetMaxNactive() + +
+          The maximum number of active transactions at any one time.
+ intgetMaxTxns() + +
+          The maximum number of active transactions configured.
+ intgetNaborts() + +
+          The number of transactions that have aborted.
+ intgetNactive() + +
+          The number of transactions that are currently active.
+ intgetNumBegins() + +
+          The number of transactions that have begun.
+ intgetNumCommits() + +
+          The number of transactions that have committed.
+ intgetNumRestores() + +
+          The number of transactions that have been restored.
+ intgetRegionNowait() + +
+          The number of times that a thread of control was able to obtain the + region lock without waiting.
+ intgetRegionWait() + +
+          The number of times that a thread of control was forced to wait + before obtaining the region lock.
+ intgetRegSize() + +
+          The size of the region.
+ longgetTimeCkp() + +
+          The time the last completed checkpoint finished (as the number of + seconds since the Epoch, returned by the IEEE/ANSI Std 1003.1 + (POSIX) time interface).
+ TransactionStats.Active[]getTxnarray() + +
+          The array of active transactions.
+ StringtoString() + +
+          For convenience, the TransactionStats class has a toString method + that lists all the data fields.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+getLastCkp

+
+public LogSequenceNumber getLastCkp()
+
+
The LSN of the last checkpoint. +

+

+
+
+
+
+ +

+getTimeCkp

+
+public long getTimeCkp()
+
+
The time the last completed checkpoint finished (as the number of + seconds since the Epoch, returned by the IEEE/ANSI Std 1003.1 + (POSIX) time interface). +

+

+
+
+
+
+ +

+getLastTxnId

+
+public int getLastTxnId()
+
+
The last transaction ID allocated. +

+

+
+
+
+
+ +

+getMaxTxns

+
+public int getMaxTxns()
+
+
The maximum number of active transactions configured. +

+

+
+
+
+
+ +

+getNaborts

+
+public int getNaborts()
+
+
The number of transactions that have aborted. +

+

+
+
+
+
+ +

+getNumBegins

+
+public int getNumBegins()
+
+
The number of transactions that have begun. +

+

+
+
+
+
+ +

+getNumCommits

+
+public int getNumCommits()
+
+
The number of transactions that have committed. +

+

+
+
+
+
+ +

+getNactive

+
+public int getNactive()
+
+
The number of transactions that are currently active. +

+

+
+
+
+
+ +

+getNumRestores

+
+public int getNumRestores()
+
+
The number of transactions that have been restored. +

+

+
+
+
+
+ +

+getMaxNactive

+
+public int getMaxNactive()
+
+
The maximum number of active transactions at any one time. +

+

+
+
+
+
+ +

+getRegionWait

+
+public int getRegionWait()
+
+
The number of times that a thread of control was forced to wait + before obtaining the region lock. +

+

+
+
+
+
+ +

+getRegionNowait

+
+public int getRegionNowait()
+
+
The number of times that a thread of control was able to obtain the + region lock without waiting. +

+

+
+
+
+
+ +

+getRegSize

+
+public int getRegSize()
+
+
The size of the region. +

+

+
+
+
+
+ +

+getTxnarray

+
+public TransactionStats.Active[] getTxnarray()
+
+
The array of active transactions. +

+

+
+
+
+
+ +

+toString

+
+public String toString()
+
+
For convenience, the TransactionStats class has a toString method + that lists all the data fields. +

+

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/VerifyConfig.html b/db/docs/java/com/sleepycat/db/VerifyConfig.html new file mode 100644 index 000000000..17907710c --- /dev/null +++ b/db/docs/java/com/sleepycat/db/VerifyConfig.html @@ -0,0 +1,601 @@ + + + + + + +VerifyConfig (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.db +
+Class VerifyConfig

+
+java.lang.Object
+  extended bycom.sleepycat.db.VerifyConfig
+
+
+
+
public class VerifyConfig
extends Object
+ +

+Specifies the attributes of a verification operation. +

+ +

+


+ +

+ + + + + + + + + + + + + + +
+Field Summary
+static VerifyConfigDEFAULT + +
+          Default configuration used if null is passed to + Database.verify.
+  + + + + + + + + + + +
+Constructor Summary
VerifyConfig() + +
+          An instance created using the default constructor is initialized + with the system's default settings.
+  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ booleangetAggressive() + +
+          Return if the Database.verify is configured to output + all the key/data pairs in the file that can be found.
+ booleangetNoOrderCheck() + +
+          Return if the Database.verify is configured to skip the + database checks for btree and duplicate sort order and for hashing.
+ booleangetOrderCheckOnly() + +
+          Return if the Database.verify is configured to do database + checks for btree and duplicate sort order and for hashing, skipped + by verification operations configured by VerifyConfig.setNoOrderCheck.
+ booleangetPrintable() + +
+          Return if the Database.verify is configured to use printing + characters where possible.
+ booleangetSalvage() + +
+          Return if the Database.verify is configured to write the + key/data pairs from all databases in the file to the file stream + named by the outfile parameter.
+ voidsetAggressive(boolean aggressive) + +
+          Configure Database.verify to output all the + key/data pairs in the file that can be found.
+ voidsetNoOrderCheck(boolean noOrderCheck) + +
+          Configure Database.verify to skip the database checks for + btree and duplicate sort order and for hashing.
+ voidsetOrderCheckOnly(boolean orderCheckOnly) + +
+          Configure Database.verify to do database checks for btree + and duplicate sort order and for hashing, skipped by verification + operations configured by VerifyConfig.setNoOrderCheck.
+ voidsetPrintable(boolean printable) + +
+          Configure Database.verify to use printing characters + where possible.
+ voidsetSalvage(boolean salvage) + +
+          Configure Database.verify to write the key/data pairs from + all databases in the file to the file stream named by the outfile + parameter.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + +
+Field Detail
+ +

+DEFAULT

+
+public static final VerifyConfig DEFAULT
+
+
Default configuration used if null is passed to + Database.verify. +

+

+
+
+ + + + + + + + +
+Constructor Detail
+ +

+VerifyConfig

+
+public VerifyConfig()
+
+
An instance created using the default constructor is initialized + with the system's default settings. +

+

+ + + + + + + + +
+Method Detail
+ +

+setNoOrderCheck

+
+public void setNoOrderCheck(boolean noOrderCheck)
+
+
Configure Database.verify to skip the database checks for + btree and duplicate sort order and for hashing. +

+ Database.verify normally verifies that btree keys and + duplicate items are correctly sorted, and hash keys are correctly + hashed. If the file being verified contains multiple databases + using differing sorting or hashing algorithms, some of them must + necessarily fail database verification because only one sort order + or hash function can be specified before Database.verify + is called. To verify files with multiple databases having differing + sorting orders or hashing functions, first perform verification of + the file as a whole using Database.verify configured with + VerifyConfig.setNoOrderCheck, and then individually verify + the sort order and hashing function for each database in the file + using Database.verify configured with VerifyConfig.setOrderCheckOnly. +

+

+

+
Parameters:
noOrderCheck - If true, configure Database.verify to skip the database + checks for btree and duplicate sort order and for hashing.
+
+
+
+ +

+getNoOrderCheck

+
+public boolean getNoOrderCheck()
+
+
Return if the Database.verify is configured to skip the + database checks for btree and duplicate sort order and for hashing. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
If the Database.verify is configured to skip the + database checks for btree and duplicate sort order and for hashing.
+
+
+
+ +

+setOrderCheckOnly

+
+public void setOrderCheckOnly(boolean orderCheckOnly)
+
+
Configure Database.verify to do database checks for btree + and duplicate sort order and for hashing, skipped by verification + operations configured by VerifyConfig.setNoOrderCheck. +

+ When this flag is specified, a database name must be specified to + Database.verify, indicating the database in the physical + file which is to be checked. +

+ This configuration is only safe to use on databases that have + already successfully been verified with VerifyConfig.setNoOrderCheck configured. +

+

+

+
Parameters:
orderCheckOnly - If true, configure Database.verify to do database checks + for btree and duplicate sort order and for hashing, skipped by + verification operations configured by VerifyConfig.setNoOrderCheck.
+
+
+
+ +

+getOrderCheckOnly

+
+public boolean getOrderCheckOnly()
+
+
Return if the Database.verify is configured to do database + checks for btree and duplicate sort order and for hashing, skipped + by verification operations configured by VerifyConfig.setNoOrderCheck. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
If the Database.verify is configured to do database + checks for btree and duplicate sort order and for hashing, skipped + by verification operations configured by VerifyConfig.setNoOrderCheck.
+
+
+
+ +

+setPrintable

+
+public void setPrintable(boolean printable)
+
+
Configure Database.verify to use printing characters + where possible. +

+ This method is only meaningful when combined with + VerifyConfig.setSalvage. +

+ This configuration permits users to use standard text editors and + tools to modify the contents of databases or selectively remove data + from salvager output. +

+ Note: different systems may have different notions about what characters + are considered printing characters, and databases dumped in + this manner may be less portable to external systems. +

+

+

+
Parameters:
printable - If true, configure Database.verify to use printing + characters where possible.
+
+
+
+ +

+getPrintable

+
+public boolean getPrintable()
+
+
Return if the Database.verify is configured to use printing + characters where possible. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
If the Database.verify is configured to use printing + characters where possible.
+
+
+
+ +

+setSalvage

+
+public void setSalvage(boolean salvage)
+
+
Configure Database.verify to write the key/data pairs from + all databases in the file to the file stream named by the outfile + parameter. +

+ The output format is the same as that specified for the db_dump + utility, and can be used as input for the db_load utility. +

+ Because the key/data pairs are output in page order as opposed to the + sort order used by db_dump, using Database.verify to dump + key/data pairs normally produces less than optimal loads for Btree + databases. +

+

+

+
Parameters:
salvage - If true, configure Database.verify to write the key/data + pairs from all databases in the file to the file stream named by the + outfile parameter.
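+ For example, a typical salvage configuration (the Database.verify call + itself is omitted here because its overloads vary by release; pass this + configuration, together with an output stream for the salvaged pairs, + to the appropriate Database.verify overload): +
+    VerifyConfig config = new VerifyConfig();
+    config.setSalvage(true);
+    config.setAggressive(true);   // also recover deleted or corrupted pairs
+    config.setPrintable(true);    // produce editable, db_load-compatible text
+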
+
+
+
+ +

+getSalvage

+
+public boolean getSalvage()
+
+
Return if the Database.verify is configured to write the + key/data pairs from all databases in the file to the file stream + named by the outfile parameter. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
If the Database.verify is configured to write the + key/data pairs from all databases in the file to the file stream + named by the outfile parameter.
+
+
+
+ +

+setAggressive

+
+public void setAggressive(boolean aggressive)
+
+
Configure Database.verify to output all the + key/data pairs in the file that can be found. +

+ By default, Database.verify does not assume corruption. + For example, if a key/data pair on a page is marked as deleted, it + is not then written to the output file. When Database.verify is configured with this method, corruption is assumed, and + any key/data pair that can be found is written. In this case, + key/data pairs that are corrupted or have been deleted may appear + in the output (even if the file being salvaged is in no way + corrupt), and the output will almost certainly require editing + before being loaded into a database. +

+

+

+
Parameters:
aggressive - If true, configure Database.verify to output all + the key/data pairs in the file that can be found.
+
+
+
+ +

+getAggressive

+
+public boolean getAggressive()
+
+
Return if the Database.verify is configured to output + all the key/data pairs in the file that can be found. +

+This method may be called at any time during the life of the application. +

+

+

+ +
Returns:
If the Database.verify is configured to output + all the key/data pairs in the file that can be found.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/BtreePrefixCalculator.html b/db/docs/java/com/sleepycat/db/class-use/BtreePrefixCalculator.html new file mode 100644 index 000000000..23099822c --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/BtreePrefixCalculator.html @@ -0,0 +1,189 @@ + + + + + + +Uses of Interface com.sleepycat.db.BtreePrefixCalculator (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Interface
com.sleepycat.db.BtreePrefixCalculator

+
+ + + + + + + + + +
+Packages that use BtreePrefixCalculator
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of BtreePrefixCalculator in com.sleepycat.db
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db that return BtreePrefixCalculator
+ BtreePrefixCalculatorDatabaseConfig.getBtreePrefixCalculator() + +
+          Return the Btree prefix callback.
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db with parameters of type BtreePrefixCalculator
+ voidDatabaseConfig.setBtreePrefixCalculator(BtreePrefixCalculator btreePrefixCalculator) + +
+          Set the Btree prefix callback.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/BtreeStats.html b/db/docs/java/com/sleepycat/db/class-use/BtreeStats.html new file mode 100644 index 000000000..398ea7cc4 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/BtreeStats.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.db.BtreeStats (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.BtreeStats

+
+No usage of com.sleepycat.db.BtreeStats +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/CacheFile.html b/db/docs/java/com/sleepycat/db/class-use/CacheFile.html new file mode 100644 index 000000000..bf3bd8c84 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/CacheFile.html @@ -0,0 +1,173 @@ + + + + + + +Uses of Class com.sleepycat.db.CacheFile (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.CacheFile

+
+ + + + + + + + + +
+Packages that use CacheFile
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of CacheFile in com.sleepycat.db
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db that return CacheFile
+ CacheFileDatabase.getCacheFile() + +
+          Return the handle for the cache file underlying the database.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/CacheFilePriority.html b/db/docs/java/com/sleepycat/db/class-use/CacheFilePriority.html new file mode 100644 index 000000000..b882fb2ba --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/CacheFilePriority.html @@ -0,0 +1,238 @@ + + + + + + +Uses of Class com.sleepycat.db.CacheFilePriority (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.CacheFilePriority

+
+ + + + + + + + + +
+Packages that use CacheFilePriority
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of CacheFilePriority in com.sleepycat.db
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + +
Fields in com.sleepycat.db declared as CacheFilePriority
+static CacheFilePriorityCacheFilePriority.DEFAULT + +
+          The default priority.
+static CacheFilePriorityCacheFilePriority.HIGH + +
+          The second highest priority.
+static CacheFilePriorityCacheFilePriority.LOW + +
+          The second lowest priority.
+static CacheFilePriorityCacheFilePriority.VERY_HIGH + +
+          The highest priority: pages are the least likely to be discarded.
+static CacheFilePriorityCacheFilePriority.VERY_LOW + +
+          The lowest priority: pages are the most likely to be discarded.
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db that return CacheFilePriority
+ CacheFilePriorityCacheFile.getPriority() + +
+          Return the cache priority for pages from the specified file.
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db with parameters of type CacheFilePriority
+ voidCacheFile.setPriority(CacheFilePriority priority) + +
+          Set the +cache priority for pages from the specified file.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/CacheFileStats.html b/db/docs/java/com/sleepycat/db/class-use/CacheFileStats.html new file mode 100644 index 000000000..d2aaccfc2 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/CacheFileStats.html @@ -0,0 +1,174 @@ + + + + + + +Uses of Class com.sleepycat.db.CacheFileStats (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.CacheFileStats

+
+ + + + + + + + + +
+Packages that use CacheFileStats
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of CacheFileStats in com.sleepycat.db
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db that return CacheFileStats
+ CacheFileStats[]Environment.getCacheFileStats(StatsConfig config) + +
+          Return the database environment's per-file memory pool (that is, the + buffer cache) statistics.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/CacheStats.html b/db/docs/java/com/sleepycat/db/class-use/CacheStats.html new file mode 100644 index 000000000..129e19ba3 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/CacheStats.html @@ -0,0 +1,173 @@ + + + + + + +Uses of Class com.sleepycat.db.CacheStats (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.CacheStats

+
+ + + + + + + + + +
+Packages that use CacheStats
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of CacheStats in com.sleepycat.db
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db that return CacheStats
+ CacheStatsEnvironment.getCacheStats(StatsConfig config) + +
+           
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/CheckpointConfig.html b/db/docs/java/com/sleepycat/db/class-use/CheckpointConfig.html new file mode 100644 index 000000000..e965b3130 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/CheckpointConfig.html @@ -0,0 +1,190 @@ + + + + + + +Uses of Class com.sleepycat.db.CheckpointConfig (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.CheckpointConfig

+
+ + + + + + + + + +
+Packages that use CheckpointConfig
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of CheckpointConfig in com.sleepycat.db
+  +

+ + + + + + + + + +
Fields in com.sleepycat.db declared as CheckpointConfig
+static CheckpointConfigCheckpointConfig.DEFAULT + +
+          Default configuration used if null is passed to + Environment.checkpoint.
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db with parameters of type CheckpointConfig
+ voidEnvironment.checkpoint(CheckpointConfig checkpointConfig) + +
+          Synchronously checkpoint the database environment.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/Cursor.html b/db/docs/java/com/sleepycat/db/class-use/Cursor.html new file mode 100644 index 000000000..3276f05ac --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/Cursor.html @@ -0,0 +1,226 @@ + + + + + + +Uses of Class com.sleepycat.db.Cursor (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.Cursor

+
+ + + + + + + + + +
+Packages that use Cursor
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of Cursor in com.sleepycat.db
+  +

+ + + + + + + + + +
Subclasses of Cursor in com.sleepycat.db
+ classSecondaryCursor + +
+          A database cursor for a secondary database.
+  +

+ + + + + + + + + + + + + + + + + +
Methods in com.sleepycat.db that return Cursor
+ CursorSecondaryCursor.dup(boolean samePosition) + +
+          Returns a new SecondaryCursor for the same transaction as + the original cursor.
+ CursorDatabase.openCursor(Transaction txn, + CursorConfig cursorConfig) + +
+          Return a cursor into the database.
+ CursorCursor.dup(boolean samePosition) + +
+          Return a new cursor with the same transaction and locker ID as the + original cursor.
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db with parameters of type Cursor
+ JoinCursorDatabase.join(Cursor[] cursors, + JoinConfig config) + +
+          Creates a specialized join cursor for use in performing equality or + natural joins on secondary indices.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/CursorConfig.html b/db/docs/java/com/sleepycat/db/class-use/CursorConfig.html new file mode 100644 index 000000000..a745df918 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/CursorConfig.html @@ -0,0 +1,242 @@ + + + + + + +Uses of Class com.sleepycat.db.CursorConfig (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.CursorConfig

+
+ + + + + + + + + +
+Packages that use CursorConfig
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of CursorConfig in com.sleepycat.db
+  +

+ + + + + + + + + + + + + + + + + + + + + +
Fields in com.sleepycat.db declared as CursorConfig
+static CursorConfigCursorConfig.DEFAULT + +
+          Default configuration used if null is passed to methods that create a + cursor.
+static CursorConfigCursorConfig.DIRTY_READ + +
+          A convenience instance to configure read operations performed by the + cursor to return modified but not yet committed data.
+static CursorConfigCursorConfig.DEGREE_2 + +
+          A convenience instance to configure a cursor for degree 2 isolation.
+static CursorConfigCursorConfig.WRITECURSOR + +
+          A convenience instance to specify the Concurrent Data Store environment + cursor will be used to update the database.
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db that return CursorConfig
+ CursorConfigCursor.getConfig() + +
+          Return this cursor's configuration.
+  +

+ + + + + + + + + + + + + +
Methods in com.sleepycat.db with parameters of type CursorConfig
+ SecondaryCursorSecondaryDatabase.openSecondaryCursor(Transaction txn, + CursorConfig cursorConfig) + +
+          Obtain a cursor on a database, returning a SecondaryCursor.
+ CursorDatabase.openCursor(Transaction txn, + CursorConfig cursorConfig) + +
+          Return a cursor into the database.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/Database.html b/db/docs/java/com/sleepycat/db/class-use/Database.html new file mode 100644 index 000000000..5d5ab40fe --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/Database.html @@ -0,0 +1,533 @@ + + + + + + +Uses of Class com.sleepycat.db.Database (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.Database

+
+ + + + + + + + + + + + + + + + + +
+Packages that use Database
com.sleepycat.bind.serialBindings that use Java serialization. 
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of Database in com.sleepycat.bind.serial
+  +

+ + + + + + + + +
Constructors in com.sleepycat.bind.serial with parameters of type Database
StoredClassCatalog(Database database) + +
+          Creates a catalog based on a given database.
+  +

+ + + + + +
+Uses of Database in com.sleepycat.collections
+  +

+ + + + + + + + + + + + + +
Methods in com.sleepycat.collections with parameters of type Database
+ StoredMapTupleSerialFactory.newMap(Database db, + Class keyClass, + Class valueBaseClass, + boolean writeAllowed) + +
+          Creates a map from a previously opened Database object.
+ StoredSortedMapTupleSerialFactory.newSortedMap(Database db, + Class keyClass, + Class valueBaseClass, + boolean writeAllowed) + +
+          Creates a sorted map from a previously opened Database object.
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Constructors in com.sleepycat.collections with parameters of type Database
StoredValueSet(Database database, + EntryBinding valueBinding, + boolean writeAllowed) + +
+          Creates a value set view of a Database.
StoredValueSet(Database database, + EntityBinding valueEntityBinding, + boolean writeAllowed) + +
+          Creates a value set entity view of a Database.
StoredSortedValueSet(Database database, + EntityBinding valueEntityBinding, + boolean writeAllowed) + +
+          Creates a sorted value set entity view of a Database.
StoredSortedMap(Database database, + EntryBinding keyBinding, + EntryBinding valueBinding, + boolean writeAllowed) + +
+          Creates a sorted map view of a Database.
StoredSortedMap(Database database, + EntryBinding keyBinding, + EntryBinding valueBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a sorted map view of a Database with a PrimaryKeyAssigner.
StoredSortedMap(Database database, + EntryBinding keyBinding, + EntityBinding valueEntityBinding, + boolean writeAllowed) + +
+          Creates a sorted map entity view of a Database.
StoredSortedMap(Database database, + EntryBinding keyBinding, + EntityBinding valueEntityBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a sorted map entity view of a Database with a PrimaryKeyAssigner.
StoredSortedKeySet(Database database, + EntryBinding keyBinding, + boolean writeAllowed) + +
+          Creates a sorted key set view of a Database.
StoredMap(Database database, + EntryBinding keyBinding, + EntryBinding valueBinding, + boolean writeAllowed) + +
+          Creates a map view of a Database.
StoredMap(Database database, + EntryBinding keyBinding, + EntryBinding valueBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a map view of a Database with a PrimaryKeyAssigner.
StoredMap(Database database, + EntryBinding keyBinding, + EntityBinding valueEntityBinding, + boolean writeAllowed) + +
+          Creates a map entity view of a Database.
StoredMap(Database database, + EntryBinding keyBinding, + EntityBinding valueEntityBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a map entity view of a Database with a PrimaryKeyAssigner.
StoredList(Database database, + EntryBinding valueBinding, + boolean writeAllowed) + +
+          Creates a list view of a Database.
StoredList(Database database, + EntityBinding valueEntityBinding, + boolean writeAllowed) + +
+          Creates a list entity view of a Database.
StoredList(Database database, + EntryBinding valueBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a list view of a Database with a PrimaryKeyAssigner.
StoredList(Database database, + EntityBinding valueEntityBinding, + PrimaryKeyAssigner keyAssigner) + +
+          Creates a list entity view of a Database with a PrimaryKeyAssigner.
StoredKeySet(Database database, + EntryBinding keyBinding, + boolean writeAllowed) + +
+          Creates a key set view of a Database.
+  +

+ + + + + +
+Uses of Database in com.sleepycat.db
+  +

+ + + + + + + + + +
Subclasses of Database in com.sleepycat.db
+ classSecondaryDatabase + +
+          A secondary database handle.
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + +
Methods in com.sleepycat.db that return Database
+ DatabaseSequence.getDatabase() + +
+          Return the Database handle associated with this sequence.
+ DatabaseSecondaryDatabase.getPrimaryDatabase() + +
+          Returns the primary database associated with this secondary database.
+ DatabaseJoinCursor.getDatabase() + +
+          Returns the primary database handle associated with this cursor.
+ DatabaseEnvironment.openDatabase(Transaction txn, + String fileName, + String databaseName, + DatabaseConfig config) + +
+          Open a database.
+ DatabaseCursor.getDatabase() + +
+          Return the Database handle associated with this Cursor.
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Methods in com.sleepycat.db with parameters of type Database
+ voidRecordNumberAppender.appendRecordNumber(Database db, + DatabaseEntry data, + int recno) + +
+          A callback function to modify the stored database based on the + generated key.
+ intHasher.hash(Database db, + byte[] data, + int len) + +
+          An application-specified, database-specific hash function.
+ voidFeedbackHandler.upgradeFeedback(Database database, + int percent) + +
+          A function called with progress information when the database is being upgraded.
+ voidFeedbackHandler.verifyFeedback(Database database, + int percent) + +
+          A function called with progress information when the database is being verified.
+ SecondaryDatabaseEnvironment.openSecondaryDatabase(Transaction txn, + String fileName, + String databaseName, + Database primaryDatabase, + SecondaryConfig config) + +
+          Open a database.
+ intBtreePrefixCalculator.prefix(Database db, + DatabaseEntry dbt1, + DatabaseEntry dbt2) + +
+          The application-specific Btree prefix callback.
+  +

+ + + + + + + + +
Constructors in com.sleepycat.db with parameters of type Database
SecondaryDatabase(String fileName, + String databaseName, + Database primaryDatabase, + SecondaryConfig config) + +
+          Open a database.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/DatabaseConfig.html b/db/docs/java/com/sleepycat/db/class-use/DatabaseConfig.html new file mode 100644 index 000000000..306415acf --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/DatabaseConfig.html @@ -0,0 +1,281 @@ + + + + + + +Uses of Class com.sleepycat.db.DatabaseConfig (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.DatabaseConfig

+
+ + + + + + + + + +
+Packages that use DatabaseConfig
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of DatabaseConfig in com.sleepycat.db
+  +

+ + + + + + + + + +
Subclasses of DatabaseConfig in com.sleepycat.db
+ classSecondaryConfig + +
+          The configuration properties of a SecondaryDatabase extend +those of a primary Database.
+  +

+ + + + + + + + + +
Fields in com.sleepycat.db declared as DatabaseConfig
+static DatabaseConfigDatabaseConfig.DEFAULT + +
+           
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db that return DatabaseConfig
+ DatabaseConfigDatabase.getConfig() + +
+          Return this Database object's configuration.
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + +
Methods in com.sleepycat.db with parameters of type DatabaseConfig
+ DatabaseEnvironment.openDatabase(Transaction txn, + String fileName, + String databaseName, + DatabaseConfig config) + +
+          Open a database.
+ voidDatabase.setConfig(DatabaseConfig config) + +
+          Change the settings in an existing database handle.
+static voidDatabase.remove(String fileName, + String databaseName, + DatabaseConfig config) + +
+           +Remove a database.
+static voidDatabase.rename(String fileName, + String oldDatabaseName, + String newDatabaseName, + DatabaseConfig config) + +
+           +Rename a database.
+static voidDatabase.upgrade(String fileName, + DatabaseConfig config) + +
+          Upgrade all of the databases included in the specified file.
+  +

+ + + + + + + + +
Constructors in com.sleepycat.db with parameters of type DatabaseConfig
Database(String fileName, + String databaseName, + DatabaseConfig config) + +
+          Open a database.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/DatabaseEntry.html b/db/docs/java/com/sleepycat/db/class-use/DatabaseEntry.html new file mode 100644 index 000000000..f0d3484e9 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/DatabaseEntry.html @@ -0,0 +1,1721 @@ + + + + + + +Uses of Class com.sleepycat.db.DatabaseEntry (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.DatabaseEntry

+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+Packages that use DatabaseEntry
com.sleepycat.bindBindings between database entries and Java objects
+[reference guide]
com.sleepycat.bind.serialBindings that use Java serialization. 
com.sleepycat.bind.tupleBindings that use sequences of primitive fields, or tuples. 
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of DatabaseEntry in com.sleepycat.bind
+  +

Methods in com.sleepycat.bind with parameters of type DatabaseEntry:
  Object RecordNumberBinding.entryToObject(DatabaseEntry entry)
  void RecordNumberBinding.objectToEntry(Object object, DatabaseEntry entry)
  static long RecordNumberBinding.entryToRecordNumber(DatabaseEntry entry) - Utility method for use by bindings to translate an entry buffer to a record number integer.
  static void RecordNumberBinding.recordNumberToEntry(long recordNumber, DatabaseEntry entry) - Utility method for use by bindings to translate a record number integer to an entry buffer.
  Object EntryBinding.entryToObject(DatabaseEntry entry) - Converts an entry buffer into an Object.
  void EntryBinding.objectToEntry(Object object, DatabaseEntry entry) - Converts an Object into an entry buffer.
  Object EntityBinding.entryToObject(DatabaseEntry key, DatabaseEntry data) - Converts key and data entry buffers into an entity Object.
  void EntityBinding.objectToKey(Object object, DatabaseEntry key) - Extracts the key entry from an entity Object.
  void EntityBinding.objectToData(Object object, DatabaseEntry data) - Extracts the data entry from an entity Object.
  Object ByteArrayBinding.entryToObject(DatabaseEntry entry)
  void ByteArrayBinding.objectToEntry(Object object, DatabaseEntry entry)
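EntryBinding is the interface behind most of the methods above: one call converts an entry buffer into an Object, the other converts an Object back into an entry buffer. Below is a minimal custom binding, shown only to illustrate the interface shape (the package already ships StringBinding for this job); the DatabaseEntry accessors getData(), getOffset(), getSize() and setData() are assumed from the core API.

    import com.sleepycat.bind.EntryBinding;
    import com.sleepycat.db.DatabaseEntry;

    // Stores a java.lang.String as UTF-8 bytes.
    public class Utf8StringBinding implements EntryBinding {
        public Object entryToObject(DatabaseEntry entry) {
            try {
                return new String(entry.getData(), entry.getOffset(), entry.getSize(), "UTF-8");
            } catch (java.io.UnsupportedEncodingException e) {
                throw new RuntimeException(e);   // UTF-8 is always available
            }
        }
        public void objectToEntry(Object object, DatabaseEntry entry) {
            try {
                entry.setData(((String) object).getBytes("UTF-8"));
            } catch (java.io.UnsupportedEncodingException e) {
                throw new RuntimeException(e);
            }
        }
    }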

Uses of DatabaseEntry in com.sleepycat.bind.serial

Methods in com.sleepycat.bind.serial with parameters of type DatabaseEntry:
  boolean TupleSerialKeyCreator.createSecondaryKey(SecondaryDatabase db, DatabaseEntry primaryKeyEntry, DatabaseEntry dataEntry, DatabaseEntry indexKeyEntry)
  boolean TupleSerialKeyCreator.nullifyForeignKey(SecondaryDatabase db, DatabaseEntry dataEntry)
  Object TupleSerialBinding.entryToObject(DatabaseEntry key, DatabaseEntry data)
  void TupleSerialBinding.objectToKey(Object object, DatabaseEntry key)
  void TupleSerialBinding.objectToData(Object object, DatabaseEntry data)
  boolean SerialSerialKeyCreator.createSecondaryKey(SecondaryDatabase db, DatabaseEntry primaryKeyEntry, DatabaseEntry dataEntry, DatabaseEntry indexKeyEntry)
  boolean SerialSerialKeyCreator.nullifyForeignKey(SecondaryDatabase db, DatabaseEntry dataEntry)
  Object SerialSerialBinding.entryToObject(DatabaseEntry key, DatabaseEntry data)
  void SerialSerialBinding.objectToKey(Object object, DatabaseEntry key)
  void SerialSerialBinding.objectToData(Object object, DatabaseEntry data)
  Object SerialBinding.entryToObject(DatabaseEntry entry) - Deserialize an object from an entry buffer.
  void SerialBinding.objectToEntry(Object object, DatabaseEntry entry) - Serialize an object into an entry buffer.
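The serial bindings store Java-serialized objects, with class descriptions kept in a separate catalog database. A hedged sketch of a round trip through SerialBinding follows; the SerialBinding(ClassCatalog, Class) constructor is assumed from the com.sleepycat.bind.serial package, and catalogDb stands for an already-open catalog Database supplied by the caller.

    import java.io.Serializable;
    import com.sleepycat.bind.serial.SerialBinding;
    import com.sleepycat.bind.serial.StoredClassCatalog;
    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseEntry;

    public class SerialExample {
        // Round-trips a Serializable value through a DatabaseEntry using the
        // class catalog stored in catalogDb.
        static Serializable roundTrip(Database catalogDb, Serializable value) throws Exception {
            StoredClassCatalog catalog = new StoredClassCatalog(catalogDb);
            // SerialBinding(ClassCatalog, Class) is assumed from the bind.serial package.
            SerialBinding binding = new SerialBinding(catalog, value.getClass());
            DatabaseEntry entry = new DatabaseEntry();
            binding.objectToEntry(value, entry);                  // serialize into the entry buffer
            return (Serializable) binding.entryToObject(entry);   // and deserialize it back
        }
    }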

Uses of DatabaseEntry in com.sleepycat.bind.tuple

Methods in com.sleepycat.bind.tuple with parameters of type DatabaseEntry:
  boolean TupleTupleKeyCreator.createSecondaryKey(SecondaryDatabase db, DatabaseEntry primaryKeyEntry, DatabaseEntry dataEntry, DatabaseEntry indexKeyEntry)
  boolean TupleTupleKeyCreator.nullifyForeignKey(SecondaryDatabase db, DatabaseEntry dataEntry)
  Object TupleTupleBinding.entryToObject(DatabaseEntry key, DatabaseEntry data)
  void TupleTupleBinding.objectToKey(Object object, DatabaseEntry key)
  void TupleTupleBinding.objectToData(Object object, DatabaseEntry data)
  Object TupleInputBinding.entryToObject(DatabaseEntry entry)
  void TupleInputBinding.objectToEntry(Object object, DatabaseEntry entry)
  Object TupleBinding.entryToObject(DatabaseEntry entry)
  void TupleBinding.objectToEntry(Object object, DatabaseEntry entry)
  static void TupleBinding.outputToEntry(TupleOutput output, DatabaseEntry entry) - Utility method to set the data in an entry buffer to the data in a tuple output object.
  static void TupleBinding.inputToEntry(TupleInput input, DatabaseEntry entry) - Utility method to set the data in an entry buffer to the data in a tuple input object.
  static TupleInput TupleBinding.entryToInput(DatabaseEntry entry) - Utility method to create a new tuple input object for reading the data from a given buffer.
  void StringBinding.objectToEntry(Object object, DatabaseEntry entry)
  static String StringBinding.entryToString(DatabaseEntry entry) - Converts an entry buffer into a simple String value.
  static void StringBinding.stringToEntry(String val, DatabaseEntry entry) - Converts a simple String value into an entry buffer.
  void ShortBinding.objectToEntry(Object object, DatabaseEntry entry)
  static short ShortBinding.entryToShort(DatabaseEntry entry) - Converts an entry buffer into a simple short value.
  static void ShortBinding.shortToEntry(short val, DatabaseEntry entry) - Converts a simple short value into an entry buffer.
  void LongBinding.objectToEntry(Object object, DatabaseEntry entry)
  static long LongBinding.entryToLong(DatabaseEntry entry) - Converts an entry buffer into a simple long value.
  static void LongBinding.longToEntry(long val, DatabaseEntry entry) - Converts a simple long value into an entry buffer.
  void IntegerBinding.objectToEntry(Object object, DatabaseEntry entry)
  static int IntegerBinding.entryToInt(DatabaseEntry entry) - Converts an entry buffer into a simple int value.
  static void IntegerBinding.intToEntry(int val, DatabaseEntry entry) - Converts a simple int value into an entry buffer.
  void FloatBinding.objectToEntry(Object object, DatabaseEntry entry)
  static float FloatBinding.entryToFloat(DatabaseEntry entry) - Converts an entry buffer into a simple float value.
  static void FloatBinding.floatToEntry(float val, DatabaseEntry entry) - Converts a simple float value into an entry buffer.
  void DoubleBinding.objectToEntry(Object object, DatabaseEntry entry)
  static double DoubleBinding.entryToDouble(DatabaseEntry entry) - Converts an entry buffer into a simple double value.
  static void DoubleBinding.doubleToEntry(double val, DatabaseEntry entry) - Converts a simple double value into an entry buffer.
  void CharacterBinding.objectToEntry(Object object, DatabaseEntry entry)
  static char CharacterBinding.entryToChar(DatabaseEntry entry) - Converts an entry buffer into a simple char value.
  static void CharacterBinding.charToEntry(char val, DatabaseEntry entry) - Converts a simple char value into an entry buffer.
  void ByteBinding.objectToEntry(Object object, DatabaseEntry entry)
  static byte ByteBinding.entryToByte(DatabaseEntry entry) - Converts an entry buffer into a simple byte value.
  static void ByteBinding.byteToEntry(byte val, DatabaseEntry entry) - Converts a simple byte value into an entry buffer.
  void BooleanBinding.objectToEntry(Object object, DatabaseEntry entry)
  static boolean BooleanBinding.entryToBoolean(DatabaseEntry entry) - Converts an entry buffer into a simple boolean value.
  static void BooleanBinding.booleanToEntry(boolean val, DatabaseEntry entry) - Converts a simple boolean value into an entry buffer.
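The static helpers above make single-field keys and values easy to pack without writing a binding class. A small, self-contained example using IntegerBinding and StringBinding, built only from the methods listed in this table:

    import com.sleepycat.bind.tuple.IntegerBinding;
    import com.sleepycat.bind.tuple.StringBinding;
    import com.sleepycat.db.DatabaseEntry;

    public class TupleExample {
        public static void main(String[] args) {
            DatabaseEntry key = new DatabaseEntry();
            DatabaseEntry data = new DatabaseEntry();

            // Pack primitive values into entry buffers...
            IntegerBinding.intToEntry(42, key);
            StringBinding.stringToEntry("forty-two", data);

            // ...and unpack them again.
            int k = IntegerBinding.entryToInt(key);
            String v = StringBinding.entryToString(data);
            System.out.println(k + " -> " + v);
        }
    }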

Uses of DatabaseEntry in com.sleepycat.collections

Methods in com.sleepycat.collections with parameters of type DatabaseEntry:
  void PrimaryKeyAssigner.assignKey(DatabaseEntry keyData) - Assigns a new primary key value into the given data buffer.
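PrimaryKeyAssigner is the hook the collections API calls when a stored collection needs a fresh primary key. A minimal sketch that hands out sequential integer keys is shown below; how the assigner is registered (through the stored collection constructors) is not covered by this page and is assumed from the collections API.

    import com.sleepycat.bind.tuple.IntegerBinding;
    import com.sleepycat.collections.PrimaryKeyAssigner;
    import com.sleepycat.db.DatabaseEntry;
    import com.sleepycat.db.DatabaseException;

    // Hands out sequential integer keys from an in-memory counter.
    public class CounterKeyAssigner implements PrimaryKeyAssigner {
        private int next = 1;
        public synchronized void assignKey(DatabaseEntry keyData) throws DatabaseException {
            IntegerBinding.intToEntry(next++, keyData);
        }
    }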

Uses of DatabaseEntry in com.sleepycat.db

Subclasses of DatabaseEntry in com.sleepycat.db:
  class MultipleDataEntry - A DatabaseEntry that holds multiple data items returned by a single Database or Cursor get call.
  class MultipleEntry - An abstract class representing a DatabaseEntry that holds multiple results returned by a single Cursor get method.
  class MultipleKeyDataEntry - A DatabaseEntry that holds multiple key/data pairs returned by a single Database or Cursor get call.
  class MultipleRecnoDataEntry - A DatabaseEntry that holds multiple record number/data pairs returned by a single Database or Cursor get call.

Methods in com.sleepycat.db that return DatabaseEntry:
  DatabaseEntry Sequence.getKey() - Return the DatabaseEntry used to open this sequence.
  DatabaseEntry ReplicationStatus.getCData() - Whenever the system receives contact information from a new environment, a copy of the opaque data specified in the cdata parameter to Environment.startReplication is available from the getCData method.
  DatabaseEntry MemoryException.getDatabaseEntry() - Returns the DatabaseEntry object with insufficient memory to complete the operation.
  DatabaseEntry LockRequest.getObj() - Return the lock object.
  DatabaseEntry LockNotGrantedException.getObj() - Returns the object parameter when Environment.getLock was called, and returns the object for the failed LockRequest when Environment.lockVector was called.

Methods in com.sleepycat.db with parameters of type DatabaseEntry:
  boolean SecondaryKeyCreator.createSecondaryKey(SecondaryDatabase secondary, DatabaseEntry key, DatabaseEntry data, DatabaseEntry result) - Creates a secondary key entry, given a primary key and data entry.
  OperationStatus SecondaryDatabase.get(Transaction txn, DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Retrieves the key/data pair with the given key.
  OperationStatus SecondaryDatabase.getSearchBoth(Transaction txn, DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Retrieves the key/data pair with the specified secondary and primary key, that is, both the primary and secondary key items must match.
  OperationStatus SecondaryDatabase.getSearchRecordNumber(Transaction txn, DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Retrieves the key/data pair associated with the specific numbered record of the database.
  OperationStatus SecondaryCursor.getCurrent(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Returns the key/data pair to which the cursor refers.
  OperationStatus SecondaryCursor.getFirst(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Move the cursor to the first key/data pair of the database, and return that pair.
  OperationStatus SecondaryCursor.getLast(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Move the cursor to the last key/data pair of the database, and return that pair.
  OperationStatus SecondaryCursor.getNext(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Move the cursor to the next key/data pair and return that pair.
  OperationStatus SecondaryCursor.getNextDup(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - If the next key/data pair of the database is a duplicate data record for the current key/data pair, move the cursor to the next key/data pair of the database and return that pair.
  OperationStatus SecondaryCursor.getNextNoDup(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Move the cursor to the next non-duplicate key/data pair and return that pair.
  OperationStatus SecondaryCursor.getPrev(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Move the cursor to the previous key/data pair and return that pair.
  OperationStatus SecondaryCursor.getPrevDup(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - If the previous key/data pair of the database is a duplicate data record for the current key/data pair, move the cursor to the previous key/data pair of the database and return that pair.
  OperationStatus SecondaryCursor.getPrevNoDup(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Move the cursor to the previous non-duplicate key/data pair and return that pair.
  OperationStatus SecondaryCursor.getSearchKey(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Move the cursor to the given key of the database, and return the datum associated with the given key.
  OperationStatus SecondaryCursor.getSearchKeyRange(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Move the cursor to the closest matching key of the database, and return the data item associated with the matching key.
  OperationStatus SecondaryCursor.getSearchBoth(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Move the cursor to the specified secondary and primary key, where both the primary and secondary key items must match.
  OperationStatus SecondaryCursor.getSearchBothRange(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Move the cursor to the specified secondary key and closest matching primary key of the database.
  OperationStatus SecondaryCursor.getRecordNumber(DatabaseEntry secondaryRecno, DatabaseEntry primaryRecno, LockMode lockMode) - Return the record number associated with the cursor.
  OperationStatus SecondaryCursor.getSearchRecordNumber(DatabaseEntry secondaryRecno, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Move the cursor to the specific numbered record of the database, and return the associated key/data pair.
  int ReplicationTransport.send(Environment environment, DatabaseEntry control, DatabaseEntry rec, LogSequenceNumber lsn, int envid, boolean noBuffer, boolean permanent) - The callback used when Berkeley DB needs to transmit a replication message.
  void RecordNumberAppender.appendRecordNumber(Database db, DatabaseEntry data, int recno) - A callback function to modify the stored database based on the generated key.
  boolean MultipleRecnoDataEntry.next(DatabaseEntry recno, DatabaseEntry data) - Get the next record number/data pair in the returned set.
  boolean MultipleKeyDataEntry.next(DatabaseEntry key, DatabaseEntry data) - Get the next key/data pair in the returned set.
  boolean MultipleDataEntry.next(DatabaseEntry data) - Get the next data element in the returned set.
  int LogRecordHandler.handleLogRecord(Environment environment, DatabaseEntry logRecord, LogSequenceNumber lsn, RecoveryOperation operation)
  OperationStatus LogCursor.getCurrent(LogSequenceNumber lsn, DatabaseEntry data) - Return the LogSequenceNumber and log record to which the log cursor currently refers.
  OperationStatus LogCursor.getNext(LogSequenceNumber lsn, DatabaseEntry data) - Return the next LogSequenceNumber and log record.
  OperationStatus LogCursor.getFirst(LogSequenceNumber lsn, DatabaseEntry data) - Return the first LogSequenceNumber and log record.
  OperationStatus LogCursor.getLast(LogSequenceNumber lsn, DatabaseEntry data) - Return the last LogSequenceNumber and log record.
  OperationStatus LogCursor.getPrev(LogSequenceNumber lsn, DatabaseEntry data) - Return the previous LogSequenceNumber and log record.
  OperationStatus LogCursor.set(LogSequenceNumber lsn, DatabaseEntry data) - Return a specific log record.
  void LockRequest.setObj(DatabaseEntry obj) - Set the lock object.
  OperationStatus JoinCursor.getNext(DatabaseEntry key, LockMode lockMode) - Returns the next primary key resulting from the join operation.
  OperationStatus JoinCursor.getNext(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Returns the next primary key and data resulting from the join operation.
  Lock Environment.getLock(int locker, boolean noWait, DatabaseEntry object, LockRequestMode mode) - Acquire a lock from the lock table.
  ReplicationStatus Environment.processReplicationMessage(DatabaseEntry control, DatabaseEntry rec, int envid) - Process an incoming replication message sent by a member of the replication group to the local database environment.
  void Environment.startReplication(DatabaseEntry cdata, boolean master) - Configure the database environment as a client or master in a group of replicated database environments.
  LogSequenceNumber Environment.logPut(DatabaseEntry data, boolean flush) - Append a record to the log.
  OperationStatus Database.delete(Transaction txn, DatabaseEntry key) - Remove key/data pairs from the database.
  OperationStatus Database.get(Transaction txn, DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Retrieves the key/data pair with the given key.
  OperationStatus Database.getSearchBoth(Transaction txn, DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Retrieves the key/data pair with the given key and data value, that is, both the key and data items must match.
  OperationStatus Database.put(Transaction txn, DatabaseEntry key, DatabaseEntry data) - Store the key/data pair into the database.
  OperationStatus Database.append(Transaction txn, DatabaseEntry key, DatabaseEntry data) - Append the key/data pair to the end of the database.
  OperationStatus Database.putNoOverwrite(Transaction txn, DatabaseEntry key, DatabaseEntry data) - Store the key/data pair into the database if the key does not already appear in the database.
  OperationStatus Database.putNoDupData(Transaction txn, DatabaseEntry key, DatabaseEntry data) - Store the key/data pair into the database if it does not already appear in the database.
  Sequence Database.openSequence(Transaction txn, DatabaseEntry key, SequenceConfig sequenceConfig) - Open a sequence in the database.
  void Database.removeSequence(Transaction txn, DatabaseEntry key, SequenceConfig sequenceConfig) - Remove the sequence from the database.
  OperationStatus Database.consume(Transaction txn, DatabaseEntry key, DatabaseEntry data, boolean wait) - Return the record number and data from the available record closest to the head of the queue, and delete the record.
  KeyRange Database.getKeyRange(Transaction txn, DatabaseEntry key) - Return an estimate of the proportion of keys in the database less than, equal to, and greater than the specified key.
  OperationStatus Database.getSearchRecordNumber(Transaction txn, DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Retrieves the key/data pair associated with the specific numbered record of the database.
  OperationStatus Cursor.put(DatabaseEntry key, DatabaseEntry data) - Store a key/data pair into the database.
  OperationStatus Cursor.putNoOverwrite(DatabaseEntry key, DatabaseEntry data) - Store a key/data pair into the database.
  OperationStatus Cursor.putNoDupData(DatabaseEntry key, DatabaseEntry data) - Store a key/data pair into the database.
  OperationStatus Cursor.putCurrent(DatabaseEntry data) - Store a key/data pair into the database.
  OperationStatus Cursor.putAfter(DatabaseEntry data) - Store a key/data pair into the database.
  OperationStatus Cursor.putBefore(DatabaseEntry data) - Store a key/data pair into the database.
  OperationStatus Cursor.putKeyFirst(DatabaseEntry key, DatabaseEntry data) - Store a key/data pair into the database.
  OperationStatus Cursor.putKeyLast(DatabaseEntry key, DatabaseEntry data) - Store a key/data pair into the database.
  OperationStatus Cursor.getCurrent(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Returns the key/data pair to which the cursor refers.
  OperationStatus Cursor.getFirst(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Move the cursor to the first key/data pair of the database, and return that pair.
  OperationStatus Cursor.getLast(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Move the cursor to the last key/data pair of the database, and return that pair.
  OperationStatus Cursor.getNext(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Move the cursor to the next key/data pair and return that pair.
  OperationStatus Cursor.getNextDup(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - If the next key/data pair of the database is a duplicate data record for the current key/data pair, move the cursor to the next key/data pair of the database and return that pair.
  OperationStatus Cursor.getNextNoDup(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Move the cursor to the next non-duplicate key/data pair and return that pair.
  OperationStatus Cursor.getPrev(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Move the cursor to the previous key/data pair and return that pair.
  OperationStatus Cursor.getPrevDup(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - If the previous key/data pair of the database is a duplicate data record for the current key/data pair, move the cursor to the previous key/data pair of the database and return that pair.
  OperationStatus Cursor.getPrevNoDup(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Move the cursor to the previous non-duplicate key/data pair and return that pair.
  OperationStatus Cursor.getSearchKey(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Move the cursor to the given key of the database, and return the datum associated with the given key.
  OperationStatus Cursor.getSearchKeyRange(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Move the cursor to the closest matching key of the database, and return the data item associated with the matching key.
  OperationStatus Cursor.getSearchBoth(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Move the cursor to the specified key/data pair, where both the key and data items must match.
  OperationStatus Cursor.getSearchBothRange(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Move the cursor to the specified key and closest matching data item of the database.
  OperationStatus Cursor.getRecordNumber(DatabaseEntry data, LockMode lockMode) - Return the record number associated with the cursor.
  OperationStatus Cursor.getSearchRecordNumber(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Move the cursor to the specific numbered record of the database, and return the associated key/data pair.
  int BtreePrefixCalculator.prefix(Database db, DatabaseEntry dbt1, DatabaseEntry dbt2) - The application-specific Btree prefix callback.
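Most of the methods above follow one pattern: the caller supplies DatabaseEntry buffers, the method fills them and reports an OperationStatus. A minimal sketch of a store, a point lookup, and a cursor scan follows; passing null for the Transaction and LockMode arguments to request default behavior, and the OperationStatus.SUCCESS constant, are assumed from the wider API rather than taken from this page.

    import com.sleepycat.bind.tuple.IntegerBinding;
    import com.sleepycat.bind.tuple.StringBinding;
    import com.sleepycat.db.Cursor;
    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseEntry;
    import com.sleepycat.db.OperationStatus;

    public class PutGetExample {
        // db is an already-open, non-transactional Database handle.
        static void demo(Database db) throws Exception {
            DatabaseEntry key = new DatabaseEntry();
            DatabaseEntry data = new DatabaseEntry();

            // Store one record: put(Transaction, DatabaseEntry, DatabaseEntry).
            IntegerBinding.intToEntry(1, key);
            StringBinding.stringToEntry("hello", data);
            db.put(null, key, data);

            // Point lookup: get(Transaction, DatabaseEntry, DatabaseEntry, LockMode).
            if (db.get(null, key, data, null) == OperationStatus.SUCCESS) {
                System.out.println(StringBinding.entryToString(data));
            }

            // Full scan with a cursor.
            Cursor cursor = db.openCursor(null, null);
            try {
                while (cursor.getNext(key, data, null) == OperationStatus.SUCCESS) {
                    System.out.println(IntegerBinding.entryToInt(key) + " -> "
                            + StringBinding.entryToString(data));
                }
            } finally {
                cursor.close();
            }
        }
    }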

Constructors in com.sleepycat.db with parameters of type DatabaseEntry:
  LockRequest(LockOperation op, LockRequestMode mode, DatabaseEntry obj, Lock lock) - Construct a LockRequest with the specified operation, mode and lock, for the specified object.
  LockRequest(LockOperation op, LockRequestMode mode, DatabaseEntry obj, Lock lock, int timeout) - Construct a LockRequest with the specified operation, mode, lock and timeout for the specified object.

Berkeley DB version 4.3.14
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/db/class-use/DatabaseException.html b/db/docs/java/com/sleepycat/db/class-use/DatabaseException.html
new file mode 100644
index 000000000..51494412a
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/DatabaseException.html
@@ -0,0 +1,1950 @@

Uses of Class com.sleepycat.db.DatabaseException (Sleepycat Software, Inc. - Berkeley DB Java API)
Uses of Class
com.sleepycat.db.DatabaseException
Packages that use DatabaseException:
  com.sleepycat.bind.serial - Bindings that use Java serialization.
  com.sleepycat.bind.tuple - Bindings that use sequences of primitive fields, or tuples.
  com.sleepycat.collections - Data access based on the standard Java collections API [reference guide]
  com.sleepycat.db - Berkeley DB Java API [reference guide] [Java programming notes]

Uses of DatabaseException in com.sleepycat.bind.serial

Methods in com.sleepycat.bind.serial that throw DatabaseException:
  boolean TupleSerialKeyCreator.createSecondaryKey(SecondaryDatabase db, DatabaseEntry primaryKeyEntry, DatabaseEntry dataEntry, DatabaseEntry indexKeyEntry)
  boolean TupleSerialKeyCreator.nullifyForeignKey(SecondaryDatabase db, DatabaseEntry dataEntry)
  void StoredClassCatalog.close()
  byte[] StoredClassCatalog.getClassID(ObjectStreamClass classFormat)
  ObjectStreamClass StoredClassCatalog.getClassFormat(byte[] classID)
  boolean SerialSerialKeyCreator.createSecondaryKey(SecondaryDatabase db, DatabaseEntry primaryKeyEntry, DatabaseEntry dataEntry, DatabaseEntry indexKeyEntry)
  boolean SerialSerialKeyCreator.nullifyForeignKey(SecondaryDatabase db, DatabaseEntry dataEntry)
  void ClassCatalog.close() - Close a catalog database and release any cached resources.
  byte[] ClassCatalog.getClassID(ObjectStreamClass classDesc) - Return the class ID for the current version of the given class description.
  ObjectStreamClass ClassCatalog.getClassFormat(byte[] classID) - Return the ObjectStreamClass for the given class ID.

Constructors in com.sleepycat.bind.serial that throw DatabaseException:
  StoredClassCatalog(Database database) - Creates a catalog based on a given database.

Uses of DatabaseException in com.sleepycat.bind.tuple

Methods in com.sleepycat.bind.tuple that throw DatabaseException:
  boolean TupleTupleKeyCreator.createSecondaryKey(SecondaryDatabase db, DatabaseEntry primaryKeyEntry, DatabaseEntry dataEntry, DatabaseEntry indexKeyEntry)
  boolean TupleTupleKeyCreator.nullifyForeignKey(SecondaryDatabase db, DatabaseEntry dataEntry)

Uses of DatabaseException in com.sleepycat.collections

Methods in com.sleepycat.collections that throw DatabaseException:
  void TransactionRunner.run(TransactionWorker worker) - Calls the TransactionWorker.doWork() method and, for transactional environments, begins and ends a transaction.
  void PrimaryKeyAssigner.assignKey(DatabaseEntry keyData) - Assigns a new primary key value into the given data buffer.
  Transaction CurrentTransaction.beginTransaction(TransactionConfig config) - Begins a new transaction for this environment and associates it with the current thread.
  Transaction CurrentTransaction.commitTransaction() - Commits the transaction that is active for the current thread for this environment and makes the parent transaction (if any) the current transaction.
  Transaction CurrentTransaction.abortTransaction() - Aborts the transaction that is active for the current thread for this environment and makes the parent transaction (if any) the current transaction.
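TransactionRunner wraps the begin/commit/abort bookkeeping around a unit of work. A hedged sketch follows; the TransactionRunner(Environment) constructor and the TransactionWorker.doWork() throws clause are assumed from the collections API, not taken from this page.

    import com.sleepycat.collections.TransactionRunner;
    import com.sleepycat.collections.TransactionWorker;
    import com.sleepycat.db.Environment;

    public class RunnerExample {
        // env is an already-open, transactional Environment.
        static void runInTransaction(Environment env, final Runnable work) throws Exception {
            // TransactionRunner(Environment) is assumed from the collections API.
            TransactionRunner runner = new TransactionRunner(env);
            runner.run(new TransactionWorker() {
                public void doWork() throws Exception {
                    // Everything done here runs inside one transaction, which the
                    // runner begins before doWork() and ends afterwards (see the
                    // run() summary above); it may retry the work if the
                    // transaction deadlocks (collections API behavior).
                    work.run();
                }
            });
        }
    }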

Uses of DatabaseException in com.sleepycat.db

Subclasses of DatabaseException in com.sleepycat.db:
  class DeadlockException - DeadlockException is thrown to a thread of control when multiple threads competing for a lock are deadlocked, when a lock request has timed out or when a lock request would need to block and the transaction has been configured to not wait for locks.
  class LockNotGrantedException - A LockNotGrantedException is thrown when a lock requested using the Environment.getLock or Environment.lockVector methods, where the noWait flag or lock timers were configured, could not be granted before the wait-time expired.
  class MemoryException - This exception is thrown when a DatabaseEntry passed to a Database or Cursor method is not large enough to hold a value being returned.
  class ReplicationHandleDeadException - Thrown when a database handle has been invalidated because a replication election unrolled a committed transaction.
  class RunRecoveryException - Thrown when the database environment needs to be recovered.
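DeadlockException is the subclass applications most often handle directly: the usual response is to abort the transaction and retry the work. A minimal sketch built only from methods listed in these pages (Environment.beginTransaction, Database.put, Transaction.commit and Transaction.abort); the retry limit is an illustrative choice.

    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseEntry;
    import com.sleepycat.db.DeadlockException;
    import com.sleepycat.db.Environment;
    import com.sleepycat.db.Transaction;

    public class RetryExample {
        // Writes one key/data pair, retrying the whole transaction when this
        // thread is chosen as a deadlock victim.
        static void putWithRetry(Environment env, Database db,
                                 DatabaseEntry key, DatabaseEntry data,
                                 int maxRetries) throws Exception {
            for (int attempt = 0; ; attempt++) {
                Transaction txn = env.beginTransaction(null, null);
                try {
                    db.put(txn, key, data);
                    txn.commit();
                    return;
                } catch (DeadlockException lockConflict) {
                    txn.abort();                       // release this attempt's locks
                    if (attempt >= maxRetries) {
                        throw lockConflict;            // give up after maxRetries attempts
                    }
                    // otherwise fall through and try again
                }
            }
        }
    }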

Methods in com.sleepycat.db with parameters of type DatabaseException:
  void PanicHandler.panic(Environment environment, DatabaseException e) - A function to be called if the database environment panics.

Methods in com.sleepycat.db that throw DatabaseException:
  int Transaction.getId() - Return the transaction's unique ID.
  void Transaction.abort() - Cause an abnormal termination of the transaction.
  void Transaction.commit() - End the transaction.
  void Transaction.commitSync() - End the transaction, committing synchronously.
  void Transaction.commitNoSync() - End the transaction, not committing synchronously.
  void Transaction.setTxnTimeout(long timeOut) - Configure the timeout value for the transaction lifetime.
  void Transaction.setLockTimeout(long timeOut) - Configure the lock request timeout value for the transaction.
  void Transaction.discard() - Free up all the per-process resources associated with the specified Transaction handle, neither committing nor aborting the transaction.
  void Transaction.prepare(byte[] gid) - Initiate the beginning of a two-phase commit.
  void Sequence.close() - Close a sequence.
  long Sequence.get(Transaction txn, int delta) - Return the next available element in the sequence and change the sequence value by delta.
  Database Sequence.getDatabase() - Return the Database handle associated with this sequence.
  DatabaseEntry Sequence.getKey() - Return the DatabaseEntry used to open this sequence.
  SequenceStats Sequence.getStats(StatsConfig config) - Return statistical information about the sequence.
  boolean SecondaryKeyCreator.createSecondaryKey(SecondaryDatabase secondary, DatabaseEntry key, DatabaseEntry data, DatabaseEntry result) - Creates a secondary key entry, given a primary key and data entry.
  SecondaryConfig SecondaryDatabase.getSecondaryConfig() - Returns a copy of the secondary configuration of this database.
  OperationStatus SecondaryDatabase.get(Transaction txn, DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Retrieves the key/data pair with the given key.
  OperationStatus SecondaryDatabase.getSearchBoth(Transaction txn, DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Retrieves the key/data pair with the specified secondary and primary key, that is, both the primary and secondary key items must match.
  Database SecondaryDatabase.getPrimaryDatabase() - Returns the primary database associated with this secondary database.
  SecondaryCursor SecondaryDatabase.openSecondaryCursor(Transaction txn, CursorConfig cursorConfig) - Obtain a cursor on a database, returning a SecondaryCursor.
  OperationStatus SecondaryDatabase.getSearchRecordNumber(Transaction txn, DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Retrieves the key/data pair associated with the specific numbered record of the database.
  Cursor SecondaryCursor.dup(boolean samePosition) - Returns a new SecondaryCursor for the same transaction as the original cursor.
  SecondaryCursor SecondaryCursor.dupSecondary(boolean samePosition) - Returns a new copy of the cursor as a SecondaryCursor.
  OperationStatus SecondaryCursor.getCurrent(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Returns the key/data pair to which the cursor refers.
  OperationStatus SecondaryCursor.getFirst(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Move the cursor to the first key/data pair of the database, and return that pair.
  OperationStatus SecondaryCursor.getLast(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Move the cursor to the last key/data pair of the database, and return that pair.
  OperationStatus SecondaryCursor.getNext(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Move the cursor to the next key/data pair and return that pair.
  OperationStatus SecondaryCursor.getNextDup(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - If the next key/data pair of the database is a duplicate data record for the current key/data pair, move the cursor to the next key/data pair of the database and return that pair.
  OperationStatus SecondaryCursor.getNextNoDup(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Move the cursor to the next non-duplicate key/data pair and return that pair.
  OperationStatus SecondaryCursor.getPrev(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Move the cursor to the previous key/data pair and return that pair.
  OperationStatus SecondaryCursor.getPrevDup(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - If the previous key/data pair of the database is a duplicate data record for the current key/data pair, move the cursor to the previous key/data pair of the database and return that pair.
  OperationStatus SecondaryCursor.getPrevNoDup(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Move the cursor to the previous non-duplicate key/data pair and return that pair.
  OperationStatus SecondaryCursor.getSearchKey(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Move the cursor to the given key of the database, and return the datum associated with the given key.
  OperationStatus SecondaryCursor.getSearchKeyRange(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Move the cursor to the closest matching key of the database, and return the data item associated with the matching key.
  OperationStatus SecondaryCursor.getSearchBoth(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Move the cursor to the specified secondary and primary key, where both the primary and secondary key items must match.
  OperationStatus SecondaryCursor.getSearchBothRange(DatabaseEntry key, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Move the cursor to the specified secondary key and closest matching primary key of the database.
  OperationStatus SecondaryCursor.getRecordNumber(DatabaseEntry secondaryRecno, DatabaseEntry primaryRecno, LockMode lockMode) - Return the record number associated with the cursor.
  OperationStatus SecondaryCursor.getSearchRecordNumber(DatabaseEntry secondaryRecno, DatabaseEntry pKey, DatabaseEntry data, LockMode lockMode) - Move the cursor to the specific numbered record of the database, and return the associated key/data pair.
  int ReplicationTransport.send(Environment environment, DatabaseEntry control, DatabaseEntry rec, LogSequenceNumber lsn, int envid, boolean noBuffer, boolean permanent) - The callback used when Berkeley DB needs to transmit a replication message.
  void RecordNumberAppender.appendRecordNumber(Database db, DatabaseEntry data, int recno) - A callback function to modify the stored database based on the generated key.
  void LogCursor.close() - Close the log cursor.
  OperationStatus LogCursor.getCurrent(LogSequenceNumber lsn, DatabaseEntry data) - Return the LogSequenceNumber and log record to which the log cursor currently refers.
  OperationStatus LogCursor.getNext(LogSequenceNumber lsn, DatabaseEntry data) - Return the next LogSequenceNumber and log record.
  OperationStatus LogCursor.getFirst(LogSequenceNumber lsn, DatabaseEntry data) - Return the first LogSequenceNumber and log record.
  OperationStatus LogCursor.getLast(LogSequenceNumber lsn, DatabaseEntry data) - Return the last LogSequenceNumber and log record.
  OperationStatus LogCursor.getPrev(LogSequenceNumber lsn, DatabaseEntry data) - Return the previous LogSequenceNumber and log record.
  OperationStatus LogCursor.set(LogSequenceNumber lsn, DatabaseEntry data) - Return a specific log record.
  void JoinCursor.close() - Closes the cursors that have been opened by this join cursor.
  OperationStatus JoinCursor.getNext(DatabaseEntry key, LockMode lockMode) - Returns the next primary key resulting from the join operation.
  OperationStatus JoinCursor.getNext(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Returns the next primary key and data resulting from the join operation.
  File Environment.getHome() - Return the database environment's home directory.
  EnvironmentConfig Environment.getConfig() - Return this object's configuration.
  Transaction Environment.beginTransaction(Transaction parent, TransactionConfig txnConfig) - Create a new transaction in the database environment.
  void Environment.checkpoint(CheckpointConfig checkpointConfig) - Synchronously checkpoint the database environment.
  LockStats Environment.getLockStats(StatsConfig config) - Return the database environment's locking statistics.
  TransactionStats Environment.getTransactionStats(StatsConfig config) - Return the database environment's transactional statistics.
  void Environment.close() - Close the database environment, freeing any allocated resources and closing any underlying subsystems.
  static void Environment.remove(File home, boolean force, EnvironmentConfig config) - Destroy a database environment.
  void Environment.setConfig(EnvironmentConfig config) - Change the settings in an existing environment handle.
  Database Environment.openDatabase(Transaction txn, String fileName, String databaseName, DatabaseConfig config) - Open a database.
  SecondaryDatabase Environment.openSecondaryDatabase(Transaction txn, String fileName, String databaseName, Database primaryDatabase, SecondaryConfig config) - Open a database.
  void Environment.removeDatabase(Transaction txn, String fileName, String databaseName) - Remove a database.
  void Environment.renameDatabase(Transaction txn, String fileName, String oldDatabaseName, String newDatabaseName) - Rename a database.
  int Environment.trickleCacheWrite(int percent) - Ensure that a specified percent of the pages in the shared memory pool are clean, by writing dirty pages to their backing files.
  int Environment.detectDeadlocks(LockDetectMode mode) - Run one iteration of the deadlock detector.
  Lock Environment.getLock(int locker, boolean noWait, DatabaseEntry object, LockRequestMode mode) - Acquire a lock from the lock table.
  void Environment.putLock(Lock lock) - Release a lock.
  int Environment.createLockerID() - Allocate a locker ID.
  void Environment.freeLockerID(int id) - Free a locker ID.
  void Environment.lockVector(int locker, boolean noWait, LockRequest[] list) - Atomically obtain and release one or more locks from the lock table.
  LogCursor Environment.openLogCursor() - Return a log cursor.
  String Environment.getLogFileName(LogSequenceNumber lsn) - Return the name of the log file that contains the log record specified by a LogSequenceNumber object.
  int Environment.electReplicationMaster(int nsites, int nvotes, int priority, int timeout) - Hold an election for the master of a replication group.
  ReplicationStatus Environment.processReplicationMessage(DatabaseEntry control, DatabaseEntry rec, int envid) - Process an incoming replication message sent by a member of the replication group to the local database environment.
  void Environment.startReplication(DatabaseEntry cdata, boolean master) - Configure the database environment as a client or master in a group of replicated database environments.
  CacheStats Environment.getCacheStats(StatsConfig config)
  CacheFileStats[] Environment.getCacheFileStats(StatsConfig config) - Return the database environment's per-file memory pool (that is, the buffer cache) statistics.
  LogStats Environment.getLogStats(StatsConfig config) - Return the database environment's logging statistics.
  ReplicationStats Environment.getReplicationStats(StatsConfig config) - Return the database environment's replication statistics.
  void Environment.logFlush(LogSequenceNumber lsn) - Flush log records to stable storage.
  LogSequenceNumber Environment.logPut(DatabaseEntry data, boolean flush) - Append a record to the log.
  File[] Environment.getArchiveLogFiles(boolean includeInUse) - Return the names of all of the log files that are no longer in use.
  File[] Environment.getArchiveDatabases() - Return the names of the database files that need to be archived in order to recover the database from catastrophic failure.
  void Environment.removeOldLogFiles() - Remove log files that are no longer needed.
  PreparedTransaction[] Environment.recover(int count, boolean continued) - Return a list of prepared but not yet resolved transactions.
  void Environment.panic(boolean onoff) - Set the panic state for the database environment.
  OperationStatus Database.delete(Transaction txn, DatabaseEntry key) - Remove key/data pairs from the database.
  OperationStatus Database.get(Transaction txn, DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Retrieves the key/data pair with the given key.
  OperationStatus Database.getSearchBoth(Transaction txn, DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Retrieves the key/data pair with the given key and data value, that is, both the key and data items must match.
  OperationStatus Database.put(Transaction txn, DatabaseEntry key, DatabaseEntry data) - Store the key/data pair into the database.
  OperationStatus Database.append(Transaction txn, DatabaseEntry key, DatabaseEntry data) - Append the key/data pair to the end of the database.
  OperationStatus Database.putNoOverwrite(Transaction txn, DatabaseEntry key, DatabaseEntry data) - Store the key/data pair into the database if the key does not already appear in the database.
  OperationStatus Database.putNoDupData(Transaction txn, DatabaseEntry key, DatabaseEntry data) - Store the key/data pair into the database if it does not already appear in the database.
  String Database.getDatabaseName() - Return the database name.
  DatabaseConfig Database.getConfig() - Return this Database object's configuration.
  Environment Database.getEnvironment() - Return the Environment handle for the database environment underlying the Database.
  void Database.close() - Flush any cached database information to disk and discard the database handle.
  void Database.close(boolean noSync) - Flush any cached database information to disk and discard the database handle.
  Cursor Database.openCursor(Transaction txn, CursorConfig cursorConfig) - Return a cursor into the database.
  Sequence Database.openSequence(Transaction txn, DatabaseEntry key, SequenceConfig sequenceConfig) - Open a sequence in the database.
  void Database.removeSequence(Transaction txn, DatabaseEntry key, SequenceConfig sequenceConfig) - Remove the sequence from the database.
  DatabaseStats Database.getStats(Transaction txn, StatsConfig statsConfig) - Return database statistics.
  int Database.truncate(Transaction txn, boolean returnCount) - Empty the database, discarding all records it contains.
  JoinCursor Database.join(Cursor[] cursors, JoinConfig config) - Creates a specialized join cursor for use in performing equality or natural joins on secondary indices.
  String Database.getDatabaseFile() - Return the database's underlying file name.
  void Database.setConfig(DatabaseConfig config) - Change the settings in an existing database handle.
  CacheFile Database.getCacheFile() - Return the handle for the cache file underlying the database.
  OperationStatus Database.consume(Transaction txn, DatabaseEntry key, DatabaseEntry data, boolean wait) - Return the record number and data from the available record closest to the head of the queue, and delete the record.
  KeyRange Database.getKeyRange(Transaction txn, DatabaseEntry key) - Return an estimate of the proportion of keys in the database less than, equal to, and greater than the specified key.
  OperationStatus Database.getSearchRecordNumber(Transaction txn, DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Retrieves the key/data pair associated with the specific numbered record of the database.
  static void Database.remove(String fileName, String databaseName, DatabaseConfig config) - Remove a database.
  static void Database.rename(String fileName, String oldDatabaseName, String newDatabaseName, DatabaseConfig config) - Rename a database.
  void Database.sync() - Flush any cached information to disk.
  static void Database.upgrade(String fileName, DatabaseConfig config) - Upgrade all of the databases included in the specified file.
  boolean Database.verify(String fileName, String databaseName, PrintStream dumpStream, VerifyConfig config) - Return if all of the databases in a file are uncorrupted.
  void Cursor.close() - Discard the cursor.
  int Cursor.count() - Return a count of the number of data items for the key to which the cursor refers.
  Cursor Cursor.dup(boolean samePosition) - Return a new cursor with the same transaction and locker ID as the original cursor.
  OperationStatus Cursor.delete() - Delete the key/data pair to which the cursor refers.
  OperationStatus Cursor.put(DatabaseEntry key, DatabaseEntry data) - Store a key/data pair into the database.
  OperationStatus Cursor.putNoOverwrite(DatabaseEntry key, DatabaseEntry data) - Store a key/data pair into the database.
  OperationStatus Cursor.putNoDupData(DatabaseEntry key, DatabaseEntry data) - Store a key/data pair into the database.
  OperationStatus Cursor.putCurrent(DatabaseEntry data) - Store a key/data pair into the database.
  OperationStatus Cursor.putAfter(DatabaseEntry data) - Store a key/data pair into the database.
  OperationStatus Cursor.putBefore(DatabaseEntry data) - Store a key/data pair into the database.
  OperationStatus Cursor.putKeyFirst(DatabaseEntry key, DatabaseEntry data) - Store a key/data pair into the database.
  OperationStatus Cursor.putKeyLast(DatabaseEntry key, DatabaseEntry data) - Store a key/data pair into the database.
  OperationStatus Cursor.getCurrent(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Returns the key/data pair to which the cursor refers.
  OperationStatus Cursor.getFirst(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Move the cursor to the first key/data pair of the database, and return that pair.
  OperationStatus Cursor.getLast(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Move the cursor to the last key/data pair of the database, and return that pair.
  OperationStatus Cursor.getNext(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Move the cursor to the next key/data pair and return that pair.
  OperationStatus Cursor.getNextDup(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - If the next key/data pair of the database is a duplicate data record for the current key/data pair, move the cursor to the next key/data pair of the database and return that pair.
  OperationStatus Cursor.getNextNoDup(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Move the cursor to the next non-duplicate key/data pair and return that pair.
  OperationStatus Cursor.getPrev(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Move the cursor to the previous key/data pair and return that pair.
  OperationStatus Cursor.getPrevDup(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - If the previous key/data pair of the database is a duplicate data record for the current key/data pair, move the cursor to the previous key/data pair of the database and return that pair.
  OperationStatus Cursor.getPrevNoDup(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Move the cursor to the previous non-duplicate key/data pair and return that pair.
  OperationStatus Cursor.getSearchKey(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Move the cursor to the given key of the database, and return the datum associated with the given key.
  OperationStatus Cursor.getSearchKeyRange(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Move the cursor to the closest matching key of the database, and return the data item associated with the matching key.
  OperationStatus Cursor.getSearchBoth(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Move the cursor to the specified key/data pair, where both the key and data items must match.
  OperationStatus Cursor.getSearchBothRange(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Move the cursor to the specified key and closest matching data item of the database.
  OperationStatus Cursor.getRecordNumber(DatabaseEntry data, LockMode lockMode) - Return the record number associated with the cursor.
  OperationStatus Cursor.getSearchRecordNumber(DatabaseEntry key, DatabaseEntry data, LockMode lockMode) - Move the cursor to the specific numbered record of the database, and return the associated key/data pair.
  CacheFilePriority CacheFile.getPriority() - Return the cache priority for pages from the specified file.
  void CacheFile.setPriority(CacheFilePriority priority) - Set the cache priority for pages from the specified file.
  long CacheFile.getMaximumSize() - Return the maximum size for the file backing the database, or 0 if no maximum file size has been configured.
  void CacheFile.setMaximumSize(long bytes) - Set the maximum size for the file backing the database.
  boolean CacheFile.getNoFile() - Return if the opening of backing temporary files for in-memory databases has been disallowed.
  void CacheFile.setNoFile(boolean onoff) - Disallow opening backing temporary files for in-memory databases, even if they expand to fill the entire cache.
  boolean CacheFile.getUnlink() - Return if the file will be removed when the last reference to it is closed.
  void CacheFile.setUnlink(boolean onoff) - Remove the file when the last reference to it is closed.

Constructors in com.sleepycat.db that throw DatabaseException:
  SecondaryDatabase(String fileName, String databaseName, Database primaryDatabase, SecondaryConfig config) - Open a database.
  Environment(File envHome, EnvironmentConfig envConfig) - Create a database environment handle.
  Database(String fileName, String databaseName, DatabaseConfig config) - Open a database.
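The constructors above are where most applications first meet DatabaseException. A hedged sketch of opening an environment and a database inside it follows; the EnvironmentConfig and DatabaseConfig setters used (setAllowCreate, setInitializeCache) are assumed from the wider 4.3 API, and the paths "/tmp/dbenv" and "test.db" are illustrative only.

    import java.io.File;
    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseConfig;
    import com.sleepycat.db.DatabaseException;
    import com.sleepycat.db.DatabaseType;
    import com.sleepycat.db.Environment;
    import com.sleepycat.db.EnvironmentConfig;

    public class EnvExample {
        public static void main(String[] args) throws Exception {
            EnvironmentConfig envConfig = new EnvironmentConfig();
            envConfig.setAllowCreate(true);       // assumed setter: create the environment
            envConfig.setInitializeCache(true);   // assumed setter: initialize the memory pool

            DatabaseConfig dbConfig = new DatabaseConfig();
            dbConfig.setAllowCreate(true);        // assumed setter
            dbConfig.setType(DatabaseType.BTREE);

            Environment env = new Environment(new File("/tmp/dbenv"), envConfig);
            Database db = null;
            try {
                db = env.openDatabase(null, "test.db", null, dbConfig);
                // ... use the handle ...
            } catch (DatabaseException e) {
                System.err.println("Database error: " + e);
            } finally {
                if (db != null) db.close();
                env.close();
            }
        }
    }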

Berkeley DB version 4.3.14
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.

diff --git a/db/docs/java/com/sleepycat/db/class-use/DatabaseStats.html b/db/docs/java/com/sleepycat/db/class-use/DatabaseStats.html
new file mode 100644
index 000000000..07692af43
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/DatabaseStats.html
@@ -0,0 +1,207 @@

Uses of Class com.sleepycat.db.DatabaseStats (Sleepycat Software, Inc. - Berkeley DB Java API)
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.DatabaseStats

+
+ + + + + + + + + +
+Packages that use DatabaseStats
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of DatabaseStats in com.sleepycat.db
+  +

+ + + + + + + + + + + + + + + + + +
Subclasses of DatabaseStats in com.sleepycat.db
+ classBtreeStats + +
+          The BtreeStats object is used to return Btree +or Recno database statistics.
+ classHashStats + +
+          The HashStats object is used to return Hash database statistics.
+ classQueueStats + +
+          The QueueStats object is used to return Queue database statistics.
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db that return DatabaseStats
+ DatabaseStatsDatabase.getStats(Transaction txn, + StatsConfig statsConfig) + +
+          Return database statistics.
+  +
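Because BtreeStats, HashStats and QueueStats all derive from DatabaseStats, the object returned by Database.getStats can be inspected or downcast by database type. A minimal sketch; passing null for the transaction and StatsConfig (to get defaults) is an assumption.

    import com.sleepycat.db.*;

    class StatsSketch {
        static void printStats(Database db) throws DatabaseException {
            DatabaseStats stats = db.getStats(null, null);   // assumption: nulls select defaults
            if (stats instanceof BtreeStats) {
                System.out.println("Btree/Recno statistics: " + stats);
            } else {
                System.out.println(stats);
            }
        }
    }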

+


+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.
diff --git a/db/docs/java/com/sleepycat/db/class-use/DatabaseType.html b/db/docs/java/com/sleepycat/db/class-use/DatabaseType.html
new file mode 100644
index 000000000..939d66936
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/DatabaseType.html
@@ -0,0 +1,237 @@
+Uses of Class com.sleepycat.db.DatabaseType (Sleepycat Software, Inc. - Berkeley DB Java API)
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.DatabaseType

+
+ + + + + + + + + +
+Packages that use DatabaseType
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of DatabaseType in com.sleepycat.db
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + +
Fields in com.sleepycat.db declared as DatabaseType
+static DatabaseTypeDatabaseType.BTREE + +
+          The database is a Btree.
+static DatabaseTypeDatabaseType.HASH + +
+          The database is a Hash.
+static DatabaseTypeDatabaseType.QUEUE + +
+          The database is a Queue.
+static DatabaseTypeDatabaseType.RECNO + +
+          The database is a Recno.
+static DatabaseTypeDatabaseType.UNKNOWN + +
+          The database type is unknown.
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db that return DatabaseType
+ DatabaseTypeDatabaseConfig.getType() + +
+          Return the type of the database.
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db with parameters of type DatabaseType
+ voidDatabaseConfig.setType(DatabaseType type) + +
+          Configure the type of the database.
+  +
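The constants above are what DatabaseConfig.getType reports for an open handle, so code can branch on the access method. A small illustrative sketch; only getType and the constants are documented here, and the descriptive strings are informal.

    import com.sleepycat.db.*;

    class TypeCheck {
        static String describe(DatabaseConfig config) {
            DatabaseType type = config.getType();
            if (type == DatabaseType.BTREE) return "sorted, key-based (Btree)";
            if (type == DatabaseType.HASH)  return "hashed, key-based (Hash)";
            if (type == DatabaseType.RECNO || type == DatabaseType.QUEUE) return "record-number based";
            return "unknown";
        }
    }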

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.
diff --git a/db/docs/java/com/sleepycat/db/class-use/DeadlockException.html b/db/docs/java/com/sleepycat/db/class-use/DeadlockException.html
new file mode 100644
index 000000000..b73abf334
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/DeadlockException.html
@@ -0,0 +1,176 @@
+Uses of Class com.sleepycat.db.DeadlockException (Sleepycat Software, Inc. - Berkeley DB Java API)
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.DeadlockException

+
+ + + + + + + + + +
+Packages that use DeadlockException
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of DeadlockException in com.sleepycat.db
+  +

+ + + + + + + + + +
Subclasses of DeadlockException in com.sleepycat.db
+ classLockNotGrantedException + +
+          A LockNotGrantedException is thrown when a lock requested with the Environment.getLock or Environment.lockVector methods, with the noWait flag set or a lock timeout configured, could not be granted before the wait time expired.
+  +
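Because LockNotGrantedException extends DeadlockException, a catch of the base class also traps timed-out and noWait lock requests unless the subclass is handled first. A hedged retry sketch; Database.get is documented elsewhere in this API, the null transaction and retry count are illustrative.

    import com.sleepycat.db.*;

    class DeadlockRetry {
        static OperationStatus getWithRetry(Database db, DatabaseEntry key, DatabaseEntry data)
                throws DatabaseException {
            DeadlockException last = null;
            for (int attempt = 0; attempt < 3; attempt++) {
                try {
                    return db.get(null, key, data, LockMode.DEFAULT);
                } catch (LockNotGrantedException lnge) {
                    throw lnge;            // noWait/timeout case: surface immediately
                } catch (DeadlockException de) {
                    last = de;             // chosen as a deadlock victim: retry
                }
            }
            throw last;                    // give up after repeated deadlocks
        }
    }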

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.
diff --git a/db/docs/java/com/sleepycat/db/class-use/Environment.html b/db/docs/java/com/sleepycat/db/class-use/Environment.html
new file mode 100644
index 000000000..8a1823cb2
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/Environment.html
@@ -0,0 +1,324 @@
+Uses of Class com.sleepycat.db.Environment (Sleepycat Software, Inc. - Berkeley DB Java API)
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.Environment

+
+ + + + + + + + + + + + + +
+Packages that use Environment
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of Environment in com.sleepycat.collections
+  +

+ + + + + + + + + +
Methods in com.sleepycat.collections that return Environment
+ EnvironmentCurrentTransaction.getEnvironment() + +
+          Returns the underlying Berkeley DB environment.
+  +

+ + + + + + + + + +
Methods in com.sleepycat.collections with parameters of type Environment
+static CurrentTransactionCurrentTransaction.getInstance(Environment env) + +
+          Gets the CurrentTransaction accessor for a specified Berkeley DB + environment.
+  +

+ + + + + + + + + + + +
Constructors in com.sleepycat.collections with parameters of type Environment
TransactionRunner(Environment env) + +
+          Creates a transaction runner for a given Berkeley DB environment.
TransactionRunner(Environment env, + int maxRetries, + TransactionConfig config) + +
+          Creates a transaction runner for a given Berkeley DB environment and + with a given number of maximum retries.
+  +

+ + + + + +
+Uses of Environment in com.sleepycat.db
+  +

+ + + + + + + + + + + + + +
Methods in com.sleepycat.db that return Environment
+ EnvironmentDatabaseException.getEnvironment() + +
+          Return the environment in which the exception occurred.
+ EnvironmentDatabase.getEnvironment() + +
+          Return the Environment handle for the database environment + underlying the Database.
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Methods in com.sleepycat.db with parameters of type Environment
+ intReplicationTransport.send(Environment environment, + DatabaseEntry control, + DatabaseEntry rec, + LogSequenceNumber lsn, + int envid, + boolean noBuffer, + boolean permanent) + +
+          The callback used when Berkeley DB needs to transmit a replication + message.
+ voidPanicHandler.panic(Environment environment, + DatabaseException e) + +
+          A function to be called if the database environment panics.
+ voidMessageHandler.message(Environment environment, + String message) + +
+          A callback function to be called to display informational messages.
+ intLogRecordHandler.handleLogRecord(Environment environment, + DatabaseEntry logRecord, + LogSequenceNumber lsn, + RecoveryOperation operation) + +
+           
+ voidFeedbackHandler.recoveryFeedback(Environment environment, + int percent) + +
+          A function called with progress information when the database environment is being recovered.
+ voidErrorHandler.error(Environment environment, + String errpfx, + String msg) + +
+          A callback function to be called when an error occurs in the + Berkeley DB library.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.
diff --git a/db/docs/java/com/sleepycat/db/class-use/EnvironmentConfig.html b/db/docs/java/com/sleepycat/db/class-use/EnvironmentConfig.html
new file mode 100644
index 000000000..e1f94b7ba
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/EnvironmentConfig.html
@@ -0,0 +1,230 @@
+Uses of Class com.sleepycat.db.EnvironmentConfig (Sleepycat Software, Inc. - Berkeley DB Java API)
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.EnvironmentConfig

+
+ + + + + + + + + +
+Packages that use EnvironmentConfig
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of EnvironmentConfig in com.sleepycat.db
+  +

+ + + + + + + + + +
Fields in com.sleepycat.db declared as EnvironmentConfig
+static EnvironmentConfigEnvironmentConfig.DEFAULT + +
+           
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db that return EnvironmentConfig
+ EnvironmentConfigEnvironment.getConfig() + +
+          Return this object's configuration.
+  +

+ + + + + + + + + + + + + +
Methods in com.sleepycat.db with parameters of type EnvironmentConfig
+static voidEnvironment.remove(File home, + boolean force, + EnvironmentConfig config) + +
+          Destroy a database environment.
+ voidEnvironment.setConfig(EnvironmentConfig config) + +
+          Change the settings in an existing environment handle.
+  +

+ + + + + + + + +
Constructors in com.sleepycat.db with parameters of type EnvironmentConfig
Environment(File envHome, + EnvironmentConfig envConfig) + +
+          Create a database environment handle.
+  +
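A hedged sketch of the two EnvironmentConfig entry points above: read the live configuration with getConfig and push it back with setConfig (only runtime-changeable settings take effect), and destroy an environment's shared regions with the static Environment.remove, using the documented EnvironmentConfig.DEFAULT field. The java.io.FileNotFoundException in the throws clause is an assumption.

    import java.io.File;
    import java.io.FileNotFoundException;
    import com.sleepycat.db.*;

    class EnvConfigSketch {
        // Round-trip the live configuration of an open handle.
        static void touchConfig(Environment env) throws DatabaseException {
            EnvironmentConfig config = env.getConfig();
            env.setConfig(config);
        }

        // Destroy an environment; 'true' forces removal even if other handles exist.
        static void destroy(File home) throws DatabaseException, FileNotFoundException {
            Environment.remove(home, true, EnvironmentConfig.DEFAULT);
        }
    }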

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.
diff --git a/db/docs/java/com/sleepycat/db/class-use/ErrorHandler.html b/db/docs/java/com/sleepycat/db/class-use/ErrorHandler.html
new file mode 100644
index 000000000..16d366ef4
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/ErrorHandler.html
@@ -0,0 +1,205 @@
+Uses of Interface com.sleepycat.db.ErrorHandler (Sleepycat Software, Inc. - Berkeley DB Java API)
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Interface
com.sleepycat.db.ErrorHandler

+
+ + + + + + + + + +
+Packages that use ErrorHandler
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of ErrorHandler in com.sleepycat.db
+  +

+ + + + + + + + + + + + + +
Methods in com.sleepycat.db that return ErrorHandler
+ ErrorHandlerEnvironmentConfig.getErrorHandler() + +
+          Return the function to be called if an error occurs.
+ ErrorHandlerDatabaseConfig.getErrorHandler() + +
+          Return the function to be called if an error occurs.
+  +

+ + + + + + + + + + + + + +
Methods in com.sleepycat.db with parameters of type ErrorHandler
+ voidEnvironmentConfig.setErrorHandler(ErrorHandler errorHandler) + +
+          Set the function to be called if an error occurs.
+ voidDatabaseConfig.setErrorHandler(ErrorHandler errorHandler) + +
+          Set the function to be called if an error occurs.
+  +
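Both EnvironmentConfig and DatabaseConfig accept the same callback, so one implementation can serve every handle. A minimal sketch that routes Berkeley DB error text to System.err, assuming ErrorHandler declares only the error method shown elsewhere in this section:

    import com.sleepycat.db.*;

    class StderrErrorHandler implements ErrorHandler {
        public void error(Environment environment, String errpfx, String msg) {
            System.err.println((errpfx == null ? "" : errpfx + ": ") + msg);
        }
    }

    // registration (documented setter):
    //     EnvironmentConfig config = new EnvironmentConfig();
    //     config.setErrorHandler(new StderrErrorHandler());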

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/FeedbackHandler.html b/db/docs/java/com/sleepycat/db/class-use/FeedbackHandler.html new file mode 100644 index 000000000..ade47b0e0 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/FeedbackHandler.html @@ -0,0 +1,205 @@ + + + + + + +Uses of Interface com.sleepycat.db.FeedbackHandler (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Interface
com.sleepycat.db.FeedbackHandler

+
+ + + + + + + + + +
+Packages that use FeedbackHandler
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of FeedbackHandler in com.sleepycat.db
+  +

+ + + + + + + + + + + + + +
Methods in com.sleepycat.db that return FeedbackHandler
+ FeedbackHandlerEnvironmentConfig.getFeedbackHandler() + +
+          Return the object's methods to be called to provide feedback.
+ FeedbackHandlerDatabaseConfig.getFeedbackHandler() + +
+          Return the object's methods to be called to provide feedback.
+  +

+ + + + + + + + + + + + + +
Methods in com.sleepycat.db with parameters of type FeedbackHandler
+ voidEnvironmentConfig.setFeedbackHandler(FeedbackHandler feedbackHandler) + +
+          Set an object whose methods are called to provide feedback.
+ voidDatabaseConfig.setFeedbackHandler(FeedbackHandler feedbackHandler) + +
+          Set an object whose methods are called to provide feedback.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/HashStats.html b/db/docs/java/com/sleepycat/db/class-use/HashStats.html new file mode 100644 index 000000000..39c6a498b --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/HashStats.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.db.HashStats (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.HashStats

+
+No usage of com.sleepycat.db.HashStats +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/Hasher.html b/db/docs/java/com/sleepycat/db/class-use/Hasher.html new file mode 100644 index 000000000..7d87341f7 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/Hasher.html @@ -0,0 +1,189 @@ + + + + + + +Uses of Interface com.sleepycat.db.Hasher (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Interface
com.sleepycat.db.Hasher

+
+ + + + + + + + + +
+Packages that use Hasher
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of Hasher in com.sleepycat.db
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db that return Hasher
+ HasherDatabaseConfig.getHasher() + +
+          Return the database-specific hash function.
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db with parameters of type Hasher
+ voidDatabaseConfig.setHasher(Hasher hasher) + +
+          Set a database-specific hash function.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.
diff --git a/db/docs/java/com/sleepycat/db/class-use/JoinConfig.html b/db/docs/java/com/sleepycat/db/class-use/JoinConfig.html
new file mode 100644
index 000000000..6f956a43c
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/JoinConfig.html
@@ -0,0 +1,240 @@
+Uses of Class com.sleepycat.db.JoinConfig (Sleepycat Software, Inc. - Berkeley DB Java API)
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.JoinConfig

+
+ + + + + + + + + + + + + +
+Packages that use JoinConfig
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of JoinConfig in com.sleepycat.collections
+  +

+ + + + + + + + + +
Methods in com.sleepycat.collections with parameters of type JoinConfig
+ StoredIteratorStoredCollection.join(StoredContainer[] indices, + Object[] indexKeys, + JoinConfig joinConfig) + +
+          Returns an iterator representing an equality join of the indices and + index key values specified.
+  +

+ + + + + +
+Uses of JoinConfig in com.sleepycat.db
+  +

+ + + + + + + + + +
Fields in com.sleepycat.db declared as JoinConfig
+static JoinConfigJoinConfig.DEFAULT + +
+          Default configuration used if null is passed to Database.join
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db that return JoinConfig
+ JoinConfigJoinCursor.getConfig() + +
+          Returns this object's configuration.
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db with parameters of type JoinConfig
+ JoinCursorDatabase.join(Cursor[] cursors, + JoinConfig config) + +
+          Creates a specialized join cursor for use in performing equality or + natural joins on secondary indices.
+  +
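A hedged sketch of the join flow documented above: position one cursor per secondary index on the secondary key of interest, hand the array to Database.join, and iterate the JoinCursor. Opening and positioning the secondary cursors is assumed to have happened already (those calls live on the SecondaryCursor pages, not here), and JoinConfig.DEFAULT stands in for a null configuration.

    import com.sleepycat.db.*;

    class JoinSketch {
        // 'positioned' must already point at the wanted key in each secondary index.
        static void join(Database primary, Cursor[] positioned) throws DatabaseException {
            JoinCursor join = primary.join(positioned, JoinConfig.DEFAULT);
            try {
                DatabaseEntry key = new DatabaseEntry();
                DatabaseEntry data = new DatabaseEntry();
                while (join.getNext(key, data, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
                    System.out.println("matched primary key: " + new String(key.getData()));
                }
            } finally {
                join.close();   // assumed close method on JoinCursor
            }
        }
    }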

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/JoinCursor.html b/db/docs/java/com/sleepycat/db/class-use/JoinCursor.html new file mode 100644 index 000000000..27ff243f9 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/JoinCursor.html @@ -0,0 +1,175 @@ + + + + + + +Uses of Class com.sleepycat.db.JoinCursor (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.JoinCursor

+
+ + + + + + + + + +
+Packages that use JoinCursor
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of JoinCursor in com.sleepycat.db
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db that return JoinCursor
+ JoinCursorDatabase.join(Cursor[] cursors, + JoinConfig config) + +
+          Creates a specialized join cursor for use in performing equality or + natural joins on secondary indices.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.
diff --git a/db/docs/java/com/sleepycat/db/class-use/KeyRange.html b/db/docs/java/com/sleepycat/db/class-use/KeyRange.html
new file mode 100644
index 000000000..8b340424b
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/KeyRange.html
@@ -0,0 +1,175 @@
+Uses of Class com.sleepycat.db.KeyRange (Sleepycat Software, Inc. - Berkeley DB Java API)
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.KeyRange

+
+ + + + + + + + + +
+Packages that use KeyRange
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of KeyRange in com.sleepycat.db
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db that return KeyRange
+ KeyRangeDatabase.getKeyRange(Transaction txn, + DatabaseEntry key) + +
+          Return an estimate of the proportion of keys in the database less + than, equal to, and greater than the specified key.
+  +
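Database.getKeyRange returns the estimated fractions of keys below, equal to, and above the supplied key. The sketch below assumes the KeyRange result exposes public less/equal/greater fields and that a null transaction is accepted; only the method itself is documented on this page.

    import com.sleepycat.db.*;

    class KeyRangeSketch {
        static void estimate(Database db, byte[] keyBytes) throws DatabaseException {
            KeyRange range = db.getKeyRange(null, new DatabaseEntry(keyBytes));
            System.out.println("less=" + range.less + " equal=" + range.equal + " greater=" + range.greater);
        }
    }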

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/Lock.html b/db/docs/java/com/sleepycat/db/class-use/Lock.html new file mode 100644 index 000000000..6c45d302f --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/Lock.html @@ -0,0 +1,246 @@ + + + + + + +Uses of Class com.sleepycat.db.Lock (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.Lock

+
+ + + + + + + + + +
+Packages that use Lock
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of Lock in com.sleepycat.db
+  +

+ + + + + + + + + + + + + + + + + +
Methods in com.sleepycat.db that return Lock
+ LockLockRequest.getLock() + +
+          Return the lock reference.
+ LockLockNotGrantedException.getLock() + +
+          Returns null when Environment.getLock was called, and + returns the lock in the failed LockRequest when Environment.lockVector was called.
+ LockEnvironment.getLock(int locker, + boolean noWait, + DatabaseEntry object, + LockRequestMode mode) + +
+          Acquire a lock from the lock table.
+  +

+ + + + + + + + + + + + + +
Methods in com.sleepycat.db with parameters of type Lock
+ voidLockRequest.setLock(Lock lock) + +
+          Set the lock reference.
+ voidEnvironment.putLock(Lock lock) + +
+          Release a lock.
+  +

+ + + + + + + + + + + +
Constructors in com.sleepycat.db with parameters of type Lock
LockRequest(LockOperation op, + LockRequestMode mode, + DatabaseEntry obj, + Lock lock) + +
+          Construct a LockRequest with the specified operation, mode and lock, + for the specified object.
LockRequest(LockOperation op, + LockRequestMode mode, + DatabaseEntry obj, + Lock lock, + int timeout) + +
+          Construct a LockRequest with the specified operation, mode, lock and + timeout for the specified object.
+  +
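A hedged sketch of taking and releasing a single lock with the Environment methods above. The caller supplies the locker ID (Environment.createLockerID, which would normally produce it, is not documented on this page and is an assumption).

    import com.sleepycat.db.*;

    class LockSketch {
        static void lockBriefly(Environment env, int locker, byte[] name) throws DatabaseException {
            DatabaseEntry object = new DatabaseEntry(name);
            Lock lock = env.getLock(locker, false, object, LockRequestMode.WRITE);  // noWait=false: block
            try {
                // ... operate on the locked resource ...
            } finally {
                env.putLock(lock);
            }
        }
    }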

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.
diff --git a/db/docs/java/com/sleepycat/db/class-use/LockDetectMode.html b/db/docs/java/com/sleepycat/db/class-use/LockDetectMode.html
new file mode 100644
index 000000000..e13aa85a7
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/LockDetectMode.html
@@ -0,0 +1,288 @@
+Uses of Class com.sleepycat.db.LockDetectMode (Sleepycat Software, Inc. - Berkeley DB Java API)
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.LockDetectMode

+
+ + + + + + + + + +
+Packages that use LockDetectMode
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of LockDetectMode in com.sleepycat.db
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Fields in com.sleepycat.db declared as LockDetectMode
+static LockDetectModeLockDetectMode.NONE + +
+          Turn off deadlock detection.
+static LockDetectModeLockDetectMode.DEFAULT + +
+          Use whatever lock policy was specified when the database environment + was created.
+static LockDetectModeLockDetectMode.EXPIRE + +
+          Reject lock requests which have timed out.
+static LockDetectModeLockDetectMode.MAXLOCKS + +
+          Reject the lock request for the locker ID with the most locks.
+static LockDetectModeLockDetectMode.MAXWRITE + +
+          Reject the lock request for the locker ID with the most write locks.
+static LockDetectModeLockDetectMode.MINLOCKS + +
+          Reject the lock request for the locker ID with the fewest locks.
+static LockDetectModeLockDetectMode.MINWRITE + +
+          Reject the lock request for the locker ID with the fewest write locks.
+static LockDetectModeLockDetectMode.OLDEST + +
+          Reject the lock request for the locker ID with the oldest lock.
+static LockDetectModeLockDetectMode.RANDOM + +
+          Reject the lock request for a random locker ID.
+static LockDetectModeLockDetectMode.YOUNGEST + +
+          Reject the lock request for the locker ID with the youngest lock.
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db that return LockDetectMode
+ LockDetectModeEnvironmentConfig.getLockDetectMode() + +
+          Return if the deadlock detector is configured to run whenever a lock + conflict occurs.
+  +

+ + + + + + + + + + + + + +
Methods in com.sleepycat.db with parameters of type LockDetectMode
+ voidEnvironmentConfig.setLockDetectMode(LockDetectMode lockDetectMode) + +
+          Configure if the deadlock detector is to be run whenever a lock + conflict occurs.
+ intEnvironment.detectDeadlocks(LockDetectMode mode) + +
+          Run one iteration of the deadlock detector.
+  +
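A minimal sketch combining the two entry points above: configure automatic detection before the environment is opened, or run one explicit pass of the detector. By analogy with the C API, the return value of detectDeadlocks is presumably the number of rejected lock requests.

    import com.sleepycat.db.*;

    class DeadlockDetection {
        // At configuration time: reject the locker holding the fewest write locks.
        static void configure(EnvironmentConfig config) {
            config.setLockDetectMode(LockDetectMode.MINWRITE);
        }

        // At run time: one explicit pass of the detector using the configured policy.
        static int runOnce(Environment env) throws DatabaseException {
            return env.detectDeadlocks(LockDetectMode.DEFAULT);
        }
    }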

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.
diff --git a/db/docs/java/com/sleepycat/db/class-use/LockMode.html b/db/docs/java/com/sleepycat/db/class-use/LockMode.html
new file mode 100644
index 000000000..61ce3d109
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/LockMode.html
@@ -0,0 +1,636 @@
+Uses of Class com.sleepycat.db.LockMode (Sleepycat Software, Inc. - Berkeley DB Java API)
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.LockMode

+
+ + + + + + + + + +
+Packages that use LockMode
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of LockMode in com.sleepycat.db
+  +

+ + + + + + + + + + + + + + + + + + + + + +
Fields in com.sleepycat.db declared as LockMode
+static LockModeLockMode.DEFAULT + +
+          Acquire read locks for read operations and write locks for write + operations.
+static LockModeLockMode.DIRTY_READ + +
+          Read modified but not yet committed data.
+static LockModeLockMode.RMW + +
+          Acquire write locks instead of read locks when doing the retrieval.
+static LockModeLockMode.DEGREE_2 + +
+          Degree 2 isolation provides for cursor stability but not repeatable + reads.
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Methods in com.sleepycat.db with parameters of type LockMode
+ OperationStatusSecondaryDatabase.get(Transaction txn, + DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Retrieves the key/data pair with the given key.
+ OperationStatusSecondaryDatabase.getSearchBoth(Transaction txn, + DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Retrieves the key/data pair with the specified secondary and primary key, that +is, both the primary and secondary key items must match.
+ OperationStatusSecondaryDatabase.getSearchRecordNumber(Transaction txn, + DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Retrieves the key/data pair associated with the specific numbered record of the database.
+ OperationStatusSecondaryCursor.getCurrent(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Returns the key/data pair to which the cursor refers.
+ OperationStatusSecondaryCursor.getFirst(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the first key/data pair of the database, and return +that pair.
+ OperationStatusSecondaryCursor.getLast(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the last key/data pair of the database, and return +that pair.
+ OperationStatusSecondaryCursor.getNext(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the next key/data pair and return that pair.
+ OperationStatusSecondaryCursor.getNextDup(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          If the next key/data pair of the database is a duplicate data record for +the current key/data pair, move the cursor to the next key/data pair +of the database and return that pair.
+ OperationStatusSecondaryCursor.getNextNoDup(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the next non-duplicate key/data pair and return +that pair.
+ OperationStatusSecondaryCursor.getPrev(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the previous key/data pair and return that pair.
+ OperationStatusSecondaryCursor.getPrevDup(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          If the previous key/data pair of the database is a duplicate data record +for the current key/data pair, move the cursor to the previous key/data +pair of the database and return that pair.
+ OperationStatusSecondaryCursor.getPrevNoDup(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the previous non-duplicate key/data pair and return +that pair.
+ OperationStatusSecondaryCursor.getSearchKey(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the given key of the database, and return the datum +associated with the given key.
+ OperationStatusSecondaryCursor.getSearchKeyRange(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the closest matching key of the database, and return +the data item associated with the matching key.
+ OperationStatusSecondaryCursor.getSearchBoth(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the specified secondary and primary key, where both +the primary and secondary key items must match.
+ OperationStatusSecondaryCursor.getSearchBothRange(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the specified secondary key and closest matching primary +key of the database.
+ OperationStatusSecondaryCursor.getRecordNumber(DatabaseEntry secondaryRecno, + DatabaseEntry primaryRecno, + LockMode lockMode) + +
+          Return the record number associated with the cursor.
+ OperationStatusSecondaryCursor.getSearchRecordNumber(DatabaseEntry secondaryRecno, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the specific numbered record of the database, and +return the associated key/data pair.
+ OperationStatusJoinCursor.getNext(DatabaseEntry key, + LockMode lockMode) + +
+          Returns the next primary key resulting from the join operation.
+ OperationStatusJoinCursor.getNext(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Returns the next primary key and data resulting from the join operation.
+ OperationStatusDatabase.get(Transaction txn, + DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Retrieves the key/data pair with the given key.
+ OperationStatusDatabase.getSearchBoth(Transaction txn, + DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Retrieves the key/data pair with the given key and data value, that is, both +the key and data items must match.
+ OperationStatusDatabase.getSearchRecordNumber(Transaction txn, + DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Retrieves the key/data pair associated with the specific numbered record of the database.
+ OperationStatusCursor.getCurrent(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Returns the key/data pair to which the cursor refers.
+ OperationStatusCursor.getFirst(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the first key/data pair of the database, and return +that pair.
+ OperationStatusCursor.getLast(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the last key/data pair of the database, and return +that pair.
+ OperationStatusCursor.getNext(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the next key/data pair and return that pair.
+ OperationStatusCursor.getNextDup(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          If the next key/data pair of the database is a duplicate data record for +the current key/data pair, move the cursor to the next key/data pair +of the database and return that pair.
+ OperationStatusCursor.getNextNoDup(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the next non-duplicate key/data pair and return +that pair.
+ OperationStatusCursor.getPrev(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the previous key/data pair and return that pair.
+ OperationStatusCursor.getPrevDup(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          If the previous key/data pair of the database is a duplicate data record +for the current key/data pair, move the cursor to the previous key/data +pair of the database and return that pair.
+ OperationStatusCursor.getPrevNoDup(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the previous non-duplicate key/data pair and return +that pair.
+ OperationStatusCursor.getSearchKey(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the given key of the database, and return the datum +associated with the given key.
+ OperationStatusCursor.getSearchKeyRange(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the closest matching key of the database, and return +the data item associated with the matching key.
+ OperationStatusCursor.getSearchBoth(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the specified key/data pair, where both the key and +data items must match.
+ OperationStatusCursor.getSearchBothRange(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the specified key and closest matching data item of the +database.
+ OperationStatusCursor.getRecordNumber(DatabaseEntry data, + LockMode lockMode) + +
+          Return the record number associated with the cursor.
+ OperationStatusCursor.getSearchRecordNumber(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the specific numbered record of the database, and +return the associated key/data pair.
+  +
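LockMode.RMW is typically passed to a read that will shortly be followed by a write, so the write lock is acquired up front and the lock-upgrade deadlock is avoided. A hedged sketch; Database.put is not listed on this page and its signature is assumed, and the transaction must come from the caller.

    import com.sleepycat.db.*;

    class ReadModifyWrite {
        static void increment(Database db, Transaction txn, DatabaseEntry key) throws DatabaseException {
            DatabaseEntry data = new DatabaseEntry();
            if (db.get(txn, key, data, LockMode.RMW) == OperationStatus.SUCCESS) {
                byte[] bytes = data.getData();
                bytes[0]++;                                   // toy modification
                db.put(txn, key, new DatabaseEntry(bytes));   // assumed signature
            }
        }
    }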

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/LockNotGrantedException.html b/db/docs/java/com/sleepycat/db/class-use/LockNotGrantedException.html new file mode 100644 index 000000000..2852a2665 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/LockNotGrantedException.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.db.LockNotGrantedException (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.LockNotGrantedException

+
+No usage of com.sleepycat.db.LockNotGrantedException +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/LockOperation.html b/db/docs/java/com/sleepycat/db/class-use/LockOperation.html new file mode 100644 index 000000000..7f3471f52 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/LockOperation.html @@ -0,0 +1,277 @@ + + + + + + +Uses of Class com.sleepycat.db.LockOperation (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.LockOperation

+
+ + + + + + + + + +
+Packages that use LockOperation
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of LockOperation in com.sleepycat.db
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Fields in com.sleepycat.db declared as LockOperation
+static LockOperationLockOperation.GET + +
+          Get the lock defined by the values of the mode and obj fields, for + the specified locker.
+static LockOperationLockOperation.GET_TIMEOUT + +
+          Identical to LockOperation GET except that the value in the timeout + field overrides any previously specified timeout value for this + lock.
+static LockOperationLockOperation.PUT + +
+          The lock to which the lock field refers is released.
+static LockOperationLockOperation.PUT_ALL + +
+          All locks held by the specified locker are released.
+static LockOperationLockOperation.PUT_OBJ + +
+          All locks held on obj are released.
+static LockOperationLockOperation.TIMEOUT + +
+          Cause the specified locker to timeout immediately.
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db that return LockOperation
+ LockOperationLockRequest.getOp() + +
+          Return the lock operation.
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db with parameters of type LockOperation
+ voidLockRequest.setOp(LockOperation op) + +
+          Set the operation.
+  +

+ + + + + + + + + + + +
Constructors in com.sleepycat.db with parameters of type LockOperation
LockRequest(LockOperation op, + LockRequestMode mode, + DatabaseEntry obj, + Lock lock) + +
+          Construct a LockRequest with the specified operation, mode and lock, + for the specified object.
LockRequest(LockOperation op, + LockRequestMode mode, + DatabaseEntry obj, + Lock lock, + int timeout) + +
+          Construct a LockRequest with the specified operation, mode, lock and + timeout for the specified object.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/LockRequest.html b/db/docs/java/com/sleepycat/db/class-use/LockRequest.html new file mode 100644 index 000000000..a274843bf --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/LockRequest.html @@ -0,0 +1,175 @@ + + + + + + +Uses of Class com.sleepycat.db.LockRequest (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.LockRequest

+
+ + + + + + + + + +
+Packages that use LockRequest
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of LockRequest in com.sleepycat.db
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db with parameters of type LockRequest
+ voidEnvironment.lockVector(int locker, + boolean noWait, + LockRequest[] list) + +
+          Atomically obtain and release one or more locks from the lock table.
+  +
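Environment.lockVector processes an array of LockRequest objects atomically. The sketch below issues one GET and then releases everything the locker holds with PUT_ALL, using only constructors and constants documented on these pages; the caller supplies the locker ID, and passing null for the fields PUT_ALL ignores is an assumption.

    import com.sleepycat.db.*;

    class LockVectorSketch {
        static void getThenReleaseAll(Environment env, int locker, byte[] name) throws DatabaseException {
            DatabaseEntry object = new DatabaseEntry(name);
            LockRequest get = new LockRequest(LockOperation.GET, LockRequestMode.READ, object, null);
            env.lockVector(locker, false, new LockRequest[] { get });
            System.out.println("granted: " + get.getLock());   // filled in by lockVector

            // mode/obj/lock are assumed to be ignored for PUT_ALL
            LockRequest putAll = new LockRequest(LockOperation.PUT_ALL, null, null, null);
            env.lockVector(locker, false, new LockRequest[] { putAll });
        }
    }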

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/LockRequestMode.html b/db/docs/java/com/sleepycat/db/class-use/LockRequestMode.html new file mode 100644 index 000000000..8ba801c3a --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/LockRequestMode.html @@ -0,0 +1,277 @@ + + + + + + +Uses of Class com.sleepycat.db.LockRequestMode (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.LockRequestMode

+
+ + + + + + + + + +
+Packages that use LockRequestMode
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of LockRequestMode in com.sleepycat.db
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + +
Fields in com.sleepycat.db declared as LockRequestMode
+static LockRequestModeLockRequestMode.READ + +
+          Read (shared).
+static LockRequestModeLockRequestMode.WRITE + +
+          Write (exclusive).
+static LockRequestModeLockRequestMode.IWRITE + +
+          Intention to write (shared).
+static LockRequestModeLockRequestMode.IREAD + +
+          Intention to read (shared).
+static LockRequestModeLockRequestMode.IWR + +
+          Intention to read and write (shared).
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db that return LockRequestMode
+ LockRequestModeLockRequest.getMode() + +
+          Return the lock mode.
+  +

+ + + + + + + + + + + + + +
Methods in com.sleepycat.db with parameters of type LockRequestMode
+ voidLockRequest.setMode(LockRequestMode mode) + +
+          Set the lock mode.
+ LockEnvironment.getLock(int locker, + boolean noWait, + DatabaseEntry object, + LockRequestMode mode) + +
+          Acquire a lock from the lock table.
+  +

+ + + + + + + + + + + +
Constructors in com.sleepycat.db with parameters of type LockRequestMode
LockRequest(LockOperation op, + LockRequestMode mode, + DatabaseEntry obj, + Lock lock) + +
+          Construct a LockRequest with the specified operation, mode and lock, + for the specified object.
LockRequest(LockOperation op, + LockRequestMode mode, + DatabaseEntry obj, + Lock lock, + int timeout) + +
+          Construct a LockRequest with the specified operation, mode, lock and + timeout for the specified object.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/LockStats.html b/db/docs/java/com/sleepycat/db/class-use/LockStats.html new file mode 100644 index 000000000..73153e2d8 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/LockStats.html @@ -0,0 +1,173 @@ + + + + + + +Uses of Class com.sleepycat.db.LockStats (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.LockStats

+
+ + + + + + + + + +
+Packages that use LockStats
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of LockStats in com.sleepycat.db
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db that return LockStats
+ LockStatsEnvironment.getLockStats(StatsConfig config) + +
+          Return the database environment's locking statistics.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/LogCursor.html b/db/docs/java/com/sleepycat/db/class-use/LogCursor.html new file mode 100644 index 000000000..3f6d39074 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/LogCursor.html @@ -0,0 +1,173 @@ + + + + + + +Uses of Class com.sleepycat.db.LogCursor (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.LogCursor

+
+ + + + + + + + + +
+Packages that use LogCursor
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of LogCursor in com.sleepycat.db
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db that return LogCursor
+ LogCursorEnvironment.openLogCursor() + +
+          Return a log cursor.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/LogRecordHandler.html b/db/docs/java/com/sleepycat/db/class-use/LogRecordHandler.html new file mode 100644 index 000000000..d711f7156 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/LogRecordHandler.html @@ -0,0 +1,189 @@ + + + + + + +Uses of Interface com.sleepycat.db.LogRecordHandler (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Interface
com.sleepycat.db.LogRecordHandler

+
+ + + + + + + + + +
+Packages that use LogRecordHandler
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of LogRecordHandler in com.sleepycat.db
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db that return LogRecordHandler
+ LogRecordHandlerEnvironmentConfig.getLogRecordHandler() + +
+          Return the handler for application-specific log records.
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db with parameters of type LogRecordHandler
+ voidEnvironmentConfig.setLogRecordHandler(LogRecordHandler logRecordHandler) + +
+          Set a function to process application-specific log records.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.
diff --git a/db/docs/java/com/sleepycat/db/class-use/LogSequenceNumber.html b/db/docs/java/com/sleepycat/db/class-use/LogSequenceNumber.html
new file mode 100644
index 000000000..4d5a51c4d
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/LogSequenceNumber.html
@@ -0,0 +1,342 @@
+Uses of Class com.sleepycat.db.LogSequenceNumber (Sleepycat Software, Inc. - Berkeley DB Java API)
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.LogSequenceNumber

+
+ + + + + + + + + +
+Packages that use LogSequenceNumber
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of LogSequenceNumber in com.sleepycat.db
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Methods in com.sleepycat.db that return LogSequenceNumber
+ LogSequenceNumberTransactionStats.getLastCkp() + +
+          The LSN of the last checkpoint.
+ LogSequenceNumberTransactionStats.Active.getLsn() + +
+          The log sequence number of the transaction's first log record.
+ LogSequenceNumberReplicationStatus.getLSN() + +
+          Whenever processing a message results in the processing of records that are permanent, or a message carrying the DB_REP_PERMANENT flag was processed successfully but was not written to disk, the LSN of that record is available from the getLSN method.
+ LogSequenceNumberReplicationStats.getNextLsn() + +
+          In replication environments configured as masters, the next LSN + expected.
+ LogSequenceNumberReplicationStats.getWaitingLsn() + +
+          The LSN of the first log record we have after missing log records + being waited for, or 0 if no log records are currently missing.
+ LogSequenceNumberReplicationStats.getElectionLsn() + +
+          The maximum LSN of election winner.
+ LogSequenceNumberEnvironment.logPut(DatabaseEntry data, + boolean flush) + +
+          Append a record to the log.
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Methods in com.sleepycat.db with parameters of type LogSequenceNumber
+ intReplicationTransport.send(Environment environment, + DatabaseEntry control, + DatabaseEntry rec, + LogSequenceNumber lsn, + int envid, + boolean noBuffer, + boolean permanent) + +
+          The callback used when Berkeley DB needs to transmit a replication + message.
+static intLogSequenceNumber.compare(LogSequenceNumber lsn0, + LogSequenceNumber lsn1) + +
+          Compare two LogSequenceNumber objects.
+ intLogRecordHandler.handleLogRecord(Environment environment, + DatabaseEntry logRecord, + LogSequenceNumber lsn, + RecoveryOperation operation) + +
+           
+ OperationStatusLogCursor.getCurrent(LogSequenceNumber lsn, + DatabaseEntry data) + +
+          Return the LogSequenceNumber and log record to which the log cursor + currently refers.
+ OperationStatusLogCursor.getNext(LogSequenceNumber lsn, + DatabaseEntry data) + +
+          Return the next LogSequenceNumber and log record.
+ OperationStatusLogCursor.getFirst(LogSequenceNumber lsn, + DatabaseEntry data) + +
+          Return the first LogSequenceNumber and log record.
+ OperationStatusLogCursor.getLast(LogSequenceNumber lsn, + DatabaseEntry data) + +
+          Return the last LogSequenceNumber and log record.
+ OperationStatusLogCursor.getPrev(LogSequenceNumber lsn, + DatabaseEntry data) + +
+          Return the previous LogSequenceNumber and log record.
+ OperationStatusLogCursor.set(LogSequenceNumber lsn, + DatabaseEntry data) + +
+          Return a specific log record.
+ StringEnvironment.getLogFileName(LogSequenceNumber lsn) + +
+          Return the name of the log file that contains the log record + specified by a LogSequenceNumber object.
+ voidEnvironment.logFlush(LogSequenceNumber lsn) + +
+          Flush log records to stable storage.
+  +
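A hedged sketch walking the log with the cursor and LSN methods above; the no-argument LogSequenceNumber constructor (used as an out-parameter) and LogCursor.close are assumptions, as neither is listed on this page.

    import com.sleepycat.db.*;

    class LogWalk {
        static void listLogFiles(Environment env) throws DatabaseException {
            LogCursor logCursor = env.openLogCursor();
            try {
                LogSequenceNumber lsn = new LogSequenceNumber();   // assumed out-parameter constructor
                DatabaseEntry record = new DatabaseEntry();
                while (logCursor.getNext(lsn, record) == OperationStatus.SUCCESS) {
                    System.out.println(env.getLogFileName(lsn));   // documented lookup by LSN
                }
            } finally {
                logCursor.close();   // assumed close method
            }
        }
    }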

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/LogStats.html b/db/docs/java/com/sleepycat/db/class-use/LogStats.html new file mode 100644 index 000000000..56165df14 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/LogStats.html @@ -0,0 +1,173 @@ + + + + + + +Uses of Class com.sleepycat.db.LogStats (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.LogStats

+
+ + + + + + + + + +
+Packages that use LogStats
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of LogStats in com.sleepycat.db
+  +

+ + + + + + + + + +
Methods in com.sleepycat.db that return LogStats
+ LogStatsEnvironment.getLogStats(StatsConfig config) + +
+          Return the database environment's logging statistics.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/MemoryException.html b/db/docs/java/com/sleepycat/db/class-use/MemoryException.html new file mode 100644 index 000000000..528e65887 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/MemoryException.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.db.MemoryException (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.MemoryException

+
+No usage of com.sleepycat.db.MemoryException +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/MessageHandler.html b/db/docs/java/com/sleepycat/db/class-use/MessageHandler.html new file mode 100644 index 000000000..753d5c2bb --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/MessageHandler.html @@ -0,0 +1,205 @@ + + + + + + +Uses of Interface com.sleepycat.db.MessageHandler (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Interface
com.sleepycat.db.MessageHandler

+
+ + + + + + + + + +
+Packages that use MessageHandler
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of MessageHandler in com.sleepycat.db
+  +

+ + + + + + + + + + + + + +
Methods in com.sleepycat.db that return MessageHandler
+ MessageHandlerEnvironmentConfig.getMessageHandler() + +
+          Return the function to be called with an informational message.
+ MessageHandlerDatabaseConfig.getMessageHandler() + +
+          Return the function to be called with an informational message.
+  +

+ + + + + + + + + + + + + +
Methods in com.sleepycat.db with parameters of type MessageHandler
+ voidEnvironmentConfig.setMessageHandler(MessageHandler messageHandler) + +
+          Set a function to be called with an informational message.
+ voidDatabaseConfig.setMessageHandler(MessageHandler messageHandler) + +
+          Set a function to be called with an informational message.
+  +
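A minimal sketch of the informational-message callback, assuming MessageHandler declares only the message method shown above, registered through the documented EnvironmentConfig.setMessageHandler:

    import com.sleepycat.db.*;

    class StdoutMessageHandler implements MessageHandler {
        public void message(Environment environment, String message) {
            System.out.println("bdb: " + message);
        }
    }

    // registration (documented setter):
    //     EnvironmentConfig config = new EnvironmentConfig();
    //     config.setMessageHandler(new StdoutMessageHandler());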

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/MultipleDataEntry.html b/db/docs/java/com/sleepycat/db/class-use/MultipleDataEntry.html new file mode 100644 index 000000000..bbfee7152 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/MultipleDataEntry.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.db.MultipleDataEntry (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.MultipleDataEntry

+
+No usage of com.sleepycat.db.MultipleDataEntry +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/MultipleEntry.html b/db/docs/java/com/sleepycat/db/class-use/MultipleEntry.html new file mode 100644 index 000000000..40669e2c2 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/MultipleEntry.html @@ -0,0 +1,192 @@ + + + + + + +Uses of Class com.sleepycat.db.MultipleEntry (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.MultipleEntry

+
+ + + + + + + + + +
+Packages that use MultipleEntry
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of MultipleEntry in com.sleepycat.db
+  +

+ + + + + + + + + + + + + + + + + +
Subclasses of MultipleEntry in com.sleepycat.db
+ classMultipleDataEntry + +
+          A DatabaseEntry that holds multiple data items returned by a single +Database or Cursor get call.
+ classMultipleKeyDataEntry + +
+          A DatabaseEntry that holds multiple key/data pairs returned by a single +Database or Cursor get call.
+ classMultipleRecnoDataEntry + +
+          A DatabaseEntry that holds multiple record number/data pairs returned by a +single Database or Cursor get call.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/MultipleKeyDataEntry.html b/db/docs/java/com/sleepycat/db/class-use/MultipleKeyDataEntry.html new file mode 100644 index 000000000..b15626aa3 --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/MultipleKeyDataEntry.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.db.MultipleKeyDataEntry (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.MultipleKeyDataEntry

+
+No usage of com.sleepycat.db.MultipleKeyDataEntry +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/db/class-use/MultipleRecnoDataEntry.html b/db/docs/java/com/sleepycat/db/class-use/MultipleRecnoDataEntry.html new file mode 100644 index 000000000..bd5e4119d --- /dev/null +++ b/db/docs/java/com/sleepycat/db/class-use/MultipleRecnoDataEntry.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.db.MultipleRecnoDataEntry (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.MultipleRecnoDataEntry

+
+No usage of com.sleepycat.db.MultipleRecnoDataEntry +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved.
diff --git a/db/docs/java/com/sleepycat/db/class-use/OperationStatus.html b/db/docs/java/com/sleepycat/db/class-use/OperationStatus.html
new file mode 100644
index 000000000..d55e7af32
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/OperationStatus.html
@@ -0,0 +1,835 @@
+Uses of Class com.sleepycat.db.OperationStatus (Sleepycat Software, Inc. - Berkeley DB Java API)
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.db.OperationStatus

+
+ + + + + + + + + +
+Packages that use OperationStatus
com.sleepycat.dbBerkeley DB Java API
+[reference guide] [Java programming notes]
+  +

+ + + + + +
+Uses of OperationStatus in com.sleepycat.db
+  +

+ + + + + + + + + + + + + + + + + + + + + +
Fields in com.sleepycat.db declared as OperationStatus
+static OperationStatusOperationStatus.SUCCESS + +
+          The operation was successful.
+static OperationStatusOperationStatus.KEYEXIST + +
+          The operation to insert data was configured to not allow overwrite + and the key already exists in the database.
+static OperationStatusOperationStatus.KEYEMPTY + +
+          The cursor operation was unsuccessful because the current record + was deleted.
+static OperationStatusOperationStatus.NOTFOUND + +
+          The requested key/data pair was not found.
+  +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Methods in com.sleepycat.db that return OperationStatus
+ OperationStatusSecondaryDatabase.get(Transaction txn, + DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Retrieves the key/data pair with the given key.
+ OperationStatusSecondaryDatabase.getSearchBoth(Transaction txn, + DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Retrieves the key/data pair with the specified secondary and primary key, that +is, both the primary and secondary key items must match.
+ OperationStatusSecondaryDatabase.getSearchRecordNumber(Transaction txn, + DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Retrieves the key/data pair associated with the specific numbered record of the database.
+ OperationStatusSecondaryCursor.getCurrent(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Returns the key/data pair to which the cursor refers.
+ OperationStatusSecondaryCursor.getFirst(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the first key/data pair of the database, and return +that pair.
+ OperationStatusSecondaryCursor.getLast(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the last key/data pair of the database, and return +that pair.
+ OperationStatusSecondaryCursor.getNext(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the next key/data pair and return that pair.
+ OperationStatusSecondaryCursor.getNextDup(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          If the next key/data pair of the database is a duplicate data record for +the current key/data pair, move the cursor to the next key/data pair +of the database and return that pair.
+ OperationStatusSecondaryCursor.getNextNoDup(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the next non-duplicate key/data pair and return +that pair.
+ OperationStatusSecondaryCursor.getPrev(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the previous key/data pair and return that pair.
+ OperationStatusSecondaryCursor.getPrevDup(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          If the previous key/data pair of the database is a duplicate data record +for the current key/data pair, move the cursor to the previous key/data +pair of the database and return that pair.
+ OperationStatusSecondaryCursor.getPrevNoDup(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the previous non-duplicate key/data pair and return +that pair.
+ OperationStatusSecondaryCursor.getSearchKey(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the given key of the database, and return the datum +associated with the given key.
+ OperationStatusSecondaryCursor.getSearchKeyRange(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the closest matching key of the database, and return +the data item associated with the matching key.
+ OperationStatusSecondaryCursor.getSearchBoth(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the specified secondary and primary key, where both +the primary and secondary key items must match.
+ OperationStatusSecondaryCursor.getSearchBothRange(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the specified secondary key and closest matching primary +key of the database.
+ OperationStatusSecondaryCursor.getRecordNumber(DatabaseEntry secondaryRecno, + DatabaseEntry primaryRecno, + LockMode lockMode) + +
+          Return the record number associated with the cursor.
+ OperationStatusSecondaryCursor.getSearchRecordNumber(DatabaseEntry secondaryRecno, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the specific numbered record of the database, and +return the associated key/data pair.
+ OperationStatusLogCursor.getCurrent(LogSequenceNumber lsn, + DatabaseEntry data) + +
+          Return the LogSequenceNumber and log record to which the log cursor + currently refers.
+ OperationStatusLogCursor.getNext(LogSequenceNumber lsn, + DatabaseEntry data) + +
+          Return the next LogSequenceNumber and log record.
+ OperationStatusLogCursor.getFirst(LogSequenceNumber lsn, + DatabaseEntry data) + +
+          Return the first LogSequenceNumber and log record.
+ OperationStatusLogCursor.getLast(LogSequenceNumber lsn, + DatabaseEntry data) + +
+          Return the last LogSequenceNumber and log record.
+ OperationStatusLogCursor.getPrev(LogSequenceNumber lsn, + DatabaseEntry data) + +
+          Return the previous LogSequenceNumber and log record.
+ OperationStatusLogCursor.set(LogSequenceNumber lsn, + DatabaseEntry data) + +
+          Return a specific log record.
+ OperationStatusJoinCursor.getNext(DatabaseEntry key, + LockMode lockMode) + +
+          Returns the next primary key resulting from the join operation.
+ OperationStatusJoinCursor.getNext(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Returns the next primary key and data resulting from the join operation.
+ OperationStatusDatabase.delete(Transaction txn, + DatabaseEntry key) + +
+          Remove key/data pairs from the database.
+ OperationStatusDatabase.get(Transaction txn, + DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Retrieves the key/data pair with the given key.
+ OperationStatusDatabase.getSearchBoth(Transaction txn, + DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Retrieves the key/data pair with the given key and data value, that is, both +the key and data items must match.
+ OperationStatusDatabase.put(Transaction txn, + DatabaseEntry key, + DatabaseEntry data) + +
+           +Store the key/data pair into the database.
+ OperationStatusDatabase.append(Transaction txn, + DatabaseEntry key, + DatabaseEntry data) + +
+           +Append the key/data pair to the end of the database.
+ OperationStatusDatabase.putNoOverwrite(Transaction txn, + DatabaseEntry key, + DatabaseEntry data) + +
+           +Store the key/data pair into the database if the key does not already +appear in the database.
+ OperationStatusDatabase.putNoDupData(Transaction txn, + DatabaseEntry key, + DatabaseEntry data) + +
+           +Store the key/data pair into the database if it does not already appear +in the database.
+ OperationStatusDatabase.consume(Transaction txn, + DatabaseEntry key, + DatabaseEntry data, + boolean wait) + +
+          Return the record number and data from the available record closest to +the head of the queue, and delete the record.
+ OperationStatusDatabase.getSearchRecordNumber(Transaction txn, + DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Retrieves the key/data pair associated with the specific numbered record of the database.
+ OperationStatusCursor.delete() + +
+          Delete the key/data pair to which the cursor refers.
+ OperationStatusCursor.put(DatabaseEntry key, + DatabaseEntry data) + +
+          Store a key/data pair into the database.
+ OperationStatusCursor.putNoOverwrite(DatabaseEntry key, + DatabaseEntry data) + +
+          Store a key/data pair into the database.
+ OperationStatusCursor.putNoDupData(DatabaseEntry key, + DatabaseEntry data) + +
+          Store a key/data pair into the database.
+ OperationStatusCursor.putCurrent(DatabaseEntry data) + +
+          Store a key/data pair into the database.
+ OperationStatusCursor.putAfter(DatabaseEntry data) + +
+          Store a key/data pair into the database.
+ OperationStatusCursor.putBefore(DatabaseEntry data) + +
+          Store a key/data pair into the database.
+ OperationStatusCursor.putKeyFirst(DatabaseEntry key, + DatabaseEntry data) + +
+          Store a key/data pair into the database.
+ OperationStatusCursor.putKeyLast(DatabaseEntry key, + DatabaseEntry data) + +
+          Store a key/data pair into the database.
+ OperationStatusCursor.getCurrent(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Returns the key/data pair to which the cursor refers.
+ OperationStatusCursor.getFirst(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the first key/data pair of the database, and return +that pair.
+ OperationStatusCursor.getLast(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the last key/data pair of the database, and return +that pair.
+ OperationStatusCursor.getNext(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the next key/data pair and return that pair.
+ OperationStatusCursor.getNextDup(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          If the next key/data pair of the database is a duplicate data record for +the current key/data pair, move the cursor to the next key/data pair +of the database and return that pair.
+ OperationStatusCursor.getNextNoDup(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the next non-duplicate key/data pair and return +that pair.
+ OperationStatusCursor.getPrev(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the previous key/data pair and return that pair.
+ OperationStatusCursor.getPrevDup(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          If the previous key/data pair of the database is a duplicate data record +for the current key/data pair, move the cursor to the previous key/data +pair of the database and return that pair.
+ OperationStatusCursor.getPrevNoDup(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the previous non-duplicate key/data pair and return +that pair.
+ OperationStatusCursor.getSearchKey(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the given key of the database, and return the datum +associated with the given key.
+ OperationStatusCursor.getSearchKeyRange(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the closest matching key of the database, and return +the data item associated with the matching key.
+ OperationStatusCursor.getSearchBoth(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the specified key/data pair, where both the key and +data items must match.
+ OperationStatusCursor.getSearchBothRange(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the specified key and closest matching data item of the +database.
+ OperationStatusCursor.getRecordNumber(DatabaseEntry data, + LockMode lockMode) + +
+          Return the record number associated with the cursor.
+ OperationStatusCursor.getSearchRecordNumber(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + +
+          Move the cursor to the specific numbered record of the database, and +return the associated key/data pair.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
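A minimal illustrative sketch (not part of the generated documentation) of branching on these OperationStatus values after a Database.get call; the helper class, the null transaction, and the printed messages are assumptions:

    import com.sleepycat.db.*;

    public class StatusExample {
        // Look up one key and report the outcome; 'db' is an already-open Database handle.
        static void report(Database db, byte[] keyBytes) throws DatabaseException {
            DatabaseEntry key = new DatabaseEntry(keyBytes);
            DatabaseEntry data = new DatabaseEntry();
            OperationStatus status = db.get(null, key, data, LockMode.DEFAULT);
            if (status == OperationStatus.SUCCESS) {
                System.out.println("found " + data.getSize() + " bytes of data");
            } else if (status == OperationStatus.NOTFOUND) {
                System.out.println("key not present");
            } else if (status == OperationStatus.KEYEMPTY) {
                System.out.println("record at this position was deleted");
            }
        }
    }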
diff --git a/db/docs/java/com/sleepycat/db/class-use/PanicHandler.html b/db/docs/java/com/sleepycat/db/class-use/PanicHandler.html
new file mode 100644
index 000000000..3b68755d0
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/PanicHandler.html
@@ -0,0 +1,205 @@
+Uses of Interface com.sleepycat.db.PanicHandler (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Interface com.sleepycat.db.PanicHandler
+
+Packages that use PanicHandler: com.sleepycat.db (Berkeley DB Java API)
+
+Methods in com.sleepycat.db that return PanicHandler:
+ PanicHandler EnvironmentConfig.getPanicHandler()
+          Return the function to be called if the database environment panics.
+ PanicHandler DatabaseConfig.getPanicHandler()
+          Return the function to be called if the database environment panics.
+
+Methods in com.sleepycat.db with parameters of type PanicHandler:
+ void EnvironmentConfig.setPanicHandler(PanicHandler panicHandler)
+          Set the function to be called if the database environment panics.
+ void DatabaseConfig.setPanicHandler(PanicHandler panicHandler)
+          Set the function to be called if the database environment panics.
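A minimal sketch of registering a panic callback through EnvironmentConfig.setPanicHandler as listed above; the PanicHandler callback method is assumed here to be panic(Environment, DatabaseException), and the rest of the environment configuration is omitted:

    import com.sleepycat.db.*;

    public class PanicExample {
        static EnvironmentConfig configWithPanicHandler() {
            EnvironmentConfig config = new EnvironmentConfig();
            // Called by the library if the database environment panics (callback name assumed).
            config.setPanicHandler(new PanicHandler() {
                public void panic(Environment environment, DatabaseException e) {
                    System.err.println("environment panic: " + e);
                }
            });
            return config;
        }
    }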
diff --git a/db/docs/java/com/sleepycat/db/class-use/PreparedTransaction.html b/db/docs/java/com/sleepycat/db/class-use/PreparedTransaction.html
new file mode 100644
index 000000000..0a789f301
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/PreparedTransaction.html
@@ -0,0 +1,174 @@
+Uses of Class com.sleepycat.db.PreparedTransaction (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Class com.sleepycat.db.PreparedTransaction
+
+Packages that use PreparedTransaction: com.sleepycat.db (Berkeley DB Java API)
+
+Methods in com.sleepycat.db that return PreparedTransaction:
+ PreparedTransaction[] Environment.recover(int count, boolean continued)
+          Return a list of prepared but not yet resolved transactions.
diff --git a/db/docs/java/com/sleepycat/db/class-use/QueueStats.html b/db/docs/java/com/sleepycat/db/class-use/QueueStats.html
new file mode 100644
index 000000000..80e237fb2
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/QueueStats.html
@@ -0,0 +1,136 @@
+Uses of Class com.sleepycat.db.QueueStats (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Class com.sleepycat.db.QueueStats
+
+No usage of com.sleepycat.db.QueueStats
diff --git a/db/docs/java/com/sleepycat/db/class-use/RecordNumberAppender.html b/db/docs/java/com/sleepycat/db/class-use/RecordNumberAppender.html
new file mode 100644
index 000000000..a89a48f75
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/RecordNumberAppender.html
@@ -0,0 +1,192 @@
+Uses of Interface com.sleepycat.db.RecordNumberAppender (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Interface com.sleepycat.db.RecordNumberAppender
+
+Packages that use RecordNumberAppender: com.sleepycat.db (Berkeley DB Java API)
+
+Methods in com.sleepycat.db that return RecordNumberAppender:
+ RecordNumberAppender DatabaseConfig.getRecordNumberAppender()
+          Return the function to call after the record number has been selected but before the data has been stored into the database.
+
+Methods in com.sleepycat.db with parameters of type RecordNumberAppender:
+ void DatabaseConfig.setRecordNumberAppender(RecordNumberAppender recnoAppender)
+          Configure Database.append to call the function after the record number has been selected but before the data has been stored into the database.
diff --git a/db/docs/java/com/sleepycat/db/class-use/RecoveryOperation.html b/db/docs/java/com/sleepycat/db/class-use/RecoveryOperation.html
new file mode 100644
index 000000000..e0b0e72c7
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/RecoveryOperation.html
@@ -0,0 +1,247 @@
+Uses of Class com.sleepycat.db.RecoveryOperation (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Class com.sleepycat.db.RecoveryOperation
+
+Packages that use RecoveryOperation: com.sleepycat.db (Berkeley DB Java API)
+
+Fields in com.sleepycat.db declared as RecoveryOperation:
+ static RecoveryOperation RecoveryOperation.BACKWARD_ROLL
+          The log is being read backward to determine which transactions have been committed and to abort those operations that were not; undo the operation described by the log record.
+ static RecoveryOperation RecoveryOperation.FORWARD_ROLL
+          The log is being played forward; redo the operation described by the log record.
+ static RecoveryOperation RecoveryOperation.ABORT
+          The log is being read backward during a transaction abort; undo the operation described by the log record.
+ static RecoveryOperation RecoveryOperation.APPLY
+          The log is being applied on a replica site; redo the operation described by the log record.
+ static RecoveryOperation RecoveryOperation.PRINT
+          The log is being printed for debugging purposes; print the contents of this log record in the desired format.
+
+Methods in com.sleepycat.db that return RecoveryOperation:
+ static RecoveryOperation RecoveryOperation.fromFlag(int flag)
+          Internal: this is public only so it can be called from an internal package.
+
+Methods in com.sleepycat.db with parameters of type RecoveryOperation:
+ int LogRecordHandler.handleLogRecord(Environment environment, DatabaseEntry logRecord, LogSequenceNumber lsn, RecoveryOperation operation)
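A minimal sketch of a LogRecordHandler that branches on the RecoveryOperation it is passed, using the handleLogRecord signature listed above; the meaning of the integer return value is an assumption (zero is used here as a success indicator), and registering the handler with the environment is not shown:

    import com.sleepycat.db.*;

    public class AppLogRecordHandler implements LogRecordHandler {
        public int handleLogRecord(Environment environment,
                                   DatabaseEntry logRecord,
                                   LogSequenceNumber lsn,
                                   RecoveryOperation operation) {
            if (operation == RecoveryOperation.PRINT) {
                // Print the contents of this application-specific record.
                System.out.println("app record, " + logRecord.getSize() + " bytes at " + lsn);
            } else if (operation == RecoveryOperation.FORWARD_ROLL
                    || operation == RecoveryOperation.APPLY) {
                // Redo the operation described by the log record.
            } else if (operation == RecoveryOperation.BACKWARD_ROLL
                    || operation == RecoveryOperation.ABORT) {
                // Undo the operation described by the log record.
            }
            return 0; // assumed success return value
        }
    }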
diff --git a/db/docs/java/com/sleepycat/db/class-use/ReplicationHandleDeadException.html b/db/docs/java/com/sleepycat/db/class-use/ReplicationHandleDeadException.html
new file mode 100644
index 000000000..1d2514451
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/ReplicationHandleDeadException.html
@@ -0,0 +1,136 @@
+Uses of Class com.sleepycat.db.ReplicationHandleDeadException (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Class com.sleepycat.db.ReplicationHandleDeadException
+
+No usage of com.sleepycat.db.ReplicationHandleDeadException
diff --git a/db/docs/java/com/sleepycat/db/class-use/ReplicationStats.html b/db/docs/java/com/sleepycat/db/class-use/ReplicationStats.html
new file mode 100644
index 000000000..85e2f6f22
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/ReplicationStats.html
@@ -0,0 +1,173 @@
+Uses of Class com.sleepycat.db.ReplicationStats (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Class com.sleepycat.db.ReplicationStats
+
+Packages that use ReplicationStats: com.sleepycat.db (Berkeley DB Java API)
+
+Methods in com.sleepycat.db that return ReplicationStats:
+ ReplicationStats Environment.getReplicationStats(StatsConfig config)
+          Return the database environment's replication statistics.
diff --git a/db/docs/java/com/sleepycat/db/class-use/ReplicationStatus.html b/db/docs/java/com/sleepycat/db/class-use/ReplicationStatus.html
new file mode 100644
index 000000000..0e8bcf108
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/ReplicationStatus.html
@@ -0,0 +1,176 @@
+Uses of Class com.sleepycat.db.ReplicationStatus (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Class com.sleepycat.db.ReplicationStatus
+
+Packages that use ReplicationStatus: com.sleepycat.db (Berkeley DB Java API)
+
+Methods in com.sleepycat.db that return ReplicationStatus:
+ ReplicationStatus Environment.processReplicationMessage(DatabaseEntry control, DatabaseEntry rec, int envid)
+          Process an incoming replication message sent by a member of the replication group to the local database environment.
diff --git a/db/docs/java/com/sleepycat/db/class-use/ReplicationTransport.html b/db/docs/java/com/sleepycat/db/class-use/ReplicationTransport.html
new file mode 100644
index 000000000..71e0d9af6
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/ReplicationTransport.html
@@ -0,0 +1,192 @@
+Uses of Interface com.sleepycat.db.ReplicationTransport (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Interface com.sleepycat.db.ReplicationTransport
+
+Packages that use ReplicationTransport: com.sleepycat.db (Berkeley DB Java API)
+
+Methods in com.sleepycat.db that return ReplicationTransport:
+ ReplicationTransport EnvironmentConfig.getReplicationTransport()
+          Return the replication callback function used to transmit data using the replication application's communication infrastructure.
+
+Methods in com.sleepycat.db with parameters of type ReplicationTransport:
+ void EnvironmentConfig.setReplicationTransport(int envid, ReplicationTransport replicationTransport)
+          Initialize the communication infrastructure for a database environment participating in a replicated application.
diff --git a/db/docs/java/com/sleepycat/db/class-use/RunRecoveryException.html b/db/docs/java/com/sleepycat/db/class-use/RunRecoveryException.html
new file mode 100644
index 000000000..8c6c1268f
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/RunRecoveryException.html
@@ -0,0 +1,136 @@
+Uses of Class com.sleepycat.db.RunRecoveryException (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Class com.sleepycat.db.RunRecoveryException
+
+No usage of com.sleepycat.db.RunRecoveryException
diff --git a/db/docs/java/com/sleepycat/db/class-use/SecondaryConfig.html b/db/docs/java/com/sleepycat/db/class-use/SecondaryConfig.html
new file mode 100644
index 000000000..6323482ee
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/SecondaryConfig.html
@@ -0,0 +1,226 @@
+Uses of Class com.sleepycat.db.SecondaryConfig (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Class com.sleepycat.db.SecondaryConfig
+
+Packages that use SecondaryConfig: com.sleepycat.db (Berkeley DB Java API)
+
+Fields in com.sleepycat.db declared as SecondaryConfig:
+ static SecondaryConfig SecondaryConfig.DEFAULT
+
+Methods in com.sleepycat.db that return SecondaryConfig:
+ SecondaryConfig SecondaryDatabase.getSecondaryConfig()
+          Returns a copy of the secondary configuration of this database.
+
+Methods in com.sleepycat.db with parameters of type SecondaryConfig:
+ SecondaryDatabase Environment.openSecondaryDatabase(Transaction txn, String fileName, String databaseName, Database primaryDatabase, SecondaryConfig config)
+          Open a database.
+
+Constructors in com.sleepycat.db with parameters of type SecondaryConfig:
+ SecondaryDatabase(String fileName, String databaseName, Database primaryDatabase, SecondaryConfig config)
+          Open a database.
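A minimal sketch of associating a secondary index with a primary database via SecondaryConfig and Environment.openSecondaryDatabase as listed above; the file name, the null database name and transaction, and the allow-create and Btree-type setters (assumed to be inherited from DatabaseConfig) are illustrative assumptions, and the key creator is described under SecondaryKeyCreator below:

    import com.sleepycat.db.*;

    public class SecondaryOpenExample {
        // Open (or create) a secondary index on an existing primary database.
        static SecondaryDatabase openIndex(Environment env, Database primary,
                                           SecondaryKeyCreator keyCreator)
                throws DatabaseException, java.io.FileNotFoundException {
            SecondaryConfig config = new SecondaryConfig();
            config.setAllowCreate(true);           // create the index file if it does not exist (assumed setter)
            config.setType(DatabaseType.BTREE);    // access method for the index (assumed setter)
            config.setKeyCreator(keyCreator);      // how secondary keys are derived from primary records
            return env.openSecondaryDatabase(null, "index.db", null, primary, config);
        }
    }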
diff --git a/db/docs/java/com/sleepycat/db/class-use/SecondaryCursor.html b/db/docs/java/com/sleepycat/db/class-use/SecondaryCursor.html
new file mode 100644
index 000000000..55c0a9115
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/SecondaryCursor.html
@@ -0,0 +1,182 @@
+Uses of Class com.sleepycat.db.SecondaryCursor (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Class com.sleepycat.db.SecondaryCursor
+
+Packages that use SecondaryCursor: com.sleepycat.db (Berkeley DB Java API)
+
+Methods in com.sleepycat.db that return SecondaryCursor:
+ SecondaryCursor SecondaryDatabase.openSecondaryCursor(Transaction txn, CursorConfig cursorConfig)
+          Obtain a cursor on a database, returning a SecondaryCursor.
+ SecondaryCursor SecondaryCursor.dupSecondary(boolean samePosition)
+          Returns a new copy of the cursor as a SecondaryCursor.
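A minimal sketch of scanning a secondary index with a SecondaryCursor, using openSecondaryCursor above and the SecondaryCursor.getNext signature listed under OperationStatus; passing null for the transaction and CursorConfig to get default behaviour is an assumption:

    import com.sleepycat.db.*;

    public class SecondaryScanExample {
        static void scan(SecondaryDatabase index) throws DatabaseException {
            DatabaseEntry secKey = new DatabaseEntry();
            DatabaseEntry primaryKey = new DatabaseEntry();
            DatabaseEntry data = new DatabaseEntry();
            SecondaryCursor cursor = index.openSecondaryCursor(null, null);
            try {
                // Walk every secondary key together with its primary key and data.
                while (cursor.getNext(secKey, primaryKey, data, LockMode.DEFAULT)
                        == OperationStatus.SUCCESS) {
                    System.out.println(secKey.getSize() + " byte secondary key -> "
                            + primaryKey.getSize() + " byte primary key");
                }
            } finally {
                cursor.close();
            }
        }
    }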
diff --git a/db/docs/java/com/sleepycat/db/class-use/SecondaryDatabase.html b/db/docs/java/com/sleepycat/db/class-use/SecondaryDatabase.html
new file mode 100644
index 000000000..bfaf80abb
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/SecondaryDatabase.html
@@ -0,0 +1,306 @@
+Uses of Class com.sleepycat.db.SecondaryDatabase (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Class com.sleepycat.db.SecondaryDatabase
+
+Packages that use SecondaryDatabase:
+ com.sleepycat.bind.serial (Bindings that use Java serialization.)
+ com.sleepycat.bind.tuple (Bindings that use sequences of primitive fields, or tuples.)
+ com.sleepycat.db (Berkeley DB Java API)
+
+Uses of SecondaryDatabase in com.sleepycat.bind.serial
+Methods in com.sleepycat.bind.serial with parameters of type SecondaryDatabase:
+ boolean TupleSerialKeyCreator.createSecondaryKey(SecondaryDatabase db, DatabaseEntry primaryKeyEntry, DatabaseEntry dataEntry, DatabaseEntry indexKeyEntry)
+ boolean TupleSerialKeyCreator.nullifyForeignKey(SecondaryDatabase db, DatabaseEntry dataEntry)
+ boolean SerialSerialKeyCreator.createSecondaryKey(SecondaryDatabase db, DatabaseEntry primaryKeyEntry, DatabaseEntry dataEntry, DatabaseEntry indexKeyEntry)
+ boolean SerialSerialKeyCreator.nullifyForeignKey(SecondaryDatabase db, DatabaseEntry dataEntry)
+
+Uses of SecondaryDatabase in com.sleepycat.bind.tuple
+Methods in com.sleepycat.bind.tuple with parameters of type SecondaryDatabase:
+ boolean TupleTupleKeyCreator.createSecondaryKey(SecondaryDatabase db, DatabaseEntry primaryKeyEntry, DatabaseEntry dataEntry, DatabaseEntry indexKeyEntry)
+ boolean TupleTupleKeyCreator.nullifyForeignKey(SecondaryDatabase db, DatabaseEntry dataEntry)
+
+Uses of SecondaryDatabase in com.sleepycat.db
+Methods in com.sleepycat.db that return SecondaryDatabase:
+ SecondaryDatabase SecondaryCursor.getSecondaryDatabase()
+          Return the SecondaryDatabase handle associated with this Cursor.
+ SecondaryDatabase Environment.openSecondaryDatabase(Transaction txn, String fileName, String databaseName, Database primaryDatabase, SecondaryConfig config)
+          Open a database.
+
+Methods in com.sleepycat.db with parameters of type SecondaryDatabase:
+ boolean SecondaryKeyCreator.createSecondaryKey(SecondaryDatabase secondary, DatabaseEntry key, DatabaseEntry data, DatabaseEntry result)
+          Creates a secondary key entry, given a primary key and data entry.
diff --git a/db/docs/java/com/sleepycat/db/class-use/SecondaryKeyCreator.html b/db/docs/java/com/sleepycat/db/class-use/SecondaryKeyCreator.html
new file mode 100644
index 000000000..76b4f0979
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/SecondaryKeyCreator.html
@@ -0,0 +1,271 @@
+Uses of Interface com.sleepycat.db.SecondaryKeyCreator (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Interface com.sleepycat.db.SecondaryKeyCreator
+
+Packages that use SecondaryKeyCreator:
+ com.sleepycat.bind.serial (Bindings that use Java serialization.)
+ com.sleepycat.bind.tuple (Bindings that use sequences of primitive fields, or tuples.)
+ com.sleepycat.db (Berkeley DB Java API)
+
+Uses of SecondaryKeyCreator in com.sleepycat.bind.serial
+Classes in com.sleepycat.bind.serial that implement SecondaryKeyCreator:
+ class SerialSerialKeyCreator
+          An abstract key creator that uses a serial key and a serial data entry.
+ class TupleSerialKeyCreator
+          An abstract key creator that uses a tuple key and a serial data entry.
+ class TupleSerialMarshalledKeyCreator
+          A concrete key creator that works in conjunction with a TupleSerialMarshalledBinding.
+
+Uses of SecondaryKeyCreator in com.sleepycat.bind.tuple
+Classes in com.sleepycat.bind.tuple that implement SecondaryKeyCreator:
+ class TupleTupleKeyCreator
+          An abstract key creator that uses a tuple key and a tuple data entry.
+ class TupleTupleMarshalledKeyCreator
+          A concrete key creator that works in conjunction with a TupleTupleMarshalledBinding.
+
+Uses of SecondaryKeyCreator in com.sleepycat.db
+Methods in com.sleepycat.db that return SecondaryKeyCreator:
+ SecondaryKeyCreator SecondaryConfig.getKeyCreator()
+          Returns the user-supplied object used for creating secondary keys.
+
+Methods in com.sleepycat.db with parameters of type SecondaryKeyCreator:
+ void SecondaryConfig.setKeyCreator(SecondaryKeyCreator keyCreator)
+          Specifies the user-supplied object used for creating secondary keys.
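A minimal sketch of a SecondaryKeyCreator, using the createSecondaryKey signature listed under SecondaryDatabase above; the record layout (the first four bytes of the primary data are treated as the secondary key) is purely an illustrative assumption:

    import com.sleepycat.db.*;

    public class FirstFourBytesKeyCreator implements SecondaryKeyCreator {
        public boolean createSecondaryKey(SecondaryDatabase secondary,
                                          DatabaseEntry key,
                                          DatabaseEntry data,
                                          DatabaseEntry result) {
            if (data.getSize() < 4) {
                return false; // no secondary key for this record
            }
            byte[] secKey = new byte[4];
            System.arraycopy(data.getData(), data.getOffset(), secKey, 0, 4);
            result.setData(secKey); // the derived secondary key
            return true;
        }
    }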
diff --git a/db/docs/java/com/sleepycat/db/class-use/Sequence.html b/db/docs/java/com/sleepycat/db/class-use/Sequence.html
new file mode 100644
index 000000000..75a2aac8c
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/Sequence.html
@@ -0,0 +1,175 @@
+Uses of Class com.sleepycat.db.Sequence (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Class com.sleepycat.db.Sequence
+
+Packages that use Sequence: com.sleepycat.db (Berkeley DB Java API)
+
+Methods in com.sleepycat.db that return Sequence:
+ Sequence Database.openSequence(Transaction txn, DatabaseEntry key, SequenceConfig sequenceConfig)
+          Open a sequence in the database.
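A minimal sketch of opening a sequence with Database.openSequence and drawing a value from it with the Sequence.get(Transaction, int) method listed under Transaction below; the sequence key name, the null transactions, the allow-create setter, and Sequence.close() are assumptions:

    import com.sleepycat.db.*;

    public class SequenceExample {
        static long nextId(Database db) throws DatabaseException {
            DatabaseEntry seqKey = new DatabaseEntry("order-ids".getBytes());
            SequenceConfig config = new SequenceConfig();
            config.setAllowCreate(true);     // create the sequence record if it does not exist (assumed setter)
            Sequence seq = db.openSequence(null, seqKey, config);
            try {
                return seq.get(null, 1);     // advance the sequence by one and return the next value
            } finally {
                seq.close();
            }
        }
    }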
diff --git a/db/docs/java/com/sleepycat/db/class-use/SequenceConfig.html b/db/docs/java/com/sleepycat/db/class-use/SequenceConfig.html
new file mode 100644
index 000000000..a03a0ebf5
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/SequenceConfig.html
@@ -0,0 +1,201 @@
+Uses of Class com.sleepycat.db.SequenceConfig (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Class com.sleepycat.db.SequenceConfig
+
+Packages that use SequenceConfig: com.sleepycat.db (Berkeley DB Java API)
+
+Fields in com.sleepycat.db declared as SequenceConfig:
+ static SequenceConfig SequenceConfig.DEFAULT
+
+Methods in com.sleepycat.db with parameters of type SequenceConfig:
+ Sequence Database.openSequence(Transaction txn, DatabaseEntry key, SequenceConfig sequenceConfig)
+          Open a sequence in the database.
+ void Database.removeSequence(Transaction txn, DatabaseEntry key, SequenceConfig sequenceConfig)
+          Remove the sequence from the database.
diff --git a/db/docs/java/com/sleepycat/db/class-use/SequenceStats.html b/db/docs/java/com/sleepycat/db/class-use/SequenceStats.html
new file mode 100644
index 000000000..486f79643
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/SequenceStats.html
@@ -0,0 +1,173 @@
+Uses of Class com.sleepycat.db.SequenceStats (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Class com.sleepycat.db.SequenceStats
+
+Packages that use SequenceStats: com.sleepycat.db (Berkeley DB Java API)
+
+Methods in com.sleepycat.db that return SequenceStats:
+ SequenceStats Sequence.getStats(StatsConfig config)
+          Return statistical information about the sequence.
diff --git a/db/docs/java/com/sleepycat/db/class-use/StatsConfig.html b/db/docs/java/com/sleepycat/db/class-use/StatsConfig.html
new file mode 100644
index 000000000..738679cc0
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/StatsConfig.html
@@ -0,0 +1,247 @@
+Uses of Class com.sleepycat.db.StatsConfig (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Class com.sleepycat.db.StatsConfig
+
+Packages that use StatsConfig: com.sleepycat.db (Berkeley DB Java API)
+
+Fields in com.sleepycat.db declared as StatsConfig:
+ static StatsConfig StatsConfig.DEFAULT
+
+Methods in com.sleepycat.db with parameters of type StatsConfig:
+ SequenceStats Sequence.getStats(StatsConfig config)
+          Return statistical information about the sequence.
+ LockStats Environment.getLockStats(StatsConfig config)
+          Return the database environment's locking statistics.
+ TransactionStats Environment.getTransactionStats(StatsConfig config)
+          Return the database environment's transactional statistics.
+ CacheStats Environment.getCacheStats(StatsConfig config)
+ CacheFileStats[] Environment.getCacheFileStats(StatsConfig config)
+          Return the database environment's per-file memory pool (that is, the buffer cache) statistics.
+ LogStats Environment.getLogStats(StatsConfig config)
+          Return the database environment's logging statistics.
+ ReplicationStats Environment.getReplicationStats(StatsConfig config)
+          Return the database environment's replication statistics.
+ DatabaseStats Database.getStats(Transaction txn, StatsConfig statsConfig)
+          Return database statistics.
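A minimal sketch of gathering statistics with the shared StatsConfig.DEFAULT configuration, using the methods listed above; which statistics are printed, and printing them via toString, are illustrative choices:

    import com.sleepycat.db.*;

    public class StatsExample {
        static void printStats(Environment env, Database db) throws DatabaseException {
            TransactionStats txnStats = env.getTransactionStats(StatsConfig.DEFAULT);
            LockStats lockStats = env.getLockStats(StatsConfig.DEFAULT);
            DatabaseStats dbStats = db.getStats(null, StatsConfig.DEFAULT);
            System.out.println("txn stats:  " + txnStats);
            System.out.println("lock stats: " + lockStats);
            System.out.println("db stats:   " + dbStats);
        }
    }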
diff --git a/db/docs/java/com/sleepycat/db/class-use/Transaction.html b/db/docs/java/com/sleepycat/db/class-use/Transaction.html
new file mode 100644
index 000000000..459b8d668
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/Transaction.html
@@ -0,0 +1,519 @@
+Uses of Class com.sleepycat.db.Transaction (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Class com.sleepycat.db.Transaction
+
+Packages that use Transaction:
+ com.sleepycat.collections (Data access based on the standard Java collections API)
+ com.sleepycat.db (Berkeley DB Java API)
+
+Uses of Transaction in com.sleepycat.collections
+Methods in com.sleepycat.collections that return Transaction:
+ Transaction CurrentTransaction.getTransaction()
+          Returns the transaction associated with the current thread for this environment, or null if no transaction is active.
+ Transaction CurrentTransaction.beginTransaction(TransactionConfig config)
+          Begins a new transaction for this environment and associates it with the current thread.
+ Transaction CurrentTransaction.commitTransaction()
+          Commits the transaction that is active for the current thread for this environment and makes the parent transaction (if any) the current transaction.
+ Transaction CurrentTransaction.abortTransaction()
+          Aborts the transaction that is active for the current thread for this environment and makes the parent transaction (if any) the current transaction.
+
+Uses of Transaction in com.sleepycat.db
+Methods in com.sleepycat.db that return Transaction:
+ Transaction PreparedTransaction.getTransaction()
+          Return the transaction handle for the transaction.
+ Transaction Environment.beginTransaction(Transaction parent, TransactionConfig txnConfig)
+          Create a new transaction in the database environment.
+
+Methods in com.sleepycat.db that take a Transaction parameter (each accepts the transaction as its first argument):
+ Sequence.get(Transaction txn, int delta)
+ SecondaryDatabase.get, getSearchBoth, getSearchRecordNumber, openSecondaryCursor
+ Environment.beginTransaction, openDatabase, openSecondaryDatabase, removeDatabase, renameDatabase
+ Database.delete, get, getSearchBoth, put, append, putNoOverwrite, putNoDupData, openCursor, openSequence, removeSequence, getStats, truncate, consume, getKeyRange, getSearchRecordNumber
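A minimal sketch of running one update inside a transaction, using Environment.beginTransaction and Database.put as listed above; Transaction.commit() and Transaction.abort() are assumed to be the resolution methods, and passing null for the parent and configuration is assumed to select the defaults:

    import com.sleepycat.db.*;

    public class TxnExample {
        static OperationStatus putTransactionally(Environment env, Database db,
                                                  DatabaseEntry key, DatabaseEntry data)
                throws DatabaseException {
            Transaction txn = env.beginTransaction(null, null); // null parent, default configuration (assumed)
            boolean committed = false;
            try {
                OperationStatus status = db.put(txn, key, data);
                txn.commit();
                committed = true;
                return status;
            } finally {
                if (!committed) {
                    txn.abort(); // roll back on any failure
                }
            }
        }
    }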
diff --git a/db/docs/java/com/sleepycat/db/class-use/TransactionConfig.html b/db/docs/java/com/sleepycat/db/class-use/TransactionConfig.html
new file mode 100644
index 000000000..57cb28e70
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/TransactionConfig.html
@@ -0,0 +1,264 @@
+Uses of Class com.sleepycat.db.TransactionConfig (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Class com.sleepycat.db.TransactionConfig
+
+Packages that use TransactionConfig:
+ com.sleepycat.collections (Data access based on the standard Java collections API)
+ com.sleepycat.db (Berkeley DB Java API)
+
+Uses of TransactionConfig in com.sleepycat.collections
+Methods in com.sleepycat.collections that return TransactionConfig:
+ TransactionConfig TransactionRunner.getTransactionConfig()
+          Returns the transaction configuration used for calling Environment.beginTransaction(Transaction, TransactionConfig).
+
+Methods in com.sleepycat.collections with parameters of type TransactionConfig:
+ void TransactionRunner.setTransactionConfig(TransactionConfig config)
+          Changes the transaction configuration used for calling Environment.beginTransaction(Transaction, TransactionConfig).
+ Transaction CurrentTransaction.beginTransaction(TransactionConfig config)
+          Begins a new transaction for this environment and associates it with the current thread.
+
+Constructors in com.sleepycat.collections with parameters of type TransactionConfig:
+ TransactionRunner(Environment env, int maxRetries, TransactionConfig config)
+          Creates a transaction runner for a given Berkeley DB environment and with a given number of maximum retries.
+
+Uses of TransactionConfig in com.sleepycat.db
+Fields in com.sleepycat.db declared as TransactionConfig:
+ static TransactionConfig TransactionConfig.DEFAULT
+
+Methods in com.sleepycat.db with parameters of type TransactionConfig:
+ Transaction Environment.beginTransaction(Transaction parent, TransactionConfig txnConfig)
+          Create a new transaction in the database environment.
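A minimal sketch of supplying a TransactionConfig to a com.sleepycat.collections TransactionRunner via the constructor listed above; the TransactionWorker interface, its doWork() method, the run() method, and the retry count are assumptions about the collections API:

    import com.sleepycat.collections.TransactionRunner;
    import com.sleepycat.collections.TransactionWorker;
    import com.sleepycat.db.Environment;
    import com.sleepycat.db.TransactionConfig;

    public class RunnerExample {
        static void runWithConfig(Environment env, final Runnable body) throws Exception {
            TransactionConfig config = new TransactionConfig();
            TransactionRunner runner = new TransactionRunner(env, 3, config); // retry up to 3 times (assumed semantics)
            runner.run(new TransactionWorker() {
                public void doWork() throws Exception {
                    body.run(); // application work executed inside the transaction
                }
            });
        }
    }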
diff --git a/db/docs/java/com/sleepycat/db/class-use/TransactionStats.Active.html b/db/docs/java/com/sleepycat/db/class-use/TransactionStats.Active.html
new file mode 100644
index 000000000..a812ac9d8
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/TransactionStats.Active.html
@@ -0,0 +1,173 @@
+Uses of Class com.sleepycat.db.TransactionStats.Active (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Class com.sleepycat.db.TransactionStats.Active
+
+Packages that use TransactionStats.Active: com.sleepycat.db (Berkeley DB Java API)
+
+Methods in com.sleepycat.db that return TransactionStats.Active:
+ TransactionStats.Active[] TransactionStats.getTxnarray()
+          The array of active transactions.
diff --git a/db/docs/java/com/sleepycat/db/class-use/TransactionStats.html b/db/docs/java/com/sleepycat/db/class-use/TransactionStats.html
new file mode 100644
index 000000000..fff69d70d
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/TransactionStats.html
@@ -0,0 +1,173 @@
+Uses of Class com.sleepycat.db.TransactionStats (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Class com.sleepycat.db.TransactionStats
+
+Packages that use TransactionStats: com.sleepycat.db (Berkeley DB Java API)
+
+Methods in com.sleepycat.db that return TransactionStats:
+ TransactionStats Environment.getTransactionStats(StatsConfig config)
+          Return the database environment's transactional statistics.
diff --git a/db/docs/java/com/sleepycat/db/class-use/VerifyConfig.html b/db/docs/java/com/sleepycat/db/class-use/VerifyConfig.html
new file mode 100644
index 000000000..372593e33
--- /dev/null
+++ b/db/docs/java/com/sleepycat/db/class-use/VerifyConfig.html
@@ -0,0 +1,193 @@
+Uses of Class com.sleepycat.db.VerifyConfig (Sleepycat Software, Inc. - Berkeley DB Java API)
+
+Uses of Class com.sleepycat.db.VerifyConfig
+
+Packages that use VerifyConfig: com.sleepycat.db (Berkeley DB Java API)
+
+Fields in com.sleepycat.db declared as VerifyConfig:
+ static VerifyConfig VerifyConfig.DEFAULT
+          Default configuration used if null is passed to Database.verify.
+
+Methods in com.sleepycat.db with parameters of type VerifyConfig:
+ boolean Database.verify(String fileName, String databaseName, PrintStream dumpStream, VerifyConfig config)
+          Return whether all of the databases in a file are uncorrupted.
diff --git a/db/docs/java/com/sleepycat/db/package-frame.html b/db/docs/java/com/sleepycat/db/package-frame.html
index cca61cc96..8960230ca 100644
--- a/db/docs/java/com/sleepycat/db/package-frame.html
+++ b/db/docs/java/com/sleepycat/db/package-frame.html
@@ -1,140 +1,177 @@
 com.sleepycat.db (Sleepycat Software, Inc. - Berkeley DB Java API)
 com.sleepycat.db
 Interfaces
-DbAppDispatch
-DbAppendRecno
-DbBtreeCompare
-DbBtreePrefix
-DbClient
-DbDupCompare
-DbEnvFeedback
-DbEnvFeedbackHandler
-DbErrcall
-DbErrorHandler
-DbFeedback
-DbFeedbackHandler
-DbHash
-DbPanicHandler
-DbRepTransport
-DbSecondaryKeyCreate
+BtreePrefixCalculator
+ErrorHandler
+FeedbackHandler
+Hasher
+LogRecordHandler
+MessageHandler
+PanicHandler
+RecordNumberAppender
+ReplicationTransport
+SecondaryKeyCreator
 Classes
-Db
-DbBtreeStat
-Dbc
-DbEnv
-DbEnv.RepProcessMessage
-DbHashStat
-DbKeyRange
-DbLock
-DbLockRequest
-DbLockStat
-DbLogc
-DbLogStat
-DbLsn
-DbMpoolFile
-DbMpoolFStat
-DbMpoolStat
-DbMultipleDataIterator
-DbMultipleKeyDataIterator
-DbMultipleRecnoDataIterator
-DbPreplist
-DbQueueStat
-DbRepStat
-Dbt
-DbTxn
-DbTxnStat
-DbTxnStat.Active
+BtreeStats
+CacheFile
+CacheFilePriority
+CacheFileStats
+CacheStats
+CheckpointConfig
+Cursor
+CursorConfig
+Database
+DatabaseConfig
+DatabaseEntry
+DatabaseStats
+DatabaseType
+Environment
+EnvironmentConfig
+HashStats
+JoinConfig
+JoinCursor
+KeyRange
+Lock
+LockDetectMode
+LockMode
+LockOperation
+LockRequest
+LockRequestMode
+LockStats
+LogCursor
+LogSequenceNumber
+LogStats
+MultipleDataEntry
+MultipleEntry
+MultipleKeyDataEntry
+MultipleRecnoDataEntry
+OperationStatus
+PreparedTransaction
+QueueStats
+RecoveryOperation
+ReplicationStats
+ReplicationStatus
+SecondaryConfig
+SecondaryCursor
+SecondaryDatabase
+Sequence
+SequenceConfig
+SequenceStats
+StatsConfig
+Transaction
+TransactionConfig
+TransactionStats
+VerifyConfig
 Exceptions
-DbDeadlockException
-DbException
-DbLockNotGrantedException
-DbMemoryException
-DbRunRecoveryException
+DatabaseException
+DeadlockException
+LockNotGrantedException
+MemoryException
+ReplicationHandleDeadException
+RunRecoveryException
diff --git a/db/docs/java/com/sleepycat/db/package-summary.html b/db/docs/java/com/sleepycat/db/package-summary.html index 6bc01a6d4..cababc2a6 100644 --- a/db/docs/java/com/sleepycat/db/package-summary.html +++ b/db/docs/java/com/sleepycat/db/package-summary.html @@ -1,29 +1,36 @@ - + - + com.sleepycat.db (Sleepycat Software, Inc. - Berkeley DB Java API) + + - - - - + + + + + + - + +
PREV PACKAGE  + NEXT PACKAGE
-Berkeley DB
version 4.2.52
+Berkeley DB
version 4.3.14
- + +

Package com.sleepycat.db

-Java API programming notes
-[reference guide] +Berkeley DB Java API
+[reference guide] [Java programming notes].

See:
          Description

- +
- - + + + + + + + + + + + + + + - - + + - - + + - - + + - - + + - - + + - - + + + +
Interface Summary
DbAppDispatchAn interface specifying a recovery function, which recovers application-specific actions.BtreePrefixCalculatorAn interface specifying how Btree prefixes should be calculated.
ErrorHandlerAn interface specifying a callback function to be called when an error +occurs in the Berkeley DB library.
FeedbackHandlerAn interface specifying a function to be called to provide feedback.
HasherAn application-specified, database hash function.
DbAppendRecnoAn interface specifying a callback function that modifies stored data based on a generated key.LogRecordHandlerA function to process application-specific log records.
DbBtreeCompareAn interface specifying a comparison function, which imposes a total ordering on the keys in a Btree database.MessageHandlerAn interface specifying a callback function to be called to display +informational messages.
DbBtreePrefixAn interface specifying a comparison function, which specifies the number of bytes needed to differentiate Btree keys.PanicHandlerAn interface specifying a function to be called if the database +environment panics.
DbClientThe DbClient object is used to encapsulate a reference to an RPC client.RecordNumberAppenderAn interface specifying a callback function that modifies stored data +based on a generated key.
DbDupCompareAn interface specifying a comparison function, which imposes a total ordering on the duplicate data items in a Btree database.ReplicationTransportAn interface specifying a replication transmit function, which sends +information to other members of the replication group.
DbEnvFeedbackDeprecated. As of Berkeley DB 4.2, replaced by DbEnvFeedbackHandlerSecondaryKeyCreatorAn interface specifying how secondary keys for a +SecondaryDatabase are created.
+  + +

+ + + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + -
+Class Summary
DbEnvFeedbackHandlerThe DbEnvFeedbackHandler interface is used by the DbEnv.setFeedback method.BtreeStatsThe BtreeStats object is used to return Btree +or Recno database statistics.
DbErrcallDeprecated. As of Berkeley DB 4.2, replaced by DbErrorHandlerCacheFileThis class allows applications to modify settings for +a Database using the Database.getCacheFile.
DbErrorHandlerAn interface specifying a application-specific error reporting function.CacheFilePriorityPriorities that can be assigned to files in the cache.
DbFeedbackDeprecated. As of Berkeley DB 4.2, replaced by DbFeedbackHandlerCacheFileStatsStatistics for a file in the cache.
DbFeedbackHandlerThe DbFeedbackHandler interface is used by the Db.setFeedback method.CacheStatsCache statistics for a database environment.
DbHashAn interface specifying a hashing function, which imposes a total ordering on the Hash database.CheckpointConfigSpecifies the attributes of an application invoked checkpoint operation.
DbPanicHandlerAn interface specifying a function to handle database environment panics.CursorA database cursor.
DbRepTransport An interface specifying a replication transmit function, which sends information to other members of the replication group. CursorConfig Specify the attributes of a database cursor.
DbSecondaryKeyCreateAn interface specifying a function which constructs secondary keys from primary key and data items.DatabaseA database handle.
-  - -

- - - - + + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
-Class Summary
DatabaseConfigSpecify the attributes of a database.
DbThe Db handle is the handle for a Berkeley DB database, which may or may not be part of a database environment.DatabaseEntryEncodes database key and data items as a byte array.
DbBtreeStatThe DbBtreeStat object is used to return Btree or Recno database statistics.DatabaseStatsStatistics for a single database.
DbcThe Dbc object is the handle for a cursor into a Berkeley DB database.DatabaseTypeDatabase types.
DbEnvThe DbEnv object is the handle for a Berkeley DB environment -- a collection including support for some or all of caching, locking, logging and transaction subsystems, as well as databases and log files.EnvironmentA database environment.
DbEnv.RepProcessMessage EnvironmentConfigSpecifies the attributes of an environment.
DbHashStatThe DbHashStat object is used to return Hash database statistics.HashStatsThe HashStats object is used to return Hash database statistics.
DbKeyRange JoinConfigThe configuration properties of a JoinCursor.
DbLockThe locking interfaces for the Berkeley DB database environment are methods of the DbEnv handle.JoinCursorA specialized join cursor for use in performing equality or natural joins on +secondary indices.
DbLockRequestThe DbLockRequest object is used to encapsulate a single lock request.KeyRangeAn object that returns status from the Database.getKeyRange method.
DbLockStatThe DbLockStat object is used to return lock region statistics.LockThe locking interfaces for the database environment are methods of the +Environment handle.
DbLogcThe DbLogc object is the handle for a cursor into the log files, supporting sequential access to the records stored in log files.LockDetectModeDeadlock detection modes.
DbLogStatThe DbLogStat object is used to return logging subsystem statistics.LockModeLocking modes for database operations.
DbLsnThe DbLsn object is a log sequence number which specifies a unique location in a log file.LockOperationOperations that can be performed on locks.
DbMpoolFileThe memory pool interfaces for the Berkeley DB database environment are methods of the DbEnv handle.LockRequestThe LockRequest object is used to encapsulate a single lock request.
DbMpoolFStatThe DbMpoolFStat object is used to return memory pool per-file statistics.LockRequestModeWhen using the default lock conflict matrix, the LockRequestMode class +defines the set of possible lock modes.
DbMpoolStatThe DbMpoolStat object is used to return memory pool statistics.LockStatsLock statistics for a database environment.
DbMultipleDataIteratorThe DbMultipleDataIterator class is used to iterate through data returned using the Db.DB_MULTIPLE flag from a database belonging to any access method.LogCursorThe LogCursor object is the handle for a cursor into the log files, +supporting sequential access to the records stored in log files.
DbMultipleKeyDataIteratorThe DbMultipleKeyDataIterator class is used to iterate through data returned using the Db.DB_MULTIPLE_KEY flag from a database belonging to the Btree or Hash access methods.LogSequenceNumberThe LogSequenceNumber object is a log sequence number which +specifies a unique location in a log file.
DbMultipleRecnoDataIteratorThis class is used to iterate through data returned using the Db.DB_MULTIPLE_KEY flag from a database belonging to the Recno or Queue access methods.LogStatsLog statistics for a database environment.
DbPreplistThe DbPreplist object is used to encapsulate a single prepared, but not yet resolved, transaction.MultipleDataEntryA DatabaseEntry that holds multiple data items returned by a single +Database or Cursor get call.
DbQueueStatThe DbQueueStat object is used to return Queue database statistics.MultipleEntryAn abstract class representing a DatabaseEntry that holds multiple results +returned by a single Cursor get method.
DbRepStatThe DbRepStat object is used to return replication subsystem statistics.MultipleKeyDataEntryA DatabaseEntry that holds multiple key/data pairs returned by a single +Database or Cursor get call.
DbtThis information describes the specific details of the Dbt class, used to encode keys and data items in a database.MultipleRecnoDataEntryA DatabaseEntry that holds multiple record number/data pairs returned by a +single Database or Cursor get call.
DbTxnThe DbTxn object is the handle for a transaction.OperationStatusStatus values from database operations.
DbTxnStatThe DbTxnStat object is used to return transaction subsystem statistics.PreparedTransactionThe PreparedTransaction object is used to encapsulate a single prepared, +but not yet resolved, transaction.
DbTxnStat.Active QueueStatsThe QueueStats object is used to return Queue database statistics.
RecoveryOperationThe recovery operation being performed when LogRecordHandler.handleLogRecord is called.
ReplicationStatsReplication statistics for a database environment.
ReplicationStatusThe return status from processing a replication message.
SecondaryConfigThe configuration properties of a SecondaryDatabase extend +those of a primary Database.
SecondaryCursorA database cursor for a secondary database.
SecondaryDatabaseA secondary database handle.
SequenceA Sequence handle is used to manipulate a sequence record in a database.
SequenceConfigSpecify the attributes of a sequence.
+SequenceStats +A SequenceStats object is used to return sequence statistics. +
StatsConfigSpecifies the attributes of a statistics retrieval operation.
TransactionThe Transaction object is the handle for a transaction.
TransactionConfigSpecifies the attributes of a database environment transaction.
TransactionStatsTransaction statistics for a database environment.
VerifyConfigSpecifies the attributes of a verification operation.
 

- +
- - + + + + + + - - + + - - + + - - + + - - + +
Exception Summary
DbDeadlockExceptionThis information describes the DbDeadlockException class and how it is used in the Berkeley DB library.DatabaseExceptionThe root of all database exceptions.
DeadlockExceptionDeadlockException is thrown to a thread of control when multiple threads +competing for a lock are +deadlocked, when a lock request has timed out +or when a lock request would need to block and the transaction has been +configured to not wait for locks.
DbExceptionThis information describes the DbException class and how it is used by the various Berkeley DB classes.LockNotGrantedExceptionA LockNotGrantedException is thrown when a lock requested using the +Environment.getLock or Environment.lockVector +methods, where the noWait flag or lock timers were configured, could not +be granted before the wait-time expired.
DbLockNotGrantedExceptionThis information describes the DbLockNotGrantedException class and how it is used by the various Db* classes.MemoryExceptionThis exception is thrown when a DatabaseEntry +passed to a Database or Cursor method is not large +enough to hold a value being returned.
DbMemoryExceptionThis information describes the DbMemoryException class and how it is used by the various Db* classes.ReplicationHandleDeadExceptionThrown when a database handle has been invalidated because a replication +election unrolled a committed transaction.
DbRunRecoveryExceptionThis information describes the DbRunRecoveryException class and how it is used by the various Berkeley DB classes.RunRecoveryExceptionThrown when the database environment needs to be recovered.
  @@ -298,20 +411,32 @@ Package com.sleepycat.db Description

-

Java API programming notes
-[reference guide] +Berkeley DB Java API
+[reference guide] [Java programming notes]. +

+This package is a wrapper around the Berkeley DB library. It uses JNI +to provide access to Berkeley DB, which is implemented in C. That means +that a shared library or DLL must be available when applications use +this package. +

+There are also several utilities provided with Berkeley DB that make +administrative tasks possible from the command line. For more +information, see the page Berkeley DB Supporting Utilities.
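A minimal sketch of getting started with the package (illustrative only: the OpenExample class, the environment home directory and the database file name are assumptions, and the JNI library, e.g. libdb_java, must be loadable at run time):

    import java.io.File;
    import com.sleepycat.db.*;

    public class OpenExample {
        public static void main(String[] args) throws Exception {
            // The environment home directory must already exist.
            EnvironmentConfig envConfig = new EnvironmentConfig();
            envConfig.setAllowCreate(true);
            envConfig.setInitializeCache(true);
            Environment env = new Environment(new File("/tmp/dbenv"), envConfig);

            // Create (or open) a Btree database inside the environment.
            DatabaseConfig dbConfig = new DatabaseConfig();
            dbConfig.setAllowCreate(true);
            dbConfig.setType(DatabaseType.BTREE);
            Database db = env.openDatabase(null, "example.db", null, dbConfig);

            db.close();
            env.close();
        }
    }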


- + + - + +
PREV PACKAGE  + NEXT PACKAGE
-Berkeley DB
version 4.2.52
+Berkeley DB
version 4.3.14
- + +
-Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/java/com/sleepycat/db/package-tree.html b/db/docs/java/com/sleepycat/db/package-tree.html index 3900389bc..979fc997b 100644 --- a/db/docs/java/com/sleepycat/db/package-tree.html +++ b/db/docs/java/com/sleepycat/db/package-tree.html @@ -1,28 +1,35 @@ - + - + com.sleepycat.db Class Hierarchy (Sleepycat Software, Inc. - Berkeley DB Java API) + + - - - - + + + + + + - + +
PREV  + NEXT
-Berkeley DB
version 4.2.52
+Berkeley DB
version 4.3.14
- + +
@@ -76,32 +84,53 @@ Hierarchy For Package com.sleepycat.db Class Hierarchy

- +
- - + + - - + + + + + + + + + + - +
Packages that use com.sleepycat.db
com.sleepycat.bdbCore database classes for defining an environment, creating data stores, and running transactions
-[reference guide] 
com.sleepycat.bindBindings between database entries and Java objects
+[reference guide]
com.sleepycat.bdb.factoryFactory classes com.sleepycat.bind.serialBindings that use Java serialization. 
com.sleepycat.bind.tupleBindings that use sequences of primitive fields, or tuples. 
com.sleepycat.collectionsData access based on the standard Java collections API
+[reference guide]
com.sleepycat.dbJava API programming notes
-[reference guide] 
Berkeley DB Java API
+[reference guide] [Java programming notes]
 

- - + +
- + + + +          Encodes database key and data items as a byte array. + +
-Classes in com.sleepycat.db used by com.sleepycat.bdb
Db +Classes in com.sleepycat.db used by com.sleepycat.bind
DatabaseEntry
-          The Db handle is the handle for a Berkeley DB database, which may or may not be part of a database environment.
+  +

+ + + + - +          A database handle. - +          Encodes database key and data items as a byte array. - +          The root of all database exceptions. - +          A secondary database handle. - +          An interface specifying how secondary keys for a +SecondaryDatabase are created. - +
+Classes in com.sleepycat.db used by com.sleepycat.bind.serial
Dbc +Database
-          The Dbc object is the handle for a cursor into a Berkeley DB database.
DbEnv +DatabaseEntry
-          The DbEnv object is the handle for a Berkeley DB environment -- a collection including support for some or all of caching, locking, logging and transaction subsystems, as well as databases and log files.
DbException +DatabaseException
-          This information describes the DbException class and how it is used by the various Berkeley DB classes.
Dbt +SecondaryDatabase
-          This information describes the specific details of the Dbt class, used to encode keys and data items in a database.
DbTxn +SecondaryKeyCreator
-          The DbTxn object is the handle for a transaction.
+  +

+ + + + + + + + + + + + + + + +
+Classes in com.sleepycat.db used by com.sleepycat.bind.tuple
DatabaseEntry + +
+          Encodes database key and data items as a byte array.
DatabaseException + +
+          The root of all database exceptions.
SecondaryDatabase + +
+          A secondary database handle.
SecondaryKeyCreator + +
+          An interface specifying how secondary keys for a +SecondaryDatabase are created.
 

- - + +
- + + + +          A database handle. - + + + + + + + + + + + + + + + + +
-Classes in com.sleepycat.db used by com.sleepycat.bdb.factory
Db +Classes in com.sleepycat.db used by com.sleepycat.collections
Database
-          The Db handle is the handle for a Berkeley DB database, which may or may not be part of a database environment.
DatabaseEntry + +
+          Encodes database key and data items as a byte array.
DatabaseException + +
+          The root of all database exceptions.
Environment + +
+          A database environment.
JoinConfig + +
+          The configuration properties of a JoinCursor.
Transaction + +
+          The Transaction object is the handle for a transaction.
TransactionConfig + +
+          Specifies the attributes of a database environment transaction.
 

- +
- + + + +          An interface specifying how Btree prefixes should be calculated. - +          This class allows applications to modify settings for +a Database using the Database.getCacheFile. - +          Priorities that can be assigned to files in the cache. - +          Statistics for a file in the cache. - +          Cache statistics for a database environment. - +          Specifies the attributes of an application invoked checkpoint operation. - +          A database cursor. - +          Specify the attributes of database cursor. - +          A database handle. - +          Specify the attributes of a database. - +          Encodes database key and data items as a byte array. - +          The root of all database exceptions. - +          Statistics for a single database. - +          Database types. - +          DeadlockException is thrown to a thread of control when multiple threads +competing for a lock are +deadlocked, when a lock request has timed out +or when a lock request would need to block and the transaction has been +configured to not wait for locks. - +          A database environment. - +          Specifies the attributes of an environment. - +          An interface specifying a callback function to be called when an error +occurs in the Berkeley DB library. - +          An interface specifying a function to be called to provide feedback. - +          An application-specified, database hash function. - +          The configuration properties of a JoinCursor. - +          A specialized join cursor for use in performing equality or natural joins on +secondary indices. - +          An object that returns status from the Database.getKeyRange method. - +          The locking interfaces for the database environment are methods of the +Environment handle. - +          Deadlock detection modes. - +          Locking modes for database operations. - +          Operations that can be performed on locks. - +          The LockRequest object is used to encapsulate a single lock request. - +          When using the default lock conflict matrix, the LockRequestMode class +defines the set of possible lock modes. - +          Lock statistics for a database environment. - +          The LogCursor object is the handle for a cursor into the log files, +supporting sequential access to the records stored in log files. - +          A function to process application-specific log records. - +          The LogSequenceNumber object is a log sequence number which +specifies a unique location in a log file. - +          Log statistics for a database environment. - +          An interface specifying a callback function to be called to display +informational messages. - +          An abstract class representing a DatabaseEntry that holds multiple results +returned by a single Cursor get method. - +          Status values from database operations. - +          An interface specifying a function to be called if the database +environment panics. - +          The PreparedTransaction object is used to encapsulate a single prepared, +but not yet resolved, transaction. - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
-Classes in com.sleepycat.db used by com.sleepycat.db
Db +Classes in com.sleepycat.db used by com.sleepycat.db
BtreePrefixCalculator
-          The Db handle is the handle for a Berkeley DB database, which may or may not be part of a database environment.
DbAppDispatch +CacheFile
-          An interface specifying a recovery function, which recovers application-specific actions.
DbAppendRecno +CacheFilePriority
-          An interface specifying a callback function that modifies stored data based on a generated key.
DbBtreeCompare +CacheFileStats
-          An interface specifying a comparison function, which imposes a total ordering on the keys in a Btree database.
DbBtreePrefix +CacheStats
-          An interface specifying a comparison function, which specifies the number of bytes needed to differentiate Btree keys.
Dbc +CheckpointConfig
-          The Dbc object is the handle for a cursor into a Berkeley DB database.
DbClient +Cursor
-          The DbClient object is used to encapsulate a reference to an RPC client.
DbDeadlockException +CursorConfig
-          This information describes the DbDeadlockException class and how it is used in the Berkeley DB library.
DbDupCompare +Database
-          An interface specifying a comparison function, which imposes a total ordering on the duplicate data items in a Btree database.
DbEnv +DatabaseConfig
-          The DbEnv object is the handle for a Berkeley DB environment -- a collection including support for some or all of caching, locking, logging and transaction subsystems, as well as databases and log files.
DbEnv.RepProcessMessage +DatabaseEntry
-           
DbEnvFeedback +DatabaseException
-          Deprecated. As of Berkeley DB 4.2, replaced by DbEnvFeedbackHandler
DbEnvFeedbackHandler +DatabaseStats
-          The DbEnvFeedbackHandler interface is used by the DbEnv.setFeedback method.
DbErrcall +DatabaseType
-          Deprecated. As of Berkeley DB 4.2, replaced by DbErrorHandler
DbErrorHandler +DeadlockException
-          An interface specifying a application-specific error reporting function.
DbException +Environment
-          This information describes the DbException class and how it is used by the various Berkeley DB classes.
DbFeedback +EnvironmentConfig
-          Deprecated. As of Berkeley DB 4.2, replaced by DbFeedbackHandler
DbFeedbackHandler +ErrorHandler
-          The DbFeedbackHandler interface is used by the Db.setFeedback method.
DbHash +FeedbackHandler
-          An interface specifying a hashing function, which imposes a total ordering on the Hash database.
DbKeyRange +Hasher
-           
DbLock +JoinConfig
-          The locking interfaces for the Berkeley DB database environment are methods of the DbEnv handle.
DbLockNotGrantedException +JoinCursor
-          This information describes the DbLockNotGrantedException class and how it is used by the various Db* classes.
DbLockRequest +KeyRange
-          The DbLockRequest object is used to encapsulate a single lock request.
DbLockStat +Lock
-          The DbLockStat object is used to return lock region statistics.
DbLogc +LockDetectMode
-          The DbLogc object is the handle for a cursor into the log files, supporting sequential access to the records stored in log files.
DbLogStat +LockMode
-          The DbLogStat object is used to return logging subsystem statistics.
DbLsn +LockOperation
-          The DbLsn object is a log sequence number which specifies a unique location in a log file.
DbMpoolFile +LockRequest
-          The memory pool interfaces for the Berkeley DB database environment are methods of the DbEnv handle.
DbMpoolFStat +LockRequestMode
-          The DbMpoolFStat object is used to return memory pool per-file statistics.
DbMpoolStat +LockStats
-          The DbMpoolStat object is used to return memory pool statistics.
DbPanicHandler +LogCursor
-          An interface specifying a function to handle database environment panics.
DbPreplist +LogRecordHandler
-          The DbPreplist object is used to encapsulate a single prepared, but not yet resolved, transaction.
DbRepStat +LogSequenceNumber
-          The DbRepStat object is used to return replication subsystem statistics.
DbRepTransport +LogStats
-          An interface specifying a replication transmit function, which sends information to other members of the replication group.
DbSecondaryKeyCreate +MessageHandler
-          An interface specifying a function which constructs secondary keys from primary key and data items.
Dbt +MultipleEntry
-          This information describes the specific details of the Dbt class, used to encode keys and data items in a database.
DbTxn +OperationStatus
-          The DbTxn object is the handle for a transaction.
DbTxnStat +PanicHandler
-          The DbTxnStat object is used to return transaction subsystem statistics.
DbTxnStat.Active +PreparedTransaction
-           
RecordNumberAppender + +
+          An interface specifying a callback function that modifies stored data +based on a generated key.
RecoveryOperation + +
+          The recovery operation being performed when LogRecordHandler.handleLogRecord is called.
ReplicationStats + +
+          Replication statistics for a database environment.
ReplicationStatus + +
+          The return status from processing a replication message.
ReplicationTransport + +
+          An interface specifying a replication transmit function, which sends +information to other members of the replication group.
SecondaryConfig + +
+          The configuration properties of a SecondaryDatabase extend +those of a primary Database.
SecondaryCursor + +
+          A database cursor for a secondary database.
SecondaryDatabase + +
+          A secondary database handle.
SecondaryKeyCreator + +
+          An interface specifying how secondary keys for a +SecondaryDatabase are created.
Sequence + +
+          A Sequence handle is used to manipulate a sequence record in a database.
SequenceConfig + +
+          Specify the attributes of a sequence.
SequenceStats + +
+          A SequenceStats object is used to return sequence statistics.
StatsConfig + +
+          Specifies the attributes of a statistics retrieval operation.
Transaction + +
+          The Transaction object is the handle for a transaction.
TransactionConfig + +
+          Specifies the attributes of a database environment transaction.
TransactionStats + +
+          Transaction statistics for a database environment.
TransactionStats.Active + +
+          The Active class represents an active transaction.
VerifyConfig + +
+          Specifies the attributes of a verification operation.
 


- + + - + +
@@ -425,25 +651,26 @@ Classes in com.sleepyca  PREV   NEXT
-Berkeley DB
version 4.2.52
+Berkeley DB
version 4.3.14
- + +
-Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/java/com/sleepycat/util/ExceptionUnwrapper.html b/db/docs/java/com/sleepycat/util/ExceptionUnwrapper.html new file mode 100644 index 000000000..10c4ecf0d --- /dev/null +++ b/db/docs/java/com/sleepycat/util/ExceptionUnwrapper.html @@ -0,0 +1,289 @@ + + + + + + +ExceptionUnwrapper (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.util +
+Class ExceptionUnwrapper

+
+java.lang.Object
+  extended bycom.sleepycat.util.ExceptionUnwrapper
+
+
+
+
public class ExceptionUnwrapper
extends Object
+ +

+Unwraps nested exceptions by calling the ExceptionWrapper.getDetail() method for exceptions that implement the + ExceptionWrapper interface. Does not currently support the Java 1.4 + Throwable.getCause() method. +

+ +
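A short usage sketch (the UnwrapExample class and the "disk full" message are illustrative; ExceptionUnwrapper and RuntimeExceptionWrapper are the classes documented in this package):

    import java.io.IOException;
    import com.sleepycat.util.ExceptionUnwrapper;
    import com.sleepycat.util.RuntimeExceptionWrapper;

    public class UnwrapExample {
        public static void main(String[] args) {
            try {
                // A layer restricted to unchecked exceptions wraps the real cause.
                throw new RuntimeExceptionWrapper(new IOException("disk full"));
            } catch (RuntimeException wrapped) {
                // unwrap() peels off the wrapper and returns the nested IOException.
                Exception cause = ExceptionUnwrapper.unwrap(wrapped);
                System.out.println(cause.getClass().getName() + ": " + cause.getMessage());
            }
        }
    }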

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
ExceptionUnwrapper() + +
+           
+  + + + + + + + + + + + + + + + +
+Method Summary
+static Exceptionunwrap(Exception e) + +
+          Unwraps an Exception and returns the underlying Exception, or throws an + Error if the underlying Throwable is an Error.
+static ThrowableunwrapAny(Throwable e) + +
+          Unwraps an Exception and returns the underlying Throwable.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+ExceptionUnwrapper

+
+public ExceptionUnwrapper()
+
+
+ + + + + + + + +
+Method Detail
+ +

+unwrap

+
+public static Exception unwrap(Exception e)
+
+
Unwraps an Exception and returns the underlying Exception, or throws an + Error if the underlying Throwable is an Error. +

+

+
Parameters:
e - is the Exception to unwrap. +
Returns:
the underlying Exception. +
Throws: +
Error - if the underlying Throwable is an Error. +
IllegalArgumentException - if the underlying Throwable is not an + Exception or an Error.
+
+
+
+ +

+unwrapAny

+
+public static Throwable unwrapAny(Throwable e)
+
+
Unwraps an Exception and returns the underlying Throwable. +

+

+
Parameters:
e - is the Exception to unwrap. +
Returns:
the underlying Throwable.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/util/ExceptionWrapper.html b/db/docs/java/com/sleepycat/util/ExceptionWrapper.html new file mode 100644 index 000000000..65628599b --- /dev/null +++ b/db/docs/java/com/sleepycat/util/ExceptionWrapper.html @@ -0,0 +1,222 @@ + + + + + + +ExceptionWrapper (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.util +
+Interface ExceptionWrapper

+
+
All Known Implementing Classes:
IOExceptionWrapper, RuntimeExceptionWrapper
+
+
+
+
public interface ExceptionWrapper
+ +

+Interface implemented by exceptions that can contain nested exceptions. +

+ +
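A sketch of how a caller might walk a chain of wrappers by hand, which is roughly what ExceptionUnwrapper automates (the NestedCauseExample class is illustrative):

    import com.sleepycat.util.ExceptionWrapper;

    public class NestedCauseExample {
        // Returns the innermost non-wrapper cause of a possibly wrapped exception.
        static Throwable innermost(Throwable t) {
            while (t instanceof ExceptionWrapper) {
                Throwable nested = ((ExceptionWrapper) t).getDetail();
                if (nested == null) {
                    break;              // no nested exception is present
                }
                t = nested;
            }
            return t;
        }
    }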

+


+ +

+ + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ ThrowablegetDetail() + +
+          Returns the nested exception or null if none is present.
+  +

+ + + + + + + + + + + + + + +
+Method Detail
+ +

+getDetail

+
+public Throwable getDetail()
+
+
Returns the nested exception or null if none is present. +

+

+ +
Returns:
the nested exception or null if none is present.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/util/FastInputStream.html b/db/docs/java/com/sleepycat/util/FastInputStream.html new file mode 100644 index 000000000..7dcc96132 --- /dev/null +++ b/db/docs/java/com/sleepycat/util/FastInputStream.html @@ -0,0 +1,583 @@ + + + + + + +FastInputStream (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.util +
+Class FastInputStream

+
+java.lang.Object
+  extended byjava.io.InputStream
+      extended bycom.sleepycat.util.FastInputStream
+
+
+
Direct Known Subclasses:
TupleInput
+
+
+
+
public class FastInputStream
extends InputStream
+ +

+A replacement for ByteArrayInputStream that does not synchronize every + byte read. + +

This class extends InputStream and its read() + methods allow it to be used as a standard input stream. In addition, it + provides readFast() methods that are not declared to throw + IOException. IOException is never thrown by this + class.
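A small usage sketch (the FastInputExample class and sample bytes are illustrative):

    import com.sleepycat.util.FastInputStream;

    public class FastInputExample {
        public static void main(String[] args) {
            byte[] data = {10, 20, 30};
            FastInputStream in = new FastInputStream(data);

            // readFast() mirrors read() but is not declared to throw IOException,
            // so no try/catch is needed for purely in-memory reads.
            int b;
            while ((b = in.readFast()) != -1) {
                System.out.println(b);
            }
        }
    }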

+

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + + +
+Constructor Summary
FastInputStream(byte[] buffer) + +
+          Creates an input stream.
FastInputStream(byte[] buffer, + int offset, + int length) + +
+          Creates an input stream.
+  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ intavailable() + +
+           
+ byte[]getBufferBytes() + +
+          Returns the underlying data being read.
+ intgetBufferLength() + +
+          Returns the end of the buffer being read.
+ intgetBufferOffset() + +
+          Returns the offset at which data is being read from the buffer.
+ voidmark(int pos) + +
+           
+ booleanmarkSupported() + +
+           
+ intread() + +
+           
+ intread(byte[] toBuf) + +
+           
+ intread(byte[] toBuf, + int offset, + int length) + +
+           
+ intreadFast() + +
+          Equivalent to read() but does not throw + IOException.
+ intreadFast(byte[] toBuf) + +
+          Equivalent to read(byte[]) but does not throw + IOException.
+ intreadFast(byte[] toBuf, + int offset, + int length) + +
+          Equivalent to read(byte[],int,int) but does not throw + IOException.
+ voidreset() + +
+           
+ longskip(long count) + +
+           
+ + + + + + + +
Methods inherited from class java.io.InputStream
close
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+FastInputStream

+
+public FastInputStream(byte[] buffer)
+
+
Creates an input stream. +

+

Parameters:
buffer - the data to read.
+
+ +

+FastInputStream

+
+public FastInputStream(byte[] buffer,
+                       int offset,
+                       int length)
+
+
Creates an input stream. +

+

Parameters:
buffer - the data to read.
offset - the byte offset at which to begin reading.
length - the number of bytes to read.
+ + + + + + + + +
+Method Detail
+ +

+available

+
+public int available()
+
+
+
+
+
+
+ +

+markSupported

+
+public boolean markSupported()
+
+
+
+
+
+
+ +

+mark

+
+public void mark(int pos)
+
+
+
+
+
+
+ +

+reset

+
+public void reset()
+
+
+
+
+
+
+ +

+skip

+
+public long skip(long count)
+
+
+
+
+
+
+ +

+read

+
+public int read()
+         throws IOException
+
+
+ +
Throws: +
IOException
+
+
+
+ +

+read

+
+public int read(byte[] toBuf)
+         throws IOException
+
+
+ +
Throws: +
IOException
+
+
+
+ +

+read

+
+public int read(byte[] toBuf,
+                int offset,
+                int length)
+         throws IOException
+
+
+ +
Throws: +
IOException
+
+
+
+ +

+readFast

+
+public final int readFast()
+
+
Equivalent to read() but does not throw + IOException. +

+

+
See Also:
read()
+
+
+
+ +

+readFast

+
+public final int readFast(byte[] toBuf)
+
+
Equivalent to read(byte[]) but does not throw + IOException. +

+

+
See Also:
read(byte[])
+
+
+
+ +

+readFast

+
+public final int readFast(byte[] toBuf,
+                          int offset,
+                          int length)
+
+
Equivalent to read(byte[],int,int) but does not throw + IOException. +

+

+
See Also:
read(byte[],int,int)
+
+
+
+ +

+getBufferBytes

+
+public final byte[] getBufferBytes()
+
+
Returns the underlying data being read. +

+

+ +
Returns:
the underlying data.
+
+
+
+ +

+getBufferOffset

+
+public final int getBufferOffset()
+
+
Returns the offset at which data is being read from the buffer. +

+

+ +
Returns:
the offset at which data is being read.
+
+
+
+ +

+getBufferLength

+
+public final int getBufferLength()
+
+
Returns the end of the buffer being read. +

+

+ +
Returns:
the end of the buffer.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/util/FastOutputStream.html b/db/docs/java/com/sleepycat/util/FastOutputStream.html new file mode 100644 index 000000000..513944c16 --- /dev/null +++ b/db/docs/java/com/sleepycat/util/FastOutputStream.html @@ -0,0 +1,780 @@ + + + + + + +FastOutputStream (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.util +
+Class FastOutputStream

+
+java.lang.Object
+  extended byjava.io.OutputStream
+      extended bycom.sleepycat.util.FastOutputStream
+
+
+
Direct Known Subclasses:
TupleOutput
+
+
+
+
public class FastOutputStream
extends OutputStream
+ +

+A replacement for ByteArrayOutputStream that does not synchronize every + byte written. + +

This class extends OutputStream and its write() + methods allow it to be used as a standard output stream. In addition, it + provides writeFast() methods that are not declared to throw + IOException. IOException is never thrown by this + class.
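A small usage sketch (the FastOutputExample class and sample values are illustrative):

    import com.sleepycat.util.FastOutputStream;

    public class FastOutputExample {
        public static void main(String[] args) {
            FastOutputStream out = new FastOutputStream();

            // writeFast() mirrors write() but is not declared to throw IOException.
            out.writeFast(1);
            out.writeFast(new byte[] {2, 3, 4});

            byte[] copy = out.toByteArray();   // copies the used portion of the buffer
            System.out.println(copy.length);   // prints 4
        }
    }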

+

+ +

+


+ +

+ + + + + + + + + + + + + + + + + + +
+Field Summary
+static intDEFAULT_BUMP_SIZE + +
+           
+static intDEFAULT_INIT_SIZE + +
+           
+  + + + + + + + + + + + + + + + + + + + + + + +
+Constructor Summary
FastOutputStream() + +
+          Creates an output stream with default sizes.
FastOutputStream(byte[] buffer) + +
+          Creates an output stream with a given initial buffer and a default + bump size.
FastOutputStream(byte[] buffer, + int bumpSize) + +
+          Creates an output stream with a given initial buffer and a given + bump size.
FastOutputStream(int initialSize) + +
+          Creates an output stream with a default bump size and a given initial + size.
FastOutputStream(int initialSize, + int bumpSize) + +
+          Creates an output stream with a given bump size and initial size.
+  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+ voidaddSize(int sizeAdded) + +
+          Skip the given number of bytes in the buffer.
+ byte[]getBufferBytes() + +
+          Returns the buffer owned by this object.
+ intgetBufferLength() + +
+          Returns the length used in the internal buffer, i.e., the offset at + which data will be written next.
+ intgetBufferOffset() + +
+          Returns the offset of the internal buffer.
+ voidmakeSpace(int sizeNeeded) + +
+          Ensure that at least the given number of bytes are available in the + internal buffer.
+ voidreset() + +
+           
+ intsize() + +
+           
+ byte[]toByteArray() + +
+           
+ voidtoByteArray(byte[] toBuf, + int offset) + +
+          Copy the buffered data to the given array.
+ StringtoString() + +
+           
+ StringtoString(String encoding) + +
+           
+ voidwrite(byte[] fromBuf) + +
+           
+ voidwrite(byte[] fromBuf, + int offset, + int length) + +
+           
+ voidwrite(int b) + +
+           
+ voidwriteFast(byte[] fromBuf) + +
+          Equivalent to write(byte[]) but does not throw + IOException.
+ voidwriteFast(byte[] fromBuf, + int offset, + int length) + +
+          Equivalent to write(byte[],int,int) but does not throw + IOException.
+ voidwriteFast(int b) + +
+          Equivalent to write(int) but does not throw + IOException.
+ voidwriteTo(OutputStream out) + +
+           
+ + + + + + + +
Methods inherited from class java.io.OutputStream
close, flush
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
+  +

+ + + + + + + + +
+Field Detail
+ +

+DEFAULT_INIT_SIZE

+
+public static final int DEFAULT_INIT_SIZE
+
+
+
See Also:
Constant Field Values
+
+
+ +

+DEFAULT_BUMP_SIZE

+
+public static final int DEFAULT_BUMP_SIZE
+
+
+
See Also:
Constant Field Values
+
+ + + + + + + + +
+Constructor Detail
+ +

+FastOutputStream

+
+public FastOutputStream()
+
+
Creates an output stream with default sizes. +

+

+
+ +

+FastOutputStream

+
+public FastOutputStream(int initialSize)
+
+
Creates an output stream with a default bump size and a given initial + size. +

+

Parameters:
initialSize - the initial size of the buffer.
+
+ +

+FastOutputStream

+
+public FastOutputStream(int initialSize,
+                        int bumpSize)
+
+
Creates an output stream with a given bump size and initial size. +

+

Parameters:
initialSize - the initial size of the buffer.
bumpSize - the amount to increment the buffer.
+
+ +

+FastOutputStream

+
+public FastOutputStream(byte[] buffer)
+
+
Creates an output stream with a given initial buffer and a default + bump size. +

+

Parameters:
buffer - the initial buffer; will be owned by this object.
+
+ +

+FastOutputStream

+
+public FastOutputStream(byte[] buffer,
+                        int bumpSize)
+
+
Creates an output stream with a given initial buffer and a given + bump size. +

+

Parameters:
buffer - the initial buffer; will be owned by this object.
bumpSize - the amount to increment the buffer.
+ + + + + + + + +
+Method Detail
+ +

+size

+
+public int size()
+
+
+
+
+
+
+ +

+reset

+
+public void reset()
+
+
+
+
+
+
+ +

+write

+
+public void write(int b)
+           throws IOException
+
+
+ +
Throws: +
IOException
+
+
+
+ +

+write

+
+public void write(byte[] fromBuf)
+           throws IOException
+
+
+ +
Throws: +
IOException
+
+
+
+ +

+write

+
+public void write(byte[] fromBuf,
+                  int offset,
+                  int length)
+           throws IOException
+
+
+ +
Throws: +
IOException
+
+
+
+ +

+writeTo

+
+public void writeTo(OutputStream out)
+             throws IOException
+
+
+ +
Throws: +
IOException
+
+
+
+ +

+toString

+
+public String toString()
+
+
+
+
+
+
+ +

+toString

+
+public String toString(String encoding)
+                throws UnsupportedEncodingException
+
+
+ +
Throws: +
UnsupportedEncodingException
+
+
+
+ +

+toByteArray

+
+public byte[] toByteArray()
+
+
+
+
+
+
+ +

+writeFast

+
+public final void writeFast(int b)
+
+
Equivalent to write(int) but does not throw + IOException. +

+

+
See Also:
write(int)
+
+
+
+ +

+writeFast

+
+public final void writeFast(byte[] fromBuf)
+
+
Equivalent to write(byte[]) but does not throw + IOException. +

+

+
See Also:
write(byte[])
+
+
+
+ +

+writeFast

+
+public final void writeFast(byte[] fromBuf,
+                            int offset,
+                            int length)
+
+
Equivalent to write(byte[],int,int) but does not throw + IOException. +

+

+
See Also:
write(byte[],int,int)
+
+
+
+ +

+toByteArray

+
+public void toByteArray(byte[] toBuf,
+                        int offset)
+
+
Copy the buffered data to the given array. +

+

+
Parameters:
toBuf - the buffer to hold a copy of the data.
offset - the offset at which to start copying.
+
+
+
+ +

+getBufferBytes

+
+public byte[] getBufferBytes()
+
+
Returns the buffer owned by this object. +

+

+ +
Returns:
the buffer.
+
+
+
+ +

+getBufferOffset

+
+public int getBufferOffset()
+
+
Returns the offset of the internal buffer. +

+

+ +
Returns:
always zero currently.
+
+
+
+ +

+getBufferLength

+
+public int getBufferLength()
+
+
Returns the length used in the internal buffer, i.e., the offset at + which data will be written next. +

+

+ +
Returns:
the buffer length.
+
+
+
+ +

+makeSpace

+
+public void makeSpace(int sizeNeeded)
+
+
Ensure that at least the given number of bytes are available in the + internal buffer. +

+

+
Parameters:
sizeNeeded - the number of bytes desired.
+
+
+
+ +

+addSize

+
+public void addSize(int sizeAdded)
+
+
Skip the given number of bytes in the buffer. +

+

+
Parameters:
sizeAdded - number of bytes to skip.
+
+
+ +
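A plausible sketch of the direct-write pattern that makeSpace and addSize support, assuming getBufferLength() marks the next write position (as its description above states) and addSize() advances it; the writeZeros helper is illustrative:

    import java.util.Arrays;
    import com.sleepycat.util.FastOutputStream;

    public class DirectWriteExample {
        // Writes n zero bytes directly into the stream's internal buffer.
        static void writeZeros(FastOutputStream out, int n) {
            out.makeSpace(n);                    // grow the buffer if necessary
            byte[] buf = out.getBufferBytes();   // buffer owned by the stream
            int off = out.getBufferLength();     // offset of the next write
            Arrays.fill(buf, off, off + n, (byte) 0);
            out.addSize(n);                      // record the bytes just written
        }
    }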
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/util/IOExceptionWrapper.html b/db/docs/java/com/sleepycat/util/IOExceptionWrapper.html new file mode 100644 index 000000000..13d946c5c --- /dev/null +++ b/db/docs/java/com/sleepycat/util/IOExceptionWrapper.html @@ -0,0 +1,280 @@ + + + + + + +IOExceptionWrapper (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.util +
+Class IOExceptionWrapper

+
+java.lang.Object
+  extended byjava.lang.Throwable
+      extended byjava.lang.Exception
+          extended byjava.io.IOException
+              extended bycom.sleepycat.util.IOExceptionWrapper
+
+
+
All Implemented Interfaces:
ExceptionWrapper, Serializable
+
+
+
+
public class IOExceptionWrapper
extends IOException
implements ExceptionWrapper
+ +

+An IOException that can contain nested exceptions. +

+ +
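A sketch of the intended throw-side pattern (the WrapExample class and its doWork method are illustrative):

    import java.io.IOException;
    import com.sleepycat.util.IOExceptionWrapper;

    public class WrapExample {
        // Surfaces any failure from doWork() through an IOException-only API;
        // the original cause remains available via getDetail().
        static void callThroughIoApi() throws IOException {
            try {
                doWork();
            } catch (RuntimeException e) {
                throw new IOExceptionWrapper(e);
            }
        }

        static void doWork() {
            throw new IllegalStateException("example failure");
        }
    }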

+

+
See Also:
Serialized Form
+
+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
IOExceptionWrapper(Throwable e) + +
+           
+  + + + + + + + + + + + +
+Method Summary
+ ThrowablegetDetail() + +
+          Returns the nested exception or null if none is present.
+ + + + + + + +
Methods inherited from class java.lang.Throwable
fillInStackTrace, getCause, getLocalizedMessage, getMessage, getStackTrace, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+IOExceptionWrapper

+
+public IOExceptionWrapper(Throwable e)
+
+
+ + + + + + + + +
+Method Detail
+ +

+getDetail

+
+public Throwable getDetail()
+
+
Description copied from interface: ExceptionWrapper
+
Returns the nested exception or null if none is present. +

+

+
Specified by:
getDetail in interface ExceptionWrapper
+
+
+ +
Returns:
the nested exception or null if none is present.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/util/RuntimeExceptionWrapper.html b/db/docs/java/com/sleepycat/util/RuntimeExceptionWrapper.html new file mode 100644 index 000000000..3cd5d74f3 --- /dev/null +++ b/db/docs/java/com/sleepycat/util/RuntimeExceptionWrapper.html @@ -0,0 +1,280 @@ + + + + + + +RuntimeExceptionWrapper (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.util +
+Class RuntimeExceptionWrapper

+
+java.lang.Object
+  extended byjava.lang.Throwable
+      extended byjava.lang.Exception
+          extended byjava.lang.RuntimeException
+              extended bycom.sleepycat.util.RuntimeExceptionWrapper
+
+
+
All Implemented Interfaces:
ExceptionWrapper, Serializable
+
+
+
+
public class RuntimeExceptionWrapper
extends RuntimeException
implements ExceptionWrapper
+ +

+A RuntimeException that can contain nested exceptions. +

+ +

+

+
See Also:
Serialized Form
+
+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
RuntimeExceptionWrapper(Throwable e) + +
+           
+  + + + + + + + + + + + +
+Method Summary
+ ThrowablegetDetail() + +
+          Returns the nested exception or null if none is present.
+ + + + + + + +
Methods inherited from class java.lang.Throwable
fillInStackTrace, getCause, getLocalizedMessage, getMessage, getStackTrace, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+RuntimeExceptionWrapper

+
+public RuntimeExceptionWrapper(Throwable e)
+
+
+ + + + + + + + +
+Method Detail
+ +

+getDetail

+
+public Throwable getDetail()
+
+
Description copied from interface: ExceptionWrapper
+
Returns the nested exception or null if none is present. +

+

+
Specified by:
getDetail in interface ExceptionWrapper
+
+
+ +
Returns:
the nested exception or null if none is present.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/util/UtfOps.html b/db/docs/java/com/sleepycat/util/UtfOps.html new file mode 100644 index 000000000..888893725 --- /dev/null +++ b/db/docs/java/com/sleepycat/util/UtfOps.html @@ -0,0 +1,506 @@ + + + + + + +UtfOps (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+ +

+ +com.sleepycat.util +
+Class UtfOps

+
+java.lang.Object
+  extended bycom.sleepycat.util.UtfOps
+
+
+
+
public class UtfOps
extends Object
+ +

+UTF operations with more flexibility than is provided by DataInput and + DataOutput. +

+ +
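A round-trip sketch (the UtfExample class and the sample string are illustrative):

    import com.sleepycat.util.UtfOps;

    public class UtfExample {
        public static void main(String[] args) {
            // Encode a string to UTF bytes and decode it again.
            byte[] utf = UtfOps.stringToBytes("caf\u00e9");
            String back = UtfOps.bytesToString(utf, 0, utf.length);

            System.out.println(back);                       // prints "café"
            System.out.println(UtfOps.getCharLength(utf));   // prints 4
        }
    }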

+


+ +

+ + + + + + + + + + + + + + + + +
+Constructor Summary
UtfOps() + +
+           
+  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Method Summary
+static intbytesToChars(byte[] bytes, + int byteOffset, + char[] chars, + int charOffset, + int len, + boolean isByteLen) + +
+          Converts byte arrays into character arrays.
+static StringbytesToString(byte[] bytes, + int offset, + int length) + +
+          Converts byte arrays into strings.
+static voidcharsToBytes(char[] chars, + int charOffset, + byte[] bytes, + int byteOffset, + int charLength) + +
+          Converts character arrays into byte arrays.
+static intgetByteLength(char[] chars) + +
+          Returns the byte length of the UTF string that would be created by + converting the given characters to UTF.
+static intgetByteLength(char[] chars, + int offset, + int length) + +
+          Returns the byte length of the UTF string that would be created by + converting the given characters to UTF.
+static intgetCharLength(byte[] bytes) + +
+          Returns the number of characters represented by the given UTF string.
+static intgetCharLength(byte[] bytes, + int offset, + int length) + +
+          Returns the number of characters represented by the given UTF string.
+static intgetZeroTerminatedByteLength(byte[] bytes, + int offset) + +
+          Returns the byte length of a null terminated UTF string, not including + the terminator.
+static byte[]stringToBytes(String string) + +
+          Converts strings to byte arrays.
+ + + + + + + +
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+  +

+ + + + + + + + + + + +
+Constructor Detail
+ +

+UtfOps

+
+public UtfOps()
+
+
+ + + + + + + + +
+Method Detail
+ +

+getZeroTerminatedByteLength

+
+public static int getZeroTerminatedByteLength(byte[] bytes,
+                                              int offset)
+                                       throws IndexOutOfBoundsException
+
+
Returns the byte length of a null terminated UTF string, not including + the terminator. +

+

+
Parameters:
bytes - the data containing the UTF string.
offset - the beginning of the string to measure. +
Returns:
the number of bytes. +
Throws: +
IndexOutOfBoundsException - if no zero terminator is found.
+
+
+
+ +

+getByteLength

+
+public static int getByteLength(char[] chars)
+
+
Returns the byte length of the UTF string that would be created by + converting the given characters to UTF. +

+

+
Parameters:
chars - the characters that would be converted. +
Returns:
the byte length of the equivalent UTF data.
+
+
+
+ +

+getByteLength

+
+public static int getByteLength(char[] chars,
+                                int offset,
+                                int length)
+
+
Returns the byte length of the UTF string that would be created by + converting the given characters to UTF. +

+

+
Parameters:
chars - the characters that would be converted.
offset - the first character to be converted.
length - the number of characters to be converted. +
Returns:
the byte length of the equivalent UTF data.
+
+
+
+ +

+getCharLength

+
+public static int getCharLength(byte[] bytes)
+                         throws IllegalArgumentException,
+                                IndexOutOfBoundsException
+
+
Returns the number of characters represented by the given UTF string. +

+

+
Parameters:
bytes - the UTF string. +
Returns:
the number of characters. +
Throws: +
IndexOutOfBoundsException - if a UTF character sequence at the end + of the data is not complete. +
IllegalArgumentException - if an illegal UTF sequence is + encountered.
+
+
+
+ +

+getCharLength

+
+public static int getCharLength(byte[] bytes,
+                                int offset,
+                                int length)
+                         throws IllegalArgumentException,
+                                IndexOutOfBoundsException
+
+
Returns the number of characters represented by the given UTF string. +

+

+
Parameters:
bytes - the data containing the UTF string.
offset - the first byte to be converted.
length - the number of bytes to be converted. +
Throws: +
IndexOutOfBoundsException - if a UTF character sequence at the end + of the data is not complete. +
IllegalArgumentException - if an illegal UTF sequence is + encountered.
+
+
+
+ +

+bytesToChars

+
+public static int bytesToChars(byte[] bytes,
+                               int byteOffset,
+                               char[] chars,
+                               int charOffset,
+                               int len,
+                               boolean isByteLen)
+                        throws IllegalArgumentException,
+                               IndexOutOfBoundsException
+
+
Converts byte arrays into character arrays. +

+

+
Parameters:
bytes - the source byte data to convert
byteOffset - the offset into the byte array at which + to start the conversion
chars - the destination array
charOffset - the offset into chars at which to begin the copy
len - the amount of information to copy into chars
isByteLen - if true then len is a measure of bytes, otherwise + len is a measure of characters +
Throws: +
IndexOutOfBoundsException - if a UTF character sequence at the end + of the data is not complete. +
IllegalArgumentException - if an illegal UTF sequence is + encountered.
+
+
+
+ +

+charsToBytes

+
+public static void charsToBytes(char[] chars,
+                                int charOffset,
+                                byte[] bytes,
+                                int byteOffset,
+                                int charLength)
+
+
Converts character arrays into byte arrays. +

+

+
Parameters:
chars - the source character data to convert
charOffset - the offset into the character array at which + to start the conversion
bytes - the destination array
byteOffset - the offset into bytes at which to begin the copy
charLength - the length of characters to copy into bytes
+
+
+
+ +

+bytesToString

+
+public static String bytesToString(byte[] bytes,
+                                   int offset,
+                                   int length)
+                            throws IllegalArgumentException,
+                                   IndexOutOfBoundsException
+
+
Converts byte arrays into strings. +

+

+
Parameters:
bytes - the source byte data to convert
offset - the offset into the byte array at which + to start the conversion
length - the number of bytes to be converted. +
Returns:
the string. +
Throws: +
IndexOutOfBoundsException - if a UTF character sequence at the end + of the data is not complete. +
IllegalArgumentException - if an illegal UTF sequence is + encountered.
+
+
+
+ +

+stringToBytes

+
+public static byte[] stringToBytes(String string)
+
+
Converts strings to byte arrays. +

+

+
Parameters:
string - the string to convert. +
Returns:
the UTF byte array.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/util/class-use/ExceptionUnwrapper.html b/db/docs/java/com/sleepycat/util/class-use/ExceptionUnwrapper.html new file mode 100644 index 000000000..14783188f --- /dev/null +++ b/db/docs/java/com/sleepycat/util/class-use/ExceptionUnwrapper.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.util.ExceptionUnwrapper (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.util.ExceptionUnwrapper

+
+No usage of com.sleepycat.util.ExceptionUnwrapper +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/util/class-use/ExceptionWrapper.html b/db/docs/java/com/sleepycat/util/class-use/ExceptionWrapper.html new file mode 100644 index 000000000..07365e8d1 --- /dev/null +++ b/db/docs/java/com/sleepycat/util/class-use/ExceptionWrapper.html @@ -0,0 +1,180 @@ + + + + + + +Uses of Interface com.sleepycat.util.ExceptionWrapper (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Interface
com.sleepycat.util.ExceptionWrapper

+
+ + + + + + + + + +
+Packages that use ExceptionWrapper
com.sleepycat.utilGeneral utilities used throughout Berkeley DB. 
+  +

+ + + + + +
+Uses of ExceptionWrapper in com.sleepycat.util
+  +

+ + + + + + + + + + + + + +
Classes in com.sleepycat.util that implement ExceptionWrapper
+ classIOExceptionWrapper + +
+          An IOException that can contain nested exceptions.
+ classRuntimeExceptionWrapper + +
+          A RuntimeException that can contain nested exceptions.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/util/class-use/FastInputStream.html b/db/docs/java/com/sleepycat/util/class-use/FastInputStream.html new file mode 100644 index 000000000..d5f82cbb1 --- /dev/null +++ b/db/docs/java/com/sleepycat/util/class-use/FastInputStream.html @@ -0,0 +1,173 @@ + + + + + + +Uses of Class com.sleepycat.util.FastInputStream (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.util.FastInputStream

+
+ + + + + + + + + +
+Packages that use FastInputStream
com.sleepycat.bind.tupleBindings that use sequences of primitive fields, or tuples. 
+  +

+ + + + + +
+Uses of FastInputStream in com.sleepycat.bind.tuple
+  +

+ + + + + + + + + +
Subclasses of FastInputStream in com.sleepycat.bind.tuple
+ classTupleInput + +
+          An InputStream with DataInput-like methods for + reading tuple fields.
+  +
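A small sketch of reading directly from a FastInputStream; the FastInputStream(byte[]) constructor is assumed from the class description above, and FastInputExample is an illustrative name.

    // Read from an in-memory byte array without ByteArrayInputStream's
    // per-call synchronization. The byte[] constructor is assumed.
    import com.sleepycat.util.FastInputStream;

    public class FastInputExample {
        public static void main(String[] args) throws java.io.IOException {
            byte[] data = {1, 2, 3};
            FastInputStream in = new FastInputStream(data);
            int first = in.read();   // reads the value 1
            System.out.println(first + " read, " + in.available() + " byte(s) remaining");
        }
    }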

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/util/class-use/FastOutputStream.html b/db/docs/java/com/sleepycat/util/class-use/FastOutputStream.html new file mode 100644 index 000000000..debc741e3 --- /dev/null +++ b/db/docs/java/com/sleepycat/util/class-use/FastOutputStream.html @@ -0,0 +1,173 @@ + + + + + + +Uses of Class com.sleepycat.util.FastOutputStream (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.util.FastOutputStream

+
+ + + + + + + + + +
+Packages that use FastOutputStream
com.sleepycat.bind.tupleBindings that use sequences of primitive fields, or tuples. 
+  +

+ + + + + +
+Uses of FastOutputStream in com.sleepycat.bind.tuple
+  +

+ + + + + + + + + +
Subclasses of FastOutputStream in com.sleepycat.bind.tuple
+ classTupleOutput + +
+          An OutputStream with DataOutput-like methods for + writing tuple fields.
+  +
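A sketch of the relationship shown above: TupleOutput writes tuple fields into a FastOutputStream buffer and TupleInput reads them back. The writeInt, writeString, readInt and readString methods and the TupleInput(TupleOutput) constructor are assumed here.

    // Write two tuple fields, then read them back in the same order.
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    public class TupleRoundTrip {
        public static void main(String[] args) {
            TupleOutput out = new TupleOutput();
            out.writeInt(42);
            out.writeString("btree");

            TupleInput in = new TupleInput(out);  // reads the bytes just written
            System.out.println(in.readInt());     // 42
            System.out.println(in.readString());  // btree
        }
    }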

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/util/class-use/IOExceptionWrapper.html b/db/docs/java/com/sleepycat/util/class-use/IOExceptionWrapper.html new file mode 100644 index 000000000..c5f7fe548 --- /dev/null +++ b/db/docs/java/com/sleepycat/util/class-use/IOExceptionWrapper.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.util.IOExceptionWrapper (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.util.IOExceptionWrapper

+
+No usage of com.sleepycat.util.IOExceptionWrapper +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/util/class-use/RuntimeExceptionWrapper.html b/db/docs/java/com/sleepycat/util/class-use/RuntimeExceptionWrapper.html new file mode 100644 index 000000000..4dd0e946c --- /dev/null +++ b/db/docs/java/com/sleepycat/util/class-use/RuntimeExceptionWrapper.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.util.RuntimeExceptionWrapper (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.util.RuntimeExceptionWrapper

+
+No usage of com.sleepycat.util.RuntimeExceptionWrapper +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/util/class-use/UtfOps.html b/db/docs/java/com/sleepycat/util/class-use/UtfOps.html new file mode 100644 index 000000000..33849722d --- /dev/null +++ b/db/docs/java/com/sleepycat/util/class-use/UtfOps.html @@ -0,0 +1,136 @@ + + + + + + +Uses of Class com.sleepycat.util.UtfOps (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Class
com.sleepycat.util.UtfOps

+
+No usage of com.sleepycat.util.UtfOps +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/util/package-frame.html b/db/docs/java/com/sleepycat/util/package-frame.html new file mode 100644 index 000000000..991b19b40 --- /dev/null +++ b/db/docs/java/com/sleepycat/util/package-frame.html @@ -0,0 +1,62 @@ + + + + + + +com.sleepycat.util (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + +com.sleepycat.util + + + + +
+Interfaces  + +
+ExceptionWrapper
+ + + + + + +
+Classes  + +
+ExceptionUnwrapper +
+FastInputStream +
+FastOutputStream +
+UtfOps
+ + + + + + +
+Exceptions  + +
+IOExceptionWrapper +
+RuntimeExceptionWrapper
+ + + + diff --git a/db/docs/java/com/sleepycat/util/package-summary.html b/db/docs/java/com/sleepycat/util/package-summary.html new file mode 100644 index 000000000..bb50e4304 --- /dev/null +++ b/db/docs/java/com/sleepycat/util/package-summary.html @@ -0,0 +1,211 @@ + + + + + + +com.sleepycat.util (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+

+Package com.sleepycat.util +

+General utilities used throughout Berkeley DB. +

+See: +
+          Description +

+ + + + + + + + + +
+Interface Summary
ExceptionWrapperInterface implemented by exceptions that can contain nested exceptions.
+  + +

+ + + + + + + + + + + + + + + + + + + + + +
+Class Summary
ExceptionUnwrapperUnwraps nested exceptions by calling the ExceptionWrapper.getDetail() method for exceptions that implement the + ExceptionWrapper interface.
FastInputStreamA replacement for ByteArrayInputStream that does not synchronize every + byte read.
FastOutputStreamA replacement for ByteArrayOutputStream that does not synchronize every byte written.
UtfOpsUTF operations with more flexibility than is provided by DataInput and + DataOutput.
+  + +

+ + + + + + + + + + + + + +
+Exception Summary
IOExceptionWrapperAn IOException that can contain nested exceptions.
RuntimeExceptionWrapperA RuntimeException that can contain nested exceptions.
+  + +

+

+Package com.sleepycat.util Description +

+ +

+General utilities used throughout Berkeley DB. +

+ +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/util/package-tree.html b/db/docs/java/com/sleepycat/util/package-tree.html new file mode 100644 index 000000000..9aa8cdd59 --- /dev/null +++ b/db/docs/java/com/sleepycat/util/package-tree.html @@ -0,0 +1,165 @@ + + + + + + +com.sleepycat.util Class Hierarchy (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Hierarchy For Package com.sleepycat.util +

+
+
+
Package Hierarchies:
All Packages
+
+

+Class Hierarchy +

+ +

+Interface Hierarchy +

+ +
+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/com/sleepycat/util/package-use.html b/db/docs/java/com/sleepycat/util/package-use.html new file mode 100644 index 000000000..9510a0d05 --- /dev/null +++ b/db/docs/java/com/sleepycat/util/package-use.html @@ -0,0 +1,189 @@ + + + + + + +Uses of Package com.sleepycat.util (Sleepycat Software, Inc. - Berkeley DB Java API) + + + + + + + + + + + + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+
+

+Uses of Package
com.sleepycat.util

+
+ + + + + + + + + + + + + +
+Packages that use com.sleepycat.util
com.sleepycat.bind.tupleBindings that use sequences of primitive fields, or tuples. 
com.sleepycat.utilGeneral utilities used throughout Berkeley DB. 
+  +

+ + + + + + + + + + + +
+Classes in com.sleepycat.util used by com.sleepycat.bind.tuple
FastInputStream + +
+          A replacement for ByteArrayInputStream that does not synchronize every + byte read.
FastOutputStream + +
+          A replacement for ByteArrayOutputStream that does not synchronize every byte written.
+  +

+ + + + + + + + +
+Classes in com.sleepycat.util used by com.sleepycat.util
ExceptionWrapper + +
+          Interface implemented by exceptions that can contain nested exceptions.
+  +

+


+ + + + + + + + + + + + + + + +
+Berkeley DB
version 4.3.14
+
+ + + +
+Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/java/constant-values.html b/db/docs/java/constant-values.html index 4bcc19550..e6931e43c 100644 --- a/db/docs/java/constant-values.html +++ b/db/docs/java/constant-values.html @@ -1,28 +1,35 @@ - + - + Constant Field Values (Sleepycat Software, Inc. - Berkeley DB Java API) + + - - - - + + + + + + - + +
@@ -45,23 +52,24 @@ parent.document.title="Constant Field Values (Sleepycat Software, Inc. - Berkele  PREV   NEXT
-Berkeley DB
version 4.2.52
+Berkeley DB
version 4.3.14
- + +
@@ -74,7 +82,7 @@ Constant Field Values

- +
@@ -83,33 +91,15 @@ Constant Field Values

-

com.sleepycat.*
+
- - - - - - + - - - - - - - - - - - - - + + @@ -119,88 +109,22 @@ Constant Field Values

-

com.sleepycat.bdb.DataDb
-public static final intEINVAL22com.sleepycat.collections.TransactionRunner
+ public static final intENOMEM12
-public static final intFLAGS_MOD_MASK-256
-public static final intFLAGS_POS_MASK255DEFAULT_MAX_RETRIES10
+
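The DEFAULT_MAX_RETRIES value of 10 above is the retry limit applied by com.sleepycat.collections.TransactionRunner. A sketch of how that constant is typically consumed follows; the run, setMaxRetries and doWork method names and the pre-opened Environment env are assumptions.

    // Retry a unit of work on deadlock, up to the default limit shown above.
    import com.sleepycat.collections.TransactionRunner;
    import com.sleepycat.collections.TransactionWorker;
    import com.sleepycat.db.Environment;

    public class RunnerSketch {
        static void runOnce(Environment env) throws Exception {
            TransactionRunner runner = new TransactionRunner(env);
            runner.setMaxRetries(TransactionRunner.DEFAULT_MAX_RETRIES);  // the value 10 above
            runner.run(new TransactionWorker() {
                public void doWork() throws Exception {
                    // transactional reads and writes go here
                }
            });
        }
    }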
- + - - + - - - - - - - - - - - -
com.sleepycat.bdb.ForeignKeyIndexcom.sleepycat.db.ReplicationTransport
+ public static final intON_DELETE_ABORTEID_BROADCAST 0
-public static final intON_DELETE_CASCADE1
-public static final intON_DELETE_CLEAR2
- -

- -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - - - - -
com.sleepycat.bdb.bind.DataType
-public static final intBINARY2
-public static final intDATETIME7
-public static final intDOUBLE6
-public static final intFLOAT5
+ public static final intINT3
-public static final intLONG4
-public static final intNONEEID_INVALID 0
-public static final intSTRING1
@@ -209,20 +133,20 @@ Constant Field Values

- +
- + - - + - - + @@ -231,256 +155,18 @@ Constant Field Values

-

- -

com.sleepycat.bdb.util.FastOutputStreamcom.sleepycat.util.FastOutputStream
+ public static final intDEFAULT_BUMP_SIZEDEFAULT_BUMP_SIZE 100
+ public static final intDEFAULT_INIT_SIZEDEFAULT_INIT_SIZE 100
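DEFAULT_INIT_SIZE and DEFAULT_BUMP_SIZE above are the fallback buffer sizes for com.sleepycat.util.FastOutputStream. The sketch below overrides them; the (initialCapacity, bumpSize) constructor and the ByteArrayOutputStream-style write and size calls are assumptions.

    // Start with a 1 KB buffer that grows in 512-byte steps instead of the
    // 100-byte defaults listed above.
    import com.sleepycat.util.FastOutputStream;

    public class BufferSizing {
        public static void main(String[] args) throws java.io.IOException {
            FastOutputStream out = new FastOutputStream(1024, 512);
            out.write(0x2a);
            System.out.println(out.size() + " byte(s) written");
        }
    }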
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
com.sleepycat.bdb.util.TimeUnits
-public static final longONE_DAY86400000l
-public static final intONE_HOUR3600000
-public static final intONE_MINUTE60000
-public static final intONE_SECOND1000
-public static final longONE_WEEK604800000l
- -

- -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
com.sleepycat.db.Db
-public static final intDB_BTREE1
-public static final intDB_DONOTINDEX-30999
-public static final intDB_FILEOPEN-30998
-public static final intDB_HASH2
-public static final intDB_KEYEMPTY-30997
-public static final intDB_KEYEXIST-30996
-public static final intDB_LOCK_DEADLOCK-30995
-public static final intDB_LOCK_NOTGRANTED-30994
-public static final intDB_NOSERVER-30993
-public static final intDB_NOSERVER_HOME-30992
-public static final intDB_NOSERVER_ID-30991
-public static final intDB_NOTFOUND-30990
-public static final intDB_OLD_VERSION-30989
-public static final intDB_PAGE_NOTFOUND-30988
-public static final intDB_QUEUE4
-public static final intDB_RECNO3
-public static final intDB_REP_DUPMASTER-30987
-public static final intDB_REP_HANDLE_DEAD-30986
-public static final intDB_REP_HOLDELECTION-30985
-public static final intDB_REP_NEWMASTER-30983
-public static final intDB_REP_NEWSITE-30982
-public static final intDB_REP_OUTDATED-30980
-public static final intDB_RUNRECOVERY-30978
-public static final intDB_SECONDARY_BAD-30977
-public static final intDB_TXN_ABORT0
-public static final intDB_TXN_APPLY1
-public static final intDB_TXN_BACKWARD_ROLL3
-public static final intDB_TXN_FORWARD_ROLL4
-public static final intDB_TXN_PRINT8
-public static final intDB_UNKNOWN5
-public static final intDB_VERIFY_BAD-30976
- -

-


- + + - + +
@@ -503,25 +189,26 @@ Constant Field Values  PREV   NEXT
-Berkeley DB
version 4.2.52
+Berkeley DB
version 4.3.14
- + +
-Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/java/deprecated-list.html b/db/docs/java/deprecated-list.html index 05e3f368f..ce63cf530 100644 --- a/db/docs/java/deprecated-list.html +++ b/db/docs/java/deprecated-list.html @@ -1,28 +1,35 @@ - + - + Deprecated List (Sleepycat Software, Inc. - Berkeley DB Java API) + + - - - - + + + + + + - + +
@@ -45,988 +52,41 @@ parent.document.title="Deprecated List (Sleepycat Software, Inc. - Berkeley DB J  PREV   NEXT
-Berkeley DB
version 4.2.52
+Berkeley DB
version 4.3.14
- + +

Deprecated API

- - - - - - - - - - - - - - -
-Deprecated Interfaces
com.sleepycat.db.DbEnvFeedback -
-          As of Berkeley DB 4.2, replaced by DbEnvFeedbackHandler 
com.sleepycat.db.DbErrcall -
-          As of Berkeley DB 4.2, replaced by DbErrorHandler 
com.sleepycat.db.DbFeedback -
-          As of Berkeley DB 4.2, replaced by DbFeedbackHandler 
-  -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Deprecated Methods
com.sleepycat.db.DbEnv.dbremove(DbTxn, String, String, int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.dbRemove(DbTxn,String,String,int) 
com.sleepycat.db.DbEnv.dbrename(DbTxn, String, String, String, int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.dbRename(DbTxn,String,String,String,int) 
com.sleepycat.db.Db.del(DbTxn, Dbt, int) -
-          As of Berkeley DB 4.2, replaced by delete(DbTxn,Dbt,int) 
com.sleepycat.db.Dbc.del(int) -
-          As of Berkeley DB 4.2, replaced by Dbc.delete(int) 
com.sleepycat.db.DbErrcall.errcall(String, String) -
-          As of Berkeley DB 4.2, replaced by DbErrorHandler.error(String,String) 
com.sleepycat.db.DbEnvFeedback.feedback(DbEnv, int, int) -
-          As of Berkeley DB 4.2, replaced by DbEnvFeedbackHandler.feedback(DbEnv,int,int) 
com.sleepycat.db.DbFeedback.feedback(Db, int, int) -
-          As of Berkeley DB 4.2, replaced by DbFeedbackHandler.feedback(Db,int,int) 
com.sleepycat.db.Db.get_bt_minkey() -
-          As of Berkeley DB 4.2, replaced by getBtreeMinKey() 
com.sleepycat.db.Db.get_byteswapped() -
-          As of Berkeley DB 4.2, replaced by isByteSwapped() 
com.sleepycat.db.DbEnv.get_cachesize_ncache() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getCacheSizeNcache() 
com.sleepycat.db.Db.get_cachesize_ncache() -
-          As of Berkeley DB 4.2, replaced by getCacheSizeNcache() 
com.sleepycat.db.DbEnv.get_cachesize() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getCacheSize() 
com.sleepycat.db.Db.get_cachesize() -
-          As of Berkeley DB 4.2, replaced by getCacheSize() 
com.sleepycat.db.DbEnv.get_data_dirs() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getDataDirs() 
com.sleepycat.db.Dbt.get_data() -
-          As of Berkeley DB 4.2, replaced by Dbt.getData() 
com.sleepycat.db.Db.get_dbname() -
-          As of Berkeley DB 4.2, replaced by getDatabaseName() 
com.sleepycat.db.DbMemoryException.get_dbt() -
-          As of Berkeley DB 4.2, replaced by DbMemoryException.getDbt() 
com.sleepycat.db.Dbt.get_dlen() -
-          As of Berkeley DB 4.2, replaced by Dbt.getPartialLength() 
com.sleepycat.db.Dbt.get_doff() -
-          As of Berkeley DB 4.2, replaced by Dbt.getPartialOffset() 
com.sleepycat.db.DbEnv.get_encrypt_flags() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getEncryptFlags() 
com.sleepycat.db.Db.get_encrypt_flags() -
-          As of Berkeley DB 4.2, replaced by getEncryptFlags() 
com.sleepycat.db.Db.get_env() -
-          As of Berkeley DB 4.2, replaced by getDbEnv() 
com.sleepycat.db.DbException.get_errno() -
-          As of Berkeley DB 4.2, replaced by DbException.getErrno() 
com.sleepycat.db.DbEnv.get_errpfx() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getErrorPrefix() 
com.sleepycat.db.Db.get_errpfx() -
-          As of Berkeley DB 4.2, replaced by getErrorPrefix() 
com.sleepycat.db.DbLsn.get_file() -
-          As of Berkeley DB 4.2, replaced by DbLsn.getFile() 
com.sleepycat.db.Db.get_filename() -
-          As of Berkeley DB 4.2, replaced by getFileName() 
com.sleepycat.db.Dbt.get_flags() -
-          As of Berkeley DB 4.2, replaced by Dbt.getFlags() 
com.sleepycat.db.DbMpoolFile.get_flags() -
-          As of Berkeley DB 4.2, replaced by DbMpoolFile.getFlags() 
com.sleepycat.db.DbEnv.get_flags() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getFlags() 
com.sleepycat.db.Db.get_flags() -
-          As of Berkeley DB 4.2, replaced by getFlags() 
com.sleepycat.db.Db.get_h_ffactor() -
-          As of Berkeley DB 4.2, replaced by getHashFillFactor() 
com.sleepycat.db.Db.get_h_nelem() -
-          As of Berkeley DB 4.2, replaced by getHashNumElements() 
com.sleepycat.db.DbEnv.get_home() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getDbEnvHome() 
com.sleepycat.db.DbLockNotGrantedException.get_index() -
-          As of Berkeley DB 4.2, replaced by DbLockNotGrantedException.getIndex() 
com.sleepycat.db.DbEnv.get_lg_bsize() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getLogBufferSize() 
com.sleepycat.db.DbEnv.get_lg_dir() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getLogDir() 
com.sleepycat.db.DbEnv.get_lg_max() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getLogMax() 
com.sleepycat.db.DbEnv.get_lg_regionmax() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getLogRegionMax() 
com.sleepycat.db.DbEnv.get_lk_conflicts() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getLockConflicts() 
com.sleepycat.db.DbEnv.get_lk_detect() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getLockDetect() 
com.sleepycat.db.DbEnv.get_lk_max_lockers() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getLockMaxLockers() 
com.sleepycat.db.DbEnv.get_lk_max_locks() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getLockMaxLocks() 
com.sleepycat.db.DbEnv.get_lk_max_objects() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getLockMaxObjects() 
com.sleepycat.db.DbLockRequest.get_lock() -
-          As of Berkeley DB 4.2, replaced by DbLockRequest.getLock() 
com.sleepycat.db.DbLockNotGrantedException.get_lock() -
-          As of Berkeley DB 4.2, replaced by DbLockNotGrantedException.getLock() 
com.sleepycat.db.Db.get_lorder() -
-          As of Berkeley DB 4.2, replaced by getByteOrder() 
com.sleepycat.db.DbMpoolFile.get_maxsize() -
-          As of Berkeley DB 4.2, replaced by DbMpoolFile.getMaxsize() 
com.sleepycat.db.DbLockRequest.get_mode() -
-          As of Berkeley DB 4.2, replaced by DbLockRequest.getMode() 
com.sleepycat.db.DbLockNotGrantedException.get_mode() -
-          As of Berkeley DB 4.2, replaced by DbLockNotGrantedException.getMode() 
com.sleepycat.db.DbEnv.get_mp_mmapsize() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getMemoryPoolMapSize() 
com.sleepycat.db.DbLockRequest.get_obj() -
-          As of Berkeley DB 4.2, replaced by DbLockRequest.getObj() 
com.sleepycat.db.DbLockNotGrantedException.get_obj() -
-          As of Berkeley DB 4.2, replaced by DbLockNotGrantedException.getObj() 
com.sleepycat.db.Dbt.get_object() -
-          As of Berkeley DB - 4.2, replaced by Dbt.getObject() 
com.sleepycat.db.Dbt.get_offset() -
-          As of Berkeley DB 4.2, replaced by Dbt.getOffset() 
com.sleepycat.db.DbLsn.get_offset() -
-          As of Berkeley DB 4.2, replaced by DbLsn.getOffset() 
com.sleepycat.db.DbLockRequest.get_op() -
-          As of Berkeley DB 4.2, replaced by DbLockRequest.getOp() 
com.sleepycat.db.DbLockNotGrantedException.get_op() -
-          As of Berkeley DB 4.2, replaced by DbLockNotGrantedException.getOp() 
com.sleepycat.db.DbEnv.get_open_flags() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getOpenFlags() 
com.sleepycat.db.Db.get_open_flags() -
-          As of Berkeley DB 4.2, replaced by getOpenFlags() 
com.sleepycat.db.Db.get_pagesize() -
-          As of Berkeley DB 4.2, replaced by getPageSize() 
com.sleepycat.db.DbMpoolFile.get_priority() -
-          As of Berkeley DB 4.2, replaced by DbMpoolFile.getPriority() 
com.sleepycat.db.Db.get_q_extentsize() -
-          As of Berkeley DB 4.2, replaced by getQueueExtentSize() 
com.sleepycat.db.Db.get_re_delim() -
-          As of Berkeley DB 4.2, replaced by getRecordDelimiter() 
com.sleepycat.db.Db.get_re_len() -
-          As of Berkeley DB 4.2, replaced by getRecordLength() 
com.sleepycat.db.Db.get_re_pad() -
-          As of Berkeley DB 4.2, replaced by getRecordPad() 
com.sleepycat.db.Db.get_re_source() -
-          As of Berkeley DB 4.2, replaced by getRecordSource() 
com.sleepycat.db.Dbt.get_recno_key_data() -
-          As of Berkeley DB 4.2, replaced by Dbt.getRecordNumber() 
com.sleepycat.db.DbEnv.get_rep_limit() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getReplicationLimit() 
com.sleepycat.db.DbEnv.get_shm_key() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getSegmentId() 
com.sleepycat.db.Dbt.get_size() -
-          As of Berkeley DB 4.2, replaced by Dbt.getSize() 
com.sleepycat.db.DbEnv.get_tas_spins() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getTestAndSetSpins() 
com.sleepycat.db.DbEnv.get_timeout(int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.getTimeout(int) 
com.sleepycat.db.DbEnv.get_tmp_dir() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getTmpDir() 
com.sleepycat.db.Db.get_transactional() -
-          As of Berkeley DB 4.2, replaced by isTransactional() 
com.sleepycat.db.DbEnv.get_tx_max() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getTxnMax() 
com.sleepycat.db.DbEnv.get_tx_timestamp() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getTxnTimestamp() 
com.sleepycat.db.Db.get_type() -
-          As of Berkeley DB 4.2, replaced by getDbType() 
com.sleepycat.db.Dbt.get_ulen() -
-          As of Berkeley DB 4.2, replaced by Dbt.getUserBufferLength() 
com.sleepycat.db.DbEnv.get_verbose(int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.getVerbose(int) 
com.sleepycat.db.DbEnv.get_version_major() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getVersionMajor() 
com.sleepycat.db.DbEnv.get_version_minor() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getVersionMinor() 
com.sleepycat.db.DbEnv.get_version_patch() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getVersionPatch() 
com.sleepycat.db.DbEnv.get_version_string() -
-          As of Berkeley DB 4.2, replaced by DbEnv.getVersionString() 
com.sleepycat.db.Db.key_range(DbTxn, Dbt, DbKeyRange, int) -
-          As of Berkeley DB 4.2, replaced by keyRange(DbTxn,Dbt,DbKeyRange,int) 
com.sleepycat.db.DbEnv.lock_detect(int, int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.lockDetect(int,int) 
com.sleepycat.db.DbEnv.lock_get(int, int, Dbt, int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.lockGet(int,int,Dbt,int) 
com.sleepycat.db.DbEnv.lock_id_free(int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.lockIdFree(int) 
com.sleepycat.db.DbEnv.lock_id() -
-          As of Berkeley DB 4.2, replaced by DbEnv.lockId() 
com.sleepycat.db.DbEnv.lock_put(DbLock) -
-          As of Berkeley DB 4.2, replaced by DbEnv.lockPut(DbLock) 
com.sleepycat.db.DbEnv.lock_stat(int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.lockStat(int) 
com.sleepycat.db.DbEnv.lock_vec(int, int, DbLockRequest[], int, int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.lockVector(int,int,DbLockRequest[],int,int) 
com.sleepycat.db.DbEnv.log_archive(int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.logArchive(int) 
com.sleepycat.db.DbEnv.log_compare(DbLsn, DbLsn) -
-          As of Berkeley DB 4.2, replaced by DbEnv.logCompare(DbLsn,DbLsn) 
com.sleepycat.db.DbEnv.log_cursor(int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.logCursor(int) 
com.sleepycat.db.DbEnv.log_file(DbLsn) -
-          As of Berkeley DB 4.2, replaced by DbEnv.logFile(DbLsn) 
com.sleepycat.db.DbEnv.log_flush(DbLsn) -
-          As of Berkeley DB 4.2, replaced by DbEnv.logFlush(DbLsn) 
com.sleepycat.db.DbEnv.log_put(DbLsn, Dbt, int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.logPut(DbLsn,Dbt,int) 
com.sleepycat.db.DbEnv.log_stat(int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.logStat(int) 
com.sleepycat.db.DbEnv.memp_fstat(int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.memoryPoolFileStat(int) 
com.sleepycat.db.DbEnv.memp_stat(int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.memoryPoolStat(int) 
com.sleepycat.db.DbEnv.memp_trickle(int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.memoryPoolTrickle(int) 
com.sleepycat.db.Dbc.pget(Dbt, Dbt, Dbt, int) -
-          Replaced in Berkeley DB 4.2 by Dbc.get(Dbt,Dbt,Dbt,int) 
com.sleepycat.db.Db.pget(DbTxn, Dbt, Dbt, Dbt, int) -
-          Replaced in Berkeley DB 4.2 by get(DbTxn,Dbt,Dbt,Dbt,int) 
com.sleepycat.db.DbEnv.rep_elect(int, int, int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.replicationElect(int,int,int) 
com.sleepycat.db.DbEnv.rep_process_message(Dbt, Dbt, DbEnv.RepProcessMessage, DbLsn) -
-          As of Berkeley DB 4.2, replaced by DbEnv.replicationProcessMessage(Dbt,Dbt,DbEnv.RepProcessMessage,DbLsn) 
com.sleepycat.db.DbEnv.rep_start(Dbt, int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.replicationStart(Dbt,int) 
com.sleepycat.db.DbEnv.rep_stat(int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.replicationStat(int) 
com.sleepycat.db.DbEnv.set_app_dispatch(DbAppDispatch) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setAppDispatch(DbAppDispatch) 
com.sleepycat.db.Db.set_append_recno(DbAppendRecno) -
-          As of Berkeley DB 4.2, replaced by setAppendRecno(DbAppendRecno) 
com.sleepycat.db.Db.set_bt_compare(DbBtreeCompare) -
-          As of Berkeley DB 4.2, replaced by setBtreeCompare(DbBtreeCompare) 
com.sleepycat.db.Db.set_bt_minkey(int) -
-          As of Berkeley DB 4.2, replaced by setBtreeMinKey(int) 
com.sleepycat.db.Db.set_bt_prefix(DbBtreePrefix) -
-          As of Berkeley DB 4.2, replaced by setBtreePrefix(DbBtreePrefix) 
com.sleepycat.db.DbEnv.set_cachesize(int, int, int) -
-          Replaced in Berkeley DB 4.2 by DbEnv.setCacheSize(long,int) 
com.sleepycat.db.Db.set_cachesize(int, int, int) -
-          Replaced in Berkeley DB 4.2 by setCacheSize(long,int) 
com.sleepycat.db.DbEnv.set_cachesize(long, int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setCacheSize(long,int) 
com.sleepycat.db.Db.set_cachesize(long, int) -
-          As of Berkeley DB 4.2, replaced by setCacheSize(long,int) 
com.sleepycat.db.DbEnv.set_data_dir(String) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setDataDir(String) 
com.sleepycat.db.Dbt.set_data(byte[]) -
-          As of Berkeley DB 4.2, replaced by Dbt.setData(byte[]) 
com.sleepycat.db.Dbt.set_dlen(int) -
-          As of Berkeley DB 4.2, replaced by Dbt.setPartialLength(int) 
com.sleepycat.db.Dbt.set_doff(int) -
-          As of Berkeley DB 4.2, replaced by Dbt.setPartialOffset(int) 
com.sleepycat.db.Db.set_dup_compare(DbDupCompare) -
-          As of Berkeley DB 4.2, replaced by setDuplicateCompare(DbDupCompare) 
com.sleepycat.db.DbEnv.set_encrypt(String, int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setEncrypted(String,int) 
com.sleepycat.db.Db.set_encrypt(String, int) -
-          As of Berkeley DB 4.2, replaced by setEncrypted(String,int) 
com.sleepycat.db.DbEnv.set_errcall(DbErrcall) -
-          Replaced in Berkeley DB 4.2 by DbEnv.setErrorHandler(DbErrorHandler) 
com.sleepycat.db.Db.set_errcall(DbErrcall) -
-          Replaced in Berkeley DB 4.2 by setErrorHandler(DbErrorHandler) 
com.sleepycat.db.DbEnv.set_error_stream(OutputStream) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setErrorStream(java.io.OutputStream) 
com.sleepycat.db.Db.set_error_stream(OutputStream) -
-          As of Berkeley DB 4.2, replaced by setErrorStream(java.io.OutputStream) 
com.sleepycat.db.DbEnv.set_errpfx(String) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setErrorPrefix(String) 
com.sleepycat.db.Db.set_errpfx(String) -
-          As of Berkeley DB 4.2, replaced by setErrorPrefix(String) 
com.sleepycat.db.DbEnv.set_feedback(DbEnvFeedback) -
-          Replaced in Berkeley DB 4.2 by DbEnv.setFeedbackHandler(DbEnvFeedbackHandler) 
com.sleepycat.db.Db.set_feedback(DbFeedback) -
-          Replaced in Berkeley DB 4.2 by setFeedbackHandler(DbFeedbackHandler) 
com.sleepycat.db.Dbt.set_flags(int) -
-          As of Berkeley DB 4.2, replaced by Dbt.setFlags(int) 
com.sleepycat.db.Db.set_flags(int) -
-          As of Berkeley DB 4.2, replaced by setFlags(int) 
com.sleepycat.db.DbMpoolFile.set_flags(int, boolean) -
-          As of Berkeley DB 4.2, replaced by DbMpoolFile.setFlags(int,boolean) 
com.sleepycat.db.DbEnv.set_flags(int, boolean) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setFlags(int,boolean) 
com.sleepycat.db.Db.set_h_ffactor(int) -
-          As of Berkeley DB 4.2, replaced by setHashFillFactor(int) 
com.sleepycat.db.Db.set_h_hash(DbHash) -
-          As of Berkeley DB 4.2, replaced by setHash(DbHash) 
com.sleepycat.db.Db.set_h_nelem(int) -
-          As of Berkeley DB 4.2, replaced by setHashNumElements(int) 
com.sleepycat.db.DbEnv.set_lg_bsize(int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setLogBufferSize(int) 
com.sleepycat.db.DbEnv.set_lg_dir(String) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setLogDir(String) 
com.sleepycat.db.DbEnv.set_lg_max(int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setLogMax(int) 
com.sleepycat.db.DbEnv.set_lg_regionmax(int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setLogRegionMax(int) 
com.sleepycat.db.DbEnv.set_lk_conflicts(byte[][]) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setLockConflicts(byte[][]) 
com.sleepycat.db.DbEnv.set_lk_detect(int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setLockDetect(int) 
com.sleepycat.db.DbEnv.set_lk_max_lockers(int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setLockMaxLockers(int) 
com.sleepycat.db.DbEnv.set_lk_max_locks(int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setLockMaxLocks(int) 
com.sleepycat.db.DbEnv.set_lk_max_objects(int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setLockMaxObjects(int) 
com.sleepycat.db.DbLockRequest.set_lock(DbLock) -
-          As of Berkeley DB 4.2, replaced by DbLockRequest.setLock(DbLock) 
com.sleepycat.db.Db.set_lorder(int) -
-          As of Berkeley DB 4.2, replaced by setByteOrder(int) 
com.sleepycat.db.DbMpoolFile.set_maxsize(long) -
-          As of Berkeley DB 4.2, replaced by DbMpoolFile.setMaxsize(long) 
com.sleepycat.db.DbLockRequest.set_mode(int) -
-          As of Berkeley DB 4.2, replaced by DbLockRequest.setMode(int) 
com.sleepycat.db.DbEnv.set_mp_mmapsize(long) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setMemoryPoolMapSize(long) 
com.sleepycat.db.DbLockRequest.set_obj(Dbt) -
-          As of Berkeley DB 4.2, replaced by DbLockRequest.setObj(Dbt) 
com.sleepycat.db.Dbt.set_object(Object) -
-          As of Berkeley DB 4.2, replaced - by Dbt.setObject(Object) 
com.sleepycat.db.Dbt.set_offset(int) -
-          As of Berkeley DB 4.2, replaced by Dbt.setOffset(int) 
com.sleepycat.db.DbLockRequest.set_op(int) -
-          As of Berkeley DB 4.2, replaced by DbLockRequest.setOp(int) 
com.sleepycat.db.Db.set_pagesize(long) -
-          As of Berkeley DB 4.2, replaced by setPageSize(long) 
com.sleepycat.db.DbMpoolFile.set_priority(int) -
-          As of Berkeley DB 4.2, replaced by DbMpoolFile.setPriority(int) 
com.sleepycat.db.Db.set_q_extentsize(int) -
-          As of Berkeley DB 4.2, replaced by setQueueExtentSize(int) 
com.sleepycat.db.Db.set_re_delim(int) -
-          As of Berkeley DB 4.2, replaced by setRecordDelimiter(int) 
com.sleepycat.db.Db.set_re_len(int) -
-          As of Berkeley DB 4.2, replaced by setRecordLength(int) 
com.sleepycat.db.Db.set_re_pad(int) -
-          As of Berkeley DB 4.2, replaced by setRecordPad(int) 
com.sleepycat.db.Db.set_re_source(String) -
-          As of Berkeley DB 4.2, replaced by setRecordSource(String) 
com.sleepycat.db.Dbt.set_recno_key_data(int) -
-          As of Berkeley DB 4.2, replaced by Dbt.setRecordNumber(int) 
com.sleepycat.db.DbEnv.set_rep_limit(int, int) -
-          Replaced in Berkeley DB 4.2 by DbEnv.setReplicationLimit(long) 
com.sleepycat.db.DbEnv.set_rep_limit(long) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setReplicationLimit(long) 
com.sleepycat.db.DbEnv.set_rep_transport(int, DbRepTransport) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setReplicationTransport(int,DbRepTransport) 
com.sleepycat.db.DbEnv.set_rpc_server(DbClient, String, long, long, int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setRpcServer(DbClient,String,long,long,int) 
com.sleepycat.db.DbEnv.set_shm_key(long) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setSegmentId(long) 
com.sleepycat.db.Dbt.set_size(int) -
-          As of Berkeley DB 4.2, replaced by Dbt.setSize(int) 
com.sleepycat.db.DbEnv.set_tas_spins(int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setTestAndSetSpins(int) 
com.sleepycat.db.DbTxn.set_timeout(long, int) -
-          As of Berkeley DB 4.2, replaced by DbTxn.setTimeout(long,int) 
com.sleepycat.db.DbEnv.set_timeout(long, int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setTimeout(long,int) 
com.sleepycat.db.DbEnv.set_tmp_dir(String) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setTmpDir(String) 
com.sleepycat.db.DbEnv.set_tx_max(int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setTxnMax(int) 
com.sleepycat.db.DbEnv.set_tx_timestamp(Date) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setTxnTimestamp(java.util.Date) 
com.sleepycat.db.Dbt.set_ulen(int) -
-          As of Berkeley DB 4.2, replaced by Dbt.setUserBufferLength(int) 
com.sleepycat.db.DbEnv.set_verbose(int, boolean) -
-          As of Berkeley DB 4.2, replaced by DbEnv.setVerbose(int,boolean) 
com.sleepycat.db.DbEnv.txn_begin(DbTxn, int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.txnBegin(DbTxn,int) 
com.sleepycat.db.DbEnv.txn_checkpoint(int, int, int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.txnCheckpoint(int,int,int) 
com.sleepycat.db.DbEnv.txn_recover(int, int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.txnRecover(int,int) 
com.sleepycat.db.DbEnv.txn_stat(int) -
-          As of Berkeley DB 4.2, replaced by DbEnv.txnStat(int) 
-  -


- + + - + +
@@ -1049,25 +109,26 @@ parent.document.title="Deprecated List (Sleepycat Software, Inc. - Berkeley DB J  PREV   NEXT
-Berkeley DB
version 4.2.52
+Berkeley DB
version 4.3.14
- + +
-Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/java/help-doc.html b/db/docs/java/help-doc.html index b6693b36b..77e2ee5bc 100644 --- a/db/docs/java/help-doc.html +++ b/db/docs/java/help-doc.html @@ -1,28 +1,35 @@ - + - + API Help (Sleepycat Software, Inc. - Berkeley DB Java API) + + - - - - + + + + + + - + +
@@ -45,23 +52,24 @@ parent.document.title="API Help (Sleepycat Software, Inc. - Berkeley DB Java API  PREV   NEXT
-Berkeley DB
version 4.2.52
+Berkeley DB
version 4.3.14
- + +
@@ -129,13 +137,15 @@ This help file applies to API documentation generated using the standard doclet.

- + + - + +
@@ -158,25 +168,26 @@ This help file applies to API documentation generated using the standard doclet.  PREV   NEXT
-Berkeley DB
version 4.2.52
+Berkeley DB
version 4.3.14
- + +
-Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/java/index-all.html b/db/docs/java/index-all.html index 747427269..8ec3eecbf 100644 --- a/db/docs/java/index-all.html +++ b/db/docs/java/index-all.html @@ -1,28 +1,35 @@ - + - + Index (Sleepycat Software, Inc. - Berkeley DB Java API) + + - - - - + + + + + + - + +
@@ -45,4574 +52,4459 @@ parent.document.title="Index (Sleepycat Software, Inc. - Berkeley DB Java API)";  PREV   NEXT
-Berkeley DB
version 4.2.52
+Berkeley DB
version 4.3.14
- + + -A B C D E F G H I J K L M N O P Q R S T U V W X
+A B C D E F G H I J K L M N O P Q R S T U V W Y

A

-
abort() - -Method in class com.sleepycat.db.DbTxn -
The DbTxn.abort method causes an abnormal termination of the transaction. -
abortTxn() - -Method in class com.sleepycat.bdb.CurrentTransaction +
ABORT - +Static variable in class com.sleepycat.db.RecoveryOperation +
The log is being read backward during a transaction abort; undo the + operation described by the log record. +
APPLY - +Static variable in class com.sleepycat.db.RecoveryOperation +
The log is being applied on a replica site; redo the operation + described by the log record. +
abort() - +Method in class com.sleepycat.db.Transaction +
Cause an abnormal termination of the transaction. +
abortTransaction() - +Method in class com.sleepycat.collections.CurrentTransaction
Aborts the transaction that is active for the current thread for this environment and makes the parent transaction (if any) the current transaction. -
add(int, Object) - -Method in class com.sleepycat.bdb.collection.StoredList -
Inserts the specified element at the specified position in this list +
add(Object) - +Method in class com.sleepycat.collections.StoredEntrySet +
Adds the specified element to this set if it is not already present (optional operation). -
add(Object) - -Method in class com.sleepycat.bdb.collection.StoredValueSet -
Adds the specified entity to this set if it is not already present +
add(Object) - +Method in class com.sleepycat.collections.StoredIterator +
Inserts the specified element into the list or inserts a duplicate into + other types of collections (optional operation). +
add(Object) - +Method in class com.sleepycat.collections.StoredKeySet +
Adds the specified key to this set if it is not already present + (optional operation). +
add(int, Object) - +Method in class com.sleepycat.collections.StoredList +
Inserts the specified element at the specified position in this list (optional operation). -
add(Object) - -Method in class com.sleepycat.bdb.collection.StoredList +
add(Object) - +Method in class com.sleepycat.collections.StoredList
Appends the specified element to the end of this list (optional operation). -
add(Object) - -Method in class com.sleepycat.bdb.collection.StoredKeySet -
Adds the specified key to this set if it is not already present - (optional operation). -
add(Object) - -Method in class com.sleepycat.bdb.collection.StoredIterator -
Inserts the specified element into the list or inserts a duplicate into - other types of collections (optional operation). -
add(Object) - -Method in class com.sleepycat.bdb.collection.StoredEntrySet -
Adds the specified element to this set if it is not already present +
add(Object) - +Method in class com.sleepycat.collections.StoredValueSet +
Adds the specified entity to this set if it is not already present (optional operation). -
addAll(Collection) - -Method in class com.sleepycat.bdb.collection.StoredCollection +
addAll(Collection) - +Method in class com.sleepycat.collections.StoredCollection
Adds all of the elements in the specified collection to this collection (optional operation). -
addAll(int, Collection) - -Method in class com.sleepycat.bdb.collection.StoredList +
addAll(int, Collection) - +Method in class com.sleepycat.collections.StoredList
Inserts all of the elements in the specified collection into this list at the specified position (optional operation). -
addSize(int) - -Method in class com.sleepycat.bdb.util.FastOutputStream +
addDataDir(String) - +Method in class com.sleepycat.db.EnvironmentConfig +
Set the path of a directory to be used as the location of the access + method database files. +
addSize(int) - +Method in class com.sleepycat.util.FastOutputStream
Skip the given number of bytes in the buffer. -
addValue(DataThang, Object, int) - -Method in class com.sleepycat.bdb.DataView -
Adds a duplicate value for a specified key. -
appDispatch(DbEnv, Dbt, DbLsn, int) - -Method in interface com.sleepycat.db.DbAppDispatch -
The DbAppDispatch interface is used by the DbEnv.setAppDispatch method. -
append(Object) - -Method in class com.sleepycat.bdb.collection.StoredMap -
Appends a given value returning the newly assigned key. -
append(Object) - -Method in class com.sleepycat.bdb.collection.StoredList +
append(Object) - +Method in class com.sleepycat.collections.StoredList
Appends a given value returning the newly assigned index. -
append(Object, Object[], Object[]) - -Method in class com.sleepycat.bdb.DataView -
Appends a value and returns the new key. -
areDuplicatesAllowed() - -Method in class com.sleepycat.bdb.DataView -
Returns whether duplicates are allowed for the index or store. -
areDuplicatesAllowed() - -Method in class com.sleepycat.bdb.DataDb -
Returns whether duplicates are allowed for the database. -
areDuplicatesAllowed() - -Method in class com.sleepycat.bdb.collection.StoredContainer +
append(Object) - +Method in class com.sleepycat.collections.StoredMap +
Appends a given value returning the newly assigned key. +
append(Transaction, DatabaseEntry, DatabaseEntry) - +Method in class com.sleepycat.db.Database +
+Append the key/data pair to the end of the database. +
appendRecordNumber(Database, DatabaseEntry, int) - +Method in interface com.sleepycat.db.RecordNumberAppender +
A callback function to modify the stored database based on the + generated key. +
areDuplicatesAllowed() - +Method in class com.sleepycat.collections.StoredContainer
Returns whether duplicate keys are allowed in this container. -
areDuplicatesOrdered() - -Method in class com.sleepycat.bdb.DataView -
Returns whether duplicates are ordered for the index or store. -
areDuplicatesOrdered() - -Method in class com.sleepycat.bdb.DataDb -
Returns whether duplicates are ordered for the database. -
areDuplicatesOrdered() - -Method in class com.sleepycat.bdb.collection.StoredContainer +
areDuplicatesOrdered() - +Method in class com.sleepycat.collections.StoredContainer
Returns whether duplicate keys are allowed and sorted by element value. -
areKeysRenumbered() - -Method in class com.sleepycat.bdb.DataView -
Returns whether keys (record numbers) are renumbered for the index or - store. -
areKeysRenumbered() - -Method in class com.sleepycat.bdb.DataDb -
Returns whether keys (record numbers) are renumbered for the database. -
areKeysRenumbered() - -Method in class com.sleepycat.bdb.collection.StoredContainer +
areKeysRenumbered() - +Method in class com.sleepycat.collections.StoredContainer
Returns whether keys are renumbered when insertions and deletions occur. -
assignKey(DataBuffer) - -Method in interface com.sleepycat.bdb.PrimaryKeyAssigner +
assignKey(DatabaseEntry) - +Method in interface com.sleepycat.collections.PrimaryKeyAssigner
Assigns a new primary key value into the given data buffer. -
associate(DbTxn, Db, DbSecondaryKeyCreate, int) - -Method in class com.sleepycat.db.Db -
  -
autoCommitCollection(Collection) - -Static method in class com.sleepycat.bdb.collection.StoredCollections -
Creates a auto-commit collection from a given stored collection. -
autoCommitList(List) - -Static method in class com.sleepycat.bdb.collection.StoredCollections -
Creates a auto-commit list from a given stored list. -
autoCommitMap(Map) - -Static method in class com.sleepycat.bdb.collection.StoredCollections -
Creates a auto-commit map from a given stored map. -
autoCommitSet(Set) - -Static method in class com.sleepycat.bdb.collection.StoredCollections -
Creates a auto-commit set from a given stored set. -
autoCommitSortedMap(SortedMap) - -Static method in class com.sleepycat.bdb.collection.StoredCollections -
Creates a auto-commit sorted map from a given stored sorted map. -
autoCommitSortedSet(SortedSet) - -Static method in class com.sleepycat.bdb.collection.StoredCollections -
Creates a auto-commit sorted set from a given stored sorted set. -
autoCommitView(boolean) - -Method in class com.sleepycat.bdb.DataView -
Returns a new view with a specified autoCommit setting. -
available() - -Method in class com.sleepycat.bdb.util.FastInputStream +
available() - +Method in class com.sleepycat.util.FastInputStream
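The abortTransaction entry above pairs with the beginTransaction and commitTransaction entries later in this index. A sketch of that pattern follows; CurrentTransaction.getInstance, the null (default) TransactionConfig argument and the pre-opened Environment env are assumptions.

    // Begin, commit or abort the thread's current transaction around collection access.
    import com.sleepycat.collections.CurrentTransaction;
    import com.sleepycat.db.Environment;

    public class CurrentTxnSketch {
        static void update(Environment env) throws Exception {
            CurrentTransaction ct = CurrentTransaction.getInstance(env);
            ct.beginTransaction(null);        // null assumed to select the default config
            try {
                // reads and writes via the stored collections go here
                ct.commitTransaction();
            } catch (Exception e) {
                ct.abortTransaction();
                throw e;
            }
        }
    }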
 

B

-
beginTxn() - -Method in class com.sleepycat.bdb.CurrentTransaction -
Begins a new transaction for this environment and associates it with - the current thread. -
beginTxn(boolean, boolean) - -Method in class com.sleepycat.bdb.CurrentTransaction +
BACKWARD_ROLL - +Static variable in class com.sleepycat.db.RecoveryOperation +
The log is being read backward to determine which transactions have + been committed and to abort those operations that were not; undo the + operation described by the log record. +
BTREE - +Static variable in class com.sleepycat.db.DatabaseType +
The database is a Btree. +
BooleanBinding - class com.sleepycat.bind.tuple.BooleanBinding.
A concrete TupleBinding for a Boolean primitive + wrapper or a boolean primitive.
BooleanBinding() - +Constructor for class com.sleepycat.bind.tuple.BooleanBinding +
  +
BtreePrefixCalculator - interface com.sleepycat.db.BtreePrefixCalculator.
An interface specifying how Btree prefixes should be calculated.
BtreeStats - class com.sleepycat.db.BtreeStats.
The BtreeStats object is used to return Btree +or Recno database statistics.
ByteArrayBinding - class com.sleepycat.bind.ByteArrayBinding.
A pass-through EntryBinding that uses the entry's byte array as + the key or data object.
ByteArrayBinding() - +Constructor for class com.sleepycat.bind.ByteArrayBinding +
Creates a byte array binding. +
ByteBinding - class com.sleepycat.bind.tuple.ByteBinding.
A concrete TupleBinding for a Byte primitive + wrapper or a byte primitive.
ByteBinding() - +Constructor for class com.sleepycat.bind.tuple.ByteBinding +
  +
beginTransaction(TransactionConfig) - +Method in class com.sleepycat.collections.CurrentTransaction
Begins a new transaction for this environment and associates it with the current thread. -
BINARY - -Static variable in interface com.sleepycat.bdb.bind.DataType -
byte[] data type. -
bt_dup_pg - -Variable in class com.sleepycat.db.DbBtreeStat -
Number of database duplicate pages. -
bt_dup_pgfree - -Variable in class com.sleepycat.db.DbBtreeStat -
Number of bytes free in database duplicate pages. -
bt_free - -Variable in class com.sleepycat.db.DbBtreeStat -
Number of pages on the free list. -
bt_int_pg - -Variable in class com.sleepycat.db.DbBtreeStat -
Number of database internal pages. -
bt_int_pgfree - -Variable in class com.sleepycat.db.DbBtreeStat -
Number of bytes free in database internal pages. -
bt_leaf_pg - -Variable in class com.sleepycat.db.DbBtreeStat -
Number of database leaf pages. -
bt_leaf_pgfree - -Variable in class com.sleepycat.db.DbBtreeStat -
Number of bytes free in database leaf pages. -
bt_levels - -Variable in class com.sleepycat.db.DbBtreeStat -
Number of levels in the database. -
bt_magic - -Variable in class com.sleepycat.db.DbBtreeStat -
Magic number that identifies the file as a Btree database. -
bt_maxkey - -Variable in class com.sleepycat.db.DbBtreeStat -
  -
bt_metaflags - -Variable in class com.sleepycat.db.DbBtreeStat -
  -
bt_minkey - -Variable in class com.sleepycat.db.DbBtreeStat -
The minimum keys per page. -
bt_ndata - -Variable in class com.sleepycat.db.DbBtreeStat -
For the Btree Access Method, the number of key/data pairs in the database. -
bt_nkeys - -Variable in class com.sleepycat.db.DbBtreeStat -
For the Btree Access Method, the number of unique keys in the database. -
bt_over_pg - -Variable in class com.sleepycat.db.DbBtreeStat -
Number of database overflow pages. -
bt_over_pgfree - -Variable in class com.sleepycat.db.DbBtreeStat -
Number of bytes free in database overflow pages. -
bt_pagesize - -Variable in class com.sleepycat.db.DbBtreeStat -
Underlying database page size, in bytes. -
bt_re_len - -Variable in class com.sleepycat.db.DbBtreeStat -
The length of fixed-length records. -
bt_re_pad - -Variable in class com.sleepycat.db.DbBtreeStat -
The padding byte value for fixed-length records. -
bt_version - -Variable in class com.sleepycat.db.DbBtreeStat -
The version of the Btree database. -
ByteArrayBinding - class com.sleepycat.bdb.bind.ByteArrayBinding.
A transparent binding where the data byte array is used as the object.
ByteArrayBinding(ByteArrayFormat) - -Constructor for class com.sleepycat.bdb.bind.ByteArrayBinding -
Creates a byte array binding. -
ByteArrayFormat - class com.sleepycat.bdb.bind.ByteArrayFormat.
The format for data stored as a byte array.
ByteArrayFormat() - -Constructor for class com.sleepycat.bdb.bind.ByteArrayFormat -
Creates a byte array format. -
bytesToChars(byte[], int, char[], int, int, boolean) - -Static method in class com.sleepycat.bdb.util.UtfOps +
beginTransaction(Transaction, TransactionConfig) - +Method in class com.sleepycat.db.Environment +
Create a new transaction in the database environment. +
booleanToEntry(boolean, DatabaseEntry) - +Static method in class com.sleepycat.bind.tuple.BooleanBinding +
Converts a simple boolean value into an entry buffer. +
byteToEntry(byte, DatabaseEntry) - +Static method in class com.sleepycat.bind.tuple.ByteBinding +
Converts a simple byte value into an entry buffer. +
bytesToChars(byte[], int, char[], int, int, boolean) - +Static method in class com.sleepycat.util.UtfOps
Converts byte arrays into character arrays. -
bytesToString(byte[], int, int) - -Static method in class com.sleepycat.bdb.util.UtfOps +
bytesToString(byte[], int, int) - +Static method in class com.sleepycat.util.UtfOps
Converts byte arrays into strings.
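The booleanToEntry and byteToEntry entries above convert single primitives into DatabaseEntry buffers. A short sketch follows; the entryToBoolean method used to read the value back is an assumption.

    // Fill key and data entries from primitives, then read one back.
    import com.sleepycat.bind.tuple.BooleanBinding;
    import com.sleepycat.bind.tuple.ByteBinding;
    import com.sleepycat.db.DatabaseEntry;

    public class PrimitiveBindings {
        public static void main(String[] args) {
            DatabaseEntry key = new DatabaseEntry();
            DatabaseEntry data = new DatabaseEntry();

            ByteBinding.byteToEntry((byte) 7, key);      // key now holds the byte 7
            BooleanBinding.booleanToEntry(true, data);   // data now holds "true"

            System.out.println(BooleanBinding.entryToBoolean(data));  // prints "true"
        }
    }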

C

-
canDeriveKeyFromValue() - -Method in class com.sleepycat.bdb.DataView -
Returns whether data keys can be derived from the value/entity binding - of this view, which determines whether a value/entity object alone is - sufficient for operations that require keys. -
charsToBytes(char[], int, byte[], int, int) - -Static method in class com.sleepycat.bdb.util.UtfOps +
CacheFile - class com.sleepycat.db.CacheFile.
This class allows applications to modify settings for +a Database using the Database.getCacheFile.
CacheFilePriority - class com.sleepycat.db.CacheFilePriority.
Priorities that can be assigned to files in the cache.
CacheFileStats - class com.sleepycat.db.CacheFileStats.
Statistics for a file in the cache.
CacheStats - class com.sleepycat.db.CacheStats.
Cache statistics for a database environment.
CharacterBinding - class com.sleepycat.bind.tuple.CharacterBinding.
A concrete TupleBinding for a Character primitive + wrapper or a char primitive.
CharacterBinding() - +Constructor for class com.sleepycat.bind.tuple.CharacterBinding +
  +
CheckpointConfig - class com.sleepycat.db.CheckpointConfig.
Specifies the attributes of an application invoked checkpoint operation.
CheckpointConfig() - +Constructor for class com.sleepycat.db.CheckpointConfig +
An instance created using the default constructor is initialized + with the system's default settings. +
ClassCatalog - interface com.sleepycat.bind.serial.ClassCatalog.
A catalog of class description information for use during object + serialization.
CurrentTransaction - class com.sleepycat.collections.CurrentTransaction.
Provides access to the current transaction for the current thread within the + context of a Berkeley DB environment.
Cursor - class com.sleepycat.db.Cursor.
A database cursor.
CursorConfig - class com.sleepycat.db.CursorConfig.
Specify the attributes of database cursor.
CursorConfig() - +Constructor for class com.sleepycat.db.CursorConfig +
An instance created using the default constructor is initialized with + the system's default settings. +
charToEntry(char, DatabaseEntry) - +Static method in class com.sleepycat.bind.tuple.CharacterBinding +
Converts a simple char value into an entry buffer. +
charsToBytes(char[], int, byte[], int, int) - +Static method in class com.sleepycat.util.UtfOps
Converts character arrays into byte arrays. -
ClassCatalog - interface com.sleepycat.bdb.bind.serial.ClassCatalog.
Represents a catalog of class information for use in object serialization so - that class descriptions can be stored separately from serialized objects.
clear() - -Method in class com.sleepycat.bdb.collection.StoredContainer +
checkpoint(CheckpointConfig) - +Method in class com.sleepycat.db.Environment +
Synchronously checkpoint the database environment. +
clear() - +Method in class com.sleepycat.collections.StoredContainer
Removes all mappings or elements from this map or collection (optional operation). -
clear(Collection) - -Method in class com.sleepycat.bdb.DataView -
Deletes all records in the current range, optionally returning the - values for the deleted records. -
clearDataFormation() - -Method in class com.sleepycat.bdb.DataThang -
  -
clearDataFormation() - -Method in class com.sleepycat.bdb.bind.SimpleBuffer -
  -
clearDataFormation() - -Method in interface com.sleepycat.bdb.bind.DataBuffer -
Sets the formation associated with the data in this buffer to null. -
clearIndexKey(DataBuffer) - -Method in interface com.sleepycat.bdb.bind.KeyExtractor -
Clears the index key in a value buffer. -
clearIndexKey(DataBuffer) - -Method in class com.sleepycat.bdb.bind.serial.TupleSerialKeyExtractor -
  -
clearIndexKey(DataBuffer) - -Method in class com.sleepycat.bdb.bind.serial.SerialSerialKeyExtractor -
  -
clearIndexKey(DataBuffer) - -Method in class com.sleepycat.bdb.bind.tuple.TupleTupleKeyExtractor -
  -
clearIndexKey(Object) - -Method in class com.sleepycat.bdb.bind.serial.TupleSerialMarshalledKeyExtractor -
  -
clearIndexKey(Object) - -Method in class com.sleepycat.bdb.bind.serial.TupleSerialKeyExtractor -
Clears the index key in the deserialized value data. -
clearIndexKey(Object) - -Method in class com.sleepycat.bdb.bind.serial.SerialSerialKeyExtractor -
Clears the index key in a value data object. -
clearIndexKey(String) - -Method in interface com.sleepycat.bdb.bind.tuple.MarshalledTupleKeyEntity -
Clears the entity's index key value for the given key name. -
clearIndexKey(TupleInput, TupleOutput) - -Method in class com.sleepycat.bdb.bind.tuple.TupleTupleMarshalledKeyExtractor -
  -
clearIndexKey(TupleInput, TupleOutput) - -Method in class com.sleepycat.bdb.bind.tuple.TupleTupleKeyExtractor -
Clears the index key in the tuple value data. -
close() - -Method in class com.sleepycat.bdb.StoredClassCatalog -
  -
close() - -Method in class com.sleepycat.bdb.DataStore -
Closes the store and all associated indices. -
close() - -Method in class com.sleepycat.bdb.DataDb -
Closes the database. -
close() - -Method in class com.sleepycat.bdb.DataCursor -
Closes a cursor. -
close() - -Method in interface com.sleepycat.bdb.bind.serial.ClassCatalog +
close() - +Method in interface com.sleepycat.bind.serial.ClassCatalog
Close a catalog database and release any cached resources. -
close() - -Method in class com.sleepycat.bdb.collection.StoredIterator +
close() - +Method in class com.sleepycat.bind.serial.StoredClassCatalog +
  +
close(Iterator) - +Static method in class com.sleepycat.collections.StoredIterator +
Closes the given iterator using StoredIterator.close() if it is a StoredIterator. +
close() - +Method in class com.sleepycat.collections.StoredIterator
Closes this iterator. -
close() - -Method in class com.sleepycat.db.Dbc -
The Dbc.close method discards the cursor. -
close(int) - -Method in class com.sleepycat.db.DbLogc -
The DbLogc.close method discards the log cursor. -
close(int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.close method closes the Berkeley DB environment, freeing any allocated resources and closing any underlying subsystems. -
close(int) - -Method in class com.sleepycat.db.Db -
The Db.close method flushes any cached database information to disk, closes any open cursors, frees any allocated resources, and closes any underlying files. -
close(Iterator) - -Static method in class com.sleepycat.bdb.collection.StoredIterator -
Closes the given iterator using StoredIterator.close() if it is a StoredIterator. -
closeCursor(Dbc) - -Method in class com.sleepycat.bdb.DataDb -
Closes a cursor for this database. -
com.sleepycat.bdb - package com.sleepycat.bdb
Core database classes for defining an environment, creating data stores, and running transactions
-[reference guide]
com.sleepycat.bdb.bind - package com.sleepycat.bdb.bind
Interfaces for defining data formats and data-to-object bindings -[reference guide]
com.sleepycat.bdb.bind.serial - package com.sleepycat.bdb.bind.serial
Formats and bindings that use Java serialization
com.sleepycat.bdb.bind.tuple - package com.sleepycat.bdb.bind.tuple
Formats and bindings that use sequences of primitive data items or tuples
com.sleepycat.bdb.collection - package com.sleepycat.bdb.collection
Collection classes providing Map, Set, List and Iterator views of a data store
-[reference guide]
com.sleepycat.bdb.factory - package com.sleepycat.bdb.factory
Factory classes
com.sleepycat.bdb.util - package com.sleepycat.bdb.util
General utilities used throughout DB
-[reference guide]
com.sleepycat.db - package com.sleepycat.db
Java API programming notes
-[reference guide]
commit(int) - -Method in class com.sleepycat.db.DbTxn -
The DbTxn.commit method ends the transaction. -
commitTxn() - -Method in class com.sleepycat.bdb.CurrentTransaction +
close() - +Method in class com.sleepycat.db.Cursor +
Discard the cursor. +
close() - +Method in class com.sleepycat.db.Database +
Flush any cached database information to disk and discard the database +handle. +
close(boolean) - +Method in class com.sleepycat.db.Database +
Flush any cached database information to disk and discard the database +handle. +
close() - +Method in class com.sleepycat.db.Environment +
Close the database environment, freeing any allocated resources and + closing any underlying subsystems. +
close() - +Method in class com.sleepycat.db.JoinCursor +
Closes the cursors that have been opened by this join cursor. +
close() - +Method in class com.sleepycat.db.LogCursor +
Close the log cursor. +
close() - +Method in class com.sleepycat.db.Sequence +
Close a sequence. +
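For orientation, a minimal sketch of the open/checkpoint/close lifecycle in the renamed com.sleepycat.db API; the home directory, file names, and configuration flags below are illustrative assumptions, not part of this patch:

    import java.io.File;
    import com.sleepycat.db.*;

    public class LifecycleSketch {
        public static void main(String[] args) throws Exception {
            // Transactional environment; the home directory is an assumption.
            EnvironmentConfig envConfig = new EnvironmentConfig();
            envConfig.setAllowCreate(true);
            envConfig.setInitializeCache(true);
            envConfig.setInitializeLocking(true);
            envConfig.setInitializeLogging(true);
            envConfig.setTransactional(true);
            Environment env = new Environment(new File("/tmp/dbhome"), envConfig);

            // A Btree database inside the environment; names are illustrative.
            DatabaseConfig dbConfig = new DatabaseConfig();
            dbConfig.setAllowCreate(true);
            dbConfig.setTransactional(true);
            dbConfig.setType(DatabaseType.BTREE);
            Database db = env.openDatabase(null, "example.db", null, dbConfig);

            // ... use the handles ...

            db.close();              // flush and discard the database handle
            env.checkpoint(null);    // null selects CheckpointConfig.DEFAULT
            env.close();             // release environment resources
        }
    }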
com.sleepycat.bind - package com.sleepycat.bind
Bindings between database entries and Java objects
+[reference guide].
com.sleepycat.bind.serial - package com.sleepycat.bind.serial
Bindings that use Java serialization.
com.sleepycat.bind.tuple - package com.sleepycat.bind.tuple
Bindings that use sequences of primitive fields, or tuples.
com.sleepycat.collections - package com.sleepycat.collections
Data access based on the standard Java collections API
+[reference guide].
com.sleepycat.db - package com.sleepycat.db
Berkeley DB Java API
+[reference guide] [Java programming notes].
com.sleepycat.util - package com.sleepycat.util
General utilities used throughout Berkeley DB.
commit() - +Method in class com.sleepycat.db.Transaction +
End the transaction. +
commitNoSync() - +Method in class com.sleepycat.db.Transaction +
End the transaction, not committing synchronously. +
commitSync() - +Method in class com.sleepycat.db.Transaction +
End the transaction, committing synchronously. +
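The commit variants above are normally paired with Environment.beginTransaction; a sketch only, where env and db are assumed to be a transactional Environment and Database opened elsewhere and the key and value strings are made up:

    Transaction txn = env.beginTransaction(null, null);
    boolean committed = false;
    try {
        DatabaseEntry key = new DatabaseEntry("key".getBytes());
        DatabaseEntry data = new DatabaseEntry("value".getBytes());
        db.put(txn, key, data);
        txn.commit();           // commitSync()/commitNoSync() override the
                                // environment's default durability behavior
        committed = true;
    } finally {
        if (!committed) {
            txn.abort();        // never leave a transaction unresolved
        }
    }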
commitTransaction() - +Method in class com.sleepycat.collections.CurrentTransaction
Commits the transaction that is active for the current thread for this environment and makes the parent transaction (if any) the current transaction. -
comparator() - -Method in class com.sleepycat.bdb.collection.StoredSortedValueSet +
comparator() - +Method in class com.sleepycat.collections.StoredSortedEntrySet
Returns null since comparators are not supported. -
comparator() - -Method in class com.sleepycat.bdb.collection.StoredSortedMap +
comparator() - +Method in class com.sleepycat.collections.StoredSortedKeySet
Returns null since comparators are not supported. -
comparator() - -Method in class com.sleepycat.bdb.collection.StoredSortedKeySet +
comparator() - +Method in class com.sleepycat.collections.StoredSortedMap
Returns null since comparators are not supported. -
comparator() - -Method in class com.sleepycat.bdb.collection.StoredSortedEntrySet +
comparator() - +Method in class com.sleepycat.collections.StoredSortedValueSet
Returns null since comparators are not supported. -
compare(Db, Dbt, Dbt) - -Method in interface com.sleepycat.db.DbBtreeCompare -
The DbBtreeCompare interface is used by the Db.setBtreeCompare method. -
compareDuplicates(Db, Dbt, Dbt) - -Method in interface com.sleepycat.db.DbDupCompare -
The DbDupCompare interface is used by the Db.setDuplicateCompare method. -
compareTo(Dbt) - -Method in class com.sleepycat.bdb.DataThang -
Returns -1 if the byte array of this thang is less than that of the - given thang, 0 if they are equal, or 1 if greater. -
consume(int, Object[], Object[]) - -Method in class com.sleepycat.bdb.DataView -
Performs a database 'get and consume' operation. -
contains(Object) - -Method in class com.sleepycat.bdb.collection.StoredValueSet +
compare(LogSequenceNumber, LogSequenceNumber) - +Static method in class com.sleepycat.db.LogSequenceNumber +
Compare two LogSequenceNumber objects. +
consume(Transaction, DatabaseEntry, DatabaseEntry, boolean) - +Method in class com.sleepycat.db.Database +
Return the record number and data from the available record closest to +the head of the queue, and delete the record. +
contains(Object) - +Method in class com.sleepycat.collections.StoredEntrySet
Returns true if this set contains the specified element. -
contains(Object) - -Method in class com.sleepycat.bdb.collection.StoredList -
Returns true if this list contains the specified element. -
contains(Object) - -Method in class com.sleepycat.bdb.collection.StoredKeySet +
contains(Object) - +Method in class com.sleepycat.collections.StoredKeySet
Returns true if this set contains the specified key. -
contains(Object) - -Method in class com.sleepycat.bdb.collection.StoredEntrySet +
contains(Object) - +Method in class com.sleepycat.collections.StoredList +
Returns true if this list contains the specified element. +
contains(Object) - +Method in class com.sleepycat.collections.StoredValueSet
Returns true if this set contains the specified element. -
containsAll(Collection) - -Method in class com.sleepycat.bdb.collection.StoredCollection +
containsAll(Collection) - +Method in class com.sleepycat.collections.StoredCollection
Returns true if this collection contains all of the elements in the specified collection. -
containsKey(Object) - -Method in class com.sleepycat.bdb.collection.StoredMap +
containsKey(Object) - +Method in class com.sleepycat.collections.StoredMap
Returns true if this map contains the specified key. -
containsValue(Object) - -Method in class com.sleepycat.bdb.collection.StoredMap +
containsValue(Object) - +Method in class com.sleepycat.collections.StoredMap
Returns true if this map contains the specified value. -
copy(DataThang) - -Method in class com.sleepycat.bdb.DataThang -
Copies the data from the given thang to this thang. -
count() - -Method in class com.sleepycat.bdb.DataCursor -
Return the number of duplicates for the current key. -
count() - -Method in class com.sleepycat.bdb.collection.StoredIterator +
count() - +Method in class com.sleepycat.collections.StoredIterator
Returns the number of elements having the same key value as the key value of the element last returned by next() or previous(). -
count(int) - -Method in class com.sleepycat.db.Dbc -
The Dbc.count method returns a count of the number of data items for the key to which the cursor refers. -
CurrentTransaction - class com.sleepycat.bdb.CurrentTransaction.
Provides access to the current transaction for the current thread within the - context of a Berkeley DB environment.
cursor(DbTxn, int) - -Method in class com.sleepycat.db.Db -
The Db.cursor method returns a created database cursor. +
count() - +Method in class com.sleepycat.db.Cursor +
Return a count of the number of data items for the key to which the + cursor refers. +
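As an illustration of cursor traversal and Cursor.count, a sketch under the assumption that db is an open Database handle:

    Cursor cursor = db.openCursor(null, null);   // no transaction, default config
    try {
        DatabaseEntry key = new DatabaseEntry();
        DatabaseEntry data = new DatabaseEntry();
        while (cursor.getNext(key, data, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
            // count() reports how many data items share the current key
            System.out.println(new String(key.getData()) + ": " + cursor.count());
        }
    } finally {
        cursor.close();
    }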
createLockerID() - +Method in class com.sleepycat.db.Environment +
Allocate a locker ID. +
createSecondaryKey(SecondaryDatabase, DatabaseEntry, DatabaseEntry, DatabaseEntry) - +Method in class com.sleepycat.bind.serial.SerialSerialKeyCreator +
  +
createSecondaryKey(Object, Object) - +Method in class com.sleepycat.bind.serial.SerialSerialKeyCreator +
Creates the index key object from primary key and entry objects. +
createSecondaryKey(SecondaryDatabase, DatabaseEntry, DatabaseEntry, DatabaseEntry) - +Method in class com.sleepycat.bind.serial.TupleSerialKeyCreator +
  +
createSecondaryKey(TupleInput, Object, TupleOutput) - +Method in class com.sleepycat.bind.serial.TupleSerialKeyCreator +
Creates the index key entry from primary key tuple entry and + deserialized data entry. +
createSecondaryKey(TupleInput, Object, TupleOutput) - +Method in class com.sleepycat.bind.serial.TupleSerialMarshalledKeyCreator +
  +
createSecondaryKey(SecondaryDatabase, DatabaseEntry, DatabaseEntry, DatabaseEntry) - +Method in class com.sleepycat.bind.tuple.TupleTupleKeyCreator +
  +
createSecondaryKey(TupleInput, TupleInput, TupleOutput) - +Method in class com.sleepycat.bind.tuple.TupleTupleKeyCreator +
Creates the index key from primary key tuple and data tuple. +
createSecondaryKey(TupleInput, TupleInput, TupleOutput) - +Method in class com.sleepycat.bind.tuple.TupleTupleMarshalledKeyCreator +
  +
createSecondaryKey(SecondaryDatabase, DatabaseEntry, DatabaseEntry, DatabaseEntry) - +Method in interface com.sleepycat.db.SecondaryKeyCreator +
Creates a secondary key entry, given a primary key and data entry.
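A minimal hand-written SecondaryKeyCreator, shown only to illustrate the callback shape; most applications would instead use one of the com.sleepycat.bind key creators listed above. The indexing rule (first byte of the data) is invented for the example:

    import com.sleepycat.db.*;

    class FirstByteKeyCreator implements SecondaryKeyCreator {
        public boolean createSecondaryKey(SecondaryDatabase secondary,
                                          DatabaseEntry key,
                                          DatabaseEntry data,
                                          DatabaseEntry result)
                throws DatabaseException {
            byte[] bytes = data.getData();
            if (bytes == null || bytes.length == 0) {
                return false;                        // skip: no secondary key
            }
            result.setData(new byte[] { bytes[0] }); // index by first data byte
            return true;
        }
    }

Such a creator is typically registered through SecondaryConfig.setKeyCreator before the secondary database is opened.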

D

-
DataBinding - interface com.sleepycat.bdb.bind.DataBinding.
The interface implemented by all data-to-object bindings.
DataBuffer - interface com.sleepycat.bdb.bind.DataBuffer.
The interface used in bindings to access the data as a byte array.
DataCursor - class com.sleepycat.bdb.DataCursor.
(internal) Represents a Berkeley DB cursor and adds support for - indices, bindings and key ranges.
DataCursor(DataCursor) - -Constructor for class com.sleepycat.bdb.DataCursor -
Clones a cursor preserving the current position. -
DataCursor(DataView, boolean) - -Constructor for class com.sleepycat.bdb.DataCursor -
Creates a cursor for a given view. -
DataCursor(DataView, boolean, Object) - -Constructor for class com.sleepycat.bdb.DataCursor -
Creates a cursor for a given view and single key range. -
DataCursor(DataView, boolean, Object, boolean, Object, boolean) - -Constructor for class com.sleepycat.bdb.DataCursor -
Creates a cursor for a given view and key range. -
DataDb - class com.sleepycat.bdb.DataDb.
(internal) Wraps a Berkeley DB database (Db) object and adds - normalization of certain flags and environment modes.
DataDb(Db) - -Constructor for class com.sleepycat.bdb.DataDb -
Creates a database wrapper. -
DataFormat - interface com.sleepycat.bdb.bind.DataFormat.
The tag interface implemented by all data formats.
DataIndex - class com.sleepycat.bdb.DataIndex.
Represents a Berkeley DB secondary index.
DataIndex(DataStore, Db, DataFormat, KeyExtractor) - -Constructor for class com.sleepycat.bdb.DataIndex -
Creates an index from a previously opened Db object. -
DataStore - class com.sleepycat.bdb.DataStore.
Represents a Berkeley DB database in the role of a primary data store.
DataStore(Db, DataFormat, DataFormat, PrimaryKeyAssigner) - -Constructor for class com.sleepycat.bdb.DataStore -
Creates a store from a previously opened Db object. -
DataThang - class com.sleepycat.bdb.DataThang.
(internal) An extension of a Berkeley DB thang (Dbt) that supports - the DataBuffer interface for bindings and other added utilities.
DataThang() - -Constructor for class com.sleepycat.bdb.DataThang -
Creates a thang with no data. -
DataThang(byte[]) - -Constructor for class com.sleepycat.bdb.DataThang -
Creates a thang containing the given data. -
DataThang(DataThang) - -Constructor for class com.sleepycat.bdb.DataThang -
Creates a thang with a copy of the data from the given thang. -
dataToInput(DataBuffer) - -Method in class com.sleepycat.bdb.bind.tuple.TupleFormat -
Utility method to create a new tuple input object for reading the data - from a given buffer. -
dataToObject(DataBuffer) - -Method in class com.sleepycat.bdb.RecordNumberBinding -
  -
dataToObject(DataBuffer) - -Method in interface com.sleepycat.bdb.bind.DataBinding -
Converts a data buffer into an Object. -
dataToObject(DataBuffer) - -Method in class com.sleepycat.bdb.bind.ByteArrayBinding -
  -
dataToObject(DataBuffer) - -Method in class com.sleepycat.bdb.bind.serial.SerialFormat -
Utility method for use by bindings to deserialize an object. -
dataToObject(DataBuffer) - -Method in class com.sleepycat.bdb.bind.serial.SerialBinding -
  -
dataToObject(DataBuffer) - -Method in class com.sleepycat.bdb.bind.tuple.TupleInputBinding -
  -
dataToObject(DataBuffer) - -Method in class com.sleepycat.bdb.bind.tuple.TupleBinding -
  -
dataToObject(DataBuffer, DataBuffer) - -Method in interface com.sleepycat.bdb.bind.EntityBinding -
Converts key and value data buffers into an entity Object. -
dataToObject(DataBuffer, DataBuffer) - -Method in class com.sleepycat.bdb.bind.serial.TupleSerialBinding -
  -
dataToObject(DataBuffer, DataBuffer) - -Method in class com.sleepycat.bdb.bind.serial.SerialSerialBinding -
  -
dataToObject(DataBuffer, DataBuffer) - -Method in class com.sleepycat.bdb.bind.tuple.TupleTupleBinding -
  -
dataToObject(Object) - -Method in class com.sleepycat.bdb.bind.serial.SerialBinding -
Can be overridden to convert the deserialized data object to another - object. -
dataToObject(Object, Object) - -Method in class com.sleepycat.bdb.bind.serial.SerialSerialBinding -
Constructs an entity object from deserialized key and value data - objects. -
dataToObject(TupleInput) - -Method in class com.sleepycat.bdb.bind.tuple.TupleMarshalledBinding -
  -
dataToObject(TupleInput) - -Method in class com.sleepycat.bdb.bind.tuple.TupleBinding -
Constructs a key or value object from TupleInput data. -
dataToObject(TupleInput, Object) - -Method in class com.sleepycat.bdb.bind.serial.TupleSerialMarshalledBinding -
  -
dataToObject(TupleInput, Object) - -Method in class com.sleepycat.bdb.bind.serial.TupleSerialBinding -
Constructs an entity object from TupleInput key data and - deserialized value data objects. -
dataToObject(TupleInput, TupleInput) - -Method in class com.sleepycat.bdb.bind.tuple.TupleTupleMarshalledBinding -
  -
dataToObject(TupleInput, TupleInput) - -Method in class com.sleepycat.bdb.bind.tuple.TupleTupleBinding -
Constructs an entity object from TupleInput key and value data - objects. -
dataToRecordNumber(DataBuffer) - -Method in class com.sleepycat.bdb.RecordNumberFormat -
Utility method for use by bindings to translate a data buffer to a - record number integer. -
DataType - interface com.sleepycat.bdb.bind.DataType.
Primitive data type constants.
DataView - class com.sleepycat.bdb.DataView.
(internal) Represents a Berkeley DB database and adds support - for indices, bindings and key ranges.
DataView(DataStore, DataIndex, DataBinding, DataBinding, EntityBinding, boolean) - -Constructor for class com.sleepycat.bdb.DataView -
Creates a view for a given store/index and bindings. -
DATETIME - -Static variable in interface com.sleepycat.bdb.bind.DataType -
Date data type. -
Db - class com.sleepycat.db.Db.
The Db handle is the handle for a Berkeley DB database, which may or may not be part of a database environment.
DB_AFTER - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_AGGRESSIVE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_APPEND - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_ARCH_ABS - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_ARCH_DATA - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_ARCH_LOG - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_ARCH_REMOVE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_AUTO_COMMIT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_BEFORE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_BTREE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_CACHED_COUNTS - -Static variable in class com.sleepycat.db.Db -
  -
DB_CDB_ALLDB - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_CHKSUM - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_CONSUME - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_CONSUME_WAIT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_CREATE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_CURRENT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_CXX_NO_EXCEPTIONS - -Static variable in class com.sleepycat.db.Db -
  -
DB_DBT_MALLOC - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_DBT_PARTIAL - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_DBT_REALLOC - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_DBT_USERMEM - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_DIRECT_DB - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_DIRECT_LOG - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_DIRTY_READ - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_DONOTINDEX - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_DUP - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_DUPSORT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_EID_BROADCAST - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_EID_INVALID - -Static variable in class com.sleepycat.db.Db -
  -
DB_ENCRYPT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_ENCRYPT_AES - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_EXCL - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_FAST_STAT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_FILEOPEN - -Static variable in class com.sleepycat.db.Db -
  -
DB_FIRST - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_FLUSH - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_FORCE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_GET_BOTH - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_GET_BOTH_RANGE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_GET_RECNO - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_HASH - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_INIT_CDB - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_INIT_LOCK - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_INIT_LOG - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_INIT_MPOOL - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_INIT_REP - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_INIT_TXN - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_JOIN_ITEM - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_JOIN_NOSORT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_JOINENV - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_KEYEMPTY - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_KEYEXIST - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_KEYFIRST - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_KEYLAST - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LAST - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LOCK_DEADLOCK - -Static variable in class com.sleepycat.db.Db -
  -
DB_LOCK_DEFAULT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LOCK_EXPIRE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LOCK_GET - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LOCK_GET_TIMEOUT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LOCK_IREAD - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LOCK_IWR - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LOCK_IWRITE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LOCK_MAXLOCKS - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LOCK_MINLOCKS - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LOCK_MINWRITE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LOCK_NOTGRANTED - -Static variable in class com.sleepycat.db.Db -
  -
DB_LOCK_NOWAIT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LOCK_OLDEST - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LOCK_PUT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LOCK_PUT_ALL - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LOCK_PUT_OBJ - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LOCK_RANDOM - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LOCK_READ - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LOCK_TIMEOUT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LOCK_WRITE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LOCK_YOUNGEST - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LOCKDOWN - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_LOG_AUTOREMOVE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_MPOOL_NOFILE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_MULTIPLE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_MULTIPLE_KEY - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_NEXT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_NEXT_DUP - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_NEXT_NODUP - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_NODUPDATA - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_NOLOCKING - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_NOMMAP - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_NOORDERCHK - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_NOOVERWRITE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_NOPANIC - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_NOSERVER - -Static variable in class com.sleepycat.db.Db -
  -
DB_NOSERVER_HOME - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_NOSERVER_ID - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_NOSYNC - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_NOTFOUND - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_OLD_VERSION - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_ORDERCHKONLY - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_OVERWRITE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_PAGE_NOTFOUND - -Static variable in class com.sleepycat.db.Db -
  -
DB_PANIC_ENVIRONMENT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_POSITION - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_PREV - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_PREV_NODUP - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_PRINTABLE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_PRIORITY_DEFAULT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_PRIORITY_HIGH - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_PRIORITY_LOW - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_PRIORITY_VERY_HIGH - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_PRIORITY_VERY_LOW - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_PRIVATE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_QUEUE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_RDONLY - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_RECNO - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_RECNUM - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_RECORDCOUNT - -Static variable in class com.sleepycat.db.Db -
  -
DB_RECOVER - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_RECOVER_FATAL - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_REGION_INIT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_RENUMBER - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_REP_CLIENT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_REP_DUPMASTER - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_REP_HANDLE_DEAD - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_REP_HOLDELECTION - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_REP_ISPERM - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_REP_LOGSONLY - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_REP_MASTER - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_REP_NEWMASTER - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_REP_NEWSITE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_REP_NOBUFFER - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_REP_NOTPERM - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_REP_OUTDATED - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_REP_PERMANENT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_REP_UNAVAIL - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_REVSPLITOFF - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_RMW - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_RPCCLIENT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_RUNRECOVERY - -Static variable in class com.sleepycat.db.Db -
  -
DB_SALVAGE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_SECONDARY_BAD - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_SET - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_SET_LOCK_TIMEOUT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_SET_RANGE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_SET_RECNO - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_SET_TXN_TIMEOUT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_SNAPSHOT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_STAT_CLEAR - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_SYSTEM_MEM - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_THREAD - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_TIME_NOTGRANTED - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_TRUNCATE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_TXN_ABORT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_TXN_APPLY - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_TXN_BACKWARD_ROLL - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_TXN_FORWARD_ROLL - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_TXN_NOSYNC - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_TXN_NOT_DURABLE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_TXN_NOWAIT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_TXN_PRINT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_TXN_SYNC - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_TXN_WRITE_NOSYNC - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_UNKNOWN - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_UPGRADE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_USE_ENVIRON - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_USE_ENVIRON_ROOT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_VERB_CHKPOINT - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_VERB_DEADLOCK - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_VERB_RECOVERY - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_VERB_REPLICATION - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_VERB_WAITSFOR - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_VERIFY - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_VERIFY_BAD - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_VERSION_MAJOR - -Static variable in class com.sleepycat.db.Db -
  -
DB_VERSION_MINOR - -Static variable in class com.sleepycat.db.Db -
  -
DB_VERSION_PATCH - -Static variable in class com.sleepycat.db.Db -
  -
DB_WRITECURSOR - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_XA_CREATE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_XIDDATASIZE - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
DB_YIELDCPU - -Static variable in class com.sleepycat.db.Db -
A constant used to configure the system. -
Db(DbEnv, int) - -Constructor for class com.sleepycat.db.Db -
The constructor creates a Db object that is the handle for a Berkeley DB database. -
DbAppDispatch - interface com.sleepycat.db.DbAppDispatch.
An interface specifying a recovery function, which recovers application-specific actions.
DbAppendRecno - interface com.sleepycat.db.DbAppendRecno.
An interface specifying a callback function that modifies stored data based on a generated key.
dbAppendRecno(Db, Dbt, int) - -Method in interface com.sleepycat.db.DbAppendRecno -
The DbAppendRecno interface is used by the Db.setAppendRecno method. -
DbBtreeCompare - interface com.sleepycat.db.DbBtreeCompare.
An interface specifying a comparison function, which imposes a total ordering on the keys in a Btree database.
DbBtreePrefix - interface com.sleepycat.db.DbBtreePrefix.
An interface specifying a comparison function, which specifies the number of bytes needed to differentiate Btree keys.
DbBtreeStat - class com.sleepycat.db.DbBtreeStat.
The DbBtreeStat object is used to return Btree or Recno database statistics.
DbBtreeStat() - -Constructor for class com.sleepycat.db.DbBtreeStat -
  -
Dbc - class com.sleepycat.db.Dbc.
The Dbc object is the handle for a cursor into a Berkeley DB database.
DbClient - interface com.sleepycat.db.DbClient.
The DbClient object is used to encapsulate a reference to an RPC client.
DbDeadlockException - exception com.sleepycat.db.DbDeadlockException.
This information describes the DbDeadlockException class and how it is used in the Berkeley DB library.
DbDupCompare - interface com.sleepycat.db.DbDupCompare.
An interface specifying a comparison function, which imposes a total ordering on the duplicate data items in a Btree database.
DbEnv - class com.sleepycat.db.DbEnv.
The DbEnv object is the handle for a Berkeley DB environment -- a collection including support for some or all of caching, locking, logging and transaction subsystems, as well as databases and log files.
DbEnv.RepProcessMessage - class com.sleepycat.db.DbEnv.RepProcessMessage.
 
DbEnv.RepProcessMessage() - -Constructor for class com.sleepycat.db.DbEnv.RepProcessMessage -
  -
DbEnv(int) - -Constructor for class com.sleepycat.db.DbEnv -
The constructor creates the DbEnv object. -
DbEnvFeedback - interface com.sleepycat.db.DbEnvFeedback.
Deprecated. As of Berkeley DB 4.2, replaced by DbEnvFeedbackHandler
DbEnvFeedbackHandler - interface com.sleepycat.db.DbEnvFeedbackHandler.
The DbEnvFeedbackHandler interface is used by the DbEnv.setFeedback method.
DbErrcall - interface com.sleepycat.db.DbErrcall.
Deprecated. As of Berkeley DB 4.2, replaced by DbErrorHandler
DbErrorHandler - interface com.sleepycat.db.DbErrorHandler.
An interface specifying an application-specific error reporting function.
DbException - exception com.sleepycat.db.DbException.
This information describes the DbException class and how it is used by the various Berkeley DB classes.
DbException(String) - -Constructor for class com.sleepycat.db.DbException -
The DbException constructor returns an instance of the DbException class containing the string. -
DbException(String, int) - -Constructor for class com.sleepycat.db.DbException -
The DbException constructor returns an instance of the DbException class containing the string and the encapsulated errno. -
DbException(String, int, DbEnv) - -Constructor for class com.sleepycat.db.DbException -
The DbException constructor returns an instance of the DbException class containing the string, the encapsulated errno, and the database environment. -
DbFeedback - interface com.sleepycat.db.DbFeedback.
Deprecated. As of Berkeley DB 4.2, replaced by DbFeedbackHandler
DbFeedbackHandler - interface com.sleepycat.db.DbFeedbackHandler.
The DbFeedbackHandler interface is used by the Db.setFeedback method.
DbHash - interface com.sleepycat.db.DbHash.
An interface specifying a hashing function, which imposes a total ordering on the Hash database.
DbHashStat - class com.sleepycat.db.DbHashStat.
The DbHashStat object is used to return Hash database statistics.
DbHashStat() - -Constructor for class com.sleepycat.db.DbHashStat -
  -
DbKeyRange - class com.sleepycat.db.DbKeyRange.
 
DbKeyRange() - -Constructor for class com.sleepycat.db.DbKeyRange -
  -
DbLock - class com.sleepycat.db.DbLock.
The locking interfaces for the Berkeley DB database environment are methods of the DbEnv handle.
DbLockNotGrantedException - exception com.sleepycat.db.DbLockNotGrantedException.
This information describes the DbLockNotGrantedException class and how it is used by the various Db* classes.
DbLockRequest - class com.sleepycat.db.DbLockRequest.
The DbLockRequest object is used to encapsulate a single lock request.
DbLockRequest(int, int, Dbt, DbLock) - -Constructor for class com.sleepycat.db.DbLockRequest -
The DbLockRequest constructor constructs a DbLockRequest with the specified operation, mode and lock, for the specified object. -
DbLockRequest(int, int, Dbt, DbLock, int) - -Constructor for class com.sleepycat.db.DbLockRequest -
The DbLockRequest constructor constructs a DbLockRequest with the specified operation, mode, lock and timeout for the specified object. -
DbLockStat - class com.sleepycat.db.DbLockStat.
The DbLockStat object is used to return lock region statistics.
DbLockStat() - -Constructor for class com.sleepycat.db.DbLockStat -
  -
DbLogc - class com.sleepycat.db.DbLogc.
The DbLogc object is the handle for a cursor into the log files, supporting sequential access to the records stored in log files.
DbLogStat - class com.sleepycat.db.DbLogStat.
The DbLogStat object is used to return logging subsystem statistics.
DbLogStat() - -Constructor for class com.sleepycat.db.DbLogStat -
  -
DbLsn - class com.sleepycat.db.DbLsn.
The DbLsn object is a log sequence number which specifies a unique location in a log file.
DbLsn(int, int) - -Constructor for class com.sleepycat.db.DbLsn -
The DbLsn constructor constructs a DbLsn with the specified file and offset. -
DbMemoryException - exception com.sleepycat.db.DbMemoryException.
This information describes the DbMemoryException class and how it is used by the various Db* classes.
DbMpoolFile - class com.sleepycat.db.DbMpoolFile.
The memory pool interfaces for the Berkeley DB database environment are methods of the DbEnv handle.
DbMpoolFStat - class com.sleepycat.db.DbMpoolFStat.
The DbMpoolFStat object is used to return memory pool per-file statistics.
DbMpoolFStat() - -Constructor for class com.sleepycat.db.DbMpoolFStat -
  -
DbMpoolStat - class com.sleepycat.db.DbMpoolStat.
The DbMpoolStat object is used to return memory pool statistics.
DbMpoolStat() - -Constructor for class com.sleepycat.db.DbMpoolStat -
  -
DbMultipleDataIterator - class com.sleepycat.db.DbMultipleDataIterator.
The DbMultipleDataIterator class is used to iterate through data returned using the Db.DB_MULTIPLE flag from a database belonging to any access method.
DbMultipleDataIterator(Dbt) - -Constructor for class com.sleepycat.db.DbMultipleDataIterator -
The constructor takes the data Dbt returned by the call to Db.get or Dbc.get that used the Db.DB_MULTIPLE flag. -
DbMultipleKeyDataIterator - class com.sleepycat.db.DbMultipleKeyDataIterator.
The DbMultipleKeyDataIterator class is used to iterate through data returned using the Db.DB_MULTIPLE_KEY flag from a database belonging to the Btree or Hash access methods.
DbMultipleKeyDataIterator(Dbt) - -Constructor for class com.sleepycat.db.DbMultipleKeyDataIterator -
The constructor takes the data Dbt returned by the call to Db.get or Dbc.get that used the Db.DB_MULTIPLE_KEY flag. -
DbMultipleRecnoDataIterator - class com.sleepycat.db.DbMultipleRecnoDataIterator.
This class is used to iterate through data returned using the Db.DB_MULTIPLE_KEY flag from a database belonging to the Recno or Queue access methods.
DbMultipleRecnoDataIterator(Dbt) - -Constructor for class com.sleepycat.db.DbMultipleRecnoDataIterator -
The constructor takes the data Dbt returned by the call to Db.get or Dbc.get that used the Db.DB_MULTIPLE_KEY flag. -
DbPanicHandler - interface com.sleepycat.db.DbPanicHandler.
An interface specifying a function to handle database environment panics.
DbPreplist - class com.sleepycat.db.DbPreplist.
The DbPreplist object is used to encapsulate a single prepared, but not yet resolved, transaction.
DbQueueStat - class com.sleepycat.db.DbQueueStat.
The DbQueueStat object is used to return Queue database statistics.
DbQueueStat() - -Constructor for class com.sleepycat.db.DbQueueStat -
  -
dbremove(DbTxn, String, String, int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.dbRemove(DbTxn,String,String,int) -
dbRemove(DbTxn, String, String, int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.dbRemove method removes the database specified by the file and database parameters. -
dbrename(DbTxn, String, String, String, int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.dbRename(DbTxn,String,String,String,int) -
dbRename(DbTxn, String, String, String, int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.dbRename method renames the database specified by the file and database parameters to newname. -
DbRepStat - class com.sleepycat.db.DbRepStat.
The DbRepStat object is used to return replication subsystem statistics.
DbRepStat() - -Constructor for class com.sleepycat.db.DbRepStat -
  -
DbRepTransport - interface com.sleepycat.db.DbRepTransport.
An interface specifying a replication transmit function, which sends information to other members of the replication group.
DbRunRecoveryException - exception com.sleepycat.db.DbRunRecoveryException.
This information describes the DbRunRecoveryException class and how it is used by the various Berkeley DB classes.
DbSecondaryKeyCreate - interface com.sleepycat.db.DbSecondaryKeyCreate.
An interface specifying a function which constructs secondary keys from primary key and data items.
Dbt - class com.sleepycat.db.Dbt.
This information describes the specific details of the Dbt class, used to encode keys and data items in a database.
Dbt() - -Constructor for class com.sleepycat.db.Dbt -
Construct an empty Dbt. -
Dbt(byte[]) - -Constructor for class com.sleepycat.db.Dbt -
Construct a Dbt where the data is the contents of the array and the Dbt's length is set to the length of the array. -
Dbt(byte[], int, int) - -Constructor for class com.sleepycat.db.Dbt -
Construct a Dbt from len bytes from the array, starting at off. -
Dbt(Object) - -Constructor for class com.sleepycat.db.Dbt -
Construct a Dbt where the data is the serialized form of the Object. -
DbTxn - class com.sleepycat.db.DbTxn.
The DbTxn object is the handle for a transaction.
DbTxnStat - class com.sleepycat.db.DbTxnStat.
The DbTxnStat object is used to return transaction subsystem statistics.
DbTxnStat.Active - class com.sleepycat.db.DbTxnStat.Active.
 
DbTxnStat.Active() - -Constructor for class com.sleepycat.db.DbTxnStat.Active -
  -
DbTxnStat() - -Constructor for class com.sleepycat.db.DbTxnStat -
  -
DEFAULT_BUMP_SIZE - -Static variable in class com.sleepycat.bdb.util.FastOutputStream -
  -
DEFAULT_INIT_SIZE - -Static variable in class com.sleepycat.bdb.util.FastOutputStream -
  -
del(DbTxn, Dbt, int) - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by delete(DbTxn,Dbt,int) -
del(int) - -Method in class com.sleepycat.db.Dbc -
Deprecated. As of Berkeley DB 4.2, replaced by Dbc.delete(int) -
delete() - -Method in class com.sleepycat.bdb.DataCursor -
Perform an arbitrary database 'delete' operation. -
delete(DataThang, int) - -Method in class com.sleepycat.bdb.DataDb -
Performs a general database 'delete' operation. -
delete(DbTxn, Dbt, int) - -Method in class com.sleepycat.db.Db -
The Db.delete method removes key/data pairs from the database. -
delete(int) - -Method in class com.sleepycat.db.Dbc -
The Dbc.delete method deletes the key/data pair to which the cursor refers. -
dirtyReadCollection(Collection) - -Static method in class com.sleepycat.bdb.collection.StoredCollections +
DEFAULT - +Static variable in class com.sleepycat.db.CacheFilePriority +
The default priority. +
DEFAULT - +Static variable in class com.sleepycat.db.CheckpointConfig +
Default configuration used if null is passed to + Environment.checkpoint. +
DEFAULT - +Static variable in class com.sleepycat.db.CursorConfig +
Default configuration used if null is passed to methods that create a + cursor. +
DEFAULT - +Static variable in class com.sleepycat.db.DatabaseConfig +
  +
DEFAULT - +Static variable in class com.sleepycat.db.EnvironmentConfig +
  +
DEFAULT - +Static variable in class com.sleepycat.db.JoinConfig +
Default configuration used if null is passed to Database.join +
DEFAULT - +Static variable in class com.sleepycat.db.LockDetectMode +
Use whatever lock policy was specified when the database environment + was created. +
DEFAULT - +Static variable in class com.sleepycat.db.LockMode +
Acquire read locks for read operations and write locks for write + operations. +
DEFAULT - +Static variable in class com.sleepycat.db.SecondaryConfig +
  +
DEFAULT - +Static variable in class com.sleepycat.db.SequenceConfig +
  +
DEFAULT - +Static variable in class com.sleepycat.db.StatsConfig +
  +
DEFAULT - +Static variable in class com.sleepycat.db.TransactionConfig +
  +
DEFAULT - +Static variable in class com.sleepycat.db.VerifyConfig +
Default configuration used if null is passed to + Database.verify. +
DEFAULT_BUMP_SIZE - +Static variable in class com.sleepycat.util.FastOutputStream +
  +
DEFAULT_INIT_SIZE - +Static variable in class com.sleepycat.util.FastOutputStream +
  +
DEFAULT_MAX_RETRIES - +Static variable in class com.sleepycat.collections.TransactionRunner +
The default maximum number of retries. +
DEGREE_2 - +Static variable in class com.sleepycat.db.CursorConfig +
A convenience instance to configure a cursor for degree 2 isolation. +
DEGREE_2 - +Static variable in class com.sleepycat.db.LockMode +
Degree 2 isolation provides for cursor stability but not repeatable + reads. +
DIRTY_READ - +Static variable in class com.sleepycat.db.CursorConfig +
A convenience instance to configure read operations performed by the + cursor to return modified but not yet committed data. +
DIRTY_READ - +Static variable in class com.sleepycat.db.LockMode +
Read modified but not yet committed data. +
Database - class com.sleepycat.db.Database.
A database handle.
Database(String, String, DatabaseConfig) - +Constructor for class com.sleepycat.db.Database +
Open a database. +
DatabaseConfig - class com.sleepycat.db.DatabaseConfig.
Specify the attributes of a database.
DatabaseConfig() - +Constructor for class com.sleepycat.db.DatabaseConfig +
An instance created using the default constructor is initialized with + the system's default settings. +
DatabaseEntry - class com.sleepycat.db.DatabaseEntry.
Encodes database key and data items as a byte array.
DatabaseEntry() - +Constructor for class com.sleepycat.db.DatabaseEntry +
Construct a DatabaseEntry with null data. +
DatabaseEntry(byte[]) - +Constructor for class com.sleepycat.db.DatabaseEntry +
Construct a DatabaseEntry with a given byte array. +
DatabaseEntry(byte[], int, int) - +Constructor for class com.sleepycat.db.DatabaseEntry +
Constructs a DatabaseEntry with a given byte array, offset and size. +
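A short sketch of DatabaseEntry on the read path; db is an assumed open Database and the lookup key is arbitrary:

    DatabaseEntry key = new DatabaseEntry("fruit".getBytes());
    DatabaseEntry data = new DatabaseEntry();
    // A non-transactional read; LockMode.DEFAULT takes ordinary read locks.
    if (db.get(null, key, data, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
        String value = new String(data.getData(), data.getOffset(), data.getSize());
        System.out.println(value);
    }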
DatabaseException - exception com.sleepycat.db.DatabaseException.
The root of all database exceptions.
DatabaseStats - class com.sleepycat.db.DatabaseStats.
Statistics for a single database.
DatabaseType - class com.sleepycat.db.DatabaseType.
Database types.
DeadlockException - exception com.sleepycat.db.DeadlockException.
DeadlockException is thrown to a thread of control when multiple threads +competing for a lock are +deadlocked, when a lock request has timed out +or when a lock request would need to block and the transaction has been +configured to not wait for locks.
DoubleBinding - class com.sleepycat.bind.tuple.DoubleBinding.
A concrete TupleBinding for a Double primitive + wrapper or a double primitive.
DoubleBinding() - +Constructor for class com.sleepycat.bind.tuple.DoubleBinding +
  +
delete() - +Method in class com.sleepycat.db.Cursor +
Delete the key/data pair to which the cursor refers. +
delete(Transaction, DatabaseEntry) - +Method in class com.sleepycat.db.Database +
Remove key/data pairs from the database. +
detectDeadlocks(LockDetectMode) - +Method in class com.sleepycat.db.Environment +
Run one iteration of the deadlock detector. +
dirtyReadCollection(Collection) - +Static method in class com.sleepycat.collections.StoredCollections
Creates a dirty-read collection from a given stored collection. -
dirtyReadList(List) - -Static method in class com.sleepycat.bdb.collection.StoredCollections +
dirtyReadList(List) - +Static method in class com.sleepycat.collections.StoredCollections
Creates a dirty-read list from a given stored list. -
dirtyReadMap(Map) - -Static method in class com.sleepycat.bdb.collection.StoredCollections +
dirtyReadMap(Map) - +Static method in class com.sleepycat.collections.StoredCollections
Creates a dirty-read map from a given stored map. -
dirtyReadSet(Set) - -Static method in class com.sleepycat.bdb.collection.StoredCollections +
dirtyReadSet(Set) - +Static method in class com.sleepycat.collections.StoredCollections
Creates a dirty-read set from a given stored set. -
dirtyReadSortedMap(SortedMap) - -Static method in class com.sleepycat.bdb.collection.StoredCollections +
dirtyReadSortedMap(SortedMap) - +Static method in class com.sleepycat.collections.StoredCollections
Creates a dirty-read sorted map from a given stored sorted map. -
dirtyReadSortedSet(SortedSet) - -Static method in class com.sleepycat.bdb.collection.StoredCollections +
dirtyReadSortedSet(SortedSet) - +Static method in class com.sleepycat.collections.StoredCollections
Creates a dirty-read sorted set from a given stored sorted set. -
dirtyReadView(boolean) - -Method in class com.sleepycat.bdb.DataView -
Returns a new view with a specified dirtyRead setting. -
discard(int) - -Method in class com.sleepycat.db.DbTxn -
The DbTxn.discard method frees up all the per-process resources associated with the specified DbTxn handle, neither committing nor aborting the transaction. -
DOUBLE - -Static variable in interface com.sleepycat.bdb.bind.DataType -
Double data type. -
doWork() - -Method in interface com.sleepycat.bdb.TransactionWorker +
discard() - +Method in class com.sleepycat.db.Transaction +
Free up all the per-process resources associated with the specified + Transaction handle, neither committing nor aborting the + transaction. +
doWork() - +Method in interface com.sleepycat.collections.TransactionWorker
Perform the work for a single transaction. -
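TransactionWorker is normally driven by TransactionRunner (whose DEFAULT_MAX_RETRIES appears above); a fragment-level sketch, where env is a transactional Environment and map a writable StoredMap created elsewhere, both assumptions of the example:

    TransactionRunner runner = new TransactionRunner(env);
    runner.run(new TransactionWorker() {
        public void doWork() throws Exception {
            // Runs inside a single transaction; on deadlock the runner
            // aborts and retries, up to its maximum retry count.
            map.put("color", "blue");
        }
    });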
dump(Dbt, PrintStream) - -Static method in class com.sleepycat.bdb.DataThang -
Prints the byte array of the given thing to the given stream using - toString() to convert the bytes to a string. -
dump(PrintStream) - -Method in class com.sleepycat.bdb.DataThang -
Prints the byte array of this thing to the given stream using toString() - to convert the bytes to a string. -
dup(int) - -Method in class com.sleepycat.db.Dbc -
The Dbc.dup method creates a new cursor that uses the same transaction and locker ID as the original cursor. -
dupCursor(Dbc, boolean, int) - -Method in class com.sleepycat.bdb.DataDb -
Duplicates a cursor for this database. -
duplicates(Object) - -Method in class com.sleepycat.bdb.collection.StoredMap -
Returns a new collection containing the values mapped to the given - key in this map. +
doubleToEntry(double, DatabaseEntry) - +Static method in class com.sleepycat.bind.tuple.DoubleBinding +
Converts a simple double value into an entry buffer. +
dup(boolean) - +Method in class com.sleepycat.db.Cursor +
Return a new cursor with the same transaction and locker ID as the + original cursor. +
dup(boolean) - +Method in class com.sleepycat.db.SecondaryCursor +
Returns a new SecondaryCursor for the same transaction as + the original cursor. +
dupSecondary(boolean) - +Method in class com.sleepycat.db.SecondaryCursor +
Returns a new copy of the cursor as a SecondaryCursor. +
duplicates(Object) - +Method in class com.sleepycat.collections.StoredMap +
Returns a new collection containing the values mapped to the given key + in this map.
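How such a map is typically constructed, sketched here; the database handle, class catalog, and use of sorted duplicates are assumptions of the example rather than statements about the patch:

    // db must have been opened with sorted duplicates for duplicates() to
    // return more than one value per key; catalog is a StoredClassCatalog.
    EntryBinding keyBinding = new SerialBinding(catalog, String.class);
    EntryBinding valueBinding = new SerialBinding(catalog, String.class);
    StoredMap map = new StoredMap(db, keyBinding, valueBinding, true /* writeAllowed */);

    map.put("fruit", "apple");
    java.util.Collection apples = map.duplicates("fruit");   // all values under one key
    java.util.Map dirtyMap = StoredCollections.dirtyReadMap(map); // reads uncommitted data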

E

-
EINVAL - -Static variable in class com.sleepycat.bdb.DataDb -
  -
ENOMEM - -Static variable in class com.sleepycat.bdb.DataDb -
  -
EntityBinding - interface com.sleepycat.bdb.bind.EntityBinding.
The interface implemented by all entity or key/data-to-object bindings.
entrySet() - -Method in class com.sleepycat.bdb.collection.StoredMap +
EID_BROADCAST - +Static variable in interface com.sleepycat.db.ReplicationTransport +
A message that should be broadcast to every environment in the + replication group. +
EID_INVALID - +Static variable in interface com.sleepycat.db.ReplicationTransport +
An invalid environment ID, and may be used to initialize environment ID + variables that are subsequently checked for validity. +
EXPIRE - +Static variable in class com.sleepycat.db.LockDetectMode +
Reject lock requests which have timed out. +
EntityBinding - interface com.sleepycat.bind.EntityBinding.
A binding between a key-value entry pair and an entity object.
EntryBinding - interface com.sleepycat.bind.EntryBinding.
A binding between a key or data entry and a key or data object.
Environment - class com.sleepycat.db.Environment.
A database environment.
Environment(File, EnvironmentConfig) - +Constructor for class com.sleepycat.db.Environment +
Create a database environment handle. +
EnvironmentConfig - class com.sleepycat.db.EnvironmentConfig.
Specifies the attributes of an environment.
EnvironmentConfig() - +Constructor for class com.sleepycat.db.EnvironmentConfig +
Create an EnvironmentConfig initialized with the system default settings. +
ErrorHandler - interface com.sleepycat.db.ErrorHandler.
An interface specifying a callback function to be called when an error +occurs in the Berkeley DB library.
ExceptionUnwrapper - class com.sleepycat.util.ExceptionUnwrapper.
Unwraps nested exceptions by calling the ExceptionWrapper.getDetail() method for exceptions that implement the + ExceptionWrapper interface.
ExceptionUnwrapper() - +Constructor for class com.sleepycat.util.ExceptionUnwrapper +
  +
ExceptionWrapper - interface com.sleepycat.util.ExceptionWrapper.
Interface implemented by exceptions that can contain nested exceptions.
electReplicationMaster(int, int, int, int) - +Method in class com.sleepycat.db.Environment +
Hold an election for the master of a replication group. +
entrySet() - +Method in class com.sleepycat.collections.StoredMap
Returns a set view of the mappings contained in this map. -
envid - -Variable in class com.sleepycat.db.DbEnv.RepProcessMessage -
The envid field contains the local identifier of the environment returned by the DbEnv.replicationProcessMessage method. -
equal - -Variable in class com.sleepycat.db.DbKeyRange -
A value between 0 and 1, the proportion of keys equal to the - specified key. -
equals(Dbt) - -Method in class com.sleepycat.bdb.DataThang -
Returns whether the byte array of this thang is equal to that of the - given thang. -
equals(Object) - -Method in class com.sleepycat.bdb.RecordNumberFormat -
Test for equality. -
equals(Object) - -Method in class com.sleepycat.bdb.collection.StoredMap -
Compares the specified object with this map for equality. -
equals(Object) - -Method in class com.sleepycat.bdb.collection.StoredList -
Compares the specified object with this list for equality. -
equals(Object) - -Method in class com.sleepycat.bdb.collection.StoredCollection +
entryToBoolean(DatabaseEntry) - +Static method in class com.sleepycat.bind.tuple.BooleanBinding +
Converts an entry buffer into a simple boolean value. +
entryToByte(DatabaseEntry) - +Static method in class com.sleepycat.bind.tuple.ByteBinding +
Converts an entry buffer into a simple byte value. +
entryToChar(DatabaseEntry) - +Static method in class com.sleepycat.bind.tuple.CharacterBinding +
Converts an entry buffer into a simple char value. +
entryToDouble(DatabaseEntry) - +Static method in class com.sleepycat.bind.tuple.DoubleBinding +
Converts an entry buffer into a simple double value. +
entryToFloat(DatabaseEntry) - +Static method in class com.sleepycat.bind.tuple.FloatBinding +
Converts an entry buffer into a simple float value. +
entryToInput(DatabaseEntry) - +Static method in class com.sleepycat.bind.tuple.TupleBinding +
Utility method to create a new tuple input object for reading the data + from a given buffer. +
entryToInt(DatabaseEntry) - +Static method in class com.sleepycat.bind.tuple.IntegerBinding +
Converts an entry buffer into a simple int value. +
entryToLong(DatabaseEntry) - +Static method in class com.sleepycat.bind.tuple.LongBinding +
Converts an entry buffer into a simple long value. +
entryToObject(DatabaseEntry) - +Method in class com.sleepycat.bind.ByteArrayBinding +
  +
entryToObject(DatabaseEntry, DatabaseEntry) - +Method in interface com.sleepycat.bind.EntityBinding +
Converts key and data entry buffers into an entity Object. +
entryToObject(DatabaseEntry) - +Method in interface com.sleepycat.bind.EntryBinding +
Converts an entry buffer into an Object. +
entryToObject(DatabaseEntry) - +Method in class com.sleepycat.bind.RecordNumberBinding +
  +
entryToObject(DatabaseEntry) - +Method in class com.sleepycat.bind.serial.SerialBinding +
Deserialize an object from an entry buffer. +
entryToObject(DatabaseEntry, DatabaseEntry) - +Method in class com.sleepycat.bind.serial.SerialSerialBinding +
  +
entryToObject(Object, Object) - +Method in class com.sleepycat.bind.serial.SerialSerialBinding +
Constructs an entity object from deserialized key and data objects. +
entryToObject(DatabaseEntry, DatabaseEntry) - +Method in class com.sleepycat.bind.serial.TupleSerialBinding +
  +
entryToObject(TupleInput, Object) - +Method in class com.sleepycat.bind.serial.TupleSerialBinding +
Constructs an entity object from TupleInput key entry and + deserialized data entry objects. +
entryToObject(TupleInput, Object) - +Method in class com.sleepycat.bind.serial.TupleSerialMarshalledBinding +
  +
entryToObject(TupleInput) - +Method in class com.sleepycat.bind.tuple.BooleanBinding +
  +
entryToObject(TupleInput) - +Method in class com.sleepycat.bind.tuple.ByteBinding +
  +
entryToObject(TupleInput) - +Method in class com.sleepycat.bind.tuple.CharacterBinding +
  +
entryToObject(TupleInput) - +Method in class com.sleepycat.bind.tuple.DoubleBinding +
  +
entryToObject(TupleInput) - +Method in class com.sleepycat.bind.tuple.FloatBinding +
  +
entryToObject(TupleInput) - +Method in class com.sleepycat.bind.tuple.IntegerBinding +
  +
entryToObject(TupleInput) - +Method in class com.sleepycat.bind.tuple.LongBinding +
  +
entryToObject(TupleInput) - +Method in class com.sleepycat.bind.tuple.ShortBinding +
  +
entryToObject(TupleInput) - +Method in class com.sleepycat.bind.tuple.StringBinding +
  +
entryToObject(DatabaseEntry) - +Method in class com.sleepycat.bind.tuple.TupleBinding +
  +
entryToObject(TupleInput) - +Method in class com.sleepycat.bind.tuple.TupleBinding +
Constructs a key or data object from a TupleInput entry. +
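[Editor's note] entryToObject(TupleInput) is the half of TupleBinding that applications override when defining their own key or data format; the objectToEntry(Object, TupleOutput) counterpart used below is assumed from the same class (it is indexed under O, outside this excerpt). A minimal custom binding sketch:

    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    // Binds a two-field value (name, count) to a tuple-formatted entry.
    public class PartBinding extends TupleBinding {

        public Object entryToObject(TupleInput input) {
            String name = input.readString();
            int count = input.readInt();
            return new Object[] { name, new Integer(count) };
        }

        public void objectToEntry(Object object, TupleOutput output) {
            Object[] part = (Object[]) object;
            output.writeString((String) part[0]);
            output.writeInt(((Integer) part[1]).intValue());
        }
    }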
entryToObject(DatabaseEntry) - +Method in class com.sleepycat.bind.tuple.TupleInputBinding +
  +
entryToObject(TupleInput) - +Method in class com.sleepycat.bind.tuple.TupleMarshalledBinding +
  +
entryToObject(DatabaseEntry, DatabaseEntry) - +Method in class com.sleepycat.bind.tuple.TupleTupleBinding +
  +
entryToObject(TupleInput, TupleInput) - +Method in class com.sleepycat.bind.tuple.TupleTupleBinding +
Constructs an entity object from TupleInput key and data + entries. +
entryToObject(TupleInput, TupleInput) - +Method in class com.sleepycat.bind.tuple.TupleTupleMarshalledBinding +
  +
entryToRecordNumber(DatabaseEntry) - +Static method in class com.sleepycat.bind.RecordNumberBinding +
Utility method for use by bindings to translate an entry buffer to a + record number integer. +
entryToShort(DatabaseEntry) - +Static method in class com.sleepycat.bind.tuple.ShortBinding +
Converts an entry buffer into a simple short value. +
entryToString(DatabaseEntry) - +Static method in class com.sleepycat.bind.tuple.StringBinding +
Converts an entry buffer into a simple String value. +
equal - +Variable in class com.sleepycat.db.KeyRange +
Zero if there is no matching key, and non-zero otherwise. +
equals(Object) - +Method in class com.sleepycat.collections.MapEntryParameter +
Compares this entry to a given entry as specified by Map.Entry.equals(java.lang.Object). +
equals(Object) - +Method in class com.sleepycat.collections.StoredCollection
Compares the specified object with this collection for equality. -
equals(Object) - -Method in class com.sleepycat.bdb.collection.MapEntry -
Compares this entry to a given entry as specified by Map.Entry.equals(java.lang.Object). -
equals(Object) - -Method in class com.sleepycat.db.DbTxn -
  -
err(int, String) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.err, DbEnv.errx, Db.err and Db.errx methods provide error-messaging functionality for applications written using the Berkeley DB library. -
err(int, String) - -Method in class com.sleepycat.db.Db -
The DbEnv.err, DbEnv.errx, Db.err and Db.errx methods provide error-messaging functionality for applications written using the Berkeley DB library. -
errcall(String, String) - -Method in interface com.sleepycat.db.DbErrcall -
Deprecated. As of Berkeley DB 4.2, replaced by DbErrorHandler.error(String,String) -
error(String, String) - -Method in interface com.sleepycat.db.DbErrorHandler -
In some cases, when an error occurs, Berkeley DB will call the DbErrorHandler interface with additional error information. -
errx(String) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.errx and Db.errx methods perform identically to the DbEnv.err and Db.err methods, except that they do not append the final separator characters and standard error string to the error message. -
errx(String) - -Method in class com.sleepycat.db.Db -
The DbEnv.errx and Db.errx methods perform identically to the DbEnv.err and Db.err methods, except that they do not append the final separator characters and standard error string to the error message. -
ExceptionUnwrapper - class com.sleepycat.bdb.util.ExceptionUnwrapper.
Unwraps nested exceptions by calling the ExceptionWrapper.getDetail() method for exceptions that implement the - ExceptionWrapper interface.
ExceptionUnwrapper() - -Constructor for class com.sleepycat.bdb.util.ExceptionUnwrapper -
  -
ExceptionWrapper - interface com.sleepycat.bdb.util.ExceptionWrapper.
Interface implemented by exceptions that can contain nested exceptions.
extractIndexKey(DataBuffer, DataBuffer, DataBuffer) - -Method in interface com.sleepycat.bdb.bind.KeyExtractor -
Extracts the index key data from primary key and value buffers. -
extractIndexKey(DataBuffer, DataBuffer, DataBuffer) - -Method in class com.sleepycat.bdb.bind.serial.TupleSerialKeyExtractor -
  -
extractIndexKey(DataBuffer, DataBuffer, DataBuffer) - -Method in class com.sleepycat.bdb.bind.serial.SerialSerialKeyExtractor -
  -
extractIndexKey(DataBuffer, DataBuffer, DataBuffer) - -Method in class com.sleepycat.bdb.bind.tuple.TupleTupleKeyExtractor -
  -
extractIndexKey(Object, Object) - -Method in class com.sleepycat.bdb.bind.serial.SerialSerialKeyExtractor -
Extracts the index key data object from primary key and value data - objects. -
extractIndexKey(TupleInput, Object, TupleOutput) - -Method in class com.sleepycat.bdb.bind.serial.TupleSerialMarshalledKeyExtractor -
  -
extractIndexKey(TupleInput, Object, TupleOutput) - -Method in class com.sleepycat.bdb.bind.serial.TupleSerialKeyExtractor -
Extracts the index key data from primary key tuple data and deserialized - value data. -
extractIndexKey(TupleInput, TupleInput, TupleOutput) - -Method in class com.sleepycat.bdb.bind.tuple.TupleTupleMarshalledKeyExtractor -
  -
extractIndexKey(TupleInput, TupleInput, TupleOutput) - -Method in class com.sleepycat.bdb.bind.tuple.TupleTupleKeyExtractor -
Extracts the index key data from primary - key tuple and value tuple data. +
equals(Object) - +Method in class com.sleepycat.collections.StoredList +
Compares the specified object with this list for equality. +
equals(Object) - +Method in class com.sleepycat.collections.StoredMap +
Compares the specified object with this map for equality. +
error(Environment, String, String) - +Method in interface com.sleepycat.db.ErrorHandler +
A callback function to be called when an error occurs in the + Berkeley DB library.
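[Editor's note] error(Environment, String, String) is the single method of the ErrorHandler callback interface indexed above. A sketch of an implementation; registering it through EnvironmentConfig.setErrorHandler is an assumption based on the getErrorHandler accessor indexed under G:

    import com.sleepycat.db.Environment;
    import com.sleepycat.db.ErrorHandler;

    // Logs Berkeley DB error messages to standard error.
    public class StderrErrorHandler implements ErrorHandler {
        public void error(Environment environment, String errorPrefix, String message) {
            System.err.println((errorPrefix != null ? errorPrefix + ": " : "") + message);
        }
    }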

F

-
FastInputStream - class com.sleepycat.bdb.util.FastInputStream.
A replacement for ByteArrayInputStream that does not synchronize every - byte read.
FastInputStream(byte[]) - -Constructor for class com.sleepycat.bdb.util.FastInputStream +
FORWARD_ROLL - +Static variable in class com.sleepycat.db.RecoveryOperation +
The log is being played forward; redo the operation described by the log + record. +
FastInputStream - class com.sleepycat.util.FastInputStream.
A replacement for ByteArrayInputStream that does not synchronize every + byte read.
FastInputStream(byte[]) - +Constructor for class com.sleepycat.util.FastInputStream
Creates an input stream. -
FastInputStream(byte[], int, int) - -Constructor for class com.sleepycat.bdb.util.FastInputStream +
FastInputStream(byte[], int, int) - +Constructor for class com.sleepycat.util.FastInputStream
Creates an input stream. -
FastOutputStream - class com.sleepycat.bdb.util.FastOutputStream.
A replacement for ByteArrayOutputStream that does not synchronize every - byte read.
FastOutputStream() - -Constructor for class com.sleepycat.bdb.util.FastOutputStream +
FastOutputStream - class com.sleepycat.util.FastOutputStream.
A replacement for ByteArrayOutputStream that does not synchronize every + byte written.
FastOutputStream() - +Constructor for class com.sleepycat.util.FastOutputStream
Creates an output stream with default sizes. -
FastOutputStream(byte[]) - -Constructor for class com.sleepycat.bdb.util.FastOutputStream +
FastOutputStream(int) - +Constructor for class com.sleepycat.util.FastOutputStream +
Creates an output stream with a default bump size and a given initial + size. +
FastOutputStream(int, int) - +Constructor for class com.sleepycat.util.FastOutputStream +
Creates an output stream with a given bump size and initial size. +
FastOutputStream(byte[]) - +Constructor for class com.sleepycat.util.FastOutputStream
Creates an output stream with a given initial buffer and a default bump size. -
FastOutputStream(byte[], int) - -Constructor for class com.sleepycat.bdb.util.FastOutputStream +
FastOutputStream(byte[], int) - +Constructor for class com.sleepycat.util.FastOutputStream
Creates an output stream with a given initial buffer and a given bump size. -
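[Editor's note] FastInputStream and FastOutputStream are unsynchronized replacements for the java.io byte-array streams. The constructors and the getBufferBytes/getBufferLength accessors used below are the ones listed in this index; write() and read() are ordinary inherited stream behavior. A small round-trip sketch:

    import com.sleepycat.util.FastInputStream;
    import com.sleepycat.util.FastOutputStream;

    public class FastStreamExample {
        public static void main(String[] args) throws Exception {
            FastOutputStream out = new FastOutputStream();
            out.write(new byte[] { 1, 2, 3 });
            // The written bytes occupy the internal buffer from offset 0
            // up to getBufferLength().
            FastInputStream in =
                new FastInputStream(out.getBufferBytes(), 0, out.getBufferLength());
            int b;
            while ((b = in.read()) != -1) {
                System.out.println(b);   // prints 1, 2, 3
            }
        }
    }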
FastOutputStream(int) - -Constructor for class com.sleepycat.bdb.util.FastOutputStream -
Creates an output stream with a default bump size and a given initial - size. -
FastOutputStream(int, int) - -Constructor for class com.sleepycat.bdb.util.FastOutputStream -
Creates an output stream with a given bump size and initial size. -
feedback(DbEnv, int, int) - -Method in interface com.sleepycat.db.DbEnvFeedbackHandler -
The DbEnvFeedbackHandler interface is used by the DbEnv.setFeedback method. -
feedback(DbEnv, int, int) - -Method in interface com.sleepycat.db.DbEnvFeedback -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnvFeedbackHandler.feedback(DbEnv,int,int) -
feedback(Db, int, int) - -Method in interface com.sleepycat.db.DbFeedbackHandler -
The DbFeedbackHandler interface is used by the Db.setFeedback method. -
feedback(Db, int, int) - -Method in interface com.sleepycat.db.DbFeedback -
Deprecated. As of Berkeley DB 4.2, replaced by DbFeedbackHandler.feedback(Db,int,int) -
file_name - -Variable in class com.sleepycat.db.DbMpoolFStat -
The name of the file. -
find(Object, boolean) - -Method in class com.sleepycat.bdb.DataCursor -
Find the given value, using DB_GET_BOTH if possible, or a sequential - search otherwise. -
first() - -Method in class com.sleepycat.bdb.collection.StoredSortedValueSet +
FeedbackHandler - interface com.sleepycat.db.FeedbackHandler.
An interface specifying a function to be called to provide feedback.
FloatBinding - class com.sleepycat.bind.tuple.FloatBinding.
A concrete TupleBinding for a Float primitive + wrapper or a float primitive.
FloatBinding() - +Constructor for class com.sleepycat.bind.tuple.FloatBinding +
  +
first() - +Method in class com.sleepycat.collections.StoredSortedEntrySet
Returns the first (lowest) element currently in this sorted set. -
first() - -Method in class com.sleepycat.bdb.collection.StoredSortedKeySet +
first() - +Method in class com.sleepycat.collections.StoredSortedKeySet
Returns the first (lowest) element currently in this sorted set. -
first() - -Method in class com.sleepycat.bdb.collection.StoredSortedEntrySet +
first() - +Method in class com.sleepycat.collections.StoredSortedValueSet
Returns the first (lowest) element currently in this sorted set. -
firstKey() - -Method in class com.sleepycat.bdb.collection.StoredSortedMap +
firstKey() - +Method in class com.sleepycat.collections.StoredSortedMap
Returns the first (lowest) key currently in this sorted map. -
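[Editor's note] first()/firstKey() apply only to the sorted collection views. The sketch below assumes the StoredSortedMap(Database, EntryBinding, EntryBinding, boolean) constructor and the no-argument StringBinding constructor (neither is shown in this excerpt) and an already-open Btree Database:

    import com.sleepycat.bind.tuple.StringBinding;
    import com.sleepycat.collections.StoredSortedMap;
    import com.sleepycat.db.Database;

    public class FirstKeyExample {
        // Wraps an open Btree database in a sorted map view and reads its lowest key.
        static Object lowestKey(Database db) {
            StoredSortedMap map = new StoredSortedMap(
                db, new StringBinding(), new StringBinding(), /*writeAllowed=*/ true);
            // firstKey() throws NoSuchElementException on an empty map, so guard it.
            return map.isEmpty() ? null : map.firstKey();
        }
    }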
FLAGS_MOD_MASK - -Static variable in class com.sleepycat.bdb.DataDb -
  -
FLAGS_POS_MASK - -Static variable in class com.sleepycat.bdb.DataDb -
  -
FLOAT - -Static variable in interface com.sleepycat.bdb.bind.DataType -
Float data type. -
ForeignKeyIndex - class com.sleepycat.bdb.ForeignKeyIndex.
Represents a Berkeley DB secondary index where the index key is the primary - key of another data store.
ForeignKeyIndex(DataStore, Db, KeyExtractor, DataStore, int) - -Constructor for class com.sleepycat.bdb.ForeignKeyIndex -
Creates a foreign key index from a previously opened Db object. +
floatToEntry(float, DatabaseEntry) - +Static method in class com.sleepycat.bind.tuple.FloatBinding +
Converts a simple float value into an entry buffer. +
freeLockerID(int) - +Method in class com.sleepycat.db.Environment +
Free a locker ID. +
fromFlag(int) - +Static method in class com.sleepycat.db.RecoveryOperation +
Internal: this is public only so it can be called from an internal + package.

G

-
get_bt_minkey() - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by getBtreeMinKey() -
get_byteswapped() - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by isByteSwapped() -
get_cachesize_ncache() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getCacheSizeNcache() -
get_cachesize_ncache() - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by getCacheSizeNcache() -
get_cachesize() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getCacheSize() -
get_cachesize() - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by getCacheSize() -
get_data_dirs() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getDataDirs() -
get_data() - -Method in class com.sleepycat.db.Dbt -
Deprecated. As of Berkeley DB 4.2, replaced by Dbt.getData() -
get_dbname() - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by getDatabaseName() -
get_dbt() - -Method in class com.sleepycat.db.DbMemoryException -
Deprecated. As of Berkeley DB 4.2, replaced by DbMemoryException.getDbt() -
get_dlen() - -Method in class com.sleepycat.db.Dbt -
Deprecated. As of Berkeley DB 4.2, replaced by Dbt.getPartialLength() -
get_doff() - -Method in class com.sleepycat.db.Dbt -
Deprecated. As of Berkeley DB 4.2, replaced by Dbt.getPartialOffset() -
get_encrypt_flags() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getEncryptFlags() -
get_encrypt_flags() - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by getEncryptFlags() -
get_env() - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by getDbEnv() -
get_errno() - -Method in class com.sleepycat.db.DbException -
Deprecated. As of Berkeley DB 4.2, replaced by DbException.getErrno() -
get_errpfx() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getErrorPrefix() -
get_errpfx() - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by getErrorPrefix() -
get_file() - -Method in class com.sleepycat.db.DbLsn -
Deprecated. As of Berkeley DB 4.2, replaced by DbLsn.getFile() -
get_filename() - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by getFileName() -
get_flags_raw() - -Method in class com.sleepycat.db.Db -
  -
get_flags() - -Method in class com.sleepycat.db.Dbt -
Deprecated. As of Berkeley DB 4.2, replaced by Dbt.getFlags() -
get_flags() - -Method in class com.sleepycat.db.DbMpoolFile -
Deprecated. As of Berkeley DB 4.2, replaced by DbMpoolFile.getFlags() -
get_flags() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getFlags() -
get_flags() - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by getFlags() -
get_h_ffactor() - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by getHashFillFactor() -
get_h_nelem() - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by getHashNumElements() -
get_home() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getDbEnvHome() -
get_index() - -Method in class com.sleepycat.db.DbLockNotGrantedException -
Deprecated. As of Berkeley DB 4.2, replaced by DbLockNotGrantedException.getIndex() -
get_lg_bsize() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getLogBufferSize() -
get_lg_dir() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getLogDir() -
get_lg_max() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getLogMax() -
get_lg_regionmax() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getLogRegionMax() -
get_lk_conflicts() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getLockConflicts() -
get_lk_detect() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getLockDetect() -
get_lk_max_lockers() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getLockMaxLockers() -
get_lk_max_locks() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getLockMaxLocks() -
get_lk_max_objects() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getLockMaxObjects() -
get_lock() - -Method in class com.sleepycat.db.DbLockRequest -
Deprecated. As of Berkeley DB 4.2, replaced by DbLockRequest.getLock() -
get_lock() - -Method in class com.sleepycat.db.DbLockNotGrantedException -
Deprecated. As of Berkeley DB 4.2, replaced by DbLockNotGrantedException.getLock() -
get_lorder() - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by getByteOrder() -
get_maxsize() - -Method in class com.sleepycat.db.DbMpoolFile -
Deprecated. As of Berkeley DB 4.2, replaced by DbMpoolFile.getMaxsize() -
get_mode() - -Method in class com.sleepycat.db.DbLockRequest -
Deprecated. As of Berkeley DB 4.2, replaced by DbLockRequest.getMode() -
get_mode() - -Method in class com.sleepycat.db.DbLockNotGrantedException -
Deprecated. As of Berkeley DB 4.2, replaced by DbLockNotGrantedException.getMode() -
get_mp_mmapsize() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getMemoryPoolMapSize() -
get_mpf() - -Method in class com.sleepycat.db.Db -
The Db.get_mpf method returns the handle for the cache file underlying the database. -
get_obj() - -Method in class com.sleepycat.db.DbLockRequest -
Deprecated. As of Berkeley DB 4.2, replaced by DbLockRequest.getObj() -
get_obj() - -Method in class com.sleepycat.db.DbLockNotGrantedException -
Deprecated. As of Berkeley DB 4.2, replaced by DbLockNotGrantedException.getObj() -
get_object() - -Method in class com.sleepycat.db.Dbt -
Deprecated. As of Berkeley DB - 4.2, replaced by Dbt.getObject() -
get_offset() - -Method in class com.sleepycat.db.Dbt -
Deprecated. As of Berkeley DB 4.2, replaced by Dbt.getOffset() -
get_offset() - -Method in class com.sleepycat.db.DbLsn -
Deprecated. As of Berkeley DB 4.2, replaced by DbLsn.getOffset() -
get_op() - -Method in class com.sleepycat.db.DbLockRequest -
Deprecated. As of Berkeley DB 4.2, replaced by DbLockRequest.getOp() -
get_op() - -Method in class com.sleepycat.db.DbLockNotGrantedException -
Deprecated. As of Berkeley DB 4.2, replaced by DbLockNotGrantedException.getOp() -
get_open_flags() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getOpenFlags() -
get_open_flags() - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by getOpenFlags() -
get_pagesize() - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by getPageSize() -
get_priority() - -Method in class com.sleepycat.db.DbMpoolFile -
Deprecated. As of Berkeley DB 4.2, replaced by DbMpoolFile.getPriority() -
get_q_extentsize() - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by getQueueExtentSize() -
get_re_delim() - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by getRecordDelimiter() -
get_re_len() - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by getRecordLength() -
get_re_pad() - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by getRecordPad() -
get_re_source() - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by getRecordSource() -
get_recno_key_data() - -Method in class com.sleepycat.db.Dbt -
Deprecated. As of Berkeley DB 4.2, replaced by Dbt.getRecordNumber() -
get_rep_limit() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getReplicationLimit() -
get_shm_key() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getSegmentId() -
get_size() - -Method in class com.sleepycat.db.Dbt -
Deprecated. As of Berkeley DB 4.2, replaced by Dbt.getSize() -
get_tas_spins() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getTestAndSetSpins() -
get_timeout(int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getTimeout(int) -
get_tmp_dir() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getTmpDir() -
get_transactional() - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by isTransactional() -
get_tx_max() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getTxnMax() -
get_tx_timestamp() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getTxnTimestamp() -
get_type() - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by getDbType() -
get_ulen() - -Method in class com.sleepycat.db.Dbt -
Deprecated. As of Berkeley DB 4.2, replaced by Dbt.getUserBufferLength() -
get_verbose(int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getVerbose(int) -
get_version_major() - -Static method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getVersionMajor() -
get_version_minor() - -Static method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getVersionMinor() -
get_version_patch() - -Static method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getVersionPatch() -
get_version_string() - -Static method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.getVersionString() -
get(DataThang, DataThang, int) - -Method in class com.sleepycat.bdb.DataDb -
Performs a general database 'get' operation. -
get(Dbc, DataThang, DataThang, int) - -Method in class com.sleepycat.bdb.DataDb -
Performs a general database 'get' operation via a cursor. -
get(DbLsn, Dbt, int) - -Method in class com.sleepycat.db.DbLogc -
The DbLogc.get method returns records from the log. -
get(Dbt, Dbt, Dbt, int) - -Method in class com.sleepycat.db.Dbc -
The Dbc.get method retrieves key/data pairs from the database. -
get(Dbt, Dbt, int) - -Method in class com.sleepycat.db.Dbc -
The Dbc.get method retrieves key/data pairs from the database. -
get(DbTxn, Dbt, Dbt, Dbt, int) - -Method in class com.sleepycat.db.Db -
The Db.get method retrieves key/data pairs from the database. -
get(DbTxn, Dbt, Dbt, int) - -Method in class com.sleepycat.db.Db -
The Db.get method retrieves key/data pairs from the database. -
get(int) - -Method in class com.sleepycat.bdb.collection.StoredList +
GET - +Static variable in class com.sleepycat.db.LockOperation +
Get the lock defined by the values of the mode and obj fields, for + the specified locker. +
GET_TIMEOUT - +Static variable in class com.sleepycat.db.LockOperation +
Identical to LockOperation GET except that the value in the timeout + field overrides any previously specified timeout value for this + lock. +
get(int) - +Method in class com.sleepycat.collections.StoredList
Returns the element at the specified position in this list. -
get(Object) - -Method in class com.sleepycat.bdb.collection.StoredMap +
get(Object) - +Method in class com.sleepycat.collections.StoredMap
Returns the value to which this map maps the specified key. -
get(Object, Object, int, boolean) - -Method in class com.sleepycat.bdb.DataCursor -
Perform a database 'get' using the given key and value. -
get(Object, Object, int, boolean, Object[]) - -Method in class com.sleepycat.bdb.DataView -
Performs a general database 'get' operation. -
getBaseClass() - -Method in class com.sleepycat.bdb.bind.serial.SerialFormat -
Returns the base class for this format. -
getBtreeMinKey() - -Method in class com.sleepycat.db.Db -
The Db.getBtreeMinKey method returns the minimum number of key/data pairs intended to be stored on any single Btree leaf page. -
getBufferBytes() - -Method in class com.sleepycat.bdb.util.FastOutputStream -
Returns the buffer owned by this object. -
getBufferBytes() - -Method in class com.sleepycat.bdb.util.FastInputStream +
get(Transaction, DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.Database +
Retrieves the key/data pair with the given key. +
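[Editor's note] Database.get fills the caller's data DatabaseEntry and reports the outcome through a status value. A lookup sketch assuming an already-open Database; StringBinding.stringToEntry, OperationStatus, and LockMode.DEFAULT are assumptions (only entryToString and this get signature appear in the excerpt):

    import com.sleepycat.bind.tuple.StringBinding;
    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseEntry;
    import com.sleepycat.db.LockMode;
    import com.sleepycat.db.OperationStatus;

    public class GetExample {
        static String lookup(Database db, String keyString) throws Exception {
            DatabaseEntry key = new DatabaseEntry();
            DatabaseEntry data = new DatabaseEntry();
            StringBinding.stringToEntry(keyString, key);   // assumed counterpart of entryToString
            // Non-transactional read with default locking.
            OperationStatus status = db.get(null, key, data, LockMode.DEFAULT);
            return status == OperationStatus.SUCCESS
                ? StringBinding.entryToString(data)
                : null;
        }
    }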
get(Transaction, DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.SecondaryDatabase +
Retrieves the key/data pair with the given key. +
get(Transaction, int) - +Method in class com.sleepycat.db.Sequence +
Returns the next available element in the sequence and changes the sequence + value by delta. +
getAggressive() - +Method in class com.sleepycat.db.VerifyConfig +
Return if the Database.verify method is configured to output + all the key/data pairs in the file that can be found. +
getAlloc() - +Method in class com.sleepycat.db.CacheStats +
Number of page allocations. +
getAllocBuckets() - +Method in class com.sleepycat.db.CacheStats +
Number of hash buckets checked during allocation. +
getAllocMaxBuckets() - +Method in class com.sleepycat.db.CacheStats +
Maximum number of hash buckets checked during an allocation. +
getAllocMaxPages() - +Method in class com.sleepycat.db.CacheStats +
Maximum number of pages checked during an allocation. +
getAllocPages() - +Method in class com.sleepycat.db.CacheStats +
Number of pages checked during allocation. +
getAllowCreate() - +Method in class com.sleepycat.db.DatabaseConfig +
Return if the Environment.openDatabase method is configured + to create the database if it does not already exist. +
getAllowCreate() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the database environment is configured to create any + underlying files, as necessary. +
getAllowCreate() - +Method in class com.sleepycat.db.SequenceConfig +
Return if the Database.openSequence method is configured + to create the sequence if it does not already exist. +
getAllowNestedTransactions() - +Method in class com.sleepycat.collections.TransactionRunner +
Returns whether nested transactions will be created if + run() is called when a transaction is already active for + the current thread. +
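[Editor's note] getAllowNestedTransactions belongs to com.sleepycat.collections.TransactionRunner, which runs a unit of work inside a transaction and retries it on deadlock. A sketch of the usual calling pattern; the TransactionRunner(Environment) constructor, the run(TransactionWorker) method, and the TransactionWorker interface are assumptions not shown in this excerpt:

    import com.sleepycat.collections.TransactionRunner;
    import com.sleepycat.collections.TransactionWorker;
    import com.sleepycat.db.Environment;

    public class RunnerExample {
        // Runs one transactional unit of work against an already-open, transactional environment.
        static void doTransactionalWork(Environment env) throws Exception {
            TransactionRunner runner = new TransactionRunner(env); // assumed constructor
            runner.run(new TransactionWorker() {
                public void doWork() throws Exception {
                    // Reads and writes performed here share one transaction,
                    // which is committed when doWork() returns normally.
                }
            });
        }
    }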
getAllowPopulate() - +Method in class com.sleepycat.db.SecondaryConfig +
Returns whether automatic population of the secondary is allowed. +
getArchiveDatabases() - +Method in class com.sleepycat.db.Environment +
Return the names of the database files that need to be archived in + order to recover the database from catastrophic failure. +
getArchiveLogFiles(boolean) - +Method in class com.sleepycat.db.Environment +
Return the names of all of the log files that are no longer in use. +
getAutoCommitNoSync() - +Method in class com.sleepycat.db.SequenceConfig +
Return if the auto-commit operations on the sequence are configured not to + flush the transaction log. +
getBFree() - +Method in class com.sleepycat.db.HashStats +
The number of bytes free on bucket pages. +
getBaseClass() - +Method in class com.sleepycat.bind.serial.SerialBinding +
Returns the base class for this binding. +
getBigBFree() - +Method in class com.sleepycat.db.HashStats +
The number of bytes free on big item pages. +
getBigPages() - +Method in class com.sleepycat.db.HashStats +
The number of big key/data pages. +
getBtreeComparator() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the Comparator used to compare keys in a Btree. +
getBtreeMinKey() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the minimum number of key/data pairs intended to be stored + on any single Btree leaf page. +
getBtreePrefixCalculator() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the Btree prefix callback. +
getBtreeRecordNumbers() - +Method in class com.sleepycat.db.DatabaseConfig +
Return if the Btree is configured to support retrieval by record number. +
getBuckets() - +Method in class com.sleepycat.db.HashStats +
The number of hash buckets. +
getBufferBytes() - +Method in class com.sleepycat.util.FastInputStream
Returns the underlying data being read. -
getBufferLength() - -Method in class com.sleepycat.bdb.util.FastOutputStream -
Returns the length used in the internal buffer, that is, the offset at - which data will be written next. -
getBufferLength() - -Method in class com.sleepycat.bdb.util.FastInputStream +
getBufferBytes() - +Method in class com.sleepycat.util.FastOutputStream +
Returns the buffer owned by this object. +
getBufferLength() - +Method in class com.sleepycat.util.FastInputStream
Returns the end of the buffer being read. -
getBufferOffset() - -Method in class com.sleepycat.bdb.util.FastOutputStream -
Returns the offset of the internal buffer. -
getBufferOffset() - -Method in class com.sleepycat.bdb.util.FastInputStream +
getBufferLength() - +Method in class com.sleepycat.util.FastOutputStream +
Returns the length used in the internal buffer, i.e., the offset at + which data will be written next. +
getBufferOffset() - +Method in class com.sleepycat.util.FastInputStream
Returns the offset at which data is being read from the buffer. -
getByteLength(char[]) - -Static method in class com.sleepycat.bdb.util.UtfOps +
getBufferOffset() - +Method in class com.sleepycat.util.FastOutputStream +
Returns the offset of the internal buffer. +
getByteLength(char[]) - +Static method in class com.sleepycat.util.UtfOps
Returns the byte length of the UTF string that would be created by converting the given characters to UTF. -
getByteLength(char[], int, int) - -Static method in class com.sleepycat.bdb.util.UtfOps +
getByteLength(char[], int, int) - +Static method in class com.sleepycat.util.UtfOps
Returns the byte length of the UTF string that would be created by converting the given characters to UTF. -
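[Editor's note] The UtfOps length helpers are symmetric: getByteLength predicts the encoded size of a char array, and getCharLength (below) the decoded size of a UTF byte array. A tiny sketch using only the two getByteLength overloads listed here:

    import com.sleepycat.util.UtfOps;

    public class UtfLengthExample {
        public static void main(String[] args) {
            char[] chars = "héllo".toCharArray();
            // Number of bytes the UTF encoding of these characters would occupy.
            System.out.println(UtfOps.getByteLength(chars));
            // Same computation restricted to a sub-range of the array.
            System.out.println(UtfOps.getByteLength(chars, 0, 2));
        }
    }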
getByteOrder() - -Method in class com.sleepycat.db.Db -
The Db.getByteOrder method returns the database byte order; a byte order of 4,321 indicates a big endian order, and a byte order of 1,234 indicates a little endian order. -
getBytes() - -Method in class com.sleepycat.bdb.DataThang -
Returns the data for this thang. -
getByteStream() - -Method in class com.sleepycat.bdb.DataThang -
Returns the data for this thang as a byte array input stream.. -
getCacheSize() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getCacheSize method returns the size of the cache. -
getCacheSize() - -Method in class com.sleepycat.db.Db -
The Db.getCacheSize method returns the size of the cache. -
getCacheSizeNcache() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getCacheSizeNcache method returns the number of caches. -
getCacheSizeNcache() - -Method in class com.sleepycat.db.Db -
The DbEnv.getCacheSizeNcache method returns the number of caches. -
getCatalog() - -Method in class com.sleepycat.bdb.factory.TupleSerialDbFactory +
getByteOrder() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the database byte order; a byte order of 4,321 indicates a + big endian order, and a byte order of 1,234 indicates a little + endian order. +
getByteSwapped() - +Method in class com.sleepycat.db.DatabaseConfig +
Return if the underlying database files were created on an architecture + of the same byte order as the current one. +
getBytes() - +Method in class com.sleepycat.db.CacheStats +
Bytes of cache (total cache size is st_gbytes + st_bytes). +
getCDBLockAllDatabases() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the Concurrent Data Store applications are configured to + perform locking on an environment-wide basis rather than on a + per-database basis. +
getCData() - +Method in class com.sleepycat.db.ReplicationStatus +
Whenever the system receives contact information from a new + environment, a copy of the opaque data specified in the cdata + parameter to the Environment.startReplication is available + from the getCData method. +
getCacheCount() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the number of shared memory buffer pools, that is, the number + of caches. +
getCacheCount() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the number of shared memory buffer pools, that is, the number + of caches. +
getCacheFile() - +Method in class com.sleepycat.db.Database +
Return the handle for the cache file underlying the database. +
getCacheFileStats(StatsConfig) - +Method in class com.sleepycat.db.Environment +
Return the database environment's per-file memory pool (that is, the + buffer cache) statistics. +
getCacheHit() - +Method in class com.sleepycat.db.CacheFileStats +
Requested pages found in the cache. +
getCacheHit() - +Method in class com.sleepycat.db.CacheStats +
Requested pages found in the cache. +
getCacheMiss() - +Method in class com.sleepycat.db.CacheFileStats +
Requested pages not found in the cache. +
getCacheMiss() - +Method in class com.sleepycat.db.CacheStats +
Requested pages not found in the cache. +
getCacheSize() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the size of the shared memory buffer pool, that is, the cache. +
getCacheSize() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the size of the shared memory buffer pool, that is, the cache. +
getCacheSize() - +Method in class com.sleepycat.db.SequenceConfig +
Return the number of elements cached by a sequence handle. +
getCacheSize() - +Method in class com.sleepycat.db.SequenceStats +
The number of values that will be cached in this handle. +
getCacheStats(StatsConfig) - +Method in class com.sleepycat.db.Environment +
  +
getCatalog() - +Method in class com.sleepycat.collections.TupleSerialFactory
Returns the class catalog associated with this factory. -
getCharLength(byte[]) - -Static method in class com.sleepycat.bdb.util.UtfOps +
getCharLength(byte[]) - +Static method in class com.sleepycat.util.UtfOps
Returns the number of characters represented by the given UTF string. -
getCharLength(byte[], int, int) - -Static method in class com.sleepycat.bdb.util.UtfOps +
getCharLength(byte[], int, int) - +Static method in class com.sleepycat.util.UtfOps
Returns the number of characters represented by the given UTF string. -
getClassFormat(byte[]) - -Method in class com.sleepycat.bdb.StoredClassCatalog -
  -
getClassFormat(byte[]) - -Method in interface com.sleepycat.bdb.bind.serial.ClassCatalog +
getChecksum() - +Method in class com.sleepycat.db.DatabaseConfig +
Return if the database environment is configured to do checksum + verification of pages read into the cache from the backing + filestore. +
getClassFormat(byte[]) - +Method in interface com.sleepycat.bind.serial.ClassCatalog
Return the ObjectStreamClass for the given class ID. -
getClassFormat(String) - -Method in class com.sleepycat.bdb.StoredClassCatalog -
  -
getClassFormat(String) - -Method in interface com.sleepycat.bdb.bind.serial.ClassCatalog -
Return the ObjectStreamClass for the given class name. -
getClassID(String) - -Method in class com.sleepycat.bdb.StoredClassCatalog -
  -
getClassID(String) - -Method in interface com.sleepycat.bdb.bind.serial.ClassCatalog -
Return the class ID for the current version of the given class name. -
getCollection() - -Method in class com.sleepycat.bdb.collection.StoredIterator +
getClassFormat(byte[]) - +Method in class com.sleepycat.bind.serial.StoredClassCatalog +
  +
getClassID(ObjectStreamClass) - +Method in interface com.sleepycat.bind.serial.ClassCatalog +
Return the class ID for the current version of the given class + description. +
getClassID(ObjectStreamClass) - +Method in class com.sleepycat.bind.serial.StoredClassCatalog +
  +
getClear() - +Method in class com.sleepycat.db.StatsConfig +
Return if the statistics operation is configured to reset + statistics after they are returned. +
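[Editor's note] The getClassFormat/getClassID entries just above are the ClassCatalog plumbing behind the serial bindings; applications normally only construct a StoredClassCatalog and hand it to a SerialBinding. In the sketch below, the StoredClassCatalog(Database) and SerialBinding(ClassCatalog, Class) constructors and objectToEntry are assumptions not shown in this excerpt:

    import java.io.Serializable;
    import com.sleepycat.bind.serial.SerialBinding;
    import com.sleepycat.bind.serial.StoredClassCatalog;
    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseEntry;

    public class SerialExample {
        static DatabaseEntry serialize(Database catalogDb, Serializable value) throws Exception {
            // The catalog database stores one class description per class version.
            StoredClassCatalog catalog = new StoredClassCatalog(catalogDb);
            SerialBinding binding = new SerialBinding(catalog, value.getClass());
            DatabaseEntry entry = new DatabaseEntry();
            binding.objectToEntry(value, entry);   // serialize; entryToObject reverses this
            return entry;
        }
    }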
getCollection() - +Method in class com.sleepycat.collections.StoredIterator
Returns the collection associated with this iterator. -
getCurrentKey() - -Method in class com.sleepycat.bdb.DataCursor -
Returns the key object for the last record read. -
getCurrentRecordNumber() - -Method in class com.sleepycat.bdb.DataCursor -
Returns the record number for the last record read. -
getCurrentTxn() - -Method in class com.sleepycat.bdb.DataView -
Returns the current transaction for the view or null if the environment - is non-transactional. -
getCurrentValue() - -Method in class com.sleepycat.bdb.DataCursor -
Returns the value object for the last record read. -
getData() - -Method in class com.sleepycat.db.Dbt -
Return the data array. -
getDatabaseName() - -Method in class com.sleepycat.db.Db -
The Db.getDatabaseName method returns the current database name. -
getDataBytes() - -Method in class com.sleepycat.bdb.DataThang -
  -
getDataBytes() - -Method in class com.sleepycat.bdb.bind.SimpleBuffer -
  -
getDataBytes() - -Method in interface com.sleepycat.bdb.bind.DataBuffer -
Returns the byte array of the data buffer. -
getDataDirs() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getDataDirs method returns the array of directories. -
getDataFormat() - -Method in class com.sleepycat.bdb.RecordNumberBinding -
  -
getDataFormat() - -Method in interface com.sleepycat.bdb.bind.DataBinding -
Returns the format used for the data of this binding. -
getDataFormat() - -Method in class com.sleepycat.bdb.bind.ByteArrayBinding -
  -
getDataFormat() - -Method in class com.sleepycat.bdb.bind.serial.SerialBinding -
  -
getDataFormat() - -Method in class com.sleepycat.bdb.bind.tuple.TupleInputBinding -
  -
getDataFormat() - -Method in class com.sleepycat.bdb.bind.tuple.TupleBinding -
  -
getDataFormation() - -Method in class com.sleepycat.bdb.DataThang -
  -
getDataFormation() - -Method in class com.sleepycat.bdb.bind.SimpleBuffer -
  -
getDataFormation() - -Method in interface com.sleepycat.bdb.bind.DataBuffer -
Returns the formation associated with the data in this buffer. -
getDataLength() - -Method in class com.sleepycat.bdb.DataThang -
  -
getDataLength() - -Method in class com.sleepycat.bdb.bind.SimpleBuffer -
  -
getDataLength() - -Method in interface com.sleepycat.bdb.bind.DataBuffer -
Returns the byte length of the data in the array. -
getDataOffset() - -Method in class com.sleepycat.bdb.DataThang -
  -
getDataOffset() - -Method in class com.sleepycat.bdb.bind.SimpleBuffer -
  -
getDataOffset() - -Method in interface com.sleepycat.bdb.bind.DataBuffer -
Returns the byte offset of the data in the array. -
getDb() - -Method in class com.sleepycat.bdb.DataView -
Returns the database for the index, if one is used, or store, if no - index is used. -
getDb() - -Method in class com.sleepycat.bdb.DataDb -
Returns the underlying database. -
getDbEnv() - -Method in class com.sleepycat.db.DbException -
The DbException.getDbEnv method returns the database environment. -
getDbEnv() - -Method in class com.sleepycat.db.Db -
The Db.getDbEnv method returns the handle for the database environment underlying the database. -
getDbEnvHome() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getDbEnvHome method returns the database environment home directory. -
getDbt() - -Method in class com.sleepycat.db.DbMemoryException -
The getDbt method returns the Dbt with insufficient memory to complete the operation, causing the DbMemoryException to be thrown. -
getDbType() - -Method in class com.sleepycat.db.Db -
The Db.getDbType method returns the type of the underlying access method (and file format). -
getDeleteAction() - -Method in class com.sleepycat.bdb.ForeignKeyIndex -
Returns a value indicating what action occurs when the foreign key - is deleted. -
getDetail() - -Method in class com.sleepycat.bdb.util.RuntimeExceptionWrapper -
  -
getDetail() - -Method in class com.sleepycat.bdb.util.IOExceptionWrapper +
getConfig() - +Method in class com.sleepycat.db.Cursor +
Return this cursor's configuration. +
getConfig() - +Method in class com.sleepycat.db.Database +
Return this Database object's configuration. +
getConfig() - +Method in class com.sleepycat.db.Environment +
Return this object's configuration. +
getConfig() - +Method in class com.sleepycat.db.JoinCursor +
Returns this object's configuration. +
getCurFile() - +Method in class com.sleepycat.db.LogStats +
The current log file number. +
getCurMaxId() - +Method in class com.sleepycat.db.LockStats
  -
getDetail() - -Method in interface com.sleepycat.bdb.util.ExceptionWrapper +
getCurOffset() - +Method in class com.sleepycat.db.LogStats +
The byte offset in the current log file. +
getCurRecno() - +Method in class com.sleepycat.db.QueueStats +
The next available record number. +
getCurrent(DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.Cursor +
Returns the key/data pair to which the cursor refers. +
getCurrent(LogSequenceNumber, DatabaseEntry) - +Method in class com.sleepycat.db.LogCursor +
Return the LogSequenceNumber and log record to which the log cursor + currently refers. +
getCurrent(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.SecondaryCursor +
Returns the key/data pair to which the cursor refers. +
getCurrent() - +Method in class com.sleepycat.db.SequenceStats +
The current value of the sequence in the database. +
getData() - +Method in class com.sleepycat.db.DatabaseEntry +
Return the byte array. +
getDataDirs() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the array of data directories. +
getDatabase() - +Method in class com.sleepycat.db.Cursor +
Return the Database handle associated with this Cursor. +
getDatabase() - +Method in class com.sleepycat.db.JoinCursor +
Returns the primary database handle associated with this cursor. +
getDatabase() - +Method in class com.sleepycat.db.Sequence +
Return the Database handle associated with this sequence. +
getDatabaseEntry() - +Method in class com.sleepycat.db.MemoryException +
Returns the DatabaseEntry object with insufficient memory + to complete the operation. +
getDatabaseFile() - +Method in class com.sleepycat.db.Database +
Return the database's underlying file name. +
getDatabaseName() - +Method in class com.sleepycat.db.Database +
Return the database name. +
getDecrement() - +Method in class com.sleepycat.db.SequenceConfig +
Return if the sequence is configured to decrement. +
getDegree2() - +Method in class com.sleepycat.db.CursorConfig +
Return if the cursor is configured for degree 2 isolation. +
getDegree2() - +Method in class com.sleepycat.db.TransactionConfig +
Return if the transaction has been configured to have degree 2 isolation. +
getDetail() - +Method in interface com.sleepycat.util.ExceptionWrapper
Returns the nested exception or null if none is present. -
getDirtyRead() - -Method in class com.sleepycat.bdb.TransactionRunner -
Returns whether transactions will read data that is modified by another - transaction but not committed. -
getEncryptFlags() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getEncryptFlags method returns the encryption flags. -
getEncryptFlags() - -Method in class com.sleepycat.db.Db -
The Db.getEncryptFlags method returns the encryption flags. -
getEnv() - -Method in class com.sleepycat.bdb.DataView -
Returns the environment for the store and index. -
getEnv() - -Method in class com.sleepycat.bdb.DataStore -
Returns the environment associated with this store. -
getEnv() - -Method in class com.sleepycat.bdb.DataDb -
Returns the environment. -
getEnv() - -Method in class com.sleepycat.bdb.CurrentTransaction +
getDetail() - +Method in class com.sleepycat.util.IOExceptionWrapper +
  +
getDetail() - +Method in class com.sleepycat.util.RuntimeExceptionWrapper +
  +
getDirectDatabaseIO() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the database environment has been configured to not buffer + database files. +
getDirectLogIO() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the database environment has been configured to not buffer + log files. +
getDirtyRead() - +Method in class com.sleepycat.db.CursorConfig +
Return if read operations performed by the cursor are configured to + return modified but not yet committed data. +
getDirtyRead() - +Method in class com.sleepycat.db.DatabaseConfig +
Return if the database is configured to support dirty reads. +
getDirtyRead() - +Method in class com.sleepycat.db.TransactionConfig +
Return if the transaction is configured to perform dirty reads. +
getDiskFile() - +Method in class com.sleepycat.db.LogStats +
The log file number of the last record known to be on disk. +
getDiskOffset() - +Method in class com.sleepycat.db.LogStats +
The byte offset of the last record known to be on disk. +
getDsyncLog() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the database environment has been configured to flush log + writes to the backing disk before returning from the write system + call. +
getDup() - +Method in class com.sleepycat.db.HashStats +
The number of duplicate pages. +
getDupFree() - +Method in class com.sleepycat.db.HashStats +
The number of bytes free on duplicate pages. +
getDupPages() - +Method in class com.sleepycat.db.BtreeStats +
The number of database duplicate pages. +
getDupPagesFree() - +Method in class com.sleepycat.db.BtreeStats +
The number of bytes free in database duplicate pages. +
getDuplicateComparator() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the duplicate data item comparison callback. +
getDupmasters() - +Method in class com.sleepycat.db.ReplicationStats +
The number of duplicate master conditions detected. +
getEgen() - +Method in class com.sleepycat.db.ReplicationStats +
The current election generation number. +
getElectionCurWinner() - +Method in class com.sleepycat.db.ReplicationStats +
The election winner. +
getElectionGen() - +Method in class com.sleepycat.db.ReplicationStats +
The election generation number. +
getElectionLsn() - +Method in class com.sleepycat.db.ReplicationStats +
The maximum LSN of election winner. +
getElectionNumSites() - +Method in class com.sleepycat.db.ReplicationStats +
The number of sites expected to participate in elections. +
getElectionNumVotes() - +Method in class com.sleepycat.db.ReplicationStats +
The number of votes required to complete the election. +
getElectionPriority() - +Method in class com.sleepycat.db.ReplicationStats +
The election priority. +
getElectionStatus() - +Method in class com.sleepycat.db.ReplicationStats +
The current election phase (0 if no election is in progress). +
getElectionTiebreaker() - +Method in class com.sleepycat.db.ReplicationStats +
The election tiebreaker value. +
getElectionVotes() - +Method in class com.sleepycat.db.ReplicationStats +
The votes received this election round. +
getElections() - +Method in class com.sleepycat.db.ReplicationStats +
The number of elections held. +
getElectionsWon() - +Method in class com.sleepycat.db.ReplicationStats +
The number of elections won. +
getEmptyPages() - +Method in class com.sleepycat.db.BtreeStats +
The number of empty database pages. +
getEncrypted() - +Method in class com.sleepycat.db.DatabaseConfig +
Return if the database has been configured to perform encryption. +
getEncrypted() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the database environment has been configured to perform + encryption. +
getEnvID() - +Method in class com.sleepycat.db.ReplicationStatus +
Whenever a new master is elected, the environment ID of the new master + is available from the getEnvID method. +
getEnvId() - +Method in class com.sleepycat.db.ReplicationStats +
The current environment ID. +
getEnvPriority() - +Method in class com.sleepycat.db.ReplicationStats +
The current environment priority. +
getEnvironment() - +Method in class com.sleepycat.collections.CurrentTransaction
Returns the underlying Berkeley DB environment. -
getErrno() - -Method in class com.sleepycat.db.DbException -
The DbException.getErrno method returns the error value. -
getErrorPrefix() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getErrorPrefix method returns the error prefix. -
getErrorPrefix() - -Method in class com.sleepycat.db.Db -
The Db.getErrorPrefix method returns the error prefix. -
getFile() - -Method in class com.sleepycat.db.DbLsn -
The DbLsn.getFile method returns the DbLsn object's file number. -
getFileName() - -Method in class com.sleepycat.db.Db -
The Db.getFileName method returns the current filename. -
getFlags() - -Method in class com.sleepycat.db.Dbt -
Return the object flag value. -
getFlags() - -Method in class com.sleepycat.db.DbMpoolFile -
The DbMpoolFile.getFlags method returns the flags. -
getFlags() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getFlags method returns the configuration flags. -
getFlags() - -Method in class com.sleepycat.db.Db -
The Db.getFlags method returns the current flags. -
getForeignStore() - -Method in class com.sleepycat.bdb.ForeignKeyIndex -
Returns the foreign store which has the primary key which matches the - index key of this store. -
getHashFillFactor() - -Method in class com.sleepycat.db.Db -
The Db.getHashFillFactor method returns the hash table density. -
getHashNumElements() - -Method in class com.sleepycat.db.Db -
The Db.getHashNumElements method returns the estimate of the final size of the hash table. -
getIndex() - -Method in class com.sleepycat.bdb.DataView -
Returns the index, as specified to the constructor. -
getIndex() - -Method in class com.sleepycat.db.DbLockNotGrantedException -
The getIndex method returns -1 when DbEnv.lockGet was called, and returns the index of the failed DbLockRequest when DbEnv.lockVector was called. -
getIndexKeyFormat() - -Method in interface com.sleepycat.bdb.bind.KeyExtractor -
Returns the format of the index key data. -
getIndexKeyFormat() - -Method in class com.sleepycat.bdb.bind.serial.TupleSerialKeyExtractor -
  -
getIndexKeyFormat() - -Method in class com.sleepycat.bdb.bind.serial.SerialSerialKeyExtractor -
  -
getIndexKeyFormat() - -Method in class com.sleepycat.bdb.bind.tuple.TupleTupleKeyExtractor -
  -
getIndices() - -Method in class com.sleepycat.bdb.DataStore -
Returns the indices associated with this store. -
getInstance(DbEnv) - -Static method in class com.sleepycat.bdb.CurrentTransaction +
getEnvironment() - +Method in class com.sleepycat.db.Database +
Return the Environment handle for the database environment + underlying the Database. +
getEnvironment() - +Method in class com.sleepycat.db.DatabaseException +
Return the environment in which the exception occurred. +
getErrno() - +Method in class com.sleepycat.db.DatabaseException +
Return the system or C API error number that caused the exception. +
getErrorHandler() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the function to be called if an error occurs. +
getErrorHandler() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the function to be called if an error occurs. +
getErrorPrefix() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the prefix string that appears before error messages. +
getErrorPrefix() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the prefix string that appears before error messages. +
getErrorStream() - +Method in class com.sleepycat.db.DatabaseConfig +
Return an OutputStream for displaying error messages. +
getErrorStream() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return an OutputStream for displaying error messages. +
getExclusiveCreate() - +Method in class com.sleepycat.db.DatabaseConfig +
Return if the Environment.openDatabase method is configured + to fail if the database already exists. +
getExclusiveCreate() - +Method in class com.sleepycat.db.SequenceConfig +
Return if the Database.openSequence method is configured to + fail if the database already exists. +
getExtentSize() - +Method in class com.sleepycat.db.QueueStats +
The underlying database extent size, in pages. +
getFast() - +Method in class com.sleepycat.db.StatsConfig +
Return if the statistics operation is configured to return only the + values which do not require expensive actions. +
getFeedbackHandler() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the object whose methods are called to provide feedback. +
getFeedbackHandler() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the object whose methods are called to provide feedback. +
getFfactor() - +Method in class com.sleepycat.db.HashStats +
The desired fill factor specified at database-creation time. +
getFile() - +Method in class com.sleepycat.db.LogSequenceNumber +
Return the file number component of the LogSequenceNumber. +
getFileName() - +Method in class com.sleepycat.db.CacheFileStats +
The name of the file. +
getFirst(DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.Cursor +
Move the cursor to the first key/data pair of the database, and return +that pair. +
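A sketch of a full forward scan built from Cursor.getFirst and Cursor.getNext as listed in this index; printing each pair as a string is only for illustration:

    import com.sleepycat.db.Cursor;
    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseEntry;
    import com.sleepycat.db.DatabaseException;
    import com.sleepycat.db.LockMode;
    import com.sleepycat.db.OperationStatus;

    class ScanSketch {
        // Visit every key/data pair in the database, first to last.
        static void scan(Database db) throws DatabaseException {
            Cursor cursor = db.openCursor(null, null);
            try {
                DatabaseEntry key = new DatabaseEntry();
                DatabaseEntry data = new DatabaseEntry();
                OperationStatus status = cursor.getFirst(key, data, LockMode.DEFAULT);
                while (status == OperationStatus.SUCCESS) {
                    System.out.println(
                        new String(key.getData(), key.getOffset(), key.getSize()) + " -> " +
                        new String(data.getData(), data.getOffset(), data.getSize()));
                    status = cursor.getNext(key, data, LockMode.DEFAULT);
                }
            } finally {
                cursor.close();
            }
        }
    }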
getFirst(LogSequenceNumber, DatabaseEntry) - +Method in class com.sleepycat.db.LogCursor +
Return the first LogSequenceNumber and log record. +
getFirst(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.SecondaryCursor +
Move the cursor to the first key/data pair of the database, and return +that pair. +
getFirstRecno() - +Method in class com.sleepycat.db.QueueStats +
The first undeleted record in the database. +
getFlags() - +Method in class com.sleepycat.db.SequenceStats +
The flags value for the sequence. +
getForce() - +Method in class com.sleepycat.db.CheckpointConfig +
Return the configuration of the checkpoint force option. +
getFree() - +Method in class com.sleepycat.db.BtreeStats +
The number of pages on the free list. +
getFree() - +Method in class com.sleepycat.db.HashStats +
The number of pages on the free list. +
getGID() - +Method in class com.sleepycat.db.PreparedTransaction +
Return the global transaction ID for the transaction. +
getGbytes() - +Method in class com.sleepycat.db.CacheStats +
Gigabytes of cache (total cache size is st_gbytes + st_bytes). +
getGen() - +Method in class com.sleepycat.db.ReplicationStats +
The current generation number. +
getHashBuckets() - +Method in class com.sleepycat.db.CacheStats +
Number of hash buckets in buffer hash table. +
getHashExamined() - +Method in class com.sleepycat.db.CacheStats +
Total number of hash elements traversed during hash table lookups. +
getHashFillFactor() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the hash table density. +
getHashLongest() - +Method in class com.sleepycat.db.CacheStats +
The longest chain ever encountered in buffer hash table lookups. +
getHashMaxWait() - +Method in class com.sleepycat.db.CacheStats +
The maximum number of times any hash bucket lock was waited for by + a thread of control. +
getHashNowait() - +Method in class com.sleepycat.db.CacheStats +
The number of times that a thread of control was able to obtain a + hash bucket lock without waiting. +
getHashNumElements() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the estimate of the final size of the hash table. +
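A sketch of the corresponding DatabaseConfig setters in use, opening a Hash database whose fill factor and element estimate would later be reported by getHashFillFactor() and getHashNumElements(); the file name and values are illustrative:

    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseConfig;
    import com.sleepycat.db.DatabaseException;
    import com.sleepycat.db.DatabaseType;
    import com.sleepycat.db.Environment;
    import java.io.FileNotFoundException;

    class HashConfigSketch {
        // Open a Hash database; the values set here are what the accessors report.
        static Database openHash(Environment env)
                throws DatabaseException, FileNotFoundException {
            DatabaseConfig config = new DatabaseConfig();
            config.setAllowCreate(true);
            config.setType(DatabaseType.HASH);
            config.setHashFillFactor(40);       // desired hash table density
            config.setHashNumElements(100000);  // estimate of the final table size
            return env.openDatabase(null, "hash.db", null, config);
        }
    }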
getHashSearches() - +Method in class com.sleepycat.db.CacheStats +
Total number of buffer hash table lookups. +
getHashWait() - +Method in class com.sleepycat.db.CacheStats +
The number of times that a thread of control was forced to wait + before obtaining a hash bucket lock. +
getHasher() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the database-specific hash function. +
getHome() - +Method in class com.sleepycat.db.Environment +
Return the database environment's home directory. +
getId() - +Method in class com.sleepycat.db.LockStats +
The last allocated locker ID. +
getId() - +Method in class com.sleepycat.db.Transaction +
Return the transaction's unique ID. +
getIndex() - +Method in class com.sleepycat.db.LockNotGrantedException +
Returns -1 when Environment.getLock was called, and + returns the index of the failed LockRequest when Environment.lockVector was called. +
getInitialValue() - +Method in class com.sleepycat.db.SequenceConfig +
Return the initial value for a sequence. +
getInitializeCDB() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the database environment is configured for the Concurrent + Data Store product. +
getInitializeCache() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the database environment is configured with a shared + memory buffer pool. +
getInitializeLocking() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the database environment is configured for locking. +
getInitializeLogging() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the database environment is configured for logging. +
getInitializeRegions() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the database environment has been configured to page-fault + shared regions into memory when initially creating or joining a + database environment. +
getInitializeReplication() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the database environment is configured for replication. +
getInstance(Environment) - +Static method in class com.sleepycat.collections.CurrentTransaction
Gets the CurrentTransaction accessor for a specified Berkeley DB environment. -
getKey() - -Method in class com.sleepycat.bdb.collection.MapEntry +
getIntPages() - +Method in class com.sleepycat.db.BtreeStats +
The number of database internal pages. +
getIntPagesFree() - +Method in class com.sleepycat.db.BtreeStats +
The number of bytes free in database internal pages. +
getJoinEnvironment() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return whether the handle is configured to join an existing environment. +
getKBytes() - +Method in class com.sleepycat.db.CheckpointConfig +
Return the checkpoint log data threshold, in kilobytes. +
getKey() - +Method in class com.sleepycat.collections.MapEntryParameter
Returns the key of this entry. -
getKeyAssigner() - -Method in class com.sleepycat.bdb.DataStore -
Returns the key assigner associated with this store. -
getKeyBinding() - -Method in class com.sleepycat.bdb.DataView -
Returns the key binding that is used. -
getKeyExtractor() - -Method in class com.sleepycat.bdb.DataIndex -
Returns the key extractor associated with this index. -
getKeyFormat() - -Method in class com.sleepycat.bdb.DataStore -
Returns the key format associated with this store. -
getKeyFormat() - -Method in class com.sleepycat.bdb.DataIndex -
Returns the key format associated with this index. -
getKeyFormat() - -Method in interface com.sleepycat.bdb.bind.EntityBinding -
Returns the format used for the key data of this binding. -
getKeyFormat() - -Method in class com.sleepycat.bdb.bind.serial.TupleSerialBinding -
  -
getKeyFormat() - -Method in class com.sleepycat.bdb.bind.serial.SerialSerialBinding -
  -
getKeyFormat() - -Method in class com.sleepycat.bdb.bind.tuple.TupleTupleBinding -
  -
getLock() - -Method in class com.sleepycat.db.DbLockRequest -
The DbLockRequest.getLock method returns the lock reference. -
getLock() - -Method in class com.sleepycat.db.DbLockNotGrantedException -
The getLock method returns null when DbEnv.lockGet was called, and returns the lock in the failed DbLockRequest when DbEnv.lockVector was called. -
getLockConflicts() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getLockConflicts method returns the conflicts array. -
getLockDetect() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getLockDetect method returns the deadlock detector configuration. -
getLockForWrite() - -Method in class com.sleepycat.bdb.collection.StoredIterator -
Returns whether write-locks will be obtained when reading with this - cursor. -
getLockMaxLockers() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getLockMaxLockers method returns the maximum number of lockers. -
getLockMaxLocks() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getLockMaxLocks method returns the maximum number of locks. -
getLockMaxObjects() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getLockMaxObjects method returns the maximum number of locked objects. -
getLogBufferSize() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getLogBufferSize method returns the size of the log buffer, in bytes. -
getLogDir() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getLogDir method returns the log directory. -
getLogMax() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getLogMax method returns the maximum log file size. -
getLogRegionMax() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getLogRegionMax method returns the size of the underlying logging subsystem region. -
getMaxRetries() - -Method in class com.sleepycat.bdb.TransactionRunner +
getKey() - +Method in class com.sleepycat.db.Sequence +
Return the DatabaseEntry used to open this sequence. +
getKeyCreator(Class, String) - +Method in class com.sleepycat.collections.TupleSerialFactory +
Creates a SecondaryKeyCreator object for use in configuring + a SecondaryDatabase. +
getKeyCreator() - +Method in class com.sleepycat.db.SecondaryConfig +
Returns the user-supplied object used for creating secondary keys. +
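A sketch of supplying such a key creator, assuming the SecondaryKeyCreator interface and the SecondaryConfig.setKeyCreator setter that correspond to this accessor; the four-byte key layout is made up for illustration:

    import com.sleepycat.db.DatabaseEntry;
    import com.sleepycat.db.DatabaseException;
    import com.sleepycat.db.SecondaryDatabase;
    import com.sleepycat.db.SecondaryKeyCreator;

    // Hypothetical key creator: the secondary key is the first four bytes of
    // each primary data item.  Real applications derive it from their record format.
    class FirstFourBytesKeyCreator implements SecondaryKeyCreator {
        public boolean createSecondaryKey(SecondaryDatabase secondary,
                                          DatabaseEntry key,
                                          DatabaseEntry data,
                                          DatabaseEntry result)
                throws DatabaseException {
            byte[] secKey = new byte[4];
            System.arraycopy(data.getData(), data.getOffset(), secKey, 0, 4);
            result.setData(secKey);
            return true;   // returning false would mean "no secondary key for this record"
        }
    }

The object would typically be installed with the matching SecondaryConfig.setKeyCreator() setter before the secondary database is opened, and is then what getKeyCreator() returns.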
getKeyRange(Transaction, DatabaseEntry) - +Method in class com.sleepycat.db.Database +
Return an estimate of the proportion of keys in the database less + than, equal to, and greater than the specified key. +
getLSN() - +Method in class com.sleepycat.db.ReplicationStatus +
Whenever processing a message results in the processing of messages + that are permanent, or a message carrying a DB_REP_PERMANENT flag + was processed successfully, but was not written to disk, the LSN of + the record is available from the getLSN method. +
getLast(DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.Cursor +
Move the cursor to the last key/data pair of the database, and return +that pair. +
getLast(LogSequenceNumber, DatabaseEntry) - +Method in class com.sleepycat.db.LogCursor +
Return the last LogSequenceNumber and log record. +
getLast(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.SecondaryCursor +
Move the cursor to the last key/data pair of the database, and return +that pair. +
getLastCkp() - +Method in class com.sleepycat.db.TransactionStats +
The LSN of the last checkpoint. +
getLastTxnId() - +Method in class com.sleepycat.db.TransactionStats +
The last transaction ID allocated. +
getLastValue() - +Method in class com.sleepycat.db.SequenceStats +
The last cached value of the sequence. +
getLeafPages() - +Method in class com.sleepycat.db.BtreeStats +
The number of database leaf pages. +
getLeafPagesFree() - +Method in class com.sleepycat.db.BtreeStats +
The number of bytes free in database leaf pages. +
getLevels() - +Method in class com.sleepycat.db.BtreeStats +
The number of levels in the database. +
getLgBSize() - +Method in class com.sleepycat.db.LogStats +
The in-memory log record cache size. +
getLgSize() - +Method in class com.sleepycat.db.LogStats +
The current log file size. +
getLock(int, boolean, DatabaseEntry, LockRequestMode) - +Method in class com.sleepycat.db.Environment +
Acquire a lock from the lock table. +
getLock() - +Method in class com.sleepycat.db.LockNotGrantedException +
Returns null when Environment.getLock was called, and + returns the lock in the failed LockRequest when Environment.lockVector was called. +
getLock() - +Method in class com.sleepycat.db.LockRequest +
Return the lock reference. +
getLockConflicts() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the locking conflicts matrix. +
getLockDetectMode() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the deadlock detector is configured to run whenever a lock + conflict occurs. +
getLockDown() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the database environment is configured to lock shared + environment files and memory-mapped databases into memory. +
getLockStats(StatsConfig) - +Method in class com.sleepycat.db.Environment +
Return the database environment's locking statistics. +
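A sketch of requesting the fast-path locking statistics via StatsConfig, using counters that appear elsewhere in this index (getNumLocks, getNumDeadlocks, getNumLockTimeouts):

    import com.sleepycat.db.DatabaseException;
    import com.sleepycat.db.Environment;
    import com.sleepycat.db.LockStats;
    import com.sleepycat.db.StatsConfig;

    class LockStatsSketch {
        // Fetch locking statistics, skipping the expensive-to-collect values.
        static void printLockStats(Environment env) throws DatabaseException {
            StatsConfig config = new StatsConfig();
            config.setFast(true);                       // the option getFast() reports
            LockStats stats = env.getLockStats(config);
            System.out.println("current locks: " + stats.getNumLocks());
            System.out.println("deadlocks:     " + stats.getNumDeadlocks());
            System.out.println("lock timeouts: " + stats.getNumLockTimeouts());
        }
    }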
getLockTimeout() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the database environment lock timeout value, in microseconds; + a timeout of 0 means no timeout is set. +
getLockTimeout() - +Method in class com.sleepycat.db.LockStats +
Lock timeout value. +
getLogAutoRemove() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the system has been configured to automatically remove log + files that are no longer needed. +
getLogBufferSize() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the size of the in-memory log buffer, in bytes. +
getLogDirectory() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the path of a directory to be used as the location of logging files. +
getLogDuplicated() - +Method in class com.sleepycat.db.ReplicationStats +
The number of duplicate log records received. +
getLogFileName(LogSequenceNumber) - +Method in class com.sleepycat.db.Environment +
Return the name of the log file that contains the log record + specified by a LogSequenceNumber object. +
getLogInMemory() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the database environment is configured to maintain transaction logs + in memory rather than on disk. +
getLogQueued() - +Method in class com.sleepycat.db.ReplicationStats +
The number of log records currently queued. +
getLogQueuedMax() - +Method in class com.sleepycat.db.ReplicationStats +
The maximum number of log records ever queued at once. +
getLogQueuedTotal() - +Method in class com.sleepycat.db.ReplicationStats +
The total number of log records queued. +
getLogRecordHandler() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the handler for application-specific log records. +
getLogRecords() - +Method in class com.sleepycat.db.ReplicationStats +
The number of log records received and appended to the log. +
getLogRegionSize() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the size of the underlying logging subsystem region. +
getLogRequested() - +Method in class com.sleepycat.db.ReplicationStats +
The number of log records missed and requested. +
getLogStats(StatsConfig) - +Method in class com.sleepycat.db.Environment +
Return the database environment's logging statistics. +
getLsn() - +Method in class com.sleepycat.db.TransactionStats.Active +
The log sequence number of the transaction's first log record. +
getMMapSize() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the maximum file size, in bytes, for a file to be mapped into + the process address space. +
getMagic() - +Method in class com.sleepycat.db.BtreeStats +
The magic number that identifies the file as a Btree database. +
getMagic() - +Method in class com.sleepycat.db.HashStats +
The magic number that identifies the file as a Hash file. +
getMagic() - +Method in class com.sleepycat.db.LogStats +
The magic number that identifies a file as a log file. +
getMagic() - +Method in class com.sleepycat.db.QueueStats +
The magic number that identifies the file as a Queue file. +
getMap() - +Method in class com.sleepycat.db.CacheFileStats +
Requested pages mapped into the process' address space. +
getMap() - +Method in class com.sleepycat.db.CacheStats +
Requested pages mapped into the process' address space (there is no + available information about whether or not this request caused disk I/O, + although examining the application page fault rate may be helpful). +
getMaster() - +Method in class com.sleepycat.db.ReplicationStats +
The current master environment ID. +
getMasterChanges() - +Method in class com.sleepycat.db.ReplicationStats +
The number of times the master has changed. +
getMax() - +Method in class com.sleepycat.db.SequenceStats +
The maximum permitted value of the sequence. +
getMaxCommitperflush() - +Method in class com.sleepycat.db.LogStats +
The maximum number of commits contained in a single log flush. +
getMaxKey() - +Method in class com.sleepycat.db.BtreeStats +
The maximum keys per page. +
getMaxLockObjects() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the maximum number of locked objects. +
getMaxLockers() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the maximum number of lockers. +
getMaxLockers() - +Method in class com.sleepycat.db.LockStats +
The maximum number of lockers possible. +
getMaxLocks() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the maximum number of locks. +
getMaxLocks() - +Method in class com.sleepycat.db.LockStats +
The maximum number of locks possible. +
getMaxLogFileSize() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the maximum size of a single file in the log, in bytes. +
getMaxNactive() - +Method in class com.sleepycat.db.TransactionStats +
The maximum number of active transactions at any one time. +
getMaxNlockers() - +Method in class com.sleepycat.db.LockStats +
The maximum number of lockers at any one time. +
getMaxNlocks() - +Method in class com.sleepycat.db.LockStats +
The maximum number of locks at any one time. +
getMaxNobjects() - +Method in class com.sleepycat.db.LockStats +
The maximum number of lock objects at any one time. +
getMaxObjects() - +Method in class com.sleepycat.db.LockStats +
The maximum number of lock objects possible. +
getMaxOpenfd() - +Method in class com.sleepycat.db.CacheStats +
Maximum number of open file descriptors. +
getMaxRetries() - +Method in class com.sleepycat.collections.TransactionRunner
Returns the maximum number of retries that will be performed when deadlocks are detected. -
getMaxsize() - -Method in class com.sleepycat.db.DbMpoolFile -
The DbMpoolFile.getMaxsize method returns the maximum size of the backing file, in bytes. -
getMemoryPoolMapSize() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getMemoryPoolMapSize method returns the maximum file map size. -
getMode() - -Method in class com.sleepycat.db.DbLockRequest -
The DbLockRequest.getMode method returns the lock mode. -
getMode() - -Method in class com.sleepycat.db.DbLockNotGrantedException -
The getMode method returns the mode parameter when DbEnv.lockGet was called, and returns the mode for the failed DbLockRequest when DbEnv.lockVector was called. -
getNoWait() - -Method in class com.sleepycat.bdb.TransactionRunner -
Returns whether transactions will throw DbLockNotGrantedException - instead of blocking when trying to access data that is locked by another - transaction. -
getObj() - -Method in class com.sleepycat.db.DbLockRequest -
The DbLockRequest.getObj method returns the object protected by this lock. -
getObj() - -Method in class com.sleepycat.db.DbLockNotGrantedException -
The getObj method returns the object parameter when DbEnv.lockGet was called, and returns the object for the failed DbLockRequest when DbEnv.lockVector was called. -
getObject() - -Method in class com.sleepycat.db.Dbt -
Return an object from the data array, decoding the object using the Java serialization API. -
getOffset() - -Method in class com.sleepycat.db.Dbt +
getMaxTxns() - +Method in class com.sleepycat.db.TransactionStats +
The maximum number of active transactions configured. +
getMaxWrite() - +Method in class com.sleepycat.db.CacheStats +
The maximum number of sequential write operations scheduled by the library + when flushing dirty pages from the cache. +
getMaxWriteSleep() - +Method in class com.sleepycat.db.CacheStats +
The number of microseconds the thread of control should pause before + scheduling further write operations. +
getMaximumSize() - +Method in class com.sleepycat.db.CacheFile +
Return the maximum size for the file backing the database, or 0 if + no maximum file size has been configured. +
getMessageHandler() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the function to be called with an informational message. +
getMessageHandler() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the function to be called with an informational message. +
getMessageStream() - +Method in class com.sleepycat.db.DatabaseConfig +
Return an OutputStream for displaying informational messages. +
getMessageStream() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return an OutputStream for displaying informational messages. +
getMetaFlags() - +Method in class com.sleepycat.db.BtreeStats +
The metadata flags. +
getMetaFlags() - +Method in class com.sleepycat.db.HashStats +
The metadata flags. +
getMetaFlags() - +Method in class com.sleepycat.db.QueueStats +
The metadata flags. +
getMin() - +Method in class com.sleepycat.db.SequenceStats +
The minimum permitted value of the sequence. +
getMinCommitperflush() - +Method in class com.sleepycat.db.LogStats +
The minimum number of commits contained in a single log flush that + contained a commit. +
getMinKey() - +Method in class com.sleepycat.db.BtreeStats +
The minimum keys per page. +
getMinutes() - +Method in class com.sleepycat.db.CheckpointConfig +
Return the checkpoint time threshold, in minutes. +
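A sketch of a conditional checkpoint using the kilobyte and minute thresholds reported by getKBytes() and getMinutes(); the threshold values are arbitrary:

    import com.sleepycat.db.CheckpointConfig;
    import com.sleepycat.db.DatabaseException;
    import com.sleepycat.db.Environment;

    class CheckpointSketch {
        // Checkpoint only if at least 64KB of log data or 5 minutes have
        // accumulated since the last checkpoint.
        static void maybeCheckpoint(Environment env) throws DatabaseException {
            CheckpointConfig config = new CheckpointConfig();
            config.setKBytes(64);    // the value getKBytes() reports
            config.setMinutes(5);    // the value getMinutes() reports
            env.checkpoint(config);
        }
    }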
getMmapSize() - +Method in class com.sleepycat.db.CacheStats +
Maximum file size for mmap. +
getMode() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the mode used to create files. +
getMode() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the mode to use when creating underlying files and shared + memory segments. +
getMode() - +Method in class com.sleepycat.db.LockNotGrantedException +
Returns the mode parameter when Environment.getLock was + called, and returns the mode for the failed LockRequest when + Environment.lockVector was called. +
getMode() - +Method in class com.sleepycat.db.LockRequest +
Return the lock mode. +
getMode() - +Method in class com.sleepycat.db.LogStats +
The mode of any created log files. +
getMsgsBadgen() - +Method in class com.sleepycat.db.ReplicationStats +
The number of messages received with a bad generation number. +
getMsgsProcessed() - +Method in class com.sleepycat.db.ReplicationStats +
The number of messages received and processed. +
getMsgsRecover() - +Method in class com.sleepycat.db.ReplicationStats +
The number of messages ignored due to pending recovery. +
getMsgsSendFailures() - +Method in class com.sleepycat.db.ReplicationStats +
The number of failed message sends. +
getMsgsSent() - +Method in class com.sleepycat.db.ReplicationStats +
The number of messages sent. +
getNaborts() - +Method in class com.sleepycat.db.TransactionStats +
The number of transactions that have aborted. +
getNactive() - +Method in class com.sleepycat.db.TransactionStats +
The number of transactions that are currently active. +
getNewsites() - +Method in class com.sleepycat.db.ReplicationStats +
The number of new site messages received. +
getNext(DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.Cursor +
Move the cursor to the next key/data pair and return that pair. +
getNext(DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.JoinCursor +
Returns the next primary key resulting from the join operation. +
getNext(DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.JoinCursor +
Returns the next primary key and data resulting from the join operation. +
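A sketch of consuming a JoinCursor with getNext, assuming a Database.join(Cursor[], JoinConfig) method produces it and that the secondary cursors have already been positioned on their search keys (for example with getSearchKey):

    import com.sleepycat.db.Cursor;
    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseEntry;
    import com.sleepycat.db.DatabaseException;
    import com.sleepycat.db.JoinCursor;
    import com.sleepycat.db.LockMode;
    import com.sleepycat.db.OperationStatus;
    import com.sleepycat.db.SecondaryCursor;

    class JoinSketch {
        // Equality join over two secondary indices; print the matching primary keys.
        static void printMatches(Database primary, SecondaryCursor byColor,
                                 SecondaryCursor bySize) throws DatabaseException {
            JoinCursor join = primary.join(new Cursor[] { byColor, bySize }, null);
            try {
                DatabaseEntry key = new DatabaseEntry();
                DatabaseEntry data = new DatabaseEntry();
                while (join.getNext(key, data, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
                    System.out.println(new String(key.getData(), key.getOffset(), key.getSize()));
                }
            } finally {
                join.close();
            }
        }
    }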
getNext(LogSequenceNumber, DatabaseEntry) - +Method in class com.sleepycat.db.LogCursor +
Return the next LogSequenceNumber and log record. +
getNext(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.SecondaryCursor +
Move the cursor to the next key/data pair and return that pair. +
getNextDup(DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.Cursor +
If the next key/data pair of the database is a duplicate data record for +the current key/data pair, move the cursor to the next key/data pair +of the database and return that pair. +
getNextDup(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.SecondaryCursor +
If the next key/data pair of the database is a duplicate data record for +the current key/data pair, move the cursor to the next key/data pair +of the database and return that pair. +
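A sketch of iterating the duplicate data items stored under one key by combining getSearchKey with getNextDup, both listed in this index:

    import com.sleepycat.db.Cursor;
    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseEntry;
    import com.sleepycat.db.DatabaseException;
    import com.sleepycat.db.LockMode;
    import com.sleepycat.db.OperationStatus;

    class DuplicateScanSketch {
        // Print every duplicate data item stored under the given key.
        static void scanDuplicates(Database db, byte[] keyBytes) throws DatabaseException {
            Cursor cursor = db.openCursor(null, null);
            try {
                DatabaseEntry key = new DatabaseEntry(keyBytes);
                DatabaseEntry data = new DatabaseEntry();
                OperationStatus status = cursor.getSearchKey(key, data, LockMode.DEFAULT);
                while (status == OperationStatus.SUCCESS) {
                    System.out.println(new String(data.getData(), data.getOffset(), data.getSize()));
                    status = cursor.getNextDup(key, data, LockMode.DEFAULT);
                }
            } finally {
                cursor.close();
            }
        }
    }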
getNextLsn() - +Method in class com.sleepycat.db.ReplicationStats +
In replication environments configured as masters, the next LSN + expected. +
getNextNoDup(DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.Cursor +
Move the cursor to the next non-duplicate key/data pair and return +that pair. +
getNextNoDup(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.SecondaryCursor +
Move the cursor to the next non-duplicate key/data pair and return +that pair. +
getNextPages() - +Method in class com.sleepycat.db.ReplicationStats +
The next page number we expect to receive. +
getNoFile() - +Method in class com.sleepycat.db.CacheFile +
Return if the opening of backing temporary files for in-memory + databases has been disallowed. +
getNoLocking() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the system has been configured to grant all requested mutual + exclusion mutexes and database locks without regard for their actual + availability. +
getNoMMap() - +Method in class com.sleepycat.db.DatabaseConfig +
Return if the library is configured to not map this database into + memory. +
getNoMMap() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the system has been configured to copy read-only database files + into the local cache instead of potentially mapping them into process + memory. +
getNoOrderCheck() - +Method in class com.sleepycat.db.VerifyConfig +
Return if the Database.verify method is configured to skip the + database checks for btree and duplicate sort order and for hashing. +
getNoPanic() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the system has been configured to ignore any panic state in + the database environment. +
getNoSort() - +Method in class com.sleepycat.db.JoinConfig +
Returns whether automatic sorting of the input cursors is disabled. +
getNoSync() - +Method in class com.sleepycat.db.TransactionConfig +
Return if the transaction is configured to not write or synchronously + flush the log when it commits. +
getNoWait() - +Method in class com.sleepycat.db.TransactionConfig +
Return if the transaction is configured to not wait if a lock + request cannot be immediately granted. +
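A sketch of beginning a transaction with the no-wait and no-sync options whose accessors appear above, assuming the corresponding TransactionConfig setters:

    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseEntry;
    import com.sleepycat.db.DatabaseException;
    import com.sleepycat.db.Environment;
    import com.sleepycat.db.Transaction;
    import com.sleepycat.db.TransactionConfig;

    class NoWaitTxnSketch {
        // Store one key/data pair in a transaction that neither blocks on locks
        // nor synchronously flushes the log at commit.
        static void store(Environment env, Database db,
                          DatabaseEntry key, DatabaseEntry data) throws DatabaseException {
            TransactionConfig config = new TransactionConfig();
            config.setNoWait(true);   // the option getNoWait() reports
            config.setNoSync(true);   // the option getNoSync() reports
            Transaction txn = env.beginTransaction(null, config);
            boolean committed = false;
            try {
                db.put(txn, key, data);
                txn.commit();
                committed = true;
            } finally {
                if (!committed) {
                    txn.abort();      // for example after LockNotGrantedException
                }
            }
        }
    }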
getNobjects() - +Method in class com.sleepycat.db.LockStats +
The number of current lock objects. +
getNowait() - +Method in class com.sleepycat.db.SequenceStats +
The number of times that a thread of control was able to obtain the handle + mutex without waiting. +
getNumBegins() - +Method in class com.sleepycat.db.TransactionStats +
The number of transactions that have begun. +
getNumCache() - +Method in class com.sleepycat.db.CacheStats +
Number of caches. +
getNumCommits() - +Method in class com.sleepycat.db.TransactionStats +
The number of transactions that have committed. +
getNumConflicts() - +Method in class com.sleepycat.db.LockStats +
The total number of locks not immediately available due to conflicts. +
getNumData() - +Method in class com.sleepycat.db.BtreeStats +
The number of key/data pairs or records in the database. +
getNumData() - +Method in class com.sleepycat.db.HashStats +
The number of key/data pairs in the database. +
getNumData() - +Method in class com.sleepycat.db.QueueStats +
The number of records in the database. +
getNumDeadlocks() - +Method in class com.sleepycat.db.LockStats +
The number of deadlocks. +
getNumKeys() - +Method in class com.sleepycat.db.BtreeStats +
The number of keys or records in the database. +
getNumKeys() - +Method in class com.sleepycat.db.HashStats +
The number of unique keys in the database. +
getNumKeys() - +Method in class com.sleepycat.db.QueueStats +
The number of records in the database. +
getNumLockTimeouts() - +Method in class com.sleepycat.db.LockStats +
The number of lock requests that have timed out. +
getNumLockers() - +Method in class com.sleepycat.db.LockStats +
The number of current lockers. +
getNumLocks() - +Method in class com.sleepycat.db.LockStats +
The number of current locks. +
getNumModes() - +Method in class com.sleepycat.db.LockStats +
The number of lock modes. +
getNumNowaits() - +Method in class com.sleepycat.db.LockStats +
The total number of lock requests failing because DB_LOCK_NOWAIT was + set. +
getNumReleases() - +Method in class com.sleepycat.db.LockStats +
The total number of locks released. +
getNumRequests() - +Method in class com.sleepycat.db.LockStats +
The total number of locks requested. +
getNumRestores() - +Method in class com.sleepycat.db.TransactionStats +
The number of transactions that have been restored. +
getNumSites() - +Method in class com.sleepycat.db.ReplicationStats +
The number of sites believed to be in the replication group. +
getNumThrottles() - +Method in class com.sleepycat.db.ReplicationStats +
Transmission limited: the number of times data transmission was throttled. +
getNumTxnTimeouts() - +Method in class com.sleepycat.db.LockStats +
The number of transactions that have timed out. +
getObj() - +Method in class com.sleepycat.db.LockNotGrantedException +
Returns the object parameter when Environment.getLock was + called, and returns the object for the failed LockRequest when + Environment.lockVector was called. +
getObj() - +Method in class com.sleepycat.db.LockRequest +
Return the lock object. +
getOffset() - +Method in class com.sleepycat.db.DatabaseEntry
Return the byte offset into the data array. -
getOffset() - -Method in class com.sleepycat.db.DbLsn -
The DbLsn.getOffset method returns the DbLsn object's file offset. -
getOp() - -Method in class com.sleepycat.db.DbLockRequest -
The DbLockRequest.getOp method returns the operation. -
getOp() - -Method in class com.sleepycat.db.DbLockNotGrantedException -
The getOp method returns 0 when DbEnv.lockGet was called, and returns the op for the failed DbLockRequest when DbEnv.lockVector was called. -
getOpenFlags() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getOpenFlags method returns the open method flags. -
getOpenFlags() - -Method in class com.sleepycat.db.Db -
The Db.getOpenFlags method returns the current open method flags. -
getPageSize() - -Method in class com.sleepycat.db.Db -
The Db.getPageSize method returns the page size. -
getPartialLength() - -Method in class com.sleepycat.db.Dbt -
Return the length of the partial record, in bytes. -
getPartialOffset() - -Method in class com.sleepycat.db.Dbt -
Return the offset of the partial record, in bytes. -
getPrimaryKeyFormat() - -Method in interface com.sleepycat.bdb.bind.KeyExtractor -
Returns the format of the primary key data or null if the index key data - is not derived from the primary key data. -
getPrimaryKeyFormat() - -Method in class com.sleepycat.bdb.bind.serial.TupleSerialKeyExtractor -
  -
getPrimaryKeyFormat() - -Method in class com.sleepycat.bdb.bind.serial.SerialSerialKeyExtractor -
  -
getPrimaryKeyFormat() - -Method in class com.sleepycat.bdb.bind.tuple.TupleTupleKeyExtractor -
  -
getPrimitiveBinding(Class, TupleFormat) - -Static method in class com.sleepycat.bdb.bind.tuple.TupleBinding +
getOffset() - +Method in class com.sleepycat.db.LogSequenceNumber +
Return the file offset component of the LogSequenceNumber. +
getOp() - +Method in class com.sleepycat.db.LockNotGrantedException +
Returns 0 when Environment.getLock was called, and returns + the op parameter for the failed LockRequest when Environment.lockVector was called. +
getOp() - +Method in class com.sleepycat.db.LockRequest +
Return the lock operation. +
getOrderCheckOnly() - +Method in class com.sleepycat.db.VerifyConfig +
Return if the Database.verify method is configured to do the database + checks for btree and duplicate sort order and for hashing that are skipped + by verification operations configured by VerifyConfig.setNoOrderCheck. +
getOutdated() - +Method in class com.sleepycat.db.ReplicationStats +
The number of outdated conditions detected. +
getOverPages() - +Method in class com.sleepycat.db.BtreeStats +
The number of database overflow pages. +
getOverPagesFree() - +Method in class com.sleepycat.db.BtreeStats +
The number of bytes free in database overflow pages. +
getOverflows() - +Method in class com.sleepycat.db.HashStats +
The number of overflow pages. +
getOverwrite() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the system has been configured to overwrite files stored in + encrypted formats before deleting them. +
getOvflFree() - +Method in class com.sleepycat.db.HashStats +
The number of bytes free on overflow pages. +
getPageClean() - +Method in class com.sleepycat.db.CacheStats +
Clean pages currently in the cache. +
getPageCreate() - +Method in class com.sleepycat.db.CacheFileStats +
Pages created in the cache. +
getPageCreate() - +Method in class com.sleepycat.db.CacheStats +
Pages created in the cache. +
getPageDirty() - +Method in class com.sleepycat.db.CacheStats +
Dirty pages currently in the cache. +
getPageIn() - +Method in class com.sleepycat.db.CacheFileStats +
Pages read into the cache. +
getPageIn() - +Method in class com.sleepycat.db.CacheStats +
Pages read into the cache. +
getPageOut() - +Method in class com.sleepycat.db.CacheFileStats +
Pages written from the cache to the backing file. +
getPageOut() - +Method in class com.sleepycat.db.CacheStats +
Pages written from the cache to the backing file. +
getPageSize() - +Method in class com.sleepycat.db.BtreeStats +
The underlying database page size, in bytes. +
getPageSize() - +Method in class com.sleepycat.db.CacheFileStats +
Page size in bytes. +
getPageSize() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the size of the pages used to hold items in the database, in bytes. +
getPageSize() - +Method in class com.sleepycat.db.HashStats +
The underlying Hash database page (and bucket) size, in bytes. +
getPageSize() - +Method in class com.sleepycat.db.QueueStats +
The underlying database page size, in bytes. +
getPageTrickle() - +Method in class com.sleepycat.db.CacheStats +
Dirty pages written using Environment.trickleCacheWrite. +
getPages() - +Method in class com.sleepycat.db.CacheStats +
Pages in the cache. +
getPages() - +Method in class com.sleepycat.db.QueueStats +
The number of pages in the database. +
getPagesDuplicated() - +Method in class com.sleepycat.db.ReplicationStats +
The number of duplicate pages received. +
getPagesFree() - +Method in class com.sleepycat.db.QueueStats +
The number of bytes free in database pages. +
getPagesRecords() - +Method in class com.sleepycat.db.ReplicationStats +
The number of pages received and stored. +
getPagesRequested() - +Method in class com.sleepycat.db.ReplicationStats +
The number of pages missed and requested from the master. +
getPanicHandler() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the function to be called if the database environment panics. +
getPanicHandler() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the function to be called if the database environment panics. +
getParentId() - +Method in class com.sleepycat.db.TransactionStats.Active +
The transaction ID of the parent transaction (or 0, if no parent). +
getPartial() - +Method in class com.sleepycat.db.DatabaseEntry +
Return whether this DatabaseEntry is configured to read or write partial + records. +
getPartialLength() - +Method in class com.sleepycat.db.DatabaseEntry +
Return the byte length of the partial record being read or written by + the application, in bytes. +
getPartialOffset() - +Method in class com.sleepycat.db.DatabaseEntry +
Return the offset of the partial record being read or written by the + application, in bytes. +
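A sketch of a partial read whose configuration these accessors would report, assuming DatabaseEntry.setPartial(offset, length, partial); the offsets are arbitrary:

    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseEntry;
    import com.sleepycat.db.DatabaseException;
    import com.sleepycat.db.LockMode;
    import com.sleepycat.db.OperationStatus;

    class PartialReadSketch {
        // Read only bytes 10..49 of the record stored under the given key.
        static byte[] readMiddle(Database db, DatabaseEntry key) throws DatabaseException {
            DatabaseEntry data = new DatabaseEntry();
            data.setPartial(10, 40, true);   // offset 10, length 40: the values
                                             // getPartialOffset()/getPartialLength() then report
            if (db.get(null, key, data, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
                return data.getData();       // holds at most the 40 requested bytes
            }
            return null;
        }
    }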
getPrev(DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.Cursor +
Move the cursor to the previous key/data pair and return that pair. +
getPrev(LogSequenceNumber, DatabaseEntry) - +Method in class com.sleepycat.db.LogCursor +
Return the previous LogSequenceNumber and log record. +
getPrev(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.SecondaryCursor +
Move the cursor to the previous key/data pair and return that pair. +
getPrevDup(DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.Cursor +
If the previous key/data pair of the database is a duplicate data record +for the current key/data pair, move the cursor to the previous key/data +pair of the database and return that pair. +
getPrevDup(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.SecondaryCursor +
If the previous key/data pair of the database is a duplicate data record +for the current key/data pair, move the cursor to the previous key/data +pair of the database and return that pair. +
getPrevNoDup(DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.Cursor +
Move the cursor to the previous non-duplicate key/data pair and return +that pair. +
getPrevNoDup(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.SecondaryCursor +
Move the cursor to the previous non-duplicate key/data pair and return +that pair. +
getPrimaryDatabase() - +Method in class com.sleepycat.db.SecondaryDatabase +
Returns the primary database associated with this secondary database. +
getPrimitiveBinding(Class) - +Static method in class com.sleepycat.bind.tuple.TupleBinding
Creates a tuple binding for a primitive Java class. -
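A sketch of using the primitive binding listed above to move an Integer through a DatabaseEntry:

    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.db.DatabaseEntry;

    class PrimitiveBindingSketch {
        public static void main(String[] args) {
            TupleBinding binding = TupleBinding.getPrimitiveBinding(Integer.class);
            DatabaseEntry entry = new DatabaseEntry();
            binding.objectToEntry(new Integer(42), entry);           // Integer -> bytes
            Integer value = (Integer) binding.entryToObject(entry);  // bytes -> Integer
            System.out.println(value);                               // prints 42
        }
    }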
getPriority() - -Method in class com.sleepycat.db.DbMpoolFile -
The DbMpoolFile.getPriority method returns the cache priority. -
getQueueExtentSize() - -Method in class com.sleepycat.db.Db -
The Db.getQueueExtentSize method returns the number of pages in an extent. -
getRecordDelimiter() - -Method in class com.sleepycat.db.Db -
The Db.getRecordDelimiter method returns the delimiting byte. -
getRecordLength() - -Method in class com.sleepycat.db.Db -
The Db.getRecordLength method returns the record length. -
getRecordNumber() - -Method in class com.sleepycat.db.Dbt -
Return an object from the data array, expecting that data to be a logical record number. -
getRecordPad() - -Method in class com.sleepycat.db.Db -
The Db.getRecordPad method returns the pad character. -
getRecordSource() - -Method in class com.sleepycat.db.Db -
The Db.getRecordSource method returns the source file. -
getReplicationLimit() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getReplicationLimit method returns the transmit limit in bytes. -
getSegmentId() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getSegmentId method returns the base segment ID. -
getSingleKeyThang() - -Method in class com.sleepycat.bdb.DataView -
Returns the key thang for a single key range, or null if a single key - range is not used. -
getSize() - -Method in class com.sleepycat.db.Dbt -
Return the data array size. -
getStore() - -Method in class com.sleepycat.bdb.DataView -
Returns the store, as specified to the constructor. -
getStore() - -Method in class com.sleepycat.bdb.DataIndex -
Returns the store associated with this index. -
getStreamHeader() - -Static method in class com.sleepycat.bdb.bind.serial.SerialOutput +
getPrintable() - +Method in class com.sleepycat.db.VerifyConfig +
Return if the Database.verify method is configured to use printing + characters where possible. +
getPriority() - +Method in class com.sleepycat.db.CacheFile +
Return the cache priority for pages from the specified file. +
getPrivate() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the database environment is configured to only be accessed + by a single process. +
getQueueExtentSize() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the size of the extents used to hold pages in a Queue database, + specified as a number of pages. +
getQueueInOrder() - +Method in class com.sleepycat.db.DatabaseConfig +
Return if the Database.consume method is configured to return + key/data pairs in order, always returning the key/data item from the + head of the queue. +
getRangeMax() - +Method in class com.sleepycat.db.SequenceConfig +
Return the maximum value for the sequence. +
getRangeMin() - +Method in class com.sleepycat.db.SequenceConfig +
Return the minimum value for the sequence. +
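A sketch of allocating a value from a named sequence, assuming Database.openSequence and Sequence.get as listed in this index; the key name "order_id" is made up for illustration:

    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseEntry;
    import com.sleepycat.db.DatabaseException;
    import com.sleepycat.db.Sequence;
    import com.sleepycat.db.SequenceConfig;

    class SequenceSketch {
        // Allocate the next value from a sequence stored under the key "order_id".
        static long nextOrderId(Database db) throws DatabaseException {
            SequenceConfig config = new SequenceConfig();
            config.setAllowCreate(true);     // create the sequence if it does not exist
            config.setInitialValue(1);       // the value getInitialValue() reports
            DatabaseEntry key = new DatabaseEntry("order_id".getBytes());
            Sequence seq = db.openSequence(null, key, config);
            try {
                return seq.get(null, 1);     // atomically allocate one value
            } finally {
                seq.close();
            }
        }
    }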
getReLen() - +Method in class com.sleepycat.db.BtreeStats +
The length of fixed-length records. +
getReLen() - +Method in class com.sleepycat.db.QueueStats +
The length of the records. +
getRePad() - +Method in class com.sleepycat.db.BtreeStats +
The padding byte value for fixed-length records. +
getRePad() - +Method in class com.sleepycat.db.QueueStats +
The padding byte value for the records. +
getReadOnly() - +Method in class com.sleepycat.db.DatabaseConfig +
Return if the database is configured in read-only mode. +
getReadOnly() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return whether the environment handle is opened read-only. +
getRecordDelimiter() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the delimiting byte used to mark the end of a record in the + backing source file for the Recno access method. +
getRecordLength() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the database record length, in bytes. +
getRecordNumber(DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.Cursor +
Return the record number associated with the cursor. +
getRecordNumber() - +Method in class com.sleepycat.db.DatabaseEntry +
Return the record number encoded in this entry's buffer. +
getRecordNumber(DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.SecondaryCursor +
Return the record number associated with the cursor. +
getRecordNumberAppender() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the function to call after the record number has been + selected but before the data has been stored into the database. +
getRecordPad() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the padding character for short, fixed-length records for the + Queue and Recno access methods. +
getRecordSource() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the name of an underlying flat text database file that is + read to initialize a transient record number index. +
getRegSize() - +Method in class com.sleepycat.db.CacheStats +
Individual cache size. +
getRegSize() - +Method in class com.sleepycat.db.LockStats +
The size of the lock region. +
getRegSize() - +Method in class com.sleepycat.db.LogStats +
The size of the region. +
getRegSize() - +Method in class com.sleepycat.db.TransactionStats +
The size of the region. +
getRegionNowait() - +Method in class com.sleepycat.db.CacheStats +
The number of times that a thread of control was able to obtain a + region lock without waiting. +
getRegionNowait() - +Method in class com.sleepycat.db.LockStats +
The number of times that a thread of control was able to obtain the + region lock without waiting. +
getRegionNowait() - +Method in class com.sleepycat.db.LogStats +
The number of times that a thread of control was able to obtain the + region lock without waiting. +
getRegionNowait() - +Method in class com.sleepycat.db.TransactionStats +
The number of times that a thread of control was able to obtain the + region lock without waiting. +
getRegionWait() - +Method in class com.sleepycat.db.CacheStats +
The number of times that a thread of control was forced to wait + before obtaining a region lock. +
getRegionWait() - +Method in class com.sleepycat.db.LockStats +
The number of times that a thread of control was forced to wait + before obtaining the region lock. +
getRegionWait() - +Method in class com.sleepycat.db.LogStats +
The number of times that a thread of control was forced to wait + before obtaining the region lock. +
getRegionWait() - +Method in class com.sleepycat.db.TransactionStats +
The number of times that a thread of control was forced to wait + before obtaining the region lock. +
getRenumbering() - +Method in class com.sleepycat.db.DatabaseConfig +
Return if the logical record numbers are mutable, and change as + records are added to and deleted from the database. +
getReplicationLimit() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the transmit limit in bytes for a single call to + Environment.processReplicationMessage. +
getReplicationStats(StatsConfig) - +Method in class com.sleepycat.db.Environment +
Return the database environment's replication statistics. +
getReplicationTransport() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the replication callback function used to transmit data using + the replication application's communication infrastructure. +
getReuseBuffer() - +Method in class com.sleepycat.db.DatabaseEntry +
Return whether the entry is configured to reuse the buffer. +
getReverseSplitOff() - +Method in class com.sleepycat.db.DatabaseConfig +
Return if the Btree has been configured to not do reverse splits. +
getRoEvict() - +Method in class com.sleepycat.db.CacheStats +
Clean pages forced from the cache. +
getRunFatalRecovery() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return whether the handle is configured to run catastrophic recovery on + the database environment before opening it for use. +
getRunRecovery() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return whether the handle is configured to run normal recovery on the + database environment before opening it for use. +
getRwEvict() - +Method in class com.sleepycat.db.CacheStats +
Dirty pages forced from the cache. +
getSCount() - +Method in class com.sleepycat.db.LogStats +
The number of times the log has been flushed to disk. +
getSalvage() - +Method in class com.sleepycat.db.VerifyConfig +
Return if the Database.verify method is configured to write the + key/data pairs from all databases in the file to the file stream + named by the outfile parameter. +
getSearchBoth(DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.Cursor +
Move the cursor to the specified key/data pair, where both the key and +data items must match. +
getSearchBoth(Transaction, DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.Database +
Retrieves the key/data pair with the given key and data value, that is, both +the key and data items must match. +
getSearchBoth(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.SecondaryCursor +
Move the cursor to the specified secondary and primary key, where both +the primary and secondary key items must match. +
getSearchBoth(Transaction, DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.SecondaryDatabase +
Retrieves the key/data pair with the specified secondary and primary key, that +is, both the primary and secondary key items must match. +
getSearchBothRange(DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.Cursor +
Move the cursor to the specified key and closest matching data item of the +database. +
getSearchBothRange(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.SecondaryCursor +
Move the cursor to the specified secondary key and closest matching primary +key of the database. +
getSearchKey(DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.Cursor +
Move the cursor to the given key of the database, and return the datum +associated with the given key. +
getSearchKey(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.SecondaryCursor +
Move the cursor to the given key of the database, and return the datum +associated with the given key. +
getSearchKeyRange(DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.Cursor +
Move the cursor to the closest matching key of the database, and return +the data item associated with the matching key. +
getSearchKeyRange(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.SecondaryCursor +
Move the cursor to the closest matching key of the database, and return +the data item associated with the matching key. +
getSearchRecordNumber(DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.Cursor +
Move the cursor to the specific numbered record of the database, and +return the associated key/data pair. +
getSearchRecordNumber(Transaction, DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.Database +
Retrieves the key/data pair associated with the specific numbered record of the database. +
getSearchRecordNumber(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.SecondaryCursor +
Move the cursor to the specific numbered record of the database, and +return the associated key/data pair. +
getSearchRecordNumber(Transaction, DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - +Method in class com.sleepycat.db.SecondaryDatabase +
Retrieves the key/data pair associated with the specific numbered record of the database. +
getSecondaryConfig() - +Method in class com.sleepycat.db.SecondaryDatabase +
Returns a copy of the secondary configuration of this database. +
getSecondaryDatabase() - +Method in class com.sleepycat.db.SecondaryCursor +
Return the SecondaryDatabase handle associated with this Cursor. +
getSegmentId() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the base segment ID. +
getSize() - +Method in class com.sleepycat.db.DatabaseEntry +
Return the byte size of the data array. +
getSnapshot() - +Method in class com.sleepycat.db.DatabaseConfig +
Return if any specified backing source file will be read in its + entirety when the database is opened. +
getSortedDuplicates() - +Method in class com.sleepycat.db.DatabaseConfig +
Return if the database is configured to support sorted duplicate data + items. +
getStartupComplete() - +Method in class com.sleepycat.db.ReplicationStats +
The client site has completed its startup procedures and is now + handling live records from the master. +
getStats(Transaction, StatsConfig) - +Method in class com.sleepycat.db.Database +
Return database statistics. +
getStats(StatsConfig) - +Method in class com.sleepycat.db.Sequence +
Return statistical information about the sequence. +
getStatus() - +Method in class com.sleepycat.db.ReplicationStats +
The current replication mode. +
getStreamHeader() - +Static method in class com.sleepycat.bind.serial.SerialOutput
Returns the fixed stream header used for all serialized streams in PROTOCOL_VERSION_2 format. -
getTestAndSetSpins() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getTestAndSetSpins method returns the test-and-set spin count. -
getTimeout() - -Method in class com.sleepycat.db.DbLockRequest -
The DbLockRequest.getTimeout method returns the lock timeout value. -
getTimeout(int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getTimeout method returns a timeout value, in microseconds. -
getTmpDir() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getTmpDir method returns the database environment temporary file directory. -
getTxn() - -Method in class com.sleepycat.bdb.CurrentTransaction +
getSync() - +Method in class com.sleepycat.db.TransactionConfig +
Return if the transaction is configured to write and synchronously + flush the log when it commits. +
getSystemMemory() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the database environment is configured to allocate memory + from system shared memory instead of from memory backed by the + filesystem. +
getTemporaryDirectory() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the path of a directory to be used as the location of + temporary files. +
getTestAndSetSpins() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the number of times test-and-set mutexes should spin before + blocking. +
getThreaded() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the handle is configured to be free-threaded. +
getTimeCkp() - +Method in class com.sleepycat.db.TransactionStats +
The time the last completed checkpoint finished (as the number of + seconds since the Epoch, returned by the IEEE/ANSI Std 1003.1 + (POSIX) time interface). +
getTimeout() - +Method in class com.sleepycat.db.LockRequest +
Return the lock timeout value. +
getTransaction() - +Method in class com.sleepycat.collections.CurrentTransaction
Returns the transaction associated with the current thread for this environment, or null if no transaction is active. -
getTxnMax() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getTxnMax method returns the number of active transactions. -
getTxnTimestamp() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getTxnTimestamp method returns the recovery timestamp. -
getUserBufferLength() - -Method in class com.sleepycat.db.Dbt -
Return the length in bytes of the user-specified buffer. -
getValue() - -Method in class com.sleepycat.bdb.collection.MapEntry +
getTransaction() - +Method in class com.sleepycat.db.PreparedTransaction +
Return the transaction handle for the transaction. +
getTransactionConfig() - +Method in class com.sleepycat.collections.TransactionRunner +
Returns the transaction configuration used for calling + Environment.beginTransaction(com.sleepycat.db.Transaction, com.sleepycat.db.TransactionConfig). +
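A sketch of running a unit of work under TransactionRunner with deadlock retries, assuming a TransactionWorker callback interface with a doWork() method; the retry count of 10 is arbitrary:

    import com.sleepycat.collections.TransactionRunner;
    import com.sleepycat.collections.TransactionWorker;
    import com.sleepycat.db.Environment;

    class RunnerSketch {
        // Run a unit of work inside a transaction, retrying automatically when
        // a deadlock is detected; 10 is then the value getMaxRetries() reports.
        static void runWork(Environment env, final Runnable work) throws Exception {
            TransactionRunner runner = new TransactionRunner(env);
            runner.setMaxRetries(10);
            runner.run(new TransactionWorker() {
                public void doWork() throws Exception {
                    work.run();   // all database access here joins the same transaction
                }
            });
        }
    }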
getTransactionNotDurable() - +Method in class com.sleepycat.db.DatabaseConfig +
Return if the database environment is configured to not write log + records for this database. +
getTransactionStats(StatsConfig) - +Method in class com.sleepycat.db.Environment +
Return the database environment's transactional statistics. +
getTransactional() - +Method in class com.sleepycat.db.DatabaseConfig +
Return if the database open is enclosed within a transaction. +
getTransactional() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the database environment is configured for transactions. +
getTruncate() - +Method in class com.sleepycat.db.DatabaseConfig +
Return if the database has been configured to be physically truncated + by truncating the underlying file, discarding all previous databases + it might have held. +
getTxnId() - +Method in class com.sleepycat.db.TransactionStats.Active +
The transaction ID of the transaction. +
getTxnMaxActive() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the minimum number of simultaneously active transactions + supported by the database environment. +
getTxnNoSync() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the system has been configured to not write or synchronously + flush the log on transaction commit. +
getTxnNotDurable() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the system has been configured to not write log records. +
getTxnTimeout() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the database environment transaction timeout value, in + microseconds; a timeout of 0 means no timeout is set. +
getTxnTimeout() - +Method in class com.sleepycat.db.LockStats +
Transaction timeout value. +
getTxnTimestamp() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return the time to which recovery will be done, or 0 if recovery will + be done to the most current possible date. +
getTxnWriteNoSync() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the system has been configured to write, but not synchronously + flush, the log on transaction commit. +
getTxnarray() - +Method in class com.sleepycat.db.TransactionStats +
The array of active transactions. +
getTxnsApplied() - +Method in class com.sleepycat.db.ReplicationStats +
The number of transactions applied. +
getType() - +Method in class com.sleepycat.db.DatabaseConfig +
Return the type of the database. +
getUnlink() - +Method in class com.sleepycat.db.CacheFile +
Return if the file will be removed when the last reference to it is + closed. +
getUnsortedDuplicates() - +Method in class com.sleepycat.db.DatabaseConfig +
Return if the database is configured to support duplicate data items. +
getUseEnvironment() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the database environment is configured to accept information + from the process environment when naming files. +
getUseEnvironmentRoot() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the database environment is configured to accept information + from the process environment when naming files if the process has + appropriate permissions. +
getUserBuffer() - +Method in class com.sleepycat.db.DatabaseEntry +
Return whether the buffer in this entry is owned by the + application. +
getUserBufferLength() - +Method in class com.sleepycat.db.DatabaseEntry +
Return the length of the application's buffer. +
getValue() - +Method in class com.sleepycat.collections.MapEntryParameter
Returns the value of this entry. -
getValueBinding() - -Method in class com.sleepycat.bdb.DataView -
Returns the value binding that is used. -
getValueEntityBinding() - -Method in class com.sleepycat.bdb.DataView -
Returns the entity binding that is used. -
getValueFormat() - -Method in class com.sleepycat.bdb.DataStore -
Returns the value format associated with this store. -
getValueFormat() - -Method in interface com.sleepycat.bdb.bind.KeyExtractor -
Returns the format of the value data or null if the index key data is - not derived from the value data. -
getValueFormat() - -Method in interface com.sleepycat.bdb.bind.EntityBinding -
Returns the format used for the value data of this binding. -
getValueFormat() - -Method in class com.sleepycat.bdb.bind.serial.TupleSerialKeyExtractor -
  -
getValueFormat() - -Method in class com.sleepycat.bdb.bind.serial.TupleSerialBinding -
  -
getValueFormat() - -Method in class com.sleepycat.bdb.bind.serial.SerialSerialKeyExtractor -
  -
getValueFormat() - -Method in class com.sleepycat.bdb.bind.serial.SerialSerialBinding -
  -
getValueFormat() - -Method in class com.sleepycat.bdb.bind.tuple.TupleTupleKeyExtractor -
  -
getValueFormat() - -Method in class com.sleepycat.bdb.bind.tuple.TupleTupleBinding -
  -
getVerbose(int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.getVerbose method returns whether the specified which parameter is currently set or not. -
getVersionMajor() - -Static method in class com.sleepycat.db.DbEnv -
The DbEnv.getVersionMajor method returns the release major number. -
getVersionMinor() - -Static method in class com.sleepycat.db.DbEnv -
The DbEnv.getVersionMinor method returns the release minor number. -
getVersionPatch() - -Static method in class com.sleepycat.db.DbEnv -
The DbEnv.getVersionPatch method returns the release patch number. -
getVersionString() - -Static method in class com.sleepycat.db.DbEnv -
The DbEnv.getVersionString method returns the release verbose version information, suitable for display. -
getView() - -Method in class com.sleepycat.bdb.DataCursor -
Returns the view for this cursor, as specified to the constructor. -
getZeroTerminatedByteLength(byte[], int) - -Static method in class com.sleepycat.bdb.util.UtfOps +
getValue() - +Method in class com.sleepycat.db.SequenceStats +
The current cached value of the sequence. +
getVerboseDeadlock() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the database environment is configured to display + additional information when doing deadlock detection. +
getVerboseRecovery() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the database environment is configured to display + additional information when performing recovery. +
getVerboseReplication() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the database environment is configured to display + additional information when processing replication messages. +
getVerboseWaitsFor() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the database environment is configured to display the + waits-for table when doing deadlock detection. +
getVersion() - +Method in class com.sleepycat.db.BtreeStats +
The version of the Btree database. +
getVersion() - +Method in class com.sleepycat.db.HashStats +
The version of the Hash database. +
getVersion() - +Method in class com.sleepycat.db.LogStats +
The version of the log file type. +
getVersion() - +Method in class com.sleepycat.db.QueueStats +
The version of the Queue database. +
getVersionMajor() - +Static method in class com.sleepycat.db.Environment +
Return the release major number. +
getVersionMinor() - +Static method in class com.sleepycat.db.Environment +
Return the release minor number. +
getVersionPatch() - +Static method in class com.sleepycat.db.Environment +
Return the release patch number. +
getVersionString() - +Static method in class com.sleepycat.db.Environment +
Return the release version information, suitable for display. +
getWBytes() - +Method in class com.sleepycat.db.LogStats +
The number of bytes over and above st_w_mbytes written to this log. +
getWCount() - +Method in class com.sleepycat.db.LogStats +
The number of times the log has been written to disk. +
getWCountFill() - +Method in class com.sleepycat.db.LogStats +
The number of times the log has been written to disk because the + in-memory log record cache filled up. +
getWMbytes() - +Method in class com.sleepycat.db.LogStats +
The number of megabytes written to this log. +
getWait() - +Method in class com.sleepycat.db.SequenceStats +
The number of times a thread of control was forced to wait on the + handle mutex. +
getWaitingLsn() - +Method in class com.sleepycat.db.ReplicationStats +
The LSN of the first log record we have after missing log records + being waited for, or 0 if no log records are currently missing. +
getWaitingPages() - +Method in class com.sleepycat.db.ReplicationStats +
The page number of the first page we have after missing pages being + waited for, or 0 if no pages are currently missing. +
getWcBytes() - +Method in class com.sleepycat.db.LogStats +
The number of bytes over and above LogStats.getWcMbytes + written to this log since the last checkpoint. +
getWcMbytes() - +Method in class com.sleepycat.db.LogStats +
The number of megabytes written to this log since the last checkpoint. +
getWrap() - +Method in class com.sleepycat.db.SequenceConfig +
Return if the sequence will wrap around when it is incremented + (decremented) past the specified maximum (minimum) value. +
getWriteCursor() - +Method in class com.sleepycat.db.CursorConfig +
Return if the Concurrent Data Store environment cursor will be used to + update the database. +
getXACreate() - +Method in class com.sleepycat.db.DatabaseConfig +
Return if the database has been configured to be accessed via applications + running under an X/Open conformant Transaction Manager. +
getXId() - +Method in class com.sleepycat.db.TransactionStats.Active +
If the transaction is an XA transaction, the XA global + transaction ID. +
getXaStatus() - +Method in class com.sleepycat.db.TransactionStats.Active +
If the transaction is an XA transaction, the status of the + transaction, otherwise 0. +
getYieldCPU() - +Method in class com.sleepycat.db.EnvironmentConfig +
Return if the system has been configured to yield the processor + immediately after each page or mutex acquisition. +
getZeroTerminatedByteLength(byte[], int) - +Static method in class com.sleepycat.util.UtfOps
Returns the byte length of a null terminated UTF string, not including the terminator. -
gid - -Variable in class com.sleepycat.db.DbPreplist -
The global transaction ID for the transaction. -
greater - -Variable in class com.sleepycat.db.DbKeyRange -
A value between 0 and 1, the proportion of keys greater than - the specified key. +
greater - +Variable in class com.sleepycat.db.KeyRange +
A value between 0 and 1, the proportion of keys greater than the + specified key.
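A minimal sketch of how the Environment version accessors and the EnvironmentConfig getters indexed above fit together. The EnvironmentConfig constructor and the setTransactional/setTxnTimeout setters are assumed counterparts of the listed getters and do not appear in this hunk.

    import com.sleepycat.db.Environment;
    import com.sleepycat.db.EnvironmentConfig;

    public class VersionAndConfigSketch {
        public static void main(String[] args) {
            // Static release information, as indexed above.
            System.out.println("Berkeley DB " + Environment.getVersionString());
            System.out.println(Environment.getVersionMajor() + "."
                    + Environment.getVersionMinor() + "."
                    + Environment.getVersionPatch());

            // The get* accessors report what the matching set* methods configured
            // (the setters are assumed counterparts, not taken from this index hunk).
            EnvironmentConfig config = new EnvironmentConfig();
            config.setTransactional(true);
            config.setTxnTimeout(1000000L);   // microseconds, per getTxnTimeout() above
            System.out.println("transactional: " + config.getTransactional());
            System.out.println("txn timeout (us): " + config.getTxnTimeout());
        }
    }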

H

-
hash_bfree - -Variable in class com.sleepycat.db.DbHashStat -
The number of bytes free on bucket pages. -
hash_big_bfree - -Variable in class com.sleepycat.db.DbHashStat -
The number of bytes free on big item pages. -
hash_bigpages - -Variable in class com.sleepycat.db.DbHashStat -
The number of big key/data pages. -
hash_buckets - -Variable in class com.sleepycat.db.DbHashStat -
The number of hash buckets. -
hash_dup - -Variable in class com.sleepycat.db.DbHashStat -
The number of duplicate pages. -
hash_dup_free - -Variable in class com.sleepycat.db.DbHashStat -
The number of bytes free on duplicate pages. -
hash_ffactor - -Variable in class com.sleepycat.db.DbHashStat -
The desired fill factor (number of items per bucket) specified at database-creation time. -
hash_free - -Variable in class com.sleepycat.db.DbHashStat -
The number of pages on the free list. -
hash_magic - -Variable in class com.sleepycat.db.DbHashStat -
Magic number that identifies the file as a Hash file. -
hash_metaflags - -Variable in class com.sleepycat.db.DbHashStat -
  -
hash_ndata - -Variable in class com.sleepycat.db.DbHashStat -
The number of key/data pairs in the database. -
hash_nkeys - -Variable in class com.sleepycat.db.DbHashStat -
The number of unique keys in the database. -
hash_overflows - -Variable in class com.sleepycat.db.DbHashStat -
The number of overflow pages (overflow pages are pages that contain items that did not fit in the main bucket page). -
hash_ovfl_free - -Variable in class com.sleepycat.db.DbHashStat -
The number of bytes free on overflow pages. -
hash_pagesize - -Variable in class com.sleepycat.db.DbHashStat -
The underlying Hash database page (and bucket) size, in bytes. -
hash_version - -Variable in class com.sleepycat.db.DbHashStat -
The version of the Hash database. -
hash(Db, byte[], int) - -Method in interface com.sleepycat.db.DbHash -
The DbHash interface is used by the Db.setHash method. -
hashCode() - -Method in class com.sleepycat.bdb.collection.MapEntry -
Computes a hash code as specified by Map.Entry.hashCode(). -
hashCode() - -Method in class com.sleepycat.db.DbTxn -
  -
hasNext() - -Method in class com.sleepycat.bdb.collection.StoredIterator +
HASH - +Static variable in class com.sleepycat.db.DatabaseType +
The database is a Hash. +
HIGH - +Static variable in class com.sleepycat.db.CacheFilePriority +
The second highest priority. +
HashStats - class com.sleepycat.db.HashStats.
The HashStats object is used to return Hash database statistics.
Hasher - interface com.sleepycat.db.Hasher.
An application-specified, database hash function.
handleLogRecord(Environment, DatabaseEntry, LogSequenceNumber, RecoveryOperation) - +Method in interface com.sleepycat.db.LogRecordHandler +
  +
hasNext() - +Method in class com.sleepycat.collections.StoredIterator
Returns true if this iterator has more elements when traversing in the forward direction. -
hasPrevious() - -Method in class com.sleepycat.bdb.collection.StoredIterator +
hasPrevious() - +Method in class com.sleepycat.collections.StoredIterator
Returns true if this iterator has more elements when traversing in the reverse direction. -
hasRecNumAccess() - -Method in class com.sleepycat.bdb.DataDb -
Returns whether record number access is allowed. -
hasRecNumAccess() - -Method in class com.sleepycat.bdb.DataCursor -
Returns whether record number access is allowed. -
headMap(Object) - -Method in class com.sleepycat.bdb.collection.StoredSortedMap +
hash(Database, byte[], int) - +Method in interface com.sleepycat.db.Hasher +
An application-specified, database-specific hash function. +
hashCode() - +Method in class com.sleepycat.collections.MapEntryParameter +
Computes a hash code as specified by Map.Entry.hashCode(). +
hashCode() - +Method in class com.sleepycat.collections.StoredCollection +
  +
hashCode() - +Method in class com.sleepycat.collections.StoredList +
  +
hashCode() - +Method in class com.sleepycat.collections.StoredMap +
  +
headMap(Object) - +Method in class com.sleepycat.collections.StoredSortedMap
Returns a view of the portion of this sorted set whose keys are strictly less than toKey. -
headMap(Object, boolean) - -Method in class com.sleepycat.bdb.collection.StoredSortedMap +
headMap(Object, boolean) - +Method in class com.sleepycat.collections.StoredSortedMap
Returns a view of the portion of this sorted map whose elements are strictly less than toKey, optionally including toKey. -
headSet(Object) - -Method in class com.sleepycat.bdb.collection.StoredSortedValueSet -
Returns a view of the portion of this sorted set whose elements are - strictly less than toValue. -
headSet(Object) - -Method in class com.sleepycat.bdb.collection.StoredSortedKeySet -
Returns a view of the portion of this sorted set whose elements are - strictly less than toKey. -
headSet(Object) - -Method in class com.sleepycat.bdb.collection.StoredSortedEntrySet +
headSet(Object) - +Method in class com.sleepycat.collections.StoredSortedEntrySet
Returns a view of the portion of this sorted set whose elements are strictly less than toMapEntry. -
headSet(Object, boolean) - -Method in class com.sleepycat.bdb.collection.StoredSortedValueSet +
headSet(Object, boolean) - +Method in class com.sleepycat.collections.StoredSortedEntrySet
Returns a view of the portion of this sorted set whose elements are - strictly less than toValue, optionally including toValue. -
headSet(Object, boolean) - -Method in class com.sleepycat.bdb.collection.StoredSortedKeySet + strictly less than toMapEntry, optionally including toMapEntry. +
headSet(Object) - +Method in class com.sleepycat.collections.StoredSortedKeySet +
Returns a view of the portion of this sorted set whose elements are + strictly less than toKey. +
headSet(Object, boolean) - +Method in class com.sleepycat.collections.StoredSortedKeySet
Returns a view of the portion of this sorted set whose elements are strictly less than toKey, optionally including toKey. -
headSet(Object, boolean) - -Method in class com.sleepycat.bdb.collection.StoredSortedEntrySet +
headSet(Object) - +Method in class com.sleepycat.collections.StoredSortedValueSet
Returns a view of the portion of this sorted set whose elements are - strictly less than toMapEntry, optionally including toMapEntry. + strictly less than toValue. +
headSet(Object, boolean) - +Method in class com.sleepycat.collections.StoredSortedValueSet +
Returns a view of the portion of this sorted set whose elements are + strictly less than toValue, optionally including toValue.
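The Hasher entries above describe an application-specified hash callback for Hash databases. A sketch only, assuming an int return value for hash() and a DatabaseConfig.setHasher setter to register the callback (neither detail appears in this hunk).

    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseConfig;
    import com.sleepycat.db.DatabaseType;
    import com.sleepycat.db.Hasher;

    public class SimpleHasher implements Hasher {
        // Signature taken from the index entry hash(Database, byte[], int);
        // the int return value (the bucket hash) is an assumption.
        public int hash(Database db, byte[] data, int len) {
            int h = 0;
            for (int i = 0; i < len; i++)
                h = 31 * h + (data[i] & 0xff);
            return h;
        }

        static DatabaseConfig hashConfig() {
            DatabaseConfig config = new DatabaseConfig();
            config.setType(DatabaseType.HASH);     // HASH static field indexed above
            config.setHasher(new SimpleHasher());  // assumed setter for the Hasher callback
            return config;
        }
    }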

I

-
id() - -Method in class com.sleepycat.db.DbTxn -
The DbTxn.id method returns the unique transaction id associated with the specified transaction. -
increment() - -Method in class com.sleepycat.bdb.DataThang -
Increments the data value for this thang, treating the byte array as - a Java BigInteger where the bytes are in MSB-first order. -
indexOf(Object) - -Method in class com.sleepycat.bdb.collection.StoredList +
IOExceptionWrapper - exception com.sleepycat.util.IOExceptionWrapper.
An IOException that can contain nested exceptions.
IOExceptionWrapper(Throwable) - +Constructor for class com.sleepycat.util.IOExceptionWrapper +
  +
IREAD - +Static variable in class com.sleepycat.db.LockRequestMode +
Intention to read (shared). +
IWR - +Static variable in class com.sleepycat.db.LockRequestMode +
Intention to read and write (shared). +
IWRITE - +Static variable in class com.sleepycat.db.LockRequestMode +
Intention to write (shared). +
IntegerBinding - class com.sleepycat.bind.tuple.IntegerBinding.
A concrete TupleBinding for an Integer primitive + wrapper or an int primitive.
IntegerBinding() - +Constructor for class com.sleepycat.bind.tuple.IntegerBinding +
  +
indexOf(Object) - +Method in class com.sleepycat.collections.StoredList
Returns the index in this list of the first occurrence of the specified element, or -1 if this list does not contain this element. -
inputToData(TupleInput, DataBuffer) - -Method in class com.sleepycat.bdb.bind.tuple.TupleFormat -
Utility method to set the data in a data buffer to the data in a tuple +
inputToEntry(TupleInput, DatabaseEntry) - +Static method in class com.sleepycat.bind.tuple.TupleBinding +
Utility method to set the data in an entry buffer to the data in a tuple + input object. -
INT - -Static variable in interface com.sleepycat.bdb.bind.DataType -
Integer data type. -
IntegrityConstraintException - exception com.sleepycat.bdb.IntegrityConstraintException.
Thrown when an integrity constraint violation occurs.
IntegrityConstraintException(String) - -Constructor for class com.sleepycat.bdb.IntegrityConstraintException -
Creates an integrity constraint exception. -
IOExceptionWrapper - exception com.sleepycat.bdb.util.IOExceptionWrapper.
An IOException that can contain nested exceptions.
IOExceptionWrapper(Throwable) - -Constructor for class com.sleepycat.bdb.util.IOExceptionWrapper -
  -
isAutoCommit() - -Method in class com.sleepycat.bdb.DataView -
Returns whether auto-commit is set for this view or for the - transactional environment of the store and index. -
isAutoCommit() - -Method in class com.sleepycat.bdb.CurrentTransaction -
Returns whether AUTO_COMMIT will be used for all non-cursor write - operations when no transaction is active. -
isAutoCommit() - -Method in class com.sleepycat.bdb.collection.StoredContainer -
Returns whether auto-commit is enabled for this container or for its - associated DbEnv. -
isByteSwapped() - -Method in class com.sleepycat.db.Db -
The Db.isByteSwapped method returns false if the underlying database files were created on an architecture of the same byte order as the current one, and true if they were not (that is, big-endian on a little-endian machine, or vice versa). -
isDirtyRead() - -Method in class com.sleepycat.bdb.CurrentTransaction -
Returns whether dirty-read is used for the current transaction. -
isDirtyReadAllowed() - -Method in class com.sleepycat.bdb.DataView -
Returns whether DIRTY_READ was specified for both the Store and Index. -
isDirtyReadAllowed() - -Method in class com.sleepycat.bdb.DataDb -
Returns whether dirty-read is allowed for the database. -
isDirtyReadAllowed() - -Method in class com.sleepycat.bdb.collection.StoredContainer -
Returns whether dirty-read is allowed for this container. -
isDirtyReadEnabled() - -Method in class com.sleepycat.bdb.DataView -
Returns whether DIRTY_READ will be used for all read operations. -
isDirtyReadEnabled() - -Method in class com.sleepycat.bdb.collection.StoredContainer +
intToEntry(int, DatabaseEntry) - +Static method in class com.sleepycat.bind.tuple.IntegerBinding +
Converts a simple int value into an entry buffer. +
isDirtyRead() - +Method in class com.sleepycat.collections.StoredContainer
Returns whether dirty-read is enabled for this container. -
isEmpty() - -Method in class com.sleepycat.bdb.DataView -
Returns whether no records are present in the view. -
isEmpty() - -Method in class com.sleepycat.bdb.collection.StoredContainer +
isDirtyReadAllowed() - +Method in class com.sleepycat.collections.StoredContainer +
Returns whether dirty-read is allowed for this container. +
isDupMaster() - +Method in class com.sleepycat.db.ReplicationStatus +
The replication group has more than one master. +
isEmpty() - +Method in class com.sleepycat.collections.StoredContainer
Returns true if this map or collection contains no mappings or elements. -
isEncrypted() - -Method in class com.sleepycat.db.Db -
The Db.isEncrypted method returns false if the underlying database files are encrypted and true if they are not. -
isIndexed() - -Method in class com.sleepycat.bdb.collection.StoredContainer -
Returns whether this container is a view on a DataIndex rather - than directly on a DataStore. -
isNoWait() - -Method in class com.sleepycat.bdb.CurrentTransaction -
Returns whether no-wait is used for the current transaction. -
isOrdered() - -Method in class com.sleepycat.bdb.DataView -
Returns whether keys are ordered for the index or store. -
isOrdered() - -Method in class com.sleepycat.bdb.DataDb -
Returns whether keys are ordered for the database. -
isOrdered() - -Method in class com.sleepycat.bdb.collection.StoredContainer +
isHoldElection() - +Method in class com.sleepycat.db.ReplicationStatus +
An election is needed. +
isNewMaster() - +Method in class com.sleepycat.db.ReplicationStatus +
A new master has been elected. +
isNewSite() - +Method in class com.sleepycat.db.ReplicationStatus +
The system received contact information from a new environment. +
isNotPermanent() - +Method in class com.sleepycat.db.ReplicationStatus +
A message carrying a DB_REP_PERMANENT flag was processed successfully, + but was not written to disk. +
isOrdered() - +Method in class com.sleepycat.collections.StoredContainer
Returns whether keys are ordered in this container. -
isTransactional() - -Method in class com.sleepycat.bdb.DataView -
Returns whether the store and index are transactional. -
isTransactional() - -Method in class com.sleepycat.bdb.DataDb -
Returns whether the database was opened in a transaction and therefore - must be written in a transaction. -
isTransactional() - -Method in class com.sleepycat.bdb.collection.StoredContainer +
isPermanent() - +Method in class com.sleepycat.db.ReplicationStatus +
Processing this message resulted in the processing of records that + are permanent. +
isReadModifyWrite() - +Method in class com.sleepycat.collections.StoredIterator +
Returns whether write-locks will be obtained when reading with this + cursor. +
isSecondary() - +Method in class com.sleepycat.collections.StoredContainer +
Returns whether this container is a view on a secondary database rather + than directly on a primary database. +
isStartupDone() - +Method in class com.sleepycat.db.ReplicationStatus +
The client completed startup synchronization. +
isSuccess() - +Method in class com.sleepycat.db.ReplicationStatus +
The operation succeeded. +
isTransactional() - +Method in class com.sleepycat.collections.StoredContainer
Returns whether the databases underlying this container are transactional. -
isTransactional() - -Method in class com.sleepycat.db.Db -
The Db.isTransactional method returns true if the Db handle has been opened in a transactional mode. -
isWriteAllowed() - -Method in class com.sleepycat.bdb.DataView -
Returns whether write operations are allowed. -
isWriteAllowed() - -Method in class com.sleepycat.bdb.DataCursor -
Returns whether write is allowed for this cursor, as specified to the - constructor. -
isWriteAllowed() - -Method in class com.sleepycat.bdb.collection.StoredContainer +
isWriteAllowed() - +Method in class com.sleepycat.collections.StoredContainer
Returns true if this is a read-write container or false if this is a read-only container. -
iterator() - -Method in class com.sleepycat.bdb.collection.StoredCollection +
iterator() - +Method in class com.sleepycat.collections.StoredCollection
Returns an iterator over the elements in this collection. -
iterator(boolean) - -Method in class com.sleepycat.bdb.collection.StoredCollection +
iterator(boolean) - +Method in class com.sleepycat.collections.StoredCollection
Returns a read or read-write iterator over the elements in this collection. -
iterator(Iterator) - -Static method in class com.sleepycat.bdb.collection.StoredCollections +
iterator(Iterator) - +Static method in class com.sleepycat.collections.StoredCollections
Clones a stored iterator preserving its current position.
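The IntegerBinding entries above (the class and its static intToEntry method) suggest the usual round trip for primitive int keys; entryToInt is the assumed inverse conversion, indexed under E outside this hunk.

    import com.sleepycat.bind.tuple.IntegerBinding;
    import com.sleepycat.db.DatabaseEntry;

    public class IntKeySketch {
        public static void main(String[] args) {
            DatabaseEntry key = new DatabaseEntry();
            // intToEntry converts a simple int value into an entry buffer (indexed above).
            IntegerBinding.intToEntry(42, key);
            // entryToInt is the assumed inverse conversion (not shown in this hunk).
            int value = IntegerBinding.entryToInt(key);
            System.out.println(value);   // prints 42
        }
    }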

J

-
join(DataCursor[], boolean) - -Method in class com.sleepycat.bdb.DataView -
Returns a cursor for this view that reads only records having the - index key values at the specified cursors. -
join(DataView[], Object[], boolean) - -Method in class com.sleepycat.bdb.DataView -
Returns a cursor for this view that reads only records having the - specified index key values. -
join(Dbc[], int) - -Method in class com.sleepycat.db.Db -
The Db.join method creates a specialized join cursor for use in performing equality or natural joins on secondary indices. -
join(StoredContainer[], Object[]) - -Method in class com.sleepycat.bdb.collection.StoredCollection -
Returns an iterator representing an equality join of the indices and - index key values specified. -
join(StoredContainer[], Object[], boolean) - -Method in class com.sleepycat.bdb.collection.StoredCollection +
JoinConfig - class com.sleepycat.db.JoinConfig.
The configuration properties of a JoinCursor.
JoinConfig() - +Constructor for class com.sleepycat.db.JoinConfig +
Creates an instance with the system's default settings. +
JoinCursor - class com.sleepycat.db.JoinCursor.
A specialized join cursor for use in performing equality or natural joins on +secondary indices.
join(StoredContainer[], Object[], JoinConfig) - +Method in class com.sleepycat.collections.StoredCollection
Returns an iterator representing an equality join of the indices and index key values specified. +
join(Cursor[], JoinConfig) - +Method in class com.sleepycat.db.Database +
Creates a specialized join cursor for use in performing equality or + natural joins on secondary indices.
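The Database.join and JoinCursor entries above describe equality joins over secondary indices. A sketch under several assumptions: the primary and secondary handles are opened elsewhere, getSearchKey/getNext/close behave as on ordinary cursors, and a null JoinConfig selects default behavior.

    import com.sleepycat.db.*;

    public class JoinSketch {
        static void findMatches(Database primaryDb,
                                SecondaryDatabase byCityDb,
                                SecondaryDatabase byJobDb,
                                DatabaseEntry cityKey,
                                DatabaseEntry jobKey) throws DatabaseException {
            SecondaryCursor cityCursor = byCityDb.openSecondaryCursor(null, null);
            SecondaryCursor jobCursor = byJobDb.openSecondaryCursor(null, null);
            JoinCursor joiner = null;
            try {
                DatabaseEntry data = new DatabaseEntry();
                // Position each secondary cursor on the key value to be joined.
                if (cityCursor.getSearchKey(cityKey, data, LockMode.DEFAULT)
                        != OperationStatus.SUCCESS)
                    return;
                if (jobCursor.getSearchKey(jobKey, data, LockMode.DEFAULT)
                        != OperationStatus.SUCCESS)
                    return;
                // join(Cursor[], JoinConfig) is indexed above on Database.
                joiner = primaryDb.join(new Cursor[] { cityCursor, jobCursor }, null);
                DatabaseEntry primaryKey = new DatabaseEntry();
                DatabaseEntry primaryData = new DatabaseEntry();
                while (joiner.getNext(primaryKey, primaryData, LockMode.DEFAULT)
                        == OperationStatus.SUCCESS) {
                    // process the matching primary record
                }
            } finally {
                if (joiner != null) joiner.close();
                cityCursor.close();
                jobCursor.close();
            }
        }
    }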

K

-
key_range(DbTxn, Dbt, DbKeyRange, int) - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by keyRange(DbTxn,Dbt,DbKeyRange,int) -
KeyExtractor - interface com.sleepycat.bdb.bind.KeyExtractor.
The interface implemented for extracting the index key from primary key - and/or value buffers, and for clearing the index key in a value buffer.
keyRange(DbTxn, Dbt, DbKeyRange, int) - -Method in class com.sleepycat.db.Db -
The Db.keyRange method returns an estimate of the proportion of keys that are less than, equal to, and greater than the specified key. -
KeyRangeException - exception com.sleepycat.bdb.KeyRangeException.
(internal) An exception thrown when a key is out of range.
KeyRangeException(String) - -Constructor for class com.sleepycat.bdb.KeyRangeException -
Creates a key range exception. -
keySet() - -Method in class com.sleepycat.bdb.collection.StoredMap +
KEYEMPTY - +Static variable in class com.sleepycat.db.OperationStatus +
The cursor operation was unsuccessful because the current record + was deleted. +
KEYEXIST - +Static variable in class com.sleepycat.db.OperationStatus +
The operation to insert data was configured to not allow overwrite + and the key already exists in the database. +
KeyRange - class com.sleepycat.db.KeyRange.
An object that returns status from the Database.getKeyRange method.
KeyRange() - +Constructor for class com.sleepycat.db.KeyRange +
  +
keySet() - +Method in class com.sleepycat.collections.StoredMap
Returns a set view of the keys contained in this map. -
keySetView() - -Method in class com.sleepycat.bdb.DataView -
Return a new key-set view derived from this view by setting the - entity and value binding to null.
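The OperationStatus constants above (KEYEXIST, KEYEMPTY) are returned rather than thrown. A sketch, assuming putNoOverwrite is the insert variant that reports an existing key; it does not appear in this hunk.

    import com.sleepycat.db.*;

    public class StatusSketch {
        static boolean insertIfAbsent(Database db, DatabaseEntry key, DatabaseEntry data)
                throws DatabaseException {
            OperationStatus status = db.putNoOverwrite(null, key, data);
            if (status == OperationStatus.KEYEXIST) {
                return false;   // the key was already present and was not overwritten
            }
            return status == OperationStatus.SUCCESS;
        }
    }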

L

-
last() - -Method in class com.sleepycat.bdb.collection.StoredSortedValueSet +
LOW - +Static variable in class com.sleepycat.db.CacheFilePriority +
The second lowest priority. +
Lock - class com.sleepycat.db.Lock.
The locking interfaces for the database environment are methods of the +Environment handle.
LockDetectMode - class com.sleepycat.db.LockDetectMode.
Deadlock detection modes.
LockMode - class com.sleepycat.db.LockMode.
Locking modes for database operations.
LockNotGrantedException - exception com.sleepycat.db.LockNotGrantedException.
A LockNotGrantedException is thrown when a lock requested using the +Environment.getLock or Environment.lockVector +methods, where the noWait flag or lock timers were configured, could not +be granted before the wait-time expired.
LockOperation - class com.sleepycat.db.LockOperation.
Operations that can be performed on locks.
LockRequest - class com.sleepycat.db.LockRequest.
The LockRequest object is used to encapsulate a single lock request.
LockRequest(LockOperation, LockRequestMode, DatabaseEntry, Lock) - +Constructor for class com.sleepycat.db.LockRequest +
Construct a LockRequest with the specified operation, mode and lock, + for the specified object. +
LockRequest(LockOperation, LockRequestMode, DatabaseEntry, Lock, int) - +Constructor for class com.sleepycat.db.LockRequest +
Construct a LockRequest with the specified operation, mode, lock and + timeout for the specified object. +
LockRequestMode - class com.sleepycat.db.LockRequestMode.
When using the default lock conflict matrix, the LockRequestMode class +defines the set of possible lock modes.
LockRequestMode(String, int) - +Constructor for class com.sleepycat.db.LockRequestMode +
Construct a custom lock request mode. +
LockStats - class com.sleepycat.db.LockStats.
Lock statistics for a database environment.
LogCursor - class com.sleepycat.db.LogCursor.
The LogCursor object is the handle for a cursor into the log files, +supporting sequential access to the records stored in log files.
LogRecordHandler - interface com.sleepycat.db.LogRecordHandler.
A function to process application-specific log records.
LogSequenceNumber - class com.sleepycat.db.LogSequenceNumber.
The LogSequenceNumber object is a log sequence number which +specifies a unique location in a log file.
LogSequenceNumber(int, int) - +Constructor for class com.sleepycat.db.LogSequenceNumber +
Construct a LogSequenceNumber with the specified file and offset. +
LogSequenceNumber() - +Constructor for class com.sleepycat.db.LogSequenceNumber +
Construct an uninitialized LogSequenceNumber. +
LogStats - class com.sleepycat.db.LogStats.
Log statistics for a database environment.
LongBinding - class com.sleepycat.bind.tuple.LongBinding.
A concrete TupleBinding for a Long primitive + wrapper or a long primitive.
LongBinding() - +Constructor for class com.sleepycat.bind.tuple.LongBinding +
  +
last() - +Method in class com.sleepycat.collections.StoredSortedEntrySet
Returns the last (highest) element currently in this sorted set. -
last() - -Method in class com.sleepycat.bdb.collection.StoredSortedKeySet +
last() - +Method in class com.sleepycat.collections.StoredSortedKeySet
Returns the last (highest) element currently in this sorted set. -
last() - -Method in class com.sleepycat.bdb.collection.StoredSortedEntrySet +
last() - +Method in class com.sleepycat.collections.StoredSortedValueSet
Returns the last (highest) element currently in this sorted set. -
lastIndexOf(Object) - -Method in class com.sleepycat.bdb.collection.StoredList +
lastIndexOf(Object) - +Method in class com.sleepycat.collections.StoredList
Returns the index in this list of the last occurrence of the specified element, or -1 if this list does not contain this element. -
lastKey() - -Method in class com.sleepycat.bdb.collection.StoredSortedMap +
lastKey() - +Method in class com.sleepycat.collections.StoredSortedMap
Returns the last (highest) element currently in this sorted map. -
less - -Variable in class com.sleepycat.db.DbKeyRange -
A value between 0 and 1, the proportion of keys less than the - specified key. -
listIterator() - -Method in class com.sleepycat.bdb.collection.StoredList +
less - +Variable in class com.sleepycat.db.KeyRange +
A value between 0 and 1, the proportion of keys less than the specified + key. +
listIterator() - +Method in class com.sleepycat.collections.StoredList
Returns a list iterator of the elements in this list (in proper sequence). -
listIterator(int) - -Method in class com.sleepycat.bdb.collection.StoredList +
listIterator(int) - +Method in class com.sleepycat.collections.StoredList
Returns a list iterator of the elements in this list (in proper sequence), starting at the specified position in this list. -
lock_detect(int, int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.lockDetect(int,int) -
lock_get(int, int, Dbt, int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.lockGet(int,int,Dbt,int) -
lock_id_free(int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.lockIdFree(int) -
lock_id() - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.lockId() -
lock_put(DbLock) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.lockPut(DbLock) -
lock_stat(int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.lockStat(int) -
lock_vec(int, int, DbLockRequest[], int, int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.lockVector(int,int,DbLockRequest[],int,int) -
lockDetect(int, int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.lockDetect method runs one iteration of the deadlock detector. -
lockGet(int, int, Dbt, int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.lockGet method acquires a lock from the lock table, returning information about it in a DbLock object. -
lockId() - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.lockId method returns a locker ID, which is guaranteed to be unique in the specified lock table. -
lockIdFree(int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.lockIdFree method frees a locker ID allocated by the DbEnv.lockId method. -
lockPut(DbLock) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.lockPut method releases lock. -
lockStat(int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.lockStat method returns the locking subsystem statistics. -
lockVector(int, int, DbLockRequest[], int, int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.lockVector method atomically obtains and releases one or more locks from the lock table. -
log_archive(int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.logArchive(int) -
log_compare(DbLsn, DbLsn) - -Static method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.logCompare(DbLsn,DbLsn) -
log_cursor(int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.logCursor(int) -
log_file(DbLsn) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.logFile(DbLsn) -
log_flush(DbLsn) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.logFlush(DbLsn) -
log_put(DbLsn, Dbt, int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.logPut(DbLsn,Dbt,int) -
log_stat(int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.logStat(int) -
logArchive(int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.logArchive method returns an array of log or database filenames. -
logCompare(DbLsn, DbLsn) - -Static method in class com.sleepycat.db.DbEnv -
The DbEnv.logCompare method allows the caller to compare two DbLsn objects, returning 0 if they are equal, 1 if lsn0 is greater than lsn1, and -1 if lsn0 is less than lsn1. -
logCursor(int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.logCursor method returns a created log cursor. -
logFile(DbLsn) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.logFile method maps DbLsn objects to filenames, returning the name of the file containing the record named by lsn. -
logFlush(DbLsn) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.logFlush method writes log records to disk. -
logPut(DbLsn, Dbt, int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.logPut method appends records to the log. -
logStat(int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.logStat method returns the logging subsystem statistics. -
LONG - -Static variable in interface com.sleepycat.bdb.bind.DataType -
Long data type. -
lsn - -Variable in class com.sleepycat.db.DbTxnStat.Active -
The current log sequence number when the transaction was begun. +
lockVector(int, boolean, LockRequest[]) - +Method in class com.sleepycat.db.Environment +
Atomically obtain and release one or more locks from the lock table. +
logFlush(LogSequenceNumber) - +Method in class com.sleepycat.db.Environment +
Flush log records to stable storage. +
logPut(DatabaseEntry, boolean) - +Method in class com.sleepycat.db.Environment +
Append a record to the log. +
longToEntry(long, DatabaseEntry) - +Static method in class com.sleepycat.bind.tuple.LongBinding +
Converts a simple long value into an entry buffer.
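The LockRequest constructors and the Environment.lockVector entry above cover vectored locking. A sketch only, assuming LockOperation.GET and LockRequestMode.WRITE exist alongside the PUT_ALL and IWRITE constants indexed here, that unused fields may be null for PUT_ALL, and that the locker ID was allocated elsewhere.

    import com.sleepycat.db.*;

    public class LockVectorSketch {
        static void lockAndReleaseAll(Environment env, int lockerId, DatabaseEntry lockObject)
                throws DatabaseException {
            // Request a write lock on the object (constructor signature indexed above).
            LockRequest get = new LockRequest(LockOperation.GET, LockRequestMode.WRITE,
                                              lockObject, null);
            // Atomically obtain the requested lock(s); may throw LockNotGrantedException.
            env.lockVector(lockerId, false, new LockRequest[] { get });

            // ... operate on the locked object ...

            // Release every lock held by this locker in one call (PUT_ALL indexed above).
            LockRequest putAll = new LockRequest(LockOperation.PUT_ALL, null, null, null);
            env.lockVector(lockerId, false, new LockRequest[] { putAll });
        }
    }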

M

-
makeSpace(int) - -Method in class com.sleepycat.bdb.util.FastOutputStream +
MAXLOCKS - +Static variable in class com.sleepycat.db.LockDetectMode +
Reject the lock request for the locker ID with the most locks. +
MAXWRITE - +Static variable in class com.sleepycat.db.LockDetectMode +
Reject the lock request for the locker ID with the most write locks. +
MINLOCKS - +Static variable in class com.sleepycat.db.LockDetectMode +
Reject the lock request for the locker ID with the fewest locks. +
MINWRITE - +Static variable in class com.sleepycat.db.LockDetectMode +
Reject the lock request for the locker ID with the fewest write locks. +
MapEntryParameter - class com.sleepycat.collections.MapEntryParameter.
A simple Map.Entry implementation that can be used as an + input parameter. +
MapEntryParameter(Object, Object) - +Constructor for class com.sleepycat.collections.MapEntryParameter +
Creates a map entry with a given key and value. +
MarshalledTupleEntry - interface com.sleepycat.bind.tuple.MarshalledTupleEntry.
A marshalling interface implemented by key, data or entity classes that + are represented as tuples.
MarshalledTupleKeyEntity - interface com.sleepycat.bind.tuple.MarshalledTupleKeyEntity.
A marshalling interface implemented by entity classes that represent keys as + tuples.
MemoryException - exception com.sleepycat.db.MemoryException.
This exception is thrown when a DatabaseEntry +passed to a Database or Cursor method is not large +enough to hold a value being returned.
MessageHandler - interface com.sleepycat.db.MessageHandler.
An interface specifying a callback function to be called to display +informational messages.
MultipleDataEntry - class com.sleepycat.db.MultipleDataEntry.
A DatabaseEntry that holds multiple data items returned by a single +Database or Cursor get call.
MultipleDataEntry() - +Constructor for class com.sleepycat.db.MultipleDataEntry +
Construct an entry with no data. +
MultipleDataEntry(byte[]) - +Constructor for class com.sleepycat.db.MultipleDataEntry +
Construct an entry with a given byte array. +
MultipleDataEntry(byte[], int, int) - +Constructor for class com.sleepycat.db.MultipleDataEntry +
Constructs a DatabaseEntry with a given byte array, offset and size. +
MultipleEntry - class com.sleepycat.db.MultipleEntry.
An abstract class representing a DatabaseEntry that holds multiple results +returned by a single Cursor get method.
MultipleKeyDataEntry - class com.sleepycat.db.MultipleKeyDataEntry.
A DatabaseEntry that holds multiple key/data pairs returned by a single +Database or Cursor get call.
MultipleKeyDataEntry() - +Constructor for class com.sleepycat.db.MultipleKeyDataEntry +
Construct an entry with no data. +
MultipleKeyDataEntry(byte[]) - +Constructor for class com.sleepycat.db.MultipleKeyDataEntry +
Construct an entry with a given byte array. +
MultipleKeyDataEntry(byte[], int, int) - +Constructor for class com.sleepycat.db.MultipleKeyDataEntry +
Constructs a DatabaseEntry with a given byte array, offset and size. +
MultipleRecnoDataEntry - class com.sleepycat.db.MultipleRecnoDataEntry.
A DatabaseEntry that holds multiple record number/data pairs returned by a +single Database or Cursor get call.
MultipleRecnoDataEntry() - +Constructor for class com.sleepycat.db.MultipleRecnoDataEntry +
Construct an entry with no data. +
MultipleRecnoDataEntry(byte[]) - +Constructor for class com.sleepycat.db.MultipleRecnoDataEntry +
Construct an entry with a given byte array. +
MultipleRecnoDataEntry(byte[], int, int) - +Constructor for class com.sleepycat.db.MultipleRecnoDataEntry +
Constructs a DatabaseEntry with a given byte array, offset and size. +
makeSpace(int) - +Method in class com.sleepycat.util.FastOutputStream
Ensure that at least the given number of bytes are available in the internal buffer. -
MapEntry - class com.sleepycat.bdb.collection.MapEntry.
A simple Map.Entry implementation.
MapEntry(Object, Object) - -Constructor for class com.sleepycat.bdb.collection.MapEntry -
Creates a map entry with a given key and value. -
mark(int) - -Method in class com.sleepycat.bdb.util.FastInputStream +
mark(int) - +Method in class com.sleepycat.util.FastInputStream
  -
markSupported() - -Method in class com.sleepycat.bdb.util.FastInputStream +
markSupported() - +Method in class com.sleepycat.util.FastInputStream
  -
marshalData(TupleOutput) - -Method in interface com.sleepycat.bdb.bind.tuple.MarshalledTupleData -
Construct the key or value tuple data from the key or value object. -
marshalIndexKey(String, TupleOutput) - -Method in interface com.sleepycat.bdb.bind.tuple.MarshalledTupleKeyEntity -
Extracts the entity's index key and writes it to the key output. -
MarshalledTupleData - interface com.sleepycat.bdb.bind.tuple.MarshalledTupleData.
A marshalling interface implemented by key, value or entity classes that - have tuple data.
MarshalledTupleKeyEntity - interface com.sleepycat.bdb.bind.tuple.MarshalledTupleKeyEntity.
A marshalling interface implemented by entity classes that have tuple data - keys.
marshalPrimaryKey(TupleOutput) - -Method in interface com.sleepycat.bdb.bind.tuple.MarshalledTupleKeyEntity +
marshalEntry(TupleOutput) - +Method in interface com.sleepycat.bind.tuple.MarshalledTupleEntry +
Construct the key or data tuple entry from the key or data object. +
marshalPrimaryKey(TupleOutput) - +Method in interface com.sleepycat.bind.tuple.MarshalledTupleKeyEntity
Extracts the entity's primary key and writes it to the key output. -
memoryPoolFileStat(int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.memoryPoolFileStat method creates an array of DbMpoolFStat objects containing statistics for individual files in the cache. -
memoryPoolStat(int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.memoryPoolStat method returns the memory pool (that is, the buffer cache) subsystem statistics. -
memoryPoolTrickle(int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.memoryPoolTrickle method ensures that a specified percent of the pages in the shared memory pool are clean, by writing dirty pages to their backing files. -
memp_fstat(int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.memoryPoolFileStat(int) -
memp_stat(int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.memoryPoolStat(int) -
memp_trickle(int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.memoryPoolTrickle(int) +
marshalSecondaryKey(String, TupleOutput) - +Method in interface com.sleepycat.bind.tuple.MarshalledTupleKeyEntity +
Extracts the entity's secondary key and writes it to the key output. +
message(Environment, String) - +Method in interface com.sleepycat.db.MessageHandler +
A callback function to be called to display informational messages.
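The MultipleDataEntry entries above describe bulk retrieval buffers. A sketch under explicit assumptions: setUserBuffer is the setter behind getUserBuffer/getUserBufferLength, passing a MultipleDataEntry to Cursor.getNext requests a bulk return buffer, and next(DatabaseEntry) returns false when the buffer is exhausted.

    import com.sleepycat.db.*;

    public class BulkReadSketch {
        static void dumpAll(Database db) throws DatabaseException {
            Cursor cursor = db.openCursor(null, null);
            try {
                DatabaseEntry key = new DatabaseEntry();
                // Constructor with a caller-supplied byte array is indexed above.
                MultipleDataEntry bulk = new MultipleDataEntry(new byte[1024 * 1024]);
                bulk.setUserBuffer(1024 * 1024, true);   // assumed setter, see lead-in
                while (cursor.getNext(key, bulk, LockMode.DEFAULT)
                        == OperationStatus.SUCCESS) {
                    DatabaseEntry item = new DatabaseEntry();
                    // next() fills `item` with a reference into the bulk buffer.
                    while (bulk.next(item)) {
                        // process one data item
                    }
                }
            } finally {
                cursor.close();
            }
        }
    }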

N

-
newDataIndex(DataStore, Db, String, boolean, boolean) - -Method in class com.sleepycat.bdb.factory.TupleSerialDbFactory -
Creates an index from a previously opened Db object. -
newDataStore(Db, Class, PrimaryKeyAssigner) - -Method in class com.sleepycat.bdb.factory.TupleSerialDbFactory -
Creates a store from a previously opened Db object. -
newForeignKeyIndex(DataStore, Db, String, boolean, boolean, DataStore, int) - -Method in class com.sleepycat.bdb.factory.TupleSerialDbFactory -
Creates a foreign key index from a previously opened Db object. -
newMap(DataIndex, Class, boolean) - -Method in class com.sleepycat.bdb.factory.TupleSerialDbFactory -
Creates a map for a given index that was obtained from this factory. -
newMap(DataStore, Class, boolean) - -Method in class com.sleepycat.bdb.factory.TupleSerialDbFactory -
Creates a map for a given store that was obtained from this factory. -
newOutput() - -Method in class com.sleepycat.bdb.bind.tuple.TupleFormat +
NONE - +Static variable in class com.sleepycat.db.LockDetectMode +
Turn off deadlock detection. +
NOTFOUND - +Static variable in class com.sleepycat.db.OperationStatus +
The requested key/data pair was not found. +
newMap(Database, Class, Class, boolean) - +Method in class com.sleepycat.collections.TupleSerialFactory +
Creates a map from a previously opened Database object. +
newOutput() - +Static method in class com.sleepycat.bind.tuple.TupleBinding
Utility method for use by bindings to create a tuple output object. -
newSortedMap(DataIndex, Class, boolean) - -Method in class com.sleepycat.bdb.factory.TupleSerialDbFactory -
Creates a sorted map for a given index that was obtained from this - factory. -
newSortedMap(DataStore, Class, boolean) - -Method in class com.sleepycat.bdb.factory.TupleSerialDbFactory -
Creates a sorted map for a given store that was obtained from this - factory. -
next() - -Method in class com.sleepycat.bdb.collection.StoredIterator -
Returns the next element in the interation. -
next(Dbt) - -Method in class com.sleepycat.db.DbMultipleDataIterator -
The DbMultipleDataIterator.next method takes a Dbt that will be filled in with a reference to a buffer, a size, and an offset that together yield the next data item in the original bulk retrieval buffer. -
next(Dbt, Dbt) - -Method in class com.sleepycat.db.DbMultipleRecnoDataIterator -
The DbMultipleRecnoDataIterator.next method takes two Dbts, one for a key and one for a data item, that will each be filled in with a reference to a buffer, a size, and an offset that together yield the next key and data item in the original bulk retrieval buffer. -
next(Dbt, Dbt) - -Method in class com.sleepycat.db.DbMultipleKeyDataIterator -
The DbMultipleKeyDataIterator.next method takes two Dbts, one for a key and one for a data item, that will each be filled in with a reference to a buffer, a size, and an offset that together yield the next key and data item in the original bulk retrieval buffer. -
nextIndex() - -Method in class com.sleepycat.bdb.collection.StoredIterator +
newOutput(byte[]) - +Static method in class com.sleepycat.bind.tuple.TupleBinding +
Utility method for use by bindings to create a tuple output object + with a specific starting size. +
newSortedMap(Database, Class, Class, boolean) - +Method in class com.sleepycat.collections.TupleSerialFactory +
Creates a sorted map from a previously opened Database object. +
next() - +Method in class com.sleepycat.collections.StoredIterator +
Returns the next element in the iteration. +
next(DatabaseEntry) - +Method in class com.sleepycat.db.MultipleDataEntry +
Get the next data element in the returned set. +
next(DatabaseEntry, DatabaseEntry) - +Method in class com.sleepycat.db.MultipleKeyDataEntry +
Get the next key/data pair in the returned set. +
next(DatabaseEntry, DatabaseEntry) - +Method in class com.sleepycat.db.MultipleRecnoDataEntry +
Get the next record number/data pair in the returned set. +
nextIndex() - +Method in class com.sleepycat.collections.StoredIterator
Returns the index of the element that would be returned by a subsequent call to next. -
NONE - -Static variable in interface com.sleepycat.bdb.bind.DataType -
Undefined data type. +
nullifyForeignKey(SecondaryDatabase, DatabaseEntry) - +Method in class com.sleepycat.bind.serial.SerialSerialKeyCreator +
  +
nullifyForeignKey(Object) - +Method in class com.sleepycat.bind.serial.SerialSerialKeyCreator +
Clears the index key in a data object. +
nullifyForeignKey(SecondaryDatabase, DatabaseEntry) - +Method in class com.sleepycat.bind.serial.TupleSerialKeyCreator +
  +
nullifyForeignKey(Object) - +Method in class com.sleepycat.bind.serial.TupleSerialKeyCreator +
Clears the index key in the deserialized data entry. +
nullifyForeignKey(Object) - +Method in class com.sleepycat.bind.serial.TupleSerialMarshalledKeyCreator +
  +
nullifyForeignKey(String) - +Method in interface com.sleepycat.bind.tuple.MarshalledTupleKeyEntity +
Clears the entity's secondary key fields for the given key name. +
nullifyForeignKey(SecondaryDatabase, DatabaseEntry) - +Method in class com.sleepycat.bind.tuple.TupleTupleKeyCreator +
  +
nullifyForeignKey(TupleInput, TupleOutput) - +Method in class com.sleepycat.bind.tuple.TupleTupleKeyCreator +
Clears the index key in the tuple data entry. +
nullifyForeignKey(TupleInput, TupleOutput) - +Method in class com.sleepycat.bind.tuple.TupleTupleMarshalledKeyCreator +
 

O

-
objectToData(Object) - -Method in class com.sleepycat.bdb.bind.serial.SerialBinding -
Can be overridden to convert the object to a deserialized data object. -
objectToData(Object, DataBuffer) - -Method in class com.sleepycat.bdb.RecordNumberBinding +
OLDEST - +Static variable in class com.sleepycat.db.LockDetectMode +
Reject the lock request for the locker ID with the oldest lock. +
OperationStatus - class com.sleepycat.db.OperationStatus.
Status values from database operations.
objectToData(Object, DatabaseEntry) - +Method in interface com.sleepycat.bind.EntityBinding +
Extracts the data entry from an entity Object. +
objectToData(Object, DatabaseEntry) - +Method in class com.sleepycat.bind.serial.SerialSerialBinding +
  +
objectToData(Object) - +Method in class com.sleepycat.bind.serial.SerialSerialBinding +
Extracts a data object from an entity object. +
objectToData(Object, DatabaseEntry) - +Method in class com.sleepycat.bind.serial.TupleSerialBinding +
  +
objectToData(Object) - +Method in class com.sleepycat.bind.serial.TupleSerialBinding +
Extracts a data object from an entity object. +
objectToData(Object) - +Method in class com.sleepycat.bind.serial.TupleSerialMarshalledBinding +
  +
objectToData(Object, DatabaseEntry) - +Method in class com.sleepycat.bind.tuple.TupleTupleBinding +
  +
objectToData(Object, TupleOutput) - +Method in class com.sleepycat.bind.tuple.TupleTupleBinding +
Extracts a key tuple from an entity object. +
objectToData(Object, TupleOutput) - +Method in class com.sleepycat.bind.tuple.TupleTupleMarshalledBinding
  -
objectToData(Object, DataBuffer) - -Method in interface com.sleepycat.bdb.bind.DataBinding -
Converts an Object into a data buffer. -
objectToData(Object, DataBuffer) - -Method in class com.sleepycat.bdb.bind.ByteArrayBinding +
objectToEntry(Object, DatabaseEntry) - +Method in class com.sleepycat.bind.ByteArrayBinding
  -
objectToData(Object, DataBuffer) - -Method in class com.sleepycat.bdb.bind.serial.SerialFormat -
Utility method for use by bindings to serialize an object. -
objectToData(Object, DataBuffer) - -Method in class com.sleepycat.bdb.bind.serial.SerialBinding +
objectToEntry(Object, DatabaseEntry) - +Method in interface com.sleepycat.bind.EntryBinding +
Converts an Object into an entry buffer. +
objectToEntry(Object, DatabaseEntry) - +Method in class com.sleepycat.bind.RecordNumberBinding
  -
objectToData(Object, DataBuffer) - -Method in class com.sleepycat.bdb.bind.tuple.TupleInputBinding +
objectToEntry(Object, DatabaseEntry) - +Method in class com.sleepycat.bind.serial.SerialBinding +
Serialize an object into an entry buffer. +
objectToEntry(Object, TupleOutput) - +Method in class com.sleepycat.bind.tuple.BooleanBinding
  -
objectToData(Object, DataBuffer) - -Method in class com.sleepycat.bdb.bind.tuple.TupleBinding +
objectToEntry(Object, DatabaseEntry) - +Method in class com.sleepycat.bind.tuple.BooleanBinding
  -
objectToData(Object, TupleOutput) - -Method in class com.sleepycat.bdb.bind.tuple.TupleMarshalledBinding +
objectToEntry(Object, TupleOutput) - +Method in class com.sleepycat.bind.tuple.ByteBinding
  -
objectToData(Object, TupleOutput) - -Method in class com.sleepycat.bdb.bind.tuple.TupleBinding -
Converts a key or value object to a tuple data. -
objectToKey(Object) - -Method in class com.sleepycat.bdb.bind.serial.SerialSerialBinding -
Extracts a key object from an entity object. -
objectToKey(Object, DataBuffer) - -Method in interface com.sleepycat.bdb.bind.EntityBinding -
Extracts the key data from an entity Object. -
objectToKey(Object, DataBuffer) - -Method in class com.sleepycat.bdb.bind.serial.TupleSerialBinding +
objectToEntry(Object, DatabaseEntry) - +Method in class com.sleepycat.bind.tuple.ByteBinding
  -
objectToKey(Object, DataBuffer) - -Method in class com.sleepycat.bdb.bind.serial.SerialSerialBinding +
objectToEntry(Object, TupleOutput) - +Method in class com.sleepycat.bind.tuple.CharacterBinding
  -
objectToKey(Object, DataBuffer) - -Method in class com.sleepycat.bdb.bind.tuple.TupleTupleBinding +
objectToEntry(Object, DatabaseEntry) - +Method in class com.sleepycat.bind.tuple.CharacterBinding
  -
objectToKey(Object, TupleOutput) - -Method in class com.sleepycat.bdb.bind.serial.TupleSerialMarshalledBinding +
objectToEntry(Object, TupleOutput) - +Method in class com.sleepycat.bind.tuple.DoubleBinding
  -
objectToKey(Object, TupleOutput) - -Method in class com.sleepycat.bdb.bind.serial.TupleSerialBinding -
Extracts a key tuple from an entity object. -
objectToKey(Object, TupleOutput) - -Method in class com.sleepycat.bdb.bind.tuple.TupleTupleMarshalledBinding +
objectToEntry(Object, DatabaseEntry) - +Method in class com.sleepycat.bind.tuple.DoubleBinding
  -
objectToKey(Object, TupleOutput) - -Method in class com.sleepycat.bdb.bind.tuple.TupleTupleBinding -
Extracts a key tuple from an entity object. -
objectToValue(Object) - -Method in class com.sleepycat.bdb.bind.serial.TupleSerialMarshalledBinding +
objectToEntry(Object, TupleOutput) - +Method in class com.sleepycat.bind.tuple.FloatBinding +
  +
objectToEntry(Object, DatabaseEntry) - +Method in class com.sleepycat.bind.tuple.FloatBinding +
  +
objectToEntry(Object, TupleOutput) - +Method in class com.sleepycat.bind.tuple.IntegerBinding +
  +
objectToEntry(Object, DatabaseEntry) - +Method in class com.sleepycat.bind.tuple.IntegerBinding +
  +
objectToEntry(Object, TupleOutput) - +Method in class com.sleepycat.bind.tuple.LongBinding +
  +
objectToEntry(Object, DatabaseEntry) - +Method in class com.sleepycat.bind.tuple.LongBinding
  -
objectToValue(Object) - -Method in class com.sleepycat.bdb.bind.serial.TupleSerialBinding -
Extracts a value object from an entity object. -
objectToValue(Object) - -Method in class com.sleepycat.bdb.bind.serial.SerialSerialBinding -
Extracts a value object from an entity object. -
objectToValue(Object, DataBuffer) - -Method in interface com.sleepycat.bdb.bind.EntityBinding -
Extracts the value data from an entity Object. -
objectToValue(Object, DataBuffer) - -Method in class com.sleepycat.bdb.bind.serial.TupleSerialBinding +
objectToEntry(Object, TupleOutput) - +Method in class com.sleepycat.bind.tuple.ShortBinding
  -
objectToValue(Object, DataBuffer) - -Method in class com.sleepycat.bdb.bind.serial.SerialSerialBinding +
objectToEntry(Object, DatabaseEntry) - +Method in class com.sleepycat.bind.tuple.ShortBinding
  -
objectToValue(Object, DataBuffer) - -Method in class com.sleepycat.bdb.bind.tuple.TupleTupleBinding +
objectToEntry(Object, TupleOutput) - +Method in class com.sleepycat.bind.tuple.StringBinding
  -
objectToValue(Object, TupleOutput) - -Method in class com.sleepycat.bdb.bind.tuple.TupleTupleMarshalledBinding +
objectToEntry(Object, DatabaseEntry) - +Method in class com.sleepycat.bind.tuple.StringBinding
  -
objectToValue(Object, TupleOutput) - -Method in class com.sleepycat.bdb.bind.tuple.TupleTupleBinding +
objectToEntry(Object, DatabaseEntry) - +Method in class com.sleepycat.bind.tuple.TupleBinding +
  +
objectToEntry(Object, TupleOutput) - +Method in class com.sleepycat.bind.tuple.TupleBinding +
Converts a key or data object to a tuple entry. +
objectToEntry(Object, DatabaseEntry) - +Method in class com.sleepycat.bind.tuple.TupleInputBinding +
  +
objectToEntry(Object, TupleOutput) - +Method in class com.sleepycat.bind.tuple.TupleMarshalledBinding +
  +
objectToKey(Object, DatabaseEntry) - +Method in interface com.sleepycat.bind.EntityBinding +
Extracts the key entry from an entity Object. +
objectToKey(Object, DatabaseEntry) - +Method in class com.sleepycat.bind.serial.SerialSerialBinding +
  +
objectToKey(Object) - +Method in class com.sleepycat.bind.serial.SerialSerialBinding +
Extracts a key object from an entity object. +
objectToKey(Object, DatabaseEntry) - +Method in class com.sleepycat.bind.serial.TupleSerialBinding +
  +
objectToKey(Object, TupleOutput) - +Method in class com.sleepycat.bind.serial.TupleSerialBinding
Extracts a key tuple from an entity object. -
ON_DELETE_ABORT - -Static variable in class com.sleepycat.bdb.ForeignKeyIndex -
When the foreign key is deleted, throw an exception. -
ON_DELETE_CASCADE - -Static variable in class com.sleepycat.bdb.ForeignKeyIndex -
When the foreign key is deleted, delete the index key. -
ON_DELETE_CLEAR - -Static variable in class com.sleepycat.bdb.ForeignKeyIndex -
When the foreign key is deleted, clear the index key. -
ONE_DAY - -Static variable in class com.sleepycat.bdb.util.TimeUnits -
One day in milliseconds. -
ONE_HOUR - -Static variable in class com.sleepycat.bdb.util.TimeUnits -
One hour in milliseconds. -
ONE_MINUTE - -Static variable in class com.sleepycat.bdb.util.TimeUnits -
One minute in milliseconds. -
ONE_SECOND - -Static variable in class com.sleepycat.bdb.util.TimeUnits -
One second in milliseconds. -
ONE_WEEK - -Static variable in class com.sleepycat.bdb.util.TimeUnits -
One week in milliseconds. -
open(DbTxn, String, String, int, int, int) - -Method in class com.sleepycat.db.Db -
The Db.open method opens the database represented by the file and database parameters for both reading and writing. -
open(String, int, int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.open method opens a Berkeley DB environment. -
openCursor(boolean) - -Method in class com.sleepycat.bdb.DataDb -
Opens a cursor for this database. -
outputToData(TupleOutput, DataBuffer) - -Method in class com.sleepycat.bdb.bind.tuple.TupleFormat -
Utility method to set the data in a data buffer to the data in a tuple +
objectToKey(Object, TupleOutput) - +Method in class com.sleepycat.bind.serial.TupleSerialMarshalledBinding +
  +
objectToKey(Object, DatabaseEntry) - +Method in class com.sleepycat.bind.tuple.TupleTupleBinding +
  +
objectToKey(Object, TupleOutput) - +Method in class com.sleepycat.bind.tuple.TupleTupleBinding +
Extracts a key tuple from an entity object. +
objectToKey(Object, TupleOutput) - +Method in class com.sleepycat.bind.tuple.TupleTupleMarshalledBinding +
  +
openCursor(Transaction, CursorConfig) - +Method in class com.sleepycat.db.Database +
Return a cursor into the database. +
openDatabase(Transaction, String, String, DatabaseConfig) - +Method in class com.sleepycat.db.Environment +
Open a database. +
openLogCursor() - +Method in class com.sleepycat.db.Environment +
Return a log cursor. +
openSecondaryCursor(Transaction, CursorConfig) - +Method in class com.sleepycat.db.SecondaryDatabase +
Obtain a cursor on a database, returning a SecondaryCursor. +
openSecondaryDatabase(Transaction, String, String, Database, SecondaryConfig) - +Method in class com.sleepycat.db.Environment +
Open a database. +
openSequence(Transaction, DatabaseEntry, SequenceConfig) - +Method in class com.sleepycat.db.Database +
Open a sequence in the database. +
outputToEntry(TupleOutput, DatabaseEntry) - +Static method in class com.sleepycat.bind.tuple.TupleBinding +
Utility method to set the data in an entry buffer to the data in a tuple output object.
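The entries above map the old open and binding calls onto the new com.sleepycat.db and com.sleepycat.bind classes. A minimal sketch of the renamed open path follows, assuming an on-disk home directory; DatabaseConfig.setType and DatabaseType.BTREE are assumptions, since only the QUEUE and RECNO constants appear in this part of the index.

    import java.io.File;

    import com.sleepycat.db.Cursor;
    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseConfig;
    import com.sleepycat.db.DatabaseType;
    import com.sleepycat.db.Environment;
    import com.sleepycat.db.EnvironmentConfig;

    public class OpenSketch {
        public static void main(String[] args) throws Exception {
            // Configure and open the environment (the new form of DbEnv.open).
            EnvironmentConfig envConfig = new EnvironmentConfig();
            envConfig.setAllowCreate(true);
            envConfig.setInitializeCache(true);
            Environment env = new Environment(new File("/tmp/bdb-home"), envConfig);

            // Environment.openDatabase replaces Db.open; setType/BTREE are assumed.
            DatabaseConfig dbConfig = new DatabaseConfig();
            dbConfig.setAllowCreate(true);
            dbConfig.setType(DatabaseType.BTREE);
            Database db = env.openDatabase(null, "example.db", null, dbConfig);

            // Database.openCursor replaces cursor creation on the old Db handle.
            Cursor cursor = db.openCursor(null, null);
            cursor.close();

            db.close();
            env.close();
        }
    }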

P

-
panic(DbEnv, DbException) - -Method in interface com.sleepycat.db.DbPanicHandler -
The DbPanicHandler interface is used by the DbEnv.setPanicHandler method. -
parentid - -Variable in class com.sleepycat.db.DbTxnStat.Active -
The transaction ID of the parent transaction (or 0, if no parent). -
pget(Dbt, Dbt, Dbt, int) - -Method in class com.sleepycat.db.Dbc -
Deprecated. Replaced in Berkeley DB 4.2 by Dbc.get(Dbt,Dbt,Dbt,int) -
pget(DbTxn, Dbt, Dbt, Dbt, int) - -Method in class com.sleepycat.db.Db -
Deprecated. Replaced in Berkeley DB 4.2 by get(DbTxn,Dbt,Dbt,Dbt,int) -
prefix(Db, Dbt, Dbt) - -Method in interface com.sleepycat.db.DbBtreePrefix -
The DbBtreePrefix interface is used by the Db.setBtreePrefix method. -
prepare(byte[]) - -Method in class com.sleepycat.db.DbTxn -
The DbTxn.prepare method initiates the beginning of a two-phase commit. -
previous() - -Method in class com.sleepycat.bdb.collection.StoredIterator -
Returns the next element in the interation. -
previousIndex() - -Method in class com.sleepycat.bdb.collection.StoredIterator +
PRINT - +Static variable in class com.sleepycat.db.RecoveryOperation +
The log is being printed for debugging purposes; print the contents of + this log record in the desired format. +
PUT - +Static variable in class com.sleepycat.db.LockOperation +
The lock to which the lock field refers is released. +
PUT_ALL - +Static variable in class com.sleepycat.db.LockOperation +
All locks held by the specified locker are released. +
PUT_OBJ - +Static variable in class com.sleepycat.db.LockOperation +
All locks held on obj are released. +
PanicHandler - interface com.sleepycat.db.PanicHandler.
An interface specifying a function to be called if the database +environment panics.
PreparedTransaction - class com.sleepycat.db.PreparedTransaction.
The PreparedTransaction object is used to encapsulate a single prepared, +but not yet resolved, transaction.
PrimaryKeyAssigner - interface com.sleepycat.collections.PrimaryKeyAssigner.
An interface implemented to assign new primary key values.
panic(boolean) - +Method in class com.sleepycat.db.Environment +
Set the panic state for the database environment. +
panic(Environment, DatabaseException) - +Method in interface com.sleepycat.db.PanicHandler +
A function to be called if the database environment panics. +
prefix(Database, DatabaseEntry, DatabaseEntry) - +Method in interface com.sleepycat.db.BtreePrefixCalculator +
The application-specific Btree prefix callback. +
prepare(byte[]) - +Method in class com.sleepycat.db.Transaction +
Initiate the beginning of a two-phase commit. +
previous() - +Method in class com.sleepycat.collections.StoredIterator +
Returns the next element in the iteration. +
previousIndex() - +Method in class com.sleepycat.collections.StoredIterator
Returns the index of the element that would be returned by a subsequent call to previous. -
PrimaryKeyAssigner - interface com.sleepycat.bdb.PrimaryKeyAssigner.
An interface implemented to assign new primary key values.
put(DataThang, DataThang, int) - -Method in class com.sleepycat.bdb.DataDb -
Performs a general database 'put' operation. -
put(Dbc, DataThang, DataThang, int) - -Method in class com.sleepycat.bdb.DataDb -
Performs a general database 'put' operation via a cursor. -
put(Dbt, Dbt, int) - -Method in class com.sleepycat.db.Dbc -
The Dbc.put method stores key/data pairs into the database. -
put(DbTxn, Dbt, Dbt, int) - -Method in class com.sleepycat.db.Db -
The Db.put method stores key/data pairs in the database. -
put(Object, Object) - -Method in class com.sleepycat.bdb.collection.StoredMap +
processReplicationMessage(DatabaseEntry, DatabaseEntry, int) - +Method in class com.sleepycat.db.Environment +
Process an incoming replication message sent by a member of the + replication group to the local database environment. +
put(Object, Object) - +Method in class com.sleepycat.collections.StoredMap
Associates the specified value with the specified key in this map (optional operation). -
put(Object, Object, int, Object[]) - -Method in class com.sleepycat.bdb.DataView -
Performs a database 'put' operation, optionally returning the old value. -
put(Object, Object, int, Object[]) - -Method in class com.sleepycat.bdb.DataCursor -
Perform an arbitrary database 'put' operation, optionally returning - the previous value. -
put(Object, Object, int, Object[], boolean) - -Method in class com.sleepycat.bdb.DataCursor -
Perform an arbitrary database 'put' operation, optionally using the - current key instead of the key parameter. -
putAll(Map) - -Method in class com.sleepycat.bdb.collection.StoredMap +
put(DatabaseEntry, DatabaseEntry) - +Method in class com.sleepycat.db.Cursor +
Store a key/data pair into the database. +
put(Transaction, DatabaseEntry, DatabaseEntry) - +Method in class com.sleepycat.db.Database +
+Store the key/data pair into the database. +
putAfter(DatabaseEntry) - +Method in class com.sleepycat.db.Cursor +
Store a key/data pair into the database. +
putAll(Map) - +Method in class com.sleepycat.collections.StoredMap
Copies all of the mappings from the specified map to this map (optional operation). +
putBefore(DatabaseEntry) - +Method in class com.sleepycat.db.Cursor +
Store a key/data pair into the database. +
putCurrent(DatabaseEntry) - +Method in class com.sleepycat.db.Cursor +
Store a key/data pair into the database. +
putKeyFirst(DatabaseEntry, DatabaseEntry) - +Method in class com.sleepycat.db.Cursor +
Store a key/data pair into the database. +
putKeyLast(DatabaseEntry, DatabaseEntry) - +Method in class com.sleepycat.db.Cursor +
Store a key/data pair into the database. +
putLock(Lock) - +Method in class com.sleepycat.db.Environment +
Release a lock. +
putNoDupData(DatabaseEntry, DatabaseEntry) - +Method in class com.sleepycat.db.Cursor +
Store a key/data pair into the database. +
putNoDupData(Transaction, DatabaseEntry, DatabaseEntry) - +Method in class com.sleepycat.db.Database +
+Store the key/data pair into the database if it does not already appear +in the database. +
putNoOverwrite(DatabaseEntry, DatabaseEntry) - +Method in class com.sleepycat.db.Cursor +
Store a key/data pair into the database. +
putNoOverwrite(Transaction, DatabaseEntry, DatabaseEntry) - +Method in class com.sleepycat.db.Database +
+Store the key/data pair into the database if the key does not already +appear in the database.
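The put family above splits the old flag-driven Db.put/Dbc.put into named methods. A hedged sketch of Database.put and putNoOverwrite against an already-open handle; OperationStatus.KEYEXIST is assumed as the duplicate-key status, since only SUCCESS is listed in this section.

    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseEntry;
    import com.sleepycat.db.OperationStatus;

    public class PutSketch {
        // 'db' is assumed to be an already-open Database handle.
        static void storePair(Database db) throws Exception {
            DatabaseEntry key = new DatabaseEntry("fruit".getBytes("UTF-8"));
            DatabaseEntry data = new DatabaseEntry("apple".getBytes("UTF-8"));

            // Unconditional store; the old Db.put/Dbc.put flags are now
            // separate methods (put, putNoOverwrite, putNoDupData, ...).
            db.put(null, key, data);

            // Conditional store: succeeds only if the key is not yet present.
            data.setData("orange".getBytes("UTF-8"));
            OperationStatus status = db.putNoOverwrite(null, key, data);
            if (status != OperationStatus.SUCCESS) {
                System.out.println("key already present: " + status);
            }
        }
    }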

Q

-
qs_cur_recno - -Variable in class com.sleepycat.db.DbQueueStat -
Next available record number. -
qs_extentsize - -Variable in class com.sleepycat.db.DbQueueStat -
Underlying database extent size, in pages. -
qs_first_recno - -Variable in class com.sleepycat.db.DbQueueStat -
First undeleted record in the database. -
qs_magic - -Variable in class com.sleepycat.db.DbQueueStat -
Magic number that identifies the file as a Queue file. -
qs_metaflags - -Variable in class com.sleepycat.db.DbQueueStat -
  -
qs_ndata - -Variable in class com.sleepycat.db.DbQueueStat -
The number of records in the database. -
qs_nkeys - -Variable in class com.sleepycat.db.DbQueueStat -
The number of records in the database. -
qs_pages - -Variable in class com.sleepycat.db.DbQueueStat -
Number of pages in the database. -
qs_pagesize - -Variable in class com.sleepycat.db.DbQueueStat -
Underlying database page size, in bytes. -
qs_pgfree - -Variable in class com.sleepycat.db.DbQueueStat -
Number of bytes free in database pages. -
qs_re_len - -Variable in class com.sleepycat.db.DbQueueStat -
The length of the records. -
qs_re_pad - -Variable in class com.sleepycat.db.DbQueueStat -
The padding byte value for the records. -
qs_version - -Variable in class com.sleepycat.db.DbQueueStat -
The version of the Queue file type. -
+
QUEUE - +Static variable in class com.sleepycat.db.DatabaseType +
The database is a Queue. +
QueueStats - class com.sleepycat.db.QueueStats.
The QueueStats object is used to return Queue database statistics.
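QueueStats replaces the removed qs_* fields of DbQueueStat. A sketch of retrieving the statistics through StatsConfig, assuming the database was opened as DatabaseType.QUEUE and that Database.getStats(Transaction, StatsConfig) is the accessor; getStats is not listed in this part of the index.

    import com.sleepycat.db.Database;
    import com.sleepycat.db.QueueStats;
    import com.sleepycat.db.StatsConfig;

    public class QueueStatsSketch {
        // 'queueDb' is assumed to have been opened as a Queue database.
        static void printStats(Database queueDb) throws Exception {
            StatsConfig config = new StatsConfig();
            config.setFast(true); // skip statistics that need a full traversal
            // Database.getStats is an assumption; the cast mirrors the old
            // Db.stat usage that returned a DbQueueStat object.
            QueueStats stats = (QueueStats) queueDb.getStats(null, config);
            System.out.println(stats);
        }
    }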

R

-
read() - -Method in class com.sleepycat.bdb.util.FastInputStream +
RANDOM - +Static variable in class com.sleepycat.db.LockDetectMode +
Reject the lock request for a random locker ID. +
READ - +Static variable in class com.sleepycat.db.LockRequestMode +
Read (shared). +
RECNO - +Static variable in class com.sleepycat.db.DatabaseType +
The database is a Recno. +
RMW - +Static variable in class com.sleepycat.db.LockMode +
Acquire write locks instead of read locks when doing the retrieval. +
RecordNumberAppender - interface com.sleepycat.db.RecordNumberAppender.
An interface specifying a callback function that modifies stored data +based on a generated key.
RecordNumberBinding - class com.sleepycat.bind.RecordNumberBinding.
An EntryBinding that treats a record number key entry as a + Long key object.
RecordNumberBinding() - +Constructor for class com.sleepycat.bind.RecordNumberBinding +
Creates a record number binding. +
RecoveryOperation - class com.sleepycat.db.RecoveryOperation.
The recovery operation being performed when LogRecordHandler.handleLogRecord is called.
ReplicationHandleDeadException - exception com.sleepycat.db.ReplicationHandleDeadException.
Thrown when a database handle has been invalidated because a replication +election unrolled a committed transaction.
ReplicationStats - class com.sleepycat.db.ReplicationStats.
Replication statistics for a database environment.
ReplicationStatus - class com.sleepycat.db.ReplicationStatus.
The return status from processing a replication message.
ReplicationTransport - interface com.sleepycat.db.ReplicationTransport.
An interface specifying a replication transmit function, which sends +information to other members of the replication group.
RunRecoveryException - exception com.sleepycat.db.RunRecoveryException.
Thrown when the database environment needs to be recovered.
RuntimeExceptionWrapper - exception com.sleepycat.util.RuntimeExceptionWrapper.
A RuntimeException that can contain nested exceptions.
RuntimeExceptionWrapper(Throwable) - +Constructor for class com.sleepycat.util.RuntimeExceptionWrapper +
  +
read() - +Method in class com.sleepycat.util.FastInputStream
  -
read(byte[]) - -Method in class com.sleepycat.bdb.util.FastInputStream +
read(byte[]) - +Method in class com.sleepycat.util.FastInputStream
  -
read(byte[], int, int) - -Method in class com.sleepycat.bdb.util.FastInputStream +
read(byte[], int, int) - +Method in class com.sleepycat.util.FastInputStream
  -
readBoolean() - -Method in class com.sleepycat.bdb.bind.tuple.TupleInput +
readBoolean() - +Method in class com.sleepycat.bind.tuple.TupleInput
Reads a boolean (one byte) unsigned value from the buffer and returns true if it is non-zero and false if it is zero. -
readByte() - -Method in class com.sleepycat.bdb.bind.tuple.TupleInput +
readByte() - +Method in class com.sleepycat.bind.tuple.TupleInput
Reads a signed byte (one byte) value from the buffer. -
readBytes(char[]) - -Method in class com.sleepycat.bdb.bind.tuple.TupleInput -
Reads the specified number of bytes from the buffer, converting each - unsigned byte value to a character of the resulting array. -
readBytes(int) - -Method in class com.sleepycat.bdb.bind.tuple.TupleInput +
readBytes(int) - +Method in class com.sleepycat.bind.tuple.TupleInput
Reads the specified number of bytes from the buffer, converting each unsigned byte value to a character of the resulting string. -
readChar() - -Method in class com.sleepycat.bdb.bind.tuple.TupleInput +
readBytes(char[]) - +Method in class com.sleepycat.bind.tuple.TupleInput +
Reads the specified number of bytes from the buffer, converting each + unsigned byte value to a character of the resulting array. +
readChar() - +Method in class com.sleepycat.bind.tuple.TupleInput
Reads a char (two byte) unsigned value from the buffer. -
readChars(char[]) - -Method in class com.sleepycat.bdb.bind.tuple.TupleInput -
Reads the specified number of characters from the buffer, converting - each two byte unsigned value to a character of the resulting array. -
readChars(int) - -Method in class com.sleepycat.bdb.bind.tuple.TupleInput +
readChars(int) - +Method in class com.sleepycat.bind.tuple.TupleInput
Reads the specified number of characters from the buffer, converting each two byte unsigned value to a character of the resulting string. -
readDouble() - -Method in class com.sleepycat.bdb.bind.tuple.TupleInput +
readChars(char[]) - +Method in class com.sleepycat.bind.tuple.TupleInput +
Reads the specified number of characters from the buffer, converting + each two byte unsigned value to a character of the resulting array. +
readDouble() - +Method in class com.sleepycat.bind.tuple.TupleInput
Reads a signed double (eight byte) value from the buffer. -
readFloat() - -Method in class com.sleepycat.bdb.bind.tuple.TupleInput +
readFast() - +Method in class com.sleepycat.util.FastInputStream +
Equivalent to read() but does not throw + IOException. +
readFast(byte[]) - +Method in class com.sleepycat.util.FastInputStream +
Equivalent to read(byte[]) but does not throw + IOException. +
readFast(byte[], int, int) - +Method in class com.sleepycat.util.FastInputStream +
Equivalent to read(byte[],int,int) but does not throw + IOException. +
readFloat() - +Method in class com.sleepycat.bind.tuple.TupleInput
Reads a signed float (four byte) value from the buffer. -
readInt() - -Method in class com.sleepycat.bdb.bind.tuple.TupleInput +
readInt() - +Method in class com.sleepycat.bind.tuple.TupleInput
Reads a signed int (four byte) value from the buffer. -
readLong() - -Method in class com.sleepycat.bdb.bind.tuple.TupleInput +
readLong() - +Method in class com.sleepycat.bind.tuple.TupleInput
Reads a signed long (eight byte) value from the buffer. -
readShort() - -Method in class com.sleepycat.bdb.bind.tuple.TupleInput +
readShort() - +Method in class com.sleepycat.bind.tuple.TupleInput
Reads a signed short (two byte) value from the buffer. -
readString() - -Method in class com.sleepycat.bdb.bind.tuple.TupleInput +
readString() - +Method in class com.sleepycat.bind.tuple.TupleInput
Reads a null-terminated UTF string from the data buffer and converts the data from UTF to Unicode. -
readString(char[]) - -Method in class com.sleepycat.bdb.bind.tuple.TupleInput +
readString(int) - +Method in class com.sleepycat.bind.tuple.TupleInput
Reads a string of the specified number of UTF characters from the data buffer and converts the data from UTF to Unicode. -
readString(int) - -Method in class com.sleepycat.bdb.bind.tuple.TupleInput +
readString(char[]) - +Method in class com.sleepycat.bind.tuple.TupleInput
Reads a string of the specified number of UTF characters from the data buffer and converts the data from UTF to Unicode. -
readUnsignedByte() - -Method in class com.sleepycat.bdb.bind.tuple.TupleInput +
readUnsignedByte() - +Method in class com.sleepycat.bind.tuple.TupleInput
Reads an unsigned byte (one byte) value from the buffer. -
readUnsignedInt() - -Method in class com.sleepycat.bdb.bind.tuple.TupleInput +
readUnsignedInt() - +Method in class com.sleepycat.bind.tuple.TupleInput
Reads an unsigned int (four byte) value from the buffer. -
readUnsignedShort() - -Method in class com.sleepycat.bdb.bind.tuple.TupleInput +
readUnsignedShort() - +Method in class com.sleepycat.bind.tuple.TupleInput
Reads an unsigned short (two byte) value from the buffer. -
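The TupleInput read* methods above are the reading half of the tuple format. A small round-trip sketch follows, assuming TupleOutput offers matching writeInt/writeString methods and a no-argument constructor; only the read side and TupleBinding.outputToEntry are listed in this part of the index.

    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;
    import com.sleepycat.db.DatabaseEntry;

    public class TupleSketch {
        public static void main(String[] args) {
            // Write a small tuple (writeInt/writeString and the no-argument
            // TupleOutput constructor are assumptions).
            TupleOutput out = new TupleOutput();
            out.writeInt(42);
            out.writeString("hello");

            // Copy the tuple output into an entry, as outputToEntry describes.
            DatabaseEntry entry = new DatabaseEntry();
            TupleBinding.outputToEntry(out, entry);

            // Read the values back with the TupleInput read* methods.
            TupleInput in = new TupleInput(entry.getData());
            int number = in.readInt();
            String text = in.readString();
            System.out.println(number + " " + text);
        }
    }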
RecordNumberBinding - class com.sleepycat.bdb.RecordNumberBinding.
A concrete binding for record number keys.
RecordNumberBinding(RecordNumberFormat) - -Constructor for class com.sleepycat.bdb.RecordNumberBinding -
Creates a byte array binding. -
RecordNumberFormat - class com.sleepycat.bdb.RecordNumberFormat.
The data format for record number keys.
RecordNumberFormat() - -Constructor for class com.sleepycat.bdb.RecordNumberFormat -
Creates a record number format. -
recordNumberToData(long, DataBuffer) - -Method in class com.sleepycat.bdb.RecordNumberFormat +
recordNumberToEntry(long, DatabaseEntry) - +Static method in class com.sleepycat.bind.RecordNumberBinding
Utility method for use by bindings to translate a record number integer - to a data buffer. -
remove() - -Method in class com.sleepycat.bdb.collection.StoredIterator -
Removes the last element that was returned by next or previous (optional + to a entry buffer. +
recover(int, boolean) - +Method in class com.sleepycat.db.Environment +
Return a list of prepared but not yet resolved transactions. +
recoveryFeedback(Environment, int) - +Method in interface com.sleepycat.db.FeedbackHandler +
A function called with progress information when the database environment is being recovered. +
remove(Object) - +Method in class com.sleepycat.collections.StoredEntrySet +
Removes the specified element from this set if it is present (optional operation). -
remove(int) - -Method in class com.sleepycat.bdb.collection.StoredList -
Removes the element at the specified position in this list (optional +
remove() - +Method in class com.sleepycat.collections.StoredIterator +
Removes the last element that was returned by next or previous (optional operation). -
remove(Object) - -Method in class com.sleepycat.bdb.collection.StoredValueSet -
Removes the specified value from this set if it is present (optional +
remove(Object) - +Method in class com.sleepycat.collections.StoredKeySet +
Removes the specified key from this set if it is present (optional operation). -
remove(Object) - -Method in class com.sleepycat.bdb.collection.StoredMap -
Removes the mapping for this key from this map if present (optional +
remove(int) - +Method in class com.sleepycat.collections.StoredList +
Removes the element at the specified position in this list (optional operation). -
remove(Object) - -Method in class com.sleepycat.bdb.collection.StoredList +
remove(Object) - +Method in class com.sleepycat.collections.StoredList
Removes the first occurrence in this list of the specified element (optional operation). -
remove(Object) - -Method in class com.sleepycat.bdb.collection.StoredKeySet -
Removes the specified key from this set if it is present (optional +
remove(Object) - +Method in class com.sleepycat.collections.StoredMap +
Removes the mapping for this key from this map if present (optional operation). -
remove(Object) - -Method in class com.sleepycat.bdb.collection.StoredEntrySet -
Removes the specified element from this set if it is present (optional +
remove(Object) - +Method in class com.sleepycat.collections.StoredValueSet +
Removes the specified value from this set if it is present (optional operation). -
remove(String, int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.remove method destroys a Berkeley DB environment if it is not currently in use. -
remove(String, String, int) - -Method in class com.sleepycat.db.Db -
The Db.remove method removes the database specified by the file and database parameters. -
removeAll(Collection) - -Method in class com.sleepycat.bdb.collection.StoredCollection +
remove(String, String, DatabaseConfig) - +Static method in class com.sleepycat.db.Database +
+Remove a database. +
remove(File, boolean, EnvironmentConfig) - +Static method in class com.sleepycat.db.Environment +
Destroy a database environment. +
removeAll(Collection) - +Method in class com.sleepycat.collections.StoredCollection
Removes all this collection's elements that are also contained in the specified collection (optional operation). -
rename(String, String, String, int) - -Method in class com.sleepycat.db.Db -
The Db.rename method renames the database specified by the file and database parameters to newname. -
rep_elect(int, int, int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.replicationElect(int,int,int) -
rep_process_message(Dbt, Dbt, DbEnv.RepProcessMessage, DbLsn) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.replicationProcessMessage(Dbt,Dbt,DbEnv.RepProcessMessage,DbLsn) -
rep_start(Dbt, int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.replicationStart(Dbt,int) -
rep_stat(int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.replicationStat(int) -
replicationElect(int, int, int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.replicationElect method holds an election for the master of a replication group. -
replicationProcessMessage(Dbt, Dbt, DbEnv.RepProcessMessage, DbLsn) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.replicationProcessMessage method processes an incoming replication message sent by a member of the replication group to the local database environment. -
replicationStart(Dbt, int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.replicationStart method configures the database environment as a client or master in a group of replicated database environments. -
replicationStat(int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.replicationStat method returns the replication subsystem statistics. -
reset() - -Method in class com.sleepycat.bdb.util.FastOutputStream -
  -
reset() - -Method in class com.sleepycat.bdb.util.FastInputStream -
  -
retainAll(Collection) - -Method in class com.sleepycat.bdb.collection.StoredCollection +
removeDatabase(Transaction, String, String) - +Method in class com.sleepycat.db.Environment +
+Remove a database. +
removeOldLogFiles() - +Method in class com.sleepycat.db.Environment +
Remove log files that are no longer needed. +
removeSequence(Transaction, DatabaseEntry, SequenceConfig) - +Method in class com.sleepycat.db.Database +
Remove the sequence from the database. +
rename(String, String, String, DatabaseConfig) - +Static method in class com.sleepycat.db.Database +
+Rename a database. +
renameDatabase(Transaction, String, String, String) - +Method in class com.sleepycat.db.Environment +
+Rename a database. +
reset() - +Method in class com.sleepycat.util.FastInputStream +
  +
reset() - +Method in class com.sleepycat.util.FastOutputStream +
  +
retainAll(Collection) - +Method in class com.sleepycat.collections.StoredCollection
Retains only the elements in this collection that are contained in the specified collection (optional operation). -
run(TransactionWorker) - -Method in class com.sleepycat.bdb.TransactionRunner -
Calls the TransactionWorker.doWork() method and, for transactional +
run(TransactionWorker) - +Method in class com.sleepycat.collections.TransactionRunner +
Calls the TransactionWorker.doWork() method and, for transactional environments, begins and ends a transaction. -
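TransactionRunner.run wraps a unit of work in a transaction. A sketch, assuming a transactional environment and the TransactionRunner(Environment) constructor, which is not listed in this part of the index.

    import com.sleepycat.collections.TransactionRunner;
    import com.sleepycat.collections.TransactionWorker;
    import com.sleepycat.db.Environment;

    public class RunnerSketch {
        // 'env' is assumed to be a transactional Environment, and the
        // TransactionRunner(Environment) constructor is an assumption.
        static void storeAtomically(Environment env) throws Exception {
            TransactionRunner runner = new TransactionRunner(env);
            runner.run(new TransactionWorker() {
                public void doWork() throws Exception {
                    // Reads and writes placed here run inside one transaction;
                    // the runner begins it, retries on deadlock, and commits
                    // or aborts around this method.
                }
            });
        }
    }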
RuntimeExceptionWrapper - exception com.sleepycat.bdb.util.RuntimeExceptionWrapper.
A RuntimeException that can contain nested exceptions.
RuntimeExceptionWrapper(Throwable) - -Constructor for class com.sleepycat.bdb.util.RuntimeExceptionWrapper -
 

S

-
secondaryKeyCreate(Db, Dbt, Dbt, Dbt) - -Method in interface com.sleepycat.db.DbSecondaryKeyCreate -
The secondaryKeyCreate interface is used by the Db.associate method. -
send(DbEnv, Dbt, Dbt, DbLsn, int, int) - -Method in interface com.sleepycat.db.DbRepTransport -
The DbRepTransport interface is used by the DbEnv.setReplicationTransport method. -
SerialBinding - class com.sleepycat.bdb.bind.serial.SerialBinding.
A concrete serial binding for keys or values.
SerialBinding(SerialFormat) - -Constructor for class com.sleepycat.bdb.bind.serial.SerialBinding +
SUCCESS - +Static variable in class com.sleepycat.db.OperationStatus +
The operation was successful. +
SecondaryConfig - class com.sleepycat.db.SecondaryConfig.
The configuration properties of a SecondaryDatabase extend +those of a primary Database.
SecondaryConfig() - +Constructor for class com.sleepycat.db.SecondaryConfig +
Creates an instance with the system's default settings. +
SecondaryCursor - class com.sleepycat.db.SecondaryCursor.
A database cursor for a secondary database.
SecondaryDatabase - class com.sleepycat.db.SecondaryDatabase.
A secondary database handle.
SecondaryDatabase(String, String, Database, SecondaryConfig) - +Constructor for class com.sleepycat.db.SecondaryDatabase +
Open a database. +
SecondaryKeyCreator - interface com.sleepycat.db.SecondaryKeyCreator.
An interface specifying how secondary keys for a +SecondaryDatabase are created.
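SecondaryKeyCreator is the callback that derives index keys from primary records. A sketch, assuming the usual createSecondaryKey(SecondaryDatabase, DatabaseEntry, DatabaseEntry, DatabaseEntry) signature; only the interface name appears in this index.

    import com.sleepycat.db.DatabaseEntry;
    import com.sleepycat.db.SecondaryDatabase;
    import com.sleepycat.db.SecondaryKeyCreator;

    public class KeyCreatorSketch implements SecondaryKeyCreator {
        // The createSecondaryKey signature is an assumption based on the
        // usual form of this callback.
        public boolean createSecondaryKey(SecondaryDatabase secondary,
                                          DatabaseEntry key,
                                          DatabaseEntry data,
                                          DatabaseEntry result) {
            // Derive the index key from the primary record; here the first
            // four bytes of the data entry become the secondary key.
            byte[] bytes = new byte[4];
            System.arraycopy(data.getData(), 0, bytes, 0, 4);
            result.setData(bytes);
            return true; // false would mean "no secondary key for this record"
        }
    }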
Sequence - class com.sleepycat.db.Sequence.
A Sequence handle is used to manipulate a sequence record in a database.
SequenceConfig - class com.sleepycat.db.SequenceConfig.
Specify the attributes of a sequence.
SequenceConfig() - +Constructor for class com.sleepycat.db.SequenceConfig +
An instance created using the default constructor is initialized with + the system's default settings. +
SequenceStats - class com.sleepycat.db.SequenceStats.
A SequenceStats object is used to return sequence statistics.
SerialBinding - class com.sleepycat.bind.serial.SerialBinding.
A concrete EntryBinding that treats a key or data entry as + a serialized object.
SerialBinding(ClassCatalog, Class) - +Constructor for class com.sleepycat.bind.serial.SerialBinding
Creates a serial binding. -
SerialFormat - class com.sleepycat.bdb.bind.serial.SerialFormat.
The format for serialized data.
SerialFormat(ClassCatalog, Class) - -Constructor for class com.sleepycat.bdb.bind.serial.SerialFormat -
Creates a serial format. -
SerialInput - class com.sleepycat.bdb.bind.serial.SerialInput.
Used instead of an ObjectInputStream, which it extends, to read an - object stream written by the SerialOutput class.
SerialInput(InputStream, ClassCatalog) - -Constructor for class com.sleepycat.bdb.bind.serial.SerialInput +
SerialInput - class com.sleepycat.bind.serial.SerialInput.
A specialized ObjectInputStream that gets class description + information from a ClassCatalog.
SerialInput(InputStream, ClassCatalog) - +Constructor for class com.sleepycat.bind.serial.SerialInput
Creates a serial input stream. -
SerialOutput - class com.sleepycat.bdb.bind.serial.SerialOutput.
Used instead of an ObjectOutputStream, which it extends, to write a compact - object stream.
SerialOutput(OutputStream, ClassCatalog) - -Constructor for class com.sleepycat.bdb.bind.serial.SerialOutput +
SerialOutput - class com.sleepycat.bind.serial.SerialOutput.
A specialized ObjectOutputStream that stores class description + information in a ClassCatalog.
SerialOutput(OutputStream, ClassCatalog) - +Constructor for class com.sleepycat.bind.serial.SerialOutput
Creates a serial output stream. -
SerialSerialBinding - class com.sleepycat.bdb.bind.serial.SerialSerialBinding.
An abstract entity binding that uses a serial key and a serial value.
SerialSerialBinding(SerialFormat, SerialFormat) - -Constructor for class com.sleepycat.bdb.bind.serial.SerialSerialBinding +
SerialSerialBinding - class com.sleepycat.bind.serial.SerialSerialBinding.
An abstract EntityBinding that treats an entity's key entry and + data entry as serialized objects.
SerialSerialBinding(ClassCatalog, Class, Class) - +Constructor for class com.sleepycat.bind.serial.SerialSerialBinding
Creates a serial-serial entity binding. -
SerialSerialKeyExtractor - class com.sleepycat.bdb.bind.serial.SerialSerialKeyExtractor.
A abstract key extractor that uses a serial key and a serial value.
SerialSerialKeyExtractor(SerialFormat, SerialFormat, SerialFormat) - -Constructor for class com.sleepycat.bdb.bind.serial.SerialSerialKeyExtractor +
SerialSerialBinding(SerialBinding, SerialBinding) - +Constructor for class com.sleepycat.bind.serial.SerialSerialBinding
Creates a serial-serial entity binding. -
set_app_dispatch(DbAppDispatch) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setAppDispatch(DbAppDispatch) -
set_append_recno(DbAppendRecno) - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by setAppendRecno(DbAppendRecno) -
set_bt_compare(DbBtreeCompare) - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by setBtreeCompare(DbBtreeCompare) -
set_bt_maxkey(int) - -Method in class com.sleepycat.db.Db -
  -
set_bt_minkey(int) - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by setBtreeMinKey(int) -
set_bt_prefix(DbBtreePrefix) - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by setBtreePrefix(DbBtreePrefix) -
set_cachesize(int, int, int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. Replaced in Berkeley DB 4.2 by DbEnv.setCacheSize(long,int) -
set_cachesize(int, int, int) - -Method in class com.sleepycat.db.Db -
Deprecated. Replaced in Berkeley DB 4.2 by setCacheSize(long,int) -
set_cachesize(long, int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setCacheSize(long,int) -
set_cachesize(long, int) - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by setCacheSize(long,int) -
set_data_dir(String) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setDataDir(String) -
set_data(byte[]) - -Method in class com.sleepycat.db.Dbt -
Deprecated. As of Berkeley DB 4.2, replaced by Dbt.setData(byte[]) -
set_dlen(int) - -Method in class com.sleepycat.db.Dbt -
Deprecated. As of Berkeley DB 4.2, replaced by Dbt.setPartialLength(int) -
set_doff(int) - -Method in class com.sleepycat.db.Dbt -
Deprecated. As of Berkeley DB 4.2, replaced by Dbt.setPartialOffset(int) -
set_dup_compare(DbDupCompare) - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by setDuplicateCompare(DbDupCompare) -
set_encrypt(String, int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setEncrypted(String,int) -
set_encrypt(String, int) - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by setEncrypted(String,int) -
set_errcall(DbErrcall) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. Replaced in Berkeley DB 4.2 by DbEnv.setErrorHandler(DbErrorHandler) -
set_errcall(DbErrcall) - -Method in class com.sleepycat.db.Db -
Deprecated. Replaced in Berkeley DB 4.2 by setErrorHandler(DbErrorHandler) -
set_error_stream(OutputStream) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setErrorStream(java.io.OutputStream) -
set_error_stream(OutputStream) - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by setErrorStream(java.io.OutputStream) -
set_errpfx(String) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setErrorPrefix(String) -
set_errpfx(String) - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by setErrorPrefix(String) -
set_feedback(DbEnvFeedback) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. Replaced in Berkeley DB 4.2 by DbEnv.setFeedbackHandler(DbEnvFeedbackHandler) -
set_feedback(DbFeedback) - -Method in class com.sleepycat.db.Db -
Deprecated. Replaced in Berkeley DB 4.2 by setFeedbackHandler(DbFeedbackHandler) -
set_flags(int) - -Method in class com.sleepycat.db.Dbt -
Deprecated. As of Berkeley DB 4.2, replaced by Dbt.setFlags(int) -
set_flags(int) - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by setFlags(int) -
set_flags(int, boolean) - -Method in class com.sleepycat.db.DbMpoolFile -
Deprecated. As of Berkeley DB 4.2, replaced by DbMpoolFile.setFlags(int,boolean) -
set_flags(int, boolean) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setFlags(int,boolean) -
set_h_ffactor(int) - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by setHashFillFactor(int) -
set_h_hash(DbHash) - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by setHash(DbHash) -
set_h_nelem(int) - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by setHashNumElements(int) -
set_lg_bsize(int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setLogBufferSize(int) -
set_lg_dir(String) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setLogDir(String) -
set_lg_max(int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setLogMax(int) -
set_lg_regionmax(int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setLogRegionMax(int) -
set_lk_conflicts(byte[][]) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setLockConflicts(byte[][]) -
set_lk_detect(int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setLockDetect(int) -
set_lk_max_lockers(int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setLockMaxLockers(int) -
set_lk_max_locks(int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setLockMaxLocks(int) -
set_lk_max_objects(int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setLockMaxObjects(int) -
set_lock(DbLock) - -Method in class com.sleepycat.db.DbLockRequest -
Deprecated. As of Berkeley DB 4.2, replaced by DbLockRequest.setLock(DbLock) -
set_lorder(int) - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by setByteOrder(int) -
set_maxsize(long) - -Method in class com.sleepycat.db.DbMpoolFile -
Deprecated. As of Berkeley DB 4.2, replaced by DbMpoolFile.setMaxsize(long) -
set_mode(int) - -Method in class com.sleepycat.db.DbLockRequest -
Deprecated. As of Berkeley DB 4.2, replaced by DbLockRequest.setMode(int) -
set_mp_mmapsize(long) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setMemoryPoolMapSize(long) -
set_obj(Dbt) - -Method in class com.sleepycat.db.DbLockRequest -
Deprecated. As of Berkeley DB 4.2, replaced by DbLockRequest.setObj(Dbt) -
set_object(Object) - -Method in class com.sleepycat.db.Dbt -
Deprecated. As of Berkeley DB 4.2, replaced - by Dbt.setObject(Object) -
set_offset(int) - -Method in class com.sleepycat.db.Dbt -
Deprecated. As of Berkeley DB 4.2, replaced by Dbt.setOffset(int) -
set_op(int) - -Method in class com.sleepycat.db.DbLockRequest -
Deprecated. As of Berkeley DB 4.2, replaced by DbLockRequest.setOp(int) -
set_pagesize(long) - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by setPageSize(long) -
set_priority(int) - -Method in class com.sleepycat.db.DbMpoolFile -
Deprecated. As of Berkeley DB 4.2, replaced by DbMpoolFile.setPriority(int) -
set_q_extentsize(int) - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by setQueueExtentSize(int) -
set_re_delim(int) - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by setRecordDelimiter(int) -
set_re_len(int) - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by setRecordLength(int) -
set_re_pad(int) - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by setRecordPad(int) -
set_re_source(String) - -Method in class com.sleepycat.db.Db -
Deprecated. As of Berkeley DB 4.2, replaced by setRecordSource(String) -
set_recno_key_data(int) - -Method in class com.sleepycat.db.Dbt -
Deprecated. As of Berkeley DB 4.2, replaced by Dbt.setRecordNumber(int) -
set_rep_limit(int, int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. Replaced in Berkeley DB 4.2 by DbEnv.setReplicationLimit(long) -
set_rep_limit(long) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setReplicationLimit(long) -
set_rep_transport(int, DbRepTransport) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setReplicationTransport(int,DbRepTransport) -
set_rpc_server(DbClient, String, long, long, int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setRpcServer(DbClient,String,long,long,int) -
set_shm_key(long) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setSegmentId(long) -
set_size(int) - -Method in class com.sleepycat.db.Dbt -
Deprecated. As of Berkeley DB 4.2, replaced by Dbt.setSize(int) -
set_tas_spins(int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setTestAndSetSpins(int) -
set_timeout(long, int) - -Method in class com.sleepycat.db.DbTxn -
Deprecated. As of Berkeley DB 4.2, replaced by DbTxn.setTimeout(long,int) -
set_timeout(long, int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setTimeout(long,int) -
set_tmp_dir(String) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setTmpDir(String) -
set_tx_max(int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setTxnMax(int) -
set_tx_timestamp(Date) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setTxnTimestamp(java.util.Date) -
set_ulen(int) - -Method in class com.sleepycat.db.Dbt -
Deprecated. As of Berkeley DB 4.2, replaced by Dbt.setUserBufferLength(int) -
set_verbose(int, boolean) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.setVerbose(int,boolean) -
set(int, Object) - -Method in class com.sleepycat.bdb.collection.StoredList -
Replaces the element at the specified position in this list with the - specified element (optional operation). -
set(Object) - -Method in class com.sleepycat.bdb.collection.StoredIterator +
SerialSerialKeyCreator - class com.sleepycat.bind.serial.SerialSerialKeyCreator.
An abstract key creator that uses a serial key and a serial data entry.
SerialSerialKeyCreator(ClassCatalog, Class, Class, Class) - +Constructor for class com.sleepycat.bind.serial.SerialSerialKeyCreator +
Creates a serial-serial key creator. +
SerialSerialKeyCreator(SerialBinding, SerialBinding, SerialBinding) - +Constructor for class com.sleepycat.bind.serial.SerialSerialKeyCreator +
Creates a serial-serial key creator. +
ShortBinding - class com.sleepycat.bind.tuple.ShortBinding.
A concrete TupleBinding for a Short primitive + wrapper or a short primitive.
ShortBinding() - +Constructor for class com.sleepycat.bind.tuple.ShortBinding +
  +
StatsConfig - class com.sleepycat.db.StatsConfig.
Specifies the attributes of a statistics retrieval operation.
StatsConfig() - +Constructor for class com.sleepycat.db.StatsConfig +
An instance created using the default constructor is initialized + with the system's default settings. +
StoredClassCatalog - class com.sleepycat.bind.serial.StoredClassCatalog.
A ClassCatalog that is stored in a Database.
StoredClassCatalog(Database) - +Constructor for class com.sleepycat.bind.serial.StoredClassCatalog +
Creates a catalog based on a given database. +
StoredCollection - class com.sleepycat.collections.StoredCollection.
An abstract base class for all stored collections.
StoredCollections - class com.sleepycat.collections.StoredCollections.
This class consists exclusively of static methods that operate on or return + stored collections.
StoredContainer - class com.sleepycat.collections.StoredContainer.
An abstract base class for all stored collections and maps.
StoredEntrySet - class com.sleepycat.collections.StoredEntrySet.
The Set returned by Map.entrySet().
StoredIterator - class com.sleepycat.collections.StoredIterator.
The Iterator returned by all stored collections.
StoredKeySet - class com.sleepycat.collections.StoredKeySet.
The Set returned by Map.keySet() and which can also be constructed directly + if a Map is not needed.
StoredKeySet(Database, EntryBinding, boolean) - +Constructor for class com.sleepycat.collections.StoredKeySet +
Creates a key set view of a Database. +
StoredList - class com.sleepycat.collections.StoredList.
A List view of a Database.
StoredList(Database, EntryBinding, boolean) - +Constructor for class com.sleepycat.collections.StoredList +
Creates a list view of a Database. +
StoredList(Database, EntityBinding, boolean) - +Constructor for class com.sleepycat.collections.StoredList +
Creates a list entity view of a Database. +
StoredList(Database, EntryBinding, PrimaryKeyAssigner) - +Constructor for class com.sleepycat.collections.StoredList +
Creates a list view of a Database with a PrimaryKeyAssigner. +
StoredList(Database, EntityBinding, PrimaryKeyAssigner) - +Constructor for class com.sleepycat.collections.StoredList +
Creates a list entity view of a Database with a PrimaryKeyAssigner. +
StoredMap - class com.sleepycat.collections.StoredMap.
A Map view of a Database.
StoredMap(Database, EntryBinding, EntryBinding, boolean) - +Constructor for class com.sleepycat.collections.StoredMap +
Creates a map view of a Database. +
StoredMap(Database, EntryBinding, EntryBinding, PrimaryKeyAssigner) - +Constructor for class com.sleepycat.collections.StoredMap +
Creates a map view of a Database with a PrimaryKeyAssigner. +
StoredMap(Database, EntryBinding, EntityBinding, boolean) - +Constructor for class com.sleepycat.collections.StoredMap +
Creates a map entity view of a Database. +
StoredMap(Database, EntryBinding, EntityBinding, PrimaryKeyAssigner) - +Constructor for class com.sleepycat.collections.StoredMap +
Creates a map entity view of a Database with a PrimaryKeyAssigner. +
StoredSortedEntrySet - class com.sleepycat.collections.StoredSortedEntrySet.
The SortedSet returned by Map.entrySet().
StoredSortedKeySet - class com.sleepycat.collections.StoredSortedKeySet.
The SortedSet returned by Map.keySet() and which can also be constructed + directly if a Map is not needed.
StoredSortedKeySet(Database, EntryBinding, boolean) - +Constructor for class com.sleepycat.collections.StoredSortedKeySet +
Creates a sorted key set view of a Database. +
StoredSortedMap - class com.sleepycat.collections.StoredSortedMap.
A SortedMap view of a Database.
StoredSortedMap(Database, EntryBinding, EntryBinding, boolean) - +Constructor for class com.sleepycat.collections.StoredSortedMap +
Creates a sorted map view of a Database. +
StoredSortedMap(Database, EntryBinding, EntryBinding, PrimaryKeyAssigner) - +Constructor for class com.sleepycat.collections.StoredSortedMap +
Creates a sorted map view of a Database with a PrimaryKeyAssigner. +
StoredSortedMap(Database, EntryBinding, EntityBinding, boolean) - +Constructor for class com.sleepycat.collections.StoredSortedMap +
Creates a sorted map entity view of a Database. +
StoredSortedMap(Database, EntryBinding, EntityBinding, PrimaryKeyAssigner) - +Constructor for class com.sleepycat.collections.StoredSortedMap +
Creates a sorted map entity view of a Database with a PrimaryKeyAssigner. +
StoredSortedValueSet - class com.sleepycat.collections.StoredSortedValueSet.
The SortedSet returned by Map.values() and which can also be constructed + directly if a Map is not needed.
StoredSortedValueSet(Database, EntityBinding, boolean) - +Constructor for class com.sleepycat.collections.StoredSortedValueSet +
Creates a sorted value set entity view of a Database. +
StoredValueSet - class com.sleepycat.collections.StoredValueSet.
The Set returned by Map.values() and Map.duplicates(), and which can also be + constructed directly if a Map is not needed.
StoredValueSet(Database, EntryBinding, boolean) - +Constructor for class com.sleepycat.collections.StoredValueSet +
Creates a value set view of a Database. +
StoredValueSet(Database, EntityBinding, boolean) - +Constructor for class com.sleepycat.collections.StoredValueSet +
Creates a value set entity view of a Database. +
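The Stored* collection constructors above combine a Database with bindings. A sketch of a writable StoredMap built from a StoredClassCatalog, a StringBinding key binding, and a SerialBinding value binding; both Database handles are assumed to be open already.

    import com.sleepycat.bind.EntryBinding;
    import com.sleepycat.bind.serial.SerialBinding;
    import com.sleepycat.bind.serial.StoredClassCatalog;
    import com.sleepycat.bind.tuple.StringBinding;
    import com.sleepycat.collections.StoredMap;
    import com.sleepycat.db.Database;

    public class StoredMapSketch {
        // catalogDb backs the class catalog, dataDb holds the records;
        // both handles are assumed to be open already.
        static StoredMap buildMap(Database catalogDb, Database dataDb) {
            StoredClassCatalog catalog = new StoredClassCatalog(catalogDb);

            // String keys via a tuple binding, serialized String values
            // via the class catalog.
            EntryBinding keyBinding = new StringBinding();
            EntryBinding valueBinding = new SerialBinding(catalog, String.class);

            // The final argument makes the Map view writable.
            StoredMap map = new StoredMap(dataDb, keyBinding, valueBinding, true);
            map.put("greeting", "hello world");
            return map;
        }
    }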
StringBinding - class com.sleepycat.bind.tuple.StringBinding.
A concrete TupleBinding for a simple String value.
StringBinding() - +Constructor for class com.sleepycat.bind.tuple.StringBinding +
  +
send(Environment, DatabaseEntry, DatabaseEntry, LogSequenceNumber, int, boolean, boolean) - +Method in interface com.sleepycat.db.ReplicationTransport +
The callback used when Berkeley DB needs to transmit a replication + message. +
set(Object) - +Method in class com.sleepycat.collections.StoredIterator
Replaces the last element returned by next or previous with the specified element (optional operation). -
setAppDispatch(DbAppDispatch) - -Method in class com.sleepycat.db.DbEnv -
  -
setAppendRecno(DbAppendRecno) - -Method in class com.sleepycat.db.Db -
  -
setBtreeCompare(DbBtreeCompare) - -Method in class com.sleepycat.db.Db -
  -
setBtreeMinKey(int) - -Method in class com.sleepycat.db.Db -
Set the minimum number of key/data pairs intended to be stored on any single Btree leaf page. -
setBtreePrefix(DbBtreePrefix) - -Method in class com.sleepycat.db.Db -
  -
setByteOrder(int) - -Method in class com.sleepycat.db.Db +
set(int, Object) - +Method in class com.sleepycat.collections.StoredList +
Replaces the element at the specified position in this list with the + specified element (optional operation). +
set(LogSequenceNumber, DatabaseEntry) - +Method in class com.sleepycat.db.LogCursor +
Return a specific log record. +
setAggressive(boolean) - +Method in class com.sleepycat.db.VerifyConfig +
Configure Database.verify to output all the + key/data pairs in the file that can be found. +
setAllowCreate(boolean) - +Method in class com.sleepycat.db.DatabaseConfig +
Configure the Environment.openDatabase method to create + the database if it does not already exist. +
setAllowCreate(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the database environment to create any underlying files, + as necessary. +
setAllowCreate(boolean) - +Method in class com.sleepycat.db.SequenceConfig +
Configure the Database.openSequence method to + create the sequence if it does not already exist. +
setAllowNestedTransactions(boolean) - +Method in class com.sleepycat.collections.TransactionRunner +
Changes whether nested transactions will be created if + run() is called when a transaction is already active for + the current thread. +
setAllowPopulate(boolean) - +Method in class com.sleepycat.db.SecondaryConfig +
Specifies whether automatic population of the secondary is allowed. +
setAutoCommitNoSync(boolean) - +Method in class com.sleepycat.db.SequenceConfig +
Configure auto-commit operations on the sequence to not flush + the transaction log. +
setBtreeComparator(Comparator) - +Method in class com.sleepycat.db.DatabaseConfig +
By default, a byte by byte lexicographic comparison is used for + btree keys. +
setBtreeMinKey(int) - +Method in class com.sleepycat.db.DatabaseConfig +
Set the minimum number of key/data pairs intended to be stored on any + single Btree leaf page. +
setBtreePrefixCalculator(BtreePrefixCalculator) - +Method in class com.sleepycat.db.DatabaseConfig +
Set the Btree prefix callback. +
setBtreeRecordNumbers(boolean) - +Method in class com.sleepycat.db.DatabaseConfig +
Configure the Btree to support retrieval by record number. +
setByteOrder(int) - +Method in class com.sleepycat.db.DatabaseConfig
Set the byte order for integers in the stored database metadata. -
setBytes(byte[]) - -Method in class com.sleepycat.bdb.DataThang -
Sets the data for this thang. -
setBytes(byte[], int, int) - -Method in class com.sleepycat.bdb.DataThang -
Sets the data for this thang. -
setCacheSize(long, int) - -Method in class com.sleepycat.db.DbEnv -
Set the size of the shared memory buffer pool -- that is, the cache. -
setCacheSize(long, int) - -Method in class com.sleepycat.db.Db -
Set the size of the shared memory buffer pool -- that is, the cache. -
setData(byte[]) - -Method in class com.sleepycat.db.Dbt -
Set the data array. -
setData(byte[], int, int) - -Method in class com.sleepycat.bdb.DataThang -
  -
setData(byte[], int, int) - -Method in class com.sleepycat.bdb.bind.SimpleBuffer -
  -
setData(byte[], int, int) - -Method in interface com.sleepycat.bdb.bind.DataBuffer -
Sets the data in this buffer to the given value. -
setDataDir(String) - -Method in class com.sleepycat.db.DbEnv -
Set the path of a directory to be used as the location of the access method database files. -
setDataFormation(Object) - -Method in class com.sleepycat.bdb.DataThang -
  -
setDataFormation(Object) - -Method in class com.sleepycat.bdb.bind.SimpleBuffer -
  -
setDataFormation(Object) - -Method in interface com.sleepycat.bdb.bind.DataBuffer -
Sets the formation associated with the data in this buffer. -
setDirtyRead(boolean) - -Method in class com.sleepycat.bdb.TransactionRunner -
Changes whether transactions will read data that is modified by another - transaction but not committed. -
setDuplicateCompare(DbDupCompare) - -Method in class com.sleepycat.db.Db -
  -
setEncrypted(String, int) - -Method in class com.sleepycat.db.DbEnv -
Set the password used by the Berkeley DB library to perform encryption and decryption. -
setEncrypted(String, int) - -Method in class com.sleepycat.db.Db -
Set the password used by the Berkeley DB library to perform encryption and decryption. -
setErrorHandler(DbErrorHandler) - -Method in class com.sleepycat.db.DbEnv -
When an error occurs in the Berkeley DB library, an exception is thrown. -
setErrorHandler(DbErrorHandler) - -Method in class com.sleepycat.db.Db -
When an error occurs in the Berkeley DB library, an exception is thrown. -
setErrorPrefix(String) - -Method in class com.sleepycat.db.DbEnv -
Set the prefix string that appears before error messages issued by Berkeley DB. -
setErrorPrefix(String) - -Method in class com.sleepycat.db.Db -
Set the prefix string that appears before error messages issued by Berkeley DB. -
setErrorStream(OutputStream) - -Method in class com.sleepycat.db.DbEnv -
When an error occurs in the Berkeley DB library, an exception is thrown. -
setErrorStream(OutputStream) - -Method in class com.sleepycat.db.Db -
When an error occurs in the Berkeley DB library, an exception is thrown. -
setFeedbackHandler(DbEnvFeedbackHandler) - -Method in class com.sleepycat.db.DbEnv -
  -
setFeedbackHandler(DbFeedbackHandler) - -Method in class com.sleepycat.db.Db -
  -
setFlags(int) - -Method in class com.sleepycat.db.Dbt -
Set the object flag value. -
setFlags(int) - -Method in class com.sleepycat.db.Db -
Configure a database. -
setFlags(int, boolean) - -Method in class com.sleepycat.db.DbMpoolFile -
Configure a file in the cache. -
setFlags(int, boolean) - -Method in class com.sleepycat.db.DbEnv -
Configure a database environment. -
setHash(DbHash) - -Method in class com.sleepycat.db.Db -
  -
setHashFillFactor(int) - -Method in class com.sleepycat.db.Db +
setCDBLockAllDatabases(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure Concurrent Data Store applications to perform locking on + an environment-wide basis rather than on a per-database basis. +
setCacheCount(int) - +Method in class com.sleepycat.db.DatabaseConfig +
Set the number of shared memory buffer pools, that is, the number of +caches. +
setCacheCount(int) - +Method in class com.sleepycat.db.EnvironmentConfig +
Set the number of shared memory buffer pools, that is, the number of +caches. +
setCacheSize(long) - +Method in class com.sleepycat.db.DatabaseConfig +
Set the size of the shared memory buffer pool, that is, the size of the +cache. +
setCacheSize(long) - +Method in class com.sleepycat.db.EnvironmentConfig +
Set the size of the shared memory buffer pool, that is, the size of the +cache. +
setCacheSize(int) - +Method in class com.sleepycat.db.SequenceConfig +
Configure the number of elements cached by a sequence handle. +
setChecksum(boolean) - +Method in class com.sleepycat.db.DatabaseConfig +
Configure the database environment to do checksum verification of + pages read into the cache from the backing filestore. +
setClear(boolean) - +Method in class com.sleepycat.db.StatsConfig +
Configure the statistics operation to reset statistics after they + are returned. +
setConfig(DatabaseConfig) - +Method in class com.sleepycat.db.Database +
Change the settings in an existing database handle. +
setConfig(EnvironmentConfig) - +Method in class com.sleepycat.db.Environment +
Change the settings in an existing environment handle. +
setData(byte[]) - +Method in class com.sleepycat.db.DatabaseEntry +
Sets the byte array. +
setData(byte[], int, int) - +Method in class com.sleepycat.db.DatabaseEntry +
Sets the byte array, offset and size. +
setDecrement(boolean) - +Method in class com.sleepycat.db.SequenceConfig +
Specify that the sequence should be decremented. +
setDegree2(boolean) - +Method in class com.sleepycat.db.CursorConfig +
Configure the cursor for degree 2 isolation. +
setDegree2(boolean) - +Method in class com.sleepycat.db.TransactionConfig +
Configure this transaction to have degree 2 isolation. +
setDirectDatabaseIO(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the database environment to not buffer database files. +
setDirectLogIO(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the database environment to not buffer log files. +
setDirtyRead(boolean) - +Method in class com.sleepycat.db.CursorConfig +
Configure read operations performed by the cursor to return modified + but not yet committed data. +
setDirtyRead(boolean) - +Method in class com.sleepycat.db.DatabaseConfig +
Configure the database to support dirty reads. +
setDirtyRead(boolean) - +Method in class com.sleepycat.db.TransactionConfig +
Configure the transaction to perform dirty reads. +
setDsyncLog(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the database environment to flush log writes to the + backing disk before returning from the write system call, rather + than flushing log writes explicitly in a separate system call. +
setDuplicateComparator(Comparator) - +Method in class com.sleepycat.db.DatabaseConfig +
Set the duplicate data item comparison callback. +
setEncrypted(String) - +Method in class com.sleepycat.db.DatabaseConfig +
Set the password used to perform encryption and decryption. +
setEncrypted(String) - +Method in class com.sleepycat.db.EnvironmentConfig +
Set the password used to perform encryption and decryption. +
setErrorHandler(ErrorHandler) - +Method in class com.sleepycat.db.DatabaseConfig +
Set the function to be called if an error occurs. +
setErrorHandler(ErrorHandler) - +Method in class com.sleepycat.db.EnvironmentConfig +
Set the function to be called if an error occurs. +
setErrorPrefix(String) - +Method in class com.sleepycat.db.DatabaseConfig +
Set the prefix string that appears before error messages. +
setErrorPrefix(String) - +Method in class com.sleepycat.db.EnvironmentConfig +
Set the prefix string that appears before error messages. +
setErrorStream(OutputStream) - +Method in class com.sleepycat.db.DatabaseConfig +
Set an OutputStream for displaying error messages. +
setErrorStream(OutputStream) - +Method in class com.sleepycat.db.EnvironmentConfig +
Set an OutputStream for displaying error messages. +
setExclusiveCreate(boolean) - +Method in class com.sleepycat.db.DatabaseConfig +
Configure the Environment.openDatabase method to fail if + the database already exists. +
setExclusiveCreate(boolean) - +Method in class com.sleepycat.db.SequenceConfig +
Configure the Database.openSequence method to + fail if the database already exists. +
setFast(boolean) - +Method in class com.sleepycat.db.StatsConfig +
Configure the statistics operation to return only the values which + do not incur some performance penalty. +
setFeedbackHandler(FeedbackHandler) - +Method in class com.sleepycat.db.DatabaseConfig +
Set an object whose methods are called to provide feedback. +
setFeedbackHandler(FeedbackHandler) - +Method in class com.sleepycat.db.EnvironmentConfig +
Set an object whose methods are called to provide feedback. +
setForce(boolean) - +Method in class com.sleepycat.db.CheckpointConfig +
Configure the checkpoint force option. +
setHashFillFactor(int) - +Method in class com.sleepycat.db.DatabaseConfig
Set the desired density within the hash table. -
setHashNumElements(int) - -Method in class com.sleepycat.db.Db +
setHashNumElements(int) - +Method in class com.sleepycat.db.DatabaseConfig
Set an estimate of the final size of the hash table. -
setLock(DbLock) - -Method in class com.sleepycat.db.DbLockRequest -
The DbLockRequest.setLock method sets the lock reference. -
setLockConflicts(byte[][]) - -Method in class com.sleepycat.db.DbEnv -
Set the locking conflicts matrix. -
setLockDetect(int) - -Method in class com.sleepycat.db.DbEnv -
Set if the deadlock detector is to be run whenever a lock conflict occurs, and specify what lock request(s) should be rejected. -
setLockForWrite(boolean) - -Method in class com.sleepycat.bdb.collection.StoredIterator -
Changes whether write-locks will be obtained when reading with this - cursor. -
setLockMaxLockers(int) - -Method in class com.sleepycat.db.DbEnv -
Set the maximum number of locking entities supported by the Berkeley DB environment. -
setLockMaxLocks(int) - -Method in class com.sleepycat.db.DbEnv -
Set the maximum number of locks supported by the Berkeley DB environment. -
setLockMaxObjects(int) - -Method in class com.sleepycat.db.DbEnv -
Set the maximum number of locked objects supported by the Berkeley DB environment. -
setLogBufferSize(int) - -Method in class com.sleepycat.db.DbEnv +
setHasher(Hasher) - +Method in class com.sleepycat.db.DatabaseConfig +
Set a database-specific hash function. +
setInitialValue(long) - +Method in class com.sleepycat.db.SequenceConfig +
Set the +initial value for a sequence. +
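The SequenceConfig entries (setDecrement, setInitialValue, and the setRange/setWrap entries later in this index) configure the sequence support added here. A minimal sketch, assuming an already-open transactional Database `db`; the key name, the bounds, and the setAllowCreate call are illustrative assumptions:

    import com.sleepycat.db.*;

    // Sketch: allocate the next value from a persistent sequence.
    class SequenceExample {
        static long nextId(Database db) throws DatabaseException {
            SequenceConfig config = new SequenceConfig();
            config.setAllowCreate(true);     // assumed setter: create the sequence if missing
            config.setInitialValue(1);       // start counting at 1
            config.setRange(1, 1000000);     // illustrative bounds
            config.setWrap(false);           // fail rather than wrap at the maximum
            DatabaseEntry key = new DatabaseEntry("my_sequence".getBytes());
            Sequence seq = db.openSequence(null, key, config);
            try {
                return seq.get(null, 1);     // allocate one value
            } finally {
                seq.close();
            }
        }
    }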
setInitializeCDB(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the database environment for the Concurrent Data Store + product. +
setInitializeCache(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure a shared memory buffer pool in the database environment. +
setInitializeLocking(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the database environment for locking. +
setInitializeLogging(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the database environment for logging. +
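Taken together, the setInitialize* and related EnvironmentConfig entries replace the old environment flag arguments. A minimal sketch of opening a transactional environment under those settings; the setAllowCreate call and the helper name are assumptions, not text from this patch:

    import java.io.File;
    import com.sleepycat.db.*;

    // Sketch: open (or create) a transactional database environment.
    class EnvironmentExample {
        static Environment openEnvironment(File home) throws Exception {
            EnvironmentConfig config = new EnvironmentConfig();
            config.setAllowCreate(true);        // assumed setter: create if absent
            config.setInitializeCache(true);    // shared memory buffer pool
            config.setInitializeLocking(true);  // locking subsystem
            config.setInitializeLogging(true);  // logging subsystem
            config.setTransactional(true);      // transaction subsystem
            config.setRunRecovery(true);        // run normal recovery first
            return new Environment(home, config);
        }
    }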
setInitializeRegions(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the database environment to page-fault shared regions into + memory when initially creating or joining a database environment. +
setInitializeReplication(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the database environment for replication. +
setJoinEnvironment(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the handle to join an existing environment. +
setKBytes(int) - +Method in class com.sleepycat.db.CheckpointConfig +
Configure the checkpoint log data threshold, in kilobytes. +
setKeyCreator(SecondaryKeyCreator) - +Method in class com.sleepycat.db.SecondaryConfig +
Specifies the user-supplied object used for creating secondary keys. +
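setKeyCreator takes an application class implementing SecondaryKeyCreator. A hedged sketch of such a class; the class name and the four-byte field layout are purely illustrative:

    import com.sleepycat.db.*;

    // Sketch: index the first four bytes of each data item as the secondary key.
    class FirstFourBytesKeyCreator implements SecondaryKeyCreator {
        public boolean createSecondaryKey(SecondaryDatabase secondary,
                                          DatabaseEntry key,
                                          DatabaseEntry data,
                                          DatabaseEntry result)
                throws DatabaseException {
            if (data.getSize() < 4)
                return false;                   // no secondary key for this record
            byte[] bytes = new byte[4];
            System.arraycopy(data.getData(), data.getOffset(), bytes, 0, 4);
            result.setData(bytes);
            return true;
        }
    }

An instance of such a class would be passed to SecondaryConfig.setKeyCreator before the secondary database is opened.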
setLock(Lock) - +Method in class com.sleepycat.db.LockRequest +
Set the lock reference. +
setLockConflicts(byte[][]) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the locking conflicts matrix. +
setLockDetectMode(LockDetectMode) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure if the deadlock detector is to be run whenever a lock + conflict occurs. +
setLockDown(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the database environment to lock shared environment files + and memory-mapped databases into memory. +
setLockTimeout(long) - +Method in class com.sleepycat.db.EnvironmentConfig +
Set the timeout value for the database environment +locks. +
setLockTimeout(long) - +Method in class com.sleepycat.db.Transaction +
Configure the lock request timeout value for the transaction. +
setLogAutoRemove(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the system to automatically remove log files that are no + longer needed. +
setLogBufferSize(int) - +Method in class com.sleepycat.db.EnvironmentConfig
Set the size of the in-memory log buffer, in bytes. -
setLogDir(String) - -Method in class com.sleepycat.db.DbEnv -
The path of a directory to be used as the location of logging files. -
setLogMax(int) - -Method in class com.sleepycat.db.DbEnv +
setLogDirectory(File) - +Method in class com.sleepycat.db.EnvironmentConfig +
Set the path of a directory to be used as the location of logging files. +
setLogInMemory(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
If set, maintain transaction logs in memory rather than on disk. +
setLogRecordHandler(LogRecordHandler) - +Method in class com.sleepycat.db.EnvironmentConfig +
Set a function to process application-specific log records. +
setLogRegionSize(int) - +Method in class com.sleepycat.db.EnvironmentConfig +
Set the size of the underlying logging area of the database + environment, in bytes. +
setMMapSize(long) - +Method in class com.sleepycat.db.EnvironmentConfig +
Set the maximum file size, in bytes, for a file to be mapped into + the process address space. +
setMaxLockObjects(int) - +Method in class com.sleepycat.db.EnvironmentConfig +
Set the maximum number of locked objects supported by the database + environment. +
setMaxLockers(int) - +Method in class com.sleepycat.db.EnvironmentConfig +
Set the maximum number of locking entities supported by the database + environment. +
setMaxLocks(int) - +Method in class com.sleepycat.db.EnvironmentConfig +
Set the maximum number of locks supported by the database + environment. +
setMaxLogFileSize(int) - +Method in class com.sleepycat.db.EnvironmentConfig
Set the maximum size of a single file in the log, in bytes. -
setLogRegionMax(int) - -Method in class com.sleepycat.db.DbEnv -
Set the size of the underlying logging area of the Berkeley DB environment, in bytes. -
setMaxRetries(int) - -Method in class com.sleepycat.bdb.TransactionRunner +
setMaxRetries(int) - +Method in class com.sleepycat.collections.TransactionRunner
Changes the maximum number of retries that will be performed when deadlocks are detected. -
setMaxsize(long) - -Method in class com.sleepycat.db.DbMpoolFile -
Set the maximum size for the file to be bytes bytes. -
setMemoryPoolMapSize(long) - -Method in class com.sleepycat.db.DbEnv -
Files that are opened read-only in the pool (and that satisfy a few other criteria) are, by default, mapped into the process address space instead of being copied into the local cache. -
setMode(int) - -Method in class com.sleepycat.db.DbLockRequest -
The DbLockRequest.setMode method sets the lock mode. -
setNoWait(boolean) - -Method in class com.sleepycat.bdb.TransactionRunner -
Changes whether transactions will throw DbLockNotGrantedException - instead of blocking when trying to access data that is locked by another - transaction. -
setObj(Dbt) - -Method in class com.sleepycat.db.DbLockRequest -
The DbLockRequest.setObj method sets the lock object. -
setObject(Object) - -Method in class com.sleepycat.db.Dbt -
Initialize the data array from a serialized object, encoding the object using the Java serialization API. -
setOffset(int) - -Method in class com.sleepycat.db.Dbt +
setMaximumSize(long) - +Method in class com.sleepycat.db.CacheFile +
Set the +maximum size for the file backing the database. +
setMessageHandler(MessageHandler) - +Method in class com.sleepycat.db.DatabaseConfig +
Set a function to be called with an informational message. +
setMessageHandler(MessageHandler) - +Method in class com.sleepycat.db.EnvironmentConfig +
Set a function to be called with an informational message. +
setMessageStream(OutputStream) - +Method in class com.sleepycat.db.DatabaseConfig +
Set an OutputStream for displaying informational messages. +
setMessageStream(OutputStream) - +Method in class com.sleepycat.db.EnvironmentConfig +
Set an OutputStream for displaying informational messages. +
setMinutes(int) - +Method in class com.sleepycat.db.CheckpointConfig +
Configure the checkpoint time threshold, in minutes. +
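The three CheckpointConfig entries (setForce, setKBytes, and setMinutes) drive Environment.checkpoint. A small sketch with illustrative thresholds:

    import com.sleepycat.db.*;

    // Sketch: checkpoint only if 64KB of log data or 5 minutes have accumulated
    // since the last checkpoint.
    class CheckpointExample {
        static void maybeCheckpoint(Environment env) throws DatabaseException {
            CheckpointConfig config = new CheckpointConfig();
            config.setKBytes(64);
            config.setMinutes(5);
            config.setForce(false);   // honor the thresholds rather than forcing
            env.checkpoint(config);
        }
    }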
setMode(int) - +Method in class com.sleepycat.db.DatabaseConfig +
On UNIX systems or in IEEE/ANSI Std 1003.1 (POSIX) environments, files + created by the database open are created with mode mode + (as described in the chmod(2) manual page) and modified + by the process' umask value at the time of creation (see the + umask(2) manual page). +
setMode(int) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the database environment to use a specific mode when + creating underlying files and shared memory segments. +
setMode(LockRequestMode) - +Method in class com.sleepycat.db.LockRequest +
Set the lock mode. +
setNoFile(boolean) - +Method in class com.sleepycat.db.CacheFile +
Disallow opening backing temporary files for in-memory + databases, even if they expand to fill the entire cache. +
setNoLocking(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the system to grant all requested mutual exclusion mutexes + and database locks without regard for their actual availability. +
setNoMMap(boolean) - +Method in class com.sleepycat.db.DatabaseConfig +
Configure the library to not map this database into memory. +
setNoMMap(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the system to copy read-only database files into the local + cache instead of potentially mapping them into process memory. +
setNoOrderCheck(boolean) - +Method in class com.sleepycat.db.VerifyConfig +
Configure Database.verify to skip the database checks for + btree and duplicate sort order and for hashing. +
setNoPanic(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the system to ignore any panic state in the database + environment. +
setNoSort(boolean) - +Method in class com.sleepycat.db.JoinConfig +
Specifies whether automatic sorting of the input cursors is disabled. +
setNoSync(boolean) - +Method in class com.sleepycat.db.TransactionConfig +
Configure the transaction to not write or synchronously flush the log + when it commits. +
setNoWait(boolean) - +Method in class com.sleepycat.db.TransactionConfig +
Configure the transaction to not wait if a lock request cannot be + immediately granted. +
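The TransactionConfig durability and blocking entries map onto Environment.beginTransaction. A hedged sketch; the helper name is illustrative:

    import com.sleepycat.db.*;

    // Sketch: begin a transaction that trades durability for throughput
    // and throws instead of blocking on a conflicting lock.
    class FastTransactionExample {
        static Transaction beginFastTransaction(Environment env)
                throws DatabaseException {
            TransactionConfig config = new TransactionConfig();
            config.setNoSync(true);   // do not flush the log when it commits
            config.setNoWait(true);   // fail immediately on lock conflicts
            return env.beginTransaction(null, config);
        }
    }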
setObj(DatabaseEntry) - +Method in class com.sleepycat.db.LockRequest +
Set the lock object. +
setOffset(int) - +Method in class com.sleepycat.db.DatabaseEntry
Set the byte offset into the data array. -
setOp(int) - -Method in class com.sleepycat.db.DbLockRequest -
The DbLockRequest.setOp method sets the operation. -
setPageSize(long) - -Method in class com.sleepycat.db.Db +
setOp(LockOperation) - +Method in class com.sleepycat.db.LockRequest +
Set the operation. +
setOrderCheckOnly(boolean) - +Method in class com.sleepycat.db.VerifyConfig +
Configure Database.verify to do database checks for btree + and duplicate sort order and for hashing, skipped by verification + operations configured by VerifyConfig.setNoOrderCheck. +
setOverwrite(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the system to overwrite files stored in encrypted formats + before deleting them. +
setPageSize(int) - +Method in class com.sleepycat.db.DatabaseConfig
Set the size of the pages used to hold items in the database, in bytes. -
setPanicHandler(DbPanicHandler) - -Method in class com.sleepycat.db.DbEnv -
  -
setPanicHandler(DbPanicHandler) - -Method in class com.sleepycat.db.Db -
Errors can occur in the Berkeley DB library where the only solution is to shut down the application and run recovery (for example, if Berkeley DB is unable to allocate heap memory). -
setPartialLength(int) - -Method in class com.sleepycat.db.Dbt -
Set the byte length of the partial record being read or written by the application, in bytes. -
setPartialOffset(int) - -Method in class com.sleepycat.db.Dbt -
Set the offset of the partial record being read or written by the application, in bytes. -
setPriority(int) - -Method in class com.sleepycat.db.DbMpoolFile -
Set the cache priority for pages from the specified file. -
setQueueExtentSize(int) - -Method in class com.sleepycat.db.Db -
Set the size of the extents used to hold pages in a Queue database, specified as a number of pages. -
setRecordDelimiter(int) - -Method in class com.sleepycat.db.Db -
Set the delimiting byte used to mark the end of a record in the backing source file for the Recno access method. -
setRecordLength(int) - -Method in class com.sleepycat.db.Db -
For the Queue access method, specify that the records are of length re_len. -
setRecordNumber(int) - -Method in class com.sleepycat.db.Dbt -
Initialize the data array from a logical record number. -
setRecordPad(int) - -Method in class com.sleepycat.db.Db -
Set the padding character for short, fixed-length records for the Queue and Recno access methods. -
setRecordSource(String) - -Method in class com.sleepycat.db.Db +
setPanicHandler(PanicHandler) - +Method in class com.sleepycat.db.DatabaseConfig +
Set the function to be called if the database environment panics. +
setPanicHandler(PanicHandler) - +Method in class com.sleepycat.db.EnvironmentConfig +
Set the function to be called if the database environment panics. +
setPartial(int, int, boolean) - +Method in class com.sleepycat.db.DatabaseEntry +
Configures this DatabaseEntry to read or write partial records. +
setPartial(boolean) - +Method in class com.sleepycat.db.DatabaseEntry +
Configure this DatabaseEntry to read or write partial records. +
setPartialLength(int) - +Method in class com.sleepycat.db.DatabaseEntry +
Set the byte length of the partial record being read or written by + the application, in bytes. +
setPartialOffset(int) - +Method in class com.sleepycat.db.DatabaseEntry +
Set the offset of the partial record being read or written by the + application, in bytes. +
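The setPartial entries configure partial-record I/O on a DatabaseEntry. A minimal sketch, assuming an already-open Database `db`; the offsets are illustrative:

    import com.sleepycat.db.*;

    // Sketch: read only bytes 10..19 of the data item stored under `key`.
    class PartialReadExample {
        static byte[] readPartial(Database db, DatabaseEntry key)
                throws DatabaseException {
            DatabaseEntry data = new DatabaseEntry();
            data.setPartial(10, 10, true);    // offset 10, length 10, partial on
            OperationStatus status = db.get(null, key, data, LockMode.DEFAULT);
            return (status == OperationStatus.SUCCESS) ? data.getData() : null;
        }
    }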
setPrintable(boolean) - +Method in class com.sleepycat.db.VerifyConfig +
Configure Database.verify to use printing characters + where possible. +
setPriority(CacheFilePriority) - +Method in class com.sleepycat.db.CacheFile +
Set the +cache priority for pages from the specified file. +
setPrivate(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the database environment to only be accessed by a single + process (although that process may be multithreaded). +
setQueueExtentSize(int) - +Method in class com.sleepycat.db.DatabaseConfig +
Set the size of the extents used to hold pages in a Queue database, + specified as a number of pages. +
setQueueInOrder(boolean) - +Method in class com.sleepycat.db.DatabaseConfig +
Configure Database.consume to return key/data pairs in + order, always returning the key/data item from the head of the + queue. +
setRPCServer(String, long, long) - +Method in class com.sleepycat.db.EnvironmentConfig +
Establish a connection to an RPC server for this database environment. +
setRange(long, long) - +Method in class com.sleepycat.db.SequenceConfig +
Configure a sequence range. +
setReadModifyWrite(boolean) - +Method in class com.sleepycat.collections.StoredIterator +
Changes whether write-locks will be obtained when reading with this + cursor. +
setReadOnly(boolean) - +Method in class com.sleepycat.db.DatabaseConfig +
Configure the database in read-only mode. +
setReadOnly(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the environment handle to be opened read-only. +
setRecordDelimiter(int) - +Method in class com.sleepycat.db.DatabaseConfig +
Set the delimiting byte used to mark the end of a record in the backing + source file for the Recno access method. +
setRecordLength(int) - +Method in class com.sleepycat.db.DatabaseConfig +
Specify the database record length, in bytes. +
setRecordNumber(int) - +Method in class com.sleepycat.db.DatabaseEntry +
Initialize the entry from a logical record number. +
setRecordNumberAppender(RecordNumberAppender) - +Method in class com.sleepycat.db.DatabaseConfig +
Configure Database.append to call the function after the + record number has been selected but before the data has been stored + into the database. +
setRecordPad(int) - +Method in class com.sleepycat.db.DatabaseConfig +
Set the padding character for short, fixed-length records for the Queue + and Recno access methods. +
setRecordSource(File) - +Method in class com.sleepycat.db.DatabaseConfig
Set the underlying source file for the Recno access method. -
setReplicationLimit(long) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.setReplicationLimit method imposes a byte-count limit on the amount of data that will be transmitted from a site in a single call to DbEnv.replicationProcessMessage method. -
setReplicationTransport(int, DbRepTransport) - -Method in class com.sleepycat.db.DbEnv -
  -
setRpcServer(DbClient, String, long, long, int) - -Method in class com.sleepycat.db.DbEnv -
Establishes a connection for this dbenv to a RPC server. -
setSegmentId(long) - -Method in class com.sleepycat.db.DbEnv -
Specify a base segment ID for Berkeley DB environment shared memory regions created in system memory on VxWorks or systems supporting X/Open-style shared memory interfaces; for example, UNIX systems supporting shmget(2) and related System V IPC interfaces. -
setSize(int) - -Method in class com.sleepycat.db.Dbt +
setRenumbering(boolean) - +Method in class com.sleepycat.db.DatabaseConfig +
Configure the logical record numbers to be mutable, and change as + records are added to and deleted from the database. +
setReplicationLimit(long) - +Method in class com.sleepycat.db.EnvironmentConfig +
Impose a byte-count limit on the amount of data that will be + transmitted from a site in a single call to Environment.processReplicationMessage. +
setReplicationTransport(int, ReplicationTransport) - +Method in class com.sleepycat.db.EnvironmentConfig +
Initialize the communication infrastructure for a database environment + participating in a replicated application. +
setReuseBuffer(boolean) - +Method in class com.sleepycat.db.DatabaseEntry +
Configures the entry to try to reuse the buffer before allocating a new + one. +
setReverseSplitOff(boolean) - +Method in class com.sleepycat.db.DatabaseConfig +
Configure the Btree to not do reverse splits. +
setRunFatalRecovery(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure to run catastrophic recovery on this environment before opening it for +normal use. +
setRunRecovery(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure to run normal recovery on this environment before opening it for +normal use. +
setSalvage(boolean) - +Method in class com.sleepycat.db.VerifyConfig +
Configure Database.verify to write the key/data pairs from + all databases in the file to the file stream named by the outfile + parameter. +
setSegmentId(long) - +Method in class com.sleepycat.db.EnvironmentConfig +
Specify a base segment ID for database environment shared memory + regions created in system memory on VxWorks or systems supporting + X/Open-style shared memory interfaces; for example, UNIX systems + supporting shmget and related System V IPC interfaces. +
setSize(int) - +Method in class com.sleepycat.db.DatabaseEntry
Set the byte size of the data array. -
setTestAndSetSpins(int) - -Method in class com.sleepycat.db.DbEnv -
Specify that test-and-set mutexes should spin tas_spins times without blocking. -
setTimeout(int) - -Method in class com.sleepycat.db.DbLockRequest -
The DbLockRequest.setTimeout method sets the lock timeout value. -
setTimeout(long, int) - -Method in class com.sleepycat.db.DbTxn -
The DbTxn.setTimeout method sets timeout values for locks or transactions for the specified transaction. -
setTimeout(long, int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.setTimeout method sets timeout values for locks or transactions in the database environment. -
setTmpDir(String) - -Method in class com.sleepycat.db.DbEnv -
Specify the path of a directory to be used as the location of temporary files. -
setTxnMax(int) - -Method in class com.sleepycat.db.DbEnv -
Configure the Berkeley DB database environment to support at least max active transactions. -
setTxnTimestamp(Date) - -Method in class com.sleepycat.db.DbEnv -
Recover to the time specified by timestamp rather than to the most current possible date. -
setUserBufferLength(int) - -Method in class com.sleepycat.db.Dbt -
Set the byte size of the user-specified buffer. -
setValue(Object) - -Method in class com.sleepycat.bdb.collection.MapEntry -
Changes the value of this entry. -
setVerbose(int, boolean) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.setVerbose method turns specific additional informational and debugging messages in the Berkeley DB message output on and off. -
SimpleBuffer - class com.sleepycat.bdb.bind.SimpleBuffer.
A simple data buffer implementation that allows using bindings for arbitrary - data outside the context of a database.
SimpleBuffer() - -Constructor for class com.sleepycat.bdb.bind.SimpleBuffer -
Creates a simple buffer with null data, an offset of zero and a length - of zero. -
SimpleBuffer(byte[]) - -Constructor for class com.sleepycat.bdb.bind.SimpleBuffer -
Creates a simple buffer with the given data with an offset of zero and a - length equal to the length of the data array. -
SimpleBuffer(byte[], int, int) - -Constructor for class com.sleepycat.bdb.bind.SimpleBuffer -
Creates a simple buffer with the given data, offset and length. -
size() - -Method in class com.sleepycat.bdb.collection.StoredValueSet -
  -
size() - -Method in class com.sleepycat.bdb.collection.StoredContainer +
setSnapshot(boolean) - +Method in class com.sleepycat.db.DatabaseConfig +
Specify that any specified backing source file be read in its entirety + when the database is opened. +
setSortedDuplicates(boolean) - +Method in class com.sleepycat.db.DatabaseConfig +
Configure the database to support sorted, duplicate data items. +
setSync(boolean) - +Method in class com.sleepycat.db.TransactionConfig +
Configure the transaction to write or synchronously flush the log + when it commits. +
setSystemMemory(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the database environment to allocate memory from system + shared memory instead of from memory backed by the filesystem. +
setTemporaryDirectory(String) - +Method in class com.sleepycat.db.EnvironmentConfig +
Set the path of a directory to be used as the location of temporary + files. +
setTestAndSetSpins(int) - +Method in class com.sleepycat.db.EnvironmentConfig +
Set the number of times test-and-set mutexes should spin before + blocking. +
setThreaded(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the handle to be free-threaded; that is, usable + by multiple threads within a single address space. +
setTimeout(int) - +Method in class com.sleepycat.db.LockRequest +
Set the lock timeout value. +
setTransactionConfig(TransactionConfig) - +Method in class com.sleepycat.collections.TransactionRunner +
Changes the transaction configuration used for calling + Environment.beginTransaction(com.sleepycat.db.Transaction, com.sleepycat.db.TransactionConfig). +
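TransactionRunner ties these collections-API entries together: setMaxRetries and setTransactionConfig shape how run() retries a TransactionWorker. A sketch with illustrative limits; the worker is assumed to be supplied by the application:

    import com.sleepycat.collections.TransactionRunner;
    import com.sleepycat.collections.TransactionWorker;
    import com.sleepycat.db.Environment;
    import com.sleepycat.db.TransactionConfig;

    // Sketch: retry the unit of work up to ten times on deadlock,
    // running it inside a no-sync transaction.
    class RunnerExample {
        static void runWork(Environment env, TransactionWorker worker) throws Exception {
            TransactionRunner runner = new TransactionRunner(env);
            runner.setMaxRetries(10);
            TransactionConfig config = new TransactionConfig();
            config.setNoSync(true);
            runner.setTransactionConfig(config);
            runner.run(worker);           // invokes worker.doWork() transactionally
        }
    }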
setTransactionNotDurable(boolean) - +Method in class com.sleepycat.db.DatabaseConfig +
Configure the database environment to not write log records for this + database. +
setTransactional(boolean) - +Method in class com.sleepycat.db.DatabaseConfig +
Enclose the database open within a transaction. +
setTransactional(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the database environment for transactions. +
setTruncate(boolean) - +Method in class com.sleepycat.db.DatabaseConfig +
Configure the database to be physically truncated by truncating the + underlying file, discarding all previous databases it might have + held. +
setTxnMaxActive(int) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the database environment to support at least txnMaxActive + active transactions. +
setTxnNoSync(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the system to not write or synchronously flush the log + on transaction commit. +
setTxnNotDurable(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the system to not write log records. +
setTxnTimeout(long) - +Method in class com.sleepycat.db.EnvironmentConfig +
Set the timeout value for the database environment +transactions. +
setTxnTimeout(long) - +Method in class com.sleepycat.db.Transaction +
Configure the timeout value for the transaction lifetime. +
setTxnTimestamp(Date) - +Method in class com.sleepycat.db.EnvironmentConfig +
Recover to the specified time rather than to the most current + possible date. +
setTxnWriteNoSync(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the system to write, but not synchronously flush, the log on + transaction commit. +
setType(DatabaseType) - +Method in class com.sleepycat.db.DatabaseConfig +
Configure the type of the database. +
setUnlink(boolean) - +Method in class com.sleepycat.db.CacheFile +
Remove the file when the last reference to it is closed. +
setUnsortedDuplicates(boolean) - +Method in class com.sleepycat.db.DatabaseConfig +
Configure the database to support unsorted duplicate data items. +
setUseEnvironment(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the database environment to accept information from the + process environment when naming files, regardless of the status of + the process. +
setUseEnvironmentRoot(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the database environment to accept information from the + process environment when naming files, if the process has + appropriate permissions (for example, users with a user-ID of 0 on + UNIX systems). +
setUserBuffer(int, boolean) - +Method in class com.sleepycat.db.DatabaseEntry +
Configures the entry with an application-owned buffer. +
setUserBuffer(int, boolean) - +Method in class com.sleepycat.db.MultipleEntry +
  +
setValue(Object) - +Method in class com.sleepycat.collections.MapEntryParameter +
Always throws UnsupportedOperationException since this + object is not attached to a map. +
setVerboseDeadlock(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Display additional information when doing deadlock detection. +
setVerboseRecovery(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Display additional information when performing recovery. +
setVerboseReplication(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Display additional information when processing replication messages. +
setVerboseWaitsFor(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Display the waits-for table when doing deadlock detection. +
setWrap(boolean) - +Method in class com.sleepycat.db.SequenceConfig +
Specify that the sequence should wrap around when it is + incremented (decremented) past the specified maximum (minimum) value. +
setWriteCursor(boolean) - +Method in class com.sleepycat.db.CursorConfig +
Specify that the Concurrent Data Store environment cursor will be used to + update the database. +
setXACreate(boolean) - +Method in class com.sleepycat.db.DatabaseConfig +
Configure the database to be accessed via applications running under + an X/Open conformant Transaction Manager. +
setYieldCPU(boolean) - +Method in class com.sleepycat.db.EnvironmentConfig +
Configure the system to yield the processor immediately after each + page or mutex acquisition. +
shortToEntry(short, DatabaseEntry) - +Static method in class com.sleepycat.bind.tuple.ShortBinding +
Converts a simple short value into an entry buffer. +
size() - +Method in class com.sleepycat.collections.StoredContainer
Always throws UnsupportedOperationException. -
size() - -Method in class com.sleepycat.bdb.util.FastOutputStream +
size() - +Method in class com.sleepycat.collections.StoredValueSet
  -
skip(long) - -Method in class com.sleepycat.bdb.util.FastInputStream +
size() - +Method in class com.sleepycat.util.FastOutputStream
  -
st_alloc - -Variable in class com.sleepycat.db.DbMpoolStat -
Number of page allocations. -
st_alloc_buckets - -Variable in class com.sleepycat.db.DbMpoolStat -
Number of hash buckets checked during allocation. -
st_alloc_max_buckets - -Variable in class com.sleepycat.db.DbMpoolStat -
Maximum number of hash buckets checked during an allocation. -
st_alloc_max_pages - -Variable in class com.sleepycat.db.DbMpoolStat -
Maximum number of pages checked during an allocation. -
st_alloc_pages - -Variable in class com.sleepycat.db.DbMpoolStat -
Number of pages checked during allocation. -
st_bytes - -Variable in class com.sleepycat.db.DbMpoolStat -
Bytes of cache (total cache size is st_gbytes + st_bytes). -
st_cache_hit - -Variable in class com.sleepycat.db.DbMpoolStat -
Requested pages found in the cache. -
st_cache_hit - -Variable in class com.sleepycat.db.DbMpoolFStat -
Requested pages found in the cache. -
st_cache_miss - -Variable in class com.sleepycat.db.DbMpoolStat -
Requested pages not found in the cache. -
st_cache_miss - -Variable in class com.sleepycat.db.DbMpoolFStat -
Requested pages not found in the cache. -
st_cur_file - -Variable in class com.sleepycat.db.DbLogStat -
The current log file number. -
st_cur_maxid - -Variable in class com.sleepycat.db.DbLockStat -
The current maximum unused locker ID. -
st_cur_offset - -Variable in class com.sleepycat.db.DbLogStat -
The byte offset in the current log file. -
st_disk_file - -Variable in class com.sleepycat.db.DbLogStat -
The log file number of the last record known to be on disk. -
st_disk_offset - -Variable in class com.sleepycat.db.DbLogStat -
The byte offset of the last record known to be on disk. -
st_dupmasters - -Variable in class com.sleepycat.db.DbRepStat -
The number of duplicate master conditions detected. -
st_election_cur_winner - -Variable in class com.sleepycat.db.DbRepStat -
The election winner. -
st_election_gen - -Variable in class com.sleepycat.db.DbRepStat -
The election generation number. -
st_election_lsn - -Variable in class com.sleepycat.db.DbRepStat -
The maximum LSN of election winner. -
st_election_nsites - -Variable in class com.sleepycat.db.DbRepStat -
The number sites expected to participate in elections. -
st_election_priority - -Variable in class com.sleepycat.db.DbRepStat -
The election priority. -
st_election_status - -Variable in class com.sleepycat.db.DbRepStat -
The current election phase (0 if no election is in progress). -
st_election_tiebreaker - -Variable in class com.sleepycat.db.DbRepStat -
The election tiebreaker value. -
st_election_votes - -Variable in class com.sleepycat.db.DbRepStat -
The votes received this election round. -
st_elections - -Variable in class com.sleepycat.db.DbRepStat -
The number of elections held. -
st_elections_won - -Variable in class com.sleepycat.db.DbRepStat -
The number of elections won. -
st_env_id - -Variable in class com.sleepycat.db.DbRepStat -
The current environment ID. -
st_env_priority - -Variable in class com.sleepycat.db.DbRepStat -
The current environment priority. -
st_gbytes - -Variable in class com.sleepycat.db.DbMpoolStat -
Gigabytes of cache (total cache size is st_gbytes + st_bytes). -
st_gen - -Variable in class com.sleepycat.db.DbRepStat -
The current generation number. -
st_hash_buckets - -Variable in class com.sleepycat.db.DbMpoolStat -
Number of hash buckets in buffer hash table. -
st_hash_examined - -Variable in class com.sleepycat.db.DbMpoolStat -
Total number of hash elements traversed during hash table lookups. -
st_hash_longest - -Variable in class com.sleepycat.db.DbMpoolStat -
The longest chain ever encountered in buffer hash table lookups. -
st_hash_max_wait - -Variable in class com.sleepycat.db.DbMpoolStat -
The maximum number of times any hash bucket lock was waited for by a thread of control. -
st_hash_nowait - -Variable in class com.sleepycat.db.DbMpoolStat -
The number of times that a thread of control was able to obtain a hash bucket lock without waiting. -
st_hash_searches - -Variable in class com.sleepycat.db.DbMpoolStat -
Total number of buffer hash table lookups. -
st_hash_wait - -Variable in class com.sleepycat.db.DbMpoolStat -
The number of times that a thread of control was forced to wait before obtaining a hash bucket lock. -
st_id - -Variable in class com.sleepycat.db.DbLockStat -
The last allocated locker ID. -
st_in_recovery - -Variable in class com.sleepycat.db.DbRepStat -
The site is currently in client recovery. -
st_last_ckp - -Variable in class com.sleepycat.db.DbTxnStat -
The LSN of the last checkpoint. -
st_last_txnid - -Variable in class com.sleepycat.db.DbTxnStat -
The last transaction ID allocated. -
st_lg_bsize - -Variable in class com.sleepycat.db.DbLogStat -
The in-memory log record cache size. -
st_lg_size - -Variable in class com.sleepycat.db.DbLogStat -
The current log file size. -
st_locktimeout - -Variable in class com.sleepycat.db.DbLockStat -
Lock timeout value. -
st_log_duplicated - -Variable in class com.sleepycat.db.DbRepStat -
The number of duplicate log records received. -
st_log_queued - -Variable in class com.sleepycat.db.DbRepStat -
The number of log records currently queued. -
st_log_queued_max - -Variable in class com.sleepycat.db.DbRepStat -
The maximum number of log records ever queued at once. -
st_log_queued_total - -Variable in class com.sleepycat.db.DbRepStat -
The total number of log records queued. -
st_log_records - -Variable in class com.sleepycat.db.DbRepStat -
The number of log records received and appended to the log. -
st_log_requested - -Variable in class com.sleepycat.db.DbRepStat -
The number of log records missed and requested. -
st_magic - -Variable in class com.sleepycat.db.DbLogStat -
The magic number that identifies a file as a log file. -
st_map - -Variable in class com.sleepycat.db.DbMpoolStat -
Requested pages mapped into the process' address space (there is no available information about whether or not this request caused disk I/O, although examining the application page fault rate may be helpful). -
st_map - -Variable in class com.sleepycat.db.DbMpoolFStat -
Requested pages mapped into the process' address space. -
st_master - -Variable in class com.sleepycat.db.DbRepStat -
The current master environment ID. -
st_master_changes - -Variable in class com.sleepycat.db.DbRepStat -
The number of times the master has changed. -
st_maxcommitperflush - -Variable in class com.sleepycat.db.DbLogStat -
The maximum number of commits contained in a single log flush. -
st_maxlockers - -Variable in class com.sleepycat.db.DbLockStat -
The maximum number of lockers possible. -
st_maxlocks - -Variable in class com.sleepycat.db.DbLockStat -
The maximum number of locks possible. -
st_maxnactive - -Variable in class com.sleepycat.db.DbTxnStat -
The maximum number of active transactions at any one time. -
st_maxnlockers - -Variable in class com.sleepycat.db.DbLockStat -
The maximum number of lockers at any one time. -
st_maxnlocks - -Variable in class com.sleepycat.db.DbLockStat -
The maximum number of locks at any one time. -
st_maxnobjects - -Variable in class com.sleepycat.db.DbLockStat -
The maximum number of lock objects at any one time. -
st_maxobjects - -Variable in class com.sleepycat.db.DbLockStat -
The maximum number of lock objects possible. -
st_maxtxns - -Variable in class com.sleepycat.db.DbTxnStat -
The maximum number of active transactions configured. -
st_mincommitperflush - -Variable in class com.sleepycat.db.DbLogStat -
The minimum number of commits contained in a single log flush that contained a commit. -
st_mode - -Variable in class com.sleepycat.db.DbLogStat -
The mode of any created log files. -
st_msgs_badgen - -Variable in class com.sleepycat.db.DbRepStat -
The number of messages received with a bad generation number. -
st_msgs_processed - -Variable in class com.sleepycat.db.DbRepStat -
The number of messages received and processed. -
st_msgs_recover - -Variable in class com.sleepycat.db.DbRepStat -
The number of messages ignored due to pending recovery. -
st_msgs_send_failures - -Variable in class com.sleepycat.db.DbRepStat -
The number of failed message sends. -
st_msgs_sent - -Variable in class com.sleepycat.db.DbRepStat -
The number of messages sent. -
st_naborts - -Variable in class com.sleepycat.db.DbTxnStat -
The number of transactions that have aborted. -
st_nactive - -Variable in class com.sleepycat.db.DbTxnStat -
The number of transactions that are currently active. -
st_nbegins - -Variable in class com.sleepycat.db.DbTxnStat -
The number of transactions that have begun. -
st_ncache - -Variable in class com.sleepycat.db.DbMpoolStat -
Number of caches. -
st_ncommits - -Variable in class com.sleepycat.db.DbTxnStat -
The number of transactions that have committed. -
st_nconflicts - -Variable in class com.sleepycat.db.DbLockStat -
The total number of locks not immediately available due to conflicts. -
st_ndeadlocks - -Variable in class com.sleepycat.db.DbLockStat -
The number of deadlocks. -
st_newsites - -Variable in class com.sleepycat.db.DbRepStat -
The number of new site messages received. -
st_next_lsn - -Variable in class com.sleepycat.db.DbRepStat -
In replication environments configured as masters, the next LSN expected. -
st_nlockers - -Variable in class com.sleepycat.db.DbLockStat -
The number of current lockers. -
st_nlocks - -Variable in class com.sleepycat.db.DbLockStat -
The number of current locks. -
st_nlocktimeouts - -Variable in class com.sleepycat.db.DbLockStat -
The number of lock requests that have timed out. -
st_nmodes - -Variable in class com.sleepycat.db.DbLockStat -
The number of lock modes. -
st_nnowaits - -Variable in class com.sleepycat.db.DbLockStat -
The total number of lock requests failing because Db.DB_LOCK_NOWAIT was set. -
st_nobjects - -Variable in class com.sleepycat.db.DbLockStat -
The number of current lock objects. -
st_nreleases - -Variable in class com.sleepycat.db.DbLockStat -
The total number of locks released. -
st_nrequests - -Variable in class com.sleepycat.db.DbLockStat -
The total number of locks requested. -
st_nrestores - -Variable in class com.sleepycat.db.DbTxnStat -
The number of transactions that have been restored. -
st_nsites - -Variable in class com.sleepycat.db.DbRepStat -
The number of sites believed to be in the replication group. -
st_nthrottles - -Variable in class com.sleepycat.db.DbRepStat -
Transmission limited. -
st_ntxntimeouts - -Variable in class com.sleepycat.db.DbLockStat -
The number of transactions that have timed out. -
st_outdated - -Variable in class com.sleepycat.db.DbRepStat -
The number of outdated conditions detected. -
st_page_clean - -Variable in class com.sleepycat.db.DbMpoolStat -
Clean pages currently in the cache. -
st_page_create - -Variable in class com.sleepycat.db.DbMpoolStat -
Pages created in the cache. -
st_page_create - -Variable in class com.sleepycat.db.DbMpoolFStat -
Pages created in the cache. -
st_page_dirty - -Variable in class com.sleepycat.db.DbMpoolStat -
Dirty pages currently in the cache. -
st_page_in - -Variable in class com.sleepycat.db.DbMpoolStat -
Pages read into the cache. -
st_page_in - -Variable in class com.sleepycat.db.DbMpoolFStat -
Pages read into the cache. -
st_page_out - -Variable in class com.sleepycat.db.DbMpoolStat -
Pages written from the cache to the backing file. -
st_page_out - -Variable in class com.sleepycat.db.DbMpoolFStat -
Pages written from the cache to the backing file. -
st_page_trickle - -Variable in class com.sleepycat.db.DbMpoolStat -
Dirty pages written using the DbEnv.memoryPoolTrickle method. -
st_pages - -Variable in class com.sleepycat.db.DbMpoolStat -
Pages in the cache. -
st_pagesize - -Variable in class com.sleepycat.db.DbMpoolFStat -
Page size in bytes. -
st_region_nowait - -Variable in class com.sleepycat.db.DbTxnStat -
The number of times that a thread of control was able to obtain the region lock without waiting. -
st_region_nowait - -Variable in class com.sleepycat.db.DbMpoolStat -
The number of times that a thread of control was able to obtain a region lock without waiting. -
st_region_nowait - -Variable in class com.sleepycat.db.DbLogStat -
The number of times that a thread of control was able to obtain the region lock without waiting. -
st_region_nowait - -Variable in class com.sleepycat.db.DbLockStat -
The number of times that a thread of control was able to obtain the region lock without waiting. -
st_region_wait - -Variable in class com.sleepycat.db.DbTxnStat -
The number of times that a thread of control was forced to wait before obtaining the region lock. -
st_region_wait - -Variable in class com.sleepycat.db.DbMpoolStat -
The number of times that a thread of control was forced to wait before obtaining a region lock. -
st_region_wait - -Variable in class com.sleepycat.db.DbLogStat -
The number of times that a thread of control was forced to wait before obtaining the region lock. -
st_region_wait - -Variable in class com.sleepycat.db.DbLockStat -
The number of times that a thread of control was forced to wait before obtaining the region lock. -
st_regsize - -Variable in class com.sleepycat.db.DbTxnStat -
The size of the region. -
st_regsize - -Variable in class com.sleepycat.db.DbMpoolStat -
Individual cache size. -
st_regsize - -Variable in class com.sleepycat.db.DbLogStat -
The size of the region. -
st_regsize - -Variable in class com.sleepycat.db.DbLockStat -
The size of the lock region. -
st_ro_evict - -Variable in class com.sleepycat.db.DbMpoolStat -
Clean pages forced from the cache. -
st_rw_evict - -Variable in class com.sleepycat.db.DbMpoolStat -
Dirty pages forced from the cache. -
st_scount - -Variable in class com.sleepycat.db.DbLogStat -
The number of times the log has been flushed to disk. -
st_status - -Variable in class com.sleepycat.db.DbRepStat -
The current replication mode. -
st_time_ckp - -Variable in class com.sleepycat.db.DbTxnStat -
The time the last completed checkpoint finished (as the number of seconds since the Epoch, returned by the IEEE/ANSI Std 1003.1 (POSIX) time function). -
st_txnarray - -Variable in class com.sleepycat.db.DbTxnStat +
skip(long) - +Method in class com.sleepycat.util.FastInputStream
  -
st_txns_applied - -Variable in class com.sleepycat.db.DbRepStat -
The number of transactions applied. -
st_txntimeout - -Variable in class com.sleepycat.db.DbLockStat -
Transaction timeout value. -
st_version - -Variable in class com.sleepycat.db.DbLogStat -
The version of the log file type. -
st_w_bytes - -Variable in class com.sleepycat.db.DbLogStat -
The number of bytes over and above st_w_mbytes written to this log. -
st_w_mbytes - -Variable in class com.sleepycat.db.DbLogStat -
The number of megabytes written to this log. -
st_waiting_lsn - -Variable in class com.sleepycat.db.DbRepStat -
The LSN of the first log record we have after missing log records being waited for, or 0 if no log records are currently missing. -
st_wc_bytes - -Variable in class com.sleepycat.db.DbLogStat -
The number of bytes over and above st_wc_mbytes written to this log since the last checkpoint. -
st_wc_mbytes - -Variable in class com.sleepycat.db.DbLogStat -
The number of megabytes written to this log since the last checkpoint. -
st_wcount - -Variable in class com.sleepycat.db.DbLogStat -
The number of times the log has been written to disk. -
st_wcount_fill - -Variable in class com.sleepycat.db.DbLogStat -
The number of times the log has been written to disk because the in-memory log record cache filled up. -
stat(int) - -Method in class com.sleepycat.db.Db -
The Db.stat method creates a statistical structure and fills it with statistics for the database. -
StoredClassCatalog - class com.sleepycat.bdb.StoredClassCatalog.
Java serialization catalog used for compact storage of database objects.
StoredClassCatalog(DbEnv, String, String, int) - -Constructor for class com.sleepycat.bdb.StoredClassCatalog -
Open a catalog database. -
StoredCollection - class com.sleepycat.bdb.collection.StoredCollection.
A abstract base class for all stored collections.
StoredCollections - class com.sleepycat.bdb.collection.StoredCollections.
This class consists exclusively of static methods that operate on or return - stored collections.
StoredContainer - class com.sleepycat.bdb.collection.StoredContainer.
A abstract base class for all stored collections and maps.
StoredEntrySet - class com.sleepycat.bdb.collection.StoredEntrySet.
The Set returned by Map.entrySet().
StoredIterator - class com.sleepycat.bdb.collection.StoredIterator.
The Iterator returned by all stored collections.
StoredKeySet - class com.sleepycat.bdb.collection.StoredKeySet.
The Set returned by Map.keySet() and which can also be constructed directly - if a Map is not needed.
StoredKeySet(DataIndex, DataBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredKeySet -
Creates a key set view of a DataIndex. -
StoredKeySet(DataStore, DataBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredKeySet -
Creates a key set view of a DataStore. -
StoredList - class com.sleepycat.bdb.collection.StoredList.
A List view of a DataStore or DataIndex.
StoredList(DataIndex, DataBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredList -
Creates a list view of a DataIndex. -
StoredList(DataIndex, EntityBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredList -
Creates a list entity view of a DataIndex. -
StoredList(DataStore, DataBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredList -
Creates a list view of a DataStore. -
StoredList(DataStore, EntityBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredList -
Creates a list entity view of a DataStore. -
StoredMap - class com.sleepycat.bdb.collection.StoredMap.
A Map view of a DataStore or DataIndex.
StoredMap(DataIndex, DataBinding, DataBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredMap -
Creates a map view of a DataIndex. -
StoredMap(DataIndex, DataBinding, EntityBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredMap -
Creates a map entity view of a DataIndex. -
StoredMap(DataStore, DataBinding, DataBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredMap -
Creates a map view of a DataStore. -
StoredMap(DataStore, DataBinding, EntityBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredMap -
Creates a map entity view of a DataStore. -
StoredSortedEntrySet - class com.sleepycat.bdb.collection.StoredSortedEntrySet.
The SortedSet returned by Map.entrySet().
StoredSortedKeySet - class com.sleepycat.bdb.collection.StoredSortedKeySet.
The SortedSet returned by Map.keySet() and which can also be constructed - directly if a Map is not needed.
StoredSortedKeySet(DataIndex, DataBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredSortedKeySet -
Creates a sorted key set view of a DataIndex. -
StoredSortedKeySet(DataStore, DataBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredSortedKeySet -
Creates a sorted key set view of a DataStore. -
StoredSortedMap - class com.sleepycat.bdb.collection.StoredSortedMap.
A SortedMap view of a DataStore or DataIndex.
StoredSortedMap(DataIndex, DataBinding, DataBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredSortedMap -
Creates a sorted map view of a DataIndex. -
StoredSortedMap(DataIndex, DataBinding, EntityBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredSortedMap -
Creates a sorted map entity view of a DataIndex. -
StoredSortedMap(DataStore, DataBinding, DataBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredSortedMap -
Creates a sorted map view of a DataStore. -
StoredSortedMap(DataStore, DataBinding, EntityBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredSortedMap -
Creates a sorted map entity view of a DataStore. -
StoredSortedValueSet - class com.sleepycat.bdb.collection.StoredSortedValueSet.
The SortedSet returned by Map.values() and which can also be constructed - directly if a Map is not needed.
StoredSortedValueSet(DataIndex, DataBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredSortedValueSet -
Creates a sorted value set view of a DataIndex. -
StoredSortedValueSet(DataIndex, EntityBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredSortedValueSet -
Creates a sorted value set entity view of a DataIndex. -
StoredSortedValueSet(DataStore, EntityBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredSortedValueSet -
Creates a sorted value set entity view of a DataStore. -
StoredValueSet - class com.sleepycat.bdb.collection.StoredValueSet.
The Set returned by Map.values() and Map.duplicates(), and which can also be - constructed directly if a Map is not needed.
StoredValueSet(DataIndex, DataBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredValueSet -
Creates a value set view of a DataIndex. -
StoredValueSet(DataIndex, EntityBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredValueSet -
Creates a value set entity view of a DataIndex. -
StoredValueSet(DataStore, DataBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredValueSet -
Creates a value set view of a DataStore. -
StoredValueSet(DataStore, EntityBinding, boolean) - -Constructor for class com.sleepycat.bdb.collection.StoredValueSet -
Creates a value set entity view of a DataStore. -
strerror(int) - -Static method in class com.sleepycat.db.DbEnv -
The DbEnv.strerror method returns an error message string corresponding to the error number error parameter. -
STRING - -Static variable in interface com.sleepycat.bdb.bind.DataType -
String data type. -
stringToBytes(String) - -Static method in class com.sleepycat.bdb.util.UtfOps +
startReplication(DatabaseEntry, boolean) - +Method in class com.sleepycat.db.Environment +
Configure the database environment as a client or master in a group + of replicated database environments. +
stringToBytes(String) - +Static method in class com.sleepycat.util.UtfOps
Converts strings to byte arrays. -
subList(int, int) - -Method in class com.sleepycat.bdb.collection.StoredList +
stringToEntry(String, DatabaseEntry) - +Static method in class com.sleepycat.bind.tuple.StringBinding +
Converts a simple String value into an entry buffer. +
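The tuple-binding helpers such as stringToEntry (and shortToEntry earlier in this index) convert plain values to and from DatabaseEntry buffers. A small sketch; the companion entryToString call is assumed to be the usual reverse conversion:

    import com.sleepycat.bind.tuple.StringBinding;
    import com.sleepycat.db.DatabaseEntry;

    // Sketch: round-trip a String through a DatabaseEntry.
    class StringEntryExample {
        static DatabaseEntry toEntry(String value) {
            DatabaseEntry entry = new DatabaseEntry();
            StringBinding.stringToEntry(value, entry);
            return entry;
        }
        static String fromEntry(DatabaseEntry entry) {
            return StringBinding.entryToString(entry);
        }
    }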
subList(int, int) - +Method in class com.sleepycat.collections.StoredList
Returns a view of the portion of this list between the specified fromIndex, inclusive, and toIndex, exclusive. -
subMap(Object, boolean, Object, boolean) - -Method in class com.sleepycat.bdb.collection.StoredSortedMap -
Returns a view of the portion of this sorted map whose elements are - strictly greater than fromKey and strictly less than toKey, - optionally including fromKey and toKey. -
subMap(Object, Object) - -Method in class com.sleepycat.bdb.collection.StoredSortedMap +
subMap(Object, Object) - +Method in class com.sleepycat.collections.StoredSortedMap
Returns a view of the portion of this sorted map whose elements range from fromKey, inclusive, to toKey, exclusive. -
subSet(Object, boolean, Object, boolean) - -Method in class com.sleepycat.bdb.collection.StoredSortedValueSet -
Returns a view of the portion of this sorted set whose elements are - strictly greater than fromValue and strictly less than toValue, - optionally including fromValue and toValue. -
subSet(Object, boolean, Object, boolean) - -Method in class com.sleepycat.bdb.collection.StoredSortedKeySet -
Returns a view of the portion of this sorted set whose elements are +
subMap(Object, boolean, Object, boolean) - +Method in class com.sleepycat.collections.StoredSortedMap +
Returns a view of the portion of this sorted map whose elements are strictly greater than fromKey and strictly less than toKey, optionally including fromKey and toKey. -
subSet(Object, boolean, Object, boolean) - -Method in class com.sleepycat.bdb.collection.StoredSortedEntrySet +
subSet(Object, Object) - +Method in class com.sleepycat.collections.StoredSortedEntrySet +
Returns a view of the portion of this sorted set whose elements range + from fromMapEntry, inclusive, to toMapEntry, exclusive. +
subSet(Object, boolean, Object, boolean) - +Method in class com.sleepycat.collections.StoredSortedEntrySet
Returns a view of the portion of this sorted set whose elements are strictly greater than fromMapEntry and strictly less than toMapEntry, optionally including fromMapEntry and toMapEntry. -
subSet(Object, Object) - -Method in class com.sleepycat.bdb.collection.StoredSortedValueSet -
Returns a view of the portion of this sorted set whose elements range - from fromValue, inclusive, to toValue, exclusive. -
subSet(Object, Object) - -Method in class com.sleepycat.bdb.collection.StoredSortedKeySet +
subSet(Object, Object) - +Method in class com.sleepycat.collections.StoredSortedKeySet
Returns a view of the portion of this sorted set whose elements range from fromKey, inclusive, to toKey, exclusive. -
subSet(Object, Object) - -Method in class com.sleepycat.bdb.collection.StoredSortedEntrySet +
subSet(Object, boolean, Object, boolean) - +Method in class com.sleepycat.collections.StoredSortedKeySet +
Returns a view of the portion of this sorted set whose elements are + strictly greater than fromKey and strictly less than toKey, + optionally including fromKey and toKey. +
subSet(Object, Object) - +Method in class com.sleepycat.collections.StoredSortedValueSet
Returns a view of the portion of this sorted set whose elements range - from fromMapEntry, inclusive, to toMapEntry, exclusive. -
subView(Object, boolean, Object, boolean, DataBinding) - -Method in class com.sleepycat.bdb.DataView -
Return a new value-set view for key range, optionally changing - the key binding. -
sync(int) - -Method in class com.sleepycat.db.Db -
The Db.sync method flushes any cached information to disk. + from fromValue, inclusive, to toValue, exclusive. +
subSet(Object, boolean, Object, boolean) - +Method in class com.sleepycat.collections.StoredSortedValueSet +
Returns a view of the portion of this sorted set whose elements are + strictly greater than fromValue and strictly less than toValue, + optionally including fromValue and toValue. +
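The subMap/subSet entries expose key-range views over the stored collections. A hedged sketch of building such a view; the TupleBinding.getPrimitiveBinding call and the key range are assumptions for illustration, and the raw types match the pre-generics API of this release:

    import java.util.SortedMap;
    import com.sleepycat.bind.EntryBinding;
    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.collections.StoredSortedMap;
    import com.sleepycat.db.Database;

    // Sketch: view a btree database as a read-only SortedMap of String keys
    // and values, then take the half-open key range ["a", "n").
    class RangeViewExample {
        static SortedMap rangeView(Database db) {
            EntryBinding binding = TupleBinding.getPrimitiveBinding(String.class);
            StoredSortedMap map = new StoredSortedMap(db, binding, binding, false);
            return map.subMap("a", "n");
        }
    }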
sync() - +Method in class com.sleepycat.db.Database +
Flush any cached information to disk.

T

-
tailMap(Object) - -Method in class com.sleepycat.bdb.collection.StoredSortedMap -
Returns a view of the portion of this sorted map whose elements are - greater than or equal to fromKey. -
tailMap(Object, boolean) - -Method in class com.sleepycat.bdb.collection.StoredSortedMap -
Returns a view of the portion of this sorted map whose elements are - strictly greater than fromKey, optionally including fromKey. -
tailSet(Object) - -Method in class com.sleepycat.bdb.collection.StoredSortedValueSet -
Returns a view of the portion of this sorted set whose elements are - greater than or equal to fromValue. -
tailSet(Object) - -Method in class com.sleepycat.bdb.collection.StoredSortedKeySet -
Returns a view of the portion of this sorted set whose elements are - greater than or equal to fromKey. -
tailSet(Object) - -Method in class com.sleepycat.bdb.collection.StoredSortedEntrySet -
Returns a view of the portion of this sorted set whose elements are - greater than or equal to fromMapEntry. -
tailSet(Object, boolean) - -Method in class com.sleepycat.bdb.collection.StoredSortedValueSet -
Returns a view of the portion of this sorted set whose elements are - strictly greater than fromValue, optionally including fromValue. -
tailSet(Object, boolean) - -Method in class com.sleepycat.bdb.collection.StoredSortedKeySet -
Returns a view of the portion of this sorted set whose elements are - strictly greater than fromKey, optionally including fromKey. -
tailSet(Object, boolean) - -Method in class com.sleepycat.bdb.collection.StoredSortedEntrySet -
Returns a view of the portion of this sorted set whose elements are - strictly greater than fromMapEntry, optionally including fromMapEntry. -
TimeUnits - class com.sleepycat.bdb.util.TimeUnits.
Common time unit definitions.
TimeUnits() - -Constructor for class com.sleepycat.bdb.util.TimeUnits -
  -
toArray() - -Method in class com.sleepycat.bdb.collection.StoredCollection -
Returns an array of all the elements in this collection. -
toArray(Object[]) - -Method in class com.sleepycat.bdb.collection.StoredCollection -
Returns an array of all the elements in this collection whose runtime - type is that of the specified array. -
toByteArray() - -Method in class com.sleepycat.bdb.util.FastOutputStream -
  -
toByteArray(byte[], int) - -Method in class com.sleepycat.bdb.util.FastOutputStream -
Copy the buffered data to the given array. -
toList() - -Method in class com.sleepycat.bdb.collection.StoredCollection -
Returns a copy of this collection as an ArrayList. -
toString() - -Method in class com.sleepycat.bdb.DataThang -
Converts the byte array of this thang to space-separated integers, - and suffixed by the record number if applicable. -
toString() - -Method in class com.sleepycat.bdb.DataStore -
Returns a printable string identifying the filename and datbase name - of the store. -
toString() - -Method in class com.sleepycat.bdb.DataIndex -
Returns a printable string identifying the file and database name - of the index. -
toString() - -Method in class com.sleepycat.bdb.DataDb -
Returns a debugging string containing the database name. -
toString() - -Method in class com.sleepycat.bdb.collection.StoredMap -
Converts the map to a string representation for debugging. -
toString() - -Method in class com.sleepycat.bdb.collection.StoredEntrySet -
  -
toString() - -Method in class com.sleepycat.bdb.collection.StoredCollection -
Converts the collection to a string representation for debugging. -
toString() - -Method in class com.sleepycat.bdb.collection.MapEntry -
Converts the entry to a string representation for debugging. -
toString() - -Method in class com.sleepycat.bdb.util.FastOutputStream -
  -
toString() - -Method in class com.sleepycat.db.DbTxnStat -
Provide a string representation of all the fields contained - within this class. -
toString() - -Method in class com.sleepycat.db.DbTxnStat.Active -
Provide a string representation of all the fields contained - within this class. -
toString() - -Method in class com.sleepycat.db.DbRepStat -
Provide a string representation of all the fields contained - within this class. -
toString() - -Method in class com.sleepycat.db.DbQueueStat -
Provide a string representation of all the fields contained - within this class. -
toString() - -Method in class com.sleepycat.db.DbMpoolStat -
Provide a string representation of all the fields contained - within this class. -
toString() - -Method in class com.sleepycat.db.DbMpoolFStat -
Provide a string representation of all the fields contained - within this class. -
toString() - -Method in class com.sleepycat.db.DbMemoryException -
Override of DbException.toString(): the extra verbage that - comes from DbEnv.strerror(ENOMEM) is not helpful. -
toString() - -Method in class com.sleepycat.db.DbLogStat -
Provide a string representation of all the fields contained - within this class. -
toString() - -Method in class com.sleepycat.db.DbLockStat -
Provide a string representation of all the fields contained - within this class. -
toString() - -Method in class com.sleepycat.db.DbHashStat -
Provide a string representation of all the fields contained - within this class. -
toString() - -Method in class com.sleepycat.db.DbException -
  -
toString() - -Method in class com.sleepycat.db.DbBtreeStat -
Provide a string representation of all the fields contained - within this class. -
toString(Dbt) - -Static method in class com.sleepycat.bdb.DataThang -
Converts the byte array of this thang to space-separated integers, - and suffixed by the record number if applicable. -
toString(String) - -Method in class com.sleepycat.bdb.util.FastOutputStream -
  -
TransactionRunner - class com.sleepycat.bdb.TransactionRunner.
Starts a transaction, calls TransactionWorker.doWork(), and handles - transaction retry and exceptions.
TransactionRunner(DbEnv) - -Constructor for class com.sleepycat.bdb.TransactionRunner +
TIMEOUT - +Static variable in class com.sleepycat.db.LockOperation +
Cause the specified locker to timeout immediately. +
Transaction - class com.sleepycat.db.Transaction.
The Transaction object is the handle for a transaction.
TransactionConfig - class com.sleepycat.db.TransactionConfig.
Specifies the attributes of a database environment transaction.
TransactionConfig() - +Constructor for class com.sleepycat.db.TransactionConfig +
An instance created using the default constructor is initialized + with the system's default settings. +
TransactionRunner - class com.sleepycat.collections.TransactionRunner.
Starts a transaction, calls TransactionWorker.doWork(), and handles + transaction retry and exceptions.
TransactionRunner(Environment) - +Constructor for class com.sleepycat.collections.TransactionRunner
Creates a transaction runner for a given Berkeley DB environment. -
TransactionRunner(DbEnv, int) - -Constructor for class com.sleepycat.bdb.TransactionRunner +
TransactionRunner(Environment, int, TransactionConfig) - +Constructor for class com.sleepycat.collections.TransactionRunner
Creates a transaction runner for a given Berkeley DB environment and with a given number of maximum retries. -
TransactionWorker - interface com.sleepycat.bdb.TransactionWorker.
The interface implemented to perform the work within a transaction.
truncate(DbTxn, int) - -Method in class com.sleepycat.db.Db -
The Db.truncate method empties the database, discarding all records it contains. -
TupleBinding - class com.sleepycat.bdb.bind.tuple.TupleBinding.
An abstract tuple binding for tuple keys or values.
TupleBinding(TupleFormat) - -Constructor for class com.sleepycat.bdb.bind.tuple.TupleBinding +
TransactionStats - class com.sleepycat.db.TransactionStats.
Transaction statistics for a database environment.
TransactionStats.Active - class com.sleepycat.db.TransactionStats.Active.
The Active class represents an active transaction.
TransactionWorker - interface com.sleepycat.collections.TransactionWorker.
The interface implemented to perform the work within a transaction.
TupleBinding - class com.sleepycat.bind.tuple.TupleBinding.
An abstract EntryBinding that treats a key or data entry as a + tuple; it includes predefined bindings for Java primitive types.
TupleBinding() - +Constructor for class com.sleepycat.bind.tuple.TupleBinding
Creates a tuple binding. -
TupleFormat - class com.sleepycat.bdb.bind.tuple.TupleFormat.
The format for tuple data.
TupleFormat() - -Constructor for class com.sleepycat.bdb.bind.tuple.TupleFormat -
Creates a tuple format. -
TupleInput - class com.sleepycat.bdb.bind.tuple.TupleInput.
Used by tuple bindings to read tuple data.
TupleInput(byte[]) - -Constructor for class com.sleepycat.bdb.bind.tuple.TupleInput +
TupleInput - class com.sleepycat.bind.tuple.TupleInput.
An InputStream with DataInput-like methods for + reading tuple fields.
TupleInput(byte[]) - +Constructor for class com.sleepycat.bind.tuple.TupleInput
Creates a tuple input object for reading a byte array of tuple data. -
TupleInput(byte[], int, int) - -Constructor for class com.sleepycat.bdb.bind.tuple.TupleInput +
TupleInput(byte[], int, int) - +Constructor for class com.sleepycat.bind.tuple.TupleInput
Creates a tuple input object for reading a byte array of tuple data at a given offset for a given length. -
TupleInput(TupleOutput) - -Constructor for class com.sleepycat.bdb.bind.tuple.TupleInput +
TupleInput(TupleOutput) - +Constructor for class com.sleepycat.bind.tuple.TupleInput
Creates a tuple input object from the data contained in a tuple output object. -
TupleInputBinding - class com.sleepycat.bdb.bind.tuple.TupleInputBinding.
A concrete tuple binding for keys or values which are TupleInput - objects.
TupleInputBinding(TupleFormat) - -Constructor for class com.sleepycat.bdb.bind.tuple.TupleInputBinding +
TupleInputBinding - class com.sleepycat.bind.tuple.TupleInputBinding.
A concrete EntryBinding that uses the TupleInput + object as the key or data object.
TupleInputBinding() - +Constructor for class com.sleepycat.bind.tuple.TupleInputBinding
Creates a tuple input binding. -
TupleMarshalledBinding - class com.sleepycat.bdb.bind.tuple.TupleMarshalledBinding.
A concrete key or value binding that uses the MarshalledTupleData - interface.
TupleMarshalledBinding(TupleFormat, Class) - -Constructor for class com.sleepycat.bdb.bind.tuple.TupleMarshalledBinding +
TupleMarshalledBinding - class com.sleepycat.bind.tuple.TupleMarshalledBinding.
A concrete TupleBinding that delegates to the + MarshalledTupleEntry interface of the data or key object.
TupleMarshalledBinding(Class) - +Constructor for class com.sleepycat.bind.tuple.TupleMarshalledBinding
Creates a tuple marshalled binding object. -
TupleOutput - class com.sleepycat.bdb.bind.tuple.TupleOutput.
Used by tuple bindings to write tuple data.
TupleOutput() - -Constructor for class com.sleepycat.bdb.bind.tuple.TupleOutput +
TupleOutput - class com.sleepycat.bind.tuple.TupleOutput.
An OutputStream with DataOutput-like methods for + writing tuple fields.
TupleOutput() - +Constructor for class com.sleepycat.bind.tuple.TupleOutput
Creates a tuple output object for writing a byte array of tuple data. -
TupleOutput(byte[]) - -Constructor for class com.sleepycat.bdb.bind.tuple.TupleOutput +
TupleOutput(byte[]) - +Constructor for class com.sleepycat.bind.tuple.TupleOutput
Creates a tuple output object for writing a byte array of tuple data, using a given buffer. -
TupleSerialBinding - class com.sleepycat.bdb.bind.serial.TupleSerialBinding.
A abstract entity binding that uses a tuple key and a serial value.
TupleSerialBinding(TupleFormat, SerialFormat) - -Constructor for class com.sleepycat.bdb.bind.serial.TupleSerialBinding +
TupleSerialBinding - class com.sleepycat.bind.serial.TupleSerialBinding.
An abstract EntityBinding that treats an entity's key entry as + a tuple and its data entry as a serialized object.
TupleSerialBinding(ClassCatalog, Class) - +Constructor for class com.sleepycat.bind.serial.TupleSerialBinding +
Creates a tuple-serial entity binding. +
TupleSerialBinding(SerialBinding) - +Constructor for class com.sleepycat.bind.serial.TupleSerialBinding
Creates a tuple-serial entity binding. -
TupleSerialDbFactory - class com.sleepycat.bdb.factory.TupleSerialDbFactory.
Creates stored collections having tuple keys and serialized entity values.
TupleSerialDbFactory(ClassCatalog) - -Constructor for class com.sleepycat.bdb.factory.TupleSerialDbFactory +
TupleSerialFactory - class com.sleepycat.collections.TupleSerialFactory.
Creates stored collections having tuple keys and serialized entity values.
TupleSerialFactory(ClassCatalog) - +Constructor for class com.sleepycat.collections.TupleSerialFactory
Creates a tuple-serial factory for given environment and class catalog. -
TupleSerialKeyExtractor - class com.sleepycat.bdb.bind.serial.TupleSerialKeyExtractor.
A abstract key extractor that uses a tuple key and a serial value.
TupleSerialKeyExtractor(TupleFormat, SerialFormat, TupleFormat) - -Constructor for class com.sleepycat.bdb.bind.serial.TupleSerialKeyExtractor -
Creates a tuple-serial key extractor. -
TupleSerialMarshalledBinding - class com.sleepycat.bdb.bind.serial.TupleSerialMarshalledBinding.
A concrete entity binding that uses the MarshalledTupleKeyEntity - interface.
TupleSerialMarshalledBinding(TupleFormat, SerialFormat) - -Constructor for class com.sleepycat.bdb.bind.serial.TupleSerialMarshalledBinding +
TupleSerialKeyCreator - class com.sleepycat.bind.serial.TupleSerialKeyCreator.
An abstract key creator that uses a tuple key and a serial data entry.
TupleSerialKeyCreator(ClassCatalog, Class) - +Constructor for class com.sleepycat.bind.serial.TupleSerialKeyCreator +
Creates a tuple-serial key creator. +
TupleSerialKeyCreator(SerialBinding) - +Constructor for class com.sleepycat.bind.serial.TupleSerialKeyCreator +
Creates a tuple-serial key creator. +
TupleSerialMarshalledBinding - class com.sleepycat.bind.serial.TupleSerialMarshalledBinding.
A concrete TupleSerialBinding that delegates to the + MarshalledTupleKeyEntity interface of the entity class.
TupleSerialMarshalledBinding(ClassCatalog, Class) - +Constructor for class com.sleepycat.bind.serial.TupleSerialMarshalledBinding
Creates a tuple-serial marshalled binding object. -
TupleSerialMarshalledKeyExtractor - class com.sleepycat.bdb.bind.serial.TupleSerialMarshalledKeyExtractor.
A concrete key extractor that works in conjunction with a TupleSerialMarshalledBinding.
TupleSerialMarshalledKeyExtractor(TupleSerialMarshalledBinding, TupleFormat, String, boolean, boolean) - -Constructor for class com.sleepycat.bdb.bind.serial.TupleSerialMarshalledKeyExtractor -
Creates a tuple-serial marshalled key extractor. -
TupleTupleBinding - class com.sleepycat.bdb.bind.tuple.TupleTupleBinding.
An abstract entity binding that uses a tuple key and a tuple value.
TupleTupleBinding(TupleFormat, TupleFormat) - -Constructor for class com.sleepycat.bdb.bind.tuple.TupleTupleBinding +
TupleSerialMarshalledBinding(SerialBinding) - +Constructor for class com.sleepycat.bind.serial.TupleSerialMarshalledBinding +
Creates a tuple-serial marshalled binding object. +
TupleSerialMarshalledKeyCreator - class com.sleepycat.bind.serial.TupleSerialMarshalledKeyCreator.
A concrete key creator that works in conjunction with a TupleSerialMarshalledBinding.
TupleSerialMarshalledKeyCreator(TupleSerialMarshalledBinding, String) - +Constructor for class com.sleepycat.bind.serial.TupleSerialMarshalledKeyCreator +
Creates a tuple-serial marshalled key creator. +
TupleTupleBinding - class com.sleepycat.bind.tuple.TupleTupleBinding.
An abstract EntityBinding that treats an entity's key entry and + data entry as tuples.
TupleTupleBinding() - +Constructor for class com.sleepycat.bind.tuple.TupleTupleBinding
Creates a tuple-tuple entity binding. -
TupleTupleKeyExtractor - class com.sleepycat.bdb.bind.tuple.TupleTupleKeyExtractor.
An abstract key extractor that uses a tuple key and a tuple value.
TupleTupleKeyExtractor(TupleFormat, TupleFormat, TupleFormat) - -Constructor for class com.sleepycat.bdb.bind.tuple.TupleTupleKeyExtractor -
Creates a tuple-tuple key extractor. -
TupleTupleMarshalledBinding - class com.sleepycat.bdb.bind.tuple.TupleTupleMarshalledBinding.
A concrete entity binding that uses the MarshalledTupleData and the - MarshalledTupleKeyEntity interfaces.
TupleTupleMarshalledBinding(TupleFormat, TupleFormat, Class) - -Constructor for class com.sleepycat.bdb.bind.tuple.TupleTupleMarshalledBinding +
TupleTupleKeyCreator - class com.sleepycat.bind.tuple.TupleTupleKeyCreator.
An abstract key creator that uses a tuple key and a tuple data entry.
TupleTupleKeyCreator() - +Constructor for class com.sleepycat.bind.tuple.TupleTupleKeyCreator +
Creates a tuple-tuple key creator. +
TupleTupleMarshalledBinding - class com.sleepycat.bind.tuple.TupleTupleMarshalledBinding.
A concrete TupleTupleBinding that delegates to the + MarshalledTupleEntry and + MarshalledTupleKeyEntity interfaces of the entity class.
TupleTupleMarshalledBinding(Class) - +Constructor for class com.sleepycat.bind.tuple.TupleTupleMarshalledBinding
Creates a tuple-tuple marshalled binding object. -
TupleTupleMarshalledKeyExtractor - class com.sleepycat.bdb.bind.tuple.TupleTupleMarshalledKeyExtractor.
A concrete key extractor that works in conjunction with a TupleTupleMarshalledBinding.
TupleTupleMarshalledKeyExtractor(TupleTupleMarshalledBinding, TupleFormat, String, boolean, boolean) - -Constructor for class com.sleepycat.bdb.bind.tuple.TupleTupleMarshalledKeyExtractor -
Creates a tuple-tuple marshalled key extractor. -
txn - -Variable in class com.sleepycat.db.DbPreplist -
The transaction handle for the transaction. -
txn_begin(DbTxn, int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.txnBegin(DbTxn,int) -
txn_checkpoint(int, int, int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.txnCheckpoint(int,int,int) -
txn_recover(int, int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.txnRecover(int,int) -
txn_stat(int) - -Method in class com.sleepycat.db.DbEnv -
Deprecated. As of Berkeley DB 4.2, replaced by DbEnv.txnStat(int) -
txnBegin(DbTxn, int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.txnBegin method creates a new transaction in the environment and returns a DbTxn that uniquely identifies it. -
txnCheckpoint(int, int, int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.txnCheckpoint method flushes the underlying memory pool, writes a checkpoint record to the log, and then flushes the log. -
txnid - -Variable in class com.sleepycat.db.DbTxnStat.Active -
The transaction ID of the transaction. -
txnRecover(int, int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.txnRecover method returns a list of prepared but not yet resolved transactions. -
txnStat(int) - -Method in class com.sleepycat.db.DbEnv -
The DbEnv.txnStat method returns the transaction subsystem statistics. +
TupleTupleMarshalledKeyCreator - class com.sleepycat.bind.tuple.TupleTupleMarshalledKeyCreator.
A concrete key creator that works in conjunction with a TupleTupleMarshalledBinding.
TupleTupleMarshalledKeyCreator(TupleTupleMarshalledBinding, String) - +Constructor for class com.sleepycat.bind.tuple.TupleTupleMarshalledKeyCreator +
Creates a tuple-tuple marshalled key creator. +
tailMap(Object) - +Method in class com.sleepycat.collections.StoredSortedMap +
Returns a view of the portion of this sorted map whose elements are + greater than or equal to fromKey. +
tailMap(Object, boolean) - +Method in class com.sleepycat.collections.StoredSortedMap +
Returns a view of the portion of this sorted map whose elements are + strictly greater than fromKey, optionally including fromKey. +
tailSet(Object) - +Method in class com.sleepycat.collections.StoredSortedEntrySet +
Returns a view of the portion of this sorted set whose elements are + greater than or equal to fromMapEntry. +
tailSet(Object, boolean) - +Method in class com.sleepycat.collections.StoredSortedEntrySet +
Returns a view of the portion of this sorted set whose elements are + strictly greater than fromMapEntry, optionally including fromMapEntry. +
tailSet(Object) - +Method in class com.sleepycat.collections.StoredSortedKeySet +
Returns a view of the portion of this sorted set whose elements are + greater than or equal to fromKey. +
tailSet(Object, boolean) - +Method in class com.sleepycat.collections.StoredSortedKeySet +
Returns a view of the portion of this sorted set whose elements are + strictly greater than fromKey, optionally including fromKey. +
tailSet(Object) - +Method in class com.sleepycat.collections.StoredSortedValueSet +
Returns a view of the portion of this sorted set whose elements are + greater than or equal to fromValue. +
tailSet(Object, boolean) - +Method in class com.sleepycat.collections.StoredSortedValueSet +
Returns a view of the portion of this sorted set whose elements are + strictly greater than fromValue, optionally including fromValue. +
toArray() - +Method in class com.sleepycat.collections.StoredCollection +
Returns an array of all the elements in this collection. +
toArray(Object[]) - +Method in class com.sleepycat.collections.StoredCollection +
Returns an array of all the elements in this collection whose runtime + type is that of the specified array. +
toByteArray() - +Method in class com.sleepycat.util.FastOutputStream +
  +
toByteArray(byte[], int) - +Method in class com.sleepycat.util.FastOutputStream +
Copy the buffered data to the given array. +
toList() - +Method in class com.sleepycat.collections.StoredCollection +
Returns a copy of this collection as an ArrayList. +
toString() - +Method in class com.sleepycat.collections.MapEntryParameter +
Converts the entry to a string representation for debugging. +
toString() - +Method in class com.sleepycat.collections.StoredCollection +
Converts the collection to a string representation for debugging. +
toString() - +Method in class com.sleepycat.collections.StoredEntrySet +
  +
toString() - +Method in class com.sleepycat.collections.StoredMap +
Converts the map to a string representation for debugging. +
toString() - +Method in class com.sleepycat.db.BtreeStats +
For convenience, the BtreeStats class has a toString method + that lists all the data fields. +
toString() - +Method in class com.sleepycat.db.CacheFileStats +
For convenience, the CacheFileStats class has a toString method + that lists all the data fields. +
toString() - +Method in class com.sleepycat.db.CacheStats +
For convenience, the CacheStats class has a toString method that + lists all the data fields. +
toString() - +Method in class com.sleepycat.db.HashStats +
For convenience, the HashStats class has a toString method + that lists all the data fields. +
toString() - +Method in class com.sleepycat.db.LockStats +
For convenience, the LockStats class has a toString method + that lists all the data fields. +
toString() - +Method in class com.sleepycat.db.LogStats +
For convenience, the LogStats class has a toString method that lists + all the data fields. +
toString() - +Method in class com.sleepycat.db.QueueStats +
For convenience, the QueueStats class has a toString method + that lists all the data fields. +
toString() - +Method in class com.sleepycat.db.ReplicationStats +
For convenience, the ReplicationStats class has a toString method + that lists all the data fields. +
toString() - +Method in class com.sleepycat.db.TransactionStats +
For convenience, the TransactionStats class has a toString method + that lists all the data fields. +
toString() - +Method in class com.sleepycat.util.FastOutputStream +
  +
toString(String) - +Method in class com.sleepycat.util.FastOutputStream +
  +
trickleCacheWrite(int) - +Method in class com.sleepycat.db.Environment +
Ensure that a specified percent of the pages in the shared memory + pool are clean, by writing dirty pages to their backing files. +
truncate(Transaction, boolean) - +Method in class com.sleepycat.db.Database +
Empty the database, discarding all records it contains.

U

-
unmarshalData(TupleInput) - -Method in interface com.sleepycat.bdb.bind.tuple.MarshalledTupleData -
Construct the key or value object from the key or value tuple data. -
unmarshalPrimaryKey(TupleInput) - -Method in interface com.sleepycat.bdb.bind.tuple.MarshalledTupleKeyEntity +
UNKNOWN - +Static variable in class com.sleepycat.db.DatabaseType +
The database type is unknown. +
UtfOps - class com.sleepycat.util.UtfOps.
UTF operations with more flexibility than is provided by DataInput and + DataOutput.
UtfOps() - +Constructor for class com.sleepycat.util.UtfOps +
  +
unmarshalEntry(TupleInput) - +Method in interface com.sleepycat.bind.tuple.MarshalledTupleEntry +
Construct the key or data object from the key or data tuple entry. +
unmarshalPrimaryKey(TupleInput) - +Method in interface com.sleepycat.bind.tuple.MarshalledTupleKeyEntity
Completes construction of the entity by setting its primary key from the stored primary key. -
unwrap(Exception) - -Static method in class com.sleepycat.bdb.util.ExceptionUnwrapper +
unwrap(Exception) - +Static method in class com.sleepycat.util.ExceptionUnwrapper
Unwraps an Exception and returns the underlying Exception, or throws an Error if the underlying Throwable is an Error. -
unwrapAny(Throwable) - -Static method in class com.sleepycat.bdb.util.ExceptionUnwrapper +
unwrapAny(Throwable) - +Static method in class com.sleepycat.util.ExceptionUnwrapper
Unwraps an Exception and returns the underlying Throwable. -
upgrade(String, int) - -Method in class com.sleepycat.db.Db -
The Db.upgrade method upgrades all of the databases included in the file file, if necessary. -
UtfOps - class com.sleepycat.bdb.util.UtfOps.
UTF operations with more flexibility than is provided by DataInput and - DataOutput.
UtfOps() - -Constructor for class com.sleepycat.bdb.util.UtfOps -
  +
upgrade(String, DatabaseConfig) - +Static method in class com.sleepycat.db.Database +
Upgrade all of the databases included in the specified file. +
upgradeFeedback(Database, int) - +Method in interface com.sleepycat.db.FeedbackHandler +
A function called with progress information when the database is being upgraded.

V

-
values() - -Method in class com.sleepycat.bdb.collection.StoredMap +
VERY_HIGH - +Static variable in class com.sleepycat.db.CacheFilePriority +
The highest priority: pages are the least likely to be discarded. +
VERY_LOW - +Static variable in class com.sleepycat.db.CacheFilePriority +
The lowest priority: pages are the most likely to be discarded. +
VerifyConfig - class com.sleepycat.db.VerifyConfig.
Specifies the attributes of a verification operation.
VerifyConfig() - +Constructor for class com.sleepycat.db.VerifyConfig +
An instance created using the default constructor is initialized + with the system's default settings. +
values() - +Method in class com.sleepycat.collections.StoredMap
Returns a collection view of the values contained in this map. -
valueSetView() - -Method in class com.sleepycat.bdb.DataView -
Return a new value-set view derived from this view by setting the - key binding to null. -
valueSetView(Object) - -Method in class com.sleepycat.bdb.DataView -
Return a new value-set view for single key range. -
verify(String, String, OutputStream, int) - -Method in class com.sleepycat.db.Db -
The Db.verify method verifies the integrity of all databases in the file specified by the file parameter, and optionally outputs the databases' key/data pairs to the file stream specified by the outfile parameter. +
verify(String, String, PrintStream, VerifyConfig) - +Method in class com.sleepycat.db.Database +
Returns whether all of the databases in a file are uncorrupted. +
verifyFeedback(Database, int) - +Method in interface com.sleepycat.db.FeedbackHandler +
A function called with progress information when the database is being verified.

W

-
write(byte[]) - -Method in class com.sleepycat.bdb.util.FastOutputStream -
  -
write(byte[], int, int) - -Method in class com.sleepycat.bdb.util.FastOutputStream -
  -
write(int) - -Method in class com.sleepycat.bdb.util.FastOutputStream -
  -
writeBoolean(boolean) - -Method in class com.sleepycat.bdb.bind.tuple.TupleOutput +
WRITE - +Static variable in class com.sleepycat.db.LockRequestMode +
Write (exclusive). +
WRITECURSOR - +Static variable in class com.sleepycat.db.CursorConfig +
A convenience instance to specify the Concurrent Data Store environment + cursor will be used to update the database. +
write(int) - +Method in class com.sleepycat.util.FastOutputStream +
  +
write(byte[]) - +Method in class com.sleepycat.util.FastOutputStream +
  +
write(byte[], int, int) - +Method in class com.sleepycat.util.FastOutputStream +
  +
writeBoolean(boolean) - +Method in class com.sleepycat.bind.tuple.TupleOutput
Writes a boolean (one byte) unsigned value to the buffer, writing one if the value is true and zero if it is false. -
writeByte(int) - -Method in class com.sleepycat.bdb.bind.tuple.TupleOutput +
writeByte(int) - +Method in class com.sleepycat.bind.tuple.TupleOutput
Writes a signed byte (one byte) value to the buffer. -
writeBytes(char[]) - -Method in class com.sleepycat.bdb.bind.tuple.TupleOutput +
writeBytes(String) - +Method in class com.sleepycat.bind.tuple.TupleOutput
Writes the specified bytes to the buffer, converting each character to an unsigned byte value. -
writeBytes(String) - -Method in class com.sleepycat.bdb.bind.tuple.TupleOutput +
writeBytes(char[]) - +Method in class com.sleepycat.bind.tuple.TupleOutput
Writes the specified bytes to the buffer, converting each character to an unsigned byte value. -
writeChar(int) - -Method in class com.sleepycat.bdb.bind.tuple.TupleOutput +
writeChar(int) - +Method in class com.sleepycat.bind.tuple.TupleOutput
Writes a char (two byte) unsigned value to the buffer. -
writeChars(char[]) - -Method in class com.sleepycat.bdb.bind.tuple.TupleOutput +
writeChars(String) - +Method in class com.sleepycat.bind.tuple.TupleOutput
Writes the specified characters to the buffer, converting each character to a two byte unsigned value. -
writeChars(String) - -Method in class com.sleepycat.bdb.bind.tuple.TupleOutput +
writeChars(char[]) - +Method in class com.sleepycat.bind.tuple.TupleOutput
Writes the specified characters to the buffer, converting each character to a two byte unsigned value. -
writeDouble(double) - -Method in class com.sleepycat.bdb.bind.tuple.TupleOutput +
writeDouble(double) - +Method in class com.sleepycat.bind.tuple.TupleOutput
Writes a signed double (eight byte) value to the buffer. -
writeFloat(float) - -Method in class com.sleepycat.bdb.bind.tuple.TupleOutput +
writeFast(int) - +Method in class com.sleepycat.util.FastOutputStream +
Equivalent to write(int) but does not throw + IOException. +
writeFast(byte[]) - +Method in class com.sleepycat.util.FastOutputStream +
Equivalent to write(byte[]) but does not throw + IOException. +
writeFast(byte[], int, int) - +Method in class com.sleepycat.util.FastOutputStream +
Equivalent to write(byte[],int,int) but does not throw + IOException. +
writeFloat(float) - +Method in class com.sleepycat.bind.tuple.TupleOutput
Writes a signed float (four byte) value to the buffer. -
writeInt(int) - -Method in class com.sleepycat.bdb.bind.tuple.TupleOutput +
writeInt(int) - +Method in class com.sleepycat.bind.tuple.TupleOutput
Writes a signed int (four byte) value to the buffer. -
writeLong(long) - -Method in class com.sleepycat.bdb.bind.tuple.TupleOutput +
writeLong(long) - +Method in class com.sleepycat.bind.tuple.TupleOutput
Writes a signed long (eight byte) value to the buffer. -
writeShort(int) - -Method in class com.sleepycat.bdb.bind.tuple.TupleOutput +
writeShort(int) - +Method in class com.sleepycat.bind.tuple.TupleOutput
Writes a signed short (two byte) value to the buffer. -
writeString(char[]) - -Method in class com.sleepycat.bdb.bind.tuple.TupleOutput -
Writes the specified characters to the buffer, converting each character - to UTF format. -
writeString(String) - -Method in class com.sleepycat.bdb.bind.tuple.TupleOutput +
writeString(String) - +Method in class com.sleepycat.bind.tuple.TupleOutput
Writes the specified characters to the buffer, converting each character to UTF format, and adding a null terminator byte. -
writeTo(OutputStream) - -Method in class com.sleepycat.bdb.util.FastOutputStream +
writeString(char[]) - +Method in class com.sleepycat.bind.tuple.TupleOutput +
Writes the specified characters to the buffer, converting each character + to UTF format. +
writeTo(OutputStream) - +Method in class com.sleepycat.util.FastOutputStream
  -
writeUnsignedByte(int) - -Method in class com.sleepycat.bdb.bind.tuple.TupleOutput +
writeUnsignedByte(int) - +Method in class com.sleepycat.bind.tuple.TupleOutput
Writes an unsigned byte (one byte) value to the buffer. -
writeUnsignedInt(long) - -Method in class com.sleepycat.bdb.bind.tuple.TupleOutput +
writeUnsignedInt(long) - +Method in class com.sleepycat.bind.tuple.TupleOutput
Writes an unsigned int (four byte) value to the buffer. -
writeUnsignedShort(int) - -Method in class com.sleepycat.bdb.bind.tuple.TupleOutput +
writeUnsignedShort(int) - +Method in class com.sleepycat.bind.tuple.TupleOutput
Writes an unsigned short (two byte) value to the buffer.

-

-X

+

+Y

-
xa_status - -Variable in class com.sleepycat.db.DbTxnStat.Active -
If the transaction is an XA transaction, the status of the transaction, otherwise 0. -
xid - -Variable in class com.sleepycat.db.DbTxnStat.Active -
If the transaction is an XA transaction, the transaction's XA ID. +
YOUNGEST - +Static variable in class com.sleepycat.db.LockDetectMode +
Reject the lock request for the locker ID with the youngest lock.

-A B C D E F G H I J K L M N O P Q R S T U V W X - +A B C D E F G H I J K L M N O P Q R S T U V W Y + + - + +

PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am/delete.html b/db/docs/ref/am/delete.html index 8a8d2ed52..e29cffdff 100644 --- a/db/docs/ref/am/delete.html +++ b/db/docs/ref/am/delete.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Deleting records - + @@ -24,6 +24,6 @@ it from the database.

individual duplicate records, you must use a Berkeley DB cursor interface.
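For illustration, a sketch of that cursor-based approach in C follows. The handle name dbp and the key and data values are assumptions made for the example, and the database is assumed to have been opened with DB_DUP set; the cursor is positioned on the exact key/data pair with DB_GET_BOTH and only that item is deleted.

#include <string.h>
#include <db.h>

/*
 * Delete one duplicate: position a cursor on the exact key/data pair
 * with DB_GET_BOTH, then delete only the item the cursor references.
 * "dbp" is assumed to be an open DB handle created with DB_DUP set.
 */
int
delete_one_duplicate(DB *dbp, char *keystr, char *datastr)
{
	DBC *dbc;
	DBT key, data;
	int ret, t_ret;

	if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
		return (ret);

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = keystr;
	key.size = (u_int32_t)strlen(keystr) + 1;
	data.data = datastr;
	data.size = (u_int32_t)strlen(datastr) + 1;

	/* Find the exact key/data pair, then remove just that item. */
	if ((ret = dbc->c_get(dbc, &key, &data, DB_GET_BOTH)) == 0)
		ret = dbc->c_del(dbc, 0);

	if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}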


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am/get.html b/db/docs/ref/am/get.html index f541740fd..030ce2c59 100644 --- a/db/docs/ref/am/get.html +++ b/db/docs/ref/am/get.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Retrieving records - + @@ -20,13 +20,13 @@ DB->get takes a key and returns the associated data from the database.

There are a few flags that you can set to customize retrieval:

-

-

DB_GET_BOTH
Search for a matching key and data item, that is, only return success +
+
DB_GET_BOTH
Search for a matching key and data item, that is, only return success if both the key and the data items match those stored in the database. -

DB_RMW
Read-modify-write: acquire write locks instead of read locks during +
DB_RMW
Read-modify-write: acquire write locks instead of read locks during retrieval. This can enhance performance in threaded applications by reducing the chance of deadlock. -

DB_SET_RECNO
If the underlying database is a Btree, and was configured so that it +
DB_SET_RECNO
If the underlying database is a Btree, and was configured so that it is possible to search it by logical record number, retrieve a specific record.
@@ -35,6 +35,6 @@ record. set.
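As an illustration of the flags above, the following minimal sketch performs an ordinary lookup and then a DB_GET_BOTH lookup. The handle name dbp and the key and data values are assumptions made for the example.

#include <string.h>
#include <db.h>

/*
 * Two retrievals against an already-open DB handle "dbp".  The first
 * is a plain lookup; the second uses DB_GET_BOTH, so it succeeds only
 * if the exact key/data pair is present in the database.
 */
int
lookup_examples(DB *dbp)
{
	DBT key, data;
	int ret;

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = "fruit";
	key.size = sizeof("fruit");

	/* Ordinary lookup: return the data item stored under "fruit". */
	if ((ret = dbp->get(dbp, NULL, &key, &data, 0)) != 0)
		return (ret);

	/* DB_GET_BOTH: succeed only if "fruit"/"apple" is stored. */
	memset(&data, 0, sizeof(data));
	data.data = "apple";
	data.size = sizeof("apple");
	return (dbp->get(dbp, NULL, &key, &data, DB_GET_BOTH));
}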


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am/join.html b/db/docs/ref/am/join.html index aa1157e1c..7805ac7b2 100644 --- a/db/docs/ref/am/join.html +++ b/db/docs/ref/am/join.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Equality Join - + @@ -70,11 +70,11 @@ to the DB->set_flags method.

What the DB->join method does is review a list of secondary keys, and, when it finds a data item that appears as a data item for all of the -secondary keys, it uses that data items as a lookup into the primary +secondary keys, it uses that data item as a lookup into the primary database, and returns the associated data item.

-

If there were a another secondary index that had as its key the -cost of the fruit, a similar lookup could be done on stores -where inexpensive fruit could be purchased:

+

If there were another secondary index that had as its key the cost +of the fruit, a similar lookup could be done on stores where inexpensive +fruit could be purchased:

@@ -97,16 +97,16 @@ would be used as the key for retrieval from the primary index, and would then return the store where expensive, red fruit could be purchased.

Example

Consider the following three databases:

-

-

personnel

    +
    +
    personnel

    • key = SSN
    • data = record containing name, address, phone number, job title
    -

    lastname

      +
      lastname

      • key = lastname
      • data = SSN
      -

      jobs

        +
        jobs

        • key = job title
        • data = SSN
        @@ -186,6 +186,6 @@ from the join method. This code then loops over the join cursor getting the personnel records of each one until there are no more.

Secondary key:Secondary data:
expensive blueberry
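The following is a compressed sketch of the join sequence described above, not a replacement for the full example code on this page. The handle and index names (pri, by_color, by_cost) and the key values are assumptions made for the illustration: each secondary cursor is positioned with DB_SET, and DB->join returns a cursor over the primary records matching all of them.

#include <stdio.h>
#include <string.h>
#include <db.h>

int
join_example(DB *pri, DB *by_color, DB *by_cost)
{
	DBC *curslist[3] = { NULL, NULL, NULL };
	DBC *join_curs = NULL;
	DBT key, data;
	int ret;

	if ((ret = by_color->cursor(by_color, NULL, &curslist[0], 0)) != 0)
		goto err;
	if ((ret = by_cost->cursor(by_cost, NULL, &curslist[1], 0)) != 0)
		goto err;
	curslist[2] = NULL;		/* The cursor list is NULL-terminated. */

	/* Position each secondary cursor on the key of interest. */
	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = "red";
	key.size = sizeof("red");
	if ((ret = curslist[0]->c_get(curslist[0], &key, &data, DB_SET)) != 0)
		goto err;

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = "expensive";
	key.size = sizeof("expensive");
	if ((ret = curslist[1]->c_get(curslist[1], &key, &data, DB_SET)) != 0)
		goto err;

	if ((ret = pri->join(pri, curslist, &join_curs, 0)) != 0)
		goto err;

	/* Each c_get returns one matching primary key/data pair. */
	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	while ((ret = join_curs->c_get(join_curs, &key, &data, 0)) == 0)
		printf("matched primary key: %.*s\n",
		    (int)key.size, (char *)key.data);
	if (ret == DB_NOTFOUND)
		ret = 0;

err:	if (join_curs != NULL)
		(void)join_curs->c_close(join_curs);
	if (curslist[1] != NULL)
		(void)curslist[1]->c_close(curslist[1]);
	if (curslist[0] != NULL)
		(void)curslist[0]->c_close(curslist[0]);
	return (ret);
}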

PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am/open.html b/db/docs/ref/am/open.html index 079f7cc50..f49ee98ed 100644 --- a/db/docs/ref/am/open.html +++ b/db/docs/ref/am/open.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Database open - + @@ -17,31 +17,31 @@

Database open

The DB->open method opens a database, and takes five arguments:

-

-

file
The name of the file to be opened. -

database
An optional database name. -

type
The type of database to open. This value will be one of the four access +
+
file
The name of the file to be opened. +
database
An optional database name. +
type
The type of database to open. This value will be one of the four access methods Berkeley DB supports: DB_BTREE, DB_HASH, DB_QUEUE or DB_RECNO, or the special value DB_UNKNOWN, which allows you to open an existing file without knowing its type. -

mode
The permissions to give to any created file. +
mode
The permissions to give to any created file.

There are a few flags that you can set to customize open:

-

-

DB_CREATE
Create the underlying database and any necessary physical files. -

DB_NOMMAP
Do not map this database into process memory. -

DB_RDONLY
Treat the data base as read-only. -

DB_THREAD
The returned handle is free-threaded, that is, it can be used +
+
DB_CREATE
Create the underlying database and any necessary physical files. +
DB_NOMMAP
Do not map this database into process memory. +
DB_RDONLY
Treat the database as read-only. -
DB_THREAD
The returned handle is free-threaded, that is, it can be used simultaneously by multiple threads within the process. -

DB_TRUNCATE
Physically truncate the underlying database file, discarding all +
DB_TRUNCATE
Physically truncate the underlying database file, discarding all databases it contained. Underlying filesystem primitives are used to implement this flag. For this reason it is only applicable to the physical file and cannot be used to discard individual databases from within physical files. -

DB_UPGRADE
Upgrade the database format as necessary. +
DB_UPGRADE
Upgrade the database format as necessary.
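For illustration, a minimal sketch of creating a handle and opening a Btree database with DB_CREATE follows. The file name, the 0664 file mode and the NULL transaction and database-name arguments are choices made for the example, and the call assumes the interface in which DB->open takes a transaction handle.

#include <db.h>

int
open_example(DB **dbpp)
{
	DB *dbp;
	int ret;

	/* Create the handle; no environment is used in this sketch. */
	if ((ret = db_create(&dbp, NULL, 0)) != 0)
		return (ret);

	/* Open (creating if necessary) a Btree database in access.db. */
	if ((ret = dbp->open(dbp,
	    NULL, "access.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
		(void)dbp->close(dbp, 0);
		return (ret);
	}
	*dbpp = dbp;
	return (0);
}

If a database environment is in use, its handle would be passed to db_create in place of the NULL second argument.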

PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am/opensub.html b/db/docs/ref/am/opensub.html index bf7aae61d..3be91dd0a 100644 --- a/db/docs/ref/am/opensub.html +++ b/db/docs/ref/am/opensub.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Opening multiple databases in a single file - + @@ -21,7 +21,7 @@ reasonably small, in order to avoid creating a large number of underlying files, or when it is desirable to include secondary index databases in the same file as the primary index database. Multiple databases are an administrative convenience and using them is unlikely -to effect database performance. To open or create a file that will +to affect database performance. To open or create a file that will include more than a single database, specify a database name when calling the DB->open method.

Physical files do not need to be comprised of a single type of database, @@ -34,48 +34,48 @@ standard Berkeley DB file size and disk space limitations.

not initially created using a database name, that is, the file must initially be specified as capable of containing multiple databases for a second database to be created in it.

-

It is not an error to open a file that contains multiple databases without -specifying a database name, however the database type should be specified -as DB_UNKNOWN and the database must be opened read-only. The handle that -is returned from such a call is a handle on a database whose key values -are the names of the databases stored in the database file and whose data -values are opaque objects. No keys or data values may be modified or -stored using this database handle.

-

Storing multiple databases in a single file is identical to storing each -database in a separate file with the exception of some configuration -information and the likely need for locking and a shared underlying -memory pool.

-

There are four types of configuration information which must be specified -consistently for all databases in a file, rather than differing on a -per-database basis. They are: byte order, checksum and encryption -behavior, and page size. When creating additional databases in a file, -any of these configuration values specified must be consistent with the -existing databases in the file or an error will be returned.

+

It is not an error to open a file that contains multiple databases +without specifying a database name, however the database type should be +specified as DB_UNKNOWN and the database must be opened read-only. The +handle that is returned from such a call is a handle on a database whose +key values are the names of the databases stored in the database file +and whose data values are opaque objects. No keys or data values may be +modified or stored using this database handle.

+

The main difference when storing multiple databases in a single file +rather than in separate files is that if any of the databases +in a file is opened for updates, all of the databases in the file must +share a memory pool. In other words, they must be opened in the same +environment. In addition, there are some constraints on configuration +information that apply to databases in the same file.

-An additional difference is how locking and the underlying memory pool -services must to be configured. As an example, consider two databases -instantiated in two different physical files. If access to each -separate database is single-threaded, there is no reason to perform any -locking of any kind, and the two databases may be read and written -simultaneously. Further, there would be no requirement to create a -shared database environment in which to open the databases.

-

Because multiple databases in a file exist in a single physical file, -opening two databases in the same file requires locking be enabled -(unless access to the databases is known to be single-threaded, that -is, only one of the databases is ever read or written at a time). -As the locks for the two databases can only conflict during page -allocation, this additional locking is unlikely to effect performance.

+If databases are in separate files, and access to each separate database +is single-threaded, there is no reason to perform any locking of any +kind, and the two databases may be read and written simultaneously. +Further, there would be no requirement to create a shared database +environment in which to open the databases.

+

However, since multiple databases in a file exist in a single physical +file, opening two databases in the same file simultaneously requires +locking be enabled unless all of the handles are read-only. As the +locks for the two databases can only conflict during page allocation, +this additional locking is unlikely to affect performance.

Also, because multiple databases in a file exist in a single physical file, opening two databases in the same file requires the databases share an underlying memory pool so that per-physical-file information common between the two databases is updated correctly.

-

In summary, programmers writing applications that open multiple databases -in a single file will almost certainly need to create a shared database -environment in the application as well. For more information on -database environments, see Database +

In summary, programmers writing applications that open multiple +databases in a single file will almost certainly need to create a shared +database environment in the application as well. For more information +on database environments, see Database environment introduction.

+

In addition, there are four types of configuration information which +must be specified consistently for all databases in a file, rather than +differing on a per-database basis. They are: byte order, checksum and +encryption behavior, and page size. When creating additional databases +in a file, any of these configuration values specified must be +consistent with the existing databases in the file or an error will be +returned.
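The following sketch shows the shape of such an application: two databases in one physical file, opened through a shared environment that provides locking and a common memory pool. The environment home, file name and database names are illustrative only.

#include <db.h>

int
open_two_in_one_file(DB_ENV **envp, DB **precipp, DB **tempp)
{
	DB_ENV *dbenv;
	DB *precip = NULL, *temp = NULL;
	int ret;

	/* Create and open the shared environment. */
	if ((ret = db_env_create(&dbenv, 0)) != 0)
		return (ret);
	if ((ret = dbenv->open(dbenv, "/var/dbhome",
	    DB_CREATE | DB_INIT_LOCK | DB_INIT_MPOOL, 0)) != 0)
		goto err;

	/* Both databases live in weather.db, under different names. */
	if ((ret = db_create(&precip, dbenv, 0)) != 0)
		goto err;
	if ((ret = precip->open(precip, NULL,
	    "weather.db", "precipitation", DB_BTREE, DB_CREATE, 0664)) != 0)
		goto err;
	if ((ret = db_create(&temp, dbenv, 0)) != 0)
		goto err;
	if ((ret = temp->open(temp, NULL,
	    "weather.db", "temperature", DB_BTREE, DB_CREATE, 0664)) != 0)
		goto err;

	*envp = dbenv, *precipp = precip, *tempp = temp;
	return (0);

err:	if (temp != NULL)
		(void)temp->close(temp, 0);
	if (precip != NULL)
		(void)precip->close(precip, 0);
	(void)dbenv->close(dbenv, 0);
	return (ret);
}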


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am/ops.html b/db/docs/ref/am/ops.html index dc1b2c431..5e9af0ca0 100644 --- a/db/docs/ref/am/ops.html +++ b/db/docs/ref/am/ops.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Access method operations - + @@ -29,60 +29,62 @@ version upgrade (DB->upgrade), veri (DB->verify), flushing to a backing file (DB->sync), and association of secondary indices (DB->associate). Database handles are eventually closed using DB->close.

- +
- + - - + - + - + + + + + + - - - - - - + + - - - - + + + + + - - - - - + + + + + +
Databases and Related MethodsDescription
Database OperationsDescription
db_createCreate a database handle
DB->associateAssociate a secondary index
DB->closeClose a database
DB->cursorCreate a cursor handle
DB->delDelete items from a database
DB->errError message with error string
DB->errxError message
DB->fdReturn a file descriptor from a database
DB->getGet items from a database
DB->get, DB->pgetGet items from a database
DB->get_byteswappedReturn if the underlying database is in host order
DB->get_envReturn a handle for the underlying database environment
DB->get_envReturn database environment handle
DB->get_typeReturn the database type
DB->joinPerform a database join on cursors
DB->key_rangeReturn estimate of key location
DB->openOpen a database
DB->pgetGet items from a database
DB->putStore items into a database
DB->removeRemove a database
DB->renameRename a database
DB->stat, DB->stat_printDatabase statistics
DB->syncFlush a database to stable storage
DB->truncateEmpty a database
DB->upgradeUpgrade a database
DB->verifyVerify/salvage a database
Database Configuration
DB->set_allocSet local space allocation functions
DB->set_append_recnoSet record append callback
DB->set_bt_compareSet a Btree comparison function
DB->set_bt_minkeySet the minimum number of keys per Btree page
DB->set_bt_prefixSet a Btree prefix comparison function
DB->set_cachesizeSet the database cache size
DB->set_dup_compareSet a duplicate comparison function
DB->set_encryptSet the database cryptographic key
DB->set_errcallSet error message callback
DB->set_errfileSet error message FILE
DB->set_errcall, DB->set_msgcallSet error and informational message callback
DB->set_errfile, DB->set_msgfileSet error and informational message FILE
DB->set_errpfxSet error message prefix
DB->set_feedbackSet feedback callback
DB->set_flagsGeneral database configuration
DB->set_h_ffactorSet the Hash table density
DB->set_h_hashSet a hashing function
DB->set_h_nelemSet the Hash table size
DB->set_lorderSet the database byte order
DB->set_pagesizeSet the underlying database page size
DB->set_paniccallSet panic callback
DB->set_q_extentsizeSet Queue database extent size
Btree/Recno Configuration
DB->set_append_recnoSet record append callback
DB->set_bt_compareSet a Btree comparison function
DB->set_bt_minkeySet the minimum number of keys per Btree page
DB->set_bt_prefixSet a Btree prefix comparison function
DB->set_re_delimSet the variable-length record delimiter
DB->set_re_lenSet the fixed-length record length
DB->set_re_padSet the fixed-length record pad byte
DB->set_re_sourceSet the backing Recno text file
DB->statReturn database statistics
DB->syncFlush a database to stable storage
DB->truncateEmpty a database
DB->upgradeUpgrade a database
DB->verifyVerify/salvage a database
Hash Configuration
DB->set_h_ffactorSet the Hash table density
DB->set_h_hashSet a hashing function
DB->set_h_nelemSet the Hash table size
Queue Configuration
DB->set_q_extentsizeSet Queue database extent size

PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am/put.html b/db/docs/ref/am/put.html index d9156bd98..f56346b66 100644 --- a/db/docs/ref/am/put.html +++ b/db/docs/ref/am/put.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Storing records - + @@ -20,11 +20,11 @@ DB->put takes a key and stores the associated data into the database.

There are a few flags that you can set to customize storage:

-

-

DB_APPEND
Simply append the data to the end of the database, treating the database +
+
DB_APPEND
Simply append the data to the end of the database, treating the database much like a simple log. This flag is only valid for the Queue and Recno access methods. -

DB_NOOVERWRITE
Only store the data item if the key does not already appear in the database. +
DB_NOOVERWRITE
Only store the data item if the key does not already appear in the database.

If the database has been configured to support duplicate records, the DB->put method will add the new data value at the end of the duplicate @@ -32,6 +32,6 @@ set. If the database supports sorted duplicates, the new data value is inserted at the correct sorted location.
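As a brief illustration, the sketch below stores a single key/data pair with DB_NOOVERWRITE. The handle name dbp and the key and data values are assumptions made for the example; a DB_KEYEXIST return indicates the key was already present.

#include <string.h>
#include <db.h>

int
store_example(DB *dbp)
{
	DBT key, data;

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = "fruit";
	key.size = sizeof("fruit");
	data.data = "apple";
	data.size = sizeof("apple");

	/* Store the pair, but only if "fruit" is not already a key. */
	return (dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE));
}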


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am/second.html b/db/docs/ref/am/second.html index 850144b76..169a974e8 100644 --- a/db/docs/ref/am/second.html +++ b/db/docs/ref/am/second.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Secondary indices - + @@ -36,7 +36,8 @@ a secondary index in which the secondary key was this last name.

CREATE TABLE students(student_id CHAR(4) NOT NULL,
 	lastname CHAR(15), firstname CHAR(15), PRIMARY KEY(student_id));
 CREATE INDEX lname ON students(lastname);
-

In Berkeley DB, this would work as follows:

+

In Berkeley DB, this would work as follows (a +Java API example is also available):

struct student_record { char student_id[4]; char last_name[15]; @@ -213,6 +214,6 @@ a new database handle and the DB->remove secondary database handles associated with it.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am/second.javas b/db/docs/ref/am/second.javas new file mode 100644 index 000000000..bb96a7bc5 --- /dev/null +++ b/db/docs/ref/am/second.javas @@ -0,0 +1,156 @@ +/*- + * Copyright (c) 2002 + * Sleepycat Software. All rights reserved. + * + * $Id: second.javas,v 10.1 2004/09/15 19:40:07 bostic Exp $ + */ +package com.sleepycat.examples; + +import com.sleepycat.db.*; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.InputStreamReader; +import java.io.IOException; +import java.io.PrintStream; + +class SecondaryExample +{ + private static final String progname = "SecondaryExample"; + private static final String DATABASE_HOME = "TESTDIR"; + + public static void main(String[] args) + { + try { + SecondaryExample app = new SecondaryExample(); + app.run(); + } catch(Exception e) { + System.err.println(progname + ": " + e); + e.printStackTrace(System.err); + System.exit(1); + } + } + + void run() throws DbException, FileNotFoundException + { + DbEnv dbenv = new DbEnv(0); + + /* Open the environment. */ + dbenv.open(DATABASE_HOME, + Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_LOG | + Db.DB_INIT_MPOOL | Db.DB_INIT_TXN, 0); + + try { + run_app(dbenv); + } finally { + dbenv.close(0); + } + } + + private void run_app(DbEnv dbenv) + throws DbException, FileNotFoundException + { + Db dbp, sdbp; + Dbt key, pkey, skey, data; + StudentRecord srec; + + /* Open/create primary */ + dbp = new Db(dbenv, 0); + dbp.open(null, "students.db", null, Db.DB_BTREE, Db.DB_CREATE, + 0600); + + /* + * Open/create secondary. Note that it supports duplicate data + * items, since last names might not be unique. + */ + sdbp = new Db(dbenv, 0); + sdbp.set_flags(Db.DB_DUP | Db.DB_DUPSORT); + sdbp.open(null, "lastname.db", null, Db.DB_BTREE, Db.DB_CREATE, + 0600); + + try { + /* Associate the secondary with the primary. 
*/ + dbp.associate(sdbp, new GetName(), 0); + + /* Add a new record */ + key = new Dbt(); + key.set_data("WC42".getBytes()); + key.set_size(4); + srec = new StudentRecord(); + srec.student_id = "WC42"; + srec.last_name = "Churchill "; + srec.first_name = "Winston "; + data = new Dbt(); + srec.encode(data); + + System.out.println("Adding a record with primary key " + + new String(key.get_data()) + " and secondary key " + + srec.last_name); + dbp.put(null, key, data, 0); + + /* Now do a lookup */ + skey = new Dbt(); + pkey = new Dbt(); + data = new Dbt(); + skey.set_data("Churchill ".getBytes()); + skey.set_size(15); + System.out.println("Searching with secondary key " + + new String(skey.get_data())); + sdbp.pget(null, skey, pkey, data, 0); + + System.out.println("Found a record with primary key " + + new String(pkey.get_data())); + } finally { + dbp.close(0); + sdbp.close(0); + } + } + + /* + * getname -- extracts a secondary key (the last name) from a primary + * key/data pair + */ + class GetName implements DbSecondaryKeyCreate { + public int secondary_key_create(Db secondary, + Dbt pkey, Dbt pdata, Dbt skey) { + StudentRecord srec = new StudentRecord(); + srec.decode(pdata); + + // Make a fixed-length array of last_name + byte[] last_name_data = srec.last_name.getBytes(); + byte[] last_name_raw = new byte[15]; + System.arraycopy(last_name_data, 0, last_name_raw, 0, + last_name_data.length); + + skey.set_data(last_name_raw); + skey.set_size(last_name_raw.length); + return (0); + } + } + + class StudentRecord + { + String student_id; // assumed to be 4 bytes long + String last_name; // assumed to be 15 bytes long + String first_name; // assumed to be 15 bytes long + + void decode(Dbt dbt) { + byte[] data = dbt.get_data(); + student_id = new String(data, 0, 4); + last_name = new String(data, 4, 15); + first_name = new String(data, 19, 15); + } + + void encode(Dbt dbt) { + byte[] data = new byte[34]; + System.arraycopy(student_id.getBytes(), 0, data, 0, 4); + byte[] last_name_raw = last_name.getBytes(); + System.arraycopy(last_name_raw, 0, data, 4, + last_name_raw.length); + byte[] first_name_raw = first_name.getBytes(); + System.arraycopy(first_name_raw, 0, data, 19, + first_name_raw.length); + dbt.set_data(data); + dbt.set_size(data.length); + } + } +} diff --git a/db/docs/ref/am/stat.html b/db/docs/ref/am/stat.html index 2a655278a..a1864b61d 100644 --- a/db/docs/ref/am/stat.html +++ b/db/docs/ref/am/stat.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Database statistics - + @@ -20,12 +20,12 @@ database, for example, the number of key/data pairs in the database, how the database was originally configured, and so on.

There is one flag you can set to customize the returned statistics:

-

-

DB_FAST_STAT
Return only information that can be acquired without traversing the +
+
DB_FAST_STAT
Return only information that can be acquired without traversing the entire database.
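For illustration, the following sketch prints the key and data counts of a Btree database using DB_FAST_STAT. It assumes an open handle named dbp and the interface in which DB->stat takes a transaction argument (older releases omit it); the statistics buffer is allocated by Berkeley DB and released with free().

#include <stdio.h>
#include <stdlib.h>
#include <db.h>

int
print_stats(DB *dbp)
{
	DB_BTREE_STAT *sp;
	int ret;

	/* DB_FAST_STAT avoids walking the entire database. */
	if ((ret = dbp->stat(dbp, NULL, &sp, DB_FAST_STAT)) != 0)
		return (ret);

	printf("keys: %lu, data items: %lu\n",
	    (unsigned long)sp->bt_nkeys, (unsigned long)sp->bt_ndata);
	free(sp);
	return (0);
}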

PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am/sync.html b/db/docs/ref/am/sync.html index b36c7f2d3..22441f8b6 100644 --- a/db/docs/ref/am/sync.html +++ b/db/docs/ref/am/sync.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Flushing the database cache - + @@ -34,6 +34,6 @@ atomically replace the original database with the updated copy.

PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am/truncate.html b/db/docs/ref/am/truncate.html index fe4eecd77..cd51aa52d 100644 --- a/db/docs/ref/am/truncate.html +++ b/db/docs/ref/am/truncate.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Database truncation - + @@ -19,6 +19,6 @@

The DB->truncate method empties a database of all records.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am/upgrade.html b/db/docs/ref/am/upgrade.html index 37da05527..f6b0ce882 100644 --- a/db/docs/ref/am/upgrade.html +++ b/db/docs/ref/am/upgrade.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Database upgrade - + @@ -46,6 +46,6 @@ appropriate copies of their application or application sources if they may need to access archived databases without first upgrading them.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am/verify.html b/db/docs/ref/am/verify.html index ab467affb..d4958ce6a 100644 --- a/db/docs/ref/am/verify.html +++ b/db/docs/ref/am/verify.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Database verification and salvage - + @@ -46,6 +46,6 @@ preferable to any kind of data loss.

PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_conf/bt_compare.html b/db/docs/ref/am_conf/bt_compare.html index 0f9453f8b..8173dd75f 100644 --- a/db/docs/ref/am_conf/bt_compare.html +++ b/db/docs/ref/am_conf/bt_compare.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Btree comparison - + @@ -76,9 +76,7 @@ compare_dbt(dbp, a, b) well-ordered. The most important implication of being well-ordered is that the key relations must be transitive, that is, if key A is less than key B, and key B is less than key C, then the comparison routine -must also return that key A is less than key C. In addition, comparisons -will only be able to return 0 when comparing full length keys; partial -key comparisons must always return a result less than or greater than 0.

+must also return that key A is less than key C.

It is reasonable for a comparison function to not examine an entire key in some applications, which implies that partial keys may be specified to the Berkeley DB interfaces. When partial keys are specified to Berkeley DB, @@ -89,6 +87,6 @@ key stored in the database. The actual key can be retrieved by calling the DBcursor->c_get method with the DB_CURRENT flag.
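A sketch of a comparison routine that satisfies the well-ordered, transitive contract described above, for keys holding a native unsigned 32-bit integer; the routine name is invented for the sketch, and it would be registered with DB->set_bt_compare before the database is opened:

        int
        compare_uint32(dbp, a, b)
                DB *dbp;
                const DBT *a, *b;
        {
                u_int32_t ai, bi;

                /* Copy the values out of the DBTs to avoid alignment problems. */
                memcpy(&ai, a->data, sizeof(u_int32_t));
                memcpy(&bi, b->data, sizeof(u_int32_t));
                return (ai < bi ? -1 : (ai > bi ? 1 : 0));
        }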


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_conf/bt_minkey.html b/db/docs/ref/am_conf/bt_minkey.html index 1ad0d403f..2ca2eed2f 100644 --- a/db/docs/ref/am_conf/bt_minkey.html +++ b/db/docs/ref/am_conf/bt_minkey.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Minimum keys per page - + @@ -49,6 +49,6 @@ value incorrectly can result in overusing overflow pages and decreasing the application's overall performance.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_conf/bt_prefix.html b/db/docs/ref/am_conf/bt_prefix.html index 7d5ec1f74..b22d9c2a0 100644 --- a/db/docs/ref/am_conf/bt_prefix.html +++ b/db/docs/ref/am_conf/bt_prefix.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Btree prefix comparison - + @@ -27,13 +27,15 @@ comparison function of the Btree, since what distinguishes any two keys depends entirely on the function used to compare them. This means that if a prefix comparison routine is specified by the application, a compatible overall comparison routine must also have been specified.

-

Prefix comparison routines are passed pointers to keys as arguments. The -keys are represented as DBT structures. The prefix comparison -function must return the number of bytes of the second key argument that -are necessary to determine if it is greater than the first key argument. -If the keys are equal, the length of the second key should be returned. -The only fields that the routines may examine in the DBT -structures are data and size fields.

+

Prefix comparison routines are passed pointers to keys as arguments. +The keys are represented as DBT structures. The only fields +the routines may examine in the DBT structures are data +and size fields.

+

The prefix comparison function must return the number of bytes necessary +to distinguish the two keys. If the keys are identical (equal and equal +in length), the length should be returned. If the keys are equal up to +the smaller of the two lengths, then the length of the smaller key plus +1 should be returned.

An example prefix comparison routine follows:

u_int32_t
 compare_prefix(dbp, a, b)
@@ -63,6 +65,6 @@ compare_prefix(dbp, a, b)
 sets can produce significantly reduced tree sizes and faster search times.
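Because a prefix routine is only correct relative to the overall comparison routine, the two are normally registered together before the database is opened; a sketch, where compare_prefix is the routine shown above and compare_func stands in for the application's Btree comparison function:

        if ((ret = dbp->set_bt_compare(dbp, compare_func)) != 0 ||
            (ret = dbp->set_bt_prefix(dbp, compare_prefix)) != 0) {
                dbp->err(dbp, ret, "DB->set_bt_compare/DB->set_bt_prefix");
                return (ret);
        }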


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_conf/bt_recnum.html b/db/docs/ref/am_conf/bt_recnum.html index f1953b923..6003b102f 100644 --- a/db/docs/ref/am_conf/bt_recnum.html +++ b/db/docs/ref/am_conf/bt_recnum.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Retrieving Btree records by logical record number - + @@ -101,6 +101,6 @@ err: /* Close the cursor. */ }


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_conf/byteorder.html b/db/docs/ref/am_conf/byteorder.html index bb305f7a3..f0ee13f84 100644 --- a/db/docs/ref/am_conf/byteorder.html +++ b/db/docs/ref/am_conf/byteorder.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Selecting a byte order - + @@ -33,6 +33,6 @@ exactly as they were written when retrieved on a big-endian format architecture.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_conf/cachesize.html b/db/docs/ref/am_conf/cachesize.html index c24e04337..6b0e5ea4a 100644 --- a/db/docs/ref/am_conf/cachesize.html +++ b/db/docs/ref/am_conf/cachesize.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Selecting a cache size - + @@ -81,6 +81,6 @@ means that the cache is working well, yielding a 97% cache hit rate. The as a whole and for each file within the cache separately.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_conf/dup.html b/db/docs/ref/am_conf/dup.html index ec6ffd7a5..c343a7bc5 100644 --- a/db/docs/ref/am_conf/dup.html +++ b/db/docs/ref/am_conf/dup.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Duplicate data items - + @@ -70,6 +70,6 @@ presence of duplicates (sorted or not), see the DB->put, DBcursor->c_get and DBcursor->c_put documentation.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_conf/extentsize.html b/db/docs/ref/am_conf/extentsize.html index 595e27ad2..de7603608 100644 --- a/db/docs/ref/am_conf/extentsize.html +++ b/db/docs/ref/am_conf/extentsize.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Selecting a Queue extent size - + @@ -39,6 +39,6 @@ many files, all those files will need to be open at the same time, consuming system and process file resources.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_conf/h_ffactor.html b/db/docs/ref/am_conf/h_ffactor.html index f8720544f..6ca6b339c 100644 --- a/db/docs/ref/am_conf/h_ffactor.html +++ b/db/docs/ref/am_conf/h_ffactor.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Page fill factor - + @@ -27,6 +27,6 @@ the DB->set_h_ffactor method. be selected dynamically as pages are filled.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_conf/h_hash.html b/db/docs/ref/am_conf/h_hash.html index b0ee1f639..5c4751f03 100644 --- a/db/docs/ref/am_conf/h_hash.html +++ b/db/docs/ref/am_conf/h_hash.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Specifying a database hash - + @@ -35,6 +35,6 @@ take a reference to a DB object, a point its length, as arguments and return an unsigned, 32-bit hash value.
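A sketch of a hash callback with the shape described above (a DB reference, a byte string and its length in, an unsigned 32-bit value out), registered with DB->set_h_hash; the routine name and the particular mixing step are invented for the sketch:

        u_int32_t
        app_hash(dbp, bytes, length)
                DB *dbp;
                const void *bytes;
                u_int32_t length;
        {
                const u_int8_t *p;
                u_int32_t hash;

                /* Any uniformly distributed function of the key bytes will do. */
                for (hash = 0, p = bytes; length-- > 0; ++p)
                        hash = hash * 31 + *p;
                return (hash);
        }

        dbp->set_h_hash(dbp, app_hash);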


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_conf/h_nelem.html b/db/docs/ref/am_conf/h_nelem.html index 0b6346c27..94d6176db 100644 --- a/db/docs/ref/am_conf/h_nelem.html +++ b/db/docs/ref/am_conf/h_nelem.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Hash table size - + @@ -28,6 +28,6 @@ of elements to be a useful value to Berkeley DB, the
PrevRefNext -

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_conf/intro.html b/db/docs/ref/am_conf/intro.html index 5e34912c0..2f8139cf7 100644 --- a/db/docs/ref/am_conf/intro.html +++ b/db/docs/ref/am_conf/intro.html @@ -1,18 +1,18 @@ - - + + Berkeley DB Reference Guide: What are the available access methods? - + -

Berkeley DB Reference Guide:
Access Methods

PrevRefNext +PrevRefNext

What are the available access methods?

@@ -39,8 +39,8 @@ the head of the queue. The Queue access method uses record level locking.

The Recno access method stores both fixed and variable-length records with logical record numbers as keys, optionally backed by a flat text (byte stream) file.

-

PrevRefNext +

PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_conf/logrec.html b/db/docs/ref/am_conf/logrec.html index c1ca82a5d..a273c6d63 100644 --- a/db/docs/ref/am_conf/logrec.html +++ b/db/docs/ref/am_conf/logrec.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Logical record numbers - + @@ -30,11 +30,6 @@ The db_recno_t type is a 32-bit unsigned type, which limits the number of logical records in a Queue or Recno database, and the maximum logical record which may be directly retrieved from a Btree database, to 4,294,967,295.

-

Record numbers in Queue databases wrap around. When the tail of the -queue reaches the maximum record number, the next record appended will -be given record number 1. If the head of the queue ever catches up to -the tail of the queue, the Berkeley DB methods will return the system error -EFBIG.

Record numbers in Recno databases can be configured to run in either mutable or fixed mode: mutable, where logical record numbers change as records are deleted or inserted, and fixed, where record numbers never @@ -45,6 +40,12 @@ as records are deleted or inserted, the logical record number for other records in the database can change. See Logically renumbering records for more information.

+

When appending new data items into Queue databases, record numbers wrap +around. When the tail of the queue reaches the maximum record number, +the next record appended will be given record number 1. If the head of +the queue ever catches up to the tail of the queue, Berkeley DB will return +the system error EFBIG. Record numbers do not wrap around when appending +new data items into Recno databases.
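A sketch, assuming an open Queue or Recno handle dbp, of appending an item and reporting the record number Berkeley DB assigns; the key DBT is set up so the allocated db_recno_t is returned into the application's own variable:

        DBT key, data;
        db_recno_t recno;
        int ret;

        memset(&key, 0, sizeof(key));
        memset(&data, 0, sizeof(data));
        key.data = &recno;
        key.ulen = sizeof(recno);
        key.flags = DB_DBT_USERMEM;
        data.data = "a new item";
        data.size = sizeof("a new item");

        if ((ret = dbp->put(dbp, NULL, &key, &data, DB_APPEND)) == 0)
                printf("assigned record number %lu\n", (u_long)recno);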

Configuring Btree databases to support record numbers can severely limit the throughput of applications with multiple concurrent threads writing the database, because locations used to store record counts often become @@ -118,6 +119,6 @@ recno_build(dbp) }

PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_conf/malloc.html b/db/docs/ref/am_conf/malloc.html index 8f1aed123..c8dad08f8 100644 --- a/db/docs/ref/am_conf/malloc.html +++ b/db/docs/ref/am_conf/malloc.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Non-local memory allocation - + @@ -28,6 +28,6 @@ uses to free it, or vice versa. To avoid this problem, the give Berkeley DB references to the application's allocation routines.
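A sketch, assuming the DB_ENV->set_alloc method is the interface being referred to above; app_malloc, app_realloc and app_free are the application's own allocation routines, named here only for illustration, and the call must be made before the environment is opened:

        if ((ret = dbenv->set_alloc(dbenv, app_malloc, app_realloc, app_free)) != 0) {
                dbenv->err(dbenv, ret, "DB_ENV->set_alloc");
                return (ret);
        }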


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_conf/pagesize.html b/db/docs/ref/am_conf/pagesize.html index 1a6002c25..2772543c8 100644 --- a/db/docs/ref/am_conf/pagesize.html +++ b/db/docs/ref/am_conf/pagesize.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Selecting a page size - + @@ -73,6 +73,6 @@ from which your database can recover See information.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_conf/re_source.html b/db/docs/ref/am_conf/re_source.html index 7d525e5de..5e64cbcec 100644 --- a/db/docs/ref/am_conf/re_source.html +++ b/db/docs/ref/am_conf/re_source.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Flat-text backing files - + @@ -58,6 +58,6 @@ are either generated on the fly by software tools, or modified using a different mechanism such as a text editor.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_conf/recno.html b/db/docs/ref/am_conf/recno.html index d2281a9a4..0d14433bf 100644 --- a/db/docs/ref/am_conf/recno.html +++ b/db/docs/ref/am_conf/recno.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Managing record-based databases - + @@ -65,6 +65,6 @@ files, see Flat-text backing files.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_conf/renumber.html b/db/docs/ref/am_conf/renumber.html index 82c307581..a990d540a 100644 --- a/db/docs/ref/am_conf/renumber.html +++ b/db/docs/ref/am_conf/renumber.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Logically renumbering records - + @@ -76,6 +76,6 @@ record by its record number will also result in the


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_conf/select.html b/db/docs/ref/am_conf/select.html index 4eb01fb4f..f72200675 100644 --- a/db/docs/ref/am_conf/select.html +++ b/db/docs/ref/am_conf/select.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Selecting an access method - + @@ -113,6 +113,6 @@ permanent storage is a flat text file and the database is used as a fast, temporary storage area while the data is being read or modified.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_misc/align.html b/db/docs/ref/am_misc/align.html index 7daceaac6..80f59224c 100644 --- a/db/docs/ref/am_misc/align.html +++ b/db/docs/ref/am_misc/align.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Data alignment - + @@ -24,6 +24,6 @@ any necessary alignment. The

PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_misc/dbsizes.html index 732535f4c..f0cc71a22 100644 --- a/db/docs/ref/am_misc/dbsizes.html +++ b/db/docs/ref/am_misc/dbsizes.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Database limits - + @@ -27,20 +27,21 @@ maximum database size of 2^41 (2 terabytes).

if the host system does not have filesystem support for files larger than 2^32, including the ability to seek to absolute offsets within those files.

-

The largest key or data item that Berkeley DB can support is largely limited -by available memory. Specifically, while key and data byte strings may -be of essentially unlimited length, any one of them must fit into -available memory so that it can be returned to the application. As some -of the Berkeley DB interfaces return both key and data items to the application, -those interfaces will require that any key/data pair fit simultaneously -into memory. Further, as the access methods may need to compare key and -data items with other key and data items, it may be a requirement that -any two key or two data items fit into available memory. Finally, when -writing applications supporting transactions, it may be necessary to have -an additional copy of any data item in memory for logging purposes.

+

The largest key or data item that Berkeley DB can support is 2^32, +or more likely limited by available memory. Specifically, while key and data +byte strings may be of essentially unlimited length, any one of them +must fit into available memory so that it can be returned to the +application. As some of the Berkeley DB interfaces return both key and data +items to the application, those interfaces will require that any +key/data pair fit simultaneously into memory. Further, as the access +methods may need to compare key and data items with other key and data +items, it may be a requirement that any two key or two data items fit +into available memory. Finally, when writing applications supporting +transactions, it may be necessary to have an additional copy of any data +item in memory for logging purposes.

The maximum Btree depth is 255.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_misc/diskspace.html b/db/docs/ref/am_misc/diskspace.html index 6c9d693d0..b66906c72 100644 --- a/db/docs/ref/am_misc/diskspace.html +++ b/db/docs/ref/am_misc/diskspace.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Disk space requirements - + @@ -144,6 +144,6 @@ only at specific points in the file, and this too can lead to sparse hash tables.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_misc/error.html b/db/docs/ref/am_misc/error.html index d92d4766c..575e33d77 100644 --- a/db/docs/ref/am_misc/error.html +++ b/db/docs/ref/am_misc/error.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Error support - + @@ -58,6 +58,6 @@ an EACCESS system error, the error messages shown would appear as follows:

my_app: contact your system administrator: session ID was 14
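A sketch of one way such a message is produced, assuming the DB_ENV->set_errpfx and DB_ENV->errx methods; session_id is an application variable invented for the sketch:

        /* Set the error prefix once, when the environment is configured. */
        dbenv->set_errpfx(dbenv, "my_app");

        /* Later, report an application error without an appended errno string. */
        dbenv->errx(dbenv,
            "contact your system administrator: session ID was %d", session_id);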

PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_misc/faq.html b/db/docs/ref/am_misc/faq.html index 760aca14c..08d988ff1 100644 --- a/db/docs/ref/am_misc/faq.html +++ b/db/docs/ref/am_misc/faq.html @@ -1,18 +1,18 @@ - - + + Berkeley DB Reference Guide: Access method FAQ - + -

Berkeley DB Reference Guide:
Access Methods

PrevRefNext +PrevRefNext

Access method FAQ

@@ -124,8 +124,8 @@ to take the first argument to the callback function and cast it to a MyDb (in C++, cast it to (MyDb*)). That will allow you to access your data members or methods.

-

PrevRefNext +

PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_misc/get_bulk.html b/db/docs/ref/am_misc/get_bulk.html index 434cea707..bbd94ba70 100644 --- a/db/docs/ref/am_misc/get_bulk.html +++ b/db/docs/ref/am_misc/get_bulk.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Retrieving records in bulk - + @@ -42,12 +42,9 @@ This is implemented for the C and C++ APIs using four macros: DB_MULTIPLE_INIT, DB_MULTIPLE_NEXT, DB_MULTIPLE_KEY_NEXT, and DB_MULTIPLE_RECNO_NEXT. For the Java API, this is implemented as three iterator classes: -DbMultipleDataIterator -, -DbMultipleKeyDataIterator -, and -DbMultipleRecnoDataIterator -.

+MultipleDataEntry, +MultipleKeyDataEntry, and +MultipleRecnoDataEntry.

The DB_MULTIPLE_INIT macro is always called first. It initializes a local application variable and the data DBT for stepping through the set of returned records. Then, @@ -102,8 +99,8 @@ rec_display(dbp) /* * Acquire the next set of key/data pairs. This code does * not handle single key/data pairs that won't fit in a - * BUFFER_LENGTH size buffer, instead returning ENOMEM to - * our caller. + * BUFFER_LENGTH size buffer, instead returning DB_BUFFER_SMALL + * to our caller. */ if ((ret = dbcp->c_get(dbcp, &key, &data, DB_MULTIPLE_KEY | DB_NEXT)) != 0) { @@ -134,6 +131,6 @@ rec_display(dbp) }
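A compact sketch following the rec_display fragments above, assuming an open cursor dbcp and an application-chosen BUFFER_LENGTH at least as large as the database page size: walk every key/data pair using a bulk buffer.

        DBT key, data;
        void *p, *retkey, *retdata;
        size_t retklen, retdlen;
        int ret;

        memset(&key, 0, sizeof(key));
        memset(&data, 0, sizeof(data));
        if ((data.data = malloc(BUFFER_LENGTH)) == NULL)
                return (errno);
        data.ulen = BUFFER_LENGTH;
        data.flags = DB_DBT_USERMEM;

        while ((ret = dbcp->c_get(dbcp,
            &key, &data, DB_MULTIPLE_KEY | DB_NEXT)) == 0) {
                /* Step through the returned buffer one pair at a time. */
                for (DB_MULTIPLE_INIT(p, &data);;) {
                        DB_MULTIPLE_KEY_NEXT(p, &data, retkey, retklen, retdata, retdlen);
                        if (p == NULL)
                                break;
                        printf("key %.*s, data %.*s\n",
                            (int)retklen, (char *)retkey,
                            (int)retdlen, (char *)retdata);
                }
        }
        /* ret is DB_NOTFOUND once the entire database has been displayed. */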

PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_misc/partial.html b/db/docs/ref/am_misc/partial.html index d8c8896ba..80b604495 100644 --- a/db/docs/ref/am_misc/partial.html +++ b/db/docs/ref/am_misc/partial.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Partial record storage and retrieval - + @@ -129,6 +129,6 @@ ABCDEFGHIJ0123456789 -> ABCDEFGHIJ0123456789\0\0\0\0\0abcdefghij
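A sketch of how transformations like the one shown above are requested: the DB_DBT_PARTIAL flag with doff and dlen selects which bytes of the data item are read or written. The key value is invented for the sketch; this fragment overwrites 5 bytes starting at byte offset 10 of an existing record.

        DBT key, data;
        int ret;

        memset(&key, 0, sizeof(key));
        memset(&data, 0, sizeof(data));
        key.data = "somekey";
        key.size = sizeof("somekey");
        data.flags = DB_DBT_PARTIAL;
        data.doff = 10;         /* start of the region to replace */
        data.dlen = 5;          /* number of bytes to replace */
        data.data = "NEW!!";
        data.size = 5;

        ret = dbp->put(dbp, NULL, &key, &data, 0);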

PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_misc/perm.html b/db/docs/ref/am_misc/perm.html index e713c1abd..58902350c 100644 --- a/db/docs/ref/am_misc/perm.html +++ b/db/docs/ref/am_misc/perm.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Retrieved key/data permanence for C/C++ - + @@ -32,6 +32,6 @@ pointer stored into the DBT refers is o call to Berkeley DB using the DBC handle returned by DB->cursor.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_misc/stability.html b/db/docs/ref/am_misc/stability.html index e2a7fafe1..b4bb1e9d7 100644 --- a/db/docs/ref/am_misc/stability.html +++ b/db/docs/ref/am_misc/stability.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Cursor stability - + @@ -38,7 +38,9 @@ records are not locked, therefore another transaction may replace a deleted record between two calls to retrieve it. The record would not appear in the first call but would be seen by the second call. For readers not enclosed in transactions, all access method calls provide -degree 2 isolation, that is, reads are not repeatable. Finally, Berkeley DB +degree 2 isolation, that is, reads are not repeatable. A transaction +may be declared to run with degree 2 isolation by specifying the +DB_DEGREE_2 flag. Finally, Berkeley DB provides degree 1 isolation when the DB_DIRTY_READ flag is specified; that is, reads may see data modified in transactions which have not yet committed.
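A sketch of declaring degree 2 isolation with the DB_DEGREE_2 flag mentioned above, assuming an open environment handle dbenv:

        DB_TXN *txn;
        int ret;

        if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, DB_DEGREE_2)) != 0) {
                dbenv->err(dbenv, ret, "DB_ENV->txn_begin");
                return (ret);
        }
        /* Reads in this transaction see only committed data, but are not repeatable. */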

@@ -50,6 +52,6 @@ the scan inserts a new pair into the database, it is possible that duplicate key/data pairs will be returned.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_misc/struct.html b/db/docs/ref/am_misc/struct.html index 74881ede9..ff5423abe 100644 --- a/db/docs/ref/am_misc/struct.html +++ b/db/docs/ref/am_misc/struct.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Storing C/C++ structures/objects - + @@ -83,6 +83,6 @@ memcpy(&info.buf[0], string, strlen(string) + 1); without any additional work.
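A sketch of the simplest case: a structure containing no pointers can be stored by handing its bytes directly to a DBT. The structure and field names are invented for the sketch, and dbp is assumed to be an open database handle.

        struct endpoint {
                int     port;
                char    host[64];
        } rec;
        DBT key, data;
        int ret;

        memset(&rec, 0, sizeof(rec));
        rec.port = 8080;
        strcpy(rec.host, "db.example.com");

        memset(&key, 0, sizeof(key));
        memset(&data, 0, sizeof(data));
        key.data = rec.host;
        key.size = (u_int32_t)strlen(rec.host) + 1;
        data.data = &rec;
        data.size = sizeof(rec);

        if ((ret = dbp->put(dbp, NULL, &key, &data, 0)) != 0)
                dbp->err(dbp, ret, "DB->put");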


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/am_misc/tune.html b/db/docs/ref/am_misc/tune.html index b8b828787..fa2c31607 100644 --- a/db/docs/ref/am_misc/tune.html +++ b/db/docs/ref/am_misc/tune.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Access method tuning - + @@ -18,8 +18,8 @@

Access method tuning

There are a few different issues to consider when tuning the performance of Berkeley DB access method applications.

-

-

access method
An application's choice of a database access method can significantly +
+
access method
An application's choice of a database access method can significantly affect performance. Applications using fixed-length records and integer keys are likely to get better performance from the Queue access method. Applications using variable-length records are likely to get better @@ -28,7 +28,7 @@ most applications than either the Hash or Recno access methods. Because the access method APIs are largely identical between the Berkeley DB access methods, it is easy for applications to benchmark the different access methods against each other. See Selecting an access method for more information. -

cache size
The Berkeley DB database cache defaults to a fairly small size, and most +
cache size
The Berkeley DB database cache defaults to a fairly small size, and most applications concerned with performance will want to set it explicitly. Using a too-small cache will result in horrible performance. The first step in tuning the cache size is to use the db_stat utility (or the @@ -49,7 +49,7 @@ cache to the maximum size possible without triggering paging activity. Finally, always remember to make your measurements under conditions as close as possible to the conditions your deployed application will run under, and to test your final choices under worst-case conditions. -

shared memory
By default, Berkeley DB creates its database environment shared regions in +
shared memory
By default, Berkeley DB creates its database environment shared regions in filesystem backed memory. Some systems do not distinguish between regular filesystem pages and memory-mapped pages backed by the filesystem, when selecting dirty pages to be flushed back to disk. For @@ -61,7 +61,7 @@ regions in system shared memory (DB_PRIVATE), or, in cases where this behavior is configurable, to turn off the operating system's flushing of memory-mapped pages. -

large key/data items
Storing large key/data items in a database can alter the performance +
large key/data items
Storing large key/data items in a database can alter the performance characteristics of Btree, Hash and Recno databases. The first parameter to consider is the database page size. When a key/data item is too large to be placed on a database page, it is stored on "overflow" pages @@ -117,6 +117,6 @@ of control to to some small multiple of the number of CPUs is usually the right choice to make.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/apprec/auto.html b/db/docs/ref/apprec/auto.html index 743ce6bd2..a321f2aea 100644 --- a/db/docs/ref/apprec/auto.html +++ b/db/docs/ref/apprec/auto.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Automatically generated functions - + @@ -90,20 +90,20 @@ ex_apprec_template and ex_apprec_rec.c for an example.)

(but not the automatically generated source_file, as that will get overwritten each time gen_rec.awk is run) and fully developed there. The recovery function takes the following parameters:

-

-

dbenv
The environment in which recovery is running. -

rec
The record being recovered. -

lsn
The log sequence number of the record being recovered. The +
+
dbenv
The environment in which recovery is running. +
rec
The record being recovered. +
lsn
The log sequence number of the record being recovered. The prev_lsn field, automatically included in every auto-generated log record, should be returned through this argument. The prev_lsn field is used to chain log records together to allow transaction aborts; because the recovery function is the only place that a log record gets parsed, the responsibility for returning this value lies with the recovery function writer. -

op
A parameter of type db_recops, which indicates what operation is being +
op
A parameter of type db_recops, which indicates what operation is being run (DB_TXN_ABORT, DB_TXN_APPLY, DB_TXN_BACKWARD_ROLL, DB_TXN_FORWARD_ROLL or DB_TXN_PRINT). -

info
A structure passed by the dispatch function. It is used to contain a +
info
A structure passed by the dispatch function. It is used to contain a list of committed transactions and information about files that may have been deleted. Application-specific log records can usually simply ignore this field. @@ -114,15 +114,15 @@ record type.
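Putting the parameters just listed together, a schematic recovery function might look like the following sketch. The prefix ex_apprec and record name mkdir follow the example named earlier; the argument-structure and read-function names follow the generated-code conventions described later on this page and are not copied from the patch.

        int
        ex_apprec_mkdir_recover(dbenv, rec, lsn, op, info)
                DB_ENV *dbenv;
                DBT *rec;
                DB_LSN *lsn;
                db_recops op;
                void *info;
        {
                ex_apprec_mkdir_args *argp;
                int ret;

                /* Unmarshall the log record using the generated read function. */
                if ((ret = ex_apprec_mkdir_read(dbenv, rec->data, &argp)) != 0)
                        return (ret);

                switch (op) {
                case DB_TXN_ABORT:
                case DB_TXN_BACKWARD_ROLL:
                        /* Undo the operation the record describes. */
                        break;
                case DB_TXN_APPLY:
                case DB_TXN_FORWARD_ROLL:
                        /* Redo the operation the record describes. */
                        break;
                default:
                        /* DB_TXN_PRINT and anything else: nothing to do here. */
                        break;
                }

                /* Chain log records backward by returning prev_lsn. */
                *lsn = argp->prev_lsn;
                free(argp);
                return (0);
        }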

The log function marshalls the parameters into a buffer, and calls DB_ENV->log_put on that buffer returning 0 on success and non-zero on failure. The log function takes the following parameters:

-

-

dbenv
The environment in which recovery is running. -

txnid
The transaction identifier for the transaction handle returned by +
+
dbenv
The environment in which recovery is running. +
txnid
The transaction identifier for the transaction handle returned by DB_ENV->txn_begin. -

lsnp
A pointer to storage for a log sequence number into which the log +
lsnp
A pointer to storage for a log sequence number into which the log sequence number of the new log record will be returned. -

syncflag
A flag indicating whether the record must be written synchronously. +
syncflag
A flag indicating whether the record must be written synchronously. Valid values are 0 and DB_FLUSH. -

args
The remaining parameters to the log message are the fields described +
args
The remaining parameters to the log message are the fields described in the XXX.src file, in order.

The read function takes a buffer and unmarshalls its contents into a @@ -130,10 +130,10 @@ structure of the appropriate type. It returns 0 on success and non-zero on error. After the fields of the structure have been used, the pointer returned from the read function should be freed. The read function takes the following parameters:

-

-

dbenv
The environment in which recovery is running. -

recbuf
A buffer. -

argp
A pointer to a structure of the appropriate type. +
+
dbenv
The environment in which recovery is running. +
recbuf
A buffer. +
argp
A pointer to a structure of the appropriate type.

The print function displays the contents of the record. The print function takes the same parameters as the recovery function described @@ -141,12 +141,12 @@ previously. Although some of the parameters are unused by the print function, taking the same parameters allows a single dispatch loop to dispatch to a variety of functions. The print function takes the following parameters:

-

-

dbenv
The environment in which recovery is running. -

rec
The record being recovered. -

lsn
The log sequence number of the record being recovered. -

op
Unused. -

info
Unused. +
+
dbenv
The environment in which recovery is running. +
rec
The record being recovered. +
lsn
The log sequence number of the record being recovered. +
op
Unused. +
info
Unused.

Finally, the source file will contain a function (named XXX_init_print, where XXX is replaced by the prefix) which should be added to the @@ -154,6 +154,6 @@ initialization part of the standalone d so that utility can be used to display application-specific log records.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/apprec/config.html b/db/docs/ref/apprec/config.html index 76579c60c..7497419e2 100644 --- a/db/docs/ref/apprec/config.html +++ b/db/docs/ref/apprec/config.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Application configuration - + @@ -123,6 +123,6 @@ to stable storage before calling the D to allow the periodic removal of database environment log files.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/apprec/def.html b/db/docs/ref/apprec/def.html index c85a4eddc..57960cc0d 100644 --- a/db/docs/ref/apprec/def.html +++ b/db/docs/ref/apprec/def.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Defining application-specific log records - + @@ -91,6 +91,6 @@ file btree/btree.src contains the definitions for the log records supported by the Berkeley DB Btree access method.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/apprec/intro.html b/db/docs/ref/apprec/intro.html index 7318bb7b7..7c3a5dd5d 100644 --- a/db/docs/ref/apprec/intro.html +++ b/db/docs/ref/apprec/intro.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Introduction - + @@ -47,13 +47,16 @@ and what part is undo information.

system call. After all requests are issued, the application may call DB_TXN->commit. When DB_TXN->commit returns, the caller is guaranteed that all necessary log writes have been written to disk.

-

At any time before issuing a DB_TXN->commit, -the application may call DB_TXN->abort, which will -result in restoration of the database to a consistent pretransaction -state. (The application may specify its own recovery function for this -purpose using the DB_ENV->set_app_dispatch method. The recovery -function must be able to either reapply or undo the update depending on -the context, for each different type of log record.)

+

At any time before issuing a DB_TXN->commit, the application may +call DB_TXN->abort, which will result in restoration of the database +to a consistent pretransaction state. (The application may specify its +own recovery function for this purpose using the +DB_ENV->set_app_dispatch method. The recovery function must be able to +either reapply or undo the update depending on the context, for each +different type of log record. The recovery functions must not use Berkeley DB +methods to access data in the environment as there is no way to +coordinate these accesses with either the aborting transaction or the +updates done by recovery or replication.)

If the application crashes, the recovery process uses the log to restore the database to a consistent state.

Berkeley DB includes tools to assist in the development of application-specific @@ -72,6 +75,6 @@ usable on any system, not just POSIX systems.

in the Berkeley DB distribution, in the directory examples_c/ex_apprec.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/arch/apis.html b/db/docs/ref/arch/apis.html index 7813c495a..fa776f838 100644 --- a/db/docs/ref/arch/apis.html +++ b/db/docs/ref/arch/apis.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Programmatic APIs - + @@ -72,6 +72,6 @@ the effectiveness of the internal hashing function on the particular data set. This is not a problem with Berkeley DB.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/arch/bigpic.html b/db/docs/ref/arch/bigpic.html index 54673406f..9d527a2f5 100644 --- a/db/docs/ref/arch/bigpic.html +++ b/db/docs/ref/arch/bigpic.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: The big picture - + @@ -77,29 +77,29 @@ it is wrapped in transaction calls. The transaction is started with operation fails due to a deadlock, the transaction is aborted using DB_TXN->abort, after which the operation may be retried.

There are actually five major subsystems in Berkeley DB, as follows:

-

-

Access Methods
The access methods subsystem provides general-purpose support for +
+
Access Methods
The access methods subsystem provides general-purpose support for creating and accessing database files formatted as Btrees, Hashed files, and Fixed- and Variable-length records. These modules are useful in the absence of transactions for applications that need fast formatted file support. See DB->open and DB->cursor for more information. These functions were already discussed in detail in the previous chapters. -

Memory Pool
The Memory Pool subsystem is the general-purpose shared memory buffer pool +
Memory Pool
The Memory Pool subsystem is the general-purpose shared memory buffer pool used by Berkeley DB. This is the shared memory cache that allows multiple processes and threads within processes to share access to databases. This module is useful outside of the Berkeley DB package for processes that require portable, page-oriented, cached, shared file access. -

Transaction
The Transaction subsystem allows a group of database changes to be +
Transaction
The Transaction subsystem allows a group of database changes to be treated as an atomic unit so that either all of the changes are done, or none of the changes are done. The transaction subsystem implements the Berkeley DB transaction model. This module is useful outside of the Berkeley DB package for processes that want to transaction-protect their own data modifications. -

Locking
The Locking subsystem is the general-purpose lock manager used by Berkeley DB. +
Locking
The Locking subsystem is the general-purpose lock manager used by Berkeley DB. This module is useful outside of the Berkeley DB package for processes that require a portable, fast, configurable lock manager. -

Logging
The Logging subsystem is the write-ahead logging used to support the +
Logging
The Logging subsystem is the write-ahead logging used to support the Berkeley DB transaction model. It is largely specific to the Berkeley DB package, and unlikely to be useful elsewhere except as a supporting module for the Berkeley DB transaction subsystem. @@ -120,6 +120,6 @@ subsystem, or the access methods subsystem wrapped in calls to the Berkeley DB transaction interfaces.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/arch/progmodel.html b/db/docs/ref/arch/progmodel.html index aad85ae76..f62904779 100644 --- a/db/docs/ref/arch/progmodel.html +++ b/db/docs/ref/arch/progmodel.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Programming model - + @@ -39,6 +39,6 @@ call. Of course, this model also greatly simplifies the creation of network client-server applications.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/arch/script.html b/db/docs/ref/arch/script.html index 4a5e3823f..797bd82ce 100644 --- a/db/docs/ref/arch/script.html +++ b/db/docs/ref/arch/script.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Scripting languages - + @@ -16,17 +16,21 @@

Scripting languages

Perl

-

Two Perl APIs are distributed with the Berkeley DB release. The Perl +

Two Perl wrappers are distributed with the Berkeley DB release. The Perl interface to Berkeley DB version 1.85 is called DB_File. The Perl interface to Berkeley DB version 2 and later is called BerkeleyDB. See -Using Berkeley DB with Perl for more +Using Berkeley DB with Perl for more +information.

+

PHP

+

A PHP wrapper is distributed with the Berkeley DB release. See +Using Berkeley DB with PHP for more information.

Tcl

-

A Tcl API is distributed with the Berkeley DB release. See +

A Tcl wrapper is distributed with the Berkeley DB release. See Using Berkeley DB with Tcl for more information.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/arch/utilities.html b/db/docs/ref/arch/utilities.html index 7d32fadc5..885268a66 100644 --- a/db/docs/ref/arch/utilities.html +++ b/db/docs/ref/arch/utilities.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Supporting utilities - + @@ -18,37 +18,37 @@

Supporting utilities

The following are the standalone utilities that provide supporting functionality for the Berkeley DB environment:

-

-

berkeley_db_svc
The berkeley_db_svc utility is the Berkeley DB RPC server that +
+
berkeley_db_svc
The berkeley_db_svc utility is the Berkeley DB RPC server that provides standard server functionality for client applications. -

db_archive
The db_archive utility supports database backup and archival, +
db_archive
The db_archive utility supports database backup and archival, and log file administration. It facilitates log reclamation and the creation of database snapshots. Generally, some form of log archival must be done if a database environment has been configured for logging or transactions. -

db_checkpoint
The db_checkpoint utility runs as a daemon process, monitoring +
db_checkpoint
The db_checkpoint utility runs as a daemon process, monitoring the database log and periodically issuing checkpoints. It facilitates log reclamation and the creation of database snapshots. Generally, some form of database checkpointing must be done if a database environment has been configured for transactions. -

db_deadlock
The db_deadlock utility runs as a daemon process, periodically +
db_deadlock
The db_deadlock utility runs as a daemon process, periodically traversing the database lock structures and aborting transactions when it detects a deadlock. Generally, some form of deadlock detection must be done if a database environment has been configured for locking. -

db_dump
The db_dump utility writes a copy of the database to a flat-text +
db_dump
The db_dump utility writes a copy of the database to a flat-text file in a portable format. -

db_load
The db_load utility reads the flat-text file produced by +
db_load
The db_load utility reads the flat-text file produced by db_dump and loads it into a database file. -

db_printlog
The db_printlog utility displays the contents of Berkeley DB log files +
db_printlog
The db_printlog utility displays the contents of Berkeley DB log files in a human-readable and parsable format. -

db_recover
The db_recover utility runs after an unexpected Berkeley DB or system +
db_recover
The db_recover utility runs after an unexpected Berkeley DB or system failure to restore the database to a consistent state. Generally, some form of database recovery must be done if databases are being modified. -

db_stat
The db_stat utility displays statistics for databases and database +
db_stat
The db_stat utility displays statistics for databases and database environments. -

db_upgrade
The db_upgrade utility provides a command-line interface for +
db_upgrade
The db_upgrade utility provides a command-line interface for upgrading underlying database formats. -

db_verify
The db_verify utility provides a command-line interface for +
db_verify
The db_verify utility provides a command-line interface for verifying the database format.

All of the functionality implemented for these utilities is also available @@ -59,6 +59,6 @@ the necessity for multiple processes to negotiate database and database environment creation and shut down.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_unix/aix.html b/db/docs/ref/build_unix/aix.html index d280c896c..f6ea88e93 100644 --- a/db/docs/ref/build_unix/aix.html +++ b/db/docs/ref/build_unix/aix.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: AIX - + @@ -78,6 +78,6 @@ include the problematical system include files.

PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_unix/conf.html b/db/docs/ref/build_unix/conf.html index 2b8ee79b4..886a19a54 100644 --- a/db/docs/ref/build_unix/conf.html +++ b/db/docs/ref/build_unix/conf.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Configuring Berkeley DB - + @@ -22,9 +22,9 @@ standard GNU autoconf arguments are available and supported. To see a complete list of possible arguments, specify the --help flag to the configure program.

The Berkeley DB specific arguments are as follows:

-

+
-

--disable-largefile
Some systems, notably versions of HP/UX and Solaris, require special +
--disable-largefile
Some systems, notably versions of HP/UX and Solaris, require special compile-time options in order to create files larger than 2^32 bytes. These options are automatically enabled when Berkeley DB is compiled. For this reason, binaries built on current versions of these systems may @@ -33,7 +33,7 @@ calls necessary for large files are not available. To disable building with these compile-time options, enter --disable-largefile as an argument to configure. -

--disable-shared, --disable-static
On systems supporting shared libraries, Berkeley DB builds both static and +
--disable-shared, --disable-static
On systems supporting shared libraries, Berkeley DB builds both static and shared libraries by default. (Shared libraries are built using the GNU Project's Libtool distribution, which supports shared library builds @@ -41,33 +41,33 @@ on many (although not all) systems.) To not build shared libraries, configure using the --disable-shared argument. To not build static libraries, configure using the --disable-static argument. -

--enable-compat185
To compile or load Berkeley DB 1.85 applications against this release of the +
--enable-compat185
To compile or load Berkeley DB 1.85 applications against this release of the Berkeley DB library, enter --enable-compat185 as an argument to configure. This will include Berkeley DB 1.85 API compatibility code in the library. -

--enable-cxx
To build the Berkeley DB C++ API, enter --enable-cxx as an argument to +
--enable-cxx
To build the Berkeley DB C++ API, enter --enable-cxx as an argument to configure. -

--enable-debug
To build Berkeley DB with -g as a compiler flag and with +
--enable-debug
To build Berkeley DB with -g as a compiler flag and with DEBUG #defined during compilation, enter --enable-debug as an argument to configure. This will create a Berkeley DB library and utilities with debugging symbols, as well as load various routines that can be called from a debugger to display pages, cursor queues, and so forth. If installed, the utilities will not be stripped. This argument should not be specified when configuring to build production binaries. -

--enable-debug_rop
To build Berkeley DB to output log records for read operations, enter +
--enable-debug_rop
To build Berkeley DB to output log records for read operations, enter --enable-debug_rop as an argument to configure. This argument should not be specified when configuring to build production binaries. -

--enable-debug_wop
To build Berkeley DB to output log records for write operations, enter +
--enable-debug_wop
To build Berkeley DB to output log records for write operations, enter --enable-debug_wop as an argument to configure. This argument should not be specified when configuring to build production binaries. -

--enable-diagnostic
To build Berkeley DB with run-time debugging checks, enter --enable-diagnostic +
--enable-diagnostic
To build Berkeley DB with run-time debugging checks, enter --enable-diagnostic as an argument to configure. This will cause a number of special checks to be performed when Berkeley DB is running. Applications built using this argument should not share database environments with applications built without this argument. This argument should not be specified when configuring to build production binaries. -

--enable-dump185
To convert Berkeley DB 1.85 (or earlier) databases to this release of Berkeley DB, +
--enable-dump185
To convert Berkeley DB 1.85 (or earlier) databases to this release of Berkeley DB, enter --enable-dump185 as an argument to configure. This will build the db_dump185 utility, which can dump Berkeley DB 1.85 and 1.86 databases in a format readable by the Berkeley DB db_load utility. @@ -78,7 +78,7 @@ are using a non-standard library for the Berkeley DB 1.85 library routines, you will have to change the Makefile that the configuration step creates to load the db_dump185 utility with that library.

-

--enable-java
To build the Berkeley DB Java API, enter --enable-java as an argument to +
--enable-java
To build the Berkeley DB Java API, enter --enable-java as an argument to configure. To build Java, you must also build with shared libraries. Before configuring, you must set your PATH environment variable to include javac. Note that it is not sufficient to include a symbolic @@ -87,7 +87,7 @@ location of javac to determine the location of the Java include files (for example, jni.h). On some systems, additional include directories may be needed to process jni.h; see Changing compile or load options for more information. -

--enable-posixmutexes
To force Berkeley DB to use the POSIX pthread mutex interfaces for underlying +
--enable-posixmutexes
To force Berkeley DB to use the POSIX pthread mutex interfaces for underlying mutex support, enter --enable-posixmutexes as an argument to configure. This is rarely necessary: POSIX mutexes will be selected automatically on systems where they are the preferred implementation. @@ -110,21 +110,21 @@ database environments, that is, environments where the

Specifying the --enable-posixmutexes configuration argument may require that Berkeley DB be linked with the -lpthread library.

-

--enable-rpc
To build the Berkeley DB RPC client code and server utility, enter --enable-rpc +
--enable-rpc
To build the Berkeley DB RPC client code and server utility, enter --enable-rpc as an argument to configure. The --enable-rpc argument requires that RPC libraries already be installed on your system. -

--enable-smallbuild
To build a small memory footprint version of the Berkeley DB library, enter +
--enable-smallbuild
To build a small memory footprint version of the Berkeley DB library, enter --enable-smallbuild as an argument to configure. The --enable-smallbuild argument is equivalent to individually specifying --disable-cryptography, --disable-hash, --disable-queue, ---disable-replication, and --disable-verify, turning off cryptography -support, the Hash and Queue access methods, database environment -replication support and database verification support. See -Building a small memory footprint -library for more information. +--disable-replication, --disable-statistics and --disable-verify, +turning off cryptography support, the Hash and Queue access methods, +database environment replication support and database verification +support. See Building a +small memory footprint library for more information. -

--enable-tcl
To build the Berkeley DB Tcl API, enter --enable-tcl as an argument to +
--enable-tcl
To build the Berkeley DB Tcl API, enter --enable-tcl as an argument to configure. This configuration argument expects to find Tcl's tclConfig.sh file in the /usr/local/lib directory. See the --with-tcl argument for instructions on specifying a non-standard location for the @@ -133,11 +133,11 @@ with Tcl for information on sites from which you can download Tcl and which Tcl versions are compatible with Berkeley DB. To build Tcl, you must also build with shared libraries. -

--enable-test
To build the Berkeley DB test suite, enter --enable-test as an argument to +
--enable-test
To build the Berkeley DB test suite, enter --enable-test as an argument to configure. To run the Berkeley DB test suite, you must also build the Tcl API. This argument should not be specified when configuring to build production binaries. -

--enable-uimutexes
To force Berkeley DB to use the UNIX International (UI) mutex interfaces for +
--enable-uimutexes
To force Berkeley DB to use the UNIX International (UI) mutex interfaces for underlying mutex support, enter --enable-uimutexes as an argument to configure. This is rarely necessary: UI mutexes will be selected automatically on systems where they are the preferred implementation. @@ -147,13 +147,13 @@ implementation is not the preferred one (for example, on Solaris where the LWP mutexes are used by default).

Specifying the --enable-uimutexes configuration argument may require that Berkeley DB be linked with the -lthread library.

-

--enable-umrw
Rational Software's Purify product and other run-time tools complain +
--enable-umrw
Rational Software's Purify product and other run-time tools complain about uninitialized reads/writes of structure fields whose only purpose is padding, as well as when heap memory that was never initialized is written to disk. Specify the --enable-umrw argument during configuration to mask these errors. This argument should not be specified when configuring to build production binaries. -

--with-mutex=MUTEX
To force Berkeley DB to use a specific mutex implementation, configure with +
--with-mutex=MUTEX
To force Berkeley DB to use a specific mutex implementation, configure with --with-mutex=MUTEX, where MUTEX is the mutex implementation you want. For example, --with-mutex=x86/gcc-assembly will configure Berkeley DB to use the x86 GNU gcc compiler based test-and-set assembly mutexes. This is @@ -161,23 +161,23 @@ rarely necessary and should be done only when the default configuration selects the wrong mutex implementation. A list of available mutex implementations can be found in the distribution file dist/aclocal/mutex.ac. -

--with-mutexalign=ALIGNMENT
To force Berkeley DB to use a specific mutex byte alignment, configure with +
--with-mutexalign=ALIGNMENT
To force Berkeley DB to use a specific mutex byte alignment, configure with --with-mutexalignment=ALIGNMENT. For example, --with-mutexalignment=64 will configure Berkeley DB to align mutexes at 64-byte alignments, ensuring no two mutexes use the same cache line on machines with 64-byte cache alignment. This is only useful when performance tuning Berkeley DB for large multiprocessor systems. -

--with-rpm=ARCHIVE
To build Berkeley DB as an RPM software package, configure with --with-rpm=ARCHIVE, +
--with-rpm=ARCHIVE
To build Berkeley DB as an RPM software package, configure with --with-rpm=ARCHIVE, where ARCHIVE is the path of gzipped tar archive Berkeley DB distribution file. This configuration argument will create an RPM specification file from which the RPM software package can be built, using the "make" command. -

--with-tcl=DIR
To build the Berkeley DB Tcl API, enter --with-tcl=DIR, replacing DIR with +
--with-tcl=DIR
To build the Berkeley DB Tcl API, enter --with-tcl=DIR, replacing DIR with the directory in which the Tcl tclConfig.sh file may be found. See Loading Berkeley DB with Tcl for information on sites from which you can download Tcl and which Tcl versions are compatible with Berkeley DB. To build Tcl, you must also build with shared libraries. -

--with-uniquename=NAME
To build Berkeley DB with unique symbol names (in order to avoid conflicts +
--with-uniquename=NAME
To build Berkeley DB with unique symbol names (in order to avoid conflicts with other application modules or libraries), enter --with-uniquename=NAME, replacing NAME with a string that to be appended to every Berkeley DB symbol. If "=NAME" is not specified, a default value of "_MAJORMINOR" is used, @@ -187,6 +187,6 @@ multiple versions of Berkeley DB for more information.

PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_unix/flags.html b/db/docs/ref/build_unix/flags.html index 422e4b393..9afec5f3f 100644 --- a/db/docs/ref/build_unix/flags.html +++ b/db/docs/ref/build_unix/flags.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Changing compile or load options - + @@ -57,6 +57,6 @@ prompt: ../dist/configure

See your command shell's manual page for further information.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_unix/freebsd.html b/db/docs/ref/build_unix/freebsd.html index f8a01d2d2..8e697e27d 100644 --- a/db/docs/ref/build_unix/freebsd.html +++ b/db/docs/ref/build_unix/freebsd.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: FreeBSD - + @@ -61,6 +61,6 @@ files should be placed on NFS-mounted filesystems on these systems.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_unix/hpux.html b/db/docs/ref/build_unix/hpux.html index a77b3e490..7f14b8718 100644 --- a/db/docs/ref/build_unix/hpux.html +++ b/db/docs/ref/build_unix/hpux.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: HP-UX - + @@ -91,6 +91,6 @@ include the problematical system include files.

PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_unix/install.html b/db/docs/ref/build_unix/install.html index 95fc34a91..d092b88ae 100644 --- a/db/docs/ref/build_unix/install.html +++ b/db/docs/ref/build_unix/install.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Installing Berkeley DB - + @@ -61,6 +61,6 @@ the install itself:

directories that do not already exist on the system.


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_unix/intro.html b/db/docs/ref/build_unix/intro.html index 897943816..0b76cd7b2 100644 --- a/db/docs/ref/build_unix/intro.html +++ b/db/docs/ref/build_unix/intro.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Building for UNIX/POSIX - + @@ -44,6 +44,8 @@ start from scratch by entering the following command:

make realclean
 ../dist/configure
 make
+

To uninstall Berkeley DB, enter:

+
make uninstall

To build multiple UNIX versions of Berkeley DB in the same source tree, create a new directory at the same level as the build_unix directory, and then configure and build in that directory as described previously.

@@ -57,6 +59,6 @@ compilation, and any output they produced.

PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_unix/irix.html b/db/docs/ref/build_unix/irix.html index 0e929b277..2ed8d2840 100644 --- a/db/docs/ref/build_unix/irix.html +++ b/db/docs/ref/build_unix/irix.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: IRIX - + @@ -26,6 +26,6 @@ must compile with the _SGI_MP_SOURCE flag:


PrevRefNext
-

Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_unix/linux.html b/db/docs/ref/build_unix/linux.html index df0a4992f..b5cb079f2 100644 --- a/db/docs/ref/build_unix/linux.html +++ b/db/docs/ref/build_unix/linux.html @@ -1,18 +1,18 @@ - - + + Berkeley DB Reference Guide: Linux - + -

Berkeley DB Reference Guide:
Building Berkeley DB for UNIX/POSIX systems

PrevRefNext +PrevRefNext

Linux

@@ -23,11 +23,6 @@ applications on Linux. If you are compiling a threaded application, you must compile with the _REENTRANT flag:

cc -D_REENTRANT ...

The Berkeley DB library will automatically build with the correct options.

-

  • I see database corruption when accessing databases on -NFS-mounted filesystems. -

    Some Linux filesystems are known to not support complete semantics for -the POSIX fsync call on NFS-mounted filesystems. No Berkeley DB files should -be placed on NFS-mounted filesystems on these systems.

  • I see database corruption when accessing databases.

    Some Linux filesystems do not support POSIX filesystem semantics. Specifically, ext2 and early releases of ReiserFS, and ext3 in some @@ -35,9 +30,13 @@ configurations, do not support "ordered data mode" and may insert random data into database or log files when systems crash. Berkeley DB files should not be placed on a filesystem that does not support, or is not configured to support, POSIX semantics.

    +

  • What scheduler should I use? +

    In some Linux kernels you can select schedulers, and the default is the +"anticipatory" scheduler. We recommend not using the "anticipatory" +scheduler for transaction processing workloads.

    -

    PrevRefNext +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_unix/notes.html b/db/docs/ref/build_unix/notes.html index 7f9cdb167..ec2267c74 100644 --- a/db/docs/ref/build_unix/notes.html +++ b/db/docs/ref/build_unix/notes.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Architecture independent FAQ - + @@ -106,7 +106,8 @@ Purify tool).

    For performance reasons, Berkeley DB does not write the unused portions of database pages or fill in unused structure fields. To turn off these errors when running software analysis tools, build with the ---enable-umrw configuration option.

    +--enable-umrw +configuration option.


  • Berkeley DB programs or the test suite fail unexpectedly.

    The Berkeley DB architecture does not support placing the shared memory @@ -148,6 +149,6 @@ DB185LIB=-ldb185

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_unix/osf1.html b/db/docs/ref/build_unix/osf1.html index 1a7646f17..d5c00a927 100644 --- a/db/docs/ref/build_unix/osf1.html +++ b/db/docs/ref/build_unix/osf1.html @@ -1,18 +1,18 @@ - - + + Berkeley DB Reference Guide: OSF/1 - + -

    Berkeley DB Reference Guide:
    Building Berkeley DB for UNIX/POSIX systems

    PrevRefNext +PrevRefNext

    OSF/1

    @@ -24,8 +24,8 @@ must compile with the _REENTRANT flag:

    cc -D_REENTRANT ...

    The Berkeley DB library will automatically build with the correct options.

    -

    PrevRefNext +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_unix/qnx.html b/db/docs/ref/build_unix/qnx.html index 3cfbec511..132cb40c4 100644 --- a/db/docs/ref/build_unix/qnx.html +++ b/db/docs/ref/build_unix/qnx.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: QNX - + @@ -72,6 +72,6 @@ should be used with caution on QNX.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_unix/sco.html b/db/docs/ref/build_unix/sco.html index 9d237e73d..1df28f11e 100644 --- a/db/docs/ref/build_unix/sco.html +++ b/db/docs/ref/build_unix/sco.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: SCO - + @@ -25,6 +25,6 @@ libraries.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_unix/shlib.html b/db/docs/ref/build_unix/shlib.html index 7f02f5a8d..fa03b7932 100644 --- a/db/docs/ref/build_unix/shlib.html +++ b/db/docs/ref/build_unix/shlib.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Dynamic shared libraries - + @@ -99,6 +99,6 @@ program. On other systems, using libtool has the virtue of knowing about any other details on systems that don't behave in this typical way.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_unix/small.html b/db/docs/ref/build_unix/small.html index ca2beab54..2c1fc4e26 100644 --- a/db/docs/ref/build_unix/small.html +++ b/db/docs/ref/build_unix/small.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Building a small memory footprint library - + @@ -19,28 +19,30 @@ small memory footprint library. These configuration options turn off specific functionality in the Berkeley DB library, reducing the code size. These configuration options include:

    -

    -

    --disable-cryptography
    To build Berkeley DB without support for cryptography, enter +
    +
    --disable-cryptography
    To build Berkeley DB without support for cryptography, enter --disable-cryptography as an argument to configure. -

    --disable-hash
    To build Berkeley DB without support for the Hash access method, enter +
    --disable-hash
    To build Berkeley DB without support for the Hash access method, enter --disable-hash as an argument to configure. -

    --disable-queue
    To build Berkeley DB without support for the Queue access method, enter +
    --disable-queue
    To build Berkeley DB without support for the Queue access method, enter --disable-queue as an argument to configure. -

    --disable-replication
    To build Berkeley DB without support for the database environment replication, +
    --disable-replication
    To build Berkeley DB without support for the database environment replication, enter --disable-replication as an argument to configure. -

    --disable-verify
    To build Berkeley DB without support for database verification, enter +
    --disable-statistics
    To build Berkeley DB without support for the statistics interfaces, enter +--disable-statistics as an argument to configure. +
    --disable-verify
    To build Berkeley DB without support for database verification, enter --disable-verify as an argument to configure. -

    --enable-smallbuild
    Equivalent to individually specifying --disable-cryptography, ---disable-hash, --disable-queue, --disable-replication, and ---disable-verify +
    --enable-smallbuild
    Equivalent to individually specifying --disable-cryptography, +--disable-hash, --disable-queue, --disable-replication, +--disable-statistics and --disable-verify

    The following configuration options will increase the size of the Berkeley DB library dramatically and are only useful when debugging applications:

    -

    -

    --enable-debug
    Build Berkeley DB with symbols for debugging. -

    --enable-debug_rop
    Build Berkeley DB with read-operation logging. -

    --enable-debug_wop
    Build Berkeley DB with write-operation logging. -

    --enable-diagnostic
    Build Berkeley DB with run-time debugging checks. +
    +
    --enable-debug
    Build Berkeley DB with symbols for debugging. +
    --enable-debug_rop
    Build Berkeley DB with read-operation logging. +
    --enable-debug_wop
    Build Berkeley DB with write-operation logging. +
    --enable-diagnostic
    Build Berkeley DB with run-time debugging checks.

    In addition, static libraries are usually smaller than shared libraries. By default Berkeley DB will build both shared and static libraries. To build @@ -54,6 +56,6 @@ building small memory footprint libraries on other systems, please contact Sleepycat Software support.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_unix/solaris.html b/db/docs/ref/build_unix/solaris.html index 8ab696dc7..46c0b8418 100644 --- a/db/docs/ref/build_unix/solaris.html +++ b/db/docs/ref/build_unix/solaris.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Solaris - + @@ -111,6 +111,6 @@ include the problematical system include files.

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_unix/sunos.html b/db/docs/ref/build_unix/sunos.html index 469d0b5fe..d112baeea 100644 --- a/db/docs/ref/build_unix/sunos.html +++ b/db/docs/ref/build_unix/sunos.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: SunOS - + @@ -26,6 +26,6 @@ versions of SunOS.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_unix/test.html b/db/docs/ref/build_unix/test.html index 13151129d..daebf8c6c 100644 --- a/db/docs/ref/build_unix/test.html +++ b/db/docs/ref/build_unix/test.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Running the test suite under UNIX - + @@ -51,6 +51,6 @@ command:

    information.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_unix/ultrix.html b/db/docs/ref/build_unix/ultrix.html index 18cddf6cd..84b2dc795 100644 --- a/db/docs/ref/build_unix/ultrix.html +++ b/db/docs/ref/build_unix/ultrix.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Ultrix - + @@ -23,6 +23,6 @@ they exist, because they are known to not work correctly.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_vxworks/faq.html b/db/docs/ref/build_vxworks/faq.html index f9cea42a2..a7bfe0fca 100644 --- a/db/docs/ref/build_vxworks/faq.html +++ b/db/docs/ref/build_vxworks/faq.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: VxWorks FAQ - + @@ -108,9 +108,14 @@ particular problem.

    look at SPR 72063 in the Wind River Systems' Support pages for a more detailed description of this problem.

  • Are there any filesystems I cannot use? +

Currently, neither the Target Server File System (TSFS) nor NFS can +be used.

    The Target Server File System (TSFS) uses the netDrv driver. This driver -does not support any ioctl that allows flushing to the disk, and therefore -cannot be used with Berkeley DB.

does not support any ioctl that allows flushing to the disk, nor does +it allow renaming of files via FIORENAME. +The NFS file system uses the nfsDrv driver, which also does not support +FIORENAME, so it cannot be used +with Berkeley DB.

  • What VxWorks primitives are used for mutual exclusion in Berkeley DB?

    Mutexes inside of Berkeley DB use the basic binary semaphores in VxWorks. The mutexes are created using the FIFO queue type.

    @@ -130,6 +135,6 @@ may leak their underlying system resources. Therefore, the

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_vxworks/intro.html b/db/docs/ref/build_vxworks/intro.html index 76d479d57..bec44b47a 100644 --- a/db/docs/ref/build_vxworks/intro.html +++ b/db/docs/ref/build_vxworks/intro.html @@ -1,26 +1,23 @@ - - + + Berkeley DB Reference Guide: Building for VxWorks - + -

    Berkeley DB Reference Guide:
    Building Berkeley DB for VxWorks systems

    PrevRefNext +PrevRefNext

    Building for VxWorks

    The build_vxworks directory in the Berkeley DB distribution contains a workspace -and project files for Tornado 2.0/VxWorks 5.4, Tornado 2.2/VxWorks 5.5 -and component files for Tornado 3.1/VxWorks AE. See -Building for VxWorks AE for -information about VxWorks AE.

    +and project files for Tornado 2.0/VxWorks 5.4 and Tornado 2.2/VxWorks 5.5.

    @@ -36,13 +33,14 @@ information about VxWorks AE.

    Open the workspace BerkeleyDB20.wsp or BerkeleyDB22.wsp. The list of projects in this workspace will be shown. These projects were created for the x86 BSP for VxWorks.

    -

    The remainder of this document assumes that you already have a -VxWorks target and a target server, both up and running. It also -assumes that your VxWorks image is configured properly for your -needs. It also assumes that you -have an acceptable file system already available. See -VxWorks FAQ for more -information about file system requirements.

    +

    The remainder of this document assumes that you already have a VxWorks +target and a target server, both up and running. It also assumes that +your VxWorks image is configured properly for your needs. It also +assumes that you have an acceptable file system already available. +See VxWorks FAQ for more +information about file system requirements. +See VxWorks Notes for more +information about building a small footprint version of Berkeley DB.

    First, you need to set the include directories. To do this, go to the Builds tab for the workspace. Open up Berkeley DB Builds. You will see several different builds, containing different @@ -98,8 +96,8 @@ depending on which version of Tornado you are running. You need to repeat this procedure for all builds you are interested in building, as well as for all of the utility project builds you want to run.

    -
    FileDescription
    BerkeleyDB20.wsp Berkeley DB Workspace file for Tornado 2.0

    PrevRefNext +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_vxworks/introae.html b/db/docs/ref/build_vxworks/introae.html index f1ddb5ad9..2d80a4152 100644 --- a/db/docs/ref/build_vxworks/introae.html +++ b/db/docs/ref/build_vxworks/introae.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Building for VxWorks AE - + @@ -129,6 +129,6 @@ building, as well as for all of the utility project builds you want to run.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_vxworks/notes.html b/db/docs/ref/build_vxworks/notes.html index e30859af1..e2c06a973 100644 --- a/db/docs/ref/build_vxworks/notes.html +++ b/db/docs/ref/build_vxworks/notes.html @@ -1,18 +1,18 @@ - - + + Berkeley DB Reference Guide: VxWorks notes - + -

    Berkeley DB Reference Guide:
    Building Berkeley DB for VxWorks systems

    PrevRefNext +PrevRefNext

    VxWorks notes

    @@ -28,8 +28,7 @@ system supporting FIOSYNC.

    building Berkeley DB. If you want different or additional BSP build specifications you should add them by following the directions indicated in Building -with Tornado 2.0 or Tornado 2.2 or -Building with Tornado 3.1.

    +with Tornado 2.0 or Tornado 2.2.

    The demo program can be downloaded and run by calling the entry function dbdemo with the pathname of a database to use. The demo program will ask for some input keys. It creates a database and adds @@ -56,14 +55,15 @@ flag is implied for any application that does not specify the ID to ensure that different applications do not overwrite each other's database environments. See the DB_ENV->set_shm_key method for more information. Also, the DB_LOCKDOWN flag has no effect.

    -

    Notes for VxWorks AE 1.1

    -

    All tasks wanting to access a particular environment must run in the -same application domain. The memory regions used by the environment are -only accessible to the application domain. If more than one application -domain attempts to access an environment simultaneously, the results are -undefined but will likely lead to corruption.

    -

    PrevRefNext +

A default small footprint build is provided. This default provides +the equivalent of the --enable-smallbuild configuration option +described in Building a +small memory footprint library. In order to build the small +footprint, you should move db_config.h aside and copy +db_config_small.h to db_config.h. Then open +the appropriate small workspace file via Tornado and build as usual.

    +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_win/faq.html b/db/docs/ref/build_win/faq.html index ba7410a98..0bebd855c 100644 --- a/db/docs/ref/build_win/faq.html +++ b/db/docs/ref/build_win/faq.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Windows FAQ - + @@ -29,14 +29,14 @@ click on db_load -> Properties and change "Configuration Type" from fprintf (or some other standard C library function).

    You should be using the "Debug Multithreaded DLL" compiler option in your application when you link with the -build_win32/Debug/libdb42d.lib library (this .lib file -is actually a stub for libdb42d.DLL). To check this +build_win32/Debug/libdb43d.lib library (this .lib file +is actually a stub for libdb43d.DLL). To check this setting in Visual C++, choose the Project/Settings menu item and select Code Generation under the tab marked C/C++; and see the box marked Use runtime library. This should be set to Debug Multithreaded DLL. If your application is linked against the static library, -build_win32/Debug/libdb42sd.lib; then, you will want +build_win32/Debug/libdb43sd.lib; then, you will want to set Use runtime library to Debug Multithreaded.

    Setting this option incorrectly can cause multiple versions of the standard libraries to be linked into your application (one on behalf @@ -75,6 +75,6 @@ source.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_win/intro.html b/db/docs/ref/build_win/intro.html index bbc6f7387..390a7fd0c 100644 --- a/db/docs/ref/build_win/intro.html +++ b/db/docs/ref/build_win/intro.html @@ -1,18 +1,18 @@ - + Berkeley DB Reference Guide: Building for Win32 - + -

    Berkeley DB Reference Guide:
    Building Berkeley DB for Windows systems

    PrevRefNext +PrevRefNext

    Building for Win32

    @@ -25,7 +25,9 @@ project files for Microsoft Visual C++:

    These project files can be used to build Berkeley DB for any Win32 platform: Windows/XP, Windows/2000, Windows/NT, Windows/98 and Windows/95.

    -

    Building Berkeley DB with Visual C++ .NET

    +

    The build_win64 directory contains project files for Microsoft +Visual C++ to target Windows on 64-bit CPUs:

    +

    Building Berkeley DB with Visual C++ .NET for Win32

    1. Choose File -> Open Solution. Look in the build_win32 directory for compatible workspace files, select @@ -44,7 +46,7 @@ your build will be placed in a subdirectory of build_win32 named after the configuration you chose (for examples, build_win32/Release or build_win32/Debug).
    -

    Building Berkeley DB with Visual C++ 6.0

    +

    Building Berkeley DB with Visual C++ 6.0 for Win32

    1. Choose File -> Open Workspace. Look in the build_win32 directory for Workspaces, select @@ -65,20 +67,39 @@ examples, build_win32/Release or

    When building your application, you should normally use compile options "Debug Multithreaded DLL" and link against -build_win32/Debug/libdb42d.lib. If you +build_win32/Debug/libdb43d.lib. If you want to link against a static (non-DLL) version of the library, use the "Debug Multithreaded" compile options and link against -build_win32/Debug_static/libdb42sd.lib. +build_win32/Debug_static/libdb43sd.lib. You can also build using a release version of the libraries and tools, which will be placed in -build_win32/Release/libdb42.lib. The +build_win32/Release/libdb43.lib. The static version will be in -build_win32/Release_static/libdb42s.lib. +build_win32/Release_static/libdb43s.lib. You will also need to add the build_win32 directory to the list of include directories of your application's project.

    Each release of Berkeley DB is built and tested with this procedure using Microsoft Visual C++ 6.0, Standard Version and Microsoft Visual C++ .NET, Standard Version.

    +

    Building Berkeley DB for 64-bit Windows

    +

You can follow the same steps either on the Itanium itself or when +cross-compiling on an x86 box.

    +

You will need the latest Platform SDK from Microsoft, available from +Microsoft's web site. You only need the "Core SDK" from there.

    +

    Once that is installed, you should have an entry in your Start Menu +called Microsoft Platform SDK (date) -> Open Build +Environment Window -> Windows Server 2003 64-bit Build Environment +-> Set Win Svr 2003 Build Env (Debug). Selecting that will open +a command window with the environment set up for Itanium development.

    +

    Then, in the build_win64 directory in the Berkeley DB distribution, +run: +

    msdev /useenv BerkeleyDB.dsw /make "build_all - Debug"
    +You should now have Itanium binaries in the "Debug" directory.

    +

    To build a release, open the "Retail" window instead of the "Debug" +window, and run: +

    msdev /useenv BerkeleyDB.dsw /make "build_all - Release"
+There may be some warnings during the build, but we don't believe any of +them indicate real bugs.

    Building Berkeley DB with Cygwin

    To build Berkeley DB with Cygwin, follow the instructions in Building for UNIX.

    @@ -127,7 +148,7 @@ javac. either the Debug or Release version of the db_java project. Then press OK.

  • To build, select Build -> Build -libdb_java42.dll. This builds the Java support +libdb_java43.dll. This builds the Java support library for Berkeley DB and compiles all the java files, placing the resulting db.jar and dbexamples.jar files in the build_win32/Release or build_win32/Debug @@ -178,7 +199,7 @@ tool bar.

  • To build, right-click on db_tcl and select Build. This builds the Tcl support library for Berkeley DB, placing the result into build_win32/Debug/libdb_tcl4M4MINORd.dll or -build_win32/Release/libdb_tcl42.dll. +build_win32/Release/libdb_tcl43.dll.

    If you use a version different from Tcl 8.4.x you will need to change the name of the Tcl library used in the build (for @@ -205,18 +226,19 @@ whatever the library is named in your distribution). either the Debug or Release version of the db_tcl project. Then press OK.

  • To build, select Build -> Build -libdb_tcl42.dll. This builds the Tcl support +libdb_tcl43.dll. This builds the Tcl support library for Berkeley DB, placing the result into build_win32/Debug/libdb_tcl4M4MINORd.dll or -build_win32/Release/libdb_tcl42.dll. +build_win32/Release/libdb_tcl43.dll.

    If you use a version different from Tcl 8.4.x you will need to change the name of the Tcl library used in the build (for example, tcl84g.lib) to the -appropriate name. To do this, right click on db_tcl, go to -Settings -> Link -> Object / library modules -and change tcl84g.lib to match the -Tcl version you are using.

    +appropriate name. To do this, choose +Project -> Settings -> db_tcl +and change the Tcl library listed in the Object/Library modules +tcl84g.lib to match the Tcl version +you are using.

    Distributing DLLs

    When distributing applications linked against the DLL (not static) version of the library, the DLL files you need will be found in the @@ -231,8 +253,8 @@ installed in the same directory that will contain your installed Berkeley DB DLLs. This directory may need to be added to your System PATH environment variable. Check your compiler's license and documentation for specifics on redistributing runtime DLLs.

    -

    PrevRefNext +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_win/notes.html b/db/docs/ref/build_win/notes.html index 9feb6cec9..5d4a4b363 100644 --- a/db/docs/ref/build_win/notes.html +++ b/db/docs/ref/build_win/notes.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Windows notes - + @@ -29,6 +29,17 @@ with an obvious race.

    Practically speaking, however, these efforts would be largely meaningless on a FAT file system, which only has a "readable" and "writable" flag, applying to all users.

    +

  • On Windows, Berkeley DB supports internationalized filenames by treating all +directory paths and filenames passed to Berkeley DB methods as UTF-8 strings. +All paths are internally converted to wide character strings and passed +to the wide character variants of Windows system calls. +

This allows applications to create and open databases with names that +cannot be represented in ASCII while maintaining compatibility +with applications that work purely with ASCII paths.

    +

    Applications that operate on wide character strings can use the Windows +function WideCharToMultiByte with the code page CP_UTF8 to convert paths +to the form expected by Berkeley DB. Internally, Berkeley DB calls MultiByteToWideChar +on paths before calling Windows functions.
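As an illustration of the conversion described above, the following is a minimal sketch (the function name, buffer size, and database flags are examples, not part of the Berkeley DB documentation); an application holding a wide-character path converts it to UTF-8 before calling DB->open:

#include <windows.h>
#include <db.h>

/* Sketch: convert a wide-character path to UTF-8 and open it as a
 * Btree database.  Error handling is abbreviated. */
int
open_utf8_path(const wchar_t *wpath, DB **dbpp)
{
    char path[MAX_PATH * 4];
    DB *dbp;
    int ret;

    /* CP_UTF8 produces the UTF-8 form Berkeley DB expects on Windows. */
    if (WideCharToMultiByte(CP_UTF8, 0, wpath, -1,
        path, (int)sizeof(path), NULL, NULL) == 0)
        return (-1);

    if ((ret = db_create(&dbp, NULL, 0)) != 0)
        return (ret);
    if ((ret = dbp->open(dbp,
        NULL, path, NULL, DB_BTREE, DB_CREATE, 0)) != 0) {
        (void)dbp->close(dbp, 0);
        return (ret);
    }
    *dbpp = dbp;
    return (0);
}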

  • On Windows/9X, files opened by multiple processes do not share data correctly. For this reason, the DB_SYSTEM_MEM flag is implied for any application that does not specify the DB_PRIVATE flag, @@ -57,6 +68,6 @@ directly specified through the DB_ENV->op

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_win/small.html b/db/docs/ref/build_win/small.html index fa2ffd47b..8c5e1352b 100644 --- a/db/docs/ref/build_win/small.html +++ b/db/docs/ref/build_win/small.html @@ -1,20 +1,20 @@ - - + + -Berkeley DB Reference Guide: Building a small memory footprint library on Windows +Berkeley DB Reference Guide: Building a small memory footprint library - + -

    Berkeley DB Reference Guide:
    Building Berkeley DB for Windows systems

    PrevRefNext +PrevRefNext

    -

    Building a small memory footprint library on Windows

    +

    Building a small memory footprint library

    For applications that don't require all of the functionality of the full Berkeley DB library, an option is provided to build a static library with certain functionality disabled. In particular, cryptography, hash and @@ -38,8 +38,8 @@ in Release_small or Debug_small, respectively.

    For assistance in further reducing the size of the Berkeley DB library, or in building small memory footprint libraries on other systems, please contact Sleepycat Software support.

    -

    PrevRefNext +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_win/test.html b/db/docs/ref/build_win/test.html index de817f9c6..8120bf580 100644 --- a/db/docs/ref/build_win/test.html +++ b/db/docs/ref/build_win/test.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Running the test suite under Windows - + @@ -72,6 +72,6 @@ the Tcl shell for your system. information.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/build_win/unicode.html b/db/docs/ref/build_win/unicode.html new file mode 100644 index 000000000..ed6eca30b --- /dev/null +++ b/db/docs/ref/build_win/unicode.html @@ -0,0 +1,40 @@ + + + + + + +Berkeley DB Reference Guide: Unicode support + + + + + + + +

    Berkeley DB Reference Guide:
    Building Berkeley DB for Windows systems

    PrevRefNext +
    +

    +

    Unicode support

    +

    Unicode support requires a separate configuration step on Windows. +To enable Unicode support, perform the following steps:

    +
      +

    1. Right click on the "db_dll" project and choose Settings... (Visual +Studio .NET: Properties), then C/C++. The "Preprocessor definitions" +should read: +
      DB_CREATE_DLL,...
      +

      Change it to read:

      +
      UNICODE,_UNICODE,DB_CREATE_DLL,...
      +

      You will have to do this twice: once for the debug build and once for +the release build. If you also require static libraries, repeat for the +"db_static" project (there the first symbol is "CONFIG_TEST" for the +Debug Static build and "WIN32" for the Release Static build).

      +

    2. Select "Rebuild All" from the Build menu. +
    +

    To build binaries that can also run on Windows 9x or ME, follow the +instructions at Microsoft's web site.

    +

    PrevRefNext +
    +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/ref/cam/app.html b/db/docs/ref/cam/app.html new file mode 100644 index 000000000..3720a5e4a --- /dev/null +++ b/db/docs/ref/cam/app.html @@ -0,0 +1,131 @@ + + + + + + +Berkeley DB Reference Guide: Architecting Data Store and Concurrent Data Store applications + + + + + + +

    Berkeley DB Reference Guide:
    Berkeley DB Concurrent Data Store Applications

    PrevRefNext +
    +

    +

    Architecting Data Store and Concurrent Data Store applications

    +

    When building Data Store and Concurrent Data Store applications, there +are two special issues to consider whenever any thread of control exits +for any reason with a Berkeley DB database or database environment open.

    +

First, unexpected application or system failure may result in lost data, +corruption or inconsistencies in Berkeley DB databases. When a thread of +control exits while holding Berkeley DB resources, all databases modified +since they were last flushed to disk should be either:

    +

      +
    • removed and re-created, +
    • removed and restored from the last known good backup, +
    • verified using the DB->verify method or db_verify utility. If +the database does not verify cleanly, the contents may be salvaged using +the -R and -r options of the db_dump +utility. +
    +

    Applications where this is unacceptable should consider the Berkeley DB Transactional Data Store +product, which offers standard transactional guarantees such as +recoverability after failure.
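The verification option listed above can also be performed from application code; a minimal sketch (the function name and database filename argument are examples only, and note that DB->verify releases its DB handle whether or not the check succeeds):

#include <stdio.h>
#include <db.h>

int
verify_database(const char *path)
{
    DB *dbp;
    int ret;

    if ((ret = db_create(&dbp, NULL, 0)) != 0)
        return (ret);

    /* DB->verify walks the database and reports any damage; the
     * handle cannot be used again after this call. */
    if ((ret = dbp->verify(dbp, path, NULL, NULL, 0)) != 0)
        fprintf(stderr, "verify of %s failed: %s\n",
            path, db_strerror(ret));
    return (ret);
}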

    +

    Second, unexpected application or system failure requires that any +persistent database environment (that is, any database environment not +created using the DB_PRIVATE flag), be removed to recover the +Berkeley DB resources and release any locks or mutexes that may have been held +to avoid starvation as the remaining threads of control block behind the +failed thread's locks or mutexes.

    +

    The Berkeley DB library cannot determine when to remove and re-create a +database environment; the application must make that decision. +Furthermore, database environment removal must be single-threaded; that +is, one thread of control or process must remove and re-create the +database environment before any other thread of control or process +attempts to join the Berkeley DB environment.

    +

    There are two approaches to handling this problem:

    +
    +
    The hard way:
    Applications can track their own state carefully enough that they know +when the database environment needs to be removed and re-created. +Specifically, the rule to use is that any persistent database +environment must be removed any time the threads of control previously +using the Berkeley DB environment did not shut the environment down cleanly +before exiting the environment for any reason (including application or +system failure). +
    The easy way:
It is almost invariably simpler to remove and re-create the database +environment each time a thread of control accessing a database +environment fails for any reason. This requires, of course, that the +application detect application or system failure and then remove and +re-create the database environment when appropriate.
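A minimal sketch of this remove-and-re-create step follows (the function name is an example, the open flags assume a Concurrent Data Store application, and error handling is abbreviated):

#include <db.h>

int
recreate_env(const char *home, DB_ENV **dbenvpp)
{
    DB_ENV *dbenv;
    int ret;

    /* Remove any existing environment.  DB_ENV->remove releases the
     * handle whether or not the call succeeds. */
    if ((ret = db_env_create(&dbenv, 0)) != 0)
        return (ret);
    if ((ret = dbenv->remove(dbenv, home, DB_FORCE)) != 0)
        return (ret);

    /* Create and open a fresh environment. */
    if ((ret = db_env_create(&dbenv, 0)) != 0)
        return (ret);
    if ((ret = dbenv->open(dbenv, home,
        DB_CREATE | DB_INIT_MPOOL | DB_INIT_CDB, 0)) != 0) {
        (void)dbenv->close(dbenv, 0);
        return (ret);
    }
    *dbenvpp = dbenv;
    return (0);
}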
    +

    There are two common ways to build Data Store and Concurrent Data Store +applications. The most common way is as a single, usually +multithreaded, process. This architecture is simplest because it +requires no monitoring of other threads of control. When the +application starts, it opens and potentially creates the environment, +and then opens its databases. From then on, the application can create +new threads of control as it chooses. All threads of control share the +open Berkeley DB DB_ENV and DB handles. In this model, +databases are rarely opened or closed when more than a single thread of +control is running; that is, they are opened when only a single thread +is running, and closed after all threads but one have exited. The last +thread of control to exit closes the databases and the environment.

    +

    An alternative way to build Berkeley DB applications is as a set of +cooperating processes, which may or may not be multithreaded. This +architecture is more complicated.

    +

    First, this architecture requires that the order in which threads of +control are created and subsequently access the Berkeley DB environment be +controlled because database environment removal must be single-threaded. +The first thread of control to access the environment must remove any +previously existing environment and re-create the environment, and no +other thread should attempt to access the environment until the removal +is complete. (Note this ordering requirement does not apply to +environment creation without removal. If multiple threads attempt to +create a Berkeley DB environment, only one will perform the creation and the +others will join the already existing environment.)

    +

Second, this architecture requires that threads of control be monitored. +If any thread of control that owns Berkeley DB resources exits without first +cleanly discarding those resources, removing the database environment +is usually necessary. Before removing the database environment, all +threads using the Berkeley DB environment must relinquish all of their Berkeley DB +resources (it does not matter if they do so gracefully or because they +are forced to exit). Then, the database environment can be removed +and the threads of control continued or restarted.

    +

    We have found that the safest way to structure groups of cooperating +processes is to first create a single process (often a shell script) +that removes and re-creates the Berkeley DB environment, verifies, rebuilds +or removes the databases, and then creates the processes or threads that +will actually perform work. The initial thread has no further +responsibilities other than to monitor the threads of control it has +created, to ensure that none of them unexpectedly exits. If one exits, +the initial process then forces all of the threads of control using the +Berkeley DB environment to exit, removes the database environment, verifies, +rebuilds or removes the databases, and restarts the working threads of +control.

    +

    If it is not practical to have a single parent for the processes sharing +a Berkeley DB environment, each process sharing the environment should log +their connection to and exit from the environment in a way that allows +a monitoring process to detect if a thread of control might have +acquired Berkeley DB resources and never released them. In this model, an +initial "watcher" process removes and re-creates the Berkeley DB environment, +verifies, rebuilds or removes the databases, and then creates a sentinel +file. Any other process wanting to use the Berkeley DB environment checks for +the sentinel file; if the sentinel file exists, the other process +registers its process ID with the watcher and joins the database +environment. When the new process finishes with the environment, it +unregisters its process ID with the watcher. The watcher periodically +checks to ensure that no process has failed while using the environment. +If a process does fail while using the environment, the watcher removes +the sentinel file, kills all processes currently using the environment, +removes and re-creates the database environment, verifies, rebuilds or +removes the databases, and re-creates the sentinel file.

    +

    Obviously, it is important that the monitoring process in either case +be as simple and well-tested as possible because there is no recourse +if it fails.

    +

    PrevRefNext +
    +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/ref/cam/intro.html b/db/docs/ref/cam/intro.html index 1b5f26b9a..fab7ce578 100644 --- a/db/docs/ref/cam/intro.html +++ b/db/docs/ref/cam/intro.html @@ -1,18 +1,18 @@ - - + + Berkeley DB Reference Guide: Berkeley DB Concurrent Data Store applications - + -

    Berkeley DB Reference Guide:
    Berkeley DB Concurrent Data Store Applications

    PrevRefNext +PrevRefNext

    Berkeley DB Concurrent Data Store applications

    @@ -94,15 +94,15 @@ lock that is blocking it.

  • Not testing Berkeley DB error return codes (if any cursor operation returns an unexpected error, that cursor must still be closed).

  • By default, Berkeley DB Concurrent Data Store does locking on a per-database basis. For this reason, -accessing multiple databases in different orders in different threads -or processes, or leaving cursors open on one database while accessing +using cursors to access multiple databases in different orders in different +threads or processes, or leaving cursors open on one database while accessing another database, can cause an application to hang. If this behavior is a requirement for the application, Berkeley DB should be configured to do locking on an environment-wide basis. See the DB_CDB_ALLDB flag of the DB_ENV->set_flags function for more information. -
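If environment-wide locking is what the application needs, the flag must be set before the environment is opened; a minimal sketch (the function name is an example and error handling is abbreviated):

#include <db.h>

int
open_alldb_env(const char *home, DB_ENV **dbenvpp)
{
    DB_ENV *dbenv;
    int ret;

    if ((ret = db_env_create(&dbenv, 0)) != 0)
        return (ret);

    /* Request a single lock covering all databases in the environment;
     * DB_CDB_ALLDB must be set before DB_ENV->open. */
    if ((ret = dbenv->set_flags(dbenv, DB_CDB_ALLDB, 1)) != 0 ||
        (ret = dbenv->open(dbenv, home,
        DB_CREATE | DB_INIT_MPOOL | DB_INIT_CDB, 0)) != 0) {
        (void)dbenv->close(dbenv, 0);
        return (ret);
    }
    *dbenvpp = dbenv;
    return (0);
}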

    PrevRefNext +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/debug/common.html b/db/docs/ref/debug/common.html index 4ce650e50..8e6634671 100644 --- a/db/docs/ref/debug/common.html +++ b/db/docs/ref/debug/common.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Troubleshooting common Berkeley DB problems - + @@ -133,9 +133,16 @@ specifying the DB_RMW flag on your

    Or, if the application is doing a large number of updates in a small database, turning off Btree splits may help (see DB_REVSPLITOFF for more information.)
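Reverse splits are turned off per database handle before the database is opened; a minimal sketch (the function name and the Btree/creation flags are examples only):

#include <db.h>

int
open_no_revsplit(const char *file, DB **dbpp)
{
    DB *dbp;
    int ret;

    if ((ret = db_create(&dbp, NULL, 0)) != 0)
        return (ret);

    /* Turn off reverse splits before the database is opened. */
    if ((ret = dbp->set_flags(dbp, DB_REVSPLITOFF)) != 0 ||
        (ret = dbp->open(dbp,
        NULL, file, NULL, DB_BTREE, DB_CREATE, 0)) != 0) {
        (void)dbp->close(dbp, 0);
        return (ret);
    }
    *dbpp = dbp;
    return (0);
}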

    +
    +

  • Opening the database environment displays the following error: +
    Log sequence error: page LSN # ######; previous LSN ## ######.
    +

    A database update was made outside of a transaction. Check that your +application passes a transaction handle to all opens and updates of +transactionally protected databases. This error leaves the environment +unrecoverable, and the databases must be dumped and reloaded.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/debug/compile.html b/db/docs/ref/debug/compile.html index 99eea3924..64cb5fb03 100644 --- a/db/docs/ref/debug/compile.html +++ b/db/docs/ref/debug/compile.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Compile-time configuration - + @@ -17,8 +17,8 @@

    Compile-time configuration

    There are three compile-time configuration options that assist in debugging Berkeley DB and Berkeley DB applications:

    -

    -

    --enable-debug
    If you want to build Berkeley DB with -g as the C and C++ compiler +
    +
    --enable-debug
    If you want to build Berkeley DB with -g as the C and C++ compiler flag, enter --enable-debug as an argument to configure. This will create Berkeley DB with debugging symbols, as well as load various Berkeley DB routines that can be called directly from a debugger to display database page @@ -26,12 +26,12 @@ content, cursor queues, and so forth. (Note that the -O optimization flag will still be specified. To compile with only the -g, explicitly set the CFLAGS environment variable before configuring.) -

    --enable-diagnostic
    If you want to build Berkeley DB with debugging run-time sanity checks and with +
    --enable-diagnostic
    If you want to build Berkeley DB with debugging run-time sanity checks and with DIAGNOSTIC #defined during compilation, enter --enable-diagnostic as an argument to configure. This will cause a number of special checks to be performed when Berkeley DB is running. This flag should not be defined when configuring to build production binaries because it degrades performance. -

    --enable-umrw
    When compiling Berkeley DB for use in run-time memory consistency checkers +
    --enable-umrw
    When compiling Berkeley DB for use in run-time memory consistency checkers (in particular, programs that look for reads and writes of uninitialized memory), use --enable-umrw as an argument to configure. This guarantees, among other things, that Berkeley DB will completely initialize @@ -40,6 +40,6 @@ amount.

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/debug/intro.html b/db/docs/ref/debug/intro.html index dfb988492..b22c3f968 100644 --- a/db/docs/ref/debug/intro.html +++ b/db/docs/ref/debug/intro.html @@ -1,18 +1,18 @@ - - + + Berkeley DB Reference Guide: Introduction - + -

    Berkeley DB Reference Guide:
    Debugging Applications

    PrevRefNext +PrevRefNext

    Introduction

    @@ -28,32 +28,32 @@ you with debugging applications and reporting bugs to us so that we can provide you with the correct answer or fix as quickly as possible.

    When you encounter a problem, there are a few general actions you can take:

    -

    -

    Review the Berkeley DB error output
    If an error output mechanism has been configured in the Berkeley DB +
    +
    Review the Berkeley DB error output
    If an error output mechanism has been configured in the Berkeley DB environment, additional run-time error messages are made available to the applications. If you are not using an environment, it is well worth modifying your application to create one so that you can get more detailed error messages. See Run-time error information for more information on configuring Berkeley DB to output these error messages. -

    Review DB_ENV->set_verbose
    Check the list of flags for the DB_ENV->set_verbose function, and +
    Review DB_ENV->set_verbose
    Check the list of flags for the DB_ENV->set_verbose function, and see if any of them will produce additional information that might help understand the problem. -

    Add run-time diagnostics
    You can configure and build Berkeley DB to perform run-time diagnostics. (By +
    Add run-time diagnostics
    You can configure and build Berkeley DB to perform run-time diagnostics. (By default, these checks are not done because they can seriously impact performance.) See Compile-time configuration for more information. -

    Apply all available patches
    Before reporting a problem to Sleepycat Software, please upgrade to the +
    Apply all available patches
    Before reporting a problem to Sleepycat Software, please upgrade to the latest Sleepycat Software release of Berkeley DB, if possible, or at least make sure you have applied any updates available for your release from the Sleepycat Software web site. -

    Run the test suite
    If you see repeated failures or failures of simple test cases, run the +
    Run the test suite
    If you see repeated failures or failures of simple test cases, run the Berkeley DB test suite to determine whether the distribution of Berkeley DB you are using was built and configured correctly.
    - - + + @@ -58,16 +59,19 @@ + + + @@ -75,6 +79,6 @@

    PrevRefNext +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/debug/printlog.html b/db/docs/ref/debug/printlog.html index 7f882fbd3..42069df77 100644 --- a/db/docs/ref/debug/printlog.html +++ b/db/docs/ref/debug/printlog.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Reviewing Berkeley DB log files - + @@ -48,7 +48,7 @@ some records.

    The following table presents each currently written log record type with a brief description of the operation it describes. Any of these record types may have the string "_debug" appended if they -were written because DB_TXN_NOT_DURABLE was specified and the +were written because DB_TXN_NOT_DURABLE was specified and the system was configured with --enable-diagnostic.

    @@ -58,6 +58,7 @@ system was configured with - + + - + + + + + @@ -154,6 +160,6 @@ extract:

    awk -f range.awk START_FILE=sf START_OFFSET=so END_FILE=ef END_OFFSET=eo log_output
    db_debugLog debugging message.
    db_noopThis marks an operation that did nothing but update the LSN on a page.
    db_ovrefIncrement or decrement the reference count for a big item.
    db_pg_allocIndicates we allocated a page to a Btree.
    db_pg_allocIndicates we allocated a page to a database.
    db_pg_freeIndicates we freed a page (freed pages are added to a freelist and reused).
    db_pg_freedataIndicates we freed a page that still contained data entries (freed pages are added to a freelist and reused.)
    db_pg_initIndicates we reinitialized a page during a truncate.
    db_pg_newIndicates that a page was allocated and put on the free list.
    db_pg_prepareIndicates a new page was allocated during a child transaction of a prepared transaction.
    db_relinkFix prev/next chains on duplicate pages because a page was added or removed.
    dbreg_registerRecords an open of a file (mapping the filename to a log-id that is used in subsequent log operations).
    fop_createCreate a file in the file system.
    fop_file_removeRemove a name in the file system.
    fop_removeRemove a file in the file system.
    fop_renameRename a file in the file system.
    fop_writeWrite bytes to an object in the file system.
    ham_chgpgUsed to adjust a cursor location when a Hash page is removed, and its elements are moved to a different Hash page.
    ham_copypageUsed when we empty a bucket page, but there are overflow pages for the bucket; one needs to be copied back into the actual bucket.
    ham_curadjUsed to adjust a cursor location when a nearby record changes in a Hash database.

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/debug/runtime.html b/db/docs/ref/debug/runtime.html index 02705396f..3140c2095 100644 --- a/db/docs/ref/debug/runtime.html +++ b/db/docs/ref/debug/runtime.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Run-time error information - + @@ -43,6 +43,6 @@ described previously to format and display error messages to appropriate output devices.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/distrib/layout.html b/db/docs/ref/distrib/layout.html index 3269139f3..f9e17ed46 100644 --- a/db/docs/ref/distrib/layout.html +++ b/db/docs/ref/distrib/layout.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Source code layout - + @@ -22,7 +22,8 @@

    btreeBtree access method source code
    build_unixUNIX build directory
    build_vxworksVxWorks build directory.
    build_win32Windows build directory.
    build_win32Windows 32-bit build directory.
    build_win64Windows 64-bit build directory.
    clibC library replacement functions
    commonCommon Berkeley DB functions
    cryptoCryptographic support
    libdb_javaThe libdb_java shared library
    lockLock manager
    logLog manager
    mod_db4Apache module support
    mpShared memory buffer pool
    mutexMutexes
    osPOSIX 1003.1 operating-system specific functionality
    os_vxworksVxWorks operating-system specific functionality
    os_win32Windows operating-system specific functionality
    perlDB_File and BerkeleyDB Perl modules
    php_db4PHP module support
    qamQueue access method source code
    repReplication source code
    rpc_clientRPC client support
    rpc_serverRPC server utility
    sequenceSequence source code
    tclTcl API
    testTest suite
    txnTransaction manager

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/distrib/port.html b/db/docs/ref/distrib/port.html index b9aa39426..077135cea 100644 --- a/db/docs/ref/distrib/port.html +++ b/db/docs/ref/distrib/port.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Porting Berkeley DB to new architectures - + @@ -105,6 +105,6 @@ have any porting questions, just let us know, and we will be happy to answer them.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/dumpload/format.html b/db/docs/ref/dumpload/format.html index 68fe67f00..ae5a1469a 100644 --- a/db/docs/ref/dumpload/format.html +++ b/db/docs/ref/dumpload/format.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Dump output formats - + @@ -65,6 +65,6 @@ DATA=END.

    will repeat; that is, a new set of headers and a new set of data items.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/dumpload/text.html b/db/docs/ref/dumpload/text.html index 806f3bd08..f7da9df92 100644 --- a/db/docs/ref/dumpload/text.html +++ b/db/docs/ref/dumpload/text.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Loading text into databases - + @@ -28,6 +28,6 @@ name as the key item and the entire password entry as the data item:

    to avoid interpretation as escape characters by db_load.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/dumpload/utility.html b/db/docs/ref/dumpload/utility.html index 803110d1a..d977d4b64 100644 --- a/db/docs/ref/dumpload/utility.html +++ b/db/docs/ref/dumpload/utility.html @@ -1,18 +1,18 @@ - - + + Berkeley DB Reference Guide: The db_dump and db_load utilities - + -

    Berkeley DB Reference Guide:
    Dumping and Reloading

    PrevRefNext +PrevRefNext

    The db_dump and db_load utilities

    @@ -39,8 +39,8 @@ the load process itself will fail.

    The only available workaround for either Hash or Btree databases is to modify the sources for the db_load utility to load the database using the correct hash, prefix, and comparison functions.

    -

    PrevRefNext +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/env/create.html b/db/docs/ref/env/create.html index fb8975dd8..ed61fcea8 100644 --- a/db/docs/ref/env/create.html +++ b/db/docs/ref/env/create.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Creating a database environment - + @@ -32,8 +32,8 @@ in a different directory or on a different system.

    or joins the database environment. There are a number of options you can set to customize DB_ENV->open for your environment. These options fall into four broad categories:

    -

    -

    Subsystem Initialization:
    These flags indicate which Berkeley DB subsystems will be initialized for the +
    +
    Subsystem Initialization:
    These flags indicate which Berkeley DB subsystems will be initialized for the environment, and what operations will happen automatically when databases are accessed within the environment. The flags include DB_JOINENV, DB_INIT_CDB, DB_INIT_LOCK, @@ -45,13 +45,13 @@ a single subsystem; that is, when DB_RECOVER and +
    Recovery options:
    These flags, which include DB_RECOVER and DB_RECOVER_FATAL, indicate what recovery is to be performed on the environment before it is opened for normal use. -

    Naming options:
    These flags, which include DB_USE_ENVIRON and +
    Naming options:
    These flags, which include DB_USE_ENVIRON and DB_USE_ENVIRON_ROOT, modify how file naming happens in the environment. -

    Miscellaneous:
    Finally, there are a number of miscellaneous flags, for example, +
    Miscellaneous:
    Finally, there are a number of miscellaneous flags, for example, DB_CREATE which causes underlying files to be created as necessary. See the DB_ENV->open manual pages for further information. @@ -125,6 +125,6 @@ err: (void)dbenv->close(dbenv, 0); }
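A minimal sketch combining flags from the categories above (the function name and the particular flag choices are examples only; error handling is abbreviated):

#include <db.h>

int
open_env(const char *home, DB_ENV **dbenvpp)
{
    DB_ENV *dbenv;
    int ret;

    if ((ret = db_env_create(&dbenv, 0)) != 0)
        return (ret);

    /* Subsystem initialization, recovery and creation flags combined
     * in a single DB_ENV->open call. */
    if ((ret = dbenv->open(dbenv, home,
        DB_CREATE | DB_RECOVER |
        DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN,
        0)) != 0) {
        (void)dbenv->close(dbenv, 0);
        return (ret);
    }
    *dbenvpp = dbenv;
    return (0);
}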

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/env/db_config.html b/db/docs/ref/env/db_config.html index 1935840df..22888b964 100644 --- a/db/docs/ref/env/db_config.html +++ b/db/docs/ref/env/db_config.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: DB_CONFIG configuration file - + @@ -46,6 +46,6 @@ compiled-in application cache size to a size more appropriate for a specific machine.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/env/encrypt.html b/db/docs/ref/env/encrypt.html index 0d370d675..8378dbab4 100644 --- a/db/docs/ref/env/encrypt.html +++ b/db/docs/ref/env/encrypt.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Encryption - + @@ -87,6 +87,6 @@ Mersenne Twister code into Berkeley DB.

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/env/error.html b/db/docs/ref/env/error.html index 18dc0fc99..f4644babf 100644 --- a/db/docs/ref/env/error.html +++ b/db/docs/ref/env/error.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Error support - + @@ -53,6 +53,6 @@ a permission error, the error messages shown would look like this:

    my_app: contact your system administrator: session ID was 2

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/env/faq.html b/db/docs/ref/env/faq.html index a6d1efa5d..82572812f 100644 --- a/db/docs/ref/env/faq.html +++ b/db/docs/ref/env/faq.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Environment FAQ - + @@ -55,6 +55,6 @@ data members or methods.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/env/intro.html b/db/docs/ref/env/intro.html index 7a86ed745..72112b33c 100644 --- a/db/docs/ref/env/intro.html +++ b/db/docs/ref/env/intro.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Database environment introduction - + @@ -44,77 +44,42 @@ other's data as it resides in the shared regions, and they will share resources such as buffer space and locks. At the same time, any applications using the same databases must share an environment if consistency is to be maintained between them.

    - + - + - - - - - - - - - - - - - - - - - - + + - - - - - + + + + - + - - + + - - - - - - - - - - - - - - - - - -
    Database Environments and Related MethodsDescription
    Database Environment OperationsDescription
    db_env_createCreate an environment handle
    DB_ENV->closeClose an environment
    DB_ENV->dbremoveRemove a database
    DB_ENV->dbrenameRename a database
    DB_ENV->errError message with error string
    DB_ENV->errxError message
    DB_ENV->lock_detectPerform deadlock detection
    DB_ENV->lock_getAcquire a lock
    DB_ENV->lock_idAcquire a locker ID
    DB_ENV->lock_id_freeRelease a locker ID
    DB_ENV->lock_putRelease a lock
    DB_ENV->lock_statReturn lock subsystem statistics
    DB_ENV->lock_vecAcquire/release locks
    DB_ENV->log_archiveList log and database files
    DB_ENV->log_cursorCreate a log cursor handle
    DB_ENV->log_fileMap Log Sequence Numbers to log files
    DB_ENV->log_flushFlush log records
    DB_ENV->log_putWrite a log record
    DB_ENV->log_statReturn log subsystem statistics
    DB_ENV->memp_fcreateOpen a file in a memory pool
    DB_ENV->memp_registerRegister input/output functions for a file in a memory pool
    DB_ENV->memp_statReturn memory pool statistics
    DB_ENV->memp_syncFlush pages from a memory pool
    DB_ENV->memp_trickleTrickle flush pages from a memory pool
    DB_ENV->get_homeReturn environment's home directory
    DB_ENV->get_open_flagsReturn the flags with which the environment was opened
    DB_ENV->openOpen an environment
    DB_ENV->removeRemove an environment
    DB_ENV->rep_electHold a replication election
    DB_ENV->rep_process_messageProcess a replication message
    DB_ENV->rep_startConfigure an environment for replication
    DB_ENV->rep_statReplication statistics
    DB_ENV->set_allocSet local space allocation functions
    DB_ENV->stat_printEnvironment statistics
    db_strerrorError strings
    db_versionReturn version information
    Environment Configuration
    DB_ENV->set_app_dispatchConfigure application recovery
    DB_ENV->set_cachesizeSet the environment cache size
    DB_ENV->set_allocSet local space allocation functions
    DB_ENV->set_data_dirSet the environment data directory
    DB_ENV->set_encryptSet the environment cryptographic key
    DB_ENV->set_errcallSet error message callback
    DB_ENV->set_errfileSet error message FILE
    DB_ENV->set_errcall, DB_ENV->set_msgcallSet error and informational message callbacks
    DB_ENV->set_errfile, DB_ENV->set_msgfileSet error and informational message FILE
    DB_ENV->set_errpfxSet error message prefix
    DB_ENV->set_feedbackSet feedback callback
    DB_ENV->set_flagsEnvironment configuration
    DB_ENV->set_lg_bsizeSet log buffer size
    DB_ENV->set_lg_dirSet the environment logging directory
    DB_ENV->set_lg_maxSet log file size
    DB_ENV->set_lg_regionmaxSet logging region size
    DB_ENV->set_lk_conflictsSet lock conflicts matrix
    DB_ENV->set_lk_detectSet automatic deadlock detection
    DB_ENV->set_lk_max_lockersSet maximum number of lockers
    DB_ENV->set_lk_max_locksSet maximum number of locks
    DB_ENV->set_lk_max_objectsSet maximum number of lock objects
    DB_ENV->set_mp_mmapsizeSet maximum mapped-in database file size
    DB_ENV->set_paniccallSet panic callback
    DB_ENV->set_rep_limitLimit data sent in response to a single message
    DB_ENV->set_rep_transportConfigure replication transport
    DB_ENV->set_rpc_serverEstablish an RPC server connection
    DB_ENV->set_shm_keySet system memory shared segment ID
    DB_ENV->set_tas_spinsSet the number of test-and-set spins
    DB_ENV->set_timeoutSet lock and transaction timeout
    DB_ENV->set_tmp_dirSet the environment temporary file directory
    DB_ENV->set_tx_maxSet maximum number of transactions
    DB_ENV->set_tx_timestampSet recovery timestamp
    DB_ENV->set_verboseSet verbose messages
    DB_ENV->txn_beginBegin a transaction
    DB_ENV->txn_checkpointCheckpoint the transaction subsystem
    DB_ENV->txn_recoverDistributed transaction recovery
    DB_ENV->txn_statReturn transaction subsystem statistics

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/env/naming.html b/db/docs/ref/env/naming.html index e6234befd..02d3931b3 100644 --- a/db/docs/ref/env/naming.html +++ b/db/docs/ref/env/naming.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: File naming - + @@ -34,11 +34,11 @@ information.

    information may be specified to the Berkeley DB library. The specific circumstances and order in which these ways are applied are described in a subsequent paragraph.

    -

    -

    db_home
    If the db_home argument to DB_ENV->open is non-NULL, +
    +
    db_home
    If the db_home argument to DB_ENV->open is non-NULL, its value may be used as the database home, and files named relative to its path. -

    DB_HOME
    If the DB_HOME environment variable is set when DB_ENV->open is +
    DB_HOME
    If the DB_HOME environment variable is set when DB_ENV->open is called, its value may be used as the database home, and files named relative to its path.

    The DB_HOME environment variable is intended to permit users and system @@ -48,7 +48,7 @@ example::

    Application writers are encouraged to support the -h option found in the supporting Berkeley DB utilities to let users specify a database home.

    -

    DB_ENV methods
    There are three DB_ENV methods that affect file naming. The +
    DB_ENV methods
    There are three DB_ENV methods that affect file naming. The DB_ENV->set_data_dir method specifies a directory to search for database files. The DB_ENV->set_lg_dir method specifies a directory in which to create logging files. The DB_ENV->set_tmp_dir method specifies a @@ -57,7 +57,7 @@ are intended to permit applications to customize a file location for a database. For example, an application writer can place data files and log files in different directories or instantiate a new log directory each time the application runs. -

    DB_CONFIG
    The same information specified to the DB_ENV methods may also be +
    DB_CONFIG
    The same information specified to the DB_ENV methods may also be specified using the DB_CONFIG configuration file.

    Filename resolution in Berkeley DB

    @@ -65,8 +65,8 @@ specified using the DB_CONFIG -

    -

    absolute pathnames
    If the filename specified to a Berkeley DB function is an absolute +
    +
    absolute pathnames
    If the filename specified to a Berkeley DB function is an absolute pathname, that filename is used without modification by Berkeley DB.

    On UNIX systems, an absolute pathname is defined as any pathname that begins with a leading slash (/).

    @@ -74,21 +74,21 @@ begins with a leading slash (/).

    a leading slash or leading backslash (\); or any pathname beginning with a single alphabetic character, a colon and a leading slash or backslash (for example, C:/tmp).

    -

    DB_ENV methods, DB_CONFIG
    If a relevant configuration string (for example, set_data_dir), is +
    DB_ENV methods, DB_CONFIG
    If a relevant configuration string (for example, set_data_dir), is specified either by calling a DB_ENV method or as a line in the DB_CONFIG configuration file, the value is prepended to the filename. If the resulting filename is an absolute pathname, the filename is used without further modification by Berkeley DB. -

    db_home
    If the application specified a non-NULL db_home argument to +
    db_home
    If the application specified a non-NULL db_home argument to DB_ENV->open, its value is prepended to the filename. If the resulting filename is an absolute pathname, the filename is used without further modification by Berkeley DB. -

    DB_HOME
    If the db_home argument is NULL, the DB_HOME environment +
    DB_HOME
    If the db_home argument is NULL, the DB_HOME environment variable was set, and the application has set the appropriate DB_USE_ENVIRON or DB_USE_ENVIRON_ROOT flags, its value is prepended to the filename. If the resulting filename is an absolute pathname, the filename is used without further modification by Berkeley DB. -

    default
    Finally, all filenames are interpreted relative to the current working +
    default
    Finally, all filenames are interpreted relative to the current working directory of the process.

    The common model for a Berkeley DB environment is one in which only the DB_HOME @@ -122,6 +122,6 @@ dbenv->set_data_dir(dbenv, "data1"); dbenv->open(dbenv, "/a/database", flags, mode);

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/env/open.html b/db/docs/ref/env/open.html index 32f3a93d5..e6860b593 100644 --- a/db/docs/ref/env/open.html +++ b/db/docs/ref/env/open.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Opening databases within the environment - + @@ -85,6 +85,6 @@ err: if (dbp2 != NULL) }

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/env/region.html b/db/docs/ref/env/region.html index 6bcf7c02b..1ba9ee91b 100644 --- a/db/docs/ref/env/region.html +++ b/db/docs/ref/env/region.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Shared memory regions - + @@ -70,6 +70,6 @@ shared memory that is being used by the environment.

    displayed using the -e option to the db_stat utility.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/env/remote.html b/db/docs/ref/env/remote.html index acad323c2..8a5d5e942 100644 --- a/db/docs/ref/env/remote.html +++ b/db/docs/ref/env/remote.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Remote filesystem - + @@ -38,17 +38,17 @@ databases cannot be accessed from more than one Berkeley DB environment at a time (and therefore from more than one system), because no Berkeley DB database may be accessed from more than one Berkeley DB environment at a time.

    -

    -

    FreeBSD note:
    Some FreeBSD releases are known to return ENOLCK from fsync and close +
    +
    FreeBSD note:
    Some FreeBSD releases are known to return ENOLCK from fsync and close calls on NFS-mounted filesystems, even though the call has succeeded. The Berkeley DB code should be modified to ignore ENOLCK errors, or no Berkeley DB files should be placed on NFS-mounted filesystems on these systems. -

    Linux note:
    Some Linux releases are known to not support complete semantics for the +
    Linux note:
    Some Linux releases are known to not support complete semantics for the POSIX fsync call on NFS-mounted filesystems. No Berkeley DB files should be placed on NFS-mounted filesystems on these systems.

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/env/security.html b/db/docs/ref/env/security.html index 46dcd6ff3..c01a8fa88 100644 --- a/db/docs/ref/env/security.html +++ b/db/docs/ref/env/security.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Security - + @@ -18,25 +18,25 @@

    Security

    The following are security issues that should be considered when writing Berkeley DB applications:

    -

    -

    Database environment permissions
    The directory used as the Berkeley DB database environment should have its +
    +
    Database environment permissions
    The directory used as the Berkeley DB database environment should have its permissions set to ensure that files in the environment are not accessible to users without appropriate permissions. Applications that add to the user's permissions (for example, UNIX setuid or setgid applications), must be carefully checked to not permit illegal use of those permissions such as general file access in the environment directory. -

    Environment variables
    Setting the DB_USE_ENVIRON and DB_USE_ENVIRON_ROOT flags +
    Environment variables
    Setting the DB_USE_ENVIRON and DB_USE_ENVIRON_ROOT flags and allowing the use of environment variables during file naming can be dangerous. Setting those flags in Berkeley DB applications with additional permissions (for example, UNIX setuid or setgid applications) could potentially allow users to read and write databases to which they would not normally have access. -

    File permissions
    By default, Berkeley DB always creates files readable and writable by the owner +
    File permissions
    By default, Berkeley DB always creates files readable and writable by the owner and the group (that is, S_IRUSR, S_IWUSR, S_IRGRP and S_IWGRP; or octal mode 0660 on historic UNIX systems). The group ownership of created files is based on the system and directory defaults, and is not further specified by Berkeley DB. -

    Temporary backing files
    If an unnamed database is created and the cache is too small to hold +
    Temporary backing files
    If an unnamed database is created and the cache is too small to hold the database in memory, Berkeley DB will create a temporary physical file to enable it to page the database to disk as needed. In this case, environment variables such as TMPDIR may be used to specify @@ -51,6 +51,6 @@ known permissions.

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/ext/mod.html b/db/docs/ref/ext/mod.html new file mode 100644 index 000000000..55a181606 --- /dev/null +++ b/db/docs/ref/ext/mod.html @@ -0,0 +1,69 @@ + + + + + + +Berkeley DB Reference Guide: Using Berkeley DB with Apache + + + + + + + +

    Berkeley DB Reference Guide:
    Berkeley DB Extensions

    PrevRefNext +
    +

    +

    Using Berkeley DB with Apache

    +

    A mod_db4 Apache module for this release of Berkeley DB is included in the +distribution, providing a safe framework for running Berkeley DB applications +in the Apache 1.3 environment. In general, it is dangerous to run Berkeley DB +in a multiprocess system without some facility to coordinate recovery +between participating processes. Apache natively provides no interface +for communication between processes, so the mod_db4 module exists to +provide this communication.

    +

    Specifically, mod_db4 provides the following facilities:

    +
      +

    1. New constructors for DB_ENV and DB handles, which install +replacement open/close methods. +

    2. Transparent caching of open DB_ENV and DB handles. +

    3. Reference counting on all structures, allowing the module to detect the +initial opening of any managed database and automatically perform recovery. +

4. Automatic detection of unexpected failures (segfaults, or a module +actually calling exit() and avoiding shutdown phases), and automatic +termination of all child processes with open database resources, in an +attempt to preserve consistency. +
    +

    mod_db4 is designed to be used as an alternative interface to Berkeley DB. To +have another Apache module (for example, mod_foo) use mod_db4, do not +link mod_foo against the Berkeley DB library. In your mod_foo makefile, you +should:

    +
    #include "mod_db4_export.h"
    +

    and add your Apache include directory to your CPPFLAGS.

    +

    In mod_foo, to create a mod_db4 managed DB_ENV handle, use the +following:

    +
    int mod_db4_db_env_create(DB_ENV **dbenvp, u_int32_t flags);
    +

    which takes identical arguments to db_env_create.

    +

    To create a mod_db4 managed DB handle, use the following:

    +
    int mod_db4_db_create(DB **dbp, DB_ENV *dbenv, u_int32_t flags);
    +

    which takes identical arguments to db_create.

    +

    Otherwise the API is completely consistent with the standard Sleepycat +API.
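As a rough sketch (and not part of the distribution), a hypothetical mod_foo might obtain its handles as follows; the environment path, flag choices, and error handling are illustrative assumptions, and only the two mod_db4 constructors shown above are taken from this page:

#include <db.h>
#include "mod_db4_export.h"

/* Sketch: open a mod_db4-managed environment and database for mod_foo. */
static int
foo_open_handles(DB_ENV **dbenvp, DB **dbp)
{
    int ret;

    /* Same arguments as db_env_create. */
    if ((ret = mod_db4_db_env_create(dbenvp, 0)) != 0)
        return (ret);
    if ((ret = (*dbenvp)->open(*dbenvp, "/var/www/db",   /* illustrative home */
        DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
        DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0)
        return (ret);

    /* Same arguments as db_create. */
    if ((ret = mod_db4_db_create(dbp, *dbenvp, 0)) != 0)
        return (ret);
    return ((*dbp)->open(*dbp, NULL, "foo.db", NULL,
        DB_BTREE, DB_CREATE | DB_AUTO_COMMIT, 0660));
}

Because the mod_db4 constructors install replacement open and close methods on the handles they create, the rest of the code looks exactly like standard Berkeley DB code.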

    +

    The mod_db4 module requires the Berkeley DB library be compiled with C++ extensions +and the libmm library. Information and source code for the libmm library can +be found at +http://www.ossp.org/pkg/lib/mm/

    +

    To build this apache module, perform the following steps:

    +
    % ./configure --with-apxs=[path to the apxs utility] \
    +	--with-db4=[Berkeley DB library installation directory] \
    +	--with-mm=[libmm installation directory]
    +% make
    +% make install
    +

    Post-installation, modules can use this extension via the functions +documented in $APACHE_INCLUDEDIR/mod_db4_export.h.

    +

    PrevRefNext +
    +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/ref/ext/perl.html b/db/docs/ref/ext/perl.html new file mode 100644 index 000000000..2dec231bc --- /dev/null +++ b/db/docs/ref/ext/perl.html @@ -0,0 +1,43 @@ + + + + + + +Berkeley DB Reference Guide: Using Berkeley DB with Perl + + + + + + + +

    Berkeley DB Reference Guide:
    Berkeley DB Extensions

    PrevRefNext +
    +

    +

    Using Berkeley DB with Perl

    +

    The original Perl module for Berkeley DB was DB_File, which was written to +interface to Berkeley DB version 1.85. The newer Perl module for Berkeley DB is +BerkeleyDB, which was written to interface to version 2.0 and subsequent +releases. Because Berkeley DB version 2.X has a compatibility API for version +1.85, you can (and should!) build DB_File using version 2.X of Berkeley DB, +although DB_File will still only support the 1.85 functionality.

    +

    DB_File is distributed with the standard Perl source distribution (look +in the directory "ext/DB_File"). You can find both DB_File and BerkeleyDB +on CPAN, the Comprehensive Perl Archive Network of mirrored FTP sites. +The master CPAN site is +ftp://ftp.funet.fi/.

    +

    Versions of both BerkeleyDB and DB_File that are known to work correctly +with each release of Berkeley DB are included in the distributed Berkeley DB source +tree, in the subdirectories perl.BerkeleyDB and +perl.DB_File. Each of those directories contains a +README file with instructions on installing and using those +modules.

    +

    The Perl interface is not maintained by Sleepycat Software. Questions +about the DB_File and BerkeleyDB modules are best asked on the Usenet +newsgroup comp.lang.perl.modules.

    +

    PrevRefNext +
    +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/ref/ext/php.html b/db/docs/ref/ext/php.html new file mode 100644 index 000000000..62ae0de60 --- /dev/null +++ b/db/docs/ref/ext/php.html @@ -0,0 +1,102 @@ + + + + + + +Berkeley DB Reference Guide: Using Berkeley DB with PHP + + + + + + + +

    Berkeley DB Reference Guide:
    Berkeley DB Extensions

    PrevRefNext +
    +

    +

    Using Berkeley DB with PHP

    +

A PHP 4 extension for this release of Berkeley DB is included in the +distribution package. It can either link directly against the +installed Berkeley DB library (which is necessary for running in a +non-Apache/mod_php4 environment), or against mod_db4, which provides +additional safety when running under Apache/mod_php4.

    +

    The PHP extension provides the following classes, which mirror the +standard Berkeley DB C++ API.

    +
    class Db4Env {
    +
    function Db4Env($flags = 0) {}
    +function close($flags = 0) {}
    +function dbremove($txn, $filename, $database = null, $flags = 0) {}
    +function dbrename($txn, $file, $database, $new_database, $flags = 0) {}
    +function open($home, $flags = DB_CREATE  | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, $mode = 0666) {}
    +function remove($home, $flags = 0) {}
    +function set_data_dir($directory) {}
    +function txn_begin($parent_txn = null, $flags = 0) {}
    +function txn_checkpoint($kbytes, $minutes, $flags = 0) {}
    +} +

    +class Db4 { +

    function Db4($dbenv = null) {}	// create a new Db4 object using the optional DbEnv
    +function open($txn = null, $file = null, $database = null, $flags = DB_CREATE, $mode = 0) {}
    +function close() {}
    +function del($key, $txn = null) {}
    +function get($key, $txn = null, $flags = 0) {}
    +function pget($key, &$pkey, $txn = null, $flags = 0) {}
    +function get_type() {}	// returns the stringified database type name
    +function stat() {}	// returns statistics as an associative array
    +function join($cursor_list, $flags = 0) {}
    +function sync() {}
    +function truncate($txn = null, $flags = 0) {}
+function cursor($txn = null, $flags = 0) {}
    +} +

    +class Db4Txn { +

    function abort() {}
    +function commit() {}
+function discard() {}
    +function id() {}
    +function set_timeout($timeout, $flags = 0) {}
    +} +

    +class Db4Cursor { +

    function close() {}
    +function count() {}
    +function del() {}
    +function dup($flags = 0) {}
    +function get($key, $flags = 0) {}
    +function pget($key, &$primary_key, $flags = 0) {}
    +function put($key, $data, $flags = 0) {}
    +}
    +

    The PHP extension attempts to be "smart" for you by:

    +
      +

    1. Auto-committing operations on transactional databases if no explicit +Db4Txn object is specified. +

    2. Performing reference and dependency checking to insure that all +resources are closed in the correct order. +

    3. Supplying default values for flags. +
    +

    To install this PHP module linked against the mod_db4 framework, perform +the following steps:

    +
    % phpize
    +% ./configure --with-db4=[Berkeley DB library installation directory] \
    +	--with-mod_db4=$APACHE_INCLUDEDIR
    +% make
    +% make install
    +

    Then, in your php.ini file add the following:

    +
    extension=db4.so
    +

    This extension will now only run in a SAPI linked into Apache httpd +(mod_php4, most likely), and will take advantage of all of its +auto-recovery and handle-caching facilities.

    +

    To install this php module linked against the Berkeley DB library and not the +mod_db4 framework, perform the following steps:

    +
    % phpize
    +% ./configure --with-db4=[Berkeley DB library installation directory]
    +% make
    +% make install
    +

    Then in your php.ini file add:

    +
    extension=db4.so
    +

    PrevRefNext +
    +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/ref/install/file.html b/db/docs/ref/install/file.html index 208993243..3f89ff555 100644 --- a/db/docs/ref/install/file.html +++ b/db/docs/ref/install/file.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: File utility /etc/magic information - + @@ -33,6 +33,6 @@ Berkeley DB distribution. This magic.txt information is correct for both big-endian and little-endian architectures.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/install/multiple.html b/db/docs/ref/install/multiple.html index 370493995..6e3794010 100644 --- a/db/docs/ref/install/multiple.html +++ b/db/docs/ref/install/multiple.html @@ -1,17 +1,17 @@ - - + + Berkeley DB Reference Guide: Building with multiple versions of Berkeley DB - + -

    Berkeley DB Reference Guide:
    System Installation Notes

    PrevRefNext +PrevRefNext

    Building with multiple versions of Berkeley DB

    @@ -48,8 +48,8 @@ version of Berkeley DB happens to be installed on the target system. Second, use --with-uniquename when configuring Berkeley DB, because that will insure that you do not unexpectedly collide with other application code or a library already installed on the target system.

    -

    PrevRefNext +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/intro/data.html b/db/docs/ref/intro/data.html index 15cf282d6..10993985e 100644 --- a/db/docs/ref/intro/data.html +++ b/db/docs/ref/intro/data.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: An introduction to data management - + @@ -50,6 +50,6 @@ commercially-available database systems. The problem is selecting the one that best solves the problems that their applications face.


    RefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/intro/dbis.html b/db/docs/ref/intro/dbis.html index 427751089..5c3255dbb 100644 --- a/db/docs/ref/intro/dbis.html +++ b/db/docs/ref/intro/dbis.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: What is Berkeley DB? - + @@ -155,6 +155,6 @@ increasingly important feature in a world using CGI scripts to deliver HTML.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/intro/dbisnot.html b/db/docs/ref/intro/dbisnot.html index e49284394..cf727a3dd 100644 --- a/db/docs/ref/intro/dbisnot.html +++ b/db/docs/ref/intro/dbisnot.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: What Berkeley DB is not - + @@ -136,6 +136,6 @@ servers make calls through the Berkeley DB API to find records and return them to clients. On its own, however, Berkeley DB is not a server.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/intro/distrib.html b/db/docs/ref/intro/distrib.html index 9c370d99d..413136ef0 100644 --- a/db/docs/ref/intro/distrib.html +++ b/db/docs/ref/intro/distrib.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: What does the Berkeley DB distribution include? - + @@ -24,6 +24,6 @@ architecture/compiler combinations are available as part of Sleepycat Software's Berkeley DB support services.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/intro/need.html b/db/docs/ref/intro/need.html index 9857ff13a..ba9398fec 100644 --- a/db/docs/ref/intro/need.html +++ b/db/docs/ref/intro/need.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Do you need Berkeley DB? - + @@ -56,6 +56,6 @@ that set of features, then Berkeley DB is almost certainly the best choice for you.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/intro/products.html b/db/docs/ref/intro/products.html index 16709854a..604a185db 100644 --- a/db/docs/ref/intro/products.html +++ b/db/docs/ref/intro/products.html @@ -1,18 +1,18 @@ - - + + Berkeley DB Reference Guide: Sleepycat Software's Berkeley DB products - + -

    Berkeley DB Reference Guide:
    Introduction

    PrevRefNext +PrevRefNext

    Sleepycat Software's Berkeley DB products

    @@ -63,8 +63,8 @@ application requires. All replicas can handle read requests during normal processing. If the master system fails for any reason, one of the replicas takes over as the new master system, and distributes updates to the remaining replicas.

    -

    PrevRefNext +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/intro/terrain.html b/db/docs/ref/intro/terrain.html index 4913e1861..b4e1f7e8d 100644 --- a/db/docs/ref/intro/terrain.html +++ b/db/docs/ref/intro/terrain.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Mapping the terrain: theory and practice - + @@ -244,6 +244,6 @@ application creates new opportunity for installation mistakes and run-time problems.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/intro/what.html b/db/docs/ref/intro/what.html index fd0d38028..112a03fa4 100644 --- a/db/docs/ref/intro/what.html +++ b/db/docs/ref/intro/what.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: What other services does Berkeley DB provide? - + @@ -17,20 +17,20 @@

    What other services does Berkeley DB provide?

    Berkeley DB also provides core database services to developers. These services include:

    -

    -

    Page cache management:
    The page cache provides fast access to a cache of database pages, +
    +
    Page cache management:
    The page cache provides fast access to a cache of database pages, handling the I/O associated with the cache to ensure that dirty pages are written back to the file system and that new pages are allocated on demand. Applications may use the Berkeley DB shared memory buffer manager to serve their own files and pages. -

    Transactions and logging:
    The transaction and logging systems provide recoverability and atomicity +
    Transactions and logging:
    The transaction and logging systems provide recoverability and atomicity for multiple database operations. The transaction system uses two-phase locking and write-ahead logging protocols to ensure that database operations may be undone or redone in the case of application or system failure. Applications may use Berkeley DB transaction and logging subsystems to protect their own data structures and operations from application or system failure. -

    Locking:
    The locking system provides multiple reader or single writer access to +
    Locking:
    The locking system provides multiple reader or single writer access to objects. The Berkeley DB access methods use the locking system to acquire the right to read or write database pages. Applications may use the Berkeley DB locking subsystem to support their own locking needs. @@ -49,6 +49,6 @@ Berkeley DB access method. As a result, developers can integrate non-database objects into their transactional applications using Berkeley DB.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/intro/where.html b/db/docs/ref/intro/where.html index 86e2fe9c5..ee8662aff 100644 --- a/db/docs/ref/intro/where.html +++ b/db/docs/ref/intro/where.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Where does Berkeley DB run? - + @@ -40,6 +40,6 @@ Windows/NT, Windows/2000 and Windows/XP, via the Microsoft Visual C++ for more information.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/java/compat.html b/db/docs/ref/java/compat.html index 1443c8d9f..1fc11409f 100644 --- a/db/docs/ref/java/compat.html +++ b/db/docs/ref/java/compat.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Compatibility - + @@ -30,6 +30,6 @@ across multiple platforms. However, using the JNI means that Berkeley DB will not be compatible with Microsoft Visual J++.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/java/conf.html b/db/docs/ref/java/conf.html index 226a096b5..1c0c0824a 100644 --- a/db/docs/ref/java/conf.html +++ b/db/docs/ref/java/conf.html @@ -1,18 +1,18 @@ - - + + Berkeley DB Reference Guide: Java configuration - + -

    Berkeley DB Reference Guide:
    Java API

    PrevRefNext +PrevRefNext

    Java configuration

    @@ -98,8 +98,8 @@ your current directory:

    it exits, you should see a list of the lines you entered display with data items. This is a simple check to make sure the fundamental configuration is working correctly.

    -

    PrevRefNext +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/java/faq.html b/db/docs/ref/java/faq.html index 7f336cfee..3443acbd3 100644 --- a/db/docs/ref/java/faq.html +++ b/db/docs/ref/java/faq.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Java FAQ - + @@ -17,6 +17,36 @@

    Java FAQ

      +

    1. On what platforms is the Berkeley DB Java API supported? +

      All platforms supported by Berkeley DB that have a J2SE 1.3.1 compliant JVM.

      +

    2. Will the Berkeley DB Java API function properly when using Java +version 1.4? +

The minimum required version is J2SE 1.3. Sun claims, and it has been +our experience, that for the most part 1.4.1 is backwards compatible +with 1.3.1. It is therefore highly likely that it will; even so, we recommend that you run the +supplied test suites against the specific Java VM on the specific +platform on which you plan to ship your product to ensure compatibility.

      +

    3. How does the Berkeley DB Java API relate to the J2EE standard? +

The Berkeley DB Java API does not currently implement any part of the J2EE +standard. That said, it does implement the implicit standard for Java, +Java Collections. The concept of a transaction exists in several +Java packages (J2EE, XA, JINI to name a few). Support for these APIs +will be added based on demand in future versions of Berkeley DB.

      +

    4. How should I incorporate db.jar and the db native library into +a Tomcat or other J2EE application servers? +

      Tomcat and other J2EE application servers have the ability to rebuild +and reload code automatically. When using Tomcat this is the case when +"reloadable" is set to "true". If your WAR file includes the db.jar it +too will be reloaded each time your code is reloaded. This causes +exceptions as the native library can't be loaded more than once and +there is no way to unload native code. The solution is to place the +db.jar in $TOMCAT_HOME/common/lib and let Tomcat load that library once +at start time rather than putting it into the WAR that gets reloaded +over and over.

      +

    5. Can I use the Berkeley DB Java API from within a EJB, a Servlet or a +JSP page? +

      Yes. The Berkeley DB Java API can be used from within all the popular J2EE +application servers in many different ways.

    6. During one of the first calls to the Berkeley DB Java API, a DbException is thrown with a "Bad file number" or "Bad file descriptor" message. @@ -32,21 +62,31 @@ follows (note the 'd' at the end):

      % java -Dsleepycat.db.libname=libdb_java-VERSIONd

      On UNIX, try:

      % java -Dsleepycat.db.libname=db_java_g-VERSION
      -

    7. How should I incorporate db.jar and the db native library -into a Tomcat or other J2EE application servers? -

      Tomcat and other J2EE application servers have the ability to -rebuild and reload code automatically. When using Tomcat this -is the case when "reloadable" is set to "true". If your WAR -file includes the db.jar it too will be reloaded each time -your code is reloaded. This causes exceptions as the native -library can't be loaded more than once and there is no way to -unload native code. The solution is to place the db.jar in -$TOMCAT_HOME/common/lib and let Tomcat load that library once -at start time rather than putting it into the WAR that -gets reloaded over and over.

      +

    8. Why is ClassNotFoundException thrown when adding a record to +the database, when a SerialBinding is used? +

      This problem occurs if you copy the db.jar file into the Java extensions +(ext) directory. This will cause the database code to run under the +System class loader, and it won't be able to find your application +classes.

      +

      You'll have to actually remove db.jar from the Java extension directory. +If you have more than one installation of Java, be sure to remove it +from all of them. This is necessary even if db.jar is specified in the +classpath.

      +

      An example of the exception is:

      +
      com.sleepycat.examples.bdb.shipment.basic.SupplierKey
      +at java.net.URLClassLoader$1.run(Unknown Source)
      +at java.security.AccessController.doPrivileged(Native Method)
      +at java.net.URLClassLoader.findClass(Unknown Source)
      +at java.lang.ClassLoader.loadClass(Unknown Source)
      +at java.lang.ClassLoader.loadClass(Unknown Source)
      +at java.lang.ClassLoader.loadClassInternal(Unknown Source)
      +at java.lang.Class.forName0(Native Method)
      +at java.lang.Class.forName(Unknown Source)
      +at com.sleepycat.bind.serial.StoredClassCatalog.getClassInfo(StoredClassCatalog.java:211)
      +...

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/java/program.html b/db/docs/ref/java/program.html index e0571b8a6..68b6d27b5 100644 --- a/db/docs/ref/java/program.html +++ b/db/docs/ref/java/program.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Java programming notes - + @@ -30,79 +30,38 @@ order, so we would have to do extra bookkeeping to make sure that everything was closed in the proper order. The best word of advice is to always do a close() for any matching open() call. Specifically, the Berkeley DB package requires that you explicitly call close on each individual -Db - and -Dbc - object that you opened. Your database +Database and +Cursor object that you opened. Your database activity may not be synchronized to disk unless you do so.

  • Some methods in the Java API have no return type, and throw a -DbException - when an severe error -arises. There are some notable -methods that do have a return value, and can also throw an exception. -Db.get - and -Dbc.get - both return 0 when a get succeeds, -return Db.DB_NOTFOUND when the key is not found, and throw an error -when there is a severe error. This approach allows the programmer to -check for typical data-driven errors by watching return values without -special casing exceptions. -

An object of type -DbDeadlockException - is +DatabaseException when a severe error +arises. There are some notable methods that do have a return value, and +can also throw an exception. The "get" methods in +Database and +Cursor both return 0 when a get +succeeds, DB_NOTFOUND when the key is not found, and throw an +error when there is a severe error. This approach allows the programmer +to check for typical data-driven errors by watching return values +without special casing exceptions. -

    An object of type MemoryException is +thrown when a Dbt is too small to hold the corresponding key or data item.

    +

    An object of type DeadlockException is thrown when a deadlock would occur.

    -

    An object of type -DbMemoryException - is -thrown when the system cannot provide enough memory to complete the -operation (the ENOMEM system error on UNIX).

    -

    An object of type -DbRunRecoveryException -, -a subclass of -DbException -, is thrown when -there is an error that requires a -recovery of the database using db_recover.

    -

    An object of type -IllegalArgumentException +

    An object of type RunRecoveryException, a +subclass of DatabaseException, is thrown when +there is an error that requires a recovery of the database using +db_recover.

    +

An object of type IllegalArgumentException, a standard Java Language exception, is thrown when there is an error in method arguments.

    -

  • Berkeley DB always turns on the -Db.DB_THREAD -flag because threads are expected in Java. -

  • Callbacks in Java manufacture -Dbt - objects -from internal data. For efficiency, the data field in such Dbts is -not set in the Java object until a -Dbt.getData - method call. This avoids the -creation of a potentially large Java byte array if it isn't needed. If -callback code can be written to defer calling -Dbt.getData -performance may be increased. For example, a bt_compare method might -compare values returned by -Dbt.getSize -before deciding whether a call to -Dbt.getData - is needed. -

  • If there are embedded null strings in the curslist argument for -Db.join -, they will be treated as the -end of the list of -cursors, even if you may have allocated a longer array. Fill in all -the strings in your array unless you intend to cut it short. -

  • The callback installed for -DbEnv.setErrorHandler - will run in the same -thread as the caller to -DbEnv.setErrorHandler -. Make sure that -thread remains running until your application exits or until -DbEnv.close -is called. +

    An object of type OutOfMemoryError is thrown +when the system cannot provide enough memory to complete the operation +(the ENOMEM system error on UNIX).

    +

• If there are embedded nulls in the curslist argument for +Database.join(com.sleepycat.db.Cursor[], com.sleepycat.db.JoinConfig), +they will be treated as the end of the list of cursors, even if you +have allocated a longer array. Fill in all the cursors in your +array unless you intend to cut it short.

  • If you are using custom class loaders in your application, make sure that the Berkeley DB classes are loaded by the system class loader, not a custom class loader. This is due to a JVM bug that can cause an access @@ -111,6 +70,6 @@ Java Bug Database).

  • PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/lock/am_conv.html b/db/docs/ref/lock/am_conv.html index 3f59e39fb..7251b55b5 100644 --- a/db/docs/ref/lock/am_conv.html +++ b/db/docs/ref/lock/am_conv.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Berkeley DB Transactional Data Store locking conventions - + @@ -119,6 +119,6 @@ span threads of control, so the library knows that two cursors in the same transaction cannot modify the database concurrently.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/lock/cam_conv.html b/db/docs/ref/lock/cam_conv.html index 0f16a12c7..731685a03 100644 --- a/db/docs/ref/lock/cam_conv.html +++ b/db/docs/ref/lock/cam_conv.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Berkeley DB Concurrent Data Store locking conventions - + @@ -23,8 +23,8 @@ entirely in the Berkeley DB interface layer.

    The object it locks is the file, identified by its unique file number. The locking matrix is not one of the two standard lock modes, instead, we use a four-lock set, consisting of the following:

    -

    -

    DB_LOCK_NG
    not granted (always 0) +
    +
    DB_LOCK_NG
    not granted (always 0)
    DB_LOCK_READ
    read (shared)
    DB_LOCK_WRITE
    write (exclusive)
    DB_LOCK_IWRITE
    intention-to-write (shared with NG and READ, but conflicts with WRITE and IWRITE) @@ -49,6 +49,6 @@ already locked with a READ lock.

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/lock/config.html b/db/docs/ref/lock/config.html index d03ecedae..222ae3cf6 100644 --- a/db/docs/ref/lock/config.html +++ b/db/docs/ref/lock/config.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Configuring locking - + @@ -39,6 +39,6 @@ locking conflicts matrix. This is an advanced configuration option, and is almost never necessary.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/lock/dead.html b/db/docs/ref/lock/dead.html index ad9df2199..2ba094ca7 100644 --- a/db/docs/ref/lock/dead.html +++ b/db/docs/ref/lock/dead.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Deadlock detection - + @@ -79,6 +79,6 @@ operations block temporarily on locks but are soon able to proceed, automatic detection can decrease performance.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/lock/deaddbg.html b/db/docs/ref/lock/deaddbg.html index 273e509c1..31b5e3459 100644 --- a/db/docs/ref/lock/deaddbg.html +++ b/db/docs/ref/lock/deaddbg.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Deadlock debugging - + @@ -136,6 +136,6 @@ progress, one of them will have to be killed in order to resolve the deadlock.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/lock/intro.html b/db/docs/ref/lock/intro.html index 46f938a81..460fbf3eb 100644 --- a/db/docs/ref/lock/intro.html +++ b/db/docs/ref/lock/intro.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Berkeley DB and locking - + @@ -83,25 +83,25 @@ the lock subsystem. It is the programmatic interface used by the

    The locking subsystem is closed by the call to DB_ENV->close.

    Finally, the entire locking subsystem may be discarded using the DB_ENV->remove method.

    - + - - - - - - - - - - - - - + + + + + + + + + + + + +
    Locking Subsystem and Related MethodsDescription
    DB_ENV->set_lk_conflictsSet lock conflicts matrix
    DB_ENV->set_lk_detectSet automatic deadlock detection
    DB_ENV->set_lk_max_lockersSet maximum number of lockers
    DB_ENV->set_lk_max_locksSet maximum number of locks
    DB_ENV->set_lk_max_objectsSet maximum number of lock objects
    DB_ENV->set_timeoutSet lock and transaction timeout
    DB_ENV->lock_detectPerform deadlock detection
    DB_ENV->lock_getAcquire a lock
    DB_ENV->lock_idAcquire a locker ID
    DB_ENV->lock_id_freeRelease a locker ID
    DB_ENV->lock_putRelease a lock
    DB_ENV->lock_statReturn lock subsystem statistics
    DB_ENV->lock_vecAcquire/release locks
    DB_ENV->lock_detectPerform deadlock detection
    DB_ENV->lock_getAcquire a lock
    DB_ENV->lock_idAcquire a locker ID
    DB_ENV->lock_id_freeRelease a locker ID
    DB_ENV->lock_putRelease a lock
    DB_ENV->lock_statReturn lock subsystem statistics
    DB_ENV->lock_vecAcquire/release locks
    DB_ENV->set_lk_conflictsSet lock conflicts matrix
    DB_ENV->set_lk_detectSet automatic deadlock detection
    DB_ENV->set_lk_max_lockersSet maximum number of lockers
    DB_ENV->set_lk_max_locksSet maximum number of locks
    DB_ENV->set_lk_max_objectsSet maximum number of lock objects
    DB_ENV->set_timeoutSet lock and transaction timeout
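The following is a hedged sketch of driving the locking interfaces listed above directly, outside of the access methods; the object name, the single-locker usage, and the wrapper function are illustrative assumptions rather than code taken from this guide:

#include <string.h>
#include <db.h>

/* Sketch: acquire and release a read lock on an application-defined object. */
int
lock_example(DB_ENV *dbenv)
{
    const char *name = "conference-room-a";   /* illustrative object name */
    DBT obj;
    DB_LOCK lock;
    u_int32_t locker;
    int ret;

    if ((ret = dbenv->lock_id(dbenv, &locker)) != 0)
        return (ret);

    memset(&obj, 0, sizeof(obj));
    obj.data = (void *)name;
    obj.size = (u_int32_t)strlen(name);

    if ((ret = dbenv->lock_get(dbenv,
        locker, 0, &obj, DB_LOCK_READ, &lock)) == 0)
        ret = dbenv->lock_put(dbenv, &lock);

    (void)dbenv->lock_id_free(dbenv, locker);
    return (ret);
}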

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/lock/max.html b/db/docs/ref/lock/max.html index fe414de65..99ddad165 100644 --- a/db/docs/ref/lock/max.html +++ b/db/docs/ref/lock/max.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Configuring locking: sizing the system - + @@ -44,14 +44,16 @@ applications, finer granularity of control is necessary in order to minimize the size of the Lock subsystem.

    The maximum number of lockers can be estimated as follows:

      -
    • If the database environment is configured to use transactions, the -maximum number of lockers needed is the number of simultaneously active -transactions and child transactions (where a child transaction is active -until its parent commits or aborts, not until it commits or aborts). -
    • If the database environment is not configured to use transactions, the -maximum number of lockers needed is the number of simultaneous -non-cursor operations plus an additional locker for every simultaneously -open cursor or database handle. +
• If the database environment is using transactions, the maximum number +of lockers can be estimated by adding the number of simultaneously +active non-transactional cursors and open database handles to the number of +simultaneously active transactions and child transactions (where a child +transaction is active until it commits or aborts, not until its parent +commits or aborts).
    • If the database environment is not using transactions, the maximum +number of lockers can be estimated by adding the number of +simultaneously active non-transactional cursors and open database +handles to the number of simultaneous non-cursor operations.

    The maximum number of lock objects needed for a single database operation can be estimated as follows:

    @@ -90,6 +92,6 @@ applications are unlikely to actually need that many locks. Reviewing the Lock subsystem statistics is the best way to determine this value.
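As a sketch only, with made-up workload numbers, the estimates above might be turned into configuration calls as follows; the figures and the helper function are illustrative assumptions and should be replaced with values derived from your own application and from the Lock subsystem statistics:

#include <db.h>

/*
 * Sketch: 100 simultaneously active transactions, each with at most one
 * active child, plus 10 open handles and non-transactional cursors.
 */
int
configure_lock_limits(DB_ENV *dbenv)
{
    u_int32_t lockers = 100 * 2 + 10;
    int ret;

    if ((ret = dbenv->set_lk_max_lockers(dbenv, lockers)) != 0)
        return (ret);
    /* Generous upper bounds; refine them by reviewing db_stat -c output. */
    if ((ret = dbenv->set_lk_max_locks(dbenv, 10000)) != 0)
        return (ret);
    return (dbenv->set_lk_max_objects(dbenv, 10000));
}

These methods must be called before the environment is opened.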


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/lock/nondb.html b/db/docs/ref/lock/nondb.html index 26d06e311..1146ae76c 100644 --- a/db/docs/ref/lock/nondb.html +++ b/db/docs/ref/lock/nondb.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Locking and non-Berkeley DB applications - + @@ -46,6 +46,6 @@ be post-processed into a human-readable schedule of conference room use.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/lock/notxn.html b/db/docs/ref/lock/notxn.html index ad6e843a2..099f48ea9 100644 --- a/db/docs/ref/lock/notxn.html +++ b/db/docs/ref/lock/notxn.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Locking without transactions - + @@ -42,6 +42,6 @@ concurrent access, but not transactions, are more safely implemented using the Berkeley DB Concurrent Data Store Product.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/lock/page.html b/db/docs/ref/lock/page.html index e02894f31..7ea77af76 100644 --- a/db/docs/ref/lock/page.html +++ b/db/docs/ref/lock/page.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Locking granularity - + @@ -67,6 +67,6 @@ Therefore, two conflicting threads of control cannot access the same duplicate set simultaneously.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/lock/stdmode.html b/db/docs/ref/lock/stdmode.html index ae004ed20..f40b12e26 100644 --- a/db/docs/ref/lock/stdmode.html +++ b/db/docs/ref/lock/stdmode.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Standard lock modes - + @@ -26,8 +26,8 @@ lock mode within a conflict matrix.

    matrix used by Berkeley DB to support the underlying access methods is more complicated, but this matrix shows the lock mode relationships available to applications using the Berkeley DB Locking subsystem interfaces directly.

    -

    -

    DB_LOCK_NG
    not granted (always 0) +
    +
    DB_LOCK_NG
    not granted (always 0)
    DB_LOCK_READ
    read (shared)
    DB_LOCK_WRITE
    write (exclusive)
    DB_LOCK_IWRITE
    intention to write (shared) @@ -45,15 +45,15 @@ Write** 0 1 1 1 1 1 Intent Write 0 1 1 0 0 0 Intent Read 0 0 1 0 0 0 Intent RW 0 1 1 0 0 0 -

    -

    *
    In this case, suppose that there is a read lock held on an object. A new +
    +
    *
    In this case, suppose that there is a read lock held on an object. A new request for a read lock would be granted, but a request for a write lock would not. -

    **
    In this case, suppose that there is a write lock held on an object. A +
    **
    In this case, suppose that there is a write lock held on an object. A new request for either a read or write lock would be denied.

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/lock/timeout.html b/db/docs/ref/lock/timeout.html index 53606fbe1..e6760aaeb 100644 --- a/db/docs/ref/lock/timeout.html +++ b/db/docs/ref/lock/timeout.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Deadlock detection using timers - + @@ -35,7 +35,11 @@ case of timeout.

    first block or when deadlock detection is performed, the accuracy of the timeout depends on how often deadlock detection is performed. More specifically, transactions will continue to run after their timeout has -expired if they do not block on a lock request after that time.

+expired if they do not block on a lock request after that time. +A separate deadlock detection thread (or process) should always +be used if the application depends on timeouts; otherwise, if +there are no new blocked lock requests, a pending timeout will +never trigger.

    If the database environment deadlock detector has been configured with the DB_LOCK_EXPIRE option, timeouts are the only mechanism by which deadlocks will be broken. If the deadlock detector has been @@ -61,6 +65,6 @@ and the specific lock described will be timed out if it blocks longer than 4ms.
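The following sketch shows one way to follow the advice above: configure a lock timeout and run a dedicated thread or process that periodically asks the lock subsystem to expire timed-out requests. The 4ms value, the polling interval, and the wrapper function are illustrative assumptions:

#include <unistd.h>
#include <db.h>

/* Sketch: dedicated loop that makes pending lock timeouts fire promptly. */
int
run_timeout_detector(DB_ENV *dbenv)
{
    int aborted, ret;

    /* Time out any lock request blocked longer than 4ms (4000 microseconds). */
    if ((ret = dbenv->set_timeout(dbenv, 4000, DB_SET_LOCK_TIMEOUT)) != 0)
        return (ret);

    /*
     * Without a loop like this, a timeout is only noticed when some other
     * thread of control happens to block or run the detector.
     */
    for (;;) {
        if ((ret = dbenv->lock_detect(dbenv, 0, DB_LOCK_EXPIRE, &aborted)) != 0)
            return (ret);
        usleep(100000);   /* illustrative 100ms polling interval */
    }
    /* NOTREACHED */
}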


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/lock/twopl.html b/db/docs/ref/lock/twopl.html index b1382464d..8f81627b9 100644 --- a/db/docs/ref/lock/twopl.html +++ b/db/docs/ref/lock/twopl.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Locking with transactions: two-phase locking - + @@ -46,6 +46,6 @@ transaction most likely to deadlock.

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/log/config.html b/db/docs/ref/log/config.html index 68de05486..5a6fd736d 100644 --- a/db/docs/ref/log/config.html +++ b/db/docs/ref/log/config.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Configuring logging - + @@ -42,6 +42,6 @@ or transactions producing large amounts of data. By default, the buffer is 32KB.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/log/intro.html b/db/docs/ref/log/intro.html index 00642046a..eb63d82fb 100644 --- a/db/docs/ref/log/intro.html +++ b/db/docs/ref/log/intro.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Berkeley DB and logging - + @@ -33,37 +33,38 @@ have two methods: DB_LOGC->get method log, and DB_LOGC->close method to destroy the cursor.

    There are additional methods for integrating the log subsystem with a transaction processing system:

    -

    -

    DB_ENV->log_flush
    Flushes the log up to a particular log sequence number. -

    log_compare
    Allows applications to compare any two log sequence numbers. -

    DB_ENV->log_file
    Maps a log sequence number to the specific log file that contains it. -

    DB_ENV->log_archive
    Returns various sets of log filenames. These methods are used for +
    +
    DB_ENV->log_flush
    Flushes the log up to a particular log sequence number. +
    log_compare
    Allows applications to compare any two log sequence numbers. +
    DB_ENV->log_file
    Maps a log sequence number to the specific log file that contains it. +
    DB_ENV->log_archive
    Returns various sets of log filenames. These methods are used for database administration; for example, to determine if log files may safely be removed from the system. -

    DB_ENV->log_stat
    The display db_stat utility uses the DB_ENV->log_stat method to +
    DB_ENV->log_stat
    The display db_stat utility uses the DB_ENV->log_stat method to display statistics about the log. -

    DB_ENV->remove
    The log meta-information (but not the log files themselves) may be +
    DB_ENV->remove
    The log meta-information (but not the log files themselves) may be removed using the DB_ENV->remove method.
    - + - - - - - - - - - - - - - + + + + + + + + + + + + + +
Logging Subsystem and Related Methods: Description
DB_ENV->log_archive: List log and database files
DB_ENV->log_cursor: Create a log cursor handle
DB_ENV->log_file: Map Log Sequence Numbers to log files
DB_ENV->log_flush: Flush log records
DB_ENV->log_put: Write a log record
DB_ENV->set_lg_bsize: Set log buffer size
DB_ENV->set_lg_dir: Set the environment logging directory
DB_ENV->set_lg_max: Set log file size
DB_ENV->set_lg_regionmax: Set logging region size
log_compare: Compare two Log Sequence Numbers
DB_ENV->log_stat: Return log subsystem statistics
DB_LOGC->close: Close a log cursor
DB_LOGC->get: Retrieve a log record
DB_LSN: Log Sequence Numbers
log_compare: Compare two Log Sequence Numbers
DB_ENV->log_archive: List log and database files
DB_ENV->log_cursor: Create a log cursor handle
DB_ENV->log_file: Map Log Sequence Numbers to log files
DB_ENV->log_flush: Flush log records
DB_ENV->log_put: Write a log record
DB_ENV->log_stat: Return log subsystem statistics
DB_ENV->set_lg_bsize: Set log buffer size
DB_ENV->set_lg_dir: Set the environment logging directory
DB_ENV->set_lg_max: Set log file size
DB_ENV->set_lg_regionmax: Set logging region size
DB_LOGC->close: Close a log cursor
DB_LOGC->get: Retrieve a log record
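As a sketch of the cursor interfaces listed above, the following walks the log from the beginning and prints each record's log sequence number and length; it assumes an environment opened with DB_INIT_LOG:

    #include <stdio.h>
    #include <string.h>
    #include <db.h>

    int
    dump_log(DB_ENV *dbenv)
    {
        DB_LOGC *logc;
        DB_LSN lsn;
        DBT data;
        int ret, t_ret;

        if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
            return (ret);

        memset(&data, 0, sizeof(data));
        while ((ret = logc->get(logc, &lsn, &data, DB_NEXT)) == 0)
            printf("[%lu][%lu]: %lu bytes\n", (unsigned long)lsn.file,
                (unsigned long)lsn.offset, (unsigned long)data.size);
        if (ret == DB_NOTFOUND)            /* end of log is not an error */
            ret = 0;

        if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
            ret = t_ret;
        return (ret);
    }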

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/log/limits.html b/db/docs/ref/log/limits.html index 3568f75ca..2f0d2d1a3 100644 --- a/db/docs/ref/log/limits.html +++ b/db/docs/ref/log/limits.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Log file limits - + @@ -33,17 +33,17 @@ application is reaching the end of its log filename space, you must do the following:

    1. Archive your databases as if to prepare for catastrophic failure (see -db_archive for more information). -

    2. Dump and reload all your databases (see db_dump and -db_load for more information). -

    3. Remove all of the log files from the database environment. Note: This -is the only situation in which all the log files are removed from an -environment; in all other cases, at least a single log file is -retained. +Database and log file archival +for more information). +

    4. Reset the database's log sequence numbers (see the -r option +to the db_load utility for more information). +

    5. Remove all of the log files from the database environment. (This is the +only situation in which all the log files are removed from an environment; +in all other cases, at least a single log file is retained.)

    6. Restart your application.

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/mp/config.html b/db/docs/ref/mp/config.html index 0daa7f19e..cfe11f2ea 100644 --- a/db/docs/ref/mp/config.html +++ b/db/docs/ref/mp/config.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Configuring the memory pool - + @@ -48,6 +48,6 @@ the size of files mapped into the process address space, use the DB_ENV->set_mp_mmapsize method.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/mp/intro.html b/db/docs/ref/mp/intro.html index 2b04bab86..053a49e58 100644 --- a/db/docs/ref/mp/intro.html +++ b/db/docs/ref/mp/intro.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Berkeley DB and the memory pool - + @@ -68,30 +68,34 @@ database systems, and which allows the memory pool to be flushed up to a specified log sequence number (DB_LSN).

  • The entire pool may be discarded using the DB_ENV->remove method. - + - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + +
Memory Pools and Related Methods: Description
DB_ENV->set_cachesize: Set the environment cache size
DB_ENV->set_mp_mmapsize: Set maximum mapped-in database file size
DB_ENV->memp_register: Register input/output functions for a file in a memory pool
DB_ENV->memp_stat: Return memory pool statistics
DB_ENV->memp_sync: Flush pages from a memory pool
DB_ENV->memp_trickle: Trickle flush pages from a memory pool
DB_ENV->memp_fcreate: Open a file in a memory pool
DB_MPOOLFILE->close: Close a file in a memory pool
DB_MPOOLFILE->get: Get page from a file in a memory pool
DB_MPOOLFILE->open: Open a file in a memory pool
DB_MPOOLFILE->put: Return a page to a memory pool
DB_MPOOLFILE->set: Set memory pool page status
DB_MPOOLFILE->sync: Flush pages from a file in a memory pool
DB_MPOOLFILE->set_clear_len: Set file page bytes to be cleared
DB_MPOOLFILE->set_fileid: Set file unique identifier
DB_MPOOLFILE->set_ftype: Set file type
DB_MPOOLFILE->set_lsn_offset: Set file log-sequence-number offset
DB_MPOOLFILE->set_pgcookie: Set file cookie for pgin/pgout
DB->mpf: Return the database's memory pool handle
DB_ENV->memp_fcreate: Open a file in a memory pool
DB_ENV->memp_register: Register input/output functions for a file in a memory pool
DB_ENV->memp_set_max_openfd: Set the maximum number of open file descriptors
DB_ENV->memp_set_max_write: Set the maximum number of sequential disk writes
DB_ENV->memp_stat: Return memory pool statistics
DB_ENV->memp_sync: Flush pages from a memory pool
DB_ENV->memp_trickle: Trickle flush pages from a memory pool
DB_ENV->set_cachesize: Set the environment cache size
DB_ENV->set_mp_mmapsize: Set maximum mapped-in database file size
DB_MPOOLFILE->close: Close a file in a memory pool
DB_MPOOLFILE->get: Get page from a file in a memory pool
DB_MPOOLFILE->open: Open a file in a memory pool
DB_MPOOLFILE->put: Return a page to a memory pool
DB_MPOOLFILE->set: Set memory pool page status
DB_MPOOLFILE->set_clear_len: Set file page bytes to be cleared
DB_MPOOLFILE->set_fileid: Set file unique identifier
DB_MPOOLFILE->set_flags: General memory pool file configuration
DB_MPOOLFILE->set_ftype: Set file type
DB_MPOOLFILE->set_lsn_offset: Set file log-sequence-number offset
DB_MPOOLFILE->set_pgcookie: Set file cookie for pgin/pgout
DB_MPOOLFILE->sync: Flush pages from a file in a memory pool
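The following is a minimal sketch of the DB_MPOOLFILE interfaces listed above: it opens a file in the cache, fetches page 0, modifies it, and flushes it back to disk. The 4KB page size and the idea of clearing the page are arbitrary choices for illustration:

    #include <string.h>
    #include <db.h>

    #define PAGE_SIZE 4096                 /* assumption: fixed page size */

    int
    touch_first_page(DB_ENV *dbenv, const char *path)
    {
        DB_MPOOLFILE *mpf;
        db_pgno_t pgno;
        void *page;
        int ret;

        if ((ret = dbenv->memp_fcreate(dbenv, &mpf, 0)) != 0)
            return (ret);
        if ((ret = mpf->open(mpf, path, 0, 0, PAGE_SIZE)) != 0)
            goto err;

        pgno = 0;
        if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &page)) != 0)
            goto err;
        memset(page, 0, PAGE_SIZE);                /* modify the page in cache */
        if ((ret = mpf->put(mpf, page, DB_MPOOL_DIRTY)) != 0)
            goto err;
        ret = mpf->sync(mpf);                      /* write dirty pages to disk */

    err:    (void)mpf->close(mpf, 0);
        return (ret);
    }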

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/pindex.src b/db/docs/ref/pindex.src index 92e992d6c..7295d2951 100644 --- a/db/docs/ref/pindex.src +++ b/db/docs/ref/pindex.src @@ -1,24 +1,13 @@ -__APIREL__/utility/berkeley_db_svc.html__OCT__2 @berkeley_db_svc -__APIREL__/utility/berkeley_db_svc.html__OCT__3 utility to support @RPC client/server -__APIREL__/utility/db_archive.html__OCT__2 @db_archive -__APIREL__/utility/db_archive.html__OCT__3 utility to @archive log files -__APIREL__/utility/db_checkpoint.html__OCT__2 @db_checkpoint -__APIREL__/utility/db_checkpoint.html__OCT__3 utility to take @checkpoints -__APIREL__/utility/db_deadlock.html__OCT__2 @db_deadlock -__APIREL__/utility/db_deadlock.html__OCT__3 utility to detect @deadlocks -__APIREL__/utility/db_dump.html__OCT__2 @db_dump -__APIREL__/utility/db_dump.html__OCT__3 utility to @dump databases as text files -__APIREL__/utility/db_load.html__OCT__2 @db_load -__APIREL__/utility/db_load.html__OCT__3 utility to @load text files into databases -__APIREL__/utility/db_printlog.html__OCT__2 @db_printlog -__APIREL__/utility/db_printlog.html__OCT__3 utility to display @log files as text -__APIREL__/utility/db_recover.html__OCT__2 @db_recover -__APIREL__/utility/db_recover.html__OCT__3 utility to @recover database environments -__APIREL__/utility/db_stat.html__OCT__2 @db_stat -__APIREL__/utility/db_stat.html__OCT__3 utility to display database and environment @statistics -__APIREL__/utility/db_upgrade.html__OCT__2 @db_upgrade -__APIREL__/utility/db_upgrade.html__OCT__3 utility to upgrade @database files -__APIREL__/utility/db_upgrade.html__OCT__4 utility to @upgrade database files -__APIREL__/utility/db_verify.html__OCT__2 @db_verify -__APIREL__/utility/db_verify.html__OCT__3 utility to verify @database files -__APIREL__/utility/db_verify.html__OCT__4 utility to @verify database files +__APIREL__/utility/berkeley_db_svc.html__OCT__2 utility to support @RPC client/server +__APIREL__/utility/db_archive.html__OCT__2 utility to @archive log files +__APIREL__/utility/db_checkpoint.html__OCT__2 utility to take @checkpoints +__APIREL__/utility/db_deadlock.html__OCT__2 utility to detect @deadlocks +__APIREL__/utility/db_dump.html__OCT__2 utility to @dump databases as text files +__APIREL__/utility/db_load.html__OCT__2 utility to @load text files into databases +__APIREL__/utility/db_printlog.html__OCT__2 utility to display @log files as text +__APIREL__/utility/db_recover.html__OCT__2 utility to @recover database environments +__APIREL__/utility/db_stat.html__OCT__2 utility to display database and environment @statistics +__APIREL__/utility/db_upgrade.html__OCT__2 utility to upgrade @database files +__APIREL__/utility/db_upgrade.html__OCT__3 utility to @upgrade database files +__APIREL__/utility/db_verify.html__OCT__2 utility to verify @database files +__APIREL__/utility/db_verify.html__OCT__3 utility to @verify database files diff --git a/db/docs/ref/program/appsignals.html b/db/docs/ref/program/appsignals.html index cc0f9b8e0..378af10c0 100644 --- a/db/docs/ref/program/appsignals.html +++ b/db/docs/ref/program/appsignals.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Signal handling - + @@ -34,6 +34,6 @@ underlying system calls that return failure with errno set to EINTR will be restarted rather than failing.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/program/cache.html b/db/docs/ref/program/cache.html index 862c625a5..fa7d7685f 100644 --- a/db/docs/ref/program/cache.html +++ b/db/docs/ref/program/cache.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Disk drive caches - + @@ -30,6 +30,6 @@ Many times, this means that write-caching on the disk drive must be disabled.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/program/compatible.html b/db/docs/ref/program/compatible.html index c296d7e8d..9537853ce 100644 --- a/db/docs/ref/program/compatible.html +++ b/db/docs/ref/program/compatible.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Compatibility with historic UNIX interfaces - + @@ -28,6 +28,6 @@ information. No utilities are provided to convert UNIX ndbm, or hsearch databases.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/program/copy.html b/db/docs/ref/program/copy.html index ebffa1ce1..4aee3846c 100644 --- a/db/docs/ref/program/copy.html +++ b/db/docs/ref/program/copy.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Copying databases - + @@ -24,16 +24,15 @@ When multiple processes or threads open the same database file in Berkeley DB, it is this bytestring that is used to ensure that the same underlying pages are updated in the shared memory buffer pool, no matter which Berkeley DB handle is used for the operation.

    -

    It is usually a bad idea to physically copy a database to a new name. In -the few cases in which copying is the best solution for your application, -you must guarantee that there are never two different databases with -the same file identification bytestring in the memory pool at the same -time. Copying databases is further complicated by the fact that the -shared memory buffer pool does not discard all cached copies of pages -for a database when the database is logically closed; that is, when -DB->close is called. Nor is there a Berkeley DB interface to -explicitly discard pages from the shared memory buffer pool for any -particular database.

    +

It is usually a bad idea to physically copy a database to a new name. +In the few cases in which copying is the best solution for your +application, you must guarantee that there are never two different +databases with the same file identification bytestring in the memory +pool at the same time. Copying databases is further complicated because +the shared memory buffer pool does not discard cached database pages +when the database is closed by calling the DB->close method; cached +pages are only discarded when the database is removed by calling the +DB->remove method.

    Before copying a database, you must ensure that all modified pages have been written from the memory pool cache to the backing database file. This is done using the DB->sync or DB->close methods.
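A minimal sketch of that step, assuming "dbp" is an already-open handle for the database about to be copied with operating system utilities:

    #include <db.h>

    /* Flush all modified pages for this database from the cache to disk. */
    int
    flush_before_copy(DB *dbp)
    {
        int ret;

        if ((ret = dbp->sync(dbp, 0)) != 0)
            dbp->err(dbp, ret, "DB->sync");
        return (ret);
    }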

    @@ -53,15 +52,13 @@ create a new file that will have a new bytestring is to call the then use the db_load utility to load the dumped output into a new file. This allows you to access both the original and copy of the database at the same time. -

  • If your database is too large to be copied, overwrite the bytestring in -the copied database with a new bytestring. This allows you to access -both the original and copy of the database at the same time. -If there are multiple databases in a single physical file, -the bytestring found in the first page of each database needs to -be overwritten, not just the first page of the physical file. +

  • If your database is too large to be copied, reset the bytestring in the +copied database to a new bytestring. This allows you to access both the +original and copy of the database at the same time. You can reset the +bytestring with the -r flag to the db_load utility.

  • PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/program/environ.html b/db/docs/ref/program/environ.html index 462b6750a..73e7f3685 100644 --- a/db/docs/ref/program/environ.html +++ b/db/docs/ref/program/environ.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Environment variables - + @@ -17,18 +17,18 @@

    Environment variables

    The Berkeley DB library uses the following environment variables:

    -

    -

    DB_HOME
    If the environment variable DB_HOME is set, it is used as part of +
    +
    DB_HOME
    If the environment variable DB_HOME is set, it is used as part of File Naming. Note: For the DB_HOME variable to take effect, either the DB_USE_ENVIRON or DB_USE_ENVIRON_ROOT flags must be specified to DB_ENV->open. -

    TMPDIR, TEMP, TMP, TempFolder
    The TMPDIR, TEMP, TMP, and TempFolder environment variables are all +
    TMPDIR, TEMP, TMP, TempFolder
    The TMPDIR, TEMP, TMP, and TempFolder environment variables are all checked as locations in which to create temporary files. See DB_ENV->set_tmp_dir for more information.
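For the variables above to take effect, the application must opt in when opening the environment. A minimal sketch, leaving the db_home argument NULL so that DB_HOME is consulted; the subsystem flags chosen here are illustrative:

    #include <db.h>

    int
    open_env_from_environment(DB_ENV **dbenvp)
    {
        DB_ENV *dbenv;
        int ret;

        if ((ret = db_env_create(&dbenv, 0)) != 0)
            return (ret);
        if ((ret = dbenv->open(dbenv, NULL,
            DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK | DB_USE_ENVIRON, 0)) != 0) {
            (void)dbenv->close(dbenv, 0);
            return (ret);
        }
        *dbenvp = dbenv;
        return (0);
    }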

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/program/errorret.html b/db/docs/ref/program/errorret.html index f5e8f793a..5189d2e5e 100644 --- a/db/docs/ref/program/errorret.html +++ b/db/docs/ref/program/errorret.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Error returns to applications - + @@ -20,13 +20,13 @@ interfaces, Berkeley DB does not use the global variable errno to return error values. The return values for all Berkeley DB functions are grouped into the following three categories:

    -

    -

    0
    A return value of 0 indicates that the operation was successful. -

    > 0
    A return value that is greater than 0 indicates that there was a system +
    +
    0
    A return value of 0 indicates that the operation was successful. +
    > 0
    A return value that is greater than 0 indicates that there was a system error. The errno value returned by the system is returned by the function; for example, when a Berkeley DB function is unable to allocate memory, the return value from the function will be ENOMEM. -

    < 0
    A return value that is less than 0 indicates a condition that was not +
    < 0
    A return value that is less than 0 indicates a condition that was not a system failure, but was not an unqualified success, either. For example, a routine to retrieve a key/data pair from the database may return DB_NOTFOUND when the key/data pair does not appear in @@ -62,8 +62,9 @@ DB_KEYEMPTY for records that were created as part of a transaction that was later aborted and never re-created.

    DB_KEYEXIST

    The DB_KEYEXIST error return indicates the DB_NOOVERWRITE -option was specified to the DB->put method and the key already exists -in the database.

+option was specified when inserting a key/data pair into the database and +the key already exists in the database, or the DB_NODUPDATA +option was specified and the key/data pair already exists in the database.

    DB_LOCK_DEADLOCK

    When multiple threads of control are modifying the database, there is @@ -108,6 +109,6 @@ by simply exiting the application when the callback function is called in applications that have no cleanup processing of their own.
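The usual three-way treatment of a Berkeley DB return value looks like the following sketch (the key string and the retrieval call are illustrative; any Berkeley DB method returning an int can be handled the same way):

    #include <stdio.h>
    #include <string.h>
    #include <db.h>

    int
    lookup(DB *dbp, const char *keystr)
    {
        DBT key, data;
        int ret;

        memset(&key, 0, sizeof(key));
        memset(&data, 0, sizeof(data));
        key.data = (void *)keystr;
        key.size = (u_int32_t)strlen(keystr);

        switch (ret = dbp->get(dbp, NULL, &key, &data, 0)) {
        case 0:                          /* success */
            printf("%s: %.*s\n", keystr, (int)data.size, (char *)data.data);
            break;
        case DB_NOTFOUND:                /* special return, < 0: not a failure */
            printf("%s: no such key\n", keystr);
            break;
        default:                         /* > 0: a system error such as ENOMEM */
            dbp->err(dbp, ret, "DB->get: %s", keystr);
            break;
        }
        return (ret);
    }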


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/program/faq.html b/db/docs/ref/program/faq.html index 3e644d64f..e58491447 100644 --- a/db/docs/ref/program/faq.html +++ b/db/docs/ref/program/faq.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Programmer notes FAQ - + @@ -38,6 +38,6 @@ contains the wrapper object.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/program/mt.html b/db/docs/ref/program/mt.html index 713fe89aa..80321efc4 100644 --- a/db/docs/ref/program/mt.html +++ b/db/docs/ref/program/mt.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Multithreaded applications - + @@ -73,6 +73,6 @@ programs may have additional requirements. For more information, see file.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/program/namespace.html b/db/docs/ref/program/namespace.html index 26151e180..b3b3db7ed 100644 --- a/db/docs/ref/program/namespace.html +++ b/db/docs/ref/program/namespace.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Name spaces - + @@ -30,19 +30,27 @@ each architecture. If they are not provided by the system, they are typedef'd in the db.h include file. The types that may be typedef'd by db.h include the following: u_int8_t, int16_t, u_int16_t, int32_t, u_int32_t, u_char, u_short, u_int, and u_long.

    -

    The Berkeley DB library declares a number of external routines. All these -routines are prefixed with the strings "db_", "lock_", "log_", "memp_" -or "txn_". All internal routines are prefixed with the strings "__db_", -"__lock_", "__log_", "__memp_", or "__txn_".

    +

    The Berkeley DB library declares a few external routines. All these routines +are prefixed with the strings "db_". All internal Berkeley DB routines are +prefixed with the strings "__XXX_", where "XXX" is the subsystem prefix +(for example, "__db_XXX_" and "__txn_XXX_").

    Filesystem Name Space

    Berkeley DB environments create or use some number of files in environment home directories. These files are named DB_CONFIG, "log.NNNNN" (for example, log.0000000003, where the number of digits following the dot is unspecified), or with the string prefix "__db" (for example, -__db.001). Database files that match these names should not be created -in the environment directory.

    +__db.001). Applications should never create files or databases in +database environment home directories with names beginning with the +characters "log" or "__db".

    +

    In some cases, applications may choose to remove Berkeley DB files as part of +their cleanup procedures, using system utilities instead of Berkeley DB +interfaces (for example, using the UNIX rm utility instead of the +DB_ENV->remove method). This is not a problem, as long as applications +limit themselves to removing only files named "__db.###", where "###" +are the digits 0 through 9. Applications should never remove any files +named with the prefix "__db" or "log", other than "__db.###" files.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/program/ram.html b/db/docs/ref/program/ram.html index 40761f455..d050577b3 100644 --- a/db/docs/ref/program/ram.html +++ b/db/docs/ref/program/ram.html @@ -1,12 +1,12 @@ - - + + -Berkeley DB Reference Guide: RAM-only configurations +Berkeley DB Reference Guide: Memory-only configurations - + @@ -14,22 +14,17 @@
    PrevRefNext

    -

    RAM-only configurations

    -

    Berkeley DB supports RAM-only configurations, however, Berkeley DB does not -guarantee writes will never be attempted to disk.

    -

    You can configure RAM-only databases by not specifying a physical file -name to the DB->open method. Databases created in this way will never -be written to disk unless Berkeley DB runs out of cache space. Running out -of cache space happens when the application attempts to create a new -database page and there is no spare room in the cache, nor is there a -clean page which can simply be evicted from the cache, and a dirty page -must be written from the cache to disk to make room to create the new -page. When Berkeley DB runs out of cache space, it will attempt to create a -temporary backing file to make new room in the cache, which is not what -you want.

    -

    You can create RAM-only database environments (in which you can work -with both RAM-only and disk-backed databases) in two different types of -memory: in heap memory or in system shared memory. To create the +

    Memory-only configurations

    +

    Berkeley DB supports a variety of memory-only configurations, intended for +systems where filesystem space is limited in availability or entirely +replaced by some combination of RAM and Flash. There are three database +environment files that are potentially written to disk: database +environment shared region files, database files and log files. Each of +these file types can be individually configured to be created in memory +rather than on disk.

    +

    First, database environment shared region files. Applications can +create RAM-only database environments in two different types of memory: +in application heap memory or in system shared memory. To create the database environment in heap memory, specify the DB_PRIVATE flag to the DB_ENV->open method. Database environments created in heap memory are only accessible to the threads of a single process, however. @@ -39,15 +34,29 @@ environments created in system memory are accessible to multiple processes. However, database environments created in system shared memory do create a small (roughly 8 byte) file in the file system read by the processes to identify the system shared memory segments to use.

    -

    Finally, if your database environment is intended to be transactionally -protected or recoverable after application or system failure (that is, -if you configure either the locking or transaction subsystems in the -database environment), both the databases and database environment log -files must be written to disk. There is currently no way in the Berkeley DB -database environment to support transactions without writing log files -to disk.

    +

    Second, database files. Applications can create RAM-only databases by +not specifying a physical filename when opening the database using the +DB->open method. Normally, if the database environment cache fills +up Berkeley DB will create temporary backing files for RAM-only databases. +Use the DB_MPOOL_NOFILE flag to the DB_MPOOLFILE->set_flags method to +configure the cache to never create backing files for a database. Use +the DB_MPOOLFILE->set_maxsize method to limit the size of a database so it +cannot dirty the entire cache.

    +

    Third, log files. If a database environment is intended to be +transactionally recoverable after application or system failure (that +is, if it will exhibit the transactional attribute of "durability"), the +databases and the database environment log files must either be written +to the local disk and recovered, or they must be replicated to other +systems. When durability is not desired, or is accomplished through +replication, local database environments can be configured for +transactional behavior without durability, resulting in log files never +being written to disk. To configure a database environment this way, +specify the DB_LOG_INMEMORY flag to the +DB_ENV->set_flags method. To configure an individual database in a +database environment to be transactional but not durable, specify the +DB_TXN_NOT_DURABLE flag to the DB->set_flags method.
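The following is a minimal sketch following the flags named above: a private environment with in-memory logging, and a database opened without a backing file. The cache size, log buffer size, and the choice of a Btree database are arbitrary assumptions for illustration:

    #include <db.h>

    int
    open_memory_only(DB_ENV **dbenvp, DB **dbpp)
    {
        DB_ENV *dbenv;
        DB *dbp;
        int ret;

        if ((ret = db_env_create(&dbenv, 0)) != 0)
            return (ret);
        (void)dbenv->set_cachesize(dbenv, 0, 64 * 1024 * 1024, 1);
        (void)dbenv->set_flags(dbenv, DB_LOG_INMEMORY, 1);  /* no log files on disk */
        (void)dbenv->set_lg_bsize(dbenv, 10 * 1024 * 1024);
        if ((ret = dbenv->open(dbenv, NULL, DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL |
            DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN, 0)) != 0)
            goto err;

        if ((ret = db_create(&dbp, dbenv, 0)) != 0)
            goto err;
        /* A NULL file name keeps the database itself in the cache only;
         * DB_MPOOL_NOFILE, set through the handle's DB_MPOOLFILE, additionally
         * prevents temporary backing files from ever being created. */
        if ((ret = dbp->open(dbp,
            NULL, NULL, NULL, DB_BTREE, DB_CREATE | DB_AUTO_COMMIT, 0)) != 0) {
            (void)dbp->close(dbp, 0);
            goto err;
        }
        *dbenvp = dbenv;
        *dbpp = dbp;
        return (0);

    err:    (void)dbenv->close(dbenv, 0);
        return (ret);
    }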


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/program/runtime.html b/db/docs/ref/program/runtime.html index 2a8dc2b8e..e28fbd5ed 100644 --- a/db/docs/ref/program/runtime.html +++ b/db/docs/ref/program/runtime.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Run-time configuration - + @@ -26,10 +26,13 @@ interfaces. The following interfaces support this functionality:

db_env_set_func_exists, db_env_set_func_free, db_env_set_func_fsync, +db_env_set_func_ftruncate, db_env_set_func_ioinfo, db_env_set_func_malloc, db_env_set_func_map, db_env_set_func_open, +db_env_set_func_pread, +db_env_set_func_pwrite, db_env_set_func_read, db_env_set_func_realloc, db_env_set_func_rename @@ -51,6 +54,6 @@ written using previous versions of the Berkeley DB APIs, and is only useful as an example.
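As a sketch of how these interfaces are used, the following routes all Berkeley DB allocations through hypothetical wrapper functions; the replacements take effect process-wide and must be installed before any other Berkeley DB interface is called:

    #include <stdlib.h>
    #include <db.h>

    static void *my_malloc(size_t len)            { return (malloc(len)); }
    static void *my_realloc(void *p, size_t len)  { return (realloc(p, len)); }
    static void  my_free(void *p)                 { free(p); }

    int
    configure_runtime(void)
    {
        int ret;

        if ((ret = db_env_set_func_malloc(my_malloc)) != 0 ||
            (ret = db_env_set_func_realloc(my_realloc)) != 0 ||
            (ret = db_env_set_func_free(my_free)) != 0)
            return (ret);
        return (0);
    }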


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/program/scope.html b/db/docs/ref/program/scope.html index 782bb5ede..5262f3758 100644 --- a/db/docs/ref/program/scope.html +++ b/db/docs/ref/program/scope.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Berkeley DB handles - + @@ -19,8 +19,8 @@

    The Berkeley DB library has a number of object handles. The following table lists those handles, their scope, and whether they are free-threaded (that is, whether multiple threads within a process can share them).

    -

    -

    DB_ENV
    The DB_ENV handle, created by the db_env_create method, refers +
    +
    DB_ENV
    The DB_ENV handle, created by the db_env_create method, refers to a Berkeley DB database environment -- a collection of Berkeley DB subsystems, log files and databases. DB_ENV handles are free-threaded if the DB_THREAD flag is specified to the DB_ENV->open method when @@ -29,7 +29,7 @@ other handle remains open that is using it as a reference (for example, DB or DB_TXN). Once either the DB_ENV->close or DB_ENV->remove methods are called, the handle may not be accessed again, regardless of the method's return. -

    DB_TXN
    The DB_TXN handle, created by the DB_ENV->txn_begin method, refers to +
    DB_TXN
    The DB_TXN handle, created by the DB_ENV->txn_begin method, refers to a single transaction. The handle is not free-threaded. Transactions may span threads, but only serially, that is, the application must serialize access to the DB_TXN handles. In the case of nested @@ -42,15 +42,15 @@ In addition, parent transactions may not issue any Berkeley DB operations while they have active child transactions (child transactions that have not yet been committed or aborted) except for DB_ENV->txn_begin, DB_TXN->abort and DB_TXN->commit.

    -

    DB_LOGC
    The DB_LOGC handle refers to a cursor into the log files. The +
    DB_LOGC
    The DB_LOGC handle refers to a cursor into the log files. The handle is not free-threaded. Once the DB_LOGC->close method is called, the handle may not be accessed again, regardless of the method's return. -

    DB_MPOOLFILE
    The DB_MPOOLFILE handle refers to an open file in the shared +
    DB_MPOOLFILE
    The DB_MPOOLFILE handle refers to an open file in the shared memory buffer pool of the database environment. The handle is not free-threaded. Once the DB_MPOOLFILE->close method is called, the handle may not be accessed again, regardless of the method's return. -

    DB
    The DB handle, created by the db_create method, refers to a +
    DB
    The DB handle, created by the db_create method, refers to a single Berkeley DB database, which may or may not be part of a database environment. DB handles are free-threaded if the DB_THREAD flag is specified to the DB->open method when the @@ -63,7 +63,7 @@ database have not yet been committed or aborted. Once the DB->close, DB->remove, or DB->rename methods are called, the handle may not be accessed again, regardless of the method's return. -

    DBC
    The DBC handle refers to a cursor into a Berkeley DB database. The +
    DBC
    The DBC handle refers to a cursor into a Berkeley DB database. The handle is not free-threaded. Cursors may span threads, but only serially, that is, the application must serialize access to the DBC handles. If the cursor is to be used to perform operations @@ -74,6 +74,6 @@ method's return.
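A minimal sketch of creating free-threaded DB_ENV and DB handles, so the same handles may be shared by all threads of a process; the database name "data.db" and the subsystem flags are illustrative assumptions:

    #include <db.h>

    int
    open_threaded(const char *home, DB_ENV **dbenvp, DB **dbpp)
    {
        DB_ENV *dbenv;
        DB *dbp;
        int ret;

        if ((ret = db_env_create(&dbenv, 0)) != 0)
            return (ret);
        if ((ret = dbenv->open(dbenv, home, DB_CREATE | DB_THREAD |
            DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN, 0)) != 0)
            goto err;

        if ((ret = db_create(&dbp, dbenv, 0)) != 0)
            goto err;
        if ((ret = dbp->open(dbp, NULL, "data.db", NULL,
            DB_BTREE, DB_CREATE | DB_THREAD | DB_AUTO_COMMIT, 0)) != 0) {
            (void)dbp->close(dbp, 0);
            goto err;
        }
        *dbenvp = dbenv;
        *dbpp = dbp;
        return (0);

    err:    (void)dbenv->close(dbenv, 0);
        return (ret);
    }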

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/refs/refs.html b/db/docs/ref/refs/refs.html index 9902f1d2a..a3251af9d 100644 --- a/db/docs/ref/refs/refs.html +++ b/db/docs/ref/refs/refs.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Additional references - + @@ -22,55 +22,56 @@ we recommend the following sources:

    subject to copyrights held by the conference organizers and the authors of the papers. Sleepycat Software makes them available here as a courtesy with the permission of the copyright holders.

    -

    -

    Berkeley DB (HTML, Postscript)
    Michael Olson, Keith Bostic, and Margo Seltzer, Proceedings of the 1999 +
    +
    Berkeley DB +(HTML, Postscript)
    Michael Olson, Keith Bostic, and Margo Seltzer, Proceedings of the 1999 Summer Usenix Technical Conference, Monterey, California, June 1999. This paper describes recent commercial releases of Berkeley DB, its most important features, the history of the software, and Sleepycat's Open Source licensing policies. -

    Challenges in Embedded Database System Administration +
    Challenges in Embedded Database System Administration (HTML)
    Margo Seltzer and Michael Olson, First Workshop on Embedded Systems, Cambridge, Massachusetts, March 1999. This paper describes the challenges that face embedded systems developers, and how Berkeley DB has been designed to address them. -

    LIBTP: Portable Modular Transactions for UNIX +
    LIBTP: Portable Modular Transactions for UNIX (Postscript)
    Margo Seltzer and Michael Olson, USENIX Conference Proceedings, Winter 1992. This paper describes an early prototype of the transactional system for Berkeley DB. -

    A New Hashing Package for UNIX +
    A New Hashing Package for UNIX (Postscript)
    Margo Seltzer and Oz Yigit, USENIX Conference Proceedings, Winter 1991. This paper describes the Extended Linear Hashing techniques used by Berkeley DB.

    Background on Berkeley DB Features

    These papers, although not specific to Berkeley DB, give a good overview of the way different Berkeley DB features were implemented.

    -

    -

    Operating System Support for Database Management
    Michael Stonebraker, Communications of the ACM 24(7), 1981, pp. 412-418. -

    Dynamic Hash Tables
    Per-Ake Larson, Communications of the ACM, April 1988. -

    Linear Hashing: A New Tool for File and Table Addressing
    Witold Litwin, Proceedings of the 6th International +
    +
    Operating System Support for Database Management
    Michael Stonebraker, Communications of the ACM 24(7), 1981, pp. 412-418. +
    Dynamic Hash Tables
    Per-Ake Larson, Communications of the ACM, April 1988. +
    Linear Hashing: A New Tool for File and Table Addressing
    Witold Litwin, Proceedings of the 6th International Conference on Very Large Databases (VLDB), 1980 -

    The Ubiquitous B-tree
    Douglas Comer, ACM Comput. Surv. 11, 2 (June 1979), pp. 121-138. -

    Prefix B-trees
    Bayer and Unterauer, ACM Transactions on Database Systems, Vol. 2, 1 +
    The Ubiquitous B-tree
    Douglas Comer, ACM Comput. Surv. 11, 2 (June 1979), pp. 121-138. +
    Prefix B-trees
    Bayer and Unterauer, ACM Transactions on Database Systems, Vol. 2, 1 (March 1977), pp. 11-26. -

    The Art of Computer Programming Vol. 3: Sorting and Searching
    D.E. Knuth, 1968, pp. 471-480. -

    Document Processing in a Relational Database System
    Michael Stonebraker, Heidi Stettner, Joseph Kalash, Antonin Guttman, +
    The Art of Computer Programming Vol. 3: Sorting and Searching
    D.E. Knuth, 1968, pp. 471-480. +
    Document Processing in a Relational Database System
    Michael Stonebraker, Heidi Stettner, Joseph Kalash, Antonin Guttman, Nadene Lynn, Memorandum No. UCB/ERL M82/32, May 1982.

    Database Systems Theory

    These publications are standard reference works on the design and implementation of database systems. Berkeley DB uses many of the ideas they describe.

    -

    -

    Transaction Processing Concepts and Techniques
    by Jim Gray and Andreas Reuter, Morgan Kaufmann Publishers. +
    +
    Transaction Processing Concepts and Techniques
    by Jim Gray and Andreas Reuter, Morgan Kaufmann Publishers. We recommend chapters 1, 4 (skip 4.6, 4.7, 4.9, 4.10 and 4.11), 7, 9, 10.3, and 10.4. -

    An Introduction to Database Systems, Volume 1
    by C.J. Date, Addison Wesley Longman Publishers. +
    An Introduction to Database Systems, Volume 1
    by C.J. Date, Addison Wesley Longman Publishers. In the 5th Edition, we recommend chapters 1, 2, 3, 16 and 17. -

    Concurrency Control and Recovery in Database Systems
    by Bernstein, Goodman, Hadzilaco. Currently out of print, but available +
    Concurrency Control and Recovery in Database Systems
    by Bernstein, Goodman, Hadzilaco. Currently out of print, but available from http://research.microsoft.com/pubs/ccontrol/.

    PrevRef
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/refs/witold.html b/db/docs/ref/refs/witold.html index 9bd511c37..3dd53a61f 100644 --- a/db/docs/ref/refs/witold.html +++ b/db/docs/ref/refs/witold.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Witold Litwin - + @@ -19,6 +19,6 @@ chase up the mountains of Austria in search of very green wine.


    Ref
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/rep/app.html b/db/docs/ref/rep/app.html index 5a652ad59..652178afb 100644 --- a/db/docs/ref/rep/app.html +++ b/db/docs/ref/rep/app.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Building replicated applications - + @@ -22,12 +22,12 @@ applications use the following additional four Berkeley DB methods: DB_ENV->rep_elect, DB_ENV->rep_process_message, DB_ENV->rep_start and DB_ENV->set_rep_transport and may also use the configuration method DB_ENV->set_rep_limit:

    -

    -

    DB_ENV->set_rep_transport
    The DB_ENV->set_rep_transport method configures the replication system's +
    +
    DB_ENV->set_rep_transport
    The DB_ENV->set_rep_transport method configures the replication system's communications infrastructure. -

    DB_ENV->rep_start
    The DB_ENV->rep_start method configures (or reconfigures) an existing database +
    DB_ENV->rep_start
    The DB_ENV->rep_start method configures (or reconfigures) an existing database environment to be a replication master or client. -

    DB_ENV->rep_process_message
    The DB_ENV->rep_process_message method is used to process incoming messages from other +
    DB_ENV->rep_process_message
    The DB_ENV->rep_process_message method is used to process incoming messages from other environments in the replication group. For clients, it is responsible for accepting log records and updating the local databases based on messages from the master. For both the master and the clients, it is @@ -36,10 +36,10 @@ protocol for dealing with lost messages), and permitting new clients to join an active replication group. This method should only be called after the environment has been configured as a replication master or client via DB_ENV->rep_start. -

    DB_ENV->rep_elect
    The DB_ENV->rep_elect method causes the replication group to elect a new +
    DB_ENV->rep_elect
    The DB_ENV->rep_elect method causes the replication group to elect a new master; it is called whenever contact with the master is lost and the application wants the remaining sites to select a new master. -

    DB_ENV->set_rep_limit
    The DB_ENV->set_rep_limit method imposes an upper bound on the amount of data +
    DB_ENV->set_rep_limit
    The DB_ENV->set_rep_limit method imposes an upper bound on the amount of data that will be sent in response to a single call to DB_ENV->rep_process_message. During client recovery, that is, when a replica site is trying to synchronize with the master, clients may ask the master for a large @@ -53,6 +53,11 @@ control and accepting other messages. must be changed and the application's communications infrastructure must be written. The application initialization changes are relatively simple, but the communications infrastructure code can be complex.

    +

    For implementation reasons, all replicated databases must reside in the +data directories set from DB_ENV->set_data_dir or in the default +environment home directory. If your databases reside in the default +environment home directory, they must be in the home directory itself, +not subdirectories below the environment home.

    During application initialization, the application performs three additional tasks: first, it must specify the DB_INIT_REP flag when opening its database environment; second, it must provide Berkeley DB @@ -73,20 +78,29 @@ The result of calling DB_ENV->rep_start< master, or the declaration of the local environment as the master. If a master has not been discovered after a reasonable amount of time, the application should call DB_ENV->rep_elect to call for an election.

    -

    In the case of multiple processes or threads accessing a replicated -environment, any environment handle that modifies databases in the -environment or processes replication messages must call the -DB_ENV->rep_start method. Note that not all processes running in replicated -environments need to call DB_ENV->set_rep_transport or DB_ENV->rep_start. -Read-only processes running in a master environment do not need to be -configured for replication in any way. Processes running in a client -environment are read-only by definition, and so do not need to be -configured for replication either (although, in the case of clients that -may become masters, it is usually simplest to configure for replication -on process startup rather than trying to reconfigure when the client -becomes a master). Obviously, at least one thread of control on each -client must be configured for replication as messages must be passed -between the master and the client.

    +

Consider the case of multiple processes or multiple environment handles +that modify databases in the replicated environment. All modifications +must be done on the master environment. The first process to join or +create the master environment must call both the DB_ENV->set_rep_transport method +and the DB_ENV->rep_start method. Subsequent replication processes must at +least call the DB_ENV->set_rep_transport method. Those processes may call the +DB_ENV->rep_start method (as long as they use the same master or client +argument). If multiple processes are modifying the master environment, +there must be a unified communication infrastructure such that messages +arriving at clients have a single master ID. Additionally, the +application must be structured so that all incoming messages are able +to be processed by a single DB_ENV handle.

    +

    Note that not all processes running in replicated environments need to +call DB_ENV->set_rep_transport or DB_ENV->rep_start. Read-only processes +running in a master environment do not need to be configured for +replication in any way. Processes running in a client environment are +read-only by definition, and so do not need to be configured for +replication either (although, in the case of clients that may become +masters, it is usually simplest to configure for replication on process +startup rather than trying to reconfigure when the client becomes a +master). Obviously, at least one thread of control on each client must +be configured for replication as messages must be passed between the +master and the client.

    For implementation reasons, all incoming replication messages must be processed using the same DB_ENV handle. It is not required that a single thread of control process all messages, only that all threads @@ -96,6 +110,6 @@ participating in a replication group. The application should shut down the environment in the usual manner, by calling the DB_ENV->close method.
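The replication-specific part of initialization can be sketched as follows. It assumes an environment already opened with DB_INIT_REP; the SELF_EID value and the app_send callback are hypothetical pieces supplied by the application's own communication layer:

    #include <db.h>

    #define SELF_EID 1    /* hypothetical environment ID for the local site */

    /* Supplied by the application's communication infrastructure (assumption). */
    extern int app_send(DB_ENV *, const DBT *control, const DBT *rec,
        const DB_LSN *lsnp, int envid, u_int32_t flags);

    int
    start_replication(DB_ENV *dbenv, int is_master)
    {
        int ret;

        if ((ret = dbenv->set_rep_transport(dbenv, SELF_EID, app_send)) != 0)
            return (ret);
        return (dbenv->rep_start(dbenv,
            NULL, is_master ? DB_REP_MASTER : DB_REP_CLIENT));
    }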


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/rep/comm.html b/db/docs/ref/rep/comm.html index 9c5cadd16..78034556f 100644 --- a/db/docs/ref/rep/comm.html +++ b/db/docs/ref/rep/comm.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Building the communications infrastructure - + @@ -51,23 +51,23 @@ deliver any number of messages simultaneously, and from any arbitrary thread or process in the Berkeley DB environment.

    There are a number of informational returns from the DB_ENV->rep_process_message method:

    -

    -

    DB_REP_DUPMASTER
    When DB_ENV->rep_process_message returns DB_REP_DUPMASTER, it means that +
    +
    DB_REP_DUPMASTER
    When DB_ENV->rep_process_message returns DB_REP_DUPMASTER, it means that another database environment in the replication group also believes itself to be the master. The application should complete all active transactions, close all open database handles, reconfigure itself as a client using the DB_ENV->rep_start method, and then call for an election by calling the DB_ENV->rep_elect method. -

    DB_REP_HOLDELECTION
    When DB_ENV->rep_process_message returns DB_REP_HOLDELECTION, it means +
    DB_REP_HOLDELECTION
    When DB_ENV->rep_process_message returns DB_REP_HOLDELECTION, it means that another database environment in the replication group has called for an election. The application should call the DB_ENV->rep_elect method. -

    DB_REP_ISPERM
    When DB_ENV->rep_process_message returns DB_REP_ISPERM, it means a +
    DB_REP_ISPERM
    When DB_ENV->rep_process_message returns DB_REP_ISPERM, it means a permanent record, perhaps a message previously returned as DB_REP_NOTPERM was successfully written to disk. This record may have filled a gap in the log record that allowed additional records to be written. The ret_lsnp contains the maximum LSN of the permanent records written. -

    DB_REP_NEWMASTER
    When DB_ENV->rep_process_message returns DB_REP_NEWMASTER, it means that +
    DB_REP_NEWMASTER
    When DB_ENV->rep_process_message returns DB_REP_NEWMASTER, it means that a new master has been elected. The call will also return the local environment's ID for that master. If the ID of the master has changed, the application may need to reconfigure itself (for example, to redirect @@ -75,11 +75,11 @@ update queries to the new master rather then the old one). If the new master is the local environment, then the application must call the DB_ENV->rep_start method, and reconfigure the supporting Berkeley DB library as a replication master. -

    DB_REP_NEWSITE
    When DB_ENV->rep_process_message returns DB_REP_NEWSITE, it means that +
    DB_REP_NEWSITE
    When DB_ENV->rep_process_message returns DB_REP_NEWSITE, it means that a message from a previously unknown member of the replication group has been received. The application should reconfigure itself as necessary so it is able to send messages to this site. -

    DB_REP_NOTPERM
    When DB_ENV->rep_process_message returns DB_REP_NOTPERM, it means a +
    DB_REP_NOTPERM
    When DB_ENV->rep_process_message returns DB_REP_NOTPERM, it means a message marked as DB_REP_PERMANENT was processed successfully but was not written to disk. This is normally an indication that one or more messages, which should have arrived before this message, have @@ -87,14 +87,14 @@ not yet arrived. This operation will be written to disk when the missing messages arrive. The ret_lsnp argument will contain the LSN of this record. The application should take whatever action is deemed necessary to retain its recoverability characteristics. -

    DB_REP_OUTDATED
    When DB_ENV->rep_process_message returns DB_REP_OUTDATED, it means that -the environment has been partitioned from the master for too long a -time, and the master no longer has the necessary log files to update -the local client. The application should shut down, and the client -should be reinitialized (see Initializing a new site for more information). +
    DB_REP_STARTUPDONE
    When DB_ENV->rep_process_message returns DB_REP_STARTUPDONE, it means that +the client has completed its startup synchronization activities and is +now processing live log messages from the master. Live log messages +are messages that the master is sending due to operations, as opposed +to resending log messages due to a request for log records from the client.
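A sketch of a message-handling loop body that dispatches on these returns; "control", "rec", and "eid" are assumed to have been received through the application's own communication layer, and the comments only indicate the kind of action an application would take:

    #include <db.h>

    int
    handle_message(DB_ENV *dbenv, DBT *control, DBT *rec, int eid)
    {
        DB_LSN permlsn;
        int ret;

        switch (ret = dbenv->rep_process_message(dbenv,
            control, rec, &eid, &permlsn)) {
        case 0:
        case DB_REP_ISPERM:
        case DB_REP_NOTPERM:
        case DB_REP_STARTUPDONE:
            break;                 /* nothing more to do in this sketch */
        case DB_REP_NEWSITE:
            /* add the new site to the communication tables */
            break;
        case DB_REP_HOLDELECTION:
            /* call DB_ENV->rep_elect */
            break;
        case DB_REP_NEWMASTER:
            /* "eid" now names the master; reconfigure if it is the local site */
            break;
        case DB_REP_DUPMASTER:
            /* downgrade with DB_ENV->rep_start, then hold an election */
            break;
        default:
            dbenv->err(dbenv, ret, "DB_ENV->rep_process_message");
            break;
        }
        return (ret);
    }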

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/rep/elect.html b/db/docs/ref/rep/elect.html index c7b690afc..5be6fc3cf 100644 --- a/db/docs/ref/rep/elect.html +++ b/db/docs/ref/rep/elect.html @@ -1,17 +1,17 @@ - - + + Berkeley DB Reference Guide: Elections - + -

    Berkeley DB Reference Guide:
    Berkeley DB Replication

    PrevRefNext +PrevRefNext

    Elections

    @@ -30,8 +30,10 @@ period, then the application should call for an election.

    have no master, and the client must have the most recent log records. In the case of clients having equivalent log records, the priority of the database environments participating in the election will determine -the winner. At least ((N/2) + 1) of the members of the replication -group must participate in the election for a winner to be declared.

    +the winner. The application specifies the minimum number of replication +group members that must participate in an election for a winner to be +declared. We recommend at least ((N/2) + 1) members. If fewer than the +simple majority are specified, a warning will be given.

    If an application's policy for what site should win an election can be parameterized in terms the database environment's information (that is, the number of sites, available log records and a relative priority are @@ -66,8 +68,7 @@ client does not win the election, it is likely that it was not given sufficient time to update itself with respect to the current master.

    If a client is unable to find a master or win an election, it means that the network has been partitioned and there are not enough environments -participating in the election for one of the participants to win (or, -there were only two sites in the replication group and one crashed). +participating in the election for one of the participants to win. In this case, the application should repeatedly call DB_ENV->rep_start and DB_ENV->rep_elect, alternating between attempting to discover an existing master, and holding an election to declare a new one. In @@ -106,8 +107,8 @@ and reconfigure itself as a client using the DB_ENV->rep_elect method. -
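A minimal sketch of calling for an election; the five-site group, the three-vote (simple majority) requirement, and the two-second timeout are arbitrary assumptions for illustration:

    #include <db.h>

    int
    call_election(DB_ENV *dbenv, int priority, int *new_master_eidp)
    {
        /* The timeout is expressed in microseconds. */
        return (dbenv->rep_elect(dbenv,
            5 /* nsites */, 3 /* nvotes */, priority, 2000000,
            new_master_eidp, 0));
    }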

    PrevRefNext +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/rep/ex.html b/db/docs/ref/rep/ex.html index d502e1242..caedded06 100644 --- a/db/docs/ref/rep/ex.html +++ b/db/docs/ref/rep/ex.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Ex_repquote: a replication example - + @@ -30,19 +30,19 @@ attempt to build it.

    The synopsis for ex_repquote is as follows:

    ex_repquote [-MC] [-h home] [-m host:port] [-o host:port] [-n sites] [-p priority]

    The options to ex_repquote are as follows:

    -

    -

    -M
    Configure this process as a master. -

    -C
    Configure this process as a client. -

    -h
    Specify a home directory for the database environment; by +
    +
    -M
    Configure this process as a master. +
    -C
    Configure this process as a client. +
    -h
    Specify a home directory for the database environment; by default, the current working directory is used. -

    -m
    Listen on port "port" of host "host" for incoming connections. -

    -o
    Attempt to connect to another member of the replication group which is +
    -m
    Listen on port "port" of host "host" for incoming connections. +
    -o
    Attempt to connect to another member of the replication group which is listening on host "host" at port "port". Members of a replication group should be able to find all other members of a replication group so long as they are in contact with at least one other member of the replication group. -

    -n
    Specify the total number of sites in the replication group. -

    -p
    Set the election priority. See DB_ENV->rep_elect for more +
    -n
    Specify the total number of sites in the replication group. +
    -p
    Set the election priority. See DB_ENV->rep_elect for more information.

    A typical ex_repquote session begins with a command such as the @@ -60,6 +60,6 @@ having particular clients take over as master in the case that the master fails.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/rep/ex_comm.html b/db/docs/ref/rep/ex_comm.html index 7214a11c2..11556a22f 100644 --- a/db/docs/ref/rep/ex_comm.html +++ b/db/docs/ref/rep/ex_comm.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Ex_repquote: a TCP/IP based communication infrastructure - + @@ -173,6 +173,6 @@ int *eidp): Accept a connection on a socket and add it to the machtab. int listen_socket_connect

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/rep/ex_rq.html b/db/docs/ref/rep/ex_rq.html index 60fb51444..112a4ea4d 100644 --- a/db/docs/ref/rep/ex_rq.html +++ b/db/docs/ref/rep/ex_rq.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Ex_repquote: putting it all together - + @@ -143,7 +143,7 @@ for (ret = 0; ret == 0;) { */ machtab_parm(tab, &n, &pri, &timeout); if ((ret = dbenv->rep_elect(dbenv, - n, pri, timeout, &newm)) != 0) + n, 0, pri, timeout, &newm, 0)) != 0) continue;

    /* @@ -231,6 +231,6 @@ for (ret = 0; ret == 0;) { }


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/rep/faq.html b/db/docs/ref/rep/faq.html index f825f3233..1797171ea 100644 --- a/db/docs/ref/rep/faq.html +++ b/db/docs/ref/rep/faq.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Replication FAQ - + @@ -96,15 +96,9 @@ communications process which is responsible for forwarding the message to the appropriate client. Alternatively, a broadcast mechanism will simplify the entire networking infrastructure, as processes will likely no longer have to maintain their own specific network connections.

    -

  • Can I use replication to replicate just the database -environment's log files? -

    Yes. If the DB_REP_LOGSONLY flag is specified to -DB_ENV->rep_start, the client site acts as a repository for logfiles -(see Log file only clients for more -information).


  • PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/rep/id.html b/db/docs/ref/rep/id.html index 091e6aea6..f00325b27 100644 --- a/db/docs/ref/rep/id.html +++ b/db/docs/ref/rep/id.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Replication environment IDs - + @@ -31,15 +31,15 @@ identifier. Subsequently, Berkeley DB will label outgoing messages to the

    Negative identifiers are reserved for use by Berkeley DB, and should never be assigned to environments by the application. Two of these reserved identifiers are intended for application use, as follows:

    -

    -

    DB_EID_BROADCAST
    The DB_EID_BROADCAST identifier indicates a message should be +
    +
    DB_EID_BROADCAST
    The DB_EID_BROADCAST identifier indicates a message should be broadcast to all members of a replication group. -

    DB_EID_INVALID
    The DB_EID_INVALID identifier is an invalid environment ID, and +
    DB_EID_INVALID
    The DB_EID_INVALID identifier is an invalid environment ID, and may be used to initialize environment ID variables that are subsequently checked for validity.

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/rep/intro.html b/db/docs/ref/rep/intro.html index 65f25d410..f3969f48b 100644 --- a/db/docs/ref/rep/intro.html +++ b/db/docs/ref/rep/intro.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Introduction - + @@ -65,18 +65,18 @@ implemented to perform database updates using a different code path than the standard ones. This means operations that manage to crash the replication master due to a software bug will not necessarily also crash replication clients.

    - + - - - - - - + + + + + +
    Replication and Related MethodsDescription
    DB_ENV->set_rep_transportConfigure replication transport
    DB_ENV->rep_electHold a replication election
    DB_ENV->set_rep_limitLimit data sent in response to a single message
    DB_ENV->rep_process_messageProcess a replication message
    DB_ENV->rep_startConfigure an environment for replication
    DB_ENV->rep_statReplication statistics
    DB_ENV->rep_electHold a replication election
    DB_ENV->rep_process_messageProcess a replication message
    DB_ENV->rep_startConfigure an environment for replication
    DB_ENV->rep_statReplication statistics
    DB_ENV->set_rep_limitLimit data sent in response to a single message
    DB_ENV->set_rep_transportConfigure replication transport
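To connect the methods listed above to code, here is a minimal sketch (an illustration, not part of the original page) of configuring the transport callback and joining the replication group; SELF_EID and app_send are application-supplied names, and the arguments follow the interfaces of this release.

#include <db.h>

#define	SELF_EID	1	/* This site's locally assigned ID (example). */

/* Application transport callback, as sketched elsewhere in this chapter. */
extern int app_send(DB_ENV *, const DBT *, const DBT *,
    const DB_LSN *, int, u_int32_t);

int
start_replication(DB_ENV *dbenv, int is_master)
{
	int ret;

	/* Register the callback used to send replication messages. */
	if ((ret = dbenv->set_rep_transport(dbenv, SELF_EID, app_send)) != 0)
		return (ret);

	/* Join the replication group as either master or client. */
	return (dbenv->rep_start(dbenv, NULL,
	    is_master ? DB_REP_MASTER : DB_REP_CLIENT));
}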

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/rep/newsite.html b/db/docs/ref/rep/newsite.html index 3a2dc0d06..d1477eb36 100644 --- a/db/docs/ref/rep/newsite.html +++ b/db/docs/ref/rep/newsite.html @@ -1,20 +1,25 @@ - - + + Berkeley DB Reference Guide: Connecting to a new site - + -

    Berkeley DB Reference Guide:
    Berkeley DB Replication

    PrevRefNext +PrevRefNext

    Connecting to a new site

    +

To add a new site to the replication group, all that is needed +is for the client member to join. Berkeley DB will perform an +internal initialization from the master to the client automatically +and will run recovery on the client to bring it up to date +with the master.

    Connecting to a new site in the replication group happens whenever the DB_ENV->rep_process_message method returns DB_REP_NEWSITE. The application should assign the new site a local environment ID number, and all future @@ -37,8 +42,8 @@ If no additional information was provided for Berkeley DB to forward to the existing members of the group, the data field of the rec parameter passed to the DB_ENV->rep_process_message method will be NULL after DB_ENV->rep_process_message returns DB_REP_NEWSITE.
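For illustration only (not part of the original page), a minimal sketch of the dispatch described above; machtab_add_site is a hypothetical application helper, and the DB_ENV->rep_process_message argument list shown is assumed to be the one used in this release.

#include <db.h>

/* Hypothetical helper: record the new site and assign it a local ID. */
extern int machtab_add_site(const DBT *hello, int *eidp);

int
handle_rep_message(DB_ENV *dbenv, DBT *control, DBT *rec, int eid)
{
	DB_LSN permlsn;
	int new_eid, ret;

	ret = dbenv->rep_process_message(dbenv, control, rec, &eid, &permlsn);
	if (ret == DB_REP_NEWSITE)
		/* rec holds whatever the new site sent when it joined;
		 * it may be empty. */
		ret = machtab_add_site(rec, &new_eid);
	return (ret);
}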

    -

    PrevRefNext +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/rep/partition.html b/db/docs/ref/rep/partition.html index 326f17845..0b2232630 100644 --- a/db/docs/ref/rep/partition.html +++ b/db/docs/ref/rep/partition.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Network partitions - + @@ -85,6 +85,6 @@ pick a single master, and only hold elections when human intervention has determined the selected master is unable to recover at all.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/rep/pri.html b/db/docs/ref/rep/pri.html index 0428611c4..3ddd77cc4 100644 --- a/db/docs/ref/rep/pri.html +++ b/db/docs/ref/rep/pri.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Replication environment priorities - + @@ -34,6 +34,6 @@ the client priority. If both sites have the same number of log records and the same priority, one is selected at random.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/rep/trans.html b/db/docs/ref/rep/trans.html index f72f3648c..c78ddb416 100644 --- a/db/docs/ref/rep/trans.html +++ b/db/docs/ref/rep/trans.html @@ -1,17 +1,17 @@ - - + + Berkeley DB Reference Guide: Transactional guarantees - + -

    Berkeley DB Reference Guide:
    Berkeley DB Replication

    PrevRefNext +PrevRefNext

    Transactional guarantees

    @@ -233,8 +233,8 @@ a Global Transaction Manager and performing two-phase commit across multiple Berkeley DB database environments. More information on this topic can be found in the Distributed Transactions chapter.

    -

    PrevRefNext +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/rpc/client.html b/db/docs/ref/rpc/client.html index f351d558a..b2bbe3c3c 100644 --- a/db/docs/ref/rpc/client.html +++ b/db/docs/ref/rpc/client.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Client program - + @@ -79,6 +79,6 @@ is usually significantly less than when Berkeley DB is embedded within the client's address space, even if the RPC is to a local address.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/rpc/faq.html b/db/docs/ref/rpc/faq.html index 58149d0e1..bfc8796ce 100644 --- a/db/docs/ref/rpc/faq.html +++ b/db/docs/ref/rpc/faq.html @@ -1,18 +1,18 @@ - - + + Berkeley DB Reference Guide: RPC FAQ - + -

    Berkeley DB Reference Guide:
    RPC Client/Server

    PrevRefNext +PrevRefNext

    RPC FAQ

    @@ -25,8 +25,8 @@ and the client times out the request before the server has a chance to process and reply. If you get this error, try explicitly setting the client timeout value.

    -

    PrevRefNext +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/rpc/intro.html b/db/docs/ref/rpc/intro.html index b6d6d862d..cb0fdcfd3 100644 --- a/db/docs/ref/rpc/intro.html +++ b/db/docs/ref/rpc/intro.html @@ -1,18 +1,18 @@ - - + + Berkeley DB Reference Guide: Introduction - + -

    Berkeley DB Reference Guide:
    RPC Client/Server

    PrevRefNext +PrevRefNext

    Introduction

    @@ -72,8 +72,8 @@ server.
  • Run the berkeley_db_svc server program on the system where the database resides. -

    PrevRefNext +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/rpc/server.html b/db/docs/ref/rpc/server.html index 58b09901a..86dc5d9f7 100644 --- a/db/docs/ref/rpc/server.html +++ b/db/docs/ref/rpc/server.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Server program - + @@ -50,6 +50,6 @@ machine. This means, of course, that only one environment of a particular name is allowed on the server at any given time.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/sequence/intro.html b/db/docs/ref/sequence/intro.html new file mode 100644 index 000000000..2e61785d7 --- /dev/null +++ b/db/docs/ref/sequence/intro.html @@ -0,0 +1,64 @@ + + + + + + +Berkeley DB Reference Guide: Introduction + + + + + + + +

    Berkeley DB Reference Guide:
    Sequences

    PrevRefNext +
    +

    +

    Introduction

    +

    Sequences provide an arbitrary number of persistent objects that return +an increasing or decreasing sequence of integers. Opening a sequence +handle associates it with a record in a database. The handle can +maintain a cache of values from the database so that a database update +is not needed as the application allocates a value.

    +

    A sequence is stored as a record pair in any type of database. It is +referenced by the key used when the sequence is created. The key must +be compatible with the underlying access method. If the access method +of the database has fixed-length records, the record size must be at +least 64 bytes long.

    +

    Since a sequence handle is opened using a database handle, the use of +transactions with the sequence must follow how the database handle was +opened. In other words, if the database handle was opened within a +transaction, operations on the sequence handle must use transactions. +Of course, if sequences are cached, not all operations will actually +trigger a transaction.

    +

    For the highest concurrency, caching should be used and the +DB_AUTO_COMMIT and DB_TXN_NOSYNC flags should be +specified to the DB_SEQUENCE->get method call. If the allocation of the +sequence value must be part of a transaction, and rolled back if the +transaction aborts, then no caching should be specified and the +transaction handle must be passed to the DB_SEQUENCE->get method.

    + + + + + + + + + + + + + + + + + + +
    Sequences and Related MethodsDescription
    db_sequence_createCreate a sequence handle
    DB_SEQUENCE->closeClose a sequence
    DB_SEQUENCE->getGet the next sequence element(s)
    DB_SEQUENCE->get_dbpReturn a handle for the underlying sequence database
    DB_SEQUENCE->get_cachesizeReturn the cache size of a sequence
    DB_SEQUENCE->get_flagsReturn the flags for a sequence
    DB_SEQUENCE->get_rangeReturn the range for a sequence
    DB_SEQUENCE->get_keyReturn the key for a sequence
    DB_SEQUENCE->init_valueSet the initial value of a sequence
    DB_SEQUENCE->openOpen a sequence
    DB_SEQUENCE->removeRemove a sequence
    DB_SEQUENCE->set_cachesizeSet the cache size of a sequence
    DB_SEQUENCE->set_flagsSet the flags for a sequence
    DB_SEQUENCE->set_rangeSet the range for a sequence
    DB_SEQUENCE->statReturn sequence statistics
    +
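As an illustration (not part of the original page), here is a minimal sketch of allocating a value from a sequence stored in an already open database. The key name "my_seq", the cache size, and the function name are arbitrary; the sketch assumes a transactional environment, which is why the DB_AUTO_COMMIT and DB_TXN_NOSYNC flags discussed above are passed to DB_SEQUENCE->get. Error handling is abbreviated.

#include <string.h>
#include <db.h>

int
next_id(DB *dbp, db_seq_t *idp)
{
	DBT key;
	DB_SEQUENCE *seq;
	int ret;

	if ((ret = db_sequence_create(&seq, dbp, 0)) != 0)
		return (ret);

	/* Cache 1000 values so most allocations avoid a database update. */
	if ((ret = seq->set_cachesize(seq, 1000)) != 0)
		goto err;

	memset(&key, 0, sizeof(key));
	key.data = "my_seq";
	key.size = (u_int32_t)strlen("my_seq");
	if ((ret = seq->open(seq, NULL, &key, DB_CREATE)) != 0)
		goto err;

	/* Allocate a single value; see the discussion of these flags above. */
	ret = seq->get(seq, NULL, 1, idp, DB_AUTO_COMMIT | DB_TXN_NOSYNC);

err:	(void)seq->close(seq, 0);
	return (ret);
}

In practice the sequence handle would be opened once and reused, rather than being opened and closed around each allocation.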

    PrevRefNext +
    +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/ref/tcl/error.html b/db/docs/ref/tcl/error.html index 4c18ad73d..82f14ea65 100644 --- a/db/docs/ref/tcl/error.html +++ b/db/docs/ref/tcl/error.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Tcl error handling - + @@ -66,6 +66,6 @@ more descriptive prefix is desired or where a constant prefix indicating an error is desired.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/tcl/faq.html b/db/docs/ref/tcl/faq.html index ad0a59e1f..39974ddde 100644 --- a/db/docs/ref/tcl/faq.html +++ b/db/docs/ref/tcl/faq.html @@ -1,18 +1,18 @@ - - + + Berkeley DB Reference Guide: Tcl FAQ - + -

    Berkeley DB Reference Guide:
    Tcl API

    PrevRefNext +PrevRefNext

    Tcl FAQ

    @@ -51,8 +51,8 @@ variable should contain sufficient linker flags to find and link against the installed libtcl library. In some circumstances, the tclConfig.sh file built by Tcl does not.

    -

    PrevRefNext +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/tcl/intro.html b/db/docs/ref/tcl/intro.html index a895bd6cd..e88c08daf 100644 --- a/db/docs/ref/tcl/intro.html +++ b/db/docs/ref/tcl/intro.html @@ -1,18 +1,18 @@ - - + + Berkeley DB Reference Guide: Loading Berkeley DB with Tcl - + -

    Berkeley DB Reference Guide:
    Tcl API

    PrevRefNext +PrevRefNext

    Loading Berkeley DB with Tcl

    @@ -37,21 +37,21 @@ several steps that must be performed:

    For example:

    # tclsh8.4
    -% lappend auto_path /usr/local/BerkeleyDB.4.2/lib
    -% pkg_mkIndex /usr/local/BerkeleyDB.4.2/lib libdb_tcl-4.2.so
    +% lappend auto_path /usr/local/BerkeleyDB.4.3/lib +% pkg_mkIndex /usr/local/BerkeleyDB.4.3/lib libdb_tcl-4.3.so

    Note that your Tcl and Berkeley DB version numbers may differ from the example, and so your tclsh and library names may be different.

    Loading Berkeley DB with Tcl

    The Berkeley DB package may be loaded into the user's interactive Tcl script (or wish session) via the load command. For example:

    -
    load /usr/local/BerkeleyDB.4.2/lib/libdb_tcl-4.2.so
    +
    load /usr/local/BerkeleyDB.4.3/lib/libdb_tcl-4.3.so

    Note that your Berkeley DB version numbers may differ from the example, and so the library name may be different.

    If you installed your library to run as a Tcl package, Tcl application scripts should use the package command to indicate to the Tcl interpreter that it needs the Berkeley DB package and where to find it. For example:

    -
    lappend auto_path "/usr/local/BerkeleyDB.4.2/lib"
    +
    lappend auto_path "/usr/local/BerkeleyDB.4.3/lib"
     package require Db_tcl

    No matter which way the library gets loaded, it creates a command named berkdb. All the Berkeley DB functionality is accessed via this @@ -60,8 +60,8 @@ A simple test to determine whether everything is loaded and ready is to display the library version, as follows:

    berkdb version -string

This should return the Berkeley DB version as a string.

    -

    PrevRefNext +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/tcl/program.html b/db/docs/ref/tcl/program.html index 2c94036aa..8b794af00 100644 --- a/db/docs/ref/tcl/program.html +++ b/db/docs/ref/tcl/program.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Tcl API programming notes - + @@ -26,6 +26,6 @@ supported via the Tcl API. For example, there is no equivalent to the methods.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/tcl/using.html b/db/docs/ref/tcl/using.html index 6698d9aac..94bfae1ee 100644 --- a/db/docs/ref/tcl/using.html +++ b/db/docs/ref/tcl/using.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Using Berkeley DB with Tcl - + @@ -50,6 +50,6 @@ list the correct operations for a command or the correct options.

    Tcl Interface documentation.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/test/faq.html b/db/docs/ref/test/faq.html index 890a64584..0e8ae43dc 100644 --- a/db/docs/ref/test/faq.html +++ b/db/docs/ref/test/faq.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Test suite FAQ - + @@ -24,6 +24,6 @@ run is making forward progress and new lines are being written to the

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/test/run.html b/db/docs/ref/test/run.html index 964e3cd90..43b99fd16 100644 --- a/db/docs/ref/test/run.html +++ b/db/docs/ref/test/run.html @@ -1,18 +1,18 @@ - - + + Berkeley DB Reference Guide: Running the test suite - + -

    Berkeley DB Reference Guide:
    Test Suite

    PrevRefNext +PrevRefNext

    Running the test suite

    @@ -54,8 +54,8 @@ Regardless of where you run the tests, the TESTDIR directory should be on a local filesystem. Using a remote filesystem (for example, an NFS mounted filesystem) will almost certainly cause spurious test failures.

    -

    PrevRefNext +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/admin.html b/db/docs/ref/transapp/admin.html index 2bc019447..e64723e5c 100644 --- a/db/docs/ref/transapp/admin.html +++ b/db/docs/ref/transapp/admin.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Environment infrastructure - + @@ -43,6 +43,6 @@ scripting interface because the scripting APIs do not always offer interfaces to the administrative functionality.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/app.html b/db/docs/ref/transapp/app.html index 7896b5be0..6062139e7 100644 --- a/db/docs/ref/transapp/app.html +++ b/db/docs/ref/transapp/app.html @@ -1,12 +1,12 @@ - - + + -Berkeley DB Reference Guide: Application structure +Berkeley DB Reference Guide: Architecting Transactional Data Store applications - + @@ -14,7 +14,7 @@
    PrevRefNext

    -

    Application structure

    +

    Architecting Transactional Data Store applications

    When building transactionally protected applications, there are some special issues that must be considered. The most important one is that if any thread of control exits for any reason while holding Berkeley DB @@ -27,15 +27,14 @@ locks.

  • Clean up any partially completed operations that may have left a database in an inconsistent or corrupted state. -

    Complicating this problem is the fact that the Berkeley DB library itself -cannot determine whether recovery is required; the application itself -must make that decision. A further complication is that -recovery must be single-threaded; that is, one thread of control or -process must perform recovery before any other thread of control or -processes attempts to create or join the Berkeley DB environment.

    +

    The Berkeley DB library cannot determine whether recovery is required; the +application must make that decision. Furthermore, recovery must be +single-threaded; that is, one thread of control or process must perform +recovery before any other thread of control or process attempts to join +the Berkeley DB environment.

    There are two approaches to handling this problem:

    -

    -

    The hard way:
    An application can track its own state carefully enough that it knows +
    +
    The hard way:
    An application can track its own state carefully enough that it knows when recovery needs to be performed. Specifically, the rule to use is that recovery must be performed before using a Berkeley DB environment any time the threads of control previously using the Berkeley DB environment did @@ -54,7 +53,7 @@ aborted, or DB_ENV->close method was DB_ENV handle. In addition, at least one transaction checkpoint must be performed after all existing transactions have been committed or aborted.

    -

    The easy way:
    It greatly simplifies matters that recovery may be performed regardless +
    The easy way:
    It greatly simplifies matters that recovery may be performed regardless of whether recovery strictly needs to be performed; that is, it is not an error to run recovery on a database for which no recovery is necessary. Because of this fact, it is almost invariably simpler to @@ -125,6 +124,6 @@ be as simple and well-tested as possible because there is no recourse if it fails.
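A minimal sketch of "the easy way" (an illustration, not part of the original page): a single thread of control opens the environment with DB_RECOVER before any other thread of control joins it. The home directory and permissions are placeholders.

#include <db.h>

int
open_env(DB_ENV **dbenvp, const char *home)
{
	DB_ENV *dbenv;
	int ret;

	if ((ret = db_env_create(&dbenv, 0)) != 0)
		return (ret);

	/* Run normal recovery unconditionally as part of opening. */
	if ((ret = dbenv->open(dbenv, home,
	    DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
	    DB_INIT_TXN | DB_RECOVER, 0)) != 0) {
		(void)dbenv->close(dbenv, 0);
		return (ret);
	}
	*dbenvp = dbenv;
	return (0);
}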


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/archival.html b/db/docs/ref/transapp/archival.html index 88fbf6e8f..12bb0490d 100644 --- a/db/docs/ref/transapp/archival.html +++ b/db/docs/ref/transapp/archival.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Database and log file archival - + @@ -75,23 +75,16 @@ backup device such as CD-ROM, alternate disk, or tape.

    To create a hot backup of your database that can be used to recover from catastrophic failure, take the following steps:

      -

    1. Run db_archive to identify those log files which are not -in use. These log files are not part of this hot backup and can -be discarded after the hot backup is successful. They can be used -with the previous hot backup to bring the databases forward to -this point.

    2. Archive your databases, as described in the previous step #4. -You do not have to halt ongoing transactions or force a -checkpoint. In the case of a hot backup, the utility you use to copy -the databases must read database pages atomically (as described by -Berkeley DB recoverability). -

    3. When performing a hot backup, you must additionally archive all of the -log files not identified in step #1. -Note that the order of these two operations is required, -and the database files must be archived before the log files. This -means that if the database files and log files are in the same -directory, you cannot simply archive the directory; you must make sure -that the correct order of archival is maintained. +You do not have to halt ongoing transactions or force a checkpoint. As +this is a hot backup, and the databases may be modified during the copy, +the utility you use to copy the databases must read database pages +atomically (as described by Berkeley DB recoverability). +

    4. Archive all of the log files. The order of these two operations +is required, and the database files must be archived before the +log files. This means that if the database files and log files are in +the same directory, you cannot simply archive the directory; you must +make sure that the correct order of archival is maintained.

      To archive your log files, run the db_archive utility using the -l option to identify all the database log files, and copy them to your backup media. If the database log files are stored @@ -99,17 +92,24 @@ in a separate directory from the other database files, it may be simpler to archive the directory itself instead of the individual files (see the DB_ENV->set_lg_dir method for more information).

    -

    Once these steps are completed, your database can be recovered from -catastrophic failure (see Recovery procedures for -more information).

    -

    To update your snapshot so that recovery from catastrophic failure is -possible up to a new point in time, repeat step 2 under the hot backup -instructions -- copying all existing log files to a backup device. This -is applicable to both standard and hot backups; that is, you can update -snapshots made either way. Each time both the database and log files -are copied to backup media, you may discard all previous database -snapshots and saved log files. Archiving additional log files does not -allow you to discard either previous database snapshots or log files.

    +

    To minimize the archival space needed for log files when doing a hot +backup, run db_archive to identify those log files which are not in use. +Log files which are not in use do not need to be included when creating +a hot backup, and you can discard them or move them aside for use with +previous backups (whichever is appropriate), before beginning the hot +backup.
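For illustration (not part of the original page), the same information reported by db_archive can be obtained programmatically; this sketch lists the log files that are no longer in use, using the DB_ENV->log_archive method.

#include <stdio.h>
#include <stdlib.h>
#include <db.h>

int
print_unused_logs(DB_ENV *dbenv)
{
	char **list, **p;
	int ret;

	/* DB_ARCH_ABS returns absolute pathnames of log files not in use. */
	if ((ret = dbenv->log_archive(dbenv, &list, DB_ARCH_ABS)) != 0)
		return (ret);

	if (list != NULL) {
		for (p = list; *p != NULL; ++p)
			printf("unused log file: %s\n", *p);
		free(list);		/* The list is a single allocation. */
	}
	return (0);
}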

    +

    After completing one of these two sets of steps, the database +environment can be recovered from catastrophic failure (see +Recovery procedures for more information).

    +

    To update either a hot or cold backup so that recovery from catastrophic +failure is possible to a new point in time, repeat step #2 under the +hot backup instructions and archive all of the log files in the +database environment. Each time both the database and log files are +copied to backup media, you may discard all previous database snapshots +and saved log files. Archiving additional log files does not allow you +to discard either previous database snapshots or log files. Generally, +updating a backup must be integrated with the application's log file +removal procedures.

    The time to restore from catastrophic failure is a function of the number of log records that have been written since the snapshot was originally created. Perhaps more importantly, the more separate pieces @@ -155,6 +155,6 @@ log_archlist(DB_ENV *dbenv) }

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/atomicity.html b/db/docs/ref/transapp/atomicity.html index a508d871f..979424bdf 100644 --- a/db/docs/ref/transapp/atomicity.html +++ b/db/docs/ref/transapp/atomicity.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Atomicity - + @@ -58,6 +58,6 @@ items in different orders greatly increases the likelihood of operations being blocked and failing due to deadlocks.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/checkpoint.html b/db/docs/ref/transapp/checkpoint.html index 8ef3697b4..458b55398 100644 --- a/db/docs/ref/transapp/checkpoint.html +++ b/db/docs/ref/transapp/checkpoint.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Checkpoints - + @@ -17,21 +17,23 @@

    Checkpoints

    The second component of the infrastructure is performing checkpoints of -the log files. As transactions commit, change records are written into -the log files, but the actual changes to the database are not -necessarily written to disk. When a checkpoint is performed, the -changes to the database that are part of committed transactions are -written into the backing database file.

    -

    Performing checkpoints is necessary for two reasons. First, you can -remove the Berkeley DB log files from your system only after a checkpoint. -Second, the frequency of your checkpoints is inversely proportional to -the amount of time it takes to run database recovery after a system or -application failure.

    -

    Once the database pages are written, log files can be archived and removed -from the system because they will never be needed for anything other than -catastrophic failure. In addition, recovery after system or application -failure has to redo or undo changes only since the last checkpoint since -changes before the checkpoint have all been flushed to the filesystem.

    +the log files. Performing checkpoints is necessary for two reasons.

    +

    First, you may be able to remove Berkeley DB log files from your database +environment after a checkpoint. Change records are written into the log +files when databases are modified, but the actual changes to the +database are not necessarily written to disk. When a checkpoint is +performed, changes to the database are written into the backing database +file. Once the database pages are written, log files can be archived +and removed from the database environment because they will never be +needed for anything other than catastrophic failure. (Log files which +are involved in active transactions may not be removed, and there must +always be at least one log file in the database environment.)

    +

The second reason to perform checkpoints is that checkpoint frequency +is inversely proportional to the amount of time it takes to run database +recovery after a system or application failure. This is because +recovery after failure has to redo or undo changes only since the last +checkpoint, as changes before the checkpoint have all been flushed to +the databases.

    Berkeley DB provides a separate utility, db_checkpoint, which can be used to perform checkpoints. Alternatively, applications can write their own checkpoint utility using the underlying DB_ENV->txn_checkpoint @@ -118,6 +120,6 @@ perform a checkpoint is a common tuning parameter for Berkeley DB applications.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/cursor.html b/db/docs/ref/transapp/cursor.html index 0a72b55a2..48c0606fa 100644 --- a/db/docs/ref/transapp/cursor.html +++ b/db/docs/ref/transapp/cursor.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Transactional cursors - + @@ -23,9 +23,9 @@ remember is that a cursor must be closed before the enclosing transaction is committed or aborted.

    The following code fragment uses a cursor to store a new key in the cats database with four associated data items. The key is a name. The data -items are a company name, an address, and a list of the breeds of cat -owned. Each of the data entries is stored as a duplicate data item. -In this example, transactions are necessary to ensure that either all or none +items are a company name and a list of the breeds of cat owned. Each +of the data entries is stored as a duplicate data item. In this +example, transactions are necessary to ensure that either all or none of the data items appear in case of system or application failure.

    int
     main(int argc, char *argv)
    @@ -164,6 +164,6 @@ retry:	/* Begin the transaction. */
     }

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/data_open.html b/db/docs/ref/transapp/data_open.html index 0f930e616..cdd1a201f 100644 --- a/db/docs/ref/transapp/data_open.html +++ b/db/docs/ref/transapp/data_open.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Opening the databases - + @@ -130,13 +130,14 @@ well-known file might include an update of the list in the same transaction in which the database is created. Or, an application might create both a primary and secondary database in a single transaction.

    DB handles that will later be used for transactionally protected -operations must be opened within a transaction. Specifying a -transaction handle to operations using handles not opened within a -transaction will return an error. Similarly, not specifying a -transaction handle to operations using handles that were opened within -a transaction will also return an error.

    +database operations must be opened within a transaction. Specifying a +transaction handle to database operations using DB handles not +opened within a transaction will return an error. Similarly, not +specifying a transaction handle to database operations that will modify +the database, using handles that were opened within a transaction, will +also return an error.


    PrevRefNext
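To illustrate the paragraph above (this sketch is not part of the original page), a database intended for transactionally protected use can be created by passing either an explicit transaction handle or the DB_AUTO_COMMIT flag to DB->open; the filename and mode below are placeholders.

#include <db.h>

int
open_txn_db(DB_ENV *dbenv, DB **dbpp)
{
	DB *dbp;
	int ret;

	if ((ret = db_create(&dbp, dbenv, 0)) != 0)
		return (ret);

	/* The open itself is wrapped in a transaction via DB_AUTO_COMMIT. */
	if ((ret = dbp->open(dbp, NULL, "accounts.db", NULL,
	    DB_BTREE, DB_CREATE | DB_AUTO_COMMIT, 0644)) != 0) {
		(void)dbp->close(dbp, 0);
		return (ret);
	}
	*dbpp = dbp;
	return (0);
}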
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/deadlock.html b/db/docs/ref/transapp/deadlock.html index 6167999aa..3fb213c81 100644 --- a/db/docs/ref/transapp/deadlock.html +++ b/db/docs/ref/transapp/deadlock.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Deadlock detection - + @@ -105,6 +105,6 @@ deadlocked transactions will be forced to abort when the deadlock is detected is a common tuning parameter for Berkeley DB applications.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/env_open.html b/db/docs/ref/transapp/env_open.html index 6aa958e43..8e502e9a0 100644 --- a/db/docs/ref/transapp/env_open.html +++ b/db/docs/ref/transapp/env_open.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Opening the environment - + @@ -171,6 +171,6 @@ Txn Region: 5. 0 Locks granted after waiting.

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/faq.html b/db/docs/ref/transapp/faq.html index 6996e6e4a..7aafc851d 100644 --- a/db/docs/ref/transapp/faq.html +++ b/db/docs/ref/transapp/faq.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Transaction FAQ - + @@ -73,23 +73,25 @@ into another? cannot be simply moved into different database environments. To move a database into a different environment, dump and reload the database before moving it. If the database is too large to dump and reload, the -database may be prepared in place by setting the first eight bytes of -each database page in the file to 0.

    +database may be prepared in place using the -l argument to the +db_load utility.

  • I'm seeing the error "log_flush: LSN past current end-of-log", what does that mean?

    The most common cause of this error is that a system administrator has removed all of the log files from a database environment. You should shut down your database environment as gracefully as possible, first flushing the database environment cache to disk, if that's possible. -Then, dump and reload your databases. If your databases are too large -to dump and reload, the database may be repaired in place by setting -the first eight bytes of each database page in the file to 0, but if -you do that, you must verify your databases before using them again. -(It is possible for the databases to be corrupted when this happens, -and the longer the application runs, the worse it can get.)

    +Then, dump and reload your databases. If the database is too large to +dump and reload, the database may be reset in place using the +-l argument to the db_load +utility. However, if you reset the database in place, you should verify +your databases before using them again. (It is possible for the +databases to be corrupted by running after all of the log files have +been removed, and the longer the application runs, the worse it can +get.)


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/filesys.html b/db/docs/ref/transapp/filesys.html index 7f91bc74e..785949974 100644 --- a/db/docs/ref/transapp/filesys.html +++ b/db/docs/ref/transapp/filesys.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Recovery and filesystem operations - + @@ -61,6 +61,6 @@ recovery be performed from the filesystem operations forward.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/hotfail.html b/db/docs/ref/transapp/hotfail.html index 65c4c5b06..fba3f4da8 100644 --- a/db/docs/ref/transapp/hotfail.html +++ b/db/docs/ref/transapp/hotfail.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Hot failover - + @@ -78,6 +78,6 @@ Steps 2 through 5 must be performed at least once in order to ensure a consistent database environment snapshot.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/inc.html b/db/docs/ref/transapp/inc.html index 1b3a5574b..ca1b0e623 100644 --- a/db/docs/ref/transapp/inc.html +++ b/db/docs/ref/transapp/inc.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Isolation - + @@ -165,6 +165,6 @@ deadlock if we first obtain a read lock and subsequently a write lock, than if we obtain the write lock initially.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/intro.html b/db/docs/ref/transapp/intro.html index 80462c422..0d73ab408 100644 --- a/db/docs/ref/transapp/intro.html +++ b/db/docs/ref/transapp/intro.html @@ -1,18 +1,18 @@ - - + + Berkeley DB Reference Guide: Berkeley DB Transactional Data Store applications - + -

    Berkeley DB Reference Guide:
    Berkeley DB Transactional Data Store Applications

    PrevRefNext +PrevRefNext

    Berkeley DB Transactional Data Store applications

    @@ -35,8 +35,8 @@ to extract code blocks for your own applications. Fragments of the program will be presented throughout this chapter, and the complete text of the example program for IEEE/ANSI Std 1003.1 (POSIX) standard systems is included in the Berkeley DB distribution.

    -

    PrevRefNext +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/logfile.html b/db/docs/ref/transapp/logfile.html index b27e359c9..e6817d1d2 100644 --- a/db/docs/ref/transapp/logfile.html +++ b/db/docs/ref/transapp/logfile.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Log file removal - + @@ -53,6 +53,6 @@ opportunity to copy the log files to backup media.

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/nested.html b/db/docs/ref/transapp/nested.html index 49eda8dbf..5e9010fb5 100644 --- a/db/docs/ref/transapp/nested.html +++ b/db/docs/ref/transapp/nested.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Nested transactions - + @@ -60,6 +60,6 @@ transactions that are not yet resolved when the parent prepares are also prepared.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/put.html b/db/docs/ref/transapp/put.html index 9a5b7bd41..91dc84973 100644 --- a/db/docs/ref/transapp/put.html +++ b/db/docs/ref/transapp/put.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Recoverability and deadlock handling - + @@ -198,6 +198,6 @@ mainline code often results in the simplest and cleanest application code.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/read.html b/db/docs/ref/transapp/read.html index 354358dad..146d3c14a 100644 --- a/db/docs/ref/transapp/read.html +++ b/db/docs/ref/transapp/read.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Degrees of isolation - + @@ -22,7 +22,7 @@ for the life of the transaction, every time a thread of control reads a data item, it will be unchanged from its previous value (assuming, of course, the thread of control does not itself modify the item). Berkeley DB enforces repeatable reads whenever database reads are wrapped in -transactions.

+transactions. This is also known as degree 3 isolation.

    Most applications do not need to enclose reads in transactions, and when possible, transactionally protected reads should be avoided as they can cause performance problems. For example, a transactionally protected @@ -39,6 +39,18 @@ that it will not have changed (for example, an operation modifying a data item based on its existing value).

    + + +

A transaction may only require cursor stability; that is, it need +only be guaranteed that cursors see committed data that does +not change so long as it is addressed by the cursor, but that may +change before the reading transaction completes. This is +also called degree 2 isolation. Berkeley DB provides +this level of isolation when a transaction is started with +the DB_DEGREE_2 flag. This flag may also be specified +when opening a cursor within a fully isolated transaction.

    + +

    Berkeley DB optionally supports reading uncommitted data; that is, read operations may request data which has been modified but not yet committed by another transaction. This is done by first specifying the @@ -51,6 +63,6 @@ data; the disadvantage is that read operations may return data that will disappear should the transaction holding the write lock abort.
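As an illustration (not part of the original page), the following sketch starts a transaction at degree 2 isolation and performs one read that is additionally willing to see uncommitted data; it assumes the database was opened with dirty reads enabled, and the key is a placeholder.

#include <string.h>
#include <db.h>

int
relaxed_read(DB_ENV *dbenv, DB *dbp)
{
	DBT key, data;
	DB_TXN *txn;
	int ret;

	/* Degree 2 isolation for the whole transaction. */
	if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, DB_DEGREE_2)) != 0)
		return (ret);

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = "fruit";
	key.size = (u_int32_t)strlen("fruit");

	/* This read may also return modified-but-uncommitted data. */
	if ((ret = dbp->get(dbp, txn, &key, &data, DB_DIRTY_READ)) != 0) {
		(void)txn->abort(txn);
		return (ret);
	}
	return (txn->commit(txn, 0));
}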


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/reclimit.html b/db/docs/ref/transapp/reclimit.html index 8b7587619..5ebb63259 100644 --- a/db/docs/ref/transapp/reclimit.html +++ b/db/docs/ref/transapp/reclimit.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Berkeley DB recoverability - + @@ -145,6 +145,6 @@ incorrect data to the disk). However, configuring the database for checksums will ensure that any such corruption is detected.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/recovery.html b/db/docs/ref/transapp/recovery.html index 512e76dd1..74b304b2f 100644 --- a/db/docs/ref/transapp/recovery.html +++ b/db/docs/ref/transapp/recovery.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Recovery procedures - + @@ -25,7 +25,7 @@ from scratch. Although these applications may still need transaction protection for other reasons, recovery usually consists of removing the Berkeley DB environment home directory and all files it contains, and then restarting the application. -Such an application may use the DB_TXN_NOT_DURABLE flag to avoid +Such an application may use the DB_TXN_NOT_DURABLE flag to avoid writing log records.

  • It is necessary to recover information after system or application failure. In this case, recovery processing must be performed on any @@ -46,11 +46,11 @@ on the source for the database and log files you are using to recover.

    If up-to-the-minute database and log files are accessible on a stable filesystem, normal recovery is usually sufficient. Run the db_recover utility or call the DB_ENV->open method specifying -the DB_RECOVER flag. Note this case never includes recovery -using archival snapshots of the database environment. For example, you -cannot archive databases and log files, restore the backup and then run -normal recovery -- you must always run catastrophic recovery when using -archived files.

    +the DB_RECOVER flag. However, the normal recovery case +never includes recovery using hot backups of the database +environment. For example, you cannot perform a hot backup of databases +and log files, restore the backup and then run normal recovery -- you +must always run catastrophic recovery when using hot backups.

    If the database or log files have been destroyed or corrupted, or normal recovery fails, catastrophic recovery is required. For example, catastrophic failure includes the case where the disk drive on which @@ -92,6 +92,6 @@ pathnames.
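For illustration (not part of the original page), catastrophic recovery can also be run through the API rather than the db_recover utility; this sketch is the programmatic equivalent of db_recover -c, assuming the archived log files have already been restored into the environment.

#include <db.h>

int
catastrophic_recover(const char *home)
{
	DB_ENV *dbenv;
	int ret;

	if ((ret = db_env_create(&dbenv, 0)) != 0)
		return (ret);

	/* DB_RECOVER_FATAL replays all available log files from the start. */
	ret = dbenv->open(dbenv, home,
	    DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
	    DB_INIT_TXN | DB_RECOVER_FATAL, 0);

	(void)dbenv->close(dbenv, 0);
	return (ret);
}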


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/term.html b/db/docs/ref/transapp/term.html index 4f0e83422..b1243ed5f 100644 --- a/db/docs/ref/transapp/term.html +++ b/db/docs/ref/transapp/term.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Terminology - + @@ -17,41 +17,41 @@

    Terminology

    Here are some definitions that will be helpful in understanding transactions:

    -

    -

    Thread of control
    Berkeley DB is indifferent to the type or style of threads being used by the +
    +
    Thread of control
    Berkeley DB is indifferent to the type or style of threads being used by the application; or, for that matter, if threads are being used at all -- because Berkeley DB supports multiprocess access. In the Berkeley DB documentation, any time we refer to a thread of control, it can be read as a true thread (one of many in an application's address space) or a process. -

    Free-threaded
    A Berkeley DB handle that can be used by multiple threads simultaneously +
    Free-threaded
    A Berkeley DB handle that can be used by multiple threads simultaneously without any application-level synchronization is called free-threaded. -

    Transaction
    A transaction is a one or more operations on one or more +
    Transaction
A transaction is one or more operations on one or more databases that should be treated as a single unit of work. For example, changes to a set of databases in which either all of the changes must be applied to the database(s) or none of them should be. Applications specify when each transaction starts, what database operations are included in it, and when it ends. -

    Transaction abort/commit
    Every transaction ends by committing or aborting. +
    Transaction abort/commit
    Every transaction ends by committing or aborting. If a transaction commits, Berkeley DB guarantees that any database changes included in the transaction will never be lost, even after system or application failure. If a transaction aborts, or is uncommitted when the system or application fails, then the changes involved will never appear in the database. -

    System or application failure
    System or application failure is the phrase we use to +
    System or application failure
    System or application failure is the phrase we use to describe something bad happening near your data. It can be an application dumping core, being interrupted by a signal, the disk filling up, or the entire system crashing. In any case, for whatever reason, the application can no longer make forward progress, and its databases are left in an unknown state. -

    Recovery
    Recovery is what makes the database consistent after a system +
    Recovery
    Recovery is what makes the database consistent after a system or application failure. The recovery process includes review of log files and databases to ensure that the changes from each committed transaction appear in the database, and that no changes from an unfinished (or aborted) transaction do. Whenever system or application failure occurs, applications must usually run recovery. -

    Deadlock
    Deadlock, in its simplest form, happens when one thread of +
    Deadlock
    Deadlock, in its simplest form, happens when one thread of control owns resource A, but needs resource B; while another thread of control owns resource B, but needs resource A. Neither thread of control can make progress, and so one has to give up and release all @@ -60,6 +60,6 @@ forward progress.

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/throughput.html b/db/docs/ref/transapp/throughput.html index 03261c6af..67d8fb7fc 100644 --- a/db/docs/ref/transapp/throughput.html +++ b/db/docs/ref/transapp/throughput.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Transaction throughput - + @@ -120,6 +120,6 @@ program for IEEE/ANSI Std 1003.1 (POSIX) standard systems is included in the distribution.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/tune.html b/db/docs/ref/transapp/tune.html index 34a4b36f8..594f79a46 100644 --- a/db/docs/ref/transapp/tune.html +++ b/db/docs/ref/transapp/tune.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Transaction tuning - + @@ -22,19 +22,19 @@ of Berkeley DB transactional applications. First, you should review tuning issues for access method applications are applicable to transactional applications as well. The following are additional tuning issues for Berkeley DB transactional applications:

    -

    -

    access method
    Highly concurrent applications should use the Queue access method, where +
    +
    access method
    Highly concurrent applications should use the Queue access method, where possible, as it provides finer-granularity of locking than the other access methods. Otherwise, applications usually see better concurrency when using the Btree access method than when using either the Hash or Recno access methods. -

    record numbers
    Using record numbers outside of the Queue access method will often slow +
    record numbers
    Using record numbers outside of the Queue access method will often slow down concurrent applications as they limit the degree of concurrency available in the database. Using the Recno access method, or the Btree access method with retrieval by record number configured can slow applications down. -

    Btree database size
    When using the Btree access method, applications supporting concurrent +
    Btree database size
    When using the Btree access method, applications supporting concurrent access may see excessive numbers of deadlocks in small databases. There are two different approaches to resolving this problem. First, as the Btree access method uses page-level locking, decreasing the database @@ -42,46 +42,53 @@ page size can result in fewer lock conflicts. Second, in the case of databases that are cyclically growing and shrinking, turning off reverse splits can leave the database with enough pages that there will be fewer lock conflicts. -

    transactionally protected read operations
    Most applications do not need repeatable reads. Performing all read +
    transactionally protected read operations
    Most applications do not need repeatable reads. Performing all read operations outside of transactions can often significantly increase application throughput. In addition, limiting the lifetime of non-transactional cursors will reduce the length of times locks are held, thereby improving concurrency. -

    DB_DIRECT_DB, DB_DIRECT_LOG
    Consider using the DB_DIRECT_DB and DB_DIRECT_LOG flags. +
    DB_DIRECT_DB, DB_DIRECT_LOG
    Consider using the DB_DIRECT_DB and DB_DIRECT_LOG flags. On some systems, avoiding caching in the operating system can improve write throughput and allow the creation of larger Berkeley DB caches. -

    DB_DIRTY_READ
    Consider using the DB_DIRTY_READ flag for transactions, cursors -or individual read operations. This flag allows read operations to +
    DB_DIRTY_READ, DB_DEGREE_2
Consider decreasing the level of isolation of transactions using the +DB_DIRTY_READ or DB_DEGREE_2 flags for transactions or cursors +or the DB_DIRTY_READ flag on individual read operations. +Degree 2 isolation will release read +locks on cursors as soon as the data page is no longer referenced. +This will tend to block write operations for shorter periods for +applications that do not need to have repeatable reads for +cursor operations. +The dirty read flag allows read operations to potentially return data which has been modified but not yet committed, and can significantly increase application throughput in applications that do not require data be guaranteed to be permanent in the database. -

    DB_RMW
    Consider using the DB_RMW flag to immediate acquire write locks +
    DB_RMW
Consider using the DB_RMW flag to immediately acquire write locks when reading data items that will subsequently be modified. Although this flag may increase contention (because write locks are held longer than they would otherwise be), it may decrease the number of deadlocks that occur. -

    DB_TXN_WRITE_NOSYNC, DB_TXN_NOSYNC
    By default, transactional commit in Berkeley DB implies durability, that is, +
    DB_TXN_WRITE_NOSYNC, DB_TXN_NOSYNC
    By default, transactional commit in Berkeley DB implies durability, that is, all committed operations will be present in the database after recovery from any application or system failure. For applications not requiring that level of certainty, specifying the DB_TXN_NOSYNC flag will often provide a significant performance improvement. In this case, the database will still be fully recoverable, but some number of committed transactions might be lost after application or system failure. -

    access databases in order
    When modifying multiple databases in a single transaction, always access +
    access databases in order
    When modifying multiple databases in a single transaction, always access physical files and databases within physical files, in the same order where possible. In addition, avoid returning to a physical file or database, that is, avoid accessing a database, moving on to another database and then returning to the first database. This can significantly reduce the chance of deadlock between threads of control. -

    large key/data items
    Transactional protections in Berkeley DB are guaranteed by before and after +
    large key/data items
    Transactional protections in Berkeley DB are guaranteed by before and after physical image logging. This means applications modifying large key/data items also write large log records, and, in the case of the default transaction commit, threads of control must wait until those log records have been flushed to disk. Applications supporting concurrent access should try and keep key/data items small wherever possible. -

    mutex selection
    During configuration, Berkeley DB selects a mutex implementation for the +
    mutex selection
    During configuration, Berkeley DB selects a mutex implementation for the architecture. Berkeley DB normally prefers blocking-mutex implementations over non-blocking ones. For example, Berkeley DB will select POSIX pthread mutex interfaces rather than assembly-code test-and-set spin mutexes because @@ -102,24 +109,24 @@ tuning Berkeley DB for large multiprocessor systems, it may be useful to tune mutex alignment using the --with-mutexalign configuration argument.

    -

    --enable-posixmutexes
    By default, the Berkeley DB library will only select the POSIX pthread mutex +
    --enable-posixmutexes
    By default, the Berkeley DB library will only select the POSIX pthread mutex implementation if it supports mutexes shared between multiple processes. If your application does not share its database environment between processes and your system's POSIX mutex support was not selected because it did not support inter-process mutexes, you may be able to increase performance and transactional throughput by configuring with the --enable-posixmutexes argument. -

    log buffer size
    Berkeley DB internally maintains a buffer of log writes. The buffer is +
    log buffer size
    Berkeley DB internally maintains a buffer of log writes. The buffer is written to disk at transaction commit, by default, or, whenever it is filled. If it is consistently being filled before transaction commit, it will be written multiple times per transaction, costing application performance. In these cases, increasing the size of the log buffer can increase application throughput. -

    log file location
    If the database environment's log files are on the same disk as the +
    log file location
    If the database environment's log files are on the same disk as the databases, the disk arms will have to seek back-and-forth between the two. Placing the log files and the databases on different disk arms can often increase application throughput. -

    trickle write
    In some applications, the cache is sufficiently active and dirty that +
    trickle write
    In some applications, the cache is sufficiently active and dirty that readers frequently need to write a dirty page in order to have space in which to read a new page from the backing database file. You can use the db_stat utility (or the statistics returned by the @@ -130,6 +137,6 @@ the overall throughput of the application.
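As an illustration of the trickle-write item above (not part of the original page), a dedicated thread can keep a portion of the cache clean with DB_ENV->memp_trickle; the 10% target and five-second interval are arbitrary examples.

#include <unistd.h>
#include <db.h>

void *
trickle_thread(void *arg)
{
	DB_ENV *dbenv = arg;
	int nwrote;

	for (;;) {
		/* Try to keep at least 10% of the cache pages clean. */
		(void)dbenv->memp_trickle(dbenv, 10, &nwrote);
		sleep(5);
	}
	/* NOTREACHED */
	return (NULL);
}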

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/transapp/why.html b/db/docs/ref/transapp/why.html index ecdf60fe6..a7c29da80 100644 --- a/db/docs/ref/transapp/why.html +++ b/db/docs/ref/transapp/why.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Why transactions? - + @@ -18,23 +18,23 @@

    Perhaps the first question to answer is "Why transactions?" There are a number of reasons to include transactional support in your applications. The most common ones are the following:

    -

    -

    Recoverability
    Applications often need to ensure that no matter how the system or +
    +
    Recoverability
    Applications often need to ensure that no matter how the system or application fails, previously saved data is available the next time the application runs. This is often called Durability. -

    Atomicity
    Applications may need to make multiple changes to one or more databases, +
    Atomicity
    Applications may need to make multiple changes to one or more databases, but ensure that either all of the changes happen, or none of them happens. Transactions guarantee that a group of changes are atomic; that is, if the application or system fails, either all of the changes to the databases will appear when the application next runs, or none of them. -

    Isolation
    Applications may need to make changes in isolation, that is, ensure that +
    Isolation
    Applications may need to make changes in isolation, that is, ensure that only a single thread of control is modifying a key/data pair at a time. Transactions ensure each thread of control sees all records as if all other transactions either completed before or after its transaction.
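As a short illustration of the atomicity point above, the following sketch groups two changes so that either both appear after recovery or neither does. It assumes the C API with already-opened transactional DB_ENV and DB handles; the function name and parameters are illustrative only.

#include <db.h>

/* Move a record under a new key as a single atomic unit. */
int
move_record(DB_ENV *dbenv, DB *dbp,
    DBT *old_key, DBT *new_key, DBT *new_data)
{
	DB_TXN *txn;
	int ret;

	if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
		return (ret);
	if ((ret = dbp->put(dbp, txn, new_key, new_data, 0)) != 0 ||
	    (ret = dbp->del(dbp, txn, old_key, 0)) != 0) {
		(void)txn->abort(txn);	/* Neither change appears. */
		return (ret);
	}
	return (txn->commit(txn, 0));	/* Both changes are durable. */
}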

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/txn/config.html b/db/docs/ref/txn/config.html index af33e1af1..c7d5a918e 100644 --- a/db/docs/ref/txn/config.html +++ b/db/docs/ref/txn/config.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Configuring transactions - + @@ -36,6 +36,6 @@ committed transactions may be undone during recovery instead of being redone.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/txn/intro.html b/db/docs/ref/txn/intro.html index 0c3e99935..0fc822560 100644 --- a/db/docs/ref/txn/intro.html +++ b/db/docs/ref/txn/intro.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Berkeley DB and transactions - + @@ -73,24 +73,25 @@ transaction subsystem. It is the programmatic interface used by the

    The transaction system is closed by a call to DB_ENV->close.

    Finally, the entire transaction system may be removed using the DB_ENV->remove method.

    - + - - - - - - - - - - - - + + + + + + + + + + + + +
    Transaction Subsystem and Related MethodsDescription
    DB_ENV->set_tx_maxSet maximum number of transactions
    DB_ENV->set_tx_timestampSet recovery timestamp
    DB_ENV->txn_checkpointCheckpoint the transaction subsystem
    DB_ENV->txn_recoverDistributed transaction recovery
    DB_ENV->txn_statReturn transaction subsystem statistics
    DB_ENV->txn_beginBegin a transaction
    DB_TXN->abortAbort a transaction
    DB_TXN->commitCommit a transaction
    DB_TXN->discardDiscard a prepared but not resolved transaction handle
    DB_TXN->idReturn a transaction's ID
    DB_TXN->preparePrepare a transaction for commit
    DB_TXN->set_timeoutSet transaction timeout
    DB_ENV->set_timeoutSet lock and transaction timeout
    DB_ENV->set_tx_maxSet maximum number of transactions
    DB_ENV->set_tx_timestampSet recovery timestamp
    DB_ENV->txn_beginBegin a transaction
    DB_ENV->txn_checkpointCheckpoint the transaction subsystem
    DB_ENV->txn_recoverDistributed transaction recovery
    DB_ENV->txn_statReturn transaction subsystem statistics
    DB_TXN->abortAbort a transaction
    DB_TXN->commitCommit a transaction
    DB_TXN->discardDiscard a prepared but not resolved transaction handle
    DB_TXN->idReturn a transaction's ID
    DB_TXN->preparePrepare a transaction for commit
    DB_TXN->set_timeoutSet transaction timeout
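A brief sketch of two of the subsystem-level methods listed in the table, DB_ENV->txn_checkpoint and DB_ENV->txn_stat, assuming an already-opened transactional environment; the reporting function itself is illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <db.h>

/* Checkpoint the environment and report the active transaction count. */
int
txn_report(DB_ENV *dbenv)
{
	DB_TXN_STAT *statp;
	int ret;

	/* Unconditional checkpoint: 0 kbytes, 0 minutes, no flags. */
	if ((ret = dbenv->txn_checkpoint(dbenv, 0, 0, 0)) != 0)
		return (ret);

	/* The statistics structure is allocated by the library and must be
	 * released by the application. */
	if ((ret = dbenv->txn_stat(dbenv, &statp, 0)) != 0)
		return (ret);
	printf("active transactions: %lu\n",
	    (unsigned long)statp->st_nactive);
	free(statp);
	return (0);
}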

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/txn/limits.html b/db/docs/ref/txn/limits.html index a72ad4a0b..e48e63963 100644 --- a/db/docs/ref/txn/limits.html +++ b/db/docs/ref/txn/limits.html @@ -1,18 +1,18 @@ - - + + Berkeley DB Reference Guide: Transaction limits - + -

    Berkeley DB Reference Guide:
    Transaction Subsystem

    PrevRefNext +PrevRefNext

    Transaction limits

    @@ -49,8 +49,8 @@ the deadlock detector cannot detect the problem. In this case, there is no true deadlock, but because the transaction on which a transaction is waiting is in the same thread of control, no forward progress can be made.

    -

    PrevRefNext +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.2.0/convert.html b/db/docs/ref/upgrade.2.0/convert.html index 0c55bb3da..706a8bd15 100644 --- a/db/docs/ref/upgrade.2.0/convert.html +++ b/db/docs/ref/upgrade.2.0/convert.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 2.0: converting applications - + @@ -70,6 +70,6 @@ additional functionality supplied by Berkeley DB version 2, as it is likely to result in enhanced application performance.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.2.0/disk.html b/db/docs/ref/upgrade.2.0/disk.html index b3e16695e..e0a09e0c3 100644 --- a/db/docs/ref/upgrade.2.0/disk.html +++ b/db/docs/ref/upgrade.2.0/disk.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 2.0: upgrade requirements - + @@ -23,6 +23,6 @@ environments did not exist prior to the 2.0 release, there is no question of upgrading existing database environments.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.2.0/intro.html b/db/docs/ref/upgrade.2.0/intro.html index 491565eae..836c11318 100644 --- a/db/docs/ref/upgrade.2.0/intro.html +++ b/db/docs/ref/upgrade.2.0/intro.html @@ -1,15 +1,14 @@ - - + + Berkeley DB Reference Guide: Release 2.0: introduction - + -

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext @@ -28,6 +27,6 @@ version 2 library. The underlying databases must be converted, however, as the Berkeley DB version 2 library has a different underlying database format.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.2.0/system.html b/db/docs/ref/upgrade.2.0/system.html index 28660c58f..b634edfdd 100644 --- a/db/docs/ref/upgrade.2.0/system.html +++ b/db/docs/ref/upgrade.2.0/system.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 2.0: system integration - + @@ -80,6 +80,6 @@ since you have removed that from the library as well.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.2.0/toc.html b/db/docs/ref/upgrade.2.0/toc.html index e6b475fa1..bc79d5f82 100644 --- a/db/docs/ref/upgrade.2.0/toc.html +++ b/db/docs/ref/upgrade.2.0/toc.html @@ -1,14 +1,15 @@ - - + + Berkeley DB Reference Guide: Upgrading Berkeley DB 1.XX applications to Berkeley DB 2.0 - + +

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    Ref @@ -23,6 +24,6 @@

    Ref
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/close.html b/db/docs/ref/upgrade.3.0/close.html index 71b74bc4b..7a0b0f6e6 100644 --- a/db/docs/ref/upgrade.3.0/close.html +++ b/db/docs/ref/upgrade.3.0/close.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: DB->sync and DB->close - + @@ -30,6 +30,6 @@ database. Alternatively, the caller can ignore any error return of DB_INCOMPLETE.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/cxx.html b/db/docs/ref/upgrade.3.0/cxx.html index 9850ec94e..adf0cec5f 100644 --- a/db/docs/ref/upgrade.3.0/cxx.html +++ b/db/docs/ref/upgrade.3.0/cxx.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: additional C++ changes - + @@ -27,6 +27,6 @@ and Java APIs much closer in terms of functionality and usage. Please refer to the pages for upgrading C applications for further details.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/db.html b/db/docs/ref/upgrade.3.0/db.html index ffbdc0b97..5dbdc27d0 100644 --- a/db/docs/ref/upgrade.3.0/db.html +++ b/db/docs/ref/upgrade.3.0/db.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: the DB structure - + @@ -44,6 +44,6 @@ applications and the methods that should now be used to get or set them.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/db_cxx.html b/db/docs/ref/upgrade.3.0/db_cxx.html index 81e757dbc..c7693a11d 100644 --- a/db/docs/ref/upgrade.3.0/db_cxx.html +++ b/db/docs/ref/upgrade.3.0/db_cxx.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: the Db class for C++ and Java - + @@ -43,6 +43,6 @@ table.open("lookup.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/dbenv.html b/db/docs/ref/upgrade.3.0/dbenv.html index 6ba4de5ef..031b0cf55 100644 --- a/db/docs/ref/upgrade.3.0/dbenv.html +++ b/db/docs/ref/upgrade.3.0/dbenv.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: the DB_ENV structure - + @@ -64,6 +64,6 @@ no longer be used by any application.

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/dbenv_cxx.html b/db/docs/ref/upgrade.3.0/dbenv_cxx.html index dffe11908..b89b768d7 100644 --- a/db/docs/ref/upgrade.3.0/dbenv_cxx.html +++ b/db/docs/ref/upgrade.3.0/dbenv_cxx.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: the DbEnv class for C++ and Java - + @@ -68,6 +68,6 @@ DbTxnMgr, DbMpool, DbLog, DbTxnMgr. If you used any of these managers, all their methods are now found directly in the DbEnv class.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/dbinfo.html b/db/docs/ref/upgrade.3.0/dbinfo.html index c381b83d4..734670ae8 100644 --- a/db/docs/ref/upgrade.3.0/dbinfo.html +++ b/db/docs/ref/upgrade.3.0/dbinfo.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: the DBINFO structure - + @@ -68,6 +68,6 @@ record pad character. They should simply be discarded from the application.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/disk.html b/db/docs/ref/upgrade.3.0/disk.html index 127326dd2..ac2659a51 100644 --- a/db/docs/ref/upgrade.3.0/disk.html +++ b/db/docs/ref/upgrade.3.0/disk.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: upgrade requirements - + @@ -26,6 +26,6 @@ error.

    installations.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/eacces.html b/db/docs/ref/upgrade.3.0/eacces.html index f6417e9d9..f083de109 100644 --- a/db/docs/ref/upgrade.3.0/eacces.html +++ b/db/docs/ref/upgrade.3.0/eacces.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: EACCES - + @@ -24,6 +24,6 @@ error return from lock_put or lock_vec should have the test and any error handling removed.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/eagain.html b/db/docs/ref/upgrade.3.0/eagain.html index e2fa93e37..27856f6fb 100644 --- a/db/docs/ref/upgrade.3.0/eagain.html +++ b/db/docs/ref/upgrade.3.0/eagain.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: EAGAIN - + @@ -30,6 +30,6 @@ string EAGAIN in that file, there is a comment that describes how to make the change.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/envopen.html b/db/docs/ref/upgrade.3.0/envopen.html index b0befd650..e41781213 100644 --- a/db/docs/ref/upgrade.3.0/envopen.html +++ b/db/docs/ref/upgrade.3.0/envopen.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: environment open/close/unlink - + @@ -151,6 +151,6 @@ XXX_unlink is now a flag value that is set by bitwise inclusively OR'ing DB_ENV->remove flag argument.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/func.html b/db/docs/ref/upgrade.3.0/func.html index 40f7fd847..2b94bf5e7 100644 --- a/db/docs/ref/upgrade.3.0/func.html +++ b/db/docs/ref/upgrade.3.0/func.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: function arguments - + @@ -65,6 +65,6 @@ they declare, and then compile. You will see a warning message from your compiler in each case that needs to be upgraded.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/intro.html b/db/docs/ref/upgrade.3.0/intro.html index 7bc747313..3e0c44c14 100644 --- a/db/docs/ref/upgrade.3.0/intro.html +++ b/db/docs/ref/upgrade.3.0/intro.html @@ -1,15 +1,14 @@ - - + + Berkeley DB Reference Guide: Release 3.0: introduction - + -

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext @@ -22,6 +21,6 @@ This information does not describe how to upgrade Berkeley DB 1.85 release applications.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/java.html b/db/docs/ref/upgrade.3.0/java.html index 8b34dcce1..f910d6389 100644 --- a/db/docs/ref/upgrade.3.0/java.html +++ b/db/docs/ref/upgrade.3.0/java.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Release 3.0: additional Java changes - + @@ -24,16 +24,12 @@ DbException will catch these also, so code is not required to change. The catch clause for these new exceptions should appear before the catch clause for DbException.

    You will need to add a catch clause for java.io.FileNotFoundException, -since that can be thrown by the -Db.open -and -DbEnv.open -s.

    +since that can be thrown by Db.open and DbEnv.open.

    There are a number of smaller changes to the API that bring the C, C++ and Java APIs much closer in terms of functionality and usage. Please refer to the pages for upgrading C applications for further details.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/join.html b/db/docs/ref/upgrade.3.0/join.html index 8592d542f..083db66bd 100644 --- a/db/docs/ref/upgrade.3.0/join.html +++ b/db/docs/ref/upgrade.3.0/join.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: DB->join - + @@ -24,6 +24,6 @@ interfaces.

    For each of these, the order of the last two arguments should be swapped.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/jump_set.html b/db/docs/ref/upgrade.3.0/jump_set.html index a5f7392f6..164e20d40 100644 --- a/db/docs/ref/upgrade.3.0/jump_set.html +++ b/db/docs/ref/upgrade.3.0/jump_set.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: db_jump_set - + @@ -44,6 +44,6 @@ applications and the methods that should now be used instead.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/lock_detect.html b/db/docs/ref/upgrade.3.0/lock_detect.html index b61e26133..1cf2d93f1 100644 --- a/db/docs/ref/upgrade.3.0/lock_detect.html +++ b/db/docs/ref/upgrade.3.0/lock_detect.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: lock_detect - + @@ -20,6 +20,6 @@ For each one, a NULL argument should be appended to the current arguments.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/lock_notheld.html b/db/docs/ref/upgrade.3.0/lock_notheld.html index eff12096f..89bf4985b 100644 --- a/db/docs/ref/upgrade.3.0/lock_notheld.html +++ b/db/docs/ref/upgrade.3.0/lock_notheld.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: DB_LOCK_NOTHELD - + @@ -23,6 +23,6 @@ occurrences of DB_LOCK_NOTHELD. For each of these, the test and any error processing should be removed.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/lock_put.html b/db/docs/ref/upgrade.3.0/lock_put.html index 761c99ecc..8cd1cc6a2 100644 --- a/db/docs/ref/upgrade.3.0/lock_put.html +++ b/db/docs/ref/upgrade.3.0/lock_put.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: lock_put - + @@ -21,6 +21,6 @@ For each one, instead of passing a DB_LOCK variable as the last argument to the function, the address of the DB_LOCK variable should be passed.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/lock_stat.html b/db/docs/ref/upgrade.3.0/lock_stat.html index 9fc3b78a4..5657a4b5a 100644 --- a/db/docs/ref/upgrade.3.0/lock_stat.html +++ b/db/docs/ref/upgrade.3.0/lock_stat.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: lock_stat - + @@ -20,6 +20,6 @@ have been removed, and this information is no longer available.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/log_register.html b/db/docs/ref/upgrade.3.0/log_register.html index e66068ca6..41183cd07 100644 --- a/db/docs/ref/upgrade.3.0/log_register.html +++ b/db/docs/ref/upgrade.3.0/log_register.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: log_register - + @@ -21,6 +21,6 @@ each of these, the DBTYPE argument (it is the fourth argument) should be removed.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/log_stat.html b/db/docs/ref/upgrade.3.0/log_stat.html index ea0856601..c7e896c71 100644 --- a/db/docs/ref/upgrade.3.0/log_stat.html +++ b/db/docs/ref/upgrade.3.0/log_stat.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: log_stat - + @@ -19,6 +19,6 @@ has been removed, and this information is no longer available.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/memp_stat.html b/db/docs/ref/upgrade.3.0/memp_stat.html index d4c9ba9db..2d91dd7db 100644 --- a/db/docs/ref/upgrade.3.0/memp_stat.html +++ b/db/docs/ref/upgrade.3.0/memp_stat.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: memp_stat - + @@ -22,6 +22,6 @@ has been replaced with two new fields, st_gbytes and st_bytes.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/open.html b/db/docs/ref/upgrade.3.0/open.html index e57888e36..09db0be91 100644 --- a/db/docs/ref/upgrade.3.0/open.html +++ b/db/docs/ref/upgrade.3.0/open.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: database open/close - + @@ -61,6 +61,6 @@ on the DB handle. That change is discus this chapter.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/rmw.html b/db/docs/ref/upgrade.3.0/rmw.html index ab7e32aa3..5a6ad7619 100644 --- a/db/docs/ref/upgrade.3.0/rmw.html +++ b/db/docs/ref/upgrade.3.0/rmw.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: DB_RMW - + @@ -27,6 +27,6 @@ each of these, any that are arguments to the DB_WRITECURSOR flag instead.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/stat.html b/db/docs/ref/upgrade.3.0/stat.html index 98c915b55..b5d397818 100644 --- a/db/docs/ref/upgrade.3.0/stat.html +++ b/db/docs/ref/upgrade.3.0/stat.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: DB->stat - + @@ -20,6 +20,6 @@ and Recno databases has been removed, and this information is no longer available.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/toc.html b/db/docs/ref/upgrade.3.0/toc.html index 96d55df38..d87696bbf 100644 --- a/db/docs/ref/upgrade.3.0/toc.html +++ b/db/docs/ref/upgrade.3.0/toc.html @@ -1,14 +1,15 @@ - - + + Berkeley DB Reference Guide: Upgrading Berkeley DB 2.X.X applications to Berkeley DB 3.0 - + +

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    Ref @@ -50,6 +51,6 @@

    Ref
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/txn_begin.html b/db/docs/ref/upgrade.3.0/txn_begin.html index 56935358a..a81356bae 100644 --- a/db/docs/ref/upgrade.3.0/txn_begin.html +++ b/db/docs/ref/upgrade.3.0/txn_begin.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: txn_begin - + @@ -21,6 +21,6 @@ For each one, an argument of 0 should be appended to the current arguments.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/txn_commit.html b/db/docs/ref/upgrade.3.0/txn_commit.html index 409524981..4d93fd980 100644 --- a/db/docs/ref/upgrade.3.0/txn_commit.html +++ b/db/docs/ref/upgrade.3.0/txn_commit.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: txn_commit - + @@ -21,6 +21,6 @@ For each one, an argument of 0 should be appended to the current arguments.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/txn_stat.html b/db/docs/ref/upgrade.3.0/txn_stat.html index 88bb7b49c..e4d335034 100644 --- a/db/docs/ref/upgrade.3.0/txn_stat.html +++ b/db/docs/ref/upgrade.3.0/txn_stat.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: txn_stat - + @@ -19,6 +19,6 @@ has been removed, and this information is no longer available.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/value_set.html b/db/docs/ref/upgrade.3.0/value_set.html index 0336434c8..f9390f6cb 100644 --- a/db/docs/ref/upgrade.3.0/value_set.html +++ b/db/docs/ref/upgrade.3.0/value_set.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: db_value_set - + @@ -37,6 +37,6 @@ information.

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/xa.html b/db/docs/ref/upgrade.3.0/xa.html index 4529e4044..2d4c26d3e 100644 --- a/db/docs/ref/upgrade.3.0/xa.html +++ b/db/docs/ref/upgrade.3.0/xa.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: db_xa_open - + @@ -29,6 +29,6 @@ calls to the Db::xa_open method should be replaced with the the DB::open method.
Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.0/xa.html b/db/docs/ref/upgrade.3.0/xa.html index 4529e4044..2d4c26d3e 100644 --- a/db/docs/ref/upgrade.3.0/xa.html +++ b/db/docs/ref/upgrade.3.0/xa.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.0: db_xa_open - + @@ -29,6 +29,6 @@ calls to the Db::xa_open method should be replaced with the DB::open method.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.1/btstat.html b/db/docs/ref/upgrade.3.1/btstat.html index d419b3674..963b39bba 100644 --- a/db/docs/ref/upgrade.3.1/btstat.html +++ b/db/docs/ref/upgrade.3.1/btstat.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.1: DB->stat - + @@ -46,6 +46,6 @@ of the qs_nrecs field, and the field should be changed to be qs_nkeys.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.1/config.html b/db/docs/ref/upgrade.3.1/config.html index 440c74b80..8ff93aa15 100644 --- a/db/docs/ref/upgrade.3.1/config.html +++ b/db/docs/ref/upgrade.3.1/config.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.1: DB_ENV->open, DB_ENV->remove - + @@ -31,6 +31,6 @@ argument, the strings values in that argument are replaced with calls to

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.1/disk.html b/db/docs/ref/upgrade.3.1/disk.html index 986acaba7..9af245022 100644 --- a/db/docs/ref/upgrade.3.1/disk.html +++ b/db/docs/ref/upgrade.3.1/disk.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.1: upgrade requirements - + @@ -30,6 +30,6 @@ databases are upgraded, the DB->open m installations.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.1/dup.html b/db/docs/ref/upgrade.3.1/dup.html index 724465224..984a94c60 100644 --- a/db/docs/ref/upgrade.3.1/dup.html +++ b/db/docs/ref/upgrade.3.1/dup.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.1: identical duplicate data items - + @@ -27,6 +27,6 @@ release. See Duplicate data items for more information.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.1/env.html b/db/docs/ref/upgrade.3.1/env.html index c74764191..d3a253e60 100644 --- a/db/docs/ref/upgrade.3.1/env.html +++ b/db/docs/ref/upgrade.3.1/env.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.1: environment configuration - + @@ -49,6 +49,6 @@ instead.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.1/intro.html b/db/docs/ref/upgrade.3.1/intro.html index f41a03300..1139fff4c 100644 --- a/db/docs/ref/upgrade.3.1/intro.html +++ b/db/docs/ref/upgrade.3.1/intro.html @@ -1,15 +1,14 @@ - - + + Berkeley DB Reference Guide: Release 3.1: introduction - + -

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext @@ -22,6 +21,6 @@ This information does not describe how to upgrade Berkeley DB 1.85 release applications.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.1/log_register.html b/db/docs/ref/upgrade.3.1/log_register.html index dea09de1c..5b33cf1c1 100644 --- a/db/docs/ref/upgrade.3.1/log_register.html +++ b/db/docs/ref/upgrade.3.1/log_register.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.1: log_register - + @@ -24,6 +24,6 @@ be a reference to the DB structure being unregistered.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.1/logalloc.html b/db/docs/ref/upgrade.3.1/logalloc.html index 1444a522d..1d054b8cb 100644 --- a/db/docs/ref/upgrade.3.1/logalloc.html +++ b/db/docs/ref/upgrade.3.1/logalloc.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.1: log file pre-allocation - + @@ -23,6 +23,6 @@ this feature back on, search for the flag DB_OSO_LOG in the source file Sleepycat Software for assistance.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.1/memp_register.html b/db/docs/ref/upgrade.3.1/memp_register.html index 565efacbd..f5a68508b 100644 --- a/db/docs/ref/upgrade.3.1/memp_register.html +++ b/db/docs/ref/upgrade.3.1/memp_register.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.1: memp_register - + @@ -26,6 +26,6 @@ applications, and may be entirely ignored by the pgin and pgout functions themselves.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.1/put.html b/db/docs/ref/upgrade.3.1/put.html index 28509a665..9b8dacb34 100644 --- a/db/docs/ref/upgrade.3.1/put.html +++ b/db/docs/ref/upgrade.3.1/put.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.1: DB->put - + @@ -60,6 +60,6 @@ recno = *(db_recno_t *)key->data; printf("new record number is %lu\n", (u_long)recno);

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.1/set_feedback.html b/db/docs/ref/upgrade.3.1/set_feedback.html index 9abffdc57..141b8dc44 100644 --- a/db/docs/ref/upgrade.3.1/set_feedback.html +++ b/db/docs/ref/upgrade.3.1/set_feedback.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.1: DB_ENV->set_feedback, DB->set_feedback - + @@ -23,6 +23,6 @@ or throw an exception as appropriate when an error occurs.

    possible error on return.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.1/set_paniccall.html b/db/docs/ref/upgrade.3.1/set_paniccall.html index bd05e58b2..b94346942 100644 --- a/db/docs/ref/upgrade.3.1/set_paniccall.html +++ b/db/docs/ref/upgrade.3.1/set_paniccall.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.1: DB_ENV->set_paniccall, DB->set_paniccall - + @@ -23,6 +23,6 @@ or throw an exception as appropriate when an error occurs.

    possible error on return.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.1/set_tx_recover.html b/db/docs/ref/upgrade.3.1/set_tx_recover.html index d7d1ab21d..5186581b8 100644 --- a/db/docs/ref/upgrade.3.1/set_tx_recover.html +++ b/db/docs/ref/upgrade.3.1/set_tx_recover.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.1: DB_ENV->set_tx_recover - + @@ -32,6 +32,6 @@ function as follows:


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.1/sysmem.html b/db/docs/ref/upgrade.3.1/sysmem.html index de89a81c1..b8b20a196 100644 --- a/db/docs/ref/upgrade.3.1/sysmem.html +++ b/db/docs/ref/upgrade.3.1/sysmem.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.1: DB_SYSTEM_MEM - + @@ -21,6 +21,6 @@ specification of a base system memory segment ID, using the example, one returned by the UNIX ftok(3) function.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.1/tcl.html b/db/docs/ref/upgrade.3.1/tcl.html index 852652f36..c9259b5dd 100644 --- a/db/docs/ref/upgrade.3.1/tcl.html +++ b/db/docs/ref/upgrade.3.1/tcl.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.1: Tcl API - + @@ -27,6 +27,6 @@ record number are "0x", the record number is expected to be in hexadecimal form.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.1/tmp.html b/db/docs/ref/upgrade.3.1/tmp.html index 9f144e555..5961b0098 100644 --- a/db/docs/ref/upgrade.3.1/tmp.html +++ b/db/docs/ref/upgrade.3.1/tmp.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.1: DB_TMP_DIR - + @@ -30,6 +30,6 @@ protection modes for the system registry directory are different from those on the directory previously used by Berkeley DB.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.1/toc.html b/db/docs/ref/upgrade.3.1/toc.html index e3ec409e3..5884eb20e 100644 --- a/db/docs/ref/upgrade.3.1/toc.html +++ b/db/docs/ref/upgrade.3.1/toc.html @@ -1,14 +1,15 @@ - - + + Berkeley DB Reference Guide: Upgrading Berkeley DB 3.0.X applications to Berkeley DB 3.1 - + +

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    Ref @@ -36,6 +37,6 @@

    Ref
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.1/txn_check.html b/db/docs/ref/upgrade.3.1/txn_check.html index 035235b04..7d0ac906b 100644 --- a/db/docs/ref/upgrade.3.1/txn_check.html +++ b/db/docs/ref/upgrade.3.1/txn_check.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.1: txn_checkpoint - + @@ -21,6 +21,6 @@ txn_checkpoint. For each one, an argument of 0 should be appended to the current arguments.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.2/callback.html b/db/docs/ref/upgrade.3.2/callback.html index 14c02b852..0d0d879ba 100644 --- a/db/docs/ref/upgrade.3.2/callback.html +++ b/db/docs/ref/upgrade.3.2/callback.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.2: DB callback functions, app_private field - + @@ -35,6 +35,6 @@ Applications using this field will have to convert to using one of the replacement fields.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.2/db_dump.html b/db/docs/ref/upgrade.3.2/db_dump.html index 660bc9dd1..aefe04a62 100644 --- a/db/docs/ref/upgrade.3.2/db_dump.html +++ b/db/docs/ref/upgrade.3.2/db_dump.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.2: db_dump - + @@ -25,6 +25,6 @@ scripts post-processing the db_dump out under these conditions may require modification.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.2/disk.html b/db/docs/ref/upgrade.3.2/disk.html index 193e45b2c..828188fa3 100644 --- a/db/docs/ref/upgrade.3.2/disk.html +++ b/db/docs/ref/upgrade.3.2/disk.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.2: upgrade requirements - + @@ -24,6 +24,6 @@ the DB->open method will return a .


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.2/handle.html b/db/docs/ref/upgrade.3.2/handle.html index 9dd9de9be..7abd7f1cc 100644 --- a/db/docs/ref/upgrade.3.2/handle.html +++ b/db/docs/ref/upgrade.3.2/handle.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Release 3.2: Java and C++ object reuse - + @@ -15,17 +15,14 @@

    Release 3.2: Java and C++ object reuse

    -

    In previous releases of Berkeley DB, Java -DbEnv - and -Db -objects, and C++ DbEnv and Db objects could be -reused after they were closed, by calling open on them again. This is -no longer permitted, and these objects no longer allow any operations -after a close. Applications reusing these objects should be modified -to create new objects instead.

    +

    In previous releases of Berkeley DB, Java DbEnv and Db objects, and C++ +DbEnv and Db objects could be reused after they +were closed, by calling open on them again. This is no longer +permitted, and these objects no longer allow any operations after a +close. Applications reusing these objects should be modified to create +new objects instead.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.2/incomplete.html b/db/docs/ref/upgrade.3.2/incomplete.html index 5ec3832d2..23268ccab 100644 --- a/db/docs/ref/upgrade.3.2/incomplete.html +++ b/db/docs/ref/upgrade.3.2/incomplete.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Release 3.2: DB_INCOMPLETE - + @@ -27,19 +27,13 @@ code of DB_INCOMPLETE.

    an exception: Db::close, Db::sync, DbEnv::memp_sync, DbEnv::txn_checkpoint, DbMpoolFile::memp_fsync.

    The following Java methods are now declared "public int" rather than -"public void", and will return Db.DB_INCOMPLETE rather than -throw an exception: -Db.close -, -Db.sync -, -DbEnv.checkpoint -.

    +"public void", and will return Db.DB_INCOMPLETE rather than throw an +exception: Db.close, Db.sync, and DbEnv.checkpoint.

    It is likely that the only change required by any application will be those currently checking for a DB_INCOMPLETE return that has been encapsulated in an exception.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.2/intro.html b/db/docs/ref/upgrade.3.2/intro.html index c38d70859..2ccfab292 100644 --- a/db/docs/ref/upgrade.3.2/intro.html +++ b/db/docs/ref/upgrade.3.2/intro.html @@ -1,15 +1,14 @@ - - + + Berkeley DB Reference Guide: Release 3.2: introduction - + -

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext @@ -22,6 +21,6 @@ This information does not describe how to upgrade Berkeley DB 1.85 release applications.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.2/mutexlock.html b/db/docs/ref/upgrade.3.2/mutexlock.html index b0ac59894..2f45164f7 100644 --- a/db/docs/ref/upgrade.3.2/mutexlock.html +++ b/db/docs/ref/upgrade.3.2/mutexlock.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.2: DB_ENV->set_mutexlocks - + @@ -24,6 +24,6 @@ per-database environment basis. Applications using the old function should be updated to use the new one.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.2/notfound.html b/db/docs/ref/upgrade.3.2/notfound.html index 472dad9e9..655116c55 100644 --- a/db/docs/ref/upgrade.3.2/notfound.html +++ b/db/docs/ref/upgrade.3.2/notfound.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Release 3.2: Java java.io.FileNotFoundException - + @@ -15,17 +15,12 @@

    Release 3.2: Java java.io.FileNotFoundException

    -

    The Java -DbEnv.remove -, -Db.remove - and -Db.rename -methods now throw java.io.FileNotFoundException -in the case where the named file does not exist. Applications should -be modified to catch this exception where appropriate.

    +

    The Java DbEnv.remove, Db.remove and Db.rename methods now throw +java.io.FileNotFoundException in the case where the named file does not +exist. Applications should be modified to catch this exception where +appropriate.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.2/renumber.html b/db/docs/ref/upgrade.3.2/renumber.html index 947b7bb3c..ef20da4df 100644 --- a/db/docs/ref/upgrade.3.2/renumber.html +++ b/db/docs/ref/upgrade.3.2/renumber.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.2: Logically renumbering records - + @@ -35,6 +35,6 @@ evaluated to ensure that the new semantics do not cause application failure.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.2/set_flags.html b/db/docs/ref/upgrade.3.2/set_flags.html index a221add63..9561d9cec 100644 --- a/db/docs/ref/upgrade.3.2/set_flags.html +++ b/db/docs/ref/upgrade.3.2/set_flags.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.2: DB_ENV->set_flags - + @@ -31,6 +31,6 @@ appropriate place for them. Applications specifying either the DB_ENV->set_flags method.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.2/toc.html b/db/docs/ref/upgrade.3.2/toc.html index 4061b87a3..0396e24fe 100644 --- a/db/docs/ref/upgrade.3.2/toc.html +++ b/db/docs/ref/upgrade.3.2/toc.html @@ -1,14 +1,15 @@ - - + + Berkeley DB Reference Guide: Upgrading Berkeley DB 3.1.X applications to Berkeley DB 3.2 - + +

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    Ref @@ -30,6 +31,6 @@

    Ref
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.2/tx_recover.html b/db/docs/ref/upgrade.3.2/tx_recover.html index 6d1c2dee0..137835919 100644 --- a/db/docs/ref/upgrade.3.2/tx_recover.html +++ b/db/docs/ref/upgrade.3.2/tx_recover.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.2: DB_ENV->set_tx_recover - + @@ -28,6 +28,6 @@ application's callback function may be removed.

    enclosing the operation successfully committed.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.3/alloc.html b/db/docs/ref/upgrade.3.3/alloc.html index 679d235c6..00664eda2 100644 --- a/db/docs/ref/upgrade.3.3/alloc.html +++ b/db/docs/ref/upgrade.3.3/alloc.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Release 3.3: DB->set_malloc, DB->set_realloc - + @@ -22,34 +22,34 @@ Berkeley DB library to use when allocating memory to be owned by the application and when freeing memory that was originally allocated by the application.

    The new methods affect or replace the following historic methods:

    -

    -

    DB->set_malloc
    The DB->set_malloc method has been replaced in its entirety. +
    +
    DB->set_malloc
    The DB->set_malloc method has been replaced in its entirety. Applications using this method should replace the call with a call to DB->set_alloc. -

    DB->set_realloc
    The DB->set_realloc method has been replaced in its entirety. +
    DB->set_realloc
    The DB->set_realloc method has been replaced in its entirety. Applications using this method should replace the call with a call to DB->set_alloc. -

    DB->stat
    The historic db_malloc argument to the DB->stat method has +
    DB->stat
    The historic db_malloc argument to the DB->stat method has been replaced. Applications using this method should do as follows: if the argument is NULL, it should simply be removed. If non-NULL, it should be replaced with a call to DB->set_alloc. -

    lock_stat
    The historic db_malloc argument to the lock_stat function has +
    lock_stat
    The historic db_malloc argument to the lock_stat function has been replaced. Applications using this function should do as follows: if the argument is NULL, it should simply be removed. If non-NULL, it should be replaced with a call to DB_ENV->set_alloc. -

    log_archive
    The historic db_malloc argument to the log_archive function has +
    log_archive
    The historic db_malloc argument to the log_archive function has been replaced. Applications using this function should do as follows: if the argument is NULL, it should simply be removed. If non-NULL, it should be replaced with a call to DB_ENV->set_alloc. -

    log_stat
    The historic db_malloc argument to the log_stat function has +
    log_stat
    The historic db_malloc argument to the log_stat function has been replaced. Applications using this function should do as follows: if the argument is NULL, it should simply be removed. If non-NULL, it should be replaced with a call to DB_ENV->set_alloc. -

    memp_stat
    The historic db_malloc argument to the memp_stat function has +
    memp_stat
    The historic db_malloc argument to the memp_stat function has been replaced. Applications using this function should do as follows: if the argument is NULL, it should simply be removed. If non-NULL, it should be replaced with a call to DB_ENV->set_alloc. -

    txn_stat
    The historic db_malloc argument to the txn_stat function has +
    txn_stat
    The historic db_malloc argument to the txn_stat function has been replaced. Applications using this function should do as follows: if the argument is NULL, it should simply be removed. If non-NULL, it should be replaced with a call to DB_ENV->set_alloc. @@ -61,6 +61,6 @@ the environment first, and subsequently call the DB->set_malloc and DB->set_realloc methods; that use is no longer supported.
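A before-and-after sketch of the replacement described in the list above, assuming a DB handle created with db_create and a DB_ENV handle created with db_env_create, neither yet opened; the handle names and the use of the C library allocators are illustrative only.

#include <stdlib.h>
#include <db.h>

/* Configure application-owned memory allocation on unopened handles. */
int
configure_alloc(DB_ENV *dbenv, DB *dbp)
{
	int ret;

	/* Historic calls, now removed:
	 *	dbp->set_malloc(dbp, malloc);
	 *	dbp->set_realloc(dbp, realloc);
	 * The single replacement method configures all three functions
	 * (for a standalone DB handle that is not within an environment). */
	if ((ret = dbp->set_alloc(dbp, malloc, realloc, free)) != 0)
		return (ret);

	/* For lock_stat, log_archive, log_stat, memp_stat and txn_stat, the
	 * old db_malloc argument is simply dropped; a non-NULL allocator is
	 * configured on the environment instead, before DB_ENV->open. */
	return (dbenv->set_alloc(dbenv, malloc, realloc, free));
}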


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.3/bigfile.html b/db/docs/ref/upgrade.3.3/bigfile.html index d4d0fe4d2..221ffad0a 100644 --- a/db/docs/ref/upgrade.3.3/bigfile.html +++ b/db/docs/ref/upgrade.3.3/bigfile.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.3: --disable-bigfile - + @@ -24,6 +24,6 @@ version 2.50. For that reason, Berkeley DB configuration no longer supports used instead.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.3/conflict.html b/db/docs/ref/upgrade.3.3/conflict.html index 74b12396b..8ea579af6 100644 --- a/db/docs/ref/upgrade.3.3/conflict.html +++ b/db/docs/ref/upgrade.3.3/conflict.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.3: DB_LOCK_CONFLICT - + @@ -20,6 +20,6 @@ Applications specifying the DB_LOCK_CONFLICT flag should simply replace it with a flags argument of 0.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.3/disk.html b/db/docs/ref/upgrade.3.3/disk.html index 08fb29936..c0e4cf7b5 100644 --- a/db/docs/ref/upgrade.3.3/disk.html +++ b/db/docs/ref/upgrade.3.3/disk.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.3: upgrade requirements - + @@ -20,6 +20,6 @@ Upgrading Berkeley DB installations.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.3/getswap.html b/db/docs/ref/upgrade.3.3/getswap.html index ebba0ae11..b6b01568f 100644 --- a/db/docs/ref/upgrade.3.3/getswap.html +++ b/db/docs/ref/upgrade.3.3/getswap.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.3: DB->get_byteswapped - + @@ -24,6 +24,6 @@ argument is used as a memory location in which to store the requested information.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.3/gettype.html b/db/docs/ref/upgrade.3.3/gettype.html index 589300ffb..2c54a26b7 100644 --- a/db/docs/ref/upgrade.3.3/gettype.html +++ b/db/docs/ref/upgrade.3.3/gettype.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.3: DB->get_type - + @@ -23,6 +23,6 @@ type DBTYPE * to the method. The additional argument is used as a memory location in which to store the requested information.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.3/intro.html b/db/docs/ref/upgrade.3.3/intro.html index c71738917..7d2b8cbe5 100644 --- a/db/docs/ref/upgrade.3.3/intro.html +++ b/db/docs/ref/upgrade.3.3/intro.html @@ -1,15 +1,14 @@ - - + + Berkeley DB Reference Guide: Release 3.3: introduction - + -

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext @@ -22,6 +21,6 @@ This information does not describe how to upgrade Berkeley DB 1.85 release applications.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.3/memp_fget.html b/db/docs/ref/upgrade.3.3/memp_fget.html index 0ca1622a8..83a857290 100644 --- a/db/docs/ref/upgrade.3.3/memp_fget.html +++ b/db/docs/ref/upgrade.3.3/memp_fget.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.3: memp_fget, EIO - + @@ -32,6 +32,6 @@ transaction when a recoverable system error occurs in order to recover from the error.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.3/rpc.html b/db/docs/ref/upgrade.3.3/rpc.html index 6e3a1b7eb..ab8f09840 100644 --- a/db/docs/ref/upgrade.3.3/rpc.html +++ b/db/docs/ref/upgrade.3.3/rpc.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Release 3.3: DB_ENV->set_server - + @@ -24,6 +24,6 @@ and specifying a NULL for the added argument, second in the argument list.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.3/shared.html b/db/docs/ref/upgrade.3.3/shared.html index 662598f80..96373f2a7 100644 --- a/db/docs/ref/upgrade.3.3/shared.html +++ b/db/docs/ref/upgrade.3.3/shared.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.3: --enable-dynamic, --enable-shared - + @@ -26,6 +26,6 @@ both static and shared libraries by default, the useful options are Libtool's --disable-shared and --disable-static options.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.3/toc.html b/db/docs/ref/upgrade.3.3/toc.html index 6329c4837..9883f6b01 100644 --- a/db/docs/ref/upgrade.3.3/toc.html +++ b/db/docs/ref/upgrade.3.3/toc.html @@ -1,14 +1,15 @@ - - + + Berkeley DB Reference Guide: Upgrading Berkeley DB 3.2.X applications to Berkeley DB 3.3 - + +

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    Ref @@ -30,6 +31,6 @@

    Ref
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.3.3/txn_prepare.html b/db/docs/ref/upgrade.3.3/txn_prepare.html index fa6bfe4e7..2e57f79bf 100644 --- a/db/docs/ref/upgrade.3.3/txn_prepare.html +++ b/db/docs/ref/upgrade.3.3/txn_prepare.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 3.3: txn_prepare - + @@ -22,6 +22,6 @@ commit using Berkeley DB as a local resource manager), see information.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.0/asr.html b/db/docs/ref/upgrade.4.0/asr.html index 2e230f151..ee9f086f3 100644 --- a/db/docs/ref/upgrade.4.0/asr.html +++ b/db/docs/ref/upgrade.4.0/asr.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 4.0: application-specific recovery - + @@ -35,6 +35,6 @@ recommend that you contact Sleepycat support and ask us to review those routines for you.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.0/cxx.html b/db/docs/ref/upgrade.4.0/cxx.html index 3ca038d8e..1554058b0 100644 --- a/db/docs/ref/upgrade.4.0/cxx.html +++ b/db/docs/ref/upgrade.4.0/cxx.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 4.0: C++ ostream objects - + @@ -44,6 +44,6 @@ void foo(Db db) { }

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.0/deadlock.html b/db/docs/ref/upgrade.4.0/deadlock.html index 8df9c1c5d..e36e7505b 100644 --- a/db/docs/ref/upgrade.4.0/deadlock.html +++ b/db/docs/ref/upgrade.4.0/deadlock.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 4.0: db_deadlock - + @@ -21,6 +21,6 @@ option by using the -t option with an argument of .100000.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.0/disk.html b/db/docs/ref/upgrade.4.0/disk.html index 5ab14f49c..38e707a14 100644 --- a/db/docs/ref/upgrade.4.0/disk.html +++ b/db/docs/ref/upgrade.4.0/disk.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 4.0: upgrade requirements - + @@ -21,6 +21,6 @@ formats changed in the Berkeley DB 4.0 release.

    Upgrading Berkeley DB installations.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.0/env.html b/db/docs/ref/upgrade.4.0/env.html index c887aba3b..f1da7255b 100644 --- a/db/docs/ref/upgrade.4.0/env.html +++ b/db/docs/ref/upgrade.4.0/env.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 4.0: db_env_set_XXX - + @@ -75,6 +75,6 @@ their calls, replacing the historic call with a call to usage of the historic interface.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.0/intro.html b/db/docs/ref/upgrade.4.0/intro.html index 434f65b7c..87f5fc2e9 100644 --- a/db/docs/ref/upgrade.4.0/intro.html +++ b/db/docs/ref/upgrade.4.0/intro.html @@ -1,15 +1,14 @@ - - + + Berkeley DB Reference Guide: Release 4.0: introduction - + -

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext @@ -22,6 +21,6 @@ This information does not describe how to upgrade Berkeley DB 1.85 release applications.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.0/java.html b/db/docs/ref/upgrade.4.0/java.html index 45bf27d84..8dbbbcdc8 100644 --- a/db/docs/ref/upgrade.4.0/java.html +++ b/db/docs/ref/upgrade.4.0/java.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 4.0: Java CLASSPATH environment variable - + @@ -20,7 +20,7 @@ release, the CLASSPATH environment variable must change to include at least the db.jar file. It can optionally include the dbexamples.jar file if you want to run the examples. For example, on UNIX:

    -
    export CLASSPATH="/usr/local/BerkeleyDB.4.2/lib/db.jar:/usr/local/BerkeleyDB.4.2/lib/dbexamples.jar"
    +
    export CLASSPATH="/usr/local/BerkeleyDB.4.3/lib/db.jar:/usr/local/BerkeleyDB.4.3/lib/dbexamples.jar"

    For example, on Windows:

    set CLASSPATH="D:\db\build_win32\Release\db.jar;D:\db\build_win32\Release\dbexamples.jar"

    For more information on Java configuration, please see @@ -28,6 +28,6 @@ example, on UNIX:

    Building for Win32.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.0/lock.html b/db/docs/ref/upgrade.4.0/lock.html index 26b431fc1..bdf5f0f3d 100644 --- a/db/docs/ref/upgrade.4.0/lock.html +++ b/db/docs/ref/upgrade.4.0/lock.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 4.0: lock_XXX - + @@ -41,6 +41,6 @@ to use the enclosing DB_ENV handle's me first argument to the existing call is the correct handle to use).


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.0/lock_id_free.html b/db/docs/ref/upgrade.4.0/lock_id_free.html index f792d72fa..a6e43b7e9 100644 --- a/db/docs/ref/upgrade.4.0/lock_id_free.html +++ b/db/docs/ref/upgrade.4.0/lock_id_free.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 4.0: DB_ENV->lock_id_free - + @@ -21,6 +21,6 @@ locker IDs may want to update their applications to free the locker ID when it is no longer needed.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.0/log.html b/db/docs/ref/upgrade.4.0/log.html index 596b5cda9..4cdb20124 100644 --- a/db/docs/ref/upgrade.4.0/log.html +++ b/db/docs/ref/upgrade.4.0/log.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 4.0: log_XXX - + @@ -51,6 +51,6 @@ applications should add a final argument of 0 to any calls made to DB_ENV->log_stat.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.0/mp.html b/db/docs/ref/upgrade.4.0/mp.html index 093f3e6e5..c242ac950 100644 --- a/db/docs/ref/upgrade.4.0/mp.html +++ b/db/docs/ref/upgrade.4.0/mp.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 4.0: memp_XXX - + @@ -61,6 +61,6 @@ interfaces are identical; the one exception is the

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.0/rpc.html b/db/docs/ref/upgrade.4.0/rpc.html index 0f27cc0ca..52507a106 100644 --- a/db/docs/ref/upgrade.4.0/rpc.html +++ b/db/docs/ref/upgrade.4.0/rpc.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Release 4.0: DB_ENV->set_server - + @@ -22,6 +22,6 @@ name, and specifying a NULL for the added argument, second in the argument list.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.0/set_lk_max.html b/db/docs/ref/upgrade.4.0/set_lk_max.html index 961b407b9..2829534c6 100644 --- a/db/docs/ref/upgrade.4.0/set_lk_max.html +++ b/db/docs/ref/upgrade.4.0/set_lk_max.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 4.0: DB_ENV->set_lk_max - + @@ -22,6 +22,6 @@ method continues to be available, but is no longer documented and is expected to be removed in a future release.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.0/toc.html b/db/docs/ref/upgrade.4.0/toc.html index 93e53eb21..31a3ababa 100644 --- a/db/docs/ref/upgrade.4.0/toc.html +++ b/db/docs/ref/upgrade.4.0/toc.html @@ -1,14 +1,15 @@ - - + + Berkeley DB Reference Guide: Upgrading Berkeley DB 3.3.X applications to Berkeley DB 4.0 - + +

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    Ref @@ -33,6 +34,6 @@

    Ref
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.0/txn.html b/db/docs/ref/upgrade.4.0/txn.html index a22b6559e..93675318b 100644 --- a/db/docs/ref/upgrade.4.0/txn.html +++ b/db/docs/ref/upgrade.4.0/txn.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 4.0: txn_XXX - + @@ -42,6 +42,6 @@ unchanged, applications should add a final argument of 0 to any calls made to DB_ENV->txn_stat.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.1/app_dispatch.html b/db/docs/ref/upgrade.4.1/app_dispatch.html index 5644f7be7..b538e8bff 100644 --- a/db/docs/ref/upgrade.4.1/app_dispatch.html +++ b/db/docs/ref/upgrade.4.1/app_dispatch.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 4.1: Application-specific logging and recovery - + @@ -27,6 +27,6 @@ removed interfaces should be updated to call recovery" and the DB_ENV->set_app_dispatch documentation.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.1/checkpoint.html b/db/docs/ref/upgrade.4.1/checkpoint.html index cbd89b9d8..1bdf7d6e3 100644 --- a/db/docs/ref/upgrade.4.1/checkpoint.html +++ b/db/docs/ref/upgrade.4.1/checkpoint.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 4.1: DB_CHECKPOINT, DB_CURLSN - + @@ -25,6 +25,6 @@ used this flag, please contact Sleepycat Software support for help in upgrading.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.1/cxx.html b/db/docs/ref/upgrade.4.1/cxx.html index 9efc6655d..aea5efa76 100644 --- a/db/docs/ref/upgrade.4.1/cxx.html +++ b/db/docs/ref/upgrade.4.1/cxx.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 4.1: C++ exceptions - + @@ -43,6 +43,6 @@ errors while closing can be handled by the application.

    }

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.1/disk.html b/db/docs/ref/upgrade.4.1/disk.html index 9ef311f4c..272b85253 100644 --- a/db/docs/ref/upgrade.4.1/disk.html +++ b/db/docs/ref/upgrade.4.1/disk.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 4.1: upgrade requirements - + @@ -26,6 +26,6 @@ usable with earlier Berkeley DB releases.

    Upgrading Berkeley DB installations.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.1/excl.html b/db/docs/ref/upgrade.4.1/excl.html index ebdda2c9b..8cfb71eaf 100644 --- a/db/docs/ref/upgrade.4.1/excl.html +++ b/db/docs/ref/upgrade.4.1/excl.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 4.1: DB_EXCL - + @@ -21,6 +21,6 @@ the DB_EXCL flag to check for the subdatabases.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.1/fop.html b/db/docs/ref/upgrade.4.1/fop.html index c5c85b842..ea0ca1202 100644 --- a/db/docs/ref/upgrade.4.1/fop.html +++ b/db/docs/ref/upgrade.4.1/fop.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Release 4.1: DB->associate, DB->open, DB->remove, DB->rename - + @@ -123,6 +123,6 @@ transaction handle to operations using handles that were opened within a transaction will also return an error.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.1/hash_nelem.html b/db/docs/ref/upgrade.4.1/hash_nelem.html index b84bf4806..9d0e6de24 100644 --- a/db/docs/ref/upgrade.4.1/hash_nelem.html +++ b/db/docs/ref/upgrade.4.1/hash_nelem.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 4.1: DB->stat.hash_nelem - + @@ -20,6 +20,6 @@ databases has been removed from the 4.1 release, this information is no longer available to applications.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.1/incomplete.html b/db/docs/ref/upgrade.4.1/incomplete.html index bccac9e19..2f0da9386 100644 --- a/db/docs/ref/upgrade.4.1/incomplete.html +++ b/db/docs/ref/upgrade.4.1/incomplete.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Release 4.1: DB_INCOMPLETE - + @@ -21,13 +21,10 @@ to check for this error return, as the underlying Berkeley DB interfaces that could historically fail to checkpoint or flush the cache and return this error can no longer fail for that reason. Applications should remove all uses of DB_INCOMPLETE.

    -

    Additionally, the -DbEnv.checkpoint -and -Db.sync -methods have been changed from returning int to returning void.

    +

    Additionally, the DbEnv.checkpoint and Db.sync methods have been changed +from returning int to returning void.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.1/intro.html b/db/docs/ref/upgrade.4.1/intro.html index 521afa080..7d7d9c23f 100644 --- a/db/docs/ref/upgrade.4.1/intro.html +++ b/db/docs/ref/upgrade.4.1/intro.html @@ -1,15 +1,14 @@ - - + + Berkeley DB Reference Guide: Release 4.1: introduction - + -

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext @@ -22,6 +21,6 @@ This information does not describe how to upgrade Berkeley DB 1.85 release applications.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.1/java.html b/db/docs/ref/upgrade.4.1/java.html index 6cba3606b..58a61ea19 100644 --- a/db/docs/ref/upgrade.4.1/java.html +++ b/db/docs/ref/upgrade.4.1/java.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: Release 4.1: Java exceptions - + @@ -15,21 +15,14 @@

    Release 4.1: Java exceptions

    -

    The Java -DbEnv - constructor is now -marked with "throws DbException". This means applications must -construct -DbEnv -objects in a context where -DbException -throwables are -handled (either in a try/catch block or in a method that propagates the -exception up the stack). Note that previous versions of the Berkeley DB Java -API could throw this exception from the constructor but it was not -marked.

    +

    The Java DbEnv constructor is now marked with "throws DbException". +This means applications must construct DbEnv objects in a context where +DbException throwables are handled (either in a try/catch block or in a +method that propagates the exception up the stack). Note that previous +versions of the Berkeley DB Java API could throw this exception from the +constructor but it was not marked.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.1/log_register.html b/db/docs/ref/upgrade.4.1/log_register.html index 94c6b4646..cd8d885e2 100644 --- a/db/docs/ref/upgrade.4.1/log_register.html +++ b/db/docs/ref/upgrade.4.1/log_register.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 4.1: DB_ENV->log_register - + @@ -22,6 +22,6 @@ these interfaces, please contact Sleepycat Software support for help in upgrading.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.1/log_stat.html b/db/docs/ref/upgrade.4.1/log_stat.html index e4f43e0de..e65d859bd 100644 --- a/db/docs/ref/upgrade.4.1/log_stat.html +++ b/db/docs/ref/upgrade.4.1/log_stat.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 4.1: st_flushcommit - + @@ -21,6 +21,6 @@ application using the "st_flushcommits" statistic should remove it, or replace it with the "st_count" statistic.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.1/memp_sync.html b/db/docs/ref/upgrade.4.1/memp_sync.html index 1008d24ed..17b48e384 100644 --- a/db/docs/ref/upgrade.4.1/memp_sync.html +++ b/db/docs/ref/upgrade.4.1/memp_sync.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 4.1: DB_ENV->memp_sync - + @@ -26,6 +26,6 @@ this information. If your application used this information, please contact Sleepycat Software support for help in upgrading.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.1/toc.html b/db/docs/ref/upgrade.4.1/toc.html index 3d477bced..b50b25f21 100644 --- a/db/docs/ref/upgrade.4.1/toc.html +++ b/db/docs/ref/upgrade.4.1/toc.html @@ -1,14 +1,15 @@ - - + + Berkeley DB Reference Guide: Upgrading Berkeley DB 4.0.X applications to Berkeley DB 4.1 - + +

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    Ref @@ -32,6 +33,6 @@

    Ref
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.2/cksum.html b/db/docs/ref/upgrade.4.2/cksum.html index 1451277be..db6d455e5 100644 --- a/db/docs/ref/upgrade.4.2/cksum.html +++ b/db/docs/ref/upgrade.4.2/cksum.html @@ -1,15 +1,14 @@ - - + + Berkeley DB Reference Guide: Release 4.2: DB_CHKSUM_SHA1 - + -

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext @@ -25,6 +24,6 @@ Applications using the DB_CHKSUM_SHA1 flag should change that use to DB_CHKSUM; no other change is required.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.2/client.html b/db/docs/ref/upgrade.4.2/client.html index 8f295c3c9..274e7ed81 100644 --- a/db/docs/ref/upgrade.4.2/client.html +++ b/db/docs/ref/upgrade.4.2/client.html @@ -1,15 +1,14 @@ - - + + Berkeley DB Reference Guide: Release 4.2: DB_CLIENT - + -

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext @@ -23,6 +22,6 @@ flag should change that use to

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.2/del.html b/db/docs/ref/upgrade.4.2/del.html index d48973311..c3f10415d 100644 --- a/db/docs/ref/upgrade.4.2/del.html +++ b/db/docs/ref/upgrade.4.2/del.html @@ -1,37 +1,31 @@ - - + + Berkeley DB Reference Guide: Release 4.2: DB->del - + -

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext

    Release 4.2: DB->del

    -

    In previous releases, the C++ Db::del and Java -Db.delete - methods threw exceptions -encapsulating the -DB_KEYEMPTY error in some cases when called on Queue and Recno -databases. Unfortunately, this was undocumented behavior.

    +

    In previous releases, the C++ Db::del and Java Db.delete +methods threw exceptions encapsulating the DB_KEYEMPTY error in +some cases when called on Queue and Recno databases. Unfortunately, +this was undocumented behavior.

    For consistency with the other Berkeley DB methods that handle DB_KEYEMPTY, this is no longer the case. Applications calling -the Db::del or -Db.delete -methods on Queue or Recno -databases, and handling the DB_KEYEMPTY exception specially, -should be modified to check for a return value of DB_KEYEMPTY -instead.

    +the Db::del or Db.delete methods on Queue or Recno databases, +and handling the DB_KEYEMPTY exception specially, should be +modified to check for a return value of DB_KEYEMPTY instead.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.2/disk.html b/db/docs/ref/upgrade.4.2/disk.html index 294cf4a66..7eb6f868c 100644 --- a/db/docs/ref/upgrade.4.2/disk.html +++ b/db/docs/ref/upgrade.4.2/disk.html @@ -1,17 +1,17 @@ - - + + Berkeley DB Reference Guide: Release 4.2: upgrade requirements - + -

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext +PrevRefNext

    Release 4.2: upgrade requirements

    @@ -19,8 +19,8 @@ formats changed in the Berkeley DB 4.2 release.

    For further information on upgrading Berkeley DB installations, see Upgrading Berkeley DB installations.

    -

    PrevRefNext +

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.2/intro.html b/db/docs/ref/upgrade.4.2/intro.html index 21b538105..1653dc8fb 100644 --- a/db/docs/ref/upgrade.4.2/intro.html +++ b/db/docs/ref/upgrade.4.2/intro.html @@ -1,15 +1,14 @@ - - + + Berkeley DB Reference Guide: Release 4.2: introduction - + -

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext @@ -22,6 +21,6 @@ This information does not describe how to upgrade Berkeley DB 1.85 release applications.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.2/java.html b/db/docs/ref/upgrade.4.2/java.html index 38a6a83fc..623545630 100644 --- a/db/docs/ref/upgrade.4.2/java.html +++ b/db/docs/ref/upgrade.4.2/java.html @@ -1,15 +1,14 @@ - - + + Berkeley DB Reference Guide: Release 4.2: Java - + -

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext @@ -50,14 +49,11 @@ deploy to a version 1.1 or 1.0 Java environment, it may be possible to do so by not including the classes in the com.sleepycat.bdb package in the Java build process (however, that workaround has not been tested by Sleepycat Software).

    -

    A few inconsistent methods have been cleaned up (for example, -Db.close - now returns void; previously, -it returned an int -which was always zero). The synchronized attributed has been toggled -on some methods -- this is an attempt to prevent multithreaded -applications from calling close or similar methods concurrently from -multiple threads.

    +

    A few inconsistent methods have been cleaned up (for example, Db.close +now returns void; previously, it returned an int which was always zero). +The synchronized attribute has been toggled on some methods -- this is +an attempt to prevent multithreaded applications from calling close or +similar methods concurrently from multiple threads.

    The Berkeley DB API has up until now been consistent across all language APIs. Although consistency has its benefits, it made our Java API look strange to Java programmers. Many methods have been renamed in this release of the @@ -95,6 +91,6 @@ collections style access layer (com.sleepycat.bdb) and the now relocated XA system (com.sleepycat.xa).


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.2/lockng.html b/db/docs/ref/upgrade.4.2/lockng.html index ab78113f5..de2beaa61 100644 --- a/db/docs/ref/upgrade.4.2/lockng.html +++ b/db/docs/ref/upgrade.4.2/lockng.html @@ -1,15 +1,14 @@ - - + + Berkeley DB Reference Guide: Release 4.2: DB_LOCK_NOTGRANTED - + -

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext @@ -46,6 +45,6 @@ can configure database operation methods to return done.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.2/nosync.html b/db/docs/ref/upgrade.4.2/nosync.html index f4cc53206..28433441b 100644 --- a/db/docs/ref/upgrade.4.2/nosync.html +++ b/db/docs/ref/upgrade.4.2/nosync.html @@ -1,15 +1,14 @@ - - + + Berkeley DB Reference Guide: Release 4.2: Client replication environments - + -

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext @@ -32,6 +31,6 @@ will increase their performance. Regardless of the setting of the transaction prepare.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.2/priority.html b/db/docs/ref/upgrade.4.2/priority.html index 261c35b81..a68c4e9b1 100644 --- a/db/docs/ref/upgrade.4.2/priority.html +++ b/db/docs/ref/upgrade.4.2/priority.html @@ -1,15 +1,14 @@ - - + + Berkeley DB Reference Guide: Release 4.2: DB->set_cache_priority - + -

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext @@ -26,6 +25,6 @@ arguments and behaves identically to the old call, except that a DB database handle.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.2/queue.html b/db/docs/ref/upgrade.4.2/queue.html index 4ee29e6bc..62b262ec0 100644 --- a/db/docs/ref/upgrade.4.2/queue.html +++ b/db/docs/ref/upgrade.4.2/queue.html @@ -1,15 +1,14 @@ - - + + Berkeley DB Reference Guide: Release 4.2: Queue access method - + -

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext @@ -31,6 +30,6 @@ where extent files were configured along with either encryption or checksums.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.2/repinit.html b/db/docs/ref/upgrade.4.2/repinit.html index 8a857a8a4..5f5b4eb1d 100644 --- a/db/docs/ref/upgrade.4.2/repinit.html +++ b/db/docs/ref/upgrade.4.2/repinit.html @@ -1,15 +1,14 @@ - - + + Berkeley DB Reference Guide: Release 4.2: Replication - + -

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext @@ -41,6 +40,6 @@ number (LSN) associated with those return values. The new argument is DB_REP_NOTPERM. See Transactional guarantees for more information.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.2/tcl.html b/db/docs/ref/upgrade.4.2/tcl.html index df24f7cd2..e3ebbd93d 100644 --- a/db/docs/ref/upgrade.4.2/tcl.html +++ b/db/docs/ref/upgrade.4.2/tcl.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Release 4.2: Tcl API - + @@ -19,6 +19,6 @@ or later.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.2/toc.html b/db/docs/ref/upgrade.4.2/toc.html index 33ecb67b0..225ce61f1 100644 --- a/db/docs/ref/upgrade.4.2/toc.html +++ b/db/docs/ref/upgrade.4.2/toc.html @@ -1,14 +1,15 @@ - - + + Berkeley DB Reference Guide: Upgrading Berkeley DB 4.1.X applications to Berkeley DB 4.2 - + +

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    Ref @@ -32,6 +33,6 @@

    Ref
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.2/verify.html b/db/docs/ref/upgrade.4.2/verify.html index b516b2682..ae6412231 100644 --- a/db/docs/ref/upgrade.4.2/verify.html +++ b/db/docs/ref/upgrade.4.2/verify.html @@ -1,15 +1,14 @@ - - + + Berkeley DB Reference Guide: Release 4.2: DB->verify - + -

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext @@ -29,6 +28,6 @@ updated to make no further use of any kind of the DB->verify returns.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade.4.3/cput.html b/db/docs/ref/upgrade.4.3/cput.html new file mode 100644 index 000000000..dcc412235 --- /dev/null +++ b/db/docs/ref/upgrade.4.3/cput.html @@ -0,0 +1,26 @@ + + + + + + +Berkeley DB Reference Guide: Release 4.3: DBcursor->c_put + + + + + + +

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext +
    +

    +

    Release 4.3: DBcursor->c_put

    +

    The 4.3 release disallows the DB_CURRENT flag to the DBcursor->c_put method +after the current item referenced by the cursor has been deleted. Applications +using this sequence of operations should be changed to do the put without first +deleting the item.
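    As an illustration, here is a minimal C sketch (not part of the release notes; the cursor and DBT handles are assumed to be set up by the caller) of collapsing the old delete-then-put sequence into a single DB_CURRENT put:

    #include <db.h>

    /*
     * Overwrite the data of the record the cursor currently references.
     * With DB_CURRENT the key argument is ignored by c_put.
     */
    int
    overwrite_current(DBC *dbc, DBT *key, DBT *data)
    {
            /*
             * Pre-4.3 pattern, now disallowed:
             *      dbc->c_del(dbc, 0);
             *      dbc->c_put(dbc, key, data, DB_CURRENT);
             *
             * 4.3 and later: do the put directly, without the delete.
             */
            return (dbc->c_put(dbc, key, data, DB_CURRENT));
    }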

    +

    PrevRefNext +
    +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/ref/upgrade.4.3/disk.html b/db/docs/ref/upgrade.4.3/disk.html new file mode 100644 index 000000000..7935b1808 --- /dev/null +++ b/db/docs/ref/upgrade.4.3/disk.html @@ -0,0 +1,26 @@ + + + + + + +Berkeley DB Reference Guide: Release 4.3: upgrade requirements + + + + + + +

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext +
    +

    +

    Release 4.3: upgrade requirements

    +

    The log file format changed in the Berkeley DB 4.3 release. No database +formats changed in the Berkeley DB 4.3 release.

    +

    For further information on upgrading Berkeley DB installations, see +Upgrading Berkeley DB installations.

    +

    PrevRefNext +
    +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/ref/upgrade.4.3/enomem.html b/db/docs/ref/upgrade.4.3/enomem.html new file mode 100644 index 000000000..a7cc776d0 --- /dev/null +++ b/db/docs/ref/upgrade.4.3/enomem.html @@ -0,0 +1,43 @@ + + + + + + +Berkeley DB Reference Guide: Release 4.3: ENOMEM and DbMemoryException + + + + + + +

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext +
    +

    +

    Release 4.3: ENOMEM and DbMemoryException

    +

    In versions of Berkeley DB before 4.3, the error ENOMEM was used to +indicate that the buffer in a DBT configured with +DB_DBT_USERMEM was too small to hold a key or data item being +retrieved. The 4.3 release adds a new error, DB_BUFFER_SMALL, +that is returned in this case.

    +

    The reason for the change is that the use of ENOMEM was +ambiguous: calls such as DB->get or DBcursor->c_get could return +ENOMEM either if a DBT was too small or if some resource +was exhausted.

    +

    The result is that starting with the 4.3 release, C applications should +always treat ENOMEM as a fatal error. Code that checked for +the ENOMEM return and allocated a new buffer should be changed +to check for DB_BUFFER_SMALL.
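    For example, a hedged C sketch (the database handle and key are assumed to exist; the initial buffer size is illustrative) of code that previously grew its buffer on ENOMEM and now keys off DB_BUFFER_SMALL:

    #include <stdlib.h>
    #include <string.h>
    #include <errno.h>
    #include <db.h>

    int
    fetch_record(DB *dbp, DBT *key)
    {
            DBT data;
            void *buf;
            int ret;

            if ((buf = malloc(1024)) == NULL)       /* illustrative size */
                    return (ENOMEM);

            memset(&data, 0, sizeof(data));
            data.data = buf;
            data.ulen = 1024;
            data.flags = DB_DBT_USERMEM;

            if ((ret = dbp->get(dbp, NULL, key, &data, 0)) == DB_BUFFER_SMALL) {
                    /* data.size reports the length Berkeley DB needs. */
                    free(buf);
                    if ((buf = malloc(data.size)) == NULL)
                            return (ENOMEM);        /* real exhaustion */
                    data.data = buf;
                    data.ulen = data.size;
                    ret = dbp->get(dbp, NULL, key, &data, 0);
            }
            free(buf);
            return (ret);
    }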

    +

    In C++ applications configured for exceptions, a +DbMemoryException will continue to be thrown in both cases, +and applications should check the errno in the exception to determine +which error occurred.

    +

    In Java applications, a DbMemoryException will be thrown +when a Dbt is too small to hold a return value, and an +OutOfMemoryError will be thrown in all cases of resource +exhaustion.

    +

    PrevRefNext +
    +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/ref/upgrade.4.3/err.html b/db/docs/ref/upgrade.4.3/err.html new file mode 100644 index 000000000..6fc668d1b --- /dev/null +++ b/db/docs/ref/upgrade.4.3/err.html @@ -0,0 +1,33 @@ + + + + + + +Berkeley DB Reference Guide: Release 4.3: DB_ENV->set_errcall, DB->set_errcall + + + + + + +

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext +
    +

    +

    Release 4.3: DB_ENV->set_errcall, DB->set_errcall

    +

    The signature of the error callback passed to the +DB_ENV->set_errcall and DB->set_errcall methods has changed in +the 4.3 release. For example, if you previously had a function such as +this:

    +
    void handle_db_error(const char *prefix, char *message);
    +

    it should be changed to this:

    +
    void handle_db_error(const DB_ENV *dbenv,
    +    const char *prefix, const char *message);
    +

    This change adds the DB_ENV handle to provide database +environment context for the callback function, and incidentally makes +it clear the message parameter cannot be changed by the callback.
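    For instance, a brief sketch (the dbenv handle and the "myapp" prefix are assumptions for illustration) of an updated callback and its registration:

    #include <stdio.h>
    #include <db.h>

    /* New-style 4.3 error callback: note the const DB_ENV * first argument. */
    void
    handle_db_error(const DB_ENV *dbenv, const char *prefix, const char *message)
    {
            fprintf(stderr, "%s: %s\n", prefix == NULL ? "db" : prefix, message);
    }

    void
    configure_error_reporting(DB_ENV *dbenv)
    {
            dbenv->set_errpfx(dbenv, "myapp");      /* illustrative prefix */
            /* Registration is unchanged apart from the callback's signature. */
            dbenv->set_errcall(dbenv, handle_db_error);
    }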

    +

    PrevRefNext +
    +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/ref/upgrade.4.3/fileopen.html b/db/docs/ref/upgrade.4.3/fileopen.html new file mode 100644 index 000000000..a4d1b0b45 --- /dev/null +++ b/db/docs/ref/upgrade.4.3/fileopen.html @@ -0,0 +1,24 @@ + + + + + + +Berkeley DB Reference Guide: Release 4.3: DB_FILEOPEN + + + + + + +

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext +
    +

    +

    Release 4.3: DB_FILEOPEN

    +

    The 4.3 release removes the DB_FILEOPEN error return. Any application +check for the DB_FILEOPEN error should be removed.

    +

    PrevRefNext +
    +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/ref/upgrade.4.3/intro.html b/db/docs/ref/upgrade.4.3/intro.html new file mode 100644 index 000000000..2980c70df --- /dev/null +++ b/db/docs/ref/upgrade.4.3/intro.html @@ -0,0 +1,26 @@ + + + + + + +Berkeley DB Reference Guide: Release 4.3: introduction + + + + + + +

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext +
    +

    +

    Release 4.3: introduction

    +

    The following pages describe how to upgrade applications coded against +the Berkeley DB 4.2 release interfaces to the Berkeley DB 4.3 release interfaces. +This information does not describe how to upgrade Berkeley DB 1.85 release +applications.

    +

    PrevRefNext +
    +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/ref/upgrade.4.3/java.html b/db/docs/ref/upgrade.4.3/java.html new file mode 100644 index 000000000..9a6b51501 --- /dev/null +++ b/db/docs/ref/upgrade.4.3/java.html @@ -0,0 +1,63 @@ + + + + + + +Berkeley DB Reference Guide: Release 4.3: Java + + + + + + +

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext +
    +

    +

    Release 4.3: Java

    +

    The Berkeley DB Java API has changed significantly in the 4.3 release, in ways +incompatible with previous releases. This has been done to provide a +consistent Java-like API for Berkeley DB as well as to make the Berkeley DB Java API +match the API in Berkeley DB Java Edition, to ease application-porting between +the two libraries.

    +

    Here is a summary of the major changes:

    +
      +

    1. The low-level wrapper around the C API has been moved into a package +called com.sleepycat.db.internal. +

    2. There is a new public API in the package com.sleepycat.db. +

    3. All flags and error numbers have been eliminated from the public API. +All configuration is done through method calls on configuration objects. +

    4. All classes and methods are named to Java standards, matching Berkeley DB Java +Edition. For example: +

        +
      • Db -> Database +
      • Dbc -> Cursor +
      • Dbt -> DatabaseEntry +
      • DbEnv -> Environment +
      • DbTxn -> Transaction +
      • Db.cursor -> Database.openCursor +
      • Dbc.get(..., DbConstants.DB_CURRENT) -> Cursor.getCurrent(...) +
      +

    5. The statistics classes have "getter" methods for all fields. +

    6. In transactional applications, the Java API infers whether to +auto-commit operations: if an update is performed on a transactional +database without supplying a transaction, it is implicitly +auto-committed. +

    7. The com.sleepycat.bdb.* packages have been reorganized so that the binding +classes can be used with the base API in the com.sleepycat.db package. The +bind and collection classes are now essentially the same in Berkeley DB and Berkeley DB +Java Edition. The former com.sleepycat.bdb.bind.* packages are now the +com.sleepycat.bind.* packages. The former com.sleepycat.bdb, +com.sleepycat.bdb.collections, and com.sleepycat.bdb.factory packages are now +combined in the new com.sleepycat.collections package. +

    8. A layer of the former collections API has been removed to simplify the API and +to remove the redundant implementation of secondary indices. The former +DataStore, DataIndex, and ForeignKeyIndex classes have been removed. Instead +of wrapping a Database in a DataStore or DataIndex, the Database object is now +passed directly to the constructor of a StoredMap, StoredList, etc. +
    +

    PrevRefNext +
    +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/ref/upgrade.4.3/log.html b/db/docs/ref/upgrade.4.3/log.html new file mode 100644 index 000000000..84312efa9 --- /dev/null +++ b/db/docs/ref/upgrade.4.3/log.html @@ -0,0 +1,33 @@ + + + + + + +Berkeley DB Reference Guide: Release 4.3: Logging + + + + + + +

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext +
    +

    +

    Release 4.3: Logging

    +

    In previous releases, the DB_ENV->set_flags method flag +DB_TXN_NOT_DURABLE specified that transactions for the entire database +environment were not durable. However, it was not possible to set this +flag in environments that were part of replication groups, and physical +log files were still created. The 4.3 release adds support for true +in-memory logging for both replicated and non-replicated sites.

    +

    Existing applications setting the DB_TXN_NOT_DURABLE flag for database +environments should be upgraded to set the DB_LOG_INMEMORY flag +instead.
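    A minimal sketch of the upgraded configuration (assuming an environment handle obtained from db_env_create and not yet opened; the buffer size is purely illustrative):

    #include <db.h>

    int
    configure_inmemory_logging(DB_ENV *dbenv)
    {
            int ret;

            /*
             * Replaces the former environment-wide DB_TXN_NOT_DURABLE
             * setting: keep the transaction log entirely in memory.
             */
            if ((ret = dbenv->set_flags(dbenv, DB_LOG_INMEMORY, 1)) != 0)
                    return (ret);

            /*
             * In-memory logs live in the log buffer, so the buffer is
             * normally enlarged; 10MB here is an illustrative value.
             */
            return (dbenv->set_lg_bsize(dbenv, 10 * 1024 * 1024));
    }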

    +

    In previous releases, log buffer sizes were restricted to be less than +or equal to the log file size; this restriction is no longer required.

    +

    PrevRefNext +
    +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/ref/upgrade.4.3/repl.html b/db/docs/ref/upgrade.4.3/repl.html new file mode 100644 index 000000000..706c001f6 --- /dev/null +++ b/db/docs/ref/upgrade.4.3/repl.html @@ -0,0 +1,40 @@ + + + + + + +Berkeley DB Reference Guide: Release 4.3: Replication + + + + + + +

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext +
    +

    +

    Release 4.3: Replication

    +

    The 4.3 release removes support for logs-only replication clients. Use +of the DB_REP_LOGSONLY flag to the DB_ENV->rep_start method should be replaced +with the DB_REP_CLIENT flag.
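    For example, a one-line sketch (the dbenv handle and the cdata DBT are assumed to exist) of a former logs-only client now starting as an ordinary client:

    /* Formerly: dbenv->rep_start(dbenv, &cdata, DB_REP_LOGSONLY); */
    if ((ret = dbenv->rep_start(dbenv, &cdata, DB_REP_CLIENT)) != 0)
            dbenv->err(dbenv, ret, "DB_ENV->rep_start");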

    +

    The 4.3 release adds two new arguments to the DB_ENV->rep_elect method, +nvotes and flags. The nvotes argument sets the +required number of replication group members that must participate in +an election in order for a master to be declared. For backward +compatibility, set the nvotes argument to 0. The flags argument +is currently unused and should be set to 0. See the DB_ENV->rep_elect method or +"Replication Elections" for more information.

    +

    In the 4.3 release it is no longer necessary to do a database +environment hot backup to initialize a replication client. All that is +needed now is for the client to join the replication group. Berkeley DB will +perform an internal backup from the master to the client automatically +and will run recovery on the client to bring it up to date with the +master. For this reason, the DB_REP_OUTDATED error return from the +DB_ENV->rep_process_message method is no longer needed, and applications should remove +any checking done for that error return.

    +

    PrevRefNext +
    +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/ref/upgrade.4.3/rtc.html b/db/docs/ref/upgrade.4.3/rtc.html new file mode 100644 index 000000000..1acf1aa17 --- /dev/null +++ b/db/docs/ref/upgrade.4.3/rtc.html @@ -0,0 +1,25 @@ + + + + + + +Berkeley DB Reference Guide: Release 4.3: Run-time configuration + + + + + + +

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext +
    +

    +

    Release 4.3: Run-time configuration

    +

    The signatures of the db_env_set_func_ftruncate and +db_env_set_func_seek functions have been simplified to take a byte +offset in one parameter rather than a page size and a page number.

    +

    PrevRefNext +
    +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/ref/upgrade.4.3/stat.html b/db/docs/ref/upgrade.4.3/stat.html new file mode 100644 index 000000000..315456a79 --- /dev/null +++ b/db/docs/ref/upgrade.4.3/stat.html @@ -0,0 +1,26 @@ + + + + + + +Berkeley DB Reference Guide: Release 4.3: DB_ENV->stat + + + + + + +

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext +
    +

    +

    Release 4.3: DB_ENV->stat

    +

    The 4.3 release adds transactional support to the DB->stat method.

    +

    Application writers can simply add a NULL txnid argument to the +DB->stat method calls in their application to leave the application's +behavior unchanged.
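    For example, a short C sketch for a Btree database (the dbp handle is assumed; the statistics fields printed are only illustrative of the DB_BTREE_STAT structure), where the existing call simply gains a NULL transaction argument:

    #include <stdio.h>
    #include <stdlib.h>
    #include <db.h>

    int
    print_btree_stats(DB *dbp)
    {
            DB_BTREE_STAT *sp;
            int ret;

            /*
             * 4.2:  ret = dbp->stat(dbp, &sp, 0);
             * 4.3:  a transaction handle (NULL here) is the second argument.
             */
            if ((ret = dbp->stat(dbp, NULL, &sp, 0)) != 0)
                    return (ret);

            printf("keys: %lu, data items: %lu\n",
                (unsigned long)sp->bt_nkeys, (unsigned long)sp->bt_ndata);
            free(sp);       /* the statistics block is allocated by Berkeley DB */
            return (0);
    }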

    +

    PrevRefNext +
    +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/ref/upgrade.4.3/toc.html b/db/docs/ref/upgrade.4.3/toc.html new file mode 100644 index 000000000..70e638a72 --- /dev/null +++ b/db/docs/ref/upgrade.4.3/toc.html @@ -0,0 +1,37 @@ + + + + + + +Berkeley DB Reference Guide: Upgrading Berkeley DB 4.2.X applications to Berkeley DB 4.3 + + + + + + + +

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    Ref +
    +

    +

    Upgrading Berkeley DB 4.2.X applications to Berkeley DB 4.3

    +
      +
    1. Release 4.3: introduction +
    2. Release 4.3: Java +
    3. DB_ENV->set_errcall, DB->set_errcall +
    4. DBcursor->c_put +
    5. Release 4.3: DB->stat +
    6. Release 4.3: DB_ENV->set_verbose +
    7. Release 4.3: Logging +
    8. Release 4.3: DB_FILEOPEN +
    9. Release 4.3: ENOMEM and DbMemoryException +
    10. Release 4.3: Replication +
    11. Release 4.3: Run-time configuration +
    12. Release 4.3: upgrade requirements +
    +

    Ref +
    +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/ref/upgrade.4.3/verb.html b/db/docs/ref/upgrade.4.3/verb.html new file mode 100644 index 000000000..57f555233 --- /dev/null +++ b/db/docs/ref/upgrade.4.3/verb.html @@ -0,0 +1,33 @@ + + + + + + +Berkeley DB Reference Guide: Release 4.3: DB_ENV->set_verbose + + + + + + +

    Berkeley DB Reference Guide:
    Upgrading Berkeley DB Applications

    PrevRefNext +
    +

    +

    Release 4.3: DB_ENV->set_verbose

    +

    The 4.3 release removes support for the DB_ENV->set_verbose method flag +DB_VERB_CHKPOINT. Application writers should simply remove any use of +this flag from their applications.

    +

    The 4.3 release redirects output configured by the DB_ENV->set_verbose method +from the error output channels (see the DB_ENV->set_errfile and +DB_ENV->set_errcall methods for more information) to the new +DB_ENV->set_msgcall and DB_ENV->set_msgfile message output +channels. This change means the error output channels now carry +only errors, and no longer carry debugging and performance tuning messages as +well. Application writers using DB_ENV->set_verbose +should confirm that output is handled appropriately.
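    A short sketch (the dbenv handle is assumed, and the message handler shown is a hypothetical example) of routing verbose output through the new message channel while keeping errors separate:

    #include <stdio.h>
    #include <db.h>

    /* Message callback for the new 4.3 message output channel. */
    void
    handle_db_message(const DB_ENV *dbenv, const char *message)
    {
            printf("bdb: %s\n", message);
    }

    void
    configure_verbose_output(DB_ENV *dbenv)
    {
            /* Verbose/debugging output now arrives here ... */
            dbenv->set_msgcall(dbenv, handle_db_message);
            /* ... while set_errcall/set_errfile remain errors-only. */
            dbenv->set_errfile(dbenv, stderr);

            /* DB_VERB_CHKPOINT is gone; other flags, e.g. recovery, remain. */
            (void)dbenv->set_verbose(dbenv, DB_VERB_RECOVERY, 1);
    }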

    +

    PrevRefNext +
    +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. + + diff --git a/db/docs/ref/upgrade/process.html b/db/docs/ref/upgrade/process.html index c5e1bb6b7..86fb50357 100644 --- a/db/docs/ref/upgrade/process.html +++ b/db/docs/ref/upgrade/process.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Upgrading Berkeley DB installations - + @@ -146,6 +146,6 @@ and then re-add all of the clients to the replication group using the standard replication procedures for new sites.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/upgrade/version.html b/db/docs/ref/upgrade/version.html index 3185c2021..7288a05de 100644 --- a/db/docs/ref/upgrade/version.html +++ b/db/docs/ref/upgrade/version.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Library version information - + @@ -42,6 +42,6 @@ information, the db_version function encapsulating the version information, suitable for display to a user.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/xa/build.html b/db/docs/ref/xa/build.html index 12499ff82..ebdece38d 100644 --- a/db/docs/ref/xa/build.html +++ b/db/docs/ref/xa/build.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Building a Global Transaction Manager - + @@ -163,20 +163,20 @@ committing state. For each environment, the GTM should issue a can determine the fate of each transaction. The correct behavior is defined depending on the state of the global transaction according to the table below.

    -

    -

    preparing
    if all participating environments return the transaction in the prepared +
    +
    preparing
    if all participating environments return the transaction in the prepared but not yet committed/aborted state, then the GTM should commit the transaction. If any participating environment fails to return it, then the GTM should issue an abort to all environments that did return it. -

    committing
    the GTM should send a commit to any environment that returned this +
    committing
    the GTM should send a commit to any environment that returned this transaction in its list of prepared but not yet committed/aborted transactions. -

    aborting
    the GTM should send an abort to any environment that returned this +
    aborting
    the GTM should send an abort to any environment that returned this transaction in its list of prepared but not yet committed/aborted transactions.

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/xa/faq.html b/db/docs/ref/xa/faq.html index 614f2256c..7def07238 100644 --- a/db/docs/ref/xa/faq.html +++ b/db/docs/ref/xa/faq.html @@ -1,12 +1,12 @@ - - + + Berkeley DB Reference Guide: XA FAQ - + @@ -61,6 +61,6 @@ should occur independently of XA operation.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/xa/intro.html b/db/docs/ref/xa/intro.html index 338c13a56..c1938b0d5 100644 --- a/db/docs/ref/xa/intro.html +++ b/db/docs/ref/xa/intro.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Introduction - + @@ -48,6 +48,6 @@ transactions, the logs may be copied for backup purposes and the backup will be consistent across the multiple environments.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/xa/xa_config.html b/db/docs/ref/xa/xa_config.html index 4e2368181..ec850fb8c 100644 --- a/db/docs/ref/xa/xa_config.html +++ b/db/docs/ref/xa/xa_config.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: Configuring Berkeley DB with the Tuxedo System - + @@ -75,6 +75,6 @@ resource manager.

    using XA.


    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/ref/xa/xa_intro.html b/db/docs/ref/xa/xa_intro.html index 60f526420..b8375b0ea 100644 --- a/db/docs/ref/xa/xa_intro.html +++ b/db/docs/ref/xa/xa_intro.html @@ -1,12 +1,12 @@ - + Berkeley DB Reference Guide: XA Introduction - + @@ -57,6 +57,6 @@ X/Open Document Number: XO/CAE/91/300. by Andrade, Carges, Dwyer and Felts (Addison Wesley Longman).

    PrevRefNext
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/sleepycat/contact.html b/db/docs/sleepycat/contact.html index d514114d3..01e41b111 100644 --- a/db/docs/sleepycat/contact.html +++ b/db/docs/sleepycat/contact.html @@ -1,9 +1,9 @@ - + The Sleepycat Software Contact Page - + @@ -26,7 +26,7 @@ USA

    Sales:
    -Voice: +1-617-876-0858
    +Voice: +1-978-897-6487
    Toll-free: +1-877-SLEEPYCAT
    Email: sales@sleepycat.com
    diff --git a/db/docs/sleepycat/legal.html b/db/docs/sleepycat/legal.html index 41ce98378..38237aabd 100644 --- a/db/docs/sleepycat/legal.html +++ b/db/docs/sleepycat/legal.html @@ -1,25 +1,22 @@ - - + + Berkeley DB: Sleepycat Software Legal Notices - +

    Sleepycat Software Inc.

    Sleepycat Software Legal Notices

    -

    Copyright (c) 1990-2003 Sleepycat Software, Inc., 118 Tower Rd., +

    Copyright (c) 1990-2004 Sleepycat Software, Inc., 118 Tower Rd., Lincoln, MA 01773, U.S.A. All Rights Reserved.

    This product and publication is protected by copyright and distributed -under licenses restricting its use, copying and distribution. Permission -to use this publication or portions of this publication is granted by -Sleepycat Software provided that the above copyright notice appears in -all copies and that use of such publications is for non-commercial use -only and no modifications of the publication is made.

    +under licenses restricting its use, copying and distribution. See the +LICENSE file in the distribution for further information.

    RESTRICTED RIGHTS: Use, duplication, or disclosure by the U.S. Government is subject to restrictions of FAR 52.227-14(g)(2)(6/87) and FAR 52.227-19(6/87), or DFAR 252.227-7015(b)(6/95) and DFAR 227.7202-3(a).

    @@ -37,8 +34,9 @@ Corporation.

    QNX Software Systems Ltd.

    Sun Microsystems, SunOS and Solaris are trademarks, registered trademarks or service marks of Sun Microsystems, Inc.

    -

    TUXEDO is a trademarks, registered trademark or service mark of BEA +

    TUXEDO is a trademark, registered trademark or service mark of BEA Systems, Inc.

    +

    Linux is a registered trademark of Linus Torvalds.

    VxWorks and Tornado are trademarks, registered trademarks or service marks of Wind River Systems Inc.

    All other brand, company and product names referenced in this publication @@ -60,6 +58,6 @@ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/sleepycat/license.html b/db/docs/sleepycat/license.html index c76dd77ab..9cf305055 100644 --- a/db/docs/sleepycat/license.html +++ b/db/docs/sleepycat/license.html @@ -1,12 +1,12 @@ - + Berkeley DB: Sleepycat Software Product License - +

    @@ -17,7 +17,7 @@ software. For a license to use the Berkeley DB software under conditions other than those described here, or to purchase support for this software, please contact Sleepycat Software.

    /*
    - * Copyright (c) 1990-2003
    + * Copyright (c) 1990-2004
      *	Sleepycat Software.  All rights reserved.
      *
      * Redistribution and use in source and binary forms, with or without
    @@ -108,6 +108,6 @@ software, please contact Sleepycat Software.

    * SUCH DAMAGE. */
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/utility/berkeley_db_svc.html b/db/docs/utility/berkeley_db_svc.html index 7900ae5ed..c348c5de5 100644 --- a/db/docs/utility/berkeley_db_svc.html +++ b/db/docs/utility/berkeley_db_svc.html @@ -1,30 +1,29 @@ - - + + Berkeley DB: berkeley_db_svc - + - -

    berkeley_db_svc

    +


    berkeley_db_svc [-Vv] [-h home]
         [-I seconds] [-L file] [-t seconds] [-T seconds]

    Description

    - +

    The berkeley_db_svc utility is the Berkeley DB RPC server.

    The options are as follows:

    -

    -

    -h
    Add the specified home directory to the list of allowed home directories +
    +
    -h
    Add the specified home directory to the list of allowed home directories that can be specified by the client. The home directory should be an absolute pathname. The last component of each home directory specified must be unique because that is how clients specify which database environment @@ -33,40 +32,32 @@ they want to join. begins accepting requests from clients. For this reason, only one copy of the server program should ever be run at any time because recovery must always be single-threaded.

    -

    -I
    Set the default idle timeout for client environments to the specified +
    -I
    Set the default idle timeout for client environments to the specified number of seconds. The default timeout is 24 hours. -

    -L
    Log the execution of the berkeley_db_svc utility to the specified file in the +
    -L
    Log the execution of the berkeley_db_svc utility to the specified file in the following format, where ### is the process ID, and the date is the time the utility was started.
    berkeley_db_svc: ### Wed Jun 15 01:23:45 EDT 1995
    This file will be removed if the berkeley_db_svc utility exits gracefully. -

    -t
    Set the default timeout for client resources (idle transactions and +
    -t
    Set the default timeout for client resources (idle transactions and cursors) to the specified number of seconds. When the timeout expires, if the resource is a transaction, it is aborted; if the resource is a cursor, it is closed. The default timeout is 5 minutes. -

    -T
    Set the maximum timeout allowed for client resources. The default +
    -T
    Set the maximum timeout allowed for client resources. The default timeout is 20 minutes. If a client application requests a server timeout greater than the maximum timeout set for this server, the client's timeout will be capped at the maximum timeout value. -

    -V
    Write the library version number to the standard output, and exit. -

    -v
    Run in verbose mode. +
    -V
    Write the library version number to the standard output, and exit. +
    -v
    Run in verbose mode.
    -

    The berkeley_db_svc utility uses a Berkeley DB environment (as described for the --h option, the environment variable DB_HOME, or -because the utility was run in a directory containing a Berkeley DB -environment). In order to avoid environment corruption when using a -Berkeley DB environment, berkeley_db_svc should always be given the chance to -detach from the environment and exit gracefully. To cause berkeley_db_svc -to release all environment resources and exit cleanly, send it an -interrupt signal (SIGINT).

    The berkeley_db_svc utility exits 0 on success, and >0 if an error occurs.

    Environment Variables

    -

    -

    DB_HOME
    If the -h option is not specified and the environment variable +
    +
    DB_HOME
    If the -h option is not specified and the environment variable DB_HOME is set, it is used as the path of the database home, as described in DB_ENV->open.
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/utility/db_archive.html b/db/docs/utility/db_archive.html index f1fd08bdc..524a6f648 100644 --- a/db/docs/utility/db_archive.html +++ b/db/docs/utility/db_archive.html @@ -1,25 +1,24 @@ - + Berkeley DB: db_archive - + - -

    db_archive

    +


    db_archive [-adlsVv] [-h home] [-P password]

    Description

    - +

    The db_archive utility writes the pathnames of log files that are no longer in use (for example, no longer involved in active transactions), to the standard output, one pathname per line. These @@ -28,22 +27,22 @@ the case of catastrophic failure (which also requires a snapshot of the database files), but they may then be deleted from the system to reclaim disk space.

    The options are as follows:

    -

    -

    -a
    Write all pathnames as absolute pathnames, instead of relative to the +
    +
    -a
    Write all pathnames as absolute pathnames, instead of relative to the database home directories. -

    -d
    Remove log files that are no longer needed; no filenames are written. +
    -d
    Remove log files that are no longer needed; no filenames are written. Automatic log file removal is likely to make catastrophic recovery impossible. -

    -h
    Specify a home directory for the database environment; by +
    -h
    Specify a home directory for the database environment; by default, the current working directory is used. -

    -l
    Write out the pathnames of all the database log files, whether or not +
    -l
    Write out the pathnames of all the database log files, whether or not they are involved in active transactions. -

    -P
    Specify an environment password. Although Berkeley DB utilities overwrite +
    -P
    Specify an environment password. Although Berkeley DB utilities overwrite password strings as soon as possible, be aware there may be a window of vulnerability on systems where unprivileged users can see command-line arguments or where utilities are not able to overwrite the memory containing the command-line arguments. -

    -s
    Write the pathnames of all the database files that need to be archived +
    -s
    Write the pathnames of all the database files that need to be archived in order to recover the database from catastrophic failure. If any of the database files have not been accessed during the lifetime of the current log files, db_archive will not include them in this @@ -53,8 +52,8 @@ been deleted from the system. In this case, db_archive will ignore them. When db_recover is run, any files to which the log refers that are not present during recovery are assumed to have been deleted and will not be recovered.
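    The same lists are available to applications through the DB_ENV->log_archive method; the sketch below prints the file names corresponding to -s (the DB_ARCH_DATA flag), assuming an existing environment at the placeholder path "/tmp/dbenv" and trimming most error handling.
        #include <stdio.h>
        #include <stdlib.h>
        #include <db.h>

        /*
         * List the database files that would need to be archived for
         * catastrophic recovery, roughly what "db_archive -s" prints.
         * The returned list is a single allocation released with free(3).
         */
        int
        list_archive_files(void)
        {
            DB_ENV *dbenv;
            char **list, **p;
            int ret;

            if ((ret = db_env_create(&dbenv, 0)) != 0)
                return (ret);
            if ((ret = dbenv->open(dbenv, "/tmp/dbenv",
                DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0)
                goto err;

            /* DB_ARCH_DATA requests database files, not log files. */
            if ((ret = dbenv->log_archive(dbenv, &list, DB_ARCH_DATA)) == 0 &&
                list != NULL) {
                for (p = list; *p != NULL; ++p)
                    printf("%s\n", *p);
                free(list);
            }

        err:    (void)dbenv->close(dbenv, 0);
            return (ret);
        }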

    -

    -V
    Write the library version number to the standard output, and exit. -

    -v
    Run in verbose mode, listing the checkpoints in the log files as they +
    -V
    Write the library version number to the standard output, and exit. +
    -v
    Run in verbose mode, listing the checkpoints in the log files as they are reviewed.

    Log cursor handles (returned by the DB_ENV->log_cursor method) may have open @@ -85,12 +84,12 @@ See the db_archive utility source code for an example of using DB_ENV->open.

    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/utility/db_checkpoint.html b/db/docs/utility/db_checkpoint.html index 5250662d6..3db865829 100644 --- a/db/docs/utility/db_checkpoint.html +++ b/db/docs/utility/db_checkpoint.html @@ -1,51 +1,50 @@ - + Berkeley DB: db_checkpoint - + - -

    db_checkpoint

    +


    db_checkpoint [-1Vv] [-h home]
         [-k kbytes] [-L file] [-P password] [-p min]

    Description

    - +

    The db_checkpoint utility is a daemon process that monitors the database log, and periodically calls DB_ENV->txn_checkpoint to checkpoint it.

    The options are as follows:

    -

    -

    -1
    Checkpoint the log once, regardless of whether or not there has been +
    +
    -1
    Checkpoint the log once, regardless of whether or not there has been activity since the last checkpoint and then exit. -

    -h
    Specify a home directory for the database environment; by +
    -h
    Specify a home directory for the database environment; by default, the current working directory is used. -

    -k
    Checkpoint the database at least as often as every kbytes of log +
    -k
    Checkpoint the database at least as often as every kbytes of log file are written. -

    -L
    Log the execution of the db_checkpoint utility to the specified file in the +
    -L
    Log the execution of the db_checkpoint utility to the specified file in the following format, where ### is the process ID, and the date is the time the utility was started.
    db_checkpoint: ### Wed Jun 15 01:23:45 EDT 1995
    This file will be removed if the db_checkpoint utility exits gracefully. -

    -P
    Specify an environment password. Although Berkeley DB utilities overwrite +
    -P
    Specify an environment password. Although Berkeley DB utilities overwrite password strings as soon as possible, be aware there may be a window of vulnerability on systems where unprivileged users can see command-line arguments or where utilities are not able to overwrite the memory containing the command-line arguments. -

    -p
    Checkpoint the database at least every min minutes if there has +
    -p
    Checkpoint the database at least every min minutes if there has been any activity since the last checkpoint. -

    -V
    Write the library version number to the standard output, and exit. -

    -v
    Write the time of each checkpoint attempt to the standard output. +
    -V
    Write the library version number to the standard output, and exit. +
    -v
    Write the time of each checkpoint attempt to the standard output.

    At least one of the -1, -k, and -p options must be specified.
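    The underlying call is also available to applications; a minimal sketch of forcing a single checkpoint, the rough equivalent of -1, assuming an already opened transactional environment handle:
        #include <db.h>

        /*
         * Checkpoint the log once, regardless of activity since the last
         * checkpoint.  Passing non-zero kbyte/min values instead would
         * checkpoint only if that much log had been written or that much
         * time had passed, much like the -k and -p options.
         */
        int
        checkpoint_once(DB_ENV *dbenv)
        {
            return (dbenv->txn_checkpoint(dbenv, 0, 0, DB_FORCE));
        }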

    @@ -66,12 +65,12 @@ See the db_checkpoint utility source code for an example of using DB_ENV->open.
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/utility/db_deadlock.html b/db/docs/utility/db_deadlock.html index 2fadd6b16..676148f2a 100644 --- a/db/docs/utility/db_deadlock.html +++ b/db/docs/utility/db_deadlock.html @@ -1,26 +1,25 @@ - - + + Berkeley DB: db_deadlock - + - -

    db_deadlock

    +


    db_deadlock [-Vv]
    -    [-a e | m | n | o | w | y] [-h home] [-L file] [-t sec.usec]

    + [-a e | m | n | o | W | w | y] [-h home] [-L file] [-t sec.usec]

    Description

    - +

    The db_deadlock utility traverses the database environment lock region, and aborts a lock request each time it detects a deadlock or a lock request that has timed out. By default, in the case of a deadlock, @@ -30,32 +29,33 @@ Berkeley DB deadlock detection interfaces should be called in some other way, whenever there are multiple threads or processes accessing a database and at least one of them is modifying it.

    The options are as follows:

    -

    -

    -a
    When a deadlock is detected, abort the locker: -

    -

    m
    with the greatest number of locks -
    n
    with the fewest number of locks -
    o
    with the oldest locker ID -
    w
    with the fewest number of write locks -
    y
    with the youngest locker ID +
    +
    -a
    When a deadlock is detected, abort the locker: +
    +
    m
    with the most locks +
    n
    with the fewest locks +
    o
    with the oldest lock +
    W
    with the most write locks +
    w
    with the fewest write locks +
    y
    with the youngest lock

    When lock or transaction timeouts have been specified: -

    -

    e
    abort any lock request that has timed out +
    +
    e
    abort any lock request that has timed out

    -

    -h
    Specify a home directory for the database environment; by +
    -h
    Specify a home directory for the database environment; by default, the current working directory is used. -

    -L
    Log the execution of the db_deadlock utility to the specified file in the +
    -L
    Log the execution of the db_deadlock utility to the specified file in the following format, where ### is the process ID, and the date is the time the utility was started.
    db_deadlock: ### Wed Jun 15 01:23:45 EDT 1995
    This file will be removed if the db_deadlock utility exits gracefully. -

    -t
    Check the database environment every sec seconds plus +
    -t
    Check the database environment every sec seconds plus usec microseconds to see if a process has been forced to wait for a lock; if one has, review the database environment lock structures. -

    -V
    Write the library version number to the standard output, and exit. -

    -v
    Run in verbose mode, generating messages each time the detector runs. +
    -V
    Write the library version number to the standard output, and exit. +
    -v
    Run in verbose mode, generating messages each time the detector runs.

    If the -t option is not specified, db_deadlock will run once and exit.
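    Applications that prefer to run detection themselves can call DB_ENV->lock_detect with one of the DB_LOCK_* policies; a minimal sketch follows, assuming an already opened environment. The mapping of policies to the -a letters in the comment is my reading of the option list above, not something stated on this page, so check db.h before relying on it.
        #include <db.h>

        /*
         * Run one pass of the deadlock detector, aborting the locker with
         * the fewest write locks (roughly "-a w").  Other policies include
         * DB_LOCK_MAXLOCKS (m), DB_LOCK_MINLOCKS (n), DB_LOCK_OLDEST (o),
         * DB_LOCK_MAXWRITE (W), DB_LOCK_YOUNGEST (y) and DB_LOCK_EXPIRE (e);
         * this letter mapping is an assumption.
         */
        int
        run_detector_once(DB_ENV *dbenv)
        {
            int aborted;

            aborted = 0;
            return (dbenv->lock_detect(dbenv, 0, DB_LOCK_MINWRITE, &aborted));
        }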

    @@ -76,12 +76,12 @@ See the db_deadlock utility source code for an example of using DB_ENV->open.
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/utility/db_dump.html b/db/docs/utility/db_dump.html index 3217d2358..665cde85d 100644 --- a/db/docs/utility/db_dump.html +++ b/db/docs/utility/db_dump.html @@ -1,27 +1,26 @@ - + Berkeley DB: db_dump - + - -

    db_dump

    +


    db_dump [-klNpRrV] [-d ahr]
     	[-f output] [-h home] [-P password] [-s database] file
     db_dump185 [-p] [-f output] file

    Description

    - +

    The db_dump utility reads the database file file and writes it to the standard output using a portable flat-text format understood by the db_load utility. The file argument @@ -30,50 +29,50 @@ must be a file produced using the Berkeley DB library functions.

    except that it reads databases in the format used by Berkeley DB versions 1.85 and 1.86.

    The options are as follows:

    -

    -

    -d
    Dump the specified database in a format helpful for debugging the Berkeley DB +
    +
    -d
    Dump the specified database in a format helpful for debugging the Berkeley DB library routines. -

    -

    a
    Display all information. +
    +
    a
    Display all information.
    h
    Display only page headers.
    r
    Do not display the free-list or pages on the free list. This mode is used by the recovery tests.

    The output format of the -d option is not standard and may change, without notice, between releases of the Berkeley DB library.

    -

    -f
    Write to the specified file instead of to the standard output. -

    -h
    Specify a home directory for the database environment; by +
    -f
    Write to the specified file instead of to the standard output. +
    -h
    Specify a home directory for the database environment; by default, the current working directory is used. -

    -k
    Dump record numbers from Queue and Recno databases as keys. -

    -l
    List the databases stored in the file. -

    -N
    Do not acquire shared region mutexes while running. Other problems, +
    -k
    Dump record numbers from Queue and Recno databases as keys. +
    -l
    List the databases stored in the file. +
    -N
    Do not acquire shared region mutexes while running. Other problems, such as potentially fatal errors in Berkeley DB, will be ignored as well. This option is intended only for debugging errors, and should not be used under any other circumstances. -

    -P
    Specify an environment password. Although Berkeley DB utilities overwrite +
    -P
    Specify an environment password. Although Berkeley DB utilities overwrite password strings as soon as possible, be aware there may be a window of vulnerability on systems where unprivileged users can see command-line arguments or where utilities are not able to overwrite the memory containing the command-line arguments. -

    -p
    If characters in either the key or data items are printing characters (as +
    -p
    If characters in either the key or data items are printing characters (as defined by isprint(3)), use printing characters in file to represent them. This option permits users to use standard text editors and tools to modify the contents of databases.

    Note: different systems may have different notions about what characters are considered printing characters, and databases dumped in this manner may be less portable to external systems.

    -

    -R
    Aggressively salvage data from a possibly corrupt file. The -R +
    -R
    Aggressively salvage data from a possibly corrupt file. The -R flag differs from the -r option in that it will return all possible data from the file at the risk of also returning already deleted or otherwise nonsensical items. Data dumped in this fashion will almost certainly have to be edited by hand or other means before the data is ready for reload into another database. -

    -r
    Salvage data from a possibly corrupt file. When used on a uncorrupted +
    -r
    Salvage data from a possibly corrupt file. When used on an uncorrupted database, this option should return equivalent data to a normal dump, but most likely in a different order. -

    -s
    Specify a single database to dump. If no database is specified, all +
    -s
    Specify a single database to dump. If no database is specified, all databases in the database file are dumped. -

    -V
    Write the library version number to the standard output, and exit. +
    -V
    Write the library version number to the standard output, and exit.

    Dumping and reloading Hash databases that use user-defined hash functions will result in new databases that use the default hash @@ -111,12 +110,12 @@ otherwise, the output may be corrupt.

    The db_dump utility exits 0 on success, and >0 if an error occurs.

    The db_dump185 utility exits 0 on success, and >0 if an error occurs.

    Environment Variables

    -

    -

    DB_HOME
    If the -h option is not specified and the environment variable +
    +
    DB_HOME
    If the -h option is not specified and the environment variable DB_HOME is set, it is used as the path of the database home, as described in DB_ENV->open.
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/utility/db_load.html b/db/docs/utility/db_load.html index 71c1f3088..3860a870a 100644 --- a/db/docs/utility/db_load.html +++ b/db/docs/utility/db_load.html @@ -1,26 +1,26 @@ - - + + Berkeley DB: db_load - + - -

    db_load

    +


    db_load [-nTV] [-c name=value] [-f file]
    -    [-h home] [-P password] [-t btree | hash | queue | recno] file

    + [-h home] [-P password] [-t btree | hash | queue | recno] file +db_load [-r lsn | fileid] [-h home] [-P password] file

    Description

    - +

    The db_load utility reads from the standard input and loads it into the database file. The database file is created if it does not already exist.

    @@ -28,32 +28,47 @@ it does not already exist.

    db_dump utility, or as specified for the -T option below.

    The options are as follows:

    -

    -

    -c
    Specify configuration options ignoring any value they may have based on +
    +
    -c
    Specify configuration options ignoring any value they may have based on the input. The command-line format is name=value. See the Supported Keywords section below for a list of keywords supported by the -c option. -

    -f
    Read from the specified input file instead of from the standard +
    -f
    Read from the specified input file instead of from the standard input. -

    -h
    Specify a home directory for the database environment. +
    -h
    Specify a home directory for the database environment.

    If a home directory is specified, the database environment is opened -using the Db.DB_INIT_LOCK, Db.DB_INIT_LOG, -Db.DB_INIT_MPOOL, Db.DB_INIT_TXN, and Db.DB_USE_ENVIRON +using the DB_INIT_LOCK, DB_INIT_LOG, +DB_INIT_MPOOL, DB_INIT_TXN, and DB_USE_ENVIRON flags to DB_ENV->open. (This means that db_load can be used to load data into databases while they are in use by other processes.) If the DB_ENV->open call fails, or if no home directory is specified, the database is still updated, but the environment is ignored; for example, no locking is done.
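    In C terms, the environment open described here looks roughly like the following sketch (error handling trimmed; the home path is a placeholder and is not taken from this page):
        #include <db.h>

        /*
         * Join the environment with the same subsystems db_load uses when
         * -h is given, so loads can proceed while other processes have the
         * databases open.  "/path/to/home" is a placeholder.
         */
        int
        join_env(DB_ENV **dbenvp)
        {
            DB_ENV *dbenv;
            int ret;

            if ((ret = db_env_create(&dbenv, 0)) != 0)
                return (ret);
            ret = dbenv->open(dbenv, "/path/to/home",
                DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
                DB_INIT_TXN | DB_USE_ENVIRON, 0);
            if (ret != 0) {
                (void)dbenv->close(dbenv, 0);
                return (ret);
            }
            *dbenvp = dbenv;
            return (0);
        }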

    -

    -n
    Do not overwrite existing keys in the database when loading into an +
    -n
    Do not overwrite existing keys in the database when loading into an already existing database. If a key/data pair cannot be loaded into the database for this reason, a warning message is displayed on the standard error output, and the key/data pair are skipped. -

    -P
    Specify an environment password. Although Berkeley DB utilities overwrite +
    -P
    Specify an environment password. Although Berkeley DB utilities overwrite password strings as soon as possible, be aware there may be a window of vulnerability on systems where unprivileged users can see command-line arguments or where utilities are not able to overwrite the memory containing the command-line arguments. -

    -T
    The -T option allows non-Berkeley DB applications to easily load text +
    -r
    Reset the database's file ID or log sequence numbers (LSNs). +

    All database pages in transactional environments contain references to +the environment's log records. In order to copy a database into a +different database environment, database page references to the old +environment's log records must be reset, otherwise data corruption can +occur when the database is modified in the new environment. The +-r lsn option resets a database's log sequence +numbers.

    +

    All databases contain an ID string used to identify the database in the +database environment cache. If a database is copied, and used in the +same environment as another file with the same ID string, corruption can +occur. The -r fileid option resets a database's file +ID to a new value.

    +

    In both cases, the physical file specified by the file argument +is modified in-place.
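    The same resets are exposed on the environment handle in this patch (see the fileid_reset and lsn_reset methods wired up in env_method.c later in this diff). The exact argument list used below is an assumption, not confirmed against this tree's db.h, so treat it as a sketch only.
        #include <db.h>

        /*
         * Prepare a copied database file for use in a new environment by
         * clearing its page LSNs and assigning a fresh file ID.  The
         * (dbenv, file, flags) argument list is assumed, not verified.
         */
        int
        reset_copied_db(DB_ENV *dbenv, const char *file)
        {
            int ret;

            if ((ret = dbenv->lsn_reset(dbenv, file, 0)) != 0)
                return (ret);
            return (dbenv->fileid_reset(dbenv, file, 0));
        }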

    +
    -T
    The -T option allows non-Berkeley DB applications to easily load text files into databases.

    If the database to be created is of type Btree or Hash, or the keyword keys is specified as set, the input must be paired lines of text, @@ -76,7 +91,7 @@ occur in the text input must be escaped to avoid misinterpretation by db_load.

    If the -T option is specified, the underlying access method type must be specified using the -t option.

    -

    -t
    Specify the underlying access method. If no -t option is +
    -t
    Specify the underlying access method. If no -t option is specified, the database will be loaded into a database of the same type as was dumped; for example, a Hash database will be created if a Hash database was dumped. @@ -85,7 +100,7 @@ and Recno databases may be converted from one to the other. If the -k option was specified on the call to db_dump then Queue and Recno databases may be converted to Btree or Hash, with the key being the integer record number.

    -

    -V
    Write the library version number to the standard output, and exit. +
    -V
    Write the library version number to the standard output, and exit.

    The db_load utility may be used with a Berkeley DB environment (as described for the -h option, the environment variable DB_HOME, or @@ -108,8 +123,8 @@ key item and the entire password entry as the data item:

    Note that backslash characters naturally occurring in the text are escaped to avoid interpretation as escape characters by db_load.

    Environment Variables

    -

    -

    DB_HOME
    If the -h option is not specified and the environment variable +
    +
    DB_HOME
    If the -h option is not specified and the environment variable DB_HOME is set, it is used as the path of the database home, as described in DB_ENV->open.
    @@ -122,14 +137,14 @@ discussion of these keywords and what values should be specified. expect value to be 1 (set) or 0 (unset). Items listed as (number) convert value to a number. Items listed as (string) use the string value without modification.

    -

    +
    bt_minkey (number)
    The minimum number of keys per page.
    chksum (boolean)
    Enable page checksums.
    database (string)
    The database to load.
    db_lorder (number)
    The byte order for integers in the stored database metadata.
    db_pagesize (number)
    The size of database pages, in bytes. -
    duplicates (boolean)
    The value of the Db.DB_DUP flag. -
    dupsort (boolean)
    The value of the Db.DB_DUPSORT flag. +
    duplicates (boolean)
    The value of the DB_DUP flag. +
    dupsort (boolean)
    The value of the DB_DUPSORT flag.
    extentsize (number)
    The size of database extents, in pages, for Queue databases configured to use extents.
    h_ffactor (number)
    The density within the Hash database. @@ -137,11 +152,11 @@ to use extents.
    keys (boolean)
    Specify whether keys are present for Queue or Recno databases.
    re_len (number)
    Specify fixed-length records of the specified length.
    re_pad (string)
    Specify the fixed-length record pad character. -
    recnum (boolean)
    The value of the Db.DB_RECNUM flag. -
    renumber (boolean)
    The value of the Db.DB_RENUMBER flag. +
    recnum (boolean)
    The value of the DB_RECNUM flag. +
    renumber (boolean)
    The value of the DB_RENUMBER flag.
    subdatabase (string)
    The subdatabase to load.
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/utility/db_printlog.html b/db/docs/utility/db_printlog.html index f08a72fac..ab4fcdb82 100644 --- a/db/docs/utility/db_printlog.html +++ b/db/docs/utility/db_printlog.html @@ -1,42 +1,47 @@ - - + + Berkeley DB: db_printlog - + - -

    db_printlog

    +


    -

    db_printlog [-NrV] [-h home] [-P password]

    +

    db_printlog [-NrV] [-b start-LSN] [-e stop-LSN] [-h home] [-P password]

    Description

    - +

    The db_printlog utility is a debugging utility that dumps Berkeley DB log files in a human-readable format.

    The options are as follows:

    -

    -

    -h
    Specify a home directory for the database environment; by +
    +
    -b
    Display log records starting at log sequence number (LSN) start-LSN; +start-LSN is specified as a file number, followed by a slash (/) +character, followed by an offset number, with no intervening whitespace. +
    -e
    Stop displaying log records at log sequence number (LSN) stop-LSN; +stop-LSN is specified as a file number, followed by a slash (/) +character, followed by an offset number, with no intervening whitespace. +
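    The file/offset pair maps directly onto the two fields of a DB_LSN; a trivial parsing sketch (no range validation) for building such an LSN in an application:
        #include <stdio.h>
        #include <errno.h>
        #include <db.h>

        /*
         * Parse an LSN given as "file/offset", the same form -b and -e
         * accept, into a DB_LSN.  Returns 0 on success, EINVAL otherwise.
         */
        int
        parse_lsn(const char *str, DB_LSN *lsnp)
        {
            unsigned int file, offset;

            if (sscanf(str, "%u/%u", &file, &offset) != 2)
                return (EINVAL);
            lsnp->file = file;
            lsnp->offset = offset;
            return (0);
        }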
    -h
    Specify a home directory for the database environment; by default, the current working directory is used. -

    -N
    Do not acquire shared region mutexes while running. Other problems, +
    -N
    Do not acquire shared region mutexes while running. Other problems, such as potentially fatal errors in Berkeley DB, will be ignored as well. This option is intended only for debugging errors, and should not be used under any other circumstances. -

    -P
    Specify an environment password. Although Berkeley DB utilities overwrite +
    -P
    Specify an environment password. Although Berkeley DB utilities overwrite password strings as soon as possible, be aware there may be a window of vulnerability on systems where unprivileged users can see command-line arguments or where utilities are not able to overwrite the memory containing the command-line arguments. -

    -r
    Read the log files in reverse order. -

    -V
    Write the library version number to the standard output, and exit. +
    -r
    Read the log files in reverse order. +
    -V
    Write the library version number to the standard output, and exit.

    For more information on the db_printlog output and using it to debug applications, see Reviewing @@ -51,12 +56,12 @@ to release all environment resources and exit cleanly, send it an interrupt signal (SIGINT).

    The db_printlog utility exits 0 on success, and >0 if an error occurs.

    Environment Variables

    -

    -

    DB_HOME
    If the -h option is not specified and the environment variable +
    +
    DB_HOME
    If the -h option is not specified and the environment variable DB_HOME is set, it is used as the path of the database home, as described in DB_ENV->open.
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/utility/db_recover.html b/db/docs/utility/db_recover.html index c32b5b6c7..962391d09 100644 --- a/db/docs/utility/db_recover.html +++ b/db/docs/utility/db_recover.html @@ -1,49 +1,48 @@ - + Berkeley DB: db_recover - + - -

    db_recover

    +


    db_recover [-ceVv] [-h home] [-P password] [-t [[CC]YY]MMDDhhmm[.SS]]

    Description

    - +

    The db_recover utility must be run after an unexpected application, Berkeley DB, or system failure to restore the database to a consistent state. All committed transactions are guaranteed to appear after db_recover has run, and all uncommitted transactions will be completely undone.

    The options are as follows:

    -

    -

    -c
    Perform catastrophic recovery instead of normal recovery. -

    -e
    Retain the environment after running recovery. This option +
    +
    -c
    Perform catastrophic recovery instead of normal recovery. +
    -e
    Retain the environment after running recovery. This option will rarely be used unless a DB_CONFIG file is present in the home directory. If a DB_CONFIG file is not present, then the regions will be created with default parameter values. -

    -h
    Specify a home directory for the database environment; by +
    -h
    Specify a home directory for the database environment; by default, the current working directory is used. -

    -P
    Specify an environment password. Although Berkeley DB utilities overwrite +
    -P
    Specify an environment password. Although Berkeley DB utilities overwrite password strings as soon as possible, be aware there may be a window of vulnerability on systems where unprivileged users can see command-line arguments or where utilities are not able to overwrite the memory containing the command-line arguments. -

    -t
    Recover to the time specified rather than to the most current possible +
    -t
    Recover to the time specified rather than to the most current possible date. The timestamp argument should be in the form [[CC]YY]MMDDhhmm[.SS] where each pair of letters represents the following: -

    -

    CC
    The first two digits of the year (the century). +
    +
    CC
    The first two digits of the year (the century).
    YY
    The second two digits of the year. If "YY" is specified, but "CC" is not, a value for "YY" between 69 and 99 results in a "CC" value of 19. Otherwise, a "CC" value of 20 is used. @@ -56,8 +55,8 @@ a "YY" value of 20 is used.

    If the "CC" and "YY" letter pairs are not specified, the values default to the current year. If the "SS" letter pair is not specified, the value defaults to 0.

    -

    -V
    Write the library version number to the standard output, and exit. -

    -v
    Run in verbose mode. +
    -V
    Write the library version number to the standard output, and exit. +
    -v
    Run in verbose mode.

    In the case of catastrophic recovery, an archival copy -- or snapshot -- of all database files must be restored along with @@ -82,12 +81,12 @@ to release all environment resources and exit cleanly, send it an interrupt signal (SIGINT).

    The db_recover utility exits 0 on success, and >0 if an error occurs.

    Environment Variables

    -

    -

    DB_HOME
    If the -h option is not specified and the environment variable +
    +
    DB_HOME
    If the -h option is not specified and the environment variable DB_HOME is set, it is used as the path of the database home, as described in DB_ENV->open.
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/utility/db_stat.html b/db/docs/utility/db_stat.html index c0533d280..9953cb613 100644 --- a/db/docs/utility/db_stat.html +++ b/db/docs/utility/db_stat.html @@ -1,78 +1,86 @@ - - + + Berkeley DB: db_stat - + - -

    db_stat

    +


    db_stat -d file [-fN] [-h home] [-P password] [-s database]
    -db_stat [-celmNrtVZ] [-C Aclmop] [-h home] [-M Ahm] [-P password]

    +db_stat [-cEelmNrtVZ] [-C Aclop] [-h home] [-L A] [-M A] [-R A] [-P password]

    Description

    - +

    The db_stat utility displays statistics for Berkeley DB environments.

    The options are as follows:

    -

    -

    -C
    Display internal information about the lock region. (The output from this -option is often both voluminous and meaningless, and is intended only for -debugging.) -

    -

    A
    Display all information. +
    +
    -C
    Display internal information about the locking subsystem. (The output +from this option is often both voluminous and meaningless, and is +intended only for debugging.) +
    +
    A
    Display all information.
    c
    Display lock conflict matrix.
    l
    Display lockers within hash chains. -
    m
    Display region memory information. -
    o
    Display objects within hash chains. -
    p
    Display lock region parameters. +
    o
    Display lock objects within hash chains. +
    p
    Display locking subsystem parameters.
    -

    -c
    Display lock region statistics, as described in DB_ENV->lock_stat. -

    -d
    Display database statistics for the specified file, as described in +
    -c
    Display locking subsystem statistics, as described in DB_ENV->lock_stat. +
    -d
    Display database statistics for the specified file, as described in DB->stat.

    If the database contains multiple databases and the -s flag is not specified, the statistics are for the internal database that describes the other databases the file contains, and not for the file as a whole.

    -

    -e
    Display current environment statistics. -

    -f
    Display only those database statistics that can be +
    -E
    Display internal information about the database environment, including +all configured subsystems of the database environment. (The +output from this option is often both voluminous and meaningless, and +is intended only for debugging.) +
    -e
    Display information about the database environment, including +all configured subsystems of the database environment. +
    -f
    Display only those database statistics that can be acquired without traversing the database. -

    -h
    Specify a home directory for the database environment; by +
    -h
    Specify a home directory for the database environment; by default, the current working directory is used. -

    -l
    Display log region statistics, as described in DB_ENV->log_stat. -

    -M
    Display internal information about the shared memory buffer pool. (The -output from this option is often both voluminous and meaningless, and is -intended only for debugging.) -

    -

    A
    Display all information. +
    -l
    Display logging subsystem statistics, as described in DB_ENV->log_stat. +
    -M
    Display internal information about the cache. (The output from this +option is often both voluminous and meaningless, and is intended only +for debugging.) +
    +
    A
    Display all information.
    h
    Display buffers within hash chains. -
    m
    Display region memory information.
    -

    -m
    Display shared memory buffer pool statistics, as described in -DB_ENV->memp_stat. -

    -N
    Do not acquire shared region mutexes while running. Other problems, +
    -m
    Display cache statistics, as described in DB_ENV->memp_stat. +
    -N
    Do not acquire shared region mutexes while running. Other problems, such as potentially fatal errors in Berkeley DB, will be ignored as well. This option is intended only for debugging errors, and should not be used under any other circumstances. -

    -P
    Specify an environment password. Although Berkeley DB utilities overwrite +
    -P
    Specify an environment password. Although Berkeley DB utilities overwrite password strings as soon as possible, be aware there may be a window of vulnerability on systems where unprivileged users can see command-line arguments or where utilities are not able to overwrite the memory containing the command-line arguments. -

    -r
    Display replication statistics, as described in DB_ENV->rep_stat. -

    -s
    Display statistics for the specified database contained in the file +
    -R
    Display internal information about the replication subsystem. (The +output from this option is often both voluminous and meaningless, and +is intended only for debugging.) +
    +
    A
    Display all information. +
    +
    -r
    Display replication statistics, as described in DB_ENV->rep_stat. +
    -s
    Display statistics for the specified database contained in the file specified with the -d flag. -

    -t
    Display transaction region statistics, as described in DB_ENV->txn_stat. -

    -V
    Write the library version number to the standard output, and exit. -

    -Z
    Reset the statistics after reporting them; valid only with the --c, -e, -l, -m, and -t +
    -t
    Display transaction subsystem statistics, as described in DB_ENV->txn_stat. +
    -V
    Write the library version number to the standard output, and exit. +
    -Z
    Reset the statistics after reporting them; valid only with the -C, +-c, -E, -e, -L, -l, +-M, -m, -R, -r, and -t options.

    Values normally displayed in quantities of bytes are displayed as a @@ -90,12 +98,12 @@ to release all environment resources and exit cleanly, send it an interrupt signal (SIGINT).

    The db_stat utility exits 0 on success, and >0 if an error occurs.
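    The numbers behind the -m option are available programmatically through DB_ENV->memp_stat; a brief sketch, assuming an already opened environment handle and printing just a few of the cache counters:
        #include <stdio.h>
        #include <stdlib.h>
        #include <db.h>

        /*
         * Print cache size and hit/miss counters, the same statistics
         * db_stat -m reports.  The statistics structure is allocated by
         * the library and released here with free(3).
         */
        int
        print_cache_stats(DB_ENV *dbenv)
        {
            DB_MPOOL_STAT *sp;
            int ret;

            if ((ret = dbenv->memp_stat(dbenv, &sp, NULL, 0)) != 0)
                return (ret);
            printf("cache size: %luGB + %lu bytes\n",
                (unsigned long)sp->st_gbytes, (unsigned long)sp->st_bytes);
            printf("cache hits: %lu, misses: %lu\n",
                (unsigned long)sp->st_cache_hit,
                (unsigned long)sp->st_cache_miss);
            free(sp);
            return (0);
        }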

    Environment Variables

    -

    -

    DB_HOME
    If the -h option is not specified and the environment variable +
    +
    DB_HOME
    If the -h option is not specified and the environment variable DB_HOME is set, it is used as the path of the database home, as described in DB_ENV->open.
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/utility/db_upgrade.html b/db/docs/utility/db_upgrade.html index 735051546..e151fc7e8 100644 --- a/db/docs/utility/db_upgrade.html +++ b/db/docs/utility/db_upgrade.html @@ -1,41 +1,40 @@ - + Berkeley DB: db_upgrade - + - -

    db_upgrade

    +


    db_upgrade [-NsV] [-h home] [-P password] file ...

    Description

    - +

    The db_upgrade utility upgrades the Berkeley DB version of one or more files and the databases they contain to the current release version.

    The options are as follows:

    -

    -

    -h
    Specify a home directory for the database environment; by +
    +
    -h
    Specify a home directory for the database environment; by default, the current working directory is used. -

    -N
    Do not acquire shared region mutexes while running. Other problems, +
    -N
    Do not acquire shared region mutexes while running. Other problems, such as potentially fatal errors in Berkeley DB, will be ignored as well. This option is intended only for debugging errors, and should not be used under any other circumstances. -

    -P
    Specify an environment password. Although Berkeley DB utilities overwrite +
    -P
    Specify an environment password. Although Berkeley DB utilities overwrite password strings as soon as possible, be aware there may be a window of vulnerability on systems where unprivileged users can see command-line arguments or where utilities are not able to overwrite the memory containing the command-line arguments. -

    -s
    This flag is only meaningful when upgrading databases from releases +
    -s
    This flag is only meaningful when upgrading databases from releases before the Berkeley DB 3.1 release.

    As part of the upgrade from the Berkeley DB 3.0 release to the 3.1 release, the on-disk format of duplicate data items changed. To correctly upgrade @@ -56,7 +55,7 @@ db_upgrade will work correctly as long as the -s flag is correctly specified. Otherwise, the file cannot be upgraded using db_upgrade, and must be upgraded manually using the db_dump and db_load utilities.
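    Applications can perform the same upgrade through the DB->upgrade method; in the sketch below, passing DB_DUPSORT plays the role of the -s flag for pre-3.1 files whose duplicates were sorted (the file name is supplied by the caller, and error handling is minimal):
        #include <db.h>

        /*
         * Upgrade a database file in place.  Like db_upgrade, this is
         * potentially destructive, so archive the file first.
         */
        int
        upgrade_file(const char *file, int sorted_dups)
        {
            DB *dbp;
            int ret, t_ret;

            if ((ret = db_create(&dbp, NULL, 0)) != 0)
                return (ret);
            ret = dbp->upgrade(dbp, file, sorted_dups ? DB_DUPSORT : 0);
            if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
                ret = t_ret;
            return (ret);
        }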

    -

    -V
    Write the library version number to the standard output, and exit. +
    -V
    Write the library version number to the standard output, and exit.

    It is important to realize that Berkeley DB database upgrades are done in place, and so are potentially destructive. This means that if the @@ -74,12 +73,12 @@ to release all environment resources and exit cleanly, send it an interrupt signal (SIGINT).

    The db_upgrade utility exits 0 on success, and >0 if an error occurs.

    Environment Variables

    -

    -

    DB_HOME
    If the -h option is not specified and the environment variable +
    +
    DB_HOME
    If the -h option is not specified and the environment variable DB_HOME is set, it is used as the path of the database home, as described in DB_ENV->open.
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/utility/db_verify.html b/db/docs/utility/db_verify.html index 358e11424..e2e05d290 100644 --- a/db/docs/utility/db_verify.html +++ b/db/docs/utility/db_verify.html @@ -1,32 +1,31 @@ - + Berkeley DB: db_verify - + - -

    db_verify

    +


    db_verify [-NoqV] [-h home] [-P password] file ...

    Description

    - +

    The db_verify utility verifies the structure of one or more files and the databases they contain.

    The options are as follows:

    -

    -

    -h
    Specify a home directory for the database environment; by +
    +
    -h
    Specify a home directory for the database environment; by default, the current working directory is used. -

    -o
    Skip the database checks for btree and duplicate sort order and for +
    -o
    Skip the database checks for btree and duplicate sort order and for hashing.

    If the file being verified contains databases with non-default comparison or hashing configurations, calling the db_verify @@ -36,18 +35,18 @@ hash ordering and allows db_verify to be used on these files. To fully verify these files, verify them explicitly using the DB->verify method, after configuring the correct comparison or hashing functions.
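    A sketch of the explicit verification described above; set_bt_compare stands in for whatever comparison or hashing callbacks the application actually configured, and "my_compare" is a placeholder name, not something defined on this page.
        #include <db.h>

        /*
         * Verify a file using the application's own btree comparison
         * function.  DB->verify destroys the DB handle whether or not
         * verification succeeds, so the handle is not closed afterward.
         */
        int
        verify_with_compare(const char *file,
            int (*my_compare)(DB *, const DBT *, const DBT *))
        {
            DB *dbp;
            int ret;

            if ((ret = db_create(&dbp, NULL, 0)) != 0)
                return (ret);
            if ((ret = dbp->set_bt_compare(dbp, my_compare)) != 0) {
                (void)dbp->close(dbp, 0);
                return (ret);
            }
            return (dbp->verify(dbp, file, NULL, NULL, 0));
        }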

    -

    -N
    Do not acquire shared region mutexes while running. Other problems, +
    -N
    Do not acquire shared region mutexes while running. Other problems, such as potentially fatal errors in Berkeley DB, will be ignored as well. This option is intended only for debugging errors, and should not be used under any other circumstances. -

    -P
    Specify an environment password. Although Berkeley DB utilities overwrite +
    -P
    Specify an environment password. Although Berkeley DB utilities overwrite password strings as soon as possible, be aware there may be a window of vulnerability on systems where unprivileged users can see command-line arguments or where utilities are not able to overwrite the memory containing the command-line arguments. -

    -q
    Suppress the printing of any error descriptions, simply exit success or +
    -q
    Suppress the printing of any error descriptions, simply exit success or failure. -

    -V
    Write the library version number to the standard output, and exit. +
    -V
    Write the library version number to the standard output, and exit.

    The db_verify utility does not perform any locking, even in Berkeley DB environments that are configured with a locking subsystem. As @@ -63,12 +62,12 @@ to release all environment resources and exit cleanly, send it an interrupt signal (SIGINT).

    The db_verify utility exits 0 on success, and >0 if an error occurs.

    Environment Variables

    -

    -

    DB_HOME
    If the -h option is not specified and the environment variable +
    +
    DB_HOME
    If the -h option is not specified and the environment variable DB_HOME is set, it is used as the path of the database home, as described in DB_ENV->open.
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/docs/utility/index.html b/db/docs/utility/index.html index 3daca8c2d..d1126a992 100644 --- a/db/docs/utility/index.html +++ b/db/docs/utility/index.html @@ -1,12 +1,12 @@ - + Berkeley DB: Berkeley DB Supporting Utilities - +

    Berkeley DB Supporting Utilities

    @@ -24,6 +24,6 @@
    db_upgradeDatabase upgrade utility
    db_verifyVerification utility
    -

    Copyright (c) 1996-2003 Sleepycat Software, Inc. - All rights reserved. +

    Copyright (c) 1996-2004 Sleepycat Software, Inc. - All rights reserved. diff --git a/db/env/db_salloc.c b/db/env/db_salloc.c index 3fe9adaa7..f2b1ed386 100644 --- a/db/env/db_salloc.c +++ b/db/env/db_salloc.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_salloc.c,v 11.28 2004/09/17 22:00:27 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_salloc.c,v 11.17 2003/01/08 04:42:01 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -36,21 +34,27 @@ struct __data { SH_LIST_ENTRY links; }; +#define ILLEGAL_SIZE 1 /* An illegal size. */ + /* * __db_shalloc_init -- * Initialize the area as one large chunk. * - * PUBLIC: void __db_shalloc_init __P((void *, size_t)); + * PUBLIC: void __db_shalloc_init __P((REGINFO *, size_t)); */ void -__db_shalloc_init(area, size) - void *area; +__db_shalloc_init(infop, size) + REGINFO *infop; size_t size; { struct __data *elp; struct __head *hp; - hp = area; + /* No initialization needed for heap memory regions. */ + if (F_ISSET(infop->dbenv, DB_ENV_PRIVATE)) + return; + + hp = infop->addr; SH_LIST_INIT(hp); elp = (struct __data *)(hp + 1); @@ -62,9 +66,9 @@ __db_shalloc_init(area, size) * __db_shalloc_size -- * Return the space needed for an allocation, including alignment. * - * PUBLIC: int __db_shalloc_size __P((size_t, size_t)); + * PUBLIC: size_t __db_shalloc_size __P((size_t, size_t)); */ -int +size_t __db_shalloc_size(len, align) size_t len, align; { @@ -77,27 +81,67 @@ __db_shalloc_size(len, align) ++len; #endif - /* Never align to less than a db_align_t boundary. */ - if (align <= sizeof(db_align_t)) - align = sizeof(db_align_t); + /* Never align to less than a uintmax_t boundary. */ + if (align <= sizeof(uintmax_t)) + align = sizeof(uintmax_t); - return ((int)(ALIGN(len, align) + sizeof (struct __data))); + return ((size_t)DB_ALIGN(len, align) + sizeof(struct __data)); } /* * __db_shalloc -- - * Allocate some space from the shared region. + * Allocate space from the shared region. * - * PUBLIC: int __db_shalloc __P((void *, size_t, size_t, void *)); + * PUBLIC: int __db_shalloc __P((REGINFO *, size_t, size_t, void *)); */ int -__db_shalloc(p, len, align, retp) - void *p, *retp; +__db_shalloc(infop, len, align, retp) + REGINFO *infop; size_t len, align; + void *retp; { + DB_ENV *dbenv; struct __data *elp; size_t *sp; - void *rp; + int ret; + void *p, *rp; + + dbenv = infop->dbenv; + + /* Never align to less than a uintmax_t boundary. */ + if (align <= sizeof(uintmax_t)) + align = sizeof(uintmax_t); + + /* In a private region, we call malloc for additional space. */ + if (F_ISSET(dbenv, DB_ENV_PRIVATE)) { + /* Check to see if we're over our limit. */ + if (infop->allocated >= infop->max_alloc) + return (ENOMEM); + + /* Add enough room for a size. */ + len += sizeof(size_t); + + /* Add enough room to guarantee alignment is possible. */ + len += align - 1; + + /* Allocate the space. */ + if ((ret = __os_malloc(dbenv, len, &p)) != 0) + return (ret); + infop->allocated += len; + + /* Store the size. */ + sp = p; + *sp++ = len; + + /* Find the aligned location. */ + *(void **)retp = rp = ALIGNP_INC(sp, align); + + /* Fill in any gaps with illegal sizes. */ + for (; (void *)sp < rp; ++sp) + *sp = ILLEGAL_SIZE; + + return (0); + } /* Never allocate less than the size of a struct __data. 
*/ if (len < sizeof(struct __data)) @@ -108,9 +152,7 @@ __db_shalloc(p, len, align, retp) ++len; #endif - /* Never align to less than a db_align_t boundary. */ - if (align <= sizeof(db_align_t)) - align = sizeof(db_align_t); + p = infop->addr; /* Walk the list, looking for a slot. */ for (elp = SH_LIST_FIRST((struct __head *)p, __data); @@ -125,7 +167,8 @@ __db_shalloc(p, len, align, retp) */ rp = (u_int8_t *)elp + sizeof(size_t) + elp->len; rp = (u_int8_t *)rp - len; - rp = (u_int8_t *)((db_alignp_t)rp & ~(align - 1)); + rp = (u_int8_t *)((uintptr_t)rp & ~(align - 1)); + rp = ALIGNP_DEC(rp, align); /* * Rp may now point before elp->links, in which case the chunk @@ -168,7 +211,6 @@ __db_shalloc(p, len, align, retp) * size_t length fields back to the "real" length field to a * flag value, so that we can find the real length during free. */ -#define ILLEGAL_SIZE 1 SH_LIST_REMOVE(elp, links, __data); for (sp = rp; (u_int8_t *)--sp >= (u_int8_t *)&elp->links;) *sp = ILLEGAL_SIZE; @@ -180,19 +222,23 @@ __db_shalloc(p, len, align, retp) /* * __db_shalloc_free -- - * Free a shared memory allocation. + * Free space into the shared region. * - * PUBLIC: void __db_shalloc_free __P((void *, void *)); + * PUBLIC: void __db_shalloc_free __P((REGINFO *, void *)); */ void -__db_shalloc_free(regionp, ptr) - void *regionp, *ptr; +__db_shalloc_free(infop, ptr) + REGINFO *infop; + void *ptr; { + DB_ENV *dbenv; struct __data *elp, *lastp, *newp; struct __head *hp; size_t free_size, *sp; int merged; + dbenv = infop->dbenv; + /* * Step back over flagged length fields to find the beginning of * the object and its real size. @@ -204,6 +250,15 @@ __db_shalloc_free(regionp, ptr) newp = (struct __data *)((u_int8_t *)ptr - sizeof(size_t)); free_size = newp->len; + /* In a private region, we call free. */ + if (F_ISSET(dbenv, DB_ENV_PRIVATE)) { + DB_ASSERT(infop->allocated >= free_size); + infop->allocated -= free_size; + + __os_free(dbenv, newp); + return; + } + #ifdef DIAGNOSTIC /* * The "real size" includes the guard byte; it's just the last @@ -225,7 +280,6 @@ __db_shalloc_free(regionp, ptr) /* Trash the returned memory (including guard byte). */ memset(ptr, CLEAR_BYTE, free_size); #endif - /* * Walk the list, looking for where this entry goes. * @@ -235,7 +289,7 @@ __db_shalloc_free(regionp, ptr) * XXX * Probably worth profiling this to see how expensive it is. */ - hp = (struct __head *)regionp; + hp = (struct __head *)(infop->addr); for (elp = SH_LIST_FIRST(hp, __data), lastp = NULL; elp != NULL && (void *)elp < (void *)ptr; lastp = elp, elp = SH_LIST_NEXT(elp, links, __data)) @@ -284,7 +338,7 @@ __db_shalloc_free(regionp, ptr) } /* - * __db_shsizeof -- + * __db_shalloc_sizeof -- * Return the size of a shalloc'd piece of memory. * * !!! @@ -292,10 +346,10 @@ __db_shalloc_free(regionp, ptr) * the size of the memory being used, but also the extra alignment bytes * in front and, #ifdef DIAGNOSTIC, the guard byte at the end. * - * PUBLIC: size_t __db_shsizeof __P((void *)); + * PUBLIC: size_t __db_shalloc_sizeof __P((void *)); */ size_t -__db_shsizeof(ptr) +__db_shalloc_sizeof(ptr) void *ptr; { struct __data *elp; @@ -311,28 +365,3 @@ __db_shsizeof(ptr) elp = (struct __data *)((u_int8_t *)sp - sizeof(size_t)); return (elp->len); } - -/* - * __db_shalloc_dump -- - * - * PUBLIC: void __db_shalloc_dump __P((void *, FILE *)); - */ -void -__db_shalloc_dump(addr, fp) - void *addr; - FILE *fp; -{ - struct __data *elp; - - /* Make it easy to call from the debugger. 
*/ - if (fp == NULL) - fp = stderr; - - fprintf(fp, "%s\nMemory free list\n", DB_LINE); - - for (elp = SH_LIST_FIRST((struct __head *)addr, __data); - elp != NULL; - elp = SH_LIST_NEXT(elp, links, __data)) - fprintf(fp, "%#lx: %lu\t", P_TO_ULONG(elp), (u_long)elp->len); - fprintf(fp, "\n"); -} diff --git a/db/env/db_shash.c b/db/env/db_shash.c index 6c8e2dc42..ac3b31622 100644 --- a/db/env/db_shash.c +++ b/db/env/db_shash.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_shash.c,v 11.9 2004/03/20 16:18:51 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_shash.c,v 11.7 2003/01/08 04:42:06 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif @@ -75,13 +73,13 @@ static const struct { * __db_tablesize -- * Choose a size for the hash table. * - * PUBLIC: int __db_tablesize __P((u_int32_t)); + * PUBLIC: u_int32_t __db_tablesize __P((u_int32_t)); */ -int +u_int32_t __db_tablesize(n_buckets) u_int32_t n_buckets; { - int i; + u_int i; /* * We try to be clever about how big we make the hash tables. Use a @@ -93,15 +91,10 @@ __db_tablesize(n_buckets) if (n_buckets < 32) n_buckets = 32; - for (i = 0;; ++i) { - if (list[i].power == 0) { - --i; - break; - } + for (i = 0; i < sizeof(list)/sizeof(list[0]); ++i) if (list[i].power >= n_buckets) - break; - } - return (list[i].prime); + return (list[i].prime); + return (list[--i].prime); } /* diff --git a/db/env/env_file.c b/db/env/env_file.c index 6bcfad72b..53f93cc53 100644 --- a/db/env/env_file.c +++ b/db/env/env_file.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2002-2003 + * Copyright (c) 2002-2004 * Sleepycat Software. All rights reserved. + * + * $Id: env_file.c,v 1.11 2004/03/24 20:51:38 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: env_file.c,v 1.8 2003/05/24 14:57:52 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include diff --git a/db/env/env_method.c b/db/env/env_method.c index ac0136920..4f865061b 100644 --- a/db/env/env_method.c +++ b/db/env/env_method.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: env_method.c,v 11.136 2004/10/11 18:47:50 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: env_method.c,v 11.113 2003/09/11 17:36:41 sue Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -21,6 +19,10 @@ static const char revid[] = "$Id: env_method.c,v 11.113 2003/09/11 17:36:41 sue #include #endif +#ifdef HAVE_RPC +#include "db_server.h" +#endif + /* * This is the file that initializes the global array. Do it this way because * people keep changing one without changing the other. 
Having declaration and @@ -40,26 +42,25 @@ static const char revid[] = "$Id: env_method.c,v 11.113 2003/09/11 17:36:41 sue #include "dbinc/txn.h" #ifdef HAVE_RPC -#include "dbinc_auto/db_server.h" #include "dbinc_auto/rpc_client_ext.h" #endif -static int __dbenv_init __P((DB_ENV *)); static void __dbenv_err __P((const DB_ENV *, int, const char *, ...)); static void __dbenv_errx __P((const DB_ENV *, const char *, ...)); -static int __dbenv_get_home __P((DB_ENV *, const char **)); -static int __dbenv_set_app_dispatch __P((DB_ENV *, - int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops))); static int __dbenv_get_data_dirs __P((DB_ENV *, const char ***)); -static int __dbenv_set_feedback __P((DB_ENV *, void (*)(DB_ENV *, int, int))); -static void __dbenv_map_flags __P((DB_ENV *, u_int32_t *, u_int32_t *)); static int __dbenv_get_flags __P((DB_ENV *, u_int32_t *)); -static int __dbenv_set_rpc_server_noclnt - __P((DB_ENV *, void *, const char *, long, long, u_int32_t)); +static int __dbenv_get_home __P((DB_ENV *, const char **)); static int __dbenv_get_shm_key __P((DB_ENV *, long *)); static int __dbenv_get_tas_spins __P((DB_ENV *, u_int32_t *)); static int __dbenv_get_tmp_dir __P((DB_ENV *, const char **)); static int __dbenv_get_verbose __P((DB_ENV *, u_int32_t, int *)); +static int __dbenv_init __P((DB_ENV *)); +static void __dbenv_map_flags __P((DB_ENV *, u_int32_t *, u_int32_t *)); +static int __dbenv_set_app_dispatch + __P((DB_ENV *, int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops))); +static int __dbenv_set_feedback __P((DB_ENV *, void (*)(DB_ENV *, int, int))); +static int __dbenv_set_rpc_server_noclnt + __P((DB_ENV *, void *, const char *, long, long, u_int32_t)); /* * db_env_create -- @@ -111,8 +112,6 @@ static int __dbenv_init(dbenv) DB_ENV *dbenv; { - int ret; - /* * !!! 
* Our caller has not yet had the opportunity to reset the panic @@ -128,6 +127,9 @@ __dbenv_init(dbenv) dbenv->set_errfile = __dbenv_set_errfile; dbenv->get_errpfx = __dbenv_get_errpfx; dbenv->set_errpfx = __dbenv_set_errpfx; + dbenv->set_msgcall = __dbenv_set_msgcall; + dbenv->get_msgfile = __dbenv_get_msgfile; + dbenv->set_msgfile = __dbenv_set_msgfile; #ifdef HAVE_RPC if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) { @@ -138,6 +140,13 @@ __dbenv_init(dbenv) dbenv->get_open_flags = __dbcl_env_get_open_flags; dbenv->open = __dbcl_env_open_wrap; dbenv->remove = __dbcl_env_remove; + dbenv->stat_print = NULL; + + dbenv->fileid_reset = NULL; + dbenv->is_bigendian = NULL; + dbenv->lsn_reset = NULL; + dbenv->prdbt = NULL; + dbenv->set_alloc = __dbcl_env_alloc; dbenv->set_app_dispatch = __dbcl_set_app_dispatch; dbenv->get_data_dirs = __dbcl_get_data_dirs; @@ -166,6 +175,13 @@ __dbenv_init(dbenv) dbenv->dbrename = __dbenv_dbrename_pp; dbenv->open = __dbenv_open; dbenv->remove = __dbenv_remove; + dbenv->stat_print = __dbenv_stat_print_pp; + + dbenv->fileid_reset = __db_fileid_reset; + dbenv->is_bigendian = __db_isbigendian; + dbenv->lsn_reset = __db_lsn_reset; + dbenv->prdbt = __db_prdbt; + dbenv->get_home = __dbenv_get_home; dbenv->get_open_flags = __dbenv_get_open_flags; dbenv->set_alloc = __dbenv_set_alloc; @@ -177,6 +193,7 @@ __dbenv_init(dbenv) dbenv->set_feedback = __dbenv_set_feedback; dbenv->get_flags = __dbenv_get_flags; dbenv->set_flags = __dbenv_set_flags; + dbenv->set_intermediate_dir = __dbenv_set_intermediate_dir; dbenv->set_paniccall = __dbenv_set_paniccall; dbenv->set_rpc_server = __dbenv_set_rpc_server_noclnt; dbenv->get_shm_key = __dbenv_get_shm_key; @@ -198,8 +215,7 @@ __dbenv_init(dbenv) __log_dbenv_create(dbenv); /* Subsystem specific. */ __lock_dbenv_create(dbenv); __memp_dbenv_create(dbenv); - if ((ret = __rep_dbenv_create(dbenv)) != 0) - return (ret); + __rep_dbenv_create(dbenv); __txn_dbenv_create(dbenv); return (0); @@ -419,10 +435,18 @@ __dbenv_map_flags(dbenv, inflagsp, outflagsp) FLD_SET(*outflagsp, DB_ENV_DIRECT_LOG); FLD_CLR(*inflagsp, DB_DIRECT_LOG); } + if (FLD_ISSET(*inflagsp, DB_DSYNC_LOG)) { + FLD_SET(*outflagsp, DB_ENV_DSYNC_LOG); + FLD_CLR(*inflagsp, DB_DSYNC_LOG); + } if (FLD_ISSET(*inflagsp, DB_LOG_AUTOREMOVE)) { FLD_SET(*outflagsp, DB_ENV_LOG_AUTOREMOVE); FLD_CLR(*inflagsp, DB_LOG_AUTOREMOVE); } + if (FLD_ISSET(*inflagsp, DB_LOG_INMEMORY)) { + FLD_SET(*outflagsp, DB_ENV_LOG_INMEMORY); + FLD_CLR(*inflagsp, DB_LOG_INMEMORY); + } if (FLD_ISSET(*inflagsp, DB_NOLOCKING)) { FLD_SET(*outflagsp, DB_ENV_NOLOCKING); FLD_CLR(*inflagsp, DB_NOLOCKING); @@ -451,10 +475,6 @@ __dbenv_map_flags(dbenv, inflagsp, outflagsp) FLD_SET(*outflagsp, DB_ENV_TXN_NOSYNC); FLD_CLR(*inflagsp, DB_TXN_NOSYNC); } - if (FLD_ISSET(*inflagsp, DB_TXN_NOT_DURABLE)) { - FLD_SET(*outflagsp, DB_ENV_TXN_NOT_DURABLE); - FLD_CLR(*inflagsp, DB_TXN_NOT_DURABLE); - } if (FLD_ISSET(*inflagsp, DB_TXN_WRITE_NOSYNC)) { FLD_SET(*outflagsp, DB_ENV_TXN_WRITE_NOSYNC); FLD_CLR(*inflagsp, DB_TXN_WRITE_NOSYNC); @@ -475,7 +495,9 @@ __dbenv_get_flags(dbenv, flagsp) DB_CDB_ALLDB, DB_DIRECT_DB, DB_DIRECT_LOG, + DB_DSYNC_LOG, DB_LOG_AUTOREMOVE, + DB_LOG_INMEMORY, DB_NOLOCKING, DB_NOMMAP, DB_NOPANIC, @@ -483,7 +505,6 @@ __dbenv_get_flags(dbenv, flagsp) DB_REGION_INIT, DB_TIME_NOTGRANTED, DB_TXN_NOSYNC, - DB_TXN_NOT_DURABLE, DB_TXN_WRITE_NOSYNC, DB_YIELDCPU, 0 @@ -500,11 +521,12 @@ __dbenv_get_flags(dbenv, flagsp) LF_SET(env_flags[i]); } - /* Special cases */ + /* Some flags are persisted in the regions. 
*/ if (dbenv->reginfo != NULL && ((REGENV *)((REGINFO *)dbenv->reginfo)->primary)->envpanic != 0) { LF_SET(DB_PANIC_ENVIRONMENT); } + __log_get_flags(dbenv, &flags); *flagsp = flags; return (0); @@ -527,27 +549,27 @@ __dbenv_set_flags(dbenv, flags, on) #define OK_FLAGS \ (DB_AUTO_COMMIT | DB_CDB_ALLDB | DB_DIRECT_DB | DB_DIRECT_LOG | \ - DB_LOG_AUTOREMOVE | DB_NOLOCKING | DB_NOMMAP | DB_NOPANIC | \ - DB_OVERWRITE | DB_PANIC_ENVIRONMENT | DB_REGION_INIT | \ - DB_TIME_NOTGRANTED | DB_TXN_NOSYNC | DB_TXN_NOT_DURABLE | \ - DB_TXN_WRITE_NOSYNC | DB_YIELDCPU) + DB_DSYNC_LOG | DB_LOG_AUTOREMOVE | DB_LOG_INMEMORY | \ + DB_NOLOCKING | DB_NOMMAP | DB_NOPANIC | DB_OVERWRITE | \ + DB_PANIC_ENVIRONMENT | DB_REGION_INIT | DB_TIME_NOTGRANTED | \ + DB_TXN_NOSYNC | DB_TXN_WRITE_NOSYNC | DB_YIELDCPU) if (LF_ISSET(~OK_FLAGS)) return (__db_ferr(dbenv, "DB_ENV->set_flags", 0)); if (on) { if ((ret = __db_fcchk(dbenv, "DB_ENV->set_flags", - flags, DB_TXN_NOSYNC, DB_TXN_NOT_DURABLE)) != 0) + flags, DB_LOG_INMEMORY, DB_TXN_NOSYNC)) != 0) return (ret); if ((ret = __db_fcchk(dbenv, "DB_ENV->set_flags", - flags, DB_TXN_NOSYNC, DB_TXN_WRITE_NOSYNC)) != 0) + flags, DB_LOG_INMEMORY, DB_TXN_WRITE_NOSYNC)) != 0) return (ret); if ((ret = __db_fcchk(dbenv, "DB_ENV->set_flags", - flags, DB_TXN_NOT_DURABLE, DB_TXN_WRITE_NOSYNC)) != 0) + flags, DB_TXN_NOSYNC, DB_TXN_WRITE_NOSYNC)) != 0) return (ret); if (LF_ISSET(DB_DIRECT_DB | DB_DIRECT_LOG) && __os_have_direct() == 0) { __db_err(dbenv, - "DB_ENV->set_flags: direct I/O is not supported by this platform"); + "DB_ENV->set_flags: direct I/O either not configured or not supported"); return (EINVAL); } } @@ -564,12 +586,27 @@ __dbenv_set_flags(dbenv, flags, on) ENV_ILLEGAL_AFTER_OPEN(dbenv, "DB_ENV->set_flags: DB_REGION_INIT"); + /* + * DB_LOG_INMEMORY, DB_TXN_NOSYNC and DB_TXN_WRITE_NOSYNC are + * mutually incompatible. If we're setting one of them, clear all + * current settings. + */ + if (LF_ISSET( + DB_LOG_INMEMORY | DB_TXN_NOSYNC | DB_TXN_WRITE_NOSYNC)) + F_CLR(dbenv, + DB_ENV_LOG_INMEMORY | + DB_ENV_TXN_NOSYNC | DB_ENV_TXN_WRITE_NOSYNC); + + /* Some flags are persisted in the regions. */ + __log_set_flags(dbenv, flags, on); + mapped_flags = 0; __dbenv_map_flags(dbenv, &flags, &mapped_flags); if (on) F_SET(dbenv, mapped_flags); else F_CLR(dbenv, mapped_flags); + return (0); } @@ -584,7 +621,7 @@ __dbenv_get_data_dirs(dbenv, dirpp) /* * __dbenv_set_data_dir -- - * DB_ENV->set_dta_dir. + * DB_ENV->set_data_dir. * * PUBLIC: int __dbenv_set_data_dir __P((DB_ENV *, const char *)); */ @@ -620,17 +657,45 @@ __dbenv_set_data_dir(dbenv, dir) return (ret); } +/* + * __dbenv_set_intermediate_dir -- + * DB_ENV->set_intermediate_dir. + * + * !!! + * Undocumented routine allowing applications to configure Berkeley DB to + * create intermediate directories. + * + * PUBLIC: int __dbenv_set_intermediate_dir __P((DB_ENV *, int, u_int32_t)); + */ +int +__dbenv_set_intermediate_dir(dbenv, mode, flags) + DB_ENV *dbenv; + int mode; + u_int32_t flags; +{ + if (flags != 0) + return (__db_ferr(dbenv, "DB_ENV->set_intermediate_dir", 0)); + if (mode == 0) { + __db_err(dbenv, + "DB_ENV->set_intermediate_dir: mode may not be set to 0"); + return (EINVAL); + } + + dbenv->dir_mode = mode; + return (0); +} + /* * __dbenv_set_errcall -- * {DB_ENV,DB}->set_errcall. 
* - * PUBLIC: void __dbenv_set_errcall - * PUBLIC: __P((DB_ENV *, void (*)(const char *, char *))); + * PUBLIC: void __dbenv_set_errcall __P((DB_ENV *, + * PUBLIC: void (*)(const DB_ENV *, const char *, const char *))); */ void __dbenv_set_errcall(dbenv, errcall) DB_ENV *dbenv; - void (*errcall) __P((const char *, char *)); + void (*errcall) __P((const DB_ENV *, const char *, const char *)); { dbenv->db_errcall = errcall; } @@ -700,6 +765,49 @@ __dbenv_set_feedback(dbenv, feedback) return (0); } +/* + * __dbenv_set_msgcall -- + * {DB_ENV,DB}->set_msgcall. + * + * PUBLIC: void __dbenv_set_msgcall + * PUBLIC: __P((DB_ENV *, void (*)(const DB_ENV *, const char *))); + */ +void +__dbenv_set_msgcall(dbenv, msgcall) + DB_ENV *dbenv; + void (*msgcall) __P((const DB_ENV *, const char *)); +{ + dbenv->db_msgcall = msgcall; +} + +/* + * __dbenv_get_msgfile -- + * {DB_ENV,DB}->get_msgfile. + * + * PUBLIC: void __dbenv_get_msgfile __P((DB_ENV *, FILE **)); + */ +void +__dbenv_get_msgfile(dbenv, msgfilep) + DB_ENV *dbenv; + FILE **msgfilep; +{ + *msgfilep = dbenv->db_msgfile; +} + +/* + * __dbenv_set_msgfile -- + * {DB_ENV,DB}->set_msgfile. + * + * PUBLIC: void __dbenv_set_msgfile __P((DB_ENV *, FILE *)); + */ +void +__dbenv_set_msgfile(dbenv, msgfile) + DB_ENV *dbenv; + FILE *msgfile; +{ + dbenv->db_msgfile = msgfile; +} + /* * __dbenv_set_paniccall -- * {DB_ENV,DB}->set_paniccall. @@ -797,7 +905,6 @@ __dbenv_get_verbose(dbenv, which, onoffp) int *onoffp; { switch (which) { - case DB_VERB_CHKPOINT: case DB_VERB_DEADLOCK: case DB_VERB_RECOVERY: case DB_VERB_REPLICATION: @@ -823,7 +930,6 @@ __dbenv_set_verbose(dbenv, which, on) int on; { switch (which) { - case DB_VERB_CHKPOINT: case DB_VERB_DEADLOCK: case DB_VERB_RECOVERY: case DB_VERB_REPLICATION: @@ -850,7 +956,8 @@ __db_mi_env(dbenv, name) DB_ENV *dbenv; const char *name; { - __db_err(dbenv, "%s: method not permitted in shared environment", name); + __db_err(dbenv, "%s: method not permitted when environment specified", + name); return (EINVAL); } diff --git a/db/env/env_open.c b/db/env/env_open.c index d4d2313ed..6c5c3d67b 100644 --- a/db/env/env_open.c +++ b/db/env/env_open.c @@ -1,20 +1,19 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
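The new message channel mirrors the existing error channel: a callback set with set_msgcall, or a FILE * set with set_msgfile, receives the informational output that the stat_print methods added elsewhere in this patchset emit through __db_msg. A small usage sketch of the new callback signature:

#include <stdio.h>
#include <db.h>

/* Route informational output (e.g. stat_print) to stderr with a prefix. */
static void
my_msgcall(const DB_ENV *dbenv, const char *msg)
{
	(void)dbenv;			/* Unused in this sketch. */
	fprintf(stderr, "bdb: %s\n", msg);
}

int
main(void)
{
	DB_ENV *dbenv;

	if (db_env_create(&dbenv, 0) != 0)
		return (1);

	dbenv->set_msgcall(dbenv, my_msgcall);	/* Callback ... */
	dbenv->set_msgfile(dbenv, stdout);	/* ... or a FILE *; the
						 * callback typically takes
						 * precedence when both are
						 * configured. */

	(void)dbenv->close(dbenv, 0);
	return (0);
}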
+ * + * $Id: env_open.c,v 11.177 2004/07/17 18:55:08 ubell Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: env_open.c,v 11.144 2003/09/13 18:39:34 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include +#include #include #include #endif @@ -118,16 +117,15 @@ __dbenv_open(dbenv, db_home, flags, mode) DB_PRIVATE | DB_RECOVER | DB_RECOVER_FATAL)) != 0) return (ret); if (LF_ISSET(DB_INIT_REP) && !LF_ISSET(DB_INIT_TXN)) { - __db_err(dbenv, "Replication must be used with transactions"); + __db_err(dbenv, "Replication requires transaction support"); return (EINVAL); } if (LF_ISSET(DB_INIT_REP) && !LF_ISSET(DB_INIT_LOCK)) { - __db_err(dbenv, "Replication must be used with locking"); + __db_err(dbenv, "Replication requires locking support"); return (EINVAL); } - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && !LF_ISSET(DB_INIT_TXN)) { - __db_err(dbenv, - "Setting non-durability only valid with transactions"); + if (LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL) && !LF_ISSET(DB_INIT_TXN)) { + __db_err(dbenv, "Recovery requires transaction support"); return (EINVAL); } @@ -139,7 +137,7 @@ __dbenv_open(dbenv, db_home, flags, mode) #ifdef HAVE_MUTEX_THREAD_ONLY if (!LF_ISSET(DB_PRIVATE)) { __db_err(dbenv, - "Berkeley DB library configured to support only DB_PRIVATE environments"); + "Berkeley DB library configured to support only private environments"); return (EINVAL); } #endif @@ -189,57 +187,79 @@ __dbenv_open(dbenv, db_home, flags, mode) dbenv->db_mode = mode == 0 ? __db_omode("rwrw--") : mode; /* - * Create/join the environment. We pass in the flags that - * will be of interest to an environment joining later; if - * we're not the ones to do the create, we - * pull out whatever has been stored. + * Create/join the environment. We pass in the flags that will be of + * interest to an environment joining later; if we're not the ones to + * do the create, we pull out whatever has been stored. */ init_flags = 0; - init_flags |= (LF_ISSET(DB_INIT_CDB) ? DB_INITENV_CDB : 0); - init_flags |= (LF_ISSET(DB_INIT_LOCK) ? DB_INITENV_LOCK : 0); - init_flags |= (LF_ISSET(DB_INIT_LOG) ? DB_INITENV_LOG : 0); - init_flags |= (LF_ISSET(DB_INIT_MPOOL) ? DB_INITENV_MPOOL : 0); - init_flags |= (LF_ISSET(DB_INIT_REP) ? DB_INITENV_REP : 0); - init_flags |= (LF_ISSET(DB_INIT_TXN) ? DB_INITENV_TXN : 0); - init_flags |= - (F_ISSET(dbenv, DB_ENV_CDB_ALLDB) ? DB_INITENV_CDB_ALLDB : 0); - + if (LF_ISSET(DB_INIT_CDB)) + FLD_SET(init_flags, DB_INITENV_CDB); + if (LF_ISSET(DB_INIT_LOCK)) + FLD_SET(init_flags, DB_INITENV_LOCK); + if (LF_ISSET(DB_INIT_LOG)) + FLD_SET(init_flags, DB_INITENV_LOG); + if (LF_ISSET(DB_INIT_MPOOL)) + FLD_SET(init_flags, DB_INITENV_MPOOL); + if (LF_ISSET(DB_INIT_REP)) + FLD_SET(init_flags, DB_INITENV_REP); + if (LF_ISSET(DB_INIT_TXN)) + FLD_SET(init_flags, DB_INITENV_TXN); + if (F_ISSET(dbenv, DB_ENV_CDB_ALLDB)) + FLD_SET(init_flags, DB_INITENV_CDB_ALLDB); if ((ret = __db_e_attach(dbenv, &init_flags)) != 0) goto err; /* - * __db_e_attach will return the saved init_flags field, which - * contains the DB_INIT_* flags used when we were created. + * __db_e_attach will return the saved init_flags field, which contains + * the DB_INIT_* flags used when the environment was created. + * + * Check if existing environment flags conflict with our flags. 
*/ + if (LF_ISSET(DB_INIT_CDB) && FLD_ISSET(init_flags, DB_INITENV_TXN)) { + __db_err(dbenv, + "Concurrent Data Store incompatible with environment"); + ret = EINVAL; + goto err; + } + if (LF_ISSET(DB_INIT_TXN) && FLD_ISSET(init_flags, DB_INITENV_CDB)) { + __db_err(dbenv, + "Transactional Data Store incompatible with environment"); + ret = EINVAL; + goto err; + } + + /* If we're joining the environment, find out what we're joining. */ if (LF_ISSET(DB_JOINENV)) { LF_CLR(DB_JOINENV); - - LF_SET((init_flags & DB_INITENV_CDB) ? DB_INIT_CDB : 0); - LF_SET((init_flags & DB_INITENV_LOCK) ? DB_INIT_LOCK : 0); - LF_SET((init_flags & DB_INITENV_LOG) ? DB_INIT_LOG : 0); - LF_SET((init_flags & DB_INITENV_MPOOL) ? DB_INIT_MPOOL : 0); - LF_SET((init_flags & DB_INITENV_REP) ? DB_INIT_REP : 0); - LF_SET((init_flags & DB_INITENV_TXN) ? DB_INIT_TXN : 0); - - if (LF_ISSET(DB_INITENV_CDB_ALLDB) && + if (FLD_ISSET(init_flags, DB_INITENV_CDB)) + LF_SET(DB_INIT_CDB); + if (FLD_ISSET(init_flags, DB_INITENV_LOCK)) + LF_SET(DB_INIT_LOCK); + if (FLD_ISSET(init_flags, DB_INITENV_LOG)) + LF_SET(DB_INIT_LOG); + if (FLD_ISSET(init_flags, DB_INITENV_MPOOL)) + LF_SET(DB_INIT_MPOOL); + if (FLD_ISSET(init_flags, DB_INITENV_REP)) + LF_SET(DB_INIT_REP); + if (FLD_ISSET(init_flags, DB_INITENV_TXN)) + LF_SET(DB_INIT_TXN); + if (FLD_ISSET(init_flags, DB_INITENV_CDB_ALLDB) && (ret = __dbenv_set_flags(dbenv, DB_CDB_ALLDB, 1)) != 0) goto err; } + /* + * Save the flags passed to create the DB_ENV->open, that is, we've + * now replaced flags like DB_JOINENV with the flags responsible for + * the underlying set of subsystems. + */ + dbenv->open_flags = flags; + /* Initialize for CDB product. */ if (LF_ISSET(DB_INIT_CDB)) { LF_SET(DB_INIT_LOCK); F_SET(dbenv, DB_ENV_CDB); } - if (LF_ISSET(DB_RECOVER | - DB_RECOVER_FATAL) && !LF_ISSET(DB_INIT_TXN)) { - __db_err(dbenv, - "DB_RECOVER and DB_RECOVER_FATAL require DB_TXN_INIT in DB_ENV->open"); - ret = EINVAL; - goto err; - } - /* Save the flags passed to DB_ENV->open. */ - dbenv->open_flags = flags; /* * Initialize the subsystems. @@ -247,10 +267,8 @@ __dbenv_open(dbenv, db_home, flags, mode) * Initialize the replication area first, so that we can lock out this * call if we're currently running recovery for replication. */ - if (LF_ISSET(DB_INIT_REP)) { - if ((ret = __rep_open(dbenv)) != 0) - goto err; - } + if (LF_ISSET(DB_INIT_REP) && (ret = __rep_open(dbenv)) != 0) + goto err; rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; if (rep_check) @@ -267,7 +285,8 @@ __dbenv_open(dbenv, db_home, flags, mode) * This must be after the mpool init, but before the log initialization * because log_open may attempt to run log_recover during its open. */ - if ((ret = __crypto_region_init(dbenv)) != 0) + if (LF_ISSET(DB_INIT_MPOOL | DB_INIT_LOG | DB_INIT_TXN) && + (ret = __crypto_region_init(dbenv)) != 0) goto err; /* @@ -315,12 +334,6 @@ __dbenv_open(dbenv, db_home, flags, mode) if ((ret = __txn_init_recover(dbenv, &dbenv->recover_dtab, &dbenv->recover_dtab_size)) != 0) goto err; - - /* Perform recovery for any previous run. */ - if (LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL) && - (ret = __db_apprec(dbenv, NULL, NULL, 1, - LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL))) != 0) - goto err; } /* @@ -339,18 +352,30 @@ __dbenv_open(dbenv, db_home, flags, mode) * already be initialized. 
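Per the DB_JOINENV hunk above, a joining process has DB_JOINENV replaced by whatever DB_INIT_* subsystems the creator stored in the region, and a CDB/TXN mismatch between joiner and environment is now rejected outright. A hedged usage sketch of the two sides (the environment directory name is illustrative and must already exist):

#include <stdio.h>
#include <db.h>

int
main(int argc, char *argv[])
{
	DB_ENV *dbenv;
	const char *home;
	int ret;

	home = argc > 1 ? argv[1] : "env";	/* Must already exist. */

	if (db_env_create(&dbenv, 0) != 0)
		return (1);

	/* First process: pick the subsystems; they are saved in the region. */
	ret = dbenv->open(dbenv, home, DB_CREATE | DB_INIT_LOCK |
	    DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0);

	/*
	 * A later process could instead pass DB_JOINENV and, per the hunk
	 * above, have it replaced by the creator's DB_INIT_* flags pulled
	 * back out of the saved init_flags word.
	 */
	if (ret != 0)
		fprintf(stderr, "DB_ENV->open: %s\n", db_strerror(ret));

	(void)dbenv->close(dbenv, 0);
	return (ret == 0 ? 0 : 1);
}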
*/ LIST_INIT(&dbenv->dblist); - if (F_ISSET(dbenv, DB_ENV_THREAD) && LF_ISSET(DB_INIT_MPOOL)) { + if (LF_ISSET(DB_INIT_MPOOL)) { dbmp = dbenv->mp_handle; - if ((ret = __db_mutex_setup( - dbenv, dbmp->reginfo, &dbenv->dblist_mutexp, - MUTEX_ALLOC | MUTEX_THREAD)) != 0) - goto err; - if ((ret = __db_mutex_setup( - dbenv, dbmp->reginfo, &dbenv->mt_mutexp, - MUTEX_ALLOC | MUTEX_THREAD)) != 0) - goto err; + if (F_ISSET(dbenv, DB_ENV_THREAD)) { + if ((ret = __db_mutex_setup( + dbenv, dbmp->reginfo, &dbenv->dblist_mutexp, + MUTEX_ALLOC | MUTEX_THREAD)) != 0) + goto err; + if ((ret = __db_mutex_setup( + dbenv, dbmp->reginfo, &dbenv->mt_mutexp, + MUTEX_ALLOC | MUTEX_THREAD)) != 0) + goto err; + } + /* Register DB's pgin/pgout functions. */ + if ((ret = __memp_register( + dbenv, DB_FTYPE_SET, __db_pgin, __db_pgout)) != 0) + return (ret); } + /* Perform recovery for any previous run. */ + if (LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL) && + (ret = __db_apprec(dbenv, NULL, NULL, 1, + LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL))) != 0) + goto err; + /* * If we've created the regions, are running with transactions, and did * not just run recovery, we need to log the fact that the transaction @@ -362,17 +387,24 @@ __dbenv_open(dbenv, db_home, flags, mode) * don't need to do anything here in the recover case. */ if (TXN_ON(dbenv) && + !F_ISSET(dbenv, DB_ENV_LOG_INMEMORY) && F_ISSET((REGINFO *)dbenv->reginfo, REGION_CREATE) && !LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL) && (ret = __txn_reset(dbenv)) != 0) goto err; if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (0); -err: /* If we fail after creating the regions, remove them. */ +err: /* + * If we fail after creating the regions, remove them. + * + * !!! + * No need to call __env_db_rep_exit, that work is done by the calls to + * __dbenv_refresh. + */ if (dbenv->reginfo != NULL && F_ISSET((REGINFO *)dbenv->reginfo, REGION_CREATE)) { ret = __db_panic(dbenv, ret); @@ -460,13 +492,20 @@ __dbenv_config(dbenv, db_home, flags) if ((ret = __db_home(dbenv, db_home, flags)) != 0) return (ret); - /* Parse the config file. */ - if ((ret = + /* + * If the application specified an environment directory, parse any + * config file we find there. + */ + p = NULL; + if (dbenv->db_home != NULL && (ret = __db_appname(dbenv, DB_APP_NONE, "DB_CONFIG", 0, NULL, &p)) != 0) return (ret); - - fp = fopen(p, "r"); - __os_free(dbenv, p); + if (p == NULL) + fp = NULL; + else { + fp = fopen(p, "r"); + __os_free(dbenv, p); + } if (fp != NULL) { while (fgets(buf, sizeof(buf), fp) != NULL) { @@ -565,13 +604,6 @@ __dbenv_close(dbenv, rep_check) (t_ret = __rep_preclose(dbenv, 1)) != 0 && ret == 0) ret = t_ret; - if (dbenv->db_ref != 0) { - __db_err(dbenv, - "Database handles open during environment close"); - if (ret == 0) - ret = EINVAL; - } - /* * Detach from the regions and undo the allocations done by * DB_ENV->open. @@ -579,15 +611,16 @@ __dbenv_close(dbenv, rep_check) if ((t_ret = __dbenv_refresh(dbenv, 0, rep_check)) != 0 && ret == 0) ret = t_ret; - /* Do per-subsystem destruction. */ - __lock_dbenv_close(dbenv); + /* Do per-subsystem close. */ + if ((t_ret = __lock_dbenv_close(dbenv)) != 0 && ret == 0) + ret = t_ret; if ((t_ret = __rep_dbenv_close(dbenv)) != 0 && ret == 0) ret = t_ret; #ifdef HAVE_CRYPTO /* - * Crypto comes last, because higher level close functions needs + * Crypto comes last, because higher level close functions need * cryptography. 
*/ if ((t_ret = __crypto_dbenv_close(dbenv)) != 0 && ret == 0) @@ -625,19 +658,21 @@ __dbenv_refresh(dbenv, orig_flags, rep_check) u_int32_t orig_flags; int rep_check; { + DB *ldbp; DB_MPOOL *dbmp; int ret, t_ret; + dbmp = dbenv->mp_handle; ret = 0; /* - * Close subsystems, in the reverse order they were opened (txn + * Refresh subsystems, in the reverse order they were opened (txn * must be first, it may want to discard locks and flush the log). * * !!! * Note that these functions, like all of __dbenv_refresh, only undo * the effects of __dbenv_open. Functions that undo work done by - * db_env_create or by a configurator function should go in + * db_env_create or by a configuration function should go in * __dbenv_close. */ if (TXN_ON(dbenv) && @@ -652,9 +687,17 @@ __dbenv_refresh(dbenv, orig_flags, rep_check) * Locking should come after logging, because closing log results * in files closing which may require locks being released. */ - if (LOCKING_ON(dbenv) && - (t_ret = __lock_dbenv_refresh(dbenv)) != 0 && ret == 0) - ret = t_ret; + if (LOCKING_ON(dbenv)) { + if (!F_ISSET(dbenv, DB_ENV_THREAD) && + dbenv->env_lid != DB_LOCK_INVALIDID && + (t_ret = __lock_id_free(dbenv, dbenv->env_lid)) != 0 && + ret == 0) + ret = t_ret; + dbenv->env_lid = DB_LOCK_INVALIDID; + + if ((t_ret = __lock_dbenv_refresh(dbenv)) != 0 && ret == 0) + ret = t_ret; + } /* * Discard DB list and its mutex. @@ -667,15 +710,21 @@ __dbenv_refresh(dbenv, orig_flags, rep_check) * we close databases and try to acquire the mutex when we close * log file handles. Ick. */ + if (dbenv->db_ref != 0) { + __db_err(dbenv, "Database handles remain at environment close"); + for (ldbp = LIST_FIRST(&dbenv->dblist); + ldbp != NULL; ldbp = LIST_NEXT(ldbp, dblistlinks)) + __db_err(dbenv, "Open database handle: %s%s%s", + ldbp->fname, ldbp->dname == NULL ? "" : "/", + ldbp->dname == NULL ? "" : ldbp->dname); + if (ret == 0) + ret = EINVAL; + } LIST_INIT(&dbenv->dblist); - if (dbenv->dblist_mutexp != NULL) { - dbmp = dbenv->mp_handle; + if (dbenv->dblist_mutexp != NULL) __db_mutex_free(dbenv, dbmp->reginfo, dbenv->dblist_mutexp); - } - if (dbenv->mt_mutexp != NULL) { - dbmp = dbenv->mp_handle; + if (dbenv->mt_mutexp != NULL) __db_mutex_free(dbenv, dbmp->reginfo, dbenv->mt_mutexp); - } if (dbenv->mt != NULL) { __os_free(dbenv, dbenv->mt); dbenv->mt = NULL; @@ -706,11 +755,12 @@ __dbenv_refresh(dbenv, orig_flags, rep_check) * to do harm. */ if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); - /* Detach from the region. */ /* - * Must come after we call __env_rep_exit above. + * Detach from the region. + * + * Must come after we call __env_db_rep_exit above. */ __rep_dbenv_refresh(dbenv); @@ -745,7 +795,11 @@ __dbenv_refresh(dbenv, orig_flags, rep_check) } #define DB_ADDSTR(add) { \ - if ((add) != NULL) { \ + /* \ + * The string might be NULL or zero-length, and the p[-1] \ + * might indirect to before the beginning of our buffer. \ + */ \ + if ((add) != NULL && (add)[0] != '\0') { \ /* If leading slash, start over. 
*/ \ if (__os_abspath(add)) { \ p = str; \ @@ -994,8 +1048,8 @@ illegal: __db_err(dbenv, "mis-formatted name-value pair: %s", s); if (!strcasecmp(name, "set_cachesize")) { if (sscanf(value, "%lu %lu %lu %c", &v1, &v2, &v3, &v4) != 3) goto badarg; - __DB_OVFL(v1, UINT32_T_MAX); - __DB_OVFL(v2, UINT32_T_MAX); + __DB_OVFL(v1, UINT32_MAX); + __DB_OVFL(v2, UINT32_MAX); __DB_OVFL(v3, 10000); return (__memp_set_cachesize( dbenv, (u_int32_t)v1, (u_int32_t)v2, (int)v3)); @@ -1005,6 +1059,15 @@ illegal: __db_err(dbenv, "mis-formatted name-value pair: %s", s); !strcasecmp(name, "db_data_dir")) /* Compatibility. */ return (__dbenv_set_data_dir(dbenv, value)); + if (!strcasecmp(name, "set_intermediate_dir")) {/* Undocumented. */ + if (sscanf(value, "%lu %c", &v1, &v4) != 1) + goto badarg; +#ifdef INT_MAX + __DB_OVFL(v1, INT_MAX); +#endif + return (__dbenv_set_intermediate_dir(dbenv, (int)v1, 0)); + } + if (!strcasecmp(name, "set_flags")) { if (sscanf(value, "%40s %c", arg, &v4) != 1) goto badarg; @@ -1017,8 +1080,12 @@ illegal: __db_err(dbenv, "mis-formatted name-value pair: %s", s); return (__dbenv_set_flags(dbenv, DB_DIRECT_DB, 1)); if (!strcasecmp(value, "db_direct_log")) return (__dbenv_set_flags(dbenv, DB_DIRECT_LOG, 1)); + if (!strcasecmp(value, "db_dsync_log")) + return (__dbenv_set_flags(dbenv, DB_DSYNC_LOG, 1)); if (!strcasecmp(value, "db_log_autoremove")) return (__dbenv_set_flags(dbenv, DB_LOG_AUTOREMOVE, 1)); + if (!strcasecmp(value, "db_log_inmemory")) + return (__dbenv_set_flags(dbenv, DB_LOG_INMEMORY, 1)); if (!strcasecmp(value, "db_nolocking")) return (__dbenv_set_flags(dbenv, DB_NOLOCKING, 1)); if (!strcasecmp(value, "db_nommap")) @@ -1031,9 +1098,6 @@ illegal: __db_err(dbenv, "mis-formatted name-value pair: %s", s); return (__dbenv_set_flags(dbenv, DB_REGION_INIT, 1)); if (!strcasecmp(value, "db_txn_nosync")) return (__dbenv_set_flags(dbenv, DB_TXN_NOSYNC, 1)); - if (!strcasecmp(value, "db_txn_not_durable")) - return ( - __dbenv_set_flags(dbenv, DB_TXN_NOT_DURABLE, 1)); if (!strcasecmp(value, "db_txn_write_nosync")) return ( __dbenv_set_flags(dbenv, DB_TXN_WRITE_NOSYNC, 1)); @@ -1045,21 +1109,21 @@ illegal: __db_err(dbenv, "mis-formatted name-value pair: %s", s); if (!strcasecmp(name, "set_lg_bsize")) { if (sscanf(value, "%lu %c", &v1, &v4) != 1) goto badarg; - __DB_OVFL(v1, UINT32_T_MAX); + __DB_OVFL(v1, UINT32_MAX); return (__log_set_lg_bsize(dbenv, (u_int32_t)v1)); } if (!strcasecmp(name, "set_lg_max")) { if (sscanf(value, "%lu %c", &v1, &v4) != 1) goto badarg; - __DB_OVFL(v1, UINT32_T_MAX); + __DB_OVFL(v1, UINT32_MAX); return (__log_set_lg_max(dbenv, (u_int32_t)v1)); } if (!strcasecmp(name, "set_lg_regionmax")) { if (sscanf(value, "%lu %c", &v1, &v4) != 1) goto badarg; - __DB_OVFL(v1, UINT32_T_MAX); + __DB_OVFL(v1, UINT32_MAX); return (__log_set_lg_regionmax(dbenv, (u_int32_t)v1)); } @@ -1076,6 +1140,8 @@ illegal: __db_err(dbenv, "mis-formatted name-value pair: %s", s); flags = DB_LOCK_EXPIRE; else if (!strcasecmp(value, "db_lock_maxlocks")) flags = DB_LOCK_MAXLOCKS; + else if (!strcasecmp(value, "db_lock_maxwrite")) + flags = DB_LOCK_MAXWRITE; else if (!strcasecmp(value, "db_lock_minlocks")) flags = DB_LOCK_MINLOCKS; else if (!strcasecmp(value, "db_lock_minwrite")) @@ -1094,43 +1160,58 @@ illegal: __db_err(dbenv, "mis-formatted name-value pair: %s", s); if (!strcasecmp(name, "set_lk_max")) { if (sscanf(value, "%lu %c", &v1, &v4) != 1) goto badarg; - __DB_OVFL(v1, UINT32_T_MAX); + __DB_OVFL(v1, UINT32_MAX); return (__lock_set_lk_max(dbenv, (u_int32_t)v1)); } if (!strcasecmp(name, 
"set_lk_max_locks")) { if (sscanf(value, "%lu %c", &v1, &v4) != 1) goto badarg; - __DB_OVFL(v1, UINT32_T_MAX); + __DB_OVFL(v1, UINT32_MAX); return (__lock_set_lk_max_locks(dbenv, (u_int32_t)v1)); } if (!strcasecmp(name, "set_lk_max_lockers")) { if (sscanf(value, "%lu %c", &v1, &v4) != 1) goto badarg; - __DB_OVFL(v1, UINT32_T_MAX); + __DB_OVFL(v1, UINT32_MAX); return (__lock_set_lk_max_lockers(dbenv, (u_int32_t)v1)); } if (!strcasecmp(name, "set_lk_max_objects")) { if (sscanf(value, "%lu %c", &v1, &v4) != 1) goto badarg; - __DB_OVFL(v1, UINT32_T_MAX); + __DB_OVFL(v1, UINT32_MAX); return (__lock_set_lk_max_objects(dbenv, (u_int32_t)v1)); } if (!strcasecmp(name, "set_lock_timeout")) { if (sscanf(value, "%lu %c", &v1, &v4) != 1) goto badarg; - __DB_OVFL(v1, UINT32_T_MAX); + __DB_OVFL(v1, UINT32_MAX); return (__lock_set_env_timeout( dbenv, (u_int32_t)v1, DB_SET_LOCK_TIMEOUT)); } + if (!strcasecmp(name, "set_mp_max_openfd")) { + if (sscanf(value, "%lu %c", &v1, &v4) != 1) + goto badarg; + __DB_OVFL(v1, INT_MAX); + return (__memp_set_mp_max_openfd(dbenv, (int)v1)); + } + + if (!strcasecmp(name, "set_mp_max_write")) { + if (sscanf(value, "%lu %lu %c", &v1, &v2, &v4) != 2) + goto badarg; + __DB_OVFL(v1, INT_MAX); + __DB_OVFL(v2, INT_MAX); + return (__memp_set_mp_max_write(dbenv, (int)v1, (int)v2)); + } + if (!strcasecmp(name, "set_mp_mmapsize")) { if (sscanf(value, "%lu %c", &v1, &v4) != 1) goto badarg; - __DB_OVFL(v1, UINT32_T_MAX); + __DB_OVFL(v1, UINT32_MAX); return (__memp_set_mp_mmapsize(dbenv, (u_int32_t)v1)); } @@ -1150,7 +1231,7 @@ illegal: __db_err(dbenv, "mis-formatted name-value pair: %s", s); if (!strcasecmp(name, "set_tas_spins")) { if (sscanf(value, "%lu %c", &v1, &v4) != 1) goto badarg; - __DB_OVFL(v1, UINT32_T_MAX); + __DB_OVFL(v1, UINT32_MAX); return (__dbenv_set_tas_spins(dbenv, (u_int32_t)v1)); } @@ -1161,14 +1242,14 @@ illegal: __db_err(dbenv, "mis-formatted name-value pair: %s", s); if (!strcasecmp(name, "set_tx_max")) { if (sscanf(value, "%lu %c", &v1, &v4) != 1) goto badarg; - __DB_OVFL(v1, UINT32_T_MAX); + __DB_OVFL(v1, UINT32_MAX); return (__txn_set_tx_max(dbenv, (u_int32_t)v1)); } if (!strcasecmp(name, "set_txn_timeout")) { if (sscanf(value, "%lu %c", &v1, &v4) != 1) goto badarg; - __DB_OVFL(v1, UINT32_T_MAX); + __DB_OVFL(v1, UINT32_MAX); return (__lock_set_env_timeout( dbenv, (u_int32_t)v1, DB_SET_TXN_TIMEOUT)); } @@ -1177,8 +1258,6 @@ illegal: __db_err(dbenv, "mis-formatted name-value pair: %s", s); if (sscanf(value, "%40s %c", arg, &v4) != 1) goto badarg; - if (!strcasecmp(value, "db_verb_chkpoint")) - flags = DB_VERB_CHKPOINT; else if (!strcasecmp(value, "db_verb_deadlock")) flags = DB_VERB_DEADLOCK; else if (!strcasecmp(value, "db_verb_recovery")) @@ -1215,9 +1294,8 @@ __db_tmp_open(dbenv, tmp_oflags, path, fhpp) DB_FH **fhpp; { u_int32_t id; - int mode, isdir, ret; - const char *p; - char *trv; + int filenum, i, isdir, ret; + char *firstx, *trv; /* * Check the target directory; if you have six X's and it doesn't @@ -1233,38 +1311,19 @@ __db_tmp_open(dbenv, tmp_oflags, path, fhpp) } /* Build the path. */ - for (trv = path; *trv != '\0'; ++trv) - ; - *trv = PATH_SEPARATOR[0]; - for (p = DB_TRAIL; (*++trv = *p) != '\0'; ++p) - ; + (void)strncat(path, PATH_SEPARATOR, 1); + (void)strcat(path, DB_TRAIL); - /* Replace the X's with the process ID. 
*/ - for (__os_id(&id); *--trv == 'X'; id /= 10) - switch (id % 10) { - case 0: *trv = '0'; break; - case 1: *trv = '1'; break; - case 2: *trv = '2'; break; - case 3: *trv = '3'; break; - case 4: *trv = '4'; break; - case 5: *trv = '5'; break; - case 6: *trv = '6'; break; - case 7: *trv = '7'; break; - case 8: *trv = '8'; break; - case 9: *trv = '9'; break; - default: /* Impossible. */ - break; - } - ++trv; - - /* Set up open flags and mode. */ - mode = __db_omode("rw----"); + /* Replace the X's with the process ID (in decimal). */ + for (trv = path + strlen(path), __os_id(&id); *--trv == 'X'; id /= 10) + *trv = '0' + (id % 10); + firstx = trv + 1; /* Loop, trying to open a file. */ - for (;;) { + for (filenum = 1;; filenum++) { if ((ret = __os_open(dbenv, path, tmp_oflags | DB_OSO_CREATE | DB_OSO_EXCL | DB_OSO_TEMP, - mode, fhpp)) == 0) + __db_omode("rw----"), fhpp)) == 0) return (0); /* @@ -1281,22 +1340,26 @@ __db_tmp_open(dbenv, tmp_oflags, path, fhpp) } /* - * Tricky little algorithm for backward compatibility. - * Assumes sequential ordering of lower-case characters. + * Generate temporary file names in a backwards-compatible way. + * If id == 12345, the result is: + * /DB12345 (tried above, the first time through). + * /DBa2345 ... /DBz2345 + * /DBaa345 ... /DBaz345 + * /DBba345, and so on. + * + * XXX + * This algorithm is O(n**2) -- that is, creating 100 temporary + * files requires 5,000 opens, creating 1000 files requires + * 500,000. If applications open a lot of temporary files, we + * could improve performance by switching to timestamp-based + * file names. */ - for (;;) { - if (*trv == '\0') + for (i = filenum, trv = firstx; i > 0; i = (i - 1) / 26) + if (*trv++ == '\0') return (EINVAL); - if (*trv == 'z') - *trv++ = 'a'; - else { - if (isdigit((int)*trv)) - *trv = 'a'; - else - ++*trv; - break; - } - } + + for (i = filenum; i > 0; i = (i - 1) / 26) + *--trv = 'a' + ((i - 1) % 26); } /* NOTREACHED */ } diff --git a/db/env/env_recover.c b/db/env/env_recover.c index 80de4c1c7..0bd45e2bf 100644 --- a/db/env/env_recover.c +++ b/db/env/env_recover.c @@ -1,17 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: env_recover.c,v 11.126 2004/09/22 03:43:52 bostic Exp $ */ #include "db_config.h" #ifndef lint static const char copyright[] = - "Copyright (c) 1996-2003\nSleepycat Software Inc. All rights reserved.\n"; -static const char revid[] = - "$Id: env_recover.c,v 11.112 2003/09/13 18:46:20 bostic Exp $"; + "Copyright (c) 1996-2004\nSleepycat Software Inc. All rights reserved.\n"; #endif #ifndef NO_SYSTEM_INCLUDES @@ -39,7 +39,7 @@ static const char revid[] = #include "dbinc/mp.h" #include "dbinc/db_am.h" -static int __log_backup __P((DB_ENV *, DB_LOGC *, DB_LSN *, DB_LSN *)); +static int __db_log_corrupt __P((DB_ENV *, DB_LSN *)); static int __log_earliest __P((DB_ENV *, DB_LOGC *, int32_t *, DB_LSN *)); static double __lsn_diff __P((DB_LSN *, DB_LSN *, DB_LSN *, u_int32_t, int)); @@ -50,28 +50,28 @@ static double __lsn_diff __P((DB_LSN *, DB_LSN *, DB_LSN *, u_int32_t, int)); * LSN of max_lsn, so we need to roll back sufficiently far for that * to work. See __log_backup for details. 
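The rewritten __db_tmp_open naming loop above encodes the retry count into the X field in bijective base-26 (a, b, ..., z, aa, ab, ...), which is what produces the /DB12345, /DBa2345, /DBaa345 sequence described in its comment. A standalone sketch of the same encoding, with a hypothetical pid-derived name:

#include <stdio.h>
#include <string.h>

/*
 * Encode a retry count the way the new __db_tmp_open loop does:
 * bijective base-26 (1 -> "a", 26 -> "z", 27 -> "aa", ...), written over
 * the leading characters of the X field.
 */
static int
encode_filenum(char *firstx, size_t width, int filenum)
{
	char *trv;
	int i;

	/* Count the letters needed; fail if the field is too narrow. */
	for (i = filenum, trv = firstx; i > 0; i = (i - 1) / 26)
		if ((size_t)(trv++ - firstx) >= width)
			return (-1);

	/* Write them back-to-front, least significant letter last. */
	for (i = filenum; i > 0; i = (i - 1) / 26)
		*--trv = 'a' + ((i - 1) % 26);
	return (0);
}

int
main(void)
{
	char name[] = "DB12345";	/* Hypothetical pid-derived name. */
	int filenum;

	for (filenum = 1; filenum <= 28; filenum++) {
		char buf[sizeof(name)];

		memcpy(buf, name, sizeof(name));
		if (encode_filenum(buf + 2, sizeof(name) - 3, filenum) == 0)
			printf("%2d -> %s\n", filenum, buf);
	}
	return (0);
}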
* - * PUBLIC: int __db_apprec __P((DB_ENV *, DB_LSN *, DB_LSN *, u_int32_t, - * PUBLIC: u_int32_t)); + * PUBLIC: int __db_apprec __P((DB_ENV *, DB_LSN *, DB_LSN *, int, u_int32_t)); */ int __db_apprec(dbenv, max_lsn, trunclsn, update, flags) DB_ENV *dbenv; DB_LSN *max_lsn, *trunclsn; - u_int32_t update, flags; + int update; + u_int32_t flags; { DBT data; DB_LOGC *logc; - DB_LSN ckp_lsn, first_lsn, last_lsn, lowlsn, lsn, stop_lsn; - DB_REP *db_rep; + DB_LSN ckp_lsn, first_lsn, last_lsn, lowlsn, lsn, stop_lsn, tlsn; DB_TXNREGION *region; - REP *rep; + REGENV *renv; + REGINFO *infop; __txn_ckp_args *ckp_args; time_t now, tlow; - int32_t log_size, low; double nfiles; - int have_rec, is_thread, progress, ret, t_ret; + u_int32_t hi_txn, log_size, txnid; + int32_t low; + int have_rec, progress, ret, t_ret; int (**dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - u_int32_t hi_txn, txnid; char *p, *pass, t1[60], t2[60]; void *txninfo; @@ -93,21 +93,13 @@ __db_apprec(dbenv, max_lsn, trunclsn, update, flags) ((LOG *)(((DB_LOG *)dbenv->lg_handle)->reginfo.primary))->log_size; /* - * Save the state of the thread flag -- we don't need it on at the - * moment because we're single-threaded until recovery is complete. - */ - is_thread = F_ISSET(dbenv, DB_ENV_THREAD) ? 1 : 0; - F_CLR(dbenv, DB_ENV_THREAD); - - /* - * If we need to, update the env handle timestamp. The timestamp - * field can be updated here without acquiring the rep mutex - * because recovery is single-threaded, even in the case of - * replication. + * If we need to, update the env handle timestamp. */ - if (update && (db_rep = dbenv->rep_handle) != NULL && - (rep = db_rep->region) != NULL) - (void)time(&rep->timestamp); + if (update) { + infop = dbenv->reginfo; + renv = infop->primary; + (void)time(&renv->rep_timestamp); + } /* Set in-recovery flags. */ F_SET((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER); @@ -124,7 +116,8 @@ __db_apprec(dbenv, max_lsn, trunclsn, update, flags) */ ZERO_LSN(lowlsn); if (max_lsn != NULL) { - if ((ret = __log_backup(dbenv, logc, max_lsn, &lowlsn)) != 0) + if ((ret = __log_backup(dbenv, logc, max_lsn, &lowlsn, + CKPLSN_CMP)) != 0) goto err; } else if (dbenv->tx_timestamp != 0) { if ((ret = __log_earliest(dbenv, logc, &low, &lowlsn)) != 0) @@ -185,12 +178,12 @@ __db_apprec(dbenv, max_lsn, trunclsn, update, flags) * we have to roll back to the checkpoint whose ckp_lsn * is earlier than the max_lsn. __log_backup will figure * that out for us. - * In case 2, "uncompleted TXNs" include all those who commited + * In case 2, "uncompleted TXNs" include all those who committed * after the user's specified timestamp. * * Pass #3: * Read forward through the log from the LSN found in pass #2, - * redoing any committed TXNs (which commited after any user- + * redoing any committed TXNs (which committed after any user- * specified rollback point). During this pass, checkpoint * file information is ignored, and file openings and closings * are redone. @@ -329,7 +322,7 @@ __db_apprec(dbenv, max_lsn, trunclsn, update, flags) (last_lsn.offset - first_lsn.offset) / log_size; else nfiles = (double)(last_lsn.file - first_lsn.file) + - (double)(log_size - first_lsn.offset + + (double)((log_size - first_lsn.offset) + last_lsn.offset) / log_size; /* We are going to divide by nfiles; make sure it isn't 0. */ if (nfiles == 0) @@ -351,8 +344,12 @@ __db_apprec(dbenv, max_lsn, trunclsn, update, flags) * There are no transactions and we're not recovering to an LSN (see * above), so there is nothing to do. 
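The nfiles calculation above estimates how many log files the recovery passes will cross; it is only used to scale the DB_RECOVER progress feedback. A worked example of the same arithmetic with made-up LSNs and log size:

#include <stdio.h>

struct lsn { unsigned long file, offset; };

int
main(void)
{
	/* Illustrative values only. */
	struct lsn first = { 10, 900000 }, last = { 13, 250000 };
	unsigned long log_size = 1000000;
	double nfiles;

	if (last.file == first.file)
		nfiles = (double)(last.offset - first.offset) / log_size;
	else
		nfiles = (double)(last.file - first.file) +
		    (double)((log_size - first.offset) + last.offset) /
		    log_size;

	printf("estimated log files to process: %.2f\n", nfiles);	/* 3.35 */
	return (0);
}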
*/ - if (ret == DB_NOTFOUND) - ret = 0; + if (ret == DB_NOTFOUND) { + if (log_compare(&lsn, &last_lsn) != 0) + ret = __db_log_corrupt(dbenv, &lsn); + else + ret = 0; + } /* Reset to the first lsn. */ if (ret != 0 || @@ -384,7 +381,7 @@ __db_apprec(dbenv, max_lsn, trunclsn, update, flags) */ if (FLD_ISSET(dbenv->verbose, DB_VERB_RECOVERY)) - __db_err(dbenv, "Recovery starting from [%lu][%lu]", + __db_msg(dbenv, "Recovery starting from [%lu][%lu]", (u_long)first_lsn.file, (u_long)first_lsn.offset); pass = "backward"; @@ -396,8 +393,9 @@ __db_apprec(dbenv, max_lsn, trunclsn, update, flags) &last_lsn, &lsn, log_size, 0) / nfiles)); dbenv->db_feedback(dbenv, DB_RECOVER, progress); } + tlsn = lsn; ret = __db_dispatch(dbenv, dbenv->recover_dtab, - dbenv->recover_dtab_size, &data, &lsn, + dbenv->recover_dtab_size, &data, &tlsn, DB_TXN_BACKWARD_ROLL, txninfo); if (ret != 0) { if (ret != DB_TXN_CKP) @@ -406,7 +404,13 @@ __db_apprec(dbenv, max_lsn, trunclsn, update, flags) ret = 0; } } - if (ret != 0 && ret != DB_NOTFOUND) + if (ret == DB_NOTFOUND) { + if (log_compare(&lsn, &first_lsn) > 0) + ret = __db_log_corrupt(dbenv, &lsn); + else + ret = 0; + } + if (ret != 0) goto err; /* @@ -426,21 +430,14 @@ __db_apprec(dbenv, max_lsn, trunclsn, update, flags) for (ret = __log_c_get(logc, &lsn, &data, DB_NEXT); ret == 0; ret = __log_c_get(logc, &lsn, &data, DB_NEXT)) { - /* - * If we are recovering to a timestamp or an LSN, - * we need to make sure that we don't try to roll - * forward beyond the soon-to-be end of log. - */ - if (log_compare(&lsn, &stop_lsn) > 0) - break; - if (dbenv->db_feedback != NULL) { progress = 67 + (int)(33 * (__lsn_diff(&first_lsn, &last_lsn, &lsn, log_size, 1) / nfiles)); dbenv->db_feedback(dbenv, DB_RECOVER, progress); } + tlsn = lsn; ret = __db_dispatch(dbenv, dbenv->recover_dtab, - dbenv->recover_dtab_size, &data, &lsn, + dbenv->recover_dtab_size, &data, &tlsn, DB_TXN_FORWARD_ROLL, txninfo); if (ret != 0) { if (ret != DB_TXN_CKP) @@ -448,11 +445,21 @@ __db_apprec(dbenv, max_lsn, trunclsn, update, flags) else ret = 0; } + /* + * If we are recovering to a timestamp or an LSN, + * we need to make sure that we don't try to roll + * forward beyond the soon-to-be end of log. + */ + if (log_compare(&lsn, &stop_lsn) >= 0) + break; } - if (ret != 0 && ret != DB_NOTFOUND) + if (ret == DB_NOTFOUND) + ret = __db_log_corrupt(dbenv, &lsn); + if (ret != 0) goto err; +#ifndef HAVE_FTRUNCATE /* * Process any pages that were on the limbo list and move them to * the free list. Do this before checkpointing the database. @@ -460,6 +467,7 @@ __db_apprec(dbenv, max_lsn, trunclsn, update, flags) if ((ret = __db_do_the_limbo(dbenv, NULL, NULL, txninfo, dbenv->tx_timestamp != 0 ? LIMBO_TIMESTAMP : LIMBO_RECOVER)) != 0) goto err; +#endif if (max_lsn == NULL) region->last_txnid = ((DB_TXNHEAD *)txninfo)->maxid; @@ -473,8 +481,12 @@ __db_apprec(dbenv, max_lsn, trunclsn, update, flags) if ((ret = __memp_sync(dbenv, NULL)) != 0) goto err; region->last_ckp = ((DB_TXNHEAD *)txninfo)->ckplsn; - __log_vtruncate(dbenv, &((DB_TXNHEAD *)txninfo)->maxlsn, - &((DB_TXNHEAD *)txninfo)->ckplsn, trunclsn); + if ((ret = __log_vtruncate(dbenv, + &((DB_TXNHEAD *)txninfo)->maxlsn, + &((DB_TXNHEAD *)txninfo)->ckplsn, trunclsn)) != 0) + goto err; + +#ifndef HAVE_FTRUNCATE /* * Generate logging compensation records. 
* If we crash during/after vtruncate we may have @@ -485,6 +497,7 @@ __db_apprec(dbenv, max_lsn, trunclsn, update, flags) if ((ret = __db_do_the_limbo(dbenv, NULL, NULL, txninfo, LIMBO_COMPENSATE)) != 0) goto err; +#endif } /* Take a checkpoint here to force any dirty data pages to disk. */ @@ -497,13 +510,18 @@ __db_apprec(dbenv, max_lsn, trunclsn, update, flags) done: if (max_lsn != NULL) { - region->last_ckp = ((DB_TXNHEAD *)txninfo)->ckplsn; + if (!IS_ZERO_LSN(((DB_TXNHEAD *)txninfo)->ckplsn)) + region->last_ckp = ((DB_TXNHEAD *)txninfo)->ckplsn; + else if ((ret = + __txn_findlastckp(dbenv, ®ion->last_ckp, max_lsn)) != 0) + goto err; /* We are going to truncate, so we'd best close the cursor. */ if (logc != NULL && (ret = __log_c_close(logc)) != 0) goto err; - __log_vtruncate(dbenv, - max_lsn, &((DB_TXNHEAD *)txninfo)->ckplsn, trunclsn); + if ((ret = __log_vtruncate(dbenv, + max_lsn, &((DB_TXNHEAD *)txninfo)->ckplsn, trunclsn)) != 0) + goto err; /* * Now we need to open files that should be open in order for @@ -549,8 +567,8 @@ done: if (FLD_ISSET(dbenv->verbose, DB_VERB_RECOVERY)) { (void)time(&now); - __db_err(dbenv, "Recovery complete at %.24s", ctime(&now)); - __db_err(dbenv, "%s %lx %s [%lu][%lu]", + __db_msg(dbenv, "Recovery complete at %.24s", ctime(&now)); + __db_msg(dbenv, "%s %lx %s [%lu][%lu]", "Maximum transaction ID", (u_long)(txninfo == NULL ? TXN_MINIMUM : ((DB_TXNHEAD *)txninfo)->maxid), @@ -579,9 +597,6 @@ err: if (logc != NULL && (t_ret = __log_c_close(logc)) != 0 && ret == 0) dbenv->tx_timestamp = 0; - /* Restore the state of the thread flag, clear in-recovery flags. */ - if (is_thread) - F_SET(dbenv, DB_ENV_THREAD); F_CLR((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER); F_CLR(region, TXN_IN_RECOVERY); @@ -637,47 +652,83 @@ __lsn_diff(low, high, current, max, is_forward) * * This is used to find the earliest log record to process when a client * is trying to sync up with a master whose max LSN is less than this - * client's max lsn; we want to roll back everything after that + * client's max lsn; we want to roll back everything after that. + * Also used in the verify phase to walk back via checkpoints. * * Find the latest checkpoint whose ckp_lsn is less than the max lsn. + * PUBLIC: int __log_backup __P((DB_ENV *, DB_LOGC *, DB_LSN *, + * PUBLIC: DB_LSN *, u_int32_t)); */ -static int -__log_backup(dbenv, logc, max_lsn, start_lsn) +int +__log_backup(dbenv, logc, max_lsn, start_lsn, cmp) DB_ENV *dbenv; DB_LOGC *logc; DB_LSN *max_lsn, *start_lsn; + u_int32_t cmp; { - DB_LSN lsn; + DB_LSN cmp_lsn, lsn; DBT data; __txn_ckp_args *ckp_args; - int ret; + int lcmp, ret; memset(&data, 0, sizeof(data)); ckp_args = NULL; - /* - * Follow checkpoints through the log until we find one with - * a ckp_lsn less than max_lsn. - */ + if (cmp != CKPLSN_CMP && cmp != LASTCKP_CMP) + return (EINVAL); + if ((ret = __txn_getckp(dbenv, &lsn)) != 0) goto err; + /* + * Cmp tells us whether to check the ckp_lsn or the last_ckp + * fields in the checkpoint record. + */ while ((ret = __log_c_get(logc, &lsn, &data, DB_SET)) == 0) { if ((ret = __txn_ckp_read(dbenv, data.data, &ckp_args)) != 0) return (ret); - if (log_compare(&ckp_args->ckp_lsn, max_lsn) <= 0) { - *start_lsn = ckp_args->ckp_lsn; + if (cmp == CKPLSN_CMP) { + /* + * Follow checkpoints through the log until + * we find one with a ckp_lsn less than + * or equal max_lsn. 
+ */ + cmp_lsn = ckp_args->ckp_lsn; + lcmp = (log_compare(&cmp_lsn, max_lsn) <= 0); + } else { + /* + * When we're walking back through the checkpoints + * we want the LSN of this checkpoint strictly less + * than the max_lsn (also a ckp LSN). + */ + cmp_lsn = lsn; + lcmp = (log_compare(&cmp_lsn, max_lsn) < 0); + } + if (lcmp) { + *start_lsn = cmp_lsn; break; } lsn = ckp_args->last_ckp; - if (IS_ZERO_LSN(lsn)) + /* + * If there are no more checkpoints behind us, we're + * done. Break with DB_NOTFOUND. + */ + if (IS_ZERO_LSN(lsn)) { + ret = DB_NOTFOUND; break; + } __os_free(dbenv, ckp_args); } if (ckp_args != NULL) __os_free(dbenv, ckp_args); -err: if (IS_ZERO_LSN(*start_lsn) && (ret == 0 || ret == DB_NOTFOUND)) + /* + * For CKPLSN_CMP if we walked back through all the checkpoints, + * set the cursor on the first log record. For LASTCKP_CMP + * we want to return 0,0 in start_lsn. + */ +err: if (IS_ZERO_LSN(*start_lsn) && cmp == CKPLSN_CMP && + (ret == 0 || ret == DB_NOTFOUND)) ret = __log_c_get(logc, start_lsn, &data, DB_FIRST); return (ret); } @@ -754,7 +805,7 @@ __env_openfiles(dbenv, logc, txninfo, int in_recovery; double nfiles; { - DB_LSN lsn; + DB_LSN lsn, tlsn; u_int32_t log_size; int progress, ret; @@ -774,8 +825,9 @@ __env_openfiles(dbenv, logc, txninfo, last_lsn, &lsn, log_size, 1) / nfiles)); dbenv->db_feedback(dbenv, DB_RECOVER, progress); } + tlsn = lsn; ret = __db_dispatch(dbenv, - dbenv->recover_dtab, dbenv->recover_dtab_size, data, &lsn, + dbenv->recover_dtab, dbenv->recover_dtab_size, data, &tlsn, in_recovery ? DB_TXN_OPENFILES : DB_TXN_POPENFILES, txninfo); if (ret != 0 && ret != DB_TXN_CKP) { @@ -785,11 +837,26 @@ __env_openfiles(dbenv, logc, txninfo, break; } if ((ret = __log_c_get(logc, &lsn, data, DB_NEXT)) != 0) { - if (ret == DB_NOTFOUND) - ret = 0; + if (ret == DB_NOTFOUND) { + if (last_lsn != NULL && + log_compare(&lsn, last_lsn) != 0) + ret = __db_log_corrupt(dbenv, &lsn); + else + ret = 0; + } break; } } return (ret); } + +static int +__db_log_corrupt(dbenv, lsnp) + DB_ENV *dbenv; + DB_LSN *lsnp; +{ + __db_err(dbenv, "Log file corrupt at LSN: [%lu][%lu]", + (u_long)lsnp->file, (u_long)lsnp->offset); + return (EINVAL); +} diff --git a/db/env/env_region.c b/db/env/env_region.c index 2306407e7..21775b302 100644 --- a/db/env/env_region.c +++ b/db/env/env_region.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
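__log_backup now takes a cmp argument: CKPLSN_CMP looks for a checkpoint whose ckp_lsn is less than or equal to max_lsn, while LASTCKP_CMP walks the last_ckp chain for a checkpoint record whose own LSN is strictly less than max_lsn. A standalone sketch of the two comparison modes over a hypothetical in-memory checkpoint chain (the structures are stand-ins, not the real DB_LSN or __txn_ckp_args):

#include <stdio.h>

struct lsn { unsigned long file, offset; };
struct ckp {
	struct lsn lsn;		/* LSN of the checkpoint record itself. */
	struct lsn ckp_lsn;	/* Oldest LSN needed to recover from it. */
	int prev;		/* Previous checkpoint index, -1 at the end. */
};

static int
lsn_cmp(const struct lsn *a, const struct lsn *b)
{
	if (a->file != b->file)
		return (a->file < b->file ? -1 : 1);
	if (a->offset != b->offset)
		return (a->offset < b->offset ? -1 : 1);
	return (0);
}

/*
 * Walk back through the checkpoint chain and return the index of the first
 * checkpoint satisfying the comparison: ckplsn_cmp != 0 means ckp_lsn <= max,
 * otherwise the record's own LSN must be strictly < max.
 */
static int
log_backup(const struct ckp *chain, int last, const struct lsn *max,
    int ckplsn_cmp)
{
	int i;

	for (i = last; i != -1; i = chain[i].prev)
		if (ckplsn_cmp ?
		    lsn_cmp(&chain[i].ckp_lsn, max) <= 0 :
		    lsn_cmp(&chain[i].lsn, max) < 0)
			return (i);
	return (-1);		/* Nothing behind us: the DB_NOTFOUND case. */
}

int
main(void)
{
	const struct ckp chain[] = {
		{ { 1, 100 }, { 1,  50 }, -1 },
		{ { 2, 400 }, { 2, 100 },  0 },
		{ { 3, 900 }, { 3, 200 },  1 },
	};
	struct lsn max = { 3, 500 };

	printf("CKPLSN_CMP  -> checkpoint %d\n", log_backup(chain, 2, &max, 1));
	printf("LASTCKP_CMP -> checkpoint %d\n", log_backup(chain, 2, &max, 0));
	return (0);
}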
+ * + * $Id: env_region.c,v 11.102 2004/09/15 21:49:17 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: env_region.c,v 11.79 2003/10/31 01:56:10 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -19,6 +17,7 @@ static const char revid[] = "$Id: env_region.c,v 11.79 2003/10/31 01:56:10 bosti #include "db_int.h" #include "dbinc/db_shash.h" +#include "dbinc/crypto.h" #include "dbinc/lock.h" #include "dbinc/log.h" #include "dbinc/mp.h" @@ -48,7 +47,8 @@ __db_e_attach(dbenv, init_flagsp) size_t size; size_t nrw; u_int32_t mbytes, bytes; - int retry_cnt, ret, segid; + u_int retry_cnt; + int ret, segid; char buf[sizeof(DB_REGION_FMT) + 20]; #if !defined(HAVE_MUTEX_THREADS) @@ -72,7 +72,7 @@ __db_e_attach(dbenv, init_flagsp) */ if (F_ISSET(dbenv, DB_ENV_THREAD)) { __db_err(dbenv, -"architecture lacks fast mutexes: applications cannot be threaded"); + "architecture lacks fast mutexes: applications cannot be threaded"); return (EINVAL); } #endif @@ -86,9 +86,9 @@ loop: renv = NULL; /* Set up the DB_ENV's REG_INFO structure. */ if ((ret = __os_calloc(dbenv, 1, sizeof(REGINFO), &infop)) != 0) return (ret); + infop->dbenv = dbenv; infop->type = REGION_TYPE_ENV; infop->id = REGION_ID_ENV; - infop->mode = dbenv->db_mode; infop->flags = REGION_JOIN_OK; if (F_ISSET(dbenv, DB_ENV_CREATE)) F_SET(infop, REGION_CREATE_OK); @@ -127,7 +127,7 @@ loop: renv = NULL; */ if (F_ISSET(dbenv, DB_ENV_CREATE)) { if ((ret = __os_open(dbenv, infop->name, - DB_OSO_CREATE | DB_OSO_DIRECT | DB_OSO_EXCL | DB_OSO_REGION, + DB_OSO_CREATE | DB_OSO_EXCL | DB_OSO_REGION, dbenv->db_mode, &dbenv->lockfhp)) == 0) goto creation; if (ret != EEXIST) { @@ -141,8 +141,8 @@ loop: renv = NULL; * If we couldn't create the file, try and open it. (If that fails, * we're done.) */ - if ((ret = __os_open(dbenv, infop->name, DB_OSO_REGION | DB_OSO_DIRECT, - dbenv->db_mode, &dbenv->lockfhp)) != 0) + if ((ret = __os_open( + dbenv, infop->name, DB_OSO_REGION, 0, &dbenv->lockfhp)) != 0) goto err; /* The region exists, it's not okay to recreate it. */ @@ -249,7 +249,7 @@ loop: renv = NULL; * this means that all of our offsets (R_ADDR/R_OFFSET) get shifted * as well, but that should be fine. */ - infop->primary = R_ADDR(infop, 0); + infop->primary = infop->addr; infop->addr = (u_int8_t *)infop->addr + sizeof(REGENV); renv = infop->primary; @@ -259,10 +259,8 @@ loop: renv = NULL; __db_err(dbenv, "Program version %d.%d doesn't match environment version", DB_VERSION_MAJOR, DB_VERSION_MINOR); -#ifndef DIAGNOSTIC - ret = EINVAL; + ret = DB_VERSION_MISMATCH; goto err; -#endif } /* @@ -325,11 +323,13 @@ err_unlock: MUTEX_UNLOCK(dbenv, &renv->mutex); ++renv->refcnt; /* - * If our caller wants them, return the flags this environment was - * initialized with. + * Add configuration flags from our caller; return the total set of + * configuration flags for later DB_JOINENV calls. */ - if (init_flagsp != NULL) + if (init_flagsp != NULL) { + renv->init_flags |= *init_flagsp; *init_flagsp = renv->init_flags; + } /* Discard our lock. */ MUTEX_UNLOCK(dbenv, &renv->mutex); @@ -349,15 +349,15 @@ creation: F_SET(infop, REGION_CREATE); /* - * Allocate room for 50 REGION structures plus overhead (we're going + * Allocate room for 100 REGION structures plus overhead (we're going * to use this space for last-ditch allocation requests), although we * should never need anything close to that. * * Encryption passwds are stored in the env region. Add that in too. 
*/ memset(&tregion, 0, sizeof(tregion)); - tregion.size = (roff_t)(50 * sizeof(REGION) + - dbenv->passwd_len + 2048); + tregion.size = (roff_t)(100 * sizeof(REGION) + + dbenv->passwd_len + 4096); tregion.segid = INVALID_REGION_SEGID; if ((ret = __os_r_attach(dbenv, infop, &tregion)) != 0) goto err; @@ -386,9 +386,9 @@ creation: * region detach, and that all of our offsets (R_ADDR/R_OFFSET) will be * shifted as well, but that should be fine. */ - infop->primary = R_ADDR(infop, 0); + infop->primary = infop->addr; infop->addr = (u_int8_t *)infop->addr + sizeof(REGENV); - __db_shalloc_init(infop->addr, tregion.size - sizeof(REGENV)); + __db_shalloc_init(infop, tregion.size - sizeof(REGENV)); /* * Initialize the rest of the REGENV structure, except for the magic @@ -396,7 +396,8 @@ creation: */ renv = infop->primary; renv->envpanic = 0; - db_version(&renv->majver, &renv->minver, &renv->patch); + __os_unique_id(dbenv, &renv->envid); + (void)db_version(&renv->majver, &renv->minver, &renv->patch); SH_LIST_INIT(&renv->regionq); renv->refcnt = 1; renv->cipher_off = INVALID_ROFF; @@ -531,7 +532,7 @@ retry: /* Close any open file handle. */ __db_err(dbenv, "unable to join the environment"); ret = EAGAIN; } else { - (void)__os_sleep(dbenv, retry_cnt * 3, 0); + __os_sleep(dbenv, retry_cnt * 3, 0); goto loop; } } @@ -558,6 +559,7 @@ __db_e_detach(dbenv, destroy) if (F_ISSET(dbenv, DB_ENV_PRIVATE)) destroy = 1; + /* Lock the environment. */ MUTEX_LOCK(dbenv, &renv->mutex); @@ -578,36 +580,50 @@ __db_e_detach(dbenv, destroy) dbenv->lockfhp = NULL; } - /* Reset the addr value that we "corrected" above. */ - infop->addr = infop->primary; - /* - * If we are destroying the environment, we need to - * destroy any system resources backing the mutex, as well - * as any system resources that the replication system may have - * acquired and put in the main region. - * - * Do these now before we free the memory in __os_r_detach. + * If we are destroying the environment, destroy any system resources + * the crypto and replication systems may have acquired and put in the + * main region. */ if (destroy) { +#ifdef HAVE_CRYPTO + (void)__crypto_region_destroy(dbenv); +#endif (void)__rep_region_destroy(dbenv); - __db_mutex_destroy(&renv->mutex); - __db_mutex_destroy(&infop->rp->mutex); } /* * Release the region, and kill our reference. * + * If we are destroying the environment, destroy any system resources + * backing the mutex. + */ + if (destroy) { + (void)__db_mutex_destroy(&renv->mutex); + (void)__db_mutex_destroy(&infop->rp->mutex); + + /* + * Only free the REGION structure itself if it was separately + * allocated from the heap. + */ + if (F_ISSET(dbenv, DB_ENV_PRIVATE)) + __db_shalloc_free(infop, infop->rp); + } + + /* Reset the addr value that we "corrected" above. */ + infop->addr = infop->primary; + + (void)__os_r_detach(dbenv, infop, destroy); + if (infop->name != NULL) + __os_free(dbenv, infop->name); + + /* * We set the DB_ENV->reginfo field to NULL here and discard its memory. * DB_ENV->remove calls __dbenv_remove to do the region remove, and * __dbenv_remove attached and then detaches from the region. We don't * want to return to DB_ENV->remove with a non-NULL DB_ENV->reginfo * field because it will attempt to detach again as part of its cleanup. 
*/ - (void)__os_r_detach(dbenv, infop, destroy); - - if (infop->name != NULL) - __os_free(dbenv, infop->name); __os_free(dbenv, dbenv->reginfo); dbenv->reginfo = NULL; @@ -758,9 +774,8 @@ __db_e_remfile(dbenv) DB_ENV *dbenv; { int cnt, fcnt, lastrm, ret; - u_int8_t saved_byte; const char *dir; - char *p, **names, *path, buf[sizeof(DB_REGION_FMT) + 20]; + char saved_char, *p, **names, *path, buf[sizeof(DB_REGION_FMT) + 20]; /* Get the full path of a file in the environment. */ (void)snprintf(buf, sizeof(buf), "%s", DB_REGION_ENV); @@ -770,11 +785,11 @@ __db_e_remfile(dbenv) /* Get the parent directory for the environment. */ if ((p = __db_rpath(path)) == NULL) { p = path; - saved_byte = *p; + saved_char = *p; dir = PATH_DOT; } else { - saved_byte = *p; + saved_char = *p; *p = '\0'; dir = path; @@ -785,7 +800,7 @@ __db_e_remfile(dbenv) __db_err(dbenv, "%s: %s", dir, db_strerror(ret)); /* Restore the path, and free it. */ - *p = saved_byte; + *p = saved_char; __os_free(dbenv, path); if (ret != 0) @@ -804,6 +819,10 @@ __db_e_remfile(dbenv) if (strncmp(names[cnt], "__dbq.", 6) == 0) continue; + /* Skip replication files. */ + if (strncmp(names[cnt], "__db.rep.", 9) == 0) + continue; + /* * Remove the primary environment region last, because it's * the key to this whole mess. @@ -846,61 +865,6 @@ __db_e_remfile(dbenv) return (0); } -/* - * __db_e_stat - * Statistics for the environment. - * - * PUBLIC: int __db_e_stat __P((DB_ENV *, - * PUBLIC: REGENV *, REGION *, int *, u_int32_t)); - */ -int -__db_e_stat(dbenv, arg_renv, arg_regions, arg_regions_cnt, flags) - DB_ENV *dbenv; - REGENV *arg_renv; - REGION *arg_regions; - int *arg_regions_cnt; - u_int32_t flags; -{ - REGENV *renv; - REGINFO *infop; - REGION *rp; - int n, ret; - - infop = dbenv->reginfo; - renv = infop->primary; - rp = infop->rp; - if ((ret = __db_fchk(dbenv, - "DB_ENV->stat", flags, DB_STAT_CLEAR)) != 0) - return (ret); - - /* Lock the environment. */ - MUTEX_LOCK(dbenv, &rp->mutex); - - *arg_renv = *renv; - if (LF_ISSET(DB_STAT_CLEAR)) { - renv->mutex.mutex_set_nowait = 0; - renv->mutex.mutex_set_wait = 0; - } - - for (n = 0, rp = SH_LIST_FIRST(&renv->regionq, __db_region); - n < *arg_regions_cnt && rp != NULL; - ++n, rp = SH_LIST_NEXT(rp, q, __db_region)) { - arg_regions[n] = *rp; - if (LF_ISSET(DB_STAT_CLEAR)) { - rp->mutex.mutex_set_nowait = 0; - rp->mutex.mutex_set_wait = 0; - } - } - - /* Release the lock. */ - rp = infop->rp; - MUTEX_UNLOCK(dbenv, &rp->mutex); - - *arg_regions_cnt = n == 0 ? n : n - 1; - - return (0); -} - /* * __db_r_attach * Join/create a region. @@ -965,7 +929,7 @@ __db_r_attach(dbenv, infop, size) * If we created the region, initialize it for allocation. */ if (F_ISSET(infop, REGION_CREATE)) - (void)__db_shalloc_init(infop->addr, rp->size); + __db_shalloc_init(infop, rp->size); /* * If the underlying REGION isn't the environment, acquire a lock @@ -978,8 +942,8 @@ __db_r_attach(dbenv, infop, size) return (0); - /* Discard the underlying region. */ -err: if (infop->addr != NULL) +err: /* Discard the underlying region. */ + if (infop->addr != NULL) (void)__os_r_detach(dbenv, infop, F_ISSET(infop, REGION_CREATE)); infop->rp = NULL; @@ -1025,8 +989,8 @@ __db_r_detach(dbenv, infop, destroy) MUTEX_LOCK(dbenv, &rp->mutex); /* - * We need to call destroy on per-subsystem info before - * we free the memory associated with the region. + * We need to call destroy on per-subsystem info before we free the + * memory associated with the region. 
*/ if (destroy) __db_region_destroy(dbenv, infop); @@ -1045,7 +1009,8 @@ __db_r_detach(dbenv, infop, destroy) * any unnecessary shared memory manipulation. */ if (destroy && - ((t_ret = __db_des_destroy(dbenv, rp, 0)) != 0) && ret == 0) + ((t_ret = __db_des_destroy( + dbenv, rp, F_ISSET(dbenv, DB_ENV_PRIVATE))) != 0) && ret == 0) ret = t_ret; /* Release the environment lock. */ @@ -1125,15 +1090,19 @@ __db_des_get(dbenv, env_infop, infop, rpp) * next available ID. */ if (rp == NULL) { - if ((ret = __db_shalloc(env_infop->addr, - sizeof(REGION), MUTEX_ALIGN, &rp)) != 0) + if ((ret = __db_shalloc(env_infop, + sizeof(REGION), MUTEX_ALIGN, &rp)) != 0) { + __db_err(dbenv, + "unable to create new master region entry: %s", + db_strerror(ret)); return (ret); + } /* Initialize the region. */ memset(rp, 0, sizeof(*rp)); if ((ret = __db_mutex_setup(dbenv, env_infop, &rp->mutex, MUTEX_NO_RECORD | MUTEX_NO_RLOCK)) != 0) { - __db_shalloc_free(env_infop->addr, rp); + __db_shalloc_free(env_infop, rp); return (ret); } rp->segid = INVALID_REGION_SEGID; @@ -1180,13 +1149,16 @@ __db_des_destroy(dbenv, rp, shmem_safe) * have a choice -- safe or not, we have to destroy the mutex or we'll * leak memory. */ +#ifdef HAVE_MUTEX_SYSTEM_RESOURCES + (void)__db_mutex_destroy(&rp->mutex); +#else if (shmem_safe) + (void)__db_mutex_destroy(&rp->mutex); +#endif + if (shmem_safe) { SH_LIST_REMOVE(rp, q, __db_region); - - __db_mutex_destroy(&rp->mutex); - - if (shmem_safe) - __db_shalloc_free(infop->addr, rp); + __db_shalloc_free(infop, rp); + } return (0); } @@ -1205,6 +1177,10 @@ __db_faultmem(dbenv, addr, size, created) int ret; u_int8_t *p, *t; + /* Ignore heap regions. */ + if (F_ISSET(dbenv, DB_ENV_PRIVATE)) + return (0); + /* * It's sometimes significantly faster to page-fault in all of the * region's pages before we run the application, as we see nasty @@ -1250,7 +1226,7 @@ __db_region_destroy(dbenv, infop) __log_region_destroy(dbenv, infop); break; case REGION_TYPE_MPOOL: - __mpool_region_destroy(dbenv, infop); + __memp_region_destroy(dbenv, infop); break; case REGION_TYPE_TXN: __txn_region_destroy(dbenv, infop); @@ -1258,6 +1234,7 @@ __db_region_destroy(dbenv, infop) case REGION_TYPE_ENV: case REGION_TYPE_MUTEX: break; + case INVALID_REGION_TYPE: default: DB_ASSERT(0); break; diff --git a/db/env/env_stat.c b/db/env/env_stat.c new file mode 100644 index 000000000..afec43c78 --- /dev/null +++ b/db/env/env_stat.c @@ -0,0 +1,656 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1996-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: env_stat.c,v 1.20 2004/09/28 20:29:52 bostic Exp $ + */ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include +#endif + +#include "db_int.h" +#include "dbinc/db_page.h" +#include "dbinc/db_shash.h" +#include "dbinc/db_am.h" +#include "dbinc/lock.h" +#include "dbinc/log.h" +#include "dbinc/mp.h" +#include "dbinc/txn.h" + +#ifdef HAVE_STATISTICS +static int __dbenv_print_all __P((DB_ENV *, u_int32_t)); +static int __dbenv_print_stats __P((DB_ENV *, u_int32_t)); +static int __dbenv_stat_print __P((DB_ENV *, u_int32_t)); +static const char *__reg_type __P((reg_type_t)); + +/* + * __dbenv_stat_print_pp -- + * DB_ENV->stat_print pre/post processor. 
+ * + * PUBLIC: int __dbenv_stat_print_pp __P((DB_ENV *, u_int32_t)); + */ +int +__dbenv_stat_print_pp(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + int rep_check, ret; + + PANIC_CHECK(dbenv); + ENV_ILLEGAL_BEFORE_OPEN(dbenv, "DB_ENV->stat_print"); + + if ((ret = __db_fchk(dbenv, "DB_ENV->stat_print", + flags, DB_STAT_ALL | DB_STAT_CLEAR | DB_STAT_SUBSYSTEM)) != 0) + return (ret); + + rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; + if (rep_check) + __env_rep_enter(dbenv); + ret = __dbenv_stat_print(dbenv, flags); + if (rep_check) + __env_db_rep_exit(dbenv); + return (ret); +} + +/* + * __dbenv_stat_print -- + * DB_ENV->stat_print method. + */ +static int +__dbenv_stat_print(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + DB *dbp; + int ret; + + if ((ret = __dbenv_print_stats(dbenv, flags)) != 0) + return (ret); + + if (LF_ISSET(DB_STAT_ALL) && + (ret = __dbenv_print_all(dbenv, flags)) != 0) + return (ret); + + if (!LF_ISSET(DB_STAT_SUBSYSTEM)) + return (0); + + /* The subsystems don't know anything about DB_STAT_SUBSYSTEM. */ + LF_CLR(DB_STAT_SUBSYSTEM); + + if (LOGGING_ON(dbenv)) { + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + if ((ret = __log_stat_print(dbenv, flags)) != 0) + return (ret); + } + + if (LOCKING_ON(dbenv)) { + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + if ((ret = __lock_stat_print(dbenv, flags)) != 0) + return (ret); + } + + if (MPOOL_ON(dbenv)) { + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + if ((ret = __memp_stat_print(dbenv, flags)) != 0) + return (ret); + } + + if (REP_ON(dbenv)) { + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + if ((ret = __rep_stat_print(dbenv, flags)) != 0) + return (ret); + } + + if (TXN_ON(dbenv)) { + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + if ((ret = __txn_stat_print(dbenv, flags)) != 0) + return (ret); + } + + MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp); + for (dbp = LIST_FIRST(&dbenv->dblist); + dbp != NULL; dbp = LIST_NEXT(dbp, dblistlinks)) { + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "%s%s%s\tDatabase name", + dbp->fname, dbp->dname == NULL ? "" : "/", + dbp->dname == NULL ? "" : dbp->dname); + if ((ret = __db_stat_print(dbp, flags)) != 0) + break; + } + MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp); + + return (ret); +} + +/* + * __dbenv_print_stats -- + * Display the default environment statistics. + * + */ +static int +__dbenv_print_stats(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + REGENV *renv; + REGINFO *infop; + + infop = dbenv->reginfo; + renv = infop->primary; + + if (LF_ISSET(DB_STAT_ALL)) { + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "Default database environment information:"); + } + __db_msg(dbenv, "%d.%d.%d\tEnvironment version", + renv->majver, renv->minver, renv->patch); + STAT_HEX("Magic number", renv->magic); + STAT_LONG("Panic value", renv->envpanic); + STAT_LONG("References", renv->refcnt); + + __db_print_mutex(dbenv, NULL, &renv->mutex, + "The number of region locks that required waiting", flags); + + return (0); +} + +/* + * __dbenv_print_all -- + * Display the debugging environment statistics. 
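All of the new env_stat.c output is reached through the DB_ENV->stat_print method wired up in env_method.c above. A hedged usage sketch, assuming a 4.3-era handle and an existing environment directory:

#include <stdio.h>
#include <db.h>

int
main(int argc, char *argv[])
{
	DB_ENV *dbenv;
	int ret;

	if (db_env_create(&dbenv, 0) != 0)
		return (1);

	/* The environment directory is assumed to already exist. */
	if ((ret = dbenv->open(dbenv, argc > 1 ? argv[1] : "env",
	    DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
	    DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0) {
		fprintf(stderr, "DB_ENV->open: %s\n", db_strerror(ret));
		goto err;
	}

	/* Default report plus one section per configured subsystem. */
	ret = dbenv->stat_print(dbenv, DB_STAT_SUBSYSTEM);

err:	(void)dbenv->close(dbenv, 0);
	return (ret == 0 ? 0 : 1);
}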
+ */ +static int +__dbenv_print_all(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + static const FN fn[] = { + { DB_ENV_AUTO_COMMIT, "DB_ENV_AUTO_COMMIT" }, + { DB_ENV_CDB, "DB_ENV_CDB" }, + { DB_ENV_CDB_ALLDB, "DB_ENV_CDB_ALLDB" }, + { DB_ENV_CREATE, "DB_ENV_CREATE" }, + { DB_ENV_DBLOCAL, "DB_ENV_DBLOCAL" }, + { DB_ENV_DIRECT_DB, "DB_ENV_DIRECT_DB" }, + { DB_ENV_DIRECT_LOG, "DB_ENV_DIRECT_LOG" }, + { DB_ENV_DSYNC_LOG, "DB_ENV_DSYNC_LOG" }, + { DB_ENV_FATAL, "DB_ENV_FATAL" }, + { DB_ENV_LOCKDOWN, "DB_ENV_LOCKDOWN" }, + { DB_ENV_LOG_AUTOREMOVE, "DB_ENV_LOG_AUTOREMOVE" }, + { DB_ENV_LOG_INMEMORY, "DB_ENV_LOG_INMEMORY" }, + { DB_ENV_NOLOCKING, "DB_ENV_NOLOCKING" }, + { DB_ENV_NOMMAP, "DB_ENV_NOMMAP" }, + { DB_ENV_NOPANIC, "DB_ENV_NOPANIC" }, + { DB_ENV_OPEN_CALLED, "DB_ENV_OPEN_CALLED" }, + { DB_ENV_OVERWRITE, "DB_ENV_OVERWRITE" }, + { DB_ENV_PRIVATE, "DB_ENV_PRIVATE" }, + { DB_ENV_REGION_INIT, "DB_ENV_REGION_INIT" }, + { DB_ENV_RPCCLIENT, "DB_ENV_RPCCLIENT" }, + { DB_ENV_RPCCLIENT_GIVEN, "DB_ENV_RPCCLIENT_GIVEN" }, + { DB_ENV_SYSTEM_MEM, "DB_ENV_SYSTEM_MEM" }, + { DB_ENV_THREAD, "DB_ENV_THREAD" }, + { DB_ENV_TIME_NOTGRANTED, "DB_ENV_TIME_NOTGRANTED" }, + { DB_ENV_TXN_NOSYNC, "DB_ENV_TXN_NOSYNC" }, + { DB_ENV_TXN_WRITE_NOSYNC, "DB_ENV_TXN_WRITE_NOSYNC" }, + { DB_ENV_YIELDCPU, "DB_ENV_YIELDCPU" }, + { 0, NULL } + }; + static const FN ofn[] = { + { DB_CREATE, "DB_CREATE" }, + { DB_CXX_NO_EXCEPTIONS, "DB_CXX_NO_EXCEPTIONS" }, + { DB_FORCE, "DB_FORCE" }, + { DB_INIT_CDB, "DB_INIT_CDB" }, + { DB_INIT_LOCK, "DB_INIT_LOCK" }, + { DB_INIT_LOG, "DB_INIT_LOG" }, + { DB_INIT_MPOOL, "DB_INIT_MPOOL" }, + { DB_INIT_REP, "DB_INIT_REP" }, + { DB_INIT_TXN, "DB_INIT_TXN" }, + { DB_JOINENV, "DB_JOINENV" }, + { DB_LOCKDOWN, "DB_LOCKDOWN" }, + { DB_NOMMAP, "DB_NOMMAP" }, + { DB_PRIVATE, "DB_PRIVATE" }, + { DB_RDONLY, "DB_RDONLY" }, + { DB_RECOVER, "DB_RECOVER" }, + { DB_RECOVER_FATAL, "DB_RECOVER_FATAL" }, + { DB_SYSTEM_MEM, "DB_SYSTEM_MEM" }, + { DB_THREAD, "DB_THREAD" }, + { DB_TRUNCATE, "DB_TRUNCATE" }, + { DB_TXN_NOSYNC, "DB_TXN_NOSYNC" }, + { DB_USE_ENVIRON, "DB_USE_ENVIRON" }, + { DB_USE_ENVIRON_ROOT, "DB_USE_ENVIRON_ROOT" }, + { 0, NULL } + }; + static const FN vfn[] = { + { DB_VERB_DEADLOCK, "DB_VERB_DEADLOCK" }, + { DB_VERB_RECOVERY, "DB_VERB_RECOVERY" }, + { DB_VERB_REPLICATION, "DB_VERB_REPLICATION" }, + { DB_VERB_WAITSFOR, "DB_VERB_WAITSFOR" }, + { 0, NULL } + }; + DB_MSGBUF mb; + REGENV *renv; + REGINFO *infop; + REGION *rp, regs[1024]; + size_t n; + char **p; + + infop = dbenv->reginfo; + renv = infop->primary; + DB_MSGBUF_INIT(&mb); + + /* + * Lock the database environment while we get copies of the region + * information. 
+	 */
+	MUTEX_LOCK(dbenv, &infop->rp->mutex);
+
+	for (n = 0, rp = SH_LIST_FIRST(&renv->regionq, __db_region);
+	    n < sizeof(regs) / sizeof(regs[0]) && rp != NULL;
+	    ++n, rp = SH_LIST_NEXT(rp, q, __db_region)) {
+		regs[n] = *rp;
+		if (LF_ISSET(DB_STAT_CLEAR))
+			MUTEX_CLEAR(&rp->mutex);
+	}
+	if (n > 0)
+		--n;
+	MUTEX_UNLOCK(dbenv, &infop->rp->mutex);
+
+	if (LF_ISSET(DB_STAT_ALL)) {
+		__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
+		__db_msg(dbenv, "Per region database environment information:");
+	}
+	while (n > 0) {
+		rp = &regs[--n];
+		__db_msg(dbenv, "%s Region:", __reg_type(rp->type));
+		STAT_LONG("Region ID", rp->id);
+		STAT_LONG("Segment ID", rp->segid);
+		__db_dlbytes(dbenv,
+		    "Size", (u_long)0, (u_long)0, (u_long)rp->size);
+		__db_print_mutex(dbenv, NULL, &rp->mutex,
+		    "The number of region locks that required waiting", flags);
+	}
+
+	__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
+	__db_msg(dbenv, "DB_ENV handle information:");
+	STAT_ISSET("Errfile", dbenv->db_errfile);
+	STAT_STRING("Errpfx", dbenv->db_errpfx);
+	STAT_ISSET("Errcall", dbenv->db_errcall);
+	STAT_ISSET("Feedback", dbenv->db_feedback);
+	STAT_ISSET("Panic", dbenv->db_paniccall);
+	STAT_ISSET("Malloc", dbenv->db_malloc);
+	STAT_ISSET("Realloc", dbenv->db_realloc);
+	STAT_ISSET("Free", dbenv->db_free);
+	__db_prflags(dbenv, NULL, dbenv->verbose, vfn, NULL, "\tVerbose flags");
+
+	STAT_ISSET("App private", dbenv->app_private);
+	STAT_ISSET("App dispatch", dbenv->app_dispatch);
+	STAT_STRING("Home", dbenv->db_home);
+	STAT_STRING("Log dir", dbenv->db_log_dir);
+	STAT_STRING("Tmp dir", dbenv->db_tmp_dir);
+	if (dbenv->db_data_dir == NULL)
+		STAT_ISSET("Data dir", dbenv->db_data_dir);
+	else {
+		for (p = dbenv->db_data_dir; *p != NULL; ++p)
+			__db_msgadd(dbenv, &mb, "%s\tData dir", *p);
+		DB_MSGBUF_FLUSH(dbenv, &mb);
+	}
+	STAT_FMT("Mode", "%#o", int, dbenv->db_mode);
+	__db_prflags(dbenv, NULL, dbenv->open_flags, ofn, NULL, "\tOpen flags");
+	STAT_ISSET("Lockfhp", dbenv->lockfhp);
+	STAT_ISSET("Rec tab", dbenv->recover_dtab);
+	STAT_ULONG("Rec tab slots", dbenv->recover_dtab_size);
+	STAT_ISSET("RPC client", dbenv->cl_handle);
+	STAT_LONG("RPC client ID", dbenv->cl_id);
+	STAT_LONG("DB ref count", dbenv->db_ref);
+	STAT_LONG("Shared mem key", dbenv->shm_key);
+	STAT_ULONG("test-and-set spin configuration", dbenv->tas_spins);
+	__db_print_mutex(
+	    dbenv, NULL, dbenv->dblist_mutexp, "DB handle mutex", flags);
+
+	STAT_ISSET("api1 internal", dbenv->api1_internal);
+	STAT_ISSET("api2 internal", dbenv->api2_internal);
+	STAT_ISSET("password", dbenv->passwd);
+	STAT_ISSET("crypto handle", dbenv->crypto_handle);
+	__db_print_mutex(dbenv, NULL, dbenv->mt_mutexp, "MT mutex", flags);
+
+	__db_prflags(dbenv, NULL, dbenv->flags, fn, NULL, "\tFlags");
+
+	return (0);
+}
+
+/*
+ * __db_print_fh --
+ *	Print out a file handle.
+ * + * PUBLIC: void __db_print_fh __P((DB_ENV *, DB_FH *, u_int32_t)); + */ +void +__db_print_fh(dbenv, fh, flags) + DB_ENV *dbenv; + DB_FH *fh; + u_int32_t flags; +{ + static const FN fn[] = { + { DB_FH_NOSYNC, "DB_FH_NOSYNC" }, + { DB_FH_OPENED, "DB_FH_OPENED" }, + { DB_FH_UNLINK, "DB_FH_UNLINK" }, + { 0, NULL } + }; + + __db_print_mutex(dbenv, NULL, fh->mutexp, "file-handle.mutex", flags); + + STAT_LONG("file-handle.reference count", fh->ref); + STAT_LONG("file-handle.file descriptor", fh->fd); + STAT_STRING("file-handle.file name", fh->name); + + STAT_ULONG("file-handle.page number", fh->pgno); + STAT_ULONG("file-handle.page size", fh->pgsize); + STAT_ULONG("file-handle.page offset", fh->offset); + + __db_prflags(dbenv, NULL, fh->flags, fn, NULL, "\tfile-handle.flags"); +} + +/* + * __db_print_fileid -- + * Print out a file ID. + * + * PUBLIC: void __db_print_fileid __P((DB_ENV *, u_int8_t *, const char *)); + */ +void +__db_print_fileid(dbenv, id, suffix) + DB_ENV *dbenv; + u_int8_t *id; + const char *suffix; +{ + DB_MSGBUF mb; + int i; + + DB_MSGBUF_INIT(&mb); + for (i = 0; i < DB_FILE_ID_LEN; ++i, ++id) { + __db_msgadd(dbenv, &mb, "%x", (u_int)*id); + if (i < DB_FILE_ID_LEN - 1) + __db_msgadd(dbenv, &mb, " "); + } + if (suffix != NULL) + __db_msgadd(dbenv, &mb, "%s", suffix); + DB_MSGBUF_FLUSH(dbenv, &mb); +} + +/* + * __db_print_mutex -- + * Print out mutex statistics. + * + * PUBLIC: void __db_print_mutex + * PUBLIC: __P((DB_ENV *, DB_MSGBUF *, DB_MUTEX *, const char *, u_int32_t)); + */ +void +__db_print_mutex(dbenv, mbp, mutex, suffix, flags) + DB_ENV *dbenv; + DB_MSGBUF *mbp; + DB_MUTEX *mutex; + const char *suffix; + u_int32_t flags; +{ + DB_MSGBUF mb; + u_long value; + int standalone; + + /* If we don't have a mutex, point that out and return. */ + if (mutex == NULL) { + STAT_ISSET(suffix, mutex); + return; + } + + if (mbp == NULL) { + DB_MSGBUF_INIT(&mb); + mbp = &mb; + standalone = 1; + } else + standalone = 0; + + /* + * !!! + * We may not hold the mutex lock -- that's OK, we're only reading + * the statistics. + */ + if ((value = mutex->mutex_set_wait) < 10000000) + __db_msgadd(dbenv, mbp, "%lu", value); + else + __db_msgadd(dbenv, mbp, "%luM", value / 1000000); + + /* + * If standalone, append the mutex percent and the locker information + * after the suffix line. Otherwise, append it after the counter. + * + * The setting of "suffix" tracks "standalone" -- if standalone, expect + * a suffix and prefix it with a , otherwise, it's optional. This + * isn't a design, it's just the semantics we happen to need right now. + */ + if (standalone) { + if (suffix == NULL) /* Defense. */ + suffix = ""; + + __db_msgadd(dbenv, &mb, "\t%s (%d%%", suffix, + DB_PCT(mutex->mutex_set_wait, + mutex->mutex_set_wait + mutex->mutex_set_nowait)); +#ifdef DIAGNOSTIC + if (mutex->locked != 0) + __db_msgadd(dbenv, &mb, "/%lu", (u_long)mutex->locked); +#endif + __db_msgadd(dbenv, &mb, ")"); + + DB_MSGBUF_FLUSH(dbenv, mbp); + } else { + __db_msgadd(dbenv, mbp, "/%d%%", DB_PCT(mutex->mutex_set_wait, + mutex->mutex_set_wait + mutex->mutex_set_nowait)); +#ifdef DIAGNOSTIC + if (mutex->locked) + __db_msgadd(dbenv, mbp, "/%lu", (u_long)mutex->locked); +#endif + if (suffix != NULL) + __db_msgadd(dbenv, mbp, "%s", suffix); + } + + if (LF_ISSET(DB_STAT_CLEAR)) + MUTEX_CLEAR(mutex); +} + +/* + * __db_dl -- + * Display a big value. 
+ * + * PUBLIC: void __db_dl __P((DB_ENV *, const char *, u_long)); + */ +void +__db_dl(dbenv, msg, value) + DB_ENV *dbenv; + const char *msg; + u_long value; +{ + /* + * Two formats: if less than 10 million, display as the number, if + * greater than 10 million display as ###M. + */ + if (value < 10000000) + __db_msg(dbenv, "%lu\t%s", value, msg); + else + __db_msg(dbenv, "%luM\t%s (%lu)", value / 1000000, msg, value); +} + +/* + * __db_dl_pct -- + * Display a big value, and related percentage. + * + * PUBLIC: void __db_dl_pct + * PUBLIC: __P((DB_ENV *, const char *, u_long, int, const char *)); + */ +void +__db_dl_pct(dbenv, msg, value, pct, tag) + DB_ENV *dbenv; + const char *msg, *tag; + u_long value; + int pct; +{ + DB_MSGBUF mb; + + DB_MSGBUF_INIT(&mb); + + /* + * Two formats: if less than 10 million, display as the number, if + * greater than 10 million display as ###M. + */ + if (value < 10000000) + __db_msgadd(dbenv, &mb, "%lu\t%s", value, msg); + else + __db_msgadd(dbenv, &mb, "%luM\t%s", value / 1000000, msg); + if (tag == NULL) + __db_msgadd(dbenv, &mb, " (%d%%)", pct); + else + __db_msgadd(dbenv, &mb, " (%d%% %s)", pct, tag); + + DB_MSGBUF_FLUSH(dbenv, &mb); +} + +/* + * __db_dlbytes -- + * Display a big number of bytes. + * + * PUBLIC: void __db_dlbytes + * PUBLIC: __P((DB_ENV *, const char *, u_long, u_long, u_long)); + */ +void +__db_dlbytes(dbenv, msg, gbytes, mbytes, bytes) + DB_ENV *dbenv; + const char *msg; + u_long gbytes, mbytes, bytes; +{ + DB_MSGBUF mb; + const char *sep; + + DB_MSGBUF_INIT(&mb); + + /* Normalize the values. */ + while (bytes >= MEGABYTE) { + ++mbytes; + bytes -= MEGABYTE; + } + while (mbytes >= GIGABYTE / MEGABYTE) { + ++gbytes; + mbytes -= GIGABYTE / MEGABYTE; + } + + if (gbytes == 0 && mbytes == 0 && bytes == 0) + __db_msgadd(dbenv, &mb, "0"); + else { + sep = ""; + if (gbytes > 0) { + __db_msgadd(dbenv, &mb, "%luGB", gbytes); + sep = " "; + } + if (mbytes > 0) { + __db_msgadd(dbenv, &mb, "%s%luMB", sep, mbytes); + sep = " "; + } + if (bytes >= 1024) { + __db_msgadd(dbenv, &mb, "%s%luKB", sep, bytes / 1024); + bytes %= 1024; + sep = " "; + } + if (bytes > 0) + __db_msgadd(dbenv, &mb, "%s%luB", sep, bytes); + } + + __db_msgadd(dbenv, &mb, "\t%s", msg); + + DB_MSGBUF_FLUSH(dbenv, &mb); +} + +/* + * __db_print_reginfo -- + * Print out underlying shared region information. + * + * PUBLIC: void __db_print_reginfo __P((DB_ENV *, REGINFO *, const char *)); + */ +void +__db_print_reginfo(dbenv, infop, s) + DB_ENV *dbenv; + REGINFO *infop; + const char *s; +{ + static const FN fn[] = { + { REGION_CREATE, "REGION_CREATE" }, + { REGION_CREATE_OK, "REGION_CREATE_OK" }, + { REGION_JOIN_OK, "REGION_JOIN_OK" }, + { 0, NULL } + }; + + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "%s REGINFO information:", s); + STAT_STRING("Region type", __reg_type(infop->type)); + STAT_ULONG("Region ID", infop->id); + STAT_STRING("Region name", infop->name); + STAT_HEX("Original region address", infop->addr_orig); + STAT_HEX("Region address", infop->addr); + STAT_HEX("Region primary address", infop->primary); + STAT_ULONG("Region maximum allocation", infop->max_alloc); + STAT_ULONG("Region allocated", infop->max_alloc); + + __db_prflags(dbenv, NULL, infop->flags, fn, NULL, "\tRegion flags"); +} + +/* + * __reg_type -- + * Return the region type string. 
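+ *	(For example, a REGION_TYPE_MPOOL region is labeled "Mpool" in
+ *	the per-region output above; unrecognized values print as
+ *	"Unknown".)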
+ */ +static const char * +__reg_type(t) + reg_type_t t; +{ + switch (t) { + case REGION_TYPE_ENV: + return ("Environment"); + case REGION_TYPE_LOCK: + return ("Lock"); + case REGION_TYPE_LOG: + return ("Log"); + case REGION_TYPE_MPOOL: + return ("Mpool"); + case REGION_TYPE_MUTEX: + return ("Mutex"); + case REGION_TYPE_TXN: + return ("Transaction"); + case INVALID_REGION_TYPE: + return ("Invalid"); + } + return ("Unknown"); +} + +#else /* !HAVE_STATISTICS */ + +/* + * __db_stat_not_built -- + * Common error routine when library not built with statistics. + * + * PUBLIC: int __db_stat_not_built __P((DB_ENV *)); + */ +int +__db_stat_not_built(dbenv) + DB_ENV *dbenv; +{ + __db_err(dbenv, "Library build did not include statistics support"); + return (DB_OPNOTSUP); +} + +int +__dbenv_stat_print_pp(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + COMPQUIET(flags, 0); + + return (__db_stat_not_built(dbenv)); +} +#endif diff --git a/db/examples_c/README b/db/examples_c/README index d5475ba01..722102de3 100644 --- a/db/examples_c/README +++ b/db/examples_c/README @@ -1,21 +1,32 @@ -# $Id: README,v 11.5 2002/02/26 16:22:45 krinsky Exp $ +# $Id: README,v 11.6 2004/09/23 17:49:25 mjc Exp $ + +getting_started/ + Examples from the Getting Started Guide + +bench_001.c Micro-benchmark for the bulk fetch interface. ex_access.c Using just the DB access methods. -ex_apprec Application-specific recovery. +ex_apprec/ Application-specific recovery. ex_btrec.c Using the BTREE access method with record numbers. +ex_dbclient.c Using DB from an RPC client. + ex_env.c Setting up the DB environment. ex_lock.c Locking. ex_mpool.c Shared memory buffer pools. -ex_repquote Replication. This creates a toy stock quote server +ex_repquote/ Replication. This creates a toy stock quote server with DB's single-master, multiple-client replication, with communication over TCP. +ex_sequence.c Sequences. + +ex_thread.c Threaded application with multiple readers and writers. + ex_tpcb.c TPC/B. Ex_tpcb sets up a framework in which to run a TPC/B test. Database initialization (the -i flag) and running the diff --git a/db/examples_c/bench_001.c b/db/examples_c/bench_001.c index 6754be195..27bdd5d4d 100644 --- a/db/examples_c/bench_001.c +++ b/db/examples_c/bench_001.c @@ -1,8 +1,8 @@ /*- - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. * - * $Id: bench_001.c,v 1.15 2003/01/08 04:43:48 bostic Exp $ + * $Id: bench_001.c,v 1.17 2004/09/22 03:44:28 bostic Exp $ */ /* @@ -178,7 +178,7 @@ fill(dbenv, dbp, txn, datalen, num, dups) key.size = sizeof(i); data.data = data_val = (struct data *) malloc(datalen); memcpy(data_val->str, "0123456789012345678901234567890123456789", - datalen - sizeof (data_val->id)); + datalen - sizeof(data_val->id)); data.size = datalen; data.flags = DB_DBT_USERMEM; diff --git a/db/examples_c/ex_access.c b/db/examples_c/ex_access.c index 1e4c0bdc1..7a8863713 100644 --- a/db/examples_c/ex_access.c +++ b/db/examples_c/ex_access.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: ex_access.c,v 11.23 2003/01/08 04:43:53 bostic Exp $ + * $Id: ex_access.c,v 11.25 2004/09/17 22:00:28 mjc Exp $ */ #include @@ -34,7 +34,7 @@ main(argc, argv) DB *dbp; DBC *dbcp; DBT key, data; - u_int32_t len; + size_t len; int ch, ret, rflag; char *database, *p, *t, buf[1024], rbuf[1024]; const char *progname = "ex_access"; /* Program name. 
*/ @@ -102,7 +102,7 @@ main(argc, argv) key.data = buf; data.data = rbuf; - data.size = key.size = len - 1; + data.size = key.size = (u_int32_t)len - 1; switch (ret = dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE)) { diff --git a/db/examples_c/ex_apprec/auto_rebuild b/db/examples_c/ex_apprec/auto_rebuild index 342519848..aadcf6432 100644 --- a/db/examples_c/ex_apprec/auto_rebuild +++ b/db/examples_c/ex_apprec/auto_rebuild @@ -6,4 +6,5 @@ cd ../../dist awk -f gen_rec.awk \ -v source_file=$E/ex_apprec_auto.c \ -v header_file=$E/ex_apprec_auto.h \ + -v print_file=$E/ex_apprec_autop.c \ -v template_file=$E/ex_apprec_template < $E/ex_apprec.src diff --git a/db/examples_c/ex_apprec/ex_apprec.c b/db/examples_c/ex_apprec/ex_apprec.c index 7167bf59a..7eead81c8 100644 --- a/db/examples_c/ex_apprec/ex_apprec.c +++ b/db/examples_c/ex_apprec/ex_apprec.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: ex_apprec.c,v 1.4 2003/02/14 03:57:30 bostic Exp $ + * $Id: ex_apprec.c,v 1.5 2004/01/28 03:36:03 bostic Exp $ */ #include diff --git a/db/examples_c/ex_apprec/ex_apprec.h b/db/examples_c/ex_apprec/ex_apprec.h index b8eb02544..b77308fa5 100644 --- a/db/examples_c/ex_apprec/ex_apprec.h +++ b/db/examples_c/ex_apprec/ex_apprec.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2002-2003 + * Copyright (c) 2002-2004 * Sleepycat Software. All rights reserved. * - * $Id: ex_apprec.h,v 1.4 2003/01/08 04:45:12 bostic Exp $ + * $Id: ex_apprec.h,v 1.5 2004/01/28 03:36:03 bostic Exp $ */ #ifndef _EX_APPREC_H_ diff --git a/db/examples_c/ex_apprec/ex_apprec.src b/db/examples_c/ex_apprec/ex_apprec.src index 3880ec756..8027a8674 100644 --- a/db/examples_c/ex_apprec/ex_apprec.src +++ b/db/examples_c/ex_apprec/ex_apprec.src @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2002-2003 + * Copyright (c) 2002-2004 * Sleepycat Software. All rights reserved. * - * $Id: ex_apprec.src,v 1.4 2003/01/08 04:45:15 bostic Exp $ + * $Id: ex_apprec.src,v 1.5 2004/01/28 03:36:03 bostic Exp $ */ PREFIX ex_apprec diff --git a/db/examples_c/ex_apprec/ex_apprec_auto.c b/db/examples_c/ex_apprec/ex_apprec_auto.c index cc9fc84d1..e4ad1e2b9 100644 --- a/db/examples_c/ex_apprec/ex_apprec_auto.c +++ b/db/examples_c/ex_apprec/ex_apprec_auto.c @@ -1,4 +1,7 @@ /* Do not edit: automatically built by gen_rec.awk. */ + +#include "db_config.h" + #include #include #include @@ -21,7 +24,7 @@ ex_apprec_mkdir_log(dbenv, txnid, ret_lsnp, flags, const DBT *dirname; { DBT logrec; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, rectype, txn_num; u_int npad; u_int8_t *bp; @@ -29,13 +32,22 @@ ex_apprec_mkdir_log(dbenv, txnid, ret_lsnp, flags, rectype = DB_ex_apprec_mkdir; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -44,6 +56,8 @@ ex_apprec_mkdir_log(dbenv, txnid, ret_lsnp, flags, + sizeof(u_int32_t) + (dirname == NULL ? 
0 : dirname->size); if ((logrec.data = malloc(logrec.size)) == NULL) return (ENOMEM); + bp = logrec.data; + if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -69,63 +83,22 @@ ex_apprec_mkdir_log(dbenv, txnid, ret_lsnp, flags, bp += dirname->size; } - ret = dbenv->log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; - + if ((ret = dbenv->log_put(dbenv, rlsnp, (DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)ex_apprec_mkdir_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif - free(logrec.data); + free(logrec.data); return (ret); } -/* - * PUBLIC: int ex_apprec_mkdir_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -ex_apprec_mkdir_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - ex_apprec_mkdir_args *argp; - u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = ex_apprec_mkdir_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]ex_apprec_mkdir%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tdirname: "); - for (i = 0; i < argp->dirname.size; i++) { - ch = ((u_int8_t *)argp->dirname.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\n"); - free(argp); - - return (0); -} - /* * PUBLIC: int ex_apprec_mkdir_read __P((DB_ENV *, void *, * PUBLIC: ex_apprec_mkdir_args **)); @@ -143,9 +116,9 @@ ex_apprec_mkdir_read(dbenv, recbuf, argpp) dbenv = NULL; if ((argp = malloc(sizeof(ex_apprec_mkdir_args) + sizeof(DB_TXN))) == NULL) return (ENOMEM); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -165,25 +138,3 @@ ex_apprec_mkdir_read(dbenv, recbuf, argpp) return (0); } -/* - * PUBLIC: int ex_apprec_init_print __P((DB_ENV *, int (***)(DB_ENV *, - * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); - */ -int -ex_apprec_init_print(dbenv, dtabp, dtabsizep) - DB_ENV *dbenv; - int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - size_t *dtabsizep; -{ - int __db_add_recovery __P((DB_ENV *, - int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), - size_t *, - int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), u_int32_t)); - int ret; - - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - ex_apprec_mkdir_print, DB_ex_apprec_mkdir)) != 0) - return (ret); - return (0); -} - diff --git a/db/examples_c/ex_apprec/ex_apprec_rec.c b/db/examples_c/ex_apprec/ex_apprec_rec.c index 065a928b2..faaa29424 100644 --- a/db/examples_c/ex_apprec/ex_apprec_rec.c +++ b/db/examples_c/ex_apprec/ex_apprec_rec.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
* - * $Id: ex_apprec_rec.c,v 1.3 2003/01/08 04:45:19 bostic Exp $ + * $Id: ex_apprec_rec.c,v 1.4 2004/01/28 03:36:03 bostic Exp $ */ /* diff --git a/db/examples_c/ex_btrec.c b/db/examples_c/ex_btrec.c index 641b6eb7c..432f6ce50 100644 --- a/db/examples_c/ex_btrec.c +++ b/db/examples_c/ex_btrec.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: ex_btrec.c,v 11.19 2003/01/08 04:43:54 bostic Exp $ + * $Id: ex_btrec.c,v 11.22 2004/09/17 22:00:28 mjc Exp $ */ #include @@ -37,7 +37,7 @@ ex_btrec() DB_BTREE_STAT *statp; FILE *fp; db_recno_t recno; - u_int32_t len; + size_t len; int cnt, ret; char *p, *t, buf[1024], rbuf[1024]; const char *progname = "ex_btrec"; /* Program name. */ @@ -92,7 +92,7 @@ ex_btrec() key.data = buf; data.data = rbuf; - data.size = key.size = len - 1; + data.size = key.size = (u_int32_t)len - 1; if ((ret = dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE)) != 0) { @@ -106,7 +106,7 @@ ex_btrec() (void)fclose(fp); /* Print out the number of records in the database. */ - if ((ret = dbp->stat(dbp, &statp, 0)) != 0) { + if ((ret = dbp->stat(dbp, NULL, &statp, 0)) != 0) { dbp->err(dbp, ret, "DB->stat"); goto err1; } diff --git a/db/examples_c/ex_dbclient.c b/db/examples_c/ex_dbclient.c index d92c1299e..171532d8d 100644 --- a/db/examples_c/ex_dbclient.c +++ b/db/examples_c/ex_dbclient.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: ex_dbclient.c,v 1.30 2003/05/16 15:06:20 sue Exp $ + * $Id: ex_dbclient.c,v 1.32 2004/09/23 19:00:23 bostic Exp $ */ #include @@ -21,6 +21,7 @@ #define DATABASE "access.db" int db_clientrun __P((DB_ENV *, const char *)); +int ex_dbclient __P((const char *)); int ex_dbclient_run __P((const char *, FILE *, const char *, const char *)); int main __P((int, char *[])); diff --git a/db/examples_c/ex_env.c b/db/examples_c/ex_env.c index 969941b0d..8ca1768f4 100644 --- a/db/examples_c/ex_env.c +++ b/db/examples_c/ex_env.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: ex_env.c,v 11.28 2003/01/08 04:43:57 bostic Exp $ + * $Id: ex_env.c,v 11.29 2004/01/28 03:36:03 bostic Exp $ */ #include diff --git a/db/examples_c/ex_lock.c b/db/examples_c/ex_lock.c index 1e4c70cac..165834d3a 100644 --- a/db/examples_c/ex_lock.c +++ b/db/examples_c/ex_lock.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. 
* - * $Id: ex_lock.c,v 11.19 2003/01/08 04:43:59 bostic Exp $ + * $Id: ex_lock.c,v 11.21 2004/09/17 22:00:28 mjc Exp $ */ #include @@ -40,7 +40,8 @@ main(argc, argv) DB_LOCK *locks; db_lockmode_t lock_type; long held; - u_int32_t len, locker, maxlocks; + size_t len; + u_int32_t locker, maxlocks; int ch, do_unlink, did_get, i, lockid, lockcount, ret; const char *home; char opbuf[16], objbuf[1024], lockbuf[16]; @@ -119,7 +120,7 @@ main(argc, argv) lock_type = DB_LOCK_WRITE; lock_dbt.data = objbuf; - lock_dbt.size = strlen(objbuf); + lock_dbt.size = (u_int32_t)strlen(objbuf); ret = dbenv->lock_get(dbenv, locker, DB_LOCK_NOWAIT, &lock_dbt, lock_type, &lock); if (ret == 0) { diff --git a/db/examples_c/ex_mpool.c b/db/examples_c/ex_mpool.c index 96bd93b15..f0be9ab96 100644 --- a/db/examples_c/ex_mpool.c +++ b/db/examples_c/ex_mpool.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: ex_mpool.c,v 11.29 2003/09/04 18:06:47 bostic Exp $ + * $Id: ex_mpool.c,v 11.30 2004/01/28 03:36:03 bostic Exp $ */ #include diff --git a/db/examples_c/ex_repquote/ex_repquote.h b/db/examples_c/ex_repquote/ex_repquote.h index 1b87a481c..7a7fe338b 100644 --- a/db/examples_c/ex_repquote/ex_repquote.h +++ b/db/examples_c/ex_repquote/ex_repquote.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. * - * $Id: ex_repquote.h,v 1.34 2003/07/29 02:26:18 margo Exp $ + * $Id: ex_repquote.h,v 1.35 2004/01/28 03:36:03 bostic Exp $ */ #ifndef _EX_REPQUOTE_H_ diff --git a/db/examples_c/ex_repquote/ex_rq_client.c b/db/examples_c/ex_repquote/ex_rq_client.c index d14cd79a3..d26552f63 100644 --- a/db/examples_c/ex_repquote/ex_rq_client.c +++ b/db/examples_c/ex_repquote/ex_rq_client.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. * - * $Id: ex_rq_client.c,v 1.37 2003/09/05 00:05:34 bostic Exp $ + * $Id: ex_rq_client.c,v 1.39 2004/01/28 03:36:03 bostic Exp $ */ #include @@ -118,7 +118,7 @@ check_loop(args) } else { machtab_parm(machtab, &n, &pri, &timeout); if (dbenv->rep_elect(dbenv, - n, pri, timeout, &master_eid) == 0) + n, (n/2+1), pri, timeout, &master_eid, 0) == 0) break; count = 0; } diff --git a/db/examples_c/ex_repquote/ex_rq_main.c b/db/examples_c/ex_repquote/ex_rq_main.c index 954d6d12f..0fb98176d 100644 --- a/db/examples_c/ex_repquote/ex_rq_main.c +++ b/db/examples_c/ex_repquote/ex_rq_main.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. * - * $Id: ex_rq_main.c,v 1.32 2003/07/29 02:26:18 margo Exp $ + * $Id: ex_rq_main.c,v 1.34 2004/07/16 14:56:22 bostic Exp $ */ #include @@ -24,8 +24,8 @@ int master_eid; char *myaddr; unsigned short myport; -static int env_init __P((const char *, const char *, DB_ENV **, machtab_t *, - u_int32_t)); +static int env_init + __P((const char *, const char *, DB_ENV **, machtab_t *, u_int32_t)); static void usage __P((const char *)); int @@ -281,7 +281,7 @@ usage(progname) } /* Open and configure an environment. 
*/ -int +static int env_init(progname, home, dbenvp, machtab, flags) const char *progname, *home; DB_ENV **dbenvp; diff --git a/db/examples_c/ex_repquote/ex_rq_master.c b/db/examples_c/ex_repquote/ex_rq_master.c index 27557f872..59e6abaf1 100644 --- a/db/examples_c/ex_repquote/ex_rq_master.c +++ b/db/examples_c/ex_repquote/ex_rq_master.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. * - * $Id: ex_rq_master.c,v 1.26 2003/07/04 17:45:06 margo Exp $ + * $Id: ex_rq_master.c,v 1.27 2004/01/28 03:36:03 bostic Exp $ */ #include diff --git a/db/examples_c/ex_repquote/ex_rq_net.c b/db/examples_c/ex_repquote/ex_rq_net.c index 02a1f164a..828aab582 100644 --- a/db/examples_c/ex_repquote/ex_rq_net.c +++ b/db/examples_c/ex_repquote/ex_rq_net.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. * - * $Id: ex_rq_net.c,v 1.46 2003/09/05 00:05:34 bostic Exp $ + * $Id: ex_rq_net.c,v 1.47 2004/01/28 03:36:03 bostic Exp $ */ #include diff --git a/db/examples_c/ex_repquote/ex_rq_util.c b/db/examples_c/ex_repquote/ex_rq_util.c index 3d492fda0..8f922d4dd 100644 --- a/db/examples_c/ex_repquote/ex_rq_util.c +++ b/db/examples_c/ex_repquote/ex_rq_util.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. * - * $Id: ex_rq_util.c,v 1.34 2003/07/14 21:30:24 mjc Exp $ + * $Id: ex_rq_util.c,v 1.38 2004/07/16 14:57:38 bostic Exp $ */ #include @@ -17,11 +17,10 @@ #include "ex_repquote.h" -static int connect_site __P((DB_ENV *, - machtab_t *, const char *, repsite_t *, int *, int *, - thread *)); -static void *elect_thread __P((void *)); -static void *hm_loop __P((void *)); +static int connect_site __P((DB_ENV *, machtab_t *, + const char *, repsite_t *, int *, int *, thread *)); +static void *elect_thread __P((void *)); +static void *hm_loop __P((void *)); typedef struct { DB_ENV *dbenv; @@ -105,7 +104,7 @@ hm_loop(args) master_eid = DB_EID_INVALID; machtab_parm(tab, &n, &pri, &timeout); if ((ret = dbenv->rep_elect(dbenv, - n, pri, timeout, &newm)) != 0) + n, (n/2+1), pri, timeout, &newm, 0)) != 0) continue; /* @@ -314,7 +313,7 @@ connect_all(args) hm_thr = NULL; success = NULL; - /* Some implementations of calloc are sad about alloc'ing 0 things. */ + /* Some implementations of calloc are sad about allocating 0 things. */ if ((success = calloc(nsites > 0 ? nsites : 1, sizeof(int))) == NULL) { dbenv->err(dbenv, errno, "connect_all"); ret = 1; @@ -364,7 +363,7 @@ err: if (success != NULL) return (ret ? (void *)EXIT_FAILURE : (void *)EXIT_SUCCESS); } -int +static int connect_site(dbenv, machtab, progname, site, is_open, eidp, hm_thrp) DB_ENV *dbenv; machtab_t *machtab; @@ -411,7 +410,7 @@ err: * We need to spawn off a new thread in which to hold an election in * case we are the only thread listening on for messages. */ -void * +static void * elect_thread(args) void *args; { @@ -427,8 +426,8 @@ elect_thread(args) free(eargs); machtab_parm(machtab, &n, &pri, &timeout); - while ((ret = - dbenv->rep_elect(dbenv, n, pri, timeout, &master_eid)) != 0) + while ((ret = dbenv->rep_elect(dbenv, n, (n/2+1), pri, timeout, + &master_eid, 0)) != 0) sleep(2); /* Check if it's us. 
*/ diff --git a/db/examples_c/ex_sequence.c b/db/examples_c/ex_sequence.c new file mode 100644 index 000000000..678091a3f --- /dev/null +++ b/db/examples_c/ex_sequence.c @@ -0,0 +1,124 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1997-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: ex_sequence.c,v 1.2 2004/09/23 15:38:02 mjc Exp $ + */ + +#include + +#include +#include +#include + +#ifdef _WIN32 +extern int getopt(int, char * const *, const char *); +#else +#include +#endif + +#include + +#define DATABASE "sequence.db" +#define SEQUENCE "my_sequence" +int main __P((int, char *[])); +int usage __P((void)); + +int +main(argc, argv) + int argc; + char *argv[]; +{ + extern int optind; + DB *dbp; + DB_SEQUENCE *seq; + DBT key; + int i, ret, rflag; + db_seq_t seqnum; + char ch; + const char *database, *progname = "ex_sequence"; + + rflag = 0; + while ((ch = getopt(argc, argv, "r")) != EOF) + switch (ch) { + case 'r': + rflag = 1; + break; + case '?': + default: + return (usage()); + } + argc -= optind; + argv += optind; + + /* Accept optional database name. */ + database = *argv == NULL ? DATABASE : argv[0]; + + /* Optionally discard the database. */ + if (rflag) + (void)remove(database); + + /* Create and initialize database object, open the database. */ + if ((ret = db_create(&dbp, NULL, 0)) != 0) { + fprintf(stderr, + "%s: db_create: %s\n", progname, db_strerror(ret)); + return (EXIT_FAILURE); + } + dbp->set_errfile(dbp, stderr); + dbp->set_errpfx(dbp, progname); + if ((ret = dbp->open(dbp, + NULL, database, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) { + dbp->err(dbp, ret, "%s: open", database); + goto err1; + } + + if ((ret = db_sequence_create(&seq, dbp, 0)) != 0) { + dbp->err(dbp, ret, "db_sequence_create"); + goto err1; + } + + memset(&key, 0, sizeof (DBT)); + key.data = SEQUENCE; + key.size = (u_int32_t)strlen(SEQUENCE); + + if ((ret = seq->open(seq, NULL, &key, DB_CREATE)) != 0) { + dbp->err(dbp, ret, "%s: DB_SEQUENCE->open", SEQUENCE); + goto err2; + } + + for (i = 0; i < 10; i++) { + if ((ret = seq->get(seq, NULL, 1, &seqnum, 0)) != 0) { + dbp->err(dbp, ret, "DB_SEQUENCE->get"); + goto err2; + } + + /* We don't have a portable way to print 64-bit numbers. */ + printf("Got sequence number (%x, %x)\n", + (int)(seqnum >> 32), (unsigned)seqnum); + } + + /* Close everything down. */ + if ((ret = seq->close(seq, 0)) != 0) { + dbp->err(dbp, ret, "DB_SEQUENCE->close"); + goto err1; + } + if ((ret = dbp->close(dbp, 0)) != 0) { + fprintf(stderr, + "%s: DB->close: %s\n", progname, db_strerror(ret)); + return (EXIT_FAILURE); + } + return (EXIT_SUCCESS); + +err2: (void)seq->close(seq, 0); +err1: (void)dbp->close(dbp, 0); + return (EXIT_FAILURE); +} + +int +usage() +{ + (void)fprintf(stderr, "usage: ex_sequence [-r] [database]\n"); + return (EXIT_FAILURE); +} diff --git a/db/examples_c/ex_thread.c b/db/examples_c/ex_thread.c index db9d301f1..2db144d1d 100644 --- a/db/examples_c/ex_thread.c +++ b/db/examples_c/ex_thread.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. 
* - * $Id: ex_thread.c,v 11.35 2003/01/08 04:44:00 bostic Exp $ + * $Id: ex_thread.c,v 11.36 2004/01/28 03:36:03 bostic Exp $ */ #include diff --git a/db/examples_c/ex_tpcb.c b/db/examples_c/ex_tpcb.c index 342834004..f0b8d7864 100644 --- a/db/examples_c/ex_tpcb.c +++ b/db/examples_c/ex_tpcb.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: ex_tpcb.c,v 11.44 2003/04/24 15:44:15 bostic Exp $ + * $Id: ex_tpcb.c,v 11.45 2004/01/28 03:36:03 bostic Exp $ */ #include diff --git a/db/examples_c/ex_tpcb.h b/db/examples_c/ex_tpcb.h index 2a85c8c06..c4868ae60 100644 --- a/db/examples_c/ex_tpcb.h +++ b/db/examples_c/ex_tpcb.h @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: ex_tpcb.h,v 11.7 2003/01/08 04:44:03 bostic Exp $ + * $Id: ex_tpcb.h,v 11.8 2004/01/28 03:36:03 bostic Exp $ */ #ifndef _TPCB_H_ diff --git a/db/examples_c/getting_started/example_database_load.c b/db/examples_c/getting_started/example_database_load.c new file mode 100644 index 000000000..b7514f353 --- /dev/null +++ b/db/examples_c/getting_started/example_database_load.c @@ -0,0 +1,272 @@ +/* File: example_database_load.c */ + +/* We assume an ANSI-compatible compiler */ + +#include "gettingstarted_common.h" + +/* Forward declarations */ +int usage(void); +int load_vendors_database(STOCK_DBS, char *); +int pack_string(char *, char *, int); +int load_inventory_database(STOCK_DBS, char *); + +int +usage() +{ + fprintf(stderr, "example_database_load [-b ]"); + fprintf(stderr, " [-h ]\n"); + + fprintf(stderr, "\tNote: Any path specified must end with your"); + fprintf(stderr, " system's path delimiter (/ or \\)\n"); + return (-1); +} + +/* + * Loads the contents of vendors.txt and inventory.txt into + * Berkeley DB databases. Also causes the itemname secondary + * database to be created and loaded. + */ +int +main(int argc, char *argv[]) +{ + STOCK_DBS my_stock; + int ch, ret, size; + char *basename, *inventory_file, *vendor_file; + extern char *optarg; + + /* Initialize the STOCK_DBS struct */ + initialize_stockdbs(&my_stock); + + /* Initialize the base path. 
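+	 * The default is "./"; the -b option overrides it and, like the
+	 * -h home directory, the value is expected to end in the
+	 * system's path delimiter.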
*/ + basename = "./"; + + /* Parse the command line arguments */ + while ((ch = getopt(argc, argv, "b:h:")) != EOF) + switch (ch) { + case 'h': + if (optarg[strlen(optarg)-1] != '/' && + optarg[strlen(optarg)-1] != '\\') + return (usage()); + my_stock.db_home_dir = optarg; + break; + case 'b': + if (basename[strlen(basename)-1] != '/' && + basename[strlen(basename)-1] != '\\') + return (usage()); + basename = optarg; + break; + case '?': + default: + return (usage()); + } + + /* Identify the files that will hold our databases */ + set_db_filenames(&my_stock); + + /* Find our input files */ + size = strlen(basename) + strlen(INVENTORY_FILE) + 1; + inventory_file = malloc(size); + snprintf(inventory_file, size, "%s%s", basename, INVENTORY_FILE); + + size = strlen(basename) + strlen(VENDORS_FILE) + 1; + vendor_file = malloc(size); + snprintf(vendor_file, size, "%s%s", basename, VENDORS_FILE); + + /* Open all databases */ + ret = databases_setup(&my_stock, "example_database_load", stderr); + if (ret) { + fprintf(stderr, "Error opening databases\n"); + databases_close(&my_stock); + return (ret); + } + + ret = load_vendors_database(my_stock, vendor_file); + if (ret) { + fprintf(stderr, "Error loading vendors database.\n"); + databases_close(&my_stock); + return (ret); + } + ret = load_inventory_database(my_stock, inventory_file); + if (ret) { + fprintf(stderr, "Error loading inventory database.\n"); + databases_close(&my_stock); + return (ret); + } + + /* close our environment and databases */ + databases_close(&my_stock); + + printf("Done loading databases.\n"); + return (ret); +} + +/* + * Loads the contents of the vendors.txt file into + * a database. + */ +int +load_vendors_database(STOCK_DBS my_stock, char *vendor_file) +{ + DBT key, data; + char buf[MAXLINE]; + FILE *ifp; + VENDOR my_vendor; + + /* Load the vendors database */ + ifp = fopen(vendor_file, "r"); + if (ifp == NULL) { + fprintf(stderr, "Error opening file '%s'\n", vendor_file); + return (-1); + } + + while (fgets(buf, MAXLINE, ifp) != NULL) { + /* zero out the structure */ + memset(&my_vendor, 0, sizeof(VENDOR)); + /* Zero out the DBTs */ + memset(&key, 0, sizeof(DBT)); + memset(&data, 0, sizeof(DBT)); + + /* + * Scan the line into the structure. + * Convenient, but not particularly safe. + * In a real program, there would be a lot more + * defensive code here. + */ + sscanf(buf, + "%20[^#]#%20[^#]#%20[^#]#%3[^#]#%6[^#]#%13[^#]#%20[^#]#%20[^\n]", + my_vendor.name, my_vendor.street, + my_vendor.city, my_vendor.state, + my_vendor.zipcode, my_vendor.phone_number, + my_vendor.sales_rep, my_vendor.sales_rep_phone); + + /* Now that we have our structure we can load it into the database. */ + + /* Set up the database record's key */ + key.data = my_vendor.name; + key.size = (strlen(my_vendor.name) + 1) * sizeof(char); + + /* Set up the database record's data */ + data.data = &my_vendor; + data.size = sizeof(VENDOR); + + /* + * Note that given the way we built our struct, there's extra + * bytes in it. Essentially we're using fixed-width fields with + * the unused portion of some fields padded with zeros. This + * is the easiest thing to do, but it does result in a bloated + * database. Look at load_inventory_data() for an example of how + * to avoid this. 
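+		 * (load_inventory_database() below packs a float, an int
+		 * and four nul-terminated strings back to back into one
+		 * buffer, so each record is only as large as its
+		 * contents.)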
+ */ + + /* Put the data into the database */ + my_stock.vendor_dbp->put(my_stock.vendor_dbp, 0, &key, &data, 0); + } /* end vendors database while loop */ + + fclose(ifp); + return (0); +} + +/* + * Simple little convenience function that takes a buffer, a string, + * and an offset and copies that string into the buffer at the + * appropriate location. Used to ensure that all our strings + * are contained in a single contiguous chunk of memory. + */ +int +pack_string(char *buffer, char *string, int start_pos) +{ + int string_size; + + string_size = strlen(string) + 1; + memcpy(buffer+start_pos, string, string_size); + + return (start_pos + string_size); +} + +/* + * Loads the contents of the inventory.txt file into + * a database. Note that because the itemname + * secondary database is associated to the inventorydb + * (see env_setup() in gettingstarted_common.c), the + * itemname index is automatically created when this + * database is loaded. + */ +int +load_inventory_database(STOCK_DBS my_stock, char *inventory_file) +{ + DBT key, data; + char buf[MAXLINE]; + char databuf[MAXDATABUF]; + int bufLen, dataLen; + FILE *ifp; + + /* + * Rather than lining everything up nicely in a struct, we're being + * deliberately a bit sloppy here. This function illustrates how to + * store mixed data that might be obtained from various locations + * in your application. + */ + float price; + int quantity; + char category[MAXFIELD], name[MAXFIELD]; + char vendor[MAXFIELD], sku[MAXFIELD]; + + /* Load the inventory database */ + ifp = fopen(inventory_file, "r"); + if (ifp == NULL) { + fprintf(stderr, "Error opening file '%s'\n", inventory_file); + return (-1); + } + + while (fgets(buf, MAXLINE, ifp) != NULL) { + /* + * Scan the line into the appropriate buffers and variables. + * Convenient, but not particularly safe. In a real + * program, there would be a lot more defensive code here. + */ + sscanf(buf, + "%20[^#]#%20[^#]#%f#%i#%20[^#]#%20[^\n]", + name, sku, &price, &quantity, category, vendor); + + /* + * Now pack it into a single contiguous memory location for + * storage. + */ + memset(databuf, 0, MAXDATABUF); + bufLen = 0; + dataLen = 0; + + dataLen = sizeof(float); + memcpy(databuf, &price, dataLen); + bufLen += dataLen; + + dataLen = sizeof(int); + memcpy(databuf + bufLen, &quantity, dataLen); + bufLen += dataLen; + + bufLen = pack_string(databuf, name, bufLen); + bufLen = pack_string(databuf, sku, bufLen); + bufLen = pack_string(databuf, category, bufLen); + bufLen = pack_string(databuf, vendor, bufLen); + + /* Zero out the DBTs */ + memset(&key, 0, sizeof(DBT)); + memset(&data, 0, sizeof(DBT)); + + /* The key is the item's SKU */ + key.data = sku; + key.size = strlen(sku) + 1; + + /* The data is the information that we packed into databuf. 
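+		 * (price, then quantity, then the name, sku, category and
+		 * vendor strings -- the order show_inventory_item() in
+		 * example_database_read.c expects when it unpacks them.)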
*/ + data.data = databuf; + data.size = bufLen; + + /* Put the data into the database */ + my_stock.vendor_dbp->put(my_stock.inventory_dbp, 0, &key, &data, 0); + } /* end vendors database while loop */ + + /* Cleanup */ + fclose(ifp); + + return (0); +} diff --git a/db/examples_c/getting_started/example_database_read.c b/db/examples_c/getting_started/example_database_read.c new file mode 100644 index 000000000..6ba42340b --- /dev/null +++ b/db/examples_c/getting_started/example_database_read.c @@ -0,0 +1,272 @@ +/* File: example_database_read.c */ + +/* We assume an ANSI-compatible compiler */ + +#include "gettingstarted_common.h" + +/* Forward declarations */ +int usage(void); +char *show_inventory_item(void *); +int show_all_records(STOCK_DBS *); +int show_records(STOCK_DBS *, char *); +int show_vendor_record(char *, DB *); + +int +usage() +{ + fprintf(stderr, "example_database_read [-i ]"); + fprintf(stderr, " [-h ]\n"); + + fprintf(stderr, + "\tNote: Any path specified to the -h parameter must end\n"); + fprintf(stderr, " with your system's path delimiter (/ or \\)\n"); + return (-1); +} + +/* + * Searches for a inventory item based on that item's name. The search is + * performed using the item name secondary database. Displays all + * inventory items that use the specified name, as well as the vendor + * associated with that inventory item. + * + * If no item name is provided, then all inventory items are displayed. + */ +int +main(int argc, char *argv[]) +{ + STOCK_DBS my_stock; + int ch, ret; + char *itemname; + + /* Initialize the STOCK_DBS struct */ + initialize_stockdbs(&my_stock); + + /* Parse the command line arguments */ + itemname = NULL; + while ((ch = getopt(argc, argv, "h:i:?")) != EOF) + switch (ch) { + case 'h': + if (optarg[strlen(optarg)-1] != '/' && + optarg[strlen(optarg)-1] != '\\') + return (usage()); + my_stock.db_home_dir = optarg; + break; + case 'i': + itemname = optarg; + break; + case '?': + default: + return (usage()); + } + + /* Identify the files that hold our databases */ + set_db_filenames(&my_stock); + + /* Open all databases */ + ret = databases_setup(&my_stock, "example_database_read", stderr); + if (ret != 0) { + fprintf(stderr, "Error opening databases\n"); + databases_close(&my_stock); + return (ret); + } + + if (itemname == NULL) + ret = show_all_records(&my_stock); + else + ret = show_records(&my_stock, itemname); + + /* close our databases */ + databases_close(&my_stock); + return (ret); +} + +int show_all_records(STOCK_DBS *my_stock) +{ + DBC *inventory_cursorp; + DBT key, data; + char *the_vendor; + int exit_value, ret; + + /* Initialize our DBTs. */ + memset(&key, 0, sizeof(DBT)); + memset(&data, 0, sizeof(DBT)); + + /* Get a cursor to the inventory db */ + my_stock->inventory_dbp->cursor(my_stock->inventory_dbp, NULL, + &inventory_cursorp, 0); + + /* + * Iterate over the inventory database, from the first record + * to the last, displaying each in turn. + */ + exit_value = 0; + while ((ret = + inventory_cursorp->c_get(inventory_cursorp, &key, &data, DB_NEXT)) == 0) + { + the_vendor = show_inventory_item(data.data); + ret = show_vendor_record(the_vendor, my_stock->vendor_dbp); + if (ret) { + exit_value = ret; + break; + } + } + + /* Close the cursor */ + inventory_cursorp->c_close(inventory_cursorp); + return (exit_value); +} + +/* + * Search for an inventory item given its name (using the inventory item + * secondary database) and display that record and any duplicates that may + * exist. 
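+ * Duplicates are possible because the itemname secondary database is
+ * opened with DB_DUPSORT (see open_database() in
+ * gettingstarted_common.c), so the cursor is positioned with DB_SET and
+ * then walked with DB_NEXT_DUP.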
+ */ +int +show_records(STOCK_DBS *my_stock, char *itemname) +{ + DBC *itemname_cursorp; + DBT key, data; + char *the_vendor; + int ret, exit_value; + + /* Initialize our DBTs. */ + memset(&key, 0, sizeof(DBT)); + memset(&data, 0, sizeof(DBT)); + + /* Get a cursor to the itemname db */ + my_stock->itemname_sdbp->cursor(my_stock->itemname_sdbp, NULL, + &itemname_cursorp, 0); + + /* + * Get the search key. This is the name on the inventory + * record that we want to examine. + */ + key.data = itemname; + key.size = (strlen(itemname) + 1) * sizeof(char); + + /* + * Position our cursor to the first record in the secondary + * database that has the appropriate key. + */ + exit_value = 0; + ret = itemname_cursorp->c_get(itemname_cursorp, &key, &data, DB_SET); + if (!ret) { + do { + /* + * Show the inventory record and the vendor responsible + * for this inventory item. + */ + the_vendor = show_inventory_item(data.data); + ret = show_vendor_record(the_vendor, my_stock->vendor_dbp); + if (ret) { + exit_value = ret; + break; + } + /* + * Our secondary allows duplicates, so we need to loop over + * the next duplicate records and show them all. This is done + * because an inventory item's name is not a unique value. + */ + } while (itemname_cursorp->c_get(itemname_cursorp, &key, &data, + DB_NEXT_DUP) == 0); + } else { + printf("No records found for '%s'\n", itemname); + } + + /* Close the cursor */ + itemname_cursorp->c_close(itemname_cursorp); + + return (exit_value); +} + +/* + * Shows an inventory item. How we retrieve the inventory + * item values from the provided buffer is strictly dependent + * on the order that those items were originally stored in the + * DBT. See load_inventory_database in example_database_load + * for how this was done. + */ +char * +show_inventory_item(void *vBuf) +{ + float price; + int buf_pos, quantity; + char *category, *name, *sku, *vendor_name; + char *buf = (char *)vBuf; + + price = *((float *)buf); + buf_pos = sizeof(float); + + quantity = *((int *)(buf + buf_pos)); + buf_pos += sizeof(int); + + name = buf + buf_pos; + buf_pos += strlen(name) + 1; + + sku = buf + buf_pos; + buf_pos += strlen(sku) + 1; + + category = buf + buf_pos; + buf_pos += strlen(category) + 1; + + vendor_name = buf + buf_pos; + + printf("name: %s\n", name); + printf("\tSKU: %s\n", sku); + printf("\tCategory: %s\n", category); + printf("\tPrice: %.2f\n", price); + printf("\tQuantity: %i\n", quantity); + printf("\tVendor:\n"); + + return (vendor_name); +} + +/* + * Shows a vendor record. Each vendor record is an instance of + * a vendor structure. See load_vendor_database() in + * example_database_load for how this structure was originally + * put into the database. + */ +int +show_vendor_record(char *vendor_name, DB *vendor_dbp) +{ + DBT key, data; + VENDOR my_vendor; + int ret; + + /* Zero our DBTs */ + memset(&key, 0, sizeof(DBT)); + memset(&data, 0, sizeof(DBT)); + + /* Set the search key to the vendor's name */ + key.data = vendor_name; + key.size = strlen(vendor_name) + 1; + + /* + * Make sure we use the memory we set aside for the VENDOR + * structure rather than the memory that DB allocates. + * Some systems may require structures to be aligned in memory + * in a specific way, and DB may not get it right. 
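+	 * Setting data.ulen to sizeof(VENDOR) and data.flags to
+	 * DB_DBT_USERMEM below tells DB->get() to copy the record
+	 * directly into my_vendor instead of into DB-allocated memory.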
+ */ + + data.data = &my_vendor; + data.ulen = sizeof(VENDOR); + data.flags = DB_DBT_USERMEM; + + /* Get the record */ + ret = vendor_dbp->get(vendor_dbp, NULL, &key, &data, 0); + if (ret != 0) { + vendor_dbp->err(vendor_dbp, ret, "Error searching for vendor: '%s'", + vendor_name); + return (ret); + } else { + printf("\t\t%s\n", my_vendor.name); + printf("\t\t%s\n", my_vendor.street); + printf("\t\t%s, %s\n", my_vendor.city, my_vendor.state); + printf("\t\t%s\n\n", my_vendor.zipcode); + printf("\t\t%s\n\n", my_vendor.phone_number); + printf("\t\tContact: %s\n", my_vendor.sales_rep); + printf("\t\t%s\n", my_vendor.sales_rep_phone); + } + return (0); +} diff --git a/db/examples_c/getting_started/gettingstarted_common.c b/db/examples_c/getting_started/gettingstarted_common.c new file mode 100644 index 000000000..26704252b --- /dev/null +++ b/db/examples_c/getting_started/gettingstarted_common.c @@ -0,0 +1,235 @@ +/* File: gettingstarted_common.c */ + +#include "gettingstarted_common.h" + +int get_item_name(DB *, const DBT *, const DBT *, DBT *); + +/* + * Used to extract an inventory item's name from an + * inventory database record. This function is used to create + * keys for secondary database records. + */ +int +get_item_name(DB *dbp, const DBT *pkey, const DBT *pdata, DBT *skey) +{ + u_int offset; + + dbp = NULL; /* Not needed, unused. */ + pkey = NULL; + + /* + * First, obtain the buffer location where we placed the + * item's name. In this example, the item's name is located + * in the primary data. It is the first string in the + * buffer after the price (a float) and the quantity (an int). + * + * See load_inventory_database() in example_database_load.c + * for how we packed the inventory information into the + * data DBT. + */ + offset = sizeof(float) + sizeof(int); + + /* Check to make sure there's data */ + if (pdata->size < offset) + return (-1); /* Returning non-zero means that the + * secondary record is not created/updated. + */ + + /* Now set the secondary key's data to be the item name */ + memset(skey, 0, sizeof(DBT)); + skey->data = (u_int8_t *)pdata->data + offset; + skey->size = strlen(skey->data) + 1; + + return (0); +} + +/* Opens a database */ +int +open_database(DB **dbpp, const char *file_name, + const char *program_name, FILE *error_file_pointer, + int is_secondary) +{ + DB *dbp; /* For convenience */ + u_int32_t open_flags; + int ret; + + /* Initialize the DB handle */ + ret = db_create(&dbp, NULL, 0); + if (ret != 0) { + fprintf(error_file_pointer, "%s: %s\n", program_name, + db_strerror(ret)); + return (ret); + } + /* Point to the memory malloc'd by db_create() */ + *dbpp = dbp; + + /* Set up error handling for this database */ + dbp->set_errfile(dbp, error_file_pointer); + dbp->set_errpfx(dbp, program_name); + + /* + * If this is a secondary database, then we want to allow + * sorted duplicates. + */ + if (is_secondary) { + ret = dbp->set_flags(dbp, DB_DUPSORT); + if (ret != 0) { + dbp->err(dbp, ret, "Attempt to set DUPSORT flags failed.", + file_name); + return (ret); + } + } + + /* Set the open flags */ + open_flags = DB_CREATE; /* Allow database creation */ + + /* Now open the database */ + ret = dbp->open(dbp, /* Pointer to the database */ + NULL, /* Txn pointer */ + file_name, /* File name */ + NULL, /* Logical db name */ + DB_BTREE, /* Database type (using btree) */ + open_flags, /* Open flags */ + 0); /* File mode. 
Using defaults */ + if (ret != 0) { + dbp->err(dbp, ret, "Database '%s' open failed.", file_name); + return (ret); + } + + return (0); +} + +/* opens all databases */ +int +databases_setup(STOCK_DBS *my_stock, const char *program_name, + FILE *error_file_pointer) +{ + int ret; + + /* Open the vendor database */ + ret = open_database(&(my_stock->vendor_dbp), + my_stock->vendor_db_name, + program_name, error_file_pointer, + PRIMARY_DB); + if (ret != 0) + /* + * Error reporting is handled in open_database() so just return + * the return code. + */ + return (ret); + + /* Open the inventory database */ + ret = open_database(&(my_stock->inventory_dbp), + my_stock->inventory_db_name, + program_name, error_file_pointer, + PRIMARY_DB); + if (ret != 0) + /* + * Error reporting is handled in open_database() so just return + * the return code. + */ + return (ret); + + /* + * Open the itemname secondary database. This is used to + * index the product names found in the inventory + * database. + */ + ret = open_database(&(my_stock->itemname_sdbp), + my_stock->itemname_db_name, + program_name, error_file_pointer, + SECONDARY_DB); + if (ret != 0) + /* + * Error reporting is handled in open_database() so just return + * the return code. + */ + return (0); + + /* + * Associate the itemname db with its primary db + * (inventory db). + */ + my_stock->inventory_dbp->associate( + my_stock->inventory_dbp, /* Primary db */ + NULL, /* txn id */ + my_stock->itemname_sdbp, /* Secondary db */ + get_item_name, /* Secondary key creator */ + 0); /* Flags */ + + printf("databases opened successfully\n"); + return (0); +} + +/* Initializes the STOCK_DBS struct.*/ +void +initialize_stockdbs(STOCK_DBS *my_stock) +{ + my_stock->db_home_dir = DEFAULT_HOMEDIR; + my_stock->inventory_dbp = NULL; + my_stock->vendor_dbp = NULL; + my_stock->itemname_sdbp = NULL; + my_stock->vendor_db_name = NULL; + my_stock->inventory_db_name = NULL; + my_stock->itemname_db_name = NULL; +} + +/* Identify all the files that will hold our databases. */ +void +set_db_filenames(STOCK_DBS *my_stock) +{ + size_t size; + + /* Create the Inventory DB file name */ + size = strlen(my_stock->db_home_dir) + strlen(INVENTORYDB) + 1; + my_stock->inventory_db_name = malloc(size); + snprintf(my_stock->inventory_db_name, size, "%s%s", + my_stock->db_home_dir, INVENTORYDB); + + /* Create the Vendor DB file name */ + size = strlen(my_stock->db_home_dir) + strlen(VENDORDB) + 1; + my_stock->vendor_db_name = malloc(size); + snprintf(my_stock->vendor_db_name, size, "%s%s", + my_stock->db_home_dir, VENDORDB); + + /* Create the itemname DB file name */ + size = strlen(my_stock->db_home_dir) + strlen(ITEMNAMEDB) + 1; + my_stock->itemname_db_name = malloc(size); + snprintf(my_stock->itemname_db_name, size, "%s%s", + my_stock->db_home_dir, ITEMNAMEDB); + +} + +/* Closes all the databases and secondary databases. */ +int +databases_close(STOCK_DBS *my_stock) +{ + int ret; + /* + * Note that closing a database automatically flushes its cached data + * to disk, so no sync is required here. 
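+	 * The itemname secondary database is closed before the inventory
+	 * primary it is associated with; the standalone vendor database
+	 * is closed last.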
+ */ + if (my_stock->itemname_sdbp != NULL) { + ret = my_stock->itemname_sdbp->close(my_stock->itemname_sdbp, 0); + if (ret != 0) + fprintf(stderr, "Itemname database close failed: %s\n", + db_strerror(ret)); + } + + if (my_stock->inventory_dbp != NULL) { + ret = my_stock->inventory_dbp->close(my_stock->inventory_dbp, 0); + if (ret != 0) + fprintf(stderr, "Inventory database close failed: %s\n", + db_strerror(ret)); + } + + if (my_stock->vendor_dbp != NULL) { + ret = my_stock->vendor_dbp->close(my_stock->vendor_dbp, 0); + if (ret != 0) + fprintf(stderr, "Vendor database close failed: %s\n", + db_strerror(ret)); + } + + printf("databases closed.\n"); + return (0); +} diff --git a/db/examples_c/getting_started/gettingstarted_common.h b/db/examples_c/getting_started/gettingstarted_common.h new file mode 100644 index 000000000..258e776e2 --- /dev/null +++ b/db/examples_c/getting_started/gettingstarted_common.h @@ -0,0 +1,61 @@ +/*************************************************************************** + gettingstarted_common.h - description + ------------------- + begin : Sun Feb 22 2004 + copyright : (C) 2004 Sleepycat Software + email : support@sleepycat.com + ***************************************************************************/ +#include +#include +#include +#include + +#ifdef _WIN32 +extern int getopt(int, char * const *, const char *); +#else +#include +#endif + + +#define DEFAULT_HOMEDIR "./" +#define INVENTORY_FILE "inventory.txt" +#define VENDORS_FILE "vendors.txt" +#define INVENTORYDB "inventoryDB.db" +#define ITEMNAMEDB "itemnameDB.db" +#define MAXDATABUF 1024 +#define MAXFIELD 20 +#define MAXLINE 150 +#define PRIMARY_DB 0 +#define SECONDARY_DB 1 +#define VENDORDB "vendorDB.db" + +typedef struct stock_dbs { + DB *inventory_dbp; /* Database containing inventory information */ + DB *vendor_dbp; /* Database containing vendor information */ + DB *itemname_sdbp; /* Index based on the item name index */ + + char *db_home_dir; /* Directory containing the database files */ + char *itemname_db_name; /* Itemname secondary database */ + char *inventory_db_name; /* Name of the inventory database */ + char *vendor_db_name; /* Name of the vendor database */ +} STOCK_DBS; + +typedef struct vendor { + char name[MAXFIELD]; /* Vendor name */ + char street[MAXFIELD]; /* Street name and number */ + char city[MAXFIELD]; /* City */ + char state[3]; /* Two-digit US state code */ + char zipcode[6]; /* US zipcode */ + char phone_number[13]; /* Vendor phone number */ + char sales_rep[MAXFIELD]; /* Name of sales representative */ + char sales_rep_phone[MAXFIELD]; /* Sales rep's phone number */ +} VENDOR; + +/* Function prototypes */ +int databases_setup(STOCK_DBS *, const char *, FILE *); +int databases_close(STOCK_DBS *); +void initialize_stockdbs(STOCK_DBS *); +int open_database(DB **, const char *, const char *, + FILE *, int); +void set_db_filenames(STOCK_DBS *my_stock); + diff --git a/db/examples_c/getting_started/inventory.txt b/db/examples_c/getting_started/inventory.txt new file mode 100644 index 000000000..d6b68762c --- /dev/null +++ b/db/examples_c/getting_started/inventory.txt @@ -0,0 +1,800 @@ +Oranges#OranfruiRu6Ghr#0.71#451#fruits#TriCounty Produce +Oranges#OranfruiXRPFn1#0.73#263#fruits#Simply Fresh +Oranges#OranfruiLEuzQj#0.69#261#fruits#Off the Vine +Apples#ApplfruiZls4Du#1.20#472#fruits#TriCounty Produce +Apples#Applfrui8fewZe#1.21#402#fruits#Simply Fresh +Apples#ApplfruiXoT6xG#1.20#728#fruits#Off the Vine +Bananas#BanafruipIlluX#0.50#207#fruits#TriCounty Produce 
+Bananas#BanafruiEQhWuj#0.50#518#fruits#Simply Fresh +Bananas#BanafruimpRgPO#0.50#741#fruits#Off the Vine +Almonds#AlmofruiPPCLz8#0.55#600#fruits#TriCounty Produce +Almonds#AlmofruidMyKmp#0.54#745#fruits#Simply Fresh +Almonds#Almofrui7K0xzH#0.53#405#fruits#Off the Vine +Allspice#AllsfruibJGK4R#0.94#669#fruits#TriCounty Produce +Allspice#Allsfruilfvoeg#0.94#244#fruits#Simply Fresh +Allspice#Allsfruio12BOS#0.95#739#fruits#Off the Vine +Apricot#AprifruijphEpM#0.89#560#fruits#TriCounty Produce +Apricot#AprifruiU1zIDn#0.91#980#fruits#Simply Fresh +Apricot#AprifruichcwYS#0.95#668#fruits#Off the Vine +Avocado#AvocfruiwYYomu#0.99#379#fruits#TriCounty Produce +Avocado#AvocfruiT6IwWE#1.02#711#fruits#Simply Fresh +Avocado#AvocfruisbK1h5#0.97#856#fruits#Off the Vine +Bael Fruit#BaelfruilAU7Hj#0.41#833#fruits#TriCounty Produce +Bael Fruit#BaelfruiX2KvqV#0.40#770#fruits#Simply Fresh +Bael Fruit#Baelfruidjne4e#0.39#778#fruits#Off the Vine +Betel Nut#BetefruiQYdHqQ#0.34#926#fruits#TriCounty Produce +Betel Nut#Betefrui32BKAz#0.37#523#fruits#Simply Fresh +Betel Nut#BetefruisaWzY4#0.34#510#fruits#Off the Vine +Black Walnut#BlacfruiXxIuMU#0.57#923#fruits#TriCounty Produce +Black Walnut#BlacfruiZXgY9t#0.59#312#fruits#Simply Fresh +Black Walnut#BlacfruikWO0vz#0.60#877#fruits#Off the Vine +Blueberry#BluefruiCbxb4t#1.02#276#fruits#TriCounty Produce +Blueberry#BluefruiBuCfgO#1.03#522#fruits#Simply Fresh +Blueberry#Bluefruixz8MkE#1.01#278#fruits#Off the Vine +Boysenberry#BoysfruizxyMuz#1.05#239#fruits#TriCounty Produce +Boysenberry#Boysfrui3hTRQu#1.09#628#fruits#Simply Fresh +Boysenberry#BoysfruinpLvr3#1.02#349#fruits#Off the Vine +Breadnut#Breafrui0kDPs6#0.31#558#fruits#TriCounty Produce +Breadnut#Breafrui44s3og#0.32#879#fruits#Simply Fresh +Breadnut#BreafruiwyLKhJ#0.30#407#fruits#Off the Vine +Cactus#Cactfruiyo2ddH#0.56#601#fruits#TriCounty Produce +Cactus#CactfruixTOLv5#0.54#477#fruits#Simply Fresh +Cactus#Cactfrui4ioUav#0.55#896#fruits#Off the Vine +California Wild Grape#CalifruiZsWAa6#0.78#693#fruits#TriCounty Produce +California Wild Grape#Califruid84xyt#0.83#293#fruits#Simply Fresh +California Wild Grape#CalifruiLSJFoJ#0.81#543#fruits#Off the Vine +Cashew#CashfruihaOFVP#0.37#221#fruits#TriCounty Produce +Cashew#Cashfruizzcw1E#0.38#825#fruits#Simply Fresh +Cashew#CashfruiqtMe2Q#0.38#515#fruits#Off the Vine +Chico Sapote#ChicfruiY534SX#0.47#216#fruits#TriCounty Produce +Chico Sapote#ChicfruiSqL3Lc#0.45#476#fruits#Simply Fresh +Chico Sapote#ChicfruiurzIp4#0.47#200#fruits#Off the Vine +Chinese Jello#ChinfruiyRg75u#0.64#772#fruits#TriCounty Produce +Chinese Jello#ChinfruiuIUj0X#0.65#624#fruits#Simply Fresh +Chinese Jello#ChinfruiwXbRrL#0.67#719#fruits#Off the Vine +Common Guava#Commfruib6znSI#0.80#483#fruits#TriCounty Produce +Common Guava#Commfrui6eUivL#0.81#688#fruits#Simply Fresh +Common Guava#CommfruibWKnz3#0.84#581#fruits#Off the Vine +Crabapple#CrabfruioY2L63#0.94#582#fruits#TriCounty Produce +Crabapple#Crabfruijxcxyt#0.94#278#fruits#Simply Fresh +Crabapple#CrabfruibvWd8K#0.95#213#fruits#Off the Vine +Cranberry#CranfruiJxmKr5#0.83#923#fruits#TriCounty Produce +Cranberry#CranfruiPlklAF#0.84#434#fruits#Simply Fresh +Cranberry#Cranfrui3G5XL9#0.84#880#fruits#Off the Vine +Damson Plum#DamsfruibMRMwe#0.98#782#fruits#TriCounty Produce +Damson Plum#DamsfruiV6wFLk#1.03#400#fruits#Simply Fresh +Damson Plum#DamsfruiLhqFrQ#0.98#489#fruits#Off the Vine +Date Palm#DatefruigS31GU#1.14#315#fruits#TriCounty Produce +Date Palm#DatefruipKPaJK#1.09#588#fruits#Simply Fresh +Date Palm#Datefrui5fTyNS#1.14#539#fruits#Off the Vine 
+Dragon's Eye#DragfruirGJ3aI#0.28#315#fruits#TriCounty Produce +Dragon's Eye#DragfruiBotxqt#0.27#705#fruits#Simply Fresh +Dragon's Eye#DragfruiPsSnV9#0.29#482#fruits#Off the Vine +East Indian Wine Palm#EastfruiNXFJuG#0.43#992#fruits#TriCounty Produce +East Indian Wine Palm#Eastfruiq06fRr#0.40#990#fruits#Simply Fresh +East Indian Wine Palm#Eastfrui4QUwl2#0.43#351#fruits#Off the Vine +English Walnut#EnglfruiBMtHtW#1.04#787#fruits#TriCounty Produce +English Walnut#EnglfruiHmVzxV#1.03#779#fruits#Simply Fresh +English Walnut#Englfrui18Tc9n#1.06#339#fruits#Off the Vine +False Mangosteen#FalsfruibkmYqH#0.66#971#fruits#TriCounty Produce +False Mangosteen#FalsfruipBsbcX#0.68#250#fruits#Simply Fresh +False Mangosteen#FalsfruiPrFfhe#0.70#386#fruits#Off the Vine +Fried Egg Tree#FriefruiihHUdc#0.29#649#fruits#TriCounty Produce +Fried Egg Tree#FriefruimdD1rf#0.28#527#fruits#Simply Fresh +Fried Egg Tree#FriefruivyAzYq#0.29#332#fruits#Off the Vine +Genipap#GenifruiDtKusQ#0.62#986#fruits#TriCounty Produce +Genipap#GenifruiXq32eP#0.61#326#fruits#Simply Fresh +Genipap#Genifruiphwwyq#0.61#794#fruits#Off the Vine +Ginger#GingfruiQLbRZI#0.28#841#fruits#TriCounty Produce +Ginger#GingfruiS8kK4p#0.29#432#fruits#Simply Fresh +Ginger#GingfruioL3Y4S#0.27#928#fruits#Off the Vine +Grapefruit#Grapfruih86Zxh#1.07#473#fruits#TriCounty Produce +Grapefruit#GrapfruiwL1v0N#1.08#878#fruits#Simply Fresh +Grapefruit#GrapfruihmJzWm#1.02#466#fruits#Off the Vine +Hackberry#HackfruiQjomN7#0.22#938#fruits#TriCounty Produce +Hackberry#HackfruiWS0eKp#0.20#780#fruits#Simply Fresh +Hackberry#Hackfrui0MIv6J#0.21#345#fruits#Off the Vine +Honey Locust#HonefruiebXGRc#1.08#298#fruits#TriCounty Produce +Honey Locust#HonefruiPSqILB#1.00#427#fruits#Simply Fresh +Honey Locust#Honefrui6UXtvW#1.03#422#fruits#Off the Vine +Japanese Plum#JapafruihTmoYR#0.40#658#fruits#TriCounty Produce +Japanese Plum#JapafruifGqz0l#0.40#700#fruits#Simply Fresh +Japanese Plum#JapafruiufWkLx#0.39#790#fruits#Off the Vine +Jojoba#JojofruisE0wTh#0.97#553#fruits#TriCounty Produce +Jojoba#JojofruiwiYLp2#1.02#969#fruits#Simply Fresh +Jojoba#JojofruigMD1ej#0.96#899#fruits#Off the Vine +Jostaberry#JostfruiglsEGV#0.50#300#fruits#TriCounty Produce +Jostaberry#JostfruiV3oo1h#0.52#423#fruits#Simply Fresh +Jostaberry#JostfruiUBerur#0.53#562#fruits#Off the Vine +Kangaroo Apple#KangfruiEQknz8#0.60#661#fruits#TriCounty Produce +Kangaroo Apple#KangfruiNabdFq#0.60#377#fruits#Simply Fresh +Kangaroo Apple#Kangfrui7hky1i#0.60#326#fruits#Off the Vine +Ken's Red#Ken'fruinPUSIm#0.21#337#fruits#TriCounty Produce +Ken's Red#Ken'fruiAoZlpl#0.21#902#fruits#Simply Fresh +Ken's Red#Ken'frui5rmbd4#0.22#972#fruits#Off the Vine +Ketembilla#Ketefrui3yAKxQ#0.31#303#fruits#TriCounty Produce +Ketembilla#KetefruiROn6F5#0.34#283#fruits#Simply Fresh +Ketembilla#Ketefrui16Rsts#0.33#887#fruits#Off the Vine +King Orange#KingfruisOFzWk#0.74#429#fruits#TriCounty Produce +King Orange#KingfruiBmzRJT#0.74#500#fruits#Simply Fresh +King Orange#KingfruiGsrgRX#0.78#994#fruits#Off the Vine +Kola Nut#KolafruiBbtAuw#0.58#991#fruits#TriCounty Produce +Kola Nut#KolafruirbnLVS#0.62#733#fruits#Simply Fresh +Kola Nut#Kolafrui1ItXJx#0.58#273#fruits#Off the Vine +Kuko#Kukofrui6YH5Ds#0.41#647#fruits#TriCounty Produce +Kuko#Kukofrui7WZaZK#0.39#241#fruits#Simply Fresh +Kuko#Kukofruig9MQFT#0.40#204#fruits#Off the Vine +Kumquat#KumqfruiT6WKQL#0.73#388#fruits#TriCounty Produce +Kumquat#KumqfruidLiFLU#0.70#393#fruits#Simply Fresh +Kumquat#KumqfruiL6zhQX#0.71#994#fruits#Off the Vine +Kwai Muk#KwaifruiQK1zOE#1.10#249#fruits#TriCounty 
Produce +Kwai Muk#KwaifruifbCRlT#1.14#657#fruits#Simply Fresh +Kwai Muk#Kwaifruipe7T2m#1.09#617#fruits#Off the Vine +Lanzone#LanzfruijsPf1v#0.34#835#fruits#TriCounty Produce +Lanzone#LanzfruibU3QoL#0.34#404#fruits#Simply Fresh +Lanzone#LanzfruiYgHwv6#0.34#237#fruits#Off the Vine +Lemon#Lemofrui4Tgsg2#0.46#843#fruits#TriCounty Produce +Lemon#LemofruivK6qvj#0.43#207#fruits#Simply Fresh +Lemon#LemofruiXSXqJ0#0.44#910#fruits#Off the Vine +Lemon Grass#LemofruiVFgVh5#0.40#575#fruits#TriCounty Produce +Lemon Grass#LemofruiWIelvi#0.41#386#fruits#Simply Fresh +Lemon Grass#LemofruiGVAow0#0.39#918#fruits#Off the Vine +Lilly-pilly#LillfruiEQnW1m#1.21#974#fruits#TriCounty Produce +Lilly-pilly#LillfruiMqVuR5#1.23#303#fruits#Simply Fresh +Lilly-pilly#LillfruiVGH9p4#1.17#512#fruits#Off the Vine +Ling Nut#LingfruiGtOf8X#0.85#540#fruits#TriCounty Produce +Ling Nut#LingfruiuP0Jf9#0.83#200#fruits#Simply Fresh +Ling Nut#LingfruiuO5qf5#0.81#319#fruits#Off the Vine +Lipote#LipofruisxD2Qc#0.85#249#fruits#TriCounty Produce +Lipote#LipofruiHNdIqL#0.85#579#fruits#Simply Fresh +Lipote#LipofruiSQ2pKK#0.83#472#fruits#Off the Vine +Litchee#Litcfrui1R6Ydz#0.99#806#fruits#TriCounty Produce +Litchee#LitcfruiwtDM79#1.01#219#fruits#Simply Fresh +Litchee#LitcfruilpPZbC#1.05#419#fruits#Off the Vine +Longan#LongfruiEI0lWF#1.02#573#fruits#TriCounty Produce +Longan#LongfruiPQxxSF#1.04#227#fruits#Simply Fresh +Longan#LongfruisdI812#0.99#993#fruits#Off the Vine +Love-in-a-mist#LovefruiKYPW70#0.69#388#fruits#TriCounty Produce +Love-in-a-mist#LovefruiHrgjDa#0.67#478#fruits#Simply Fresh +Love-in-a-mist#LovefruipSOWVz#0.71#748#fruits#Off the Vine +Lychee#LychfruiicVLnY#0.38#276#fruits#TriCounty Produce +Lychee#LychfruiGY6yJr#0.38#602#fruits#Simply Fresh +Lychee#LychfruiTzDCq2#0.40#572#fruits#Off the Vine +Mabolo#MabofruiSY8RQS#0.97#263#fruits#TriCounty Produce +Mabolo#MabofruiOWWk0n#0.98#729#fruits#Simply Fresh +Mabolo#MabofruixQLOTF#0.98#771#fruits#Off the Vine +Macadamia Nut#MacafruiZppJPw#1.22#888#fruits#TriCounty Produce +Macadamia Nut#MacafruiI7XFMV#1.24#484#fruits#Simply Fresh +Macadamia Nut#Macafrui4x8bxV#1.20#536#fruits#Off the Vine +Madagascar Plum#MadafruiVj5fDf#1.14#596#fruits#TriCounty Produce +Madagascar Plum#MadafruivJhAFI#1.15#807#fruits#Simply Fresh +Madagascar Plum#Madafrui7MTe1x#1.17#355#fruits#Off the Vine +Magnolia Vine#MagnfruiigN4Y1#1.17#321#fruits#TriCounty Produce +Magnolia Vine#MagnfruicKtiHd#1.15#353#fruits#Simply Fresh +Magnolia Vine#MagnfruiLPDSCp#1.23#324#fruits#Off the Vine +Mamey#Mamefrui5rjLF6#0.36#683#fruits#TriCounty Produce +Mamey#MamefruiM6ndnR#0.38#404#fruits#Simply Fresh +Mamey#Mamefruiq9KntD#0.36#527#fruits#Off the Vine +Mandarin Orange#MandfruiRKpmKL#0.42#352#fruits#TriCounty Produce +Mandarin Orange#Mandfrui1V0KLG#0.42#548#fruits#Simply Fresh +Mandarin Orange#Mandfruig2o9Fg#0.41#686#fruits#Off the Vine +Marany Nut#MarafruiqkrwoJ#1.14#273#fruits#TriCounty Produce +Marany Nut#MarafruiCGKpke#1.12#482#fruits#Simply Fresh +Marany Nut#MarafruiB1YE5x#1.09#412#fruits#Off the Vine +Marula#MarufruiXF4biH#0.22#403#fruits#TriCounty Produce +Marula#MarufruidZiVKZ#0.23#317#fruits#Simply Fresh +Marula#MarufruiIS8BEp#0.21#454#fruits#Off the Vine +Mayhaw#MayhfruiCSrm7k#0.24#220#fruits#TriCounty Produce +Mayhaw#MayhfruiNRDzWs#0.25#710#fruits#Simply Fresh +Mayhaw#MayhfruiIUCyEg#0.24#818#fruits#Off the Vine +Meiwa Kumquat#MeiwfruiYhv3AY#0.21#997#fruits#TriCounty Produce +Meiwa Kumquat#MeiwfruiyzQFNR#0.22#347#fruits#Simply Fresh +Meiwa Kumquat#Meiwfruict4OUp#0.21#923#fruits#Off the Vine +Mexican 
Barberry#Mexifrui2P2dXi#0.28#914#fruits#TriCounty Produce +Mexican Barberry#MexifruiywUTMI#0.29#782#fruits#Simply Fresh +Mexican Barberry#MexifruijPHu5X#0.29#367#fruits#Off the Vine +Meyer Lemon#Meyefruin9901J#0.38#824#fruits#TriCounty Produce +Meyer Lemon#MeyefruiNeQpjO#0.37#617#fruits#Simply Fresh +Meyer Lemon#MeyefruiYEVznZ#0.37#741#fruits#Off the Vine +Mississippi Honeyberry#Missfruipb5iW3#0.95#595#fruits#TriCounty Produce +Mississippi Honeyberry#MissfruiINiDbB#0.96#551#fruits#Simply Fresh +Mississippi Honeyberry#MissfruiNUQ82a#0.93#396#fruits#Off the Vine +Monkey Pot#MonkfruiXlTW4j#0.90#896#fruits#TriCounty Produce +Monkey Pot#Monkfrui1p7a4h#0.88#344#fruits#Simply Fresh +Monkey Pot#Monkfrui4eKggb#0.92#917#fruits#Off the Vine +Monos Plum#Monofrui0Mv9aV#1.11#842#fruits#TriCounty Produce +Monos Plum#Monofrui6iTGQY#1.14#570#fruits#Simply Fresh +Monos Plum#MonofruiNu2uGH#1.13#978#fruits#Off the Vine +Moosewood#MoosfruiMXEGex#0.86#969#fruits#TriCounty Produce +Moosewood#Moosfrui8805mB#0.86#963#fruits#Simply Fresh +Moosewood#MoosfruiOsnDFL#0.88#594#fruits#Off the Vine +Natal Orange#NatafruitB8Kh2#0.42#332#fruits#TriCounty Produce +Natal Orange#NatafruiOhqRrd#0.42#982#fruits#Simply Fresh +Natal Orange#NatafruiRObMf6#0.41#268#fruits#Off the Vine +Nectarine#NectfruilNfeD8#0.36#601#fruits#TriCounty Produce +Nectarine#NectfruiQfjt6b#0.35#818#fruits#Simply Fresh +Nectarine#Nectfrui5U7U96#0.37#930#fruits#Off the Vine +Neem Tree#NeemfruiCruEMF#0.24#222#fruits#TriCounty Produce +Neem Tree#NeemfruiGv0pv5#0.24#645#fruits#Simply Fresh +Neem Tree#NeemfruiUFPVfk#0.25#601#fruits#Off the Vine +New Zealand Spinach#New fruihDIgec#0.87#428#fruits#TriCounty Produce +New Zealand Spinach#New fruiaoR9TP#0.87#630#fruits#Simply Fresh +New Zealand Spinach#New fruiy8LBul#0.94#570#fruits#Off the Vine +Olosapo#OlosfruiGXvaMm#0.76#388#fruits#TriCounty Produce +Olosapo#OlosfruiESlpB3#0.76#560#fruits#Simply Fresh +Olosapo#OlosfruiFNEkER#0.76#962#fruits#Off the Vine +Oregon Grape#OregfruiWxhzrf#1.14#892#fruits#TriCounty Produce +Oregon Grape#OregfruiMgjHUn#1.20#959#fruits#Simply Fresh +Oregon Grape#OregfruiC5UCxX#1.17#419#fruits#Off the Vine +Otaheite Apple#OtahfruilT0iFj#0.21#579#fruits#TriCounty Produce +Otaheite Apple#Otahfrui92PyMY#0.22#857#fruits#Simply Fresh +Otaheite Apple#OtahfruiLGD1EH#0.20#807#fruits#Off the Vine +Oyster Plant#OystfruimGxOsj#0.77#835#fruits#TriCounty Produce +Oyster Plant#Oystfrui1kudBX#0.81#989#fruits#Simply Fresh +Oyster Plant#OystfruiaX3uO2#0.80#505#fruits#Off the Vine +Panama Berry#PanafruiZG0Vp4#1.19#288#fruits#TriCounty Produce +Panama Berry#PanafruiobvXPE#1.21#541#fruits#Simply Fresh +Panama Berry#PanafruipaW8F3#1.16#471#fruits#Off the Vine +Peach Tomato#PeacfruiQpovYH#1.20#475#fruits#TriCounty Produce +Peach Tomato#PeacfruixYXLTN#1.18#655#fruits#Simply Fresh +Peach Tomato#PeacfruiILDYAp#1.23#876#fruits#Off the Vine +Peanut#Peanfruiy8M7pt#0.69#275#fruits#TriCounty Produce +Peanut#PeanfruiEimbED#0.65#307#fruits#Simply Fresh +Peanut#Peanfruic452Vc#0.68#937#fruits#Off the Vine +Peanut Butter Fruit#PeanfruixEDt9Y#0.27#628#fruits#TriCounty Produce +Peanut Butter Fruit#PeanfruiST0T0R#0.27#910#fruits#Simply Fresh +Peanut Butter Fruit#Peanfrui7jeRN2#0.27#938#fruits#Off the Vine +Pear#PearfruiB5YmSJ#0.20#945#fruits#TriCounty Produce +Pear#PearfruiA93XZx#0.21#333#fruits#Simply Fresh +Pear#PearfruioNKiIf#0.21#715#fruits#Off the Vine +Pecan#PecafruiiTIv1Z#0.26#471#fruits#TriCounty Produce +Pecan#PecafruiMGkqla#0.26#889#fruits#Simply Fresh +Pecan#Pecafrui1szYz2#0.25#929#fruits#Off the Vine +Purple 
Passion Fruit#Purpfrui4mMGkD#1.04#914#fruits#TriCounty Produce +Purple Passion Fruit#Purpfrui5XOW3K#1.06#423#fruits#Simply Fresh +Purple Passion Fruit#PurpfruifDTAgW#1.05#549#fruits#Off the Vine +Red Mulberry#Red fruiVLOXIW#1.24#270#fruits#TriCounty Produce +Red Mulberry#Red fruiXNXt4a#1.21#836#fruits#Simply Fresh +Red Mulberry#Red fruiUseWLG#1.21#795#fruits#Off the Vine +Red Princess#Red fruigJLR4V#0.23#829#fruits#TriCounty Produce +Red Princess#Red fruinVKps5#0.23#558#fruits#Simply Fresh +Red Princess#Red frui0jl9mg#0.24#252#fruits#Off the Vine +Striped Screw Pine#StrifruiUKzjoU#0.60#226#fruits#TriCounty Produce +Striped Screw Pine#StrifruivWLDzH#0.64#685#fruits#Simply Fresh +Striped Screw Pine#StrifruiiF7CGH#0.60#983#fruits#Off the Vine +Tapioca#Tapifruib4LCqt#0.40#955#fruits#TriCounty Produce +Tapioca#TapifruiwgQLj9#0.41#889#fruits#Simply Fresh +Tapioca#TapifruiZ6Igg3#0.41#655#fruits#Off the Vine +Tavola#Tavofrui0k9XOt#1.16#938#fruits#TriCounty Produce +Tavola#Tavofrui8DuRxL#1.08#979#fruits#Simply Fresh +Tavola#TavofruiNZEuJZ#1.16#215#fruits#Off the Vine +Tea#TeafruiL0357s#1.11#516#fruits#TriCounty Produce +Tea#TeafruiD5soTf#1.13#970#fruits#Simply Fresh +Tea#TeafruiOWq4oO#1.19#357#fruits#Off the Vine +Ugli Fruit#UglifruipKNCpf#0.24#501#fruits#TriCounty Produce +Ugli Fruit#UglifruifbDrzc#0.24#642#fruits#Simply Fresh +Ugli Fruit#Uglifruiwx8or4#0.24#280#fruits#Off the Vine +Vegetable Brain#VegefruieXLBoc#0.73#355#fruits#TriCounty Produce +Vegetable Brain#Vegefruik5FSdl#0.71#498#fruits#Simply Fresh +Vegetable Brain#VegefruiKBfzN0#0.72#453#fruits#Off the Vine +White Walnut#Whitfruit3oVHL#0.30#501#fruits#TriCounty Produce +White Walnut#WhitfruiHygydw#0.30#913#fruits#Simply Fresh +White Walnut#WhitfruieNtplo#0.30#401#fruits#Off the Vine +Wood Apple#WoodfruijVPRqA#0.68#501#fruits#TriCounty Produce +Wood Apple#Woodfrui4Zk69T#0.68#616#fruits#Simply Fresh +Wood Apple#WoodfruiuSLHZK#0.70#474#fruits#Off the Vine +Yellow Horn#Yellfrui5igjjf#1.18#729#fruits#TriCounty Produce +Yellow Horn#Yellfrui0DiPqa#1.13#517#fruits#Simply Fresh +Yellow Horn#Yellfrui0ljvqC#1.14#853#fruits#Off the Vine +Yellow Sapote#YellfruilGmCfq#0.93#204#fruits#TriCounty Produce +Yellow Sapote#Yellfrui4J2mke#0.88#269#fruits#Simply Fresh +Yellow Sapote#Yellfrui6PuXaL#0.86#575#fruits#Off the Vine +Ylang-ylang#Ylanfrui3rmByO#0.76#429#fruits#TriCounty Produce +Ylang-ylang#YlanfruiA80Nkq#0.76#886#fruits#Simply Fresh +Ylang-ylang#YlanfruinUEm5d#0.72#747#fruits#Off the Vine +Zapote Blanco#ZapofruisZ5sMA#0.67#428#fruits#TriCounty Produce +Zapote Blanco#ZapofruilKxl7N#0.65#924#fruits#Simply Fresh +Zapote Blanco#ZapofruiAe6Eu1#0.68#255#fruits#Off the Vine +Zulu Nut#Zulufrui469K4k#0.71#445#fruits#TriCounty Produce +Zulu Nut#ZulufruiWbz6vU#0.71#653#fruits#Simply Fresh +Zulu Nut#Zulufrui0LJnWK#0.71#858#fruits#Off the Vine +Artichoke#ArtivegeIuqmS4#0.71#282#vegetables#The Pantry +Artichoke#Artivegebljjnf#0.69#66#vegetables#TriCounty Produce +Artichoke#ArtivegeTa2lcF#0.70#618#vegetables#Off the Vine +Asparagus#AspavegezC0cDl#0.23#70#vegetables#The Pantry +Asparagus#AspavegeM1q5Kt#0.24#546#vegetables#TriCounty Produce +Asparagus#AspavegeXWbCb8#0.24#117#vegetables#Off the Vine +Basil#Basivegev08fzf#0.31#213#vegetables#The Pantry +Basil#BasivegeF3Uha7#0.29#651#vegetables#TriCounty Produce +Basil#BasivegeqR8SHC#0.31#606#vegetables#Off the Vine +Bean#BeanvegegCFUOp#0.27#794#vegetables#The Pantry +Bean#BeanvegeqMSEVq#0.27#468#vegetables#TriCounty Produce +Bean#Beanvege4IGUwX#0.27#463#vegetables#Off the Vine 
+Beet#BeetvegedEv4Ic#0.35#120#vegetables#The Pantry +Beet#Beetvegegi1bz1#0.35#540#vegetables#TriCounty Produce +Beet#BeetvegemztZcN#0.36#386#vegetables#Off the Vine +Blackeyed Pea#Blacvege3TPldr#0.86#133#vegetables#The Pantry +Blackeyed Pea#Blacvege3Zqnep#0.88#67#vegetables#TriCounty Produce +Blackeyed Pea#Blacvege3khffZ#0.90#790#vegetables#Off the Vine +Cabbage#CabbvegeY0c4Fw#0.82#726#vegetables#The Pantry +Cabbage#CabbvegeoaK7Co#0.85#439#vegetables#TriCounty Produce +Cabbage#CabbvegeVvO646#0.82#490#vegetables#Off the Vine +Carrot#CarrvegeEbI0sw#0.45#717#vegetables#The Pantry +Carrot#CarrvegeEZndWL#0.49#284#vegetables#TriCounty Produce +Carrot#CarrvegewUkHao#0.47#122#vegetables#Off the Vine +Cauliflower#Caulvege1CPeNG#0.68#756#vegetables#The Pantry +Cauliflower#CaulvegedrPqib#0.66#269#vegetables#TriCounty Produce +Cauliflower#CaulvegeT6cka8#0.65#728#vegetables#Off the Vine +Chayote#ChayvegePRReGE#0.14#233#vegetables#The Pantry +Chayote#Chayvegep058f7#0.14#88#vegetables#TriCounty Produce +Chayote#ChayvegeoxO40S#0.14#611#vegetables#Off the Vine +Corn#CornvegeukXkv6#0.72#632#vegetables#The Pantry +Corn#CornvegePnPREC#0.72#609#vegetables#TriCounty Produce +Corn#CornvegeO0GwoQ#0.70#664#vegetables#Off the Vine +Cucumber#CucuvegeEqQeA7#0.94#499#vegetables#The Pantry +Cucumber#CucuvegewmKbJ1#0.94#738#vegetables#TriCounty Produce +Cucumber#CucuvegeUW6JaA#0.94#565#vegetables#Off the Vine +Cantaloupe#CantvegeIHs9vJ#0.66#411#vegetables#The Pantry +Cantaloupe#CantvegeEaDdST#0.66#638#vegetables#TriCounty Produce +Cantaloupe#CantvegewWQEa0#0.64#682#vegetables#Off the Vine +Carraway#CarrvegewuL4Ma#0.32#740#vegetables#The Pantry +Carraway#CarrvegeyiWfBj#0.32#265#vegetables#TriCounty Produce +Carraway#CarrvegeMjb1i9#0.31#732#vegetables#Off the Vine +Celeriac#CelevegeoTBicd#0.74#350#vegetables#The Pantry +Celeriac#CelevegeCNABoZ#0.70#261#vegetables#TriCounty Produce +Celeriac#Celevege9LUeww#0.70#298#vegetables#Off the Vine +Celery#Celevegej40ZCc#0.59#740#vegetables#The Pantry +Celery#CelevegerYlVRy#0.58#734#vegetables#TriCounty Produce +Celery#Celevege67eimC#0.58#619#vegetables#Off the Vine +Chervil#ChervegeuH4Dge#0.09#502#vegetables#The Pantry +Chervil#Chervegea1OyKO#0.09#299#vegetables#TriCounty Produce +Chervil#Chervegeq56gMO#0.09#474#vegetables#Off the Vine +Chicory#Chicvege79qoQ8#0.09#709#vegetables#The Pantry +Chicory#ChicvegeTSVBQq#0.10#477#vegetables#TriCounty Produce +Chicory#Chicvege6qpcyi#0.10#282#vegetables#Off the Vine +Chinese Cabbage#ChinvegeFNsSRn#0.78#408#vegetables#The Pantry +Chinese Cabbage#Chinvege2ldNr3#0.80#799#vegetables#TriCounty Produce +Chinese Cabbage#ChinvegeK3R2Td#0.80#180#vegetables#Off the Vine +Chinese Beans#ChinvegebxbyPy#0.45#654#vegetables#The Pantry +Chinese Beans#ChinvegewKGwgx#0.45#206#vegetables#TriCounty Produce +Chinese Beans#ChinvegevVjzC0#0.47#643#vegetables#Off the Vine +Chines Kale#ChinvegeCfdkss#0.70#239#vegetables#The Pantry +Chines Kale#Chinvege6V6Dne#0.65#548#vegetables#TriCounty Produce +Chines Kale#ChinvegeB7vE3x#0.66#380#vegetables#Off the Vine +Chinese Radish#ChinvegeXcM4eq#0.22#190#vegetables#The Pantry +Chinese Radish#ChinvegeTdUBqN#0.22#257#vegetables#TriCounty Produce +Chinese Radish#ChinvegeMXMms8#0.22#402#vegetables#Off the Vine +Chinese Mustard#ChinvegeRDdpdl#0.33#149#vegetables#The Pantry +Chinese Mustard#ChinvegeABDhNd#0.32#320#vegetables#TriCounty Produce +Chinese Mustard#Chinvege8NPwa2#0.34#389#vegetables#Off the Vine +Cilantro#CilavegeQXBEsW#0.60#674#vegetables#The Pantry +Cilantro#CilavegeRgjkUG#0.60#355#vegetables#TriCounty Produce 
+Cilantro#CilavegelT2msu#0.59#464#vegetables#Off the Vine +Collard#CollvegesTGGNw#0.32#745#vegetables#The Pantry +Collard#CollvegeAwdor5#0.32#124#vegetables#TriCounty Produce +Collard#CollvegeQe900L#0.30#796#vegetables#Off the Vine +Coriander#CorivegeXxp4xY#0.26#560#vegetables#The Pantry +Coriander#Corivege9xBAT0#0.27#321#vegetables#TriCounty Produce +Coriander#CorivegeCfNjBx#0.27#709#vegetables#Off the Vine +Dandelion#DandvegeJNcnbr#0.11#285#vegetables#The Pantry +Dandelion#DandvegeGwBkHZ#0.11#733#vegetables#TriCounty Produce +Dandelion#DandvegeZfwVqn#0.11#57#vegetables#Off the Vine +Daikon Radish#DaikvegeHHsd7M#0.61#743#vegetables#The Pantry +Daikon Radish#DaikvegeIu17yC#0.62#459#vegetables#TriCounty Produce +Daikon Radish#DaikvegePzFjqf#0.63#296#vegetables#Off the Vine +Eggplant#EggpvegeKJtydN#0.55#200#vegetables#The Pantry +Eggplant#EggpvegeQMKrNs#0.53#208#vegetables#TriCounty Produce +Eggplant#EggpvegeN0WnSo#0.51#761#vegetables#Off the Vine +English Pea#Englvegea1ytIn#0.40#457#vegetables#The Pantry +English Pea#EnglvegerU9Vty#0.37#263#vegetables#TriCounty Produce +English Pea#EnglvegeCmkd3y#0.39#430#vegetables#Off the Vine +Fennel#Fennvegebz2UM7#0.76#545#vegetables#The Pantry +Fennel#FennvegeQzjtZ3#0.78#795#vegetables#TriCounty Produce +Fennel#FennvegeXSrW61#0.75#79#vegetables#Off the Vine +Garlic#GarlvegesR2yel#0.76#478#vegetables#The Pantry +Garlic#GarlvegeEQvt8W#0.77#349#vegetables#TriCounty Produce +Garlic#GarlvegedljBdK#0.80#708#vegetables#Off the Vine +Ginger#GingvegeMNiTc2#0.88#563#vegetables#The Pantry +Ginger#Gingvegeq366Sn#0.89#738#vegetables#TriCounty Produce +Ginger#GingvegeznyyVj#0.89#598#vegetables#Off the Vine +Horseradish#HorsvegemSwISt#0.12#622#vegetables#The Pantry +Horseradish#HorsvegetCOS0x#0.11#279#vegetables#TriCounty Produce +Horseradish#Horsvegew6XXaS#0.12#478#vegetables#Off the Vine +Japanese Eggplant#JapavegeTdKDCL#0.57#539#vegetables#The Pantry +Japanese Eggplant#JapavegevsJfGa#0.58#782#vegetables#TriCounty Produce +Japanese Eggplant#JapavegeCIrIxd#0.57#777#vegetables#Off the Vine +Jerusalem Artichoke#Jeruvege928cr0#0.13#231#vegetables#The Pantry +Jerusalem Artichoke#JeruvegeC2v086#0.14#123#vegetables#TriCounty Produce +Jerusalem Artichoke#JeruvegeehCYzi#0.14#196#vegetables#Off the Vine +Jicama#JicavegeRWYj9n#0.75#79#vegetables#The Pantry +Jicama#JicavegeGk5LKH#0.71#292#vegetables#TriCounty Produce +Jicama#JicavegeUjpaX1#0.70#308#vegetables#Off the Vine +Kale#Kalevegext6RNT#0.55#765#vegetables#The Pantry +Kale#KalevegeFsp17B#0.53#107#vegetables#TriCounty Produce +Kale#KalevegeAffBTS#0.57#573#vegetables#Off the Vine +Kiwifruit#KiwivegeloZBKJ#0.60#769#vegetables#The Pantry +Kiwifruit#KiwivegenCQAHw#0.59#307#vegetables#TriCounty Produce +Kiwifruit#Kiwivege0Gi3P2#0.59#235#vegetables#Off the Vine +Kohlrabi#KohlvegeJFKZDl#0.26#406#vegetables#The Pantry +Kohlrabi#Kohlvege32UTAj#0.28#613#vegetables#TriCounty Produce +Kohlrabi#KohlvegejNQC1M#0.28#326#vegetables#Off the Vine +Leek#Leekvege5iaFtg#0.70#580#vegetables#The Pantry +Leek#Leekvegei9Wxbz#0.68#188#vegetables#TriCounty Produce +Leek#LeekvegewY4mAc#0.70#473#vegetables#Off the Vine +Lettuce#LettvegesK9wDR#0.55#716#vegetables#The Pantry +Lettuce#LettvegeWzMyCM#0.57#83#vegetables#TriCounty Produce +Lettuce#LettvegeHgfGG8#0.56#268#vegetables#Off the Vine +Melons#Melovege6t93WF#0.11#252#vegetables#The Pantry +Melons#Melovegeq9kz7T#0.12#558#vegetables#TriCounty Produce +Melons#Melovege9kLTXN#0.12#382#vegetables#Off the Vine +Mushroom#MushvegeSq53h8#0.59#365#vegetables#The Pantry 
+Mushroom#Mushvegedq6lYP#0.59#444#vegetables#TriCounty Produce +Mushroom#Mushvege8o27D2#0.55#467#vegetables#Off the Vine +Okra#OkravegeTszQSL#0.55#62#vegetables#The Pantry +Okra#OkravegeJBWmfh#0.58#165#vegetables#TriCounty Produce +Okra#OkravegeD6tF9n#0.55#77#vegetables#Off the Vine +Onion#OniovegejwimQo#0.80#186#vegetables#The Pantry +Onion#OniovegeUOwwks#0.80#417#vegetables#TriCounty Produce +Onion#OniovegezcRDrc#0.80#435#vegetables#Off the Vine +Oregano#OregvegetlU7Ez#0.71#119#vegetables#The Pantry +Oregano#Oregvege9h9ZKy#0.70#173#vegetables#TriCounty Produce +Oregano#OregvegebXr0PJ#0.70#773#vegetables#Off the Vine +Parsley#ParsvegeXFEjjN#0.83#502#vegetables#The Pantry +Parsley#ParsvegejAg5C4#0.80#454#vegetables#TriCounty Produce +Parsley#ParsvegehAtH2H#0.84#523#vegetables#Off the Vine +Parsnip#Parsvegee9Lp6D#0.46#626#vegetables#The Pantry +Parsnip#ParsvegeSxXHSA#0.47#411#vegetables#TriCounty Produce +Parsnip#Parsvegea0stPf#0.44#403#vegetables#Off the Vine +Pea#Peavegecq4SxR#0.18#342#vegetables#The Pantry +Pea#Peavege46Gdp9#0.18#255#vegetables#TriCounty Produce +Pea#Peavegeov1gc5#0.18#251#vegetables#Off the Vine +Pepper#PeppvegeUcBYRp#0.33#52#vegetables#The Pantry +Pepper#PeppvegeB60btP#0.35#107#vegetables#TriCounty Produce +Pepper#PeppvegeG4tP3e#0.34#481#vegetables#Off the Vine +Pigeon Pea#Pigevegec5bAtm#0.94#391#vegetables#The Pantry +Pigeon Pea#Pigevegeb93eLi#0.91#447#vegetables#TriCounty Produce +Pigeon Pea#PigevegejEBDRa#0.89#259#vegetables#Off the Vine +Irish Potato#IrisvegeJNQqby#0.72#355#vegetables#The Pantry +Irish Potato#Irisvegewq1PLd#0.72#601#vegetables#TriCounty Produce +Irish Potato#IrisvegeAfFLdO#0.68#740#vegetables#Off the Vine +Pumpkin#PumpvegeiYsPR8#0.25#776#vegetables#The Pantry +Pumpkin#PumpvegelqP1Kh#0.25#189#vegetables#TriCounty Produce +Pumpkin#Pumpvegeb3nQU5#0.26#207#vegetables#Off the Vine +Radish#RadivegeNwwSBJ#0.16#613#vegetables#The Pantry +Radish#Radivege0tIBnL#0.16#779#vegetables#TriCounty Produce +Radish#RadivegeNLqJCf#0.16#731#vegetables#Off the Vine +Rhubarb#RhubvegeREfOti#0.12#301#vegetables#The Pantry +Rhubarb#Rhubvege4Jc3b7#0.12#557#vegetables#TriCounty Produce +Rhubarb#RhubvegeaXqF7H#0.12#378#vegetables#Off the Vine +Rosemary#Rosevege16QStc#0.73#380#vegetables#The Pantry +Rosemary#RosevegeNf6Oem#0.75#622#vegetables#TriCounty Produce +Rosemary#RosevegeFgsOyN#0.74#631#vegetables#Off the Vine +Rutabaga#RutavegecUYfQ3#0.55#676#vegetables#The Pantry +Rutabaga#RutavegejOG5DF#0.55#273#vegetables#TriCounty Produce +Rutabaga#RutavegewEVjzV#0.53#452#vegetables#Off the Vine +Salsify#SalsvegeViS9HF#0.11#537#vegetables#The Pantry +Salsify#Salsvegemd3HAL#0.11#753#vegetables#TriCounty Produce +Salsify#SalsvegeuRCnmq#0.10#787#vegetables#Off the Vine +Savory#Savovegee4DRWl#0.21#456#vegetables#The Pantry +Savory#SavovegerZ90Xm#0.21#642#vegetables#TriCounty Produce +Savory#Savovegeje7yy7#0.22#328#vegetables#Off the Vine +Sesame#Sesavege4NAWZE#0.84#54#vegetables#The Pantry +Sesame#SesavegeMTc9IN#0.84#458#vegetables#TriCounty Produce +Sesame#SesavegegOwAjo#0.83#125#vegetables#Off the Vine +Shallots#ShalvegeUO2pDO#0.26#599#vegetables#The Pantry +Shallots#ShalvegeY1sekb#0.27#647#vegetables#TriCounty Produce +Shallots#ShalvegeSDC8VY#0.27#369#vegetables#Off the Vine +Sugar Snap Peas#SugavegepUZDTl#0.47#308#vegetables#The Pantry +Sugar Snap Peas#Sugavege1XyzNH#0.48#205#vegetables#TriCounty Produce +Sugar Snap Peas#SugavegeJuaG7f#0.46#348#vegetables#Off the Vine +Soybean#SoybvegeqxSVRL#0.70#639#vegetables#The Pantry +Soybean#SoybvegezEMjOG#0.68#423#vegetables#TriCounty Produce 
+Soybean#SoybvegebanSFq#0.67#268#vegetables#Off the Vine +Spaghetti Squash#SpagvegeMNO1yC#0.12#753#vegetables#The Pantry +Spaghetti Squash#SpagvegeilpUaD#0.13#604#vegetables#TriCounty Produce +Spaghetti Squash#SpagvegeAOoZNX#0.13#431#vegetables#Off the Vine +Spinach#SpinvegeegXXou#0.10#742#vegetables#The Pantry +Spinach#SpinvegeVcqXL6#0.11#708#vegetables#TriCounty Produce +Spinach#SpinvegetZ26DN#0.11#625#vegetables#Off the Vine +Sweet Potato#SweevegepNDQWb#0.94#720#vegetables#The Pantry +Sweet Potato#Sweevegepnw7Tm#0.90#377#vegetables#TriCounty Produce +Sweet Potato#Sweevegeyk0C82#0.89#242#vegetables#Off the Vine +Swiss Chard#SwisvegeksalTA#0.54#545#vegetables#The Pantry +Swiss Chard#SwisvegeKm2Kze#0.54#472#vegetables#TriCounty Produce +Swiss Chard#SwisvegehteuMk#0.56#142#vegetables#Off the Vine +Taro#Tarovege3fpGV6#0.87#155#vegetables#The Pantry +Taro#TarovegerZkmof#0.86#371#vegetables#TriCounty Produce +Taro#TarovegeXKPuzc#0.89#443#vegetables#Off the Vine +Tarragon#TarrvegeCzVC6U#0.18#491#vegetables#The Pantry +Tarragon#TarrvegesIkEfS#0.17#65#vegetables#TriCounty Produce +Tarragon#TarrvegerZsKFP#0.18#180#vegetables#Off the Vine +Thyme#Thymvege8Rv72c#0.41#442#vegetables#The Pantry +Thyme#ThymvegeJoUdQS#0.42#237#vegetables#TriCounty Produce +Thyme#ThymvegeRck5uO#0.43#491#vegetables#Off the Vine +Tomato#Tomavegey0NHGK#0.31#60#vegetables#The Pantry +Tomato#TomavegeKAjRUn#0.30#630#vegetables#TriCounty Produce +Tomato#TomavegePZOHlH#0.30#70#vegetables#Off the Vine +Turnip#TurnvegeRVQiV5#0.44#580#vegetables#The Pantry +Turnip#TurnvegeVjIX9D#0.45#743#vegetables#TriCounty Produce +Turnip#TurnvegelFhvuJ#0.44#219#vegetables#Off the Vine +Watercress#WatevegelwzPLQ#0.54#230#vegetables#The Pantry +Watercress#Watevege8oeDCT#0.54#774#vegetables#TriCounty Produce +Watercress#Watevegexr8L1t#0.55#185#vegetables#Off the Vine +Watermelon#WatevegeL83MRH#0.19#698#vegetables#The Pantry +Watermelon#WatevegeR2S4Dq#0.21#488#vegetables#TriCounty Produce +Watermelon#WatevegepFPXQu#0.21#439#vegetables#Off the Vine +Kamote#KamovegegdON75#0.13#218#vegetables#The Pantry +Kamote#KamovegevupDBf#0.13#98#vegetables#TriCounty Produce +Kamote#KamovegeSQX7IA#0.14#703#vegetables#Off the Vine +Alogbati#AlogvegeB1WaJU#0.41#775#vegetables#The Pantry +Alogbati#AlogvegeVr5cPP#0.40#789#vegetables#TriCounty Produce +Alogbati#AlogvegeyTUQzy#0.40#416#vegetables#Off the Vine +Ampalaya#AmpavegemR9fSd#0.85#107#vegetables#The Pantry +Ampalaya#AmpavegeJDu9Im#0.90#676#vegetables#TriCounty Produce +Ampalaya#AmpavegepL8GH5#0.86#728#vegetables#Off the Vine +Dahon ng sili#Dahovege6X9grk#0.11#369#vegetables#The Pantry +Dahon ng sili#DahovegeiHZjQT#0.11#141#vegetables#TriCounty Produce +Dahon ng sili#DahovegeoCDAH8#0.12#517#vegetables#Off the Vine +Gabi#GabivegeVm4Xk3#0.44#396#vegetables#The Pantry +Gabi#Gabivegeu6woqK#0.42#722#vegetables#TriCounty Produce +Gabi#GabivegezcA7q1#0.42#394#vegetables#Off the Vine +Kabute#Kabuvege6Tqrif#0.16#123#vegetables#The Pantry +Kabute#KabuvegeA3uYdG#0.15#183#vegetables#TriCounty Produce +Kabute#KabuvegeXW6ZiI#0.16#624#vegetables#Off the Vine +Kamoteng Kahoy#KamovegeAdW37X#0.42#782#vegetables#The Pantry +Kamoteng Kahoy#KamovegetFlqpC#0.42#515#vegetables#TriCounty Produce +Kamoteng Kahoy#KamovegeMvxoLn#0.40#166#vegetables#Off the Vine +Kangkong#KangvegeSFTvEz#0.35#759#vegetables#The Pantry +Kangkong#KangvegeRLR6gL#0.34#695#vegetables#TriCounty Produce +Kangkong#Kangvege9BFo14#0.35#783#vegetables#Off the Vine +Labanos#Labavege3qrWJL#0.94#514#vegetables#The Pantry +Labanos#LabavegekgVWDH#0.89#210#vegetables#TriCounty 
Produce +Labanos#LabavegeiVPgMx#0.89#207#vegetables#Off the Vine +Labong#LabovegeX3O8yz#0.85#722#vegetables#The Pantry +Labong#LabovegeI1wSEs#0.87#472#vegetables#TriCounty Produce +Labong#LabovegeOPiQht#0.85#740#vegetables#Off the Vine +Malunggay#MaluvegeHkwAFm#0.30#252#vegetables#The Pantry +Malunggay#Maluvegez6TiSY#0.30#245#vegetables#TriCounty Produce +Malunggay#MaluvegewzY37D#0.31#405#vegetables#Off the Vine +Munggo#MungvegeqeuwGw#0.25#362#vegetables#The Pantry +Munggo#MungvegeNhqWvL#0.26#360#vegetables#TriCounty Produce +Munggo#MungvegeGxNxQC#0.25#555#vegetables#Off the Vine +Pechay#PechvegezDeHFZ#0.36#401#vegetables#The Pantry +Pechay#Pechvegehi4Fcx#0.35#723#vegetables#TriCounty Produce +Pechay#Pechvege8Pq8Eo#0.36#141#vegetables#Off the Vine +Sigarilyas#SigavegeMJrtlV#0.88#335#vegetables#The Pantry +Sigarilyas#SigavegeLhsoOB#0.87#768#vegetables#TriCounty Produce +Sigarilyas#SigavegeS6RJcA#0.93#356#vegetables#Off the Vine +Sitaw#Sitavege0hMi9z#0.65#153#vegetables#The Pantry +Sitaw#Sitavegeez1g6N#0.67#561#vegetables#TriCounty Produce +Sitaw#Sitavege0BCNeF#0.66#674#vegetables#Off the Vine +Talong#TalovegevZjVK6#0.10#530#vegetables#The Pantry +Talong#TalovegexX4MRw#0.09#305#vegetables#TriCounty Produce +Talong#TalovegeO3U2ze#0.10#126#vegetables#Off the Vine +Toge#TogevegeYelJUw#0.54#449#vegetables#The Pantry +Toge#Togevegeilr1xK#0.54#274#vegetables#TriCounty Produce +Toge#Togevegesvjnyn#0.51#316#vegetables#Off the Vine +Ube#UbevegeoPnxvb#0.56#397#vegetables#The Pantry +Ube#Ubevege2CNyve#0.55#450#vegetables#TriCounty Produce +Ube#UbevegeC43sVj#0.55#263#vegetables#Off the Vine +Upo#UpovegecOGRqC#0.22#404#vegetables#The Pantry +Upo#Upovegekjl2wl#0.22#541#vegetables#TriCounty Produce +Upo#UpovegemTTTwI#0.23#459#vegetables#Off the Vine +Edamame#EdamvegeVYtk8z#0.79#296#vegetables#The Pantry +Edamame#Edamvege608vXi#0.78#700#vegetables#TriCounty Produce +Edamame#Edamvege1jiqGY#0.75#115#vegetables#Off the Vine +Hairy melon#HairvegeFYFHIw#0.71#789#vegetables#The Pantry +Hairy melon#HairvegeS7AAqI#0.72#302#vegetables#TriCounty Produce +Hairy melon#HairvegeO6WJHL#0.72#444#vegetables#Off the Vine +Burdock#BurdvegeyLstLV#0.56#761#vegetables#The Pantry +Burdock#BurdvegeZsqAjT#0.56#582#vegetables#TriCounty Produce +Burdock#BurdvegeycF7mo#0.55#566#vegetables#Off the Vine +Snake gourd#SnakvegesfHGvt#0.92#626#vegetables#The Pantry +Snake gourd#SnakvegedlNiBk#0.92#669#vegetables#TriCounty Produce +Snake gourd#Snakvegec5n1UM#0.92#143#vegetables#Off the Vine +Wasabi#Wasavege5P5pZp#0.67#751#vegetables#The Pantry +Wasabi#Wasavege6EEE9r#0.68#559#vegetables#TriCounty Produce +Wasabi#Wasavege1ve7TY#0.65#61#vegetables#Off the Vine +Yam#YamvegeRN9ONH#0.57#438#vegetables#The Pantry +Yam#YamvegeWjdzeA#0.56#564#vegetables#TriCounty Produce +Yam#YamvegeI1AnyI#0.56#456#vegetables#Off the Vine +Apple Fritters#AppldessDj96hw#6.12#16#desserts#Mom's Kitchen +Apple Fritters#AppldessrN1kvM#6.06#7#desserts#The Baking Pan +Banana Split#Banadess7tpjkJ#10.86#10#desserts#Mom's Kitchen +Banana Split#Banadessfif758#11.07#14#desserts#The Baking Pan +Blueberry Boy Bait#BluedesseX2LVU#3.72#16#desserts#Mom's Kitchen +Blueberry Boy Bait#Bluedess9zLhaH#3.93#9#desserts#The Baking Pan +Candied Cranberries#CanddessjW92p3#1.77#9#desserts#Mom's Kitchen +Candied Cranberries#CanddesskhtVoQ#1.72#0#desserts#The Baking Pan +Daiquiri Souffle#DaiqdessebnYcy#9.54#15#desserts#Mom's Kitchen +Daiquiri Souffle#DaiqdessfM1DnX#9.72#6#desserts#The Baking Pan +Bananas Flambe#BanadesscczumD#6.94#12#desserts#Mom's Kitchen +Bananas 
Flambe#Banadess8qNfxd#7.07#16#desserts#The Baking Pan +Pie, Apple#Pie,desshcSHhT#7.88#11#desserts#Mom's Kitchen +Pie, Apple#Pie,dessTbiwDp#7.88#15#desserts#The Baking Pan +Pie, Pumpkin#Pie,desswhPBPB#6.00#20#desserts#Mom's Kitchen +Pie, Pumpkin#Pie,dessDg3NWl#6.24#19#desserts#The Baking Pan +Pie, Blueberry#Pie,dessw9VdgD#2.14#3#desserts#Mom's Kitchen +Pie, Blueberry#Pie,dessiSjZKD#2.12#1#desserts#The Baking Pan +Pie, Pecan#Pie,dess2NqhNR#12.70#20#desserts#Mom's Kitchen +Pie, Pecan#Pie,dessB1LfcE#12.33#12#desserts#The Baking Pan +Pie, Cranberry Apple#Pie,dess1mL7IS#10.16#7#desserts#Mom's Kitchen +Pie, Cranberry Apple#Pie,dessmDhkUA#10.16#11#desserts#The Baking Pan +Pie, Banana Cream#Pie,dessH80DuG#7.35#6#desserts#Mom's Kitchen +Pie, Banana Cream#Pie,dessf1YvFb#7.08#11#desserts#The Baking Pan +Pie, Key Lime#Pie,desshtli5N#4.85#2#desserts#Mom's Kitchen +Pie, Key Lime#Pie,dessMwQkKm#5.13#1#desserts#The Baking Pan +Pie, Lemon Meringue#Pie,dess9naVkX#3.74#7#desserts#Mom's Kitchen +Pie, Lemon Meringue#Pie,dessKYcNML#3.67#5#desserts#The Baking Pan +Pie, Caramel#Pie,dessSUuiIU#2.27#9#desserts#Mom's Kitchen +Pie, Caramel#Pie,dessvo8uHh#2.33#4#desserts#The Baking Pan +Pie, Raspberry#Pie,dessUHhMlS#2.36#0#desserts#Mom's Kitchen +Pie, Raspberry#Pie,dessJflbf5#2.36#2#desserts#The Baking Pan +Ice Cream, Chocolate#Ice desseXuyxx#1.44#9#desserts#Mom's Kitchen +Ice Cream, Chocolate#Ice dessASBohf#1.41#13#desserts#The Baking Pan +Ice Cream, Vanilla#Ice dessYnzbbt#11.92#19#desserts#Mom's Kitchen +Ice Cream, Vanilla#Ice dessUBBKp8#11.58#10#desserts#The Baking Pan +Ice Cream, Strawberry#Ice dessfTwKhD#1.90#14#desserts#Mom's Kitchen +Ice Cream, Strawberry#Ice dessaO9Fxf#1.99#6#desserts#The Baking Pan +Ice Cream, Rocky Road#Ice dessyIri3P#13.10#20#desserts#Mom's Kitchen +Ice Cream, Rocky Road#Ice dessZuLr8F#13.48#13#desserts#The Baking Pan +Ice Cream, Mint Chocolate Chip#Ice dessV1IGG7#5.75#4#desserts#Mom's Kitchen +Ice Cream, Mint Chocolate Chip#Ice dessX1gEQ4#5.64#1#desserts#The Baking Pan +Ice Cream Sundae#Ice dessbhlAXt#5.62#11#desserts#Mom's Kitchen +Ice Cream Sundae#Ice dessByapxl#5.72#16#desserts#The Baking Pan +Cobbler, Peach#CobbdessYUGeOB#10.14#20#desserts#Mom's Kitchen +Cobbler, Peach#CobbdessXfEtUK#10.43#16#desserts#The Baking Pan +Cobbler, Berry-Pecan#Cobbdessx3htak#5.36#12#desserts#Mom's Kitchen +Cobbler, Berry-Pecan#Cobbdesse4FUVI#5.41#8#desserts#The Baking Pan +Cobbler, Blueberry#CobbdessbiI0oF#3.78#11#desserts#Mom's Kitchen +Cobbler, Blueberry#CobbdessMXxbBN#3.57#2#desserts#The Baking Pan +Cobbler, Cherry#CobbdessNSa8QW#12.58#0#desserts#Mom's Kitchen +Cobbler, Cherry#CobbdessA1dADa#12.10#10#desserts#The Baking Pan +Cobbler, Huckleberry#Cobbdess3t6O8d#3.99#18#desserts#Mom's Kitchen +Cobbler, Huckleberry#CobbdessGI9euK#3.88#0#desserts#The Baking Pan +Cobbler, Rhubarb#Cobbdess22X40Z#9.54#0#desserts#Mom's Kitchen +Cobbler, Rhubarb#CobbdessPfnCT0#9.27#18#desserts#The Baking Pan +Cobbler, Strawberry#CobbdessI78188#12.43#0#desserts#Mom's Kitchen +Cobbler, Strawberry#CobbdessH3LdgQ#12.20#3#desserts#The Baking Pan +Cobbler, Zucchini#Cobbdess5rK4dP#11.24#3#desserts#Mom's Kitchen +Cobbler, Zucchini#Cobbdess4Ez8kS#10.51#10#desserts#The Baking Pan +Brownies#BrowdessmogdTl#7.62#9#desserts#Mom's Kitchen +Brownies#Browdess84Qc1z#7.55#9#desserts#The Baking Pan +Fudge Bar#Fudgdess8iXSyf#11.72#6#desserts#Mom's Kitchen +Fudge Bar#FudgdessakU1Id#12.29#5#desserts#The Baking Pan +Cookies, Oatmeal#Cookdessnq9Oya#2.84#15#desserts#Mom's Kitchen +Cookies, Oatmeal#CookdessBhgp7p#2.68#10#desserts#The Baking Pan +Cookies, 
Chocolate Chip#CookdessRVszsZ#12.73#17#desserts#Mom's Kitchen +Cookies, Chocolate Chip#CookdessSOoHmT#12.26#19#desserts#The Baking Pan +Cookies, Peanut Butter#Cookdess2UcMI2#7.82#5#desserts#Mom's Kitchen +Cookies, Peanut Butter#Cookdess1cILme#7.46#20#desserts#The Baking Pan +Mousse, Chocolate#MousdessDpN4sQ#6.25#20#desserts#Mom's Kitchen +Mousse, Chocolate#Mousdess8FyFT8#5.96#1#desserts#The Baking Pan +Mousse, Blueberry Maple#MousdessacwrkO#7.28#7#desserts#Mom's Kitchen +Mousse, Blueberry Maple#MousdessbiCMFg#7.21#12#desserts#The Baking Pan +Mousse, Chocolate Banana#MousdessIeW4qz#5.13#2#desserts#Mom's Kitchen +Mousse, Chocolate Banana#Mousdess1De9oL#5.08#19#desserts#The Baking Pan +Mousse, Cherry#Mousdesss1bF8H#13.05#20#desserts#Mom's Kitchen +Mousse, Cherry#Mousdess0ujevx#12.43#1#desserts#The Baking Pan +Mousse, Eggnog#MousdessZ38hXj#9.07#10#desserts#Mom's Kitchen +Mousse, Eggnog#Mousdesshs05ST#8.81#8#desserts#The Baking Pan +Mousse, Strawberry#MousdessHCDlBK#5.58#3#desserts#Mom's Kitchen +Mousse, Strawberry#MousdessSZ4PyW#5.36#6#desserts#The Baking Pan +Sherbet, Cantaloupe#Sherdess3DCxUg#3.11#9#desserts#Mom's Kitchen +Sherbet, Cantaloupe#Sherdesscp2VIz#2.99#7#desserts#The Baking Pan +Sherbet, Lemon Milk#Sherdess1JVFOS#7.57#9#desserts#Mom's Kitchen +Sherbet, Lemon Milk#SherdessC865vu#7.57#0#desserts#The Baking Pan +Sherbet, Orange Crush#Sherdess8W8Mb9#4.32#18#desserts#Mom's Kitchen +Sherbet, Orange Crush#SherdessxmVJBF#4.16#10#desserts#The Baking Pan +Sherbet, Blueberry#SherdessFAgxqp#3.46#9#desserts#Mom's Kitchen +Sherbet, Blueberry#SherdessMPL87u#3.60#6#desserts#The Baking Pan +Sherbet, Raspberry#Sherdesse86ugA#6.08#1#desserts#Mom's Kitchen +Sherbet, Raspberry#Sherdesslc1etR#5.85#12#desserts#The Baking Pan +Sherbet, Strawberry#SherdessFwv09m#4.63#17#desserts#Mom's Kitchen +Sherbet, Strawberry#SherdessKB0H7q#4.81#20#desserts#The Baking Pan +Tart, Apple#TartdessrsTyXA#3.35#18#desserts#Mom's Kitchen +Tart, Apple#Tartdessp7pyiy#3.13#11#desserts#The Baking Pan +Tart, Almond#TartdessC7FARL#6.62#10#desserts#Mom's Kitchen +Tart, Almond#Tartdess1V1A1c#6.68#13#desserts#The Baking Pan +Tart, Blueberry#TartdesssQZRXX#10.28#10#desserts#Mom's Kitchen +Tart, Blueberry#TartdessUSJSuc#10.28#9#desserts#The Baking Pan +Tart, Chocolate-Pear#Tartdess2pdOE4#5.67#17#desserts#Mom's Kitchen +Tart, Chocolate-Pear#TartdessL3aEDd#5.51#9#desserts#The Baking Pan +Tart, Lemon Fudge#Tartdess9DhZUT#3.88#3#desserts#Mom's Kitchen +Tart, Lemon Fudge#TartdesshzLOWt#3.96#13#desserts#The Baking Pan +Tart, Pecan#TartdessvSbXzd#11.80#3#desserts#Mom's Kitchen +Tart, Pecan#Tartdess6YXJec#11.04#13#desserts#The Baking Pan +Tart, Pineapple#TartdesseMfJFe#9.01#18#desserts#Mom's Kitchen +Tart, Pineapple#TartdessA2Wftr#8.44#13#desserts#The Baking Pan +Tart, Pear#Tartdess4a1BUc#10.09#2#desserts#Mom's Kitchen +Tart, Pear#TartdessNw8YPG#10.68#5#desserts#The Baking Pan +Tart, Raspberry#TartdessAVnpP6#6.18#7#desserts#Mom's Kitchen +Tart, Raspberry#TartdessfVxZFf#5.95#9#desserts#The Baking Pan +Tart, Strawberry#Tartdess4IUcZW#4.75#8#desserts#Mom's Kitchen +Tart, Strawberry#Tartdess2BeEDb#4.61#17#desserts#The Baking Pan +Tart, Raspberry#TartdesshyBd24#1.85#5#desserts#Mom's Kitchen +Tart, Raspberry#Tartdess5fqxgy#1.94#20#desserts#The Baking Pan +Trifle, Berry#TrifdessmEkbU2#12.48#19#desserts#Mom's Kitchen +Trifle, Berry#TrifdessAV9Ix8#12.60#18#desserts#The Baking Pan +Trifle, American#TrifdesscsdSCd#4.70#17#desserts#Mom's Kitchen +Trifle, American#TrifdessTArskm#4.35#11#desserts#The Baking Pan +Trifle, 
English#TrifdessX87q8T#8.20#9#desserts#Mom's Kitchen +Trifle, English#Trifdess52l955#8.12#11#desserts#The Baking Pan +Trifle, Orange#TrifdesslUwxwe#9.74#15#desserts#Mom's Kitchen +Trifle, Orange#TrifdessFrfCHP#10.22#1#desserts#The Baking Pan +Trifle, Pumpkin#TrifdessJKFN96#4.72#7#desserts#Mom's Kitchen +Trifle, Pumpkin#TrifdessMNw4EV#4.95#16#desserts#The Baking Pan +Trifle, Scottish#TrifdessFa0JdK#13.63#0#desserts#Mom's Kitchen +Trifle, Scottish#TrifdessAAUQCN#14.03#6#desserts#The Baking Pan +Trifle, Sherry#TrifdesscuttJg#4.42#5#desserts#Mom's Kitchen +Trifle, Sherry#TrifdesspRGpfP#4.21#19#desserts#The Baking Pan +Trifle, Strawberry#TrifdessAd5TpV#3.58#11#desserts#Mom's Kitchen +Trifle, Strawberry#Trifdess1rtW0A#3.58#3#desserts#The Baking Pan +Trifle, Scotch Whiskey#Trifdess2zJsGi#5.44#5#desserts#Mom's Kitchen +Trifle, Scotch Whiskey#TrifdessL8nuI6#5.18#5#desserts#The Baking Pan +Cheesecake, Amaretto#CheedessOJBqfD#11.89#5#desserts#Mom's Kitchen +Cheesecake, Amaretto#CheedessVnDf14#11.89#9#desserts#The Baking Pan +Cheesecake, Apple#Cheedessuks1YK#11.22#15#desserts#Mom's Kitchen +Cheesecake, Apple#CheedessMYKaKK#11.01#14#desserts#The Baking Pan +Cheesecake, Apricot#CheedessKUxTYY#12.34#16#desserts#Mom's Kitchen +Cheesecake, Apricot#CheedessMvB1pr#11.88#18#desserts#The Baking Pan +Cheesecake, Australian#CheedessQ9WAIn#2.70#9#desserts#Mom's Kitchen +Cheesecake, Australian#CheedessE6Jyjc#2.53#14#desserts#The Baking Pan +Cheesecake, Arkansas#CheedessTbqzmw#6.98#9#desserts#Mom's Kitchen +Cheesecake, Arkansas#CheedesstWJZfC#6.66#5#desserts#The Baking Pan +Cheesecake, Blueberry#Cheedessyo51KL#8.07#11#desserts#Mom's Kitchen +Cheesecake, Blueberry#Cheedess4Hz7P4#8.62#5#desserts#The Baking Pan +Cheesecake, Cherry#CheedessEahRkC#4.40#14#desserts#Mom's Kitchen +Cheesecake, Cherry#Cheedess3Nx4jZ#4.65#3#desserts#The Baking Pan +Cheesecake, Cran-Raspberry#CheedessrJsr9i#13.47#20#desserts#Mom's Kitchen +Cheesecake, Cran-Raspberry#CheedesshcuXCy#14.00#6#desserts#The Baking Pan +Cheesecake, German Chocolate#CheedesswayvJL#12.03#16#desserts#Mom's Kitchen +Cheesecake, German Chocolate#CheedessebTAeB#11.58#0#desserts#The Baking Pan +Cheesecake, Turtle#CheedessLqgeIA#12.19#6#desserts#Mom's Kitchen +Cheesecake, Turtle#CheedessvyNohA#12.07#19#desserts#The Baking Pan +Brownies, Apple#BrowdessIDW1Cc#5.44#12#desserts#Mom's Kitchen +Brownies, Apple#BrowdessyRMrAH#5.14#12#desserts#The Baking Pan +Brownies, Fudge#BrowdessmIHIFJ#5.19#8#desserts#Mom's Kitchen +Brownies, Fudge#BrowdessqewJ38#5.10#17#desserts#The Baking Pan +Brownies, Almond Macaroon#BrowdessniK7QI#10.57#3#desserts#Mom's Kitchen +Brownies, Almond Macaroon#BrowdessgkXURH#10.36#17#desserts#The Baking Pan +Brownies, Butterscotch#BrowdesslpA06E#7.16#13#desserts#Mom's Kitchen +Brownies, Butterscotch#BrowdessK5hofE#7.30#6#desserts#The Baking Pan +Brownies, Caramel#BrowdessVGfoA8#3.07#3#desserts#Mom's Kitchen +Brownies, Caramel#Browdess5jvVMM#3.13#11#desserts#The Baking Pan +Brownies, Cherry#Browdessyoa66A#3.39#17#desserts#Mom's Kitchen +Brownies, Cherry#BrowdessIg2JuF#3.39#11#desserts#The Baking Pan +Brownies, Chocolate Chip#Browdessb9dc59#6.18#10#desserts#Mom's Kitchen +Brownies, Chocolate Chip#BrowdessvW4nOx#6.43#14#desserts#The Baking Pan +Brownies, Coconut#BrowdessWPHrVR#3.06#15#desserts#Mom's Kitchen +Brownies, Coconut#BrowdessGVBlML#2.86#11#desserts#The Baking Pan +Brownies, Cream Cheese#Browdess1OyRay#12.74#4#desserts#Mom's Kitchen +Brownies, Cream Cheese#Browdess2fRsNv#12.61#19#desserts#The Baking Pan +Brownies, Fudge 
Mint#Browdessl7DP7k#11.45#14#desserts#Mom's Kitchen +Brownies, Fudge Mint#Browdessv70VKQ#11.34#16#desserts#The Baking Pan +Brownies, Mint Chip#BrowdessDDMvF7#1.81#15#desserts#Mom's Kitchen +Brownies, Mint Chip#Browdess0j9PBD#1.84#9#desserts#The Baking Pan +Cake, Angel Food#CakedessEaqGaE#11.18#3#desserts#Mom's Kitchen +Cake, Angel Food#CakedessJyAyFe#11.18#14#desserts#The Baking Pan +Cake, Chocolate#CakedessKLXFbn#10.11#7#desserts#Mom's Kitchen +Cake, Chocolate#CakedessfNP5Hg#9.91#14#desserts#The Baking Pan +Cake, Carrot#CakedessUTgMoV#4.20#13#desserts#Mom's Kitchen +Cake, Carrot#CakedessQdkaYg#4.00#3#desserts#The Baking Pan +Cake, Lemon Blueberry#CakedessepkeEW#11.73#16#desserts#Mom's Kitchen +Cake, Lemon Blueberry#CakedessHTKyQs#12.42#16#desserts#The Baking Pan +Cake Triple Fudge#CakedessiZ75lR#7.92#7#desserts#Mom's Kitchen +Cake Triple Fudge#CakedessWRrSXP#8.00#15#desserts#The Baking Pan +Cake, Walnut#CakedessveYVXZ#10.83#17#desserts#Mom's Kitchen +Cake, Walnut#Cakedesse22rT5#11.04#7#desserts#The Baking Pan +Cake, French Apple#CakedessjA2Kxv#1.95#0#desserts#Mom's Kitchen +Cake, French Apple#CakedessNBHCk0#1.86#20#desserts#The Baking Pan +Cake, Fig#CakedessOncX4y#6.82#3#desserts#Mom's Kitchen +Cake, Fig#CakedessTJtffn#7.08#10#desserts#The Baking Pan +Cake, Maple#CakedessnoGPRF#3.04#11#desserts#Mom's Kitchen +Cake, Maple#CakedessfVattM#3.22#4#desserts#The Baking Pan +Cake, Devil's Food#CakedessiXcDCt#4.73#7#desserts#Mom's Kitchen +Cake, Devil's Food#CakedessnBZk45#4.82#6#desserts#The Baking Pan +Cake, Double-Lemon#CakedesskeS0Vd#3.46#9#desserts#Mom's Kitchen +Cake, Double-Lemon#Cakedess50vx53#3.60#6#desserts#The Baking Pan +Sorbet, Blackberry#SorbdessQoa0CE#9.88#15#desserts#Mom's Kitchen +Sorbet, Blackberry#SorbdessqoOYzv#9.78#9#desserts#The Baking Pan diff --git a/db/examples_c/getting_started/vendors.txt b/db/examples_c/getting_started/vendors.txt new file mode 100644 index 000000000..528e1b110 --- /dev/null +++ b/db/examples_c/getting_started/vendors.txt @@ -0,0 +1,6 @@ +TriCounty Produce#309 S. Main Street#Middle Town#MN#55432#763 555 5761#Mort Dufresne#763 555 5765 +Simply Fresh#15612 Bogart Lane#Harrigan#WI#53704#420 333 3912#Cheryl Swedberg#420 333 3952 +Off the Vine#133 American Ct.#Centennial#IA#52002#563 121 3800#Bob King#563 121 3800 x54 +The Pantry#1206 N. Creek Way#Middle Town#MN#55432#763 555 3391#Sully Beckstrom#763 555 3391 +Mom's Kitchen#53 Yerman Ct.#Middle Town#MN#55432#763 554 9200#Maggie Kultgen#763 554 9200 x12 +The Baking Pan#1415 53rd Ave.#Dutchin#MN#56304#320 442 2277#Mike Roan#320 442 6879 diff --git a/db/examples_cxx/AccessExample.cpp b/db/examples_cxx/AccessExample.cpp index 14ddae578..ead22ac14 100644 --- a/db/examples_cxx/AccessExample.cpp +++ b/db/examples_cxx/AccessExample.cpp @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: AccessExample.cpp,v 11.22 2003/01/08 04:46:49 bostic Exp $ + * $Id: AccessExample.cpp,v 11.24 2004/09/17 22:00:28 mjc Exp $ */ #include @@ -113,12 +113,10 @@ void AccessExample::run(bool removeExistingDatabase, const char *fileName) // Insert records into the database, where the key is the user // input and the data is the user input in reverse order. 
// - char buf[1024]; - char rbuf[1024]; - char *t; - char *p; + char buf[1024], rbuf[1024]; + char *p, *t; int ret; - int len; + u_int32_t len; for (;;) { cout << "input> "; @@ -128,7 +126,7 @@ void AccessExample::run(bool removeExistingDatabase, const char *fileName) if (cin.eof()) break; - if ((len = strlen(buf)) <= 0) + if ((len = (u_int32_t)strlen(buf)) <= 0) continue; for (t = rbuf, p = buf + (len - 1); p >= buf;) *t++ = *p--; diff --git a/db/examples_cxx/BtRecExample.cpp b/db/examples_cxx/BtRecExample.cpp index 033bd8b24..e56effdac 100644 --- a/db/examples_cxx/BtRecExample.cpp +++ b/db/examples_cxx/BtRecExample.cpp @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: BtRecExample.cpp,v 11.22 2003/01/08 04:46:52 bostic Exp $ + * $Id: BtRecExample.cpp,v 11.26 2004/09/17 22:00:28 mjc Exp $ */ #include @@ -68,7 +68,7 @@ BtRecExample::BtRecExample(FILE *fp) (void)sprintf(buf, "%04d_", cnt); if (fgets(buf + 4, sizeof(buf) - 4, fp) == NULL) break; - u_int32_t len = strlen(buf); + u_int32_t len = (u_int32_t)strlen(buf); buf[len - 1] = '\0'; for (t = rbuf, p = buf + (len - 2); p >= buf;) *t++ = *p--; @@ -103,7 +103,7 @@ void BtRecExample::stats() { DB_BTREE_STAT *statp; - dbp->stat(&statp, 0); + dbp->stat(NULL, &statp, 0); cout << progname << ": database contains " << (u_long)statp->bt_ndata << " records\n"; diff --git a/db/examples_cxx/EnvExample.cpp b/db/examples_cxx/EnvExample.cpp index 5794d8850..6e3aeab90 100644 --- a/db/examples_cxx/EnvExample.cpp +++ b/db/examples_cxx/EnvExample.cpp @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: EnvExample.cpp,v 11.25 2003/01/08 04:46:55 bostic Exp $ + * $Id: EnvExample.cpp,v 11.26 2004/01/28 03:36:04 bostic Exp $ */ #include diff --git a/db/examples_cxx/LockExample.cpp b/db/examples_cxx/LockExample.cpp index edfbfd19d..a6cfb6ef2 100644 --- a/db/examples_cxx/LockExample.cpp +++ b/db/examples_cxx/LockExample.cpp @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: LockExample.cpp,v 11.24 2003/04/24 15:46:04 bostic Exp $ + * $Id: LockExample.cpp,v 11.26 2004/09/17 22:00:28 mjc Exp $ */ #include @@ -125,12 +125,13 @@ LockExample::LockExample(const char *home, u_int32_t maxlocks, int do_unlink) void LockExample::run() { long held; - u_int32_t len, locker; + size_t len; + u_int32_t locker; int did_get, ret; DbLock *locks = 0; int lockcount = 0; - char objbuf[1024]; int lockid = 0; + char objbuf[1024]; // // Accept lock requests. @@ -172,7 +173,7 @@ void LockExample::run() else lock_type = DB_LOCK_WRITE; - Dbt dbt(objbuf, strlen(objbuf)); + Dbt dbt(objbuf, (u_int32_t)strlen(objbuf)); DbLock lock; ret = lock_get(locker, DB_LOCK_NOWAIT, &dbt, diff --git a/db/examples_cxx/MpoolExample.cpp b/db/examples_cxx/MpoolExample.cpp index 24c36a7b1..b7d43f091 100644 --- a/db/examples_cxx/MpoolExample.cpp +++ b/db/examples_cxx/MpoolExample.cpp @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. 
* - * $Id: MpoolExample.cpp,v 11.24 2003/01/08 04:47:01 bostic Exp $ + * $Id: MpoolExample.cpp,v 11.25 2004/01/28 03:36:05 bostic Exp $ */ #include diff --git a/db/examples_cxx/SequenceExample.cpp b/db/examples_cxx/SequenceExample.cpp new file mode 100644 index 000000000..9958e00c7 --- /dev/null +++ b/db/examples_cxx/SequenceExample.cpp @@ -0,0 +1,130 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1997-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SequenceExample.cpp,v 1.1 2004/09/22 22:20:32 mjc Exp $ + */ + +#include + +#include +#include +#include +#include + +#ifdef _WIN32 +extern "C" { + extern int getopt(int, char * const *, const char *); + extern int optind; +} +#else +#include +#endif + +#include + +#define DATABASE "sequence.db" +#define SEQUENCE "my_sequence" + +using std::cout; +using std::cerr; + +class SequenceExample +{ +public: + SequenceExample(); + void run(bool removeExistingDatabase, const char *fileName); + +private: + // no need for copy and assignment + SequenceExample(const SequenceExample &); + void operator = (const SequenceExample &); +}; + +int +usage() +{ + (void)fprintf(stderr, "usage: SequenceExample [-r] [database]\n"); + return (EXIT_FAILURE); +} + +int +main(int argc, char *argv[]) +{ + int ch, rflag; + const char *database; + + rflag = 0; + while ((ch = getopt(argc, argv, "r")) != EOF) + switch (ch) { + case 'r': + rflag = 1; + break; + case '?': + default: + return (usage()); + } + argc -= optind; + argv += optind; + + /* Accept optional database name. */ + database = *argv == NULL ? DATABASE : argv[0]; + + // Use a try block just to report any errors. + // An alternate approach to using exceptions is to + // use error models (see DbEnv::set_error_model()) so + // that error codes are returned for all Berkeley DB methods. + // + try { + SequenceExample app; + app.run((bool)(rflag == 1 ? true : false), database); + return (EXIT_SUCCESS); + } + catch (DbException &dbe) { + cerr << "SequenceExample: " << dbe.what() << "\n"; + return (EXIT_FAILURE); + } +} + +SequenceExample::SequenceExample() +{ +} + +void SequenceExample::run(bool removeExistingDatabase, const char *fileName) +{ + // Remove the previous database. + if (removeExistingDatabase) + (void)remove(fileName); + + // Create the database object. + // There is no environment for this simple example. + Db db(0, 0); + + db.set_error_stream(&cerr); + db.set_errpfx("SequenceExample"); + db.open(NULL, fileName, NULL, DB_BTREE, DB_CREATE, 0664); + + // We put a try block around this section of code + // to ensure that our database is properly closed + // in the event of an error. + // + try { + Dbt key((void *)SEQUENCE, (u_int32_t)strlen(SEQUENCE)); + DbSequence seq(&db, 0); + seq.open(0, &key, DB_CREATE); + + for (int i = 0; i < 10; i++) { + db_seq_t seqnum; + seq.get(0, 1, &seqnum, 0); + cout << "Got sequence number: " << seqnum << "\n"; + } + + seq.close(0); + } catch (DbException &dbe) { + cerr << "SequenceExample: " << dbe.what() << "\n"; + } + + db.close(0); +} diff --git a/db/examples_cxx/TpcbExample.cpp b/db/examples_cxx/TpcbExample.cpp index 2ddcdb501..fd6648855 100644 --- a/db/examples_cxx/TpcbExample.cpp +++ b/db/examples_cxx/TpcbExample.cpp @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. 
* - * $Id: TpcbExample.cpp,v 11.32 2003/04/24 15:46:05 bostic Exp $ + * $Id: TpcbExample.cpp,v 11.33 2004/01/28 03:36:05 bostic Exp $ */ #include diff --git a/db/examples_cxx/getting_started/MyDb.cpp b/db/examples_cxx/getting_started/MyDb.cpp new file mode 100644 index 000000000..b059381e9 --- /dev/null +++ b/db/examples_cxx/getting_started/MyDb.cpp @@ -0,0 +1,63 @@ +#include "MyDb.hpp" + +// File: MyDb.cpp + +// Class constructor. Requires a path to the location +// where the database is located, and a database name +MyDb::MyDb(std::string &path, std::string &dbName, + bool isSecondary) + : db_(NULL, 0), // Instantiate Db object + dbFileName_(path + dbName), // Database file name + cFlags_(DB_CREATE) // If the database doesn't yet exist, + // allow it to be created. +{ + try + { + // Redirect debugging information to std::cerr + db_.set_error_stream(&std::cerr); + + // If this is a secondary database, support + // sorted duplicates + if (isSecondary) + db_.set_flags(DB_DUPSORT); + + // Open the database + db_.open(NULL, dbFileName_.c_str(), NULL, DB_BTREE, cFlags_, 0); + } + // DbException is not a subclass of std::exception, so we + // need to catch them both. + catch(DbException &e) + { + std::cerr << "Error opening database: " << dbFileName_ << "\n"; + std::cerr << e.what() << std::endl; + } + catch(std::exception &e) + { + std::cerr << "Error opening database: " << dbFileName_ << "\n"; + std::cerr << e.what() << std::endl; + } +} + +// Private member used to close a database. Called from the class +// destructor. +void +MyDb::close() +{ + // Close the db + try + { + db_.close(0); + std::cout << "Database " << dbFileName_ + << " is closed." << std::endl; + } + catch(DbException &e) + { + std::cerr << "Error closing database: " << dbFileName_ << "\n"; + std::cerr << e.what() << std::endl; + } + catch(std::exception &e) + { + std::cerr << "Error closing database: " << dbFileName_ << "\n"; + std::cerr << e.what() << std::endl; + } +} diff --git a/db/examples_cxx/getting_started/MyDb.hpp b/db/examples_cxx/getting_started/MyDb.hpp new file mode 100644 index 000000000..44d6a11c8 --- /dev/null +++ b/db/examples_cxx/getting_started/MyDb.hpp @@ -0,0 +1,38 @@ +// File: MyDb.hpp + +#ifndef MYDB_H +#define MYDB_H + +#include +#include + +class MyDb +{ +public: + // Constructor requires a path to the database, + // and a database name. + MyDb(std::string &path, std::string &dbName, + bool isSecondary = false); + + // Our destructor just calls our private close method. + ~MyDb() { close(); } + + inline Db &getDb() {return db_;} + +private: + Db db_; + std::string dbFileName_; + u_int32_t cFlags_; + + // Make sure the default constructor is private + // We don't want it used. + MyDb() : db_(NULL, 0) {} + + // We put our database close activity here. + // This is called from our destructor. In + // a more complicated example, we might want + // to make this method public, but a private + // method is more appropriate for this example. 
+ void close(); +}; +#endif diff --git a/db/examples_cxx/getting_started/excxx_example_database_load.cpp b/db/examples_cxx/getting_started/excxx_example_database_load.cpp new file mode 100644 index 000000000..b9e79f339 --- /dev/null +++ b/db/examples_cxx/getting_started/excxx_example_database_load.cpp @@ -0,0 +1,221 @@ +// File: excxx_example_database_load.cpp +#include +#include +#include + +#include "MyDb.hpp" +#include "gettingStartedCommon.hpp" + +#ifdef _WIN32 +extern "C" { + extern int getopt(int, char * const *, const char *); + extern int optind; +} +#else +#include +#endif + +// Forward declarations +void loadInventoryDB(MyDb &, std::string &); +void loadVendorDB(MyDb &, std::string &); + +using namespace std; + +int +usage() +{ + std::cout << "example_database_load [-b ]" + << " [-h ]" << std::endl; + + std::cout << "Note: If -b -h is specified, then the path must end" + << " with your system's path delimiter (/ or \\)" + << std::endl; + return (-1); +} + +// Loads the contents of vendors.txt and inventory.txt into +// Berkeley DB databases. +int +main (int argc, char *argv[]) +{ + + char ch, lastChar; + extern char *optarg; + + // Initialize the path to the database files + std::string basename("./"); + std::string databaseHome("./"); + + // Database names + std::string vDbName("vendordb.db"); + std::string iDbName("inventorydb.db"); + std::string itemSDbName("itemname.sdb"); + + // Parse the command line arguments + while ((ch = getopt(argc, argv, "b:h:")) != EOF) + switch (ch) { + case 'h': + databaseHome = optarg; + lastChar = databaseHome[databaseHome.size() -1]; + if (lastChar != '/' && lastChar != '\\') + return (usage()); + break; + case 'b': + basename = optarg; + lastChar = basename[basename.size() -1]; + if (lastChar != '/' && lastChar != '\\') + return (usage()); + break; + case '?': + default: + return (usage()); + break; + } + + // Identify the full name for our input files, which should + // also include some path information. + std::string inventoryFile = basename + "inventory.txt"; + std::string vendorFile = basename + "vendors.txt"; + + try + { + // Open all databases. + MyDb inventoryDB(databaseHome, iDbName); + MyDb vendorDB(databaseHome, vDbName); + MyDb itemnameSDB(databaseHome, itemSDbName, true); + + // Associate the primary and the secondary + inventoryDB.getDb().associate(NULL, + &(itemnameSDB.getDb()), + get_item_name, + 0); + + // Load the inventory database + loadInventoryDB(inventoryDB, inventoryFile); + + // Load the vendor database + loadVendorDB(vendorDB, vendorFile); + } catch(DbException &e) { + std::cerr << "Error loading databases. " << std::endl; + std::cerr << e.what() << std::endl; + return (e.get_errno()); + } catch(std::exception &e) { + std::cerr << "Error loading databases. " << std::endl; + std::cerr << e.what() << std::endl; + return (-1); + } + + // MyDb class constructors will close the databases when they + // go out of scope. + return (0); +} // End main + +// Used to locate the first pound sign (a field delimiter) +// in the input string. 
+int +getNextPound(std::string &theString, std::string &substring) +{ + int pos = theString.find("#"); + substring.assign(theString, 0, pos); + theString.assign(theString, pos + 1, theString.size()); + return (pos); +} + +// Loads the contents of the inventory.txt file into a database +void +loadInventoryDB(MyDb &inventoryDB, std::string &inventoryFile) +{ + InventoryData inventoryData; + std::string substring; + int nextPound; + + std::ifstream inFile(inventoryFile.c_str(), std::ios::in); + if ( !inFile ) + { + std::cerr << "Could not open file '" << inventoryFile + << "'. Giving up." << std::endl; + throw std::exception(); + } + + while (!inFile.eof()) + { + inventoryData.clear(); + std::string stringBuf; + std::getline(inFile, stringBuf); + + // Now parse the line + if (!stringBuf.empty()) + { + nextPound = getNextPound(stringBuf, substring); + inventoryData.setName(substring); + + nextPound = getNextPound(stringBuf, substring); + inventoryData.setSKU(substring); + + nextPound = getNextPound(stringBuf, substring); + inventoryData.setPrice(strtod(substring.c_str(), 0)); + + nextPound = getNextPound(stringBuf, substring); + inventoryData.setQuantity(strtol(substring.c_str(), 0, 10)); + + nextPound = getNextPound(stringBuf, substring); + inventoryData.setCategory(substring); + + nextPound = getNextPound(stringBuf, substring); + inventoryData.setVendor(substring); + + void *buff = (void *)inventoryData.getSKU().c_str(); + int size = inventoryData.getSKU().size()+1; + Dbt key(buff, size); + + buff = inventoryData.getBuffer(); + size = inventoryData.getBufferSize(); + Dbt data(buff, size); + + inventoryDB.getDb().put(NULL, &key, &data, 0); + } + + } + + inFile.close(); + +} + +// Loads the contents of the vendors.txt file into a database +void +loadVendorDB(MyDb &vendorDB, std::string &vendorFile) +{ + std::ifstream inFile(vendorFile.c_str(), std::ios::in); + if ( !inFile ) + { + std::cerr << "Could not open file '" << vendorFile + << "'. Giving up." << std::endl; + throw std::exception(); + } + + VENDOR my_vendor; + while (!inFile.eof()) + { + std::string stringBuf; + std::getline(inFile, stringBuf); + memset(&my_vendor, 0, sizeof(VENDOR)); + + // Scan the line into the structure. + // Convenient, but not particularly safe. + // In a real program, there would be a lot more + // defensive code here. 
+ sscanf(stringBuf.c_str(), + "%20[^#]#%20[^#]#%20[^#]#%3[^#]#%6[^#]#%13[^#]#%20[^#]#%20[^\n]", + my_vendor.name, my_vendor.street, + my_vendor.city, my_vendor.state, + my_vendor.zipcode, my_vendor.phone_number, + my_vendor.sales_rep, my_vendor.sales_rep_phone); + + Dbt key(my_vendor.name, strlen(my_vendor.name) + 1); + Dbt data(&my_vendor, sizeof(VENDOR)); + + vendorDB.getDb().put(NULL, &key, &data, 0); + } + + inFile.close(); +} diff --git a/db/examples_cxx/getting_started/excxx_example_database_read.cpp b/db/examples_cxx/getting_started/excxx_example_database_read.cpp new file mode 100644 index 000000000..373e7f371 --- /dev/null +++ b/db/examples_cxx/getting_started/excxx_example_database_read.cpp @@ -0,0 +1,228 @@ +// File: excxx_example_database_read.cpp + +#include +#include +#include + +#include "MyDb.hpp" +#include "gettingStartedCommon.hpp" + +#ifdef _WIN32 +extern "C" { + extern int getopt(int, char * const *, const char *); + extern int optind; +} +#else +#include +#endif + +// Forward declarations +int show_item(MyDb &itemnameSDB, MyDb &vendorDB, std::string &itemName); +int show_all_records(MyDb &inventoryDB, MyDb &vendorDB); +int show_vendor(MyDb &vendorDB, const char *vendor); + +int +usage() +{ + std::cout << "example_database_read [-i ]" + << " [-h ]" << std::endl; + + std::cout << "Note: Any path specified to the -h parameter must end" + << " with your system's path delimiter (/ or \\)" + << std::endl; + return (-1); +} + +int +main (int argc, char *argv[]) +{ + + char ch, lastChar; + extern char *optarg; + + // Initialize the path to the database files + std::string databaseHome("./"); + std::string itemName; + + // Database names + std::string vDbName("vendordb.db"); + std::string iDbName("inventorydb.db"); + std::string itemSDbName("itemname.sdb"); + + // Parse the command line arguments + while ((ch = getopt(argc, argv, "h:i:")) != EOF) + switch (ch) { + case 'h': + databaseHome = optarg; + lastChar = databaseHome[databaseHome.size() -1]; + if (lastChar != '/' && lastChar != '\\') + return (usage()); + break; + case 'i': + itemName = optarg; + break; + case '?': + default: + return (usage()); + break; + } + + try + { + // Open all databases. + MyDb inventoryDB(databaseHome, iDbName); + MyDb vendorDB(databaseHome, vDbName); + MyDb itemnameSDB(databaseHome, itemSDbName, true); + + // Associate the secondary to the primary + inventoryDB.getDb().associate(NULL, + &(itemnameSDB.getDb()), + get_item_name, + 0); + + if (itemName.empty()) + { + show_all_records(inventoryDB, vendorDB); + } else { + show_item(itemnameSDB, vendorDB, itemName); + } + } catch(DbException &e) { + std::cerr << "Error reading databases. " << std::endl; + return (e.get_errno()); + } catch(std::exception &e) { + std::cerr << "Error reading databases. " << std::endl; + std::cerr << e.what() << std::endl; + return (-1); + } + + return (0); +} // End main + +// Shows the records in the inventory database that +// have a specific item name. For each inventory record +// shown, the appropriate vendor record is also displayed. +int +show_item(MyDb &itemnameSDB, MyDb &vendorDB, std::string &itemName) +{ + + // Get a cursor to the itemname secondary db + Dbc *cursorp; + + try { + itemnameSDB.getDb().cursor(NULL, &cursorp, 0); + + // Get the search key. This is the name on the inventory + // record that we want to examine. 
+ std::cout << "Looking for " << itemName << std::endl; + Dbt key((void *)itemName.c_str(), itemName.length() + 1); + Dbt data; + + // Position the cursor to the first record in the secondary + // database that has the appropriate key. + int ret = cursorp->get(&key, &data, DB_SET); + if (!ret) { + do { + InventoryData inventoryItem(data.get_data()); + inventoryItem.show(); + + show_vendor(vendorDB, inventoryItem.getVendor().c_str()); + + } while (cursorp->get(&key, &data, DB_NEXT_DUP) == 0); + } else { + std::cerr << "No records found for '" << itemName + << "'" << std::endl; + } + } catch(DbException &e) { + itemnameSDB.getDb().err(e.get_errno(), "Error in show_item"); + cursorp->close(); + throw e; + } catch(std::exception &e) { + itemnameSDB.getDb().errx("Error in show_item: %s", e.what()); + cursorp->close(); + throw e; + } + + cursorp->close(); + return (0); +} + +// Shows all the records in the inventory database. +// For each inventory record shown, the appropriate +// vendor record is also displayed. +int +show_all_records(MyDb &inventoryDB, MyDb &vendorDB) +{ + + // Get a cursor to the inventory db + Dbc *cursorp; + try { + inventoryDB.getDb().cursor(NULL, &cursorp, 0); + + // Iterate over the inventory database, from the first record + // to the last, displaying each in turn + Dbt key, data; + int ret; + while ((ret = cursorp->get(&key, &data, DB_NEXT)) == 0 ) + { + InventoryData inventoryItem(data.get_data()); + inventoryItem.show(); + + show_vendor(vendorDB, inventoryItem.getVendor().c_str()); + } + } catch(DbException &e) { + inventoryDB.getDb().err(e.get_errno(), "Error in show_all_records"); + cursorp->close(); + throw e; + } catch(std::exception &e) { + cursorp->close(); + throw e; + } + + cursorp->close(); + return (0); +} + +// Shows a vendor record. Each vendor record is an instance of +// a vendor structure. See loadVendorDB() in +// example_database_load for how this structure was originally +// put into the database. +int +show_vendor(MyDb &vendorDB, const char *vendor) +{ + Dbt data; + VENDOR my_vendor; + + try { + // Set the search key to the vendor's name + // vendor is explicitly cast to char * to stop a compiler + // complaint. + Dbt key((char *)vendor, strlen(vendor) + 1); + + // Make sure we use the memory we set aside for the VENDOR + // structure rather than the memory that DB allocates. + // Some systems may require structures to be aligned in memory + // in a specific way, and DB may not get it right. 
+ + data.set_data(&my_vendor); + data.set_ulen(sizeof(VENDOR)); + data.set_flags(DB_DBT_USERMEM); + + // Get the record + vendorDB.getDb().get(NULL, &key, &data, 0); + std::cout << " " << my_vendor.street << "\n" + << " " << my_vendor.city << ", " + << my_vendor.state << "\n" + << " " << my_vendor.zipcode << "\n" + << " " << my_vendor.phone_number << "\n" + << " Contact: " << my_vendor.sales_rep << "\n" + << " " << my_vendor.sales_rep_phone + << std::endl; + + } catch(DbException &e) { + vendorDB.getDb().err(e.get_errno(), "Error in show_vendor"); + throw e; + } catch(std::exception &e) { + throw e; + } + return (0); +} diff --git a/db/examples_cxx/getting_started/gettingStartedCommon.hpp b/db/examples_cxx/getting_started/gettingStartedCommon.hpp new file mode 100644 index 000000000..27d1cf1c7 --- /dev/null +++ b/db/examples_cxx/getting_started/gettingStartedCommon.hpp @@ -0,0 +1,181 @@ +// File: gettingStartedCommon.hpp + +#ifndef GETTINGSTARTEDCOMMON_H +#define GETTINGSTARTEDCOMMON_H + +class InventoryData +{ +public: + inline void setPrice(double price) {price_ = price;} + inline void setQuantity(long quantity) {quantity_ = quantity;} + inline void setCategory(std::string &category) {category_ = category;} + inline void setName(std::string &name) {name_ = name;} + inline void setVendor(std::string &vendor) {vendor_ = vendor;} + inline void setSKU(std::string &sku) {sku_ = sku;} + + inline double& getPrice() {return(price_);} + inline long& getQuantity() {return(quantity_);} + inline std::string& getCategory() {return(category_);} + inline std::string& getName() {return(name_);} + inline std::string& getVendor() {return(vendor_);} + inline std::string& getSKU() {return(sku_);} + + /* Initialize our data members */ + void clear() + { + price_ = 0.0; + quantity_ = 0; + category_ = ""; + name_ = ""; + vendor_ = ""; + sku_ = ""; + } + + // Default constructor + InventoryData() { clear(); } + + // Constructor from a void * + // For use with the data returned from a bdb get + InventoryData(void *buffer) + { + char *buf = (char *)buffer; + + price_ = *((double *)buf); + bufLen_ = sizeof(double); + + quantity_ = *((long *)(buf + bufLen_)); + bufLen_ += sizeof(long); + + name_ = buf + bufLen_; + bufLen_ += name_.size() + 1; + + sku_ = buf + bufLen_; + bufLen_ += sku_.size() + 1; + + category_ = buf + bufLen_; + bufLen_ += category_.size() + 1; + + vendor_ = buf + bufLen_; + bufLen_ += vendor_.size() + 1; + } + + /* + * Marshalls this classes data members into a single + * contiguous memory location for the purpose of storing + * the data in a database. + */ + char * + getBuffer() + { + // Zero out the buffer + memset(databuf_, 0, 500); + /* + * Now pack the data into a single contiguous memory location for + * storage. + */ + bufLen_ = 0; + int dataLen = 0; + + dataLen = sizeof(double); + memcpy(databuf_, &price_, dataLen); + bufLen_ += dataLen; + + dataLen = sizeof(long); + memcpy(databuf_ + bufLen_, &quantity_, dataLen); + bufLen_ += dataLen; + + packString(databuf_, name_); + packString(databuf_, sku_); + packString(databuf_, category_); + packString(databuf_, vendor_); + + return (databuf_); + } + + /* + * Returns the size of the buffer. Used for storing + * the buffer in a database. 
+ */ + inline int getBufferSize() { return (bufLen_); } + + /* Utility function used to show the contents of this class */ + void + show() { + std::cout << "\nName: " << name_ << std::endl; + std::cout << " SKU: " << sku_ << std::endl; + std::cout << " Price: " << price_ << std::endl; + std::cout << " Quantity: " << quantity_ << std::endl; + std::cout << " Category: " << category_ << std::endl; + std::cout << " Vendor: " << vendor_ << std::endl; + } + +private: + + /* + * Utility function that appends a char * to the end of + * the buffer. + */ + void + packString(char *buffer, std::string &theString) + { + int string_size = theString.size() + 1; + memcpy(buffer+bufLen_, theString.c_str(), string_size); + bufLen_ += string_size; + } + + /* Data members */ + std::string category_, name_, vendor_, sku_; + double price_; + long quantity_; + int bufLen_; + char databuf_[500]; + +}; + +#define MAXFIELD 20 + +typedef struct vendor { + char name[MAXFIELD]; /* Vendor name */ + char street[MAXFIELD]; /* Street name and number */ + char city[MAXFIELD]; /* City */ + char state[3]; /* Two-digit US state code */ + char zipcode[6]; /* US zipcode */ + char phone_number[13]; /* Vendor phone number */ + char sales_rep[MAXFIELD]; /* Name of sales representative */ + char sales_rep_phone[MAXFIELD]; /* Sales rep's phone number */ +} VENDOR; + +// Forward declarations +class Db; +class Dbt; + +// Used to extract an inventory item's name from an +// inventory database record. This function is used to create +// keys for secondary database records. +int +get_item_name(Db *dbp, const Dbt *pkey, const Dbt *pdata, Dbt *skey) +{ + InventoryData id(pdata->get_data()); + const char *itemname = id.getName().c_str(); + + // unused + (void)pkey; + + // If these don't match, then there was a problem with + // the buffer contained in pdata, or there's a programming + // error in how the buffer is marshalled/unmarshalled. + // This should never happen! + if ((u_int32_t)id.getBufferSize() != pdata->get_size()) { + dbp->errx("get_item_name: buffer sizes do not match!"); + // When we return non-zero, the index record is not + // added/updated. 
+ return (-1); + } + + /* Now set the secondary key's data to be the item name */ + skey->set_data((void *)itemname); + skey->set_size(strlen(itemname) + 1); + + return (0); +}; +#endif diff --git a/db/examples_cxx/getting_started/inventory.txt b/db/examples_cxx/getting_started/inventory.txt new file mode 100644 index 000000000..d6b68762c --- /dev/null +++ b/db/examples_cxx/getting_started/inventory.txt @@ -0,0 +1,800 @@ +Oranges#OranfruiRu6Ghr#0.71#451#fruits#TriCounty Produce +Oranges#OranfruiXRPFn1#0.73#263#fruits#Simply Fresh +Oranges#OranfruiLEuzQj#0.69#261#fruits#Off the Vine +Apples#ApplfruiZls4Du#1.20#472#fruits#TriCounty Produce +Apples#Applfrui8fewZe#1.21#402#fruits#Simply Fresh +Apples#ApplfruiXoT6xG#1.20#728#fruits#Off the Vine +Bananas#BanafruipIlluX#0.50#207#fruits#TriCounty Produce +Bananas#BanafruiEQhWuj#0.50#518#fruits#Simply Fresh +Bananas#BanafruimpRgPO#0.50#741#fruits#Off the Vine +Almonds#AlmofruiPPCLz8#0.55#600#fruits#TriCounty Produce +Almonds#AlmofruidMyKmp#0.54#745#fruits#Simply Fresh +Almonds#Almofrui7K0xzH#0.53#405#fruits#Off the Vine +Allspice#AllsfruibJGK4R#0.94#669#fruits#TriCounty Produce +Allspice#Allsfruilfvoeg#0.94#244#fruits#Simply Fresh +Allspice#Allsfruio12BOS#0.95#739#fruits#Off the Vine +Apricot#AprifruijphEpM#0.89#560#fruits#TriCounty Produce +Apricot#AprifruiU1zIDn#0.91#980#fruits#Simply Fresh +Apricot#AprifruichcwYS#0.95#668#fruits#Off the Vine +Avocado#AvocfruiwYYomu#0.99#379#fruits#TriCounty Produce +Avocado#AvocfruiT6IwWE#1.02#711#fruits#Simply Fresh +Avocado#AvocfruisbK1h5#0.97#856#fruits#Off the Vine +Bael Fruit#BaelfruilAU7Hj#0.41#833#fruits#TriCounty Produce +Bael Fruit#BaelfruiX2KvqV#0.40#770#fruits#Simply Fresh +Bael Fruit#Baelfruidjne4e#0.39#778#fruits#Off the Vine +Betel Nut#BetefruiQYdHqQ#0.34#926#fruits#TriCounty Produce +Betel Nut#Betefrui32BKAz#0.37#523#fruits#Simply Fresh +Betel Nut#BetefruisaWzY4#0.34#510#fruits#Off the Vine +Black Walnut#BlacfruiXxIuMU#0.57#923#fruits#TriCounty Produce +Black Walnut#BlacfruiZXgY9t#0.59#312#fruits#Simply Fresh +Black Walnut#BlacfruikWO0vz#0.60#877#fruits#Off the Vine +Blueberry#BluefruiCbxb4t#1.02#276#fruits#TriCounty Produce +Blueberry#BluefruiBuCfgO#1.03#522#fruits#Simply Fresh +Blueberry#Bluefruixz8MkE#1.01#278#fruits#Off the Vine +Boysenberry#BoysfruizxyMuz#1.05#239#fruits#TriCounty Produce +Boysenberry#Boysfrui3hTRQu#1.09#628#fruits#Simply Fresh +Boysenberry#BoysfruinpLvr3#1.02#349#fruits#Off the Vine +Breadnut#Breafrui0kDPs6#0.31#558#fruits#TriCounty Produce +Breadnut#Breafrui44s3og#0.32#879#fruits#Simply Fresh +Breadnut#BreafruiwyLKhJ#0.30#407#fruits#Off the Vine +Cactus#Cactfruiyo2ddH#0.56#601#fruits#TriCounty Produce +Cactus#CactfruixTOLv5#0.54#477#fruits#Simply Fresh +Cactus#Cactfrui4ioUav#0.55#896#fruits#Off the Vine +California Wild Grape#CalifruiZsWAa6#0.78#693#fruits#TriCounty Produce +California Wild Grape#Califruid84xyt#0.83#293#fruits#Simply Fresh +California Wild Grape#CalifruiLSJFoJ#0.81#543#fruits#Off the Vine +Cashew#CashfruihaOFVP#0.37#221#fruits#TriCounty Produce +Cashew#Cashfruizzcw1E#0.38#825#fruits#Simply Fresh +Cashew#CashfruiqtMe2Q#0.38#515#fruits#Off the Vine +Chico Sapote#ChicfruiY534SX#0.47#216#fruits#TriCounty Produce +Chico Sapote#ChicfruiSqL3Lc#0.45#476#fruits#Simply Fresh +Chico Sapote#ChicfruiurzIp4#0.47#200#fruits#Off the Vine +Chinese Jello#ChinfruiyRg75u#0.64#772#fruits#TriCounty Produce +Chinese Jello#ChinfruiuIUj0X#0.65#624#fruits#Simply Fresh +Chinese Jello#ChinfruiwXbRrL#0.67#719#fruits#Off the Vine +Common Guava#Commfruib6znSI#0.80#483#fruits#TriCounty 
Produce +Common Guava#Commfrui6eUivL#0.81#688#fruits#Simply Fresh +Common Guava#CommfruibWKnz3#0.84#581#fruits#Off the Vine +Crabapple#CrabfruioY2L63#0.94#582#fruits#TriCounty Produce +Crabapple#Crabfruijxcxyt#0.94#278#fruits#Simply Fresh +Crabapple#CrabfruibvWd8K#0.95#213#fruits#Off the Vine +Cranberry#CranfruiJxmKr5#0.83#923#fruits#TriCounty Produce +Cranberry#CranfruiPlklAF#0.84#434#fruits#Simply Fresh +Cranberry#Cranfrui3G5XL9#0.84#880#fruits#Off the Vine +Damson Plum#DamsfruibMRMwe#0.98#782#fruits#TriCounty Produce +Damson Plum#DamsfruiV6wFLk#1.03#400#fruits#Simply Fresh +Damson Plum#DamsfruiLhqFrQ#0.98#489#fruits#Off the Vine +Date Palm#DatefruigS31GU#1.14#315#fruits#TriCounty Produce +Date Palm#DatefruipKPaJK#1.09#588#fruits#Simply Fresh +Date Palm#Datefrui5fTyNS#1.14#539#fruits#Off the Vine +Dragon's Eye#DragfruirGJ3aI#0.28#315#fruits#TriCounty Produce +Dragon's Eye#DragfruiBotxqt#0.27#705#fruits#Simply Fresh +Dragon's Eye#DragfruiPsSnV9#0.29#482#fruits#Off the Vine +East Indian Wine Palm#EastfruiNXFJuG#0.43#992#fruits#TriCounty Produce +East Indian Wine Palm#Eastfruiq06fRr#0.40#990#fruits#Simply Fresh +East Indian Wine Palm#Eastfrui4QUwl2#0.43#351#fruits#Off the Vine +English Walnut#EnglfruiBMtHtW#1.04#787#fruits#TriCounty Produce +English Walnut#EnglfruiHmVzxV#1.03#779#fruits#Simply Fresh +English Walnut#Englfrui18Tc9n#1.06#339#fruits#Off the Vine +False Mangosteen#FalsfruibkmYqH#0.66#971#fruits#TriCounty Produce +False Mangosteen#FalsfruipBsbcX#0.68#250#fruits#Simply Fresh +False Mangosteen#FalsfruiPrFfhe#0.70#386#fruits#Off the Vine +Fried Egg Tree#FriefruiihHUdc#0.29#649#fruits#TriCounty Produce +Fried Egg Tree#FriefruimdD1rf#0.28#527#fruits#Simply Fresh +Fried Egg Tree#FriefruivyAzYq#0.29#332#fruits#Off the Vine +Genipap#GenifruiDtKusQ#0.62#986#fruits#TriCounty Produce +Genipap#GenifruiXq32eP#0.61#326#fruits#Simply Fresh +Genipap#Genifruiphwwyq#0.61#794#fruits#Off the Vine +Ginger#GingfruiQLbRZI#0.28#841#fruits#TriCounty Produce +Ginger#GingfruiS8kK4p#0.29#432#fruits#Simply Fresh +Ginger#GingfruioL3Y4S#0.27#928#fruits#Off the Vine +Grapefruit#Grapfruih86Zxh#1.07#473#fruits#TriCounty Produce +Grapefruit#GrapfruiwL1v0N#1.08#878#fruits#Simply Fresh +Grapefruit#GrapfruihmJzWm#1.02#466#fruits#Off the Vine +Hackberry#HackfruiQjomN7#0.22#938#fruits#TriCounty Produce +Hackberry#HackfruiWS0eKp#0.20#780#fruits#Simply Fresh +Hackberry#Hackfrui0MIv6J#0.21#345#fruits#Off the Vine +Honey Locust#HonefruiebXGRc#1.08#298#fruits#TriCounty Produce +Honey Locust#HonefruiPSqILB#1.00#427#fruits#Simply Fresh +Honey Locust#Honefrui6UXtvW#1.03#422#fruits#Off the Vine +Japanese Plum#JapafruihTmoYR#0.40#658#fruits#TriCounty Produce +Japanese Plum#JapafruifGqz0l#0.40#700#fruits#Simply Fresh +Japanese Plum#JapafruiufWkLx#0.39#790#fruits#Off the Vine +Jojoba#JojofruisE0wTh#0.97#553#fruits#TriCounty Produce +Jojoba#JojofruiwiYLp2#1.02#969#fruits#Simply Fresh +Jojoba#JojofruigMD1ej#0.96#899#fruits#Off the Vine +Jostaberry#JostfruiglsEGV#0.50#300#fruits#TriCounty Produce +Jostaberry#JostfruiV3oo1h#0.52#423#fruits#Simply Fresh +Jostaberry#JostfruiUBerur#0.53#562#fruits#Off the Vine +Kangaroo Apple#KangfruiEQknz8#0.60#661#fruits#TriCounty Produce +Kangaroo Apple#KangfruiNabdFq#0.60#377#fruits#Simply Fresh +Kangaroo Apple#Kangfrui7hky1i#0.60#326#fruits#Off the Vine +Ken's Red#Ken'fruinPUSIm#0.21#337#fruits#TriCounty Produce +Ken's Red#Ken'fruiAoZlpl#0.21#902#fruits#Simply Fresh +Ken's Red#Ken'frui5rmbd4#0.22#972#fruits#Off the Vine +Ketembilla#Ketefrui3yAKxQ#0.31#303#fruits#TriCounty Produce 
+Ketembilla#KetefruiROn6F5#0.34#283#fruits#Simply Fresh +Ketembilla#Ketefrui16Rsts#0.33#887#fruits#Off the Vine +King Orange#KingfruisOFzWk#0.74#429#fruits#TriCounty Produce +King Orange#KingfruiBmzRJT#0.74#500#fruits#Simply Fresh +King Orange#KingfruiGsrgRX#0.78#994#fruits#Off the Vine +Kola Nut#KolafruiBbtAuw#0.58#991#fruits#TriCounty Produce +Kola Nut#KolafruirbnLVS#0.62#733#fruits#Simply Fresh +Kola Nut#Kolafrui1ItXJx#0.58#273#fruits#Off the Vine +Kuko#Kukofrui6YH5Ds#0.41#647#fruits#TriCounty Produce +Kuko#Kukofrui7WZaZK#0.39#241#fruits#Simply Fresh +Kuko#Kukofruig9MQFT#0.40#204#fruits#Off the Vine +Kumquat#KumqfruiT6WKQL#0.73#388#fruits#TriCounty Produce +Kumquat#KumqfruidLiFLU#0.70#393#fruits#Simply Fresh +Kumquat#KumqfruiL6zhQX#0.71#994#fruits#Off the Vine +Kwai Muk#KwaifruiQK1zOE#1.10#249#fruits#TriCounty Produce +Kwai Muk#KwaifruifbCRlT#1.14#657#fruits#Simply Fresh +Kwai Muk#Kwaifruipe7T2m#1.09#617#fruits#Off the Vine +Lanzone#LanzfruijsPf1v#0.34#835#fruits#TriCounty Produce +Lanzone#LanzfruibU3QoL#0.34#404#fruits#Simply Fresh +Lanzone#LanzfruiYgHwv6#0.34#237#fruits#Off the Vine +Lemon#Lemofrui4Tgsg2#0.46#843#fruits#TriCounty Produce +Lemon#LemofruivK6qvj#0.43#207#fruits#Simply Fresh +Lemon#LemofruiXSXqJ0#0.44#910#fruits#Off the Vine +Lemon Grass#LemofruiVFgVh5#0.40#575#fruits#TriCounty Produce +Lemon Grass#LemofruiWIelvi#0.41#386#fruits#Simply Fresh +Lemon Grass#LemofruiGVAow0#0.39#918#fruits#Off the Vine +Lilly-pilly#LillfruiEQnW1m#1.21#974#fruits#TriCounty Produce +Lilly-pilly#LillfruiMqVuR5#1.23#303#fruits#Simply Fresh +Lilly-pilly#LillfruiVGH9p4#1.17#512#fruits#Off the Vine +Ling Nut#LingfruiGtOf8X#0.85#540#fruits#TriCounty Produce +Ling Nut#LingfruiuP0Jf9#0.83#200#fruits#Simply Fresh +Ling Nut#LingfruiuO5qf5#0.81#319#fruits#Off the Vine +Lipote#LipofruisxD2Qc#0.85#249#fruits#TriCounty Produce +Lipote#LipofruiHNdIqL#0.85#579#fruits#Simply Fresh +Lipote#LipofruiSQ2pKK#0.83#472#fruits#Off the Vine +Litchee#Litcfrui1R6Ydz#0.99#806#fruits#TriCounty Produce +Litchee#LitcfruiwtDM79#1.01#219#fruits#Simply Fresh +Litchee#LitcfruilpPZbC#1.05#419#fruits#Off the Vine +Longan#LongfruiEI0lWF#1.02#573#fruits#TriCounty Produce +Longan#LongfruiPQxxSF#1.04#227#fruits#Simply Fresh +Longan#LongfruisdI812#0.99#993#fruits#Off the Vine +Love-in-a-mist#LovefruiKYPW70#0.69#388#fruits#TriCounty Produce +Love-in-a-mist#LovefruiHrgjDa#0.67#478#fruits#Simply Fresh +Love-in-a-mist#LovefruipSOWVz#0.71#748#fruits#Off the Vine +Lychee#LychfruiicVLnY#0.38#276#fruits#TriCounty Produce +Lychee#LychfruiGY6yJr#0.38#602#fruits#Simply Fresh +Lychee#LychfruiTzDCq2#0.40#572#fruits#Off the Vine +Mabolo#MabofruiSY8RQS#0.97#263#fruits#TriCounty Produce +Mabolo#MabofruiOWWk0n#0.98#729#fruits#Simply Fresh +Mabolo#MabofruixQLOTF#0.98#771#fruits#Off the Vine +Macadamia Nut#MacafruiZppJPw#1.22#888#fruits#TriCounty Produce +Macadamia Nut#MacafruiI7XFMV#1.24#484#fruits#Simply Fresh +Macadamia Nut#Macafrui4x8bxV#1.20#536#fruits#Off the Vine +Madagascar Plum#MadafruiVj5fDf#1.14#596#fruits#TriCounty Produce +Madagascar Plum#MadafruivJhAFI#1.15#807#fruits#Simply Fresh +Madagascar Plum#Madafrui7MTe1x#1.17#355#fruits#Off the Vine +Magnolia Vine#MagnfruiigN4Y1#1.17#321#fruits#TriCounty Produce +Magnolia Vine#MagnfruicKtiHd#1.15#353#fruits#Simply Fresh +Magnolia Vine#MagnfruiLPDSCp#1.23#324#fruits#Off the Vine +Mamey#Mamefrui5rjLF6#0.36#683#fruits#TriCounty Produce +Mamey#MamefruiM6ndnR#0.38#404#fruits#Simply Fresh +Mamey#Mamefruiq9KntD#0.36#527#fruits#Off the Vine +Mandarin Orange#MandfruiRKpmKL#0.42#352#fruits#TriCounty Produce 
+Mandarin Orange#Mandfrui1V0KLG#0.42#548#fruits#Simply Fresh +Mandarin Orange#Mandfruig2o9Fg#0.41#686#fruits#Off the Vine +Marany Nut#MarafruiqkrwoJ#1.14#273#fruits#TriCounty Produce +Marany Nut#MarafruiCGKpke#1.12#482#fruits#Simply Fresh +Marany Nut#MarafruiB1YE5x#1.09#412#fruits#Off the Vine +Marula#MarufruiXF4biH#0.22#403#fruits#TriCounty Produce +Marula#MarufruidZiVKZ#0.23#317#fruits#Simply Fresh +Marula#MarufruiIS8BEp#0.21#454#fruits#Off the Vine +Mayhaw#MayhfruiCSrm7k#0.24#220#fruits#TriCounty Produce +Mayhaw#MayhfruiNRDzWs#0.25#710#fruits#Simply Fresh +Mayhaw#MayhfruiIUCyEg#0.24#818#fruits#Off the Vine +Meiwa Kumquat#MeiwfruiYhv3AY#0.21#997#fruits#TriCounty Produce +Meiwa Kumquat#MeiwfruiyzQFNR#0.22#347#fruits#Simply Fresh +Meiwa Kumquat#Meiwfruict4OUp#0.21#923#fruits#Off the Vine +Mexican Barberry#Mexifrui2P2dXi#0.28#914#fruits#TriCounty Produce +Mexican Barberry#MexifruiywUTMI#0.29#782#fruits#Simply Fresh +Mexican Barberry#MexifruijPHu5X#0.29#367#fruits#Off the Vine +Meyer Lemon#Meyefruin9901J#0.38#824#fruits#TriCounty Produce +Meyer Lemon#MeyefruiNeQpjO#0.37#617#fruits#Simply Fresh +Meyer Lemon#MeyefruiYEVznZ#0.37#741#fruits#Off the Vine +Mississippi Honeyberry#Missfruipb5iW3#0.95#595#fruits#TriCounty Produce +Mississippi Honeyberry#MissfruiINiDbB#0.96#551#fruits#Simply Fresh +Mississippi Honeyberry#MissfruiNUQ82a#0.93#396#fruits#Off the Vine +Monkey Pot#MonkfruiXlTW4j#0.90#896#fruits#TriCounty Produce +Monkey Pot#Monkfrui1p7a4h#0.88#344#fruits#Simply Fresh +Monkey Pot#Monkfrui4eKggb#0.92#917#fruits#Off the Vine +Monos Plum#Monofrui0Mv9aV#1.11#842#fruits#TriCounty Produce +Monos Plum#Monofrui6iTGQY#1.14#570#fruits#Simply Fresh +Monos Plum#MonofruiNu2uGH#1.13#978#fruits#Off the Vine +Moosewood#MoosfruiMXEGex#0.86#969#fruits#TriCounty Produce +Moosewood#Moosfrui8805mB#0.86#963#fruits#Simply Fresh +Moosewood#MoosfruiOsnDFL#0.88#594#fruits#Off the Vine +Natal Orange#NatafruitB8Kh2#0.42#332#fruits#TriCounty Produce +Natal Orange#NatafruiOhqRrd#0.42#982#fruits#Simply Fresh +Natal Orange#NatafruiRObMf6#0.41#268#fruits#Off the Vine +Nectarine#NectfruilNfeD8#0.36#601#fruits#TriCounty Produce +Nectarine#NectfruiQfjt6b#0.35#818#fruits#Simply Fresh +Nectarine#Nectfrui5U7U96#0.37#930#fruits#Off the Vine +Neem Tree#NeemfruiCruEMF#0.24#222#fruits#TriCounty Produce +Neem Tree#NeemfruiGv0pv5#0.24#645#fruits#Simply Fresh +Neem Tree#NeemfruiUFPVfk#0.25#601#fruits#Off the Vine +New Zealand Spinach#New fruihDIgec#0.87#428#fruits#TriCounty Produce +New Zealand Spinach#New fruiaoR9TP#0.87#630#fruits#Simply Fresh +New Zealand Spinach#New fruiy8LBul#0.94#570#fruits#Off the Vine +Olosapo#OlosfruiGXvaMm#0.76#388#fruits#TriCounty Produce +Olosapo#OlosfruiESlpB3#0.76#560#fruits#Simply Fresh +Olosapo#OlosfruiFNEkER#0.76#962#fruits#Off the Vine +Oregon Grape#OregfruiWxhzrf#1.14#892#fruits#TriCounty Produce +Oregon Grape#OregfruiMgjHUn#1.20#959#fruits#Simply Fresh +Oregon Grape#OregfruiC5UCxX#1.17#419#fruits#Off the Vine +Otaheite Apple#OtahfruilT0iFj#0.21#579#fruits#TriCounty Produce +Otaheite Apple#Otahfrui92PyMY#0.22#857#fruits#Simply Fresh +Otaheite Apple#OtahfruiLGD1EH#0.20#807#fruits#Off the Vine +Oyster Plant#OystfruimGxOsj#0.77#835#fruits#TriCounty Produce +Oyster Plant#Oystfrui1kudBX#0.81#989#fruits#Simply Fresh +Oyster Plant#OystfruiaX3uO2#0.80#505#fruits#Off the Vine +Panama Berry#PanafruiZG0Vp4#1.19#288#fruits#TriCounty Produce +Panama Berry#PanafruiobvXPE#1.21#541#fruits#Simply Fresh +Panama Berry#PanafruipaW8F3#1.16#471#fruits#Off the Vine +Peach Tomato#PeacfruiQpovYH#1.20#475#fruits#TriCounty 
Produce +Peach Tomato#PeacfruixYXLTN#1.18#655#fruits#Simply Fresh +Peach Tomato#PeacfruiILDYAp#1.23#876#fruits#Off the Vine +Peanut#Peanfruiy8M7pt#0.69#275#fruits#TriCounty Produce +Peanut#PeanfruiEimbED#0.65#307#fruits#Simply Fresh +Peanut#Peanfruic452Vc#0.68#937#fruits#Off the Vine +Peanut Butter Fruit#PeanfruixEDt9Y#0.27#628#fruits#TriCounty Produce +Peanut Butter Fruit#PeanfruiST0T0R#0.27#910#fruits#Simply Fresh +Peanut Butter Fruit#Peanfrui7jeRN2#0.27#938#fruits#Off the Vine +Pear#PearfruiB5YmSJ#0.20#945#fruits#TriCounty Produce +Pear#PearfruiA93XZx#0.21#333#fruits#Simply Fresh +Pear#PearfruioNKiIf#0.21#715#fruits#Off the Vine +Pecan#PecafruiiTIv1Z#0.26#471#fruits#TriCounty Produce +Pecan#PecafruiMGkqla#0.26#889#fruits#Simply Fresh +Pecan#Pecafrui1szYz2#0.25#929#fruits#Off the Vine +Purple Passion Fruit#Purpfrui4mMGkD#1.04#914#fruits#TriCounty Produce +Purple Passion Fruit#Purpfrui5XOW3K#1.06#423#fruits#Simply Fresh +Purple Passion Fruit#PurpfruifDTAgW#1.05#549#fruits#Off the Vine +Red Mulberry#Red fruiVLOXIW#1.24#270#fruits#TriCounty Produce +Red Mulberry#Red fruiXNXt4a#1.21#836#fruits#Simply Fresh +Red Mulberry#Red fruiUseWLG#1.21#795#fruits#Off the Vine +Red Princess#Red fruigJLR4V#0.23#829#fruits#TriCounty Produce +Red Princess#Red fruinVKps5#0.23#558#fruits#Simply Fresh +Red Princess#Red frui0jl9mg#0.24#252#fruits#Off the Vine +Striped Screw Pine#StrifruiUKzjoU#0.60#226#fruits#TriCounty Produce +Striped Screw Pine#StrifruivWLDzH#0.64#685#fruits#Simply Fresh +Striped Screw Pine#StrifruiiF7CGH#0.60#983#fruits#Off the Vine +Tapioca#Tapifruib4LCqt#0.40#955#fruits#TriCounty Produce +Tapioca#TapifruiwgQLj9#0.41#889#fruits#Simply Fresh +Tapioca#TapifruiZ6Igg3#0.41#655#fruits#Off the Vine +Tavola#Tavofrui0k9XOt#1.16#938#fruits#TriCounty Produce +Tavola#Tavofrui8DuRxL#1.08#979#fruits#Simply Fresh +Tavola#TavofruiNZEuJZ#1.16#215#fruits#Off the Vine +Tea#TeafruiL0357s#1.11#516#fruits#TriCounty Produce +Tea#TeafruiD5soTf#1.13#970#fruits#Simply Fresh +Tea#TeafruiOWq4oO#1.19#357#fruits#Off the Vine +Ugli Fruit#UglifruipKNCpf#0.24#501#fruits#TriCounty Produce +Ugli Fruit#UglifruifbDrzc#0.24#642#fruits#Simply Fresh +Ugli Fruit#Uglifruiwx8or4#0.24#280#fruits#Off the Vine +Vegetable Brain#VegefruieXLBoc#0.73#355#fruits#TriCounty Produce +Vegetable Brain#Vegefruik5FSdl#0.71#498#fruits#Simply Fresh +Vegetable Brain#VegefruiKBfzN0#0.72#453#fruits#Off the Vine +White Walnut#Whitfruit3oVHL#0.30#501#fruits#TriCounty Produce +White Walnut#WhitfruiHygydw#0.30#913#fruits#Simply Fresh +White Walnut#WhitfruieNtplo#0.30#401#fruits#Off the Vine +Wood Apple#WoodfruijVPRqA#0.68#501#fruits#TriCounty Produce +Wood Apple#Woodfrui4Zk69T#0.68#616#fruits#Simply Fresh +Wood Apple#WoodfruiuSLHZK#0.70#474#fruits#Off the Vine +Yellow Horn#Yellfrui5igjjf#1.18#729#fruits#TriCounty Produce +Yellow Horn#Yellfrui0DiPqa#1.13#517#fruits#Simply Fresh +Yellow Horn#Yellfrui0ljvqC#1.14#853#fruits#Off the Vine +Yellow Sapote#YellfruilGmCfq#0.93#204#fruits#TriCounty Produce +Yellow Sapote#Yellfrui4J2mke#0.88#269#fruits#Simply Fresh +Yellow Sapote#Yellfrui6PuXaL#0.86#575#fruits#Off the Vine +Ylang-ylang#Ylanfrui3rmByO#0.76#429#fruits#TriCounty Produce +Ylang-ylang#YlanfruiA80Nkq#0.76#886#fruits#Simply Fresh +Ylang-ylang#YlanfruinUEm5d#0.72#747#fruits#Off the Vine +Zapote Blanco#ZapofruisZ5sMA#0.67#428#fruits#TriCounty Produce +Zapote Blanco#ZapofruilKxl7N#0.65#924#fruits#Simply Fresh +Zapote Blanco#ZapofruiAe6Eu1#0.68#255#fruits#Off the Vine +Zulu Nut#Zulufrui469K4k#0.71#445#fruits#TriCounty Produce +Zulu 
Nut#ZulufruiWbz6vU#0.71#653#fruits#Simply Fresh +Zulu Nut#Zulufrui0LJnWK#0.71#858#fruits#Off the Vine +Artichoke#ArtivegeIuqmS4#0.71#282#vegetables#The Pantry +Artichoke#Artivegebljjnf#0.69#66#vegetables#TriCounty Produce +Artichoke#ArtivegeTa2lcF#0.70#618#vegetables#Off the Vine +Asparagus#AspavegezC0cDl#0.23#70#vegetables#The Pantry +Asparagus#AspavegeM1q5Kt#0.24#546#vegetables#TriCounty Produce +Asparagus#AspavegeXWbCb8#0.24#117#vegetables#Off the Vine +Basil#Basivegev08fzf#0.31#213#vegetables#The Pantry +Basil#BasivegeF3Uha7#0.29#651#vegetables#TriCounty Produce +Basil#BasivegeqR8SHC#0.31#606#vegetables#Off the Vine +Bean#BeanvegegCFUOp#0.27#794#vegetables#The Pantry +Bean#BeanvegeqMSEVq#0.27#468#vegetables#TriCounty Produce +Bean#Beanvege4IGUwX#0.27#463#vegetables#Off the Vine +Beet#BeetvegedEv4Ic#0.35#120#vegetables#The Pantry +Beet#Beetvegegi1bz1#0.35#540#vegetables#TriCounty Produce +Beet#BeetvegemztZcN#0.36#386#vegetables#Off the Vine +Blackeyed Pea#Blacvege3TPldr#0.86#133#vegetables#The Pantry +Blackeyed Pea#Blacvege3Zqnep#0.88#67#vegetables#TriCounty Produce +Blackeyed Pea#Blacvege3khffZ#0.90#790#vegetables#Off the Vine +Cabbage#CabbvegeY0c4Fw#0.82#726#vegetables#The Pantry +Cabbage#CabbvegeoaK7Co#0.85#439#vegetables#TriCounty Produce +Cabbage#CabbvegeVvO646#0.82#490#vegetables#Off the Vine +Carrot#CarrvegeEbI0sw#0.45#717#vegetables#The Pantry +Carrot#CarrvegeEZndWL#0.49#284#vegetables#TriCounty Produce +Carrot#CarrvegewUkHao#0.47#122#vegetables#Off the Vine +Cauliflower#Caulvege1CPeNG#0.68#756#vegetables#The Pantry +Cauliflower#CaulvegedrPqib#0.66#269#vegetables#TriCounty Produce +Cauliflower#CaulvegeT6cka8#0.65#728#vegetables#Off the Vine +Chayote#ChayvegePRReGE#0.14#233#vegetables#The Pantry +Chayote#Chayvegep058f7#0.14#88#vegetables#TriCounty Produce +Chayote#ChayvegeoxO40S#0.14#611#vegetables#Off the Vine +Corn#CornvegeukXkv6#0.72#632#vegetables#The Pantry +Corn#CornvegePnPREC#0.72#609#vegetables#TriCounty Produce +Corn#CornvegeO0GwoQ#0.70#664#vegetables#Off the Vine +Cucumber#CucuvegeEqQeA7#0.94#499#vegetables#The Pantry +Cucumber#CucuvegewmKbJ1#0.94#738#vegetables#TriCounty Produce +Cucumber#CucuvegeUW6JaA#0.94#565#vegetables#Off the Vine +Cantaloupe#CantvegeIHs9vJ#0.66#411#vegetables#The Pantry +Cantaloupe#CantvegeEaDdST#0.66#638#vegetables#TriCounty Produce +Cantaloupe#CantvegewWQEa0#0.64#682#vegetables#Off the Vine +Carraway#CarrvegewuL4Ma#0.32#740#vegetables#The Pantry +Carraway#CarrvegeyiWfBj#0.32#265#vegetables#TriCounty Produce +Carraway#CarrvegeMjb1i9#0.31#732#vegetables#Off the Vine +Celeriac#CelevegeoTBicd#0.74#350#vegetables#The Pantry +Celeriac#CelevegeCNABoZ#0.70#261#vegetables#TriCounty Produce +Celeriac#Celevege9LUeww#0.70#298#vegetables#Off the Vine +Celery#Celevegej40ZCc#0.59#740#vegetables#The Pantry +Celery#CelevegerYlVRy#0.58#734#vegetables#TriCounty Produce +Celery#Celevege67eimC#0.58#619#vegetables#Off the Vine +Chervil#ChervegeuH4Dge#0.09#502#vegetables#The Pantry +Chervil#Chervegea1OyKO#0.09#299#vegetables#TriCounty Produce +Chervil#Chervegeq56gMO#0.09#474#vegetables#Off the Vine +Chicory#Chicvege79qoQ8#0.09#709#vegetables#The Pantry +Chicory#ChicvegeTSVBQq#0.10#477#vegetables#TriCounty Produce +Chicory#Chicvege6qpcyi#0.10#282#vegetables#Off the Vine +Chinese Cabbage#ChinvegeFNsSRn#0.78#408#vegetables#The Pantry +Chinese Cabbage#Chinvege2ldNr3#0.80#799#vegetables#TriCounty Produce +Chinese Cabbage#ChinvegeK3R2Td#0.80#180#vegetables#Off the Vine +Chinese Beans#ChinvegebxbyPy#0.45#654#vegetables#The Pantry +Chinese 
Beans#ChinvegewKGwgx#0.45#206#vegetables#TriCounty Produce +Chinese Beans#ChinvegevVjzC0#0.47#643#vegetables#Off the Vine +Chines Kale#ChinvegeCfdkss#0.70#239#vegetables#The Pantry +Chines Kale#Chinvege6V6Dne#0.65#548#vegetables#TriCounty Produce +Chines Kale#ChinvegeB7vE3x#0.66#380#vegetables#Off the Vine +Chinese Radish#ChinvegeXcM4eq#0.22#190#vegetables#The Pantry +Chinese Radish#ChinvegeTdUBqN#0.22#257#vegetables#TriCounty Produce +Chinese Radish#ChinvegeMXMms8#0.22#402#vegetables#Off the Vine +Chinese Mustard#ChinvegeRDdpdl#0.33#149#vegetables#The Pantry +Chinese Mustard#ChinvegeABDhNd#0.32#320#vegetables#TriCounty Produce +Chinese Mustard#Chinvege8NPwa2#0.34#389#vegetables#Off the Vine +Cilantro#CilavegeQXBEsW#0.60#674#vegetables#The Pantry +Cilantro#CilavegeRgjkUG#0.60#355#vegetables#TriCounty Produce +Cilantro#CilavegelT2msu#0.59#464#vegetables#Off the Vine +Collard#CollvegesTGGNw#0.32#745#vegetables#The Pantry +Collard#CollvegeAwdor5#0.32#124#vegetables#TriCounty Produce +Collard#CollvegeQe900L#0.30#796#vegetables#Off the Vine +Coriander#CorivegeXxp4xY#0.26#560#vegetables#The Pantry +Coriander#Corivege9xBAT0#0.27#321#vegetables#TriCounty Produce +Coriander#CorivegeCfNjBx#0.27#709#vegetables#Off the Vine +Dandelion#DandvegeJNcnbr#0.11#285#vegetables#The Pantry +Dandelion#DandvegeGwBkHZ#0.11#733#vegetables#TriCounty Produce +Dandelion#DandvegeZfwVqn#0.11#57#vegetables#Off the Vine +Daikon Radish#DaikvegeHHsd7M#0.61#743#vegetables#The Pantry +Daikon Radish#DaikvegeIu17yC#0.62#459#vegetables#TriCounty Produce +Daikon Radish#DaikvegePzFjqf#0.63#296#vegetables#Off the Vine +Eggplant#EggpvegeKJtydN#0.55#200#vegetables#The Pantry +Eggplant#EggpvegeQMKrNs#0.53#208#vegetables#TriCounty Produce +Eggplant#EggpvegeN0WnSo#0.51#761#vegetables#Off the Vine +English Pea#Englvegea1ytIn#0.40#457#vegetables#The Pantry +English Pea#EnglvegerU9Vty#0.37#263#vegetables#TriCounty Produce +English Pea#EnglvegeCmkd3y#0.39#430#vegetables#Off the Vine +Fennel#Fennvegebz2UM7#0.76#545#vegetables#The Pantry +Fennel#FennvegeQzjtZ3#0.78#795#vegetables#TriCounty Produce +Fennel#FennvegeXSrW61#0.75#79#vegetables#Off the Vine +Garlic#GarlvegesR2yel#0.76#478#vegetables#The Pantry +Garlic#GarlvegeEQvt8W#0.77#349#vegetables#TriCounty Produce +Garlic#GarlvegedljBdK#0.80#708#vegetables#Off the Vine +Ginger#GingvegeMNiTc2#0.88#563#vegetables#The Pantry +Ginger#Gingvegeq366Sn#0.89#738#vegetables#TriCounty Produce +Ginger#GingvegeznyyVj#0.89#598#vegetables#Off the Vine +Horseradish#HorsvegemSwISt#0.12#622#vegetables#The Pantry +Horseradish#HorsvegetCOS0x#0.11#279#vegetables#TriCounty Produce +Horseradish#Horsvegew6XXaS#0.12#478#vegetables#Off the Vine +Japanese Eggplant#JapavegeTdKDCL#0.57#539#vegetables#The Pantry +Japanese Eggplant#JapavegevsJfGa#0.58#782#vegetables#TriCounty Produce +Japanese Eggplant#JapavegeCIrIxd#0.57#777#vegetables#Off the Vine +Jerusalem Artichoke#Jeruvege928cr0#0.13#231#vegetables#The Pantry +Jerusalem Artichoke#JeruvegeC2v086#0.14#123#vegetables#TriCounty Produce +Jerusalem Artichoke#JeruvegeehCYzi#0.14#196#vegetables#Off the Vine +Jicama#JicavegeRWYj9n#0.75#79#vegetables#The Pantry +Jicama#JicavegeGk5LKH#0.71#292#vegetables#TriCounty Produce +Jicama#JicavegeUjpaX1#0.70#308#vegetables#Off the Vine +Kale#Kalevegext6RNT#0.55#765#vegetables#The Pantry +Kale#KalevegeFsp17B#0.53#107#vegetables#TriCounty Produce +Kale#KalevegeAffBTS#0.57#573#vegetables#Off the Vine +Kiwifruit#KiwivegeloZBKJ#0.60#769#vegetables#The Pantry +Kiwifruit#KiwivegenCQAHw#0.59#307#vegetables#TriCounty Produce 
+Kiwifruit#Kiwivege0Gi3P2#0.59#235#vegetables#Off the Vine +Kohlrabi#KohlvegeJFKZDl#0.26#406#vegetables#The Pantry +Kohlrabi#Kohlvege32UTAj#0.28#613#vegetables#TriCounty Produce +Kohlrabi#KohlvegejNQC1M#0.28#326#vegetables#Off the Vine +Leek#Leekvege5iaFtg#0.70#580#vegetables#The Pantry +Leek#Leekvegei9Wxbz#0.68#188#vegetables#TriCounty Produce +Leek#LeekvegewY4mAc#0.70#473#vegetables#Off the Vine +Lettuce#LettvegesK9wDR#0.55#716#vegetables#The Pantry +Lettuce#LettvegeWzMyCM#0.57#83#vegetables#TriCounty Produce +Lettuce#LettvegeHgfGG8#0.56#268#vegetables#Off the Vine +Melons#Melovege6t93WF#0.11#252#vegetables#The Pantry +Melons#Melovegeq9kz7T#0.12#558#vegetables#TriCounty Produce +Melons#Melovege9kLTXN#0.12#382#vegetables#Off the Vine +Mushroom#MushvegeSq53h8#0.59#365#vegetables#The Pantry +Mushroom#Mushvegedq6lYP#0.59#444#vegetables#TriCounty Produce +Mushroom#Mushvege8o27D2#0.55#467#vegetables#Off the Vine +Okra#OkravegeTszQSL#0.55#62#vegetables#The Pantry +Okra#OkravegeJBWmfh#0.58#165#vegetables#TriCounty Produce +Okra#OkravegeD6tF9n#0.55#77#vegetables#Off the Vine +Onion#OniovegejwimQo#0.80#186#vegetables#The Pantry +Onion#OniovegeUOwwks#0.80#417#vegetables#TriCounty Produce +Onion#OniovegezcRDrc#0.80#435#vegetables#Off the Vine +Oregano#OregvegetlU7Ez#0.71#119#vegetables#The Pantry +Oregano#Oregvege9h9ZKy#0.70#173#vegetables#TriCounty Produce +Oregano#OregvegebXr0PJ#0.70#773#vegetables#Off the Vine +Parsley#ParsvegeXFEjjN#0.83#502#vegetables#The Pantry +Parsley#ParsvegejAg5C4#0.80#454#vegetables#TriCounty Produce +Parsley#ParsvegehAtH2H#0.84#523#vegetables#Off the Vine +Parsnip#Parsvegee9Lp6D#0.46#626#vegetables#The Pantry +Parsnip#ParsvegeSxXHSA#0.47#411#vegetables#TriCounty Produce +Parsnip#Parsvegea0stPf#0.44#403#vegetables#Off the Vine +Pea#Peavegecq4SxR#0.18#342#vegetables#The Pantry +Pea#Peavege46Gdp9#0.18#255#vegetables#TriCounty Produce +Pea#Peavegeov1gc5#0.18#251#vegetables#Off the Vine +Pepper#PeppvegeUcBYRp#0.33#52#vegetables#The Pantry +Pepper#PeppvegeB60btP#0.35#107#vegetables#TriCounty Produce +Pepper#PeppvegeG4tP3e#0.34#481#vegetables#Off the Vine +Pigeon Pea#Pigevegec5bAtm#0.94#391#vegetables#The Pantry +Pigeon Pea#Pigevegeb93eLi#0.91#447#vegetables#TriCounty Produce +Pigeon Pea#PigevegejEBDRa#0.89#259#vegetables#Off the Vine +Irish Potato#IrisvegeJNQqby#0.72#355#vegetables#The Pantry +Irish Potato#Irisvegewq1PLd#0.72#601#vegetables#TriCounty Produce +Irish Potato#IrisvegeAfFLdO#0.68#740#vegetables#Off the Vine +Pumpkin#PumpvegeiYsPR8#0.25#776#vegetables#The Pantry +Pumpkin#PumpvegelqP1Kh#0.25#189#vegetables#TriCounty Produce +Pumpkin#Pumpvegeb3nQU5#0.26#207#vegetables#Off the Vine +Radish#RadivegeNwwSBJ#0.16#613#vegetables#The Pantry +Radish#Radivege0tIBnL#0.16#779#vegetables#TriCounty Produce +Radish#RadivegeNLqJCf#0.16#731#vegetables#Off the Vine +Rhubarb#RhubvegeREfOti#0.12#301#vegetables#The Pantry +Rhubarb#Rhubvege4Jc3b7#0.12#557#vegetables#TriCounty Produce +Rhubarb#RhubvegeaXqF7H#0.12#378#vegetables#Off the Vine +Rosemary#Rosevege16QStc#0.73#380#vegetables#The Pantry +Rosemary#RosevegeNf6Oem#0.75#622#vegetables#TriCounty Produce +Rosemary#RosevegeFgsOyN#0.74#631#vegetables#Off the Vine +Rutabaga#RutavegecUYfQ3#0.55#676#vegetables#The Pantry +Rutabaga#RutavegejOG5DF#0.55#273#vegetables#TriCounty Produce +Rutabaga#RutavegewEVjzV#0.53#452#vegetables#Off the Vine +Salsify#SalsvegeViS9HF#0.11#537#vegetables#The Pantry +Salsify#Salsvegemd3HAL#0.11#753#vegetables#TriCounty Produce +Salsify#SalsvegeuRCnmq#0.10#787#vegetables#Off the Vine 
+Savory#Savovegee4DRWl#0.21#456#vegetables#The Pantry +Savory#SavovegerZ90Xm#0.21#642#vegetables#TriCounty Produce +Savory#Savovegeje7yy7#0.22#328#vegetables#Off the Vine +Sesame#Sesavege4NAWZE#0.84#54#vegetables#The Pantry +Sesame#SesavegeMTc9IN#0.84#458#vegetables#TriCounty Produce +Sesame#SesavegegOwAjo#0.83#125#vegetables#Off the Vine +Shallots#ShalvegeUO2pDO#0.26#599#vegetables#The Pantry +Shallots#ShalvegeY1sekb#0.27#647#vegetables#TriCounty Produce +Shallots#ShalvegeSDC8VY#0.27#369#vegetables#Off the Vine +Sugar Snap Peas#SugavegepUZDTl#0.47#308#vegetables#The Pantry +Sugar Snap Peas#Sugavege1XyzNH#0.48#205#vegetables#TriCounty Produce +Sugar Snap Peas#SugavegeJuaG7f#0.46#348#vegetables#Off the Vine +Soybean#SoybvegeqxSVRL#0.70#639#vegetables#The Pantry +Soybean#SoybvegezEMjOG#0.68#423#vegetables#TriCounty Produce +Soybean#SoybvegebanSFq#0.67#268#vegetables#Off the Vine +Spaghetti Squash#SpagvegeMNO1yC#0.12#753#vegetables#The Pantry +Spaghetti Squash#SpagvegeilpUaD#0.13#604#vegetables#TriCounty Produce +Spaghetti Squash#SpagvegeAOoZNX#0.13#431#vegetables#Off the Vine +Spinach#SpinvegeegXXou#0.10#742#vegetables#The Pantry +Spinach#SpinvegeVcqXL6#0.11#708#vegetables#TriCounty Produce +Spinach#SpinvegetZ26DN#0.11#625#vegetables#Off the Vine +Sweet Potato#SweevegepNDQWb#0.94#720#vegetables#The Pantry +Sweet Potato#Sweevegepnw7Tm#0.90#377#vegetables#TriCounty Produce +Sweet Potato#Sweevegeyk0C82#0.89#242#vegetables#Off the Vine +Swiss Chard#SwisvegeksalTA#0.54#545#vegetables#The Pantry +Swiss Chard#SwisvegeKm2Kze#0.54#472#vegetables#TriCounty Produce +Swiss Chard#SwisvegehteuMk#0.56#142#vegetables#Off the Vine +Taro#Tarovege3fpGV6#0.87#155#vegetables#The Pantry +Taro#TarovegerZkmof#0.86#371#vegetables#TriCounty Produce +Taro#TarovegeXKPuzc#0.89#443#vegetables#Off the Vine +Tarragon#TarrvegeCzVC6U#0.18#491#vegetables#The Pantry +Tarragon#TarrvegesIkEfS#0.17#65#vegetables#TriCounty Produce +Tarragon#TarrvegerZsKFP#0.18#180#vegetables#Off the Vine +Thyme#Thymvege8Rv72c#0.41#442#vegetables#The Pantry +Thyme#ThymvegeJoUdQS#0.42#237#vegetables#TriCounty Produce +Thyme#ThymvegeRck5uO#0.43#491#vegetables#Off the Vine +Tomato#Tomavegey0NHGK#0.31#60#vegetables#The Pantry +Tomato#TomavegeKAjRUn#0.30#630#vegetables#TriCounty Produce +Tomato#TomavegePZOHlH#0.30#70#vegetables#Off the Vine +Turnip#TurnvegeRVQiV5#0.44#580#vegetables#The Pantry +Turnip#TurnvegeVjIX9D#0.45#743#vegetables#TriCounty Produce +Turnip#TurnvegelFhvuJ#0.44#219#vegetables#Off the Vine +Watercress#WatevegelwzPLQ#0.54#230#vegetables#The Pantry +Watercress#Watevege8oeDCT#0.54#774#vegetables#TriCounty Produce +Watercress#Watevegexr8L1t#0.55#185#vegetables#Off the Vine +Watermelon#WatevegeL83MRH#0.19#698#vegetables#The Pantry +Watermelon#WatevegeR2S4Dq#0.21#488#vegetables#TriCounty Produce +Watermelon#WatevegepFPXQu#0.21#439#vegetables#Off the Vine +Kamote#KamovegegdON75#0.13#218#vegetables#The Pantry +Kamote#KamovegevupDBf#0.13#98#vegetables#TriCounty Produce +Kamote#KamovegeSQX7IA#0.14#703#vegetables#Off the Vine +Alogbati#AlogvegeB1WaJU#0.41#775#vegetables#The Pantry +Alogbati#AlogvegeVr5cPP#0.40#789#vegetables#TriCounty Produce +Alogbati#AlogvegeyTUQzy#0.40#416#vegetables#Off the Vine +Ampalaya#AmpavegemR9fSd#0.85#107#vegetables#The Pantry +Ampalaya#AmpavegeJDu9Im#0.90#676#vegetables#TriCounty Produce +Ampalaya#AmpavegepL8GH5#0.86#728#vegetables#Off the Vine +Dahon ng sili#Dahovege6X9grk#0.11#369#vegetables#The Pantry +Dahon ng sili#DahovegeiHZjQT#0.11#141#vegetables#TriCounty Produce +Dahon ng 
sili#DahovegeoCDAH8#0.12#517#vegetables#Off the Vine +Gabi#GabivegeVm4Xk3#0.44#396#vegetables#The Pantry +Gabi#Gabivegeu6woqK#0.42#722#vegetables#TriCounty Produce +Gabi#GabivegezcA7q1#0.42#394#vegetables#Off the Vine +Kabute#Kabuvege6Tqrif#0.16#123#vegetables#The Pantry +Kabute#KabuvegeA3uYdG#0.15#183#vegetables#TriCounty Produce +Kabute#KabuvegeXW6ZiI#0.16#624#vegetables#Off the Vine +Kamoteng Kahoy#KamovegeAdW37X#0.42#782#vegetables#The Pantry +Kamoteng Kahoy#KamovegetFlqpC#0.42#515#vegetables#TriCounty Produce +Kamoteng Kahoy#KamovegeMvxoLn#0.40#166#vegetables#Off the Vine +Kangkong#KangvegeSFTvEz#0.35#759#vegetables#The Pantry +Kangkong#KangvegeRLR6gL#0.34#695#vegetables#TriCounty Produce +Kangkong#Kangvege9BFo14#0.35#783#vegetables#Off the Vine +Labanos#Labavege3qrWJL#0.94#514#vegetables#The Pantry +Labanos#LabavegekgVWDH#0.89#210#vegetables#TriCounty Produce +Labanos#LabavegeiVPgMx#0.89#207#vegetables#Off the Vine +Labong#LabovegeX3O8yz#0.85#722#vegetables#The Pantry +Labong#LabovegeI1wSEs#0.87#472#vegetables#TriCounty Produce +Labong#LabovegeOPiQht#0.85#740#vegetables#Off the Vine +Malunggay#MaluvegeHkwAFm#0.30#252#vegetables#The Pantry +Malunggay#Maluvegez6TiSY#0.30#245#vegetables#TriCounty Produce +Malunggay#MaluvegewzY37D#0.31#405#vegetables#Off the Vine +Munggo#MungvegeqeuwGw#0.25#362#vegetables#The Pantry +Munggo#MungvegeNhqWvL#0.26#360#vegetables#TriCounty Produce +Munggo#MungvegeGxNxQC#0.25#555#vegetables#Off the Vine +Pechay#PechvegezDeHFZ#0.36#401#vegetables#The Pantry +Pechay#Pechvegehi4Fcx#0.35#723#vegetables#TriCounty Produce +Pechay#Pechvege8Pq8Eo#0.36#141#vegetables#Off the Vine +Sigarilyas#SigavegeMJrtlV#0.88#335#vegetables#The Pantry +Sigarilyas#SigavegeLhsoOB#0.87#768#vegetables#TriCounty Produce +Sigarilyas#SigavegeS6RJcA#0.93#356#vegetables#Off the Vine +Sitaw#Sitavege0hMi9z#0.65#153#vegetables#The Pantry +Sitaw#Sitavegeez1g6N#0.67#561#vegetables#TriCounty Produce +Sitaw#Sitavege0BCNeF#0.66#674#vegetables#Off the Vine +Talong#TalovegevZjVK6#0.10#530#vegetables#The Pantry +Talong#TalovegexX4MRw#0.09#305#vegetables#TriCounty Produce +Talong#TalovegeO3U2ze#0.10#126#vegetables#Off the Vine +Toge#TogevegeYelJUw#0.54#449#vegetables#The Pantry +Toge#Togevegeilr1xK#0.54#274#vegetables#TriCounty Produce +Toge#Togevegesvjnyn#0.51#316#vegetables#Off the Vine +Ube#UbevegeoPnxvb#0.56#397#vegetables#The Pantry +Ube#Ubevege2CNyve#0.55#450#vegetables#TriCounty Produce +Ube#UbevegeC43sVj#0.55#263#vegetables#Off the Vine +Upo#UpovegecOGRqC#0.22#404#vegetables#The Pantry +Upo#Upovegekjl2wl#0.22#541#vegetables#TriCounty Produce +Upo#UpovegemTTTwI#0.23#459#vegetables#Off the Vine +Edamame#EdamvegeVYtk8z#0.79#296#vegetables#The Pantry +Edamame#Edamvege608vXi#0.78#700#vegetables#TriCounty Produce +Edamame#Edamvege1jiqGY#0.75#115#vegetables#Off the Vine +Hairy melon#HairvegeFYFHIw#0.71#789#vegetables#The Pantry +Hairy melon#HairvegeS7AAqI#0.72#302#vegetables#TriCounty Produce +Hairy melon#HairvegeO6WJHL#0.72#444#vegetables#Off the Vine +Burdock#BurdvegeyLstLV#0.56#761#vegetables#The Pantry +Burdock#BurdvegeZsqAjT#0.56#582#vegetables#TriCounty Produce +Burdock#BurdvegeycF7mo#0.55#566#vegetables#Off the Vine +Snake gourd#SnakvegesfHGvt#0.92#626#vegetables#The Pantry +Snake gourd#SnakvegedlNiBk#0.92#669#vegetables#TriCounty Produce +Snake gourd#Snakvegec5n1UM#0.92#143#vegetables#Off the Vine +Wasabi#Wasavege5P5pZp#0.67#751#vegetables#The Pantry +Wasabi#Wasavege6EEE9r#0.68#559#vegetables#TriCounty Produce +Wasabi#Wasavege1ve7TY#0.65#61#vegetables#Off the Vine 
+Yam#YamvegeRN9ONH#0.57#438#vegetables#The Pantry +Yam#YamvegeWjdzeA#0.56#564#vegetables#TriCounty Produce +Yam#YamvegeI1AnyI#0.56#456#vegetables#Off the Vine +Apple Fritters#AppldessDj96hw#6.12#16#desserts#Mom's Kitchen +Apple Fritters#AppldessrN1kvM#6.06#7#desserts#The Baking Pan +Banana Split#Banadess7tpjkJ#10.86#10#desserts#Mom's Kitchen +Banana Split#Banadessfif758#11.07#14#desserts#The Baking Pan +Blueberry Boy Bait#BluedesseX2LVU#3.72#16#desserts#Mom's Kitchen +Blueberry Boy Bait#Bluedess9zLhaH#3.93#9#desserts#The Baking Pan +Candied Cranberries#CanddessjW92p3#1.77#9#desserts#Mom's Kitchen +Candied Cranberries#CanddesskhtVoQ#1.72#0#desserts#The Baking Pan +Daiquiri Souffle#DaiqdessebnYcy#9.54#15#desserts#Mom's Kitchen +Daiquiri Souffle#DaiqdessfM1DnX#9.72#6#desserts#The Baking Pan +Bananas Flambe#BanadesscczumD#6.94#12#desserts#Mom's Kitchen +Bananas Flambe#Banadess8qNfxd#7.07#16#desserts#The Baking Pan +Pie, Apple#Pie,desshcSHhT#7.88#11#desserts#Mom's Kitchen +Pie, Apple#Pie,dessTbiwDp#7.88#15#desserts#The Baking Pan +Pie, Pumpkin#Pie,desswhPBPB#6.00#20#desserts#Mom's Kitchen +Pie, Pumpkin#Pie,dessDg3NWl#6.24#19#desserts#The Baking Pan +Pie, Blueberry#Pie,dessw9VdgD#2.14#3#desserts#Mom's Kitchen +Pie, Blueberry#Pie,dessiSjZKD#2.12#1#desserts#The Baking Pan +Pie, Pecan#Pie,dess2NqhNR#12.70#20#desserts#Mom's Kitchen +Pie, Pecan#Pie,dessB1LfcE#12.33#12#desserts#The Baking Pan +Pie, Cranberry Apple#Pie,dess1mL7IS#10.16#7#desserts#Mom's Kitchen +Pie, Cranberry Apple#Pie,dessmDhkUA#10.16#11#desserts#The Baking Pan +Pie, Banana Cream#Pie,dessH80DuG#7.35#6#desserts#Mom's Kitchen +Pie, Banana Cream#Pie,dessf1YvFb#7.08#11#desserts#The Baking Pan +Pie, Key Lime#Pie,desshtli5N#4.85#2#desserts#Mom's Kitchen +Pie, Key Lime#Pie,dessMwQkKm#5.13#1#desserts#The Baking Pan +Pie, Lemon Meringue#Pie,dess9naVkX#3.74#7#desserts#Mom's Kitchen +Pie, Lemon Meringue#Pie,dessKYcNML#3.67#5#desserts#The Baking Pan +Pie, Caramel#Pie,dessSUuiIU#2.27#9#desserts#Mom's Kitchen +Pie, Caramel#Pie,dessvo8uHh#2.33#4#desserts#The Baking Pan +Pie, Raspberry#Pie,dessUHhMlS#2.36#0#desserts#Mom's Kitchen +Pie, Raspberry#Pie,dessJflbf5#2.36#2#desserts#The Baking Pan +Ice Cream, Chocolate#Ice desseXuyxx#1.44#9#desserts#Mom's Kitchen +Ice Cream, Chocolate#Ice dessASBohf#1.41#13#desserts#The Baking Pan +Ice Cream, Vanilla#Ice dessYnzbbt#11.92#19#desserts#Mom's Kitchen +Ice Cream, Vanilla#Ice dessUBBKp8#11.58#10#desserts#The Baking Pan +Ice Cream, Strawberry#Ice dessfTwKhD#1.90#14#desserts#Mom's Kitchen +Ice Cream, Strawberry#Ice dessaO9Fxf#1.99#6#desserts#The Baking Pan +Ice Cream, Rocky Road#Ice dessyIri3P#13.10#20#desserts#Mom's Kitchen +Ice Cream, Rocky Road#Ice dessZuLr8F#13.48#13#desserts#The Baking Pan +Ice Cream, Mint Chocolate Chip#Ice dessV1IGG7#5.75#4#desserts#Mom's Kitchen +Ice Cream, Mint Chocolate Chip#Ice dessX1gEQ4#5.64#1#desserts#The Baking Pan +Ice Cream Sundae#Ice dessbhlAXt#5.62#11#desserts#Mom's Kitchen +Ice Cream Sundae#Ice dessByapxl#5.72#16#desserts#The Baking Pan +Cobbler, Peach#CobbdessYUGeOB#10.14#20#desserts#Mom's Kitchen +Cobbler, Peach#CobbdessXfEtUK#10.43#16#desserts#The Baking Pan +Cobbler, Berry-Pecan#Cobbdessx3htak#5.36#12#desserts#Mom's Kitchen +Cobbler, Berry-Pecan#Cobbdesse4FUVI#5.41#8#desserts#The Baking Pan +Cobbler, Blueberry#CobbdessbiI0oF#3.78#11#desserts#Mom's Kitchen +Cobbler, Blueberry#CobbdessMXxbBN#3.57#2#desserts#The Baking Pan +Cobbler, Cherry#CobbdessNSa8QW#12.58#0#desserts#Mom's Kitchen +Cobbler, Cherry#CobbdessA1dADa#12.10#10#desserts#The Baking Pan +Cobbler, 
Huckleberry#Cobbdess3t6O8d#3.99#18#desserts#Mom's Kitchen +Cobbler, Huckleberry#CobbdessGI9euK#3.88#0#desserts#The Baking Pan +Cobbler, Rhubarb#Cobbdess22X40Z#9.54#0#desserts#Mom's Kitchen +Cobbler, Rhubarb#CobbdessPfnCT0#9.27#18#desserts#The Baking Pan +Cobbler, Strawberry#CobbdessI78188#12.43#0#desserts#Mom's Kitchen +Cobbler, Strawberry#CobbdessH3LdgQ#12.20#3#desserts#The Baking Pan +Cobbler, Zucchini#Cobbdess5rK4dP#11.24#3#desserts#Mom's Kitchen +Cobbler, Zucchini#Cobbdess4Ez8kS#10.51#10#desserts#The Baking Pan +Brownies#BrowdessmogdTl#7.62#9#desserts#Mom's Kitchen +Brownies#Browdess84Qc1z#7.55#9#desserts#The Baking Pan +Fudge Bar#Fudgdess8iXSyf#11.72#6#desserts#Mom's Kitchen +Fudge Bar#FudgdessakU1Id#12.29#5#desserts#The Baking Pan +Cookies, Oatmeal#Cookdessnq9Oya#2.84#15#desserts#Mom's Kitchen +Cookies, Oatmeal#CookdessBhgp7p#2.68#10#desserts#The Baking Pan +Cookies, Chocolate Chip#CookdessRVszsZ#12.73#17#desserts#Mom's Kitchen +Cookies, Chocolate Chip#CookdessSOoHmT#12.26#19#desserts#The Baking Pan +Cookies, Peanut Butter#Cookdess2UcMI2#7.82#5#desserts#Mom's Kitchen +Cookies, Peanut Butter#Cookdess1cILme#7.46#20#desserts#The Baking Pan +Mousse, Chocolate#MousdessDpN4sQ#6.25#20#desserts#Mom's Kitchen +Mousse, Chocolate#Mousdess8FyFT8#5.96#1#desserts#The Baking Pan +Mousse, Blueberry Maple#MousdessacwrkO#7.28#7#desserts#Mom's Kitchen +Mousse, Blueberry Maple#MousdessbiCMFg#7.21#12#desserts#The Baking Pan +Mousse, Chocolate Banana#MousdessIeW4qz#5.13#2#desserts#Mom's Kitchen +Mousse, Chocolate Banana#Mousdess1De9oL#5.08#19#desserts#The Baking Pan +Mousse, Cherry#Mousdesss1bF8H#13.05#20#desserts#Mom's Kitchen +Mousse, Cherry#Mousdess0ujevx#12.43#1#desserts#The Baking Pan +Mousse, Eggnog#MousdessZ38hXj#9.07#10#desserts#Mom's Kitchen +Mousse, Eggnog#Mousdesshs05ST#8.81#8#desserts#The Baking Pan +Mousse, Strawberry#MousdessHCDlBK#5.58#3#desserts#Mom's Kitchen +Mousse, Strawberry#MousdessSZ4PyW#5.36#6#desserts#The Baking Pan +Sherbet, Cantaloupe#Sherdess3DCxUg#3.11#9#desserts#Mom's Kitchen +Sherbet, Cantaloupe#Sherdesscp2VIz#2.99#7#desserts#The Baking Pan +Sherbet, Lemon Milk#Sherdess1JVFOS#7.57#9#desserts#Mom's Kitchen +Sherbet, Lemon Milk#SherdessC865vu#7.57#0#desserts#The Baking Pan +Sherbet, Orange Crush#Sherdess8W8Mb9#4.32#18#desserts#Mom's Kitchen +Sherbet, Orange Crush#SherdessxmVJBF#4.16#10#desserts#The Baking Pan +Sherbet, Blueberry#SherdessFAgxqp#3.46#9#desserts#Mom's Kitchen +Sherbet, Blueberry#SherdessMPL87u#3.60#6#desserts#The Baking Pan +Sherbet, Raspberry#Sherdesse86ugA#6.08#1#desserts#Mom's Kitchen +Sherbet, Raspberry#Sherdesslc1etR#5.85#12#desserts#The Baking Pan +Sherbet, Strawberry#SherdessFwv09m#4.63#17#desserts#Mom's Kitchen +Sherbet, Strawberry#SherdessKB0H7q#4.81#20#desserts#The Baking Pan +Tart, Apple#TartdessrsTyXA#3.35#18#desserts#Mom's Kitchen +Tart, Apple#Tartdessp7pyiy#3.13#11#desserts#The Baking Pan +Tart, Almond#TartdessC7FARL#6.62#10#desserts#Mom's Kitchen +Tart, Almond#Tartdess1V1A1c#6.68#13#desserts#The Baking Pan +Tart, Blueberry#TartdesssQZRXX#10.28#10#desserts#Mom's Kitchen +Tart, Blueberry#TartdessUSJSuc#10.28#9#desserts#The Baking Pan +Tart, Chocolate-Pear#Tartdess2pdOE4#5.67#17#desserts#Mom's Kitchen +Tart, Chocolate-Pear#TartdessL3aEDd#5.51#9#desserts#The Baking Pan +Tart, Lemon Fudge#Tartdess9DhZUT#3.88#3#desserts#Mom's Kitchen +Tart, Lemon Fudge#TartdesshzLOWt#3.96#13#desserts#The Baking Pan +Tart, Pecan#TartdessvSbXzd#11.80#3#desserts#Mom's Kitchen +Tart, Pecan#Tartdess6YXJec#11.04#13#desserts#The Baking Pan +Tart, 
Pineapple#TartdesseMfJFe#9.01#18#desserts#Mom's Kitchen +Tart, Pineapple#TartdessA2Wftr#8.44#13#desserts#The Baking Pan +Tart, Pear#Tartdess4a1BUc#10.09#2#desserts#Mom's Kitchen +Tart, Pear#TartdessNw8YPG#10.68#5#desserts#The Baking Pan +Tart, Raspberry#TartdessAVnpP6#6.18#7#desserts#Mom's Kitchen +Tart, Raspberry#TartdessfVxZFf#5.95#9#desserts#The Baking Pan +Tart, Strawberry#Tartdess4IUcZW#4.75#8#desserts#Mom's Kitchen +Tart, Strawberry#Tartdess2BeEDb#4.61#17#desserts#The Baking Pan +Tart, Raspberry#TartdesshyBd24#1.85#5#desserts#Mom's Kitchen +Tart, Raspberry#Tartdess5fqxgy#1.94#20#desserts#The Baking Pan +Trifle, Berry#TrifdessmEkbU2#12.48#19#desserts#Mom's Kitchen +Trifle, Berry#TrifdessAV9Ix8#12.60#18#desserts#The Baking Pan +Trifle, American#TrifdesscsdSCd#4.70#17#desserts#Mom's Kitchen +Trifle, American#TrifdessTArskm#4.35#11#desserts#The Baking Pan +Trifle, English#TrifdessX87q8T#8.20#9#desserts#Mom's Kitchen +Trifle, English#Trifdess52l955#8.12#11#desserts#The Baking Pan +Trifle, Orange#TrifdesslUwxwe#9.74#15#desserts#Mom's Kitchen +Trifle, Orange#TrifdessFrfCHP#10.22#1#desserts#The Baking Pan +Trifle, Pumpkin#TrifdessJKFN96#4.72#7#desserts#Mom's Kitchen +Trifle, Pumpkin#TrifdessMNw4EV#4.95#16#desserts#The Baking Pan +Trifle, Scottish#TrifdessFa0JdK#13.63#0#desserts#Mom's Kitchen +Trifle, Scottish#TrifdessAAUQCN#14.03#6#desserts#The Baking Pan +Trifle, Sherry#TrifdesscuttJg#4.42#5#desserts#Mom's Kitchen +Trifle, Sherry#TrifdesspRGpfP#4.21#19#desserts#The Baking Pan +Trifle, Strawberry#TrifdessAd5TpV#3.58#11#desserts#Mom's Kitchen +Trifle, Strawberry#Trifdess1rtW0A#3.58#3#desserts#The Baking Pan +Trifle, Scotch Whiskey#Trifdess2zJsGi#5.44#5#desserts#Mom's Kitchen +Trifle, Scotch Whiskey#TrifdessL8nuI6#5.18#5#desserts#The Baking Pan +Cheesecake, Amaretto#CheedessOJBqfD#11.89#5#desserts#Mom's Kitchen +Cheesecake, Amaretto#CheedessVnDf14#11.89#9#desserts#The Baking Pan +Cheesecake, Apple#Cheedessuks1YK#11.22#15#desserts#Mom's Kitchen +Cheesecake, Apple#CheedessMYKaKK#11.01#14#desserts#The Baking Pan +Cheesecake, Apricot#CheedessKUxTYY#12.34#16#desserts#Mom's Kitchen +Cheesecake, Apricot#CheedessMvB1pr#11.88#18#desserts#The Baking Pan +Cheesecake, Australian#CheedessQ9WAIn#2.70#9#desserts#Mom's Kitchen +Cheesecake, Australian#CheedessE6Jyjc#2.53#14#desserts#The Baking Pan +Cheesecake, Arkansas#CheedessTbqzmw#6.98#9#desserts#Mom's Kitchen +Cheesecake, Arkansas#CheedesstWJZfC#6.66#5#desserts#The Baking Pan +Cheesecake, Blueberry#Cheedessyo51KL#8.07#11#desserts#Mom's Kitchen +Cheesecake, Blueberry#Cheedess4Hz7P4#8.62#5#desserts#The Baking Pan +Cheesecake, Cherry#CheedessEahRkC#4.40#14#desserts#Mom's Kitchen +Cheesecake, Cherry#Cheedess3Nx4jZ#4.65#3#desserts#The Baking Pan +Cheesecake, Cran-Raspberry#CheedessrJsr9i#13.47#20#desserts#Mom's Kitchen +Cheesecake, Cran-Raspberry#CheedesshcuXCy#14.00#6#desserts#The Baking Pan +Cheesecake, German Chocolate#CheedesswayvJL#12.03#16#desserts#Mom's Kitchen +Cheesecake, German Chocolate#CheedessebTAeB#11.58#0#desserts#The Baking Pan +Cheesecake, Turtle#CheedessLqgeIA#12.19#6#desserts#Mom's Kitchen +Cheesecake, Turtle#CheedessvyNohA#12.07#19#desserts#The Baking Pan +Brownies, Apple#BrowdessIDW1Cc#5.44#12#desserts#Mom's Kitchen +Brownies, Apple#BrowdessyRMrAH#5.14#12#desserts#The Baking Pan +Brownies, Fudge#BrowdessmIHIFJ#5.19#8#desserts#Mom's Kitchen +Brownies, Fudge#BrowdessqewJ38#5.10#17#desserts#The Baking Pan +Brownies, Almond Macaroon#BrowdessniK7QI#10.57#3#desserts#Mom's Kitchen +Brownies, Almond Macaroon#BrowdessgkXURH#10.36#17#desserts#The 
Baking Pan +Brownies, Butterscotch#BrowdesslpA06E#7.16#13#desserts#Mom's Kitchen +Brownies, Butterscotch#BrowdessK5hofE#7.30#6#desserts#The Baking Pan +Brownies, Caramel#BrowdessVGfoA8#3.07#3#desserts#Mom's Kitchen +Brownies, Caramel#Browdess5jvVMM#3.13#11#desserts#The Baking Pan +Brownies, Cherry#Browdessyoa66A#3.39#17#desserts#Mom's Kitchen +Brownies, Cherry#BrowdessIg2JuF#3.39#11#desserts#The Baking Pan +Brownies, Chocolate Chip#Browdessb9dc59#6.18#10#desserts#Mom's Kitchen +Brownies, Chocolate Chip#BrowdessvW4nOx#6.43#14#desserts#The Baking Pan +Brownies, Coconut#BrowdessWPHrVR#3.06#15#desserts#Mom's Kitchen +Brownies, Coconut#BrowdessGVBlML#2.86#11#desserts#The Baking Pan +Brownies, Cream Cheese#Browdess1OyRay#12.74#4#desserts#Mom's Kitchen +Brownies, Cream Cheese#Browdess2fRsNv#12.61#19#desserts#The Baking Pan +Brownies, Fudge Mint#Browdessl7DP7k#11.45#14#desserts#Mom's Kitchen +Brownies, Fudge Mint#Browdessv70VKQ#11.34#16#desserts#The Baking Pan +Brownies, Mint Chip#BrowdessDDMvF7#1.81#15#desserts#Mom's Kitchen +Brownies, Mint Chip#Browdess0j9PBD#1.84#9#desserts#The Baking Pan +Cake, Angel Food#CakedessEaqGaE#11.18#3#desserts#Mom's Kitchen +Cake, Angel Food#CakedessJyAyFe#11.18#14#desserts#The Baking Pan +Cake, Chocolate#CakedessKLXFbn#10.11#7#desserts#Mom's Kitchen +Cake, Chocolate#CakedessfNP5Hg#9.91#14#desserts#The Baking Pan +Cake, Carrot#CakedessUTgMoV#4.20#13#desserts#Mom's Kitchen +Cake, Carrot#CakedessQdkaYg#4.00#3#desserts#The Baking Pan +Cake, Lemon Blueberry#CakedessepkeEW#11.73#16#desserts#Mom's Kitchen +Cake, Lemon Blueberry#CakedessHTKyQs#12.42#16#desserts#The Baking Pan +Cake Triple Fudge#CakedessiZ75lR#7.92#7#desserts#Mom's Kitchen +Cake Triple Fudge#CakedessWRrSXP#8.00#15#desserts#The Baking Pan +Cake, Walnut#CakedessveYVXZ#10.83#17#desserts#Mom's Kitchen +Cake, Walnut#Cakedesse22rT5#11.04#7#desserts#The Baking Pan +Cake, French Apple#CakedessjA2Kxv#1.95#0#desserts#Mom's Kitchen +Cake, French Apple#CakedessNBHCk0#1.86#20#desserts#The Baking Pan +Cake, Fig#CakedessOncX4y#6.82#3#desserts#Mom's Kitchen +Cake, Fig#CakedessTJtffn#7.08#10#desserts#The Baking Pan +Cake, Maple#CakedessnoGPRF#3.04#11#desserts#Mom's Kitchen +Cake, Maple#CakedessfVattM#3.22#4#desserts#The Baking Pan +Cake, Devil's Food#CakedessiXcDCt#4.73#7#desserts#Mom's Kitchen +Cake, Devil's Food#CakedessnBZk45#4.82#6#desserts#The Baking Pan +Cake, Double-Lemon#CakedesskeS0Vd#3.46#9#desserts#Mom's Kitchen +Cake, Double-Lemon#Cakedess50vx53#3.60#6#desserts#The Baking Pan +Sorbet, Blackberry#SorbdessQoa0CE#9.88#15#desserts#Mom's Kitchen +Sorbet, Blackberry#SorbdessqoOYzv#9.78#9#desserts#The Baking Pan diff --git a/db/examples_cxx/getting_started/vendors.txt b/db/examples_cxx/getting_started/vendors.txt new file mode 100644 index 000000000..528e1b110 --- /dev/null +++ b/db/examples_cxx/getting_started/vendors.txt @@ -0,0 +1,6 @@ +TriCounty Produce#309 S. Main Street#Middle Town#MN#55432#763 555 5761#Mort Dufresne#763 555 5765 +Simply Fresh#15612 Bogart Lane#Harrigan#WI#53704#420 333 3912#Cheryl Swedberg#420 333 3952 +Off the Vine#133 American Ct.#Centennial#IA#52002#563 121 3800#Bob King#563 121 3800 x54 +The Pantry#1206 N. 
Creek Way#Middle Town#MN#55432#763 555 3391#Sully Beckstrom#763 555 3391 +Mom's Kitchen#53 Yerman Ct.#Middle Town#MN#55432#763 554 9200#Maggie Kultgen#763 554 9200 x12 +The Baking Pan#1415 53rd Ave.#Dutchin#MN#56304#320 442 2277#Mike Roan#320 442 6879 diff --git a/db/examples_java/src/com/sleepycat/examples/collections/access/AccessExample.java b/db/examples_java/src/com/sleepycat/examples/collections/access/AccessExample.java new file mode 100644 index 000000000..64cd3e5fa --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/access/AccessExample.java @@ -0,0 +1,286 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1997-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: AccessExample.java,v 1.1 2004/04/09 16:34:05 mark Exp $ + */ + +package com.sleepycat.examples.collections.access; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.PrintStream; +import java.util.Iterator; +import java.util.Map; +import java.util.SortedMap; + +import com.sleepycat.bind.ByteArrayBinding; +import com.sleepycat.collections.StoredIterator; +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseConfig; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.DatabaseType; +import com.sleepycat.db.Environment; +import com.sleepycat.db.EnvironmentConfig; + +/** + * AccesssExample mirrors the functionality of a class by the same name + * used to demonstrate the com.sleepycat.je Java API. This version makes + * use of the new com.sleepycat.collections.* collections style classes to make + * life easier. + * + *@author Gregory Burd + *@created October 22, 2002 + */ +public class AccessExample + implements Runnable { + + // Class Variables of AccessExample class + private static boolean create = true; + private static final int EXIT_SUCCESS = 0; + private static final int EXIT_FAILURE = 1; + + public static void usage() { + + System.out.println("usage: java " + AccessExample.class.getName() + + " [-r] [database]\n"); + System.exit(EXIT_FAILURE); + } + + /** + * The main program for the AccessExample class + * + *@param argv The command line arguments + */ + public static void main(String[] argv) { + + boolean removeExistingDatabase = false; + String databaseName = "access.db"; + + for (int i = 0; i < argv.length; i++) { + if (argv[i].equals("-r")) { + removeExistingDatabase = true; + } else if (argv[i].equals("-?")) { + usage(); + } else if (argv[i].startsWith("-")) { + usage(); + } else { + if ((argv.length - i) != 1) + usage(); + databaseName = argv[i]; + break; + } + } + + try { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setInitializeCache(true); + envConfig.setInitializeLocking(true); + if (create) { + envConfig.setAllowCreate(true); + } + Environment env = new Environment(new File("."), envConfig); + // Remove the previous database. 
+ if (removeExistingDatabase) { + env.removeDatabase(null, databaseName, null); + } + + // create the app and run it + AccessExample app = new AccessExample(env, databaseName); + app.run(); + } catch (DatabaseException e) { + e.printStackTrace(); + System.exit(1); + } catch (FileNotFoundException e) { + e.printStackTrace(); + System.exit(1); + } catch (Exception e) { + e.printStackTrace(); + System.exit(1); + } + System.exit(0); + } + + + private Database db; + private SortedMap map; + private Environment env; + + + /** + * Constructor for the AccessExample object + * + *@param env Description of the Parameter + *@exception Exception Description of the Exception + */ + public AccessExample(Environment env, String databaseName) + throws Exception { + + this.env = env; + + // + // Lets mimic the com.sleepycat.examples.db.AccessExample 100% + // and use plain old byte arrays to store the key and data strings. + // + ByteArrayBinding keyBinding = new ByteArrayBinding(); + ByteArrayBinding dataBinding = new ByteArrayBinding(); + + // + // Open a data store. + // + DatabaseConfig dbConfig = new DatabaseConfig(); + if (create) { + dbConfig.setAllowCreate(true); + dbConfig.setType(DatabaseType.BTREE); + } + this.db = env.openDatabase(null, databaseName, null, dbConfig); + + // + // Now create a collection style map view of the data store + // so that it is easy to work with the data in the database. + // + this.map = new StoredSortedMap(db, keyBinding, dataBinding, true); + } + + + /** + * Main processing method for the AccessExample object + */ + public void run() { + // + // Insert records into a Stored Sorted Map DatabaseImpl, where + // the key is the user input and the data is the user input + // in reverse order. + // + final InputStreamReader reader = new InputStreamReader(System.in); + + for (; ; ) { + final String line = askForLine(reader, System.out, "input> "); + if (line == null) { + break; + } + + final String reversed = + (new StringBuffer(line)).reverse().toString(); + + log("adding: \"" + + line + "\" : \"" + + reversed + "\""); + + // Do the work to add the key/data to the HashMap here. + TransactionRunner tr = new TransactionRunner(env); + try { + tr.run( + new TransactionWorker() { + public void doWork() { + if (!map.containsKey(line.getBytes())) + map.put(line.getBytes(), + reversed.getBytes()); + else + System.out.println("Key " + line + + " already exists."); + } + }); + } catch (com.sleepycat.db.DatabaseException e) { + System.err.println("AccessExample: " + e.toString()); + System.exit(1); + } catch (java.lang.Exception e) { + System.err.println("AccessExample: " + e.toString()); + System.exit(1); + } + } + System.out.println(""); + + // Do the work to traverse and print the HashMap key/data + // pairs here get iterator over map entries. + Iterator iter = map.entrySet().iterator(); + try { + System.out.println("Reading data"); + while (iter.hasNext()) { + Map.Entry entry = (Map.Entry) iter.next(); + log("found \"" + + new String((byte[]) entry.getKey()) + + "\" key with data \"" + + new String((byte[]) entry.getValue()) + "\""); + } + } finally { + // Ensure that all database iterators are closed. This is very + // important. + StoredIterator.close(iter); + } + } + + + /** + * Prompts for a line, and keeps prompting until a non blank line is + * returned. Returns null on error. 
+ * + *@param reader stream from which to read user input + *@param out stream on which to prompt for user input + *@param prompt prompt to use to solicit input + *@return the string supplied by the user + */ + String askForLine(InputStreamReader reader, PrintStream out, + String prompt) { + + String result = ""; + while (result != null && result.length() == 0) { + out.print(prompt); + out.flush(); + result = getLine(reader); + } + return result; + } + + + /** + * Read a single line. Gets the line attribute of the AccessExample object + * Not terribly efficient, but does the job. Works for reading a line from + * stdin or a file. + * + *@param reader stream from which to read the line + *@return either a String or null on EOF, if EOF appears in the + * middle of a line, returns that line, then null on next call. + */ + String getLine(InputStreamReader reader) { + + StringBuffer b = new StringBuffer(); + int c; + try { + while ((c = reader.read()) != -1 && c != '\n') { + if (c != '\r') { + b.append((char) c); + } + } + } catch (IOException ioe) { + c = -1; + } + + if (c == -1 && b.length() == 0) { + return null; + } else { + return b.toString(); + } + } + + + /** + * A simple log method. + * + *@param s The string to be logged. + */ + private void log(String s) { + + System.out.println(s); + System.out.flush(); + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/hello/HelloDatabaseWorld.java b/db/examples_java/src/com/sleepycat/examples/collections/hello/HelloDatabaseWorld.java new file mode 100644 index 000000000..edd8f2fa5 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/hello/HelloDatabaseWorld.java @@ -0,0 +1,163 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. 
+ * + * $Id: HelloDatabaseWorld.java,v 1.2 2004/09/22 16:17:09 mark Exp $ + */ + +package com.sleepycat.examples.collections.hello; + +import java.io.File; +import java.util.Iterator; +import java.util.Map; +import java.util.SortedMap; + +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.SerialBinding; +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.collections.StoredIterator; +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseConfig; +import com.sleepycat.db.DatabaseType; +import com.sleepycat.db.Environment; +import com.sleepycat.db.EnvironmentConfig; + +/** + * @author Mark Hayes + */ +public class HelloDatabaseWorld implements TransactionWorker { + + private static final String[] INT_NAMES = { + "Hello", "Database", "World", + }; + private static boolean create = true; + + private Environment env; + private ClassCatalog catalog; + private Database db; + private SortedMap map; + + /** Creates the environment and runs a transaction */ + public static void main(String[] argv) + throws Exception { + + String dir = "./tmp"; + + // environment is transactional + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setInitializeCache(true); + envConfig.setInitializeLocking(true); + if (create) { + envConfig.setAllowCreate(true); + } + Environment env = new Environment(new File(dir), envConfig); + + // create the application and run a transaction + HelloDatabaseWorld worker = new HelloDatabaseWorld(env); + TransactionRunner runner = new TransactionRunner(env); + try { + // open and access the database within a transaction + runner.run(worker); + } finally { + // close the database outside the transaction + worker.close(); + } + } + + /** Creates the database for this application */ + private HelloDatabaseWorld(Environment env) + throws Exception { + + this.env = env; + open(); + } + + /** Performs work within a transaction. */ + public void doWork() + throws Exception { + + writeAndRead(); + } + + /** Opens the database and creates the Map. */ + private void open() + throws Exception { + + // use a generic database configuration + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + if (create) { + dbConfig.setAllowCreate(true); + dbConfig.setType(DatabaseType.BTREE); + } + + // catalog is needed for serial bindings (java serialization) + Database catalogDb = env.openDatabase(null, "catalog", null, dbConfig); + catalog = new StoredClassCatalog(catalogDb); + + // use Integer tuple binding for key entries + TupleBinding keyBinding = + TupleBinding.getPrimitiveBinding(Integer.class); + + // use String serial binding for data entries + SerialBinding dataBinding = new SerialBinding(catalog, String.class); + + this.db = env.openDatabase(null, "helloworld", null, dbConfig); + + // create a map view of the database + this.map = new StoredSortedMap(db, keyBinding, dataBinding, true); + } + + /** Closes the database. */ + private void close() + throws Exception { + + if (catalog != null) { + catalog.close(); + catalog = null; + } + if (db != null) { + db.close(); + db = null; + } + if (env != null) { + env.close(); + env = null; + } + } + + /** Writes and reads the database via the Map. 
*/ + private void writeAndRead() { + + // check for existing data + Integer key = new Integer(0); + String val = (String) map.get(key); + if (val == null) { + System.out.println("Writing data"); + // write in reverse order to show that keys are sorted + for (int i = INT_NAMES.length - 1; i >= 0; i -= 1) { + map.put(new Integer(i), INT_NAMES[i]); + } + } + // get iterator over map entries + Iterator iter = map.entrySet().iterator(); + try { + System.out.println("Reading data"); + while (iter.hasNext()) { + Map.Entry entry = (Map.Entry) iter.next(); + System.out.println(entry.getKey().toString() + ' ' + + entry.getValue()); + } + } finally { + // all database iterators must be closed!! + StoredIterator.close(iter); + } + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/PartData.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/PartData.java new file mode 100644 index 000000000..809d7f5bc --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/PartData.java @@ -0,0 +1,65 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: PartData.java,v 1.2 2004/09/22 16:17:09 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.basic; + +import java.io.Serializable; + +/** + * A PartData serves as the data in the key/data pair for a part entity. + * + *

+ * <p> In this sample, PartData is used both as the storage entry for the + * data as well as the object binding to the data. Because it is used + * directly as storage data using serial format, it must be Serializable. </p>
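For reference, a minimal sketch (not part of the patch itself) of the pattern this comment describes: because PartKey and PartData are Serializable, a SerialBinding backed by a StoredClassCatalog can expose the store as an ordinary map. It assumes an already-open transactional Environment named env, hypothetical store names, and that PartKey/PartData from this package are in scope.

import com.sleepycat.bind.serial.SerialBinding;
import com.sleepycat.bind.serial.StoredClassCatalog;
import com.sleepycat.collections.StoredMap;
import com.sleepycat.db.Database;
import com.sleepycat.db.DatabaseConfig;
import com.sleepycat.db.DatabaseType;
import com.sleepycat.db.Environment;

public class PartMapSketch {

    /** Open a map view over a hypothetical part store; env is assumed open. */
    public static StoredMap openPartMap(Environment env)
        throws Exception {

        DatabaseConfig dbConfig = new DatabaseConfig();
        dbConfig.setTransactional(true);
        dbConfig.setAllowCreate(true);
        dbConfig.setType(DatabaseType.BTREE);

        // The class catalog holds the serialized class formats.
        Database catalogDb =
            env.openDatabase(null, "java_class_catalog", null, dbConfig);
        StoredClassCatalog catalog = new StoredClassCatalog(catalogDb);

        // Serial bindings work here only because PartKey and PartData
        // implement java.io.Serializable.
        SerialBinding keyBinding = new SerialBinding(catalog, PartKey.class);
        SerialBinding dataBinding = new SerialBinding(catalog, PartData.class);

        Database partDb = env.openDatabase(null, "part_store", null, dbConfig);
        return new StoredMap(partDb, keyBinding, dataBinding, true);
    }
}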

    + * + * @author Mark Hayes + */ +public class PartData implements Serializable { + + private String name; + private String color; + private Weight weight; + private String city; + + public PartData(String name, String color, Weight weight, String city) { + + this.name = name; + this.color = color; + this.weight = weight; + this.city = city; + } + + public final String getName() { + + return name; + } + + public final String getColor() { + + return color; + } + + public final Weight getWeight() { + + return weight; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[PartData: name=" + name + + " color=" + color + + " weight=" + weight + + " city=" + city + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/PartKey.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/PartKey.java new file mode 100644 index 000000000..6ea90b4d4 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/PartKey.java @@ -0,0 +1,41 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: PartKey.java,v 1.2 2004/09/22 16:17:09 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.basic; + +import java.io.Serializable; + +/** + * A PartKey serves as the key in the key/data pair for a part entity. + * + *

+ * <p> In this sample, PartKey is used both as the storage entry for the key as + * well as the object binding to the key. Because it is used directly as + * storage data using serial format, it must be Serializable. </p>

    + * + * @author Mark Hayes + */ +public class PartKey implements Serializable { + + private String number; + + public PartKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[PartKey: number=" + number + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/Sample.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/Sample.java new file mode 100644 index 000000000..68208eb5f --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/Sample.java @@ -0,0 +1,267 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Sample.java,v 1.3 2004/09/22 16:17:09 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.basic; + +import java.io.FileNotFoundException; +import java.util.Iterator; +import java.util.Map; + +import com.sleepycat.collections.StoredIterator; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.db.DatabaseException; + +/** + * Sample is the main entry point for the sample program and may be run as + * follows: + * + *
+ * <pre>
+ * java com.sleepycat.examples.collections.ship.basic.Sample
+ *      [-h <home-directory> ]
+ * </pre>
    + * + *

+ * <p> The default for the home directory is ./tmp -- the tmp subdirectory of + * the current directory where the sample is run. The home directory must exist + * before running the sample. To recreate the sample database from scratch, + * delete all files in the home directory before running the sample. </p>
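A minimal driver sketch (not part of the patch): it creates the ./tmp home directory if it is missing, as the note above requires, then opens the environment the same way the sample does and runs an empty TransactionWorker through TransactionRunner, the deadlock-retry mechanism the sample's run() method relies on. The class name and empty worker body are placeholders.

import java.io.File;

import com.sleepycat.collections.TransactionRunner;
import com.sleepycat.collections.TransactionWorker;
import com.sleepycat.db.Environment;
import com.sleepycat.db.EnvironmentConfig;

public class RunSampleSketch {

    public static void main(String[] args)
        throws Exception {

        // The home directory must exist before the environment is opened.
        File home = new File("./tmp");
        if (!home.exists() && !home.mkdirs()) {
            throw new IllegalStateException("cannot create " + home);
        }

        // Open the environment the same way the sample does.
        EnvironmentConfig envConfig = new EnvironmentConfig();
        envConfig.setTransactional(true);
        envConfig.setInitializeCache(true);
        envConfig.setInitializeLocking(true);
        envConfig.setAllowCreate(true);
        Environment env = new Environment(home, envConfig);

        // TransactionRunner retries the worker when a deadlock is detected.
        TransactionRunner runner = new TransactionRunner(env);
        runner.run(new TransactionWorker() {
            public void doWork()
                throws Exception {
                // transactional reads and writes would go here
            }
        });

        env.close();
    }
}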

    + * + * @author Mark Hayes + */ +public class Sample { + + private SampleDatabase db; + private SampleViews views; + + /** + * Run the sample program. + */ + public static void main(String[] args) { + + System.out.println("\nRunning sample: " + Sample.class); + + // Parse the command line arguments. + // + String homeDir = "./tmp"; + for (int i = 0; i < args.length; i += 1) { + if (args[i].equals("-h") && i < args.length - 1) { + i += 1; + homeDir = args[i]; + } else { + System.err.println("Usage:\n java " + Sample.class.getName() + + "\n [-h ]"); + System.exit(2); + } + } + + // Run the sample. + // + Sample sample = null; + try { + sample = new Sample(homeDir); + sample.run(); + } catch (Exception e) { + // If an exception reaches this point, the last transaction did not + // complete. If the exception is RunRecoveryException, follow + // the Berkeley DB recovery procedures before running again. + e.printStackTrace(); + } finally { + if (sample != null) { + try { + // Always attempt to close the database cleanly. + sample.close(); + } catch (Exception e) { + System.err.println("Exception during database close:"); + e.printStackTrace(); + } + } + } + } + + /** + * Open the database and views. + */ + private Sample(String homeDir) + throws DatabaseException, FileNotFoundException { + + db = new SampleDatabase(homeDir); + views = new SampleViews(db); + } + + /** + * Close the database cleanly. + */ + private void close() + throws DatabaseException { + + db.close(); + } + + /** + * Run two transactions to populate and print the database. A + * TransactionRunner is used to ensure consistent handling of transactions, + * including deadlock retries. But the best transaction handling mechanism + * to use depends on the application. + */ + private void run() + throws Exception { + + TransactionRunner runner = new TransactionRunner(db.getEnvironment()); + runner.run(new PopulateDatabase()); + runner.run(new PrintDatabase()); + } + + /** + * Populate the database in a single transaction. + */ + private class PopulateDatabase implements TransactionWorker { + + public void doWork() + throws Exception { + addSuppliers(); + addParts(); + addShipments(); + } + } + + /** + * Print the database in a single transaction. All entities are printed. + */ + private class PrintDatabase implements TransactionWorker { + + public void doWork() + throws Exception { + printEntries("Parts", + views.getPartEntrySet().iterator()); + printEntries("Suppliers", + views.getSupplierEntrySet().iterator()); + printEntries("Shipments", + views.getShipmentEntrySet().iterator()); + } + } + + /** + * Populate the part entities in the database. If the part map is not + * empty, assume that this has already been done. 
+ */ + private void addParts() { + + Map parts = views.getPartMap(); + if (parts.isEmpty()) { + System.out.println("Adding Parts"); + parts.put(new PartKey("P1"), + new PartData("Nut", "Red", + new Weight(12.0, Weight.GRAMS), + "London")); + parts.put(new PartKey("P2"), + new PartData("Bolt", "Green", + new Weight(17.0, Weight.GRAMS), + "Paris")); + parts.put(new PartKey("P3"), + new PartData("Screw", "Blue", + new Weight(17.0, Weight.GRAMS), + "Rome")); + parts.put(new PartKey("P4"), + new PartData("Screw", "Red", + new Weight(14.0, Weight.GRAMS), + "London")); + parts.put(new PartKey("P5"), + new PartData("Cam", "Blue", + new Weight(12.0, Weight.GRAMS), + "Paris")); + parts.put(new PartKey("P6"), + new PartData("Cog", "Red", + new Weight(19.0, Weight.GRAMS), + "London")); + } + } + + /** + * Populate the supplier entities in the database. If the supplier map is + * not empty, assume that this has already been done. + */ + private void addSuppliers() { + + Map suppliers = views.getSupplierMap(); + if (suppliers.isEmpty()) { + System.out.println("Adding Suppliers"); + suppliers.put(new SupplierKey("S1"), + new SupplierData("Smith", 20, "London")); + suppliers.put(new SupplierKey("S2"), + new SupplierData("Jones", 10, "Paris")); + suppliers.put(new SupplierKey("S3"), + new SupplierData("Blake", 30, "Paris")); + suppliers.put(new SupplierKey("S4"), + new SupplierData("Clark", 20, "London")); + suppliers.put(new SupplierKey("S5"), + new SupplierData("Adams", 30, "Athens")); + } + } + + /** + * Populate the shipment entities in the database. If the shipment map + * is not empty, assume that this has already been done. + */ + private void addShipments() { + + Map shipments = views.getShipmentMap(); + if (shipments.isEmpty()) { + System.out.println("Adding Shipments"); + shipments.put(new ShipmentKey("P1", "S1"), + new ShipmentData(300)); + shipments.put(new ShipmentKey("P2", "S1"), + new ShipmentData(200)); + shipments.put(new ShipmentKey("P3", "S1"), + new ShipmentData(400)); + shipments.put(new ShipmentKey("P4", "S1"), + new ShipmentData(200)); + shipments.put(new ShipmentKey("P5", "S1"), + new ShipmentData(100)); + shipments.put(new ShipmentKey("P6", "S1"), + new ShipmentData(100)); + shipments.put(new ShipmentKey("P1", "S2"), + new ShipmentData(300)); + shipments.put(new ShipmentKey("P2", "S2"), + new ShipmentData(400)); + shipments.put(new ShipmentKey("P2", "S3"), + new ShipmentData(200)); + shipments.put(new ShipmentKey("P2", "S4"), + new ShipmentData(200)); + shipments.put(new ShipmentKey("P4", "S4"), + new ShipmentData(300)); + shipments.put(new ShipmentKey("P5", "S4"), + new ShipmentData(400)); + } + } + + /** + * Print the key/value objects returned by an iterator of Map.Entry + * objects. + * + *

+ * <p> IMPORTANT: All database iterators must be closed to avoid + * serious database problems. If the iterator is not closed, the + * underlying Berkeley DB cursor is not closed either. </p>
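The rule in this warning, condensed into a stand-alone sketch (not part of the patch): iterate any stored map view and close the iterator in a finally block via StoredIterator.close.

import java.util.Iterator;
import java.util.Map;

import com.sleepycat.collections.StoredIterator;

public class PrintMapSketch {

    /** Print every entry of a stored map view, always closing the iterator. */
    public static void printAll(Map storedMap) {

        Iterator iter = storedMap.entrySet().iterator();
        try {
            while (iter.hasNext()) {
                Map.Entry entry = (Map.Entry) iter.next();
                System.out.println(entry.getKey() + " -> " + entry.getValue());
            }
        } finally {
            // Closes the underlying Berkeley DB cursor when the iterator
            // comes from a stored collection.
            StoredIterator.close(iter);
        }
    }
}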

    + */ + private void printEntries(String label, Iterator iterator) { + + System.out.println("\n--- " + label + " ---"); + try { + while (iterator.hasNext()) { + Map.Entry entry = (Map.Entry) iterator.next(); + System.out.println(entry.getKey().toString()); + System.out.println(entry.getValue().toString()); + } + } finally { + // IMPORTANT: Use StoredIterator to close all database + // iterators. If java.util.Iterator is in hand, you can safely + // close it by calling StoredIterator.close(Iterator). + StoredIterator.close(iterator); + } + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/SampleDatabase.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/SampleDatabase.java new file mode 100644 index 000000000..47e7524fc --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/SampleDatabase.java @@ -0,0 +1,135 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SampleDatabase.java,v 1.3 2004/09/22 16:17:09 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.basic; + +import java.io.File; +import java.io.FileNotFoundException; + +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseConfig; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.DatabaseType; +import com.sleepycat.db.Environment; +import com.sleepycat.db.EnvironmentConfig; + +/** + * SampleDatabase defines the storage containers, indices and foreign keys + * for the sample database. + * + * @author Mark Hayes + */ +public class SampleDatabase { + + private static final String CLASS_CATALOG = "java_class_catalog"; + private static final String SUPPLIER_STORE = "supplier_store"; + private static final String PART_STORE = "part_store"; + private static final String SHIPMENT_STORE = "shipment_store"; + + private Environment env; + private Database partDb; + private Database supplierDb; + private Database shipmentDb; + private StoredClassCatalog javaCatalog; + + /** + * Open all storage containers, indices, and catalogs. + */ + public SampleDatabase(String homeDirectory) + throws DatabaseException, FileNotFoundException { + + // Open the Berkeley DB environment in transactional mode. + // + System.out.println("Opening environment in: " + homeDirectory); + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setInitializeCache(true); + envConfig.setInitializeLocking(true); + env = new Environment(new File(homeDirectory), envConfig); + + // Set the Berkeley DB config for opening all stores. + // + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + dbConfig.setType(DatabaseType.BTREE); + + // Create the Serial class catalog. This holds the serialized class + // format for all database records of serial format. + // + Database catalogDb = env.openDatabase(null, CLASS_CATALOG, null, + dbConfig); + javaCatalog = new StoredClassCatalog(catalogDb); + + // Open the Berkeley DB database for the part, supplier and shipment + // stores. The stores are opened with no duplicate keys allowed. 
+ // + partDb = env.openDatabase(null, PART_STORE, null, dbConfig); + + supplierDb = env.openDatabase(null, SUPPLIER_STORE, null, dbConfig); + + shipmentDb = env.openDatabase(null, SHIPMENT_STORE, null, dbConfig); + } + + /** + * Return the storage environment for the database. + */ + public final Environment getEnvironment() { + + return env; + } + + /** + * Return the class catalog. + */ + public final StoredClassCatalog getClassCatalog() { + + return javaCatalog; + } + + /** + * Return the part storage container. + */ + public final Database getPartDatabase() { + + return partDb; + } + + /** + * Return the supplier storage container. + */ + public final Database getSupplierDatabase() { + + return supplierDb; + } + + /** + * Return the shipment storage container. + */ + public final Database getShipmentDatabase() { + + return shipmentDb; + } + + /** + * Close all databases and the environment. + */ + public void close() + throws DatabaseException { + + partDb.close(); + supplierDb.close(); + shipmentDb.close(); + // And don't forget to close the catalog and the environment. + javaCatalog.close(); + env.close(); + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/SampleViews.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/SampleViews.java new file mode 100644 index 000000000..d75c6986b --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/SampleViews.java @@ -0,0 +1,123 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SampleViews.java,v 1.2 2004/09/22 16:17:09 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.basic; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.SerialBinding; +import com.sleepycat.collections.StoredEntrySet; +import com.sleepycat.collections.StoredMap; + +/** + * SampleViews defines the data bindings and collection views for the sample + * database. + * + * @author Mark Hayes + */ +public class SampleViews { + + private StoredMap partMap; + private StoredMap supplierMap; + private StoredMap shipmentMap; + + /** + * Create the data bindings and collection views. + */ + public SampleViews(SampleDatabase db) { + + // In this sample, the stored key and data entries are used directly + // rather than mapping them to separate objects. Therefore, no binding + // classes are defined here and the SerialBinding class is used. + // + ClassCatalog catalog = db.getClassCatalog(); + EntryBinding partKeyBinding = + new SerialBinding(catalog, PartKey.class); + EntryBinding partDataBinding = + new SerialBinding(catalog, PartData.class); + EntryBinding supplierKeyBinding = + new SerialBinding(catalog, SupplierKey.class); + EntryBinding supplierDataBinding = + new SerialBinding(catalog, SupplierData.class); + EntryBinding shipmentKeyBinding = + new SerialBinding(catalog, ShipmentKey.class); + EntryBinding shipmentDataBinding = + new SerialBinding(catalog, ShipmentData.class); + + // Create map views for all stores and indices. + // StoredSortedMap is not used since the stores and indices are + // ordered by serialized key objects, which do not provide a very + // useful ordering. 
+ // + partMap = + new StoredMap(db.getPartDatabase(), + partKeyBinding, partDataBinding, true); + supplierMap = + new StoredMap(db.getSupplierDatabase(), + supplierKeyBinding, supplierDataBinding, true); + shipmentMap = + new StoredMap(db.getShipmentDatabase(), + shipmentKeyBinding, shipmentDataBinding, true); + } + + // The views returned below can be accessed using the java.util.Map or + // java.util.Set interfaces, or using the StoredMap and StoredEntrySet + // classes, which provide additional methods. The entry sets could be + // obtained directly from the Map.entrySet() method, but convenience + // methods are provided here to return them in order to avoid down-casting + // elsewhere. + + /** + * Return a map view of the part storage container. + */ + public final StoredMap getPartMap() { + + return partMap; + } + + /** + * Return a map view of the supplier storage container. + */ + public final StoredMap getSupplierMap() { + + return supplierMap; + } + + /** + * Return a map view of the shipment storage container. + */ + public final StoredMap getShipmentMap() { + + return shipmentMap; + } + + /** + * Return an entry set view of the part storage container. + */ + public final StoredEntrySet getPartEntrySet() { + + return (StoredEntrySet) partMap.entrySet(); + } + + /** + * Return an entry set view of the supplier storage container. + */ + public final StoredEntrySet getSupplierEntrySet() { + + return (StoredEntrySet) supplierMap.entrySet(); + } + + /** + * Return an entry set view of the shipment storage container. + */ + public final StoredEntrySet getShipmentEntrySet() { + + return (StoredEntrySet) shipmentMap.entrySet(); + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/ShipmentData.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/ShipmentData.java new file mode 100644 index 000000000..4229545cd --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/ShipmentData.java @@ -0,0 +1,42 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: ShipmentData.java,v 1.2 2004/09/22 16:17:09 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.basic; + +import java.io.Serializable; + +/** + * A ShipmentData serves as the data in the key/data pair for a shipment + * entity. + * + *

+ * <p> In this sample, ShipmentData is used both as the storage entry for the + * data as well as the object binding to the data. Because it is used + * directly as storage data using serial format, it must be Serializable. </p>

    + * + * @author Mark Hayes + */ +public class ShipmentData implements Serializable { + + private int quantity; + + public ShipmentData(int quantity) { + + this.quantity = quantity; + } + + public final int getQuantity() { + + return quantity; + } + + public String toString() { + + return "[ShipmentData: quantity=" + quantity + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/ShipmentKey.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/ShipmentKey.java new file mode 100644 index 000000000..609aeb926 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/ShipmentKey.java @@ -0,0 +1,49 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: ShipmentKey.java,v 1.2 2004/09/22 16:17:09 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.basic; + +import java.io.Serializable; + +/** + * A ShipmentKey serves as the key in the key/data pair for a shipment entity. + * + *

+ * <p> In this sample, ShipmentKey is used both as the storage entry for the + * key as well as the object binding to the key. Because it is used directly + * as storage data using serial format, it must be Serializable. </p>

    + * + * @author Mark Hayes + */ +public class ShipmentKey implements Serializable { + + private String partNumber; + private String supplierNumber; + + public ShipmentKey(String partNumber, String supplierNumber) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public String toString() { + + return "[ShipmentKey: supplier=" + supplierNumber + + " part=" + partNumber + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/SupplierData.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/SupplierData.java new file mode 100644 index 000000000..1bef3ac6b --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/SupplierData.java @@ -0,0 +1,58 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SupplierData.java,v 1.2 2004/09/22 16:17:09 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.basic; + +import java.io.Serializable; + +/** + * A SupplierData serves as the data in the key/data pair for a supplier + * entity. + * + *

+ * <p> In this sample, SupplierData is used both as the storage entry for the + * data as well as the object binding to the data. Because it is used + * directly as storage data using serial format, it must be Serializable. </p>

    + * + * @author Mark Hayes + */ +public class SupplierData implements Serializable { + + private String name; + private int status; + private String city; + + public SupplierData(String name, int status, String city) { + + this.name = name; + this.status = status; + this.city = city; + } + + public final String getName() { + + return name; + } + + public final int getStatus() { + + return status; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[SupplierData: name=" + name + + " status=" + status + + " city=" + city + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/SupplierKey.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/SupplierKey.java new file mode 100644 index 000000000..ec30a918e --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/SupplierKey.java @@ -0,0 +1,41 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SupplierKey.java,v 1.2 2004/09/22 16:17:09 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.basic; + +import java.io.Serializable; + +/** + * A SupplierKey serves as the key in the key/data pair for a supplier entity. + * + *

+ * <p> In this sample, SupplierKey is used both as the storage entry for the key + * as well as the object binding to the key. Because it is used directly as + * storage data using serial format, it must be Serializable. </p>

    + * + * @author Mark Hayes + */ +public class SupplierKey implements Serializable { + + private String number; + + public SupplierKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[SupplierKey: number=" + number + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/Weight.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/Weight.java new file mode 100644 index 000000000..6cdb0102a --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/basic/Weight.java @@ -0,0 +1,50 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Weight.java,v 1.2 2004/09/22 16:17:09 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.basic; + +import java.io.Serializable; + +/** + * Weight represents a weight amount and unit of measure. + * + *

+ * <p> In this sample, Weight is embedded in part data values which are stored + * as Serial serialized objects; therefore Weight must be Serializable. </p>

    + * + * @author Mark Hayes + */ +public class Weight implements Serializable { + + public final static String GRAMS = "grams"; + public final static String OUNCES = "ounces"; + + private double amount; + private String units; + + public Weight(double amount, String units) { + + this.amount = amount; + this.units = units; + } + + public final double getAmount() { + + return amount; + } + + public final String getUnits() { + + return units; + } + + public String toString() { + + return "[" + amount + ' ' + units + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/Part.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/Part.java new file mode 100644 index 000000000..4bec31c3c --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/Part.java @@ -0,0 +1,73 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Part.java,v 1.2 2004/09/22 16:17:09 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.entity; + +/** + * A Part represents the combined key/data pair for a part entity. + * + *

+ * <p> In this sample, Part is created from the stored key/data entry using a + * SerialSerialBinding. See {@link SampleViews.PartBinding} for details. + * Since this class is not used directly for data storage, it does not need to + * be Serializable. </p>
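The PartBinding referenced here is not included in this hunk; the sketch below is only an illustration of what such an entity binding typically looks like, not the file's actual contents. It assumes the SerialSerialBinding constructor that takes a ClassCatalog and key/data classes, and that Part, PartKey and PartData from this package are in scope.

import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.SerialSerialBinding;

public class PartBindingSketch extends SerialSerialBinding {

    public PartBindingSketch(ClassCatalog catalog) {
        super(catalog, PartKey.class, PartData.class);
    }

    /** Combine the stored key and data entries into a Part entity object. */
    public Object entryToObject(Object keyInput, Object dataInput) {
        PartKey key = (PartKey) keyInput;
        PartData data = (PartData) dataInput;
        return new Part(key.getNumber(), data.getName(), data.getColor(),
                        data.getWeight(), data.getCity());
    }

    /** Extract the stored key entry from a Part entity object. */
    public Object objectToKey(Object object) {
        return new PartKey(((Part) object).getNumber());
    }

    /** Extract the stored data entry from a Part entity object. */
    public Object objectToData(Object object) {
        Part part = (Part) object;
        return new PartData(part.getName(), part.getColor(),
                            part.getWeight(), part.getCity());
    }
}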

    + * + * @author Mark Hayes + */ +public class Part { + + private String number; + private String name; + private String color; + private Weight weight; + private String city; + + public Part(String number, String name, String color, Weight weight, + String city) { + + this.number = number; + this.name = name; + this.color = color; + this.weight = weight; + this.city = city; + } + + public final String getNumber() { + + return number; + } + + public final String getName() { + + return name; + } + + public final String getColor() { + + return color; + } + + public final Weight getWeight() { + + return weight; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[Part: number=" + number + + " name=" + name + + " color=" + color + + " weight=" + weight + + " city=" + city + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/PartData.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/PartData.java new file mode 100644 index 000000000..f05bae600 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/PartData.java @@ -0,0 +1,66 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: PartData.java,v 1.2 2004/09/22 16:17:09 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.entity; + +import java.io.Serializable; + +/** + * A PartData serves as the value in the key/value pair for a part entity. + * + *

+ * <p> In this sample, PartData is used only as the storage data for the + * value, while the Part object is used as the value's object representation. + * Because it is used directly as storage data using serial format, it must be + * Serializable. </p>

    + * + * @author Mark Hayes + */ +public class PartData implements Serializable { + + private String name; + private String color; + private Weight weight; + private String city; + + public PartData(String name, String color, Weight weight, String city) { + + this.name = name; + this.color = color; + this.weight = weight; + this.city = city; + } + + public final String getName() { + + return name; + } + + public final String getColor() { + + return color; + } + + public final Weight getWeight() { + + return weight; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[PartData: name=" + name + + " color=" + color + + " weight=" + weight + + " city=" + city + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/PartKey.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/PartKey.java new file mode 100644 index 000000000..046efb556 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/PartKey.java @@ -0,0 +1,41 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: PartKey.java,v 1.2 2004/09/22 16:17:09 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.entity; + +import java.io.Serializable; + +/** + * A PartKey serves as the key in the key/data pair for a part entity. + * + *

+ * <p> In this sample, PartKey is used both as the storage entry for the key as + * well as the object binding to the key. Because it is used directly as + * storage data using serial format, it must be Serializable. </p>

    + * + * @author Mark Hayes + */ +public class PartKey implements Serializable { + + private String number; + + public PartKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[PartKey: number=" + number + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/Sample.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/Sample.java new file mode 100644 index 000000000..115a33fcd --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/Sample.java @@ -0,0 +1,250 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Sample.java,v 1.3 2004/09/22 16:17:10 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.entity; + +import java.io.FileNotFoundException; +import java.util.Iterator; +import java.util.Set; + +import com.sleepycat.collections.StoredIterator; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.db.DatabaseException; + +/** + * Sample is the main entry point for the sample program and may be run as + * follows: + * + *
+ * <pre>
+ * java com.sleepycat.examples.collections.ship.entity.Sample
+ *      [-h <home-directory> ]
+ * </pre>
    + * + *

+ * <p> The default for the home directory is ./tmp -- the tmp subdirectory of + * the current directory where the sample is run. The home directory must exist + * before running the sample. To recreate the sample database from scratch, + * delete all files in the home directory before running the sample. </p>

    + * + * @author Mark Hayes + */ +public class Sample { + + private SampleDatabase db; + private SampleViews views; + + /** + * Run the sample program. + */ + public static void main(String[] args) { + + System.out.println("\nRunning sample: " + Sample.class); + + // Parse the command line arguments. + // + String homeDir = "./tmp"; + for (int i = 0; i < args.length; i += 1) { + if (args[i].equals("-h") && i < args.length - 1) { + i += 1; + homeDir = args[i]; + } else { + System.err.println("Usage:\n java " + Sample.class.getName() + + "\n [-h ]"); + System.exit(2); + } + } + + // Run the sample. + // + Sample sample = null; + try { + sample = new Sample(homeDir); + sample.run(); + } catch (Exception e) { + // If an exception reaches this point, the last transaction did not + // complete. If the exception is RunRecoveryException, follow + // the Berkeley DB recovery procedures before running again. + e.printStackTrace(); + } finally { + if (sample != null) { + try { + // Always attempt to close the database cleanly. + sample.close(); + } catch (Exception e) { + System.err.println("Exception during database close:"); + e.printStackTrace(); + } + } + } + } + + /** + * Open the database and views. + */ + private Sample(String homeDir) + throws DatabaseException, FileNotFoundException { + + db = new SampleDatabase(homeDir); + views = new SampleViews(db); + } + + /** + * Close the database cleanly. + */ + private void close() + throws DatabaseException { + + db.close(); + } + + /** + * Run two transactions to populate and print the database. A + * TransactionRunner is used to ensure consistent handling of transactions, + * including deadlock retries. But the best transaction handling mechanism + * to use depends on the application. + */ + private void run() + throws Exception { + + TransactionRunner runner = new TransactionRunner(db.getEnvironment()); + runner.run(new PopulateDatabase()); + runner.run(new PrintDatabase()); + } + + /** + * Populate the database in a single transaction. + */ + private class PopulateDatabase implements TransactionWorker { + + public void doWork() + throws Exception { + addSuppliers(); + addParts(); + addShipments(); + } + } + + /** + * Print the database in a single transaction. All entities are printed + * and the indices are used to print the entities for certain keys. + * + *

+ * <p> Note the use of special iterator() methods. These are used here + * with indices to find the shipments for certain keys. For details on + * database iterators see {@link StoredIterator}. </p>
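A compact sketch (not part of the patch) of the duplicates() lookups this comment refers to: read every shipment stored under one part key from a secondary-index map and close the iterator afterwards. The parameter types assume the StoredMap views used elsewhere in these examples and that PartKey is in scope.

import java.util.Collection;
import java.util.Iterator;

import com.sleepycat.collections.StoredIterator;
import com.sleepycat.collections.StoredMap;

public class ShipmentsByPartSketch {

    /** Print every shipment stored under the given part key. */
    public static void printShipmentsForPart(StoredMap shipmentByPartMap,
                                             PartKey partKey) {

        // duplicates() returns all values stored under one index key.
        Collection shipments = shipmentByPartMap.duplicates(partKey);
        Iterator iter = shipments.iterator();
        try {
            while (iter.hasNext()) {
                System.out.println(iter.next());
            }
        } finally {
            // Required: releases the underlying Berkeley DB cursor.
            StoredIterator.close(iter);
        }
    }
}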

    + */ + private class PrintDatabase implements TransactionWorker { + + + public void doWork() + throws Exception { + printValues("Parts", + views.getPartSet().iterator()); + printValues("Suppliers", + views.getSupplierSet().iterator()); + printValues("Suppliers for City Paris", + views.getSupplierByCityMap().duplicates( + "Paris").iterator()); + printValues("Shipments", + views.getShipmentSet().iterator()); + printValues("Shipments for Part P1", + views.getShipmentByPartMap().duplicates( + new PartKey("P1")).iterator()); + printValues("Shipments for Supplier S1", + views.getShipmentBySupplierMap().duplicates( + new SupplierKey("S1")).iterator()); + } + } + + /** + * Populate the part entities in the database. If the part set is not + * empty, assume that this has already been done. + */ + private void addParts() { + + Set parts = views.getPartSet(); + if (parts.isEmpty()) { + System.out.println("Adding Parts"); + parts.add(new Part("P1", "Nut", "Red", + new Weight(12.0, Weight.GRAMS), "London")); + parts.add(new Part("P2", "Bolt", "Green", + new Weight(17.0, Weight.GRAMS), "Paris")); + parts.add(new Part("P3", "Screw", "Blue", + new Weight(17.0, Weight.GRAMS), "Rome")); + parts.add(new Part("P4", "Screw", "Red", + new Weight(14.0, Weight.GRAMS), "London")); + parts.add(new Part("P5", "Cam", "Blue", + new Weight(12.0, Weight.GRAMS), "Paris")); + parts.add(new Part("P6", "Cog", "Red", + new Weight(19.0, Weight.GRAMS), "London")); + } + } + + /** + * Populate the supplier entities in the database. If the supplier set is + * not empty, assume that this has already been done. + */ + private void addSuppliers() { + + Set suppliers = views.getSupplierSet(); + if (suppliers.isEmpty()) { + System.out.println("Adding Suppliers"); + suppliers.add(new Supplier("S1", "Smith", 20, "London")); + suppliers.add(new Supplier("S2", "Jones", 10, "Paris")); + suppliers.add(new Supplier("S3", "Blake", 30, "Paris")); + suppliers.add(new Supplier("S4", "Clark", 20, "London")); + suppliers.add(new Supplier("S5", "Adams", 30, "Athens")); + } + } + + /** + * Populate the shipment entities in the database. If the shipment set + * is not empty, assume that this has already been done. + */ + private void addShipments() { + + Set shipments = views.getShipmentSet(); + if (shipments.isEmpty()) { + System.out.println("Adding Shipments"); + shipments.add(new Shipment("P1", "S1", 300)); + shipments.add(new Shipment("P2", "S1", 200)); + shipments.add(new Shipment("P3", "S1", 400)); + shipments.add(new Shipment("P4", "S1", 200)); + shipments.add(new Shipment("P5", "S1", 100)); + shipments.add(new Shipment("P6", "S1", 100)); + shipments.add(new Shipment("P1", "S2", 300)); + shipments.add(new Shipment("P2", "S2", 400)); + shipments.add(new Shipment("P2", "S3", 200)); + shipments.add(new Shipment("P2", "S4", 200)); + shipments.add(new Shipment("P4", "S4", 300)); + shipments.add(new Shipment("P5", "S4", 400)); + } + } + + /** + * Print the objects returned by an iterator of entity value objects. + * + *

+ * <p> IMPORTANT: All database iterators must be closed to avoid + * serious database problems. If the iterator is not closed, the + * underlying Berkeley DB cursor is not closed either. </p>

    + */ + private void printValues(String label, Iterator iterator) { + + System.out.println("\n--- " + label + " ---"); + try { + while (iterator.hasNext()) { + System.out.println(iterator.next().toString()); + } + } finally { + // IMPORTANT: Use StoredIterator to close all database + // iterators. If java.util.Iterator is in hand, you can safely + // close it by calling StoredIterator.close(Iterator). + StoredIterator.close(iterator); + } + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/SampleDatabase.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/SampleDatabase.java new file mode 100644 index 000000000..424e27aae --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/SampleDatabase.java @@ -0,0 +1,330 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SampleDatabase.java,v 1.4 2004/09/22 18:00:56 bostic Exp $ + */ + +package com.sleepycat.examples.collections.ship.entity; + +import java.io.File; +import java.io.FileNotFoundException; + +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.SerialSerialKeyCreator; +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseConfig; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.DatabaseType; +import com.sleepycat.db.Environment; +import com.sleepycat.db.EnvironmentConfig; +import com.sleepycat.db.SecondaryConfig; +import com.sleepycat.db.SecondaryDatabase; + +/** + * SampleDatabase defines the storage containers, indices and foreign keys + * for the sample database. + * + * @author Mark Hayes + */ +public class SampleDatabase { + + private static final String CLASS_CATALOG = "java_class_catalog"; + private static final String SUPPLIER_STORE = "supplier_store"; + private static final String PART_STORE = "part_store"; + private static final String SHIPMENT_STORE = "shipment_store"; + private static final String SHIPMENT_PART_INDEX = "shipment_part_index"; + private static final String SHIPMENT_SUPPLIER_INDEX = + "shipment_supplier_index"; + private static final String SUPPLIER_CITY_INDEX = "supplier_city_index"; + + private Environment env; + private Database partDb; + private Database supplierDb; + private Database shipmentDb; + private SecondaryDatabase supplierByCityDb; + private SecondaryDatabase shipmentByPartDb; + private SecondaryDatabase shipmentBySupplierDb; + private StoredClassCatalog javaCatalog; + + /** + * Open all storage containers, indices, and catalogs. + */ + public SampleDatabase(String homeDirectory) + throws DatabaseException, FileNotFoundException { + + // Open the Berkeley DB environment in transactional mode. + // + System.out.println("Opening environment in: " + homeDirectory); + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setInitializeCache(true); + envConfig.setInitializeLocking(true); + env = new Environment(new File(homeDirectory), envConfig); + + // Set the Berkeley DB config for opening all stores. + // + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + dbConfig.setType(DatabaseType.BTREE); + + // Create the Serial class catalog. This holds the serialized class + // format for all database records of serial format. 
+ // + Database catalogDb = env.openDatabase(null, CLASS_CATALOG, null, + dbConfig); + javaCatalog = new StoredClassCatalog(catalogDb); + + // Open the Berkeley DB database for the part, supplier and shipment + // stores. The stores are opened with no duplicate keys allowed. + // + partDb = env.openDatabase(null, PART_STORE, null, dbConfig); + + supplierDb = env.openDatabase(null, SUPPLIER_STORE, null, dbConfig); + + shipmentDb = env.openDatabase(null, SHIPMENT_STORE, null, dbConfig); + + // Open the SecondaryDatabase for the city index of the supplier store, + // and for the part and supplier indices of the shipment store. + // Duplicate keys are allowed since more than one supplier may be in + // the same city, and more than one shipment may exist for the same + // supplier or part. A foreign key constraint is defined for the + // supplier and part indices to ensure that a shipment only refers to + // existing part and supplier keys. The CASCADE delete action means + // that shipments will be deleted if their associated part or supplier + // is deleted. + // + SecondaryConfig secConfig = new SecondaryConfig(); + secConfig.setTransactional(true); + secConfig.setAllowCreate(true); + secConfig.setType(DatabaseType.BTREE); + secConfig.setSortedDuplicates(true); + + secConfig.setKeyCreator( + new SupplierByCityKeyCreator(javaCatalog, + SupplierKey.class, + SupplierData.class, + String.class)); + supplierByCityDb = env.openSecondaryDatabase(null, SUPPLIER_CITY_INDEX, + null, + supplierDb, + secConfig); + + secConfig.setKeyCreator( + new ShipmentByPartKeyCreator(javaCatalog, + ShipmentKey.class, + ShipmentData.class, + PartKey.class)); + shipmentByPartDb = env.openSecondaryDatabase(null, SHIPMENT_PART_INDEX, + null, + shipmentDb, + secConfig); + + secConfig.setKeyCreator( + new ShipmentBySupplierKeyCreator(javaCatalog, + ShipmentKey.class, + ShipmentData.class, + SupplierKey.class)); + shipmentBySupplierDb = env.openSecondaryDatabase(null, + SHIPMENT_SUPPLIER_INDEX, + null, + shipmentDb, + secConfig); + } + + /** + * Return the storage environment for the database. + */ + public final Environment getEnvironment() { + + return env; + } + + /** + * Return the class catalog. + */ + public final StoredClassCatalog getClassCatalog() { + + return javaCatalog; + } + + /** + * Return the part storage container. + */ + public final Database getPartDatabase() { + + return partDb; + } + + /** + * Return the supplier storage container. + */ + public final Database getSupplierDatabase() { + + return supplierDb; + } + + /** + * Return the shipment storage container. + */ + public final Database getShipmentDatabase() { + + return shipmentDb; + } + + /** + * Return the shipment-by-part index. + */ + public final SecondaryDatabase getShipmentByPartDatabase() { + + return shipmentByPartDb; + } + + /** + * Return the shipment-by-supplier index. + */ + public final SecondaryDatabase getShipmentBySupplierDatabase() { + + return shipmentBySupplierDb; + } + + /** + * Return the supplier-by-city index. + */ + public final SecondaryDatabase getSupplierByCityDatabase() { + + return supplierByCityDb; + } + + /** + * Close all stores (closing a store automatically closes its indices). + */ + public void close() + throws DatabaseException { + + // Close secondary databases, then primary databases. + supplierByCityDb.close(); + shipmentByPartDb.close(); + shipmentBySupplierDb.close(); + partDb.close(); + supplierDb.close(); + shipmentDb.close(); + // And don't forget to close the catalog and the environment. 
+ javaCatalog.close(); + env.close(); + } + + /** + * The SecondaryKeyCreator for the SupplierByCity index. This is an + * extension of the abstract class SerialSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys and value are all + * of the serial format. + */ + private static class SupplierByCityKeyCreator + extends SerialSerialKeyCreator { + + /** + * Construct the city key extractor. + * @param catalog is the class catalog. + * @param primaryKeyClass is the supplier key class. + * @param valueClass is the supplier value class. + * @param indexKeyClass is the city key class. + */ + private SupplierByCityKeyCreator(ClassCatalog catalog, + Class primaryKeyClass, + Class valueClass, + Class indexKeyClass) { + + super(catalog, primaryKeyClass, valueClass, indexKeyClass); + } + + /** + * Extract the city key from a supplier key/value pair. The city key + * is stored in the supplier value, so the supplier key is not used. + */ + public Object createSecondaryKey(Object primaryKeyInput, + Object valueInput) { + + SupplierData supplierData = (SupplierData) valueInput; + return supplierData.getCity(); + } + } + + /** + * The SecondaryKeyCreator for the ShipmentByPart index. This is an + * extension of the abstract class SerialSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys and value are all + * of the serial format. + */ + private static class ShipmentByPartKeyCreator + extends SerialSerialKeyCreator { + + /** + * Construct the part key extractor. + * @param catalog is the class catalog. + * @param primaryKeyClass is the shipment key class. + * @param valueClass is the shipment value class. + * @param indexKeyClass is the part key class. + */ + private ShipmentByPartKeyCreator(ClassCatalog catalog, + Class primaryKeyClass, + Class valueClass, + Class indexKeyClass) { + + super(catalog, primaryKeyClass, valueClass, indexKeyClass); + } + + /** + * Extract the part key from a shipment key/value pair. The part key + * is stored in the shipment key, so the shipment value is not used. + */ + public Object createSecondaryKey(Object primaryKeyInput, + Object valueInput) { + + ShipmentKey shipmentKey = (ShipmentKey) primaryKeyInput; + return new PartKey(shipmentKey.getPartNumber()); + } + } + + /** + * The SecondaryKeyCreator for the ShipmentBySupplier index. This is an + * extension of the abstract class SerialSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys and value are all + * of the serial format. + */ + private static class ShipmentBySupplierKeyCreator + extends SerialSerialKeyCreator { + + /** + * Construct the supplier key extractor. + * @param catalog is the class catalog. + * @param primaryKeyClass is the shipment key class. + * @param valueClass is the shipment value class. + * @param indexKeyClass is the supplier key class. + */ + private ShipmentBySupplierKeyCreator(ClassCatalog catalog, + Class primaryKeyClass, + Class valueClass, + Class indexKeyClass) { + + super(catalog, primaryKeyClass, valueClass, indexKeyClass); + } + + /** + * Extract the supplier key from a shipment key/value pair. The part + * key is stored in the shipment key, so the shipment value is not + * used. 
+ */ + public Object createSecondaryKey(Object primaryKeyInput, + Object valueInput) { + + ShipmentKey shipmentKey = (ShipmentKey) primaryKeyInput; + return new SupplierKey(shipmentKey.getSupplierNumber()); + } + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/SampleViews.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/SampleViews.java new file mode 100644 index 000000000..cf956322d --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/SampleViews.java @@ -0,0 +1,307 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SampleViews.java,v 1.3 2004/09/22 16:17:10 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.entity; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.SerialBinding; +import com.sleepycat.bind.serial.SerialSerialBinding; +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.collections.StoredValueSet; + +/** + * SampleViews defines the data bindings and collection views for the sample + * database. + * + * @author Mark Hayes + */ +public class SampleViews { + + private StoredSortedMap partMap; + private StoredSortedMap supplierMap; + private StoredSortedMap shipmentMap; + private StoredSortedMap shipmentByPartMap; + private StoredSortedMap shipmentBySupplierMap; + private StoredSortedMap supplierByCityMap; + + /** + * Create the data bindings and collection views. + */ + public SampleViews(SampleDatabase db) { + + // Create the data bindings. + // In this sample, EntityBinding classes are used to bind the stored + // key/data entry pair to a combined data object. For keys, however, + // the stored entry is used directly via a SerialBinding and no + // special binding class is needed. + // + ClassCatalog catalog = db.getClassCatalog(); + SerialBinding partKeyBinding = + new SerialBinding(catalog, PartKey.class); + EntityBinding partDataBinding = + new PartBinding(catalog, PartKey.class, PartData.class); + SerialBinding supplierKeyBinding = + new SerialBinding(catalog, SupplierKey.class); + EntityBinding supplierDataBinding = + new SupplierBinding(catalog, SupplierKey.class, + SupplierData.class); + SerialBinding shipmentKeyBinding = + new SerialBinding(catalog, ShipmentKey.class); + EntityBinding shipmentDataBinding = + new ShipmentBinding(catalog, ShipmentKey.class, + ShipmentData.class); + SerialBinding cityKeyBinding = + new SerialBinding(catalog, String.class); + + // Create map views for all stores and indices. + // StoredSortedMap is not used since the stores and indices are + // ordered by serialized key objects, which do not provide a very + // useful ordering. 
+ // + partMap = + new StoredSortedMap(db.getPartDatabase(), + partKeyBinding, partDataBinding, true); + supplierMap = + new StoredSortedMap(db.getSupplierDatabase(), + supplierKeyBinding, supplierDataBinding, true); + shipmentMap = + new StoredSortedMap(db.getShipmentDatabase(), + shipmentKeyBinding, shipmentDataBinding, true); + shipmentByPartMap = + new StoredSortedMap(db.getShipmentByPartDatabase(), + partKeyBinding, shipmentDataBinding, true); + shipmentBySupplierMap = + new StoredSortedMap(db.getShipmentBySupplierDatabase(), + supplierKeyBinding, shipmentDataBinding, true); + supplierByCityMap = + new StoredSortedMap(db.getSupplierByCityDatabase(), + cityKeyBinding, supplierDataBinding, true); + } + + // The views returned below can be accessed using the java.util.Map or + // java.util.Set interfaces, or using the StoredSortedMap and + // StoredValueSet classes, which provide additional methods. The entity + // sets could be obtained directly from the Map.values() method but + // convenience methods are provided here to return them in order to avoid + // down-casting elsewhere. + + /** + * Return a map view of the part storage container. + */ + public StoredSortedMap getPartMap() { + + return partMap; + } + + /** + * Return a map view of the supplier storage container. + */ + public StoredSortedMap getSupplierMap() { + + return supplierMap; + } + + /** + * Return a map view of the shipment storage container. + */ + public StoredSortedMap getShipmentMap() { + + return shipmentMap; + } + + /** + * Return an entity set view of the part storage container. + */ + public StoredValueSet getPartSet() { + + return (StoredValueSet) partMap.values(); + } + + /** + * Return an entity set view of the supplier storage container. + */ + public StoredValueSet getSupplierSet() { + + return (StoredValueSet) supplierMap.values(); + } + + /** + * Return an entity set view of the shipment storage container. + */ + public StoredValueSet getShipmentSet() { + + return (StoredValueSet) shipmentMap.values(); + } + + /** + * Return a map view of the shipment-by-part index. + */ + public StoredSortedMap getShipmentByPartMap() { + + return shipmentByPartMap; + } + + /** + * Return a map view of the shipment-by-supplier index. + */ + public StoredSortedMap getShipmentBySupplierMap() { + + return shipmentBySupplierMap; + } + + /** + * Return a map view of the supplier-by-city index. + */ + public final StoredSortedMap getSupplierByCityMap() { + + return supplierByCityMap; + } + + /** + * PartBinding is used to bind the stored key/data entry pair for a part + * to a combined data object (entity). + */ + private static class PartBinding extends SerialSerialBinding { + + /** + * Construct the binding object. + */ + private PartBinding(ClassCatalog classCatalog, + Class keyClass, + Class dataClass) { + + super(classCatalog, keyClass, dataClass); + } + + /** + * Create the entity by combining the stored key and data. + */ + public Object entryToObject(Object keyInput, Object dataInput) { + + PartKey key = (PartKey) keyInput; + PartData data = (PartData) dataInput; + return new Part(key.getNumber(), data.getName(), data.getColor(), + data.getWeight(), data.getCity()); + } + + /** + * Create the stored key from the entity. + */ + public Object objectToKey(Object object) { + + Part part = (Part) object; + return new PartKey(part.getNumber()); + } + + /** + * Create the stored data from the entity. 
+ */ + public Object objectToData(Object object) { + + Part part = (Part) object; + return new PartData(part.getName(), part.getColor(), + part.getWeight(), part.getCity()); + } + } + + /** + * SupplierBinding is used to bind the stored key/data entry pair for a + * supplier to a combined data object (entity). + */ + private static class SupplierBinding extends SerialSerialBinding { + + /** + * Construct the binding object. + */ + private SupplierBinding(ClassCatalog classCatalog, + Class keyClass, + Class dataClass) { + + super(classCatalog, keyClass, dataClass); + } + + /** + * Create the entity by combining the stored key and data. + */ + public Object entryToObject(Object keyInput, Object dataInput) { + + SupplierKey key = (SupplierKey) keyInput; + SupplierData data = (SupplierData) dataInput; + return new Supplier(key.getNumber(), data.getName(), + data.getStatus(), data.getCity()); + } + + /** + * Create the stored key from the entity. + */ + public Object objectToKey(Object object) { + + Supplier supplier = (Supplier) object; + return new SupplierKey(supplier.getNumber()); + } + + /** + * Create the stored data from the entity. + */ + public Object objectToData(Object object) { + + Supplier supplier = (Supplier) object; + return new SupplierData(supplier.getName(), supplier.getStatus(), + supplier.getCity()); + } + } + + /** + * ShipmentBinding is used to bind the stored key/data entry pair for a + * shipment to a combined data object (entity). + */ + private static class ShipmentBinding extends SerialSerialBinding { + + /** + * Construct the binding object. + */ + private ShipmentBinding(ClassCatalog classCatalog, + Class keyClass, + Class dataClass) { + + super(classCatalog, keyClass, dataClass); + } + + /** + * Create the entity by combining the stored key and data. + */ + public Object entryToObject(Object keyInput, Object dataInput) { + + ShipmentKey key = (ShipmentKey) keyInput; + ShipmentData data = (ShipmentData) dataInput; + return new Shipment(key.getPartNumber(), key.getSupplierNumber(), + data.getQuantity()); + } + + /** + * Create the stored key from the entity. + */ + public Object objectToKey(Object object) { + + Shipment shipment = (Shipment) object; + return new ShipmentKey(shipment.getPartNumber(), + shipment.getSupplierNumber()); + } + + /** + * Create the stored data from the entity. + */ + public Object objectToData(Object object) { + + Shipment shipment = (Shipment) object; + return new ShipmentData(shipment.getQuantity()); + } + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/Shipment.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/Shipment.java new file mode 100644 index 000000000..4deb1347b --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/Shipment.java @@ -0,0 +1,56 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Shipment.java,v 1.2 2004/09/22 16:17:10 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.entity; + +/** + * A Shipment represents the combined key/data pair for a shipment entity. + * + *

<p> In this sample, Shipment is created from the stored key/data entry + * using a SerialSerialBinding. See {@link SampleViews.ShipmentBinding} for + * details. Since this class is not used directly for data storage, it does + * not need to be Serializable. </p>

    + * + * @author Mark Hayes + */ +public class Shipment { + + private String partNumber; + private String supplierNumber; + private int quantity; + + public Shipment(String partNumber, String supplierNumber, int quantity) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + this.quantity = quantity; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public final int getQuantity() { + + return quantity; + } + + public String toString() { + + return "[Shipment: part=" + partNumber + + " supplier=" + supplierNumber + + " quantity=" + quantity + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/ShipmentData.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/ShipmentData.java new file mode 100644 index 000000000..72af8523e --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/ShipmentData.java @@ -0,0 +1,43 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: ShipmentData.java,v 1.2 2004/09/22 16:17:10 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.entity; + +import java.io.Serializable; + +/** + * A ShipmentData serves as the value in the key/value pair for a shipment + * entity. + * + *

<p> In this sample, ShipmentData is used only as the storage data for the + * value, while the Shipment object is used as the value's object + * representation. Because it is used directly as storage data using + * serial format, it must be Serializable. </p>

    + * + * @author Mark Hayes + */ +public class ShipmentData implements Serializable { + + private int quantity; + + public ShipmentData(int quantity) { + + this.quantity = quantity; + } + + public final int getQuantity() { + + return quantity; + } + + public String toString() { + + return "[ShipmentData: quantity=" + quantity + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/ShipmentKey.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/ShipmentKey.java new file mode 100644 index 000000000..f999a3dec --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/ShipmentKey.java @@ -0,0 +1,49 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: ShipmentKey.java,v 1.2 2004/09/22 16:17:10 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.entity; + +import java.io.Serializable; + +/** + * A ShipmentKey serves as the key in the key/data pair for a shipment entity. + * + *

<p> In this sample, ShipmentKey is used both as the storage entry for the + * key as well as the object binding to the key. Because it is used directly + * as storage data using serial format, it must be Serializable. </p>

    + * + * @author Mark Hayes + */ +public class ShipmentKey implements Serializable { + + private String partNumber; + private String supplierNumber; + + public ShipmentKey(String partNumber, String supplierNumber) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public String toString() { + + return "[ShipmentKey: supplier=" + supplierNumber + + " part=" + partNumber + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/Supplier.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/Supplier.java new file mode 100644 index 000000000..a8e96e67b --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/Supplier.java @@ -0,0 +1,64 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Supplier.java,v 1.2 2004/09/22 16:17:10 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.entity; + +/** + * A Supplier represents the combined key/data pair for a supplier entity. + * + *

<p> In this sample, Supplier is created from the stored key/data entry + * using a SerialSerialBinding. See {@link SampleViews.SupplierBinding} for + * details. Since this class is not used directly for data storage, it does + * not need to be Serializable. </p>

    + * + * @author Mark Hayes + */ +public class Supplier { + + private String number; + private String name; + private int status; + private String city; + + public Supplier(String number, String name, int status, String city) { + + this.number = number; + this.name = name; + this.status = status; + this.city = city; + } + + public final String getNumber() { + + return number; + } + + public final String getName() { + + return name; + } + + public final int getStatus() { + + return status; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[Supplier: number=" + number + + " name=" + name + + " status=" + status + + " city=" + city + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/SupplierData.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/SupplierData.java new file mode 100644 index 000000000..3002bf039 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/SupplierData.java @@ -0,0 +1,59 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SupplierData.java,v 1.2 2004/09/22 16:17:10 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.entity; + +import java.io.Serializable; + +/** + * A SupplierData serves as the value in the key/value pair for a supplier + * entity. + * + *

<p> In this sample, SupplierData is used only as the storage data for the + * value, while the Supplier object is used as the value's object + * representation. Because it is used directly as storage data using + * serial format, it must be Serializable. </p>

    + * + * @author Mark Hayes + */ +public class SupplierData implements Serializable { + + private String name; + private int status; + private String city; + + public SupplierData(String name, int status, String city) { + + this.name = name; + this.status = status; + this.city = city; + } + + public final String getName() { + + return name; + } + + public final int getStatus() { + + return status; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[SupplierData: name=" + name + + " status=" + status + + " city=" + city + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/SupplierKey.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/SupplierKey.java new file mode 100644 index 000000000..a097a3a8f --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/SupplierKey.java @@ -0,0 +1,41 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SupplierKey.java,v 1.2 2004/09/22 16:17:10 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.entity; + +import java.io.Serializable; + +/** + * A SupplierKey serves as the key in the key/data pair for a supplier entity. + * + *

<p> In this sample, SupplierKey is used both as the storage entry for the + * key as well as the object binding to the key. Because it is used directly + * as storage data using serial format, it must be Serializable. </p>

    + * + * @author Mark Hayes + */ +public class SupplierKey implements Serializable { + + private String number; + + public SupplierKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[SupplierKey: number=" + number + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/Weight.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/Weight.java new file mode 100644 index 000000000..b45ab918c --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/entity/Weight.java @@ -0,0 +1,50 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Weight.java,v 1.2 2004/09/22 16:17:10 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.entity; + +import java.io.Serializable; + +/** + * Weight represents a weight amount and unit of measure. + * + *

<p> In this sample, Weight is embedded in part data values which are stored + * as Serial serialized objects; therefore Weight must be Serializable. </p>

    + * + * @author Mark Hayes + */ +public class Weight implements Serializable { + + public final static String GRAMS = "grams"; + public final static String OUNCES = "ounces"; + + private double amount; + private String units; + + public Weight(double amount, String units) { + + this.amount = amount; + this.units = units; + } + + public final double getAmount() { + + return amount; + } + + public final String getUnits() { + + return units; + } + + public String toString() { + + return "[" + amount + ' ' + units + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/Part.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/Part.java new file mode 100644 index 000000000..7cd54c60d --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/Part.java @@ -0,0 +1,107 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Part.java,v 1.3 2004/09/22 16:17:10 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.factory; + +import java.io.Serializable; + +import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A Part represents the combined key/data pair for a part entity. + * + *

<p> In this sample, Part is bound to the stored key/data entry by + * implementing the MarshalledTupleKeyEntity interface. </p> + * + * <p> The binding is "tricky" in that it uses this class for both the stored + * data entry and the combined entity object. To do this, the key field(s) + * are transient and are set by the binding after the data object has been + * deserialized. This avoids the use of a PartData class completely. </p> + * + * <p> Since this class is used directly for data storage, it must be + * Serializable. </p>

    + * + * @author Mark Hayes + */ +public class Part implements Serializable, MarshalledTupleKeyEntity { + + private transient String number; + private String name; + private String color; + private Weight weight; + private String city; + + public Part(String number, String name, String color, Weight weight, + String city) { + + this.number = number; + this.name = name; + this.color = color; + this.weight = weight; + this.city = city; + } + + public final String getNumber() { + + return number; + } + + public final String getName() { + + return name; + } + + public final String getColor() { + + return color; + } + + public final Weight getWeight() { + + return weight; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[Part: number=" + number + + " name=" + name + + " color=" + color + + " weight=" + weight + + " city=" + city + ']'; + } + + // --- MarshalledTupleKeyEntity implementation --- + + public void marshalPrimaryKey(TupleOutput keyOutput) { + + keyOutput.writeString(this.number); + } + + public void unmarshalPrimaryKey(TupleInput keyInput) { + + this.number = keyInput.readString(); + } + + public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) { + + throw new UnsupportedOperationException(keyName); + } + + public boolean nullifyForeignKey(String keyName) { + + throw new UnsupportedOperationException(keyName); + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/PartKey.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/PartKey.java new file mode 100644 index 000000000..a548fb1ff --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/PartKey.java @@ -0,0 +1,61 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: PartKey.java,v 1.3 2004/09/22 16:17:10 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.factory; + +import com.sleepycat.bind.tuple.MarshalledTupleEntry; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A PartKey serves as the key in the key/data pair for a part entity. + * + *

<p> In this sample, PartKey is bound to the stored key tuple entry by + * implementing the MarshalledTupleEntry interface, which is called by {@link + * SampleViews.MarshalledKeyBinding}. </p>
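(Illustrative aside, not part of the patch.) The marshalling described above for Part and PartKey is driven by the bindings that TupleSerialFactory creates, not by code in these classes themselves. A minimal sketch of that mechanism, assuming the hypothetical helper class MarshallingSketch lives in the same package as Part and PartKey:

import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;

class MarshallingSketch {

    // Rebuild the Part entity: deserializing the data entry yields a Part
    // whose transient 'number' field is still unset, and the key tuple is
    // then read to fill it in (this is what the "tricky" binding does).
    static Part partFromEntries(byte[] keyTupleBytes, Part dataObject) {
        dataObject.unmarshalPrimaryKey(new TupleInput(keyTupleBytes));
        return dataObject;
    }

    // Rebuild a PartKey from a key tuple: the no-argument constructor lets
    // the binding instantiate the key, which then unmarshals its own fields.
    static PartKey keyFromTuple(TupleInput input) {
        PartKey key = new PartKey();
        key.unmarshalEntry(input);
        return key;
    }

    // Write a PartKey back out as a key tuple.
    static void keyToTuple(PartKey key, TupleOutput output) {
        key.marshalEntry(output);
    }
}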

    + * + * @author Mark Hayes + */ +public class PartKey implements MarshalledTupleEntry { + + private String number; + + public PartKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[PartKey: number=" + number + ']'; + } + + // --- MarshalledTupleEntry implementation --- + + public PartKey() { + + // A no-argument constructor is necessary only to allow the binding to + // instantiate objects of this class. + } + + public void marshalEntry(TupleOutput keyOutput) { + + keyOutput.writeString(this.number); + } + + public void unmarshalEntry(TupleInput keyInput) { + + this.number = keyInput.readString(); + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/Sample.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/Sample.java new file mode 100644 index 000000000..f320191b4 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/Sample.java @@ -0,0 +1,248 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Sample.java,v 1.3 2004/09/22 16:17:10 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.factory; + +import java.util.Iterator; +import java.util.Set; + +import com.sleepycat.collections.StoredIterator; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; + +/** + * Sample is the main entry point for the sample program and may be run as + * follows: + * + *
<pre> + * java com.sleepycat.examples.collections.ship.factory.Sample + *      [-h <home-directory> ] + * </pre> + * + * <p> The default for the home directory is ./tmp -- the tmp subdirectory of + * the current directory where the sample is run. To specify a different home + * directory, use the -h option. The home directory must exist before + * running the sample. To recreate the sample database from scratch, delete + * all files in the home directory before running the sample. </p>
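(Illustrative aside, not part of the patch.) The sample can also be launched from other Java code; the hypothetical wrapper below passes -h explicitly and assumes the ./tmp directory already exists:

public class RunFactorySample {
    public static void main(String[] args) {
        // Equivalent to the command line shown above.
        com.sleepycat.examples.collections.ship.factory.Sample.main(
            new String[] { "-h", "./tmp" });
    }
}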

    + * + * @author Mark Hayes + */ +public class Sample { + + private SampleDatabase db; + private SampleViews views; + + /** + * Run the sample program. + */ + public static void main(String[] args) { + + System.out.println("\nRunning sample: " + Sample.class); + + // Parse the command line arguments. + // + String homeDir = "./tmp"; + for (int i = 0; i < args.length; i += 1) { + if (args[i].equals("-h") && i < args.length - 1) { + i += 1; + homeDir = args[i]; + } else { + System.err.println("Usage:\n java " + Sample.class.getName() + + "\n [-h ]"); + System.exit(2); + } + } + + // Run the sample. + // + Sample sample = null; + try { + sample = new Sample(homeDir); + sample.run(); + } catch (Exception e) { + // If an exception reaches this point, the last transaction did not + // complete. If the exception is RunRecoveryException, follow + // the Berkeley DB recovery procedures before running again. + e.printStackTrace(); + } finally { + if (sample != null) { + try { + // Always attempt to close the database cleanly. + sample.close(); + } catch (Exception e) { + System.err.println("Exception during database close:"); + e.printStackTrace(); + } + } + } + } + + /** + * Open the database and views. + */ + private Sample(String homeDir) + throws Exception { + + db = new SampleDatabase(homeDir); + views = new SampleViews(db); + } + + /** + * Close the database cleanly. + */ + private void close() + throws Exception { + + db.close(); + } + + /** + * Run two transactions to populate and print the database. A + * TransactionRunner is used to ensure consistent handling of transactions, + * including deadlock retries. But the best transaction handling mechanism + * to use depends on the application. + */ + private void run() + throws Exception { + + TransactionRunner runner = new TransactionRunner(db.getEnvironment()); + runner.run(new PopulateDatabase()); + runner.run(new PrintDatabase()); + } + + /** + * Populate the database in a single transaction. + */ + private class PopulateDatabase implements TransactionWorker { + + public void doWork() + throws Exception { + addSuppliers(); + addParts(); + addShipments(); + } + } + + /** + * Print the database in a single transaction. All entities are printed + * and the indices are used to print the entities for certain keys. + * + *

<p> Note the use of special iterator() methods. These are used here + * with indices to find the shipments for certain keys. For details on + * database iterators see {@link StoredIterator}. </p>

    + */ + private class PrintDatabase implements TransactionWorker { + + public void doWork() + throws Exception { + printValues("Parts", + views.getPartSet().iterator()); + printValues("Suppliers", + views.getSupplierSet().iterator()); + printValues("Suppliers for City Paris", + views.getSupplierByCityMap().duplicates( + "Paris").iterator()); + printValues("Shipments", + views.getShipmentSet().iterator()); + printValues("Shipments for Part P1", + views.getShipmentByPartMap().duplicates( + new PartKey("P1")).iterator()); + printValues("Shipments for Supplier S1", + views.getShipmentBySupplierMap().duplicates( + new SupplierKey("S1")).iterator()); + } + } + + /** + * Populate the part entities in the database. If the part set is not + * empty, assume that this has already been done. + */ + private void addParts() { + + Set parts = views.getPartSet(); + if (parts.isEmpty()) { + System.out.println("Adding Parts"); + parts.add(new Part("P1", "Nut", "Red", + new Weight(12.0, Weight.GRAMS), "London")); + parts.add(new Part("P2", "Bolt", "Green", + new Weight(17.0, Weight.GRAMS), "Paris")); + parts.add(new Part("P3", "Screw", "Blue", + new Weight(17.0, Weight.GRAMS), "Rome")); + parts.add(new Part("P4", "Screw", "Red", + new Weight(14.0, Weight.GRAMS), "London")); + parts.add(new Part("P5", "Cam", "Blue", + new Weight(12.0, Weight.GRAMS), "Paris")); + parts.add(new Part("P6", "Cog", "Red", + new Weight(19.0, Weight.GRAMS), "London")); + } + } + + /** + * Populate the supplier entities in the database. If the supplier set is + * not empty, assume that this has already been done. + */ + private void addSuppliers() { + + Set suppliers = views.getSupplierSet(); + if (suppliers.isEmpty()) { + System.out.println("Adding Suppliers"); + suppliers.add(new Supplier("S1", "Smith", 20, "London")); + suppliers.add(new Supplier("S2", "Jones", 10, "Paris")); + suppliers.add(new Supplier("S3", "Blake", 30, "Paris")); + suppliers.add(new Supplier("S4", "Clark", 20, "London")); + suppliers.add(new Supplier("S5", "Adams", 30, "Athens")); + } + } + + /** + * Populate the shipment entities in the database. If the shipment set + * is not empty, assume that this has already been done. + */ + private void addShipments() { + + Set shipments = views.getShipmentSet(); + if (shipments.isEmpty()) { + System.out.println("Adding Shipments"); + shipments.add(new Shipment("P1", "S1", 300)); + shipments.add(new Shipment("P2", "S1", 200)); + shipments.add(new Shipment("P3", "S1", 400)); + shipments.add(new Shipment("P4", "S1", 200)); + shipments.add(new Shipment("P5", "S1", 100)); + shipments.add(new Shipment("P6", "S1", 100)); + shipments.add(new Shipment("P1", "S2", 300)); + shipments.add(new Shipment("P2", "S2", 400)); + shipments.add(new Shipment("P2", "S3", 200)); + shipments.add(new Shipment("P2", "S4", 200)); + shipments.add(new Shipment("P4", "S4", 300)); + shipments.add(new Shipment("P5", "S4", 400)); + } + } + + /** + * Print the objects returned by an iterator of entity value objects. + * + *

<p> IMPORTANT: All database iterators must be closed to avoid + * serious database problems. If the iterator is not closed, the + * underlying Berkeley DB cursor is not closed either. </p>
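(Illustrative aside, not part of the patch.) The printValues() method below shows the recommended try/finally pattern with the static StoredIterator.close(Iterator) helper. The same cleanup can also be written against the StoredIterator type itself, since the iterators of the stored collections used in this sample are StoredIterator instances; the helper class below is hypothetical:

import java.util.Set;

import com.sleepycat.collections.StoredIterator;

class IteratorCloseSketch {

    // Print every element and always close the underlying cursor, holding
    // the StoredIterator type explicitly instead of java.util.Iterator.
    static void printAndClose(Set entitySet) {
        StoredIterator i = (StoredIterator) entitySet.iterator();
        try {
            while (i.hasNext()) {
                System.out.println(i.next());
            }
        } finally {
            i.close(); // same effect as StoredIterator.close(i)
        }
    }
}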

    + */ + private void printValues(String label, Iterator iterator) { + + System.out.println("\n--- " + label + " ---"); + try { + while (iterator.hasNext()) { + System.out.println(iterator.next().toString()); + } + } finally { + // IMPORTANT: Use StoredIterator to close all database + // iterators. If java.util.Iterator is in hand, you can safely + // close it by calling StoredIterator.close(Iterator). + StoredIterator.close(iterator); + } + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/SampleDatabase.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/SampleDatabase.java new file mode 100644 index 000000000..0a8af819f --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/SampleDatabase.java @@ -0,0 +1,225 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SampleDatabase.java,v 1.3 2004/09/22 16:17:10 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.factory; + +import java.io.File; +import java.io.FileNotFoundException; + +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.collections.TupleSerialFactory; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseConfig; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.DatabaseType; +import com.sleepycat.db.Environment; +import com.sleepycat.db.EnvironmentConfig; +import com.sleepycat.db.SecondaryConfig; +import com.sleepycat.db.SecondaryDatabase; + +/** + * SampleDatabase defines the storage containers, indices and foreign keys + * for the sample database. + * + * @author Mark Hayes + */ +public class SampleDatabase { + + private static final String CLASS_CATALOG = "java_class_catalog"; + private static final String SUPPLIER_STORE = "supplier_store"; + private static final String PART_STORE = "part_store"; + private static final String SHIPMENT_STORE = "shipment_store"; + private static final String SHIPMENT_PART_INDEX = "shipment_part_index"; + private static final String SHIPMENT_SUPPLIER_INDEX = + "shipment_supplier_index"; + private static final String SUPPLIER_CITY_INDEX = "supplier_city_index"; + + private Environment env; + private Database partDb; + private Database supplierDb; + private Database shipmentDb; + private SecondaryDatabase supplierByCityDb; + private SecondaryDatabase shipmentByPartDb; + private SecondaryDatabase shipmentBySupplierDb; + private StoredClassCatalog javaCatalog; + private TupleSerialFactory factory; + + /** + * Open all storage containers, indices, and catalogs. + */ + public SampleDatabase(String homeDirectory) + throws DatabaseException, FileNotFoundException { + + // Open the Berkeley DB environment in transactional mode. + // + System.out.println("Opening environment in: " + homeDirectory); + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setInitializeCache(true); + envConfig.setInitializeLocking(true); + env = new Environment(new File(homeDirectory), envConfig); + + // Set the Berkeley DB config for opening all stores. + // + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + dbConfig.setType(DatabaseType.BTREE); + + // Create the Serial class catalog. This holds the serialized class + // format for all database records of serial format. 
+ // + Database catalogDb = env.openDatabase(null, CLASS_CATALOG, null, + dbConfig); + javaCatalog = new StoredClassCatalog(catalogDb); + + // Use the TupleSerialDbFactory for a Serial/Tuple-based database + // where marshalling interfaces are used. + // + factory = new TupleSerialFactory(javaCatalog); + + // Open the Berkeley DB database for the part, supplier and shipment + // stores. The stores are opened with no duplicate keys allowed. + // + partDb = env.openDatabase(null, PART_STORE, null, dbConfig); + + supplierDb = env.openDatabase(null, SUPPLIER_STORE, null, dbConfig); + + shipmentDb = env.openDatabase(null, SHIPMENT_STORE, null, dbConfig); + + // Open the SecondaryDatabase for the city index of the supplier store, + // and for the part and supplier indices of the shipment store. + // Duplicate keys are allowed since more than one supplier may be in + // the same city, and more than one shipment may exist for the same + // supplier or part. A foreign key constraint is defined for the + // supplier and part indices to ensure that a shipment only refers to + // existing part and supplier keys. The CASCADE delete action means + // that shipments will be deleted if their associated part or supplier + // is deleted. + // + SecondaryConfig secConfig = new SecondaryConfig(); + secConfig.setTransactional(true); + secConfig.setAllowCreate(true); + secConfig.setType(DatabaseType.BTREE); + secConfig.setSortedDuplicates(true); + + secConfig.setKeyCreator(factory.getKeyCreator(Supplier.class, + Supplier.CITY_KEY)); + supplierByCityDb = env.openSecondaryDatabase(null, SUPPLIER_CITY_INDEX, + null, + supplierDb, + secConfig); + + secConfig.setKeyCreator(factory.getKeyCreator(Shipment.class, + Shipment.PART_KEY)); + shipmentByPartDb = env.openSecondaryDatabase(null, SHIPMENT_PART_INDEX, + null, + shipmentDb, + secConfig); + + secConfig.setKeyCreator(factory.getKeyCreator(Shipment.class, + Shipment.SUPPLIER_KEY)); + shipmentBySupplierDb = env.openSecondaryDatabase(null, + SHIPMENT_SUPPLIER_INDEX, + null, + shipmentDb, + secConfig); + } + + /** + * Return the tuple-serial factory. + */ + public final TupleSerialFactory getFactory() { + + return factory; + } + + /** + * Return the storage environment for the database. + */ + public final Environment getEnvironment() { + + return env; + } + + /** + * Return the class catalog. + */ + public final StoredClassCatalog getClassCatalog() { + + return javaCatalog; + } + + /** + * Return the part storage container. + */ + public final Database getPartDatabase() { + + return partDb; + } + + /** + * Return the supplier storage container. + */ + public final Database getSupplierDatabase() { + + return supplierDb; + } + + /** + * Return the shipment storage container. + */ + public final Database getShipmentDatabase() { + + return shipmentDb; + } + + /** + * Return the shipment-by-part index. + */ + public final SecondaryDatabase getShipmentByPartDatabase() { + + return shipmentByPartDb; + } + + /** + * Return the shipment-by-supplier index. + */ + public final SecondaryDatabase getShipmentBySupplierDatabase() { + + return shipmentBySupplierDb; + } + + /** + * Return the supplier-by-city index. + */ + public final SecondaryDatabase getSupplierByCityDatabase() { + + return supplierByCityDb; + } + + /** + * Close all databases and the environment. + */ + public void close() + throws DatabaseException { + + // Close secondary databases, then primary databases. 
+ supplierByCityDb.close(); + shipmentByPartDb.close(); + shipmentBySupplierDb.close(); + partDb.close(); + supplierDb.close(); + shipmentDb.close(); + // And don't forget to close the catalog and the environment. + javaCatalog.close(); + env.close(); + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/SampleViews.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/SampleViews.java new file mode 100644 index 000000000..1c78cac03 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/SampleViews.java @@ -0,0 +1,143 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SampleViews.java,v 1.2 2004/09/22 16:17:10 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.factory; + +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.collections.StoredSortedValueSet; +import com.sleepycat.collections.TupleSerialFactory; + +/** + * SampleViews defines the data bindings and collection views for the sample + * database. + * + * @author Mark Hayes + */ +public class SampleViews { + + private StoredSortedMap partMap; + private StoredSortedMap supplierMap; + private StoredSortedMap shipmentMap; + private StoredSortedMap shipmentByPartMap; + private StoredSortedMap shipmentBySupplierMap; + private StoredSortedMap supplierByCityMap; + + /** + * Create the data bindings and collection views. + */ + public SampleViews(SampleDatabase db) { + + // Use the TupleSerialFactory for a Serial/Tuple-based database + // where marshalling interfaces are used. + // + TupleSerialFactory factory = db.getFactory(); + + // Create map views for all stores and indices. + // StoredSortedMap is used since the stores and indices are ordered + // (they use the DB_BTREE access method). + // + partMap = + factory.newSortedMap(db.getPartDatabase(), + PartKey.class, Part.class, true); + supplierMap = + factory.newSortedMap(db.getSupplierDatabase(), + SupplierKey.class, Supplier.class, true); + shipmentMap = + factory.newSortedMap(db.getShipmentDatabase(), + ShipmentKey.class, Shipment.class, true); + shipmentByPartMap = + factory.newSortedMap(db.getShipmentByPartDatabase(), + PartKey.class, Shipment.class, true); + shipmentBySupplierMap = + factory.newSortedMap(db.getShipmentBySupplierDatabase(), + SupplierKey.class, Shipment.class, true); + supplierByCityMap = + factory.newSortedMap(db.getSupplierByCityDatabase(), + String.class, Supplier.class, true); + } + + // The views returned below can be accessed using the java.util.Map or + // java.util.Set interfaces, or using the StoredMap and StoredValueSet + // classes, which provide additional methods. The entity sets could be + // obtained directly from the Map.values() method but convenience methods + // are provided here to return them in order to avoid down-casting + // elsewhere. + + /** + * Return a map view of the part storage container. + */ + public StoredSortedMap getPartMap() { + + return partMap; + } + + /** + * Return a map view of the supplier storage container. + */ + public StoredSortedMap getSupplierMap() { + + return supplierMap; + } + + /** + * Return a map view of the shipment storage container. + */ + public StoredSortedMap getShipmentMap() { + + return shipmentMap; + } + + /** + * Return an entity set view of the part storage container. 
+ */ + public StoredSortedValueSet getPartSet() { + + return (StoredSortedValueSet) partMap.values(); + } + + /** + * Return an entity set view of the supplier storage container. + */ + public StoredSortedValueSet getSupplierSet() { + + return (StoredSortedValueSet) supplierMap.values(); + } + + /** + * Return an entity set view of the shipment storage container. + */ + public StoredSortedValueSet getShipmentSet() { + + return (StoredSortedValueSet) shipmentMap.values(); + } + + /** + * Return a map view of the shipment-by-part index. + */ + public StoredSortedMap getShipmentByPartMap() { + + return shipmentByPartMap; + } + + /** + * Return a map view of the shipment-by-supplier index. + */ + public StoredSortedMap getShipmentBySupplierMap() { + + return shipmentBySupplierMap; + } + + /** + * Return a map view of the supplier-by-city index. + */ + public StoredSortedMap getSupplierByCityMap() { + + return supplierByCityMap; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/Shipment.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/Shipment.java new file mode 100644 index 000000000..7e15e9d41 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/Shipment.java @@ -0,0 +1,103 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Shipment.java,v 1.3 2004/09/22 16:17:10 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.factory; + +import java.io.Serializable; + +import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A Shipment represents the combined key/data pair for a shipment entity. + * + *

<p> In this sample, Shipment is bound to the stored key/data entry by + * implementing the MarshalledTupleKeyEntity interface. </p> + * + * <p> The binding is "tricky" in that it uses this class for both the stored + * data entry and the combined entity object. To do this, the key field(s) + * are transient and are set by the binding after the data object has been + * deserialized. This avoids the use of a ShipmentData class completely. </p> + * + * <p> Since this class is used directly for data storage, it must be + * Serializable. </p>
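(Illustrative aside, not part of the patch.) The key creators obtained from TupleSerialFactory.getKeyCreator(Shipment.class, Shipment.PART_KEY) rely on the entity's marshalSecondaryKey() method shown below. A rough sketch of that interaction, using a hypothetical helper and the literal "part" because PART_KEY is package-private:

import com.sleepycat.bind.tuple.TupleOutput;

class SecondaryKeySketch {

    // Ask the entity to marshal the named secondary key into a tuple.
    // A false return (as in Supplier when its city is null) means the
    // record contributes no entry to that index.
    static byte[] partIndexKey(Shipment shipment) {
        TupleOutput output = new TupleOutput();
        if (!shipment.marshalSecondaryKey("part", output)) {
            return null;
        }
        return output.toByteArray();
    }
}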

    + * + * @author Mark Hayes + */ +public class Shipment implements Serializable, MarshalledTupleKeyEntity { + + static final String PART_KEY = "part"; + static final String SUPPLIER_KEY = "supplier"; + + private transient String partNumber; + private transient String supplierNumber; + private int quantity; + + public Shipment(String partNumber, String supplierNumber, int quantity) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + this.quantity = quantity; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public final int getQuantity() { + + return quantity; + } + + public String toString() { + + return "[Shipment: part=" + partNumber + + " supplier=" + supplierNumber + + " quantity=" + quantity + ']'; + } + + // --- MarshalledTupleKeyEntity implementation --- + + public void marshalPrimaryKey(TupleOutput keyOutput) { + + keyOutput.writeString(this.partNumber); + keyOutput.writeString(this.supplierNumber); + } + + public void unmarshalPrimaryKey(TupleInput keyInput) { + + this.partNumber = keyInput.readString(); + this.supplierNumber = keyInput.readString(); + } + + public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) { + + if (keyName.equals(PART_KEY)) { + keyOutput.writeString(this.partNumber); + return true; + } else if (keyName.equals(SUPPLIER_KEY)) { + keyOutput.writeString(this.supplierNumber); + return true; + } else { + throw new UnsupportedOperationException(keyName); + } + } + + public boolean nullifyForeignKey(String keyName) { + + throw new UnsupportedOperationException(keyName); + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/ShipmentKey.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/ShipmentKey.java new file mode 100644 index 000000000..ea570033a --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/ShipmentKey.java @@ -0,0 +1,71 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: ShipmentKey.java,v 1.3 2004/09/22 16:17:10 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.factory; + +import com.sleepycat.bind.tuple.MarshalledTupleEntry; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A ShipmentKey serves as the key in the key/data pair for a shipment entity. + * + *

<p> In this sample, ShipmentKey is bound to the stored key tuple entry by + * implementing the MarshalledTupleEntry interface, which is called by {@link + * SampleViews.MarshalledKeyBinding}. </p>

    + * + * @author Mark Hayes + */ +public class ShipmentKey implements MarshalledTupleEntry { + + private String partNumber; + private String supplierNumber; + + public ShipmentKey(String partNumber, String supplierNumber) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public String toString() { + + return "[ShipmentKey: supplier=" + supplierNumber + + " part=" + partNumber + ']'; + } + + // --- MarshalledTupleEntry implementation --- + + public ShipmentKey() { + + // A no-argument constructor is necessary only to allow the binding to + // instantiate objects of this class. + } + + public void marshalEntry(TupleOutput keyOutput) { + + keyOutput.writeString(this.partNumber); + keyOutput.writeString(this.supplierNumber); + } + + public void unmarshalEntry(TupleInput keyInput) { + + this.partNumber = keyInput.readString(); + this.supplierNumber = keyInput.readString(); + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/Supplier.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/Supplier.java new file mode 100644 index 000000000..828df1e35 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/Supplier.java @@ -0,0 +1,109 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Supplier.java,v 1.3 2004/09/22 16:17:10 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.factory; + +import java.io.Serializable; + +import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A Supplier represents the combined key/data pair for a supplier entity. + * + *

<p> In this sample, Supplier is bound to the stored key/data entry by + * implementing the MarshalledTupleKeyEntity interface. </p> + * + * <p> The binding is "tricky" in that it uses this class for both the stored + * data entry and the combined entity object. To do this, the key field(s) are + * transient and are set by the binding after the data object has been + * deserialized. This avoids the use of a SupplierData class completely. </p> + * + * <p> Since this class is used directly for data storage, it must be + * Serializable. </p>

    + * + * @author Mark Hayes + */ +public class Supplier implements Serializable, MarshalledTupleKeyEntity { + + static final String CITY_KEY = "city"; + + private transient String number; + private String name; + private int status; + private String city; + + public Supplier(String number, String name, int status, String city) { + + this.number = number; + this.name = name; + this.status = status; + this.city = city; + } + + public final String getNumber() { + + return number; + } + + public final String getName() { + + return name; + } + + public final int getStatus() { + + return status; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[Supplier: number=" + number + + " name=" + name + + " status=" + status + + " city=" + city + ']'; + } + + // --- MarshalledTupleKeyEntity implementation --- + + public void marshalPrimaryKey(TupleOutput keyOutput) { + + keyOutput.writeString(this.number); + } + + public void unmarshalPrimaryKey(TupleInput keyInput) { + + this.number = keyInput.readString(); + } + + public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) { + + if (keyName.equals(CITY_KEY)) { + if (this.city != null) { + keyOutput.writeString(this.city); + return true; + } else { + return false; + } + } else { + throw new UnsupportedOperationException(keyName); + } + } + + public boolean nullifyForeignKey(String keyName) { + + throw new UnsupportedOperationException(keyName); + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/SupplierKey.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/SupplierKey.java new file mode 100644 index 000000000..8ffac0062 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/SupplierKey.java @@ -0,0 +1,61 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SupplierKey.java,v 1.3 2004/09/22 16:17:11 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.factory; + +import com.sleepycat.bind.tuple.MarshalledTupleEntry; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A SupplierKey serves as the key in the key/data pair for a supplier entity. + * + *

    In this sample, SupplierKey is bound to the stored key tuple entry by + * implementing the MarshalledTupleEntry interface, which is called by {@link + * SampleViews.MarshalledKeyBinding}.

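To make the MarshalledTupleEntry contract concrete, a tuple key binding essentially creates the key through its no-argument constructor and lets the object populate itself from the stored tuple. The round trip below is a hypothetical sketch (the class name SupplierKeyRoundTrip is not part of the patch) and assumes the factory-package SupplierKey defined here.

package com.sleepycat.examples.collections.ship.factory;

import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;

// Hypothetical, illustration only: a round trip through the
// MarshalledTupleEntry methods, mirroring what a key binding does.
public class SupplierKeyRoundTrip {

    public static void main(String[] args) {

        // Write the key fields to a tuple, as marshalEntry() defines.
        SupplierKey original = new SupplierKey("S1");
        TupleOutput output = new TupleOutput();
        original.marshalEntry(output);

        // Recreate the key as a binding would: no-argument constructor,
        // then unmarshalEntry() to read the fields back.
        SupplierKey copy = new SupplierKey();
        copy.unmarshalEntry(new TupleInput(output.toByteArray()));

        System.out.println(copy);   // prints [SupplierKey: number=S1]
    }
}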
    + * + * @author Mark Hayes + */ +public class SupplierKey implements MarshalledTupleEntry { + + private String number; + + public SupplierKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[SupplierKey: number=" + number + ']'; + } + + // --- MarshalledTupleEntry implementation --- + + public SupplierKey() { + + // A no-argument constructor is necessary only to allow the binding to + // instantiate objects of this class. + } + + public void marshalEntry(TupleOutput keyOutput) { + + keyOutput.writeString(this.number); + } + + public void unmarshalEntry(TupleInput keyInput) { + + this.number = keyInput.readString(); + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/Weight.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/Weight.java new file mode 100644 index 000000000..3da852c7f --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/factory/Weight.java @@ -0,0 +1,50 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Weight.java,v 1.2 2004/09/22 16:17:11 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.factory; + +import java.io.Serializable; + +/** + * Weight represents a weight amount and unit of measure. + * + *

    In this sample, Weight is embedded in part data values which are stored + * as Java serialized objects; therefore Weight must be Serializable.

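Because the stored part data is written with plain Java serialization, the Serializable requirement can be checked with nothing but the JDK. The sketch below is hypothetical (WeightSerializationSketch is not part of the patch) and simply round-trips a Weight through an object stream, which is essentially what the serial format does for the enclosing part data.

package com.sleepycat.examples.collections.ship.factory;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;

// Hypothetical, illustration only: Weight must survive a Java
// serialization round trip because it is embedded in serialized data.
public class WeightSerializationSketch {

    public static void main(String[] args) throws Exception {

        Weight weight = new Weight(12.0, Weight.GRAMS);

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        ObjectOutputStream out = new ObjectOutputStream(bytes);
        out.writeObject(weight);
        out.close();

        ObjectInputStream in =
            new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        Weight copy = (Weight) in.readObject();

        System.out.println(copy);   // prints [12.0 grams]
    }
}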
    + * + * @author Mark Hayes + */ +public class Weight implements Serializable { + + public final static String GRAMS = "grams"; + public final static String OUNCES = "ounces"; + + private double amount; + private String units; + + public Weight(double amount, String units) { + + this.amount = amount; + this.units = units; + } + + public final double getAmount() { + + return amount; + } + + public final String getUnits() { + + return units; + } + + public String toString() { + + return "[" + amount + ' ' + units + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/index/PartData.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/index/PartData.java new file mode 100644 index 000000000..8c052acd1 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/index/PartData.java @@ -0,0 +1,65 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: PartData.java,v 1.2 2004/09/22 16:17:11 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.index; + +import java.io.Serializable; + +/** + * A PartData serves as the data in the key/data pair for a part entity. + * + *

In this sample, PartData serves both as the stored data entry and as + * the object bound to that entry. Because it is stored directly using + * the serial format, it must be Serializable.

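What "stored directly using the serial format" means in practice is that a SerialBinding, backed by the class catalog opened in SampleDatabase, converts the object to and from a DatabaseEntry. The helper below is a hypothetical sketch (PartDataBindingSketch and its roundTrip method are not part of the patch); it takes the catalog as a parameter rather than opening an environment of its own.

package com.sleepycat.examples.collections.ship.index;

import com.sleepycat.bind.EntryBinding;
import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.SerialBinding;
import com.sleepycat.db.DatabaseEntry;

// Hypothetical, illustration only: how PartData travels to and from a
// stored entry through a SerialBinding.
public class PartDataBindingSketch {

    public static PartData roundTrip(ClassCatalog catalog, PartData part) {

        EntryBinding binding = new SerialBinding(catalog, PartData.class);

        // Object to stored entry, as happens on a map put().
        DatabaseEntry entry = new DatabaseEntry();
        binding.objectToEntry(part, entry);

        // Stored entry back to object, as happens on a map get().
        return (PartData) binding.entryToObject(entry);
    }
}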
    + * + * @author Mark Hayes + */ +public class PartData implements Serializable { + + private String name; + private String color; + private Weight weight; + private String city; + + public PartData(String name, String color, Weight weight, String city) { + + this.name = name; + this.color = color; + this.weight = weight; + this.city = city; + } + + public final String getName() { + + return name; + } + + public final String getColor() { + + return color; + } + + public final Weight getWeight() { + + return weight; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[PartData: name=" + name + + " color=" + color + + " weight=" + weight + + " city=" + city + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/index/PartKey.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/index/PartKey.java new file mode 100644 index 000000000..f6268d558 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/index/PartKey.java @@ -0,0 +1,41 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: PartKey.java,v 1.2 2004/09/22 16:17:11 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.index; + +import java.io.Serializable; + +/** + * A PartKey serves as the key in the key/data pair for a part entity. + * + *

In this sample, PartKey serves both as the stored key entry and as + * the object bound to that key. Because it is stored directly using + * the serial format, it must be Serializable.

    + * + * @author Mark Hayes + */ +public class PartKey implements Serializable { + + private String number; + + public PartKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[PartKey: number=" + number + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/index/Sample.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/index/Sample.java new file mode 100644 index 000000000..7db349381 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/index/Sample.java @@ -0,0 +1,303 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Sample.java,v 1.4 2004/09/22 16:17:11 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.index; + +import java.io.FileNotFoundException; +import java.util.Iterator; +import java.util.Map; + +import com.sleepycat.collections.StoredIterator; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.db.DatabaseException; + +/** + * Sample is the main entry point for the sample program and may be run as + * follows: + * + *
    + * java com.sleepycat.examples.collections.ship.index.Sample
+ *      [-h <home-directory> ]
    + * 
    + * + *

    The default for the home directory is ./tmp -- the tmp subdirectory of + * the current directory where the sample is run. The home directory must exist + * before running the sample. To recreate the sample database from scratch, + * delete all files in the home directory before running the sample.

    + * + * @author Mark Hayes + */ +public class Sample { + + private SampleDatabase db; + private SampleViews views; + + /** + * Run the sample program. + */ + public static void main(String[] args) { + + System.out.println("\nRunning sample: " + Sample.class); + + // Parse the command line arguments. + // + String homeDir = "./tmp"; + for (int i = 0; i < args.length; i += 1) { + if (args[i].equals("-h") && i < args.length - 1) { + i += 1; + homeDir = args[i]; + } else { + System.err.println("Usage:\n java " + Sample.class.getName() + + "\n [-h ]"); + System.exit(2); + } + } + + // Run the sample. + // + Sample sample = null; + try { + sample = new Sample(homeDir); + sample.run(); + } catch (Exception e) { + // If an exception reaches this point, the last transaction did not + // complete. If the exception is RunRecoveryException, follow + // the Berkeley DB recovery procedures before running again. + e.printStackTrace(); + } finally { + if (sample != null) { + try { + // Always attempt to close the database cleanly. + sample.close(); + } catch (Exception e) { + System.err.println("Exception during database close:"); + e.printStackTrace(); + } + } + } + } + + /** + * Open the database and views. + */ + private Sample(String homeDir) + throws DatabaseException, FileNotFoundException { + + db = new SampleDatabase(homeDir); + views = new SampleViews(db); + } + + /** + * Close the database cleanly. + */ + private void close() + throws DatabaseException { + + db.close(); + } + + /** + * Run two transactions to populate and print the database. A + * TransactionRunner is used to ensure consistent handling of transactions, + * including deadlock retries. But the best transaction handling mechanism + * to use depends on the application. + */ + private void run() + throws Exception { + + TransactionRunner runner = new TransactionRunner(db.getEnvironment()); + runner.run(new PopulateDatabase()); + runner.run(new PrintDatabase()); + } + + /** + * Populate the database in a single transaction. + */ + private class PopulateDatabase implements TransactionWorker { + + public void doWork() + throws Exception { + addSuppliers(); + addParts(); + addShipments(); + } + } + + /** + * Print the database in a single transaction. All entities are printed + * and the indices are used to print the entities for certain keys. + * + *

    Note the use of special iterator() methods. These are used here + * with indices to find the shipments for certain keys. For details on + * database iterators see {@link StoredIterator}.

    + */ + private class PrintDatabase implements TransactionWorker { + + public void doWork() + throws Exception { + printEntries("Parts", + views.getPartEntrySet().iterator()); + printEntries("Suppliers", + views.getSupplierEntrySet().iterator()); + printValues("Suppliers for City Paris", + views.getSupplierByCityMap().duplicates( + "Paris").iterator()); + printEntries("Shipments", + views.getShipmentEntrySet().iterator()); + printValues("Shipments for Part P1", + views.getShipmentByPartMap().duplicates( + new PartKey("P1")).iterator()); + printValues("Shipments for Supplier S1", + views.getShipmentBySupplierMap().duplicates( + new SupplierKey("S1")).iterator()); + } + } + + /** + * Populate the part entities in the database. If the part map is not + * empty, assume that this has already been done. + */ + private void addParts() { + + Map parts = views.getPartMap(); + if (parts.isEmpty()) { + System.out.println("Adding Parts"); + parts.put(new PartKey("P1"), + new PartData("Nut", "Red", + new Weight(12.0, Weight.GRAMS), + "London")); + parts.put(new PartKey("P2"), + new PartData("Bolt", "Green", + new Weight(17.0, Weight.GRAMS), + "Paris")); + parts.put(new PartKey("P3"), + new PartData("Screw", "Blue", + new Weight(17.0, Weight.GRAMS), + "Rome")); + parts.put(new PartKey("P4"), + new PartData("Screw", "Red", + new Weight(14.0, Weight.GRAMS), + "London")); + parts.put(new PartKey("P5"), + new PartData("Cam", "Blue", + new Weight(12.0, Weight.GRAMS), + "Paris")); + parts.put(new PartKey("P6"), + new PartData("Cog", "Red", + new Weight(19.0, Weight.GRAMS), + "London")); + } + } + + /** + * Populate the supplier entities in the database. If the supplier map is + * not empty, assume that this has already been done. + */ + private void addSuppliers() { + + Map suppliers = views.getSupplierMap(); + if (suppliers.isEmpty()) { + System.out.println("Adding Suppliers"); + suppliers.put(new SupplierKey("S1"), + new SupplierData("Smith", 20, "London")); + suppliers.put(new SupplierKey("S2"), + new SupplierData("Jones", 10, "Paris")); + suppliers.put(new SupplierKey("S3"), + new SupplierData("Blake", 30, "Paris")); + suppliers.put(new SupplierKey("S4"), + new SupplierData("Clark", 20, "London")); + suppliers.put(new SupplierKey("S5"), + new SupplierData("Adams", 30, "Athens")); + } + } + + /** + * Populate the shipment entities in the database. If the shipment map + * is not empty, assume that this has already been done. 
+ */ + private void addShipments() { + + Map shipments = views.getShipmentMap(); + if (shipments.isEmpty()) { + System.out.println("Adding Shipments"); + shipments.put(new ShipmentKey("P1", "S1"), + new ShipmentData(300)); + shipments.put(new ShipmentKey("P2", "S1"), + new ShipmentData(200)); + shipments.put(new ShipmentKey("P3", "S1"), + new ShipmentData(400)); + shipments.put(new ShipmentKey("P4", "S1"), + new ShipmentData(200)); + shipments.put(new ShipmentKey("P5", "S1"), + new ShipmentData(100)); + shipments.put(new ShipmentKey("P6", "S1"), + new ShipmentData(100)); + shipments.put(new ShipmentKey("P1", "S2"), + new ShipmentData(300)); + shipments.put(new ShipmentKey("P2", "S2"), + new ShipmentData(400)); + shipments.put(new ShipmentKey("P2", "S3"), + new ShipmentData(200)); + shipments.put(new ShipmentKey("P2", "S4"), + new ShipmentData(200)); + shipments.put(new ShipmentKey("P4", "S4"), + new ShipmentData(300)); + shipments.put(new ShipmentKey("P5", "S4"), + new ShipmentData(400)); + } + } + + /** + * Print the key/value objects returned by an iterator of Map.Entry + * objects. + * + *

    IMPORTANT: All database iterators must be closed to avoid + * serious database problems. If the iterator is not closed, the + * underlying Berkeley DB cursor is not closed either.

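The close-in-finally discipline described here, and repeated in the print methods that follow, can also be captured once in a small helper. The version below is a hypothetical sketch (IteratorPrinter is not part of the patch) built only on the static StoredIterator.close() call that the sample itself uses.

import java.util.Iterator;

import com.sleepycat.collections.StoredIterator;

// Hypothetical, illustration only: centralizes the "always close the
// iterator" rule instead of repeating the try/finally block.
class IteratorPrinter {

    static void printAll(String label, Iterator iterator) {

        System.out.println("\n--- " + label + " ---");
        try {
            while (iterator.hasNext()) {
                System.out.println(iterator.next());
            }
        } finally {
            // Closes the underlying Berkeley DB cursor.
            StoredIterator.close(iterator);
        }
    }
}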
    + */ + private void printEntries(String label, Iterator iterator) { + + System.out.println("\n--- " + label + " ---"); + try { + while (iterator.hasNext()) { + Map.Entry entry = (Map.Entry) iterator.next(); + System.out.println(entry.getKey().toString()); + System.out.println(entry.getValue().toString()); + } + } finally { + // IMPORTANT: Use StoredIterator to close all database + // iterators. If java.util.Iterator is in hand, you can safely + // close it by calling StoredIterator.close(Iterator). + StoredIterator.close(iterator); + } + } + + /** + * Print the objects returned by an iterator of value objects. + * + *

    IMPORTANT: All database iterators must be closed to avoid + * serious database problems. If the iterator is not closed, the + * underlying Berkeley DB cursor is not closed either.

    + */ + private void printValues(String label, Iterator iterator) { + + System.out.println("\n--- " + label + " ---"); + try { + while (iterator.hasNext()) { + System.out.println(iterator.next().toString()); + } + } finally { + // IMPORTANT: Use StoredIterator to close all database + // iterators. If java.util.Iterator is in hand, you can safely + // close it by calling StoredIterator.close(Iterator). + StoredIterator.close(iterator); + } + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/index/SampleDatabase.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/index/SampleDatabase.java new file mode 100644 index 000000000..c30d4722e --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/index/SampleDatabase.java @@ -0,0 +1,330 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SampleDatabase.java,v 1.4 2004/09/22 18:00:57 bostic Exp $ + */ + +package com.sleepycat.examples.collections.ship.index; + +import java.io.File; +import java.io.FileNotFoundException; + +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.SerialSerialKeyCreator; +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseConfig; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.DatabaseType; +import com.sleepycat.db.Environment; +import com.sleepycat.db.EnvironmentConfig; +import com.sleepycat.db.SecondaryConfig; +import com.sleepycat.db.SecondaryDatabase; + +/** + * SampleDatabase defines the storage containers, indices and foreign keys + * for the sample database. + * + * @author Mark Hayes + */ +public class SampleDatabase { + + private static final String CLASS_CATALOG = "java_class_catalog"; + private static final String SUPPLIER_STORE = "supplier_store"; + private static final String PART_STORE = "part_store"; + private static final String SHIPMENT_STORE = "shipment_store"; + private static final String SHIPMENT_PART_INDEX = "shipment_part_index"; + private static final String SHIPMENT_SUPPLIER_INDEX = + "shipment_supplier_index"; + private static final String SUPPLIER_CITY_INDEX = "supplier_city_index"; + + private Environment env; + private Database partDb; + private Database supplierDb; + private Database shipmentDb; + private SecondaryDatabase supplierByCityDb; + private SecondaryDatabase shipmentByPartDb; + private SecondaryDatabase shipmentBySupplierDb; + private StoredClassCatalog javaCatalog; + + /** + * Open all storage containers, indices, and catalogs. + */ + public SampleDatabase(String homeDirectory) + throws DatabaseException, FileNotFoundException { + + // Open the Berkeley DB environment in transactional mode. + // + System.out.println("Opening environment in: " + homeDirectory); + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setInitializeCache(true); + envConfig.setInitializeLocking(true); + env = new Environment(new File(homeDirectory), envConfig); + + // Set the Berkeley DB config for opening all stores. + // + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + dbConfig.setType(DatabaseType.BTREE); + + // Create the Serial class catalog. This holds the serialized class + // format for all database records of serial format. 
+ // + Database catalogDb = env.openDatabase(null, CLASS_CATALOG, null, + dbConfig); + javaCatalog = new StoredClassCatalog(catalogDb); + + // Open the Berkeley DB database for the part, supplier and shipment + // stores. The stores are opened with no duplicate keys allowed. + // + partDb = env.openDatabase(null, PART_STORE, null, dbConfig); + + supplierDb = env.openDatabase(null, SUPPLIER_STORE, null, dbConfig); + + shipmentDb = env.openDatabase(null, SHIPMENT_STORE, null, dbConfig); + + // Open the SecondaryDatabase for the city index of the supplier store, + // and for the part and supplier indices of the shipment store. + // Duplicate keys are allowed since more than one supplier may be in + // the same city, and more than one shipment may exist for the same + // supplier or part. A foreign key constraint is defined for the + // supplier and part indices to ensure that a shipment only refers to + // existing part and supplier keys. The CASCADE delete action means + // that shipments will be deleted if their associated part or supplier + // is deleted. + // + SecondaryConfig secConfig = new SecondaryConfig(); + secConfig.setTransactional(true); + secConfig.setAllowCreate(true); + secConfig.setType(DatabaseType.BTREE); + secConfig.setSortedDuplicates(true); + + secConfig.setKeyCreator( + new SupplierByCityKeyCreator(javaCatalog, + SupplierKey.class, + SupplierData.class, + String.class)); + supplierByCityDb = env.openSecondaryDatabase(null, SUPPLIER_CITY_INDEX, + null, + supplierDb, + secConfig); + + secConfig.setKeyCreator( + new ShipmentByPartKeyCreator(javaCatalog, + ShipmentKey.class, + ShipmentData.class, + PartKey.class)); + shipmentByPartDb = env.openSecondaryDatabase(null, SHIPMENT_PART_INDEX, + null, + shipmentDb, + secConfig); + + secConfig.setKeyCreator( + new ShipmentBySupplierKeyCreator(javaCatalog, + ShipmentKey.class, + ShipmentData.class, + SupplierKey.class)); + shipmentBySupplierDb = env.openSecondaryDatabase(null, + SHIPMENT_SUPPLIER_INDEX, + null, + shipmentDb, + secConfig); + } + + /** + * Return the storage environment for the database. + */ + public final Environment getEnvironment() { + + return env; + } + + /** + * Return the class catalog. + */ + public final StoredClassCatalog getClassCatalog() { + + return javaCatalog; + } + + /** + * Return the part storage container. + */ + public final Database getPartDatabase() { + + return partDb; + } + + /** + * Return the supplier storage container. + */ + public final Database getSupplierDatabase() { + + return supplierDb; + } + + /** + * Return the shipment storage container. + */ + public final Database getShipmentDatabase() { + + return shipmentDb; + } + + /** + * Return the shipment-by-part index. + */ + public final SecondaryDatabase getShipmentByPartDatabase() { + + return shipmentByPartDb; + } + + /** + * Return the shipment-by-supplier index. + */ + public final SecondaryDatabase getShipmentBySupplierDatabase() { + + return shipmentBySupplierDb; + } + + /** + * Return the supplier-by-city index. + */ + public final SecondaryDatabase getSupplierByCityDatabase() { + + return supplierByCityDb; + } + + /** + * Close all stores (closing a store automatically closes its indices). + */ + public void close() + throws DatabaseException { + + // Close secondary databases, then primary databases. + supplierByCityDb.close(); + shipmentByPartDb.close(); + shipmentBySupplierDb.close(); + partDb.close(); + supplierDb.close(); + shipmentDb.close(); + // And don't forget to close the catalog and the environment. 
+ javaCatalog.close(); + env.close(); + } + + /** + * The SecondaryKeyCreator for the SupplierByCity index. This is an + * extension of the abstract class SerialSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys and value are all + * of the serial format. + */ + private static class SupplierByCityKeyCreator + extends SerialSerialKeyCreator { + + /** + * Construct the city key extractor. + * @param catalog is the class catalog. + * @param primaryKeyClass is the supplier key class. + * @param valueClass is the supplier value class. + * @param indexKeyClass is the city key class. + */ + private SupplierByCityKeyCreator(ClassCatalog catalog, + Class primaryKeyClass, + Class valueClass, + Class indexKeyClass) { + + super(catalog, primaryKeyClass, valueClass, indexKeyClass); + } + + /** + * Extract the city key from a supplier key/value pair. The city key + * is stored in the supplier value, so the supplier key is not used. + */ + public Object createSecondaryKey(Object primaryKeyInput, + Object valueInput) { + + SupplierData supplierData = (SupplierData) valueInput; + return supplierData.getCity(); + } + } + + /** + * The SecondaryKeyCreator for the ShipmentByPart index. This is an + * extension of the abstract class SerialSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys and value are all + * of the serial format. + */ + private static class ShipmentByPartKeyCreator + extends SerialSerialKeyCreator { + + /** + * Construct the part key extractor. + * @param catalog is the class catalog. + * @param primaryKeyClass is the shipment key class. + * @param valueClass is the shipment value class. + * @param indexKeyClass is the part key class. + */ + private ShipmentByPartKeyCreator(ClassCatalog catalog, + Class primaryKeyClass, + Class valueClass, + Class indexKeyClass) { + + super(catalog, primaryKeyClass, valueClass, indexKeyClass); + } + + /** + * Extract the part key from a shipment key/value pair. The part key + * is stored in the shipment key, so the shipment value is not used. + */ + public Object createSecondaryKey(Object primaryKeyInput, + Object valueInput) { + + ShipmentKey shipmentKey = (ShipmentKey) primaryKeyInput; + return new PartKey(shipmentKey.getPartNumber()); + } + } + + /** + * The SecondaryKeyCreator for the ShipmentBySupplier index. This is an + * extension of the abstract class SerialSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys and value are all + * of the serial format. + */ + private static class ShipmentBySupplierKeyCreator + extends SerialSerialKeyCreator { + + /** + * Construct the supplier key extractor. + * @param catalog is the class catalog. + * @param primaryKeyClass is the shipment key class. + * @param valueClass is the shipment value class. + * @param indexKeyClass is the supplier key class. + */ + private ShipmentBySupplierKeyCreator(ClassCatalog catalog, + Class primaryKeyClass, + Class valueClass, + Class indexKeyClass) { + + super(catalog, primaryKeyClass, valueClass, indexKeyClass); + } + + /** + * Extract the supplier key from a shipment key/value pair. The part + * key is stored in the shipment key, so the shipment value is not + * used. 
+ */ + public Object createSecondaryKey(Object primaryKeyInput, + Object valueInput) { + + ShipmentKey shipmentKey = (ShipmentKey) primaryKeyInput; + return new SupplierKey(shipmentKey.getSupplierNumber()); + } + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/index/SampleViews.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/index/SampleViews.java new file mode 100644 index 000000000..84745de76 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/index/SampleViews.java @@ -0,0 +1,162 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SampleViews.java,v 1.3 2004/09/22 16:17:11 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.index; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.SerialBinding; +import com.sleepycat.collections.StoredEntrySet; +import com.sleepycat.collections.StoredSortedMap; + +/** + * SampleViews defines the data bindings and collection views for the sample + * database. + * + * @author Mark Hayes + */ +public class SampleViews { + + private StoredSortedMap partMap; + private StoredSortedMap supplierMap; + private StoredSortedMap shipmentMap; + private StoredSortedMap shipmentByPartMap; + private StoredSortedMap shipmentBySupplierMap; + private StoredSortedMap supplierByCityMap; + + /** + * Create the data bindings and collection views. + */ + public SampleViews(SampleDatabase db) { + + // Create the data bindings. + // In this sample, the stored key and data entries are used directly + // rather than mapping them to separate objects. Therefore, no binding + // classes are defined here and the SerialBinding class is used. + // + ClassCatalog catalog = db.getClassCatalog(); + EntryBinding partKeyBinding = + new SerialBinding(catalog, PartKey.class); + EntryBinding partDataBinding = + new SerialBinding(catalog, PartData.class); + EntryBinding supplierKeyBinding = + new SerialBinding(catalog, SupplierKey.class); + EntryBinding supplierDataBinding = + new SerialBinding(catalog, SupplierData.class); + EntryBinding shipmentKeyBinding = + new SerialBinding(catalog, ShipmentKey.class); + EntryBinding shipmentDataBinding = + new SerialBinding(catalog, ShipmentData.class); + EntryBinding cityKeyBinding = + new SerialBinding(catalog, String.class); + + // Create map views for all stores and indices. + // StoredSortedMap is not used since the stores and indices are + // ordered by serialized key objects, which do not provide a very + // useful ordering. 
+ // + partMap = + new StoredSortedMap(db.getPartDatabase(), + partKeyBinding, partDataBinding, true); + supplierMap = + new StoredSortedMap(db.getSupplierDatabase(), + supplierKeyBinding, supplierDataBinding, true); + shipmentMap = + new StoredSortedMap(db.getShipmentDatabase(), + shipmentKeyBinding, shipmentDataBinding, true); + shipmentByPartMap = + new StoredSortedMap(db.getShipmentByPartDatabase(), + partKeyBinding, shipmentDataBinding, true); + shipmentBySupplierMap = + new StoredSortedMap(db.getShipmentBySupplierDatabase(), + supplierKeyBinding, shipmentDataBinding, true); + supplierByCityMap = + new StoredSortedMap(db.getSupplierByCityDatabase(), + cityKeyBinding, supplierDataBinding, true); + } + + // The views returned below can be accessed using the java.util.Map or + // java.util.Set interfaces, or using the StoredSortedMap and + // StoredEntrySet classes, which provide additional methods. The entry + // sets could be obtained directly from the Map.entrySet() method, but + // convenience methods are provided here to return them in order to avoid + // down-casting elsewhere. + + /** + * Return a map view of the part storage container. + */ + public final StoredSortedMap getPartMap() { + + return partMap; + } + + /** + * Return a map view of the supplier storage container. + */ + public final StoredSortedMap getSupplierMap() { + + return supplierMap; + } + + /** + * Return a map view of the shipment storage container. + */ + public final StoredSortedMap getShipmentMap() { + + return shipmentMap; + } + + /** + * Return an entry set view of the part storage container. + */ + public final StoredEntrySet getPartEntrySet() { + + return (StoredEntrySet) partMap.entrySet(); + } + + /** + * Return an entry set view of the supplier storage container. + */ + public final StoredEntrySet getSupplierEntrySet() { + + return (StoredEntrySet) supplierMap.entrySet(); + } + + /** + * Return an entry set view of the shipment storage container. + */ + public final StoredEntrySet getShipmentEntrySet() { + + return (StoredEntrySet) shipmentMap.entrySet(); + } + + /** + * Return a map view of the shipment-by-part index. + */ + public StoredSortedMap getShipmentByPartMap() { + + return shipmentByPartMap; + } + + /** + * Return a map view of the shipment-by-supplier index. + */ + public StoredSortedMap getShipmentBySupplierMap() { + + return shipmentBySupplierMap; + } + + /** + * Return a map view of the supplier-by-city index. + */ + public final StoredSortedMap getSupplierByCityMap() { + + return supplierByCityMap; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/index/ShipmentData.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/index/ShipmentData.java new file mode 100644 index 000000000..02676e6ee --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/index/ShipmentData.java @@ -0,0 +1,42 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: ShipmentData.java,v 1.2 2004/09/22 16:17:11 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.index; + +import java.io.Serializable; + +/** + * A ShipmentData serves as the data in the key/data pair for a shipment + * entity. + * + *

In this sample, ShipmentData serves both as the stored data entry and + * as the object bound to that entry. Because it is stored directly + * using the serial format, it must be Serializable.

    + * + * @author Mark Hayes + */ +public class ShipmentData implements Serializable { + + private int quantity; + + public ShipmentData(int quantity) { + + this.quantity = quantity; + } + + public final int getQuantity() { + + return quantity; + } + + public String toString() { + + return "[ShipmentData: quantity=" + quantity + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/index/ShipmentKey.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/index/ShipmentKey.java new file mode 100644 index 000000000..876bbce55 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/index/ShipmentKey.java @@ -0,0 +1,49 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: ShipmentKey.java,v 1.2 2004/09/22 16:17:11 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.index; + +import java.io.Serializable; + +/** + * A ShipmentKey serves as the key in the key/data pair for a shipment entity. + * + *

In this sample, ShipmentKey serves both as the stored key entry and + * as the object bound to that key. Because it is stored directly + * using the serial format, it must be Serializable.

    + * + * @author Mark Hayes + */ +public class ShipmentKey implements Serializable { + + private String partNumber; + private String supplierNumber; + + public ShipmentKey(String partNumber, String supplierNumber) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public String toString() { + + return "[ShipmentKey: supplier=" + supplierNumber + + " part=" + partNumber + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/index/SupplierData.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/index/SupplierData.java new file mode 100644 index 000000000..3cf70472c --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/index/SupplierData.java @@ -0,0 +1,58 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SupplierData.java,v 1.2 2004/09/22 16:17:11 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.index; + +import java.io.Serializable; + +/** + * A SupplierData serves as the data in the key/data pair for a supplier + * entity. + * + *

In this sample, SupplierData serves both as the stored data entry and + * as the object bound to that entry. Because it is stored directly + * using the serial format, it must be Serializable.

    + * + * @author Mark Hayes + */ +public class SupplierData implements Serializable { + + private String name; + private int status; + private String city; + + public SupplierData(String name, int status, String city) { + + this.name = name; + this.status = status; + this.city = city; + } + + public final String getName() { + + return name; + } + + public final int getStatus() { + + return status; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[SupplierData: name=" + name + + " status=" + status + + " city=" + city + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/index/SupplierKey.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/index/SupplierKey.java new file mode 100644 index 000000000..fa64ca771 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/index/SupplierKey.java @@ -0,0 +1,41 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SupplierKey.java,v 1.2 2004/09/22 16:17:11 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.index; + +import java.io.Serializable; + +/** + * A SupplierKey serves as the key in the key/data pair for a supplier entity. + * + *

In this sample, SupplierKey serves both as the stored key entry and + * as the object bound to that key. Because it is stored directly + * using the serial format, it must be Serializable.

    + * + * @author Mark Hayes + */ +public class SupplierKey implements Serializable { + + private String number; + + public SupplierKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[SupplierKey: number=" + number + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/index/Weight.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/index/Weight.java new file mode 100644 index 000000000..568baf6b8 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/index/Weight.java @@ -0,0 +1,50 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Weight.java,v 1.2 2004/09/22 16:17:11 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.index; + +import java.io.Serializable; + +/** + * Weight represents a weight amount and unit of measure. + * + *

In this sample, Weight is embedded in part data values, which are stored + * as serialized objects using the serial format; therefore Weight must be + * Serializable.

    + * + * @author Mark Hayes + */ +public class Weight implements Serializable { + + public final static String GRAMS = "grams"; + public final static String OUNCES = "ounces"; + + private double amount; + private String units; + + public Weight(double amount, String units) { + + this.amount = amount; + this.units = units; + } + + public final double getAmount() { + + return amount; + } + + public final String getUnits() { + + return units; + } + + public String toString() { + + return "[" + amount + ' ' + units + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/MarshalledEntity.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/MarshalledEntity.java new file mode 100644 index 000000000..c45b1101b --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/MarshalledEntity.java @@ -0,0 +1,43 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: MarshalledEntity.java,v 1.3 2004/09/22 16:17:11 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.marshal; + +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * MarshalledEntity is implemented by entity (combined key/data) objects and + * called by {@link SampleViews.MarshalledEntityBinding}. In this sample, + * MarshalledEntity is implemented by {@link Part}, {@link Supplier}, and + * {@link Shipment}. This interface is package-protected rather than public + * to hide the marshalling interface from other users of the data objects. + * Note that a MarshalledEntity must also have a no arguments constructor so + * that it can be instantiated by the binding. + * + * @author Mark Hayes + */ +interface MarshalledEntity { + + /** + * Extracts the entity's primary key and writes it to the key output. + */ + void marshalPrimaryKey(TupleOutput keyOutput); + + /** + * Completes construction of the entity by setting its primary key from the + * stored primary key. + */ + void unmarshalPrimaryKey(TupleInput keyInput); + + /** + * Extracts the entity's index key and writes it to the key output. + */ + boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput); +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/MarshalledKey.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/MarshalledKey.java new file mode 100644 index 000000000..e19682ca4 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/MarshalledKey.java @@ -0,0 +1,37 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: MarshalledKey.java,v 1.3 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.marshal; + +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * MarshalledKey is implemented by key objects and called by {@link + * SampleViews.MarshalledKeyBinding}. In this sample, MarshalledKey is + * implemented by {@link PartKey}, {@link SupplierKey}, and {@link + * ShipmentKey}. This interface is package-protected rather than public to + * hide the marshalling interface from other users of the data objects. Note + * that a MarshalledKey must also have a no arguments constructor so + * that it can be instantiated by the binding. 
+ * + * @author Mark Hayes + */ +interface MarshalledKey { + + /** + * Construct the key tuple entry from the key object. + */ + void marshalKey(TupleOutput keyOutput); + + /** + * Construct the key object from the key tuple entry. + */ + void unmarshalKey(TupleInput keyInput); +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/Part.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/Part.java new file mode 100644 index 000000000..28af87b21 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/Part.java @@ -0,0 +1,117 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Part.java,v 1.3 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.marshal; + +import java.io.Serializable; + +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A Part represents the combined key/data pair for a part entity. + * + *

    In this sample, Part is bound to the stored key/data entry by + * implementing the MarshalledEntity interface, which is called by {@link + * SampleViews.MarshalledEntityBinding}.

    + * + *

    The binding is "tricky" in that it uses this class for both the stored + * data entry and the combined entity object. To do this, the key field(s) are + * transient and are set by the binding after the data object has been + * deserialized. This avoids the use of a PartData class completely.

    + * + *

    Since this class is used directly for data storage, it must be + * Serializable.

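A binding built on this interface typically extends TupleSerialBinding and delegates everything to the entity: the serialized data object is the entity itself, and only the transient key fields are filled in from the key tuple. The sketch below is hypothetical (MarshalledEntityBindingSketch is not the class shipped with the sample, whose details may differ) and assumes the pre-generics TupleSerialBinding signatures used by this version of the API.

package com.sleepycat.examples.collections.ship.marshal;

import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.TupleSerialBinding;
import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;

// Hypothetical, illustration only: an entity binding driven by the
// MarshalledEntity interface implemented by Part, Supplier and Shipment.
class MarshalledEntityBindingSketch extends TupleSerialBinding {

    MarshalledEntityBindingSketch(ClassCatalog catalog, Class entityClass) {

        super(catalog, entityClass);
    }

    // Combine the deserialized data object with the key tuple by letting
    // the entity set its transient key fields.
    public Object entryToObject(TupleInput keyInput, Object dataInput) {

        MarshalledEntity entity = (MarshalledEntity) dataInput;
        entity.unmarshalPrimaryKey(keyInput);
        return entity;
    }

    // Extract the key tuple from the entity.
    public void objectToKey(Object object, TupleOutput keyOutput) {

        ((MarshalledEntity) object).marshalPrimaryKey(keyOutput);
    }

    // The entity itself is stored as the serialized data value.
    public Object objectToData(Object object) {

        return object;
    }
}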
    + * + * @author Mark Hayes + */ +public class Part implements Serializable, MarshalledEntity { + + private transient String number; + private String name; + private String color; + private Weight weight; + private String city; + + public Part(String number, String name, String color, Weight weight, + String city) { + + this.number = number; + this.name = name; + this.color = color; + this.weight = weight; + this.city = city; + } + + /** + * Set the transient key fields after deserializing. This method is only + * called by data bindings. + */ + final void setKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public final String getName() { + + return name; + } + + public final String getColor() { + + return color; + } + + public final Weight getWeight() { + + return weight; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[Part: number=" + number + + " name=" + name + + " color=" + color + + " weight=" + weight + + " city=" + city + ']'; + } + + // --- MarshalledEntity implementation --- + + Part() { + + // A no-argument constructor is necessary only to allow the binding to + // instantiate objects of this class. + } + + public void unmarshalPrimaryKey(TupleInput keyInput) { + + this.number = keyInput.readString(); + } + + public void marshalPrimaryKey(TupleOutput keyOutput) { + + keyOutput.writeString(this.number); + } + + public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) { + + throw new UnsupportedOperationException(keyName); + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/PartKey.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/PartKey.java new file mode 100644 index 000000000..76e31f6da --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/PartKey.java @@ -0,0 +1,60 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: PartKey.java,v 1.3 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.marshal; + +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A PartKey serves as the key in the key/data pair for a part entity. + * + *

    In this sample, PartKey is bound to the stored key tuple entry by + * implementing the MarshalledKey interface, which is called by {@link + * SampleViews.MarshalledKeyBinding}.

    + * + * @author Mark Hayes + */ +public class PartKey implements MarshalledKey { + + private String number; + + public PartKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[PartKey: number=" + number + ']'; + } + + // --- MarshalledKey implementation --- + + PartKey() { + + // A no-argument constructor is necessary only to allow the binding to + // instantiate objects of this class. + } + + public void unmarshalKey(TupleInput keyInput) { + + this.number = keyInput.readString(); + } + + public void marshalKey(TupleOutput keyOutput) { + + keyOutput.writeString(this.number); + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/Sample.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/Sample.java new file mode 100644 index 000000000..615d1ed94 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/Sample.java @@ -0,0 +1,250 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Sample.java,v 1.4 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.marshal; + +import java.io.FileNotFoundException; +import java.util.Iterator; +import java.util.Set; + +import com.sleepycat.collections.StoredIterator; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.db.DatabaseException; + +/** + * Sample is the main entry point for the sample program and may be run as + * follows: + * + *
    + * java com.sleepycat.examples.collections.ship.marshal.Sample
+ *      [-h <home-directory> ]
    + * 
    + * + *

The default for the home directory is ./tmp -- the tmp subdirectory of + * the current directory where the sample is run. To specify a different home + * directory, use the -h option. The home directory must exist before + * running the sample. To recreate the sample database from scratch, delete + * all files in the home directory before running the sample.

    + * + * @author Mark Hayes + */ +public class Sample { + + private SampleDatabase db; + private SampleViews views; + + /** + * Run the sample program. + */ + public static void main(String[] args) { + + System.out.println("\nRunning sample: " + Sample.class); + + // Parse the command line arguments. + // + String homeDir = "./tmp"; + for (int i = 0; i < args.length; i += 1) { + if (args[i].equals("-h") && i < args.length - 1) { + i += 1; + homeDir = args[i]; + } else { + System.err.println("Usage:\n java " + Sample.class.getName() + + "\n [-h ]"); + System.exit(2); + } + } + + // Run the sample. + // + Sample sample = null; + try { + sample = new Sample(homeDir); + sample.run(); + } catch (Exception e) { + // If an exception reaches this point, the last transaction did not + // complete. If the exception is RunRecoveryException, follow + // the Berkeley DB recovery procedures before running again. + e.printStackTrace(); + } finally { + if (sample != null) { + try { + // Always attempt to close the database cleanly. + sample.close(); + } catch (Exception e) { + System.err.println("Exception during database close:"); + e.printStackTrace(); + } + } + } + } + + /** + * Open the database and views. + */ + private Sample(String homeDir) + throws DatabaseException, FileNotFoundException { + + db = new SampleDatabase(homeDir); + views = new SampleViews(db); + } + + /** + * Close the database cleanly. + */ + private void close() + throws DatabaseException { + + db.close(); + } + + /** + * Run two transactions to populate and print the database. A + * TransactionRunner is used to ensure consistent handling of transactions, + * including deadlock retries. But the best transaction handling mechanism + * to use depends on the application. + */ + private void run() + throws Exception { + + TransactionRunner runner = new TransactionRunner(db.getEnvironment()); + runner.run(new PopulateDatabase()); + runner.run(new PrintDatabase()); + } + + /** + * Populate the database in a single transaction. + */ + private class PopulateDatabase implements TransactionWorker { + + public void doWork() + throws Exception { + addSuppliers(); + addParts(); + addShipments(); + } + } + + /** + * Print the database in a single transaction. All entities are printed + * and the indices are used to print the entities for certain keys. + * + *

    Note the use of special iterator() methods. These are used here + * with indices to find the shipments for certain keys. For details on + * database iterators see {@link StoredIterator}.

    + */ + private class PrintDatabase implements TransactionWorker { + + public void doWork() + throws Exception { + printValues("Parts", + views.getPartSet().iterator()); + printValues("Suppliers", + views.getSupplierSet().iterator()); + printValues("Suppliers for City Paris", + views.getSupplierByCityMap().duplicates( + "Paris").iterator()); + printValues("Shipments", + views.getShipmentSet().iterator()); + printValues("Shipments for Part P1", + views.getShipmentByPartMap().duplicates( + new PartKey("P1")).iterator()); + printValues("Shipments for Supplier S1", + views.getShipmentBySupplierMap().duplicates( + new SupplierKey("S1")).iterator()); + } + } + + /** + * Populate the part entities in the database. If the part set is not + * empty, assume that this has already been done. + */ + private void addParts() { + + Set parts = views.getPartSet(); + if (parts.isEmpty()) { + System.out.println("Adding Parts"); + parts.add(new Part("P1", "Nut", "Red", + new Weight(12.0, Weight.GRAMS), "London")); + parts.add(new Part("P2", "Bolt", "Green", + new Weight(17.0, Weight.GRAMS), "Paris")); + parts.add(new Part("P3", "Screw", "Blue", + new Weight(17.0, Weight.GRAMS), "Rome")); + parts.add(new Part("P4", "Screw", "Red", + new Weight(14.0, Weight.GRAMS), "London")); + parts.add(new Part("P5", "Cam", "Blue", + new Weight(12.0, Weight.GRAMS), "Paris")); + parts.add(new Part("P6", "Cog", "Red", + new Weight(19.0, Weight.GRAMS), "London")); + } + } + + /** + * Populate the supplier entities in the database. If the supplier set is + * not empty, assume that this has already been done. + */ + private void addSuppliers() { + + Set suppliers = views.getSupplierSet(); + if (suppliers.isEmpty()) { + System.out.println("Adding Suppliers"); + suppliers.add(new Supplier("S1", "Smith", 20, "London")); + suppliers.add(new Supplier("S2", "Jones", 10, "Paris")); + suppliers.add(new Supplier("S3", "Blake", 30, "Paris")); + suppliers.add(new Supplier("S4", "Clark", 20, "London")); + suppliers.add(new Supplier("S5", "Adams", 30, "Athens")); + } + } + + /** + * Populate the shipment entities in the database. If the shipment set + * is not empty, assume that this has already been done. + */ + private void addShipments() { + + Set shipments = views.getShipmentSet(); + if (shipments.isEmpty()) { + System.out.println("Adding Shipments"); + shipments.add(new Shipment("P1", "S1", 300)); + shipments.add(new Shipment("P2", "S1", 200)); + shipments.add(new Shipment("P3", "S1", 400)); + shipments.add(new Shipment("P4", "S1", 200)); + shipments.add(new Shipment("P5", "S1", 100)); + shipments.add(new Shipment("P6", "S1", 100)); + shipments.add(new Shipment("P1", "S2", 300)); + shipments.add(new Shipment("P2", "S2", 400)); + shipments.add(new Shipment("P2", "S3", 200)); + shipments.add(new Shipment("P2", "S4", 200)); + shipments.add(new Shipment("P4", "S4", 300)); + shipments.add(new Shipment("P5", "S4", 400)); + } + } + + /** + * Print the objects returned by an iterator of entity value objects. + * + *

    IMPORTANT: All database iterators must be closed to avoid + * serious database problems. If the iterator is not closed, the + * underlying Berkeley DB cursor is not closed either.

    + */ + private void printValues(String label, Iterator iterator) { + + System.out.println("\n--- " + label + " ---"); + try { + while (iterator.hasNext()) { + System.out.println(iterator.next().toString()); + } + } finally { + // IMPORTANT: Use StoredIterator to close all database + // iterators. If java.util.Iterator is in hand, you can safely + // close it by calling StoredIterator.close(Iterator). + StoredIterator.close(iterator); + } + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/SampleDatabase.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/SampleDatabase.java new file mode 100644 index 000000000..70c810e15 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/SampleDatabase.java @@ -0,0 +1,259 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SampleDatabase.java,v 1.4 2004/09/22 18:00:58 bostic Exp $ + */ + +package com.sleepycat.examples.collections.ship.marshal; + +import java.io.File; +import java.io.FileNotFoundException; + +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.bind.serial.TupleSerialKeyCreator; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseConfig; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.DatabaseType; +import com.sleepycat.db.Environment; +import com.sleepycat.db.EnvironmentConfig; +import com.sleepycat.db.SecondaryConfig; +import com.sleepycat.db.SecondaryDatabase; + +/** + * SampleDatabase defines the storage containers, indices and foreign keys + * for the sample database. + * + * @author Mark Hayes + */ +public class SampleDatabase { + + private static final String CLASS_CATALOG = "java_class_catalog"; + private static final String SUPPLIER_STORE = "supplier_store"; + private static final String PART_STORE = "part_store"; + private static final String SHIPMENT_STORE = "shipment_store"; + private static final String SHIPMENT_PART_INDEX = "shipment_part_index"; + private static final String SHIPMENT_SUPPLIER_INDEX = + "shipment_supplier_index"; + private static final String SUPPLIER_CITY_INDEX = "supplier_city_index"; + + private Environment env; + private Database partDb; + private Database supplierDb; + private Database shipmentDb; + private SecondaryDatabase supplierByCityDb; + private SecondaryDatabase shipmentByPartDb; + private SecondaryDatabase shipmentBySupplierDb; + private StoredClassCatalog javaCatalog; + + /** + * Open all storage containers, indices, and catalogs. + */ + public SampleDatabase(String homeDirectory) + throws DatabaseException, FileNotFoundException { + + // Open the Berkeley DB environment in transactional mode. + // + System.out.println("Opening environment in: " + homeDirectory); + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setInitializeCache(true); + envConfig.setInitializeLocking(true); + env = new Environment(new File(homeDirectory), envConfig); + + // Set the Berkeley DB config for opening all stores. + // + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + dbConfig.setType(DatabaseType.BTREE); + + // Create the Serial class catalog. 
This holds the serialized class + // format for all database records of serial format. + // + Database catalogDb = env.openDatabase(null, CLASS_CATALOG, null, + dbConfig); + javaCatalog = new StoredClassCatalog(catalogDb); + + // Open the Berkeley DB database for the part, supplier and shipment + // stores. The stores are opened with no duplicate keys allowed. + // + partDb = env.openDatabase(null, PART_STORE, null, dbConfig); + + supplierDb = env.openDatabase(null, SUPPLIER_STORE, null, dbConfig); + + shipmentDb = env.openDatabase(null, SHIPMENT_STORE, null, dbConfig); + + // Open the SecondaryDatabase for the city index of the supplier store, + // and for the part and supplier indices of the shipment store. + // Duplicate keys are allowed since more than one supplier may be in + // the same city, and more than one shipment may exist for the same + // supplier or part. A foreign key constraint is defined for the + // supplier and part indices to ensure that a shipment only refers to + // existing part and supplier keys. The CASCADE delete action means + // that shipments will be deleted if their associated part or supplier + // is deleted. + // + SecondaryConfig secConfig = new SecondaryConfig(); + secConfig.setTransactional(true); + secConfig.setAllowCreate(true); + secConfig.setType(DatabaseType.BTREE); + secConfig.setSortedDuplicates(true); + + secConfig.setKeyCreator(new MarshalledKeyCreator(javaCatalog, + Supplier.class, + Supplier.CITY_KEY)); + supplierByCityDb = env.openSecondaryDatabase(null, SUPPLIER_CITY_INDEX, + null, + supplierDb, + secConfig); + + secConfig.setKeyCreator(new MarshalledKeyCreator(javaCatalog, + Shipment.class, + Shipment.PART_KEY)); + shipmentByPartDb = env.openSecondaryDatabase(null, SHIPMENT_PART_INDEX, + null, + shipmentDb, + secConfig); + + secConfig.setKeyCreator(new MarshalledKeyCreator(javaCatalog, + Shipment.class, + Shipment.SUPPLIER_KEY)); + shipmentBySupplierDb = env.openSecondaryDatabase(null, + SHIPMENT_SUPPLIER_INDEX, + null, + shipmentDb, + secConfig); + } + + /** + * Return the storage environment for the database. + */ + public final Environment getEnvironment() { + + return env; + } + + /** + * Return the class catalog. + */ + public final StoredClassCatalog getClassCatalog() { + + return javaCatalog; + } + + /** + * Return the part storage container. + */ + public final Database getPartDatabase() { + + return partDb; + } + + /** + * Return the supplier storage container. + */ + public final Database getSupplierDatabase() { + + return supplierDb; + } + + /** + * Return the shipment storage container. + */ + public final Database getShipmentDatabase() { + + return shipmentDb; + } + + /** + * Return the shipment-by-part index. + */ + public final SecondaryDatabase getShipmentByPartDatabase() { + + return shipmentByPartDb; + } + + /** + * Return the shipment-by-supplier index. + */ + public final SecondaryDatabase getShipmentBySupplierDatabase() { + + return shipmentBySupplierDb; + } + + /** + * Return the supplier-by-city index. + */ + public final SecondaryDatabase getSupplierByCityDatabase() { + + return supplierByCityDb; + } + + /** + * Close all stores (closing a store automatically closes its indices). + */ + public void close() + throws DatabaseException { + + // Close secondary databases, then primary databases. + supplierByCityDb.close(); + shipmentByPartDb.close(); + shipmentBySupplierDb.close(); + partDb.close(); + supplierDb.close(); + shipmentDb.close(); + // And don't forget to close the catalog and the environment. 
+ javaCatalog.close(); + env.close(); + } + + /** + * The SecondaryKeyCreator for MarshalledEntity objects. This is an + * extension of the abstract class TupleSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys are of the format + * TupleFormat and the data values are of the format SerialFormat. + */ + private static class MarshalledKeyCreator + extends TupleSerialKeyCreator { + + private String keyName; + + /** + * Construct the key creator. + * @param catalog is the class catalog. + * @param valueClass is the supplier value class. + * @param keyName is the key name passed to the marshalling methods. + */ + private MarshalledKeyCreator(ClassCatalog catalog, + Class valueClass, + String keyName) { + + super(catalog, valueClass); + this.keyName = keyName; + } + + /** + * Extract the city key from a supplier key/value pair. The city key + * is stored in the supplier value, so the supplier key is not used. + */ + public boolean createSecondaryKey(TupleInput primaryKeyInput, + Object valueInput, + TupleOutput indexKeyOutput) { + + // the primary key is unmarshalled before marshalling the index + // key, to account for cases where the index key is composed of + // data elements from the primary key + MarshalledEntity entity = (MarshalledEntity) valueInput; + entity.unmarshalPrimaryKey(primaryKeyInput); + return entity.marshalSecondaryKey(keyName, indexKeyOutput); + } + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/SampleViews.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/SampleViews.java new file mode 100644 index 000000000..fe2113761 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/SampleViews.java @@ -0,0 +1,277 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SampleViews.java,v 1.3 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.marshal; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.TupleSerialBinding; +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.collections.StoredSortedValueSet; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * SampleViews defines the data bindings and collection views for the sample + * database. + * + * @author Mark Hayes + */ +public class SampleViews { + + private StoredSortedMap partMap; + private StoredSortedMap supplierMap; + private StoredSortedMap shipmentMap; + private StoredSortedMap shipmentByPartMap; + private StoredSortedMap shipmentBySupplierMap; + private StoredSortedMap supplierByCityMap; + + /** + * Create the data bindings and collection views. + */ + public SampleViews(SampleDatabase db) { + + // Create the data bindings. + // In this sample, EntityBinding classes are used to bind the stored + // key/data entry pair to a combined data object; a "tricky" binding + // that uses transient fields is used--see PartBinding, etc, for + // details. For keys, a one-to-one binding is implemented with + // EntryBinding classes to bind the stored tuple entry to a key Object. 
+ // + ClassCatalog catalog = db.getClassCatalog(); + EntryBinding partKeyBinding = + new MarshalledKeyBinding(PartKey.class); + EntityBinding partDataBinding = + new MarshalledEntityBinding(catalog, Part.class); + EntryBinding supplierKeyBinding = + new MarshalledKeyBinding(SupplierKey.class); + EntityBinding supplierDataBinding = + new MarshalledEntityBinding(catalog, Supplier.class); + EntryBinding shipmentKeyBinding = + new MarshalledKeyBinding(ShipmentKey.class); + EntityBinding shipmentDataBinding = + new MarshalledEntityBinding(catalog, Shipment.class); + EntryBinding cityKeyBinding = + TupleBinding.getPrimitiveBinding(String.class); + + // Create map views for all stores and indices. + // StoredSortedMap is used since the stores and indices are ordered + // (they use the DB_BTREE access method). + // + partMap = + new StoredSortedMap(db.getPartDatabase(), + partKeyBinding, partDataBinding, true); + supplierMap = + new StoredSortedMap(db.getSupplierDatabase(), + supplierKeyBinding, supplierDataBinding, true); + shipmentMap = + new StoredSortedMap(db.getShipmentDatabase(), + shipmentKeyBinding, shipmentDataBinding, true); + shipmentByPartMap = + new StoredSortedMap(db.getShipmentByPartDatabase(), + partKeyBinding, shipmentDataBinding, true); + shipmentBySupplierMap = + new StoredSortedMap(db.getShipmentBySupplierDatabase(), + supplierKeyBinding, shipmentDataBinding, true); + supplierByCityMap = + new StoredSortedMap(db.getSupplierByCityDatabase(), + cityKeyBinding, supplierDataBinding, true); + } + + // The views returned below can be accessed using the java.util.Map or + // java.util.Set interfaces, or using the StoredSortedMap and + // StoredValueSet classes, which provide additional methods. The entity + // sets could be obtained directly from the Map.values() method but + // convenience methods are provided here to return them in order to avoid + // down-casting elsewhere. + + /** + * Return a map view of the part storage container. + */ + public StoredSortedMap getPartMap() { + + return partMap; + } + + /** + * Return a map view of the supplier storage container. + */ + public StoredSortedMap getSupplierMap() { + + return supplierMap; + } + + /** + * Return a map view of the shipment storage container. + */ + public StoredSortedMap getShipmentMap() { + + return shipmentMap; + } + + /** + * Return an entity set view of the part storage container. + */ + public StoredSortedValueSet getPartSet() { + + return (StoredSortedValueSet) partMap.values(); + } + + /** + * Return an entity set view of the supplier storage container. + */ + public StoredSortedValueSet getSupplierSet() { + + return (StoredSortedValueSet) supplierMap.values(); + } + + /** + * Return an entity set view of the shipment storage container. + */ + public StoredSortedValueSet getShipmentSet() { + + return (StoredSortedValueSet) shipmentMap.values(); + } + + /** + * Return a map view of the shipment-by-part index. + */ + public StoredSortedMap getShipmentByPartMap() { + + return shipmentByPartMap; + } + + /** + * Return a map view of the shipment-by-supplier index. + */ + public StoredSortedMap getShipmentBySupplierMap() { + + return shipmentBySupplierMap; + } + + /** + * Return a map view of the supplier-by-city index. + */ + public final StoredSortedMap getSupplierByCityMap() { + + return supplierByCityMap; + } + + /** + * MarshalledKeyBinding is used to bind the stored key tuple entry to a key + * object representation. To do this, it calls the MarshalledKey interface + * implemented by the key class. 
+ */ + private static class MarshalledKeyBinding extends TupleBinding { + + private Class keyClass; + + /** + * Construct the binding object. + */ + private MarshalledKeyBinding(Class keyClass) { + + // The key class will be used to instantiate the key object. + // + if (!MarshalledKey.class.isAssignableFrom(keyClass)) { + throw new IllegalArgumentException(keyClass.toString() + + " does not implement MarshalledKey"); + } + this.keyClass = keyClass; + } + + /** + * Create the key object from the stored key tuple entry. + */ + public Object entryToObject(TupleInput input) { + + try { + MarshalledKey key = (MarshalledKey) keyClass.newInstance(); + key.unmarshalKey(input); + return key; + } catch (IllegalAccessException e) { + throw new RuntimeExceptionWrapper(e); + } catch (InstantiationException e) { + throw new RuntimeExceptionWrapper(e); + } + } + + /** + * Create the stored key tuple entry from the key object. + */ + public void objectToEntry(Object object, TupleOutput output) { + + MarshalledKey key = (MarshalledKey) object; + key.marshalKey(output); + } + } + + /** + * MarshalledEntityBinding is used to bind the stored key/data entry pair + * to a combined to an entity object representation. To do this, it calls + * the MarshalledEntity interface implemented by the entity class. + * + *

    The binding is "tricky" in that it uses the entity class for both + * the stored data entry and the combined entity object. To do this, + * entity's key field(s) are transient and are set by the binding after the + * data object has been deserialized. This avoids the use of a "data" class + * completely.

    + */ + private static class MarshalledEntityBinding extends TupleSerialBinding { + + /** + * Construct the binding object. + */ + private MarshalledEntityBinding(ClassCatalog classCatalog, + Class entityClass) { + + super(classCatalog, entityClass); + + // The entity class will be used to instantiate the entity object. + // + if (!MarshalledEntity.class.isAssignableFrom(entityClass)) { + throw new IllegalArgumentException(entityClass.toString() + + " does not implement MarshalledEntity"); + } + } + + /** + * Create the entity by combining the stored key and data. + * This "tricky" binding returns the stored data as the entity, but + * first it sets the transient key fields from the stored key. + */ + public Object entryToObject(TupleInput tupleInput, Object javaInput) { + + MarshalledEntity entity = (MarshalledEntity) javaInput; + entity.unmarshalPrimaryKey(tupleInput); + return entity; + } + + /** + * Create the stored key from the entity. + */ + public void objectToKey(Object object, TupleOutput output) { + + MarshalledEntity entity = (MarshalledEntity) object; + entity.marshalPrimaryKey(output); + } + + /** + * Return the entity as the stored data. There is nothing to do here + * since the entity's key fields are transient. + */ + public Object objectToData(Object object) { + + return object; + } + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/Shipment.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/Shipment.java new file mode 100644 index 000000000..5993055f0 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/Shipment.java @@ -0,0 +1,114 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Shipment.java,v 1.3 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.marshal; + +import java.io.Serializable; + +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A Shipment represents the combined key/data pair for a shipment entity. + * + *

    In this sample, Shipment is bound to the stored key/data entry by + * implementing the MarshalledEntity interface, which is called by {@link + * SampleViews.MarshalledEntityBinding}.

    + * + *

    The binding is "tricky" in that it uses this class for both the stored + * data entry and the combined entity object. To do this, the key field(s) are + * transient and are set by the binding after the data object has been + * deserialized. This avoids the use of a ShipmentData class completely.

    + * + *

    Since this class is used directly for data storage, it must be + * Serializable.

    + * + * @author Mark Hayes + */ +public class Shipment implements Serializable, MarshalledEntity { + + static final String PART_KEY = "part"; + static final String SUPPLIER_KEY = "supplier"; + + private transient String partNumber; + private transient String supplierNumber; + private int quantity; + + public Shipment(String partNumber, String supplierNumber, int quantity) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + this.quantity = quantity; + } + + /** + * Set the transient key fields after deserializing. This method is only + * called by data bindings. + */ + void setKey(String partNumber, String supplierNumber) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public final int getQuantity() { + + return quantity; + } + + public String toString() { + + return "[Shipment: part=" + partNumber + + " supplier=" + supplierNumber + + " quantity=" + quantity + ']'; + } + + // --- MarshalledEntity implementation --- + + Shipment() { + + // A no-argument constructor is necessary only to allow the binding to + // instantiate objects of this class. + } + + public void unmarshalPrimaryKey(TupleInput keyInput) { + + this.partNumber = keyInput.readString(); + this.supplierNumber = keyInput.readString(); + } + + public void marshalPrimaryKey(TupleOutput keyOutput) { + + keyOutput.writeString(this.partNumber); + keyOutput.writeString(this.supplierNumber); + } + + public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) { + + if (keyName.equals(PART_KEY)) { + keyOutput.writeString(this.partNumber); + return true; + } else if (keyName.equals(SUPPLIER_KEY)) { + keyOutput.writeString(this.supplierNumber); + return true; + } else { + throw new UnsupportedOperationException(keyName); + } + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/ShipmentKey.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/ShipmentKey.java new file mode 100644 index 000000000..1b14fd7ac --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/ShipmentKey.java @@ -0,0 +1,70 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: ShipmentKey.java,v 1.3 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.marshal; + +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A ShipmentKey serves as the key in the key/data pair for a shipment entity. + * + *

    In this sample, ShipmentKey is bound to the stored key tuple entry by + * implementing the MarshalledKey interface, which is called by {@link + * SampleViews.MarshalledKeyBinding}.
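As a rough sketch (not taken from the sample sources), marshalKey() writes the two strings in order, so the stored tuple keys sort by part number first and supplier number second:

    // Hypothetical: build the raw tuple entry the key binding would store.
    TupleOutput out = new TupleOutput();
    new ShipmentKey("P1", "S1").marshalKey(out);
    byte[] keyEntry = out.toByteArray();   // the bytes used as the Btree key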

    + * + * @author Mark Hayes + */ +public class ShipmentKey implements MarshalledKey { + + private String partNumber; + private String supplierNumber; + + public ShipmentKey(String partNumber, String supplierNumber) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public String toString() { + + return "[ShipmentKey: supplier=" + supplierNumber + + " part=" + partNumber + ']'; + } + + // --- MarshalledKey implementation --- + + ShipmentKey() { + + // A no-argument constructor is necessary only to allow the binding to + // instantiate objects of this class. + } + + public void unmarshalKey(TupleInput keyInput) { + + this.partNumber = keyInput.readString(); + this.supplierNumber = keyInput.readString(); + } + + public void marshalKey(TupleOutput keyOutput) { + + keyOutput.writeString(this.partNumber); + keyOutput.writeString(this.supplierNumber); + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/Supplier.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/Supplier.java new file mode 100644 index 000000000..f94e18ca6 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/Supplier.java @@ -0,0 +1,119 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Supplier.java,v 1.3 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.marshal; + +import java.io.Serializable; + +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A Supplier represents the combined key/data pair for a supplier entity. + * + *

    In this sample, Supplier is bound to the stored key/data entry by + * implementing the MarshalledEntity interface, which is called by {@link + * SampleViews.MarshalledEntityBinding}.
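One consequence worth spelling out (a hedged aside, not code from the sample): when city is null, marshalSecondaryKey() returns false, so no record is written to the supplier-by-city index, while the supplier itself is still stored in the primary supplier store.

    // Hypothetical supplier with no city; "S9" and "Acme" are made-up values,
    // and views is assumed to be an open SampleViews (the real sample performs
    // writes inside a TransactionRunner).
    Supplier noCity = new Supplier("S9", "Acme", 10, null);
    views.getSupplierSet().add(noCity);
    // noCity can be read back from getSupplierMap(), but getSupplierByCityMap()
    // contains no entry for it because no secondary key was produced.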

    + * + *

    The binding is "tricky" in that it uses this class for both the stored + * data entry and the combined entity object. To do this, the key field(s) are + * transient and are set by the binding after the data object has been + * deserialized. This avoids the use of a SupplierData class completely.

    + * + *

    Since this class is used directly for data storage, it must be + * Serializable.

    + * + * @author Mark Hayes + */ +public class Supplier implements Serializable, MarshalledEntity { + + static final String CITY_KEY = "city"; + + private transient String number; + private String name; + private int status; + private String city; + + public Supplier(String number, String name, int status, String city) { + + this.number = number; + this.name = name; + this.status = status; + this.city = city; + } + + /** + * Set the transient key fields after deserializing. This method is only + * called by data bindings. + */ + void setKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public final String getName() { + + return name; + } + + public final int getStatus() { + + return status; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[Supplier: number=" + number + + " name=" + name + + " status=" + status + + " city=" + city + ']'; + } + + // --- MarshalledEntity implementation --- + + Supplier() { + + // A no-argument constructor is necessary only to allow the binding to + // instantiate objects of this class. + } + + public void unmarshalPrimaryKey(TupleInput keyInput) { + + this.number = keyInput.readString(); + } + + public void marshalPrimaryKey(TupleOutput keyOutput) { + + keyOutput.writeString(this.number); + } + + public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) { + + if (keyName.equals(CITY_KEY)) { + if (this.city != null) { + keyOutput.writeString(this.city); + return true; + } else { + return false; + } + } else { + throw new UnsupportedOperationException(keyName); + } + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/SupplierKey.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/SupplierKey.java new file mode 100644 index 000000000..5cba0bcb6 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/SupplierKey.java @@ -0,0 +1,60 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SupplierKey.java,v 1.3 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.marshal; + +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A SupplierKey serves as the key in the key/data pair for a supplier entity. + * + *

    In this sample, SupplierKey is bound to the stored key tuple entry by + * implementing the MarshalledKey interface, which is called by {@link + * SampleViews.MarshalledKeyBinding}.

    + * + * @author Mark Hayes + */ +public class SupplierKey implements MarshalledKey { + + private String number; + + public SupplierKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[SupplierKey: number=" + number + ']'; + } + + // --- MarshalledKey implementation --- + + SupplierKey() { + + // A no-argument constructor is necessary only to allow the binding to + // instantiate objects of this class. + } + + public void unmarshalKey(TupleInput keyInput) { + + this.number = keyInput.readString(); + } + + public void marshalKey(TupleOutput keyOutput) { + + keyOutput.writeString(this.number); + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/Weight.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/Weight.java new file mode 100644 index 000000000..ce26855ec --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/marshal/Weight.java @@ -0,0 +1,50 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Weight.java,v 1.2 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.marshal; + +import java.io.Serializable; + +/** + * Weight represents a weight amount and unit of measure. + * + *

    In this sample, Weight is embedded in part data values which are stored + * as Java serialized objects; therefore Weight must be Serializable.

    + * + * @author Mark Hayes + */ +public class Weight implements Serializable { + + public final static String GRAMS = "grams"; + public final static String OUNCES = "ounces"; + + private double amount; + private String units; + + public Weight(double amount, String units) { + + this.amount = amount; + this.units = units; + } + + public final double getAmount() { + + return amount; + } + + public final String getUnits() { + + return units; + } + + public String toString() { + + return "[" + amount + ' ' + units + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/Part.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/Part.java new file mode 100644 index 000000000..de00ec8b8 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/Part.java @@ -0,0 +1,91 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Part.java,v 1.2 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.sentity; + +import java.io.Serializable; + +/** + * A Part represents the combined key/data pair for a part entity. + * + *

In this sample, Part is created from the stored key/data entry using a + * TupleSerialBinding. See {@link SampleViews.PartBinding} for details. + *

    + * + *

    The binding is "tricky" in that it uses this class for both the stored + * data entry and the combined entity object. To do this, the key field(s) are + * transient and are set by the binding after the data object has been + * deserialized. This avoids the use of a PartData class completely.

    + * + *

    Since this class is used directly for data storage, it must be + * Serializable.

    + * + * @author Mark Hayes + */ +public class Part implements Serializable { + + private transient String number; + private String name; + private String color; + private Weight weight; + private String city; + + public Part(String number, String name, String color, Weight weight, + String city) { + + this.number = number; + this.name = name; + this.color = color; + this.weight = weight; + this.city = city; + } + + /** + * Set the transient key fields after deserializing. This method is only + * called by data bindings. + */ + final void setKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public final String getName() { + + return name; + } + + public final String getColor() { + + return color; + } + + public final Weight getWeight() { + + return weight; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[Part: number=" + number + + " name=" + name + + " color=" + color + + " weight=" + weight + + " city=" + city + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/PartKey.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/PartKey.java new file mode 100644 index 000000000..01af80ba8 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/PartKey.java @@ -0,0 +1,39 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: PartKey.java,v 1.2 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.sentity; + +/** + * A PartKey serves as the key in the key/data pair for a part entity. + * + *

    In this sample, PartKey is bound to the key's tuple storage entry using + * a TupleBinding. Because it is not used directly as storage data, it does + * not need to be Serializable.
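One payoff of tuple-bound keys, sketched here under the assumption of an open sentity SampleViews called views: the Btree stores entries in tuple order, so the StoredSortedMap views support SortedMap range operations.

    // Hypothetical range scan: all parts whose key is "P3" or greater.
    java.util.Iterator i =
        views.getPartMap().tailMap(new PartKey("P3")).values().iterator();
    try {
        while (i.hasNext()) {
            System.out.println(i.next());
        }
    } finally {
        StoredIterator.close(i);   // always close Berkeley DB backed iterators
    }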

    + * + * @author Mark Hayes + */ +public class PartKey { + + private String number; + + public PartKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[PartKey: number=" + number + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/Sample.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/Sample.java new file mode 100644 index 000000000..091d707cc --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/Sample.java @@ -0,0 +1,250 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Sample.java,v 1.4 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.sentity; + +import java.io.FileNotFoundException; +import java.util.Iterator; +import java.util.Set; + +import com.sleepycat.collections.StoredIterator; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.db.DatabaseException; + +/** + * Sample is the main entry point for the sample program and may be run as + * follows: + * + *
    + * java com.sleepycat.examples.collections.ship.sentity.Sample
+      [-h <home-directory> ]
    + * 
    + * + *

The default for the home directory is ./tmp -- the tmp subdirectory of + * the current directory where the sample is run. To specify a different home + * directory, use the -h option. The home directory must exist before + * running the sample. To recreate the sample database from scratch, delete + * all files in the home directory before running the sample.
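For example (a hypothetical invocation; the path is a placeholder and the directory must already exist), the sample can be launched against a different home directory:

    // Hypothetical programmatic launch of the sample with a custom home.
    com.sleepycat.examples.collections.ship.sentity.Sample.main(
        new String[] { "-h", "/var/tmp/shipdb" });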

    + * + * @author Mark Hayes + */ +public class Sample { + + private SampleDatabase db; + private SampleViews views; + + /** + * Run the sample program. + */ + public static void main(String[] args) { + + System.out.println("\nRunning sample: " + Sample.class); + + // Parse the command line arguments. + // + String homeDir = "./tmp"; + for (int i = 0; i < args.length; i += 1) { + if (args[i].equals("-h") && i < args.length - 1) { + i += 1; + homeDir = args[i]; + } else { + System.err.println("Usage:\n java " + Sample.class.getName() + + "\n [-h ]"); + System.exit(2); + } + } + + // Run the sample. + // + Sample sample = null; + try { + sample = new Sample(homeDir); + sample.run(); + } catch (Exception e) { + // If an exception reaches this point, the last transaction did not + // complete. If the exception is RunRecoveryException, follow + // the Berkeley DB recovery procedures before running again. + e.printStackTrace(); + } finally { + if (sample != null) { + try { + // Always attempt to close the database cleanly. + sample.close(); + } catch (Exception e) { + System.err.println("Exception during database close:"); + e.printStackTrace(); + } + } + } + } + + /** + * Open the database and views. + */ + private Sample(String homeDir) + throws DatabaseException, FileNotFoundException { + + db = new SampleDatabase(homeDir); + views = new SampleViews(db); + } + + /** + * Close the database cleanly. + */ + private void close() + throws DatabaseException { + + db.close(); + } + + /** + * Run two transactions to populate and print the database. A + * TransactionRunner is used to ensure consistent handling of transactions, + * including deadlock retries. But the best transaction handling mechanism + * to use depends on the application. + */ + private void run() + throws Exception { + + TransactionRunner runner = new TransactionRunner(db.getEnvironment()); + runner.run(new PopulateDatabase()); + runner.run(new PrintDatabase()); + } + + /** + * Populate the database in a single transaction. + */ + private class PopulateDatabase implements TransactionWorker { + + public void doWork() + throws Exception { + addSuppliers(); + addParts(); + addShipments(); + } + } + + /** + * Print the database in a single transaction. All entities are printed + * and the indices are used to print the entities for certain keys. + * + *

    Note the use of special iterator() methods. These are used here + * with indices to find the shipments for certain keys. For details on + * database iterators see {@link StoredIterator}.

    + */ + private class PrintDatabase implements TransactionWorker { + + public void doWork() + throws Exception { + printValues("Parts", + views.getPartSet().iterator()); + printValues("Suppliers", + views.getSupplierSet().iterator()); + printValues("Suppliers for City Paris", + views.getSupplierByCityMap().duplicates( + "Paris").iterator()); + printValues("Shipments", + views.getShipmentSet().iterator()); + printValues("Shipments for Part P1", + views.getShipmentByPartMap().duplicates( + new PartKey("P1")).iterator()); + printValues("Shipments for Supplier S1", + views.getShipmentBySupplierMap().duplicates( + new SupplierKey("S1")).iterator()); + } + } + + /** + * Populate the part entities in the database. If the part set is not + * empty, assume that this has already been done. + */ + private void addParts() { + + Set parts = views.getPartSet(); + if (parts.isEmpty()) { + System.out.println("Adding Parts"); + parts.add(new Part("P1", "Nut", "Red", + new Weight(12.0, Weight.GRAMS), "London")); + parts.add(new Part("P2", "Bolt", "Green", + new Weight(17.0, Weight.GRAMS), "Paris")); + parts.add(new Part("P3", "Screw", "Blue", + new Weight(17.0, Weight.GRAMS), "Rome")); + parts.add(new Part("P4", "Screw", "Red", + new Weight(14.0, Weight.GRAMS), "London")); + parts.add(new Part("P5", "Cam", "Blue", + new Weight(12.0, Weight.GRAMS), "Paris")); + parts.add(new Part("P6", "Cog", "Red", + new Weight(19.0, Weight.GRAMS), "London")); + } + } + + /** + * Populate the supplier entities in the database. If the supplier set is + * not empty, assume that this has already been done. + */ + private void addSuppliers() { + + Set suppliers = views.getSupplierSet(); + if (suppliers.isEmpty()) { + System.out.println("Adding Suppliers"); + suppliers.add(new Supplier("S1", "Smith", 20, "London")); + suppliers.add(new Supplier("S2", "Jones", 10, "Paris")); + suppliers.add(new Supplier("S3", "Blake", 30, "Paris")); + suppliers.add(new Supplier("S4", "Clark", 20, "London")); + suppliers.add(new Supplier("S5", "Adams", 30, "Athens")); + } + } + + /** + * Populate the shipment entities in the database. If the shipment set + * is not empty, assume that this has already been done. + */ + private void addShipments() { + + Set shipments = views.getShipmentSet(); + if (shipments.isEmpty()) { + System.out.println("Adding Shipments"); + shipments.add(new Shipment("P1", "S1", 300)); + shipments.add(new Shipment("P2", "S1", 200)); + shipments.add(new Shipment("P3", "S1", 400)); + shipments.add(new Shipment("P4", "S1", 200)); + shipments.add(new Shipment("P5", "S1", 100)); + shipments.add(new Shipment("P6", "S1", 100)); + shipments.add(new Shipment("P1", "S2", 300)); + shipments.add(new Shipment("P2", "S2", 400)); + shipments.add(new Shipment("P2", "S3", 200)); + shipments.add(new Shipment("P2", "S4", 200)); + shipments.add(new Shipment("P4", "S4", 300)); + shipments.add(new Shipment("P5", "S4", 400)); + } + } + + /** + * Print the objects returned by an iterator of entity value objects. + * + *

    IMPORTANT: All database iterators must be closed to avoid + * serious database problems. If the iterator is not closed, the + * underlying Berkeley DB cursor is not closed either.

    + */ + private void printValues(String label, Iterator iterator) { + + System.out.println("\n--- " + label + " ---"); + try { + while (iterator.hasNext()) { + System.out.println(iterator.next().toString()); + } + } finally { + // IMPORTANT: Use StoredIterator to close all database + // iterators. If java.util.Iterator is in hand, you can safely + // close it by calling StoredIterator.close(Iterator). + StoredIterator.close(iterator); + } + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/SampleDatabase.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/SampleDatabase.java new file mode 100644 index 000000000..0f8b02ee9 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/SampleDatabase.java @@ -0,0 +1,322 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SampleDatabase.java,v 1.4 2004/09/22 18:00:59 bostic Exp $ + */ + +package com.sleepycat.examples.collections.ship.sentity; + +import java.io.File; +import java.io.FileNotFoundException; + +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.bind.serial.TupleSerialKeyCreator; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseConfig; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.DatabaseType; +import com.sleepycat.db.Environment; +import com.sleepycat.db.EnvironmentConfig; +import com.sleepycat.db.SecondaryConfig; +import com.sleepycat.db.SecondaryDatabase; + +/** + * SampleDatabase defines the storage containers, indices and foreign keys + * for the sample database. + * + * @author Mark Hayes + */ +public class SampleDatabase { + + private static final String CLASS_CATALOG = "java_class_catalog"; + private static final String SUPPLIER_STORE = "supplier_store"; + private static final String PART_STORE = "part_store"; + private static final String SHIPMENT_STORE = "shipment_store"; + private static final String SHIPMENT_PART_INDEX = "shipment_part_index"; + private static final String SHIPMENT_SUPPLIER_INDEX = + "shipment_supplier_index"; + private static final String SUPPLIER_CITY_INDEX = "supplier_city_index"; + + private Environment env; + private Database partDb; + private Database supplierDb; + private Database shipmentDb; + private SecondaryDatabase supplierByCityDb; + private SecondaryDatabase shipmentByPartDb; + private SecondaryDatabase shipmentBySupplierDb; + private StoredClassCatalog javaCatalog; + + /** + * Open all storage containers, indices, and catalogs. + */ + public SampleDatabase(String homeDirectory) + throws DatabaseException, FileNotFoundException { + + // Open the Berkeley DB environment in transactional mode. + // + System.out.println("Opening environment in: " + homeDirectory); + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setInitializeCache(true); + envConfig.setInitializeLocking(true); + env = new Environment(new File(homeDirectory), envConfig); + + // Set the Berkeley DB config for opening all stores. + // + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + dbConfig.setType(DatabaseType.BTREE); + + // Create the Serial class catalog. 
This holds the serialized class + // format for all database records of serial format. + // + Database catalogDb = env.openDatabase(null, CLASS_CATALOG, null, + dbConfig); + javaCatalog = new StoredClassCatalog(catalogDb); + + // Open the Berkeley DB database for the part, supplier and shipment + // stores. The stores are opened with no duplicate keys allowed. + // + partDb = env.openDatabase(null, PART_STORE, null, dbConfig); + + supplierDb = env.openDatabase(null, SUPPLIER_STORE, null, dbConfig); + + shipmentDb = env.openDatabase(null, SHIPMENT_STORE, null, dbConfig); + + // Open the SecondaryDatabase for the city index of the supplier store, + // and for the part and supplier indices of the shipment store. + // Duplicate keys are allowed since more than one supplier may be in + // the same city, and more than one shipment may exist for the same + // supplier or part. A foreign key constraint is defined for the + // supplier and part indices to ensure that a shipment only refers to + // existing part and supplier keys. The CASCADE delete action means + // that shipments will be deleted if their associated part or supplier + // is deleted. + // + SecondaryConfig secConfig = new SecondaryConfig(); + secConfig.setTransactional(true); + secConfig.setAllowCreate(true); + secConfig.setType(DatabaseType.BTREE); + secConfig.setSortedDuplicates(true); + + secConfig.setKeyCreator(new SupplierByCityKeyCreator(javaCatalog, + Supplier.class)); + supplierByCityDb = env.openSecondaryDatabase(null, SUPPLIER_CITY_INDEX, + null, + supplierDb, + secConfig); + + secConfig.setKeyCreator(new ShipmentByPartKeyCreator(javaCatalog, + Shipment.class)); + shipmentByPartDb = env.openSecondaryDatabase(null, SHIPMENT_PART_INDEX, + null, + shipmentDb, + secConfig); + + secConfig.setKeyCreator(new ShipmentBySupplierKeyCreator(javaCatalog, + Shipment.class)); + shipmentBySupplierDb = env.openSecondaryDatabase(null, + SHIPMENT_SUPPLIER_INDEX, + null, + shipmentDb, + secConfig); + } + + /** + * Return the storage environment for the database. + */ + public final Environment getEnvironment() { + + return env; + } + + /** + * Return the class catalog. + */ + public final StoredClassCatalog getClassCatalog() { + + return javaCatalog; + } + + /** + * Return the part storage container. + */ + public final Database getPartDatabase() { + + return partDb; + } + + /** + * Return the supplier storage container. + */ + public final Database getSupplierDatabase() { + + return supplierDb; + } + + /** + * Return the shipment storage container. + */ + public final Database getShipmentDatabase() { + + return shipmentDb; + } + + /** + * Return the shipment-by-part index. + */ + public final SecondaryDatabase getShipmentByPartDatabase() { + + return shipmentByPartDb; + } + + /** + * Return the shipment-by-supplier index. + */ + public final SecondaryDatabase getShipmentBySupplierDatabase() { + + return shipmentBySupplierDb; + } + + /** + * Return the supplier-by-city index. + */ + public final SecondaryDatabase getSupplierByCityDatabase() { + + return supplierByCityDb; + } + + /** + * Close all stores (closing a store automatically closes its indices). + */ + public void close() + throws DatabaseException { + + // Close secondary databases, then primary databases. + supplierByCityDb.close(); + shipmentByPartDb.close(); + shipmentBySupplierDb.close(); + partDb.close(); + supplierDb.close(); + shipmentDb.close(); + // And don't forget to close the catalog and the environment. 
+ javaCatalog.close(); + env.close(); + } + + /** + * The SecondaryKeyCreator for the SupplierByCity index. This is an + * extension of the abstract class TupleSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys are of the format + * TupleFormat and the data values are of the format SerialFormat. + */ + private static class SupplierByCityKeyCreator + extends TupleSerialKeyCreator { + + /** + * Construct the city key extractor. + * @param catalog is the class catalog. + * @param valueClass is the supplier value class. + */ + private SupplierByCityKeyCreator(ClassCatalog catalog, + Class valueClass) { + + super(catalog, valueClass); + } + + /** + * Extract the city key from a supplier key/value pair. The city key + * is stored in the supplier value, so the supplier key is not used. + */ + public boolean createSecondaryKey(TupleInput primaryKeyInput, + Object valueInput, + TupleOutput indexKeyOutput) { + + Supplier supplier = (Supplier) valueInput; + String city = supplier.getCity(); + if (city != null) { + indexKeyOutput.writeString(supplier.getCity()); + return true; + } else { + return false; + } + } + } + + /** + * The SecondaryKeyCreator for the ShipmentByPart index. This is an + * extension of the abstract class TupleSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys are of the format + * TupleFormat and the data values are of the format SerialFormat. + */ + private static class ShipmentByPartKeyCreator + extends TupleSerialKeyCreator { + + /** + * Construct the part key extractor. + * @param catalog is the class catalog. + * @param valueClass is the shipment value class. + */ + private ShipmentByPartKeyCreator(ClassCatalog catalog, + Class valueClass) { + super(catalog, valueClass); + } + + /** + * Extract the part key from a shipment key/value pair. The part key + * is stored in the shipment key, so the shipment value is not used. + */ + public boolean createSecondaryKey(TupleInput primaryKeyInput, + Object valueInput, + TupleOutput indexKeyOutput) { + + String partNumber = primaryKeyInput.readString(); + // don't bother reading the supplierNumber + indexKeyOutput.writeString(partNumber); + return true; + } + } + + /** + * The SecondaryKeyCreator for the ShipmentBySupplier index. This is an + * extension of the abstract class TupleSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys are of the format + * TupleFormat and the data values are of the format SerialFormat. + */ + private static class ShipmentBySupplierKeyCreator + extends TupleSerialKeyCreator { + + /** + * Construct the supplier key extractor. + * @param catalog is the class catalog. + * @param valueClass is the shipment value class. + */ + private ShipmentBySupplierKeyCreator(ClassCatalog catalog, + Class valueClass) { + super(catalog, valueClass); + } + + /** + * Extract the supplier key from a shipment key/value pair. The + * supplier key is stored in the shipment key, so the shipment value is + * not used. 
+ */ + public boolean createSecondaryKey(TupleInput primaryKeyInput, + Object valueInput, + TupleOutput indexKeyOutput) { + + primaryKeyInput.readString(); // skip the partNumber + String supplierNumber = primaryKeyInput.readString(); + indexKeyOutput.writeString(supplierNumber); + return true; + } + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/SampleViews.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/SampleViews.java new file mode 100644 index 000000000..6f447ff22 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/SampleViews.java @@ -0,0 +1,420 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SampleViews.java,v 1.3 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.sentity; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.TupleSerialBinding; +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.collections.StoredSortedValueSet; + +/** + * SampleViews defines the data bindings and collection views for the sample + * database. + * + * @author Mark Hayes + */ +public class SampleViews { + + private StoredSortedMap partMap; + private StoredSortedMap supplierMap; + private StoredSortedMap shipmentMap; + private StoredSortedMap shipmentByPartMap; + private StoredSortedMap shipmentBySupplierMap; + private StoredSortedMap supplierByCityMap; + + /** + * Create the data bindings and collection views. + */ + public SampleViews(SampleDatabase db) { + + // Create the data bindings. + // In this sample, EntityBinding classes are used to bind the stored + // key/data entry pair to a combined data object; a "tricky" binding + // that uses transient fields is used--see PartBinding, etc, for + // details. For keys, a one-to-one binding is implemented with + // EntryBinding classes to bind the stored tuple entry to a key Object. + // + ClassCatalog catalog = db.getClassCatalog(); + EntryBinding partKeyBinding = + new PartKeyBinding(); + EntityBinding partDataBinding = + new PartBinding(catalog, Part.class); + EntryBinding supplierKeyBinding = + new SupplierKeyBinding(); + EntityBinding supplierDataBinding = + new SupplierBinding(catalog, Supplier.class); + EntryBinding shipmentKeyBinding = + new ShipmentKeyBinding(); + EntityBinding shipmentDataBinding = + new ShipmentBinding(catalog, Shipment.class); + EntryBinding cityKeyBinding = + TupleBinding.getPrimitiveBinding(String.class); + + // Create map views for all stores and indices. + // StoredSortedMap is used since the stores and indices are ordered + // (they use the DB_BTREE access method). 
+ // + partMap = + new StoredSortedMap(db.getPartDatabase(), + partKeyBinding, partDataBinding, true); + supplierMap = + new StoredSortedMap(db.getSupplierDatabase(), + supplierKeyBinding, supplierDataBinding, true); + shipmentMap = + new StoredSortedMap(db.getShipmentDatabase(), + shipmentKeyBinding, shipmentDataBinding, true); + shipmentByPartMap = + new StoredSortedMap(db.getShipmentByPartDatabase(), + partKeyBinding, shipmentDataBinding, true); + shipmentBySupplierMap = + new StoredSortedMap(db.getShipmentBySupplierDatabase(), + supplierKeyBinding, shipmentDataBinding, true); + supplierByCityMap = + new StoredSortedMap(db.getSupplierByCityDatabase(), + cityKeyBinding, supplierDataBinding, true); + } + + // The views returned below can be accessed using the java.util.Map or + // java.util.Set interfaces, or using the StoredSortedMap and + // StoredValueSet classes, which provide additional methods. The entity + // sets could be obtained directly from the Map.values() method but + // convenience methods are provided here to return them in order to avoid + // down-casting elsewhere. + + /** + * Return a map view of the part storage container. + */ + public StoredSortedMap getPartMap() { + + return partMap; + } + + /** + * Return a map view of the supplier storage container. + */ + public StoredSortedMap getSupplierMap() { + + return supplierMap; + } + + /** + * Return a map view of the shipment storage container. + */ + public StoredSortedMap getShipmentMap() { + + return shipmentMap; + } + + /** + * Return an entity set view of the part storage container. + */ + public StoredSortedValueSet getPartSet() { + + return (StoredSortedValueSet) partMap.values(); + } + + /** + * Return an entity set view of the supplier storage container. + */ + public StoredSortedValueSet getSupplierSet() { + + return (StoredSortedValueSet) supplierMap.values(); + } + + /** + * Return an entity set view of the shipment storage container. + */ + public StoredSortedValueSet getShipmentSet() { + + return (StoredSortedValueSet) shipmentMap.values(); + } + + /** + * Return a map view of the shipment-by-part index. + */ + public StoredSortedMap getShipmentByPartMap() { + + return shipmentByPartMap; + } + + /** + * Return a map view of the shipment-by-supplier index. + */ + public StoredSortedMap getShipmentBySupplierMap() { + + return shipmentBySupplierMap; + } + + /** + * Return a map view of the supplier-by-city index. + */ + public final StoredSortedMap getSupplierByCityMap() { + + return supplierByCityMap; + } + + /** + * PartKeyBinding is used to bind the stored key tuple entry for a part to + * a key object representation. + */ + private static class PartKeyBinding extends TupleBinding { + + /** + * Construct the binding object. + */ + private PartKeyBinding() { + } + + /** + * Create the key object from the stored key tuple entry. + */ + public Object entryToObject(TupleInput input) { + + String number = input.readString(); + return new PartKey(number); + } + + /** + * Create the stored key tuple entry from the key object. + */ + public void objectToEntry(Object object, TupleOutput output) { + + PartKey key = (PartKey) object; + output.writeString(key.getNumber()); + } + } + + /** + * PartBinding is used to bind the stored key/data entry pair for a part + * to a combined data object (entity). + * + *

    The binding is "tricky" in that it uses the Part class for both the + * stored data entry and the combined entity object. To do this, Part's + * key field(s) are transient and are set by the binding after the data + * object has been deserialized. This avoids the use of a PartData class + * completely.

    + */ + private static class PartBinding extends TupleSerialBinding { + + /** + * Construct the binding object. + */ + private PartBinding(ClassCatalog classCatalog, Class dataClass) { + + super(classCatalog, dataClass); + } + + /** + * Create the entity by combining the stored key and data. + * This "tricky" binding returns the stored data as the entity, but + * first it sets the transient key fields from the stored key. + */ + public Object entryToObject(TupleInput keyInput, Object dataInput) { + + String number = keyInput.readString(); + Part part = (Part) dataInput; + part.setKey(number); + return part; + } + + /** + * Create the stored key from the entity. + */ + public void objectToKey(Object object, TupleOutput output) { + + Part part = (Part) object; + output.writeString(part.getNumber()); + } + + /** + * Return the entity as the stored data. There is nothing to do here + * since the entity's key fields are transient. + */ + public Object objectToData(Object object) { + + return object; + } + } + + /** + * SupplierKeyBinding is used to bind the stored key tuple entry for a + * supplier to a key object representation. + */ + private static class SupplierKeyBinding extends TupleBinding { + + /** + * Construct the binding object. + */ + private SupplierKeyBinding() { + } + + /** + * Create the key object from the stored key tuple entry. + */ + public Object entryToObject(TupleInput input) { + + String number = input.readString(); + return new SupplierKey(number); + } + + /** + * Create the stored key tuple entry from the key object. + */ + public void objectToEntry(Object object, TupleOutput output) { + + SupplierKey key = (SupplierKey) object; + output.writeString(key.getNumber()); + } + } + + /** + * SupplierBinding is used to bind the stored key/data entry pair for a + * supplier to a combined data object (entity). + * + *

    The binding is "tricky" in that it uses the Supplier class for both + * the stored data entry and the combined entity object. To do this, + * Supplier's key field(s) are transient and are set by the binding after + * the data object has been deserialized. This avoids the use of a + * SupplierData class completely.

    + */ + private static class SupplierBinding extends TupleSerialBinding { + + /** + * Construct the binding object. + */ + private SupplierBinding(ClassCatalog classCatalog, Class dataClass) { + + super(classCatalog, dataClass); + } + + /** + * Create the entity by combining the stored key and data. + * This "tricky" binding returns the stored data as the entity, but + * first it sets the transient key fields from the stored key. + */ + public Object entryToObject(TupleInput keyInput, Object dataInput) { + + String number = keyInput.readString(); + Supplier supplier = (Supplier) dataInput; + supplier.setKey(number); + return supplier; + } + + /** + * Create the stored key from the entity. + */ + public void objectToKey(Object object, TupleOutput output) { + + Supplier supplier = (Supplier) object; + output.writeString(supplier.getNumber()); + } + + /** + * Return the entity as the stored data. There is nothing to do here + * since the entity's key fields are transient. + */ + public Object objectToData(Object object) { + + return object; + } + } + + /** + * ShipmentKeyBinding is used to bind the stored key tuple entry for a + * shipment to a key object representation. + */ + private static class ShipmentKeyBinding extends TupleBinding { + + /** + * Construct the binding object. + */ + private ShipmentKeyBinding() { + } + + /** + * Create the key object from the stored key tuple entry. + */ + public Object entryToObject(TupleInput input) { + + String partNumber = input.readString(); + String supplierNumber = input.readString(); + return new ShipmentKey(partNumber, supplierNumber); + } + + /** + * Create the stored key tuple entry from the key object. + */ + public void objectToEntry(Object object, TupleOutput output) { + + ShipmentKey key = (ShipmentKey) object; + output.writeString(key.getPartNumber()); + output.writeString(key.getSupplierNumber()); + } + } + + /** + * ShipmentBinding is used to bind the stored key/data entry pair for a + * shipment to a combined data object (entity). + * + *

    The binding is "tricky" in that it uses the Shipment class for both + * the stored data entry and the combined entity object. To do this, + * Shipment's key field(s) are transient and are set by the binding after + * the data object has been deserialized. This avoids the use of a + * ShipmentData class completely.

    + */ + private static class ShipmentBinding extends TupleSerialBinding { + + /** + * Construct the binding object. + */ + private ShipmentBinding(ClassCatalog classCatalog, Class dataClass) { + + super(classCatalog, dataClass); + } + + /** + * Create the entity by combining the stored key and data. + * This "tricky" binding returns the stored data as the entity, but + * first it sets the transient key fields from the stored key. + */ + public Object entryToObject(TupleInput keyInput, Object dataInput) { + + String partNumber = keyInput.readString(); + String supplierNumber = keyInput.readString(); + Shipment shipment = (Shipment) dataInput; + shipment.setKey(partNumber, supplierNumber); + return shipment; + } + + /** + * Create the stored key from the entity. + */ + public void objectToKey(Object object, TupleOutput output) { + + Shipment shipment = (Shipment) object; + output.writeString(shipment.getPartNumber()); + output.writeString(shipment.getSupplierNumber()); + } + + /** + * Return the entity as the stored data. There is nothing to do here + * since the entity's key fields are transient. + */ + public Object objectToData(Object object) { + + return object; + } + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/Shipment.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/Shipment.java new file mode 100644 index 000000000..8ccc8bc0c --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/Shipment.java @@ -0,0 +1,76 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Shipment.java,v 1.2 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.sentity; + +import java.io.Serializable; + +/** + * A Shipment represents the combined key/data pair for a shipment entity. + * + *

In this sample, Shipment is created from the stored key/data entry + * using a TupleSerialBinding. See {@link SampleViews.ShipmentBinding} for + * details. + *

    + * + *

    The binding is "tricky" in that it uses this class for both the stored + * data entry and the combined entity object. To do this, the key field(s) + * are transient and are set by the binding after the data object has been + * deserialized. This avoids the use of a ShipmentData class completely.

    + * + *

    Since this class is used directly for data storage, it must be + * Serializable.

    + * + * @author Mark Hayes + */ +public class Shipment implements Serializable { + + private transient String partNumber; + private transient String supplierNumber; + private int quantity; + + public Shipment(String partNumber, String supplierNumber, int quantity) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + this.quantity = quantity; + } + + /** + * Set the transient key fields after deserializing. This method is only + * called by data bindings. + */ + void setKey(String partNumber, String supplierNumber) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public final int getQuantity() { + + return quantity; + } + + public String toString() { + + return "[Shipment: part=" + partNumber + + " supplier=" + supplierNumber + + " quantity=" + quantity + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/ShipmentKey.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/ShipmentKey.java new file mode 100644 index 000000000..62e51cbc9 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/ShipmentKey.java @@ -0,0 +1,47 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: ShipmentKey.java,v 1.2 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.sentity; + +/** + * A ShipmentKey serves as the key in the key/data pair for a shipment entity. + * + *

    In this sample, ShipmentKey is bound to the key's tuple storage entry + * using a TupleBinding. Because it is not used directly as storage data, it + * does not need to be Serializable.
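For reference, this is the kind of TupleBinding the paragraph refers to, condensed from the ShipmentKeyBinding defined in this package's SampleViews elsewhere in the patch. The order in which the fields are written to the tuple is also the sort order of the keys in the Btree.

    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    class ShipmentKeyBinding extends TupleBinding {

        // Read the two key fields back in the order they were written.
        public Object entryToObject(TupleInput input) {
            String partNumber = input.readString();
            String supplierNumber = input.readString();
            return new ShipmentKey(partNumber, supplierNumber);
        }

        // Write the key fields; part number first, so shipments sort by part,
        // then by supplier.
        public void objectToEntry(Object object, TupleOutput output) {
            ShipmentKey key = (ShipmentKey) object;
            output.writeString(key.getPartNumber());
            output.writeString(key.getSupplierNumber());
        }
    }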

    + * + * @author Mark Hayes + */ +public class ShipmentKey { + + private String partNumber; + private String supplierNumber; + + public ShipmentKey(String partNumber, String supplierNumber) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public String toString() { + + return "[ShipmentKey: supplier=" + supplierNumber + + " part=" + partNumber + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/Supplier.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/Supplier.java new file mode 100644 index 000000000..f98b0aaf3 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/Supplier.java @@ -0,0 +1,83 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Supplier.java,v 1.2 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.sentity; + +import java.io.Serializable; + +/** + * A Supplier represents the combined key/data pair for a supplier entity. + * + *

In this sample, Supplier is created from the stored key/data entry + * using TupleSerialBinding. See {@link SampleViews.PartBinding} for + * details. + *

    + * + *

    The binding is "tricky" in that it uses this class for both the stored + * data entry and the combined entity object. To do this, the key field(s) are + * transient and are set by the binding after the data object has been + * deserialized. This avoids the use of a SupplierData class completely.

    + * + *

    Since this class is used directly for data storage, it must be + * Serializable.

    + * + * @author Mark Hayes + */ +public class Supplier implements Serializable { + + private transient String number; + private String name; + private int status; + private String city; + + public Supplier(String number, String name, int status, String city) { + + this.number = number; + this.name = name; + this.status = status; + this.city = city; + } + + /** + * Set the transient key fields after deserializing. This method is only + * called by data bindings. + */ + void setKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public final String getName() { + + return name; + } + + public final int getStatus() { + + return status; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[Supplier: number=" + number + + " name=" + name + + " status=" + status + + " city=" + city + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/SupplierKey.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/SupplierKey.java new file mode 100644 index 000000000..bbd9e6042 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/SupplierKey.java @@ -0,0 +1,39 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SupplierKey.java,v 1.2 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.sentity; + +/** + * A SupplierKey serves as the key in the key/data pair for a supplier entity. + * + *

    In this sample, SupplierKey is bound to the key's tuple storage entry + * using a TupleBinding. Because it is not used directly as storage data, it + * does not need to be Serializable.

    + * + * @author Mark Hayes + */ +public class SupplierKey { + + private String number; + + public SupplierKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[SupplierKey: number=" + number + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/Weight.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/Weight.java new file mode 100644 index 000000000..c70bff093 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/sentity/Weight.java @@ -0,0 +1,50 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Weight.java,v 1.2 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.sentity; + +import java.io.Serializable; + +/** + * Weight represents a weight amount and unit of measure. + * + *

    In this sample, Weight is embedded in part data values which are stored + * as Java serialized objects; therefore Weight must be Serializable.

    + * + * @author Mark Hayes + */ +public class Weight implements Serializable { + + public final static String GRAMS = "grams"; + public final static String OUNCES = "ounces"; + + private double amount; + private String units; + + public Weight(double amount, String units) { + + this.amount = amount; + this.units = units; + } + + public final double getAmount() { + + return amount; + } + + public final String getUnits() { + + return units; + } + + public String toString() { + + return "[" + amount + ' ' + units + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/Part.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/Part.java new file mode 100644 index 000000000..039d02326 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/Part.java @@ -0,0 +1,73 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Part.java,v 1.2 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.tuple; + +/** + * A Part represents the combined key/data pair for a part entity. + * + *

In this sample, Part is created from the stored key/data entry using a + * TupleSerialBinding. See {@link SampleViews.PartBinding} for details. + * Since this class is not directly used for data storage, it does not need to + * be Serializable.
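A condensed sketch of the binding this paragraph points to (the tuple package's SampleViews.PartBinding, which appears later in this patch): unlike the sentity variant, the entity is rebuilt from, and broken back down into, separate key and data pieces.

    import com.sleepycat.bind.serial.ClassCatalog;
    import com.sleepycat.bind.serial.TupleSerialBinding;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    class PartBinding extends TupleSerialBinding {

        PartBinding(ClassCatalog catalog, Class dataClass) {
            super(catalog, dataClass);
        }

        // Combine the tuple key and the deserialized PartData into a Part.
        public Object entryToObject(TupleInput keyInput, Object dataInput) {
            String number = keyInput.readString();
            PartData data = (PartData) dataInput;
            return new Part(number, data.getName(), data.getColor(),
                            data.getWeight(), data.getCity());
        }

        // Write only the key field into the tuple key entry.
        public void objectToKey(Object object, TupleOutput output) {
            output.writeString(((Part) object).getNumber());
        }

        // Strip the entity back down to the Serializable PartData record.
        public Object objectToData(Object object) {
            Part part = (Part) object;
            return new PartData(part.getName(), part.getColor(),
                                part.getWeight(), part.getCity());
        }
    }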

    + * + * @author Mark Hayes + */ +public class Part { + + private String number; + private String name; + private String color; + private Weight weight; + private String city; + + public Part(String number, String name, String color, Weight weight, + String city) { + + this.number = number; + this.name = name; + this.color = color; + this.weight = weight; + this.city = city; + } + + public final String getNumber() { + + return number; + } + + public final String getName() { + + return name; + } + + public final String getColor() { + + return color; + } + + public final Weight getWeight() { + + return weight; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[Part: number=" + number + + " name=" + name + + " color=" + color + + " weight=" + weight + + " city=" + city + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/PartData.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/PartData.java new file mode 100644 index 000000000..634c51166 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/PartData.java @@ -0,0 +1,66 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: PartData.java,v 1.2 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.tuple; + +import java.io.Serializable; + +/** + * A PartData serves as the value in the key/value pair for a part entity. + * + *

    In this sample, PartData is used only as the storage data for the + * value, while the Part object is used as the value's object representation. + * Because it is used directly as storage data using serial format, it must be + * Serializable.

    + * + * @author Mark Hayes + */ +public class PartData implements Serializable { + + private String name; + private String color; + private Weight weight; + private String city; + + public PartData(String name, String color, Weight weight, String city) { + + this.name = name; + this.color = color; + this.weight = weight; + this.city = city; + } + + public final String getName() { + + return name; + } + + public final String getColor() { + + return color; + } + + public final Weight getWeight() { + + return weight; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[PartData: name=" + name + + " color=" + color + + " weight=" + weight + + " city=" + city + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/PartKey.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/PartKey.java new file mode 100644 index 000000000..455fc0418 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/PartKey.java @@ -0,0 +1,39 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: PartKey.java,v 1.2 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.tuple; + +/** + * A PartKey serves as the key in the key/data pair for a part entity. + * + *

    In this sample, PartKey is bound to the key's tuple storage entry using + * a TupleBinding. Because it is not used directly as storage data, it does + * not need to be Serializable.
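The same key binding also converts a PartKey into the raw entry that the base Database API expects, so collection views and direct lookups share one key format. A minimal sketch, assuming keyBinding is an instance of the PartKeyBinding defined in SampleViews and partDb is the open part store from SampleDatabase (the wrapper class and method names are hypothetical):

    import com.sleepycat.bind.EntryBinding;
    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseEntry;
    import com.sleepycat.db.OperationStatus;

    class PartLookup {
        static OperationStatus lookup(Database partDb, EntryBinding keyBinding)
            throws Exception {

            DatabaseEntry keyEntry = new DatabaseEntry();
            DatabaseEntry dataEntry = new DatabaseEntry();

            // The binding turns the key object into the stored tuple bytes...
            keyBinding.objectToEntry(new PartKey("P1"), keyEntry);

            // ...which can be used directly with the base Database API.
            return partDb.get(null, keyEntry, dataEntry, null);
        }
    }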

    + * + * @author Mark Hayes + */ +public class PartKey { + + private String number; + + public PartKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[PartKey: number=" + number + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/Sample.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/Sample.java new file mode 100644 index 000000000..2c9f8c66a --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/Sample.java @@ -0,0 +1,249 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Sample.java,v 1.4 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.tuple; + +import java.io.FileNotFoundException; +import java.util.Iterator; +import java.util.Set; + +import com.sleepycat.collections.StoredIterator; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.db.DatabaseException; + +/** + * Sample is the main entry point for the sample program and may be run as + * follows: + * + *
    + * java com.sleepycat.examples.collections.ship.tuple.Sample
+ *      [-h &lt;home directory&gt; ]
    + * 
    + * + *

    The default for the home directory is ./tmp -- the tmp subdirectory of + * the current directory where the sample is run. The home directory must exist + * before running the sample. To recreate the sample database from scratch, + * delete all files in the home directory before running the sample.
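A minimal sketch of the control flow described above and implemented by this Sample class: open the database and views, run each unit of work under a TransactionRunner (which retries after deadlocks), and always close the database. The wrapper class name is hypothetical; the rest uses only classes added by this patch.

    import com.sleepycat.collections.TransactionRunner;
    import com.sleepycat.collections.TransactionWorker;

    class SampleFlow {
        static void runSample(String homeDir) throws Exception {
            SampleDatabase db = new SampleDatabase(homeDir);
            try {
                final SampleViews views = new SampleViews(db);
                TransactionRunner runner =
                    new TransactionRunner(db.getEnvironment());
                // Each doWork() call runs in its own transaction; the runner
                // retries the work if a deadlock aborts the transaction.
                runner.run(new TransactionWorker() {
                    public void doWork() throws Exception {
                        views.getPartSet().add(new Part("P1", "Nut", "Red",
                            new Weight(12.0, Weight.GRAMS), "London"));
                    }
                });
            } finally {
                db.close();  // closes the stores, class catalog and environment
            }
        }
    }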

    + * + * @author Mark Hayes + */ +public class Sample { + + private SampleDatabase db; + private SampleViews views; + + /** + * Run the sample program. + */ + public static void main(String[] args) { + + System.out.println("\nRunning sample: " + Sample.class); + + // Parse the command line arguments. + // + String homeDir = "./tmp"; + for (int i = 0; i < args.length; i += 1) { + if (args[i].equals("-h") && i < args.length - 1) { + i += 1; + homeDir = args[i]; + } else { + System.err.println("Usage:\n java " + Sample.class.getName() + + "\n [-h ]"); + System.exit(2); + } + } + + // Run the sample. + // + Sample sample = null; + try { + sample = new Sample(homeDir); + sample.run(); + } catch (Exception e) { + // If an exception reaches this point, the last transaction did not + // complete. If the exception is RunRecoveryException, follow + // the Berkeley DB recovery procedures before running again. + e.printStackTrace(); + } finally { + if (sample != null) { + try { + // Always attempt to close the database cleanly. + sample.close(); + } catch (Exception e) { + System.err.println("Exception during database close:"); + e.printStackTrace(); + } + } + } + } + + /** + * Open the database and views. + */ + private Sample(String homeDir) + throws DatabaseException, FileNotFoundException { + + db = new SampleDatabase(homeDir); + views = new SampleViews(db); + } + + /** + * Close the database cleanly. + */ + private void close() + throws DatabaseException { + + db.close(); + } + + /** + * Run two transactions to populate and print the database. A + * TransactionRunner is used to ensure consistent handling of transactions, + * including deadlock retries. But the best transaction handling mechanism + * to use depends on the application. + */ + private void run() + throws Exception { + + TransactionRunner runner = new TransactionRunner(db.getEnvironment()); + runner.run(new PopulateDatabase()); + runner.run(new PrintDatabase()); + } + + /** + * Populate the database in a single transaction. + */ + private class PopulateDatabase implements TransactionWorker { + + public void doWork() + throws Exception { + addSuppliers(); + addParts(); + addShipments(); + } + } + + /** + * Print the database in a single transaction. All entities are printed + * and the indices are used to print the entities for certain keys. + * + *

    Note the use of special iterator() methods. These are used here + * with indices to find the shipments for certain keys. For details on + * database iterators see {@link StoredIterator}.
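These per-key lookups work because each secondary database is populated by a key creator. A condensed sketch of the ShipmentByPartKeyCreator that SampleDatabase (later in this patch) installs for the shipment-by-part index: the index key it emits is a single part-number string, which is exactly what the PartKey binding writes, so duplicates(new PartKey("P1")) matches the shipments for that part.

    import com.sleepycat.bind.serial.ClassCatalog;
    import com.sleepycat.bind.serial.TupleSerialKeyCreator;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    class ShipmentByPartKeyCreator extends TupleSerialKeyCreator {

        ShipmentByPartKeyCreator(ClassCatalog catalog, Class valueClass) {
            super(catalog, valueClass);
        }

        // The part number is the first field of the shipment's primary key,
        // so the deserialized shipment data is not needed at all.
        public boolean createSecondaryKey(TupleInput primaryKeyInput,
                                          Object valueInput,
                                          TupleOutput indexKeyOutput) {
            indexKeyOutput.writeString(primaryKeyInput.readString());
            return true;   // every shipment gets an index entry
        }
    }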

    + */ + private class PrintDatabase implements TransactionWorker { + + public void doWork() + throws Exception { + printValues("Parts", + views.getPartSet().iterator()); + printValues("Suppliers", + views.getSupplierSet().iterator()); + printValues("Suppliers for City Paris", + views.getSupplierByCityMap().duplicates( + "Paris").iterator()); + printValues("Shipments", + views.getShipmentSet().iterator()); + printValues("Shipments for Part P1", + views.getShipmentByPartMap().duplicates( + new PartKey("P1")).iterator()); + printValues("Shipments for Supplier S1", + views.getShipmentBySupplierMap().duplicates( + new SupplierKey("S1")).iterator()); + } + } + + /** + * Populate the part entities in the database. If the part set is not + * empty, assume that this has already been done. + */ + private void addParts() { + + Set parts = views.getPartSet(); + if (parts.isEmpty()) { + System.out.println("Adding Parts"); + parts.add(new Part("P1", "Nut", "Red", + new Weight(12.0, Weight.GRAMS), "London")); + parts.add(new Part("P2", "Bolt", "Green", + new Weight(17.0, Weight.GRAMS), "Paris")); + parts.add(new Part("P3", "Screw", "Blue", + new Weight(17.0, Weight.GRAMS), "Rome")); + parts.add(new Part("P4", "Screw", "Red", + new Weight(14.0, Weight.GRAMS), "London")); + parts.add(new Part("P5", "Cam", "Blue", + new Weight(12.0, Weight.GRAMS), "Paris")); + parts.add(new Part("P6", "Cog", "Red", + new Weight(19.0, Weight.GRAMS), "London")); + } + } + + /** + * Populate the supplier entities in the database. If the supplier set is + * not empty, assume that this has already been done. + */ + private void addSuppliers() { + + Set suppliers = views.getSupplierSet(); + if (suppliers.isEmpty()) { + System.out.println("Adding Suppliers"); + suppliers.add(new Supplier("S1", "Smith", 20, "London")); + suppliers.add(new Supplier("S2", "Jones", 10, "Paris")); + suppliers.add(new Supplier("S3", "Blake", 30, "Paris")); + suppliers.add(new Supplier("S4", "Clark", 20, "London")); + suppliers.add(new Supplier("S5", "Adams", 30, "Athens")); + } + } + + /** + * Populate the shipment entities in the database. If the shipment set + * is not empty, assume that this has already been done. + */ + private void addShipments() { + + Set shipments = views.getShipmentSet(); + if (shipments.isEmpty()) { + System.out.println("Adding Shipments"); + shipments.add(new Shipment("P1", "S1", 300)); + shipments.add(new Shipment("P2", "S1", 200)); + shipments.add(new Shipment("P3", "S1", 400)); + shipments.add(new Shipment("P4", "S1", 200)); + shipments.add(new Shipment("P5", "S1", 100)); + shipments.add(new Shipment("P6", "S1", 100)); + shipments.add(new Shipment("P1", "S2", 300)); + shipments.add(new Shipment("P2", "S2", 400)); + shipments.add(new Shipment("P2", "S3", 200)); + shipments.add(new Shipment("P2", "S4", 200)); + shipments.add(new Shipment("P4", "S4", 300)); + shipments.add(new Shipment("P5", "S4", 400)); + } + } + + /** + * Print the objects returned by an iterator of entity value objects. + * + *

    IMPORTANT: All database iterators must be closed to avoid + * serious database problems. If the iterator is not closed, the + * underlying Berkeley DB cursor is not closed either.
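A minimal sketch of that closing discipline, using the duplicates() view from this sample (the wrapper class name is hypothetical):

    import java.util.Iterator;
    import com.sleepycat.collections.StoredIterator;

    class PrintShipmentsForPart {
        static void print(SampleViews views) {
            Iterator i = views.getShipmentByPartMap()
                              .duplicates(new PartKey("P1")).iterator();
            try {
                while (i.hasNext()) {
                    System.out.println(i.next());
                }
            } finally {
                // Close the underlying cursor even if printing throws.
                StoredIterator.close(i);
            }
        }
    }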

    + */ + private void printValues(String label, Iterator iterator) { + + System.out.println("\n--- " + label + " ---"); + try { + while (iterator.hasNext()) { + System.out.println(iterator.next().toString()); + } + } finally { + // IMPORTANT: Use StoredIterator to close all database + // iterators. If java.util.Iterator is in hand, you can safely + // close it by calling StoredIterator.close(Iterator). + StoredIterator.close(iterator); + } + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/SampleDatabase.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/SampleDatabase.java new file mode 100644 index 000000000..73b5dce87 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/SampleDatabase.java @@ -0,0 +1,322 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SampleDatabase.java,v 1.4 2004/09/22 18:00:59 bostic Exp $ + */ + +package com.sleepycat.examples.collections.ship.tuple; + +import java.io.File; +import java.io.FileNotFoundException; + +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.bind.serial.TupleSerialKeyCreator; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseConfig; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.DatabaseType; +import com.sleepycat.db.Environment; +import com.sleepycat.db.EnvironmentConfig; +import com.sleepycat.db.SecondaryConfig; +import com.sleepycat.db.SecondaryDatabase; + +/** + * SampleDatabase defines the storage containers, indices and foreign keys + * for the sample database. + * + * @author Mark Hayes + */ +public class SampleDatabase { + + private static final String CLASS_CATALOG = "java_class_catalog"; + private static final String SUPPLIER_STORE = "supplier_store"; + private static final String PART_STORE = "part_store"; + private static final String SHIPMENT_STORE = "shipment_store"; + private static final String SHIPMENT_PART_INDEX = "shipment_part_index"; + private static final String SHIPMENT_SUPPLIER_INDEX = + "shipment_supplier_index"; + private static final String SUPPLIER_CITY_INDEX = "supplier_city_index"; + + private Environment env; + private Database partDb; + private Database supplierDb; + private Database shipmentDb; + private SecondaryDatabase supplierByCityDb; + private SecondaryDatabase shipmentByPartDb; + private SecondaryDatabase shipmentBySupplierDb; + private StoredClassCatalog javaCatalog; + + /** + * Open all storage containers, indices, and catalogs. + */ + public SampleDatabase(String homeDirectory) + throws DatabaseException, FileNotFoundException { + + // Open the Berkeley DB environment in transactional mode. + // + System.out.println("Opening environment in: " + homeDirectory); + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setInitializeCache(true); + envConfig.setInitializeLocking(true); + env = new Environment(new File(homeDirectory), envConfig); + + // Set the Berkeley DB config for opening all stores. + // + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + dbConfig.setType(DatabaseType.BTREE); + + // Create the Serial class catalog. 
This holds the serialized class + // format for all database records of serial format. + // + Database catalogDb = env.openDatabase(null, CLASS_CATALOG, null, + dbConfig); + javaCatalog = new StoredClassCatalog(catalogDb); + + // Open the Berkeley DB database for the part, supplier and shipment + // stores. The stores are opened with no duplicate keys allowed. + // + partDb = env.openDatabase(null, PART_STORE, null, dbConfig); + + supplierDb = env.openDatabase(null, SUPPLIER_STORE, null, dbConfig); + + shipmentDb = env.openDatabase(null, SHIPMENT_STORE, null, dbConfig); + + // Open the SecondaryDatabase for the city index of the supplier store, + // and for the part and supplier indices of the shipment store. + // Duplicate keys are allowed since more than one supplier may be in + // the same city, and more than one shipment may exist for the same + // supplier or part. A foreign key constraint is defined for the + // supplier and part indices to ensure that a shipment only refers to + // existing part and supplier keys. The CASCADE delete action means + // that shipments will be deleted if their associated part or supplier + // is deleted. + // + SecondaryConfig secConfig = new SecondaryConfig(); + secConfig.setTransactional(true); + secConfig.setAllowCreate(true); + secConfig.setType(DatabaseType.BTREE); + secConfig.setSortedDuplicates(true); + + secConfig.setKeyCreator(new SupplierByCityKeyCreator(javaCatalog, + SupplierData.class)); + supplierByCityDb = env.openSecondaryDatabase(null, SUPPLIER_CITY_INDEX, + null, + supplierDb, + secConfig); + + secConfig.setKeyCreator(new ShipmentByPartKeyCreator(javaCatalog, + ShipmentData.class)); + shipmentByPartDb = env.openSecondaryDatabase(null, SHIPMENT_PART_INDEX, + null, + shipmentDb, + secConfig); + + secConfig.setKeyCreator(new ShipmentBySupplierKeyCreator(javaCatalog, + ShipmentData.class)); + shipmentBySupplierDb = env.openSecondaryDatabase(null, + SHIPMENT_SUPPLIER_INDEX, + null, + shipmentDb, + secConfig); + } + + /** + * Return the storage environment for the database. + */ + public final Environment getEnvironment() { + + return env; + } + + /** + * Return the class catalog. + */ + public final StoredClassCatalog getClassCatalog() { + + return javaCatalog; + } + + /** + * Return the part storage container. + */ + public final Database getPartDatabase() { + + return partDb; + } + + /** + * Return the supplier storage container. + */ + public final Database getSupplierDatabase() { + + return supplierDb; + } + + /** + * Return the shipment storage container. + */ + public final Database getShipmentDatabase() { + + return shipmentDb; + } + + /** + * Return the shipment-by-part index. + */ + public final SecondaryDatabase getShipmentByPartDatabase() { + + return shipmentByPartDb; + } + + /** + * Return the shipment-by-supplier index. + */ + public final SecondaryDatabase getShipmentBySupplierDatabase() { + + return shipmentBySupplierDb; + } + + /** + * Return the supplier-by-city index. + */ + public final SecondaryDatabase getSupplierByCityDatabase() { + + return supplierByCityDb; + } + + /** + * Close all stores (closing a store automatically closes its indices). + */ + public void close() + throws DatabaseException { + + // Close secondary databases, then primary databases. + supplierByCityDb.close(); + shipmentByPartDb.close(); + shipmentBySupplierDb.close(); + partDb.close(); + supplierDb.close(); + shipmentDb.close(); + // And don't forget to close the catalog and the environment. 
+ javaCatalog.close(); + env.close(); + } + + /** + * The SecondaryKeyCreator for the SupplierByCity index. This is an + * extension of the abstract class TupleSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys are of the format + * TupleFormat and the data values are of the format SerialFormat. + */ + private static class SupplierByCityKeyCreator + extends TupleSerialKeyCreator { + + /** + * Construct the city key extractor. + * @param catalog is the class catalog. + * @param valueClass is the supplier value class. + */ + private SupplierByCityKeyCreator(ClassCatalog catalog, + Class valueClass) { + + super(catalog, valueClass); + } + + /** + * Extract the city key from a supplier key/value pair. The city key + * is stored in the supplier value, so the supplier key is not used. + */ + public boolean createSecondaryKey(TupleInput primaryKeyInput, + Object valueInput, + TupleOutput indexKeyOutput) { + + SupplierData supplierData = (SupplierData) valueInput; + String city = supplierData.getCity(); + if (city != null) { + indexKeyOutput.writeString(supplierData.getCity()); + return true; + } else { + return false; + } + } + } + + /** + * The SecondaryKeyCreator for the ShipmentByPart index. This is an + * extension of the abstract class TupleSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys are of the format + * TupleFormat and the data values are of the format SerialFormat. + */ + private static class ShipmentByPartKeyCreator + extends TupleSerialKeyCreator { + + /** + * Construct the part key extractor. + * @param catalog is the class catalog. + * @param valueClass is the shipment value class. + */ + private ShipmentByPartKeyCreator(ClassCatalog catalog, + Class valueClass) { + super(catalog, valueClass); + } + + /** + * Extract the part key from a shipment key/value pair. The part key + * is stored in the shipment key, so the shipment value is not used. + */ + public boolean createSecondaryKey(TupleInput primaryKeyInput, + Object valueInput, + TupleOutput indexKeyOutput) { + + String partNumber = primaryKeyInput.readString(); + // don't bother reading the supplierNumber + indexKeyOutput.writeString(partNumber); + return true; + } + } + + /** + * The SecondaryKeyCreator for the ShipmentBySupplier index. This is an + * extension of the abstract class TupleSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys are of the format + * TupleFormat and the data values are of the format SerialFormat. + */ + private static class ShipmentBySupplierKeyCreator + extends TupleSerialKeyCreator { + + /** + * Construct the supplier key extractor. + * @param catalog is the class catalog. + * @param valueClass is the shipment value class. + */ + private ShipmentBySupplierKeyCreator(ClassCatalog catalog, + Class valueClass) { + super(catalog, valueClass); + } + + /** + * Extract the supplier key from a shipment key/value pair. The + * supplier key is stored in the shipment key, so the shipment value is + * not used. 
+ */ + public boolean createSecondaryKey(TupleInput primaryKeyInput, + Object valueInput, + TupleOutput indexKeyOutput) { + + primaryKeyInput.readString(); // skip the partNumber + String supplierNumber = primaryKeyInput.readString(); + indexKeyOutput.writeString(supplierNumber); + return true; + } + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/SampleViews.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/SampleViews.java new file mode 100644 index 000000000..0d5604800 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/SampleViews.java @@ -0,0 +1,397 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SampleViews.java,v 1.3 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.tuple; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.TupleSerialBinding; +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.collections.StoredSortedValueSet; + +/** + * SampleViews defines the data bindings and collection views for the sample + * database. + * + * @author Mark Hayes + */ +public class SampleViews { + + private StoredSortedMap partMap; + private StoredSortedMap supplierMap; + private StoredSortedMap shipmentMap; + private StoredSortedMap shipmentByPartMap; + private StoredSortedMap shipmentBySupplierMap; + private StoredSortedMap supplierByCityMap; + + /** + * Create the data bindings and collection views. + */ + public SampleViews(SampleDatabase db) { + + // Create the data bindings. + // In this sample, EntityBinding classes are used to bind the stored + // key/data entry pair to a combined data object. For keys, a + // one-to-one binding is implemented with EntryBinding classes to bind + // the stored tuple entry to a key Object. + // + ClassCatalog catalog = db.getClassCatalog(); + EntryBinding partKeyBinding = + new PartKeyBinding(); + EntityBinding partDataBinding = + new PartBinding(catalog, PartData.class); + EntryBinding supplierKeyBinding = + new SupplierKeyBinding(); + EntityBinding supplierDataBinding = + new SupplierBinding(catalog, SupplierData.class); + EntryBinding shipmentKeyBinding = + new ShipmentKeyBinding(); + EntityBinding shipmentDataBinding = + new ShipmentBinding(catalog, ShipmentData.class); + EntryBinding cityKeyBinding = + TupleBinding.getPrimitiveBinding(String.class); + + // Create map views for all stores and indices. + // StoredSortedMap is used since the stores and indices are ordered + // (they use the DB_BTREE access method). 
+ // + partMap = + new StoredSortedMap(db.getPartDatabase(), + partKeyBinding, partDataBinding, true); + supplierMap = + new StoredSortedMap(db.getSupplierDatabase(), + supplierKeyBinding, supplierDataBinding, true); + shipmentMap = + new StoredSortedMap(db.getShipmentDatabase(), + shipmentKeyBinding, shipmentDataBinding, true); + shipmentByPartMap = + new StoredSortedMap(db.getShipmentByPartDatabase(), + partKeyBinding, shipmentDataBinding, true); + shipmentBySupplierMap = + new StoredSortedMap(db.getShipmentBySupplierDatabase(), + supplierKeyBinding, shipmentDataBinding, true); + supplierByCityMap = + new StoredSortedMap(db.getSupplierByCityDatabase(), + cityKeyBinding, supplierDataBinding, true); + } + + // The views returned below can be accessed using the java.util.Map or + // java.util.Set interfaces, or using the StoredSortedMap and + // StoredValueSet classes, which provide additional methods. The entity + // sets could be obtained directly from the Map.values() method but + // convenience methods are provided here to return them in order to avoid + // down-casting elsewhere. + + /** + * Return a map view of the part storage container. + */ + public StoredSortedMap getPartMap() { + + return partMap; + } + + /** + * Return a map view of the supplier storage container. + */ + public StoredSortedMap getSupplierMap() { + + return supplierMap; + } + + /** + * Return a map view of the shipment storage container. + */ + public StoredSortedMap getShipmentMap() { + + return shipmentMap; + } + + /** + * Return an entity set view of the part storage container. + */ + public StoredSortedValueSet getPartSet() { + + return (StoredSortedValueSet) partMap.values(); + } + + /** + * Return an entity set view of the supplier storage container. + */ + public StoredSortedValueSet getSupplierSet() { + + return (StoredSortedValueSet) supplierMap.values(); + } + + /** + * Return an entity set view of the shipment storage container. + */ + public StoredSortedValueSet getShipmentSet() { + + return (StoredSortedValueSet) shipmentMap.values(); + } + + /** + * Return a map view of the shipment-by-part index. + */ + public StoredSortedMap getShipmentByPartMap() { + + return shipmentByPartMap; + } + + /** + * Return a map view of the shipment-by-supplier index. + */ + public StoredSortedMap getShipmentBySupplierMap() { + + return shipmentBySupplierMap; + } + + /** + * Return a map view of the supplier-by-city index. + */ + public final StoredSortedMap getSupplierByCityMap() { + + return supplierByCityMap; + } + + /** + * PartKeyBinding is used to bind the stored key tuple entry for a part to + * a key object representation. + */ + private static class PartKeyBinding extends TupleBinding { + + /** + * Construct the binding object. + */ + private PartKeyBinding() { + } + + /** + * Create the key object from the stored key tuple entry. + */ + public Object entryToObject(TupleInput input) { + + String number = input.readString(); + return new PartKey(number); + } + + /** + * Create the stored key tuple entry from the key object. + */ + public void objectToEntry(Object object, TupleOutput output) { + + PartKey key = (PartKey) object; + output.writeString(key.getNumber()); + } + } + + /** + * PartBinding is used to bind the stored key/data entry pair for a part + * to a combined data object (entity). + */ + private static class PartBinding extends TupleSerialBinding { + + /** + * Construct the binding object. 
+ */ + private PartBinding(ClassCatalog classCatalog, Class dataClass) { + + super(classCatalog, dataClass); + } + + /** + * Create the entity by combining the stored key and data. + */ + public Object entryToObject(TupleInput keyInput, Object dataInput) { + + String number = keyInput.readString(); + PartData data = (PartData) dataInput; + return new Part(number, data.getName(), data.getColor(), + data.getWeight(), data.getCity()); + } + + /** + * Create the stored key from the entity. + */ + public void objectToKey(Object object, TupleOutput output) { + + Part part = (Part) object; + output.writeString(part.getNumber()); + } + + /** + * Create the stored data from the entity. + */ + public Object objectToData(Object object) { + + Part part = (Part) object; + return new PartData(part.getName(), part.getColor(), + part.getWeight(), part.getCity()); + } + } + + /** + * SupplierKeyBinding is used to bind the stored key tuple entry for a + * supplier to a key object representation. + */ + private static class SupplierKeyBinding extends TupleBinding { + + /** + * Construct the binding object. + */ + private SupplierKeyBinding() { + } + + /** + * Create the key object from the stored key tuple entry. + */ + public Object entryToObject(TupleInput input) { + + String number = input.readString(); + return new SupplierKey(number); + } + + /** + * Create the stored key tuple entry from the key object. + */ + public void objectToEntry(Object object, TupleOutput output) { + + SupplierKey key = (SupplierKey) object; + output.writeString(key.getNumber()); + } + } + + /** + * SupplierBinding is used to bind the stored key/data entry pair for a + * supplier to a combined data object (entity). + */ + private static class SupplierBinding extends TupleSerialBinding { + + /** + * Construct the binding object. + */ + private SupplierBinding(ClassCatalog classCatalog, Class dataClass) { + + super(classCatalog, dataClass); + } + + /** + * Create the entity by combining the stored key and data. + */ + public Object entryToObject(TupleInput keyInput, Object dataInput) { + + String number = keyInput.readString(); + SupplierData data = (SupplierData) dataInput; + return new Supplier(number, data.getName(), + data.getStatus(), data.getCity()); + } + + /** + * Create the stored key from the entity. + */ + public void objectToKey(Object object, TupleOutput output) { + + Supplier supplier = (Supplier) object; + output.writeString(supplier.getNumber()); + } + + /** + * Create the stored data from the entity. + */ + public Object objectToData(Object object) { + + Supplier supplier = (Supplier) object; + return new SupplierData(supplier.getName(), supplier.getStatus(), + supplier.getCity()); + } + } + + /** + * ShipmentKeyBinding is used to bind the stored key tuple entry for a + * shipment to a key object representation. + */ + private static class ShipmentKeyBinding extends TupleBinding { + + /** + * Construct the binding object. + */ + private ShipmentKeyBinding() { + } + + /** + * Create the key object from the stored key tuple entry. + */ + public Object entryToObject(TupleInput input) { + + String partNumber = input.readString(); + String supplierNumber = input.readString(); + return new ShipmentKey(partNumber, supplierNumber); + } + + /** + * Create the stored key tuple entry from the key object. 
+ */ + public void objectToEntry(Object object, TupleOutput output) { + + ShipmentKey key = (ShipmentKey) object; + output.writeString(key.getPartNumber()); + output.writeString(key.getSupplierNumber()); + } + } + + /** + * ShipmentBinding is used to bind the stored key/data entry pair for a + * shipment to a combined data object (entity). + */ + private static class ShipmentBinding extends TupleSerialBinding { + + /** + * Construct the binding object. + */ + private ShipmentBinding(ClassCatalog classCatalog, Class dataClass) { + + super(classCatalog, dataClass); + } + + /** + * Create the entity by combining the stored key and data. + */ + public Object entryToObject(TupleInput keyInput, Object dataInput) { + + String partNumber = keyInput.readString(); + String supplierNumber = keyInput.readString(); + ShipmentData data = (ShipmentData) dataInput; + return new Shipment(partNumber, supplierNumber, + data.getQuantity()); + } + + /** + * Create the stored key from the entity. + */ + public void objectToKey(Object object, TupleOutput output) { + + Shipment shipment = (Shipment) object; + output.writeString(shipment.getPartNumber()); + output.writeString(shipment.getSupplierNumber()); + } + + /** + * Create the stored data from the entity. + */ + public Object objectToData(Object object) { + + Shipment shipment = (Shipment) object; + return new ShipmentData(shipment.getQuantity()); + } + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/Shipment.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/Shipment.java new file mode 100644 index 000000000..596de0f05 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/Shipment.java @@ -0,0 +1,56 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Shipment.java,v 1.2 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.tuple; + +/** + * A Shipment represents the combined key/data pair for a shipment entity. + * + *

In this sample, Shipment is created from the stored key/data entry + * using a TupleSerialBinding. See {@link SampleViews.ShipmentBinding} for + * details. Since this class is not used directly for data storage, it does + * not need to be Serializable.

    + * + * @author Mark Hayes + */ +public class Shipment { + + private String partNumber; + private String supplierNumber; + private int quantity; + + public Shipment(String partNumber, String supplierNumber, int quantity) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + this.quantity = quantity; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public final int getQuantity() { + + return quantity; + } + + public String toString() { + + return "[Shipment: part=" + partNumber + + " supplier=" + supplierNumber + + " quantity=" + quantity + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/ShipmentData.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/ShipmentData.java new file mode 100644 index 000000000..ba56a203e --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/ShipmentData.java @@ -0,0 +1,43 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: ShipmentData.java,v 1.2 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.tuple; + +import java.io.Serializable; + +/** + * A ShipmentData serves as the value in the key/value pair for a shipment + * entity. + * + *

    In this sample, ShipmentData is used only as the storage data for the + * value, while the Shipment object is used as the value's object + * representation. Because it is used directly as storage data using + * serial format, it must be Serializable.

    + * + * @author Mark Hayes + */ +public class ShipmentData implements Serializable { + + private int quantity; + + public ShipmentData(int quantity) { + + this.quantity = quantity; + } + + public final int getQuantity() { + + return quantity; + } + + public String toString() { + + return "[ShipmentData: quantity=" + quantity + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/ShipmentKey.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/ShipmentKey.java new file mode 100644 index 000000000..35ddf46d5 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/ShipmentKey.java @@ -0,0 +1,47 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: ShipmentKey.java,v 1.2 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.tuple; + +/** + * A ShipmentKey serves as the key in the key/data pair for a shipment entity. + * + *

    In this sample, ShipmentKey is bound to the key's tuple storage entry + * using a TupleBinding. Because it is not used directly as storage data, it + * does not need to be Serializable.

    + * + * @author Mark Hayes + */ +public class ShipmentKey { + + private String partNumber; + private String supplierNumber; + + public ShipmentKey(String partNumber, String supplierNumber) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public String toString() { + + return "[ShipmentKey: supplier=" + supplierNumber + + " part=" + partNumber + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/Supplier.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/Supplier.java new file mode 100644 index 000000000..d990f8526 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/Supplier.java @@ -0,0 +1,64 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Supplier.java,v 1.2 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.tuple; + +/** + * A Supplier represents the combined key/data pair for a supplier entity. + * + *

In this sample, Supplier is created from the stored key/data entry + * using a TupleSerialBinding. See {@link SampleViews.SupplierBinding} for + * details. Since this class is not used directly for data storage, it does + * not need to be Serializable.

    + * + * @author Mark Hayes + */ +public class Supplier { + + private String number; + private String name; + private int status; + private String city; + + public Supplier(String number, String name, int status, String city) { + + this.number = number; + this.name = name; + this.status = status; + this.city = city; + } + + public final String getNumber() { + + return number; + } + + public final String getName() { + + return name; + } + + public final int getStatus() { + + return status; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[Supplier: number=" + number + + " name=" + name + + " status=" + status + + " city=" + city + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/SupplierData.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/SupplierData.java new file mode 100644 index 000000000..48e9064db --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/SupplierData.java @@ -0,0 +1,59 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SupplierData.java,v 1.2 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.tuple; + +import java.io.Serializable; + +/** + * A SupplierData serves as the value in the key/value pair for a supplier + * entity. + * + *

    In this sample, SupplierData is used only as the storage data for the + * value, while the Supplier object is used as the value's object + * representation. Because it is used directly as storage data using + * serial format, it must be Serializable.

    + * + * @author Mark Hayes + */ +public class SupplierData implements Serializable { + + private String name; + private int status; + private String city; + + public SupplierData(String name, int status, String city) { + + this.name = name; + this.status = status; + this.city = city; + } + + public final String getName() { + + return name; + } + + public final int getStatus() { + + return status; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[SupplierData: name=" + name + + " status=" + status + + " city=" + city + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/SupplierKey.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/SupplierKey.java new file mode 100644 index 000000000..6975a0614 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/SupplierKey.java @@ -0,0 +1,39 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SupplierKey.java,v 1.2 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.tuple; + +/** + * A SupplierKey serves as the key in the key/data pair for a supplier entity. + * + *

    In this sample, SupplierKey is bound to the key's tuple storage entry + * using a TupleBinding. Because it is not used directly as storage data, it + * does not need to be Serializable.

    + * + * @author Mark Hayes + */ +public class SupplierKey { + + private String number; + + public SupplierKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[SupplierKey: number=" + number + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/Weight.java b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/Weight.java new file mode 100644 index 000000000..7626dcecd --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/collections/ship/tuple/Weight.java @@ -0,0 +1,50 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Weight.java,v 1.2 2004/09/22 16:17:13 mark Exp $ + */ + +package com.sleepycat.examples.collections.ship.tuple; + +import java.io.Serializable; + +/** + * Weight represents a weight amount and unit of measure. + * + *

    In this sample, Weight is embedded in part data values which are stored + * as Java serialized objects; therefore Weight must be Serializable.
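Weight rides along inside the serialized PartData and SupplierData records, and the class descriptions for those serial-format records live in the shared class catalog. A condensed sketch of how SampleDatabase, earlier in this patch, opens that catalog (the wrapper class name is hypothetical):

    import com.sleepycat.bind.serial.StoredClassCatalog;
    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseConfig;
    import com.sleepycat.db.DatabaseType;
    import com.sleepycat.db.Environment;

    class CatalogSetup {
        static StoredClassCatalog openCatalog(Environment env) throws Exception {
            DatabaseConfig dbConfig = new DatabaseConfig();
            dbConfig.setTransactional(true);
            dbConfig.setAllowCreate(true);
            dbConfig.setType(DatabaseType.BTREE);

            // One catalog database holds the class descriptions for every
            // serial-format record (PartData, SupplierData, embedded Weight),
            // so each stored record carries only a small format identifier.
            Database catalogDb =
                env.openDatabase(null, "java_class_catalog", null, dbConfig);
            return new StoredClassCatalog(catalogDb);
        }
    }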

    + * + * @author Mark Hayes + */ +public class Weight implements Serializable { + + public final static String GRAMS = "grams"; + public final static String OUNCES = "ounces"; + + private double amount; + private String units; + + public Weight(double amount, String units) { + + this.amount = amount; + this.units = units; + } + + public final double getAmount() { + + return amount; + } + + public final String getUnits() { + + return units; + } + + public String toString() { + + return "[" + amount + ' ' + units + ']'; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/db/AccessExample.java b/db/examples_java/src/com/sleepycat/examples/db/AccessExample.java index 9e24cc153..12d2adc3a 100644 --- a/db/examples_java/src/com/sleepycat/examples/db/AccessExample.java +++ b/db/examples_java/src/com/sleepycat/examples/db/AccessExample.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: AccessExample.java,v 11.17 2003/03/27 23:05:31 gburd Exp $ + * $Id: AccessExample.java,v 11.19 2004/04/06 20:43:35 mjc Exp $ */ @@ -17,54 +17,45 @@ import java.io.InputStreamReader; import java.io.IOException; import java.io.PrintStream; -class AccessExample -{ +class AccessExample { private static final int EXIT_SUCCESS = 0; private static final int EXIT_FAILURE = 1; - public AccessExample() - { + public AccessExample() { } - public static void usage() - { - System.out.println("usage: java " + - "com.sleepycat.examples.db.AccessExample [-r] [database]\n"); - System.exit(EXIT_FAILURE); + public static void usage() { + System.out.println("usage: java " + + "com.sleepycat.examples.db.AccessExample [-r] [database]\n"); + System.exit(EXIT_FAILURE); } - public static void main(String argv[]) - { - boolean removeExistingDatabase = false; - String databaseName = "access.db"; - - for (int i = 0; i < argv.length; i++) { - if (argv[i].equals("-r")) { - removeExistingDatabase = true; - } else if (argv[i].equals("-?")) { - usage(); - } else if (argv[i].startsWith("-")) { - usage(); - } else { - if ((argv.length - i) != 1) - usage(); - databaseName = argv[i]; - break; - } - } - - try - { + public static void main(String[] argv) { + boolean removeExistingDatabase = false; + String databaseName = "access.db"; + + for (int i = 0; i < argv.length; i++) { + if (argv[i].equals("-r")) + removeExistingDatabase = true; + else if (argv[i].equals("-?")) + usage(); + else if (argv[i].startsWith("-")) + usage(); + else { + if ((argv.length - i) != 1) + usage(); + databaseName = argv[i]; + break; + } + } + + try { AccessExample app = new AccessExample(); app.run(removeExistingDatabase, databaseName); - } - catch (DbException dbe) - { + } catch (DatabaseException dbe) { System.err.println("AccessExample: " + dbe.toString()); System.exit(EXIT_FAILURE); - } - catch (FileNotFoundException fnfe) - { + } catch (FileNotFoundException fnfe) { System.err.println("AccessExample: " + fnfe.toString()); System.exit(EXIT_FAILURE); } @@ -74,9 +65,8 @@ class AccessExample // Prompts for a line, and keeps prompting until a non blank // line is returned. Returns null on erroror. 
// - static public String askForLine(InputStreamReader reader, - PrintStream out, String prompt) - { + public static String askForLine(InputStreamReader reader, + PrintStream out, String prompt) { String result = ""; while (result != null && result.length() == 0) { out.print(prompt); @@ -91,8 +81,7 @@ class AccessExample // Returns null on EOF. If EOF appears in the middle // of a line, returns that line, then null on next call. // - static public String getLine(InputStreamReader reader) - { + public static String getLine(InputStreamReader reader) { StringBuffer b = new StringBuffer(); int c; try { @@ -100,8 +89,7 @@ class AccessExample if (c != '\r') b.append((char)c); } - } - catch (IOException ioe) { + } catch (IOException ioe) { c = -1; } @@ -112,18 +100,20 @@ class AccessExample } public void run(boolean removeExistingDatabase, String databaseName) - throws DbException, FileNotFoundException - { + throws DatabaseException, FileNotFoundException { + // Remove the previous database. - if (removeExistingDatabase) - new File(databaseName).delete(); + if (removeExistingDatabase) + new File(databaseName).delete(); // Create the database object. // There is no environment for this simple example. - Db table = new Db(null, 0); - table.setErrorStream(System.err); - table.setErrorPrefix("AccessExample"); - table.open(null, databaseName, null, Db.DB_BTREE, Db.DB_CREATE, 0644); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setErrorStream(System.err); + dbConfig.setErrorPrefix("AccessExample"); + dbConfig.setType(DatabaseType.BTREE); + dbConfig.setAllowCreate(true); + Database table = new Database(databaseName, null, dbConfig); // // Insert records into the database, where the key is the user @@ -140,69 +130,54 @@ class AccessExample // See definition of StringDbt below // - StringDbt key = new StringDbt(line); - StringDbt data = new StringDbt(reversed); - - try - { - int err = 0; - if ((err = table.put(null, - key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) { - System.out.println("Key " + line + " already exists."); - } - } - catch (DbException dbe) - { + StringEntry key = new StringEntry(line); + StringEntry data = new StringEntry(reversed); + + try { + if (table.putNoOverwrite(null, key, data) == OperationStatus.KEYEXIST) + System.out.println("Key " + line + " already exists."); + } catch (DatabaseException dbe) { System.out.println(dbe.toString()); } } // Acquire an iterator for the table. - Dbc iterator; - iterator = table.cursor(null, 0); + Cursor cursor; + cursor = table.openCursor(null, null); // Walk through the table, printing the key/data pairs. // See class StringDbt defined below. // - StringDbt key = new StringDbt(); - StringDbt data = new StringDbt(); - while (iterator.get(key, data, Db.DB_NEXT) == 0) - { + StringEntry key = new StringEntry(); + StringEntry data = new StringEntry(); + while (cursor.getNext(key, data, null) == OperationStatus.SUCCESS) System.out.println(key.getString() + " : " + data.getString()); - } - iterator.close(); - table.close(0); + cursor.close(); + table.close(); } - // Here's an example of how you can extend a Dbt in a straightforward - // way to allow easy storage/retrieval of strings, or whatever - // kind of data you wish. We've declared it as a static inner - // class, but it need not be. + // Here's an example of how you can extend DatabaseEntry in a + // straightforward way to allow easy storage/retrieval of strings, + // or whatever kind of data you wish. We've declared it as a static + // inner class, but it need not be. 
// static /*inner*/ - class StringDbt extends Dbt - { - StringDbt() - { - setFlags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval + class StringEntry extends DatabaseEntry { + StringEntry() { } - StringDbt(String value) - { + StringEntry(String value) { setString(value); - setFlags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval } - void setString(String value) - { + void setString(String value) { byte[] data = value.getBytes(); setData(data); setSize(data.length); } - String getString() - { - return new String(getData(), 0, getSize()); + String getString() { + return new String(getData(), getOffset(), getSize()); } } } diff --git a/db/examples_java/src/com/sleepycat/examples/db/BtRecExample.java b/db/examples_java/src/com/sleepycat/examples/db/BtRecExample.java index ae25b082c..7baa9fdd4 100644 --- a/db/examples_java/src/com/sleepycat/examples/db/BtRecExample.java +++ b/db/examples_java/src/com/sleepycat/examples/db/BtRecExample.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: BtRecExample.java,v 11.17 2003/10/20 20:12:31 mjc Exp $ + * $Id: BtRecExample.java,v 11.21 2004/08/20 16:33:58 mjc Exp $ */ @@ -15,34 +15,34 @@ import java.io.BufferedReader; import java.io.File; import java.io.FileNotFoundException; import java.io.FileReader; -import java.io.FileWriter; import java.io.InputStreamReader; import java.io.IOException; import java.io.PrintStream; -public class BtRecExample -{ - static final String progname = "BtRecExample"; // Program name. +public class BtRecExample { + static final String progname = "BtRecExample"; // Program name. static final String database = "access.db"; static final String wordlist = "../test/wordlist"; BtRecExample(BufferedReader reader) - throws DbException, IOException, FileNotFoundException - { - int ret; + throws DatabaseException, IOException, FileNotFoundException { + + OperationStatus status; // Remove the previous database. File f = new File(database); f.delete(); - dbp = new Db(null, 0); + DatabaseConfig config = new DatabaseConfig(); - dbp.setErrorStream(System.err); - dbp.setErrorPrefix(progname); - dbp.setPageSize(1024); // 1K page sizes. + config.setErrorStream(System.err); + config.setErrorPrefix(progname); + config.setPageSize(1024); // 1K page sizes. - dbp.setFlags(Db.DB_RECNUM); // Record numbers. 
- dbp.open(null, database, null, Db.DB_BTREE, Db.DB_CREATE, 0664); + config.setBtreeRecordNumbers(true); + config.setType(DatabaseType.BTREE); + config.setAllowCreate(true); + db = new Database(database, null, config); // // Insert records into the database, where the key is the word @@ -57,24 +57,22 @@ public class BtRecExample String buf = numstr + '_' + reader.readLine(); StringBuffer rbuf = new StringBuffer(buf).reverse(); - StringDbt key = new StringDbt(buf); - StringDbt data = new StringDbt(rbuf.toString()); + StringEntry key = new StringEntry(buf); + StringEntry data = new StringEntry(rbuf.toString()); - if ((ret = dbp.put(null, key, data, Db.DB_NOOVERWRITE)) != 0) { - if (ret != Db.DB_KEYEXIST) - throw new DbException("Db.put failed" + ret); - } + status = db.putNoOverwrite(null, key, data); + if (status != OperationStatus.SUCCESS && + status!= OperationStatus.KEYEXIST) + throw new DatabaseException("Database.put failed " + status); } } - void run() - throws DbException - { + void run() throws DatabaseException { int recno; - int ret; + OperationStatus status; // Acquire a cursor for the database. - dbcp = dbp.cursor(null, 0); + cursor = db.openCursor(null, null); // // Prompt the user for a record number, then retrieve and display @@ -90,90 +88,83 @@ public class BtRecExample try { recno = Integer.parseInt(line); - } - catch (NumberFormatException nfe) { + } catch (NumberFormatException nfe) { System.err.println("Bad record number: " + nfe); continue; } // - // Start with a fresh key each time, the dbp.get() routine returns + // Start with a fresh key each time, the db.get() routine returns // the key and data pair, not just the key! // - RecnoStringDbt key = new RecnoStringDbt(recno, 100); - RecnoStringDbt data = new RecnoStringDbt(100); + RecnoStringEntry key = new RecnoStringEntry(recno, 100); + RecnoStringEntry data = new RecnoStringEntry(100); - if ((ret = dbcp.get(key, data, Db.DB_SET_RECNO)) != 0) { - throw new DbException("Dbc.get failed", ret); - } + status = cursor.getSearchRecordNumber(key, data, null); + if (status != OperationStatus.SUCCESS) + throw new DatabaseException("Cursor.setRecno failed: " + status); // Display the key and data. show("k/d\t", key, data); // Move the cursor a record forward. - if ((ret = dbcp.get(key, data, Db.DB_NEXT)) != 0) { - throw new DbException("Dbc.get failed", ret); - } + status = cursor.getNext(key, data, null); + if (status != OperationStatus.SUCCESS) + throw new DatabaseException("Cursor.getNext failed: " + status); // Display the key and data. show("next\t", key, data); - RecnoStringDbt datano = new RecnoStringDbt(100); + RecnoStringEntry datano = new RecnoStringEntry(100); // // Retrieve the record number for the following record into // local memory. // - if ((ret = dbcp.get(key, datano, Db.DB_GET_RECNO)) != 0) { - if (ret != Db.DB_NOTFOUND && ret != Db.DB_KEYEMPTY) { - throw new DbException("Dbc.get failed", ret); - } - } + status = cursor.getRecordNumber(datano, null); + if (status != OperationStatus.SUCCESS && + status != OperationStatus.NOTFOUND && + status != OperationStatus.KEYEMPTY) + throw new DatabaseException("Cursor.get failed: " + status); else { - recno = datano.getRecno(); + recno = datano.getRecordNumber(); System.out.println("retrieved recno: " + recno); } } - dbcp.close(); - dbcp = null; + cursor.close(); + cursor = null; } // // Print out the number of records in the database. 
// - void stats() - throws DbException - { - DbBtreeStat statp; + void stats() throws DatabaseException { + BtreeStats stats; - statp = (DbBtreeStat)dbp.stat(0); + stats = (BtreeStats)db.getStats(null, null); System.out.println(progname + ": database contains " + - statp.bt_ndata + " records"); + stats.getNumData() + " records"); } - void show(String msg, RecnoStringDbt key, RecnoStringDbt data) - throws DbException - { + void show(String msg, RecnoStringEntry key, RecnoStringEntry data) + throws DatabaseException { + System.out.println(msg + key.getString() + ": " + data.getString()); } - public void shutdown() - throws DbException - { - if (dbcp != null) { - dbcp.close(); - dbcp = null; + public void shutdown() throws DatabaseException { + if (cursor != null) { + cursor.close(); + cursor = null; } - if (dbp != null) { - dbp.close(0); - dbp = null; + if (db != null) { + db.close(); + db = null; } } - public static void main(String argv[]) - { - + public static void main(String[] argv) { try { // Open the word database. FileReader freader = new FileReader(wordlist); @@ -192,7 +183,7 @@ public class BtRecExample } catch (IOException ioe) { System.err.println(progname + ": open " + wordlist + ": " + ioe); System.exit (1); - } catch (DbException dbe) { + } catch (DatabaseException dbe) { System.err.println("Exception: " + dbe); System.exit(dbe.getErrno()); } @@ -203,9 +194,8 @@ public class BtRecExample // Prompts for a line, and keeps prompting until a non blank // line is returned. Returns null on erroror. // - static public String askForLine(InputStreamReader reader, - PrintStream out, String prompt) - { + public static String askForLine(InputStreamReader reader, + PrintStream out, String prompt) { String result = ""; while (result != null && result.length() == 0) { out.print(prompt); @@ -220,8 +210,7 @@ public class BtRecExample // Returns null on EOF. If EOF appears in the middle // of a line, returns that line, then null on next call. // - static public String getLine(InputStreamReader reader) - { + public static String getLine(InputStreamReader reader) { StringBuffer b = new StringBuffer(); int c; try { @@ -229,8 +218,7 @@ public class BtRecExample if (c != '\r') b.append((char)c); } - } - catch (IOException ioe) { + } catch (IOException ioe) { c = -1; } @@ -240,102 +228,62 @@ public class BtRecExample return b.toString(); } - private Dbc dbcp; - private Db dbp; + private Cursor cursor; + private Database db; - // Here's an example of how you can extend a Dbt in a straightforward - // way to allow easy storage/retrieval of strings. + // Here's an example of how you can extend DatabaseEntry in a + // straightforward way to allow easy storage/retrieval of strings. // We've declared it as a static inner class, but it need not be. 
// - static /*inner*/ - class StringDbt extends Dbt - { - StringDbt(byte[] arr) - { - setFlags(Db.DB_DBT_USERMEM); - setData(arr); - setSize(arr.length); - } - - StringDbt() - { - setFlags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval - } + static class StringEntry extends DatabaseEntry { + StringEntry() {} - StringDbt(String value) - { + StringEntry(String value) { setString(value); - setFlags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval } - void setString(String value) - { + void setString(String value) { byte[] data = value.getBytes(); setData(data); setSize(data.length); - // must set ulen because sometimes a string is returned - setUserBufferLength(data.length); } - String getString() - { + String getString() { return new String(getData(), 0, getSize()); } } - // Here's an example of how you can extend a Dbt to store - // (potentially) both recno's and strings in the same - // structure. + // Here's an example of how you can extend DatabaseEntry to store + // (potentially) both recno's and strings in the same structure. // - static /*inner*/ - class RecnoStringDbt extends Dbt - { - RecnoStringDbt(int maxsize) - { + static class RecnoStringEntry extends DatabaseEntry { + RecnoStringEntry(int maxsize) { this(0, maxsize); // let other constructor do most of the work } - RecnoStringDbt(int value, int maxsize) - { - setFlags(Db.DB_DBT_USERMEM); // do not allocate on retrieval - arr = new byte[maxsize]; - setData(arr); // use our local array for data - setUserBufferLength(maxsize); // size of return storage - setRecno(value); - } - - RecnoStringDbt(String value, int maxsize) - { - setFlags(Db.DB_DBT_USERMEM); // do not allocate on retrieval + RecnoStringEntry(int value, int maxsize) { arr = new byte[maxsize]; setData(arr); // use our local array for data - setUserBufferLength(maxsize); // size of return storage - setString(value); + setUserBuffer(maxsize, true); + setRecordNumber(value); } - void setRecno(int value) - { - setRecordNumber(value); - setSize(arr.length); + RecnoStringEntry(String value) { + byte[] data = value.getBytes(); + setData(data); // use our local array for data + setUserBuffer(data.length, true); } - void setString(String value) - { + void setString(String value) { byte[] data = value.getBytes(); setData(data); setSize(data.length); } - int getRecno() - { - return getRecordNumber(); - } - - String getString() - { - return new String(getData(), 0, getSize()); + String getString() { + return new String(getData(), getOffset(), getSize()); } - byte arr[]; + byte[] arr; } } diff --git a/db/examples_java/src/com/sleepycat/examples/db/BulkAccessExample.java b/db/examples_java/src/com/sleepycat/examples/db/BulkAccessExample.java index b3b1a75c2..bfecc69f4 100644 --- a/db/examples_java/src/com/sleepycat/examples/db/BulkAccessExample.java +++ b/db/examples_java/src/com/sleepycat/examples/db/BulkAccessExample.java @@ -1,13 +1,12 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. 
* - * $Id: BulkAccessExample.java,v 1.10 2003/03/27 23:05:31 gburd Exp $ + * $Id: BulkAccessExample.java,v 1.13 2004/09/22 18:00:59 bostic Exp $ */ - package com.sleepycat.examples.db; import com.sleepycat.db.*; @@ -17,28 +16,20 @@ import java.io.InputStreamReader; import java.io.IOException; import java.io.PrintStream; -class BulkAccessExample -{ +class BulkAccessExample { private static final String FileName = "access.db"; - public BulkAccessExample() - { + public BulkAccessExample() { } - public static void main(String argv[]) - { - try - { + public static void main(String[] argv) { + try { BulkAccessExample app = new BulkAccessExample(); app.run(); - } - catch (DbException dbe) - { + } catch (DatabaseException dbe) { System.err.println("BulkAccessExample: " + dbe.toString()); System.exit(1); - } - catch (FileNotFoundException fnfe) - { + } catch (FileNotFoundException fnfe) { System.err.println("BulkAccessExample: " + fnfe.toString()); System.exit(1); } @@ -48,9 +39,8 @@ class BulkAccessExample // Prompts for a line, and keeps prompting until a non blank // line is returned. Returns null on erroror. // - static public String askForLine(InputStreamReader reader, - PrintStream out, String prompt) - { + public static String askForLine(InputStreamReader reader, + PrintStream out, String prompt) { String result = ""; while (result != null && result.length() == 0) { out.print(prompt); @@ -65,8 +55,7 @@ class BulkAccessExample // Returns null on EOF. If EOF appears in the middle // of a line, returns that line, then null on next call. // - static public String getLine(InputStreamReader reader) - { + public static String getLine(InputStreamReader reader) { StringBuffer b = new StringBuffer(); int c; try { @@ -74,8 +63,7 @@ class BulkAccessExample if (c != '\r') b.append((char)c); } - } - catch (IOException ioe) { + } catch (IOException ioe) { c = -1; } @@ -85,18 +73,19 @@ class BulkAccessExample return b.toString(); } - public void run() - throws DbException, FileNotFoundException - { + public void run() throws DatabaseException, FileNotFoundException { // Remove the previous database. new File(FileName).delete(); // Create the database object. // There is no environment for this simple example. 
- Db table = new Db(null, 0); - table.setErrorStream(System.err); - table.setErrorPrefix("BulkAccessExample"); - table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644); + DatabaseConfig config = new DatabaseConfig(); + config.setErrorStream(System.err); + config.setErrorPrefix("BulkAccessExample"); + config.setType(DatabaseType.BTREE); + config.setAllowCreate(true); + config.setMode(0644); + Database table = new Database(FileName, null, config); // // Insert records into the database, where the key is the user @@ -111,88 +100,62 @@ class BulkAccessExample String reversed = (new StringBuffer(line)).reverse().toString(); - // See definition of StringDbt below + // See definition of StringEntry below // - StringDbt key = new StringDbt(line); - StringDbt data = new StringDbt(reversed); - - try - { - int err; - if ((err = table.put(null, - key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) { - System.out.println("Key " + line + " already exists."); - } - } - catch (DbException dbe) - { + StringEntry key = new StringEntry(line); + StringEntry data = new StringEntry(reversed); + + try { + if (table.putNoOverwrite(null, key, data) == OperationStatus.KEYEXIST) + System.out.println("Key " + line + " already exists."); + } catch (DatabaseException dbe) { System.out.println(dbe.toString()); } System.out.println(""); } - // Acquire a cursor for the table and two Dbts. - Dbc dbc = table.cursor(null, 0); - Dbt foo = new Dbt(); - foo.setFlags(Db.DB_DBT_MALLOC); - - Dbt bulk_data = new Dbt(); - - // Set Db.DB_DBT_USERMEM on the data Dbt; Db.DB_MULTIPLE_KEY requires - // it. Then allocate a byte array of a reasonable size; we'll - // go through the database in chunks this big. - bulk_data.setFlags(Db.DB_DBT_USERMEM); - bulk_data.setData(new byte[1000000]); - bulk_data.setUserBufferLength(1000000); + // Acquire a cursor for the table. + Cursor cursor = table.openCursor(null, null); + DatabaseEntry foo = new DatabaseEntry(); + MultipleKeyDataEntry bulk_data = new MultipleKeyDataEntry(); + bulk_data.setData(new byte[1024 * 1024]); + bulk_data.setUserBuffer(1024 * 1024, true); // Walk through the table, printing the key/data pairs. // - while (dbc.get(foo, bulk_data, Db.DB_NEXT | Db.DB_MULTIPLE_KEY) == 0) - { - DbMultipleKeyDataIterator iterator; - iterator = new DbMultipleKeyDataIterator(bulk_data); + while (cursor.getNext(foo, bulk_data, null) == OperationStatus.SUCCESS) { + StringEntry key, data; + key = new StringEntry(); + data = new StringEntry(); - StringDbt key, data; - key = new StringDbt(); - data = new StringDbt(); - - while (iterator.next(key, data)) { + while (bulk_data.next(key, data)) System.out.println(key.getString() + " : " + data.getString()); - } } - dbc.close(); - table.close(0); + cursor.close(); + table.close(); } - // Here's an example of how you can extend a Dbt in a straightforward - // way to allow easy storage/retrieval of strings, or whatever - // kind of data you wish. We've declared it as a static inner + // Here's an example of how you can extend DatabaseEntry in a + // straightforward way to allow easy storage/retrieval of strings, or + // whatever kind of data you wish. We've declared it as a static inner // class, but it need not be. 
// - static /*inner*/ - class StringDbt extends Dbt - { - StringDbt() - { - setFlags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval + static class StringEntry extends DatabaseEntry { + StringEntry() { } - StringDbt(String value) - { + StringEntry(String value) { setString(value); - setFlags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval } - void setString(String value) - { + void setString(String value) { byte[] data = value.getBytes(); setData(data); setSize(data.length); } - String getString() - { + String getString() { return new String(getData(), getOffset(), getSize()); } } diff --git a/db/examples_java/src/com/sleepycat/examples/db/EnvExample.java b/db/examples_java/src/com/sleepycat/examples/db/EnvExample.java index 8015212ef..9b407ea8d 100644 --- a/db/examples_java/src/com/sleepycat/examples/db/EnvExample.java +++ b/db/examples_java/src/com/sleepycat/examples/db/EnvExample.java @@ -1,127 +1,114 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: EnvExample.java,v 11.13 2003/03/21 23:28:38 gburd Exp $ + * $Id: EnvExample.java,v 11.15 2004/04/06 20:43:35 mjc Exp $ */ - package com.sleepycat.examples.db; import com.sleepycat.db.*; +import java.io.File; import java.io.FileNotFoundException; import java.io.OutputStream; /* - * An example of a program using DbEnv to configure its DB - * environment. + * An example of a program configuring a database environment. * * For comparison purposes, this example uses a similar structure * as examples/ex_env.c and examples_cxx/EnvExample.cpp. */ -public class EnvExample -{ +public class EnvExample { private static final String progname = "EnvExample"; - private static final String DATABASE_HOME = "/tmp/database"; + private static final File DATABASE_HOME = new File("/tmp/database"); + + private static void runApplication(Environment dbenv) + throws DatabaseException { - private static void db_application() - throws DbException - { // Do something interesting... // Your application goes here. } - private static void db_setup(String home, String data_dir, - OutputStream errs) - throws DbException, FileNotFoundException - { - // - // Create an environment object and initialize it for erroror - // reporting. - // - DbEnv dbenv = new DbEnv(0); - dbenv.setErrorStream(errs); - dbenv.setErrorPrefix(progname); + private static void setupEnvironment(File home, + String dataDir, + OutputStream errs) + throws DatabaseException, FileNotFoundException { + + // Create an environment object and initialize it for error reporting. + EnvironmentConfig config = new EnvironmentConfig(); + config.setErrorStream(errs); + config.setErrorPrefix(progname); // // We want to specify the shared memory buffer pool cachesize, // but everything else is the default. // - dbenv.setCacheSize(64 * 1024, 0); + config.setCacheSize(64 * 1024); - // Databases are in a subdirectory. - dbenv.setDataDir(data_dir); + // Databases are in a separate directory. + config.addDataDir(dataDir); + + // Open the environment with full transactional support. + config.setAllowCreate(true); + config.setInitializeCache(true); + config.setTransactional(true); + config.setInitializeLocking(true); - // Open the environment with full transactional support. - // - // open() will throw a DbException if there is an erroror. // // open is declared to throw a FileNotFoundException, which normally - // shouldn't occur with the DB_CREATE option. 
+ // shouldn't occur when allowCreate is set. // - dbenv.open(DATABASE_HOME, - Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_LOG | - Db.DB_INIT_MPOOL | Db.DB_INIT_TXN, 0); + Environment dbenv = new Environment(home, config); try { - // Start your application. - db_application(); - - } - finally { - - // Close the environment. Doing this in the - // finally block ensures it is done, even if - // an erroror is thrown. - // - dbenv.close(0); + runApplication(dbenv); + } finally { + // Close the environment. Doing this in the finally block ensures + // it is done, even if an error is thrown. + dbenv.close(); } } - private static void db_teardown(String home, String data_dir, - OutputStream errs) - throws DbException, FileNotFoundException - { - // Remove the shared database regions. + private static void teardownEnvironment(File home, + String dataDir, + OutputStream errs) + throws DatabaseException, FileNotFoundException { - DbEnv dbenv = new DbEnv(0); + // Remove the shared database regions. + EnvironmentConfig config = new EnvironmentConfig(); - dbenv.setErrorStream(errs); - dbenv.setErrorPrefix(progname); - dbenv.setDataDir(data_dir); - dbenv.remove(home, 0); + config.setErrorStream(errs); + config.setErrorPrefix(progname); + config.addDataDir(dataDir); + Environment.remove(home, true, config); } - public static void main(String[] args) - { + public static void main(String[] args) { // // All of the shared database files live in /tmp/database, - // but data files live in /database. + // but data files live in /database/files. // // Using Berkeley DB in C/C++, we need to allocate two elements // in the array and set config[1] to NULL. This is not // necessary in Java. // - String home = DATABASE_HOME; - String config = "/database/files"; + File home = DATABASE_HOME; + String dataDir = "/database/files"; try { System.out.println("Setup env"); - db_setup(home, config, System.err); + setupEnvironment(home, dataDir, System.err); System.out.println("Teardown env"); - db_teardown(home, config, System.err); - } - catch (DbException dbe) { + teardownEnvironment(home, dataDir, System.err); + } catch (DatabaseException dbe) { System.err.println(progname + ": environment open: " + dbe.toString()); System.exit (1); - } - catch (FileNotFoundException fnfe) { - System.err.println(progname + - ": unexpected open environment error " + fnfe); + } catch (FileNotFoundException fnfe) { + System.err.println(progname + ": unexpected open environment error " + fnfe); System.exit (1); } } diff --git a/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/ExampleDatabaseLoad.java b/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/ExampleDatabaseLoad.java new file mode 100644 index 000000000..d147be718 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/ExampleDatabaseLoad.java @@ -0,0 +1,228 @@ +// File: ExampleDatabaseLoad.java + +package com.sleepycat.examples.db.GettingStarted; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStreamReader; +import java.util.ArrayList; +import java.util.StringTokenizer; +import java.util.Vector; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.serial.SerialBinding; +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.db.DatabaseEntry; +import com.sleepycat.db.DatabaseException; + +public class ExampleDatabaseLoad { + + private static String myDbsPath = "./"; + private 
static File inventoryFile = new File("./inventory.txt"); + private static File vendorsFile = new File("./vendors.txt"); + + // DatabaseEntries used for loading records + private static DatabaseEntry theKey = new DatabaseEntry(); + private static DatabaseEntry theData = new DatabaseEntry(); + + // Encapsulates the databases. + private static MyDbs myDbs = new MyDbs(); + + private static void usage() { + System.out.println("ExampleDatabaseLoad [-h ]"); + System.out.println(" [-s ] [-v ]"); + System.exit(-1); + } + + + public static void main(String args[]) { + ExampleDatabaseLoad edl = new ExampleDatabaseLoad(); + try { + edl.run(args); + } catch (DatabaseException dbe) { + System.err.println("ExampleDatabaseLoad: " + dbe.toString()); + dbe.printStackTrace(); + } catch (Exception e) { + System.out.println("Exception: " + e.toString()); + e.printStackTrace(); + } finally { + myDbs.close(); + } + System.out.println("All done."); + } + + + private void run(String args[]) + throws DatabaseException { + // Parse the arguments list + parseArgs(args); + + myDbs.setup(myDbsPath); + + System.out.println("loading vendors db...."); + loadVendorsDb(); + + System.out.println("loading inventory db...."); + loadInventoryDb(); + } + + + private void loadVendorsDb() + throws DatabaseException { + + // loadFile opens a flat-text file that contains our data + // and loads it into a list for us to work with. The integer + // parameter represents the number of fields expected in the + // file. + ArrayList vendors = loadFile(vendorsFile, 8); + + // Now load the data into the database. The vendor's name is the + // key, and the data is a Vendor class object. + + // Need a serial binding for the data + EntryBinding dataBinding = + new SerialBinding(myDbs.getClassCatalog(), Vendor.class); + + for (int i = 0; i < vendors.size(); i++) { + String[] sArray = (String[])vendors.get(i); + Vendor theVendor = new Vendor(); + theVendor.setVendorName(sArray[0]); + theVendor.setAddress(sArray[1]); + theVendor.setCity(sArray[2]); + theVendor.setState(sArray[3]); + theVendor.setZipcode(sArray[4]); + theVendor.setBusinessPhoneNumber(sArray[5]); + theVendor.setRepName(sArray[6]); + theVendor.setRepPhoneNumber(sArray[7]); + + // The key is the vendor's name. + // ASSUMES THE VENDOR'S NAME IS UNIQUE! + String vendorName = theVendor.getVendorName(); + try { + theKey = new DatabaseEntry(vendorName.getBytes("UTF-8")); + } catch (IOException willNeverOccur) {} + + // Convert the Vendor object to a DatabaseEntry object + // using our SerialBinding + dataBinding.objectToEntry(theVendor, theData); + + // Put it in the database. + myDbs.getVendorDB().put(null, theKey, theData); + } + } + + + private void loadInventoryDb() + throws DatabaseException { + + // loadFile opens a flat-text file that contains our data + // and loads it into a list for us to work with. The integer + // parameter represents the number of fields expected in the + // file. + ArrayList inventoryArray = loadFile(inventoryFile, 6); + + // Now load the data into the database. The item's sku is the + // key, and the data is an Inventory class object. + + // Need a tuple binding for the Inventory class. 
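[Editorial aside, a minimal illustrative sketch that is not applied by this patch: it shows what the tuple binding mentioned in the comment above does, converting an Inventory object to a DatabaseEntry and back using only classes added elsewhere in this patch (InventoryBinding, Inventory). The sample field values are copied from the first inventory.txt record; the variable names are hypothetical.]

    // Sketch: round-trip an Inventory object through InventoryBinding.
    TupleBinding binding = new InventoryBinding();

    Inventory in = new Inventory();
    in.setItemName("Oranges");
    in.setSku("OranfruiRu6Ghr");
    in.setVendorPrice(0.71f);
    in.setVendorInventory(451);
    in.setCategory("fruits");
    in.setVendor("TriCounty Produce");

    DatabaseEntry entry = new DatabaseEntry();
    binding.objectToEntry(in, entry);                          // object -> bytes
    Inventory out = (Inventory) binding.entryToObject(entry);  // bytes -> object
    System.out.println(out.getItemName() + " from " + out.getVendor());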
+ TupleBinding inventoryBinding = new InventoryBinding(); + + for (int i = 0; i < inventoryArray.size(); i++) { + String[] sArray = (String[])inventoryArray.get(i); + String sku = sArray[1]; + try { + theKey = new DatabaseEntry(sku.getBytes("UTF-8")); + } catch (IOException willNeverOccur) {} + + Inventory theInventory = new Inventory(); + theInventory.setItemName(sArray[0]); + theInventory.setSku(sArray[1]); + theInventory.setVendorPrice((new Float(sArray[2])).floatValue()); + theInventory.setVendorInventory((new Integer(sArray[3])).intValue()); + theInventory.setCategory(sArray[4]); + theInventory.setVendor(sArray[5]); + + // Place the Vendor object on the DatabaseEntry object using our + // the tuple binding we implemented in InventoryBinding.java + inventoryBinding.objectToEntry(theInventory, theData); + + // Put it in the database. Note that this causes our secondary database + // to be automatically updated for us. + myDbs.getInventoryDB().put(null, theKey, theData); + } + } + + + private static void parseArgs(String args[]) { + int nArgs = args.length; + for(int i = 0; i < args.length; ++i) { + if (args[i].startsWith("-")) { + switch(args[i].charAt(1)) { + case 'h': + if (i < nArgs) { + myDbsPath = new String(args[++i]); + } + break; + case 'i': + if (i < nArgs) { + inventoryFile = new File(args[++i]); + } + break; + case 'v': + if (i < nArgs) { + vendorsFile = new File(args[++i]); + } + break; + default: + usage(); + } + } + } + } + + + private ArrayList loadFile(File theFile, int numFields) { + ArrayList records = new ArrayList(); + try { + String theLine = null; + FileInputStream fis = new FileInputStream(theFile); + BufferedReader br = new BufferedReader(new InputStreamReader(fis)); + while((theLine=br.readLine()) != null) { + String[] theLineArray = splitString(theLine, "#"); + if (theLineArray.length != numFields) { + System.out.println("Malformed line found in " + theFile.getPath()); + System.out.println("Line was: '" + theLine); + System.out.println("length found was: " + theLineArray.length); + System.exit(-1); + } + records.add(theLineArray); + } + } catch (FileNotFoundException e) { + System.err.println(theFile.getPath() + " does not exist."); + e.printStackTrace(); + usage(); + } catch (IOException e) { + System.err.println("IO Exception: " + e.toString()); + e.printStackTrace(); + System.exit(-1); + } + return records; + } + + + private static String[] splitString(String s, String delimiter) { + Vector resultVector = new Vector(); + StringTokenizer tokenizer = new StringTokenizer(s, delimiter); + while (tokenizer.hasMoreTokens()) + resultVector.add(tokenizer.nextToken()); + String[] resultArray = new String[resultVector.size()]; + resultVector.copyInto(resultArray); + return resultArray; + } + + + protected ExampleDatabaseLoad() {} +} diff --git a/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/ExampleDatabaseRead.java b/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/ExampleDatabaseRead.java new file mode 100644 index 000000000..140f58e43 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/ExampleDatabaseRead.java @@ -0,0 +1,202 @@ +// File: ExampleDatabaseRead + +package com.sleepycat.examples.db.GettingStarted; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.serial.SerialBinding; +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.db.Cursor; +import com.sleepycat.db.DatabaseEntry; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.LockMode; +import 
com.sleepycat.db.OperationStatus; +import com.sleepycat.db.SecondaryCursor; + +import java.io.IOException; + +public class ExampleDatabaseRead { + + private static String myDbsPath = "./"; + + // Encapsulates the database environment and databases. + private static MyDbs myDbs = new MyDbs(); + + private static TupleBinding inventoryBinding; + private static EntryBinding vendorBinding; + + // The item to locate if the -s switch is used + private static String locateItem; + + private static void usage() { + System.out.println("ExampleDatabaseRead [-h ]" + + "[-s ]"); + System.exit(-1); + } + + public static void main(String args[]) { + ExampleDatabaseRead edr = new ExampleDatabaseRead(); + try { + edr.run(args); + } catch (DatabaseException dbe) { + System.err.println("ExampleDatabaseRead: " + dbe.toString()); + dbe.printStackTrace(); + } finally { + myDbs.close(); + } + System.out.println("All done."); + } + + private void run(String args[]) + throws DatabaseException { + // Parse the arguments list + parseArgs(args); + + myDbs.setup(myDbsPath); + + // Setup our bindings. + inventoryBinding = new InventoryBinding(); + vendorBinding = + new SerialBinding(myDbs.getClassCatalog(), + Vendor.class); + + if (locateItem != null) { + showItem(); + } else { + showAllInventory(); + } + } + + private void showItem() throws DatabaseException { + + SecondaryCursor secCursor = null; + try { + // searchKey is the key that we want to find in the + // secondary db. + DatabaseEntry searchKey = + new DatabaseEntry(locateItem.getBytes("UTF-8")); + + // foundKey and foundData are populated from the primary + // entry that is associated with the secondary db key. + DatabaseEntry foundKey = new DatabaseEntry(); + DatabaseEntry foundData = new DatabaseEntry(); + + // open a secondary cursor + secCursor = + myDbs.getNameIndexDB().openSecondaryCursor(null, null); + + // Search for the secondary database entry. + OperationStatus retVal = + secCursor.getSearchKey(searchKey, foundKey, + foundData, LockMode.DEFAULT); + + // Display the entry, if one is found. 
Repeat until no more + // secondary duplicate entries are found + while(retVal == OperationStatus.SUCCESS) { + Inventory theInventory = + (Inventory)inventoryBinding.entryToObject(foundData); + displayInventoryRecord(foundKey, theInventory); + retVal = secCursor.getNextDup(searchKey, foundKey, + foundData, LockMode.DEFAULT); + } + } catch (Exception e) { + System.err.println("Error on inventory secondary cursor:"); + System.err.println(e.toString()); + e.printStackTrace(); + } finally { + if (secCursor != null) { + secCursor.close(); + } + } + } + + private void showAllInventory() + throws DatabaseException { + // Get a cursor + Cursor cursor = myDbs.getInventoryDB().openCursor(null, null); + + // DatabaseEntry objects used for reading records + DatabaseEntry foundKey = new DatabaseEntry(); + DatabaseEntry foundData = new DatabaseEntry(); + + try { // always want to make sure the cursor gets closed + while (cursor.getNext(foundKey, foundData, + LockMode.DEFAULT) == OperationStatus.SUCCESS) { + Inventory theInventory = + (Inventory)inventoryBinding.entryToObject(foundData); + displayInventoryRecord(foundKey, theInventory); + } + } catch (Exception e) { + System.err.println("Error on inventory cursor:"); + System.err.println(e.toString()); + e.printStackTrace(); + } finally { + cursor.close(); + } + } + + private void displayInventoryRecord(DatabaseEntry theKey, + Inventory theInventory) + throws DatabaseException { + + String theSKU = new String(theKey.getData()); + System.out.println(theSKU + ":"); + System.out.println("\t " + theInventory.getItemName()); + System.out.println("\t " + theInventory.getCategory()); + System.out.println("\t " + theInventory.getVendor()); + System.out.println("\t\tNumber in stock: " + + theInventory.getVendorInventory()); + System.out.println("\t\tPrice per unit: " + + theInventory.getVendorPrice()); + System.out.println("\t\tContact: "); + + DatabaseEntry searchKey = null; + try { + searchKey = + new DatabaseEntry(theInventory.getVendor().getBytes("UTF-8")); + } catch (IOException willNeverOccur) {} + DatabaseEntry foundVendor = new DatabaseEntry(); + + if (myDbs.getVendorDB().get(null, searchKey, foundVendor, + LockMode.DEFAULT) != OperationStatus.SUCCESS) { + System.out.println("Could not find vendor: " + + theInventory.getVendor() + "."); + System.exit(-1); + } else { + Vendor theVendor = + (Vendor)vendorBinding.entryToObject(foundVendor); + System.out.println("\t\t " + theVendor.getAddress()); + System.out.println("\t\t " + theVendor.getCity() + ", " + + theVendor.getState() + " " + theVendor.getZipcode()); + System.out.println("\t\t Business Phone: " + + theVendor.getBusinessPhoneNumber()); + System.out.println("\t\t Sales Rep: " + + theVendor.getRepName()); + System.out.println("\t\t " + + theVendor.getRepPhoneNumber()); + } + } + + protected ExampleDatabaseRead() {} + + private static void parseArgs(String args[]) { + int nArgs = args.length; + for(int i = 0; i < args.length; ++i) { + if (args[i].startsWith("-")) { + switch(args[i].charAt(1)) { + case 'h': + if (i < nArgs) { + myDbsPath = new String(args[++i]); + } + break; + case 's': + if (i < nArgs) { + locateItem = new String(args[++i]); + } + break; + default: + usage(); + } + } + } + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/Inventory.java b/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/Inventory.java new file mode 100644 index 000000000..77e061935 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/Inventory.java 
@@ -0,0 +1,62 @@ +// File: Inventory.java + +package com.sleepycat.examples.db.GettingStarted; + +public class Inventory { + + private String sku; + private String itemName; + private String category; + private String vendor; + private int vendorInventory; + private float vendorPrice; + + public void setSku(String data) { + sku = data; + } + + public void setItemName(String data) { + itemName = data; + } + + public void setCategory(String data) { + category = data; + } + + public void setVendorInventory(int data) { + vendorInventory = data; + } + + public void setVendor(String data) { + vendor = data; + } + + public void setVendorPrice(float data) { + vendorPrice = data; + } + + public String getSku() { + return sku; + } + + public String getItemName() { + return itemName; + } + + public String getCategory() { + return category; + } + + public int getVendorInventory() { + return vendorInventory; + } + + public String getVendor() { + return vendor; + } + + public float getVendorPrice() { + return vendorPrice; + } +} + diff --git a/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/InventoryBinding.java b/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/InventoryBinding.java new file mode 100644 index 000000000..81e96d23e --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/InventoryBinding.java @@ -0,0 +1,46 @@ +// File: InventoryBinding.java + +package com.sleepycat.examples.db.GettingStarted; + +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +public class InventoryBinding extends TupleBinding { + + // Implement this abstract method. Used to convert + // a DatabaseEntry to an Inventory object. + public Object entryToObject(TupleInput ti) { + + String sku = ti.readString(); + String itemName = ti.readString(); + String category = ti.readString(); + String vendor = ti.readString(); + int vendorInventory = ti.readInt(); + float vendorPrice = ti.readFloat(); + + Inventory inventory = new Inventory(); + inventory.setSku(sku); + inventory.setItemName(itemName); + inventory.setCategory(category); + inventory.setVendor(vendor); + inventory.setVendorInventory(vendorInventory); + inventory.setVendorPrice(vendorPrice); + + return inventory; + } + + // Implement this abstract method. Used to convert a + // Inventory object to a DatabaseEntry object. 
+ public void objectToEntry(Object object, TupleOutput to) { + + Inventory inventory = (Inventory)object; + + to.writeString(inventory.getSku()); + to.writeString(inventory.getItemName()); + to.writeString(inventory.getCategory()); + to.writeString(inventory.getVendor()); + to.writeInt(inventory.getVendorInventory()); + to.writeFloat(inventory.getVendorPrice()); + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/ItemNameKeyCreator.java b/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/ItemNameKeyCreator.java new file mode 100644 index 000000000..4a966ae23 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/ItemNameKeyCreator.java @@ -0,0 +1,37 @@ +// File: ItemNameKeyCreator.java + +package com.sleepycat.examples.db.GettingStarted; + +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.db.SecondaryKeyCreator; +import com.sleepycat.db.DatabaseEntry; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.SecondaryDatabase; + +public class ItemNameKeyCreator implements SecondaryKeyCreator { + + private TupleBinding theBinding; + + // Use the constructor to set the tuple binding + ItemNameKeyCreator(TupleBinding binding) { + theBinding = binding; + } + + // Abstract method that we must implement + public boolean createSecondaryKey(SecondaryDatabase secDb, + DatabaseEntry keyEntry, // From the primary + DatabaseEntry dataEntry, // From the primary + DatabaseEntry resultEntry) // set the key data on this. + throws DatabaseException { + + if (dataEntry != null) { + // Convert dataEntry to an Inventory object + Inventory inventoryItem = + (Inventory)theBinding.entryToObject(dataEntry); + // Get the item name and use that as the key + String theItem = inventoryItem.getItemName(); + resultEntry.setData(theItem.getBytes()); + } + return true; + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/MyDbs.java b/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/MyDbs.java new file mode 100644 index 000000000..9f4f254fb --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/MyDbs.java @@ -0,0 +1,157 @@ +// File: MyDbs.java + +package com.sleepycat.examples.db.GettingStarted; + +import java.io.FileNotFoundException; + +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseConfig; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.DatabaseType; +import com.sleepycat.db.SecondaryConfig; +import com.sleepycat.db.SecondaryDatabase; + + +public class MyDbs { + + // The databases that our application uses + private Database vendorDb = null; + private Database inventoryDb = null; + private Database classCatalogDb = null; + private SecondaryDatabase itemNameIndexDb = null; + + private String vendordb = "VendorDB.db"; + private String inventorydb = "InventoryDB.db"; + private String classcatalogdb = "ClassCatalogDB.db"; + private String itemnameindexdb = "ItemNameIndexDB.db"; + + // Needed for object serialization + private StoredClassCatalog classCatalog; + + // Our constructor does nothing + public MyDbs() {} + + // The setup() method opens all our databases + // for us. 
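[Editorial aside, a minimal illustrative sketch that is not applied by this patch: it mirrors how ExampleDatabaseLoad and ExampleDatabaseRead above drive this MyDbs wrapper, opening everything with setup(), using a handle, and closing in a finally block. The "./" path matches the default myDbsPath in those classes; the local variable names are hypothetical.]

    // Sketch: typical MyDbs lifecycle, as used by the example programs above.
    MyDbs dbs = new MyDbs();
    try {
        dbs.setup("./");                       // opens all four databases
        Database vendors = dbs.getVendorDB();  // handles are now available
        System.out.println("vendor db open: " + (vendors != null));
    } catch (DatabaseException dbe) {
        System.err.println("MyDbs sketch: " + dbe.toString());
    } finally {
        dbs.close();                           // MyDbs.close() handles errors itself
    }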
+ public void setup(String databasesHome) + throws DatabaseException { + + DatabaseConfig myDbConfig = new DatabaseConfig(); + SecondaryConfig mySecConfig = new SecondaryConfig(); + + myDbConfig.setErrorStream(System.err); + mySecConfig.setErrorStream(System.err); + myDbConfig.setErrorPrefix("MyDbs"); + mySecConfig.setErrorPrefix("MyDbs"); + myDbConfig.setType(DatabaseType.BTREE); + mySecConfig.setType(DatabaseType.BTREE); + myDbConfig.setAllowCreate(true); + mySecConfig.setAllowCreate(true); + + // Now open, or create and open, our databases + // Open the vendors and inventory databases + try { + vendordb = databasesHome + "/" + vendordb; + vendorDb = new Database(vendordb, + null, + myDbConfig); + + inventorydb = databasesHome + "/" + inventorydb; + inventoryDb = new Database(inventorydb, + null, + myDbConfig); + + // Open the class catalog db. This is used to + // optimize class serialization. + classcatalogdb = databasesHome + "/" + classcatalogdb; + classCatalogDb = new Database(classcatalogdb, + null, + myDbConfig); + } catch(FileNotFoundException fnfe) { + System.err.println("MyDbs: " + fnfe.toString()); + System.exit(-1); + } + + // Create our class catalog + classCatalog = new StoredClassCatalog(classCatalogDb); + + // Need a tuple binding for the Inventory class. + // We use the InventoryBinding class + // that we implemented for this purpose. + TupleBinding inventoryBinding = new InventoryBinding(); + + // Open the secondary database. We use this to create a + // secondary index for the inventory database + + // We want to maintain an index for the inventory entries based + // on the item name. So, instantiate the appropriate key creator + // and open a secondary database. + ItemNameKeyCreator keyCreator = + new ItemNameKeyCreator(new InventoryBinding()); + + + // Set up additional secondary properties + // Need to allow duplicates for our secondary database + mySecConfig.setSortedDuplicates(true); + mySecConfig.setAllowPopulate(true); // Allow autopopulate + mySecConfig.setKeyCreator(keyCreator); + + // Now open it + try { + itemnameindexdb = databasesHome + "/" + itemnameindexdb; + itemNameIndexDb = new SecondaryDatabase(itemnameindexdb, + null, + inventoryDb, + mySecConfig); + } catch(FileNotFoundException fnfe) { + System.err.println("MyDbs: " + fnfe.toString()); + System.exit(-1); + } + } + + // getter methods + public Database getVendorDB() { + return vendorDb; + } + + public Database getInventoryDB() { + return inventoryDb; + } + + public SecondaryDatabase getNameIndexDB() { + return itemNameIndexDb; + } + + public StoredClassCatalog getClassCatalog() { + return classCatalog; + } + + // Close the databases + public void close() { + try { + if (itemNameIndexDb != null) { + itemNameIndexDb.close(); + } + + if (vendorDb != null) { + vendorDb.close(); + } + + if (inventoryDb != null) { + inventoryDb.close(); + } + + if (classCatalogDb != null) { + classCatalogDb.close(); + } + + } catch(DatabaseException dbe) { + System.err.println("Error closing MyDbs: " + + dbe.toString()); + System.exit(-1); + } + } +} + diff --git a/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/Vendor.java b/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/Vendor.java new file mode 100644 index 000000000..8b5ee96f1 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/Vendor.java @@ -0,0 +1,82 @@ +// File: Vendor.java +package com.sleepycat.examples.db.GettingStarted; + +import java.io.Serializable; + +public class Vendor implements Serializable { + 
+ private String repName; + private String address; + private String city; + private String state; + private String zipcode; + private String bizPhoneNumber; + private String repPhoneNumber; + private String vendor; + + public void setRepName(String data) { + repName = data; + } + + public void setAddress(String data) { + address = data; + } + + public void setCity(String data) { + city = data; + } + + public void setState(String data) { + state = data; + } + + public void setZipcode(String data) { + zipcode = data; + } + + public void setBusinessPhoneNumber(String data) { + bizPhoneNumber = data; + } + + public void setRepPhoneNumber(String data) { + repPhoneNumber = data; + } + + public void setVendorName(String data) { + vendor = data; + } + + public String getRepName() { + return repName; + } + + public String getAddress() { + return address; + } + + public String getCity() { + return city; + } + + public String getState() { + return state; + } + + public String getZipcode() { + return zipcode; + } + + public String getBusinessPhoneNumber() { + return bizPhoneNumber; + } + + public String getRepPhoneNumber() { + return repPhoneNumber; + } + + public String getVendorName() { + return vendor; + } + +} + diff --git a/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/inventory.txt b/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/inventory.txt new file mode 100644 index 000000000..d6b68762c --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/inventory.txt @@ -0,0 +1,800 @@ +Oranges#OranfruiRu6Ghr#0.71#451#fruits#TriCounty Produce +Oranges#OranfruiXRPFn1#0.73#263#fruits#Simply Fresh +Oranges#OranfruiLEuzQj#0.69#261#fruits#Off the Vine +Apples#ApplfruiZls4Du#1.20#472#fruits#TriCounty Produce +Apples#Applfrui8fewZe#1.21#402#fruits#Simply Fresh +Apples#ApplfruiXoT6xG#1.20#728#fruits#Off the Vine +Bananas#BanafruipIlluX#0.50#207#fruits#TriCounty Produce +Bananas#BanafruiEQhWuj#0.50#518#fruits#Simply Fresh +Bananas#BanafruimpRgPO#0.50#741#fruits#Off the Vine +Almonds#AlmofruiPPCLz8#0.55#600#fruits#TriCounty Produce +Almonds#AlmofruidMyKmp#0.54#745#fruits#Simply Fresh +Almonds#Almofrui7K0xzH#0.53#405#fruits#Off the Vine +Allspice#AllsfruibJGK4R#0.94#669#fruits#TriCounty Produce +Allspice#Allsfruilfvoeg#0.94#244#fruits#Simply Fresh +Allspice#Allsfruio12BOS#0.95#739#fruits#Off the Vine +Apricot#AprifruijphEpM#0.89#560#fruits#TriCounty Produce +Apricot#AprifruiU1zIDn#0.91#980#fruits#Simply Fresh +Apricot#AprifruichcwYS#0.95#668#fruits#Off the Vine +Avocado#AvocfruiwYYomu#0.99#379#fruits#TriCounty Produce +Avocado#AvocfruiT6IwWE#1.02#711#fruits#Simply Fresh +Avocado#AvocfruisbK1h5#0.97#856#fruits#Off the Vine +Bael Fruit#BaelfruilAU7Hj#0.41#833#fruits#TriCounty Produce +Bael Fruit#BaelfruiX2KvqV#0.40#770#fruits#Simply Fresh +Bael Fruit#Baelfruidjne4e#0.39#778#fruits#Off the Vine +Betel Nut#BetefruiQYdHqQ#0.34#926#fruits#TriCounty Produce +Betel Nut#Betefrui32BKAz#0.37#523#fruits#Simply Fresh +Betel Nut#BetefruisaWzY4#0.34#510#fruits#Off the Vine +Black Walnut#BlacfruiXxIuMU#0.57#923#fruits#TriCounty Produce +Black Walnut#BlacfruiZXgY9t#0.59#312#fruits#Simply Fresh +Black Walnut#BlacfruikWO0vz#0.60#877#fruits#Off the Vine +Blueberry#BluefruiCbxb4t#1.02#276#fruits#TriCounty Produce +Blueberry#BluefruiBuCfgO#1.03#522#fruits#Simply Fresh +Blueberry#Bluefruixz8MkE#1.01#278#fruits#Off the Vine +Boysenberry#BoysfruizxyMuz#1.05#239#fruits#TriCounty Produce +Boysenberry#Boysfrui3hTRQu#1.09#628#fruits#Simply Fresh 
+Boysenberry#BoysfruinpLvr3#1.02#349#fruits#Off the Vine +Breadnut#Breafrui0kDPs6#0.31#558#fruits#TriCounty Produce +Breadnut#Breafrui44s3og#0.32#879#fruits#Simply Fresh +Breadnut#BreafruiwyLKhJ#0.30#407#fruits#Off the Vine +Cactus#Cactfruiyo2ddH#0.56#601#fruits#TriCounty Produce +Cactus#CactfruixTOLv5#0.54#477#fruits#Simply Fresh +Cactus#Cactfrui4ioUav#0.55#896#fruits#Off the Vine +California Wild Grape#CalifruiZsWAa6#0.78#693#fruits#TriCounty Produce +California Wild Grape#Califruid84xyt#0.83#293#fruits#Simply Fresh +California Wild Grape#CalifruiLSJFoJ#0.81#543#fruits#Off the Vine +Cashew#CashfruihaOFVP#0.37#221#fruits#TriCounty Produce +Cashew#Cashfruizzcw1E#0.38#825#fruits#Simply Fresh +Cashew#CashfruiqtMe2Q#0.38#515#fruits#Off the Vine +Chico Sapote#ChicfruiY534SX#0.47#216#fruits#TriCounty Produce +Chico Sapote#ChicfruiSqL3Lc#0.45#476#fruits#Simply Fresh +Chico Sapote#ChicfruiurzIp4#0.47#200#fruits#Off the Vine +Chinese Jello#ChinfruiyRg75u#0.64#772#fruits#TriCounty Produce +Chinese Jello#ChinfruiuIUj0X#0.65#624#fruits#Simply Fresh +Chinese Jello#ChinfruiwXbRrL#0.67#719#fruits#Off the Vine +Common Guava#Commfruib6znSI#0.80#483#fruits#TriCounty Produce +Common Guava#Commfrui6eUivL#0.81#688#fruits#Simply Fresh +Common Guava#CommfruibWKnz3#0.84#581#fruits#Off the Vine +Crabapple#CrabfruioY2L63#0.94#582#fruits#TriCounty Produce +Crabapple#Crabfruijxcxyt#0.94#278#fruits#Simply Fresh +Crabapple#CrabfruibvWd8K#0.95#213#fruits#Off the Vine +Cranberry#CranfruiJxmKr5#0.83#923#fruits#TriCounty Produce +Cranberry#CranfruiPlklAF#0.84#434#fruits#Simply Fresh +Cranberry#Cranfrui3G5XL9#0.84#880#fruits#Off the Vine +Damson Plum#DamsfruibMRMwe#0.98#782#fruits#TriCounty Produce +Damson Plum#DamsfruiV6wFLk#1.03#400#fruits#Simply Fresh +Damson Plum#DamsfruiLhqFrQ#0.98#489#fruits#Off the Vine +Date Palm#DatefruigS31GU#1.14#315#fruits#TriCounty Produce +Date Palm#DatefruipKPaJK#1.09#588#fruits#Simply Fresh +Date Palm#Datefrui5fTyNS#1.14#539#fruits#Off the Vine +Dragon's Eye#DragfruirGJ3aI#0.28#315#fruits#TriCounty Produce +Dragon's Eye#DragfruiBotxqt#0.27#705#fruits#Simply Fresh +Dragon's Eye#DragfruiPsSnV9#0.29#482#fruits#Off the Vine +East Indian Wine Palm#EastfruiNXFJuG#0.43#992#fruits#TriCounty Produce +East Indian Wine Palm#Eastfruiq06fRr#0.40#990#fruits#Simply Fresh +East Indian Wine Palm#Eastfrui4QUwl2#0.43#351#fruits#Off the Vine +English Walnut#EnglfruiBMtHtW#1.04#787#fruits#TriCounty Produce +English Walnut#EnglfruiHmVzxV#1.03#779#fruits#Simply Fresh +English Walnut#Englfrui18Tc9n#1.06#339#fruits#Off the Vine +False Mangosteen#FalsfruibkmYqH#0.66#971#fruits#TriCounty Produce +False Mangosteen#FalsfruipBsbcX#0.68#250#fruits#Simply Fresh +False Mangosteen#FalsfruiPrFfhe#0.70#386#fruits#Off the Vine +Fried Egg Tree#FriefruiihHUdc#0.29#649#fruits#TriCounty Produce +Fried Egg Tree#FriefruimdD1rf#0.28#527#fruits#Simply Fresh +Fried Egg Tree#FriefruivyAzYq#0.29#332#fruits#Off the Vine +Genipap#GenifruiDtKusQ#0.62#986#fruits#TriCounty Produce +Genipap#GenifruiXq32eP#0.61#326#fruits#Simply Fresh +Genipap#Genifruiphwwyq#0.61#794#fruits#Off the Vine +Ginger#GingfruiQLbRZI#0.28#841#fruits#TriCounty Produce +Ginger#GingfruiS8kK4p#0.29#432#fruits#Simply Fresh +Ginger#GingfruioL3Y4S#0.27#928#fruits#Off the Vine +Grapefruit#Grapfruih86Zxh#1.07#473#fruits#TriCounty Produce +Grapefruit#GrapfruiwL1v0N#1.08#878#fruits#Simply Fresh +Grapefruit#GrapfruihmJzWm#1.02#466#fruits#Off the Vine +Hackberry#HackfruiQjomN7#0.22#938#fruits#TriCounty Produce +Hackberry#HackfruiWS0eKp#0.20#780#fruits#Simply Fresh 
+Hackberry#Hackfrui0MIv6J#0.21#345#fruits#Off the Vine +Honey Locust#HonefruiebXGRc#1.08#298#fruits#TriCounty Produce +Honey Locust#HonefruiPSqILB#1.00#427#fruits#Simply Fresh +Honey Locust#Honefrui6UXtvW#1.03#422#fruits#Off the Vine +Japanese Plum#JapafruihTmoYR#0.40#658#fruits#TriCounty Produce +Japanese Plum#JapafruifGqz0l#0.40#700#fruits#Simply Fresh +Japanese Plum#JapafruiufWkLx#0.39#790#fruits#Off the Vine +Jojoba#JojofruisE0wTh#0.97#553#fruits#TriCounty Produce +Jojoba#JojofruiwiYLp2#1.02#969#fruits#Simply Fresh +Jojoba#JojofruigMD1ej#0.96#899#fruits#Off the Vine +Jostaberry#JostfruiglsEGV#0.50#300#fruits#TriCounty Produce +Jostaberry#JostfruiV3oo1h#0.52#423#fruits#Simply Fresh +Jostaberry#JostfruiUBerur#0.53#562#fruits#Off the Vine +Kangaroo Apple#KangfruiEQknz8#0.60#661#fruits#TriCounty Produce +Kangaroo Apple#KangfruiNabdFq#0.60#377#fruits#Simply Fresh +Kangaroo Apple#Kangfrui7hky1i#0.60#326#fruits#Off the Vine +Ken's Red#Ken'fruinPUSIm#0.21#337#fruits#TriCounty Produce +Ken's Red#Ken'fruiAoZlpl#0.21#902#fruits#Simply Fresh +Ken's Red#Ken'frui5rmbd4#0.22#972#fruits#Off the Vine +Ketembilla#Ketefrui3yAKxQ#0.31#303#fruits#TriCounty Produce +Ketembilla#KetefruiROn6F5#0.34#283#fruits#Simply Fresh +Ketembilla#Ketefrui16Rsts#0.33#887#fruits#Off the Vine +King Orange#KingfruisOFzWk#0.74#429#fruits#TriCounty Produce +King Orange#KingfruiBmzRJT#0.74#500#fruits#Simply Fresh +King Orange#KingfruiGsrgRX#0.78#994#fruits#Off the Vine +Kola Nut#KolafruiBbtAuw#0.58#991#fruits#TriCounty Produce +Kola Nut#KolafruirbnLVS#0.62#733#fruits#Simply Fresh +Kola Nut#Kolafrui1ItXJx#0.58#273#fruits#Off the Vine +Kuko#Kukofrui6YH5Ds#0.41#647#fruits#TriCounty Produce +Kuko#Kukofrui7WZaZK#0.39#241#fruits#Simply Fresh +Kuko#Kukofruig9MQFT#0.40#204#fruits#Off the Vine +Kumquat#KumqfruiT6WKQL#0.73#388#fruits#TriCounty Produce +Kumquat#KumqfruidLiFLU#0.70#393#fruits#Simply Fresh +Kumquat#KumqfruiL6zhQX#0.71#994#fruits#Off the Vine +Kwai Muk#KwaifruiQK1zOE#1.10#249#fruits#TriCounty Produce +Kwai Muk#KwaifruifbCRlT#1.14#657#fruits#Simply Fresh +Kwai Muk#Kwaifruipe7T2m#1.09#617#fruits#Off the Vine +Lanzone#LanzfruijsPf1v#0.34#835#fruits#TriCounty Produce +Lanzone#LanzfruibU3QoL#0.34#404#fruits#Simply Fresh +Lanzone#LanzfruiYgHwv6#0.34#237#fruits#Off the Vine +Lemon#Lemofrui4Tgsg2#0.46#843#fruits#TriCounty Produce +Lemon#LemofruivK6qvj#0.43#207#fruits#Simply Fresh +Lemon#LemofruiXSXqJ0#0.44#910#fruits#Off the Vine +Lemon Grass#LemofruiVFgVh5#0.40#575#fruits#TriCounty Produce +Lemon Grass#LemofruiWIelvi#0.41#386#fruits#Simply Fresh +Lemon Grass#LemofruiGVAow0#0.39#918#fruits#Off the Vine +Lilly-pilly#LillfruiEQnW1m#1.21#974#fruits#TriCounty Produce +Lilly-pilly#LillfruiMqVuR5#1.23#303#fruits#Simply Fresh +Lilly-pilly#LillfruiVGH9p4#1.17#512#fruits#Off the Vine +Ling Nut#LingfruiGtOf8X#0.85#540#fruits#TriCounty Produce +Ling Nut#LingfruiuP0Jf9#0.83#200#fruits#Simply Fresh +Ling Nut#LingfruiuO5qf5#0.81#319#fruits#Off the Vine +Lipote#LipofruisxD2Qc#0.85#249#fruits#TriCounty Produce +Lipote#LipofruiHNdIqL#0.85#579#fruits#Simply Fresh +Lipote#LipofruiSQ2pKK#0.83#472#fruits#Off the Vine +Litchee#Litcfrui1R6Ydz#0.99#806#fruits#TriCounty Produce +Litchee#LitcfruiwtDM79#1.01#219#fruits#Simply Fresh +Litchee#LitcfruilpPZbC#1.05#419#fruits#Off the Vine +Longan#LongfruiEI0lWF#1.02#573#fruits#TriCounty Produce +Longan#LongfruiPQxxSF#1.04#227#fruits#Simply Fresh +Longan#LongfruisdI812#0.99#993#fruits#Off the Vine +Love-in-a-mist#LovefruiKYPW70#0.69#388#fruits#TriCounty Produce +Love-in-a-mist#LovefruiHrgjDa#0.67#478#fruits#Simply 
Fresh +Love-in-a-mist#LovefruipSOWVz#0.71#748#fruits#Off the Vine +Lychee#LychfruiicVLnY#0.38#276#fruits#TriCounty Produce +Lychee#LychfruiGY6yJr#0.38#602#fruits#Simply Fresh +Lychee#LychfruiTzDCq2#0.40#572#fruits#Off the Vine +Mabolo#MabofruiSY8RQS#0.97#263#fruits#TriCounty Produce +Mabolo#MabofruiOWWk0n#0.98#729#fruits#Simply Fresh +Mabolo#MabofruixQLOTF#0.98#771#fruits#Off the Vine +Macadamia Nut#MacafruiZppJPw#1.22#888#fruits#TriCounty Produce +Macadamia Nut#MacafruiI7XFMV#1.24#484#fruits#Simply Fresh +Macadamia Nut#Macafrui4x8bxV#1.20#536#fruits#Off the Vine +Madagascar Plum#MadafruiVj5fDf#1.14#596#fruits#TriCounty Produce +Madagascar Plum#MadafruivJhAFI#1.15#807#fruits#Simply Fresh +Madagascar Plum#Madafrui7MTe1x#1.17#355#fruits#Off the Vine +Magnolia Vine#MagnfruiigN4Y1#1.17#321#fruits#TriCounty Produce +Magnolia Vine#MagnfruicKtiHd#1.15#353#fruits#Simply Fresh +Magnolia Vine#MagnfruiLPDSCp#1.23#324#fruits#Off the Vine +Mamey#Mamefrui5rjLF6#0.36#683#fruits#TriCounty Produce +Mamey#MamefruiM6ndnR#0.38#404#fruits#Simply Fresh +Mamey#Mamefruiq9KntD#0.36#527#fruits#Off the Vine +Mandarin Orange#MandfruiRKpmKL#0.42#352#fruits#TriCounty Produce +Mandarin Orange#Mandfrui1V0KLG#0.42#548#fruits#Simply Fresh +Mandarin Orange#Mandfruig2o9Fg#0.41#686#fruits#Off the Vine +Marany Nut#MarafruiqkrwoJ#1.14#273#fruits#TriCounty Produce +Marany Nut#MarafruiCGKpke#1.12#482#fruits#Simply Fresh +Marany Nut#MarafruiB1YE5x#1.09#412#fruits#Off the Vine +Marula#MarufruiXF4biH#0.22#403#fruits#TriCounty Produce +Marula#MarufruidZiVKZ#0.23#317#fruits#Simply Fresh +Marula#MarufruiIS8BEp#0.21#454#fruits#Off the Vine +Mayhaw#MayhfruiCSrm7k#0.24#220#fruits#TriCounty Produce +Mayhaw#MayhfruiNRDzWs#0.25#710#fruits#Simply Fresh +Mayhaw#MayhfruiIUCyEg#0.24#818#fruits#Off the Vine +Meiwa Kumquat#MeiwfruiYhv3AY#0.21#997#fruits#TriCounty Produce +Meiwa Kumquat#MeiwfruiyzQFNR#0.22#347#fruits#Simply Fresh +Meiwa Kumquat#Meiwfruict4OUp#0.21#923#fruits#Off the Vine +Mexican Barberry#Mexifrui2P2dXi#0.28#914#fruits#TriCounty Produce +Mexican Barberry#MexifruiywUTMI#0.29#782#fruits#Simply Fresh +Mexican Barberry#MexifruijPHu5X#0.29#367#fruits#Off the Vine +Meyer Lemon#Meyefruin9901J#0.38#824#fruits#TriCounty Produce +Meyer Lemon#MeyefruiNeQpjO#0.37#617#fruits#Simply Fresh +Meyer Lemon#MeyefruiYEVznZ#0.37#741#fruits#Off the Vine +Mississippi Honeyberry#Missfruipb5iW3#0.95#595#fruits#TriCounty Produce +Mississippi Honeyberry#MissfruiINiDbB#0.96#551#fruits#Simply Fresh +Mississippi Honeyberry#MissfruiNUQ82a#0.93#396#fruits#Off the Vine +Monkey Pot#MonkfruiXlTW4j#0.90#896#fruits#TriCounty Produce +Monkey Pot#Monkfrui1p7a4h#0.88#344#fruits#Simply Fresh +Monkey Pot#Monkfrui4eKggb#0.92#917#fruits#Off the Vine +Monos Plum#Monofrui0Mv9aV#1.11#842#fruits#TriCounty Produce +Monos Plum#Monofrui6iTGQY#1.14#570#fruits#Simply Fresh +Monos Plum#MonofruiNu2uGH#1.13#978#fruits#Off the Vine +Moosewood#MoosfruiMXEGex#0.86#969#fruits#TriCounty Produce +Moosewood#Moosfrui8805mB#0.86#963#fruits#Simply Fresh +Moosewood#MoosfruiOsnDFL#0.88#594#fruits#Off the Vine +Natal Orange#NatafruitB8Kh2#0.42#332#fruits#TriCounty Produce +Natal Orange#NatafruiOhqRrd#0.42#982#fruits#Simply Fresh +Natal Orange#NatafruiRObMf6#0.41#268#fruits#Off the Vine +Nectarine#NectfruilNfeD8#0.36#601#fruits#TriCounty Produce +Nectarine#NectfruiQfjt6b#0.35#818#fruits#Simply Fresh +Nectarine#Nectfrui5U7U96#0.37#930#fruits#Off the Vine +Neem Tree#NeemfruiCruEMF#0.24#222#fruits#TriCounty Produce +Neem Tree#NeemfruiGv0pv5#0.24#645#fruits#Simply Fresh +Neem 
Tree#NeemfruiUFPVfk#0.25#601#fruits#Off the Vine +New Zealand Spinach#New fruihDIgec#0.87#428#fruits#TriCounty Produce +New Zealand Spinach#New fruiaoR9TP#0.87#630#fruits#Simply Fresh +New Zealand Spinach#New fruiy8LBul#0.94#570#fruits#Off the Vine +Olosapo#OlosfruiGXvaMm#0.76#388#fruits#TriCounty Produce +Olosapo#OlosfruiESlpB3#0.76#560#fruits#Simply Fresh +Olosapo#OlosfruiFNEkER#0.76#962#fruits#Off the Vine +Oregon Grape#OregfruiWxhzrf#1.14#892#fruits#TriCounty Produce +Oregon Grape#OregfruiMgjHUn#1.20#959#fruits#Simply Fresh +Oregon Grape#OregfruiC5UCxX#1.17#419#fruits#Off the Vine +Otaheite Apple#OtahfruilT0iFj#0.21#579#fruits#TriCounty Produce +Otaheite Apple#Otahfrui92PyMY#0.22#857#fruits#Simply Fresh +Otaheite Apple#OtahfruiLGD1EH#0.20#807#fruits#Off the Vine +Oyster Plant#OystfruimGxOsj#0.77#835#fruits#TriCounty Produce +Oyster Plant#Oystfrui1kudBX#0.81#989#fruits#Simply Fresh +Oyster Plant#OystfruiaX3uO2#0.80#505#fruits#Off the Vine +Panama Berry#PanafruiZG0Vp4#1.19#288#fruits#TriCounty Produce +Panama Berry#PanafruiobvXPE#1.21#541#fruits#Simply Fresh +Panama Berry#PanafruipaW8F3#1.16#471#fruits#Off the Vine +Peach Tomato#PeacfruiQpovYH#1.20#475#fruits#TriCounty Produce +Peach Tomato#PeacfruixYXLTN#1.18#655#fruits#Simply Fresh +Peach Tomato#PeacfruiILDYAp#1.23#876#fruits#Off the Vine +Peanut#Peanfruiy8M7pt#0.69#275#fruits#TriCounty Produce +Peanut#PeanfruiEimbED#0.65#307#fruits#Simply Fresh +Peanut#Peanfruic452Vc#0.68#937#fruits#Off the Vine +Peanut Butter Fruit#PeanfruixEDt9Y#0.27#628#fruits#TriCounty Produce +Peanut Butter Fruit#PeanfruiST0T0R#0.27#910#fruits#Simply Fresh +Peanut Butter Fruit#Peanfrui7jeRN2#0.27#938#fruits#Off the Vine +Pear#PearfruiB5YmSJ#0.20#945#fruits#TriCounty Produce +Pear#PearfruiA93XZx#0.21#333#fruits#Simply Fresh +Pear#PearfruioNKiIf#0.21#715#fruits#Off the Vine +Pecan#PecafruiiTIv1Z#0.26#471#fruits#TriCounty Produce +Pecan#PecafruiMGkqla#0.26#889#fruits#Simply Fresh +Pecan#Pecafrui1szYz2#0.25#929#fruits#Off the Vine +Purple Passion Fruit#Purpfrui4mMGkD#1.04#914#fruits#TriCounty Produce +Purple Passion Fruit#Purpfrui5XOW3K#1.06#423#fruits#Simply Fresh +Purple Passion Fruit#PurpfruifDTAgW#1.05#549#fruits#Off the Vine +Red Mulberry#Red fruiVLOXIW#1.24#270#fruits#TriCounty Produce +Red Mulberry#Red fruiXNXt4a#1.21#836#fruits#Simply Fresh +Red Mulberry#Red fruiUseWLG#1.21#795#fruits#Off the Vine +Red Princess#Red fruigJLR4V#0.23#829#fruits#TriCounty Produce +Red Princess#Red fruinVKps5#0.23#558#fruits#Simply Fresh +Red Princess#Red frui0jl9mg#0.24#252#fruits#Off the Vine +Striped Screw Pine#StrifruiUKzjoU#0.60#226#fruits#TriCounty Produce +Striped Screw Pine#StrifruivWLDzH#0.64#685#fruits#Simply Fresh +Striped Screw Pine#StrifruiiF7CGH#0.60#983#fruits#Off the Vine +Tapioca#Tapifruib4LCqt#0.40#955#fruits#TriCounty Produce +Tapioca#TapifruiwgQLj9#0.41#889#fruits#Simply Fresh +Tapioca#TapifruiZ6Igg3#0.41#655#fruits#Off the Vine +Tavola#Tavofrui0k9XOt#1.16#938#fruits#TriCounty Produce +Tavola#Tavofrui8DuRxL#1.08#979#fruits#Simply Fresh +Tavola#TavofruiNZEuJZ#1.16#215#fruits#Off the Vine +Tea#TeafruiL0357s#1.11#516#fruits#TriCounty Produce +Tea#TeafruiD5soTf#1.13#970#fruits#Simply Fresh +Tea#TeafruiOWq4oO#1.19#357#fruits#Off the Vine +Ugli Fruit#UglifruipKNCpf#0.24#501#fruits#TriCounty Produce +Ugli Fruit#UglifruifbDrzc#0.24#642#fruits#Simply Fresh +Ugli Fruit#Uglifruiwx8or4#0.24#280#fruits#Off the Vine +Vegetable Brain#VegefruieXLBoc#0.73#355#fruits#TriCounty Produce +Vegetable Brain#Vegefruik5FSdl#0.71#498#fruits#Simply Fresh +Vegetable 
Brain#VegefruiKBfzN0#0.72#453#fruits#Off the Vine +White Walnut#Whitfruit3oVHL#0.30#501#fruits#TriCounty Produce +White Walnut#WhitfruiHygydw#0.30#913#fruits#Simply Fresh +White Walnut#WhitfruieNtplo#0.30#401#fruits#Off the Vine +Wood Apple#WoodfruijVPRqA#0.68#501#fruits#TriCounty Produce +Wood Apple#Woodfrui4Zk69T#0.68#616#fruits#Simply Fresh +Wood Apple#WoodfruiuSLHZK#0.70#474#fruits#Off the Vine +Yellow Horn#Yellfrui5igjjf#1.18#729#fruits#TriCounty Produce +Yellow Horn#Yellfrui0DiPqa#1.13#517#fruits#Simply Fresh +Yellow Horn#Yellfrui0ljvqC#1.14#853#fruits#Off the Vine +Yellow Sapote#YellfruilGmCfq#0.93#204#fruits#TriCounty Produce +Yellow Sapote#Yellfrui4J2mke#0.88#269#fruits#Simply Fresh +Yellow Sapote#Yellfrui6PuXaL#0.86#575#fruits#Off the Vine +Ylang-ylang#Ylanfrui3rmByO#0.76#429#fruits#TriCounty Produce +Ylang-ylang#YlanfruiA80Nkq#0.76#886#fruits#Simply Fresh +Ylang-ylang#YlanfruinUEm5d#0.72#747#fruits#Off the Vine +Zapote Blanco#ZapofruisZ5sMA#0.67#428#fruits#TriCounty Produce +Zapote Blanco#ZapofruilKxl7N#0.65#924#fruits#Simply Fresh +Zapote Blanco#ZapofruiAe6Eu1#0.68#255#fruits#Off the Vine +Zulu Nut#Zulufrui469K4k#0.71#445#fruits#TriCounty Produce +Zulu Nut#ZulufruiWbz6vU#0.71#653#fruits#Simply Fresh +Zulu Nut#Zulufrui0LJnWK#0.71#858#fruits#Off the Vine +Artichoke#ArtivegeIuqmS4#0.71#282#vegetables#The Pantry +Artichoke#Artivegebljjnf#0.69#66#vegetables#TriCounty Produce +Artichoke#ArtivegeTa2lcF#0.70#618#vegetables#Off the Vine +Asparagus#AspavegezC0cDl#0.23#70#vegetables#The Pantry +Asparagus#AspavegeM1q5Kt#0.24#546#vegetables#TriCounty Produce +Asparagus#AspavegeXWbCb8#0.24#117#vegetables#Off the Vine +Basil#Basivegev08fzf#0.31#213#vegetables#The Pantry +Basil#BasivegeF3Uha7#0.29#651#vegetables#TriCounty Produce +Basil#BasivegeqR8SHC#0.31#606#vegetables#Off the Vine +Bean#BeanvegegCFUOp#0.27#794#vegetables#The Pantry +Bean#BeanvegeqMSEVq#0.27#468#vegetables#TriCounty Produce +Bean#Beanvege4IGUwX#0.27#463#vegetables#Off the Vine +Beet#BeetvegedEv4Ic#0.35#120#vegetables#The Pantry +Beet#Beetvegegi1bz1#0.35#540#vegetables#TriCounty Produce +Beet#BeetvegemztZcN#0.36#386#vegetables#Off the Vine +Blackeyed Pea#Blacvege3TPldr#0.86#133#vegetables#The Pantry +Blackeyed Pea#Blacvege3Zqnep#0.88#67#vegetables#TriCounty Produce +Blackeyed Pea#Blacvege3khffZ#0.90#790#vegetables#Off the Vine +Cabbage#CabbvegeY0c4Fw#0.82#726#vegetables#The Pantry +Cabbage#CabbvegeoaK7Co#0.85#439#vegetables#TriCounty Produce +Cabbage#CabbvegeVvO646#0.82#490#vegetables#Off the Vine +Carrot#CarrvegeEbI0sw#0.45#717#vegetables#The Pantry +Carrot#CarrvegeEZndWL#0.49#284#vegetables#TriCounty Produce +Carrot#CarrvegewUkHao#0.47#122#vegetables#Off the Vine +Cauliflower#Caulvege1CPeNG#0.68#756#vegetables#The Pantry +Cauliflower#CaulvegedrPqib#0.66#269#vegetables#TriCounty Produce +Cauliflower#CaulvegeT6cka8#0.65#728#vegetables#Off the Vine +Chayote#ChayvegePRReGE#0.14#233#vegetables#The Pantry +Chayote#Chayvegep058f7#0.14#88#vegetables#TriCounty Produce +Chayote#ChayvegeoxO40S#0.14#611#vegetables#Off the Vine +Corn#CornvegeukXkv6#0.72#632#vegetables#The Pantry +Corn#CornvegePnPREC#0.72#609#vegetables#TriCounty Produce +Corn#CornvegeO0GwoQ#0.70#664#vegetables#Off the Vine +Cucumber#CucuvegeEqQeA7#0.94#499#vegetables#The Pantry +Cucumber#CucuvegewmKbJ1#0.94#738#vegetables#TriCounty Produce +Cucumber#CucuvegeUW6JaA#0.94#565#vegetables#Off the Vine +Cantaloupe#CantvegeIHs9vJ#0.66#411#vegetables#The Pantry +Cantaloupe#CantvegeEaDdST#0.66#638#vegetables#TriCounty Produce +Cantaloupe#CantvegewWQEa0#0.64#682#vegetables#Off 
the Vine +Carraway#CarrvegewuL4Ma#0.32#740#vegetables#The Pantry +Carraway#CarrvegeyiWfBj#0.32#265#vegetables#TriCounty Produce +Carraway#CarrvegeMjb1i9#0.31#732#vegetables#Off the Vine +Celeriac#CelevegeoTBicd#0.74#350#vegetables#The Pantry +Celeriac#CelevegeCNABoZ#0.70#261#vegetables#TriCounty Produce +Celeriac#Celevege9LUeww#0.70#298#vegetables#Off the Vine +Celery#Celevegej40ZCc#0.59#740#vegetables#The Pantry +Celery#CelevegerYlVRy#0.58#734#vegetables#TriCounty Produce +Celery#Celevege67eimC#0.58#619#vegetables#Off the Vine +Chervil#ChervegeuH4Dge#0.09#502#vegetables#The Pantry +Chervil#Chervegea1OyKO#0.09#299#vegetables#TriCounty Produce +Chervil#Chervegeq56gMO#0.09#474#vegetables#Off the Vine +Chicory#Chicvege79qoQ8#0.09#709#vegetables#The Pantry +Chicory#ChicvegeTSVBQq#0.10#477#vegetables#TriCounty Produce +Chicory#Chicvege6qpcyi#0.10#282#vegetables#Off the Vine +Chinese Cabbage#ChinvegeFNsSRn#0.78#408#vegetables#The Pantry +Chinese Cabbage#Chinvege2ldNr3#0.80#799#vegetables#TriCounty Produce +Chinese Cabbage#ChinvegeK3R2Td#0.80#180#vegetables#Off the Vine +Chinese Beans#ChinvegebxbyPy#0.45#654#vegetables#The Pantry +Chinese Beans#ChinvegewKGwgx#0.45#206#vegetables#TriCounty Produce +Chinese Beans#ChinvegevVjzC0#0.47#643#vegetables#Off the Vine +Chines Kale#ChinvegeCfdkss#0.70#239#vegetables#The Pantry +Chines Kale#Chinvege6V6Dne#0.65#548#vegetables#TriCounty Produce +Chines Kale#ChinvegeB7vE3x#0.66#380#vegetables#Off the Vine +Chinese Radish#ChinvegeXcM4eq#0.22#190#vegetables#The Pantry +Chinese Radish#ChinvegeTdUBqN#0.22#257#vegetables#TriCounty Produce +Chinese Radish#ChinvegeMXMms8#0.22#402#vegetables#Off the Vine +Chinese Mustard#ChinvegeRDdpdl#0.33#149#vegetables#The Pantry +Chinese Mustard#ChinvegeABDhNd#0.32#320#vegetables#TriCounty Produce +Chinese Mustard#Chinvege8NPwa2#0.34#389#vegetables#Off the Vine +Cilantro#CilavegeQXBEsW#0.60#674#vegetables#The Pantry +Cilantro#CilavegeRgjkUG#0.60#355#vegetables#TriCounty Produce +Cilantro#CilavegelT2msu#0.59#464#vegetables#Off the Vine +Collard#CollvegesTGGNw#0.32#745#vegetables#The Pantry +Collard#CollvegeAwdor5#0.32#124#vegetables#TriCounty Produce +Collard#CollvegeQe900L#0.30#796#vegetables#Off the Vine +Coriander#CorivegeXxp4xY#0.26#560#vegetables#The Pantry +Coriander#Corivege9xBAT0#0.27#321#vegetables#TriCounty Produce +Coriander#CorivegeCfNjBx#0.27#709#vegetables#Off the Vine +Dandelion#DandvegeJNcnbr#0.11#285#vegetables#The Pantry +Dandelion#DandvegeGwBkHZ#0.11#733#vegetables#TriCounty Produce +Dandelion#DandvegeZfwVqn#0.11#57#vegetables#Off the Vine +Daikon Radish#DaikvegeHHsd7M#0.61#743#vegetables#The Pantry +Daikon Radish#DaikvegeIu17yC#0.62#459#vegetables#TriCounty Produce +Daikon Radish#DaikvegePzFjqf#0.63#296#vegetables#Off the Vine +Eggplant#EggpvegeKJtydN#0.55#200#vegetables#The Pantry +Eggplant#EggpvegeQMKrNs#0.53#208#vegetables#TriCounty Produce +Eggplant#EggpvegeN0WnSo#0.51#761#vegetables#Off the Vine +English Pea#Englvegea1ytIn#0.40#457#vegetables#The Pantry +English Pea#EnglvegerU9Vty#0.37#263#vegetables#TriCounty Produce +English Pea#EnglvegeCmkd3y#0.39#430#vegetables#Off the Vine +Fennel#Fennvegebz2UM7#0.76#545#vegetables#The Pantry +Fennel#FennvegeQzjtZ3#0.78#795#vegetables#TriCounty Produce +Fennel#FennvegeXSrW61#0.75#79#vegetables#Off the Vine +Garlic#GarlvegesR2yel#0.76#478#vegetables#The Pantry +Garlic#GarlvegeEQvt8W#0.77#349#vegetables#TriCounty Produce +Garlic#GarlvegedljBdK#0.80#708#vegetables#Off the Vine +Ginger#GingvegeMNiTc2#0.88#563#vegetables#The Pantry 
+Ginger#Gingvegeq366Sn#0.89#738#vegetables#TriCounty Produce +Ginger#GingvegeznyyVj#0.89#598#vegetables#Off the Vine +Horseradish#HorsvegemSwISt#0.12#622#vegetables#The Pantry +Horseradish#HorsvegetCOS0x#0.11#279#vegetables#TriCounty Produce +Horseradish#Horsvegew6XXaS#0.12#478#vegetables#Off the Vine +Japanese Eggplant#JapavegeTdKDCL#0.57#539#vegetables#The Pantry +Japanese Eggplant#JapavegevsJfGa#0.58#782#vegetables#TriCounty Produce +Japanese Eggplant#JapavegeCIrIxd#0.57#777#vegetables#Off the Vine +Jerusalem Artichoke#Jeruvege928cr0#0.13#231#vegetables#The Pantry +Jerusalem Artichoke#JeruvegeC2v086#0.14#123#vegetables#TriCounty Produce +Jerusalem Artichoke#JeruvegeehCYzi#0.14#196#vegetables#Off the Vine +Jicama#JicavegeRWYj9n#0.75#79#vegetables#The Pantry +Jicama#JicavegeGk5LKH#0.71#292#vegetables#TriCounty Produce +Jicama#JicavegeUjpaX1#0.70#308#vegetables#Off the Vine +Kale#Kalevegext6RNT#0.55#765#vegetables#The Pantry +Kale#KalevegeFsp17B#0.53#107#vegetables#TriCounty Produce +Kale#KalevegeAffBTS#0.57#573#vegetables#Off the Vine +Kiwifruit#KiwivegeloZBKJ#0.60#769#vegetables#The Pantry +Kiwifruit#KiwivegenCQAHw#0.59#307#vegetables#TriCounty Produce +Kiwifruit#Kiwivege0Gi3P2#0.59#235#vegetables#Off the Vine +Kohlrabi#KohlvegeJFKZDl#0.26#406#vegetables#The Pantry +Kohlrabi#Kohlvege32UTAj#0.28#613#vegetables#TriCounty Produce +Kohlrabi#KohlvegejNQC1M#0.28#326#vegetables#Off the Vine +Leek#Leekvege5iaFtg#0.70#580#vegetables#The Pantry +Leek#Leekvegei9Wxbz#0.68#188#vegetables#TriCounty Produce +Leek#LeekvegewY4mAc#0.70#473#vegetables#Off the Vine +Lettuce#LettvegesK9wDR#0.55#716#vegetables#The Pantry +Lettuce#LettvegeWzMyCM#0.57#83#vegetables#TriCounty Produce +Lettuce#LettvegeHgfGG8#0.56#268#vegetables#Off the Vine +Melons#Melovege6t93WF#0.11#252#vegetables#The Pantry +Melons#Melovegeq9kz7T#0.12#558#vegetables#TriCounty Produce +Melons#Melovege9kLTXN#0.12#382#vegetables#Off the Vine +Mushroom#MushvegeSq53h8#0.59#365#vegetables#The Pantry +Mushroom#Mushvegedq6lYP#0.59#444#vegetables#TriCounty Produce +Mushroom#Mushvege8o27D2#0.55#467#vegetables#Off the Vine +Okra#OkravegeTszQSL#0.55#62#vegetables#The Pantry +Okra#OkravegeJBWmfh#0.58#165#vegetables#TriCounty Produce +Okra#OkravegeD6tF9n#0.55#77#vegetables#Off the Vine +Onion#OniovegejwimQo#0.80#186#vegetables#The Pantry +Onion#OniovegeUOwwks#0.80#417#vegetables#TriCounty Produce +Onion#OniovegezcRDrc#0.80#435#vegetables#Off the Vine +Oregano#OregvegetlU7Ez#0.71#119#vegetables#The Pantry +Oregano#Oregvege9h9ZKy#0.70#173#vegetables#TriCounty Produce +Oregano#OregvegebXr0PJ#0.70#773#vegetables#Off the Vine +Parsley#ParsvegeXFEjjN#0.83#502#vegetables#The Pantry +Parsley#ParsvegejAg5C4#0.80#454#vegetables#TriCounty Produce +Parsley#ParsvegehAtH2H#0.84#523#vegetables#Off the Vine +Parsnip#Parsvegee9Lp6D#0.46#626#vegetables#The Pantry +Parsnip#ParsvegeSxXHSA#0.47#411#vegetables#TriCounty Produce +Parsnip#Parsvegea0stPf#0.44#403#vegetables#Off the Vine +Pea#Peavegecq4SxR#0.18#342#vegetables#The Pantry +Pea#Peavege46Gdp9#0.18#255#vegetables#TriCounty Produce +Pea#Peavegeov1gc5#0.18#251#vegetables#Off the Vine +Pepper#PeppvegeUcBYRp#0.33#52#vegetables#The Pantry +Pepper#PeppvegeB60btP#0.35#107#vegetables#TriCounty Produce +Pepper#PeppvegeG4tP3e#0.34#481#vegetables#Off the Vine +Pigeon Pea#Pigevegec5bAtm#0.94#391#vegetables#The Pantry +Pigeon Pea#Pigevegeb93eLi#0.91#447#vegetables#TriCounty Produce +Pigeon Pea#PigevegejEBDRa#0.89#259#vegetables#Off the Vine +Irish Potato#IrisvegeJNQqby#0.72#355#vegetables#The Pantry +Irish 
Potato#Irisvegewq1PLd#0.72#601#vegetables#TriCounty Produce +Irish Potato#IrisvegeAfFLdO#0.68#740#vegetables#Off the Vine +Pumpkin#PumpvegeiYsPR8#0.25#776#vegetables#The Pantry +Pumpkin#PumpvegelqP1Kh#0.25#189#vegetables#TriCounty Produce +Pumpkin#Pumpvegeb3nQU5#0.26#207#vegetables#Off the Vine +Radish#RadivegeNwwSBJ#0.16#613#vegetables#The Pantry +Radish#Radivege0tIBnL#0.16#779#vegetables#TriCounty Produce +Radish#RadivegeNLqJCf#0.16#731#vegetables#Off the Vine +Rhubarb#RhubvegeREfOti#0.12#301#vegetables#The Pantry +Rhubarb#Rhubvege4Jc3b7#0.12#557#vegetables#TriCounty Produce +Rhubarb#RhubvegeaXqF7H#0.12#378#vegetables#Off the Vine +Rosemary#Rosevege16QStc#0.73#380#vegetables#The Pantry +Rosemary#RosevegeNf6Oem#0.75#622#vegetables#TriCounty Produce +Rosemary#RosevegeFgsOyN#0.74#631#vegetables#Off the Vine +Rutabaga#RutavegecUYfQ3#0.55#676#vegetables#The Pantry +Rutabaga#RutavegejOG5DF#0.55#273#vegetables#TriCounty Produce +Rutabaga#RutavegewEVjzV#0.53#452#vegetables#Off the Vine +Salsify#SalsvegeViS9HF#0.11#537#vegetables#The Pantry +Salsify#Salsvegemd3HAL#0.11#753#vegetables#TriCounty Produce +Salsify#SalsvegeuRCnmq#0.10#787#vegetables#Off the Vine +Savory#Savovegee4DRWl#0.21#456#vegetables#The Pantry +Savory#SavovegerZ90Xm#0.21#642#vegetables#TriCounty Produce +Savory#Savovegeje7yy7#0.22#328#vegetables#Off the Vine +Sesame#Sesavege4NAWZE#0.84#54#vegetables#The Pantry +Sesame#SesavegeMTc9IN#0.84#458#vegetables#TriCounty Produce +Sesame#SesavegegOwAjo#0.83#125#vegetables#Off the Vine +Shallots#ShalvegeUO2pDO#0.26#599#vegetables#The Pantry +Shallots#ShalvegeY1sekb#0.27#647#vegetables#TriCounty Produce +Shallots#ShalvegeSDC8VY#0.27#369#vegetables#Off the Vine +Sugar Snap Peas#SugavegepUZDTl#0.47#308#vegetables#The Pantry +Sugar Snap Peas#Sugavege1XyzNH#0.48#205#vegetables#TriCounty Produce +Sugar Snap Peas#SugavegeJuaG7f#0.46#348#vegetables#Off the Vine +Soybean#SoybvegeqxSVRL#0.70#639#vegetables#The Pantry +Soybean#SoybvegezEMjOG#0.68#423#vegetables#TriCounty Produce +Soybean#SoybvegebanSFq#0.67#268#vegetables#Off the Vine +Spaghetti Squash#SpagvegeMNO1yC#0.12#753#vegetables#The Pantry +Spaghetti Squash#SpagvegeilpUaD#0.13#604#vegetables#TriCounty Produce +Spaghetti Squash#SpagvegeAOoZNX#0.13#431#vegetables#Off the Vine +Spinach#SpinvegeegXXou#0.10#742#vegetables#The Pantry +Spinach#SpinvegeVcqXL6#0.11#708#vegetables#TriCounty Produce +Spinach#SpinvegetZ26DN#0.11#625#vegetables#Off the Vine +Sweet Potato#SweevegepNDQWb#0.94#720#vegetables#The Pantry +Sweet Potato#Sweevegepnw7Tm#0.90#377#vegetables#TriCounty Produce +Sweet Potato#Sweevegeyk0C82#0.89#242#vegetables#Off the Vine +Swiss Chard#SwisvegeksalTA#0.54#545#vegetables#The Pantry +Swiss Chard#SwisvegeKm2Kze#0.54#472#vegetables#TriCounty Produce +Swiss Chard#SwisvegehteuMk#0.56#142#vegetables#Off the Vine +Taro#Tarovege3fpGV6#0.87#155#vegetables#The Pantry +Taro#TarovegerZkmof#0.86#371#vegetables#TriCounty Produce +Taro#TarovegeXKPuzc#0.89#443#vegetables#Off the Vine +Tarragon#TarrvegeCzVC6U#0.18#491#vegetables#The Pantry +Tarragon#TarrvegesIkEfS#0.17#65#vegetables#TriCounty Produce +Tarragon#TarrvegerZsKFP#0.18#180#vegetables#Off the Vine +Thyme#Thymvege8Rv72c#0.41#442#vegetables#The Pantry +Thyme#ThymvegeJoUdQS#0.42#237#vegetables#TriCounty Produce +Thyme#ThymvegeRck5uO#0.43#491#vegetables#Off the Vine +Tomato#Tomavegey0NHGK#0.31#60#vegetables#The Pantry +Tomato#TomavegeKAjRUn#0.30#630#vegetables#TriCounty Produce +Tomato#TomavegePZOHlH#0.30#70#vegetables#Off the Vine +Turnip#TurnvegeRVQiV5#0.44#580#vegetables#The Pantry 
+Turnip#TurnvegeVjIX9D#0.45#743#vegetables#TriCounty Produce +Turnip#TurnvegelFhvuJ#0.44#219#vegetables#Off the Vine +Watercress#WatevegelwzPLQ#0.54#230#vegetables#The Pantry +Watercress#Watevege8oeDCT#0.54#774#vegetables#TriCounty Produce +Watercress#Watevegexr8L1t#0.55#185#vegetables#Off the Vine +Watermelon#WatevegeL83MRH#0.19#698#vegetables#The Pantry +Watermelon#WatevegeR2S4Dq#0.21#488#vegetables#TriCounty Produce +Watermelon#WatevegepFPXQu#0.21#439#vegetables#Off the Vine +Kamote#KamovegegdON75#0.13#218#vegetables#The Pantry +Kamote#KamovegevupDBf#0.13#98#vegetables#TriCounty Produce +Kamote#KamovegeSQX7IA#0.14#703#vegetables#Off the Vine +Alogbati#AlogvegeB1WaJU#0.41#775#vegetables#The Pantry +Alogbati#AlogvegeVr5cPP#0.40#789#vegetables#TriCounty Produce +Alogbati#AlogvegeyTUQzy#0.40#416#vegetables#Off the Vine +Ampalaya#AmpavegemR9fSd#0.85#107#vegetables#The Pantry +Ampalaya#AmpavegeJDu9Im#0.90#676#vegetables#TriCounty Produce +Ampalaya#AmpavegepL8GH5#0.86#728#vegetables#Off the Vine +Dahon ng sili#Dahovege6X9grk#0.11#369#vegetables#The Pantry +Dahon ng sili#DahovegeiHZjQT#0.11#141#vegetables#TriCounty Produce +Dahon ng sili#DahovegeoCDAH8#0.12#517#vegetables#Off the Vine +Gabi#GabivegeVm4Xk3#0.44#396#vegetables#The Pantry +Gabi#Gabivegeu6woqK#0.42#722#vegetables#TriCounty Produce +Gabi#GabivegezcA7q1#0.42#394#vegetables#Off the Vine +Kabute#Kabuvege6Tqrif#0.16#123#vegetables#The Pantry +Kabute#KabuvegeA3uYdG#0.15#183#vegetables#TriCounty Produce +Kabute#KabuvegeXW6ZiI#0.16#624#vegetables#Off the Vine +Kamoteng Kahoy#KamovegeAdW37X#0.42#782#vegetables#The Pantry +Kamoteng Kahoy#KamovegetFlqpC#0.42#515#vegetables#TriCounty Produce +Kamoteng Kahoy#KamovegeMvxoLn#0.40#166#vegetables#Off the Vine +Kangkong#KangvegeSFTvEz#0.35#759#vegetables#The Pantry +Kangkong#KangvegeRLR6gL#0.34#695#vegetables#TriCounty Produce +Kangkong#Kangvege9BFo14#0.35#783#vegetables#Off the Vine +Labanos#Labavege3qrWJL#0.94#514#vegetables#The Pantry +Labanos#LabavegekgVWDH#0.89#210#vegetables#TriCounty Produce +Labanos#LabavegeiVPgMx#0.89#207#vegetables#Off the Vine +Labong#LabovegeX3O8yz#0.85#722#vegetables#The Pantry +Labong#LabovegeI1wSEs#0.87#472#vegetables#TriCounty Produce +Labong#LabovegeOPiQht#0.85#740#vegetables#Off the Vine +Malunggay#MaluvegeHkwAFm#0.30#252#vegetables#The Pantry +Malunggay#Maluvegez6TiSY#0.30#245#vegetables#TriCounty Produce +Malunggay#MaluvegewzY37D#0.31#405#vegetables#Off the Vine +Munggo#MungvegeqeuwGw#0.25#362#vegetables#The Pantry +Munggo#MungvegeNhqWvL#0.26#360#vegetables#TriCounty Produce +Munggo#MungvegeGxNxQC#0.25#555#vegetables#Off the Vine +Pechay#PechvegezDeHFZ#0.36#401#vegetables#The Pantry +Pechay#Pechvegehi4Fcx#0.35#723#vegetables#TriCounty Produce +Pechay#Pechvege8Pq8Eo#0.36#141#vegetables#Off the Vine +Sigarilyas#SigavegeMJrtlV#0.88#335#vegetables#The Pantry +Sigarilyas#SigavegeLhsoOB#0.87#768#vegetables#TriCounty Produce +Sigarilyas#SigavegeS6RJcA#0.93#356#vegetables#Off the Vine +Sitaw#Sitavege0hMi9z#0.65#153#vegetables#The Pantry +Sitaw#Sitavegeez1g6N#0.67#561#vegetables#TriCounty Produce +Sitaw#Sitavege0BCNeF#0.66#674#vegetables#Off the Vine +Talong#TalovegevZjVK6#0.10#530#vegetables#The Pantry +Talong#TalovegexX4MRw#0.09#305#vegetables#TriCounty Produce +Talong#TalovegeO3U2ze#0.10#126#vegetables#Off the Vine +Toge#TogevegeYelJUw#0.54#449#vegetables#The Pantry +Toge#Togevegeilr1xK#0.54#274#vegetables#TriCounty Produce +Toge#Togevegesvjnyn#0.51#316#vegetables#Off the Vine +Ube#UbevegeoPnxvb#0.56#397#vegetables#The Pantry 
+Ube#Ubevege2CNyve#0.55#450#vegetables#TriCounty Produce +Ube#UbevegeC43sVj#0.55#263#vegetables#Off the Vine +Upo#UpovegecOGRqC#0.22#404#vegetables#The Pantry +Upo#Upovegekjl2wl#0.22#541#vegetables#TriCounty Produce +Upo#UpovegemTTTwI#0.23#459#vegetables#Off the Vine +Edamame#EdamvegeVYtk8z#0.79#296#vegetables#The Pantry +Edamame#Edamvege608vXi#0.78#700#vegetables#TriCounty Produce +Edamame#Edamvege1jiqGY#0.75#115#vegetables#Off the Vine +Hairy melon#HairvegeFYFHIw#0.71#789#vegetables#The Pantry +Hairy melon#HairvegeS7AAqI#0.72#302#vegetables#TriCounty Produce +Hairy melon#HairvegeO6WJHL#0.72#444#vegetables#Off the Vine +Burdock#BurdvegeyLstLV#0.56#761#vegetables#The Pantry +Burdock#BurdvegeZsqAjT#0.56#582#vegetables#TriCounty Produce +Burdock#BurdvegeycF7mo#0.55#566#vegetables#Off the Vine +Snake gourd#SnakvegesfHGvt#0.92#626#vegetables#The Pantry +Snake gourd#SnakvegedlNiBk#0.92#669#vegetables#TriCounty Produce +Snake gourd#Snakvegec5n1UM#0.92#143#vegetables#Off the Vine +Wasabi#Wasavege5P5pZp#0.67#751#vegetables#The Pantry +Wasabi#Wasavege6EEE9r#0.68#559#vegetables#TriCounty Produce +Wasabi#Wasavege1ve7TY#0.65#61#vegetables#Off the Vine +Yam#YamvegeRN9ONH#0.57#438#vegetables#The Pantry +Yam#YamvegeWjdzeA#0.56#564#vegetables#TriCounty Produce +Yam#YamvegeI1AnyI#0.56#456#vegetables#Off the Vine +Apple Fritters#AppldessDj96hw#6.12#16#desserts#Mom's Kitchen +Apple Fritters#AppldessrN1kvM#6.06#7#desserts#The Baking Pan +Banana Split#Banadess7tpjkJ#10.86#10#desserts#Mom's Kitchen +Banana Split#Banadessfif758#11.07#14#desserts#The Baking Pan +Blueberry Boy Bait#BluedesseX2LVU#3.72#16#desserts#Mom's Kitchen +Blueberry Boy Bait#Bluedess9zLhaH#3.93#9#desserts#The Baking Pan +Candied Cranberries#CanddessjW92p3#1.77#9#desserts#Mom's Kitchen +Candied Cranberries#CanddesskhtVoQ#1.72#0#desserts#The Baking Pan +Daiquiri Souffle#DaiqdessebnYcy#9.54#15#desserts#Mom's Kitchen +Daiquiri Souffle#DaiqdessfM1DnX#9.72#6#desserts#The Baking Pan +Bananas Flambe#BanadesscczumD#6.94#12#desserts#Mom's Kitchen +Bananas Flambe#Banadess8qNfxd#7.07#16#desserts#The Baking Pan +Pie, Apple#Pie,desshcSHhT#7.88#11#desserts#Mom's Kitchen +Pie, Apple#Pie,dessTbiwDp#7.88#15#desserts#The Baking Pan +Pie, Pumpkin#Pie,desswhPBPB#6.00#20#desserts#Mom's Kitchen +Pie, Pumpkin#Pie,dessDg3NWl#6.24#19#desserts#The Baking Pan +Pie, Blueberry#Pie,dessw9VdgD#2.14#3#desserts#Mom's Kitchen +Pie, Blueberry#Pie,dessiSjZKD#2.12#1#desserts#The Baking Pan +Pie, Pecan#Pie,dess2NqhNR#12.70#20#desserts#Mom's Kitchen +Pie, Pecan#Pie,dessB1LfcE#12.33#12#desserts#The Baking Pan +Pie, Cranberry Apple#Pie,dess1mL7IS#10.16#7#desserts#Mom's Kitchen +Pie, Cranberry Apple#Pie,dessmDhkUA#10.16#11#desserts#The Baking Pan +Pie, Banana Cream#Pie,dessH80DuG#7.35#6#desserts#Mom's Kitchen +Pie, Banana Cream#Pie,dessf1YvFb#7.08#11#desserts#The Baking Pan +Pie, Key Lime#Pie,desshtli5N#4.85#2#desserts#Mom's Kitchen +Pie, Key Lime#Pie,dessMwQkKm#5.13#1#desserts#The Baking Pan +Pie, Lemon Meringue#Pie,dess9naVkX#3.74#7#desserts#Mom's Kitchen +Pie, Lemon Meringue#Pie,dessKYcNML#3.67#5#desserts#The Baking Pan +Pie, Caramel#Pie,dessSUuiIU#2.27#9#desserts#Mom's Kitchen +Pie, Caramel#Pie,dessvo8uHh#2.33#4#desserts#The Baking Pan +Pie, Raspberry#Pie,dessUHhMlS#2.36#0#desserts#Mom's Kitchen +Pie, Raspberry#Pie,dessJflbf5#2.36#2#desserts#The Baking Pan +Ice Cream, Chocolate#Ice desseXuyxx#1.44#9#desserts#Mom's Kitchen +Ice Cream, Chocolate#Ice dessASBohf#1.41#13#desserts#The Baking Pan +Ice Cream, Vanilla#Ice dessYnzbbt#11.92#19#desserts#Mom's Kitchen +Ice Cream, Vanilla#Ice 
dessUBBKp8#11.58#10#desserts#The Baking Pan +Ice Cream, Strawberry#Ice dessfTwKhD#1.90#14#desserts#Mom's Kitchen +Ice Cream, Strawberry#Ice dessaO9Fxf#1.99#6#desserts#The Baking Pan +Ice Cream, Rocky Road#Ice dessyIri3P#13.10#20#desserts#Mom's Kitchen +Ice Cream, Rocky Road#Ice dessZuLr8F#13.48#13#desserts#The Baking Pan +Ice Cream, Mint Chocolate Chip#Ice dessV1IGG7#5.75#4#desserts#Mom's Kitchen +Ice Cream, Mint Chocolate Chip#Ice dessX1gEQ4#5.64#1#desserts#The Baking Pan +Ice Cream Sundae#Ice dessbhlAXt#5.62#11#desserts#Mom's Kitchen +Ice Cream Sundae#Ice dessByapxl#5.72#16#desserts#The Baking Pan +Cobbler, Peach#CobbdessYUGeOB#10.14#20#desserts#Mom's Kitchen +Cobbler, Peach#CobbdessXfEtUK#10.43#16#desserts#The Baking Pan +Cobbler, Berry-Pecan#Cobbdessx3htak#5.36#12#desserts#Mom's Kitchen +Cobbler, Berry-Pecan#Cobbdesse4FUVI#5.41#8#desserts#The Baking Pan +Cobbler, Blueberry#CobbdessbiI0oF#3.78#11#desserts#Mom's Kitchen +Cobbler, Blueberry#CobbdessMXxbBN#3.57#2#desserts#The Baking Pan +Cobbler, Cherry#CobbdessNSa8QW#12.58#0#desserts#Mom's Kitchen +Cobbler, Cherry#CobbdessA1dADa#12.10#10#desserts#The Baking Pan +Cobbler, Huckleberry#Cobbdess3t6O8d#3.99#18#desserts#Mom's Kitchen +Cobbler, Huckleberry#CobbdessGI9euK#3.88#0#desserts#The Baking Pan +Cobbler, Rhubarb#Cobbdess22X40Z#9.54#0#desserts#Mom's Kitchen +Cobbler, Rhubarb#CobbdessPfnCT0#9.27#18#desserts#The Baking Pan +Cobbler, Strawberry#CobbdessI78188#12.43#0#desserts#Mom's Kitchen +Cobbler, Strawberry#CobbdessH3LdgQ#12.20#3#desserts#The Baking Pan +Cobbler, Zucchini#Cobbdess5rK4dP#11.24#3#desserts#Mom's Kitchen +Cobbler, Zucchini#Cobbdess4Ez8kS#10.51#10#desserts#The Baking Pan +Brownies#BrowdessmogdTl#7.62#9#desserts#Mom's Kitchen +Brownies#Browdess84Qc1z#7.55#9#desserts#The Baking Pan +Fudge Bar#Fudgdess8iXSyf#11.72#6#desserts#Mom's Kitchen +Fudge Bar#FudgdessakU1Id#12.29#5#desserts#The Baking Pan +Cookies, Oatmeal#Cookdessnq9Oya#2.84#15#desserts#Mom's Kitchen +Cookies, Oatmeal#CookdessBhgp7p#2.68#10#desserts#The Baking Pan +Cookies, Chocolate Chip#CookdessRVszsZ#12.73#17#desserts#Mom's Kitchen +Cookies, Chocolate Chip#CookdessSOoHmT#12.26#19#desserts#The Baking Pan +Cookies, Peanut Butter#Cookdess2UcMI2#7.82#5#desserts#Mom's Kitchen +Cookies, Peanut Butter#Cookdess1cILme#7.46#20#desserts#The Baking Pan +Mousse, Chocolate#MousdessDpN4sQ#6.25#20#desserts#Mom's Kitchen +Mousse, Chocolate#Mousdess8FyFT8#5.96#1#desserts#The Baking Pan +Mousse, Blueberry Maple#MousdessacwrkO#7.28#7#desserts#Mom's Kitchen +Mousse, Blueberry Maple#MousdessbiCMFg#7.21#12#desserts#The Baking Pan +Mousse, Chocolate Banana#MousdessIeW4qz#5.13#2#desserts#Mom's Kitchen +Mousse, Chocolate Banana#Mousdess1De9oL#5.08#19#desserts#The Baking Pan +Mousse, Cherry#Mousdesss1bF8H#13.05#20#desserts#Mom's Kitchen +Mousse, Cherry#Mousdess0ujevx#12.43#1#desserts#The Baking Pan +Mousse, Eggnog#MousdessZ38hXj#9.07#10#desserts#Mom's Kitchen +Mousse, Eggnog#Mousdesshs05ST#8.81#8#desserts#The Baking Pan +Mousse, Strawberry#MousdessHCDlBK#5.58#3#desserts#Mom's Kitchen +Mousse, Strawberry#MousdessSZ4PyW#5.36#6#desserts#The Baking Pan +Sherbet, Cantaloupe#Sherdess3DCxUg#3.11#9#desserts#Mom's Kitchen +Sherbet, Cantaloupe#Sherdesscp2VIz#2.99#7#desserts#The Baking Pan +Sherbet, Lemon Milk#Sherdess1JVFOS#7.57#9#desserts#Mom's Kitchen +Sherbet, Lemon Milk#SherdessC865vu#7.57#0#desserts#The Baking Pan +Sherbet, Orange Crush#Sherdess8W8Mb9#4.32#18#desserts#Mom's Kitchen +Sherbet, Orange Crush#SherdessxmVJBF#4.16#10#desserts#The Baking Pan +Sherbet, 
Blueberry#SherdessFAgxqp#3.46#9#desserts#Mom's Kitchen +Sherbet, Blueberry#SherdessMPL87u#3.60#6#desserts#The Baking Pan +Sherbet, Raspberry#Sherdesse86ugA#6.08#1#desserts#Mom's Kitchen +Sherbet, Raspberry#Sherdesslc1etR#5.85#12#desserts#The Baking Pan +Sherbet, Strawberry#SherdessFwv09m#4.63#17#desserts#Mom's Kitchen +Sherbet, Strawberry#SherdessKB0H7q#4.81#20#desserts#The Baking Pan +Tart, Apple#TartdessrsTyXA#3.35#18#desserts#Mom's Kitchen +Tart, Apple#Tartdessp7pyiy#3.13#11#desserts#The Baking Pan +Tart, Almond#TartdessC7FARL#6.62#10#desserts#Mom's Kitchen +Tart, Almond#Tartdess1V1A1c#6.68#13#desserts#The Baking Pan +Tart, Blueberry#TartdesssQZRXX#10.28#10#desserts#Mom's Kitchen +Tart, Blueberry#TartdessUSJSuc#10.28#9#desserts#The Baking Pan +Tart, Chocolate-Pear#Tartdess2pdOE4#5.67#17#desserts#Mom's Kitchen +Tart, Chocolate-Pear#TartdessL3aEDd#5.51#9#desserts#The Baking Pan +Tart, Lemon Fudge#Tartdess9DhZUT#3.88#3#desserts#Mom's Kitchen +Tart, Lemon Fudge#TartdesshzLOWt#3.96#13#desserts#The Baking Pan +Tart, Pecan#TartdessvSbXzd#11.80#3#desserts#Mom's Kitchen +Tart, Pecan#Tartdess6YXJec#11.04#13#desserts#The Baking Pan +Tart, Pineapple#TartdesseMfJFe#9.01#18#desserts#Mom's Kitchen +Tart, Pineapple#TartdessA2Wftr#8.44#13#desserts#The Baking Pan +Tart, Pear#Tartdess4a1BUc#10.09#2#desserts#Mom's Kitchen +Tart, Pear#TartdessNw8YPG#10.68#5#desserts#The Baking Pan +Tart, Raspberry#TartdessAVnpP6#6.18#7#desserts#Mom's Kitchen +Tart, Raspberry#TartdessfVxZFf#5.95#9#desserts#The Baking Pan +Tart, Strawberry#Tartdess4IUcZW#4.75#8#desserts#Mom's Kitchen +Tart, Strawberry#Tartdess2BeEDb#4.61#17#desserts#The Baking Pan +Tart, Raspberry#TartdesshyBd24#1.85#5#desserts#Mom's Kitchen +Tart, Raspberry#Tartdess5fqxgy#1.94#20#desserts#The Baking Pan +Trifle, Berry#TrifdessmEkbU2#12.48#19#desserts#Mom's Kitchen +Trifle, Berry#TrifdessAV9Ix8#12.60#18#desserts#The Baking Pan +Trifle, American#TrifdesscsdSCd#4.70#17#desserts#Mom's Kitchen +Trifle, American#TrifdessTArskm#4.35#11#desserts#The Baking Pan +Trifle, English#TrifdessX87q8T#8.20#9#desserts#Mom's Kitchen +Trifle, English#Trifdess52l955#8.12#11#desserts#The Baking Pan +Trifle, Orange#TrifdesslUwxwe#9.74#15#desserts#Mom's Kitchen +Trifle, Orange#TrifdessFrfCHP#10.22#1#desserts#The Baking Pan +Trifle, Pumpkin#TrifdessJKFN96#4.72#7#desserts#Mom's Kitchen +Trifle, Pumpkin#TrifdessMNw4EV#4.95#16#desserts#The Baking Pan +Trifle, Scottish#TrifdessFa0JdK#13.63#0#desserts#Mom's Kitchen +Trifle, Scottish#TrifdessAAUQCN#14.03#6#desserts#The Baking Pan +Trifle, Sherry#TrifdesscuttJg#4.42#5#desserts#Mom's Kitchen +Trifle, Sherry#TrifdesspRGpfP#4.21#19#desserts#The Baking Pan +Trifle, Strawberry#TrifdessAd5TpV#3.58#11#desserts#Mom's Kitchen +Trifle, Strawberry#Trifdess1rtW0A#3.58#3#desserts#The Baking Pan +Trifle, Scotch Whiskey#Trifdess2zJsGi#5.44#5#desserts#Mom's Kitchen +Trifle, Scotch Whiskey#TrifdessL8nuI6#5.18#5#desserts#The Baking Pan +Cheesecake, Amaretto#CheedessOJBqfD#11.89#5#desserts#Mom's Kitchen +Cheesecake, Amaretto#CheedessVnDf14#11.89#9#desserts#The Baking Pan +Cheesecake, Apple#Cheedessuks1YK#11.22#15#desserts#Mom's Kitchen +Cheesecake, Apple#CheedessMYKaKK#11.01#14#desserts#The Baking Pan +Cheesecake, Apricot#CheedessKUxTYY#12.34#16#desserts#Mom's Kitchen +Cheesecake, Apricot#CheedessMvB1pr#11.88#18#desserts#The Baking Pan +Cheesecake, Australian#CheedessQ9WAIn#2.70#9#desserts#Mom's Kitchen +Cheesecake, Australian#CheedessE6Jyjc#2.53#14#desserts#The Baking Pan +Cheesecake, Arkansas#CheedessTbqzmw#6.98#9#desserts#Mom's Kitchen +Cheesecake, 
Arkansas#CheedesstWJZfC#6.66#5#desserts#The Baking Pan +Cheesecake, Blueberry#Cheedessyo51KL#8.07#11#desserts#Mom's Kitchen +Cheesecake, Blueberry#Cheedess4Hz7P4#8.62#5#desserts#The Baking Pan +Cheesecake, Cherry#CheedessEahRkC#4.40#14#desserts#Mom's Kitchen +Cheesecake, Cherry#Cheedess3Nx4jZ#4.65#3#desserts#The Baking Pan +Cheesecake, Cran-Raspberry#CheedessrJsr9i#13.47#20#desserts#Mom's Kitchen +Cheesecake, Cran-Raspberry#CheedesshcuXCy#14.00#6#desserts#The Baking Pan +Cheesecake, German Chocolate#CheedesswayvJL#12.03#16#desserts#Mom's Kitchen +Cheesecake, German Chocolate#CheedessebTAeB#11.58#0#desserts#The Baking Pan +Cheesecake, Turtle#CheedessLqgeIA#12.19#6#desserts#Mom's Kitchen +Cheesecake, Turtle#CheedessvyNohA#12.07#19#desserts#The Baking Pan +Brownies, Apple#BrowdessIDW1Cc#5.44#12#desserts#Mom's Kitchen +Brownies, Apple#BrowdessyRMrAH#5.14#12#desserts#The Baking Pan +Brownies, Fudge#BrowdessmIHIFJ#5.19#8#desserts#Mom's Kitchen +Brownies, Fudge#BrowdessqewJ38#5.10#17#desserts#The Baking Pan +Brownies, Almond Macaroon#BrowdessniK7QI#10.57#3#desserts#Mom's Kitchen +Brownies, Almond Macaroon#BrowdessgkXURH#10.36#17#desserts#The Baking Pan +Brownies, Butterscotch#BrowdesslpA06E#7.16#13#desserts#Mom's Kitchen +Brownies, Butterscotch#BrowdessK5hofE#7.30#6#desserts#The Baking Pan +Brownies, Caramel#BrowdessVGfoA8#3.07#3#desserts#Mom's Kitchen +Brownies, Caramel#Browdess5jvVMM#3.13#11#desserts#The Baking Pan +Brownies, Cherry#Browdessyoa66A#3.39#17#desserts#Mom's Kitchen +Brownies, Cherry#BrowdessIg2JuF#3.39#11#desserts#The Baking Pan +Brownies, Chocolate Chip#Browdessb9dc59#6.18#10#desserts#Mom's Kitchen +Brownies, Chocolate Chip#BrowdessvW4nOx#6.43#14#desserts#The Baking Pan +Brownies, Coconut#BrowdessWPHrVR#3.06#15#desserts#Mom's Kitchen +Brownies, Coconut#BrowdessGVBlML#2.86#11#desserts#The Baking Pan +Brownies, Cream Cheese#Browdess1OyRay#12.74#4#desserts#Mom's Kitchen +Brownies, Cream Cheese#Browdess2fRsNv#12.61#19#desserts#The Baking Pan +Brownies, Fudge Mint#Browdessl7DP7k#11.45#14#desserts#Mom's Kitchen +Brownies, Fudge Mint#Browdessv70VKQ#11.34#16#desserts#The Baking Pan +Brownies, Mint Chip#BrowdessDDMvF7#1.81#15#desserts#Mom's Kitchen +Brownies, Mint Chip#Browdess0j9PBD#1.84#9#desserts#The Baking Pan +Cake, Angel Food#CakedessEaqGaE#11.18#3#desserts#Mom's Kitchen +Cake, Angel Food#CakedessJyAyFe#11.18#14#desserts#The Baking Pan +Cake, Chocolate#CakedessKLXFbn#10.11#7#desserts#Mom's Kitchen +Cake, Chocolate#CakedessfNP5Hg#9.91#14#desserts#The Baking Pan +Cake, Carrot#CakedessUTgMoV#4.20#13#desserts#Mom's Kitchen +Cake, Carrot#CakedessQdkaYg#4.00#3#desserts#The Baking Pan +Cake, Lemon Blueberry#CakedessepkeEW#11.73#16#desserts#Mom's Kitchen +Cake, Lemon Blueberry#CakedessHTKyQs#12.42#16#desserts#The Baking Pan +Cake Triple Fudge#CakedessiZ75lR#7.92#7#desserts#Mom's Kitchen +Cake Triple Fudge#CakedessWRrSXP#8.00#15#desserts#The Baking Pan +Cake, Walnut#CakedessveYVXZ#10.83#17#desserts#Mom's Kitchen +Cake, Walnut#Cakedesse22rT5#11.04#7#desserts#The Baking Pan +Cake, French Apple#CakedessjA2Kxv#1.95#0#desserts#Mom's Kitchen +Cake, French Apple#CakedessNBHCk0#1.86#20#desserts#The Baking Pan +Cake, Fig#CakedessOncX4y#6.82#3#desserts#Mom's Kitchen +Cake, Fig#CakedessTJtffn#7.08#10#desserts#The Baking Pan +Cake, Maple#CakedessnoGPRF#3.04#11#desserts#Mom's Kitchen +Cake, Maple#CakedessfVattM#3.22#4#desserts#The Baking Pan +Cake, Devil's Food#CakedessiXcDCt#4.73#7#desserts#Mom's Kitchen +Cake, Devil's Food#CakedessnBZk45#4.82#6#desserts#The Baking Pan +Cake, 
Double-Lemon#CakedesskeS0Vd#3.46#9#desserts#Mom's Kitchen +Cake, Double-Lemon#Cakedess50vx53#3.60#6#desserts#The Baking Pan +Sorbet, Blackberry#SorbdessQoa0CE#9.88#15#desserts#Mom's Kitchen +Sorbet, Blackberry#SorbdessqoOYzv#9.78#9#desserts#The Baking Pan diff --git a/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/vendors.txt b/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/vendors.txt new file mode 100644 index 000000000..528e1b110 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/db/GettingStarted/vendors.txt @@ -0,0 +1,6 @@ +TriCounty Produce#309 S. Main Street#Middle Town#MN#55432#763 555 5761#Mort Dufresne#763 555 5765 +Simply Fresh#15612 Bogart Lane#Harrigan#WI#53704#420 333 3912#Cheryl Swedberg#420 333 3952 +Off the Vine#133 American Ct.#Centennial#IA#52002#563 121 3800#Bob King#563 121 3800 x54 +The Pantry#1206 N. Creek Way#Middle Town#MN#55432#763 555 3391#Sully Beckstrom#763 555 3391 +Mom's Kitchen#53 Yerman Ct.#Middle Town#MN#55432#763 554 9200#Maggie Kultgen#763 554 9200 x12 +The Baking Pan#1415 53rd Ave.#Dutchin#MN#56304#320 442 2277#Mike Roan#320 442 6879 diff --git a/db/examples_java/src/com/sleepycat/examples/db/LockExample.java b/db/examples_java/src/com/sleepycat/examples/db/LockExample.java index 8d40de5e4..a9cf338d1 100644 --- a/db/examples_java/src/com/sleepycat/examples/db/LockExample.java +++ b/db/examples_java/src/com/sleepycat/examples/db/LockExample.java @@ -1,53 +1,55 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: LockExample.java,v 11.12 2003/03/21 23:28:38 gburd Exp $ + * $Id: LockExample.java,v 11.14 2004/04/06 20:43:35 mjc Exp $ */ - package com.sleepycat.examples.db; import com.sleepycat.db.*; +import java.io.File; import java.io.FileNotFoundException; -import java.io.InputStream; import java.io.InputStreamReader; import java.io.IOException; import java.io.PrintStream; import java.util.Vector; // -// An example of a program using DbLock and related classes. +// An example of a program using Lock and related classes. // -class LockExample extends DbEnv -{ +class LockExample { private static final String progname = "LockExample"; - private static final String LOCK_HOME = "TESTDIR"; + private static final File LOCK_HOME = new File("TESTDIR"); + Environment dbenv; + + public LockExample(File home, int maxlocks, boolean do_unlink) + throws DatabaseException, FileNotFoundException { - public LockExample(String home, int maxlocks, boolean do_unlink) - throws DbException, FileNotFoundException - { - super(0); if (do_unlink) { - remove(home, Db.DB_FORCE); - } - else { - setErrorStream(System.err); - setErrorPrefix("LockExample"); - if (maxlocks != 0) - setLockMaxLocks(maxlocks); - open(home, Db.DB_CREATE|Db.DB_INIT_LOCK, 0); + Environment.remove(home, true, null); } + + EnvironmentConfig config = new EnvironmentConfig(); + config.setErrorStream(System.err); + config.setErrorPrefix("LockExample"); + config.setMaxLocks(maxlocks); + config.setAllowCreate(true); + config.setInitializeLocking(true); + dbenv = new Environment(home, config); + } + + public void close() throws DatabaseException { + dbenv.close(); } // Prompts for a line, and keeps prompting until a non blank // line is returned. Returns null on erroror. 
// - static public String askForLine(InputStreamReader reader, - PrintStream out, String prompt) - { + public static String askForLine(InputStreamReader reader, + PrintStream out, String prompt) { String result = ""; while (result != null && result.length() == 0) { out.print(prompt); @@ -62,17 +64,14 @@ class LockExample extends DbEnv // Returns null on EOF. If EOF appears in the middle // of a line, returns that line, then null on next call. // - static public String getLine(InputStreamReader reader) - { + public static String getLine(InputStreamReader reader) { StringBuffer b = new StringBuffer(); int c; try { - while ((c = reader.read()) != -1 && c != '\n') { + while ((c = reader.read()) != -1 && c != '\n') if (c != '\r') b.append((char)c); - } - } - catch (IOException ioe) { + } catch (IOException ioe) { c = -1; } @@ -82,9 +81,7 @@ class LockExample extends DbEnv return b.toString(); } - public void run() - throws DbException - { + public void run() throws DatabaseException { long held; int len = 0, locker; int ret; @@ -96,7 +93,7 @@ class LockExample extends DbEnv // // Accept lock requests. // - locker = lockId(); + locker = dbenv.createLockerID(); for (held = 0;;) { String opbuf = askForLine(in, System.out, "Operation get/release [get]> "); @@ -107,7 +104,7 @@ class LockExample extends DbEnv if (opbuf.equals("get")) { // Acquire a lock. String objbuf = askForLine(in, System.out, - "input object (text string) to lock> "); + "input object (text string) to lock> "); if (objbuf == null) break; @@ -122,18 +119,17 @@ class LockExample extends DbEnv !lockbuf.equals("read") && !lockbuf.equals("write")); - int lock_type; + LockRequestMode lock_type; if (len <= 1 || lockbuf.equals("read")) - lock_type = Db.DB_LOCK_READ; + lock_type = LockRequestMode.READ; else - lock_type = Db.DB_LOCK_WRITE; + lock_type = LockRequestMode.WRITE; - Dbt dbt = new Dbt(objbuf.getBytes()); + DatabaseEntry entry = new DatabaseEntry(objbuf.getBytes()); - DbLock lock; + Lock lock; did_get = true; - lock = lockGet(locker, Db.DB_LOCK_NOWAIT, - dbt, lock_type); + lock = dbenv.getLock(locker, true, entry, lock_type); lockid = locks.size(); locks.addElement(lock); } else { @@ -150,22 +146,19 @@ class LockExample extends DbEnv continue; } did_get = false; - DbLock lock = (DbLock)locks.elementAt(lockid); - lockPut(lock); + Lock lock = (Lock)locks.elementAt(lockid); + dbenv.putLock(lock); } System.out.println("Lock #" + lockid + " " + (did_get ? "granted" : "released")); held += did_get ? 1 : -1; - } - catch (DbLockNotGrantedException lnge) { + } catch (LockNotGrantedException lnge) { System.err.println("Lock not granted"); - } - catch (DbDeadlockException de) { + } catch (DeadlockException de) { System.err.println("LockExample: lock_" + (did_get ? 
"get" : "put") + ": returned DEADLOCK"); - } - catch (DbException dbe) { + } catch (DatabaseException dbe) { System.err.println("LockExample: lock_get: " + dbe.toString()); } } @@ -174,15 +167,13 @@ class LockExample extends DbEnv " locks held"); } - private static void usage() - { + private static void usage() { System.err.println("usage: LockExample [-u] [-h home] [-m maxlocks]"); System.exit(1); } - public static void main(String argv[]) - { - String home = LOCK_HOME; + public static void main(String[] argv) { + File home = LOCK_HOME; boolean do_unlink = false; int maxlocks = 0; @@ -190,42 +181,30 @@ class LockExample extends DbEnv if (argv[i].equals("-h")) { if (++i >= argv.length) usage(); - home = argv[i]; - } - else if (argv[i].equals("-m")) { + home = new File(argv[i]); + } else if (argv[i].equals("-m")) { if (++i >= argv.length) usage(); try { maxlocks = Integer.parseInt(argv[i]); - } - catch (NumberFormatException nfe) { + } catch (NumberFormatException nfe) { usage(); } - } - else if (argv[i].equals("-u")) { + } else if (argv[i].equals("-u")) { do_unlink = true; - } - else { + } else { usage(); } } try { - if (do_unlink) { - // Create an environment that immediately - // removes all files. - LockExample tmp = new LockExample(home, maxlocks, do_unlink); - } - LockExample app = new LockExample(home, maxlocks, do_unlink); app.run(); - app.close(0); - } - catch (DbException dbe) { + app.close(); + } catch (DatabaseException dbe) { System.err.println(progname + ": " + dbe.toString()); - } - catch (Throwable t) { + } catch (Throwable t) { System.err.println(progname + ": " + t.toString()); } System.out.println("LockExample completed"); diff --git a/db/examples_java/src/com/sleepycat/examples/db/RPCExample.java b/db/examples_java/src/com/sleepycat/examples/db/RPCExample.java new file mode 100644 index 000000000..af2ba7ddf --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/db/RPCExample.java @@ -0,0 +1,107 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1997-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: RPCExample.java,v 1.1 2004/09/23 17:54:26 mjc Exp $ + */ + +package com.sleepycat.examples.db; + +import com.sleepycat.db.*; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.OutputStream; + +/* + * An example of a program configuring a database environment as an RPC client. + */ +public class RPCExample { + private static final String progname = "RPCExample"; + private static final File DATABASE_HOME = new File("TESTDIR"); + + private static void runApplication(Environment dbenv) + throws DatabaseException, FileNotFoundException { + + // Do something interesting... + // Your application goes here. + DatabaseConfig config = new DatabaseConfig(); + config.setAllowCreate(true); + config.setType(DatabaseType.BTREE); + Database db = dbenv.openDatabase(null, "test.db", null, config); + db.close(); + } + + private static void setupEnvironment(File home, + OutputStream errs) + throws DatabaseException, FileNotFoundException { + + // Create an environment object and initialize it for error reporting. + EnvironmentConfig config = new EnvironmentConfig(); + config.setErrorStream(errs); + config.setErrorPrefix(progname); + + // + // We want to specify the shared memory buffer pool cachesize, + // but everything else is the default. + // + config.setCacheSize(64 * 1024); + + // Open the environment with full transactional support. 
+ config.setAllowCreate(true); + config.setInitializeCache(true); + config.setTransactional(true); + config.setInitializeLocking(true); + + config.setRPCServer("localhost", 0, 0); + + // + // open is declared to throw a FileNotFoundException, which normally + // shouldn't occur when allowCreate is set. + // + Environment dbenv = new Environment(home, config); + + try { + // Start your application. + runApplication(dbenv); + } finally { + // Close the environment. Doing this in the finally block ensures + // it is done, even if an error is thrown. + dbenv.close(); + } + } + + private static void teardownEnvironment(File home, + OutputStream errs) + throws DatabaseException, FileNotFoundException { + + // Remove the shared database regions. + EnvironmentConfig config = new EnvironmentConfig(); + + config.setErrorStream(errs); + config.setErrorPrefix(progname); + config.setRPCServer("localhost", 0, 0); + Environment.remove(home, true, config); + } + + public static void main(String[] args) { + File home = DATABASE_HOME; + + try { + System.out.println("Setup env"); + setupEnvironment(home, System.err); + + System.out.println("Teardown env"); + teardownEnvironment(home, System.err); + } catch (DatabaseException dbe) { + System.err.println(progname + ": environment open: " + dbe.toString()); + dbe.printStackTrace(System.err); + System.exit (1); + } catch (FileNotFoundException fnfe) { + System.err.println(progname + ": unexpected open environment error " + fnfe); + System.exit (1); + } + } + +} diff --git a/db/examples_java/src/com/sleepycat/examples/db/SequenceExample.java b/db/examples_java/src/com/sleepycat/examples/db/SequenceExample.java new file mode 100644 index 000000000..6e430c434 --- /dev/null +++ b/db/examples_java/src/com/sleepycat/examples/db/SequenceExample.java @@ -0,0 +1,93 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1997-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SequenceExample.java,v 1.1 2004/09/22 22:20:32 mjc Exp $ + */ + +package com.sleepycat.examples.db; + +import com.sleepycat.db.*; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.PrintStream; + +class SequenceExample { + private static final int EXIT_SUCCESS = 0; + private static final int EXIT_FAILURE = 1; + + public SequenceExample() { + } + + public static void usage() { + System.out.println("usage: java " + + "com.sleepycat.examples.db.SequenceExample [-r] [database]\n"); + System.exit(EXIT_FAILURE); + } + + public static void main(String[] argv) { + boolean removeExistingDatabase = false; + String databaseName = "access.db"; + + for (int i = 0; i < argv.length; i++) { + if (argv[i].equals("-r")) + removeExistingDatabase = true; + else if (argv[i].equals("-?")) + usage(); + else if (argv[i].startsWith("-")) + usage(); + else { + if ((argv.length - i) != 1) + usage(); + databaseName = argv[i]; + break; + } + } + + try { + SequenceExample app = new SequenceExample(); + app.run(removeExistingDatabase, databaseName); + } catch (DatabaseException dbe) { + System.err.println("SequenceExample: " + dbe.toString()); + System.exit(EXIT_FAILURE); + } catch (FileNotFoundException fnfe) { + System.err.println("SequenceExample: " + fnfe.toString()); + System.exit(EXIT_FAILURE); + } + System.exit(EXIT_SUCCESS); + } + + public void run(boolean removeExistingDatabase, String databaseName) + throws DatabaseException, FileNotFoundException { + + // Remove the previous database. 
+ if (removeExistingDatabase) + new File(databaseName).delete(); + + // Create the database object. + // There is no environment for this simple example. + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setErrorStream(System.err); + dbConfig.setErrorPrefix("SequenceExample"); + dbConfig.setType(DatabaseType.BTREE); + dbConfig.setAllowCreate(true); + Database table = new Database(databaseName, null, dbConfig); + + SequenceConfig config = new SequenceConfig(); + config.setAllowCreate(true); + DatabaseEntry key = + new DatabaseEntry("my_sequence".getBytes()); + Sequence sequence = table.openSequence(null, key, config); + + for (int i = 0; i < 10; i++) { + long seqnum = sequence.get(null, 1); + System.out.println("Got sequence number: " + seqnum); + } + + sequence.close(); + table.close(); + } +} diff --git a/db/examples_java/src/com/sleepycat/examples/db/TpcbExample.java b/db/examples_java/src/com/sleepycat/examples/db/TpcbExample.java index 2d2aa80c2..9935ba808 100644 --- a/db/examples_java/src/com/sleepycat/examples/db/TpcbExample.java +++ b/db/examples_java/src/com/sleepycat/examples/db/TpcbExample.java @@ -1,15 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: TpcbExample.java,v 11.24 2003/10/20 20:12:32 mjc Exp $ + * $Id: TpcbExample.java,v 11.26 2004/04/06 20:43:35 mjc Exp $ */ package com.sleepycat.examples.db; import com.sleepycat.db.*; + +import java.io.File; import java.io.FileNotFoundException; import java.math.BigDecimal; import java.util.Calendar; @@ -25,8 +27,7 @@ import java.util.GregorianCalendar; // test, use the n flag to indicate a number of transactions to run in // each thread and -T to specify the number of threads. // -class TpcbExample extends DbEnv -{ +class TpcbExample { public static final int TELLERS_PER_BRANCH = 10; public static final int ACCOUNTS_PER_TELLER = 10000; public static final int HISTORY_PER_BRANCH = 2592000; @@ -40,18 +41,18 @@ class TpcbExample extends DbEnv // VALID_SCALING configuration /* - public static final int ACCOUNTS = 1000000; - public static final int BRANCHES = 10; - public static final int TELLERS = 100; - public static final int HISTORY = 25920000; + public static final int ACCOUNTS = 1000000; + public static final int BRANCHES = 10; + public static final int TELLERS = 100; + public static final int HISTORY = 25920000; */ // TINY configuration /* - public static final int ACCOUNTS = 1000; - public static final int BRANCHES = 10; - public static final int TELLERS = 100; - public static final int HISTORY = 10000; + public static final int ACCOUNTS = 1000; + public static final int BRANCHES = 10; + public static final int TELLERS = 100; + public static final int HISTORY = 10000; */ // Default configuration @@ -69,47 +70,55 @@ class TpcbExample extends DbEnv public static final int BRANCH = 1; public static final int TELLER = 2; - public static boolean verbose = false; - public static final String progname = "TpcbExample"; // Program name. + public static boolean verbose = false; + public static final String progname = "TpcbExample"; // Program name. 
+ Environment dbenv; int accounts, branches, tellers, history; - public TpcbExample(String home, + public TpcbExample(File home, int accounts, int branches, int tellers, int history, - int cachesize, boolean initializing, int flags) - throws DbException, FileNotFoundException - { - super(0); + int cachesize, boolean initializing, boolean noSync) + throws DatabaseException, FileNotFoundException { + this.accounts = accounts; this.branches = branches; this.tellers = tellers; this.history = history; - setErrorStream(System.err); - setErrorPrefix(progname); - setCacheSize(cachesize == 0 ? 4 * 1024 * 1024 : cachesize, 0); - if ((flags & (Db.DB_TXN_NOSYNC)) != 0) - setFlags(Db.DB_TXN_NOSYNC, true); - flags &= ~(Db.DB_TXN_NOSYNC); - setLockDetect(Db.DB_LOCK_DEFAULT); + EnvironmentConfig config = new EnvironmentConfig(); + config.setErrorStream(System.err); + config.setErrorPrefix(progname); + config.setCacheSize(cachesize == 0 ? 4 * 1024 * 1024 : cachesize); + config.setTxnNoSync(noSync); + config.setLockDetectMode(LockDetectMode.DEFAULT); + config.setAllowCreate(true); - int local_flags = flags | Db.DB_CREATE; - if (initializing) - local_flags |= Db.DB_INIT_MPOOL; - else - local_flags |= Db.DB_INIT_TXN | Db.DB_INIT_LOCK | - Db.DB_INIT_LOG | Db.DB_INIT_MPOOL; + config.setInitializeCache(true); + config.setTransactional(!initializing); + config.setInitializeLocking(!initializing); + config.setInitializeLogging(!initializing); - open(home, local_flags, 0); // may throw DbException + dbenv = new Environment(home, config); + } + + public void close() + throws DatabaseException { + + try { + if (dbenv != null) + dbenv.close(); + } finally { + dbenv = null; + } } // // Initialize the database to the number of accounts, branches, // history records, and tellers given to the constructor. 
// - public void populate() - { - Db dbp = null; + public void populate() { + Database dbp = null; int err; int balance, idnum; @@ -123,12 +132,14 @@ class TpcbExample extends DbEnv h_nelem = accounts; try { - dbp = new Db(this, 0); - dbp.setHashNumElements(h_nelem); - dbp.open(null, "account", null, Db.DB_HASH, - Db.DB_CREATE | Db.DB_TRUNCATE, 0644); + DatabaseConfig config = new DatabaseConfig(); + config.setType(DatabaseType.HASH); + config.setHashNumElements(h_nelem); + config.setAllowCreate(true); + config.setTruncate(true); + dbp = dbenv.openDatabase(null, "account", null, config); } catch (Exception e1) { - // can be DbException or FileNotFoundException + // can be DatabaseException or FileNotFoundException errExit(e1, "Open of account file failed"); } @@ -137,8 +148,8 @@ class TpcbExample extends DbEnv idnum += h_nelem; end_anum = idnum - 1; try { - dbp.close(0); - } catch (DbException e2) { + dbp.close(); + } catch (DatabaseException e2) { errExit(e2, "Account file close failed"); } @@ -155,26 +166,27 @@ class TpcbExample extends DbEnv h_nelem = (int)branches; try { - dbp = new Db(this, 0); - - dbp.setHashNumElements(h_nelem); - dbp.setHashFillFactor(1); - dbp.setPageSize(512); - - dbp.open(null, "branch", null, Db.DB_HASH, - Db.DB_CREATE | Db.DB_TRUNCATE, 0644); + DatabaseConfig config = new DatabaseConfig(); + config.setType(DatabaseType.HASH); + config.setHashNumElements(h_nelem); + config.setHashFillFactor(1); + config.setPageSize(512); + config.setAllowCreate(true); + config.setTruncate(true); + dbp = dbenv.openDatabase(null, "branch", null, config); } catch (Exception e3) { - // can be DbException or FileNotFoundException + // can be DatabaseException or FileNotFoundException errExit(e3, "Branch file create failed"); } + start_bnum = idnum; populateTable(dbp, idnum, balance, h_nelem, "branch"); idnum += h_nelem; end_bnum = idnum - 1; try { - dbp.close(0); - } catch (DbException dbe4) { + dbp.close(); + } catch (DatabaseException dbe4) { errExit(dbe4, "Close of branch file failed"); } @@ -190,17 +202,16 @@ class TpcbExample extends DbEnv h_nelem = (int)tellers; try { - - dbp = new Db(this, 0); - - dbp.setHashNumElements(h_nelem); - dbp.setHashFillFactor(0); - dbp.setPageSize(512); - - dbp.open(null, "teller", null, Db.DB_HASH, - Db.DB_CREATE | Db.DB_TRUNCATE, 0644); + DatabaseConfig config = new DatabaseConfig(); + config.setType(DatabaseType.HASH); + config.setHashNumElements(h_nelem); + config.setHashFillFactor(0); + config.setPageSize(512); + config.setAllowCreate(true); + config.setTruncate(true); + dbp = dbenv.openDatabase(null, "teller", null, config); } catch (Exception e5) { - // can be DbException or FileNotFoundException + // can be DatabaseException or FileNotFoundException errExit(e5, "Teller file create failed"); } @@ -210,67 +221,68 @@ class TpcbExample extends DbEnv end_tnum = idnum - 1; try { - dbp.close(0); - } catch (DbException e6) { + dbp.close(); + } catch (DatabaseException e6) { errExit(e6, "Close of teller file failed"); } if (verbose) - System.out.println("Populated tellers: " - + String.valueOf(start_tnum) + " - " + String.valueOf(end_tnum)); + System.out.println("Populated tellers: " + + String.valueOf(start_tnum) + " - " + + String.valueOf(end_tnum)); try { - dbp = new Db(this, 0); - dbp.setRecordLength(HISTORY_LEN); - dbp.open(null, "history", null, Db.DB_RECNO, - Db.DB_CREATE | Db.DB_TRUNCATE, 0644); + DatabaseConfig config = new DatabaseConfig(); + config.setType(DatabaseType.RECNO); + config.setRecordLength(HISTORY_LEN); + 
config.setAllowCreate(true); + config.setTruncate(true); + dbp = dbenv.openDatabase(null, "history", null, config); } catch (Exception e7) { - // can be DbException or FileNotFoundException + // can be DatabaseException or FileNotFoundException errExit(e7, "Create of history file failed"); } populateHistory(dbp); try { - dbp.close(0); - } catch (DbException e8) { + dbp.close(); + } catch (DatabaseException e8) { errExit(e8, "Close of history file failed"); } } - public void populateTable( - Db dbp, int start_id, int balance, int nrecs, String msg) - { + public void populateTable(Database dbp, + int start_id, int balance, int nrecs, String msg) { Defrec drec = new Defrec(); - Dbt kdbt = new Dbt(drec.data); + DatabaseEntry kdbt = new DatabaseEntry(drec.data); kdbt.setSize(4); // sizeof(int) - Dbt ddbt = new Dbt(drec.data); + DatabaseEntry ddbt = new DatabaseEntry(drec.data); ddbt.setSize(drec.data.length); // uses whole array try { for (int i = 0; i < nrecs; i++) { kdbt.setRecordNumber(start_id + (int)i); drec.set_balance(balance); - dbp.put(null, kdbt, ddbt, Db.DB_NOOVERWRITE); + dbp.putNoOverwrite(null, kdbt, ddbt); } - } catch (DbException dbe) { + } catch (DatabaseException dbe) { System.err.println("Failure initializing " + msg + " file: " + dbe.toString()); System.exit(1); } } - public void populateHistory(Db dbp) - { + public void populateHistory(Database dbp) { Histrec hrec = new Histrec(); hrec.set_amount(10); - byte arr[] = new byte[4]; // sizeof(int) + byte[] arr = new byte[4]; // sizeof(int) int i; - Dbt kdbt = new Dbt(arr); + DatabaseEntry kdbt = new DatabaseEntry(arr); kdbt.setSize(arr.length); - Dbt ddbt = new Dbt(hrec.data); + DatabaseEntry ddbt = new DatabaseEntry(hrec.data); ddbt.setSize(hrec.data.length); try { @@ -281,30 +293,25 @@ class TpcbExample extends DbEnv hrec.set_bid(random_id(BRANCH)); hrec.set_tid(random_id(TELLER)); - dbp.put(null, kdbt, ddbt, Db.DB_APPEND); + dbp.append(null, kdbt, ddbt); } - } catch (DbException dbe) { + } catch (DatabaseException dbe) { errExit(dbe, "Failure initializing history file"); } } static Random rand = new Random(); - public int random_int(int lo, int hi) - { - int ret; - int t; - - t = rand.nextInt(); + public static int random_int(int lo, int hi) { + int t = rand.nextInt(); if (t < 0) t = -t; - ret = (int)(((double)t / ((double)(Integer.MAX_VALUE) + 1)) * - (hi - lo + 1)); + int ret = (int)(((double)t / ((double)(Integer.MAX_VALUE) + 1)) * + (hi - lo + 1)); ret += lo; return (ret); } - public int random_id(int type) - { + public int random_id(int type) { int min, max, num; max = min = BEGID; @@ -327,51 +334,46 @@ class TpcbExample extends DbEnv // The byte order is our choice. 
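The populate methods above illustrate the other half of the migration: per-database options move from Db setter calls plus Db.open() flags into a DatabaseConfig handed to Environment.openDatabase(), and the flag-driven Db.put() variants become typed methods such as putNoOverwrite() and append(). A condensed sketch of that pattern follows; the method name, database name, and buffer size are illustrative only, not taken from the file:

    // Illustrative only: a stripped-down version of populateTable() above.
    static void populateSketch(Environment dbenv, int startId, int nrecs)
        throws DatabaseException, java.io.FileNotFoundException {
        DatabaseConfig config = new DatabaseConfig();
        config.setType(DatabaseType.HASH);   // old API: Db.DB_HASH
        config.setAllowCreate(true);         // old API: Db.DB_CREATE
        config.setTruncate(true);            // old API: Db.DB_TRUNCATE

        Database db = dbenv.openDatabase(null, "account", null, config);
        try {
            byte[] rec = new byte[100];      // stands in for the RECLEN buffer
            DatabaseEntry key = new DatabaseEntry(rec);
            key.setSize(4);                  // key is the 4-byte id
            DatabaseEntry data = new DatabaseEntry(rec);
            data.setSize(rec.length);

            for (int i = 0; i < nrecs; i++) {
                key.setRecordNumber(startId + i);
                // old API: db.put(null, key, data, Db.DB_NOOVERWRITE)
                db.putNoOverwrite(null, key, data);
            }
        } finally {
            db.close();
        }
    }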
// - static long get_int_in_array(byte[] array, int offset) - { + static long get_int_in_array(byte[] array, int offset) { return - ((0xff & array[offset+0]) << 0) | - ((0xff & array[offset+1]) << 8) | - ((0xff & array[offset+2]) << 16) | - ((0xff & array[offset+3]) << 24); + ((0xff & array[offset + 0]) << 0) | + ((0xff & array[offset + 1]) << 8) | + ((0xff & array[offset + 2]) << 16) | + ((0xff & array[offset + 3]) << 24); } // Note: Value needs to be long to avoid sign extension - static void set_int_in_array(byte[] array, int offset, long value) - { - array[offset+0] = (byte)((value >> 0) & 0x0ff); - array[offset+1] = (byte)((value >> 8) & 0x0ff); - array[offset+2] = (byte)((value >> 16) & 0x0ff); - array[offset+3] = (byte)((value >> 24) & 0x0ff); + static void set_int_in_array(byte[] array, int offset, long value) { + array[offset + 0] = (byte)((value >> 0) & 0xff); + array[offset + 1] = (byte)((value >> 8) & 0xff); + array[offset + 2] = (byte)((value >> 16) & 0xff); + array[offset + 3] = (byte)((value >> 24) & 0xff); } // round 'd' to 'scale' digits, and return result as string - static String showRounded(double d, int scale) - { + static String showRounded(double d, int scale) { return new BigDecimal(d). setScale(scale, BigDecimal.ROUND_HALF_DOWN).toString(); } - public void run(int ntxns, int threads) - { + public void run(int ntxns, int threads) { double gtps; int txns, failed; long curtime, starttime; TxnThread[] txnList = new TxnThread[threads]; - for (int i = 0; i < threads; i++) { + for (int i = 0; i < threads; i++) txnList[i] = new TxnThread("Thread " + String.valueOf(i), ntxns); - } starttime = (new Date()).getTime(); for (int i = 0; i < threads; i++) txnList[i].start(); - for (int i = 0; i < threads; i++) { + for (int i = 0; i < threads; i++) try { txnList[i].join(); } catch (Exception e1) { errExit(e1, "join failed"); } - } + curtime = (new Date()).getTime(); txns = failed = 0; for (int i = 0; i < threads; i++) { @@ -386,20 +388,17 @@ class TpcbExample extends DbEnv System.out.println(showRounded(gtps, 2) + " TPS"); } - class TxnThread extends Thread - { + class TxnThread extends Thread { private int ntxns; /* Number of txns we were asked to run. */ public int txns, failed; /* Number that succeeded / failed. 
*/ - private Db adb, bdb, hdb, tdb; + private Database adb, bdb, hdb, tdb; - public TxnThread(String name, int ntxns) - { + public TxnThread(String name, int ntxns) { super(name); this.ntxns = ntxns; } - public void run() - { + public void run() { double gtps, itps; int n, ifailed, ret; long starttime, curtime, lasttime; @@ -409,19 +408,13 @@ class TpcbExample extends DbEnv // int err; try { - adb = new Db(TpcbExample.this, 0); - adb.open(null, "account", null, Db.DB_UNKNOWN, - Db.DB_AUTO_COMMIT, 0); - bdb = new Db(TpcbExample.this, 0); - bdb.open(null, "branch", null, Db.DB_UNKNOWN, - Db.DB_AUTO_COMMIT, 0); - tdb = new Db(TpcbExample.this, 0); - tdb.open(null, "teller", null, Db.DB_UNKNOWN, - Db.DB_AUTO_COMMIT, 0); - hdb = new Db(TpcbExample.this, 0); - hdb.open(null, "history", null, Db.DB_UNKNOWN, - Db.DB_AUTO_COMMIT, 0); - } catch (DbException dbe) { + DatabaseConfig config = new DatabaseConfig(); + config.setTransactional(true); + adb = dbenv.openDatabase(null, "account", null, config); + bdb = dbenv.openDatabase(null, "branch", null, config); + tdb = dbenv.openDatabase(null, "teller", null, config); + hdb = dbenv.openDatabase(null, "history", null, config); + } catch (DatabaseException dbe) { TpcbExample.errExit(dbe, "Open of db files failed"); } catch (FileNotFoundException fnfe) { TpcbExample.errExit(fnfe, "Open of db files failed, missing file"); @@ -457,39 +450,38 @@ class TpcbExample extends DbEnv } try { - adb.close(0); - bdb.close(0); - tdb.close(0); - hdb.close(0); - } catch (DbException dbe2) { + adb.close(); + bdb.close(); + tdb.close(); + hdb.close(); + } catch (DatabaseException dbe2) { TpcbExample.errExit(dbe2, "Close of db files failed"); } System.out.println(getName() + ": " + - (long)txns + " transactions begun " - + String.valueOf(failed) + " failed"); + (long)txns + " transactions begun " + + String.valueOf(failed) + " failed"); } // // XXX Figure out the appropriate way to pick out IDs. // - int txn() - { - Dbc acurs = null; - Dbc bcurs = null; - Dbc hcurs = null; - Dbc tcurs = null; - DbTxn t = null; + int txn() { + Cursor acurs = null; + Cursor bcurs = null; + Cursor hcurs = null; + Cursor tcurs = null; + Transaction t = null; Defrec rec = new Defrec(); Histrec hrec = new Histrec(); - int account, branch, teller, ret; + int account, branch, teller; - Dbt d_dbt = new Dbt(); - Dbt d_histdbt = new Dbt(); - Dbt k_dbt = new Dbt(); - Dbt k_histdbt = new Dbt(); + DatabaseEntry d_dbt = new DatabaseEntry(); + DatabaseEntry d_histdbt = new DatabaseEntry(); + DatabaseEntry k_dbt = new DatabaseEntry(); + DatabaseEntry k_histdbt = new DatabaseEntry(); account = TpcbExample.this.random_id(TpcbExample.ACCOUNT); branch = TpcbExample.this.random_id(TpcbExample.BRANCH); @@ -497,62 +489,61 @@ class TpcbExample extends DbEnv // The history key will not actually be retrieved, // but it does need to be set to something. - byte hist_key[] = new byte[4]; + byte[] hist_key = new byte[4]; k_histdbt.setData(hist_key); k_histdbt.setSize(4 /* == sizeof(int)*/); - byte key_bytes[] = new byte[4]; + byte[] key_bytes = new byte[4]; k_dbt.setData(key_bytes); k_dbt.setSize(4 /* == sizeof(int)*/); - d_dbt.setFlags(Db.DB_DBT_USERMEM); d_dbt.setData(rec.data); - d_dbt.setUserBufferLength(rec.length()); + d_dbt.setUserBuffer(rec.length(), true); hrec.set_aid(account); hrec.set_bid(branch); hrec.set_tid(teller); hrec.set_amount(10); // Request 0 bytes since we're just positioning. 
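The two DatabaseEntry calls just below take over what the old Dbt flag bits expressed: setPartial(0, 0, true) stands in for DB_DBT_PARTIAL with a zero-length window (position only, fetch no bytes), and setUserBuffer(len, true) stands in for DB_DBT_USERMEM with a caller-supplied buffer. In isolation the mapping looks roughly like this (a sketch, not taken from the file; the buffer length is arbitrary):

    DatabaseEntry d = new DatabaseEntry(new byte[100]);
    // old API: d.setFlags(Db.DB_DBT_USERMEM); d.setUserBufferLength(100);
    d.setUserBuffer(100, true);

    DatabaseEntry pos = new DatabaseEntry();
    // old API: pos.setFlags(Db.DB_DBT_PARTIAL) with a zero-length request
    pos.setPartial(0, 0, true);  // offset 0, length 0: position only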
- d_histdbt.setFlags(Db.DB_DBT_PARTIAL); + d_histdbt.setPartial(0, 0, true); // START TIMING try { - t = TpcbExample.this.txnBegin(null, 0); + t = dbenv.beginTransaction(null, null); - acurs = adb.cursor(t, 0); - bcurs = bdb.cursor(t, 0); - tcurs = tdb.cursor(t, 0); - hcurs = hdb.cursor(t, 0); + acurs = adb.openCursor(t, null); + bcurs = bdb.openCursor(t, null); + tcurs = tdb.openCursor(t, null); + hcurs = hdb.openCursor(t, null); // Account record k_dbt.setRecordNumber(account); - if (acurs.get(k_dbt, d_dbt, Db.DB_SET) != 0) - throw new TpcbException("acurs get failed"); + if (acurs.getSearchKey(k_dbt, d_dbt, null) != OperationStatus.SUCCESS) + throw new Exception("acurs get failed"); rec.set_balance(rec.get_balance() + 10); - acurs.put(k_dbt, d_dbt, Db.DB_CURRENT); + acurs.putCurrent(d_dbt); // Branch record k_dbt.setRecordNumber(branch); - if ((ret = bcurs.get(k_dbt, d_dbt, Db.DB_SET)) != 0) - throw new TpcbException("bcurs get failed"); + if (bcurs.getSearchKey(k_dbt, d_dbt, null) != OperationStatus.SUCCESS) + throw new Exception("bcurs get failed"); rec.set_balance(rec.get_balance() + 10); - bcurs.put(k_dbt, d_dbt, Db.DB_CURRENT); + bcurs.putCurrent(d_dbt); // Teller record k_dbt.setRecordNumber(teller); - if (tcurs.get(k_dbt, d_dbt, Db.DB_SET) != 0) - throw new TpcbException("ccurs get failed"); + if (tcurs.getSearchKey(k_dbt, d_dbt, null) != OperationStatus.SUCCESS) + throw new Exception("ccurs get failed"); rec.set_balance(rec.get_balance() + 10); - tcurs.put(k_dbt, d_dbt, Db.DB_CURRENT); + tcurs.putCurrent(d_dbt); // History record - d_histdbt.setFlags(0); + d_histdbt.setPartial(0, 0, false); d_histdbt.setData(hrec.data); - d_histdbt.setUserBufferLength(hrec.length()); - if (hdb.put(t, k_histdbt, d_histdbt, Db.DB_APPEND) != 0) - throw(new DbException("put failed")); + d_histdbt.setUserBuffer(hrec.length(), true); + if (hdb.append(t, k_histdbt, d_histdbt) != OperationStatus.SUCCESS) + throw new DatabaseException("put failed"); acurs.close(); acurs = null; @@ -565,9 +556,9 @@ class TpcbExample extends DbEnv // null out t in advance; if the commit fails, // we don't want to abort it in the catch clause. - DbTxn tmptxn = t; + Transaction tmptxn = t; t = null; - tmptxn.commit(0); + tmptxn.commit(); // END TIMING return (0); @@ -583,14 +574,15 @@ class TpcbExample extends DbEnv hcurs.close(); if (t != null) t.abort(); - } catch (DbException dbe) { + } catch (DatabaseException dbe) { // not much we can do here. 
} if (TpcbExample.this.verbose) { - System.out.println("Transaction A=" + String.valueOf(account) - + " B=" + String.valueOf(branch) - + " T=" + String.valueOf(teller) + " failed"); + System.out.println("Transaction A=" + String.valueOf(account) + + " B=" + String.valueOf(branch) + + " T=" + String.valueOf(teller) + + " failed"); System.out.println("Reason: " + e.toString()); } return (-1); @@ -598,23 +590,20 @@ class TpcbExample extends DbEnv } } - private static void usage() - { + private static void usage() { System.err.println( - "usage: TpcbExample [-fiv] [-a accounts] [-b branches]\n" + - " [-c cachesize] [-h home] [-n transactions]\n" + - " [-T threads] [-S seed] [-s history] [-t tellers]"); + "usage: TpcbExample [-fiv] [-a accounts] [-b branches]\n" + + " [-c cachesize] [-h home] [-n transactions]\n" + + " [-T threads] [-S seed] [-s history] [-t tellers]"); System.exit(1); } - private static void invarg(String str) - { + private static void invarg(String str) { System.err.println("TpcbExample: invalid argument: " + str); System.exit(1); } - public static void errExit(Exception err, String s) - { + public static void errExit(Exception err, String s) { System.err.print(progname + ": "); if (s != null) { System.err.print(s + ": "); @@ -623,9 +612,8 @@ class TpcbExample extends DbEnv System.exit(1); } - public static void main(String argv[]) throws java.io.IOException - { - String home = "TESTDIR"; + public static void main(String[] argv) throws java.io.IOException { + File home = new File("TESTDIR"); int accounts = ACCOUNTS; int branches = BRANCHES; int tellers = TELLERS; @@ -655,7 +643,7 @@ class TpcbExample extends DbEnv txn_no_sync = true; } else if (argv[i].equals("-h")) { // DB home. - home = argv[++i]; + home = new File(argv[++i]); } else if (argv[i].equals("-i")) { // Initialize the test. iflag = true; @@ -696,17 +684,16 @@ class TpcbExample extends DbEnv TpcbExample app = null; try { app = new TpcbExample(home, accounts, branches, tellers, history, - mpool, iflag, - txn_no_sync ? Db.DB_TXN_NOSYNC : 0); + mpool, iflag, txn_no_sync); } catch (Exception e1) { errExit(e1, "initializing environment failed"); } if (verbose) - System.out.println((long)accounts + " Accounts, " - + String.valueOf(branches) + " Branches, " - + String.valueOf(tellers) + " Tellers, " - + String.valueOf(history) + " History"); + System.out.println((long)accounts + " Accounts, " + + String.valueOf(branches) + " Branches, " + + String.valueOf(tellers) + " Tellers, " + + String.valueOf(history) + " History"); if (iflag) { if (ntxns != 0) @@ -721,8 +708,8 @@ class TpcbExample extends DbEnv // Shut down the application. 
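The transaction loop driven from main() above bottoms out in txn(); condensed, the per-transaction pattern in the new API looks roughly like the following sketch. Error handling is trimmed, only one table is touched, and the helper name, buffers, and message strings are assumptions for illustration rather than code from the example:

    // Sketch of one TPC-B style update: begin, cursor read-modify-write,
    // append a history record, then commit (abort on any failure).
    static void updateSketch(Environment dbenv, Database adb, Database hdb,
        int account, byte[] recBuf, byte[] histBuf)
        throws DatabaseException {
        DatabaseEntry key = new DatabaseEntry(new byte[4]);
        key.setSize(4);
        DatabaseEntry data = new DatabaseEntry(recBuf);
        data.setUserBuffer(recBuf.length, true);

        Transaction t = dbenv.beginTransaction(null, null);
        Cursor acurs = null;
        try {
            acurs = adb.openCursor(t, null);

            key.setRecordNumber(account);
            if (acurs.getSearchKey(key, data, null) != OperationStatus.SUCCESS)
                throw new DatabaseException("account lookup failed");
            // ... adjust the balance bytes in recBuf here ...
            acurs.putCurrent(data);
            acurs.close();
            acurs = null;

            DatabaseEntry histKey = new DatabaseEntry(new byte[4]);
            DatabaseEntry histData = new DatabaseEntry(histBuf);
            histData.setUserBuffer(histBuf.length, true);
            hdb.append(t, histKey, histData);  // old API: DB_APPEND

            Transaction commitTxn = t;
            t = null;                          // don't abort after commit
            commitTxn.commit();
        } catch (DatabaseException e) {
            try {
                if (acurs != null)
                    acurs.close();
                if (t != null)
                    t.abort();
            } catch (DatabaseException cleanupErr) {
                // not much we can do here
            }
            throw e;
        }
    }

On the command line the example is typically run twice, once with -i to create and load the databases and once to drive transactions, for instance java TpcbExample -i -h TESTDIR followed by java TpcbExample -n 1000 -T 4 -h TESTDIR (illustrative values, with the Berkeley DB classes on the classpath; see the usage string above for the full option list).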
try { - app.close(0); - } catch (DbException dbe2) { + app.close(); + } catch (DatabaseException dbe2) { errExit(dbe2, "appexit failed"); } @@ -737,35 +724,28 @@ class TpcbExample extends DbEnv // u_int8_t pad[RECLEN - sizeof(int) - sizeof(int)]; // }; -class Defrec -{ - public Defrec() - { +class Defrec { + public Defrec() { data = new byte[TpcbExample.RECLEN]; } - public int length() - { + public int length() { return TpcbExample.RECLEN; } - public long get_id() - { + public long get_id() { return TpcbExample.get_int_in_array(data, 0); } - public void set_id(long value) - { + public void set_id(long value) { TpcbExample.set_int_in_array(data, 0, value); } - public long get_balance() - { + public long get_balance() { return TpcbExample.get_int_in_array(data, 4); } - public void set_balance(long value) - { + public void set_balance(long value) { TpcbExample.set_int_in_array(data, 4, value); } @@ -786,70 +766,46 @@ class Defrec // u_int8_t pad[RECLEN - 4 * sizeof(u_int32_t)]; // }; -class Histrec -{ - public Histrec() - { +class Histrec { + public Histrec() { data = new byte[TpcbExample.RECLEN]; } - public int length() - { + public int length() { return TpcbExample.RECLEN; } - public long get_aid() - { + public long get_aid() { return TpcbExample.get_int_in_array(data, 0); } - public void set_aid(long value) - { + public void set_aid(long value) { TpcbExample.set_int_in_array(data, 0, value); } - public long get_bid() - { + public long get_bid() { return TpcbExample.get_int_in_array(data, 4); } - public void set_bid(long value) - { + public void set_bid(long value) { TpcbExample.set_int_in_array(data, 4, value); } - public long get_tid() - { + public long get_tid() { return TpcbExample.get_int_in_array(data, 8); } - public void set_tid(long value) - { + public void set_tid(long value) { TpcbExample.set_int_in_array(data, 8, value); } - public long get_amount() - { + public long get_amount() { return TpcbExample.get_int_in_array(data, 12); } - public void set_amount(long value) - { + public void set_amount(long value) { TpcbExample.set_int_in_array(data, 12, value); } public byte[] data; } - -class TpcbException extends Exception -{ - TpcbException() - { - super(); - } - - TpcbException(String s) - { - super(s); - } -} diff --git a/db/fileops/fileops.src b/db/fileops/fileops.src index 9e79ee156..a77b5d5c4 100644 --- a/db/fileops/fileops.src +++ b/db/fileops/fileops.src @@ -1,17 +1,15 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. * - * $Id: fileops.src,v 1.11 2003/04/24 14:19:17 bostic Exp $ + * $Id: fileops.src,v 1.13 2004/06/17 17:35:20 bostic Exp $ */ PREFIX __fop DBPRIVATE -INCLUDE #include "db_config.h" -INCLUDE INCLUDE #ifndef NO_SYSTEM_INCLUDES INCLUDE #include INCLUDE diff --git a/db/fileops/fileops_auto.c b/db/fileops/fileops_auto.c index 4ebd2b1d1..333e37755 100644 --- a/db/fileops/fileops_auto.c +++ b/db/fileops/fileops_auto.c @@ -1,4 +1,5 @@ /* Do not edit: automatically built by gen_rec.awk. 
*/ + #include "db_config.h" #ifndef NO_SYSTEM_INCLUDES @@ -34,31 +35,42 @@ __fop_create_log(dbenv, txnid, ret_lsnp, flags, { DBT logrec; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; + COMPQUIET(lr, NULL); + rectype = DB___fop_create; npad = 0; + rlsnp = ret_lsnp; - is_durable = 1; - if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE)) { + ret = 0; + + if (LF_ISSET(DB_LOG_NOT_DURABLE)) { if (txnid == NULL) return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -73,27 +85,23 @@ __fop_create_log(dbenv, txnid, ret_lsnp, flags, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -130,130 +138,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
*/ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; #endif - } else{ -#ifdef DIAGNOSTIC -do_put: -#endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__fop_create_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __fop_create_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__fop_create_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __fop_create_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__fop_create_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __fop_create_args *argp; - u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __fop_create_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__fop_create%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tname: "); - for (i = 0; i < argp->name.size; i++) { - ch = ((u_int8_t *)argp->name.data)[i]; - printf(isprint(ch) || ch == 0x0a ? 
"%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tappname: %lu\n", (u_long)argp->appname); - (void)printf("\tmode: %o\n", argp->mode); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __fop_create_read __P((DB_ENV *, void *, __fop_create_args **)); */ @@ -271,9 +196,9 @@ __fop_create_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__fop_create_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -318,31 +243,42 @@ __fop_remove_log(dbenv, txnid, ret_lsnp, flags, { DBT logrec; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; + COMPQUIET(lr, NULL); + rectype = DB___fop_remove; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; - if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE)) { + if (LF_ISSET(DB_LOG_NOT_DURABLE)) { if (txnid == NULL) return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -357,27 +293,23 @@ __fop_remove_log(dbenv, txnid, ret_lsnp, flags, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -421,135 +353,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
*/ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; #endif - } else{ -#ifdef DIAGNOSTIC -do_put: -#endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__fop_remove_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __fop_remove_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__fop_remove_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __fop_remove_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__fop_remove_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __fop_remove_args *argp; - u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __fop_remove_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__fop_remove%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tname: "); - for (i = 0; i < argp->name.size; i++) { - ch = ((u_int8_t *)argp->name.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tfid: "); - for (i = 0; i < argp->fid.size; i++) { - ch = ((u_int8_t *)argp->fid.data)[i]; - printf(isprint(ch) || ch == 0x0a ? 
"%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tappname: %lu\n", (u_long)argp->appname); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __fop_remove_read __P((DB_ENV *, void *, __fop_remove_args **)); */ @@ -567,9 +411,9 @@ __fop_remove_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__fop_remove_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -622,31 +466,42 @@ __fop_write_log(dbenv, txnid, ret_lsnp, flags, { DBT logrec; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; + COMPQUIET(lr, NULL); + rectype = DB___fop_write; npad = 0; + rlsnp = ret_lsnp; - is_durable = 1; - if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE)) { + ret = 0; + + if (LF_ISSET(DB_LOG_NOT_DURABLE)) { if (txnid == NULL) return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -665,27 +520,23 @@ __fop_write_log(dbenv, txnid, ret_lsnp, flags, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -745,139 +596,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
*/ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__fop_write_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __fop_write_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__fop_write_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __fop_write_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__fop_write_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __fop_write_args *argp; - u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __fop_write_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__fop_write%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tname: "); - for (i = 0; i < argp->name.size; i++) { - ch = ((u_int8_t *)argp->name.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tappname: %lu\n", (u_long)argp->appname); - (void)printf("\tpgsize: %lu\n", (u_long)argp->pgsize); - (void)printf("\tpageno: %lu\n", (u_long)argp->pageno); - (void)printf("\toffset: %lu\n", (u_long)argp->offset); - (void)printf("\tpage: "); - for (i = 0; i < argp->page.size; i++) { - ch = ((u_int8_t *)argp->page.data)[i]; - printf(isprint(ch) || ch == 0x0a ? 
"%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tflag: %lu\n", (u_long)argp->flag); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __fop_write_read __P((DB_ENV *, void *, __fop_write_args **)); */ @@ -895,9 +654,9 @@ __fop_write_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__fop_write_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -961,31 +720,42 @@ __fop_rename_log(dbenv, txnid, ret_lsnp, flags, { DBT logrec; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; + COMPQUIET(lr, NULL); + rectype = DB___fop_rename; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; - if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE)) { + if (LF_ISSET(DB_LOG_NOT_DURABLE)) { if (txnid == NULL) return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -1001,27 +771,23 @@ __fop_rename_log(dbenv, txnid, ret_lsnp, flags, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -1076,141 +842,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
*/ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; #endif - } else{ -#ifdef DIAGNOSTIC -do_put: -#endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__fop_rename_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __fop_rename_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__fop_rename_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __fop_rename_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__fop_rename_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __fop_rename_args *argp; - u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __fop_rename_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__fop_rename%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\toldname: "); - for (i = 0; i < argp->oldname.size; i++) { - ch = ((u_int8_t *)argp->oldname.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tnewname: "); - for (i = 0; i < argp->newname.size; i++) { - ch = ((u_int8_t *)argp->newname.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tfileid: "); - for (i = 0; i < argp->fileid.size; i++) { - ch = ((u_int8_t *)argp->fileid.data)[i]; - printf(isprint(ch) || ch == 0x0a ? 
"%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tappname: %lu\n", (u_long)argp->appname); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __fop_rename_read __P((DB_ENV *, void *, __fop_rename_args **)); */ @@ -1228,9 +900,9 @@ __fop_rename_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__fop_rename_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -1286,31 +958,42 @@ __fop_file_remove_log(dbenv, txnid, ret_lsnp, flags, { DBT logrec; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; + COMPQUIET(lr, NULL); + rectype = DB___fop_file_remove; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; - if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE)) { + if (LF_ISSET(DB_LOG_NOT_DURABLE)) { if (txnid == NULL) return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -1327,27 +1010,23 @@ __fop_file_remove_log(dbenv, txnid, ret_lsnp, flags, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -1406,142 +1085,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
*/ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; #endif - } else{ -#ifdef DIAGNOSTIC -do_put: -#endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__fop_file_remove_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __fop_file_remove_getpgnos __P((DB_ENV *, DBT *, - * PUBLIC: DB_LSN *, db_recops, void *)); - */ -int -__fop_file_remove_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __fop_file_remove_print __P((DB_ENV *, DBT *, - * PUBLIC: DB_LSN *, db_recops, void *)); - */ -int -__fop_file_remove_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __fop_file_remove_args *argp; - u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __fop_file_remove_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__fop_file_remove%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\treal_fid: "); - for (i = 0; i < argp->real_fid.size; i++) { - ch = ((u_int8_t *)argp->real_fid.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\ttmp_fid: "); - for (i = 0; i < argp->tmp_fid.size; i++) { - ch = ((u_int8_t *)argp->tmp_fid.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tname: "); - for (i = 0; i < argp->name.size; i++) { - ch = ((u_int8_t *)argp->name.data)[i]; - printf(isprint(ch) || ch == 0x0a ? 
"%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tappname: %lu\n", (u_long)argp->appname); - (void)printf("\tchild: 0x%lx\n", (u_long)argp->child); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __fop_file_remove_read __P((DB_ENV *, void *, * PUBLIC: __fop_file_remove_args **)); @@ -1560,9 +1144,9 @@ __fop_file_remove_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__fop_file_remove_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -1602,68 +1186,6 @@ __fop_file_remove_read(dbenv, recbuf, argpp) return (0); } -/* - * PUBLIC: int __fop_init_print __P((DB_ENV *, int (***)(DB_ENV *, - * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); - */ -int -__fop_init_print(dbenv, dtabp, dtabsizep) - DB_ENV *dbenv; - int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - size_t *dtabsizep; -{ - int ret; - - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __fop_create_print, DB___fop_create)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __fop_remove_print, DB___fop_remove)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __fop_write_print, DB___fop_write)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __fop_rename_print, DB___fop_rename)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __fop_file_remove_print, DB___fop_file_remove)) != 0) - return (ret); - return (0); -} - -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __fop_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, - * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); - */ -int -__fop_init_getpgnos(dbenv, dtabp, dtabsizep) - DB_ENV *dbenv; - int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - size_t *dtabsizep; -{ - int ret; - - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __fop_create_getpgnos, DB___fop_create)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __fop_remove_getpgnos, DB___fop_remove)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __fop_write_getpgnos, DB___fop_write)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __fop_rename_getpgnos, DB___fop_rename)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __fop_file_remove_getpgnos, DB___fop_file_remove)) != 0) - return (ret); - return (0); -} -#endif /* HAVE_REPLICATION */ - /* * PUBLIC: int __fop_init_recover __P((DB_ENV *, int (***)(DB_ENV *, * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); diff --git a/db/fileops/fileops_autop.c b/db/fileops/fileops_autop.c new file mode 100644 index 000000000..970b0c63b --- /dev/null +++ b/db/fileops/fileops_autop.c @@ -0,0 +1,306 @@ +/* Do not edit: automatically built by gen_rec.awk. 
*/ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include + +#include +#include +#endif + +#include "db_int.h" +#include "dbinc/crypto.h" +#include "dbinc/db_page.h" +#include "dbinc/db_dispatch.h" +#include "dbinc/db_am.h" +#include "dbinc/log.h" +#include "dbinc/txn.h" +#include "dbinc/fop.h" + +/* + * PUBLIC: int __fop_create_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__fop_create_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __fop_create_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __fop_create_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__fop_create%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tname: "); + for (i = 0; i < argp->name.size; i++) { + ch = ((u_int8_t *)argp->name.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tappname: %lu\n", (u_long)argp->appname); + (void)printf("\tmode: %o\n", argp->mode); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __fop_remove_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__fop_remove_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __fop_remove_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __fop_remove_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__fop_remove%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tname: "); + for (i = 0; i < argp->name.size; i++) { + ch = ((u_int8_t *)argp->name.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tfid: "); + for (i = 0; i < argp->fid.size; i++) { + ch = ((u_int8_t *)argp->fid.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tappname: %lu\n", (u_long)argp->appname); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __fop_write_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__fop_write_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __fop_write_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __fop_write_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__fop_write%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? 
"_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tname: "); + for (i = 0; i < argp->name.size; i++) { + ch = ((u_int8_t *)argp->name.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tappname: %lu\n", (u_long)argp->appname); + (void)printf("\tpgsize: %lu\n", (u_long)argp->pgsize); + (void)printf("\tpageno: %lu\n", (u_long)argp->pageno); + (void)printf("\toffset: %lu\n", (u_long)argp->offset); + (void)printf("\tpage: "); + for (i = 0; i < argp->page.size; i++) { + ch = ((u_int8_t *)argp->page.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tflag: %lu\n", (u_long)argp->flag); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __fop_rename_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__fop_rename_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __fop_rename_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __fop_rename_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__fop_rename%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\toldname: "); + for (i = 0; i < argp->oldname.size; i++) { + ch = ((u_int8_t *)argp->oldname.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tnewname: "); + for (i = 0; i < argp->newname.size; i++) { + ch = ((u_int8_t *)argp->newname.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tfileid: "); + for (i = 0; i < argp->fileid.size; i++) { + ch = ((u_int8_t *)argp->fileid.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tappname: %lu\n", (u_long)argp->appname); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __fop_file_remove_print __P((DB_ENV *, DBT *, + * PUBLIC: DB_LSN *, db_recops, void *)); + */ +int +__fop_file_remove_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __fop_file_remove_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __fop_file_remove_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__fop_file_remove%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\treal_fid: "); + for (i = 0; i < argp->real_fid.size; i++) { + ch = ((u_int8_t *)argp->real_fid.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\ttmp_fid: "); + for (i = 0; i < argp->tmp_fid.size; i++) { + ch = ((u_int8_t *)argp->tmp_fid.data)[i]; + printf(isprint(ch) || ch == 0x0a ? 
"%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tname: "); + for (i = 0; i < argp->name.size; i++) { + ch = ((u_int8_t *)argp->name.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tappname: %lu\n", (u_long)argp->appname); + (void)printf("\tchild: 0x%lx\n", (u_long)argp->child); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __fop_init_print __P((DB_ENV *, int (***)(DB_ENV *, + * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); + */ +int +__fop_init_print(dbenv, dtabp, dtabsizep) + DB_ENV *dbenv; + int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); + size_t *dtabsizep; +{ + int ret; + + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __fop_create_print, DB___fop_create)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __fop_remove_print, DB___fop_remove)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __fop_write_print, DB___fop_write)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __fop_rename_print, DB___fop_rename)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __fop_file_remove_print, DB___fop_file_remove)) != 0) + return (ret); + return (0); +} diff --git a/db/fileops/fop_basic.c b/db/fileops/fop_basic.c index b560d9fb1..0b6be860f 100644 --- a/db/fileops/fop_basic.c +++ b/db/fileops/fop_basic.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. + * + * $Id: fop_basic.c,v 1.31 2004/01/28 03:36:09 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: fop_basic.c,v 1.30 2003/07/24 01:26:22 margo Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include diff --git a/db/fileops/fop_rec.c b/db/fileops/fop_rec.c index 5d7c90247..a9326d532 100644 --- a/db/fileops/fop_rec.c +++ b/db/fileops/fop_rec.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. + * + * $Id: fop_rec.c,v 1.31 2004/09/22 03:45:25 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: fop_rec.c,v 1.27 2003/10/07 20:23:28 ubell Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -60,7 +58,7 @@ __fop_create_recover(dbenv, dbtp, lsnp, op, info) if ((ret = __os_open(dbenv, real_name, DB_OSO_CREATE | DB_OSO_EXCL, argp->mode, &fhp)) == 0) (void)__os_closehandle(dbenv, fhp); - else + else goto out; } @@ -257,7 +255,7 @@ __fop_file_remove_recover(dbenv, dbtp, lsnp, op, info) int is_real, is_tmp, ret; size_t len; u_int8_t mbuf[DBMETASIZE]; - u_int32_t cstat; + u_int32_t cstat, ret_stat; fhp = NULL; is_real = is_tmp = 0; @@ -319,10 +317,7 @@ __fop_file_remove_recover(dbenv, dbtp, lsnp, op, info) if (DB_UNDO(op)) { /* On the backward pass, we leave a note for the child txn. 
*/ if ((ret = __db_txnlist_update(dbenv, - info, argp->child, cstat, NULL)) == TXN_NOTFOUND) - ret = __db_txnlist_add(dbenv, - info, argp->child, cstat, NULL); - if (ret != 0) + info, argp->child, cstat, NULL, &ret_stat, 1)) != 0) goto out; } else if (DB_REDO(op)) { /* diff --git a/db/fileops/fop_util.c b/db/fileops/fop_util.c index 516378b3a..564dc4a36 100644 --- a/db/fileops/fop_util.c +++ b/db/fileops/fop_util.c @@ -1,20 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. + * + * $Id: fop_util.c,v 1.104 2004/09/24 00:43:18 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: fop_util.c,v 1.83 2003/10/15 20:29:59 margo Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include -#include #include #include #endif @@ -35,7 +32,15 @@ static int __fop_set_pgsize __P((DB *, DB_FH *, const char *)); * Acquire the environment meta-data lock. The parameters are the * environment (ENV), the locker id to use in acquiring the lock (ID) * and a pointer to a DB_LOCK. + * + * !!! + * Turn off locking for Critical Path. The application must do its own + * synchronization of open/create. Two threads creating and opening a + * file at the same time may have unpredictable results. */ +#ifdef CRITICALPATH_10266 +#define GET_ENVLOCK(ENV, ID, L) (0) +#else #define GET_ENVLOCK(ENV, ID, L) do { \ DBT __dbt; \ u_int32_t __lockval; \ @@ -49,9 +54,7 @@ static int __fop_set_pgsize __P((DB *, DB_FH *, const char *)); goto err; \ } \ } while (0) - -#define REL_ENVLOCK(ENV, L) \ - (!LOCK_ISSET(*(L)) ? 0 : __lock_put((ENV), (L))) +#endif /* * If we open a file handle and our caller is doing fcntl(2) locking, @@ -75,21 +78,21 @@ static int __fop_set_pgsize __P((DB *, DB_FH *, const char *)); /* * __fop_lock_handle -- * - * Get the handle lock for a database. If the envlock is specified, - * do this as a lock_vec call that releases the enviroment lock before - * acquiring the handle lock. + * Get the handle lock for a database. If the envlock is specified, do this + * as a lock_vec call that releases the environment lock before acquiring the + * handle lock. * * PUBLIC: int __fop_lock_handle __P((DB_ENV *, * PUBLIC: DB *, u_int32_t, db_lockmode_t, DB_LOCK *, u_int32_t)); * */ int -__fop_lock_handle(dbenv, dbp, locker, mode, elock, flags) +__fop_lock_handle(dbenv, dbp, locker, mode, elockp, flags) DB_ENV *dbenv; DB *dbp; u_int32_t locker; db_lockmode_t mode; - DB_LOCK *elock; + DB_LOCK *elockp; u_int32_t flags; { DBT fileobj; @@ -105,13 +108,10 @@ __fop_lock_handle(dbenv, dbp, locker, mode, elock, flags) * If we are in recovery, the only locking we should be * doing is on the global environment. */ - if (IS_RECOVERING(dbenv)) { - if (elock != NULL) - REL_ENVLOCK(dbenv, elock); - return (0); - } + if (IS_RECOVERING(dbenv)) + return (elockp == NULL ? 
0 : __ENV_LPUT(dbenv, *elockp, 0)); - memcpy(&lock_desc.fileid, &dbp->fileid, DB_FILE_ID_LEN); + memcpy(lock_desc.fileid, dbp->fileid, DB_FILE_ID_LEN); lock_desc.pgno = dbp->meta_pgno; lock_desc.type = DB_HANDLE_LOCK; @@ -119,12 +119,12 @@ __fop_lock_handle(dbenv, dbp, locker, mode, elock, flags) fileobj.data = &lock_desc; fileobj.size = sizeof(lock_desc); DB_TEST_SUBLOCKS(dbenv, flags); - if (elock == NULL) + if (elockp == NULL) ret = __lock_get(dbenv, locker, flags, &fileobj, mode, &dbp->handle_lock); else { reqs[0].op = DB_LOCK_PUT; - reqs[0].lock = *elock; + reqs[0].lock = *elockp; reqs[1].op = DB_LOCK_GET; reqs[1].mode = mode; reqs[1].obj = &fileobj; @@ -132,9 +132,9 @@ __fop_lock_handle(dbenv, dbp, locker, mode, elock, flags) if ((ret = __lock_vec(dbenv, locker, flags, reqs, 2, &ereq)) == 0) { dbp->handle_lock = reqs[1].lock; - LOCK_INIT(*elock); + LOCK_INIT(*elockp); } else if (ereq != reqs) - LOCK_INIT(*elock); + LOCK_INIT(*elockp); } dbp->cur_lid = locker; @@ -160,7 +160,7 @@ __fop_lock_handle(dbenv, dbp, locker, mode, elock, flags) * buffer cache or obtaining a lock (we use this unique fileid to lock * as well as to identify like files in the cache). * - * There are a couple of idiosyncracies that this code must support, in + * There are a couple of idiosyncrasies that this code must support, in * particular, DB_TRUNCATE and DB_FCNTL_LOCKING. First, we disallow * DB_TRUNCATE in the presence of transactions, since opening a file with * O_TRUNC will result in data being lost in an unrecoverable fashion. @@ -202,7 +202,7 @@ __fop_file_setup(dbp, txn, name, mode, flags, retidp) size_t len; u_int32_t dflags, locker, oflags; u_int8_t mbuf[DBMETASIZE]; - int created_locker, ret, t_ret, tmp_created, truncating; + int created_locker, ret, retries, t_ret, tmp_created, truncating; char *real_name, *real_tmpname, *tmpname; DB_ASSERT(name != NULL); @@ -223,7 +223,9 @@ __fop_file_setup(dbp, txn, name, mode, flags, retidp) * sure we don't clobber it and conflict. */ if (LOCKING_ON(dbenv) && - !F_ISSET(dbp, DB_AM_COMPENSATE) && dbp->lid == DB_LOCK_INVALIDID) { + !F_ISSET(dbp, DB_AM_COMPENSATE) && + !F_ISSET(dbp, DB_AM_RECOVER) && + dbp->lid == DB_LOCK_INVALIDID) { if ((ret = __lock_id(dbenv, &dbp->lid)) != 0) goto err; created_locker = 1; @@ -246,8 +248,20 @@ __fop_file_setup(dbp, txn, name, mode, flags, retidp) oflags |= DB_OSO_RDONLY; if (LF_ISSET(DB_TRUNCATE)) oflags |= DB_OSO_TRUNC; - -retry: if (!F_ISSET(dbp, DB_AM_COMPENSATE)) + retries = 0; +retry: + /* + * If we cannot create the file, only retry a few times. We + * think we might be in a race with another create, but it could + * be that the backup filename exists (that is, is left over from + * a previous crash). + */ + if (++retries > DB_RETRY) { + __db_err(dbenv, "__fop_file_setup: Retry limit (%d) exceeded", + DB_RETRY); + goto err; + } + if (!F_ISSET(dbp, DB_AM_COMPENSATE) && !F_ISSET(dbp, DB_AM_RECOVER)) GET_ENVLOCK(dbenv, locker, &elock); if ((ret = __os_exists(real_name, NULL)) == 0) { /* @@ -307,9 +321,12 @@ reopen: if ((ret = __os_open(dbenv, real_name, oflags, 0, &fhp)) != 0) /* Now, get our handle lock. */ if ((ret = __fop_lock_handle(dbenv, dbp, locker, DB_LOCK_READ, NULL, DB_LOCK_NOWAIT)) == 0) { - if ((ret = REL_ENVLOCK(dbenv, &elock)) != 0) + if ((ret = __ENV_LPUT(dbenv, elock, 0)) != 0) goto err; - } else if (ret == DB_LOCK_NOTGRANTED) { + } else if (ret != DB_LOCK_NOTGRANTED || + (txn != NULL && F_ISSET(txn, TXN_NOWAIT))) + goto err; + else { /* * We were unable to acquire the handle lock without * blocking. 
The fact that we are blocking might mean @@ -343,8 +360,7 @@ reopen: if ((ret = __os_open(dbenv, real_name, oflags, 0, &fhp)) != 0) if ((ret = __os_open(dbenv, real_name, 0, 0, &fhp)) != 0) goto err; - } else - goto err; + } /* If we got here, then we now have the handle lock. */ @@ -375,10 +391,10 @@ reopen: if ((ret = __os_open(dbenv, real_name, oflags, 0, &fhp)) != 0) * should not have been allowed to open it. */ if (LF_ISSET(DB_EXCL)) { - if (LOCK_ISSET(dbp->handle_lock)) - __lock_put(dbenv, &dbp->handle_lock); + ret = __ENV_LPUT(dbenv, dbp->handle_lock, 0); LOCK_INIT(dbp->handle_lock); - ret = EEXIST; + if (ret == 0) + ret = EEXIST; goto err; } goto done; @@ -394,7 +410,7 @@ reopen: if ((ret = __os_open(dbenv, real_name, oflags, 0, &fhp)) != 0) * the fileid and the locks. Then we need to call the appropriate * routines to create meta-data pages. */ - if ((ret = REL_ENVLOCK(dbenv, &elock)) != 0) + if ((ret = __ENV_LPUT(dbenv, elock, 0)) != 0) goto err; create: if (txn != NULL && IS_REP_CLIENT(dbenv)) { @@ -417,6 +433,7 @@ create: if (txn != NULL && IS_REP_CLIENT(dbenv)) { if (!TXN_ON(dbenv) && ret == EEXIST) { __os_free(dbenv, tmpname); tmpname = NULL; + __os_yield(dbenv, 1); goto retry; } goto err; @@ -449,7 +466,7 @@ creat2: if ((ret = __db_appname(dbenv, * Now move the file into place unless we are creating in place (because * we created a database in a file that started out 0-length). */ - if (!F_ISSET(dbp, DB_AM_COMPENSATE)) + if (!F_ISSET(dbp, DB_AM_COMPENSATE) && !F_ISSET(dbp, DB_AM_RECOVER)) GET_ENVLOCK(dbenv, locker, &elock); if (F_ISSET(dbp, DB_AM_IN_RENAME)) { @@ -464,8 +481,7 @@ creat2: if ((ret = __db_appname(dbenv, */ (void)__fop_remove(dbenv, NULL, dbp->fileid, tmpname, DB_APP_DATA, dflags); - if (LOCK_ISSET(dbp->handle_lock)) - __lock_put(dbenv, &dbp->handle_lock); + (void)__ENV_LPUT(dbenv, dbp->handle_lock, 0); LOCK_INIT(dbp->handle_lock); if (stxn != NULL) { @@ -478,7 +494,7 @@ creat2: if ((ret = __db_appname(dbenv, } if ((ret = __fop_lock_handle(dbenv, - dbp, locker, DB_LOCK_WRITE, &elock, 0)) != 0) + dbp, locker, DB_LOCK_WRITE, &elock, NOWAIT_FLAG(txn))) != 0) goto err; if (tmpname != name && (ret = __fop_rename(dbenv, stxn, tmpname, name, dbp->fileid, DB_APP_DATA, dflags)) != 0) @@ -505,10 +521,9 @@ err: CLOSE_HANDLE(dbp, fhp); if (tmp_created && txn == NULL) (void)__fop_remove(dbenv, NULL, NULL, tmpname, DB_APP_DATA, dflags); - if (LOCK_ISSET(dbp->handle_lock) && txn == NULL) - __lock_put(dbenv, &dbp->handle_lock); - if (LOCK_ISSET(elock)) - (void)REL_ENVLOCK(dbenv, &elock); + if (txn == NULL) + (void)__ENV_LPUT(dbenv, dbp->handle_lock, 0); + (void)__ENV_LPUT(dbenv, elock, 0); if (created_locker) { (void)__lock_id_free(dbenv, dbp->lid); dbp->lid = DB_LOCK_INVALIDID; @@ -602,6 +617,7 @@ __fop_subdb_setup(dbp, txn, mname, name, mode, flags) { DB *mdbp; DB_ENV *dbenv; + db_lockmode_t lkmode; int ret, t_ret; mdbp = NULL; @@ -653,15 +669,16 @@ __fop_subdb_setup(dbp, txn, mname, name, mode, flags) */ memcpy(dbp->fileid, mdbp->fileid, DB_FILE_ID_LEN); + lkmode = F_ISSET(dbp, DB_AM_CREATED) || LF_ISSET(DB_WRITEOPEN) ? + DB_LOCK_WRITE : DB_LOCK_READ; if ((ret = __fop_lock_handle(dbenv, dbp, - txn == NULL ? dbp->lid : txn->txnid, - F_ISSET(dbp, DB_AM_CREATED) || LF_ISSET(DB_WRITEOPEN) ? - DB_LOCK_WRITE : DB_LOCK_READ, NULL, 0)) != 0) + txn == NULL ? 
dbp->lid : txn->txnid, lkmode, NULL, + NOWAIT_FLAG(txn))) != 0) goto err; if ((ret = __db_init_subdb(mdbp, dbp, name, txn)) != 0) { /* - * If there was no tranaction and we created this database, + * If there was no transaction and we created this database, * then we need to undo the update of the master database. */ if (F_ISSET(dbp, DB_AM_CREATED) && txn != NULL) @@ -703,8 +720,8 @@ __fop_subdb_setup(dbp, txn, mname, name, mode, flags) if (0) { err: DB_TEST_RECOVERY_LABEL - if (LOCK_ISSET(dbp->handle_lock) && txn == NULL) - __lock_put(dbenv, &dbp->handle_lock); + if (txn == NULL) + (void)__ENV_LPUT(dbenv, dbp->handle_lock, 0); } /* @@ -729,7 +746,16 @@ DB_TEST_RECOVERY_LABEL ret = t_ret; } LOCK_INIT(mdbp->handle_lock); - if ((t_ret =__db_close(mdbp, txn, 0)) && ret == 0) + + /* + * If the master was created, we need to sync so that the metadata + * page is correct on disk for recovery, since it isn't read through + * mpool. If we're opening a subdb in an existing file, we can skip + * the sync. + */ + if ((t_ret =__db_close(mdbp, txn, + F_ISSET(dbp, DB_AM_CREATED_MSTR) ? 0 : DB_NOSYNC)) != 0 && + ret == 0) ret = t_ret; return (ret); } @@ -751,13 +777,15 @@ __fop_remove_setup(dbp, txn, name, flags) DB_ENV *dbenv; DB_FH *fhp; DB_LOCK elock; + u_int32_t refcnt; u_int8_t mbuf[DBMETASIZE]; - int cnt, ret; + int ret; COMPQUIET(flags, 0); dbenv = dbp->dbenv; PANIC_CHECK(dbenv); LOCK_INIT(elock); + fhp = NULL; /* Create locker if necessary. */ retry: if (LOCKING_ON(dbenv)) { @@ -816,9 +844,10 @@ retry: if (LOCKING_ON(dbenv)) { fhp = NULL; } if (ret == DB_LOCK_NOTEXIST) { - if ((ret = REL_ENVLOCK(dbenv, &elock)) != 0) + if ((ret = __ENV_LPUT(dbenv, elock, 0)) != 0) goto err; - } else if (ret != DB_LOCK_NOTGRANTED) + } else if (ret != DB_LOCK_NOTGRANTED || + (txn != NULL && F_ISSET(txn, TXN_NOWAIT))) goto err; else if ((ret = __fop_lock_handle(dbenv, dbp, dbp->lid, DB_LOCK_WRITE, &elock, 0)) != 0 && @@ -829,25 +858,28 @@ retry: if (LOCKING_ON(dbenv)) { dbp->lid = DB_LOCK_INVALIDID; (void)__db_refresh(dbp, txn, DB_NOSYNC, NULL); goto retry; - } else if ((ret = REL_ENVLOCK(dbenv, &elock)) != 0) + } else if ((ret = __ENV_LPUT(dbenv, elock, 0)) != 0) goto err; /* Check if the file is already open. */ - if ((ret = __memp_get_refcnt(dbenv, dbp->fileid, &cnt)) != 0) + if ((ret = __memp_get_refcnt(dbenv, dbp->fileid, &refcnt)) != 0) goto err; /* * Now, error check. If the file is already open (refcnt != 0), then - * we must have it open (since we got the lock) and we need to report - * the error. If the file isn't open, but it's in the midst of a rename - * then this file doesn't really exist. + * we must have it open (since we got the lock) and we need to panic, + * because this is a self deadlock and the application has a bug. + * If the file isn't open, but it's in the midst of a rename then + * this file doesn't really exist. */ - if (cnt != 0) - ret = DB_FILEOPEN; - else if (F_ISSET(dbp, DB_AM_IN_RENAME)) + if (refcnt != 0) { + __db_err(dbenv, +"Attempting to remove file open in current transaction causing self-deadlock"); + ret = __db_panic(dbenv, DB_LOCK_DEADLOCK); + } else if (F_ISSET(dbp, DB_AM_IN_RENAME)) ret = ENOENT; if (0) { -err: (void)REL_ENVLOCK(dbenv, &elock); +err: (void)__ENV_LPUT(dbenv, elock, 0); } if (fhp != NULL && !LF_ISSET(DB_FCNTL_LOCKING)) (void)__os_closehandle(dbenv, fhp); @@ -979,7 +1011,7 @@ __fop_dummy(dbp, txn, old, new, flags) /* Create a dummy dbp handle. 
*/ if ((ret = db_create(&tmpdbp, dbenv, 0)) != 0) goto err; - memcpy(&tmpdbp->fileid, ((DBMETA *)mbuf)->uid, DB_FILE_ID_LEN); + memcpy(tmpdbp->fileid, ((DBMETA *)mbuf)->uid, DB_FILE_ID_LEN); /* Now, lock the name space while we initialize this file. */ if ((ret = __db_appname(dbenv, @@ -1016,7 +1048,7 @@ __fop_dummy(dbp, txn, old, new, flags) t2dbp, locker, DB_LOCK_WRITE, NULL, DB_LOCK_NOWAIT)) != 0) ret = EEXIST; else { - (void)__lock_put(dbenv, &t2dbp->handle_lock); + (void)__lock_put(dbenv, &t2dbp->handle_lock, 0); if (!F_ISSET(t2dbp, DB_AM_IN_RENAME)) ret = EEXIST; } @@ -1038,7 +1070,7 @@ __fop_dummy(dbp, txn, old, new, flags) stxn, back, old, tmpdbp->fileid, DB_APP_DATA, dflags)) != 0) goto err; if ((ret = __fop_lock_handle(dbenv, - tmpdbp, locker, DB_LOCK_WRITE, &elock, 0)) != 0) + tmpdbp, locker, DB_LOCK_WRITE, &elock, NOWAIT_FLAG(txn))) != 0) goto err; /* @@ -1077,7 +1109,7 @@ __fop_dummy(dbp, txn, old, new, flags) if ((ret = __txn_remevent(dbenv, txn, realold, NULL)) != 0) goto err; -err: (void)REL_ENVLOCK(dbenv, &elock); +err: (void)__ENV_LPUT(dbenv, elock, 0); if (stxn != NULL) (void)__txn_abort(stxn); if (tmpdbp != NULL && @@ -1116,7 +1148,7 @@ __fop_dbrename(dbp, old, new) DB_ENV *dbenv; DB_LOCK elock; char *real_new, *real_old; - int ret, tret; + int ret, t_ret; dbenv = dbp->dbenv; real_new = NULL; @@ -1145,8 +1177,8 @@ __fop_dbrename(dbp, old, new) ret = __memp_nameop(dbenv, dbp->fileid, new, real_old, real_new); -err: if ((tret = REL_ENVLOCK(dbenv, &elock)) != 0 && ret == 0) - ret = tret; +err: if ((t_ret = __ENV_LPUT(dbenv, elock, 0)) != 0 && ret == 0) + ret = t_ret; if (real_old != NULL) __os_free(dbenv, real_old); if (real_new != NULL) diff --git a/db/hash/hash.c b/db/hash/hash.c index b1a2d8703..ed96bba1a 100644 --- a/db/hash/hash.c +++ b/db/hash/hash.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -38,18 +38,15 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * + * $Id: hash.c,v 11.199 2004/10/11 19:38:49 ubell Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: hash.c,v 11.177 2003/10/04 01:31:58 margo Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include -#include #include #endif @@ -114,7 +111,8 @@ __ham_quick_delete(dbc) DB_ASSERT(IS_INITIALIZED(dbc)); DB_ASSERT(((HASH_CURSOR *)dbc->internal)->opd == NULL); - ret = __ham_del_pair(dbc, 1); + if ((ret = __ham_c_writelock(dbc)) == 0) + ret = __ham_del_pair(dbc, 1); if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0) ret = t_ret; @@ -163,9 +161,7 @@ __ham_c_init(dbc) dbc->c_am_put = __ham_c_put; dbc->c_am_writelock = __ham_c_writelock; - __ham_item_init(dbc); - - return (0); + return (__ham_item_init(dbc)); } /* @@ -181,6 +177,7 @@ __ham_c_close(dbc, root_pgno, rmroot) DB_MPOOLFILE *mpf; HASH_CURSOR *hcp; HKEYDATA *dp; + db_lockmode_t lock_mode; int doroot, gotmeta, ret, t_ret; u_int32_t dirty; @@ -195,11 +192,19 @@ __ham_c_close(dbc, root_pgno, rmroot) if ((ret = __ham_get_meta(dbc)) != 0) goto done; gotmeta = 1; - if ((ret = __ham_get_cpage(dbc, DB_LOCK_READ)) != 0) + lock_mode = DB_LOCK_READ; + + /* To support dirty reads we must reget the write lock. 
*/ + if (F_ISSET(dbc->dbp, DB_AM_DIRTY) && + F_ISSET((BTREE_CURSOR *) + dbc->internal->opd->internal, C_DELETED)) + lock_mode = DB_LOCK_WRITE; + + if ((ret = __ham_get_cpage(dbc, lock_mode)) != 0) goto out; dp = (HKEYDATA *)H_PAIRDATA(dbc->dbp, hcp->page, hcp->indx); - /* If its not a dup we aborted before we changed it. */ + /* If it's not a dup we aborted before we changed it. */ if (HPAGE_PTYPE(dp) == H_OFFDUP) memcpy(&root_pgno, HOFFPAGE_PGNO(dp), sizeof(db_pgno_t)); @@ -222,8 +227,8 @@ out: if (hcp->page != NULL && (t_ret = if (gotmeta != 0 && (t_ret = __ham_release_meta(dbc)) != 0 && ret == 0) ret = t_ret; -done: - __ham_item_init(dbc); +done: if ((t_ret = __ham_item_init(dbc)) != 0 && ret == 0) + ret = t_ret; return (ret); } @@ -359,7 +364,7 @@ __ham_c_del(dbc) out: if (hcp->page != NULL) { if ((t_ret = __memp_fput(mpf, - hcp->page, ret == 0 ? DB_MPOOL_DIRTY : 0)) && ret == 0) + hcp->page, ret == 0 ? DB_MPOOL_DIRTY : 0)) != 0 && ret == 0) ret = t_ret; hcp->page = NULL; } @@ -380,6 +385,7 @@ __ham_c_dup(orig_dbc, new_dbc) DBC *orig_dbc, *new_dbc; { HASH_CURSOR *orig, *new; + int ret; orig = (HASH_CURSOR *)orig_dbc->internal; new = (HASH_CURSOR *)new_dbc->internal; @@ -399,16 +405,17 @@ __ham_c_dup(orig_dbc, new_dbc) * If the old cursor held a lock and we're not in transactions, get one * for the new one. The reason that we don't need a new lock if we're * in a transaction is because we already hold a lock and will continue - * to do so until commit, so there is no point in reaquiring it. We + * to do so until commit, so there is no point in re-acquiring it. We * don't know if the old lock was a read or write lock, but it doesn't * matter. We'll get a read lock. We know that this locker already * holds a lock of the correct type, so if we need a write lock and * request it, we know that we'll get it. 
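
Several of the hash cursor changes in this hunk (regetting the bucket write lock in __ham_c_close, the rewritten __ham_c_writelock below) exist to support degree-1 isolation, i.e. dirty reads. For reference, this is what an application asks for from the outside; a minimal sketch assuming the 4.x C API, with "access.db" and the key chosen arbitrarily and error handling trimmed:

    #include <string.h>
    #include <db.h>

    int
    dirty_read_example(DB_ENV *dbenv)
    {
        DB *dbp;
        DBT key, data;
        int ret;

        if ((ret = db_create(&dbp, dbenv, 0)) != 0)
            return (ret);
        /* Open the handle with dirty-read support enabled. */
        if ((ret = dbp->open(dbp, NULL,
            "access.db", NULL, DB_HASH, DB_CREATE | DB_DIRTY_READ, 0600)) != 0)
            goto err;

        memset(&key, 0, sizeof(key));
        memset(&data, 0, sizeof(data));
        key.data = "fruit";
        key.size = 5;

        /* DB_DIRTY_READ on the get: don't block on writers, possibly see
         * data modified by a transaction that has not yet committed. */
        ret = dbp->get(dbp, NULL, &key, &data, DB_DIRTY_READ);

    err:    (void)dbp->close(dbp, 0);
        return (ret);
    }
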
*/ - if (!LOCK_ISSET(orig->lock) || orig_dbc->txn != NULL) - return (0); + if (orig_dbc->txn == NULL && LOCK_ISSET(orig->lock)) + if ((ret = __ham_lock_bucket(new_dbc, DB_LOCK_READ)) != 0) + return (ret); - return (__ham_lock_bucket(new_dbc, DB_LOCK_READ)); + return (0); } static int @@ -493,6 +500,9 @@ __ham_c_get(dbc, key, data, flags, pgnop) ret = __ham_item(dbc, lock_type, pgnop); break; + default: + ret = __db_unknown_flag(dbp->dbenv, "__ham_c_get", flags); + break; } /* @@ -605,11 +615,11 @@ __ham_bulk(dbc, data, flags) db_indx_t dup_len, dup_off, dup_tlen, indx, *inp; db_lockmode_t lock_mode; db_pgno_t pgno; - int32_t *endp, key_off, *offp, *saveoff; - u_int32_t key_size, size, space; + int32_t *endp, *offp, *saveoff; + u_int32_t key_off, key_size, pagesize, size, space; u_int8_t *dbuf, *dp, *hk, *np, *tmp; int is_dup, is_key; - int need_pg, next_key, no_dup, pagesize, ret, t_ret; + int need_pg, next_key, no_dup, ret, t_ret; ret = 0; key_off = 0; @@ -658,7 +668,7 @@ next_pg: dbc, key_size, pgno, np)) != 0) return (ret); space -= key_size; - key_off = (int32_t)(np - dbuf); + key_off = (u_int32_t)(np - dbuf); np += key_size; } else { if (need_pg) { @@ -667,10 +677,11 @@ next_pg: if (space < size) { get_key_space: if (offp == endp) { - data->size = - ALIGN(size + + data->size = (u_int32_t) + DB_ALIGN(size + pagesize, 1024); - return (ENOMEM); + return + (DB_BUFFER_SMALL); } goto back_up; } @@ -681,8 +692,9 @@ get_key_space: np += size; } key_size = LEN_HKEY(dbp, pg, pagesize, indx); - key_off = (int32_t)(inp[indx] - HOFFSET(pg) - + dp - dbuf + SSZA(HKEYDATA, data)); + key_off = ((inp[indx] - HOFFSET(pg)) + + (u_int32_t)(dp - dbuf)) + + SSZA(HKEYDATA, data); } } @@ -768,9 +780,10 @@ get_space: */ if (offp >= endp || F_ISSET(dbc, DBC_TRANSIENT)) { - data->size = ALIGN(size + + data->size = (u_int32_t) + DB_ALIGN(size + data->ulen - space, 1024); - return (ENOMEM); + return (DB_BUFFER_SMALL); } /* * Don't continue; we're all out @@ -839,17 +852,18 @@ get_space: if (space > data->ulen) { if (!is_dup || dup_off == 0) goto back_up; - dup_off -= (db_indx_t)DUP_SIZE(offp[1]); + dup_off -= (db_indx_t) + DUP_SIZE((u_int32_t)offp[1]); goto get_space; } if (is_key) { - *offp-- = key_off; - *offp-- = key_size; + *offp-- = (int32_t)key_off; + *offp-- = (int32_t)key_size; } if (is_dup) { *offp-- = (int32_t)( - inp[indx + 1] - HOFFSET(pg) + - dp - dbuf + SSZA(HKEYDATA, data) + + ((inp[indx + 1] - HOFFSET(pg)) + + dp - dbuf) + SSZA(HKEYDATA, data) + dup_off + sizeof(db_indx_t)); memcpy(&dup_len, HKEYDATA_DATA(hk) + dup_off, @@ -858,8 +872,8 @@ get_space: *offp-- = dup_len; } else { *offp-- = (int32_t)( - inp[indx + 1] - HOFFSET(pg) + - dp - dbuf + SSZA(HKEYDATA, data)); + ((inp[indx + 1] - HOFFSET(pg)) + + dp - dbuf) + SSZA(HKEYDATA, data)); *offp-- = LEN_HDATA(dbp, pg, pagesize, indx); } @@ -876,14 +890,14 @@ get_space: space -= 2 * sizeof(*offp); if (space > data->ulen) goto back_up; - *offp-- = key_off; - *offp-- = key_size; + *offp-- = (int32_t)key_off; + *offp-- = (int32_t)key_size; } saveoff = offp; if ((ret = __bam_bulk_duplicates(dbc, pgno, dbuf, is_key ? 
offp + 2 : NULL, &offp, &np, &space, no_dup)) != 0) { - if (ret == ENOMEM) { + if (ret == DB_BUFFER_SMALL) { size = space; space = 0; if (is_key && saveoff == offp) { @@ -910,16 +924,19 @@ get_space: return (ret); if (is_key) { - *offp-- = key_off; - *offp-- = key_size; + *offp-- = (int32_t)key_off; + *offp-- = (int32_t)key_size; } *offp-- = (int32_t)(np - dbuf); - *offp-- = size; + *offp-- = (int32_t)size; np += size; space -= size; break; + default: + /* Do nothing. */ + break; } } while (next_key && (indx += 2) < NUM_ENT(pg)); @@ -973,7 +990,7 @@ get_space: if (ret != DB_NOTFOUND) return (ret); } - *offp = (u_int32_t) -1; + *offp = -1; return (0); } @@ -1060,6 +1077,9 @@ __ham_c_put(dbc, key, data, flags, pgnop) case DB_CURRENT: ret = __ham_item(dbc, DB_LOCK_WRITE, pgnop); break; + default: + ret = __db_unknown_flag(dbp->dbenv, "__ham_c_put", flags); + break; } if (*pgnop == PGNO_INVALID && ret == 0) { @@ -1072,15 +1092,19 @@ __ham_c_put(dbc, key, data, flags, pgnop) ret = __ham_add_dup(dbc, data, flags, pgnop); } -done: if (ret == 0 && F_ISSET(hcp, H_EXPAND)) { +done: if (hcp->page != NULL) { + if ((t_ret = __memp_fput(mpf, + hcp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0) + ret = t_ret; + if (t_ret == 0) + hcp->page = NULL; + } + + if (ret == 0 && F_ISSET(hcp, H_EXPAND)) { ret = __ham_expand_table(dbc); F_CLR(hcp, H_EXPAND); } - if (hcp->page != NULL && (t_ret = - __memp_fset(mpf, hcp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0) - ret = t_ret; - err2: if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0) ret = t_ret; @@ -1104,8 +1128,8 @@ __ham_expand_table(dbc) HASH_CURSOR *hcp; PAGE *h; db_pgno_t pgno, mpgno; - u_int32_t newalloc, new_bucket, old_bucket; - int dirty_meta, got_meta, logn, new_double, ret; + u_int32_t dirty_meta, logn, newalloc, new_bucket, old_bucket; + int got_meta, new_double, ret, t_ret; dbp = dbc->dbp; mpf = dbp->mpf; @@ -1117,9 +1141,8 @@ __ham_expand_table(dbc) mmeta = (DBMETA *) hcp->hdr; mpgno = mmeta->pgno; h = NULL; - dirty_meta = 0; + dirty_meta = newalloc = 0; got_meta = 0; - newalloc = 0; /* * If the split point is about to increase, make sure that we @@ -1180,7 +1203,7 @@ __ham_expand_table(dbc) if ((ret = __ham_metagroup_log(dbp, dbc->txn, &lsn, 0, hcp->hdr->max_bucket, mpgno, &mmeta->lsn, hcp->hdr->dbmeta.pgno, &hcp->hdr->dbmeta.lsn, - pgno, &lsn, newalloc)) != 0) + pgno, &lsn, newalloc, mmeta->last_pgno)) != 0) goto err; } else LSN_NOT_LOGGED(lsn); @@ -1231,30 +1254,31 @@ __ham_expand_table(dbc) ret = __ham_split_page(dbc, old_bucket, new_bucket); err: if (got_meta) - (void)__memp_fput(mpf, mmeta, dirty_meta); - - if (LOCK_ISSET(metalock)) - (void)__TLPUT(dbc, metalock); - + if ((t_ret = + __memp_fput(mpf, mmeta, dirty_meta)) != 0 && ret == 0) + ret = t_ret; + if ((t_ret = __TLPUT(dbc, metalock)) != 0 && ret == 0) + ret = t_ret; if (h != NULL) - (void)__memp_fput(mpf, h, 0); + if ((t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) + ret = t_ret; return (ret); } /* - * PUBLIC: u_int32_t __ham_call_hash __P((DBC *, u_int8_t *, int32_t)); + * PUBLIC: u_int32_t __ham_call_hash __P((DBC *, u_int8_t *, u_int32_t)); */ u_int32_t __ham_call_hash(dbc, k, len) DBC *dbc; u_int8_t *k; - int32_t len; + u_int32_t len; { DB *dbp; - u_int32_t n, bucket; HASH_CURSOR *hcp; HASH *hashp; + u_int32_t n, bucket; dbp = dbc->dbp; hcp = (HASH_CURSOR *)dbc->internal; @@ -1512,7 +1536,7 @@ __ham_overwrite(dbc, nval, flags) */ if (nval->doff > nondup_size) newsize += - (nval->doff - nondup_size + nval->size); + ((nval->doff - nondup_size) + nval->size); else if (nval->doff + 
nval->dlen > nondup_size) newsize += nval->size - (nondup_size - nval->doff); @@ -1524,7 +1548,8 @@ __ham_overwrite(dbc, nval, flags) * the onpage duplicate size in which case we need * to convert to off-page duplicates. */ - if (ISBIG(hcp, hcp->dup_tlen - nondup_size + newsize)) { + if (ISBIG(hcp, + (hcp->dup_tlen - nondup_size) + newsize)) { if ((ret = __ham_dup_convert(dbc)) != 0) return (ret); return (hcp->opd->c_am_put(hcp->opd, @@ -1561,7 +1586,7 @@ __ham_overwrite(dbc, nval, flags) /* End of original record (if there is any) */ if (nval->doff + nval->dlen < tmp_val.size) { - len = tmp_val.size - nval->doff - nval->dlen; + len = (tmp_val.size - nval->doff) - nval->dlen; memcpy(p, (u_int8_t *)tmp_val.data + nval->doff + nval->dlen, len); p += len; @@ -1580,7 +1605,7 @@ __ham_overwrite(dbc, nval, flags) tmp_val2.size = newsize; if (dbp->dup_compare( dbp, &tmp_val, &tmp_val2) != 0) { - (void)__os_free(dbenv, newrec); + __os_free(dbenv, newrec); return (__db_duperr(dbp, flags)); } } @@ -1591,7 +1616,7 @@ __ham_overwrite(dbc, nval, flags) tmp_val2.dlen = DUP_SIZE(hcp->dup_len); ret = __ham_replpair(dbc, &tmp_val2, 0); - (void)__os_free(dbenv, newrec); + __os_free(dbenv, newrec); /* Update cursor */ if (ret != 0) @@ -1606,7 +1631,7 @@ __ham_overwrite(dbc, nval, flags) } else { /* Check whether we need to convert to off page. */ if (ISBIG(hcp, - hcp->dup_tlen - hcp->dup_len + nval->size)) { + (hcp->dup_tlen - hcp->dup_len) + nval->size)) { if ((ret = __ham_dup_convert(dbc)) != 0) return (ret); return (hcp->opd->c_am_put(hcp->opd, @@ -1744,6 +1769,8 @@ found_key: F_SET(hcp, H_OK); * duplicated, only data items are. */ return (__db_pgfmt(dbp->dbenv, PGNO(hcp->page))); + default: + return (__db_pgfmt(dbp->dbenv, PGNO(hcp->page))); } } @@ -1998,7 +2025,8 @@ __ham_get_clist(dbp, pgno, indx, listp) DB *ldbp; DBC *cp; DB_ENV *dbenv; - int nalloc, nused, ret; + u_int nalloc, nused; + int ret; /* * Assume that finding anything is the exception, so optimize for @@ -2057,7 +2085,6 @@ static int __ham_c_writelock(dbc) DBC *dbc; { - DB_ENV *dbenv; DB_LOCK tmp_lock; HASH_CURSOR *hcp; int ret; @@ -2070,14 +2097,12 @@ __ham_c_writelock(dbc) return (0); hcp = (HASH_CURSOR *)dbc->internal; - if ((!LOCK_ISSET(hcp->lock) || hcp->lock_mode == DB_LOCK_READ)) { + ret = 0; + if ((!LOCK_ISSET(hcp->lock) || hcp->lock_mode != DB_LOCK_WRITE)) { tmp_lock = hcp->lock; - if ((ret = __ham_lock_bucket(dbc, DB_LOCK_WRITE)) != 0) - return (ret); - dbenv = dbc->dbp->dbenv; - if (LOCK_ISSET(tmp_lock) && - (ret = __lock_put(dbenv, &tmp_lock)) != 0) - return (ret); + if ((ret = __ham_lock_bucket(dbc, DB_LOCK_WRITE)) == 0 + && tmp_lock.mode != DB_LOCK_WWRITE) + ret = __LPUT(dbc, tmp_lock); } - return (0); + return (ret); } diff --git a/db/hash/hash.src b/db/hash/hash.src index 246ae847d..4acff5e59 100644 --- a/db/hash/hash.src +++ b/db/hash/hash.src @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: hash.src,v 10.41 2003/11/14 05:32:37 ubell Exp $ + * $Id: hash.src,v 10.44 2004/06/17 17:35:21 bostic Exp $ */ /* * Copyright (c) 1995, 1996 @@ -45,8 +45,6 @@ PREFIX __ham DBPRIVATE -INCLUDE #include "db_config.h" -INCLUDE INCLUDE #ifndef NO_SYSTEM_INCLUDES INCLUDE #include INCLUDE @@ -184,6 +182,7 @@ END * newalloc: 1 indicates that this record did the actual allocation; * 0 indicates that the pages were already allocated from a * previous (failed) allocation. 
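
The __ham_bulk changes above report an undersized user buffer with DB_BUFFER_SMALL (instead of ENOMEM) and leave the required size, rounded up with DB_ALIGN, in data->size. The usual application response is to grow the buffer to that size and retry; a rough sketch under the 4.x cursor API (bulk_read_all is our name and the 64KB starting size is arbitrary):

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>
    #include <db.h>

    int
    bulk_read_all(DB *dbp)
    {
        DBC *dbc;
        DBT key, data;
        void *p;
        int ret;

        if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
            return (ret);

        memset(&key, 0, sizeof(key));
        memset(&data, 0, sizeof(data));
        data.flags = DB_DBT_USERMEM;
        data.ulen = 64 * 1024;
        if ((data.data = malloc(data.ulen)) == NULL) {
            ret = ENOMEM;
            goto err;
        }

        for (;;) {
            ret = dbc->c_get(dbc, &key, &data, DB_MULTIPLE_KEY | DB_NEXT);
            if (ret == DB_BUFFER_SMALL) {
                /* data.size holds the size the library asked for. */
                if ((p = realloc(data.data, data.size)) == NULL) {
                    ret = ENOMEM;
                    break;
                }
                data.data = p;
                data.ulen = data.size;
                continue;
            }
            if (ret != 0)
                break;
            /* ... unpack the buffer with DB_MULTIPLE_INIT/_KEY_NEXT ... */
        }
        if (ret == DB_NOTFOUND)
            ret = 0;
        free(data.data);

    err:    (void)dbc->c_close(dbc);
        return (ret);
    }
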
+ * last_pgno: the last page in the file before this op. */ BEGIN metagroup 29 DB fileid int32_t ld @@ -195,6 +194,7 @@ POINTER metalsn DB_LSN * lu ARG pgno db_pgno_t lu POINTER pagelsn DB_LSN * lu ARG newalloc u_int32_t lu +ARG last_pgno db_pgno_t lu END /* @@ -208,6 +208,7 @@ END * metalsn: meta-data lsn * start_pgno: starting page number * num: number of allocated pages + * last_pgno: the last page in the file before this op. */ BEGIN groupalloc 32 DB fileid int32_t ld @@ -215,6 +216,7 @@ POINTER meta_lsn DB_LSN * lu ARG start_pgno db_pgno_t lu ARG num u_int32_t lu ARG free db_pgno_t lu +ARG last_pgno db_pgno_t lu END /* diff --git a/db/hash/hash_auto.c b/db/hash/hash_auto.c index b8f217a79..49a126a75 100644 --- a/db/hash/hash_auto.c +++ b/db/hash/hash_auto.c @@ -1,4 +1,5 @@ /* Do not edit: automatically built by gen_rec.awk. */ + #include "db_config.h" #ifndef NO_SYSTEM_INCLUDES @@ -40,33 +41,42 @@ __ham_insdel_log(dbp, txnid, ret_lsnp, flags, DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___ham_insdel; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -85,27 +95,23 @@ __ham_insdel_log(dbp, txnid, ret_lsnp, flags, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -172,140 +178,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
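
The regenerated log functions in hash_auto.c now split cleanly into two paths: durable records are written through __log_put(), while non-durable records are only queued on the transaction's in-memory DB_TXNLOGREC list and the returned LSN is marked not-logged, so they can still drive abort but never reach the log file. Non-durable logging is per-database and opt-in; a minimal sketch of the application side, assuming the 4.x DB->set_flags interface, with "scratch.db" as an arbitrary name:

    #include <db.h>

    int
    open_not_durable(DB_ENV *dbenv, DB **dbpp)
    {
        DB *dbp;
        int ret;

        if ((ret = db_create(&dbp, dbenv, 0)) != 0)
            return (ret);

        /* Must be set before DB->open; log records for this database are
         * then not written to the on-disk log (they are kept in memory
         * for abort, which is the list the generated code above uses). */
        if ((ret = dbp->set_flags(dbp, DB_TXN_NOT_DURABLE)) != 0 ||
            (ret = dbp->open(dbp, NULL,
            "scratch.db", NULL, DB_HASH, DB_CREATE, 0600)) != 0) {
            (void)dbp->close(dbp, 0);
            return (ret);
        }

        *dbpp = dbp;
        return (0);
    }
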
*/ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__ham_insdel_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __ham_insdel_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__ham_insdel_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __ham_insdel_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__ham_insdel_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __ham_insdel_args *argp; - u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __ham_insdel_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__ham_insdel%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\topcode: %lu\n", (u_long)argp->opcode); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tndx: %lu\n", (u_long)argp->ndx); - (void)printf("\tpagelsn: [%lu][%lu]\n", - (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset); - (void)printf("\tkey: "); - for (i = 0; i < argp->key.size; i++) { - ch = ((u_int8_t *)argp->key.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tdata: "); - for (i = 0; i < argp->data.size; i++) { - ch = ((u_int8_t *)argp->data.data)[i]; - printf(isprint(ch) || ch == 0x0a ? 
"%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __ham_insdel_read __P((DB_ENV *, void *, __ham_insdel_args **)); */ @@ -323,9 +236,9 @@ __ham_insdel_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__ham_insdel_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -394,33 +307,42 @@ __ham_newpage_log(dbp, txnid, ret_lsnp, flags, DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___ham_newpage; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -440,27 +362,23 @@ __ham_newpage_log(dbp, txnid, ret_lsnp, flags, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -521,131 +439,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
*/ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__ham_newpage_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __ham_newpage_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__ham_newpage_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __ham_newpage_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__ham_newpage_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __ham_newpage_args *argp; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __ham_newpage_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__ham_newpage%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? 
"_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\topcode: %lu\n", (u_long)argp->opcode); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tprev_pgno: %lu\n", (u_long)argp->prev_pgno); - (void)printf("\tprevlsn: [%lu][%lu]\n", - (u_long)argp->prevlsn.file, (u_long)argp->prevlsn.offset); - (void)printf("\tnew_pgno: %lu\n", (u_long)argp->new_pgno); - (void)printf("\tpagelsn: [%lu][%lu]\n", - (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset); - (void)printf("\tnext_pgno: %lu\n", (u_long)argp->next_pgno); - (void)printf("\tnextlsn: [%lu][%lu]\n", - (u_long)argp->nextlsn.file, (u_long)argp->nextlsn.offset); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __ham_newpage_read __P((DB_ENV *, void *, * PUBLIC: __ham_newpage_args **)); @@ -664,9 +498,9 @@ __ham_newpage_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__ham_newpage_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -727,33 +561,42 @@ __ham_splitdata_log(dbp, txnid, ret_lsnp, flags, opcode, pgno, pageimage, pagels DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___ham_splitdata; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -770,27 +613,23 @@ __ham_splitdata_log(dbp, txnid, ret_lsnp, flags, opcode, pgno, pageimage, pagels logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -842,133 +681,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__ham_splitdata_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __ham_splitdata_getpgnos __P((DB_ENV *, DBT *, - * PUBLIC: DB_LSN *, db_recops, void *)); - */ -int -__ham_splitdata_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __ham_splitdata_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__ham_splitdata_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - 
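
One detail worth calling out in these regenerated functions is the repeated comment about assigning begin_lsn "while holding region mutex": rather than reading the LSN back after __log_put() returns (a window in which another thread could log), the caller registers the address to be filled (DB_SET_BEGIN_LSNP) and the store happens inside the locked log region. Reduced to generic C with hypothetical names, the shape is:

    #include <pthread.h>

    static pthread_mutex_t region_mutex = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long next_seqno;

    /* The caller passes in the address to fill; the assignment is made
     * while the mutex protecting the sequence is still held, so the value
     * recorded really is the one this append used. */
    static void
    append_record(unsigned long *seqnop)
    {
        pthread_mutex_lock(&region_mutex);
        *seqnop = next_seqno++;         /* assigned under the mutex, no race */
        /* ... append the record itself ... */
        pthread_mutex_unlock(&region_mutex);
    }
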
__ham_splitdata_args *argp; - u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __ham_splitdata_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__ham_splitdata%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\topcode: %lu\n", (u_long)argp->opcode); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tpageimage: "); - for (i = 0; i < argp->pageimage.size; i++) { - ch = ((u_int8_t *)argp->pageimage.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tpagelsn: [%lu][%lu]\n", - (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __ham_splitdata_read __P((DB_ENV *, void *, * PUBLIC: __ham_splitdata_args **)); @@ -987,9 +740,9 @@ __ham_splitdata_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__ham_splitdata_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -1047,33 +800,42 @@ __ham_replace_log(dbp, txnid, ret_lsnp, flags, pgno, ndx, pagelsn, off, olditem, DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___ham_replace; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -1093,27 +855,23 @@ __ham_replace_log(dbp, txnid, ret_lsnp, flags, pgno, ndx, pagelsn, off, olditem, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -1184,164 +942,70 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__ham_replace_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION /* - * PUBLIC: int __ham_replace_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); + * PUBLIC: int __ham_replace_read __P((DB_ENV *, void *, + * PUBLIC: __ham_replace_args **)); */ int -__ham_replace_getpgnos(dbenv, rec, lsnp, notused1, summary) +__ham_replace_read(dbenv, recbuf, argpp) DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; + void *recbuf; + __ham_replace_args **argpp; { - TXN_RECS *t; + __ham_replace_args *argp; + u_int32_t uinttmp; + u_int8_t *bp; int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) + if ((ret = __os_malloc(dbenv, + sizeof(__ham_replace_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; + argp->txnid = (DB_TXN *)&argp[1]; - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - 
sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __ham_replace_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__ham_replace_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __ham_replace_args *argp; - u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __ham_replace_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__ham_replace%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tndx: %lu\n", (u_long)argp->ndx); - (void)printf("\tpagelsn: [%lu][%lu]\n", - (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset); - (void)printf("\toff: %ld\n", (long)argp->off); - (void)printf("\tolditem: "); - for (i = 0; i < argp->olditem.size; i++) { - ch = ((u_int8_t *)argp->olditem.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tnewitem: "); - for (i = 0; i < argp->newitem.size; i++) { - ch = ((u_int8_t *)argp->newitem.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tmakedup: %lu\n", (u_long)argp->makedup); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - -/* - * PUBLIC: int __ham_replace_read __P((DB_ENV *, void *, - * PUBLIC: __ham_replace_args **)); - */ -int -__ham_replace_read(dbenv, recbuf, argpp) - DB_ENV *dbenv; - void *recbuf; - __ham_replace_args **argpp; -{ - __ham_replace_args *argp; - u_int32_t uinttmp; - u_int8_t *bp; - int ret; - - if ((ret = __os_malloc(dbenv, - sizeof(__ham_replace_args) + sizeof(DB_TXN), &argp)) != 0) - return (ret); - argp->txnid = (DB_TXN *)&argp[1]; - - bp = recbuf; - memcpy(&argp->type, bp, sizeof(argp->type)); - bp += sizeof(argp->type); + memcpy(&argp->type, bp, sizeof(argp->type)); + bp += sizeof(argp->type); memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid)); bp += sizeof(argp->txnid->txnid); @@ -1411,33 +1075,42 @@ __ham_copypage_log(dbp, txnid, ret_lsnp, flags, pgno, pagelsn, next_pgno, nextls DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___ham_copypage; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -1457,27 +1130,23 @@ __ham_copypage_log(dbp, txnid, ret_lsnp, flags, pgno, pagelsn, next_pgno, nextls logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -1545,138 +1214,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; #endif - } else{ -#ifdef DIAGNOSTIC -do_put: -#endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__ham_copypage_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __ham_copypage_getpgnos __P((DB_ENV *, DBT *, - * PUBLIC: DB_LSN *, db_recops, void *)); - */ -int -__ham_copypage_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __ham_copypage_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__ham_copypage_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - 
__ham_copypage_args *argp; - u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __ham_copypage_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__ham_copypage%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tpagelsn: [%lu][%lu]\n", - (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset); - (void)printf("\tnext_pgno: %lu\n", (u_long)argp->next_pgno); - (void)printf("\tnextlsn: [%lu][%lu]\n", - (u_long)argp->nextlsn.file, (u_long)argp->nextlsn.offset); - (void)printf("\tnnext_pgno: %lu\n", (u_long)argp->nnext_pgno); - (void)printf("\tnnextlsn: [%lu][%lu]\n", - (u_long)argp->nnextlsn.file, (u_long)argp->nnextlsn.offset); - (void)printf("\tpage: "); - for (i = 0; i < argp->page.size; i++) { - ch = ((u_int8_t *)argp->page.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __ham_copypage_read __P((DB_ENV *, void *, * PUBLIC: __ham_copypage_args **)); @@ -1695,9 +1273,9 @@ __ham_copypage_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__ham_copypage_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -1745,11 +1323,11 @@ __ham_copypage_read(dbenv, recbuf, argpp) /* * PUBLIC: int __ham_metagroup_log __P((DB *, DB_TXN *, DB_LSN *, * PUBLIC: u_int32_t, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, - * PUBLIC: db_pgno_t, DB_LSN *, u_int32_t)); + * PUBLIC: db_pgno_t, DB_LSN *, u_int32_t, db_pgno_t)); */ int __ham_metagroup_log(dbp, txnid, ret_lsnp, flags, bucket, mmpgno, mmetalsn, mpgno, metalsn, - pgno, pagelsn, newalloc) + pgno, pagelsn, newalloc, last_pgno) DB *dbp; DB_TXN *txnid; DB_LSN *ret_lsnp; @@ -1762,37 +1340,47 @@ __ham_metagroup_log(dbp, txnid, ret_lsnp, flags, bucket, mmpgno, mmetalsn, mpgno db_pgno_t pgno; DB_LSN * pagelsn; u_int32_t newalloc; + db_pgno_t last_pgno; { DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___ham_metagroup; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -1806,6 +1394,7 @@ __ham_metagroup_log(dbp, txnid, ret_lsnp, flags, bucket, mmpgno, mmetalsn, mpgno + sizeof(*metalsn) + sizeof(u_int32_t) + sizeof(*pagelsn) + + sizeof(u_int32_t) + sizeof(u_int32_t); if (CRYPTO_ON(dbenv)) { npad = @@ -1813,27 +1402,23 @@ __ham_metagroup_log(dbp, txnid, ret_lsnp, flags, bucket, mmpgno, mmetalsn, mpgno logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -1896,134 +1481,53 @@ do_malloc: memcpy(bp, &uinttmp, sizeof(uinttmp)); bp += sizeof(uinttmp); + uinttmp = (u_int32_t)last_pgno; + memcpy(bp, &uinttmp, sizeof(uinttmp)); + bp += sizeof(uinttmp); + DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
*/ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; #endif - } else{ -#ifdef DIAGNOSTIC -do_put: -#endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__ham_metagroup_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __ham_metagroup_getpgnos __P((DB_ENV *, DBT *, - * PUBLIC: DB_LSN *, db_recops, void *)); - */ -int -__ham_metagroup_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __ham_metagroup_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__ham_metagroup_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __ham_metagroup_args *argp; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __ham_metagroup_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__ham_metagroup%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? 
"_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tbucket: %lu\n", (u_long)argp->bucket); - (void)printf("\tmmpgno: %lu\n", (u_long)argp->mmpgno); - (void)printf("\tmmetalsn: [%lu][%lu]\n", - (u_long)argp->mmetalsn.file, (u_long)argp->mmetalsn.offset); - (void)printf("\tmpgno: %lu\n", (u_long)argp->mpgno); - (void)printf("\tmetalsn: [%lu][%lu]\n", - (u_long)argp->metalsn.file, (u_long)argp->metalsn.offset); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tpagelsn: [%lu][%lu]\n", - (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset); - (void)printf("\tnewalloc: %lu\n", (u_long)argp->newalloc); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __ham_metagroup_read __P((DB_ENV *, void *, * PUBLIC: __ham_metagroup_args **)); @@ -2042,9 +1546,9 @@ __ham_metagroup_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__ham_metagroup_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -2087,16 +1591,21 @@ __ham_metagroup_read(dbenv, recbuf, argpp) argp->newalloc = (u_int32_t)uinttmp; bp += sizeof(uinttmp); + memcpy(&uinttmp, bp, sizeof(uinttmp)); + argp->last_pgno = (db_pgno_t)uinttmp; + bp += sizeof(uinttmp); + *argpp = argp; return (0); } /* * PUBLIC: int __ham_groupalloc_log __P((DB *, DB_TXN *, DB_LSN *, - * PUBLIC: u_int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_pgno_t)); + * PUBLIC: u_int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_pgno_t, + * PUBLIC: db_pgno_t)); */ int -__ham_groupalloc_log(dbp, txnid, ret_lsnp, flags, meta_lsn, start_pgno, num, free) +__ham_groupalloc_log(dbp, txnid, ret_lsnp, flags, meta_lsn, start_pgno, num, free, last_pgno) DB *dbp; DB_TXN *txnid; DB_LSN *ret_lsnp; @@ -2105,37 +1614,47 @@ __ham_groupalloc_log(dbp, txnid, ret_lsnp, flags, meta_lsn, start_pgno, num, fre db_pgno_t start_pgno; u_int32_t num; db_pgno_t free; + db_pgno_t last_pgno; { DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___ham_groupalloc; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -2145,6 +1664,7 @@ __ham_groupalloc_log(dbp, txnid, ret_lsnp, flags, meta_lsn, start_pgno, num, fre + sizeof(*meta_lsn) + sizeof(u_int32_t) + sizeof(u_int32_t) + + sizeof(u_int32_t) + sizeof(u_int32_t); if (CRYPTO_ON(dbenv)) { npad = @@ -2152,27 +1672,23 @@ __ham_groupalloc_log(dbp, txnid, ret_lsnp, flags, meta_lsn, start_pgno, num, fre logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -2215,128 +1731,53 @@ do_malloc: memcpy(bp, &uinttmp, sizeof(uinttmp)); bp += sizeof(uinttmp); + uinttmp = (u_int32_t)last_pgno; + memcpy(bp, &uinttmp, sizeof(uinttmp)); + bp += sizeof(uinttmp); + DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
*/ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; #endif - } else{ -#ifdef DIAGNOSTIC -do_put: -#endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__ham_groupalloc_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __ham_groupalloc_getpgnos __P((DB_ENV *, DBT *, - * PUBLIC: DB_LSN *, db_recops, void *)); - */ -int -__ham_groupalloc_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __ham_groupalloc_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__ham_groupalloc_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __ham_groupalloc_args *argp; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __ham_groupalloc_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__ham_groupalloc%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? 
"_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tmeta_lsn: [%lu][%lu]\n", - (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset); - (void)printf("\tstart_pgno: %lu\n", (u_long)argp->start_pgno); - (void)printf("\tnum: %lu\n", (u_long)argp->num); - (void)printf("\tfree: %lu\n", (u_long)argp->free); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __ham_groupalloc_read __P((DB_ENV *, void *, * PUBLIC: __ham_groupalloc_args **)); @@ -2355,9 +1796,9 @@ __ham_groupalloc_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__ham_groupalloc_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -2386,6 +1827,10 @@ __ham_groupalloc_read(dbenv, recbuf, argpp) argp->free = (db_pgno_t)uinttmp; bp += sizeof(uinttmp); + memcpy(&uinttmp, bp, sizeof(uinttmp)); + argp->last_pgno = (db_pgno_t)uinttmp; + bp += sizeof(uinttmp); + *argpp = argp; return (0); } @@ -2413,33 +1858,42 @@ __ham_curadj_log(dbp, txnid, ret_lsnp, flags, pgno, indx, len, dup_off, add, DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___ham_curadj; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -2459,27 +1913,23 @@ __ham_curadj_log(dbp, txnid, ret_lsnp, flags, pgno, indx, len, dup_off, add, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -2534,128 +1984,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__ham_curadj_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __ham_curadj_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__ham_curadj_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __ham_curadj_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__ham_curadj_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __ham_curadj_args 
*argp; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __ham_curadj_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__ham_curadj%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tindx: %lu\n", (u_long)argp->indx); - (void)printf("\tlen: %lu\n", (u_long)argp->len); - (void)printf("\tdup_off: %lu\n", (u_long)argp->dup_off); - (void)printf("\tadd: %ld\n", (long)argp->add); - (void)printf("\tis_dup: %ld\n", (long)argp->is_dup); - (void)printf("\torder: %lu\n", (u_long)argp->order); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __ham_curadj_read __P((DB_ENV *, void *, __ham_curadj_args **)); */ @@ -2673,9 +2042,9 @@ __ham_curadj_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__ham_curadj_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -2741,33 +2110,42 @@ __ham_chgpg_log(dbp, txnid, ret_lsnp, flags, mode, old_pgno, new_pgno, old_indx, DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___ham_chgpg; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -2785,27 +2163,23 @@ __ham_chgpg_log(dbp, txnid, ret_lsnp, flags, mode, old_pgno, new_pgno, old_indx, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -2852,126 +2226,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__ham_chgpg_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __ham_chgpg_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__ham_chgpg_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __ham_chgpg_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__ham_chgpg_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __ham_chgpg_args *argp; 
- int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __ham_chgpg_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__ham_chgpg%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tmode: %ld\n", (long)argp->mode); - (void)printf("\told_pgno: %lu\n", (u_long)argp->old_pgno); - (void)printf("\tnew_pgno: %lu\n", (u_long)argp->new_pgno); - (void)printf("\told_indx: %lu\n", (u_long)argp->old_indx); - (void)printf("\tnew_indx: %lu\n", (u_long)argp->new_indx); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __ham_chgpg_read __P((DB_ENV *, void *, __ham_chgpg_args **)); */ @@ -2989,9 +2284,9 @@ __ham_chgpg_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__ham_chgpg_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -3029,92 +2324,6 @@ __ham_chgpg_read(dbenv, recbuf, argpp) return (0); } -/* - * PUBLIC: int __ham_init_print __P((DB_ENV *, int (***)(DB_ENV *, - * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); - */ -int -__ham_init_print(dbenv, dtabp, dtabsizep) - DB_ENV *dbenv; - int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - size_t *dtabsizep; -{ - int ret; - - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __ham_insdel_print, DB___ham_insdel)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __ham_newpage_print, DB___ham_newpage)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __ham_splitdata_print, DB___ham_splitdata)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __ham_replace_print, DB___ham_replace)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __ham_copypage_print, DB___ham_copypage)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __ham_metagroup_print, DB___ham_metagroup)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __ham_groupalloc_print, DB___ham_groupalloc)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __ham_curadj_print, DB___ham_curadj)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __ham_chgpg_print, DB___ham_chgpg)) != 0) - return (ret); - return (0); -} - -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __ham_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, - * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); - */ -int -__ham_init_getpgnos(dbenv, dtabp, dtabsizep) - DB_ENV *dbenv; - int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - size_t *dtabsizep; -{ - int ret; - - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __ham_insdel_getpgnos, DB___ham_insdel)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __ham_newpage_getpgnos, DB___ham_newpage)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __ham_splitdata_getpgnos, DB___ham_splitdata)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __ham_replace_getpgnos, DB___ham_replace)) != 0) - return 
(ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __ham_copypage_getpgnos, DB___ham_copypage)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __ham_metagroup_getpgnos, DB___ham_metagroup)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __ham_groupalloc_getpgnos, DB___ham_groupalloc)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __ham_curadj_getpgnos, DB___ham_curadj)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __ham_chgpg_getpgnos, DB___ham_chgpg)) != 0) - return (ret); - return (0); -} -#endif /* HAVE_REPLICATION */ - /* * PUBLIC: int __ham_init_recover __P((DB_ENV *, int (***)(DB_ENV *, * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); diff --git a/db/hash/hash_autop.c b/db/hash/hash_autop.c new file mode 100644 index 000000000..5664ba793 --- /dev/null +++ b/db/hash/hash_autop.c @@ -0,0 +1,486 @@ +/* Do not edit: automatically built by gen_rec.awk. */ + +#include "db_config.h" + +#ifdef HAVE_HASH +#ifndef NO_SYSTEM_INCLUDES +#include + +#include +#include +#endif + +#include "db_int.h" +#include "dbinc/crypto.h" +#include "dbinc/db_page.h" +#include "dbinc/db_dispatch.h" +#include "dbinc/db_am.h" +#include "dbinc/hash.h" +#include "dbinc/log.h" +#include "dbinc/txn.h" + +/* + * PUBLIC: int __ham_insdel_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__ham_insdel_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __ham_insdel_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __ham_insdel_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__ham_insdel%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\topcode: %lu\n", (u_long)argp->opcode); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tndx: %lu\n", (u_long)argp->ndx); + (void)printf("\tpagelsn: [%lu][%lu]\n", + (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset); + (void)printf("\tkey: "); + for (i = 0; i < argp->key.size; i++) { + ch = ((u_int8_t *)argp->key.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tdata: "); + for (i = 0; i < argp->data.size; i++) { + ch = ((u_int8_t *)argp->data.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __ham_newpage_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__ham_newpage_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __ham_newpage_args *argp; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __ham_newpage_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__ham_newpage%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? 
"_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\topcode: %lu\n", (u_long)argp->opcode); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tprev_pgno: %lu\n", (u_long)argp->prev_pgno); + (void)printf("\tprevlsn: [%lu][%lu]\n", + (u_long)argp->prevlsn.file, (u_long)argp->prevlsn.offset); + (void)printf("\tnew_pgno: %lu\n", (u_long)argp->new_pgno); + (void)printf("\tpagelsn: [%lu][%lu]\n", + (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset); + (void)printf("\tnext_pgno: %lu\n", (u_long)argp->next_pgno); + (void)printf("\tnextlsn: [%lu][%lu]\n", + (u_long)argp->nextlsn.file, (u_long)argp->nextlsn.offset); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __ham_splitdata_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__ham_splitdata_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __ham_splitdata_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __ham_splitdata_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__ham_splitdata%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\topcode: %lu\n", (u_long)argp->opcode); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tpageimage: "); + for (i = 0; i < argp->pageimage.size; i++) { + ch = ((u_int8_t *)argp->pageimage.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tpagelsn: [%lu][%lu]\n", + (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __ham_replace_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__ham_replace_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __ham_replace_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __ham_replace_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__ham_replace%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tndx: %lu\n", (u_long)argp->ndx); + (void)printf("\tpagelsn: [%lu][%lu]\n", + (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset); + (void)printf("\toff: %ld\n", (long)argp->off); + (void)printf("\tolditem: "); + for (i = 0; i < argp->olditem.size; i++) { + ch = ((u_int8_t *)argp->olditem.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tnewitem: "); + for (i = 0; i < argp->newitem.size; i++) { + ch = ((u_int8_t *)argp->newitem.data)[i]; + printf(isprint(ch) || ch == 0x0a ? 
"%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tmakedup: %lu\n", (u_long)argp->makedup); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __ham_copypage_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__ham_copypage_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __ham_copypage_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __ham_copypage_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__ham_copypage%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tpagelsn: [%lu][%lu]\n", + (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset); + (void)printf("\tnext_pgno: %lu\n", (u_long)argp->next_pgno); + (void)printf("\tnextlsn: [%lu][%lu]\n", + (u_long)argp->nextlsn.file, (u_long)argp->nextlsn.offset); + (void)printf("\tnnext_pgno: %lu\n", (u_long)argp->nnext_pgno); + (void)printf("\tnnextlsn: [%lu][%lu]\n", + (u_long)argp->nnextlsn.file, (u_long)argp->nnextlsn.offset); + (void)printf("\tpage: "); + for (i = 0; i < argp->page.size; i++) { + ch = ((u_int8_t *)argp->page.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __ham_metagroup_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__ham_metagroup_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __ham_metagroup_args *argp; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __ham_metagroup_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__ham_metagroup%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? 
"_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tbucket: %lu\n", (u_long)argp->bucket); + (void)printf("\tmmpgno: %lu\n", (u_long)argp->mmpgno); + (void)printf("\tmmetalsn: [%lu][%lu]\n", + (u_long)argp->mmetalsn.file, (u_long)argp->mmetalsn.offset); + (void)printf("\tmpgno: %lu\n", (u_long)argp->mpgno); + (void)printf("\tmetalsn: [%lu][%lu]\n", + (u_long)argp->metalsn.file, (u_long)argp->metalsn.offset); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tpagelsn: [%lu][%lu]\n", + (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset); + (void)printf("\tnewalloc: %lu\n", (u_long)argp->newalloc); + (void)printf("\tlast_pgno: %lu\n", (u_long)argp->last_pgno); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __ham_groupalloc_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__ham_groupalloc_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __ham_groupalloc_args *argp; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __ham_groupalloc_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__ham_groupalloc%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tmeta_lsn: [%lu][%lu]\n", + (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset); + (void)printf("\tstart_pgno: %lu\n", (u_long)argp->start_pgno); + (void)printf("\tnum: %lu\n", (u_long)argp->num); + (void)printf("\tfree: %lu\n", (u_long)argp->free); + (void)printf("\tlast_pgno: %lu\n", (u_long)argp->last_pgno); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __ham_curadj_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__ham_curadj_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __ham_curadj_args *argp; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __ham_curadj_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__ham_curadj%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? 
"_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tindx: %lu\n", (u_long)argp->indx); + (void)printf("\tlen: %lu\n", (u_long)argp->len); + (void)printf("\tdup_off: %lu\n", (u_long)argp->dup_off); + (void)printf("\tadd: %ld\n", (long)argp->add); + (void)printf("\tis_dup: %ld\n", (long)argp->is_dup); + (void)printf("\torder: %lu\n", (u_long)argp->order); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __ham_chgpg_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__ham_chgpg_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __ham_chgpg_args *argp; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __ham_chgpg_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__ham_chgpg%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tmode: %ld\n", (long)argp->mode); + (void)printf("\told_pgno: %lu\n", (u_long)argp->old_pgno); + (void)printf("\tnew_pgno: %lu\n", (u_long)argp->new_pgno); + (void)printf("\told_indx: %lu\n", (u_long)argp->old_indx); + (void)printf("\tnew_indx: %lu\n", (u_long)argp->new_indx); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __ham_init_print __P((DB_ENV *, int (***)(DB_ENV *, + * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); + */ +int +__ham_init_print(dbenv, dtabp, dtabsizep) + DB_ENV *dbenv; + int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); + size_t *dtabsizep; +{ + int ret; + + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __ham_insdel_print, DB___ham_insdel)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __ham_newpage_print, DB___ham_newpage)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __ham_splitdata_print, DB___ham_splitdata)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __ham_replace_print, DB___ham_replace)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __ham_copypage_print, DB___ham_copypage)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __ham_metagroup_print, DB___ham_metagroup)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __ham_groupalloc_print, DB___ham_groupalloc)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __ham_curadj_print, DB___ham_curadj)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __ham_chgpg_print, DB___ham_chgpg)) != 0) + return (ret); + return (0); +} +#endif /* HAVE_HASH */ diff --git a/db/hash/hash_conv.c b/db/hash/hash_conv.c index 751b87a59..a90799c7b 100644 --- a/db/hash/hash_conv.c +++ b/db/hash/hash_conv.c @@ -1,14 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: hash_conv.c,v 11.16 2004/03/24 20:37:38 bostic Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: hash_conv.c,v 11.14 2003/01/08 05:03:21 bostic Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #include @@ -110,7 +109,7 @@ __ham_mswap(pg) SWAP32(p); /* h_charkey */ for (i = 0; i < NCACHED; ++i) SWAP32(p); /* spares */ - p += 59 * sizeof(u_int32_t); /* unusued */ + p += 59 * sizeof(u_int32_t); /* unused */ SWAP32(p); /* crypto_magic */ return (0); } diff --git a/db/hash/hash_dup.c b/db/hash/hash_dup.c index 31ba9cc3d..93fc2b51f 100644 --- a/db/hash/hash_dup.c +++ b/db/hash/hash_dup.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -34,12 +34,11 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * + * $Id: hash_dup.c,v 11.85 2004/06/03 16:32:21 margo Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: hash_dup.c,v 11.81 2003/06/30 17:20:11 bostic Exp $"; -#endif /* not lint */ +#include "db_config.h" /* * PACKAGE: hashing @@ -200,7 +199,11 @@ __ham_add_dup(dbc, nval, flags, pgnop) case DB_AFTER: tmp_val.doff = hcp->dup_off + DUP_SIZE(hcp->dup_len); break; + default: + DB_ASSERT(0); + return (EINVAL); } + /* Add the duplicate. */ ret = __ham_replpair(dbc, &tmp_val, 0); if (ret == 0) @@ -222,6 +225,9 @@ __ham_add_dup(dbc, nval, flags, pgnop) hcp->dup_tlen += (db_indx_t)DUP_SIZE(nval->size); hcp->dup_len = nval->size; break; + default: + DB_ASSERT(0); + return (EINVAL); } ret = __ham_c_update(dbc, tmp_val.size, 1, 1); return (ret); @@ -477,7 +483,7 @@ __ham_check_move(dbc, add_len) old_len = LEN_HITEM(dbp, hcp->page, dbp->pgsize, H_DATAINDEX(hcp->indx)); - new_datalen = old_len - HKEYDATA_SIZE(0) + add_len; + new_datalen = (old_len - HKEYDATA_SIZE(0)) + add_len; if (HPAGE_PTYPE(hk) != H_DUPLICATE) new_datalen += DUP_SIZE(0); @@ -650,7 +656,7 @@ __ham_move_offpage(dbc, pagep, ndx, pgno) DBT old_dbt; HOFFDUP od; db_indx_t i, *inp; - int32_t shrink; + int32_t difflen; u_int8_t *src; int ret; @@ -674,18 +680,24 @@ __ham_move_offpage(dbc, pagep, ndx, pgno) } else LSN_NOT_LOGGED(LSN(pagep)); - shrink = LEN_HITEM(dbp, pagep, dbp->pgsize, ndx) - HOFFDUP_SIZE; - inp = P_INP(dbp, pagep); - - if (shrink != 0) { + /* + * difflen is the difference in the lengths, and so may be negative. + * We know that the difference between two unsigned lengths from a + * database page will fit into an int32_t. + */ + difflen = + (int32_t)LEN_HITEM(dbp, pagep, dbp->pgsize, ndx) - + (int32_t)HOFFDUP_SIZE; + if (difflen != 0) { /* Copy data. */ + inp = P_INP(dbp, pagep); src = (u_int8_t *)(pagep) + HOFFSET(pagep); - memmove(src + shrink, src, inp[ndx] - HOFFSET(pagep)); - HOFFSET(pagep) += shrink; + memmove(src + difflen, src, inp[ndx] - HOFFSET(pagep)); + HOFFSET(pagep) += difflen; /* Update index table. */ for (i = ndx; i < NUM_ENT(pagep); i++) - inp[i] += shrink; + inp[i] += difflen; } /* Now copy the offdup entry onto the page. */ @@ -722,6 +734,7 @@ __ham_dsearch(dbc, dbt, offp, cmpp, flags) i = F_ISSET(hcp, H_CONTINUE) ? 
hcp->dup_off: 0; data = HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page, hcp->indx)) + i; hcp->dup_tlen = LEN_HDATA(dbp, hcp->page, dbp->pgsize, hcp->indx); + len = hcp->dup_len; while (i < hcp->dup_tlen) { memcpy(&len, data, sizeof(db_indx_t)); data += sizeof(db_indx_t); @@ -753,30 +766,6 @@ __ham_dsearch(dbc, dbt, offp, cmpp, flags) F_SET(hcp, H_ISDUP); } -#ifdef DEBUG -/* - * __ham_cprint -- - * Display the current cursor list. - * - * PUBLIC: void __ham_cprint __P((DBC *)); - */ -void -__ham_cprint(dbc) - DBC *dbc; -{ - HASH_CURSOR *cp; - - cp = (HASH_CURSOR *)dbc->internal; - - fprintf(stderr, "%#0lx->%#0lx: page: %lu index: %lu", - P_TO_ULONG(dbc), P_TO_ULONG(cp), (u_long)cp->pgno, - (u_long)cp->indx); - if (F_ISSET(cp, H_DELETED)) - fprintf(stderr, " (deleted)"); - fprintf(stderr, "\n"); -} -#endif /* DEBUG */ - /* * __ham_dcursor -- * diff --git a/db/hash/hash_func.c b/db/hash/hash_func.c index cd4b5e7b7..b117fcee3 100644 --- a/db/hash/hash_func.c +++ b/db/hash/hash_func.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -38,14 +38,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * + * $Id: hash_func.c,v 11.15 2004/01/28 03:36:11 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: hash_func.c,v 11.14 2003/01/08 05:03:34 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif diff --git a/db/hash/hash_meta.c b/db/hash/hash_meta.c index 247793bbf..6d700fcc1 100644 --- a/db/hash/hash_meta.c +++ b/db/hash/hash_meta.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: hash_meta.c,v 11.31 2004/09/22 03:46:22 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: hash_meta.c,v 11.24 2003/09/09 16:46:10 ubell Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif @@ -32,34 +30,23 @@ __ham_get_meta(dbc) DBC *dbc; { DB *dbp; - DB_ENV *dbenv; DB_MPOOLFILE *mpf; HASH *hashp; HASH_CURSOR *hcp; int ret; dbp = dbc->dbp; - dbenv = dbp->dbenv; mpf = dbp->mpf; hashp = dbp->h_internal; hcp = (HASH_CURSOR *)dbc->internal; - if (dbenv != NULL && - STD_LOCKING(dbc) && !F_ISSET(dbc, DBC_RECOVER | DBC_COMPENSATE)) { - dbc->lock.pgno = hashp->meta_pgno; - if ((ret = __lock_get(dbenv, dbc->locker, - DB_NONBLOCK(dbc) ? DB_LOCK_NOWAIT : 0, - &dbc->lock_dbt, DB_LOCK_READ, &hcp->hlock)) != 0) - return ((ret == DB_LOCK_NOTGRANTED && - !F_ISSET(dbenv, DB_ENV_TIME_NOTGRANTED)) ? - DB_LOCK_DEADLOCK : ret); - - } + if ((ret = __db_lget(dbc, 0, + hashp->meta_pgno, DB_LOCK_READ, 0, &hcp->hlock)) != 0) + return (ret); if ((ret = __memp_fget(mpf, - &hashp->meta_pgno, DB_MPOOL_CREATE, &(hcp->hdr))) != 0 && - LOCK_ISSET(hcp->hlock)) - (void)__lock_put(dbenv, &hcp->hlock); + &hashp->meta_pgno, DB_MPOOL_CREATE, &(hcp->hdr))) != 0) + (void)__LPUT(dbc, hcp->hlock); return (ret); } @@ -83,12 +70,9 @@ __ham_release_meta(dbc) (void)__memp_fput(mpf, hcp->hdr, F_ISSET(hcp, H_DIRTY) ? 
DB_MPOOL_DIRTY : 0); hcp->hdr = NULL; - if (!F_ISSET(dbc, DBC_RECOVER | DBC_COMPENSATE) && - dbc->txn == NULL && LOCK_ISSET(hcp->hlock)) - (void)__lock_put(dbc->dbp->dbenv, &hcp->hlock); F_CLR(hcp, H_DIRTY); - return (0); + return (__TLPUT(dbc, hcp->hlock)); } /* @@ -101,8 +85,6 @@ __ham_dirty_meta(dbc) DBC *dbc; { DB *dbp; - DB_ENV *dbenv; - DB_LOCK _tmp; HASH *hashp; HASH_CURSOR *hcp; int ret; @@ -112,20 +94,11 @@ __ham_dirty_meta(dbc) hcp = (HASH_CURSOR *)dbc->internal; ret = 0; - dbenv = dbp->dbenv; - if (STD_LOCKING(dbc) && !F_ISSET(dbc, DBC_RECOVER | DBC_COMPENSATE)) { - dbc->lock.pgno = hashp->meta_pgno; - if ((ret = __lock_get(dbenv, dbc->locker, - DB_NONBLOCK(dbc) ? DB_LOCK_NOWAIT : 0, - &dbc->lock_dbt, DB_LOCK_WRITE, &_tmp)) == 0) { - ret = __lock_put(dbenv, &hcp->hlock); - hcp->hlock = _tmp; - } - } + + ret = __db_lget(dbc, LCK_COUPLE, + hashp->meta_pgno, DB_LOCK_WRITE, 0, &hcp->hlock); if (ret == 0) F_SET(hcp, H_DIRTY); - return ((ret == DB_LOCK_NOTGRANTED && - !F_ISSET(dbenv, DB_ENV_TIME_NOTGRANTED)) ? - DB_LOCK_DEADLOCK : ret); + return (ret); } diff --git a/db/hash/hash_method.c b/db/hash/hash_method.c index 07508a3db..6b59787a6 100644 --- a/db/hash/hash_method.c +++ b/db/hash/hash_method.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: hash_method.c,v 11.17 2004/01/28 03:36:11 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: hash_method.c,v 11.15 2003/04/18 08:36:37 mjc Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif @@ -19,11 +17,9 @@ static const char revid[] = "$Id: hash_method.c,v 11.15 2003/04/18 08:36:37 mjc #include "dbinc/db_page.h" #include "dbinc/hash.h" -static int __ham_get_h_ffactor __P((DB *, u_int32_t *)); static int __ham_set_h_ffactor __P((DB *, u_int32_t)); static int __ham_set_h_hash __P((DB *, u_int32_t(*)(DB *, const void *, u_int32_t))); -static int __ham_get_h_nelem __P((DB *, u_int32_t *)); static int __ham_set_h_nelem __P((DB *, u_int32_t)); /* @@ -73,9 +69,11 @@ __ham_db_close(dbp) } /* - * __db_get_h_ffactor -- + * __ham_get_h_ffactor -- + * + * PUBLIC: int __ham_get_h_ffactor __P((DB *, u_int32_t *)); */ -static int +int __ham_get_h_ffactor(dbp, h_ffactorp) DB *dbp; u_int32_t *h_ffactorp; @@ -127,8 +125,10 @@ __ham_set_h_hash(dbp, func) /* * __db_get_h_nelem -- + * + * PUBLIC: int __ham_get_h_nelem __P((DB *, u_int32_t *)); */ -static int +int __ham_get_h_nelem(dbp, h_nelemp) DB *dbp; u_int32_t *h_nelemp; diff --git a/db/hash/hash_open.c b/db/hash/hash_open.c index a5842fd18..67b12e5eb 100644 --- a/db/hash/hash_open.c +++ b/db/hash/hash_open.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -38,18 +38,15 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
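Earlier in this patch, __ham_move_offpage() in hash_dup.c replaces the old "shrink" computation with an explicitly signed "difflen", casting both LEN_HITEM() and HOFFDUP_SIZE to int32_t before subtracting; the diff's own comment notes the difference may be negative. When both operands are full-width unsigned values, the subtraction itself is carried out in unsigned arithmetic and wraps around, so the cast-before-subtract form makes the possibly negative result explicit and portable (__ham_replpair() in hash_page.c, later in the patch, makes the analogous change with its change/is_plus pair). Below is a minimal, self-contained illustration of the wrap-around; item_len and entry_size are made-up names for the example, not the Berkeley DB macros.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t item_len = 8;      /* stands in for LEN_HITEM(): a small on-page item */
    uint32_t entry_size = 12;   /* stands in for HOFFDUP_SIZE: a larger replacement */
    int32_t difflen;

    /* Unsigned subtraction wraps around instead of going negative. */
    printf("unsigned difference: %lu\n", (unsigned long)(item_len - entry_size));

    /* Casting before subtracting keeps the computation in signed arithmetic. */
    difflen = (int32_t)item_len - (int32_t)entry_size;
    printf("signed difference:   %ld\n", (long)difflen);

    return (0);
}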
+ * + * $Id: hash_open.c,v 11.191 2004/06/22 18:43:38 margo Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: hash_open.c,v 11.185 2003/07/17 01:39:17 margo Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include -#include #include #endif @@ -83,14 +80,12 @@ __ham_open(dbp, txn, name, base_pgno, flags) { DB_ENV *dbenv; DBC *dbc; - DB_MPOOLFILE *mpf; HASH_CURSOR *hcp; HASH *hashp; int ret, t_ret; dbenv = dbp->dbenv; dbc = NULL; - mpf = dbp->mpf; /* * Get a cursor. If DB_CREATE is specified, we may be creating @@ -130,17 +125,6 @@ __ham_open(dbp, txn, name, base_pgno, flags) if (F_ISSET(&hcp->hdr->dbmeta, DB_HASH_SUBDB)) F_SET(dbp, DB_AM_SUBDB); - /* - * We must initialize last_pgno, it could be stale. - * We update this without holding the meta page write - * locked. This is ok since two threads in the code - * must be setting it to the same value. SR #7159. - */ - if (!F_ISSET(dbp, DB_AM_RDONLY) && - dbp->meta_pgno == PGNO_BASE_MD) { - __memp_last_pgno(mpf, &hcp->hdr->dbmeta.last_pgno); - F_SET(hcp, H_DIRTY); - } } else if (!IS_RECOVERING(dbenv) && !F_ISSET(dbp, DB_AM_RECOVER)) { __db_err(dbp->dbenv, "%s: Invalid hash meta page %d", name, base_pgno); @@ -273,8 +257,7 @@ __ham_init_meta(dbp, meta, pgno, lsnp) { HASH *hashp; db_pgno_t nbuckets; - int i; - int32_t l2; + u_int i, l2; hashp = dbp->h_internal; if (hashp->h_hash == NULL) @@ -372,71 +355,75 @@ __ham_new_file(dbp, txn, fhp, name) mpf = dbp->mpf; meta = NULL; page = NULL; - memset(&pdbt, 0, sizeof(pdbt)); + buf = NULL; - /* Build meta-data page. */ if (name == NULL) { + /* Build meta-data page. */ lpgno = PGNO_BASE_MD; - ret = __memp_fget(mpf, &lpgno, DB_MPOOL_CREATE, &meta); + if ((ret = + __memp_fget(mpf, &lpgno, DB_MPOOL_CREATE, &meta)) != 0) + return (ret); + LSN_NOT_LOGGED(lsn); + lpgno = __ham_init_meta(dbp, meta, PGNO_BASE_MD, &lsn); + meta->dbmeta.last_pgno = lpgno; + ret = __memp_fput(mpf, meta, DB_MPOOL_DIRTY); + meta = NULL; + if (ret != 0) + goto err; + + /* Allocate the final hash bucket. */ + if ((ret = + __memp_fget(mpf, &lpgno, DB_MPOOL_CREATE, &page)) != 0) + goto err; + P_INIT(page, + dbp->pgsize, lpgno, PGNO_INVALID, PGNO_INVALID, 0, P_HASH); + LSN_NOT_LOGGED(page->lsn); + ret = __memp_fput(mpf, page, DB_MPOOL_DIRTY); + page = NULL; + if (ret != 0) + goto err; } else { + memset(&pdbt, 0, sizeof(pdbt)); + + /* Build meta-data page. */ pginfo.db_pagesize = dbp->pgsize; pginfo.type = dbp->type; pginfo.flags = F_ISSET(dbp, (DB_AM_CHKSUM | DB_AM_ENCRYPT | DB_AM_SWAP)); pdbt.data = &pginfo; pdbt.size = sizeof(pginfo); - ret = __os_calloc(dbp->dbenv, 1, dbp->pgsize, &buf); + if ((ret = __os_calloc(dbp->dbenv, 1, dbp->pgsize, &buf)) != 0) + return (ret); meta = (HMETA *)buf; - } - if (ret != 0) - return (ret); - - LSN_NOT_LOGGED(lsn); - lpgno = __ham_init_meta(dbp, meta, PGNO_BASE_MD, &lsn); - meta->dbmeta.last_pgno = lpgno; - - if (name == NULL) - ret = __memp_fput(mpf, meta, DB_MPOOL_DIRTY); - else { + LSN_NOT_LOGGED(lsn); + lpgno = __ham_init_meta(dbp, meta, PGNO_BASE_MD, &lsn); + meta->dbmeta.last_pgno = lpgno; if ((ret = __db_pgout(dbenv, PGNO_BASE_MD, meta, &pdbt)) != 0) goto err; - ret = __fop_write(dbenv, txn, name, - DB_APP_DATA, fhp, dbp->pgsize, 0, 0, buf, dbp->pgsize, 1, - F_ISSET(dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0); - } - if (ret != 0) - goto err; - meta = NULL; - - /* Now allocate the final hash bucket. 
*/ - if (name == NULL) { - if ((ret = - __memp_fget(mpf, &lpgno, DB_MPOOL_CREATE, &page)) != 0) + if ((ret = __fop_write(dbenv, txn, name, DB_APP_DATA, fhp, + dbp->pgsize, 0, 0, buf, dbp->pgsize, 1, F_ISSET( + dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0)) != 0) goto err; - } else { + meta = NULL; + + /* Allocate the final hash bucket. */ #ifdef DIAGNOSTIC - memset(buf, dbp->pgsize, 0); + memset(buf, 0, dbp->pgsize); #endif page = (PAGE *)buf; - } - - P_INIT(page, dbp->pgsize, lpgno, PGNO_INVALID, PGNO_INVALID, 0, P_HASH); - LSN_NOT_LOGGED(page->lsn); - - if (name == NULL) - ret = __memp_fput(mpf, page, DB_MPOOL_DIRTY); - else { + P_INIT(page, + dbp->pgsize, lpgno, PGNO_INVALID, PGNO_INVALID, 0, P_HASH); + LSN_NOT_LOGGED(page->lsn); if ((ret = __db_pgout(dbenv, lpgno, buf, &pdbt)) != 0) goto err; - ret = __fop_write(dbenv, txn, name, DB_APP_DATA, - fhp, dbp->pgsize, lpgno, 0, buf, dbp->pgsize, 1, - F_ISSET(dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0); + if ((ret = __fop_write(dbenv, txn, name, DB_APP_DATA, fhp, + dbp->pgsize, lpgno, 0, buf, dbp->pgsize, 1, F_ISSET( + dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0)) != 0) + goto err; + page = NULL; } - if (ret != 0) - goto err; - page = NULL; -err: if (name != NULL) +err: if (buf != NULL) __os_free(dbenv, buf); else { if (meta != NULL) @@ -521,8 +508,8 @@ __ham_new_subdb(mdbp, dbp, txn) /* Reflect the group allocation. */ if (DBENV_LOGGING(dbenv)) if ((ret = __ham_groupalloc_log(mdbp, txn, - &LSN(mmeta), 0, &LSN(mmeta), - meta->spares[0], meta->max_bucket + 1, mmeta->free)) != 0) + &LSN(mmeta), 0, &LSN(mmeta), meta->spares[0], + meta->max_bucket + 1, mmeta->free, mmeta->last_pgno)) != 0) goto err; /* Release the new meta-data page. */ @@ -551,15 +538,13 @@ err: if (mmeta != NULL) if ((t_ret = __memp_fput(mpf, mmeta, 0)) != 0 && ret == 0) ret = t_ret; - if (LOCK_ISSET(mmlock)) - if ((t_ret = __LPUT(dbc, mmlock)) != 0 && ret == 0) - ret = t_ret; + if ((t_ret = __LPUT(dbc, mmlock)) != 0 && ret == 0) + ret = t_ret; if (meta != NULL) if ((t_ret = __memp_fput(mpf, meta, 0)) != 0 && ret == 0) ret = t_ret; - if (LOCK_ISSET(metalock)) - if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0) - ret = t_ret; + if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0) + ret = t_ret; if (dbc != NULL) if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) ret = t_ret; diff --git a/db/hash/hash_page.c b/db/hash/hash_page.c index c52aa7967..636767f4e 100644 --- a/db/hash/hash_page.c +++ b/db/hash/hash_page.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -38,14 +38,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
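The __ham_new_file hunk above also fixes a swapped-argument call: memset(buf, dbp->pgsize, 0) asks for zero bytes to be set and therefore leaves the DIAGNOSTIC buffer untouched, while the corrected memset(buf, 0, dbp->pgsize) actually clears it. A tiny self-contained C illustration of the difference:

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        char buf[8];

        memset(buf, 'x', sizeof(buf));

        /* Swapped arguments: the length is 0, so nothing is cleared. */
        memset(buf, sizeof(buf), 0);
        printf("after swapped call: %.8s\n", buf);    /* still xxxxxxxx */

        /* Correct order: fill value first, then the byte count. */
        memset(buf, 0, sizeof(buf));
        printf("after correct call: first byte = %d\n", buf[0]);
        return (0);
    }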
+ * + * $Id: hash_page.c,v 11.102 2004/09/22 21:14:56 ubell Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: hash_page.c,v 11.93 2003/06/30 17:20:12 bostic Exp $"; -#endif /* not lint */ - /* * PACKAGE: hashing * @@ -150,7 +148,7 @@ __ham_item_reset(dbc) DB *dbp; DB_MPOOLFILE *mpf; HASH_CURSOR *hcp; - int ret; + int ret, t_ret; dbp = dbc->dbp; mpf = dbp->mpf; @@ -160,29 +158,33 @@ __ham_item_reset(dbc) if (hcp->page != NULL) ret = __memp_fput(mpf, hcp->page, 0); - __ham_item_init(dbc); + if ((t_ret = __ham_item_init(dbc)) != 0 && ret == 0) + ret = t_ret; + return (ret); } /* - * PUBLIC: void __ham_item_init __P((DBC *)); + * PUBLIC: int __ham_item_init __P((DBC *)); */ -void +int __ham_item_init(dbc) DBC *dbc; { HASH_CURSOR *hcp; + int ret; hcp = (HASH_CURSOR *)dbc->internal; + /* - * If this cursor still holds any locks, we must - * release them if we are not running with transactions. + * If this cursor still holds any locks, we must release them if + * we are not running with transactions. */ - (void)__TLPUT(dbc, hcp->lock); + ret = __TLPUT(dbc, hcp->lock); /* - * The following fields must *not* be initialized here - * because they may have meaning across inits. + * The following fields must *not* be initialized here because they + * may have meaning across inits. * hlock, hdr, split_buf, stats */ hcp->bucket = BUCKET_INVALID; @@ -199,6 +201,8 @@ __ham_item_init(dbc) hcp->pgno = PGNO_INVALID; hcp->indx = NDX_INVALID; hcp->page = NULL; + + return (ret); } /* @@ -550,8 +554,8 @@ __ham_del_pair(dbc, reclaim_page) db_ham_mode op; db_indx_t ndx; db_pgno_t chg_pgno, pgno, tmp_pgno; - int ret, t_ret; u_int32_t order; + int ret, t_ret; dbp = dbc->dbp; mpf = dbp->mpf; @@ -571,12 +575,12 @@ __ham_del_pair(dbc, reclaim_page) * to remove the big item and then update the page to remove the * entry referring to the big item. */ - ret = 0; if (HPAGE_PTYPE(H_PAIRKEY(dbp, p, ndx)) == H_OFFPAGE) { memcpy(&pgno, HOFFPAGE_PGNO(P_ENTRY(dbp, p, H_KEYINDEX(ndx))), sizeof(db_pgno_t)); ret = __db_doff(dbc, pgno); - } + } else + ret = 0; if (ret == 0) switch (HPAGE_PTYPE(H_PAIRDATA(dbp, p, ndx))) { @@ -595,6 +599,9 @@ __ham_del_pair(dbc, reclaim_page) */ F_CLR(hcp, H_ISDUP); break; + default: + /* No-op */ + break; } if (ret) @@ -834,9 +841,9 @@ __ham_replpair(dbc, dbt, make_dup) DB_ENV *dbenv; DB_LSN new_lsn; HASH_CURSOR *hcp; - int32_t change; /* XXX: Possible overflow. */ + u_int32_t change; u_int32_t dup_flag, len, memsize; - int beyond_eor, is_big, ret, type; + int beyond_eor, is_big, is_plus, ret, type; u_int8_t *beg, *dest, *end, *hk, *src; void *memp; @@ -864,7 +871,13 @@ __ham_replpair(dbc, dbt, make_dup) * formula doesn't work, because we are essentially adding * new bytes. */ - change = dbt->size - dbt->dlen; + if (dbt->size > dbt->dlen) { + change = dbt->size - dbt->dlen; + is_plus = 1; + } else { + change = dbt->dlen - dbt->size; + is_plus = 0; + } hk = H_PAIRDATA(dbp, hcp->page, hcp->indx); is_big = HPAGE_PTYPE(hk) == H_OFFPAGE; @@ -876,10 +889,25 @@ __ham_replpair(dbc, dbt, make_dup) dbp->pgsize, H_DATAINDEX(hcp->indx)); beyond_eor = dbt->doff + dbt->dlen > len; - if (beyond_eor) - change += dbt->doff + dbt->dlen - len; + if (beyond_eor) { + /* + * The change is beyond the end of file. If change + * is a positive number, we can simply add the extension + * to it. However, if change is negative, then we need + * to figure out if the extension is larger than the + * negative change. 
+ */ + if (is_plus) + change += dbt->doff + dbt->dlen - len; + else if (dbt->doff + dbt->dlen - len > change) { + /* Extension bigger than change */ + is_plus = 1; + change = (dbt->doff + dbt->dlen - len) - change; + } else /* Extension is smaller than change. */ + change -= (dbt->doff + dbt->dlen - len); + } - if (change > (int32_t)P_FREESPACE(dbp, hcp->page) || + if ((is_plus && change > P_FREESPACE(dbp, hcp->page)) || beyond_eor || is_big) { /* * Case 3 -- two subcases. @@ -923,7 +951,7 @@ __ham_replpair(dbc, dbt, make_dup) } /* Now shift old data around to make room for new. */ - if (change > 0) { + if (is_plus) { if ((ret = __os_realloc(dbenv, tdata.size + change, &tdata.data)) != 0) return (ret); @@ -936,13 +964,19 @@ __ham_replpair(dbc, dbt, make_dup) src = (u_int8_t *)tdata.data + dbt->doff + dbt->dlen; if (src < end && tdata.size > dbt->doff + dbt->dlen) { - len = tdata.size - dbt->doff - dbt->dlen; - dest = src + change; + len = tdata.size - (dbt->doff + dbt->dlen); + if (is_plus) + dest = src + change; + else + dest = src - change; memmove(dest, src, len); } memcpy((u_int8_t *)tdata.data + dbt->doff, dbt->data, dbt->size); - tdata.size += change; + if (is_plus) + tdata.size += change; + else + tdata.size -= change; /* Now add the pair. */ ret = __ham_add_el(dbc, &tmp, &tdata, type); @@ -970,7 +1004,7 @@ err: return (ret); if ((ret = __ham_replace_log(dbp, dbc->txn, &new_lsn, 0, PGNO(hcp->page), (u_int32_t)H_DATAINDEX(hcp->indx), &LSN(hcp->page), - (u_int32_t)dbt->doff, &old_dbt, dbt, make_dup)) != 0) + (int32_t)dbt->doff, &old_dbt, dbt, make_dup)) != 0) return (ret); } else @@ -979,7 +1013,7 @@ err: return (ret); LSN(hcp->page) = new_lsn; /* Structure assignment. */ __ham_onpage_replace(dbp, hcp->page, (u_int32_t)H_DATAINDEX(hcp->indx), - (int32_t)dbt->doff, change, dbt); + (int32_t)dbt->doff, change, is_plus, dbt); return (0); } @@ -997,15 +1031,16 @@ err: return (ret); * dbt: the new data that gets written at beg. * * PUBLIC: void __ham_onpage_replace __P((DB *, PAGE *, u_int32_t, - * PUBLIC: int32_t, int32_t, DBT *)); + * PUBLIC: int32_t, u_int32_t, int, DBT *)); */ void -__ham_onpage_replace(dbp, pagep, ndx, off, change, dbt) +__ham_onpage_replace(dbp, pagep, ndx, off, change, is_plus, dbt) DB *dbp; PAGE *pagep; u_int32_t ndx; int32_t off; - int32_t change; + u_int32_t change; + int is_plus; DBT *dbt; { db_indx_t i, *inp; @@ -1030,15 +1065,25 @@ __ham_onpage_replace(dbp, pagep, ndx, off, change, dbt) len = (int32_t)( (HKEYDATA_DATA(P_ENTRY(dbp, pagep, ndx)) + off) - src); - dest = src - change; - memmove(dest, src, len); + if (is_plus) + dest = src - change; + else + dest = src + change; + memmove(dest, src, (size_t)len); if (zero_me) memset(dest + len, 0, change); /* Now update the indices. 
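The __ham_replpair and __ham_onpage_replace changes above retire the signed int32_t change (previously flagged "XXX: Possible overflow") in favor of an unsigned magnitude plus an is_plus direction flag, so the size difference is never formed as a negative value. A minimal sketch of that pattern in plain C; the function and variable names are illustrative, not Berkeley DB's:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Compute |new_size - old_size| without ever producing a negative
     * intermediate value in unsigned arithmetic; the direction is
     * reported separately through is_plus.
     */
    static uint32_t
    size_delta(uint32_t new_size, uint32_t old_size, int *is_plus)
    {
        if (new_size > old_size) {
            *is_plus = 1;
            return (new_size - old_size);
        }
        *is_plus = 0;
        return (old_size - new_size);
    }

    int
    main(void)
    {
        uint32_t change;
        int is_plus;

        change = size_delta(10, 4096, &is_plus);     /* item shrinks */
        printf("change=%u is_plus=%d\n", (unsigned)change, is_plus);
        change = size_delta(4096, 10, &is_plus);     /* item grows */
        printf("change=%u is_plus=%d\n", (unsigned)change, is_plus);
        return (0);
    }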
*/ - for (i = ndx; i < NUM_ENT(pagep); i++) - inp[i] -= change; - HOFFSET(pagep) -= change; + for (i = ndx; i < NUM_ENT(pagep); i++) { + if (is_plus) + inp[i] -= change; + else + inp[i] += change; + } + if (is_plus) + HOFFSET(pagep) -= change; + else + HOFFSET(pagep) += change; } if (off >= 0) memcpy(HKEYDATA_DATA(P_ENTRY(dbp, pagep, ndx)) + off, @@ -1071,11 +1116,12 @@ __ham_split_page(dbc, obucket, nbucket) void *big_buf; dbp = dbc->dbp; + carray = NULL; dbenv = dbp->dbenv; mpf = dbp->mpf; hcp = (HASH_CURSOR *)dbc->internal; temp_pagep = old_pagep = new_pagep = NULL; - carray = NULL; + npgno = PGNO_INVALID; LOCK_INIT(block); bucket_pgno = BUCKET_TO_PAGE(hcp, obucket); @@ -1268,8 +1314,8 @@ err: if (old_pagep != NULL) if (temp_pagep != NULL && PGNO(temp_pagep) != bucket_pgno) (void)__memp_fput(mpf, temp_pagep, DB_MPOOL_DIRTY); } - if (LOCK_ISSET(block)) - __TLPUT(dbc, block); + if ((t_ret = __TLPUT(dbc, block)) != 0 && ret == 0) + ret = t_ret; if (carray != NULL) /* We never knew its size. */ __os_free(dbenv, carray); return (ret); @@ -1554,13 +1600,19 @@ __ham_get_cpage(dbc, mode) */ LOCK_INIT(tmp_lock); if (STD_LOCKING(dbc)) { - if (hcp->lbucket != hcp->bucket && /* Case 4 */ - (ret = __TLPUT(dbc, hcp->lock)) != 0) - return (ret); + if (hcp->lbucket != hcp->bucket) { /* Case 4 */ + if ((ret = __TLPUT(dbc, hcp->lock)) != 0) + return (ret); + LOCK_INIT(hcp->lock); + } + /* + * See if we have the right lock. If we are doing + * dirty reads we assume the write lock has been downgraded. + */ if ((LOCK_ISSET(hcp->lock) && - (hcp->lock_mode == DB_LOCK_READ && - mode == DB_LOCK_WRITE))) { + ((hcp->lock_mode == DB_LOCK_READ || + F_ISSET(dbp, DB_AM_DIRTY)) && mode == DB_LOCK_WRITE))) { /* Case 3. */ tmp_lock = hcp->lock; LOCK_INIT(hcp->lock); @@ -1575,9 +1627,9 @@ __ham_get_cpage(dbc, mode) if (ret == 0) { hcp->lock_mode = mode; hcp->lbucket = hcp->bucket; - if (LOCK_ISSET(tmp_lock)) - /* Case 3: release the original lock. */ - ret = __lock_put(dbp->dbenv, &tmp_lock); + /* Case 3: release the original lock. */ + if ((ret = __ENV_LPUT(dbp->dbenv, tmp_lock, 0)) != 0) + return (ret); } else if (LOCK_ISSET(tmp_lock)) hcp->lock = tmp_lock; } diff --git a/db/hash/hash_rec.c b/db/hash/hash_rec.c index b3701057e..6cd4ffe4c 100644 --- a/db/hash/hash_rec.c +++ b/db/hash/hash_rec.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -38,14 +38,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * + * $Id: hash_rec.c,v 11.82 2004/09/22 03:46:22 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: hash_rec.c,v 11.76 2003/07/04 17:45:07 margo Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -92,16 +90,28 @@ __ham_insdel_recover(dbenv, dbtp, lsnp, op, info) if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { if (DB_UNDO(op)) { - /* - * We are undoing and the page doesn't exist. That - * is equivalent to having a pagelsn of 0, so we - * would not have to undo anything. In this case, - * don't bother creating a page. - */ + if (ret == DB_PAGE_NOTFOUND) + goto done; + else { + ret = __db_pgerr(file_dbp, argp->pgno, ret); + goto out; + } + } +#ifdef HAVE_FTRUNCATE + /* If the page is not here then it was later truncated. 
*/ + if (!IS_ZERO_LSN(argp->pagelsn)) goto done; - } else if ((ret = __memp_fget(mpf, - &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) +#endif + /* + * This page was created by a group allocation and + * the file may not have been extend yet. + * Create the page if necessary. + */ + if ((ret = __memp_fget(mpf, + &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) { + ret = __db_pgerr(file_dbp, argp->pgno, ret); goto out; + } } cmp_n = log_compare(lsnp, &LSN(pagep)); @@ -117,8 +127,8 @@ __ham_insdel_recover(dbenv, dbtp, lsnp, op, info) * We do this by calling __putitem with the type H_OFFPAGE instead * of H_KEYDATA. */ - opcode = OPCODE_OF(argp->opcode); + opcode = OPCODE_OF(argp->opcode); flags = 0; if ((opcode == DELPAIR && cmp_n == 0 && DB_UNDO(op)) || (opcode == PUTPAIR && cmp_p == 0 && DB_REDO(op))) { @@ -142,7 +152,7 @@ __ham_insdel_recover(dbenv, dbtp, lsnp, op, info) type = H_KEYDATA; __ham_putitem(file_dbp, pagep, &argp->data, type); } else - (void)__ham_reputpair(file_dbp, pagep, + __ham_reputpair(file_dbp, pagep, argp->ndx, &argp->key, &argp->data); LSN(pagep) = DB_REDO(op) ? *lsnp : argp->pagelsn; @@ -199,20 +209,7 @@ __ham_newpage_recover(dbenv, dbtp, lsnp, op, info) REC_PRINT(__ham_newpage_print); REC_INTRO(__ham_newpage_read, 1); - if ((ret = __memp_fget(mpf, &argp->new_pgno, 0, &pagep)) != 0) { - if (DB_UNDO(op)) { - /* - * We are undoing and the page doesn't exist. That - * is equivalent to having a pagelsn of 0, so we - * would not have to undo anything. In this case, - * don't bother creating a page. - */ - ret = 0; - goto ppage; - } else if ((ret = __memp_fget(mpf, - &argp->new_pgno, DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - } + REC_FGET(mpf, argp->new_pgno, &pagep, ppage); /* * There are potentially three pages we need to check: the one @@ -249,21 +246,7 @@ __ham_newpage_recover(dbenv, dbtp, lsnp, op, info) /* Now do the prev page. */ ppage: if (argp->prev_pgno != PGNO_INVALID) { - if ((ret = - __memp_fget(mpf, &argp->prev_pgno, 0, &pagep)) != 0) { - if (DB_UNDO(op)) { - /* - * We are undoing and the page doesn't exist. - * That is equivalent to having a pagelsn of 0, - * so we would not have to undo anything. In - * this case, don't bother creating a page. - */ - ret = 0; - goto npage; - } else if ((ret = __memp_fget(mpf, - &argp->prev_pgno, DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - } + REC_FGET(mpf, argp->prev_pgno, &pagep, npage); cmp_n = log_compare(lsnp, &LSN(pagep)); cmp_p = log_compare(&LSN(pagep), &argp->prevlsn); @@ -293,20 +276,7 @@ ppage: if (argp->prev_pgno != PGNO_INVALID) { /* Now time to do the next page */ npage: if (argp->next_pgno != PGNO_INVALID) { - if ((ret = - __memp_fget(mpf, &argp->next_pgno, 0, &pagep)) != 0) { - if (DB_UNDO(op)) { - /* - * We are undoing and the page doesn't exist. - * That is equivalent to having a pagelsn of 0, - * so we would not have to undo anything. In - * this case, don't bother creating a page. 
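These recovery routines all rely on the same idempotence test: compare the page LSN with the log record's LSNs, redo only when the page still carries the record's before-image LSN, and undo only when it carries the record's own LSN; in every other case the page is already in the desired state. Below is a simplified, self-contained sketch of that test; the lsn, page, and logrec types are illustrative stand-ins, not DB_LSN or the real log record layout.

    #include <stdio.h>
    #include <string.h>

    struct lsn {
        unsigned int file, offset;
    };

    static int
    lsn_cmp(const struct lsn *a, const struct lsn *b)
    {
        if (a->file != b->file)
            return (a->file < b->file ? -1 : 1);
        if (a->offset != b->offset)
            return (a->offset < b->offset ? -1 : 1);
        return (0);
    }

    struct page {
        struct lsn lsn;
        char data[32];
    };

    struct logrec {
        struct lsn lsn;        /* LSN of this record. */
        struct lsn prev_lsn;   /* Page LSN before the change. */
        char before[32], after[32];
    };

    /* Apply a record idempotently: act only if the page LSN matches. */
    static void
    recover(struct page *p, const struct logrec *r, int redo)
    {
        if (redo && lsn_cmp(&p->lsn, &r->prev_lsn) == 0) {
            memcpy(p->data, r->after, sizeof(p->data));
            p->lsn = r->lsn;
        } else if (!redo && lsn_cmp(&p->lsn, &r->lsn) == 0) {
            memcpy(p->data, r->before, sizeof(p->data));
            p->lsn = r->prev_lsn;
        }
        /* Otherwise the page is already in the desired state. */
    }

    int
    main(void)
    {
        struct page p = { { 1, 100 }, "old" };
        struct logrec r = { { 1, 200 }, { 1, 100 }, "old", "new" };

        recover(&p, &r, 1);        /* redo applies the change */
        printf("%s\n", p.data);
        recover(&p, &r, 1);        /* a second redo is a no-op */
        recover(&p, &r, 0);        /* undo rolls it back */
        printf("%s\n", p.data);
        return (0);
    }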
- */ - goto done; - } else if ((ret = __memp_fget(mpf, - &argp->next_pgno, DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - } + REC_FGET(mpf, argp->next_pgno, &pagep, done); cmp_n = log_compare(lsnp, &LSN(pagep)); cmp_p = log_compare(&LSN(pagep), &argp->nextlsn); @@ -365,8 +335,8 @@ __ham_replace_recover(dbenv, dbtp, lsnp, op, info) DBT dbt; PAGE *pagep; u_int32_t flags; - int32_t grow; - int cmp_n, cmp_p, ret; + u_int32_t change; + int cmp_n, cmp_p, is_plus, ret; u_int8_t *hk; pagep = NULL; @@ -375,19 +345,7 @@ __ham_replace_recover(dbenv, dbtp, lsnp, op, info) REC_PRINT(__ham_replace_print); REC_INTRO(__ham_replace_read, 1); - if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { - if (DB_UNDO(op)) { - /* - * We are undoing and the page doesn't exist. That - * is equivalent to having a pagelsn of 0, so we - * would not have to undo anything. In this case, - * don't bother creating a page. - */ - goto done; - } else if ((ret = __memp_fget(mpf, - &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - } + REC_FGET(mpf, argp->pgno, &pagep, done); cmp_n = log_compare(lsnp, &LSN(pagep)); cmp_p = log_compare(&LSN(pagep), &argp->pagelsn); @@ -395,27 +353,45 @@ __ham_replace_recover(dbenv, dbtp, lsnp, op, info) memset(&dbt, 0, sizeof(dbt)); flags = 0; - grow = 1; + /* + * Before we know the direction of the transformation we will + * determine the size differential; then once we know if we are + * redoing or undoing, we'll adjust the sign (is_plus) appropriately. + */ + if (argp->newitem.size > argp->olditem.size) { + change = argp->newitem.size - argp->olditem.size; + is_plus = 1; + } else { + change = argp->olditem.size - argp->newitem.size; + is_plus = 0; + } if (cmp_p == 0 && DB_REDO(op)) { /* Reapply the change as specified. */ dbt.data = argp->newitem.data; dbt.size = argp->newitem.size; - grow = argp->newitem.size - argp->olditem.size; LSN(pagep) = *lsnp; + /* + * The is_plus flag is set properly to reflect + * newitem.size - olditem.size. + */ flags = DB_MPOOL_DIRTY; } else if (cmp_n == 0 && DB_UNDO(op)) { /* Undo the already applied change. */ dbt.data = argp->olditem.data; dbt.size = argp->olditem.size; - grow = argp->olditem.size - argp->newitem.size; + /* + * Invert is_plus to reflect sign of + * olditem.size - newitem.size. + */ + is_plus = !is_plus; LSN(pagep) = argp->pagelsn; flags = DB_MPOOL_DIRTY; } if (flags) { __ham_onpage_replace(file_dbp, pagep, - argp->ndx, argp->off, grow, &dbt); + argp->ndx, argp->off, change, is_plus, &dbt); if (argp->makedup) { hk = P_ENTRY(file_dbp, pagep, argp->ndx); if (DB_REDO(op)) @@ -467,16 +443,28 @@ __ham_splitdata_recover(dbenv, dbtp, lsnp, op, info) if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { if (DB_UNDO(op)) { - /* - * We are undoing and the page doesn't exist. That - * is equivalent to having a pagelsn of 0, so we - * would not have to undo anything. In this case, - * don't bother creating a page. - */ + if (ret == DB_PAGE_NOTFOUND) + goto done; + else { + ret = __db_pgerr(file_dbp, argp->pgno, ret); + goto out; + } + } +#ifdef HAVE_FTRUNCATE + /* If the page is not here then it was later truncated. */ + if (!IS_ZERO_LSN(argp->pagelsn)) goto done; - } else if ((ret = __memp_fget(mpf, - &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) +#endif + /* + * This page was created by a group allocation and + * the file may not have been extend yet. + * Create the page if necessary. 
+ */ + if ((ret = __memp_fget(mpf, + &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) { + ret = __db_pgerr(file_dbp, argp->pgno, ret); goto out; + } } cmp_n = log_compare(lsnp, &LSN(pagep)); @@ -556,20 +544,7 @@ __ham_copypage_recover(dbenv, dbtp, lsnp, op, info) flags = 0; /* This is the bucket page. */ - if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { - if (DB_UNDO(op)) { - /* - * We are undoing and the page doesn't exist. That - * is equivalent to having a pagelsn of 0, so we - * would not have to undo anything. In this case, - * don't bother creating a page. - */ - ret = 0; - goto donext; - } else if ((ret = __memp_fget(mpf, - &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - } + REC_FGET(mpf, argp->pgno, &pagep, donext); cmp_n = log_compare(lsnp, &LSN(pagep)); cmp_p = log_compare(&LSN(pagep), &argp->pagelsn); @@ -594,20 +569,7 @@ __ham_copypage_recover(dbenv, dbtp, lsnp, op, info) pagep = NULL; donext: /* Now fix up the "next" page. */ - if ((ret = __memp_fget(mpf, &argp->next_pgno, 0, &pagep)) != 0) { - if (DB_UNDO(op)) { - /* - * We are undoing and the page doesn't exist. That - * is equivalent to having a pagelsn of 0, so we - * would not have to undo anything. In this case, - * don't bother creating a page. - */ - ret = 0; - goto do_nn; - } else if ((ret = __memp_fget(mpf, - &argp->next_pgno, DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - } + REC_FGET(mpf, argp->next_pgno, &pagep, do_nn); /* For REDO just update the LSN. For UNDO copy page back. */ cmp_n = log_compare(lsnp, &LSN(pagep)); @@ -630,19 +592,7 @@ donext: /* Now fix up the "next" page. */ do_nn: if (argp->nnext_pgno == PGNO_INVALID) goto done; - if ((ret = __memp_fget(mpf, &argp->nnext_pgno, 0, &pagep)) != 0) { - if (DB_UNDO(op)) { - /* - * We are undoing and the page doesn't exist. That - * is equivalent to having a pagelsn of 0, so we - * would not have to undo anything. In this case, - * don't bother creating a page. - */ - goto done; - } else if ((ret = __memp_fget(mpf, - &argp->nnext_pgno, DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - } + REC_FGET(mpf, argp->nnext_pgno, &pagep, done); cmp_n = log_compare(lsnp, &LSN(pagep)); cmp_p = log_compare(&LSN(pagep), &argp->nnextlsn); @@ -706,7 +656,7 @@ __ham_metagroup_recover(dbenv, dbtp, lsnp, op, info) /* * This logs the virtual create of pages pgno to pgno + bucket - * Since the mpool page-allocation is not really able to be + * If HAVE_FTRUNCATE is not supported the mpool page-allocation is not * transaction protected, we can never undo it. Even in an abort, * we have to allocate these pages to the hash table if they * were actually created. In particular, during disaster @@ -725,7 +675,23 @@ __ham_metagroup_recover(dbenv, dbtp, lsnp, op, info) if (argp->newalloc) pgno += argp->bucket; - if ((ret = __memp_fget(mpf, &pgno, DB_MPOOL_CREATE, &pagep)) != 0) { + flags = 0; + pagep = NULL; +#ifndef HAVE_FTRUNCATE + flags = DB_MPOOL_CREATE; +#endif + ret = __memp_fget(mpf, &pgno, flags, &pagep); + +#ifdef HAVE_FTRUNCATE + /* If we are undoing, then we don't want to create the page. 
*/ + if (ret != 0 && DB_REDO(op)) + ret = __memp_fget(mpf, &pgno, DB_MPOOL_CREATE, &pagep); + else if (ret == DB_PAGE_NOTFOUND) { + groupgrow = 0; + goto do_meta; + } +#endif + if (ret != 0) { if (ret != ENOSPC) goto out; pgno = 0; @@ -738,18 +704,32 @@ __ham_metagroup_recover(dbenv, dbtp, lsnp, op, info) CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn); flags = 0; - if ((cmp_p == 0 && DB_REDO(op)) || (cmp_n == 0 && DB_UNDO(op))) { - /* - * We need to make sure that we redo the allocation of the - * pages. - */ - if (DB_REDO(op)) - pagep->lsn = *lsnp; - else - pagep->lsn = argp->pagelsn; + if (cmp_p == 0 && DB_REDO(op)) { + pagep->lsn = *lsnp; flags = DB_MPOOL_DIRTY; } - if ((ret = __memp_fput(mpf, pagep, flags)) != 0) + else if (cmp_n == 0 && DB_UNDO(op)) { +#ifdef HAVE_FTRUNCATE + /* If this record allocated the pages give them back. */ + if (argp->newalloc) { + if (pagep != NULL && (ret = + __memp_fput(mpf, pagep, DB_MPOOL_DISCARD)) != 0) + goto out; + pagep = NULL; + if ((ret = __memp_ftruncate(mpf, argp->pgno, 0)) != 0) + goto out; + } else +#endif + { + /* + * Otherwise just roll the page back to its + * previous state. + */ + pagep->lsn = argp->pagelsn; + flags = DB_MPOOL_DIRTY; + } + } + if (pagep != NULL && (ret = __memp_fput(mpf, pagep, flags)) != 0) goto out; do_meta: @@ -786,16 +766,26 @@ do_meta: * Now we need to fix up the spares array. Each entry in the * spares array indicates the beginning page number for the * indicated doubling. We need to fill this in whenever the - * spares array is invalid, since we never reclaim pages from - * the spares array and we have to allocate the pages to the + * spares array is invalid, if we never reclaim pages then + * we have to allocate the pages to the * spares array in both the redo and undo cases. */ if (groupgrow && +#ifdef HAVE_FTRUNCATE + !DB_UNDO(op) && +#endif hcp->hdr->spares[__db_log2(argp->bucket + 1) + 1] == PGNO_INVALID) { hcp->hdr->spares[__db_log2(argp->bucket + 1) + 1] = - argp->pgno - argp->bucket - 1; + (argp->pgno - argp->bucket) - 1; did_recover = 1; } +#ifdef HAVE_FTRUNCATE + if (cmp_n == 0 && groupgrow && DB_UNDO(op)) { + hcp->hdr->spares[ + __db_log2(argp->bucket + 1) + 1] = PGNO_INVALID; + did_recover = 1; + } +#endif /* * Finally, we need to potentially fix up the last_pgno field @@ -808,16 +798,18 @@ do_meta: mmeta_flags = 0; cmp_n = log_compare(lsnp, &mmeta->lsn); cmp_p = log_compare(&mmeta->lsn, &argp->mmetalsn); - if (cmp_p == 0 && DB_REDO(op)) { + if (cmp_p == 0 && DB_REDO(op)) mmeta->lsn = *lsnp; - mmeta_flags = DB_MPOOL_DIRTY; - } else if (cmp_n == 0 && DB_UNDO(op)) { + else if (cmp_n == 0 && DB_UNDO(op)) mmeta->lsn = argp->mmetalsn; - mmeta_flags = DB_MPOOL_DIRTY; - } } else mmeta = (DBMETA *)hcp->hdr; +#ifdef HAVE_FTRUNCATE + if (cmp_n == 0 && DB_UNDO(op)) + mmeta->last_pgno = argp->last_pgno; + else if (DB_REDO(op)) +#endif if (mmeta->last_pgno < pgno) mmeta->last_pgno = pgno; mmeta_flags = DB_MPOOL_DIRTY; @@ -875,13 +867,14 @@ __ham_groupalloc_recover(dbenv, dbtp, lsnp, op, info) pgno = PGNO_BASE_MD; if ((ret = __memp_fget(mpf, &pgno, 0, &mmeta)) != 0) { if (DB_REDO(op)) { - /* Page should have existed. */ ret = __db_pgerr(file_dbp, pgno, ret); goto out; - } else { - ret = 0; + } else goto done; - } + } + if (ret != 0) { + ret = __db_pgerr(file_dbp, pgno, ret); + goto out; } cmp_n = log_compare(lsnp, &LSN(mmeta)); @@ -907,24 +900,62 @@ __ham_groupalloc_recover(dbenv, dbtp, lsnp, op, info) } } else if (DB_UNDO(op)) { /* - * Reset the last page back to its preallocation state. 
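When HAVE_FTRUNCATE is defined, the undo paths above return freshly allocated pages by discarding the buffers and truncating the file back to its pre-allocation length rather than queueing the pages on the limbo list. A minimal POSIX illustration of the underlying call the #ifdef guards (the temporary file and the 4096-byte "page" size exist only for the demo):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/stat.h>
    #include <sys/types.h>
    #include <unistd.h>

    int
    main(void)
    {
        struct stat sb;
        char path[] = "/tmp/ftrunc-demo-XXXXXX";
        int fd;

        if ((fd = mkstemp(path)) == -1)
            return (EXIT_FAILURE);

        /* Grow the file to four "pages" of 4096 bytes ... */
        if (ftruncate(fd, 4 * 4096) == -1)
            return (EXIT_FAILURE);
        fstat(fd, &sb);
        printf("grown to %lld bytes\n", (long long)sb.st_size);

        /* ... then give the last three back, as an undo would. */
        if (ftruncate(fd, 1 * 4096) == -1)
            return (EXIT_FAILURE);
        fstat(fd, &sb);
        printf("truncated to %lld bytes\n", (long long)sb.st_size);

        close(fd);
        unlink(path);
        return (EXIT_SUCCESS);
    }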
+ * Fetch the last page and determine if it is in + * the post allocation state. */ + pagep = NULL; if ((ret = __memp_fget(mpf, &pgno, 0, &pagep)) == 0) { + if (log_compare(&pagep->lsn, lsnp) != 0) { + if ((ret = __memp_fput(mpf, + pagep, DB_MPOOL_DISCARD)) != 0) + goto out; + pagep = NULL; + } + } else if (ret != DB_PAGE_NOTFOUND) + goto out; +#ifdef HAVE_FTRUNCATE + COMPQUIET(info, NULL); + /* + * If the last page was allocated then truncate back + * to the first page. + */ + if (pagep != NULL) { + if ((ret = + __memp_fput(mpf, pagep, DB_MPOOL_DISCARD)) != 0) + goto out; + if ((ret = + __memp_ftruncate(mpf, argp->start_pgno, 0)) != 0) + goto out; + } + /* + * If we are rolling back the metapage, then make + * sure it reflects the the correct last_pgno. + */ + if (cmp_n == 0) { + mmeta->last_pgno = argp->last_pgno; + modified = 1; + } + pgno = 0; +#else + /* + * Reset the last page back to its preallocation state. + */ + if (pagep != NULL) { if (log_compare(&pagep->lsn, lsnp) == 0) ZERO_LSN(pagep->lsn); if ((ret = __memp_fput(mpf, pagep, DB_MPOOL_DIRTY)) != 0) goto out; - } else if (ret != DB_PAGE_NOTFOUND) - goto out; + } /* - * Always put the pages into the limbo list and free them later. + * Put the pages into the limbo list and free them later. */ if ((ret = __db_add_limbo(dbenv, info, argp->fileid, argp->start_pgno, argp->num)) != 0) goto out; +#endif if (cmp_n == 0) { LSN(mmeta) = argp->meta_lsn; modified = 1; @@ -933,7 +964,8 @@ __ham_groupalloc_recover(dbenv, dbtp, lsnp, op, info) /* * In both REDO and UNDO, we have grown the file and need to make - * sure that last_pgno is correct. + * sure that last_pgno is correct. If we HAVE_FTRUNCATE pgno + * will only be valid on REDO. */ if (pgno > mmeta->last_pgno) { mmeta->last_pgno = pgno; @@ -942,6 +974,7 @@ __ham_groupalloc_recover(dbenv, dbtp, lsnp, op, info) done: if (ret == 0) *lsnp = argp->prev_lsn; + ret = 0; out: if (mmeta != NULL) (void)__memp_fput(mpf, mmeta, modified ? DB_MPOOL_DIRTY : 0); @@ -1031,9 +1064,9 @@ __ham_curadj_recover(dbenv, dbtp, lsnp, op, info) goto done; /* - * Undo the adjustment by reinitializing the the cursor - * to look like the one that was used to do the adustment, - * then we invert the add so that undo the adjustment. + * Undo the adjustment by reinitializing the the cursor to look like + * the one that was used to do the adjustment, then we invert the + * add so that undo the adjustment. */ hcp = (HASH_CURSOR *)dbc->internal; hcp->pgno = argp->pgno; diff --git a/db/hash/hash_reclaim.c b/db/hash/hash_reclaim.c index c1a127153..f0adba7c6 100644 --- a/db/hash/hash_reclaim.c +++ b/db/hash/hash_reclaim.c @@ -1,20 +1,16 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: hash_reclaim.c,v 11.17 2004/06/22 18:43:38 margo Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: hash_reclaim.c,v 11.15 2003/06/30 17:20:13 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include - -#include #endif #include "db_int.h" diff --git a/db/hash/hash_stat.c b/db/hash/hash_stat.c index 2526f0f21..a50e383af 100644 --- a/db/hash/hash_stat.c +++ b/db/hash/hash_stat.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: hash_stat.c,v 11.66 2004/09/22 03:46:22 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: hash_stat.c,v 11.52 2003/06/30 17:20:13 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -22,9 +20,9 @@ static const char revid[] = "$Id: hash_stat.c,v 11.52 2003/06/30 17:20:13 bostic #include "dbinc/db_shash.h" #include "dbinc/btree.h" #include "dbinc/hash.h" -#include "dbinc/lock.h" #include "dbinc/mp.h" +#ifdef HAVE_STATISTICS static int __ham_stat_callback __P((DB *, PAGE *, void *, int *)); /* @@ -117,6 +115,241 @@ err: if (sp != NULL) return (ret); } +/* + * __ham_stat_print -- + * Display hash statistics. + * + * PUBLIC: int __ham_stat_print __P((DBC *, u_int32_t)); + */ +int +__ham_stat_print(dbc, flags) + DBC *dbc; + u_int32_t flags; +{ + static const FN fn[] = { + { DB_HASH_DUP, "duplicates" }, + { DB_HASH_SUBDB, "multiple-databases" }, + { DB_HASH_DUPSORT, "sorted duplicates" }, + { 0, NULL } + }; + DB *dbp; + DB_ENV *dbenv; + DB_HASH_STAT *sp; + int lorder, ret; + const char *s; + + dbp = dbc->dbp; + dbenv = dbp->dbenv; + + if ((ret = __ham_stat(dbc, &sp, 0)) != 0) + return (ret); + + if (LF_ISSET(DB_STAT_ALL)) { + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "Default Hash database information:"); + } + __db_msg(dbenv, "%lx\tHash magic number", (u_long)sp->hash_magic); + __db_msg(dbenv, + "%lu\tHash version number", (u_long)sp->hash_version); + (void)__db_get_lorder(dbp, &lorder); + switch (lorder) { + case 1234: + s = "Little-endian"; + break; + case 4321: + s = "Big-endian"; + break; + default: + s = "Unrecognized byte order"; + break; + } + __db_msg(dbenv, "%s\tByte order", s); + __db_prflags(dbenv, NULL, sp->hash_metaflags, fn, NULL, "\tFlags"); + __db_dl(dbenv, + "Underlying database page size", (u_long)sp->hash_pagesize); + __db_dl(dbenv, "Specified fill factor", (u_long)sp->hash_ffactor); + __db_dl(dbenv, + "Number of keys in the database", (u_long)sp->hash_nkeys); + __db_dl(dbenv, + "Number of data items in the database", (u_long)sp->hash_ndata); + + __db_dl(dbenv, "Number of hash buckets", (u_long)sp->hash_buckets); + __db_dl_pct(dbenv, "Number of bytes free on bucket pages", + (u_long)sp->hash_bfree, DB_PCT_PG( + sp->hash_bfree, sp->hash_buckets, sp->hash_pagesize), "ff"); + + __db_dl(dbenv, + "Number of overflow pages", (u_long)sp->hash_bigpages); + __db_dl_pct(dbenv, "Number of bytes free in overflow pages", + (u_long)sp->hash_big_bfree, DB_PCT_PG( + sp->hash_big_bfree, sp->hash_bigpages, sp->hash_pagesize), "ff"); + + __db_dl(dbenv, + "Number of bucket overflow pages", (u_long)sp->hash_overflows); + __db_dl_pct(dbenv, + "Number of bytes free in bucket overflow pages", + (u_long)sp->hash_ovfl_free, DB_PCT_PG( + sp->hash_ovfl_free, sp->hash_overflows, sp->hash_pagesize), "ff"); + + __db_dl(dbenv, "Number of duplicate pages", (u_long)sp->hash_dup); + __db_dl_pct(dbenv, "Number of bytes free in duplicate pages", + (u_long)sp->hash_dup_free, DB_PCT_PG( + sp->hash_dup_free, sp->hash_dup, sp->hash_pagesize), "ff"); + + __db_dl(dbenv, + "Number of pages on the free list", (u_long)sp->hash_free); + + __os_ufree(dbenv, sp); + + return (0); +} + +static int +__ham_stat_callback(dbp, pagep, cookie, putp) + DB *dbp; + PAGE *pagep; + void *cookie; + int *putp; +{ + DB_HASH_STAT *sp; + DB_BTREE_STAT bstat; + db_indx_t indx, len, off, tlen, top; + u_int8_t *hk; + int ret; + + *putp = 0; + sp = cookie; + + switch (pagep->type) { + case P_INVALID: + /* + * Hash pages may be wholly zeroed; this is 
not a bug. + * Obviously such pages have no data, so we can just proceed. + */ + break; + case P_HASH: + /* + * We count the buckets and the overflow pages + * separately and tally their bytes separately + * as well. We need to figure out if this page + * is a bucket. + */ + if (PREV_PGNO(pagep) == PGNO_INVALID) + sp->hash_bfree += P_FREESPACE(dbp, pagep); + else { + sp->hash_overflows++; + sp->hash_ovfl_free += P_FREESPACE(dbp, pagep); + } + top = NUM_ENT(pagep); + /* Correct for on-page duplicates and deleted items. */ + for (indx = 0; indx < top; indx += P_INDX) { + switch (*H_PAIRDATA(dbp, pagep, indx)) { + case H_OFFDUP: + break; + case H_OFFPAGE: + case H_KEYDATA: + sp->hash_ndata++; + break; + case H_DUPLICATE: + tlen = LEN_HDATA(dbp, pagep, 0, indx); + hk = H_PAIRDATA(dbp, pagep, indx); + for (off = 0; off < tlen; + off += len + 2 * sizeof(db_indx_t)) { + sp->hash_ndata++; + memcpy(&len, + HKEYDATA_DATA(hk) + + off, sizeof(db_indx_t)); + } + break; + default: + return (__db_pgfmt(dbp->dbenv, PGNO(pagep))); + } + } + sp->hash_nkeys += H_NUMPAIRS(pagep); + break; + case P_IBTREE: + case P_IRECNO: + case P_LBTREE: + case P_LRECNO: + case P_LDUP: + /* + * These are all btree pages; get a correct + * cookie and call them. Then add appropriate + * fields into our stat structure. + */ + memset(&bstat, 0, sizeof(bstat)); + if ((ret = __bam_stat_callback(dbp, pagep, &bstat, putp)) != 0) + return (ret); + sp->hash_dup++; + sp->hash_dup_free += bstat.bt_leaf_pgfree + + bstat.bt_dup_pgfree + bstat.bt_int_pgfree; + sp->hash_ndata += bstat.bt_ndata; + break; + case P_OVERFLOW: + sp->hash_bigpages++; + sp->hash_big_bfree += P_OVFLSPACE(dbp, dbp->pgsize, pagep); + break; + default: + return (__db_pgfmt(dbp->dbenv, PGNO(pagep))); + } + + return (0); +} + +/* + * __ham_print_cursor -- + * Display the current cursor. + * + * PUBLIC: void __ham_print_cursor __P((DBC *)); + */ +void +__ham_print_cursor(dbc) + DBC *dbc; +{ + static const FN fn[] = { + { H_CONTINUE, "H_CONTINUE" }, + { H_DELETED, "H_DELETED" }, + { H_DIRTY, "H_DIRTY" }, + { H_DUPONLY, "H_DUPONLY" }, + { H_EXPAND, "H_EXPAND" }, + { H_ISDUP, "H_ISDUP" }, + { H_NEXT_NODUP, "H_NEXT_NODUP" }, + { H_NOMORE, "H_NOMORE" }, + { H_OK, "H_OK" }, + { 0, NULL } + }; + DB_ENV *dbenv; + HASH_CURSOR *cp; + + dbenv = dbc->dbp->dbenv; + cp = (HASH_CURSOR *)dbc->internal; + + STAT_ULONG("Bucket traversing", cp->bucket); + STAT_ULONG("Bucket locked", cp->lbucket); + STAT_ULONG("Duplicate set offset", cp->dup_off); + STAT_ULONG("Current duplicate length", cp->dup_len); + STAT_ULONG("Total duplicate set length", cp->dup_tlen); + STAT_ULONG("Bytes needed for add", cp->seek_size); + STAT_ULONG("Page on which we can insert", cp->seek_found_page); + STAT_ULONG("Order", cp->order); + __db_prflags(dbenv, NULL, cp->flags, fn, NULL, "\tInternal Flags"); +} + +#else /* !HAVE_STATISTICS */ + +int +__ham_stat(dbc, spp, flags) + DBC *dbc; + void *spp; + u_int32_t flags; +{ + COMPQUIET(spp, NULL); + COMPQUIET(flags, 0); + + return (__db_stat_not_built(dbc->dbp->dbenv)); +} +#endif + /* * __ham_traverse * Traverse an entire hash table. 
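__ham_stat_print above reports the database byte order by mapping an lorder of 1234 to little-endian and 4321 to big-endian. The same numeric convention can be checked for the host itself; this is only a sketch, not the library's __db_get_lorder:

    #include <stdio.h>

    /*
     * Report the host byte order using the 1234/4321 convention by
     * examining which byte of a known 32-bit value is stored first.
     */
    static int
    host_lorder(void)
    {
        union {
            unsigned int word;
            unsigned char bytes[sizeof(unsigned int)];
        } u;

        u.word = 1;
        return (u.bytes[0] == 1 ? 1234 : 4321);
    }

    int
    main(void)
    {
        printf("%d\t%s-endian host\n", host_lorder(),
            host_lorder() == 1234 ? "Little" : "Big");
        return (0);
    }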
We use the callback so that we @@ -237,7 +470,13 @@ __ham_traverse(dbc, mode, callback, cookie, look_past_max) goto err; break; case H_KEYDATA: + case H_DUPLICATE: break; + default: + DB_ASSERT(0); + ret = EINVAL; + goto err; + } } @@ -254,9 +493,6 @@ __ham_traverse(dbc, mode, callback, cookie, look_past_max) if (ret != 0) goto err; - if (STD_LOCKING(dbc)) - (void)__lock_put(dbp->dbenv, &hcp->lock); - if (hcp->page != NULL) { if ((ret = __memp_fput(mpf, hcp->page, 0)) != 0) return (ret); @@ -269,92 +505,3 @@ err: if (opd != NULL && ret = t_ret; return (ret); } - -static int -__ham_stat_callback(dbp, pagep, cookie, putp) - DB *dbp; - PAGE *pagep; - void *cookie; - int *putp; -{ - DB_HASH_STAT *sp; - DB_BTREE_STAT bstat; - db_indx_t indx, len, off, tlen, top; - u_int8_t *hk; - int ret; - - *putp = 0; - sp = cookie; - - switch (pagep->type) { - case P_INVALID: - /* - * Hash pages may be wholly zeroed; this is not a bug. - * Obviously such pages have no data, so we can just proceed. - */ - break; - case P_HASH: - /* - * We count the buckets and the overflow pages - * separately and tally their bytes separately - * as well. We need to figure out if this page - * is a bucket. - */ - if (PREV_PGNO(pagep) == PGNO_INVALID) - sp->hash_bfree += P_FREESPACE(dbp, pagep); - else { - sp->hash_overflows++; - sp->hash_ovfl_free += P_FREESPACE(dbp, pagep); - } - top = NUM_ENT(pagep); - /* Correct for on-page duplicates and deleted items. */ - for (indx = 0; indx < top; indx += P_INDX) { - switch (*H_PAIRDATA(dbp, pagep, indx)) { - case H_OFFDUP: - break; - case H_OFFPAGE: - case H_KEYDATA: - sp->hash_ndata++; - break; - case H_DUPLICATE: - tlen = LEN_HDATA(dbp, pagep, 0, indx); - hk = H_PAIRDATA(dbp, pagep, indx); - for (off = 0; off < tlen; - off += len + 2 * sizeof (db_indx_t)) { - sp->hash_ndata++; - memcpy(&len, - HKEYDATA_DATA(hk) - + off, sizeof(db_indx_t)); - } - } - } - sp->hash_nkeys += H_NUMPAIRS(pagep); - break; - case P_IBTREE: - case P_IRECNO: - case P_LBTREE: - case P_LRECNO: - case P_LDUP: - /* - * These are all btree pages; get a correct - * cookie and call them. Then add appropriate - * fields into our stat structure. - */ - memset(&bstat, 0, sizeof(bstat)); - if ((ret = __bam_stat_callback(dbp, pagep, &bstat, putp)) != 0) - return (ret); - sp->hash_dup++; - sp->hash_dup_free += bstat.bt_leaf_pgfree + - bstat.bt_dup_pgfree + bstat.bt_int_pgfree; - sp->hash_ndata += bstat.bt_ndata; - break; - case P_OVERFLOW: - sp->hash_bigpages++; - sp->hash_big_bfree += P_OVFLSPACE(dbp, dbp->pgsize, pagep); - break; - default: - return (__db_pgfmt(dbp->dbenv, pagep->pgno)); - } - - return (0); -} diff --git a/db/hash/hash_stub.c b/db/hash/hash_stub.c index 906331990..7bbe925c7 100644 --- a/db/hash/hash_stub.c +++ b/db/hash/hash_stub.c @@ -1,15 +1,15 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: hash_stub.c,v 1.10 2004/09/29 15:35:14 bostic Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: hash_stub.c,v 1.4 2003/07/01 19:47:14 bostic Exp $"; -#endif /* not lint */ +#include "db_config.h" +#ifndef HAVE_HASH #ifndef NO_SYSTEM_INCLUDES #include #endif @@ -121,14 +121,6 @@ __ham_c_init(dbc) return (__db_no_hash_am(dbc->dbp->dbenv)); } -void -__ham_cprint(dbc) - DBC *dbc; -{ - COMPQUIET(dbc, NULL); - return; -} - int __ham_db_close(dbp) DB *dbp; @@ -145,18 +137,6 @@ __ham_db_create(dbp) return (0); } -int -__ham_init_getpgnos(dbenv, dtabp, dtabsizep) - DB_ENV *dbenv; - int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - size_t *dtabsizep; -{ - COMPQUIET(dbenv, NULL); - COMPQUIET(dtabp, NULL); - COMPQUIET(dtabsizep, NULL); - return (0); -} - int __ham_init_print(dbenv, dtabp, dtabsizep) DB_ENV *dbenv; @@ -275,6 +255,13 @@ __ham_pgout(dbenv, dummydbp, pg, pp, cookie) return (__db_no_hash_am(dbenv)); } +void +__ham_print_cursor(dbc) + DBC *dbc; +{ + (void)__db_no_hash_am(dbc->dbp->dbenv); +} + int __ham_quick_delete(dbc) DBC *dbc; @@ -321,6 +308,15 @@ __ham_stat(dbc, spp, flags) return (__db_no_hash_am(dbc->dbp->dbenv)); } +int +__ham_stat_print(dbc, flags) + DBC *dbc; + u_int32_t flags; +{ + COMPQUIET(flags, 0); + return (__db_no_hash_am(dbc->dbp->dbenv)); +} + int __ham_truncate(dbc, countp) DBC *dbc; @@ -392,3 +388,4 @@ __ham_vrfy_structure(dbp, vdp, meta_pgno, flags) COMPQUIET(flags, 0); return (__db_no_hash_am(dbp->dbenv)); } +#endif /* !HAVE_HASH */ diff --git a/db/hash/hash_upgrade.c b/db/hash/hash_upgrade.c index 875d90f09..b626138ef 100644 --- a/db/hash/hash_upgrade.c +++ b/db/hash/hash_upgrade.c @@ -1,19 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: hash_upgrade.c,v 11.35 2004/04/06 12:38:08 bostic Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: hash_upgrade.c,v 11.33 2003/01/08 05:03:56 bostic Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #include -#include #include #endif @@ -38,8 +36,8 @@ __ham_30_hashmeta(dbp, real_name, obuf) HASHHDR *oldmeta; HMETA30 newmeta; u_int32_t *o_spares, *n_spares; - u_int32_t fillf, maxb, nelem; - int i, max_entry, ret; + u_int32_t fillf, i, maxb, max_entry, nelem; + int ret; dbenv = dbp->dbenv; memset(&newmeta, 0, sizeof(newmeta)); diff --git a/db/hash/hash_verify.c b/db/hash/hash_verify.c index 1aa09454b..fb76fbef7 100644 --- a/db/hash/hash_verify.c +++ b/db/hash/hash_verify.c @@ -1,18 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. * - * $Id: hash_verify.c,v 1.58 2003/06/30 17:20:13 bostic Exp $ + * $Id: hash_verify.c,v 1.62 2004/10/11 18:47:50 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: hash_verify.c,v 1.58 2003/06/30 17:20:13 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -175,6 +171,9 @@ __ham_vrfy_meta(dbp, vdp, m, pgno, flags) err: if ((t_ret = __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0) ret = t_ret; + if (LF_ISSET(DB_SALVAGE) && + (t_ret = __db_salvage_markdone(vdp, pgno)) != 0 && ret == 0) + ret = t_ret; return ((ret == 0 && isbad == 1) ? 
DB_VERIFY_BAD : ret); } @@ -202,11 +201,6 @@ __ham_vrfy(dbp, vdp, h, pgno, flags) if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0) return (ret); - /* Sanity check our flags and page type. */ - if ((ret = __db_fchk(dbp->dbenv, "__ham_vrfy", - flags, DB_AGGRESSIVE | DB_NOORDERCHK | DB_SALVAGE)) != 0) - goto err; - if (TYPE(h) != P_HASH) { TYPE_ERR_PRINT(dbp->dbenv, "__ham_vrfy", pgno, TYPE(h)); DB_ASSERT(0); @@ -861,7 +855,7 @@ __ham_salvage(dbp, vdp, pgno, h, handle, callback, flags) keydata: memcpy(buf, HKEYDATA_DATA(hk), len); dbt.size = len; dbt.data = buf; - if ((ret = __db_prdbt(&dbt, + if ((ret = __db_vrfy_prdbt(&dbt, 0, " ", handle, callback, 0, vdp)) != 0) err_ret = ret; break; @@ -875,11 +869,11 @@ keydata: memcpy(buf, HKEYDATA_DATA(hk), len); if ((ret = __db_safe_goff(dbp, vdp, dpgno, &dbt, &buf, flags)) != 0) { err_ret = ret; - (void)__db_prdbt(&unkdbt, 0, " ", + (void)__db_vrfy_prdbt(&unkdbt, 0, " ", handle, callback, 0, vdp); break; } - if ((ret = __db_prdbt(&dbt, + if ((ret = __db_vrfy_prdbt(&dbt, 0, " ", handle, callback, 0, vdp)) != 0) err_ret = ret; break; @@ -892,7 +886,8 @@ keydata: memcpy(buf, HKEYDATA_DATA(hk), len); HOFFPAGE_PGNO(hk), sizeof(dpgno)); /* UNKNOWN iff pgno is bad or we're a key. */ if (!IS_VALID_PGNO(dpgno) || (i % 2 == 0)) { - if ((ret = __db_prdbt(&unkdbt, 0, " ", + if ((ret = + __db_vrfy_prdbt(&unkdbt, 0, " ", handle, callback, 0, vdp)) != 0) err_ret = ret; } else if ((ret = __db_salvage_duptree(dbp, @@ -935,7 +930,7 @@ keydata: memcpy(buf, HKEYDATA_DATA(hk), len); memcpy(buf, hk + tlen, dlen); dbt.size = dlen; dbt.data = buf; - if ((ret = __db_prdbt(&dbt, 0, " ", + if ((ret = __db_vrfy_prdbt(&dbt, 0, " ", handle, callback, 0, vdp)) != 0) err_ret = ret; tlen += sizeof(db_indx_t); diff --git a/db/hmac/hmac.c b/db/hmac/hmac.c index fec91411f..bb2da5eb4 100644 --- a/db/hmac/hmac.c +++ b/db/hmac/hmac.c @@ -1,19 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. * * Some parts of this code originally written by Adam Stubblefield, - * astubble@rice.edu. + * -- astubble@rice.edu. + * + * $Id: hmac.c,v 1.27 2004/01/28 03:36:11 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: hmac.c,v 1.26 2003/01/08 05:04:43 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif diff --git a/db/hmac/sha1.c b/db/hmac/sha1.c index 2f2c806a2..8824796f0 100644 --- a/db/hmac/sha1.c +++ b/db/hmac/sha1.c @@ -1,8 +1,9 @@ +/* + * $Id: sha1.c,v 1.14 2004/01/28 03:36:11 bostic Exp $ + */ + #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: sha1.c,v 1.13 2002/04/09 13:40:36 sue Exp $"; -#endif /* not lint */ /* SHA-1 in C By Steve Reid diff --git a/db/hsearch/hsearch.c b/db/hsearch/hsearch.c index edc9a23aa..5bcbe93d3 100644 --- a/db/hsearch/hsearch.c +++ b/db/hsearch/hsearch.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -38,14 +38,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
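With the new #ifndef HAVE_HASH wrapper, hash_stub.c is compiled only when the hash access method is configured out, and every entry point becomes a stub that reports the missing feature instead of doing work, so callers still link. A compressed sketch of that configuration pattern, using invented names rather than the Berkeley DB build's:

    #include <errno.h>
    #include <stdio.h>

    /* Define FEATURE_HASH at compile time to get the real code. */

    #ifdef FEATURE_HASH
    static int
    hash_insert(int key, int value)
    {
        printf("inserting %d -> %d\n", key, value);
        return (0);
    }
    #else
    /* Stub: keeps callers linking, reports a clear error at run time. */
    static int
    hash_insert(int key, int value)
    {
        (void)key;
        (void)value;
        fprintf(stderr, "library built without hash support\n");
        return (EOPNOTSUPP);
    }
    #endif

    int
    main(void)
    {
        return (hash_insert(1, 2) == 0 ? 0 : 1);
    }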
+ * + * $Id: hsearch.c,v 11.14 2004/01/28 03:36:11 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: hsearch.c,v 11.13 2003/01/08 05:05:10 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include diff --git a/db/java/src/com/sleepycat/bind/ByteArrayBinding.java b/db/java/src/com/sleepycat/bind/ByteArrayBinding.java new file mode 100644 index 000000000..e684c1fea --- /dev/null +++ b/db/java/src/com/sleepycat/bind/ByteArrayBinding.java @@ -0,0 +1,43 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: ByteArrayBinding.java,v 1.2 2004/06/04 18:24:49 mark Exp $ + */ + +package com.sleepycat.bind; + +import com.sleepycat.db.DatabaseEntry; + +/** + * A pass-through EntryBinding that uses the entry's byte array as + * the key or data object. + * + * @author Mark Hayes + */ +public class ByteArrayBinding implements EntryBinding { + + /** + * Creates a byte array binding. + */ + public ByteArrayBinding() { + } + + // javadoc is inherited + public Object entryToObject(DatabaseEntry entry) { + + byte[] bytes = new byte[entry.getSize()]; + System.arraycopy(entry.getData(), entry.getOffset(), + bytes, 0, bytes.length); + return bytes; + } + + // javadoc is inherited + public void objectToEntry(Object object, DatabaseEntry entry) { + + byte[] bytes = (byte[]) object; + entry.setData(bytes, 0, bytes.length); + } +} diff --git a/db/java/src/com/sleepycat/bind/EntityBinding.java b/db/java/src/com/sleepycat/bind/EntityBinding.java new file mode 100644 index 000000000..5209af56a --- /dev/null +++ b/db/java/src/com/sleepycat/bind/EntityBinding.java @@ -0,0 +1,49 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: EntityBinding.java,v 1.2 2004/06/04 18:24:49 mark Exp $ + */ + +package com.sleepycat.bind; + +import com.sleepycat.db.DatabaseEntry; + +/** + * A binding between a key-value entry pair and an entity object. + * + * @author Mark Hayes + */ +public interface EntityBinding { + + /** + * Converts key and data entry buffers into an entity Object. + * + * @param key is the source key entry. + * + * @param data is the source data entry. + * + * @return the resulting Object. + */ + Object entryToObject(DatabaseEntry key, DatabaseEntry data); + + /** + * Extracts the key entry from an entity Object. + * + * @param object is the source Object. + * + * @param key is the destination entry buffer. + */ + void objectToKey(Object object, DatabaseEntry key); + + /** + * Extracts the data entry from an entity Object. + * + * @param object is the source Object. + * + * @param data is the destination entry buffer. + */ + void objectToData(Object object, DatabaseEntry data); +} diff --git a/db/java/src/com/sleepycat/bind/EntryBinding.java b/db/java/src/com/sleepycat/bind/EntryBinding.java new file mode 100644 index 000000000..e7ad56fa7 --- /dev/null +++ b/db/java/src/com/sleepycat/bind/EntryBinding.java @@ -0,0 +1,38 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: EntryBinding.java,v 1.2 2004/06/04 18:24:49 mark Exp $ + */ + +package com.sleepycat.bind; + +import com.sleepycat.db.DatabaseEntry; + +/** + * A binding between a key or data entry and a key or data object. 
+ * + * @author Mark Hayes + */ +public interface EntryBinding { + + /** + * Converts an entry buffer into an Object. + * + * @param entry is the source entry buffer. + * + * @return the resulting Object. + */ + Object entryToObject(DatabaseEntry entry); + + /** + * Converts an Object into an entry buffer. + * + * @param object is the source Object. + * + * @param entry is the destination entry buffer. + */ + void objectToEntry(Object object, DatabaseEntry entry); +} diff --git a/db/java/src/com/sleepycat/bind/RecordNumberBinding.java b/db/java/src/com/sleepycat/bind/RecordNumberBinding.java new file mode 100644 index 000000000..7fe3dce2e --- /dev/null +++ b/db/java/src/com/sleepycat/bind/RecordNumberBinding.java @@ -0,0 +1,70 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: RecordNumberBinding.java,v 1.2 2004/06/04 18:24:49 mark Exp $ + */ + +package com.sleepycat.bind; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.db.DatabaseEntry; + +/** + * An EntryBinding that treats a record number key entry as a + * Long key object. + * + *

    Record numbers are returned as Long objects, although on + * input any Number object may be used.
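Record numbers are unsigned 32-bit values, which is why the binding below masks with 0xFFFFFFFFL before boxing them as a Long: a record number at or above 2^31 must not be sign-extended. The same pitfall appears when widening to 64 bits in C; the value here is only an example:

    #include <inttypes.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* A record number above 2^31, as the 32 bits stored on disk. */
        uint32_t recno_bits = 0xFFFFFFF0u;

        /* Widening through a signed 32-bit type sign-extends: wrong. */
        int64_t wrong = (int32_t)recno_bits;

        /* Widening the unsigned value keeps it positive: right. */
        int64_t right = (uint32_t)recno_bits;

        printf("sign-extended: %" PRId64 "\n", wrong);   /* -16 */
        printf("zero-extended: %" PRId64 "\n", right);   /* 4294967280 */
        return (0);
    }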

    + * + * @author Mark Hayes + */ +public class RecordNumberBinding implements EntryBinding { + + /** + * Creates a byte array binding. + */ + public RecordNumberBinding() { + } + + // javadoc is inherited + public Object entryToObject(DatabaseEntry entry) { + + return new Long(entryToRecordNumber(entry)); + } + + // javadoc is inherited + public void objectToEntry(Object object, DatabaseEntry entry) { + + recordNumberToEntry(((Number) object).longValue(), entry); + } + + /** + * Utility method for use by bindings to translate a entry buffer to an + * record number integer. + * + * @param entry the entry buffer. + * + * @return the record number. + */ + public static long entryToRecordNumber(DatabaseEntry entry) { + + return DbCompat.getRecordNumber(entry) & 0xFFFFFFFFL; + } + + /** + * Utility method for use by bindings to translate a record number integer + * to a entry buffer. + * + * @param recordNumber the record number. + * + * @param entry the entry buffer to hold the record number. + */ + public static void recordNumberToEntry(long recordNumber, + DatabaseEntry entry) { + entry.setData(new byte[4], 0, 4); + DbCompat.setRecordNumber(entry, (int) recordNumber); + } +} diff --git a/db/java/src/com/sleepycat/bind/package.html b/db/java/src/com/sleepycat/bind/package.html new file mode 100644 index 000000000..cf824682b --- /dev/null +++ b/db/java/src/com/sleepycat/bind/package.html @@ -0,0 +1,7 @@ + + + +Bindings between database entries and Java objects
    +[reference guide]. + + diff --git a/db/java/src/com/sleepycat/bind/serial/ClassCatalog.java b/db/java/src/com/sleepycat/bind/serial/ClassCatalog.java new file mode 100644 index 000000000..5d249b4f8 --- /dev/null +++ b/db/java/src/com/sleepycat/bind/serial/ClassCatalog.java @@ -0,0 +1,72 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: ClassCatalog.java,v 1.3 2004/09/01 14:34:20 mark Exp $ + */ + +package com.sleepycat.bind.serial; + +import java.io.ObjectStreamClass; + +import com.sleepycat.db.DatabaseException; + +/** + * A catalog of class description information for use during object + * serialization. + * + *

    A catalog is used to store class descriptions separately from serialized + * objects, to avoid redundantly stored information with each object. + * When serialized objects are stored in a database, a {@link + * StoredClassCatalog} should be used.

    + * + *

    This information is used for serialization of class descriptors or + * java.io.ObjectStreamClass objects, each of which represents a unique class + * format. For each unique format, a unique class ID is assigned by the + * catalog. The class ID can then be used in the serialization stream in place + * of the full class information. When used with {@link SerialInput} and + * {@link SerialOutput} or any of the serial bindings, the use of the catalog + * is transparent to the application.
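Put differently, each stored object carries only a small identifier and the catalog resolves it to the full class description on demand. A toy C sketch of that dictionary idea follows; the record layout and the format table are invented for illustration and are unrelated to the Java API:

    #include <stdio.h>
    #include <string.h>

    /*
     * A toy "catalog": records store a one-byte format ID instead of the
     * full format description; readers resolve the ID through the table.
     */
    static const char *catalog[] = {
        "v1: int key, 32-byte name",
        "v2: int key, 64-byte name, int flags",
    };

    struct record {
        unsigned char format_id;    /* index into the catalog */
        char payload[64];
    };

    int
    main(void)
    {
        struct record rec;

        rec.format_id = 1;
        strcpy(rec.payload, "key=7,name=example,flags=0");

        printf("stored %zu payload bytes + 1 ID byte\n",
            strlen(rec.payload));
        printf("format: %s\n", catalog[rec.format_id]);
        printf("data:   %s\n", rec.payload);
        return (0);
    }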

    + * + * @author Mark Hayes + */ +public interface ClassCatalog { + + /** + * Close a catalog database and release any cached resources. + */ + public void close() + throws DatabaseException; + + /** + * Return the class ID for the current version of the given class + * description. + * This is used for storing in serialization streams in place of a full + * class descriptor, since it is much more compact. To get back the + * ObjectStreamClass for a class ID, call {@link #getClassFormat(byte[])}. + * This function causes a new class ID to be assigned if the class + * description has changed. + * + * @param classDesc The class description for which to return the + * class ID. + * + * @return The class ID for the current version of the class. + */ + public byte[] getClassID(ObjectStreamClass classDesc) + throws DatabaseException, ClassNotFoundException; + + /** + * Return the ObjectStreamClass for the given class ID. This may or may + * not be the current class format, depending on whether the class has + * changed since the class ID was generated. + * + * @param classID The class ID for which to return the class format. + * + * @return The class format for the given class ID, which may or may not + * represent the current version of the class. + */ + public ObjectStreamClass getClassFormat(byte[] classID) + throws DatabaseException, ClassNotFoundException; +} diff --git a/db/java/src/com/sleepycat/bind/serial/SerialBinding.java b/db/java/src/com/sleepycat/bind/serial/SerialBinding.java new file mode 100644 index 000000000..e2e607c14 --- /dev/null +++ b/db/java/src/com/sleepycat/bind/serial/SerialBinding.java @@ -0,0 +1,130 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SerialBinding.java,v 1.2 2004/06/04 18:24:49 mark Exp $ + */ + +package com.sleepycat.bind.serial; + +import java.io.IOException; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.db.DatabaseEntry; +import com.sleepycat.util.FastInputStream; +import com.sleepycat.util.FastOutputStream; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * A concrete EntryBinding that treats a key or data entry as + * a serialized object. + * + *

    This binding stores objects in serialized object format. The + * deserialized objects are returned by the binding, and their + * Class must implement the Serializable + * interface.

    + * + * @author Mark Hayes + */ +public class SerialBinding implements EntryBinding { + + private ClassCatalog classCatalog; + private Class baseClass; + + /** + * Creates a serial binding. + * + * @param classCatalog is the catalog to hold shared class information and + * for a database should be a {@link StoredClassCatalog}. + * + * @param baseClass is the base class for serialized objects stored using + * this binding -- all objects using this binding must be an instance of + * this class. + */ + public SerialBinding(ClassCatalog classCatalog, Class baseClass) { + + if (classCatalog == null) { + throw new NullPointerException("classCatalog must be non-null"); + } + this.classCatalog = classCatalog; + this.baseClass = baseClass; + } + + /** + * Returns the base class for this binding. + * + * @return the base class for this binding. + */ + public final Class getBaseClass() { + + return baseClass; + } + + /** + * Deserialize an object from an entry buffer. May only be called for data + * that was serialized using {@link #objectToEntry}, since the fixed + * serialization header is assumed to not be included in the input data. + * {@link SerialInput} is used to deserialize the object. + * + * @param entry is the input serialized entry. + * + * @return the output deserialized object. + */ + public Object entryToObject(DatabaseEntry entry) { + + int length = entry.getSize(); + byte[] hdr = SerialOutput.getStreamHeader(); + byte[] bufWithHeader = new byte[length + hdr.length]; + + System.arraycopy(hdr, 0, bufWithHeader, 0, hdr.length); + System.arraycopy(entry.getData(), entry.getOffset(), + bufWithHeader, hdr.length, length); + + try { + SerialInput jin = new SerialInput( + new FastInputStream(bufWithHeader, 0, bufWithHeader.length), + classCatalog); + return jin.readObject(); + } catch (IOException e) { + throw new RuntimeExceptionWrapper(e); + } catch (ClassNotFoundException e) { + throw new RuntimeExceptionWrapper(e); + } + } + + /** + * Serialize an object into an entry buffer. The fixed serialization + * header is not included in the output data to save space, and therefore + * to deserialize the data the complementary {@link #entryToObject} method + * must be used. {@link SerialOutput} is used to serialize the object. + * + * @param object is the input deserialized object. + * + * @param entry is the output serialized entry. + * + * @throws IllegalArgumentException if the object is not an instance of the + * base class for this binding. + */ + public void objectToEntry(Object object, DatabaseEntry entry) { + + if (baseClass != null && !baseClass.isInstance(object)) { + throw new IllegalArgumentException( + "Data object class (" + object.getClass() + + ") not an instance of binding's base class (" + + baseClass + ')'); + } + FastOutputStream fo = new FastOutputStream(); + try { + SerialOutput jos = new SerialOutput(fo, classCatalog); + jos.writeObject(object); + } catch (IOException e) { + throw new RuntimeExceptionWrapper(e); + } + + byte[] hdr = SerialOutput.getStreamHeader(); + entry.setData(fo.getBufferBytes(), hdr.length, + fo.getBufferLength() - hdr.length); + } +} diff --git a/db/java/src/com/sleepycat/bind/serial/SerialInput.java b/db/java/src/com/sleepycat/bind/serial/SerialInput.java new file mode 100644 index 000000000..16bfd859f --- /dev/null +++ b/db/java/src/com/sleepycat/bind/serial/SerialInput.java @@ -0,0 +1,75 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. 
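SerialBinding.objectToEntry above strips the fixed Java serialization stream header before storing the bytes, and entryToObject splices the header back in front of the stored data before deserializing, saving a few bytes per record. A C sketch of the same constant-prefix elision; the four header bytes are just an example:

    #include <stdio.h>
    #include <string.h>

    /* Every encoded object would normally begin with this fixed header. */
    static const unsigned char HEADER[] = { 0xAC, 0xED, 0x00, 0x05 };

    /* Store: drop the constant prefix, keep only the part that varies. */
    static size_t
    store(const unsigned char *full, size_t len, unsigned char *out)
    {
        memcpy(out, full + sizeof(HEADER), len - sizeof(HEADER));
        return (len - sizeof(HEADER));
    }

    /* Load: splice the known header back in front of the stored bytes. */
    static size_t
    load(const unsigned char *stored, size_t len, unsigned char *out)
    {
        memcpy(out, HEADER, sizeof(HEADER));
        memcpy(out + sizeof(HEADER), stored, len);
        return (len + sizeof(HEADER));
    }

    int
    main(void)
    {
        unsigned char full[16] = { 0xAC, 0xED, 0x00, 0x05, 1, 2, 3, 4 };
        unsigned char db[16], back[16];
        size_t n;

        n = store(full, 8, db);        /* 4 bytes go to the database */
        n = load(db, n, back);         /* 8 bytes restored for reading */
        printf("restored %zu bytes, match=%d\n",
            n, memcmp(full, back, n) == 0);
        return (0);
    }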
+ * + * $Id: SerialInput.java,v 1.2 2004/06/04 18:24:49 mark Exp $ + */ + +package com.sleepycat.bind.serial; + +import java.io.IOException; +import java.io.InputStream; +import java.io.ObjectInputStream; +import java.io.ObjectStreamClass; + +import com.sleepycat.db.DatabaseException; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * A specialized ObjectInputStream that gets class description + * information from a ClassCatalog. It is used by + * SerialBinding. + * + *

    This class is used instead of an {@link ObjectInputStream}, which it + * extends, to read an object stream written by the {@link SerialOutput} class. + * For reading objects from a database normally one of the serial binding + * classes is used. {@link SerialInput} is used when an {@link + * ObjectInputStream} is needed along with compact storage. A {@link + * ClassCatalog} must be supplied, however, to store shared class + * descriptions.

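Illustrative sketch (not part of this changeset): SerialOutput and SerialInput can be paired over plain in-memory streams, given any ClassCatalog implementation; the catalog parameter below is assumed to be already constructed.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;

    import com.sleepycat.bind.serial.ClassCatalog;
    import com.sleepycat.bind.serial.SerialInput;
    import com.sleepycat.bind.serial.SerialOutput;

    public class SerialStreamRoundTrip {

        static Object copy(ClassCatalog catalog, Object value) throws Exception {

            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            SerialOutput out = new SerialOutput(bytes, catalog);
            out.writeObject(value);   // class description goes to the catalog
            out.flush();

            SerialInput in = new SerialInput(
                new ByteArrayInputStream(bytes.toByteArray()), catalog);
            return in.readObject();   // class description is fetched back by ID
        }
    }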
    + * + * @author Mark Hayes + */ +public class SerialInput extends ObjectInputStream { + + private ClassCatalog classCatalog; + + /** + * Creates a serial input stream. + * + * @param in is the input stream from which compact serialized objects will + * be read. + * + * @param classCatalog is the catalog containing the class descriptions + * for the serialized objects. + */ + public SerialInput(InputStream in, ClassCatalog classCatalog) + throws IOException { + + super(in); + + this.classCatalog = classCatalog; + } + + // javadoc is inherited + protected ObjectStreamClass readClassDescriptor() + throws IOException, ClassNotFoundException { + + try { + byte len = readByte(); + byte[] id = new byte[len]; + readFully(id); + + return classCatalog.getClassFormat(id); + } catch (DatabaseException e) { + /* + * Do not throw IOException from here since ObjectOutputStream + * will write the exception to the stream, which causes another + * call here, etc. + */ + throw new RuntimeExceptionWrapper(e); + } + } +} diff --git a/db/java/src/com/sleepycat/bind/serial/SerialOutput.java b/db/java/src/com/sleepycat/bind/serial/SerialOutput.java new file mode 100644 index 000000000..22ae8f872 --- /dev/null +++ b/db/java/src/com/sleepycat/bind/serial/SerialOutput.java @@ -0,0 +1,114 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SerialOutput.java,v 1.3 2004/09/01 14:34:20 mark Exp $ + */ + +package com.sleepycat.bind.serial; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectOutputStream; +import java.io.ObjectStreamClass; +import java.io.ObjectStreamConstants; +import java.io.OutputStream; + +import com.sleepycat.db.DatabaseException; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * A specialized ObjectOutputStream that stores class description + * information in a ClassCatalog. It is used by + * SerialBinding. + * + *

    This class is used instead of an {@link ObjectOutputStream}, which it + * extends, to write a compact object stream. For writing objects to a + * database normally one of the serial binding classes is used. {@link + * SerialOutput} is used when an {@link ObjectOutputStream} is needed along + * with compact storage. A {@link ClassCatalog} must be supplied, however, to + * store shared class descriptions.

    + * + *

    The {@link ClassCatalog} is used to store class definitions rather than + * embedding these into the stream. Instead, a class format identifier is + * embedded into the stream. This identifier is then used by {@link + * SerialInput} to load the class format to deserialize the object.

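A small sketch (not part of this changeset) of what the catalog buys: only a short catalog-assigned ID is written per class, and the full descriptor is looked up again on read. The catalog parameter is assumed to be an open StoredClassCatalog.

    import java.io.ObjectStreamClass;

    import com.sleepycat.bind.serial.StoredClassCatalog;

    public class ClassFormatIdDemo {

        static void show(StoredClassCatalog catalog) throws Exception {

            ObjectStreamClass desc = ObjectStreamClass.lookup(String.class);

            byte[] id = catalog.getClassID(desc);            // small, catalog-assigned ID
            ObjectStreamClass again = catalog.getClassFormat(id); // resolved on read

            System.out.println("format ID length: " + id.length);
            System.out.println("class: " + again.getName());
        }
    }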
    + * + * @author Mark Hayes + */ +public class SerialOutput extends ObjectOutputStream { + + /* Serialization version constants. Instead of hardcoding these + * we get them by creating a SerialOutput, which itself + * guarantees that we'll always use a PROTOCOL_VERSION_2 header. + */ + private final static byte[] STREAM_HEADER; + static { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try { + SerialOutput oos = new SerialOutput(baos, null); + } catch (IOException e) { + throw new RuntimeExceptionWrapper(e); + } + STREAM_HEADER = baos.toByteArray(); + } + + private ClassCatalog classCatalog; + + /** + * Creates a serial output stream. + * + * @param out is the output stream to which the compact serialized objects + * will be written. + * + * @param classCatalog is the catalog to which the class descriptions for + * the serialized objects will be written. + */ + public SerialOutput(OutputStream out, ClassCatalog classCatalog) + throws IOException { + + super(out); + this.classCatalog = classCatalog; + + /* guarantee that we'll always use the same serialization format */ + + useProtocolVersion(ObjectStreamConstants.PROTOCOL_VERSION_2); + } + + // javadoc is inherited + protected void writeClassDescriptor(ObjectStreamClass classdesc) + throws IOException { + + try { + byte[] id = classCatalog.getClassID(classdesc); + writeByte(id.length); + write(id); + } catch (DatabaseException e) { + /* + * Do not throw IOException from here since ObjectOutputStream + * will write the exception to the stream, which causes another + * call here, etc. + */ + throw new RuntimeExceptionWrapper(e); + } catch (ClassNotFoundException e) { + throw new RuntimeExceptionWrapper(e); + } + } + + /** + * Returns the fixed stream header used for all serialized streams in + * PROTOCOL_VERSION_2 format. To save space this header can be removed and + * serialized streams before storage and inserted before deserializing. + * {@link SerialOutput} always uses PROTOCOL_VERSION_2 serialization format + * to guarantee that this header is fixed. {@link SerialBinding} removes + * this header from serialized streams automatically. + * + * @return the fixed stream header. + */ + public static byte[] getStreamHeader() { + + return STREAM_HEADER; + } +} diff --git a/db/java/src/com/sleepycat/bind/serial/SerialSerialBinding.java b/db/java/src/com/sleepycat/bind/serial/SerialSerialBinding.java new file mode 100644 index 000000000..3fba700bd --- /dev/null +++ b/db/java/src/com/sleepycat/bind/serial/SerialSerialBinding.java @@ -0,0 +1,117 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SerialSerialBinding.java,v 1.2 2004/06/04 18:24:49 mark Exp $ + */ + +package com.sleepycat.bind.serial; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.db.DatabaseEntry; + +/** + * An abstract EntityBinding that treats an entity's key entry and + * data entry as serialized objects. + * + *

    This class takes care of serializing and deserializing the key and + * data entry automatically. Its three abstract methods must be implemented by + * a concrete subclass to convert the deserialized objects to/from an entity + * object.

+ *
+ * <ul>
+ * <li> {@link #entryToObject(Object,Object)} </li>
+ * <li> {@link #objectToKey(Object)} </li>
+ * <li> {@link #objectToData(Object)} </li>
+ * </ul>
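A hypothetical concrete subclass (illustration only, not part of this changeset) that treats a String part number as the key object and a String part name as the data object:

    import com.sleepycat.bind.serial.ClassCatalog;
    import com.sleepycat.bind.serial.SerialSerialBinding;

    public class PartBinding extends SerialSerialBinding {

        public static class Part {
            public final String number;
            public final String name;
            public Part(String number, String name) {
                this.number = number;
                this.name = name;
            }
        }

        public PartBinding(ClassCatalog catalog) {
            super(catalog, String.class, String.class); // key class, data class
        }

        public Object entryToObject(Object keyInput, Object dataInput) {
            return new Part((String) keyInput, (String) dataInput); // combine key and data
        }

        public Object objectToKey(Object object) {
            return ((Part) object).number;                // extract the key object
        }

        public Object objectToData(Object object) {
            return ((Part) object).name;                  // extract the data object
        }
    }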
    + * + * @author Mark Hayes + */ +public abstract class SerialSerialBinding implements EntityBinding { + + private SerialBinding keyBinding; + private SerialBinding dataBinding; + + /** + * Creates a serial-serial entity binding. + * + * @param classCatalog is the catalog to hold shared class information and + * for a database should be a {@link StoredClassCatalog}. + * + * @param keyClass is the key base class. + * + * @param dataClass is the data base class. + */ + public SerialSerialBinding(ClassCatalog classCatalog, + Class keyClass, + Class dataClass) { + + this(new SerialBinding(classCatalog, keyClass), + new SerialBinding(classCatalog, dataClass)); + } + + /** + * Creates a serial-serial entity binding. + * + * @param keyBinding is the key binding. + * + * @param dataBinding is the data binding. + */ + public SerialSerialBinding(SerialBinding keyBinding, + SerialBinding dataBinding) { + + this.keyBinding = keyBinding; + this.dataBinding = dataBinding; + } + + // javadoc is inherited + public Object entryToObject(DatabaseEntry key, DatabaseEntry data) { + + return entryToObject(keyBinding.entryToObject(key), + dataBinding.entryToObject(data)); + } + + // javadoc is inherited + public void objectToKey(Object object, DatabaseEntry key) { + + object = objectToKey(object); + keyBinding.objectToEntry(object, key); + } + + // javadoc is inherited + public void objectToData(Object object, DatabaseEntry data) { + + object = objectToData(object); + dataBinding.objectToEntry(object, data); + } + + /** + * Constructs an entity object from deserialized key and data objects. + * + * @param keyInput is the deserialized key object. + * + * @param dataInput is the deserialized data object. + * + * @return the entity object constructed from the key and data. + */ + public abstract Object entryToObject(Object keyInput, Object dataInput); + + /** + * Extracts a key object from an entity object. + * + * @param object is the entity object. + * + * @return the deserialized key object. + */ + public abstract Object objectToKey(Object object); + + /** + * Extracts a data object from an entity object. + * + * @param object is the entity object. + * + * @return the deserialized data object. + */ + public abstract Object objectToData(Object object); +} diff --git a/db/java/src/com/sleepycat/bind/serial/SerialSerialKeyCreator.java b/db/java/src/com/sleepycat/bind/serial/SerialSerialKeyCreator.java new file mode 100644 index 000000000..eae756f61 --- /dev/null +++ b/db/java/src/com/sleepycat/bind/serial/SerialSerialKeyCreator.java @@ -0,0 +1,143 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SerialSerialKeyCreator.java,v 1.4 2004/08/02 18:52:04 mjc Exp $ + */ + +package com.sleepycat.bind.serial; + +import com.sleepycat.db.DatabaseEntry; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.SecondaryDatabase; +import com.sleepycat.db.SecondaryKeyCreator; + +/** + * A abstract key creator that uses a serial key and a serial data entry. + * This class takes care of serializing and deserializing the key and data + * entry automatically. + * The following abstract method must be implemented by a concrete subclass + * to create the index key using these objects + *
+ * <ul>
+ * <li> {@link #createSecondaryKey(Object,Object)} </li>
+ * </ul>
    + * + * @author Mark Hayes + */ +public abstract class SerialSerialKeyCreator + implements SecondaryKeyCreator { + + protected SerialBinding primaryKeyBinding; + protected SerialBinding dataBinding; + protected SerialBinding indexKeyBinding; + + /** + * Creates a serial-serial key creator. + * + * @param classCatalog is the catalog to hold shared class information and + * for a database should be a {@link StoredClassCatalog}. + * + * @param primaryKeyClass is the primary key base class. + * + * @param dataClass is the data base class. + * + * @param indexKeyClass is the index key base class. + */ + public SerialSerialKeyCreator(ClassCatalog classCatalog, + Class primaryKeyClass, + Class dataClass, + Class indexKeyClass) { + + this(new SerialBinding(classCatalog, primaryKeyClass), + new SerialBinding(classCatalog, dataClass), + new SerialBinding(classCatalog, indexKeyClass)); + } + + /** + * Creates a serial-serial entity binding. + * + * @param primaryKeyBinding is the primary key binding. + * + * @param dataBinding is the data binding. + * + * @param indexKeyBinding is the index key binding. + */ + public SerialSerialKeyCreator(SerialBinding primaryKeyBinding, + SerialBinding dataBinding, + SerialBinding indexKeyBinding) { + + this.primaryKeyBinding = primaryKeyBinding; + this.dataBinding = dataBinding; + this.indexKeyBinding = indexKeyBinding; + } + + // javadoc is inherited + public boolean createSecondaryKey(SecondaryDatabase db, + DatabaseEntry primaryKeyEntry, + DatabaseEntry dataEntry, + DatabaseEntry indexKeyEntry) + throws DatabaseException { + + Object primaryKeyInput = + primaryKeyBinding.entryToObject(primaryKeyEntry); + Object dataInput = dataBinding.entryToObject(dataEntry); + Object indexKey = createSecondaryKey(primaryKeyInput, dataInput); + if (indexKey != null) { + indexKeyBinding.objectToEntry(indexKey, indexKeyEntry); + return true; + } else { + return false; + } + } + + // javadoc is inherited + public boolean nullifyForeignKey(SecondaryDatabase db, + DatabaseEntry dataEntry) + throws DatabaseException { + + Object data = dataBinding.entryToObject(dataEntry); + data = nullifyForeignKey(data); + if (data != null) { + dataBinding.objectToEntry(data, dataEntry); + return true; + } else { + return false; + } + } + + /** + * Creates the index key object from primary key and entry objects. + * + * @param primaryKey is the deserialized source primary key entry, or + * null if no primary key entry is used to construct the index key. + * + * @param data is the deserialized source data entry, or null if no + * data entry is used to construct the index key. + * + * @return the destination index key object, or null to indicate that + * the key is not present. + */ + public abstract Object createSecondaryKey(Object primaryKey, Object data); + + /** + * Clears the index key in a data object. + * + *

    On entry the data parameter contains the index key to be cleared. It + * should be changed by this method such that {@link #createSecondaryKey} + * will return false. Other fields in the data object should remain + * unchanged.

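Illustration only (not part of this changeset): a SerialSerialKeyCreator subclass with a hypothetical Supplier data class, showing both createSecondaryKey and an overridden nullifyForeignKey that clears the foreign key field and returns the modified object.

    import java.io.Serializable;

    import com.sleepycat.bind.serial.ClassCatalog;
    import com.sleepycat.bind.serial.SerialSerialKeyCreator;

    public class SupplierByCityKeyCreator extends SerialSerialKeyCreator {

        public static class Supplier implements Serializable {
            String cityKey;   // foreign key into a City database, may be null
        }

        public SupplierByCityKeyCreator(ClassCatalog catalog) {
            super(catalog, String.class, Supplier.class, String.class);
        }

        public Object createSecondaryKey(Object primaryKey, Object data) {
            return ((Supplier) data).cityKey;   // null means "no index key"
        }

        public Object nullifyForeignKey(Object data) {
            Supplier supplier = (Supplier) data;
            supplier.cityKey = null;            // clear the key field...
            return supplier;                    // ...and store the changed record
        }
    }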
    + * + * @param data is the source and destination data object. + * + * @return the destination data object, or null to indicate that the + * key is not present and no change is necessary. The data returned may + * be the same object passed as the data parameter or a newly created + * object. + */ + public Object nullifyForeignKey(Object data) { + + return null; + } +} diff --git a/db/java/src/com/sleepycat/bind/serial/StoredClassCatalog.java b/db/java/src/com/sleepycat/bind/serial/StoredClassCatalog.java new file mode 100644 index 000000000..04caeae3b --- /dev/null +++ b/db/java/src/com/sleepycat/bind/serial/StoredClassCatalog.java @@ -0,0 +1,446 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: StoredClassCatalog.java,v 1.4 2004/09/01 14:34:20 mark Exp $ + */ + +package com.sleepycat.bind.serial; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.ObjectStreamClass; +import java.io.Serializable; +import java.math.BigInteger; +import java.util.HashMap; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.db.Cursor; +import com.sleepycat.db.CursorConfig; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseConfig; +import com.sleepycat.db.DatabaseEntry; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.EnvironmentConfig; +import com.sleepycat.db.LockMode; +import com.sleepycat.db.OperationStatus; +import com.sleepycat.db.Transaction; +import com.sleepycat.util.RuntimeExceptionWrapper; +import com.sleepycat.util.UtfOps; + +/** + * A ClassCatalog that is stored in a Database. + * + *

    A single StoredClassCatalog object is normally used along + * with a set of databases that store serialized objects.

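Setup sketch (not part of this changeset), assuming the com.sleepycat.db Environment/Database API of this release; the environment home directory and database file name are placeholders.

    import java.io.File;

    import com.sleepycat.bind.serial.StoredClassCatalog;
    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseConfig;
    import com.sleepycat.db.DatabaseType;
    import com.sleepycat.db.Environment;
    import com.sleepycat.db.EnvironmentConfig;

    public class CatalogSetup {

        static StoredClassCatalog openCatalog(File envHome) throws Exception {

            EnvironmentConfig envConfig = new EnvironmentConfig();
            envConfig.setAllowCreate(true);
            envConfig.setInitializeCache(true);
            Environment env = new Environment(envHome, envConfig);

            DatabaseConfig dbConfig = new DatabaseConfig();
            dbConfig.setAllowCreate(true);
            dbConfig.setType(DatabaseType.BTREE);   // the catalog must be a BTREE database
            Database catalogDb =
                env.openDatabase(null, "classcatalog.db", null, dbConfig);

            /* Share this one instance across all serial bindings. */
            return new StoredClassCatalog(catalogDb);
        }
    }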
    + * + * @author Mark Hayes + */ +public class StoredClassCatalog implements ClassCatalog { + + /* + * Record types ([key] [data]): + * + * [0] [next class ID] + * [1 / class ID] [ObjectStreamClass (class format)] + * [2 / class name] [ClassInfo (has 8 byte class ID)] + */ + private static final byte REC_LAST_CLASS_ID = (byte) 0; + private static final byte REC_CLASS_FORMAT = (byte) 1; + private static final byte REC_CLASS_INFO = (byte) 2; + + private static final byte[] LAST_CLASS_ID_KEY = {REC_LAST_CLASS_ID}; + + private Database db; + private HashMap classMap; + private HashMap formatMap; + private LockMode writeLockMode; + private boolean cdbMode; + private boolean txnMode; + + /** + * Creates a catalog based on a given database. To save resources, only a + * single catalog object should be used for each unique catalog database. + * + * @param database an open database to use as the class catalog. It must + * be a BTREE database and must not allow duplicates. + * + * @throws DatabaseException if an error occurs accessing the database. + * + * @throws IllegalArgumentException if the database is not a BTREE database + * or if it configured to allow duplicates. + */ + public StoredClassCatalog(Database database) + throws DatabaseException, IllegalArgumentException { + + db = database; + DatabaseConfig dbConfig = db.getConfig(); + EnvironmentConfig envConfig = db.getEnvironment().getConfig(); + + writeLockMode = (DbCompat.getInitializeLocking(envConfig) || + envConfig.getTransactional()) ? LockMode.RMW + : LockMode.DEFAULT; + cdbMode = DbCompat.getInitializeCDB(envConfig); + txnMode = dbConfig.getTransactional(); + + if (!DbCompat.isTypeBtree(dbConfig)) { + throw new IllegalArgumentException( + "The class catalog must be a BTREE database."); + } + if (DbCompat.getSortedDuplicates(dbConfig) || + DbCompat.getUnsortedDuplicates(dbConfig)) { + throw new IllegalArgumentException( + "The class catalog database must not allow duplicates."); + } + + /* + * Create the class format and class info maps. Note that these are not + * synchronized, and therefore the methods that use them are + * synchronized. + */ + classMap = new HashMap(); + formatMap = new HashMap(); + + /* + * To avoid phantoms, use putNoOverwrite to ensure that there is always + * a class ID record. + */ + if (!dbConfig.getReadOnly()) { + DatabaseEntry key = new DatabaseEntry(LAST_CLASS_ID_KEY); + DatabaseEntry data = new DatabaseEntry(new byte[1]); // zero ID + db.putNoOverwrite(null, key, data); + } + } + + // javadoc is inherited + public synchronized void close() + throws DatabaseException { + + if (db != null) { + db.close(); + } + db = null; + formatMap = null; + classMap = null; + } + + // javadoc is inherited + public synchronized byte[] getClassID(ObjectStreamClass classFormat) + throws DatabaseException, ClassNotFoundException { + + ClassInfo classInfo = getClassInfo(classFormat); + return classInfo.getClassID(); + } + + // javadoc is inherited + public synchronized ObjectStreamClass getClassFormat(byte[] classID) + throws DatabaseException, ClassNotFoundException { + + return getClassFormat(classID, new DatabaseEntry()); + } + + /** + * Internal function for getting the class format. Allows passing the + * DatabaseEntry object for the data, so the bytes of the class format can + * be examined afterwards. 
+ */ + private synchronized ObjectStreamClass getClassFormat(byte[] classID, + DatabaseEntry data) + throws DatabaseException, ClassNotFoundException { + + /* First check the map and, if found, add class info to the map. */ + + BigInteger classIDObj = new BigInteger(classID); + ObjectStreamClass classFormat = + (ObjectStreamClass) formatMap.get(classIDObj); + if (classFormat == null) { + + /* Make the class format key. */ + + byte[] keyBytes = new byte[classID.length + 1]; + keyBytes[0] = REC_CLASS_FORMAT; + System.arraycopy(classID, 0, keyBytes, 1, classID.length); + DatabaseEntry key = new DatabaseEntry(keyBytes); + + /* Read the class format. */ + + OperationStatus status = db.get(null, key, data, LockMode.DEFAULT); + if (status != OperationStatus.SUCCESS) { + throw new ClassNotFoundException("Catalog class ID not found"); + } + try { + ObjectInputStream ois = + new ObjectInputStream( + new ByteArrayInputStream(data.getData(), + data.getOffset(), + data.getSize())); + classFormat = (ObjectStreamClass) ois.readObject(); + } catch (IOException e) { + throw new RuntimeExceptionWrapper(e); + } + + /* Update the class format map. */ + + formatMap.put(classIDObj, classFormat); + } + return classFormat; + } + + /** + * Get the ClassInfo for a given class name, adding it and its + * ObjectStreamClass to the database if they are not already present, and + * caching both of them using the class info and class format maps. When a + * class is first loaded from the database, the stored ObjectStreamClass is + * compared to the current ObjectStreamClass loaded by the Java class + * loader; if they are different, a new class ID is assigned for the + * current format. + */ + private synchronized ClassInfo getClassInfo(ObjectStreamClass classFormat) + throws DatabaseException, ClassNotFoundException { + + /* + * First check for a cached copy of the class info, which if + * present always contains the class format object + */ + String className = classFormat.getName(); + ClassInfo classInfo = (ClassInfo) classMap.get(className); + if (classInfo != null) { + return classInfo; + } else { + /* Make class info key. */ + char[] nameChars = className.toCharArray(); + byte[] keyBytes = new byte[1 + UtfOps.getByteLength(nameChars)]; + keyBytes[0] = REC_CLASS_INFO; + UtfOps.charsToBytes(nameChars, 0, keyBytes, 1, nameChars.length); + DatabaseEntry key = new DatabaseEntry(keyBytes); + + /* Read class info. */ + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status = db.get(null, key, data, LockMode.DEFAULT); + if (status != OperationStatus.SUCCESS) { + /* + * Not found in the database, write class info and class + * format. + */ + classInfo = putClassInfo(new ClassInfo(), className, key, + classFormat); + } else { + /* + * Read class info to get the class format key, then read class + * format. + */ + classInfo = new ClassInfo(data); + DatabaseEntry formatData = new DatabaseEntry(); + ObjectStreamClass storedClassFormat = + getClassFormat(classInfo.getClassID(), formatData); + + /* + * Compare the stored class format to the current class format, + * and if they are different then generate a new class ID. + */ + if (!areClassFormatsEqual(storedClassFormat, + getBytes(formatData), + classFormat)) { + classInfo = putClassInfo(classInfo, className, key, + classFormat); + } + + /* Update the class info map. 
*/ + classInfo.setClassFormat(classFormat); + classMap.put(className, classInfo); + } + } + return classInfo; + } + + /** + * Assign a new class ID (increment the current ID record), write the + * ObjectStreamClass record for this new ID, and update the ClassInfo + * record with the new ID also. The ClassInfo passed as an argument is the + * one to be updated. + */ + private synchronized ClassInfo putClassInfo(ClassInfo classInfo, + String className, + DatabaseEntry classKey, + ObjectStreamClass classFormat) + throws DatabaseException, ClassNotFoundException { + + /* An intent-to-write cursor is needed for CDB. */ + CursorConfig cursorConfig = null; + if (cdbMode) { + cursorConfig = new CursorConfig(); + DbCompat.setWriteCursor(cursorConfig, true); + } + Cursor cursor = null; + Transaction txn = null; + try { + if (txnMode) { + txn = db.getEnvironment().beginTransaction(null, null); + } + cursor = db.openCursor(txn, cursorConfig); + + /* Get the current class ID. */ + DatabaseEntry key = new DatabaseEntry(LAST_CLASS_ID_KEY); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status = cursor.getSearchKey(key, data, + writeLockMode); + if (status != OperationStatus.SUCCESS) { + throw new IllegalStateException("Class ID not initialized"); + } + byte[] idBytes = getBytes(data); + + /* Increment the ID by one and write the updated record. */ + idBytes = incrementID(idBytes); + data.setData(idBytes); + cursor.put(key, data); + + /* + * Write the new class format record whose key is the ID just + * assigned. + */ + byte[] keyBytes = new byte[1 + idBytes.length]; + keyBytes[0] = REC_CLASS_FORMAT; + System.arraycopy(idBytes, 0, keyBytes, 1, idBytes.length); + key.setData(keyBytes); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ObjectOutputStream oos; + try { + oos = new ObjectOutputStream(baos); + oos.writeObject(classFormat); + } catch (IOException e) { + throw new RuntimeExceptionWrapper(e); + } + data.setData(baos.toByteArray()); + + cursor.put(key, data); + + /* + * Write the new class info record, using the key passed in; this + * is done last so that a reader who gets the class info record + * first will always find the corresponding class format record. + */ + classInfo.setClassID(idBytes); + classInfo.toDbt(data); + + cursor.put(classKey, data); + + /* + * Update the maps before closing the cursor, so that the cursor + * lock prevents other writers from duplicating this entry. + */ + classInfo.setClassFormat(classFormat); + classMap.put(className, classInfo); + formatMap.put(new BigInteger(idBytes), classFormat); + return classInfo; + } finally { + if (cursor != null) { + cursor.close(); + } + if (txn != null) { + txn.commit(); + } + } + } + + private static byte[] incrementID(byte[] key) { + + BigInteger id = new BigInteger(key); + id = id.add(BigInteger.valueOf(1)); + return id.toByteArray(); + } + + /** + * Holds the class format key for a class, maintains a reference to the + * ObjectStreamClass. Other fields can be added when we need to store more + * information per class. 
+ */ + private static class ClassInfo implements Serializable { + + private byte[] classID; + private transient ObjectStreamClass classFormat; + + ClassInfo() { + } + + ClassInfo(DatabaseEntry dbt) { + + byte[] data = dbt.getData(); + int len = data[0]; + classID = new byte[len]; + System.arraycopy(data, 1, classID, 0, len); + } + + void toDbt(DatabaseEntry dbt) { + + byte[] data = new byte[1 + classID.length]; + data[0] = (byte) classID.length; + System.arraycopy(classID, 0, data, 1, classID.length); + dbt.setData(data); + } + + void setClassID(byte[] classID) { + + this.classID = classID; + } + + byte[] getClassID() { + + return classID; + } + + ObjectStreamClass getClassFormat() { + + return classFormat; + } + + void setClassFormat(ObjectStreamClass classFormat) { + + this.classFormat = classFormat; + } + } + + /** + * Return whether two class formats are equal. This determines whether a + * new class format is needed for an object being serialized. Formats must + * be identical in all respects, or a new format is needed. + */ + private static boolean areClassFormatsEqual(ObjectStreamClass format1, + byte[] format1Bytes, + ObjectStreamClass format2) { + try { + if (format1Bytes == null) { // using cached format1 object + format1Bytes = getObjectBytes(format1); + } + byte[] format2Bytes = getObjectBytes(format2); + return java.util.Arrays.equals(format2Bytes, format1Bytes); + } catch (IOException e) { return false; } + } + + private static byte[] getBytes(DatabaseEntry dbt) { + byte[] b = dbt.getData(); + if (b == null) { + return null; + } + if (dbt.getOffset() == 0 && b.length == dbt.getSize()) { + return b; + } + byte[] t = new byte[dbt.getSize()]; + System.arraycopy(b, dbt.getOffset(), t, 0, t.length); + return t; + } + + private static byte[] getObjectBytes(Object o) + throws IOException { + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ObjectOutputStream oos = new ObjectOutputStream(baos); + oos.writeObject(o); + return baos.toByteArray(); + } +} diff --git a/db/java/src/com/sleepycat/bind/serial/TupleSerialBinding.java b/db/java/src/com/sleepycat/bind/serial/TupleSerialBinding.java new file mode 100644 index 000000000..2e30c3f5f --- /dev/null +++ b/db/java/src/com/sleepycat/bind/serial/TupleSerialBinding.java @@ -0,0 +1,115 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TupleSerialBinding.java,v 1.2 2004/06/04 18:24:49 mark Exp $ + */ + +package com.sleepycat.bind.serial; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.db.DatabaseEntry; + +/** + * An abstract EntityBinding that treats an entity's key entry as + * a tuple and its data entry as a serialized object. + * + *

    This class takes care of serializing and deserializing the data entry, + * and converting the key entry to/from {@link TupleInput} and {@link + * TupleOutput} objects. Its three abstract methods must be implemented by a + * concrete subclass to convert these objects to/from an entity object.

+ *
+ * <ul>
+ * <li> {@link #entryToObject(TupleInput,Object)} </li>
+ * <li> {@link #objectToKey(Object,TupleOutput)} </li>
+ * <li> {@link #objectToData(Object)} </li>
+ * </ul>
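A hypothetical concrete subclass (illustration only, not part of this changeset), assuming the tuple String read/write methods and a small Serializable PartData class defined for the example:

    import java.io.Serializable;

    import com.sleepycat.bind.serial.ClassCatalog;
    import com.sleepycat.bind.serial.TupleSerialBinding;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    public class PartTupleSerialBinding extends TupleSerialBinding {

        public static class PartData implements Serializable {
            String name;
            PartData(String name) { this.name = name; }
        }

        public static class Part {
            String number;
            PartData data;
            Part(String number, PartData data) { this.number = number; this.data = data; }
        }

        public PartTupleSerialBinding(ClassCatalog catalog) {
            super(catalog, PartData.class);
        }

        public Object entryToObject(TupleInput keyInput, Object dataInput) {
            return new Part(keyInput.readString(), (PartData) dataInput);
        }

        public void objectToKey(Object object, TupleOutput keyOutput) {
            keyOutput.writeString(((Part) object).number);   // key stored as a tuple
        }

        public Object objectToData(Object object) {
            return ((Part) object).data;                     // data stored serialized
        }
    }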
    + * + * @author Mark Hayes + */ +public abstract class TupleSerialBinding implements EntityBinding { + + protected SerialBinding dataBinding; + + /** + * Creates a tuple-serial entity binding. + * + * @param classCatalog is the catalog to hold shared class information and + * for a database should be a {@link StoredClassCatalog}. + * + * @param baseClass is the base class. + */ + public TupleSerialBinding(ClassCatalog classCatalog, + Class baseClass) { + + this(new SerialBinding(classCatalog, baseClass)); + } + + /** + * Creates a tuple-serial entity binding. + * + * @param dataBinding is the data binding. + */ + public TupleSerialBinding(SerialBinding dataBinding) { + + this.dataBinding = dataBinding; + } + + // javadoc is inherited + public Object entryToObject(DatabaseEntry key, DatabaseEntry data) { + + return entryToObject(TupleBinding.entryToInput(key), + dataBinding.entryToObject(data)); + } + + // javadoc is inherited + public void objectToKey(Object object, DatabaseEntry key) { + + TupleOutput output = TupleBinding.newOutput(); + objectToKey(object, output); + TupleBinding.outputToEntry(output, key); + } + + // javadoc is inherited + public void objectToData(Object object, DatabaseEntry data) { + + object = objectToData(object); + dataBinding.objectToEntry(object, data); + } + + /** + * Constructs an entity object from {@link TupleInput} key entry and + * deserialized data entry objects. + * + * @param keyInput is the {@link TupleInput} key entry object. + * + * @param dataInput is the deserialized data entry object. + * + * @return the entity object constructed from the key and data. + */ + public abstract Object entryToObject(TupleInput keyInput, + Object dataInput); + + /** + * Extracts a key tuple from an entity object. + * + * @param object is the entity object. + * + * @param keyOutput is the {@link TupleOutput} to which the key should be + * written. + */ + public abstract void objectToKey(Object object, TupleOutput keyOutput); + + /** + * Extracts a data object from an entity object. + * + * @param object is the entity object. + * + * @return the deserialized data object. + */ + public abstract Object objectToData(Object object); +} diff --git a/db/java/src/com/sleepycat/bind/serial/TupleSerialKeyCreator.java b/db/java/src/com/sleepycat/bind/serial/TupleSerialKeyCreator.java new file mode 100644 index 000000000..f81f75a9a --- /dev/null +++ b/db/java/src/com/sleepycat/bind/serial/TupleSerialKeyCreator.java @@ -0,0 +1,137 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TupleSerialKeyCreator.java,v 1.4 2004/08/02 18:52:04 mjc Exp $ + */ + +package com.sleepycat.bind.serial; + +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.db.DatabaseEntry; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.SecondaryDatabase; +import com.sleepycat.db.SecondaryKeyCreator; + +/** + * A abstract key creator that uses a tuple key and a serial data entry. This + * class takes care of serializing and deserializing the data entry, and + * converting the key entry to/from {@link TupleInput} and {@link TupleOutput} + * objects. + * The following abstract method must be implemented by a concrete subclass + * to create the index key using these objects + *
+ * <ul>
+ * <li> {@link #createSecondaryKey(TupleInput,Object,TupleOutput)} </li>
+ * </ul>
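Illustration only (not part of this changeset): a key creator that indexes a hypothetical Serializable PartData object by its name field, returning false when no index key applies.

    import java.io.Serializable;

    import com.sleepycat.bind.serial.ClassCatalog;
    import com.sleepycat.bind.serial.TupleSerialKeyCreator;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    public class PartByNameKeyCreator extends TupleSerialKeyCreator {

        public static class PartData implements Serializable {
            String name;
        }

        public PartByNameKeyCreator(ClassCatalog catalog) {
            super(catalog, PartData.class);
        }

        public boolean createSecondaryKey(TupleInput primaryKeyInput,
                                          Object dataInput,
                                          TupleOutput indexKeyOutput) {
            String name = ((PartData) dataInput).name;
            if (name == null) {
                return false;                   // no index key for this record
            }
            indexKeyOutput.writeString(name);   // index key written as a tuple
            return true;
        }
    }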
    + * + * @author Mark Hayes + */ +public abstract class TupleSerialKeyCreator + implements SecondaryKeyCreator { + + protected SerialBinding dataBinding; + + /** + * Creates a tuple-serial key creator. + * + * @param classCatalog is the catalog to hold shared class information and + * for a database should be a {@link StoredClassCatalog}. + * + * @param dataClass is the data base class. + */ + public TupleSerialKeyCreator(ClassCatalog classCatalog, Class dataClass) { + + this(new SerialBinding(classCatalog, dataClass)); + } + + /** + * Creates a tuple-serial key creator. + * + * @param dataBinding is the data binding. + */ + public TupleSerialKeyCreator(SerialBinding dataBinding) { + + this.dataBinding = dataBinding; + } + + // javadoc is inherited + public boolean createSecondaryKey(SecondaryDatabase db, + DatabaseEntry primaryKeyEntry, + DatabaseEntry dataEntry, + DatabaseEntry indexKeyEntry) + throws DatabaseException { + + TupleOutput output = TupleBinding.newOutput(); + TupleInput primaryKeyInput = + TupleBinding.entryToInput(primaryKeyEntry); + Object dataInput = dataBinding.entryToObject(dataEntry); + if (createSecondaryKey(primaryKeyInput, dataInput, output)) { + TupleBinding.outputToEntry(output, indexKeyEntry); + return true; + } else { + return false; + } + } + + // javadoc is inherited + public boolean nullifyForeignKey(SecondaryDatabase db, + DatabaseEntry dataEntry) + throws DatabaseException { + + Object data = dataBinding.entryToObject(dataEntry); + data = nullifyForeignKey(data); + if (data != null) { + dataBinding.objectToEntry(data, dataEntry); + return true; + } else { + return false; + } + } + + /** + * Creates the index key entry from primary key tuple entry and + * deserialized data entry. + * + * @param primaryKeyInput is the {@link TupleInput} for the primary key + * entry, or null if no primary key entry is used to construct the index + * key. + * + * @param dataInput is the deserialized data entry, or null if no data + * entry is used to construct the index key. + * + * @param indexKeyOutput is the destination index key tuple. For index + * keys which are optionally present, no tuple entry should be output to + * indicate that the key is not present or null. + * + * @return true if a key was created, or false to indicate that the key is + * not present. + */ + public abstract boolean createSecondaryKey(TupleInput primaryKeyInput, + Object dataInput, + TupleOutput indexKeyOutput); + + /** + * Clears the index key in the deserialized data entry. + * + *

    On entry the data parameter contains the index key to be cleared. It + * should be changed by this method such that {@link #createSecondaryKey} + * will return false. Other fields in the data object should remain + * unchanged.

    + * + * @param data is the source and destination deserialized data + * entry. + * + * @return the destination data object, or null to indicate that the + * key is not present and no change is necessary. The data returned may + * be the same object passed as the data parameter or a newly created + * object. + */ + public Object nullifyForeignKey(Object data) { + + return null; + } +} diff --git a/db/java/src/com/sleepycat/bind/serial/TupleSerialMarshalledBinding.java b/db/java/src/com/sleepycat/bind/serial/TupleSerialMarshalledBinding.java new file mode 100644 index 000000000..85a254e9a --- /dev/null +++ b/db/java/src/com/sleepycat/bind/serial/TupleSerialMarshalledBinding.java @@ -0,0 +1,93 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TupleSerialMarshalledBinding.java,v 1.3 2004/09/22 18:01:01 bostic Exp $ + */ + +package com.sleepycat.bind.serial; + +import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A concrete TupleSerialBinding that delegates to the + * MarshalledTupleKeyEntity interface of the entity class. + * + *

    The {@link MarshalledTupleKeyEntity} interface must be implemented by the + * entity class to convert between the key/data entry and entity object.

    + * + *

    The binding is "tricky" in that it uses the entity class for both the + * stored data entry and the combined entity object. To do this, the entity's + * key field(s) are transient and are set by the binding after the data object + * has been deserialized. This avoids the use of a "data" class completely. + *

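A hypothetical entity class (illustration only, not part of this changeset) showing the transient key field pattern described above:

    import java.io.Serializable;

    import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    public class PartEntity implements MarshalledTupleKeyEntity, Serializable {

        private transient String number;   // primary key: not serialized
        private String name;               // ordinary data field: serialized

        public PartEntity(String number, String name) {
            this.number = number;
            this.name = name;
        }

        public void marshalPrimaryKey(TupleOutput keyOutput) {
            keyOutput.writeString(number);
        }

        public void unmarshalPrimaryKey(TupleInput keyInput) {
            number = keyInput.readString();    // restored by the binding after reading
        }

        public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) {
            if ("name".equals(keyName) && name != null) {
                keyOutput.writeString(name);
                return true;
            }
            return false;
        }

        public boolean nullifyForeignKey(String keyName) {
            return false;   // this entity has no foreign keys to nullify
        }
    }

A TupleSerialMarshalledBinding constructed with this class and a shared catalog would then serve as the entity binding, and a TupleSerialMarshalledKeyCreator built from that binding with the key name "name" would produce the secondary index keys.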
    + * + * @author Mark Hayes + * @see MarshalledTupleKeyEntity + */ +public class TupleSerialMarshalledBinding extends TupleSerialBinding { + + /** + * Creates a tuple-serial marshalled binding object. + * + * @param classCatalog is the catalog to hold shared class information and + * for a database should be a {@link StoredClassCatalog}. + * + * @param baseClass is the base class for serialized objects stored using + * this binding -- all objects using this binding must be an instance of + * this class. + */ + public TupleSerialMarshalledBinding(ClassCatalog classCatalog, + Class baseClass) { + + this(new SerialBinding(classCatalog, baseClass)); + } + + /** + * Creates a tuple-serial marshalled binding object. + * + * @param dataBinding is the binding used for serializing and deserializing + * the entity object. + */ + public TupleSerialMarshalledBinding(SerialBinding dataBinding) { + + super(dataBinding); + } + + // javadoc is inherited + public Object entryToObject(TupleInput tupleInput, Object javaInput) { + + /* Creates the entity by combining the stored key and data. + * This "tricky" binding returns the stored data as the entity, but + * first it sets the transient key fields from the stored key. + */ + MarshalledTupleKeyEntity entity = (MarshalledTupleKeyEntity) javaInput; + + if (tupleInput != null) { // may be null if not used by key extractor + entity.unmarshalPrimaryKey(tupleInput); + } + return entity; + } + + // javadoc is inherited + public void objectToKey(Object object, TupleOutput output) { + + /* Creates the stored key from the entity. + */ + MarshalledTupleKeyEntity entity = (MarshalledTupleKeyEntity) object; + entity.marshalPrimaryKey(output); + } + + // javadoc is inherited + public Object objectToData(Object object) { + + /* Returns the entity as the stored data. There is nothing to do here + * since the entity's key fields are transient. + */ + return object; + } +} diff --git a/db/java/src/com/sleepycat/bind/serial/TupleSerialMarshalledKeyCreator.java b/db/java/src/com/sleepycat/bind/serial/TupleSerialMarshalledKeyCreator.java new file mode 100644 index 000000000..98b8fa637 --- /dev/null +++ b/db/java/src/com/sleepycat/bind/serial/TupleSerialMarshalledKeyCreator.java @@ -0,0 +1,75 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TupleSerialMarshalledKeyCreator.java,v 1.2 2004/06/04 18:24:49 mark Exp $ + */ + +package com.sleepycat.bind.serial; + +import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A concrete key creator that works in conjunction with a {@link + * TupleSerialMarshalledBinding}. This key creator works by calling the + * methods of the {@link MarshalledTupleKeyEntity} interface to create and + * clear the index key fields. + * + * @author Mark Hayes + */ +public class TupleSerialMarshalledKeyCreator extends TupleSerialKeyCreator { + + private TupleSerialMarshalledBinding binding; + private String keyName; + + /** + * Creates a tuple-serial marshalled key creator. + * + * @param binding is the binding used for the tuple-serial entity. + * + * @param keyName is the key name passed to the {@link + * MarshalledTupleKeyEntity#marshalSecondaryKey} method to identify the + * index key. 
+ */ + public TupleSerialMarshalledKeyCreator(TupleSerialMarshalledBinding + binding, + String keyName) { + + super(binding.dataBinding); + this.binding = binding; + this.keyName = keyName; + + if (dataBinding == null) { + throw new NullPointerException("dataBinding may not be null"); + } + } + + // javadoc is inherited + public boolean createSecondaryKey(TupleInput primaryKeyInput, + Object dataInput, + TupleOutput indexKeyOutput) { + + /* + * The primary key is unmarshalled before marshalling the index key, to + * account for cases where the index key includes fields taken from the + * primary key. + */ + MarshalledTupleKeyEntity entity = (MarshalledTupleKeyEntity) + binding.entryToObject(primaryKeyInput, dataInput); + + return entity.marshalSecondaryKey(keyName, indexKeyOutput); + } + + // javadoc is inherited + public Object nullifyForeignKey(Object dataInput) { + + MarshalledTupleKeyEntity entity = (MarshalledTupleKeyEntity) + binding.entryToObject(null, dataInput); + + return entity.nullifyForeignKey(keyName) ? dataInput : null; + } +} diff --git a/db/java/src/com/sleepycat/bind/serial/package.html b/db/java/src/com/sleepycat/bind/serial/package.html new file mode 100644 index 000000000..eab1e2151 --- /dev/null +++ b/db/java/src/com/sleepycat/bind/serial/package.html @@ -0,0 +1,6 @@ + + + +Bindings that use Java serialization. + + diff --git a/db/java/src/com/sleepycat/bind/tuple/BooleanBinding.java b/db/java/src/com/sleepycat/bind/tuple/BooleanBinding.java new file mode 100644 index 000000000..389b19f25 --- /dev/null +++ b/db/java/src/com/sleepycat/bind/tuple/BooleanBinding.java @@ -0,0 +1,75 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: BooleanBinding.java,v 1.5 2004/08/13 15:16:44 mjc Exp $ + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.db.DatabaseEntry; + +/** + * A concrete TupleBinding for a Boolean primitive + * wrapper or a boolean primitive. + * + *

    There are two ways to use this class:

+ *
+ * <ol>
+ * <li>When using the {@link com.sleepycat.db} package directly, the static + * methods in this class can be used to convert between primitive values and + * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of + * this class can be used with any stored collection. The easiest way to + * obtain a binding instance is with the {@link + * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
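Both styles in one sketch (illustration only, not part of this changeset); the same pattern applies to all of the primitive bindings added in this patch.

    import com.sleepycat.bind.EntryBinding;
    import com.sleepycat.bind.tuple.IntegerBinding;
    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.db.DatabaseEntry;

    public class PrimitiveBindingUsage {

        public static void main(String[] args) {

            /* Style 1: static conversion methods. */
            DatabaseEntry entry = new DatabaseEntry();
            IntegerBinding.intToEntry(42, entry);
            int n = IntegerBinding.entryToInt(entry);

            /* Style 2: a binding instance, e.g. for stored collections. */
            EntryBinding binding =
                TupleBinding.getPrimitiveBinding(Boolean.class);
            binding.objectToEntry(Boolean.TRUE, entry);
            Boolean b = (Boolean) binding.entryToObject(entry);

            System.out.println(n + " " + b);
        }
    }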
    + */ +public class BooleanBinding extends TupleBinding { + + private static final int BOOLEAN_SIZE = 1; + + // javadoc is inherited + public Object entryToObject(TupleInput input) { + + return input.readBoolean() ? Boolean.TRUE : Boolean.FALSE; + } + + // javadoc is inherited + public void objectToEntry(Object object, TupleOutput output) { + + /* Do nothing. Not called by objectToEntry(Object,DatabaseEntry). */ + } + + // javadoc is inherited + public void objectToEntry(Object object, DatabaseEntry entry) { + + booleanToEntry(((Boolean) object).booleanValue(), entry); + } + + /** + * Converts an entry buffer into a simple boolean value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static boolean entryToBoolean(DatabaseEntry entry) { + + return entryToInput(entry).readBoolean(); + } + + /** + * Converts a simple boolean value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void booleanToEntry(boolean val, DatabaseEntry entry) { + + outputToEntry(newOutput(new byte[BOOLEAN_SIZE]).writeBoolean(val), + entry); + } +} diff --git a/db/java/src/com/sleepycat/bind/tuple/ByteBinding.java b/db/java/src/com/sleepycat/bind/tuple/ByteBinding.java new file mode 100644 index 000000000..f227e50c6 --- /dev/null +++ b/db/java/src/com/sleepycat/bind/tuple/ByteBinding.java @@ -0,0 +1,74 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: ByteBinding.java,v 1.4 2004/08/02 18:52:04 mjc Exp $ + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.db.DatabaseEntry; + +/** + * A concrete TupleBinding for a Byte primitive + * wrapper or a byte primitive. + * + *

    There are two ways to use this class:

+ *
+ * <ol>
+ * <li>When using the {@link com.sleepycat.db} package directly, the static + * methods in this class can be used to convert between primitive values and + * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of + * this class can be used with any stored collection. The easiest way to + * obtain a binding instance is with the {@link + * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
    + */ +public class ByteBinding extends TupleBinding { + + private static final int BYTE_SIZE = 1; + + // javadoc is inherited + public Object entryToObject(TupleInput input) { + + return new Byte(input.readByte()); + } + + // javadoc is inherited + public void objectToEntry(Object object, TupleOutput output) { + + /* Do nothing. Not called by objectToEntry(Object,DatabaseEntry). */ + } + + // javadoc is inherited + public void objectToEntry(Object object, DatabaseEntry entry) { + + byteToEntry(((Number) object).byteValue(), entry); + } + + /** + * Converts an entry buffer into a simple byte value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static byte entryToByte(DatabaseEntry entry) { + + return entryToInput(entry).readByte(); + } + + /** + * Converts a simple byte value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void byteToEntry(byte val, DatabaseEntry entry) { + + outputToEntry(newOutput(new byte[BYTE_SIZE]).writeByte(val), entry); + } +} diff --git a/db/java/src/com/sleepycat/bind/tuple/CharacterBinding.java b/db/java/src/com/sleepycat/bind/tuple/CharacterBinding.java new file mode 100644 index 000000000..c521a9ac4 --- /dev/null +++ b/db/java/src/com/sleepycat/bind/tuple/CharacterBinding.java @@ -0,0 +1,74 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: CharacterBinding.java,v 1.4 2004/08/02 18:52:04 mjc Exp $ + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.db.DatabaseEntry; + +/** + * A concrete TupleBinding for a Character primitive + * wrapper or a char primitive. + * + *

    There are two ways to use this class:

+ *
+ * <ol>
+ * <li>When using the {@link com.sleepycat.db} package directly, the static + * methods in this class can be used to convert between primitive values and + * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of + * this class can be used with any stored collection. The easiest way to + * obtain a binding instance is with the {@link + * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
    + */ +public class CharacterBinding extends TupleBinding { + + private static final int CHAR_SIZE = 2; + + // javadoc is inherited + public Object entryToObject(TupleInput input) { + + return new Character(input.readChar()); + } + + // javadoc is inherited + public void objectToEntry(Object object, TupleOutput output) { + + /* Do nothing. Not called by objectToEntry(Object,DatabaseEntry). */ + } + + // javadoc is inherited + public void objectToEntry(Object object, DatabaseEntry entry) { + + charToEntry(((Character) object).charValue(), entry); + } + + /** + * Converts an entry buffer into a simple char value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static char entryToChar(DatabaseEntry entry) { + + return entryToInput(entry).readChar(); + } + + /** + * Converts a simple char value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void charToEntry(char val, DatabaseEntry entry) { + + outputToEntry(newOutput(new byte[CHAR_SIZE]).writeChar(val), entry); + } +} diff --git a/db/java/src/com/sleepycat/bind/tuple/DoubleBinding.java b/db/java/src/com/sleepycat/bind/tuple/DoubleBinding.java new file mode 100644 index 000000000..a94c7ecb9 --- /dev/null +++ b/db/java/src/com/sleepycat/bind/tuple/DoubleBinding.java @@ -0,0 +1,75 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: DoubleBinding.java,v 1.4 2004/08/02 18:52:04 mjc Exp $ + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.db.DatabaseEntry; + +/** + * A concrete TupleBinding for a Double primitive + * wrapper or a double primitive. + * + *

    There are two ways to use this class:

+ *
+ * <ol>
+ * <li>When using the {@link com.sleepycat.db} package directly, the static + * methods in this class can be used to convert between primitive values and + * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of + * this class can be used with any stored collection. The easiest way to + * obtain a binding instance is with the {@link + * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
    + */ +public class DoubleBinding extends TupleBinding { + + private static final int DOUBLE_SIZE = 8; + + // javadoc is inherited + public Object entryToObject(TupleInput input) { + + return new Double(input.readDouble()); + } + + // javadoc is inherited + public void objectToEntry(Object object, TupleOutput output) { + + /* Do nothing. Not called by objectToEntry(Object,DatabaseEntry). */ + } + + // javadoc is inherited + public void objectToEntry(Object object, DatabaseEntry entry) { + + doubleToEntry(((Number) object).doubleValue(), entry); + } + + /** + * Converts an entry buffer into a simple double value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static double entryToDouble(DatabaseEntry entry) { + + return entryToInput(entry).readDouble(); + } + + /** + * Converts a simple double value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void doubleToEntry(double val, DatabaseEntry entry) { + + outputToEntry(newOutput(new byte[DOUBLE_SIZE]).writeDouble(val), + entry); + } +} diff --git a/db/java/src/com/sleepycat/bind/tuple/FloatBinding.java b/db/java/src/com/sleepycat/bind/tuple/FloatBinding.java new file mode 100644 index 000000000..c7d45716f --- /dev/null +++ b/db/java/src/com/sleepycat/bind/tuple/FloatBinding.java @@ -0,0 +1,74 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: FloatBinding.java,v 1.4 2004/08/02 18:52:04 mjc Exp $ + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.db.DatabaseEntry; + +/** + * A concrete TupleBinding for a Float primitive + * wrapper or a float primitive. + * + *

    There are two ways to use this class:

+ *
+ * <ol>
+ * <li>When using the {@link com.sleepycat.db} package directly, the static + * methods in this class can be used to convert between primitive values and + * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of + * this class can be used with any stored collection. The easiest way to + * obtain a binding instance is with the {@link + * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
    + */ +public class FloatBinding extends TupleBinding { + + private static final int FLOAT_SIZE = 4; + + // javadoc is inherited + public Object entryToObject(TupleInput input) { + + return new Float(input.readFloat()); + } + + // javadoc is inherited + public void objectToEntry(Object object, TupleOutput output) { + + /* Do nothing. Not called by objectToEntry(Object,DatabaseEntry). */ + } + + // javadoc is inherited + public void objectToEntry(Object object, DatabaseEntry entry) { + + floatToEntry(((Number) object).floatValue(), entry); + } + + /** + * Converts an entry buffer into a simple float value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static float entryToFloat(DatabaseEntry entry) { + + return entryToInput(entry).readFloat(); + } + + /** + * Converts a simple float value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void floatToEntry(float val, DatabaseEntry entry) { + + outputToEntry(newOutput(new byte[FLOAT_SIZE]).writeFloat(val), entry); + } +} diff --git a/db/java/src/com/sleepycat/bind/tuple/IntegerBinding.java b/db/java/src/com/sleepycat/bind/tuple/IntegerBinding.java new file mode 100644 index 000000000..c2b6391ee --- /dev/null +++ b/db/java/src/com/sleepycat/bind/tuple/IntegerBinding.java @@ -0,0 +1,74 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: IntegerBinding.java,v 1.4 2004/08/02 18:52:04 mjc Exp $ + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.db.DatabaseEntry; + +/** + * A concrete TupleBinding for a Integer primitive + * wrapper or an int primitive. + * + *

    There are two ways to use this class:

+ *
+ * <ol>
+ * <li>When using the {@link com.sleepycat.db} package directly, the static + * methods in this class can be used to convert between primitive values and + * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of + * this class can be used with any stored collection. The easiest way to + * obtain a binding instance is with the {@link + * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
    + */ +public class IntegerBinding extends TupleBinding { + + private static final int INT_SIZE = 4; + + // javadoc is inherited + public Object entryToObject(TupleInput input) { + + return new Integer(input.readInt()); + } + + // javadoc is inherited + public void objectToEntry(Object object, TupleOutput output) { + + /* Do nothing. Not called by objectToEntry(Object,DatabaseEntry). */ + } + + // javadoc is inherited + public void objectToEntry(Object object, DatabaseEntry entry) { + + intToEntry(((Number) object).intValue(), entry); + } + + /** + * Converts an entry buffer into a simple int value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static int entryToInt(DatabaseEntry entry) { + + return entryToInput(entry).readInt(); + } + + /** + * Converts a simple int value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void intToEntry(int val, DatabaseEntry entry) { + + outputToEntry(newOutput(new byte[INT_SIZE]).writeInt(val), entry); + } +} diff --git a/db/java/src/com/sleepycat/bind/tuple/LongBinding.java b/db/java/src/com/sleepycat/bind/tuple/LongBinding.java new file mode 100644 index 000000000..6dee013b7 --- /dev/null +++ b/db/java/src/com/sleepycat/bind/tuple/LongBinding.java @@ -0,0 +1,74 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: LongBinding.java,v 1.4 2004/08/02 18:52:04 mjc Exp $ + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.db.DatabaseEntry; + +/** + * A concrete TupleBinding for a Long primitive + * wrapper or a long primitive. + * + *

    There are two ways to use this class:

+ *
+ * <ol>
+ * <li>When using the {@link com.sleepycat.db} package directly, the static + * methods in this class can be used to convert between primitive values and + * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of + * this class can be used with any stored collection. The easiest way to + * obtain a binding instance is with the {@link + * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
    + */ +public class LongBinding extends TupleBinding { + + private static final int LONG_SIZE = 8; + + // javadoc is inherited + public Object entryToObject(TupleInput input) { + + return new Long(input.readLong()); + } + + // javadoc is inherited + public void objectToEntry(Object object, TupleOutput output) { + + /* Do nothing. Not called by objectToEntry(Object,DatabaseEntry). */ + } + + // javadoc is inherited + public void objectToEntry(Object object, DatabaseEntry entry) { + + longToEntry(((Number) object).longValue(), entry); + } + + /** + * Converts an entry buffer into a simple long value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static long entryToLong(DatabaseEntry entry) { + + return entryToInput(entry).readLong(); + } + + /** + * Converts a simple long value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void longToEntry(long val, DatabaseEntry entry) { + + outputToEntry(newOutput(new byte[LONG_SIZE]).writeLong(val), entry); + } +} diff --git a/db/java/src/com/sleepycat/bind/tuple/MarshalledTupleEntry.java b/db/java/src/com/sleepycat/bind/tuple/MarshalledTupleEntry.java new file mode 100644 index 000000000..9665b3c85 --- /dev/null +++ b/db/java/src/com/sleepycat/bind/tuple/MarshalledTupleEntry.java @@ -0,0 +1,45 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: MarshalledTupleEntry.java,v 1.2 2004/06/04 18:24:50 mark Exp $ + */ + +package com.sleepycat.bind.tuple; + +/** + * A marshalling interface implemented by key, data or entity classes that + * are represented as tuples. + * + *

+ * <p>Key classes implement this interface to marshal their key entry. Data or
+ * entity classes implement this interface to marshal their data entry.
+ * Implementations of this interface must have a public no arguments
+ * constructor so that they can be instantiated by a binding, prior to calling
+ * the {@link #unmarshalEntry} method.</p>
+ *
+ * <p>Note that implementing this interface is not necessary when the object is
+ * a Java simple type, for example: String, Integer, etc. These types can be
+ * used with built-in bindings returned by {@link
+ * TupleBinding#getPrimitiveBinding}.</p>
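Illustrative sketch (not from the patch) of a hypothetical data class implementing this interface; the fields are invented, and the public no-argument constructor is what allows a binding to instantiate the class before calling unmarshalEntry.

    import com.sleepycat.bind.tuple.MarshalledTupleEntry;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    public class PartData implements MarshalledTupleEntry {

        private String description;
        private int quantity;

        // Required public no-arguments constructor.
        public PartData() {
        }

        public PartData(String description, int quantity) {
            this.description = description;
            this.quantity = quantity;
        }

        // Writes the fields to the data tuple.
        public void marshalEntry(TupleOutput dataOutput) {
            dataOutput.writeString(description);
            dataOutput.writeInt(quantity);
        }

        // Reads the fields back in the same order they were written.
        public void unmarshalEntry(TupleInput dataInput) {
            description = dataInput.readString();
            quantity = dataInput.readInt();
        }
    }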

    + * + * @author Mark Hayes + * @see TupleTupleMarshalledBinding + */ +public interface MarshalledTupleEntry { + + /** + * Construct the key or data tuple entry from the key or data object. + * + * @param dataOutput is the output tuple. + */ + void marshalEntry(TupleOutput dataOutput); + + /** + * Construct the key or data object from the key or data tuple entry. + * + * @param dataInput is the input tuple. + */ + void unmarshalEntry(TupleInput dataInput); +} diff --git a/db/java/src/com/sleepycat/bind/tuple/MarshalledTupleKeyEntity.java b/db/java/src/com/sleepycat/bind/tuple/MarshalledTupleKeyEntity.java new file mode 100644 index 000000000..d7640e9f5 --- /dev/null +++ b/db/java/src/com/sleepycat/bind/tuple/MarshalledTupleKeyEntity.java @@ -0,0 +1,71 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: MarshalledTupleKeyEntity.java,v 1.3 2004/08/02 18:52:04 mjc Exp $ + */ + +package com.sleepycat.bind.tuple; + +/** + * A marshalling interface implemented by entity classes that represent keys as + * tuples. Since MarshalledTupleKeyEntity objects are instantiated + * using Java deserialization, no particular constructor is required by classes + * that implement this interface. + * + *

+ * <p>Note that a marshalled tuple key extractor is somewhat less efficient
+ * than a non-marshalled key tuple extractor because more conversions are
+ * needed. A marshalled key extractor must convert the entry to an object in
+ * order to extract the key fields, while an unmarshalled key extractor does
+ * not.</p>
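Illustrative sketch (not from the patch) of a hypothetical entity implementing MarshalledTupleKeyEntity; the field names and the "supplier" key name are invented.

    import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    public class Shipment implements MarshalledTupleKeyEntity {

        private transient String shipmentId; // primary key, stored in the key entry
        private String supplierId;           // secondary key, may be absent

        public void marshalPrimaryKey(TupleOutput keyOutput) {
            keyOutput.writeString(shipmentId);
        }

        public void unmarshalPrimaryKey(TupleInput keyInput) {
            shipmentId = keyInput.readString();
        }

        public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) {
            if ("supplier".equals(keyName) && supplierId != null) {
                keyOutput.writeString(supplierId);
                return true;
            }
            return false; // key not present
        }

        public boolean nullifyForeignKey(String keyName) {
            if ("supplier".equals(keyName) && supplierId != null) {
                supplierId = null; // marshalSecondaryKey will now return false
                return true;
            }
            return false; // nothing to clear
        }
    }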

    + * + * @author Mark Hayes + * @see TupleTupleMarshalledBinding + * @see com.sleepycat.bind.serial.TupleSerialMarshalledBinding + */ +public interface MarshalledTupleKeyEntity { + + /** + * Extracts the entity's primary key and writes it to the key output. + * + * @param keyOutput is the output tuple. + */ + void marshalPrimaryKey(TupleOutput keyOutput); + + /** + * Completes construction of the entity by setting its primary key from the + * stored primary key. + * + * @param keyInput is the input tuple. + */ + void unmarshalPrimaryKey(TupleInput keyInput); + + /** + * Extracts the entity's secondary key and writes it to the key output. + * + * @param keyName identifies the secondary key. + * + * @param keyOutput is the output tuple. + * + * @return true if a key was created, or false to indicate that the key is + * not present. + */ + boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput); + + /** + * Clears the entity's secondary key fields for the given key name. + * + *

+ * <p>The specified index key should be changed by this method such that
+ * {@link #marshalSecondaryKey} for the same key name will return false.
+ * Other fields in the data object should remain unchanged.</p>

    + * + * + * @param keyName identifies the secondary key. + * + * @return true if the key was cleared, or false to indicate that the key + * is not present and no change is necessary. + */ + boolean nullifyForeignKey(String keyName); +} diff --git a/db/java/src/com/sleepycat/bind/tuple/ShortBinding.java b/db/java/src/com/sleepycat/bind/tuple/ShortBinding.java new file mode 100644 index 000000000..d330f9b1b --- /dev/null +++ b/db/java/src/com/sleepycat/bind/tuple/ShortBinding.java @@ -0,0 +1,74 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: ShortBinding.java,v 1.4 2004/08/02 18:52:04 mjc Exp $ + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.db.DatabaseEntry; + +/** + * A concrete TupleBinding for a Short primitive + * wrapper or a short primitive. + * + *

+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.db} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection. The easiest way to
+ * obtain a binding instance is with the {@link
+ * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
    + */ +public class ShortBinding extends TupleBinding { + + private static final int SHORT_SIZE = 2; + + // javadoc is inherited + public Object entryToObject(TupleInput input) { + + return new Short(input.readShort()); + } + + // javadoc is inherited + public void objectToEntry(Object object, TupleOutput output) { + + /* Do nothing. Not called by objectToEntry(Object,DatabaseEntry). */ + } + + // javadoc is inherited + public void objectToEntry(Object object, DatabaseEntry entry) { + + shortToEntry(((Number) object).shortValue(), entry); + } + + /** + * Converts an entry buffer into a simple short value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static short entryToShort(DatabaseEntry entry) { + + return entryToInput(entry).readShort(); + } + + /** + * Converts a simple short value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void shortToEntry(short val, DatabaseEntry entry) { + + outputToEntry(newOutput(new byte[SHORT_SIZE]).writeShort(val), entry); + } +} diff --git a/db/java/src/com/sleepycat/bind/tuple/StringBinding.java b/db/java/src/com/sleepycat/bind/tuple/StringBinding.java new file mode 100644 index 000000000..257e01f44 --- /dev/null +++ b/db/java/src/com/sleepycat/bind/tuple/StringBinding.java @@ -0,0 +1,76 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: StringBinding.java,v 1.4 2004/08/02 18:52:05 mjc Exp $ + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.util.UtfOps; +import com.sleepycat.db.DatabaseEntry; + +/** + * A concrete TupleBinding for a simple String value. + * + *

+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.db} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection. The easiest way to
+ * obtain a binding instance is with the {@link
+ * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
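Illustrative sketch (not from the patch) of the static methods, including the null-string case that the tuple format supports as a distinct value; the values are invented.

    import com.sleepycat.bind.tuple.StringBinding;
    import com.sleepycat.db.DatabaseEntry;

    public class StringBindingUsage {
        public static void main(String[] args) {
            DatabaseEntry entry = new DatabaseEntry();

            // Ordinary value round trip.
            StringBinding.stringToEntry("blue", entry);
            String color = StringBinding.entryToString(entry);  // "blue"

            // Null is a legal, distinct value in the tuple format.
            StringBinding.stringToEntry(null, entry);
            String none = StringBinding.entryToString(entry);   // null

            System.out.println(color + " " + none);
        }
    }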
    + */ +public class StringBinding extends TupleBinding { + + // javadoc is inherited + public Object entryToObject(TupleInput input) { + + return input.readString(); + } + + // javadoc is inherited + public void objectToEntry(Object object, TupleOutput output) { + + /* Do nothing. Not called by objectToEntry(Object,DatabaseEntry). */ + } + + // javadoc is inherited + public void objectToEntry(Object object, DatabaseEntry entry) { + + stringToEntry((String) object, entry); + } + + /** + * Converts an entry buffer into a simple String value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static String entryToString(DatabaseEntry entry) { + + return entryToInput(entry).readString(); + } + + /** + * Converts a simple String value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void stringToEntry(String val, DatabaseEntry entry) { + + int stringLength = + (val == null) ? 1 : UtfOps.getByteLength(val.toCharArray()); + stringLength++; // null terminator + outputToEntry(newOutput(new byte[stringLength]).writeString(val), + entry); + } +} diff --git a/db/java/src/com/sleepycat/bind/tuple/TupleBinding.java b/db/java/src/com/sleepycat/bind/tuple/TupleBinding.java new file mode 100644 index 000000000..5ae95545b --- /dev/null +++ b/db/java/src/com/sleepycat/bind/tuple/TupleBinding.java @@ -0,0 +1,179 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TupleBinding.java,v 1.4 2004/06/29 06:06:36 mark Exp $ + */ + +package com.sleepycat.bind.tuple; + +import java.util.HashMap; +import java.util.Map; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.db.DatabaseEntry; + +/** + * An abstract EntryBinding that treats a key or data entry as a + * tuple; it includes predefined bindings for Java primitive types. + * + *

+ * <p>This class takes care of converting the entries to/from {@link
+ * TupleInput} and {@link TupleOutput} objects. Its two abstract methods must
+ * be implemented by a concrete subclass to convert between tuples and key or
+ * data objects.</p>
+ * <ul>
+ * <li>{@link #entryToObject(TupleInput)}</li>
+ * <li>{@link #objectToEntry(Object,TupleOutput)}</li>
+ * </ul>
+ *
+ * <p>For key or data entries which are Java primitive classes (String,
+ * Integer, etc) {@link #getPrimitiveBinding} may be used to return a builtin
+ * tuple binding. A custom tuple binding for these types is not needed.</p>
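Illustrative sketch (not from the patch) of a concrete subclass implementing the two abstract methods; MyKey is an invented value class.

    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    public class MyKeyBinding extends TupleBinding {

        // An invented value class used only for this sketch.
        public static class MyKey {
            String name;
            int version;
        }

        // Reads the fields in the order objectToEntry wrote them.
        public Object entryToObject(TupleInput input) {
            MyKey key = new MyKey();
            key.name = input.readString();
            key.version = input.readInt();
            return key;
        }

        // Writes the fields; TupleBinding.objectToEntry(Object,DatabaseEntry)
        // then copies the tuple output into the DatabaseEntry.
        public void objectToEntry(Object object, TupleOutput output) {
            MyKey key = (MyKey) object;
            output.writeString(key.name);
            output.writeInt(key.version);
        }
    }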

    + * + * @author Mark Hayes + */ +public abstract class TupleBinding implements EntryBinding { + + private static final Map primitives = new HashMap(); + static { + primitives.put(String.class, new StringBinding()); + primitives.put(Character.class, new CharacterBinding()); + primitives.put(Boolean.class, new BooleanBinding()); + primitives.put(Byte.class, new ByteBinding()); + primitives.put(Short.class, new ShortBinding()); + primitives.put(Integer.class, new IntegerBinding()); + primitives.put(Long.class, new LongBinding()); + primitives.put(Float.class, new FloatBinding()); + primitives.put(Double.class, new DoubleBinding()); + } + + /** + * Creates a tuple binding. + */ + public TupleBinding() { + } + + // javadoc is inherited + public Object entryToObject(DatabaseEntry entry) { + + return entryToObject(entryToInput(entry)); + } + + // javadoc is inherited + public void objectToEntry(Object object, DatabaseEntry entry) { + + TupleOutput output = newOutput(); + objectToEntry(object, output); + outputToEntry(output, entry); + } + + /** + * Utility method for use by bindings to create a tuple output object. + * + * @return a new tuple output object. + */ + public static TupleOutput newOutput() { + + return new TupleOutput(); + } + + /** + * Utility method for use by bindings to create a tuple output object + * with a specific starting size. + * + * @return a new tuple output object. + */ + public static TupleOutput newOutput(byte[] buffer) { + + return new TupleOutput(buffer); + } + + /** + * Utility method to set the data in a entry buffer to the data in a tuple + * output object. + * + * @param output is the source tuple output object. + * + * @param entry is the destination entry buffer. + */ + public static void outputToEntry(TupleOutput output, DatabaseEntry entry) { + + entry.setData(output.getBufferBytes(), output.getBufferOffset(), + output.getBufferLength()); + } + + /** + * Utility method to set the data in a entry buffer to the data in a tuple + * input object. + * + * @param input is the source tuple input object. + * + * @param entry is the destination entry buffer. + */ + public static void inputToEntry(TupleInput input, DatabaseEntry entry) { + + entry.setData(input.getBufferBytes(), input.getBufferOffset(), + input.getBufferLength()); + } + + /** + * Utility method to create a new tuple input object for reading the data + * from a given buffer. If an existing input is reused, it is reset before + * returning it. + * + * @param entry is the source entry buffer. + * + * @return the new tuple input object. + */ + public static TupleInput entryToInput(DatabaseEntry entry) { + + return new TupleInput(entry.getData(), entry.getOffset(), + entry.getSize()); + } + + /** + * Constructs a key or data object from a {@link TupleInput} entry. + * + * @param input is the tuple key or data entry. + * + * @return the key or data object constructed from the entry. + */ + public abstract Object entryToObject(TupleInput input); + + /** + * Converts a key or data object to a tuple entry. + * + * @param object is the key or data object. + * + * @param output is the tuple entry to which the key or data should be + * written. + */ + public abstract void objectToEntry(Object object, TupleOutput output); + + /** + * Creates a tuple binding for a primitive Java class. The following + * Java classes are supported. + *
+ * <ul>
+ * <li>String</li>
+ * <li>Character</li>
+ * <li>Boolean</li>
+ * <li>Byte</li>
+ * <li>Short</li>
+ * <li>Integer</li>
+ * <li>Long</li>
+ * <li>Float</li>
+ * <li>Double</li>
+ * </ul>
    + * + * @param cls is the primitive Java class. + * + * @return a new binding for the primitive class or null if the cls + * parameter is not one of the supported classes. + */ + public static TupleBinding getPrimitiveBinding(Class cls) { + + return (TupleBinding) primitives.get(cls); + } +} diff --git a/db/java/src/com/sleepycat/bind/tuple/TupleInput.java b/db/java/src/com/sleepycat/bind/tuple/TupleInput.java new file mode 100644 index 000000000..97108b10a --- /dev/null +++ b/db/java/src/com/sleepycat/bind/tuple/TupleInput.java @@ -0,0 +1,482 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TupleInput.java,v 1.4 2004/09/01 14:34:20 mark Exp $ + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.util.FastInputStream; +import com.sleepycat.util.UtfOps; + +/** + * An InputStream with DataInput-like methods for + * reading tuple fields. It is used by TupleBinding. + * + *

+ * <p>This class has many methods that have the same signatures as methods in
+ * the {@link java.io.DataInput} interface. The reason this class does not
+ * implement {@link java.io.DataInput} is because it would break the interface
+ * contract for those methods because of data format differences.</p>
+ *
+ * <p>Signed numbers are stored in the buffer in MSB (most significant byte
+ * first) order with their sign bit (high-order bit) inverted to cause negative
+ * numbers to be sorted first when comparing values as unsigned byte arrays,
+ * as done in a database. Unsigned numbers, including characters, are stored
+ * in MSB order with no change to their sign bit.</p>
+ *
+ * <p>Strings and character arrays are stored either as a fixed length array of
+ * unicode characters, where the length must be known by the application, or as
+ * a null-terminated UTF byte array.</p>
+ * <ul>
+ * <li>Null strings are UTF encoded as { 0xFF }, which is not allowed in a
+ * standard UTF encoding. This allows null strings, as distinct from empty or
+ * zero length strings, to be represented in a tuple. Using the default
+ * comparator, null strings will be ordered last.</li>
+ * <li>Zero (0x0000) character values are UTF encoded as non-zero values, and
+ * therefore embedded zeros in the string are supported. The sequence { 0xC0,
+ * 0x80 } is used to encode a zero character. This UTF encoding is the same
+ * one used by native Java UTF libraries. However, this encoding of zero does
+ * impact the lexicographical ordering, and zeros will not be sorted first (the
+ * natural order) or last. For all character values other than zero, the
+ * default UTF byte ordering is the same as the Unicode lexicographical
+ * character ordering.</li>
+ * </ul>
+ *
+ * <p>Floats and doubles are stored in standard Java integer-bit representation
+ * (IEEE 754). Non-negative numbers are correctly ordered by numeric value.
+ * However, negative numbers are not correctly ordered; therefore, if you use
+ * negative floating point numbers in a key, you'll need to implement and
+ * configure a custom comparator to get correct numeric ordering.</p>
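Illustrative sketch (not from the patch) of the round trip and of the sort-friendly integer encoding described above: with the sign bit inverted, -1 ranks before 1 when the encoded keys are compared as unsigned bytes, which is how a byte-ordered database compares them. The values are invented.

    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    public class TupleOrderingSketch {

        // Lexicographic comparison of unsigned bytes, as a byte-ordered
        // database would compare keys.
        static int compareUnsigned(byte[] a, byte[] b) {
            int len = Math.min(a.length, b.length);
            for (int i = 0; i < len; i++) {
                int ua = a[i] & 0xFF;
                int ub = b[i] & 0xFF;
                if (ua != ub) {
                    return ua - ub;
                }
            }
            return a.length - b.length;
        }

        // Encodes an int in the tuple format and copies out the bytes.
        static byte[] encode(int value) {
            TupleOutput out = new TupleOutput();
            out.writeInt(value);
            byte[] bytes = new byte[out.getBufferLength()];
            System.arraycopy(out.getBufferBytes(), out.getBufferOffset(),
                             bytes, 0, bytes.length);
            return bytes;
        }

        public static void main(String[] args) {
            byte[] minusOne = encode(-1);
            byte[] plusOne = encode(1);

            // Negative sorts before positive when compared as unsigned bytes.
            System.out.println(compareUnsigned(minusOne, plusOne) < 0); // true

            // Round trip through TupleInput recovers the original value.
            TupleInput in = new TupleInput(minusOne);
            System.out.println(in.readInt()); // -1
        }
    }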

    + * + * @author Mark Hayes + */ +public class TupleInput extends FastInputStream { + + /** + * Creates a tuple input object for reading a byte array of tuple data. A + * reference to the byte array will be kept by this object (it will not be + * copied) and therefore the byte array should not be modified while this + * object is in use. + * + * @param buffer is the byte array to be read and should contain data in + * tuple format. + */ + public TupleInput(byte[] buffer) { + + super(buffer); + } + + /** + * Creates a tuple input object for reading a byte array of tuple data at + * a given offset for a given length. A reference to the byte array will + * be kept by this object (it will not be copied) and therefore the byte + * array should not be modified while this object is in use. + * + * @param buffer is the byte array to be read and should contain data in + * tuple format. + * + * @param offset is the byte offset at which to begin reading. + * + * @param length is the number of bytes to be read. + */ + public TupleInput(byte[] buffer, int offset, int length) { + + super(buffer, offset, length); + } + + /** + * Creates a tuple input object from the data contained in a tuple output + * object. A reference to the tuple output's byte array will be kept by + * this object (it will not be copied) and therefore the tuple output + * object should not be modified while this object is in use. + * + * @param output is the tuple output object containing the data to be read. + */ + public TupleInput(TupleOutput output) { + + super(output.getBufferBytes(), output.getBufferOffset(), + output.getBufferLength()); + } + + // --- begin DataInput compatible methods --- + + /** + * Reads a null-terminated UTF string from the data buffer and converts + * the data from UTF to Unicode. + * Reads values that were written using {@link + * TupleOutput#writeString(String)}. + * + * @return the converted string. + * + * @throws IndexOutOfBoundsException if no null terminating byte is found + * in the buffer. + * + * @throws IllegalArgumentException malformed UTF data is encountered. + */ + public final String readString() + throws IndexOutOfBoundsException, IllegalArgumentException { + + byte[] buf = getBufferBytes(); + int off = getBufferOffset(); + if (available() >= 2 && + buf[off] == TupleOutput.NULL_STRING_UTF_VALUE && + buf[off + 1] == 0) { + skip(2); + return null; + } else { + int byteLen = UtfOps.getZeroTerminatedByteLength(buf, off); + skip(byteLen + 1); + return UtfOps.bytesToString(buf, off, byteLen); + } + } + + /** + * Reads a char (two byte) unsigned value from the buffer. + * Reads values that were written using {@link TupleOutput#writeChar}. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + */ + public final char readChar() throws IndexOutOfBoundsException { + + return (char) readUnsignedShort(); + } + + /** + * Reads a boolean (one byte) unsigned value from the buffer and returns + * true if it is non-zero and false if it is zero. + * Reads values that were written using {@link TupleOutput#writeBoolean}. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + */ + public final boolean readBoolean() throws IndexOutOfBoundsException { + + int c = readFast(); + if (c < 0) { + throw new IndexOutOfBoundsException(); + } + return (c != 0); + } + + /** + * Reads a signed byte (one byte) value from the buffer. 
+ * Reads values that were written using {@link TupleOutput#writeByte}. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + */ + public final byte readByte() throws IndexOutOfBoundsException { + + return (byte) (readUnsignedByte() ^ 0x80); + } + + /** + * Reads a signed short (two byte) value from the buffer. + * Reads values that were written using {@link TupleOutput#writeShort}. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + */ + public final short readShort() throws IndexOutOfBoundsException { + + return (short) (readUnsignedShort() ^ 0x8000); + } + + /** + * Reads a signed int (four byte) value from the buffer. + * Reads values that were written using {@link TupleOutput#writeInt}. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + */ + public final int readInt() throws IndexOutOfBoundsException { + + return (int) (readUnsignedInt() ^ 0x80000000); + } + + /** + * Reads a signed long (eight byte) value from the buffer. + * Reads values that were written using {@link TupleOutput#writeLong}. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + */ + public final long readLong() throws IndexOutOfBoundsException { + + return readUnsignedLong() ^ 0x8000000000000000L; + } + + /** + * Reads a signed float (four byte) value from the buffer. + * Reads values that were written using {@link TupleOutput#writeFloat}. + * Float.intBitsToFloat is used to convert the signed int + * value. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + */ + public final float readFloat() throws IndexOutOfBoundsException { + + return Float.intBitsToFloat((int) readUnsignedInt()); + } + + /** + * Reads a signed double (eight byte) value from the buffer. + * Reads values that were written using {@link TupleOutput#writeDouble}. + * Double.longBitsToDouble is used to convert the signed long + * value. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + */ + public final double readDouble() throws IndexOutOfBoundsException { + + return Double.longBitsToDouble(readUnsignedLong()); + } + + /** + * Reads an unsigned byte (one byte) value from the buffer. + * Reads values that were written using {@link + * TupleOutput#writeUnsignedByte}. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + */ + public final int readUnsignedByte() throws IndexOutOfBoundsException { + + int c = readFast(); + if (c < 0) { + throw new IndexOutOfBoundsException(); + } + return c; + } + + /** + * Reads an unsigned short (two byte) value from the buffer. + * Reads values that were written using {@link + * TupleOutput#writeUnsignedShort}. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. 
+ */ + public final int readUnsignedShort() throws IndexOutOfBoundsException { + + int c1 = readFast(); + int c2 = readFast(); + if ((c1 | c2) < 0) { + throw new IndexOutOfBoundsException(); + } + return ((c1 << 8) | c2); + } + + // --- end DataInput compatible methods --- + + /** + * Reads an unsigned int (four byte) value from the buffer. + * Reads values that were written using {@link + * TupleOutput#writeUnsignedInt}. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + */ + public final long readUnsignedInt() throws IndexOutOfBoundsException { + + long c1 = readFast(); + long c2 = readFast(); + long c3 = readFast(); + long c4 = readFast(); + if ((c1 | c2 | c3 | c4) < 0) { + throw new IndexOutOfBoundsException(); + } + return ((c1 << 24) | (c2 << 16) | (c3 << 8) | c4); + } + + /** + * This method is private since an unsigned long cannot be treated as + * such in Java, nor converted to a BigInteger of the same value. + */ + private final long readUnsignedLong() throws IndexOutOfBoundsException { + + long c1 = readFast(); + long c2 = readFast(); + long c3 = readFast(); + long c4 = readFast(); + long c5 = readFast(); + long c6 = readFast(); + long c7 = readFast(); + long c8 = readFast(); + if ((c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8) < 0) { + throw new IndexOutOfBoundsException(); + } + return ((c1 << 56) | (c2 << 48) | (c3 << 40) | (c4 << 32) | + (c5 << 24) | (c6 << 16) | (c7 << 8) | c8); + } + + /** + * Reads the specified number of bytes from the buffer, converting each + * unsigned byte value to a character of the resulting string. + * Reads values that were written using {@link TupleOutput#writeBytes}. + * Only characters with values below 0x100 may be read using this method. + * + * @param length is the number of bytes to be read. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + */ + public final String readBytes(int length) + throws IndexOutOfBoundsException { + + StringBuffer buf = new StringBuffer(length); + for (int i = 0; i < length; i++) { + int c = readFast(); + if (c < 0) { + throw new IndexOutOfBoundsException(); + } + buf.append((char) c); + } + return buf.toString(); + } + + /** + * Reads the specified number of characters from the buffer, converting + * each two byte unsigned value to a character of the resulting string. + * Reads values that were written using {@link TupleOutput#writeChars}. + * + * @param length is the number of characters to be read. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + */ + public final String readChars(int length) + throws IndexOutOfBoundsException { + + StringBuffer buf = new StringBuffer(length); + for (int i = 0; i < length; i++) { + buf.append(readChar()); + } + return buf.toString(); + } + + /** + * Reads the specified number of bytes from the buffer, converting each + * unsigned byte value to a character of the resulting array. + * Reads values that were written using {@link TupleOutput#writeBytes}. + * Only characters with values below 0x100 may be read using this method. + * + * @param chars is the array to receive the data and whose length is used + * to determine the number of bytes to be read. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. 
+ */ + public final void readBytes(char[] chars) + throws IndexOutOfBoundsException { + + for (int i = 0; i < chars.length; i++) { + int c = readFast(); + if (c < 0) { + throw new IndexOutOfBoundsException(); + } + chars[i] = (char) c; + } + } + + /** + * Reads the specified number of characters from the buffer, converting + * each two byte unsigned value to a character of the resulting array. + * Reads values that were written using {@link TupleOutput#writeChars}. + * + * @param chars is the array to receive the data and whose length is used + * to determine the number of characters to be read. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + */ + public final void readChars(char[] chars) + throws IndexOutOfBoundsException { + + for (int i = 0; i < chars.length; i++) { + chars[i] = readChar(); + } + } + + /** + * Reads the specified number of UTF characters string from the data + * buffer and converts the data from UTF to Unicode. + * Reads values that were written using {@link + * TupleOutput#writeString(char[])}. + * + * @param length is the number of characters to be read. + * + * @return the converted string. + * + * @throws IndexOutOfBoundsException if no null terminating byte is found + * in the buffer. + * + * @throws IllegalArgumentException malformed UTF data is encountered. + */ + public final String readString(int length) + throws IndexOutOfBoundsException, IllegalArgumentException { + + char[] chars = new char[length]; + readString(chars); + return new String(chars); + } + + /** + * Reads the specified number of UTF characters string from the data + * buffer and converts the data from UTF to Unicode. + * Reads values that were written using {@link + * TupleOutput#writeString(char[])}. + * + * @param chars is the array to receive the data and whose length is used + * to determine the number of characters to be read. + * + * @return the converted string. + * + * @throws IndexOutOfBoundsException if no null terminating byte is found + * in the buffer. + * + * @throws IllegalArgumentException malformed UTF data is encountered. + */ + public final void readString(char[] chars) + throws IndexOutOfBoundsException, IllegalArgumentException { + + byte[] buf = getBufferBytes(); + off = UtfOps.bytesToChars(buf, off, chars, 0, chars.length, false); + } +} diff --git a/db/java/src/com/sleepycat/bind/tuple/TupleInputBinding.java b/db/java/src/com/sleepycat/bind/tuple/TupleInputBinding.java new file mode 100644 index 000000000..20523dfce --- /dev/null +++ b/db/java/src/com/sleepycat/bind/tuple/TupleInputBinding.java @@ -0,0 +1,46 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TupleInputBinding.java,v 1.2 2004/06/04 18:24:50 mark Exp $ + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.db.DatabaseEntry; + +/** + * A concrete EntryBinding that uses the TupleInput + * object as the key or data object. + * + * A concrete tuple binding for key or data entries which are {@link + * TupleInput} objects. This binding is used when tuples themselves are the + * objects, rather than using application defined objects. A {@link TupleInput} + * must always be used. To convert a {@link TupleOutput} to a {@link + * TupleInput}, use the {@link TupleInput#TupleInput(TupleOutput)} constructor. 
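Illustrative sketch (not from the patch) of building a tuple directly and storing it through this binding; the value is invented.

    import com.sleepycat.bind.EntryBinding;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleInputBinding;
    import com.sleepycat.bind.tuple.TupleOutput;
    import com.sleepycat.db.DatabaseEntry;

    public class TupleInputBindingUsage {
        public static void main(String[] args) {
            EntryBinding binding = new TupleInputBinding();

            // Build a tuple, convert it to a TupleInput, and store it.
            TupleOutput out = new TupleOutput();
            out.writeString("part-42");
            TupleInput in = new TupleInput(out);

            DatabaseEntry entry = new DatabaseEntry();
            binding.objectToEntry(in, entry);

            // Reading back yields a TupleInput positioned at the tuple start.
            TupleInput stored = (TupleInput) binding.entryToObject(entry);
            System.out.println(stored.readString()); // part-42
        }
    }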
+ * + * @author Mark Hayes + */ +public class TupleInputBinding implements EntryBinding { + + /** + * Creates a tuple input binding. + */ + public TupleInputBinding() { + } + + // javadoc is inherited + public Object entryToObject(DatabaseEntry entry) { + + return TupleBinding.entryToInput(entry); + } + + // javadoc is inherited + public void objectToEntry(Object object, DatabaseEntry entry) { + + TupleBinding.inputToEntry((TupleInput) object, entry); + } +} diff --git a/db/java/src/com/sleepycat/bind/tuple/TupleMarshalledBinding.java b/db/java/src/com/sleepycat/bind/tuple/TupleMarshalledBinding.java new file mode 100644 index 000000000..05644eab0 --- /dev/null +++ b/db/java/src/com/sleepycat/bind/tuple/TupleMarshalledBinding.java @@ -0,0 +1,70 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TupleMarshalledBinding.java,v 1.3 2004/09/22 18:01:01 bostic Exp $ + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * A concrete TupleBinding that delegates to the + * MarshalledTupleEntry interface of the data or key object. + * + *

+ * <p>This class works by calling the methods of the {@link
+ * MarshalledTupleEntry} interface, which must be implemented by the key or
+ * data class, to convert between the key or data entry and the object.</p>

    + * + * @author Mark Hayes + */ +public class TupleMarshalledBinding extends TupleBinding { + + private Class cls; + + /** + * Creates a tuple marshalled binding object. + * + *

+ * <p>The given class is used to instantiate key or data objects with
+ * {@link Class#newInstance}, and therefore must be a public class and have a
+ * public no-arguments constructor. It must also implement the {@link
+ * MarshalledTupleEntry} interface.</p>
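Illustrative sketch (not from the patch) of constructing the binding for the hypothetical PartData class sketched earlier; any public class with a public no-argument constructor that implements MarshalledTupleEntry would work the same way.

    import com.sleepycat.bind.EntryBinding;
    import com.sleepycat.bind.tuple.TupleMarshalledBinding;
    import com.sleepycat.db.DatabaseEntry;

    public class TupleMarshalledBindingUsage {
        public static void main(String[] args) {
            // PartData is the hypothetical MarshalledTupleEntry class
            // sketched earlier; it is not part of this patch.
            EntryBinding binding = new TupleMarshalledBinding(PartData.class);

            DatabaseEntry entry = new DatabaseEntry();
            binding.objectToEntry(new PartData("widget", 3), entry);
            PartData copy = (PartData) binding.entryToObject(entry);
            System.out.println(copy != null);
        }
    }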

    + * + * @param cls is the class of the key or data objects. + */ + public TupleMarshalledBinding(Class cls) { + + this.cls = cls; + + /* The class will be used to instantiate the object. */ + if (!MarshalledTupleEntry.class.isAssignableFrom(cls)) { + throw new IllegalArgumentException(cls.toString() + + " does not implement MarshalledTupleEntry"); + } + } + + // javadoc is inherited + public Object entryToObject(TupleInput input) { + + try { + MarshalledTupleEntry obj = + (MarshalledTupleEntry) cls.newInstance(); + obj.unmarshalEntry(input); + return obj; + } catch (IllegalAccessException e) { + throw new RuntimeExceptionWrapper(e); + } catch (InstantiationException e) { + throw new RuntimeExceptionWrapper(e); + } + } + + // javadoc is inherited + public void objectToEntry(Object object, TupleOutput output) { + + MarshalledTupleEntry obj = (MarshalledTupleEntry) object; + obj.marshalEntry(output); + } +} diff --git a/db/java/src/com/sleepycat/bind/tuple/TupleOutput.java b/db/java/src/com/sleepycat/bind/tuple/TupleOutput.java new file mode 100644 index 000000000..da1fa0c8f --- /dev/null +++ b/db/java/src/com/sleepycat/bind/tuple/TupleOutput.java @@ -0,0 +1,398 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TupleOutput.java,v 1.4 2004/09/01 14:34:20 mark Exp $ + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.util.FastOutputStream; +import com.sleepycat.util.UtfOps; + +/** + * An OutputStream with DataOutput-like methods for + * writing tuple fields. It is used by TupleBinding. + * + *

+ * <p>This class has many methods that have the same signatures as methods in
+ * the {@link java.io.DataOutput} interface. The reason this class does not
+ * implement {@link java.io.DataOutput} is because it would break the interface
+ * contract for those methods because of data format differences.</p>
+ *
+ * <p>Signed numbers are stored in the buffer in MSB (most significant byte
+ * first) order with their sign bit (high-order bit) inverted to cause negative
+ * numbers to be sorted first when comparing values as unsigned byte arrays,
+ * as done in a database. Unsigned numbers, including characters, are stored
+ * in MSB order with no change to their sign bit.</p>
+ *
+ * <p>Strings and character arrays are stored either as a fixed length array of
+ * unicode characters, where the length must be known by the application, or as
+ * a null-terminated UTF byte array.</p>
+ * <ul>
+ * <li>Null strings are UTF encoded as { 0xFF }, which is not allowed in a
+ * standard UTF encoding. This allows null strings, as distinct from empty or
+ * zero length strings, to be represented in a tuple. Using the default
+ * comparator, null strings will be ordered last.</li>
+ * <li>Zero (0x0000) character values are UTF encoded as non-zero values, and
+ * therefore embedded zeros in the string are supported. The sequence { 0xC0,
+ * 0x80 } is used to encode a zero character. This UTF encoding is the same
+ * one used by native Java UTF libraries. However, this encoding of zero does
+ * impact the lexicographical ordering, and zeros will not be sorted first (the
+ * natural order) or last. For all character values other than zero, the
+ * default UTF byte ordering is the same as the Unicode lexicographical
+ * character ordering.</li>
+ * </ul>
+ *
+ * <p>Floats and doubles are stored in standard Java integer-bit representation
+ * (IEEE 754). Non-negative numbers are correctly ordered by numeric value.
+ * However, negative numbers are not correctly ordered; therefore, if you use
+ * negative floating point numbers in a key, you'll need to implement and
+ * configure a custom comparator to get correct numeric ordering.</p>

    + * + * @author Mark Hayes + */ +public class TupleOutput extends FastOutputStream { + + /** + * We represent a null string as a single FF UTF character, which cannot + * occur in a UTF encoded string. + */ + static final int NULL_STRING_UTF_VALUE = ((byte) 0xFF); + + /** + * Creates a tuple output object for writing a byte array of tuple data. + */ + public TupleOutput() { + + super(); + } + + /** + * Creates a tuple output object for writing a byte array of tuple data, + * using a given buffer. A new buffer will be allocated only if the number + * of bytes needed is greater than the length of this buffer. A reference + * to the byte array will be kept by this object and therefore the byte + * array should not be modified while this object is in use. + * + * @param buffer is the byte array to use as the buffer. + */ + public TupleOutput(byte[] buffer) { + + super(buffer); + } + + // --- begin DataOutput compatible methods --- + + /** + * Writes the specified bytes to the buffer, converting each character to + * an unsigned byte value. + * Writes values that can be read using {@link TupleInput#readBytes}. + * Only characters with values below 0x100 may be written using this + * method, since the high-order 8 bits of all characters are discarded. + * + * @param val is the string containing the values to be written. + * + * @return this tuple output object. + * + * @throws NullPointerException if the val parameter is null. + */ + public final TupleOutput writeBytes(String val) { + + writeBytes(val.toCharArray()); + return this; + } + + /** + * Writes the specified characters to the buffer, converting each character + * to a two byte unsigned value. + * Writes values that can be read using {@link TupleInput#readChars}. + * + * @param val is the string containing the characters to be written. + * + * @return this tuple output object. + * + * @throws NullPointerException if the val parameter is null. + */ + public final TupleOutput writeChars(String val) { + + writeChars(val.toCharArray()); + return this; + } + + /** + * Writes the specified characters to the buffer, converting each character + * to UTF format, and adding a null terminator byte. + * Note that zero (0x0000) character values are encoded as non-zero values + * and a null String parameter is encoded as 0xFF. + * Writes values that can be read using {@link TupleInput#readString()}. + * + * @param val is the string containing the characters to be written. + * + * @return this tuple output object. + */ + public final TupleOutput writeString(String val) { + + if (val != null) { + writeString(val.toCharArray()); + } else { + writeFast(NULL_STRING_UTF_VALUE); + } + writeFast(0); + return this; + } + + /** + * Writes a char (two byte) unsigned value to the buffer. + * Writes values that can be read using {@link TupleInput#readChar}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + */ + public final TupleOutput writeChar(int val) { + + writeFast((byte) (val >>> 8)); + writeFast((byte) val); + return this; + } + + /** + * Writes a boolean (one byte) unsigned value to the buffer, writing one + * if the value is true and zero if it is false. + * Writes values that can be read using {@link TupleInput#readBoolean}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + */ + public final TupleOutput writeBoolean(boolean val) { + + writeFast(val ? 
(byte)1 : (byte)0); + return this; + } + + /** + * Writes an signed byte (one byte) value to the buffer. + * Writes values that can be read using {@link TupleInput#readByte}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + */ + public final TupleOutput writeByte(int val) { + + writeUnsignedByte(val ^ 0x80); + return this; + } + + /** + * Writes an signed short (two byte) value to the buffer. + * Writes values that can be read using {@link TupleInput#readShort}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + */ + public final TupleOutput writeShort(int val) { + + writeUnsignedShort(val ^ 0x8000); + return this; + } + + /** + * Writes an signed int (four byte) value to the buffer. + * Writes values that can be read using {@link TupleInput#readInt}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + */ + public final TupleOutput writeInt(int val) { + + writeUnsignedInt(val ^ 0x80000000); + return this; + } + + /** + * Writes an signed long (eight byte) value to the buffer. + * Writes values that can be read using {@link TupleInput#readLong}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + */ + public final TupleOutput writeLong(long val) { + + writeUnsignedLong(val ^ 0x8000000000000000L); + return this; + } + + /** + * Writes an signed float (four byte) value to the buffer. + * Writes values that can be read using {@link TupleInput#readFloat}. + * Float.floatToIntBits is used to convert the signed float + * value. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + */ + public final TupleOutput writeFloat(float val) { + + writeUnsignedInt(Float.floatToIntBits(val)); + return this; + } + + /** + * Writes an signed double (eight byte) value to the buffer. + * Writes values that can be read using {@link TupleInput#readDouble}. + * Double.doubleToLongBits is used to convert the signed + * double value. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + */ + public final TupleOutput writeDouble(double val) { + + writeUnsignedLong(Double.doubleToLongBits(val)); + return this; + } + + // --- end DataOutput compatible methods --- + + /** + * Writes the specified bytes to the buffer, converting each character to + * an unsigned byte value. + * Writes values that can be read using {@link TupleInput#readBytes}. + * Only characters with values below 0x100 may be written using this + * method, since the high-order 8 bits of all characters are discarded. + * + * @param chars is the array of values to be written. + * + * @return this tuple output object. + * + * @throws NullPointerException if the chars parameter is null. + */ + public final TupleOutput writeBytes(char[] chars) { + + for (int i = 0; i < chars.length; i++) { + writeFast((byte) chars[i]); + } + return this; + } + + /** + * Writes the specified characters to the buffer, converting each character + * to a two byte unsigned value. + * Writes values that can be read using {@link TupleInput#readChars}. + * + * @param chars is the array of characters to be written. + * + * @return this tuple output object. + * + * @throws NullPointerException if the chars parameter is null. 
+ */ + public final TupleOutput writeChars(char[] chars) { + + for (int i = 0; i < chars.length; i++) { + writeFast((byte) (chars[i] >>> 8)); + writeFast((byte) chars[i]); + } + return this; + } + + /** + * Writes the specified characters to the buffer, converting each character + * to UTF format. + * Note that zero (0x0000) character values are encoded as non-zero values. + * Writes values that can be read using {@link TupleInput#readString(int)} + * or {@link TupleInput#readString(char[])}. + * + * @param chars is the array of characters to be written. + * + * @return this tuple output object. + * + * @throws NullPointerException if the chars parameter is null. + */ + public final TupleOutput writeString(char[] chars) { + + if (chars.length == 0) return this; + + int utfLength = UtfOps.getByteLength(chars); + + makeSpace(utfLength); + UtfOps.charsToBytes(chars, 0, getBufferBytes(), getBufferLength(), + chars.length); + addSize(utfLength); + return this; + } + + /** + * Writes an unsigned byte (one byte) value to the buffer. + * Writes values that can be read using {@link + * TupleInput#readUnsignedByte}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + */ + public final TupleOutput writeUnsignedByte(int val) { + + writeFast(val); + return this; + } + + /** + * Writes an unsigned short (two byte) value to the buffer. + * Writes values that can be read using {@link + * TupleInput#readUnsignedShort}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + */ + public final TupleOutput writeUnsignedShort(int val) { + + writeFast((byte) (val >>> 8)); + writeFast((byte) val); + return this; + } + + /** + * Writes an unsigned int (four byte) value to the buffer. + * Writes values that can be read using {@link + * TupleInput#readUnsignedInt}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + */ + public final TupleOutput writeUnsignedInt(long val) { + + writeFast((byte) (val >>> 24)); + writeFast((byte) (val >>> 16)); + writeFast((byte) (val >>> 8)); + writeFast((byte) val); + return this; + } + + /** + * This method is private since an unsigned long cannot be treated as + * such in Java, nor converted to a BigInteger of the same value. + */ + private final TupleOutput writeUnsignedLong(long val) { + + writeFast((byte) (val >>> 56)); + writeFast((byte) (val >>> 48)); + writeFast((byte) (val >>> 40)); + writeFast((byte) (val >>> 32)); + writeFast((byte) (val >>> 24)); + writeFast((byte) (val >>> 16)); + writeFast((byte) (val >>> 8)); + writeFast((byte) val); + return this; + } +} diff --git a/db/java/src/com/sleepycat/bind/tuple/TupleTupleBinding.java b/db/java/src/com/sleepycat/bind/tuple/TupleTupleBinding.java new file mode 100644 index 000000000..ac8f4158f --- /dev/null +++ b/db/java/src/com/sleepycat/bind/tuple/TupleTupleBinding.java @@ -0,0 +1,96 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TupleTupleBinding.java,v 1.2 2004/06/04 18:24:50 mark Exp $ + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.db.DatabaseEntry; + +/** + * An abstract EntityBinding that treats an entity's key entry and + * data entry as tuples. + * + *

+ * <p>This class takes care of converting the entries to/from {@link
+ * TupleInput} and {@link TupleOutput} objects. Its three abstract methods
+ * must be implemented by a concrete subclass to convert between tuples and
+ * entity objects.</p>
+ * <ul>
+ * <li>{@link #entryToObject(TupleInput,TupleInput)}</li>
+ * <li>{@link #objectToKey(Object,TupleOutput)}</li>
+ * <li>{@link #objectToData(Object,TupleOutput)}</li>
+ * </ul>
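Illustrative sketch (not from the patch) of a concrete subclass implementing the three abstract methods; PartEntity and its key/data layout are invented.

    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;
    import com.sleepycat.bind.tuple.TupleTupleBinding;

    public class PartEntityBinding extends TupleTupleBinding {

        // An invented entity class used only for this sketch.
        public static class PartEntity {
            String number;      // stored in the key entry
            String description; // stored in the data entry
            int quantity;       // stored in the data entry
        }

        public Object entryToObject(TupleInput keyInput, TupleInput dataInput) {
            PartEntity part = new PartEntity();
            part.number = keyInput.readString();
            part.description = dataInput.readString();
            part.quantity = dataInput.readInt();
            return part;
        }

        public void objectToKey(Object object, TupleOutput output) {
            output.writeString(((PartEntity) object).number);
        }

        public void objectToData(Object object, TupleOutput output) {
            PartEntity part = (PartEntity) object;
            output.writeString(part.description);
            output.writeInt(part.quantity);
        }
    }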
    + * + * @author Mark Hayes + */ +public abstract class TupleTupleBinding implements EntityBinding { + + /** + * Creates a tuple-tuple entity binding. + */ + public TupleTupleBinding() { + } + + // javadoc is inherited + public Object entryToObject(DatabaseEntry key, DatabaseEntry data) { + + return entryToObject(TupleBinding.entryToInput(key), + TupleBinding.entryToInput(data)); + } + + // javadoc is inherited + public void objectToKey(Object object, DatabaseEntry key) { + + TupleOutput output = TupleBinding.newOutput(); + objectToKey(object, output); + TupleBinding.outputToEntry(output, key); + } + + // javadoc is inherited + public void objectToData(Object object, DatabaseEntry data) { + + TupleOutput output = TupleBinding.newOutput(); + objectToData(object, output); + TupleBinding.outputToEntry(output, data); + } + + // abstract methods + + /** + * Constructs an entity object from {@link TupleInput} key and data + * entries. + * + * @param keyInput is the {@link TupleInput} key entry object. + * + * @param dataInput is the {@link TupleInput} data entry object. + * + * @return the entity object constructed from the key and data. + */ + public abstract Object entryToObject(TupleInput keyInput, + TupleInput dataInput); + + /** + * Extracts a key tuple from an entity object. + * + * @param object is the entity object. + * + * @param output is the {@link TupleOutput} to which the key should be + * written. + */ + public abstract void objectToKey(Object object, TupleOutput output); + + /** + * Extracts a key tuple from an entity object. + * + * @param object is the entity object. + * + * @param output is the {@link TupleOutput} to which the data should be + * written. + */ + public abstract void objectToData(Object object, TupleOutput output); +} diff --git a/db/java/src/com/sleepycat/bind/tuple/TupleTupleKeyCreator.java b/db/java/src/com/sleepycat/bind/tuple/TupleTupleKeyCreator.java new file mode 100644 index 000000000..f5041a94e --- /dev/null +++ b/db/java/src/com/sleepycat/bind/tuple/TupleTupleKeyCreator.java @@ -0,0 +1,105 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TupleTupleKeyCreator.java,v 1.4 2004/08/02 18:52:05 mjc Exp $ + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.db.DatabaseEntry; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.SecondaryDatabase; +import com.sleepycat.db.SecondaryKeyCreator; + +/** + * An abstract key creator that uses a tuple key and a tuple data entry. This + * class takes care of converting the key and data entry to/from {@link + * TupleInput} and {@link TupleOutput} objects. + * + * @author Mark Hayes + */ +public abstract class TupleTupleKeyCreator + implements SecondaryKeyCreator { + + /** + * Creates a tuple-tuple key creator. 
+ */ + public TupleTupleKeyCreator() { + } + + // javadoc is inherited + public boolean createSecondaryKey(SecondaryDatabase db, + DatabaseEntry primaryKeyEntry, + DatabaseEntry dataEntry, + DatabaseEntry indexKeyEntry) + throws DatabaseException { + + TupleOutput output = TupleBinding.newOutput(); + TupleInput primaryKeyInput = + TupleBinding.entryToInput(primaryKeyEntry); + TupleInput dataInput = TupleBinding.entryToInput(dataEntry); + if (createSecondaryKey(primaryKeyInput, dataInput, output)) { + TupleBinding.outputToEntry(output, indexKeyEntry); + return true; + } else { + return false; + } + } + + // javadoc is inherited + public boolean nullifyForeignKey(SecondaryDatabase db, + DatabaseEntry dataEntry) + throws DatabaseException { + + TupleOutput output = TupleBinding.newOutput(); + if (nullifyForeignKey(TupleBinding.entryToInput(dataEntry), + output)) { + TupleBinding.outputToEntry(output, dataEntry); + return true; + } else { + return false; + } + } + + /** + * Creates the index key from primary key tuple and data tuple. + * + * @param primaryKeyInput is the {@link TupleInput} for the primary key + * entry. + * + * @param dataInput is the {@link TupleInput} for the data entry. + * + * @param indexKeyOutput is the destination index key tuple. + * + * @return true if a key was created, or false to indicate that the key is + * not present. + */ + public abstract boolean createSecondaryKey(TupleInput primaryKeyInput, + TupleInput dataInput, + TupleOutput indexKeyOutput); + + /** + * Clears the index key in the tuple data entry. The dataInput should be + * read and then written to the dataOutput, clearing the index key in the + * process. + * + *

+ * <p>The secondary key should be output or removed by this method such that
+ * {@link #createSecondaryKey} will return false. Other fields in the data
+ * object should remain unchanged.</p>
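Illustrative sketch (not from the patch) of a key creator following this contract; the assumed data tuple layout, a supplier id followed by a quantity, is invented.

    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;
    import com.sleepycat.bind.tuple.TupleTupleKeyCreator;

    public class SupplierKeyCreator extends TupleTupleKeyCreator {

        // Data tuple layout assumed by this sketch: supplierId, quantity.
        public boolean createSecondaryKey(TupleInput primaryKeyInput,
                                          TupleInput dataInput,
                                          TupleOutput indexKeyOutput) {
            String supplierId = dataInput.readString();
            if (supplierId == null) {
                return false; // no index key for this record
            }
            indexKeyOutput.writeString(supplierId);
            return true;
        }

        // Copies the data tuple, writing a null supplier id so that
        // createSecondaryKey will return false afterwards.
        public boolean nullifyForeignKey(TupleInput dataInput,
                                         TupleOutput dataOutput) {
            String supplierId = dataInput.readString();
            if (supplierId == null) {
                return false; // nothing to clear
            }
            int quantity = dataInput.readInt();
            dataOutput.writeString((String) null);
            dataOutput.writeInt(quantity);
            return true;
        }
    }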

    + * + * @param dataInput is the {@link TupleInput} for the data entry. + * + * @param dataOutput is the destination {@link TupleOutput}. + * + * @return true if the key was cleared, or false to indicate that the key + * is not present and no change is necessary. + */ + public boolean nullifyForeignKey(TupleInput dataInput, + TupleOutput dataOutput) { + + return false; + } +} diff --git a/db/java/src/com/sleepycat/bind/tuple/TupleTupleMarshalledBinding.java b/db/java/src/com/sleepycat/bind/tuple/TupleTupleMarshalledBinding.java new file mode 100644 index 000000000..370b4cc8a --- /dev/null +++ b/db/java/src/com/sleepycat/bind/tuple/TupleTupleMarshalledBinding.java @@ -0,0 +1,94 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TupleTupleMarshalledBinding.java,v 1.3 2004/09/22 18:01:01 bostic Exp $ + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * A concrete TupleTupleBinding that delegates to the + * MarshalledTupleEntry and + * MarshalledTupleKeyEntity interfaces of the entity class. + * + *

+ * <p>This class calls the methods of the {@link MarshalledTupleEntry}
+ * interface to convert between the data entry and entity object. It calls the
+ * methods of the {@link MarshalledTupleKeyEntity} interface to convert between
+ * the key entry and the entity object. These two interfaces must both be
+ * implemented by the entity class.</p>

    + * + * @author Mark Hayes + */ +public class TupleTupleMarshalledBinding extends TupleTupleBinding { + + private Class cls; + + /** + * Creates a tuple-tuple marshalled binding object. + * + *

+ * <p>The given class is used to instantiate entity objects with
+ * {@link Class#newInstance}, and therefore must be a public class and have a
+ * public no-arguments constructor. It must also implement the {@link
+ * MarshalledTupleEntry} and {@link MarshalledTupleKeyEntity}
+ * interfaces.</p>

    + * + * @param cls is the class of the entity objects. + */ + public TupleTupleMarshalledBinding(Class cls) { + + this.cls = cls; + + // The entity class will be used to instantiate the entity object. + // + if (!MarshalledTupleKeyEntity.class.isAssignableFrom(cls)) { + throw new IllegalArgumentException(cls.toString() + + " does not implement MarshalledTupleKeyEntity"); + } + if (!MarshalledTupleEntry.class.isAssignableFrom(cls)) { + throw new IllegalArgumentException(cls.toString() + + " does not implement MarshalledTupleEntry"); + } + } + + // javadoc is inherited + public Object entryToObject(TupleInput keyInput, TupleInput dataInput) { + + // This "tricky" binding returns the stored data as the entity, but + // first it sets the transient key fields from the stored key. + MarshalledTupleEntry obj; + try { + obj = (MarshalledTupleEntry) cls.newInstance(); + } catch (IllegalAccessException e) { + throw new RuntimeExceptionWrapper(e); + } catch (InstantiationException e) { + throw new RuntimeExceptionWrapper(e); + } + if (dataInput != null) { // may be null if used by key extractor + obj.unmarshalEntry(dataInput); + } + MarshalledTupleKeyEntity entity = (MarshalledTupleKeyEntity) obj; + if (keyInput != null) { // may be null if used by key extractor + entity.unmarshalPrimaryKey(keyInput); + } + return entity; + } + + // javadoc is inherited + public void objectToKey(Object object, TupleOutput output) { + + MarshalledTupleKeyEntity entity = (MarshalledTupleKeyEntity) object; + entity.marshalPrimaryKey(output); + } + + // javadoc is inherited + public void objectToData(Object object, TupleOutput output) { + + MarshalledTupleEntry entity = (MarshalledTupleEntry) object; + entity.marshalEntry(output); + } +} diff --git a/db/java/src/com/sleepycat/bind/tuple/TupleTupleMarshalledKeyCreator.java b/db/java/src/com/sleepycat/bind/tuple/TupleTupleMarshalledKeyCreator.java new file mode 100644 index 000000000..aa2911a91 --- /dev/null +++ b/db/java/src/com/sleepycat/bind/tuple/TupleTupleMarshalledKeyCreator.java @@ -0,0 +1,75 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TupleTupleMarshalledKeyCreator.java,v 1.2 2004/06/04 18:24:50 mark Exp $ + */ + +package com.sleepycat.bind.tuple; + +/** + * A concrete key creator that works in conjunction with a {@link + * TupleTupleMarshalledBinding}. This key creator works by calling the + * methods of the {@link MarshalledTupleKeyEntity} interface to create and + * clear the index key. + * + *

+ * <p>Note that a marshalled tuple key creator is somewhat less efficient
+ * than a non-marshalled key tuple creator because more conversions are
+ * needed. A marshalled key creator must convert the entry to an object in
+ * order to create the key, while an unmarshalled key creator does not.</p>

    + * + * @author Mark Hayes + */ +public class TupleTupleMarshalledKeyCreator extends TupleTupleKeyCreator { + + private String keyName; + private TupleTupleMarshalledBinding binding; + + /** + * Creates a tuple-tuple marshalled key creator. + * + * @param binding is the binding used for the tuple-tuple entity. + * + * @param keyName is the key name passed to the {@link + * MarshalledTupleKeyEntity#marshalSecondaryKey} method to identify the + * index key. + */ + public TupleTupleMarshalledKeyCreator(TupleTupleMarshalledBinding binding, + String keyName) { + + this.binding = binding; + this.keyName = keyName; + } + + // javadoc is inherited + public boolean createSecondaryKey(TupleInput primaryKeyInput, + TupleInput dataInput, + TupleOutput indexKeyOutput) { + + /* The primary key is unmarshalled before marshalling the index key, to + * account for cases where the index key includes fields taken from the + * primary key. + */ + MarshalledTupleKeyEntity entity = (MarshalledTupleKeyEntity) + binding.entryToObject(primaryKeyInput, dataInput); + + return entity.marshalSecondaryKey(keyName, indexKeyOutput); + } + + // javadoc is inherited + public boolean nullifyForeignKey(TupleInput dataInput, + TupleOutput dataOutput) { + + // XXX null primary key input below may be unexpected by the binding + MarshalledTupleKeyEntity entity = (MarshalledTupleKeyEntity) + binding.entryToObject(null, dataInput); + if (entity.nullifyForeignKey(keyName)) { + binding.objectToData(entity, dataOutput); + return true; + } else { + return false; + } + } +} diff --git a/db/java/src/com/sleepycat/bind/tuple/package.html b/db/java/src/com/sleepycat/bind/tuple/package.html new file mode 100644 index 000000000..9f2723523 --- /dev/null +++ b/db/java/src/com/sleepycat/bind/tuple/package.html @@ -0,0 +1,6 @@ + + + +Bindings that use sequences of primitive fields, or tuples. + + diff --git a/db/java/src/com/sleepycat/collections/CurrentTransaction.java b/db/java/src/com/sleepycat/collections/CurrentTransaction.java new file mode 100644 index 000000000..1876f26fc --- /dev/null +++ b/db/java/src/com/sleepycat/collections/CurrentTransaction.java @@ -0,0 +1,433 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: CurrentTransaction.java,v 1.4 2004/09/22 18:01:02 bostic Exp $ + */ + +package com.sleepycat.collections; + +import java.util.ArrayList; +import java.util.List; +import java.util.WeakHashMap; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.db.Cursor; +import com.sleepycat.db.CursorConfig; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.Environment; +import com.sleepycat.db.EnvironmentConfig; +import com.sleepycat.db.LockMode; +import com.sleepycat.db.Transaction; +import com.sleepycat.db.TransactionConfig; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * Provides access to the current transaction for the current thread within the + * context of a Berkeley DB environment. This class provides explicit + * transaction control beyond that provided by the {@link TransactionRunner} + * class. However, both methods of transaction control manage per-thread + * transactions. + * + * @author Mark Hayes + */ +public class CurrentTransaction { + + /* For internal use, this class doubles as an Environment wrapper. 
*/ + + private static WeakHashMap envMap = new WeakHashMap(); + + private LockMode writeLockMode; + private boolean cdbMode; + private boolean txnMode; + private Environment env; + private ThreadLocal localTrans = new ThreadLocal(); + private ThreadLocal localCdbCursors; + + /** + * Gets the CurrentTransaction accessor for a specified Berkeley DB + * environment. This method always returns the same reference when called + * more than once with the same environment parameter. + * + * @param env is an open Berkeley DB environment. + * + * @return the CurrentTransaction accessor for the given environment, or + * null if the environment is not transactional. + */ + public static CurrentTransaction getInstance(Environment env) { + + CurrentTransaction currentTxn = getInstanceInternal(env); + return currentTxn.isTxnMode() ? currentTxn : null; + } + + /** + * Gets the CurrentTransaction accessor for a specified Berkeley DB + * environment. Unlike getInstance(), this method never returns null. + * + * @param env is an open Berkeley DB environment. + */ + static CurrentTransaction getInstanceInternal(Environment env) { + synchronized (envMap) { + CurrentTransaction myEnv = (CurrentTransaction) envMap.get(env); + if (myEnv == null) { + myEnv = new CurrentTransaction(env); + envMap.put(env, myEnv); + } + return myEnv; + } + } + + private CurrentTransaction(Environment env) { + this.env = env; + try { + EnvironmentConfig config = env.getConfig(); + txnMode = config.getTransactional(); + if (txnMode || DbCompat.getInitializeLocking(config)) { + writeLockMode = LockMode.RMW; + } else { + writeLockMode = LockMode.DEFAULT; + } + cdbMode = DbCompat.getInitializeCDB(config); + if (cdbMode) { + localCdbCursors = new ThreadLocal(); + } + } catch (DatabaseException e) { + throw new RuntimeExceptionWrapper(e); + } + } + + /** + * Returns whether this is a transactional environment. + */ + final boolean isTxnMode() { + + return txnMode; + } + + /** + * Returns whether this is a Concurrent Data Store environment. + */ + final boolean isCdbMode() { + + return cdbMode; + } + + /** + * Return the LockMode.RMW or null, depending on whether locking is + * enabled. LockMode.RMW will cause an error if passed when locking + * is not enabled. Locking is enabled if locking or transactions were + * specified for this environment. + */ + final LockMode getWriteLockMode() { + + return writeLockMode; + } + + /** + * Returns the underlying Berkeley DB environment. + */ + public final Environment getEnvironment() { + + return env; + } + + /** + * Returns the transaction associated with the current thread for this + * environment, or null if no transaction is active. + */ + public final Transaction getTransaction() { + + Trans trans = (Trans) localTrans.get(); + return (trans != null) ? trans.txn : null; + } + + /** + * Begins a new transaction for this environment and associates it with + * the current thread. If a transaction is already active for this + * environment and thread, a nested transaction will be created. + * + * @param config the transaction configuration used for calling + * {@link Environment#beginTransaction}, or null to use the default + * configuration. + * + * @return the new transaction. + * + * @throws DatabaseException if the transaction cannot be started, in which + * case any existing transaction is not affected. + * + * @throws IllegalStateException if a transaction is already active and + * nested transactions are not supported by the environment. 
+ */ + public final Transaction beginTransaction(TransactionConfig config) + throws DatabaseException { + + Trans trans = (Trans) localTrans.get(); + if (trans != null) { + if (trans.txn != null) { + if (!DbCompat.NESTED_TRANSACTIONS) { + throw new IllegalStateException( + "Nested transactions are not supported"); + } + Transaction parentTxn = trans.txn; + trans = new Trans(trans, config); + trans.txn = env.beginTransaction(parentTxn, config); + localTrans.set(trans); + } else { + trans.txn = env.beginTransaction(null, config); + trans.config = config; + } + } else { + trans = new Trans(null, config); + trans.txn = env.beginTransaction(null, config); + localTrans.set(trans); + } + return trans.txn; + } + + /** + * Commits the transaction that is active for the current thread for this + * environment and makes the parent transaction (if any) the current + * transaction. + * + * @return the parent transaction or null if the committed transaction was + * not nested. + * + * @throws DatabaseException if an error occurs committing the transaction. + * The transaction will still be closed and the parent transaction will + * become the current transaction. + * + * @throws IllegalStateException if no transaction is active for the + * current thread for this environment. + */ + public final Transaction commitTransaction() + throws DatabaseException, IllegalStateException { + + Trans trans = (Trans) localTrans.get(); + if (trans != null && trans.txn != null) { + Transaction parent = closeTxn(trans); + trans.txn.commit(); + return parent; + } else { + throw new IllegalStateException("No transaction is active"); + } + } + + /** + * Aborts the transaction that is active for the current thread for this + * environment and makes the parent transaction (if any) the current + * transaction. + * + * @return the parent transaction or null if the aborted transaction was + * not nested. + * + * @throws DatabaseException if an error occurs aborting the transaction. + * The transaction will still be closed and the parent transaction will + * become the current transaction. + * + * @throws IllegalStateException if no transaction is active for the + * current thread for this environment. + */ + public final Transaction abortTransaction() + throws DatabaseException, IllegalStateException { + + Trans trans = (Trans) localTrans.get(); + if (trans != null && trans.txn != null) { + Transaction parent = closeTxn(trans); + trans.txn.abort(); + return parent; + } else { + throw new IllegalStateException("No transaction is active"); + } + } + + /** + * Returns whether the current transaction is a dirtyRead transaction. + */ + final boolean isDirtyRead() { + + Trans trans = (Trans) localTrans.get(); + if (trans != null && trans.config != null) { + return trans.config.getDirtyRead(); + } else { + return false; + } + } + + private Transaction closeTxn(Trans trans) { + + localTrans.set(trans.parent); + return (trans.parent != null) ? trans.parent.txn : null; + } + + private static class Trans { + + private Trans parent; + private Transaction txn; + private TransactionConfig config; + + private Trans(Trans parent, TransactionConfig config) { + + this.parent = parent; + this.config = config; + } + } + + /** + * Opens a cursor for a given database, dup'ing an existing CDB cursor if + * one is open for the current thread. 
+ */ + Cursor openCursor(Database db, boolean writeCursor, Transaction txn) + throws DatabaseException { + + if (cdbMode) { + CdbCursors cdbCursors = null; + WeakHashMap cdbCursorsMap = (WeakHashMap) localCdbCursors.get(); + if (cdbCursorsMap == null) { + cdbCursorsMap = new WeakHashMap(); + localCdbCursors.set(cdbCursorsMap); + } else { + cdbCursors = (CdbCursors) cdbCursorsMap.get(db); + } + if (cdbCursors == null) { + cdbCursors = new CdbCursors(); + cdbCursorsMap.put(db, cdbCursors); + } + List cursors; + CursorConfig config; + if (writeCursor) { + if (cdbCursors.readCursors.size() > 0) { + + /* + * Although CDB allows opening a write cursor when a read + * cursor is open, a self-deadlock will occur if a write is + * attempted for a record that is read-locked; we should + * avoid self-deadlocks at all costs + */ + throw new IllegalStateException( + "cannot open CDB write cursor when read cursor is open"); + } + cursors = cdbCursors.writeCursors; + config = new CursorConfig(); + DbCompat.setWriteCursor(config, true); + } else { + cursors = cdbCursors.readCursors; + config = null; + } + Cursor cursor; + if (cursors.size() > 0) { + Cursor other = ((Cursor) cursors.get(0)); + cursor = other.dup(false); + } else { + cursor = db.openCursor(null, config); + } + cursors.add(cursor); + return cursor; + } else { + return db.openCursor(txn, null); + } + } + + /** + * Duplicates a cursor for a given database. + * + * @param writeCursor true to open a write cursor in a CDB environment, and + * ignored for other environments. + * + * @param samePosition is passed through to Cursor.dup(). + * + * @return the open cursor. + * + * @throws DatabaseException if a database problem occurs. + */ + Cursor dupCursor(Cursor cursor, boolean writeCursor, boolean samePosition) + throws DatabaseException { + + if (cdbMode) { + WeakHashMap cdbCursorsMap = (WeakHashMap) localCdbCursors.get(); + if (cdbCursorsMap != null) { + Database db = cursor.getDatabase(); + CdbCursors cdbCursors = (CdbCursors) cdbCursorsMap.get(db); + if (cdbCursors != null) { + List cursors = writeCursor ? cdbCursors.writeCursors + : cdbCursors.readCursors; + if (cursors.contains(cursor)) { + Cursor newCursor = cursor.dup(samePosition); + cursors.add(newCursor); + return newCursor; + } + } + } + throw new IllegalStateException("cursor to dup not tracked"); + } else { + return cursor.dup(samePosition); + } + } + + /** + * Closes a cursor. + * + * @param cursor the cursor to close. + * + * @throws DatabaseException if a database problem occurs. + */ + void closeCursor(Cursor cursor) + throws DatabaseException { + + if (cursor == null) { + return; + } + if (cdbMode) { + WeakHashMap cdbCursorsMap = (WeakHashMap) localCdbCursors.get(); + if (cdbCursorsMap != null) { + Database db = cursor.getDatabase(); + CdbCursors cdbCursors = (CdbCursors) cdbCursorsMap.get(db); + if (cdbCursors != null) { + if (cdbCursors.readCursors.remove(cursor) || + cdbCursors.writeCursors.remove(cursor)) { + cursor.close(); + return; + } + } + } + throw new IllegalStateException( + "closing CDB cursor that was not known to be open"); + } else { + cursor.close(); + } + } + + /** + * Returns true if a CDB cursor is open and therefore a Database write + * operation should not be attempted since a self-deadlock may result. 
+ */ + boolean isCDBCursorOpen(Database db) + throws DatabaseException { + + if (cdbMode) { + WeakHashMap cdbCursorsMap = (WeakHashMap) localCdbCursors.get(); + if (cdbCursorsMap != null) { + CdbCursors cdbCursors = (CdbCursors) cdbCursorsMap.get(db); + + /* + * FindBugs whines unnecessarily about a Null pointer + * dereference here. + */ + if (cdbCursors != null && + cdbCursors.readCursors.size() > 0 || + cdbCursors.writeCursors.size() > 0) { + return true; + } + } + } + return false; + } + + static final class CdbCursors { + + List writeCursors = new ArrayList(); + List readCursors = new ArrayList(); + } +} diff --git a/db/java/src/com/sleepycat/collections/DataCursor.java b/db/java/src/com/sleepycat/collections/DataCursor.java new file mode 100644 index 000000000..cc77e7a27 --- /dev/null +++ b/db/java/src/com/sleepycat/collections/DataCursor.java @@ -0,0 +1,690 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: DataCursor.java,v 1.4 2004/09/22 18:01:02 bostic Exp $ + */ + +package com.sleepycat.collections; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.db.Cursor; +import com.sleepycat.db.DatabaseEntry; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.JoinConfig; +import com.sleepycat.db.JoinCursor; +import com.sleepycat.db.LockMode; +import com.sleepycat.db.OperationStatus; + +/** + * Represents a Berkeley DB cursor and adds support for indices, bindings and + * key ranges. + * + *

    This class operates on a view and takes care of reading and updating + * indices, calling bindings, constraining access to a key range, etc.
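As a rough sketch of how the collection classes drive this cursor (the helper class is hypothetical and the DataView with key and value bindings is assumed to be built elsewhere), a forward scan over the view's range uses only methods defined in this file:

package com.sleepycat.collections;

import com.sleepycat.db.DatabaseException;
import com.sleepycat.db.OperationStatus;

class DataCursorSketch {
    // "view" is assumed to be an already-constructed DataView with key and
    // value bindings; this loop mirrors how the collection classes scan a
    // key range with a DataCursor.
    static void dumpRange(DataView view) throws DatabaseException {
        DataCursor cursor = new DataCursor(view, false /* writeAllowed */);
        try {
            OperationStatus status = cursor.getFirst(false);
            while (status == OperationStatus.SUCCESS) {
                Object key = cursor.getCurrentKey();
                Object value = cursor.getCurrentValue();
                System.out.println(key + " -> " + value);
                status = cursor.getNext(false);
            }
        } finally {
            cursor.close();
        }
    }
}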

    + * + * @author Mark Hayes + */ +final class DataCursor implements Cloneable { + + private RangeCursor cursor; + private JoinCursor joinCursor; + private DataView view; + private KeyRange range; + private boolean writeAllowed; + private boolean dirtyRead; + private DatabaseEntry keyThang; + private DatabaseEntry valueThang; + private DatabaseEntry primaryKeyThang; + private DatabaseEntry otherThang; + private DataCursor[] indexCursorsToClose; + + /** + * Creates a cursor for a given view. + */ + DataCursor(DataView view, boolean writeAllowed) + throws DatabaseException { + + init(view, writeAllowed, null); + } + + /** + * Creates a cursor for a given view and single key range. + */ + DataCursor(DataView view, boolean writeAllowed, Object singleKey) + throws DatabaseException { + + init(view, writeAllowed, view.subRange(singleKey)); + } + + /** + * Creates a cursor for a given view and key range. + */ + DataCursor(DataView view, boolean writeAllowed, + Object beginKey, boolean beginInclusive, + Object endKey, boolean endInclusive) + throws DatabaseException { + + init(view, writeAllowed, + view.subRange(beginKey, beginInclusive, endKey, endInclusive)); + } + + /** + * Creates a join cursor. + */ + DataCursor(DataView view, DataCursor[] indexCursors, + JoinConfig joinConfig, boolean closeIndexCursors) + throws DatabaseException { + + if (view.isSecondary()) { + throw new IllegalArgumentException( + "The primary collection in a join must not be a secondary " + + "database"); + } + Cursor[] cursors = new Cursor[indexCursors.length]; + for (int i = 0; i < cursors.length; i += 1) { + cursors[i] = indexCursors[i].cursor.getCursor(); + } + joinCursor = view.db.join(cursors, joinConfig); + init(view, false, null); + if (closeIndexCursors) { + indexCursorsToClose = indexCursors; + } + } + + /** + * Clones a cursor preserving the current position. + */ + DataCursor cloneCursor() + throws DatabaseException { + + checkNoJoinCursor(); + + DataCursor o; + try { + o = (DataCursor) super.clone(); + } catch (CloneNotSupportedException neverHappens) { + return null; + } + + o.initThangs(); + KeyRange.copy(keyThang, o.keyThang); + KeyRange.copy(valueThang, o.valueThang); + if (primaryKeyThang != keyThang) { + KeyRange.copy(primaryKeyThang, o.primaryKeyThang); + } + + o.cursor = cursor.dup(true); + return o; + } + + /** + * Returns the internal range cursor. + */ + RangeCursor getCursor() { + return cursor; + } + + /** + * Constructor helper. + */ + private void init(DataView view, boolean writeAllowed, KeyRange range) + throws DatabaseException { + + this.view = view; + this.writeAllowed = writeAllowed && view.writeAllowed; + this.range = (range != null) ? range : view.range; + dirtyRead = view.dirtyReadEnabled; + + initThangs(); + + if (joinCursor == null) { + cursor = new RangeCursor(view, this.range, this.writeAllowed); + } + } + + /** + * Constructor helper. + */ + private void initThangs() + throws DatabaseException { + + keyThang = new DatabaseEntry(); + primaryKeyThang = view.isSecondary() ? (new DatabaseEntry()) + : keyThang; + valueThang = new DatabaseEntry(); + } + + /** + * Closes the associated cursor. 
+ */ + void close() + throws DatabaseException { + + if (joinCursor != null) { + JoinCursor toClose = joinCursor; + joinCursor = null; + toClose.close(); + } + if (cursor != null) { + Cursor toClose = cursor.getCursor(); + cursor = null; + view.currentTxn.closeCursor(toClose ); + } + if (indexCursorsToClose != null) { + DataCursor[] toClose = indexCursorsToClose; + indexCursorsToClose = null; + for (int i = 0; i < toClose.length; i += 1) { + toClose[i].close(); + } + } + } + + /** + * Returns the view for this cursor. + */ + DataView getView() { + + return view; + } + + /** + * Returns the range for this cursor. + */ + KeyRange getRange() { + + return range; + } + + /** + * Returns whether write is allowed for this cursor, as specified to the + * constructor. + */ + boolean isWriteAllowed() { + + return writeAllowed; + } + + /** + * Returns the key object for the last record read. + */ + Object getCurrentKey() + throws DatabaseException { + + if (view.keyBinding == null) { + throw new UnsupportedOperationException(); + } + return view.makeKey(keyThang); + } + + /** + * Returns the value object for the last record read. + */ + Object getCurrentValue() + throws DatabaseException { + + return view.makeValue(primaryKeyThang, valueThang); + } + + /** + * Returns whether record number access is allowed. + */ + boolean hasRecNumAccess() { + + return view.recNumAccess; + } + + /** + * Returns the record number for the last record read. + */ + int getCurrentRecordNumber() + throws DatabaseException { + + if (view.btreeRecNumDb) { + /* BTREE-RECNO access. */ + if (otherThang == null) { + otherThang = new DatabaseEntry(); + } + DbCompat.getCurrentRecordNumber(cursor.getCursor(), otherThang, + getLockMode(false)); + return DbCompat.getRecordNumber(otherThang); + } else { + /* QUEUE or RECNO database. */ + return DbCompat.getRecordNumber(keyThang); + } + } + + /** + * Binding version of Cursor.getCurrent(), no join cursor allowed. + */ + OperationStatus getCurrent(boolean lockForWrite) + throws DatabaseException { + + checkNoJoinCursor(); + return cursor.getCurrent(keyThang, primaryKeyThang, valueThang, + getLockMode(lockForWrite)); + } + + /** + * Binding version of Cursor.getFirst(), join cursor is allowed. + */ + OperationStatus getFirst(boolean lockForWrite) + throws DatabaseException { + + LockMode lockMode = getLockMode(lockForWrite); + if (joinCursor != null) { + return joinCursor.getNext(keyThang, valueThang, lockMode); + } else { + return cursor.getFirst(keyThang, primaryKeyThang, valueThang, + lockMode); + } + } + + /** + * Binding version of Cursor.getNext(), join cursor is allowed. + */ + OperationStatus getNext(boolean lockForWrite) + throws DatabaseException { + + LockMode lockMode = getLockMode(lockForWrite); + if (joinCursor != null) { + return joinCursor.getNext(keyThang, valueThang, lockMode); + } else { + return cursor.getNext(keyThang, primaryKeyThang, valueThang, + lockMode); + } + } + + /** + * Binding version of Cursor.getNext(), join cursor is allowed. + */ + OperationStatus getNextNoDup(boolean lockForWrite) + throws DatabaseException { + + LockMode lockMode = getLockMode(lockForWrite); + if (joinCursor != null) { + return joinCursor.getNext(keyThang, valueThang, lockMode); + } else { + return cursor.getNextNoDup(keyThang, primaryKeyThang, valueThang, + lockMode); + } + } + + /** + * Binding version of Cursor.getNextDup(), no join cursor allowed. 
+ */ + OperationStatus getNextDup(boolean lockForWrite) + throws DatabaseException { + + checkNoJoinCursor(); + return cursor.getNextDup(keyThang, primaryKeyThang, valueThang, + getLockMode(lockForWrite)); + } + + /** + * Binding version of Cursor.getLast(), no join cursor allowed. + */ + OperationStatus getLast(boolean lockForWrite) + throws DatabaseException { + + checkNoJoinCursor(); + return cursor.getLast(keyThang, primaryKeyThang, valueThang, + getLockMode(lockForWrite)); + } + + /** + * Binding version of Cursor.getPrev(), no join cursor allowed. + */ + OperationStatus getPrev(boolean lockForWrite) + throws DatabaseException { + + checkNoJoinCursor(); + return cursor.getPrev(keyThang, primaryKeyThang, valueThang, + getLockMode(lockForWrite)); + } + + /** + * Binding version of Cursor.getPrevNoDup(), no join cursor allowed. + */ + OperationStatus getPrevNoDup(boolean lockForWrite) + throws DatabaseException { + + checkNoJoinCursor(); + return cursor.getPrevNoDup(keyThang, primaryKeyThang, valueThang, + getLockMode(lockForWrite)); + } + + /** + * Binding version of Cursor.getPrevDup(), no join cursor allowed. + */ + OperationStatus getPrevDup(boolean lockForWrite) + throws DatabaseException { + + checkNoJoinCursor(); + return cursor.getPrevDup(keyThang, primaryKeyThang, valueThang, + getLockMode(lockForWrite)); + } + + /** + * Binding version of Cursor.getSearchKey(), no join cursor allowed. + * Searches by record number in a BTREE-RECNO db with RECNO access. + */ + OperationStatus getSearchKey(Object key, Object value, + boolean lockForWrite) + throws DatabaseException { + + checkNoJoinCursor(); + if (view.useKey(key, value, keyThang, range)) { + return doGetSearchKey(lockForWrite); + } else { + return OperationStatus.NOTFOUND; + } + } + + /** + * Pass-thru version of Cursor.getSearchKey(). + * Searches by record number in a BTREE-RECNO db with RECNO access. + */ + private OperationStatus doGetSearchKey(boolean lockForWrite) + throws DatabaseException { + + LockMode lockMode = getLockMode(lockForWrite); + if (view.btreeRecNumAccess) { + return cursor.getSearchRecordNumber(keyThang, primaryKeyThang, + valueThang, lockMode); + } else { + return cursor.getSearchKey(keyThang, primaryKeyThang, + valueThang, lockMode); + } + } + + /** + * Binding version of Cursor.getSearchKeyRange(), no join cursor allowed. + */ + OperationStatus getSearchKeyRange(Object key, Object value, + boolean lockForWrite) + throws DatabaseException { + + checkNoJoinCursor(); + if (view.useKey(key, value, keyThang, range)) { + return cursor.getSearchKeyRange(keyThang, primaryKeyThang, + valueThang, + getLockMode(lockForWrite)); + } else { + return OperationStatus.NOTFOUND; + } + } + + /** + * Binding version of Cursor.getSearchBoth(), no join cursor allowed. + * Unlike SecondaryCursor.getSearchBoth, for a secondary this searches for + * the primary value not the primary key. 
+ */ + OperationStatus getSearchBoth(Object key, Object value, + boolean lockForWrite) + throws DatabaseException { + + checkNoJoinCursor(); + LockMode lockMode = getLockMode(lockForWrite); + view.useValue(value, valueThang, null); + if (view.useKey(key, value, keyThang, range)) { + if (view.isSecondary()) { + if (otherThang == null) { + otherThang = new DatabaseEntry(); + } + OperationStatus status = cursor.getSearchKey(keyThang, + primaryKeyThang, + otherThang, + lockMode); + while (status == OperationStatus.SUCCESS) { + if (KeyRange.equalBytes(otherThang, valueThang)) { + break; + } + status = cursor.getNextDup(keyThang, primaryKeyThang, + otherThang, lockMode); + } + /* if status != SUCCESS set range cursor to invalid? */ + return status; + } else { + return cursor.getSearchBoth(keyThang, null, valueThang, + lockMode); + } + } else { + return OperationStatus.NOTFOUND; + } + } + + /** + * Find the given value using getSearchBoth if possible or a sequential + * scan otherwise, no join cursor allowed. + */ + OperationStatus find(Object value, boolean findFirst) + throws DatabaseException { + + checkNoJoinCursor(); + LockMode lockMode = getLockMode(false); + + if (view.entityBinding != null && !view.isSecondary() && + (findFirst || !view.dupsAllowed)) { + return getSearchBoth(null, value, false); + } else { + if (otherThang == null) { + otherThang = new DatabaseEntry(); + } + view.useValue(value, otherThang, null); + OperationStatus status = findFirst ? getFirst(false) + : getLast(false); + while (status == OperationStatus.SUCCESS) { + if (KeyRange.equalBytes(valueThang, otherThang)) { + break; + } + status = findFirst ? getNext(false) : getPrev(false); + } + return status; + } + } + + /** + * Calls Cursor.count(), no join cursor allowed. + */ + int count() + throws DatabaseException { + + checkNoJoinCursor(); + return cursor.count(); + } + + /** + * Binding version of Cursor.putCurrent(). + */ + OperationStatus putCurrent(Object value) + throws DatabaseException { + + checkWriteAllowed(false); + view.useValue(value, valueThang, keyThang); + + /* + * Workaround for a DB core problem: With HASH type a put() with + * different data is allowed. + */ + boolean hashWorkaround = (view.dupsOrdered && !view.ordered); + if (hashWorkaround) { + if (otherThang == null) { + otherThang = new DatabaseEntry(); + } + cursor.getCurrent(keyThang, primaryKeyThang, otherThang, + LockMode.DEFAULT); + if (KeyRange.equalBytes(valueThang, otherThang)) { + return OperationStatus.SUCCESS; + } else { + throw new IllegalArgumentException( + "Current data differs from put data with sorted duplicates"); + } + } + + return cursor.putCurrent(valueThang); + } + + /** + * Binding version of Cursor.putAfter(). + */ + OperationStatus putAfter(Object value) + throws DatabaseException { + + checkWriteAllowed(false); + view.useValue(value, valueThang, null); /* why no key check? */ + return cursor.putAfter(valueThang); + } + + /** + * Binding version of Cursor.putBefore(). + */ + OperationStatus putBefore(Object value) + throws DatabaseException { + + checkWriteAllowed(false); + view.useValue(value, valueThang, keyThang); + return cursor.putBefore(valueThang); + } + + /** + * Binding version of Cursor.put(), optionally returning the old value and + * optionally using the current key instead of the key parameter. 
+ */ + OperationStatus put(Object key, Object value, Object[] oldValue, + boolean useCurrentKey) + throws DatabaseException { + + initForPut(key, value, oldValue, useCurrentKey); + return cursor.put(keyThang, valueThang); + } + + /** + * Binding version of Cursor.putNoOverwrite(), optionally using the current + * key instead of the key parameter. + */ + OperationStatus putNoOverwrite(Object key, Object value, + boolean useCurrentKey) + throws DatabaseException { + + initForPut(key, value, null, useCurrentKey); + return cursor.putNoOverwrite(keyThang, valueThang); + } + + /** + * Binding version of Cursor.putNoDupData(), optionally returning the old + * value and optionally using the current key instead of the key parameter. + */ + OperationStatus putNoDupData(Object key, Object value, Object[] oldValue, + boolean useCurrentKey) + throws DatabaseException { + + initForPut(key, value, oldValue, useCurrentKey); + if (view.dupsOrdered) { + return cursor.putNoDupData(keyThang, valueThang); + } else { + if (view.dupsAllowed) { + /* Unordered duplicates. */ + OperationStatus status = + cursor.getSearchBoth(keyThang, primaryKeyThang, + valueThang, + getLockMode(false)); + if (status == OperationStatus.SUCCESS) { + return OperationStatus.KEYEXIST; + } else { + return cursor.put(keyThang, valueThang); + } + } else { + /* No duplicates. */ + return cursor.putNoOverwrite(keyThang, valueThang); + } + } + } + + /** + * Do setup for a put() operation. + */ + private void initForPut(Object key, Object value, Object[] oldValue, + boolean useCurrentKey) + throws DatabaseException { + + checkWriteAllowed(false); + if (!useCurrentKey && !view.useKey(key, value, keyThang, range)) { + throw new IllegalArgumentException("key out of range"); + } + if (oldValue != null) { + oldValue[0] = null; + if (!view.dupsAllowed) { + OperationStatus status = doGetSearchKey(true); + if (status == OperationStatus.SUCCESS) { + oldValue[0] = getCurrentValue(); + } + } + } + view.useValue(value, valueThang, keyThang); + } + + /** + * Sets the key entry to the begin key of a single key range, so the next + * time a putXxx() method is called that key will be used. + */ + void useRangeKey() { + if (!range.singleKey) { + throw new IllegalStateException(); + } + KeyRange.copy(range.beginKey, keyThang); + } + + /** + * Perform an arbitrary database 'delete' operation. + */ + OperationStatus delete() + throws DatabaseException { + + checkWriteAllowed(true); + return cursor.delete(); + } + + /** + * Returns the lock mode to use for a getXxx() operation. + */ + LockMode getLockMode(boolean lockForWrite) { + + /* Dirty-read takes precedence over write-locking. */ + + if (dirtyRead) { + return LockMode.DIRTY_READ; + } else if (lockForWrite && !view.currentTxn.isDirtyRead()) { + return view.currentTxn.getWriteLockMode(); + } else { + return LockMode.DEFAULT; + } + } + + /** + * Throws an exception if a join cursor is in use. + */ + private void checkNoJoinCursor() { + + if (joinCursor != null) { + throw new UnsupportedOperationException + ("Not allowed with a join cursor"); + } + } + + /** + * Throws an exception if write is not allowed or if a join cursor is in + * use. 
+ */ + private void checkWriteAllowed(boolean allowSecondary) { + + checkNoJoinCursor(); + + if (!writeAllowed || (!allowSecondary && view.isSecondary())) { + throw new UnsupportedOperationException + ("Writing is not allowed"); + } + } +} diff --git a/db/java/src/com/sleepycat/collections/DataView.java b/db/java/src/com/sleepycat/collections/DataView.java new file mode 100644 index 000000000..e8cec80b3 --- /dev/null +++ b/db/java/src/com/sleepycat/collections/DataView.java @@ -0,0 +1,598 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: DataView.java,v 1.4 2004/08/02 18:52:05 mjc Exp $ + */ + +package com.sleepycat.collections; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseConfig; +import com.sleepycat.db.DatabaseEntry; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.Environment; +import com.sleepycat.db.JoinConfig; +import com.sleepycat.db.OperationStatus; +import com.sleepycat.db.SecondaryConfig; +import com.sleepycat.db.SecondaryDatabase; +import com.sleepycat.db.SecondaryKeyCreator; +import com.sleepycat.db.Transaction; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * Represents a Berkeley DB database and adds support for indices, bindings and + * key ranges. + * + *

    This class defines a view and takes care of reading and updating indices, + * calling bindings, constraining access to a key range, etc.
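A minimal construction sketch, assuming an open Database and entry bindings supplied by the caller (the helper class name is illustrative), built with the constructor defined below:

package com.sleepycat.collections;

import com.sleepycat.bind.EntryBinding;
import com.sleepycat.db.Database;

class DataViewSketch {
    // "db", "keyBinding" and "valueBinding" are assumed to exist; passing a
    // null entity binding and key assigner yields a plain key/value view,
    // and writeAllowed=true permits update operations through the view.
    static DataView plainView(Database db,
                              EntryBinding keyBinding,
                              EntryBinding valueBinding) {
        DataView view = new DataView(db, keyBinding, valueBinding,
                                     null /* entityBinding */,
                                     true /* writeAllowed */,
                                     null /* keyAssigner */);
        // A derived view can then restrict the key range or drop bindings,
        // for example via view.valueSetView() or view.subView(...).
        return view;
    }
}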

    + * + * @author Mark Hayes + */ +final class DataView implements Cloneable { + + Database db; + SecondaryDatabase secDb; + CurrentTransaction currentTxn; + KeyRange range; + EntryBinding keyBinding; + EntryBinding valueBinding; + EntityBinding entityBinding; + PrimaryKeyAssigner keyAssigner; + SecondaryKeyCreator secKeyCreator; + boolean writeAllowed; // Read-write view + boolean ordered; // Not a HASH Db + boolean recNumAllowed; // QUEUE, RECNO, or BTREE-RECNUM Db + boolean recNumAccess; // recNumAllowed && using a rec num binding + boolean btreeRecNumDb; // BTREE-RECNUM Db + boolean btreeRecNumAccess; // recNumAccess && BTREE-RECNUM Db + boolean recNumRenumber; // RECNO-RENUM Db + boolean keysRenumbered; // recNumRenumber || btreeRecNumAccess + boolean dupsAllowed; // Dups configured + boolean dupsOrdered; // Sorted dups configured + boolean transactional; // Db is transactional + boolean dirtyReadAllowed; // Dirty-read is optional in DB-CORE + boolean dirtyReadEnabled; // This view is a dirty-ready view + + /** + * Creates a view for a given database and bindings. The initial key range + * of the view will be open. + */ + DataView(Database database, EntryBinding keyBinding, + EntryBinding valueBinding, EntityBinding entityBinding, + boolean writeAllowed, PrimaryKeyAssigner keyAssigner) + throws IllegalArgumentException { + + if (database == null) { + throw new IllegalArgumentException("database is null"); + } + db = database; + try { + currentTxn = + CurrentTransaction.getInstanceInternal(db.getEnvironment()); + DatabaseConfig dbConfig; + if (db instanceof SecondaryDatabase) { + secDb = (SecondaryDatabase) database; + SecondaryConfig secConfig = secDb.getSecondaryConfig(); + secKeyCreator = secConfig.getKeyCreator(); + dbConfig = secConfig; + } else { + dbConfig = db.getConfig(); + } + ordered = !DbCompat.isTypeHash(dbConfig); + recNumAllowed = DbCompat.isTypeQueue(dbConfig) || + DbCompat.isTypeRecno(dbConfig) || + DbCompat.getBtreeRecordNumbers(dbConfig); + recNumRenumber = DbCompat.getRenumbering(dbConfig); + dupsAllowed = DbCompat.getSortedDuplicates(dbConfig) || + DbCompat.getUnsortedDuplicates(dbConfig); + dupsOrdered = DbCompat.getSortedDuplicates(dbConfig); + transactional = currentTxn.isTxnMode() && + dbConfig.getTransactional(); + dirtyReadAllowed = DbCompat.getDirtyRead(dbConfig); + btreeRecNumDb = recNumAllowed && DbCompat.isTypeBtree(dbConfig); + range = new KeyRange(dbConfig.getBtreeComparator()); + } catch (DatabaseException e) { + throw new RuntimeExceptionWrapper(e); + } + this.writeAllowed = writeAllowed; + this.keyBinding = keyBinding; + this.valueBinding = valueBinding; + this.entityBinding = entityBinding; + this.keyAssigner = keyAssigner; + + if (valueBinding != null && entityBinding != null) + throw new IllegalArgumentException( + "both valueBinding and entityBinding are non-null"); + + if (keyBinding instanceof com.sleepycat.bind.RecordNumberBinding) { + if (!recNumAllowed) { + throw new IllegalArgumentException( + "RecordNumberBinding requires DB_BTREE/DB_RECNUM, " + + "DB_RECNO, or DB_QUEUE"); + } + recNumAccess = true; + if (btreeRecNumDb) { + btreeRecNumAccess = true; + } + } + keysRenumbered = recNumRenumber || btreeRecNumAccess; + } + + /** + * Clones the view. + */ + private DataView cloneView() { + + try { + return (DataView) super.clone(); + } catch (CloneNotSupportedException willNeverOccur) { + throw new IllegalStateException(); + } + } + + /** + * Return a new key-set view derived from this view by setting the + * entity and value binding to null. 
+ * + * @return the derived view. + */ + DataView keySetView() { + + if (keyBinding == null) { + throw new UnsupportedOperationException("must have keyBinding"); + } + DataView view = cloneView(); + view.valueBinding = null; + view.entityBinding = null; + return view; + } + + /** + * Return a new value-set view derived from this view by setting the + * key binding to null. + * + * @return the derived view. + */ + DataView valueSetView() { + + if (valueBinding == null && entityBinding == null) { + throw new UnsupportedOperationException( + "must have valueBinding or entityBinding"); + } + DataView view = cloneView(); + view.keyBinding = null; + return view; + } + + /** + * Return a new value-set view for single key range. + * + * @param singleKey the single key value. + * + * @return the derived view. + * + * @throws DatabaseException if a database problem occurs. + * + * @throws KeyRangeException if the specified range is not within the + * current range. + */ + DataView valueSetView(Object singleKey) + throws DatabaseException, KeyRangeException { + + /* + * Must do subRange before valueSetView since the latter clears the + * key binding needed for the former. + */ + KeyRange singleKeyRange = subRange(singleKey); + DataView view = valueSetView(); + view.range = singleKeyRange; + return view; + } + + /** + * Return a new value-set view for key range, optionally changing + * the key binding. + */ + DataView subView(Object beginKey, boolean beginInclusive, + Object endKey, boolean endInclusive, + EntryBinding keyBinding) + throws DatabaseException, KeyRangeException { + + DataView view = cloneView(); + view.setRange(beginKey, beginInclusive, endKey, endInclusive); + if (keyBinding != null) view.keyBinding = keyBinding; + return view; + } + + /** + * Returns a new view with a specified dirtyRead setting. + */ + DataView dirtyReadView(boolean enable) { + + if (!dirtyReadAllowed) + return this; + DataView view = cloneView(); + view.dirtyReadEnabled = enable; + return view; + } + + /** + * Returns the current transaction for the view or null if the environment + * is non-transactional. + */ + CurrentTransaction getCurrentTxn() { + + return transactional ? currentTxn : null; + } + + /** + * Sets this view's range to a subrange with the given parameters. + */ + private void setRange(Object beginKey, boolean beginInclusive, + Object endKey, boolean endInclusive) + throws DatabaseException, KeyRangeException { + + range = subRange(beginKey, beginInclusive, endKey, endInclusive); + } + + /** + * Returns the key thang for a single key range, or null if a single key + * range is not used. + */ + DatabaseEntry getSingleKeyThang() { + + return range.getSingleKey(); + } + + /** + * Returns the environment for the database. + */ + final Environment getEnv() { + + return currentTxn.getEnvironment(); + } + + /** + * Returns whether this is a view on a secondary database rather + * than directly on a primary database. + */ + final boolean isSecondary() { + + return (secDb != null); + } + + /** + * Returns whether no records are present in the view. + */ + boolean isEmpty() + throws DatabaseException { + + DataCursor cursor = new DataCursor(this, false); + try { + return cursor.getFirst(false) != OperationStatus.SUCCESS; + } finally { + cursor.close(); + } + } + + /** + * Appends a value and returns the new key. If a key assigner is used + * it assigns the key, otherwise a QUEUE or RECNO database is required. 
+ */ + OperationStatus append(Object value, Object[] retPrimaryKey, + Object[] retValue) + throws DatabaseException { + + /* + * Flags will be NOOVERWRITE if used with assigner, or APPEND + * otherwise. + * Requires: if value param, value or entity binding + * Requires: if retPrimaryKey, primary key binding (no index). + * Requires: if retValue, value or entity binding + */ + DatabaseEntry keyThang = new DatabaseEntry(); + DatabaseEntry valueThang = new DatabaseEntry(); + useValue(value, valueThang, null); + OperationStatus status; + if (keyAssigner != null) { + keyAssigner.assignKey(keyThang); + if (!range.check(keyThang)) { + throw new IllegalArgumentException( + "assigned key out of range"); + } + DataCursor cursor = new DataCursor(this, true); + try { + status = cursor.getCursor().putNoOverwrite(keyThang, + valueThang); + } finally { + cursor.close(); + } + } else { + /* Assume QUEUE/RECNO access method. */ + if (currentTxn.isCDBCursorOpen(db)) { + throw new IllegalStateException( + "cannot open CDB write cursor when read cursor is open"); + } + status = DbCompat.append(db, useTransaction(), + keyThang, valueThang); + if (status == OperationStatus.SUCCESS && !range.check(keyThang)) { + db.delete(useTransaction(), keyThang); + throw new IllegalArgumentException( + "appended record number out of range"); + } + } + if (status == OperationStatus.SUCCESS) { + returnPrimaryKeyAndValue(keyThang, valueThang, + retPrimaryKey, retValue); + } + return status; + } + + /** + * Returns the current transaction if the database is transaction, or null + * if the database is not transactional or there is no current transaction. + */ + Transaction useTransaction() { + return transactional ? currentTxn.getTransaction() : null; + } + + /** + * Deletes all records in the current range. + */ + void clear() + throws DatabaseException { + + DataCursor cursor = new DataCursor(this, true); + try { + OperationStatus status = OperationStatus.SUCCESS; + while (status == OperationStatus.SUCCESS) { + if (keysRenumbered) { + status = cursor.getFirst(true); + } else { + status = cursor.getNext(true); + } + if (status == OperationStatus.SUCCESS) { + cursor.delete(); + } + } + } finally { + cursor.close(); + } + } + + /** + * Returns a cursor for this view that reads only records having the + * specified index key values. + */ + DataCursor join(DataView[] indexViews, Object[] indexKeys, + JoinConfig joinConfig) + throws DatabaseException { + + DataCursor joinCursor = null; + DataCursor[] indexCursors = new DataCursor[indexViews.length]; + try { + for (int i = 0; i < indexViews.length; i += 1) { + indexCursors[i] = new DataCursor(indexViews[i], false); + indexCursors[i].getSearchKey(indexKeys[i], null, false); + } + joinCursor = new DataCursor(this, indexCursors, joinConfig, true); + return joinCursor; + } finally { + if (joinCursor == null) { + // An exception is being thrown, so close cursors we opened. + for (int i = 0; i < indexCursors.length; i += 1) { + if (indexCursors[i] != null) { + try { indexCursors[i].close(); } + catch (Exception e) { + /* FindBugs, this is ok. */ + } + } + } + } + } + } + + /** + * Returns a cursor for this view that reads only records having the + * index key values at the specified cursors. + */ + DataCursor join(DataCursor[] indexCursors, JoinConfig joinConfig) + throws DatabaseException { + + return new DataCursor(this, indexCursors, joinConfig, false); + } + + /** + * Returns primary key and value if return parameters are non-null. 
+ */ + private void returnPrimaryKeyAndValue(DatabaseEntry keyThang, + DatabaseEntry valueThang, + Object[] retPrimaryKey, + Object[] retValue) + throws DatabaseException { + + // Requires: if retPrimaryKey, primary key binding (no index). + // Requires: if retValue, value or entity binding + + if (retPrimaryKey != null) { + if (keyBinding == null) { + throw new IllegalArgumentException( + "returning key requires primary key binding"); + } else if (isSecondary()) { + throw new IllegalArgumentException( + "returning key requires unindexed view"); + } else { + retPrimaryKey[0] = keyBinding.entryToObject(keyThang); + } + } + if (retValue != null) { + retValue[0] = makeValue(keyThang, valueThang); + } + } + + /** + * Populates the key entry and returns whether the key is within range. + */ + boolean useKey(Object key, Object value, DatabaseEntry keyThang, + KeyRange checkRange) + throws DatabaseException { + + if (key != null) { + if (keyBinding == null) { + throw new IllegalArgumentException( + "non-null key with null key binding"); + } + keyBinding.objectToEntry(key, keyThang); + } else { + if (value == null) { + throw new IllegalArgumentException( + "null key and null value"); + } + if (entityBinding == null) { + throw new IllegalStateException( + "EntityBinding required to derive key from value"); + } + if (isSecondary()) { + DatabaseEntry primaryKeyThang = new DatabaseEntry(); + entityBinding.objectToKey(value, primaryKeyThang); + DatabaseEntry valueThang = new DatabaseEntry(); + entityBinding.objectToData(value, valueThang); + secKeyCreator.createSecondaryKey(secDb, primaryKeyThang, + valueThang, keyThang); + } else { + entityBinding.objectToKey(value, keyThang); + } + } + if (recNumAccess && DbCompat.getRecordNumber(keyThang) <= 0) { + return false; + } + if (checkRange != null && !checkRange.check(keyThang)) { + return false; + } + return true; + } + + /** + * Returns whether data keys can be derived from the value/entity binding + * of this view, which determines whether a value/entity object alone is + * sufficient for operations that require keys. + */ + final boolean canDeriveKeyFromValue() { + + return (entityBinding != null); + } + + /** + * Populates the value entry and throws an exception if the primary key + * would be changed via an entity binding. + */ + void useValue(Object value, DatabaseEntry valueThang, + DatabaseEntry checkKeyThang) + throws DatabaseException { + + if (value != null) { + if (valueBinding != null) { + valueBinding.objectToEntry(value, valueThang); + } else if (entityBinding != null) { + entityBinding.objectToData(value, valueThang); + if (checkKeyThang != null) { + DatabaseEntry thang = new DatabaseEntry(); + entityBinding.objectToKey(value, thang); + if (!KeyRange.equalBytes(thang, checkKeyThang)) { + throw new IllegalArgumentException( + "cannot change primary key"); + } + } + } else { + throw new IllegalArgumentException( + "non-null value with null value/entity binding"); + } + } else { + valueThang.setData(new byte[0]); + valueThang.setOffset(0); + valueThang.setSize(0); + } + } + + /** + * Converts a key entry to a key object. + */ + Object makeKey(DatabaseEntry keyThang) + throws DatabaseException { + + if (keyThang.getSize() == 0) return null; + return keyBinding.entryToObject(keyThang); + } + + /** + * Converts a key-value entry pair to a value object. 
+ */ + Object makeValue(DatabaseEntry primaryKeyThang, DatabaseEntry valueThang) + throws DatabaseException { + + Object value; + if (valueBinding != null) { + value = valueBinding.entryToObject(valueThang); + } else if (entityBinding != null) { + value = entityBinding.entryToObject(primaryKeyThang, + valueThang); + } else { + throw new UnsupportedOperationException( + "requires valueBinding or entityBinding"); + } + return value; + } + + /** + * Intersects the given key and the current range. + */ + KeyRange subRange(Object singleKey) + throws DatabaseException, KeyRangeException { + + return range.subRange(makeRangeKey(singleKey)); + } + + /** + * Intersects the given range and the current range. + */ + KeyRange subRange(Object beginKey, boolean beginInclusive, + Object endKey, boolean endInclusive) + throws DatabaseException, KeyRangeException { + + if (beginKey == endKey && beginInclusive && endInclusive) { + return subRange(beginKey); + } + if (!ordered) { + throw new UnsupportedOperationException( + "Cannot use key ranges on an unsorted database"); + } + DatabaseEntry beginThang = + (beginKey != null) ? makeRangeKey(beginKey) : null; + DatabaseEntry endThang = + (endKey != null) ? makeRangeKey(endKey) : null; + + return range.subRange(beginThang, beginInclusive, + endThang, endInclusive); + } + + /** + * Given a key object, make a key entry that can be used in a range. + */ + private DatabaseEntry makeRangeKey(Object key) + throws DatabaseException { + + DatabaseEntry thang = new DatabaseEntry(); + if (keyBinding != null) { + useKey(key, null, thang, null); + } else { + useKey(null, key, thang, null); + } + return thang; + } +} diff --git a/db/java/src/com/sleepycat/collections/KeyRange.java b/db/java/src/com/sleepycat/collections/KeyRange.java new file mode 100644 index 000000000..8a5b7d3b2 --- /dev/null +++ b/db/java/src/com/sleepycat/collections/KeyRange.java @@ -0,0 +1,299 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: KeyRange.java,v 1.2 2004/05/05 15:43:48 mark Exp $ + */ + +package com.sleepycat.collections; + +import java.util.Comparator; + +import com.sleepycat.db.DatabaseEntry; + +/** + * Encapsulates a key range for use with a RangeCursor. + */ +class KeyRange { + + Comparator comparator; + DatabaseEntry beginKey; + DatabaseEntry endKey; + boolean singleKey; + boolean beginInclusive; + boolean endInclusive; + + /** + * Creates an unconstrained key range. + */ + KeyRange(Comparator comparator) { + this.comparator = comparator; + } + + /** + * Creates a range for a single key. + */ + KeyRange subRange(DatabaseEntry key) + throws KeyRangeException { + + if (!check(key)) { + throw new KeyRangeException("singleKey out of range"); + } + KeyRange range = new KeyRange(comparator); + range.beginKey = key; + range.endKey = key; + range.beginInclusive = true; + range.endInclusive = true; + range.singleKey = true; + return range; + } + + /** + * Creates a range that is the intersection of this range and the given + * range parameters. 
+ */ + KeyRange subRange(DatabaseEntry beginKey, boolean beginInclusive, + DatabaseEntry endKey, boolean endInclusive) + throws KeyRangeException { + + if (beginKey == null) { + beginKey = this.beginKey; + beginInclusive = this.beginInclusive; + } else if (!check(beginKey, beginInclusive)) { + throw new KeyRangeException("beginKey out of range"); + } + if (endKey == null) { + endKey = this.endKey; + endInclusive = this.endInclusive; + } else if (!check(endKey, endInclusive)) { + throw new KeyRangeException("endKey out of range"); + } + KeyRange range = new KeyRange(comparator); + range.beginKey = beginKey; + range.endKey = endKey; + range.beginInclusive = beginInclusive; + range.endInclusive = endInclusive; + return range; + } + + /** + * Returns the key of a single-key range, or null if not a single-key + * range. + */ + final DatabaseEntry getSingleKey() { + + return singleKey ? beginKey : null; + } + + /** + * Returns whether this range has a begin or end bound. + */ + final boolean hasBound() { + + return endKey != null || beginKey != null; + } + + /** + * Formats this range as a string for debugging. + */ + public String toString() { + + return "[KeyRange " + beginKey + ' ' + beginInclusive + + endKey + ' ' + endInclusive + + (singleKey ? " single" : ""); + } + + /** + * Returns whether a given key is within range. + */ + boolean check(DatabaseEntry key) { + + if (singleKey) { + return (compare(key, beginKey) == 0); + } else { + return checkBegin(key, true) && checkEnd(key, true); + } + } + + /** + * Returns whether a given key is within range. + */ + boolean check(DatabaseEntry key, boolean inclusive) { + + if (singleKey) { + return (compare(key, beginKey) == 0); + } else { + return checkBegin(key, inclusive) && checkEnd(key, inclusive); + } + } + + /** + * Returns whether the given key is within range with respect to the + * beginning of the range. + * + *

    The inclusive parameter should be true for checking a key read from + * the database; this will require that the key is within range. When + * inclusive=false the key is allowed to be equal to the beginKey for the + * range; this is used for checking a new exclusive bound of a + * sub-range.

    + * + *

    Note that when inclusive=false and beginInclusive=true our check is + * not exactly correct because in theory we should allow the key to be "one + * less" than the existing bound; however, checking for "one less" is + * impossible so we do the best we can and test the bounds + * conservatively.
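To make the conservative behavior concrete, here is a small illustrative sketch (the byte values and helper class are made up) that exercises checkBegin both for a key read from the database and for a proposed exclusive sub-range bound equal to the begin key; both checks pass:

package com.sleepycat.collections;

import java.util.Comparator;
import com.sleepycat.db.DatabaseEntry;

class CheckBeginSketch {
    // Illustrative values only: a range with an inclusive begin key of {5},
    // checked for a key read from the database (inclusive=true) and for a
    // proposed exclusive sub-range bound (inclusive=false).
    static void demo() throws KeyRangeException {
        KeyRange open = new KeyRange((Comparator) null);
        DatabaseEntry five = new DatabaseEntry(new byte[] { 5 });
        KeyRange range = open.subRange(five, true /* beginInclusive */,
                                       null, false);

        DatabaseEntry key = new DatabaseEntry(new byte[] { 5 });
        boolean readOk = range.checkBegin(key, true);   // true: key in range
        boolean boundOk = range.checkBegin(key, false); // true: conservative
        System.out.println(readOk + " " + boundOk);
    }
}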

    + */ + boolean checkBegin(DatabaseEntry key, boolean inclusive) { + + if (beginKey == null) { + return true; + } else if (!beginInclusive && inclusive) { + return compare(key, beginKey) > 0; + } else { + return compare(key, beginKey) >= 0; + } + } + + /** + * Returns whether the given key is within range with respect to the + * end of the range. See checkBegin for details. + */ + boolean checkEnd(DatabaseEntry key, boolean inclusive) { + + if (endKey == null) { + return true; + } else if (!endInclusive && inclusive) { + return compare(key, endKey) < 0; + } else { + return compare(key, endKey) <= 0; + } + } + + /** + * Compares two keys, using the user comparator if there is one. + */ + int compare(DatabaseEntry key1, DatabaseEntry key2) { + + if (comparator != null) { + return comparator.compare(getByteArray(key1), getByteArray(key2)); + } else { + return compareBytes + (key1.getData(), key1.getOffset(), key1.getSize(), + key2.getData(), key2.getOffset(), key2.getSize()); + + } + } + + /** + * Compares two keys as unsigned byte arrays, which is the default + * comparison used by JE/DB. + */ + static int compareBytes(byte[] data1, int offset1, int size1, + byte[] data2, int offset2, int size2) { + + for (int i = 0; i < size1 && i < size2; i++) { + + int b1 = 0xFF & data1[offset1 + i]; + int b2 = 0xFF & data2[offset2 + i]; + if (b1 < b2) + return -1; + else if (b1 > b2) + return 1; + } + + if (size1 < size2) + return -1; + else if (size1 > size2) + return 1; + else + return 0; + } + + /** + * Returns a copy of an entry. + */ + static DatabaseEntry copy(DatabaseEntry from) { + return new DatabaseEntry(getByteArray(from)); + } + + /** + * Copies one entry to another. + */ + static void copy(DatabaseEntry from, DatabaseEntry to) { + to.setData(getByteArray(from)); + to.setOffset(0); + } + + /** + * Returns an entry's byte array, copying it if the entry offset is + * non-zero. + */ + static byte[] getByteArray(DatabaseEntry entry) { + + byte[] bytes = entry.getData(); + if (bytes == null) return null; + int size = entry.getSize(); + byte[] data = new byte[size]; + System.arraycopy(bytes, entry.getOffset(), data, 0, size); + return data; + } + + /** + * Returns the two DatabaseEntry objects have the same data value. + */ + static boolean equalBytes(DatabaseEntry e1, DatabaseEntry e2) { + + if (e1 == null && e2 == null) { + return true; + } + if (e1 == null || e2 == null) { + return false; + } + + byte[] d1 = e1.getData(); + byte[] d2 = e2.getData(); + int s1 = e1.getSize(); + int s2 = e2.getSize(); + int o1 = e1.getOffset(); + int o2 = e2.getOffset(); + + if (d1 == null && d2 == null) { + return true; + } + if (d1 == null || d2 == null) { + return false; + } + if (s1 != s2) { + return false; + } + for (int i = 0; i < s1; i += 1) { + if (d1[o1 + i] != d2[o2 + i]) { + return false; + } + } + return true; + } + + /** + * Converts the byte array of this thang to space-separated integers, + * and suffixed by the record number if applicable. + * + * @param dbt the thang to convert. + * + * @param the resulting string. 
+ */ + static String toString(DatabaseEntry dbt) { + + int len = dbt.getOffset() + dbt.getSize(); + StringBuffer buf = new StringBuffer(len * 2); + byte[] data = dbt.getData(); + for (int i = dbt.getOffset(); i < len; i++) { + String num = Integer.toHexString(data[i]); + if (num.length() < 2) buf.append('0'); + buf.append(num); + } + return buf.toString(); + } +} diff --git a/db/java/src/com/sleepycat/collections/KeyRangeException.java b/db/java/src/com/sleepycat/collections/KeyRangeException.java new file mode 100644 index 000000000..342ebd528 --- /dev/null +++ b/db/java/src/com/sleepycat/collections/KeyRangeException.java @@ -0,0 +1,26 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: KeyRangeException.java,v 1.1 2004/04/09 16:34:08 mark Exp $ + */ + +package com.sleepycat.collections; + +/** + * An exception thrown when a key is out of range. + * + * @author Mark Hayes + */ +class KeyRangeException extends IllegalArgumentException { + + /** + * Creates a key range exception. + */ + public KeyRangeException(String msg) { + + super(msg); + } +} diff --git a/db/java/src/com/sleepycat/collections/MapEntryParameter.java b/db/java/src/com/sleepycat/collections/MapEntryParameter.java new file mode 100644 index 000000000..8148bbe8a --- /dev/null +++ b/db/java/src/com/sleepycat/collections/MapEntryParameter.java @@ -0,0 +1,126 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: MapEntryParameter.java,v 1.1 2004/04/09 16:34:08 mark Exp $ + */ + +package com.sleepycat.collections; + +import java.util.Map; + +/** + * A simple Map.Entry implementation that can be used as in + * input parameter. Since a MapEntryParameter is not obtained + * from a map, it is not attached to any map in particular. To emphasize that + * changing this object does not change the map, the {@link #setValue} method + * always throws UnsupportedOperationException. + * + *

    Warning: Use of this class violates the Java Collections + * interface contract, which states that Map.Entry objects + * should only be obtained from Map.entrySet() sets, while this + * class allows constructing them directly. However, it is useful for + * performing operations on an entry set such as add(), contains(), etc. For + * restrictions see {@link #getValue} and {@link #setValue}.
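A small usage sketch, assuming an arbitrary Map such as a StoredMap (the helper class is illustrative): an entry constructed directly can be used to test membership in an entry set without first reading it from the map:

import java.util.Map;
import com.sleepycat.collections.MapEntryParameter;

class MapEntrySketch {
    // "map" is any Map (a StoredMap in practice); the entry is built
    // directly rather than obtained from map.entrySet(), which is exactly
    // the usage this class permits.
    static boolean containsPair(Map map, Object key, Object value) {
        Map.Entry entry = new MapEntryParameter(key, value);
        return map.entrySet().contains(entry);
    }
}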

    + * + * @author Mark Hayes + */ +public class MapEntryParameter implements Map.Entry { + + private Object key; + private Object value; + + /** + * Creates a map entry with a given key and value. + * + * @param key is the key to use. + * + * @param value is the value to use. + */ + public MapEntryParameter(Object key, Object value) { + + this.key = key; + this.value = value; + } + + /** + * Computes a hash code as specified by {@link + * java.util.Map.Entry#hashCode}. + * + * @return the computed hash code. + */ + public int hashCode() { + + return ((key == null) ? 0 : key.hashCode()) ^ + ((value == null) ? 0 : value.hashCode()); + } + + /** + * Compares this entry to a given entry as specified by {@link + * java.util.Map.Entry#equals}. + * + * @return the computed hash code. + */ + public boolean equals(Object other) { + + if (!(other instanceof Map.Entry)) { + return false; + } + + Map.Entry e = (Map.Entry) other; + + return ((key == null) ? (e.getKey() == null) + : key.equals(e.getKey())) && + ((value == null) ? (e.getValue() == null) + : value.equals(e.getValue())); + } + + /** + * Returns the key of this entry. + * + * @return the key of this entry. + */ + public final Object getKey() { + + return key; + } + + /** + * Returns the value of this entry. Note that this will be the value + * passed to the constructor or the last value passed to {@link #setValue}. + * It will not reflect changes made to a Map. + * + * @return the value of this entry. + */ + public final Object getValue() { + + return value; + } + + /** + * Always throws UnsupportedOperationException since this + * object is not attached to a map. + */ + public Object setValue(Object newValue) { + + throw new UnsupportedOperationException(); + } + + final void setValueInternal(Object newValue) { + + this.value = newValue; + } + + /** + * Converts the entry to a string representation for debugging. + * + * @return the string representation. + */ + public String toString() { + + return "[key [" + key + "] value [" + value + ']'; + } +} + diff --git a/db/java/src/com/sleepycat/collections/PrimaryKeyAssigner.java b/db/java/src/com/sleepycat/collections/PrimaryKeyAssigner.java new file mode 100644 index 000000000..1af3ba1ae --- /dev/null +++ b/db/java/src/com/sleepycat/collections/PrimaryKeyAssigner.java @@ -0,0 +1,30 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: PrimaryKeyAssigner.java,v 1.2 2004/06/04 18:24:50 mark Exp $ + */ + +package com.sleepycat.collections; + +import com.sleepycat.db.DatabaseEntry; +import com.sleepycat.db.DatabaseException; + +/** + * An interface implemented to assign new primary key values. + * An implementation of this interface is passed to the {@link StoredMap} + * or {@link StoredSortedMap} constructor to assign primary keys for that + * store. Key assignment occurs when StoredMap.append() is called. + * + * @author Mark Hayes + */ +public interface PrimaryKeyAssigner { + + /** + * Assigns a new primary key value into the given data buffer. + */ + void assignKey(DatabaseEntry keyData) + throws DatabaseException; +} diff --git a/db/java/src/com/sleepycat/collections/RangeCursor.java b/db/java/src/com/sleepycat/collections/RangeCursor.java new file mode 100644 index 000000000..ae664ef51 --- /dev/null +++ b/db/java/src/com/sleepycat/collections/RangeCursor.java @@ -0,0 +1,874 @@ +/*- + * See the file LICENSE for redistribution information. 
+ * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: RangeCursor.java,v 1.3 2004/09/22 18:01:02 bostic Exp $ + */ + +package com.sleepycat.collections; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.db.DatabaseEntry; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.Cursor; +import com.sleepycat.db.LockMode; +import com.sleepycat.db.OperationStatus; +import com.sleepycat.db.SecondaryCursor; + +/** + * A cursor-like interface that enforces a key range. The method signatures + * are actually those of SecondaryCursor, but the pKey parameter may be null. + * It was done this way to avoid doubling the number of methods. + * + *

    This is not a general implementation of a range cursor and should not + * be used outside this package; however, it may evolve into a generally useful + * range cursor some day.
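As a rough sketch of the range-enforcement idea (illustrative only, not code from this patch): position the underlying cursor at the first key greater than or equal to the lower bound, then reject the result if the key found lies beyond the upper bound. The RangeCheckSketch class and the Comparator parameter are hypothetical stand-ins for the checks that KeyRange performs.

    import java.util.Comparator;

    import com.sleepycat.db.Cursor;
    import com.sleepycat.db.DatabaseEntry;
    import com.sleepycat.db.DatabaseException;
    import com.sleepycat.db.LockMode;
    import com.sleepycat.db.OperationStatus;

    class RangeCheckSketch {

        /* Returns SUCCESS only if a key in [begin, end) was found. */
        static OperationStatus firstInRange(Cursor cursor,
                                            byte[] begin,
                                            byte[] end,
                                            Comparator keyComparator)
            throws DatabaseException {

            DatabaseEntry key = new DatabaseEntry(begin);
            DatabaseEntry data = new DatabaseEntry();

            /* Find the smallest key >= begin. */
            OperationStatus status =
                cursor.getSearchKeyRange(key, data, LockMode.DEFAULT);

            /* Reject keys at or past the exclusive end bound. */
            if (status == OperationStatus.SUCCESS &&
                keyComparator.compare(key.getData(), end) >= 0) {
                status = OperationStatus.NOTFOUND;
            }
            return status;
        }
    }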

    + * + * @author Mark Hayes + */ +class RangeCursor implements Cloneable { + + /** + * The cursor and secondary cursor are the same object. The secCursor is + * null if the database is not a secondary database. + */ + private Cursor cursor; + private SecondaryCursor secCursor; + private CurrentTransaction currentTxn; + private boolean writeAllowed; + + /** + * The range is always non-null, but may be unbounded meaning that it is + * open and not used. + */ + private KeyRange range; + + /** + * If the database is a RECNO or QUEUE database, we know its keys are + * record numbers. We treat a non-positive record number as out of bounds, + * that is, we return NOTFOUND rather than throwing + * IllegalArgumentException as would happen if we passed a non-positive + * record number into the DB cursor. This behavior is required by the + * collections interface. + */ + private boolean isRecnoOrQueue; + + /** + * The privXxx entries are used only when the range is bounded. We read + * into these private entries to avoid modifying the caller's entry + * parameters in the case where we read sucessfully but the key is out of + * range. In that case we return NOTFOUND and we want to leave the entry + * parameters unchanged. + */ + private DatabaseEntry privKey; + private DatabaseEntry privPKey; + private DatabaseEntry privData; + + /** + * The initialized flag is set to true whenever we sucessfully position the + * cursor. It is used to implement the getNext/Prev logic for doing a + * getFirst/Last when the cursor is not initialized. We can't rely on + * Cursor to do that for us, since if we position the underlying cursor + * sucessfully but the key is out of range, we have no way to set the + * underlying cursor to uninitialized. A range cursor always starts in + * the uninitialized state. + */ + private boolean initialized; + + /** + * Create a range cursor. + */ + RangeCursor(DataView view, KeyRange range, boolean writeAllowed) + throws DatabaseException { + + this.range = range; + this.writeAllowed = writeAllowed; + this.currentTxn = view.currentTxn; + isRecnoOrQueue = view.recNumAllowed && !view.btreeRecNumDb; + + cursor = currentTxn.openCursor(view.db, writeAllowed, + view.useTransaction()); + init(); + } + + /** + * Create a cloned range cursor. The caller must clone the underlying + * cursor before using this constructor, because cursor open/close is + * handled specially for CDS cursors outside this class. + */ + RangeCursor dup(boolean samePosition) + throws DatabaseException { + + try { + RangeCursor c = (RangeCursor) super.clone(); + c.cursor = currentTxn.dupCursor(cursor, writeAllowed, + samePosition); + c.init(); + return c; + } catch (CloneNotSupportedException neverHappens) { + return null; + } + } + + /** + * Used for opening and duping (cloning). + */ + private void init() { + + if (cursor instanceof SecondaryCursor) { + secCursor = (SecondaryCursor) cursor; + } else { + secCursor = null; + } + + if (range.hasBound()) { + privKey = new DatabaseEntry(); + privPKey = new DatabaseEntry(); + privData = new DatabaseEntry(); + } else { + privKey = null; + privPKey = null; + privData = null; + } + } + + /** + * Returns the underlying cursor. Used for cloning. + */ + Cursor getCursor() { + return cursor; + } + + /** + * When an unbounded range is used, this method is called to use the + * callers entry parameters directly, to avoid the extra step of copying + * between the private entries and the caller's entries. 
+ */ + private void setParams(DatabaseEntry key, DatabaseEntry pKey, + DatabaseEntry data) { + privKey = key; + privPKey = pKey; + privData = data; + } + + /** + * Dups the cursor, sets the cursor and secCursor fields to the duped + * cursor, and returns the old cursor. Always call endOperation in a + * finally clause after calling beginOperation. + * + *

    If the returned cursor == the cursor field, the cursor is + * uninitialized and was not duped; this case is handled correctly by + * endOperation.
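The same save-and-restore idea can be sketched against the public Cursor API (illustrative only; it assumes the Cursor.dup(boolean) method of the underlying API, and the GuardedMoveSketch and KeyTest names are hypothetical): duplicate the cursor before a move that may land out of range, keep the duplicate on success, and close it on failure so the caller's position is untouched.

    import com.sleepycat.db.Cursor;
    import com.sleepycat.db.DatabaseEntry;
    import com.sleepycat.db.DatabaseException;
    import com.sleepycat.db.LockMode;
    import com.sleepycat.db.OperationStatus;

    class GuardedMoveSketch {

        /* Hypothetical callback standing in for a range check. */
        interface KeyTest {
            boolean accept(DatabaseEntry key);
        }

        /*
         * Advances only if the next record passes the test; otherwise the
         * original cursor (and its position) is returned unchanged.
         */
        static Cursor nextIfAccepted(Cursor cursor, KeyTest test)
            throws DatabaseException {

            Cursor dup = cursor.dup(true);      /* same position as original */
            DatabaseEntry key = new DatabaseEntry();
            DatabaseEntry data = new DatabaseEntry();
            OperationStatus status = dup.getNext(key, data, LockMode.DEFAULT);
            if (status == OperationStatus.SUCCESS && test.accept(key)) {
                cursor.close();                 /* keep the advanced duplicate */
                return dup;
            } else {
                dup.close();                    /* discard, keep old position */
                return cursor;
            }
        }
    }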

    + */ + private Cursor beginOperation() + throws DatabaseException { + + Cursor oldCursor = cursor; + if (initialized) { + cursor = currentTxn.dupCursor(cursor, writeAllowed, true); + if (secCursor != null) { + secCursor = (SecondaryCursor) cursor; + } + } else { + return cursor; + } + return oldCursor; + } + + /** + * If the operation succeded, leaves the duped cursor in place and closes + * the oldCursor. If the operation failed, moves the oldCursor back in + * place and closes the duped cursor. oldCursor may be null if + * beginOperation was not called, in cases where we don't need to dup + * the cursor. Always call endOperation when a successful operation ends, + * in order to set the initialized field. + */ + private void endOperation(Cursor oldCursor, OperationStatus status, + DatabaseEntry key, DatabaseEntry pKey, + DatabaseEntry data) + throws DatabaseException { + + if (status == OperationStatus.SUCCESS) { + if (oldCursor != null && oldCursor != cursor) { + currentTxn.closeCursor(oldCursor); + } + if (key != null) { + swapData(key, privKey); + } + if (pKey != null && secCursor != null) { + swapData(pKey, privPKey); + } + if (data != null) { + swapData(data, privData); + } + initialized = true; + } else { + if (oldCursor != null && oldCursor != cursor) { + currentTxn.closeCursor(cursor); + cursor = oldCursor; + if (secCursor != null) { + secCursor = (SecondaryCursor) cursor; + } + } + } + } + + /** + * Swaps the contents of the two entries. Used to return entry data to + * the caller when the operation was successful. + */ + static private void swapData(DatabaseEntry e1, DatabaseEntry e2) { + + byte[] d1 = e1.getData(); + int o1 = e1.getOffset(); + int s1 = e1.getSize(); + + e1.setData(e2.getData(), e2.getOffset(), e2.getSize()); + e2.setData(d1, o1, s1); + } + + /** + * Shares the same byte array, offset and size between two entries. + * Used when copying the entry data is not necessary because it is known + * that the underlying operation will not modify the entry, for example, + * with getSearchKey. 
+ */ + static private void shareData(DatabaseEntry from, DatabaseEntry to) { + + if (from != null) { + to.setData(from.getData(), from.getOffset(), from.getSize()); + } + } + + public OperationStatus getFirst(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + OperationStatus status; + if (!range.hasBound()) { + setParams(key, pKey, data); + status = doGetFirst(lockMode); + endOperation(null, status, null, null, null); + return status; + } + if (range.singleKey) { + KeyRange.copy(range.beginKey, privKey); + status = doGetSearchKey(lockMode); + endOperation(null, status, key, pKey, data); + } else { + status = OperationStatus.NOTFOUND; + Cursor oldCursor = beginOperation(); + try { + if (range.beginKey == null) { + status = doGetFirst(lockMode); + } else { + KeyRange.copy(range.beginKey, privKey); + status = doGetSearchKeyRange(lockMode); + if (status == OperationStatus.SUCCESS && + !range.beginInclusive && + range.compare(privKey, range.beginKey) == 0) { + status = doGetNext(lockMode); + } + } + if (status == OperationStatus.SUCCESS && + !range.check(privKey)) { + status = OperationStatus.NOTFOUND; + } + } finally { + endOperation(oldCursor, status, key, pKey, data); + } + } + return status; + } + + public OperationStatus getLast(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + OperationStatus status = OperationStatus.NOTFOUND; + if (!range.hasBound()) { + setParams(key, pKey, data); + status = doGetLast(lockMode); + endOperation(null, status, null, null, null); + return status; + } + Cursor oldCursor = beginOperation(); + try { + if (range.endKey == null) { + status = doGetLast(lockMode); + } else { + KeyRange.copy(range.endKey, privKey); + status = doGetSearchKeyRange(lockMode); + if (status == OperationStatus.SUCCESS) { + if (range.endInclusive && + range.compare(range.endKey, privKey) == 0) { + status = doGetNextNoDup(lockMode); + if (status == OperationStatus.SUCCESS) { + status = doGetPrev(lockMode); + } else { + status = doGetLast(lockMode); + } + } else { + status = doGetPrev(lockMode); + } + } else { + status = doGetLast(lockMode); + } + } + if (status == OperationStatus.SUCCESS && + !range.checkBegin(privKey, true)) { + status = OperationStatus.NOTFOUND; + } + } finally { + endOperation(oldCursor, status, key, pKey, data); + } + return status; + } + + public OperationStatus getNext(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + OperationStatus status; + if (!initialized) { + return getFirst(key, pKey, data, lockMode); + } + if (!range.hasBound()) { + setParams(key, pKey, data); + status = doGetNext(lockMode); + endOperation(null, status, null, null, null); + return status; + } + if (range.singleKey) { + status = doGetNextDup(lockMode); + endOperation(null, status, key, pKey, data); + } else { + status = OperationStatus.NOTFOUND; + Cursor oldCursor = beginOperation(); + try { + status = doGetNext(lockMode); + if (status == OperationStatus.SUCCESS && + !range.check(privKey)) { + status = OperationStatus.NOTFOUND; + } + } finally { + endOperation(oldCursor, status, key, pKey, data); + } + } + return status; + } + + public OperationStatus getNextNoDup(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + OperationStatus status; + if (!initialized) { + return getFirst(key, pKey, data, lockMode); + } + if (!range.hasBound()) 
{ + setParams(key, pKey, data); + status = doGetNextNoDup(lockMode); + endOperation(null, status, null, null, null); + return status; + } + if (range.singleKey) { + status = OperationStatus.NOTFOUND; + } else { + status = OperationStatus.NOTFOUND; + Cursor oldCursor = beginOperation(); + try { + status = doGetNextNoDup(lockMode); + if (status == OperationStatus.SUCCESS && + !range.check(privKey)) { + status = OperationStatus.NOTFOUND; + } + } finally { + endOperation(oldCursor, status, key, pKey, data); + } + } + return status; + } + + public OperationStatus getPrev(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + OperationStatus status; + if (!initialized) { + return getLast(key, pKey, data, lockMode); + } + if (!range.hasBound()) { + setParams(key, pKey, data); + status = doGetPrev(lockMode); + endOperation(null, status, null, null, null); + return status; + } + if (range.singleKey) { + status = doGetPrevDup(lockMode); + endOperation(null, status, key, pKey, data); + } else { + status = OperationStatus.NOTFOUND; + Cursor oldCursor = beginOperation(); + try { + status = doGetPrev(lockMode); + if (status == OperationStatus.SUCCESS && + !range.check(privKey)) { + status = OperationStatus.NOTFOUND; + } + } finally { + endOperation(oldCursor, status, key, pKey, data); + } + } + return status; + } + + public OperationStatus getPrevNoDup(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + OperationStatus status; + if (!initialized) { + return getLast(key, pKey, data, lockMode); + } + if (!range.hasBound()) { + setParams(key, pKey, data); + status = doGetPrevNoDup(lockMode); + endOperation(null, status, null, null, null); + return status; + } + if (range.singleKey) { + status = OperationStatus.NOTFOUND; + } else { + status = OperationStatus.NOTFOUND; + Cursor oldCursor = beginOperation(); + try { + status = doGetPrevNoDup(lockMode); + if (status == OperationStatus.SUCCESS && + !range.check(privKey)) { + status = OperationStatus.NOTFOUND; + } + } finally { + endOperation(oldCursor, status, key, pKey, data); + } + } + return status; + } + + public OperationStatus getSearchKey(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + OperationStatus status; + if (!range.hasBound()) { + setParams(key, pKey, data); + status = doGetSearchKey(lockMode); + endOperation(null, status, null, null, null); + return status; + } + if (!range.check(key)) { + status = OperationStatus.NOTFOUND; + } else { + shareData(key, privKey); + status = doGetSearchKey(lockMode); + endOperation(null, status, key, pKey, data); + } + return status; + } + + public OperationStatus getSearchBoth(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + OperationStatus status; + if (!range.hasBound()) { + setParams(key, pKey, data); + status = doGetSearchBoth(lockMode); + endOperation(null, status, null, null, null); + return status; + } + if (!range.check(key)) { + status = OperationStatus.NOTFOUND; + } else { + shareData(key, privKey); + if (secCursor != null) { + shareData(pKey, privPKey); + } else { + shareData(data, privData); + } + status = doGetSearchBoth(lockMode); + endOperation(null, status, key, pKey, data); + } + return status; + } + + public OperationStatus getSearchKeyRange(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + throws 
DatabaseException { + + OperationStatus status = OperationStatus.NOTFOUND; + if (!range.hasBound()) { + setParams(key, pKey, data); + status = doGetSearchKeyRange(lockMode); + endOperation(null, status, null, null, null); + return status; + } + Cursor oldCursor = beginOperation(); + try { + shareData(key, privKey); + status = doGetSearchKeyRange(lockMode); + if (status == OperationStatus.SUCCESS && + !range.check(privKey)) { + status = OperationStatus.NOTFOUND; + } + } finally { + endOperation(oldCursor, status, key, pKey, data); + } + return status; + } + + public OperationStatus getSearchBothRange(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + OperationStatus status = OperationStatus.NOTFOUND; + if (!range.hasBound()) { + setParams(key, pKey, data); + status = doGetSearchBothRange(lockMode); + endOperation(null, status, null, null, null); + return status; + } + Cursor oldCursor = beginOperation(); + try { + shareData(key, privKey); + if (secCursor != null) { + shareData(pKey, privPKey); + } else { + shareData(data, privData); + } + status = doGetSearchBothRange(lockMode); + if (status == OperationStatus.SUCCESS && + !range.check(privKey)) { + status = OperationStatus.NOTFOUND; + } + } finally { + endOperation(oldCursor, status, key, pKey, data); + } + return status; + } + + public OperationStatus getSearchRecordNumber(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + OperationStatus status; + if (!range.hasBound()) { + setParams(key, pKey, data); + status = doGetSearchRecordNumber(lockMode); + endOperation(null, status, null, null, null); + return status; + } + if (!range.check(key)) { + status = OperationStatus.NOTFOUND; + } else { + shareData(key, privKey); + status = doGetSearchRecordNumber(lockMode); + endOperation(null, status, key, pKey, data); + } + return status; + } + + public OperationStatus getNextDup(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + if (!initialized) { + throw new DatabaseException("Cursor not initialized"); + } + OperationStatus status; + if (!range.hasBound()) { + setParams(key, pKey, data); + status = doGetNextDup(lockMode); + endOperation(null, status, null, null, null); + } else { + status = doGetNextDup(lockMode); + endOperation(null, status, key, pKey, data); + } + return status; + } + + public OperationStatus getPrevDup(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + if (!initialized) { + throw new DatabaseException("Cursor not initialized"); + } + OperationStatus status; + if (!range.hasBound()) { + setParams(key, pKey, data); + status = doGetPrevDup(lockMode); + endOperation(null, status, null, null, null); + } else { + status = doGetPrevDup(lockMode); + endOperation(null, status, key, pKey, data); + } + return status; + } + + public OperationStatus getCurrent(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + if (!initialized) { + throw new DatabaseException("Cursor not initialized"); + } + if (secCursor != null && pKey != null) { + return secCursor.getCurrent(key, pKey, data, lockMode); + } else { + return cursor.getCurrent(key, data, lockMode); + } + } + + /* + * Pass-thru methods. 
+ */ + + public void close() + throws DatabaseException { + + currentTxn.closeCursor(cursor); + } + + public int count() + throws DatabaseException { + + return cursor.count(); + } + + public OperationStatus delete() + throws DatabaseException { + + return cursor.delete(); + } + + public OperationStatus put(DatabaseEntry key, DatabaseEntry data) + throws DatabaseException { + + return cursor.put(key, data); + } + + public OperationStatus putNoOverwrite(DatabaseEntry key, + DatabaseEntry data) + throws DatabaseException { + + return cursor.putNoOverwrite(key, data); + } + + public OperationStatus putNoDupData(DatabaseEntry key, DatabaseEntry data) + throws DatabaseException { + + return cursor.putNoDupData(key, data); + } + + public OperationStatus putCurrent(DatabaseEntry data) + throws DatabaseException { + + return cursor.putCurrent(data); + } + + public OperationStatus putAfter(DatabaseEntry data) + throws DatabaseException { + + return DbCompat.putAfter(cursor, data); + } + + public OperationStatus putBefore(DatabaseEntry data) + throws DatabaseException { + + return DbCompat.putBefore(cursor, data); + } + + private OperationStatus doGetFirst(LockMode lockMode) + throws DatabaseException { + + if (secCursor != null && privPKey != null) { + return secCursor.getFirst(privKey, privPKey, privData, lockMode); + } else { + return cursor.getFirst(privKey, privData, lockMode); + } + } + + private OperationStatus doGetLast(LockMode lockMode) + throws DatabaseException { + + if (secCursor != null && privPKey != null) { + return secCursor.getLast(privKey, privPKey, privData, lockMode); + } else { + return cursor.getLast(privKey, privData, lockMode); + } + } + + private OperationStatus doGetNext(LockMode lockMode) + throws DatabaseException { + + if (secCursor != null && privPKey != null) { + return secCursor.getNext(privKey, privPKey, privData, lockMode); + } else { + return cursor.getNext(privKey, privData, lockMode); + } + } + + private OperationStatus doGetNextDup(LockMode lockMode) + throws DatabaseException { + + if (secCursor != null && privPKey != null) { + return secCursor.getNextDup(privKey, privPKey, privData, lockMode); + } else { + return cursor.getNextDup(privKey, privData, lockMode); + } + } + + private OperationStatus doGetNextNoDup(LockMode lockMode) + throws DatabaseException { + + if (secCursor != null && privPKey != null) { + return secCursor.getNextNoDup(privKey, privPKey, privData, + lockMode); + } else { + return cursor.getNextNoDup(privKey, privData, lockMode); + } + } + + private OperationStatus doGetPrev(LockMode lockMode) + throws DatabaseException { + + if (secCursor != null && privPKey != null) { + return secCursor.getPrev(privKey, privPKey, privData, lockMode); + } else { + return cursor.getPrev(privKey, privData, lockMode); + } + } + + private OperationStatus doGetPrevDup(LockMode lockMode) + throws DatabaseException { + + if (secCursor != null && privPKey != null) { + return secCursor.getPrevDup(privKey, privPKey, privData, lockMode); + } else { + return cursor.getPrevDup(privKey, privData, lockMode); + } + } + + private OperationStatus doGetPrevNoDup(LockMode lockMode) + throws DatabaseException { + + if (secCursor != null && privPKey != null) { + return secCursor.getPrevNoDup(privKey, privPKey, privData, + lockMode); + } else { + return cursor.getPrevNoDup(privKey, privData, lockMode); + } + } + + private OperationStatus doGetSearchKey(LockMode lockMode) + throws DatabaseException { + + if (isRecnoOrQueue && DbCompat.getRecordNumber(privKey) <= 0) { + return 
OperationStatus.NOTFOUND; + } + if (secCursor != null && privPKey != null) { + return secCursor.getSearchKey(privKey, privPKey, privData, + lockMode); + } else { + return cursor.getSearchKey(privKey, privData, lockMode); + } + } + + private OperationStatus doGetSearchKeyRange(LockMode lockMode) + throws DatabaseException { + + if (isRecnoOrQueue && DbCompat.getRecordNumber(privKey) <= 0) { + return OperationStatus.NOTFOUND; + } + if (secCursor != null && privPKey != null) { + return secCursor.getSearchKeyRange(privKey, privPKey, privData, + lockMode); + } else { + return cursor.getSearchKeyRange(privKey, privData, lockMode); + } + } + + private OperationStatus doGetSearchBoth(LockMode lockMode) + throws DatabaseException { + + if (isRecnoOrQueue && DbCompat.getRecordNumber(privKey) <= 0) { + return OperationStatus.NOTFOUND; + } + if (secCursor != null && privPKey != null) { + return secCursor.getSearchBoth(privKey, privPKey, privData, + lockMode); + } else { + return cursor.getSearchBoth(privKey, privData, lockMode); + } + } + + private OperationStatus doGetSearchBothRange(LockMode lockMode) + throws DatabaseException { + + if (isRecnoOrQueue && DbCompat.getRecordNumber(privKey) <= 0) { + return OperationStatus.NOTFOUND; + } + if (secCursor != null && privPKey != null) { + return secCursor.getSearchBothRange(privKey, privPKey, privData, + lockMode); + } else { + return cursor.getSearchBothRange(privKey, privData, lockMode); + } + } + + private OperationStatus doGetSearchRecordNumber(LockMode lockMode) + throws DatabaseException { + + if (DbCompat.getRecordNumber(privKey) <= 0) { + return OperationStatus.NOTFOUND; + } + if (secCursor != null && privPKey != null) { + return DbCompat.getSearchRecordNumber(secCursor, privKey, privPKey, + privData, lockMode); + } else { + return DbCompat.getSearchRecordNumber(cursor, privKey, privData, + lockMode); + } + } +} diff --git a/db/java/src/com/sleepycat/collections/StoredCollection.java b/db/java/src/com/sleepycat/collections/StoredCollection.java new file mode 100644 index 000000000..90f1a2f11 --- /dev/null +++ b/db/java/src/com/sleepycat/collections/StoredCollection.java @@ -0,0 +1,460 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: StoredCollection.java,v 1.4 2004/08/02 18:52:05 mjc Exp $ + */ + +package com.sleepycat.collections; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; + +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.JoinConfig; +import com.sleepycat.db.OperationStatus; + +/** + * A abstract base class for all stored collections. This class, and its + * base class {@link StoredContainer}, provide implementations of most methods + * in the {@link Collection} interface. Other methods, such as {@link #add} + * and {@link #remove}, are provided by concrete classes that extend this + * class. + * + *

Note that this class does not conform to the standard Java collections
interface in the following ways:

  • The {@link #size} method always throws UnsupportedOperationException
    because, for performance reasons, databases do not maintain their total
    record count.
  • All iterators must be explicitly closed using {@link StoredIterator#close()}
    or {@link StoredIterator#close(java.util.Iterator)} to release the
    underlying database cursor resources (see the sketch after this list).
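A minimal usage sketch of the close requirement; the IterationExample name and the Integer element type are assumptions made for illustration only.

    import java.util.Collection;
    import java.util.Iterator;

    import com.sleepycat.collections.StoredIterator;

    public class IterationExample {

        /* Sums a stored collection of Integer values, always releasing the
         * underlying cursor via StoredIterator.close(). */
        static int sum(Collection storedValues) {

            int total = 0;
            Iterator i = storedValues.iterator();
            try {
                while (i.hasNext()) {
                    total += ((Integer) i.next()).intValue();
                }
            } finally {
                /* Safe for any iterator; only StoredIterators are closed. */
                StoredIterator.close(i);
            }
            return total;
        }
    }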
    + * + *

In addition, this class provides the following methods for stored collections
only. Note that the use of these methods is not compatible with the standard
Java collections interface. A join usage sketch follows this list.

  • {@link #iterator(boolean)}
  • {@link #join}
  • {@link #toList()}
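A hedged sketch of an equality join follows; the JoinExample name, the index containers, and the key values are placeholders, and it assumes (as the join() implementation further below suggests) that the returned StoredIterator must still be closed to release its cursors.

    import com.sleepycat.collections.StoredCollection;
    import com.sleepycat.collections.StoredContainer;
    import com.sleepycat.collections.StoredIterator;

    public class JoinExample {

        /* Prints every element of the primary collection whose secondary
         * index keys match both given values, e.g. color == "red" and
         * weight == "heavy". */
        static void printMatches(StoredCollection primary,
                                 StoredContainer byColor,
                                 StoredContainer byWeight) {

            StoredIterator i = primary.join(
                new StoredContainer[] { byColor, byWeight },
                new Object[] { "red", "heavy" },
                null);                        /* null means default JoinConfig */
            try {
                while (i.hasNext()) {
                    System.out.println(i.next());
                }
            } finally {
                i.close();
            }
        }
    }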
    + * + * @author Mark Hayes + */ +public abstract class StoredCollection extends StoredContainer + implements Collection { + + StoredCollection(DataView view) { + + super(view); + } + + final boolean add(Object key, Object value) { + + DataCursor cursor = null; + boolean doAutoCommit = beginAutoCommit(); + try { + cursor = new DataCursor(view, true); + OperationStatus status = + cursor.putNoDupData(key, value, null, false); + closeCursor(cursor); + commitAutoCommit(doAutoCommit); + return (status == OperationStatus.SUCCESS); + } catch (Exception e) { + closeCursor(cursor); + throw handleException(e, doAutoCommit); + } + } + + /** + * Returns an iterator over the elements in this collection. + * The iterator will be read-only if the collection is read-only. + * This method conforms to the {@link Collection#iterator} interface. + * + * @return a {@link StoredIterator} for this collection. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + * + * @see #isWriteAllowed + */ + public Iterator iterator() { + + return iterator(isWriteAllowed()); + } + + /** + * Returns a read or read-write iterator over the elements in this + * collection. + * This method does not exist in the standard {@link Collection} interface. + * + * @param writeAllowed is true to open a read-write iterator or false to + * open a read-only iterator. If the collection is read-only the iterator + * will always be read-only. + * + * @return a {@link StoredIterator} for this collection. + * + * @throws IllegalStateException if writeAllowed is true but the collection + * is read-only. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + * + * @see #isWriteAllowed + */ + public StoredIterator iterator(boolean writeAllowed) { + + try { + return new StoredIterator(this, writeAllowed && isWriteAllowed(), + null); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * Returns an array of all the elements in this collection. + * This method conforms to the {@link Collection#toArray()} interface. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public Object[] toArray() { + + ArrayList list = new ArrayList(); + Iterator i = iterator(); + try { + while (i.hasNext()) { + list.add(i.next()); + } + } finally { + StoredIterator.close(i); + } + return list.toArray(); + } + + /** + * Returns an array of all the elements in this collection whose runtime + * type is that of the specified array. + * This method conforms to the {@link Collection#toArray(Object[])} + * interface. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public Object[] toArray(Object[] a) { + + int j = 0; + Iterator i = iterator(); + try { + while (j < a.length && i.hasNext()) { + a[j++] = i.next(); + } + if (j < a.length) { + a[j] = null; + } else if (i.hasNext()) { + ArrayList list = new ArrayList(Arrays.asList(a)); + while (i.hasNext()) { + list.add(i.next()); + } + a = list.toArray(a); + } + } finally { + StoredIterator.close(i); + } + return a; + } + + /** + * Returns true if this collection contains all of the elements in the + * specified collection. + * This method conforms to the {@link Collection#containsAll} interface. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. 
+ */ + public boolean containsAll(Collection coll) { + Iterator i = coll.iterator(); + try { + while (i.hasNext()) { + if (!contains(i.next())) { + return false; + } + } + } finally { + StoredIterator.close(i); + } + return true; + } + + /** + * Adds all of the elements in the specified collection to this collection + * (optional operation). + * This method calls the {@link #add(Object)} method of the concrete + * collection class, which may or may not be supported. + * This method conforms to the {@link Collection#addAll} interface. + * + * @throws UnsupportedOperationException if the collection is read-only, or + * if the collection is indexed, or if the add method is not supported by + * the concrete collection. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public boolean addAll(Collection coll) { + Iterator i = null; + boolean doAutoCommit = beginAutoCommit(); + try { + i = coll.iterator(); + boolean changed = false; + while (i.hasNext()) { + if (add(i.next())) { + changed = true; + } + } + StoredIterator.close(i); + commitAutoCommit(doAutoCommit); + return changed; + } catch (Exception e) { + StoredIterator.close(i); + throw handleException(e, doAutoCommit); + } + } + + /** + * Removes all this collection's elements that are also contained in the + * specified collection (optional operation). + * This method conforms to the {@link Collection#removeAll} interface. + * + * @throws UnsupportedOperationException if the collection is read-only. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public boolean removeAll(Collection coll) { + + return removeAll(coll, true); + } + + /** + * Retains only the elements in this collection that are contained in the + * specified collection (optional operation). + * This method conforms to the {@link Collection#removeAll} interface. + * + * @throws UnsupportedOperationException if the collection is read-only. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public boolean retainAll(Collection coll) { + + return removeAll(coll, false); + } + + private boolean removeAll(Collection coll, boolean ifExistsInColl) { + Iterator i = null; + boolean doAutoCommit = beginAutoCommit(); + try { + boolean changed = false; + i = iterator(); + while (i.hasNext()) { + if (ifExistsInColl == coll.contains(i.next())) { + i.remove(); + changed = true; + } + } + StoredIterator.close(i); + commitAutoCommit(doAutoCommit); + return changed; + } catch (Exception e) { + StoredIterator.close(i); + throw handleException(e, doAutoCommit); + } + } + + /** + * Compares the specified object with this collection for equality. + * A value comparison is performed by this method and the stored values + * are compared rather than calling the equals() method of each element. + * This method conforms to the {@link Collection#equals} interface. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public boolean equals(Object other) { + + if (other instanceof Collection) { + Collection otherColl = StoredCollection.copyCollection(other); + Iterator i = iterator(); + try { + while (i.hasNext()) { + if (!otherColl.remove(i.next())) { + return false; + } + } + return otherColl.isEmpty(); + } finally { + StoredIterator.close(i); + } + } else { + return false; + } + } + + /* + * Add this in to keep FindBugs from whining at us about implementing + * equals(), but not hashCode(). 
+ */ + public int hashCode() { + return super.hashCode(); + } + + /** + * Returns a copy of this collection as an ArrayList. This is the same as + * {@link #toArray()} but returns a collection instead of an array. + * + * @return an {@link ArrayList} containing a copy of all elements in this + * collection. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public List toList() { + + ArrayList list = new ArrayList(); + Iterator i = iterator(); + try { + while (i.hasNext()) list.add(i.next()); + return list; + } finally { + StoredIterator.close(i); + } + } + + /** + * Converts the collection to a string representation for debugging. + * WARNING: All elements will be converted to strings and returned and + * therefore the returned string may be very large. + * + * @return the string representation. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public String toString() { + StringBuffer buf = new StringBuffer(); + buf.append("["); + Iterator i = iterator(); + try { + while (i.hasNext()) { + if (buf.length() > 1) buf.append(','); + buf.append(i.next().toString()); + } + buf.append(']'); + return buf.toString(); + } finally { + StoredIterator.close(i); + } + } + + /** + * Returns an iterator representing an equality join of the indices and + * index key values specified. + * This method does not exist in the standard {@link Collection} interface. + * + *

    The returned iterator supports only the two methods: hasNext() and + * next(). All other methods will throw UnsupportedOperationException.

    + * + * @param indices is an array of indices with elements corresponding to + * those in the indexKeys array. + * + * @param indexKeys is an array of index key values identifying the + * elements to be selected. + * + * @param joinConfig is the join configuration, or null to use the + * default configuration. + * + * @return an iterator over the elements in this collection that match + * all specified index key values. + * + * @throws IllegalArgumentException if this collection is indexed or if a + * given index does not have the same store as this collection. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public StoredIterator join(StoredContainer[] indices, Object[] indexKeys, + JoinConfig joinConfig) { + + try { + DataView[] indexViews = new DataView[indices.length]; + for (int i = 0; i < indices.length; i += 1) { + indexViews[i] = indices[i].view; + } + DataCursor cursor = view.join(indexViews, indexKeys, joinConfig); + return new StoredIterator(this, false, cursor); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + final Object getFirstOrLast(boolean doGetFirst) { + + DataCursor cursor = null; + try { + cursor = new DataCursor(view, false); + OperationStatus status; + if (doGetFirst) { + status = cursor.getFirst(false); + } else { + status = cursor.getLast(false); + } + return (status == OperationStatus.SUCCESS) ? + makeIteratorData(null, cursor) : null; + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + closeCursor(cursor); + } + } + + abstract Object makeIteratorData(StoredIterator iterator, + DataCursor cursor) + throws DatabaseException; + + abstract boolean hasValues(); + + boolean iterateDuplicates() { + + return true; + } + + void checkIterAddAllowed() + throws UnsupportedOperationException { + + if (!areDuplicatesAllowed()) { + throw new UnsupportedOperationException("duplicates required"); + } + } + + int getIndexOffset() { + + return 0; + } + + private static Collection copyCollection(Object other) { + + if (other instanceof StoredCollection) { + return ((StoredCollection) other).toList(); + } else { + return new ArrayList((Collection) other); + } + } +} diff --git a/db/java/src/com/sleepycat/collections/StoredCollections.java b/db/java/src/com/sleepycat/collections/StoredCollections.java new file mode 100644 index 000000000..c9a4ab9fc --- /dev/null +++ b/db/java/src/com/sleepycat/collections/StoredCollections.java @@ -0,0 +1,156 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: StoredCollections.java,v 1.1 2004/04/09 16:34:09 mark Exp $ + */ + +package com.sleepycat.collections; + +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.SortedMap; +import java.util.SortedSet; + +/** + * This class consists exclusively of static methods that operate on or return + * stored collections. It contains methods for changing certain properties of a + * collection. Because collection properties are immutable, these methods + * always return a new collection reference. This allows stored collections to + * be used safely by multiple threads. Note that creating the new collection + * reference is not expensive and creates only two new objects. + * + *

    When a collection is created with a particular property, all collections + * and iterators derived from that collection will inherit the property. For + * example, if a dirty-read Map is created then calls to subMap(), values(), + * entrySet(), and keySet() will create dirty-read collections also.

    + * + *

Dirty-Read: Method names beginning with dirtyRead create a new dirty-read
container from a given stored container. When dirty-read is enabled, data
will be read that has been modified by another transaction but not committed.
Using dirty-read can improve concurrency since reading will not wait for
other transactions to complete. For a non-transactional container (when
{@link StoredContainer#isTransactional} returns false), dirty-read has no
effect. If {@link StoredContainer#isDirtyReadAllowed} returns false,
dirty-read also has no effect. If dirty-read is enabled (and allowed) for a
container, {@link StoredContainer#isDirtyRead} will return true. Dirty-read
is disabled by default for a container. A brief usage sketch follows.
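A minimal sketch of enabling dirty-read on an existing stored map; the wrapper method and class names are hypothetical.

    import java.util.Map;

    import com.sleepycat.collections.StoredCollections;

    public class DirtyReadExample {

        /* Returns a dirty-read view of the given stored map.  The original
         * reference is unchanged; the returned map shares the same underlying
         * database but reads uncommitted data when the container allows it. */
        static Map dirtyReadView(Map storedMap) {

            return StoredCollections.dirtyReadMap(storedMap);
        }
    }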

    + */ +public class StoredCollections { + + private StoredCollections() {} + + /** + * Creates a dirty-read collection from a given stored collection. + * + * @param storedCollection the base collection. + * + * @return the dirty-read collection. + * + * @throws ClassCastException if the given container is not a + * StoredContainer. + */ + public static Collection dirtyReadCollection(Collection storedCollection) { + + return (Collection) + ((StoredContainer) storedCollection).dirtyReadClone(); + } + + /** + * Creates a dirty-read list from a given stored list. + * + * @param storedList the base list. + * + * @return the dirty-read list. + * + * @throws ClassCastException if the given container is not a + * StoredContainer. + */ + public static List dirtyReadList(List storedList) { + + return (List) ((StoredContainer) storedList).dirtyReadClone(); + } + + /** + * Creates a dirty-read map from a given stored map. + * + * @param storedMap the base map. + * + * @return the dirty-read map. + * + * @throws ClassCastException if the given container is not a + * StoredContainer. + */ + public static Map dirtyReadMap(Map storedMap) { + + return (Map) ((StoredContainer) storedMap).dirtyReadClone(); + } + + /** + * Creates a dirty-read set from a given stored set. + * + * @param storedSet the base set. + * + * @return the dirty-read set. + * + * @throws ClassCastException if the given container is not a + * StoredContainer. + */ + public static Set dirtyReadSet(Set storedSet) { + + return (Set) ((StoredContainer) storedSet).dirtyReadClone(); + } + + /** + * Creates a dirty-read sorted map from a given stored sorted map. + * + * @param storedSortedMap the base map. + * + * @return the dirty-read map. + * + * @throws ClassCastException if the given container is not a + * StoredContainer. + */ + public static SortedMap dirtyReadSortedMap(SortedMap storedSortedMap) { + + return (SortedMap) + ((StoredContainer) storedSortedMap).dirtyReadClone(); + } + + /** + * Creates a dirty-read sorted set from a given stored sorted set. + * + * @param storedSortedSet the base set. + * + * @return the dirty-read set. + * + * @throws ClassCastException if the given container is not a + * StoredContainer. + */ + public static SortedSet dirtyReadSortedSet(SortedSet storedSortedSet) { + + return (SortedSet) + ((StoredContainer) storedSortedSet).dirtyReadClone(); + } + + /** + * Clones a stored iterator preserving its current position. + * + * @param storedIterator an iterator to clone. + * + * @return a new {@link StoredIterator} having the same position as the + * given iterator. + * + * @throws ClassCastException if the given iterator is not a + * StoredIterator. + */ + public static Iterator iterator(Iterator storedIterator) { + + return (Iterator) ((StoredIterator) storedIterator).clone(); + } +} diff --git a/db/java/src/com/sleepycat/collections/StoredContainer.java b/db/java/src/com/sleepycat/collections/StoredContainer.java new file mode 100644 index 000000000..f503a5dc8 --- /dev/null +++ b/db/java/src/com/sleepycat/collections/StoredContainer.java @@ -0,0 +1,415 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. 
+ * + * $Id: StoredContainer.java,v 1.2 2004/06/02 20:59:38 mark Exp $ + */ + +package com.sleepycat.collections; + +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.OperationStatus; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * A abstract base class for all stored collections and maps. This class + * provides implementations of methods that are common to the {@link + * java.util.Collection} and the {@link java.util.Map} interfaces, namely + * {@link #clear}, {@link #isEmpty} and {@link #size}. + * + *

Note that this class does not conform to the standard Java collections
interface in the following ways (a counting sketch follows this list):

  • The {@link #size} method always throws UnsupportedOperationException
    because, for performance reasons, databases do not maintain their total
    record count.
  • All iterators must be explicitly closed using {@link StoredIterator#close()}
    or {@link StoredIterator#close(java.util.Iterator)} to release the
    underlying database cursor resources.
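Since size() is unsupported, a caller that truly needs a count has to derive one itself, for example by walking an iterator over a stored collection backed by this container. A sketch only; the CountExample name is an assumption.

    import java.util.Collection;
    import java.util.Iterator;

    import com.sleepycat.collections.StoredIterator;

    public class CountExample {

        /* Counts elements by full iteration, closing the cursor afterwards. */
        static int count(Collection storedCollection) {

            int n = 0;
            Iterator i = storedCollection.iterator();
            try {
                while (i.hasNext()) {
                    i.next();
                    n += 1;
                }
            } finally {
                StoredIterator.close(i);
            }
            return n;
        }
    }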
    + * + *

In addition, this class provides the following methods for stored collections
only. Note that the use of these methods is not compatible with the standard
Java collections interface. A property-check sketch follows this list.

  • {@link #isWriteAllowed()}
  • {@link #isSecondary()}
  • {@link #isOrdered()}
  • {@link #areDuplicatesAllowed()}
  • {@link #areDuplicatesOrdered()}
  • {@link #areKeysRenumbered()}
  • {@link #isDirtyReadAllowed()}
  • {@link #isDirtyRead()}
  • {@link #isTransactional()}
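A small sketch of querying these properties before attempting an operation; the PropertyCheckExample name is hypothetical.

    import com.sleepycat.collections.StoredContainer;

    public class PropertyCheckExample {

        /* Decides whether duplicate values may be written under one key,
         * using only the StoredContainer-specific property methods. */
        static boolean canStoreDuplicates(StoredContainer container) {

            return container.isWriteAllowed() && container.areDuplicatesAllowed();
        }
    }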
    + * + * @author Mark Hayes + */ +public abstract class StoredContainer implements Cloneable { + + DataView view; + + StoredContainer(DataView view) { + + this.view = view; + } + + /** + * Returns true if this is a read-write container or false if this is a + * read-only container. + * This method does not exist in the standard {@link java.util.Map} or + * {@link java.util.Collection} interfaces. + * + * @return whether write is allowed. + */ + public final boolean isWriteAllowed() { + + return view.writeAllowed; + } + + /** + * Returns whether dirty-read is allowed for this container. + * For the JE product, dirty-read is always allowed; for the DB product, + * dirty-read is allowed if it was configured for the underlying database + * for this container. + * Even when dirty-read is allowed it must specifically be enabled by + * calling one of the {@link StoredCollections} methods. + * This method does not exist in the standard {@link java.util.Map} or + * {@link java.util.Collection} interfaces. + * + * @return whether dirty-read is allowed. + */ + public final boolean isDirtyReadAllowed() { + + return view.dirtyReadAllowed; + } + + /** + * Returns whether dirty-read is enabled for this container. + * If dirty-read is enabled, data will be read that is modified but not + * committed. + * Dirty-read is disabled by default. + * This method always returns false if {@link #isDirtyReadAllowed} returns + * false. + * This method does not exist in the standard {@link java.util.Map} or + * {@link java.util.Collection} interfaces. + * + * @return whether dirty-read is enabled. + */ + public final boolean isDirtyRead() { + + return view.dirtyReadEnabled; + } + + /** + * Returns whether the databases underlying this container are + * transactional. + * Even in a transactional environment, a database will be transactional + * only if it was opened within a transaction or if the auto-commit option + * was specified when it was opened. + * This method does not exist in the standard {@link java.util.Map} or + * {@link java.util.Collection} interfaces. + * + * @return whether the database is transactional. + */ + public final boolean isTransactional() { + + return view.transactional; + } + + /** + * Clones and enables dirty-read in the clone. + */ + final StoredContainer dirtyReadClone() { + + if (!isDirtyReadAllowed()) + return this; + try { + StoredContainer cont = (StoredContainer) clone(); + cont.view = cont.view.dirtyReadView(true); + return cont; + } catch (CloneNotSupportedException willNeverOccur) { return null; } + } + + /** + * Returns whether duplicate keys are allowed in this container. + * Duplicates are optionally allowed for HASH and BTREE databases. + * This method does not exist in the standard {@link java.util.Map} or + * {@link java.util.Collection} interfaces. + * + * @return whether duplicates are allowed. + */ + public final boolean areDuplicatesAllowed() { + + return view.dupsAllowed; + } + + /** + * Returns whether duplicate keys are allowed and sorted by element value. + * Duplicates are optionally sorted for HASH and BTREE databases. + * This method does not exist in the standard {@link java.util.Map} or + * {@link java.util.Collection} interfaces. + * + * @return whether duplicates are ordered. + */ + public final boolean areDuplicatesOrdered() { + + return view.dupsOrdered; + } + + /** + * Returns whether keys are renumbered when insertions and deletions occur. + * Keys are optionally renumbered for RECNO databases. 
+ * This method does not exist in the standard {@link java.util.Map} or + * {@link java.util.Collection} interfaces. + * + * @return whether keys are renumbered. + */ + public final boolean areKeysRenumbered() { + + return view.keysRenumbered; + } + + /** + * Returns whether keys are ordered in this container. + * Keys are ordered for BTREE, RECNO and QUEUE database. + * This method does not exist in the standard {@link java.util.Map} or + * {@link java.util.Collection} interfaces. + * + * @return whether keys are ordered. + */ + public final boolean isOrdered() { + + return view.ordered; + } + + /** + * Returns whether this container is a view on a secondary database rather + * than directly on a primary database. + * This method does not exist in the standard {@link java.util.Map} or + * {@link java.util.Collection} interfaces. + * + * @return whether the view is for a secondary database. + */ + public final boolean isSecondary() { + + return view.isSecondary(); + } + + /** + * Always throws UnsupportedOperationException. The size of a database + * cannot be obtained reliably or inexpensively. + * This method therefore violates the {@link java.util.Collection#size} and + * {@link java.util.Map#size} interfaces. + * + * @return always throws an exception. + * + * @throws UnsupportedOperationException unconditionally. + */ + public int size() { + + throw new UnsupportedOperationException( + "collection size not available"); + } + + /** + * Returns true if this map or collection contains no mappings or elements. + * This method conforms to the {@link java.util.Collection#isEmpty} and + * {@link java.util.Map#isEmpty} interfaces. + * + * @return whether the container is empty. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is thrown. + */ + public boolean isEmpty() { + + try { + return view.isEmpty(); + } catch (Exception e) { + throw convertException(e); + } + } + + /** + * Removes all mappings or elements from this map or collection (optional + * operation). + * This method conforms to the {@link java.util.Collection#clear} and + * {@link java.util.Map#clear} interfaces. + * + * @throws UnsupportedOperationException if the container is read-only. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is thrown. 
+ */ + public void clear() { + + boolean doAutoCommit = beginAutoCommit(); + try { + view.clear(); + commitAutoCommit(doAutoCommit); + } catch (Exception e) { + throw handleException(e, doAutoCommit); + } + } + + Object get(Object key) { + + DataCursor cursor = null; + try { + cursor = new DataCursor(view, false); + if (OperationStatus.SUCCESS == + cursor.getSearchKey(key, null, false)) { + return cursor.getCurrentValue(); + } else { + return null; + } + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + closeCursor(cursor); + } + } + + Object put(final Object key, final Object value) { + + DataCursor cursor = null; + boolean doAutoCommit = beginAutoCommit(); + try { + cursor = new DataCursor(view, true); + Object[] oldValue = new Object[1]; + cursor.put(key, value, oldValue, false); + closeCursor(cursor); + commitAutoCommit(doAutoCommit); + return oldValue[0]; + } catch (Exception e) { + closeCursor(cursor); + throw handleException(e, doAutoCommit); + } + } + + final boolean removeKey(final Object key, final Object[] oldVal) { + + DataCursor cursor = null; + boolean doAutoCommit = beginAutoCommit(); + try { + cursor = new DataCursor(view, true); + boolean found = false; + OperationStatus status = cursor.getSearchKey(key, null, true); + while (status == OperationStatus.SUCCESS) { + cursor.delete(); + found = true; + if (oldVal != null && oldVal[0] == null) { + oldVal[0] = cursor.getCurrentValue(); + } + status = cursor.getNextDup(true); + } + closeCursor(cursor); + commitAutoCommit(doAutoCommit); + return found; + } catch (Exception e) { + closeCursor(cursor); + throw handleException(e, doAutoCommit); + } + } + + boolean containsKey(Object key) { + + DataCursor cursor = null; + try { + cursor = new DataCursor(view, false); + return OperationStatus.SUCCESS == + cursor.getSearchKey(key, null, false); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + closeCursor(cursor); + } + } + + final boolean removeValue(Object value) { + + DataCursor cursor = null; + boolean doAutoCommit = beginAutoCommit(); + try { + cursor = new DataCursor(view, true); + OperationStatus status = cursor.find(value, true); + if (status == OperationStatus.SUCCESS) { + cursor.delete(); + } + closeCursor(cursor); + commitAutoCommit(doAutoCommit); + return (status == OperationStatus.SUCCESS); + } catch (Exception e) { + closeCursor(cursor); + throw handleException(e, doAutoCommit); + } + } + + boolean containsValue(Object value) { + + DataCursor cursor = null; + try { + cursor = new DataCursor(view, false); + OperationStatus status = cursor.find(value, true); + return (status == OperationStatus.SUCCESS); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + closeCursor(cursor); + } + } + + final void closeCursor(DataCursor cursor) { + + if (cursor != null) { + try { + cursor.close(); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + } + + final boolean beginAutoCommit() { + + if (view.transactional) { + try { + CurrentTransaction currentTxn = view.getCurrentTxn(); + if (currentTxn.getTransaction() == null) { + currentTxn.beginTransaction(null); + return true; + } + } catch (DatabaseException e) { + throw new RuntimeExceptionWrapper(e); + } + } + return false; + } + + final void commitAutoCommit(boolean doAutoCommit) + throws DatabaseException { + + if (doAutoCommit) view.getCurrentTxn().commitTransaction(); + } + + final RuntimeException handleException(Exception e, boolean doAutoCommit) { + 
+ if (doAutoCommit) { + try { + view.getCurrentTxn().abortTransaction(); + } catch (DatabaseException ignored) { + } + } + return StoredContainer.convertException(e); + } + + static RuntimeException convertException(Exception e) { + + if (e instanceof RuntimeException) { + return (RuntimeException) e; + } else { + return new RuntimeExceptionWrapper(e); + } + } +} diff --git a/db/java/src/com/sleepycat/collections/StoredEntrySet.java b/db/java/src/com/sleepycat/collections/StoredEntrySet.java new file mode 100644 index 000000000..fbc5f85d2 --- /dev/null +++ b/db/java/src/com/sleepycat/collections/StoredEntrySet.java @@ -0,0 +1,176 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: StoredEntrySet.java,v 1.3 2004/06/04 18:24:50 mark Exp $ + */ + +package com.sleepycat.collections; + +import java.util.Iterator; +import java.util.Map; +import java.util.Set; + +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.OperationStatus; + +/** + * The Set returned by Map.entrySet(). This class may not be instantiated + * directly. Contrary to what is stated by {@link Map#entrySet} this class + * does support the {@link #add} and {@link #addAll} methods. + * + *

    The {@link java.util.Map.Entry#setValue} method of the Map.Entry objects + * that are returned by this class and its iterators behaves just as the {@link + * StoredIterator#set} method does.

    + * + *

Note that this class does not conform to the standard Java collections
interface in the following ways:

  • The {@link #size} method always throws UnsupportedOperationException
    because, for performance reasons, databases do not maintain their total
    record count.
  • All iterators must be explicitly closed using {@link StoredIterator#close()}
    or {@link StoredIterator#close(java.util.Iterator)} to release the
    underlying database cursor resources.
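Because this entry set supports add(), a key/value pair can be inserted through it with a directly constructed MapEntryParameter. A sketch under the assumption that the underlying database configuration permits the insertion; the helper names are hypothetical.

    import java.util.Map;
    import java.util.Set;

    import com.sleepycat.collections.MapEntryParameter;

    public class EntrySetAddExample {

        /* Inserts a pair through a stored map's entry set and reports whether
         * it was added (i.e. was not already present). */
        static boolean addPair(Map storedMap, Object key, Object value) {

            Set entries = storedMap.entrySet();
            return entries.add(new MapEntryParameter(key, value));
        }
    }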
    + * + * @author Mark Hayes + */ +public class StoredEntrySet extends StoredCollection implements Set { + + StoredEntrySet(DataView mapView) { + + super(mapView); + } + + /** + * Adds the specified element to this set if it is not already present + * (optional operation). + * This method conforms to the {@link Set#add} interface. + * + * @param mapEntry must be a {@link java.util.Map.Entry} instance. + * + * @return true if the key-value pair was added to the set (and was not + * previously present). + * + * @throws UnsupportedOperationException if the collection is read-only. + * + * @throws ClassCastException if the mapEntry is not a {@link + * java.util.Map.Entry} instance. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is thrown. + */ + public boolean add(Object mapEntry) { + + Map.Entry entry = (Map.Entry) mapEntry; // allow ClassCastException + return add(entry.getKey(), entry.getValue()); + } + + /** + * Removes the specified element from this set if it is present (optional + * operation). + * This method conforms to the {@link Set#remove} interface. + * + * @param mapEntry is a {@link java.util.Map.Entry} instance to be removed. + * + * @return true if the key-value pair was removed from the set, or false if + * the mapEntry is not a {@link java.util.Map.Entry} instance or is not + * present in the set. + * + * @throws UnsupportedOperationException if the collection is read-only. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is thrown. + */ + public boolean remove(Object mapEntry) { + + if (!(mapEntry instanceof Map.Entry)) { + return false; + } + Map.Entry entry = (Map.Entry) mapEntry; + DataCursor cursor = null; + boolean doAutoCommit = beginAutoCommit(); + try { + cursor = new DataCursor(view, true); + OperationStatus status = + cursor.getSearchBoth(entry.getKey(), entry.getValue(), true); + if (status == OperationStatus.SUCCESS) { + cursor.delete(); + } + closeCursor(cursor); + commitAutoCommit(doAutoCommit); + return (status == OperationStatus.SUCCESS); + } catch (Exception e) { + closeCursor(cursor); + throw handleException(e, doAutoCommit); + } + } + + /** + * Returns true if this set contains the specified element. + * This method conforms to the {@link Set#contains} interface. + * + * @param mapEntry is a {@link java.util.Map.Entry} instance to be checked. + * + * @return true if the key-value pair is present in the set, or false if + * the mapEntry is not a {@link java.util.Map.Entry} instance or is not + * present in the set. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is thrown. 
+ */ + public boolean contains(Object mapEntry) { + + if (!(mapEntry instanceof Map.Entry)) { + return false; + } + Map.Entry entry = (Map.Entry) mapEntry; + DataCursor cursor = null; + try { + cursor = new DataCursor(view, false); + OperationStatus status = + cursor.getSearchBoth(entry.getKey(), entry.getValue(), false); + return (status == OperationStatus.SUCCESS); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + closeCursor(cursor); + } + } + + // javadoc is inherited + public String toString() { + StringBuffer buf = new StringBuffer(); + buf.append("["); + Iterator i = iterator(); + try { + while (i.hasNext()) { + Map.Entry entry = (Map.Entry) i.next(); + if (buf.length() > 1) buf.append(','); + Object key = entry.getKey(); + Object val = entry.getValue(); + if (key != null) buf.append(key.toString()); + buf.append('='); + if (val != null) buf.append(val.toString()); + } + buf.append(']'); + return buf.toString(); + } + finally { + StoredIterator.close(i); + } + } + + Object makeIteratorData(StoredIterator iterator, DataCursor cursor) + throws DatabaseException { + + return new StoredMapEntry(cursor.getCurrentKey(), + cursor.getCurrentValue(), + this, iterator); + } + + boolean hasValues() { + + return true; + } +} diff --git a/db/java/src/com/sleepycat/collections/StoredIterator.java b/db/java/src/com/sleepycat/collections/StoredIterator.java new file mode 100644 index 000000000..cfc7c67a4 --- /dev/null +++ b/db/java/src/com/sleepycat/collections/StoredIterator.java @@ -0,0 +1,600 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: StoredIterator.java,v 1.5 2004/09/22 18:01:03 bostic Exp $ + */ + +package com.sleepycat.collections; + +import java.util.Iterator; +import java.util.ListIterator; +import java.util.NoSuchElementException; + +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.OperationStatus; + +/** + * The Iterator returned by all stored collections. + * + *

+ * <p>While in general this class conforms to the {@link Iterator} interface,
+ * it is important to note that all iterators for stored collections must be
+ * explicitly closed with {@link #close()}.  The static method {@link
+ * #close(java.util.Iterator)} allows calling close for all iterators without
+ * harm to iterators that are not from stored collections, and also avoids
+ * casting.  If a stored iterator is not closed, unpredictable behavior
+ * including process death may result.</p>
+ *
+ * <p>This class implements the {@link Iterator} interface for all stored
+ * iterators.  It also implements {@link ListIterator} because some list
+ * iterator methods apply to all stored iterators, for example, {@link
+ * #previous} and {@link #hasPrevious}.  Other list iterator methods are always
+ * supported for lists, but for other types of collections are only supported
+ * under certain conditions.  See {@link #nextIndex}, {@link #previousIndex},
+ * {@link #add} and {@link #set} for details.</p>
+ *
+ * <p>In addition, this class provides the following methods for stored
+ * collection iterators only.  Note that the use of these methods is not
+ * compatible with the standard Java collections interface.</p>
+ * <ul>
+ * <li>{@link #close()}</li>
+ * <li>{@link #close(Iterator)}</li>
+ * <li>{@link #getCollection}</li>
+ * <li>{@link #setReadModifyWrite}</li>
+ * <li>{@link #isReadModifyWrite}</li>
+ * </ul>
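+ *
+ * <p>A minimal usage sketch (the storedCollection variable below is assumed
+ * to be any stored collection obtained elsewhere; only methods documented in
+ * this class are used):</p>
+ * <pre>
+ *     Iterator i = storedCollection.iterator();
+ *     try {
+ *         if (i instanceof StoredIterator) {
+ *             // obtain write locks while reading to help avoid deadlocks
+ *             ((StoredIterator) i).setReadModifyWrite(true);
+ *         }
+ *         while (i.hasNext()) {
+ *             Object element = i.next();
+ *             // process element here
+ *         }
+ *     } finally {
+ *         // safe for any Iterator; closes only stored iterators
+ *         StoredIterator.close(i);
+ *     }
+ * </pre>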
    + * + * @author Mark Hayes + */ +public class StoredIterator implements ListIterator, Cloneable { + + /** + * Closes the given iterator using {@link #close()} if it is a {@link + * StoredIterator}. If the given iterator is not a {@link StoredIterator}, + * this method does nothing. + * + * @param i is the iterator to close. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is thrown. + */ + public static void close(Iterator i) { + + if (i instanceof StoredIterator) { + ((StoredIterator) i).close(); + } + } + + private static final int MOVE_NEXT = 1; + private static final int MOVE_PREV = 2; + private static final int MOVE_FIRST = 3; + + private boolean lockForWrite; + private StoredCollection coll; + private DataCursor cursor; + private int toNext; + private int toPrevious; + private int toCurrent; + private boolean writeAllowed; + private boolean setAndRemoveAllowed; + private Object currentData; + private final boolean recNumAccess; + + StoredIterator(StoredCollection coll, boolean writeAllowed, + DataCursor joinCursor) { + try { + this.coll = coll; + this.writeAllowed = writeAllowed; + if (joinCursor == null) + this.cursor = new DataCursor(coll.view, writeAllowed); + else + this.cursor = joinCursor; + this.recNumAccess = cursor.hasRecNumAccess(); + reset(); + } catch (Exception e) { + try { + /* Ensure that the cursor is closed. [#10516] */ + close(); + } catch (Exception ignored) {} + throw StoredContainer.convertException(e); + } + } + + /** + * Clones this iterator preserving its current position. + * + * @return a new {@link StoredIterator} having the same position as this + * iterator. + */ + protected Object clone() { + + try { + StoredIterator o = (StoredIterator) super.clone(); + o.cursor = cursor.cloneCursor(); + return o; + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * Returns whether write-locks will be obtained when reading with this + * cursor. + * Obtaining write-locks can prevent deadlocks when reading and then + * modifying data. + * + * @return the write-lock setting. + */ + public final boolean isReadModifyWrite() { + + return lockForWrite; + } + + /** + * Changes whether write-locks will be obtained when reading with this + * cursor. + * Obtaining write-locks can prevent deadlocks when reading and then + * modifying data. + * + * @param lockForWrite the write-lock setting. + */ + public void setReadModifyWrite(boolean lockForWrite) { + + this.lockForWrite = lockForWrite; + } + + // --- begin Iterator/ListIterator methods --- + + /** + * Returns true if this iterator has more elements when traversing in the + * forward direction. False is returned if the iterator has been closed. + * This method conforms to the {@link Iterator#hasNext} interface. + * + * @return whether {@link #next()} will succeed. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is thrown. + */ + public boolean hasNext() { + + if (cursor == null) { + return false; + } + try { + if (toNext != 0) { + OperationStatus status = move(toNext); + if (status == OperationStatus.SUCCESS) { + toNext = 0; + toPrevious = MOVE_PREV; + toCurrent = MOVE_PREV; + } + } + return (toNext == 0); + } + catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * Returns true if this iterator has more elements when traversing in the + * reverse direction. It returns false if the iterator has been closed. + * This method conforms to the {@link ListIterator#hasPrevious} interface. 
+ * + * @return whether {@link #previous()} will succeed. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is thrown. + */ + public boolean hasPrevious() { + + if (cursor == null) { + return false; + } + try { + if (toPrevious != 0) { + OperationStatus status = move(toPrevious); + if (status == OperationStatus.SUCCESS) { + toPrevious = 0; + toNext = MOVE_NEXT; + toCurrent = MOVE_NEXT; + } + } + return (toPrevious == 0); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * Returns the next element in the iteration. + * This method conforms to the {@link Iterator#next} interface. + * + * @return the next element. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public Object next() { + + try { + if (toNext != 0) { + OperationStatus status = move(toNext); + if (status == OperationStatus.SUCCESS) { + toNext = 0; + } + } + if (toNext == 0) { + currentData = coll.makeIteratorData(this, cursor); + toNext = MOVE_NEXT; + toPrevious = 0; + toCurrent = 0; + setAndRemoveAllowed = true; + return currentData; + } + // else throw NoSuchElementException below + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + throw new NoSuchElementException(); + } + + /** + * Returns the next element in the iteration. + * This method conforms to the {@link ListIterator#previous} interface. + * + * @return the previous element. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public Object previous() { + + try { + if (toPrevious != 0) { + OperationStatus status = move(toPrevious); + if (status == OperationStatus.SUCCESS) { + toPrevious = 0; + } + } + if (toPrevious == 0) { + currentData = coll.makeIteratorData(this, cursor); + toPrevious = MOVE_PREV; + toNext = 0; + toCurrent = 0; + setAndRemoveAllowed = true; + return currentData; + } + // else throw NoSuchElementException below + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + throw new NoSuchElementException(); + } + + /** + * Returns the index of the element that would be returned by a subsequent + * call to next. + * This method conforms to the {@link ListIterator#nextIndex} interface + * except that it returns Integer.MAX_VALUE for stored lists when + * positioned at the end of the list, rather than returning the list size + * as specified by the ListIterator interface. This is because the database + * size is not available. + * + * @return the next index. + * + * @throws UnsupportedOperationException if this iterator's collection does + * not use record number keys. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public int nextIndex() { + + if (!recNumAccess) { + throw new UnsupportedOperationException( + "Record number access not supported"); + } + try { + return hasNext() ? (cursor.getCurrentRecordNumber() - + coll.getIndexOffset()) + : Integer.MAX_VALUE; + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * Returns the index of the element that would be returned by a subsequent + * call to previous. + * This method conforms to the {@link ListIterator#previousIndex} + * interface. + * + * @return the previous index. + * + * @throws UnsupportedOperationException if this iterator's collection does + * not use record number keys. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. 
+ */ + public int previousIndex() { + + if (!recNumAccess) { + throw new UnsupportedOperationException( + "Record number access not supported"); + } + try { + return hasPrevious() ? (cursor.getCurrentRecordNumber() - + coll.getIndexOffset()) + : -1; + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * Replaces the last element returned by next or previous with the + * specified element (optional operation). + * This method conforms to the {@link ListIterator#set} interface. + * + * @param value the new value. + * + * @throws UnsupportedOperationException if the collection is a {@link + * StoredKeySet} (the set returned by {@link java.util.Map#keySet}), or if + * duplicates are sorted since this would change the iterator position, or + * if the collection is indexed, or if the collection is read-only. + * + * @throws IllegalArgumentException if an entity value binding is used and + * the primary key of the value given is different than the existing stored + * primary key. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public void set(Object value) { + + if (!coll.hasValues()) throw new UnsupportedOperationException(); + if (!setAndRemoveAllowed) throw new IllegalStateException(); + try { + moveToCurrent(); + cursor.putCurrent(value); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * Removes the last element that was returned by next or previous (optional + * operation). + * This method conforms to the {@link ListIterator#remove} interface except + * that when the collection is a list and the RECNO-RENUMBER access method + * is not used, list indices will not be renumbered. + * + * @throws UnsupportedOperationException if the collection is a sublist, or + * if the collection is read-only. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public void remove() { + + if (!setAndRemoveAllowed) throw new IllegalStateException(); + try { + moveToCurrent(); + cursor.delete(); + setAndRemoveAllowed = false; + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * Inserts the specified element into the list or inserts a duplicate into + * other types of collections (optional operation). + * This method conforms to the {@link ListIterator#add} interface when + * the collection is a list and the RECNO-RENUMBER access method is used. + * Otherwise, this method may only be called when duplicates are allowed. + * If duplicates are unsorted, the new value will be inserted in the same + * manner as list elements. + * If duplicates are sorted, the new value will be inserted in sort order. + * + * @param value the new value. + * + * @throws UnsupportedOperationException if the collection is a sublist, or + * if the collection is indexed, or if the collection is read-only, or if + * the collection is a list and the RECNO-RENUMBER access method was not + * used, or if the collection is not a list and duplicates are not allowed. + * + * @throws IllegalStateException if the collection is empty and is not a + * list with RECNO-RENUMBER access. + * + * @throws IllegalArgumentException if a duplicate value is being added + * that already exists and duplicates are sorted. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. 
+ */ + public void add(Object value) { + + coll.checkIterAddAllowed(); + try { + OperationStatus status = OperationStatus.SUCCESS; + if (toNext != 0 && toPrevious != 0) { // database is empty + if (coll.view.keysRenumbered) { // recno-renumber database + /* + * Close cursor during append and then reopen to support + * CDB restriction that append may not be called with a + * cursor open; note the append will still fail if the + * application has another cursor open. + */ + close(); + status = coll.view.append(value, null, null); + cursor = new DataCursor(coll.view, writeAllowed); + reset(); + next(); // move past new record + } else { // hash/btree with duplicates + throw new IllegalStateException( + "Collection is empty, cannot add() duplicate"); + } + } else { // database is not empty + boolean putBefore = false; + if (coll.view.keysRenumbered) { // recno-renumber database + moveToCurrent(); + if (hasNext()) { + status = cursor.putBefore(value); + putBefore = true; + } else { + status = cursor.putAfter(value); + } + } else { // hash/btree with duplicates + if (coll.areDuplicatesOrdered()) { + status = cursor.putNoDupData(null, value, null, true); + } else if (toNext == 0) { + status = cursor.putBefore(value); + putBefore = true; + } else { + status = cursor.putAfter(value); + } + } + if (putBefore) { + toPrevious = 0; + toNext = MOVE_NEXT; + } + } + if (status == OperationStatus.KEYEXIST) { + throw new IllegalArgumentException("Duplicate value"); + } else if (status != OperationStatus.SUCCESS) { + throw new IllegalArgumentException("Could not insert: " + + status); + } + setAndRemoveAllowed = false; + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + // --- end Iterator/ListIterator methods --- + + /** + * Resets cursor to an uninitialized state. + */ + private void reset() { + + toNext = MOVE_FIRST; + toPrevious = MOVE_PREV; + toCurrent = 0; + currentData = null; + /* + * Initialize cursor at beginning to avoid "initial previous == last" + * behavior when cursor is uninitialized. + * + * FindBugs whines about us ignoring the return value from hasNext(). + */ + hasNext(); + } + + /** + * Returns the number of elements having the same key value as the key + * value of the element last returned by next() or previous(). If no + * duplicates are allowed, 1 is always returned. + * + * @return the number of duplicates. + * + * @throws IllegalStateException if next() or previous() has not been + * called for this iterator, or if remove() or add() were called after + * the last call to next() or previous(). + */ + public int count() { + + if (!setAndRemoveAllowed) throw new IllegalStateException(); + try { + moveToCurrent(); + return cursor.count(); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * Closes this iterator. + * This method does not exist in the standard {@link Iterator} or {@link + * ListIterator} interfaces. + * + *

+ * <p>After being closed, only the {@link #hasNext} and {@link
+ * #hasPrevious} methods may be called and these will return false.  {@link
+ * #close()} may also be called again and will do nothing.  If other
+ * methods are called, a NullPointerException will generally be
+ * thrown.</p>
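+ *
+ * <p>For example (a sketch, assuming storedCollection is a stored collection
+ * created elsewhere):</p>
+ * <pre>
+ *     StoredIterator i = (StoredIterator) storedCollection.iterator();
+ *     // ... use the iterator ...
+ *     i.close();
+ *     boolean more = i.hasNext();   // returns false after close
+ *     i.close();                    // calling close again does nothing
+ * </pre>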

    + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public void close() { + + if (cursor != null) { + coll.closeCursor(cursor); + cursor = null; + } + } + + /** + * Returns the collection associated with this iterator. + * This method does not exist in the standard {@link Iterator} or {@link + * ListIterator} interfaces. + * + * @return the collection associated with this iterator. + */ + public final StoredCollection getCollection() { + + return coll; + } + + final boolean isCurrentData(Object currentData) { + + return (this.currentData == currentData); + } + + final boolean moveToIndex(int index) { + + try { + OperationStatus status = + cursor.getSearchKey(new Integer(index), null, lockForWrite); + setAndRemoveAllowed = (status == OperationStatus.SUCCESS); + return setAndRemoveAllowed; + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + private void moveToCurrent() + throws DatabaseException { + + if (toCurrent != 0) { + move(toCurrent); + toCurrent = 0; + } + } + + private OperationStatus move(int direction) + throws DatabaseException { + + switch (direction) { + case MOVE_NEXT: + if (coll.iterateDuplicates()) { + return cursor.getNext(lockForWrite); + } else { + return cursor.getNextNoDup(lockForWrite); + } + case MOVE_PREV: + if (coll.iterateDuplicates()) { + return cursor.getPrev(lockForWrite); + } else { + return cursor.getPrevNoDup(lockForWrite); + } + case MOVE_FIRST: + return cursor.getFirst(lockForWrite); + default: + throw new IllegalArgumentException(String.valueOf(direction)); + } + } +} diff --git a/db/java/src/com/sleepycat/collections/StoredKeySet.java b/db/java/src/com/sleepycat/collections/StoredKeySet.java new file mode 100644 index 000000000..1c86aa003 --- /dev/null +++ b/db/java/src/com/sleepycat/collections/StoredKeySet.java @@ -0,0 +1,144 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: StoredKeySet.java,v 1.3 2004/06/04 18:24:50 mark Exp $ + */ + +package com.sleepycat.collections; + +import java.util.Set; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.OperationStatus; + +/** + * The Set returned by Map.keySet() and which can also be constructed directly + * if a Map is not needed. + * Since this collection is a set it only contains one element for each key, + * even when duplicates are allowed. Key set iterators are therefore + * particularly useful for enumerating the unique keys of a store or index that + * allows duplicates. + * + *

+ * <p>Note that this class does not conform to the standard Java
+ * collections interface in the following ways:</p>
+ * <ul>
+ * <li>The {@link #size} method always throws
+ * UnsupportedOperationException because, for performance reasons,
+ * databases do not maintain their total record count.</li>
+ * <li>All iterators must be explicitly closed using {@link
+ * StoredIterator#close()} or {@link StoredIterator#close(java.util.Iterator)}
+ * to release the underlying database cursor resources.</li>
+ * </ul>
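+ *
+ * <p>A minimal usage sketch (the database handle and the keyBinding shown
+ * below are assumed to have been created elsewhere):</p>
+ * <pre>
+ *     Set keys = new StoredKeySet(database, keyBinding, true);
+ *     keys.add(someKey);                  // stores an empty value for the key
+ *     boolean present = keys.contains(someKey);
+ *     Iterator i = keys.iterator();
+ *     try {
+ *         while (i.hasNext()) {
+ *             Object key = i.next();      // one element per unique key
+ *         }
+ *     } finally {
+ *         StoredIterator.close(i);
+ *     }
+ * </pre>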
    + * + * @author Mark Hayes + */ +public class StoredKeySet extends StoredCollection implements Set { + + /** + * Creates a key set view of a {@link Database}. + * + * @param database is the Database underlying the new collection. + * + * @param keyBinding is the binding used to translate between key buffers + * and key objects. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public StoredKeySet(Database database, EntryBinding keyBinding, + boolean writeAllowed) { + + super(new DataView(database, keyBinding, null, null, + writeAllowed, null)); + } + + StoredKeySet(DataView keySetView) { + + super(keySetView); + } + + /** + * Adds the specified key to this set if it is not already present + * (optional operation). + * When a key is added the value in the underlying data store will be + * empty. + * This method conforms to the {@link Set#add} interface. + * + * @throws UnsupportedOperationException if the collection is indexed, or + * if the collection is read-only. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public boolean add(Object key) { + + DataCursor cursor = null; + boolean doAutoCommit = beginAutoCommit(); + try { + cursor = new DataCursor(view, true); + OperationStatus status = cursor.putNoOverwrite(key, null, false); + closeCursor(cursor); + commitAutoCommit(doAutoCommit); + return (status == OperationStatus.SUCCESS); + } catch (Exception e) { + closeCursor(cursor); + throw handleException(e, doAutoCommit); + } + } + + /** + * Removes the specified key from this set if it is present (optional + * operation). + * If duplicates are allowed, this method removes all duplicates for the + * given key. + * This method conforms to the {@link Set#remove} interface. + * + * @throws UnsupportedOperationException if the collection is read-only. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public boolean remove(Object key) { + + return removeKey(key, null); + } + + /** + * Returns true if this set contains the specified key. + * This method conforms to the {@link Set#contains} interface. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public boolean contains(Object key) { + + return containsKey(key); + } + + boolean hasValues() { + + return false; + } + + Object makeIteratorData(StoredIterator iterator, DataCursor cursor) + throws DatabaseException { + + return cursor.getCurrentKey(); + } + + boolean iterateDuplicates() { + + return false; + } +} diff --git a/db/java/src/com/sleepycat/collections/StoredList.java b/db/java/src/com/sleepycat/collections/StoredList.java new file mode 100644 index 000000000..a65e5a734 --- /dev/null +++ b/db/java/src/com/sleepycat/collections/StoredList.java @@ -0,0 +1,604 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. 
+ * + * $Id: StoredList.java,v 1.4 2004/08/02 18:52:05 mjc Exp $ + */ + +package com.sleepycat.collections; + +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.RecordNumberBinding; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseEntry; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.OperationStatus; + +/** + * A List view of a {@link Database}. + * + *

+ * <p>For all stored lists the keys of the underlying Database
+ * must have record number format, and therefore the store or index must be a
+ * RECNO, RECNO-RENUMBER, QUEUE, or BTREE-RECNUM database.  Only RECNO-RENUMBER
+ * allows true list behavior where record numbers are renumbered following the
+ * position of an element that is added or removed.  For the other access
+ * methods (RECNO, QUEUE, and BTREE-RECNUM), stored Lists are most useful as
+ * read-only collections where record numbers are not required to be
+ * sequential.</p>
+ *
+ * <p>Note that this class does not conform to the standard Java
+ * collections interface in the following ways:</p>
+ * <ul>
+ * <li>The {@link #size} method always throws
+ * UnsupportedOperationException because, for performance reasons,
+ * databases do not maintain their total record count.</li>
+ * <li>All iterators must be explicitly closed using {@link
+ * StoredIterator#close()} or {@link StoredIterator#close(java.util.Iterator)}
+ * to release the underlying database cursor resources.</li>
+ * </ul>
+ *
+ * <p>In addition to the standard List methods, this class provides the
+ * following methods for stored lists only.  Note that the use of these methods
+ * is not compatible with the standard Java collections interface.</p>
+ * <ul>
+ * <li>{@link #append(Object)}</li>
+ * </ul>
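+ *
+ * <p>A minimal usage sketch (the RECNO or QUEUE database handle and the
+ * valueBinding shown below are assumed to have been created elsewhere):</p>
+ * <pre>
+ *     StoredList list = new StoredList(database, valueBinding, true);
+ *     int index = list.append(someValue);  // the next record number is used
+ *     Object stored = list.get(index);
+ *     ListIterator i = list.listIterator();
+ *     try {
+ *         while (i.hasNext()) {
+ *             Object value = i.next();
+ *         }
+ *     } finally {
+ *         StoredIterator.close(i);
+ *     }
+ * </pre>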
    + * + * @author Mark Hayes + */ +public class StoredList extends StoredCollection implements List { + + private static final EntryBinding DEFAULT_KEY_BINDING = + new IndexKeyBinding(1); + + private int baseIndex = 1; + private boolean isSubList; + + /** + * Creates a list view of a {@link Database}. + * + * @param database is the Database underlying the new collection. + * + * @param valueBinding is the binding used to translate between value + * buffers and value objects. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public StoredList(Database database, EntryBinding valueBinding, + boolean writeAllowed) { + + super(new DataView(database, DEFAULT_KEY_BINDING, valueBinding, null, + writeAllowed, null)); + } + + /** + * Creates a list entity view of a {@link Database}. + * + * @param database is the Database underlying the new collection. + * + * @param valueEntityBinding is the binding used to translate between + * key/value buffers and entity value objects. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public StoredList(Database database, EntityBinding valueEntityBinding, + boolean writeAllowed) { + + super(new DataView(database, DEFAULT_KEY_BINDING, null, + valueEntityBinding, writeAllowed, null)); + } + + /** + * Creates a list view of a {@link Database} with a {@link + * PrimaryKeyAssigner}. Writing is allowed for the created list. + * + * @param database is the Database underlying the new collection. + * + * @param valueBinding is the binding used to translate between value + * buffers and value objects. + * + * @param keyAssigner is used by the {@link #add} and {@link #append} + * methods to assign primary keys. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public StoredList(Database database, EntryBinding valueBinding, + PrimaryKeyAssigner keyAssigner) { + + super(new DataView(database, DEFAULT_KEY_BINDING, valueBinding, + null, true, keyAssigner)); + } + + /** + * Creates a list entity view of a {@link Database} with a {@link + * PrimaryKeyAssigner}. Writing is allowed for the created list. + * + * @param database is the Database underlying the new collection. + * + * @param valueEntityBinding is the binding used to translate between + * key/value buffers and entity value objects. + * + * @param keyAssigner is used by the {@link #add} and {@link #append} + * methods to assign primary keys. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. 
+ */ + public StoredList(Database database, EntityBinding valueEntityBinding, + PrimaryKeyAssigner keyAssigner) { + + super(new DataView(database, DEFAULT_KEY_BINDING, null, + valueEntityBinding, true, keyAssigner)); + } + + private StoredList(DataView view, int baseIndex) { + + super(view); + this.baseIndex = baseIndex; + this.isSubList = true; + } + + /** + * Inserts the specified element at the specified position in this list + * (optional operation). + * This method conforms to the {@link List#add(int, Object)} interface. + * + * @throws UnsupportedOperationException if the collection is a sublist, or + * if the collection is indexed, or if the collection is read-only, or if + * the RECNO-RENUMBER access method was not used. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public void add(int index, Object value) { + + checkIterAddAllowed(); + DataCursor cursor = null; + boolean doAutoCommit = beginAutoCommit(); + try { + cursor = new DataCursor(view, true); + OperationStatus status = + cursor.getSearchKey(new Long(index), null, false); + if (status == OperationStatus.SUCCESS) { + cursor.putBefore(value); + closeCursor(cursor); + } else { + closeCursor(cursor); + cursor = null; + view.append(value, null, null); + } + commitAutoCommit(doAutoCommit); + } catch (Exception e) { + closeCursor(cursor); + throw handleException(e, doAutoCommit); + } + } + + /** + * Appends the specified element to the end of this list (optional + * operation). + * This method conforms to the {@link List#add(Object)} interface. + * + * @throws UnsupportedOperationException if the collection is a sublist, or + * if the collection is indexed, or if the collection is read-only, or if + * the RECNO-RENUMBER access method was not used. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public boolean add(Object value) { + + checkIterAddAllowed(); + boolean doAutoCommit = beginAutoCommit(); + try { + view.append(value, null, null); + commitAutoCommit(doAutoCommit); + return true; + } catch (Exception e) { + throw handleException(e, doAutoCommit); + } + } + + /** + * Appends a given value returning the newly assigned index. + * If a {@link com.sleepycat.collections.PrimaryKeyAssigner} is associated + * with Store for this list, it will be used to assigned the returned + * index. Otherwise the Store must be a QUEUE or RECNO database and the + * next available record number is assigned as the index. This method does + * not exist in the standard {@link List} interface. + * + * @param value the value to be appended. + * + * @return the assigned index. + * + * @throws UnsupportedOperationException if the collection is indexed, or + * if the collection is read-only, or if the Store has no {@link + * com.sleepycat.collections.PrimaryKeyAssigner} and is not a QUEUE or + * RECNO database. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. 
+ */ + public int append(Object value) { + + boolean doAutoCommit = beginAutoCommit(); + try { + Object[] key = new Object[1]; + view.append(value, key, null); + commitAutoCommit(doAutoCommit); + return ((Number) key[0]).intValue(); + } catch (Exception e) { + throw handleException(e, doAutoCommit); + } + } + + void checkIterAddAllowed() + throws UnsupportedOperationException { + + if (isSubList) { + throw new UnsupportedOperationException("cannot add to subList"); + } + if (!view.keysRenumbered) { // RECNO-RENUM + throw new UnsupportedOperationException( + "requires renumbered keys"); + } + } + + /** + * Inserts all of the elements in the specified collection into this list + * at the specified position (optional operation). + * This method conforms to the {@link List#addAll(int, Collection)} + * interface. + * + * @throws UnsupportedOperationException if the collection is a sublist, or + * if the collection is indexed, or if the collection is read-only, or if + * the RECNO-RENUMBER access method was not used. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public boolean addAll(int index, Collection coll) { + + checkIterAddAllowed(); + DataCursor cursor = null; + Iterator i = null; + boolean doAutoCommit = beginAutoCommit(); + try { + i = coll.iterator(); + if (!i.hasNext()) { + return false; + } + cursor = new DataCursor(view, true); + OperationStatus status = + cursor.getSearchKey(new Long(index), null, false); + if (status == OperationStatus.SUCCESS) { + while (i.hasNext()) { + cursor.putBefore(i.next()); + } + closeCursor(cursor); + } else { + closeCursor(cursor); + cursor = null; + while (i.hasNext()) { + view.append(i.next(), null, null); + } + } + StoredIterator.close(i); + commitAutoCommit(doAutoCommit); + return true; + } catch (Exception e) { + closeCursor(cursor); + StoredIterator.close(i); + throw handleException(e, doAutoCommit); + } + } + + /** + * Returns true if this list contains the specified element. + * This method conforms to the {@link List#contains} interface. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public boolean contains(Object value) { + + return containsValue(value); + } + + /** + * Returns the element at the specified position in this list. + * This method conforms to the {@link List#get} interface. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public Object get(int index) { + + return super.get(new Long(index)); + } + + /** + * Returns the index in this list of the first occurrence of the specified + * element, or -1 if this list does not contain this element. + * This method conforms to the {@link List#indexOf} interface. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public int indexOf(Object value) { + + return indexOf(value, true); + } + + /** + * Returns the index in this list of the last occurrence of the specified + * element, or -1 if this list does not contain this element. + * This method conforms to the {@link List#lastIndexOf} interface. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public int lastIndexOf(Object value) { + + return indexOf(value, false); + } + + private int indexOf(Object value, boolean findFirst) { + + DataCursor cursor = null; + try { + cursor = new DataCursor(view, false); + OperationStatus status = cursor.find(value, findFirst); + return (status == OperationStatus.SUCCESS) + ? 
(cursor.getCurrentRecordNumber() - baseIndex) + : (-1); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + closeCursor(cursor); + } + } + + int getIndexOffset() { + + return baseIndex; + } + + /** + * Returns a list iterator of the elements in this list (in proper + * sequence). + * The iterator will be read-only if the collection is read-only. + * This method conforms to the {@link List#listIterator()} interface. + * + * @return a {@link StoredIterator} for this collection. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + * + * @see #isWriteAllowed + */ + public ListIterator listIterator() { + + return iterator(isWriteAllowed()); + } + + /** + * Returns a list iterator of the elements in this list (in proper + * sequence), starting at the specified position in this list. + * The iterator will be read-only if the collection is read-only. + * This method conforms to the {@link List#listIterator(int)} interface. + * + * @return a {@link StoredIterator} for this collection. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + * + * @see #isWriteAllowed + */ + public ListIterator listIterator(int index) { + + StoredIterator i = iterator(isWriteAllowed()); + if (i.moveToIndex(index)) { + return i; + } else { + i.close(); + throw new IndexOutOfBoundsException(String.valueOf(index)); + } + } + + /** + * Removes the element at the specified position in this list (optional + * operation). + * This method conforms to the {@link List#remove(int)} interface. + * + * @throws UnsupportedOperationException if the collection is a sublist, or + * if the collection is read-only. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public Object remove(int index) { + + try { + Object[] oldVal = new Object[1]; + removeKey(new Long(index), oldVal); + return oldVal[0]; + } catch (IllegalArgumentException e) { + throw new IndexOutOfBoundsException(e.getMessage()); + } + } + + /** + * Removes the first occurrence in this list of the specified element + * (optional operation). + * This method conforms to the {@link List#remove(Object)} interface. + * + * @throws UnsupportedOperationException if the collection is a sublist, or + * if the collection is read-only. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public boolean remove(Object value) { + + return removeValue(value); + } + + /** + * Replaces the element at the specified position in this list with the + * specified element (optional operation). + * This method conforms to the {@link List#set} interface. + * + * @throws UnsupportedOperationException if the collection is indexed, or + * if the collection is read-only. + * + * @throws IllegalArgumentException if an entity value binding is used and + * the primary key of the value given is different than the existing stored + * primary key. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public Object set(int index, Object value) { + + try { + return put(new Long(index), value); + } catch (IllegalArgumentException e) { + throw new IndexOutOfBoundsException(e.getMessage()); + } + } + + /** + * Returns a view of the portion of this list between the specified + * fromIndex, inclusive, and toIndex, exclusive. + * Note that add() and remove() may not be called for the returned sublist. + * This method conforms to the {@link List#subList} interface. 
+ * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public List subList(int fromIndex, int toIndex) { + + if (fromIndex < 0 || fromIndex > toIndex) { + throw new IndexOutOfBoundsException(String.valueOf(fromIndex)); + } + try { + int newBaseIndex = baseIndex + fromIndex; + return new StoredList( + view.subView(new Long(fromIndex), true, + new Long(toIndex), false, + new IndexKeyBinding(newBaseIndex)), + newBaseIndex); + } catch (KeyRangeException e) { + throw new IndexOutOfBoundsException(e.getMessage()); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * Compares the specified object with this list for equality. + * A value comparison is performed by this method and the stored values + * are compared rather than calling the equals() method of each element. + * This method conforms to the {@link List#equals} interface. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public boolean equals(Object other) { + + if (!(other instanceof List)) return false; + List otherList = (List) other; + ListIterator i1 = null; + ListIterator i2 = null; + try { + i1 = listIterator(); + i2 = otherList.listIterator(); + while (i1.hasNext()) { + if (!i2.hasNext()) return false; + if (i1.nextIndex() != i2.nextIndex()) return false; + Object o1 = i1.next(); + Object o2 = i2.next(); + if (o1 == null) { + if (o2 != null) return false; + } else { + if (!o1.equals(o2)) return false; + } + } + if (i2.hasNext()) return false; + return true; + } finally { + StoredIterator.close(i1); + StoredIterator.close(i2); + } + } + + /* + * Add this in to keep FindBugs from whining at us about implementing + * equals(), but not hashCode(). + */ + public int hashCode() { + return super.hashCode(); + } + + Object makeIteratorData(StoredIterator iterator, DataCursor cursor) + throws DatabaseException { + + return cursor.getCurrentValue(); + } + + boolean hasValues() { + + return true; + } + + private static class IndexKeyBinding extends RecordNumberBinding { + + private int baseIndex; + + private IndexKeyBinding(int baseIndex) { + + this.baseIndex = baseIndex; + } + + public Object entryToObject(DatabaseEntry data) { + + return new Long(entryToRecordNumber(data) - baseIndex); + } + + public void objectToEntry(Object object, DatabaseEntry data) { + + recordNumberToEntry(((Number) object).intValue() + baseIndex, + data); + } + } +} diff --git a/db/java/src/com/sleepycat/collections/StoredMap.java b/db/java/src/com/sleepycat/collections/StoredMap.java new file mode 100644 index 000000000..1c0e1dcf1 --- /dev/null +++ b/db/java/src/com/sleepycat/collections/StoredMap.java @@ -0,0 +1,511 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: StoredMap.java,v 1.4 2004/09/22 18:01:03 bostic Exp $ + */ + +package com.sleepycat.collections; + +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.db.Database; + +/** + * A Map view of a {@link Database}. + * + *

+ * <p>Note that this class does not conform to the standard Java
+ * collections interface in the following ways:</p>
+ * <ul>
+ * <li>The {@link #size} method always throws
+ * UnsupportedOperationException because, for performance reasons,
+ * databases do not maintain their total record count.</li>
+ * <li>All iterators must be explicitly closed using {@link
+ * StoredIterator#close()} or {@link StoredIterator#close(java.util.Iterator)}
+ * to release the underlying database cursor resources.</li>
+ * </ul>
+ *
+ * <p>In addition to the standard Map methods, this class provides the
+ * following methods for stored maps only.  Note that the use of these methods
+ * is not compatible with the standard Java collections interface.</p>
+ * <ul>
+ * <li>{@link #duplicates(Object)}</li>
+ * <li>{@link #append(Object)}</li>
+ * </ul>
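+ *
+ * <p>A minimal usage sketch (the database handle and the keyBinding and
+ * valueBinding objects shown below are assumed to have been created
+ * elsewhere):</p>
+ * <pre>
+ *     StoredMap map = new StoredMap(database, keyBinding, valueBinding, true);
+ *     map.put(someKey, someValue);
+ *     Object value = map.get(someKey);
+ *     Collection dups = map.duplicates(someKey); // all values for one key
+ *     Iterator i = map.entrySet().iterator();
+ *     try {
+ *         while (i.hasNext()) {
+ *             Map.Entry entry = (Map.Entry) i.next();
+ *         }
+ *     } finally {
+ *         StoredIterator.close(i);
+ *     }
+ * </pre>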
    + * + * @author Mark Hayes + */ +public class StoredMap extends StoredContainer implements Map { + + private StoredKeySet keySet; + private boolean keySetInitialized = false; + private StoredEntrySet entrySet; + private boolean entrySetInitialized = false; + private StoredValueSet valueSet; + private boolean valueSetInitialized = false; + + /** + * Creates a map view of a {@link Database}. + * + * @param database is the Database underlying the new collection. + * + * @param keyBinding is the binding used to translate between key buffers + * and key objects. + * + * @param valueBinding is the binding used to translate between value + * buffers and value objects. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public StoredMap(Database database, EntryBinding keyBinding, + EntryBinding valueBinding, boolean writeAllowed) { + + super(new DataView(database, keyBinding, valueBinding, null, + writeAllowed, null)); + } + + /** + * Creates a map view of a {@link Database} with a {@link + * PrimaryKeyAssigner}. Writing is allowed for the created map. + * + * @param database is the Database underlying the new collection. + * + * @param keyBinding is the binding used to translate between key buffers + * and key objects. + * + * @param valueBinding is the binding used to translate between value + * buffers and value objects. + * + * @param keyAssigner is used by the {@link #append} method to assign + * primary keys. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public StoredMap(Database database, EntryBinding keyBinding, + EntryBinding valueBinding, + PrimaryKeyAssigner keyAssigner) { + + super(new DataView(database, keyBinding, valueBinding, null, + true, keyAssigner)); + } + + protected Object clone() + throws CloneNotSupportedException { + + // cached collections must be cleared and recreated with the new view + // of the map to inherit the new view's properties + StoredMap other = (StoredMap) super.clone(); + other.keySet = null; + other.keySetInitialized = false; + other.entrySet = null; + other.entrySetInitialized = false; + other.valueSet = null; + other.valueSetInitialized = false; + return other; + } + + /** + * Creates a map entity view of a {@link Database}. + * + * @param database is the Database underlying the new collection. + * + * @param keyBinding is the binding used to translate between key buffers + * and key objects. + * + * @param valueEntityBinding is the binding used to translate between + * key/value buffers and entity value objects. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. 
+ */ + public StoredMap(Database database, EntryBinding keyBinding, + EntityBinding valueEntityBinding, boolean writeAllowed) { + + super(new DataView(database, keyBinding, null, valueEntityBinding, + writeAllowed, null)); + } + + /** + * Creates a map entity view of a {@link Database} with a {@link + * PrimaryKeyAssigner}. Writing is allowed for the created map. + * + * @param database is the Database underlying the new collection. + * + * @param keyBinding is the binding used to translate between key buffers + * and key objects. + * + * @param valueEntityBinding is the binding used to translate between + * key/value buffers and entity value objects. + * + * @param keyAssigner is used by the {@link #append} method to assign + * primary keys. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public StoredMap(Database database, EntryBinding keyBinding, + EntityBinding valueEntityBinding, + PrimaryKeyAssigner keyAssigner) { + + super(new DataView(database, keyBinding, null, valueEntityBinding, + true, keyAssigner)); + } + + StoredMap(DataView view) { + + super(view); + } + + /** + * Returns the value to which this map maps the specified key. If + * duplicates are allowed, this method returns the first duplicate, in the + * order in which duplicates are configured, that maps to the specified + * key. + * + * This method conforms to the {@link Map#get} interface. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public Object get(Object key) { + + return super.get(key); + } + + /** + * Associates the specified value with the specified key in this map + * (optional operation). If duplicates are allowed and the specified key + * is already mapped to a value, this method appends the new duplicate + * after the existing duplicates. This method conforms to the {@link + * Map#put} interface. + * + *

+ * <p>The key parameter may be null if an entity binding is used and the
+ * key will be derived from the value (entity) parameter.  If an entity
+ * binding is used and the key parameter is non-null, then the key
+ * parameter must be equal to the key derived from the value parameter.</p>
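+ *
+ * <p>For example (a sketch, assuming the map was created with an
+ * EntityBinding so the key can be derived from the entity object):</p>
+ * <pre>
+ *     map.put(null, someEntity);        // the key is derived from the entity
+ *     map.put(derivedKey, someEntity);  // allowed only if the keys are equal
+ * </pre>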

    + * + * @return the previous value associated with specified key, or null if + * there was no mapping for the key or if duplicates are allowed. + * + * @throws UnsupportedOperationException if the collection is indexed, or + * if the collection is read-only. + * + * @throws IllegalArgumentException if an entity value binding is used and + * the primary key of the value given is different than the existing stored + * primary key. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public Object put(Object key, Object value) { + + return super.put(key, value); + } + + /** + * Appends a given value returning the newly assigned key. If a {@link + * PrimaryKeyAssigner} is associated with Store for this map, it will be + * used to assigned the returned key. Otherwise the Store must be a QUEUE + * or RECNO database and the next available record number is assigned as + * the key. This method does not exist in the standard {@link Map} + * interface. + * + * @param value the value to be appended. + * + * @return the assigned key. + * + * @throws UnsupportedOperationException if the collection is indexed, or + * if the collection is read-only, or if the Store has no {@link + * PrimaryKeyAssigner} and is not a QUEUE or RECNO database. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public Object append(Object value) { + + boolean doAutoCommit = beginAutoCommit(); + try { + Object[] key = new Object[1]; + view.append(value, key, null); + commitAutoCommit(doAutoCommit); + return key[0]; + } catch (Exception e) { + throw handleException(e, doAutoCommit); + } + } + + /** + * Removes the mapping for this key from this map if present (optional + * operation). If duplicates are allowed, this method removes all + * duplicates for the given key. This method conforms to the {@link + * Map#remove} interface. + * + * @throws UnsupportedOperationException if the collection is read-only. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public Object remove(Object key) { + + Object[] oldVal = new Object[1]; + removeKey(key, oldVal); + return oldVal[0]; + } + + /** + * Returns true if this map contains the specified key. This method + * conforms to the {@link Map#containsKey} interface. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public boolean containsKey(Object key) { + + return super.containsKey(key); + } + + /** + * Returns true if this map contains the specified value. When an entity + * binding is used, this method returns whether the map contains the + * primary key and value mapping of the entity. This method conforms to + * the {@link Map#containsValue} interface. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public boolean containsValue(Object value) { + + return super.containsValue(value); + } + + /** + * Copies all of the mappings from the specified map to this map (optional + * operation). When duplicates are allowed, the mappings in the specified + * map are effectively appended to the existing mappings in this map, that + * is no previously existing mappings in this map are replaced. This + * method conforms to the {@link Map#putAll} interface. + * + * @throws UnsupportedOperationException if the collection is read-only, or + * if the collection is indexed. 
+ * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public void putAll(Map map) { + + boolean doAutoCommit = beginAutoCommit(); + Iterator entries = null; + try { + entries = map.entrySet().iterator(); + while (entries.hasNext()) { + Map.Entry entry = (Map.Entry) entries.next(); + put(entry.getKey(), entry.getValue()); + } + StoredIterator.close(entries); + commitAutoCommit(doAutoCommit); + } catch (Exception e) { + StoredIterator.close(entries); + throw handleException(e, doAutoCommit); + } + } + + /** + * Returns a set view of the keys contained in this map. A {@link + * java.util.SortedSet} is returned if the map is ordered. The returned + * collection will be read-only if the map is read-only. This method + * conforms to the {@link Map#keySet()} interface. + * + * @return a {@link StoredKeySet} or a {@link StoredSortedKeySet} for this + * map. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + * + * @see #isOrdered + * @see #isWriteAllowed + */ + public Set keySet() { + + if (!keySetInitialized) { + synchronized (this) { + if (!keySetInitialized) { + DataView newView = view.keySetView(); + if (isOrdered()) { + keySet = new StoredSortedKeySet(newView); + } else { + keySet = new StoredKeySet(newView); + } + keySetInitialized = true; + } + } + } + return keySet; + } + + /** + * Returns a set view of the mappings contained in this map. A {@link + * java.util.SortedSet} is returned if the map is ordered. The returned + * collection will be read-only if the map is read-only. This method + * conforms to the {@link Map#entrySet()} interface. + * + * @return a {@link StoredEntrySet} or a {@link StoredSortedEntrySet} for + * this map. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + * + * @see #isOrdered + * @see #isWriteAllowed + */ + public Set entrySet() { + + if (!entrySetInitialized) { + synchronized (this) { + if (!entrySetInitialized) { + if (isOrdered()) { + entrySet = new StoredSortedEntrySet(view); + } else { + entrySet = new StoredEntrySet(view); + } + entrySetInitialized = true; + } + } + } + return entrySet; + } + + /** + * Returns a collection view of the values contained in this map. A {@link + * java.util.SortedSet} is returned if the map is ordered and the + * value/entity binding can be used to derive the map's key from its + * value/entity object. The returned collection will be read-only if the + * map is read-only. This method conforms to the {@link Map#entrySet()} + * interface. + * + * @return a {@link StoredValueSet} or a {@link StoredSortedValueSet} for + * this map. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + * + * @see #isOrdered + * @see #isWriteAllowed + */ + public Collection values() { + + if (!valueSetInitialized) { + synchronized (this) { + if (!valueSetInitialized) { + DataView newView = view.valueSetView(); + if (isOrdered() && newView.canDeriveKeyFromValue()) { + valueSet = new StoredSortedValueSet(newView); + } else { + valueSet = new StoredValueSet(newView); + } + valueSetInitialized = true; + } + } + } + return valueSet; + } + + /** + * Returns a new collection containing the values mapped to the given key + * in this map. This collection's iterator() method is particularly useful + * for iterating over the duplicates for a given key, since this is not + * supported by the standard Map interface. 
+ * This method does not exist in the standard {@link Map} interface.
+ *
+ * <p>If no mapping for the given key is present, an empty collection is
+ * returned.  If duplicates are not allowed, at most a single value will be
+ * in the collection returned.  If duplicates are allowed, the returned
+ * collection's add() method may be used to add values for the given
+ * key.</p>
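+ *
+ * <p>For example (a sketch, assuming duplicates are configured for the
+ * underlying database and that map, someKey and anotherValue exist
+ * elsewhere):</p>
+ * <pre>
+ *     Collection dups = map.duplicates(someKey);
+ *     dups.add(anotherValue);        // stores another duplicate for someKey
+ *     Iterator i = dups.iterator();
+ *     try {
+ *         while (i.hasNext()) {
+ *             Object value = i.next();
+ *         }
+ *     } finally {
+ *         StoredIterator.close(i);
+ *     }
+ * </pre>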

    + * + * @param key is the key for which values are to be returned. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public Collection duplicates(Object key) { + + try { + DataView newView = view.valueSetView(key); + return new StoredValueSet(newView, true); + } catch (KeyRangeException e) { + return Collections.EMPTY_SET; + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * Compares the specified object with this map for equality. A value + * comparison is performed by this method and the stored values are + * compared rather than calling the equals() method of each element. This + * method conforms to the {@link Map#equals} interface. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public boolean equals(Object other) { + + if (other instanceof Map) { + return entrySet().equals(((Map) other).entrySet()); + } else { + return false; + } + } + + /* + * Add this in to keep FindBugs from whining at us about implementing + * equals(), but not hashCode(). + */ + public int hashCode() { + return super.hashCode(); + } + + /** + * Converts the map to a string representation for debugging. WARNING: All + * mappings will be converted to strings and returned and therefore the + * returned string may be very large. + * + * @return the string representation. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public String toString() { + + return entrySet().toString(); + } +} + diff --git a/db/java/src/com/sleepycat/collections/StoredMapEntry.java b/db/java/src/com/sleepycat/collections/StoredMapEntry.java new file mode 100644 index 000000000..020e878e4 --- /dev/null +++ b/db/java/src/com/sleepycat/collections/StoredMapEntry.java @@ -0,0 +1,41 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: StoredMapEntry.java,v 1.1 2004/04/09 16:34:09 mark Exp $ + */ + +package com.sleepycat.collections; + +/** + * @author Mark Hayes + */ +final class StoredMapEntry extends MapEntryParameter { + + private StoredIterator iter; + private StoredCollection coll; + + StoredMapEntry(Object key, Object value, StoredCollection coll, + StoredIterator iter) { + + super(key, value); + // Assert: coll, coll.keyBinding/valueBinding + this.coll = coll; + this.iter = iter; + } + + public Object setValue(Object newValue) { + + Object oldValue; + if (iter != null && iter.isCurrentData(this)) { + oldValue = getValue(); + iter.set(newValue); + } else { + oldValue = coll.put(getKey(), newValue); + } + setValueInternal(newValue); + return oldValue; + } +} diff --git a/db/java/src/com/sleepycat/collections/StoredSortedEntrySet.java b/db/java/src/com/sleepycat/collections/StoredSortedEntrySet.java new file mode 100644 index 000000000..99fceab9b --- /dev/null +++ b/db/java/src/com/sleepycat/collections/StoredSortedEntrySet.java @@ -0,0 +1,220 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: StoredSortedEntrySet.java,v 1.2 2004/06/02 20:59:39 mark Exp $ + */ + +package com.sleepycat.collections; + +import java.util.Comparator; +import java.util.Map; +import java.util.SortedSet; + +/** + * The SortedSet returned by Map.entrySet(). This class may not be + * instantiated directly. 
Contrary to what is stated by {@link Map#entrySet} + * this class does support the {@link #add} and {@link #addAll} methods. + * + * <p>The {@link java.util.Map.Entry#setValue} method of the Map.Entry objects + * that are returned by this class and its iterators behaves just as the {@link + * StoredIterator#set} method does.</p> + * + * <p>Note that this class does not conform to the standard Java + * collections interface in the following ways:</p> + * <ul> + * <li>The {@link #size} method always throws + * UnsupportedOperationException because, for performance reasons, + * databases do not maintain their total record count.</li> + * <li>All iterators must be explicitly closed using {@link + * StoredIterator#close()} or {@link StoredIterator#close(java.util.Iterator)} + * to release the underlying database cursor resources.</li> + * </ul> + * + * <p>In addition to the standard SortedSet methods, this class provides the + * following methods for stored sorted sets only. Note that the use of these + * methods is not compatible with the standard Java collections interface.</p> + * <ul> + * <li>{@link #headSet(Object, boolean)}</li> + * <li>{@link #tailSet(Object, boolean)}</li> + * <li>{@link #subSet(Object, boolean, Object, boolean)}</li> + * </ul>
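Illustrative sketch only (editorial, not part of the patch): the inclusive range methods listed above might be used roughly as follows, assuming the entry set of a btree-backed StoredSortedMap can be cast to StoredSortedEntrySet and that MapEntryParameter (the Map.Entry implementation in this package) supplies the bounds; subSet() reads only the key of each bound, so the value half may be null. The class and variable names are invented for the example.

    import java.util.Map;
    import java.util.SortedSet;

    import com.sleepycat.collections.MapEntryParameter;
    import com.sleepycat.collections.StoredSortedEntrySet;
    import com.sleepycat.collections.StoredSortedMap;

    class EntryRangeSketch {
        static SortedSet entriesBetween(StoredSortedMap map,
                                        String fromKey, String toKey) {
            // For a btree-backed map the entry set is sorted (assumption).
            StoredSortedEntrySet entries = (StoredSortedEntrySet) map.entrySet();

            // Only the keys of the bounding entries are used by subSet().
            Map.Entry from = new MapEntryParameter(fromKey, null);
            Map.Entry to = new MapEntryParameter(toKey, null);

            // Both bounds inclusive -- not expressible with the standard
            // SortedSet.subSet() signature.
            return entries.subSet(from, true, to, true);
        }
    }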
    + * + * @author Mark Hayes + */ +public class StoredSortedEntrySet extends StoredEntrySet implements SortedSet { + + StoredSortedEntrySet(DataView mapView) { + + super(mapView); + } + + /** + * Returns null since comparators are not supported. The natural ordering + * of a stored collection is data byte order, whether the data classes + * implement the {@link java.lang.Comparable} interface or not. + * This method does not conform to the {@link SortedSet#comparator} + * interface. + * + * @return null. + */ + public Comparator comparator() { + + return null; + } + + /** + * Returns the first (lowest) element currently in this sorted set. + * This method conforms to the {@link SortedSet#first} interface. + * + * @return the first element. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public Object first() { + + return getFirstOrLast(true); + } + + /** + * Returns the last (highest) element currently in this sorted set. + * This method conforms to the {@link SortedSet#last} interface. + * + * @return the last element. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public Object last() { + + return getFirstOrLast(false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly less than toMapEntry. + * This method conforms to the {@link SortedSet#headSet} interface. + * + * @param toMapEntry the upper bound. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public SortedSet headSet(Object toMapEntry) { + + return subSet(null, false, toMapEntry, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly less than toMapEntry, optionally including toMapEntry. + * This method does not exist in the standard {@link SortedSet} interface. + * + * @param toMapEntry is the upper bound. + * + * @param toInclusive is true to include toMapEntry. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public SortedSet headSet(Object toMapEntry, boolean toInclusive) { + + return subSet(null, false, toMapEntry, toInclusive); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * greater than or equal to fromMapEntry. + * This method conforms to the {@link SortedSet#tailSet} interface. + * + * @param fromMapEntry is the lower bound. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public SortedSet tailSet(Object fromMapEntry) { + + return subSet(fromMapEntry, true, null, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly greater than fromMapEntry, optionally including fromMapEntry. + * This method does not exist in the standard {@link SortedSet} interface. + * + * @param fromMapEntry is the lower bound. + * + * @param fromInclusive is true to include fromMapEntry. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. 
+ */ + public SortedSet tailSet(Object fromMapEntry, boolean fromInclusive) { + + return subSet(fromMapEntry, fromInclusive, null, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements range + * from fromMapEntry, inclusive, to toMapEntry, exclusive. + * This method conforms to the {@link SortedSet#subSet} interface. + * + * @param fromMapEntry is the lower bound. + * + * @param toMapEntry is the upper bound. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public SortedSet subSet(Object fromMapEntry, Object toMapEntry) { + + return subSet(fromMapEntry, true, toMapEntry, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly greater than fromMapEntry and strictly less than toMapEntry, + * optionally including fromMapEntry and toMapEntry. + * This method does not exist in the standard {@link SortedSet} interface. + * + * @param fromMapEntry is the lower bound. + * + * @param fromInclusive is true to include fromMapEntry. + * + * @param toMapEntry is the upper bound. + * + * @param toInclusive is true to include toMapEntry. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public SortedSet subSet(Object fromMapEntry, boolean fromInclusive, + Object toMapEntry, boolean toInclusive) { + + Object fromKey = (fromMapEntry != null) ? + ((Map.Entry) fromMapEntry).getKey() : null; + Object toKey = (toMapEntry != null) ? + ((Map.Entry) toMapEntry).getKey() : null; + try { + return new StoredSortedEntrySet( + view.subView(fromKey, fromInclusive, toKey, toInclusive, null)); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } +} diff --git a/db/java/src/com/sleepycat/collections/StoredSortedKeySet.java b/db/java/src/com/sleepycat/collections/StoredSortedKeySet.java new file mode 100644 index 000000000..44b12d55a --- /dev/null +++ b/db/java/src/com/sleepycat/collections/StoredSortedKeySet.java @@ -0,0 +1,241 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: StoredSortedKeySet.java,v 1.2 2004/06/02 20:59:39 mark Exp $ + */ + +package com.sleepycat.collections; + +import java.util.Comparator; +import java.util.SortedSet; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.db.Database; + +/** + * The SortedSet returned by Map.keySet() and which can also be constructed + * directly if a Map is not needed. + * Since this collection is a set it only contains one element for each key, + * even when duplicates are allowed. Key set iterators are therefore + * particularly useful for enumerating the unique keys of a store or index that + * allows duplicates. + * + *
<p>Note that this class does not conform to the standard Java + * collections interface in the following ways:</p> + * <ul> + * <li>The {@link #size} method always throws + * UnsupportedOperationException because, for performance reasons, + * databases do not maintain their total record count.</li> + * <li>All iterators must be explicitly closed using {@link + * StoredIterator#close()} or {@link StoredIterator#close(java.util.Iterator)} + * to release the underlying database cursor resources.</li> + * </ul> + * + * <p>In addition to the standard SortedSet methods, this class provides the + * following methods for stored sorted sets only. Note that the use of these + * methods is not compatible with the standard Java collections interface.</p> + * <ul> + * <li>{@link #headSet(Object, boolean)}</li> + * <li>{@link #tailSet(Object, boolean)}</li> + * <li>{@link #subSet(Object, boolean, Object, boolean)}</li> + * </ul>
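Illustrative sketch only (editorial, not part of the patch): constructing the key set directly, as described above, assuming an already-open btree Database with String keys and that TupleBinding.getPrimitiveBinding supports String; the class and variable names are invented for the example.

    import java.util.Iterator;

    import com.sleepycat.bind.EntryBinding;
    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.collections.StoredIterator;
    import com.sleepycat.collections.StoredSortedKeySet;
    import com.sleepycat.db.Database;

    class KeySetSketch {
        static void printKeysFrom(Database db, String fromKey) {
            EntryBinding keyBinding = TupleBinding.getPrimitiveBinding(String.class);
            StoredSortedKeySet keys = new StoredSortedKeySet(db, keyBinding, false);

            // tailSet(fromKey, true) includes fromKey itself; stored iterators
            // must always be closed to release the underlying cursor.
            Iterator i = keys.tailSet(fromKey, true).iterator();
            try {
                while (i.hasNext()) {
                    System.out.println(i.next());
                }
            } finally {
                StoredIterator.close(i);
            }
        }
    }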
    + * + * @author Mark Hayes + */ +public class StoredSortedKeySet extends StoredKeySet implements SortedSet { + + /** + * Creates a sorted key set view of a {@link Database}. + * + * @param database is the Database underlying the new collection. + * + * @param keyBinding is the binding used to translate between key buffers + * and key objects. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public StoredSortedKeySet(Database database, EntryBinding keyBinding, + boolean writeAllowed) { + + super(new DataView(database, keyBinding, null, null, + writeAllowed, null)); + } + + StoredSortedKeySet(DataView keySetView) { + + super(keySetView); + } + + /** + * Returns null since comparators are not supported. The natural ordering + * of a stored collection is data byte order, whether the data classes + * implement the {@link java.lang.Comparable} interface or not. + * This method does not conform to the {@link SortedSet#comparator} + * interface. + * + * @return null. + */ + public Comparator comparator() { + + return null; + } + + /** + * Returns the first (lowest) element currently in this sorted set. + * This method conforms to the {@link SortedSet#first} interface. + * + * @return the first element. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public Object first() { + + return getFirstOrLast(true); + } + + /** + * Returns the last (highest) element currently in this sorted set. + * This method conforms to the {@link SortedSet#last} interface. + * + * @return the last element. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public Object last() { + + return getFirstOrLast(false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly less than toKey. + * This method conforms to the {@link SortedSet#headSet} interface. + * + * @param toKey is the upper bound. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public SortedSet headSet(Object toKey) { + + return subSet(null, false, toKey, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly less than toKey, optionally including toKey. + * This method does not exist in the standard {@link SortedSet} interface. + * + * @param toKey is the upper bound. + * + * @param toInclusive is true to include toKey. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public SortedSet headSet(Object toKey, boolean toInclusive) { + + return subSet(null, false, toKey, toInclusive); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * greater than or equal to fromKey. + * This method conforms to the {@link SortedSet#tailSet} interface. + * + * @param fromKey is the lower bound. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. 
+ */ + public SortedSet tailSet(Object fromKey) { + + return subSet(fromKey, true, null, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly greater than fromKey, optionally including fromKey. + * This method does not exist in the standard {@link SortedSet} interface. + * + * @param fromKey is the lower bound. + * + * @param fromInclusive is true to include fromKey. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public SortedSet tailSet(Object fromKey, boolean fromInclusive) { + + return subSet(fromKey, fromInclusive, null, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements range + * from fromKey, inclusive, to toKey, exclusive. + * This method conforms to the {@link SortedSet#subSet} interface. + * + * @param fromKey is the lower bound. + * + * @param toKey is the upper bound. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public SortedSet subSet(Object fromKey, Object toKey) { + + return subSet(fromKey, true, toKey, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly greater than fromKey and strictly less than toKey, + * optionally including fromKey and toKey. + * This method does not exist in the standard {@link SortedSet} interface. + * + * @param fromKey is the lower bound. + * + * @param fromInclusive is true to include fromKey. + * + * @param toKey is the upper bound. + * + * @param toInclusive is true to include toKey. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public SortedSet subSet(Object fromKey, boolean fromInclusive, + Object toKey, boolean toInclusive) { + + try { + return new StoredSortedKeySet( + view.subView(fromKey, fromInclusive, toKey, toInclusive, null)); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } +} diff --git a/db/java/src/com/sleepycat/collections/StoredSortedMap.java b/db/java/src/com/sleepycat/collections/StoredSortedMap.java new file mode 100644 index 000000000..142e75ff4 --- /dev/null +++ b/db/java/src/com/sleepycat/collections/StoredSortedMap.java @@ -0,0 +1,348 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: StoredSortedMap.java,v 1.3 2004/09/22 18:01:03 bostic Exp $ + */ + +package com.sleepycat.collections; + +import java.util.Comparator; +import java.util.SortedMap; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.db.Database; +import com.sleepycat.db.OperationStatus; + +/** + * A SortedMap view of a {@link Database}. + * + *
<p>Note that this class does not conform to the standard Java + * collections interface in the following ways:</p> + * <ul> + * <li>The {@link #size} method always throws + * UnsupportedOperationException because, for performance reasons, + * databases do not maintain their total record count.</li> + * <li>All iterators must be explicitly closed using {@link + * StoredIterator#close()} or {@link StoredIterator#close(java.util.Iterator)} + * to release the underlying database cursor resources.</li> + * </ul> + * + * <p>In addition to the standard SortedMap methods, this class provides the + * following methods for stored sorted maps only. Note that the use of these + * methods is not compatible with the standard Java collections interface.</p> + * <ul> + * <li>{@link #duplicates(Object)}</li> + * <li>{@link #headMap(Object, boolean)}</li> + * <li>{@link #tailMap(Object, boolean)}</li> + * <li>{@link #subMap(Object, boolean, Object, boolean)}</li> + * </ul>
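Illustrative sketch only (editorial, not part of the patch) of the map view and the extra methods listed above, assuming an already-open btree Database and a String tuple binding for both keys and values; the class and variable names are invented for the example.

    import java.util.Collection;
    import java.util.SortedMap;

    import com.sleepycat.bind.EntryBinding;
    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.collections.StoredSortedMap;
    import com.sleepycat.db.Database;

    class SortedMapSketch {
        static void views(Database db) {
            EntryBinding binding = TupleBinding.getPrimitiveBinding(String.class);
            StoredSortedMap map = new StoredSortedMap(db, binding, binding, true);

            map.put("ant", "1");
            map.put("bee", "2");

            // Standard SortedMap view: keys strictly less than "bee".
            SortedMap head = map.headMap("bee");

            // Extended view: keys up to and including "bee".
            SortedMap headInclusive = map.headMap("bee", true);

            // All values stored under one key; more than one element only if
            // the database was opened with duplicates allowed.
            Collection beeValues = map.duplicates("bee");
        }
    }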
    + * + * @author Mark Hayes + */ +public class StoredSortedMap extends StoredMap implements SortedMap { + + /** + * Creates a sorted map view of a {@link Database}. + * + * @param database is the Database underlying the new collection. + * + * @param keyBinding is the binding used to translate between key buffers + * and key objects. + * + * @param valueBinding is the binding used to translate between value + * buffers and value objects. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public StoredSortedMap(Database database, EntryBinding keyBinding, + EntryBinding valueBinding, boolean writeAllowed) { + + super(new DataView(database, keyBinding, valueBinding, null, + writeAllowed, null)); + } + + /** + * Creates a sorted map view of a {@link Database} with a {@link + * PrimaryKeyAssigner}. Writing is allowed for the created map. + * + * @param database is the Database underlying the new collection. + * + * @param keyBinding is the binding used to translate between key buffers + * and key objects. + * + * @param valueBinding is the binding used to translate between value + * buffers and value objects. + * + * @param keyAssigner is used by the {@link #append} method to assign + * primary keys. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public StoredSortedMap(Database database, EntryBinding keyBinding, + EntryBinding valueBinding, + PrimaryKeyAssigner keyAssigner) { + + super(new DataView(database, keyBinding, valueBinding, null, + true, keyAssigner)); + } + + /** + * Creates a sorted map entity view of a {@link Database}. + * + * @param database is the Database underlying the new collection. + * + * @param keyBinding is the binding used to translate between key buffers + * and key objects. + * + * @param valueEntityBinding is the binding used to translate between + * key/value buffers and entity value objects. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public StoredSortedMap(Database database, EntryBinding keyBinding, + EntityBinding valueEntityBinding, + boolean writeAllowed) { + + super(new DataView(database, keyBinding, null, valueEntityBinding, + writeAllowed, null)); + } + + /** + * Creates a sorted map entity view of a {@link Database} with a {@link + * PrimaryKeyAssigner}. Writing is allowed for the created map. + * + * @param database is the Database underlying the new collection. + * + * @param keyBinding is the binding used to translate between key buffers + * and key objects. + * + * @param valueEntityBinding is the binding used to translate between + * key/value buffers and entity value objects. + * + * @param keyAssigner is used by the {@link #append} method to assign + * primary keys. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. 
+ * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public StoredSortedMap(Database database, EntryBinding keyBinding, + EntityBinding valueEntityBinding, + PrimaryKeyAssigner keyAssigner) { + + super(new DataView(database, keyBinding, null, valueEntityBinding, + true, keyAssigner)); + } + + StoredSortedMap(DataView mapView) { + + super(mapView); + } + + /** + * Returns null since comparators are not supported. The natural ordering + * of a stored collection is data byte order, whether the data classes + * implement the {@link java.lang.Comparable} interface or not. + * This method does not conform to the {@link SortedMap#comparator} + * interface. + * + * @return null. + */ + public Comparator comparator() { + + return null; + } + + /** + * Returns the first (lowest) key currently in this sorted map. + * This method conforms to the {@link SortedMap#firstKey} interface. + * + * @return the first key. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public Object firstKey() { + + return getFirstOrLastKey(true); + } + + /** + * Returns the last (highest) element currently in this sorted map. + * This method conforms to the {@link SortedMap#lastKey} interface. + * + * @return the last key. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public Object lastKey() { + + return getFirstOrLastKey(false); + } + + private Object getFirstOrLastKey(boolean doGetFirst) { + + DataCursor cursor = null; + try { + cursor = new DataCursor(view, false); + OperationStatus status; + if (doGetFirst) { + status = cursor.getFirst(false); + } else { + status = cursor.getLast(false); + } + return (status == OperationStatus.SUCCESS) ? + cursor.getCurrentKey() : null; + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + closeCursor(cursor); + } + } + + /** + * Returns a view of the portion of this sorted set whose keys are + * strictly less than toKey. + * This method conforms to the {@link SortedMap#headMap} interface. + * + * @param toKey is the upper bound. + * + * @return the submap. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public SortedMap headMap(Object toKey) { + + return subMap(null, false, toKey, false); + } + + /** + * Returns a view of the portion of this sorted map whose elements are + * strictly less than toKey, optionally including toKey. + * This method does not exist in the standard {@link SortedMap} interface. + * + * @param toKey is the upper bound. + * + * @param toInclusive is true to include toKey. + * + * @return the submap. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public SortedMap headMap(Object toKey, boolean toInclusive) { + + return subMap(null, false, toKey, toInclusive); + } + + /** + * Returns a view of the portion of this sorted map whose elements are + * greater than or equal to fromKey. + * This method conforms to the {@link SortedMap#tailMap} interface. + * + * @param fromKey is the lower bound. + * + * @return the submap. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. 
+ */ + public SortedMap tailMap(Object fromKey) { + + return subMap(fromKey, true, null, false); + } + + /** + * Returns a view of the portion of this sorted map whose elements are + * strictly greater than fromKey, optionally including fromKey. + * This method does not exist in the standard {@link SortedMap} interface. + * + * @param fromKey is the lower bound. + * + * @param fromInclusive is true to include fromKey. + * + * @return the submap. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public SortedMap tailMap(Object fromKey, boolean fromInclusive) { + + return subMap(fromKey, fromInclusive, null, false); + } + + /** + * Returns a view of the portion of this sorted map whose elements range + * from fromKey, inclusive, to toKey, exclusive. + * This method conforms to the {@link SortedMap#subMap} interface. + * + * @param fromKey is the lower bound. + * + * @param toKey is the upper bound. + * + * @return the submap. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public SortedMap subMap(Object fromKey, Object toKey) { + + return subMap(fromKey, true, toKey, false); + } + + /** + * Returns a view of the portion of this sorted map whose elements are + * strictly greater than fromKey and strictly less than toKey, + * optionally including fromKey and toKey. + * This method does not exist in the standard {@link SortedMap} interface. + * + * @param fromKey is the lower bound. + * + * @param fromInclusive is true to include fromKey. + * + * @param toKey is the upper bound. + * + * @param toInclusive is true to include toKey. + * + * @return the submap. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public SortedMap subMap(Object fromKey, boolean fromInclusive, + Object toKey, boolean toInclusive) { + + try { + return new StoredSortedMap( + view.subView(fromKey, fromInclusive, toKey, toInclusive, null)); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } +} diff --git a/db/java/src/com/sleepycat/collections/StoredSortedValueSet.java b/db/java/src/com/sleepycat/collections/StoredSortedValueSet.java new file mode 100644 index 000000000..96c534137 --- /dev/null +++ b/db/java/src/com/sleepycat/collections/StoredSortedValueSet.java @@ -0,0 +1,255 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: StoredSortedValueSet.java,v 1.2 2004/06/02 20:59:39 mark Exp $ + */ + +package com.sleepycat.collections; + +import java.util.Comparator; +import java.util.SortedSet; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.db.Database; + +/** + * The SortedSet returned by Map.values() and which can also be constructed + * directly if a Map is not needed. + * Although this collection is a set it may contain duplicate values. Only if + * an entity value binding is used are all elements guaranteed to be unique. + * + *
<p>Note that this class does not conform to the standard Java + * collections interface in the following ways:</p> + * <ul> + * <li>The {@link #size} method always throws + * UnsupportedOperationException because, for performance reasons, + * databases do not maintain their total record count.</li> + * <li>All iterators must be explicitly closed using {@link + * StoredIterator#close()} or {@link StoredIterator#close(java.util.Iterator)} + * to release the underlying database cursor resources.</li> + * </ul> + * + * <p>In addition to the standard SortedSet methods, this class provides the + * following methods for stored sorted value sets only. Note that the use of + * these methods is not compatible with the standard Java collections + * interface.</p> + * <ul> + * <li>{@link #headSet(Object, boolean)}</li> + * <li>{@link #tailSet(Object, boolean)}</li> + * <li>{@link #subSet(Object, boolean, Object, boolean)}</li> + * </ul>
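Illustrative sketch only (editorial, not part of the patch), assuming an already-open btree Database and a caller-supplied EntityBinding that can derive the key from a value, which the constructor requires; the class and variable names are invented for the example.

    import java.util.SortedSet;

    import com.sleepycat.bind.EntityBinding;
    import com.sleepycat.collections.StoredSortedValueSet;
    import com.sleepycat.db.Database;

    class ValueRangeSketch {
        static SortedSet valuesUpTo(Database db, EntityBinding entityBinding,
                                    Object upperBoundEntity) {
            // The binding must be able to derive a key from each value, or
            // the constructor throws IllegalArgumentException.
            StoredSortedValueSet values =
                new StoredSortedValueSet(db, entityBinding, false);

            // Values up to and including the bound, in the database's
            // byte ordering.
            return values.headSet(upperBoundEntity, true);
        }
    }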
    + * + * @author Mark Hayes + */ +public class StoredSortedValueSet extends StoredValueSet implements SortedSet { + + /* + * No valueBinding ctor is possible since key cannot be derived. + */ + + /** + * Creates a sorted value set entity view of a {@link Database}. + * + * @param database is the Database underlying the new collection. + * + * @param valueEntityBinding is the binding used to translate between + * key/value buffers and entity value objects. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public StoredSortedValueSet(Database database, + EntityBinding valueEntityBinding, + boolean writeAllowed) { + + super(new DataView(database, null, null, valueEntityBinding, + writeAllowed, null)); + checkKeyDerivation(); + } + + StoredSortedValueSet(DataView valueSetView) { + + super(valueSetView); + checkKeyDerivation(); + } + + private void checkKeyDerivation() { + + if (!view.canDeriveKeyFromValue()) { + throw new IllegalArgumentException("Cannot derive key from value"); + } + } + + /** + * Returns null since comparators are not supported. The natural ordering + * of a stored collection is data byte order, whether the data classes + * implement the {@link java.lang.Comparable} interface or not. + * This method does not conform to the {@link SortedSet#comparator} + * interface. + * + * @return null. + */ + public Comparator comparator() { + + return null; + } + + /** + * Returns the first (lowest) element currently in this sorted set. + * This method conforms to the {@link SortedSet#first} interface. + * + * @return the first element. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public Object first() { + + return getFirstOrLast(true); + } + + /** + * Returns the last (highest) element currently in this sorted set. + * This method conforms to the {@link SortedSet#last} interface. + * + * @return the last element. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public Object last() { + + return getFirstOrLast(false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly less than toValue. + * This method conforms to the {@link SortedSet#headSet} interface. + * + * @param toValue the upper bound. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public SortedSet headSet(Object toValue) { + + return subSet(null, false, toValue, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly less than toValue, optionally including toValue. + * This method does not exist in the standard {@link SortedSet} interface. + * + * @param toValue is the upper bound. + * + * @param toInclusive is true to include toValue. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public SortedSet headSet(Object toValue, boolean toInclusive) { + + return subSet(null, false, toValue, toInclusive); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * greater than or equal to fromValue. 
+ * This method conforms to the {@link SortedSet#tailSet} interface. + * + * @param fromValue is the lower bound. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public SortedSet tailSet(Object fromValue) { + + return subSet(fromValue, true, null, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly greater than fromValue, optionally including fromValue. + * This method does not exist in the standard {@link SortedSet} interface. + * + * @param fromValue is the lower bound. + * + * @param fromInclusive is true to include fromValue. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public SortedSet tailSet(Object fromValue, boolean fromInclusive) { + + return subSet(fromValue, fromInclusive, null, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements range + * from fromValue, inclusive, to toValue, exclusive. + * This method conforms to the {@link SortedSet#subSet} interface. + * + * @param fromValue is the lower bound. + * + * @param toValue is the upper bound. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public SortedSet subSet(Object fromValue, Object toValue) { + + return subSet(fromValue, true, toValue, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly greater than fromValue and strictly less than toValue, + * optionally including fromValue and toValue. + * This method does not exist in the standard {@link SortedSet} interface. + * + * @param fromValue is the lower bound. + * + * @param fromInclusive is true to include fromValue. + * + * @param toValue is the upper bound. + * + * @param toInclusive is true to include toValue. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a {@link + * com.sleepycat.db.DatabaseException} is thrown. + */ + public SortedSet subSet(Object fromValue, boolean fromInclusive, + Object toValue, boolean toInclusive) { + + try { + return new StoredSortedValueSet( + view.subView(fromValue, fromInclusive, toValue, toInclusive, + null)); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } +} diff --git a/db/java/src/com/sleepycat/collections/StoredValueSet.java b/db/java/src/com/sleepycat/collections/StoredValueSet.java new file mode 100644 index 000000000..a4d30bc87 --- /dev/null +++ b/db/java/src/com/sleepycat/collections/StoredValueSet.java @@ -0,0 +1,220 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: StoredValueSet.java,v 1.3 2004/06/04 18:24:50 mark Exp $ + */ + +package com.sleepycat.collections; + +import java.util.Set; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.OperationStatus; + +/** + * The Set returned by Map.values() and Map.duplicates(), and which can also be + * constructed directly if a Map is not needed. + * Although this collection is a set it may contain duplicate values. Only if + * an entity value binding is used are all elements guaranteed to be unique. + * + *
<p>Note that this class does not conform to the standard Java + * collections interface in the following ways:</p> + * <ul> + * <li>The {@link #size} method always throws + * UnsupportedOperationException because, for performance reasons, + * databases do not maintain their total record count.</li> + * <li>All iterators must be explicitly closed using {@link + * StoredIterator#close()} or {@link StoredIterator#close(java.util.Iterator)} + * to release the underlying database cursor resources.</li> + * </ul>
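Illustrative sketch only (editorial, not part of the patch) of adding and scanning values, assuming an already-open primary Database and a caller-supplied EntityBinding, since add() requires an entity binding; the class and variable names are invented for the example.

    import java.util.Iterator;
    import java.util.Set;

    import com.sleepycat.bind.EntityBinding;
    import com.sleepycat.collections.StoredIterator;
    import com.sleepycat.collections.StoredValueSet;
    import com.sleepycat.db.Database;

    class ValueSetSketch {
        static void addAndScan(Database db, EntityBinding entityBinding,
                               Object entity) {
            Set values = new StoredValueSet(db, entityBinding, true);

            // The key and value are both derived from the entity by the
            // binding; add() is rejected for secondary (index) databases.
            values.add(entity);

            Iterator i = values.iterator();
            try {
                while (i.hasNext()) {
                    Object stored = i.next();
                }
            } finally {
                StoredIterator.close(i);
            }
        }
    }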
    + * + * @author Mark Hayes + */ +public class StoredValueSet extends StoredCollection implements Set { + + /* + * This class is also used internally for the set returned by duplicates(). + */ + + private boolean isSingleKey; + + /** + * Creates a value set view of a {@link Database}. + * + * @param database is the Database underlying the new collection. + * + * @param valueBinding is the binding used to translate between value + * buffers and value objects. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public StoredValueSet(Database database, + EntryBinding valueBinding, + boolean writeAllowed) { + + super(new DataView(database, null, valueBinding, null, + writeAllowed, null)); + } + + /** + * Creates a value set entity view of a {@link Database}. + * + * @param database is the Database underlying the new collection. + * + * @param valueEntityBinding is the binding used to translate between + * key/value buffers and entity value objects. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public StoredValueSet(Database database, + EntityBinding valueEntityBinding, + boolean writeAllowed) { + + super(new DataView(database, null, null, valueEntityBinding, + writeAllowed, null)); + } + + StoredValueSet(DataView valueSetView) { + + super(valueSetView); + } + + StoredValueSet(DataView valueSetView, boolean isSingleKey) { + + super(valueSetView); + this.isSingleKey = isSingleKey; + } + + /** + * Adds the specified entity to this set if it is not already present + * (optional operation). + * This method conforms to the {@link Set#add} interface. + * + * @param entity is the entity to be added. + * + * @return true if the entity was added, that is the key-value pair + * represented by the entity was not previously present in the collection. + * + * @throws UnsupportedOperationException if the collection is read-only, + * if the collection is indexed, or if an entity binding is not used. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public boolean add(Object entity) { + + if (view.isSecondary()) { + throw new UnsupportedOperationException( + "add() not allowed with index"); + } else if (isSingleKey) { + /* entity is actually just a value in this case */ + if (!view.dupsAllowed) { + throw new UnsupportedOperationException("duplicates required"); + } + DataCursor cursor = null; + boolean doAutoCommit = beginAutoCommit(); + try { + cursor = new DataCursor(view, true); + cursor.useRangeKey(); + OperationStatus status = + cursor.putNoDupData(null, entity, null, true); + closeCursor(cursor); + commitAutoCommit(doAutoCommit); + return (status == OperationStatus.SUCCESS); + } catch (Exception e) { + closeCursor(cursor); + throw handleException(e, doAutoCommit); + } + } else if (view.entityBinding == null) { + throw new UnsupportedOperationException( + "add() requires entity binding"); + } else { + return add(null, entity); + } + } + + /** + * Returns true if this set contains the specified element. 
+ * This method conforms to the {@link java.util.Set#contains} + * interface. + * + * @param value the value to check. + * + * @return whether the set contains the given value. + */ + public boolean contains(Object value) { + + return containsValue(value); + } + + /** + * Removes the specified value from this set if it is present (optional + * operation). + * If an entity binding is used, the key-value pair represented by the + * given entity is removed. If an entity binding is used, the first + * occurrence of a key-value pair with the given value is removed. + * This method conforms to the {@link Set#remove} interface. + * + * @throws UnsupportedOperationException if the collection is read-only. + * + * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is + * thrown. + */ + public boolean remove(Object value) { + + return removeValue(value); + } + + // javadoc is inherited + public int size() { + + if (!isSingleKey) { + return super.size(); + } + DataCursor cursor = null; + try { + cursor = new DataCursor(view, false); + OperationStatus status = cursor.getFirst(false); + if (status == OperationStatus.SUCCESS) { + return cursor.count(); + } else { + return 0; + } + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + closeCursor(cursor); + } + } + + Object makeIteratorData(StoredIterator iterator, DataCursor cursor) + throws DatabaseException { + + return cursor.getCurrentValue(); + } + + boolean hasValues() { + + return true; + } +} diff --git a/db/java/src/com/sleepycat/collections/TransactionRunner.java b/db/java/src/com/sleepycat/collections/TransactionRunner.java new file mode 100644 index 000000000..270d265fb --- /dev/null +++ b/db/java/src/com/sleepycat/collections/TransactionRunner.java @@ -0,0 +1,221 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TransactionRunner.java,v 1.2 2004/09/22 18:01:03 bostic Exp $ + */ + +package com.sleepycat.collections; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.DeadlockException; +import com.sleepycat.db.Environment; +import com.sleepycat.db.Transaction; +import com.sleepycat.db.TransactionConfig; +import com.sleepycat.util.ExceptionUnwrapper; + +/** + * Starts a transaction, calls {@link TransactionWorker#doWork}, and handles + * transaction retry and exceptions. + * + * @author Mark Hayes + */ +public class TransactionRunner { + + /** The default maximum number of retries. */ + public static final int DEFAULT_MAX_RETRIES = 10; + + private Environment env; + private CurrentTransaction currentTxn; + private int maxRetries; + private TransactionConfig config; + private boolean allowNestedTxn; + + /** + * Creates a transaction runner for a given Berkeley DB environment. + * The default maximum number of retries ({@link #DEFAULT_MAX_RETRIES}) and + * a null (default) {@link TransactionConfig} will be used. + * + * @param env is the environment for running transactions. + */ + public TransactionRunner(Environment env) { + + this(env, DEFAULT_MAX_RETRIES, null); + } + + /** + * Creates a transaction runner for a given Berkeley DB environment and + * with a given number of maximum retries. + * + * @param env is the environment for running transactions. + * + * @param maxRetries is the maximum number of retries that will be + * performed when deadlocks are detected. 
+ * + * @param config the transaction configuration used for calling + * {@link Environment#beginTransaction}, or null to use the default + * configuration. The configuration object is not cloned, and + * any modifications to it will impact subsequent transactions. + */ + public TransactionRunner(Environment env, int maxRetries, + TransactionConfig config) { + + this.env = env; + this.currentTxn = CurrentTransaction.getInstance(env); + this.maxRetries = maxRetries; + this.config = config; + } + + /** + * Returns the maximum number of retries that will be performed when + * deadlocks are detected. + */ + public int getMaxRetries() { + + return maxRetries; + } + + /** + * Changes the maximum number of retries that will be performed when + * deadlocks are detected. + * Calling this method does not impact transactions already running. + */ + public void setMaxRetries(int maxRetries) { + + this.maxRetries = maxRetries; + } + + /** + * Returns whether nested transactions will be created if + * run() is called when a transaction is already active for + * the current thread. + * By default this property is false. + */ + public boolean getAllowNestedTransactions() { + + return allowNestedTxn; + } + + /** + * Changes whether nested transactions will be created if + * run() is called when a transaction is already active for + * the current thread. + * Calling this method does not impact transactions already running. + */ + public void setAllowNestedTransactions(boolean allowNestedTxn) { + + if (allowNestedTxn && !DbCompat.NESTED_TRANSACTIONS) { + throw new UnsupportedOperationException( + "Nested transactions are not supported."); + } + this.allowNestedTxn = allowNestedTxn; + } + + /** + * Returns the transaction configuration used for calling + * {@link Environment#beginTransaction}. + * + *
<p>If this property is null, the default configuration is used. The + * configuration object is not cloned, and any modifications to it will + * impact subsequent transactions.</p>
    + * + * @return the transaction configuration. + */ + public TransactionConfig getTransactionConfig() { + + return config; + } + + /** + * Changes the transaction configuration used for calling + * {@link Environment#beginTransaction}. + * + *
<p>If this property is null, the default configuration is used. The + * configuration object is not cloned, and any modifications to it will + * impact subsequent transactions.</p>
    + * + * @param config the transaction configuration. + */ + public void setTransactionConfig(TransactionConfig config) { + + this.config = config; + } + + /** + * Calls the {@link TransactionWorker#doWork} method and, for transactional + * environments, begins and ends a transaction. If the environment given + * is non-transactional, a transaction will not be used but the doWork() + * method will still be called. + * + *
<p>In a transactional environment, a new transaction is started before + * calling doWork(). This will start a nested transaction if one is + * already active. If DeadlockException is thrown by doWork(), the + * transaction will be aborted and the process will be repeated up to the + * maximum number of retries specified. If another exception is thrown by + * doWork() or the maximum number of retries has occurred, the transaction + * will be aborted and the exception will be rethrown by this method. If + * no exception is thrown by doWork(), the transaction will be committed. + * This method will not attempt to commit or abort a transaction if it has + * already been committed or aborted by doWork().</p>
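Illustrative sketch only (editorial, not part of the patch) of the retry behavior described above, assuming a transactional Environment and a stored collection created from a database in that environment; the class and variable names are invented for the example.

    import java.util.Map;

    import com.sleepycat.collections.TransactionRunner;
    import com.sleepycat.collections.TransactionWorker;
    import com.sleepycat.db.Environment;

    class RunnerSketch {
        static void writeWithRetries(Environment env, final Map storedMap)
            throws Exception {

            TransactionRunner runner = new TransactionRunner(env);
            runner.setMaxRetries(5);

            // doWork() runs inside a transaction; it is retried when
            // DeadlockException is thrown, up to the configured maximum, and
            // any other exception aborts the transaction and is rethrown.
            runner.run(new TransactionWorker() {
                public void doWork() throws Exception {
                    storedMap.put("key", "value");
                }
            });
        }
    }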
    + * + * @throws DeadlockException when it is thrown by doWork() and the + * maximum number of retries has occurred. The transaction will have been + * aborted by this method. + * + * @throws Exception when any other exception is thrown by doWork(). The + * exception will first be unwrapped by calling {@link + * ExceptionUnwrapper#unwrap}. The transaction will have been aborted by + * this method. + */ + public void run(TransactionWorker worker) + throws DatabaseException, Exception { + + if (currentTxn != null && + (allowNestedTxn || currentTxn.getTransaction() == null)) { + /* + * Transactional and (not nested or nested txns allowed). + */ + for (int i = 0;; i += 1) { + Transaction txn = null; + try { + txn = currentTxn.beginTransaction(config); + worker.doWork(); + if (txn != null && txn == currentTxn.getTransaction()) { + currentTxn.commitTransaction(); + } + return; + } catch (Exception e) { + e = ExceptionUnwrapper.unwrap(e); + if (txn != null && txn == currentTxn.getTransaction()) { + try { + currentTxn.abortTransaction(); + } catch (Exception e2) { + /* + * XXX We should really throw a 3rd exception that + * wraps both e and e2, to give the user a complete + * set of error information. + */ + e2.printStackTrace(); + throw e; + } + } + if (i >= maxRetries || !(e instanceof DeadlockException)) { + throw e; + } + } + } + } else { + /* + * Non-transactional or (nested and no nested txns allowed). + */ + try { + worker.doWork(); + } catch (Exception e) { + throw ExceptionUnwrapper.unwrap(e); + } + } + } +} diff --git a/db/java/src/com/sleepycat/collections/TransactionWorker.java b/db/java/src/com/sleepycat/collections/TransactionWorker.java new file mode 100644 index 000000000..eb69c7095 --- /dev/null +++ b/db/java/src/com/sleepycat/collections/TransactionWorker.java @@ -0,0 +1,28 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TransactionWorker.java,v 1.1 2004/04/09 16:34:10 mark Exp $ + */ + +package com.sleepycat.collections; + +/** + * The interface implemented to perform the work within a transaction. + * To run a transaction, an instance of this interface is passed to the + * {@link TransactionRunner#run} method. + * + * @author Mark Hayes + */ +public interface TransactionWorker { + + /** + * Perform the work for a single transaction. + * + * @see TransactionRunner#run + */ + void doWork() + throws Exception; +} diff --git a/db/java/src/com/sleepycat/collections/TupleSerialFactory.java b/db/java/src/com/sleepycat/collections/TupleSerialFactory.java new file mode 100644 index 000000000..b8382a1a8 --- /dev/null +++ b/db/java/src/com/sleepycat/collections/TupleSerialFactory.java @@ -0,0 +1,135 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TupleSerialFactory.java,v 1.1 2004/04/09 16:34:10 mark Exp $ + */ + +package com.sleepycat.collections; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.TupleSerialMarshalledBinding; +import com.sleepycat.bind.serial.TupleSerialMarshalledKeyCreator; +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.bind.tuple.TupleMarshalledBinding; +import com.sleepycat.db.Database; + +/** + * Creates stored collections having tuple keys and serialized entity values. 
+ * The entity classes must implement the java.io.Serializable and + * MarshalledTupleKeyEntity interfaces. The key classes must either implement + * the MarshalledTupleEntry interface or be one of the Java primitive type + * classes. Underlying binding objects are created automatically. + * + * @author Mark Hayes + */ +public class TupleSerialFactory { + + private ClassCatalog catalog; + + /** + * Creates a tuple-serial factory for given environment and class catalog. + */ + public TupleSerialFactory(ClassCatalog catalog) { + + this.catalog = catalog; + } + + /** + * Returns the class catalog associated with this factory. + */ + public final ClassCatalog getCatalog() { + + return catalog; + } + + /** + * Creates a map from a previously opened Database object. + * + * @param db the previously opened Database object. + * + * @param keyClass is the class used for map keys. It must implement the + * {@link com.sleepycat.bind.tuple.MarshalledTupleEntry} interface or be + * one of the Java primitive type classes. + * + * @param valueBaseClass the base class of the entity values for this + * store. It must implement the {@link + * com.sleepycat.bind.tuple.MarshalledTupleKeyEntity} interface. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + */ + public StoredMap newMap(Database db, Class keyClass, Class valueBaseClass, + boolean writeAllowed) { + + return new StoredMap(db, + getKeyBinding(keyClass), + getEntityBinding(valueBaseClass), + writeAllowed); + } + + /** + * Creates a sorted map from a previously opened Database object. + * + * @param db the previously opened Database object. + * + * @param keyClass is the class used for map keys. It must implement the + * {@link com.sleepycat.bind.tuple.MarshalledTupleEntry} interface or be + * one of the Java primitive type classes. + * + * @param valueBaseClass the base class of the entity values for this + * store. It must implement the {@link + * com.sleepycat.bind.tuple.MarshalledTupleKeyEntity} interface. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + */ + public StoredSortedMap newSortedMap(Database db, Class keyClass, + Class valueBaseClass, + boolean writeAllowed) { + + return new StoredSortedMap(db, + getKeyBinding(keyClass), + getEntityBinding(valueBaseClass), + writeAllowed); + } + + /** + * Creates a SecondaryKeyCreator object for use in configuring + * a SecondaryDatabase. The returned object implements + * the {@link com.sleepycat.db.SecondaryKeyCreator} interface. + * + * @param valueBaseClass the base class of the entity values for this + * store. It must implement the {@link + * com.sleepycat.bind.tuple.MarshalledTupleKeyEntity} interface. + * + * @param keyName is the key name passed to the {@link + * com.sleepycat.bind.tuple.MarshalledTupleKeyEntity#marshalSecondaryKey} + * method to identify the secondary key. 
+ */ + public TupleSerialMarshalledKeyCreator getKeyCreator(Class valueBaseClass, + String keyName) { + + return new TupleSerialMarshalledKeyCreator( + getEntityBinding(valueBaseClass), + keyName); + } + + private TupleSerialMarshalledBinding getEntityBinding(Class baseClass) { + + return new TupleSerialMarshalledBinding(catalog, baseClass); + } + + private EntryBinding getKeyBinding(Class keyClass) { + + EntryBinding binding = TupleBinding.getPrimitiveBinding(keyClass); + if (binding == null) { + binding = new TupleMarshalledBinding(keyClass); + } + return binding; + } +} + diff --git a/db/java/src/com/sleepycat/collections/package.html b/db/java/src/com/sleepycat/collections/package.html new file mode 100644 index 000000000..865f36b0f --- /dev/null +++ b/db/java/src/com/sleepycat/collections/package.html @@ -0,0 +1,21 @@ + + + + + + +Data access based on the standard Java collections API
+[reference guide]. +
    +Examples can be found in je/examples/com/sleepycat/examples/collections. Build and run directions are in the installation notes. + + diff --git a/db/java/src/com/sleepycat/compat/DbCompat.java b/db/java/src/com/sleepycat/compat/DbCompat.java new file mode 100644 index 000000000..84e68bace --- /dev/null +++ b/db/java/src/com/sleepycat/compat/DbCompat.java @@ -0,0 +1,255 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: DbCompat.java,v 1.5 2004/09/22 18:01:03 bostic Exp $ + */ + +package com.sleepycat.compat; + +import java.io.FileNotFoundException; +import java.util.Comparator; + +import com.sleepycat.db.Cursor; +import com.sleepycat.db.CursorConfig; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseConfig; +import com.sleepycat.db.DatabaseEntry; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.DatabaseType; +import com.sleepycat.db.Environment; +import com.sleepycat.db.EnvironmentConfig; +import com.sleepycat.db.LockDetectMode; +import com.sleepycat.db.LockMode; +import com.sleepycat.db.OperationStatus; +import com.sleepycat.db.SecondaryConfig; +import com.sleepycat.db.SecondaryCursor; +import com.sleepycat.db.SecondaryDatabase; +import com.sleepycat.db.Transaction; + +/** + * A minimal set of DB-JE compatibility methods for internal use only. + * Two versions are maintained in parallel in the DB and JE source trees. + * Used by the collections package. + */ +public class DbCompat { + + /* Capabilities */ + + public static final boolean CDB = true; + public static final boolean JOIN = true; + public static final boolean NESTED_TRANSACTIONS = true; + public static final boolean INSERTION_ORDERED_DUPLICATES = true; + public static final boolean SEPARATE_DATABASE_FILES = true; + public static final boolean MEMORY_SUBSYSTEM = true; + public static final boolean LOCK_SUBSYSTEM = true; + public static final boolean HASH_METHOD = true; + public static final boolean RECNO_METHOD = true; + public static final boolean QUEUE_METHOD = true; + public static final boolean BTREE_RECNUM_METHOD = true; + public static final boolean OPTIONAL_DIRTY_READ = true; + public static final boolean SECONDARIES = true; + + /* Methods used by the collections package. 
*/ + + public static boolean getInitializeLocking(EnvironmentConfig config) { + return config.getInitializeLocking(); + } + + public static boolean getInitializeCDB(EnvironmentConfig config) { + return config.getInitializeCDB(); + } + + public static boolean isTypeBtree(DatabaseConfig dbConfig) { + return dbConfig.getType() == DatabaseType.BTREE; + } + + public static boolean isTypeHash(DatabaseConfig dbConfig) { + return dbConfig.getType() == DatabaseType.HASH; + } + + public static boolean isTypeQueue(DatabaseConfig dbConfig) { + return dbConfig.getType() == DatabaseType.QUEUE; + } + + public static boolean isTypeRecno(DatabaseConfig dbConfig) { + return dbConfig.getType() == DatabaseType.RECNO; + } + + public static boolean getBtreeRecordNumbers(DatabaseConfig dbConfig) { + return dbConfig.getBtreeRecordNumbers(); + } + + public static boolean getDirtyRead(DatabaseConfig dbConfig) { + return dbConfig.getDirtyRead(); + } + + public static boolean getRenumbering(DatabaseConfig dbConfig) { + return dbConfig.getRenumbering(); + } + + public static boolean getSortedDuplicates(DatabaseConfig dbConfig) { + return dbConfig.getSortedDuplicates(); + } + + public static boolean getUnsortedDuplicates(DatabaseConfig dbConfig) { + return dbConfig.getUnsortedDuplicates(); + } + + public static void setWriteCursor(CursorConfig config, boolean val) { + config.setWriteCursor(val); + } + + public static void setRecordNumber(DatabaseEntry entry, int recNum) { + entry.setRecordNumber(recNum); + } + + public static int getRecordNumber(DatabaseEntry entry) { + return entry.getRecordNumber(); + } + + public static String getDatabaseFile(Database db) + throws DatabaseException { + return db.getDatabaseFile(); + } + + public static OperationStatus getCurrentRecordNumber(Cursor cursor, + DatabaseEntry key, + LockMode lockMode) + throws DatabaseException { + return cursor.getRecordNumber(key, lockMode); + } + + public static OperationStatus getSearchRecordNumber(Cursor cursor, + DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + return cursor.getSearchRecordNumber(key, data, lockMode); + } + + public static OperationStatus getSearchRecordNumber(SecondaryCursor cursor, + DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + return cursor.getSearchRecordNumber(key, pKey, data, lockMode); + } + + public static OperationStatus putAfter(Cursor cursor, DatabaseEntry data) + throws DatabaseException { + return cursor.putAfter(data); + } + + public static OperationStatus putBefore(Cursor cursor, DatabaseEntry data) + throws DatabaseException { + return cursor.putBefore(data); + } + + public static OperationStatus append(Database db, + Transaction txn, + DatabaseEntry key, + DatabaseEntry data) + throws DatabaseException { + return db.append(txn, key, data); + } + + /* Methods used by the collections tests. 
*/ + + public static void setInitializeCache(EnvironmentConfig config, + boolean val) { + config.setInitializeCache(val); + } + + public static void setInitializeLocking(EnvironmentConfig config, + boolean val) { + config.setInitializeLocking(val); + } + + public static void setInitializeCDB(EnvironmentConfig config, + boolean val) { + config.setInitializeCDB(val); + } + + public static void setLockDetectModeOldest(EnvironmentConfig config) { + + config.setLockDetectMode(LockDetectMode.OLDEST); + } + + public static void setBtreeComparator(DatabaseConfig dbConfig, + Comparator comparator) { + dbConfig.setBtreeComparator(comparator); + } + + public static void setTypeBtree(DatabaseConfig dbConfig) { + dbConfig.setType(DatabaseType.BTREE); + } + + public static void setTypeHash(DatabaseConfig dbConfig) { + dbConfig.setType(DatabaseType.HASH); + } + + public static void setTypeRecno(DatabaseConfig dbConfig) { + dbConfig.setType(DatabaseType.RECNO); + } + + public static void setTypeQueue(DatabaseConfig dbConfig) { + dbConfig.setType(DatabaseType.QUEUE); + } + + public static void setBtreeRecordNumbers(DatabaseConfig dbConfig, + boolean val) { + dbConfig.setBtreeRecordNumbers(val); + } + + public static void setDirtyRead(DatabaseConfig dbConfig, + boolean val) { + dbConfig.setDirtyRead(val); + } + + public static void setRenumbering(DatabaseConfig dbConfig, + boolean val) { + dbConfig.setRenumbering(val); + } + + public static void setSortedDuplicates(DatabaseConfig dbConfig, + boolean val) { + dbConfig.setSortedDuplicates(val); + } + + public static void setUnsortedDuplicates(DatabaseConfig dbConfig, + boolean val) { + dbConfig.setUnsortedDuplicates(val); + } + + public static void setRecordLength(DatabaseConfig dbConfig, int val) { + dbConfig.setRecordLength(val); + } + + public static void setRecordPad(DatabaseConfig dbConfig, int val) { + dbConfig.setRecordPad(val); + } + + public static Database openDatabase(Environment env, + Transaction txn, + String file, + String name, + DatabaseConfig config) + throws DatabaseException, FileNotFoundException { + return env.openDatabase(txn, file, name, config); + } + + public static SecondaryDatabase + openSecondaryDatabase(Environment env, + Transaction txn, + String file, + String name, + Database primary, + SecondaryConfig config) + throws DatabaseException, FileNotFoundException { + return env.openSecondaryDatabase(txn, file, name, primary, config); + } +} diff --git a/db/java/src/com/sleepycat/db/BtreePrefixCalculator.java b/db/java/src/com/sleepycat/db/BtreePrefixCalculator.java new file mode 100644 index 000000000..ca86f0a68 --- /dev/null +++ b/db/java/src/com/sleepycat/db/BtreePrefixCalculator.java @@ -0,0 +1,14 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2001-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: BtreePrefixCalculator.java,v 1.1 2004/04/06 20:43:36 mjc Exp $ + */ + +package com.sleepycat.db; + +public interface BtreePrefixCalculator { + int prefix(Database db, DatabaseEntry dbt1, DatabaseEntry dbt2); +} diff --git a/db/java/src/com/sleepycat/db/BtreeStats.java b/db/java/src/com/sleepycat/db/BtreeStats.java new file mode 100644 index 000000000..50c8f9d7a --- /dev/null +++ b/db/java/src/com/sleepycat/db/BtreeStats.java @@ -0,0 +1,146 @@ +/*- + * DO NOT EDIT: automatically built by dist/s_java_stat. + * + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. 
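[Editorial note: the BtreePrefixCalculator interface added above mirrors DB's bt_prefix callback. Assuming the conventional contract (return how many leading bytes of the second key are needed to keep it distinguishable from the first, capped at the second key's size), a minimal sketch might look like this; it would be installed with DatabaseConfig.setBtreePrefixCalculator() before the database is opened.]

    import com.sleepycat.db.BtreePrefixCalculator;
    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseEntry;

    public class ByteArrayPrefixCalculator implements BtreePrefixCalculator {
        public int prefix(Database db, DatabaseEntry dbt1, DatabaseEntry dbt2) {
            byte[] a = dbt1.getData();
            byte[] b = dbt2.getData();
            int limit = Math.min(dbt1.getSize(), dbt2.getSize());
            int shared = 0;
            while (shared < limit &&
                   a[dbt1.getOffset() + shared] == b[dbt2.getOffset() + shared])
                shared++;
            // One byte past the shared prefix tells the keys apart, but never
            // claim more bytes than the second key actually contains.
            return Math.min(shared + 1, dbt2.getSize());
        }
    }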
+ */ + +package com.sleepycat.db; + +public class BtreeStats extends DatabaseStats { + // no public constructor + protected BtreeStats() {} + + private int bt_magic; + public int getMagic() { + return bt_magic; + } + + private int bt_version; + public int getVersion() { + return bt_version; + } + + private int bt_metaflags; + public int getMetaFlags() { + return bt_metaflags; + } + + private int bt_nkeys; + public int getNumKeys() { + return bt_nkeys; + } + + private int bt_ndata; + public int getNumData() { + return bt_ndata; + } + + private int bt_pagesize; + public int getPageSize() { + return bt_pagesize; + } + + private int bt_maxkey; + public int getMaxKey() { + return bt_maxkey; + } + + private int bt_minkey; + public int getMinKey() { + return bt_minkey; + } + + private int bt_re_len; + public int getReLen() { + return bt_re_len; + } + + private int bt_re_pad; + public int getRePad() { + return bt_re_pad; + } + + private int bt_levels; + public int getLevels() { + return bt_levels; + } + + private int bt_int_pg; + public int getIntPages() { + return bt_int_pg; + } + + private int bt_leaf_pg; + public int getLeafPages() { + return bt_leaf_pg; + } + + private int bt_dup_pg; + public int getDupPages() { + return bt_dup_pg; + } + + private int bt_over_pg; + public int getOverPages() { + return bt_over_pg; + } + + private int bt_empty_pg; + public int getEmptyPages() { + return bt_empty_pg; + } + + private int bt_free; + public int getFree() { + return bt_free; + } + + private int bt_int_pgfree; + public int getIntPagesFree() { + return bt_int_pgfree; + } + + private int bt_leaf_pgfree; + public int getLeafPagesFree() { + return bt_leaf_pgfree; + } + + private int bt_dup_pgfree; + public int getDupPagesFree() { + return bt_dup_pgfree; + } + + private int bt_over_pgfree; + public int getOverPagesFree() { + return bt_over_pgfree; + } + + public String toString() { + return "BtreeStats:" + + "\n bt_magic=" + bt_magic + + "\n bt_version=" + bt_version + + "\n bt_metaflags=" + bt_metaflags + + "\n bt_nkeys=" + bt_nkeys + + "\n bt_ndata=" + bt_ndata + + "\n bt_pagesize=" + bt_pagesize + + "\n bt_maxkey=" + bt_maxkey + + "\n bt_minkey=" + bt_minkey + + "\n bt_re_len=" + bt_re_len + + "\n bt_re_pad=" + bt_re_pad + + "\n bt_levels=" + bt_levels + + "\n bt_int_pg=" + bt_int_pg + + "\n bt_leaf_pg=" + bt_leaf_pg + + "\n bt_dup_pg=" + bt_dup_pg + + "\n bt_over_pg=" + bt_over_pg + + "\n bt_empty_pg=" + bt_empty_pg + + "\n bt_free=" + bt_free + + "\n bt_int_pgfree=" + bt_int_pgfree + + "\n bt_leaf_pgfree=" + bt_leaf_pgfree + + "\n bt_dup_pgfree=" + bt_dup_pgfree + + "\n bt_over_pgfree=" + bt_over_pgfree + ; + } +} diff --git a/db/java/src/com/sleepycat/db/CacheFile.java b/db/java/src/com/sleepycat/db/CacheFile.java new file mode 100644 index 000000000..2fa44a19c --- /dev/null +++ b/db/java/src/com/sleepycat/db/CacheFile.java @@ -0,0 +1,70 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2001-2004 + * Sleepycat Software. All rights reserved. 
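[Editorial note: BtreeStats is a read-only mirror of the C DB_BTREE_STAT structure, filled in natively. An application reaches it by casting the DatabaseStats returned from Database.getStats(), which is defined later in this patch; the sketch below assumes an already-open btree Database handle.]

    import com.sleepycat.db.BtreeStats;
    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseException;

    public class BtreeStatsExample {
        // 'db' is assumed to be an open btree Database handle.
        static void printStats(Database db) throws DatabaseException {
            // Null txn and null StatsConfig use the defaults.
            BtreeStats stats = (BtreeStats) db.getStats(null, null);
            System.out.println("keys:   " + stats.getNumKeys());
            System.out.println("levels: " + stats.getLevels());
            System.out.println("pages:  " + (stats.getIntPages()
                + stats.getLeafPages() + stats.getDupPages()
                + stats.getOverPages()));
        }
    }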
+ * + * $Id: CacheFile.java,v 1.3 2004/09/23 17:56:39 mjc Exp $ + */ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; +import com.sleepycat.db.internal.DbMpoolFile; + +public class CacheFile { + private DbMpoolFile mpf; + + /* package */ + CacheFile(final DbMpoolFile mpf) { + this.mpf = mpf; + } + + public CacheFilePriority getPriority() + throws DatabaseException { + + return CacheFilePriority.fromFlag(mpf.get_priority()); + } + + public void setPriority(final CacheFilePriority priority) + throws DatabaseException { + + mpf.set_priority(priority.getFlag()); + } + + public long getMaximumSize() + throws DatabaseException { + + return mpf.get_maxsize(); + } + + public void setMaximumSize(final long bytes) + throws DatabaseException { + + mpf.set_maxsize(bytes); + } + + public boolean getNoFile() + throws DatabaseException { + + return (mpf.get_flags() & DbConstants.DB_MPOOL_NOFILE) != 0; + } + + public void setNoFile(final boolean onoff) + throws DatabaseException { + + mpf.set_flags(DbConstants.DB_MPOOL_NOFILE, onoff); + } + + public boolean getUnlink() + throws DatabaseException { + + return (mpf.get_flags() & DbConstants.DB_MPOOL_UNLINK) != 0; + } + + public void setUnlink(boolean onoff) + throws DatabaseException { + + mpf.set_flags(DbConstants.DB_MPOOL_UNLINK, onoff); + } +} diff --git a/db/java/src/com/sleepycat/db/CacheFilePriority.java b/db/java/src/com/sleepycat/db/CacheFilePriority.java new file mode 100644 index 000000000..89f08c8f2 --- /dev/null +++ b/db/java/src/com/sleepycat/db/CacheFilePriority.java @@ -0,0 +1,61 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: CacheFilePriority.java,v 1.2 2004/04/21 01:09:09 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; + +public final class CacheFilePriority { + public static final CacheFilePriority DEFAULT = + new CacheFilePriority("DEFAULT", DbConstants.DB_PRIORITY_DEFAULT); + public static final CacheFilePriority HIGH = + new CacheFilePriority("HIGH", DbConstants.DB_PRIORITY_HIGH); + public static final CacheFilePriority LOW = + new CacheFilePriority("LOW", DbConstants.DB_PRIORITY_LOW); + public static final CacheFilePriority VERY_HIGH = + new CacheFilePriority("VERY_HIGH", DbConstants.DB_PRIORITY_VERY_HIGH); + public static final CacheFilePriority VERY_LOW = + new CacheFilePriority("VERY_LOW", DbConstants.DB_PRIORITY_VERY_LOW); + + /* package */ + static CacheFilePriority fromFlag(int flag) { + switch (flag) { + case DbConstants.DB_PRIORITY_DEFAULT: + return DEFAULT; + case DbConstants.DB_PRIORITY_HIGH: + return HIGH; + case DbConstants.DB_PRIORITY_LOW: + return LOW; + case DbConstants.DB_PRIORITY_VERY_HIGH: + return VERY_HIGH; + case DbConstants.DB_PRIORITY_VERY_LOW: + return VERY_LOW; + default: + throw new IllegalArgumentException( + "Unknown cache priority: " + flag); + } + } + + private final String priorityName; + private final int flag; + + private CacheFilePriority(final String priorityName, final int flag) { + this.priorityName = priorityName; + this.flag = flag; + } + + public String toString() { + return "CacheFilePriority." 
+ priorityName; + } + + /* package */ + int getFlag() { + return flag; + } +} diff --git a/db/java/src/com/sleepycat/db/CacheFileStats.java b/db/java/src/com/sleepycat/db/CacheFileStats.java new file mode 100644 index 000000000..7b864ed4e --- /dev/null +++ b/db/java/src/com/sleepycat/db/CacheFileStats.java @@ -0,0 +1,68 @@ +/*- + * DO NOT EDIT: automatically built by dist/s_java_stat. + * + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + */ + +package com.sleepycat.db; + +public class CacheFileStats { + // no public constructor + protected CacheFileStats() {} + + private String file_name; + public String getFileName() { + return file_name; + } + + private int st_pagesize; + public int getPageSize() { + return st_pagesize; + } + + private int st_map; + public int getMap() { + return st_map; + } + + private int st_cache_hit; + public int getCacheHit() { + return st_cache_hit; + } + + private int st_cache_miss; + public int getCacheMiss() { + return st_cache_miss; + } + + private int st_page_create; + public int getPageCreate() { + return st_page_create; + } + + private int st_page_in; + public int getPageIn() { + return st_page_in; + } + + private int st_page_out; + public int getPageOut() { + return st_page_out; + } + + public String toString() { + return "CacheFileStats:" + + "\n file_name=" + file_name + + "\n st_pagesize=" + st_pagesize + + "\n st_map=" + st_map + + "\n st_cache_hit=" + st_cache_hit + + "\n st_cache_miss=" + st_cache_miss + + "\n st_page_create=" + st_page_create + + "\n st_page_in=" + st_page_in + + "\n st_page_out=" + st_page_out + ; + } +} diff --git a/db/java/src/com/sleepycat/db/CacheStats.java b/db/java/src/com/sleepycat/db/CacheStats.java new file mode 100644 index 000000000..b9e16c96a --- /dev/null +++ b/db/java/src/com/sleepycat/db/CacheStats.java @@ -0,0 +1,224 @@ +/*- + * DO NOT EDIT: automatically built by dist/s_java_stat. + * + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. 
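[Editorial note: CacheFile and CacheFilePriority together expose the per-file memory-pool knobs (DB_MPOOLFILE in the C API). A short sketch of raising a database's cache priority; the CacheFile handle comes from Database.getCacheFile(), which appears later in this patch, and the size cap is an arbitrary example value.]

    import com.sleepycat.db.CacheFile;
    import com.sleepycat.db.CacheFilePriority;
    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseException;

    public class CachePriorityExample {
        // 'db' is assumed to be an already-open Database handle.
        static void favorInCache(Database db) throws DatabaseException {
            CacheFile cf = db.getCacheFile();
            cf.setPriority(CacheFilePriority.VERY_HIGH);
            // Optionally cap how much of the cache this file may consume.
            cf.setMaximumSize(4 * 1024 * 1024); // bytes
        }
    }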
+ */ + +package com.sleepycat.db; + +public class CacheStats { + // no public constructor + protected CacheStats() {} + + private int st_gbytes; + public int getGbytes() { + return st_gbytes; + } + + private int st_bytes; + public int getBytes() { + return st_bytes; + } + + private int st_ncache; + public int getNumCache() { + return st_ncache; + } + + private int st_regsize; + public int getRegSize() { + return st_regsize; + } + + private int st_mmapsize; + public int getMmapSize() { + return st_mmapsize; + } + + private int st_maxopenfd; + public int getMaxOpenfd() { + return st_maxopenfd; + } + + private int st_maxwrite; + public int getMaxWrite() { + return st_maxwrite; + } + + private int st_maxwrite_sleep; + public int getMaxWriteSleep() { + return st_maxwrite_sleep; + } + + private int st_map; + public int getMap() { + return st_map; + } + + private int st_cache_hit; + public int getCacheHit() { + return st_cache_hit; + } + + private int st_cache_miss; + public int getCacheMiss() { + return st_cache_miss; + } + + private int st_page_create; + public int getPageCreate() { + return st_page_create; + } + + private int st_page_in; + public int getPageIn() { + return st_page_in; + } + + private int st_page_out; + public int getPageOut() { + return st_page_out; + } + + private int st_ro_evict; + public int getRoEvict() { + return st_ro_evict; + } + + private int st_rw_evict; + public int getRwEvict() { + return st_rw_evict; + } + + private int st_page_trickle; + public int getPageTrickle() { + return st_page_trickle; + } + + private int st_pages; + public int getPages() { + return st_pages; + } + + private int st_page_clean; + public int getPageClean() { + return st_page_clean; + } + + private int st_page_dirty; + public int getPageDirty() { + return st_page_dirty; + } + + private int st_hash_buckets; + public int getHashBuckets() { + return st_hash_buckets; + } + + private int st_hash_searches; + public int getHashSearches() { + return st_hash_searches; + } + + private int st_hash_longest; + public int getHashLongest() { + return st_hash_longest; + } + + private int st_hash_examined; + public int getHashExamined() { + return st_hash_examined; + } + + private int st_hash_nowait; + public int getHashNowait() { + return st_hash_nowait; + } + + private int st_hash_wait; + public int getHashWait() { + return st_hash_wait; + } + + private int st_hash_max_wait; + public int getHashMaxWait() { + return st_hash_max_wait; + } + + private int st_region_nowait; + public int getRegionNowait() { + return st_region_nowait; + } + + private int st_region_wait; + public int getRegionWait() { + return st_region_wait; + } + + private int st_alloc; + public int getAlloc() { + return st_alloc; + } + + private int st_alloc_buckets; + public int getAllocBuckets() { + return st_alloc_buckets; + } + + private int st_alloc_max_buckets; + public int getAllocMaxBuckets() { + return st_alloc_max_buckets; + } + + private int st_alloc_pages; + public int getAllocPages() { + return st_alloc_pages; + } + + private int st_alloc_max_pages; + public int getAllocMaxPages() { + return st_alloc_max_pages; + } + + public String toString() { + return "CacheStats:" + + "\n st_gbytes=" + st_gbytes + + "\n st_bytes=" + st_bytes + + "\n st_ncache=" + st_ncache + + "\n st_regsize=" + st_regsize + + "\n st_mmapsize=" + st_mmapsize + + "\n st_maxopenfd=" + st_maxopenfd + + "\n st_maxwrite=" + st_maxwrite + + "\n st_maxwrite_sleep=" + st_maxwrite_sleep + + "\n st_map=" + st_map + + "\n st_cache_hit=" + st_cache_hit + + "\n 
st_cache_miss=" + st_cache_miss + + "\n st_page_create=" + st_page_create + + "\n st_page_in=" + st_page_in + + "\n st_page_out=" + st_page_out + + "\n st_ro_evict=" + st_ro_evict + + "\n st_rw_evict=" + st_rw_evict + + "\n st_page_trickle=" + st_page_trickle + + "\n st_pages=" + st_pages + + "\n st_page_clean=" + st_page_clean + + "\n st_page_dirty=" + st_page_dirty + + "\n st_hash_buckets=" + st_hash_buckets + + "\n st_hash_searches=" + st_hash_searches + + "\n st_hash_longest=" + st_hash_longest + + "\n st_hash_examined=" + st_hash_examined + + "\n st_hash_nowait=" + st_hash_nowait + + "\n st_hash_wait=" + st_hash_wait + + "\n st_hash_max_wait=" + st_hash_max_wait + + "\n st_region_nowait=" + st_region_nowait + + "\n st_region_wait=" + st_region_wait + + "\n st_alloc=" + st_alloc + + "\n st_alloc_buckets=" + st_alloc_buckets + + "\n st_alloc_max_buckets=" + st_alloc_max_buckets + + "\n st_alloc_pages=" + st_alloc_pages + + "\n st_alloc_max_pages=" + st_alloc_max_pages + ; + } +} diff --git a/db/java/src/com/sleepycat/db/CheckpointConfig.java b/db/java/src/com/sleepycat/db/CheckpointConfig.java new file mode 100644 index 000000000..ab9d6bc83 --- /dev/null +++ b/db/java/src/com/sleepycat/db/CheckpointConfig.java @@ -0,0 +1,60 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: CheckpointConfig.java,v 1.3 2004/04/21 01:09:09 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbEnv; +import com.sleepycat.db.internal.DbConstants; + +public class CheckpointConfig { + public static final CheckpointConfig DEFAULT = new CheckpointConfig(); + + private boolean force = false; + private int kBytes = 0; + private int minutes = 0; + + public CheckpointConfig() { + } + + /* package */ + static CheckpointConfig checkNull(CheckpointConfig config) { + return (config == null) ? DEFAULT : config; + } + + public void setKBytes(final int kBytes) { + this.kBytes = kBytes; + } + + public int getKBytes() { + return kBytes; + } + + public void setMinutes(final int minutes) { + this.minutes = minutes; + } + + public int getMinutes() { + return minutes; + } + + public void setForce(final boolean force) { + this.force = force; + } + + public boolean getForce() { + return force; + } + + /* package */ + void runCheckpoint(final DbEnv dbenv) + throws DatabaseException { + + dbenv.txn_checkpoint(kBytes, minutes, force ? DbConstants.DB_FORCE : 0); + } +} diff --git a/db/java/src/com/sleepycat/db/Cursor.java b/db/java/src/com/sleepycat/db/Cursor.java new file mode 100644 index 000000000..6b1e8e7f8 --- /dev/null +++ b/db/java/src/com/sleepycat/db/Cursor.java @@ -0,0 +1,349 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. 
+* +* $Id: Cursor.java,v 1.5 2004/06/02 21:28:43 mark Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; +import com.sleepycat.db.internal.Dbc; + +public class Cursor { + /* package */ Dbc dbc; + protected Database database; + protected CursorConfig config; + + protected Cursor() { + } + + Cursor(final Database database, final Dbc dbc, final CursorConfig config) + throws DatabaseException { + + this.dbc = dbc; + this.database = database; + this.config = config; + } + + public synchronized void close() + throws DatabaseException { + + if (dbc != null) { + try { + dbc.close(); + } finally { + dbc = null; + } + } + } + + public Cursor dup(final boolean samePosition) + throws DatabaseException { + + return new Cursor(database, + dbc.dup(samePosition ? DbConstants.DB_POSITION : 0), config); + } + + public CursorConfig getConfig() { + return config; + } + + public Database getDatabase() { + return database; + } + + public int count() + throws DatabaseException { + + return dbc.count(0); + } + + public OperationStatus delete() + throws DatabaseException { + + return OperationStatus.fromInt(dbc.del(0)); + } + + public OperationStatus getCurrent(final DatabaseEntry key, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.get(key, data, DbConstants.DB_CURRENT | + LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getFirst(final DatabaseEntry key, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.get(key, data, DbConstants.DB_FIRST | + LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getLast(final DatabaseEntry key, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.get(key, data, DbConstants.DB_LAST | + LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getNext(final DatabaseEntry key, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.get(key, data, DbConstants.DB_NEXT | + LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getNextDup(final DatabaseEntry key, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.get(key, data, DbConstants.DB_NEXT_DUP | + LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getNextNoDup(final DatabaseEntry key, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.get(key, data, DbConstants.DB_NEXT_NODUP | + LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getPrev(final DatabaseEntry key, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.get(key, data, DbConstants.DB_PREV | + LockMode.getFlag(lockMode) | + ((data == null) ? 
0 : data.getMultiFlag()))); + } + + public OperationStatus getPrevDup(final DatabaseEntry key, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + /* + * "Get the previous duplicate" isn't directly supported by the C API, + * so here's how to get it: dup the cursor and call getPrev, then dup + * the result and call getNextDup. If both succeed then there was a + * previous duplicate and the first dup is sitting on it. Keep that, + * and call getCurrent to fill in the user's buffers. + */ + Dbc dup1 = dbc.dup(DbConstants.DB_POSITION); + try { + int errCode = dup1.get(DatabaseEntry.IGNORE, DatabaseEntry.IGNORE, + DbConstants.DB_PREV | LockMode.getFlag(lockMode)); + if (errCode == 0) { + Dbc dup2 = dup1.dup(DbConstants.DB_POSITION); + try { + errCode = dup2.get(DatabaseEntry.IGNORE, + DatabaseEntry.IGNORE, + DbConstants.DB_NEXT_DUP | LockMode.getFlag(lockMode)); + } finally { + dup2.close(); + } + } + if (errCode == 0) + errCode = dup1.get(key, data, + DbConstants.DB_CURRENT | LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag())); + if (errCode == 0) { + Dbc tdbc = dbc; + dbc = dup1; + dup1 = tdbc; + } + return OperationStatus.fromInt(errCode); + } finally { + dup1.close(); + } + } + + public OperationStatus getPrevNoDup(final DatabaseEntry key, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.get(key, data, DbConstants.DB_PREV_NODUP | + LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getRecordNumber(final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.get(DatabaseEntry.IGNORE, data, + DbConstants.DB_GET_RECNO | + LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getSearchKey(final DatabaseEntry key, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.get(key, data, DbConstants.DB_SET | + LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getSearchKeyRange(final DatabaseEntry key, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.get(key, data, DbConstants.DB_SET_RANGE | + LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getSearchBoth(final DatabaseEntry key, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.get(key, data, DbConstants.DB_GET_BOTH | + LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getSearchBothRange(final DatabaseEntry key, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.get(key, data, + DbConstants.DB_GET_BOTH_RANGE | + LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getSearchRecordNumber(final DatabaseEntry key, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.get(key, data, DbConstants.DB_SET_RECNO | + LockMode.getFlag(lockMode) | + ((data == null) ? 
0 : data.getMultiFlag()))); + } + + public OperationStatus put(final DatabaseEntry key, + final DatabaseEntry data) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.put(key, data, DbConstants.DB_KEYLAST)); + } + + public OperationStatus putAfter(final DatabaseEntry data) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.put(DatabaseEntry.UNUSED, data, DbConstants.DB_AFTER)); + } + + public OperationStatus putBefore(final DatabaseEntry data) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.put(DatabaseEntry.UNUSED, data, DbConstants.DB_BEFORE)); + } + + public OperationStatus putNoOverwrite(final DatabaseEntry key, + final DatabaseEntry data) + throws DatabaseException { + + /* + * The tricks here are making sure the cursor doesn't move on error and + * noticing that if the key exists, that's an error and we don't want + * to return the data. + */ + Dbc tempDbc = dbc.dup(0); + try { + int errCode = tempDbc.get(key, DatabaseEntry.IGNORE, + DbConstants.DB_SET | database.rmwFlag); + if (errCode == 0) + return OperationStatus.KEYEXIST; + else if (errCode != DbConstants.DB_NOTFOUND && + errCode != DbConstants.DB_KEYEMPTY) + return OperationStatus.fromInt(errCode); + else { + Dbc tdbc = dbc; + dbc = tempDbc; + tempDbc = tdbc; + + return OperationStatus.fromInt( + dbc.put(key, data, DbConstants.DB_KEYLAST)); + } + } finally { + tempDbc.close(); + } + } + + public OperationStatus putKeyFirst(final DatabaseEntry key, + final DatabaseEntry data) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.put(key, data, DbConstants.DB_KEYFIRST)); + } + + public OperationStatus putKeyLast(final DatabaseEntry key, + final DatabaseEntry data) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.put(key, data, DbConstants.DB_KEYLAST)); + } + + public OperationStatus putNoDupData(final DatabaseEntry key, + final DatabaseEntry data) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.put(key, data, DbConstants.DB_NODUPDATA)); + } + + public OperationStatus putCurrent(final DatabaseEntry data) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.put(DatabaseEntry.UNUSED, data, DbConstants.DB_CURRENT)); + } +} diff --git a/db/java/src/com/sleepycat/db/CursorConfig.java b/db/java/src/com/sleepycat/db/CursorConfig.java new file mode 100644 index 000000000..f674de3c8 --- /dev/null +++ b/db/java/src/com/sleepycat/db/CursorConfig.java @@ -0,0 +1,76 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. 
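[Editorial note: with the Cursor wrapper complete, the typical read pattern is a forward scan that closes the cursor in a finally block; getNextDup() would restrict the same loop to duplicates of the current key. LockMode and OperationStatus are defined elsewhere in this patch; the key/value contents below are placeholders.]

    import com.sleepycat.db.Cursor;
    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseEntry;
    import com.sleepycat.db.DatabaseException;
    import com.sleepycat.db.LockMode;
    import com.sleepycat.db.OperationStatus;

    public class CursorScanExample {
        static void dump(Database db) throws DatabaseException {
            DatabaseEntry key = new DatabaseEntry();
            DatabaseEntry data = new DatabaseEntry();
            Cursor cursor = db.openCursor(null, null); // default CursorConfig
            try {
                OperationStatus status = cursor.getFirst(key, data, LockMode.DEFAULT);
                while (status == OperationStatus.SUCCESS) {
                    System.out.println(new String(key.getData()) + " -> "
                        + new String(data.getData()));
                    status = cursor.getNext(key, data, LockMode.DEFAULT);
                }
            } finally {
                cursor.close();
            }
        }
    }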
+* +* $Id: CursorConfig.java,v 1.4 2004/09/28 19:30:36 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; +import com.sleepycat.db.internal.Db; +import com.sleepycat.db.internal.Dbc; +import com.sleepycat.db.internal.DbTxn; + +public class CursorConfig implements Cloneable { + public static final CursorConfig DEFAULT = new CursorConfig(); + + public static final CursorConfig DIRTY_READ = new CursorConfig(); + static { DIRTY_READ.setDirtyRead(true); } + + public static final CursorConfig DEGREE_2 = new CursorConfig(); + static { DEGREE_2.setDegree2(true); } + + public static final CursorConfig WRITECURSOR = new CursorConfig(); + static { WRITECURSOR.setWriteCursor(true); } + + + private boolean dirtyRead = false; + private boolean degree2 = false; + private boolean writeCursor = false; + + public CursorConfig() { + } + + /* package */ + static CursorConfig checkNull(CursorConfig config) { + return (config == null) ? DEFAULT : config; + } + + public void setDegree2(final boolean degree2) { + this.degree2 = degree2; + } + + public boolean getDegree2() { + return degree2; + } + + public void setDirtyRead(final boolean dirtyRead) { + this.dirtyRead = dirtyRead; + } + + public boolean getDirtyRead() { + return dirtyRead; + } + + public void setWriteCursor(final boolean writeCursor) { + this.writeCursor = writeCursor; + } + + public boolean getWriteCursor() { + return writeCursor; + } + + /* package */ + Dbc openCursor(final Db db, final DbTxn txn) + throws DatabaseException { + + int flags = 0; + flags |= dirtyRead ? DbConstants.DB_DIRTY_READ : 0; + flags |= degree2 ? DbConstants.DB_DEGREE_2 : 0; + flags |= writeCursor ? DbConstants.DB_WRITECURSOR : 0; + return db.cursor(txn, flags); + } +} diff --git a/db/java/src/com/sleepycat/db/Database.java b/db/java/src/com/sleepycat/db/Database.java new file mode 100644 index 000000000..186e71e21 --- /dev/null +++ b/db/java/src/com/sleepycat/db/Database.java @@ -0,0 +1,314 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: Database.java,v 1.12 2004/09/28 19:30:37 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.Db; +import com.sleepycat.db.internal.DbConstants; +import com.sleepycat.db.internal.DbSequence; +import com.sleepycat.db.internal.Dbc; + +public class Database { + Db db; + private int autoCommitFlag; + int rmwFlag; + + /* package */ + Database(final Db db) + throws DatabaseException { + + this.db = db; + db.wrapper = this; + this.autoCommitFlag = + db.get_transactional() ? DbConstants.DB_AUTO_COMMIT : 0; + rmwFlag = ((db.get_env().get_open_flags() & + DbConstants.DB_INIT_LOCK) != 0) ? DbConstants.DB_RMW : 0; + } + + public Database(final String filename, + final String databaseName, + final DatabaseConfig config) + throws DatabaseException, java.io.FileNotFoundException { + + this(DatabaseConfig.checkNull(config).openDatabase(null, null, + filename, databaseName)); + // Set up dbenv.wrapper + new Environment(db.get_env()); + } + + public void close(final boolean noSync) + throws DatabaseException { + + db.close(noSync ? DbConstants.DB_NOSYNC : 0); + } + + public void close() + throws DatabaseException { + + close(false); + } + + public Cursor openCursor(final Transaction txn, CursorConfig config) + throws DatabaseException { + + return new Cursor(this, CursorConfig.checkNull(config).openCursor( + db, (txn == null) ? 
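[Editorial note: CursorConfig maps its three boolean options onto the cursor-open flags (DB_DIRTY_READ, DB_DEGREE_2, DB_WRITECURSOR), and the preconfigured singletons cover the common cases. For instance, a CDB application would request a write cursor as sketched below; the Database handle is a placeholder.]

    import com.sleepycat.db.Cursor;
    import com.sleepycat.db.CursorConfig;
    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseException;

    public class WriteCursorExample {
        static Cursor openWriteCursor(Database db) throws DatabaseException {
            // Equivalent to building a CursorConfig and calling setWriteCursor(true).
            return db.openCursor(null, CursorConfig.WRITECURSOR);
        }
    }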
null : txn.txn), config); + } + + public Sequence openSequence(final Transaction txn, + final DatabaseEntry key, + final SequenceConfig config) + throws DatabaseException { + + return new Sequence(SequenceConfig.checkNull(config).openSequence( + db, (txn == null) ? null : txn.txn, key), config); + } + + public void removeSequence(final Transaction txn, + final DatabaseEntry key, + SequenceConfig config) + throws DatabaseException { + + config = SequenceConfig.checkNull(config); + final DbSequence seq = config.openSequence( + db, (txn == null) ? null : txn.txn, key); + seq.remove((txn == null) ? null : txn.txn, + (txn == null && db.get_transactional()) ? + DbConstants.DB_AUTO_COMMIT | (config.getAutoCommitNoSync() ? + DbConstants.DB_TXN_NOSYNC : 0) : 0); + } + + public String getDatabaseFile() + throws DatabaseException { + + return db.get_filename(); + } + + public String getDatabaseName() + throws DatabaseException { + + return db.get_dbname(); + } + + public DatabaseConfig getConfig() + throws DatabaseException { + + return new DatabaseConfig(db); + } + + public void setConfig(DatabaseConfig config) + throws DatabaseException { + + config.configureDatabase(db, getConfig()); + } + + public Environment getEnvironment() + throws DatabaseException { + + return db.get_env().wrapper; + } + + public CacheFile getCacheFile() + throws DatabaseException { + + return new CacheFile(db.get_mpf()); + } + + public OperationStatus append(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data) + throws DatabaseException { + + return OperationStatus.fromInt( + db.put((txn == null) ? null : txn.txn, key, data, + DbConstants.DB_APPEND | ((txn == null) ? autoCommitFlag : 0))); + } + + public OperationStatus consume(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data, + final boolean wait) + throws DatabaseException { + + return OperationStatus.fromInt( + db.get((txn == null) ? null : txn.txn, + key, data, + (wait ? DbConstants.DB_CONSUME_WAIT : DbConstants.DB_CONSUME) | + ((txn == null) ? autoCommitFlag : 0))); + } + + public OperationStatus delete(final Transaction txn, + final DatabaseEntry key) + throws DatabaseException { + + return OperationStatus.fromInt( + db.del((txn == null) ? null : txn.txn, key, + ((txn == null) ? autoCommitFlag : 0))); + } + + public OperationStatus get(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + db.get((txn == null) ? null : txn.txn, + key, data, + LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public KeyRange getKeyRange(final Transaction txn, + final DatabaseEntry key) + throws DatabaseException { + + final KeyRange range = new KeyRange(); + db.key_range((txn == null) ? null : txn.txn, key, range, 0); + return range; + } + + public OperationStatus getSearchBoth(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + db.get((txn == null) ? null : txn.txn, + key, data, + DbConstants.DB_GET_BOTH | + LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getSearchRecordNumber(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + db.get((txn == null) ? 
null : txn.txn, + key, data, + DbConstants.DB_SET_RECNO | + LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus put(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data) + throws DatabaseException { + + return OperationStatus.fromInt( + db.put((txn == null) ? null : txn.txn, + key, data, + ((txn == null) ? autoCommitFlag : 0))); + } + + public OperationStatus putNoDupData(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data) + throws DatabaseException { + + return OperationStatus.fromInt( + db.put((txn == null) ? null : txn.txn, + key, data, + DbConstants.DB_NODUPDATA | + ((txn == null) ? autoCommitFlag : 0))); + } + + public OperationStatus putNoOverwrite(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data) + throws DatabaseException { + + return OperationStatus.fromInt( + db.put((txn == null) ? null : txn.txn, + key, data, + DbConstants.DB_NOOVERWRITE | + ((txn == null) ? autoCommitFlag : 0))); + } + + public JoinCursor join(final Cursor[] cursList, JoinConfig config) + throws DatabaseException { + + config = JoinConfig.checkNull(config); + + final Dbc[] dbcList = new Dbc[cursList.length]; + for (int i = 0; i < cursList.length; i++) + dbcList[i] = (cursList[i] == null) ? null : cursList[i].dbc; + + return new JoinCursor(this, + db.join(dbcList, config.getFlags()), config); + } + + public int truncate(final Transaction txn, boolean countRecords) + throws DatabaseException { + + // XXX: implement countRecords in C + int count = db.truncate((txn == null) ? null : txn.txn, + ((txn == null) ? autoCommitFlag : 0)); + + return countRecords ? count : -1; + } + + public DatabaseStats getStats(final Transaction txn, StatsConfig config) + throws DatabaseException { + + return (DatabaseStats)db.stat((txn == null) ? null : txn.txn, + StatsConfig.checkNull(config).getFlags()); + } + + public static void remove(final String fileName, + final String databaseName, + DatabaseConfig config) + throws DatabaseException, java.io.FileNotFoundException { + + final Db db = DatabaseConfig.checkNull(config).createDatabase(null); + db.remove(fileName, databaseName, 0); + } + + public static void rename(final String fileName, + final String oldDatabaseName, + final String newDatabaseName, + DatabaseConfig config) + throws DatabaseException, java.io.FileNotFoundException { + + final Db db = DatabaseConfig.checkNull(config).createDatabase(null); + db.rename(fileName, oldDatabaseName, newDatabaseName, 0); + } + + public void sync() + throws DatabaseException { + + db.sync(0); + } + + public static void upgrade(final String fileName, + DatabaseConfig config) + throws DatabaseException, java.io.FileNotFoundException { + + final Db db = DatabaseConfig.checkNull(config).createDatabase(null); + db.upgrade(fileName, + config.getSortedDuplicates() ? 
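[Editorial note: the Database class mirrors the DB->put/get pairs; results come back as OperationStatus values rather than exceptions, so callers check SUCCESS, NOTFOUND and KEYEXIST explicitly. A minimal non-transactional round trip; the key and value strings are placeholders, and LockMode/OperationStatus are defined elsewhere in this patch.]

    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseEntry;
    import com.sleepycat.db.DatabaseException;
    import com.sleepycat.db.LockMode;
    import com.sleepycat.db.OperationStatus;

    public class PutGetExample {
        static void roundTrip(Database db) throws DatabaseException {
            DatabaseEntry key = new DatabaseEntry("fruit".getBytes());
            DatabaseEntry data = new DatabaseEntry("apple".getBytes());

            if (db.putNoOverwrite(null, key, data) == OperationStatus.KEYEXIST)
                System.out.println("key already present");

            DatabaseEntry found = new DatabaseEntry();
            OperationStatus status = db.get(null, key, found, LockMode.DEFAULT);
            if (status == OperationStatus.SUCCESS)
                System.out.println(new String(found.getData()));
        }
    }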
DbConstants.DB_DUPSORT : 0); + db.close(0); + } + + public boolean verify(final String fileName, + final String databaseName, + final java.io.PrintStream dumpStream, + VerifyConfig config) + throws DatabaseException, java.io.FileNotFoundException { + + return db.verify(fileName, databaseName, dumpStream, + VerifyConfig.checkNull(config).getFlags()); + } +} diff --git a/db/java/src/com/sleepycat/db/DatabaseConfig.java b/db/java/src/com/sleepycat/db/DatabaseConfig.java new file mode 100644 index 000000000..2a66f8410 --- /dev/null +++ b/db/java/src/com/sleepycat/db/DatabaseConfig.java @@ -0,0 +1,628 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: DatabaseConfig.java,v 1.8 2004/07/30 14:52:21 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.Db; +import com.sleepycat.db.internal.DbConstants; +import com.sleepycat.db.internal.DbEnv; +import com.sleepycat.db.internal.DbTxn; +import com.sleepycat.db.internal.DbUtil; + +public class DatabaseConfig implements Cloneable { + /* + * For internal use, final to allow null as a valid value for + * the config parameter. + */ + public static final DatabaseConfig DEFAULT = new DatabaseConfig(); + + /* package */ + static DatabaseConfig checkNull(DatabaseConfig config) { + return (config == null) ? DEFAULT : config; + } + + /* Parameters */ + private DatabaseType type = DatabaseType.UNKNOWN; + private int mode = 0644; + private int btMinKey = 0; + private int byteOrder = 0; + private long cacheSize = 0L; + private int cacheCount = 0; + private java.io.OutputStream errorStream = null; + private String errorPrefix = null; + private int hashFillFactor = 0; + private int hashNumElements = 0; + private java.io.OutputStream messageStream = null; + private int pageSize = 0; + private String password = null; + private int queueExtentSize = 0; + private int recordDelimiter = 0; + private int recordLength = 0; + private int recordPad = -1; // Zero is a valid, non-default value. 
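[Editorial note: the static remove(), rename() and upgrade() methods above operate on closed database files, much like the db_* command-line utilities. Passing a null config is accepted by remove() and rename(); note that upgrade(), as written above, reads getSortedDuplicates() from the caller's config directly, so a non-null DatabaseConfig should be supplied there. The file and database names below are placeholders.]

    import java.io.FileNotFoundException;
    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseException;

    public class MaintenanceExample {
        static void retireOldDatabase()
            throws DatabaseException, FileNotFoundException {

            // Rename the logical database "2003" to "2003-archived" inside
            // data.db, then drop the obsolete "scratch" database from the file.
            Database.rename("data.db", "2003", "2003-archived", null);
            Database.remove("data.db", "scratch", null);
        }
    }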
+ private java.io.File recordSource = null; + + /* Flags */ + private boolean allowCreate = false; + private boolean btreeRecordNumbers = false; + private boolean checksum = false; + private boolean dirtyRead = false; + private boolean encrypted = false; + private boolean exclusiveCreate = false; + private boolean noMMap = false; + private boolean queueInOrder = false; + private boolean readOnly = false; + private boolean renumbering = false; + private boolean reverseSplitOff = false; + private boolean sortedDuplicates = false; + private boolean snapshot = false; + private boolean unsortedDuplicates = false; + private boolean transactional = false; + private boolean transactionNotDurable = false; + private boolean truncate = false; + private boolean xaCreate = false; + + private java.util.Comparator btreeComparator = null; + private BtreePrefixCalculator btreePrefixCalculator = null; + private java.util.Comparator duplicateComparator = null; + private FeedbackHandler feedbackHandler = null; + private ErrorHandler errorHandler = null; + private MessageHandler messageHandler = null; + private Hasher hasher = null; + private RecordNumberAppender recnoAppender = null; + private PanicHandler panicHandler = null; + + public DatabaseConfig() { + } + + public void setAllowCreate(final boolean allowCreate) { + this.allowCreate = allowCreate; + } + + public boolean getAllowCreate() { + return allowCreate; + } + + public void setBtreeComparator(final java.util.Comparator btreeComparator) { + this.btreeComparator = btreeComparator; + } + + public java.util.Comparator getBtreeComparator() { + return btreeComparator; + } + + public void setBtreeMinKey(final int btMinKey) { + this.btMinKey = btMinKey; + } + + public int getBtreeMinKey() { + return btMinKey; + } + + public void setByteOrder(final int byteOrder) { + this.byteOrder = byteOrder; + } + + public int getByteOrder() { + return byteOrder; + } + + public boolean getByteSwapped() { + return byteOrder != 0 && byteOrder != DbUtil.default_lorder(); + } + + public void setBtreePrefixCalculator( + final BtreePrefixCalculator btreePrefixCalculator) { + this.btreePrefixCalculator = btreePrefixCalculator; + } + + public BtreePrefixCalculator getBtreePrefixCalculator() { + return btreePrefixCalculator; + } + + public void setCacheSize(final long cacheSize) { + this.cacheSize = cacheSize; + } + + public long getCacheSize() { + return cacheSize; + } + + public void setCacheCount(final int cacheCount) { + this.cacheCount = cacheCount; + } + + public int getCacheCount() { + return cacheCount; + } + + public void setChecksum(final boolean checksum) { + this.checksum = checksum; + } + + public boolean getChecksum() { + return checksum; + } + + public void setDirtyRead(final boolean dirtyRead) { + this.dirtyRead = dirtyRead; + } + + public boolean getDirtyRead() { + return dirtyRead; + } + + public void setDuplicateComparator(final java.util.Comparator duplicateComparator) { + this.duplicateComparator = duplicateComparator; + } + + public java.util.Comparator getDuplicateComparator() { + return duplicateComparator; + } + + public void setEncrypted(final String password) { + this.password = password; + } + + public boolean getEncrypted() { + return (password != null); + } + + public void setErrorHandler(final ErrorHandler errorHandler) { + this.errorHandler = errorHandler; + } + + public ErrorHandler getErrorHandler() { + return errorHandler; + } + + public void setErrorPrefix(final String errorPrefix) { + this.errorPrefix = errorPrefix; + } + + public String 
getErrorPrefix() { + return errorPrefix; + } + + public void setErrorStream(final java.io.OutputStream errorStream) { + this.errorStream = errorStream; + } + + public java.io.OutputStream getErrorStream() { + return errorStream; + } + + public void setExclusiveCreate(final boolean exclusiveCreate) { + this.exclusiveCreate = exclusiveCreate; + } + + public boolean getExclusiveCreate() { + return exclusiveCreate; + } + + public void setFeedbackHandler(final FeedbackHandler feedbackHandler) { + this.feedbackHandler = feedbackHandler; + } + + public FeedbackHandler getFeedbackHandler() { + return feedbackHandler; + } + + public void setHashFillFactor(final int hashFillFactor) { + this.hashFillFactor = hashFillFactor; + } + + public int getHashFillFactor() { + return hashFillFactor; + } + + public void setHasher(final Hasher hasher) { + this.hasher = hasher; + } + + public Hasher getHasher() { + return hasher; + } + + public void setHashNumElements(final int hashNumElements) { + this.hashNumElements = hashNumElements; + } + + public int getHashNumElements() { + return hashNumElements; + } + + public void setMessageHandler(final MessageHandler messageHandler) { + this.messageHandler = messageHandler; + } + + public MessageHandler getMessageHandler() { + return messageHandler; + } + + public void setMessageStream(final java.io.OutputStream messageStream) { + this.messageStream = messageStream; + } + + public java.io.OutputStream getMessageStream() { + return messageStream; + } + + public void setMode(final int mode) { + this.mode = mode; + } + + public long getMode() { + return mode; + } + + public void setNoMMap(final boolean noMMap) { + this.noMMap = noMMap; + } + + public boolean getNoMMap() { + return noMMap; + } + + public void setPageSize(final int pageSize) { + this.pageSize = pageSize; + } + + public int getPageSize() { + return pageSize; + } + + public void setPanicHandler(final PanicHandler panicHandler) { + this.panicHandler = panicHandler; + } + + public PanicHandler getPanicHandler() { + return panicHandler; + } + + public void setQueueExtentSize(final int queueExtentSize) { + this.queueExtentSize = queueExtentSize; + } + + public int getQueueExtentSize() { + return queueExtentSize; + } + + public void setQueueInOrder(final boolean queueInOrder) { + this.queueInOrder = queueInOrder; + } + + public boolean getQueueInOrder() { + return queueInOrder; + } + + public void setReadOnly(final boolean readOnly) { + this.readOnly = readOnly; + } + + public boolean getReadOnly() { + return readOnly; + } + + public void setRecordNumberAppender(final RecordNumberAppender recnoAppender) { + this.recnoAppender = recnoAppender; + } + + public RecordNumberAppender getRecordNumberAppender() { + return recnoAppender; + } + + public void setRecordDelimiter(final int recordDelimiter) { + this.recordDelimiter = recordDelimiter; + } + + public int getRecordDelimiter() { + return recordDelimiter; + } + + public void setRecordLength(final int recordLength) { + this.recordLength = recordLength; + } + + public int getRecordLength() { + return recordLength; + } + + public void setBtreeRecordNumbers(final boolean btreeRecordNumbers) { + this.btreeRecordNumbers = btreeRecordNumbers; + } + + public boolean getBtreeRecordNumbers() { + return btreeRecordNumbers; + } + + public void setRecordPad(final int recordPad) { + this.recordPad = recordPad; + } + + public int getRecordPad() { + return recordPad; + } + + public void setRecordSource(final java.io.File recordSource) { + this.recordSource = recordSource; + } + + 
public java.io.File getRecordSource() { + return recordSource; + } + + public void setRenumbering(final boolean renumbering) { + this.renumbering = renumbering; + } + + public boolean getRenumbering() { + return renumbering; + } + + public void setReverseSplitOff(final boolean reverseSplitOff) { + this.reverseSplitOff = reverseSplitOff; + } + + public boolean getReverseSplitOff() { + return reverseSplitOff; + } + + public void setSortedDuplicates(final boolean sortedDuplicates) { + this.sortedDuplicates = sortedDuplicates; + } + + public boolean getSortedDuplicates() { + return sortedDuplicates; + } + + public void setUnsortedDuplicates(final boolean unsortedDuplicates) { + this.unsortedDuplicates = unsortedDuplicates; + } + + public boolean getUnsortedDuplicates() { + return unsortedDuplicates; + } + + public void setSnapshot(final boolean snapshot) { + this.snapshot = snapshot; + } + + public boolean getSnapshot() { + return snapshot; + } + + public boolean getTransactional() { + return transactional; + } + + public void setTransactional(final boolean transactional) { + this.transactional = transactional; + } + + public void setTransactionNotDurable(final boolean transactionNotDurable) { + this.transactionNotDurable = transactionNotDurable; + } + + public boolean getTransactionNotDurable() { + return transactionNotDurable; + } + + public void setTruncate(final boolean truncate) { + this.truncate = truncate; + } + + public boolean getTruncate() { + return truncate; + } + + public void setType(final DatabaseType type) { + this.type = type; + } + + public DatabaseType getType() { + return type; + } + + public void setXACreate(final boolean xaCreate) { + this.xaCreate = xaCreate; + } + + public boolean getXACreate() { + return xaCreate; + } + + /* package */ + Db createDatabase(final DbEnv dbenv) + throws DatabaseException { + + int createFlags = 0; + + createFlags |= xaCreate ? DbConstants.DB_XA_CREATE : 0; + return new Db(dbenv, createFlags); + } + + /* package */ + Db openDatabase(final DbEnv dbenv, + final DbTxn txn, + final String fileName, + final String databaseName) + throws DatabaseException, java.io.FileNotFoundException { + + final Db db = createDatabase(dbenv); + // The DB_THREAD flag is inherited from the environment + // (defaulting to ON if no environment handle is supplied). + boolean threaded = (dbenv == null || + (dbenv.get_open_flags() & DbConstants.DB_THREAD) != 0); + + int openFlags = 0; + openFlags |= allowCreate ? DbConstants.DB_CREATE : 0; + openFlags |= dirtyRead ? DbConstants.DB_DIRTY_READ : 0; + openFlags |= exclusiveCreate ? DbConstants.DB_EXCL : 0; + openFlags |= noMMap ? DbConstants.DB_NOMMAP : 0; + openFlags |= readOnly ? DbConstants.DB_RDONLY : 0; + openFlags |= threaded ? DbConstants.DB_THREAD : 0; + openFlags |= truncate ? DbConstants.DB_TRUNCATE : 0; + + if (transactional && txn == null) + openFlags |= DbConstants.DB_AUTO_COMMIT; + + configureDatabase(db, DEFAULT); + boolean succeeded = false; + try { + db.open(txn, fileName, databaseName, type.getId(), openFlags, mode); + succeeded = true; + return db; + } finally { + if (!succeeded) + try { + db.close(0); + } catch (Throwable t) { + // Ignore it -- an exception is already in flight. + } + } + } + + /* package */ + void configureDatabase(final Db db, final DatabaseConfig oldConfig) + throws DatabaseException { + + int dbFlags = 0; + dbFlags |= checksum ? DbConstants.DB_CHKSUM : 0; + dbFlags |= (password != null) ? DbConstants.DB_ENCRYPT : 0; + dbFlags |= btreeRecordNumbers ? 
DbConstants.DB_RECNUM : 0; + dbFlags |= queueInOrder ? DbConstants.DB_INORDER : 0; + dbFlags |= renumbering ? DbConstants.DB_RENUMBER : 0; + dbFlags |= reverseSplitOff ? DbConstants.DB_REVSPLITOFF : 0; + dbFlags |= sortedDuplicates ? DbConstants.DB_DUPSORT : 0; + dbFlags |= snapshot ? DbConstants.DB_SNAPSHOT : 0; + dbFlags |= unsortedDuplicates ? DbConstants.DB_DUP : 0; + dbFlags |= transactionNotDurable ? DbConstants.DB_TXN_NOT_DURABLE : 0; + + if (dbFlags != 0) + db.set_flags(dbFlags); + + if (btMinKey != oldConfig.btMinKey) + db.set_bt_minkey(btMinKey); + if (byteOrder != oldConfig.byteOrder) + db.set_lorder(byteOrder); + if (cacheSize != oldConfig.cacheSize || + cacheCount != oldConfig.cacheCount) + db.set_cachesize(cacheSize, cacheCount); + if (errorStream != oldConfig.errorStream) + db.set_error_stream(errorStream); + if (errorPrefix != oldConfig.errorPrefix) + db.set_errpfx(errorPrefix); + if (hashFillFactor != oldConfig.hashFillFactor) + db.set_h_ffactor(hashFillFactor); + if (hashNumElements != oldConfig.hashNumElements) + db.set_h_nelem(hashNumElements); + if (messageStream != oldConfig.messageStream) + db.set_message_stream(messageStream); + if (pageSize != oldConfig.pageSize) + db.set_pagesize(pageSize); + if (password != oldConfig.password) + db.set_encrypt(password, DbConstants.DB_ENCRYPT_AES); + if (queueExtentSize != oldConfig.queueExtentSize) + db.set_q_extentsize(queueExtentSize); + if (recordDelimiter != oldConfig.recordDelimiter) + db.set_re_delim(recordDelimiter); + if (recordLength != oldConfig.recordLength) + db.set_re_len(recordLength); + if (recordPad != oldConfig.recordPad) + db.set_re_pad(recordPad); + if (recordSource != oldConfig.recordSource) + db.set_re_source(recordSource.toString()); + + if (btreeComparator != oldConfig.btreeComparator) + db.set_bt_compare(btreeComparator); + if (btreePrefixCalculator != oldConfig.btreePrefixCalculator) + db.set_bt_prefix(btreePrefixCalculator); + if (duplicateComparator != oldConfig.duplicateComparator) + db.set_dup_compare(duplicateComparator); + if (feedbackHandler != oldConfig.feedbackHandler) + db.set_feedback(feedbackHandler); + if (errorHandler != oldConfig.errorHandler) + db.set_errcall(errorHandler); + if (hasher != oldConfig.hasher) + db.set_h_hash(hasher); + if (messageHandler != oldConfig.messageHandler) + db.set_msgcall(messageHandler); + if (recnoAppender != oldConfig.recnoAppender) + db.set_append_recno(recnoAppender); + if (panicHandler != oldConfig.panicHandler) + db.set_paniccall(panicHandler); + } + + /* package */ + DatabaseConfig(final Db db) + throws DatabaseException { + + type = DatabaseType.fromInt(db.get_type()); + + final int openFlags = db.get_open_flags(); + allowCreate = (openFlags & DbConstants.DB_CREATE) != 0; + dirtyRead = (openFlags & DbConstants.DB_DIRTY_READ) != 0; + exclusiveCreate = (openFlags & DbConstants.DB_EXCL) != 0; + noMMap = (openFlags & DbConstants.DB_NOMMAP) != 0; + readOnly = (openFlags & DbConstants.DB_RDONLY) != 0; + truncate = (openFlags & DbConstants.DB_TRUNCATE) != 0; + + final int dbFlags = db.get_flags(); + checksum = (dbFlags & DbConstants.DB_CHKSUM) != 0; + btreeRecordNumbers = (dbFlags & DbConstants.DB_RECNUM) != 0; + queueInOrder = (dbFlags & DbConstants.DB_INORDER) != 0; + renumbering = (dbFlags & DbConstants.DB_RENUMBER) != 0; + reverseSplitOff = (dbFlags & DbConstants.DB_REVSPLITOFF) != 0; + sortedDuplicates = (dbFlags & DbConstants.DB_DUPSORT) != 0; + snapshot = (dbFlags & DbConstants.DB_SNAPSHOT) != 0; + unsortedDuplicates = (dbFlags & DbConstants.DB_DUP) != 
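[Editorial note: configureDatabase() only pushes settings that differ from oldConfig, so a freshly built DatabaseConfig plus the public Database constructor is enough to create a standalone database file. A minimal sketch; the file name and page size are placeholders.]

    import java.io.FileNotFoundException;
    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseConfig;
    import com.sleepycat.db.DatabaseException;
    import com.sleepycat.db.DatabaseType;

    public class CreateDatabaseExample {
        static Database create()
            throws DatabaseException, FileNotFoundException {

            DatabaseConfig config = new DatabaseConfig();
            config.setAllowCreate(true);          // DB_CREATE
            config.setType(DatabaseType.BTREE);
            config.setSortedDuplicates(true);     // DB_DUPSORT
            config.setPageSize(4096);
            // No explicit Environment is supplied; DB creates a private,
            // non-transactional environment behind this handle.
            return new Database("example.db", null, config);
        }
    }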
0; + transactionNotDurable = (dbFlags & DbConstants.DB_TXN_NOT_DURABLE) != 0; + + if (type == DatabaseType.BTREE) { + btMinKey = db.get_bt_minkey(); + } + byteOrder = db.get_lorder(); + // Call get_cachesize* on the DbEnv to avoid this error: + // DB->get_cachesize: method not permitted in shared environment + cacheSize = db.get_env().get_cachesize(); + cacheCount = db.get_env().get_cachesize_ncache(); + errorStream = db.get_error_stream(); + errorPrefix = db.get_errpfx(); + if (type == DatabaseType.HASH) { + hashFillFactor = db.get_h_ffactor(); + hashNumElements = db.get_h_nelem(); + } + messageStream = db.get_message_stream(); + // Not available by design + password = ((dbFlags & DbConstants.DB_ENCRYPT) != 0) ? "" : null; + if (type == DatabaseType.QUEUE) { + queueExtentSize = db.get_q_extentsize(); + } + if (type == DatabaseType.QUEUE || type == DatabaseType.RECNO) { + recordLength = db.get_re_len(); + recordPad = db.get_re_pad(); + } + if (type == DatabaseType.RECNO) { + recordDelimiter = db.get_re_delim(); + recordSource = (db.get_re_source() == null) ? null : + new java.io.File(db.get_re_source()); + } + transactional = db.get_transactional(); + + btreeComparator = db.get_bt_compare(); + btreePrefixCalculator = db.get_bt_prefix(); + duplicateComparator = db.get_dup_compare(); + feedbackHandler = db.get_feedback(); + errorHandler = db.get_errcall(); + hasher = db.get_h_hash(); + messageHandler = db.get_msgcall(); + recnoAppender = db.get_append_recno(); + panicHandler = db.get_paniccall(); + } +} diff --git a/db/java/src/com/sleepycat/db/DatabaseEntry.java b/db/java/src/com/sleepycat/db/DatabaseEntry.java new file mode 100644 index 000000000..be67fe024 --- /dev/null +++ b/db/java/src/com/sleepycat/db/DatabaseEntry.java @@ -0,0 +1,181 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: DatabaseEntry.java,v 1.7 2004/09/22 18:01:03 bostic Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; +import com.sleepycat.db.internal.DbUtil; + +public class DatabaseEntry { + + /* Currently, JE stores all data records as byte array */ + protected byte[] data; + protected int dlen = 0; + protected int doff = 0; + protected int flags = 0; + protected int offset = 0; + protected int size = 0; + protected int ulen = 0; + + /* + * IGNORE is used to avoid returning data that is not needed. It may not + * be used as the key DBT in a put since the PARTIAL flag is not allowed; + * use UNUSED for that instead. + */ + + /* package */ + static final DatabaseEntry IGNORE = new DatabaseEntry(); + static { + IGNORE.setUserBuffer(0, true); + IGNORE.setPartial(0, 0, true); // dlen == 0, so no data ever returned + } + /* package */ + static final DatabaseEntry UNUSED = new DatabaseEntry(); + + protected static final int INT32SZ = 4; + + /* + * Constructors + */ + + public DatabaseEntry() { + } + + public DatabaseEntry(final byte[] data) { + this.data = data; + if (data != null) { + this.size = data.length; + } + } + + public DatabaseEntry(final byte[] data, final int offset, final int size) { + this.data = data; + this.offset = offset; + this.size = size; + } + + /* + * Accessors + */ + + public byte[] getData() { + return data; + } + + public void setData(final byte[] data, final int offset, final int size) { + this.data = data; + this.offset = offset; + this.size = size; + } + + public void setData(final byte[] data) { + setData(data, 0, (data == null) ? 
0 : data.length); + } + + /* package */ + int getMultiFlag() { + return 0; + } + + public int getOffset() { + return offset; + } + + public void setOffset(final int offset) { + this.offset = offset; + } + + public int getPartialLength() { + return dlen; + } + + public int getPartialOffset() { + return doff; + } + + public boolean getPartial() { + return (flags & DbConstants.DB_DBT_PARTIAL) != 0; + } + + public void setPartialOffset(final int doff) { + this.doff = doff; + } + + public void setPartialLength(final int dlen) { + this.dlen = dlen; + } + + public void setPartial(final boolean partial) { + if (partial) + flags |= DbConstants.DB_DBT_PARTIAL; + else + flags &= ~DbConstants.DB_DBT_PARTIAL; + } + + public void setPartial(final int doff, + final int dlen, + final boolean partial) { + setPartialOffset(doff); + setPartialLength(dlen); + setPartial(partial); + } + + public int getRecordNumber() { + return DbUtil.array2int(data, offset); + } + + public void setRecordNumber(final int recno) { + if (data == null || data.length < INT32SZ) { + data = new byte[INT32SZ]; + size = INT32SZ; + ulen = 0; + offset = 0; + } + DbUtil.int2array(recno, data, 0); + } + + public boolean getReuseBuffer() { + return 0 == + (flags & (DbConstants.DB_DBT_MALLOC | DbConstants.DB_DBT_USERMEM)); + } + + public void setReuseBuffer(boolean reuse) { + if (reuse) + flags &= ~(DbConstants.DB_DBT_MALLOC | DbConstants.DB_DBT_USERMEM); + else { + flags &= ~DbConstants.DB_DBT_USERMEM; + flags |= DbConstants.DB_DBT_MALLOC; + } + } + + public int getSize() { + return size; + } + + public void setSize(final int size) { + this.size = size; + } + + public boolean getUserBuffer() { + return (flags & DbConstants.DB_DBT_USERMEM) != 0; + } + + public int getUserBufferLength() { + return ulen; + } + + public void setUserBuffer(final int length, final boolean usermem) { + this.ulen = length; + if (usermem) { + flags &= ~DbConstants.DB_DBT_MALLOC; + flags |= DbConstants.DB_DBT_USERMEM; + } else + flags &= ~DbConstants.DB_DBT_USERMEM; + } +} diff --git a/db/java/src/com/sleepycat/db/DatabaseException.java b/db/java/src/com/sleepycat/db/DatabaseException.java new file mode 100644 index 000000000..17ffb5432 --- /dev/null +++ b/db/java/src/com/sleepycat/db/DatabaseException.java @@ -0,0 +1,54 @@ +/* + * - + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1997-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: DatabaseException.java,v 1.1 2004/04/06 20:43:36 mjc Exp $ + */ +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbEnv; + +public class DatabaseException extends Exception { + private Environment dbenv; + private int errno; + + public DatabaseException(final String s) { + this(s, 0, (Environment)null); + } + + public DatabaseException(final String s, final int errno) { + this(s, errno, (Environment)null); + } + + public DatabaseException(final String s, + final int errno, + final Environment dbenv) { + super(s); + this.errno = errno; + this.dbenv = dbenv; + } + + protected DatabaseException(final String s, + final int errno, + final DbEnv dbenv) { + this(s, errno, (dbenv == null) ? 
null : dbenv.wrapper); + } + + public Environment getEnvironment() { + return dbenv; + } + + public int getErrno() { + return errno; + } + + public String toString() { + String s = super.toString(); + if (errno != 0) + s += ": " + DbEnv.strerror(errno); + return s; + } +} diff --git a/db/java/src/com/sleepycat/db/DatabaseStats.java b/db/java/src/com/sleepycat/db/DatabaseStats.java new file mode 100644 index 000000000..c372ca435 --- /dev/null +++ b/db/java/src/com/sleepycat/db/DatabaseStats.java @@ -0,0 +1,15 @@ +/* + * - + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1997-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: DatabaseStats.java,v 1.2 2004/09/28 19:30:37 mjc Exp $ + */ +package com.sleepycat.db; + +public abstract class DatabaseStats { + // no public constructor + protected DatabaseStats() {} +} diff --git a/db/java/src/com/sleepycat/db/DatabaseType.java b/db/java/src/com/sleepycat/db/DatabaseType.java new file mode 100644 index 000000000..2407498cf --- /dev/null +++ b/db/java/src/com/sleepycat/db/DatabaseType.java @@ -0,0 +1,65 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: DatabaseType.java,v 1.2 2004/04/21 01:09:09 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; + +public final class DatabaseType { + public static final DatabaseType BTREE = + new DatabaseType("BTREE", DbConstants.DB_BTREE); + + public static final DatabaseType HASH = + new DatabaseType("HASH", DbConstants.DB_HASH); + + public static final DatabaseType QUEUE = + new DatabaseType("QUEUE", DbConstants.DB_QUEUE); + + public static final DatabaseType RECNO = + new DatabaseType("RECNO", DbConstants.DB_RECNO); + + public static final DatabaseType UNKNOWN = + new DatabaseType("UNKNOWN", DbConstants.DB_UNKNOWN); + + /* package */ + static DatabaseType fromInt(int type) { + switch(type) { + case DbConstants.DB_BTREE: + return BTREE; + case DbConstants.DB_HASH: + return HASH; + case DbConstants.DB_QUEUE: + return QUEUE; + case DbConstants.DB_RECNO: + return DatabaseType.RECNO; + case DbConstants.DB_UNKNOWN: + return DatabaseType.UNKNOWN; + default: + throw new IllegalArgumentException( + "Unknown database type: " + type); + } + } + + private String statusName; + private int id; + + private DatabaseType(final String statusName, final int id) { + this.statusName = statusName; + this.id = id; + } + + /* package */ + int getId() { + return id; + } + + public String toString() { + return "DatabaseType." + statusName; + } +} diff --git a/db/java/src/com/sleepycat/db/DeadlockException.java b/db/java/src/com/sleepycat/db/DeadlockException.java new file mode 100644 index 000000000..afe9b498a --- /dev/null +++ b/db/java/src/com/sleepycat/db/DeadlockException.java @@ -0,0 +1,20 @@ +/* + * - + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1999-2004 + * Sleepycat Software. All rights reserved. 
+ * + * $Id: DeadlockException.java,v 1.1 2004/04/06 20:43:40 mjc Exp $ + */ +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbEnv; + +public class DeadlockException extends DatabaseException { + protected DeadlockException(final String s, + final int errno, + final DbEnv dbenv) { + super(s, errno, dbenv); + } +} diff --git a/db/java/src/com/sleepycat/db/Environment.java b/db/java/src/com/sleepycat/db/Environment.java new file mode 100644 index 000000000..b45c56d58 --- /dev/null +++ b/db/java/src/com/sleepycat/db/Environment.java @@ -0,0 +1,354 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: Environment.java,v 1.5 2004/08/05 19:20:34 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; +import com.sleepycat.db.internal.DbEnv; + +public class Environment { + private DbEnv dbenv; + private java.io.File home; + private int autoCommitFlag; + + /* package */ + Environment(final DbEnv dbenv) + throws DatabaseException { + + this.dbenv = dbenv; + dbenv.wrapper = this; + } + + public Environment(final java.io.File home, EnvironmentConfig config) + throws DatabaseException, java.io.FileNotFoundException { + + this(EnvironmentConfig.checkNull(config).openEnvironment(home)); + this.home = home; + this.autoCommitFlag = + ((dbenv.get_open_flags() & DbConstants.DB_INIT_TXN) == 0) ? 0 : + DbConstants.DB_AUTO_COMMIT; + } + + public void close() + throws DatabaseException { + + dbenv.close(0); + } + + /* package */ + DbEnv unwrap() { + return dbenv; + } + + public static void remove(final java.io.File home, + final boolean force, + EnvironmentConfig config) + throws DatabaseException, java.io.FileNotFoundException { + + config = EnvironmentConfig.checkNull(config); + int flags = force ? DbConstants.DB_FORCE : 0; + flags |= config.getUseEnvironment() ? + DbConstants.DB_USE_ENVIRON : 0; + flags |= config.getUseEnvironmentRoot() ? + DbConstants.DB_USE_ENVIRON_ROOT : 0; + final DbEnv dbenv = config.createEnvironment(); + dbenv.remove(home.toString(), flags); + } + + public void setConfig(final EnvironmentConfig config) + throws DatabaseException { + + config.configureEnvironment(dbenv, new EnvironmentConfig(dbenv)); + } + + public EnvironmentConfig getConfig() + throws DatabaseException { + + return new EnvironmentConfig(dbenv); + } + + /* Manage databases. */ + public Database openDatabase(final Transaction txn, + final String fileName, + final String databaseName, + DatabaseConfig config) + throws DatabaseException, java.io.FileNotFoundException { + + return new Database( + DatabaseConfig.checkNull(config).openDatabase(dbenv, + (txn == null) ? null : txn.txn, + fileName, databaseName)); + } + + public SecondaryDatabase openSecondaryDatabase( + final Transaction txn, + final String fileName, + final String databaseName, + final Database primaryDatabase, + SecondaryConfig config) + throws DatabaseException, java.io.FileNotFoundException { + + return new SecondaryDatabase( + SecondaryConfig.checkNull(config).openSecondaryDatabase( + dbenv, (txn == null) ? null : txn.txn, + fileName, databaseName, primaryDatabase.db), + primaryDatabase); + } + + public void removeDatabase(final Transaction txn, + final String fileName, + final String databaseName) + throws DatabaseException, java.io.FileNotFoundException { + + dbenv.dbremove((txn == null) ? null : txn.txn, + fileName, databaseName, + (txn == null) ? 
autoCommitFlag : 0); + } + + public void renameDatabase(final Transaction txn, + final String fileName, + final String databaseName, + final String newName) + throws DatabaseException, java.io.FileNotFoundException { + + dbenv.dbrename((txn == null) ? null : txn.txn, + fileName, databaseName, newName, + (txn == null) ? autoCommitFlag : 0); + } + + public java.io.File getHome() + throws DatabaseException { + + return home; + } + + /* Cache management. */ + public int trickleCacheWrite(int percent) + throws DatabaseException { + + return dbenv.memp_trickle(percent); + } + + /* Locking */ + public int detectDeadlocks(LockDetectMode mode) + throws DatabaseException { + + return dbenv.lock_detect(0, mode.getFlag()); + } + + public Lock getLock(int locker, + boolean noWait, + DatabaseEntry object, + LockRequestMode mode) + throws DatabaseException { + + return Lock.wrap( + dbenv.lock_get(locker, noWait ? DbConstants.DB_LOCK_NOWAIT : 0, + object, mode.getFlag())); + } + + public void putLock(Lock lock) + throws DatabaseException { + + dbenv.lock_put(lock.unwrap()); + } + + public int createLockerID() + throws DatabaseException { + + return dbenv.lock_id(); + } + + public void freeLockerID(int id) + throws DatabaseException { + + dbenv.lock_id_free(id); + } + + public void lockVector(int locker, boolean noWait, LockRequest[] list) + throws DatabaseException { + + dbenv.lock_vec(locker, noWait ? DbConstants.DB_LOCK_NOWAIT : 0, + list, 0, list.length); + } + + /* Logging */ + public LogCursor openLogCursor() + throws DatabaseException { + + return LogCursor.wrap(dbenv.log_cursor(0)); + } + + public String getLogFileName(LogSequenceNumber lsn) + throws DatabaseException { + + return dbenv.log_file(lsn); + } + + /* Replication support */ + public int electReplicationMaster(int nsites, + int nvotes, + int priority, + int timeout) + throws DatabaseException { + + return dbenv.rep_elect(nsites, nvotes, priority, timeout, + 0 /* unused flags */); + } + + public ReplicationStatus processReplicationMessage(DatabaseEntry control, + DatabaseEntry rec, + int envid) + throws DatabaseException { + + final DbEnv.RepProcessMessage wrappedID = new DbEnv.RepProcessMessage(); + wrappedID.envid = envid; + // Create a new entry so that rec isn't overwritten + final DatabaseEntry cdata = + new DatabaseEntry(rec.getData(), rec.getOffset(), rec.getSize()); + final LogSequenceNumber lsn = new LogSequenceNumber(); + final int ret = + dbenv.rep_process_message(control, cdata, wrappedID, lsn); + return ReplicationStatus.getStatus(ret, cdata, wrappedID.envid, lsn); + } + + public void startReplication(DatabaseEntry cdata, boolean master) + throws DatabaseException { + + dbenv.rep_start(cdata, + master ? 
DbConstants.DB_REP_MASTER : DbConstants.DB_REP_CLIENT); + } + + /* Statistics */ + public CacheStats getCacheStats(StatsConfig config) + throws DatabaseException { + + return dbenv.memp_stat(StatsConfig.checkNull(config).getFlags()); + } + + public CacheFileStats[] getCacheFileStats(StatsConfig config) + throws DatabaseException { + + return dbenv.memp_fstat(StatsConfig.checkNull(config).getFlags()); + } + + public LogStats getLogStats(StatsConfig config) + throws DatabaseException { + + return dbenv.log_stat(StatsConfig.checkNull(config).getFlags()); + } + + public ReplicationStats getReplicationStats(StatsConfig config) + throws DatabaseException { + + return dbenv.rep_stat(StatsConfig.checkNull(config).getFlags()); + } + + public LockStats getLockStats(StatsConfig config) + throws DatabaseException { + + return dbenv.lock_stat(StatsConfig.checkNull(config).getFlags()); + } + + public TransactionStats getTransactionStats(StatsConfig config) + throws DatabaseException { + + return dbenv.txn_stat(StatsConfig.checkNull(config).getFlags()); + } + + /* Transaction management */ + public Transaction beginTransaction(final Transaction parent, + TransactionConfig config) + throws DatabaseException { + + return new Transaction( + TransactionConfig.checkNull(config).beginTransaction(dbenv, + (parent == null) ? null : parent.txn)); + } + + public void checkpoint(CheckpointConfig config) + throws DatabaseException { + + CheckpointConfig.checkNull(config).runCheckpoint(dbenv); + } + + public void logFlush(LogSequenceNumber lsn) + throws DatabaseException { + + dbenv.log_flush(lsn); + } + + public LogSequenceNumber logPut(DatabaseEntry data, boolean flush) + throws DatabaseException { + + final LogSequenceNumber lsn = new LogSequenceNumber(); + dbenv.log_put(lsn, data, flush ? DbConstants.DB_FLUSH : 0); + return lsn; + } + + public java.io.File[] getArchiveLogFiles(boolean includeInUse) + throws DatabaseException { + + final String[] logNames = + dbenv.log_archive(DbConstants.DB_ARCH_ABS | + (includeInUse ? DbConstants.DB_ARCH_LOG : 0)); + final java.io.File[] logFiles = new java.io.File[logNames.length]; + for (int i = 0; i < logNames.length; i++) + logFiles[i] = new java.io.File(logNames[i]); + return logFiles; + } + + public java.io.File[] getArchiveDatabases() + throws DatabaseException { + + final String[] dbNames = dbenv.log_archive(DbConstants.DB_ARCH_DATA); + final java.io.File[] dbFiles = new java.io.File[dbNames.length]; + for (int i = 0; i < dbNames.length; i++) + dbFiles[i] = new java.io.File(home, dbNames[i]); + return dbFiles; + } + + public void removeOldLogFiles() + throws DatabaseException { + + dbenv.log_archive(DbConstants.DB_ARCH_REMOVE); + } + + public PreparedTransaction[] recover(final int count, + final boolean continued) + throws DatabaseException { + + return dbenv.txn_recover(count, + continued ? DbConstants.DB_NEXT : DbConstants.DB_FIRST); + } + + /* Panic the environment, or stop a panic. 
*/ + public void panic(boolean onoff) + throws DatabaseException { + + dbenv.set_flags(DbConstants.DB_PANIC_ENVIRONMENT, onoff); + } + + /* Version information */ + public static String getVersionString() { + return DbEnv.get_version_string(); + } + + public static int getVersionMajor() { + return DbEnv.get_version_major(); + } + + public static int getVersionMinor() { + return DbEnv.get_version_minor(); + } + + public static int getVersionPatch() { + return DbEnv.get_version_patch(); + } +} diff --git a/db/java/src/com/sleepycat/db/EnvironmentConfig.java b/db/java/src/com/sleepycat/db/EnvironmentConfig.java new file mode 100644 index 000000000..23e6ab309 --- /dev/null +++ b/db/java/src/com/sleepycat/db/EnvironmentConfig.java @@ -0,0 +1,1076 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: EnvironmentConfig.java,v 1.13 2004/09/28 19:30:37 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; +import com.sleepycat.db.internal.DbEnv; + +public class EnvironmentConfig implements Cloneable { + /* + * For internal use, to allow null as a valid value for + * the config parameter. + */ + public static final EnvironmentConfig DEFAULT = new EnvironmentConfig(); + + /* package */ + static EnvironmentConfig checkNull(EnvironmentConfig config) { + return (config == null) ? DEFAULT : config; + } + + /* Parameters */ + private int mode = 0644; + private int cacheCount = 0; + private long cacheSize = 0L; + private java.util.Vector dataDirs = new java.util.Vector(); + private int envid = 0; + private java.io.OutputStream errorStream = null; + private String errorPrefix = null; + private byte[][] lockConflicts = null; + private LockDetectMode lockDetectMode = LockDetectMode.NONE; + private int maxLocks = 0; + private int maxLockers = 0; + private int maxLockObjects = 0; + private int maxLogFileSize = 0; + private int logBufferSize = 0; + private java.io.OutputStream messageStream = null; + private java.io.File logDirectory = null; + private int logRegionSize = 0; + private long mmapSize = 0L; + private String password = null; + private long replicationLimit = 0L; + private String rpcServer = null; + private long rpcClientTimeout = 0L; + private long rpcServerTimeout = 0L; + private long segmentId = 0L; + private int testAndSetSpins = 0; + private long lockTimeout = 0L; + private int txnMaxActive = 0; + private long txnTimeout = 0L; + private java.util.Date txnTimestamp = null; + private String temporaryDirectory = null; + + /* Open flags */ + private boolean allowCreate = false; + private boolean initializeCache = false; + private boolean initializeCDB = false; + private boolean initializeLocking = false; + private boolean initializeLogging = false; + private boolean initializeReplication = false; + private boolean joinEnvironment = false; + private boolean lockDown = false; + private boolean isPrivate = false; + private boolean readOnly = false; + private boolean runRecovery = false; + private boolean runFatalRecovery = false; + private boolean systemMemory = false; + private boolean threaded = true; // Handles are threaded by default in Java + private boolean transactional = false; + private boolean useEnvironment = false; + private boolean useEnvironmentRoot = false; + + /* Flags */ + private boolean cdbLockAllDatabases = false; + private boolean directDatabaseIO = false; + private boolean directLogIO = false; + private boolean dsyncLog = false; + private boolean 
initializeRegions = false; + private boolean logAutoRemove = false; + private boolean logInMemory = false; + private boolean noLocking = false; + private boolean noMMap = false; + private boolean noPanic = false; + private boolean overwrite = false; + private boolean txnNoSync = false; + private boolean txnNotDurable = false; + private boolean txnWriteNoSync = false; + private boolean yieldCPU = false; + + /* Verbose Flags */ + private boolean verboseDeadlock = false; + private boolean verboseRecovery = false; + private boolean verboseReplication = false; + private boolean verboseWaitsFor = false; + + /* Callbacks */ + private ErrorHandler errorHandler = null; + private FeedbackHandler feedbackHandler = null; + private LogRecordHandler logRecordHandler = null; + private MessageHandler messageHandler = null; + private PanicHandler panicHandler = null; + private ReplicationTransport replicationTransport = null; + + public EnvironmentConfig() { + } + + public void setAllowCreate(final boolean allowCreate) { + this.allowCreate = allowCreate; + } + + public boolean getAllowCreate() { + return allowCreate; + } + + public void setCacheSize(final long cacheSize) { + this.cacheSize = cacheSize; + } + + public long getCacheSize() { + return cacheSize; + } + + public void setCacheCount(final int cacheCount) { + this.cacheCount = cacheCount; + } + + public int getCacheCount() { + return cacheCount; + } + + public void setCDBLockAllDatabases(final boolean cdbLockAllDatabases) { + this.cdbLockAllDatabases = cdbLockAllDatabases; + } + + public boolean getCDBLockAllDatabases() { + return cdbLockAllDatabases; + } + + public void addDataDir(final String dataDir) { + this.dataDirs.add(dataDir); + } + + public String[] getDataDirs() { + final String[] dirs = new String[dataDirs.size()]; + dataDirs.copyInto(dirs); + return dirs; + } + + public void setDirectDatabaseIO(final boolean directDatabaseIO) { + this.directDatabaseIO = directDatabaseIO; + } + + public boolean getDirectDatabaseIO() { + return directDatabaseIO; + } + + public void setDirectLogIO(final boolean directLogIO) { + this.directLogIO = directLogIO; + } + + public boolean getDirectLogIO() { + return directLogIO; + } + + public void setDsyncLog(final boolean dsyncLog) { + this.dsyncLog = dsyncLog; + } + + public boolean getDsyncLog() { + return dsyncLog; + } + + public void setEncrypted(final String password) { + this.password = password; + } + + public boolean getEncrypted() { + return (password != null); + } + + public void setErrorHandler(final ErrorHandler errorHandler) { + this.errorHandler = errorHandler; + } + + public ErrorHandler getErrorHandler() { + return errorHandler; + } + + public void setErrorPrefix(final String errorPrefix) { + this.errorPrefix = errorPrefix; + } + + public String getErrorPrefix() { + return errorPrefix; + } + + public void setErrorStream(final java.io.OutputStream errorStream) { + this.errorStream = errorStream; + } + + public java.io.OutputStream getErrorStream() { + return errorStream; + } + + public void setFeedbackHandler(final FeedbackHandler feedbackHandler) { + this.feedbackHandler = feedbackHandler; + } + + public FeedbackHandler getFeedbackHandler() { + return feedbackHandler; + } + + public void setInitializeCache(final boolean initializeCache) { + this.initializeCache = initializeCache; + } + + public boolean getInitializeCache() { + return initializeCache; + } + + public void setInitializeCDB(final boolean initializeCDB) { + this.initializeCDB = initializeCDB; + } + + public boolean getInitializeCDB() 
{ + return initializeCDB; + } + + public void setInitializeLocking(final boolean initializeLocking) { + this.initializeLocking = initializeLocking; + } + + public boolean getInitializeLocking() { + return initializeLocking; + } + + public void setInitializeLogging(final boolean initializeLogging) { + this.initializeLogging = initializeLogging; + } + + public boolean getInitializeLogging() { + return initializeLogging; + } + + public void setInitializeRegions(final boolean initializeRegions) { + this.initializeRegions = initializeRegions; + } + + public boolean getInitializeRegions() { + return initializeRegions; + } + + public void setInitializeReplication(final boolean initializeReplication) { + this.initializeReplication = initializeReplication; + } + + public boolean getInitializeReplication() { + return initializeReplication; + } + + public void setJoinEnvironment(final boolean joinEnvironment) { + this.joinEnvironment = joinEnvironment; + } + + public boolean getJoinEnvironment() { + return joinEnvironment; + } + + public void setLockConflicts(final byte[][] lockConflicts) { + this.lockConflicts = lockConflicts; + } + + public byte[][] getLockConflicts() { + return lockConflicts; + } + + public void setLockDetectMode(final LockDetectMode lockDetectMode) { + this.lockDetectMode = lockDetectMode; + } + + public LockDetectMode getLockDetectMode() { + return lockDetectMode; + } + + public void setLockDown(final boolean lockDown) { + this.lockDown = lockDown; + } + + public boolean getLockDown() { + return lockDown; + } + + public void setLockTimeout(final long lockTimeout) { + this.lockTimeout = lockTimeout; + } + + public long getLockTimeout() { + return lockTimeout; + } + + public void setLogAutoRemove(final boolean logAutoRemove) { + this.logAutoRemove = logAutoRemove; + } + + public boolean getLogAutoRemove() { + return logAutoRemove; + } + + public void setLogInMemory(final boolean logInMemory) { + this.logInMemory = logInMemory; + } + + public boolean getLogInMemory() { + return logInMemory; + } + + public void setLogRecordHandler(final LogRecordHandler logRecordHandler) { + this.logRecordHandler = logRecordHandler; + } + + public LogRecordHandler getLogRecordHandler() { + return logRecordHandler; + } + + public void setMaxLocks(final int maxLocks) { + this.maxLocks = maxLocks; + } + + public int getMaxLocks() { + return maxLocks; + } + + public void setMaxLockers(final int maxLockers) { + this.maxLockers = maxLockers; + } + + public int getMaxLockers() { + return maxLockers; + } + + public void setMaxLockObjects(final int maxLockObjects) { + this.maxLockObjects = maxLockObjects; + } + + public int getMaxLockObjects() { + return maxLockObjects; + } + + public void setMaxLogFileSize(final int maxLogFileSize) { + this.maxLogFileSize = maxLogFileSize; + } + + public int getMaxLogFileSize() { + return maxLogFileSize; + } + + public void setLogBufferSize(final int logBufferSize) { + this.logBufferSize = logBufferSize; + } + + public int getLogBufferSize() { + return logBufferSize; + } + + public void setLogDirectory(final java.io.File logDirectory) { + this.logDirectory = logDirectory; + } + + public java.io.File getLogDirectory() { + return logDirectory; + } + + public void setLogRegionSize(final int logRegionSize) { + this.logRegionSize = logRegionSize; + } + + public int getLogRegionSize() { + return logRegionSize; + } + + public void setMessageHandler(final MessageHandler messageHandler) { + this.messageHandler = messageHandler; + } + + public MessageHandler getMessageHandler() { + 
return messageHandler; + } + + public void setMessageStream(final java.io.OutputStream messageStream) { + this.messageStream = messageStream; + } + + public java.io.OutputStream getMessageStream() { + return messageStream; + } + + public void setMMapSize(final long mmapSize) { + this.mmapSize = mmapSize; + } + + public long getMMapSize() { + return mmapSize; + } + + public void setMode(final int mode) { + this.mode = mode; + } + + public long getMode() { + return mode; + } + + public void setNoLocking(final boolean noLocking) { + this.noLocking = noLocking; + } + + public boolean getNoLocking() { + return noLocking; + } + + public void setNoMMap(final boolean noMMap) { + this.noMMap = noMMap; + } + + public boolean getNoMMap() { + return noMMap; + } + + public void setNoPanic(final boolean noPanic) { + this.noPanic = noPanic; + } + + public boolean getNoPanic() { + return noPanic; + } + + public void setOverwrite(final boolean overwrite) { + this.overwrite = overwrite; + } + + public boolean getOverwrite() { + return overwrite; + } + + public void setPanicHandler(final PanicHandler panicHandler) { + this.panicHandler = panicHandler; + } + + public PanicHandler getPanicHandler() { + return panicHandler; + } + + public void setPrivate(final boolean isPrivate) { + this.isPrivate = isPrivate; + } + + public boolean getPrivate() { + return isPrivate; + } + + public boolean getReadOnly() { + return readOnly; + } + + public void setReadOnly(final boolean readOnly) { + this.readOnly = readOnly; + } + + public void setReplicationLimit(final long replicationLimit) { + this.replicationLimit = replicationLimit; + } + + public long getReplicationLimit() { + return replicationLimit; + } + + public void setReplicationTransport(final int envid, + final ReplicationTransport replicationTransport) { + + this.envid = envid; + this.replicationTransport = replicationTransport; + } + + public ReplicationTransport getReplicationTransport() { + return replicationTransport; + } + + public void setRunFatalRecovery(final boolean runFatalRecovery) { + this.runFatalRecovery = runFatalRecovery; + } + + public boolean getRunFatalRecovery() { + return runFatalRecovery; + } + + public void setRunRecovery(final boolean runRecovery) { + this.runRecovery = runRecovery; + } + + public boolean getRunRecovery() { + return runRecovery; + } + + public void setSystemMemory(final boolean systemMemory) { + this.systemMemory = systemMemory; + } + + public boolean getSystemMemory() { + return systemMemory; + } + + public void setRPCServer(final String rpcServer, + final long rpcClientTimeout, + final long rpcServerTimeout) { + this.rpcServer = rpcServer; + this.rpcClientTimeout = rpcClientTimeout; + this.rpcServerTimeout = rpcServerTimeout; + + // Turn off threading for RPC client handles. 
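// (Assumption, not stated in the patch: a DB_RPCCLIENT handle is not
// free-threaded in the underlying C library, which is presumably why
// DB_THREAD is dropped at open time here; an application can restore it
// explicitly by calling setThreaded(true) after setRPCServer().)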
+ this.threaded = false; + } + + public void setSegmentId(final long segmentId) { + this.segmentId = segmentId; + } + + public long getSegmentId() { + return segmentId; + } + + public void setTemporaryDirectory(final String temporaryDirectory) { + this.temporaryDirectory = temporaryDirectory; + } + + public String getTemporaryDirectory() { + return temporaryDirectory; + } + + public void setTestAndSetSpins(final int testAndSetSpins) { + this.testAndSetSpins = testAndSetSpins; + } + + public int getTestAndSetSpins() { + return testAndSetSpins; + } + + public void setThreaded(final boolean threaded) { + this.threaded = threaded; + } + + public boolean getThreaded() { + return threaded; + } + + public void setTransactional(final boolean transactional) { + this.transactional = transactional; + } + + public boolean getTransactional() { + return transactional; + } + + public void setTxnNoSync(final boolean txnNoSync) { + this.txnNoSync = txnNoSync; + } + + public boolean getTxnNoSync() { + return txnNoSync; + } + + public void setTxnNotDurable(final boolean txnNotDurable) { + this.txnNotDurable = txnNotDurable; + } + + public boolean getTxnNotDurable() { + return txnNotDurable; + } + + public void setTxnMaxActive(final int txnMaxActive) { + this.txnMaxActive = txnMaxActive; + } + + public int getTxnMaxActive() { + return txnMaxActive; + } + + public void setTxnTimeout(final long txnTimeout) { + this.txnTimeout = txnTimeout; + } + + public long getTxnTimeout() { + return txnTimeout; + } + + public void setTxnTimestamp(final java.util.Date txnTimestamp) { + this.txnTimestamp = txnTimestamp; + } + + public java.util.Date getTxnTimestamp() { + return txnTimestamp; + } + + public void setTxnWriteNoSync(final boolean txnWriteNoSync) { + this.txnWriteNoSync = txnWriteNoSync; + } + + public boolean getTxnWriteNoSync() { + return txnWriteNoSync; + } + + public void setUseEnvironment(final boolean useEnvironment) { + this.useEnvironment = useEnvironment; + } + + public boolean getUseEnvironment() { + return useEnvironment; + } + + public void setUseEnvironmentRoot(final boolean useEnvironmentRoot) { + this.useEnvironmentRoot = useEnvironmentRoot; + } + + public boolean getUseEnvironmentRoot() { + return useEnvironmentRoot; + } + + public void setVerboseDeadlock(final boolean verboseDeadlock) { + this.verboseDeadlock = verboseDeadlock; + } + + public boolean getVerboseDeadlock() { + return verboseDeadlock; + } + + public void setVerboseRecovery(final boolean verboseRecovery) { + this.verboseRecovery = verboseRecovery; + } + + public boolean getVerboseRecovery() { + return verboseRecovery; + } + + public void setVerboseReplication(final boolean verboseReplication) { + this.verboseReplication = verboseReplication; + } + + public boolean getVerboseReplication() { + return verboseReplication; + } + + public void setVerboseWaitsFor(final boolean verboseWaitsFor) { + this.verboseWaitsFor = verboseWaitsFor; + } + + public boolean getVerboseWaitsFor() { + return verboseWaitsFor; + } + + public void setYieldCPU(final boolean yieldCPU) { + this.yieldCPU = yieldCPU; + } + + public boolean getYieldCPU() { + return yieldCPU; + } + + private boolean lockConflictsEqual(byte[][] lc1, byte[][]lc2) { + if (lc1 == lc2) + return true; + if (lc1 == null || lc2 == null || lc1.length != lc2.length) + return false; + for (int i = 0; i < lc1.length; i++) { + if (lc1[i].length != lc2[i].length) + return false; + for (int j = 0; j < lc1[i].length; j++) + if (lc1[i][j] != lc2[i][j]) + return false; + } + return true; + } + + /* 
package */ + DbEnv openEnvironment(final java.io.File home) + throws DatabaseException, java.io.FileNotFoundException { + + final DbEnv dbenv = createEnvironment(); + int openFlags = 0; + + openFlags |= allowCreate ? DbConstants.DB_CREATE : 0; + openFlags |= initializeCache ? DbConstants.DB_INIT_MPOOL : 0; + openFlags |= initializeCDB ? DbConstants.DB_INIT_CDB : 0; + openFlags |= initializeLocking ? DbConstants.DB_INIT_LOCK : 0; + openFlags |= initializeLogging ? DbConstants.DB_INIT_LOG : 0; + openFlags |= initializeReplication ? DbConstants.DB_INIT_REP : 0; + openFlags |= joinEnvironment ? DbConstants.DB_JOINENV : 0; + openFlags |= lockDown ? DbConstants.DB_LOCKDOWN : 0; + openFlags |= isPrivate ? DbConstants.DB_PRIVATE : 0; + openFlags |= readOnly ? DbConstants.DB_RDONLY : 0; + openFlags |= runRecovery ? DbConstants.DB_RECOVER : 0; + openFlags |= runFatalRecovery ? DbConstants.DB_RECOVER_FATAL : 0; + openFlags |= systemMemory ? DbConstants.DB_SYSTEM_MEM : 0; + openFlags |= threaded ? DbConstants.DB_THREAD : 0; + openFlags |= transactional ? DbConstants.DB_INIT_TXN : 0; + openFlags |= useEnvironment ? DbConstants.DB_USE_ENVIRON : 0; + openFlags |= useEnvironmentRoot ? DbConstants.DB_USE_ENVIRON_ROOT : 0; + + boolean succeeded = false; + try { + dbenv.open(home.toString(), openFlags, mode); + succeeded = true; + return dbenv; + } finally { + if (!succeeded) + try { + dbenv.close(0); + } catch (Throwable t) { + // Ignore it -- an exception is already in flight. + } + } + } + + + /* package */ + DbEnv createEnvironment() + throws DatabaseException { + + int createFlags = 0; + + if (rpcServer != null) + createFlags |= DbConstants.DB_RPCCLIENT; + + final DbEnv dbenv = new DbEnv(createFlags); + configureEnvironment(dbenv, DEFAULT); + return dbenv; + } + + /* package */ + void configureEnvironment(final DbEnv dbenv, + final EnvironmentConfig oldConfig) + throws DatabaseException { + + if (errorHandler != oldConfig.errorHandler) + dbenv.set_errcall(errorHandler); + if (errorPrefix != oldConfig.errorPrefix && + errorPrefix != null && !errorPrefix.equals(oldConfig.errorPrefix)) + dbenv.set_errpfx(errorPrefix); + if (errorStream != oldConfig.errorStream) + dbenv.set_error_stream(errorStream); + + if (rpcServer != oldConfig.rpcServer || + rpcClientTimeout != oldConfig.rpcClientTimeout || + rpcServerTimeout != oldConfig.rpcServerTimeout) + dbenv.set_rpc_server(null, rpcServer, + rpcClientTimeout, rpcServerTimeout, 0); + + // We always set DB_TIME_NOTGRANTED in the Java API, because + // LockNotGrantedException extends DeadlockException, so there's no + // reason why an application would prefer one to the other. 
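// The flag updates below are computed as a delta against oldConfig: a flag
// is passed to DbEnv.set_flags(flags, true) only when it was newly requested
// and to set_flags(flags, false) only when it was newly cleared, so settings
// the caller left untouched are never modified.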
+ int onFlags = DbConstants.DB_TIME_NOTGRANTED; + int offFlags = 0; + + if (cdbLockAllDatabases && !oldConfig.cdbLockAllDatabases) + onFlags |= DbConstants.DB_CDB_ALLDB; + if (!cdbLockAllDatabases && oldConfig.cdbLockAllDatabases) + offFlags |= DbConstants.DB_CDB_ALLDB; + + if (directDatabaseIO && !oldConfig.directDatabaseIO) + onFlags |= DbConstants.DB_DIRECT_DB; + if (!directDatabaseIO && oldConfig.directDatabaseIO) + offFlags |= DbConstants.DB_DIRECT_DB; + + if (directLogIO && !oldConfig.directLogIO) + onFlags |= DbConstants.DB_DIRECT_LOG; + if (!directLogIO && oldConfig.directLogIO) + offFlags |= DbConstants.DB_DIRECT_LOG; + + if (dsyncLog && !oldConfig.dsyncLog) + onFlags |= DbConstants.DB_DSYNC_LOG; + if (!dsyncLog && oldConfig.dsyncLog) + offFlags |= DbConstants.DB_DSYNC_LOG; + + if (initializeRegions && !oldConfig.initializeRegions) + onFlags |= DbConstants.DB_REGION_INIT; + if (!initializeRegions && oldConfig.initializeRegions) + offFlags |= DbConstants.DB_REGION_INIT; + + if (logAutoRemove && !oldConfig.logAutoRemove) + onFlags |= DbConstants.DB_LOG_AUTOREMOVE; + if (!logAutoRemove && oldConfig.logAutoRemove) + offFlags |= DbConstants.DB_LOG_AUTOREMOVE; + + if (logInMemory && !oldConfig.logInMemory) + onFlags |= DbConstants.DB_LOG_INMEMORY; + if (!logInMemory && oldConfig.logInMemory) + offFlags |= DbConstants.DB_LOG_INMEMORY; + + if (noLocking && !oldConfig.noLocking) + onFlags |= DbConstants.DB_NOLOCKING; + if (!noLocking && oldConfig.noLocking) + offFlags |= DbConstants.DB_NOLOCKING; + + if (noMMap && !oldConfig.noMMap) + onFlags |= DbConstants.DB_NOMMAP; + if (!noMMap && oldConfig.noMMap) + offFlags |= DbConstants.DB_NOMMAP; + + if (noPanic && !oldConfig.noPanic) + onFlags |= DbConstants.DB_NOPANIC; + if (!noPanic && oldConfig.noPanic) + offFlags |= DbConstants.DB_NOPANIC; + + if (overwrite && !oldConfig.overwrite) + onFlags |= DbConstants.DB_OVERWRITE; + if (!overwrite && oldConfig.overwrite) + offFlags |= DbConstants.DB_OVERWRITE; + + if (txnNoSync && !oldConfig.txnNoSync) + onFlags |= DbConstants.DB_TXN_NOSYNC; + if (!txnNoSync && oldConfig.txnNoSync) + offFlags |= DbConstants.DB_TXN_NOSYNC; + + if (txnNotDurable && !oldConfig.txnNotDurable) + onFlags |= DbConstants.DB_TXN_NOT_DURABLE; + if (!txnNotDurable && oldConfig.txnNotDurable) + offFlags |= DbConstants.DB_TXN_NOT_DURABLE; + + if (txnWriteNoSync && !oldConfig.txnWriteNoSync) + onFlags |= DbConstants.DB_TXN_WRITE_NOSYNC; + if (!txnWriteNoSync && oldConfig.txnWriteNoSync) + offFlags |= DbConstants.DB_TXN_WRITE_NOSYNC; + + if (yieldCPU && !oldConfig.yieldCPU) + onFlags |= DbConstants.DB_YIELDCPU; + if (!yieldCPU && oldConfig.yieldCPU) + offFlags |= DbConstants.DB_YIELDCPU; + + if (onFlags != 0) + dbenv.set_flags(onFlags, true); + if (offFlags != 0) + dbenv.set_flags(offFlags, false); + + /* Verbose flags */ + onFlags = 0; + offFlags = 0; + + if (verboseDeadlock && !oldConfig.verboseDeadlock) + onFlags |= DbConstants.DB_VERB_DEADLOCK; + if (!verboseDeadlock && oldConfig.verboseDeadlock) + offFlags |= DbConstants.DB_VERB_DEADLOCK; + + if (verboseRecovery && !oldConfig.verboseRecovery) + onFlags |= DbConstants.DB_VERB_RECOVERY; + if (!verboseRecovery && oldConfig.verboseRecovery) + offFlags |= DbConstants.DB_VERB_RECOVERY; + + if (verboseReplication && !oldConfig.verboseReplication) + onFlags |= DbConstants.DB_VERB_REPLICATION; + if (!verboseReplication && oldConfig.verboseReplication) + offFlags |= DbConstants.DB_VERB_REPLICATION; + + if (verboseWaitsFor && !oldConfig.verboseWaitsFor) + onFlags |= 
DbConstants.DB_VERB_WAITSFOR; + if (!verboseWaitsFor && oldConfig.verboseWaitsFor) + offFlags |= DbConstants.DB_VERB_WAITSFOR; + + if (onFlags != 0) + dbenv.set_verbose(onFlags, true); + if (offFlags != 0) + dbenv.set_verbose(offFlags, false); + + /* Callbacks */ + if (feedbackHandler != oldConfig.feedbackHandler) + dbenv.set_feedback(feedbackHandler); + if (logRecordHandler != oldConfig.logRecordHandler) + dbenv.set_app_dispatch(logRecordHandler); + if (messageHandler != oldConfig.messageHandler) + dbenv.set_msgcall(messageHandler); + if (panicHandler != oldConfig.panicHandler) + dbenv.set_paniccall(panicHandler); + if (replicationTransport != oldConfig.replicationTransport) + dbenv.set_rep_transport(envid, replicationTransport); + + /* Other settings */ + if (cacheSize != oldConfig.cacheSize || + cacheCount != oldConfig.cacheCount) + dbenv.set_cachesize(cacheSize, cacheCount); + for (final java.util.Enumeration e = dataDirs.elements(); + e.hasMoreElements();) { + final String dir = (String)e.nextElement(); + if (!oldConfig.dataDirs.contains(dir)) + dbenv.set_data_dir(dir); + } + if (!lockConflictsEqual(lockConflicts, oldConfig.lockConflicts)) + dbenv.set_lk_conflicts(lockConflicts); + if (lockDetectMode != oldConfig.lockDetectMode) + dbenv.set_lk_detect(lockDetectMode.getFlag()); + if (maxLocks != oldConfig.maxLocks) + dbenv.set_lk_max_locks(maxLocks); + if (maxLockers != oldConfig.maxLockers) + dbenv.set_lk_max_lockers(maxLockers); + if (maxLockObjects != oldConfig.maxLockObjects) + dbenv.set_lk_max_objects(maxLockObjects); + if (maxLogFileSize != oldConfig.maxLogFileSize) + dbenv.set_lg_max(maxLogFileSize); + if (logBufferSize != oldConfig.logBufferSize) + dbenv.set_lg_bsize(logBufferSize); + if (logDirectory != oldConfig.logDirectory && logDirectory != null && + !logDirectory.equals(oldConfig.logDirectory)) + dbenv.set_lg_dir(logDirectory.toString()); + if (logRegionSize != oldConfig.logRegionSize) + dbenv.set_lg_regionmax(logRegionSize); + if (messageStream != oldConfig.messageStream) + dbenv.set_message_stream(messageStream); + if (mmapSize != oldConfig.mmapSize) + dbenv.set_mp_mmapsize(mmapSize); + if (password != null) + dbenv.set_encrypt(password, DbConstants.DB_ENCRYPT_AES); + if (replicationLimit != oldConfig.replicationLimit) + dbenv.set_rep_limit(replicationLimit); + if (segmentId != oldConfig.segmentId) + dbenv.set_shm_key(segmentId); + if (testAndSetSpins != oldConfig.testAndSetSpins) + dbenv.set_tas_spins(testAndSetSpins); + if (lockTimeout != oldConfig.lockTimeout) + dbenv.set_timeout(lockTimeout, DbConstants.DB_SET_LOCK_TIMEOUT); + if (txnMaxActive != oldConfig.txnMaxActive) + dbenv.set_tx_max(txnMaxActive); + if (txnTimeout != oldConfig.txnTimeout) + dbenv.set_timeout(txnTimeout, DbConstants.DB_SET_TXN_TIMEOUT); + if (txnTimestamp != oldConfig.txnTimestamp && txnTimestamp != null && + !txnTimestamp.equals(oldConfig.txnTimestamp)) + dbenv.set_tx_timestamp(txnTimestamp); + if (temporaryDirectory != oldConfig.temporaryDirectory && + temporaryDirectory != null && + !temporaryDirectory.equals(oldConfig.temporaryDirectory)) + dbenv.set_tmp_dir(temporaryDirectory); + } + + /* package */ + EnvironmentConfig(final DbEnv dbenv) + throws DatabaseException { + + final int openFlags = dbenv.get_open_flags(); + + allowCreate = ((openFlags & DbConstants.DB_CREATE) != 0); + initializeCache = ((openFlags & DbConstants.DB_INIT_MPOOL) != 0); + initializeCDB = ((openFlags & DbConstants.DB_INIT_CDB) != 0); + initializeLocking = ((openFlags & DbConstants.DB_INIT_LOCK) != 0); + 
initializeLogging = ((openFlags & DbConstants.DB_INIT_LOG) != 0); + initializeReplication = ((openFlags & DbConstants.DB_INIT_REP) != 0); + joinEnvironment = ((openFlags & DbConstants.DB_JOINENV) != 0); + lockDown = ((openFlags & DbConstants.DB_LOCKDOWN) != 0); + isPrivate = ((openFlags & DbConstants.DB_PRIVATE) != 0); + readOnly = ((openFlags & DbConstants.DB_RDONLY) != 0); + runRecovery = ((openFlags & DbConstants.DB_RECOVER) != 0); + runFatalRecovery = ((openFlags & DbConstants.DB_RECOVER_FATAL) != 0); + systemMemory = ((openFlags & DbConstants.DB_SYSTEM_MEM) != 0); + threaded = ((openFlags & DbConstants.DB_THREAD) != 0); + transactional = ((openFlags & DbConstants.DB_INIT_TXN) != 0); + useEnvironment = ((openFlags & DbConstants.DB_USE_ENVIRON) != 0); + useEnvironmentRoot = + ((openFlags & DbConstants.DB_USE_ENVIRON_ROOT) != 0); + + final int envFlags = dbenv.get_flags(); + + cdbLockAllDatabases = ((envFlags & DbConstants.DB_CDB_ALLDB) != 0); + directDatabaseIO = ((envFlags & DbConstants.DB_DIRECT_DB) != 0); + directLogIO = ((envFlags & DbConstants.DB_DIRECT_LOG) != 0); + dsyncLog = ((envFlags & DbConstants.DB_DSYNC_LOG) != 0); + initializeRegions = ((envFlags & DbConstants.DB_REGION_INIT) != 0); + logAutoRemove = ((envFlags & DbConstants.DB_LOG_AUTOREMOVE) != 0); + logInMemory = ((envFlags & DbConstants.DB_LOG_INMEMORY) != 0); + noLocking = ((envFlags & DbConstants.DB_NOLOCKING) != 0); + noMMap = ((envFlags & DbConstants.DB_NOMMAP) != 0); + noPanic = ((envFlags & DbConstants.DB_NOPANIC) != 0); + overwrite = ((envFlags & DbConstants.DB_OVERWRITE) != 0); + txnNoSync = ((envFlags & DbConstants.DB_TXN_NOSYNC) != 0); + txnNotDurable = ((envFlags & DbConstants.DB_TXN_NOT_DURABLE) != 0); + txnWriteNoSync = ((envFlags & DbConstants.DB_TXN_WRITE_NOSYNC) != 0); + yieldCPU = ((envFlags & DbConstants.DB_YIELDCPU) != 0); + + /* Verbose flags */ + verboseDeadlock = dbenv.get_verbose(DbConstants.DB_VERB_DEADLOCK); + verboseRecovery = dbenv.get_verbose(DbConstants.DB_VERB_RECOVERY); + verboseReplication = dbenv.get_verbose(DbConstants.DB_VERB_REPLICATION); + verboseWaitsFor = dbenv.get_verbose(DbConstants.DB_VERB_WAITSFOR); + + /* Callbacks */ + errorHandler = dbenv.get_errcall(); + feedbackHandler = dbenv.get_feedback(); + logRecordHandler = dbenv.get_app_dispatch(); + messageHandler = dbenv.get_msgcall(); + panicHandler = dbenv.get_paniccall(); + // XXX: replicationTransport and envid aren't available? 
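// A consequence of the limitation noted above: a config object built from an
// open handle reports no replication transport, so configureEnvironment()
// calls set_rep_transport() whenever the new config supplies a non-null
// replicationTransport, and it never clears a callback that is already
// installed.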
+ + /* Other settings */ + cacheSize = dbenv.get_cachesize(); + cacheCount = dbenv.get_cachesize_ncache(); + + String[] dataDirArray = dbenv.get_data_dirs(); + if (dataDirArray == null) + dataDirArray = new String[0]; + dataDirs = new java.util.Vector(dataDirArray.length); + for (int i = 0; i < dataDirArray.length; i++) + dataDirs.add(dataDirArray[i]); + + errorPrefix = dbenv.get_errpfx(); + errorStream = dbenv.get_error_stream(); + + if (initializeLocking) { + lockConflicts = dbenv.get_lk_conflicts(); + lockDetectMode = LockDetectMode.fromFlag(dbenv.get_lk_detect()); + lockTimeout = dbenv.get_timeout(DbConstants.DB_SET_LOCK_TIMEOUT); + maxLocks = dbenv.get_lk_max_locks(); + maxLockers = dbenv.get_lk_max_lockers(); + maxLockObjects = dbenv.get_lk_max_objects(); + txnTimeout = dbenv.get_timeout(DbConstants.DB_SET_TXN_TIMEOUT); + } else { + lockConflicts = null; + lockDetectMode = LockDetectMode.NONE; + lockTimeout = 0L; + maxLocks = 0; + maxLockers = 0; + maxLockObjects = 0; + txnTimeout = 0L; + } + if (initializeLogging) { + maxLogFileSize = dbenv.get_lg_max(); + logBufferSize = dbenv.get_lg_bsize(); + logDirectory = (dbenv.get_lg_dir() == null) ? null : + new java.io.File(dbenv.get_lg_dir()); + logRegionSize = dbenv.get_lg_regionmax(); + } else { + maxLogFileSize = 0; + logBufferSize = 0; + logDirectory = null; + logRegionSize = 0; + } + messageStream = dbenv.get_message_stream(); + mmapSize = dbenv.get_mp_mmapsize(); + + // XXX: intentional information loss? + password = (dbenv.get_encrypt_flags() == 0) ? null : ""; + + if (initializeReplication) { + replicationLimit = dbenv.get_rep_limit(); + } else { + replicationLimit = 0L; + } + + // XXX: no way to find RPC server? + rpcServer = null; + rpcClientTimeout = 0; + rpcServerTimeout = 0; + + segmentId = dbenv.get_shm_key(); + testAndSetSpins = dbenv.get_tas_spins(); + if (transactional) { + txnMaxActive = dbenv.get_tx_max(); + final long txnTimestampSeconds = dbenv.get_tx_timestamp(); + if (txnTimestampSeconds != 0L) + txnTimestamp = new java.util.Date(txnTimestampSeconds * 1000); + else + txnTimestamp = null; + } else { + txnMaxActive = 0; + txnTimestamp = null; + } + temporaryDirectory = dbenv.get_tmp_dir(); + } +} diff --git a/db/java/src/com/sleepycat/db/ErrorHandler.java b/db/java/src/com/sleepycat/db/ErrorHandler.java new file mode 100644 index 000000000..ad4a9e5c4 --- /dev/null +++ b/db/java/src/com/sleepycat/db/ErrorHandler.java @@ -0,0 +1,14 @@ +/* + * - + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1997-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: ErrorHandler.java,v 1.2 2004/04/20 20:45:11 mjc Exp $ + */ +package com.sleepycat.db; + +public interface ErrorHandler { + void error(Environment dbenv, String errpfx, String msg); +} diff --git a/db/java/src/com/sleepycat/db/FeedbackHandler.java b/db/java/src/com/sleepycat/db/FeedbackHandler.java new file mode 100644 index 000000000..96b2e6eb2 --- /dev/null +++ b/db/java/src/com/sleepycat/db/FeedbackHandler.java @@ -0,0 +1,16 @@ +/* + * - + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1997-2004 + * Sleepycat Software. All rights reserved.
+ * + * $Id: FeedbackHandler.java,v 1.2 2004/04/21 01:09:09 mjc Exp $ + */ +package com.sleepycat.db; + +public interface FeedbackHandler { + void recoveryFeedback(Environment dbenv, int percent); + void upgradeFeedback(Database db, int percent); + void verifyFeedback(Database db, int percent); +} diff --git a/db/java/src/com/sleepycat/db/HashStats.java b/db/java/src/com/sleepycat/db/HashStats.java new file mode 100644 index 000000000..3e04452b7 --- /dev/null +++ b/db/java/src/com/sleepycat/db/HashStats.java @@ -0,0 +1,116 @@ +/*- + * DO NOT EDIT: automatically built by dist/s_java_stat. + * + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + */ + +package com.sleepycat.db; + +public class HashStats extends DatabaseStats { + // no public constructor + protected HashStats() {} + + private int hash_magic; + public int getMagic() { + return hash_magic; + } + + private int hash_version; + public int getVersion() { + return hash_version; + } + + private int hash_metaflags; + public int getMetaFlags() { + return hash_metaflags; + } + + private int hash_nkeys; + public int getNumKeys() { + return hash_nkeys; + } + + private int hash_ndata; + public int getNumData() { + return hash_ndata; + } + + private int hash_pagesize; + public int getPageSize() { + return hash_pagesize; + } + + private int hash_ffactor; + public int getFfactor() { + return hash_ffactor; + } + + private int hash_buckets; + public int getBuckets() { + return hash_buckets; + } + + private int hash_free; + public int getFree() { + return hash_free; + } + + private int hash_bfree; + public int getBFree() { + return hash_bfree; + } + + private int hash_bigpages; + public int getBigPages() { + return hash_bigpages; + } + + private int hash_big_bfree; + public int getBigBFree() { + return hash_big_bfree; + } + + private int hash_overflows; + public int getOverflows() { + return hash_overflows; + } + + private int hash_ovfl_free; + public int getOvflFree() { + return hash_ovfl_free; + } + + private int hash_dup; + public int getDup() { + return hash_dup; + } + + private int hash_dup_free; + public int getDupFree() { + return hash_dup_free; + } + + public String toString() { + return "HashStats:" + + "\n hash_magic=" + hash_magic + + "\n hash_version=" + hash_version + + "\n hash_metaflags=" + hash_metaflags + + "\n hash_nkeys=" + hash_nkeys + + "\n hash_ndata=" + hash_ndata + + "\n hash_pagesize=" + hash_pagesize + + "\n hash_ffactor=" + hash_ffactor + + "\n hash_buckets=" + hash_buckets + + "\n hash_free=" + hash_free + + "\n hash_bfree=" + hash_bfree + + "\n hash_bigpages=" + hash_bigpages + + "\n hash_big_bfree=" + hash_big_bfree + + "\n hash_overflows=" + hash_overflows + + "\n hash_ovfl_free=" + hash_ovfl_free + + "\n hash_dup=" + hash_dup + + "\n hash_dup_free=" + hash_dup_free + ; + } +} diff --git a/db/java/src/com/sleepycat/db/Hasher.java b/db/java/src/com/sleepycat/db/Hasher.java new file mode 100644 index 000000000..cc46a8549 --- /dev/null +++ b/db/java/src/com/sleepycat/db/Hasher.java @@ -0,0 +1,14 @@ +/* + * - + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. 
+ * + * $Id: Hasher.java,v 1.1 2004/04/06 20:43:40 mjc Exp $ + */ +package com.sleepycat.db; + +public interface Hasher { + int hash(Database db, byte[] data, int len); +} diff --git a/db/java/src/com/sleepycat/db/JoinConfig.java b/db/java/src/com/sleepycat/db/JoinConfig.java new file mode 100644 index 000000000..41689767c --- /dev/null +++ b/db/java/src/com/sleepycat/db/JoinConfig.java @@ -0,0 +1,39 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: JoinConfig.java,v 1.4 2004/09/28 19:30:37 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; + +public class JoinConfig implements Cloneable { + public static final JoinConfig DEFAULT = new JoinConfig(); + + /* package */ + static JoinConfig checkNull(JoinConfig config) { + return (config == null) ? DEFAULT : config; + } + + private boolean noSort; + + public JoinConfig() { + } + + public void setNoSort(final boolean noSort) { + this.noSort = noSort; + } + + public boolean getNoSort() { + return noSort; + } + + /* package */ + int getFlags() { + return noSort ? DbConstants.DB_JOIN_NOSORT : 0; + } +} diff --git a/db/java/src/com/sleepycat/db/JoinCursor.java b/db/java/src/com/sleepycat/db/JoinCursor.java new file mode 100644 index 000000000..5527a6177 --- /dev/null +++ b/db/java/src/com/sleepycat/db/JoinCursor.java @@ -0,0 +1,60 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: JoinCursor.java,v 1.2 2004/04/09 15:08:38 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; +import com.sleepycat.db.internal.Dbc; + +public class JoinCursor { + private Database database; + private Dbc dbc; + private JoinConfig config; + + JoinCursor(final Database database, + final Dbc dbc, + final JoinConfig config) { + this.database = database; + this.dbc = dbc; + this.config = config; + } + + public void close() + throws DatabaseException { + + dbc.close(); + } + + public Database getDatabase() { + return database; + } + + public JoinConfig getConfig() { + return config; + } + + public OperationStatus getNext(final DatabaseEntry key, LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.get(key, DatabaseEntry.IGNORE, + DbConstants.DB_JOIN_ITEM | + LockMode.getFlag(lockMode))); + } + + public OperationStatus getNext(final DatabaseEntry key, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.get(key, data, LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } +} diff --git a/db/java/src/com/sleepycat/db/KeyRange.java b/db/java/src/com/sleepycat/db/KeyRange.java new file mode 100644 index 000000000..5ddbb123d --- /dev/null +++ b/db/java/src/com/sleepycat/db/KeyRange.java @@ -0,0 +1,16 @@ +/* + * - + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1997-2004 + * Sleepycat Software. All rights reserved. 
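The JoinConfig/JoinCursor classes above are normally obtained through Database.join(), which is defined elsewhere in this changeset. A minimal illustrative sketch follows; it assumes the Database, Cursor, SecondaryDatabase and OperationStatus classes from other files in this patch, and the primary/secondary handles and the key values ("Boston", "engineer") are hypothetical.

    import com.sleepycat.db.*;

    public class JoinSketch {
        public static void printMatches(Database personDb,
                                        SecondaryDatabase byCity,
                                        SecondaryDatabase byJob)
            throws DatabaseException {

            Cursor cityCursor = byCity.openCursor(null, null);
            Cursor jobCursor = byJob.openCursor(null, null);
            JoinCursor join = null;
            try {
                DatabaseEntry tmp = new DatabaseEntry();
                // Position each secondary cursor on the value to intersect.
                cityCursor.getSearchKey(
                    new DatabaseEntry("Boston".getBytes()), tmp, LockMode.DEFAULT);
                jobCursor.getSearchKey(
                    new DatabaseEntry("engineer".getBytes()), tmp, LockMode.DEFAULT);

                JoinConfig config = new JoinConfig();
                config.setNoSort(true); // maps to DB_JOIN_NOSORT above
                join = personDb.join(new Cursor[] { cityCursor, jobCursor }, config);

                DatabaseEntry key = new DatabaseEntry();
                DatabaseEntry data = new DatabaseEntry();
                while (join.getNext(key, data, LockMode.DEFAULT) ==
                        OperationStatus.SUCCESS)
                    System.out.println(new String(key.getData()));
            } finally {
                if (join != null)
                    join.close();
                jobCursor.close();
                cityCursor.close();
            }
        }
    }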
+ * + * $Id: KeyRange.java,v 1.1 2004/04/06 20:43:40 mjc Exp $ + */ +package com.sleepycat.db; + +public class KeyRange { + public double equal; + public double greater; + public double less; +} diff --git a/db/java/src/com/sleepycat/db/Lock.java b/db/java/src/com/sleepycat/db/Lock.java new file mode 100644 index 000000000..64559e359 --- /dev/null +++ b/db/java/src/com/sleepycat/db/Lock.java @@ -0,0 +1,31 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2001-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Lock.java,v 1.1 2004/04/06 20:43:40 mjc Exp $ + */ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbLock; + +public final class Lock { + private DbLock dbLock; + + private Lock(final DbLock dblock) { + this.dbLock = dblock; + dbLock.wrapper = this; + } + + /* package */ + static Lock wrap(final DbLock dblock) { + return (dblock == null) ? null : new Lock(dblock); + } + + /* package */ + DbLock unwrap() { + return dbLock; + } +} diff --git a/db/java/src/com/sleepycat/db/LockDetectMode.java b/db/java/src/com/sleepycat/db/LockDetectMode.java new file mode 100644 index 000000000..ac163cad3 --- /dev/null +++ b/db/java/src/com/sleepycat/db/LockDetectMode.java @@ -0,0 +1,90 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: LockDetectMode.java,v 1.2 2004/04/21 01:09:09 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; + +public final class LockDetectMode { + public static final LockDetectMode NONE = + new LockDetectMode("NONE", 0); + + public static final LockDetectMode DEFAULT = + new LockDetectMode("DEFAULT", DbConstants.DB_LOCK_DEFAULT); + + public static final LockDetectMode EXPIRE = + new LockDetectMode("EXPIRE", DbConstants.DB_LOCK_EXPIRE); + + public static final LockDetectMode MAXLOCKS = + new LockDetectMode("MAXLOCKS", DbConstants.DB_LOCK_MAXLOCKS); + + public static final LockDetectMode MAXWRITE = + new LockDetectMode("MAXWRITE", DbConstants.DB_LOCK_MAXWRITE); + + public static final LockDetectMode MINLOCKS = + new LockDetectMode("MINLOCKS", DbConstants.DB_LOCK_MINLOCKS); + + public static final LockDetectMode MINWRITE = + new LockDetectMode("MINWRITE", DbConstants.DB_LOCK_MINWRITE); + + public static final LockDetectMode OLDEST = + new LockDetectMode("OLDEST", DbConstants.DB_LOCK_OLDEST); + + public static final LockDetectMode RANDOM = + new LockDetectMode("RANDOM", DbConstants.DB_LOCK_RANDOM); + + public static final LockDetectMode YOUNGEST = + new LockDetectMode("YOUNGEST", DbConstants.DB_LOCK_YOUNGEST); + + /* package */ + static LockDetectMode fromFlag(int flag) { + switch (flag) { + case 0: + return NONE; + case DbConstants.DB_LOCK_DEFAULT: + return DEFAULT; + case DbConstants.DB_LOCK_EXPIRE: + return EXPIRE; + case DbConstants.DB_LOCK_MAXLOCKS: + return MAXLOCKS; + case DbConstants.DB_LOCK_MAXWRITE: + return MAXWRITE; + case DbConstants.DB_LOCK_MINLOCKS: + return MINLOCKS; + case DbConstants.DB_LOCK_MINWRITE: + return MINWRITE; + case DbConstants.DB_LOCK_OLDEST: + return OLDEST; + case DbConstants.DB_LOCK_RANDOM: + return RANDOM; + case DbConstants.DB_LOCK_YOUNGEST: + return YOUNGEST; + default: + throw new IllegalArgumentException( + "Unknown lock detect mode: " + flag); + } + } + + private String modeName; + private int flag; + + private LockDetectMode(final String modeName, final int flag) { + this.modeName = modeName; + this.flag = flag; + } + + /* package */ + int
getFlag() { + return flag; + } + + public String toString() { + return "LockDetectMode." + modeName; + } +} diff --git a/db/java/src/com/sleepycat/db/LockMode.java b/db/java/src/com/sleepycat/db/LockMode.java new file mode 100644 index 000000000..ab03d49a5 --- /dev/null +++ b/db/java/src/com/sleepycat/db/LockMode.java @@ -0,0 +1,40 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: LockMode.java,v 1.2 2004/04/09 15:08:38 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; + +public final class LockMode { + private String lockModeName; + private int flag; + + private LockMode(String lockModeName, int flag) { + this.lockModeName = lockModeName; + this.flag = flag; + } + + public static final LockMode DEFAULT = + new LockMode("DEFAULT", 0); + public static final LockMode DIRTY_READ = + new LockMode("DIRTY_READ", DbConstants.DB_DIRTY_READ); + public static final LockMode DEGREE_2 = + new LockMode("DEGREE_2", DbConstants.DB_DEGREE_2); + public static final LockMode RMW = + new LockMode("RMW", DbConstants.DB_RMW); + + public String toString() { + return "LockMode." + lockModeName; + } + + /* package */ + static int getFlag(LockMode mode) { + return ((mode == null) ? DEFAULT : mode).flag; + } +} diff --git a/db/java/src/com/sleepycat/db/LockNotGrantedException.java b/db/java/src/com/sleepycat/db/LockNotGrantedException.java new file mode 100644 index 000000000..a3f4c0b29 --- /dev/null +++ b/db/java/src/com/sleepycat/db/LockNotGrantedException.java @@ -0,0 +1,57 @@ +/* + * - + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1997-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: LockNotGrantedException.java,v 1.2 2004/09/28 19:30:37 mjc Exp $ + */ +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; +import com.sleepycat.db.internal.DbEnv; +import com.sleepycat.db.internal.DbLock; + +public class LockNotGrantedException extends DeadlockException { + private int index; + private Lock lock; + private int mode; + private DatabaseEntry obj; + private int op; + + protected LockNotGrantedException(final String message, + final int op, + final int mode, + final DatabaseEntry obj, + final DbLock lock, + final int index, + final DbEnv dbenv) { + super(message, DbConstants.DB_LOCK_NOTGRANTED, dbenv); + this.op = op; + this.mode = mode; + this.obj = obj; + this.lock = lock.wrapper; + this.index = index; + } + + public int getIndex() { + return index; + } + + public Lock getLock() { + return lock; + } + + public int getMode() { + return mode; + } + + public DatabaseEntry getObj() { + return obj; + } + + public int getOp() { + return op; + } +} diff --git a/db/java/src/com/sleepycat/db/LockOperation.java b/db/java/src/com/sleepycat/db/LockOperation.java new file mode 100644 index 000000000..4bda73728 --- /dev/null +++ b/db/java/src/com/sleepycat/db/LockOperation.java @@ -0,0 +1,65 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. 
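LockMode selects per-operation isolation and locking behavior (DIRTY_READ, DEGREE_2, RMW), while LockDetectMode chooses the victim policy for automatic deadlock detection. A short, hypothetical sketch of both follows; it assumes the EnvironmentConfig class added elsewhere in this patch exposes a setLockDetectMode(LockDetectMode) setter, and that Database.get has the usual (Transaction, key, data, LockMode) signature.

import com.sleepycat.db.*;

public class LockingExample {
    // Pick the deadlock-detector victim policy before the Environment is opened:
    // abort the locker holding the fewest write locks when a cycle is found.
    static void configureDetector(EnvironmentConfig envConfig) {
        envConfig.setLockDetectMode(LockDetectMode.MINWRITE);
    }

    // Read with RMW so the write lock is acquired up front and a later put()
    // in the same transaction cannot deadlock against another reader.
    static OperationStatus readForUpdate(Database db, Transaction txn,
                                         DatabaseEntry key, DatabaseEntry data)
        throws DatabaseException {
        return db.get(txn, key, data, LockMode.RMW);
    }
}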
+* +* $Id: LockOperation.java,v 1.2 2004/04/21 01:09:09 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; + +public final class LockOperation { + public static final LockOperation GET = + new LockOperation("GET", DbConstants.DB_LOCK_GET); + public static final LockOperation GET_TIMEOUT = + new LockOperation("GET_TIMEOUT", DbConstants.DB_LOCK_GET_TIMEOUT); + public static final LockOperation PUT = + new LockOperation("PUT", DbConstants.DB_LOCK_PUT); + public static final LockOperation PUT_ALL = + new LockOperation("PUT_ALL", DbConstants.DB_LOCK_PUT_ALL); + public static final LockOperation PUT_OBJ = + new LockOperation("PUT_OBJ", DbConstants.DB_LOCK_PUT_OBJ); + public static final LockOperation TIMEOUT = + new LockOperation("TIMEOUT", DbConstants.DB_LOCK_TIMEOUT); + + /* package */ + static LockOperation fromFlag(int flag) { + switch (flag) { + case DbConstants.DB_LOCK_GET: + return GET; + case DbConstants.DB_LOCK_GET_TIMEOUT: + return GET_TIMEOUT; + case DbConstants.DB_LOCK_PUT: + return PUT; + case DbConstants.DB_LOCK_PUT_ALL: + return PUT_ALL; + case DbConstants.DB_LOCK_PUT_OBJ: + return PUT_OBJ; + case DbConstants.DB_LOCK_TIMEOUT: + return TIMEOUT; + default: + throw new IllegalArgumentException( + "Unknown lock operation: " + flag); + } + } + + private final String operationName; + private final int flag; + + private LockOperation(final String operationName, final int flag) { + this.operationName = operationName; + this.flag = flag; + } + + public String toString() { + return "LockOperation." + operationName; + } + + /* package */ + int getFlag() { + return flag; + } +} diff --git a/db/java/src/com/sleepycat/db/LockRequest.java b/db/java/src/com/sleepycat/db/LockRequest.java new file mode 100644 index 000000000..d5d5a8fa3 --- /dev/null +++ b/db/java/src/com/sleepycat/db/LockRequest.java @@ -0,0 +1,83 @@ +/* + * - + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1997-2004 + * Sleepycat Software. All rights reserved. 
+ * + * $Id: LockRequest.java,v 1.3 2004/07/26 17:01:51 mjc Exp $ + */ +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbLock; + +public class LockRequest { + private DbLock lock; + private LockRequestMode mode; + private int modeFlag; + private DatabaseEntry obj; + private int op; + private int timeout; + + public LockRequest(final LockOperation op, + final LockRequestMode mode, + final DatabaseEntry obj, + final Lock lock) { + + this(op, mode, obj, lock, 0); + } + + public LockRequest(final LockOperation op, + final LockRequestMode mode, + final DatabaseEntry obj, + final Lock lock, + final int timeout) { + + this.setOp(op); + this.setMode(mode); + this.setObj(obj); + this.setLock(lock); + this.setTimeout(timeout); + } + + public void setLock(final Lock lock) { + this.lock = lock.unwrap(); + } + + public void setMode(final LockRequestMode mode) { + this.mode = mode; + this.modeFlag = mode.getFlag(); + } + + public void setObj(final DatabaseEntry obj) { + this.obj = obj; + } + + public void setOp(final LockOperation op) { + this.op = op.getFlag(); + } + + public void setTimeout(final int timeout) { + this.timeout = timeout; + } + + public Lock getLock() { + return lock.wrapper; + } + + public LockRequestMode getMode() { + return mode; + } + + public DatabaseEntry getObj() { + return obj; + } + + public LockOperation getOp() { + return LockOperation.fromFlag(op); + } + + public int getTimeout() { + return timeout; + } +} diff --git a/db/java/src/com/sleepycat/db/LockRequestMode.java b/db/java/src/com/sleepycat/db/LockRequestMode.java new file mode 100644 index 000000000..14f49d5c6 --- /dev/null +++ b/db/java/src/com/sleepycat/db/LockRequestMode.java @@ -0,0 +1,43 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: LockRequestMode.java,v 1.2 2004/07/26 17:01:51 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; + +public final class LockRequestMode { + public static final LockRequestMode READ = + new LockRequestMode("READ", DbConstants.DB_LOCK_READ); + public static final LockRequestMode WRITE = + new LockRequestMode("WRITE", DbConstants.DB_LOCK_WRITE); + public static final LockRequestMode IWRITE = + new LockRequestMode("IWRITE", DbConstants.DB_LOCK_IWRITE); + public static final LockRequestMode IREAD = + new LockRequestMode("IREAD", DbConstants.DB_LOCK_IREAD); + public static final LockRequestMode IWR = + new LockRequestMode("IWR", DbConstants.DB_LOCK_IWR); + + /* package */ + private final String operationName; + private final int flag; + + public LockRequestMode(final String operationName, final int flag) { + this.operationName = operationName; + this.flag = flag; + } + + public String toString() { + return "LockRequestMode." + operationName; + } + + /* package */ + int getFlag() { + return flag; + } +} diff --git a/db/java/src/com/sleepycat/db/LockStats.java b/db/java/src/com/sleepycat/db/LockStats.java new file mode 100644 index 000000000..a16a22616 --- /dev/null +++ b/db/java/src/com/sleepycat/db/LockStats.java @@ -0,0 +1,164 @@ +/*- + * DO NOT EDIT: automatically built by dist/s_java_stat. + * + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. 
+ */ + +package com.sleepycat.db; + +public class LockStats { + // no public constructor + protected LockStats() {} + + private int st_id; + public int getId() { + return st_id; + } + + private int st_cur_maxid; + public int getCurMaxId() { + return st_cur_maxid; + } + + private int st_maxlocks; + public int getMaxLocks() { + return st_maxlocks; + } + + private int st_maxlockers; + public int getMaxLockers() { + return st_maxlockers; + } + + private int st_maxobjects; + public int getMaxObjects() { + return st_maxobjects; + } + + private int st_nmodes; + public int getNumModes() { + return st_nmodes; + } + + private int st_nlocks; + public int getNumLocks() { + return st_nlocks; + } + + private int st_maxnlocks; + public int getMaxNlocks() { + return st_maxnlocks; + } + + private int st_nlockers; + public int getNumLockers() { + return st_nlockers; + } + + private int st_maxnlockers; + public int getMaxNlockers() { + return st_maxnlockers; + } + + private int st_nobjects; + public int getNobjects() { + return st_nobjects; + } + + private int st_maxnobjects; + public int getMaxNobjects() { + return st_maxnobjects; + } + + private int st_nconflicts; + public int getNumConflicts() { + return st_nconflicts; + } + + private int st_nrequests; + public int getNumRequests() { + return st_nrequests; + } + + private int st_nreleases; + public int getNumReleases() { + return st_nreleases; + } + + private int st_nnowaits; + public int getNumNowaits() { + return st_nnowaits; + } + + private int st_ndeadlocks; + public int getNumDeadlocks() { + return st_ndeadlocks; + } + + private int st_locktimeout; + public int getLockTimeout() { + return st_locktimeout; + } + + private int st_nlocktimeouts; + public int getNumLockTimeouts() { + return st_nlocktimeouts; + } + + private int st_txntimeout; + public int getTxnTimeout() { + return st_txntimeout; + } + + private int st_ntxntimeouts; + public int getNumTxnTimeouts() { + return st_ntxntimeouts; + } + + private int st_region_wait; + public int getRegionWait() { + return st_region_wait; + } + + private int st_region_nowait; + public int getRegionNowait() { + return st_region_nowait; + } + + private int st_regsize; + public int getRegSize() { + return st_regsize; + } + + public String toString() { + return "LockStats:" + + "\n st_id=" + st_id + + "\n st_cur_maxid=" + st_cur_maxid + + "\n st_maxlocks=" + st_maxlocks + + "\n st_maxlockers=" + st_maxlockers + + "\n st_maxobjects=" + st_maxobjects + + "\n st_nmodes=" + st_nmodes + + "\n st_nlocks=" + st_nlocks + + "\n st_maxnlocks=" + st_maxnlocks + + "\n st_nlockers=" + st_nlockers + + "\n st_maxnlockers=" + st_maxnlockers + + "\n st_nobjects=" + st_nobjects + + "\n st_maxnobjects=" + st_maxnobjects + + "\n st_nconflicts=" + st_nconflicts + + "\n st_nrequests=" + st_nrequests + + "\n st_nreleases=" + st_nreleases + + "\n st_nnowaits=" + st_nnowaits + + "\n st_ndeadlocks=" + st_ndeadlocks + + "\n st_locktimeout=" + st_locktimeout + + "\n st_nlocktimeouts=" + st_nlocktimeouts + + "\n st_txntimeout=" + st_txntimeout + + "\n st_ntxntimeouts=" + st_ntxntimeouts + + "\n st_region_wait=" + st_region_wait + + "\n st_region_nowait=" + st_region_nowait + + "\n st_regsize=" + st_regsize + ; + } +} diff --git a/db/java/src/com/sleepycat/db/LogCursor.java b/db/java/src/com/sleepycat/db/LogCursor.java new file mode 100644 index 000000000..e15576ed9 --- /dev/null +++ b/db/java/src/com/sleepycat/db/LogCursor.java @@ -0,0 +1,80 @@ +/*- + * See the file LICENSE for redistribution information. 
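LockStats is a plain data holder filled in by the native layer; its getters mirror the st_* fields of DB_LOCK_STAT and toString() dumps them all. A hypothetical retrieval sketch, assuming the Environment class added elsewhere in this patch exposes a getLockStats method and that, like the other *Config classes in this patch, a null configuration argument selects the defaults:

import com.sleepycat.db.*;

public class LockStatsExample {
    // Print the lock-region statistics and flag any deadlocks seen so far.
    static void report(Environment env) throws DatabaseException {
        LockStats stats = env.getLockStats(null);   // null: default stats options (assumed)
        System.out.println(stats);                  // uses LockStats.toString()
        if (stats.getNumDeadlocks() > 0)
            System.err.println("deadlocks detected: " + stats.getNumDeadlocks());
    }
}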
+ * + * Copyright (c) 2001-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: LogCursor.java,v 1.1 2004/04/06 20:43:40 mjc Exp $ + */ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; +import com.sleepycat.db.internal.DbLogc; + +public class LogCursor { + protected DbLogc logc; + + protected LogCursor(final DbLogc logc) { + this.logc = logc; + } + + /* package */ + static LogCursor wrap(DbLogc logc) { + return (logc == null) ? null : new LogCursor(logc); + } + + public synchronized void close() + throws DatabaseException { + + logc.close(0); + } + + public OperationStatus getCurrent(final LogSequenceNumber lsn, + final DatabaseEntry data) + throws DatabaseException { + + return OperationStatus.fromInt( + logc.get(lsn, data, DbConstants.DB_CURRENT)); + } + + public OperationStatus getNext(final LogSequenceNumber lsn, + final DatabaseEntry data) + throws DatabaseException { + + return OperationStatus.fromInt( + logc.get(lsn, data, DbConstants.DB_NEXT)); + } + + public OperationStatus getFirst(final LogSequenceNumber lsn, + final DatabaseEntry data) + throws DatabaseException { + + return OperationStatus.fromInt( + logc.get(lsn, data, DbConstants.DB_FIRST)); + } + + public OperationStatus getLast(final LogSequenceNumber lsn, + final DatabaseEntry data) + throws DatabaseException { + + return OperationStatus.fromInt( + logc.get(lsn, data, DbConstants.DB_LAST)); + } + + public OperationStatus getPrev(final LogSequenceNumber lsn, + final DatabaseEntry data) + throws DatabaseException { + + return OperationStatus.fromInt( + logc.get(lsn, data, DbConstants.DB_PREV)); + } + + public OperationStatus set(final LogSequenceNumber lsn, + final DatabaseEntry data) + throws DatabaseException { + + return OperationStatus.fromInt( + logc.get(lsn, data, DbConstants.DB_SET)); + } +} diff --git a/db/java/src/com/sleepycat/db/LogRecordHandler.java b/db/java/src/com/sleepycat/db/LogRecordHandler.java new file mode 100644 index 000000000..db4d6368c --- /dev/null +++ b/db/java/src/com/sleepycat/db/LogRecordHandler.java @@ -0,0 +1,17 @@ +/* + * - + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: LogRecordHandler.java,v 1.2 2004/04/21 01:09:09 mjc Exp $ + */ +package com.sleepycat.db; + +public interface LogRecordHandler { + int handleLogRecord(Environment dbenv, + DatabaseEntry logRecord, + LogSequenceNumber lsn, + RecoveryOperation operation); +} diff --git a/db/java/src/com/sleepycat/db/LogSequenceNumber.java b/db/java/src/com/sleepycat/db/LogSequenceNumber.java new file mode 100644 index 000000000..7d1d09313 --- /dev/null +++ b/db/java/src/com/sleepycat/db/LogSequenceNumber.java @@ -0,0 +1,38 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2001-2004 + * Sleepycat Software. All rights reserved. 
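LogCursor wraps DbLogc and walks the write-ahead log one record at a time, filling in a LogSequenceNumber and a DatabaseEntry per call; LogRecordHandler is the callback form of the same traversal. A small sketch of a forward scan, assuming the Environment class added elsewhere in this patch exposes an openLogCursor() method:

import com.sleepycat.db.*;

public class LogScanExample {
    // Walk the log from the first record to the last and print each LSN.
    static void dumpLog(Environment env) throws DatabaseException {
        LogCursor cursor = env.openLogCursor();
        try {
            LogSequenceNumber lsn = new LogSequenceNumber();
            DatabaseEntry record = new DatabaseEntry();
            while (cursor.getNext(lsn, record) == OperationStatus.SUCCESS) {
                System.out.println("[" + lsn.getFile() + "/" + lsn.getOffset()
                    + "] " + record.getSize() + " bytes");
            }
        } finally {
            cursor.close();
        }
    }
}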
+ * + * $Id: LogSequenceNumber.java,v 1.1 2004/04/06 20:43:40 mjc Exp $ + */ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbEnv; + +public class LogSequenceNumber { + private int file; + private int offset; + + public LogSequenceNumber(final int file, final int offset) { + this.file = file; + this.offset = offset; + } + + public LogSequenceNumber() { + this(0, 0); + } + + public int getFile() { + return file; + } + + public int getOffset() { + return offset; + } + + public static int compare(LogSequenceNumber lsn1, LogSequenceNumber lsn2) { + return DbEnv.log_compare(lsn1, lsn2); + } +} diff --git a/db/java/src/com/sleepycat/db/LogStats.java b/db/java/src/com/sleepycat/db/LogStats.java new file mode 100644 index 000000000..aacf1e8f0 --- /dev/null +++ b/db/java/src/com/sleepycat/db/LogStats.java @@ -0,0 +1,146 @@ +/*- + * DO NOT EDIT: automatically built by dist/s_java_stat. + * + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + */ + +package com.sleepycat.db; + +public class LogStats { + // no public constructor + protected LogStats() {} + + private int st_magic; + public int getMagic() { + return st_magic; + } + + private int st_version; + public int getVersion() { + return st_version; + } + + private int st_mode; + public int getMode() { + return st_mode; + } + + private int st_lg_bsize; + public int getLgBSize() { + return st_lg_bsize; + } + + private int st_lg_size; + public int getLgSize() { + return st_lg_size; + } + + private int st_w_bytes; + public int getWBytes() { + return st_w_bytes; + } + + private int st_w_mbytes; + public int getWMbytes() { + return st_w_mbytes; + } + + private int st_wc_bytes; + public int getWcBytes() { + return st_wc_bytes; + } + + private int st_wc_mbytes; + public int getWcMbytes() { + return st_wc_mbytes; + } + + private int st_wcount; + public int getWCount() { + return st_wcount; + } + + private int st_wcount_fill; + public int getWCountFill() { + return st_wcount_fill; + } + + private int st_scount; + public int getSCount() { + return st_scount; + } + + private int st_region_wait; + public int getRegionWait() { + return st_region_wait; + } + + private int st_region_nowait; + public int getRegionNowait() { + return st_region_nowait; + } + + private int st_cur_file; + public int getCurFile() { + return st_cur_file; + } + + private int st_cur_offset; + public int getCurOffset() { + return st_cur_offset; + } + + private int st_disk_file; + public int getDiskFile() { + return st_disk_file; + } + + private int st_disk_offset; + public int getDiskOffset() { + return st_disk_offset; + } + + private int st_regsize; + public int getRegSize() { + return st_regsize; + } + + private int st_maxcommitperflush; + public int getMaxCommitperflush() { + return st_maxcommitperflush; + } + + private int st_mincommitperflush; + public int getMinCommitperflush() { + return st_mincommitperflush; + } + + public String toString() { + return "LogStats:" + + "\n st_magic=" + st_magic + + "\n st_version=" + st_version + + "\n st_mode=" + st_mode + + "\n st_lg_bsize=" + st_lg_bsize + + "\n st_lg_size=" + st_lg_size + + "\n st_w_bytes=" + st_w_bytes + + "\n st_w_mbytes=" + st_w_mbytes + + "\n st_wc_bytes=" + st_wc_bytes + + "\n st_wc_mbytes=" + st_wc_mbytes + + "\n st_wcount=" + st_wcount + + "\n st_wcount_fill=" + st_wcount_fill + + "\n st_scount=" + st_scount + + "\n st_region_wait=" + st_region_wait + + "\n st_region_nowait=" + st_region_nowait + + "\n st_cur_file=" + 
st_cur_file + + "\n st_cur_offset=" + st_cur_offset + + "\n st_disk_file=" + st_disk_file + + "\n st_disk_offset=" + st_disk_offset + + "\n st_regsize=" + st_regsize + + "\n st_maxcommitperflush=" + st_maxcommitperflush + + "\n st_mincommitperflush=" + st_mincommitperflush + ; + } +} diff --git a/db/java/src/com/sleepycat/db/MemoryException.java b/db/java/src/com/sleepycat/db/MemoryException.java new file mode 100644 index 000000000..7c612494f --- /dev/null +++ b/db/java/src/com/sleepycat/db/MemoryException.java @@ -0,0 +1,41 @@ +/* + * - + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1999-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: MemoryException.java,v 1.1 2004/04/06 20:43:40 mjc Exp $ + */ +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbEnv; + +public class MemoryException extends DatabaseException { + private DatabaseEntry dbt = null; + private String message; + + protected MemoryException(final String s, + final DatabaseEntry dbt, + final int errno, + final DbEnv dbenv) { + super(s, errno, dbenv); + this.message = s; + this.dbt = dbt; + } + + public DatabaseEntry getDatabaseEntry() { + return dbt; + } + + public String toString() { + return message; + } + + void updateDatabaseEntry(final DatabaseEntry newEntry) { + if (this.dbt == null) { + this.message = "DatabaseEntry not large enough for available data"; + this.dbt = newEntry; + } + } +} diff --git a/db/java/src/com/sleepycat/db/MessageHandler.java b/db/java/src/com/sleepycat/db/MessageHandler.java new file mode 100644 index 000000000..a28e35820 --- /dev/null +++ b/db/java/src/com/sleepycat/db/MessageHandler.java @@ -0,0 +1,14 @@ +/* + * - + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1997-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: MessageHandler.java,v 1.2 2004/04/20 20:45:11 mjc Exp $ + */ +package com.sleepycat.db; + +public interface MessageHandler { + void message(Environment dbenv, String message); +} diff --git a/db/java/src/com/sleepycat/db/MultipleDataEntry.java b/db/java/src/com/sleepycat/db/MultipleDataEntry.java new file mode 100644 index 000000000..17c2af473 --- /dev/null +++ b/db/java/src/com/sleepycat/db/MultipleDataEntry.java @@ -0,0 +1,57 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: MultipleDataEntry.java,v 1.2 2004/04/09 15:08:38 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; +import com.sleepycat.db.internal.DbUtil; + +public class MultipleDataEntry extends MultipleEntry { + public MultipleDataEntry() { + super(null, 0, 0); + } + + public MultipleDataEntry(final byte[] data) { + super(data, 0, (data == null) ? 0 : data.length); + } + + public MultipleDataEntry(final byte[] data, + final int offset, + final int size) { + super(data, offset, size); + } + + /* package */ + int getMultiFlag() { + return DbConstants.DB_MULTIPLE; + } + + public boolean next(final DatabaseEntry data) { + if (pos == 0) + pos = ulen - INT32SZ; + + final int dataoff = DbUtil.array2int(this.data, pos); + + // crack out the data offset and length. 
+ if (dataoff < 0) { + return (false); + } + + pos -= INT32SZ; + final int datasz = DbUtil.array2int(this.data, pos); + + pos -= INT32SZ; + + data.setData(this.data); + data.setSize(datasz); + data.setOffset(dataoff); + + return (true); + } +} diff --git a/db/java/src/com/sleepycat/db/MultipleEntry.java b/db/java/src/com/sleepycat/db/MultipleEntry.java new file mode 100644 index 000000000..f3025a5e5 --- /dev/null +++ b/db/java/src/com/sleepycat/db/MultipleEntry.java @@ -0,0 +1,28 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: MultipleEntry.java,v 1.4 2004/09/28 19:30:37 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; + +public abstract class MultipleEntry extends DatabaseEntry { + protected int pos; + + protected MultipleEntry(final byte[] data, final int offset, final int size) { + super(data, offset, size); + setUserBuffer(data.length - offset, true); + this.flags |= DbConstants.DB_DBT_USERMEM; + } + + public void setUserBuffer(final int length, final boolean usermem) { + if (!usermem) + throw new IllegalArgumentException("User buffer required"); + super.setUserBuffer(length, usermem); + } +} diff --git a/db/java/src/com/sleepycat/db/MultipleKeyDataEntry.java b/db/java/src/com/sleepycat/db/MultipleKeyDataEntry.java new file mode 100644 index 000000000..17234b640 --- /dev/null +++ b/db/java/src/com/sleepycat/db/MultipleKeyDataEntry.java @@ -0,0 +1,63 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: MultipleKeyDataEntry.java,v 1.1 2004/04/06 20:43:40 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; +import com.sleepycat.db.internal.DbUtil; + +public class MultipleKeyDataEntry extends MultipleEntry { + public MultipleKeyDataEntry() { + super(null, 0, 0); + } + + public MultipleKeyDataEntry(final byte[] data) { + super(data, 0, (data == null) ? 0 : data.length); + } + + public MultipleKeyDataEntry(final byte[] data, + final int offset, + final int size) { + super(data, offset, size); + } + + /* package */ + int getMultiFlag() { + return DbConstants.DB_MULTIPLE_KEY; + } + + public boolean next(final DatabaseEntry key, final DatabaseEntry data) { + if (pos == 0) + pos = ulen - INT32SZ; + + final int keyoff = DbUtil.array2int(this.data, pos); + + // crack out the key and data offsets and lengths. + if (keyoff < 0) + return false; + + pos -= INT32SZ; + final int keysz = DbUtil.array2int(this.data, pos); + pos -= INT32SZ; + final int dataoff = DbUtil.array2int(this.data, pos); + pos -= INT32SZ; + final int datasz = DbUtil.array2int(this.data, pos); + pos -= INT32SZ; + + key.setData(this.data); + key.setOffset(keyoff); + key.setSize(keysz); + + data.setData(this.data); + data.setOffset(dataoff); + data.setSize(datasz); + + return true; + } +} diff --git a/db/java/src/com/sleepycat/db/MultipleRecnoDataEntry.java b/db/java/src/com/sleepycat/db/MultipleRecnoDataEntry.java new file mode 100644 index 000000000..016c671d0 --- /dev/null +++ b/db/java/src/com/sleepycat/db/MultipleRecnoDataEntry.java @@ -0,0 +1,61 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. 
+* +* $Id: MultipleRecnoDataEntry.java,v 1.1 2004/04/06 20:43:40 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; +import com.sleepycat.db.internal.DbUtil; + +public class MultipleRecnoDataEntry extends MultipleEntry { + public MultipleRecnoDataEntry() { + super(null, 0, 0); + } + + public MultipleRecnoDataEntry(final byte[] data) { + super(data, 0, (data == null) ? 0 : data.length); + } + + public MultipleRecnoDataEntry(final byte[] data, + final int offset, + final int size) { + super(data, offset, size); + } + + /* package */ + int getMultiFlag() { + return DbConstants.DB_MULTIPLE_KEY; + } + + public boolean next(final DatabaseEntry recno, final DatabaseEntry data) { + if (pos == 0) + pos = ulen - INT32SZ; + + final int keyoff = DbUtil.array2int(this.data, pos); + + // crack out the key offset and the data offset and length. + if (keyoff < 0) + return false; + + pos -= INT32SZ; + final int dataoff = DbUtil.array2int(this.data, pos); + pos -= INT32SZ; + final int datasz = DbUtil.array2int(this.data, pos); + pos -= INT32SZ; + + recno.setData(this.data); + recno.setOffset(keyoff); + recno.setSize(INT32SZ); + + data.setData(this.data); + data.setOffset(dataoff); + data.setSize(datasz); + + return true; + } +} diff --git a/db/java/src/com/sleepycat/db/OperationStatus.java b/db/java/src/com/sleepycat/db/OperationStatus.java new file mode 100644 index 000000000..5739bc6e2 --- /dev/null +++ b/db/java/src/com/sleepycat/db/OperationStatus.java @@ -0,0 +1,54 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: OperationStatus.java,v 1.2 2004/04/21 01:09:09 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; +import com.sleepycat.db.internal.DbEnv; + +public final class OperationStatus { + public static final OperationStatus SUCCESS = + new OperationStatus("SUCCESS", 0); + public static final OperationStatus KEYEXIST = + new OperationStatus("KEYEXIST", DbConstants.DB_KEYEXIST); + public static final OperationStatus KEYEMPTY = + new OperationStatus("KEYEMPTY", DbConstants.DB_KEYEMPTY); + public static final OperationStatus NOTFOUND = + new OperationStatus("NOTFOUND", DbConstants.DB_NOTFOUND); + + /* package */ + static OperationStatus fromInt(final int errCode) { + switch(errCode) { + case 0: + return SUCCESS; + case DbConstants.DB_KEYEXIST: + return KEYEXIST; + case DbConstants.DB_KEYEMPTY: + return KEYEMPTY; + case DbConstants.DB_NOTFOUND: + return NOTFOUND; + default: + throw new IllegalArgumentException( + "Unknown error code: " + DbEnv.strerror(errCode)); + } + } + + /* For toString */ + private String statusName; + private int errCode; + + private OperationStatus(final String statusName, int errCode) { + this.statusName = statusName; + this.errCode = errCode; + } + + public String toString() { + return "OperationStatus." + statusName; + } +} diff --git a/db/java/src/com/sleepycat/db/PanicHandler.java b/db/java/src/com/sleepycat/db/PanicHandler.java new file mode 100644 index 000000000..7c9b838a8 --- /dev/null +++ b/db/java/src/com/sleepycat/db/PanicHandler.java @@ -0,0 +1,14 @@ +/* + * - + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1997-2004 + * Sleepycat Software. All rights reserved. 
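OperationStatus converts the handful of non-error return codes (0, DB_KEYEXIST, DB_KEYEMPTY, DB_NOTFOUND) into typed constants, so callers compare the result instead of catching exceptions for ordinary not-found cases. A small hypothetical sketch, assuming the Database.get method added elsewhere in this patch, which mirrors the SecondaryDatabase.get shown later in this section:

import com.sleepycat.db.*;

public class StatusExample {
    // Distinguish "record absent" from "record present" without exceptions.
    static boolean exists(Database db, Transaction txn, DatabaseEntry key)
        throws DatabaseException {
        DatabaseEntry data = new DatabaseEntry();
        OperationStatus status = db.get(txn, key, data, LockMode.DEFAULT);
        if (status == OperationStatus.SUCCESS)
            return true;
        if (status == OperationStatus.NOTFOUND ||
            status == OperationStatus.KEYEMPTY)
            return false;
        throw new IllegalStateException("unexpected status: " + status);
    }
}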
+ * + * $Id: PanicHandler.java,v 1.1 2004/04/06 20:43:40 mjc Exp $ + */ +package com.sleepycat.db; + +public interface PanicHandler { + void panic(Environment dbenv, DatabaseException e); +} diff --git a/db/java/src/com/sleepycat/db/PreparedTransaction.java b/db/java/src/com/sleepycat/db/PreparedTransaction.java new file mode 100644 index 000000000..36d103134 --- /dev/null +++ b/db/java/src/com/sleepycat/db/PreparedTransaction.java @@ -0,0 +1,30 @@ +/* + * - + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1999-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: PreparedTransaction.java,v 1.1 2004/04/06 20:43:40 mjc Exp $ + */ +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbTxn; + +public class PreparedTransaction { + private byte[] gid; + private Transaction txn; + + PreparedTransaction(final DbTxn txn, final byte[] gid) { + this.txn = new Transaction(txn); + this.gid = gid; + } + + public byte[] getGID() { + return gid; + } + + public Transaction getTransaction() { + return txn; + } +} diff --git a/db/java/src/com/sleepycat/db/QueueStats.java b/db/java/src/com/sleepycat/db/QueueStats.java new file mode 100644 index 000000000..10ad3f768 --- /dev/null +++ b/db/java/src/com/sleepycat/db/QueueStats.java @@ -0,0 +1,98 @@ +/*- + * DO NOT EDIT: automatically built by dist/s_java_stat. + * + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + */ + +package com.sleepycat.db; + +public class QueueStats extends DatabaseStats { + // no public constructor + protected QueueStats() {} + + private int qs_magic; + public int getMagic() { + return qs_magic; + } + + private int qs_version; + public int getVersion() { + return qs_version; + } + + private int qs_metaflags; + public int getMetaFlags() { + return qs_metaflags; + } + + private int qs_nkeys; + public int getNumKeys() { + return qs_nkeys; + } + + private int qs_ndata; + public int getNumData() { + return qs_ndata; + } + + private int qs_pagesize; + public int getPageSize() { + return qs_pagesize; + } + + private int qs_extentsize; + public int getExtentSize() { + return qs_extentsize; + } + + private int qs_pages; + public int getPages() { + return qs_pages; + } + + private int qs_re_len; + public int getReLen() { + return qs_re_len; + } + + private int qs_re_pad; + public int getRePad() { + return qs_re_pad; + } + + private int qs_pgfree; + public int getPagesFree() { + return qs_pgfree; + } + + private int qs_first_recno; + public int getFirstRecno() { + return qs_first_recno; + } + + private int qs_cur_recno; + public int getCurRecno() { + return qs_cur_recno; + } + + public String toString() { + return "QueueStats:" + + "\n qs_magic=" + qs_magic + + "\n qs_version=" + qs_version + + "\n qs_metaflags=" + qs_metaflags + + "\n qs_nkeys=" + qs_nkeys + + "\n qs_ndata=" + qs_ndata + + "\n qs_pagesize=" + qs_pagesize + + "\n qs_extentsize=" + qs_extentsize + + "\n qs_pages=" + qs_pages + + "\n qs_re_len=" + qs_re_len + + "\n qs_re_pad=" + qs_re_pad + + "\n qs_pgfree=" + qs_pgfree + + "\n qs_first_recno=" + qs_first_recno + + "\n qs_cur_recno=" + qs_cur_recno + ; + } +} diff --git a/db/java/src/com/sleepycat/db/RecordNumberAppender.java b/db/java/src/com/sleepycat/db/RecordNumberAppender.java new file mode 100644 index 000000000..4162a1ead --- /dev/null +++ b/db/java/src/com/sleepycat/db/RecordNumberAppender.java @@ -0,0 +1,15 @@ +/* + * - + * See the file LICENSE for redistribution information. 
+ * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: RecordNumberAppender.java,v 1.1 2004/04/06 20:43:40 mjc Exp $ + */ +package com.sleepycat.db; + +public interface RecordNumberAppender { + void appendRecordNumber(Database db, DatabaseEntry data, int recno) + throws DatabaseException; +} diff --git a/db/java/src/com/sleepycat/db/RecoveryOperation.java b/db/java/src/com/sleepycat/db/RecoveryOperation.java new file mode 100644 index 000000000..938eacd2e --- /dev/null +++ b/db/java/src/com/sleepycat/db/RecoveryOperation.java @@ -0,0 +1,56 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: RecoveryOperation.java,v 1.1 2004/04/21 01:09:09 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; + +public final class RecoveryOperation { + public static final RecoveryOperation BACKWARD_ROLL = + new RecoveryOperation("BACKWARD_ROLL", DbConstants.DB_TXN_BACKWARD_ROLL); + public static final RecoveryOperation FORWARD_ROLL = + new RecoveryOperation("FORWARD_ROLL", DbConstants.DB_TXN_FORWARD_ROLL); + public static final RecoveryOperation ABORT = + new RecoveryOperation("ABORT", DbConstants.DB_TXN_ABORT); + public static final RecoveryOperation APPLY = + new RecoveryOperation("APPLY", DbConstants.DB_TXN_APPLY); + public static final RecoveryOperation PRINT = + new RecoveryOperation("PRINT", DbConstants.DB_TXN_PRINT); + + private String operationName; + private int flag; + + private RecoveryOperation(String operationName, int flag) { + this.operationName = operationName; + this.flag = flag; + } + + public String toString() { + return "RecoveryOperation." + operationName; + } + + /* This is public only so it can be called from internal/DbEnv.java. */ + public static RecoveryOperation fromFlag(int flag) { + switch (flag) { + case DbConstants.DB_TXN_BACKWARD_ROLL: + return BACKWARD_ROLL; + case DbConstants.DB_TXN_FORWARD_ROLL: + return FORWARD_ROLL; + case DbConstants.DB_TXN_ABORT: + return ABORT; + case DbConstants.DB_TXN_APPLY: + return APPLY; + case DbConstants.DB_TXN_PRINT: + return PRINT; + default: + throw new IllegalArgumentException( + "Unknown recover operation: " + flag); + } + } +} diff --git a/db/java/src/com/sleepycat/db/ReplicationHandleDeadException.java b/db/java/src/com/sleepycat/db/ReplicationHandleDeadException.java new file mode 100644 index 000000000..cfcf92ab6 --- /dev/null +++ b/db/java/src/com/sleepycat/db/ReplicationHandleDeadException.java @@ -0,0 +1,20 @@ +/* + * - + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1997-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: ReplicationHandleDeadException.java,v 1.1 2004/09/23 17:56:39 mjc Exp $ + */ +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbEnv; + +public class ReplicationHandleDeadException extends DatabaseException { + protected ReplicationHandleDeadException(final String s, + final int errno, + final DbEnv dbenv) { + super(s, errno, dbenv); + } +} diff --git a/db/java/src/com/sleepycat/db/ReplicationStats.java b/db/java/src/com/sleepycat/db/ReplicationStats.java new file mode 100644 index 000000000..70c2bf032 --- /dev/null +++ b/db/java/src/com/sleepycat/db/ReplicationStats.java @@ -0,0 +1,278 @@ +/*- + * DO NOT EDIT: automatically built by dist/s_java_stat. + * + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. 
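RecordNumberAppender is the Java face of the DB->set_append_recno callback: when a record is appended to a Queue or Recno database, the application gets a chance to fold the allocated record number into the stored data. A hypothetical implementation that prepends the number in little-endian order; it would presumably be registered through a DatabaseConfig setter added elsewhere in this patch before the database is opened.

import com.sleepycat.db.*;

public class RecnoStamper implements RecordNumberAppender {
    // Called by the library on append: rewrite the data so its first four
    // bytes carry the record number just allocated.
    public void appendRecordNumber(Database db, DatabaseEntry data, int recno)
        throws DatabaseException {
        byte[] old = data.getData();
        int oldOff = data.getOffset();
        int oldLen = data.getSize();
        byte[] stamped = new byte[4 + oldLen];
        stamped[0] = (byte)recno;
        stamped[1] = (byte)(recno >>> 8);
        stamped[2] = (byte)(recno >>> 16);
        stamped[3] = (byte)(recno >>> 24);
        if (old != null)
            System.arraycopy(old, oldOff, stamped, 4, oldLen);
        data.setData(stamped);
        data.setOffset(0);
        data.setSize(stamped.length);
    }
}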
+ */ + +package com.sleepycat.db; + +public class ReplicationStats { + // no public constructor + protected ReplicationStats() {} + + private int st_status; + public int getStatus() { + return st_status; + } + + private LogSequenceNumber st_next_lsn; + public LogSequenceNumber getNextLsn() { + return st_next_lsn; + } + + private LogSequenceNumber st_waiting_lsn; + public LogSequenceNumber getWaitingLsn() { + return st_waiting_lsn; + } + + private int st_next_pg; + public int getNextPages() { + return st_next_pg; + } + + private int st_waiting_pg; + public int getWaitingPages() { + return st_waiting_pg; + } + + private int st_dupmasters; + public int getDupmasters() { + return st_dupmasters; + } + + private int st_env_id; + public int getEnvId() { + return st_env_id; + } + + private int st_env_priority; + public int getEnvPriority() { + return st_env_priority; + } + + private int st_gen; + public int getGen() { + return st_gen; + } + + private int st_egen; + public int getEgen() { + return st_egen; + } + + private int st_log_duplicated; + public int getLogDuplicated() { + return st_log_duplicated; + } + + private int st_log_queued; + public int getLogQueued() { + return st_log_queued; + } + + private int st_log_queued_max; + public int getLogQueuedMax() { + return st_log_queued_max; + } + + private int st_log_queued_total; + public int getLogQueuedTotal() { + return st_log_queued_total; + } + + private int st_log_records; + public int getLogRecords() { + return st_log_records; + } + + private int st_log_requested; + public int getLogRequested() { + return st_log_requested; + } + + private int st_master; + public int getMaster() { + return st_master; + } + + private int st_master_changes; + public int getMasterChanges() { + return st_master_changes; + } + + private int st_msgs_badgen; + public int getMsgsBadgen() { + return st_msgs_badgen; + } + + private int st_msgs_processed; + public int getMsgsProcessed() { + return st_msgs_processed; + } + + private int st_msgs_recover; + public int getMsgsRecover() { + return st_msgs_recover; + } + + private int st_msgs_send_failures; + public int getMsgsSendFailures() { + return st_msgs_send_failures; + } + + private int st_msgs_sent; + public int getMsgsSent() { + return st_msgs_sent; + } + + private int st_newsites; + public int getNewsites() { + return st_newsites; + } + + private int st_nsites; + public int getNumSites() { + return st_nsites; + } + + private int st_nthrottles; + public int getNumThrottles() { + return st_nthrottles; + } + + private int st_outdated; + public int getOutdated() { + return st_outdated; + } + + private int st_pg_duplicated; + public int getPagesDuplicated() { + return st_pg_duplicated; + } + + private int st_pg_records; + public int getPagesRecords() { + return st_pg_records; + } + + private int st_pg_requested; + public int getPagesRequested() { + return st_pg_requested; + } + + private int st_startup_complete; + public int getStartupComplete() { + return st_startup_complete; + } + + private int st_txns_applied; + public int getTxnsApplied() { + return st_txns_applied; + } + + private int st_elections; + public int getElections() { + return st_elections; + } + + private int st_elections_won; + public int getElectionsWon() { + return st_elections_won; + } + + private int st_election_cur_winner; + public int getElectionCurWinner() { + return st_election_cur_winner; + } + + private int st_election_gen; + public int getElectionGen() { + return st_election_gen; + } + + private LogSequenceNumber st_election_lsn; + public 
LogSequenceNumber getElectionLsn() { + return st_election_lsn; + } + + private int st_election_nsites; + public int getElectionNumSites() { + return st_election_nsites; + } + + private int st_election_nvotes; + public int getElectionNumVotes() { + return st_election_nvotes; + } + + private int st_election_priority; + public int getElectionPriority() { + return st_election_priority; + } + + private int st_election_status; + public int getElectionStatus() { + return st_election_status; + } + + private int st_election_tiebreaker; + public int getElectionTiebreaker() { + return st_election_tiebreaker; + } + + private int st_election_votes; + public int getElectionVotes() { + return st_election_votes; + } + + public String toString() { + return "ReplicationStats:" + + "\n st_status=" + st_status + + "\n st_next_lsn=" + st_next_lsn + + "\n st_waiting_lsn=" + st_waiting_lsn + + "\n st_next_pg=" + st_next_pg + + "\n st_waiting_pg=" + st_waiting_pg + + "\n st_dupmasters=" + st_dupmasters + + "\n st_env_id=" + st_env_id + + "\n st_env_priority=" + st_env_priority + + "\n st_gen=" + st_gen + + "\n st_egen=" + st_egen + + "\n st_log_duplicated=" + st_log_duplicated + + "\n st_log_queued=" + st_log_queued + + "\n st_log_queued_max=" + st_log_queued_max + + "\n st_log_queued_total=" + st_log_queued_total + + "\n st_log_records=" + st_log_records + + "\n st_log_requested=" + st_log_requested + + "\n st_master=" + st_master + + "\n st_master_changes=" + st_master_changes + + "\n st_msgs_badgen=" + st_msgs_badgen + + "\n st_msgs_processed=" + st_msgs_processed + + "\n st_msgs_recover=" + st_msgs_recover + + "\n st_msgs_send_failures=" + st_msgs_send_failures + + "\n st_msgs_sent=" + st_msgs_sent + + "\n st_newsites=" + st_newsites + + "\n st_nsites=" + st_nsites + + "\n st_nthrottles=" + st_nthrottles + + "\n st_outdated=" + st_outdated + + "\n st_pg_duplicated=" + st_pg_duplicated + + "\n st_pg_records=" + st_pg_records + + "\n st_pg_requested=" + st_pg_requested + + "\n st_startup_complete=" + st_startup_complete + + "\n st_txns_applied=" + st_txns_applied + + "\n st_elections=" + st_elections + + "\n st_elections_won=" + st_elections_won + + "\n st_election_cur_winner=" + st_election_cur_winner + + "\n st_election_gen=" + st_election_gen + + "\n st_election_lsn=" + st_election_lsn + + "\n st_election_nsites=" + st_election_nsites + + "\n st_election_nvotes=" + st_election_nvotes + + "\n st_election_priority=" + st_election_priority + + "\n st_election_status=" + st_election_status + + "\n st_election_tiebreaker=" + st_election_tiebreaker + + "\n st_election_votes=" + st_election_votes + ; + } +} diff --git a/db/java/src/com/sleepycat/db/ReplicationStatus.java b/db/java/src/com/sleepycat/db/ReplicationStatus.java new file mode 100644 index 000000000..64e4fd2c1 --- /dev/null +++ b/db/java/src/com/sleepycat/db/ReplicationStatus.java @@ -0,0 +1,121 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. 
+* +* $Id: ReplicationStatus.java,v 1.5 2004/08/17 20:04:42 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; +import com.sleepycat.db.internal.DbEnv; + +public final class ReplicationStatus { + static final ReplicationStatus SUCCESS = + new ReplicationStatus("SUCCESS", 0); + + private int errCode; + private DatabaseEntry cdata; + private int envid; + private LogSequenceNumber lsn; + + /* For toString */ + private String statusName; + + private ReplicationStatus(final String statusName, + final int errCode, + final DatabaseEntry cdata, + final int envid, + final LogSequenceNumber lsn) { + this.statusName = statusName; + this.errCode = errCode; + this.cdata = cdata; + this.envid = envid; + this.lsn = lsn; + } + + private ReplicationStatus(final String statusName, final int errCode) { + this(statusName, errCode, null, 0, null); + } + + public boolean isSuccess() { + return errCode == 0; + } + + public boolean isDupMaster() { + return errCode == DbConstants.DB_REP_DUPMASTER; + } + + public boolean isHoldElection() { + return errCode == DbConstants.DB_REP_HOLDELECTION; + } + + public boolean isPermanent() { + return errCode == DbConstants.DB_REP_ISPERM; + } + + public boolean isNewMaster() { + return errCode == DbConstants.DB_REP_NEWMASTER; + } + + public boolean isNewSite() { + return errCode == DbConstants.DB_REP_NEWSITE; + } + + public boolean isNotPermanent() { + return errCode == DbConstants.DB_REP_NOTPERM; + } + + public boolean isStartupDone() { + return errCode == DbConstants.DB_REP_STARTUPDONE; + } + + public DatabaseEntry getCData() { + return cdata; + } + + public int getEnvID() { + return envid; + } + + public LogSequenceNumber getLSN() { + return lsn; + } + + public String toString() { + return "ReplicationStatus." + statusName; + } + + /* package */ + static ReplicationStatus getStatus(final int errCode, + final DatabaseEntry cdata, + final int envid, + final LogSequenceNumber lsn) { + switch(errCode) { + case 0: + return SUCCESS; + case DbConstants.DB_REP_DUPMASTER: + return DUPMASTER; + case DbConstants.DB_REP_HOLDELECTION: + return HOLDELECTION; + case DbConstants.DB_REP_ISPERM: + return new ReplicationStatus("ISPERM", errCode, cdata, envid, lsn); + case DbConstants.DB_REP_NEWMASTER: + return new ReplicationStatus("NEWMASTER", errCode, cdata, envid, lsn); + case DbConstants.DB_REP_NEWSITE: + return new ReplicationStatus("NEWSITE", errCode, cdata, envid, lsn); + case DbConstants.DB_REP_NOTPERM: + return new ReplicationStatus("NOTPERM", errCode, cdata, envid, lsn); + default: + throw new IllegalArgumentException( + "Unknown error code: " + DbEnv.strerror(errCode)); + } + } + + private static final ReplicationStatus DUPMASTER = + new ReplicationStatus("DUPMASTER", DbConstants.DB_REP_DUPMASTER); + private static final ReplicationStatus HOLDELECTION = + new ReplicationStatus("HOLDELECTION", DbConstants.DB_REP_HOLDELECTION); +} diff --git a/db/java/src/com/sleepycat/db/ReplicationTransport.java b/db/java/src/com/sleepycat/db/ReplicationTransport.java new file mode 100644 index 000000000..72b28a73c --- /dev/null +++ b/db/java/src/com/sleepycat/db/ReplicationTransport.java @@ -0,0 +1,26 @@ +/* + * - + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2001-2004 + * Sleepycat Software. All rights reserved. 
+ * + * $Id: ReplicationTransport.java,v 1.3 2004/07/06 15:06:37 mjc Exp $ + */ +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; + +public interface ReplicationTransport { + int send(Environment dbenv, + DatabaseEntry control, + DatabaseEntry rec, + LogSequenceNumber lsn, + int envid, + boolean noBuffer, + boolean permanent) + throws DatabaseException; + + int EID_BROADCAST = DbConstants.DB_EID_BROADCAST; + int EID_INVALID = DbConstants.DB_EID_INVALID; +} diff --git a/db/java/src/com/sleepycat/db/RunRecoveryException.java b/db/java/src/com/sleepycat/db/RunRecoveryException.java new file mode 100644 index 000000000..640d81af8 --- /dev/null +++ b/db/java/src/com/sleepycat/db/RunRecoveryException.java @@ -0,0 +1,20 @@ +/* + * - + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1997-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: RunRecoveryException.java,v 1.1 2004/04/06 20:43:40 mjc Exp $ + */ +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbEnv; + +public class RunRecoveryException extends DatabaseException { + protected RunRecoveryException(final String s, + final int errno, + final DbEnv dbenv) { + super(s, errno, dbenv); + } +} diff --git a/db/java/src/com/sleepycat/db/SecondaryConfig.java b/db/java/src/com/sleepycat/db/SecondaryConfig.java new file mode 100644 index 000000000..e275e19e5 --- /dev/null +++ b/db/java/src/com/sleepycat/db/SecondaryConfig.java @@ -0,0 +1,91 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: SecondaryConfig.java,v 1.3 2004/08/06 21:56:40 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.Db; +import com.sleepycat.db.internal.DbConstants; +import com.sleepycat.db.internal.DbEnv; +import com.sleepycat.db.internal.DbTxn; + +public class SecondaryConfig extends DatabaseConfig implements Cloneable { + /* + * For internal use, to allow null as a valid value for + * the config parameter. + */ + public static final SecondaryConfig DEFAULT = new SecondaryConfig(); + + /* package */ + static SecondaryConfig checkNull(SecondaryConfig config) { + return (config == null) ? DEFAULT : config; + } + + private boolean allowPopulate; + private SecondaryKeyCreator keyCreator; + + public SecondaryConfig() { + } + + public void setKeyCreator(final SecondaryKeyCreator keyCreator) { + this.keyCreator = keyCreator; + } + + public SecondaryKeyCreator getKeyCreator() { + return keyCreator; + } + + public void setAllowPopulate(final boolean allowPopulate) { + this.allowPopulate = allowPopulate; + } + + public boolean getAllowPopulate() { + return allowPopulate; + } + + /* package */ + Db openSecondaryDatabase(final DbEnv dbenv, + final DbTxn txn, + final String fileName, + final String databaseName, + final Db primary) + throws DatabaseException, java.io.FileNotFoundException { + + int associateFlags = 0; + associateFlags |= allowPopulate ? DbConstants.DB_CREATE : 0; + if (getTransactional() && txn == null) + associateFlags |= DbConstants.DB_AUTO_COMMIT; + + final Db db = super.openDatabase(dbenv, txn, fileName, databaseName); + boolean succeeded = false; + try { + primary.associate(txn, db, keyCreator, associateFlags); + succeeded = true; + return db; + } finally { + if (!succeeded) + try { + db.close(0); + } catch (Throwable t) { + // Ignore it -- there is already an exception in flight. 
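ReplicationTransport, defined just above, is the callback through which the library hands outgoing replication messages to the application's own communication layer; returning 0 reports the message as sent (or safely queued), any non-zero value reports a send failure. A hypothetical skeleton that forwards the control and record buffers to application-defined networking helpers (broadcast and sendToSite are placeholders, not part of this patch):

import com.sleepycat.db.*;

public class AppTransport implements ReplicationTransport {
    public int send(Environment dbenv, DatabaseEntry control, DatabaseEntry rec,
                    LogSequenceNumber lsn, int envid,
                    boolean noBuffer, boolean permanent)
        throws DatabaseException {
        try {
            if (envid == EID_BROADCAST)
                broadcast(control, rec, permanent);          // hypothetical helper
            else
                sendToSite(envid, control, rec, permanent);  // hypothetical helper
            return 0;      // delivered or durably queued
        } catch (java.io.IOException e) {
            return 1;      // non-zero: tell the library the send failed
        }
    }

    private void broadcast(DatabaseEntry control, DatabaseEntry rec,
                           boolean permanent)
        throws java.io.IOException { /* application networking goes here */ }

    private void sendToSite(int envid, DatabaseEntry control, DatabaseEntry rec,
                            boolean permanent)
        throws java.io.IOException { /* application networking goes here */ }
}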
+ } + } + } + + /* package */ + SecondaryConfig(final Db db) + throws DatabaseException { + + super(db); + + // XXX: There is no way to find out whether allowPopulate was set. + allowPopulate = false; + keyCreator = db.get_seckey_create(); + } +} + diff --git a/db/java/src/com/sleepycat/db/SecondaryCursor.java b/db/java/src/com/sleepycat/db/SecondaryCursor.java new file mode 100644 index 000000000..886ea5f92 --- /dev/null +++ b/db/java/src/com/sleepycat/db/SecondaryCursor.java @@ -0,0 +1,250 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: SecondaryCursor.java,v 1.3 2004/04/21 01:09:09 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; +import com.sleepycat.db.internal.Dbc; + +public class SecondaryCursor extends Cursor { + /* package */ + SecondaryCursor(final SecondaryDatabase database, + final Dbc dbc, + final CursorConfig config) + throws DatabaseException { + + super(database, dbc, config); + } + + public SecondaryDatabase getSecondaryDatabase() { + return (SecondaryDatabase)super.getDatabase(); + } + + public Cursor dup(final boolean samePosition) + throws DatabaseException { + + return dupSecondary(samePosition); + } + + public SecondaryCursor dupSecondary(final boolean samePosition) + throws DatabaseException { + + return new SecondaryCursor(getSecondaryDatabase(), + dbc.dup(samePosition ? DbConstants.DB_POSITION : 0), config); + } + + public OperationStatus getCurrent(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.pget(key, pKey, data, + DbConstants.DB_CURRENT | LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getFirst(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.pget(key, pKey, data, + DbConstants.DB_FIRST | LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getLast(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.pget(key, pKey, data, + DbConstants.DB_LAST | LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getNext(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.pget(key, pKey, data, + DbConstants.DB_NEXT | LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getNextDup(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.pget(key, pKey, data, + DbConstants.DB_NEXT_DUP | LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getNextNoDup(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.pget(key, pKey, data, + DbConstants.DB_NEXT_NODUP | LockMode.getFlag(lockMode) | + ((data == null) ? 
0 : data.getMultiFlag()))); + } + + public OperationStatus getPrev(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.pget(key, pKey, data, + DbConstants.DB_PREV | LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getPrevDup(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + /* + * "Get the previous duplicate" isn't directly supported by the C API, + * so here's how to get it: dup the cursor and call getPrev, then dup + * the result and call getNextDup. If both succeed then there was a + * previous duplicate and the first dup is sitting on it. Keep that, + * and call getCurrent to fill in the user's buffers. + */ + Dbc dup1 = dbc.dup(DbConstants.DB_POSITION); + try { + int errCode = dup1.get(DatabaseEntry.IGNORE, DatabaseEntry.IGNORE, + DbConstants.DB_PREV | LockMode.getFlag(lockMode)); + if (errCode == 0) { + Dbc dup2 = dup1.dup(DbConstants.DB_POSITION); + try { + errCode = dup2.get(DatabaseEntry.IGNORE, + DatabaseEntry.IGNORE, + DbConstants.DB_NEXT_DUP | LockMode.getFlag(lockMode)); + } finally { + dup2.close(); + } + } + if (errCode == 0) + errCode = dup1.pget(key, pKey, data, + DbConstants.DB_CURRENT | LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag())); + if (errCode == 0) { + Dbc tdbc = dbc; + dbc = dup1; + dup1 = tdbc; + } + return OperationStatus.fromInt(errCode); + } finally { + dup1.close(); + } + } + + public OperationStatus getPrevNoDup(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.pget(key, pKey, data, + DbConstants.DB_PREV_NODUP | LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getRecordNumber(final DatabaseEntry secondaryRecno, + final DatabaseEntry primaryRecno, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.pget(DatabaseEntry.IGNORE, secondaryRecno, primaryRecno, + DbConstants.DB_GET_RECNO | LockMode.getFlag(lockMode))); + } + + public OperationStatus getSearchKey(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.pget(key, pKey, data, + DbConstants.DB_SET | LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getSearchKeyRange(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.pget(key, pKey, data, + DbConstants.DB_SET_RANGE | LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getSearchBoth(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.pget(key, pKey, data, + DbConstants.DB_GET_BOTH | LockMode.getFlag(lockMode) | + ((data == null) ? 
0 : data.getMultiFlag()))); + } + + public OperationStatus getSearchBothRange(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.pget(key, pKey, data, + DbConstants.DB_GET_BOTH_RANGE | LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getSearchRecordNumber( + final DatabaseEntry secondaryRecno, + final DatabaseEntry pKey, + final DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + dbc.pget(secondaryRecno, pKey, data, + DbConstants.DB_SET_RECNO | LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } +} diff --git a/db/java/src/com/sleepycat/db/SecondaryDatabase.java b/db/java/src/com/sleepycat/db/SecondaryDatabase.java new file mode 100644 index 000000000..3ea7c1398 --- /dev/null +++ b/db/java/src/com/sleepycat/db/SecondaryDatabase.java @@ -0,0 +1,106 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: SecondaryDatabase.java,v 1.3 2004/04/21 01:09:10 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.Db; +import com.sleepycat.db.internal.DbConstants; + +public class SecondaryDatabase extends Database { + private final Database primaryDatabase; + + /* package */ + SecondaryDatabase(final Db db, final Database primaryDatabase) + throws DatabaseException { + + super(db); + this.primaryDatabase = primaryDatabase; + } + + public SecondaryDatabase(final String fileName, + final String databaseName, + final Database primaryDatabase, + final SecondaryConfig config) + throws DatabaseException, java.io.FileNotFoundException { + + this(SecondaryConfig.checkNull(config).openSecondaryDatabase( + null, null, fileName, databaseName, primaryDatabase.db), + primaryDatabase); + } + + public Cursor openCursor(final Transaction txn, final CursorConfig config) + throws DatabaseException { + + return openSecondaryCursor(txn, config); + } + + public SecondaryCursor openSecondaryCursor(final Transaction txn, + final CursorConfig config) + throws DatabaseException { + + return new SecondaryCursor(this, + CursorConfig.checkNull(config).openCursor(db, + (txn == null) ? null : txn.txn), config); + } + + public Database getPrimaryDatabase() { + return primaryDatabase; + } + + public DatabaseConfig getConfig() + throws DatabaseException { + + return getSecondaryConfig(); + } + + public SecondaryConfig getSecondaryConfig() + throws DatabaseException { + + return new SecondaryConfig(db); + } + + public OperationStatus get(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + db.pget((txn == null) ? null : txn.txn, key, pKey, data, + LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } + + public OperationStatus getSearchBoth(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + db.pget((txn == null) ? null : txn.txn, key, pKey, data, + DbConstants.DB_GET_BOTH | LockMode.getFlag(lockMode) | + ((data == null) ? 
0 : data.getMultiFlag()))); + } + + public OperationStatus getSearchRecordNumber(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final LockMode lockMode) + throws DatabaseException { + + return OperationStatus.fromInt( + db.pget((txn == null) ? null : txn.txn, key, pKey, data, + DbConstants.DB_SET_RECNO | LockMode.getFlag(lockMode) | + ((data == null) ? 0 : data.getMultiFlag()))); + } +} diff --git a/db/java/src/com/sleepycat/db/SecondaryKeyCreator.java b/db/java/src/com/sleepycat/db/SecondaryKeyCreator.java new file mode 100644 index 000000000..10d17c56e --- /dev/null +++ b/db/java/src/com/sleepycat/db/SecondaryKeyCreator.java @@ -0,0 +1,18 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: SecondaryKeyCreator.java,v 1.1 2004/04/06 20:43:40 mjc Exp $ +*/ + +package com.sleepycat.db; + +public interface SecondaryKeyCreator { + boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry result) + throws DatabaseException; +} diff --git a/db/java/src/com/sleepycat/db/Sequence.java b/db/java/src/com/sleepycat/db/Sequence.java new file mode 100644 index 000000000..14a6d29dd --- /dev/null +++ b/db/java/src/com/sleepycat/db/Sequence.java @@ -0,0 +1,63 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2001-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Sequence.java,v 1.2 2004/09/28 19:30:37 mjc Exp $ + */ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; +import com.sleepycat.db.internal.DbSequence; + +public class Sequence { + private DbSequence seq; + private int autoCommitFlag; + + /* package */ + Sequence(final DbSequence seq, SequenceConfig config) + throws DatabaseException { + + this.seq = seq; + seq.wrapper = this; + if (seq.get_db().get_transactional()) + this.autoCommitFlag = DbConstants.DB_AUTO_COMMIT | + (SequenceConfig.checkNull(config).getAutoCommitNoSync() ? + DbConstants.DB_TXN_NOSYNC : 0); + } + + public void close() + throws DatabaseException { + + seq.close(0); + } + + public long get(Transaction txn, int delta) + throws DatabaseException { + + return seq.get((txn == null) ? null : txn.txn, delta, + (txn == null) ? autoCommitFlag : 0); + } + + public Database getDatabase() + throws DatabaseException { + + return seq.get_db().wrapper; + } + + public DatabaseEntry getKey() + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + seq.get_key(key); + return key; + } + + public SequenceStats getStats(StatsConfig config) + throws DatabaseException { + + return seq.stat(config.getFlags()); + } +} diff --git a/db/java/src/com/sleepycat/db/SequenceConfig.java b/db/java/src/com/sleepycat/db/SequenceConfig.java new file mode 100644 index 000000000..2901ac1e6 --- /dev/null +++ b/db/java/src/com/sleepycat/db/SequenceConfig.java @@ -0,0 +1,199 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. 
+* +* $Id: SequenceConfig.java,v 1.2 2004/09/23 17:56:39 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.Db; +import com.sleepycat.db.internal.DbConstants; +import com.sleepycat.db.internal.DbSequence; +import com.sleepycat.db.internal.DbTxn; + +public class SequenceConfig implements Cloneable { + /* + * For internal use, final to allow null as a valid value for + * the config parameter. + */ + public static final SequenceConfig DEFAULT = new SequenceConfig(); + + /* package */ + static SequenceConfig checkNull(SequenceConfig config) { + return (config == null) ? DEFAULT : config; + } + + /* Parameters */ + private int cacheSize = 0; + private long rangeMin = Long.MIN_VALUE; + private long rangeMax = Long.MAX_VALUE; + private long initialValue = 0L; + + /* Flags */ + private boolean allowCreate = false; + private boolean decrement = false; + private boolean exclusiveCreate = false; + private boolean autoCommitNoSync = false; + private boolean wrap = false; + + public SequenceConfig() { + } + + public void setAllowCreate(final boolean allowCreate) { + this.allowCreate = allowCreate; + } + + public boolean getAllowCreate() { + return allowCreate; + } + + public void setCacheSize(final int cacheSize) { + this.cacheSize = cacheSize; + } + + public int getCacheSize() { + return cacheSize; + } + + public void setDecrement(boolean decrement) { + this.decrement = decrement; + } + + public boolean getDecrement() { + return decrement; + } + + public void setExclusiveCreate(final boolean exclusiveCreate) { + this.exclusiveCreate = exclusiveCreate; + } + + public boolean getExclusiveCreate() { + return exclusiveCreate; + } + + public void setInitialValue(long initialValue) { + this.initialValue = initialValue; + } + + public long getInitialValue() { + return initialValue; + } + + public void setAutoCommitNoSync(final boolean autoCommitNoSync) { + this.autoCommitNoSync = autoCommitNoSync; + } + + public boolean getAutoCommitNoSync() { + return autoCommitNoSync; + } + + public void setRange(final long min, final long max) { + this.rangeMin = min; + this.rangeMax = max; + } + + public long getRangeMin() { + return rangeMin; + } + + public long getRangeMax() { + return rangeMax; + } + + public void setWrap(final boolean wrap) { + this.wrap = wrap; + } + + public boolean getWrap() { + return wrap; + } + + /* package */ + DbSequence createSequence(final Db db) + throws DatabaseException { + + int createFlags = 0; + + return new DbSequence(db, createFlags); + } + + /* package */ + DbSequence openSequence(final Db db, + final DbTxn txn, + final DatabaseEntry key) + throws DatabaseException { + + final DbSequence seq = createSequence(db); + // The DB_THREAD flag is inherited from the database + boolean threaded = ((db.get_open_flags() & DbConstants.DB_THREAD) != 0); + + int openFlags = 0; + openFlags |= allowCreate ? DbConstants.DB_CREATE : 0; + openFlags |= exclusiveCreate ? DbConstants.DB_EXCL : 0; + openFlags |= threaded ? DbConstants.DB_THREAD : 0; + + if (db.get_transactional() && txn == null) + openFlags |= DbConstants.DB_AUTO_COMMIT; + + configureSequence(seq, DEFAULT); + boolean succeeded = false; + try { + seq.open(txn, key, openFlags); + succeeded = true; + return seq; + } finally { + if (!succeeded) + try { + seq.close(0); + } catch (Throwable t) { + // Ignore it -- an exception is already in flight. 
+ } + } + } + + /* package */ + void configureSequence(final DbSequence seq, final SequenceConfig oldConfig) + throws DatabaseException { + + int seqFlags = 0; + seqFlags |= decrement ? DbConstants.DB_SEQ_DEC : DbConstants.DB_SEQ_INC; + seqFlags |= wrap ? DbConstants.DB_SEQ_WRAP : 0; + + if (seqFlags != 0) + seq.set_flags(seqFlags); + + if (rangeMin != oldConfig.rangeMin || rangeMax != oldConfig.rangeMax) + seq.set_range(rangeMin, rangeMax); + + if (initialValue != oldConfig.initialValue) + seq.initial_value(initialValue); + + if (cacheSize != oldConfig.cacheSize) + seq.set_cachesize(cacheSize); + } + + /* package */ + SequenceConfig(final DbSequence seq) + throws DatabaseException { + + // XXX: can't get open flags + final int openFlags = 0; + allowCreate = (openFlags & DbConstants.DB_CREATE) != 0; + exclusiveCreate = (openFlags & DbConstants.DB_EXCL) != 0; + + final int seqFlags = seq.get_flags(); + decrement = (seqFlags & DbConstants.DB_SEQ_DEC) != 0; + wrap = (seqFlags & DbConstants.DB_SEQ_WRAP) != 0; + + // XXX: can't get initial value + final long initialValue = 0; + + cacheSize = seq.get_cachesize(); + + rangeMin = seq.get_range_min(); + rangeMax = seq.get_range_max(); + } +} diff --git a/db/java/src/com/sleepycat/db/SequenceStats.java b/db/java/src/com/sleepycat/db/SequenceStats.java new file mode 100644 index 000000000..5c1e3a868 --- /dev/null +++ b/db/java/src/com/sleepycat/db/SequenceStats.java @@ -0,0 +1,74 @@ +/*- + * DO NOT EDIT: automatically built by dist/s_java_stat. + * + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + */ + +package com.sleepycat.db; + +public class SequenceStats { + // no public constructor + protected SequenceStats() {} + + private int st_wait; + public int getWait() { + return st_wait; + } + + private int st_nowait; + public int getNowait() { + return st_nowait; + } + + private long st_current; + public long getCurrent() { + return st_current; + } + + private long st_value; + public long getValue() { + return st_value; + } + + private long st_last_value; + public long getLastValue() { + return st_last_value; + } + + private long st_min; + public long getMin() { + return st_min; + } + + private long st_max; + public long getMax() { + return st_max; + } + + private int st_cache_size; + public int getCacheSize() { + return st_cache_size; + } + + private int st_flags; + public int getFlags() { + return st_flags; + } + + public String toString() { + return "SequenceStats:" + + "\n st_wait=" + st_wait + + "\n st_nowait=" + st_nowait + + "\n st_current=" + st_current + + "\n st_value=" + st_value + + "\n st_last_value=" + st_last_value + + "\n st_min=" + st_min + + "\n st_max=" + st_max + + "\n st_cache_size=" + st_cache_size + + "\n st_flags=" + st_flags + ; + } +} diff --git a/db/java/src/com/sleepycat/db/StatsConfig.java b/db/java/src/com/sleepycat/db/StatsConfig.java new file mode 100644 index 000000000..400407960 --- /dev/null +++ b/db/java/src/com/sleepycat/db/StatsConfig.java @@ -0,0 +1,56 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: StatsConfig.java,v 1.3 2004/04/21 01:09:10 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; + +public class StatsConfig { + /* + * For internal use, to allow null as a valid value for + * the config parameter. 
+ */ + public static final StatsConfig DEFAULT = new StatsConfig(); + + /* package */ + static StatsConfig checkNull(StatsConfig config) { + return (config == null) ? DEFAULT : config; + } + + private boolean clear = false; + private boolean fast = false; + + public StatsConfig() { + } + + public void setClear(boolean clear) { + this.clear = clear; + } + + public boolean getClear() { + return clear; + } + + public void setFast(boolean fast) { + this.fast = fast; + } + + public boolean getFast() { + return fast; + } + + int getFlags() { + int flags = 0; + if (fast) + flags |= DbConstants.DB_FAST_STAT; + if (clear) + flags |= DbConstants.DB_STAT_CLEAR; + return flags; + } +} diff --git a/db/java/src/com/sleepycat/db/Transaction.java b/db/java/src/com/sleepycat/db/Transaction.java new file mode 100644 index 000000000..7aacc86ed --- /dev/null +++ b/db/java/src/com/sleepycat/db/Transaction.java @@ -0,0 +1,75 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: Transaction.java,v 1.2 2004/04/21 01:09:10 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; +import com.sleepycat.db.internal.DbTxn; + +public class Transaction { + /*package */ final DbTxn txn; + + Transaction(final DbTxn txn) { + this.txn = txn; + } + + public void abort() + throws DatabaseException { + + txn.abort(); + } + + public void commit() + throws DatabaseException { + + txn.commit(0); + } + + public void commitSync() + throws DatabaseException { + + txn.commit(DbConstants.DB_TXN_SYNC); + } + + public void commitNoSync() + throws DatabaseException { + + txn.commit(DbConstants.DB_TXN_NOSYNC); + } + + public void discard() + throws DatabaseException { + + txn.discard(0); + } + + public int getId() + throws DatabaseException { + + return txn.id(); + } + + public void prepare(final byte[] gid) + throws DatabaseException { + + txn.prepare(gid); + } + + public void setTxnTimeout(final long timeOut) + throws DatabaseException { + + txn.set_timeout(timeOut, DbConstants.DB_SET_TXN_TIMEOUT); + } + + public void setLockTimeout(final long timeOut) + throws DatabaseException { + + txn.set_timeout(timeOut, DbConstants.DB_SET_LOCK_TIMEOUT); + } +} diff --git a/db/java/src/com/sleepycat/db/TransactionConfig.java b/db/java/src/com/sleepycat/db/TransactionConfig.java new file mode 100644 index 000000000..6d00225cc --- /dev/null +++ b/db/java/src/com/sleepycat/db/TransactionConfig.java @@ -0,0 +1,89 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TransactionConfig.java,v 1.3 2004/09/28 19:30:37 mjc Exp $ + */ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; +import com.sleepycat.db.internal.DbEnv; +import com.sleepycat.db.internal.DbTxn; + +public class TransactionConfig implements Cloneable { + /* + * For internal use, to allow null as a valid value for + * the config parameter. + */ + public static final TransactionConfig DEFAULT = new TransactionConfig(); + + /* package */ + static TransactionConfig checkNull(TransactionConfig config) { + return (config == null) ? 
DEFAULT : config; + } + + private boolean dirtyRead = false; + private boolean degree2 = false; + private boolean noSync = false; + private boolean noWait = false; + private boolean sync = false; + + public TransactionConfig() { + } + + public void setDegree2(final boolean degree2) { + this.degree2 = degree2; + } + + public boolean getDegree2() { + return degree2; + } + + public void setDirtyRead(final boolean dirtyRead) { + this.dirtyRead = dirtyRead; + } + + public boolean getDirtyRead() { + return dirtyRead; + } + + public void setNoSync(final boolean noSync) { + this.noSync = noSync; + } + + public boolean getNoSync() { + return noSync; + } + + public void setNoWait(final boolean noWait) { + this.noWait = noWait; + } + + public boolean getNoWait() { + return noWait; + } + + public void setSync(final boolean sync) { + this.sync = sync; + } + + public boolean getSync() { + return sync; + } + + DbTxn beginTransaction(final DbEnv dbenv, final DbTxn parent) + throws DatabaseException { + + int flags = 0; + flags |= degree2 ? DbConstants.DB_DEGREE_2 : 0; + flags |= dirtyRead ? DbConstants.DB_DIRTY_READ : 0; + flags |= noSync ? DbConstants.DB_TXN_NOSYNC : 0; + flags |= noWait ? DbConstants.DB_TXN_NOWAIT : 0; + flags |= sync ? DbConstants.DB_TXN_SYNC : 0; + + return dbenv.txn_begin(parent, flags); + } +} diff --git a/db/java/src/com/sleepycat/db/TransactionStats.java b/db/java/src/com/sleepycat/db/TransactionStats.java new file mode 100644 index 000000000..a872cde4c --- /dev/null +++ b/db/java/src/com/sleepycat/db/TransactionStats.java @@ -0,0 +1,147 @@ +/*- + * DO NOT EDIT: automatically built by dist/s_java_stat. + * + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + */ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbUtil; + +public class TransactionStats +{ + // no public constructor + protected TransactionStats() {} + + public static class Active { // no public constructor + protected Active() {} + + private int txnid; + public int getTxnId() { + return txnid; + } + + private int parentid; + public int getParentId() { + return parentid; + } + + private LogSequenceNumber lsn; + public LogSequenceNumber getLsn() { + return lsn; + } + + private int xa_status; + public int getXaStatus() { + return xa_status; + } + + private byte[] xid; + public byte[] getXId() { + return xid; + } + + public String toString() { + return "Active:" + + "\n txnid=" + txnid + + "\n parentid=" + parentid + + "\n lsn=" + lsn + + "\n xa_status=" + xa_status + + "\n xid=" + DbUtil.byteArrayToString(xid) + ; + } + }; + + private LogSequenceNumber st_last_ckp; + public LogSequenceNumber getLastCkp() { + return st_last_ckp; + } + + private long st_time_ckp; + public long getTimeCkp() { + return st_time_ckp; + } + + private int st_last_txnid; + public int getLastTxnId() { + return st_last_txnid; + } + + private int st_maxtxns; + public int getMaxTxns() { + return st_maxtxns; + } + + private int st_naborts; + public int getNaborts() { + return st_naborts; + } + + private int st_nbegins; + public int getNumBegins() { + return st_nbegins; + } + + private int st_ncommits; + public int getNumCommits() { + return st_ncommits; + } + + private int st_nactive; + public int getNactive() { + return st_nactive; + } + + private int st_nrestores; + public int getNumRestores() { + return st_nrestores; + } + + private int st_maxnactive; + public int getMaxNactive() { + return st_maxnactive; + } + + private Active[] st_txnarray; + public 
Active[] getTxnarray() { + return st_txnarray; + } + + private int st_region_wait; + public int getRegionWait() { + return st_region_wait; + } + + private int st_region_nowait; + public int getRegionNowait() { + return st_region_nowait; + } + + private int st_regsize; + public int getRegSize() { + return st_regsize; + } + + public String toString() { + return "TransactionStats:" + + "\n st_last_ckp=" + st_last_ckp + + "\n st_time_ckp=" + st_time_ckp + + "\n st_last_txnid=" + st_last_txnid + + "\n st_maxtxns=" + st_maxtxns + + "\n st_naborts=" + st_naborts + + "\n st_nbegins=" + st_nbegins + + "\n st_ncommits=" + st_ncommits + + "\n st_nactive=" + st_nactive + + "\n st_nrestores=" + st_nrestores + + "\n st_maxnactive=" + st_maxnactive + + "\n st_txnarray=" + DbUtil.objectArrayToString(st_txnarray, "st_txnarray") + + "\n st_region_wait=" + st_region_wait + + "\n st_region_nowait=" + st_region_nowait + + "\n st_regsize=" + st_regsize + ; + } +} +// end of TransactionStats.java diff --git a/db/java/src/com/sleepycat/db/VerifyConfig.java b/db/java/src/com/sleepycat/db/VerifyConfig.java new file mode 100644 index 000000000..0bb4031e0 --- /dev/null +++ b/db/java/src/com/sleepycat/db/VerifyConfig.java @@ -0,0 +1,81 @@ +/*- +* See the file LICENSE for redistribution information. +* +* Copyright (c) 2002-2004 +* Sleepycat Software. All rights reserved. +* +* $Id: VerifyConfig.java,v 1.3 2004/04/21 01:09:10 mjc Exp $ +*/ + +package com.sleepycat.db; + +import com.sleepycat.db.internal.DbConstants; + +public class VerifyConfig { + public static final VerifyConfig DEFAULT = new VerifyConfig(); + + /* package */ + static VerifyConfig checkNull(VerifyConfig config) { + return (config == null) ? DEFAULT : config; + } + + private boolean aggressive = false; + private boolean noOrderCheck = false; + private boolean orderCheckOnly = false; + private boolean salvage = false; + private boolean printable = false; + + public VerifyConfig() { + } + + public void setAggressive(final boolean aggressive) { + this.aggressive = aggressive; + } + + public boolean getAggressive() { + return aggressive; + } + + public void setNoOrderCheck(final boolean noOrderCheck) { + this.noOrderCheck = noOrderCheck; + } + + public boolean getNoOrderCheck() { + return noOrderCheck; + } + + public void setOrderCheckOnly(final boolean orderCheckOnly) { + this.orderCheckOnly = orderCheckOnly; + } + + public boolean getOrderCheckOnly() { + return orderCheckOnly; + } + + public void setPrintable(final boolean printable) { + this.printable = printable; + } + + public boolean getPrintable() { + return printable; + } + + public void setSalvage(final boolean salvage) { + this.salvage = salvage; + } + + public boolean getSalvage() { + return salvage; + } + + int getFlags() { + int flags = 0; + flags |= aggressive ? DbConstants.DB_AGGRESSIVE : 0; + flags |= noOrderCheck ? DbConstants.DB_NOORDERCHK : 0; + flags |= orderCheckOnly ? DbConstants.DB_ORDERCHKONLY : 0; + flags |= salvage ? DbConstants.DB_SALVAGE : 0; + flags |= printable ? DbConstants.DB_PRINTABLE : 0; + + return flags; + } +} diff --git a/db/java/src/com/sleepycat/db/internal/Db.java b/db/java/src/com/sleepycat/db/internal/Db.java new file mode 100644 index 000000000..0bee8f4c1 --- /dev/null +++ b/db/java/src/com/sleepycat/db/internal/Db.java @@ -0,0 +1,399 @@ +/* ---------------------------------------------------------------------------- + * This file was automatically generated by SWIG (http://www.swig.org).
+ * Version: 1.3.21 + * + * Do not make changes to this file unless you know what you are doing--modify + * the SWIG interface file instead. + * ----------------------------------------------------------------------------- */ + +package com.sleepycat.db.internal; + + +import com.sleepycat.db.*; +import java.util.Comparator; + +public class Db { + private long swigCPtr; + protected boolean swigCMemOwn; + + protected Db(long cPtr, boolean cMemoryOwn) { + swigCMemOwn = cMemoryOwn; + swigCPtr = cPtr; + } + + protected Db() { + this(0, false); + } + + /* package */ void delete() { + if(swigCPtr != 0 && swigCMemOwn) { + swigCMemOwn = false; + throw new UnsupportedOperationException("C++ destructor does not have public access"); + } + swigCPtr = 0; + } + + protected static long getCPtr(Db obj) { + return (obj == null) ? 0 : obj.swigCPtr; + } + + /* package */ static final int GIGABYTE = 1 << 30; + /* + * Internally, the JNI layer creates a global reference to each Db, + * which can potentially be different to this. We keep a copy here so + * we can clean up after destructors. + */ + private long db_ref; + private DbEnv dbenv; + private boolean private_dbenv; + + public Database wrapper; + private RecordNumberAppender append_recno_handler; + private Comparator bt_compare_handler; + private BtreePrefixCalculator bt_prefix_handler; + private Comparator dup_compare_handler; + private FeedbackHandler db_feedback_handler; + private Hasher h_hash_handler; + private SecondaryKeyCreator seckey_create_handler; + + /* Called by the Db constructor */ + private void initialize(DbEnv dbenv) { + if (dbenv == null) { + private_dbenv = true; + dbenv = db_java.getDbEnv0(this); + dbenv.initialize(); + } + this.dbenv = dbenv; + db_ref = db_java.initDbRef0(this, this); + } + + private void cleanup() { + swigCPtr = 0; + db_java.deleteRef0(db_ref); + db_ref = 0L; + if (private_dbenv) + dbenv.cleanup(); + dbenv = null; + } + + public synchronized void close(int flags) throws DatabaseException { + try { + close0(flags); + } finally { + cleanup(); + } + } + + public DbEnv get_env() throws DatabaseException { + return dbenv; + } + + private final void handle_append_recno(DatabaseEntry data, int recno) + throws DatabaseException { + append_recno_handler.appendRecordNumber(wrapper, data, recno); + } + + public RecordNumberAppender get_append_recno() throws com.sleepycat.db.DatabaseException { + return append_recno_handler; + } + + private final int handle_bt_compare(DatabaseEntry dbt1, DatabaseEntry dbt2) { + return bt_compare_handler.compare(dbt1, dbt2); + } + + public Comparator get_bt_compare() throws com.sleepycat.db.DatabaseException { + return bt_compare_handler; + } + + private final int handle_bt_prefix(DatabaseEntry dbt1, DatabaseEntry dbt2) { + return bt_prefix_handler.prefix(wrapper, dbt1, dbt2); + } + + public BtreePrefixCalculator get_bt_prefix() throws com.sleepycat.db.DatabaseException { + return bt_prefix_handler; + } + + private final void handle_db_feedback(int opcode, int percent) { + if (opcode == DbConstants.DB_UPGRADE) + db_feedback_handler.upgradeFeedback(wrapper, percent); + else if (opcode == DbConstants.DB_VERIFY) + db_feedback_handler.verifyFeedback(wrapper, percent); + /* No other database feedback types known.
*/ + } + + public FeedbackHandler get_feedback() throws com.sleepycat.db.DatabaseException { + return db_feedback_handler; + } + + private final int handle_dup_compare(DatabaseEntry dbt1, DatabaseEntry dbt2) { + return dup_compare_handler.compare(dbt1, dbt2); + } + + public Comparator get_dup_compare() throws com.sleepycat.db.DatabaseException { + return dup_compare_handler; + } + + private final int handle_h_hash(byte[] data, int len) { + return h_hash_handler.hash(wrapper, data, len); + } + + public Hasher get_h_hash() throws com.sleepycat.db.DatabaseException { + return h_hash_handler; + } + + private final int handle_seckey_create(DatabaseEntry key, DatabaseEntry data, DatabaseEntry result) + throws DatabaseException { + return seckey_create_handler.createSecondaryKey( + (SecondaryDatabase)wrapper, key, data, result) ? + 0 : DbConstants.DB_DONOTINDEX; + } + + public SecondaryKeyCreator get_seckey_create() throws com.sleepycat.db.DatabaseException { + return seckey_create_handler; + } + + public synchronized void remove(String file, String database, int flags) + throws DatabaseException, java.io.FileNotFoundException { + try { + remove0(file, database, flags); + } finally { + cleanup(); + } + } + + public synchronized void rename(String file, String database, + String newname, int flags) + throws DatabaseException, java.io.FileNotFoundException { + try { + rename0(file, database, newname, flags); + } finally { + cleanup(); + } + } + + public synchronized boolean verify(String file, String database, + java.io.OutputStream outfile, int flags) + throws DatabaseException, java.io.FileNotFoundException { + try { + return verify0(file, database, outfile, flags); + } finally { + cleanup(); + } + } + + public ErrorHandler get_errcall() /* no exception */ { + return dbenv.get_errcall(); + } + + public void set_errcall(ErrorHandler db_errcall_fcn) /* no exception */ { + dbenv.set_errcall(db_errcall_fcn); + } + + public MessageHandler get_msgcall() /* no exception */ { + return dbenv.get_msgcall(); + } + + public void set_msgcall(MessageHandler db_msgcall_fcn) /* no exception */ { + dbenv.set_msgcall(db_msgcall_fcn); + } + + public java.io.OutputStream get_error_stream() /* no exception */ { + return dbenv.get_error_stream(); + } + + public void set_error_stream(java.io.OutputStream stream) /* no exception */ { + dbenv.set_error_stream(stream); + } + + public java.io.OutputStream get_message_stream() /* no exception */ { + return dbenv.get_message_stream(); + } + + public void set_message_stream(java.io.OutputStream stream) /* no exception */ { + dbenv.set_message_stream(stream); + } + + public void set_paniccall(PanicHandler db_panic_fcn) + throws DatabaseException { + dbenv.set_paniccall(db_panic_fcn); + } + + public PanicHandler get_paniccall() throws com.sleepycat.db.DatabaseException { + return dbenv.get_paniccall(); + } + + public Db(DbEnv dbenv, int flags) throws com.sleepycat.db.DatabaseException { + this(db_javaJNI.new_Db(DbEnv.getCPtr(dbenv), flags), true); + initialize(dbenv); + } + + public void associate(DbTxn txnid, Db secondary, com.sleepycat.db.SecondaryKeyCreator callback, int flags) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_associate(swigCPtr, DbTxn.getCPtr(txnid), Db.getCPtr(secondary), (secondary.seckey_create_handler = callback) , flags); } + + /* package */ int close0(int flags) { + return db_javaJNI.Db_close0(swigCPtr, flags); + } + + public Dbc cursor(DbTxn txnid, int flags) throws com.sleepycat.db.DatabaseException { + long cPtr = 
db_javaJNI.Db_cursor(swigCPtr, DbTxn.getCPtr(txnid), flags); + return (cPtr == 0) ? null : new Dbc(cPtr, false); + } + + public int del(DbTxn txnid, com.sleepycat.db.DatabaseEntry key, int flags) throws com.sleepycat.db.DatabaseException { + return db_javaJNI.Db_del(swigCPtr, DbTxn.getCPtr(txnid), key, flags); + } + + public void err(int error, String message) /* no exception */ { + db_javaJNI.Db_err(swigCPtr, error, message); + } + + public void errx(String message) /* no exception */ { + db_javaJNI.Db_errx(swigCPtr, message); + } + + public boolean get_transactional() throws com.sleepycat.db.DatabaseException { return db_javaJNI.Db_get_transactional(swigCPtr); } + + public int get(DbTxn txnid, com.sleepycat.db.DatabaseEntry key, com.sleepycat.db.DatabaseEntry data, int flags) throws com.sleepycat.db.DatabaseException { + return db_javaJNI.Db_get(swigCPtr, DbTxn.getCPtr(txnid), key, data, flags); + } + + public boolean get_byteswapped() throws com.sleepycat.db.DatabaseException { return db_javaJNI.Db_get_byteswapped(swigCPtr); } + + public long get_cachesize() throws com.sleepycat.db.DatabaseException { + return db_javaJNI.Db_get_cachesize(swigCPtr); + } + + public int get_cachesize_ncache() throws com.sleepycat.db.DatabaseException { return db_javaJNI.Db_get_cachesize_ncache(swigCPtr); } + + public String get_filename() throws com.sleepycat.db.DatabaseException { + return db_javaJNI.Db_get_filename(swigCPtr); + } + + public String get_dbname() throws com.sleepycat.db.DatabaseException { + return db_javaJNI.Db_get_dbname(swigCPtr); + } + + public int get_encrypt_flags() throws com.sleepycat.db.DatabaseException { return db_javaJNI.Db_get_encrypt_flags(swigCPtr); } + + public String get_errpfx() /* no exception */ { + return db_javaJNI.Db_get_errpfx(swigCPtr); + } + + public int get_flags() throws com.sleepycat.db.DatabaseException { return db_javaJNI.Db_get_flags(swigCPtr); } + + public int get_lorder() throws com.sleepycat.db.DatabaseException { + return db_javaJNI.Db_get_lorder(swigCPtr); + } + + public DbMpoolFile get_mpf() throws com.sleepycat.db.DatabaseException { + long cPtr = db_javaJNI.Db_get_mpf(swigCPtr); + return (cPtr == 0) ? 
null : new DbMpoolFile(cPtr, false); + } + + public int get_open_flags() throws com.sleepycat.db.DatabaseException { return db_javaJNI.Db_get_open_flags(swigCPtr); } + + public int get_pagesize() throws com.sleepycat.db.DatabaseException { return db_javaJNI.Db_get_pagesize(swigCPtr); } + + public int get_bt_minkey() throws com.sleepycat.db.DatabaseException { return db_javaJNI.Db_get_bt_minkey(swigCPtr); } + + public int get_h_ffactor() throws com.sleepycat.db.DatabaseException { return db_javaJNI.Db_get_h_ffactor(swigCPtr); } + + public int get_h_nelem() throws com.sleepycat.db.DatabaseException { return db_javaJNI.Db_get_h_nelem(swigCPtr); } + + public int get_re_delim() throws com.sleepycat.db.DatabaseException { + return db_javaJNI.Db_get_re_delim(swigCPtr); + } + + public int get_re_len() throws com.sleepycat.db.DatabaseException { return db_javaJNI.Db_get_re_len(swigCPtr); } + + public int get_re_pad() throws com.sleepycat.db.DatabaseException { + return db_javaJNI.Db_get_re_pad(swigCPtr); + } + + public String get_re_source() throws com.sleepycat.db.DatabaseException { + return db_javaJNI.Db_get_re_source(swigCPtr); + } + + public int get_q_extentsize() throws com.sleepycat.db.DatabaseException { return db_javaJNI.Db_get_q_extentsize(swigCPtr); } + + public int get_type() throws com.sleepycat.db.DatabaseException { + return db_javaJNI.Db_get_type(swigCPtr); + } + + public Dbc join(Dbc[] curslist, int flags) throws com.sleepycat.db.DatabaseException { + long cPtr = db_javaJNI.Db_join(swigCPtr, curslist, flags); + return (cPtr == 0) ? null : new Dbc(cPtr, true); + } + + public void key_range(DbTxn txnid, com.sleepycat.db.DatabaseEntry key, com.sleepycat.db.KeyRange key_range, int flags) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_key_range(swigCPtr, DbTxn.getCPtr(txnid), key, key_range, flags); } + + public void open(DbTxn txnid, String file, String database, int type, int flags, int mode) throws com.sleepycat.db.DatabaseException, java.io.FileNotFoundException { db_javaJNI.Db_open(swigCPtr, DbTxn.getCPtr(txnid), file, database, type, flags, mode); } + + public int pget(DbTxn txnid, com.sleepycat.db.DatabaseEntry key, com.sleepycat.db.DatabaseEntry pkey, com.sleepycat.db.DatabaseEntry data, int flags) throws com.sleepycat.db.DatabaseException { + return db_javaJNI.Db_pget(swigCPtr, DbTxn.getCPtr(txnid), key, pkey, data, flags); + } + + public int put(DbTxn txnid, com.sleepycat.db.DatabaseEntry key, com.sleepycat.db.DatabaseEntry data, int flags) throws com.sleepycat.db.DatabaseException { + return db_javaJNI.Db_put(swigCPtr, DbTxn.getCPtr(txnid), key, data, flags); + } + + /* package */ void remove0(String file, String database, int flags) { db_javaJNI.Db_remove0(swigCPtr, file, database, flags); } + + /* package */ void rename0(String file, String database, String newname, int flags) { db_javaJNI.Db_rename0(swigCPtr, file, database, newname, flags); } + + public void set_append_recno(com.sleepycat.db.RecordNumberAppender db_append_recno_fcn) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_set_append_recno(swigCPtr, (append_recno_handler = db_append_recno_fcn) ); } + + public void set_bt_compare(java.util.Comparator bt_compare_fcn) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_set_bt_compare(swigCPtr, (bt_compare_handler = bt_compare_fcn) ); } + + public void set_bt_maxkey(int maxkey) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_set_bt_maxkey(swigCPtr, maxkey); } + + public void set_bt_minkey(int bt_minkey) throws 
com.sleepycat.db.DatabaseException { db_javaJNI.Db_set_bt_minkey(swigCPtr, bt_minkey); } + + public void set_bt_prefix(com.sleepycat.db.BtreePrefixCalculator bt_prefix_fcn) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_set_bt_prefix(swigCPtr, (bt_prefix_handler = bt_prefix_fcn) ); } + + public void set_cachesize(long bytes, int ncache) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_set_cachesize(swigCPtr, bytes, ncache); } + + public void set_dup_compare(java.util.Comparator dup_compare_fcn) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_set_dup_compare(swigCPtr, (dup_compare_handler = dup_compare_fcn) ); } + + public void set_encrypt(String passwd, int flags) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_set_encrypt(swigCPtr, passwd, flags); } + + public void set_errpfx(String errpfx) /* no exception */ { + db_javaJNI.Db_set_errpfx(swigCPtr, errpfx); + } + + public void set_feedback(com.sleepycat.db.FeedbackHandler db_feedback_fcn) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_set_feedback(swigCPtr, (db_feedback_handler = db_feedback_fcn) ); } + + public void set_flags(int flags) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_set_flags(swigCPtr, flags); } + + public void set_h_ffactor(int h_ffactor) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_set_h_ffactor(swigCPtr, h_ffactor); } + + public void set_h_hash(com.sleepycat.db.Hasher h_hash_fcn) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_set_h_hash(swigCPtr, (h_hash_handler = h_hash_fcn) ); } + + public void set_h_nelem(int h_nelem) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_set_h_nelem(swigCPtr, h_nelem); } + + public void set_lorder(int lorder) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_set_lorder(swigCPtr, lorder); } + + public void set_pagesize(long pagesize) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_set_pagesize(swigCPtr, pagesize); } + + public void set_re_delim(int re_delim) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_set_re_delim(swigCPtr, re_delim); } + + public void set_re_len(int re_len) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_set_re_len(swigCPtr, re_len); } + + public void set_re_pad(int re_pad) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_set_re_pad(swigCPtr, re_pad); } + + public void set_re_source(String source) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_set_re_source(swigCPtr, source); } + + public void set_q_extentsize(int extentsize) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_set_q_extentsize(swigCPtr, extentsize); } + + public Object stat(DbTxn txnid, int flags) throws com.sleepycat.db.DatabaseException { return db_javaJNI.Db_stat(swigCPtr, DbTxn.getCPtr(txnid), flags); } + + public void sync(int flags) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_sync(swigCPtr, flags); } + + public int truncate(DbTxn txnid, int flags) throws com.sleepycat.db.DatabaseException { + return db_javaJNI.Db_truncate(swigCPtr, DbTxn.getCPtr(txnid), flags); + } + + public void upgrade(String file, int flags) throws com.sleepycat.db.DatabaseException { db_javaJNI.Db_upgrade(swigCPtr, file, flags); } + + /* package */ boolean verify0(String file, String database, java.io.OutputStream outfile, int flags) { return db_javaJNI.Db_verify0(swigCPtr, file, database, outfile, flags); } + +} diff --git a/db/java/src/com/sleepycat/db/internal/DbClient.java b/db/java/src/com/sleepycat/db/internal/DbClient.java new file mode 100644 
index 000000000..28d586ecb --- /dev/null +++ b/db/java/src/com/sleepycat/db/internal/DbClient.java @@ -0,0 +1,17 @@ +/* + * - + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2003 + * Sleepycat Software. All rights reserved. + * + * $Id: DbClient.java,v 1.1 2004/04/06 20:43:40 mjc Exp $ + */ +package com.sleepycat.db.internal; + +/** + * The DbClient object is used to encapsulate a reference to an RPC + * client.
    + */ +public interface DbClient { +} diff --git a/db/java/src/com/sleepycat/db/internal/DbConstants.java b/db/java/src/com/sleepycat/db/internal/DbConstants.java new file mode 100644 index 000000000..0ad378cd6 --- /dev/null +++ b/db/java/src/com/sleepycat/db/internal/DbConstants.java @@ -0,0 +1,182 @@ +/* DO NOT EDIT: automatically built by dist/s_java_const. */ + +package com.sleepycat.db.internal; + +public interface DbConstants +{ + int DB_AFTER = 1; + int DB_AGGRESSIVE = 0x0000001; + int DB_APPEND = 2; + int DB_ARCH_ABS = 0x001; + int DB_ARCH_DATA = 0x002; + int DB_ARCH_LOG = 0x004; + int DB_ARCH_REMOVE = 0x008; + int DB_AUTO_COMMIT = 0x01000000; + int DB_BEFORE = 3; + int DB_BTREE = 1; + int DB_CDB_ALLDB = 0x00001000; + int DB_CHKSUM = 0x0000001; + int DB_CONSUME = 5; + int DB_CONSUME_WAIT = 6; + int DB_CREATE = 0x0000001; + int DB_CURRENT = 7; + int DB_DBT_MALLOC = 0x004; + int DB_DBT_PARTIAL = 0x008; + int DB_DBT_USERMEM = 0x020; + int DB_DEGREE_2 = 0x02000000; + int DB_DIRECT_DB = 0x00002000; + int DB_DIRECT_LOG = 0x00004000; + int DB_DIRTY_READ = 0x04000000; + int DB_DONOTINDEX = -30998; + int DB_DSYNC_LOG = 0x00008000; + int DB_DUP = 0x0000002; + int DB_DUPSORT = 0x0000004; + int DB_EID_BROADCAST = -1; + int DB_EID_INVALID = -2; + int DB_ENCRYPT = 0x0000008; + int DB_ENCRYPT_AES = 0x0000001; + int DB_EXCL = 0x0001000; + int DB_FAST_STAT = 8; + int DB_FIRST = 9; + int DB_FLUSH = 0x001; + int DB_FORCE = 0x0000004; + int DB_GET_BOTH = 10; + int DB_GET_BOTH_RANGE = 12; + int DB_GET_RECNO = 13; + int DB_HASH = 2; + int DB_INIT_CDB = 0x0001000; + int DB_INIT_LOCK = 0x0002000; + int DB_INIT_LOG = 0x0004000; + int DB_INIT_MPOOL = 0x0008000; + int DB_INIT_REP = 0x0010000; + int DB_INIT_TXN = 0x0020000; + int DB_INORDER = 0x0000010; + int DB_JOINENV = 0x0040000; + int DB_JOIN_ITEM = 14; + int DB_JOIN_NOSORT = 0x0000001; + int DB_KEYEMPTY = -30997; + int DB_KEYEXIST = -30996; + int DB_KEYFIRST = 15; + int DB_KEYLAST = 16; + int DB_LAST = 17; + int DB_LOCKDOWN = 0x0080000; + int DB_LOCK_DEFAULT = 1; + int DB_LOCK_EXPIRE = 2; + int DB_LOCK_GET = 1; + int DB_LOCK_GET_TIMEOUT = 2; + int DB_LOCK_IREAD = 5; + int DB_LOCK_IWR = 6; + int DB_LOCK_IWRITE = 4; + int DB_LOCK_MAXLOCKS = 3; + int DB_LOCK_MAXWRITE = 4; + int DB_LOCK_MINLOCKS = 5; + int DB_LOCK_MINWRITE = 6; + int DB_LOCK_NOTGRANTED = -30994; + int DB_LOCK_NOWAIT = 0x002; + int DB_LOCK_OLDEST = 7; + int DB_LOCK_PUT = 4; + int DB_LOCK_PUT_ALL = 5; + int DB_LOCK_PUT_OBJ = 6; + int DB_LOCK_RANDOM = 8; + int DB_LOCK_READ = 1; + int DB_LOCK_TIMEOUT = 8; + int DB_LOCK_WRITE = 2; + int DB_LOCK_YOUNGEST = 9; + int DB_LOG_AUTOREMOVE = 0x00010000; + int DB_LOG_INMEMORY = 0x00020000; + int DB_MPOOL_NOFILE = 0x001; + int DB_MPOOL_UNLINK = 0x002; + int DB_MULTIPLE = 0x08000000; + int DB_MULTIPLE_KEY = 0x10000000; + int DB_NEXT = 18; + int DB_NEXT_DUP = 19; + int DB_NEXT_NODUP = 20; + int DB_NODUPDATA = 21; + int DB_NOLOCKING = 0x00040000; + int DB_NOMMAP = 0x0000008; + int DB_NOORDERCHK = 0x0000002; + int DB_NOOVERWRITE = 22; + int DB_NOPANIC = 0x00080000; + int DB_NOSERVER_HOME = -30991; + int DB_NOSERVER_ID = -30990; + int DB_NOSYNC = 23; + int DB_NOTFOUND = -30989; + int DB_ORDERCHKONLY = 0x0000004; + int DB_OVERWRITE = 0x00100000; + int DB_PANIC_ENVIRONMENT = 0x00200000; + int DB_POSITION = 24; + int DB_PREV = 25; + int DB_PREV_NODUP = 26; + int DB_PRINTABLE = 0x0000020; + int DB_PRIORITY_DEFAULT = 3; + int DB_PRIORITY_HIGH = 4; + int DB_PRIORITY_LOW = 2; + int DB_PRIORITY_VERY_HIGH = 5; + int DB_PRIORITY_VERY_LOW = 1; + int DB_PRIVATE = 
0x0100000; + int DB_QUEUE = 4; + int DB_RDONLY = 0x0000010; + int DB_RECNO = 3; + int DB_RECNUM = 0x0000020; + int DB_RECOVER = 0x0000020; + int DB_RECOVER_FATAL = 0x0200000; + int DB_REGION_INIT = 0x00400000; + int DB_RENUMBER = 0x0000040; + int DB_REP_CLIENT = 0x001; + int DB_REP_DUPMASTER = -30986; + int DB_REP_HOLDELECTION = -30984; + int DB_REP_ISPERM = -30983; + int DB_REP_MASTER = 0x002; + int DB_REP_NEWMASTER = -30982; + int DB_REP_NEWSITE = -30981; + int DB_REP_NOBUFFER = 0x0000001; + int DB_REP_NOTPERM = -30980; + int DB_REP_PERMANENT = 0x0000002; + int DB_REP_STARTUPDONE = -30979; + int DB_REVSPLITOFF = 0x0000080; + int DB_RMW = 0x20000000; + int DB_RPCCLIENT = 0x0000001; + int DB_SALVAGE = 0x0000040; + int DB_SEQ_DEC = 0x00000001; + int DB_SEQ_INC = 0x00000002; + int DB_SEQ_WRAP = 0x00000008; + int DB_SET = 28; + int DB_SET_LOCK_TIMEOUT = 29; + int DB_SET_RANGE = 30; + int DB_SET_RECNO = 31; + int DB_SET_TXN_TIMEOUT = 33; + int DB_SNAPSHOT = 0x0000100; + int DB_STAT_CLEAR = 0x0000002; + int DB_SYSTEM_MEM = 0x0400000; + int DB_THREAD = 0x0000040; + int DB_TIME_NOTGRANTED = 0x00800000; + int DB_TRUNCATE = 0x0000080; + int DB_TXN_ABORT = 0; + int DB_TXN_APPLY = 1; + int DB_TXN_BACKWARD_ROLL = 3; + int DB_TXN_FORWARD_ROLL = 4; + int DB_TXN_NOSYNC = 0x0000100; + int DB_TXN_NOT_DURABLE = 0x0000200; + int DB_TXN_NOWAIT = 0x0001000; + int DB_TXN_PRINT = 7; + int DB_TXN_SYNC = 0x0002000; + int DB_TXN_WRITE_NOSYNC = 0x10000000; + int DB_UNKNOWN = 5; + int DB_UPGRADE = 0x0000001; + int DB_USE_ENVIRON = 0x0000400; + int DB_USE_ENVIRON_ROOT = 0x0000800; + int DB_VERB_DEADLOCK = 0x0001; + int DB_VERB_RECOVERY = 0x0002; + int DB_VERB_REPLICATION = 0x0004; + int DB_VERB_WAITSFOR = 0x0008; + int DB_VERIFY = 0x0000002; + int DB_VERSION_MAJOR = 4; + int DB_VERSION_MINOR = 3; + int DB_VERSION_PATCH = 14; + int DB_WRITECURSOR = 35; + int DB_XA_CREATE = 0x0000002; + int DB_XIDDATASIZE = 128; + int DB_YIELDCPU = 0x20000000; +} + +// end of DbConstants.java diff --git a/db/java/src/com/sleepycat/db/internal/DbEnv.java b/db/java/src/com/sleepycat/db/internal/DbEnv.java new file mode 100644 index 000000000..1a8112943 --- /dev/null +++ b/db/java/src/com/sleepycat/db/internal/DbEnv.java @@ -0,0 +1,434 @@ +/* ---------------------------------------------------------------------------- + * This file was automatically generated by SWIG (http://www.swig.org). + * Version: 1.3.21 + * + * Do not make changes to this file unless you know what you are doing--modify + * the SWIG interface file instead. + * ----------------------------------------------------------------------------- */ + +package com.sleepycat.db.internal; + + +import com.sleepycat.db.*; +import java.util.Comparator; + +public class DbEnv { + private long swigCPtr; + protected boolean swigCMemOwn; + + protected DbEnv(long cPtr, boolean cMemoryOwn) { + swigCMemOwn = cMemoryOwn; + swigCPtr = cPtr; + } + + protected DbEnv() { + this(0, false); + } + + /* package */ void delete() { + if(swigCPtr != 0 && swigCMemOwn) { + swigCMemOwn = false; + throw new UnsupportedOperationException("C++ destructor does not have public access"); + } + swigCPtr = 0; + } + + protected static long getCPtr(DbEnv obj) { + return (obj == null) ? 0 : obj.swigCPtr; + } + + /* + * Internally, the JNI layer creates a global reference to each DbEnv, + * which can potentially be different to this. We keep a copy here so + * we can clean up after destructors. 
+ */ + private long dbenv_ref; + public Environment wrapper; + + private LogRecordHandler app_dispatch_handler; + private FeedbackHandler env_feedback_handler; + private ErrorHandler error_handler; + private MessageHandler message_handler; + private PanicHandler panic_handler; + private ReplicationTransport rep_transport_handler; + private java.io.OutputStream error_stream; + private java.io.OutputStream message_stream; + + public static class RepProcessMessage { + public int envid; + } + + /* + * Called by the public DbEnv constructor and for private environments + * by the Db constructor. + */ + void initialize() { + dbenv_ref = db_java.initDbEnvRef0(this, this); + /* Start with System.err as the default error stream. */ + set_error_stream(System.err); + set_message_stream(System.out); + } + + void cleanup() { + swigCPtr = 0; + db_java.deleteRef0(dbenv_ref); + dbenv_ref = 0L; + } + + public synchronized void close(int flags) throws DatabaseException { + try { + close0(flags); + } finally { + cleanup(); + } + } + + private final int handle_app_dispatch(DatabaseEntry dbt, LogSequenceNumber lsn, int recops) { + return app_dispatch_handler.handleLogRecord(wrapper, dbt, lsn, RecoveryOperation.fromFlag(recops)); + } + + public LogRecordHandler get_app_dispatch() throws com.sleepycat.db.DatabaseException { + return app_dispatch_handler; + } + + private final void handle_env_feedback(int opcode, int percent) { + if (opcode == DbConstants.DB_RECOVER) + env_feedback_handler.recoveryFeedback(wrapper, percent); + /* No other environment feedback type supported. */ + } + + public FeedbackHandler get_feedback() throws com.sleepycat.db.DatabaseException { + return env_feedback_handler; + } + + private final void handle_error(String errpfx, String msg) { + error_handler.error(wrapper, errpfx, msg); + } + + public ErrorHandler get_errcall() /* no exception */ { + return error_handler; + } + + private final void handle_message(String msg) { + message_handler.message(wrapper, msg); + } + + public MessageHandler get_msgcall() /* no exception */ { + return message_handler; + } + + private final void handle_panic(DatabaseException e) { + panic_handler.panic(wrapper, e); + } + + public PanicHandler get_paniccall() throws com.sleepycat.db.DatabaseException { + return panic_handler; + } + + private final int handle_rep_transport(DatabaseEntry control, DatabaseEntry rec, + LogSequenceNumber lsn, int envid, int flags) + throws DatabaseException { + return rep_transport_handler.send(wrapper, control, rec, lsn, envid, + (flags & DbConstants.DB_REP_NOBUFFER) != 0, + (flags & DbConstants.DB_REP_PERMANENT) != 0); + } + + public void lock_vec(/*u_int32_t*/ int locker, int flags, + LockRequest[] list, int offset, int count) throws DatabaseException { + db_javaJNI.DbEnv_lock_vec(swigCPtr, locker, flags, list, + offset, count); + } + + public synchronized void remove(String db_home, int flags) + throws DatabaseException, java.io.FileNotFoundException { + try { + remove0(db_home, flags); + } finally { + cleanup(); + } + } + + public void set_error_stream(java.io.OutputStream stream) /* no exception */ { + error_stream = stream; + final java.io.PrintWriter pw = new java.io.PrintWriter(stream); + set_errcall(new ErrorHandler() { + public void error(Environment env, + String prefix, String buf) /* no exception */ { + if (prefix != null) + pw.print(prefix + ": "); + pw.println(buf); + pw.flush(); + } + }); + } + + public java.io.OutputStream get_error_stream() /* no exception */ { + return error_stream; + } + + + public void 
set_message_stream(java.io.OutputStream stream) /* no exception */ { + message_stream = stream; + final java.io.PrintWriter pw = new java.io.PrintWriter(stream); + set_msgcall(new MessageHandler() { + public void message(Environment env, String msg) /* no exception */ + /* no exception */ { + pw.println(msg); + pw.flush(); + } + }); + } + + public java.io.OutputStream get_message_stream() /* no exception */ { + return message_stream; + } + + public void set_tx_timestamp(java.util.Date timestamp) throws com.sleepycat.db.DatabaseException { + set_tx_timestamp0(timestamp.getTime()/1000); + } + + public DbEnv(int flags) throws com.sleepycat.db.DatabaseException { + this(db_javaJNI.new_DbEnv(flags), true); + initialize(); + } + + /* package */ void close0(int flags) { db_javaJNI.DbEnv_close0(swigCPtr, flags); } + + public void dbremove(DbTxn txnid, String file, String database, int flags) throws com.sleepycat.db.DatabaseException, java.io.FileNotFoundException { db_javaJNI.DbEnv_dbremove(swigCPtr, DbTxn.getCPtr(txnid), file, database, flags); } + + public void dbrename(DbTxn txnid, String file, String database, String newname, int flags) throws com.sleepycat.db.DatabaseException, java.io.FileNotFoundException { db_javaJNI.DbEnv_dbrename(swigCPtr, DbTxn.getCPtr(txnid), file, database, newname, flags); } + + public void err(int error, String message) /* no exception */ { + db_javaJNI.DbEnv_err(swigCPtr, error, message); + } + + public void errx(String message) /* no exception */ { + db_javaJNI.DbEnv_errx(swigCPtr, message); + } + + public String[] get_data_dirs() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_get_data_dirs(swigCPtr); } + + public int get_encrypt_flags() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_get_encrypt_flags(swigCPtr); } + + public String get_errpfx() /* no exception */ { + return db_javaJNI.DbEnv_get_errpfx(swigCPtr); + } + + public int get_flags() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_get_flags(swigCPtr); } + + public String get_home() throws com.sleepycat.db.DatabaseException { + return db_javaJNI.DbEnv_get_home(swigCPtr); + } + + public int get_open_flags() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_get_open_flags(swigCPtr); } + + public long get_shm_key() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_get_shm_key(swigCPtr); } + + public int get_tas_spins() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_get_tas_spins(swigCPtr); } + + public String get_tmp_dir() throws com.sleepycat.db.DatabaseException { + return db_javaJNI.DbEnv_get_tmp_dir(swigCPtr); + } + + public boolean get_verbose(int which) throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_get_verbose(swigCPtr, which); } + + public void open(String db_home, int flags, int mode) throws com.sleepycat.db.DatabaseException, java.io.FileNotFoundException { db_javaJNI.DbEnv_open(swigCPtr, db_home, flags, mode); } + + /* package */ void remove0(String db_home, int flags) { db_javaJNI.DbEnv_remove0(swigCPtr, db_home, flags); } + + public void set_cachesize(long bytes, int ncache) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_cachesize(swigCPtr, bytes, ncache); } + + public void set_data_dir(String dir) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_data_dir(swigCPtr, dir); } + + public void set_encrypt(String passwd, int flags) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_encrypt(swigCPtr, passwd, flags); } 
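For orientation, the internal wrappers added above (DbEnv, Db, DbConstants) can be exercised roughly as sketched below. This is an illustrative sketch only, not part of the patch: the class name, environment home, database file name, and key/data values are invented, and applications would normally use the public com.sleepycat.db classes, which translate the raw error codes returned by this layer (for example DbConstants.DB_NOTFOUND) into OperationStatus values via OperationStatus.fromInt.

    import com.sleepycat.db.DatabaseEntry;
    import com.sleepycat.db.internal.Db;
    import com.sleepycat.db.internal.DbConstants;
    import com.sleepycat.db.internal.DbEnv;

    public class InternalApiSketch {                      // hypothetical demo class, not in the patch
        public static void main(String[] args) throws Exception {
            DbEnv dbenv = new DbEnv(0);
            dbenv.set_error_stream(System.err);           // wrapped into an ErrorHandler, as shown above
            dbenv.open("/tmp/db-env",                     // assumed pre-existing home directory
                DbConstants.DB_CREATE | DbConstants.DB_INIT_MPOOL, 0);

            Db db = new Db(dbenv, 0);
            db.open(null, "demo.db", null, DbConstants.DB_BTREE, DbConstants.DB_CREATE, 0);

            DatabaseEntry key = new DatabaseEntry("alpha".getBytes());
            DatabaseEntry data = new DatabaseEntry("one".getBytes());
            db.put(null, key, data, 0);                   // no transaction, no flags

            DatabaseEntry found = new DatabaseEntry();
            int err = db.get(null, key, found, 0);        // 0 on success, DbConstants.DB_NOTFOUND if absent
            if (err == 0)
                System.out.println(new String(found.getData()));

            db.close(0);
            dbenv.close(0);
        }
    }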
+ + public void set_errcall(com.sleepycat.db.ErrorHandler db_errcall_fcn) /* no exception */ { + db_javaJNI.DbEnv_set_errcall(swigCPtr, (error_handler = db_errcall_fcn) ); + } + + public void set_errpfx(String errpfx) /* no exception */ { + db_javaJNI.DbEnv_set_errpfx(swigCPtr, errpfx); + } + + public void set_flags(int flags, boolean onoff) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_flags(swigCPtr, flags, onoff); } + + public void set_feedback(com.sleepycat.db.FeedbackHandler env_feedback_fcn) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_feedback(swigCPtr, (env_feedback_handler = env_feedback_fcn) ); } + + public void set_mp_mmapsize(long mp_mmapsize) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_mp_mmapsize(swigCPtr, mp_mmapsize); } + + public void set_msgcall(com.sleepycat.db.MessageHandler db_msgcall_fcn) /* no exception */ { + db_javaJNI.DbEnv_set_msgcall(swigCPtr, (message_handler = db_msgcall_fcn) ); + } + + public void set_paniccall(com.sleepycat.db.PanicHandler db_panic_fcn) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_paniccall(swigCPtr, (panic_handler = db_panic_fcn) ); } + + public void set_rpc_server(DbClient client, String host, long cl_timeout, long sv_timeout, int flags) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_rpc_server(swigCPtr, client, host, cl_timeout, sv_timeout, flags); } + + public void set_shm_key(long shm_key) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_shm_key(swigCPtr, shm_key); } + + public void set_tas_spins(int tas_spins) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_tas_spins(swigCPtr, tas_spins); } + + public void set_timeout(long timeout, int flags) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_timeout(swigCPtr, timeout, flags); } + + public void set_tmp_dir(String dir) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_tmp_dir(swigCPtr, dir); } + + public void set_tx_max(int max) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_tx_max(swigCPtr, max); } + + public void set_app_dispatch(com.sleepycat.db.LogRecordHandler tx_recover) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_app_dispatch(swigCPtr, (app_dispatch_handler = tx_recover) ); } + + /* package */ void set_tx_timestamp0(long timestamp) { db_javaJNI.DbEnv_set_tx_timestamp0(swigCPtr, timestamp); } + + public void set_verbose(int which, boolean onoff) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_verbose(swigCPtr, which, onoff); } + + public byte[][] get_lk_conflicts() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_get_lk_conflicts(swigCPtr); } + + public int get_lk_detect() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_get_lk_detect(swigCPtr); } + + public int get_lk_max_locks() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_get_lk_max_locks(swigCPtr); } + + public int get_lk_max_lockers() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_get_lk_max_lockers(swigCPtr); } + + public int get_lk_max_objects() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_get_lk_max_objects(swigCPtr); } + + public int lock_detect(int flags, int atype) throws com.sleepycat.db.DatabaseException { + return db_javaJNI.DbEnv_lock_detect(swigCPtr, flags, atype); + } + + public DbLock lock_get(int locker, int flags, com.sleepycat.db.DatabaseEntry object, int lock_mode) throws 
com.sleepycat.db.DatabaseException { + long cPtr = db_javaJNI.DbEnv_lock_get(swigCPtr, locker, flags, object, lock_mode); + return (cPtr == 0) ? null : new DbLock(cPtr, true); + } + + public int lock_id() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_lock_id(swigCPtr); } + + public void lock_id_free(int id) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_lock_id_free(swigCPtr, id); } + + public void lock_put(DbLock lock) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_lock_put(swigCPtr, DbLock.getCPtr(lock)); } + + public com.sleepycat.db.LockStats lock_stat(int flags) throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_lock_stat(swigCPtr, flags); } + + public void set_lk_conflicts(byte[][] conflicts) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_lk_conflicts(swigCPtr, conflicts); } + + public void set_lk_detect(int detect) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_lk_detect(swigCPtr, detect); } + + public void set_lk_max_lockers(int max) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_lk_max_lockers(swigCPtr, max); } + + public void set_lk_max_locks(int max) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_lk_max_locks(swigCPtr, max); } + + public void set_lk_max_objects(int max) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_lk_max_objects(swigCPtr, max); } + + public int get_lg_bsize() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_get_lg_bsize(swigCPtr); } + + public String get_lg_dir() throws com.sleepycat.db.DatabaseException { + return db_javaJNI.DbEnv_get_lg_dir(swigCPtr); + } + + public int get_lg_max() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_get_lg_max(swigCPtr); } + + public int get_lg_regionmax() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_get_lg_regionmax(swigCPtr); } + + public String[] log_archive(int flags) throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_log_archive(swigCPtr, flags); } + + public static int log_compare(com.sleepycat.db.LogSequenceNumber lsn0, com.sleepycat.db.LogSequenceNumber lsn1) /* no exception */ { + return db_javaJNI.DbEnv_log_compare(lsn0, lsn1); + } + + public DbLogc log_cursor(int flags) throws com.sleepycat.db.DatabaseException { + long cPtr = db_javaJNI.DbEnv_log_cursor(swigCPtr, flags); + return (cPtr == 0) ? 
null : new DbLogc(cPtr, true); + } + + public String log_file(com.sleepycat.db.LogSequenceNumber lsn) throws com.sleepycat.db.DatabaseException { + return db_javaJNI.DbEnv_log_file(swigCPtr, lsn); + } + + public void log_flush(com.sleepycat.db.LogSequenceNumber lsn) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_log_flush(swigCPtr, lsn); } + + public void log_put(com.sleepycat.db.LogSequenceNumber lsn, com.sleepycat.db.DatabaseEntry data, int flags) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_log_put(swigCPtr, lsn, data, flags); } + + public com.sleepycat.db.LogStats log_stat(int flags) throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_log_stat(swigCPtr, flags); } + + public void set_lg_bsize(int lg_bsize) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_lg_bsize(swigCPtr, lg_bsize); } + + public void set_lg_dir(String dir) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_lg_dir(swigCPtr, dir); } + + public void set_lg_max(int lg_max) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_lg_max(swigCPtr, lg_max); } + + public void set_lg_regionmax(int lg_regionmax) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_lg_regionmax(swigCPtr, lg_regionmax); } + + public long get_cachesize() throws com.sleepycat.db.DatabaseException { + return db_javaJNI.DbEnv_get_cachesize(swigCPtr); + } + + public int get_cachesize_ncache() throws com.sleepycat.db.DatabaseException { + return db_javaJNI.DbEnv_get_cachesize_ncache(swigCPtr); + } + + public long get_mp_mmapsize() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_get_mp_mmapsize(swigCPtr); } + + public com.sleepycat.db.CacheStats memp_stat(int flags) throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_memp_stat(swigCPtr, flags); } + + public com.sleepycat.db.CacheFileStats[] memp_fstat(int flags) throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_memp_fstat(swigCPtr, flags); } + + public int memp_trickle(int percent) throws com.sleepycat.db.DatabaseException { + return db_javaJNI.DbEnv_memp_trickle(swigCPtr, percent); + } + + public int get_tx_max() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_get_tx_max(swigCPtr); } + + public long get_tx_timestamp() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_get_tx_timestamp(swigCPtr); } + + public long get_timeout(int flag) throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_get_timeout(swigCPtr, flag); } + + public DbTxn txn_begin(DbTxn parent, int flags) throws com.sleepycat.db.DatabaseException { + long cPtr = db_javaJNI.DbEnv_txn_begin(swigCPtr, DbTxn.getCPtr(parent), flags); + return (cPtr == 0) ? 
null : new DbTxn(cPtr, false); + } + + public void txn_checkpoint(int kbyte, int min, int flags) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_txn_checkpoint(swigCPtr, kbyte, min, flags); } + + public com.sleepycat.db.PreparedTransaction[] txn_recover(int count, int flags) throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_txn_recover(swigCPtr, count, flags); } + + public com.sleepycat.db.TransactionStats txn_stat(int flags) throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_txn_stat(swigCPtr, flags); } + + public long get_rep_limit() throws com.sleepycat.db.DatabaseException { + return db_javaJNI.DbEnv_get_rep_limit(swigCPtr); + } + + public int rep_elect(int nsites, int nvotes, int priority, int timeout, int flags) throws com.sleepycat.db.DatabaseException { + return db_javaJNI.DbEnv_rep_elect(swigCPtr, nsites, nvotes, priority, timeout, flags); + } + + public int rep_process_message(com.sleepycat.db.DatabaseEntry control, com.sleepycat.db.DatabaseEntry rec, DbEnv.RepProcessMessage envid, com.sleepycat.db.LogSequenceNumber ret_lsn) /* no exception */ { + return db_javaJNI.DbEnv_rep_process_message(swigCPtr, control, rec, envid, ret_lsn); + } + + public void rep_start(com.sleepycat.db.DatabaseEntry cdata, int flags) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_rep_start(swigCPtr, cdata, flags); } + + public com.sleepycat.db.ReplicationStats rep_stat(int flags) throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbEnv_rep_stat(swigCPtr, flags); } + + public void set_rep_limit(long bytes) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_rep_limit(swigCPtr, bytes); } + + public void set_rep_transport(int envid, com.sleepycat.db.ReplicationTransport send) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbEnv_set_rep_transport(swigCPtr, envid, (rep_transport_handler = send) ); } + + public static String strerror(int error) /* no exception */ { + return db_javaJNI.DbEnv_strerror(error); + } + + public static int get_version_major() /* no exception */ { + return db_javaJNI.DbEnv_get_version_major(); + } + + public static int get_version_minor() /* no exception */ { + return db_javaJNI.DbEnv_get_version_minor(); + } + + public static int get_version_patch() /* no exception */ { + return db_javaJNI.DbEnv_get_version_patch(); + } + + public static String get_version_string() /* no exception */ { + return db_javaJNI.DbEnv_get_version_string(); + } + +} diff --git a/db/java/src/com/sleepycat/db/internal/DbLock.java b/db/java/src/com/sleepycat/db/internal/DbLock.java new file mode 100644 index 000000000..723629908 --- /dev/null +++ b/db/java/src/com/sleepycat/db/internal/DbLock.java @@ -0,0 +1,51 @@ +/* ---------------------------------------------------------------------------- + * This file was automatically generated by SWIG (http://www.swig.org). + * Version: 1.3.21 + * + * Do not make changes to this file unless you know what you are doing--modify + * the SWIG interface file instead. 
+ * ----------------------------------------------------------------------------- */ + +package com.sleepycat.db.internal; + + +import com.sleepycat.db.*; +import java.util.Comparator; + +public class DbLock { + private long swigCPtr; + protected boolean swigCMemOwn; + + protected DbLock(long cPtr, boolean cMemoryOwn) { + swigCMemOwn = cMemoryOwn; + swigCPtr = cPtr; + } + + protected DbLock() { + this(0, false); + } + + protected void finalize() { + try { + delete(); + } catch(Exception e) { + System.err.println("Exception during finalization: " + e); + e.printStackTrace(System.err); + } + } + + /* package */ void delete() { + if(swigCPtr != 0 && swigCMemOwn) { + swigCMemOwn = false; + db_javaJNI.delete_DbLock(swigCPtr); + } + swigCPtr = 0; + } + + protected static long getCPtr(DbLock obj) { + return (obj == null) ? 0 : obj.swigCPtr; + } + + public Lock wrapper; + +} diff --git a/db/java/src/com/sleepycat/db/internal/DbLogc.java b/db/java/src/com/sleepycat/db/internal/DbLogc.java new file mode 100644 index 000000000..d26d95f60 --- /dev/null +++ b/db/java/src/com/sleepycat/db/internal/DbLogc.java @@ -0,0 +1,54 @@ +/* ---------------------------------------------------------------------------- + * This file was automatically generated by SWIG (http://www.swig.org). + * Version: 1.3.21 + * + * Do not make changes to this file unless you know what you are doing--modify + * the SWIG interface file instead. + * ----------------------------------------------------------------------------- */ + +package com.sleepycat.db.internal; + + +import com.sleepycat.db.*; +import java.util.Comparator; + +public class DbLogc { + private long swigCPtr; + protected boolean swigCMemOwn; + + protected DbLogc(long cPtr, boolean cMemoryOwn) { + swigCMemOwn = cMemoryOwn; + swigCPtr = cPtr; + } + + protected DbLogc() { + this(0, false); + } + + /* package */ void delete() { + if(swigCPtr != 0 && swigCMemOwn) { + swigCMemOwn = false; + throw new UnsupportedOperationException("C++ destructor does not have public access"); + } + swigCPtr = 0; + } + + protected static long getCPtr(DbLogc obj) { + return (obj == null) ? 0 : obj.swigCPtr; + } + + public synchronized void close(int flags) throws DatabaseException { + try { + close0(flags); + } finally { + swigCPtr = 0; + } + } + + /* package */ void close0(int flags) { db_javaJNI.DbLogc_close0(swigCPtr, flags); } + + public int get(com.sleepycat.db.LogSequenceNumber lsn, com.sleepycat.db.DatabaseEntry data, int flags) throws com.sleepycat.db.DatabaseException { + return db_javaJNI.DbLogc_get(swigCPtr, lsn, data, flags); + } + +} diff --git a/db/java/src/com/sleepycat/db/internal/DbMpoolFile.java b/db/java/src/com/sleepycat/db/internal/DbMpoolFile.java new file mode 100644 index 000000000..e72c1c70d --- /dev/null +++ b/db/java/src/com/sleepycat/db/internal/DbMpoolFile.java @@ -0,0 +1,56 @@ +/* ---------------------------------------------------------------------------- + * This file was automatically generated by SWIG (http://www.swig.org). + * Version: 1.3.21 + * + * Do not make changes to this file unless you know what you are doing--modify + * the SWIG interface file instead. 
+ * ----------------------------------------------------------------------------- */ + +package com.sleepycat.db.internal; + + +import com.sleepycat.db.*; +import java.util.Comparator; + +public class DbMpoolFile { + private long swigCPtr; + protected boolean swigCMemOwn; + + protected DbMpoolFile(long cPtr, boolean cMemoryOwn) { + swigCMemOwn = cMemoryOwn; + swigCPtr = cPtr; + } + + protected DbMpoolFile() { + this(0, false); + } + + /* package */ void delete() { + if(swigCPtr != 0 && swigCMemOwn) { + swigCMemOwn = false; + throw new UnsupportedOperationException("C++ destructor does not have public access"); + } + swigCPtr = 0; + } + + protected static long getCPtr(DbMpoolFile obj) { + return (obj == null) ? 0 : obj.swigCPtr; + } + + public int get_priority() throws com.sleepycat.db.DatabaseException { + return db_javaJNI.DbMpoolFile_get_priority(swigCPtr); + } + + public void set_priority(int priority) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbMpoolFile_set_priority(swigCPtr, priority); } + + public int get_flags() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbMpoolFile_get_flags(swigCPtr); } + + public void set_flags(int flags, boolean onoff) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbMpoolFile_set_flags(swigCPtr, flags, onoff); } + + public long get_maxsize() throws com.sleepycat.db.DatabaseException { + return db_javaJNI.DbMpoolFile_get_maxsize(swigCPtr); + } + + public void set_maxsize(long bytes) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbMpoolFile_set_maxsize(swigCPtr, bytes); } + +} diff --git a/db/java/src/com/sleepycat/db/internal/DbSequence.java b/db/java/src/com/sleepycat/db/internal/DbSequence.java new file mode 100644 index 000000000..ac8da65b5 --- /dev/null +++ b/db/java/src/com/sleepycat/db/internal/DbSequence.java @@ -0,0 +1,96 @@ +/* ---------------------------------------------------------------------------- + * This file was automatically generated by SWIG (http://www.swig.org). + * Version: 1.3.21 + * + * Do not make changes to this file unless you know what you are doing--modify + * the SWIG interface file instead. + * ----------------------------------------------------------------------------- */ + +package com.sleepycat.db.internal; + + +import com.sleepycat.db.*; +import java.util.Comparator; + +public class DbSequence { + private long swigCPtr; + protected boolean swigCMemOwn; + + protected DbSequence(long cPtr, boolean cMemoryOwn) { + swigCMemOwn = cMemoryOwn; + swigCPtr = cPtr; + } + + protected DbSequence() { + this(0, false); + } + + /* package */ void delete() { + if(swigCPtr != 0 && swigCMemOwn) { + swigCMemOwn = false; + throw new UnsupportedOperationException("C++ destructor does not have public access"); + } + swigCPtr = 0; + } + + protected static long getCPtr(DbSequence obj) { + return (obj == null) ? 
0 : obj.swigCPtr; + } + + public Sequence wrapper; + + public synchronized void close(int flags) throws DatabaseException { + try { + close0(flags); + } finally { + swigCPtr = 0; + } + } + + public synchronized void remove(DbTxn txn, int flags) + throws DatabaseException { + try { + remove0(txn, flags); + } finally { + swigCPtr = 0; + } + } + + public DbSequence(Db db, int flags) throws com.sleepycat.db.DatabaseException { + this(db_javaJNI.new_DbSequence(Db.getCPtr(db), flags), true); + } + + /* package */ void close0(int flags) { db_javaJNI.DbSequence_close0(swigCPtr, flags); } + + public long get(DbTxn txnid, int delta, int flags) throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbSequence_get(swigCPtr, DbTxn.getCPtr(txnid), delta, flags); } + + public int get_cachesize() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbSequence_get_cachesize(swigCPtr); } + + public Db get_db() throws com.sleepycat.db.DatabaseException { + long cPtr = db_javaJNI.DbSequence_get_db(swigCPtr); + return (cPtr == 0) ? null : new Db(cPtr, false); + } + + public int get_flags() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbSequence_get_flags(swigCPtr); } + + public void get_key(com.sleepycat.db.DatabaseEntry key) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbSequence_get_key(swigCPtr, key); } + + public long get_range_min() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbSequence_get_range_min(swigCPtr); } + + public long get_range_max() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbSequence_get_range_max(swigCPtr); } + + public void initial_value(long val) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbSequence_initial_value(swigCPtr, val); } + + public void open(DbTxn txnid, com.sleepycat.db.DatabaseEntry key, int flags) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbSequence_open(swigCPtr, DbTxn.getCPtr(txnid), key, flags); } + + /* package */ void remove0(DbTxn txnid, int flags) { db_javaJNI.DbSequence_remove0(swigCPtr, DbTxn.getCPtr(txnid), flags); } + + public void set_cachesize(int size) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbSequence_set_cachesize(swigCPtr, size); } + + public void set_flags(int flags) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbSequence_set_flags(swigCPtr, flags); } + + public void set_range(long min, long max) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbSequence_set_range(swigCPtr, min, max); } + + public com.sleepycat.db.SequenceStats stat(int flags) throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbSequence_stat(swigCPtr, flags); } + +} diff --git a/db/java/src/com/sleepycat/db/internal/DbTxn.java b/db/java/src/com/sleepycat/db/internal/DbTxn.java new file mode 100644 index 000000000..d277f1982 --- /dev/null +++ b/db/java/src/com/sleepycat/db/internal/DbTxn.java @@ -0,0 +1,103 @@ +/* ---------------------------------------------------------------------------- + * This file was automatically generated by SWIG (http://www.swig.org). + * Version: 1.3.21 + * + * Do not make changes to this file unless you know what you are doing--modify + * the SWIG interface file instead. 
+ * ----------------------------------------------------------------------------- */ + +package com.sleepycat.db.internal; + + +import com.sleepycat.db.*; +import java.util.Comparator; + +public class DbTxn { + private long swigCPtr; + protected boolean swigCMemOwn; + + protected DbTxn(long cPtr, boolean cMemoryOwn) { + swigCMemOwn = cMemoryOwn; + swigCPtr = cPtr; + } + + protected DbTxn() { + this(0, false); + } + + /* package */ void delete() { + if(swigCPtr != 0 && swigCMemOwn) { + swigCMemOwn = false; + throw new UnsupportedOperationException("C++ destructor does not have public access"); + } + swigCPtr = 0; + } + + protected static long getCPtr(DbTxn obj) { + return (obj == null) ? 0 : obj.swigCPtr; + } + + public void abort() throws DatabaseException { + try { + abort0(); + } finally { + swigCPtr = 0; + } + } + + public void commit(int flags) throws DatabaseException { + try { + commit0(flags); + } finally { + swigCPtr = 0; + } + } + + public void discard(int flags) throws DatabaseException { + try { + discard0(flags); + } finally { + swigCPtr = 0; + } + } + + /* + * We override Object.equals because it is possible for the Java API to + * create multiple DbTxns that reference the same underlying object. + * This can happen for example during DbEnv.txn_recover(). + */ + public boolean equals(Object obj) + { + if (this == obj) + return true; + + if (obj != null && (obj instanceof DbTxn)) { + DbTxn that = (DbTxn)obj; + return (this.swigCPtr == that.swigCPtr); + } + return false; + } + + /* + * We must override Object.hashCode whenever we override + * Object.equals() to enforce the maxim that equal objects have the + * same hashcode. + */ + public int hashCode() + { + return ((int)swigCPtr ^ (int)(swigCPtr >> 32)); + } + + /* package */ void abort0() { db_javaJNI.DbTxn_abort0(swigCPtr); } + + /* package */ void commit0(int flags) { db_javaJNI.DbTxn_commit0(swigCPtr, flags); } + + /* package */ void discard0(int flags) { db_javaJNI.DbTxn_discard0(swigCPtr, flags); } + + public int id() throws com.sleepycat.db.DatabaseException { return db_javaJNI.DbTxn_id(swigCPtr); } + + public void prepare(byte[] gid) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbTxn_prepare(swigCPtr, gid); } + + public void set_timeout(long timeout, int flags) throws com.sleepycat.db.DatabaseException { db_javaJNI.DbTxn_set_timeout(swigCPtr, timeout, flags); } + +} diff --git a/db/java/src/com/sleepycat/db/internal/DbUtil.java b/db/java/src/com/sleepycat/db/internal/DbUtil.java new file mode 100644 index 000000000..54a2730f9 --- /dev/null +++ b/db/java/src/com/sleepycat/db/internal/DbUtil.java @@ -0,0 +1,179 @@ +/* + * - + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2001-2003 + * Sleepycat Software. All rights reserved. + * + * $Id: DbUtil.java,v 1.2 2004/09/22 18:01:04 bostic Exp $ + */ +package com.sleepycat.db.internal; + +/** + * DbUtil is a simple class that holds a few static utility functions other + * parts of the package share and that don't have a good home elsewhere. (For + * now, that's limited to byte-array-to-int conversion and back.) + */ + +public class DbUtil { + /** + * Get the u_int32_t stored beginning at offset "offset" into + * array "arr". We have to do the conversion manually since it's + * a C-native int, and we're not really supposed to make this + * kind of cast in Java. 
+ * + * @return Description of the Return Value + */ + public static int array2int(byte[] arr, int offset) { + int b1; + int b2; + int b3; + int b4; + int pos = offset; + + // Get the component bytes; b4 is most significant, b1 least. + if (big_endian) { + b4 = arr[pos++]; + b3 = arr[pos++]; + b2 = arr[pos++]; + b1 = arr[pos]; + } else { + b1 = arr[pos++]; + b2 = arr[pos++]; + b3 = arr[pos++]; + b4 = arr[pos]; + } + + // Bytes are signed. Convert [-128, -1] to [128, 255]. + if (b1 < 0) { + b1 += 256; + } + if (b2 < 0) { + b2 += 256; + } + if (b3 < 0) { + b3 += 256; + } + if (b4 < 0) { + b4 += 256; + } + + // Put the bytes in their proper places in an int. + b2 <<= 8; + b3 <<= 16; + b4 <<= 24; + + // Return their sum. + return (b1 + b2 + b3 + b4); + } + + + /** + * Store the specified u_int32_t, with endianness appropriate to + * the platform we're running on, into four consecutive bytes of + * the specified byte array, starting from the specified offset. + */ + public static void int2array(int n, byte[] arr, int offset) { + int b1; + int b2; + int b3; + int b4; + int pos = offset; + + b1 = n & 0xff; + b2 = (n >> 8) & 0xff; + b3 = (n >> 16) & 0xff; + b4 = (n >> 24) & 0xff; + + // Bytes are signed. Convert [128, 255] to [-128, -1]. + if (b1 >= 128) { + b1 -= 256; + } + if (b2 >= 128) { + b2 -= 256; + } + if (b3 >= 128) { + b3 -= 256; + } + if (b4 >= 128) { + b4 -= 256; + } + + // Put the bytes in the appropriate place in the array. + if (big_endian) { + arr[pos++] = (byte) b4; + arr[pos++] = (byte) b3; + arr[pos++] = (byte) b2; + arr[pos] = (byte) b1; + } else { + arr[pos++] = (byte) b1; + arr[pos++] = (byte) b2; + arr[pos++] = (byte) b3; + arr[pos] = (byte) b4; + } + } + + + /** + * Convert a byte array to a concise, readable string suitable + * for use in toString methods of the *Stat classes. + * + * @return Description of the Return Value + */ + public static String byteArrayToString(byte[] barr) { + if (barr == null) { + return "null"; + } + + StringBuffer sb = new StringBuffer(); + int len = barr.length; + for (int i = 0; i < len; i++) { + sb.append('x'); + int val = (barr[i] >> 4) & 0xf; + if (val < 10) { + sb.append((char) ('0' + val)); + } else { + sb.append((char) ('a' + val - 10)); + } + val = barr[i] & 0xf; + if (val < 10) { + sb.append((char) ('0' + val)); + } else { + sb.append((char) ('a' + val - 10)); + } + } + return sb.toString(); + } + + + /** + * Convert an object array to a string, suitable for use in + * toString methods of the *Stat classes. + * + * @return Description of the Return Value + */ + public static String objectArrayToString(Object[] arr, String name) { + if (arr == null) { + return "null"; + } + + StringBuffer sb = new StringBuffer(); + int len = arr.length; + for (int i = 0; i < len; i++) { + sb.append("\n " + name + "[" + i + "]:\n"); + sb.append(" " + arr[i].toString()); + } + return sb.toString(); + } + + public static int default_lorder() { + return big_endian ? 4321 : 1234; + } + + private final static boolean big_endian = is_big_endian(); + + /** + * @return Description of the Return Value + */ + private native static boolean is_big_endian(); +} diff --git a/db/java/src/com/sleepycat/db/internal/Dbc.java b/db/java/src/com/sleepycat/db/internal/Dbc.java new file mode 100644 index 000000000..9d9bced88 --- /dev/null +++ b/db/java/src/com/sleepycat/db/internal/Dbc.java @@ -0,0 +1,73 @@ +/* ---------------------------------------------------------------------------- + * This file was automatically generated by SWIG (http://www.swig.org). 
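For reference, the conversions that DbUtil.array2int() and DbUtil.int2array() implement by hand amount to reading and writing a 4-byte value in the platform's native byte order. The sketch below is illustrative only and is not part of the patch (the class and method names are hypothetical); it expresses the same mapping with java.nio.ByteBuffer:

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    public class NativeOrderIntCodec {
        // Same result as DbUtil.array2int(): read 4 bytes at 'offset' in native order.
        static int array2int(byte[] arr, int offset) {
            return ByteBuffer.wrap(arr, offset, 4).order(ByteOrder.nativeOrder()).getInt();
        }

        // Same result as DbUtil.int2array(): write 'n' at 'offset' in native order.
        static void int2array(int n, byte[] arr, int offset) {
            ByteBuffer.wrap(arr, offset, 4).order(ByteOrder.nativeOrder()).putInt(n);
        }

        public static void main(String[] args) {
            byte[] buf = new byte[4];
            int2array(0xCAFEBABE, buf, 0);
            System.out.println(Integer.toHexString(array2int(buf, 0))); // prints cafebabe
        }
    }

The patch's default_lorder() encodes the same endianness distinction as the byte-order numbers 4321 (big-endian) and 1234 (little-endian).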
+ * Version: 1.3.21 + * + * Do not make changes to this file unless you know what you are doing--modify + * the SWIG interface file instead. + * ----------------------------------------------------------------------------- */ + +package com.sleepycat.db.internal; + + +import com.sleepycat.db.*; +import java.util.Comparator; + +public class Dbc { + private long swigCPtr; + protected boolean swigCMemOwn; + + protected Dbc(long cPtr, boolean cMemoryOwn) { + swigCMemOwn = cMemoryOwn; + swigCPtr = cPtr; + } + + protected Dbc() { + this(0, false); + } + + /* package */ void delete() { + if(swigCPtr != 0 && swigCMemOwn) { + swigCMemOwn = false; + throw new UnsupportedOperationException("C++ destructor does not have public access"); + } + swigCPtr = 0; + } + + protected static long getCPtr(Dbc obj) { + return (obj == null) ? 0 : obj.swigCPtr; + } + + public synchronized void close() throws DatabaseException { + try { + close0(); + } finally { + swigCPtr = 0; + } + } + + /* package */ void close0() { db_javaJNI.Dbc_close0(swigCPtr); } + + public int count(int flags) throws com.sleepycat.db.DatabaseException { return db_javaJNI.Dbc_count(swigCPtr, flags); } + + public int del(int flags) throws com.sleepycat.db.DatabaseException { + return db_javaJNI.Dbc_del(swigCPtr, flags); + } + + public Dbc dup(int flags) throws com.sleepycat.db.DatabaseException { + long cPtr = db_javaJNI.Dbc_dup(swigCPtr, flags); + return (cPtr == 0) ? null : new Dbc(cPtr, false); + } + + public int get(com.sleepycat.db.DatabaseEntry key, com.sleepycat.db.DatabaseEntry data, int flags) throws com.sleepycat.db.DatabaseException { + return db_javaJNI.Dbc_get(swigCPtr, key, data, flags); + } + + public int pget(com.sleepycat.db.DatabaseEntry key, com.sleepycat.db.DatabaseEntry pkey, com.sleepycat.db.DatabaseEntry data, int flags) throws com.sleepycat.db.DatabaseException { + return db_javaJNI.Dbc_pget(swigCPtr, key, pkey, data, flags); + } + + public int put(com.sleepycat.db.DatabaseEntry key, com.sleepycat.db.DatabaseEntry data, int flags) throws com.sleepycat.db.DatabaseException { + return db_javaJNI.Dbc_put(swigCPtr, key, data, flags); + } + +} diff --git a/db/java/src/com/sleepycat/db/internal/db_java.java b/db/java/src/com/sleepycat/db/internal/db_java.java new file mode 100644 index 000000000..6bfbf64c8 --- /dev/null +++ b/db/java/src/com/sleepycat/db/internal/db_java.java @@ -0,0 +1,34 @@ +package com.sleepycat.db.internal; + +/* ---------------------------------------------------------------------------- + * This file was automatically generated by SWIG (http://www.swig.org). + * Version: 1.3.21 + * + * Do not make changes to this file unless you know what you are doing--modify + * the SWIG interface file instead. 
+ * ----------------------------------------------------------------------------- */ + + +/* package */ class db_java { + public static void DbEnv_lock_vec(DbEnv dbenv, int locker, int flags, com.sleepycat.db.LockRequest[] list, int offset, int nlist) throws com.sleepycat.db.DatabaseException { + db_javaJNI.DbEnv_lock_vec(DbEnv.getCPtr(dbenv), locker, flags, list, offset, nlist); + } + + /* package */ static long initDbEnvRef0(DbEnv self, Object handle) { + return db_javaJNI.initDbEnvRef0(DbEnv.getCPtr(self), handle); + } + + /* package */ static long initDbRef0(Db self, Object handle) { + return db_javaJNI.initDbRef0(Db.getCPtr(self), handle); + } + + /* package */ static void deleteRef0(long ref) { + db_javaJNI.deleteRef0(ref); + } + + /* package */ static DbEnv getDbEnv0(Db self) { + long cPtr = db_javaJNI.getDbEnv0(Db.getCPtr(self)); + return (cPtr == 0) ? null : new DbEnv(cPtr, false); + } + +} diff --git a/db/java/src/com/sleepycat/db/internal/db_javaJNI.java b/db/java/src/com/sleepycat/db/internal/db_javaJNI.java new file mode 100644 index 000000000..4b47b3bec --- /dev/null +++ b/db/java/src/com/sleepycat/db/internal/db_javaJNI.java @@ -0,0 +1,263 @@ +package com.sleepycat.db.internal; + +/* ---------------------------------------------------------------------------- + * This file was automatically generated by SWIG (http://www.swig.org). + * Version: 1.3.21 + * + * Do not make changes to this file unless you know what you are doing--modify + * the SWIG interface file instead. + * ----------------------------------------------------------------------------- */ + + +class db_javaJNI { + + static { + /* An alternate library name can be specified via a property. */ + String libname; + + if ((libname = System.getProperty("sleepycat.db.libfile")) != null) + System.load(libname); + else if ((libname = System.getProperty("sleepycat.db.libname")) != null) + System.loadLibrary(libname); + else { + String os = System.getProperty("os.name"); + if (os != null && os.startsWith("Windows")) { + /* + * On Windows, library name is something like + * "libdb_java42.dll" or "libdb_java42d.dll". + */ + libname = "libdb_java" + + DbConstants.DB_VERSION_MAJOR + + DbConstants.DB_VERSION_MINOR; + + try { + System.loadLibrary(libname); + } catch (UnsatisfiedLinkError e) { + try { + libname += "d"; + System.loadLibrary(libname); + } catch (UnsatisfiedLinkError e2) { + throw e; + } + } + } else { + /* + * On UNIX, library name is something like + * "libdb_java-3.0.so". + */ + System.loadLibrary("db_java-" + + DbConstants.DB_VERSION_MAJOR + "." 
+ + DbConstants.DB_VERSION_MINOR); + } + } + + initialize(); + + if (DbEnv_get_version_major() != DbConstants.DB_VERSION_MAJOR || + DbEnv_get_version_minor() != DbConstants.DB_VERSION_MINOR || + DbEnv_get_version_patch() != DbConstants.DB_VERSION_PATCH) + throw new RuntimeException("Berkeley DB library version doesn't match Java classes"); + } + + static native final void initialize(); + + public final static native void DbEnv_lock_vec(long jarg1, int jarg2, int jarg3, com.sleepycat.db.LockRequest[] jarg4, int jarg5, int jarg6) throws com.sleepycat.db.DatabaseException; + /* package */ final static native long initDbEnvRef0(long jarg1, Object jarg2); + /* package */ final static native long initDbRef0(long jarg1, Object jarg2); + /* package */ final static native void deleteRef0(long jarg1); + /* package */ final static native long getDbEnv0(long jarg1); + public final static native long new_Db(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void Db_associate(long jarg1, long jarg2, long jarg3, com.sleepycat.db.SecondaryKeyCreator jarg4, int jarg5) throws com.sleepycat.db.DatabaseException; + /* package */ final static native int Db_close0(long jarg1, int jarg2); + public final static native long Db_cursor(long jarg1, long jarg2, int jarg3) throws com.sleepycat.db.DatabaseException; + public final static native int Db_del(long jarg1, long jarg2, com.sleepycat.db.DatabaseEntry jarg3, int jarg4) throws com.sleepycat.db.DatabaseException; + public final static native void Db_err(long jarg1, int jarg2, String jarg3) /* no exception */; + public final static native void Db_errx(long jarg1, String jarg2) /* no exception */; + public final static native boolean Db_get_transactional(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int Db_get(long jarg1, long jarg2, com.sleepycat.db.DatabaseEntry jarg3, com.sleepycat.db.DatabaseEntry jarg4, int jarg5) throws com.sleepycat.db.DatabaseException; + public final static native boolean Db_get_byteswapped(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native long Db_get_cachesize(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int Db_get_cachesize_ncache(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native String Db_get_filename(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native String Db_get_dbname(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int Db_get_encrypt_flags(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native String Db_get_errpfx(long jarg1) /* no exception */; + public final static native int Db_get_flags(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int Db_get_lorder(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native long Db_get_mpf(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int Db_get_open_flags(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int Db_get_pagesize(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int Db_get_bt_minkey(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int Db_get_h_ffactor(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int Db_get_h_nelem(long jarg1) throws 
com.sleepycat.db.DatabaseException; + public final static native int Db_get_re_delim(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int Db_get_re_len(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int Db_get_re_pad(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native String Db_get_re_source(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int Db_get_q_extentsize(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int Db_get_type(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native long Db_join(long jarg1, Dbc[] jarg2, int jarg3) throws com.sleepycat.db.DatabaseException; + public final static native void Db_key_range(long jarg1, long jarg2, com.sleepycat.db.DatabaseEntry jarg3, com.sleepycat.db.KeyRange jarg4, int jarg5) throws com.sleepycat.db.DatabaseException; + public final static native void Db_open(long jarg1, long jarg2, String jarg3, String jarg4, int jarg5, int jarg6, int jarg7) throws com.sleepycat.db.DatabaseException, java.io.FileNotFoundException; + public final static native int Db_pget(long jarg1, long jarg2, com.sleepycat.db.DatabaseEntry jarg3, com.sleepycat.db.DatabaseEntry jarg4, com.sleepycat.db.DatabaseEntry jarg5, int jarg6) throws com.sleepycat.db.DatabaseException; + public final static native int Db_put(long jarg1, long jarg2, com.sleepycat.db.DatabaseEntry jarg3, com.sleepycat.db.DatabaseEntry jarg4, int jarg5) throws com.sleepycat.db.DatabaseException; + /* package */ final static native void Db_remove0(long jarg1, String jarg2, String jarg3, int jarg4); + /* package */ final static native void Db_rename0(long jarg1, String jarg2, String jarg3, String jarg4, int jarg5); + public final static native void Db_set_append_recno(long jarg1, com.sleepycat.db.RecordNumberAppender jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void Db_set_bt_compare(long jarg1, java.util.Comparator jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void Db_set_bt_maxkey(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void Db_set_bt_minkey(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void Db_set_bt_prefix(long jarg1, com.sleepycat.db.BtreePrefixCalculator jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void Db_set_cachesize(long jarg1, long jarg2, int jarg3) throws com.sleepycat.db.DatabaseException; + public final static native void Db_set_dup_compare(long jarg1, java.util.Comparator jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void Db_set_encrypt(long jarg1, String jarg2, int jarg3) throws com.sleepycat.db.DatabaseException; + public final static native void Db_set_errpfx(long jarg1, String jarg2) /* no exception */; + public final static native void Db_set_feedback(long jarg1, com.sleepycat.db.FeedbackHandler jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void Db_set_flags(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void Db_set_h_ffactor(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void Db_set_h_hash(long jarg1, com.sleepycat.db.Hasher jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void Db_set_h_nelem(long 
jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void Db_set_lorder(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void Db_set_pagesize(long jarg1, long jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void Db_set_re_delim(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void Db_set_re_len(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void Db_set_re_pad(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void Db_set_re_source(long jarg1, String jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void Db_set_q_extentsize(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native Object Db_stat(long jarg1, long jarg2, int jarg3) throws com.sleepycat.db.DatabaseException; + public final static native void Db_sync(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native int Db_truncate(long jarg1, long jarg2, int jarg3) throws com.sleepycat.db.DatabaseException; + public final static native void Db_upgrade(long jarg1, String jarg2, int jarg3) throws com.sleepycat.db.DatabaseException; + /* package */ final static native boolean Db_verify0(long jarg1, String jarg2, String jarg3, java.io.OutputStream jarg4, int jarg5); + /* package */ final static native void Dbc_close0(long jarg1); + public final static native int Dbc_count(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native int Dbc_del(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native long Dbc_dup(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native int Dbc_get(long jarg1, com.sleepycat.db.DatabaseEntry jarg2, com.sleepycat.db.DatabaseEntry jarg3, int jarg4) throws com.sleepycat.db.DatabaseException; + public final static native int Dbc_pget(long jarg1, com.sleepycat.db.DatabaseEntry jarg2, com.sleepycat.db.DatabaseEntry jarg3, com.sleepycat.db.DatabaseEntry jarg4, int jarg5) throws com.sleepycat.db.DatabaseException; + public final static native int Dbc_put(long jarg1, com.sleepycat.db.DatabaseEntry jarg2, com.sleepycat.db.DatabaseEntry jarg3, int jarg4) throws com.sleepycat.db.DatabaseException; + public final static native long new_DbEnv(int jarg1) throws com.sleepycat.db.DatabaseException; + /* package */ final static native void DbEnv_close0(long jarg1, int jarg2); + public final static native void DbEnv_dbremove(long jarg1, long jarg2, String jarg3, String jarg4, int jarg5) throws com.sleepycat.db.DatabaseException, java.io.FileNotFoundException; + public final static native void DbEnv_dbrename(long jarg1, long jarg2, String jarg3, String jarg4, String jarg5, int jarg6) throws com.sleepycat.db.DatabaseException, java.io.FileNotFoundException; + public final static native void DbEnv_err(long jarg1, int jarg2, String jarg3) /* no exception */; + public final static native void DbEnv_errx(long jarg1, String jarg2) /* no exception */; + public final static native String[] DbEnv_get_data_dirs(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int DbEnv_get_encrypt_flags(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native String DbEnv_get_errpfx(long jarg1) /* no exception */; + public final static 
native int DbEnv_get_flags(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native String DbEnv_get_home(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int DbEnv_get_open_flags(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native long DbEnv_get_shm_key(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int DbEnv_get_tas_spins(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native String DbEnv_get_tmp_dir(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native boolean DbEnv_get_verbose(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_open(long jarg1, String jarg2, int jarg3, int jarg4) throws com.sleepycat.db.DatabaseException, java.io.FileNotFoundException; + /* package */ final static native void DbEnv_remove0(long jarg1, String jarg2, int jarg3); + public final static native void DbEnv_set_cachesize(long jarg1, long jarg2, int jarg3) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_data_dir(long jarg1, String jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_encrypt(long jarg1, String jarg2, int jarg3) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_errcall(long jarg1, com.sleepycat.db.ErrorHandler jarg2) /* no exception */; + public final static native void DbEnv_set_errpfx(long jarg1, String jarg2) /* no exception */; + public final static native void DbEnv_set_flags(long jarg1, int jarg2, boolean jarg3) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_feedback(long jarg1, com.sleepycat.db.FeedbackHandler jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_mp_mmapsize(long jarg1, long jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_msgcall(long jarg1, com.sleepycat.db.MessageHandler jarg2) /* no exception */; + public final static native void DbEnv_set_paniccall(long jarg1, com.sleepycat.db.PanicHandler jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_rpc_server(long jarg1, DbClient jarg2, String jarg3, long jarg4, long jarg5, int jarg6) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_shm_key(long jarg1, long jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_tas_spins(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_timeout(long jarg1, long jarg2, int jarg3) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_tmp_dir(long jarg1, String jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_tx_max(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_app_dispatch(long jarg1, com.sleepycat.db.LogRecordHandler jarg2) throws com.sleepycat.db.DatabaseException; + /* package */ final static native void DbEnv_set_tx_timestamp0(long jarg1, long jarg2); + public final static native void DbEnv_set_verbose(long jarg1, int jarg2, boolean jarg3) throws com.sleepycat.db.DatabaseException; + public final static native byte[][] DbEnv_get_lk_conflicts(long jarg1) throws com.sleepycat.db.DatabaseException; + 
public final static native int DbEnv_get_lk_detect(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int DbEnv_get_lk_max_locks(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int DbEnv_get_lk_max_lockers(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int DbEnv_get_lk_max_objects(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int DbEnv_lock_detect(long jarg1, int jarg2, int jarg3) throws com.sleepycat.db.DatabaseException; + public final static native long DbEnv_lock_get(long jarg1, int jarg2, int jarg3, com.sleepycat.db.DatabaseEntry jarg4, int jarg5) throws com.sleepycat.db.DatabaseException; + public final static native int DbEnv_lock_id(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_lock_id_free(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_lock_put(long jarg1, long jarg2) throws com.sleepycat.db.DatabaseException; + public final static native com.sleepycat.db.LockStats DbEnv_lock_stat(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_lk_conflicts(long jarg1, byte[][] jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_lk_detect(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_lk_max_lockers(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_lk_max_locks(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_lk_max_objects(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native int DbEnv_get_lg_bsize(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native String DbEnv_get_lg_dir(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int DbEnv_get_lg_max(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int DbEnv_get_lg_regionmax(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native String[] DbEnv_log_archive(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native int DbEnv_log_compare(com.sleepycat.db.LogSequenceNumber jarg1, com.sleepycat.db.LogSequenceNumber jarg2) /* no exception */; + public final static native long DbEnv_log_cursor(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native String DbEnv_log_file(long jarg1, com.sleepycat.db.LogSequenceNumber jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_log_flush(long jarg1, com.sleepycat.db.LogSequenceNumber jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_log_put(long jarg1, com.sleepycat.db.LogSequenceNumber jarg2, com.sleepycat.db.DatabaseEntry jarg3, int jarg4) throws com.sleepycat.db.DatabaseException; + public final static native com.sleepycat.db.LogStats DbEnv_log_stat(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_lg_bsize(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_lg_dir(long jarg1, String jarg2) throws com.sleepycat.db.DatabaseException; + public final static 
native void DbEnv_set_lg_max(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_lg_regionmax(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native long DbEnv_get_cachesize(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int DbEnv_get_cachesize_ncache(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native long DbEnv_get_mp_mmapsize(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native com.sleepycat.db.CacheStats DbEnv_memp_stat(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native com.sleepycat.db.CacheFileStats[] DbEnv_memp_fstat(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native int DbEnv_memp_trickle(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native int DbEnv_get_tx_max(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native long DbEnv_get_tx_timestamp(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native long DbEnv_get_timeout(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native long DbEnv_txn_begin(long jarg1, long jarg2, int jarg3) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_txn_checkpoint(long jarg1, int jarg2, int jarg3, int jarg4) throws com.sleepycat.db.DatabaseException; + public final static native com.sleepycat.db.PreparedTransaction[] DbEnv_txn_recover(long jarg1, int jarg2, int jarg3) throws com.sleepycat.db.DatabaseException; + public final static native com.sleepycat.db.TransactionStats DbEnv_txn_stat(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native long DbEnv_get_rep_limit(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int DbEnv_rep_elect(long jarg1, int jarg2, int jarg3, int jarg4, int jarg5, int jarg6) throws com.sleepycat.db.DatabaseException; + public final static native int DbEnv_rep_process_message(long jarg1, com.sleepycat.db.DatabaseEntry jarg2, com.sleepycat.db.DatabaseEntry jarg3, DbEnv.RepProcessMessage jarg4, com.sleepycat.db.LogSequenceNumber jarg5) /* no exception */; + public final static native void DbEnv_rep_start(long jarg1, com.sleepycat.db.DatabaseEntry jarg2, int jarg3) throws com.sleepycat.db.DatabaseException; + public final static native com.sleepycat.db.ReplicationStats DbEnv_rep_stat(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_rep_limit(long jarg1, long jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbEnv_set_rep_transport(long jarg1, int jarg2, com.sleepycat.db.ReplicationTransport jarg3) throws com.sleepycat.db.DatabaseException; + public final static native String DbEnv_strerror(int jarg1) /* no exception */; + public final static native int DbEnv_get_version_major() /* no exception */; + public final static native int DbEnv_get_version_minor() /* no exception */; + public final static native int DbEnv_get_version_patch() /* no exception */; + public final static native String DbEnv_get_version_string() /* no exception */; + /* package */ final static native void delete_DbLock(long jarg1); + /* package */ final static native void DbLogc_close0(long jarg1, int jarg2); + public final static native int 
DbLogc_get(long jarg1, com.sleepycat.db.LogSequenceNumber jarg2, com.sleepycat.db.DatabaseEntry jarg3, int jarg4) throws com.sleepycat.db.DatabaseException; + public final static native int DbMpoolFile_get_priority(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native void DbMpoolFile_set_priority(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native int DbMpoolFile_get_flags(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native void DbMpoolFile_set_flags(long jarg1, int jarg2, boolean jarg3) throws com.sleepycat.db.DatabaseException; + public final static native long DbMpoolFile_get_maxsize(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native void DbMpoolFile_set_maxsize(long jarg1, long jarg2) throws com.sleepycat.db.DatabaseException; + public final static native long new_DbSequence(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + /* package */ final static native void DbSequence_close0(long jarg1, int jarg2); + public final static native long DbSequence_get(long jarg1, long jarg2, int jarg3, int jarg4) throws com.sleepycat.db.DatabaseException; + public final static native int DbSequence_get_cachesize(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native long DbSequence_get_db(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native int DbSequence_get_flags(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native void DbSequence_get_key(long jarg1, com.sleepycat.db.DatabaseEntry jarg2) throws com.sleepycat.db.DatabaseException; + public final static native long DbSequence_get_range_min(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native long DbSequence_get_range_max(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native void DbSequence_initial_value(long jarg1, long jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbSequence_open(long jarg1, long jarg2, com.sleepycat.db.DatabaseEntry jarg3, int jarg4) throws com.sleepycat.db.DatabaseException; + /* package */ final static native void DbSequence_remove0(long jarg1, long jarg2, int jarg3); + public final static native void DbSequence_set_cachesize(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbSequence_set_flags(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbSequence_set_range(long jarg1, long jarg2, long jarg3) throws com.sleepycat.db.DatabaseException; + public final static native com.sleepycat.db.SequenceStats DbSequence_stat(long jarg1, int jarg2) throws com.sleepycat.db.DatabaseException; + /* package */ final static native void DbTxn_abort0(long jarg1); + /* package */ final static native void DbTxn_commit0(long jarg1, int jarg2); + /* package */ final static native void DbTxn_discard0(long jarg1, int jarg2); + public final static native int DbTxn_id(long jarg1) throws com.sleepycat.db.DatabaseException; + public final static native void DbTxn_prepare(long jarg1, byte[] jarg2) throws com.sleepycat.db.DatabaseException; + public final static native void DbTxn_set_timeout(long jarg1, long jarg2, int jarg3) throws com.sleepycat.db.DatabaseException; +} diff --git a/db/java/src/com/sleepycat/db/package.html b/db/java/src/com/sleepycat/db/package.html index 73f58df1d..fe77f9223 100644 --- 
a/db/java/src/com/sleepycat/db/package.html +++ b/db/java/src/com/sleepycat/db/package.html @@ -1,7 +1,29 @@ -Java API programming notes -[reference guide] +Berkeley DB Java API +[reference guide] [Java programming notes]. +This package is a wrapper around the Berkeley DB library. It uses JNI +to provide access to Berkeley DB, which is implemented in C. That means +that a shared library or DLL must be available when applications use +this package.

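Because access goes through JNI, an application must make the native library findable before the first call into com.sleepycat.db.internal. The sketch below is illustrative only and the library path is a placeholder; it uses the two system properties read by the db_javaJNI static initializer shown earlier in this patch, which otherwise falls back to the platform-specific library name and then checks the version against DbConstants:

    public class LoadNativeLibrary {
        public static void main(String[] args) {
            // Option 1: absolute path to the JNI library (placeholder path).
            System.setProperty("sleepycat.db.libfile", "/path/to/libdb_java.so");
            // Option 2: a bare name handed to System.loadLibrary().
            // System.setProperty("sleepycat.db.libname", "db_java-X.Y");

            // The first native call loads the library and runs the version check.
            System.out.println("Using Berkeley DB " +
                com.sleepycat.db.internal.DbEnv.get_version_string());
        }
    }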
    +There are also several utilities provided with Berkeley DB that make +administrative tasks possible from the command line. For more +information, see the page Berkeley DB Supporting Utilities. diff --git a/db/java/src/com/sleepycat/util/ExceptionUnwrapper.java b/db/java/src/com/sleepycat/util/ExceptionUnwrapper.java new file mode 100644 index 000000000..ee7e8ceb4 --- /dev/null +++ b/db/java/src/com/sleepycat/util/ExceptionUnwrapper.java @@ -0,0 +1,69 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: ExceptionUnwrapper.java,v 1.1 2004/04/09 16:34:10 mark Exp $ + */ + +package com.sleepycat.util; + +/** + * Unwraps nested exceptions by calling the {@link + * ExceptionWrapper#getDetail()} method for exceptions that implement the + * {@link ExceptionWrapper} interface. Does not currently support the Java 1.4 + * Throwable.getDetail() method. + * + * @author Mark Hayes + */ +public class ExceptionUnwrapper { + + /** + * Unwraps an Exception and returns the underlying Exception, or throws an + * Error if the underlying Throwable is an Error. + * + * @param e is the Exception to unwrap. + * + * @return the underlying Exception. + * + * @throws Error if the underlying Throwable is an Error. + * + * @throws IllegalArgumentException if the underlying Throwable is not an + * Exception or an Error. + */ + public static Exception unwrap(Exception e) { + + Throwable t = unwrapAny(e); + if (t instanceof Exception) { + return (Exception) t; + } else if (t instanceof Error) { + throw (Error) t; + } else { + throw new IllegalArgumentException("Not Exception or Error: " + t); + } + } + + /** + * Unwraps an Exception and returns the underlying Throwable. + * + * @param e is the Exception to unwrap. + * + * @return the underlying Throwable. + */ + public static Throwable unwrapAny(Throwable e) { + + while (true) { + if (e instanceof ExceptionWrapper) { + Throwable e2 = ((ExceptionWrapper) e).getDetail(); + if (e2 == null) { + return e; + } else { + e = e2; + } + } else { + return e; + } + } + } +} diff --git a/db/java/src/com/sleepycat/util/ExceptionWrapper.java b/db/java/src/com/sleepycat/util/ExceptionWrapper.java new file mode 100644 index 000000000..4917fdc2b --- /dev/null +++ b/db/java/src/com/sleepycat/util/ExceptionWrapper.java @@ -0,0 +1,25 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: ExceptionWrapper.java,v 1.1 2004/04/09 16:34:10 mark Exp $ + */ + +package com.sleepycat.util; + +/** + * Interface implemented by exceptions that can contain nested exceptions. + * + * @author Mark Hayes + */ +public interface ExceptionWrapper { + + /** + * Returns the nested exception or null if none is present. + * + * @return the nested exception or null if none is present. + */ + Throwable getDetail(); +} diff --git a/db/java/src/com/sleepycat/util/FastInputStream.java b/db/java/src/com/sleepycat/util/FastInputStream.java new file mode 100644 index 000000000..6bad7d79a --- /dev/null +++ b/db/java/src/com/sleepycat/util/FastInputStream.java @@ -0,0 +1,179 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. 
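The unwrap helpers pair with the wrapper exceptions introduced elsewhere in this patch. A minimal usage sketch, illustrative only, using the IOExceptionWrapper class added later as the ExceptionWrapper implementation:

    import com.sleepycat.util.ExceptionUnwrapper;
    import com.sleepycat.util.IOExceptionWrapper;

    public class UnwrapExample {
        public static void main(String[] args) {
            Exception cause = new IllegalStateException("original failure");
            // IOExceptionWrapper implements ExceptionWrapper, so getDetail()
            // exposes the nested exception for the unwrapper to return.
            Exception wrapped = new IOExceptionWrapper(cause);
            System.out.println(ExceptionUnwrapper.unwrap(wrapped) == cause); // true
        }
    }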
+ * + * $Id: FastInputStream.java,v 1.2 2004/06/04 18:24:50 mark Exp $ + */ + +package com.sleepycat.util; + +import java.io.IOException; +import java.io.InputStream; + +/** + * A replacement for ByteArrayInputStream that does not synchronize every + * byte read. + * + *

This class extends {@link InputStream} and its read() + * methods allow it to be used as a standard input stream. In addition, it + * provides readFast() methods that are not declared to throw + * IOException. IOException is never thrown by this + * class.
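A short usage sketch for the readFast() contract described above (illustrative only, not part of the patch):

    import com.sleepycat.util.FastInputStream;

    public class FastInputStreamExample {
        public static void main(String[] args) {
            byte[] data = {10, 20, 30};
            FastInputStream in = new FastInputStream(data);
            // readFast() returns -1 at end of stream and never throws IOException,
            // so no try/catch is needed.
            int b;
            while ((b = in.readFast()) != -1) {
                System.out.println(b);
            }
        }
    }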
    + * + * @author Mark Hayes + */ +public class FastInputStream extends InputStream { + + protected int len; + protected int off; + protected int mark; + protected byte[] buf; + + /** + * Creates an input stream. + * + * @param buffer the data to read. + */ + public FastInputStream(byte[] buffer) { + + buf = buffer; + len = buffer.length; + } + + /** + * Creates an input stream. + * + * @param buffer the data to read. + * + * @param offset the byte offset at which to begin reading. + * + * @param length the number of bytes to read. + */ + public FastInputStream(byte[] buffer, int offset, int length) { + + buf = buffer; + off = offset; + len = length; + } + + // --- begin ByteArrayInputStream compatible methods --- + + public int available() { + + return len - off; + } + + public boolean markSupported() { + + return true; + } + + public void mark(int pos) { + + mark = pos; + } + + public void reset() { + + off = mark; + } + + public long skip(long count) { + + int myCount = (int) count; + if (myCount + off > len) { + myCount = len - off; + } + off += myCount; + return myCount; + } + + public int read() throws IOException { + + return readFast(); + } + + public int read(byte[] toBuf) throws IOException { + + return readFast(toBuf, 0, toBuf.length); + } + + public int read(byte[] toBuf, int offset, int length) throws IOException { + + return readFast(toBuf, offset, length); + } + + // --- end ByteArrayInputStream compatible methods --- + + /** + * Equivalent to read() but does not throw + * IOException. + * @see #read() + */ + public final int readFast() { + + return (off < len) ? (buf[off++] & 0xff) : (-1); + } + + /** + * Equivalent to read(byte[]) but does not throw + * IOException. + * @see #read(byte[]) + */ + public final int readFast(byte[] toBuf) { + + return readFast(toBuf, 0, toBuf.length); + } + + /** + * Equivalent to read(byte[],int,int) but does not throw + * IOException. + * @see #read(byte[],int,int) + */ + public final int readFast(byte[] toBuf, int offset, int length) { + + int avail = len - off; + if (avail <= 0) { + return -1; + } + if (length > avail) { + length = avail; + } + for (int i = 0; i < length; i++) { + toBuf[offset++] = buf[off++]; + } + return length; + } + + /** + * Returns the underlying data being read. + * + * @return the underlying data. + */ + public final byte[] getBufferBytes() { + + return buf; + } + + /** + * Returns the offset at which data is being read from the buffer. + * + * @return the offset at which data is being read. + */ + public final int getBufferOffset() { + + return off; + } + + /** + * Returns the end of the buffer being read. + * + * @return the end of the buffer. + */ + public final int getBufferLength() { + + return len; + } +} diff --git a/db/java/src/com/sleepycat/util/FastOutputStream.java b/db/java/src/com/sleepycat/util/FastOutputStream.java new file mode 100644 index 000000000..a0984bfe4 --- /dev/null +++ b/db/java/src/com/sleepycat/util/FastOutputStream.java @@ -0,0 +1,278 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: FastOutputStream.java,v 1.3 2004/07/03 16:15:36 mark Exp $ + */ + +package com.sleepycat.util; + +import java.io.IOException; +import java.io.OutputStream; +import java.io.UnsupportedEncodingException; + +/** + * A replacement for ByteArrayOutputStream that does not synchronize every + * byte read. + * + *

+ * This class extends {@link OutputStream} and its write()
+ * methods allow it to be used as a standard output stream. In addition, it
+ * provides writeFast() methods that are not declared to throw
+ * IOException. IOException is never thrown by this
+ * class.

    + * + * @author Mark Hayes + */ +public class FastOutputStream extends OutputStream { + + public static final int DEFAULT_INIT_SIZE = 100; + public static final int DEFAULT_BUMP_SIZE = 100; + + private int len; + private int bumpLen; + private byte[] buf; + + /** + * Creates an output stream with default sizes. + */ + public FastOutputStream() { + + initBuffer(DEFAULT_INIT_SIZE, DEFAULT_BUMP_SIZE); + } + + /** + * Creates an output stream with a default bump size and a given initial + * size. + * + * @param initialSize the initial size of the buffer. + */ + public FastOutputStream(int initialSize) { + + initBuffer(initialSize, DEFAULT_BUMP_SIZE); + } + + /** + * Creates an output stream with a given bump size and initial size. + * + * @param initialSize the initial size of the buffer. + * + * @param bumpSize the amount to increment the buffer. + */ + public FastOutputStream(int initialSize, int bumpSize) { + + initBuffer(initialSize, bumpSize); + } + + /** + * Creates an output stream with a given initial buffer and a default + * bump size. + * + * @param buffer the initial buffer; will be owned by this object. + */ + public FastOutputStream(byte[] buffer) { + + buf = buffer; + bumpLen = DEFAULT_BUMP_SIZE; + } + + /** + * Creates an output stream with a given initial buffer and a given + * bump size. + * + * @param buffer the initial buffer; will be owned by this object. + * + * @param bumpSize the amount to increment the buffer. + */ + public FastOutputStream(byte[] buffer, int bumpSize) { + + buf = buffer; + bumpLen = bumpSize; + } + + private void initBuffer(int bufferSize, int bumpLen) { + buf = new byte[bufferSize]; + this.bumpLen = bumpLen; + } + + // --- begin ByteArrayOutputStream compatible methods --- + + public int size() { + + return len; + } + + public void reset() { + + len = 0; + } + + public void write(int b) throws IOException { + + writeFast(b); + } + + public void write(byte[] fromBuf) throws IOException { + + writeFast(fromBuf); + } + + public void write(byte[] fromBuf, int offset, int length) + throws IOException { + + writeFast(fromBuf, offset, length); + } + + public void writeTo(OutputStream out) throws IOException { + + out.write(buf, 0, len); + } + + public String toString() { + + return new String(buf, 0, len); + } + + public String toString(String encoding) + throws UnsupportedEncodingException { + + return new String(buf, 0, len, encoding); + } + + public byte[] toByteArray() { + + byte[] toBuf = new byte[len]; + + for (int i = 0; i < len; i++) + toBuf[i] = buf[i]; + + return toBuf; + } + + // --- end ByteArrayOutputStream compatible methods --- + + /** + * Equivalent to write(int) but does not throw + * IOException. + * @see #write(int) + */ + public final void writeFast(int b) { + + if (len + 1 > buf.length) + bump(1); + + buf[len++] = (byte) b; + } + + /** + * Equivalent to write(byte[]) but does not throw + * IOException. + * @see #write(byte[]) + */ + public final void writeFast(byte[] fromBuf) { + + int needed = len + fromBuf.length - buf.length; + if (needed > 0) + bump(needed); + + for (int i = 0; i < fromBuf.length; i++) + buf[len++] = fromBuf[i]; + } + + /** + * Equivalent to write(byte[],int,int) but does not throw + * IOException. 
+ * @see #write(byte[],int,int) + */ + public final void writeFast(byte[] fromBuf, int offset, int length) { + + int needed = len + length - buf.length; + if (needed > 0) + bump(needed); + + int fromLen = offset + length; + + for (int i = offset; i < fromLen; i++) + buf[len++] = fromBuf[i]; + } + + /** + * Copy the buffered data to the given array. + * + * @param toBuf the buffer to hold a copy of the data. + * + * @param offset the offset at which to start copying. + */ + public void toByteArray(byte[] toBuf, int offset) { + + int toLen = (toBuf.length > len) ? len : toBuf.length; + + for (int i = offset; i < toLen; i++) + toBuf[i] = buf[i]; + } + + /** + * Returns the buffer owned by this object. + * + * @return the buffer. + */ + public byte[] getBufferBytes() { + + return buf; + } + + /** + * Returns the offset of the internal buffer. + * + * @return always zero currently. + */ + public int getBufferOffset() { + + return 0; + } + + /** + * Returns the length used in the internal buffer, i.e., the offset at + * which data will be written next. + * + * @return the buffer length. + */ + public int getBufferLength() { + + return len; + } + + /** + * Ensure that at least the given number of bytes are available in the + * internal buffer. + * + * @param sizeNeeded the number of bytes desired. + */ + public void makeSpace(int sizeNeeded) { + + int needed = len + sizeNeeded - buf.length; + if (needed > 0) + bump(needed); + } + + /** + * Skip the given number of bytes in the buffer. + * + * @param sizeAdded number of bytes to skip. + */ + public void addSize(int sizeAdded) { + + len += sizeAdded; + } + + private void bump(int needed) { + + byte[] toBuf = new byte[buf.length + needed + bumpLen]; + + for (int i = 0; i < len; i++) + toBuf[i] = buf[i]; + + buf = toBuf; + } +} diff --git a/db/java/src/com/sleepycat/util/IOExceptionWrapper.java b/db/java/src/com/sleepycat/util/IOExceptionWrapper.java new file mode 100644 index 000000000..76b409829 --- /dev/null +++ b/db/java/src/com/sleepycat/util/IOExceptionWrapper.java @@ -0,0 +1,34 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: IOExceptionWrapper.java,v 1.1 2004/04/09 16:34:10 mark Exp $ + */ + +package com.sleepycat.util; + +import java.io.IOException; + +/** + * An IOException that can contain nested exceptions. + * + * @author Mark Hayes + */ +public class IOExceptionWrapper + extends IOException implements ExceptionWrapper { + + private Throwable e; + + public IOExceptionWrapper(Throwable e) { + + super(e.getMessage()); + this.e = e; + } + + public Throwable getDetail() { + + return e; + } +} diff --git a/db/java/src/com/sleepycat/util/RuntimeExceptionWrapper.java b/db/java/src/com/sleepycat/util/RuntimeExceptionWrapper.java new file mode 100644 index 000000000..f40f1ff7a --- /dev/null +++ b/db/java/src/com/sleepycat/util/RuntimeExceptionWrapper.java @@ -0,0 +1,32 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: RuntimeExceptionWrapper.java,v 1.1 2004/04/09 16:34:10 mark Exp $ + */ + +package com.sleepycat.util; + +/** + * A RuntimeException that can contain nested exceptions. 
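Taken together, FastOutputStream and FastInputStream give an unsynchronized, non-throwing path for building a byte buffer and reading it back. The following is a minimal round-trip sketch using only the constructors and methods added above; the wrapper class and main method are illustrative and not part of this patch.

    import com.sleepycat.util.FastInputStream;
    import com.sleepycat.util.FastOutputStream;

    public class FastStreamRoundTrip {
        public static void main(String[] args) {
            // Build a buffer; writeFast() never throws and grows the
            // buffer by the bump size when it runs out of space.
            FastOutputStream out = new FastOutputStream(16, 32);
            out.writeFast(0x01);
            out.writeFast(new byte[] { 0x02, 0x03, 0x04 });

            // toByteArray() copies only the bytes written so far.
            byte[] data = out.toByteArray();

            // Read the bytes back with the non-throwing readFast() methods.
            FastInputStream in = new FastInputStream(data);
            int first = in.readFast();              // 1
            byte[] rest = new byte[in.available()];
            int n = in.readFast(rest);              // copies 3 bytes, returns 3

            System.out.println("first=" + first + ", rest read=" + n);
        }
    }
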
+ * + * @author Mark Hayes + */ +public class RuntimeExceptionWrapper extends RuntimeException + implements ExceptionWrapper { + + private Throwable e; + + public RuntimeExceptionWrapper(Throwable e) { + + super(e.getMessage()); + this.e = e; + } + + public Throwable getDetail() { + + return e; + } +} diff --git a/db/java/src/com/sleepycat/util/UtfOps.java b/db/java/src/com/sleepycat/util/UtfOps.java new file mode 100644 index 000000000..19c8a9ee3 --- /dev/null +++ b/db/java/src/com/sleepycat/util/UtfOps.java @@ -0,0 +1,281 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: UtfOps.java,v 1.2 2004/06/04 18:24:51 mark Exp $ + */ + +package com.sleepycat.util; + +/** + * UTF operations with more flexibility than is provided by DataInput and + * DataOutput. + * + * @author Mark Hayes + */ +public class UtfOps { + + private static byte[] EMPTY_BYTES = {}; + private static String EMPTY_STRING = ""; + + /** + * Returns the byte length of a null terminated UTF string, not including + * the terminator. + * + * @param bytes the data containing the UTF string. + * + * @param offset the beginning of the string the measure. + * + * @throws IndexOutOfBoundsException if no zero terminator is found. + * + * @return the number of bytes. + */ + public static int getZeroTerminatedByteLength(byte[] bytes, int offset) + throws IndexOutOfBoundsException { + + int len = 0; + while (bytes[offset++] != 0) { + len++; + } + return len; + } + + /** + * Returns the byte length of the UTF string that would be created by + * converting the given characters to UTF. + * + * @param chars the characters that would be converted. + * + * @return the byte length of the equivalent UTF data. + */ + public static int getByteLength(char[] chars) { + + return getByteLength(chars, 0, chars.length); + } + + /** + * Returns the byte length of the UTF string that would be created by + * converting the given characters to UTF. + * + * @param chars the characters that would be converted. + * + * @param offset the first character to be converted. + * + * @param length the number of characters to be converted. + * + * @return the byte length of the equivalent UTF data. + */ + public static int getByteLength(char[] chars, int offset, int length) { + + int len = 0; + length += offset; + for (int i = offset; i < length; i++) { + int c = chars[i]; + if ((c >= 0x0001) && (c <= 0x007F)) { + len++; + } else if (c > 0x07FF) { + len += 3; + } else { + len += 2; + } + } + return len; + } + + /** + * Returns the number of characters represented by the given UTF string. + * + * @param bytes the UTF string. + * + * @return the number of characters. + * + * @throws IndexOutOfBoundsException if a UTF character sequence at the end + * of the data is not complete. + * + * @throws IllegalArgumentException if an illegal UTF sequence is + * encountered. + */ + public static int getCharLength(byte[] bytes) + throws IllegalArgumentException, IndexOutOfBoundsException { + + return getCharLength(bytes, 0, bytes.length); + } + + /** + * Returns the number of characters represented by the given UTF string. + * + * @param bytes the data containing the UTF string. + * + * @param offset the first byte to be converted. + * + * @param length the number of byte to be converted. + * + * @throws IndexOutOfBoundsException if a UTF character sequence at the end + * of the data is not complete. 
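Both wrapper classes exist so that a checked cause can be carried across an API boundary that does not declare it and then recovered through the getDetail() method of the ExceptionWrapper interface they implement. A small sketch of that pattern, assuming nothing beyond the constructor and getDetail() shown above (the WrapperExample class is illustrative only):

    import com.sleepycat.util.RuntimeExceptionWrapper;

    public class WrapperExample {
        // Re-throw a checked exception from a method that cannot declare it,
        // keeping the original exception as the nested "detail".
        static void doWork() {
            try {
                throw new java.io.IOException("disk full");
            } catch (java.io.IOException e) {
                throw new RuntimeExceptionWrapper(e);
            }
        }

        public static void main(String[] args) {
            try {
                doWork();
            } catch (RuntimeExceptionWrapper w) {
                // getDetail() returns the wrapped Throwable.
                System.out.println("wrapped cause: " + w.getDetail());
            }
        }
    }

IOExceptionWrapper plays the same role in the other direction, carrying an arbitrary Throwable inside an IOException.
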
+ * + * @throws IllegalArgumentException if an illegal UTF sequence is + * encountered. + */ + public static int getCharLength(byte[] bytes, int offset, int length) + throws IllegalArgumentException, IndexOutOfBoundsException { + + int charCount = 0; + length += offset; + while (offset < length) { + switch ((bytes[offset] & 0xff) >> 4) { + case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: + offset++; + break; + case 12: case 13: + offset += 2; + break; + case 14: + offset += 3; + break; + default: + throw new IllegalArgumentException(); + } + charCount++; + } + return charCount; + } + + /** + * Converts byte arrays into character arrays. + * + * @param bytes the source byte data to convert + * + * @param byteOffset the offset into the byte array at which + * to start the conversion + * + * @param chars the destination array + * + * @param charOffset the offset into chars at which to begin the copy + * + * @param len the amount of information to copy into chars + * + * @param isByteLen if true then len is a measure of bytes, otherwise + * len is a measure of characters + * + * @throws IndexOutOfBoundsException if a UTF character sequence at the end + * of the data is not complete. + * + * @throws IllegalArgumentException if an illegal UTF sequence is + * encountered. + */ + public static int bytesToChars(byte[] bytes, int byteOffset, + char[] chars, int charOffset, + int len, boolean isByteLen) + throws IllegalArgumentException, IndexOutOfBoundsException { + + int char1, char2, char3; + len += isByteLen ? byteOffset : charOffset; + while ((isByteLen ? byteOffset : charOffset) < len) { + char1 = bytes[byteOffset++] & 0xff; + switch ((char1 & 0xff) >> 4) { + case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: + chars[charOffset++] = (char) char1; + break; + case 12: case 13: + char2 = bytes[byteOffset++]; + if ((char2 & 0xC0) != 0x80) { + throw new IllegalArgumentException(); + } + chars[charOffset++] = (char)(((char1 & 0x1F) << 6) | + (char2 & 0x3F)); + break; + case 14: + char2 = bytes[byteOffset++]; + char3 = bytes[byteOffset++]; + if (((char2 & 0xC0) != 0x80) || ((char3 & 0xC0) != 0x80)) + throw new IllegalArgumentException(); + chars[charOffset++] = (char)(((char1 & 0x0F) << 12) | + ((char2 & 0x3F) << 6) | + ((char3 & 0x3F) << 0)); + break; + default: + throw new IllegalArgumentException(); + } + } + return byteOffset; + } + + /** + * Converts character arrays into byte arrays. + * + * @param chars the source character data to convert + * + * @param charOffset the offset into the character array at which + * to start the conversion + * + * @param bytes the destination array + * + * @param byteOffset the offset into bytes at which to begin the copy + * + * @param charLength the length of characters to copy into bytes + */ + public static void charsToBytes(char[] chars, int charOffset, + byte[] bytes, int byteOffset, + int charLength) { + charLength += charOffset; + for (int i = charOffset; i < charLength; i++) { + int c = chars[i]; + if ((c >= 0x0001) && (c <= 0x007F)) { + bytes[byteOffset++] = (byte) c; + } else if (c > 0x07FF) { + bytes[byteOffset++] = (byte) (0xE0 | ((c >> 12) & 0x0F)); + bytes[byteOffset++] = (byte) (0x80 | ((c >> 6) & 0x3F)); + bytes[byteOffset++] = (byte) (0x80 | ((c >> 0) & 0x3F)); + } else { + bytes[byteOffset++] = (byte) (0xC0 | ((c >> 6) & 0x1F)); + bytes[byteOffset++] = (byte) (0x80 | ((c >> 0) & 0x3F)); + } + } + } + + /** + * Converts byte arrays into strings. 
+ * + * @param bytes the source byte data to convert + * + * @param offset the offset into the byte array at which + * to start the conversion + * + * @param length the number of bytes to be converted. + * + * @return the string. + * + * @throws IndexOutOfBoundsException if a UTF character sequence at the end + * of the data is not complete. + * + * @throws IllegalArgumentException if an illegal UTF sequence is + * encountered. + */ + public static String bytesToString(byte[] bytes, int offset, int length) + throws IllegalArgumentException, IndexOutOfBoundsException { + + if (length == 0) return EMPTY_STRING; + int charLen = UtfOps.getCharLength(bytes, offset, length); + char[] chars = new char[charLen]; + UtfOps.bytesToChars(bytes, offset, chars, 0, length, true); + return new String(chars, 0, charLen); + } + + /** + * Converts strings to byte arrays. + * + * @param string the string to convert. + * + * @return the UTF byte array. + */ + public static byte[] stringToBytes(String string) { + + if (string.length() == 0) return EMPTY_BYTES; + char[] chars = string.toCharArray(); + byte[] bytes = new byte[UtfOps.getByteLength(chars)]; + UtfOps.charsToBytes(chars, 0, bytes, 0, chars.length); + return bytes; + } +} diff --git a/db/java/src/com/sleepycat/util/package.html b/db/java/src/com/sleepycat/util/package.html new file mode 100644 index 000000000..83fae5ca9 --- /dev/null +++ b/db/java/src/com/sleepycat/util/package.html @@ -0,0 +1,6 @@ + + + +General utilities used throughout Berkeley DB. + + diff --git a/db/libdb_java/db.i b/db/libdb_java/db.i index 7053ed647..ec40c39d8 100644 --- a/db/libdb_java/db.i +++ b/db/libdb_java/db.i @@ -11,6 +11,8 @@ #endif typedef unsigned char u_int8_t; +typedef long int32_t; +typedef long long db_seq_t; typedef unsigned long u_int32_t; typedef u_int32_t db_recno_t; /* Record number type. */ typedef u_int32_t db_timeout_t; /* Type of a timeout. 
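UtfOps encodes characters the same way getByteLength() counts them: one byte for \u0001 through \u007F, two bytes up through \u07FF, and three bytes above that. A minimal round-trip sketch using only the methods defined above (the UtfOpsExample class is illustrative and not part of this patch):

    import com.sleepycat.util.UtfOps;

    public class UtfOpsExample {
        public static void main(String[] args) {
            String original = "caf\u00e9";   // the last character needs a two-byte sequence

            // String -> UTF bytes and back, without a CharsetEncoder.
            byte[] utf = UtfOps.stringToBytes(original);
            String decoded = UtfOps.bytesToString(utf, 0, utf.length);

            // getByteLength() predicts the encoded size without encoding.
            int predicted = UtfOps.getByteLength(original.toCharArray());

            System.out.println(decoded.equals(original));   // true
            System.out.println(predicted == utf.length);    // true (5 bytes)
        }
    }
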
*/ @@ -37,42 +39,46 @@ struct __db_out_stream { void *handle; int (*callback) __P((void *, const void *)); }; + +#define Db __db +#define Dbc __dbc +#define Dbt __db_dbt +#define DbEnv __db_env +#define DbLock __db_lock_u +#define DbLogc __db_log_cursor +#define DbLsn __db_lsn +#define DbMpoolFile __db_mpoolfile +#define DbSequence __db_sequence +#define DbTxn __db_txn + +/* Suppress a compilation warning for an unused symbol */ +void *unused = SWIG_JavaThrowException; %} -%rename(Db) __db; -%rename(Dbt) __db_dbt; -%rename(DbEnv) __db_env; -%rename(DbLock) __db_lock_u; -%rename(DbLogc) __db_log_cursor; -%rename(DbLsn) __db_lsn; -%rename(DbTxn) __db_txn; -%rename(Dbc) __dbc; -%rename(DbMpoolFile) __db_mpoolfile; - - -struct __db; typedef struct __db DB; -struct __db_dbt; typedef struct __db_dbt DBT; -struct __db_env; typedef struct __db_env DB_ENV; -struct __db_lock_u; typedef struct __db_lock_u DB_LOCK; -struct __db_log_cursor; typedef struct __db_log_cursor DB_LOGC; -struct __db_lsn; typedef struct __db_lsn DB_LSN; -struct __db_txn; typedef struct __db_txn DB_TXN; -struct __dbc; typedef struct __dbc DBC; -struct __db_mpoolfile; typedef struct __db_mpoolfile DB_MPOOLFILE; +struct Db; typedef struct Db DB; +struct Dbc; typedef struct Dbc DBC; +struct Dbt; typedef struct Dbt DBT; +struct DbEnv; typedef struct DbEnv DB_ENV; +struct DbLock; typedef struct DbLock DB_LOCK; +struct DbLogc; typedef struct DbLogc DB_LOGC; +struct DbLsn; typedef struct DbLsn DB_LSN; +struct DbMpoolFile; typedef struct DbMpoolFile DB_MPOOLFILE; +struct DbSequence; typedef struct Db DB_SEQUENCE; +struct DbTxn; typedef struct DbTxn DB_TXN; /* Methods that allocate new objects */ -%newobject __db::join(DBC **curslist, u_int32_t flags); -%newobject __dbc::dup(u_int32_t flags); -%newobject __db_env::lock_get(u_int32_t locker, +%newobject Db::join(DBC **curslist, u_int32_t flags); +%newobject Db::dup(u_int32_t flags); +%newobject DbEnv::lock_get(u_int32_t locker, u_int32_t flags, const DBT *object, db_lockmode_t lock_mode); -%newobject __db_env::log_cursor(u_int32_t flags); +%newobject DbEnv::log_cursor(u_int32_t flags); -struct __db +struct Db { %extend { JAVA_EXCEPT_ERRNO(DB_RETOK_STD, JDBENV) - __db(DB_ENV *dbenv, u_int32_t flags) { + Db(DB_ENV *dbenv, u_int32_t flags) { DB *self; errno = db_create(&self, dbenv, flags); return (errno == 0) ? self : NULL; @@ -107,10 +113,8 @@ struct __db return self->del(self, txnid, key, flags); } - /* Avoid a name clash in the generated code with __db_err */ - %name(err) JAVA_EXCEPT_NONE - void err_internal(int error, const char *message) { + void err(int error, const char *message) { self->err(self, error, message); } @@ -118,6 +122,10 @@ struct __db self->errx(self, message); } + int_bool get_transactional() { + return self->get_transactional(self); + } + #ifndef SWIGJAVA int fd() { int ret; @@ -168,14 +176,17 @@ struct __db return ret; } - // These methods are implemented in Java to avoid wrapping the object - // on every call. + /* + * This method is implemented in Java to avoid wrapping the object on + * every call. 
+ */ #ifndef SWIGJAVA DB_ENV *get_env() { DB_ENV *env; errno = self->get_env(self, &env); return env; } +#endif const char *get_errpfx() { const char *ret; @@ -183,7 +194,6 @@ struct __db self->get_errpfx(self, &ret); return ret; } -#endif u_int32_t get_flags() { u_int32_t ret; @@ -262,17 +272,6 @@ struct __db return ret; } - u_int32_t get_flags_raw() { - errno = 0; - return self->flags; - } - - int_bool get_transactional() { - int ret; - errno = self->get_transactional(self, &ret); - return ret; - } - DBTYPE get_type() { DBTYPE type; errno = self->get_type(self, &type); @@ -358,16 +357,18 @@ struct __db return self->set_encrypt(self, passwd, flags); } + JAVA_EXCEPT_NONE #ifndef SWIGJAVA - void set_errcall(void (*db_errcall_fcn)(const char *, char *)) { + void set_errcall(void (*db_errcall_fcn)(const DB_ENV *, const char *, const char *)) { self->set_errcall(self, db_errcall_fcn); } +#endif /* SWIGJAVA */ void set_errpfx(const char *errpfx) { self->set_errpfx(self, errpfx); } -#endif + JAVA_EXCEPT(DB_RETOK_STD, DB2JDBENV) db_ret_t set_feedback(void (*db_feedback_fcn)(DB *, int, int)) { return self->set_feedback(self, db_feedback_fcn); } @@ -393,6 +394,12 @@ struct __db return self->set_lorder(self, lorder); } +#ifndef SWIGJAVA + void set_msgcall(void (*db_msgcall_fcn)(const DB_ENV *, const char *)) { + self->set_msgcall(self, db_msgcall_fcn); + } +#endif /* SWIGJAVA */ + db_ret_t set_pagesize(u_int32_t pagesize) { return self->set_pagesize(self, pagesize); } @@ -424,9 +431,9 @@ struct __db } JAVA_EXCEPT_ERRNO(DB_RETOK_STD, DB2JDBENV) - void *stat(u_int32_t flags) { + void *stat(DB_TXN *txnid, u_int32_t flags) { void *statp; - errno = self->stat(self, &statp, flags); + errno = self->stat(self, txnid, &statp, flags); return (errno == 0) ? statp : NULL; } @@ -447,8 +454,8 @@ struct __db return self->upgrade(self, file, flags); } - JAVA_EXCEPT(DB_RETOK_STD, NULL) - db_ret_t verify(const char *file, const char *database, + JAVA_EXCEPT_ERRNO(DB_RETOK_STD, NULL) + int_bool verify(const char *file, const char *database, struct __db_out_stream outfile, u_int32_t flags) { /* * We can't easily #include "dbinc/db_ext.h" because of name @@ -456,14 +463,19 @@ struct __db */ extern int __db_verify_internal __P((DB *, const char *, const char *, void *, int (*)(void *, const void *), u_int32_t)); - return __db_verify_internal(self, file, database, + errno = __db_verify_internal(self, file, database, outfile.handle, outfile.callback, flags); + if (errno == DB_VERIFY_BAD) { + errno = 0; + return 0; + } else + return 1; } } }; -struct __dbc +struct Dbc { %extend { JAVA_EXCEPT(DB_RETOK_STD, NULL) @@ -507,11 +519,11 @@ struct __dbc }; -struct __db_env +struct DbEnv { %extend { JAVA_EXCEPT_ERRNO(DB_RETOK_STD, NULL) - __db_env(u_int32_t flags) { + DbEnv(u_int32_t flags) { DB_ENV *self = NULL; errno = db_env_create(&self, flags); return (errno == 0) ? 
self : NULL; @@ -556,14 +568,12 @@ struct __db_env return ret; } -#ifndef SWIGJAVA const char *get_errpfx() { const char *ret; errno = 0; self->get_errpfx(self, &ret); return ret; } -#endif u_int32_t get_flags() { u_int32_t ret; @@ -633,29 +643,33 @@ struct __db_env } JAVA_EXCEPT_NONE - void set_errcall(void (*db_errcall_fcn)(const char *, char *)) { + void set_errcall(void (*db_errcall_fcn)(const DB_ENV *, const char *, const char *)) { self->set_errcall(self, db_errcall_fcn); } -#ifndef SWIGJAVA void set_errpfx(const char *errpfx) { self->set_errpfx(self, errpfx); } -#endif JAVA_EXCEPT(DB_RETOK_STD, JDBENV) db_ret_t set_flags(u_int32_t flags, int_bool onoff) { return self->set_flags(self, flags, onoff); } - db_ret_t set_feedback(void (*db_feedback_fcn)(DB_ENV *, int, int)) { - return self->set_feedback(self, db_feedback_fcn); + db_ret_t set_feedback(void (*env_feedback_fcn)(DB_ENV *, int, int)) { + return self->set_feedback(self, env_feedback_fcn); } db_ret_t set_mp_mmapsize(size_t mp_mmapsize) { return self->set_mp_mmapsize(self, mp_mmapsize); } + JAVA_EXCEPT_NONE + void set_msgcall(void (*db_msgcall_fcn)(const DB_ENV *, const char *)) { + self->set_msgcall(self, db_msgcall_fcn); + } + + JAVA_EXCEPT(DB_RETOK_STD, JDBENV) db_ret_t set_paniccall(void (*db_panic_fcn)(DB_ENV *, int)) { return self->set_paniccall(self, db_panic_fcn); } @@ -699,8 +713,7 @@ struct __db_env return self->set_verbose(self, which, onoff); } - // Lock functions - // + /* Lock functions */ JAVA_EXCEPT_ERRNO(DB_RETOK_STD, JDBENV) struct __db_lk_conflicts get_lk_conflicts() { struct __db_lk_conflicts ret; @@ -771,7 +784,7 @@ struct __db_env } #ifndef SWIGJAVA - // For Java, this is defined in native code + /* For Java, this is defined in native code */ db_ret_t lock_vec(u_int32_t locker, u_int32_t flags, DB_LOCKREQ *list, int offset, int nlist) { @@ -809,8 +822,7 @@ struct __db_env return self->set_lk_max_objects(self, max); } - // Log functions - // + /* Log functions */ JAVA_EXCEPT_ERRNO(DB_RETOK_STD, JDBENV) u_int32_t get_lg_bsize() { u_int32_t ret; @@ -837,7 +849,7 @@ struct __db_env } char **log_archive(u_int32_t flags) { - char **list; + char **list = NULL; errno = self->log_archive(self, &list, flags); return (errno == 0) ? list : NULL; } @@ -893,8 +905,7 @@ struct __db_env return self->set_lg_regionmax(self, lg_regionmax); } - // Memory pool functions - // + /* Memory pool functions */ JAVA_EXCEPT_ERRNO(DB_RETOK_STD, JDBENV) jlong get_cachesize() { u_int32_t gbytes, bytes; @@ -932,8 +943,7 @@ struct __db_env return ret; } - // Transaction functions - // + /* Transaction functions */ u_int32_t get_tx_max() { u_int32_t ret; errno = self->get_tx_max(self, &ret); @@ -990,17 +1000,16 @@ struct __db_env return (errno == 0) ? 
statp : NULL; } - // Replication functions - // + /* Replication functions */ jlong get_rep_limit() { u_int32_t gbytes, bytes; errno = self->get_rep_limit(self, &gbytes, &bytes); return (jlong)gbytes * GIGABYTE + bytes; } - int rep_elect(int nsites, int priority, u_int32_t timeout) { + int rep_elect(int nsites, int nvotes, int priority, u_int32_t timeout, u_int32_t flags) { int id; - errno = self->rep_elect(self, nsites, priority, timeout, &id); + errno = self->rep_elect(self, nsites, nvotes, priority, timeout, &id, flags); return id; } @@ -1034,13 +1043,13 @@ struct __db_env return self->set_rep_transport(self, envid, send); } - // Convert DB errors to strings + /* Convert DB errors to strings */ JAVA_EXCEPT_NONE static const char *strerror(int error) { return db_strerror(error); } - // Versioning information + /* Versioning information */ static int get_version_major() { return DB_VERSION_MAJOR; } @@ -1060,40 +1069,18 @@ struct __db_env }; -struct __db_txn +struct DbLock { %extend { - JAVA_EXCEPT(DB_RETOK_STD, NULL) - db_ret_t abort() { - return self->abort(self); - } - - db_ret_t commit(u_int32_t flags) { - return self->commit(self, flags); - } - - db_ret_t discard(u_int32_t flags) { - return self->discard(self, flags); - } - JAVA_EXCEPT_NONE - u_int32_t id() { - return self->id(self); - } - - JAVA_EXCEPT(DB_RETOK_STD, TXN2JDBENV) - db_ret_t prepare(u_int8_t *gid) { - return self->prepare(self, gid); - } - - db_ret_t set_timeout(db_timeout_t timeout, u_int32_t flags) { - return self->set_timeout(self, timeout, flags); + ~DbLock() { + __os_free(NULL, self); } } }; -struct __db_log_cursor +struct DbLogc { %extend { JAVA_EXCEPT(DB_RETOK_STD, NULL) @@ -1109,22 +1096,12 @@ struct __db_log_cursor }; -struct __db_lock_u -{ -%extend { - JAVA_EXCEPT_NONE - ~__db_lock_u() { - __os_free(NULL, self); - } -} -}; - - -struct __db_lsn +#ifndef SWIGJAVA +struct DbLsn { %extend { JAVA_EXCEPT_ERRNO(DB_RETOK_STD, NULL) - __db_lsn(u_int32_t file, u_int32_t offset) { + DbLsn(u_int32_t file, u_int32_t offset) { DB_LSN *self = NULL; errno = __os_malloc(NULL, sizeof (DB_LSN), &self); if (errno == 0) { @@ -1135,7 +1112,7 @@ struct __db_lsn } JAVA_EXCEPT_NONE - ~__db_lsn() { + ~DbLsn() { __os_free(NULL, self); } @@ -1148,8 +1125,10 @@ struct __db_lsn } } }; +#endif + -struct __db_mpoolfile +struct DbMpoolFile { %extend { JAVA_EXCEPT_ERRNO(DB_RETOK_STD, NULL) @@ -1183,7 +1162,7 @@ struct __db_mpoolfile return (jlong)gbytes * GIGABYTE + bytes; } - // New method - no backwards compatibility version + /* New method - no backwards compatibility version */ JAVA_EXCEPT(DB_RETOK_STD, NULL) db_ret_t set_maxsize(jlong bytes) { return self->set_maxsize(self, @@ -1192,3 +1171,131 @@ struct __db_mpoolfile } } }; + + +struct DbSequence +{ +%extend { + JAVA_EXCEPT_ERRNO(DB_RETOK_STD, NULL) + DbSequence(DB *db, u_int32_t flags) { + DB_SEQUENCE *self = NULL; + errno = db_sequence_create(&self, db, flags); + return self; + } + + JAVA_EXCEPT(DB_RETOK_STD, NULL) + db_ret_t close(u_int32_t flags) { + return self->close(self, flags); + } + + JAVA_EXCEPT_ERRNO(DB_RETOK_STD, NULL) + db_seq_t get(DB_TXN *txnid, int32_t delta, u_int32_t flags) { + db_seq_t ret = 0; + errno = self->get(self, txnid, delta, &ret, flags); + return ret; + } + + int32_t get_cachesize() { + int32_t ret = 0; + errno = self->get_cachesize(self, &ret); + return ret; + } + + DB *get_db() { + DB *ret = NULL; + errno = self->get_db(self, &ret); + return ret; + } + + u_int32_t get_flags() { + u_int32_t ret = 0; + errno = self->get_flags(self, &ret); + return ret; + } 
+ + JAVA_EXCEPT(DB_RETOK_STD, NULL) + db_ret_t get_key(DBT *key) { + return self->get_key(self, key); + } + + JAVA_EXCEPT_ERRNO(DB_RETOK_STD, NULL) + db_seq_t get_range_min() { + db_seq_t ret = 0; + errno = self->get_range(self, &ret, NULL); + return ret; + } + + db_seq_t get_range_max() { + db_seq_t ret = 0; + errno = self->get_range(self, NULL, &ret); + return ret; + } + + JAVA_EXCEPT(DB_RETOK_STD, NULL) + db_ret_t initial_value(db_seq_t val) { + return self->initial_value(self, val); + } + + db_ret_t open(DB_TXN *txnid, DBT *key, u_int32_t flags) { + return self->open(self, txnid, key, flags); + } + + db_ret_t remove(DB_TXN *txnid, u_int32_t flags) { + return self->remove(self, txnid, flags); + } + + db_ret_t set_cachesize(int32_t size) { + return self->set_cachesize(self, size); + } + + db_ret_t set_flags(u_int32_t flags) { + return self->set_flags(self, flags); + } + + db_ret_t set_range(db_seq_t min, db_seq_t max) { + return self->set_range(self, min, max); + } + + JAVA_EXCEPT_ERRNO(DB_RETOK_STD, NULL) + DB_SEQUENCE_STAT *stat(u_int32_t flags) { + DB_SEQUENCE_STAT *ret = NULL; + errno = self->stat(self, &ret, flags); + return ret; + } +} +}; + + +struct DbTxn +{ +%extend { + JAVA_EXCEPT(DB_RETOK_STD, NULL) + db_ret_t abort() { + return self->abort(self); + } + + db_ret_t commit(u_int32_t flags) { + return self->commit(self, flags); + } + + db_ret_t discard(u_int32_t flags) { + return self->discard(self, flags); + } + + JAVA_EXCEPT_NONE + u_int32_t id() { + return self->id(self); + } + + JAVA_EXCEPT(DB_RETOK_STD, TXN2JDBENV) + db_ret_t prepare(u_int8_t *gid) { + return self->prepare(self, gid); + } + + db_ret_t set_timeout(db_timeout_t timeout, u_int32_t flags) { + return self->set_timeout(self, timeout, flags); + } +} +}; + + diff --git a/db/libdb_java/db_java.i b/db/libdb_java/db_java.i index 98c8dcffc..d84d210c5 100644 --- a/db/libdb_java/db_java.i +++ b/db/libdb_java/db_java.i @@ -20,7 +20,7 @@ * the native part at finalization time. These are exactly the cases where C * applications manage the memory for the handles. */ -%typemap(javafinalize) DbLsn, DbLock %{ +%typemap(javafinalize) struct DbLsn, struct DbLock %{ protected void finalize() { try { delete(); @@ -31,8 +31,15 @@ } %} -// Destructors -%rename(open0) open; +%typemap(javaimports) SWIGTYPE %{ +import com.sleepycat.db.*; +import java.util.Comparator; +%} + +/* Class names */ +%rename(LogSequenceNumber) DbLsn; + +/* Destructors */ %rename(close0) close; %rename(remove0) remove; %rename(rename0) rename; @@ -41,45 +48,50 @@ %rename(commit0) commit; %rename(discard0) discard; -// Special case methods +/* Special case methods */ %rename(set_tx_timestamp0) set_tx_timestamp; -%rename(setFeedbackHandler) set_feedback; -%rename(setErrorHandler) set_errcall; -%rename(setPanicHandler) set_paniccall; -%rename(get) pget; - -// Extra code in the Java classes -%typemap(javacode) DbEnv %{ - // Internally, the JNI layer creates a global reference to each DbEnv, - // which can potentially be different to this. We keep a copy here so - // we can clean up after destructors. 
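The DbSequence block above wraps the new sequence API (db_sequence_create and the DB_SEQUENCE handle). How the generated Java class is actually driven depends on the SWIG typemaps; the sketch below only assumes the mappings suggested elsewhere in this patch (DBT to DatabaseEntry, DB_TXN to DbTxn, u_int32_t flags to int, db_seq_t to long) and that the generated classes live in com.sleepycat.db.internal, as the JNI symbol names later in the patch imply. Treat it as an illustration of the call sequence, not as verbatim generated-API usage.

    import com.sleepycat.db.DatabaseEntry;
    import com.sleepycat.db.DatabaseException;
    import com.sleepycat.db.internal.Db;          // assumed package/class names
    import com.sleepycat.db.internal.DbSequence;
    import com.sleepycat.db.internal.DbTxn;

    public class SequenceSketch {
        /*
         * Allocate the next id from a sequence stored under 'key' in an
         * already-open database handle.  Flag arguments are left at 0 here;
         * real code would pass values from the generated DbConstants.
         */
        static long nextId(Db db, DbTxn txn, DatabaseEntry key)
            throws DatabaseException {
            DbSequence seq = new DbSequence(db, 0);   // db_sequence_create()
            try {
                seq.initial_value(1L);                // start counting at 1
                seq.open(txn, key, 0);                // e.g. DB_CREATE in real code
                return seq.get(txn, 1, 0);            // delta 1; db_seq_t returned as long
            } finally {
                seq.close(0);
            }
        }
    }
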
- private Object dbenv_ref; - private DbAppDispatch app_dispatch_handler; - private DbEnvFeedbackHandler env_feedback_handler; - private DbErrorHandler error_handler; - private DbPanicHandler panic_handler; - private DbRepTransport rep_transport_handler; - private String errpfx; + +/* Extra code in the Java classes */ +%typemap(javacode) struct DbEnv %{ + /* + * Internally, the JNI layer creates a global reference to each DbEnv, + * which can potentially be different to this. We keep a copy here so + * we can clean up after destructors. + */ + private long dbenv_ref; + public Environment wrapper; + + private LogRecordHandler app_dispatch_handler; + private FeedbackHandler env_feedback_handler; + private ErrorHandler error_handler; + private MessageHandler message_handler; + private PanicHandler panic_handler; + private ReplicationTransport rep_transport_handler; + private java.io.OutputStream error_stream; + private java.io.OutputStream message_stream; public static class RepProcessMessage { public int envid; } - // Called by the public DbEnv constructor and for private environments - // by the Db constructor. + /* + * Called by the public DbEnv constructor and for private environments + * by the Db constructor. + */ void initialize() { dbenv_ref = db_java.initDbEnvRef0(this, this); - // Start with System.err as the default error stream. + /* Start with System.err as the default error stream. */ set_error_stream(System.err); + set_message_stream(System.out); } void cleanup() { swigCPtr = 0; db_java.deleteRef0(dbenv_ref); - dbenv_ref = null; + dbenv_ref = 0L; } - public synchronized void close(int flags) throws DbException { + public synchronized void close(int flags) throws DatabaseException { try { close0(flags); } finally { @@ -87,151 +99,132 @@ } } - private final int handle_app_dispatch(Dbt dbt, DbLsn lsn, int recops) { - return app_dispatch_handler.appDispatch(this, dbt, lsn, recops); + private final int handle_app_dispatch(DatabaseEntry dbt, LogSequenceNumber lsn, int recops) { + return app_dispatch_handler.handleLogRecord(wrapper, dbt, lsn, RecoveryOperation.fromFlag(recops)); + } + + public LogRecordHandler get_app_dispatch() { + return app_dispatch_handler; } private final void handle_env_feedback(int opcode, int percent) { - env_feedback_handler.feedback(this, opcode, percent); + if (opcode == DbConstants.DB_RECOVER) + env_feedback_handler.recoveryFeedback(wrapper, percent); + /* No other environment feedback type supported. 
*/ } - private final void handle_error(String msg) { - error_handler.error(this.errpfx, msg); + public FeedbackHandler get_feedback() { + return env_feedback_handler; } - private final void handle_panic(DbException e) { - panic_handler.panic(this, e); + private final void handle_error(String errpfx, String msg) { + error_handler.error(wrapper, errpfx, msg); } - private final int handle_rep_transport(Dbt control, Dbt rec, - DbLsn lsn, int flags, int envid) - throws DbException { - return rep_transport_handler.send(this, control, rec, lsn, - flags, envid); + public ErrorHandler get_errcall() { + return error_handler; } - - public void lock_vec(/*u_int32_t*/ int locker, int flags, - DbLockRequest[] list, int offset, int count) throws DbException { - db_javaJNI.DbEnv_lock_vec(swigCPtr, locker, flags, list, - offset, count); + + private final void handle_message(String msg) { + message_handler.message(wrapper, msg); } - public void open(String db_home, int flags, int mode) - throws DbException, java.io.FileNotFoundException { - /* Java is always threaded */ - flags |= Db.DB_THREAD; - open0(db_home, flags, mode); + public MessageHandler get_msgcall() { + return message_handler; } - public synchronized void remove(String db_home, int flags) - throws DbException, java.io.FileNotFoundException { - try { - remove0(db_home, flags); - } finally { - cleanup(); - } + private final void handle_panic(DatabaseException e) { + panic_handler.panic(wrapper, e); } - /** - * @deprecated Replaced in Berkeley DB 4.2 by {@link #set_cachesize(long,int)} - */ - public void set_cachesize(int gbytes, int bytes, int ncache) - throws DbException { - set_cachesize((long)gbytes * Db.GIGABYTE + bytes, ncache); + public PanicHandler get_paniccall() { + return panic_handler; } - public String get_errpfx() { - return this.errpfx; + private final int handle_rep_transport(DatabaseEntry control, DatabaseEntry rec, + LogSequenceNumber lsn, int envid, int flags) + throws DatabaseException { + return rep_transport_handler.send(wrapper, control, rec, lsn, envid, + (flags & DbConstants.DB_REP_NOBUFFER) != 0, + (flags & DbConstants.DB_REP_PERMANENT) != 0); + } + + public void lock_vec(/*u_int32_t*/ int locker, int flags, + LockRequest[] list, int offset, int count) throws DatabaseException { + db_javaJNI.DbEnv_lock_vec(swigCPtr, locker, flags, list, + offset, count); } - /** - * @deprecated Replaced in Berkeley DB 4.2 by {@link #setErrorHandler(DbErrorHandler)} - */ - public void set_errcall(DbErrcall db_errcall_fcn) throws DbException { - final DbErrcall ferrcall = db_errcall_fcn; + public synchronized void remove(String db_home, int flags) + throws DatabaseException, java.io.FileNotFoundException { try { - setErrorHandler(new DbErrorHandler() { - public void error(String prefix, String buffer) { - ferrcall.errcall(prefix, buffer); - } - }); - } - catch (DbException dbe) { - // setErrorHandler throws an exception, - // but set_error_stream does not. - // If it does happen, report it. 
- System.err.println("Exception during DbEnv.setErrorHandler: " + dbe); - dbe.printStackTrace(System.err); + remove0(db_home, flags); + } finally { + cleanup(); } } public void set_error_stream(java.io.OutputStream stream) { + error_stream = stream; final java.io.PrintWriter pw = new java.io.PrintWriter(stream); - try { - setErrorHandler(new DbErrorHandler() { - public void error(String prefix, String buf) { - if (prefix != null) - pw.print(prefix + ": "); - pw.println(buf); - pw.flush(); - } - }); - } - catch (DbException dbe) { - // setErrorHandler throws an exception, - // but set_error_stream does not. - // If it does happen, report it. - System.err.println("Exception during DbEnv.setErrorHandler: " + dbe); - dbe.printStackTrace(System.err); - } + set_errcall(new ErrorHandler() { + public void error(Environment env, + String prefix, String buf) /* no exception */ { + if (prefix != null) + pw.print(prefix + ": "); + pw.println(buf); + pw.flush(); + } + }); } - public void set_errpfx(String errpfx) { - this.errpfx = errpfx; + public java.io.OutputStream get_error_stream() { + return error_stream; } - /** - * @deprecated Replaced in Berkeley DB 4.2 by {@link #setFeedbackHandler(DbEnvFeedbackHandler)} - */ - public void set_feedback(DbEnvFeedback feedback) throws DbException { - final DbEnvFeedback ffeedback = feedback; - setFeedbackHandler(new DbEnvFeedbackHandler() { - public void feedback(DbEnv env, int opcode, int percent) { - ffeedback.feedback(env, opcode, percent); + + public void set_message_stream(java.io.OutputStream stream) { + message_stream = stream; + final java.io.PrintWriter pw = new java.io.PrintWriter(stream); + set_msgcall(new MessageHandler() { + public void message(Environment env, String msg) + /* no exception */ { + pw.println(msg); + pw.flush(); } }); } - /** - * @deprecated Replaced in Berkeley DB 4.2 by {@link #set_rep_limit(long)} - */ - public void set_rep_limit(int gbytes, int bytes) - throws DbException { - set_rep_limit((long)gbytes * Db.GIGABYTE + bytes); + public java.io.OutputStream get_message_stream() { + return message_stream; } - + public void set_tx_timestamp(java.util.Date timestamp) { set_tx_timestamp0(timestamp.getTime()/1000); } %} -%typemap(javacode) Db %{ +%typemap(javacode) struct Db %{ /* package */ static final int GIGABYTE = 1 << 30; - // Internally, the JNI layer creates a global reference to each Db, - // which can potentially be different to this. We keep a copy here so - // we can clean up after destructors. - private Object db_ref; + /* + * Internally, the JNI layer creates a global reference to each Db, + * which can potentially be different to this. We keep a copy here so + * we can clean up after destructors. 
+ */ + private long db_ref; private DbEnv dbenv; private boolean private_dbenv; - private DbAppendRecno append_recno_handler; - private DbBtreeCompare bt_compare_handler; - private DbBtreePrefix bt_prefix_handler; - private DbDupCompare dup_compare_handler; - private DbFeedbackHandler db_feedback_handler; - private DbHash h_hash_handler; - private DbSecondaryKeyCreate seckey_create_handler; - - // Called by the Db constructor + + public Database wrapper; + private RecordNumberAppender append_recno_handler; + private Comparator bt_compare_handler; + private BtreePrefixCalculator bt_prefix_handler; + private Comparator dup_compare_handler; + private FeedbackHandler db_feedback_handler; + private Hasher h_hash_handler; + private SecondaryKeyCreator seckey_create_handler; + + /* Called by the Db constructor */ private void initialize(DbEnv dbenv) { if (dbenv == null) { private_dbenv = true; @@ -245,13 +238,13 @@ private void cleanup() { swigCPtr = 0; db_java.deleteRef0(db_ref); - db_ref = null; + db_ref = 0L; if (private_dbenv) dbenv.cleanup(); dbenv = null; } - public synchronized void close(int flags) throws DbException { + public synchronized void close(int flags) throws DatabaseException { try { close0(flags); } finally { @@ -259,71 +252,76 @@ } } - public DbEnv get_env() throws DbException { + public DbEnv get_env() throws DatabaseException { return dbenv; } - private final void handle_append_recno(Dbt data, int recno) - throws DbException { - append_recno_handler.dbAppendRecno(this, data, recno); + private final void handle_append_recno(DatabaseEntry data, int recno) + throws DatabaseException { + append_recno_handler.appendRecordNumber(wrapper, data, recno); + } + + public RecordNumberAppender get_append_recno() { + return append_recno_handler; } - private final int handle_bt_compare(Dbt dbt1, Dbt dbt2) { - return bt_compare_handler.compare(this, dbt1, dbt2); + private final int handle_bt_compare(DatabaseEntry dbt1, DatabaseEntry dbt2) { + return bt_compare_handler.compare(dbt1, dbt2); } - private final int handle_bt_prefix(Dbt dbt1, Dbt dbt2) { - return bt_prefix_handler.prefix(this, dbt1, dbt2); + public Comparator get_bt_compare() { + return bt_compare_handler; + } + + private final int handle_bt_prefix(DatabaseEntry dbt1, DatabaseEntry dbt2) { + return bt_prefix_handler.prefix(wrapper, dbt1, dbt2); + } + + public BtreePrefixCalculator get_bt_prefix() { + return bt_prefix_handler; } private final void handle_db_feedback(int opcode, int percent) { - db_feedback_handler.feedback(this, opcode, percent); + if (opcode == DbConstants.DB_UPGRADE) + db_feedback_handler.upgradeFeedback(wrapper, percent); + else if (opcode == DbConstants.DB_VERIFY) + db_feedback_handler.upgradeFeedback(wrapper, percent); + /* No other database feedback types known. */ } - private final int handle_dup_compare(Dbt dbt1, Dbt dbt2) { - return dup_compare_handler.compareDuplicates(this, dbt1, dbt2); + public FeedbackHandler get_feedback() { + return db_feedback_handler; } - private final int handle_h_hash(byte[] data, int len) { - return h_hash_handler.hash(this, data, len); + private final int handle_dup_compare(DatabaseEntry dbt1, DatabaseEntry dbt2) { + return dup_compare_handler.compare(dbt1, dbt2); } - private final int handle_seckey_create(Dbt key, Dbt data, Dbt result) - throws DbException { - return seckey_create_handler.secondaryKeyCreate( - this, key, data, result); + public Comparator get_dup_compare() { + return dup_compare_handler; } - /** - * Determine if a database was configured to store data. 
- * The only algorithm currently available is AES. - * - * @see #set_encrypt - * @return true if the database contents are encrypted. - */ - public boolean isEncrypted() { - return (get_encrypt_flags() != 0); + private final int handle_h_hash(byte[] data, int len) { + return h_hash_handler.hash(wrapper, data, len); } - public void open(DbTxn txnid, String file, String database, - int type, int flags, int mode) - throws DbException, java.io.FileNotFoundException, - DbDeadlockException, DbLockNotGrantedException { - /* Java is always threaded */ - flags |= Db.DB_THREAD; - open0(txnid, file, database, type, flags, mode); + public Hasher get_h_hash() { + return h_hash_handler; } - /** - * @deprecated Replaced in Berkeley DB 4.2 by {@link #get(DbTxn,Dbt,Dbt,Dbt,int)} - */ - public int pget(DbTxn txnid, Dbt key, Dbt pkey, Dbt data, int flags) - throws DbException { - return get(txnid, key, pkey, data, flags); + private final int handle_seckey_create(DatabaseEntry key, DatabaseEntry data, DatabaseEntry result) + throws DatabaseException { + return seckey_create_handler.createSecondaryKey( + (SecondaryDatabase)wrapper, key, data, result) ? + 0 : DbConstants.DB_DONOTINDEX; + } + + public SecondaryKeyCreator get_seckey_create() { + return seckey_create_handler; } public synchronized void remove(String file, String database, int flags) - throws DbException, java.io.FileNotFoundException { + throws DatabaseException, java.io.FileNotFoundException { try { remove0(file, database, flags); } finally { @@ -333,7 +331,7 @@ public synchronized void rename(String file, String database, String newname, int flags) - throws DbException, java.io.FileNotFoundException { + throws DatabaseException, java.io.FileNotFoundException { try { rename0(file, database, newname, flags); } finally { @@ -341,118 +339,105 @@ } } - public synchronized void verify(String file, String database, + public synchronized boolean verify(String file, String database, java.io.OutputStream outfile, int flags) - throws DbException, java.io.FileNotFoundException { + throws DatabaseException, java.io.FileNotFoundException { try { - verify0(file, database, outfile, flags); + return verify0(file, database, outfile, flags); } finally { cleanup(); } } - /** - * @deprecated Replaced in Berkeley DB 4.2 by {@link #set_cachesize(long,int)} - */ - public void set_cachesize(int gbytes, int bytes, int ncache) - throws DbException { - set_cachesize((long)gbytes * Db.GIGABYTE + bytes, ncache); + public ErrorHandler get_errcall() { + return dbenv.get_errcall(); } - /** - * @deprecated Replaced in Berkeley DB 4.2 by {@link #setErrorHandler(DbErrorHandler)} - */ - public void set_errcall(DbErrcall db_errcall_fcn) { - final DbErrcall ferrcall = db_errcall_fcn; - try { - dbenv.setErrorHandler(new DbErrorHandler() { - public void error(String prefix, String str) { - ferrcall.errcall(prefix, str); - } - }); - } - catch (DbException dbe) { - // setErrorHandler throws an exception, - // but set_errcall does not. - // If it does happen, report it. 
- System.err.println("Exception during DbEnv.setErrorHandler: " + dbe); - dbe.printStackTrace(System.err); - } - + public void set_errcall(ErrorHandler db_errcall_fcn) { + dbenv.set_errcall(db_errcall_fcn); } - public void setErrorHandler(DbErrorHandler db_errcall_fcn) { - dbenv.setErrorHandler(db_errcall_fcn); + public MessageHandler get_msgcall() { + return dbenv.get_msgcall(); } - public String get_errpfx() { - return dbenv.get_errpfx(); + public void set_msgcall(MessageHandler db_msgcall_fcn) { + dbenv.set_msgcall(db_msgcall_fcn); } - public void set_errpfx(String errpfx) { - dbenv.set_errpfx(errpfx); + public java.io.OutputStream get_error_stream() { + return dbenv.get_error_stream(); } public void set_error_stream(java.io.OutputStream stream) { dbenv.set_error_stream(stream); } - /** - * @deprecated Replaced in Berkeley DB 4.2 by {@link #setFeedbackHandler(DbFeedbackHandler)} - */ - public void set_feedback(DbFeedback feedback) throws DbException { - final DbFeedback ffeedback = feedback; - setFeedbackHandler(new DbFeedbackHandler() { - public void feedback(Db db, int opcode, int percent) { - ffeedback.feedback(db, opcode, percent); - } - }); + public java.io.OutputStream get_message_stream() { + return dbenv.get_message_stream(); } - public void setPanicHandler(DbPanicHandler db_panic_fcn) throws DbException { - dbenv.setPanicHandler(db_panic_fcn); + public void set_message_stream(java.io.OutputStream stream) { + dbenv.set_message_stream(stream); } - // Don't remove these - special comments used by s_java to add constants - // BEGIN-JAVA-SPECIAL-CONSTANTS - // END-JAVA-SPECIAL-CONSTANTS + public void set_paniccall(PanicHandler db_panic_fcn) + throws DatabaseException { + dbenv.set_paniccall(db_panic_fcn); + } - static { - // BEGIN-JAVA-CONSTANT-INITIALIZATION - // END-JAVA-CONSTANT-INITIALIZATION + public PanicHandler get_paniccall() { + return dbenv.get_paniccall(); } %} -%typemap(javacode) Dbc %{ - public synchronized void close() throws DbException { +%typemap(javacode) struct Dbc %{ + public synchronized void close() throws DatabaseException { try { close0(); } finally { swigCPtr = 0; } } +%} - /** - * @deprecated Replaced in Berkeley DB 4.2 by {@link #get(Dbt,Dbt,Dbt,int)} - */ - public int pget(Dbt key, Dbt pkey, Dbt data, int flags) - throws DbException { - return get(key, pkey, data, flags); +%typemap(javacode) struct DbLock %{ + public Lock wrapper; +%} + +%typemap(javacode) struct DbLogc %{ + public synchronized void close(int flags) throws DatabaseException { + try { + close0(flags); + } finally { + swigCPtr = 0; + } } %} -%typemap(javacode) DbLogc %{ - public synchronized void close(int flags) throws DbException { +%typemap(javacode) struct DbSequence %{ + public Sequence wrapper; + + public synchronized void close(int flags) throws DatabaseException { try { close0(flags); } finally { swigCPtr = 0; } } + + public synchronized void remove(DbTxn txn, int flags) + throws DatabaseException { + try { + remove0(txn, flags); + } finally { + swigCPtr = 0; + } + } %} -%typemap(javacode) DbTxn %{ - public void abort() throws DbException { +%typemap(javacode) struct DbTxn %{ + public void abort() throws DatabaseException { try { abort0(); } finally { @@ -460,7 +445,7 @@ } } - public void commit(int flags) throws DbException { + public void commit(int flags) throws DatabaseException { try { commit0(flags); } finally { @@ -468,7 +453,7 @@ } } - public void discard(int flags) throws DbException { + public void discard(int flags) throws DatabaseException { try { discard0(flags); } 
finally { @@ -476,11 +461,11 @@ } } - // We override Object.equals because it is possible for - // the Java API to create multiple DbTxns that reference - // the same underlying object. This can happen for example - // during DbEnv.txn_recover(). - // + /* + * We override Object.equals because it is possible for the Java API to + * create multiple DbTxns that reference the same underlying object. + * This can happen for example during DbEnv.txn_recover(). + */ public boolean equals(Object obj) { if (this == obj) @@ -493,59 +478,59 @@ return false; } - // We must override Object.hashCode whenever we override - // Object.equals() to enforce the maxim that equal objects - // have the same hashcode. - // + /* + * We must override Object.hashCode whenever we override + * Object.equals() to enforce the maxim that equal objects have the + * same hashcode. + */ public int hashCode() { return ((int)swigCPtr ^ (int)(swigCPtr >> 32)); } %} -%native(initDbEnvRef0) jobject initDbEnvRef0(DB_ENV *self, void *handle); -%native(initDbRef0) jobject initDbRef0(DB *self, void *handle); -%native(deleteRef0) void deleteRef0(jobject ref); +%native(initDbEnvRef0) jlong initDbEnvRef0(DB_ENV *self, void *handle); +%native(initDbRef0) jlong initDbRef0(DB *self, void *handle); +%native(deleteRef0) void deleteRef0(jlong ref); %native(getDbEnv0) DB_ENV *getDbEnv0(DB *self); %{ -JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db_1javaJNI_initDbEnvRef0( +JNIEXPORT jlong JNICALL +Java_com_sleepycat_db_internal_db_1javaJNI_initDbEnvRef0( JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { DB_ENV *self = *(DB_ENV **)&jarg1; + jlong ret; COMPQUIET(jcls, NULL); DB_ENV_INTERNAL(self) = (void *)(*jenv)->NewGlobalRef(jenv, jarg2); - self->set_errpfx(self, (const char*)self); - return (jobject)DB_ENV_INTERNAL(self); + *(jobject *)&ret = (jobject)DB_ENV_INTERNAL(self); + return (ret); } -JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db_1javaJNI_initDbRef0( +JNIEXPORT jlong JNICALL +Java_com_sleepycat_db_internal_db_1javaJNI_initDbRef0( JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { DB *self = *(DB **)&jarg1; + jlong ret; COMPQUIET(jcls, NULL); DB_INTERNAL(self) = (void *)(*jenv)->NewGlobalRef(jenv, jarg2); - return (jobject)DB_INTERNAL(self); + *(jobject *)&ret = (jobject)DB_INTERNAL(self); + return (ret); } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_deleteRef0( - JNIEnv *jenv, jclass jcls, jobject jref) { - COMPQUIET(jcls, NULL); - - if (jref != NULL) - (*jenv)->DeleteGlobalRef(jenv, jref); -} - -JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db_1javaJNI_getDbRef0( +JNIEXPORT void JNICALL +Java_com_sleepycat_db_internal_db_1javaJNI_deleteRef0( JNIEnv *jenv, jclass jcls, jlong jarg1) { - DB *self = *(DB **)&jarg1; + jobject jref = *(jobject *)&jarg1; COMPQUIET(jcls, NULL); - COMPQUIET(jenv, NULL); - return (jobject)DB_INTERNAL(self); + if (jref != 0L) + (*jenv)->DeleteGlobalRef(jenv, jref); } -JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_getDbEnv0( +JNIEXPORT jlong JNICALL +Java_com_sleepycat_db_internal_db_1javaJNI_getDbEnv0( JNIEnv *jenv, jclass jcls, jlong jarg1) { DB *self = *(DB **)&jarg1; jlong env_cptr; @@ -554,11 +539,12 @@ JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_getDbEnv0( COMPQUIET(jcls, NULL); *(DB_ENV **)&env_cptr = self->dbenv; - return env_cptr; + return (env_cptr); } JNIEXPORT jboolean JNICALL -Java_com_sleepycat_db_DbUtil_is_1big_1endian(JNIEnv *jenv, jclass clazz) +Java_com_sleepycat_db_internal_DbUtil_is_1big_1endian( + JNIEnv *jenv, jclass clazz) { 
COMPQUIET(jenv, NULL); COMPQUIET(clazz, NULL); diff --git a/db/libdb_java/db_java_wrap.c b/db/libdb_java/db_java_wrap.c index db42beaf3..5b18ff3c5 100644 --- a/db/libdb_java/db_java_wrap.c +++ b/db/libdb_java/db_java_wrap.c @@ -1,6 +1,6 @@ /* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). - * Version 1.3.19 + * Version 1.3.21 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make @@ -26,6 +26,7 @@ typedef enum { SWIG_JavaArithmeticException, SWIG_JavaIllegalArgumentException, SWIG_JavaNullPointerException, + SWIG_JavaDirectorPureVirtual, SWIG_JavaUnknownError } SWIG_JavaExceptionCodes; @@ -34,12 +35,8 @@ typedef struct { const char *java_exception; } SWIG_JavaExceptions_t; -#if defined(SWIG_NOINCLUDE) -void SWIG_JavaThrowException(JNIEnv *jenv, SWIG_JavaExceptionCodes code, const char *msg); -#else - -void SWIG_JavaThrowException(JNIEnv *jenv, SWIG_JavaExceptionCodes code, const char *msg) { +static void SWIG_JavaThrowException(JNIEnv *jenv, SWIG_JavaExceptionCodes code, const char *msg) { jclass excep; static const SWIG_JavaExceptions_t java_exceptions[] = { { SWIG_JavaOutOfMemoryError, "java/lang/OutOfMemoryError" }, @@ -49,6 +46,7 @@ void SWIG_JavaThrowException(JNIEnv *jenv, SWIG_JavaExceptionCodes code, const c { SWIG_JavaArithmeticException, "java/lang/ArithmeticException" }, { SWIG_JavaIllegalArgumentException, "java/lang/IllegalArgumentException" }, { SWIG_JavaNullPointerException, "java/lang/NullPointerException" }, + { SWIG_JavaDirectorPureVirtual, "java/lang/RuntimeException" }, { SWIG_JavaUnknownError, "java/lang/UnknownError" }, { (SWIG_JavaExceptionCodes)0, "java/lang/UnknownError" } }; const SWIG_JavaExceptions_t *except_ptr = java_exceptions; @@ -63,7 +61,9 @@ void SWIG_JavaThrowException(JNIEnv *jenv, SWIG_JavaExceptionCodes code, const c } -#endif +/* Contract support */ + +#define SWIG_contract_assert(nullreturn, expr, msg) if (!(expr)) {SWIG_JavaThrowException(jenv, SWIG_JavaIllegalArgumentException, msg); return nullreturn; } else #include "db_config.h" @@ -80,7 +80,8 @@ void SWIG_JavaThrowException(JNIEnv *jenv, SWIG_JavaExceptionCodes code, const c #define DB_PKG "com/sleepycat/db/" /* Forward declarations */ -static int __dbj_throw(JNIEnv *jenv, int err, const char *msg, jobject obj, jobject jdbenv); +static int __dbj_throw(JNIEnv *jenv, + int err, const char *msg, jobject obj, jobject jdbenv); /* Global data - JVM handle, classes, fields and methods */ static JavaVM *javavm; @@ -88,14 +89,14 @@ static JavaVM *javavm; static jclass db_class, dbc_class, dbenv_class, dbt_class, dblsn_class; static jclass dbpreplist_class, dbtxn_class; static jclass keyrange_class; -static jclass btree_stat_class, hash_stat_class, lock_stat_class; +static jclass bt_stat_class, h_stat_class, lock_stat_class; static jclass log_stat_class, mpool_stat_class, mpool_fstat_class; -static jclass queue_stat_class, rep_stat_class, txn_stat_class; +static jclass qam_stat_class, rep_stat_class, seq_stat_class, txn_stat_class; static jclass txn_active_class; static jclass lock_class, lockreq_class, rep_processmsg_class; static jclass dbex_class, deadex_class, lockex_class, memex_class; -static jclass runrecex_class; -static jclass filenotfoundex_class, illegalargex_class; +static jclass rephandledeadex_class, runrecex_class; +static jclass filenotfoundex_class, illegalargex_class, 
outofmemerr_class; static jclass bytearray_class, string_class, outputstream_class; static jfieldID dbc_cptr_fid; @@ -103,24 +104,237 @@ static jfieldID dbt_data_fid, dbt_size_fid, dbt_ulen_fid, dbt_dlen_fid; static jfieldID dbt_doff_fid, dbt_flags_fid, dbt_offset_fid; static jfieldID kr_less_fid, kr_equal_fid, kr_greater_fid; static jfieldID lock_cptr_fid; -static jfieldID lockreq_op_fid, lockreq_mode_fid, lockreq_timeout_fid; +static jfieldID lockreq_op_fid, lockreq_modeflag_fid, lockreq_timeout_fid; static jfieldID lockreq_obj_fid, lockreq_lock_fid; static jfieldID rep_processmsg_envid; -static jfieldID txn_stat_active_fid; + +/* BEGIN-STAT-FIELD-DECLS */ +static jfieldID bt_stat_bt_magic_fid; +static jfieldID bt_stat_bt_version_fid; +static jfieldID bt_stat_bt_metaflags_fid; +static jfieldID bt_stat_bt_nkeys_fid; +static jfieldID bt_stat_bt_ndata_fid; +static jfieldID bt_stat_bt_pagesize_fid; +static jfieldID bt_stat_bt_maxkey_fid; +static jfieldID bt_stat_bt_minkey_fid; +static jfieldID bt_stat_bt_re_len_fid; +static jfieldID bt_stat_bt_re_pad_fid; +static jfieldID bt_stat_bt_levels_fid; +static jfieldID bt_stat_bt_int_pg_fid; +static jfieldID bt_stat_bt_leaf_pg_fid; +static jfieldID bt_stat_bt_dup_pg_fid; +static jfieldID bt_stat_bt_over_pg_fid; +static jfieldID bt_stat_bt_empty_pg_fid; +static jfieldID bt_stat_bt_free_fid; +static jfieldID bt_stat_bt_int_pgfree_fid; +static jfieldID bt_stat_bt_leaf_pgfree_fid; +static jfieldID bt_stat_bt_dup_pgfree_fid; +static jfieldID bt_stat_bt_over_pgfree_fid; +static jfieldID h_stat_hash_magic_fid; +static jfieldID h_stat_hash_version_fid; +static jfieldID h_stat_hash_metaflags_fid; +static jfieldID h_stat_hash_nkeys_fid; +static jfieldID h_stat_hash_ndata_fid; +static jfieldID h_stat_hash_pagesize_fid; +static jfieldID h_stat_hash_ffactor_fid; +static jfieldID h_stat_hash_buckets_fid; +static jfieldID h_stat_hash_free_fid; +static jfieldID h_stat_hash_bfree_fid; +static jfieldID h_stat_hash_bigpages_fid; +static jfieldID h_stat_hash_big_bfree_fid; +static jfieldID h_stat_hash_overflows_fid; +static jfieldID h_stat_hash_ovfl_free_fid; +static jfieldID h_stat_hash_dup_fid; +static jfieldID h_stat_hash_dup_free_fid; +static jfieldID lock_stat_st_id_fid; +static jfieldID lock_stat_st_cur_maxid_fid; +static jfieldID lock_stat_st_maxlocks_fid; +static jfieldID lock_stat_st_maxlockers_fid; +static jfieldID lock_stat_st_maxobjects_fid; +static jfieldID lock_stat_st_nmodes_fid; +static jfieldID lock_stat_st_nlocks_fid; +static jfieldID lock_stat_st_maxnlocks_fid; +static jfieldID lock_stat_st_nlockers_fid; +static jfieldID lock_stat_st_maxnlockers_fid; +static jfieldID lock_stat_st_nobjects_fid; +static jfieldID lock_stat_st_maxnobjects_fid; +static jfieldID lock_stat_st_nconflicts_fid; +static jfieldID lock_stat_st_nrequests_fid; +static jfieldID lock_stat_st_nreleases_fid; +static jfieldID lock_stat_st_nnowaits_fid; +static jfieldID lock_stat_st_ndeadlocks_fid; +static jfieldID lock_stat_st_locktimeout_fid; +static jfieldID lock_stat_st_nlocktimeouts_fid; +static jfieldID lock_stat_st_txntimeout_fid; +static jfieldID lock_stat_st_ntxntimeouts_fid; +static jfieldID lock_stat_st_region_wait_fid; +static jfieldID lock_stat_st_region_nowait_fid; +static jfieldID lock_stat_st_regsize_fid; +static jfieldID log_stat_st_magic_fid; +static jfieldID log_stat_st_version_fid; +static jfieldID log_stat_st_mode_fid; +static jfieldID log_stat_st_lg_bsize_fid; +static jfieldID log_stat_st_lg_size_fid; +static jfieldID log_stat_st_w_bytes_fid; +static jfieldID 
log_stat_st_w_mbytes_fid; +static jfieldID log_stat_st_wc_bytes_fid; +static jfieldID log_stat_st_wc_mbytes_fid; +static jfieldID log_stat_st_wcount_fid; +static jfieldID log_stat_st_wcount_fill_fid; +static jfieldID log_stat_st_scount_fid; +static jfieldID log_stat_st_region_wait_fid; +static jfieldID log_stat_st_region_nowait_fid; +static jfieldID log_stat_st_cur_file_fid; +static jfieldID log_stat_st_cur_offset_fid; +static jfieldID log_stat_st_disk_file_fid; +static jfieldID log_stat_st_disk_offset_fid; +static jfieldID log_stat_st_regsize_fid; +static jfieldID log_stat_st_maxcommitperflush_fid; +static jfieldID log_stat_st_mincommitperflush_fid; +static jfieldID mpool_fstat_file_name_fid; +static jfieldID mpool_fstat_st_pagesize_fid; +static jfieldID mpool_fstat_st_map_fid; +static jfieldID mpool_fstat_st_cache_hit_fid; +static jfieldID mpool_fstat_st_cache_miss_fid; +static jfieldID mpool_fstat_st_page_create_fid; +static jfieldID mpool_fstat_st_page_in_fid; +static jfieldID mpool_fstat_st_page_out_fid; +static jfieldID mpool_stat_st_gbytes_fid; +static jfieldID mpool_stat_st_bytes_fid; +static jfieldID mpool_stat_st_ncache_fid; +static jfieldID mpool_stat_st_regsize_fid; +static jfieldID mpool_stat_st_mmapsize_fid; +static jfieldID mpool_stat_st_maxopenfd_fid; +static jfieldID mpool_stat_st_maxwrite_fid; +static jfieldID mpool_stat_st_maxwrite_sleep_fid; +static jfieldID mpool_stat_st_map_fid; +static jfieldID mpool_stat_st_cache_hit_fid; +static jfieldID mpool_stat_st_cache_miss_fid; +static jfieldID mpool_stat_st_page_create_fid; +static jfieldID mpool_stat_st_page_in_fid; +static jfieldID mpool_stat_st_page_out_fid; +static jfieldID mpool_stat_st_ro_evict_fid; +static jfieldID mpool_stat_st_rw_evict_fid; +static jfieldID mpool_stat_st_page_trickle_fid; +static jfieldID mpool_stat_st_pages_fid; +static jfieldID mpool_stat_st_page_clean_fid; +static jfieldID mpool_stat_st_page_dirty_fid; +static jfieldID mpool_stat_st_hash_buckets_fid; +static jfieldID mpool_stat_st_hash_searches_fid; +static jfieldID mpool_stat_st_hash_longest_fid; +static jfieldID mpool_stat_st_hash_examined_fid; +static jfieldID mpool_stat_st_hash_nowait_fid; +static jfieldID mpool_stat_st_hash_wait_fid; +static jfieldID mpool_stat_st_hash_max_wait_fid; +static jfieldID mpool_stat_st_region_nowait_fid; +static jfieldID mpool_stat_st_region_wait_fid; +static jfieldID mpool_stat_st_alloc_fid; +static jfieldID mpool_stat_st_alloc_buckets_fid; +static jfieldID mpool_stat_st_alloc_max_buckets_fid; +static jfieldID mpool_stat_st_alloc_pages_fid; +static jfieldID mpool_stat_st_alloc_max_pages_fid; +static jfieldID qam_stat_qs_magic_fid; +static jfieldID qam_stat_qs_version_fid; +static jfieldID qam_stat_qs_metaflags_fid; +static jfieldID qam_stat_qs_nkeys_fid; +static jfieldID qam_stat_qs_ndata_fid; +static jfieldID qam_stat_qs_pagesize_fid; +static jfieldID qam_stat_qs_extentsize_fid; +static jfieldID qam_stat_qs_pages_fid; +static jfieldID qam_stat_qs_re_len_fid; +static jfieldID qam_stat_qs_re_pad_fid; +static jfieldID qam_stat_qs_pgfree_fid; +static jfieldID qam_stat_qs_first_recno_fid; +static jfieldID qam_stat_qs_cur_recno_fid; +static jfieldID rep_stat_st_status_fid; +static jfieldID rep_stat_st_next_lsn_fid; +static jfieldID rep_stat_st_waiting_lsn_fid; +static jfieldID rep_stat_st_next_pg_fid; +static jfieldID rep_stat_st_waiting_pg_fid; +static jfieldID rep_stat_st_dupmasters_fid; +static jfieldID rep_stat_st_env_id_fid; +static jfieldID rep_stat_st_env_priority_fid; +static jfieldID rep_stat_st_gen_fid; 
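/*
 * A minimal sketch of the cached-field-ID pattern these jfieldID
 * declarations support: each Java class is resolved once, pinned with a
 * global reference, and its field IDs are looked up once, so the
 * per-call stat-copying code only performs cheap Set*Field operations.
 * The MyStats class and its "count" field below are hypothetical and
 * purely illustrative.
 */
#include <jni.h>

static jclass mystats_class;		/* global ref cached at init time */
static jfieldID mystats_count_fid;	/* field ID cached at init time */

static int cache_mystats_ids(JNIEnv *jenv)
{
	jclass cl;

	if ((cl = (*jenv)->FindClass(jenv, "com/example/MyStats")) == NULL)
		return (-1);		/* exception pending */
	if ((mystats_class =
	    (jclass)(*jenv)->NewGlobalRef(jenv, cl)) == NULL)
		return (-1);
	mystats_count_fid =
	    (*jenv)->GetFieldID(jenv, mystats_class, "count", "I");
	return (mystats_count_fid == NULL ? -1 : 0);
}

static void copy_mystats(JNIEnv *jenv, jobject jstats, int count)
{
	/* Hot path: no FindClass/GetFieldID, just the cached field ID. */
	(*jenv)->SetIntField(jenv, jstats, mystats_count_fid, (jint)count);
}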
+static jfieldID rep_stat_st_egen_fid; +static jfieldID rep_stat_st_log_duplicated_fid; +static jfieldID rep_stat_st_log_queued_fid; +static jfieldID rep_stat_st_log_queued_max_fid; +static jfieldID rep_stat_st_log_queued_total_fid; +static jfieldID rep_stat_st_log_records_fid; +static jfieldID rep_stat_st_log_requested_fid; +static jfieldID rep_stat_st_master_fid; +static jfieldID rep_stat_st_master_changes_fid; +static jfieldID rep_stat_st_msgs_badgen_fid; +static jfieldID rep_stat_st_msgs_processed_fid; +static jfieldID rep_stat_st_msgs_recover_fid; +static jfieldID rep_stat_st_msgs_send_failures_fid; +static jfieldID rep_stat_st_msgs_sent_fid; +static jfieldID rep_stat_st_newsites_fid; +static jfieldID rep_stat_st_nsites_fid; +static jfieldID rep_stat_st_nthrottles_fid; +static jfieldID rep_stat_st_outdated_fid; +static jfieldID rep_stat_st_pg_duplicated_fid; +static jfieldID rep_stat_st_pg_records_fid; +static jfieldID rep_stat_st_pg_requested_fid; +static jfieldID rep_stat_st_startup_complete_fid; +static jfieldID rep_stat_st_txns_applied_fid; +static jfieldID rep_stat_st_elections_fid; +static jfieldID rep_stat_st_elections_won_fid; +static jfieldID rep_stat_st_election_cur_winner_fid; +static jfieldID rep_stat_st_election_gen_fid; +static jfieldID rep_stat_st_election_lsn_fid; +static jfieldID rep_stat_st_election_nsites_fid; +static jfieldID rep_stat_st_election_nvotes_fid; +static jfieldID rep_stat_st_election_priority_fid; +static jfieldID rep_stat_st_election_status_fid; +static jfieldID rep_stat_st_election_tiebreaker_fid; +static jfieldID rep_stat_st_election_votes_fid; +static jfieldID seq_stat_st_wait_fid; +static jfieldID seq_stat_st_nowait_fid; +static jfieldID seq_stat_st_current_fid; +static jfieldID seq_stat_st_value_fid; +static jfieldID seq_stat_st_last_value_fid; +static jfieldID seq_stat_st_min_fid; +static jfieldID seq_stat_st_max_fid; +static jfieldID seq_stat_st_cache_size_fid; +static jfieldID seq_stat_st_flags_fid; +static jfieldID txn_stat_st_last_ckp_fid; +static jfieldID txn_stat_st_time_ckp_fid; +static jfieldID txn_stat_st_last_txnid_fid; +static jfieldID txn_stat_st_maxtxns_fid; +static jfieldID txn_stat_st_naborts_fid; +static jfieldID txn_stat_st_nbegins_fid; +static jfieldID txn_stat_st_ncommits_fid; +static jfieldID txn_stat_st_nactive_fid; +static jfieldID txn_stat_st_nrestores_fid; +static jfieldID txn_stat_st_maxnactive_fid; +static jfieldID txn_stat_st_txnarray_fid; +static jfieldID txn_stat_st_region_wait_fid; +static jfieldID txn_stat_st_region_nowait_fid; +static jfieldID txn_stat_st_regsize_fid; +static jfieldID txn_active_txnid_fid; +static jfieldID txn_active_parentid_fid; +static jfieldID txn_active_lsn_fid; +static jfieldID txn_active_xa_status_fid; +static jfieldID txn_active_xid_fid; +/* END-STAT-FIELD-DECLS */ static jmethodID dbenv_construct, dbt_construct, dblsn_construct; static jmethodID dbpreplist_construct, dbtxn_construct; -static jmethodID btree_stat_construct, hash_stat_construct; +static jmethodID bt_stat_construct, h_stat_construct; static jmethodID lock_stat_construct, log_stat_construct, mpool_stat_construct; -static jmethodID mpool_fstat_construct, queue_stat_construct; -static jmethodID rep_stat_construct, txn_stat_construct, txn_active_construct; +static jmethodID mpool_fstat_construct, qam_stat_construct; +static jmethodID rep_stat_construct, seq_stat_construct; +static jmethodID txn_stat_construct, txn_active_construct; static jmethodID dbex_construct, deadex_construct, lockex_construct; -static jmethodID 
memex_construct, memex_update_method, runrecex_construct; +static jmethodID memex_construct, memex_update_method; +static jmethodID rephandledeadex_construct, runrecex_construct; static jmethodID filenotfoundex_construct, illegalargex_construct; +static jmethodID outofmemerr_construct; static jmethodID lock_construct; static jmethodID app_dispatch_method, errcall_method, env_feedback_method; -static jmethodID paniccall_method, rep_transport_method; +static jmethodID msgcall_method, paniccall_method, rep_transport_method; static jmethodID append_recno_method, bt_compare_method, bt_prefix_method; static jmethodID db_feedback_method, dup_compare_method, h_hash_method; static jmethodID seckey_create_method; @@ -131,37 +345,40 @@ const struct { jclass *cl; const char *name; } all_classes[] = { - { &dbenv_class, DB_PKG "DbEnv" }, - { &db_class, DB_PKG "Db" }, - { &dbc_class, DB_PKG "Dbc" }, - { &dbt_class, DB_PKG "Dbt" }, - { &dblsn_class, DB_PKG "DbLsn" }, - { &dbpreplist_class, DB_PKG "DbPreplist" }, - { &dbtxn_class, DB_PKG "DbTxn" }, - - { &btree_stat_class, DB_PKG "DbBtreeStat" }, - { &hash_stat_class, DB_PKG "DbHashStat" }, - { &lock_stat_class, DB_PKG "DbLockStat" }, - { &log_stat_class, DB_PKG "DbLogStat" }, - { &mpool_fstat_class, DB_PKG "DbMpoolFStat" }, - { &mpool_stat_class, DB_PKG "DbMpoolStat" }, - { &queue_stat_class, DB_PKG "DbQueueStat" }, - { &rep_stat_class, DB_PKG "DbRepStat" }, - { &txn_stat_class, DB_PKG "DbTxnStat" }, - { &txn_active_class, DB_PKG "DbTxnStat$Active" }, - - { &keyrange_class, DB_PKG "DbKeyRange" }, - { &lock_class, DB_PKG "DbLock" }, - { &lockreq_class, DB_PKG "DbLockRequest" }, - { &rep_processmsg_class, DB_PKG "DbEnv$RepProcessMessage" }, - - { &dbex_class, DB_PKG "DbException" }, - { &deadex_class, DB_PKG "DbDeadlockException" }, - { &lockex_class, DB_PKG "DbLockNotGrantedException" }, - { &memex_class, DB_PKG "DbMemoryException" }, - { &runrecex_class, DB_PKG "DbRunRecoveryException" }, + { &dbenv_class, DB_PKG "internal/DbEnv" }, + { &db_class, DB_PKG "internal/Db" }, + { &dbc_class, DB_PKG "internal/Dbc" }, + { &dbt_class, DB_PKG "DatabaseEntry" }, + { &dblsn_class, DB_PKG "LogSequenceNumber" }, + { &dbpreplist_class, DB_PKG "PreparedTransaction" }, + { &dbtxn_class, DB_PKG "internal/DbTxn" }, + + { &bt_stat_class, DB_PKG "BtreeStats" }, + { &h_stat_class, DB_PKG "HashStats" }, + { &lock_stat_class, DB_PKG "LockStats" }, + { &log_stat_class, DB_PKG "LogStats" }, + { &mpool_fstat_class, DB_PKG "CacheFileStats" }, + { &mpool_stat_class, DB_PKG "CacheStats" }, + { &qam_stat_class, DB_PKG "QueueStats" }, + { &rep_stat_class, DB_PKG "ReplicationStats" }, + { &seq_stat_class, DB_PKG "SequenceStats" }, + { &txn_stat_class, DB_PKG "TransactionStats" }, + { &txn_active_class, DB_PKG "TransactionStats$Active" }, + + { &keyrange_class, DB_PKG "KeyRange" }, + { &lock_class, DB_PKG "internal/DbLock" }, + { &lockreq_class, DB_PKG "LockRequest" }, + { &rep_processmsg_class, DB_PKG "internal/DbEnv$RepProcessMessage" }, + + { &dbex_class, DB_PKG "DatabaseException" }, + { &deadex_class, DB_PKG "DeadlockException" }, + { &lockex_class, DB_PKG "LockNotGrantedException" }, + { &memex_class, DB_PKG "MemoryException" }, + { &rephandledeadex_class, DB_PKG "ReplicationHandleDeadException" }, + { &runrecex_class, DB_PKG "RunRecoveryException" }, { &filenotfoundex_class, "java/io/FileNotFoundException" }, { &illegalargex_class, "java/lang/IllegalArgumentException" }, + { &outofmemerr_class, "java/lang/OutOfMemoryError" }, { &bytearray_class, "[B" }, { &string_class, 
"java/lang/String" }, @@ -191,14 +408,223 @@ const struct { { &lock_cptr_fid, &lock_class, "swigCPtr", "J" }, { &lockreq_op_fid, &lockreq_class, "op", "I" }, - { &lockreq_mode_fid, &lockreq_class, "mode", "I" }, + { &lockreq_modeflag_fid, &lockreq_class, "modeFlag", "I" }, { &lockreq_timeout_fid, &lockreq_class, "timeout", "I" }, - { &lockreq_obj_fid, &lockreq_class, "obj", "L" DB_PKG "Dbt;" }, - { &lockreq_lock_fid, &lockreq_class, "lock", "L" DB_PKG "DbLock;" }, - - { &rep_processmsg_envid, &rep_processmsg_class, "envid", "I" }, - { &txn_stat_active_fid, &txn_stat_class, "st_txnarray", - "[L" DB_PKG "DbTxnStat$Active;" } + { &lockreq_obj_fid, &lockreq_class, "obj", "L" DB_PKG "DatabaseEntry;" }, + { &lockreq_lock_fid, &lockreq_class, "lock", "L" DB_PKG "internal/DbLock;" }, + +/* BEGIN-STAT-FIELDS */ + { &bt_stat_bt_magic_fid, &bt_stat_class, "bt_magic", "I" }, + { &bt_stat_bt_version_fid, &bt_stat_class, "bt_version", "I" }, + { &bt_stat_bt_metaflags_fid, &bt_stat_class, "bt_metaflags", "I" }, + { &bt_stat_bt_nkeys_fid, &bt_stat_class, "bt_nkeys", "I" }, + { &bt_stat_bt_ndata_fid, &bt_stat_class, "bt_ndata", "I" }, + { &bt_stat_bt_pagesize_fid, &bt_stat_class, "bt_pagesize", "I" }, + { &bt_stat_bt_maxkey_fid, &bt_stat_class, "bt_maxkey", "I" }, + { &bt_stat_bt_minkey_fid, &bt_stat_class, "bt_minkey", "I" }, + { &bt_stat_bt_re_len_fid, &bt_stat_class, "bt_re_len", "I" }, + { &bt_stat_bt_re_pad_fid, &bt_stat_class, "bt_re_pad", "I" }, + { &bt_stat_bt_levels_fid, &bt_stat_class, "bt_levels", "I" }, + { &bt_stat_bt_int_pg_fid, &bt_stat_class, "bt_int_pg", "I" }, + { &bt_stat_bt_leaf_pg_fid, &bt_stat_class, "bt_leaf_pg", "I" }, + { &bt_stat_bt_dup_pg_fid, &bt_stat_class, "bt_dup_pg", "I" }, + { &bt_stat_bt_over_pg_fid, &bt_stat_class, "bt_over_pg", "I" }, + { &bt_stat_bt_empty_pg_fid, &bt_stat_class, "bt_empty_pg", "I" }, + { &bt_stat_bt_free_fid, &bt_stat_class, "bt_free", "I" }, + { &bt_stat_bt_int_pgfree_fid, &bt_stat_class, "bt_int_pgfree", "I" }, + { &bt_stat_bt_leaf_pgfree_fid, &bt_stat_class, "bt_leaf_pgfree", "I" }, + { &bt_stat_bt_dup_pgfree_fid, &bt_stat_class, "bt_dup_pgfree", "I" }, + { &bt_stat_bt_over_pgfree_fid, &bt_stat_class, "bt_over_pgfree", "I" }, + { &h_stat_hash_magic_fid, &h_stat_class, "hash_magic", "I" }, + { &h_stat_hash_version_fid, &h_stat_class, "hash_version", "I" }, + { &h_stat_hash_metaflags_fid, &h_stat_class, "hash_metaflags", "I" }, + { &h_stat_hash_nkeys_fid, &h_stat_class, "hash_nkeys", "I" }, + { &h_stat_hash_ndata_fid, &h_stat_class, "hash_ndata", "I" }, + { &h_stat_hash_pagesize_fid, &h_stat_class, "hash_pagesize", "I" }, + { &h_stat_hash_ffactor_fid, &h_stat_class, "hash_ffactor", "I" }, + { &h_stat_hash_buckets_fid, &h_stat_class, "hash_buckets", "I" }, + { &h_stat_hash_free_fid, &h_stat_class, "hash_free", "I" }, + { &h_stat_hash_bfree_fid, &h_stat_class, "hash_bfree", "I" }, + { &h_stat_hash_bigpages_fid, &h_stat_class, "hash_bigpages", "I" }, + { &h_stat_hash_big_bfree_fid, &h_stat_class, "hash_big_bfree", "I" }, + { &h_stat_hash_overflows_fid, &h_stat_class, "hash_overflows", "I" }, + { &h_stat_hash_ovfl_free_fid, &h_stat_class, "hash_ovfl_free", "I" }, + { &h_stat_hash_dup_fid, &h_stat_class, "hash_dup", "I" }, + { &h_stat_hash_dup_free_fid, &h_stat_class, "hash_dup_free", "I" }, + { &lock_stat_st_id_fid, &lock_stat_class, "st_id", "I" }, + { &lock_stat_st_cur_maxid_fid, &lock_stat_class, "st_cur_maxid", "I" }, + { &lock_stat_st_maxlocks_fid, &lock_stat_class, "st_maxlocks", "I" }, + { &lock_stat_st_maxlockers_fid, &lock_stat_class, 
"st_maxlockers", "I" }, + { &lock_stat_st_maxobjects_fid, &lock_stat_class, "st_maxobjects", "I" }, + { &lock_stat_st_nmodes_fid, &lock_stat_class, "st_nmodes", "I" }, + { &lock_stat_st_nlocks_fid, &lock_stat_class, "st_nlocks", "I" }, + { &lock_stat_st_maxnlocks_fid, &lock_stat_class, "st_maxnlocks", "I" }, + { &lock_stat_st_nlockers_fid, &lock_stat_class, "st_nlockers", "I" }, + { &lock_stat_st_maxnlockers_fid, &lock_stat_class, "st_maxnlockers", "I" }, + { &lock_stat_st_nobjects_fid, &lock_stat_class, "st_nobjects", "I" }, + { &lock_stat_st_maxnobjects_fid, &lock_stat_class, "st_maxnobjects", "I" }, + { &lock_stat_st_nconflicts_fid, &lock_stat_class, "st_nconflicts", "I" }, + { &lock_stat_st_nrequests_fid, &lock_stat_class, "st_nrequests", "I" }, + { &lock_stat_st_nreleases_fid, &lock_stat_class, "st_nreleases", "I" }, + { &lock_stat_st_nnowaits_fid, &lock_stat_class, "st_nnowaits", "I" }, + { &lock_stat_st_ndeadlocks_fid, &lock_stat_class, "st_ndeadlocks", "I" }, + { &lock_stat_st_locktimeout_fid, &lock_stat_class, "st_locktimeout", "I" }, + { &lock_stat_st_nlocktimeouts_fid, &lock_stat_class, "st_nlocktimeouts", "I" }, + { &lock_stat_st_txntimeout_fid, &lock_stat_class, "st_txntimeout", "I" }, + { &lock_stat_st_ntxntimeouts_fid, &lock_stat_class, "st_ntxntimeouts", "I" }, + { &lock_stat_st_region_wait_fid, &lock_stat_class, "st_region_wait", "I" }, + { &lock_stat_st_region_nowait_fid, &lock_stat_class, "st_region_nowait", "I" }, + { &lock_stat_st_regsize_fid, &lock_stat_class, "st_regsize", "I" }, + { &log_stat_st_magic_fid, &log_stat_class, "st_magic", "I" }, + { &log_stat_st_version_fid, &log_stat_class, "st_version", "I" }, + { &log_stat_st_mode_fid, &log_stat_class, "st_mode", "I" }, + { &log_stat_st_lg_bsize_fid, &log_stat_class, "st_lg_bsize", "I" }, + { &log_stat_st_lg_size_fid, &log_stat_class, "st_lg_size", "I" }, + { &log_stat_st_w_bytes_fid, &log_stat_class, "st_w_bytes", "I" }, + { &log_stat_st_w_mbytes_fid, &log_stat_class, "st_w_mbytes", "I" }, + { &log_stat_st_wc_bytes_fid, &log_stat_class, "st_wc_bytes", "I" }, + { &log_stat_st_wc_mbytes_fid, &log_stat_class, "st_wc_mbytes", "I" }, + { &log_stat_st_wcount_fid, &log_stat_class, "st_wcount", "I" }, + { &log_stat_st_wcount_fill_fid, &log_stat_class, "st_wcount_fill", "I" }, + { &log_stat_st_scount_fid, &log_stat_class, "st_scount", "I" }, + { &log_stat_st_region_wait_fid, &log_stat_class, "st_region_wait", "I" }, + { &log_stat_st_region_nowait_fid, &log_stat_class, "st_region_nowait", "I" }, + { &log_stat_st_cur_file_fid, &log_stat_class, "st_cur_file", "I" }, + { &log_stat_st_cur_offset_fid, &log_stat_class, "st_cur_offset", "I" }, + { &log_stat_st_disk_file_fid, &log_stat_class, "st_disk_file", "I" }, + { &log_stat_st_disk_offset_fid, &log_stat_class, "st_disk_offset", "I" }, + { &log_stat_st_regsize_fid, &log_stat_class, "st_regsize", "I" }, + { &log_stat_st_maxcommitperflush_fid, &log_stat_class, "st_maxcommitperflush", "I" }, + { &log_stat_st_mincommitperflush_fid, &log_stat_class, "st_mincommitperflush", "I" }, + { &mpool_fstat_file_name_fid, &mpool_fstat_class, "file_name", "Ljava/lang/String;" }, + { &mpool_fstat_st_pagesize_fid, &mpool_fstat_class, "st_pagesize", "I" }, + { &mpool_fstat_st_map_fid, &mpool_fstat_class, "st_map", "I" }, + { &mpool_fstat_st_cache_hit_fid, &mpool_fstat_class, "st_cache_hit", "I" }, + { &mpool_fstat_st_cache_miss_fid, &mpool_fstat_class, "st_cache_miss", "I" }, + { &mpool_fstat_st_page_create_fid, &mpool_fstat_class, "st_page_create", "I" }, + { &mpool_fstat_st_page_in_fid, 
&mpool_fstat_class, "st_page_in", "I" }, + { &mpool_fstat_st_page_out_fid, &mpool_fstat_class, "st_page_out", "I" }, + { &mpool_stat_st_gbytes_fid, &mpool_stat_class, "st_gbytes", "I" }, + { &mpool_stat_st_bytes_fid, &mpool_stat_class, "st_bytes", "I" }, + { &mpool_stat_st_ncache_fid, &mpool_stat_class, "st_ncache", "I" }, + { &mpool_stat_st_regsize_fid, &mpool_stat_class, "st_regsize", "I" }, + { &mpool_stat_st_mmapsize_fid, &mpool_stat_class, "st_mmapsize", "I" }, + { &mpool_stat_st_maxopenfd_fid, &mpool_stat_class, "st_maxopenfd", "I" }, + { &mpool_stat_st_maxwrite_fid, &mpool_stat_class, "st_maxwrite", "I" }, + { &mpool_stat_st_maxwrite_sleep_fid, &mpool_stat_class, "st_maxwrite_sleep", "I" }, + { &mpool_stat_st_map_fid, &mpool_stat_class, "st_map", "I" }, + { &mpool_stat_st_cache_hit_fid, &mpool_stat_class, "st_cache_hit", "I" }, + { &mpool_stat_st_cache_miss_fid, &mpool_stat_class, "st_cache_miss", "I" }, + { &mpool_stat_st_page_create_fid, &mpool_stat_class, "st_page_create", "I" }, + { &mpool_stat_st_page_in_fid, &mpool_stat_class, "st_page_in", "I" }, + { &mpool_stat_st_page_out_fid, &mpool_stat_class, "st_page_out", "I" }, + { &mpool_stat_st_ro_evict_fid, &mpool_stat_class, "st_ro_evict", "I" }, + { &mpool_stat_st_rw_evict_fid, &mpool_stat_class, "st_rw_evict", "I" }, + { &mpool_stat_st_page_trickle_fid, &mpool_stat_class, "st_page_trickle", "I" }, + { &mpool_stat_st_pages_fid, &mpool_stat_class, "st_pages", "I" }, + { &mpool_stat_st_page_clean_fid, &mpool_stat_class, "st_page_clean", "I" }, + { &mpool_stat_st_page_dirty_fid, &mpool_stat_class, "st_page_dirty", "I" }, + { &mpool_stat_st_hash_buckets_fid, &mpool_stat_class, "st_hash_buckets", "I" }, + { &mpool_stat_st_hash_searches_fid, &mpool_stat_class, "st_hash_searches", "I" }, + { &mpool_stat_st_hash_longest_fid, &mpool_stat_class, "st_hash_longest", "I" }, + { &mpool_stat_st_hash_examined_fid, &mpool_stat_class, "st_hash_examined", "I" }, + { &mpool_stat_st_hash_nowait_fid, &mpool_stat_class, "st_hash_nowait", "I" }, + { &mpool_stat_st_hash_wait_fid, &mpool_stat_class, "st_hash_wait", "I" }, + { &mpool_stat_st_hash_max_wait_fid, &mpool_stat_class, "st_hash_max_wait", "I" }, + { &mpool_stat_st_region_nowait_fid, &mpool_stat_class, "st_region_nowait", "I" }, + { &mpool_stat_st_region_wait_fid, &mpool_stat_class, "st_region_wait", "I" }, + { &mpool_stat_st_alloc_fid, &mpool_stat_class, "st_alloc", "I" }, + { &mpool_stat_st_alloc_buckets_fid, &mpool_stat_class, "st_alloc_buckets", "I" }, + { &mpool_stat_st_alloc_max_buckets_fid, &mpool_stat_class, "st_alloc_max_buckets", "I" }, + { &mpool_stat_st_alloc_pages_fid, &mpool_stat_class, "st_alloc_pages", "I" }, + { &mpool_stat_st_alloc_max_pages_fid, &mpool_stat_class, "st_alloc_max_pages", "I" }, + { &qam_stat_qs_magic_fid, &qam_stat_class, "qs_magic", "I" }, + { &qam_stat_qs_version_fid, &qam_stat_class, "qs_version", "I" }, + { &qam_stat_qs_metaflags_fid, &qam_stat_class, "qs_metaflags", "I" }, + { &qam_stat_qs_nkeys_fid, &qam_stat_class, "qs_nkeys", "I" }, + { &qam_stat_qs_ndata_fid, &qam_stat_class, "qs_ndata", "I" }, + { &qam_stat_qs_pagesize_fid, &qam_stat_class, "qs_pagesize", "I" }, + { &qam_stat_qs_extentsize_fid, &qam_stat_class, "qs_extentsize", "I" }, + { &qam_stat_qs_pages_fid, &qam_stat_class, "qs_pages", "I" }, + { &qam_stat_qs_re_len_fid, &qam_stat_class, "qs_re_len", "I" }, + { &qam_stat_qs_re_pad_fid, &qam_stat_class, "qs_re_pad", "I" }, + { &qam_stat_qs_pgfree_fid, &qam_stat_class, "qs_pgfree", "I" }, + { &qam_stat_qs_first_recno_fid, &qam_stat_class, 
"qs_first_recno", "I" }, + { &qam_stat_qs_cur_recno_fid, &qam_stat_class, "qs_cur_recno", "I" }, + { &rep_stat_st_status_fid, &rep_stat_class, "st_status", "I" }, + { &rep_stat_st_next_lsn_fid, &rep_stat_class, "st_next_lsn", "L" DB_PKG "LogSequenceNumber;" }, + { &rep_stat_st_waiting_lsn_fid, &rep_stat_class, "st_waiting_lsn", "L" DB_PKG "LogSequenceNumber;" }, + { &rep_stat_st_next_pg_fid, &rep_stat_class, "st_next_pg", "I" }, + { &rep_stat_st_waiting_pg_fid, &rep_stat_class, "st_waiting_pg", "I" }, + { &rep_stat_st_dupmasters_fid, &rep_stat_class, "st_dupmasters", "I" }, + { &rep_stat_st_env_id_fid, &rep_stat_class, "st_env_id", "I" }, + { &rep_stat_st_env_priority_fid, &rep_stat_class, "st_env_priority", "I" }, + { &rep_stat_st_gen_fid, &rep_stat_class, "st_gen", "I" }, + { &rep_stat_st_egen_fid, &rep_stat_class, "st_egen", "I" }, + { &rep_stat_st_log_duplicated_fid, &rep_stat_class, "st_log_duplicated", "I" }, + { &rep_stat_st_log_queued_fid, &rep_stat_class, "st_log_queued", "I" }, + { &rep_stat_st_log_queued_max_fid, &rep_stat_class, "st_log_queued_max", "I" }, + { &rep_stat_st_log_queued_total_fid, &rep_stat_class, "st_log_queued_total", "I" }, + { &rep_stat_st_log_records_fid, &rep_stat_class, "st_log_records", "I" }, + { &rep_stat_st_log_requested_fid, &rep_stat_class, "st_log_requested", "I" }, + { &rep_stat_st_master_fid, &rep_stat_class, "st_master", "I" }, + { &rep_stat_st_master_changes_fid, &rep_stat_class, "st_master_changes", "I" }, + { &rep_stat_st_msgs_badgen_fid, &rep_stat_class, "st_msgs_badgen", "I" }, + { &rep_stat_st_msgs_processed_fid, &rep_stat_class, "st_msgs_processed", "I" }, + { &rep_stat_st_msgs_recover_fid, &rep_stat_class, "st_msgs_recover", "I" }, + { &rep_stat_st_msgs_send_failures_fid, &rep_stat_class, "st_msgs_send_failures", "I" }, + { &rep_stat_st_msgs_sent_fid, &rep_stat_class, "st_msgs_sent", "I" }, + { &rep_stat_st_newsites_fid, &rep_stat_class, "st_newsites", "I" }, + { &rep_stat_st_nsites_fid, &rep_stat_class, "st_nsites", "I" }, + { &rep_stat_st_nthrottles_fid, &rep_stat_class, "st_nthrottles", "I" }, + { &rep_stat_st_outdated_fid, &rep_stat_class, "st_outdated", "I" }, + { &rep_stat_st_pg_duplicated_fid, &rep_stat_class, "st_pg_duplicated", "I" }, + { &rep_stat_st_pg_records_fid, &rep_stat_class, "st_pg_records", "I" }, + { &rep_stat_st_pg_requested_fid, &rep_stat_class, "st_pg_requested", "I" }, + { &rep_stat_st_startup_complete_fid, &rep_stat_class, "st_startup_complete", "I" }, + { &rep_stat_st_txns_applied_fid, &rep_stat_class, "st_txns_applied", "I" }, + { &rep_stat_st_elections_fid, &rep_stat_class, "st_elections", "I" }, + { &rep_stat_st_elections_won_fid, &rep_stat_class, "st_elections_won", "I" }, + { &rep_stat_st_election_cur_winner_fid, &rep_stat_class, "st_election_cur_winner", "I" }, + { &rep_stat_st_election_gen_fid, &rep_stat_class, "st_election_gen", "I" }, + { &rep_stat_st_election_lsn_fid, &rep_stat_class, "st_election_lsn", "L" DB_PKG "LogSequenceNumber;" }, + { &rep_stat_st_election_nsites_fid, &rep_stat_class, "st_election_nsites", "I" }, + { &rep_stat_st_election_nvotes_fid, &rep_stat_class, "st_election_nvotes", "I" }, + { &rep_stat_st_election_priority_fid, &rep_stat_class, "st_election_priority", "I" }, + { &rep_stat_st_election_status_fid, &rep_stat_class, "st_election_status", "I" }, + { &rep_stat_st_election_tiebreaker_fid, &rep_stat_class, "st_election_tiebreaker", "I" }, + { &rep_stat_st_election_votes_fid, &rep_stat_class, "st_election_votes", "I" }, + { &seq_stat_st_wait_fid, &seq_stat_class, "st_wait", "I" }, + 
{ &seq_stat_st_nowait_fid, &seq_stat_class, "st_nowait", "I" }, + { &seq_stat_st_current_fid, &seq_stat_class, "st_current", "J" }, + { &seq_stat_st_value_fid, &seq_stat_class, "st_value", "J" }, + { &seq_stat_st_last_value_fid, &seq_stat_class, "st_last_value", "J" }, + { &seq_stat_st_min_fid, &seq_stat_class, "st_min", "J" }, + { &seq_stat_st_max_fid, &seq_stat_class, "st_max", "J" }, + { &seq_stat_st_cache_size_fid, &seq_stat_class, "st_cache_size", "I" }, + { &seq_stat_st_flags_fid, &seq_stat_class, "st_flags", "I" }, + { &txn_stat_st_last_ckp_fid, &txn_stat_class, "st_last_ckp", "L" DB_PKG "LogSequenceNumber;" }, + { &txn_stat_st_time_ckp_fid, &txn_stat_class, "st_time_ckp", "J" }, + { &txn_stat_st_last_txnid_fid, &txn_stat_class, "st_last_txnid", "I" }, + { &txn_stat_st_maxtxns_fid, &txn_stat_class, "st_maxtxns", "I" }, + { &txn_stat_st_naborts_fid, &txn_stat_class, "st_naborts", "I" }, + { &txn_stat_st_nbegins_fid, &txn_stat_class, "st_nbegins", "I" }, + { &txn_stat_st_ncommits_fid, &txn_stat_class, "st_ncommits", "I" }, + { &txn_stat_st_nactive_fid, &txn_stat_class, "st_nactive", "I" }, + { &txn_stat_st_nrestores_fid, &txn_stat_class, "st_nrestores", "I" }, + { &txn_stat_st_maxnactive_fid, &txn_stat_class, "st_maxnactive", "I" }, + { &txn_stat_st_txnarray_fid, &txn_stat_class, "st_txnarray", "[L" DB_PKG "TransactionStats$Active;" }, + { &txn_stat_st_region_wait_fid, &txn_stat_class, "st_region_wait", "I" }, + { &txn_stat_st_region_nowait_fid, &txn_stat_class, "st_region_nowait", "I" }, + { &txn_stat_st_regsize_fid, &txn_stat_class, "st_regsize", "I" }, + { &txn_active_txnid_fid, &txn_active_class, "txnid", "I" }, + { &txn_active_parentid_fid, &txn_active_class, "parentid", "I" }, + { &txn_active_lsn_fid, &txn_active_class, "lsn", "L" DB_PKG "LogSequenceNumber;" }, + { &txn_active_xa_status_fid, &txn_active_class, "xa_status", "I" }, + { &txn_active_xid_fid, &txn_active_class, "xid", "[B" }, +/* END-STAT-FIELDS */ + + { &rep_processmsg_envid, &rep_processmsg_class, "envid", "I" } }; const struct { @@ -209,62 +635,70 @@ const struct { } all_methods[] = { { &dbenv_construct, &dbenv_class, "", "(JZ)V" }, { &dbt_construct, &dbt_class, "", "()V" }, - { &dblsn_construct, &dblsn_class, "", "(JZ)V" }, + { &dblsn_construct, &dblsn_class, "", "(II)V" }, { &dbpreplist_construct, &dbpreplist_class, "", - "(L" DB_PKG "DbTxn;[B)V" }, + "(L" DB_PKG "internal/DbTxn;[B)V" }, { &dbtxn_construct, &dbtxn_class, "", "(JZ)V" }, - { &btree_stat_construct, &btree_stat_class, "", "()V" }, - { &hash_stat_construct, &hash_stat_class, "", "()V" }, + { &bt_stat_construct, &bt_stat_class, "", "()V" }, + { &h_stat_construct, &h_stat_class, "", "()V" }, { &lock_stat_construct, &lock_stat_class, "", "()V" }, { &log_stat_construct, &log_stat_class, "", "()V" }, { &mpool_stat_construct, &mpool_stat_class, "", "()V" }, { &mpool_fstat_construct, &mpool_fstat_class, "", "()V" }, - { &queue_stat_construct, &queue_stat_class, "", "()V" }, + { &qam_stat_construct, &qam_stat_class, "", "()V" }, { &rep_stat_construct, &rep_stat_class, "", "()V" }, + { &seq_stat_construct, &seq_stat_class, "", "()V" }, { &txn_stat_construct, &txn_stat_class, "", "()V" }, { &txn_active_construct, &txn_active_class, "", "()V" }, - { &dbex_construct, &dbex_class, "", "(Ljava/lang/String;IL" DB_PKG "DbEnv;)V" }, + { &dbex_construct, &dbex_class, "", + "(Ljava/lang/String;IL" DB_PKG "internal/DbEnv;)V" }, { &deadex_construct, &deadex_class, "", - "(Ljava/lang/String;IL" DB_PKG "DbEnv;)V" }, + "(Ljava/lang/String;IL" DB_PKG "internal/DbEnv;)V" }, { 
&lockex_construct, &lockex_class, "", - "(Ljava/lang/String;IIL" DB_PKG "Dbt;L" DB_PKG "DbLock;IL" DB_PKG "DbEnv;)V" }, + "(Ljava/lang/String;IIL" DB_PKG "DatabaseEntry;L" DB_PKG "internal/DbLock;IL" DB_PKG "internal/DbEnv;)V" }, { &memex_construct, &memex_class, "", - "(Ljava/lang/String;L" DB_PKG "Dbt;IL" DB_PKG "DbEnv;)V" }, - { &memex_update_method, &memex_class, "update_dbt", - "(L" DB_PKG "Dbt;)V" }, + "(Ljava/lang/String;L" DB_PKG "DatabaseEntry;IL" DB_PKG "internal/DbEnv;)V" }, + { &memex_update_method, &memex_class, "updateDatabaseEntry", + "(L" DB_PKG "DatabaseEntry;)V" }, + { &rephandledeadex_construct, &rephandledeadex_class, "", + "(Ljava/lang/String;IL" DB_PKG "internal/DbEnv;)V" }, { &runrecex_construct, &runrecex_class, "", - "(Ljava/lang/String;IL" DB_PKG "DbEnv;)V" }, + "(Ljava/lang/String;IL" DB_PKG "internal/DbEnv;)V" }, { &filenotfoundex_construct, &filenotfoundex_class, "", "(Ljava/lang/String;)V" }, { &illegalargex_construct, &illegalargex_class, "", "(Ljava/lang/String;)V" }, + { &outofmemerr_construct, &outofmemerr_class, "", + "(Ljava/lang/String;)V" }, { &lock_construct, &lock_class, "", "(JZ)V" }, { &app_dispatch_method, &dbenv_class, "handle_app_dispatch", - "(L" DB_PKG "Dbt;L" DB_PKG "DbLsn;I)I" }, + "(L" DB_PKG "DatabaseEntry;L" DB_PKG "LogSequenceNumber;I)I" }, { &env_feedback_method, &dbenv_class, "handle_env_feedback", "(II)V" }, { &errcall_method, &dbenv_class, "handle_error", + "(Ljava/lang/String;Ljava/lang/String;)V" }, + { &msgcall_method, &dbenv_class, "handle_message", "(Ljava/lang/String;)V" }, { &paniccall_method, &dbenv_class, "handle_panic", - "(L" DB_PKG "DbException;)V" }, + "(L" DB_PKG "DatabaseException;)V" }, { &rep_transport_method, &dbenv_class, "handle_rep_transport", - "(L" DB_PKG "Dbt;L" DB_PKG "Dbt;L" DB_PKG "DbLsn;II)I" }, + "(L" DB_PKG "DatabaseEntry;L" DB_PKG "DatabaseEntry;L" DB_PKG "LogSequenceNumber;II)I" }, { &append_recno_method, &db_class, "handle_append_recno", - "(L" DB_PKG "Dbt;I)V" }, + "(L" DB_PKG "DatabaseEntry;I)V" }, { &bt_compare_method, &db_class, "handle_bt_compare", - "(L" DB_PKG "Dbt;L" DB_PKG "Dbt;)I" }, + "(L" DB_PKG "DatabaseEntry;L" DB_PKG "DatabaseEntry;)I" }, { &bt_prefix_method, &db_class, "handle_bt_prefix", - "(L" DB_PKG "Dbt;L" DB_PKG "Dbt;)I" }, + "(L" DB_PKG "DatabaseEntry;L" DB_PKG "DatabaseEntry;)I" }, { &db_feedback_method, &db_class, "handle_db_feedback", "(II)V" }, { &dup_compare_method, &db_class, "handle_dup_compare", - "(L" DB_PKG "Dbt;L" DB_PKG "Dbt;)I" }, + "(L" DB_PKG "DatabaseEntry;L" DB_PKG "DatabaseEntry;)I" }, { &h_hash_method, &db_class, "handle_h_hash", "([BI)I" }, { &seckey_create_method, &db_class, "handle_seckey_create", - "(L" DB_PKG "Dbt;L" DB_PKG "Dbt;L" DB_PKG "Dbt;)I" }, + "(L" DB_PKG "DatabaseEntry;L" DB_PKG "DatabaseEntry;L" DB_PKG "DatabaseEntry;)I" }, { &outputstream_write_method, &outputstream_class, "write", "([BII)V" } }; @@ -272,10 +706,10 @@ const struct { #define NELEM(x) (sizeof (x) / sizeof (x[0])) JNIEXPORT void JNICALL -Java_com_sleepycat_db_db_1javaJNI_initialize(JNIEnv *jenv, jclass clazz) +Java_com_sleepycat_db_internal_db_1javaJNI_initialize(JNIEnv *jenv, jclass clazz) { jclass cl; - unsigned int i; + unsigned int i, j; COMPQUIET(clazz, NULL); @@ -287,8 +721,8 @@ Java_com_sleepycat_db_db_1javaJNI_initialize(JNIEnv *jenv, jclass clazz) for (i = 0; i < NELEM(all_classes); i++) { cl = (*jenv)->FindClass(jenv, all_classes[i].name); if (cl == NULL) { - __db_err(NULL, - "Failed to load class %s - check CLASSPATH", + fprintf(stderr, + "Failed to load class %s - 
check CLASSPATH\n", all_classes[i].name); return; } @@ -300,8 +734,8 @@ Java_com_sleepycat_db_db_1javaJNI_initialize(JNIEnv *jenv, jclass clazz) *all_classes[i].cl = (jclass)(*jenv)->NewGlobalRef(jenv, cl); if (*all_classes[i].cl == NULL) { - __db_err(NULL, - "Failed to create a global reference for class %s", + fprintf(stderr, + "Failed to create a global reference for %s\n", all_classes[i].name); return; } @@ -314,8 +748,9 @@ Java_com_sleepycat_db_db_1javaJNI_initialize(JNIEnv *jenv, jclass clazz) *all_fields[i].cl, all_fields[i].name, all_fields[i].sig); if (*all_fields[i].fid == NULL) { - __db_err(NULL, "Failed to look up field %s", - all_fields[i].name); + fprintf(stderr, + "Failed to look up field %s with sig %s\n", + all_fields[i].name, all_fields[i].sig); return; } } @@ -327,8 +762,12 @@ Java_com_sleepycat_db_db_1javaJNI_initialize(JNIEnv *jenv, jclass clazz) all_methods[i].sig); if (*all_methods[i].mid == NULL) { - __db_err(NULL, "Failed to look up method %s", - all_methods[i].name); + for (j = 0; j < NELEM(all_classes); j++) + if (all_methods[i].cl == all_classes[j].cl) + break; + fprintf(stderr, + "Failed to look up method %s.%s with sig %s\n", + all_classes[j].name, all_methods[i].name, all_methods[i].sig); return; } } @@ -360,20 +799,8 @@ static JNIEnv *__dbj_get_jnienv(void) static jobject __dbj_wrap_DB_LSN(JNIEnv *jenv, DB_LSN *lsn) { - jlong jptr; - DB_LSN *lsn_copy; - int err; - - if ((err = __os_malloc(NULL, sizeof(DB_LSN), &lsn_copy)) != 0) { - __dbj_throw(jenv, err, NULL, NULL, NULL); - return NULL; - } - memset(lsn_copy, 0, sizeof(DB_LSN)); - *lsn_copy = *lsn; - /* Magic to convert a pointer to a long - must match SWIG */ - *(DB_LSN **)&jptr = lsn_copy; return (*jenv)->NewObject(jenv, dblsn_class, dblsn_construct, - jptr, JNI_TRUE); + lsn->file, lsn->offset); } @@ -387,7 +814,6 @@ static jobject __dbj_wrap_DB_LSN(JNIEnv *jenv, DB_LSN *lsn) #define TXN2JDBENV ((jobject)DB_ENV_INTERNAL(arg1->mgrp->dbenv)) - static jthrowable __dbj_get_except(JNIEnv *jenv, int err, const char *msg, jobject obj, jobject jdbenv) { jobject jmsg; @@ -407,9 +833,18 @@ static jthrowable __dbj_get_except(JNIEnv *jenv, filenotfoundex_class, filenotfoundex_construct, jmsg); case ENOMEM: + return (jthrowable)(*jenv)->NewObject(jenv, + outofmemerr_class, outofmemerr_construct, jmsg); + + case DB_BUFFER_SMALL: return (jthrowable)(*jenv)->NewObject(jenv, memex_class, memex_construct, jmsg, obj, err, jdbenv); + case DB_REP_HANDLE_DEAD: + return (jthrowable)(*jenv)->NewObject(jenv, + rephandledeadex_class, rephandledeadex_construct, + jmsg, err, jdbenv); + case DB_RUNRECOVERY: return (jthrowable)(*jenv)->NewObject(jenv, runrecex_class, runrecex_construct, jmsg, err, jdbenv); @@ -428,13 +863,15 @@ static jthrowable __dbj_get_except(JNIEnv *jenv, } } -static int __dbj_throw(JNIEnv *jenv, int err, const char *msg, jobject obj, jobject jdbenv) +static int __dbj_throw(JNIEnv *jenv, + int err, const char *msg, jobject obj, jobject jdbenv) { jthrowable t; /* If an exception is pending, ignore requests to throw a new one. */ if ((*jenv)->ExceptionOccurred(jenv) == NULL) { - if ((t = __dbj_get_except(jenv, err, msg, obj, jdbenv)) == NULL) { + t = __dbj_get_except(jenv, err, msg, obj, jdbenv); + if (t == NULL) { /* * This is a problem - something went wrong creating an * exception. 
We have to assume there is an exception @@ -460,28 +897,35 @@ typedef struct __dbt_locked { jbyteArray jarr; jbyte *orig_data; jint offset; + int reuse; u_int32_t orig_size; + jsize array_len; } DBT_LOCKED; static int __dbj_dbt_copyin( - JNIEnv *jenv, DBT_LOCKED *ldbt, jobject jdbt) + JNIEnv *jenv, DBT_LOCKED *ldbt, DBT **dbtp, jobject jdbt, int allow_null) { DBT *dbt; - jsize array_len; + + memset(ldbt, 0, sizeof (*ldbt)); + + if (jdbt == NULL) { + if (allow_null) { + *dbtp = NULL; + return (0); + } else { + return (__dbj_throw(jenv, EINVAL, + "DatabaseEntry must not be null", NULL, NULL)); + } + } dbt = &ldbt->dbt; - ldbt->offset = (*jenv)->GetIntField(jenv, jdbt, dbt_offset_fid); + if (dbtp != NULL) + *dbtp = dbt; + ldbt->jarr = (jbyteArray)(*jenv)->GetObjectField(jenv, jdbt, dbt_data_fid); - if (ldbt->jarr == NULL) { - ldbt->orig_data = dbt->data = NULL; - array_len = 0; - } else { - ldbt->orig_data = (*jenv)->GetByteArrayElements(jenv, - ldbt->jarr, NULL); - array_len = (*jenv)->GetArrayLength(jenv, ldbt->jarr); - dbt->data = ldbt->orig_data + ldbt->offset; - } + ldbt->offset = (*jenv)->GetIntField(jenv, jdbt, dbt_offset_fid); dbt->size = (*jenv)->GetIntField(jenv, jdbt, dbt_size_fid); ldbt->orig_size = dbt->size; @@ -495,34 +939,60 @@ static int __dbj_dbt_copyin( * MALLOC. */ if (!F_ISSET(dbt, DB_DBT_USERMEM)) { + ldbt->reuse = !F_ISSET(dbt, DB_DBT_MALLOC); F_CLR(dbt, DB_DBT_REALLOC); F_SET(dbt, DB_DBT_MALLOC); } + + /* Verify parameters before allocating or locking data. */ + if ((jint)dbt->doff < 0) + return (__dbj_throw(jenv, EINVAL, "DatabaseEntry doff illegal", + NULL, NULL)); - /* - * Some code makes the assumption that if dbt->size is non-zero, there - * is data to copy from dbt->data. We may have set dbt->size to a - * non-zero integer above but decided not to point dbt->data at - * anything. - * - * Clean up the dbt fields so we don't run into trouble. (Note that - * doff, dlen, and flags all may contain meaningful values.) - */ - if (dbt->data == NULL) - dbt->size = dbt->ulen = 0; - - /* Verify other parameters */ - if (ldbt->offset < 0) - return (__dbj_throw(jenv, EINVAL, "Dbt.offset illegal", NULL, NULL)); - else if ((jsize)(dbt->size + ldbt->offset) > array_len) - return (__dbj_throw(jenv, EINVAL, - "Dbt.size + Dbt.offset greater than array length", NULL, NULL)); - else if ((jint)dbt->doff < 0) - return (__dbj_throw(jenv, EINVAL, "Dbt.doff illegal", NULL, NULL)); - else if ((jsize)dbt->ulen > array_len) - return (__dbj_throw(jenv, EINVAL, - "Dbt.ulen greater than array length", NULL, NULL)); + if (ldbt->jarr == NULL) { + /* + * Some code makes the assumption that if a DBT's size or ulen + * is non-zero, there is data to copy from dbt->data. + * + * Clean up the dbt fields so we don't run into trouble. + * (Note that doff, dlen, and flags all may contain + * meaningful values.) 
+ */ + ldbt->orig_data = dbt->data = NULL; + ldbt->array_len = ldbt->offset = dbt->size = dbt->ulen = 0; + return (0); + } else + ldbt->array_len = (*jenv)->GetArrayLength(jenv, ldbt->jarr); + if (F_ISSET(dbt, DB_DBT_USERMEM)) { + if (ldbt->offset < 0) + return (__dbj_throw(jenv, EINVAL, + "offset cannot be negative", + NULL, NULL)); + if (dbt->size > dbt->ulen) + return (__dbj_throw(jenv, EINVAL, + "size must be less than or equal to ulen", + NULL, NULL)); + if ((jsize)(ldbt->offset + dbt->ulen) > ldbt->array_len) + return (__dbj_throw(jenv, EINVAL, + "offset + ulen greater than array length", + NULL, NULL)); + if ((ldbt->orig_data = (*jenv)->GetByteArrayElements(jenv, + ldbt->jarr, NULL)) == NULL) + return (EINVAL); /* an exception will be pending */ + dbt->data = ldbt->orig_data + ldbt->offset; + } else { + if (__os_umalloc(NULL, dbt->size, &dbt->data) != 0) + return (ENOMEM); + ldbt->orig_data = dbt->data; + (*jenv)->GetByteArrayRegion(jenv, + ldbt->jarr, ldbt->offset, dbt->size, dbt->data); + if ((*jenv)->ExceptionOccurred(jenv)) { + (void)__os_ufree(NULL, dbt->data); + return (EINVAL); + } + } + return (0); } @@ -535,6 +1005,7 @@ static void __dbj_dbt_copyout( (*jenv)->SetByteArrayRegion(jenv, newarr, 0, (jsize)dbt->size, (jbyte *)dbt->data); (*jenv)->SetObjectField(jenv, jdbt, dbt_data_fid, newarr); + (*jenv)->SetIntField(jenv, jdbt, dbt_offset_fid, 0); (*jenv)->SetIntField(jenv, jdbt, dbt_size_fid, (jint)dbt->size); if (jarr != NULL) *jarr = newarr; @@ -545,31 +1016,43 @@ static void __dbj_dbt_copyout( static void __dbj_dbt_release( JNIEnv *jenv, jobject jdbt, DBT *dbt, DBT_LOCKED *ldbt) { jthrowable t; - - if (ldbt->jarr != NULL) { - (*jenv)->ReleaseByteArrayElements(jenv, ldbt->jarr, - ldbt->orig_data, 0); - } + if (dbt == NULL) + return; + if (dbt->size != ldbt->orig_size) (*jenv)->SetIntField(jenv, jdbt, dbt_size_fid, (jint)dbt->size); - if (F_ISSET(dbt, DB_DBT_USERMEM) && - dbt->size > dbt->ulen && - (t = (*jenv)->ExceptionOccurred(jenv)) != NULL && - (*jenv)->IsInstanceOf(jenv, t, memex_class)) { - (*jenv)->CallNonvirtualVoidMethod(jenv, t, memex_class, - memex_update_method, jdbt); - /* - * We have to rethrow the exception because calling into Java - * clears it. - */ - (*jenv)->Throw(jenv, t); - } - if (ldbt->dbt.data != ldbt->orig_data + ldbt->offset) { - __dbj_dbt_copyout(jenv, &ldbt->dbt, NULL, jdbt); - (*jenv)->SetIntField(jenv, jdbt, dbt_offset_fid, 0); - __os_ufree(NULL, ldbt->dbt.data); + if (F_ISSET(dbt, DB_DBT_USERMEM)) { + if (ldbt->jarr != NULL) + (*jenv)->ReleaseByteArrayElements(jenv, + ldbt->jarr, ldbt->orig_data, 0); + + if (dbt->size > dbt->ulen && + (t = (*jenv)->ExceptionOccurred(jenv)) != NULL && + (*jenv)->IsInstanceOf(jenv, t, memex_class)) { + (*jenv)->CallNonvirtualVoidMethod(jenv, t, memex_class, + memex_update_method, jdbt); + /* + * We have to rethrow the exception because calling + * into Java clears it. 
+ */ + (*jenv)->Throw(jenv, t); + } + } else { + if (dbt->size > 0 && dbt->data != ldbt->orig_data) { + if (ldbt->reuse && + (jsize)(ldbt->offset + dbt->size) <= ldbt->array_len) + (*jenv)->SetByteArrayRegion(jenv, + ldbt->jarr, ldbt->offset, (jsize)dbt->size, + (jbyte *)dbt->data); + else + __dbj_dbt_copyout(jenv, dbt, NULL, jdbt); + (void)__os_ufree(NULL, dbt->data); + } + + if (ldbt->orig_data != NULL) + (void)__os_ufree(NULL, ldbt->orig_data); } } @@ -614,7 +1097,7 @@ static int __dbj_verify_callback(void *handle, const void *str_arg) { } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1vec +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1lock_1vec (JNIEnv *jenv, jclass jcls, jlong jdbenvp, jint locker, jint flags, jobjectArray list, jint offset, jint count) { DB_ENV *dbenv; @@ -682,13 +1165,12 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1vec case DB_LOCK_GET: /* Needed: mode, obj. Returned: lock. */ prereq->mode = (*jenv)->GetIntField(jenv, jlockreq, - lockreq_mode_fid); + lockreq_modeflag_fid); jobj = (*jenv)->GetObjectField(jenv, jlockreq, lockreq_obj_fid); - if ((err = - __dbj_dbt_copyin(jenv, &locked_dbts[i], jobj)) != 0) + if ((err = __dbj_dbt_copyin(jenv, + &locked_dbts[i], &prereq->obj, jobj, 0)) != 0) goto out2; - prereq->obj = &locked_dbts[i].dbt; break; case DB_LOCK_PUT: /* Needed: lock. Ignored: mode, obj. */ @@ -698,7 +1180,7 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1vec (jlockp = (*jenv)->GetLongField(jenv, jlock, lock_cptr_fid)) == 0L) { __dbj_throw(jenv, EINVAL, - "DbLockRequest lock field is NULL", NULL, jdbenv); + "LockRequest lock field is NULL", NULL, jdbenv); goto out2; } lockp = *(DB_LOCK **)&jlockp; @@ -712,10 +1194,9 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1vec /* Needed: obj. Ignored: lock, mode. */ jobj = (*jenv)->GetObjectField(jenv, jlockreq, lockreq_obj_fid); - if ((err = - __dbj_dbt_copyin(jenv, &locked_dbts[i], jobj)) != 0) + if ((err = __dbj_dbt_copyin(jenv, + &locked_dbts[i], &prereq->obj, jobj, 0)) != 0) goto out2; - prereq->obj = &locked_dbts[i].dbt; break; default: __dbj_throw(jenv, EINVAL, @@ -810,33 +1291,27 @@ out0: return; /* * These macros are used by code generated by the s_java script. 
*/ -#define JAVADB_STAT_INT(jenv, cl, jobj, statp, name) \ - (*jenv)->SetIntField(jenv, jobj, \ - (*jenv)->GetFieldID(jenv, cl, #name, "I"), \ - (jint)statp->name) - -#define JAVADB_STAT_STRING(jenv, cl, jobj, statp, name) \ - (*jenv)->SetObjectField(jenv, jobj, \ - (*jenv)->GetFieldID(jenv, cl, #name, \ - "Ljava/lang/String;"), \ +#define JAVADB_STAT_INT(jenv, jobj, fid, statp, name) \ + (*jenv)->SetIntField(jenv, jobj, fid, (jint)statp->name) + +#define JAVADB_STAT_STRING(jenv, jobj, fid, statp, name) \ + (*jenv)->SetObjectField(jenv, jobj, fid, \ (*jenv)->NewStringUTF(jenv, statp->name)) -#define JAVADB_STAT_LSN(jenv, cl, jobj, statp, name) \ - (*jenv)->SetObjectField(jenv, jobj, \ - (*jenv)->GetFieldID(jenv, cl, #name, "L" DB_PKG "DbLsn;"), \ +#define JAVADB_STAT_LSN(jenv, jobj, fid, statp, name) \ + (*jenv)->SetObjectField(jenv, jobj, fid, \ __dbj_wrap_DB_LSN(jenv, &statp->name)) -#define JAVADB_STAT_LONG(jenv, cl, jobj, statp, name) \ - (*jenv)->SetLongField(jenv, jobj, \ - (*jenv)->GetFieldID(jenv, cl, #name, "J"), \ +#define JAVADB_STAT_LONG(jenv, jobj, fid, statp, name) \ + (*jenv)->SetLongField(jenv, jobj, fid, \ (jlong)statp->name) -#define JAVADB_STAT_XID(jenv, cl, jobj, statp, name) { \ - jobject jarr = (*jenv)->NewByteArray(jenv, (jsize)DB_XIDDATASIZE); \ - (*jenv)->SetByteArrayRegion(jenv, jarr, 0, (jsize)DB_XIDDATASIZE, \ - (jbyte *)statp->name); \ - (*jenv)->SetObjectField(jenv, jobj, \ - (*jenv)->GetFieldID(jenv, cl, #name, "[B"), jarr); \ +#define JAVADB_STAT_XID(jenv, jobj, fid, statp, name) { \ + jobject jarr = \ + (*jenv)->NewByteArray(jenv, (jsize)DB_XIDDATASIZE); \ + (*jenv)->SetByteArrayRegion(jenv, jarr, \ + 0, (jsize)DB_XIDDATASIZE, (jbyte *)statp->name); \ + (*jenv)->SetObjectField(jenv, jobj, fid, jarr); \ } /* @@ -848,19 +1323,16 @@ out0: return; #include "java_stat_auto.c" -/* - * We do a dance so that the prefix in the C API points to the DB_ENV. - * The real prefix is stored as a Java string in the DbEnv object. 
- */ -static void __dbj_error(const char *prefix, char *msg) +static void __dbj_error(const DB_ENV *dbenv, const char *prefix, const char *msg) { - DB_ENV *dbenv = (DB_ENV *)prefix; JNIEnv *jenv = __dbj_get_jnienv(); jobject jdbenv = (jobject)DB_ENV_INTERNAL(dbenv); if (jdbenv != NULL) (*jenv)->CallNonvirtualVoidMethod(jenv, jdbenv, dbenv_class, - errcall_method, (*jenv)->NewStringUTF(jenv, msg)); + errcall_method, + (*jenv)->NewStringUTF(jenv, prefix), + (*jenv)->NewStringUTF(jenv, msg)); } static void __dbj_env_feedback(DB_ENV *dbenv, int opcode, int percent) @@ -872,6 +1344,16 @@ static void __dbj_env_feedback(DB_ENV *dbenv, int opcode, int percent) env_feedback_method, opcode, percent); } +static void __dbj_message(const DB_ENV *dbenv, const char *msg) +{ + JNIEnv *jenv = __dbj_get_jnienv(); + jobject jdbenv = (jobject)DB_ENV_INTERNAL(dbenv); + + if (jdbenv != NULL) + (*jenv)->CallNonvirtualVoidMethod(jenv, jdbenv, dbenv_class, + msgcall_method, (*jenv)->NewStringUTF(jenv, msg)); +} + static void __dbj_panic(DB_ENV *dbenv, int err) { JNIEnv *jenv = __dbj_get_jnienv(); @@ -961,7 +1443,6 @@ static int __dbj_seckey_create(DB *db, jobject jkey, jdata, jresult; jbyteArray jkeyarr, jdataarr; DBT_LOCKED lresult; - void *data_copy; int ret; jkey = (*jenv)->NewObject(jenv, dbt_class, dbt_construct); @@ -979,28 +1460,25 @@ static int __dbj_seckey_create(DB *db, ret = (int)(*jenv)->CallNonvirtualIntMethod(jenv, jdb, db_class, seckey_create_method, jkey, jdata, jresult); + if (ret != 0) + goto err; + if ((*jenv)->ExceptionOccurred(jenv)) { /* The exception will be thrown, so this could be any error. */ ret = EINVAL; goto err; } - if ((ret = __dbj_dbt_copyin(jenv, &lresult, jresult)) != 0) + if ((ret = __dbj_dbt_copyin(jenv, &lresult, NULL, jresult, 0)) != 0) goto err; if (lresult.jarr != NULL) { /* - * If there's data, we need to make a copy because we can't - * keep the Java array pinned. + * If there's data, we've got a copy of it (that's the default + * when no Dbt flags are set, so we can safely free the array. */ - memset(result, 0, sizeof (DBT)); *result = lresult.dbt; - if ((ret = __os_umalloc(NULL, result->size, &data_copy)) == 0) - memcpy(data_copy, result->data, result->size); - (*jenv)->ReleaseByteArrayElements(jenv, lresult.jarr, - lresult.orig_data, 0); (*jenv)->DeleteLocalRef(jenv, lresult.jarr); - result->data = data_copy; result->flags |= DB_DBT_APPMALLOC; } @@ -1019,7 +1497,6 @@ static int __dbj_append_recno(DB *db, DBT *dbt, db_recno_t recno) jobject jdb = (jobject)DB_INTERNAL(db); jobject jdbt; DBT_LOCKED lresult; - void *data_copy; jbyteArray jdbtarr; int ret; @@ -1041,21 +1518,16 @@ static int __dbj_append_recno(DB *db, DBT *dbt, db_recno_t recno) goto err; } - if ((ret = __dbj_dbt_copyin(jenv, &lresult, jdbt)) != 0) + if ((ret = __dbj_dbt_copyin(jenv, &lresult, NULL, jdbt, 0)) != 0) goto err; if (lresult.jarr != NULL) { /* - * If there's data, we need to make a copy because we can't - * keep the Java array pinned. + * If there's data, we've got a copy of it (that's the default + * when no Dbt flags are set, so we can safely free the array. 
*/ *dbt = lresult.dbt; - if ((ret = __os_umalloc(db->dbenv, dbt->size, &data_copy)) == 0) - memcpy(data_copy, dbt->data, dbt->size); - (*jenv)->ReleaseByteArrayElements(jenv, lresult.jarr, - lresult.orig_data, 0); (*jenv)->DeleteLocalRef(jenv, lresult.jarr); - dbt->data = data_copy; dbt->flags |= DB_DBT_APPMALLOC; } @@ -1069,22 +1541,23 @@ static int __dbj_bt_compare(DB *db, const DBT *dbt1, const DBT *dbt2) { JNIEnv *jenv = __dbj_get_jnienv(); jobject jdb = (jobject)DB_INTERNAL(db); - jobject jdbt1, jdbt2; jbyteArray jdbtarr1, jdbtarr2; int ret; - jdbt1 = (*jenv)->NewObject(jenv, dbt_class, dbt_construct); - jdbt2 = (*jenv)->NewObject(jenv, dbt_class, dbt_construct); - if (jdbt1 == NULL || jdbt2 == NULL) - return ENOMEM; /* An exception is pending */ + jdbtarr1 = (*jenv)->NewByteArray(jenv, (jsize)dbt1->size); + if (jdbtarr1 == NULL) + return ENOMEM; + (*jenv)->SetByteArrayRegion(jenv, jdbtarr1, 0, (jsize)dbt1->size, + (jbyte *)dbt1->data); + + jdbtarr2 = (*jenv)->NewByteArray(jenv, (jsize)dbt2->size); + if (jdbtarr2 == NULL) + return ENOMEM; + (*jenv)->SetByteArrayRegion(jenv, jdbtarr2, 0, (jsize)dbt2->size, + (jbyte *)dbt2->data); - __dbj_dbt_copyout(jenv, dbt1, &jdbtarr1, jdbt1); - __dbj_dbt_copyout(jenv, dbt2, &jdbtarr2, jdbt2); - if (jdbtarr1 == NULL || jdbtarr2 == NULL) - return ENOMEM; /* An exception is pending */ - ret = (int)(*jenv)->CallNonvirtualIntMethod(jenv, jdb, db_class, - bt_compare_method, jdbt1, jdbt2); + bt_compare_method, jdbtarr1, jdbtarr2); if ((*jenv)->ExceptionOccurred(jenv)) { /* The exception will be thrown, so this could be any error. */ @@ -1093,8 +1566,6 @@ static int __dbj_bt_compare(DB *db, const DBT *dbt1, const DBT *dbt2) (*jenv)->DeleteLocalRef(jenv, jdbtarr2); (*jenv)->DeleteLocalRef(jenv, jdbtarr1); - (*jenv)->DeleteLocalRef(jenv, jdbt2); - (*jenv)->DeleteLocalRef(jenv, jdbt1); return (ret); } @@ -1192,43 +1663,42 @@ static u_int32_t __dbj_h_hash(DB *db, const void *data, u_int32_t len) } -JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db_1javaJNI_initDbEnvRef0( +JNIEXPORT jlong JNICALL +Java_com_sleepycat_db_internal_db_1javaJNI_initDbEnvRef0( JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { DB_ENV *self = *(DB_ENV **)&jarg1; + jlong ret; COMPQUIET(jcls, NULL); DB_ENV_INTERNAL(self) = (void *)(*jenv)->NewGlobalRef(jenv, jarg2); - self->set_errpfx(self, (const char*)self); - return (jobject)DB_ENV_INTERNAL(self); + *(jobject *)&ret = (jobject)DB_ENV_INTERNAL(self); + return (ret); } -JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db_1javaJNI_initDbRef0( +JNIEXPORT jlong JNICALL +Java_com_sleepycat_db_internal_db_1javaJNI_initDbRef0( JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { DB *self = *(DB **)&jarg1; + jlong ret; COMPQUIET(jcls, NULL); DB_INTERNAL(self) = (void *)(*jenv)->NewGlobalRef(jenv, jarg2); - return (jobject)DB_INTERNAL(self); -} - -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_deleteRef0( - JNIEnv *jenv, jclass jcls, jobject jref) { - COMPQUIET(jcls, NULL); - - if (jref != NULL) - (*jenv)->DeleteGlobalRef(jenv, jref); + *(jobject *)&ret = (jobject)DB_INTERNAL(self); + return (ret); } -JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db_1javaJNI_getDbRef0( +JNIEXPORT void JNICALL +Java_com_sleepycat_db_internal_db_1javaJNI_deleteRef0( JNIEnv *jenv, jclass jcls, jlong jarg1) { - DB *self = *(DB **)&jarg1; + jobject jref = *(jobject *)&jarg1; COMPQUIET(jcls, NULL); - COMPQUIET(jenv, NULL); - return (jobject)DB_INTERNAL(self); + if (jref != 0L) + (*jenv)->DeleteGlobalRef(jenv, jref); } -JNIEXPORT jlong JNICALL 
Java_com_sleepycat_db_db_1javaJNI_getDbEnv0( +JNIEXPORT jlong JNICALL +Java_com_sleepycat_db_internal_db_1javaJNI_getDbEnv0( JNIEnv *jenv, jclass jcls, jlong jarg1) { DB *self = *(DB **)&jarg1; jlong env_cptr; @@ -1237,11 +1707,12 @@ JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_getDbEnv0( COMPQUIET(jcls, NULL); *(DB_ENV **)&env_cptr = self->dbenv; - return env_cptr; + return (env_cptr); } JNIEXPORT jboolean JNICALL -Java_com_sleepycat_db_DbUtil_is_1big_1endian(JNIEnv *jenv, jclass clazz) +Java_com_sleepycat_db_internal_DbUtil_is_1big_1endian( + JNIEnv *jenv, jclass clazz) { COMPQUIET(jenv, NULL); COMPQUIET(clazz, NULL); @@ -1263,591 +1734,625 @@ struct __db_out_stream { int (*callback) __P((void *, const void *)); }; -struct __db *new___db(DB_ENV *dbenv,u_int32_t flags){ +#define Db __db +#define Dbc __dbc +#define Dbt __db_dbt +#define DbEnv __db_env +#define DbLock __db_lock_u +#define DbLogc __db_log_cursor +#define DbLsn __db_lsn +#define DbMpoolFile __db_mpoolfile +#define DbSequence __db_sequence +#define DbTxn __db_txn + +/* Suppress a compilation warning for an unused symbol */ +void *unused = SWIG_JavaThrowException; + +struct Db *new_Db(DB_ENV *dbenv,u_int32_t flags){ DB *self; errno = db_create(&self, dbenv, flags); return (errno == 0) ? self : NULL; } -static db_ret_t __db_associate(struct __db *self,DB_TXN *txnid,DB *secondary,int (*callback)(DB *,DBT const *,DBT const *,DBT *),u_int32_t flags){ +db_ret_t Db_associate(struct Db *self,DB_TXN *txnid,DB *secondary,int (*callback)(DB *,DBT const *,DBT const *,DBT *),u_int32_t flags){ return self->associate(self, txnid, secondary, callback, flags); } -static int __db_close(struct __db *self,u_int32_t flags){ +int Db_close(struct Db *self,u_int32_t flags){ errno = self->close(self, flags); return errno; } -static DBC *__db_cursor(struct __db *self,DB_TXN *txnid,u_int32_t flags){ +DBC *Db_cursor(struct Db *self,DB_TXN *txnid,u_int32_t flags){ DBC *cursorp; errno = self->cursor(self, txnid, &cursorp, flags); return (errno == 0) ? 
cursorp : NULL; } -static int __db_del(struct __db *self,DB_TXN *txnid,DBT *key,u_int32_t flags){ +int Db_del(struct Db *self,DB_TXN *txnid,DBT *key,u_int32_t flags){ return self->del(self, txnid, key, flags); } -static void __db_err_internal(struct __db *self,int error,char const *message){ +void Db_err(struct Db *self,int error,char const *message){ self->err(self, error, message); } -static void __db_errx(struct __db *self,char const *message){ +void Db_errx(struct Db *self,char const *message){ self->errx(self, message); } -static int __db_get__SWIG_0(struct __db *self,DB_TXN *txnid,DBT *key,DBT *data,u_int32_t flags){ +int_bool Db_get_transactional(struct Db *self){ + return self->get_transactional(self); + } +int Db_get(struct Db *self,DB_TXN *txnid,DBT *key,DBT *data,u_int32_t flags){ return self->get(self, txnid, key, data, flags); } -static int_bool __db_get_byteswapped(struct __db *self){ +int_bool Db_get_byteswapped(struct Db *self){ int ret; errno = self->get_byteswapped(self, &ret); return ret; } -static jlong __db_get_cachesize(struct __db *self){ +jlong Db_get_cachesize(struct Db *self){ u_int32_t gbytes, bytes; errno = self->get_cachesize(self, &gbytes, &bytes, NULL); return (jlong)gbytes * GIGABYTE + bytes; } -static u_int32_t __db_get_cachesize_ncache(struct __db *self){ +u_int32_t Db_get_cachesize_ncache(struct Db *self){ int ret; errno = self->get_cachesize(self, NULL, NULL, &ret); return ret; } -static char const *__db_get_filename(struct __db *self){ +char const *Db_get_filename(struct Db *self){ const char *ret; errno = self->get_dbname(self, &ret, NULL); return ret; } -static char const *__db_get_dbname(struct __db *self){ +char const *Db_get_dbname(struct Db *self){ const char *ret; errno = self->get_dbname(self, NULL, &ret); return ret; } -static u_int32_t __db_get_encrypt_flags(struct __db *self){ +u_int32_t Db_get_encrypt_flags(struct Db *self){ u_int32_t ret; errno = self->get_encrypt_flags(self, &ret); return ret; } -static u_int32_t __db_get_flags(struct __db *self){ +char const *Db_get_errpfx(struct Db *self){ + const char *ret; + errno = 0; + self->get_errpfx(self, &ret); + return ret; + } +u_int32_t Db_get_flags(struct Db *self){ u_int32_t ret; errno = self->get_flags(self, &ret); return ret; } -static int __db_get_lorder(struct __db *self){ +int Db_get_lorder(struct Db *self){ int ret; errno = self->get_lorder(self, &ret); return ret; } -static DB_MPOOLFILE *__db_get_mpf(struct __db *self){ +DB_MPOOLFILE *Db_get_mpf(struct Db *self){ errno = 0; return self->mpf; } -static u_int32_t __db_get_open_flags(struct __db *self){ +u_int32_t Db_get_open_flags(struct Db *self){ u_int32_t ret; errno = self->get_open_flags(self, &ret); return ret; } -static u_int32_t __db_get_pagesize(struct __db *self){ +u_int32_t Db_get_pagesize(struct Db *self){ u_int32_t ret; errno = self->get_pagesize(self, &ret); return ret; } -static u_int32_t __db_get_bt_minkey(struct __db *self){ +u_int32_t Db_get_bt_minkey(struct Db *self){ u_int32_t ret; errno = self->get_bt_minkey(self, &ret); return ret; } -static u_int32_t __db_get_h_ffactor(struct __db *self){ +u_int32_t Db_get_h_ffactor(struct Db *self){ u_int32_t ret; errno = self->get_h_ffactor(self, &ret); return ret; } -static u_int32_t __db_get_h_nelem(struct __db *self){ +u_int32_t Db_get_h_nelem(struct Db *self){ u_int32_t ret; errno = self->get_h_nelem(self, &ret); return ret; } -static int __db_get_re_delim(struct __db *self){ +int Db_get_re_delim(struct Db *self){ int ret; errno = self->get_re_delim(self, &ret); return ret; } 
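/*
 * A minimal sketch of the errno convention the Db_* wrappers above
 * follow: each wrapper returns the useful value directly and leaves the
 * Berkeley DB return code in errno, so the generated JNI layer can test
 * errno and raise the matching Java exception via __dbj_throw.  The
 * caller below is hypothetical and assumes this file's Db_get_pagesize
 * and __dbj_throw declarations.
 */
static jint call_Db_get_pagesize(JNIEnv *jenv, struct Db *db, jobject jdbenv)
{
	u_int32_t pagesize;

	errno = 0;
	pagesize = Db_get_pagesize(db);	/* wrapper sets errno on failure */
	if (errno != 0)
		(void)__dbj_throw(jenv, errno, NULL, NULL, jdbenv);
	return ((jint)pagesize);
}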
-static u_int32_t __db_get_re_len(struct __db *self){ +u_int32_t Db_get_re_len(struct Db *self){ u_int32_t ret; errno = self->get_re_len(self, &ret); return ret; } -static int __db_get_re_pad(struct __db *self){ +int Db_get_re_pad(struct Db *self){ int ret; errno = self->get_re_pad(self, &ret); return ret; } -static char const *__db_get_re_source(struct __db *self){ +char const *Db_get_re_source(struct Db *self){ const char *ret; errno = self->get_re_source(self, &ret); return ret; } -static u_int32_t __db_get_q_extentsize(struct __db *self){ +u_int32_t Db_get_q_extentsize(struct Db *self){ u_int32_t ret; errno = self->get_q_extentsize(self, &ret); return ret; } -static u_int32_t __db_get_flags_raw(struct __db *self){ - errno = 0; - return self->flags; - } -static int_bool __db_get_transactional(struct __db *self){ - int ret; - errno = self->get_transactional(self, &ret); - return ret; - } -static DBTYPE __db_get_type(struct __db *self){ +DBTYPE Db_get_type(struct Db *self){ DBTYPE type; errno = self->get_type(self, &type); return type; } -static DBC *__db_join(struct __db *self,DBC **curslist,u_int32_t flags){ +DBC *Db_join(struct Db *self,DBC **curslist,u_int32_t flags){ DBC *dbcp; errno = self->join(self, curslist, &dbcp, flags); return (errno == 0) ? dbcp : NULL; } -static db_ret_t __db_key_range(struct __db *self,DB_TXN *txnid,DBT *key,DB_KEY_RANGE *key_range,u_int32_t flags){ +db_ret_t Db_key_range(struct Db *self,DB_TXN *txnid,DBT *key,DB_KEY_RANGE *key_range,u_int32_t flags){ return self->key_range(self, txnid, key, key_range, flags); } -static db_ret_t __db_open(struct __db *self,DB_TXN *txnid,char const *file,char const *database,DBTYPE type,u_int32_t flags,int mode){ +db_ret_t Db_open(struct Db *self,DB_TXN *txnid,char const *file,char const *database,DBTYPE type,u_int32_t flags,int mode){ return self->open(self, txnid, file, database, type, flags, mode); } -static int __db_pget__SWIG_1(struct __db *self,DB_TXN *txnid,DBT *key,DBT *pkey,DBT *data,u_int32_t flags){ +int Db_pget(struct Db *self,DB_TXN *txnid,DBT *key,DBT *pkey,DBT *data,u_int32_t flags){ return self->pget(self, txnid, key, pkey, data, flags); } -static int __db_put(struct __db *self,DB_TXN *txnid,DBT *key,DBT *data,u_int32_t flags){ +int Db_put(struct Db *self,DB_TXN *txnid,DBT *key,DBT *data,u_int32_t flags){ return self->put(self, txnid, key, data, flags); } -static db_ret_t __db_remove(struct __db *self,char const *file,char const *database,u_int32_t flags){ +db_ret_t Db_remove(struct Db *self,char const *file,char const *database,u_int32_t flags){ return self->remove(self, file, database, flags); } -static db_ret_t __db_rename(struct __db *self,char const *file,char const *database,char const *newname,u_int32_t flags){ +db_ret_t Db_rename(struct Db *self,char const *file,char const *database,char const *newname,u_int32_t flags){ return self->rename(self, file, database, newname, flags); } -static db_ret_t __db_set_append_recno(struct __db *self,int (*db_append_recno_fcn)(DB *,DBT *,db_recno_t)){ +db_ret_t Db_set_append_recno(struct Db *self,int (*db_append_recno_fcn)(DB *,DBT *,db_recno_t)){ return self->set_append_recno(self, db_append_recno_fcn); } -static db_ret_t __db_set_bt_compare(struct __db *self,int (*bt_compare_fcn)(DB *,DBT const *,DBT const *)){ +db_ret_t Db_set_bt_compare(struct Db *self,int (*bt_compare_fcn)(DB *,DBT const *,DBT const *)){ return self->set_bt_compare(self, bt_compare_fcn); } -static db_ret_t __db_set_bt_maxkey(struct __db *self,u_int32_t maxkey){ +db_ret_t Db_set_bt_maxkey(struct 
Db *self,u_int32_t maxkey){ return self->set_bt_maxkey(self, maxkey); } -static db_ret_t __db_set_bt_minkey(struct __db *self,u_int32_t bt_minkey){ +db_ret_t Db_set_bt_minkey(struct Db *self,u_int32_t bt_minkey){ return self->set_bt_minkey(self, bt_minkey); } -static db_ret_t __db_set_bt_prefix(struct __db *self,size_t (*bt_prefix_fcn)(DB *,DBT const *,DBT const *)){ +db_ret_t Db_set_bt_prefix(struct Db *self,size_t (*bt_prefix_fcn)(DB *,DBT const *,DBT const *)){ return self->set_bt_prefix(self, bt_prefix_fcn); } -static db_ret_t __db_set_cachesize(struct __db *self,jlong bytes,int ncache){ +db_ret_t Db_set_cachesize(struct Db *self,jlong bytes,int ncache){ return self->set_cachesize(self, (u_int32_t)(bytes / GIGABYTE), (u_int32_t)(bytes % GIGABYTE), ncache); } -static db_ret_t __db_set_dup_compare(struct __db *self,int (*dup_compare_fcn)(DB *,DBT const *,DBT const *)){ +db_ret_t Db_set_dup_compare(struct Db *self,int (*dup_compare_fcn)(DB *,DBT const *,DBT const *)){ return self->set_dup_compare(self, dup_compare_fcn); } -static db_ret_t __db_set_encrypt(struct __db *self,char const *passwd,u_int32_t flags){ +db_ret_t Db_set_encrypt(struct Db *self,char const *passwd,u_int32_t flags){ return self->set_encrypt(self, passwd, flags); } -static db_ret_t __db_set_feedback(struct __db *self,void (*db_feedback_fcn)(DB *,int,int)){ +void Db_set_errpfx(struct Db *self,char const *errpfx){ + self->set_errpfx(self, errpfx); + } +db_ret_t Db_set_feedback(struct Db *self,void (*db_feedback_fcn)(DB *,int,int)){ return self->set_feedback(self, db_feedback_fcn); } -static db_ret_t __db_set_flags(struct __db *self,u_int32_t flags){ +db_ret_t Db_set_flags(struct Db *self,u_int32_t flags){ return self->set_flags(self, flags); } -static db_ret_t __db_set_h_ffactor(struct __db *self,u_int32_t h_ffactor){ +db_ret_t Db_set_h_ffactor(struct Db *self,u_int32_t h_ffactor){ return self->set_h_ffactor(self, h_ffactor); } -static db_ret_t __db_set_h_hash(struct __db *self,u_int32_t (*h_hash_fcn)(DB *,void const *,u_int32_t)){ +db_ret_t Db_set_h_hash(struct Db *self,u_int32_t (*h_hash_fcn)(DB *,void const *,u_int32_t)){ return self->set_h_hash(self, h_hash_fcn); } -static db_ret_t __db_set_h_nelem(struct __db *self,u_int32_t h_nelem){ +db_ret_t Db_set_h_nelem(struct Db *self,u_int32_t h_nelem){ return self->set_h_nelem(self, h_nelem); } -static db_ret_t __db_set_lorder(struct __db *self,int lorder){ +db_ret_t Db_set_lorder(struct Db *self,int lorder){ return self->set_lorder(self, lorder); } -static db_ret_t __db_set_pagesize(struct __db *self,u_int32_t pagesize){ +db_ret_t Db_set_pagesize(struct Db *self,u_int32_t pagesize){ return self->set_pagesize(self, pagesize); } -static db_ret_t __db_set_re_delim(struct __db *self,int re_delim){ +db_ret_t Db_set_re_delim(struct Db *self,int re_delim){ return self->set_re_delim(self, re_delim); } -static db_ret_t __db_set_re_len(struct __db *self,u_int32_t re_len){ +db_ret_t Db_set_re_len(struct Db *self,u_int32_t re_len){ return self->set_re_len(self, re_len); } -static db_ret_t __db_set_re_pad(struct __db *self,int re_pad){ +db_ret_t Db_set_re_pad(struct Db *self,int re_pad){ return self->set_re_pad(self, re_pad); } -static db_ret_t __db_set_re_source(struct __db *self,char *source){ +db_ret_t Db_set_re_source(struct Db *self,char *source){ return self->set_re_source(self, source); } -static db_ret_t __db_set_q_extentsize(struct __db *self,u_int32_t extentsize){ +db_ret_t Db_set_q_extentsize(struct Db *self,u_int32_t extentsize){ return self->set_q_extentsize(self, 
extentsize); } -static void *__db_stat(struct __db *self,u_int32_t flags){ +void *Db_stat(struct Db *self,DB_TXN *txnid,u_int32_t flags){ void *statp; - errno = self->stat(self, &statp, flags); + errno = self->stat(self, txnid, &statp, flags); return (errno == 0) ? statp : NULL; } -static db_ret_t __db_sync(struct __db *self,u_int32_t flags){ +db_ret_t Db_sync(struct Db *self,u_int32_t flags){ return self->sync(self, flags); } -static int __db_truncate(struct __db *self,DB_TXN *txnid,u_int32_t flags){ +int Db_truncate(struct Db *self,DB_TXN *txnid,u_int32_t flags){ u_int32_t count; errno = self->truncate(self, txnid, &count, flags); return count; } -static db_ret_t __db_upgrade(struct __db *self,char const *file,u_int32_t flags){ +db_ret_t Db_upgrade(struct Db *self,char const *file,u_int32_t flags){ return self->upgrade(self, file, flags); } -static db_ret_t __db_verify(struct __db *self,char const *file,char const *database,struct __db_out_stream outfile,u_int32_t flags){ +int_bool Db_verify(struct Db *self,char const *file,char const *database,struct __db_out_stream outfile,u_int32_t flags){ /* * We can't easily #include "dbinc/db_ext.h" because of name * clashes, so we declare this explicitly. */ extern int __db_verify_internal __P((DB *, const char *, const char *, void *, int (*)(void *, const void *), u_int32_t)); - return __db_verify_internal(self, file, database, + errno = __db_verify_internal(self, file, database, outfile.handle, outfile.callback, flags); + if (errno == DB_VERIFY_BAD) { + errno = 0; + return 0; + } else + return 1; } -static db_ret_t __dbc_close(struct __dbc *self){ +db_ret_t Dbc_close(struct Dbc *self){ return self->c_close(self); } -static db_recno_t __dbc_count(struct __dbc *self,u_int32_t flags){ +db_recno_t Dbc_count(struct Dbc *self,u_int32_t flags){ db_recno_t count; errno = self->c_count(self, &count, flags); return count; } -static int __dbc_del(struct __dbc *self,u_int32_t flags){ +int Dbc_del(struct Dbc *self,u_int32_t flags){ return self->c_del(self, flags); } -static DBC *__dbc_dup(struct __dbc *self,u_int32_t flags){ +DBC *Dbc_dup(struct Dbc *self,u_int32_t flags){ DBC *newcurs; errno = self->c_dup(self, &newcurs, flags); return (errno == 0) ? newcurs : NULL; } -static int __dbc_get__SWIG_0(struct __dbc *self,DBT *key,DBT *data,u_int32_t flags){ +int Dbc_get(struct Dbc *self,DBT *key,DBT *data,u_int32_t flags){ return self->c_get(self, key, data, flags); } -static int __dbc_pget__SWIG_1(struct __dbc *self,DBT *key,DBT *pkey,DBT *data,u_int32_t flags){ +int Dbc_pget(struct Dbc *self,DBT *key,DBT *pkey,DBT *data,u_int32_t flags){ return self->c_pget(self, key, pkey, data, flags); } -static int __dbc_put(struct __dbc *self,DBT *key,DBT *data,u_int32_t flags){ +int Dbc_put(struct Dbc *self,DBT *key,DBT *data,u_int32_t flags){ return self->c_put(self, key, data, flags); } -struct __db_env *new___db_env(u_int32_t flags){ +struct DbEnv *new_DbEnv(u_int32_t flags){ DB_ENV *self = NULL; errno = db_env_create(&self, flags); return (errno == 0) ? 
self : NULL; } -static db_ret_t __db_env_close(struct __db_env *self,u_int32_t flags){ +db_ret_t DbEnv_close(struct DbEnv *self,u_int32_t flags){ return self->close(self, flags); } -static db_ret_t __db_env_dbremove(struct __db_env *self,DB_TXN *txnid,char const *file,char const *database,u_int32_t flags){ +db_ret_t DbEnv_dbremove(struct DbEnv *self,DB_TXN *txnid,char const *file,char const *database,u_int32_t flags){ return self->dbremove(self, txnid, file, database, flags); } -static db_ret_t __db_env_dbrename(struct __db_env *self,DB_TXN *txnid,char const *file,char const *database,char const *newname,u_int32_t flags){ +db_ret_t DbEnv_dbrename(struct DbEnv *self,DB_TXN *txnid,char const *file,char const *database,char const *newname,u_int32_t flags){ return self->dbrename(self, txnid, file, database, newname, flags); } -static void __db_env_err(struct __db_env *self,int error,char const *message){ +void DbEnv_err(struct DbEnv *self,int error,char const *message){ self->err(self, error, message); } -static void __db_env_errx(struct __db_env *self,char const *message){ +void DbEnv_errx(struct DbEnv *self,char const *message){ self->errx(self, message); } -static char const **__db_env_get_data_dirs(struct __db_env *self){ +char const **DbEnv_get_data_dirs(struct DbEnv *self){ const char **ret; errno = self->get_data_dirs(self, &ret); return ret; } -static u_int32_t __db_env_get_encrypt_flags(struct __db_env *self){ +u_int32_t DbEnv_get_encrypt_flags(struct DbEnv *self){ u_int32_t ret; errno = self->get_encrypt_flags(self, &ret); return ret; } -static u_int32_t __db_env_get_flags(struct __db_env *self){ +char const *DbEnv_get_errpfx(struct DbEnv *self){ + const char *ret; + errno = 0; + self->get_errpfx(self, &ret); + return ret; + } +u_int32_t DbEnv_get_flags(struct DbEnv *self){ u_int32_t ret; errno = self->get_flags(self, &ret); return ret; } -static char const *__db_env_get_home(struct __db_env *self){ +char const *DbEnv_get_home(struct DbEnv *self){ const char *ret; errno = self->get_home(self, &ret); return ret; } -static u_int32_t __db_env_get_open_flags(struct __db_env *self){ +u_int32_t DbEnv_get_open_flags(struct DbEnv *self){ u_int32_t ret; errno = self->get_open_flags(self, &ret); return ret; } -static long __db_env_get_shm_key(struct __db_env *self){ +long DbEnv_get_shm_key(struct DbEnv *self){ long ret; errno = self->get_shm_key(self, &ret); return ret; } -static u_int32_t __db_env_get_tas_spins(struct __db_env *self){ +u_int32_t DbEnv_get_tas_spins(struct DbEnv *self){ u_int32_t ret; errno = self->get_tas_spins(self, &ret); return ret; } -static char const *__db_env_get_tmp_dir(struct __db_env *self){ +char const *DbEnv_get_tmp_dir(struct DbEnv *self){ const char *ret; errno = self->get_tmp_dir(self, &ret); return ret; } -static int_bool __db_env_get_verbose(struct __db_env *self,u_int32_t which){ +int_bool DbEnv_get_verbose(struct DbEnv *self,u_int32_t which){ int ret; errno = self->get_verbose(self, which, &ret); return ret; } -static db_ret_t __db_env_open(struct __db_env *self,char const *db_home,u_int32_t flags,int mode){ +db_ret_t DbEnv_open(struct DbEnv *self,char const *db_home,u_int32_t flags,int mode){ return self->open(self, db_home, flags, mode); } -static db_ret_t __db_env_remove(struct __db_env *self,char const *db_home,u_int32_t flags){ +db_ret_t DbEnv_remove(struct DbEnv *self,char const *db_home,u_int32_t flags){ return self->remove(self, db_home, flags); } -static db_ret_t __db_env_set_cachesize(struct __db_env *self,jlong bytes,int ncache){ +db_ret_t 
DbEnv_set_cachesize(struct DbEnv *self,jlong bytes,int ncache){ return self->set_cachesize(self, (u_int32_t)(bytes / GIGABYTE), (u_int32_t)(bytes % GIGABYTE), ncache); } -static db_ret_t __db_env_set_data_dir(struct __db_env *self,char const *dir){ +db_ret_t DbEnv_set_data_dir(struct DbEnv *self,char const *dir){ return self->set_data_dir(self, dir); } -static db_ret_t __db_env_set_encrypt(struct __db_env *self,char const *passwd,u_int32_t flags){ +db_ret_t DbEnv_set_encrypt(struct DbEnv *self,char const *passwd,u_int32_t flags){ return self->set_encrypt(self, passwd, flags); } -static void __db_env_set_errcall(struct __db_env *self,void (*db_errcall_fcn)(char const *,char *)){ +void DbEnv_set_errcall(struct DbEnv *self,void (*db_errcall_fcn)(DB_ENV const *,char const *,char const *)){ self->set_errcall(self, db_errcall_fcn); } -static db_ret_t __db_env_set_flags(struct __db_env *self,u_int32_t flags,int_bool onoff){ +void DbEnv_set_errpfx(struct DbEnv *self,char const *errpfx){ + self->set_errpfx(self, errpfx); + } +db_ret_t DbEnv_set_flags(struct DbEnv *self,u_int32_t flags,int_bool onoff){ return self->set_flags(self, flags, onoff); } -static db_ret_t __db_env_set_feedback(struct __db_env *self,void (*db_feedback_fcn)(DB_ENV *,int,int)){ - return self->set_feedback(self, db_feedback_fcn); +db_ret_t DbEnv_set_feedback(struct DbEnv *self,void (*env_feedback_fcn)(DB_ENV *,int,int)){ + return self->set_feedback(self, env_feedback_fcn); } -static db_ret_t __db_env_set_mp_mmapsize(struct __db_env *self,size_t mp_mmapsize){ +db_ret_t DbEnv_set_mp_mmapsize(struct DbEnv *self,size_t mp_mmapsize){ return self->set_mp_mmapsize(self, mp_mmapsize); } -static db_ret_t __db_env_set_paniccall(struct __db_env *self,void (*db_panic_fcn)(DB_ENV *,int)){ +void DbEnv_set_msgcall(struct DbEnv *self,void (*db_msgcall_fcn)(DB_ENV const *,char const *)){ + self->set_msgcall(self, db_msgcall_fcn); + } +db_ret_t DbEnv_set_paniccall(struct DbEnv *self,void (*db_panic_fcn)(DB_ENV *,int)){ return self->set_paniccall(self, db_panic_fcn); } -static db_ret_t __db_env_set_rpc_server(struct __db_env *self,void *client,char *host,long cl_timeout,long sv_timeout,u_int32_t flags){ +db_ret_t DbEnv_set_rpc_server(struct DbEnv *self,void *client,char *host,long cl_timeout,long sv_timeout,u_int32_t flags){ return self->set_rpc_server(self, client, host, cl_timeout, sv_timeout, flags); } -static db_ret_t __db_env_set_shm_key(struct __db_env *self,long shm_key){ +db_ret_t DbEnv_set_shm_key(struct DbEnv *self,long shm_key){ return self->set_shm_key(self, shm_key); } -static db_ret_t __db_env_set_tas_spins(struct __db_env *self,u_int32_t tas_spins){ +db_ret_t DbEnv_set_tas_spins(struct DbEnv *self,u_int32_t tas_spins){ return self->set_tas_spins(self, tas_spins); } -static db_ret_t __db_env_set_timeout(struct __db_env *self,db_timeout_t timeout,u_int32_t flags){ +db_ret_t DbEnv_set_timeout(struct DbEnv *self,db_timeout_t timeout,u_int32_t flags){ return self->set_timeout(self, timeout, flags); } -static db_ret_t __db_env_set_tmp_dir(struct __db_env *self,char const *dir){ +db_ret_t DbEnv_set_tmp_dir(struct DbEnv *self,char const *dir){ return self->set_tmp_dir(self, dir); } -static db_ret_t __db_env_set_tx_max(struct __db_env *self,u_int32_t max){ +db_ret_t DbEnv_set_tx_max(struct DbEnv *self,u_int32_t max){ return self->set_tx_max(self, max); } -static db_ret_t __db_env_set_app_dispatch(struct __db_env *self,int (*tx_recover)(DB_ENV *,DBT *,DB_LSN *,db_recops)){ +db_ret_t DbEnv_set_app_dispatch(struct DbEnv *self,int 
(*tx_recover)(DB_ENV *,DBT *,DB_LSN *,db_recops)){ return self->set_app_dispatch(self, tx_recover); } -static db_ret_t __db_env_set_tx_timestamp(struct __db_env *self,time_t *timestamp){ +db_ret_t DbEnv_set_tx_timestamp(struct DbEnv *self,time_t *timestamp){ return self->set_tx_timestamp(self, timestamp); } -static db_ret_t __db_env_set_verbose(struct __db_env *self,u_int32_t which,int_bool onoff){ +db_ret_t DbEnv_set_verbose(struct DbEnv *self,u_int32_t which,int_bool onoff){ return self->set_verbose(self, which, onoff); } -static struct __db_lk_conflicts __db_env_get_lk_conflicts(struct __db_env *self){ +struct __db_lk_conflicts DbEnv_get_lk_conflicts(struct DbEnv *self){ struct __db_lk_conflicts ret; errno = self->get_lk_conflicts(self, (const u_int8_t **)&ret.lk_conflicts, &ret.lk_modes); return ret; } -static u_int32_t __db_env_get_lk_detect(struct __db_env *self){ +u_int32_t DbEnv_get_lk_detect(struct DbEnv *self){ u_int32_t ret; errno = self->get_lk_detect(self, &ret); return ret; } -static u_int32_t __db_env_get_lk_max_locks(struct __db_env *self){ +u_int32_t DbEnv_get_lk_max_locks(struct DbEnv *self){ u_int32_t ret; errno = self->get_lk_max_locks(self, &ret); return ret; } -static u_int32_t __db_env_get_lk_max_lockers(struct __db_env *self){ +u_int32_t DbEnv_get_lk_max_lockers(struct DbEnv *self){ u_int32_t ret; errno = self->get_lk_max_lockers(self, &ret); return ret; } -static u_int32_t __db_env_get_lk_max_objects(struct __db_env *self){ +u_int32_t DbEnv_get_lk_max_objects(struct DbEnv *self){ u_int32_t ret; errno = self->get_lk_max_objects(self, &ret); return ret; } -static int __db_env_lock_detect(struct __db_env *self,u_int32_t flags,u_int32_t atype){ +int DbEnv_lock_detect(struct DbEnv *self,u_int32_t flags,u_int32_t atype){ int aborted; errno = self->lock_detect(self, flags, atype, &aborted); return aborted; } -static DB_LOCK *__db_env_lock_get(struct __db_env *self,u_int32_t locker,u_int32_t flags,DBT const *object,db_lockmode_t lock_mode){ +DB_LOCK *DbEnv_lock_get(struct DbEnv *self,u_int32_t locker,u_int32_t flags,DBT const *object,db_lockmode_t lock_mode){ DB_LOCK *lock = NULL; if ((errno = __os_malloc(self, sizeof (DB_LOCK), &lock)) == 0) errno = self->lock_get(self, locker, flags, object, lock_mode, lock); return lock; } -static u_int32_t __db_env_lock_id(struct __db_env *self){ +u_int32_t DbEnv_lock_id(struct DbEnv *self){ u_int32_t id; errno = self->lock_id(self, &id); return id; } -static db_ret_t __db_env_lock_id_free(struct __db_env *self,u_int32_t id){ +db_ret_t DbEnv_lock_id_free(struct DbEnv *self,u_int32_t id){ return self->lock_id_free(self, id); } -static db_ret_t __db_env_lock_put(struct __db_env *self,DB_LOCK *lock){ +db_ret_t DbEnv_lock_put(struct DbEnv *self,DB_LOCK *lock){ return self->lock_put(self, lock); } -static DB_LOCK_STAT *__db_env_lock_stat(struct __db_env *self,u_int32_t flags){ +DB_LOCK_STAT *DbEnv_lock_stat(struct DbEnv *self,u_int32_t flags){ DB_LOCK_STAT *statp; errno = self->lock_stat(self, &statp, flags); return (errno == 0) ? 
statp : NULL; } -static db_ret_t __db_env_set_lk_conflicts(struct __db_env *self,struct __db_lk_conflicts conflicts){ +db_ret_t DbEnv_set_lk_conflicts(struct DbEnv *self,struct __db_lk_conflicts conflicts){ return self->set_lk_conflicts(self, conflicts.lk_conflicts, conflicts.lk_modes); } -static db_ret_t __db_env_set_lk_detect(struct __db_env *self,u_int32_t detect){ +db_ret_t DbEnv_set_lk_detect(struct DbEnv *self,u_int32_t detect){ return self->set_lk_detect(self, detect); } -static db_ret_t __db_env_set_lk_max_lockers(struct __db_env *self,u_int32_t max){ +db_ret_t DbEnv_set_lk_max_lockers(struct DbEnv *self,u_int32_t max){ return self->set_lk_max_lockers(self, max); } -static db_ret_t __db_env_set_lk_max_locks(struct __db_env *self,u_int32_t max){ +db_ret_t DbEnv_set_lk_max_locks(struct DbEnv *self,u_int32_t max){ return self->set_lk_max_locks(self, max); } -static db_ret_t __db_env_set_lk_max_objects(struct __db_env *self,u_int32_t max){ +db_ret_t DbEnv_set_lk_max_objects(struct DbEnv *self,u_int32_t max){ return self->set_lk_max_objects(self, max); } -static u_int32_t __db_env_get_lg_bsize(struct __db_env *self){ +u_int32_t DbEnv_get_lg_bsize(struct DbEnv *self){ u_int32_t ret; errno = self->get_lg_bsize(self, &ret); return ret; } -static char const *__db_env_get_lg_dir(struct __db_env *self){ +char const *DbEnv_get_lg_dir(struct DbEnv *self){ const char *ret; errno = self->get_lg_dir(self, &ret); return ret; } -static u_int32_t __db_env_get_lg_max(struct __db_env *self){ +u_int32_t DbEnv_get_lg_max(struct DbEnv *self){ u_int32_t ret; errno = self->get_lg_max(self, &ret); return ret; } -static u_int32_t __db_env_get_lg_regionmax(struct __db_env *self){ +u_int32_t DbEnv_get_lg_regionmax(struct DbEnv *self){ u_int32_t ret; errno = self->get_lg_regionmax(self, &ret); return ret; } -static char **__db_env_log_archive(struct __db_env *self,u_int32_t flags){ - char **list; +char **DbEnv_log_archive(struct DbEnv *self,u_int32_t flags){ + char **list = NULL; errno = self->log_archive(self, &list, flags); return (errno == 0) ? list : NULL; } int DbEnv_log_compare(DB_LSN const *lsn0,DB_LSN const *lsn1){ return log_compare(lsn0, lsn1); } -static DB_LOGC *__db_env_log_cursor(struct __db_env *self,u_int32_t flags){ +DB_LOGC *DbEnv_log_cursor(struct DbEnv *self,u_int32_t flags){ DB_LOGC *cursor; errno = self->log_cursor(self, &cursor, flags); return (errno == 0) ? cursor : NULL; } -static char *__db_env_log_file(struct __db_env *self,DB_LSN *lsn){ +char *DbEnv_log_file(struct DbEnv *self,DB_LSN *lsn){ char namebuf[MAXPATHLEN]; errno = self->log_file(self, lsn, namebuf, sizeof namebuf); return (errno == 0) ? strdup(namebuf) : NULL; } -static db_ret_t __db_env_log_flush(struct __db_env *self,DB_LSN const *lsn){ +db_ret_t DbEnv_log_flush(struct DbEnv *self,DB_LSN const *lsn){ return self->log_flush(self, lsn); } -static db_ret_t __db_env_log_put(struct __db_env *self,DB_LSN *lsn,DBT const *data,u_int32_t flags){ +db_ret_t DbEnv_log_put(struct DbEnv *self,DB_LSN *lsn,DBT const *data,u_int32_t flags){ return self->log_put(self, lsn, data, flags); } -static DB_LOG_STAT *__db_env_log_stat(struct __db_env *self,u_int32_t flags){ +DB_LOG_STAT *DbEnv_log_stat(struct DbEnv *self,u_int32_t flags){ DB_LOG_STAT *sp; errno = self->log_stat(self, &sp, flags); return (errno == 0) ? 
sp : NULL; } -static db_ret_t __db_env_set_lg_bsize(struct __db_env *self,u_int32_t lg_bsize){ +db_ret_t DbEnv_set_lg_bsize(struct DbEnv *self,u_int32_t lg_bsize){ return self->set_lg_bsize(self, lg_bsize); } -static db_ret_t __db_env_set_lg_dir(struct __db_env *self,char const *dir){ +db_ret_t DbEnv_set_lg_dir(struct DbEnv *self,char const *dir){ return self->set_lg_dir(self, dir); } -static db_ret_t __db_env_set_lg_max(struct __db_env *self,u_int32_t lg_max){ +db_ret_t DbEnv_set_lg_max(struct DbEnv *self,u_int32_t lg_max){ return self->set_lg_max(self, lg_max); } -static db_ret_t __db_env_set_lg_regionmax(struct __db_env *self,u_int32_t lg_regionmax){ +db_ret_t DbEnv_set_lg_regionmax(struct DbEnv *self,u_int32_t lg_regionmax){ return self->set_lg_regionmax(self, lg_regionmax); } -static jlong __db_env_get_cachesize(struct __db_env *self){ +jlong DbEnv_get_cachesize(struct DbEnv *self){ u_int32_t gbytes, bytes; errno = self->get_cachesize(self, &gbytes, &bytes, NULL); return (jlong)gbytes * GIGABYTE + bytes; } -static int __db_env_get_cachesize_ncache(struct __db_env *self){ +int DbEnv_get_cachesize_ncache(struct DbEnv *self){ int ret; errno = self->get_cachesize(self, NULL, NULL, &ret); return ret; } -static size_t __db_env_get_mp_mmapsize(struct __db_env *self){ +size_t DbEnv_get_mp_mmapsize(struct DbEnv *self){ size_t ret; errno = self->get_mp_mmapsize(self, &ret); return ret; } -static DB_MPOOL_STAT *__db_env_memp_stat(struct __db_env *self,u_int32_t flags){ +DB_MPOOL_STAT *DbEnv_memp_stat(struct DbEnv *self,u_int32_t flags){ DB_MPOOL_STAT *mp_stat; errno = self->memp_stat(self, &mp_stat, NULL, flags); return (errno == 0) ? mp_stat : NULL; } -static DB_MPOOL_FSTAT **__db_env_memp_fstat(struct __db_env *self,u_int32_t flags){ +DB_MPOOL_FSTAT **DbEnv_memp_fstat(struct DbEnv *self,u_int32_t flags){ DB_MPOOL_FSTAT **mp_fstat; errno = self->memp_stat(self, NULL, &mp_fstat, flags); return (errno == 0) ? mp_fstat : NULL; } -static int __db_env_memp_trickle(struct __db_env *self,int percent){ +int DbEnv_memp_trickle(struct DbEnv *self,int percent){ int ret; errno = self->memp_trickle(self, percent, &ret); return ret; } -static u_int32_t __db_env_get_tx_max(struct __db_env *self){ +u_int32_t DbEnv_get_tx_max(struct DbEnv *self){ u_int32_t ret; errno = self->get_tx_max(self, &ret); return ret; } -static time_t __db_env_get_tx_timestamp(struct __db_env *self){ +time_t DbEnv_get_tx_timestamp(struct DbEnv *self){ time_t ret; errno = self->get_tx_timestamp(self, &ret); return ret; } -static db_timeout_t __db_env_get_timeout(struct __db_env *self,u_int32_t flag){ +db_timeout_t DbEnv_get_timeout(struct DbEnv *self,u_int32_t flag){ db_timeout_t ret; errno = self->get_timeout(self, &ret, flag); return ret; } -static DB_TXN *__db_env_txn_begin(struct __db_env *self,DB_TXN *parent,u_int32_t flags){ +DB_TXN *DbEnv_txn_begin(struct DbEnv *self,DB_TXN *parent,u_int32_t flags){ DB_TXN *tid; errno = self->txn_begin(self, parent, &tid, flags); return (errno == 0) ? 
tid : NULL; } -static db_ret_t __db_env_txn_checkpoint(struct __db_env *self,u_int32_t kbyte,u_int32_t min,u_int32_t flags){ +db_ret_t DbEnv_txn_checkpoint(struct DbEnv *self,u_int32_t kbyte,u_int32_t min,u_int32_t flags){ return self->txn_checkpoint(self, kbyte, min, flags); } -static DB_PREPLIST *__db_env_txn_recover(struct __db_env *self,int count,u_int32_t flags){ +DB_PREPLIST *DbEnv_txn_recover(struct DbEnv *self,int count,u_int32_t flags){ DB_PREPLIST *preplist; long retcount; @@ -1865,38 +2370,38 @@ static DB_PREPLIST *__db_env_txn_recover(struct __db_env *self,int count,u_int32 preplist[retcount].txn = NULL; return preplist; } -static DB_TXN_STAT *__db_env_txn_stat(struct __db_env *self,u_int32_t flags){ +DB_TXN_STAT *DbEnv_txn_stat(struct DbEnv *self,u_int32_t flags){ DB_TXN_STAT *statp; errno = self->txn_stat(self, &statp, flags); return (errno == 0) ? statp : NULL; } -static jlong __db_env_get_rep_limit(struct __db_env *self){ +jlong DbEnv_get_rep_limit(struct DbEnv *self){ u_int32_t gbytes, bytes; errno = self->get_rep_limit(self, &gbytes, &bytes); return (jlong)gbytes * GIGABYTE + bytes; } -static int __db_env_rep_elect(struct __db_env *self,int nsites,int priority,u_int32_t timeout){ +int DbEnv_rep_elect(struct DbEnv *self,int nsites,int nvotes,int priority,u_int32_t timeout,u_int32_t flags){ int id; - errno = self->rep_elect(self, nsites, priority, timeout, &id); + errno = self->rep_elect(self, nsites, nvotes, priority, timeout, &id, flags); return id; } -static int __db_env_rep_process_message(struct __db_env *self,DBT *control,DBT *rec,int *envid,DB_LSN *ret_lsn){ +int DbEnv_rep_process_message(struct DbEnv *self,DBT *control,DBT *rec,int *envid,DB_LSN *ret_lsn){ return self->rep_process_message(self, control, rec, envid, ret_lsn); } -static db_ret_t __db_env_rep_start(struct __db_env *self,DBT *cdata,u_int32_t flags){ +db_ret_t DbEnv_rep_start(struct DbEnv *self,DBT *cdata,u_int32_t flags){ return self->rep_start(self, cdata, flags); } -static DB_REP_STAT *__db_env_rep_stat(struct __db_env *self,u_int32_t flags){ +DB_REP_STAT *DbEnv_rep_stat(struct DbEnv *self,u_int32_t flags){ DB_REP_STAT *statp; errno = self->rep_stat(self, &statp, flags); return (errno == 0) ? 
statp : NULL; } -static db_ret_t __db_env_set_rep_limit(struct __db_env *self,jlong bytes){ +db_ret_t DbEnv_set_rep_limit(struct DbEnv *self,jlong bytes){ return self->set_rep_limit(self, (u_int32_t)(bytes / GIGABYTE), (u_int32_t)(bytes % GIGABYTE)); } -static db_ret_t __db_env_set_rep_transport(struct __db_env *self,int envid,int (*send)(DB_ENV *,DBT const *,DBT const *,DB_LSN const *,int,u_int32_t)){ +db_ret_t DbEnv_set_rep_transport(struct DbEnv *self,int envid,int (*send)(DB_ENV *,DBT const *,DBT const *,DB_LSN const *,int,u_int32_t)){ return self->set_rep_transport(self, envid, send); } char const *DbEnv_strerror(int error){ @@ -1914,86 +2419,133 @@ int DbEnv_get_version_patch(){ char const *DbEnv_get_version_string(){ return DB_VERSION_STRING; } -static db_ret_t __db_txn_abort(struct __db_txn *self){ - return self->abort(self); - } -static db_ret_t __db_txn_commit(struct __db_txn *self,u_int32_t flags){ - return self->commit(self, flags); - } -static db_ret_t __db_txn_discard(struct __db_txn *self,u_int32_t flags){ - return self->discard(self, flags); - } -static u_int32_t __db_txn_id(struct __db_txn *self){ - return self->id(self); - } -static db_ret_t __db_txn_prepare(struct __db_txn *self,u_int8_t *gid){ - return self->prepare(self, gid); - } -static db_ret_t __db_txn_set_timeout(struct __db_txn *self,db_timeout_t timeout,u_int32_t flags){ - return self->set_timeout(self, timeout, flags); +void delete_DbLock(struct DbLock *self){ + __os_free(NULL, self); } -static db_ret_t __db_log_cursor_close(struct __db_log_cursor *self,u_int32_t flags){ +db_ret_t DbLogc_close(struct DbLogc *self,u_int32_t flags){ return self->close(self, flags); } -static int __db_log_cursor_get(struct __db_log_cursor *self,DB_LSN *lsn,DBT *data,u_int32_t flags){ +int DbLogc_get(struct DbLogc *self,DB_LSN *lsn,DBT *data,u_int32_t flags){ return self->get(self, lsn, data, flags); } -void delete___db_lock_u(struct __db_lock_u *self){ - __os_free(NULL, self); - } -struct __db_lsn *new___db_lsn(u_int32_t file,u_int32_t offset){ - DB_LSN *self = NULL; - errno = __os_malloc(NULL, sizeof (DB_LSN), &self); - if (errno == 0) { - self->file = file; - self->offset = offset; - } - return self; - } -void delete___db_lsn(struct __db_lsn *self){ - __os_free(NULL, self); - } -static u_int32_t __db_lsn_get_file(struct __db_lsn *self){ - return self->file; - } -static u_int32_t __db_lsn_get_offset(struct __db_lsn *self){ - return self->offset; - } -static DB_CACHE_PRIORITY __db_mpoolfile_get_priority(struct __db_mpoolfile *self){ +DB_CACHE_PRIORITY DbMpoolFile_get_priority(struct DbMpoolFile *self){ DB_CACHE_PRIORITY ret; errno = self->get_priority(self, &ret); return ret; } -static db_ret_t __db_mpoolfile_set_priority(struct __db_mpoolfile *self,DB_CACHE_PRIORITY priority){ +db_ret_t DbMpoolFile_set_priority(struct DbMpoolFile *self,DB_CACHE_PRIORITY priority){ return self->set_priority(self, priority); } -static u_int32_t __db_mpoolfile_get_flags(struct __db_mpoolfile *self){ +u_int32_t DbMpoolFile_get_flags(struct DbMpoolFile *self){ u_int32_t ret; errno = self->get_flags(self, &ret); return ret; } -static db_ret_t __db_mpoolfile_set_flags(struct __db_mpoolfile *self,u_int32_t flags,int_bool onoff){ +db_ret_t DbMpoolFile_set_flags(struct DbMpoolFile *self,u_int32_t flags,int_bool onoff){ return self->set_flags(self, flags, onoff); } -static jlong __db_mpoolfile_get_maxsize(struct __db_mpoolfile *self){ +jlong DbMpoolFile_get_maxsize(struct DbMpoolFile *self){ u_int32_t gbytes, bytes; errno = self->get_maxsize(self, &gbytes, 
&bytes); return (jlong)gbytes * GIGABYTE + bytes; } -static db_ret_t __db_mpoolfile_set_maxsize(struct __db_mpoolfile *self,jlong bytes){ +db_ret_t DbMpoolFile_set_maxsize(struct DbMpoolFile *self,jlong bytes){ return self->set_maxsize(self, (u_int32_t)(bytes / GIGABYTE), (u_int32_t)(bytes % GIGABYTE)); } +struct DbSequence *new_DbSequence(DB *db,u_int32_t flags){ + DB_SEQUENCE *self = NULL; + errno = db_sequence_create(&self, db, flags); + return self; + } +db_ret_t DbSequence_close(struct DbSequence *self,u_int32_t flags){ + return self->close(self, flags); + } +db_seq_t DbSequence_get(struct DbSequence *self,DB_TXN *txnid,int32_t delta,u_int32_t flags){ + db_seq_t ret = 0; + errno = self->get(self, txnid, delta, &ret, flags); + return ret; + } +int32_t DbSequence_get_cachesize(struct DbSequence *self){ + int32_t ret = 0; + errno = self->get_cachesize(self, &ret); + return ret; + } +DB *DbSequence_get_db(struct DbSequence *self){ + DB *ret = NULL; + errno = self->get_db(self, &ret); + return ret; + } +u_int32_t DbSequence_get_flags(struct DbSequence *self){ + u_int32_t ret = 0; + errno = self->get_flags(self, &ret); + return ret; + } +db_ret_t DbSequence_get_key(struct DbSequence *self,DBT *key){ + return self->get_key(self, key); + } +db_seq_t DbSequence_get_range_min(struct DbSequence *self){ + db_seq_t ret = 0; + errno = self->get_range(self, &ret, NULL); + return ret; + } +db_seq_t DbSequence_get_range_max(struct DbSequence *self){ + db_seq_t ret = 0; + errno = self->get_range(self, NULL, &ret); + return ret; + } +db_ret_t DbSequence_initial_value(struct DbSequence *self,db_seq_t val){ + return self->initial_value(self, val); + } +db_ret_t DbSequence_open(struct DbSequence *self,DB_TXN *txnid,DBT *key,u_int32_t flags){ + return self->open(self, txnid, key, flags); + } +db_ret_t DbSequence_remove(struct DbSequence *self,DB_TXN *txnid,u_int32_t flags){ + return self->remove(self, txnid, flags); + } +db_ret_t DbSequence_set_cachesize(struct DbSequence *self,int32_t size){ + return self->set_cachesize(self, size); + } +db_ret_t DbSequence_set_flags(struct DbSequence *self,u_int32_t flags){ + return self->set_flags(self, flags); + } +db_ret_t DbSequence_set_range(struct DbSequence *self,db_seq_t min,db_seq_t max){ + return self->set_range(self, min, max); + } +DB_SEQUENCE_STAT *DbSequence_stat(struct DbSequence *self,u_int32_t flags){ + DB_SEQUENCE_STAT *ret = NULL; + errno = self->stat(self, &ret, flags); + return ret; + } +db_ret_t DbTxn_abort(struct DbTxn *self){ + return self->abort(self); + } +db_ret_t DbTxn_commit(struct DbTxn *self,u_int32_t flags){ + return self->commit(self, flags); + } +db_ret_t DbTxn_discard(struct DbTxn *self,u_int32_t flags){ + return self->discard(self, flags); + } +u_int32_t DbTxn_id(struct DbTxn *self){ + return self->id(self); + } +db_ret_t DbTxn_prepare(struct DbTxn *self,u_int8_t *gid){ + return self->prepare(self, gid); + } +db_ret_t DbTxn_set_timeout(struct DbTxn *self,db_timeout_t timeout,u_int32_t flags){ + return self->set_timeout(self, timeout, flags); + } + #ifdef __cplusplus extern "C" { #endif -JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_new_1Db(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { +JNIEXPORT jlong JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_new_1Db(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { jlong jresult = 0 ; DB_ENV *arg1 = (DB_ENV *) 0 ; u_int32_t arg2 ; - struct __db *result; + struct Db *result; (void)jenv; (void)jcls; @@ -2001,18 +2553,19 @@ JNIEXPORT jlong JNICALL 
Java_com_sleepycat_db_db_1javaJNI_new_1Db(JNIEnv *jenv, arg2 = (u_int32_t)jarg2; errno = 0; - result = (struct __db *)new___db(arg1,arg2); + result = (struct Db *)new_Db(arg1,arg2); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } - *(struct __db **)&jresult = result; + *(struct Db **)&jresult = result; return jresult; } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1associate(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jlong jarg3, jobject jarg4, jint jarg5) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1associate(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jlong jarg3, jobject jarg4, jint jarg5) { + struct Db *arg1 = (struct Db *) 0 ; DB_TXN *arg2 = (DB_TXN *) 0 ; DB *arg3 = (DB *) 0 ; int (*arg4)(DB *,DBT const *,DBT const *,DBT *) = (int (*)(DB *,DBT const *,DBT const *,DBT *)) 0 ; @@ -2021,7 +2574,7 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1associate(JNIEnv *j (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = *(DB_TXN **)&jarg2; arg3 = *(DB **)&jarg3; @@ -2034,24 +2587,24 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1associate(JNIEnv *j return ; } + result = (db_ret_t)Db_associate(arg1,arg2,arg3,arg4,arg5); - result = (db_ret_t)__db_associate(arg1,arg2,arg3,arg4,arg5); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1close0(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1close0(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { jint jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; u_int32_t arg2 ; int result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -2059,28 +2612,28 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1close0(JNIEnv *jenv return 0; } - errno = 0; - result = (int)__db_close(arg1,arg2); + result = (int)Db_close(arg1,arg2); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, NULL); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, NULL); + } jresult = (jint)result; return jresult; } -JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1cursor(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jint jarg3) { +JNIEXPORT jlong JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1cursor(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jint jarg3) { jlong jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; DB_TXN *arg2 = (DB_TXN *) 0 ; u_int32_t arg3 ; DBC *result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = *(DB_TXN **)&jarg2; arg3 = (u_int32_t)jarg3; @@ -2089,21 +2642,21 @@ JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1cursor(JNIEnv *jen return 0; } - errno = 0; - result = (DBC *)__db_cursor(arg1,arg2,arg3); + result = (DBC *)Db_cursor(arg1,arg2,arg3); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } 
*(DBC **)&jresult = result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1del(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jobject jarg3, jint jarg4) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1del(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jobject jarg3, jint jarg4) { jint jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; DB_TXN *arg2 = (DB_TXN *) 0 ; DBT *arg3 = (DBT *) 0 ; u_int32_t arg4 ; @@ -2112,13 +2665,12 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1del(JNIEnv *jenv, j (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = *(DB_TXN **)&jarg2; - if (__dbj_dbt_copyin(jenv, &ldbt3, jarg3) != 0) - return 0; - arg3 = &ldbt3.dbt; - + if (__dbj_dbt_copyin(jenv, &ldbt3, &arg3, jarg3, 0) != 0) { + return 0; + } arg4 = (u_int32_t)jarg4; if (jarg1 == 0) { @@ -2126,28 +2678,26 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1del(JNIEnv *jenv, j return 0; } + result = (int)Db_del(arg1,arg2,arg3,arg4); - result = (int)__db_del(arg1,arg2,arg3,arg4); - - if (!DB_RETOK_DBDEL(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_DBDEL(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } jresult = (jint)result; - - __dbj_dbt_release(jenv, jarg3, arg3, &ldbt3); - + __dbj_dbt_release(jenv, jarg3, arg3, &ldbt3); return jresult; } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1err(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2, jstring jarg3) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1err(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2, jstring jarg3) { + struct Db *arg1 = (struct Db *) 0 ; int arg2 ; char *arg3 ; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = (int)jarg2; { arg3 = 0; @@ -2161,8 +2711,7 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1err(JNIEnv *jenv, j __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return ; } - - __db_err_internal(arg1,arg2,(char const *)arg3); + Db_err(arg1,arg2,(char const *)arg3); { if (arg3) (*jenv)->ReleaseStringUTFChars(jenv, jarg3, arg3); @@ -2170,13 +2719,13 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1err(JNIEnv *jenv, j } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1errx(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1errx(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2) { + struct Db *arg1 = (struct Db *) 0 ; char *arg2 ; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; { arg2 = 0; if (jarg2) { @@ -2189,8 +2738,7 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1errx(JNIEnv *jenv, __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return ; } - - __db_errx(arg1,(char const *)arg2); + Db_errx(arg1,(char const *)arg2); { if (arg2) (*jenv)->ReleaseStringUTFChars(jenv, jarg2, arg2); @@ -2198,9 +2746,29 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1errx(JNIEnv *jenv, } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jobject jarg3, jobject jarg4, jint jarg5) { +JNIEXPORT jboolean JNICALL 
Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1transactional(JNIEnv *jenv, jclass jcls, jlong jarg1) { + jboolean jresult = 0 ; + struct Db *arg1 = (struct Db *) 0 ; + int_bool result; + + (void)jenv; + (void)jcls; + arg1 = *(struct Db **)&jarg1; + + if (jarg1 == 0) { + __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); + return 0; + } + result = (int_bool)Db_get_transactional(arg1); + + jresult = (result) ? JNI_TRUE : JNI_FALSE; + return jresult; +} + + +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jobject jarg3, jobject jarg4, jint jarg5) { jint jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; DB_TXN *arg2 = (DB_TXN *) 0 ; DBT *arg3 = (DBT *) 0 ; DBT *arg4 = (DBT *) 0 ; @@ -2211,18 +2779,16 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1_1SWIG_10(JNIE (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = *(DB_TXN **)&jarg2; - if (__dbj_dbt_copyin(jenv, &ldbt3, jarg3) != 0) - return 0; - arg3 = &ldbt3.dbt; - - - if (__dbj_dbt_copyin(jenv, &ldbt4, jarg4) != 0) - return 0; - arg4 = &ldbt4.dbt; + if (__dbj_dbt_copyin(jenv, &ldbt3, &arg3, jarg3, 0) != 0) { + return 0; + } + if (__dbj_dbt_copyin(jenv, &ldbt4, &arg4, jarg4, 0) != 0) { + return 0; + } arg5 = (u_int32_t)jarg5; if (jarg1 == 0) { @@ -2230,123 +2796,117 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1_1SWIG_10(JNIE return 0; } + result = (int)Db_get(arg1,arg2,arg3,arg4,arg5); - result = (int)__db_get__SWIG_0(arg1,arg2,arg3,arg4,arg5); - - if (!DB_RETOK_DBGET(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_DBGET(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } jresult = (jint)result; - - __dbj_dbt_release(jenv, jarg3, arg3, &ldbt3); - - - __dbj_dbt_release(jenv, jarg4, arg4, &ldbt4); - + __dbj_dbt_release(jenv, jarg3, arg3, &ldbt3); + __dbj_dbt_release(jenv, jarg4, arg4, &ldbt4); return jresult; } -JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1byteswapped(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1byteswapped(JNIEnv *jenv, jclass jcls, jlong jarg1) { jboolean jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; int_bool result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (int_bool)__db_get_byteswapped(arg1); - - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + result = (int_bool)Db_get_byteswapped(arg1); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } - jresult = (result) ? JNI_TRUE : JNI_FALSE; - + jresult = (result) ? 
JNI_TRUE : JNI_FALSE; return jresult; } -JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1cachesize(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jlong JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1cachesize(JNIEnv *jenv, jclass jcls, jlong jarg1) { jlong jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; jlong result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = __db_get_cachesize(arg1); + result = Db_get_cachesize(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } jresult = result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1cachesize_1ncache(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1cachesize_1ncache(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (u_int32_t)__db_get_cachesize_ncache(arg1); + result = (u_int32_t)Db_get_cachesize_ncache(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1filename(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jstring JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1filename(JNIEnv *jenv, jclass jcls, jlong jarg1) { jstring jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; char *result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (char *)__db_get_filename(arg1); + result = (char *)Db_get_filename(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } { if(result) jresult = (*jenv)->NewStringUTF(jenv, result); @@ -2355,26 +2915,26 @@ JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1filename(JN } -JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1dbname(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jstring JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1dbname(JNIEnv *jenv, jclass jcls, jlong jarg1) { jstring jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; char *result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (char *)__db_get_dbname(arg1); + result = (char *)Db_get_dbname(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } { 
if(result) jresult = (*jenv)->NewStringUTF(jenv, result); @@ -2383,338 +2943,366 @@ JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1dbname(JNIE } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1encrypt_1flags(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1encrypt_1flags(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (u_int32_t)__db_get_encrypt_flags(arg1); + result = (u_int32_t)Db_get_encrypt_flags(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1flags(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jstring JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1errpfx(JNIEnv *jenv, jclass jcls, jlong jarg1) { + jstring jresult = 0 ; + struct Db *arg1 = (struct Db *) 0 ; + char *result; + + (void)jenv; + (void)jcls; + arg1 = *(struct Db **)&jarg1; + + if (jarg1 == 0) { + __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); + return 0; + } + + errno = 0; + result = (char *)Db_get_errpfx(arg1); + + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } + + { + if(result) jresult = (*jenv)->NewStringUTF(jenv, result); + } + return jresult; +} + + +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1flags(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (u_int32_t)__db_get_flags(arg1); + result = (u_int32_t)Db_get_flags(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1lorder(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1lorder(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; int result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (int)__db_get_lorder(arg1); + result = (int)Db_get_lorder(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1mpf(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jlong JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1mpf(JNIEnv *jenv, jclass jcls, 
jlong jarg1) { jlong jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; DB_MPOOLFILE *result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (DB_MPOOLFILE *)__db_get_mpf(arg1); + result = (DB_MPOOLFILE *)Db_get_mpf(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } *(DB_MPOOLFILE **)&jresult = result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1open_1flags(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1open_1flags(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (u_int32_t)__db_get_open_flags(arg1); + result = (u_int32_t)Db_get_open_flags(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1pagesize(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1pagesize(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (u_int32_t)__db_get_pagesize(arg1); + result = (u_int32_t)Db_get_pagesize(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1bt_1minkey(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1bt_1minkey(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (u_int32_t)__db_get_bt_minkey(arg1); + result = (u_int32_t)Db_get_bt_minkey(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1h_1ffactor(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1h_1ffactor(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db 
*arg1 = (struct Db *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (u_int32_t)__db_get_h_ffactor(arg1); + result = (u_int32_t)Db_get_h_ffactor(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1h_1nelem(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1h_1nelem(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (u_int32_t)__db_get_h_nelem(arg1); + result = (u_int32_t)Db_get_h_nelem(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1re_1delim(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1re_1delim(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; int result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (int)__db_get_re_delim(arg1); + result = (int)Db_get_re_delim(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1re_1len(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1re_1len(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (u_int32_t)__db_get_re_len(arg1); + result = (u_int32_t)Db_get_re_len(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1re_1pad(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1re_1pad(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; int result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; if (jarg1 == 0) { 
__dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (int)__db_get_re_pad(arg1); + result = (int)Db_get_re_pad(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1re_1source(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jstring JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1re_1source(JNIEnv *jenv, jclass jcls, jlong jarg1) { jstring jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; char *result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (char *)__db_get_re_source(arg1); + result = (char *)Db_get_re_source(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } { if(result) jresult = (*jenv)->NewStringUTF(jenv, result); @@ -2723,122 +3311,68 @@ JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1re_1source( } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1q_1extentsize(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1q_1extentsize(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (u_int32_t)__db_get_q_extentsize(arg1); - - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); - - jresult = (jint)result; - return jresult; -} - - -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1flags_1raw(JNIEnv *jenv, jclass jcls, jlong jarg1) { - jint jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; - u_int32_t result; + result = (u_int32_t)Db_get_q_extentsize(arg1); - (void)jenv; - (void)jcls; - arg1 = *(struct __db **)&jarg1; - - if (jarg1 == 0) { - __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); - return 0; + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); } - - errno = 0; - result = (u_int32_t)__db_get_flags_raw(arg1); - - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); - jresult = (jint)result; return jresult; } -JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1transactional(JNIEnv *jenv, jclass jcls, jlong jarg1) { - jboolean jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; - int_bool result; - - (void)jenv; - (void)jcls; - arg1 = *(struct __db **)&jarg1; - - if (jarg1 == 0) { - __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); - return 0; - } - - - errno = 0; - result = (int_bool)__db_get_transactional(arg1); - - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); - - - jresult = (result) ? 
JNI_TRUE : JNI_FALSE; - - return jresult; -} - - -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1type(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1get_1type(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; DBTYPE result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (DBTYPE)__db_get_type(arg1); + result = (DBTYPE)Db_get_type(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1join(JNIEnv *jenv, jclass jcls, jlong jarg1, jobjectArray jarg2, jint jarg3) { +JNIEXPORT jlong JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1join(JNIEnv *jenv, jclass jcls, jlong jarg1, jobjectArray jarg2, jint jarg3) { jlong jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; DBC **arg2 = (DBC **) 0 ; u_int32_t arg3 ; DBC *result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; { int i, count, err; @@ -2855,7 +3389,7 @@ JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1join(JNIEnv *jenv, if (jobj == NULL) { arg2[i] = NULL; break; - }else { + } else { jlong jptr = (*jenv)->GetLongField(jenv, jobj, dbc_cptr_fid); arg2[i] = *(DBC **)&jptr; @@ -2870,12 +3404,12 @@ JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1join(JNIEnv *jenv, return 0; } - errno = 0; - result = (DBC *)__db_join(arg1,arg2,arg3); + result = (DBC *)Db_join(arg1,arg2,arg3); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } *(DBC **)&jresult = result; @@ -2885,8 +3419,8 @@ JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1join(JNIEnv *jenv, } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1key_1range(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jobject jarg3, jobject jarg4, jint jarg5) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1key_1range(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jobject jarg3, jobject jarg4, jint jarg5) { + struct Db *arg1 = (struct Db *) 0 ; DB_TXN *arg2 = (DB_TXN *) 0 ; DBT *arg3 = (DBT *) 0 ; DB_KEY_RANGE *arg4 = (DB_KEY_RANGE *) 0 ; @@ -2897,13 +3431,12 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1key_1range(JNIEnv * (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = *(DB_TXN **)&jarg2; - if (__dbj_dbt_copyin(jenv, &ldbt3, jarg3) != 0) - return ; - arg3 = &ldbt3.dbt; - + if (__dbj_dbt_copyin(jenv, &ldbt3, &arg3, jarg3, 0) != 0) { + return ; + } { arg4 = &range4; } @@ -2914,25 +3447,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1key_1range(JNIEnv * return ; } + result = (db_ret_t)Db_key_range(arg1,arg2,arg3,arg4,arg5); - result = (db_ret_t)__db_key_range(arg1,arg2,arg3,arg4,arg5); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } { 
(*jenv)->SetDoubleField(jenv, jarg4, kr_less_fid, arg4->less); (*jenv)->SetDoubleField(jenv, jarg4, kr_equal_fid, arg4->equal); (*jenv)->SetDoubleField(jenv, jarg4, kr_greater_fid, arg4->greater); } - - __dbj_dbt_release(jenv, jarg3, arg3, &ldbt3); - + __dbj_dbt_release(jenv, jarg3, arg3, &ldbt3); } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1open0(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jstring jarg3, jstring jarg4, jint jarg5, jint jarg6, jint jarg7) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1open(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jstring jarg3, jstring jarg4, jint jarg5, jint jarg6, jint jarg7) { + struct Db *arg1 = (struct Db *) 0 ; DB_TXN *arg2 = (DB_TXN *) 0 ; char *arg3 ; char *arg4 ; @@ -2943,7 +3474,7 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1open0(JNIEnv *jenv, (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = *(DB_TXN **)&jarg2; { arg3 = 0; @@ -2968,11 +3499,11 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1open0(JNIEnv *jenv, return ; } + result = (db_ret_t)Db_open(arg1,arg2,(char const *)arg3,(char const *)arg4,arg5,arg6,arg7); - result = (db_ret_t)__db_open(arg1,arg2,(char const *)arg3,(char const *)arg4,arg5,arg6,arg7); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } { if (arg3) (*jenv)->ReleaseStringUTFChars(jenv, jarg3, arg3); @@ -2983,9 +3514,9 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1open0(JNIEnv *jenv, } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jobject jarg3, jobject jarg4, jobject jarg5, jint jarg6) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1pget(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jobject jarg3, jobject jarg4, jobject jarg5, jint jarg6) { jint jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; DB_TXN *arg2 = (DB_TXN *) 0 ; DBT *arg3 = (DBT *) 0 ; DBT *arg4 = (DBT *) 0 ; @@ -2998,23 +3529,20 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1_1SWIG_11(JNIE (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = *(DB_TXN **)&jarg2; - if (__dbj_dbt_copyin(jenv, &ldbt3, jarg3) != 0) - return 0; - arg3 = &ldbt3.dbt; - - - if (__dbj_dbt_copyin(jenv, &ldbt4, jarg4) != 0) - return 0; - arg4 = &ldbt4.dbt; - + if (__dbj_dbt_copyin(jenv, &ldbt3, &arg3, jarg3, 0) != 0) { + return 0; + } - if (__dbj_dbt_copyin(jenv, &ldbt5, jarg5) != 0) - return 0; - arg5 = &ldbt5.dbt; + if (__dbj_dbt_copyin(jenv, &ldbt4, &arg4, jarg4, 0) != 0) { + return 0; + } + if (__dbj_dbt_copyin(jenv, &ldbt5, &arg5, jarg5, 0) != 0) { + return 0; + } arg6 = (u_int32_t)jarg6; if (jarg1 == 0) { @@ -3022,29 +3550,23 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1get_1_1SWIG_11(JNIE return 0; } + result = (int)Db_pget(arg1,arg2,arg3,arg4,arg5,arg6); - result = (int)__db_pget__SWIG_1(arg1,arg2,arg3,arg4,arg5,arg6); - - if (!DB_RETOK_DBGET(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_DBGET(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } jresult = (jint)result; - - __dbj_dbt_release(jenv, jarg3, arg3, &ldbt3); - - - __dbj_dbt_release(jenv, jarg4, arg4, &ldbt4); - - - 
__dbj_dbt_release(jenv, jarg5, arg5, &ldbt5); - + __dbj_dbt_release(jenv, jarg3, arg3, &ldbt3); + __dbj_dbt_release(jenv, jarg4, arg4, &ldbt4); + __dbj_dbt_release(jenv, jarg5, arg5, &ldbt5); return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1put(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jobject jarg3, jobject jarg4, jint jarg5) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1put(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jobject jarg3, jobject jarg4, jint jarg5) { jint jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; DB_TXN *arg2 = (DB_TXN *) 0 ; DBT *arg3 = (DBT *) 0 ; DBT *arg4 = (DBT *) 0 ; @@ -3055,18 +3577,16 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1put(JNIEnv *jenv, j (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = *(DB_TXN **)&jarg2; - if (__dbj_dbt_copyin(jenv, &ldbt3, jarg3) != 0) - return 0; - arg3 = &ldbt3.dbt; - - - if (__dbj_dbt_copyin(jenv, &ldbt4, jarg4) != 0) - return 0; - arg4 = &ldbt4.dbt; + if (__dbj_dbt_copyin(jenv, &ldbt3, &arg3, jarg3, 0) != 0) { + return 0; + } + if (__dbj_dbt_copyin(jenv, &ldbt4, &arg4, jarg4, 0) != 0) { + return 0; + } arg5 = (u_int32_t)jarg5; if (jarg1 == 0) { @@ -3074,25 +3594,21 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1put(JNIEnv *jenv, j return 0; } + result = (int)Db_put(arg1,arg2,arg3,arg4,arg5); - result = (int)__db_put(arg1,arg2,arg3,arg4,arg5); - - if (!DB_RETOK_DBPUT(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_DBPUT(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } jresult = (jint)result; - - __dbj_dbt_release(jenv, jarg3, arg3, &ldbt3); - - - __dbj_dbt_release(jenv, jarg4, arg4, &ldbt4); - + __dbj_dbt_release(jenv, jarg3, arg3, &ldbt3); + __dbj_dbt_release(jenv, jarg4, arg4, &ldbt4); return jresult; } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1remove0(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2, jstring jarg3, jint jarg4) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1remove0(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2, jstring jarg3, jint jarg4) { + struct Db *arg1 = (struct Db *) 0 ; char *arg2 ; char *arg3 ; u_int32_t arg4 ; @@ -3100,7 +3616,7 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1remove0(JNIEnv *jen (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; { arg2 = 0; if (jarg2) { @@ -3122,11 +3638,11 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1remove0(JNIEnv *jen return ; } + result = (db_ret_t)Db_remove(arg1,(char const *)arg2,(char const *)arg3,arg4); - result = (db_ret_t)__db_remove(arg1,(char const *)arg2,(char const *)arg3,arg4); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, NULL); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, NULL); + } { if (arg2) (*jenv)->ReleaseStringUTFChars(jenv, jarg2, arg2); @@ -3137,8 +3653,8 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1remove0(JNIEnv *jen } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1rename0(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2, jstring jarg3, jstring jarg4, jint jarg5) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1rename0(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring 
jarg2, jstring jarg3, jstring jarg4, jint jarg5) { + struct Db *arg1 = (struct Db *) 0 ; char *arg2 ; char *arg3 ; char *arg4 ; @@ -3147,7 +3663,7 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1rename0(JNIEnv *jen (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; { arg2 = 0; if (jarg2) { @@ -3176,11 +3692,11 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1rename0(JNIEnv *jen return ; } + result = (db_ret_t)Db_rename(arg1,(char const *)arg2,(char const *)arg3,(char const *)arg4,arg5); - result = (db_ret_t)__db_rename(arg1,(char const *)arg2,(char const *)arg3,(char const *)arg4,arg5); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, NULL); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, NULL); + } { if (arg2) (*jenv)->ReleaseStringUTFChars(jenv, jarg2, arg2); @@ -3194,14 +3710,14 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1rename0(JNIEnv *jen } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1append_1recno(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1append_1recno(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { + struct Db *arg1 = (struct Db *) 0 ; int (*arg2)(DB *,DBT *,db_recno_t) = (int (*)(DB *,DBT *,db_recno_t)) 0 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = (jarg2 == NULL) ? NULL : __dbj_append_recno; @@ -3211,23 +3727,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1append_1recno( return ; } + result = (db_ret_t)Db_set_append_recno(arg1,arg2); - result = (db_ret_t)__db_set_append_recno(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1bt_1compare(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1bt_1compare(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { + struct Db *arg1 = (struct Db *) 0 ; int (*arg2)(DB *,DBT const *,DBT const *) = (int (*)(DB *,DBT const *,DBT const *)) 0 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = (jarg2 == NULL) ? 
NULL : __dbj_bt_compare; @@ -3237,23 +3753,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1bt_1compare(JN return ; } + result = (db_ret_t)Db_set_bt_compare(arg1,arg2); - result = (db_ret_t)__db_set_bt_compare(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1bt_1maxkey(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1bt_1maxkey(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct Db *arg1 = (struct Db *) 0 ; u_int32_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -3261,23 +3777,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1bt_1maxkey(JNI return ; } + result = (db_ret_t)Db_set_bt_maxkey(arg1,arg2); - result = (db_ret_t)__db_set_bt_maxkey(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1bt_1minkey(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1bt_1minkey(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct Db *arg1 = (struct Db *) 0 ; u_int32_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -3285,23 +3801,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1bt_1minkey(JNI return ; } + result = (db_ret_t)Db_set_bt_minkey(arg1,arg2); - result = (db_ret_t)__db_set_bt_minkey(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1bt_1prefix(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1bt_1prefix(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { + struct Db *arg1 = (struct Db *) 0 ; size_t (*arg2)(DB *,DBT const *,DBT const *) = (size_t (*)(DB *,DBT const *,DBT const *)) 0 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = (jarg2 == NULL) ? 
NULL : __dbj_bt_prefix; @@ -3311,24 +3827,24 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1bt_1prefix(JNI return ; } + result = (db_ret_t)Db_set_bt_prefix(arg1,arg2); - result = (db_ret_t)__db_set_bt_prefix(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1cachesize(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jint jarg3) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1cachesize(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jint jarg3) { + struct Db *arg1 = (struct Db *) 0 ; jlong arg2 ; int arg3 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = jarg2; arg3 = (int)jarg3; @@ -3337,23 +3853,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1cachesize(JNIE return ; } + result = (db_ret_t)Db_set_cachesize(arg1,arg2,arg3); - result = (db_ret_t)__db_set_cachesize(arg1,arg2,arg3); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1dup_1compare(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1dup_1compare(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { + struct Db *arg1 = (struct Db *) 0 ; int (*arg2)(DB *,DBT const *,DBT const *) = (int (*)(DB *,DBT const *,DBT const *)) 0 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = (jarg2 == NULL) ? 
NULL : __dbj_dup_compare; @@ -3363,24 +3879,24 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1dup_1compare(J return ; } + result = (db_ret_t)Db_set_dup_compare(arg1,arg2); - result = (db_ret_t)__db_set_dup_compare(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1encrypt(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2, jint jarg3) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1encrypt(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2, jint jarg3) { + struct Db *arg1 = (struct Db *) 0 ; char *arg2 ; u_int32_t arg3 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; { arg2 = 0; if (jarg2) { @@ -3395,11 +3911,38 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1encrypt(JNIEnv return ; } + result = (db_ret_t)Db_set_encrypt(arg1,(char const *)arg2,arg3); + + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } + + { + if (arg2) (*jenv)->ReleaseStringUTFChars(jenv, jarg2, arg2); + } +} + + +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1errpfx(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2) { + struct Db *arg1 = (struct Db *) 0 ; + char *arg2 ; - result = (db_ret_t)__db_set_encrypt(arg1,(char const *)arg2,arg3); + (void)jenv; + (void)jcls; + arg1 = *(struct Db **)&jarg1; + { + arg2 = 0; + if (jarg2) { + arg2 = (char *)(*jenv)->GetStringUTFChars(jenv, jarg2, 0); + if (!arg2) return ; + } + } - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (jarg1 == 0) { + __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); + return ; + } + Db_set_errpfx(arg1,(char const *)arg2); { if (arg2) (*jenv)->ReleaseStringUTFChars(jenv, jarg2, arg2); @@ -3407,14 +3950,14 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1encrypt(JNIEnv } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1setFeedbackHandler(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1feedback(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { + struct Db *arg1 = (struct Db *) 0 ; void (*arg2)(DB *,int,int) = (void (*)(DB *,int,int)) 0 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = (jarg2 == NULL) ? 
NULL : __dbj_db_feedback; @@ -3424,23 +3967,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1setFeedbackHandler( return ; } + result = (db_ret_t)Db_set_feedback(arg1,arg2); - result = (db_ret_t)__db_set_feedback(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1flags(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1flags(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct Db *arg1 = (struct Db *) 0 ; u_int32_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -3448,23 +3991,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1flags(JNIEnv * return ; } + result = (db_ret_t)Db_set_flags(arg1,arg2); - result = (db_ret_t)__db_set_flags(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1h_1ffactor(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1h_1ffactor(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct Db *arg1 = (struct Db *) 0 ; u_int32_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -3472,23 +4015,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1h_1ffactor(JNI return ; } + result = (db_ret_t)Db_set_h_ffactor(arg1,arg2); - result = (db_ret_t)__db_set_h_ffactor(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1h_1hash(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1h_1hash(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { + struct Db *arg1 = (struct Db *) 0 ; u_int32_t (*arg2)(DB *,void const *,u_int32_t) = (u_int32_t (*)(DB *,void const *,u_int32_t)) 0 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = (jarg2 == NULL) ? 
NULL : __dbj_h_hash; @@ -3498,23 +4041,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1h_1hash(JNIEnv return ; } + result = (db_ret_t)Db_set_h_hash(arg1,arg2); - result = (db_ret_t)__db_set_h_hash(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1h_1nelem(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1h_1nelem(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct Db *arg1 = (struct Db *) 0 ; u_int32_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -3522,23 +4065,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1h_1nelem(JNIEn return ; } + result = (db_ret_t)Db_set_h_nelem(arg1,arg2); - result = (db_ret_t)__db_set_h_nelem(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1lorder(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1lorder(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct Db *arg1 = (struct Db *) 0 ; int arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = (int)jarg2; if (jarg1 == 0) { @@ -3546,23 +4089,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1lorder(JNIEnv return ; } + result = (db_ret_t)Db_set_lorder(arg1,arg2); - result = (db_ret_t)__db_set_lorder(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1pagesize(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1pagesize(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2) { + struct Db *arg1 = (struct Db *) 0 ; u_int32_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -3570,23 +4113,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1pagesize(JNIEn return ; } + result = (db_ret_t)Db_set_pagesize(arg1,arg2); - result = (db_ret_t)__db_set_pagesize(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1re_1delim(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1re_1delim(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct Db *arg1 = (struct Db *) 0 ; int arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db 
**)&jarg1; arg2 = (int)jarg2; if (jarg1 == 0) { @@ -3594,23 +4137,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1re_1delim(JNIE return ; } + result = (db_ret_t)Db_set_re_delim(arg1,arg2); - result = (db_ret_t)__db_set_re_delim(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1re_1len(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1re_1len(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct Db *arg1 = (struct Db *) 0 ; u_int32_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -3618,23 +4161,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1re_1len(JNIEnv return ; } + result = (db_ret_t)Db_set_re_len(arg1,arg2); - result = (db_ret_t)__db_set_re_len(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1re_1pad(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1re_1pad(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct Db *arg1 = (struct Db *) 0 ; int arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = (int)jarg2; if (jarg1 == 0) { @@ -3642,23 +4185,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1re_1pad(JNIEnv return ; } + result = (db_ret_t)Db_set_re_pad(arg1,arg2); - result = (db_ret_t)__db_set_re_pad(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1re_1source(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1re_1source(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2) { + struct Db *arg1 = (struct Db *) 0 ; char *arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; { arg2 = 0; if (jarg2) { @@ -3672,11 +4215,11 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1re_1source(JNI return ; } + result = (db_ret_t)Db_set_re_source(arg1,arg2); - result = (db_ret_t)__db_set_re_source(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } { if (arg2) (*jenv)->ReleaseStringUTFChars(jenv, jarg2, arg2); @@ -3684,14 +4227,14 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1re_1source(JNI } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1q_1extentsize(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL 
Java_com_sleepycat_db_internal_db_1javaJNI_Db_1set_1q_1extentsize(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct Db *arg1 = (struct Db *) 0 ; u_int32_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -3699,37 +4242,39 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1set_1q_1extentsize( return ; } + result = (db_ret_t)Db_set_q_extentsize(arg1,arg2); - result = (db_ret_t)__db_set_q_extentsize(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } } -JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1stat(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { +JNIEXPORT jobject JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1stat(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jint jarg3) { jobject jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; - u_int32_t arg2 ; + struct Db *arg1 = (struct Db *) 0 ; + DB_TXN *arg2 = (DB_TXN *) 0 ; + u_int32_t arg3 ; void *result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; - arg2 = (u_int32_t)jarg2; + arg1 = *(struct Db **)&jarg1; + arg2 = *(DB_TXN **)&jarg2; + arg3 = (u_int32_t)jarg3; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (void *)__db_stat(arg1,arg2); + result = (void *)Db_stat(arg1,arg2,arg3); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } if (result == NULL) @@ -3748,28 +4293,28 @@ JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1stat(JNIEnv *jen /* Btree and recno share the same stat structure */ case DB_BTREE: case DB_RECNO: - jresult = (*jenv)->NewObject(jenv, btree_stat_class, - btree_stat_construct); + jresult = (*jenv)->NewObject(jenv, bt_stat_class, + bt_stat_construct); if (jresult != NULL) - __dbj_fill_bt_stat(jenv, btree_stat_class, - jresult, (DB_BTREE_STAT *)result); + __dbj_fill_bt_stat(jenv, jresult, + (DB_BTREE_STAT *)result); break; /* Hash stat structure */ case DB_HASH: - jresult = (*jenv)->NewObject(jenv, hash_stat_class, - hash_stat_construct); + jresult = (*jenv)->NewObject(jenv, h_stat_class, + h_stat_construct); if (jresult != NULL) - __dbj_fill_h_stat(jenv, hash_stat_class, - jresult, (DB_HASH_STAT *)result); + __dbj_fill_h_stat(jenv, jresult, + (DB_HASH_STAT *)result); break; case DB_QUEUE: - jresult = (*jenv)->NewObject(jenv, queue_stat_class, - queue_stat_construct); + jresult = (*jenv)->NewObject(jenv, qam_stat_class, + qam_stat_construct); if (jresult != NULL) - __dbj_fill_qam_stat(jenv, queue_stat_class, - jresult, (DB_QUEUE_STAT *)result); + __dbj_fill_qam_stat(jenv, jresult, + (DB_QUEUE_STAT *)result); break; /* That's all the database types we're aware of! 
*/ @@ -3786,14 +4331,14 @@ JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1stat(JNIEnv *jen } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1sync(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1sync(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct Db *arg1 = (struct Db *) 0 ; u_int32_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -3801,25 +4346,25 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1sync(JNIEnv *jenv, return ; } + result = (db_ret_t)Db_sync(arg1,arg2); - result = (db_ret_t)__db_sync(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1truncate(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jint jarg3) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1truncate(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jint jarg3) { jint jresult = 0 ; - struct __db *arg1 = (struct __db *) 0 ; + struct Db *arg1 = (struct Db *) 0 ; DB_TXN *arg2 = (DB_TXN *) 0 ; u_int32_t arg3 ; int result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; arg2 = *(DB_TXN **)&jarg2; arg3 = (u_int32_t)jarg3; @@ -3828,27 +4373,27 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1truncate(JNIEnv *je return 0; } - errno = 0; - result = (int)__db_truncate(arg1,arg2,arg3); + result = (int)Db_truncate(arg1,arg2,arg3); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DB2JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1upgrade(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2, jint jarg3) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1upgrade(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2, jint jarg3) { + struct Db *arg1 = (struct Db *) 0 ; char *arg2 ; u_int32_t arg3 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; { arg2 = 0; if (jarg2) { @@ -3863,11 +4408,11 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1upgrade(JNIEnv *jen return ; } + result = (db_ret_t)Db_upgrade(arg1,(char const *)arg2,arg3); - result = (db_ret_t)__db_upgrade(arg1,(char const *)arg2,arg3); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, DB2JDBENV); + } { if (arg2) (*jenv)->ReleaseStringUTFChars(jenv, jarg2, arg2); @@ -3875,30 +4420,31 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1upgrade(JNIEnv *jen } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1verify0(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2, jstring jarg3, jobject jarg4, jint jarg5) { - struct __db *arg1 = (struct __db *) 0 ; +JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1verify0(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2, jstring jarg3, jobject jarg4, jint jarg5) { + jboolean jresult = 0 ; + struct Db *arg1 = (struct Db 
*) 0 ; char *arg2 ; char *arg3 ; struct __db_out_stream arg4 ; u_int32_t arg5 ; - db_ret_t result; + int_bool result; struct __dbj_verify_data data4 ; (void)jenv; (void)jcls; - arg1 = *(struct __db **)&jarg1; + arg1 = *(struct Db **)&jarg1; { arg2 = 0; if (jarg2) { arg2 = (char *)(*jenv)->GetStringUTFChars(jenv, jarg2, 0); - if (!arg2) return ; + if (!arg2) return 0; } } { arg3 = 0; if (jarg3) { arg3 = (char *)(*jenv)->GetStringUTFChars(jenv, jarg3, 0); - if (!arg3) return ; + if (!arg3) return 0; } } { @@ -3913,55 +4459,58 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Db_1verify0(JNIEnv *jen if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); - return ; + return 0; } + errno = 0; + result = (int_bool)Db_verify(arg1,(char const *)arg2,(char const *)arg3,arg4,arg5); - result = (db_ret_t)__db_verify(arg1,(char const *)arg2,(char const *)arg3,arg4,arg5); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, NULL); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, NULL); + } + jresult = (result) ? JNI_TRUE : JNI_FALSE; { if (arg2) (*jenv)->ReleaseStringUTFChars(jenv, jarg2, arg2); } { if (arg3) (*jenv)->ReleaseStringUTFChars(jenv, jarg3, arg3); } + return jresult; } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_Dbc_1close0(JNIEnv *jenv, jclass jcls, jlong jarg1) { - struct __dbc *arg1 = (struct __dbc *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Dbc_1close0(JNIEnv *jenv, jclass jcls, jlong jarg1) { + struct Dbc *arg1 = (struct Dbc *) 0 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __dbc **)&jarg1; + arg1 = *(struct Dbc **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return ; } + result = (db_ret_t)Dbc_close(arg1); - result = (db_ret_t)__dbc_close(arg1); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, NULL); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, NULL); + } } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Dbc_1count(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Dbc_1count(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { jint jresult = 0 ; - struct __dbc *arg1 = (struct __dbc *) 0 ; + struct Dbc *arg1 = (struct Dbc *) 0 ; u_int32_t arg2 ; db_recno_t result; (void)jenv; (void)jcls; - arg1 = *(struct __dbc **)&jarg1; + arg1 = *(struct Dbc **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -3969,27 +4518,27 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Dbc_1count(JNIEnv *jenv return 0; } - errno = 0; - result = (db_recno_t)__dbc_count(arg1,arg2); + result = (db_recno_t)Dbc_count(arg1,arg2); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DBC2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DBC2JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Dbc_1del(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Dbc_1del(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { jint jresult = 0 ; - struct __dbc *arg1 = (struct __dbc *) 0 ; + struct Dbc *arg1 = (struct Dbc *) 0 ; u_int32_t arg2 ; int result; (void)jenv; (void)jcls; - arg1 = *(struct __dbc **)&jarg1; + arg1 = *(struct Dbc **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -3997,26 +4546,26 @@ JNIEXPORT jint JNICALL 
Java_com_sleepycat_db_db_1javaJNI_Dbc_1del(JNIEnv *jenv, return 0; } + result = (int)Dbc_del(arg1,arg2); - result = (int)__dbc_del(arg1,arg2); - - if (!DB_RETOK_DBCDEL(result)) - __dbj_throw(jenv, result, NULL, NULL, DBC2JDBENV); + if (!DB_RETOK_DBCDEL(result)) { + __dbj_throw(jenv, result, NULL, NULL, DBC2JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_Dbc_1dup(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { +JNIEXPORT jlong JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Dbc_1dup(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { jlong jresult = 0 ; - struct __dbc *arg1 = (struct __dbc *) 0 ; + struct Dbc *arg1 = (struct Dbc *) 0 ; u_int32_t arg2 ; DBC *result; (void)jenv; (void)jcls; - arg1 = *(struct __dbc **)&jarg1; + arg1 = *(struct Dbc **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -4024,21 +4573,21 @@ JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_Dbc_1dup(JNIEnv *jenv, return 0; } - errno = 0; - result = (DBC *)__dbc_dup(arg1,arg2); + result = (DBC *)Dbc_dup(arg1,arg2); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, DBC2JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, DBC2JDBENV); + } *(DBC **)&jresult = result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Dbc_1get_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2, jobject jarg3, jint jarg4) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Dbc_1get(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2, jobject jarg3, jint jarg4) { jint jresult = 0 ; - struct __dbc *arg1 = (struct __dbc *) 0 ; + struct Dbc *arg1 = (struct Dbc *) 0 ; DBT *arg2 = (DBT *) 0 ; DBT *arg3 = (DBT *) 0 ; u_int32_t arg4 ; @@ -4048,17 +4597,15 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Dbc_1get_1_1SWIG_10(JNI (void)jenv; (void)jcls; - arg1 = *(struct __dbc **)&jarg1; - - if (__dbj_dbt_copyin(jenv, &ldbt2, jarg2) != 0) - return 0; - arg2 = &ldbt2.dbt; + arg1 = *(struct Dbc **)&jarg1; + if (__dbj_dbt_copyin(jenv, &ldbt2, &arg2, jarg2, 0) != 0) { + return 0; + } - if (__dbj_dbt_copyin(jenv, &ldbt3, jarg3) != 0) - return 0; - arg3 = &ldbt3.dbt; - + if (__dbj_dbt_copyin(jenv, &ldbt3, &arg3, jarg3, 0) != 0) { + return 0; + } arg4 = (u_int32_t)jarg4; if (jarg1 == 0) { @@ -4066,26 +4613,22 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Dbc_1get_1_1SWIG_10(JNI return 0; } + result = (int)Dbc_get(arg1,arg2,arg3,arg4); - result = (int)__dbc_get__SWIG_0(arg1,arg2,arg3,arg4); - - if (!DB_RETOK_DBCGET(result)) - __dbj_throw(jenv, result, NULL, NULL, DBC2JDBENV); + if (!DB_RETOK_DBCGET(result)) { + __dbj_throw(jenv, result, NULL, NULL, DBC2JDBENV); + } jresult = (jint)result; - - __dbj_dbt_release(jenv, jarg2, arg2, &ldbt2); - - - __dbj_dbt_release(jenv, jarg3, arg3, &ldbt3); - + __dbj_dbt_release(jenv, jarg2, arg2, &ldbt2); + __dbj_dbt_release(jenv, jarg3, arg3, &ldbt3); return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Dbc_1get_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2, jobject jarg3, jobject jarg4, jint jarg5) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Dbc_1pget(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2, jobject jarg3, jobject jarg4, jint jarg5) { jint jresult = 0 ; - struct __dbc *arg1 = (struct __dbc *) 0 ; + struct Dbc *arg1 = (struct Dbc *) 0 ; DBT *arg2 = (DBT *) 0 ; DBT *arg3 = (DBT *) 0 ; DBT *arg4 = (DBT *) 0 ; @@ -4097,22 +4640,19 @@ 
JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Dbc_1get_1_1SWIG_11(JNI (void)jenv; (void)jcls; - arg1 = *(struct __dbc **)&jarg1; - - if (__dbj_dbt_copyin(jenv, &ldbt2, jarg2) != 0) - return 0; - arg2 = &ldbt2.dbt; - - - if (__dbj_dbt_copyin(jenv, &ldbt3, jarg3) != 0) - return 0; - arg3 = &ldbt3.dbt; + arg1 = *(struct Dbc **)&jarg1; + if (__dbj_dbt_copyin(jenv, &ldbt2, &arg2, jarg2, 0) != 0) { + return 0; + } - if (__dbj_dbt_copyin(jenv, &ldbt4, jarg4) != 0) - return 0; - arg4 = &ldbt4.dbt; + if (__dbj_dbt_copyin(jenv, &ldbt3, &arg3, jarg3, 0) != 0) { + return 0; + } + if (__dbj_dbt_copyin(jenv, &ldbt4, &arg4, jarg4, 0) != 0) { + return 0; + } arg5 = (u_int32_t)jarg5; if (jarg1 == 0) { @@ -4120,29 +4660,23 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Dbc_1get_1_1SWIG_11(JNI return 0; } + result = (int)Dbc_pget(arg1,arg2,arg3,arg4,arg5); - result = (int)__dbc_pget__SWIG_1(arg1,arg2,arg3,arg4,arg5); - - if (!DB_RETOK_DBCGET(result)) - __dbj_throw(jenv, result, NULL, NULL, DBC2JDBENV); + if (!DB_RETOK_DBCGET(result)) { + __dbj_throw(jenv, result, NULL, NULL, DBC2JDBENV); + } jresult = (jint)result; - - __dbj_dbt_release(jenv, jarg2, arg2, &ldbt2); - - - __dbj_dbt_release(jenv, jarg3, arg3, &ldbt3); - - - __dbj_dbt_release(jenv, jarg4, arg4, &ldbt4); - + __dbj_dbt_release(jenv, jarg2, arg2, &ldbt2); + __dbj_dbt_release(jenv, jarg3, arg3, &ldbt3); + __dbj_dbt_release(jenv, jarg4, arg4, &ldbt4); return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Dbc_1put(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2, jobject jarg3, jint jarg4) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Dbc_1put(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2, jobject jarg3, jint jarg4) { jint jresult = 0 ; - struct __dbc *arg1 = (struct __dbc *) 0 ; + struct Dbc *arg1 = (struct Dbc *) 0 ; DBT *arg2 = (DBT *) 0 ; DBT *arg3 = (DBT *) 0 ; u_int32_t arg4 ; @@ -4152,17 +4686,15 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Dbc_1put(JNIEnv *jenv, (void)jenv; (void)jcls; - arg1 = *(struct __dbc **)&jarg1; + arg1 = *(struct Dbc **)&jarg1; - if (__dbj_dbt_copyin(jenv, &ldbt2, jarg2) != 0) - return 0; - arg2 = &ldbt2.dbt; - - - if (__dbj_dbt_copyin(jenv, &ldbt3, jarg3) != 0) - return 0; - arg3 = &ldbt3.dbt; + if (__dbj_dbt_copyin(jenv, &ldbt2, &arg2, jarg2, 0) != 0) { + return 0; + } + if (__dbj_dbt_copyin(jenv, &ldbt3, &arg3, jarg3, 0) != 0) { + return 0; + } arg4 = (u_int32_t)jarg4; if (jarg1 == 0) { @@ -4170,51 +4702,48 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_Dbc_1put(JNIEnv *jenv, return 0; } + result = (int)Dbc_put(arg1,arg2,arg3,arg4); - result = (int)__dbc_put(arg1,arg2,arg3,arg4); - - if (!DB_RETOK_DBCPUT(result)) - __dbj_throw(jenv, result, NULL, NULL, DBC2JDBENV); + if (!DB_RETOK_DBCPUT(result)) { + __dbj_throw(jenv, result, NULL, NULL, DBC2JDBENV); + } jresult = (jint)result; - - __dbj_dbt_release(jenv, jarg2, arg2, &ldbt2); - - - __dbj_dbt_release(jenv, jarg3, arg3, &ldbt3); - + __dbj_dbt_release(jenv, jarg2, arg2, &ldbt2); + __dbj_dbt_release(jenv, jarg3, arg3, &ldbt3); return jresult; } -JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_new_1DbEnv(JNIEnv *jenv, jclass jcls, jint jarg1) { +JNIEXPORT jlong JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_new_1DbEnv(JNIEnv *jenv, jclass jcls, jint jarg1) { jlong jresult = 0 ; u_int32_t arg1 ; - struct __db_env *result; + struct DbEnv *result; (void)jenv; (void)jcls; arg1 = (u_int32_t)jarg1; errno = 0; - result = (struct __db_env 
*)new___db_env(arg1); + result = (struct DbEnv *)new_DbEnv(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, NULL); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, NULL); + } - *(struct __db_env **)&jresult = result; + *(struct DbEnv **)&jresult = result; return jresult; } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1close0(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1close0(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -4222,17 +4751,17 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1close0(JNIEnv *j return ; } + result = (db_ret_t)DbEnv_close(arg1,arg2); - result = (db_ret_t)__db_env_close(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, NULL); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, NULL); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1dbremove(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jstring jarg3, jstring jarg4, jint jarg5) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1dbremove(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jstring jarg3, jstring jarg4, jint jarg5) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; DB_TXN *arg2 = (DB_TXN *) 0 ; char *arg3 ; char *arg4 ; @@ -4241,7 +4770,7 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1dbremove(JNIEnv (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = *(DB_TXN **)&jarg2; { arg3 = 0; @@ -4264,11 +4793,11 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1dbremove(JNIEnv return ; } + result = (db_ret_t)DbEnv_dbremove(arg1,arg2,(char const *)arg3,(char const *)arg4,arg5); - result = (db_ret_t)__db_env_dbremove(arg1,arg2,(char const *)arg3,(char const *)arg4,arg5); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } { if (arg3) (*jenv)->ReleaseStringUTFChars(jenv, jarg3, arg3); @@ -4279,8 +4808,8 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1dbremove(JNIEnv } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1dbrename(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jstring jarg3, jstring jarg4, jstring jarg5, jint jarg6) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1dbrename(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jstring jarg3, jstring jarg4, jstring jarg5, jint jarg6) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; DB_TXN *arg2 = (DB_TXN *) 0 ; char *arg3 ; char *arg4 ; @@ -4290,7 +4819,7 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1dbrename(JNIEnv (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = *(DB_TXN **)&jarg2; { arg3 = 0; @@ -4320,11 +4849,11 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1dbrename(JNIEnv return ; } + result = (db_ret_t)DbEnv_dbrename(arg1,arg2,(char const *)arg3,(char const 
*)arg4,(char const *)arg5,arg6); - result = (db_ret_t)__db_env_dbrename(arg1,arg2,(char const *)arg3,(char const *)arg4,(char const *)arg5,arg6); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } { if (arg3) (*jenv)->ReleaseStringUTFChars(jenv, jarg3, arg3); @@ -4338,14 +4867,14 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1dbrename(JNIEnv } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1err(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2, jstring jarg3) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1err(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2, jstring jarg3) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; int arg2 ; char *arg3 ; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (int)jarg2; { arg3 = 0; @@ -4359,8 +4888,7 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1err(JNIEnv *jenv __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return ; } - - __db_env_err(arg1,arg2,(char const *)arg3); + DbEnv_err(arg1,arg2,(char const *)arg3); { if (arg3) (*jenv)->ReleaseStringUTFChars(jenv, jarg3, arg3); @@ -4368,13 +4896,13 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1err(JNIEnv *jenv } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1errx(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1errx(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; char *arg2 ; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; { arg2 = 0; if (jarg2) { @@ -4387,8 +4915,7 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1errx(JNIEnv *jen __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return ; } - - __db_env_errx(arg1,(char const *)arg2); + DbEnv_errx(arg1,(char const *)arg2); { if (arg2) (*jenv)->ReleaseStringUTFChars(jenv, jarg2, arg2); @@ -4396,30 +4923,30 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1errx(JNIEnv *jen } -JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1data_1dirs(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1data_1dirs(JNIEnv *jenv, jclass jcls, jlong jarg1) { jobjectArray jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; char **result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (char **)__db_env_get_data_dirs(arg1); + result = (char **)DbEnv_get_data_dirs(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } { if(result != NULL) { - /*@../libdb_java/java_typemaps.i,229,STRING_ARRAY_OUT@*/ int i, len; + /*@../libdb_java/java_typemaps.i,291,STRING_ARRAY_OUT@*/ int i, len; len = 0; while (result[len] != NULL) @@ -4438,78 +4965,106 @@ JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1dat } 
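The same mechanical rewrite runs through every wrapper in this file: the JNI entry points move from com.sleepycat.db to com.sleepycat.db.internal, the opaque handle types become the Db/Dbc/DbEnv typedefs, the __db_*/__dbc_*/__db_env_* shims are invoked as Db_*/Dbc_*/DbEnv_*, the DB_RETOK_* checks gain braces, and DBT marshalling switches to the five-argument __dbj_dbt_copyin() with the matching __dbj_dbt_release() calls grouped just before the return. A minimal sketch of the resulting shape, modelled on the Db_put wrapper above; the helpers (__dbj_dbt_copyin, __dbj_dbt_release, __dbj_throw, DB_RETOK_DBPUT, DB2JDBENV, Db_put) are supplied by the surrounding generated db_java_wrap.c glue, and the DBT_LOCKED type of the ldbt locals is assumed from unchanged context that this hunk does not show:

/*
 * Illustrative sketch only, not part of the patch: the post-rewrite
 * shape of a two-DBT wrapper.  All helper symbols come from the
 * generated db_java_wrap.c glue; DBT_LOCKED is assumed.
 */
JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_Db_1put(
    JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2,
    jobject jarg3, jobject jarg4, jint jarg5)
{
	jint jresult = 0;
	struct Db *arg1 = (struct Db *)0;
	DB_TXN *arg2 = (DB_TXN *)0;
	DBT *arg3 = (DBT *)0;
	DBT *arg4 = (DBT *)0;
	u_int32_t arg5;
	int result;
	DBT_LOCKED ldbt3;	/* type assumed; declared in unshown context */
	DBT_LOCKED ldbt4;

	(void)jcls;
	arg1 = *(struct Db **)&jarg1;
	arg2 = *(DB_TXN **)&jarg2;

	/* New copy-in convention: target DBT pointer passed in, fail fast. */
	if (__dbj_dbt_copyin(jenv, &ldbt3, &arg3, jarg3, 0) != 0) {
		return 0;
	}
	if (__dbj_dbt_copyin(jenv, &ldbt4, &arg4, jarg4, 0) != 0) {
		return 0;
	}
	arg5 = (u_int32_t)jarg5;

	if (jarg1 == 0) {
		__dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL);
		return 0;
	}

	/* Direct call through the renamed Db_* shim; braced error check. */
	result = (int)Db_put(arg1, arg2, arg3, arg4, arg5);
	if (!DB_RETOK_DBPUT(result)) {
		__dbj_throw(jenv, result, NULL, NULL, DB2JDBENV);
	}
	jresult = (jint)result;

	/* Releases are now grouped immediately before the return. */
	__dbj_dbt_release(jenv, jarg3, arg3, &ldbt3);
	__dbj_dbt_release(jenv, jarg4, arg4, &ldbt4);
	return jresult;
}

The simple getters above (Db_get_pagesize, DbEnv_get_home, and friends) follow the same shape minus the DBT handling: the Db_*/DbEnv_* shim is called directly and errno is tested afterwards, with the explicit errno = 0 reset dropped from those hunks.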
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1encrypt_1flags(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1encrypt_1flags(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (u_int32_t)__db_env_get_encrypt_flags(arg1); + result = (u_int32_t)DbEnv_get_encrypt_flags(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1flags(JNIEnv *jenv, jclass jcls, jlong jarg1) { - jint jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; - u_int32_t result; - +JNIEXPORT jstring JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1errpfx(JNIEnv *jenv, jclass jcls, jlong jarg1) { + jstring jresult = 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; + char *result; + (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } + errno = 0; + result = (char *)DbEnv_get_errpfx(arg1); + + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } + + { + if(result) jresult = (*jenv)->NewStringUTF(jenv, result); + } + return jresult; +} + + +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1flags(JNIEnv *jenv, jclass jcls, jlong jarg1) { + jint jresult = 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; + u_int32_t result; + + (void)jenv; + (void)jcls; + arg1 = *(struct DbEnv **)&jarg1; + + if (jarg1 == 0) { + __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); + return 0; + } errno = 0; - result = (u_int32_t)__db_env_get_flags(arg1); + result = (u_int32_t)DbEnv_get_flags(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1home(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jstring JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1home(JNIEnv *jenv, jclass jcls, jlong jarg1) { jstring jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; char *result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (char *)__db_env_get_home(arg1); + result = (char *)DbEnv_get_home(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } { if(result) jresult = (*jenv)->NewStringUTF(jenv, result); @@ -4518,104 +5073,104 @@ JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1home(JNI } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1open_1flags(JNIEnv *jenv, jclass jcls, 
jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1open_1flags(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (u_int32_t)__db_env_get_open_flags(arg1); + result = (u_int32_t)DbEnv_get_open_flags(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1shm_1key(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jlong JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1shm_1key(JNIEnv *jenv, jclass jcls, jlong jarg1) { jlong jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; long result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (long)__db_env_get_shm_key(arg1); + result = (long)DbEnv_get_shm_key(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (jlong)result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1tas_1spins(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1tas_1spins(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (u_int32_t)__db_env_get_tas_spins(arg1); + result = (u_int32_t)DbEnv_get_tas_spins(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1tmp_1dir(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jstring JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1tmp_1dir(JNIEnv *jenv, jclass jcls, jlong jarg1) { jstring jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; char *result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (char *)__db_env_get_tmp_dir(arg1); + result = (char *)DbEnv_get_tmp_dir(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } { if(result) jresult = (*jenv)->NewStringUTF(jenv, result); @@ -4624,15 +5179,15 @@ JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1tmp_1dir 
} -JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1verbose(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { +JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1verbose(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { jboolean jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; int_bool result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -4640,22 +5195,20 @@ JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1verbose return 0; } - errno = 0; - result = (int_bool)__db_env_get_verbose(arg1,arg2); + result = (int_bool)DbEnv_get_verbose(arg1,arg2); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); - - - jresult = (result) ? JNI_TRUE : JNI_FALSE; + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } + jresult = (result) ? JNI_TRUE : JNI_FALSE; return jresult; } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1open0(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2, jint jarg3, jint jarg4) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1open(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2, jint jarg3, jint jarg4) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; char *arg2 ; u_int32_t arg3 ; int arg4 ; @@ -4663,7 +5216,7 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1open0(JNIEnv *je (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; { arg2 = 0; if (jarg2) { @@ -4679,11 +5232,11 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1open0(JNIEnv *je return ; } + result = (db_ret_t)DbEnv_open(arg1,(char const *)arg2,arg3,arg4); - result = (db_ret_t)__db_env_open(arg1,(char const *)arg2,arg3,arg4); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } { if (arg2) (*jenv)->ReleaseStringUTFChars(jenv, jarg2, arg2); @@ -4691,15 +5244,15 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1open0(JNIEnv *je } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1remove0(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2, jint jarg3) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1remove0(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2, jint jarg3) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; char *arg2 ; u_int32_t arg3 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; { arg2 = 0; if (jarg2) { @@ -4714,11 +5267,11 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1remove0(JNIEnv * return ; } + result = (db_ret_t)DbEnv_remove(arg1,(char const *)arg2,arg3); - result = (db_ret_t)__db_env_remove(arg1,(char const *)arg2,arg3); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, NULL); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, NULL); + } { if (arg2) (*jenv)->ReleaseStringUTFChars(jenv, jarg2, arg2); @@ -4726,15 +5279,15 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1remove0(JNIEnv * } -JNIEXPORT void JNICALL 
Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1cachesize(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jint jarg3) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1cachesize(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jint jarg3) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; jlong arg2 ; int arg3 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = jarg2; arg3 = (int)jarg3; @@ -4743,23 +5296,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1cachesize(J return ; } + result = (db_ret_t)DbEnv_set_cachesize(arg1,arg2,arg3); - result = (db_ret_t)__db_env_set_cachesize(arg1,arg2,arg3); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1data_1dir(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1data_1dir(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; char *arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; { arg2 = 0; if (jarg2) { @@ -4773,11 +5326,11 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1data_1dir(J return ; } + result = (db_ret_t)DbEnv_set_data_dir(arg1,(char const *)arg2); - result = (db_ret_t)__db_env_set_data_dir(arg1,(char const *)arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } { if (arg2) (*jenv)->ReleaseStringUTFChars(jenv, jarg2, arg2); @@ -4785,15 +5338,15 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1data_1dir(J } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1encrypt(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2, jint jarg3) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1encrypt(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2, jint jarg3) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; char *arg2 ; u_int32_t arg3 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; { arg2 = 0; if (jarg2) { @@ -4808,11 +5361,11 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1encrypt(JNI return ; } + result = (db_ret_t)DbEnv_set_encrypt(arg1,(char const *)arg2,arg3); - result = (db_ret_t)__db_env_set_encrypt(arg1,(char const *)arg2,arg3); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } { if (arg2) (*jenv)->ReleaseStringUTFChars(jenv, jarg2, arg2); @@ -4820,13 +5373,13 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1encrypt(JNI } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1setErrorHandler(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; - void (*arg2)(char const *,char *) = (void (*)(char const *,char *)) 0 ; +JNIEXPORT void JNICALL 
Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1errcall(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; + void (*arg2)(DB_ENV const *,char const *,char const *) = (void (*)(DB_ENV const *,char const *,char const *)) 0 ; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (jarg2 == NULL) ? NULL : __dbj_error; @@ -4835,48 +5388,72 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1setErrorHandler( __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return ; } + DbEnv_set_errcall(arg1,arg2); + +} + + +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1errpfx(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; + char *arg2 ; + + (void)jenv; + (void)jcls; + arg1 = *(struct DbEnv **)&jarg1; + { + arg2 = 0; + if (jarg2) { + arg2 = (char *)(*jenv)->GetStringUTFChars(jenv, jarg2, 0); + if (!arg2) return ; + } + } - __db_env_set_errcall(arg1,arg2); + if (jarg1 == 0) { + __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); + return ; + } + DbEnv_set_errpfx(arg1,(char const *)arg2); + { + if (arg2) (*jenv)->ReleaseStringUTFChars(jenv, jarg2, arg2); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1flags(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2, jboolean jarg3) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1flags(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2, jboolean jarg3) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; int_bool arg3 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; - - arg3 = (jarg3 == JNI_TRUE); - + arg3 = (jarg3 == JNI_TRUE); if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return ; } + result = (db_ret_t)DbEnv_set_flags(arg1,arg2,arg3); - result = (db_ret_t)__db_env_set_flags(arg1,arg2,arg3); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1setFeedbackHandler(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1feedback(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; void (*arg2)(DB_ENV *,int,int) = (void (*)(DB_ENV *,int,int)) 0 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (jarg2 == NULL) ? 
NULL : __dbj_env_feedback; @@ -4886,23 +5463,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1setFeedbackHandl return ; } + result = (db_ret_t)DbEnv_set_feedback(arg1,arg2); - result = (db_ret_t)__db_env_set_feedback(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1mp_1mmapsize(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1mp_1mmapsize(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; size_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (size_t)jarg2; if (jarg1 == 0) { @@ -4910,23 +5487,43 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1mp_1mmapsiz return ; } + result = (db_ret_t)DbEnv_set_mp_mmapsize(arg1,arg2); + + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } + +} + + +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1msgcall(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; + void (*arg2)(DB_ENV const *,char const *) = (void (*)(DB_ENV const *,char const *)) 0 ; + + (void)jenv; + (void)jcls; + arg1 = *(struct DbEnv **)&jarg1; - result = (db_ret_t)__db_env_set_mp_mmapsize(arg1,arg2); + arg2 = (jarg2 == NULL) ? NULL : __dbj_message; - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + + if (jarg1 == 0) { + __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); + return ; + } + DbEnv_set_msgcall(arg1,arg2); } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1setPanicHandler(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1paniccall(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; void (*arg2)(DB_ENV *,int) = (void (*)(DB_ENV *,int)) 0 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (jarg2 == NULL) ? 
NULL : __dbj_panic; @@ -4936,17 +5533,17 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1setPanicHandler( return ; } + result = (db_ret_t)DbEnv_set_paniccall(arg1,arg2); - result = (db_ret_t)__db_env_set_paniccall(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1rpc_1server(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2, jstring jarg3, jlong jarg4, jlong jarg5, jint jarg6) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1rpc_1server(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2, jstring jarg3, jlong jarg4, jlong jarg5, jint jarg6) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; void *arg2 = (void *) 0 ; char *arg3 ; long arg4 ; @@ -4956,7 +5553,7 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1rpc_1server (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = *(void **)&jarg2; { arg3 = 0; @@ -4974,7 +5571,6 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1rpc_1server return ; } - if (arg2 != NULL) { __dbj_throw(jenv, EINVAL, "DbEnv.set_rpc_server client arg " "must be null; reserved for future use", NULL, JDBENV); @@ -4982,10 +5578,11 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1rpc_1server } - result = (db_ret_t)__db_env_set_rpc_server(arg1,arg2,arg3,arg4,arg5,arg6); + result = (db_ret_t)DbEnv_set_rpc_server(arg1,arg2,arg3,arg4,arg5,arg6); - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } { if (arg3) (*jenv)->ReleaseStringUTFChars(jenv, jarg3, arg3); @@ -4993,14 +5590,14 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1rpc_1server } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1shm_1key(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1shm_1key(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; long arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (long)jarg2; if (jarg1 == 0) { @@ -5008,23 +5605,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1shm_1key(JN return ; } + result = (db_ret_t)DbEnv_set_shm_key(arg1,arg2); - result = (db_ret_t)__db_env_set_shm_key(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1tas_1spins(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1tas_1spins(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -5032,24 +5629,24 @@ JNIEXPORT void JNICALL 
Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1tas_1spins( return ; } + result = (db_ret_t)DbEnv_set_tas_spins(arg1,arg2); - result = (db_ret_t)__db_env_set_tas_spins(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1timeout(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jint jarg3) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1timeout(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jint jarg3) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; db_timeout_t arg2 ; u_int32_t arg3 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (db_timeout_t)jarg2; arg3 = (u_int32_t)jarg3; @@ -5058,23 +5655,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1timeout(JNI return ; } + result = (db_ret_t)DbEnv_set_timeout(arg1,arg2,arg3); - result = (db_ret_t)__db_env_set_timeout(arg1,arg2,arg3); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1tmp_1dir(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1tmp_1dir(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; char *arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; { arg2 = 0; if (jarg2) { @@ -5088,11 +5685,11 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1tmp_1dir(JN return ; } + result = (db_ret_t)DbEnv_set_tmp_dir(arg1,(char const *)arg2); - result = (db_ret_t)__db_env_set_tmp_dir(arg1,(char const *)arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } { if (arg2) (*jenv)->ReleaseStringUTFChars(jenv, jarg2, arg2); @@ -5100,14 +5697,14 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1tmp_1dir(JN } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1tx_1max(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1tx_1max(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -5115,23 +5712,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1tx_1max(JNI return ; } + result = (db_ret_t)DbEnv_set_tx_max(arg1,arg2); - result = (db_ret_t)__db_env_set_tx_max(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1app_1dispatch(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { - struct __db_env *arg1 = (struct 
__db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1app_1dispatch(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; int (*arg2)(DB_ENV *,DBT *,DB_LSN *,db_recops) = (int (*)(DB_ENV *,DBT *,DB_LSN *,db_recops)) 0 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (jarg2 == NULL) ? NULL : __dbj_app_dispatch; @@ -5141,24 +5738,24 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1app_1dispat return ; } + result = (db_ret_t)DbEnv_set_app_dispatch(arg1,arg2); - result = (db_ret_t)__db_env_set_app_dispatch(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1tx_1timestamp0(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1tx_1timestamp0(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; time_t *arg2 = (time_t *) 0 ; db_ret_t result; time_t time2 ; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; time2 = (time_t)jarg2; arg2 = &time2; @@ -5169,63 +5766,61 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1tx_1timesta return ; } + result = (db_ret_t)DbEnv_set_tx_timestamp(arg1,arg2); - result = (db_ret_t)__db_env_set_tx_timestamp(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1verbose(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2, jboolean jarg3) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1verbose(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2, jboolean jarg3) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; int_bool arg3 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; - - arg3 = (jarg3 == JNI_TRUE); - + arg3 = (jarg3 == JNI_TRUE); if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return ; } + result = (db_ret_t)DbEnv_set_verbose(arg1,arg2,arg3); - result = (db_ret_t)__db_env_set_verbose(arg1,arg2,arg3); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1lk_1conflicts(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1lk_1conflicts(JNIEnv *jenv, jclass jcls, jlong jarg1) { jobjectArray jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; struct __db_lk_conflicts result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = __db_env_get_lk_conflicts(arg1); + result = 
DbEnv_get_lk_conflicts(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } { int i; @@ -5248,120 +5843,120 @@ JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1lk_ } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1lk_1detect(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1lk_1detect(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (u_int32_t)__db_env_get_lk_detect(arg1); + result = (u_int32_t)DbEnv_get_lk_detect(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1lk_1max_1locks(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1lk_1max_1locks(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (u_int32_t)__db_env_get_lk_max_locks(arg1); + result = (u_int32_t)DbEnv_get_lk_max_locks(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1lk_1max_1lockers(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1lk_1max_1lockers(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (u_int32_t)__db_env_get_lk_max_lockers(arg1); + result = (u_int32_t)DbEnv_get_lk_max_lockers(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1lk_1max_1objects(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1lk_1max_1objects(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, 
"call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (u_int32_t)__db_env_get_lk_max_objects(arg1); + result = (u_int32_t)DbEnv_get_lk_max_objects(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1detect(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2, jint jarg3) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1lock_1detect(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2, jint jarg3) { jint jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; u_int32_t arg3 ; int result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; arg3 = (u_int32_t)jarg3; @@ -5370,21 +5965,21 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1detect(JNI return 0; } - errno = 0; - result = (int)__db_env_lock_detect(arg1,arg2,arg3); + result = (int)DbEnv_lock_detect(arg1,arg2,arg3); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1get(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2, jint jarg3, jobject jarg4, jint jarg5) { +JNIEXPORT jlong JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1lock_1get(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2, jint jarg3, jobject jarg4, jint jarg5) { jlong jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; u_int32_t arg3 ; DBT *arg4 = (DBT *) 0 ; @@ -5394,14 +5989,13 @@ JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1get(JNIEn (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; arg3 = (u_int32_t)jarg3; - if (__dbj_dbt_copyin(jenv, &ldbt4, jarg4) != 0) - return 0; - arg4 = &ldbt4.dbt; - + if (__dbj_dbt_copyin(jenv, &ldbt4, &arg4, jarg4, 0) != 0) { + return 0; + } arg5 = (db_lockmode_t)jarg5; if (jarg1 == 0) { @@ -5409,63 +6003,53 @@ JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1get(JNIEn return 0; } - errno = 0; - result = (DB_LOCK *)__db_env_lock_get(arg1,arg2,arg3,(DBT const *)arg4,arg5); - - if (errno == DB_LOCK_NOTGRANTED) { - (*jenv)->Throw(jenv, - (*jenv)->NewObject(jenv, lockex_class, lockex_construct, - (*jenv)->NewStringUTF(jenv, "DbEnv.lock_get not granted"), - DB_LOCK_GET, arg5, jarg4, NULL, -1, JDBENV)); - }else if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); - - *(DB_LOCK **)&jresult = result; + result = (DB_LOCK *)DbEnv_lock_get(arg1,arg2,arg3,(DBT const *)arg4,arg5); - if (ldbt4.jarr != NULL) { - (*jenv)->ReleaseByteArrayElements(jenv, ldbt4.jarr, - ldbt4.orig_data, 0); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); } + *(DB_LOCK **)&jresult = result; + __dbj_dbt_release(jenv, jarg4, arg4, &ldbt4); return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1id(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1lock_1id(JNIEnv *jenv, jclass jcls, jlong jarg1) { 
jint jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (u_int32_t)__db_env_lock_id(arg1); + result = (u_int32_t)DbEnv_lock_id(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1id_1free(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1lock_1id_1free(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -5473,23 +6057,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1id_1free(J return ; } + result = (db_ret_t)DbEnv_lock_id_free(arg1,arg2); - result = (db_ret_t)__db_env_lock_id_free(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1put(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1lock_1put(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; DB_LOCK *arg2 = (DB_LOCK *) 0 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = *(DB_LOCK **)&jarg2; if (jarg1 == 0) { @@ -5497,24 +6081,24 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1put(JNIEnv return ; } + result = (db_ret_t)DbEnv_lock_put(arg1,arg2); - result = (db_ret_t)__db_env_lock_put(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1stat(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { +JNIEXPORT jobject JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1lock_1stat(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { jobject jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; DB_LOCK_STAT *result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -5522,31 +6106,31 @@ JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1stat(JN return 0; } - errno = 0; - result = (DB_LOCK_STAT *)__db_env_lock_stat(arg1,arg2); + result = (DB_LOCK_STAT *)DbEnv_lock_stat(arg1,arg2); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (*jenv)->NewObject(jenv, lock_stat_class, lock_stat_construct); if (jresult != 
NULL) - __dbj_fill_lock_stat(jenv, lock_stat_class, jresult, result); + __dbj_fill_lock_stat(jenv, jresult, result); __os_ufree(NULL, result); return jresult; } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lk_1conflicts(JNIEnv *jenv, jclass jcls, jlong jarg1, jobjectArray jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1lk_1conflicts(JNIEnv *jenv, jclass jcls, jlong jarg1, jobjectArray jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; struct __db_lk_conflicts arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; { int i, len, err; size_t bytesize; @@ -5572,11 +6156,11 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lk_1conflic return ; } + result = (db_ret_t)DbEnv_set_lk_conflicts(arg1,arg2); - result = (db_ret_t)__db_env_set_lk_conflicts(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } __os_free(NULL, (&arg2)->lk_conflicts); @@ -5584,14 +6168,14 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lk_1conflic } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lk_1detect(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1lk_1detect(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -5599,23 +6183,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lk_1detect( return ; } + result = (db_ret_t)DbEnv_set_lk_detect(arg1,arg2); - result = (db_ret_t)__db_env_set_lk_detect(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lk_1max_1lockers(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1lk_1max_1lockers(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -5623,23 +6207,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lk_1max_1lo return ; } + result = (db_ret_t)DbEnv_set_lk_max_lockers(arg1,arg2); - result = (db_ret_t)__db_env_set_lk_max_lockers(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lk_1max_1locks(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1lk_1max_1locks(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 
0 ; u_int32_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -5647,23 +6231,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lk_1max_1lo return ; } + result = (db_ret_t)DbEnv_set_lk_max_locks(arg1,arg2); - result = (db_ret_t)__db_env_set_lk_max_locks(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lk_1max_1objects(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1lk_1max_1objects(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -5671,61 +6255,61 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lk_1max_1ob return ; } + result = (db_ret_t)DbEnv_set_lk_max_objects(arg1,arg2); - result = (db_ret_t)__db_env_set_lk_max_objects(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1lg_1bsize(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1lg_1bsize(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (u_int32_t)__db_env_get_lg_bsize(arg1); + result = (u_int32_t)DbEnv_get_lg_bsize(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1lg_1dir(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jstring JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1lg_1dir(JNIEnv *jenv, jclass jcls, jlong jarg1) { jstring jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; char *result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (char *)__db_env_get_lg_dir(arg1); + result = (char *)DbEnv_get_lg_dir(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } { if(result) jresult = (*jenv)->NewStringUTF(jenv, result); @@ -5734,67 +6318,67 @@ JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1lg_1dir( } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1lg_1max(JNIEnv *jenv, jclass jcls, jlong jarg1) { 
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1lg_1max(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (u_int32_t)__db_env_get_lg_max(arg1); + result = (u_int32_t)DbEnv_get_lg_max(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1lg_1regionmax(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1lg_1regionmax(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (u_int32_t)__db_env_get_lg_regionmax(arg1); + result = (u_int32_t)DbEnv_get_lg_regionmax(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1log_1archive(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { +JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1log_1archive(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { jobjectArray jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; char **result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -5802,16 +6386,16 @@ JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1log_1arc return 0; } - errno = 0; - result = (char **)__db_env_log_archive(arg1,arg2); + result = (char **)DbEnv_log_archive(arg1,arg2); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } { if(result != NULL) { - /*@../libdb_java/java_typemaps.i,229,STRING_ARRAY_OUT@*/ int i, len; + /*@../libdb_java/java_typemaps.i,291,STRING_ARRAY_OUT@*/ int i, len; len = 0; while (result[len] != NULL) @@ -5831,32 +6415,60 @@ JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1log_1arc } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1log_1compare(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1log_1compare(JNIEnv *jenv, jclass jcls, jobject jarg1, jobject jarg2) { jint jresult = 0 ; DB_LSN *arg1 = (DB_LSN *) 0 ; DB_LSN *arg2 = (DB_LSN *) 0 ; int result; + DB_LSN lsn1 ; + DB_LSN lsn2 ; (void)jenv; (void)jcls; - arg1 = *(DB_LSN **)&jarg1; - arg2 = *(DB_LSN **)&jarg2; + + /* XXX: TODO */ + arg1 = &lsn1; + + + /* XXX: TODO */ + arg2 = &lsn2; + + + if (jarg1 == NULL) { + __dbj_throw(jenv, 
EINVAL, "LogSequenceNumber must not be null", NULL, NULL); + return 0; + } + + + if (jarg2 == NULL) { + __dbj_throw(jenv, EINVAL, "LogSequenceNumber must not be null", NULL, NULL); + return 0; + } + result = (int)DbEnv_log_compare((DB_LSN const *)arg1,(DB_LSN const *)arg2); jresult = (jint)result; + + /* XXX: TODO */ + /* -- __dbj_dbt_release(jenv, jarg1, arg1, &lsn1); */ + + + /* XXX: TODO */ + /* -- __dbj_dbt_release(jenv, jarg2, arg2, &lsn2); */ + return jresult; } -JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1log_1cursor(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { +JNIEXPORT jlong JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1log_1cursor(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { jlong jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; DB_LOGC *result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -5864,89 +6476,120 @@ JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1log_1cursor(JNI return 0; } - errno = 0; - result = (DB_LOGC *)__db_env_log_cursor(arg1,arg2); + result = (DB_LOGC *)DbEnv_log_cursor(arg1,arg2); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } *(DB_LOGC **)&jresult = result; return jresult; } -JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1log_1file(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2) { +JNIEXPORT jstring JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1log_1file(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { jstring jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; DB_LSN *arg2 = (DB_LSN *) 0 ; char *result; + DB_LSN lsn2 ; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; - arg2 = *(DB_LSN **)&jarg2; + arg1 = *(struct DbEnv **)&jarg1; + + /* XXX: TODO */ + arg2 = &lsn2; + if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } + if (jarg2 == NULL) { + __dbj_throw(jenv, EINVAL, "LogSequenceNumber must not be null", NULL, NULL); + return 0; + } + errno = 0; - result = (char *)__db_env_log_file(arg1,arg2); + result = (char *)DbEnv_log_file(arg1,arg2); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } { if(result) jresult = (*jenv)->NewStringUTF(jenv, result); } + + /* XXX: TODO */ + /* -- __dbj_dbt_release(jenv, jarg2, arg2, &lsn2); */ + return jresult; } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1log_1flush(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1log_1flush(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; DB_LSN *arg2 = (DB_LSN *) 0 ; db_ret_t result; + DB_LSN lsn2 ; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; - arg2 = *(DB_LSN **)&jarg2; + arg1 = *(struct DbEnv **)&jarg1; + + /* XXX: TODO */ + arg2 = &lsn2; + if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return ; } + if (jarg2 == NULL) { + __dbj_throw(jenv, EINVAL, "LogSequenceNumber must not be null", NULL, NULL); + return ; + } + - result = 
(db_ret_t)__db_env_log_flush(arg1,(DB_LSN const *)arg2); + result = (db_ret_t)DbEnv_log_flush(arg1,(DB_LSN const *)arg2); + + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + + /* XXX: TODO */ + /* -- __dbj_dbt_release(jenv, jarg2, arg2, &lsn2); */ } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1log_1put(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jobject jarg3, jint jarg4) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1log_1put(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2, jobject jarg3, jint jarg4) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; DB_LSN *arg2 = (DB_LSN *) 0 ; DBT *arg3 = (DBT *) 0 ; u_int32_t arg4 ; db_ret_t result; + DB_LSN lsn2 ; DBT_LOCKED ldbt3 ; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; - arg2 = *(DB_LSN **)&jarg2; + arg1 = *(struct DbEnv **)&jarg1; + + /* XXX: TODO */ + arg2 = &lsn2; - if (__dbj_dbt_copyin(jenv, &ldbt3, jarg3) != 0) - return ; - arg3 = &ldbt3.dbt; + if (__dbj_dbt_copyin(jenv, &ldbt3, &arg3, jarg3, 0) != 0) { + return ; + } arg4 = (u_int32_t)jarg4; if (jarg1 == 0) { @@ -5954,30 +6597,35 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1log_1put(JNIEnv return ; } + if (jarg2 == NULL) { + __dbj_throw(jenv, EINVAL, "LogSequenceNumber must not be null", NULL, NULL); + return ; + } - result = (db_ret_t)__db_env_log_put(arg1,arg2,(DBT const *)arg3,arg4); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + result = (db_ret_t)DbEnv_log_put(arg1,arg2,(DBT const *)arg3,arg4); - if (ldbt3.jarr != NULL) { - (*jenv)->ReleaseByteArrayElements(jenv, ldbt3.jarr, - ldbt3.orig_data, 0); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); } + + /* XXX: TODO */ + /* -- __dbj_dbt_release(jenv, jarg2, arg2, &lsn2); */ + + __dbj_dbt_release(jenv, jarg3, arg3, &ldbt3); } -JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1log_1stat(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { +JNIEXPORT jobject JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1log_1stat(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { jobject jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; DB_LOG_STAT *result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -5985,31 +6633,31 @@ JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1log_1stat(JNI return 0; } - errno = 0; - result = (DB_LOG_STAT *)__db_env_log_stat(arg1,arg2); + result = (DB_LOG_STAT *)DbEnv_log_stat(arg1,arg2); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (*jenv)->NewObject(jenv, log_stat_class, log_stat_construct); if (jresult != NULL) - __dbj_fill_log_stat(jenv, log_stat_class, jresult, result); + __dbj_fill_log_stat(jenv, jresult, result); __os_ufree(NULL, result); return jresult; } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lg_1bsize(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1lg_1bsize(JNIEnv *jenv, jclass jcls, jlong 
jarg1, jint jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -6017,23 +6665,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lg_1bsize(J return ; } + result = (db_ret_t)DbEnv_set_lg_bsize(arg1,arg2); - result = (db_ret_t)__db_env_set_lg_bsize(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lg_1dir(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1lg_1dir(JNIEnv *jenv, jclass jcls, jlong jarg1, jstring jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; char *arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; { arg2 = 0; if (jarg2) { @@ -6047,11 +6695,11 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lg_1dir(JNI return ; } + result = (db_ret_t)DbEnv_set_lg_dir(arg1,(char const *)arg2); - result = (db_ret_t)__db_env_set_lg_dir(arg1,(char const *)arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } { if (arg2) (*jenv)->ReleaseStringUTFChars(jenv, jarg2, arg2); @@ -6059,14 +6707,14 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lg_1dir(JNI } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lg_1max(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1lg_1max(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -6074,23 +6722,23 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lg_1max(JNI return ; } + result = (db_ret_t)DbEnv_set_lg_max(arg1,arg2); - result = (db_ret_t)__db_env_set_lg_max(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lg_1regionmax(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1lg_1regionmax(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -6098,102 +6746,102 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1lg_1regionm return ; } + result = (db_ret_t)DbEnv_set_lg_regionmax(arg1,arg2); - result = (db_ret_t)__db_env_set_lg_regionmax(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) 
{ + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1cachesize(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jlong JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1cachesize(JNIEnv *jenv, jclass jcls, jlong jarg1) { jlong jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; jlong result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = __db_env_get_cachesize(arg1); + result = DbEnv_get_cachesize(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1cachesize_1ncache(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1cachesize_1ncache(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; int result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (int)__db_env_get_cachesize_ncache(arg1); + result = (int)DbEnv_get_cachesize_ncache(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1mp_1mmapsize(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jlong JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1mp_1mmapsize(JNIEnv *jenv, jclass jcls, jlong jarg1) { jlong jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; size_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = __db_env_get_mp_mmapsize(arg1); + result = DbEnv_get_mp_mmapsize(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (jlong)result; return jresult; } -JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1memp_1stat(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { +JNIEXPORT jobject JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1memp_1stat(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { jobject jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; DB_MPOOL_STAT *result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -6201,32 +6849,32 @@ JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1memp_1stat(JN return 0; } - errno = 0; - result = (DB_MPOOL_STAT *)__db_env_memp_stat(arg1,arg2); + result = (DB_MPOOL_STAT *)DbEnv_memp_stat(arg1,arg2); - if (!DB_RETOK_STD(errno)) - 
__dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (*jenv)->NewObject(jenv, mpool_stat_class, mpool_stat_construct); if (jresult != NULL) - __dbj_fill_mpool_stat(jenv, mpool_stat_class, jresult, result); + __dbj_fill_mpool_stat(jenv, jresult, result); __os_ufree(NULL, result); return jresult; } -JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1memp_1fstat(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { +JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1memp_1fstat(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { jobjectArray jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; DB_MPOOL_FSTAT **result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -6234,12 +6882,12 @@ JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1memp_1fs return 0; } - errno = 0; - result = (DB_MPOOL_FSTAT **)__db_env_memp_fstat(arg1,arg2); + result = (DB_MPOOL_FSTAT **)DbEnv_memp_fstat(arg1,arg2); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } { int i, len; @@ -6260,7 +6908,7 @@ JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1memp_1fs return 0; /* an exception is pending */ } (*jenv)->SetObjectArrayElement(jenv, jresult, i, obj); - __dbj_fill_mpool_fstat(jenv, mpool_fstat_class, obj, result[i]); + __dbj_fill_mpool_fstat(jenv, obj, result[i]); } __os_ufree(NULL, result); } @@ -6268,15 +6916,15 @@ JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1memp_1fs } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1memp_1trickle(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1memp_1trickle(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { jint jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; int arg2 ; int result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (int)jarg2; if (jarg1 == 0) { @@ -6284,79 +6932,79 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1memp_1trickle(JN return 0; } - errno = 0; - result = (int)__db_env_memp_trickle(arg1,arg2); + result = (int)DbEnv_memp_trickle(arg1,arg2); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1tx_1max(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1tx_1max(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (u_int32_t)__db_env_get_tx_max(arg1); + result = (u_int32_t)DbEnv_get_tx_max(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, 
NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1tx_1timestamp(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jlong JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1tx_1timestamp(JNIEnv *jenv, jclass jcls, jlong jarg1) { jlong jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; time_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = __db_env_get_tx_timestamp(arg1); + result = DbEnv_get_tx_timestamp(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (jlong)result; return jresult; } -JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1timeout(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { +JNIEXPORT jlong JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1timeout(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { jlong jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; db_timeout_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -6364,28 +7012,28 @@ JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1timeout(JN return 0; } - errno = 0; - result = (db_timeout_t)__db_env_get_timeout(arg1,arg2); + result = (db_timeout_t)DbEnv_get_timeout(arg1,arg2); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (jlong)result; return jresult; } -JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1txn_1begin(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jint jarg3) { +JNIEXPORT jlong JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1txn_1begin(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jint jarg3) { jlong jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; DB_TXN *arg2 = (DB_TXN *) 0 ; u_int32_t arg3 ; DB_TXN *result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = *(DB_TXN **)&jarg2; arg3 = (u_int32_t)jarg3; @@ -6394,20 +7042,20 @@ JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1txn_1begin(JNIE return 0; } - errno = 0; - result = (DB_TXN *)__db_env_txn_begin(arg1,arg2,arg3); + result = (DB_TXN *)DbEnv_txn_begin(arg1,arg2,arg3); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } *(DB_TXN **)&jresult = result; return jresult; } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1txn_1checkpoint(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2, jint jarg3, jint jarg4) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1txn_1checkpoint(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2, jint jarg3, jint jarg4) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; 
u_int32_t arg3 ; u_int32_t arg4 ; @@ -6415,7 +7063,7 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1txn_1checkpoint( (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; arg3 = (u_int32_t)jarg3; arg4 = (u_int32_t)jarg4; @@ -6425,25 +7073,25 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1txn_1checkpoint( return ; } + result = (db_ret_t)DbEnv_txn_checkpoint(arg1,arg2,arg3,arg4); - result = (db_ret_t)__db_env_txn_checkpoint(arg1,arg2,arg3,arg4); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1txn_1recover(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2, jint jarg3) { +JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1txn_1recover(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2, jint jarg3) { jobjectArray jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; int arg2 ; u_int32_t arg3 ; DB_PREPLIST *result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (int)jarg2; arg3 = (u_int32_t)jarg3; @@ -6452,12 +7100,12 @@ JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1txn_1rec return 0; } - errno = 0; - result = (DB_PREPLIST *)__db_env_txn_recover(arg1,arg2,arg3); + result = (DB_PREPLIST *)DbEnv_txn_recover(arg1,arg2,arg3); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } { int i, len; @@ -6490,15 +7138,15 @@ JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1txn_1rec } -JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1txn_1stat(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { +JNIEXPORT jobject JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1txn_1stat(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { jobject jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; DB_TXN_STAT *result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -6506,19 +7154,19 @@ JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1txn_1stat(JNI return 0; } - errno = 0; - result = (DB_TXN_STAT *)__db_env_txn_stat(arg1,arg2); + result = (DB_TXN_STAT *)DbEnv_txn_stat(arg1,arg2); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } { unsigned int i; jobjectArray actives; jresult = (*jenv)->NewObject(jenv, txn_stat_class, txn_stat_construct); if (jresult != NULL) - __dbj_fill_txn_stat(jenv, txn_stat_class, jresult, result); + __dbj_fill_txn_stat(jenv, jresult, result); actives = (*jenv)->NewObjectArray(jenv, (jsize)result->st_nactive, txn_active_class, 0); @@ -6526,7 +7174,7 @@ JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1txn_1stat(JNI __os_ufree(NULL, result); return 0; } - (*jenv)->SetObjectField(jenv, jresult, txn_stat_active_fid, actives); + (*jenv)->SetObjectField(jenv, jresult, txn_stat_st_txnarray_fid, actives); for (i = 0; i < result->st_nactive; i++) { jobject obj = (*jenv)->NewObject(jenv, 
txn_active_class, txn_active_construct); @@ -6535,8 +7183,7 @@ JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1txn_1stat(JNI return 0; /* an exception is pending */ } (*jenv)->SetObjectArrayElement(jenv, actives, (jsize)i, obj); - __dbj_fill_txn_active(jenv, txn_active_class, obj, - &result->st_txnarray[i]); + __dbj_fill_txn_active(jenv, obj, &result->st_txnarray[i]); } __os_ufree(NULL, result); } @@ -6544,67 +7191,71 @@ JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1txn_1stat(JNI } -JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1rep_1limit(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jlong JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1rep_1limit(JNIEnv *jenv, jclass jcls, jlong jarg1) { jlong jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; jlong result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = __db_env_get_rep_limit(arg1); + result = DbEnv_get_rep_limit(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1rep_1elect(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2, jint jarg3, jint jarg4) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1rep_1elect(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2, jint jarg3, jint jarg4, jint jarg5, jint jarg6) { jint jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; int arg2 ; int arg3 ; - u_int32_t arg4 ; + int arg4 ; + u_int32_t arg5 ; + u_int32_t arg6 ; int result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (int)jarg2; arg3 = (int)jarg3; - arg4 = (u_int32_t)jarg4; + arg4 = (int)jarg4; + arg5 = (u_int32_t)jarg5; + arg6 = (u_int32_t)jarg6; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - errno = 0; - result = (int)__db_env_rep_elect(arg1,arg2,arg3,arg4); + result = (int)DbEnv_rep_elect(arg1,arg2,arg3,arg4,arg5,arg6); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (jint)result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1rep_1process_1message(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2, jobject jarg3, jobject jarg4, jlong jarg5) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1rep_1process_1message(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2, jobject jarg3, jobject jarg4, jobject jarg5) { jint jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; DBT *arg2 = (DBT *) 0 ; DBT *arg3 = (DBT *) 0 ; int *arg4 = (int *) 0 ; @@ -6613,53 +7264,61 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1rep_1process_1me DBT_LOCKED ldbt2 ; DBT_LOCKED ldbt3 ; int id4 ; + DB_LSN lsn5 ; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; - - if (__dbj_dbt_copyin(jenv, &ldbt2, jarg2) != 0) - return 0; - arg2 = &ldbt2.dbt; + arg1 = *(struct DbEnv **)&jarg1; + if 
(__dbj_dbt_copyin(jenv, &ldbt2, &arg2, jarg2, 0) != 0) { + return 0; + } - if (__dbj_dbt_copyin(jenv, &ldbt3, jarg3) != 0) - return 0; - arg3 = &ldbt3.dbt; - + if (__dbj_dbt_copyin(jenv, &ldbt3, &arg3, jarg3, 0) != 0) { + return 0; + } id4 = (*jenv)->GetIntField(jenv, jarg4, rep_processmsg_envid); arg4 = &id4; - arg5 = *(DB_LSN **)&jarg5; + + /* XXX: TODO */ + arg5 = &lsn5; + if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } + if (jarg5 == NULL) { + __dbj_throw(jenv, EINVAL, "LogSequenceNumber must not be null", NULL, NULL); + return 0; + } + - result = (int)__db_env_rep_process_message(arg1,arg2,arg3,arg4,arg5); + result = (int)DbEnv_rep_process_message(arg1,arg2,arg3,arg4,arg5); - if (!DB_RETOK_REPPMSG(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_REPPMSG(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } jresult = (jint)result; (*jenv)->SetIntField(jenv, jarg4, rep_processmsg_envid, *arg4); + __dbj_dbt_release(jenv, jarg2, arg2, &ldbt2); + __dbj_dbt_release(jenv, jarg3, arg3, &ldbt3); - __dbj_dbt_release(jenv, jarg2, arg2, &ldbt2); - - - __dbj_dbt_release(jenv, jarg3, arg3, &ldbt3); + /* XXX: TODO */ + /* -- __dbj_dbt_release(jenv, jarg5, arg5, &lsn5); */ return jresult; } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1rep_1start(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2, jint jarg3) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1rep_1start(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2, jint jarg3) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; DBT *arg2 = (DBT *) 0 ; u_int32_t arg3 ; db_ret_t result; @@ -6667,12 +7326,11 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1rep_1start(JNIEn (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; - - if (__dbj_dbt_copyin(jenv, &ldbt2, jarg2) != 0) - return ; - arg2 = &ldbt2.dbt; + arg1 = *(struct DbEnv **)&jarg1; + if (__dbj_dbt_copyin(jenv, &ldbt2, &arg2, jarg2, 1) != 0) { + return ; + } arg3 = (u_int32_t)jarg3; if (jarg1 == 0) { @@ -6680,27 +7338,25 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1rep_1start(JNIEn return ; } + result = (db_ret_t)DbEnv_rep_start(arg1,arg2,arg3); - result = (db_ret_t)__db_env_rep_start(arg1,arg2,arg3); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); - - - __dbj_dbt_release(jenv, jarg2, arg2, &ldbt2); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } + __dbj_dbt_release(jenv, jarg2, arg2, &ldbt2); } -JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1rep_1stat(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { +JNIEXPORT jobject JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1rep_1stat(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { jobject jresult = 0 ; - struct __db_env *arg1 = (struct __db_env *) 0 ; + struct DbEnv *arg1 = (struct DbEnv *) 0 ; u_int32_t arg2 ; DB_REP_STAT *result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -6708,31 +7364,31 @@ JNIEXPORT jobject JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1rep_1stat(JNI return 0; } - errno = 0; - result = (DB_REP_STAT *)__db_env_rep_stat(arg1,arg2); + result = (DB_REP_STAT *)DbEnv_rep_stat(arg1,arg2); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(errno)) { + 
__dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } jresult = (*jenv)->NewObject(jenv, rep_stat_class, rep_stat_construct); if (jresult != NULL) - __dbj_fill_rep_stat(jenv, rep_stat_class, jresult, result); + __dbj_fill_rep_stat(jenv, jresult, result); __os_ufree(NULL, result); return jresult; } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1rep_1limit(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1rep_1limit(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; jlong arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = jarg2; if (jarg1 == 0) { @@ -6740,24 +7396,24 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1rep_1limit( return ; } + result = (db_ret_t)DbEnv_set_rep_limit(arg1,arg2); - result = (db_ret_t)__db_env_set_rep_limit(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1rep_1transport(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2, jobject jarg3) { - struct __db_env *arg1 = (struct __db_env *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1set_1rep_1transport(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2, jobject jarg3) { + struct DbEnv *arg1 = (struct DbEnv *) 0 ; int arg2 ; int (*arg3)(DB_ENV *,DBT const *,DBT const *,DB_LSN const *,int,u_int32_t) = (int (*)(DB_ENV *,DBT const *,DBT const *,DB_LSN const *,int,u_int32_t)) 0 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_env **)&jarg1; + arg1 = *(struct DbEnv **)&jarg1; arg2 = (int)jarg2; arg3 = (jarg3 == NULL) ? 
NULL : __dbj_rep_transport; @@ -6768,16 +7424,16 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1set_1rep_1transp return ; } + result = (db_ret_t)DbEnv_set_rep_transport(arg1,arg2,arg3); - result = (db_ret_t)__db_env_set_rep_transport(arg1,arg2,arg3); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, JDBENV); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, JDBENV); + } } -JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1strerror(JNIEnv *jenv, jclass jcls, jint jarg1) { +JNIEXPORT jstring JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1strerror(JNIEnv *jenv, jclass jcls, jint jarg1) { jstring jresult = 0 ; int arg1 ; char *result; @@ -6794,7 +7450,7 @@ JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1strerror(JNIE } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1version_1major(JNIEnv *jenv, jclass jcls) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1version_1major(JNIEnv *jenv, jclass jcls) { jint jresult = 0 ; int result; @@ -6807,7 +7463,7 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1version_1ma } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1version_1minor(JNIEnv *jenv, jclass jcls) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1version_1minor(JNIEnv *jenv, jclass jcls) { jint jresult = 0 ; int result; @@ -6820,7 +7476,7 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1version_1mi } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1version_1patch(JNIEnv *jenv, jclass jcls) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1version_1patch(JNIEnv *jenv, jclass jcls) { jint jresult = 0 ; int result; @@ -6833,7 +7489,7 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1version_1pa } -JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1version_1string(JNIEnv *jenv, jclass jcls) { +JNIEXPORT jstring JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1get_1version_1string(JNIEnv *jenv, jclass jcls) { jstring jresult = 0 ; char *result; @@ -6848,36 +7504,30 @@ JNIEXPORT jstring JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1get_1version_ } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbTxn_1abort0(JNIEnv *jenv, jclass jcls, jlong jarg1) { - struct __db_txn *arg1 = (struct __db_txn *) 0 ; - db_ret_t result; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_delete_1DbLock(JNIEnv *jenv, jclass jcls, jlong jarg1) { + struct DbLock *arg1 = (struct DbLock *) 0 ; (void)jenv; (void)jcls; - arg1 = *(struct __db_txn **)&jarg1; + arg1 = *(struct DbLock **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return ; } - - - result = (db_ret_t)__db_txn_abort(arg1); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, NULL); + delete_DbLock(arg1); } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbTxn_1commit0(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db_txn *arg1 = (struct __db_txn *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbLogc_1close0(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct DbLogc *arg1 = (struct DbLogc *) 0 ; u_int32_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_txn **)&jarg1; + arg1 = *(struct DbLogc **)&jarg1; arg2 = (u_int32_t)jarg2; if 
(jarg1 == 0) { @@ -6885,130 +7535,248 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbTxn_1commit0(JNIEnv * return ; } + result = (db_ret_t)DbLogc_close(arg1,arg2); - result = (db_ret_t)__db_txn_commit(arg1,arg2); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, NULL); + } - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, NULL); +} + + +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbLogc_1get(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2, jobject jarg3, jint jarg4) { + jint jresult = 0 ; + struct DbLogc *arg1 = (struct DbLogc *) 0 ; + DB_LSN *arg2 = (DB_LSN *) 0 ; + DBT *arg3 = (DBT *) 0 ; + u_int32_t arg4 ; + int result; + DB_LSN lsn2 ; + DBT_LOCKED ldbt3 ; + + (void)jenv; + (void)jcls; + arg1 = *(struct DbLogc **)&jarg1; + + /* XXX: TODO */ + arg2 = &lsn2; + + + if (__dbj_dbt_copyin(jenv, &ldbt3, &arg3, jarg3, 0) != 0) { + return 0; + } + arg4 = (u_int32_t)jarg4; + + if (jarg1 == 0) { + __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); + return 0; + } + if (jarg2 == NULL) { + __dbj_throw(jenv, EINVAL, "LogSequenceNumber must not be null", NULL, NULL); + return 0; + } + + + result = (int)DbLogc_get(arg1,arg2,arg3,arg4); + + if (!DB_RETOK_LGGET(result)) { + __dbj_throw(jenv, result, NULL, NULL, NULL); + } + + jresult = (jint)result; + + /* XXX: TODO */ + /* -- __dbj_dbt_release(jenv, jarg2, arg2, &lsn2); */ + + __dbj_dbt_release(jenv, jarg3, arg3, &ldbt3); + return jresult; } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbTxn_1discard0(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db_txn *arg1 = (struct __db_txn *) 0 ; - u_int32_t arg2 ; +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbMpoolFile_1get_1priority(JNIEnv *jenv, jclass jcls, jlong jarg1) { + jint jresult = 0 ; + struct DbMpoolFile *arg1 = (struct DbMpoolFile *) 0 ; + DB_CACHE_PRIORITY result; + + (void)jenv; + (void)jcls; + arg1 = *(struct DbMpoolFile **)&jarg1; + + if (jarg1 == 0) { + __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); + return 0; + } + + errno = 0; + result = (DB_CACHE_PRIORITY)DbMpoolFile_get_priority(arg1); + + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, NULL); + } + + jresult = (jint)result; + return jresult; +} + + +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbMpoolFile_1set_1priority(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct DbMpoolFile *arg1 = (struct DbMpoolFile *) 0 ; + DB_CACHE_PRIORITY arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_txn **)&jarg1; - arg2 = (u_int32_t)jarg2; + arg1 = *(struct DbMpoolFile **)&jarg1; + arg2 = (DB_CACHE_PRIORITY)jarg2; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return ; } + result = (db_ret_t)DbMpoolFile_set_priority(arg1,arg2); - result = (db_ret_t)__db_txn_discard(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, NULL); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, NULL); + } } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbTxn_1id(JNIEnv *jenv, jclass jcls, jlong jarg1) { +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbMpoolFile_1get_1flags(JNIEnv *jenv, jclass jcls, jlong jarg1) { jint jresult = 0 ; - struct __db_txn *arg1 = (struct __db_txn *) 0 ; + struct DbMpoolFile *arg1 = (struct DbMpoolFile *) 0 ; u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_txn **)&jarg1; + arg1 = 
*(struct DbMpoolFile **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - result = (u_int32_t)__db_txn_id(arg1); + errno = 0; + result = (u_int32_t)DbMpoolFile_get_flags(arg1); + + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, NULL); + } jresult = (jint)result; return jresult; } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbTxn_1prepare(JNIEnv *jenv, jclass jcls, jlong jarg1, jbyteArray jarg2) { - struct __db_txn *arg1 = (struct __db_txn *) 0 ; - u_int8_t *arg2 = (u_int8_t *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbMpoolFile_1set_1flags(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2, jboolean jarg3) { + struct DbMpoolFile *arg1 = (struct DbMpoolFile *) 0 ; + u_int32_t arg2 ; + int_bool arg3 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_txn **)&jarg1; - - arg2 = (u_int8_t *)(*jenv)->GetByteArrayElements(jenv, jarg2, NULL); - + arg1 = *(struct DbMpoolFile **)&jarg1; + arg2 = (u_int32_t)jarg2; + arg3 = (jarg3 == JNI_TRUE); if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return ; } + result = (db_ret_t)DbMpoolFile_set_flags(arg1,arg2,arg3); - if ((*jenv)->GetArrayLength(jenv, jarg2) < DB_XIDDATASIZE) { - __dbj_throw(jenv, EINVAL, - "DbTxn.prepare gid array must be >= 128 bytes", NULL, TXN2JDBENV); - return ; + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, NULL); } +} + + +JNIEXPORT jlong JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbMpoolFile_1get_1maxsize(JNIEnv *jenv, jclass jcls, jlong jarg1) { + jlong jresult = 0 ; + struct DbMpoolFile *arg1 = (struct DbMpoolFile *) 0 ; + jlong result; - result = (db_ret_t)__db_txn_prepare(arg1,arg2); + (void)jenv; + (void)jcls; + arg1 = *(struct DbMpoolFile **)&jarg1; - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, TXN2JDBENV); + if (jarg1 == 0) { + __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); + return 0; + } + errno = 0; + result = DbMpoolFile_get_maxsize(arg1); - (*jenv)->ReleaseByteArrayElements(jenv, jarg2, (jbyte *)arg2, 0); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, NULL); + } + jresult = result; + return jresult; } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbTxn_1set_1timeout(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jint jarg3) { - struct __db_txn *arg1 = (struct __db_txn *) 0 ; - db_timeout_t arg2 ; - u_int32_t arg3 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbMpoolFile_1set_1maxsize(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2) { + struct DbMpoolFile *arg1 = (struct DbMpoolFile *) 0 ; + jlong arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_txn **)&jarg1; - arg2 = (db_timeout_t)jarg2; - arg3 = (u_int32_t)jarg3; + arg1 = *(struct DbMpoolFile **)&jarg1; + arg2 = jarg2; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return ; } + result = (db_ret_t)DbMpoolFile_set_maxsize(arg1,arg2); + + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, NULL); + } + +} + + +JNIEXPORT jlong JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_new_1DbSequence(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + jlong jresult = 0 ; + DB *arg1 = (DB *) 0 ; + u_int32_t arg2 ; + struct DbSequence *result; - result = (db_ret_t)__db_txn_set_timeout(arg1,arg2,arg3); + (void)jenv; + (void)jcls; + arg1 = *(DB **)&jarg1; + arg2 = (u_int32_t)jarg2; - if 
(!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, TXN2JDBENV); + errno = 0; + result = (struct DbSequence *)new_DbSequence(arg1,arg2); + + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, NULL); + } + *(struct DbSequence **)&jresult = result; + return jresult; } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbLogc_1close0(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db_log_cursor *arg1 = (struct __db_log_cursor *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1close0(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct DbSequence *arg1 = (struct DbSequence *) 0 ; u_int32_t arg2 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_log_cursor **)&jarg1; + arg1 = *(struct DbSequence **)&jarg1; arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { @@ -7016,33 +7784,28 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbLogc_1close0(JNIEnv * return ; } + result = (db_ret_t)DbSequence_close(arg1,arg2); - result = (db_ret_t)__db_log_cursor_close(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, NULL); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, NULL); + } } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbLogc_1get(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jobject jarg3, jint jarg4) { - jint jresult = 0 ; - struct __db_log_cursor *arg1 = (struct __db_log_cursor *) 0 ; - DB_LSN *arg2 = (DB_LSN *) 0 ; - DBT *arg3 = (DBT *) 0 ; +JNIEXPORT jlong JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1get(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jint jarg3, jint jarg4) { + jlong jresult = 0 ; + struct DbSequence *arg1 = (struct DbSequence *) 0 ; + DB_TXN *arg2 = (DB_TXN *) 0 ; + int32_t arg3 ; u_int32_t arg4 ; - int result; - DBT_LOCKED ldbt3 ; + db_seq_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_log_cursor **)&jarg1; - arg2 = *(DB_LSN **)&jarg2; - - if (__dbj_dbt_copyin(jenv, &ldbt3, jarg3) != 0) - return 0; - arg3 = &ldbt3.dbt; - + arg1 = *(struct DbSequence **)&jarg1; + arg2 = *(DB_TXN **)&jarg2; + arg3 = (int32_t)jarg3; arg4 = (u_int32_t)jarg4; if (jarg1 == 0) { @@ -7050,268 +7813,515 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbLogc_1get(JNIEnv *jen return 0; } + errno = 0; + result = (db_seq_t)DbSequence_get(arg1,arg2,arg3,arg4); - result = (int)__db_log_cursor_get(arg1,arg2,arg3,arg4); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, NULL); + } - if (!DB_RETOK_LGGET(result)) - __dbj_throw(jenv, result, NULL, NULL, NULL); + jresult = (jlong)result; + return jresult; +} + + +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1get_1cachesize(JNIEnv *jenv, jclass jcls, jlong jarg1) { + jint jresult = 0 ; + struct DbSequence *arg1 = (struct DbSequence *) 0 ; + int32_t result; - jresult = (jint)result; + (void)jenv; + (void)jcls; + arg1 = *(struct DbSequence **)&jarg1; - __dbj_dbt_release(jenv, jarg3, arg3, &ldbt3); + if (jarg1 == 0) { + __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); + return 0; + } + + errno = 0; + result = (int32_t)DbSequence_get_cachesize(arg1); + + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, NULL); + } + jresult = (jint)result; return jresult; } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_delete_1DbLock(JNIEnv *jenv, jclass jcls, jlong jarg1) { - struct __db_lock_u *arg1 = (struct __db_lock_u *) 0 ; +JNIEXPORT jlong JNICALL 
Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1get_1db(JNIEnv *jenv, jclass jcls, jlong jarg1) { + jlong jresult = 0 ; + struct DbSequence *arg1 = (struct DbSequence *) 0 ; + DB *result; (void)jenv; (void)jcls; - arg1 = *(struct __db_lock_u **)&jarg1; + arg1 = *(struct DbSequence **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); - return ; + return 0; } - delete___db_lock_u(arg1); + errno = 0; + result = (DB *)DbSequence_get_db(arg1); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, NULL); + } + + *(DB **)&jresult = result; + return jresult; } -JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_new_1DbLsn(JNIEnv *jenv, jclass jcls, jint jarg1, jint jarg2) { - jlong jresult = 0 ; - u_int32_t arg1 ; - u_int32_t arg2 ; - struct __db_lsn *result; +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1get_1flags(JNIEnv *jenv, jclass jcls, jlong jarg1) { + jint jresult = 0 ; + struct DbSequence *arg1 = (struct DbSequence *) 0 ; + u_int32_t result; (void)jenv; (void)jcls; - arg1 = (u_int32_t)jarg1; - arg2 = (u_int32_t)jarg2; + arg1 = *(struct DbSequence **)&jarg1; + + if (jarg1 == 0) { + __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); + return 0; + } errno = 0; - result = (struct __db_lsn *)new___db_lsn(arg1,arg2); + result = (u_int32_t)DbSequence_get_flags(arg1); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, NULL); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, NULL); + } - *(struct __db_lsn **)&jresult = result; + jresult = (jint)result; return jresult; } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_delete_1DbLsn(JNIEnv *jenv, jclass jcls, jlong jarg1) { - struct __db_lsn *arg1 = (struct __db_lsn *) 0 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1get_1key(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg2) { + struct DbSequence *arg1 = (struct DbSequence *) 0 ; + DBT *arg2 = (DBT *) 0 ; + db_ret_t result; + DBT_LOCKED ldbt2 ; (void)jenv; (void)jcls; - arg1 = *(struct __db_lsn **)&jarg1; + arg1 = *(struct DbSequence **)&jarg1; + + if (__dbj_dbt_copyin(jenv, &ldbt2, &arg2, jarg2, 0) != 0) { + return ; + } if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return ; } - delete___db_lsn(arg1); + result = (db_ret_t)DbSequence_get_key(arg1,arg2); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, NULL); + } + + __dbj_dbt_release(jenv, jarg2, arg2, &ldbt2); } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbLsn_1get_1file(JNIEnv *jenv, jclass jcls, jlong jarg1) { - jint jresult = 0 ; - struct __db_lsn *arg1 = (struct __db_lsn *) 0 ; - u_int32_t result; +JNIEXPORT jlong JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1get_1range_1min(JNIEnv *jenv, jclass jcls, jlong jarg1) { + jlong jresult = 0 ; + struct DbSequence *arg1 = (struct DbSequence *) 0 ; + db_seq_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_lsn **)&jarg1; + arg1 = *(struct DbSequence **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - result = (u_int32_t)__db_lsn_get_file(arg1); + errno = 0; + result = (db_seq_t)DbSequence_get_range_min(arg1); - jresult = (jint)result; + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, NULL); + } + + jresult = (jlong)result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbLsn_1get_1offset(JNIEnv *jenv, jclass 
jcls, jlong jarg1) { - jint jresult = 0 ; - struct __db_lsn *arg1 = (struct __db_lsn *) 0 ; - u_int32_t result; +JNIEXPORT jlong JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1get_1range_1max(JNIEnv *jenv, jclass jcls, jlong jarg1) { + jlong jresult = 0 ; + struct DbSequence *arg1 = (struct DbSequence *) 0 ; + db_seq_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_lsn **)&jarg1; + arg1 = *(struct DbSequence **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } - result = (u_int32_t)__db_lsn_get_offset(arg1); + errno = 0; + result = (db_seq_t)DbSequence_get_range_max(arg1); - jresult = (jint)result; + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, NULL); + } + + jresult = (jlong)result; return jresult; } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbMpoolFile_1get_1priority(JNIEnv *jenv, jclass jcls, jlong jarg1) { - jint jresult = 0 ; - struct __db_mpoolfile *arg1 = (struct __db_mpoolfile *) 0 ; - DB_CACHE_PRIORITY result; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1initial_1value(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2) { + struct DbSequence *arg1 = (struct DbSequence *) 0 ; + db_seq_t arg2 ; + db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_mpoolfile **)&jarg1; + arg1 = *(struct DbSequence **)&jarg1; + arg2 = (db_seq_t)jarg2; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); - return 0; + return ; } + result = (db_ret_t)DbSequence_initial_value(arg1,arg2); - errno = 0; - result = (DB_CACHE_PRIORITY)__db_mpoolfile_get_priority(arg1); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, NULL); + } + +} + + +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1open(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jobject jarg3, jint jarg4) { + struct DbSequence *arg1 = (struct DbSequence *) 0 ; + DB_TXN *arg2 = (DB_TXN *) 0 ; + DBT *arg3 = (DBT *) 0 ; + u_int32_t arg4 ; + db_ret_t result; + DBT_LOCKED ldbt3 ; - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, NULL); + (void)jenv; + (void)jcls; + arg1 = *(struct DbSequence **)&jarg1; + arg2 = *(DB_TXN **)&jarg2; - jresult = (jint)result; - return jresult; + if (__dbj_dbt_copyin(jenv, &ldbt3, &arg3, jarg3, 0) != 0) { + return ; + } + arg4 = (u_int32_t)jarg4; + + if (jarg1 == 0) { + __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); + return ; + } + + result = (db_ret_t)DbSequence_open(arg1,arg2,arg3,arg4); + + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, NULL); + } + + __dbj_dbt_release(jenv, jarg3, arg3, &ldbt3); } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbMpoolFile_1set_1priority(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { - struct __db_mpoolfile *arg1 = (struct __db_mpoolfile *) 0 ; - DB_CACHE_PRIORITY arg2 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1remove0(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jint jarg3) { + struct DbSequence *arg1 = (struct DbSequence *) 0 ; + DB_TXN *arg2 = (DB_TXN *) 0 ; + u_int32_t arg3 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_mpoolfile **)&jarg1; - arg2 = (DB_CACHE_PRIORITY)jarg2; + arg1 = *(struct DbSequence **)&jarg1; + arg2 = *(DB_TXN **)&jarg2; + arg3 = (u_int32_t)jarg3; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return ; } + result = 
(db_ret_t)DbSequence_remove(arg1,arg2,arg3); - result = (db_ret_t)__db_mpoolfile_set_priority(arg1,arg2); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, NULL); + } - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, NULL); +} + + +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1set_1cachesize(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct DbSequence *arg1 = (struct DbSequence *) 0 ; + int32_t arg2 ; + db_ret_t result; + + (void)jenv; + (void)jcls; + arg1 = *(struct DbSequence **)&jarg1; + arg2 = (int32_t)jarg2; + + if (jarg1 == 0) { + __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); + return ; + } + + result = (db_ret_t)DbSequence_set_cachesize(arg1,arg2); + + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, NULL); + } } -JNIEXPORT jint JNICALL Java_com_sleepycat_db_db_1javaJNI_DbMpoolFile_1get_1flags(JNIEnv *jenv, jclass jcls, jlong jarg1) { - jint jresult = 0 ; - struct __db_mpoolfile *arg1 = (struct __db_mpoolfile *) 0 ; - u_int32_t result; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1set_1flags(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct DbSequence *arg1 = (struct DbSequence *) 0 ; + u_int32_t arg2 ; + db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_mpoolfile **)&jarg1; + arg1 = *(struct DbSequence **)&jarg1; + arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); - return 0; + return ; + } + + result = (db_ret_t)DbSequence_set_flags(arg1,arg2); + + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, NULL); + } + +} + + +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1set_1range(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jlong jarg3) { + struct DbSequence *arg1 = (struct DbSequence *) 0 ; + db_seq_t arg2 ; + db_seq_t arg3 ; + db_ret_t result; + + (void)jenv; + (void)jcls; + arg1 = *(struct DbSequence **)&jarg1; + arg2 = (db_seq_t)jarg2; + arg3 = (db_seq_t)jarg3; + + if (jarg1 == 0) { + __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); + return ; } + result = (db_ret_t)DbSequence_set_range(arg1,arg2,arg3); + + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, NULL); + } + +} + + +JNIEXPORT jobject JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbSequence_1stat(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + jobject jresult = 0 ; + struct DbSequence *arg1 = (struct DbSequence *) 0 ; + u_int32_t arg2 ; + DB_SEQUENCE_STAT *result; + + (void)jenv; + (void)jcls; + arg1 = *(struct DbSequence **)&jarg1; + arg2 = (u_int32_t)jarg2; + + if (jarg1 == 0) { + __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); + return 0; + } errno = 0; - result = (u_int32_t)__db_mpoolfile_get_flags(arg1); + result = (DB_SEQUENCE_STAT *)DbSequence_stat(arg1,arg2); - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, NULL); + if (!DB_RETOK_STD(errno)) { + __dbj_throw(jenv, errno, NULL, NULL, NULL); + } + + + jresult = (*jenv)->NewObject(jenv, seq_stat_class, seq_stat_construct); + if (jresult != NULL) + __dbj_fill_seq_stat(jenv, jresult, result); + __os_ufree(NULL, result); - jresult = (jint)result; return jresult; } -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbMpoolFile_1set_1flags(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2, jboolean jarg3) { - struct __db_mpoolfile *arg1 = (struct __db_mpoolfile *) 0 ; +JNIEXPORT void JNICALL 
Java_com_sleepycat_db_internal_db_1javaJNI_DbTxn_1abort0(JNIEnv *jenv, jclass jcls, jlong jarg1) { + struct DbTxn *arg1 = (struct DbTxn *) 0 ; + db_ret_t result; + + (void)jenv; + (void)jcls; + arg1 = *(struct DbTxn **)&jarg1; + + if (jarg1 == 0) { + __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); + return ; + } + + result = (db_ret_t)DbTxn_abort(arg1); + + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, NULL); + } + +} + + +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbTxn_1commit0(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct DbTxn *arg1 = (struct DbTxn *) 0 ; u_int32_t arg2 ; - int_bool arg3 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_mpoolfile **)&jarg1; + arg1 = *(struct DbTxn **)&jarg1; arg2 = (u_int32_t)jarg2; - arg3 = (jarg3 == JNI_TRUE); + if (jarg1 == 0) { + __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); + return ; + } + + result = (db_ret_t)DbTxn_commit(arg1,arg2); + + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, NULL); + } + +} + + +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbTxn_1discard0(JNIEnv *jenv, jclass jcls, jlong jarg1, jint jarg2) { + struct DbTxn *arg1 = (struct DbTxn *) 0 ; + u_int32_t arg2 ; + db_ret_t result; + (void)jenv; + (void)jcls; + arg1 = *(struct DbTxn **)&jarg1; + arg2 = (u_int32_t)jarg2; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return ; } + result = (db_ret_t)DbTxn_discard(arg1,arg2); - result = (db_ret_t)__db_mpoolfile_set_flags(arg1,arg2,arg3); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, NULL); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, NULL); + } } -JNIEXPORT jlong JNICALL Java_com_sleepycat_db_db_1javaJNI_DbMpoolFile_1get_1maxsize(JNIEnv *jenv, jclass jcls, jlong jarg1) { - jlong jresult = 0 ; - struct __db_mpoolfile *arg1 = (struct __db_mpoolfile *) 0 ; - jlong result; +JNIEXPORT jint JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbTxn_1id(JNIEnv *jenv, jclass jcls, jlong jarg1) { + jint jresult = 0 ; + struct DbTxn *arg1 = (struct DbTxn *) 0 ; + u_int32_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_mpoolfile **)&jarg1; + arg1 = *(struct DbTxn **)&jarg1; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return 0; } + result = (u_int32_t)DbTxn_id(arg1); + jresult = (jint)result; + return jresult; +} + + +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbTxn_1prepare(JNIEnv *jenv, jclass jcls, jlong jarg1, jbyteArray jarg2) { + struct DbTxn *arg1 = (struct DbTxn *) 0 ; + u_int8_t *arg2 = (u_int8_t *) 0 ; + db_ret_t result; - errno = 0; - result = __db_mpoolfile_get_maxsize(arg1); + (void)jenv; + (void)jcls; + arg1 = *(struct DbTxn **)&jarg1; - if (!DB_RETOK_STD(errno)) - __dbj_throw(jenv, errno, NULL, NULL, NULL); + arg2 = (u_int8_t *)(*jenv)->GetByteArrayElements(jenv, jarg2, NULL); + + + if (jarg1 == 0) { + __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); + return ; + } + + if ((*jenv)->GetArrayLength(jenv, jarg2) < DB_XIDDATASIZE) { + __dbj_throw(jenv, EINVAL, + "DbTxn.prepare gid array must be >= 128 bytes", NULL, TXN2JDBENV); + return ; + } + + + result = (db_ret_t)DbTxn_prepare(arg1,arg2); + + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, TXN2JDBENV); + } + + + (*jenv)->ReleaseByteArrayElements(jenv, jarg2, (jbyte *)arg2, 0); - jresult = result; - return jresult; } -JNIEXPORT void JNICALL 
Java_com_sleepycat_db_db_1javaJNI_DbMpoolFile_1set_1maxsize(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2) { - struct __db_mpoolfile *arg1 = (struct __db_mpoolfile *) 0 ; - jlong arg2 ; +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbTxn_1set_1timeout(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2, jint jarg3) { + struct DbTxn *arg1 = (struct DbTxn *) 0 ; + db_timeout_t arg2 ; + u_int32_t arg3 ; db_ret_t result; (void)jenv; (void)jcls; - arg1 = *(struct __db_mpoolfile **)&jarg1; - arg2 = jarg2; + arg1 = *(struct DbTxn **)&jarg1; + arg2 = (db_timeout_t)jarg2; + arg3 = (u_int32_t)jarg3; if (jarg1 == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return ; } + result = (db_ret_t)DbTxn_set_timeout(arg1,arg2,arg3); - result = (db_ret_t)__db_mpoolfile_set_maxsize(arg1,arg2); - - if (!DB_RETOK_STD(result)) - __dbj_throw(jenv, result, NULL, NULL, NULL); + if (!DB_RETOK_STD(result)) { + __dbj_throw(jenv, result, NULL, NULL, TXN2JDBENV); + } } diff --git a/db/libdb_java/java-post.pl b/db/libdb_java/java-post.pl new file mode 100644 index 000000000..f54d39259 --- /dev/null +++ b/db/libdb_java/java-post.pl @@ -0,0 +1,23 @@ +#!/usr/bin/env perl -p + +# Hide some symbols +s!public (class db_java|[^(]* delete|[^(]* [A-Za-z_]*0\()!/* package */ $1!; + +# Mark methods that don't throw exceptions +s!public [^(]*get_version_[a-z]*\([^)]*\)!$& /* no exception */!; +s!public [^(]*[ _]err[a-z_]*\([^)]*\)!$& /* no exception */!; +s!public [^(]*[ _]msg[a-z_]*\([^)]*\)!$& /* no exception */!; +s!public [^(]*[ _]message[a-z_]*\([^)]*\)!$& /* no exception */!; +s!public [^(]*[ _]strerror\([^)]*\)!$& /* no exception */!; +s!public [^(]*log_compare\([^)]*\)!$& /* no exception */!; +s!public [^(]* feedback\([^)]*\)!$& /* no exception */!; + +# Mark methods that throw special exceptions +m/DbSequence/ || s!(public [^(]*(open|remove|rename)0?\([^)]*\))( {|;)!$1 throws com.sleepycat.db.DatabaseException, java.io.FileNotFoundException$3!; + +# Everything else throws a DbException +s!(public [^(]*\([^)]*\))(;| {)!$1 throws com.sleepycat.db.DatabaseException$2!; + +# Add initialize methods for Java parts of Db and DbEnv +s!\.new_DbEnv\(.*$!$&\n initialize();!; +s!\.new_Db\(.*$!$&\n initialize(dbenv);!; diff --git a/db/libdb_java/java_callbacks.i b/db/libdb_java/java_callbacks.i index cdd259bc7..2d38738f0 100644 --- a/db/libdb_java/java_callbacks.i +++ b/db/libdb_java/java_callbacks.i @@ -1,4 +1,4 @@ -// Callbacks +/* Callbacks */ %define JAVA_CALLBACK(_sig, _jclass, _name) JAVA_TYPEMAP(_sig, _jclass, jobject) %typemap(javain) _sig %{ (_name##_handler = $javainput) %} @@ -13,19 +13,16 @@ JAVA_TYPEMAP(_sig, _jclass, jobject) %enddef %{ -/* - * We do a dance so that the prefix in the C API points to the DB_ENV. - * The real prefix is stored as a Java string in the DbEnv object. 
- */ -static void __dbj_error(const char *prefix, char *msg) +static void __dbj_error(const DB_ENV *dbenv, const char *prefix, const char *msg) { - DB_ENV *dbenv = (DB_ENV *)prefix; JNIEnv *jenv = __dbj_get_jnienv(); jobject jdbenv = (jobject)DB_ENV_INTERNAL(dbenv); if (jdbenv != NULL) (*jenv)->CallNonvirtualVoidMethod(jenv, jdbenv, dbenv_class, - errcall_method, (*jenv)->NewStringUTF(jenv, msg)); + errcall_method, + (*jenv)->NewStringUTF(jenv, prefix), + (*jenv)->NewStringUTF(jenv, msg)); } static void __dbj_env_feedback(DB_ENV *dbenv, int opcode, int percent) @@ -37,6 +34,16 @@ static void __dbj_env_feedback(DB_ENV *dbenv, int opcode, int percent) env_feedback_method, opcode, percent); } +static void __dbj_message(const DB_ENV *dbenv, const char *msg) +{ + JNIEnv *jenv = __dbj_get_jnienv(); + jobject jdbenv = (jobject)DB_ENV_INTERNAL(dbenv); + + if (jdbenv != NULL) + (*jenv)->CallNonvirtualVoidMethod(jenv, jdbenv, dbenv_class, + msgcall_method, (*jenv)->NewStringUTF(jenv, msg)); +} + static void __dbj_panic(DB_ENV *dbenv, int err) { JNIEnv *jenv = __dbj_get_jnienv(); @@ -126,7 +133,6 @@ static int __dbj_seckey_create(DB *db, jobject jkey, jdata, jresult; jbyteArray jkeyarr, jdataarr; DBT_LOCKED lresult; - void *data_copy; int ret; jkey = (*jenv)->NewObject(jenv, dbt_class, dbt_construct); @@ -144,28 +150,25 @@ static int __dbj_seckey_create(DB *db, ret = (int)(*jenv)->CallNonvirtualIntMethod(jenv, jdb, db_class, seckey_create_method, jkey, jdata, jresult); + if (ret != 0) + goto err; + if ((*jenv)->ExceptionOccurred(jenv)) { /* The exception will be thrown, so this could be any error. */ ret = EINVAL; goto err; } - if ((ret = __dbj_dbt_copyin(jenv, &lresult, jresult)) != 0) + if ((ret = __dbj_dbt_copyin(jenv, &lresult, NULL, jresult, 0)) != 0) goto err; if (lresult.jarr != NULL) { /* - * If there's data, we need to make a copy because we can't - * keep the Java array pinned. + * If there's data, we've got a copy of it (that's the default + * when no Dbt flags are set, so we can safely free the array. */ - memset(result, 0, sizeof (DBT)); *result = lresult.dbt; - if ((ret = __os_umalloc(NULL, result->size, &data_copy)) == 0) - memcpy(data_copy, result->data, result->size); - (*jenv)->ReleaseByteArrayElements(jenv, lresult.jarr, - lresult.orig_data, 0); (*jenv)->DeleteLocalRef(jenv, lresult.jarr); - result->data = data_copy; result->flags |= DB_DBT_APPMALLOC; } @@ -184,7 +187,6 @@ static int __dbj_append_recno(DB *db, DBT *dbt, db_recno_t recno) jobject jdb = (jobject)DB_INTERNAL(db); jobject jdbt; DBT_LOCKED lresult; - void *data_copy; jbyteArray jdbtarr; int ret; @@ -206,21 +208,16 @@ static int __dbj_append_recno(DB *db, DBT *dbt, db_recno_t recno) goto err; } - if ((ret = __dbj_dbt_copyin(jenv, &lresult, jdbt)) != 0) + if ((ret = __dbj_dbt_copyin(jenv, &lresult, NULL, jdbt, 0)) != 0) goto err; if (lresult.jarr != NULL) { /* - * If there's data, we need to make a copy because we can't - * keep the Java array pinned. + * If there's data, we've got a copy of it (that's the default + * when no Dbt flags are set, so we can safely free the array. 
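
Editor's note (not part of the patch): the reworked __dbj_error and the new __dbj_message above push C strings into the handler object stored on the DB_ENV, and the rewritten __dbj_bt_compare a little further on now hands plain byte arrays (rather than freshly built Dbt objects) to the Java comparator. A minimal sketch of that marshalling using only standard JNI calls; the handler object and cached method IDs are assumptions standing in for what the patch caches at load time.

#include <errno.h>
#include <stddef.h>
#include <jni.h>

/*
 * Editorial sketch: how C-side callbacks re-enter Java.  "jhandler" and
 * the method IDs stand in for the objects and cached IDs the patch keeps
 * via DB_ENV_INTERNAL()/DB_INTERNAL(); only the JNI calls are literal.
 */
static void
example_forward_string(JNIEnv *jenv, jobject jhandler, jmethodID mid,
    const char *msg)
{
	/* C string -> java.lang.String, then call back into Java. */
	jstring jmsg = (*jenv)->NewStringUTF(jenv, msg);

	if (jmsg != NULL)
		(*jenv)->CallVoidMethod(jenv, jhandler, mid, jmsg);
}

static int
example_forward_bytes(JNIEnv *jenv, jobject jhandler, jmethodID mid,
    const void *data, size_t size)
{
	/* DBT contents -> a fresh byte[]; copied, never pinned. */
	jbyteArray jarr = (*jenv)->NewByteArray(jenv, (jsize)size);
	int ret;

	if (jarr == NULL)
		return (ENOMEM);	/* An exception is pending. */
	(*jenv)->SetByteArrayRegion(jenv, jarr, 0, (jsize)size,
	    (const jbyte *)data);
	ret = (int)(*jenv)->CallIntMethod(jenv, jhandler, mid, jarr);
	(*jenv)->DeleteLocalRef(jenv, jarr);
	return (ret);
}
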
*/ *dbt = lresult.dbt; - if ((ret = __os_umalloc(db->dbenv, dbt->size, &data_copy)) == 0) - memcpy(data_copy, dbt->data, dbt->size); - (*jenv)->ReleaseByteArrayElements(jenv, lresult.jarr, - lresult.orig_data, 0); (*jenv)->DeleteLocalRef(jenv, lresult.jarr); - dbt->data = data_copy; dbt->flags |= DB_DBT_APPMALLOC; } @@ -234,22 +231,23 @@ static int __dbj_bt_compare(DB *db, const DBT *dbt1, const DBT *dbt2) { JNIEnv *jenv = __dbj_get_jnienv(); jobject jdb = (jobject)DB_INTERNAL(db); - jobject jdbt1, jdbt2; jbyteArray jdbtarr1, jdbtarr2; int ret; - jdbt1 = (*jenv)->NewObject(jenv, dbt_class, dbt_construct); - jdbt2 = (*jenv)->NewObject(jenv, dbt_class, dbt_construct); - if (jdbt1 == NULL || jdbt2 == NULL) - return ENOMEM; /* An exception is pending */ + jdbtarr1 = (*jenv)->NewByteArray(jenv, (jsize)dbt1->size); + if (jdbtarr1 == NULL) + return ENOMEM; + (*jenv)->SetByteArrayRegion(jenv, jdbtarr1, 0, (jsize)dbt1->size, + (jbyte *)dbt1->data); + + jdbtarr2 = (*jenv)->NewByteArray(jenv, (jsize)dbt2->size); + if (jdbtarr2 == NULL) + return ENOMEM; + (*jenv)->SetByteArrayRegion(jenv, jdbtarr2, 0, (jsize)dbt2->size, + (jbyte *)dbt2->data); - __dbj_dbt_copyout(jenv, dbt1, &jdbtarr1, jdbt1); - __dbj_dbt_copyout(jenv, dbt2, &jdbtarr2, jdbt2); - if (jdbtarr1 == NULL || jdbtarr2 == NULL) - return ENOMEM; /* An exception is pending */ - ret = (int)(*jenv)->CallNonvirtualIntMethod(jenv, jdb, db_class, - bt_compare_method, jdbt1, jdbt2); + bt_compare_method, jdbtarr1, jdbtarr2); if ((*jenv)->ExceptionOccurred(jenv)) { /* The exception will be thrown, so this could be any error. */ @@ -258,8 +256,6 @@ static int __dbj_bt_compare(DB *db, const DBT *dbt1, const DBT *dbt2) (*jenv)->DeleteLocalRef(jenv, jdbtarr2); (*jenv)->DeleteLocalRef(jenv, jdbtarr1); - (*jenv)->DeleteLocalRef(jenv, jdbt2); - (*jenv)->DeleteLocalRef(jenv, jdbt1); return (ret); } @@ -357,36 +353,38 @@ static u_int32_t __dbj_h_hash(DB *db, const void *data, u_int32_t len) } %} -JAVA_CALLBACK(void (*db_errcall_fcn)(const char *, char *), - DbErrorHandler, error) -JAVA_CALLBACK(void (*db_feedback_fcn)(DB_ENV *, int, int), - DbEnvFeedbackHandler, env_feedback) +JAVA_CALLBACK(void (*db_errcall_fcn)(const DB_ENV *, const char *, const char *), + com.sleepycat.db.ErrorHandler, error) +JAVA_CALLBACK(void (*env_feedback_fcn)(DB_ENV *, int, int), + com.sleepycat.db.FeedbackHandler, env_feedback) +JAVA_CALLBACK(void (*db_msgcall_fcn)(const DB_ENV *, const char *), + com.sleepycat.db.MessageHandler, message) JAVA_CALLBACK(void (*db_panic_fcn)(DB_ENV *, int), - DbPanicHandler, panic) + com.sleepycat.db.PanicHandler, panic) JAVA_CALLBACK(int (*tx_recover)(DB_ENV *, DBT *, DB_LSN *, db_recops), - DbAppDispatch, app_dispatch) + com.sleepycat.db.LogRecordHandler, app_dispatch) JAVA_CALLBACK(int (*send)(DB_ENV *, const DBT *, const DBT *, const DB_LSN *, int, u_int32_t), - DbRepTransport, rep_transport) + com.sleepycat.db.ReplicationTransport, rep_transport) /* * Db.associate is a special case, because the handler must be set in the * secondary DB - that's what we have in the callback. 
*/ JAVA_CALLBACK(int (*callback)(DB *, const DBT *, const DBT *, DBT *), - DbSecondaryKeyCreate, seckey_create) + com.sleepycat.db.SecondaryKeyCreator, seckey_create) %typemap(javain) int (*callback)(DB *, const DBT *, const DBT *, DBT *) %{ (secondary.seckey_create_handler = $javainput) %} JAVA_CALLBACK(int (*db_append_recno_fcn)(DB *, DBT *, db_recno_t), - DbAppendRecno, append_recno) + com.sleepycat.db.RecordNumberAppender, append_recno) JAVA_CALLBACK(int (*bt_compare_fcn)(DB *, const DBT *, const DBT *), - DbBtreeCompare, bt_compare) + java.util.Comparator, bt_compare) JAVA_CALLBACK(size_t (*bt_prefix_fcn)(DB *, const DBT *, const DBT *), - DbBtreePrefix, bt_prefix) + com.sleepycat.db.BtreePrefixCalculator, bt_prefix) JAVA_CALLBACK(int (*dup_compare_fcn)(DB *, const DBT *, const DBT *), - DbDupCompare, dup_compare) + java.util.Comparator, dup_compare) JAVA_CALLBACK(void (*db_feedback_fcn)(DB *, int, int), - DbFeedbackHandler, db_feedback) + com.sleepycat.db.FeedbackHandler, db_feedback) JAVA_CALLBACK(u_int32_t (*h_hash_fcn)(DB *, const void *, u_int32_t), - DbHash, h_hash) + com.sleepycat.db.Hasher, h_hash) diff --git a/db/libdb_java/java_except.i b/db/libdb_java/java_except.i index fc4eafb7a..bc36cef23 100644 --- a/db/libdb_java/java_except.i +++ b/db/libdb_java/java_except.i @@ -14,8 +14,7 @@ if ($input == 0) { __dbj_throw(jenv, EINVAL, "call on closed handle", NULL, NULL); return $null; - } -%} + }%} %define JAVA_EXCEPT_NONE %exception %{ $action %} @@ -25,8 +24,9 @@ %define JAVA_EXCEPT(_retcheck, _jdbenv) %exception %{ $action - if (!_retcheck(result)) + if (!_retcheck(result)) { __dbj_throw(jenv, result, NULL, NULL, _jdbenv); + } %} %enddef @@ -35,8 +35,9 @@ %exception %{ errno = 0; $action - if (!_retcheck(errno)) + if (!_retcheck(errno)) { __dbj_throw(jenv, errno, NULL, NULL, _jdbenv); + } %} %enddef @@ -49,12 +50,12 @@ (*jenv)->NewObject(jenv, lockex_class, lockex_construct, (*jenv)->NewStringUTF(jenv, "DbEnv.lock_get not granted"), DB_LOCK_GET, arg5, jarg4, NULL, -1, JDBENV)); - } else if (!DB_RETOK_STD(errno)) + } else if (!DB_RETOK_STD(errno)) { __dbj_throw(jenv, errno, NULL, NULL, JDBENV); + } %} %{ - static jthrowable __dbj_get_except(JNIEnv *jenv, int err, const char *msg, jobject obj, jobject jdbenv) { jobject jmsg; @@ -74,9 +75,18 @@ static jthrowable __dbj_get_except(JNIEnv *jenv, filenotfoundex_class, filenotfoundex_construct, jmsg); case ENOMEM: + return (jthrowable)(*jenv)->NewObject(jenv, + outofmemerr_class, outofmemerr_construct, jmsg); + + case DB_BUFFER_SMALL: return (jthrowable)(*jenv)->NewObject(jenv, memex_class, memex_construct, jmsg, obj, err, jdbenv); + case DB_REP_HANDLE_DEAD: + return (jthrowable)(*jenv)->NewObject(jenv, + rephandledeadex_class, rephandledeadex_construct, + jmsg, err, jdbenv); + case DB_RUNRECOVERY: return (jthrowable)(*jenv)->NewObject(jenv, runrecex_class, runrecex_construct, jmsg, err, jdbenv); @@ -95,13 +105,15 @@ static jthrowable __dbj_get_except(JNIEnv *jenv, } } -static int __dbj_throw(JNIEnv *jenv, int err, const char *msg, jobject obj, jobject jdbenv) +static int __dbj_throw(JNIEnv *jenv, + int err, const char *msg, jobject obj, jobject jdbenv) { jthrowable t; /* If an exception is pending, ignore requests to throw a new one. */ if ((*jenv)->ExceptionOccurred(jenv) == NULL) { - if ((t = __dbj_get_except(jenv, err, msg, obj, jdbenv)) == NULL) { + t = __dbj_get_except(jenv, err, msg, obj, jdbenv); + if (t == NULL) { /* * This is a problem - something went wrong creating an * exception. 
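
Editor's note (not part of the patch): the java_except.i changes above funnel every failing return through __dbj_throw, which builds the most specific exception it can (including the new DB_BUFFER_SMALL, DB_REP_HANDLE_DEAD and OutOfMemoryError cases) and deliberately refuses to replace an exception that is already pending. A stripped-down sketch of that logic follows; the JDK class names are illustrative placeholders, not the classes the patch actually registers.

#include <errno.h>
#include <jni.h>

/*
 * Editorial sketch: map an error code to a Java exception and throw it,
 * unless one is already pending.  The class names below are placeholders
 * for the cached exception classes the patch resolves at load time.
 */
static void
example_throw(JNIEnv *jenv, int err, const char *msg)
{
	const char *cname;
	jclass cls;

	/* Never overwrite an exception a callback already raised. */
	if ((*jenv)->ExceptionOccurred(jenv) != NULL)
		return;

	switch (err) {
	case ENOMEM:
		cname = "java/lang/OutOfMemoryError";
		break;
	case EINVAL:
		cname = "java/lang/IllegalArgumentException";
		break;
	default:
		cname = "java/lang/RuntimeException";
		break;
	}

	if ((cls = (*jenv)->FindClass(jenv, cname)) != NULL)
		(*jenv)->ThrowNew(jenv, cls, msg);
}
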
We have to assume there is an exception diff --git a/db/libdb_java/java_stat.i b/db/libdb_java/java_stat.i index 74a80aa90..121f51658 100644 --- a/db/libdb_java/java_stat.i +++ b/db/libdb_java/java_stat.i @@ -1,35 +1,29 @@ -// Statistics classes +/* Statistics classes */ %{ /* * These macros are used by code generated by the s_java script. */ -#define JAVADB_STAT_INT(jenv, cl, jobj, statp, name) \ - (*jenv)->SetIntField(jenv, jobj, \ - (*jenv)->GetFieldID(jenv, cl, #name, "I"), \ - (jint)statp->name) +#define JAVADB_STAT_INT(jenv, jobj, fid, statp, name) \ + (*jenv)->SetIntField(jenv, jobj, fid, (jint)statp->name) -#define JAVADB_STAT_STRING(jenv, cl, jobj, statp, name) \ - (*jenv)->SetObjectField(jenv, jobj, \ - (*jenv)->GetFieldID(jenv, cl, #name, \ - "Ljava/lang/String;"), \ +#define JAVADB_STAT_STRING(jenv, jobj, fid, statp, name) \ + (*jenv)->SetObjectField(jenv, jobj, fid, \ (*jenv)->NewStringUTF(jenv, statp->name)) -#define JAVADB_STAT_LSN(jenv, cl, jobj, statp, name) \ - (*jenv)->SetObjectField(jenv, jobj, \ - (*jenv)->GetFieldID(jenv, cl, #name, "L" DB_PKG "DbLsn;"), \ +#define JAVADB_STAT_LSN(jenv, jobj, fid, statp, name) \ + (*jenv)->SetObjectField(jenv, jobj, fid, \ __dbj_wrap_DB_LSN(jenv, &statp->name)) -#define JAVADB_STAT_LONG(jenv, cl, jobj, statp, name) \ - (*jenv)->SetLongField(jenv, jobj, \ - (*jenv)->GetFieldID(jenv, cl, #name, "J"), \ +#define JAVADB_STAT_LONG(jenv, jobj, fid, statp, name) \ + (*jenv)->SetLongField(jenv, jobj, fid, \ (jlong)statp->name) -#define JAVADB_STAT_XID(jenv, cl, jobj, statp, name) { \ - jobject jarr = (*jenv)->NewByteArray(jenv, (jsize)DB_XIDDATASIZE); \ - (*jenv)->SetByteArrayRegion(jenv, jarr, 0, (jsize)DB_XIDDATASIZE, \ - (jbyte *)statp->name); \ - (*jenv)->SetObjectField(jenv, jobj, \ - (*jenv)->GetFieldID(jenv, cl, #name, "[B"), jarr); \ +#define JAVADB_STAT_XID(jenv, jobj, fid, statp, name) { \ + jobject jarr = \ + (*jenv)->NewByteArray(jenv, (jsize)DB_XIDDATASIZE); \ + (*jenv)->SetByteArrayRegion(jenv, jarr, \ + 0, (jsize)DB_XIDDATASIZE, (jbyte *)statp->name); \ + (*jenv)->SetObjectField(jenv, jobj, fid, jarr); \ } /* @@ -46,16 +40,16 @@ JAVA_TYPEMAP(_ctype, _jtype, jobject) %typemap(out) _ctype %{ $result = (*jenv)->NewObject(jenv, _name##_class, _name##_construct); if ($result != NULL) - __dbj_fill_##_name(jenv, _name##_class, $result, $1); + __dbj_fill_##_name(jenv, $result, $1); __os_ufree(NULL, $1); %} %enddef -JAVA_STAT_CLASS(DB_LOCK_STAT *, DbLockStat, lock_stat) -JAVA_STAT_CLASS(DB_LOG_STAT *, DbLogStat, log_stat) -JAVA_STAT_CLASS(DB_MPOOL_STAT *, DbMpoolStat, mpool_stat) +JAVA_STAT_CLASS(DB_LOCK_STAT *, com.sleepycat.db.LockStats, lock_stat) +JAVA_STAT_CLASS(DB_LOG_STAT *, com.sleepycat.db.LogStats, log_stat) +JAVA_STAT_CLASS(DB_MPOOL_STAT *, com.sleepycat.db.CacheStats, mpool_stat) -JAVA_TYPEMAP(DB_MPOOL_FSTAT **, DbMpoolFStat[], jobjectArray) +JAVA_TYPEMAP(DB_MPOOL_FSTAT **, com.sleepycat.db.CacheFileStats[], jobjectArray) %typemap(out) DB_MPOOL_FSTAT ** { int i, len; @@ -75,19 +69,20 @@ JAVA_TYPEMAP(DB_MPOOL_FSTAT **, DbMpoolFStat[], jobjectArray) return $null; /* an exception is pending */ } (*jenv)->SetObjectArrayElement(jenv, $result, i, obj); - __dbj_fill_mpool_fstat(jenv, mpool_fstat_class, obj, $1[i]); + __dbj_fill_mpool_fstat(jenv, obj, $1[i]); } __os_ufree(NULL, $1); } -JAVA_STAT_CLASS(DB_REP_STAT *, DbRepStat, rep_stat) -JAVA_TYPEMAP(DB_TXN_STAT *, DbTxnStat, jobject) +JAVA_STAT_CLASS(DB_REP_STAT *, com.sleepycat.db.ReplicationStats, rep_stat) +JAVA_STAT_CLASS(DB_SEQUENCE_STAT *, com.sleepycat.db.SequenceStats, 
seq_stat) +JAVA_TYPEMAP(DB_TXN_STAT *, com.sleepycat.db.TransactionStats, jobject) %typemap(out) DB_TXN_STAT * { unsigned int i; jobjectArray actives; $result = (*jenv)->NewObject(jenv, txn_stat_class, txn_stat_construct); if ($result != NULL) - __dbj_fill_txn_stat(jenv, txn_stat_class, $result, $1); + __dbj_fill_txn_stat(jenv, $result, $1); actives = (*jenv)->NewObjectArray(jenv, (jsize)$1->st_nactive, txn_active_class, 0); @@ -95,7 +90,7 @@ JAVA_TYPEMAP(DB_TXN_STAT *, DbTxnStat, jobject) __os_ufree(NULL, $1); return $null; } - (*jenv)->SetObjectField(jenv, $result, txn_stat_active_fid, actives); + (*jenv)->SetObjectField(jenv, $result, txn_stat_st_txnarray_fid, actives); for (i = 0; i < $1->st_nactive; i++) { jobject obj = (*jenv)->NewObject(jenv, txn_active_class, txn_active_construct); @@ -104,13 +99,12 @@ JAVA_TYPEMAP(DB_TXN_STAT *, DbTxnStat, jobject) return $null; /* an exception is pending */ } (*jenv)->SetObjectArrayElement(jenv, actives, (jsize)i, obj); - __dbj_fill_txn_active(jenv, txn_active_class, obj, - &$1->st_txnarray[i]); + __dbj_fill_txn_active(jenv, obj, &$1->st_txnarray[i]); } __os_ufree(NULL, $1); } -// Db.stat return - special case +/* Db.stat return - special case */ %typemap(out) void * %{ if ($1 == NULL) $result = NULL; @@ -128,28 +122,28 @@ JAVA_TYPEMAP(DB_TXN_STAT *, DbTxnStat, jobject) /* Btree and recno share the same stat structure */ case DB_BTREE: case DB_RECNO: - $result = (*jenv)->NewObject(jenv, btree_stat_class, - btree_stat_construct); + $result = (*jenv)->NewObject(jenv, bt_stat_class, + bt_stat_construct); if ($result != NULL) - __dbj_fill_bt_stat(jenv, btree_stat_class, - $result, (DB_BTREE_STAT *)$1); + __dbj_fill_bt_stat(jenv, $result, + (DB_BTREE_STAT *)$1); break; /* Hash stat structure */ case DB_HASH: - $result = (*jenv)->NewObject(jenv, hash_stat_class, - hash_stat_construct); + $result = (*jenv)->NewObject(jenv, h_stat_class, + h_stat_construct); if ($result != NULL) - __dbj_fill_h_stat(jenv, hash_stat_class, - $result, (DB_HASH_STAT *)$1); + __dbj_fill_h_stat(jenv, $result, + (DB_HASH_STAT *)$1); break; case DB_QUEUE: - $result = (*jenv)->NewObject(jenv, queue_stat_class, - queue_stat_construct); + $result = (*jenv)->NewObject(jenv, qam_stat_class, + qam_stat_construct); if ($result != NULL) - __dbj_fill_qam_stat(jenv, queue_stat_class, - $result, (DB_QUEUE_STAT *)$1); + __dbj_fill_qam_stat(jenv, $result, + (DB_QUEUE_STAT *)$1); break; /* That's all the database types we're aware of! */ diff --git a/db/libdb_java/java_stat_auto.c b/db/libdb_java/java_stat_auto.c index 9d505582e..eb03823a0 100644 --- a/db/libdb_java/java_stat_auto.c +++ b/db/libdb_java/java_stat_auto.c @@ -1,228 +1,260 @@ -/* DO NOT EDIT: automatically built by dist/s_java_stat. */ -static int __dbj_fill_bt_stat(JNIEnv *jnienv, jclass cl, +/*- + * DO NOT EDIT: automatically built by dist/s_java_stat. + * + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. 
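
Editor's note (not part of the patch): the macro change above is the heart of the java_stat_auto.c rewrite that follows - instead of a GetFieldID lookup on every assignment, each statistics field ID is resolved once (the long list of *_fid statics later in this patch) and the generated fill functions simply store through it. A minimal sketch of that caching pattern; the class and field names are invented for illustration.

#include <jni.h>

/*
 * Editorial sketch: resolve a field ID once, reuse it on every stat
 * fill.  "com/example/Stats" and "count" are invented; the patch caches
 * one such ID per statistics field.
 */
static jfieldID stats_count_fid;	/* resolved once, used many times */

static int
example_cache_field_ids(JNIEnv *jenv)
{
	jclass cls = (*jenv)->FindClass(jenv, "com/example/Stats");

	if (cls == NULL)
		return (-1);		/* An exception is pending. */
	stats_count_fid = (*jenv)->GetFieldID(jenv, cls, "count", "I");
	return (stats_count_fid == NULL ? -1 : 0);
}

static void
example_fill_stats(JNIEnv *jenv, jobject jobj, int count)
{
	/* The per-call cost is now a single SetIntField. */
	(*jenv)->SetIntField(jenv, jobj, stats_count_fid, (jint)count);
}
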
+ */ +static int __dbj_fill_bt_stat(JNIEnv *jnienv, jobject jobj, struct __db_bt_stat *statp) { - JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_magic); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_version); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_metaflags); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_nkeys); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_ndata); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_pagesize); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_maxkey); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_minkey); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_re_len); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_re_pad); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_levels); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_int_pg); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_leaf_pg); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_dup_pg); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_over_pg); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_free); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_int_pgfree); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_leaf_pgfree); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_dup_pgfree); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_over_pgfree); + JAVADB_STAT_INT(jnienv, jobj, bt_stat_bt_magic_fid, statp, bt_magic); + JAVADB_STAT_INT(jnienv, jobj, bt_stat_bt_version_fid, statp, bt_version); + JAVADB_STAT_INT(jnienv, jobj, bt_stat_bt_metaflags_fid, statp, bt_metaflags); + JAVADB_STAT_INT(jnienv, jobj, bt_stat_bt_nkeys_fid, statp, bt_nkeys); + JAVADB_STAT_INT(jnienv, jobj, bt_stat_bt_ndata_fid, statp, bt_ndata); + JAVADB_STAT_INT(jnienv, jobj, bt_stat_bt_pagesize_fid, statp, bt_pagesize); + JAVADB_STAT_INT(jnienv, jobj, bt_stat_bt_maxkey_fid, statp, bt_maxkey); + JAVADB_STAT_INT(jnienv, jobj, bt_stat_bt_minkey_fid, statp, bt_minkey); + JAVADB_STAT_INT(jnienv, jobj, bt_stat_bt_re_len_fid, statp, bt_re_len); + JAVADB_STAT_INT(jnienv, jobj, bt_stat_bt_re_pad_fid, statp, bt_re_pad); + JAVADB_STAT_INT(jnienv, jobj, bt_stat_bt_levels_fid, statp, bt_levels); + JAVADB_STAT_INT(jnienv, jobj, bt_stat_bt_int_pg_fid, statp, bt_int_pg); + JAVADB_STAT_INT(jnienv, jobj, bt_stat_bt_leaf_pg_fid, statp, bt_leaf_pg); + JAVADB_STAT_INT(jnienv, jobj, bt_stat_bt_dup_pg_fid, statp, bt_dup_pg); + JAVADB_STAT_INT(jnienv, jobj, bt_stat_bt_over_pg_fid, statp, bt_over_pg); + JAVADB_STAT_INT(jnienv, jobj, bt_stat_bt_empty_pg_fid, statp, bt_empty_pg); + JAVADB_STAT_INT(jnienv, jobj, bt_stat_bt_free_fid, statp, bt_free); + JAVADB_STAT_INT(jnienv, jobj, bt_stat_bt_int_pgfree_fid, statp, bt_int_pgfree); + JAVADB_STAT_INT(jnienv, jobj, bt_stat_bt_leaf_pgfree_fid, statp, bt_leaf_pgfree); + JAVADB_STAT_INT(jnienv, jobj, bt_stat_bt_dup_pgfree_fid, statp, bt_dup_pgfree); + JAVADB_STAT_INT(jnienv, jobj, bt_stat_bt_over_pgfree_fid, statp, bt_over_pgfree); return (0); } -static int __dbj_fill_h_stat(JNIEnv *jnienv, jclass cl, +static int __dbj_fill_h_stat(JNIEnv *jnienv, jobject jobj, struct __db_h_stat *statp) { - JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_magic); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_version); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_metaflags); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_nkeys); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_ndata); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_pagesize); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_ffactor); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_buckets); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_free); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_bfree); - 
JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_bigpages); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_big_bfree); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_overflows); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_ovfl_free); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_dup); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_dup_free); + JAVADB_STAT_INT(jnienv, jobj, h_stat_hash_magic_fid, statp, hash_magic); + JAVADB_STAT_INT(jnienv, jobj, h_stat_hash_version_fid, statp, hash_version); + JAVADB_STAT_INT(jnienv, jobj, h_stat_hash_metaflags_fid, statp, hash_metaflags); + JAVADB_STAT_INT(jnienv, jobj, h_stat_hash_nkeys_fid, statp, hash_nkeys); + JAVADB_STAT_INT(jnienv, jobj, h_stat_hash_ndata_fid, statp, hash_ndata); + JAVADB_STAT_INT(jnienv, jobj, h_stat_hash_pagesize_fid, statp, hash_pagesize); + JAVADB_STAT_INT(jnienv, jobj, h_stat_hash_ffactor_fid, statp, hash_ffactor); + JAVADB_STAT_INT(jnienv, jobj, h_stat_hash_buckets_fid, statp, hash_buckets); + JAVADB_STAT_INT(jnienv, jobj, h_stat_hash_free_fid, statp, hash_free); + JAVADB_STAT_INT(jnienv, jobj, h_stat_hash_bfree_fid, statp, hash_bfree); + JAVADB_STAT_INT(jnienv, jobj, h_stat_hash_bigpages_fid, statp, hash_bigpages); + JAVADB_STAT_INT(jnienv, jobj, h_stat_hash_big_bfree_fid, statp, hash_big_bfree); + JAVADB_STAT_INT(jnienv, jobj, h_stat_hash_overflows_fid, statp, hash_overflows); + JAVADB_STAT_INT(jnienv, jobj, h_stat_hash_ovfl_free_fid, statp, hash_ovfl_free); + JAVADB_STAT_INT(jnienv, jobj, h_stat_hash_dup_fid, statp, hash_dup); + JAVADB_STAT_INT(jnienv, jobj, h_stat_hash_dup_free_fid, statp, hash_dup_free); return (0); } -static int __dbj_fill_lock_stat(JNIEnv *jnienv, jclass cl, +static int __dbj_fill_lock_stat(JNIEnv *jnienv, jobject jobj, struct __db_lock_stat *statp) { - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_id); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_cur_maxid); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxlocks); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxlockers); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxobjects); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nmodes); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nlocks); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxnlocks); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nlockers); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxnlockers); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nobjects); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxnobjects); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nconflicts); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nrequests); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nreleases); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nnowaits); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_ndeadlocks); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_locktimeout); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nlocktimeouts); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_txntimeout); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_ntxntimeouts); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_wait); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_nowait); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_regsize); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_id_fid, statp, st_id); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_cur_maxid_fid, statp, st_cur_maxid); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_maxlocks_fid, statp, st_maxlocks); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_maxlockers_fid, statp, st_maxlockers); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_maxobjects_fid, statp, 
st_maxobjects); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_nmodes_fid, statp, st_nmodes); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_nlocks_fid, statp, st_nlocks); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_maxnlocks_fid, statp, st_maxnlocks); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_nlockers_fid, statp, st_nlockers); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_maxnlockers_fid, statp, st_maxnlockers); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_nobjects_fid, statp, st_nobjects); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_maxnobjects_fid, statp, st_maxnobjects); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_nconflicts_fid, statp, st_nconflicts); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_nrequests_fid, statp, st_nrequests); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_nreleases_fid, statp, st_nreleases); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_nnowaits_fid, statp, st_nnowaits); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_ndeadlocks_fid, statp, st_ndeadlocks); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_locktimeout_fid, statp, st_locktimeout); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_nlocktimeouts_fid, statp, st_nlocktimeouts); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_txntimeout_fid, statp, st_txntimeout); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_ntxntimeouts_fid, statp, st_ntxntimeouts); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_region_wait_fid, statp, st_region_wait); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_region_nowait_fid, statp, st_region_nowait); + JAVADB_STAT_INT(jnienv, jobj, lock_stat_st_regsize_fid, statp, st_regsize); return (0); } -static int __dbj_fill_log_stat(JNIEnv *jnienv, jclass cl, +static int __dbj_fill_log_stat(JNIEnv *jnienv, jobject jobj, struct __db_log_stat *statp) { - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_magic); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_version); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_mode); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_lg_bsize); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_lg_size); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_w_bytes); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_w_mbytes); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_wc_bytes); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_wc_mbytes); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_wcount); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_wcount_fill); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_scount); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_wait); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_nowait); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_cur_file); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_cur_offset); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_disk_file); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_disk_offset); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_regsize); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxcommitperflush); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_mincommitperflush); + JAVADB_STAT_INT(jnienv, jobj, log_stat_st_magic_fid, statp, st_magic); + JAVADB_STAT_INT(jnienv, jobj, log_stat_st_version_fid, statp, st_version); + JAVADB_STAT_INT(jnienv, jobj, log_stat_st_mode_fid, statp, st_mode); + JAVADB_STAT_INT(jnienv, jobj, log_stat_st_lg_bsize_fid, statp, st_lg_bsize); + JAVADB_STAT_INT(jnienv, jobj, log_stat_st_lg_size_fid, statp, st_lg_size); + JAVADB_STAT_INT(jnienv, jobj, log_stat_st_w_bytes_fid, statp, st_w_bytes); + JAVADB_STAT_INT(jnienv, jobj, log_stat_st_w_mbytes_fid, statp, st_w_mbytes); + JAVADB_STAT_INT(jnienv, jobj, 
log_stat_st_wc_bytes_fid, statp, st_wc_bytes); + JAVADB_STAT_INT(jnienv, jobj, log_stat_st_wc_mbytes_fid, statp, st_wc_mbytes); + JAVADB_STAT_INT(jnienv, jobj, log_stat_st_wcount_fid, statp, st_wcount); + JAVADB_STAT_INT(jnienv, jobj, log_stat_st_wcount_fill_fid, statp, st_wcount_fill); + JAVADB_STAT_INT(jnienv, jobj, log_stat_st_scount_fid, statp, st_scount); + JAVADB_STAT_INT(jnienv, jobj, log_stat_st_region_wait_fid, statp, st_region_wait); + JAVADB_STAT_INT(jnienv, jobj, log_stat_st_region_nowait_fid, statp, st_region_nowait); + JAVADB_STAT_INT(jnienv, jobj, log_stat_st_cur_file_fid, statp, st_cur_file); + JAVADB_STAT_INT(jnienv, jobj, log_stat_st_cur_offset_fid, statp, st_cur_offset); + JAVADB_STAT_INT(jnienv, jobj, log_stat_st_disk_file_fid, statp, st_disk_file); + JAVADB_STAT_INT(jnienv, jobj, log_stat_st_disk_offset_fid, statp, st_disk_offset); + JAVADB_STAT_INT(jnienv, jobj, log_stat_st_regsize_fid, statp, st_regsize); + JAVADB_STAT_INT(jnienv, jobj, log_stat_st_maxcommitperflush_fid, statp, st_maxcommitperflush); + JAVADB_STAT_INT(jnienv, jobj, log_stat_st_mincommitperflush_fid, statp, st_mincommitperflush); return (0); } -static int __dbj_fill_mpool_fstat(JNIEnv *jnienv, jclass cl, +static int __dbj_fill_mpool_fstat(JNIEnv *jnienv, jobject jobj, struct __db_mpool_fstat *statp) { - JAVADB_STAT_STRING(jnienv, cl, jobj, statp, file_name); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_pagesize); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_map); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_cache_hit); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_cache_miss); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_page_create); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_page_in); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_page_out); + JAVADB_STAT_STRING(jnienv, jobj, mpool_fstat_file_name_fid, statp, file_name); + JAVADB_STAT_INT(jnienv, jobj, mpool_fstat_st_pagesize_fid, statp, st_pagesize); + JAVADB_STAT_INT(jnienv, jobj, mpool_fstat_st_map_fid, statp, st_map); + JAVADB_STAT_INT(jnienv, jobj, mpool_fstat_st_cache_hit_fid, statp, st_cache_hit); + JAVADB_STAT_INT(jnienv, jobj, mpool_fstat_st_cache_miss_fid, statp, st_cache_miss); + JAVADB_STAT_INT(jnienv, jobj, mpool_fstat_st_page_create_fid, statp, st_page_create); + JAVADB_STAT_INT(jnienv, jobj, mpool_fstat_st_page_in_fid, statp, st_page_in); + JAVADB_STAT_INT(jnienv, jobj, mpool_fstat_st_page_out_fid, statp, st_page_out); return (0); } -static int __dbj_fill_mpool_stat(JNIEnv *jnienv, jclass cl, +static int __dbj_fill_mpool_stat(JNIEnv *jnienv, jobject jobj, struct __db_mpool_stat *statp) { - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_gbytes); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_bytes); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_ncache); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_regsize); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_map); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_cache_hit); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_cache_miss); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_page_create); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_page_in); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_page_out); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_ro_evict); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_rw_evict); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_page_trickle); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_pages); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_page_clean); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_page_dirty); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, 
st_hash_buckets); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_hash_searches); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_hash_longest); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_hash_examined); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_hash_nowait); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_hash_wait); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_hash_max_wait); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_nowait); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_wait); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_alloc); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_alloc_buckets); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_alloc_max_buckets); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_alloc_pages); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_alloc_max_pages); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_gbytes_fid, statp, st_gbytes); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_bytes_fid, statp, st_bytes); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_ncache_fid, statp, st_ncache); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_regsize_fid, statp, st_regsize); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_mmapsize_fid, statp, st_mmapsize); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_maxopenfd_fid, statp, st_maxopenfd); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_maxwrite_fid, statp, st_maxwrite); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_maxwrite_sleep_fid, statp, st_maxwrite_sleep); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_map_fid, statp, st_map); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_cache_hit_fid, statp, st_cache_hit); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_cache_miss_fid, statp, st_cache_miss); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_page_create_fid, statp, st_page_create); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_page_in_fid, statp, st_page_in); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_page_out_fid, statp, st_page_out); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_ro_evict_fid, statp, st_ro_evict); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_rw_evict_fid, statp, st_rw_evict); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_page_trickle_fid, statp, st_page_trickle); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_pages_fid, statp, st_pages); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_page_clean_fid, statp, st_page_clean); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_page_dirty_fid, statp, st_page_dirty); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_hash_buckets_fid, statp, st_hash_buckets); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_hash_searches_fid, statp, st_hash_searches); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_hash_longest_fid, statp, st_hash_longest); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_hash_examined_fid, statp, st_hash_examined); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_hash_nowait_fid, statp, st_hash_nowait); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_hash_wait_fid, statp, st_hash_wait); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_hash_max_wait_fid, statp, st_hash_max_wait); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_region_nowait_fid, statp, st_region_nowait); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_region_wait_fid, statp, st_region_wait); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_alloc_fid, statp, st_alloc); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_alloc_buckets_fid, statp, st_alloc_buckets); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_alloc_max_buckets_fid, statp, st_alloc_max_buckets); + JAVADB_STAT_INT(jnienv, jobj, 
mpool_stat_st_alloc_pages_fid, statp, st_alloc_pages); + JAVADB_STAT_INT(jnienv, jobj, mpool_stat_st_alloc_max_pages_fid, statp, st_alloc_max_pages); return (0); } -static int __dbj_fill_qam_stat(JNIEnv *jnienv, jclass cl, +static int __dbj_fill_qam_stat(JNIEnv *jnienv, jobject jobj, struct __db_qam_stat *statp) { - JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_magic); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_version); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_metaflags); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_nkeys); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_ndata); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_pagesize); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_extentsize); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_pages); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_re_len); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_re_pad); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_pgfree); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_first_recno); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_cur_recno); + JAVADB_STAT_INT(jnienv, jobj, qam_stat_qs_magic_fid, statp, qs_magic); + JAVADB_STAT_INT(jnienv, jobj, qam_stat_qs_version_fid, statp, qs_version); + JAVADB_STAT_INT(jnienv, jobj, qam_stat_qs_metaflags_fid, statp, qs_metaflags); + JAVADB_STAT_INT(jnienv, jobj, qam_stat_qs_nkeys_fid, statp, qs_nkeys); + JAVADB_STAT_INT(jnienv, jobj, qam_stat_qs_ndata_fid, statp, qs_ndata); + JAVADB_STAT_INT(jnienv, jobj, qam_stat_qs_pagesize_fid, statp, qs_pagesize); + JAVADB_STAT_INT(jnienv, jobj, qam_stat_qs_extentsize_fid, statp, qs_extentsize); + JAVADB_STAT_INT(jnienv, jobj, qam_stat_qs_pages_fid, statp, qs_pages); + JAVADB_STAT_INT(jnienv, jobj, qam_stat_qs_re_len_fid, statp, qs_re_len); + JAVADB_STAT_INT(jnienv, jobj, qam_stat_qs_re_pad_fid, statp, qs_re_pad); + JAVADB_STAT_INT(jnienv, jobj, qam_stat_qs_pgfree_fid, statp, qs_pgfree); + JAVADB_STAT_INT(jnienv, jobj, qam_stat_qs_first_recno_fid, statp, qs_first_recno); + JAVADB_STAT_INT(jnienv, jobj, qam_stat_qs_cur_recno_fid, statp, qs_cur_recno); return (0); } -static int __dbj_fill_rep_stat(JNIEnv *jnienv, jclass cl, +static int __dbj_fill_rep_stat(JNIEnv *jnienv, jobject jobj, struct __db_rep_stat *statp) { - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_status); - JAVADB_STAT_LSN(jnienv, cl, jobj, statp, st_next_lsn); - JAVADB_STAT_LSN(jnienv, cl, jobj, statp, st_waiting_lsn); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_dupmasters); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_env_id); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_env_priority); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_gen); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_in_recovery); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_log_duplicated); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_log_queued); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_log_queued_max); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_log_queued_total); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_log_records); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_log_requested); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_master); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_master_changes); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_msgs_badgen); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_msgs_processed); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_msgs_recover); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_msgs_send_failures); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_msgs_sent); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_newsites); - JAVADB_STAT_INT(jnienv, 
cl, jobj, statp, st_nsites); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nthrottles); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_outdated); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_txns_applied); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_elections); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_elections_won); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_election_cur_winner); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_election_gen); - JAVADB_STAT_LSN(jnienv, cl, jobj, statp, st_election_lsn); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_election_nsites); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_election_priority); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_election_status); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_election_tiebreaker); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_election_votes); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_status_fid, statp, st_status); + JAVADB_STAT_LSN(jnienv, jobj, rep_stat_st_next_lsn_fid, statp, st_next_lsn); + JAVADB_STAT_LSN(jnienv, jobj, rep_stat_st_waiting_lsn_fid, statp, st_waiting_lsn); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_next_pg_fid, statp, st_next_pg); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_waiting_pg_fid, statp, st_waiting_pg); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_dupmasters_fid, statp, st_dupmasters); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_env_id_fid, statp, st_env_id); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_env_priority_fid, statp, st_env_priority); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_gen_fid, statp, st_gen); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_egen_fid, statp, st_egen); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_log_duplicated_fid, statp, st_log_duplicated); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_log_queued_fid, statp, st_log_queued); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_log_queued_max_fid, statp, st_log_queued_max); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_log_queued_total_fid, statp, st_log_queued_total); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_log_records_fid, statp, st_log_records); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_log_requested_fid, statp, st_log_requested); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_master_fid, statp, st_master); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_master_changes_fid, statp, st_master_changes); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_msgs_badgen_fid, statp, st_msgs_badgen); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_msgs_processed_fid, statp, st_msgs_processed); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_msgs_recover_fid, statp, st_msgs_recover); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_msgs_send_failures_fid, statp, st_msgs_send_failures); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_msgs_sent_fid, statp, st_msgs_sent); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_newsites_fid, statp, st_newsites); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_nsites_fid, statp, st_nsites); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_nthrottles_fid, statp, st_nthrottles); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_outdated_fid, statp, st_outdated); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_pg_duplicated_fid, statp, st_pg_duplicated); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_pg_records_fid, statp, st_pg_records); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_pg_requested_fid, statp, st_pg_requested); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_startup_complete_fid, statp, st_startup_complete); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_txns_applied_fid, statp, st_txns_applied); + JAVADB_STAT_INT(jnienv, 
jobj, rep_stat_st_elections_fid, statp, st_elections); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_elections_won_fid, statp, st_elections_won); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_election_cur_winner_fid, statp, st_election_cur_winner); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_election_gen_fid, statp, st_election_gen); + JAVADB_STAT_LSN(jnienv, jobj, rep_stat_st_election_lsn_fid, statp, st_election_lsn); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_election_nsites_fid, statp, st_election_nsites); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_election_nvotes_fid, statp, st_election_nvotes); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_election_priority_fid, statp, st_election_priority); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_election_status_fid, statp, st_election_status); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_election_tiebreaker_fid, statp, st_election_tiebreaker); + JAVADB_STAT_INT(jnienv, jobj, rep_stat_st_election_votes_fid, statp, st_election_votes); return (0); } -static int __dbj_fill_txn_stat(JNIEnv *jnienv, jclass cl, +static int __dbj_fill_seq_stat(JNIEnv *jnienv, + jobject jobj, struct __db_seq_stat *statp) { + JAVADB_STAT_INT(jnienv, jobj, seq_stat_st_wait_fid, statp, st_wait); + JAVADB_STAT_INT(jnienv, jobj, seq_stat_st_nowait_fid, statp, st_nowait); + JAVADB_STAT_LONG(jnienv, jobj, seq_stat_st_current_fid, statp, st_current); + JAVADB_STAT_LONG(jnienv, jobj, seq_stat_st_value_fid, statp, st_value); + JAVADB_STAT_LONG(jnienv, jobj, seq_stat_st_last_value_fid, statp, st_last_value); + JAVADB_STAT_LONG(jnienv, jobj, seq_stat_st_min_fid, statp, st_min); + JAVADB_STAT_LONG(jnienv, jobj, seq_stat_st_max_fid, statp, st_max); + JAVADB_STAT_INT(jnienv, jobj, seq_stat_st_cache_size_fid, statp, st_cache_size); + JAVADB_STAT_INT(jnienv, jobj, seq_stat_st_flags_fid, statp, st_flags); + return (0); +} +static int __dbj_fill_txn_stat(JNIEnv *jnienv, jobject jobj, struct __db_txn_stat *statp) { - JAVADB_STAT_LSN(jnienv, cl, jobj, statp, st_last_ckp); - JAVADB_STAT_LONG(jnienv, cl, jobj, statp, st_time_ckp); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_last_txnid); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxtxns); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_naborts); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nbegins); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_ncommits); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nactive); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nrestores); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxnactive); - JAVADB_STAT_ACTIVE(jnienv, cl, jobj, statp, st_txnarray); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_wait); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_nowait); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_regsize); + JAVADB_STAT_LSN(jnienv, jobj, txn_stat_st_last_ckp_fid, statp, st_last_ckp); + JAVADB_STAT_LONG(jnienv, jobj, txn_stat_st_time_ckp_fid, statp, st_time_ckp); + JAVADB_STAT_INT(jnienv, jobj, txn_stat_st_last_txnid_fid, statp, st_last_txnid); + JAVADB_STAT_INT(jnienv, jobj, txn_stat_st_maxtxns_fid, statp, st_maxtxns); + JAVADB_STAT_INT(jnienv, jobj, txn_stat_st_naborts_fid, statp, st_naborts); + JAVADB_STAT_INT(jnienv, jobj, txn_stat_st_nbegins_fid, statp, st_nbegins); + JAVADB_STAT_INT(jnienv, jobj, txn_stat_st_ncommits_fid, statp, st_ncommits); + JAVADB_STAT_INT(jnienv, jobj, txn_stat_st_nactive_fid, statp, st_nactive); + JAVADB_STAT_INT(jnienv, jobj, txn_stat_st_nrestores_fid, statp, st_nrestores); + JAVADB_STAT_INT(jnienv, jobj, txn_stat_st_maxnactive_fid, statp, st_maxnactive); + 
JAVADB_STAT_ACTIVE(jnienv, jobj, txn_stat_st_txnarray_fid, statp, st_txnarray); + JAVADB_STAT_INT(jnienv, jobj, txn_stat_st_region_wait_fid, statp, st_region_wait); + JAVADB_STAT_INT(jnienv, jobj, txn_stat_st_region_nowait_fid, statp, st_region_nowait); + JAVADB_STAT_INT(jnienv, jobj, txn_stat_st_regsize_fid, statp, st_regsize); return (0); } -static int __dbj_fill_txn_active(JNIEnv *jnienv, jclass cl, +static int __dbj_fill_txn_active(JNIEnv *jnienv, jobject jobj, struct __db_txn_active *statp) { - JAVADB_STAT_INT(jnienv, cl, jobj, statp, txnid); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, parentid); - JAVADB_STAT_LSN(jnienv, cl, jobj, statp, lsn); - JAVADB_STAT_INT(jnienv, cl, jobj, statp, xa_status); - JAVADB_STAT_XID(jnienv, cl, jobj, statp, xid); + JAVADB_STAT_INT(jnienv, jobj, txn_active_txnid_fid, statp, txnid); + JAVADB_STAT_INT(jnienv, jobj, txn_active_parentid_fid, statp, parentid); + JAVADB_STAT_LSN(jnienv, jobj, txn_active_lsn_fid, statp, lsn); + JAVADB_STAT_INT(jnienv, jobj, txn_active_xa_status_fid, statp, xa_status); + JAVADB_STAT_XID(jnienv, jobj, txn_active_xid_fid, statp, xid); return (0); } diff --git a/db/libdb_java/java_typemaps.i b/db/libdb_java/java_typemaps.i index 62a70612b..e1bc92020 100644 --- a/db/libdb_java/java_typemaps.i +++ b/db/libdb_java/java_typemaps.i @@ -1,4 +1,4 @@ -// Typemaps +/* Typemaps */ %define JAVA_TYPEMAP(_ctype, _jtype, _jnitype) %typemap(jstype) _ctype #_jtype %typemap(jtype) _ctype #_jtype @@ -8,9 +8,11 @@ %typemap(javaout) _ctype { return $jnicall; } %enddef +JAVA_TYPEMAP(int32_t, int, jint) JAVA_TYPEMAP(u_int32_t, int, jint) JAVA_TYPEMAP(u_int32_t pagesize, long, jlong) JAVA_TYPEMAP(long, long, jlong) +JAVA_TYPEMAP(db_seq_t, long, jlong) JAVA_TYPEMAP(db_timeout_t, long, jlong) JAVA_TYPEMAP(size_t, long, jlong) JAVA_TYPEMAP(db_ret_t, void, void) @@ -18,15 +20,11 @@ JAVA_TYPEMAP(db_ret_t, void, void) %typemap(out) db_ret_t "" JAVA_TYPEMAP(int_bool, boolean, jboolean) -%typemap(in) int_bool %{ - $1 = ($input == JNI_TRUE); -%} -%typemap(out) int_bool %{ - $result = ($1) ? JNI_TRUE : JNI_FALSE; -%} +%typemap(in) int_bool %{ $1 = ($input == JNI_TRUE); %} +%typemap(out) int_bool %{ $result = ($1) ? 
JNI_TRUE : JNI_FALSE; %} -// Dbt handling -JAVA_TYPEMAP(DBT *, Dbt, jobject) +/* Dbt handling */ +JAVA_TYPEMAP(DBT *, com.sleepycat.db.DatabaseEntry, jobject) %{ typedef struct __dbt_locked { @@ -34,28 +32,35 @@ typedef struct __dbt_locked { jbyteArray jarr; jbyte *orig_data; jint offset; + int reuse; u_int32_t orig_size; + jsize array_len; } DBT_LOCKED; static int __dbj_dbt_copyin( - JNIEnv *jenv, DBT_LOCKED *ldbt, jobject jdbt) + JNIEnv *jenv, DBT_LOCKED *ldbt, DBT **dbtp, jobject jdbt, int allow_null) { DBT *dbt; - jsize array_len; + + memset(ldbt, 0, sizeof (*ldbt)); + + if (jdbt == NULL) { + if (allow_null) { + *dbtp = NULL; + return (0); + } else { + return (__dbj_throw(jenv, EINVAL, + "DatabaseEntry must not be null", NULL, NULL)); + } + } dbt = &ldbt->dbt; - ldbt->offset = (*jenv)->GetIntField(jenv, jdbt, dbt_offset_fid); + if (dbtp != NULL) + *dbtp = dbt; + ldbt->jarr = (jbyteArray)(*jenv)->GetObjectField(jenv, jdbt, dbt_data_fid); - if (ldbt->jarr == NULL) { - ldbt->orig_data = dbt->data = NULL; - array_len = 0; - } else { - ldbt->orig_data = (*jenv)->GetByteArrayElements(jenv, - ldbt->jarr, NULL); - array_len = (*jenv)->GetArrayLength(jenv, ldbt->jarr); - dbt->data = ldbt->orig_data + ldbt->offset; - } + ldbt->offset = (*jenv)->GetIntField(jenv, jdbt, dbt_offset_fid); dbt->size = (*jenv)->GetIntField(jenv, jdbt, dbt_size_fid); ldbt->orig_size = dbt->size; @@ -69,34 +74,60 @@ static int __dbj_dbt_copyin( * MALLOC. */ if (!F_ISSET(dbt, DB_DBT_USERMEM)) { + ldbt->reuse = !F_ISSET(dbt, DB_DBT_MALLOC); F_CLR(dbt, DB_DBT_REALLOC); F_SET(dbt, DB_DBT_MALLOC); } + + /* Verify parameters before allocating or locking data. */ + if ((jint)dbt->doff < 0) + return (__dbj_throw(jenv, EINVAL, "DatabaseEntry doff illegal", + NULL, NULL)); - /* - * Some code makes the assumption that if dbt->size is non-zero, there - * is data to copy from dbt->data. We may have set dbt->size to a - * non-zero integer above but decided not to point dbt->data at - * anything. - * - * Clean up the dbt fields so we don't run into trouble. (Note that - * doff, dlen, and flags all may contain meaningful values.) - */ - if (dbt->data == NULL) - dbt->size = dbt->ulen = 0; - - /* Verify other parameters */ - if (ldbt->offset < 0) - return (__dbj_throw(jenv, EINVAL, "Dbt.offset illegal", NULL, NULL)); - else if ((jsize)(dbt->size + ldbt->offset) > array_len) - return (__dbj_throw(jenv, EINVAL, - "Dbt.size + Dbt.offset greater than array length", NULL, NULL)); - else if ((jint)dbt->doff < 0) - return (__dbj_throw(jenv, EINVAL, "Dbt.doff illegal", NULL, NULL)); - else if ((jsize)dbt->ulen > array_len) - return (__dbj_throw(jenv, EINVAL, - "Dbt.ulen greater than array length", NULL, NULL)); + if (ldbt->jarr == NULL) { + /* + * Some code makes the assumption that if a DBT's size or ulen + * is non-zero, there is data to copy from dbt->data. + * + * Clean up the dbt fields so we don't run into trouble. + * (Note that doff, dlen, and flags all may contain + * meaningful values.) 
+ */ + ldbt->orig_data = dbt->data = NULL; + ldbt->array_len = ldbt->offset = dbt->size = dbt->ulen = 0; + return (0); + } else + ldbt->array_len = (*jenv)->GetArrayLength(jenv, ldbt->jarr); + if (F_ISSET(dbt, DB_DBT_USERMEM)) { + if (ldbt->offset < 0) + return (__dbj_throw(jenv, EINVAL, + "offset cannot be negative", + NULL, NULL)); + if (dbt->size > dbt->ulen) + return (__dbj_throw(jenv, EINVAL, + "size must be less than or equal to ulen", + NULL, NULL)); + if ((jsize)(ldbt->offset + dbt->ulen) > ldbt->array_len) + return (__dbj_throw(jenv, EINVAL, + "offset + ulen greater than array length", + NULL, NULL)); + if ((ldbt->orig_data = (*jenv)->GetByteArrayElements(jenv, + ldbt->jarr, NULL)) == NULL) + return (EINVAL); /* an exception will be pending */ + dbt->data = ldbt->orig_data + ldbt->offset; + } else { + if (__os_umalloc(NULL, dbt->size, &dbt->data) != 0) + return (ENOMEM); + ldbt->orig_data = dbt->data; + (*jenv)->GetByteArrayRegion(jenv, + ldbt->jarr, ldbt->offset, dbt->size, dbt->data); + if ((*jenv)->ExceptionOccurred(jenv)) { + (void)__os_ufree(NULL, dbt->data); + return (EINVAL); + } + } + return (0); } @@ -109,6 +140,7 @@ static void __dbj_dbt_copyout( (*jenv)->SetByteArrayRegion(jenv, newarr, 0, (jsize)dbt->size, (jbyte *)dbt->data); (*jenv)->SetObjectField(jenv, jdbt, dbt_data_fid, newarr); + (*jenv)->SetIntField(jenv, jdbt, dbt_offset_fid, 0); (*jenv)->SetIntField(jenv, jdbt, dbt_size_fid, (jint)dbt->size); if (jarr != NULL) *jarr = newarr; @@ -119,53 +151,83 @@ static void __dbj_dbt_copyout( static void __dbj_dbt_release( JNIEnv *jenv, jobject jdbt, DBT *dbt, DBT_LOCKED *ldbt) { jthrowable t; - - if (ldbt->jarr != NULL) { - (*jenv)->ReleaseByteArrayElements(jenv, ldbt->jarr, - ldbt->orig_data, 0); - } + if (dbt == NULL) + return; + if (dbt->size != ldbt->orig_size) (*jenv)->SetIntField(jenv, jdbt, dbt_size_fid, (jint)dbt->size); - if (F_ISSET(dbt, DB_DBT_USERMEM) && - dbt->size > dbt->ulen && - (t = (*jenv)->ExceptionOccurred(jenv)) != NULL && - (*jenv)->IsInstanceOf(jenv, t, memex_class)) { - (*jenv)->CallNonvirtualVoidMethod(jenv, t, memex_class, - memex_update_method, jdbt); - /* - * We have to rethrow the exception because calling into Java - * clears it. - */ - (*jenv)->Throw(jenv, t); - } - if (ldbt->dbt.data != ldbt->orig_data + ldbt->offset) { - __dbj_dbt_copyout(jenv, &ldbt->dbt, NULL, jdbt); - (*jenv)->SetIntField(jenv, jdbt, dbt_offset_fid, 0); - __os_ufree(NULL, ldbt->dbt.data); + if (F_ISSET(dbt, DB_DBT_USERMEM)) { + if (ldbt->jarr != NULL) + (*jenv)->ReleaseByteArrayElements(jenv, + ldbt->jarr, ldbt->orig_data, 0); + + if (dbt->size > dbt->ulen && + (t = (*jenv)->ExceptionOccurred(jenv)) != NULL && + (*jenv)->IsInstanceOf(jenv, t, memex_class)) { + (*jenv)->CallNonvirtualVoidMethod(jenv, t, memex_class, + memex_update_method, jdbt); + /* + * We have to rethrow the exception because calling + * into Java clears it. 
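
Editor's note (not part of the patch): the rewritten __dbj_dbt_copyin above chooses between two input strategies - DB_DBT_USERMEM pins the caller's byte[] so the library writes straight into it, while the default path takes a private copy so the array is never held pinned across the call (which is also why the secondary-key and append-recno callbacks earlier could drop their extra copy). A compressed sketch of just that decision, with malloc standing in for __os_umalloc and the offset/ulen validation omitted.

#include <errno.h>
#include <stdlib.h>
#include <jni.h>

/*
 * Editorial sketch: the two input paths of the rewritten copy-in.
 * "usermem" corresponds to DB_DBT_USERMEM; everything here is
 * illustrative, not the patch's code.
 */
static int
example_copyin(JNIEnv *jenv, jbyteArray jarr, jsize size, int usermem,
    void **datap, jbyte **pinnedp)
{
	*pinnedp = NULL;

	if (usermem) {
		/* Pin (or get a JVM-managed copy of) the array in place. */
		if ((*pinnedp =
		    (*jenv)->GetByteArrayElements(jenv, jarr, NULL)) == NULL)
			return (EINVAL);	/* Exception pending. */
		*datap = *pinnedp;
		return (0);
	}

	/* Default path: take a private copy of the bytes. */
	if ((*datap = malloc((size_t)size)) == NULL)
		return (ENOMEM);
	(*jenv)->GetByteArrayRegion(jenv, jarr, 0, size, (jbyte *)*datap);
	if ((*jenv)->ExceptionOccurred(jenv) != NULL) {
		free(*datap);
		return (EINVAL);
	}
	return (0);
}
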
+ */ + (*jenv)->Throw(jenv, t); + } + } else { + if (dbt->size > 0 && dbt->data != ldbt->orig_data) { + if (ldbt->reuse && + (jsize)(ldbt->offset + dbt->size) <= ldbt->array_len) + (*jenv)->SetByteArrayRegion(jenv, + ldbt->jarr, ldbt->offset, (jsize)dbt->size, + (jbyte *)dbt->data); + else + __dbj_dbt_copyout(jenv, dbt, NULL, jdbt); + (void)__os_ufree(NULL, dbt->data); + } + + if (ldbt->orig_data != NULL) + (void)__os_ufree(NULL, ldbt->orig_data); } } %} %typemap(in) DBT * (DBT_LOCKED ldbt) %{ - if (__dbj_dbt_copyin(jenv, &ldbt, $input) != 0) + if (__dbj_dbt_copyin(jenv, &ldbt, &$1, $input, 0) != 0) { return $null; - $1 = &ldbt.dbt; -%} + }%} + +/* Special case for the cdata param in DbEnv.rep_start - it may be null */ +%typemap(in) DBT *cdata (DBT_LOCKED ldbt) %{ + if (__dbj_dbt_copyin(jenv, &ldbt, &$1, $input, 1) != 0) { + return $null; + }%} + +%typemap(freearg) DBT * %{ __dbj_dbt_release(jenv, $input, $1, &ldbt$argnum); %} -%typemap(freearg) const DBT * %{ - if (ldbt$argnum.jarr != NULL) { - (*jenv)->ReleaseByteArrayElements(jenv, ldbt$argnum.jarr, - ldbt$argnum.orig_data, 0); + +/* DbLsn handling */ +JAVA_TYPEMAP(DB_LSN *, com.sleepycat.db.LogSequenceNumber, jobject) + +%typemap(check) DB_LSN * %{ + if ($input == NULL) { + __dbj_throw(jenv, EINVAL, "LogSequenceNumber must not be null", NULL, NULL); + return $null; } %} -%typemap(freearg) DBT * %{ - __dbj_dbt_release(jenv, $input, $1, &ldbt$argnum); +%typemap(in) DB_LSN * (DB_LSN lsn) %{ + /* XXX: TODO */ + $1 = &lsn; %} -// Various typemaps +%typemap(freearg) DB_LSN * %{ + /* XXX: TODO */ + /* -- __dbj_dbt_release(jenv, $input, $1, &lsn$argnum); */ +%} + + +/* Various typemaps */ JAVA_TYPEMAP(time_t, long, jlong) JAVA_TYPEMAP(time_t *, long, jlong) %typemap(in) time_t * (time_t time) %{ @@ -177,12 +239,12 @@ JAVA_TYPEMAP(void *client, DbClient, jobject) %typemap(check) void *client %{ if ($1 != NULL) { __dbj_throw(jenv, EINVAL, "DbEnv.set_rpc_server client arg " - "must be null; reserved for future use", NULL, JDBENV); + "must be null; reserved for future use", NULL, JDBENV); return $null; } %} -JAVA_TYPEMAP(DB_KEY_RANGE *, DbKeyRange, jobject) +JAVA_TYPEMAP(DB_KEY_RANGE *, com.sleepycat.db.KeyRange, jobject) %typemap(in) DB_KEY_RANGE * (DB_KEY_RANGE range) { $1 = ⦥ } @@ -361,7 +423,7 @@ JAVA_TYPEMAP(struct __db_out_stream, java.io.OutputStream, jobject) $1.callback = __dbj_verify_callback; } -JAVA_TYPEMAP(DB_PREPLIST *, DbPreplist[], jobjectArray) +JAVA_TYPEMAP(DB_PREPLIST *, com.sleepycat.db.PreparedTransaction[], jobjectArray) %typemap(out) DB_PREPLIST * { int i, len; @@ -390,12 +452,12 @@ JAVA_TYPEMAP(DB_PREPLIST *, DbPreplist[], jobjectArray) __os_ufree(NULL, $1); } -JAVA_TYPEMAP(DB_LOCKREQ *, DbLockRequest[], jobjectArray) +JAVA_TYPEMAP(DB_LOCKREQ *, com.sleepycat.db.LockRequest[], jobjectArray) %native(DbEnv_lock_vec) void DbEnv_lock_vec(DB_ENV *dbenv, u_int32_t locker, u_int32_t flags, DB_LOCKREQ *list, int offset, int nlist); %{ -JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1vec +JNIEXPORT void JNICALL Java_com_sleepycat_db_internal_db_1javaJNI_DbEnv_1lock_1vec (JNIEnv *jenv, jclass jcls, jlong jdbenvp, jint locker, jint flags, jobjectArray list, jint offset, jint count) { DB_ENV *dbenv; @@ -463,13 +525,12 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1vec case DB_LOCK_GET: /* Needed: mode, obj. Returned: lock. 
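
Editor's note (not part of the patch): one idiom worth flagging for readers of the lock_vec code above (and of every wrapper in this file) is how native handles cross the JNI boundary - they are stored in a Java long field (lock_cptr_fid, read back in the DB_LOCK_PUT case that follows) and recovered with a pointer-width cast such as *(DB_LOCK **)&jlockp. A tiny self-contained sketch of that round-trip; the struct and function names are invented.

#include <jni.h>

/*
 * Editorial sketch: storing a native pointer in a Java long and getting
 * it back.  "example_handle" is invented; the *(T **)&jlong cast is the
 * same idiom the generated code uses.
 */
struct example_handle { int unused; };

static jlong
example_wrap(struct example_handle *h)
{
	jlong jptr = 0;

	*(struct example_handle **)&jptr = h;	/* pointer -> jlong */
	return (jptr);
}

static struct example_handle *
example_unwrap(JNIEnv *jenv, jobject jobj, jfieldID cptr_fid)
{
	jlong jptr = (*jenv)->GetLongField(jenv, jobj, cptr_fid);

	return (*(struct example_handle **)&jptr);	/* jlong -> pointer */
}
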
*/ prereq->mode = (*jenv)->GetIntField(jenv, jlockreq, - lockreq_mode_fid); + lockreq_modeflag_fid); jobj = (*jenv)->GetObjectField(jenv, jlockreq, lockreq_obj_fid); - if ((err = - __dbj_dbt_copyin(jenv, &locked_dbts[i], jobj)) != 0) + if ((err = __dbj_dbt_copyin(jenv, + &locked_dbts[i], &prereq->obj, jobj, 0)) != 0) goto out2; - prereq->obj = &locked_dbts[i].dbt; break; case DB_LOCK_PUT: /* Needed: lock. Ignored: mode, obj. */ @@ -479,7 +540,7 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1vec (jlockp = (*jenv)->GetLongField(jenv, jlock, lock_cptr_fid)) == 0L) { __dbj_throw(jenv, EINVAL, - "DbLockRequest lock field is NULL", NULL, jdbenv); + "LockRequest lock field is NULL", NULL, jdbenv); goto out2; } lockp = *(DB_LOCK **)&jlockp; @@ -493,10 +554,9 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_db_1javaJNI_DbEnv_1lock_1vec /* Needed: obj. Ignored: lock, mode. */ jobj = (*jenv)->GetObjectField(jenv, jlockreq, lockreq_obj_fid); - if ((err = - __dbj_dbt_copyin(jenv, &locked_dbts[i], jobj)) != 0) + if ((err = __dbj_dbt_copyin(jenv, + &locked_dbts[i], &prereq->obj, jobj, 0)) != 0) goto out2; - prereq->obj = &locked_dbts[i].dbt; break; default: __dbj_throw(jenv, EINVAL, diff --git a/db/libdb_java/java_util.i b/db/libdb_java/java_util.i index c2555ae17..22688813b 100644 --- a/db/libdb_java/java_util.i +++ b/db/libdb_java/java_util.i @@ -1,26 +1,38 @@ %pragma(java) jniclasscode=%{ static { - // An alternate library name can be specified via a property. + /* An alternate library name can be specified via a property. */ String libname; - if ((libname = System.getProperty("sleepycat.db.libfile")) - != null) + if ((libname = System.getProperty("sleepycat.db.libfile")) != null) System.load(libname); - else if ((libname = System.getProperty("sleepycat.db.libname")) - != null) + else if ((libname = System.getProperty("sleepycat.db.libname")) != null) System.loadLibrary(libname); else { String os = System.getProperty("os.name"); if (os != null && os.startsWith("Windows")) { - // library name is e.g., "libdb_java30.dll" - // on Windows - System.loadLibrary("libdb_java" + + /* + * On Windows, library name is something like + * "libdb_java42.dll" or "libdb_java42d.dll". + */ + libname = "libdb_java" + DbConstants.DB_VERSION_MAJOR + - DbConstants.DB_VERSION_MINOR + - (DbConstants.DB_DEBUG ? "d" : "")); + DbConstants.DB_VERSION_MINOR; + + try { + System.loadLibrary(libname); + } catch (UnsatisfiedLinkError e) { + try { + libname += "d"; + System.loadLibrary(libname); + } catch (UnsatisfiedLinkError e2) { + throw e; + } + } } else { - // library name is e.g. "libdb_java-3.0.so" - // on UNIX + /* + * On UNIX, library name is something like + * "libdb_java-3.0.so". + */ System.loadLibrary("db_java-" + DbConstants.DB_VERSION_MAJOR + "." 
+ DbConstants.DB_VERSION_MINOR); @@ -28,6 +40,11 @@ } initialize(); + + if (DbEnv_get_version_major() != DbConstants.DB_VERSION_MAJOR || + DbEnv_get_version_minor() != DbConstants.DB_VERSION_MINOR || + DbEnv_get_version_patch() != DbConstants.DB_VERSION_PATCH) + throw new RuntimeException("Berkeley DB library version doesn't match Java classes"); } static native final void initialize(); @@ -43,7 +60,8 @@ #define DB_PKG "com/sleepycat/db/" /* Forward declarations */ -static int __dbj_throw(JNIEnv *jenv, int err, const char *msg, jobject obj, jobject jdbenv); +static int __dbj_throw(JNIEnv *jenv, + int err, const char *msg, jobject obj, jobject jdbenv); /* Global data - JVM handle, classes, fields and methods */ static JavaVM *javavm; @@ -51,14 +69,14 @@ static JavaVM *javavm; static jclass db_class, dbc_class, dbenv_class, dbt_class, dblsn_class; static jclass dbpreplist_class, dbtxn_class; static jclass keyrange_class; -static jclass btree_stat_class, hash_stat_class, lock_stat_class; +static jclass bt_stat_class, h_stat_class, lock_stat_class; static jclass log_stat_class, mpool_stat_class, mpool_fstat_class; -static jclass queue_stat_class, rep_stat_class, txn_stat_class; +static jclass qam_stat_class, rep_stat_class, seq_stat_class, txn_stat_class; static jclass txn_active_class; static jclass lock_class, lockreq_class, rep_processmsg_class; static jclass dbex_class, deadex_class, lockex_class, memex_class; -static jclass runrecex_class; -static jclass filenotfoundex_class, illegalargex_class; +static jclass rephandledeadex_class, runrecex_class; +static jclass filenotfoundex_class, illegalargex_class, outofmemerr_class; static jclass bytearray_class, string_class, outputstream_class; static jfieldID dbc_cptr_fid; @@ -66,24 +84,237 @@ static jfieldID dbt_data_fid, dbt_size_fid, dbt_ulen_fid, dbt_dlen_fid; static jfieldID dbt_doff_fid, dbt_flags_fid, dbt_offset_fid; static jfieldID kr_less_fid, kr_equal_fid, kr_greater_fid; static jfieldID lock_cptr_fid; -static jfieldID lockreq_op_fid, lockreq_mode_fid, lockreq_timeout_fid; +static jfieldID lockreq_op_fid, lockreq_modeflag_fid, lockreq_timeout_fid; static jfieldID lockreq_obj_fid, lockreq_lock_fid; static jfieldID rep_processmsg_envid; -static jfieldID txn_stat_active_fid; + +/* BEGIN-STAT-FIELD-DECLS */ +static jfieldID bt_stat_bt_magic_fid; +static jfieldID bt_stat_bt_version_fid; +static jfieldID bt_stat_bt_metaflags_fid; +static jfieldID bt_stat_bt_nkeys_fid; +static jfieldID bt_stat_bt_ndata_fid; +static jfieldID bt_stat_bt_pagesize_fid; +static jfieldID bt_stat_bt_maxkey_fid; +static jfieldID bt_stat_bt_minkey_fid; +static jfieldID bt_stat_bt_re_len_fid; +static jfieldID bt_stat_bt_re_pad_fid; +static jfieldID bt_stat_bt_levels_fid; +static jfieldID bt_stat_bt_int_pg_fid; +static jfieldID bt_stat_bt_leaf_pg_fid; +static jfieldID bt_stat_bt_dup_pg_fid; +static jfieldID bt_stat_bt_over_pg_fid; +static jfieldID bt_stat_bt_empty_pg_fid; +static jfieldID bt_stat_bt_free_fid; +static jfieldID bt_stat_bt_int_pgfree_fid; +static jfieldID bt_stat_bt_leaf_pgfree_fid; +static jfieldID bt_stat_bt_dup_pgfree_fid; +static jfieldID bt_stat_bt_over_pgfree_fid; +static jfieldID h_stat_hash_magic_fid; +static jfieldID h_stat_hash_version_fid; +static jfieldID h_stat_hash_metaflags_fid; +static jfieldID h_stat_hash_nkeys_fid; +static jfieldID h_stat_hash_ndata_fid; +static jfieldID h_stat_hash_pagesize_fid; +static jfieldID h_stat_hash_ffactor_fid; +static jfieldID h_stat_hash_buckets_fid; +static jfieldID h_stat_hash_free_fid; +static jfieldID 
h_stat_hash_bfree_fid; +static jfieldID h_stat_hash_bigpages_fid; +static jfieldID h_stat_hash_big_bfree_fid; +static jfieldID h_stat_hash_overflows_fid; +static jfieldID h_stat_hash_ovfl_free_fid; +static jfieldID h_stat_hash_dup_fid; +static jfieldID h_stat_hash_dup_free_fid; +static jfieldID lock_stat_st_id_fid; +static jfieldID lock_stat_st_cur_maxid_fid; +static jfieldID lock_stat_st_maxlocks_fid; +static jfieldID lock_stat_st_maxlockers_fid; +static jfieldID lock_stat_st_maxobjects_fid; +static jfieldID lock_stat_st_nmodes_fid; +static jfieldID lock_stat_st_nlocks_fid; +static jfieldID lock_stat_st_maxnlocks_fid; +static jfieldID lock_stat_st_nlockers_fid; +static jfieldID lock_stat_st_maxnlockers_fid; +static jfieldID lock_stat_st_nobjects_fid; +static jfieldID lock_stat_st_maxnobjects_fid; +static jfieldID lock_stat_st_nconflicts_fid; +static jfieldID lock_stat_st_nrequests_fid; +static jfieldID lock_stat_st_nreleases_fid; +static jfieldID lock_stat_st_nnowaits_fid; +static jfieldID lock_stat_st_ndeadlocks_fid; +static jfieldID lock_stat_st_locktimeout_fid; +static jfieldID lock_stat_st_nlocktimeouts_fid; +static jfieldID lock_stat_st_txntimeout_fid; +static jfieldID lock_stat_st_ntxntimeouts_fid; +static jfieldID lock_stat_st_region_wait_fid; +static jfieldID lock_stat_st_region_nowait_fid; +static jfieldID lock_stat_st_regsize_fid; +static jfieldID log_stat_st_magic_fid; +static jfieldID log_stat_st_version_fid; +static jfieldID log_stat_st_mode_fid; +static jfieldID log_stat_st_lg_bsize_fid; +static jfieldID log_stat_st_lg_size_fid; +static jfieldID log_stat_st_w_bytes_fid; +static jfieldID log_stat_st_w_mbytes_fid; +static jfieldID log_stat_st_wc_bytes_fid; +static jfieldID log_stat_st_wc_mbytes_fid; +static jfieldID log_stat_st_wcount_fid; +static jfieldID log_stat_st_wcount_fill_fid; +static jfieldID log_stat_st_scount_fid; +static jfieldID log_stat_st_region_wait_fid; +static jfieldID log_stat_st_region_nowait_fid; +static jfieldID log_stat_st_cur_file_fid; +static jfieldID log_stat_st_cur_offset_fid; +static jfieldID log_stat_st_disk_file_fid; +static jfieldID log_stat_st_disk_offset_fid; +static jfieldID log_stat_st_regsize_fid; +static jfieldID log_stat_st_maxcommitperflush_fid; +static jfieldID log_stat_st_mincommitperflush_fid; +static jfieldID mpool_fstat_file_name_fid; +static jfieldID mpool_fstat_st_pagesize_fid; +static jfieldID mpool_fstat_st_map_fid; +static jfieldID mpool_fstat_st_cache_hit_fid; +static jfieldID mpool_fstat_st_cache_miss_fid; +static jfieldID mpool_fstat_st_page_create_fid; +static jfieldID mpool_fstat_st_page_in_fid; +static jfieldID mpool_fstat_st_page_out_fid; +static jfieldID mpool_stat_st_gbytes_fid; +static jfieldID mpool_stat_st_bytes_fid; +static jfieldID mpool_stat_st_ncache_fid; +static jfieldID mpool_stat_st_regsize_fid; +static jfieldID mpool_stat_st_mmapsize_fid; +static jfieldID mpool_stat_st_maxopenfd_fid; +static jfieldID mpool_stat_st_maxwrite_fid; +static jfieldID mpool_stat_st_maxwrite_sleep_fid; +static jfieldID mpool_stat_st_map_fid; +static jfieldID mpool_stat_st_cache_hit_fid; +static jfieldID mpool_stat_st_cache_miss_fid; +static jfieldID mpool_stat_st_page_create_fid; +static jfieldID mpool_stat_st_page_in_fid; +static jfieldID mpool_stat_st_page_out_fid; +static jfieldID mpool_stat_st_ro_evict_fid; +static jfieldID mpool_stat_st_rw_evict_fid; +static jfieldID mpool_stat_st_page_trickle_fid; +static jfieldID mpool_stat_st_pages_fid; +static jfieldID mpool_stat_st_page_clean_fid; +static jfieldID 
mpool_stat_st_page_dirty_fid; +static jfieldID mpool_stat_st_hash_buckets_fid; +static jfieldID mpool_stat_st_hash_searches_fid; +static jfieldID mpool_stat_st_hash_longest_fid; +static jfieldID mpool_stat_st_hash_examined_fid; +static jfieldID mpool_stat_st_hash_nowait_fid; +static jfieldID mpool_stat_st_hash_wait_fid; +static jfieldID mpool_stat_st_hash_max_wait_fid; +static jfieldID mpool_stat_st_region_nowait_fid; +static jfieldID mpool_stat_st_region_wait_fid; +static jfieldID mpool_stat_st_alloc_fid; +static jfieldID mpool_stat_st_alloc_buckets_fid; +static jfieldID mpool_stat_st_alloc_max_buckets_fid; +static jfieldID mpool_stat_st_alloc_pages_fid; +static jfieldID mpool_stat_st_alloc_max_pages_fid; +static jfieldID qam_stat_qs_magic_fid; +static jfieldID qam_stat_qs_version_fid; +static jfieldID qam_stat_qs_metaflags_fid; +static jfieldID qam_stat_qs_nkeys_fid; +static jfieldID qam_stat_qs_ndata_fid; +static jfieldID qam_stat_qs_pagesize_fid; +static jfieldID qam_stat_qs_extentsize_fid; +static jfieldID qam_stat_qs_pages_fid; +static jfieldID qam_stat_qs_re_len_fid; +static jfieldID qam_stat_qs_re_pad_fid; +static jfieldID qam_stat_qs_pgfree_fid; +static jfieldID qam_stat_qs_first_recno_fid; +static jfieldID qam_stat_qs_cur_recno_fid; +static jfieldID rep_stat_st_status_fid; +static jfieldID rep_stat_st_next_lsn_fid; +static jfieldID rep_stat_st_waiting_lsn_fid; +static jfieldID rep_stat_st_next_pg_fid; +static jfieldID rep_stat_st_waiting_pg_fid; +static jfieldID rep_stat_st_dupmasters_fid; +static jfieldID rep_stat_st_env_id_fid; +static jfieldID rep_stat_st_env_priority_fid; +static jfieldID rep_stat_st_gen_fid; +static jfieldID rep_stat_st_egen_fid; +static jfieldID rep_stat_st_log_duplicated_fid; +static jfieldID rep_stat_st_log_queued_fid; +static jfieldID rep_stat_st_log_queued_max_fid; +static jfieldID rep_stat_st_log_queued_total_fid; +static jfieldID rep_stat_st_log_records_fid; +static jfieldID rep_stat_st_log_requested_fid; +static jfieldID rep_stat_st_master_fid; +static jfieldID rep_stat_st_master_changes_fid; +static jfieldID rep_stat_st_msgs_badgen_fid; +static jfieldID rep_stat_st_msgs_processed_fid; +static jfieldID rep_stat_st_msgs_recover_fid; +static jfieldID rep_stat_st_msgs_send_failures_fid; +static jfieldID rep_stat_st_msgs_sent_fid; +static jfieldID rep_stat_st_newsites_fid; +static jfieldID rep_stat_st_nsites_fid; +static jfieldID rep_stat_st_nthrottles_fid; +static jfieldID rep_stat_st_outdated_fid; +static jfieldID rep_stat_st_pg_duplicated_fid; +static jfieldID rep_stat_st_pg_records_fid; +static jfieldID rep_stat_st_pg_requested_fid; +static jfieldID rep_stat_st_startup_complete_fid; +static jfieldID rep_stat_st_txns_applied_fid; +static jfieldID rep_stat_st_elections_fid; +static jfieldID rep_stat_st_elections_won_fid; +static jfieldID rep_stat_st_election_cur_winner_fid; +static jfieldID rep_stat_st_election_gen_fid; +static jfieldID rep_stat_st_election_lsn_fid; +static jfieldID rep_stat_st_election_nsites_fid; +static jfieldID rep_stat_st_election_nvotes_fid; +static jfieldID rep_stat_st_election_priority_fid; +static jfieldID rep_stat_st_election_status_fid; +static jfieldID rep_stat_st_election_tiebreaker_fid; +static jfieldID rep_stat_st_election_votes_fid; +static jfieldID seq_stat_st_wait_fid; +static jfieldID seq_stat_st_nowait_fid; +static jfieldID seq_stat_st_current_fid; +static jfieldID seq_stat_st_value_fid; +static jfieldID seq_stat_st_last_value_fid; +static jfieldID seq_stat_st_min_fid; +static jfieldID seq_stat_st_max_fid; +static 
jfieldID seq_stat_st_cache_size_fid; +static jfieldID seq_stat_st_flags_fid; +static jfieldID txn_stat_st_last_ckp_fid; +static jfieldID txn_stat_st_time_ckp_fid; +static jfieldID txn_stat_st_last_txnid_fid; +static jfieldID txn_stat_st_maxtxns_fid; +static jfieldID txn_stat_st_naborts_fid; +static jfieldID txn_stat_st_nbegins_fid; +static jfieldID txn_stat_st_ncommits_fid; +static jfieldID txn_stat_st_nactive_fid; +static jfieldID txn_stat_st_nrestores_fid; +static jfieldID txn_stat_st_maxnactive_fid; +static jfieldID txn_stat_st_txnarray_fid; +static jfieldID txn_stat_st_region_wait_fid; +static jfieldID txn_stat_st_region_nowait_fid; +static jfieldID txn_stat_st_regsize_fid; +static jfieldID txn_active_txnid_fid; +static jfieldID txn_active_parentid_fid; +static jfieldID txn_active_lsn_fid; +static jfieldID txn_active_xa_status_fid; +static jfieldID txn_active_xid_fid; +/* END-STAT-FIELD-DECLS */ static jmethodID dbenv_construct, dbt_construct, dblsn_construct; static jmethodID dbpreplist_construct, dbtxn_construct; -static jmethodID btree_stat_construct, hash_stat_construct; +static jmethodID bt_stat_construct, h_stat_construct; static jmethodID lock_stat_construct, log_stat_construct, mpool_stat_construct; -static jmethodID mpool_fstat_construct, queue_stat_construct; -static jmethodID rep_stat_construct, txn_stat_construct, txn_active_construct; +static jmethodID mpool_fstat_construct, qam_stat_construct; +static jmethodID rep_stat_construct, seq_stat_construct; +static jmethodID txn_stat_construct, txn_active_construct; static jmethodID dbex_construct, deadex_construct, lockex_construct; -static jmethodID memex_construct, memex_update_method, runrecex_construct; +static jmethodID memex_construct, memex_update_method; +static jmethodID rephandledeadex_construct, runrecex_construct; static jmethodID filenotfoundex_construct, illegalargex_construct; +static jmethodID outofmemerr_construct; static jmethodID lock_construct; static jmethodID app_dispatch_method, errcall_method, env_feedback_method; -static jmethodID paniccall_method, rep_transport_method; +static jmethodID msgcall_method, paniccall_method, rep_transport_method; static jmethodID append_recno_method, bt_compare_method, bt_prefix_method; static jmethodID db_feedback_method, dup_compare_method, h_hash_method; static jmethodID seckey_create_method; @@ -94,37 +325,40 @@ const struct { jclass *cl; const char *name; } all_classes[] = { - { &dbenv_class, DB_PKG "DbEnv" }, - { &db_class, DB_PKG "Db" }, - { &dbc_class, DB_PKG "Dbc" }, - { &dbt_class, DB_PKG "Dbt" }, - { &dblsn_class, DB_PKG "DbLsn" }, - { &dbpreplist_class, DB_PKG "DbPreplist" }, - { &dbtxn_class, DB_PKG "DbTxn" }, - - { &btree_stat_class, DB_PKG "DbBtreeStat" }, - { &hash_stat_class, DB_PKG "DbHashStat" }, - { &lock_stat_class, DB_PKG "DbLockStat" }, - { &log_stat_class, DB_PKG "DbLogStat" }, - { &mpool_fstat_class, DB_PKG "DbMpoolFStat" }, - { &mpool_stat_class, DB_PKG "DbMpoolStat" }, - { &queue_stat_class, DB_PKG "DbQueueStat" }, - { &rep_stat_class, DB_PKG "DbRepStat" }, - { &txn_stat_class, DB_PKG "DbTxnStat" }, - { &txn_active_class, DB_PKG "DbTxnStat$Active" }, - - { &keyrange_class, DB_PKG "DbKeyRange" }, - { &lock_class, DB_PKG "DbLock" }, - { &lockreq_class, DB_PKG "DbLockRequest" }, - { &rep_processmsg_class, DB_PKG "DbEnv$RepProcessMessage" }, - - { &dbex_class, DB_PKG "DbException" }, - { &deadex_class, DB_PKG "DbDeadlockException" }, - { &lockex_class, DB_PKG "DbLockNotGrantedException" }, - { &memex_class, DB_PKG "DbMemoryException" }, - { 
&runrecex_class, DB_PKG "DbRunRecoveryException" }, + { &dbenv_class, DB_PKG "internal/DbEnv" }, + { &db_class, DB_PKG "internal/Db" }, + { &dbc_class, DB_PKG "internal/Dbc" }, + { &dbt_class, DB_PKG "DatabaseEntry" }, + { &dblsn_class, DB_PKG "LogSequenceNumber" }, + { &dbpreplist_class, DB_PKG "PreparedTransaction" }, + { &dbtxn_class, DB_PKG "internal/DbTxn" }, + + { &bt_stat_class, DB_PKG "BtreeStats" }, + { &h_stat_class, DB_PKG "HashStats" }, + { &lock_stat_class, DB_PKG "LockStats" }, + { &log_stat_class, DB_PKG "LogStats" }, + { &mpool_fstat_class, DB_PKG "CacheFileStats" }, + { &mpool_stat_class, DB_PKG "CacheStats" }, + { &qam_stat_class, DB_PKG "QueueStats" }, + { &rep_stat_class, DB_PKG "ReplicationStats" }, + { &seq_stat_class, DB_PKG "SequenceStats" }, + { &txn_stat_class, DB_PKG "TransactionStats" }, + { &txn_active_class, DB_PKG "TransactionStats$Active" }, + + { &keyrange_class, DB_PKG "KeyRange" }, + { &lock_class, DB_PKG "internal/DbLock" }, + { &lockreq_class, DB_PKG "LockRequest" }, + { &rep_processmsg_class, DB_PKG "internal/DbEnv$RepProcessMessage" }, + + { &dbex_class, DB_PKG "DatabaseException" }, + { &deadex_class, DB_PKG "DeadlockException" }, + { &lockex_class, DB_PKG "LockNotGrantedException" }, + { &memex_class, DB_PKG "MemoryException" }, + { &rephandledeadex_class, DB_PKG "ReplicationHandleDeadException" }, + { &runrecex_class, DB_PKG "RunRecoveryException" }, { &filenotfoundex_class, "java/io/FileNotFoundException" }, { &illegalargex_class, "java/lang/IllegalArgumentException" }, + { &outofmemerr_class, "java/lang/OutOfMemoryError" }, { &bytearray_class, "[B" }, { &string_class, "java/lang/String" }, @@ -154,14 +388,223 @@ const struct { { &lock_cptr_fid, &lock_class, "swigCPtr", "J" }, { &lockreq_op_fid, &lockreq_class, "op", "I" }, - { &lockreq_mode_fid, &lockreq_class, "mode", "I" }, + { &lockreq_modeflag_fid, &lockreq_class, "modeFlag", "I" }, { &lockreq_timeout_fid, &lockreq_class, "timeout", "I" }, - { &lockreq_obj_fid, &lockreq_class, "obj", "L" DB_PKG "Dbt;" }, - { &lockreq_lock_fid, &lockreq_class, "lock", "L" DB_PKG "DbLock;" }, - - { &rep_processmsg_envid, &rep_processmsg_class, "envid", "I" }, - { &txn_stat_active_fid, &txn_stat_class, "st_txnarray", - "[L" DB_PKG "DbTxnStat$Active;" } + { &lockreq_obj_fid, &lockreq_class, "obj", "L" DB_PKG "DatabaseEntry;" }, + { &lockreq_lock_fid, &lockreq_class, "lock", "L" DB_PKG "internal/DbLock;" }, + +/* BEGIN-STAT-FIELDS */ + { &bt_stat_bt_magic_fid, &bt_stat_class, "bt_magic", "I" }, + { &bt_stat_bt_version_fid, &bt_stat_class, "bt_version", "I" }, + { &bt_stat_bt_metaflags_fid, &bt_stat_class, "bt_metaflags", "I" }, + { &bt_stat_bt_nkeys_fid, &bt_stat_class, "bt_nkeys", "I" }, + { &bt_stat_bt_ndata_fid, &bt_stat_class, "bt_ndata", "I" }, + { &bt_stat_bt_pagesize_fid, &bt_stat_class, "bt_pagesize", "I" }, + { &bt_stat_bt_maxkey_fid, &bt_stat_class, "bt_maxkey", "I" }, + { &bt_stat_bt_minkey_fid, &bt_stat_class, "bt_minkey", "I" }, + { &bt_stat_bt_re_len_fid, &bt_stat_class, "bt_re_len", "I" }, + { &bt_stat_bt_re_pad_fid, &bt_stat_class, "bt_re_pad", "I" }, + { &bt_stat_bt_levels_fid, &bt_stat_class, "bt_levels", "I" }, + { &bt_stat_bt_int_pg_fid, &bt_stat_class, "bt_int_pg", "I" }, + { &bt_stat_bt_leaf_pg_fid, &bt_stat_class, "bt_leaf_pg", "I" }, + { &bt_stat_bt_dup_pg_fid, &bt_stat_class, "bt_dup_pg", "I" }, + { &bt_stat_bt_over_pg_fid, &bt_stat_class, "bt_over_pg", "I" }, + { &bt_stat_bt_empty_pg_fid, &bt_stat_class, "bt_empty_pg", "I" }, + { &bt_stat_bt_free_fid, &bt_stat_class, "bt_free", "I" }, + { 
&bt_stat_bt_int_pgfree_fid, &bt_stat_class, "bt_int_pgfree", "I" }, + { &bt_stat_bt_leaf_pgfree_fid, &bt_stat_class, "bt_leaf_pgfree", "I" }, + { &bt_stat_bt_dup_pgfree_fid, &bt_stat_class, "bt_dup_pgfree", "I" }, + { &bt_stat_bt_over_pgfree_fid, &bt_stat_class, "bt_over_pgfree", "I" }, + { &h_stat_hash_magic_fid, &h_stat_class, "hash_magic", "I" }, + { &h_stat_hash_version_fid, &h_stat_class, "hash_version", "I" }, + { &h_stat_hash_metaflags_fid, &h_stat_class, "hash_metaflags", "I" }, + { &h_stat_hash_nkeys_fid, &h_stat_class, "hash_nkeys", "I" }, + { &h_stat_hash_ndata_fid, &h_stat_class, "hash_ndata", "I" }, + { &h_stat_hash_pagesize_fid, &h_stat_class, "hash_pagesize", "I" }, + { &h_stat_hash_ffactor_fid, &h_stat_class, "hash_ffactor", "I" }, + { &h_stat_hash_buckets_fid, &h_stat_class, "hash_buckets", "I" }, + { &h_stat_hash_free_fid, &h_stat_class, "hash_free", "I" }, + { &h_stat_hash_bfree_fid, &h_stat_class, "hash_bfree", "I" }, + { &h_stat_hash_bigpages_fid, &h_stat_class, "hash_bigpages", "I" }, + { &h_stat_hash_big_bfree_fid, &h_stat_class, "hash_big_bfree", "I" }, + { &h_stat_hash_overflows_fid, &h_stat_class, "hash_overflows", "I" }, + { &h_stat_hash_ovfl_free_fid, &h_stat_class, "hash_ovfl_free", "I" }, + { &h_stat_hash_dup_fid, &h_stat_class, "hash_dup", "I" }, + { &h_stat_hash_dup_free_fid, &h_stat_class, "hash_dup_free", "I" }, + { &lock_stat_st_id_fid, &lock_stat_class, "st_id", "I" }, + { &lock_stat_st_cur_maxid_fid, &lock_stat_class, "st_cur_maxid", "I" }, + { &lock_stat_st_maxlocks_fid, &lock_stat_class, "st_maxlocks", "I" }, + { &lock_stat_st_maxlockers_fid, &lock_stat_class, "st_maxlockers", "I" }, + { &lock_stat_st_maxobjects_fid, &lock_stat_class, "st_maxobjects", "I" }, + { &lock_stat_st_nmodes_fid, &lock_stat_class, "st_nmodes", "I" }, + { &lock_stat_st_nlocks_fid, &lock_stat_class, "st_nlocks", "I" }, + { &lock_stat_st_maxnlocks_fid, &lock_stat_class, "st_maxnlocks", "I" }, + { &lock_stat_st_nlockers_fid, &lock_stat_class, "st_nlockers", "I" }, + { &lock_stat_st_maxnlockers_fid, &lock_stat_class, "st_maxnlockers", "I" }, + { &lock_stat_st_nobjects_fid, &lock_stat_class, "st_nobjects", "I" }, + { &lock_stat_st_maxnobjects_fid, &lock_stat_class, "st_maxnobjects", "I" }, + { &lock_stat_st_nconflicts_fid, &lock_stat_class, "st_nconflicts", "I" }, + { &lock_stat_st_nrequests_fid, &lock_stat_class, "st_nrequests", "I" }, + { &lock_stat_st_nreleases_fid, &lock_stat_class, "st_nreleases", "I" }, + { &lock_stat_st_nnowaits_fid, &lock_stat_class, "st_nnowaits", "I" }, + { &lock_stat_st_ndeadlocks_fid, &lock_stat_class, "st_ndeadlocks", "I" }, + { &lock_stat_st_locktimeout_fid, &lock_stat_class, "st_locktimeout", "I" }, + { &lock_stat_st_nlocktimeouts_fid, &lock_stat_class, "st_nlocktimeouts", "I" }, + { &lock_stat_st_txntimeout_fid, &lock_stat_class, "st_txntimeout", "I" }, + { &lock_stat_st_ntxntimeouts_fid, &lock_stat_class, "st_ntxntimeouts", "I" }, + { &lock_stat_st_region_wait_fid, &lock_stat_class, "st_region_wait", "I" }, + { &lock_stat_st_region_nowait_fid, &lock_stat_class, "st_region_nowait", "I" }, + { &lock_stat_st_regsize_fid, &lock_stat_class, "st_regsize", "I" }, + { &log_stat_st_magic_fid, &log_stat_class, "st_magic", "I" }, + { &log_stat_st_version_fid, &log_stat_class, "st_version", "I" }, + { &log_stat_st_mode_fid, &log_stat_class, "st_mode", "I" }, + { &log_stat_st_lg_bsize_fid, &log_stat_class, "st_lg_bsize", "I" }, + { &log_stat_st_lg_size_fid, &log_stat_class, "st_lg_size", "I" }, + { &log_stat_st_w_bytes_fid, &log_stat_class, "st_w_bytes", "I" }, 
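/*
 * A hint for reading this table: the third column is the Java field name
 * and the fourth its JNI type signature -- "I" is int, "J" is long,
 * "[B" is byte[], and the "L...;" forms are object references such as
 * LogSequenceNumber.  Each cached jfieldID is presumably used later by
 * the generated stat copy-out code for direct field stores, along the
 * lines of:
 *
 *     (*jenv)->SetIntField(jenv, jstat,
 *         log_stat_st_w_bytes_fid, (jint)statp->st_w_bytes);
 *
 * where jstat and statp stand in for the Java and C stat objects.
 */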
+ { &log_stat_st_w_mbytes_fid, &log_stat_class, "st_w_mbytes", "I" }, + { &log_stat_st_wc_bytes_fid, &log_stat_class, "st_wc_bytes", "I" }, + { &log_stat_st_wc_mbytes_fid, &log_stat_class, "st_wc_mbytes", "I" }, + { &log_stat_st_wcount_fid, &log_stat_class, "st_wcount", "I" }, + { &log_stat_st_wcount_fill_fid, &log_stat_class, "st_wcount_fill", "I" }, + { &log_stat_st_scount_fid, &log_stat_class, "st_scount", "I" }, + { &log_stat_st_region_wait_fid, &log_stat_class, "st_region_wait", "I" }, + { &log_stat_st_region_nowait_fid, &log_stat_class, "st_region_nowait", "I" }, + { &log_stat_st_cur_file_fid, &log_stat_class, "st_cur_file", "I" }, + { &log_stat_st_cur_offset_fid, &log_stat_class, "st_cur_offset", "I" }, + { &log_stat_st_disk_file_fid, &log_stat_class, "st_disk_file", "I" }, + { &log_stat_st_disk_offset_fid, &log_stat_class, "st_disk_offset", "I" }, + { &log_stat_st_regsize_fid, &log_stat_class, "st_regsize", "I" }, + { &log_stat_st_maxcommitperflush_fid, &log_stat_class, "st_maxcommitperflush", "I" }, + { &log_stat_st_mincommitperflush_fid, &log_stat_class, "st_mincommitperflush", "I" }, + { &mpool_fstat_file_name_fid, &mpool_fstat_class, "file_name", "Ljava/lang/String;" }, + { &mpool_fstat_st_pagesize_fid, &mpool_fstat_class, "st_pagesize", "I" }, + { &mpool_fstat_st_map_fid, &mpool_fstat_class, "st_map", "I" }, + { &mpool_fstat_st_cache_hit_fid, &mpool_fstat_class, "st_cache_hit", "I" }, + { &mpool_fstat_st_cache_miss_fid, &mpool_fstat_class, "st_cache_miss", "I" }, + { &mpool_fstat_st_page_create_fid, &mpool_fstat_class, "st_page_create", "I" }, + { &mpool_fstat_st_page_in_fid, &mpool_fstat_class, "st_page_in", "I" }, + { &mpool_fstat_st_page_out_fid, &mpool_fstat_class, "st_page_out", "I" }, + { &mpool_stat_st_gbytes_fid, &mpool_stat_class, "st_gbytes", "I" }, + { &mpool_stat_st_bytes_fid, &mpool_stat_class, "st_bytes", "I" }, + { &mpool_stat_st_ncache_fid, &mpool_stat_class, "st_ncache", "I" }, + { &mpool_stat_st_regsize_fid, &mpool_stat_class, "st_regsize", "I" }, + { &mpool_stat_st_mmapsize_fid, &mpool_stat_class, "st_mmapsize", "I" }, + { &mpool_stat_st_maxopenfd_fid, &mpool_stat_class, "st_maxopenfd", "I" }, + { &mpool_stat_st_maxwrite_fid, &mpool_stat_class, "st_maxwrite", "I" }, + { &mpool_stat_st_maxwrite_sleep_fid, &mpool_stat_class, "st_maxwrite_sleep", "I" }, + { &mpool_stat_st_map_fid, &mpool_stat_class, "st_map", "I" }, + { &mpool_stat_st_cache_hit_fid, &mpool_stat_class, "st_cache_hit", "I" }, + { &mpool_stat_st_cache_miss_fid, &mpool_stat_class, "st_cache_miss", "I" }, + { &mpool_stat_st_page_create_fid, &mpool_stat_class, "st_page_create", "I" }, + { &mpool_stat_st_page_in_fid, &mpool_stat_class, "st_page_in", "I" }, + { &mpool_stat_st_page_out_fid, &mpool_stat_class, "st_page_out", "I" }, + { &mpool_stat_st_ro_evict_fid, &mpool_stat_class, "st_ro_evict", "I" }, + { &mpool_stat_st_rw_evict_fid, &mpool_stat_class, "st_rw_evict", "I" }, + { &mpool_stat_st_page_trickle_fid, &mpool_stat_class, "st_page_trickle", "I" }, + { &mpool_stat_st_pages_fid, &mpool_stat_class, "st_pages", "I" }, + { &mpool_stat_st_page_clean_fid, &mpool_stat_class, "st_page_clean", "I" }, + { &mpool_stat_st_page_dirty_fid, &mpool_stat_class, "st_page_dirty", "I" }, + { &mpool_stat_st_hash_buckets_fid, &mpool_stat_class, "st_hash_buckets", "I" }, + { &mpool_stat_st_hash_searches_fid, &mpool_stat_class, "st_hash_searches", "I" }, + { &mpool_stat_st_hash_longest_fid, &mpool_stat_class, "st_hash_longest", "I" }, + { &mpool_stat_st_hash_examined_fid, &mpool_stat_class, "st_hash_examined", "I" }, + 
{ &mpool_stat_st_hash_nowait_fid, &mpool_stat_class, "st_hash_nowait", "I" }, + { &mpool_stat_st_hash_wait_fid, &mpool_stat_class, "st_hash_wait", "I" }, + { &mpool_stat_st_hash_max_wait_fid, &mpool_stat_class, "st_hash_max_wait", "I" }, + { &mpool_stat_st_region_nowait_fid, &mpool_stat_class, "st_region_nowait", "I" }, + { &mpool_stat_st_region_wait_fid, &mpool_stat_class, "st_region_wait", "I" }, + { &mpool_stat_st_alloc_fid, &mpool_stat_class, "st_alloc", "I" }, + { &mpool_stat_st_alloc_buckets_fid, &mpool_stat_class, "st_alloc_buckets", "I" }, + { &mpool_stat_st_alloc_max_buckets_fid, &mpool_stat_class, "st_alloc_max_buckets", "I" }, + { &mpool_stat_st_alloc_pages_fid, &mpool_stat_class, "st_alloc_pages", "I" }, + { &mpool_stat_st_alloc_max_pages_fid, &mpool_stat_class, "st_alloc_max_pages", "I" }, + { &qam_stat_qs_magic_fid, &qam_stat_class, "qs_magic", "I" }, + { &qam_stat_qs_version_fid, &qam_stat_class, "qs_version", "I" }, + { &qam_stat_qs_metaflags_fid, &qam_stat_class, "qs_metaflags", "I" }, + { &qam_stat_qs_nkeys_fid, &qam_stat_class, "qs_nkeys", "I" }, + { &qam_stat_qs_ndata_fid, &qam_stat_class, "qs_ndata", "I" }, + { &qam_stat_qs_pagesize_fid, &qam_stat_class, "qs_pagesize", "I" }, + { &qam_stat_qs_extentsize_fid, &qam_stat_class, "qs_extentsize", "I" }, + { &qam_stat_qs_pages_fid, &qam_stat_class, "qs_pages", "I" }, + { &qam_stat_qs_re_len_fid, &qam_stat_class, "qs_re_len", "I" }, + { &qam_stat_qs_re_pad_fid, &qam_stat_class, "qs_re_pad", "I" }, + { &qam_stat_qs_pgfree_fid, &qam_stat_class, "qs_pgfree", "I" }, + { &qam_stat_qs_first_recno_fid, &qam_stat_class, "qs_first_recno", "I" }, + { &qam_stat_qs_cur_recno_fid, &qam_stat_class, "qs_cur_recno", "I" }, + { &rep_stat_st_status_fid, &rep_stat_class, "st_status", "I" }, + { &rep_stat_st_next_lsn_fid, &rep_stat_class, "st_next_lsn", "L" DB_PKG "LogSequenceNumber;" }, + { &rep_stat_st_waiting_lsn_fid, &rep_stat_class, "st_waiting_lsn", "L" DB_PKG "LogSequenceNumber;" }, + { &rep_stat_st_next_pg_fid, &rep_stat_class, "st_next_pg", "I" }, + { &rep_stat_st_waiting_pg_fid, &rep_stat_class, "st_waiting_pg", "I" }, + { &rep_stat_st_dupmasters_fid, &rep_stat_class, "st_dupmasters", "I" }, + { &rep_stat_st_env_id_fid, &rep_stat_class, "st_env_id", "I" }, + { &rep_stat_st_env_priority_fid, &rep_stat_class, "st_env_priority", "I" }, + { &rep_stat_st_gen_fid, &rep_stat_class, "st_gen", "I" }, + { &rep_stat_st_egen_fid, &rep_stat_class, "st_egen", "I" }, + { &rep_stat_st_log_duplicated_fid, &rep_stat_class, "st_log_duplicated", "I" }, + { &rep_stat_st_log_queued_fid, &rep_stat_class, "st_log_queued", "I" }, + { &rep_stat_st_log_queued_max_fid, &rep_stat_class, "st_log_queued_max", "I" }, + { &rep_stat_st_log_queued_total_fid, &rep_stat_class, "st_log_queued_total", "I" }, + { &rep_stat_st_log_records_fid, &rep_stat_class, "st_log_records", "I" }, + { &rep_stat_st_log_requested_fid, &rep_stat_class, "st_log_requested", "I" }, + { &rep_stat_st_master_fid, &rep_stat_class, "st_master", "I" }, + { &rep_stat_st_master_changes_fid, &rep_stat_class, "st_master_changes", "I" }, + { &rep_stat_st_msgs_badgen_fid, &rep_stat_class, "st_msgs_badgen", "I" }, + { &rep_stat_st_msgs_processed_fid, &rep_stat_class, "st_msgs_processed", "I" }, + { &rep_stat_st_msgs_recover_fid, &rep_stat_class, "st_msgs_recover", "I" }, + { &rep_stat_st_msgs_send_failures_fid, &rep_stat_class, "st_msgs_send_failures", "I" }, + { &rep_stat_st_msgs_sent_fid, &rep_stat_class, "st_msgs_sent", "I" }, + { &rep_stat_st_newsites_fid, &rep_stat_class, "st_newsites", "I" }, + { 
&rep_stat_st_nsites_fid, &rep_stat_class, "st_nsites", "I" }, + { &rep_stat_st_nthrottles_fid, &rep_stat_class, "st_nthrottles", "I" }, + { &rep_stat_st_outdated_fid, &rep_stat_class, "st_outdated", "I" }, + { &rep_stat_st_pg_duplicated_fid, &rep_stat_class, "st_pg_duplicated", "I" }, + { &rep_stat_st_pg_records_fid, &rep_stat_class, "st_pg_records", "I" }, + { &rep_stat_st_pg_requested_fid, &rep_stat_class, "st_pg_requested", "I" }, + { &rep_stat_st_startup_complete_fid, &rep_stat_class, "st_startup_complete", "I" }, + { &rep_stat_st_txns_applied_fid, &rep_stat_class, "st_txns_applied", "I" }, + { &rep_stat_st_elections_fid, &rep_stat_class, "st_elections", "I" }, + { &rep_stat_st_elections_won_fid, &rep_stat_class, "st_elections_won", "I" }, + { &rep_stat_st_election_cur_winner_fid, &rep_stat_class, "st_election_cur_winner", "I" }, + { &rep_stat_st_election_gen_fid, &rep_stat_class, "st_election_gen", "I" }, + { &rep_stat_st_election_lsn_fid, &rep_stat_class, "st_election_lsn", "L" DB_PKG "LogSequenceNumber;" }, + { &rep_stat_st_election_nsites_fid, &rep_stat_class, "st_election_nsites", "I" }, + { &rep_stat_st_election_nvotes_fid, &rep_stat_class, "st_election_nvotes", "I" }, + { &rep_stat_st_election_priority_fid, &rep_stat_class, "st_election_priority", "I" }, + { &rep_stat_st_election_status_fid, &rep_stat_class, "st_election_status", "I" }, + { &rep_stat_st_election_tiebreaker_fid, &rep_stat_class, "st_election_tiebreaker", "I" }, + { &rep_stat_st_election_votes_fid, &rep_stat_class, "st_election_votes", "I" }, + { &seq_stat_st_wait_fid, &seq_stat_class, "st_wait", "I" }, + { &seq_stat_st_nowait_fid, &seq_stat_class, "st_nowait", "I" }, + { &seq_stat_st_current_fid, &seq_stat_class, "st_current", "J" }, + { &seq_stat_st_value_fid, &seq_stat_class, "st_value", "J" }, + { &seq_stat_st_last_value_fid, &seq_stat_class, "st_last_value", "J" }, + { &seq_stat_st_min_fid, &seq_stat_class, "st_min", "J" }, + { &seq_stat_st_max_fid, &seq_stat_class, "st_max", "J" }, + { &seq_stat_st_cache_size_fid, &seq_stat_class, "st_cache_size", "I" }, + { &seq_stat_st_flags_fid, &seq_stat_class, "st_flags", "I" }, + { &txn_stat_st_last_ckp_fid, &txn_stat_class, "st_last_ckp", "L" DB_PKG "LogSequenceNumber;" }, + { &txn_stat_st_time_ckp_fid, &txn_stat_class, "st_time_ckp", "J" }, + { &txn_stat_st_last_txnid_fid, &txn_stat_class, "st_last_txnid", "I" }, + { &txn_stat_st_maxtxns_fid, &txn_stat_class, "st_maxtxns", "I" }, + { &txn_stat_st_naborts_fid, &txn_stat_class, "st_naborts", "I" }, + { &txn_stat_st_nbegins_fid, &txn_stat_class, "st_nbegins", "I" }, + { &txn_stat_st_ncommits_fid, &txn_stat_class, "st_ncommits", "I" }, + { &txn_stat_st_nactive_fid, &txn_stat_class, "st_nactive", "I" }, + { &txn_stat_st_nrestores_fid, &txn_stat_class, "st_nrestores", "I" }, + { &txn_stat_st_maxnactive_fid, &txn_stat_class, "st_maxnactive", "I" }, + { &txn_stat_st_txnarray_fid, &txn_stat_class, "st_txnarray", "[L" DB_PKG "TransactionStats$Active;" }, + { &txn_stat_st_region_wait_fid, &txn_stat_class, "st_region_wait", "I" }, + { &txn_stat_st_region_nowait_fid, &txn_stat_class, "st_region_nowait", "I" }, + { &txn_stat_st_regsize_fid, &txn_stat_class, "st_regsize", "I" }, + { &txn_active_txnid_fid, &txn_active_class, "txnid", "I" }, + { &txn_active_parentid_fid, &txn_active_class, "parentid", "I" }, + { &txn_active_lsn_fid, &txn_active_class, "lsn", "L" DB_PKG "LogSequenceNumber;" }, + { &txn_active_xa_status_fid, &txn_active_class, "xa_status", "I" }, + { &txn_active_xid_fid, &txn_active_class, "xid", "[B" }, +/* 
END-STAT-FIELDS */ + + { &rep_processmsg_envid, &rep_processmsg_class, "envid", "I" } }; const struct { @@ -172,62 +615,70 @@ const struct { } all_methods[] = { { &dbenv_construct, &dbenv_class, "", "(JZ)V" }, { &dbt_construct, &dbt_class, "", "()V" }, - { &dblsn_construct, &dblsn_class, "", "(JZ)V" }, + { &dblsn_construct, &dblsn_class, "", "(II)V" }, { &dbpreplist_construct, &dbpreplist_class, "", - "(L" DB_PKG "DbTxn;[B)V" }, + "(L" DB_PKG "internal/DbTxn;[B)V" }, { &dbtxn_construct, &dbtxn_class, "", "(JZ)V" }, - { &btree_stat_construct, &btree_stat_class, "", "()V" }, - { &hash_stat_construct, &hash_stat_class, "", "()V" }, + { &bt_stat_construct, &bt_stat_class, "", "()V" }, + { &h_stat_construct, &h_stat_class, "", "()V" }, { &lock_stat_construct, &lock_stat_class, "", "()V" }, { &log_stat_construct, &log_stat_class, "", "()V" }, { &mpool_stat_construct, &mpool_stat_class, "", "()V" }, { &mpool_fstat_construct, &mpool_fstat_class, "", "()V" }, - { &queue_stat_construct, &queue_stat_class, "", "()V" }, + { &qam_stat_construct, &qam_stat_class, "", "()V" }, { &rep_stat_construct, &rep_stat_class, "", "()V" }, + { &seq_stat_construct, &seq_stat_class, "", "()V" }, { &txn_stat_construct, &txn_stat_class, "", "()V" }, { &txn_active_construct, &txn_active_class, "", "()V" }, - { &dbex_construct, &dbex_class, "", "(Ljava/lang/String;IL" DB_PKG "DbEnv;)V" }, + { &dbex_construct, &dbex_class, "", + "(Ljava/lang/String;IL" DB_PKG "internal/DbEnv;)V" }, { &deadex_construct, &deadex_class, "", - "(Ljava/lang/String;IL" DB_PKG "DbEnv;)V" }, + "(Ljava/lang/String;IL" DB_PKG "internal/DbEnv;)V" }, { &lockex_construct, &lockex_class, "", - "(Ljava/lang/String;IIL" DB_PKG "Dbt;L" DB_PKG "DbLock;IL" DB_PKG "DbEnv;)V" }, + "(Ljava/lang/String;IIL" DB_PKG "DatabaseEntry;L" DB_PKG "internal/DbLock;IL" DB_PKG "internal/DbEnv;)V" }, { &memex_construct, &memex_class, "", - "(Ljava/lang/String;L" DB_PKG "Dbt;IL" DB_PKG "DbEnv;)V" }, - { &memex_update_method, &memex_class, "update_dbt", - "(L" DB_PKG "Dbt;)V" }, + "(Ljava/lang/String;L" DB_PKG "DatabaseEntry;IL" DB_PKG "internal/DbEnv;)V" }, + { &memex_update_method, &memex_class, "updateDatabaseEntry", + "(L" DB_PKG "DatabaseEntry;)V" }, + { &rephandledeadex_construct, &rephandledeadex_class, "", + "(Ljava/lang/String;IL" DB_PKG "internal/DbEnv;)V" }, { &runrecex_construct, &runrecex_class, "", - "(Ljava/lang/String;IL" DB_PKG "DbEnv;)V" }, + "(Ljava/lang/String;IL" DB_PKG "internal/DbEnv;)V" }, { &filenotfoundex_construct, &filenotfoundex_class, "", "(Ljava/lang/String;)V" }, { &illegalargex_construct, &illegalargex_class, "", "(Ljava/lang/String;)V" }, + { &outofmemerr_construct, &outofmemerr_class, "", + "(Ljava/lang/String;)V" }, { &lock_construct, &lock_class, "", "(JZ)V" }, { &app_dispatch_method, &dbenv_class, "handle_app_dispatch", - "(L" DB_PKG "Dbt;L" DB_PKG "DbLsn;I)I" }, + "(L" DB_PKG "DatabaseEntry;L" DB_PKG "LogSequenceNumber;I)I" }, { &env_feedback_method, &dbenv_class, "handle_env_feedback", "(II)V" }, { &errcall_method, &dbenv_class, "handle_error", + "(Ljava/lang/String;Ljava/lang/String;)V" }, + { &msgcall_method, &dbenv_class, "handle_message", "(Ljava/lang/String;)V" }, { &paniccall_method, &dbenv_class, "handle_panic", - "(L" DB_PKG "DbException;)V" }, + "(L" DB_PKG "DatabaseException;)V" }, { &rep_transport_method, &dbenv_class, "handle_rep_transport", - "(L" DB_PKG "Dbt;L" DB_PKG "Dbt;L" DB_PKG "DbLsn;II)I" }, + "(L" DB_PKG "DatabaseEntry;L" DB_PKG "DatabaseEntry;L" DB_PKG "LogSequenceNumber;II)I" }, { &append_recno_method, 
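/*
 * Constructors in this table are looked up under the JNI special name
 * "<init>" with a void return type, so "(JZ)V" is a (long, boolean)
 * constructor.  The LogSequenceNumber constructor changing from "(JZ)V"
 * to "(II)V" matches the rewritten __dbj_wrap_DB_LSN() further below,
 * which now builds the object directly from lsn->file and lsn->offset
 * instead of wrapping a malloc'd copy of the DB_LSN behind a swig
 * pointer.
 */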
&db_class, "handle_append_recno", - "(L" DB_PKG "Dbt;I)V" }, + "(L" DB_PKG "DatabaseEntry;I)V" }, { &bt_compare_method, &db_class, "handle_bt_compare", - "(L" DB_PKG "Dbt;L" DB_PKG "Dbt;)I" }, + "(L" DB_PKG "DatabaseEntry;L" DB_PKG "DatabaseEntry;)I" }, { &bt_prefix_method, &db_class, "handle_bt_prefix", - "(L" DB_PKG "Dbt;L" DB_PKG "Dbt;)I" }, + "(L" DB_PKG "DatabaseEntry;L" DB_PKG "DatabaseEntry;)I" }, { &db_feedback_method, &db_class, "handle_db_feedback", "(II)V" }, { &dup_compare_method, &db_class, "handle_dup_compare", - "(L" DB_PKG "Dbt;L" DB_PKG "Dbt;)I" }, + "(L" DB_PKG "DatabaseEntry;L" DB_PKG "DatabaseEntry;)I" }, { &h_hash_method, &db_class, "handle_h_hash", "([BI)I" }, { &seckey_create_method, &db_class, "handle_seckey_create", - "(L" DB_PKG "Dbt;L" DB_PKG "Dbt;L" DB_PKG "Dbt;)I" }, + "(L" DB_PKG "DatabaseEntry;L" DB_PKG "DatabaseEntry;L" DB_PKG "DatabaseEntry;)I" }, { &outputstream_write_method, &outputstream_class, "write", "([BII)V" } }; @@ -235,10 +686,10 @@ const struct { #define NELEM(x) (sizeof (x) / sizeof (x[0])) JNIEXPORT void JNICALL -Java_com_sleepycat_db_db_1javaJNI_initialize(JNIEnv *jenv, jclass clazz) +Java_com_sleepycat_db_internal_db_1javaJNI_initialize(JNIEnv *jenv, jclass clazz) { jclass cl; - unsigned int i; + unsigned int i, j; COMPQUIET(clazz, NULL); @@ -250,8 +701,8 @@ Java_com_sleepycat_db_db_1javaJNI_initialize(JNIEnv *jenv, jclass clazz) for (i = 0; i < NELEM(all_classes); i++) { cl = (*jenv)->FindClass(jenv, all_classes[i].name); if (cl == NULL) { - __db_err(NULL, - "Failed to load class %s - check CLASSPATH", + fprintf(stderr, + "Failed to load class %s - check CLASSPATH\n", all_classes[i].name); return; } @@ -263,8 +714,8 @@ Java_com_sleepycat_db_db_1javaJNI_initialize(JNIEnv *jenv, jclass clazz) *all_classes[i].cl = (jclass)(*jenv)->NewGlobalRef(jenv, cl); if (*all_classes[i].cl == NULL) { - __db_err(NULL, - "Failed to create a global reference for class %s", + fprintf(stderr, + "Failed to create a global reference for %s\n", all_classes[i].name); return; } @@ -277,8 +728,9 @@ Java_com_sleepycat_db_db_1javaJNI_initialize(JNIEnv *jenv, jclass clazz) *all_fields[i].cl, all_fields[i].name, all_fields[i].sig); if (*all_fields[i].fid == NULL) { - __db_err(NULL, "Failed to look up field %s", - all_fields[i].name); + fprintf(stderr, + "Failed to look up field %s with sig %s\n", + all_fields[i].name, all_fields[i].sig); return; } } @@ -290,8 +742,12 @@ Java_com_sleepycat_db_db_1javaJNI_initialize(JNIEnv *jenv, jclass clazz) all_methods[i].sig); if (*all_methods[i].mid == NULL) { - __db_err(NULL, "Failed to look up method %s", - all_methods[i].name); + for (j = 0; j < NELEM(all_classes); j++) + if (all_methods[i].cl == all_classes[j].cl) + break; + fprintf(stderr, + "Failed to look up method %s.%s with sig %s\n", + all_classes[j].name, all_methods[i].name, all_methods[i].sig); return; } } @@ -323,19 +779,7 @@ static JNIEnv *__dbj_get_jnienv(void) static jobject __dbj_wrap_DB_LSN(JNIEnv *jenv, DB_LSN *lsn) { - jlong jptr; - DB_LSN *lsn_copy; - int err; - - if ((err = __os_malloc(NULL, sizeof(DB_LSN), &lsn_copy)) != 0) { - __dbj_throw(jenv, err, NULL, NULL, NULL); - return NULL; - } - memset(lsn_copy, 0, sizeof(DB_LSN)); - *lsn_copy = *lsn; - /* Magic to convert a pointer to a long - must match SWIG */ - *(DB_LSN **)&jptr = lsn_copy; return (*jenv)->NewObject(jenv, dblsn_class, dblsn_construct, - jptr, JNI_TRUE); + lsn->file, lsn->offset); } %} diff --git a/db/lock/lock.c b/db/lock/lock.c index ec08819a9..4e4b07890 100644 --- a/db/lock/lock.c +++ 
b/db/lock/lock.c @@ -1,21 +1,18 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: lock.c,v 11.166 2004/10/11 19:38:49 ubell Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: lock.c,v 11.134 2003/11/18 21:30:38 ubell Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include -#include #endif #include "db_int.h" @@ -25,11 +22,6 @@ static const char revid[] = "$Id: lock.c,v 11.134 2003/11/18 21:30:38 ubell Exp static int __lock_freelock __P((DB_LOCKTAB *, struct __db_lock *, u_int32_t, u_int32_t)); -static void __lock_expires __P((DB_ENV *, db_timeval_t *, db_timeout_t)); -static void __lock_freelocker - __P((DB_LOCKTAB *, DB_LOCKREGION *, DB_LOCKER *, u_int32_t)); -static int __lock_get_internal __P((DB_LOCKTAB *, u_int32_t, u_int32_t, - const DBT *, db_lockmode_t, db_timeout_t, DB_LOCK *)); static int __lock_getobj __P((DB_LOCKTAB *, const DBT *, u_int32_t, int, DB_LOCKOBJ **)); static int __lock_inherit_locks __P ((DB_LOCKTAB *, u_int32_t, u_int32_t)); @@ -39,176 +31,11 @@ static int __lock_put_internal __P((DB_LOCKTAB *, static int __lock_put_nolock __P((DB_ENV *, DB_LOCK *, int *, u_int32_t)); static void __lock_remove_waiter __P((DB_LOCKTAB *, DB_LOCKOBJ *, struct __db_lock *, db_status_t)); -static int __lock_set_timeout_internal __P(( DB_ENV *, - u_int32_t, db_timeout_t, u_int32_t)); static int __lock_trade __P((DB_ENV *, DB_LOCK *, u_int32_t)); -static int __lock_sort_cmp __P((const void *, const void *)); -static int __lock_fix_list __P((DB_ENV *, DBT *, u_int32_t)); -static const char __db_lock_err[] = "Lock table is out of available %s"; static const char __db_lock_invalid[] = "%s: Lock is no longer valid"; static const char __db_locker_invalid[] = "Locker is not valid"; -/* - * __lock_id_pp -- - * DB_ENV->lock_id pre/post processing. - * - * PUBLIC: int __lock_id_pp __P((DB_ENV *, u_int32_t *)); - */ -int -__lock_id_pp(dbenv, idp) - DB_ENV *dbenv; - u_int32_t *idp; -{ - int rep_check, ret; - - PANIC_CHECK(dbenv); - ENV_REQUIRES_CONFIG(dbenv, - dbenv->lk_handle, "DB_ENV->lock_id", DB_INIT_LOCK); - - rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; - if (rep_check) - __env_rep_enter(dbenv); - ret = __lock_id(dbenv, idp); - if (rep_check) - __env_rep_exit(dbenv); - return (ret); -} - -/* - * __lock_id -- - * DB_ENV->lock_id. - * - * PUBLIC: int __lock_id __P((DB_ENV *, u_int32_t *)); - */ -int -__lock_id(dbenv, idp) - DB_ENV *dbenv; - u_int32_t *idp; -{ - DB_LOCKER *lk; - DB_LOCKTAB *lt; - DB_LOCKREGION *region; - u_int32_t *ids, locker_ndx; - int nids, ret; - - lt = dbenv->lk_handle; - region = lt->reginfo.primary; - ret = 0; - - /* - * Allocate a new lock id. If we wrap around then we - * find the minimum currently in use and make sure we - * can stay below that. This code is similar to code - * in __txn_begin_int for recovering txn ids. - */ - LOCKREGION(dbenv, lt); - /* - * Our current valid range can span the maximum valid value, so check - * for it and wrap manually. 
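 *
 * (The deletions running through this part of the diff drop the
 * locker-id, family-locker and timeout helpers from lock.c.  The
 * functions themselves -- __lock_id(), __lock_id_free(),
 * __lock_addfamilylocker(), __lock_set_timeout() and friends -- still
 * exist in this release, so this is presumably a move into new, more
 * specialised lock source files added elsewhere in this import rather
 * than an API removal.)
 *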
- */ - if (region->stat.st_id == DB_LOCK_MAXID && - region->stat.st_cur_maxid != DB_LOCK_MAXID) - region->stat.st_id = DB_LOCK_INVALIDID; - if (region->stat.st_id == region->stat.st_cur_maxid) { - if ((ret = __os_malloc(dbenv, - sizeof(u_int32_t) * region->stat.st_nlockers, &ids)) != 0) - goto err; - nids = 0; - for (lk = SH_TAILQ_FIRST(®ion->lockers, __db_locker); - lk != NULL; - lk = SH_TAILQ_NEXT(lk, ulinks, __db_locker)) - ids[nids++] = lk->id; - region->stat.st_id = DB_LOCK_INVALIDID; - region->stat.st_cur_maxid = DB_LOCK_MAXID; - if (nids != 0) - __db_idspace(ids, nids, - ®ion->stat.st_id, ®ion->stat.st_cur_maxid); - __os_free(dbenv, ids); - } - *idp = ++region->stat.st_id; - - /* Allocate a locker for this id. */ - LOCKER_LOCK(lt, region, *idp, locker_ndx); - ret = __lock_getlocker(lt, *idp, locker_ndx, 1, &lk); - -err: UNLOCKREGION(dbenv, lt); - - return (ret); -} - -/* - * __lock_id_free_pp -- - * DB_ENV->lock_id_free pre/post processing. - * - * PUBLIC: int __lock_id_free_pp __P((DB_ENV *, u_int32_t)); - */ -int -__lock_id_free_pp(dbenv, id) - DB_ENV *dbenv; - u_int32_t id; -{ - int rep_check, ret; - - PANIC_CHECK(dbenv); - ENV_REQUIRES_CONFIG(dbenv, - dbenv->lk_handle, "DB_ENV->lock_id_free", DB_INIT_LOCK); - - rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; - if (rep_check) - __env_rep_enter(dbenv); - ret = __lock_id_free(dbenv, id); - if (rep_check) - __env_rep_exit(dbenv); - return (ret); -} - -/* - * __lock_id_free -- - * Free a locker id. - * - * PUBLIC: int __lock_id_free __P((DB_ENV *, u_int32_t)); - */ -int -__lock_id_free(dbenv, id) - DB_ENV *dbenv; - u_int32_t id; -{ - DB_LOCKER *sh_locker; - DB_LOCKTAB *lt; - DB_LOCKREGION *region; - u_int32_t locker_ndx; - int ret; - - PANIC_CHECK(dbenv); - ENV_REQUIRES_CONFIG(dbenv, - dbenv->lk_handle, "DB_ENV->lock_id_free", DB_INIT_LOCK); - - lt = dbenv->lk_handle; - region = lt->reginfo.primary; - - LOCKREGION(dbenv, lt); - LOCKER_LOCK(lt, region, id, locker_ndx); - if ((ret = - __lock_getlocker(lt, id, locker_ndx, 0, &sh_locker)) != 0) - goto err; - if (sh_locker == NULL) { - ret = EINVAL; - goto err; - } - - if (sh_locker->nlocks != 0) { - __db_err(dbenv, "Locker still has locks"); - ret = EINVAL; - goto err; - } - - __lock_freelocker(lt, region, sh_locker, locker_ndx); - -err: UNLOCKREGION(dbenv, lt); - return (ret); -} - /* * __lock_vec_pp -- * DB_ENV->lock_vec pre/post processing. @@ -239,7 +66,7 @@ __lock_vec_pp(dbenv, locker, flags, list, nlist, elistp) __env_rep_enter(dbenv); ret = __lock_vec(dbenv, locker, flags, list, nlist, elistp); if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -286,6 +113,7 @@ __lock_vec(dbenv, locker, flags, list, nlist, elistp) switch (list[i].op) { case DB_LOCK_GET_TIMEOUT: LF_SET(DB_LOCK_SET_TIMEOUT); + /* FALLTHROUGH */ case DB_LOCK_GET: if (IS_RECOVERING(dbenv)) { LOCK_INIT(list[i].lock); @@ -341,7 +169,7 @@ __lock_vec(dbenv, locker, flags, list, nlist, elistp) * but they could be something else, * so allocate room for the size too. 
*/ - objlist->size = + objlist->size = sh_locker->nwrites * sizeof(DBT); if ((ret = __os_malloc(dbenv, objlist->size, &objlist->data)) != 0) @@ -387,7 +215,7 @@ __lock_vec(dbenv, locker, flags, list, nlist, elistp) (char *)objlist->data + objlist->size); np->data = SH_DBT_PTR(&sh_obj->lockobj); - np->size = sh_obj->lockobj.size; + np->size = sh_obj->lockobj.size; np++; } } @@ -409,16 +237,17 @@ __lock_vec(dbenv, locker, flags, list, nlist, elistp) locker_links, __db_lock)) { if (lp->mode != DB_LOCK_WWRITE) continue; - lock.off = R_OFFSET(<->reginfo, lp); + lock.off = R_OFFSET(dbenv, + <->reginfo, lp); lock.gen = lp->gen; F_SET(sh_locker, DB_LOCKER_INABORT); if ((ret = __lock_get_internal(lt, - locker, DB_LOCK_UPGRADE, + locker, flags | DB_LOCK_UPGRADE, NULL, DB_LOCK_WRITE, 0, &lock)) !=0) break; } up_done: - /* FALL THROUGH */ + /* FALLTHROUGH */ case DB_LOCK_PUT_READ: case DB_LOCK_PUT_ALL: F_CLR(sh_locker, DB_LOCKER_DELETED); @@ -487,7 +316,7 @@ __lock_vec(dbenv, locker, flags, list, nlist, elistp) */ ret = __lock_trade(dbenv, &list[i].lock, locker); break; -#ifdef DEBUG +#if defined(DEBUG) && defined(HAVE_STATISTICS) case DB_LOCK_DUMP: /* Find the locker. */ LOCKER_LOCK(lt, region, locker, ndx); @@ -500,7 +329,7 @@ __lock_vec(dbenv, locker, flags, list, nlist, elistp) for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock); lp != NULL; lp = SH_LIST_NEXT(lp, locker_links, __db_lock)) { - __lock_printlock(lt, lp, 1, NULL); + __lock_printlock(lt, NULL, lp, 1); } break; #endif @@ -556,7 +385,7 @@ __lock_get_pp(dbenv, locker, flags, obj, lock_mode, lock) __env_rep_enter(dbenv); ret = __lock_get(dbenv, locker, flags, obj, lock_mode, lock); if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -591,11 +420,13 @@ __lock_get(dbenv, locker, flags, obj, lock_mode, lock) /* * __lock_get_internal -- + * All the work for lock_get (and for the GET option of lock_vec) is done + * inside of lock_get_internal. * - * All the work for lock_get (and for the GET option of lock_vec) is done - * inside of lock_get_internal. + * PUBLIC: int __lock_get_internal __P((DB_LOCKTAB *, u_int32_t, u_int32_t, + * PUBLIC: const DBT *, db_lockmode_t, db_timeout_t, DB_LOCK *)); */ -static int +int __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock) DB_LOCKTAB *lt; u_int32_t locker, flags; @@ -611,10 +442,10 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock) DB_LOCKREGION *region; u_int32_t holder, locker_ndx, obj_ndx; int did_abort, ihold, grant_dirty, no_dd, ret, t_ret; + /* - * We decide what action to take based on what - * locks are already held and what locks are - * in the wait queue. + * We decide what action to take based on what locks are already held + * and what locks are in the wait queue. */ enum { GRANT, /* Grant the lock. */ @@ -642,7 +473,7 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock) LOCK_INIT(*lock); /* Check that the lock mode is valid. */ - if ((u_int32_t)lock_mode >= region->stat.st_nmodes) { + if (lock_mode >= (db_lockmode_t)region->stat.st_nmodes) { __db_err(dbenv, "DB_ENV->lock_get: invalid lock mode %lu", (u_long)lock_mode); return (EINVAL); @@ -651,7 +482,7 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock) if (obj == NULL) { DB_ASSERT(LOCK_ISSET(*lock)); - lp = (struct __db_lock *)R_ADDR(<->reginfo, lock->off); + lp = (struct __db_lock *)R_ADDR(dbenv, <->reginfo, lock->off); sh_obj = (DB_LOCKOBJ *) ((u_int8_t *)lp + lp->obj); } else { /* Allocate a shared memory new object. 
*/ @@ -665,8 +496,9 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock) if ((ret = __lock_getlocker(lt, locker, locker_ndx, locker > DB_LOCK_MAXID ? 1 : 0, &sh_locker)) != 0) { /* - * XXX We cannot tell if we created the object or not, - * so we don't kow if we should free it or not. + * XXX + * We cannot tell if we created the object or not, so we don't + * kow if we should free it or not. */ goto err; } @@ -731,7 +563,7 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock) * the locker. */ lp->refcount++; - lock->off = R_OFFSET(<->reginfo, lp); + lock->off = R_OFFSET(dbenv, <->reginfo, lp); lock->gen = lp->gen; lock->mode = lp->mode; goto done; @@ -752,21 +584,24 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock) } } + /* If we want a write lock and we have a was write, upgrade. */ + if (wwrite != NULL) + LF_SET(DB_LOCK_UPGRADE); + /* - * If there are conflicting holders we will have to wait. - * An upgrade or dirty reader goes to the head - * of the queue, everone else to the back. + * If there are conflicting holders we will have to wait. An upgrade + * or dirty reader goes to the head of the queue, everyone else to the + * back. */ if (lp != NULL) { - if (LF_ISSET(DB_LOCK_UPGRADE) || - wwrite != NULL || lock_mode == DB_LOCK_DIRTY) + if (LF_ISSET(DB_LOCK_UPGRADE) || lock_mode == DB_LOCK_DIRTY) action = HEAD; else action = TAIL; } else { if (LF_ISSET(DB_LOCK_SWITCH)) action = TAIL; - else if (LF_ISSET(DB_LOCK_UPGRADE) || wwrite != NULL) + else if (LF_ISSET(DB_LOCK_UPGRADE)) action = UPGRADE; else if (ihold) action = GRANT; @@ -837,21 +672,19 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock) case SECOND: case GRANT: /* Allocate a new lock. */ + if ((newl = + SH_TAILQ_FIRST(®ion->free_locks, __db_lock)) == NULL) + return (__lock_nomem(dbenv, "locks")); + SH_TAILQ_REMOVE(®ion->free_locks, newl, links, __db_lock); + + /* Update new lock statistics. */ if (++region->stat.st_nlocks > region->stat.st_maxnlocks) region->stat.st_maxnlocks = region->stat.st_nlocks; - if ((newl = - SH_TAILQ_FIRST(®ion->free_locks, __db_lock)) != NULL) - SH_TAILQ_REMOVE( - ®ion->free_locks, newl, links, __db_lock); - if (newl == NULL) { - __db_err(dbenv, __db_lock_err, "locks"); - return (ENOMEM); - } newl->holder = locker; newl->refcount = 1; newl->mode = lock_mode; - newl->obj = SH_PTR_TO_OFF(newl, sh_obj); + newl->obj = (roff_t)SH_PTR_TO_OFF(newl, sh_obj); /* * Now, insert the lock onto its locker's list. 
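 * (On the allocation hunk just above: a lock is now taken from
 * region->free_locks first and the call fails straight away through
 * __lock_nomem() when the free list is empty, replacing the old
 * __db_err + ENOMEM path, and st_nlocks / st_maxnlocks are only bumped
 * once a lock has actually been obtained.)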
* If the locker does not currently hold any locks, @@ -868,17 +701,16 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock) break; case UPGRADE: -upgrade: - if (wwrite != NULL) { +upgrade: if (wwrite != NULL) { lp = wwrite; lp->refcount++; - lock->off = R_OFFSET(<->reginfo, lp); + lock->off = R_OFFSET(dbenv, <->reginfo, lp); lock->gen = lp->gen; lock->mode = lock_mode; } else - lp = - (struct __db_lock *)R_ADDR(<->reginfo, lock->off); + lp = (struct __db_lock *)R_ADDR(dbenv, + <->reginfo, lock->off); if (IS_WRITELOCK(lock_mode) && !IS_WRITELOCK(lp->mode)) sh_locker->nwrites++; lp->mode = lock_mode; @@ -994,53 +826,55 @@ upgrade: if (newl->status != DB_LSTAT_EXPIRED) LOCK_SET_TIME_INVALID(&sh_locker->lk_expire); - if (newl->status != DB_LSTAT_PENDING) { - switch (newl->status) { - case DB_LSTAT_ABORTED: - ret = DB_LOCK_DEADLOCK; - break; - case DB_LSTAT_NOTEXIST: - ret = DB_LOCK_NOTEXIST; - break; - case DB_LSTAT_EXPIRED: -expired: SHOBJECT_LOCK(lt, region, sh_obj, obj_ndx); - if ((ret = __lock_put_internal( - lt, newl, obj_ndx, - DB_LOCK_UNLINK | DB_LOCK_FREE) != 0)) - goto err; - if (LOCK_TIME_EQUAL( - &sh_locker->lk_expire, - &sh_locker->tx_expire)) { - region->stat.st_ntxntimeouts++; - return (DB_LOCK_NOTGRANTED); - } else { - region->stat.st_nlocktimeouts++; - return (DB_LOCK_NOTGRANTED); - } - default: - ret = EINVAL; + switch (newl->status) { + case DB_LSTAT_ABORTED: + ret = DB_LOCK_DEADLOCK; + goto err; + case DB_LSTAT_NOTEXIST: + ret = DB_LOCK_NOTEXIST; + goto err; + case DB_LSTAT_EXPIRED: +expired: SHOBJECT_LOCK(lt, region, sh_obj, obj_ndx); + if ((ret = __lock_put_internal(lt, newl, + obj_ndx, DB_LOCK_UNLINK | DB_LOCK_FREE)) != 0) break; - } + if (LOCK_TIME_EQUAL( + &sh_locker->lk_expire, &sh_locker->tx_expire)) + region->stat.st_ntxntimeouts++; + else + region->stat.st_nlocktimeouts++; + return (DB_LOCK_NOTGRANTED); + case DB_LSTAT_PENDING: + if (LF_ISSET(DB_LOCK_UPGRADE)) { + /* + * The lock just granted got put on the holders + * list. Since we're upgrading some other lock, + * we've got to remove it here. + */ + SH_TAILQ_REMOVE( + &sh_obj->holders, newl, links, __db_lock); + /* + * Ensure the object is not believed to be on + * the object's lists, if we're traversing by + * locker. + */ + newl->links.stqe_prev = -1; + goto upgrade; + } else + newl->status = DB_LSTAT_HELD; + break; + case DB_LSTAT_FREE: + case DB_LSTAT_HELD: + case DB_LSTAT_WAITING: + default: + __db_err(dbenv, + "Unexpected lock status: %d", (int)newl->status); + ret = __db_panic(dbenv, EINVAL); goto err; - } else if (LF_ISSET(DB_LOCK_UPGRADE)) { - /* - * The lock that was just granted got put on the - * holders list. Since we're upgrading some other - * lock, we've got to remove it here. - */ - SH_TAILQ_REMOVE( - &sh_obj->holders, newl, links, __db_lock); - /* - * Ensure that the object is not believed to be on - * the object's lists, if we're traversing by locker. - */ - newl->links.stqe_prev = -1; - goto upgrade; - } else - newl->status = DB_LSTAT_HELD; + } } - lock->off = R_OFFSET(<->reginfo, newl); + lock->off = R_OFFSET(dbenv, <->reginfo, newl); lock->gen = newl->gen; lock->mode = newl->mode; sh_locker->nlocks++; @@ -1079,22 +913,23 @@ __lock_put_pp(dbenv, lock) rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; if (rep_check) __env_rep_enter(dbenv); - ret = __lock_put(dbenv, lock); + ret = __lock_put(dbenv, lock, 0); if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } /* * __lock_put -- - * DB_ENV->lock_put. 
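 *
 * The added u_int32_t argument is a flags word handed through to
 * __lock_put_nolock().  The interesting new case there is
 * DB_LOCK_DOWNGRADE: when the lock being released is a write lock that
 * is still referenced more than once, it is downgraded to DB_LOCK_WWRITE
 * (the "was written" mode the dirty-read support relies on) instead of
 * being unlinked and freed.  __lock_put_pp() preserves the old external
 * behaviour by passing 0.
 *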
* - * PUBLIC: int __lock_put __P((DB_ENV *, DB_LOCK *)); + * PUBLIC: int __lock_put __P((DB_ENV *, DB_LOCK *, u_int32_t)); + * Internal lock_put interface. */ int -__lock_put(dbenv, lock) +__lock_put(dbenv, lock, flags) DB_ENV *dbenv; DB_LOCK *lock; + u_int32_t flags; { DB_LOCKTAB *lt; int ret, run_dd; @@ -1105,7 +940,7 @@ __lock_put(dbenv, lock) lt = dbenv->lk_handle; LOCKREGION(dbenv, lt); - ret = __lock_put_nolock(dbenv, lock, &run_dd, 0); + ret = __lock_put_nolock(dbenv, lock, &run_dd, flags); UNLOCKREGION(dbenv, lt); /* @@ -1139,15 +974,23 @@ __lock_put_nolock(dbenv, lock, runp, flags) lt = dbenv->lk_handle; region = lt->reginfo.primary; - lockp = (struct __db_lock *)R_ADDR(<->reginfo, lock->off); - LOCK_INIT(*lock); + lockp = (struct __db_lock *)R_ADDR(dbenv, <->reginfo, lock->off); if (lock->gen != lockp->gen) { __db_err(dbenv, __db_lock_invalid, "DB_LOCK->lock_put"); + LOCK_INIT(*lock); return (EINVAL); } - ret = __lock_put_internal(lt, - lockp, lock->ndx, flags | DB_LOCK_UNLINK | DB_LOCK_FREE); + if (LF_ISSET(DB_LOCK_DOWNGRADE) && + lock->mode == DB_LOCK_WRITE && lockp->refcount > 1) { + ret = __lock_downgrade(dbenv, + lock, DB_LOCK_WWRITE, DB_LOCK_NOREGION); + if (ret == 0) + lockp->refcount--; + } else + ret = __lock_put_internal(lt, + lockp, lock->ndx, flags | DB_LOCK_UNLINK | DB_LOCK_FREE); + LOCK_INIT(*lock); *runp = 0; if (ret == 0 && region->detect != DB_LOCK_NORUN && @@ -1160,10 +1003,11 @@ __lock_put_nolock(dbenv, lock, runp, flags) /* * __lock_downgrade -- * - * Used to downgrade locks. Currently this is used in two places: 1) by the + * Used to downgrade locks. Currently this is used in three places: 1) by the * Concurrent Data Store product to downgrade write locks back to iwrite locks * and 2) to downgrade write-handle locks to read-handle locks at the end of - * an open/create. + * an open/create. 3) To downgrade write locks to was_write to support dirty + * reads. * * PUBLIC: int __lock_downgrade __P((DB_ENV *, * PUBLIC: DB_LOCK *, db_lockmode_t, u_int32_t)); @@ -1183,8 +1027,6 @@ __lock_downgrade(dbenv, lock, new_mode, flags) u_int32_t indx; int ret; - COMPQUIET(flags, 0); - PANIC_CHECK(dbenv); ret = 0; @@ -1195,9 +1037,10 @@ __lock_downgrade(dbenv, lock, new_mode, flags) lt = dbenv->lk_handle; region = lt->reginfo.primary; - LOCKREGION(dbenv, lt); + if (!LF_ISSET(DB_LOCK_NOREGION)) + LOCKREGION(dbenv, lt); - lockp = (struct __db_lock *)R_ADDR(<->reginfo, lock->off); + lockp = (struct __db_lock *)R_ADDR(dbenv, <->reginfo, lock->off); if (lock->gen != lockp->gen) { __db_err(dbenv, __db_lock_invalid, "lock_downgrade"); ret = EINVAL; @@ -1220,12 +1063,14 @@ __lock_downgrade(dbenv, lock, new_mode, flags) F_SET(sh_locker, DB_LOCKER_DIRTY); lockp->mode = new_mode; + lock->mode = new_mode; /* Get the object associated with this lock. */ obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj); (void)__lock_promote(lt, obj, LF_ISSET(DB_LOCK_NOWAITERS)); -out: UNLOCKREGION(dbenv, lt); +out: if (!LF_ISSET(DB_LOCK_NOREGION)) + UNLOCKREGION(dbenv, lt); return (ret); } @@ -1290,7 +1135,7 @@ __lock_put_internal(lt, lockp, obj_ndx, flags) HASHREMOVE_EL(lt->obj_tab, obj_ndx, __db_lockobj, links, sh_obj); if (sh_obj->lockobj.size > sizeof(sh_obj->objdata)) - __db_shalloc_free(lt->reginfo.addr, + __db_shalloc_free(<->reginfo, SH_DBT_PTR(&sh_obj->lockobj)); SH_TAILQ_INSERT_HEAD( ®ion->free_objs, sh_obj, links, __db_lockobj); @@ -1312,10 +1157,6 @@ __lock_put_internal(lt, lockp, obj_ndx, flags) return (ret); } -/* - * Utility functions; listed alphabetically. 
- */ - /* * __lock_freelock -- * Free a lock. Unlink it from its locker if necessary. @@ -1365,348 +1206,6 @@ __lock_freelock(lt, lockp, locker, flags) return (ret); } -/* - * __lock_addfamilylocker - * Put a locker entry in for a child transaction. - * - * PUBLIC: int __lock_addfamilylocker __P((DB_ENV *, u_int32_t, u_int32_t)); - */ -int -__lock_addfamilylocker(dbenv, pid, id) - DB_ENV *dbenv; - u_int32_t pid, id; -{ - DB_LOCKER *lockerp, *mlockerp; - DB_LOCKREGION *region; - DB_LOCKTAB *lt; - u_int32_t ndx; - int ret; - - lt = dbenv->lk_handle; - region = lt->reginfo.primary; - LOCKREGION(dbenv, lt); - - /* get/create the parent locker info */ - LOCKER_LOCK(lt, region, pid, ndx); - if ((ret = __lock_getlocker(dbenv->lk_handle, - pid, ndx, 1, &mlockerp)) != 0) - goto err; - - /* - * We assume that only one thread can manipulate - * a single transaction family. - * Therefore the master locker cannot go away while - * we manipulate it, nor can another child in the - * family be created at the same time. - */ - LOCKER_LOCK(lt, region, id, ndx); - if ((ret = __lock_getlocker(dbenv->lk_handle, - id, ndx, 1, &lockerp)) != 0) - goto err; - - /* Point to our parent. */ - lockerp->parent_locker = R_OFFSET(<->reginfo, mlockerp); - - /* See if this locker is the family master. */ - if (mlockerp->master_locker == INVALID_ROFF) - lockerp->master_locker = R_OFFSET(<->reginfo, mlockerp); - else { - lockerp->master_locker = mlockerp->master_locker; - mlockerp = R_ADDR(<->reginfo, mlockerp->master_locker); - } - - /* - * Link the child at the head of the master's list. - * The guess is when looking for deadlock that - * the most recent child is the one thats blocked. - */ - SH_LIST_INSERT_HEAD( - &mlockerp->child_locker, lockerp, child_link, __db_locker); - -err: - UNLOCKREGION(dbenv, lt); - - return (ret); -} - -/* - * __lock_freefamilylocker - * Remove a locker from the hash table and its family. - * - * This must be called without the locker bucket locked. - * - * PUBLIC: int __lock_freefamilylocker __P((DB_LOCKTAB *, u_int32_t)); - */ -int -__lock_freefamilylocker(lt, locker) - DB_LOCKTAB *lt; - u_int32_t locker; -{ - DB_ENV *dbenv; - DB_LOCKER *sh_locker; - DB_LOCKREGION *region; - u_int32_t indx; - int ret; - - dbenv = lt->dbenv; - region = lt->reginfo.primary; - - LOCKREGION(dbenv, lt); - LOCKER_LOCK(lt, region, locker, indx); - - if ((ret = __lock_getlocker(lt, - locker, indx, 0, &sh_locker)) != 0 || sh_locker == NULL) - goto err; - - if (SH_LIST_FIRST(&sh_locker->heldby, __db_lock) != NULL) { - ret = EINVAL; - __db_err(dbenv, "Freeing locker with locks"); - goto err; - } - - /* If this is part of a family, we must fix up its links. */ - if (sh_locker->master_locker != INVALID_ROFF) - SH_LIST_REMOVE(sh_locker, child_link, __db_locker); - - __lock_freelocker(lt, region, sh_locker, indx); - -err: - UNLOCKREGION(dbenv, lt); - return (ret); -} - -/* - * __lock_freelocker - * common code for deleting a locker. - * - * This must be called with the locker bucket locked. - */ -static void -__lock_freelocker(lt, region, sh_locker, indx) - DB_LOCKTAB *lt; - DB_LOCKREGION *region; - DB_LOCKER *sh_locker; - u_int32_t indx; - -{ - HASHREMOVE_EL( - lt->locker_tab, indx, __db_locker, links, sh_locker); - SH_TAILQ_INSERT_HEAD( - ®ion->free_lockers, sh_locker, links, __db_locker); - SH_TAILQ_REMOVE(®ion->lockers, sh_locker, ulinks, __db_locker); - region->stat.st_nlockers--; -} - -/* - * __lock_set_timeout - * -- set timeout values in shared memory. - * This is called from the transaction system. 
- * We either set the time that this tranaction expires or the - * amount of time that a lock for this transaction is permitted - * to wait. - * - * PUBLIC: int __lock_set_timeout __P(( DB_ENV *, - * PUBLIC: u_int32_t, db_timeout_t, u_int32_t)); - */ -int -__lock_set_timeout(dbenv, locker, timeout, op) - DB_ENV *dbenv; - u_int32_t locker; - db_timeout_t timeout; - u_int32_t op; -{ - DB_LOCKTAB *lt; - int ret; - - lt = dbenv->lk_handle; - - LOCKREGION(dbenv, lt); - ret = __lock_set_timeout_internal(dbenv, locker, timeout, op); - UNLOCKREGION(dbenv, lt); - return (ret); -} -/* - * __lock_set_timeout_internal - * -- set timeout values in shared memory. - * This is the internal version called from the lock system. - * We either set the time that this tranaction expires or the - * amount of time that a lock for this transaction is permitted - * to wait. - * - */ -static int -__lock_set_timeout_internal(dbenv, locker, timeout, op) - DB_ENV *dbenv; - u_int32_t locker; - db_timeout_t timeout; - u_int32_t op; -{ - DB_LOCKER *sh_locker; - DB_LOCKREGION *region; - DB_LOCKTAB *lt; - u_int32_t locker_ndx; - int ret; - - lt = dbenv->lk_handle; - region = lt->reginfo.primary; - - LOCKER_LOCK(lt, region, locker, locker_ndx); - ret = __lock_getlocker(lt, locker, locker_ndx, 1, &sh_locker); - - if (ret != 0) - return (ret); - - if (op == DB_SET_TXN_TIMEOUT) { - if (timeout == 0) - LOCK_SET_TIME_INVALID(&sh_locker->tx_expire); - else - __lock_expires(dbenv, &sh_locker->tx_expire, timeout); - } else if (op == DB_SET_LOCK_TIMEOUT) { - sh_locker->lk_timeout = timeout; - F_SET(sh_locker, DB_LOCKER_TIMEOUT); - } else if (op == DB_SET_TXN_NOW) { - LOCK_SET_TIME_INVALID(&sh_locker->tx_expire); - __lock_expires(dbenv, &sh_locker->tx_expire, 0); - sh_locker->lk_expire = sh_locker->tx_expire; - if (!LOCK_TIME_ISVALID(®ion->next_timeout) || - LOCK_TIME_GREATER( - ®ion->next_timeout, &sh_locker->lk_expire)) - region->next_timeout = sh_locker->lk_expire; - } else - return (EINVAL); - - return (0); -} - -/* - * __lock_inherit_timeout - * -- inherit timeout values from parent locker. - * This is called from the transaction system. This will - * return EINVAL if the parent does not exist or did not - * have a current txn timeout set. - * - * PUBLIC: int __lock_inherit_timeout __P(( DB_ENV *, u_int32_t, u_int32_t)); - */ -int -__lock_inherit_timeout(dbenv, parent, locker) - DB_ENV *dbenv; - u_int32_t parent, locker; -{ - DB_LOCKER *parent_locker, *sh_locker; - DB_LOCKREGION *region; - DB_LOCKTAB *lt; - u_int32_t locker_ndx; - int ret; - - lt = dbenv->lk_handle; - region = lt->reginfo.primary; - ret = 0; - LOCKREGION(dbenv, lt); - - /* If the parent does not exist, we are done. */ - LOCKER_LOCK(lt, region, parent, locker_ndx); - if ((ret = __lock_getlocker(lt, - parent, locker_ndx, 0, &parent_locker)) != 0) - goto err; - - /* - * If the parent is not there yet, thats ok. If it - * does not have any timouts set, then avoid creating - * the child locker at this point. 
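The timeout code above keeps two values per locker: an absolute transaction expiry and a relative per-lock wait limit. The arithmetic involved is converting a microsecond timeout into an absolute time and later testing whether it has passed. A hedged, standalone sketch of that arithmetic follows; it uses plain struct timeval and gettimeofday rather than the library's timer helpers.

#include <sys/time.h>

/* Compute "now + timeout_us" into *expire; sketch only. */
static void
expire_from_timeout(struct timeval *expire, unsigned long timeout_us)
{
	gettimeofday(expire, NULL);
	expire->tv_sec += timeout_us / 1000000;
	expire->tv_usec += timeout_us % 1000000;
	if (expire->tv_usec >= 1000000) {
		expire->tv_sec++;
		expire->tv_usec -= 1000000;
	}
}

/* Return nonzero if *expire has passed. */
static int
has_expired(const struct timeval *expire)
{
	struct timeval now;

	gettimeofday(&now, NULL);
	return (now.tv_sec > expire->tv_sec ||
	    (now.tv_sec == expire->tv_sec && now.tv_usec >= expire->tv_usec));
}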
- */ - if (parent_locker == NULL || - (LOCK_TIME_ISVALID(&parent_locker->tx_expire) && - !F_ISSET(parent_locker, DB_LOCKER_TIMEOUT))) { - ret = EINVAL; - goto done; - } - - LOCKER_LOCK(lt, region, locker, locker_ndx); - if ((ret = __lock_getlocker(lt, - locker, locker_ndx, 1, &sh_locker)) != 0) - goto err; - - sh_locker->tx_expire = parent_locker->tx_expire; - - if (F_ISSET(parent_locker, DB_LOCKER_TIMEOUT)) { - sh_locker->lk_timeout = parent_locker->lk_timeout; - F_SET(sh_locker, DB_LOCKER_TIMEOUT); - if (!LOCK_TIME_ISVALID(&parent_locker->tx_expire)) - ret = EINVAL; - } - -done: -err: - UNLOCKREGION(dbenv, lt); - return (ret); -} - -/* - * __lock_getlocker -- - * Get a locker in the locker hash table. The create parameter - * indicates if the locker should be created if it doesn't exist in - * the table. - * - * This must be called with the locker bucket locked. - * - * PUBLIC: int __lock_getlocker __P((DB_LOCKTAB *, - * PUBLIC: u_int32_t, u_int32_t, int, DB_LOCKER **)); - */ -int -__lock_getlocker(lt, locker, indx, create, retp) - DB_LOCKTAB *lt; - u_int32_t locker, indx; - int create; - DB_LOCKER **retp; -{ - DB_ENV *dbenv; - DB_LOCKER *sh_locker; - DB_LOCKREGION *region; - - dbenv = lt->dbenv; - region = lt->reginfo.primary; - - HASHLOOKUP(lt->locker_tab, - indx, __db_locker, links, locker, sh_locker, __lock_locker_cmp); - - /* - * If we found the locker, then we can just return it. If - * we didn't find the locker, then we need to create it. - */ - if (sh_locker == NULL && create) { - /* Create new locker and then insert it into hash table. */ - if ((sh_locker = SH_TAILQ_FIRST( - ®ion->free_lockers, __db_locker)) == NULL) { - __db_err(dbenv, __db_lock_err, "locker entries"); - return (ENOMEM); - } - SH_TAILQ_REMOVE( - ®ion->free_lockers, sh_locker, links, __db_locker); - if (++region->stat.st_nlockers > region->stat.st_maxnlockers) - region->stat.st_maxnlockers = region->stat.st_nlockers; - - sh_locker->id = locker; - sh_locker->dd_id = 0; - sh_locker->master_locker = INVALID_ROFF; - sh_locker->parent_locker = INVALID_ROFF; - SH_LIST_INIT(&sh_locker->child_locker); - sh_locker->flags = 0; - SH_LIST_INIT(&sh_locker->heldby); - sh_locker->nlocks = 0; - sh_locker->nwrites = 0; - sh_locker->lk_timeout = 0; - LOCK_SET_TIME_INVALID(&sh_locker->tx_expire); - LOCK_SET_TIME_INVALID(&sh_locker->lk_expire); - - HASHINSERT(lt->locker_tab, indx, __db_locker, links, sh_locker); - SH_TAILQ_INSERT_HEAD(®ion->lockers, - sh_locker, ulinks, __db_locker); - } - - *retp = sh_locker; - return (0); -} - /* * __lock_getobj -- * Get an object in the object hash table. The create parameter @@ -1744,8 +1243,7 @@ __lock_getobj(lt, obj, ndx, create, retp) /* Create new object and then insert it into hash table. 
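The locker lookup shown above (which this patch moves out of lock.c) follows a common shared-memory pattern: hash lookup by id, and on a miss pull a preallocated entry off a free list instead of allocating. Below is a rough sketch of that pattern using ordinary pointers instead of region offsets; all names are illustrative.

#include <stddef.h>

struct locker {
	unsigned int id;
	struct locker *hash_next;   /* hash-bucket chain */
	struct locker *free_next;   /* free-list chain */
};

struct locker_table {
	struct locker **buckets;
	unsigned int nbuckets;
	struct locker *free_list;   /* preallocated, fixed-size pool */
};

/* Find a locker by id; optionally create it from the free list.
 * Returns NULL if not found (or the pool is exhausted on create). */
static struct locker *
get_locker(struct locker_table *t, unsigned int id, int create)
{
	unsigned int ndx = id % t->nbuckets;
	struct locker *lk;

	for (lk = t->buckets[ndx]; lk != NULL; lk = lk->hash_next)
		if (lk->id == id)
			return (lk);

	if (!create || (lk = t->free_list) == NULL)
		return (NULL);
	t->free_list = lk->free_next;

	lk->id = id;
	lk->hash_next = t->buckets[ndx];
	t->buckets[ndx] = lk;
	return (lk);
}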
*/ if ((sh_obj = SH_TAILQ_FIRST(®ion->free_objs, __db_lockobj)) == NULL) { - __db_err(lt->dbenv, __db_lock_err, "object entries"); - ret = ENOMEM; + ret = __lock_nomem(lt->dbenv, "object entries"); goto err; } @@ -1755,8 +1253,8 @@ __lock_getobj(lt, obj, ndx, create, retp) */ if (obj->size <= sizeof(sh_obj->objdata)) p = sh_obj->objdata; - else if ((ret = __db_shalloc( - lt->reginfo.addr, obj->size, 0, &p)) != 0) { + else if ((ret = + __db_shalloc(<->reginfo, obj->size, 0, &p)) != 0) { __db_err(dbenv, "No space for lock object storage"); goto err; } @@ -1771,7 +1269,8 @@ __lock_getobj(lt, obj, ndx, create, retp) SH_TAILQ_INIT(&sh_obj->waiters); SH_TAILQ_INIT(&sh_obj->holders); sh_obj->lockobj.size = obj->size; - sh_obj->lockobj.off = SH_PTR_TO_OFF(&sh_obj->lockobj, p); + sh_obj->lockobj.off = + (roff_t)SH_PTR_TO_OFF(&sh_obj->lockobj, p); HASHINSERT(lt->obj_tab, ndx, __db_lockobj, links, sh_obj); } @@ -1785,7 +1284,7 @@ err: return (ret); /* * __lock_is_parent -- * Given a locker and a transaction, return 1 if the locker is - * an ancestor of the designcated transaction. This is used to determine + * an ancestor of the designated transaction. This is used to determine * if we should grant locks that appear to conflict, but don't because * the lock is already held by an ancestor. */ @@ -1800,7 +1299,7 @@ __lock_is_parent(lt, locker, sh_locker) parent = sh_locker; while (parent->parent_locker != INVALID_ROFF) { parent = (DB_LOCKER *) - R_ADDR(<->reginfo, parent->parent_locker); + R_ADDR(lt->dbenv, <->reginfo, parent->parent_locker); if (parent->id == locker) return (1); } @@ -1809,10 +1308,50 @@ __lock_is_parent(lt, locker, sh_locker) } /* - * __lock_inherit_locks -- - * Called on child commit to merge child's locks with parent's. + * __lock_locker_is_parent -- + * Determine if "locker" is an ancestor of "child". + * *retp == 1 if so, 0 otherwise. + * + * PUBLIC: int __lock_locker_is_parent + * PUBLIC: __P((DB_ENV *, u_int32_t, u_int32_t, int *)); */ +int +__lock_locker_is_parent(dbenv, locker, child, retp) + DB_ENV *dbenv; + u_int32_t locker, child; + int *retp; +{ + DB_LOCKER *sh_locker; + DB_LOCKREGION *region; + DB_LOCKTAB *lt; + u_int32_t locker_ndx; + int ret; + lt = dbenv->lk_handle; + region = lt->reginfo.primary; + + LOCKER_LOCK(lt, region, child, locker_ndx); + if ((ret = + __lock_getlocker(lt, child, locker_ndx, 0, &sh_locker)) != 0) { + __db_err(dbenv, __db_locker_invalid); + return (ret); + } + + /* + * The locker may not exist for this transaction, if not then it has + * no parents. + */ + if (sh_locker == NULL) + *retp = 0; + else + *retp = __lock_is_parent(lt, locker, sh_locker); + return (0); +} + +/* + * __lock_inherit_locks -- + * Called on child commit to merge child's locks with parent's. + */ static int __lock_inherit_locks(lt, locker, flags) DB_LOCKTAB *lt; @@ -1856,7 +1395,7 @@ __lock_inherit_locks(lt, locker, flags) goto err; } sh_parent = (DB_LOCKER *) - R_ADDR(<->reginfo, sh_locker->parent_locker); + R_ADDR(dbenv, <->reginfo, sh_locker->parent_locker); F_SET(sh_locker, DB_LOCKER_DELETED); /* @@ -2057,52 +1596,6 @@ __lock_remove_waiter(lt, sh_obj, lockp, status) MUTEX_UNLOCK(lt->dbenv, &lockp->mutex); } -/* - * __lock_expires -- set the expire time given the time to live. - * We assume that if timevalp is set then it contains "now". - * This avoids repeated system calls to get the time. 
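__lock_is_parent and the new __lock_locker_is_parent wrapper above walk the parent-locker chain to decide whether a lock that appears to conflict is really held by an ancestor transaction. A simplified sketch of that walk, with plain pointers in place of region offsets and an illustrative struct:

struct locker {
	unsigned int id;
	struct locker *parent;   /* NULL at the top of the family */
};

/* Return 1 if ancestor_id names an ancestor of child, else 0. */
static int
is_parent(const struct locker *child, unsigned int ancestor_id)
{
	const struct locker *p;

	for (p = child->parent; p != NULL; p = p->parent)
		if (p->id == ancestor_id)
			return (1);
	return (0);
}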
- */ -static void -__lock_expires(dbenv, timevalp, timeout) - DB_ENV *dbenv; - db_timeval_t *timevalp; - db_timeout_t timeout; -{ - if (!LOCK_TIME_ISVALID(timevalp)) - __os_clock(dbenv, &timevalp->tv_sec, &timevalp->tv_usec); - if (timeout > 1000000) { - timevalp->tv_sec += timeout / 1000000; - timevalp->tv_usec += timeout % 1000000; - } else - timevalp->tv_usec += timeout; - - if (timevalp->tv_usec > 1000000) { - timevalp->tv_sec++; - timevalp->tv_usec -= 1000000; - } -} - -/* - * __lock_expired -- determine if a lock has expired. - * - * PUBLIC: int __lock_expired __P((DB_ENV *, db_timeval_t *, db_timeval_t *)); - */ -int -__lock_expired(dbenv, now, timevalp) - DB_ENV *dbenv; - db_timeval_t *now, *timevalp; -{ - if (!LOCK_TIME_ISVALID(timevalp)) - return (0); - - if (!LOCK_TIME_ISVALID(now)) - __os_clock(dbenv, &now->tv_sec, &now->tv_usec); - - return (now->tv_sec > timevalp->tv_sec || - (now->tv_sec == timevalp->tv_sec && - now->tv_usec >= timevalp->tv_usec)); -} - /* * __lock_trade -- * @@ -2126,7 +1619,7 @@ __lock_trade(dbenv, lock, new_locker) lt = dbenv->lk_handle; region = lt->reginfo.primary; - lp = (struct __db_lock *)R_ADDR(<->reginfo, lock->off); + lp = (struct __db_lock *)R_ADDR(dbenv, <->reginfo, lock->off); /* If the lock is already released, simply return. */ if (lp->gen != lock->gen) @@ -2156,285 +1649,3 @@ __lock_trade(dbenv, lock, new_locker) return (0); } - -/* - * Lock list routines. - * The list is composed of a 32-bit count of locks followed by - * each lock. A lock is represented by a 16-bit page-count, a lock - * object and a page list. A lock object consists of a 16-bit size - * and the object itself. In a pseudo BNF notation, you get: - * - * LIST = COUNT32 LOCK* - * LOCK = COUNT16 LOCKOBJ PAGELIST - * LOCKOBJ = COUNT16 OBJ - * PAGELIST = COUNT32* - * - * (Recall that X* means "0 or more X's") - * - * In most cases, the OBJ is a struct __db_ilock and the page list is - * a series of (32-bit) page numbers that should get written into the - * pgno field of the __db_ilock. So, the actual number of pages locked - * is the number of items in the PAGELIST plus 1. If this is an application- - * specific lock, then we cannot interpret obj and the pagelist must - * be empty. - * - * Consider a lock list for: File A, pages 1&2, File B pages 3-5, Applock - * This would be represented as: - * 5 1 [fid=A;page=1] 2 2 [fid=B;page=3] 4 5 0 APPLOCK - * ------------------ -------------------- --------- - * LOCK for file A LOCK for file B application-specific lock - */ - -#define MAX_PGNOS 0xffff - -/* - * These macros are bigger than one might exepect becasue the - * Solaris compiler says that a cast does not return an lvalue, - * so constructs like *(u_int32_t*)dp = count; generate warnings. 
- */ - -#define RET_SIZE(size, count) ((size) + \ - sizeof(u_int32_t) + (count) * 2 * sizeof(u_int16_t)) - -#define PUT_COUNT(dp, count) do { u_int32_t *ip = (u_int32_t *)dp;\ - *ip = count; \ - dp = (u_int8_t *)dp + \ - sizeof(u_int32_t); \ - } while (0) -#define PUT_PCOUNT(dp, count) do { u_int16_t *ip = (u_int16_t *)dp;\ - *ip = count; \ - dp = (u_int8_t *)dp + \ - sizeof(u_int16_t); \ - } while (0) -#define PUT_SIZE(dp, size) do { u_int16_t *ip = (u_int16_t *)dp;\ - *ip = size; \ - dp = (u_int8_t *)dp + \ - sizeof(u_int16_t); \ - } while (0) -#define PUT_PGNO(dp, pgno) do { db_pgno_t *ip = (db_pgno_t *)dp;\ - *ip = pgno; \ - dp = (u_int8_t *)dp + \ - sizeof(db_pgno_t); \ - } while (0) -#define COPY_OBJ(dp, obj) do { \ - memcpy(dp, obj->data, obj->size); \ - dp = (u_int8_t *)dp + \ - ALIGN(obj->size, \ - sizeof(u_int32_t)); \ - } while (0) - -#define GET_COUNT(dp, count) do { \ - (count) = *(u_int32_t *) dp; \ - dp = (u_int8_t *)dp + \ - sizeof(u_int32_t); \ - } while (0); -#define GET_PCOUNT(dp, count) do { \ - (count) = *(u_int16_t *) dp; \ - dp = (u_int8_t *)dp + \ - sizeof(u_int16_t); \ - } while (0); -#define GET_SIZE(dp, size) do { \ - (size) = *(u_int16_t *) dp; \ - dp = (u_int8_t *)dp + \ - sizeof(u_int16_t); \ - } while (0); -#define GET_PGNO(dp, pgno) do { \ - (pgno) = *(db_pgno_t *) dp; \ - dp = (u_int8_t *)dp + \ - sizeof(db_pgno_t); \ - } while (0); - -static int -__lock_fix_list(dbenv, list_dbt, nlocks) - DB_ENV *dbenv; - DBT *list_dbt; - u_int32_t nlocks; -{ - DBT *obj; - DB_LOCK_ILOCK *lock, *plock; - u_int32_t i, j, nfid, npgno, size; - int ret; - u_int8_t *data, *dp; - - size = list_dbt->size; - if (size == 0) - return (0); - - obj = (DBT *) list_dbt->data; - - /* - * If necessary sort the list of locks so that locks - * on the same fileid are together. We do not sort - * 1 or 2 locks because by definition if there are - * locks on the same fileid they will be together. - * The sort will also move any locks that do not - * look like page locks to the end of the list - * so we can stop looking for locks we can combine - * when we hit one. - */ - switch (nlocks) { - case 1: - size = RET_SIZE(obj->size, 1); - if ((ret = __os_malloc(dbenv, size, &data)) != 0) - return (ret); - - dp = data; - PUT_COUNT(dp, 1); - PUT_PCOUNT(dp, 0); - PUT_SIZE(dp, obj->size); - COPY_OBJ(dp, obj); - break; - - default: - /* Sort so that all locks with same fileid are together. */ - qsort(list_dbt->data, nlocks, sizeof(DBT), __lock_sort_cmp); - /* FALL THROUGH */ - case 2: - nfid = npgno = 0; - i = 0; - if (obj->size != sizeof(DB_LOCK_ILOCK)) - goto not_ilock; - - nfid = 1; - plock = (DB_LOCK_ILOCK *)obj->data; - - /* We use ulen to keep track of the number of pages. */ - j = 0; - obj[0].ulen = 0; - for (i = 1; i < nlocks; i++) { - if (obj[i].size != sizeof(DB_LOCK_ILOCK)) - break; - lock = (DB_LOCK_ILOCK *)obj[i].data; - if (obj[j].ulen < MAX_PGNOS && - lock->type == plock->type && - memcmp(lock->fileid, - plock->fileid, DB_FILE_ID_LEN) == 0) { - obj[j].ulen++; - npgno++; - } else { - nfid++; - plock = lock; - j = i; - obj[j].ulen = 0; - } - } - -not_ilock: - size = nfid * sizeof(DB_LOCK_ILOCK); - size += npgno * sizeof(db_pgno_t); - /* Add the number of nonstandard locks and get their size. 
*/ - nfid += nlocks - i; - for (; i < nlocks; i++) { - size += obj[i].size; - obj[i].ulen = 0; - } - - size = RET_SIZE(size, nfid); - if ((ret = __os_malloc(dbenv, size, &data)) != 0) - return (ret); - - dp = data; - PUT_COUNT(dp, nfid); - - for (i = 0; i < nlocks; i = j) { - PUT_PCOUNT(dp, obj[i].ulen); - PUT_SIZE(dp, obj[i].size); - COPY_OBJ(dp, obj); - lock = (DB_LOCK_ILOCK *)obj[i].data; - for (j = i + 1; j <= i + obj[i].ulen; j++) { - lock = (DB_LOCK_ILOCK *)obj[j].data; - PUT_PGNO(dp, lock->pgno); - } - } - } - - (void)__os_free(dbenv, list_dbt->data); - - list_dbt->data = data; - list_dbt->size = size; - - return (0); -} - -/* - * PUBLIC: int __lock_get_list __P((DB_ENV *, u_int32_t, u_int32_t, - * PUBLIC: db_lockmode_t, DBT *)); - */ -int -__lock_get_list(dbenv, locker, flags, lock_mode, list) - DB_ENV *dbenv; - u_int32_t locker, flags; - db_lockmode_t lock_mode; - DBT *list; -{ - DBT obj_dbt; - DB_LOCK ret_lock; - DB_LOCK_ILOCK *lock; - DB_LOCKTAB *lt; - DB_LOCKREGION *region; - db_pgno_t save_pgno; - u_int16_t npgno, size; - u_int32_t i, nlocks; - int ret; - void *dp; - - if (list->size == 0) - return (0); - ret = 0; - lt = dbenv->lk_handle; - region = lt->reginfo.primary; - dp = list->data; - - GET_COUNT(dp, nlocks); - LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle); - - for (i = 0; i < nlocks; i++) { - GET_PCOUNT(dp, npgno); - GET_SIZE(dp, size); - lock = (DB_LOCK_ILOCK *) dp; - save_pgno = lock->pgno; - obj_dbt.data = dp; - obj_dbt.size = size; - dp = ((u_int8_t *)dp) + ALIGN(size, sizeof(u_int32_t)); - do { - if ((ret = __lock_get_internal(lt, locker, - flags, &obj_dbt, lock_mode, 0, &ret_lock)) != 0) { - lock->pgno = save_pgno; - goto err; - } - if (npgno != 0) - GET_PGNO(dp, lock->pgno); - } while (npgno-- != 0); - lock->pgno = save_pgno; - } - -err: - UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle); - return (ret); -} - -static int -__lock_sort_cmp(a, b) - const void *a, *b; -{ - const DBT *d1, *d2; - DB_LOCK_ILOCK *l1, *l2; - - d1 = a; - d2 = b; - - /* Force all non-standard locks to sort at end. */ - if (d1->size != sizeof(DB_LOCK_ILOCK)) { - if (d2->size != sizeof(DB_LOCK_ILOCK)) - return (d1->size - d2->size); - else - return (1); - } else if (d2->size != sizeof(DB_LOCK_ILOCK)) - return (-1); - - l1 = d1->data; - l2 = d2->data; - if (l1->type != l2->type) - return (l1->type - l2->type); - return (memcmp(l1->fileid, l2->fileid, DB_FILE_ID_LEN)); -} diff --git a/db/lock/lock_deadlock.c b/db/lock/lock_deadlock.c index d7cf5e0b7..61f4f5a35 100644 --- a/db/lock/lock_deadlock.c +++ b/db/lock/lock_deadlock.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
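The lock-list code removed above (it reappears in the new db/lock/lock_list.c later in this patch) serializes a set of page locks as a 32-bit lock count, then for each lock a 16-bit extra-page count, a 16-bit object size, the aligned object itself, and that many additional page numbers. A hedged sketch of a reader for that layout; the types are simplified, the buffer is assumed well formed, and this is not the library's own decoder.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Align to a 4-byte boundary, as the serialized object data is. */
#define ALIGN4(n)	(((n) + 3) & ~(uint32_t)3)

/* Walk a serialized lock list: COUNT32, then per lock
 * COUNT16 (extra pages), SIZE16, OBJ, and COUNT16 page numbers. */
static void
dump_lock_list(const uint8_t *dp)
{
	uint32_t nlocks, pgno, i;
	uint16_t npgno, size, j;

	memcpy(&nlocks, dp, sizeof(nlocks));
	dp += sizeof(nlocks);

	for (i = 0; i < nlocks; i++) {
		memcpy(&npgno, dp, sizeof(npgno));
		dp += sizeof(npgno);
		memcpy(&size, dp, sizeof(size));
		dp += sizeof(size);

		printf("lock %u: obj of %u bytes, %u extra page(s)\n",
		    (unsigned)i, (unsigned)size, (unsigned)npgno);
		dp += ALIGN4(size);            /* skip the lock object */

		for (j = 0; j < npgno; j++) {  /* the extra page numbers */
			memcpy(&pgno, dp, sizeof(pgno));
			dp += sizeof(pgno);
			printf("\tpage %u\n", (unsigned)pgno);
		}
	}
}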
+ * + * $Id: lock_deadlock.c,v 11.85 2004/09/22 03:48:29 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: lock_deadlock.c,v 11.66 2003/11/19 19:59:02 ubell Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -23,7 +21,7 @@ static const char revid[] = "$Id: lock_deadlock.c,v 11.66 2003/11/19 19:59:02 ub #include "dbinc/log.h" #include "dbinc/txn.h" -#define ISSET_MAP(M, N) ((M)[(N) / 32] & (1 << (N) % 32)) +#define ISSET_MAP(M, N) ((M)[(N) / 32] & (1 << ((N) % 32))) #define CLEAR_MAP(M, N) { \ u_int32_t __i; \ @@ -47,8 +45,8 @@ typedef struct { int in_abort; u_int32_t count; u_int32_t id; - u_int32_t last_lock; - ssize_t last_obj; + roff_t last_lock; + roff_t last_obj; u_int32_t last_locker_id; db_pgno_t pgno; } locker_info; @@ -92,6 +90,7 @@ __lock_detect_pp(dbenv, flags, atype, abortp) case DB_LOCK_DEFAULT: case DB_LOCK_EXPIRE: case DB_LOCK_MAXLOCKS: + case DB_LOCK_MAXWRITE: case DB_LOCK_MINLOCKS: case DB_LOCK_MINWRITE: case DB_LOCK_OLDEST: @@ -109,7 +108,7 @@ __lock_detect_pp(dbenv, flags, atype, abortp) __env_rep_enter(dbenv); ret = __lock_detect(dbenv, atype, abortp); if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -131,7 +130,7 @@ __lock_detect(dbenv, atype, abortp) db_timeval_t now; locker_info *idmap; u_int32_t *bitmap, *copymap, **deadp, **free_me, *tmpmap; - u_int32_t i, keeper, killid, limit, nalloc, nlockers; + u_int32_t i, cid, keeper, killid, limit, nalloc, nlockers; u_int32_t lock_max, txn_max; int ret; @@ -216,98 +215,123 @@ __lock_detect(dbenv, atype, abortp) for (; *deadp != NULL; deadp++) { if (abortp != NULL) ++*abortp; - killid = (u_int32_t)((*deadp - bitmap) / nalloc); + killid = (u_int32_t)(*deadp - bitmap) / nalloc; limit = killid; - keeper = BAD_KILLID; - if (atype == DB_LOCK_DEFAULT || atype == DB_LOCK_RANDOM) - goto dokill; /* - * It's conceivable that under XA, the locker could - * have gone away. + * There are cases in which our general algorithm will + * fail. Returning 1 from verify indicates that the + * particular locker is not only involved in a deadlock, + * but that killing him will allow others to make forward + * progress. Unfortunately, there are cases where we need + * to abort someone, but killing them will not necessarily + * ensure forward progress (imagine N readers all trying to + * acquire a write lock). + * killid is only set to lockers that pass the db_verify test. + * keeper will hold the best candidate even if it does + * not pass db_verify. Once we fill in killid then we do + * not need a keeper, but we keep updating it anyway. */ - if (killid == BAD_KILLID) - break; + + keeper = idmap[killid].in_abort == 0 ? killid : BAD_KILLID; + if (keeper == BAD_KILLID || + __dd_verify(idmap, *deadp, + tmpmap, copymap, nlockers, nalloc, keeper) == 0) + killid = BAD_KILLID; + + if (killid != BAD_KILLID && + (atype == DB_LOCK_DEFAULT || atype == DB_LOCK_RANDOM)) + goto dokill; /* - * Start with the id that we know is deadlocked - * and then examine all other set bits and see - * if any are a better candidate for abortion - * and that they are genuinely part of the - * deadlock. The definition of "best": - * OLDEST: smallest id - * YOUNGEST: largest id - * MAXLOCKS: maximum count - * MINLOCKS: minimum count - * MINWRITE: minimum count + * Start with the id that we know is deadlocked, then examine + * all other set bits and see if any are a better candidate + * for abortion and they are genuinely part of the deadlock. 
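The deadlock detector builds an N-by-N waits-for matrix out of 32-bit words; the ISSET_MAP/SET_MAP macros index bit N of a row. The sketch below shows the bitmap idiom in isolation, with the parenthesization the patch spells out; names and word sizes are illustrative, not the detector's own.

#include <stdint.h>

/* One row of the waits-for matrix is an array of 32-bit words;
 * bit N of a row means "this locker waits on locker N". */
#define WORDS_FOR(nlockers)	(((nlockers) + 31) / 32)

#define SET_BIT(row, n)		((row)[(n) / 32] |= (uint32_t)1 << ((n) % 32))
#define CLR_BIT(row, n)		((row)[(n) / 32] &= ~((uint32_t)1 << ((n) % 32)))
#define ISSET_BIT(row, n)	(((row)[(n) / 32] >> ((n) % 32)) & 1)

/* OR waiter j's row into waiter i's row: i transitively waits on
 * everything j waits on.  nalloc is WORDS_FOR(nlockers). */
static void
or_row(uint32_t *dst, const uint32_t *src, uint32_t nalloc)
{
	uint32_t k;

	for (k = 0; k < nalloc; k++)
		dst[k] |= src[k];
}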
+ * The definition of "best": + * MAXLOCKS: maximum count + * MAXWRITE: maximum write count + * MINLOCKS: minimum count + * MINWRITE: minimum write count + * OLDEST: smallest id + * YOUNGEST: largest id */ - - for (i = (killid + 1) % nlockers; + for (i = (limit + 1) % nlockers; i != limit; i = (i + 1) % nlockers) { if (!ISSET_MAP(*deadp, i) || idmap[i].in_abort) continue; + + /* + * Determine if we have a verified candidate + * in killid, if not then compare with the + * non-verified candidate in keeper. + */ + if (killid == BAD_KILLID) { + if (keeper == BAD_KILLID) + goto use_next; + else + cid = keeper; + } else + cid = killid; + switch (atype) { case DB_LOCK_OLDEST: - if (__dd_isolder(idmap[killid].id, + if (__dd_isolder(idmap[cid].id, idmap[i].id, lock_max, txn_max)) continue; - keeper = i; break; case DB_LOCK_YOUNGEST: if (__dd_isolder(idmap[i].id, - idmap[killid].id, lock_max, txn_max)) + idmap[cid].id, lock_max, txn_max)) continue; - keeper = i; break; case DB_LOCK_MAXLOCKS: - if (idmap[i].count < idmap[killid].count) + if (idmap[i].count < idmap[cid].count) + continue; + break; + case DB_LOCK_MAXWRITE: + if (idmap[i].count < idmap[cid].count) continue; - keeper = i; break; case DB_LOCK_MINLOCKS: case DB_LOCK_MINWRITE: - if (idmap[i].count > idmap[killid].count) + if (idmap[i].count > idmap[cid].count) continue; - keeper = i; break; + case DB_LOCK_DEFAULT: + case DB_LOCK_RANDOM: + goto dokill; + default: killid = BAD_KILLID; ret = EINVAL; goto dokill; } + +use_next: keeper = i; if (__dd_verify(idmap, *deadp, tmpmap, copymap, nlockers, nalloc, i)) killid = i; } -dokill: if (killid == BAD_KILLID) - continue; - - /* - * There are cases in which our general algorithm will - * fail. Returning 1 from verify indicates that the - * particular locker is not only involved in a deadlock, - * but that killing him will allow others to make forward - * progress. Unfortunately, there are cases where we need - * to abort someone, but killing them will not necessarily - * ensure forward progress (imagine N readers all trying to - * acquire a write lock). In such a scenario, we'll have - * gotten all the way through the loop, we will have found - * someone to keep (keeper will be valid), but killid will - * still be the initial deadlocker. In this case, if the - * initial killid satisfies __dd_verify, kill it, else abort - * keeper and indicate that we need to run deadlock detection - * again. - */ - - if (keeper != BAD_KILLID && killid == limit && - __dd_verify(idmap, *deadp, - tmpmap, copymap, nlockers, nalloc, killid) == 0) { - LOCKREGION(dbenv, lt); - region->need_dd = 1; - UNLOCKREGION(dbenv, lt); - killid = keeper; +dokill: if (killid == BAD_KILLID) { + if (keeper == BAD_KILLID) + /* + * It's conceivable that under XA, the + * locker could have gone away. + */ + continue; + else { + /* + * Removing a single locker will not + * break the deadlock, signal to run + * detection again. + */ + LOCKREGION(dbenv, lt); + region->need_dd = 1; + UNLOCKREGION(dbenv, lt); + killid = keeper; + } } /* Kill the locker with lockid idmap[killid]. 
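The rewritten selection loop above keeps two candidates: killid, which has passed __dd_verify, and keeper, the best unverified fallback; each policy only changes the comparison used to decide whether locker i beats the current candidate. A sketch of that comparison in isolation follows. The policy names mirror the patch, but the record and helper are hypothetical, and the OLDEST/YOUNGEST cases ignore the id-wraparound handling the real detector does with __dd_isolder.

#include <stdint.h>

enum policy {
	POLICY_OLDEST, POLICY_YOUNGEST,
	POLICY_MAXLOCKS, POLICY_MAXWRITE,
	POLICY_MINLOCKS, POLICY_MINWRITE
};

struct candidate {
	uint32_t id;      /* locker/transaction id */
	uint32_t count;   /* lock count or write-lock count, per policy */
};

/* Return nonzero if "challenger" is at least as good a victim as "cur". */
static int
better_victim(enum policy p,
    const struct candidate *challenger, const struct candidate *cur)
{
	switch (p) {
	case POLICY_OLDEST:      /* prefer the smallest id */
		return (challenger->id < cur->id);
	case POLICY_YOUNGEST:    /* prefer the largest id */
		return (challenger->id > cur->id);
	case POLICY_MAXLOCKS:    /* count holds the lock count */
	case POLICY_MAXWRITE:    /* count holds the write-lock count */
		return (challenger->count >= cur->count);
	case POLICY_MINLOCKS:
	case POLICY_MINWRITE:
		return (challenger->count <= cur->count);
	}
	return (0);
}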
*/ @@ -324,7 +348,7 @@ dokill: if (killid == BAD_KILLID) "warning: unable to abort locker %lx", (u_long)idmap[killid].id); } else if (FLD_ISSET(dbenv->verbose, DB_VERB_DEADLOCK)) - __db_err(dbenv, + __db_msg(dbenv, "Aborting locker %lx", (u_long)idmap[killid].id); } __os_free(dbenv, tmpmap); @@ -396,10 +420,10 @@ retry: count = region->stat.st_nlockers; } if (FLD_ISSET(dbenv->verbose, DB_VERB_DEADLOCK)) - __db_err(dbenv, "%lu lockers", (u_long)count); + __db_msg(dbenv, "%lu lockers", (u_long)count); count += 20; - nentries = ALIGN(count, 32) / 32; + nentries = (u_int32_t)DB_ALIGN(count, 32) / 32; /* * Allocate enough space for a count by count bitmap matrix. @@ -444,11 +468,16 @@ retry: count = region->stat.st_nlockers; if (lip->master_locker == INVALID_ROFF) { lip->dd_id = id++; id_array[lip->dd_id].id = lip->id; - if (atype == DB_LOCK_MINLOCKS || - atype == DB_LOCK_MAXLOCKS) + switch (atype) { + case DB_LOCK_MINLOCKS: + case DB_LOCK_MAXLOCKS: id_array[lip->dd_id].count = lip->nlocks; - if (atype == DB_LOCK_MINWRITE) + break; + case DB_LOCK_MINWRITE: + case DB_LOCK_MAXWRITE: id_array[lip->dd_id].count = lip->nwrites; + break; + } if (F_ISSET(lip, DB_LOCKER_INABORT)) id_array[lip->dd_id].in_abort = 1; } else @@ -483,14 +512,19 @@ obj_loop: continue; if (lockerp->dd_id == DD_INVALID_ID) { - dd = ((DB_LOCKER *)R_ADDR(<->reginfo, + dd = ((DB_LOCKER *)R_ADDR(dbenv, <->reginfo, lockerp->master_locker))->dd_id; lockerp->dd_id = dd; - if (atype == DB_LOCK_MINLOCKS || - atype == DB_LOCK_MAXLOCKS) + switch (atype) { + case DB_LOCK_MINLOCKS: + case DB_LOCK_MAXLOCKS: id_array[dd].count += lockerp->nlocks; - if (atype == DB_LOCK_MINWRITE) + break; + case DB_LOCK_MINWRITE: + case DB_LOCK_MAXWRITE: id_array[dd].count += lockerp->nwrites; + break; + } if (F_ISSET(lockerp, DB_LOCKER_INABORT)) id_array[dd].in_abort = 1; @@ -537,14 +571,19 @@ look_waiters: continue; if (lockerp->dd_id == DD_INVALID_ID) { - dd = ((DB_LOCKER *)R_ADDR(<->reginfo, + dd = ((DB_LOCKER *)R_ADDR(dbenv, <->reginfo, lockerp->master_locker))->dd_id; lockerp->dd_id = dd; - if (atype == DB_LOCK_MINLOCKS || - atype == DB_LOCK_MAXLOCKS) + switch (atype) { + case DB_LOCK_MINLOCKS: + case DB_LOCK_MAXLOCKS: id_array[dd].count += lockerp->nlocks; - if (atype == DB_LOCK_MINWRITE) + break; + case DB_LOCK_MINWRITE: + case DB_LOCK_MAXWRITE: id_array[dd].count += lockerp->nwrites; + break; + } } else dd = lockerp->dd_id; id_array[dd].valid = 1; @@ -615,7 +654,8 @@ look_waiters: lp = SH_LIST_FIRST(&lockerp->heldby, __db_lock); if (lp != NULL) { id_array[id].last_locker_id = lockerp->id; - get_lock: id_array[id].last_lock = R_OFFSET(<->reginfo, lp); +get_lock: id_array[id].last_lock = R_OFFSET(dbenv, + <->reginfo, lp); id_array[id].last_obj = lp->obj; lo = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj); pptr = SH_DBT_PTR(&lo->lockobj); @@ -651,9 +691,9 @@ __dd_find(dbenv, bmp, idmap, nlockers, nalloc, deadp) locker_info *idmap; u_int32_t ***deadp; { - u_int32_t i, j, k, *mymap, *tmpmap; - u_int32_t **retp; - int ndead, ndeadalloc, ret; + u_int32_t i, j, k, *mymap, *tmpmap, **retp; + u_int ndead, ndeadalloc; + int ret; #undef INITIAL_DEAD_ALLOC #define INITIAL_DEAD_ALLOC 8 @@ -669,7 +709,7 @@ __dd_find(dbenv, bmp, idmap, nlockers, nalloc, deadp) * locker is waiting. 
*/ for (mymap = bmp, i = 0; i < nlockers; i++, mymap += nalloc) { - if (!idmap[i].valid || idmap[i].in_abort) + if (!idmap[i].valid) continue; for (j = 0; j < nlockers; j++) { if (!ISSET_MAP(mymap, j)) @@ -750,7 +790,7 @@ __dd_abort(dbenv, info) ret = DB_ALREADY_ABORTED; goto out; } - if (R_OFFSET(<->reginfo, lockp) != info->last_lock || + if (R_OFFSET(dbenv, <->reginfo, lockp) != info->last_lock || lockp->holder != lockerp->id || lockp->obj != info->last_obj || lockp->status != DB_LSTAT_WAITING) { ret = DB_ALREADY_ABORTED; @@ -792,32 +832,25 @@ __dd_debug(dbenv, idmap, bitmap, nlockers, nalloc) locker_info *idmap; u_int32_t *bitmap, nlockers, nalloc; { + DB_MSGBUF mb; u_int32_t i, j, *mymap; - char *msgbuf; - __db_err(dbenv, "Waitsfor array\nWaiter:\tWaiting on:"); - - /* Allocate space to print 10 bytes per item waited on. */ -#undef MSGBUF_LEN -#define MSGBUF_LEN ((nlockers + 1) * 10 + 64) - if (__os_malloc(dbenv, MSGBUF_LEN, &msgbuf) != 0) - return; + DB_MSGBUF_INIT(&mb); + __db_msg(dbenv, "Waitsfor array\nWaiter:\tWaiting on:"); for (mymap = bitmap, i = 0; i < nlockers; i++, mymap += nalloc) { if (!idmap[i].valid) continue; - sprintf(msgbuf, /* Waiter. */ + + __db_msgadd(dbenv, &mb, /* Waiter. */ "%lx/%lu:\t", (u_long)idmap[i].id, (u_long)idmap[i].pgno); for (j = 0; j < nlockers; j++) if (ISSET_MAP(mymap, j)) - sprintf(msgbuf, "%s %lx", msgbuf, - (u_long)idmap[j].id); - (void)sprintf(msgbuf, - "%s %lu", msgbuf, (u_long)idmap[i].last_lock); - __db_err(dbenv, msgbuf); + __db_msgadd(dbenv, + &mb, " %lx", (u_long)idmap[j].id); + __db_msgadd(dbenv, &mb, " %lu", (u_long)idmap[i].last_lock); + DB_MSGBUF_FLUSH(dbenv, &mb); } - - __os_free(dbenv, msgbuf); } #endif @@ -825,12 +858,12 @@ __dd_debug(dbenv, idmap, bitmap, nlockers, nalloc) * Given a bitmap that contains a deadlock, verify that the bit * specified in the which parameter indicates a transaction that * is actually deadlocked. Return 1 if really deadlocked, 0 otherwise. - * deadmap is the array that identified the deadlock. - * tmpmap is a copy of the initial bitmaps from the dd_build phase - * origmap is a temporary bit map into which we can OR things - * nlockers is the number of actual lockers under consideration - * nalloc is the number of words allocated for the bitmap - * which is the locker in question + * deadmap -- the array that identified the deadlock. + * tmpmap -- a copy of the initial bitmaps from the dd_build phase. + * origmap -- a temporary bit map into which we can OR things. + * nlockers -- the number of actual lockers under consideration. + * nalloc -- the number of words allocated for the bitmap. + * which -- the locker in question. */ static int __dd_verify(idmap, deadmap, tmpmap, origmap, nlockers, nalloc, which) diff --git a/db/lock/lock_id.c b/db/lock/lock_id.c new file mode 100644 index 000000000..0efd001b0 --- /dev/null +++ b/db/lock/lock_id.c @@ -0,0 +1,408 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1996-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: lock_id.c,v 11.145 2004/09/15 21:49:17 mjc Exp $ + */ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include + +#include +#include +#endif + +#include "db_int.h" +#include "dbinc/db_shash.h" +#include "dbinc/lock.h" +#include "dbinc/log.h" + +/* + * __lock_id_pp -- + * DB_ENV->lock_id pre/post processing. 
+ * + * PUBLIC: int __lock_id_pp __P((DB_ENV *, u_int32_t *)); + */ +int +__lock_id_pp(dbenv, idp) + DB_ENV *dbenv; + u_int32_t *idp; +{ + int rep_check, ret; + + PANIC_CHECK(dbenv); + ENV_REQUIRES_CONFIG(dbenv, + dbenv->lk_handle, "DB_ENV->lock_id", DB_INIT_LOCK); + + rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; + if (rep_check) + __env_rep_enter(dbenv); + ret = __lock_id(dbenv, idp); + if (rep_check) + __env_db_rep_exit(dbenv); + return (ret); +} + +/* + * __lock_id -- + * DB_ENV->lock_id. + * + * PUBLIC: int __lock_id __P((DB_ENV *, u_int32_t *)); + */ +int +__lock_id(dbenv, idp) + DB_ENV *dbenv; + u_int32_t *idp; +{ + DB_LOCKER *lk; + DB_LOCKTAB *lt; + DB_LOCKREGION *region; + u_int32_t *ids, locker_ndx; + int nids, ret; + + lt = dbenv->lk_handle; + region = lt->reginfo.primary; + ret = 0; + + /* + * Allocate a new lock id. If we wrap around then we + * find the minimum currently in use and make sure we + * can stay below that. This code is similar to code + * in __txn_begin_int for recovering txn ids. + */ + LOCKREGION(dbenv, lt); + /* + * Our current valid range can span the maximum valid value, so check + * for it and wrap manually. + */ + if (region->stat.st_id == DB_LOCK_MAXID && + region->stat.st_cur_maxid != DB_LOCK_MAXID) + region->stat.st_id = DB_LOCK_INVALIDID; + if (region->stat.st_id == region->stat.st_cur_maxid) { + if ((ret = __os_malloc(dbenv, + sizeof(u_int32_t) * region->stat.st_nlockers, &ids)) != 0) + goto err; + nids = 0; + for (lk = SH_TAILQ_FIRST(®ion->lockers, __db_locker); + lk != NULL; + lk = SH_TAILQ_NEXT(lk, ulinks, __db_locker)) + ids[nids++] = lk->id; + region->stat.st_id = DB_LOCK_INVALIDID; + region->stat.st_cur_maxid = DB_LOCK_MAXID; + if (nids != 0) + __db_idspace(ids, nids, + ®ion->stat.st_id, ®ion->stat.st_cur_maxid); + __os_free(dbenv, ids); + } + *idp = ++region->stat.st_id; + + /* Allocate a locker for this id. */ + LOCKER_LOCK(lt, region, *idp, locker_ndx); + ret = __lock_getlocker(lt, *idp, locker_ndx, 1, &lk); + +err: UNLOCKREGION(dbenv, lt); + + return (ret); +} + +/* + * __lock_id_free_pp -- + * DB_ENV->lock_id_free pre/post processing. + * + * PUBLIC: int __lock_id_free_pp __P((DB_ENV *, u_int32_t)); + */ +int +__lock_id_free_pp(dbenv, id) + DB_ENV *dbenv; + u_int32_t id; +{ + int rep_check, ret; + + PANIC_CHECK(dbenv); + ENV_REQUIRES_CONFIG(dbenv, + dbenv->lk_handle, "DB_ENV->lock_id_free", DB_INIT_LOCK); + + rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; + if (rep_check) + __env_rep_enter(dbenv); + ret = __lock_id_free(dbenv, id); + if (rep_check) + __env_db_rep_exit(dbenv); + return (ret); +} + +/* + * __lock_id_free -- + * Free a locker id. 
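__lock_id above hands out ids sequentially and, when the counter reaches the current maximum, collects every id still in use and recomputes a fresh contiguous free range before wrapping. The stand-alone sketch below only illustrates that recovery step; __db_idspace is the real helper, and the function, guard cases, and range semantics here are simplified assumptions.

#include <stdint.h>
#include <stdlib.h>

static int
cmp_u32(const void *a, const void *b)
{
	uint32_t x = *(const uint32_t *)a, y = *(const uint32_t *)b;

	return (x == y ? 0 : (x > y ? 1 : -1));
}

/* Given the ids still in use, pick the largest gap between two of them
 * and hand back (start, max) so new ids can be allocated from start+1
 * up to max without colliding.  Sketch only. */
static void
recover_id_range(uint32_t *inuse, int n, uint32_t *startp, uint32_t *maxp)
{
	int i, best;
	uint32_t gap, best_gap;

	if (n < 2) {                       /* trivial cases: use the full range */
		*startp = n == 1 ? inuse[0] : 0;
		*maxp = UINT32_MAX;
		return;
	}

	qsort(inuse, (size_t)n, sizeof(uint32_t), cmp_u32);

	best = 0;
	best_gap = inuse[1] - inuse[0];
	for (i = 1; i < n - 1; i++) {
		gap = inuse[i + 1] - inuse[i];
		if (gap > best_gap) {
			best_gap = gap;
			best = i;
		}
	}
	*startp = inuse[best];             /* next id handed out is start + 1 */
	*maxp = inuse[best + 1] - 1;       /* stop before the next id in use */
}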
+ * + * PUBLIC: int __lock_id_free __P((DB_ENV *, u_int32_t)); + */ +int +__lock_id_free(dbenv, id) + DB_ENV *dbenv; + u_int32_t id; +{ + DB_LOCKER *sh_locker; + DB_LOCKTAB *lt; + DB_LOCKREGION *region; + u_int32_t locker_ndx; + int ret; + + PANIC_CHECK(dbenv); + ENV_REQUIRES_CONFIG(dbenv, + dbenv->lk_handle, "DB_ENV->lock_id_free", DB_INIT_LOCK); + + lt = dbenv->lk_handle; + region = lt->reginfo.primary; + + LOCKREGION(dbenv, lt); + LOCKER_LOCK(lt, region, id, locker_ndx); + if ((ret = __lock_getlocker(lt, id, locker_ndx, 0, &sh_locker)) != 0) + goto err; + + if (sh_locker == NULL) { + __db_err(dbenv, "Unknown locker ID: %lx", (u_long)id); + ret = EINVAL; + goto err; + } + + if (sh_locker->nlocks != 0) { + __db_err(dbenv, "Locker still has locks"); + ret = EINVAL; + goto err; + } + + __lock_freelocker(lt, region, sh_locker, locker_ndx); + +err: UNLOCKREGION(dbenv, lt); + return (ret); +} + +/* + * __lock_id_set -- + * Set the current locker ID and current maximum unused ID (for + * testing purposes only). + * + * PUBLIC: int __lock_id_set __P((DB_ENV *, u_int32_t, u_int32_t)); + */ +int +__lock_id_set(dbenv, cur_id, max_id) + DB_ENV *dbenv; + u_int32_t cur_id, max_id; +{ + DB_LOCKTAB *lt; + DB_LOCKREGION *region; + + ENV_REQUIRES_CONFIG(dbenv, + dbenv->lk_handle, "lock_id_set", DB_INIT_LOCK); + + lt = dbenv->lk_handle; + region = lt->reginfo.primary; + region->stat.st_id = cur_id; + region->stat.st_cur_maxid = max_id; + + return (0); +} + +/* + * __lock_getlocker -- + * Get a locker in the locker hash table. The create parameter + * indicates if the locker should be created if it doesn't exist in + * the table. + * + * This must be called with the locker bucket locked. + * + * PUBLIC: int __lock_getlocker __P((DB_LOCKTAB *, + * PUBLIC: u_int32_t, u_int32_t, int, DB_LOCKER **)); + */ +int +__lock_getlocker(lt, locker, indx, create, retp) + DB_LOCKTAB *lt; + u_int32_t locker, indx; + int create; + DB_LOCKER **retp; +{ + DB_ENV *dbenv; + DB_LOCKER *sh_locker; + DB_LOCKREGION *region; + + dbenv = lt->dbenv; + region = lt->reginfo.primary; + + HASHLOOKUP(lt->locker_tab, + indx, __db_locker, links, locker, sh_locker, __lock_locker_cmp); + + /* + * If we found the locker, then we can just return it. If + * we didn't find the locker, then we need to create it. + */ + if (sh_locker == NULL && create) { + /* Create new locker and then insert it into hash table. */ + if ((sh_locker = SH_TAILQ_FIRST( + ®ion->free_lockers, __db_locker)) == NULL) + return (__lock_nomem(dbenv, "locker entries")); + SH_TAILQ_REMOVE( + ®ion->free_lockers, sh_locker, links, __db_locker); + if (++region->stat.st_nlockers > region->stat.st_maxnlockers) + region->stat.st_maxnlockers = region->stat.st_nlockers; + + sh_locker->id = locker; + sh_locker->dd_id = 0; + sh_locker->master_locker = INVALID_ROFF; + sh_locker->parent_locker = INVALID_ROFF; + SH_LIST_INIT(&sh_locker->child_locker); + sh_locker->flags = 0; + SH_LIST_INIT(&sh_locker->heldby); + sh_locker->nlocks = 0; + sh_locker->nwrites = 0; + sh_locker->lk_timeout = 0; + LOCK_SET_TIME_INVALID(&sh_locker->tx_expire); + LOCK_SET_TIME_INVALID(&sh_locker->lk_expire); + + HASHINSERT(lt->locker_tab, indx, __db_locker, links, sh_locker); + SH_TAILQ_INSERT_HEAD(®ion->lockers, + sh_locker, ulinks, __db_locker); + } + + *retp = sh_locker; + return (0); +} + +/* + * __lock_addfamilylocker + * Put a locker entry in for a child transaction. 
+ * + * PUBLIC: int __lock_addfamilylocker __P((DB_ENV *, u_int32_t, u_int32_t)); + */ +int +__lock_addfamilylocker(dbenv, pid, id) + DB_ENV *dbenv; + u_int32_t pid, id; +{ + DB_LOCKER *lockerp, *mlockerp; + DB_LOCKREGION *region; + DB_LOCKTAB *lt; + u_int32_t ndx; + int ret; + + lt = dbenv->lk_handle; + region = lt->reginfo.primary; + LOCKREGION(dbenv, lt); + + /* get/create the parent locker info */ + LOCKER_LOCK(lt, region, pid, ndx); + if ((ret = __lock_getlocker(dbenv->lk_handle, + pid, ndx, 1, &mlockerp)) != 0) + goto err; + + /* + * We assume that only one thread can manipulate + * a single transaction family. + * Therefore the master locker cannot go away while + * we manipulate it, nor can another child in the + * family be created at the same time. + */ + LOCKER_LOCK(lt, region, id, ndx); + if ((ret = __lock_getlocker(dbenv->lk_handle, + id, ndx, 1, &lockerp)) != 0) + goto err; + + /* Point to our parent. */ + lockerp->parent_locker = R_OFFSET(dbenv, <->reginfo, mlockerp); + + /* See if this locker is the family master. */ + if (mlockerp->master_locker == INVALID_ROFF) + lockerp->master_locker = + R_OFFSET(dbenv, <->reginfo, mlockerp); + else { + lockerp->master_locker = mlockerp->master_locker; + mlockerp = R_ADDR(dbenv, <->reginfo, mlockerp->master_locker); + } + + /* + * Link the child at the head of the master's list. + * The guess is when looking for deadlock that + * the most recent child is the one thats blocked. + */ + SH_LIST_INSERT_HEAD( + &mlockerp->child_locker, lockerp, child_link, __db_locker); + +err: + UNLOCKREGION(dbenv, lt); + + return (ret); +} + +/* + * __lock_freefamilylocker + * Remove a locker from the hash table and its family. + * + * This must be called without the locker bucket locked. + * + * PUBLIC: int __lock_freefamilylocker __P((DB_LOCKTAB *, u_int32_t)); + */ +int +__lock_freefamilylocker(lt, locker) + DB_LOCKTAB *lt; + u_int32_t locker; +{ + DB_ENV *dbenv; + DB_LOCKER *sh_locker; + DB_LOCKREGION *region; + u_int32_t indx; + int ret; + + dbenv = lt->dbenv; + region = lt->reginfo.primary; + + LOCKREGION(dbenv, lt); + LOCKER_LOCK(lt, region, locker, indx); + + if ((ret = __lock_getlocker(lt, + locker, indx, 0, &sh_locker)) != 0 || sh_locker == NULL) + goto err; + + if (SH_LIST_FIRST(&sh_locker->heldby, __db_lock) != NULL) { + ret = EINVAL; + __db_err(dbenv, "Freeing locker with locks"); + goto err; + } + + /* If this is part of a family, we must fix up its links. */ + if (sh_locker->master_locker != INVALID_ROFF) + SH_LIST_REMOVE(sh_locker, child_link, __db_locker); + + __lock_freelocker(lt, region, sh_locker, indx); + +err: + UNLOCKREGION(dbenv, lt); + return (ret); +} + +/* + * __lock_freelocker + * Common code for deleting a locker; must be called with the + * locker bucket locked. + * + * PUBLIC: void __lock_freelocker + * PUBLIC: __P((DB_LOCKTAB *, DB_LOCKREGION *, DB_LOCKER *, u_int32_t)); + */ +void +__lock_freelocker(lt, region, sh_locker, indx) + DB_LOCKTAB *lt; + DB_LOCKREGION *region; + DB_LOCKER *sh_locker; + u_int32_t indx; + +{ + HASHREMOVE_EL( + lt->locker_tab, indx, __db_locker, links, sh_locker); + SH_TAILQ_INSERT_HEAD( + ®ion->free_lockers, sh_locker, links, __db_locker); + SH_TAILQ_REMOVE(®ion->lockers, sh_locker, ulinks, __db_locker); + region->stat.st_nlockers--; +} diff --git a/db/lock/lock_list.c b/db/lock/lock_list.c new file mode 100644 index 000000000..5851dc7fa --- /dev/null +++ b/db/lock/lock_list.c @@ -0,0 +1,351 @@ +/*- + * See the file LICENSE for redistribution information. 
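__lock_addfamilylocker above records two references in the child: parent_locker (its immediate parent) and master_locker (the top of the family), and links the child at the head of the master's child list on the theory that the most recent child is the one most likely to be blocked. A minimal pointer-based sketch of that bookkeeping; the real code stores region offsets, not pointers, and these struct names are illustrative.

#include <stddef.h>

struct locker {
	unsigned int id;
	struct locker *parent;        /* immediate parent, NULL for master */
	struct locker *master;        /* top of the family, NULL for master */
	struct locker *children;      /* head of this master's child list */
	struct locker *sibling;       /* next child of the same master */
};

/* Attach child to parent, keeping master pointing at the family head. */
static void
add_family_locker(struct locker *parent, struct locker *child)
{
	struct locker *master;

	child->parent = parent;
	master = (parent->master != NULL) ? parent->master : parent;
	child->master = master;

	/* Newest child first: it is the most likely to be blocked. */
	child->sibling = master->children;
	master->children = child;
}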
+ * + * Copyright (c) 1996-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: lock_list.c,v 11.146 2004/09/22 03:48:29 bostic Exp $ + */ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include + +#include +#include +#endif + +#include "db_int.h" +#include "dbinc/db_shash.h" +#include "dbinc/lock.h" +#include "dbinc/log.h" + +static int __lock_sort_cmp __P((const void *, const void *)); + +/* + * Lock list routines. + * The list is composed of a 32-bit count of locks followed by + * each lock. A lock is represented by a 16-bit page-count, a lock + * object and a page list. A lock object consists of a 16-bit size + * and the object itself. In a pseudo BNF notation, you get: + * + * LIST = COUNT32 LOCK* + * LOCK = COUNT16 LOCKOBJ PAGELIST + * LOCKOBJ = COUNT16 OBJ + * PAGELIST = COUNT32* + * + * (Recall that X* means "0 or more X's") + * + * In most cases, the OBJ is a struct __db_ilock and the page list is + * a series of (32-bit) page numbers that should get written into the + * pgno field of the __db_ilock. So, the actual number of pages locked + * is the number of items in the PAGELIST plus 1. If this is an application- + * specific lock, then we cannot interpret obj and the pagelist must + * be empty. + * + * Consider a lock list for: File A, pages 1&2, File B pages 3-5, Applock + * This would be represented as: + * 5 1 [fid=A;page=1] 2 2 [fid=B;page=3] 4 5 0 APPLOCK + * ------------------ -------------------- --------- + * LOCK for file A LOCK for file B application-specific lock + */ + +#define MAX_PGNOS 0xffff + +/* + * These macros are bigger than one might expect because some compilers say a + * cast does not return an lvalue, so constructs like *(u_int32_t*)dp = count; + * generate warnings. + */ +#define RET_SIZE(size, count) ((size) + \ + sizeof(u_int32_t) + (count) * 2 * sizeof(u_int16_t)) + +#define PUT_COUNT(dp, count) do { u_int32_t *ip = (u_int32_t *)dp;\ + *ip = count; \ + dp = (u_int8_t *)dp + \ + sizeof(u_int32_t); \ + } while (0) +#define PUT_PCOUNT(dp, count) do { u_int16_t *ip = (u_int16_t *)dp;\ + *ip = count; \ + dp = (u_int8_t *)dp + \ + sizeof(u_int16_t); \ + } while (0) +#define PUT_SIZE(dp, size) do { u_int16_t *ip = (u_int16_t *)dp;\ + *ip = size; \ + dp = (u_int8_t *)dp + \ + sizeof(u_int16_t); \ + } while (0) +#define PUT_PGNO(dp, pgno) do { db_pgno_t *ip = (db_pgno_t *)dp;\ + *ip = pgno; \ + dp = (u_int8_t *)dp + \ + sizeof(db_pgno_t); \ + } while (0) +#define COPY_OBJ(dp, obj) do { \ + memcpy(dp, \ + (obj)->data, (obj)->size); \ + dp = (u_int8_t *)dp + \ + DB_ALIGN((obj)->size, \ + sizeof(u_int32_t)); \ + } while (0) +#define GET_COUNT(dp, count) do { \ + (count) = *(u_int32_t *)dp; \ + dp = (u_int8_t *)dp + \ + sizeof(u_int32_t); \ + } while (0) +#define GET_PCOUNT(dp, count) do { \ + (count) = *(u_int16_t *)dp; \ + dp = (u_int8_t *)dp + \ + sizeof(u_int16_t); \ + } while (0) +#define GET_SIZE(dp, size) do { \ + (size) = *(u_int16_t *)dp; \ + dp = (u_int8_t *)dp + \ + sizeof(u_int16_t); \ + } while (0) +#define GET_PGNO(dp, pgno) do { \ + (pgno) = *(db_pgno_t *)dp; \ + dp = (u_int8_t *)dp + \ + sizeof(db_pgno_t); \ + } while (0) + +/* + * __lock_fix_list -- + * + * PUBLIC: int __lock_fix_list __P((DB_ENV *, DBT *, u_int32_t)); + */ +int +__lock_fix_list(dbenv, list_dbt, nlocks) + DB_ENV *dbenv; + DBT *list_dbt; + u_int32_t nlocks; +{ + DBT *obj; + DB_LOCK_ILOCK *lock, *plock; + u_int32_t i, j, nfid, npgno, size; + u_int8_t *data, *dp; + int ret; + + if ((size = list_dbt->size) == 0) + return (0); + + obj = (DBT *)list_dbt->data; + + /* 
+ * If necessary sort the list of locks so that locks on the same fileid + * are together. We do not sort 1 or 2 locks because by definition if + * there are locks on the same fileid they will be together. The sort + * will also move any locks that do not look like page locks to the end + * of the list so we can stop looking for locks we can combine when we + * hit one. + */ + switch (nlocks) { + case 1: + size = RET_SIZE(obj->size, 1); + if ((ret = __os_malloc(dbenv, size, &data)) != 0) + return (ret); + + dp = data; + PUT_COUNT(dp, 1); + PUT_PCOUNT(dp, 0); + PUT_SIZE(dp, obj->size); + COPY_OBJ(dp, obj); + break; + default: + /* Sort so that all locks with same fileid are together. */ + qsort(list_dbt->data, nlocks, sizeof(DBT), __lock_sort_cmp); + /* FALLTHROUGH */ + case 2: + nfid = npgno = 0; + i = 0; + if (obj->size != sizeof(DB_LOCK_ILOCK)) + goto not_ilock; + + nfid = 1; + plock = (DB_LOCK_ILOCK *)obj->data; + + /* We use ulen to keep track of the number of pages. */ + j = 0; + obj[0].ulen = 0; + for (i = 1; i < nlocks; i++) { + if (obj[i].size != sizeof(DB_LOCK_ILOCK)) + break; + lock = (DB_LOCK_ILOCK *)obj[i].data; + if (obj[j].ulen < MAX_PGNOS && + lock->type == plock->type && + memcmp(lock->fileid, + plock->fileid, DB_FILE_ID_LEN) == 0) { + obj[j].ulen++; + npgno++; + } else { + nfid++; + plock = lock; + j = i; + obj[j].ulen = 0; + } + } + +not_ilock: size = nfid * sizeof(DB_LOCK_ILOCK); + size += npgno * sizeof(db_pgno_t); + /* Add the number of nonstandard locks and get their size. */ + nfid += nlocks - i; + for (; i < nlocks; i++) { + size += obj[i].size; + obj[i].ulen = 0; + } + + size = RET_SIZE(size, nfid); + if ((ret = __os_malloc(dbenv, size, &data)) != 0) + return (ret); + + dp = data; + PUT_COUNT(dp, nfid); + + for (i = 0; i < nlocks; i = j) { + PUT_PCOUNT(dp, obj[i].ulen); + PUT_SIZE(dp, obj[i].size); + COPY_OBJ(dp, &obj[i]); + lock = (DB_LOCK_ILOCK *)obj[i].data; + for (j = i + 1; j <= i + obj[i].ulen; j++) { + lock = (DB_LOCK_ILOCK *)obj[j].data; + PUT_PGNO(dp, lock->pgno); + } + } + } + + __os_free(dbenv, list_dbt->data); + + list_dbt->data = data; + list_dbt->size = size; + + return (0); +} + +/* + * PUBLIC: int __lock_get_list __P((DB_ENV *, u_int32_t, u_int32_t, + * PUBLIC: db_lockmode_t, DBT *)); + */ +int +__lock_get_list(dbenv, locker, flags, lock_mode, list) + DB_ENV *dbenv; + u_int32_t locker, flags; + db_lockmode_t lock_mode; + DBT *list; +{ + DBT obj_dbt; + DB_LOCK ret_lock; + DB_LOCK_ILOCK *lock; + DB_LOCKTAB *lt; + db_pgno_t save_pgno; + u_int16_t npgno, size; + u_int32_t i, nlocks; + int ret; + void *dp; + + if (list->size == 0) + return (0); + ret = 0; + lt = dbenv->lk_handle; + dp = list->data; + + GET_COUNT(dp, nlocks); + LOCKREGION(dbenv, dbenv->lk_handle); + + for (i = 0; i < nlocks; i++) { + GET_PCOUNT(dp, npgno); + GET_SIZE(dp, size); + lock = (DB_LOCK_ILOCK *) dp; + save_pgno = lock->pgno; + obj_dbt.data = dp; + obj_dbt.size = size; + dp = ((u_int8_t *)dp) + DB_ALIGN(size, sizeof(u_int32_t)); + do { + if ((ret = __lock_get_internal(lt, locker, + flags, &obj_dbt, lock_mode, 0, &ret_lock)) != 0) { + lock->pgno = save_pgno; + goto err; + } + if (npgno != 0) + GET_PGNO(dp, lock->pgno); + } while (npgno-- != 0); + lock->pgno = save_pgno; + } + +err: + UNLOCKREGION(dbenv, dbenv->lk_handle); + return (ret); +} + +#define UINT32_CMP(A, B) ((A) == (B) ? 0 : ((A) > (B) ? 
1 : -1)) +static int +__lock_sort_cmp(a, b) + const void *a, *b; +{ + const DBT *d1, *d2; + DB_LOCK_ILOCK *l1, *l2; + + d1 = a; + d2 = b; + + /* Force all non-standard locks to sort at end. */ + if (d1->size != sizeof(DB_LOCK_ILOCK)) { + if (d2->size != sizeof(DB_LOCK_ILOCK)) + return (UINT32_CMP(d1->size, d2->size)); + else + return (1); + } else if (d2->size != sizeof(DB_LOCK_ILOCK)) + return (-1); + + l1 = d1->data; + l2 = d2->data; + if (l1->type != l2->type) + return (UINT32_CMP(l1->type, l2->type)); + return (memcmp(l1->fileid, l2->fileid, DB_FILE_ID_LEN)); +} + +/* + * PUBLIC: void __lock_list_print __P((DB_ENV *, DBT *)); + */ +void +__lock_list_print(dbenv, list) + DB_ENV *dbenv; + DBT *list; +{ + DB_LOCK_ILOCK *lock; + db_pgno_t pgno; + u_int16_t npgno, size; + u_int32_t i, nlocks; + u_int8_t *fidp; + char *namep; + void *dp; + + if (list->size == 0) + return; + dp = list->data; + + GET_COUNT(dp, nlocks); + + for (i = 0; i < nlocks; i++) { + GET_PCOUNT(dp, npgno); + GET_SIZE(dp, size); + lock = (DB_LOCK_ILOCK *) dp; + fidp = lock->fileid; + if (__dbreg_get_name(dbenv, fidp, &namep) != 0) + namep = NULL; + printf("\t"); + if (namep == NULL) + printf("(%lx %lx %lx %lx %lx)", + (u_long)fidp[0], (u_long)fidp[1], (u_long)fidp[2], + (u_long)fidp[3], (u_long)fidp[4]); + else + printf("%-25s", namep); + dp = ((u_int8_t *)dp) + DB_ALIGN(size, sizeof(u_int32_t)); + pgno = lock->pgno; + do { + printf(" %d", pgno); + if (npgno != 0) + GET_PGNO(dp, pgno); + } while (npgno-- != 0); + printf("\n"); + } +} diff --git a/db/lock/lock_method.c b/db/lock/lock_method.c index fd2beebe4..d57179493 100644 --- a/db/lock/lock_method.c +++ b/db/lock/lock_method.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: lock_method.c,v 11.44 2004/06/01 21:50:05 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: lock_method.c,v 11.35 2003/06/30 17:20:15 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -21,12 +19,15 @@ static const char revid[] = "$Id: lock_method.c,v 11.35 2003/06/30 17:20:15 bost #include #endif +#ifdef HAVE_RPC +#include "db_server.h" +#endif + #include "db_int.h" #include "dbinc/db_shash.h" #include "dbinc/lock.h" #ifdef HAVE_RPC -#include "dbinc_auto/db_server.h" #include "dbinc_auto/rpc_client_ext.h" #endif @@ -74,12 +75,12 @@ __lock_dbenv_create(dbenv) dbenv->set_lk_max_objects = __dbcl_set_lk_max_objects; dbenv->lock_detect = __dbcl_lock_detect; - dbenv->lock_dump_region = NULL; dbenv->lock_get = __dbcl_lock_get; dbenv->lock_id = __dbcl_lock_id; dbenv->lock_id_free = __dbcl_lock_id_free; dbenv->lock_put = __dbcl_lock_put; dbenv->lock_stat = __dbcl_lock_stat; + dbenv->lock_stat_print = NULL; dbenv->lock_vec = __dbcl_lock_vec; } else #endif @@ -99,12 +100,12 @@ __lock_dbenv_create(dbenv) dbenv->set_timeout = __lock_set_env_timeout; dbenv->lock_detect = __lock_detect_pp; - dbenv->lock_dump_region = __lock_dump_region; dbenv->lock_get = __lock_get_pp; dbenv->lock_id = __lock_id_pp; dbenv->lock_id_free = __lock_id_free_pp; dbenv->lock_put = __lock_put_pp; dbenv->lock_stat = __lock_stat_pp; + dbenv->lock_stat_print = __lock_stat_print_pp; dbenv->lock_vec = __lock_vec_pp; } } @@ -113,9 +114,9 @@ __lock_dbenv_create(dbenv) * __lock_dbenv_close -- * Lock specific destruction of the DB_ENV structure. 
* - * PUBLIC: void __lock_dbenv_close __P((DB_ENV *)); + * PUBLIC: int __lock_dbenv_close __P((DB_ENV *)); */ -void +int __lock_dbenv_close(dbenv) DB_ENV *dbenv; { @@ -123,6 +124,8 @@ __lock_dbenv_close(dbenv) __os_free(dbenv, dbenv->lk_conflicts); dbenv->lk_conflicts = NULL; } + + return (0); } /* @@ -135,10 +138,23 @@ __lock_get_lk_conflicts(dbenv, lk_conflictsp, lk_modesp) const u_int8_t **lk_conflictsp; int *lk_modesp; { - if (lk_conflictsp != NULL) - *lk_conflictsp = dbenv->lk_conflicts; - if (lk_modesp != NULL) - *lk_modesp = dbenv->lk_modes; + ENV_NOT_CONFIGURED(dbenv, + dbenv->lk_handle, "DB_ENV->get_lk_conflicts", DB_INIT_LOCK); + + if (LOCKING_ON(dbenv)) { + /* Cannot be set after open, no lock required to read. */ + if (lk_conflictsp != NULL) + *lk_conflictsp = + ((DB_LOCKTAB *)dbenv->lk_handle)->conflicts; + if (lk_modesp != NULL) + *lk_modesp = ((DB_LOCKREGION *)((DB_LOCKTAB *) + dbenv->lk_handle)->reginfo.primary)->stat.st_nmodes; + } else { + if (lk_conflictsp != NULL) + *lk_conflictsp = dbenv->lk_conflicts; + if (lk_modesp != NULL) + *lk_modesp = dbenv->lk_modes; + } return (0); } @@ -161,9 +177,10 @@ __lock_set_lk_conflicts(dbenv, lk_conflicts, lk_modes) dbenv->lk_conflicts = NULL; } if ((ret = __os_malloc(dbenv, - lk_modes * lk_modes, &dbenv->lk_conflicts)) != 0) + (size_t)(lk_modes * lk_modes), &dbenv->lk_conflicts)) != 0) return (ret); - memcpy(dbenv->lk_conflicts, lk_conflicts, lk_modes * lk_modes); + memcpy( + dbenv->lk_conflicts, lk_conflicts, (size_t)(lk_modes * lk_modes)); dbenv->lk_modes = lk_modes; return (0); @@ -174,7 +191,19 @@ __lock_get_lk_detect(dbenv, lk_detectp) DB_ENV *dbenv; u_int32_t *lk_detectp; { - *lk_detectp = dbenv->lk_detect; + DB_LOCKTAB *lt; + + ENV_NOT_CONFIGURED(dbenv, + dbenv->lk_handle, "DB_ENV->get_lk_detect", DB_INIT_LOCK); + + if (LOCKING_ON(dbenv)) { + lt = dbenv->lk_handle; + LOCKREGION(dbenv, lt); + *lk_detectp = ((DB_LOCKREGION *) + ((DB_LOCKTAB *)dbenv->lk_handle)->reginfo.primary)->detect; + UNLOCKREGION(dbenv, lt); + } else + *lk_detectp = dbenv->lk_detect; return (0); } @@ -189,12 +218,18 @@ __lock_set_lk_detect(dbenv, lk_detect) DB_ENV *dbenv; u_int32_t lk_detect; { - ENV_ILLEGAL_AFTER_OPEN(dbenv, "DB_ENV->set_lk_detect"); + DB_LOCKTAB *lt; + DB_LOCKREGION *region; + int ret; + + ENV_NOT_CONFIGURED(dbenv, + dbenv->lk_handle, "DB_ENV->set_lk_detect", DB_INIT_LOCK); switch (lk_detect) { case DB_LOCK_DEFAULT: case DB_LOCK_EXPIRE: case DB_LOCK_MAXLOCKS: + case DB_LOCK_MAXWRITE: case DB_LOCK_MINLOCKS: case DB_LOCK_MINWRITE: case DB_LOCK_OLDEST: @@ -206,8 +241,35 @@ __lock_set_lk_detect(dbenv, lk_detect) "DB_ENV->set_lk_detect: unknown deadlock detection mode specified"); return (EINVAL); } - dbenv->lk_detect = lk_detect; - return (0); + + ret = 0; + if (LOCKING_ON(dbenv)) { + lt = dbenv->lk_handle; + region = lt->reginfo.primary; + LOCKREGION(dbenv, lt); + /* + * Check for incompatible automatic deadlock detection requests. + * There are scenarios where changing the detector configuration + * is reasonable, but we disallow them guessing it is likely to + * be an application error. + * + * We allow applications to turn on the lock detector, and we + * ignore attempts to set it to the default or current value. 
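The accessor changes above all follow one pattern: once the environment is open (LOCKING_ON), the value lives in the shared lock region and is read from there, taking the region lock when the value can change after open; otherwise the pre-open copy in the DB_ENV handle is returned. A sketch of that pattern for a single hypothetical tunable, using a pthread mutex in place of the region lock:

#include <pthread.h>

struct shared_region {
	unsigned int detect_policy;   /* shared, settable after open */
};

struct env {
	int opened;                   /* nonzero once the region is attached */
	unsigned int detect_policy;   /* pre-open copy */
	struct shared_region *region; /* valid only when opened */
	pthread_mutex_t region_mutex;
};

static unsigned int
get_detect_policy(struct env *e)
{
	unsigned int v;

	if (e->opened) {
		/* Mutable shared value: read it under the region lock. */
		pthread_mutex_lock(&e->region_mutex);
		v = e->region->detect_policy;
		pthread_mutex_unlock(&e->region_mutex);
	} else
		v = e->detect_policy;
	return (v);
}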
+ */ + if (region->detect != DB_LOCK_NORUN && + lk_detect != DB_LOCK_DEFAULT && + region->detect != lk_detect) { + __db_err(dbenv, + "DB_ENV->set_lk_detect: incompatible deadlock detector mode"); + ret = EINVAL; + } else + if (region->detect == DB_LOCK_NORUN) + region->detect = lk_detect; + UNLOCKREGION(dbenv, lt); + } else + dbenv->lk_detect = lk_detect; + + return (ret); } /* @@ -234,7 +296,15 @@ __lock_get_lk_max_locks(dbenv, lk_maxp) DB_ENV *dbenv; u_int32_t *lk_maxp; { - *lk_maxp = dbenv->lk_max; + ENV_NOT_CONFIGURED(dbenv, + dbenv->lk_handle, "DB_ENV->get_lk_maxlocks", DB_INIT_LOCK); + + if (LOCKING_ON(dbenv)) { + /* Cannot be set after open, no lock required to read. */ + *lk_maxp = ((DB_LOCKREGION *)((DB_LOCKTAB *) + dbenv->lk_handle)->reginfo.primary)->stat.st_maxlocks; + } else + *lk_maxp = dbenv->lk_max; return (0); } @@ -260,7 +330,15 @@ __lock_get_lk_max_lockers(dbenv, lk_maxp) DB_ENV *dbenv; u_int32_t *lk_maxp; { - *lk_maxp = dbenv->lk_max_lockers; + ENV_NOT_CONFIGURED(dbenv, + dbenv->lk_handle, "DB_ENV->get_lk_max_lockers", DB_INIT_LOCK); + + if (LOCKING_ON(dbenv)) { + /* Cannot be set after open, no lock required to read. */ + *lk_maxp = ((DB_LOCKREGION *)((DB_LOCKTAB *) + dbenv->lk_handle)->reginfo.primary)->stat.st_maxlockers; + } else + *lk_maxp = dbenv->lk_max_lockers; return (0); } @@ -286,7 +364,15 @@ __lock_get_lk_max_objects(dbenv, lk_maxp) DB_ENV *dbenv; u_int32_t *lk_maxp; { - *lk_maxp = dbenv->lk_max_objects; + ENV_NOT_CONFIGURED(dbenv, + dbenv->lk_handle, "DB_ENV->get_lk_max_objects", DB_INIT_LOCK); + + if (LOCKING_ON(dbenv)) { + /* Cannot be set after open, no lock required to read. */ + *lk_maxp = ((DB_LOCKREGION *)((DB_LOCKTAB *) + dbenv->lk_handle)->reginfo.primary)->stat.st_maxobjects; + } else + *lk_maxp = dbenv->lk_max_objects; return (0); } @@ -313,19 +399,47 @@ __lock_get_env_timeout(dbenv, timeoutp, flag) db_timeout_t *timeoutp; u_int32_t flag; { - switch (flag) { - case DB_SET_LOCK_TIMEOUT: - *timeoutp = dbenv->lk_timeout; - break; - case DB_SET_TXN_TIMEOUT: - *timeoutp = dbenv->tx_timeout; - break; - default: - return (__db_ferr(dbenv, "DB_ENV->get_timeout", 0)); - /* NOTREACHED */ - } + DB_LOCKTAB *lt; + DB_LOCKREGION *region; + int ret; - return (0); + ENV_NOT_CONFIGURED(dbenv, + dbenv->lk_handle, "DB_ENV->get_env_timeout", DB_INIT_LOCK); + + ret = 0; + if (LOCKING_ON(dbenv)) { + lt = dbenv->lk_handle; + region = lt->reginfo.primary; + LOCKREGION(dbenv, lt); + switch (flag) { + case DB_SET_LOCK_TIMEOUT: + *timeoutp = region->lk_timeout; + break; + case DB_SET_TXN_TIMEOUT: + *timeoutp = region->tx_timeout; + break; + default: + ret = 1; + break; + } + UNLOCKREGION(dbenv, lt); + } else + switch (flag) { + case DB_SET_LOCK_TIMEOUT: + *timeoutp = dbenv->lk_timeout; + break; + case DB_SET_TXN_TIMEOUT: + *timeoutp = dbenv->tx_timeout; + break; + default: + ret = 1; + break; + } + + if (ret) + ret = __db_ferr(dbenv, "DB_ENV->get_timeout", 0); + + return (ret); } /* @@ -340,31 +454,45 @@ __lock_set_env_timeout(dbenv, timeout, flags) db_timeout_t timeout; u_int32_t flags; { + DB_LOCKTAB *lt; DB_LOCKREGION *region; + int ret; - region = NULL; - if (F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) { - if (!LOCKING_ON(dbenv)) - return (__db_env_config( - dbenv, "set_timeout", DB_INIT_LOCK)); - region = ((DB_LOCKTAB *)dbenv->lk_handle)->reginfo.primary; - } + ENV_NOT_CONFIGURED(dbenv, + dbenv->lk_handle, "DB_ENV->set_env_timeout", DB_INIT_LOCK); - switch (flags) { - case DB_SET_LOCK_TIMEOUT: - dbenv->lk_timeout = timeout; - if (region != NULL) + ret = 0; + if 
(LOCKING_ON(dbenv)) { + lt = dbenv->lk_handle; + region = lt->reginfo.primary; + LOCKREGION(dbenv, lt); + switch (flags) { + case DB_SET_LOCK_TIMEOUT: region->lk_timeout = timeout; - break; - case DB_SET_TXN_TIMEOUT: - dbenv->tx_timeout = timeout; - if (region != NULL) + break; + case DB_SET_TXN_TIMEOUT: region->tx_timeout = timeout; - break; - default: - return (__db_ferr(dbenv, "DB_ENV->set_timeout", 0)); - /* NOTREACHED */ - } - - return (0); + break; + default: + ret = 1; + break; + } + UNLOCKREGION(dbenv, lt); + } else + switch (flags) { + case DB_SET_LOCK_TIMEOUT: + dbenv->lk_timeout = timeout; + break; + case DB_SET_TXN_TIMEOUT: + dbenv->tx_timeout = timeout; + break; + default: + ret = 1; + break; + } + + if (ret) + ret = __db_ferr(dbenv, "DB_ENV->set_timeout", 0); + + return (ret); } diff --git a/db/lock/lock_region.c b/db/lock/lock_region.c index 3f3442c3f..ff4bf2270 100644 --- a/db/lock/lock_region.c +++ b/db/lock/lock_region.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: lock_region.c,v 11.81 2004/09/15 21:49:17 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: lock_region.c,v 11.73 2003/07/23 13:13:12 mjc Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -21,7 +19,7 @@ static const char revid[] = "$Id: lock_region.c,v 11.73 2003/07/23 13:13:12 mjc #include "dbinc/db_shash.h" #include "dbinc/lock.h" -static int __lock_init __P((DB_ENV *, DB_LOCKTAB *)); +static int __lock_region_init __P((DB_ENV *, DB_LOCKTAB *)); static size_t __lock_region_size __P((DB_ENV *)); @@ -83,9 +81,9 @@ __lock_open(dbenv) lt->dbenv = dbenv; /* Join/create the lock region. */ + lt->reginfo.dbenv = dbenv; lt->reginfo.type = REGION_TYPE_LOCK; lt->reginfo.id = INVALID_REGION_ID; - lt->reginfo.mode = dbenv->db_mode; lt->reginfo.flags = REGION_JOIN_OK; if (F_ISSET(dbenv, DB_ENV_CREATE)) F_SET(<->reginfo, REGION_CREATE_OK); @@ -95,15 +93,23 @@ __lock_open(dbenv) /* If we created the region, initialize it. */ if (F_ISSET(<->reginfo, REGION_CREATE)) - if ((ret = __lock_init(dbenv, lt)) != 0) + if ((ret = __lock_region_init(dbenv, lt)) != 0) goto err; /* Set the local addresses. */ region = lt->reginfo.primary = - R_ADDR(<->reginfo, lt->reginfo.rp->primary); + R_ADDR(dbenv, <->reginfo, lt->reginfo.rp->primary); - /* Check for incompatible automatic deadlock detection requests. */ if (dbenv->lk_detect != DB_LOCK_NORUN) { + /* + * Check for incompatible automatic deadlock detection requests. + * There are scenarios where changing the detector configuration + * is reasonable, but we disallow them guessing it is likely to + * be an application error. + * + * We allow applications to turn on the lock detector, and we + * ignore attempts to set it to the default or current value. + */ if (region->detect != DB_LOCK_NORUN && dbenv->lk_detect != DB_LOCK_DEFAULT && region->detect != dbenv->lk_detect) { @@ -112,12 +118,6 @@ __lock_open(dbenv) ret = EINVAL; goto err; } - - /* - * Upgrade if our caller wants automatic detection, and it - * was not currently being done, whether or not we created - * the region. - */ if (region->detect == DB_LOCK_NORUN) region->detect = dbenv->lk_detect; } @@ -126,15 +126,18 @@ __lock_open(dbenv) * A process joining the region may have reset the lock and transaction * timeouts. 
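Both the get and set timeout hunks above share one shape: pick where the values live (the lock region once locking is open, the DB_ENV fields before that), then switch on the flag and fail anything other than the lock or transaction timeout. A small sketch of just the flag routing, with a plain struct standing in for either storage location and made-up flag constants:

    /* Sketch of the timeout flag routing; "struct cfg" stands in for either
     * the DB_ENV fields or the lock region, whichever is active. */
    #include <stdio.h>

    #define SET_LOCK_TIMEOUT 1    /* placeholder for DB_SET_LOCK_TIMEOUT */
    #define SET_TXN_TIMEOUT  2    /* placeholder for DB_SET_TXN_TIMEOUT */

    struct cfg {
        unsigned long lk_timeout;
        unsigned long tx_timeout;
    };

    static int
    set_timeout(struct cfg *c, unsigned long timeout, unsigned flag)
    {
        switch (flag) {
        case SET_LOCK_TIMEOUT:
            c->lk_timeout = timeout;
            return (0);
        case SET_TXN_TIMEOUT:
            c->tx_timeout = timeout;
            return (0);
        default:
            return (-1);          /* caller reports the usage error */
        }
    }

    int
    main(void)
    {
        struct cfg c = { 0, 0 };

        (void)set_timeout(&c, 5000, SET_LOCK_TIMEOUT);
        (void)set_timeout(&c, 100000, SET_TXN_TIMEOUT);
        printf("lock %lu txn %lu\n", c.lk_timeout, c.tx_timeout);
        return (0);
    }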
*/ - if (dbenv->lk_timeout != 0) + if (dbenv->lk_timeout != 0) region->lk_timeout = dbenv->lk_timeout; - if (dbenv->tx_timeout != 0) + if (dbenv->tx_timeout != 0) region->tx_timeout = dbenv->tx_timeout; /* Set remaining pointers into region. */ - lt->conflicts = (u_int8_t *)R_ADDR(<->reginfo, region->conf_off); - lt->obj_tab = (DB_HASHTAB *)R_ADDR(<->reginfo, region->obj_off); - lt->locker_tab = (DB_HASHTAB *)R_ADDR(<->reginfo, region->locker_off); + lt->conflicts = + (u_int8_t *)R_ADDR(dbenv, <->reginfo, region->conf_off); + lt->obj_tab = + (DB_HASHTAB *)R_ADDR(dbenv, <->reginfo, region->obj_off); + lt->locker_tab = + (DB_HASHTAB *)R_ADDR(dbenv, <->reginfo, region->locker_off); R_UNLOCK(dbenv, <->reginfo); @@ -152,11 +155,11 @@ err: if (lt->reginfo.addr != NULL) { } /* - * __lock_init -- + * __lock_region_init -- * Initialize the lock region. */ static int -__lock_init(dbenv, lt) +__lock_region_init(dbenv, lt) DB_ENV *dbenv; DB_LOCKTAB *lt; { @@ -168,14 +171,15 @@ __lock_init(dbenv, lt) #ifdef HAVE_MUTEX_SYSTEM_RESOURCES size_t maint_size; #endif - u_int32_t i, lk_modes; + u_int32_t i; u_int8_t *addr; - int ret; + int lk_modes, ret; - if ((ret = __db_shalloc(lt->reginfo.addr, + if ((ret = __db_shalloc(<->reginfo, sizeof(DB_LOCKREGION), 0, <->reginfo.primary)) != 0) goto mem_err; - lt->reginfo.rp->primary = R_OFFSET(<->reginfo, lt->reginfo.primary); + lt->reginfo.rp->primary = + R_OFFSET(dbenv, <->reginfo, lt->reginfo.primary); region = lt->reginfo.primary; memset(region, 0, sizeof(*region)); @@ -209,34 +213,34 @@ __lock_init(dbenv, lt) region->stat.st_nmodes = lk_modes; /* Allocate room for the conflict matrix and initialize it. */ - if ((ret = - __db_shalloc(lt->reginfo.addr, lk_modes * lk_modes, 0, &addr)) != 0) + if ((ret = __db_shalloc( + <->reginfo, (size_t)(lk_modes * lk_modes), 0, &addr)) != 0) goto mem_err; - memcpy(addr, lk_conflicts, lk_modes * lk_modes); - region->conf_off = R_OFFSET(<->reginfo, addr); + memcpy(addr, lk_conflicts, (size_t)(lk_modes * lk_modes)); + region->conf_off = R_OFFSET(dbenv, <->reginfo, addr); /* Allocate room for the object hash table and initialize it. */ - if ((ret = __db_shalloc(lt->reginfo.addr, + if ((ret = __db_shalloc(<->reginfo, region->object_t_size * sizeof(DB_HASHTAB), 0, &addr)) != 0) goto mem_err; __db_hashinit(addr, region->object_t_size); - region->obj_off = R_OFFSET(<->reginfo, addr); + region->obj_off = R_OFFSET(dbenv, <->reginfo, addr); /* Allocate room for the locker hash table and initialize it. */ - if ((ret = __db_shalloc(lt->reginfo.addr, + if ((ret = __db_shalloc(<->reginfo, region->locker_t_size * sizeof(DB_HASHTAB), 0, &addr)) != 0) goto mem_err; __db_hashinit(addr, region->locker_t_size); - region->locker_off = R_OFFSET(<->reginfo, addr); + region->locker_off = R_OFFSET(dbenv, <->reginfo, addr); #ifdef HAVE_MUTEX_SYSTEM_RESOURCES maint_size = __lock_region_maint(dbenv); /* Allocate room for the locker maintenance info and initialize it. 
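The conflict matrix allocated in __lock_region_init above is a flat lk_modes * lk_modes byte array, copied from the caller's table and later indexed row-major (lt->conflicts[i * st_nmodes + j] in the stat code). A self-contained sketch of that allocate/copy/lookup sequence; the 3x3 table below and the choice of which axis is the held mode are illustrative only:

    /* Row-major conflict matrix lookup; the table contents are invented
     * (0 = no lock, 1 = read, 2 = write), not DB's default table. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define NMODES 3

    static const unsigned char example_conflicts[NMODES * NMODES] = {
        /*       N  R  W   (held) */
        /* N */  0, 0, 0,
        /* R */  0, 0, 1,
        /* W */  0, 1, 1,
    };

    int
    main(void)
    {
        unsigned char *conflicts;
        int requested, held;

        /* Mirror the region setup: allocate nmodes*nmodes bytes and copy. */
        if ((conflicts = malloc((size_t)NMODES * NMODES)) == NULL)
            return (1);
        memcpy(conflicts, example_conflicts, (size_t)NMODES * NMODES);

        requested = 2 /* write */; held = 1 /* read */;
        printf("write vs held read conflicts: %d\n",
            conflicts[requested * NMODES + held]);    /* 1 */

        free(conflicts);
        return (0);
    }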
*/ - if ((ret = __db_shalloc(lt->reginfo.addr, + if ((ret = __db_shalloc(<->reginfo, sizeof(REGMAINT) + maint_size, 0, &addr)) != 0) goto mem_err; __db_maintinit(<->reginfo, addr, maint_size); - region->maint_off = R_OFFSET(<->reginfo, addr); + region->maint_off = R_OFFSET(dbenv, <->reginfo, addr); #endif /* @@ -246,7 +250,7 @@ __lock_init(dbenv, lt) */ SH_TAILQ_INIT(®ion->free_locks); for (i = 0; i < region->stat.st_maxlocks; ++i) { - if ((ret = __db_shalloc(lt->reginfo.addr, + if ((ret = __db_shalloc(<->reginfo, sizeof(struct __db_lock), MUTEX_ALIGN, &lp)) != 0) goto mem_err; lp->status = DB_LSTAT_FREE; @@ -263,7 +267,7 @@ __lock_init(dbenv, lt) SH_TAILQ_INIT(®ion->dd_objs); SH_TAILQ_INIT(®ion->free_objs); for (i = 0; i < region->stat.st_maxobjects; ++i) { - if ((ret = __db_shalloc(lt->reginfo.addr, + if ((ret = __db_shalloc(<->reginfo, sizeof(DB_LOCKOBJ), 0, &op)) != 0) goto mem_err; SH_TAILQ_INSERT_HEAD( @@ -274,7 +278,7 @@ __lock_init(dbenv, lt) SH_TAILQ_INIT(®ion->lockers); SH_TAILQ_INIT(®ion->free_lockers); for (i = 0; i < region->stat.st_maxlockers; ++i) { - if ((ret = __db_shalloc(lt->reginfo.addr, + if ((ret = __db_shalloc(<->reginfo, sizeof(DB_LOCKER), 0, &lidp)) != 0) { mem_err: __db_err(dbenv, "Unable to allocate memory for the lock table"); @@ -289,8 +293,7 @@ mem_err: __db_err(dbenv, /* * __lock_dbenv_refresh -- - * Clean up after the lock system on a close or failed open. Called - * only from __dbenv_refresh. (Formerly called __lock_close.) + * Clean up after the lock system on a close or failed open. * * PUBLIC: int __lock_dbenv_refresh __P((DB_ENV *)); */ @@ -298,17 +301,67 @@ int __lock_dbenv_refresh(dbenv) DB_ENV *dbenv; { + struct __db_lock *lp; + DB_LOCKER *locker; + DB_LOCKOBJ *lockobj; + DB_LOCKREGION *lr; DB_LOCKTAB *lt; + REGINFO *reginfo; int ret; lt = dbenv->lk_handle; + reginfo = <->reginfo; + lr = reginfo->primary; + + /* + * If a private region, return the memory to the heap. Not needed for + * filesystem-backed or system shared memory regions, that memory isn't + * owned by any particular process. + */ + if (F_ISSET(dbenv, DB_ENV_PRIVATE)) { + /* Discard the conflict matrix. */ + __db_shalloc_free(reginfo, + R_ADDR(dbenv, <->reginfo, lr->conf_off)); + + /* Discard the object hash table. */ + __db_shalloc_free(reginfo, + R_ADDR(dbenv, <->reginfo, lr->obj_off)); + + /* Discard the locker hash table. */ + __db_shalloc_free( + reginfo, R_ADDR(dbenv, <->reginfo, lr->locker_off)); + + /* Discard locks. */ + while ((lp = + SH_TAILQ_FIRST(&lr->free_locks, __db_lock)) != NULL) { + SH_TAILQ_REMOVE(&lr->free_locks, lp, links, __db_lock); + __db_shalloc_free(reginfo, lp); + } + + /* Discard objects. */ + while ((lockobj = + SH_TAILQ_FIRST(&lr->free_objs, __db_lockobj)) != NULL) { + SH_TAILQ_REMOVE( + &lr->free_objs, lockobj, links, __db_lockobj); + __db_shalloc_free(reginfo, lockobj); + } + + /* Discard lockers. */ + while ((locker = + SH_TAILQ_FIRST(&lr->free_lockers, __db_locker)) != NULL) { + SH_TAILQ_REMOVE( + &lr->free_lockers, locker, links, __db_locker); + __db_shalloc_free(reginfo, locker); + } + } /* Detach from the region. */ - ret = __db_r_detach(dbenv, <->reginfo, 0); + ret = __db_r_detach(dbenv, reginfo, 0); + /* Discard DB_LOCKTAB. */ __os_free(dbenv, lt); - dbenv->lk_handle = NULL; + return (ret); } @@ -324,25 +377,26 @@ __lock_region_size(dbenv) /* * Figure out how much space we're going to need. This list should - * map one-to-one with the __db_shalloc calls in __lock_init. + * map one-to-one with the __db_shalloc calls in __lock_region_init. 
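The DB_ENV_PRIVATE branch of __lock_dbenv_refresh above returns heap memory by walking each free list and releasing entries one at a time, since every lock, locker and object was allocated individually. The same drain pattern on an ordinary singly linked list, with malloc/free standing in for the shared-region allocator:

    /* Generic drain-the-free-list pattern; not the SH_TAILQ macros. */
    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        struct node *next;
        int payload;
    };

    static void
    drain(struct node **headp)
    {
        struct node *np;

        while ((np = *headp) != NULL) {     /* pop the head ... */
            *headp = np->next;              /* ... unlink it ... */
            free(np);                       /* ... and return its memory */
        }
    }

    int
    main(void)
    {
        struct node *head = NULL, *np;
        int i;

        for (i = 0; i < 3; i++) {
            if ((np = malloc(sizeof(*np))) == NULL)
                return (1);
            np->payload = i;
            np->next = head;
            head = np;
        }
        drain(&head);
        printf("list empty: %s\n", head == NULL ? "yes" : "no");
        return (0);
    }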
*/ retval = 0; - retval += __db_shalloc_size(sizeof(DB_LOCKREGION), 1); - retval += __db_shalloc_size(dbenv->lk_modes * dbenv->lk_modes, 1); - retval += __db_shalloc_size( - __db_tablesize(dbenv->lk_max_lockers) * (sizeof(DB_HASHTAB)), 1); + retval += __db_shalloc_size(sizeof(DB_LOCKREGION), 0); retval += __db_shalloc_size( - __db_tablesize(dbenv->lk_max_objects) * (sizeof(DB_HASHTAB)), 1); + (size_t)(dbenv->lk_modes * dbenv->lk_modes), 0); + retval += __db_shalloc_size(__db_tablesize + (dbenv->lk_max_lockers) * (sizeof(DB_HASHTAB)), 0); + retval += __db_shalloc_size(__db_tablesize + (dbenv->lk_max_objects) * (sizeof(DB_HASHTAB)), 0); #ifdef HAVE_MUTEX_SYSTEM_RESOURCES retval += - __db_shalloc_size(sizeof(REGMAINT) + __lock_region_maint(dbenv), 1); + __db_shalloc_size(sizeof(REGMAINT) + __lock_region_maint(dbenv), 0); #endif - retval += __db_shalloc_size( - sizeof(struct __db_lock), MUTEX_ALIGN) * dbenv->lk_max; - retval += __db_shalloc_size( - sizeof(DB_LOCKOBJ), 1) * dbenv->lk_max_objects; - retval += __db_shalloc_size( - sizeof(DB_LOCKER), 1) * dbenv->lk_max_lockers; + retval += __db_shalloc_size + (sizeof(struct __db_lock), MUTEX_ALIGN) * dbenv->lk_max; + retval += + __db_shalloc_size(sizeof(DB_LOCKOBJ), 1) * dbenv->lk_max_objects; + retval += + __db_shalloc_size(sizeof(DB_LOCKER), 1) * dbenv->lk_max_lockers; /* * Include 16 bytes of string space per lock. DB doesn't use it @@ -383,35 +437,23 @@ __lock_region_destroy(dbenv, infop) DB_ENV *dbenv; REGINFO *infop; { - __db_shlocks_destroy(infop, (REGMAINT *)R_ADDR(infop, - ((DB_LOCKREGION *)R_ADDR(infop, infop->rp->primary))->maint_off)); - - COMPQUIET(dbenv, NULL); - COMPQUIET(infop, NULL); -} - -/* - * __lock_id_set -- - * Set the current locker ID and current maximum unused ID (for - * testing purposes only). - * - * PUBLIC: int __lock_id_set __P((DB_ENV *, u_int32_t, u_int32_t)); - */ -int -__lock_id_set(dbenv, cur_id, max_id) - DB_ENV *dbenv; - u_int32_t cur_id, max_id; -{ - DB_LOCKTAB *lt; - DB_LOCKREGION *region; - - ENV_REQUIRES_CONFIG(dbenv, - dbenv->lk_handle, "lock_id_set", DB_INIT_LOCK); + /* + * This routine is called in two cases: when discarding the mutexes + * from a previous Berkeley DB run, during recovery, and two, when + * discarding the mutexes as we shut down the database environment. + * In the latter case, we also need to discard shared memory segments, + * this is the last time we use them, and the last region-specific + * call we make. + */ +#ifdef HAVE_MUTEX_SYSTEM_RESOURCES + DB_LOCKREGION *lt; - lt = dbenv->lk_handle; - region = lt->reginfo.primary; - region->stat.st_id = cur_id; - region->stat.st_cur_maxid = max_id; + lt = R_ADDR(dbenv, infop, infop->rp->primary); - return (0); + __db_shlocks_destroy(infop, R_ADDR(dbenv, infop, lt->maint_off)); + if (infop->primary != NULL && F_ISSET(dbenv, DB_ENV_PRIVATE)) + __db_shalloc_free(infop, R_ADDR(dbenv, infop, lt->maint_off)); +#endif + if (infop->primary != NULL && F_ISSET(dbenv, DB_ENV_PRIVATE)) + __db_shalloc_free(infop, infop->primary); } diff --git a/db/lock/lock_stat.c b/db/lock/lock_stat.c index de539f560..d49235dac 100644 --- a/db/lock/lock_stat.c +++ b/db/lock/lock_stat.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
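__lock_region_size, shown above, estimates the region by summing one allocator-chunk size per allocation that __lock_region_init will make, multiplying the per-object chunks by their configured maximums and adding 16 bytes of string space per lock. A toy version of that arithmetic; the chunk-overhead constant, the simplified hash-table sizing, and the structure sizes passed in main are assumptions, not Berkeley DB's real values:

    /* Toy region-size estimate: fixed allocations plus per-object
     * allocations times their maximum counts.  CHUNK_OVERHEAD is a made-up
     * allocator overhead. */
    #include <stdio.h>

    #define CHUNK_OVERHEAD 16

    static size_t
    chunk_size(size_t len)
    {
        return (len + CHUNK_OVERHEAD);
    }

    static size_t
    region_size(size_t nmodes, size_t maxlocks, size_t maxlockers,
        size_t maxobjects, size_t lock_sz, size_t locker_sz,
        size_t object_sz, size_t table_sz)
    {
        size_t total = 0;

        total += chunk_size(nmodes * nmodes);         /* conflict matrix */
        total += chunk_size(maxlockers * table_sz);   /* locker table (simplified) */
        total += chunk_size(maxobjects * table_sz);   /* object table (simplified) */
        total += chunk_size(lock_sz) * maxlocks;      /* lock structures */
        total += chunk_size(locker_sz) * maxlockers;  /* locker structures */
        total += chunk_size(object_sz) * maxobjects;  /* object structures */
        total += 16 * maxlocks;                       /* string space per lock */
        return (total);
    }

    int
    main(void)
    {
        printf("estimate: %zu bytes\n",
            region_size(9, 1000, 1000, 1000, 56, 48, 40, 8));
        return (0);
    }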
+ * + * $Id: lock_stat.c,v 11.63 2004/09/22 03:48:29 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: lock_stat.c,v 11.44 2003/09/13 19:20:36 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include @@ -35,9 +33,13 @@ static const char revid[] = "$Id: lock_stat.c,v 11.44 2003/09/13 19:20:36 bostic #include "dbinc/log.h" #include "dbinc/db_am.h" -static void __lock_dump_locker __P((DB_LOCKTAB *, DB_LOCKER *, FILE *)); -static void __lock_dump_object __P((DB_LOCKTAB *, DB_LOCKOBJ *, FILE *)); -static void __lock_printheader __P((FILE *)); +#ifdef HAVE_STATISTICS +static void __lock_dump_locker + __P((DB_ENV *, DB_MSGBUF *, DB_LOCKTAB *, DB_LOCKER *)); +static void __lock_dump_object __P((DB_LOCKTAB *, DB_MSGBUF *, DB_LOCKOBJ *)); +static int __lock_print_all __P((DB_ENV *, u_int32_t)); +static int __lock_print_stats __P((DB_ENV *, u_int32_t)); +static void __lock_print_header __P((DB_ENV *)); static int __lock_stat __P((DB_ENV *, DB_LOCK_STAT **, u_int32_t)); /* @@ -67,7 +69,7 @@ __lock_stat_pp(dbenv, statp, flags) __env_rep_enter(dbenv); ret = __lock_stat(dbenv, statp, flags); if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -106,8 +108,7 @@ __lock_stat(dbenv, statp, flags) if (LF_ISSET(DB_STAT_CLEAR)) { tmp = region->stat; memset(®ion->stat, 0, sizeof(region->stat)); - lt->reginfo.rp->mutex.mutex_set_wait = 0; - lt->reginfo.rp->mutex.mutex_set_nowait = 0; + MUTEX_CLEAR(<->reginfo.rp->mutex); region->stat.st_id = tmp.st_id; region->stat.st_cur_maxid = tmp.st_cur_maxid; @@ -129,170 +130,266 @@ __lock_stat(dbenv, statp, flags) return (0); } -#define LOCK_DUMP_CONF 0x001 /* Conflict matrix. */ -#define LOCK_DUMP_LOCKERS 0x002 /* Display lockers. */ -#define LOCK_DUMP_MEM 0x004 /* Display region memory. */ -#define LOCK_DUMP_OBJECTS 0x008 /* Display objects. */ -#define LOCK_DUMP_PARAMS 0x010 /* Display params. */ -#define LOCK_DUMP_ALL /* All */ \ - (LOCK_DUMP_CONF | LOCK_DUMP_LOCKERS | LOCK_DUMP_MEM | \ - LOCK_DUMP_OBJECTS | LOCK_DUMP_PARAMS) +/* + * __lock_stat_print_pp -- + * DB_ENV->lock_stat_print pre/post processing. + * + * PUBLIC: int __lock_stat_print_pp __P((DB_ENV *, u_int32_t)); + */ +int +__lock_stat_print_pp(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + int rep_check, ret; + + PANIC_CHECK(dbenv); + ENV_REQUIRES_CONFIG(dbenv, + dbenv->lk_handle, "DB_ENV->lock_stat_print", DB_INIT_LOCK); + +#define DB_STAT_LOCK_FLAGS \ + (DB_STAT_ALL | DB_STAT_CLEAR | DB_STAT_LOCK_CONF | \ + DB_STAT_LOCK_LOCKERS | DB_STAT_LOCK_OBJECTS | DB_STAT_LOCK_PARAMS) + if ((ret = __db_fchk(dbenv, "DB_ENV->lock_stat_print", + flags, DB_STAT_CLEAR | DB_STAT_LOCK_FLAGS)) != 0) + return (ret); + + rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; + if (rep_check) + __env_rep_enter(dbenv); + ret = __lock_stat_print(dbenv, flags); + if (rep_check) + __env_db_rep_exit(dbenv); + return (ret); +} /* - * __lock_dump_region -- + * __lock_stat_print -- + * DB_ENV->lock_stat_print method. 
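The DB_STAT_CLEAR handling in __lock_stat above copies the counters out, zeroes the live structure, and then restores st_id and st_cur_maxid, which are identifiers rather than statistics. A minimal sketch of that copy/clear/restore sequence with an invented stats struct:

    /* Snapshot-and-clear with identifier fields preserved; the struct is
     * invented for illustration, not DB_LOCK_STAT. */
    #include <stdio.h>
    #include <string.h>

    struct stats {
        unsigned long st_id;          /* identifier: survives a clear */
        unsigned long st_nrequests;   /* counter: reset by a clear */
        unsigned long st_ndeadlocks;  /* counter: reset by a clear */
    };

    static void
    stat_snapshot(struct stats *live, struct stats *out, int clear)
    {
        *out = *live;                       /* copy out under the region lock */
        if (clear) {
            struct stats tmp = *live;

            memset(live, 0, sizeof(*live));
            live->st_id = tmp.st_id;        /* IDs are not statistics */
        }
    }

    int
    main(void)
    {
        struct stats live = { 42, 100, 3 }, snap;

        stat_snapshot(&live, &snap, 1);
        printf("snap requests %lu, live requests %lu, live id %lu\n",
            snap.st_nrequests, live.st_nrequests, live.st_id);
        return (0);
    }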
* - * PUBLIC: int __lock_dump_region __P((DB_ENV *, const char *, FILE *)); + * PUBLIC: int __lock_stat_print __P((DB_ENV *, u_int32_t)); */ int -__lock_dump_region(dbenv, area, fp) +__lock_stat_print(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + u_int32_t orig_flags; + int ret; + + orig_flags = flags; + LF_CLR(DB_STAT_CLEAR); + if (flags == 0 || LF_ISSET(DB_STAT_ALL)) { + ret = __lock_print_stats(dbenv, orig_flags); + if (flags == 0 || ret != 0) + return (ret); + } + + if (LF_ISSET(DB_STAT_ALL | DB_STAT_LOCK_CONF | DB_STAT_LOCK_LOCKERS | + DB_STAT_LOCK_OBJECTS | DB_STAT_LOCK_PARAMS) && + (ret = __lock_print_all(dbenv, orig_flags)) != 0) + return (ret); + + return (0); +} + +/* + * __lock_print_stats -- + * Display default lock region statistics. + */ +static int +__lock_print_stats(dbenv, flags) DB_ENV *dbenv; - const char *area; - FILE *fp; + u_int32_t flags; +{ + DB_LOCK_STAT *sp; + int ret; + + if ((ret = __lock_stat(dbenv, &sp, flags)) != 0) + return (ret); + + if (LF_ISSET(DB_STAT_ALL)) + __db_msg(dbenv, "Default locking region information:"); + __db_dl(dbenv, "Last allocated locker ID", (u_long)sp->st_id); + __db_msg(dbenv, "%#lx\tCurrent maximum unused locker ID", + (u_long)sp->st_cur_maxid); + __db_dl(dbenv, "Number of lock modes", (u_long)sp->st_nmodes); + __db_dl(dbenv, + "Maximum number of locks possible", (u_long)sp->st_maxlocks); + __db_dl(dbenv, + "Maximum number of lockers possible", (u_long)sp->st_maxlockers); + __db_dl(dbenv, "Maximum number of lock objects possible", + (u_long)sp->st_maxobjects); + __db_dl(dbenv, "Number of current locks", (u_long)sp->st_nlocks); + __db_dl(dbenv, "Maximum number of locks at any one time", + (u_long)sp->st_maxnlocks); + __db_dl(dbenv, "Number of current lockers", (u_long)sp->st_nlockers); + __db_dl(dbenv, "Maximum number of lockers at any one time", + (u_long)sp->st_maxnlockers); + __db_dl(dbenv, + "Number of current lock objects", (u_long)sp->st_nobjects); + __db_dl(dbenv, "Maximum number of lock objects at any one time", + (u_long)sp->st_maxnobjects); + __db_dl(dbenv, + "Total number of locks requested", (u_long)sp->st_nrequests); + __db_dl(dbenv, + "Total number of locks released", (u_long)sp->st_nreleases); + __db_dl(dbenv, + "Total number of lock requests failing because DB_LOCK_NOWAIT was set", + (u_long)sp->st_nnowaits); + __db_dl(dbenv, + "Total number of locks not immediately available due to conflicts", + (u_long)sp->st_nconflicts); + __db_dl(dbenv, "Number of deadlocks", (u_long)sp->st_ndeadlocks); + __db_dl(dbenv, "Lock timeout value", (u_long)sp->st_locktimeout); + __db_dl(dbenv, "Number of locks that have timed out", + (u_long)sp->st_nlocktimeouts); + __db_dl(dbenv, + "Transaction timeout value", (u_long)sp->st_txntimeout); + __db_dl(dbenv, "Number of transactions that have timed out", + (u_long)sp->st_ntxntimeouts); + + __db_dlbytes(dbenv, "The size of the lock region", + (u_long)0, (u_long)0, (u_long)sp->st_regsize); + __db_dl_pct(dbenv, + "The number of region locks that required waiting", + (u_long)sp->st_region_wait, DB_PCT(sp->st_region_wait, + sp->st_region_wait + sp->st_region_nowait), NULL); + + __os_ufree(dbenv, sp); + + return (0); +} + +/* + * __lock_print_all -- + * Display debugging lock region statistics. 
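The __db_dl_pct call at the end of the hunk above reports region-lock waits as a percentage of waits plus no-waits. Since the DB_PCT macro itself is not shown in this patch, here is a guarded integer version of the same calculation; the truncating division and the divide-by-zero behavior are choices made for this sketch:

    /* Percent of region-lock acquisitions that had to wait. */
    #include <stdio.h>

    static unsigned long
    pct_waited(unsigned long waits, unsigned long nowaits)
    {
        unsigned long total = waits + nowaits;

        if (total == 0)
            return (0);
        return ((waits * 100) / total);
    }

    int
    main(void)
    {
        printf("%lu%%\n", pct_waited(25, 75));   /* 25% */
        printf("%lu%%\n", pct_waited(0, 0));     /* 0%, no divide by zero */
        return (0);
    }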
+ */ +static int +__lock_print_all(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; { DB_LOCKER *lip; DB_LOCKOBJ *op; DB_LOCKREGION *lrp; DB_LOCKTAB *lt; - u_int32_t flags, i, j; + DB_MSGBUF mb; + int i, j; + u_int32_t k; char buf[64]; - PANIC_CHECK(dbenv); - ENV_REQUIRES_CONFIG(dbenv, - dbenv->lk_handle, "lock_dump_region", DB_INIT_LOCK); - - /* Make it easy to call from the debugger. */ - if (fp == NULL) - fp = stderr; - - for (flags = 0; *area != '\0'; ++area) - switch (*area) { - case 'A': - LF_SET(LOCK_DUMP_ALL); - break; - case 'c': - LF_SET(LOCK_DUMP_CONF); - break; - case 'l': - LF_SET(LOCK_DUMP_LOCKERS); - break; - case 'm': - LF_SET(LOCK_DUMP_MEM); - break; - case 'o': - LF_SET(LOCK_DUMP_OBJECTS); - break; - case 'p': - LF_SET(LOCK_DUMP_PARAMS); - break; - } - lt = dbenv->lk_handle; lrp = lt->reginfo.primary; + DB_MSGBUF_INIT(&mb); + LOCKREGION(dbenv, lt); - if (LF_ISSET(LOCK_DUMP_PARAMS)) { - fprintf(fp, "%s\nLock region parameters\n", DB_LINE); - fprintf(fp, - "%s: %lu, %s: %lu, %s: %lu,\n%s: %lu, %s: %lu, %s: %lu, %s: %lu\n", - "locker table size", (u_long)lrp->locker_t_size, - "object table size", (u_long)lrp->object_t_size, - "obj_off", (u_long)lrp->obj_off, - "osynch_off", (u_long)lrp->osynch_off, - "locker_off", (u_long)lrp->locker_off, - "lsynch_off", (u_long)lrp->lsynch_off, - "need_dd", (u_long)lrp->need_dd); - if (LOCK_TIME_ISVALID(&lrp->next_timeout)) { - strftime(buf, sizeof(buf), "%m-%d-%H:%M:%S", - localtime((time_t*)&lrp->next_timeout.tv_sec)); - fprintf(fp, "next_timeout: %s.%lu\n", + __db_print_reginfo(dbenv, <->reginfo, "Lock"); + + if (LF_ISSET(DB_STAT_ALL | DB_STAT_LOCK_PARAMS)) { + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "Lock region parameters:"); + STAT_ULONG("locker table size", lrp->locker_t_size); + STAT_ULONG("object table size", lrp->object_t_size); + STAT_ULONG("obj_off", lrp->obj_off); + STAT_ULONG("osynch_off", lrp->osynch_off); + STAT_ULONG("locker_off", lrp->locker_off); + STAT_ULONG("lsynch_off", lrp->lsynch_off); + STAT_ULONG("need_dd", lrp->need_dd); + if (LOCK_TIME_ISVALID(&lrp->next_timeout) && + strftime(buf, sizeof(buf), "%m-%d-%H:%M:%S", + localtime((time_t*)&lrp->next_timeout.tv_sec)) != 0) + __db_msg(dbenv, "next_timeout: %s.%lu", buf, (u_long)lrp->next_timeout.tv_usec); - } } - if (LF_ISSET(LOCK_DUMP_CONF)) { - fprintf(fp, "\n%s\nConflict matrix\n", DB_LINE); + if (LF_ISSET(DB_STAT_ALL | DB_STAT_LOCK_CONF)) { + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "Lock conflict matrix:"); for (i = 0; i < lrp->stat.st_nmodes; i++) { for (j = 0; j < lrp->stat.st_nmodes; j++) - fprintf(fp, "%lu\t", (u_long) + __db_msgadd(dbenv, &mb, "%lu\t", (u_long) lt->conflicts[i * lrp->stat.st_nmodes + j]); - fprintf(fp, "\n"); + DB_MSGBUF_FLUSH(dbenv, &mb); } } - if (LF_ISSET(LOCK_DUMP_LOCKERS)) { - fprintf(fp, "%s\nLocks grouped by lockers\n", DB_LINE); - __lock_printheader(fp); - for (i = 0; i < lrp->locker_t_size; i++) + if (LF_ISSET(DB_STAT_ALL | DB_STAT_LOCK_LOCKERS)) { + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "Locks grouped by lockers:"); + __lock_print_header(dbenv); + for (k = 0; k < lrp->locker_t_size; k++) for (lip = - SH_TAILQ_FIRST(<->locker_tab[i], __db_locker); + SH_TAILQ_FIRST(<->locker_tab[k], __db_locker); lip != NULL; lip = SH_TAILQ_NEXT(lip, links, __db_locker)) { - __lock_dump_locker(lt, lip, fp); + __lock_dump_locker(dbenv, &mb, lt, lip); } } - if (LF_ISSET(LOCK_DUMP_OBJECTS)) { - fprintf(fp, "%s\nLocks grouped by object\n", DB_LINE); - __lock_printheader(fp); - for (i = 0; i < 
lrp->object_t_size; i++) { - for (op = SH_TAILQ_FIRST(<->obj_tab[i], __db_lockobj); + if (LF_ISSET(DB_STAT_ALL | DB_STAT_LOCK_OBJECTS)) { + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "Locks grouped by object:"); + __lock_print_header(dbenv); + for (k = 0; k < lrp->object_t_size; k++) { + for (op = SH_TAILQ_FIRST(<->obj_tab[k], __db_lockobj); op != NULL; - op = SH_TAILQ_NEXT(op, links, __db_lockobj)) - __lock_dump_object(lt, op, fp); + op = SH_TAILQ_NEXT(op, links, __db_lockobj)) { + __lock_dump_object(lt, &mb, op); + __db_msg(dbenv, "%s", ""); + } } } - - if (LF_ISSET(LOCK_DUMP_MEM)) - __db_shalloc_dump(lt->reginfo.addr, fp); - UNLOCKREGION(dbenv, lt); return (0); } static void -__lock_dump_locker(lt, lip, fp) +__lock_dump_locker(dbenv, mbp, lt, lip) + DB_ENV *dbenv; + DB_MSGBUF *mbp; DB_LOCKTAB *lt; DB_LOCKER *lip; - FILE *fp; { struct __db_lock *lp; time_t s; char buf[64]; - fprintf(fp, "%8lx dd=%2ld locks held %-4d write locks %-4d", + __db_msgadd(dbenv, + mbp, "%8lx dd=%2ld locks held %-4d write locks %-4d", (u_long)lip->id, (long)lip->dd_id, lip->nlocks, lip->nwrites); - fprintf(fp, "%s", F_ISSET(lip, DB_LOCKER_DELETED) ? "(D)" : " "); + __db_msgadd( + dbenv, mbp, "%s", F_ISSET(lip, DB_LOCKER_DELETED) ? "(D)" : " "); if (LOCK_TIME_ISVALID(&lip->tx_expire)) { - s = lip->tx_expire.tv_sec; - strftime(buf, sizeof(buf), "%m-%d-%H:%M:%S", localtime(&s)); - fprintf(fp, - "expires %s.%lu", buf, (u_long)lip->tx_expire.tv_usec); + s = (time_t)lip->tx_expire.tv_sec; + if (strftime(buf, + sizeof(buf), "%m-%d-%H:%M:%S", localtime(&s)) != 0) + __db_msgadd(dbenv, mbp, "expires %s.%lu", + buf, (u_long)lip->tx_expire.tv_usec); } if (F_ISSET(lip, DB_LOCKER_TIMEOUT)) - fprintf(fp, " lk timeout %u", lip->lk_timeout); + __db_msgadd(dbenv, mbp, " lk timeout %u", lip->lk_timeout); if (LOCK_TIME_ISVALID(&lip->lk_expire)) { - s = lip->lk_expire.tv_sec; - strftime(buf, sizeof(buf), "%m-%d-%H:%M:%S", localtime(&s)); - fprintf(fp, - " lk expires %s.%lu", buf, (u_long)lip->lk_expire.tv_usec); - } - fprintf(fp, "\n"); - - lp = SH_LIST_FIRST(&lip->heldby, __db_lock); - if (lp != NULL) { - for (; lp != NULL; - lp = SH_LIST_NEXT(lp, locker_links, __db_lock)) - __lock_printlock(lt, lp, 1, fp); - fprintf(fp, "\n"); + s = (time_t)lip->lk_expire.tv_sec; + if (strftime(buf, + sizeof(buf), "%m-%d-%H:%M:%S", localtime(&s)) != 0) + __db_msgadd(dbenv, mbp, " lk expires %s.%lu", + buf, (u_long)lip->lk_expire.tv_usec); } + DB_MSGBUF_FLUSH(dbenv, mbp); + + for (lp = SH_LIST_FIRST(&lip->heldby, __db_lock); + lp != NULL; lp = SH_LIST_NEXT(lp, locker_links, __db_lock)) + __lock_printlock(lt, mbp, lp, 1); } static void -__lock_dump_object(lt, op, fp) +__lock_dump_object(lt, mbp, op) DB_LOCKTAB *lt; + DB_MSGBUF *mbp; DB_LOCKOBJ *op; - FILE *fp; { struct __db_lock *lp; @@ -300,24 +397,22 @@ __lock_dump_object(lt, op, fp) SH_TAILQ_FIRST(&op->holders, __db_lock); lp != NULL; lp = SH_TAILQ_NEXT(lp, links, __db_lock)) - __lock_printlock(lt, lp, 1, fp); + __lock_printlock(lt, mbp, lp, 1); for (lp = SH_TAILQ_FIRST(&op->waiters, __db_lock); lp != NULL; lp = SH_TAILQ_NEXT(lp, links, __db_lock)) - __lock_printlock(lt, lp, 1, fp); - - fprintf(fp, "\n"); + __lock_printlock(lt, mbp, lp, 1); } /* - * __lock_printheader -- + * __lock_print_header -- */ static void -__lock_printheader(fp) - FILE *fp; +__lock_print_header(dbenv) + DB_ENV *dbenv; { - fprintf(fp, "%-8s %-10s%-4s %-7s %s\n", + __db_msg(dbenv, "%-8s %-10s%-4s %-7s %s", "Locker", "Mode", "Count", "Status", "----------------- Object ---------------"); } @@ -325,26 +420,31 
@@ __lock_printheader(fp) /* * __lock_printlock -- * - * PUBLIC: void __lock_printlock __P((DB_LOCKTAB *, - * PUBLIC: struct __db_lock *, int, FILE *)); + * PUBLIC: void __lock_printlock + * PUBLIC: __P((DB_LOCKTAB *, DB_MSGBUF *mbp, struct __db_lock *, int)); */ void -__lock_printlock(lt, lp, ispgno, fp) +__lock_printlock(lt, mbp, lp, ispgno) DB_LOCKTAB *lt; + DB_MSGBUF *mbp; struct __db_lock *lp; int ispgno; - FILE *fp; { + DB_ENV *dbenv; DB_LOCKOBJ *lockobj; + DB_MSGBUF mb; db_pgno_t pgno; u_int32_t *fidp, type; u_int8_t *ptr; char *namep; const char *mode, *status; - /* Make it easy to call from the debugger. */ - if (fp == NULL) - fp = stderr; + dbenv = lt->dbenv; + + if (mbp == NULL) { + DB_MSGBUF_INIT(&mb); + mbp = &mb; + } switch (lp->mode) { case DB_LOCK_DIRTY: @@ -382,8 +482,8 @@ __lock_printlock(lt, lp, ispgno, fp) case DB_LSTAT_ABORTED: status = "ABORT"; break; - case DB_LSTAT_ERR: - status = "ERROR"; + case DB_LSTAT_EXPIRED: + status = "EXPIRED"; break; case DB_LSTAT_FREE: status = "FREE"; @@ -391,20 +491,20 @@ __lock_printlock(lt, lp, ispgno, fp) case DB_LSTAT_HELD: status = "HELD"; break; - case DB_LSTAT_WAITING: - status = "WAIT"; + case DB_LSTAT_NOTEXIST: + status = "NOTEXIST"; break; case DB_LSTAT_PENDING: status = "PENDING"; break; - case DB_LSTAT_EXPIRED: - status = "EXPIRED"; + case DB_LSTAT_WAITING: + status = "WAIT"; break; default: status = "UNKNOWN"; break; } - fprintf(fp, "%8lx %-10s %4lu %-7s ", + __db_msgadd(dbenv, mbp, "%8lx %-10s %4lu %-7s ", (u_long)lp->holder, mode, (u_long)lp->refcount, status); lockobj = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj); @@ -417,18 +517,44 @@ __lock_printlock(lt, lp, ispgno, fp) if (__dbreg_get_name(lt->dbenv, (u_int8_t *)fidp, &namep) != 0) namep = NULL; if (namep == NULL) - fprintf(fp, "(%lx %lx %lx %lx %lx)", - (u_long)fidp[0], (u_long)fidp[1], (u_long)fidp[2], - (u_long)fidp[3], (u_long)fidp[4]); + __db_msgadd(dbenv, mbp, "(%lx %lx %lx %lx %lx) ", + (u_long)fidp[0], (u_long)fidp[1], (u_long)fidp[2], + (u_long)fidp[3], (u_long)fidp[4]); else - fprintf(fp, "%-25s", namep); - fprintf(fp, "%-7s %7lu\n", + __db_msgadd(dbenv, mbp, "%-25s ", namep); + __db_msgadd(dbenv, mbp, "%-7s %7lu", type == DB_PAGE_LOCK ? "page" : type == DB_RECORD_LOCK ? "record" : "handle", (u_long)pgno); } else { - fprintf(fp, "0x%lx ", (u_long)R_OFFSET(<->reginfo, lockobj)); - __db_pr(ptr, lockobj->lockobj.size, fp); - fprintf(fp, "\n"); + __db_msgadd(dbenv, mbp, "0x%lx ", + (u_long)R_OFFSET(dbenv, <->reginfo, lockobj)); + __db_pr(dbenv, mbp, ptr, lockobj->lockobj.size); } + DB_MSGBUF_FLUSH(dbenv, mbp); } + +#else /* !HAVE_STATISTICS */ + +int +__lock_stat_pp(dbenv, statp, flags) + DB_ENV *dbenv; + DB_LOCK_STAT **statp; + u_int32_t flags; +{ + COMPQUIET(statp, NULL); + COMPQUIET(flags, 0); + + return (__db_stat_not_built(dbenv)); +} + +int +__lock_stat_print_pp(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + COMPQUIET(flags, 0); + + return (__db_stat_not_built(dbenv)); +} +#endif diff --git a/db/lock/lock_timer.c b/db/lock/lock_timer.c new file mode 100644 index 000000000..55efb6c6c --- /dev/null +++ b/db/lock/lock_timer.c @@ -0,0 +1,216 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1996-2004 + * Sleepycat Software. All rights reserved. 
+ * + * $Id: lock_timer.c,v 11.141 2004/03/24 20:51:39 bostic Exp $ + */ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include + +#include +#include +#endif + +#include "db_int.h" +#include "dbinc/db_shash.h" +#include "dbinc/lock.h" + +/* + * __lock_set_timeout + * -- set timeout values in shared memory. + * + * This is called from the transaction system. We either set the time that + * this transaction expires or the amount of time a lock for this transaction + * is permitted to wait. + * + * PUBLIC: int __lock_set_timeout __P(( DB_ENV *, + * PUBLIC: u_int32_t, db_timeout_t, u_int32_t)); + */ +int +__lock_set_timeout(dbenv, locker, timeout, op) + DB_ENV *dbenv; + u_int32_t locker; + db_timeout_t timeout; + u_int32_t op; +{ + DB_LOCKTAB *lt; + int ret; + + lt = dbenv->lk_handle; + + LOCKREGION(dbenv, lt); + ret = __lock_set_timeout_internal(dbenv, locker, timeout, op); + UNLOCKREGION(dbenv, lt); + return (ret); +} + +/* + * __lock_set_timeout_internal + * -- set timeout values in shared memory. + * + * This is the internal version called from the lock system. We either set + * the time that this transaction expires or the amount of time that a lock + * for this transaction is permitted to wait. + * + * PUBLIC: int __lock_set_timeout_internal + * PUBLIC: __P((DB_ENV *, u_int32_t, db_timeout_t, u_int32_t)); + */ +int +__lock_set_timeout_internal(dbenv, locker, timeout, op) + DB_ENV *dbenv; + u_int32_t locker; + db_timeout_t timeout; + u_int32_t op; +{ + DB_LOCKER *sh_locker; + DB_LOCKREGION *region; + DB_LOCKTAB *lt; + u_int32_t locker_ndx; + int ret; + + lt = dbenv->lk_handle; + region = lt->reginfo.primary; + + LOCKER_LOCK(lt, region, locker, locker_ndx); + ret = __lock_getlocker(lt, locker, locker_ndx, 1, &sh_locker); + + if (ret != 0) + return (ret); + + if (op == DB_SET_TXN_TIMEOUT) { + if (timeout == 0) + LOCK_SET_TIME_INVALID(&sh_locker->tx_expire); + else + __lock_expires(dbenv, &sh_locker->tx_expire, timeout); + } else if (op == DB_SET_LOCK_TIMEOUT) { + sh_locker->lk_timeout = timeout; + F_SET(sh_locker, DB_LOCKER_TIMEOUT); + } else if (op == DB_SET_TXN_NOW) { + LOCK_SET_TIME_INVALID(&sh_locker->tx_expire); + __lock_expires(dbenv, &sh_locker->tx_expire, 0); + sh_locker->lk_expire = sh_locker->tx_expire; + if (!LOCK_TIME_ISVALID(®ion->next_timeout) || + LOCK_TIME_GREATER( + ®ion->next_timeout, &sh_locker->lk_expire)) + region->next_timeout = sh_locker->lk_expire; + } else + return (EINVAL); + + return (0); +} + +/* + * __lock_inherit_timeout + * -- inherit timeout values from parent locker. + * This is called from the transaction system. This will + * return EINVAL if the parent does not exist or did not + * have a current txn timeout set. + * + * PUBLIC: int __lock_inherit_timeout __P(( DB_ENV *, u_int32_t, u_int32_t)); + */ +int +__lock_inherit_timeout(dbenv, parent, locker) + DB_ENV *dbenv; + u_int32_t parent, locker; +{ + DB_LOCKER *parent_locker, *sh_locker; + DB_LOCKREGION *region; + DB_LOCKTAB *lt; + u_int32_t locker_ndx; + int ret; + + lt = dbenv->lk_handle; + region = lt->reginfo.primary; + ret = 0; + LOCKREGION(dbenv, lt); + + /* If the parent does not exist, we are done. */ + LOCKER_LOCK(lt, region, parent, locker_ndx); + if ((ret = __lock_getlocker(lt, + parent, locker_ndx, 0, &parent_locker)) != 0) + goto err; + + /* + * If the parent is not there yet, thats ok. If it + * does not have any timouts set, then avoid creating + * the child locker at this point. 
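__lock_set_timeout_internal above dispatches on op: DB_SET_TXN_TIMEOUT records (or clears) a transaction expiry, DB_SET_LOCK_TIMEOUT records a per-lock timeout and sets the locker's timeout flag, and DB_SET_TXN_NOW forces immediate expiry. A condensed sketch of that three-way dispatch, using an ordinary struct and whole-second arithmetic in place of the shared DB_LOCKER and the microsecond timevals:

    /* Three-way timeout dispatch; the locker struct and op constants are
     * stand-ins for DB_LOCKER and the DB_SET_* values. */
    #include <errno.h>
    #include <stdio.h>
    #include <time.h>

    enum op { SET_TXN_TIMEOUT, SET_LOCK_TIMEOUT, SET_TXN_NOW };

    struct locker {
        time_t tx_expire;           /* 0 means "no expiry recorded" */
        unsigned long lk_timeout;   /* per-lock timeout, microseconds */
        int has_lk_timeout;
    };

    static int
    set_timeout(struct locker *lk, unsigned long usec, enum op op)
    {
        switch (op) {
        case SET_TXN_TIMEOUT:
            lk->tx_expire = usec == 0 ?
                0 : time(NULL) + (time_t)(usec / 1000000);
            return (0);
        case SET_LOCK_TIMEOUT:
            lk->lk_timeout = usec;
            lk->has_lk_timeout = 1;
            return (0);
        case SET_TXN_NOW:
            lk->tx_expire = time(NULL);     /* expire immediately */
            return (0);
        default:
            return (EINVAL);
        }
    }

    int
    main(void)
    {
        struct locker lk = { 0, 0, 0 };

        (void)set_timeout(&lk, 2000000, SET_TXN_TIMEOUT);
        (void)set_timeout(&lk, 500000, SET_LOCK_TIMEOUT);
        printf("lock timeout %lu usec, txn expires in ~%ld sec\n",
            lk.lk_timeout, (long)(lk.tx_expire - time(NULL)));
        return (0);
    }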
+ */ + if (parent_locker == NULL || + (LOCK_TIME_ISVALID(&parent_locker->tx_expire) && + !F_ISSET(parent_locker, DB_LOCKER_TIMEOUT))) { + ret = EINVAL; + goto done; + } + + LOCKER_LOCK(lt, region, locker, locker_ndx); + if ((ret = __lock_getlocker(lt, + locker, locker_ndx, 1, &sh_locker)) != 0) + goto err; + + sh_locker->tx_expire = parent_locker->tx_expire; + + if (F_ISSET(parent_locker, DB_LOCKER_TIMEOUT)) { + sh_locker->lk_timeout = parent_locker->lk_timeout; + F_SET(sh_locker, DB_LOCKER_TIMEOUT); + if (!LOCK_TIME_ISVALID(&parent_locker->tx_expire)) + ret = EINVAL; + } + +done: +err: + UNLOCKREGION(dbenv, lt); + return (ret); +} + +/* + * __lock_expires -- + * Set the expire time given the time to live. If timevalp is set then + * it contains "now". This avoids repeated system calls to get the time. + * + * PUBLIC: void __lock_expires __P((DB_ENV *, db_timeval_t *, db_timeout_t)); + */ +void +__lock_expires(dbenv, timevalp, timeout) + DB_ENV *dbenv; + db_timeval_t *timevalp; + db_timeout_t timeout; +{ + if (!LOCK_TIME_ISVALID(timevalp)) + __os_clock(dbenv, &timevalp->tv_sec, &timevalp->tv_usec); + if (timeout > 1000000) { + timevalp->tv_sec += timeout / 1000000; + timevalp->tv_usec += timeout % 1000000; + } else + timevalp->tv_usec += timeout; + + if (timevalp->tv_usec > 1000000) { + timevalp->tv_sec++; + timevalp->tv_usec -= 1000000; + } +} + +/* + * __lock_expired -- determine if a lock has expired. + * + * PUBLIC: int __lock_expired __P((DB_ENV *, db_timeval_t *, db_timeval_t *)); + */ +int +__lock_expired(dbenv, now, timevalp) + DB_ENV *dbenv; + db_timeval_t *now, *timevalp; +{ + if (!LOCK_TIME_ISVALID(timevalp)) + return (0); + + if (!LOCK_TIME_ISVALID(now)) + __os_clock(dbenv, &now->tv_sec, &now->tv_usec); + + return (now->tv_sec > timevalp->tv_sec || + (now->tv_sec == timevalp->tv_sec && + now->tv_usec >= timevalp->tv_usec)); +} diff --git a/db/lock/lock_util.c b/db/lock/lock_util.c index 9a1d5ee9c..0c38d72ac 100644 --- a/db/lock/lock_util.c +++ b/db/lock/lock_util.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: lock_util.c,v 11.12 2004/09/22 03:48:29 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: lock_util.c,v 11.9 2003/01/08 05:22:12 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -136,3 +134,18 @@ __lock_locker_hash(locker) { return (locker); } + +/* + * __lock_nomem -- + * Report a lack of some resource. + * + * PUBLIC: int __lock_nomem __P((DB_ENV *, const char *)); + */ +int +__lock_nomem(dbenv, res) + DB_ENV *dbenv; + const char *res; +{ + __db_err(dbenv, "Lock table is out of available %s", res); + return (ENOMEM); +} diff --git a/db/log/log.c b/db/log/log.c index 95c5fac90..1d39f631c 100644 --- a/db/log/log.c +++ b/db/log/log.c @@ -1,14 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
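__lock_expires and __lock_expired above add a microsecond timeout to a (sec, usec) pair with a single carry and then test expiry as a lexicographic comparison on the pair. A standalone version of the same arithmetic; it splits seconds and microseconds up front instead of reproducing the original's threshold branch, and gettimeofday stands in for __os_clock:

    /* Timeval-style expiry arithmetic: add-with-carry plus a lexicographic
     * "expired" test. */
    #include <stdio.h>
    #include <sys/time.h>

    struct tv { long sec; long usec; };

    static void
    expires(struct tv *t, unsigned long timeout_usec)
    {
        t->sec += (long)(timeout_usec / 1000000);
        t->usec += (long)(timeout_usec % 1000000);
        if (t->usec >= 1000000) {           /* at most one carry is possible */
            t->sec++;
            t->usec -= 1000000;
        }
    }

    static int
    expired(const struct tv *now, const struct tv *deadline)
    {
        return (now->sec > deadline->sec ||
            (now->sec == deadline->sec && now->usec >= deadline->usec));
    }

    int
    main(void)
    {
        struct timeval real;
        struct tv now, deadline;

        (void)gettimeofday(&real, NULL);
        now.sec = deadline.sec = (long)real.tv_sec;
        now.usec = deadline.usec = (long)real.tv_usec;

        expires(&deadline, 1500000);        /* 1.5 seconds from now */
        printf("expired immediately? %d\n", expired(&now, &deadline));  /* 0 */
        return (0);
    }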
+ * + * $Id: log.c,v 11.160 2004/10/07 16:50:57 bostic Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: log.c,v 11.133 2003/09/13 19:20:37 bostic Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #include @@ -27,7 +26,6 @@ static const char revid[] = "$Id: log.c,v 11.133 2003/09/13 19:20:37 bostic Exp static int __log_init __P((DB_ENV *, DB_LOG *)); static int __log_recover __P((DB_LOG *)); static size_t __log_region_size __P((DB_ENV *)); -static int __log_stat __P((DB_ENV *, DB_LOG_STAT **, u_int32_t)); static int __log_zero __P((DB_ENV *, DB_LSN *, DB_LSN *)); /* @@ -49,10 +47,15 @@ __log_open(dbenv) return (ret); dblp->dbenv = dbenv; + /* Set the default buffer size, if not otherwise configured. */ + if (dbenv->lg_bsize == 0) + dbenv->lg_bsize = F_ISSET(dbenv, DB_ENV_LOG_INMEMORY) ? + LG_BSIZE_INMEM : LG_BSIZE_DEFAULT; + /* Join/create the log region. */ + dblp->reginfo.dbenv = dbenv; dblp->reginfo.type = REGION_TYPE_LOG; dblp->reginfo.id = INVALID_REGION_ID; - dblp->reginfo.mode = dbenv->db_mode; dblp->reginfo.flags = REGION_JOIN_OK; if (F_ISSET(dbenv, DB_ENV_CREATE)) F_SET(&dblp->reginfo, REGION_CREATE_OK); @@ -67,7 +70,7 @@ __log_open(dbenv) /* Set the local addresses. */ lp = dblp->reginfo.primary = - R_ADDR(&dblp->reginfo, dblp->reginfo.rp->primary); + R_ADDR(dbenv, &dblp->reginfo, dblp->reginfo.rp->primary); /* * If the region is threaded, then we have to lock both the handles @@ -79,7 +82,7 @@ __log_open(dbenv) goto err; /* Initialize the rest of the structure. */ - dblp->bufp = R_ADDR(&dblp->reginfo, lp->buffer_off); + dblp->bufp = R_ADDR(dbenv, &dblp->reginfo, lp->buffer_off); /* * Set the handle -- we may be about to run recovery, which allocates @@ -100,7 +103,8 @@ __log_open(dbenv) * log file header. */ if (lp->log_size == 0) - lp->log_size = LG_MAX_DEFAULT; + lp->log_size = F_ISSET(dbenv, DB_ENV_LOG_INMEMORY) ? + LG_MAX_INMEM : LG_MAX_DEFAULT; if ((ret = __log_recover(dblp)) != 0) goto err; @@ -117,7 +121,7 @@ __log_open(dbenv) * so that checkpoint gets a valid ckp_lsn value. */ if (IS_INIT_LSN(lp->lsn) && - (ret = __log_newfile(dblp, NULL)) != 0) + (ret = __log_newfile(dblp, NULL, 0)) != 0) goto err; /* Initialize replication's next-expected LSN value. */ @@ -126,10 +130,22 @@ __log_open(dbenv) /* * A process joining the region may have reset the log file * size, too. If so, it only affects the next log file we - * create. + * create. We need to check that the size is reasonable given + * the buffer size in the region. */ - if (dbenv->lg_size != 0) + if (dbenv->lg_size != 0) { + if ((ret = + __log_check_sizes(dbenv, dbenv->lg_size, 0)) != 0) + goto err; + lp->log_nsize = dbenv->lg_size; + } + + /* Migrate persistent flags from the region into the DB_ENV. */ + if (lp->db_log_autoremove) + F_SET(dbenv, DB_ENV_LOG_AUTOREMOVE); + if (lp->db_log_inmemory) + F_SET(dbenv, DB_ENV_LOG_INMEMORY); } R_UNLOCK(dbenv, &dblp->reginfo); @@ -161,54 +177,63 @@ __log_init(dbenv, dblp) DB_LOG *dblp; { DB_MUTEX *flush_mutexp; - LOG *region; + LOG *lp; int ret; void *p; #ifdef HAVE_MUTEX_SYSTEM_RESOURCES u_int8_t *addr; #endif - if ((ret = __db_shalloc(dblp->reginfo.addr, - sizeof(*region), 0, &dblp->reginfo.primary)) != 0) + /* + * This is the first point where we can validate the buffer size, + * because we know all three settings have been configured (file size, + * buffer size and the in-memory flag). 
+ */ + if ((ret = + __log_check_sizes(dbenv, dbenv->lg_size, dbenv->lg_bsize)) != 0) + return (ret); + + if ((ret = __db_shalloc(&dblp->reginfo, + sizeof(*lp), MUTEX_ALIGN, &dblp->reginfo.primary)) != 0) goto mem_err; dblp->reginfo.rp->primary = - R_OFFSET(&dblp->reginfo, dblp->reginfo.primary); - region = dblp->reginfo.primary; - memset(region, 0, sizeof(*region)); + R_OFFSET(dbenv, &dblp->reginfo, dblp->reginfo.primary); + lp = dblp->reginfo.primary; + memset(lp, 0, sizeof(*lp)); - region->fid_max = 0; - SH_TAILQ_INIT(®ion->fq); - region->free_fid_stack = INVALID_ROFF; - region->free_fids = region->free_fids_alloced = 0; + lp->fid_max = 0; + SH_TAILQ_INIT(&lp->fq); + lp->free_fid_stack = INVALID_ROFF; + lp->free_fids = lp->free_fids_alloced = 0; /* Initialize LOG LSNs. */ - INIT_LSN(region->lsn); - INIT_LSN(region->t_lsn); + INIT_LSN(lp->lsn); + INIT_LSN(lp->t_lsn); /* * It's possible to be waiting for an LSN of [1][0], if a replication * client gets the first log record out of order. An LSN of [0][0] * signifies that we're not waiting. */ - ZERO_LSN(region->waiting_lsn); + ZERO_LSN(lp->waiting_lsn); /* * Log makes note of the fact that it ran into a checkpoint on * startup if it did so, as a recovery optimization. A zero * LSN signifies that it hasn't found one [yet]. */ - ZERO_LSN(region->cached_ckp_lsn); + ZERO_LSN(lp->cached_ckp_lsn); #ifdef HAVE_MUTEX_SYSTEM_RESOURCES /* Allocate room for the log maintenance info and initialize it. */ - if ((ret = __db_shalloc(dblp->reginfo.addr, + if ((ret = __db_shalloc(&dblp->reginfo, sizeof(REGMAINT) + LG_MAINT_SIZE, 0, &addr)) != 0) goto mem_err; __db_maintinit(&dblp->reginfo, addr, LG_MAINT_SIZE); - region->maint_off = R_OFFSET(&dblp->reginfo, addr); + lp->maint_off = R_OFFSET(dbenv, &dblp->reginfo, addr); #endif - if ((ret = __db_mutex_setup(dbenv, &dblp->reginfo, ®ion->fq_mutex, + if ((ret = __db_mutex_setup(dbenv, &dblp->reginfo, &lp->fq_mutex, MUTEX_NO_RLOCK)) != 0) return (ret); @@ -217,37 +242,47 @@ __log_init(dbenv, dblp) * to be aligned to MUTEX_ALIGN, and the only way to guarantee that is * to make sure they're at the beginning of a shalloc'ed chunk. */ - if ((ret = __db_shalloc(dblp->reginfo.addr, + if ((ret = __db_shalloc(&dblp->reginfo, sizeof(DB_MUTEX), MUTEX_ALIGN, &flush_mutexp)) != 0) goto mem_err; if ((ret = __db_mutex_setup(dbenv, &dblp->reginfo, flush_mutexp, MUTEX_NO_RLOCK)) != 0) return (ret); - region->flush_mutex_off = R_OFFSET(&dblp->reginfo, flush_mutexp); + lp->flush_mutex_off = R_OFFSET(dbenv, &dblp->reginfo, flush_mutexp); /* Initialize the buffer. */ - if ((ret = - __db_shalloc(dblp->reginfo.addr, dbenv->lg_bsize, 0, &p)) != 0) { + if ((ret = __db_shalloc(&dblp->reginfo, dbenv->lg_bsize, 0, &p)) != 0) { mem_err: __db_err(dbenv, "Unable to allocate memory for the log buffer"); return (ret); } - region->buffer_size = dbenv->lg_bsize; - region->buffer_off = R_OFFSET(&dblp->reginfo, p); - region->log_size = region->log_nsize = dbenv->lg_size; + lp->regionmax = dbenv->lg_regionmax; + lp->buffer_off = R_OFFSET(dbenv, &dblp->reginfo, p); + lp->buffer_size = dbenv->lg_bsize; + lp->log_size = lp->log_nsize = dbenv->lg_size; /* Initialize the commit Queue. */ - SH_TAILQ_INIT(®ion->free_commits); - SH_TAILQ_INIT(®ion->commits); - region->ncommit = 0; + SH_TAILQ_INIT(&lp->free_commits); + SH_TAILQ_INIT(&lp->commits); + lp->ncommit = 0; + + /* Initialize the logfiles list for in-memory logs. */ + SH_TAILQ_INIT(&lp->logfiles); + SH_TAILQ_INIT(&lp->free_logfiles); /* * Fill in the log's persistent header. 
Don't fill in the log file * sizes, as they may change at any time and so have to be filled in * as each log file is created. */ - region->persist.magic = DB_LOGMAGIC; - region->persist.version = DB_LOGVERSION; - region->persist.mode = (u_int32_t)dbenv->db_mode; + lp->persist.magic = DB_LOGMAGIC; + lp->persist.version = DB_LOGVERSION; + lp->persist.mode = (u_int32_t)dbenv->db_mode; + + /* Migrate persistent flags from the DB_ENV into the region. */ + if (F_ISSET(dbenv, DB_ENV_LOG_AUTOREMOVE)) + lp->db_log_autoremove = 1; + if (F_ISSET(dbenv, DB_ENV_LOG_INMEMORY)) + lp->db_log_inmemory = 1; return (0); } @@ -348,12 +383,13 @@ __log_recover(dblp) /* Set up the current buffer information, too. */ lp->len = logc->c_len; + lp->a_off = 0; lp->b_off = 0; lp->w_off = lp->lsn.offset; skipsearch: if (FLD_ISSET(dbenv->verbose, DB_VERB_RECOVERY)) - __db_err(dbenv, + __db_msg(dbenv, "Finding last valid log LSN: file: %lu offset %lu", (u_long)lp->lsn.file, (u_long)lp->lsn.offset); @@ -379,18 +415,33 @@ __log_find(dblp, find_first, valp, statusp) logfile_validity *statusp; { DB_ENV *dbenv; + LOG *lp; logfile_validity logval_status, status; + struct __db_filestart *filestart; u_int32_t clv, logval; int cnt, fcnt, ret; const char *dir; char *c, **names, *p, *q, savech; dbenv = dblp->dbenv; + lp = dblp->reginfo.primary; logval_status = status = DB_LV_NONEXISTENT; /* Return a value of 0 as the log file number on failure. */ *valp = 0; + if (lp->db_log_inmemory) { + filestart = find_first ? + SH_TAILQ_FIRST(&lp->logfiles, __db_filestart) : + SH_TAILQ_LAST(&lp->logfiles, links, __db_filestart); + if (filestart != NULL) { + *valp = filestart->file; + logval_status = DB_LV_NORMAL; + } + *statusp = logval_status; + return (0); + } + /* Find the directory name. */ if ((ret = __log_name(dblp, 1, &p, NULL, 0)) != 0) return (ret); @@ -440,7 +491,7 @@ __log_find(dblp, find_first, valp, statusp) * Use atol, not atoi; if an "int" is 16-bits, the largest * log file name won't fit. */ - clv = atol(names[cnt] + (sizeof(LFPREFIX) - 1)); + clv = (u_int32_t)atol(names[cnt] + (sizeof(LFPREFIX) - 1)); /* * If searching for the first log file, we want to return the @@ -451,7 +502,7 @@ __log_find(dblp, find_first, valp, statusp) * If we're searching for the last log file, we want to return * the newest log file, period. * - * Readable log files should never preceede unreadable log + * Readable log files should never precede unreadable log * files, that would mean the admin seriously screwed up. */ if (find_first) { @@ -527,7 +578,7 @@ err: __os_dirfree(dbenv, names, fcnt); * normal, if it's zero-length, or if it's an old version). * * PUBLIC: int __log_valid __P((DB_LOG *, u_int32_t, int, - * PUBLIC: DB_FH **, int, logfile_validity *)); + * PUBLIC: DB_FH **, u_int32_t, logfile_validity *)); */ int __log_valid(dblp, number, set_persist, fhpp, flags, statusp) @@ -535,17 +586,17 @@ __log_valid(dblp, number, set_persist, fhpp, flags, statusp) u_int32_t number; int set_persist; DB_FH **fhpp; - int flags; + u_int32_t flags; logfile_validity *statusp; { DB_CIPHER *db_cipher; DB_ENV *dbenv; DB_FH *fhp; HDR *hdr; - LOG *region; + LOG *lp; LOGP *persist; logfile_validity status; - size_t hdrsize, nw, recsize; + size_t hdrsize, nr, recsize; int is_hmac, ret; u_int8_t *tmp; char *fname; @@ -583,9 +634,14 @@ __log_valid(dblp, number, set_persist, fhpp, flags, statusp) hdr = (HDR *)tmp; persist = (LOGP *)(tmp + hdrsize); - /* Try to read the header. 
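The atol call in __log_find above parses the numeric suffix of a log file name after skipping the fixed prefix (sizeof(LFPREFIX) - 1 bytes), using atol because the number can overflow a 16-bit int. A sketch of that extraction, assuming a "log." prefix since LFPREFIX itself is defined elsewhere in the tree:

    /* Extracting the file number from a log file name; "log." is assumed
     * here as the prefix. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define LFPREFIX "log."

    static unsigned long
    logfile_number(const char *name)
    {
        /* Skip the prefix; sizeof includes the trailing NUL, hence -1. */
        if (strncmp(name, LFPREFIX, sizeof(LFPREFIX) - 1) != 0)
            return (0);
        return ((unsigned long)atol(name + (sizeof(LFPREFIX) - 1)));
    }

    int
    main(void)
    {
        printf("%lu\n", logfile_number("log.0000000042"));   /* 42 */
        printf("%lu\n", logfile_number("__db.001"));         /* 0: not a log */
        return (0);
    }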
*/ - if ((ret = __os_read(dbenv, fhp, tmp, recsize + hdrsize, &nw)) != 0 || - nw != recsize + hdrsize) { + /* + * Try to read the header. This can fail if the log is truncated, or + * if we find a preallocated log file where the header has not yet been + * written, so we need to check whether the header is zero-filled. + */ + if ((ret = __os_read(dbenv, fhp, tmp, recsize + hdrsize, &nr)) != 0 || + nr != recsize + hdrsize || + (hdr->len == 0 && persist->magic == 0 && persist->log_size == 0)) { if (ret == 0) status = DB_LV_INCOMPLETE; else @@ -652,14 +708,14 @@ __log_valid(dblp, number, set_persist, fhpp, flags, statusp) } /* - * Set our status code to indicate whether the log file - * belongs to an unreadable or readable old version; leave it - * alone if and only if the log file version is the current one. + * Set our status code to indicate whether the log file belongs to an + * unreadable or readable old version; leave it alone if and only if + * the log file version is the current one. */ if (persist->version > DB_LOGVERSION) { /* This is a fatal error--the log file is newer than DB. */ __db_err(dbenv, - "Ignoring log file: %s: unsupported log version %lu", + "Unacceptable log file %s: unsupported log version %lu", fname, (u_long)persist->version); ret = EINVAL; goto err; @@ -667,21 +723,20 @@ __log_valid(dblp, number, set_persist, fhpp, flags, statusp) status = DB_LV_OLD_UNREADABLE; /* This is a non-fatal error, but give some feedback. */ __db_err(dbenv, - "Ignoring log file: %s: unreadable log version %lu", + "Skipping log file %s: historic log version %lu", fname, (u_long)persist->version); /* - * We don't want to set persistent info based on an - * unreadable region, so jump to "err". + * We don't want to set persistent info based on an unreadable + * region, so jump to "err". */ goto err; } else if (persist->version < DB_LOGVERSION) status = DB_LV_OLD_READABLE; /* - * Only if we have a current log do we verify the checksum. - * We could not check the checksum before checking the magic - * and version because old log hdrs have the length and checksum - * in a different location. + * Only if we have a current log do we verify the checksum. We could + * not check the checksum before checking the magic and version because + * old log headers put the length and checksum in a different location. */ if (!CRYPTO_ON(dbenv) && ((ret = __db_check_chksum(dbenv, db_cipher, &hdr->chksum[0], (u_int8_t *)persist, @@ -703,9 +758,9 @@ __log_valid(dblp, number, set_persist, fhpp, flags, statusp) * be a problem, though. */ if (set_persist) { - region = dblp->reginfo.primary; - region->log_size = persist->log_size; - region->persist.mode = persist->mode; + lp = dblp->reginfo.primary; + lp->log_size = persist->log_size; + lp->persist.mode = persist->mode; } err: if (fname != NULL) @@ -725,8 +780,7 @@ err: if (fname != NULL) /* * __log_dbenv_refresh -- - * Clean up after the log system on a close or failed open. Called only - * from __dbenv_refresh. (Formerly called __log_close.) + * Clean up after the log system on a close or failed open. * * PUBLIC: int __log_dbenv_refresh __P((DB_ENV *)); */ @@ -735,21 +789,44 @@ __log_dbenv_refresh(dbenv) DB_ENV *dbenv; { DB_LOG *dblp; + LOG *lp; + REGINFO *reginfo; int ret, t_ret; dblp = dbenv->lg_handle; + reginfo = &dblp->reginfo; + lp = reginfo->primary; /* We may have opened files as part of XA; if so, close them. */ F_SET(dblp, DBLOG_RECOVER); ret = __dbreg_close_files(dbenv); + /* + * If a private region, return the memory to the heap. 
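The preallocated-file check added to __log_valid above treats a header whose length, magic and log size are all zero as an incomplete file rather than a corrupt one, because a preallocated log is zero-filled until its header is written. A compact sketch of that classification with a stand-in header struct:

    /* Classifying a freshly read log header; the struct is a stand-in for
     * HDR/LOGP, and the "real" values in main are arbitrary non-zero data. */
    #include <stdio.h>

    struct loghdr {
        unsigned long len;
        unsigned long magic;
        unsigned long log_size;
    };

    enum validity { LV_NORMAL, LV_INCOMPLETE };

    static enum validity
    classify(const struct loghdr *h, int read_ok)
    {
        /*
         * A short read, or a header that is entirely zero, means the file
         * exists but was never (fully) written: incomplete, not corrupt.
         */
        if (!read_ok || (h->len == 0 && h->magic == 0 && h->log_size == 0))
            return (LV_INCOMPLETE);
        return (LV_NORMAL);
    }

    int
    main(void)
    {
        struct loghdr zeroed = { 0, 0, 0 }, real = { 28, 0x40988, 10485760 };

        printf("%d %d\n", classify(&zeroed, 1), classify(&real, 1));  /* 1 0 */
        return (0);
    }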
Not needed for + * filesystem-backed or system shared memory regions, that memory isn't + * owned by any particular process. + */ + if (F_ISSET(dbenv, DB_ENV_PRIVATE)) { + /* Discard the flush mutex. */ + __db_shalloc_free(reginfo, + R_ADDR(dbenv, reginfo, lp->flush_mutex_off)); + + /* Discard the buffer. */ + __db_shalloc_free(reginfo, + R_ADDR(dbenv, reginfo, lp->buffer_off)); + + /* Discard stack of free file IDs. */ + if (lp->free_fid_stack != INVALID_ROFF) + __db_shalloc_free(reginfo, + R_ADDR(dbenv, reginfo, lp->free_fid_stack)); + } + /* Discard the per-thread lock. */ if (dblp->mutexp != NULL) - __db_mutex_free(dbenv, &dblp->reginfo, dblp->mutexp); + __db_mutex_free(dbenv, reginfo, dblp->mutexp); /* Detach from the region. */ - if ((t_ret = - __db_r_detach(dbenv, &dblp->reginfo, 0)) != 0 && ret == 0) + if ((t_ret = __db_r_detach(dbenv, reginfo, 0)) != 0 && ret == 0) ret = t_ret; /* Close open files, release allocated memory. */ @@ -768,91 +845,6 @@ __log_dbenv_refresh(dbenv) return (ret); } -/* - * __log_stat_pp -- - * DB_ENV->log_stat pre/post processing. - * - * PUBLIC: int __log_stat_pp __P((DB_ENV *, DB_LOG_STAT **, u_int32_t)); - */ -int -__log_stat_pp(dbenv, statp, flags) - DB_ENV *dbenv; - DB_LOG_STAT **statp; - u_int32_t flags; -{ - int rep_check, ret; - - PANIC_CHECK(dbenv); - ENV_REQUIRES_CONFIG(dbenv, - dbenv->lg_handle, "DB_ENV->log_stat", DB_INIT_LOG); - - if ((ret = __db_fchk(dbenv, - "DB_ENV->log_stat", flags, DB_STAT_CLEAR)) != 0) - return (ret); - - rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; - if (rep_check) - __env_rep_enter(dbenv); - ret = __log_stat(dbenv, statp, flags); - if (rep_check) - __env_rep_exit(dbenv); - return (ret); -} - -/* - * __log_stat -- - * Return log statistics. - */ -static int -__log_stat(dbenv, statp, flags) - DB_ENV *dbenv; - DB_LOG_STAT **statp; - u_int32_t flags; -{ - DB_LOG *dblp; - DB_LOG_STAT *stats; - LOG *region; - int ret; - - *statp = NULL; - - dblp = dbenv->lg_handle; - region = dblp->reginfo.primary; - - if ((ret = __os_umalloc(dbenv, sizeof(DB_LOG_STAT), &stats)) != 0) - return (ret); - - /* Copy out the global statistics. */ - R_LOCK(dbenv, &dblp->reginfo); - *stats = region->stat; - if (LF_ISSET(DB_STAT_CLEAR)) - memset(®ion->stat, 0, sizeof(region->stat)); - - stats->st_magic = region->persist.magic; - stats->st_version = region->persist.version; - stats->st_mode = region->persist.mode; - stats->st_lg_bsize = region->buffer_size; - stats->st_lg_size = region->log_nsize; - - stats->st_region_wait = dblp->reginfo.rp->mutex.mutex_set_wait; - stats->st_region_nowait = dblp->reginfo.rp->mutex.mutex_set_nowait; - if (LF_ISSET(DB_STAT_CLEAR)) { - dblp->reginfo.rp->mutex.mutex_set_wait = 0; - dblp->reginfo.rp->mutex.mutex_set_nowait = 0; - } - stats->st_regsize = dblp->reginfo.rp->size; - - stats->st_cur_file = region->lsn.file; - stats->st_cur_offset = region->lsn.offset; - stats->st_disk_file = region->s_lsn.file; - stats->st_disk_offset = region->s_lsn.offset; - - R_UNLOCK(dbenv, &dblp->reginfo); - - *statp = stats; - return (0); -} - /* * __log_get_cached_ckp_lsn -- * Retrieve any last checkpoint LSN that we may have found on startup. 
@@ -908,11 +900,26 @@ __log_region_destroy(dbenv, infop) DB_ENV *dbenv; REGINFO *infop; { - __db_shlocks_destroy(infop, (REGMAINT *)R_ADDR(infop, - ((LOG *)R_ADDR(infop, infop->rp->primary))->maint_off)); + /* + * This routine is called in two cases: when discarding the mutexes + * from a previous Berkeley DB run, during recovery, and two, when + * discarding the mutexes as we shut down the database environment. + * In the latter case, we also need to discard shared memory segments, + * this is the last time we use them, and the last region-specific + * call we make. + */ +#ifdef HAVE_MUTEX_SYSTEM_RESOURCES + LOG *lp; + + lp = R_ADDR(dbenv, infop, infop->rp->primary); - COMPQUIET(dbenv, NULL); - COMPQUIET(infop, NULL); + /* Destroy mutexes. */ + __db_shlocks_destroy(infop, R_ADDR(dbenv, infop, lp->maint_off)); + if (infop->primary != NULL && F_ISSET(dbenv, DB_ENV_PRIVATE)) + __db_shalloc_free(infop, R_ADDR(dbenv, infop, lp->maint_off)); +#endif + if (infop->primary != NULL && F_ISSET(dbenv, DB_ENV_PRIVATE)) + __db_shalloc_free(infop, infop->primary); } /* @@ -968,6 +975,10 @@ __log_vtruncate(dbenv, lsn, ckplsn, trunclsn) lp->len = c_len; lp->lsn.offset += lp->len; + if (lp->db_log_inmemory && + (ret = __log_inmem_lsnoff(dblp, &lp->lsn, &lp->b_off)) != 0) + goto err; + /* * I am going to assume that the number of bytes written since * the last checkpoint doesn't exceed a 32-bit number. @@ -978,7 +989,7 @@ __log_vtruncate(dbenv, lsn, ckplsn, trunclsn) bytes = lp->log_size - ckplsn->offset; if (lp->lsn.file > ckplsn->file + 1) bytes += lp->log_size * - (lp->lsn.file - ckplsn->file - 1); + ((lp->lsn.file - ckplsn->file) - 1); bytes += lp->lsn.offset; } else bytes = lp->lsn.offset - ckplsn->offset; @@ -990,7 +1001,7 @@ __log_vtruncate(dbenv, lsn, ckplsn, trunclsn) * If the saved lsn is greater than our new end of log, reset it * to our current end of log. */ - flush_mutexp = R_ADDR(&dblp->reginfo, lp->flush_mutex_off); + flush_mutexp = R_ADDR(dbenv, &dblp->reginfo, lp->flush_mutex_off); MUTEX_LOCK(dbenv, flush_mutexp); if (log_compare(&lp->s_lsn, lsn) > 0) lp->s_lsn = lp->lsn; @@ -1032,10 +1043,20 @@ __log_is_outdated(dbenv, fnum, outdatedp) char *name; int ret; u_int32_t cfile; + struct __db_filestart *filestart; dblp = dbenv->lg_handle; - *outdatedp = 0; + if (F_ISSET(dbenv, DB_ENV_LOG_INMEMORY)) { + R_LOCK(dbenv, &dblp->reginfo); + lp = (LOG *)dblp->reginfo.primary; + filestart = SH_TAILQ_FIRST(&lp->logfiles, __db_filestart); + *outdatedp = (fnum < filestart->file); + R_UNLOCK(dbenv, &dblp->reginfo); + return (0); + } + + *outdatedp = 0; if ((ret = __log_name(dblp, fnum, &name, NULL, 0)) != 0) return (ret); @@ -1070,10 +1091,12 @@ __log_zero(dbenv, from_lsn, to_lsn) { DB_FH *fhp; DB_LOG *dblp; + LOG *lp; + struct __db_filestart *filestart, *nextstart; size_t nbytes, len, nw; + u_int32_t fn, mbytes, bytes; u_int8_t buf[4096]; - u_int32_t mbytes, bytes; - int fn, ret; + int ret; char *fname; dblp = dbenv->lg_handle; @@ -1084,6 +1107,27 @@ __log_zero(dbenv, from_lsn, to_lsn) return (0); } + lp = (LOG *)dblp->reginfo.primary; + if (lp->db_log_inmemory) { + /* + * Remove the first file if it is invalidated by this write. + * Log records can't be bigger than a file, so we only need to + * check the first file. 
+ */ + for (filestart = SH_TAILQ_FIRST(&lp->logfiles, __db_filestart); + filestart != NULL && from_lsn->file < filestart->file; + filestart = nextstart) { + nextstart = SH_TAILQ_NEXT(filestart, + links, __db_filestart); + SH_TAILQ_REMOVE(&lp->logfiles, filestart, + links, __db_filestart); + SH_TAILQ_INSERT_HEAD(&lp->free_logfiles, filestart, + links, __db_filestart); + } + + return (0); + } + /* Close any open file handles so unlinks don't fail. */ if (dblp->lfhp != NULL) { (void)__os_closehandle(dbenv, dblp->lfhp); @@ -1136,24 +1180,216 @@ err: (void)__os_closehandle(dbenv, dblp->lfhp); } /* - * __log_autoremove -- - * Delete any non-essential log files. + * __log_inmem_lsnoff -- + * Find the offset in the buffer of a given LSN. * - * PUBLIC: void __log_autoremove __P((DB_ENV *)); + * PUBLIC: int __log_inmem_lsnoff __P((DB_LOG *, DB_LSN *, size_t *)); */ -void -__log_autoremove(dbenv) - DB_ENV *dbenv; +int +__log_inmem_lsnoff(dblp, lsn, offsetp) + DB_LOG *dblp; + DB_LSN *lsn; + size_t *offsetp; { - char **begin, **list; + LOG *lp; + struct __db_filestart *filestart; - if (__log_archive(dbenv, &list, DB_ARCH_ABS) != 0) - return; + lp = (LOG *)dblp->reginfo.primary; - if (list != NULL) { - for (begin = list; *list != NULL; ++list) - (void)__os_unlink(dbenv, *list); - __os_ufree(dbenv, begin); + SH_TAILQ_FOREACH(filestart, &lp->logfiles, links, __db_filestart) + if (filestart->file == lsn->file) { + *offsetp = + (filestart->b_off + lsn->offset) % lp->buffer_size; + return (0); + } + + return (DB_NOTFOUND); +} + +/* + * __log_inmem_newfile -- + * Records the offset of the beginning of a new file in the in-memory + * buffer. + * + * PUBLIC: int __log_inmem_newfile __P((DB_LOG *, u_int32_t)); + */ +int +__log_inmem_newfile(dblp, file) + DB_LOG *dblp; + u_int32_t file; +{ + HDR hdr; + LOG *lp; + struct __db_filestart *filestart; + int ret; +#ifdef DIAGNOSTIC + struct __db_filestart *first, *last; +#endif + + lp = (LOG *)dblp->reginfo.primary; + + /* + * We write an empty header at the end of every in-memory log file. + * This is used during cursor traversal to indicate when to switch the + * LSN to the next file. + */ + if (file > 1) { + memset(&hdr, 0, sizeof(HDR)); + __log_inmem_copyin(dblp, lp->b_off, &hdr, sizeof(HDR)); + lp->b_off = (lp->b_off + sizeof(HDR)) % lp->buffer_size; } - return; + + filestart = SH_TAILQ_FIRST(&lp->free_logfiles, __db_filestart); + if (filestart == NULL) { + if ((ret = __db_shalloc(&dblp->reginfo, + sizeof(struct __db_filestart), 0, &filestart)) != 0) + return (ret); + memset(filestart, 0, sizeof(*filestart)); + } else + SH_TAILQ_REMOVE(&lp->free_logfiles, filestart, + links, __db_filestart); + + filestart->file = file; + filestart->b_off = lp->b_off; + +#ifdef DIAGNOSTIC + first = SH_TAILQ_FIRST(&lp->logfiles, __db_filestart); + last = SH_TAILQ_LAST(&(lp)->logfiles, links, __db_filestart); + + /* Check that we don't wrap. */ + DB_ASSERT(!first || first == last || + RINGBUF_LEN(lp, first->b_off, lp->b_off) == + RINGBUF_LEN(lp, first->b_off, last->b_off) + + RINGBUF_LEN(lp, last->b_off, lp->b_off)); +#endif + + SH_TAILQ_INSERT_TAIL(&lp->logfiles, filestart, links); + return (0); +} + +/* + * __log_inmem_chkspace -- + * Ensure that the requested amount of space is available in the buffer, + * and invalidate the region. + * Note: assumes that the region lock is held on entry. 
+ * + * PUBLIC: int __log_inmem_chkspace __P((DB_LOG *, size_t)); + */ +int +__log_inmem_chkspace(dblp, len) + DB_LOG *dblp; + size_t len; +{ + LOG *lp; + DB_LSN active_lsn, old_active_lsn; + struct __db_filestart *filestart; + + lp = dblp->reginfo.primary; + + DB_ASSERT(lp->db_log_inmemory); + + /* + * If transactions are enabled and we're about to fill available space, + * update the active LSN and recheck. If transactions aren't enabled, + * don't even bother checking: in that case we can always overwrite old + * log records, because we're never going to abort. + * + * Allow room for an extra header so that we don't need to check for + * space when switching files. + */ + while (TXN_ON(dblp->dbenv) && + RINGBUF_LEN(lp, lp->b_off, lp->a_off) <= len + sizeof(HDR)) { + old_active_lsn = lp->active_lsn; + active_lsn = lp->lsn; + + /* + * Drop the log region lock so we don't hold it while + * taking the transaction region lock. + */ + R_UNLOCK(dblp->dbenv, &dblp->reginfo); + __txn_getactive(dblp->dbenv, &active_lsn); + R_LOCK(dblp->dbenv, &dblp->reginfo); + active_lsn.offset = 0; + + /* If we didn't make any progress, give up. */ + if (log_compare(&active_lsn, &old_active_lsn) == 0) { + __db_err(dblp->dbenv, + "In-memory log buffer is full (an active transaction spans the buffer)"); + return (DB_LOG_BUFFER_FULL); + } + + /* Make sure we're moving the region LSN forwards. */ + if (log_compare(&active_lsn, &lp->active_lsn) > 0) { + lp->active_lsn = active_lsn; + (void)__log_inmem_lsnoff(dblp, &active_lsn, + &lp->a_off); + } + } + + /* + * Remove the first file if it is invalidated by this write. + * Log records can't be bigger than a file, so we only need to + * check the first file. + */ + filestart = SH_TAILQ_FIRST(&lp->logfiles, __db_filestart); + if (filestart != NULL && + RINGBUF_LEN(lp, lp->b_off, filestart->b_off) <= len) { + SH_TAILQ_REMOVE(&lp->logfiles, filestart, + links, __db_filestart); + SH_TAILQ_INSERT_HEAD(&lp->free_logfiles, filestart, + links, __db_filestart); + lp->f_lsn.file = filestart->file + 1; + } + + return (0); +} + +/* + * __log_inmem_copyout -- + * Copies the given number of bytes from the buffer -- no checking. + * Note: assumes that the region lock is held on entry. + * + * PUBLIC: void __log_inmem_copyout __P((DB_LOG *, size_t, void *, size_t)); + */ +void +__log_inmem_copyout(dblp, offset, buf, size) + DB_LOG *dblp; + size_t offset; + void *buf; + size_t size; +{ + LOG *lp; + size_t nbytes; + + lp = (LOG *)dblp->reginfo.primary; + nbytes = (offset + size < lp->buffer_size) ? + size : lp->buffer_size - offset; + memcpy(buf, dblp->bufp + offset, nbytes); + if (nbytes < size) + memcpy((u_int8_t *)buf + nbytes, dblp->bufp, size - nbytes); +} + +/* + * __log_inmem_copyin -- + * Copies the given number of bytes into the buffer -- no checking. + * Note: assumes that the region lock is held on entry. + * + * PUBLIC: void __log_inmem_copyin __P((DB_LOG *, size_t, void *, size_t)); + */ +void +__log_inmem_copyin(dblp, offset, buf, size) + DB_LOG *dblp; + size_t offset; + void *buf; + size_t size; +{ + LOG *lp; + size_t nbytes; + + lp = (LOG *)dblp->reginfo.primary; + nbytes = (offset + size < lp->buffer_size) ? 
+ size : lp->buffer_size - offset; + memcpy(dblp->bufp + offset, buf, nbytes); + if (nbytes < size) + memcpy(dblp->bufp, (u_int8_t *)buf + nbytes, size - nbytes); } diff --git a/db/log/log_archive.c b/db/log/log_archive.c index b5c80820d..8b9e58163 100644 --- a/db/log/log_archive.c +++ b/db/log/log_archive.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: log_archive.c,v 11.62 2004/07/16 21:38:59 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: log_archive.c,v 11.51 2003/09/13 19:20:38 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -28,6 +26,7 @@ static const char revid[] = "$Id: log_archive.c,v 11.51 2003/09/13 19:20:38 bost static int __absname __P((DB_ENV *, char *, char *, char **)); static int __build_data __P((DB_ENV *, char *, char ***)); static int __cmpfunc __P((const void *, const void *)); +static int __log_archive __P((DB_ENV *, char **[], u_int32_t)); static int __usermem __P((DB_ENV *, char ***)); /* @@ -48,22 +47,34 @@ __log_archive_pp(dbenv, listp, flags) ENV_REQUIRES_CONFIG(dbenv, dbenv->lg_handle, "DB_ENV->log_archive", DB_INIT_LOG); +#define OKFLAGS (DB_ARCH_ABS | DB_ARCH_DATA | DB_ARCH_LOG | DB_ARCH_REMOVE) + if (flags != 0) { + if ((ret = __db_fchk( + dbenv, "DB_ENV->log_archive", flags, OKFLAGS)) != 0) + return (ret); + if ((ret = __db_fcchk(dbenv, "DB_ENV->log_archive", + flags, DB_ARCH_DATA, DB_ARCH_LOG)) != 0) + return (ret); + if ((ret = __db_fcchk(dbenv, "DB_ENV->log_archive", + flags, DB_ARCH_REMOVE, + DB_ARCH_ABS | DB_ARCH_DATA | DB_ARCH_LOG)) != 0) + return (ret); + } + rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; if (rep_check) __env_rep_enter(dbenv); ret = __log_archive(dbenv, listp, flags); if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } /* * __log_archive -- - * DB_ENV->log_archive. - * - * PUBLIC: int __log_archive __P((DB_ENV *, char **[], u_int32_t)); + * DB_ENV->log_archive. Internal. */ -int +static int __log_archive(dbenv, listp, flags) DB_ENV *dbenv; char ***listp; @@ -71,61 +82,39 @@ __log_archive(dbenv, listp, flags) { DBT rec; DB_LOG *dblp; + LOG *lp; DB_LOGC *logc; DB_LSN stable_lsn; __txn_ckp_args *ckp_args; - char **array, **arrayp, *name, *p, *pref, buf[MAXPATHLEN]; - int array_size, db_arch_abs, n, rep_check, ret; + u_int array_size, n; u_int32_t fnum; + int ret, t_ret; + char **array, **arrayp, *name, *p, *pref, buf[MAXPATHLEN]; - ret = 0; - name = NULL; - array = NULL; dblp = dbenv->lg_handle; + lp = (LOG *)dblp->reginfo.primary; + array = NULL; + name = NULL; + ret = 0; COMPQUIET(fnum, 0); -#define OKFLAGS (DB_ARCH_ABS | DB_ARCH_DATA | DB_ARCH_LOG | DB_ARCH_REMOVE) - if (flags != 0) { - if ((ret = __db_fchk( - dbenv, "DB_ENV->log_archive", flags, OKFLAGS)) != 0) - return (ret); - if ((ret = __db_fcchk(dbenv, "DB_ENV->log_archive", - flags, DB_ARCH_DATA, DB_ARCH_LOG)) != 0) - return (ret); - if ((ret = __db_fcchk(dbenv, "DB_ENV->log_archive", - flags, DB_ARCH_REMOVE, - DB_ARCH_ABS | DB_ARCH_DATA | DB_ARCH_LOG)) != 0) - return (ret); - } - - if (LF_ISSET(DB_ARCH_ABS)) { - db_arch_abs = 1; - LF_CLR(DB_ARCH_ABS); - } else - db_arch_abs = 0; + if (flags != DB_ARCH_REMOVE) + *listp = NULL; - if (flags == 0 || flags == DB_ARCH_DATA) - ENV_REQUIRES_CONFIG(dbenv, - dbenv->tx_handle, "DB_ENV->log_archive", DB_INIT_TXN); + /* There are no log files if logs are in memory. 
*/ + if (lp->db_log_inmemory) { + LF_CLR(~DB_ARCH_DATA); + if (flags == 0) + return (0); + } /* * If the user wants the list of log files to remove and we're - * at a bad time in replication initialization, give them - * back an empty list. Otherwise, wait until it's OK to run - * log archive. + * at a bad time in replication initialization, just return. */ - rep_check = 0; - if (flags == 0 || flags == DB_ARCH_REMOVE || db_arch_abs) { - if (__rep_noarchive(dbenv)) { - *listp = NULL; - ret = 0; - goto err; - } - } else { - rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; - if (rep_check) - __env_rep_enter(dbenv); - } + if (!LF_ISSET(DB_ARCH_DATA) && + !LF_ISSET(DB_ARCH_LOG) && __rep_noarchive(dbenv)) + return (0); /* * Get the absolute pathname of the current directory. It would @@ -136,49 +125,59 @@ __log_archive(dbenv, listp, flags) * Can't trust getcwd(3) to set a valid errno. If it doesn't, just * guess that we ran out of memory. */ - if (db_arch_abs) { + if (LF_ISSET(DB_ARCH_ABS)) { __os_set_errno(0); if ((pref = getcwd(buf, sizeof(buf))) == NULL) { if (__os_get_errno() == 0) __os_set_errno(ENOMEM); ret = __os_get_errno(); - goto err1; + goto err; } } else pref = NULL; + LF_CLR(DB_ARCH_ABS); switch (flags) { case DB_ARCH_DATA: - return (__build_data(dbenv, pref, listp)); + ret = __build_data(dbenv, pref, listp); + goto err; case DB_ARCH_LOG: memset(&rec, 0, sizeof(rec)); if ((ret = __log_cursor(dbenv, &logc)) != 0) - goto err1; + goto err; #ifdef UMRW ZERO_LSN(stable_lsn); #endif ret = __log_c_get(logc, &stable_lsn, &rec, DB_LAST); - (void)__log_c_close(logc); + if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) + ret = t_ret; if (ret != 0) - goto err1; + goto err; fnum = stable_lsn.file; break; case DB_ARCH_REMOVE: __log_autoremove(dbenv); - goto err1; + goto err; case 0: memset(&rec, 0, sizeof(rec)); - if (__txn_getckp(dbenv, &stable_lsn) != 0) { + if (!TXN_ON(dbenv)) { + __log_get_cached_ckp_lsn(dbenv, &stable_lsn); + if (IS_ZERO_LSN(stable_lsn) && (ret = + __txn_findlastckp(dbenv, &stable_lsn, NULL)) != 0) + goto err; + if (IS_ZERO_LSN(stable_lsn)) + goto err; + } + else if (__txn_getckp(dbenv, &stable_lsn) != 0) { /* * A failure return means that there's no checkpoint * in the log (so we are not going to be deleting * any log files). */ - *listp = NULL; - goto err1; + goto err; } if ((ret = __log_cursor(dbenv, &logc)) != 0) - goto err1; + goto err; if ((ret = __log_c_get(logc, &stable_lsn, &rec, DB_SET)) != 0 || (ret = __txn_ckp_read(dbenv, rec.data, &ckp_args)) != 0) { /* @@ -187,21 +186,24 @@ __log_archive(dbenv, listp, flags) * log files that we still have. This is not * an error; it just means our work is done. */ - if (ret == DB_NOTFOUND) { - *listp = NULL; + if (ret == DB_NOTFOUND) ret = 0; - } - (void)__log_c_close(logc); - goto err1; + if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) + ret = t_ret; + goto err; } if ((ret = __log_c_close(logc)) != 0) - goto err1; + goto err; stable_lsn = ckp_args->ckp_lsn; __os_free(dbenv, ckp_args); /* Remove any log files before the last stable LSN. */ fnum = stable_lsn.file - 1; break; + default: + DB_ASSERT(0); + ret = EINVAL; + goto err; } #define LIST_INCREMENT 64 @@ -209,7 +211,7 @@ __log_archive(dbenv, listp, flags) array_size = 64; if ((ret = __os_malloc(dbenv, sizeof(char *) * array_size, &array)) != 0) - goto err1; + goto err; array[0] = NULL; /* Build an array of the file names. 
*/ @@ -231,9 +233,9 @@ __log_archive(dbenv, listp, flags) goto err; } - if (db_arch_abs) { - if ((ret = __absname(dbenv, - pref, name, &array[n])) != 0) + if (pref != NULL) { + if ((ret = + __absname(dbenv, pref, name, &array[n])) != 0) goto err; __os_free(dbenv, name); } else if ((p = __db_rpath(name)) != NULL) { @@ -248,11 +250,8 @@ __log_archive(dbenv, listp, flags) } /* If there's nothing to return, we're done. */ - if (n == 0) { - *listp = NULL; - ret = 0; + if (n == 0) goto err; - } /* Sort the list. */ qsort(array, (size_t)n, sizeof(char *), __cmpfunc); @@ -261,21 +260,45 @@ __log_archive(dbenv, listp, flags) if ((ret = __usermem(dbenv, &array)) != 0) goto err; - *listp = array; - return (0); + if (listp != NULL) + *listp = array; -err: if (array != NULL) { - for (arrayp = array; *arrayp != NULL; ++arrayp) - __os_free(dbenv, *arrayp); - __os_free(dbenv, array); + if (0) { +err: if (array != NULL) { + for (arrayp = array; *arrayp != NULL; ++arrayp) + __os_free(dbenv, *arrayp); + __os_free(dbenv, array); + } + if (name != NULL) + __os_free(dbenv, name); } - if (name != NULL) - __os_free(dbenv, name); -err1: if (rep_check) - __env_rep_exit(dbenv); + return (ret); } +/* + * __log_autoremove -- + * Delete any non-essential log files. + * + * PUBLIC: void __log_autoremove __P((DB_ENV *)); + */ +void +__log_autoremove(dbenv) + DB_ENV *dbenv; +{ + char **begin, **list; + + if (__log_archive(dbenv, &list, DB_ARCH_ABS) != 0) + return; + + if (list != NULL) { + for (begin = list; *list != NULL; ++list) + (void)__os_unlink(dbenv, *list); + __os_ufree(dbenv, begin); + } + return; +} + /* * __build_data -- * Build a list of datafiles for return. @@ -289,8 +312,9 @@ __build_data(dbenv, pref, listp) DB_LOGC *logc; DB_LSN lsn; __dbreg_register_args *argp; + u_int array_size, last, n, nxt; u_int32_t rectype; - int array_size, last, n, nxt, ret, t_ret; + int ret, t_ret; char **array, **arrayp, **list, **lp, *p, *real_name; /* Get some initial space. */ @@ -307,7 +331,7 @@ __build_data(dbenv, pref, listp) if (rec.size < sizeof(rectype)) { ret = EINVAL; __db_err(dbenv, "DB_ENV->log_archive: bad log record"); - goto free_continue; + break; } memcpy(&rectype, rec.data, sizeof(rectype)); @@ -318,7 +342,7 @@ __build_data(dbenv, pref, listp) ret = EINVAL; __db_err(dbenv, "DB_ENV->log_archive: unable to read log record"); - goto free_continue; + break; } if (n >= array_size - 2) { diff --git a/db/log/log_compare.c b/db/log/log_compare.c index 6bc672718..97d0367ea 100644 --- a/db/log/log_compare.c +++ b/db/log/log_compare.c @@ -1,14 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: log_compare.c,v 11.8 2004/01/28 03:36:17 bostic Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: log_compare.c,v 11.7 2003/01/08 05:23:59 bostic Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #include diff --git a/db/log/log_get.c b/db/log/log_get.c index 82697d834..51cb4a3eb 100644 --- a/db/log/log_get.c +++ b/db/log/log_get.c @@ -1,14 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: log_get.c,v 11.110 2004/09/17 22:00:31 mjc Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: log_get.c,v 11.98 2003/09/13 19:20:38 bostic Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #include @@ -35,7 +34,7 @@ static int __log_c_inregion __P((DB_LOGC *, static int __log_c_io __P((DB_LOGC *, u_int32_t, u_int32_t, void *, size_t *, int *)); static int __log_c_ondisk __P((DB_LOGC *, - DB_LSN *, DB_LSN *, int, HDR *, u_int8_t **, int *)); + DB_LSN *, DB_LSN *, u_int32_t, HDR *, u_int8_t **, int *)); static int __log_c_set_maxrec __P((DB_LOGC *, char *)); static int __log_c_shortread __P((DB_LOGC *, DB_LSN *, int)); @@ -66,7 +65,7 @@ __log_cursor_pp(dbenv, logcp, flags) __env_rep_enter(dbenv); ret = __log_cursor(dbenv, logcp); if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -131,7 +130,7 @@ __log_c_close_pp(logc, flags) __env_rep_enter(dbenv); ret = __log_c_close(logc); if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -205,7 +204,7 @@ __log_c_get_pp(logc, alsn, dbt, flags) __env_rep_enter(dbenv); ret = __log_c_get(logc, alsn, dbt, flags); if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -262,6 +261,10 @@ __log_c_get(logc, alsn, dbt, flags) case DB_LAST: flags = DB_PREV; break; + case DB_NEXT: + case DB_PREV: + default: + break; } if (F_ISSET(dbt, DB_DBT_MALLOC)) { __os_free(dbenv, dbt->data); @@ -366,14 +369,15 @@ __log_c_get_int(logc, alsn, dbt, flags) if (!IS_ZERO_LSN(nlsn)) { /* If at start-of-file, move to the previous file. */ if (nlsn.offset == 0) { - if (nlsn.file == 1 || __log_valid(dblp, - nlsn.file - 1, 0, NULL, 0, &status) != 0) { + if (nlsn.file == 1) { ret = DB_NOTFOUND; goto err; } - - if (status != DB_LV_NORMAL && - status != DB_LV_OLD_READABLE) { + if ((!lp->db_log_inmemory && + (__log_valid(dblp, nlsn.file - 1, 0, NULL, + 0, &status) != 0 || + (status != DB_LV_NORMAL && + status != DB_LV_OLD_READABLE)))) { ret = DB_NOTFOUND; goto err; } @@ -395,6 +399,10 @@ __log_c_get_int(logc, alsn, dbt, flags) case DB_SET: /* Set log record. */ nlsn = *alsn; break; + default: + DB_ASSERT(0); + ret = EINVAL; + goto err; } if (0) { /* Move to the next file. */ @@ -425,12 +433,13 @@ next_file: ++nlsn.file; * coming from the disk -- it means the record can't be in the region's * buffer. Else, check the region's buffer. * - * If the record isn't in the region's buffer, we're going to have to - * read the record from disk. We want to make a point of not reading - * past the end of the logical log (after recovery, there may be data - * after the end of the logical log, not to mention the log file may - * have been pre-allocated). So, zero out last_lsn, and initialize it - * inside __log_c_inregion -- if it's still zero when we check it in + * If the record isn't in the region's buffer, then either logs are + * in-memory, and we're done, or we're going to have to read the + * record from disk. We want to make a point of not reading past the + * end of the logical log (after recovery, there may be data after the + * end of the logical log, not to mention the log file may have been + * pre-allocated). So, zero out last_lsn, and initialize it inside + * __log_c_inregion -- if it's still zero when we check it in * __log_c_ondisk, that's OK, it just means the logical end of the log * isn't an issue for this request. 
*/ @@ -444,6 +453,10 @@ next_file: ++nlsn.file; goto err; if (rp != NULL) goto cksum; + if (lp->db_log_inmemory) { + ret = DB_NOTFOUND; + goto err; + } } /* @@ -461,7 +474,7 @@ next_file: ++nlsn.file; if ((ret = __log_c_ondisk( logc, &nlsn, &last_lsn, flags, &hdr, &rp, &eof)) != 0) goto err; - if (eof == 1) { + if (eof) { /* * Only DB_NEXT automatically moves to the next file, and * it only happens once. @@ -517,7 +530,6 @@ cksum: /* case DB_NEXT: /* Zero'd records always indicate the end of a file. */ goto next_file; - case DB_LAST: case DB_PREV: /* @@ -528,9 +540,11 @@ cksum: /* * the first record in that new file should have its * prev field set correctly. */ - __db_err(dbenv, + __db_err(dbenv, "Encountered zero length records while traversing backwards"); - DB_ASSERT(0); + DB_ASSERT(0); + ret = __db_panic(dbenv, DB_RUNRECOVERY); + goto err; case DB_SET: default: /* Return the 0-length record. */ @@ -584,7 +598,7 @@ __log_c_incursor(logc, lsn, hdr, pp) u_int8_t **pp; { u_int8_t *p; - int eof; + int eof; *pp = NULL; @@ -642,9 +656,9 @@ __log_c_inregion(logc, lsn, rlockp, last_lsn, hdr, pp) DB_ENV *dbenv; DB_LOG *dblp; LOG *lp; - size_t len, nr; - u_int32_t b_disk, b_region; - int ret; + size_t b_region, len, nr; + u_int32_t b_disk; + int eof, ret; u_int8_t *p; dbenv = logc->dbenv; @@ -652,6 +666,7 @@ __log_c_inregion(logc, lsn, rlockp, last_lsn, hdr, pp) lp = ((DB_LOG *)logc->dbenv->lg_handle)->reginfo.primary; ret = 0; + b_region = 0; *pp = NULL; /* If we haven't yet acquired the log region lock, do so. */ @@ -671,7 +686,7 @@ __log_c_inregion(logc, lsn, rlockp, last_lsn, hdr, pp) * come later than this point if the log buffer isn't empty. */ *last_lsn = lp->lsn; - if (last_lsn->offset > lp->w_off) + if (!lp->db_log_inmemory && last_lsn->offset > lp->w_off) last_lsn->offset = lp->w_off; /* @@ -698,52 +713,62 @@ __log_c_inregion(logc, lsn, rlockp, last_lsn, hdr, pp) */ if (IS_ZERO_LSN(lp->lsn)) return (0); - if (lsn->file > lp->lsn.file || - (lsn->file == lp->lsn.file && lsn->offset >= lp->lsn.offset)) + if (log_compare(lsn, &lp->lsn) >= 0) return (DB_NOTFOUND); - if (lp->b_off == 0) - return (0); - if (lsn->file < lp->f_lsn.file || lsn->offset < lp->f_lsn.offset) + else if (lp->db_log_inmemory) { + if ((ret = __log_inmem_lsnoff(dblp, lsn, &b_region)) != 0) + return (ret); + } else if (lp->b_off == 0 || log_compare(lsn, &lp->f_lsn) < 0) return (0); /* * The current contents of the cursor's buffer will be useless for a - * future call -- trash it rather than try and make it look correct. + * future call, we're about to overwrite it -- trash it rather than + * try and make it look correct. */ - ZERO_LSN(logc->bp_lsn); + logc->bp_rlen = 0; /* * If the requested LSN is greater than the region buffer's first * byte, we know the entire record is in the buffer on a good LSN. * - * If we're given a bad LSN, the "entire" record might - * not be in our buffer in order to fail at the chksum. - * __log_c_hdrchk made sure our dest buffer fits, via - * bp_maxrec, but we also need to make sure we don't run off - * the end of this buffer, the src. + * If we're given a bad LSN, the "entire" record might not be in + * our buffer in order to fail at the chksum. __log_c_hdrchk made + * sure our dest buffer fits, via bp_maxrec, but we also need to + * make sure we don't run off the end of this buffer, the src. * - * If the header check fails for any reason, it must be because the - * LSN is bogus. Fail hard. 
+ * There is one case where the header check can fail: on a scan through + * in-memory logs, when we reach the end of a file we can read an empty + * heady. In that case, it's safe to return zero, here: it will be + * caught in our caller. Otherwise, the LSN is bogus. Fail hard. */ - if (lsn->offset > lp->f_lsn.offset) { - p = dblp->bufp + (lsn->offset - lp->w_off); - memcpy(hdr, p, hdr->size); - if (__log_c_hdrchk(logc, lsn, hdr, NULL)) + if (lp->db_log_inmemory || log_compare(lsn, &lp->f_lsn) > 0) { + if (!lp->db_log_inmemory) + b_region = lsn->offset - lp->w_off; + __log_inmem_copyout(dblp, b_region, hdr, hdr->size); + if (__log_c_hdrchk(logc, lsn, hdr, &eof) != 0) return (DB_NOTFOUND); - if (lsn->offset + hdr->len > lp->w_off + lp->buffer_size) + if (eof) + return (0); + if (lp->db_log_inmemory) { + if (RINGBUF_LEN(lp, b_region, lp->b_off) < hdr->len) + return (DB_NOTFOUND); + } else if (lsn->offset + hdr->len > lp->w_off + lp->buffer_size) return (DB_NOTFOUND); if (logc->bp_size <= hdr->len) { - len = ALIGN(hdr->len * 2, 128); + len = (size_t)DB_ALIGN(hdr->len * 2, 128); if ((ret = __os_realloc(logc->dbenv, len, &logc->bp)) != 0) return (ret); logc->bp_size = (u_int32_t)len; } - memcpy(logc->bp, p, hdr->len); + __log_inmem_copyout(dblp, b_region, logc->bp, hdr->len); *pp = logc->bp; return (0); } + DB_ASSERT(!lp->db_log_inmemory); + /* * There's a partial record, that is, the requested record starts * in a log file and finishes in the region buffer. We have to @@ -780,7 +805,7 @@ __log_c_inregion(logc, lsn, rlockp, last_lsn, hdr, pp) * of waiting. */ if (logc->bp_size <= b_region + b_disk) { - len = ALIGN((b_region + b_disk) * 2, 128); + len = (size_t)DB_ALIGN((b_region + b_disk) * 2, 128); if ((ret = __os_realloc(logc->dbenv, len, &logc->bp)) != 0) return (ret); logc->bp_size = (u_int32_t)len; @@ -825,7 +850,8 @@ static int __log_c_ondisk(logc, lsn, last_lsn, flags, hdr, pp, eofp) DB_LOGC *logc; DB_LSN *lsn, *last_lsn; - int flags, *eofp; + u_int32_t flags; + int *eofp; HDR *hdr; u_int8_t **pp; { @@ -844,8 +870,14 @@ __log_c_ondisk(logc, lsn, last_lsn, flags, hdr, pp, eofp) if (*eofp) return (0); - /* If we read 0 bytes, assume we've hit EOF. */ - if (nr == 0) { + /* + * If the read was successful, but we can't read a full header, assume + * we've hit EOF. We can't check that the header has been partially + * zeroed out, but it's unlikely that this is caused by a write failure + * since the header is written as a single write call and it's less + * than sector. + */ + if (nr < hdr->size) { *eofp = 1; return (0); } @@ -856,15 +888,11 @@ __log_c_ondisk(logc, lsn, last_lsn, flags, hdr, pp, eofp) if (*eofp) return (0); - /* Otherwise, we should have gotten the bytes we wanted. */ - if (nr < hdr->size) - return (__log_c_shortread(logc, lsn, 1)); - /* * Regardless of how we return, the previous contents of the cursor's * buffer are useless -- trash it. */ - ZERO_LSN(logc->bp_lsn); + logc->bp_rlen = 0; /* * Otherwise, we now (finally!) know how big the record is. (Maybe @@ -872,7 +900,7 @@ __log_c_ondisk(logc, lsn, last_lsn, flags, hdr, pp, eofp) * Make sure we have enough space. */ if (logc->bp_size <= hdr->len) { - len = ALIGN(hdr->len * 2, 128); + len = (size_t)DB_ALIGN(hdr->len * 2, 128); if ((ret = __os_realloc(dbenv, len, &logc->bp)) != 0) return (ret); logc->bp_size = (u_int32_t)len; @@ -911,9 +939,13 @@ __log_c_ondisk(logc, lsn, last_lsn, flags, hdr, pp, eofp) if (nr < (lsn->offset + hdr->len) - offset) return (__log_c_shortread(logc, lsn, 1)); - /* Set up the return information. 
*/ + /* + * Set up the return information. + * + * !!! + * No need to set the bp_lsn.file field, __log_c_io set it for us. + */ logc->bp_rlen = (u_int32_t)nr; - logc->bp_lsn.file = lsn->file; logc->bp_lsn.offset = offset; *pp = logc->bp + (lsn->offset - offset); @@ -933,16 +965,6 @@ __log_c_ondisk(logc, lsn, last_lsn, flags, hdr, pp, eofp) * and one made it to disk before a different one that logically precedes * it in the log file. * - * XXX - * I think there's a potential pre-allocation recovery flaw here -- if we - * fail to write a buffer at the end of a log file (by scheduling its - * write asynchronously, and it never making it to disk), then succeed in - * writing a log file block to a subsequent log file, I don't think we will - * detect that the buffer of 0's should have marked the end of the log files - * during recovery. I think we may need to always write some garbage after - * each block write if we pre-allocate log files. (At the moment, we do not - * pre-allocate, so this isn't currently an issue.) - * * Check for impossibly large records. The malloc should fail later, but we * have customers that run mallocs that treat all allocation failures as fatal * errors. @@ -1034,6 +1056,8 @@ __log_c_io(logc, fnum, offset, p, nrp, eofp) if (logc->c_fhp != NULL && logc->bp_lsn.file != fnum) { ret = __os_closehandle(dbenv, logc->c_fhp); logc->c_fhp = NULL; + logc->bp_lsn.file = 0; + if (ret != 0) return (ret); } @@ -1061,6 +1085,8 @@ __log_c_io(logc, fnum, offset, p, nrp, eofp) return (ret); } __os_free(dbenv, np); + + logc->bp_lsn.file = fnum; } /* Seek to the record's offset. */ diff --git a/db/log/log_method.c b/db/log/log_method.c index 3562afc40..1565a53a0 100644 --- a/db/log/log_method.c +++ b/db/log/log_method.c @@ -1,14 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: log_method.c,v 11.50 2004/09/22 16:26:15 bostic Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: log_method.c,v 11.38 2003/06/30 17:20:16 bostic Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #include @@ -21,11 +20,14 @@ static const char revid[] = "$Id: log_method.c,v 11.38 2003/06/30 17:20:16 bosti #include #endif +#ifdef HAVE_RPC +#include "db_server.h" +#endif + #include "db_int.h" #include "dbinc/log.h" #ifdef HAVE_RPC -#include "dbinc_auto/db_server.h" #include "dbinc_auto/rpc_client_ext.h" #endif @@ -51,7 +53,7 @@ __log_dbenv_create(dbenv) * the panic state or acquire a mutex in the DB_ENV create path. */ - dbenv->lg_bsize = LG_BSIZE_DEFAULT; + dbenv->lg_bsize = 0; dbenv->lg_regionmax = LG_BASE_REGION_SIZE; #ifdef HAVE_RPC @@ -75,6 +77,7 @@ __log_dbenv_create(dbenv) dbenv->log_flush = __dbcl_log_flush; dbenv->log_put = __dbcl_log_put; dbenv->log_stat = __dbcl_log_stat; + dbenv->log_stat_print = NULL; } else #endif { @@ -93,6 +96,7 @@ __log_dbenv_create(dbenv) dbenv->log_flush = __log_flush_pp; dbenv->log_put = __log_put_pp; dbenv->log_stat = __log_stat_pp; + dbenv->log_stat_print = __log_stat_print_pp; } } @@ -101,7 +105,15 @@ __log_get_lg_bsize(dbenv, lg_bsizep) DB_ENV *dbenv; u_int32_t *lg_bsizep; { - *lg_bsizep = dbenv->lg_bsize; + ENV_NOT_CONFIGURED(dbenv, + dbenv->lg_handle, "DB_ENV->get_lg_bsize", DB_INIT_LOG); + + if (LOGGING_ON(dbenv)) { + /* Cannot be set after open, no lock required to read. 
*/ + *lg_bsizep = ((LOG *) + ((DB_LOG *)dbenv->lg_handle)->reginfo.primary)->buffer_size; + } else + *lg_bsizep = dbenv->lg_bsize; return (0); } @@ -116,20 +128,8 @@ __log_set_lg_bsize(dbenv, lg_bsize) DB_ENV *dbenv; u_int32_t lg_bsize; { - u_int32_t lg_max; - ENV_ILLEGAL_AFTER_OPEN(dbenv, "DB_ENV->set_lg_bsize"); - if (lg_bsize == 0) - lg_bsize = LG_BSIZE_DEFAULT; - - /* Let's not be silly. */ - lg_max = dbenv->lg_size == 0 ? LG_MAX_DEFAULT : dbenv->lg_size; - if (lg_bsize > lg_max / 4) { - __db_err(dbenv, "log buffer size must be <= log file size / 4"); - return (EINVAL); - } - dbenv->lg_bsize = lg_bsize; return (0); } @@ -139,15 +139,16 @@ __log_get_lg_max(dbenv, lg_maxp) DB_ENV *dbenv; u_int32_t *lg_maxp; { - LOG *region; + DB_LOG *dblp; - if (F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) { - if (!LOGGING_ON(dbenv)) - return (__db_env_config( - dbenv, "get_lg_max", DB_INIT_LOG)); - region = ((DB_LOG *)dbenv->lg_handle)->reginfo.primary; + ENV_NOT_CONFIGURED(dbenv, + dbenv->lg_handle, "DB_ENV->get_lg_max", DB_INIT_LOG); - *lg_maxp = region->log_nsize; + if (LOGGING_ON(dbenv)) { + dblp = dbenv->lg_handle; + R_LOCK(dbenv, &dblp->reginfo); + *lg_maxp = ((LOG *)dblp->reginfo.primary)->log_nsize; + R_UNLOCK(dbenv, &dblp->reginfo); } else *lg_maxp = dbenv->lg_size; @@ -165,32 +166,25 @@ __log_set_lg_max(dbenv, lg_max) DB_ENV *dbenv; u_int32_t lg_max; { - LOG *region; - - if (lg_max == 0) - lg_max = LG_MAX_DEFAULT; - - if (F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) { - if (!LOGGING_ON(dbenv)) - return (__db_env_config( - dbenv, "set_lg_max", DB_INIT_LOG)); - region = ((DB_LOG *)dbenv->lg_handle)->reginfo.primary; - - /* Let's not be silly. */ - if (lg_max < region->buffer_size * 4) - goto err; - region->log_nsize = lg_max; - } else { - /* Let's not be silly. */ - if (lg_max < dbenv->lg_bsize * 4) - goto err; + DB_LOG *dblp; + LOG *lp; + int ret; + + ENV_NOT_CONFIGURED(dbenv, + dbenv->lg_handle, "DB_ENV->set_lg_max", DB_INIT_LOG); + + if (LOGGING_ON(dbenv)) { + if ((ret = __log_check_sizes(dbenv, lg_max, 0)) != 0) + return (ret); + dblp = dbenv->lg_handle; + lp = dblp->reginfo.primary; + R_LOCK(dbenv, &dblp->reginfo); + lp->log_nsize = lg_max; + R_UNLOCK(dbenv, &dblp->reginfo); + } else dbenv->lg_size = lg_max; - } return (0); - -err: __db_err(dbenv, "log file size must be >= log buffer size * 4"); - return (EINVAL); } static int @@ -198,7 +192,15 @@ __log_get_lg_regionmax(dbenv, lg_regionmaxp) DB_ENV *dbenv; u_int32_t *lg_regionmaxp; { - *lg_regionmaxp = dbenv->lg_regionmax; + ENV_NOT_CONFIGURED(dbenv, + dbenv->lg_handle, "DB_ENV->get_lg_regionmax", DB_INIT_LOG); + + if (LOGGING_ON(dbenv)) { + /* Cannot be set after open, no lock required to read. */ + *lg_regionmaxp = ((LOG *) + ((DB_LOG *)dbenv->lg_handle)->reginfo.primary)->regionmax; + } else + *lg_regionmaxp = dbenv->lg_regionmax; return (0); } @@ -250,3 +252,99 @@ __log_set_lg_dir(dbenv, dir) __os_free(dbenv, dbenv->db_log_dir); return (__os_strdup(dbenv, dir, &dbenv->db_log_dir)); } + +/* + * __log_get_flags -- + * DB_ENV->get_flags. 
+ * + * PUBLIC: void __log_get_flags __P((DB_ENV *, u_int32_t *)); + */ +void +__log_get_flags(dbenv, flagsp) + DB_ENV *dbenv; + u_int32_t *flagsp; +{ + DB_LOG *dblp; + LOG *lp; + u_int32_t flags; + + if ((dblp = dbenv->lg_handle) == NULL) + return; + + lp = dblp->reginfo.primary; + + flags = *flagsp; + if (lp->db_log_autoremove) + LF_SET(DB_LOG_AUTOREMOVE); + else + LF_CLR(DB_LOG_AUTOREMOVE); + if (lp->db_log_inmemory) + LF_SET(DB_LOG_INMEMORY); + else + LF_CLR(DB_LOG_INMEMORY); + *flagsp = flags; +} + +/* + * __log_set_flags -- + * DB_ENV->set_flags. + * + * PUBLIC: void __log_set_flags __P((DB_ENV *, u_int32_t, int)); + */ +void +__log_set_flags(dbenv, flags, on) + DB_ENV *dbenv; + u_int32_t flags; + int on; +{ + DB_LOG *dblp; + LOG *lp; + + if ((dblp = dbenv->lg_handle) == NULL) + return; + + lp = dblp->reginfo.primary; + + if (LF_ISSET(DB_LOG_AUTOREMOVE)) + lp->db_log_autoremove = on ? 1 : 0; + if (LF_ISSET(DB_LOG_INMEMORY)) + lp->db_log_inmemory = on ? 1 : 0; +} + +/* + * __log_check_sizes -- + * Makes sure that the log file size and log buffer size are compatible. + * + * PUBLIC: int __log_check_sizes __P((DB_ENV *, u_int32_t, u_int32_t)); + */ +int +__log_check_sizes(dbenv, lg_max, lg_bsize) + DB_ENV *dbenv; + u_int32_t lg_max; + u_int32_t lg_bsize; +{ + LOG *lp; + int inmem; + + if (LOGGING_ON(dbenv)) { + lp = ((DB_LOG *)dbenv->lg_handle)->reginfo.primary; + inmem = lp->db_log_inmemory; + lg_bsize = lp->buffer_size; + } else + inmem = (F_ISSET(dbenv, DB_ENV_LOG_INMEMORY) != 0); + + if (inmem) { + if (lg_bsize == 0) + lg_bsize = LG_BSIZE_INMEM; + if (lg_max == 0) + lg_max = LG_MAX_INMEM; + + if (lg_bsize <= lg_max) { + __db_err(dbenv, + "in-memory log buffer must be larger than the log file size"); + return (EINVAL); + } + } + + return (0); +} diff --git a/db/log/log_put.c b/db/log/log_put.c index f3b92acc3..64177e6ec 100644 --- a/db/log/log_put.c +++ b/db/log/log_put.c @@ -1,14 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: log_put.c,v 11.167 2004/09/29 15:06:40 bostic Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: log_put.c,v 11.145 2003/09/13 19:20:39 bostic Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #include @@ -38,7 +37,7 @@ static int __log_encrypt_record __P((DB_ENV *, DBT *, HDR *, u_int32_t)); static int __log_file __P((DB_ENV *, const DB_LSN *, char *, size_t)); static int __log_fill __P((DB_LOG *, DB_LSN *, void *, u_int32_t)); static int __log_flush_commit __P((DB_ENV *, const DB_LSN *, u_int32_t)); -static int __log_newfh __P((DB_LOG *)); +static int __log_newfh __P((DB_LOG *, int)); static int __log_put_next __P((DB_ENV *, DB_LSN *, const DBT *, HDR *, DB_LSN *)); static int __log_putr __P((DB_LOG *, @@ -86,7 +85,7 @@ __log_put_pp(dbenv, lsnp, udbt, flags) __env_rep_enter(dbenv); ret = __log_put(dbenv, lsnp, udbt, flags); if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -118,6 +117,7 @@ __log_put(dbenv, lsnp, udbt, flags) dbt = &t; t = *udbt; lock_held = need_free = 0; + ZERO_LSN(old_lsn); /* * If we are coming from the logging code, we use an internal flag, @@ -151,39 +151,45 @@ __log_put(dbenv, lsnp, udbt, flags) R_LOCK(dbenv, &dblp->reginfo); lock_held = 1; - ZERO_LSN(old_lsn); if ((ret = __log_put_next(dbenv, &lsn, dbt, &hdr, &old_lsn)) != 0) goto panic_check; + /* + * If we are not a rep application, but are sharing a master rep env, + * we should not be writing log records. + */ + if (IS_REP_MASTER(dbenv) && dbenv->rep_send == NULL) { + __db_err(dbenv, "%s %s", + "Non-replication DB_ENV handle attempting", + "to modify a replicated environment"); + ret = EINVAL; + goto err; + } + + /* + * Assign the return LSN before dropping the region lock. Necessary + * in case the lsn is a begin_lsn from a TXN_DETAIL structure passed + * in by the logging routines. + */ + *lsnp = lsn; + if (IS_REP_MASTER(dbenv)) { /* - * Replication masters need to drop the lock to send - * messages, but we want to drop and reacquire it a minimal - * number of times. + * Replication masters need to drop the lock to send messages, + * but want to drop and reacquire it a minimal number of times. */ R_UNLOCK(dbenv, &dblp->reginfo); lock_held = 0; - /* - * If we are not a rep application, but are sharing a - * master rep env, we should not be writing log records. - */ - if (dbenv->rep_send == NULL) { - __db_err(dbenv, "%s %s", - "Non-replication DB_ENV handle attempting", - "to modify a replicated environment"); - ret = EINVAL; - goto err; - } /* - * If we changed files and we're in a replicated - * environment, we need to inform our clients now that - * we've dropped the region lock. + * If we changed files and we're in a replicated environment, + * we need to inform our clients now that we've dropped the + * region lock. * - * Note that a failed NEWFILE send is a dropped message - * that our client can handle, so we can ignore it. It's - * possible that the record we already put is a commit, so - * we don't just want to return failure. + * Note that a failed NEWFILE send is a dropped message that + * our client can handle, so we can ignore it. It's possible + * that the record we already put is a commit, so we don't just + * want to return failure. */ if (!IS_ZERO_LSN(old_lsn)) (void)__rep_send_message(dbenv, @@ -226,8 +232,6 @@ __log_put(dbenv, lsnp, udbt, flags) goto panic_check; } - *lsnp = lsn; - /* * If flushed a checkpoint record, reset the "bytes since the last * checkpoint" counters. 
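Illustrative aside, not part of the patch: the __log_vtruncate hunk earlier in this section and the "bytes since the last checkpoint" comment just above use the same accounting: the remainder of the checkpoint's log file, plus any whole log files in between, plus the offset into the current file, with every file assumed to be lp->log_size bytes long. The standalone C sketch below works through that arithmetic; struct lsn, bytes_since_ckp and the 10000-byte file size are illustrative assumptions, not Berkeley DB code.

/*
 * Illustrative sketch only, not part of the Berkeley DB patch.
 * Counts the log bytes between a checkpoint LSN and the current LSN,
 * assuming every log file is log_size bytes long.
 */
#include <stdio.h>

struct lsn {
	unsigned int file;	/* log file number */
	unsigned int offset;	/* byte offset within that file */
};

static unsigned int
bytes_since_ckp(struct lsn ckp, struct lsn cur, unsigned int log_size)
{
	unsigned int bytes;

	if (cur.file == ckp.file)
		return (cur.offset - ckp.offset);

	/* Remainder of the checkpoint's file ... */
	bytes = log_size - ckp.offset;
	/* ... plus any whole files in between ... */
	if (cur.file > ckp.file + 1)
		bytes += log_size * (cur.file - ckp.file - 1);
	/* ... plus the portion of the current file already written. */
	return (bytes + cur.offset);
}

int
main(void)
{
	struct lsn ckp = { 3, 1000 };
	struct lsn cur = { 5, 200 };

	/* (10000 - 1000) + 10000 + 200 = 19200 bytes since the checkpoint. */
	printf("%u\n", bytes_since_ckp(ckp, cur, 10000));
	return (0);
}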
@@ -246,17 +250,17 @@ panic_check: /* if (ret != 0 && IS_REP_MASTER(dbenv)) ret = __db_panic(dbenv, ret); } -err: - if (lock_held) + +err: if (lock_held) R_UNLOCK(dbenv, &dblp->reginfo); if (need_free) __os_free(dbenv, dbt->data); + /* * If auto-remove is set and we switched files, remove unnecessary * log files. */ - if (ret == 0 && - F_ISSET(dbenv, DB_ENV_LOG_AUTOREMOVE) && !IS_ZERO_LSN(old_lsn)) + if (ret == 0 && !IS_ZERO_LSN(old_lsn) && lp->db_log_autoremove) __log_autoremove(dbenv); return (ret); @@ -284,7 +288,7 @@ __log_txn_lsn(dbenv, lsnp, mbytesp, bytesp) /* * We are trying to get the LSN of the last entry in the log. We use - * this in two places: 1) DB_ENV->txn_checkpiont uses it as a first + * this in two places: 1) DB_ENV->txn_checkpoint uses it as a first * value when trying to compute an LSN such that all transactions begun * before it are complete. 2) DB_ENV->txn_begin uses it as the * begin_lsn. @@ -361,7 +365,7 @@ __log_put_next(dbenv, lsn, dbt, hdr, old_lsnp) return (EINVAL); } - if ((ret = __log_newfile(dblp, NULL)) != 0) + if ((ret = __log_newfile(dblp, NULL, 0)) != 0) return (ret); /* @@ -418,7 +422,7 @@ __log_flush_commit(dbenv, lsnp, flags) */ if (LF_ISSET(DB_FLUSH)) ret = __log_flush_int(dblp, &flush_lsn, 1); - else if (lp->b_off != 0) + else if (!lp->db_log_inmemory && lp->b_off != 0) if ((ret = __log_write(dblp, dblp->bufp, (u_int32_t)lp->b_off)) == 0) lp->b_off = 0; @@ -439,7 +443,7 @@ __log_flush_commit(dbenv, lsnp, flags) * Else, make sure that the commit record does not get out after we * abort the transaction. Do this by overwriting the commit record * in the buffer. (Note that other commits in this buffer will wait - * wait until a sucessful write happens, we do not wake them.) We + * wait until a successful write happens, we do not wake them.) We * point at the right part of the buffer and write an abort record * over the commit. We must then try and flush the buffer again, * since the interesting part of the buffer may have actually made @@ -457,12 +461,13 @@ __log_flush_commit(dbenv, lsnp, flags) * Initialize and switch to a new log file. (Note that this is * called both when no log yet exists and when we fill a log file.) * - * PUBLIC: int __log_newfile __P((DB_LOG *, DB_LSN *)); + * PUBLIC: int __log_newfile __P((DB_LOG *, DB_LSN *, u_int32_t)); */ int -__log_newfile(dblp, lsnp) +__log_newfile(dblp, lsnp, logfile) DB_LOG *dblp; DB_LSN *lsnp; + u_int32_t logfile; { DB_CIPHER *db_cipher; DB_ENV *dbenv; @@ -478,6 +483,8 @@ __log_newfile(dblp, lsnp) dbenv = dblp->dbenv; lp = dblp->reginfo.primary; + DB_ASSERT(logfile == 0 || logfile > lp->lsn.file); + /* If we're not at the beginning of a file already, start a new one. */ if (lp->lsn.offset != 0) { /* @@ -490,13 +497,12 @@ __log_newfile(dblp, lsnp) * require all threads to wait here so that the lsn.file * can be moved ahead after the flush completes. This * probably can be changed if we had an lsn for the - * previous file and one for the curent, but it does not + * previous file and one for the current, but it does not * seem like this would get much more throughput, if any. */ if ((ret = __log_flush_int(dblp, NULL, 0)) != 0) return (ret); - DB_ASSERT(lp->b_off == 0); /* * Save the last known offset from the previous file, we'll * need it to initialize the persistent header information. @@ -512,13 +518,28 @@ __log_newfile(dblp, lsnp) } else lastoff = 0; + /* + * Replication may require we reset the log file name space entirely. 
+ * In that case we also force a file switch so that replication can + * clean up old files. + */ + if (logfile != 0) { + lp->lsn.file = logfile; + lp->lsn.offset = 0; + if ((ret = __log_newfh(dblp, 1)) != 0) + return (ret); + } + + DB_ASSERT(lp->db_log_inmemory || lp->b_off == 0); + if (lp->db_log_inmemory && + (ret = __log_inmem_newfile(dblp, lp->lsn.file)) != 0) + return (ret); + /* * Insert persistent information as the first record in every file. * Note that the previous length is wrong for the very first record * of the log, but that's okay, we check for it during retrieval. */ - DB_ASSERT(lp->b_off == 0); - memset(&t, 0, sizeof(t)); memset(&hdr, 0, sizeof(HDR)); @@ -549,8 +570,7 @@ __log_newfile(dblp, lsnp) if (lsnp != NULL) *lsnp = lp->lsn; -err: - if (need_free) +err: if (need_free) __os_free(dbenv, tmp); return (ret); } @@ -619,6 +639,10 @@ __log_putr(dblp, lsn, dbt, prev, h) (CRYPTO_ON(dbenv)) ? db_cipher->mac_key : NULL, hdr->chksum); + if (lp->db_log_inmemory && (ret = __log_inmem_chkspace(dblp, + (u_int32_t)hdr->size + dbt->size)) != 0) + goto err; + if ((ret = __log_fill(dblp, lsn, hdr, (u_int32_t)hdr->size)) != 0) goto err; @@ -635,6 +659,7 @@ err: * and be ignored. */ if (w_off + lp->buffer_size < lp->w_off) { + DB_ASSERT(!lp->db_log_inmemory); if ((t_ret = __os_seek(dbenv, dblp->lfhp, 0, 0, w_off, 0, DB_OS_SEEK_SET)) != 0 || (t_ret = __os_read(dbenv, dblp->lfhp, dblp->bufp, @@ -676,7 +701,7 @@ __log_flush_pp(dbenv, lsn) __env_rep_enter(dbenv); ret = __log_flush(dbenv, lsn); if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -726,10 +751,16 @@ __log_flush_int(dblp, lsnp, release) dbenv = dblp->dbenv; lp = dblp->reginfo.primary; - flush_mutexp = R_ADDR(&dblp->reginfo, lp->flush_mutex_off); + flush_mutexp = R_ADDR(dbenv, &dblp->reginfo, lp->flush_mutex_off); ncommit = 0; ret = 0; + if (lp->db_log_inmemory) { + lp->s_lsn = lp->lsn; + ++lp->stat.st_scount; + return (0); + } + /* * If no LSN specified, flush the entire log by setting the flush LSN * to the last LSN written in the log. Otherwise, check that the LSN @@ -749,7 +780,7 @@ __log_flush_int(dblp, lsnp, release) "Database environment corrupt; the wrong log files may", "have been removed or incompatible database files imported", "from another environment"); - return (EINVAL); + return (__db_panic(dbenv, DB_RUNRECOVERY)); } else { /* * See if we need to wait. s_lsn is not locked so some @@ -777,8 +808,7 @@ __log_flush_int(dblp, lsnp, release) if (release && lp->in_flush != 0) { if ((commit = SH_TAILQ_FIRST( &lp->free_commits, __db_commit)) == NULL) { - if ((ret = - __db_shalloc(dblp->reginfo.addr, + if ((ret = __db_shalloc(&dblp->reginfo, sizeof(struct __db_commit), MUTEX_ALIGN, &commit)) != 0) goto flush; @@ -786,7 +816,7 @@ __log_flush_int(dblp, lsnp, release) if ((ret = __db_mutex_setup(dbenv, &dblp->reginfo, &commit->mutex, MUTEX_SELF_BLOCK | MUTEX_NO_RLOCK)) != 0) { - __db_shalloc_free(dblp->reginfo.addr, commit); + __db_shalloc_free(&dblp->reginfo, commit); return (ret); } MUTEX_LOCK(dbenv, &commit->mutex); @@ -851,6 +881,10 @@ flush: MUTEX_LOCK(dbenv, flush_mutexp); * We may need to write the current buffer. We have to write the * current buffer if the flush LSN is greater than or equal to the * buffer's starting LSN. + * + * Otherwise, it's still possible that this thread may never have + * written to this log file. Acquire a file descriptor if we don't + * already have one. 
*/ if (lp->b_off != 0 && log_compare(&flush_lsn, &lp->f_lsn) >= 0) { if ((ret = __log_write(dblp, @@ -860,14 +894,8 @@ flush: MUTEX_LOCK(dbenv, flush_mutexp); } lp->b_off = 0; - } - - /* - * It's possible that this thread may never have written to this log - * file. Acquire a file descriptor if we don't already have one. - */ - if (dblp->lfhp == NULL || dblp->lfname != lp->lsn.file) - if ((ret = __log_newfh(dblp)) != 0) { + } else if (dblp->lfhp == NULL || dblp->lfname != lp->lsn.file) + if ((ret = __log_newfh(dblp, 0)) != 0) { MUTEX_UNLOCK(dbenv, flush_mutexp); goto done; } @@ -971,6 +999,12 @@ __log_fill(dblp, lsn, addr, len) lp = dblp->reginfo.primary; bsize = lp->buffer_size; + if (lp->db_log_inmemory) { + __log_inmem_copyin(dblp, lp->b_off, addr, len); + lp->b_off = (lp->b_off + len) % lp->buffer_size; + return (0); + } + while (len > 0) { /* Copy out the data. */ /* * If we're beginning a new buffer, note the user LSN to which @@ -1032,14 +1066,31 @@ __log_write(dblp, addr, len) dbenv = dblp->dbenv; lp = dblp->reginfo.primary; + DB_ASSERT(!lp->db_log_inmemory); + /* - * If we haven't opened the log file yet or the current one - * has changed, acquire a new log file. + * If we haven't opened the log file yet or the current one has + * changed, acquire a new log file. We are creating the file if we're + * about to write to the start of it, in other words, if the write + * offset is zero. */ if (dblp->lfhp == NULL || dblp->lfname != lp->lsn.file) - if ((ret = __log_newfh(dblp)) != 0) + if ((ret = __log_newfh(dblp, lp->w_off == 0)) != 0) return (ret); + /* + * If we're writing the first block in a log file on a filesystem that + * guarantees unwritten blocks are zero-filled, we set the size of the + * file in advance. This increases sync performance on some systems, + * because they don't need to update metadata on every sync. + */ +#ifdef HAVE_FILESYSTEM_NOTZERO + if (lp->w_off == 0 && !__os_fs_notzero()) +#else + if (lp->w_off == 0) +#endif + ret = __db_fileinit(dbenv, dblp->lfhp, lp->log_size, 0); + /* * Seek to the offset in the file (someone may have written it * since we last did). @@ -1085,12 +1136,18 @@ __log_file_pp(dbenv, lsn, namep, len) ENV_REQUIRES_CONFIG(dbenv, dbenv->lg_handle, "DB_ENV->log_file", DB_INIT_LOG); + if (F_ISSET(dbenv, DB_ENV_LOG_INMEMORY)) { + __db_err(dbenv, + "DB_ENV->log_file is illegal with in-memory logs."); + return (EINVAL); + } + rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; if (rep_check) __env_rep_enter(dbenv); ret = __log_file(dbenv, lsn, namep, len); if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -1133,8 +1190,9 @@ __log_file(dbenv, lsn, namep, len) * Acquire a file handle for the current log file. */ static int -__log_newfh(dblp) +__log_newfh(dblp, create) DB_LOG *dblp; + int create; { DB_ENV *dbenv; LOG *lp; @@ -1151,19 +1209,10 @@ __log_newfh(dblp) dblp->lfhp = NULL; } - /* - * Adding DB_OSO_LOG to the flags may add additional platform-specific - * optimizations. On WinNT, the logfile is preallocated, which may - * have a time penalty at startup, but have better overall throughput. - * We are not certain that this works reliably, so enable at your own - * risk. - */ - flags = DB_OSO_CREATE | DB_OSO_SEQ | - (F_ISSET(dbenv, DB_ENV_DIRECT_LOG) ? DB_OSO_DIRECT : 0); - -#ifdef SET_DB_OSO_LOG - LF_SET(DB_OSO_LOG); -#endif + flags = DB_OSO_LOG | DB_OSO_SEQ | + (create ? DB_OSO_CREATE : 0) | + (F_ISSET(dbenv, DB_ENV_DIRECT_LOG) ? DB_OSO_DIRECT : 0) | + (F_ISSET(dbenv, DB_ENV_DSYNC_LOG) ? 
DB_OSO_DSYNC : 0); /* Get the path of the new file and open it. */ dblp->lfname = lp->lsn.file; @@ -1200,6 +1249,8 @@ __log_name(dblp, filenumber, namep, fhpp, flags) dbenv = dblp->dbenv; lp = dblp->reginfo.primary; + DB_ASSERT(!lp->db_log_inmemory); + /* * !!! * The semantics of this routine are bizarre. @@ -1224,8 +1275,8 @@ __log_name(dblp, filenumber, namep, fhpp, flags) return (ret); /* Open the new-style file -- if we succeed, we're done. */ - if ((ret = __os_open_extend(dbenv, - *namep, lp->log_size, 0, flags, lp->persist.mode, fhpp)) == 0) + if ((ret = __os_open_extend(dbenv, *namep, 0, flags, + (int)lp->persist.mode, fhpp)) == 0) return (0); /* @@ -1249,7 +1300,7 @@ __log_name(dblp, filenumber, namep, fhpp, flags) * name to the caller. */ if ((ret = - __os_open(dbenv, oname, flags, lp->persist.mode, fhpp)) == 0) { + __os_open(dbenv, oname, flags, (int)lp->persist.mode, fhpp)) == 0) { __os_free(dbenv, *namep); *namep = oname; return (0); diff --git a/db/log/log_stat.c b/db/log/log_stat.c new file mode 100644 index 000000000..ebba564fd --- /dev/null +++ b/db/log/log_stat.c @@ -0,0 +1,337 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1996-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: log_stat.c,v 11.148 2004/09/15 21:49:18 mjc Exp $ + */ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include + +#include +#endif + +#include "db_int.h" +#include "dbinc/db_page.h" +#include "dbinc/db_am.h" +#include "dbinc/log.h" + +#ifdef HAVE_STATISTICS +static int __log_print_all __P((DB_ENV *, u_int32_t)); +static int __log_print_stats __P((DB_ENV *, u_int32_t)); +static int __log_stat __P((DB_ENV *, DB_LOG_STAT **, u_int32_t)); + +/* + * __log_stat_pp -- + * DB_ENV->log_stat pre/post processing. + * + * PUBLIC: int __log_stat_pp __P((DB_ENV *, DB_LOG_STAT **, u_int32_t)); + */ +int +__log_stat_pp(dbenv, statp, flags) + DB_ENV *dbenv; + DB_LOG_STAT **statp; + u_int32_t flags; +{ + int rep_check, ret; + + PANIC_CHECK(dbenv); + ENV_REQUIRES_CONFIG(dbenv, + dbenv->lg_handle, "DB_ENV->log_stat", DB_INIT_LOG); + + if ((ret = __db_fchk(dbenv, + "DB_ENV->log_stat", flags, DB_STAT_CLEAR)) != 0) + return (ret); + + rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; + if (rep_check) + __env_rep_enter(dbenv); + ret = __log_stat(dbenv, statp, flags); + if (rep_check) + __env_db_rep_exit(dbenv); + return (ret); +} + +/* + * __log_stat -- + * DB_ENV->log_stat. + */ +static int +__log_stat(dbenv, statp, flags) + DB_ENV *dbenv; + DB_LOG_STAT **statp; + u_int32_t flags; +{ + DB_LOG *dblp; + DB_LOG_STAT *stats; + LOG *lp; + int ret; + + *statp = NULL; + + dblp = dbenv->lg_handle; + lp = dblp->reginfo.primary; + + if ((ret = __os_umalloc(dbenv, sizeof(DB_LOG_STAT), &stats)) != 0) + return (ret); + + /* Copy out the global statistics. 
*/ + R_LOCK(dbenv, &dblp->reginfo); + *stats = lp->stat; + if (LF_ISSET(DB_STAT_CLEAR)) + memset(&lp->stat, 0, sizeof(lp->stat)); + + stats->st_magic = lp->persist.magic; + stats->st_version = lp->persist.version; + stats->st_mode = (int)lp->persist.mode; + stats->st_lg_bsize = lp->buffer_size; + stats->st_lg_size = lp->log_nsize; + + stats->st_region_wait = dblp->reginfo.rp->mutex.mutex_set_wait; + stats->st_region_nowait = dblp->reginfo.rp->mutex.mutex_set_nowait; + if (LF_ISSET(DB_STAT_CLEAR)) + MUTEX_CLEAR(&dblp->reginfo.rp->mutex); + stats->st_regsize = dblp->reginfo.rp->size; + + stats->st_cur_file = lp->lsn.file; + stats->st_cur_offset = lp->lsn.offset; + stats->st_disk_file = lp->s_lsn.file; + stats->st_disk_offset = lp->s_lsn.offset; + + R_UNLOCK(dbenv, &dblp->reginfo); + + *statp = stats; + return (0); +} + +/* + * __log_stat_print_pp -- + * DB_ENV->log_stat_print pre/post processing. + * + * PUBLIC: int __log_stat_print_pp __P((DB_ENV *, u_int32_t)); + */ +int +__log_stat_print_pp(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + int rep_check, ret; + + PANIC_CHECK(dbenv); + ENV_REQUIRES_CONFIG(dbenv, + dbenv->lg_handle, "DB_ENV->log_stat_print", DB_INIT_LOG); + + if ((ret = __db_fchk(dbenv, "DB_ENV->log_stat_print", + flags, DB_STAT_ALL | DB_STAT_CLEAR)) != 0) + return (ret); + + rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; + if (rep_check) + __env_rep_enter(dbenv); + ret = __log_stat_print(dbenv, flags); + if (rep_check) + __env_db_rep_exit(dbenv); + return (ret); +} + +/* + * __log_stat_print -- + * DB_ENV->log_stat_print method. + * + * PUBLIC: int __log_stat_print __P((DB_ENV *, u_int32_t)); + */ +int +__log_stat_print(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + u_int32_t orig_flags; + int ret; + + orig_flags = flags; + LF_CLR(DB_STAT_CLEAR); + if (flags == 0 || LF_ISSET(DB_STAT_ALL)) { + ret = __log_print_stats(dbenv, orig_flags); + if (flags == 0 || ret != 0) + return (ret); + } + + if (LF_ISSET(DB_STAT_ALL) && + (ret = __log_print_all(dbenv, orig_flags)) != 0) + return (ret); + + return (0); +} + +/* + * __log_print_stats -- + * Display default log region statistics. 
+ */ +static int +__log_print_stats(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + DB_LOG_STAT *sp; + int ret; + + if ((ret = __log_stat(dbenv, &sp, flags)) != 0) + return (ret); + + if (LF_ISSET(DB_STAT_ALL)) + __db_msg(dbenv, "Default logging region information:"); + STAT_HEX("Log magic number", sp->st_magic); + STAT_ULONG("Log version number", sp->st_version); + __db_dlbytes(dbenv, "Log record cache size", + (u_long)0, (u_long)0, (u_long)sp->st_lg_bsize); + __db_msg(dbenv, "%#o\tLog file mode", sp->st_mode); + if (sp->st_lg_size % MEGABYTE == 0) + __db_msg(dbenv, "%luMb\tCurrent log file size", + (u_long)sp->st_lg_size / MEGABYTE); + else if (sp->st_lg_size % 1024 == 0) + __db_msg(dbenv, "%luKb\tCurrent log file size", + (u_long)sp->st_lg_size / 1024); + else + __db_msg(dbenv, "%lu\tCurrent log file size", + (u_long)sp->st_lg_size); + __db_dlbytes(dbenv, "Log bytes written", + (u_long)0, (u_long)sp->st_w_mbytes, (u_long)sp->st_w_bytes); + __db_dlbytes(dbenv, "Log bytes written since last checkpoint", + (u_long)0, (u_long)sp->st_wc_mbytes, (u_long)sp->st_wc_bytes); + __db_dl(dbenv, "Total log file writes", (u_long)sp->st_wcount); + __db_dl(dbenv, "Total log file write due to overflow", + (u_long)sp->st_wcount_fill); + __db_dl(dbenv, "Total log file flushes", (u_long)sp->st_scount); + STAT_ULONG("Current log file number", sp->st_cur_file); + STAT_ULONG("Current log file offset", sp->st_cur_offset); + STAT_ULONG("On-disk log file number", sp->st_disk_file); + STAT_ULONG("On-disk log file offset", sp->st_disk_offset); + + __db_dl(dbenv, + "Maximum commits in a log flush", (u_long)sp->st_maxcommitperflush); + __db_dl(dbenv, + "Minimum commits in a log flush", (u_long)sp->st_mincommitperflush); + + __db_dlbytes(dbenv, "Log region size", + (u_long)0, (u_long)0, (u_long)sp->st_regsize); + __db_dl_pct(dbenv, + "The number of region locks that required waiting", + (u_long)sp->st_region_wait, DB_PCT(sp->st_region_wait, + sp->st_region_wait + sp->st_region_nowait), NULL); + + __os_ufree(dbenv, sp); + + return (0); +} + +/* + * __log_print_all -- + * Display debugging log region statistics. 
+ */ +static int +__log_print_all(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + static const FN fn[] = { + { DBLOG_RECOVER, "DBLOG_RECOVER" }, + { DBLOG_FORCE_OPEN, "DBLOG_FORCE_OPEN" }, + { 0, NULL } + }; + DB_LOG *dblp; + DB_MUTEX *flush_mutexp; + LOG *lp; + + dblp = dbenv->lg_handle; + lp = (LOG *)dblp->reginfo.primary; + + R_LOCK(dbenv, &dblp->reginfo); + + __db_print_reginfo(dbenv, &dblp->reginfo, "Log"); + + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "DB_LOG handle information:"); + + __db_print_mutex( + dbenv, NULL, dblp->mutexp, "DB_LOG handle mutex", flags); + STAT_ULONG("Log file name", dblp->lfname); + if (dblp->lfhp == NULL) + STAT_ISSET("Log file handle", dblp->lfhp); + else + __db_print_fh(dbenv, dblp->lfhp, flags); + __db_prflags(dbenv, NULL, dblp->flags, fn, NULL, "\tFlags"); + + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "LOG handle information:"); + + __db_print_mutex( + dbenv, NULL, &lp->fq_mutex, "file name list mutex", flags); + + STAT_HEX("persist.magic", lp->persist.magic); + STAT_ULONG("persist.version", lp->persist.version); + __db_dlbytes(dbenv, + "persist.log_size", (u_long)0, (u_long)0, lp->persist.log_size); + STAT_FMT("persist.mode", "%#lo", u_long, lp->persist.mode); + STAT_LSN("current file offset LSN", &lp->lsn); + STAT_LSN("first buffer byte LSN", &lp->lsn); + STAT_ULONG("current buffer offset", lp->b_off); + STAT_ULONG("current file write offset", lp->w_off); + STAT_ULONG("length of last record", lp->len); + STAT_LONG("log flush in progress", lp->in_flush); + + flush_mutexp = R_ADDR(dbenv, &dblp->reginfo, lp->flush_mutex_off); + __db_print_mutex(dbenv, NULL, flush_mutexp, "Log flush mutex", flags); + + STAT_LSN("last sync LSN", &lp->s_lsn); + + /* + * Don't display the replication fields here, they're displayed as part + * of the replication statistics. + */ + + STAT_LSN("cached checkpoint LSN", &lp->cached_ckp_lsn); + + __db_dlbytes(dbenv, + "log buffer size", (u_long)0, (u_long)0, lp->buffer_size); + __db_dlbytes(dbenv, + "log file size", (u_long)0, (u_long)0, lp->log_size); + __db_dlbytes(dbenv, + "next log file size", (u_long)0, (u_long)0, lp->log_nsize); + + STAT_ULONG("transactions waiting to commit", lp->ncommit); + STAT_LSN("LSN of first commit", &lp->t_lsn); + + __dbreg_print_dblist(dbenv, flags); + R_UNLOCK(dbenv, &dblp->reginfo); + + return (0); +} + +#else /* !HAVE_STATISTICS */ + +int +__log_stat_pp(dbenv, statp, flags) + DB_ENV *dbenv; + DB_LOG_STAT **statp; + u_int32_t flags; +{ + COMPQUIET(statp, NULL); + COMPQUIET(flags, 0); + + return (__db_stat_not_built(dbenv)); +} + +int +__log_stat_print_pp(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + COMPQUIET(flags, 0); + + return (__db_stat_not_built(dbenv)); +} +#endif diff --git a/db/mod_db4/ABOUT b/db/mod_db4/ABOUT new file mode 100644 index 000000000..e5cccaf9f --- /dev/null +++ b/db/mod_db4/ABOUT @@ -0,0 +1,47 @@ + + +This is the mod_db4 apache module, providing a safe framework +for running db4 applications in the Apache 1.3 environment. In +general, it is dangerous to run db4 in a multi-process system +without some facility to coordinate recovery between +participating processes. Apache natively provides no +interface for communication between processes, so the mod_db4 +module exists to provide this communication. + +Specifically, mod_db4 provides the following facilities: + +o New constructors for DB and DB_ENV structures, which install +replacement open/close methods.
+o Transparent caching of open DB and DB_ENV structures +o Reference counting on all structures, allowing the module to +detect the initial opening of any managed database +and automatically perform recovery. +o Automatic detection of unexpected failures (segfaults, or a +module actually calling exit() and avoiding shutdown phases), +and automatic termination of all child processes with open +database resources to attempt consistency. + +mod_db4 is designed to be used as an alternative interface to db4. +To have another Apache module (for example, mod_foo) use mod_db4, +do not link mod_foo against libdb-4.2. In your mod_foo makefile, +you should + +#include "mod_db4_export.h" + +and add your Apache include directory to your CPPFLAGS. + +In mod_foo, to create a mod_db4 managed DB_ENV, use the following: + +int mod_db4_db_env_create(DB_ENV **dbenvp, u_int32_t flags); + +which takes identical arguments to db_env_create(). + +To create a mod_db4 managed DB, use + +int mod_db4_db_create(DB **dbp, DB_ENV *dbenv, u_int32_t flags); + +which takes identical arguments to db_create(). + +Otherwise the API is completely consistent with the standard Sleepycat API. + +For installation instructions, see the INSTALL file. diff --git a/db/mod_db4/INSTALL b/db/mod_db4/INSTALL new file mode 100644 index 000000000..72d5d8b5c --- /dev/null +++ b/db/mod_db4/INSTALL @@ -0,0 +1,14 @@ +This apache module requires db-4.2 compiled with c++ extensions +and libmm (a shared memory manager). + +To build this apache module, perform the following steps: + +> autoconf +> autoheader +> ./configure --with-apxs=/path/to/apxs --with-db4=/usr/local/BerkeleyDb-4.2 --with-mm=/usr/local +> make +> su - +# make install + +Post-installation, modules can use this extension via the functions +documented in $APACHE_INCLUDEDIR/mod_db4_export.h diff --git a/db/mod_db4/Makefile.in b/db/mod_db4/Makefile.in new file mode 100644 index 000000000..1732a0208 --- /dev/null +++ b/db/mod_db4/Makefile.in @@ -0,0 +1,26 @@ +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# http://www.apache.org/licenses/LICENSE-2.0.txt +# + +APXS=@APXS@ +CXXFLAGS=@CXXFLAGS@ +LDFLAGS=@LDFLAGS@ +LIBS=@LIBS@ + +SOURCES=mod_db4.c sem_utils.c skiplist.c utils.c mm_hash.c + +all: mod_db4.so + +mod_db4.so: $(SOURCES) + $(APXS) -c $(LDFLAGS) $(CXXFLAGS) $(SOURCES) $(LIBS) + +install: mod_db4.so + $(APXS) -i mod_db4.so + cp mod_db4_export.h `$(APXS) -q INCLUDEDIR`/ + chmod a+r `$(APXS) -q INCLUDEDIR`/mod_db4_export.h + +clean: + rm -f *.o *.a *.so *.lo *.tlo *.to config.cache config.log *.out core diff --git a/db/mod_db4/config.h.in b/db/mod_db4/config.h.in new file mode 100644 index 000000000..a7ac66c09 --- /dev/null +++ b/db/mod_db4/config.h.in @@ -0,0 +1,22 @@ +/* config.h.in. Generated from configure.in by autoheader. */ + +/* Define to 1 if you have the `mm' library (-lmm). */ +#undef HAVE_LIBMM + +/* */ +#undef HAVE_SEMUN + +/* Define to the address where bug reports for this package should be sent. */ +#undef PACKAGE_BUGREPORT + +/* Define to the full name of this package. */ +#undef PACKAGE_NAME + +/* Define to the full name and version of this package. */ +#undef PACKAGE_STRING + +/* Define to the one symbol short name of this package. */ +#undef PACKAGE_TARNAME + +/* Define to the version of this package. */ +#undef PACKAGE_VERSION diff --git a/db/mod_db4/configure b/db/mod_db4/configure new file mode 100755 index 000000000..f83b3bb69 --- /dev/null +++ b/db/mod_db4/configure @@ -0,0 +1,3224 @@ +#!
/bin/sh +# Guess values for system-dependent variables and create Makefiles. +# Generated by GNU Autoconf 2.59. +# +# Copyright (C) 2003 Free Software Foundation, Inc. +# This configure script is free software; the Free Software Foundation +# gives unlimited permission to copy, distribute and modify it. +## --------------------- ## +## M4sh Initialization. ## +## --------------------- ## + +# Be Bourne compatible +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' +elif test -n "${BASH_VERSION+set}" && (set -o posix) >/dev/null 2>&1; then + set -o posix +fi +DUALCASE=1; export DUALCASE # for MKS sh + +# Support unset when possible. +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + as_unset=unset +else + as_unset=false +fi + + +# Work around bugs in pre-3.0 UWIN ksh. +$as_unset ENV MAIL MAILPATH +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +for as_var in \ + LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \ + LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \ + LC_TELEPHONE LC_TIME +do + if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then + eval $as_var=C; export $as_var + else + $as_unset $as_var + fi +done + +# Required to use basename. +if expr a : '\(a\)' >/dev/null 2>&1; then + as_expr=expr +else + as_expr=false +fi + +if (basename /) >/dev/null 2>&1 && test "X`basename / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + + +# Name of the executable. +as_me=`$as_basename "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)$' \| \ + . : '\(.\)' 2>/dev/null || +echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/; q; } + /^X\/\(\/\/\)$/{ s//\1/; q; } + /^X\/\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + + +# PATH needs CR, and LINENO needs CR and PATH. +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + echo "#! /bin/sh" >conf$$.sh + echo "exit 0" >>conf$$.sh + chmod +x conf$$.sh + if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then + PATH_SEPARATOR=';' + else + PATH_SEPARATOR=: + fi + rm -f conf$$.sh +fi + + + as_lineno_1=$LINENO + as_lineno_2=$LINENO + as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null` + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x$as_lineno_3" = "x$as_lineno_2" || { + # Find who we are. Look in the path if we contain no path at all + # relative or not. + case $0 in + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break +done + + ;; + esac + # We did not find ourselves, most probably we were run as `sh COMMAND' + # in which case we are not to be found in the path. + if test "x$as_myself" = x; then + as_myself=$0 + fi + if test ! 
-f "$as_myself"; then + { echo "$as_me: error: cannot find myself; rerun with an absolute path" >&2 + { (exit 1); exit 1; }; } + fi + case $CONFIG_SHELL in + '') + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for as_base in sh bash ksh sh5; do + case $as_dir in + /*) + if ("$as_dir/$as_base" -c ' + as_lineno_1=$LINENO + as_lineno_2=$LINENO + as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null` + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x$as_lineno_3" = "x$as_lineno_2" ') 2>/dev/null; then + $as_unset BASH_ENV || test "${BASH_ENV+set}" != set || { BASH_ENV=; export BASH_ENV; } + $as_unset ENV || test "${ENV+set}" != set || { ENV=; export ENV; } + CONFIG_SHELL=$as_dir/$as_base + export CONFIG_SHELL + exec "$CONFIG_SHELL" "$0" ${1+"$@"} + fi;; + esac + done +done +;; + esac + + # Create $as_me.lineno as a copy of $as_myself, but with $LINENO + # uniformly replaced by the line number. The first 'sed' inserts a + # line-number line before each line; the second 'sed' does the real + # work. The second script uses 'N' to pair each line-number line + # with the numbered line, and appends trailing '-' during + # substitution so that $LINENO is not a special case at line end. + # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the + # second 'sed' script. Blame Lee E. McMahon for sed's syntax. :-) + sed '=' <$as_myself | + sed ' + N + s,$,-, + : loop + s,^\(['$as_cr_digits']*\)\(.*\)[$]LINENO\([^'$as_cr_alnum'_]\),\1\2\1\3, + t loop + s,-$,, + s,^['$as_cr_digits']*\n,, + ' >$as_me.lineno && + chmod +x $as_me.lineno || + { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 + { (exit 1); exit 1; }; } + + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensible to this). + . ./$as_me.lineno + # Exit status is that of the last command. + exit +} + + +case `echo "testing\c"; echo 1,2,3`,`echo -n testing; echo 1,2,3` in + *c*,-n*) ECHO_N= ECHO_C=' +' ECHO_T=' ' ;; + *c*,* ) ECHO_N=-n ECHO_C= ECHO_T= ;; + *) ECHO_N= ECHO_C='\c' ECHO_T= ;; +esac + +if expr a : '\(a\)' >/dev/null 2>&1; then + as_expr=expr +else + as_expr=false +fi + +rm -f conf$$ conf$$.exe conf$$.file +echo >conf$$.file +if ln -s conf$$.file conf$$ 2>/dev/null; then + # We could just check for DJGPP; but this test a) works b) is more generic + # and c) will remain valid once DJGPP supports symlinks (DJGPP 2.04). + if test -f conf$$.exe; then + # Don't use ln at all; we don't have any links + as_ln_s='cp -p' + else + as_ln_s='ln -s' + fi +elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln +else + as_ln_s='cp -p' +fi +rm -f conf$$ conf$$.exe conf$$.file + +if mkdir -p . 2>/dev/null; then + as_mkdir_p=: +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +as_executable_p="test -f" + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +# IFS +# We need space, tab and new line, in precisely that order. +as_nl=' +' +IFS=" $as_nl" + +# CDPATH. +$as_unset CDPATH + + +# Name of the host. +# hostname on some systems (SVR3.2, Linux) returns a bogus exit status, +# so uname gets run too. 
+ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` + +exec 6>&1 + +# +# Initializations. +# +ac_default_prefix=/usr/local +ac_config_libobj_dir=. +cross_compiling=no +subdirs= +MFLAGS= +MAKEFLAGS= +SHELL=${CONFIG_SHELL-/bin/sh} + +# Maximum number of lines to put in a shell here document. +# This variable seems obsolete. It should probably be removed, and +# only ac_max_sed_lines should be used. +: ${ac_max_here_lines=38} + +# Identity of this package. +PACKAGE_NAME= +PACKAGE_TARNAME= +PACKAGE_VERSION= +PACKAGE_STRING= +PACKAGE_BUGREPORT= + +ac_unique_file="mod_db4.c" +ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS CXX CXXFLAGS LDFLAGS CPPFLAGS ac_ct_CXX EXEEXT OBJEXT APXS HAVE_SEMUN LIBOBJS LTLIBOBJS' +ac_subst_files='' + +# Initialize some variables set by options. +ac_init_help= +ac_init_version=false +# The variables have the same names as the options, with +# dashes changed to underlines. +cache_file=/dev/null +exec_prefix=NONE +no_create= +no_recursion= +prefix=NONE +program_prefix=NONE +program_suffix=NONE +program_transform_name=s,x,x, +silent= +site= +srcdir= +verbose= +x_includes=NONE +x_libraries=NONE + +# Installation directory options. +# These are left unexpanded so users can "make install exec_prefix=/foo" +# and all the variables that are supposed to be based on exec_prefix +# by default will actually change. +# Use braces instead of parens because sh, perl, etc. also accept them. +bindir='${exec_prefix}/bin' +sbindir='${exec_prefix}/sbin' +libexecdir='${exec_prefix}/libexec' +datadir='${prefix}/share' +sysconfdir='${prefix}/etc' +sharedstatedir='${prefix}/com' +localstatedir='${prefix}/var' +libdir='${exec_prefix}/lib' +includedir='${prefix}/include' +oldincludedir='/usr/include' +infodir='${prefix}/info' +mandir='${prefix}/man' + +ac_prev= +for ac_option +do + # If the previous option needs an argument, assign it. + if test -n "$ac_prev"; then + eval "$ac_prev=\$ac_option" + ac_prev= + continue + fi + + ac_optarg=`expr "x$ac_option" : 'x[^=]*=\(.*\)'` + + # Accept the important Cygnus configure options, so we can diagnose typos. + + case $ac_option in + + -bindir | --bindir | --bindi | --bind | --bin | --bi) + ac_prev=bindir ;; + -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) + bindir=$ac_optarg ;; + + -build | --build | --buil | --bui | --bu) + ac_prev=build_alias ;; + -build=* | --build=* | --buil=* | --bui=* | --bu=*) + build_alias=$ac_optarg ;; + + -cache-file | --cache-file | --cache-fil | --cache-fi \ + | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) + ac_prev=cache_file ;; + -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ + | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) + cache_file=$ac_optarg ;; + + --config-cache | -C) + cache_file=config.cache ;; + + -datadir | --datadir | --datadi | --datad | --data | --dat | --da) + ac_prev=datadir ;; + -datadir=* | --datadir=* | --datadi=* | --datad=* | --data=* | --dat=* \ + | --da=*) + datadir=$ac_optarg ;; + + -disable-* | --disable-*) + ac_feature=`expr "x$ac_option" : 'x-*disable-\(.*\)'` + # Reject names that are not valid shell variable names. 
+ expr "x$ac_feature" : ".*[^-_$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid feature name: $ac_feature" >&2 + { (exit 1); exit 1; }; } + ac_feature=`echo $ac_feature | sed 's/-/_/g'` + eval "enable_$ac_feature=no" ;; + + -enable-* | --enable-*) + ac_feature=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_feature" : ".*[^-_$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid feature name: $ac_feature" >&2 + { (exit 1); exit 1; }; } + ac_feature=`echo $ac_feature | sed 's/-/_/g'` + case $ac_option in + *=*) ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"`;; + *) ac_optarg=yes ;; + esac + eval "enable_$ac_feature='$ac_optarg'" ;; + + -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ + | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ + | --exec | --exe | --ex) + ac_prev=exec_prefix ;; + -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ + | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ + | --exec=* | --exe=* | --ex=*) + exec_prefix=$ac_optarg ;; + + -gas | --gas | --ga | --g) + # Obsolete; use --with-gas. + with_gas=yes ;; + + -help | --help | --hel | --he | -h) + ac_init_help=long ;; + -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) + ac_init_help=recursive ;; + -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) + ac_init_help=short ;; + + -host | --host | --hos | --ho) + ac_prev=host_alias ;; + -host=* | --host=* | --hos=* | --ho=*) + host_alias=$ac_optarg ;; + + -includedir | --includedir | --includedi | --included | --include \ + | --includ | --inclu | --incl | --inc) + ac_prev=includedir ;; + -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ + | --includ=* | --inclu=* | --incl=* | --inc=*) + includedir=$ac_optarg ;; + + -infodir | --infodir | --infodi | --infod | --info | --inf) + ac_prev=infodir ;; + -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) + infodir=$ac_optarg ;; + + -libdir | --libdir | --libdi | --libd) + ac_prev=libdir ;; + -libdir=* | --libdir=* | --libdi=* | --libd=*) + libdir=$ac_optarg ;; + + -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ + | --libexe | --libex | --libe) + ac_prev=libexecdir ;; + -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ + | --libexe=* | --libex=* | --libe=*) + libexecdir=$ac_optarg ;; + + -localstatedir | --localstatedir | --localstatedi | --localstated \ + | --localstate | --localstat | --localsta | --localst \ + | --locals | --local | --loca | --loc | --lo) + ac_prev=localstatedir ;; + -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ + | --localstate=* | --localstat=* | --localsta=* | --localst=* \ + | --locals=* | --local=* | --loca=* | --loc=* | --lo=*) + localstatedir=$ac_optarg ;; + + -mandir | --mandir | --mandi | --mand | --man | --ma | --m) + ac_prev=mandir ;; + -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) + mandir=$ac_optarg ;; + + -nfp | --nfp | --nf) + # Obsolete; use --without-fp. 
+ with_fp=no ;; + + -no-create | --no-create | --no-creat | --no-crea | --no-cre \ + | --no-cr | --no-c | -n) + no_create=yes ;; + + -no-recursion | --no-recursion | --no-recursio | --no-recursi \ + | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) + no_recursion=yes ;; + + -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ + | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ + | --oldin | --oldi | --old | --ol | --o) + ac_prev=oldincludedir ;; + -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ + | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ + | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) + oldincludedir=$ac_optarg ;; + + -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) + ac_prev=prefix ;; + -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) + prefix=$ac_optarg ;; + + -program-prefix | --program-prefix | --program-prefi | --program-pref \ + | --program-pre | --program-pr | --program-p) + ac_prev=program_prefix ;; + -program-prefix=* | --program-prefix=* | --program-prefi=* \ + | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) + program_prefix=$ac_optarg ;; + + -program-suffix | --program-suffix | --program-suffi | --program-suff \ + | --program-suf | --program-su | --program-s) + ac_prev=program_suffix ;; + -program-suffix=* | --program-suffix=* | --program-suffi=* \ + | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) + program_suffix=$ac_optarg ;; + + -program-transform-name | --program-transform-name \ + | --program-transform-nam | --program-transform-na \ + | --program-transform-n | --program-transform- \ + | --program-transform | --program-transfor \ + | --program-transfo | --program-transf \ + | --program-trans | --program-tran \ + | --progr-tra | --program-tr | --program-t) + ac_prev=program_transform_name ;; + -program-transform-name=* | --program-transform-name=* \ + | --program-transform-nam=* | --program-transform-na=* \ + | --program-transform-n=* | --program-transform-=* \ + | --program-transform=* | --program-transfor=* \ + | --program-transfo=* | --program-transf=* \ + | --program-trans=* | --program-tran=* \ + | --progr-tra=* | --program-tr=* | --program-t=*) + program_transform_name=$ac_optarg ;; + + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + silent=yes ;; + + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) + ac_prev=sbindir ;; + -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ + | --sbi=* | --sb=*) + sbindir=$ac_optarg ;; + + -sharedstatedir | --sharedstatedir | --sharedstatedi \ + | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ + | --sharedst | --shareds | --shared | --share | --shar \ + | --sha | --sh) + ac_prev=sharedstatedir ;; + -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ + | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ + | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ + | --sha=* | --sh=*) + sharedstatedir=$ac_optarg ;; + + -site | --site | --sit) + ac_prev=site ;; + -site=* | --site=* | --sit=*) + site=$ac_optarg ;; + + -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) + ac_prev=srcdir ;; + -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) + srcdir=$ac_optarg ;; + + -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ + | --syscon | --sysco | --sysc | --sys | --sy) + 
ac_prev=sysconfdir ;; + -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ + | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) + sysconfdir=$ac_optarg ;; + + -target | --target | --targe | --targ | --tar | --ta | --t) + ac_prev=target_alias ;; + -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) + target_alias=$ac_optarg ;; + + -v | -verbose | --verbose | --verbos | --verbo | --verb) + verbose=yes ;; + + -version | --version | --versio | --versi | --vers | -V) + ac_init_version=: ;; + + -with-* | --with-*) + ac_package=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_package" : ".*[^-_$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid package name: $ac_package" >&2 + { (exit 1); exit 1; }; } + ac_package=`echo $ac_package| sed 's/-/_/g'` + case $ac_option in + *=*) ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"`;; + *) ac_optarg=yes ;; + esac + eval "with_$ac_package='$ac_optarg'" ;; + + -without-* | --without-*) + ac_package=`expr "x$ac_option" : 'x-*without-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_package" : ".*[^-_$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid package name: $ac_package" >&2 + { (exit 1); exit 1; }; } + ac_package=`echo $ac_package | sed 's/-/_/g'` + eval "with_$ac_package=no" ;; + + --x) + # Obsolete; use --with-x. + with_x=yes ;; + + -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ + | --x-incl | --x-inc | --x-in | --x-i) + ac_prev=x_includes ;; + -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ + | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) + x_includes=$ac_optarg ;; + + -x-libraries | --x-libraries | --x-librarie | --x-librari \ + | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) + ac_prev=x_libraries ;; + -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ + | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) + x_libraries=$ac_optarg ;; + + -*) { echo "$as_me: error: unrecognized option: $ac_option +Try \`$0 --help' for more information." >&2 + { (exit 1); exit 1; }; } + ;; + + *=*) + ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` + # Reject names that are not valid shell variable names. + expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid variable name: $ac_envvar" >&2 + { (exit 1); exit 1; }; } + ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` + eval "$ac_envvar='$ac_optarg'" + export $ac_envvar ;; + + *) + # FIXME: should be removed in autoconf 3.0. + echo "$as_me: WARNING: you should use --build, --host, --target" >&2 + expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && + echo "$as_me: WARNING: invalid host type: $ac_option" >&2 + : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option} + ;; + + esac +done + +if test -n "$ac_prev"; then + ac_option=--`echo $ac_prev | sed 's/_/-/g'` + { echo "$as_me: error: missing argument to $ac_option" >&2 + { (exit 1); exit 1; }; } +fi + +# Be sure to have absolute paths. +for ac_var in exec_prefix prefix +do + eval ac_val=$`echo $ac_var` + case $ac_val in + [\\/$]* | ?:[\\/]* | NONE | '' ) ;; + *) { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2 + { (exit 1); exit 1; }; };; + esac +done + +# Be sure to have absolute paths. 
+for ac_var in bindir sbindir libexecdir datadir sysconfdir sharedstatedir \ + localstatedir libdir includedir oldincludedir infodir mandir +do + eval ac_val=$`echo $ac_var` + case $ac_val in + [\\/$]* | ?:[\\/]* ) ;; + *) { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2 + { (exit 1); exit 1; }; };; + esac +done + +# There might be people who depend on the old broken behavior: `$host' +# used to hold the argument of --host etc. +# FIXME: To remove some day. +build=$build_alias +host=$host_alias +target=$target_alias + +# FIXME: To remove some day. +if test "x$host_alias" != x; then + if test "x$build_alias" = x; then + cross_compiling=maybe + echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host. + If a cross compiler is detected then cross compile mode will be used." >&2 + elif test "x$build_alias" != "x$host_alias"; then + cross_compiling=yes + fi +fi + +ac_tool_prefix= +test -n "$host_alias" && ac_tool_prefix=$host_alias- + +test "$silent" = yes && exec 6>/dev/null + + +# Find the source files, if location was not specified. +if test -z "$srcdir"; then + ac_srcdir_defaulted=yes + # Try the directory containing this script, then its parent. + ac_confdir=`(dirname "$0") 2>/dev/null || +$as_expr X"$0" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$0" : 'X\(//\)[^/]' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X"$0" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + srcdir=$ac_confdir + if test ! -r $srcdir/$ac_unique_file; then + srcdir=.. + fi +else + ac_srcdir_defaulted=no +fi +if test ! -r $srcdir/$ac_unique_file; then + if test "$ac_srcdir_defaulted" = yes; then + { echo "$as_me: error: cannot find sources ($ac_unique_file) in $ac_confdir or .." >&2 + { (exit 1); exit 1; }; } + else + { echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2 + { (exit 1); exit 1; }; } + fi +fi +(cd $srcdir && test -r ./$ac_unique_file) 2>/dev/null || + { echo "$as_me: error: sources are in $srcdir, but \`cd $srcdir' does not work" >&2 + { (exit 1); exit 1; }; } +srcdir=`echo "$srcdir" | sed 's%\([^\\/]\)[\\/]*$%\1%'` +ac_env_build_alias_set=${build_alias+set} +ac_env_build_alias_value=$build_alias +ac_cv_env_build_alias_set=${build_alias+set} +ac_cv_env_build_alias_value=$build_alias +ac_env_host_alias_set=${host_alias+set} +ac_env_host_alias_value=$host_alias +ac_cv_env_host_alias_set=${host_alias+set} +ac_cv_env_host_alias_value=$host_alias +ac_env_target_alias_set=${target_alias+set} +ac_env_target_alias_value=$target_alias +ac_cv_env_target_alias_set=${target_alias+set} +ac_cv_env_target_alias_value=$target_alias +ac_env_CXX_set=${CXX+set} +ac_env_CXX_value=$CXX +ac_cv_env_CXX_set=${CXX+set} +ac_cv_env_CXX_value=$CXX +ac_env_CXXFLAGS_set=${CXXFLAGS+set} +ac_env_CXXFLAGS_value=$CXXFLAGS +ac_cv_env_CXXFLAGS_set=${CXXFLAGS+set} +ac_cv_env_CXXFLAGS_value=$CXXFLAGS +ac_env_LDFLAGS_set=${LDFLAGS+set} +ac_env_LDFLAGS_value=$LDFLAGS +ac_cv_env_LDFLAGS_set=${LDFLAGS+set} +ac_cv_env_LDFLAGS_value=$LDFLAGS +ac_env_CPPFLAGS_set=${CPPFLAGS+set} +ac_env_CPPFLAGS_value=$CPPFLAGS +ac_cv_env_CPPFLAGS_set=${CPPFLAGS+set} +ac_cv_env_CPPFLAGS_value=$CPPFLAGS + +# +# Report the --help message. +# +if test "$ac_init_help" = "long"; then + # Omit some internal or obsolete options to make the list less imposing. + # This message is too long to be a string in the A/UX 3.1 sh. 
+ cat <<_ACEOF +\`configure' configures this package to adapt to many kinds of systems. + +Usage: $0 [OPTION]... [VAR=VALUE]... + +To assign environment variables (e.g., CC, CFLAGS...), specify them as +VAR=VALUE. See below for descriptions of some of the useful variables. + +Defaults for the options are specified in brackets. + +Configuration: + -h, --help display this help and exit + --help=short display options specific to this package + --help=recursive display the short help of all the included packages + -V, --version display version information and exit + -q, --quiet, --silent do not print \`checking...' messages + --cache-file=FILE cache test results in FILE [disabled] + -C, --config-cache alias for \`--cache-file=config.cache' + -n, --no-create do not create output files + --srcdir=DIR find the sources in DIR [configure dir or \`..'] + +_ACEOF + + cat <<_ACEOF +Installation directories: + --prefix=PREFIX install architecture-independent files in PREFIX + [$ac_default_prefix] + --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX + [PREFIX] + +By default, \`make install' will install all the files in +\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify +an installation prefix other than \`$ac_default_prefix' using \`--prefix', +for instance \`--prefix=\$HOME'. + +For better control, use the options below. + +Fine tuning of the installation directories: + --bindir=DIR user executables [EPREFIX/bin] + --sbindir=DIR system admin executables [EPREFIX/sbin] + --libexecdir=DIR program executables [EPREFIX/libexec] + --datadir=DIR read-only architecture-independent data [PREFIX/share] + --sysconfdir=DIR read-only single-machine data [PREFIX/etc] + --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] + --localstatedir=DIR modifiable single-machine data [PREFIX/var] + --libdir=DIR object code libraries [EPREFIX/lib] + --includedir=DIR C header files [PREFIX/include] + --oldincludedir=DIR C header files for non-gcc [/usr/include] + --infodir=DIR info documentation [PREFIX/info] + --mandir=DIR man documentation [PREFIX/man] +_ACEOF + + cat <<\_ACEOF +_ACEOF +fi + +if test -n "$ac_init_help"; then + + cat <<\_ACEOF + +Optional Packages: + --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] + --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) + --with-cxxflags Specify additional flags to pass to compiler + --with-ldflags Specify additional flags to pass to linker + --with-libs Specify additional libraries + --with-mm Specify additional libraries + --with-db4 Specify additional libraries + --with-apxs=FILE Build shared Apache module. FILE is optional + pathname to the Apache apxs tool; defaults to + "apxs". + +Some influential environment variables: + CXX C++ compiler command + CXXFLAGS C++ compiler flags + LDFLAGS linker flags, e.g. -L if you have libraries in a + nonstandard directory + CPPFLAGS C/C++ preprocessor flags, e.g. -I if you have + headers in a nonstandard directory + +Use these variables to override the choices made by `configure' or to help +it to find libraries and programs with nonstandard names/locations. + +_ACEOF +fi + +if test "$ac_init_help" = "recursive"; then + # If there are subdirs, report their specific --help. + ac_popdir=`pwd` + for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue + test -d $ac_dir || continue + ac_builddir=. + +if test "$ac_dir" != .; then + ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'` + # A "../" for each directory in $ac_dir_suffix. 
+ ac_top_builddir=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,../,g'` +else + ac_dir_suffix= ac_top_builddir= +fi + +case $srcdir in + .) # No --srcdir option. We are building in place. + ac_srcdir=. + if test -z "$ac_top_builddir"; then + ac_top_srcdir=. + else + ac_top_srcdir=`echo $ac_top_builddir | sed 's,/$,,'` + fi ;; + [\\/]* | ?:[\\/]* ) # Absolute path. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir ;; + *) # Relative path. + ac_srcdir=$ac_top_builddir$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_builddir$srcdir ;; +esac + +# Do not use `cd foo && pwd` to compute absolute paths, because +# the directories may not exist. +case `pwd` in +.) ac_abs_builddir="$ac_dir";; +*) + case "$ac_dir" in + .) ac_abs_builddir=`pwd`;; + [\\/]* | ?:[\\/]* ) ac_abs_builddir="$ac_dir";; + *) ac_abs_builddir=`pwd`/"$ac_dir";; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_top_builddir=${ac_top_builddir}.;; +*) + case ${ac_top_builddir}. in + .) ac_abs_top_builddir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_top_builddir=${ac_top_builddir}.;; + *) ac_abs_top_builddir=$ac_abs_builddir/${ac_top_builddir}.;; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_srcdir=$ac_srcdir;; +*) + case $ac_srcdir in + .) ac_abs_srcdir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_srcdir=$ac_srcdir;; + *) ac_abs_srcdir=$ac_abs_builddir/$ac_srcdir;; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_top_srcdir=$ac_top_srcdir;; +*) + case $ac_top_srcdir in + .) ac_abs_top_srcdir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_top_srcdir=$ac_top_srcdir;; + *) ac_abs_top_srcdir=$ac_abs_builddir/$ac_top_srcdir;; + esac;; +esac + + cd $ac_dir + # Check for guested configure; otherwise get Cygnus style configure. + if test -f $ac_srcdir/configure.gnu; then + echo + $SHELL $ac_srcdir/configure.gnu --help=recursive + elif test -f $ac_srcdir/configure; then + echo + $SHELL $ac_srcdir/configure --help=recursive + elif test -f $ac_srcdir/configure.ac || + test -f $ac_srcdir/configure.in; then + echo + $ac_configure --help + else + echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 + fi + cd $ac_popdir + done +fi + +test -n "$ac_init_help" && exit 0 +if $ac_init_version; then + cat <<\_ACEOF + +Copyright (C) 2003 Free Software Foundation, Inc. +This configure script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it. +_ACEOF + exit 0 +fi +exec 5>config.log +cat >&5 <<_ACEOF +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. + +It was created by $as_me, which was +generated by GNU Autoconf 2.59. Invocation command line was + + $ $0 $@ + +_ACEOF +{ +cat <<_ASUNAME +## --------- ## +## Platform. 
## +## --------- ## + +hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` + +/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` +hostinfo = `(hostinfo) 2>/dev/null || echo unknown` +/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` +/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` + +_ASUNAME + +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + echo "PATH: $as_dir" +done + +} >&5 + +cat >&5 <<_ACEOF + + +## ----------- ## +## Core tests. ## +## ----------- ## + +_ACEOF + + +# Keep a trace of the command line. +# Strip out --no-create and --no-recursion so they do not pile up. +# Strip out --silent because we don't want to record it for future runs. +# Also quote any args containing shell meta-characters. +# Make two passes to allow for proper duplicate-argument suppression. +ac_configure_args= +ac_configure_args0= +ac_configure_args1= +ac_sep= +ac_must_keep_next=false +for ac_pass in 1 2 +do + for ac_arg + do + case $ac_arg in + -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + continue ;; + *" "*|*" "*|*[\[\]\~\#\$\^\&\*\(\)\{\}\\\|\;\<\>\?\"\']*) + ac_arg=`echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + case $ac_pass in + 1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;; + 2) + ac_configure_args1="$ac_configure_args1 '$ac_arg'" + if test $ac_must_keep_next = true; then + ac_must_keep_next=false # Got value, back to normal. + else + case $ac_arg in + *=* | --config-cache | -C | -disable-* | --disable-* \ + | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ + | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ + | -with-* | --with-* | -without-* | --without-* | --x) + case "$ac_configure_args0 " in + "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; + esac + ;; + -* ) ac_must_keep_next=true ;; + esac + fi + ac_configure_args="$ac_configure_args$ac_sep'$ac_arg'" + # Get rid of the leading space. + ac_sep=" " + ;; + esac + done +done +$as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; } +$as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; } + +# When interrupted or exit'd, cleanup temporary files, and complete +# config.log. We remove comments because anyway the quotes in there +# would cause problems or look ugly. +# WARNING: Be sure not to use single quotes in there, as some shells, +# such as our DU 5.0 friend, will then `close' the trap. +trap 'exit_status=$? + # Save into config.log some information that might help in debugging. + { + echo + + cat <<\_ASBOX +## ---------------- ## +## Cache variables. 
## +## ---------------- ## +_ASBOX + echo + # The following way of writing the cache mishandles newlines in values, +{ + (set) 2>&1 | + case `(ac_space='"'"' '"'"'; set | grep ac_space) 2>&1` in + *ac_space=\ *) + sed -n \ + "s/'"'"'/'"'"'\\\\'"'"''"'"'/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='"'"'\\2'"'"'/p" + ;; + *) + sed -n \ + "s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1=\\2/p" + ;; + esac; +} + echo + + cat <<\_ASBOX +## ----------------- ## +## Output variables. ## +## ----------------- ## +_ASBOX + echo + for ac_var in $ac_subst_vars + do + eval ac_val=$`echo $ac_var` + echo "$ac_var='"'"'$ac_val'"'"'" + done | sort + echo + + if test -n "$ac_subst_files"; then + cat <<\_ASBOX +## ------------- ## +## Output files. ## +## ------------- ## +_ASBOX + echo + for ac_var in $ac_subst_files + do + eval ac_val=$`echo $ac_var` + echo "$ac_var='"'"'$ac_val'"'"'" + done | sort + echo + fi + + if test -s confdefs.h; then + cat <<\_ASBOX +## ----------- ## +## confdefs.h. ## +## ----------- ## +_ASBOX + echo + sed "/^$/d" confdefs.h | sort + echo + fi + test "$ac_signal" != 0 && + echo "$as_me: caught signal $ac_signal" + echo "$as_me: exit $exit_status" + } >&5 + rm -f core *.core && + rm -rf conftest* confdefs* conf$$* $ac_clean_files && + exit $exit_status + ' 0 +for ac_signal in 1 2 13 15; do + trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal +done +ac_signal=0 + +# confdefs.h avoids OS command line length limits that DEFS can exceed. +rm -rf conftest* confdefs.h +# AIX cpp loses on an empty file, so make sure it contains at least a newline. +echo >confdefs.h + +# Predefined preprocessor variables. + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_NAME "$PACKAGE_NAME" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_TARNAME "$PACKAGE_TARNAME" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_VERSION "$PACKAGE_VERSION" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_STRING "$PACKAGE_STRING" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" +_ACEOF + + +# Let the site file select an alternate cache file if it wants to. +# Prefer explicitly selected file to automatically selected ones. +if test -z "$CONFIG_SITE"; then + if test "x$prefix" != xNONE; then + CONFIG_SITE="$prefix/share/config.site $prefix/etc/config.site" + else + CONFIG_SITE="$ac_default_prefix/share/config.site $ac_default_prefix/etc/config.site" + fi +fi +for ac_site_file in $CONFIG_SITE; do + if test -r "$ac_site_file"; then + { echo "$as_me:$LINENO: loading site script $ac_site_file" >&5 +echo "$as_me: loading site script $ac_site_file" >&6;} + sed 's/^/| /' "$ac_site_file" >&5 + . "$ac_site_file" + fi +done + +if test -r "$cache_file"; then + # Some versions of bash will fail to source /dev/null (special + # files actually), so we avoid doing that. + if test -f "$cache_file"; then + { echo "$as_me:$LINENO: loading cache $cache_file" >&5 +echo "$as_me: loading cache $cache_file" >&6;} + case $cache_file in + [\\/]* | ?:[\\/]* ) . $cache_file;; + *) . ./$cache_file;; + esac + fi +else + { echo "$as_me:$LINENO: creating cache $cache_file" >&5 +echo "$as_me: creating cache $cache_file" >&6;} + >$cache_file +fi + +# Check that the precious variables saved in the cache have kept the same +# value. 
+ac_cache_corrupted=false +for ac_var in `(set) 2>&1 | + sed -n 's/^ac_env_\([a-zA-Z_0-9]*\)_set=.*/\1/p'`; do + eval ac_old_set=\$ac_cv_env_${ac_var}_set + eval ac_new_set=\$ac_env_${ac_var}_set + eval ac_old_val="\$ac_cv_env_${ac_var}_value" + eval ac_new_val="\$ac_env_${ac_var}_value" + case $ac_old_set,$ac_new_set in + set,) + { echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 +echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,set) + { echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5 +echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,);; + *) + if test "x$ac_old_val" != "x$ac_new_val"; then + { echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5 +echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} + { echo "$as_me:$LINENO: former value: $ac_old_val" >&5 +echo "$as_me: former value: $ac_old_val" >&2;} + { echo "$as_me:$LINENO: current value: $ac_new_val" >&5 +echo "$as_me: current value: $ac_new_val" >&2;} + ac_cache_corrupted=: + fi;; + esac + # Pass precious variables to config.status. + if test "$ac_new_set" = set; then + case $ac_new_val in + *" "*|*" "*|*[\[\]\~\#\$\^\&\*\(\)\{\}\\\|\;\<\>\?\"\']*) + ac_arg=$ac_var=`echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; + *) ac_arg=$ac_var=$ac_new_val ;; + esac + case " $ac_configure_args " in + *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. + *) ac_configure_args="$ac_configure_args '$ac_arg'" ;; + esac + fi +done +if $ac_cache_corrupted; then + { echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5 +echo "$as_me: error: changes in the environment can compromise the build" >&2;} + { { echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5 +echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;} + { (exit 1); exit 1; }; } +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + + + + + + + + + + + + + + + + + + ac_config_headers="$ac_config_headers config.h" + +ac_ext=cc +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +if test -n "$ac_tool_prefix"; then + for ac_prog in $CCC g++ c++ gpp aCC CC cxx cc++ cl FCC KCC RCC xlC_r xlC + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_CXX+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$CXX"; then + ac_cv_prog_CXX="$CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +CXX=$ac_cv_prog_CXX +if test -n "$CXX"; then + echo "$as_me:$LINENO: result: $CXX" >&5 +echo "${ECHO_T}$CXX" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + test -n "$CXX" && break + done +fi +if test -z "$CXX"; then + ac_ct_CXX=$CXX + for ac_prog in $CCC g++ c++ gpp aCC CC cxx cc++ cl FCC KCC RCC xlC_r xlC +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_ac_ct_CXX+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_CXX"; then + ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CXX="$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +ac_ct_CXX=$ac_cv_prog_ac_ct_CXX +if test -n "$ac_ct_CXX"; then + echo "$as_me:$LINENO: result: $ac_ct_CXX" >&5 +echo "${ECHO_T}$ac_ct_CXX" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + test -n "$ac_ct_CXX" && break +done +test -n "$ac_ct_CXX" || ac_ct_CXX="g++" + + CXX=$ac_ct_CXX +fi + + +# Provide some information about the compiler. +echo "$as_me:$LINENO:" \ + "checking for C++ compiler version" >&5 +ac_compiler=`set X $ac_compile; echo $2` +{ (eval echo "$as_me:$LINENO: \"$ac_compiler --version &5\"") >&5 + (eval $ac_compiler --version &5) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (eval echo "$as_me:$LINENO: \"$ac_compiler -v &5\"") >&5 + (eval $ac_compiler -v &5) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (eval echo "$as_me:$LINENO: \"$ac_compiler -V &5\"") >&5 + (eval $ac_compiler -V &5) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files a.out a.exe b.out" +# Try to create an executable without -o first, disregard a.out. +# It will help us diagnose broken compilers, and finding out an intuition +# of exeext. +echo "$as_me:$LINENO: checking for C++ compiler default output file name" >&5 +echo $ECHO_N "checking for C++ compiler default output file name... $ECHO_C" >&6 +ac_link_default=`echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` +if { (eval echo "$as_me:$LINENO: \"$ac_link_default\"") >&5 + (eval $ac_link_default) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + # Find the output, starting from the most likely. This scheme is +# not robust to junk in `.', hence go to wildcards (a.*) only as a last +# resort. + +# Be careful to initialize this variable, since it used to be cached. 
+# Otherwise an old cache value of `no' led to `EXEEXT = no' in a Makefile. +ac_cv_exeext= +# b.out is created by i960 compilers. +for ac_file in a_out.exe a.exe conftest.exe a.out conftest a.* conftest.* b.out +do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.o | *.obj ) + ;; + conftest.$ac_ext ) + # This is the source file. + ;; + [ab].out ) + # We found the default executable, but exeext='' is most + # certainly right. + break;; + *.* ) + ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + # FIXME: I believe we export ac_cv_exeext for Libtool, + # but it would be cool to find out if it's true. Does anybody + # maintain Libtool? --akim. + export ac_cv_exeext + break;; + * ) + break;; + esac +done +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { echo "$as_me:$LINENO: error: C++ compiler cannot create executables +See \`config.log' for more details." >&5 +echo "$as_me: error: C++ compiler cannot create executables +See \`config.log' for more details." >&2;} + { (exit 77); exit 77; }; } +fi + +ac_exeext=$ac_cv_exeext +echo "$as_me:$LINENO: result: $ac_file" >&5 +echo "${ECHO_T}$ac_file" >&6 + +# Check the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +echo "$as_me:$LINENO: checking whether the C++ compiler works" >&5 +echo $ECHO_N "checking whether the C++ compiler works... $ECHO_C" >&6 +# FIXME: These cross compiler hacks should be removed for Autoconf 3.0 +# If not cross compiling, check that we can run a simple program. +if test "$cross_compiling" != yes; then + if { ac_try='./$ac_file' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + cross_compiling=no + else + if test "$cross_compiling" = maybe; then + cross_compiling=yes + else + { { echo "$as_me:$LINENO: error: cannot run C++ compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot run C++ compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } + fi + fi +fi +echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 + +rm -f a.out a.exe conftest$ac_cv_exeext b.out +ac_clean_files=$ac_clean_files_save +# Check the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +echo "$as_me:$LINENO: checking whether we are cross compiling" >&5 +echo $ECHO_N "checking whether we are cross compiling... $ECHO_C" >&6 +echo "$as_me:$LINENO: result: $cross_compiling" >&5 +echo "${ECHO_T}$cross_compiling" >&6 + +echo "$as_me:$LINENO: checking for suffix of executables" >&5 +echo $ECHO_N "checking for suffix of executables... $ECHO_C" >&6 +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + # If both `conftest.exe' and `conftest' are `present' (well, observable) +# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will +# work properly (i.e., refer to `conftest.exe'), while it won't with +# `rm'. 
+for ac_file in conftest.exe conftest conftest.*; do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.o | *.obj ) ;; + *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + export ac_cv_exeext + break;; + * ) break;; + esac +done +else + { { echo "$as_me:$LINENO: error: cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } +fi + +rm -f conftest$ac_cv_exeext +echo "$as_me:$LINENO: result: $ac_cv_exeext" >&5 +echo "${ECHO_T}$ac_cv_exeext" >&6 + +rm -f conftest.$ac_ext +EXEEXT=$ac_cv_exeext +ac_exeext=$EXEEXT +echo "$as_me:$LINENO: checking for suffix of object files" >&5 +echo $ECHO_N "checking for suffix of object files... $ECHO_C" >&6 +if test "${ac_cv_objext+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.o conftest.obj +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + for ac_file in `(ls conftest.o conftest.obj; ls conftest.*) 2>/dev/null`; do + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg ) ;; + *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` + break;; + esac +done +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { echo "$as_me:$LINENO: error: cannot compute suffix of object files: cannot compile +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute suffix of object files: cannot compile +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } +fi + +rm -f conftest.$ac_cv_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_objext" >&5 +echo "${ECHO_T}$ac_cv_objext" >&6 +OBJEXT=$ac_cv_objext +ac_objext=$OBJEXT +echo "$as_me:$LINENO: checking whether we are using the GNU C++ compiler" >&5 +echo $ECHO_N "checking whether we are using the GNU C++ compiler... $ECHO_C" >&6 +if test "${ac_cv_cxx_compiler_gnu+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + ac_compiler_gnu=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_compiler_gnu=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_cxx_compiler_gnu=$ac_compiler_gnu + +fi +echo "$as_me:$LINENO: result: $ac_cv_cxx_compiler_gnu" >&5 +echo "${ECHO_T}$ac_cv_cxx_compiler_gnu" >&6 +GXX=`test $ac_compiler_gnu = yes && echo yes` +ac_test_CXXFLAGS=${CXXFLAGS+set} +ac_save_CXXFLAGS=$CXXFLAGS +CXXFLAGS="-g" +echo "$as_me:$LINENO: checking whether $CXX accepts -g" >&5 +echo $ECHO_N "checking whether $CXX accepts -g... $ECHO_C" >&6 +if test "${ac_cv_prog_cxx_g+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_prog_cxx_g=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_prog_cxx_g=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_prog_cxx_g" >&5 +echo "${ECHO_T}$ac_cv_prog_cxx_g" >&6 +if test "$ac_test_CXXFLAGS" = set; then + CXXFLAGS=$ac_save_CXXFLAGS +elif test $ac_cv_prog_cxx_g = yes; then + if test "$GXX" = yes; then + CXXFLAGS="-g -O2" + else + CXXFLAGS="-g" + fi +else + if test "$GXX" = yes; then + CXXFLAGS="-O2" + else + CXXFLAGS= + fi +fi +for ac_declaration in \ + '' \ + 'extern "C" void std::exit (int) throw (); using std::exit;' \ + 'extern "C" void std::exit (int); using std::exit;' \ + 'extern "C" void exit (int) throw ();' \ + 'extern "C" void exit (int);' \ + 'void exit (int);' +do + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_declaration +#include +int +main () +{ +exit (42); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + : +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +continue +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_declaration +int +main () +{ +exit (42); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +done +rm -f conftest* +if test -n "$ac_declaration"; then + echo '#ifdef __cplusplus' >>confdefs.h + echo $ac_declaration >>confdefs.h + echo '#endif' >>confdefs.h +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +ac_ext=cc +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +if test "$cross_compiling" = no; then + echo "$as_me:$LINENO: checking that C++ compiler can compile simple program" >&5 +echo $ECHO_N "checking that C++ compiler can compile simple program... $ECHO_C" >&6 +fi + +if test "$cross_compiling" = yes; then + { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot run test program while cross compiling +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +int main() { return 0; } +_ACEOF +rm -f conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 +else + echo "$as_me: program exited with status $ac_status" >&5 +echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +( exit $ac_status ) +echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6;{ { echo "$as_me:$LINENO: error: a working C++ compiler is required" >&5 +echo "$as_me: error: a working C++ compiler is required" >&2;} + { (exit 1); exit 1; }; } +fi +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +fi + +# Allow user to specify flags + +# Check whether --with-cxxflags or --without-cxxflags was given. +if test "${with_cxxflags+set}" = set; then + withval="$with_cxxflags" + + if test "x$withval" != "xno" ; then + CXXFLAGS="$CXXFLAGS $withval" + fi + + +fi; +CXXFLAGS=`echo $CXXFLAGS | sed -e 's/-O2//g;' | sed -e 's/-g//g;'` + +# Check whether --with-ldflags or --without-ldflags was given. +if test "${with_ldflags+set}" = set; then + withval="$with_ldflags" + + if test "x$withval" != "xno" ; then + LDFLAGS="$LDFLAGS $withval" + fi + + +fi; + + +# Check whether --with-libs or --without-libs was given. +if test "${with_libs+set}" = set; then + withval="$with_libs" + + if test "x$withval" != "xno" ; then + LIBS="$LIBS $withval" + fi + + +fi; + + +# Check whether --with-mm or --without-mm was given. +if test "${with_mm+set}" = set; then + withval="$with_mm" + + if test "x$withval" != "xno" ; then + LIBS="$LIBS -L$withval/lib" + CPPFLAGS="$CPPFLAGS -I$withval/include" + fi + + + +fi; + + +# Check whether --with-db4 or --without-db4 was given. +if test "${with_db4+set}" = set; then + withval="$with_db4" + + if test "x$withval" != "xno" ; then + LIBS="$LIBS -L$withval/lib" + CPPFLAGS="$CPPFLAGS -I$withval/include" + fi + + + +fi; + + +# Check whether --with-apxs or --without-apxs was given. +if test "${with_apxs+set}" = set; then + withval="$with_apxs" + + if test "$withval" = "yes"; then + withval="apxs" + fi + APXS="$withval" + + +else + + { { echo "$as_me:$LINENO: error: apxs is required" >&5 +echo "$as_me: error: apxs is required" >&2;} + { (exit 1); exit 1; }; } + +fi; + +LIBS="$LIBS -ldb_cxx" + +echo "$as_me:$LINENO: checking for union semun" >&5 +echo $ECHO_N "checking for union semun... $ECHO_C" >&6 +if test "${cv_semun+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +#include +#include +#include + +int +main () +{ +union semun x; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + + cv_semun=yes + +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + + cv_semun=no + +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +echo "$as_me:$LINENO: result: $cv_semun" >&5 +echo "${ECHO_T}$cv_semun" >&6 +if test "$cv_semun" = "yes"; then + +cat >>confdefs.h <<\_ACEOF +#define HAVE_SEMUN 1 +_ACEOF + +else + +cat >>confdefs.h <<\_ACEOF +#define HAVE_SEMUN 0 +_ACEOF + +fi + + + + + + + +echo "$as_me:$LINENO: checking for mm_core_create in -lmm" >&5 +echo $ECHO_N "checking for mm_core_create in -lmm... $ECHO_C" >&6 +if test "${ac_cv_lib_mm_mm_core_create+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lmm $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char mm_core_create (); +int +main () +{ +mm_core_create (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_mm_mm_core_create=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_mm_mm_core_create=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_mm_mm_core_create" >&5 +echo "${ECHO_T}$ac_cv_lib_mm_mm_core_create" >&6 +if test $ac_cv_lib_mm_mm_core_create = yes; then + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBMM 1 +_ACEOF + + LIBS="-lmm $LIBS" + +else + { { echo "$as_me:$LINENO: error: libmm required" >&5 +echo "$as_me: error: libmm required" >&2;} + { (exit 1); exit 1; }; } +fi + + + + ac_config_files="$ac_config_files Makefile" +cat >confcache <<\_ACEOF +# This file is a shell script that caches the results of configure +# tests run on this system so they can be shared between configure +# scripts and configure runs, see configure's option --config-cache. +# It is not useful on other systems. If it contains results you don't +# want to keep, you may remove or edit it. +# +# config.status only pays attention to the cache file if you give it +# the --recheck option to rerun configure. +# +# `ac_cv_env_foo' variables (set or unset) will be overridden when +# loading this file, other *unset* `ac_cv_foo' will be assigned the +# following values. + +_ACEOF + +# The following way of writing the cache mishandles newlines in values, +# but we know of no workaround that is simple, portable, and efficient. +# So, don't put newlines in cache variables' values. 
+# Ultrix sh set writes to stderr and can't be redirected directly, +# and sets the high bit in the cache file unless we assign to the vars. +{ + (set) 2>&1 | + case `(ac_space=' '; set | grep ac_space) 2>&1` in + *ac_space=\ *) + # `set' does not quote correctly, so add quotes (double-quote + # substitution turns \\\\ into \\, and sed turns \\ into \). + sed -n \ + "s/'/'\\\\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" + ;; + *) + # `set' quotes correctly as required by POSIX, so do not add quotes. + sed -n \ + "s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1=\\2/p" + ;; + esac; +} | + sed ' + t clear + : clear + s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ + t end + /^ac_cv_env/!s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ + : end' >>confcache +if diff $cache_file confcache >/dev/null 2>&1; then :; else + if test -w $cache_file; then + test "x$cache_file" != "x/dev/null" && echo "updating cache $cache_file" + cat confcache >$cache_file + else + echo "not updating unwritable cache $cache_file" + fi +fi +rm -f confcache + +test "x$prefix" = xNONE && prefix=$ac_default_prefix +# Let make expand exec_prefix. +test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' + +# VPATH may cause trouble with some makes, so we remove $(srcdir), +# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and +# trailing colons and then remove the whole line if VPATH becomes empty +# (actually we leave an empty line to preserve line numbers). +if test "x$srcdir" = x.; then + ac_vpsub='/^[ ]*VPATH[ ]*=/{ +s/:*\$(srcdir):*/:/; +s/:*\${srcdir}:*/:/; +s/:*@srcdir@:*/:/; +s/^\([^=]*=[ ]*\):*/\1/; +s/:*$//; +s/^[^=]*=[ ]*$//; +}' +fi + +DEFS=-DHAVE_CONFIG_H + +ac_libobjs= +ac_ltlibobjs= +for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue + # 1. Remove the extension, and $U if already installed. + ac_i=`echo "$ac_i" | + sed 's/\$U\././;s/\.o$//;s/\.obj$//'` + # 2. Add them. + ac_libobjs="$ac_libobjs $ac_i\$U.$ac_objext" + ac_ltlibobjs="$ac_ltlibobjs $ac_i"'$U.lo' +done +LIBOBJS=$ac_libobjs + +LTLIBOBJS=$ac_ltlibobjs + + + +: ${CONFIG_STATUS=./config.status} +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files $CONFIG_STATUS" +{ echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5 +echo "$as_me: creating $CONFIG_STATUS" >&6;} +cat >$CONFIG_STATUS <<_ACEOF +#! $SHELL +# Generated by $as_me. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false +SHELL=\${CONFIG_SHELL-$SHELL} +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF +## --------------------- ## +## M4sh Initialization. ## +## --------------------- ## + +# Be Bourne compatible +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' +elif test -n "${BASH_VERSION+set}" && (set -o posix) >/dev/null 2>&1; then + set -o posix +fi +DUALCASE=1; export DUALCASE # for MKS sh + +# Support unset when possible. +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + as_unset=unset +else + as_unset=false +fi + + +# Work around bugs in pre-3.0 UWIN ksh. +$as_unset ENV MAIL MAILPATH +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. 
+for as_var in \ + LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \ + LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \ + LC_TELEPHONE LC_TIME +do + if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then + eval $as_var=C; export $as_var + else + $as_unset $as_var + fi +done + +# Required to use basename. +if expr a : '\(a\)' >/dev/null 2>&1; then + as_expr=expr +else + as_expr=false +fi + +if (basename /) >/dev/null 2>&1 && test "X`basename / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + + +# Name of the executable. +as_me=`$as_basename "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)$' \| \ + . : '\(.\)' 2>/dev/null || +echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/; q; } + /^X\/\(\/\/\)$/{ s//\1/; q; } + /^X\/\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + + +# PATH needs CR, and LINENO needs CR and PATH. +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + echo "#! /bin/sh" >conf$$.sh + echo "exit 0" >>conf$$.sh + chmod +x conf$$.sh + if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then + PATH_SEPARATOR=';' + else + PATH_SEPARATOR=: + fi + rm -f conf$$.sh +fi + + + as_lineno_1=$LINENO + as_lineno_2=$LINENO + as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null` + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x$as_lineno_3" = "x$as_lineno_2" || { + # Find who we are. Look in the path if we contain no path at all + # relative or not. + case $0 in + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break +done + + ;; + esac + # We did not find ourselves, most probably we were run as `sh COMMAND' + # in which case we are not to be found in the path. + if test "x$as_myself" = x; then + as_myself=$0 + fi + if test ! -f "$as_myself"; then + { { echo "$as_me:$LINENO: error: cannot find myself; rerun with an absolute path" >&5 +echo "$as_me: error: cannot find myself; rerun with an absolute path" >&2;} + { (exit 1); exit 1; }; } + fi + case $CONFIG_SHELL in + '') + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for as_base in sh bash ksh sh5; do + case $as_dir in + /*) + if ("$as_dir/$as_base" -c ' + as_lineno_1=$LINENO + as_lineno_2=$LINENO + as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null` + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x$as_lineno_3" = "x$as_lineno_2" ') 2>/dev/null; then + $as_unset BASH_ENV || test "${BASH_ENV+set}" != set || { BASH_ENV=; export BASH_ENV; } + $as_unset ENV || test "${ENV+set}" != set || { ENV=; export ENV; } + CONFIG_SHELL=$as_dir/$as_base + export CONFIG_SHELL + exec "$CONFIG_SHELL" "$0" ${1+"$@"} + fi;; + esac + done +done +;; + esac + + # Create $as_me.lineno as a copy of $as_myself, but with $LINENO + # uniformly replaced by the line number. The first 'sed' inserts a + # line-number line before each line; the second 'sed' does the real + # work. 
The second script uses 'N' to pair each line-number line + # with the numbered line, and appends trailing '-' during + # substitution so that $LINENO is not a special case at line end. + # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the + # second 'sed' script. Blame Lee E. McMahon for sed's syntax. :-) + sed '=' <$as_myself | + sed ' + N + s,$,-, + : loop + s,^\(['$as_cr_digits']*\)\(.*\)[$]LINENO\([^'$as_cr_alnum'_]\),\1\2\1\3, + t loop + s,-$,, + s,^['$as_cr_digits']*\n,, + ' >$as_me.lineno && + chmod +x $as_me.lineno || + { { echo "$as_me:$LINENO: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&5 +echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2;} + { (exit 1); exit 1; }; } + + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensible to this). + . ./$as_me.lineno + # Exit status is that of the last command. + exit +} + + +case `echo "testing\c"; echo 1,2,3`,`echo -n testing; echo 1,2,3` in + *c*,-n*) ECHO_N= ECHO_C=' +' ECHO_T=' ' ;; + *c*,* ) ECHO_N=-n ECHO_C= ECHO_T= ;; + *) ECHO_N= ECHO_C='\c' ECHO_T= ;; +esac + +if expr a : '\(a\)' >/dev/null 2>&1; then + as_expr=expr +else + as_expr=false +fi + +rm -f conf$$ conf$$.exe conf$$.file +echo >conf$$.file +if ln -s conf$$.file conf$$ 2>/dev/null; then + # We could just check for DJGPP; but this test a) works b) is more generic + # and c) will remain valid once DJGPP supports symlinks (DJGPP 2.04). + if test -f conf$$.exe; then + # Don't use ln at all; we don't have any links + as_ln_s='cp -p' + else + as_ln_s='ln -s' + fi +elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln +else + as_ln_s='cp -p' +fi +rm -f conf$$ conf$$.exe conf$$.file + +if mkdir -p . 2>/dev/null; then + as_mkdir_p=: +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +as_executable_p="test -f" + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +# IFS +# We need space, tab and new line, in precisely that order. +as_nl=' +' +IFS=" $as_nl" + +# CDPATH. +$as_unset CDPATH + +exec 6>&1 + +# Open the log real soon, to keep \$[0] and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. Logging --version etc. is OK. +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX +} >&5 +cat >&5 <<_CSEOF + +This file was extended by $as_me, which was +generated by GNU Autoconf 2.59. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +_CSEOF +echo "on `(hostname || uname -n) 2>/dev/null | sed 1q`" >&5 +echo >&5 +_ACEOF + +# Files that config.status was made for. 
+if test -n "$ac_config_files"; then + echo "config_files=\"$ac_config_files\"" >>$CONFIG_STATUS +fi + +if test -n "$ac_config_headers"; then + echo "config_headers=\"$ac_config_headers\"" >>$CONFIG_STATUS +fi + +if test -n "$ac_config_links"; then + echo "config_links=\"$ac_config_links\"" >>$CONFIG_STATUS +fi + +if test -n "$ac_config_commands"; then + echo "config_commands=\"$ac_config_commands\"" >>$CONFIG_STATUS +fi + +cat >>$CONFIG_STATUS <<\_ACEOF + +ac_cs_usage="\ +\`$as_me' instantiates files from templates according to the +current configuration. + +Usage: $0 [OPTIONS] [FILE]... + + -h, --help print this help, then exit + -V, --version print version number, then exit + -q, --quiet do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + --header=FILE[:TEMPLATE] + instantiate the configuration header FILE + +Configuration files: +$config_files + +Configuration headers: +$config_headers + +Report bugs to ." +_ACEOF + +cat >>$CONFIG_STATUS <<_ACEOF +ac_cs_version="\\ +config.status +configured by $0, generated by GNU Autoconf 2.59, + with options \\"`echo "$ac_configure_args" | sed 's/[\\""\`\$]/\\\\&/g'`\\" + +Copyright (C) 2003 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." +srcdir=$srcdir +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF +# If no file are specified by the user, then we need to provide default +# value. By we need to know if files were specified by the user. +ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=*) + ac_option=`expr "x$1" : 'x\([^=]*\)='` + ac_optarg=`expr "x$1" : 'x[^=]*=\(.*\)'` + ac_shift=: + ;; + -*) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + *) # This is not an option, so the user has probably given explicit + # arguments. + ac_option=$1 + ac_need_defaults=false;; + esac + + case $ac_option in + # Handling of the options. +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF + -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --vers* | -V ) + echo "$ac_cs_version"; exit 0 ;; + --he | --h) + # Conflict between --help and --header + { { echo "$as_me:$LINENO: error: ambiguous option: $1 +Try \`$0 --help' for more information." >&5 +echo "$as_me: error: ambiguous option: $1 +Try \`$0 --help' for more information." >&2;} + { (exit 1); exit 1; }; };; + --help | --hel | -h ) + echo "$ac_cs_usage"; exit 0 ;; + --debug | --d* | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + CONFIG_FILES="$CONFIG_FILES $ac_optarg" + ac_need_defaults=false;; + --header | --heade | --head | --hea ) + $ac_shift + CONFIG_HEADERS="$CONFIG_HEADERS $ac_optarg" + ac_need_defaults=false;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) { { echo "$as_me:$LINENO: error: unrecognized option: $1 +Try \`$0 --help' for more information." >&5 +echo "$as_me: error: unrecognized option: $1 +Try \`$0 --help' for more information." 
>&2;} + { (exit 1); exit 1; }; } ;; + + *) ac_config_targets="$ac_config_targets $1" ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF +if \$ac_cs_recheck; then + echo "running $SHELL $0 " $ac_configure_args \$ac_configure_extra_args " --no-create --no-recursion" >&6 + exec $SHELL $0 $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion +fi + +_ACEOF + + + + + +cat >>$CONFIG_STATUS <<\_ACEOF +for ac_config_target in $ac_config_targets +do + case "$ac_config_target" in + # Handling of arguments. + "Makefile" ) CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "config.h" ) CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;; + *) { { echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5 +echo "$as_me: error: invalid argument: $ac_config_target" >&2;} + { (exit 1); exit 1; }; };; + esac +done + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. +if $ac_need_defaults; then + test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files + test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason to put it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Create a temporary directory, and hook for its removal unless debugging. +$debug || +{ + trap 'exit_status=$?; rm -rf $tmp && exit $exit_status' 0 + trap '{ (exit 1); exit 1; }' 1 2 13 15 +} + +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d -q "./confstatXXXXXX") 2>/dev/null` && + test -n "$tmp" && test -d "$tmp" +} || +{ + tmp=./confstat$$-$RANDOM + (umask 077 && mkdir $tmp) +} || +{ + echo "$me: cannot create a temporary directory in ." >&2 + { (exit 1); exit 1; } +} + +_ACEOF + +cat >>$CONFIG_STATUS <<_ACEOF + +# +# CONFIG_FILES section. +# + +# No need to generate the scripts if there are no CONFIG_FILES. +# This happens for instance when ./config.status config.h +if test -n "\$CONFIG_FILES"; then + # Protect against being on the right side of a sed subst in config.status. 
+ sed 's/,@/@@/; s/@,/@@/; s/,;t t\$/@;t t/; /@;t t\$/s/[\\\\&,]/\\\\&/g; + s/@@/,@/; s/@@/@,/; s/@;t t\$/,;t t/' >\$tmp/subs.sed <<\\CEOF +s,@SHELL@,$SHELL,;t t +s,@PATH_SEPARATOR@,$PATH_SEPARATOR,;t t +s,@PACKAGE_NAME@,$PACKAGE_NAME,;t t +s,@PACKAGE_TARNAME@,$PACKAGE_TARNAME,;t t +s,@PACKAGE_VERSION@,$PACKAGE_VERSION,;t t +s,@PACKAGE_STRING@,$PACKAGE_STRING,;t t +s,@PACKAGE_BUGREPORT@,$PACKAGE_BUGREPORT,;t t +s,@exec_prefix@,$exec_prefix,;t t +s,@prefix@,$prefix,;t t +s,@program_transform_name@,$program_transform_name,;t t +s,@bindir@,$bindir,;t t +s,@sbindir@,$sbindir,;t t +s,@libexecdir@,$libexecdir,;t t +s,@datadir@,$datadir,;t t +s,@sysconfdir@,$sysconfdir,;t t +s,@sharedstatedir@,$sharedstatedir,;t t +s,@localstatedir@,$localstatedir,;t t +s,@libdir@,$libdir,;t t +s,@includedir@,$includedir,;t t +s,@oldincludedir@,$oldincludedir,;t t +s,@infodir@,$infodir,;t t +s,@mandir@,$mandir,;t t +s,@build_alias@,$build_alias,;t t +s,@host_alias@,$host_alias,;t t +s,@target_alias@,$target_alias,;t t +s,@DEFS@,$DEFS,;t t +s,@ECHO_C@,$ECHO_C,;t t +s,@ECHO_N@,$ECHO_N,;t t +s,@ECHO_T@,$ECHO_T,;t t +s,@LIBS@,$LIBS,;t t +s,@CXX@,$CXX,;t t +s,@CXXFLAGS@,$CXXFLAGS,;t t +s,@LDFLAGS@,$LDFLAGS,;t t +s,@CPPFLAGS@,$CPPFLAGS,;t t +s,@ac_ct_CXX@,$ac_ct_CXX,;t t +s,@EXEEXT@,$EXEEXT,;t t +s,@OBJEXT@,$OBJEXT,;t t +s,@APXS@,$APXS,;t t +s,@HAVE_SEMUN@,$HAVE_SEMUN,;t t +s,@LIBOBJS@,$LIBOBJS,;t t +s,@LTLIBOBJS@,$LTLIBOBJS,;t t +CEOF + +_ACEOF + + cat >>$CONFIG_STATUS <<\_ACEOF + # Split the substitutions into bite-sized pieces for seds with + # small command number limits, like on Digital OSF/1 and HP-UX. + ac_max_sed_lines=48 + ac_sed_frag=1 # Number of current file. + ac_beg=1 # First line for current file. + ac_end=$ac_max_sed_lines # Line after last line for current file. + ac_more_lines=: + ac_sed_cmds= + while $ac_more_lines; do + if test $ac_beg -gt 1; then + sed "1,${ac_beg}d; ${ac_end}q" $tmp/subs.sed >$tmp/subs.frag + else + sed "${ac_end}q" $tmp/subs.sed >$tmp/subs.frag + fi + if test ! -s $tmp/subs.frag; then + ac_more_lines=false + else + # The purpose of the label and of the branching condition is to + # speed up the sed processing (if there are no `@' at all, there + # is no need to browse any of the substitutions). + # These are the two extra sed commands mentioned above. + (echo ':t + /@[a-zA-Z_][a-zA-Z_0-9]*@/!b' && cat $tmp/subs.frag) >$tmp/subs-$ac_sed_frag.sed + if test -z "$ac_sed_cmds"; then + ac_sed_cmds="sed -f $tmp/subs-$ac_sed_frag.sed" + else + ac_sed_cmds="$ac_sed_cmds | sed -f $tmp/subs-$ac_sed_frag.sed" + fi + ac_sed_frag=`expr $ac_sed_frag + 1` + ac_beg=$ac_end + ac_end=`expr $ac_end + $ac_max_sed_lines` + fi + done + if test -z "$ac_sed_cmds"; then + ac_sed_cmds=cat + fi +fi # test -n "$CONFIG_FILES" + +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF +for ac_file in : $CONFIG_FILES; do test "x$ac_file" = x: && continue + # Support "outfile[:infile[:infile...]]", defaulting infile="outfile.in". + case $ac_file in + - | *:- | *:-:* ) # input from stdin + cat >$tmp/stdin + ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'` + ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;; + *:* ) ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'` + ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;; + * ) ac_file_in=$ac_file.in ;; + esac + + # Compute @srcdir@, @top_srcdir@, and @INSTALL@ for subdirectories. + ac_dir=`(dirname "$ac_file") 2>/dev/null || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| \ + . 
: '\(.\)' 2>/dev/null || +echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + { if $as_mkdir_p; then + mkdir -p "$ac_dir" + else + as_dir="$ac_dir" + as_dirs= + while test ! -d "$as_dir"; do + as_dirs="$as_dir $as_dirs" + as_dir=`(dirname "$as_dir") 2>/dev/null || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + done + test ! -n "$as_dirs" || mkdir $as_dirs + fi || { { echo "$as_me:$LINENO: error: cannot create directory \"$ac_dir\"" >&5 +echo "$as_me: error: cannot create directory \"$ac_dir\"" >&2;} + { (exit 1); exit 1; }; }; } + + ac_builddir=. + +if test "$ac_dir" != .; then + ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'` + # A "../" for each directory in $ac_dir_suffix. + ac_top_builddir=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,../,g'` +else + ac_dir_suffix= ac_top_builddir= +fi + +case $srcdir in + .) # No --srcdir option. We are building in place. + ac_srcdir=. + if test -z "$ac_top_builddir"; then + ac_top_srcdir=. + else + ac_top_srcdir=`echo $ac_top_builddir | sed 's,/$,,'` + fi ;; + [\\/]* | ?:[\\/]* ) # Absolute path. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir ;; + *) # Relative path. + ac_srcdir=$ac_top_builddir$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_builddir$srcdir ;; +esac + +# Do not use `cd foo && pwd` to compute absolute paths, because +# the directories may not exist. +case `pwd` in +.) ac_abs_builddir="$ac_dir";; +*) + case "$ac_dir" in + .) ac_abs_builddir=`pwd`;; + [\\/]* | ?:[\\/]* ) ac_abs_builddir="$ac_dir";; + *) ac_abs_builddir=`pwd`/"$ac_dir";; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_top_builddir=${ac_top_builddir}.;; +*) + case ${ac_top_builddir}. in + .) ac_abs_top_builddir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_top_builddir=${ac_top_builddir}.;; + *) ac_abs_top_builddir=$ac_abs_builddir/${ac_top_builddir}.;; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_srcdir=$ac_srcdir;; +*) + case $ac_srcdir in + .) ac_abs_srcdir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_srcdir=$ac_srcdir;; + *) ac_abs_srcdir=$ac_abs_builddir/$ac_srcdir;; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_top_srcdir=$ac_top_srcdir;; +*) + case $ac_top_srcdir in + .) ac_abs_top_srcdir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_top_srcdir=$ac_top_srcdir;; + *) ac_abs_top_srcdir=$ac_abs_builddir/$ac_top_srcdir;; + esac;; +esac + + + + if test x"$ac_file" != x-; then + { echo "$as_me:$LINENO: creating $ac_file" >&5 +echo "$as_me: creating $ac_file" >&6;} + rm -f "$ac_file" + fi + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + if test x"$ac_file" = x-; then + configure_input= + else + configure_input="$ac_file. " + fi + configure_input=$configure_input"Generated from `echo $ac_file_in | + sed 's,.*/,,'` by configure." + + # First look for the input files in the build tree, otherwise in the + # src tree. 
+ ac_file_inputs=`IFS=: + for f in $ac_file_in; do + case $f in + -) echo $tmp/stdin ;; + [\\/$]*) + # Absolute (can't be DOS-style, as IFS=:) + test -f "$f" || { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5 +echo "$as_me: error: cannot find input file: $f" >&2;} + { (exit 1); exit 1; }; } + echo "$f";; + *) # Relative + if test -f "$f"; then + # Build tree + echo "$f" + elif test -f "$srcdir/$f"; then + # Source tree + echo "$srcdir/$f" + else + # /dev/null tree + { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5 +echo "$as_me: error: cannot find input file: $f" >&2;} + { (exit 1); exit 1; }; } + fi;; + esac + done` || { (exit 1); exit 1; } +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF + sed "$ac_vpsub +$extrasub +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s,@configure_input@,$configure_input,;t t +s,@srcdir@,$ac_srcdir,;t t +s,@abs_srcdir@,$ac_abs_srcdir,;t t +s,@top_srcdir@,$ac_top_srcdir,;t t +s,@abs_top_srcdir@,$ac_abs_top_srcdir,;t t +s,@builddir@,$ac_builddir,;t t +s,@abs_builddir@,$ac_abs_builddir,;t t +s,@top_builddir@,$ac_top_builddir,;t t +s,@abs_top_builddir@,$ac_abs_top_builddir,;t t +" $ac_file_inputs | (eval "$ac_sed_cmds") >$tmp/out + rm -f $tmp/stdin + if test x"$ac_file" != x-; then + mv $tmp/out $ac_file + else + cat $tmp/out + rm -f $tmp/out + fi + +done +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF + +# +# CONFIG_HEADER section. +# + +# These sed commands are passed to sed as "A NAME B NAME C VALUE D", where +# NAME is the cpp macro being defined and VALUE is the value it is being given. +# +# ac_d sets the value in "#define NAME VALUE" lines. +ac_dA='s,^\([ ]*\)#\([ ]*define[ ][ ]*\)' +ac_dB='[ ].*$,\1#\2' +ac_dC=' ' +ac_dD=',;t' +# ac_u turns "#undef NAME" without trailing blanks into "#define NAME VALUE". +ac_uA='s,^\([ ]*\)#\([ ]*\)undef\([ ][ ]*\)' +ac_uB='$,\1#\2define\3' +ac_uC=' ' +ac_uD=',;t' + +for ac_file in : $CONFIG_HEADERS; do test "x$ac_file" = x: && continue + # Support "outfile[:infile[:infile...]]", defaulting infile="outfile.in". + case $ac_file in + - | *:- | *:-:* ) # input from stdin + cat >$tmp/stdin + ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'` + ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;; + *:* ) ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'` + ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;; + * ) ac_file_in=$ac_file.in ;; + esac + + test x"$ac_file" != x- && { echo "$as_me:$LINENO: creating $ac_file" >&5 +echo "$as_me: creating $ac_file" >&6;} + + # First look for the input files in the build tree, otherwise in the + # src tree. + ac_file_inputs=`IFS=: + for f in $ac_file_in; do + case $f in + -) echo $tmp/stdin ;; + [\\/$]*) + # Absolute (can't be DOS-style, as IFS=:) + test -f "$f" || { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5 +echo "$as_me: error: cannot find input file: $f" >&2;} + { (exit 1); exit 1; }; } + # Do quote $f, to prevent DOS paths from being IFS'd. + echo "$f";; + *) # Relative + if test -f "$f"; then + # Build tree + echo "$f" + elif test -f "$srcdir/$f"; then + # Source tree + echo "$srcdir/$f" + else + # /dev/null tree + { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5 +echo "$as_me: error: cannot find input file: $f" >&2;} + { (exit 1); exit 1; }; } + fi;; + esac + done` || { (exit 1); exit 1; } + # Remove the trailing spaces. 
+ sed 's/[ ]*$//' $ac_file_inputs >$tmp/in + +_ACEOF + +# Transform confdefs.h into two sed scripts, `conftest.defines' and +# `conftest.undefs', that substitutes the proper values into +# config.h.in to produce config.h. The first handles `#define' +# templates, and the second `#undef' templates. +# And first: Protect against being on the right side of a sed subst in +# config.status. Protect against being in an unquoted here document +# in config.status. +rm -f conftest.defines conftest.undefs +# Using a here document instead of a string reduces the quoting nightmare. +# Putting comments in sed scripts is not portable. +# +# `end' is used to avoid that the second main sed command (meant for +# 0-ary CPP macros) applies to n-ary macro definitions. +# See the Autoconf documentation for `clear'. +cat >confdef2sed.sed <<\_ACEOF +s/[\\&,]/\\&/g +s,[\\$`],\\&,g +t clear +: clear +s,^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*\)\(([^)]*)\)[ ]*\(.*\)$,${ac_dA}\1${ac_dB}\1\2${ac_dC}\3${ac_dD},gp +t end +s,^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\)$,${ac_dA}\1${ac_dB}\1${ac_dC}\2${ac_dD},gp +: end +_ACEOF +# If some macros were called several times there might be several times +# the same #defines, which is useless. Nevertheless, we may not want to +# sort them, since we want the *last* AC-DEFINE to be honored. +uniq confdefs.h | sed -n -f confdef2sed.sed >conftest.defines +sed 's/ac_d/ac_u/g' conftest.defines >conftest.undefs +rm -f confdef2sed.sed + +# This sed command replaces #undef with comments. This is necessary, for +# example, in the case of _POSIX_SOURCE, which is predefined and required +# on some systems where configure will not decide to define it. +cat >>conftest.undefs <<\_ACEOF +s,^[ ]*#[ ]*undef[ ][ ]*[a-zA-Z_][a-zA-Z_0-9]*,/* & */, +_ACEOF + +# Break up conftest.defines because some shells have a limit on the size +# of here documents, and old seds have small limits too (100 cmds). +echo ' # Handle all the #define templates only if necessary.' >>$CONFIG_STATUS +echo ' if grep "^[ ]*#[ ]*define" $tmp/in >/dev/null; then' >>$CONFIG_STATUS +echo ' # If there are no defines, we may have an empty if/fi' >>$CONFIG_STATUS +echo ' :' >>$CONFIG_STATUS +rm -f conftest.tail +while grep . conftest.defines >/dev/null +do + # Write a limited-size here document to $tmp/defines.sed. + echo ' cat >$tmp/defines.sed <>$CONFIG_STATUS + # Speed up: don't consider the non `#define' lines. + echo '/^[ ]*#[ ]*define/!b' >>$CONFIG_STATUS + # Work around the forget-to-reset-the-flag bug. + echo 't clr' >>$CONFIG_STATUS + echo ': clr' >>$CONFIG_STATUS + sed ${ac_max_here_lines}q conftest.defines >>$CONFIG_STATUS + echo 'CEOF + sed -f $tmp/defines.sed $tmp/in >$tmp/out + rm -f $tmp/in + mv $tmp/out $tmp/in +' >>$CONFIG_STATUS + sed 1,${ac_max_here_lines}d conftest.defines >conftest.tail + rm -f conftest.defines + mv conftest.tail conftest.defines +done +rm -f conftest.defines +echo ' fi # grep' >>$CONFIG_STATUS +echo >>$CONFIG_STATUS + +# Break up conftest.undefs because some shells have a limit on the size +# of here documents, and old seds have small limits too (100 cmds). +echo ' # Handle all the #undef templates' >>$CONFIG_STATUS +rm -f conftest.tail +while grep . conftest.undefs >/dev/null +do + # Write a limited-size here document to $tmp/undefs.sed. + echo ' cat >$tmp/undefs.sed <>$CONFIG_STATUS + # Speed up: don't consider the non `#undef' + echo '/^[ ]*#[ ]*undef/!b' >>$CONFIG_STATUS + # Work around the forget-to-reset-the-flag bug. 
+ echo 't clr' >>$CONFIG_STATUS + echo ': clr' >>$CONFIG_STATUS + sed ${ac_max_here_lines}q conftest.undefs >>$CONFIG_STATUS + echo 'CEOF + sed -f $tmp/undefs.sed $tmp/in >$tmp/out + rm -f $tmp/in + mv $tmp/out $tmp/in +' >>$CONFIG_STATUS + sed 1,${ac_max_here_lines}d conftest.undefs >conftest.tail + rm -f conftest.undefs + mv conftest.tail conftest.undefs +done +rm -f conftest.undefs + +cat >>$CONFIG_STATUS <<\_ACEOF + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + if test x"$ac_file" = x-; then + echo "/* Generated by configure. */" >$tmp/config.h + else + echo "/* $ac_file. Generated by configure. */" >$tmp/config.h + fi + cat $tmp/in >>$tmp/config.h + rm -f $tmp/in + if test x"$ac_file" != x-; then + if diff $ac_file $tmp/config.h >/dev/null 2>&1; then + { echo "$as_me:$LINENO: $ac_file is unchanged" >&5 +echo "$as_me: $ac_file is unchanged" >&6;} + else + ac_dir=`(dirname "$ac_file") 2>/dev/null || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + { if $as_mkdir_p; then + mkdir -p "$ac_dir" + else + as_dir="$ac_dir" + as_dirs= + while test ! -d "$as_dir"; do + as_dirs="$as_dir $as_dirs" + as_dir=`(dirname "$as_dir") 2>/dev/null || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + done + test ! -n "$as_dirs" || mkdir $as_dirs + fi || { { echo "$as_me:$LINENO: error: cannot create directory \"$ac_dir\"" >&5 +echo "$as_me: error: cannot create directory \"$ac_dir\"" >&2;} + { (exit 1); exit 1; }; }; } + + rm -f $ac_file + mv $tmp/config.h $ac_file + fi + else + cat $tmp/config.h + rm -f $tmp/config.h + fi +done +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF + +{ (exit 0); exit 0; } +_ACEOF +chmod +x $CONFIG_STATUS +ac_clean_files=$ac_clean_files_save + + +# configure is writing to config.log, and then calls config.status. +# config.status does its own redirection, appending to config.log. +# Unfortunately, on DOS this fails, as config.log is still kept open +# by configure, so config.status won't be able to write to it; its +# output is simply discarded. So we exec the FD to /dev/null, +# effectively closing config.log, so it can be properly (re)opened and +# appended to by config.status. When coming back to configure, we +# need to make the FD available again. +if test "$no_create" != yes; then + ac_cs_success=: + ac_config_status_args= + test "$silent" = yes && + ac_config_status_args="$ac_config_status_args --quiet" + exec 5>/dev/null + $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false + exec 5>>config.log + # Use ||, not &&, to avoid exiting from the if with $? = 1, which + # would make configure fail if this is the last instruction. 
+ $ac_cs_success || { (exit 1); exit 1; } +fi + + diff --git a/db/mod_db4/configure.in b/db/mod_db4/configure.in new file mode 100644 index 000000000..14c72d104 --- /dev/null +++ b/db/mod_db4/configure.in @@ -0,0 +1,115 @@ +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# http://www.apache.org/licenses/LICENSE-2.0.txt +# + +AC_INIT(mod_db4.c) +AC_CONFIG_HEADER(config.h) +AC_PROG_CXX + +AC_LANG_PUSH(C++) +if test "$cross_compiling" = no; then + AC_MSG_CHECKING([that C++ compiler can compile simple program]) +fi +AC_TRY_RUN([int main() { return 0; }], + AC_MSG_RESULT(yes), + AC_MSG_RESULT(no);AC_MSG_ERROR([a working C++ compiler is required])) + +# Allow user to specify flags +AC_ARG_WITH(cxxflags, + [ --with-cxxflags Specify additional flags to pass to compiler], + [ + if test "x$withval" != "xno" ; then + CXXFLAGS="$CXXFLAGS $withval" + fi + ] +) +CXXFLAGS=`echo $CXXFLAGS | sed -e 's/-O2//g;' | sed -e 's/-g//g;'` +AC_ARG_WITH(ldflags, + [ --with-ldflags Specify additional flags to pass to linker], + [ + if test "x$withval" != "xno" ; then + LDFLAGS="$LDFLAGS $withval" + fi + ] +) + +AC_ARG_WITH(libs, + [ --with-libs Specify additional libraries], + [ + if test "x$withval" != "xno" ; then + LIBS="$LIBS $withval" + fi + ] +) + +AC_ARG_WITH(mm, + [ --with-mm Specify additional libraries], + [ + if test "x$withval" != "xno" ; then + LIBS="$LIBS -L$withval/lib" + CPPFLAGS="$CPPFLAGS -I$withval/include" + fi + ] + +) + +AC_ARG_WITH(db4, + [ --with-db4 Specify additional libraries], + [ + if test "x$withval" != "xno" ; then + LIBS="$LIBS -L$withval/lib" + CPPFLAGS="$CPPFLAGS -I$withval/include" + fi + ] + +) + +AC_ARG_WITH(apxs, +[ --with-apxs[=FILE] Build shared Apache module. FILE is optional + pathname to the Apache apxs tool; defaults to + "apxs".], +[ + if test "$withval" = "yes"; then + withval="apxs" + fi + APXS="$withval" + AC_SUBST(APXS) +], +[ + AC_MSG_ERROR([apxs is required]) +]) + +LIBS="$LIBS -ldb_cxx" + +AC_CACHE_CHECK(for union semun,cv_semun, + AC_TRY_COMPILE([ +#include +#include +#include + ], + [union semun x;], + [ + cv_semun=yes + ],[ + cv_semun=no + ]) +) +if test "$cv_semun" = "yes"; then + AC_DEFINE(HAVE_SEMUN, 1, [ ]) +else + AC_DEFINE(HAVE_SEMUN, 0, [ ]) +fi + +AC_SUBST(LDFLAGS) +AC_SUBST(LIBS) +AC_SUBST(CPPFLAGS) +AC_SUBST(CXXFLAGS) + +AC_CHECK_LIB(mm, mm_core_create, , [ AC_MSG_ERROR([libmm required]) ]) + +AC_SUBST(HAVE_SEMUN) +AC_OUTPUT(Makefile) + diff --git a/db/mod_db4/mm_hash.c b/db/mod_db4/mm_hash.c new file mode 100644 index 000000000..f42409c93 --- /dev/null +++ b/db/mod_db4/mm_hash.c @@ -0,0 +1,137 @@ +/*- + * Copyright (c) 2004 + * Sleepycat Software. All rights reserved. + * + * http://www.apache.org/licenses/LICENSE-2.0.txt + * + * authors: Thies C. 
Arntzen + * Sterling Hughes + * George Schlossnagle + */ + +#include +#include +#include "mm_hash.h" + +MM_Hash *mm_hash_new(MM *mm, MM_HashDtor dtor) +{ + MM_Hash *table; + + table = mm_calloc(mm, 1, sizeof(MM_Hash)); + table->mm = mm; + table->dtor = dtor; + + return table; +} + +void mm_hash_free(MM_Hash *table) +{ + MM_Bucket *cur; + MM_Bucket *next; + int i; + + for (i = 0; i < MM_HASH_SIZE; i++) { + cur = table->buckets[i]; + while (cur) { + next = cur->next; + + if (table->dtor) table->dtor(cur->data); + mm_free(table->mm, cur->key); + mm_free(table->mm, cur); + + cur = next; + } + } + mm_free(table->mm, table); +} + +static unsigned int hash_hash(const char *key, int length) +{ + unsigned int hash = 0; + + while (--length) + hash = hash * 65599 + *key++; + + return hash; +} + +void *mm_hash_find(MM_Hash *table, const void *key, int length) +{ + MM_Bucket *b; + unsigned int hash = hash_hash(key, length) % MM_HASH_SIZE; + + for (b = table->buckets[ hash ]; b; b = b->next) { + if (hash != b->hash) continue; + if (length != b->length) continue; + if (memcmp(key, b->key, length)) continue; + + return b->data; + } + + return NULL; +} + +void mm_hash_update(MM_Hash *table, char *key, int length, void *data) +{ + MM_Bucket *b, p; + unsigned int hash; + + hash = hash_hash(key, length) % MM_HASH_SIZE; + + for(b = table->buckets[ hash ]; b; b = b->next) { + if (hash != b->hash) continue; + if (length != b->length) continue; + if (memcmp(key, b->key, length)) continue; + if (table->dtor) table->dtor(b->data); + b->data = data; + } + if(!b) { + b = mm_malloc(table->mm, sizeof(MM_Bucket)); + b->key = mm_malloc(table->mm, length + 1); + memcpy(b->key, key, length); + b->key[length] = 0; + b->length = length; + b->hash = hash; + b->data = data; + b->next = table->buckets[ hash ]; + table->buckets[ hash ] = b; + } + table->nElements++; +} + +void mm_hash_delete(MM_Hash *table, char *key, int length) +{ + MM_Bucket *b; + MM_Bucket *prev = NULL; + unsigned int hash; + + hash = hash_hash(key, length) % MM_HASH_SIZE; + for (b = table->buckets[ hash ]; b; b = b->next) { + if (hash != b->hash || length != b->length || memcmp(key, b->key, length)) { + prev = b; + continue; + } + + /* unlink */ + if (prev) { + prev->next = b->next; + } else { + table->buckets[hash] = b->next; + } + + if (table->dtor) table->dtor(b->data); + mm_free(table->mm, b->key); + mm_free(table->mm, b); + + break; + } +} + +/* +Local variables: +tab-width: 4 +c-basic-offset: 4 +End: +vim600: noet sw=4 ts=4 fdm=marker +vim<600: noet sw=4 ts=4 +*/ diff --git a/db/mod_db4/mm_hash.h b/db/mod_db4/mm_hash.h new file mode 100644 index 000000000..d942d83b4 --- /dev/null +++ b/db/mod_db4/mm_hash.h @@ -0,0 +1,48 @@ +/*- + * Copyright (c) 2004 + * Sleepycat Software. All rights reserved. + * + * http://www.apache.org/licenses/LICENSE-2.0.txt + * + * authors: Thies C. 
Arntzen + * Sterling Hughes + * George Schlossnagle + */ + +#ifndef _MM_HASH_H +#define _MM_HASH_H +#include "mm.h" + +typedef void (*MM_HashDtor)(void *); + +typedef struct _MM_Bucket { + struct _MM_Bucket *next; + char *key; + int length; + unsigned int hash; + void *data; +} MM_Bucket; + +#define MM_HASH_SIZE 1009 +typedef struct _Hash { + MM_Bucket *buckets[ MM_HASH_SIZE ]; + MM *mm; + MM_HashDtor dtor; + int nElements; +} MM_Hash; + +MM_Hash *mm_hash_new(MM *, MM_HashDtor); +void mm_hash_free(MM_Hash *table); +void *mm_hash_find(MM_Hash *table, const void *key, int length); +void mm_hash_add(MM_Hash *table, char *key, int length, void *data); +void mm_hash_delete(MM_Hash *table, char *key, int length); +#endif + +/* +Local variables: +tab-width: 4 +c-basic-offset: 4 +End: +vim600: noet sw=4 ts=4 fdm=marker +vim<600: noet sw=4 ts=4 +*/ diff --git a/db/mod_db4/mod_db4.c b/db/mod_db4/mod_db4.c new file mode 100644 index 000000000..8b74d5fe4 --- /dev/null +++ b/db/mod_db4/mod_db4.c @@ -0,0 +1,129 @@ +/*- + * Copyright (c) 2004 + * Sleepycat Software. All rights reserved. + * + * http://www.apache.org/licenses/LICENSE-2.0.txt + * + * authors: Thies C. Arntzen + * Sterling Hughes + * George Schlossnagle + */ + +#include "httpd.h" +#include "http_config.h" +#include "http_core.h" +#include "http_log.h" +#include "http_main.h" +#include "http_protocol.h" +#include "scoreboard.h" +#include "util_script.h" + +#include "sem_utils.h" +#include "utils.h" + +extern scoreboard *ap_scoreboard_image; + +/* + * Declare ourselves so the configuration routines can find and know us. + * We'll fill it in at the end of the module. + */ + +module MODULE_VAR_EXPORT db4_module; + +void kill_all_children() +{ + int i, ret = 1; + ap_sync_scoreboard_image(); + for(;ret != 0;) { + ret = 0; + for (i = 0; i < HARD_SERVER_LIMIT; ++i) { + ret += kill(ap_scoreboard_image->parent[i].pid, SIGTERM); + } + } +} + +int moderator_main(void * ptr, child_info *ci) +{ + for(;;) { + env_wait_for_child_crash(); + kill_all_children(); + env_global_rw_lock(); + global_ref_count_clean(); + env_ok_to_proceed(); + env_global_unlock(); + } +} + +static void sig_unrecoverable(int sig) +{ + env_child_crash(); + /* reinstall default apache handler */ + signal(sig, SIG_DFL); + kill(getpid(), sig); +} + +static void db4_init(server_rec *s, pool *p) +{ + int mpid; + env_locks_init(); + mpid=ap_spawn_child(p, moderator_main, NULL, kill_always, NULL, NULL, NULL); +} + +/* + * Worker process init + */ + +static void db4_child_init(server_rec *s, pool *p) +{ + /* install our private signal handlers */ + signal(SIGSEGV, sig_unrecoverable); + signal(SIGBUS, sig_unrecoverable); + signal(SIGABRT, sig_unrecoverable); + signal(SIGILL, sig_unrecoverable); + env_rsrc_list_init(); +} + +/* + * Worker process exit + */ +static void db4_child_exit(server_rec *s, pool *p) +{ + child_clean_process_shutdown(); +} + +static const command_rec db4_cmds[] = +{ + {NULL} +}; + +module MODULE_VAR_EXPORT db4_module = +{ + STANDARD_MODULE_STUFF, + db4_init, /* module initializer */ + NULL, /* per-directory config creator */ + NULL, /* dir config merger */ + NULL, /* server config creator */ + NULL, /* server config merger */ + db4_cmds, /* command table */ + NULL, /* [9] list of handlers */ + NULL, /* [2] filename-to-URI translation */ + NULL, /* [5] check/validate user_id */ + NULL, /* [6] check user_id is valid *here* */ + NULL, /* [4] check access by host address */ + NULL, /* [7] MIME type checker/setter */ + NULL, /* [8] fixups */ + NULL, /* [10] logger */ +#if 
MODULE_MAGIC_NUMBER >= 19970103 + NULL, /* [3] header parser */ +#endif +#if MODULE_MAGIC_NUMBER >= 19970719 + db4_child_init, /* process initializer */ +#endif +#if MODULE_MAGIC_NUMBER >= 19970728 + db4_child_exit, /* process exit/cleanup */ +#endif +#if MODULE_MAGIC_NUMBER >= 19970902 + NULL /* [1] post read_request handling */ +#endif +}; +/* vim: set ts=4 sts=4 bs=2 ai expandtab : */ diff --git a/db/mod_db4/mod_db4_export.h b/db/mod_db4/mod_db4_export.h new file mode 100644 index 000000000..fa8c9fedb --- /dev/null +++ b/db/mod_db4/mod_db4_export.h @@ -0,0 +1,22 @@ +/*- + * Copyright (c) 2004 + * Sleepycat Software. All rights reserved. + * + * http://www.apache.org/licenses/LICENSE-2.0.txt + * + * authors: Thies C. Arntzen + * Sterling Hughes + * George Schlossnagle + */ + +#ifndef MOD_DB4_EXPORT_H +#define MOD_DB4_EXPORT_H + +#include "db.h" + +int mod_db4_db_env_create(DB_ENV **dbenvp, u_int32_t flags); +int mod_db4_db_create(DB **dbp, DB_ENV *dbenv, u_int32_t flags); +void mod_db4_child_clean_request_shutdown(); +void mod_db4_child_clean_process_shutdown(); + +#endif diff --git a/db/mod_db4/sem_utils.c b/db/mod_db4/sem_utils.c new file mode 100644 index 000000000..b01d37b1b --- /dev/null +++ b/db/mod_db4/sem_utils.c @@ -0,0 +1,116 @@ +/*- + * Copyright (c) 2004 + * Sleepycat Software. All rights reserved. + * + * http://www.apache.org/licenses/LICENSE-2.0.txt + * + * authors: Thies C. Arntzen + * Sterling Hughes + * George Schlossnagle + */ + +#include +#include +#include +#include +#include + +#include "config.h" +#include "sem_utils.h" + + +#include +extern int errno; + +#if HAVE_SEMUN +/* we have semun, no need to define */ +#else +union semun { + int val; /* value for SETVAL */ + struct semid_ds *buf; /* buffer for IPC_STAT, IPC_SET */ + unsigned short *array; /* array for GETALL, SETALL */ + /* Linux specific part: */ + struct seminfo *__buf; /* buffer for IPC_INFO */ +}; +#endif + +#ifndef SEM_R +# define SEM_R 0444 +#endif +#ifndef SEM_A +# define SEM_A 0222 +#endif + +int md4_sem_create(int semnum, unsigned short *start) +{ + int semid; + int perms = 0777; + union semun arg; + key_t key = 12345; + int count = 0; + while((semid = semget(key, semnum, IPC_CREAT | IPC_EXCL | perms)) == -1) { + if(count++ > 2) { + return -1; + } + if (errno == EEXIST) { + semid = semget(key, 1, perms); + md4_sem_destroy(semid); + } + } + arg.array = start; + if(semctl(semid, 0, SETALL, arg) < 0) { + /* destroy (FIXME) and return */ + md4_sem_destroy(semid); + return -1; + } + return semid; +} + +void md4_sem_destroy(int semid) +{ + union semun dummy; + /* semid should always be 0, this clobbers the whole set */ + semctl(semid, 0, IPC_RMID, dummy); +} + +void md4_sem_lock(int semid, int semnum) +{ + struct sembuf sops; + sops.sem_num = semnum; + sops.sem_op = -1; + sops.sem_flg = SEM_UNDO; + semop(semid, &sops, 1); +} + +void md4_sem_unlock(int semid, int semnum) +{ + struct sembuf sops; + sops.sem_num = semnum; + sops.sem_op = 1; + sops.sem_flg = SEM_UNDO; + semop(semid, &sops, 1); +} + +void md4_sem_wait_for_zero(int semid, int semnum) +{ + struct sembuf sops; + sops.sem_num = semnum; + sops.sem_op = 0; + sops.sem_flg = SEM_UNDO; + semop(semid, &sops, 1); +} + +void md4_sem_set(int semid, int semnum, int value) +{ + union semun arg; + arg.val = value; + semctl(semid, semnum, SETALL, arg); +} + +int md4_sem_get(int semid, int semnum) +{ + union semun arg; + return semctl(semid, 0, GETVAL, arg); +} + +/* vim: set ts=4 sts=4 expandtab bs=2 ai : */ diff --git a/db/mod_db4/sem_utils.h 
b/db/mod_db4/sem_utils.h new file mode 100644 index 000000000..7a3157e1f --- /dev/null +++ b/db/mod_db4/sem_utils.h @@ -0,0 +1,24 @@ +/*- + * Copyright (c) 2004 + * Sleepycat Software. All rights reserved. + * + * http://www.apache.org/licenses/LICENSE-2.0.txt + * + * authors: Thies C. Arntzen + * Sterling Hughes + * George Schlossnagle + */ + +#ifndef MOD_DB4_SEM_UTILS_H +#define MOD_DB4_SEM_UTILS_H + +extern int md4_sem_create(int semnum, unsigned short *start); +extern void md4_sem_destroy(int semid); +extern void md4_sem_lock(int semid, int semnum); +extern void md4_sem_unlock(int semid, int semnum); +extern void md4_sem_wait_for_zero(int semid, int semnum); +extern void md4_sem_set(int semid, int semnum, int value); +extern int md4_sem_get(int semid, int semnum); + +/* vim: set ts=4 sts=4 expandtab bs=2 ai : */ +#endif diff --git a/db/mod_db4/skiplist.c b/db/mod_db4/skiplist.c new file mode 100644 index 000000000..3919e86aa --- /dev/null +++ b/db/mod_db4/skiplist.c @@ -0,0 +1,503 @@ +/* ====================================================================== + * Copyright (c) 2000 Theo Schlossnagle + * All rights reserved. + * The following code was written by Theo Schlossnagle for use in the + * Backhand project at The Center for Networking and Distributed Systems + * at The Johns Hopkins University. + * + * This is a skiplist implementation to be used for abstract structures + * and is release under the LGPL license version 2.1 or later. A copy + * of this license can be found at http://www.gnu.org/copyleft/lesser.html + * ====================================================================== +*/ + +#include +#include +#include + +#include "skiplist.h" + +#ifdef USE_DMALLOC +# include +#endif + +#ifndef MIN +#define MIN(a,b) ((a 31) { /* Num bits in return of lrand48() */ + ph=0; + randseq = lrand48(); + } + ph++; + return ((randseq & (1 << (ph-1))) >> (ph-1)); +} + +void skiplisti_init(Skiplist *sl) { + sl->compare = (SkiplistComparator)NULL; + sl->comparek = (SkiplistComparator)NULL; + sl->height=0; + sl->preheight=0; + sl->size=0; + sl->top = NULL; + sl->bottom = NULL; + sl->index = NULL; +} + +static int indexing_comp(void *a, void *b) { + assert(a); + assert(b); + return (void *)(((Skiplist *)a)->compare)>(void *)(((Skiplist *)b)->compare); +} +static int indexing_compk(void *a, void *b) { + assert(b); + return a>(void *)(((Skiplist *)b)->compare); +} + +void skiplist_init(Skiplist *sl) { + skiplisti_init(sl); + sl->index = (Skiplist *)malloc(sizeof(Skiplist)); + skiplisti_init(sl->index); + skiplist_set_compare(sl->index, indexing_comp, indexing_compk); +} + +void skiplist_set_compare(Skiplist *sl, + SkiplistComparator comp, + SkiplistComparator compk) { + if(sl->compare && sl->comparek) { + skiplist_add_index(sl, comp, compk); + } else { + sl->compare = comp; + sl->comparek = compk; + } +} + +void skiplist_add_index(Skiplist *sl, + SkiplistComparator comp, + SkiplistComparator compk) { + struct skiplistnode *m; + Skiplist *ni; + int icount=0; +#ifdef SLDEBUG + fprintf(stderr, "Adding index to %p\n", sl); +#endif + skiplist_find(sl->index, (void *)comp, &m); + if(m) return; /* Index already there! */ + ni = (Skiplist *)malloc(sizeof(Skiplist)); + skiplisti_init(ni); + skiplist_set_compare(ni, comp, compk); + /* Build the new index... This can be expensive! 
*/ + m = skiplist_insert(sl->index, ni); + while(m->prev) m=m->prev, icount++; + for(m=skiplist_getlist(sl); m; skiplist_next(sl, &m)) { + int j=icount-1; + struct skiplistnode *nsln; + nsln = skiplist_insert(ni, m->data); + /* skip from main index down list */ + while(j>0) m=m->nextindex, j--; + /* insert this node in the indexlist after m */ + nsln->nextindex = m->nextindex; + if(m->nextindex) m->nextindex->previndex = nsln; + nsln->previndex = m; + m->nextindex = nsln; + } +} + +struct skiplistnode *skiplist_getlist(Skiplist *sl) { + if(!sl->bottom) return NULL; + return sl->bottom->next; +} + +void *skiplist_find(Skiplist *sl, + void *data, + struct skiplistnode **iter) { + void *ret; + struct skiplistnode *aiter; + if(!sl->compare) return 0; + if(iter) + ret = skiplist_find_compare(sl, data, iter, sl->compare); + else + ret = skiplist_find_compare(sl, data, &aiter, sl->compare); + return ret; +} +void *skiplist_find_compare(Skiplist *sli, + void *data, + struct skiplistnode **iter, + SkiplistComparator comp) { + struct skiplistnode *m = NULL; + Skiplist *sl; + if(comp==sli->compare || !sli->index) { + sl = sli; + } else { + skiplist_find(sli->index, (void *)comp, &m); + assert(m); + sl=m->data; + } + skiplisti_find_compare(sl, data, iter, sl->comparek); + return (*iter)?((*iter)->data):(*iter); +} +int skiplisti_find_compare(Skiplist *sl, + void *data, + struct skiplistnode **ret, + SkiplistComparator comp) { + struct skiplistnode *m = NULL; + int count=0; + m = sl->top; + while(m) { + int compared; + if(m->next) compared=comp(data, m->next->data); + if(compared == 0) { +#ifdef SL_DEBUG + printf("Looking -- found in %d steps\n", count); +#endif + m=m->next; + while(m->down) m=m->down; + *ret = m; + return count; + } + if((m->next == NULL) || (compared<0)) + m = m->down, count++; + else + m = m->next, count++; + } +#ifdef SL_DEBUG + printf("Looking -- not found in %d steps\n", count); +#endif + *ret = NULL; + return count; +} +void *skiplist_next(Skiplist *sl, struct skiplistnode **iter) { + if(!*iter) return NULL; + *iter = (*iter)->next; + return (*iter)?((*iter)->data):NULL; +} +void *skiplist_previous(Skiplist *sl, struct skiplistnode **iter) { + if(!*iter) return NULL; + *iter = (*iter)->prev; + return (*iter)?((*iter)->data):NULL; +} +struct skiplistnode *skiplist_insert(Skiplist *sl, + void *data) { + if(!sl->compare) return 0; + return skiplist_insert_compare(sl, data, sl->compare); +} + +struct skiplistnode *skiplist_insert_compare(Skiplist *sl, + void *data, + SkiplistComparator comp) { + struct skiplistnode *m, *p, *tmp, *ret, **stack; + int nh=1, ch, stacki; +#ifdef SLDEBUG + skiplist_print_struct(sl, "BI: "); +#endif + if(!sl->top) { + sl->height = 1; + sl->topend = sl->bottomend = sl->top = sl->bottom = + (struct skiplistnode *)malloc(sizeof(struct skiplistnode)); + assert(sl->top); + sl->top->next = sl->top->data = sl->top->prev = + sl->top->up = sl->top->down = + sl->top->nextindex = sl->top->previndex = NULL; + sl->top->sl = sl; + } + if(sl->preheight) { + while(nh < sl->preheight && get_b_rand()) nh++; + } else { + while(nh <= sl->height && get_b_rand()) nh++; + } + /* Now we have the new height at which we wish to insert our new node */ + /* Let us make sure that our tree is a least that tall (grow if necessary)*/ + for(;sl->heightheight++) { + sl->top->up = + (struct skiplistnode *)malloc(sizeof(struct skiplistnode)); + assert(sl->top->up); + sl->top->up->down = sl->top; + sl->top = sl->topend = sl->top->up; + sl->top->prev = sl->top->next = sl->top->nextindex = + 
sl->top->previndex = sl->top->up = NULL; + sl->top->data = NULL; + sl->top->sl = sl; + } + ch = sl->height; + /* Find the node (or node after which we would insert) */ + /* Keep a stack to pop back through for insertion */ + m = sl->top; + stack = (struct skiplistnode **)malloc(sizeof(struct skiplistnode *)*(nh)); + stacki=0; + while(m) { + int compared=-1; + if(m->next) compared=comp(data, m->next->data); + if(compared == 0) { + free(stack); + return 0; + } + if((m->next == NULL) || (compared<0)) { + if(ch<=nh) { + /* push on stack */ + stack[stacki++] = m; + } + m = m->down; + ch--; + } else { + m = m->next; + } + } + /* Pop the stack and insert nodes */ + p = NULL; + for(;stacki>0;stacki--) { + m = stack[stacki-1]; + tmp = (struct skiplistnode *)malloc(sizeof(struct skiplistnode)); + tmp->next = m->next; + if(m->next) m->next->prev=tmp; + tmp->prev = m; + tmp->up = NULL; + tmp->nextindex = tmp->previndex = NULL; + tmp->down = p; + if(p) p->up=tmp; + tmp->data = data; + tmp->sl = sl; + m->next = tmp; + /* This sets ret to the bottom-most node we are inserting */ + if(!p) ret=tmp; + p = tmp; + } + free(stack); + if(sl->index != NULL) { + /* this is a external insertion, we must insert into each index as well */ + struct skiplistnode *p, *ni, *li; + li=ret; + for(p = skiplist_getlist(sl->index); p; skiplist_next(sl->index, &p)) { + ni = skiplist_insert((Skiplist *)p->data, ret->data); + assert(ni); +#ifdef SLDEBUG + fprintf(stderr, "Adding %p to index %p\n", ret->data, p->data); +#endif + li->nextindex = ni; + ni->previndex = li; + li = ni; + } + } else { + sl->size++; + } +#ifdef SLDEBUG + skiplist_print_struct(sl, "AI: "); +#endif + return ret; +} +struct skiplistnode *skiplist_append(Skiplist *sl, void *data) { + int nh=1, ch, compared; + struct skiplistnode *lastnode, *nodeago; + if(sl->bottomend != sl->bottom) { + compared=sl->compare(data, sl->bottomend->prev->data); + /* If it doesn't belong at the end, then fail */ + if(compared<=0) return NULL; + } + if(sl->preheight) { + while(nh < sl->preheight && get_b_rand()) nh++; + } else { + while(nh <= sl->height && get_b_rand()) nh++; + } + /* Now we have the new height at which we wish to insert our new node */ + /* Let us make sure that our tree is a least that tall (grow if necessary)*/ + lastnode = sl->bottomend; + nodeago = NULL; + + if(!lastnode) return skiplist_insert(sl, data); + + for(;sl->heightheight++) { + sl->top->up = + (struct skiplistnode *)malloc(sizeof(struct skiplistnode)); + assert(sl->top); + sl->top->up->down = sl->top; + sl->top = sl->top->up; + sl->top->prev = sl->top->next = sl->top->nextindex = + sl->top->previndex = NULL; + sl->top->data = NULL; + sl->top->sl = sl; + } + ch = sl->height; + while(nh) { + struct skiplistnode *anode; + anode = + (struct skiplistnode *)malloc(sizeof(struct skiplistnode)); + anode->next = lastnode; + anode->prev = lastnode->prev; + anode->up = NULL; + anode->down = nodeago; + if(lastnode->prev) { + if(lastnode == sl->bottom) + sl->bottom = anode; + else if (lastnode == sl->top) + sl->top = anode; + } + nodeago = anode; + lastnode = lastnode->up; + nh--; + } + sl->size++; + return sl->bottomend; +} +Skiplist *skiplist_concat(Skiplist *sl1, Skiplist *sl2) { + /* Check integrity! 
*/ + int compared, eheight; + Skiplist temp; + struct skiplistnode *lbottom, *lbottomend, *b1, *e1, *b2, *e2; + if(sl1->bottomend == NULL || sl1->bottomend->prev == NULL) { + skiplist_remove_all(sl1, free); + temp = *sl1; + *sl1 = *sl2; + *sl2 = temp; + /* swap them so that sl2 can be freed normally upon return. */ + return sl1; + } + if(sl2->bottom == NULL || sl2->bottom->next == NULL) { + skiplist_remove_all(sl2, free); + return sl1; + } + compared = sl1->compare(sl1->bottomend->prev->data, sl2->bottom->data); + /* If it doesn't belong at the end, then fail */ + if(compared<=0) return NULL; + + /* OK now append sl2 onto sl1 */ + lbottom = lbottomend = NULL; + eheight = MIN(sl1->height, sl2->height); + b1 = sl1->bottom; e1 = sl1->bottomend; + b2 = sl2->bottom; e2 = sl2->bottomend; + while(eheight) { + e1->prev->next = b2; + b2->prev = e1->prev->next; + e2->prev->next = e1; + e1->prev = e2->prev; + e2->prev = NULL; + b2 = e2; + b1->down = lbottom; + e1->down = lbottomend; + if(lbottom) lbottom->up = b1; + if(lbottomend) lbottomend->up = e1; + + lbottom = b1; + lbottomend = e1; + } + /* Take the top of the longer one (if it is sl2) and make it sl1's */ + if(sl2->height > sl1->height) { + b1->up = b2->up; + e1->up = e2->up; + b1->up->down = b1; + e1->up->down = e1; + sl1->height = sl2->height; + sl1->top = sl2->top; + sl1->topend = sl2->topend; + } + + /* move the top pointer to here if it isn't there already */ + sl2->top = sl2->topend = b2; + sl2->top->up = NULL; /* If it isn't already */ + sl1->size += sl2->size; + skiplist_remove_all(sl2, free); + return sl1; +} +int skiplist_remove(Skiplist *sl, + void *data, FreeFunc myfree) { + if(!sl->compare) return 0; + return skiplist_remove_compare(sl, data, myfree, sl->comparek); +} +void skiplist_print_struct(Skiplist *sl, char *prefix) { + struct skiplistnode *p, *q; + fprintf(stderr, "Skiplist Structure (height: %d)\n", sl->height); + p = sl->bottom; + while(p) { + q = p; + fprintf(stderr, prefix); + while(q) { + fprintf(stderr, "%p ", q->data); + q=q->up; + } + fprintf(stderr, "\n"); + p=p->next; + } +} +int skiplisti_remove(Skiplist *sl, struct skiplistnode *m, FreeFunc myfree) { + struct skiplistnode *p; + if(!m) return 0; + if(m->nextindex) skiplisti_remove(m->nextindex->sl, m->nextindex, NULL); + else sl->size--; +#ifdef SLDEBUG + skiplist_print_struct(sl, "BR:"); +#endif + while(m->up) m=m->up; + while(m) { + p=m; + p->prev->next = p->next; /* take me out of the list */ + if(p->next) p->next->prev = p->prev; /* take me out of the list */ + m=m->down; + /* This only frees the actual data in the bottom one */ + if(!m && myfree && p->data) myfree(p->data); + free(p); + } + while(sl->top && sl->top->next == NULL) { + /* While the row is empty and we are not on the bottom row */ + p = sl->top; + sl->top = sl->top->down; /* Move top down one */ + if(sl->top) sl->top->up = NULL; /* Make it think its the top */ + free(p); + sl->height--; + } + if(!sl->top) sl->bottom = NULL; + assert(sl->height>=0); +#ifdef SLDEBUG + skiplist_print_struct(sl, "AR: "); +#endif + return sl->height; +} +int skiplist_remove_compare(Skiplist *sli, + void *data, + FreeFunc myfree, SkiplistComparator comp) { + struct skiplistnode *m; + Skiplist *sl; + if(comp==sli->comparek || !sli->index) { + sl = sli; + } else { + skiplist_find(sli->index, (void *)comp, &m); + assert(m); + sl=m->data; + } + skiplisti_find_compare(sl, data, &m, comp); + if(!m) return 0; + while(m->previndex) m=m->previndex; + return skiplisti_remove(sl, m, myfree); +} +void 
skiplist_remove_all(Skiplist *sl, FreeFunc myfree) { + /* This must remove even the place holder nodes (bottom though top) + because we specify in the API that one can free the Skiplist after + making this call without memory leaks */ + struct skiplistnode *m, *p, *u; + m=sl->bottom; + while(m) { + p = m->next; + if(myfree && p->data) myfree(p->data); + while(m) { + u = m->up; + free(m); + m=u; + } + m = p; + } + sl->top = sl->bottom = NULL; + sl->height = 0; + sl->size = 0; +} + +void *skiplist_pop(Skiplist * a, FreeFunc myfree) +{ + struct skiplistnode *sln; + void *data = NULL; + sln = skiplist_getlist(a); + if (sln) { + data = sln->data; + skiplisti_remove(a, sln, myfree); + } + return data; +} diff --git a/db/mod_db4/skiplist.h b/db/mod_db4/skiplist.h new file mode 100644 index 000000000..bf8c6e868 --- /dev/null +++ b/db/mod_db4/skiplist.h @@ -0,0 +1,84 @@ +/* ====================================================================== + * Copyright (c) 2000 Theo Schlossnagle + * All rights reserved. + * The following code was written by Theo Schlossnagle for use in the + * Backhand project at The Center for Networking and Distributed Systems + * at The Johns Hopkins University. + * + * This is a skiplist implementation to be used for abstract structures + * and is release under the LGPL license version 2.1 or later. A copy + * of this license can be found at http://www.gnu.org/copyleft/lesser.html + * ====================================================================== +*/ + +#ifndef _SKIPLIST_P_H +#define _SKIPLIST_P_H + +/* This is a skiplist implementation to be used for abstract structures + within the Spread multicast and group communication toolkit + + This portion written by -- Theo Schlossnagle +*/ + +/* This is the function type that must be implemented per object type + that is used in a skiplist for comparisons to maintain order */ +typedef int (*SkiplistComparator)(void *, void *); +typedef void (*FreeFunc)(void *); + +struct skiplistnode; + +typedef struct _iskiplist { + SkiplistComparator compare; + SkiplistComparator comparek; + int height; + int preheight; + int size; + struct skiplistnode *top; + struct skiplistnode *bottom; + /* These two are needed for appending */ + struct skiplistnode *topend; + struct skiplistnode *bottomend; + struct _iskiplist *index; +} Skiplist; + +struct skiplistnode { + void *data; + struct skiplistnode *next; + struct skiplistnode *prev; + struct skiplistnode *down; + struct skiplistnode *up; + struct skiplistnode *previndex; + struct skiplistnode *nextindex; + Skiplist *sl; +}; + + +void skiplist_init(Skiplist *sl); +void skiplist_set_compare(Skiplist *sl, SkiplistComparator, + SkiplistComparator); +void skiplist_add_index(Skiplist *sl, SkiplistComparator, + SkiplistComparator); +struct skiplistnode *skiplist_getlist(Skiplist *sl); +void *skiplist_find_compare(Skiplist *sl, void *data, struct skiplistnode **iter, + SkiplistComparator func); +void *skiplist_find(Skiplist *sl, void *data, struct skiplistnode **iter); +void *skiplist_next(Skiplist *sl, struct skiplistnode **); +void *skiplist_previous(Skiplist *sl, struct skiplistnode **); + +struct skiplistnode *skiplist_insert_compare(Skiplist *sl, + void *data, SkiplistComparator comp); +struct skiplistnode *skiplist_insert(Skiplist *sl, void *data); +int skiplist_remove_compare(Skiplist *sl, void *data, + FreeFunc myfree, SkiplistComparator comp); +int skiplist_remove(Skiplist *sl, void *data, FreeFunc myfree); +int skiplisti_remove(Skiplist *sl, struct skiplistnode *m, FreeFunc myfree); 
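/*
 * Hedged usage sketch (hypothetical caller, not from the patch): the
 * Skiplist declared in skiplist.h above is an ordered container driven
 * entirely by user-supplied comparators.  Here both the data comparator
 * and the key comparator simply compare C strings.
 */
#include <stdio.h>
#include <string.h>
#include "skiplist.h"

static int str_cmp(void *a, void *b)
{
    return strcmp((char *)a, (char *)b);
}

static void skiplist_example(void)
{
    Skiplist sl;
    char *hit;

    skiplist_init(&sl);
    skiplist_set_compare(&sl, str_cmp, str_cmp);

    skiplist_insert(&sl, "btree");
    skiplist_insert(&sl, "hash");
    skiplist_insert(&sl, "queue");

    hit = skiplist_find(&sl, "hash", NULL);       /* NULL iterator is allowed */
    printf("found: %s\n", hit ? hit : "(none)");

    skiplist_remove(&sl, "btree", NULL);          /* no free callback needed */
    skiplist_remove_all(&sl, NULL);               /* drop the remaining nodes */
}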
+void skiplist_remove_all(Skiplist *sl, FreeFunc myfree); + +int skiplisti_find_compare(Skiplist *sl, + void *data, + struct skiplistnode **ret, + SkiplistComparator comp); + +void *skiplist_pop(Skiplist * a, FreeFunc myfree); + +#endif diff --git a/db/mod_db4/utils.c b/db/mod_db4/utils.c new file mode 100644 index 000000000..1ee4bbdd7 --- /dev/null +++ b/db/mod_db4/utils.c @@ -0,0 +1,615 @@ +/*- + * Copyright (c) 2004 + * Sleepycat Software. All rights reserved. + * + * http://www.apache.org/licenses/LICENSE-2.0.txt + * + * authors: Thies C. Arntzen + * Sterling Hughes + * George Schlossnagle + */ + +#include "httpd.h" +#include "http_config.h" +#include "http_core.h" /* For REMOTE_NAME */ +#include "http_log.h" + +#include "sem_utils.h" +#include "skiplist.h" +#include "db.h" +#include "mm_hash.h" + +/* the semaphore set for the application */ +static int semset; + +/* process-local handle for global ref count management */ +/* individual semaphores */ +#define OK_TO_PROCEED 0 +#define GLOBAL_LOCK 1 +#define NUM_SEMS 2 + +/* mm helpers */ +static MM *mm; +static MM_Hash *ref_counts; + +/* locks */ +int env_locks_init() +{ + char shmpath[32]; + unsigned short start[2] = { 1, 1 }; + + snprintf(shmpath, 32, "/tmp/.mod_db4.%d", getpid()); + mm = mm_create(0, shmpath); + if(NULL == mm) { + return -1; + } + mm_lock(mm, MM_LOCK_RW); + ref_counts = mm_hash_new(mm, NULL); + mm_unlock(mm); + if((semset = md4_sem_create(NUM_SEMS, start)) < 0) { + return -1; + } + return 0; +} + +void env_global_rw_lock() +{ + mm_lock(mm, MM_LOCK_RW); +} + +void env_global_rd_lock() +{ + mm_lock(mm, MM_LOCK_RD); +} + +void env_global_unlock() +{ + mm_unlock(mm); +} + +void env_wait_for_child_crash() +{ + md4_sem_wait_for_zero(semset, OK_TO_PROCEED); +} + +void env_child_crash() +{ + md4_sem_set(semset, OK_TO_PROCEED, 0); +} + +void env_ok_to_proceed() +{ + md4_sem_set(semset, OK_TO_PROCEED, 1); +} + +/* process resource globals */ +static Skiplist open_transactions; +static Skiplist open_cursors; +static Skiplist open_log_cursors; +static Skiplist open_dbs; +static Skiplist open_dbenvs; + +/* named pointers for db_env and bd skiplists */ +struct named_resource { + char *name; + void *ptr; +}; + +/* skiplist comparitors for void pointers */ + +static int VP_COMPARE(void *a, void *b) +{ + return (a < b) ? (-1) : ((a == b) ? (0) : (1)); +} + +/* key for comparing DB *'s in skiplist */ + +struct db_key { + const char *fname; + const char *dname; +}; + +static int DB_COMPARE(void *a, void *b) +{ + int ret; + DB *ae = (DB *) a; + DB *be = (DB *) b; + if(ae->fname == NULL) { + if(be->fname == NULL) { + return (ae < be) ? (-1) : ((ae == be) ? (0) : (1)); + } + return 1; + } + else if(be->fname == NULL) { + /* ae->fname != NULL, from above */ + return -1; + } + ret = strcmp(ae->fname, be->fname); + if(ret == 0) { + if(ae->dname == NULL) { + if(be->dname == NULL) { + return 0; + } + return 1; + } + else if(be->dname == NULL) { + return -1; + } + ret = strcmp(ae->dname, be->dname); + } + return ret; +} + +static int DB_COMPARE_K(void *a, void *b) +{ + struct db_key *akey = (struct db_key *) a; + DB *be = (DB *) b; + int ret; + if(akey->fname == NULL) { + if(be->fname == NULL) { + /* should never match here */ + return (a < b) ? (-1) : ((a == b) ? 
(0) : (1)); + } + return 1; + } + else if(be->fname == NULL) { + /* akey->fname != NULL, from above */ + return -1; + } + ret = strcmp(akey->fname, be->fname); + if(ret == 0) { + if(akey->dname == NULL) { + if(be->dname == NULL) { + return 0; + } + return 1; + } + else if(be->dname == NULL) { + return -1; + } + ret = strcmp(akey->dname, be->dname); + } + return ret; +} + +static int DBENV_COMPARE(void *a, void *b) +{ + DB_ENV *ae = (DB_ENV *) a; + DB_ENV *be = (DB_ENV *) b; + return strcmp(ae->db_home, be->db_home); +} + +static int DBENV_COMPARE_K(void *a, void *b) +{ + const char *aname = (const char *) a; + DB_ENV *be = (DB_ENV *) b; + return strcmp(aname, be->db_home); +} + +void env_rsrc_list_init() +{ + skiplist_init(&open_transactions); + skiplist_set_compare(&open_transactions, VP_COMPARE, VP_COMPARE); + + skiplist_init(&open_cursors); + skiplist_set_compare(&open_cursors, VP_COMPARE, VP_COMPARE); + + skiplist_init(&open_log_cursors); + skiplist_set_compare(&open_log_cursors, VP_COMPARE, VP_COMPARE); + + skiplist_init(&open_dbs); + skiplist_set_compare(&open_dbs, DB_COMPARE, DB_COMPARE_K); + + skiplist_init(&open_dbenvs); + skiplist_set_compare(&open_dbenvs, DBENV_COMPARE, DBENV_COMPARE_K); +} + +static void register_cursor(DBC *dbc) +{ + skiplist_insert(&open_cursors, dbc); +} + +static void unregister_cursor(DBC *dbc) +{ + skiplist_remove(&open_cursors, dbc, NULL); +} + +static void register_log_cursor(DB_LOGC *cursor) +{ + skiplist_insert(&open_log_cursors, cursor); +} + +static void unregister_log_cursor(DB_LOGC *cursor) +{ + skiplist_remove(&open_log_cursors, cursor, NULL); +} + +static void register_transaction(DB_TXN *txn) +{ + skiplist_insert(&open_transactions, txn); +} + +static void unregister_transaction(DB_TXN *txn) +{ + skiplist_remove(&open_transactions, txn, NULL); +} + +static DB *retrieve_db(const char *fname, const char *dname) +{ + DB *rv; + struct db_key key; + if(fname == NULL) { + return NULL; + } + key.fname = fname; + key.dname = dname; + rv = skiplist_find(&open_dbs, (void *) &key, NULL); + return rv; +} + +static void register_db(DB *db) +{ + skiplist_insert(&open_dbs, db); +} + +static void unregister_db(DB *db) +{ + struct db_key key; + key.fname = db->fname; + key.dname = db->dname; + skiplist_remove(&open_dbs, &key, NULL); +} + +static DB_ENV *retrieve_db_env(const char *db_home) +{ + return skiplist_find(&open_dbenvs, (void *) db_home, NULL); +} + +static void register_db_env(DB_ENV *dbenv) +{ + global_ref_count_increase(dbenv->db_home); + skiplist_insert(&open_dbenvs, dbenv); +} + +static void unregister_db_env(DB_ENV *dbenv) +{ + global_ref_count_decrease(dbenv->db_home); + skiplist_remove(&open_dbenvs, dbenv->db_home, NULL); +} + +int global_ref_count_increase(char *path) +{ + int refcount = 0; + int pathlen = 0; + pathlen = strlen(path); + + env_global_rw_lock(); + refcount = (int) mm_hash_find(ref_counts, path, pathlen); + refcount++; + mm_hash_update(ref_counts, path, pathlen, (void *)refcount); + env_global_unlock(); + return refcount; +} + +int global_ref_count_decrease(char *path) +{ + int refcount = 0; + int pathlen = 0; + pathlen = strlen(path); + + env_global_rw_lock(); + refcount = (int) mm_hash_find(ref_counts, path, pathlen); + if(refcount > 0) refcount--; + mm_hash_update(ref_counts, path, pathlen, (void *)refcount); + env_global_unlock(); + return refcount; +} + +int global_ref_count_get(const char *path) +{ + int refcount = 0; + int pathlen = 0; + pathlen = strlen(path); + + env_global_rd_lock(); + refcount = (int) 
mm_hash_find(ref_counts, path, pathlen); + env_global_unlock(); + return refcount; +} + +void global_ref_count_clean() +{ + env_global_rd_lock(); + mm_hash_free(ref_counts); + ref_counts = mm_hash_new(mm, NULL); + env_global_unlock(); +} + +/* wrapper methods {{{ */ + +static int (*old_log_cursor_close)(DB_LOGC *, u_int32_t) = NULL; +static int new_log_cursor_close(DB_LOGC *cursor, u_int32_t flags) +{ + unregister_log_cursor(cursor); + return old_log_cursor_close(cursor, flags); +} + +static int (*old_db_txn_abort)(DB_TXN *) = NULL; +static int new_db_txn_abort(DB_TXN *tid) +{ + unregister_transaction(tid); + return old_db_txn_abort(tid); +} + +static int (*old_db_txn_commit)(DB_TXN *, u_int32_t) = NULL; +static int new_db_txn_commit(DB_TXN *tid, u_int32_t flags) +{ + unregister_transaction(tid); + return old_db_txn_commit(tid, flags); +} + +static int (*old_db_txn_discard)(DB_TXN *, u_int32_t) = NULL; +static int new_db_txn_discard(DB_TXN *tid, u_int32_t flags) +{ + unregister_transaction(tid); + return old_db_txn_discard(tid, flags); +} + +static int (*old_db_env_txn_begin)(DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t); +static int new_db_env_txn_begin(DB_ENV *env, DB_TXN *parent, DB_TXN **tid, u_int32_t flags) +{ + int ret; + if((ret = old_db_env_txn_begin(env, parent, tid, flags)) == 0) { + register_transaction(*tid); + /* overload DB_TXN->abort */ + if(old_db_txn_abort == NULL) { + old_db_txn_abort = (*tid)->abort; + } + (*tid)->abort = new_db_txn_abort; + + /* overload DB_TXN->commit */ + if(old_db_txn_commit == NULL) { + old_db_txn_commit = (*tid)->commit; + } + (*tid)->commit = new_db_txn_commit; + + /* overload DB_TXN->discard */ + if(old_db_txn_discard == NULL) { + old_db_txn_discard = (*tid)->discard; + } + (*tid)->discard = new_db_txn_discard; + } + return ret; +} + +static int (*old_db_env_open)(DB_ENV *, const char *, u_int32_t, int) = NULL; +static int new_db_env_open(DB_ENV *dbenv, const char *db_home, u_int32_t flags, int mode) +{ + int ret =666; + DB_ENV *cached_dbenv; + flags |= DB_INIT_MPOOL; + /* if global ref count is 0, open for recovery */ + if(global_ref_count_get(db_home) == 0) { + flags |= DB_RECOVER; + flags |= DB_INIT_TXN; + flags |= DB_CREATE; + } + if((cached_dbenv = retrieve_db_env(db_home)) != NULL) { + memcpy(dbenv, cached_dbenv, sizeof(DB_ENV)); + ret = 0; + } + else if((ret = old_db_env_open(dbenv, db_home, flags, mode)) == 0) { + register_db_env(dbenv); + } + return ret; +} + +static int(*old_db_env_close)(DB_ENV *, u_int32_t) = NULL; +static int new_db_env_close(DB_ENV *dbenv, u_int32_t flags) +{ + int ret; + /* we're already locked */ + unregister_db_env(dbenv); + ret = old_db_env_close(dbenv, flags); +} + +static int (*old_db_env_log_cursor)(DB_ENV *, DB_LOGC **, u_int32_t) = NULL; +static int new_db_env_log_cursor(DB_ENV *dbenv, DB_LOGC **cursop, u_int32_t flags) +{ + int ret; + if((ret = old_db_env_log_cursor(dbenv, cursop, flags)) == 0) { + register_log_cursor(*cursop); + if(old_log_cursor_close == NULL) { + old_log_cursor_close = (*cursop)->close; + } + (*cursop)->close = new_log_cursor_close; + } + return ret; +} + +static int (*old_db_open)(DB *, DB_TXN *, const char *, const char *, DBTYPE, u_int32_t, int) = NULL; +static int new_db_open(DB *db, DB_TXN *txnid, const char *file, + const char *database, DBTYPE type, u_int32_t flags, int mode) +{ + int ret; + DB *cached_db; + + cached_db = retrieve_db(file, database); + if(cached_db) { + memcpy(db, cached_db, sizeof(DB)); + ret = 0; + } + else if((ret = old_db_open(db, txnid, file, database, type, 
flags, mode)) == 0) { + register_db(db); + } + return ret; +} + +static int (*old_db_close)(DB *, u_int32_t) = NULL; +static int new_db_close(DB *db, u_int32_t flags) +{ + unregister_db(db); + ap_log_error(APLOG_MARK, APLOG_ERR, NULL, "closing DB* (%p)\n", db); + return old_db_close(db, flags); +} + + +static int (*old_dbc_close)(DBC *); +static int new_dbc_close(DBC *cursor) +{ + unregister_cursor(cursor); + return old_dbc_close(cursor); +} + +static int (*old_dbc_dup)(DBC *, DBC **, u_int32_t) = NULL; +static int new_dbc_dup(DBC *oldcursor, DBC **newcursor, u_int32_t flags) +{ + int ret; + if((ret = old_dbc_dup(oldcursor, newcursor, flags)) == 0) { + register_cursor(*newcursor); + + /* overload DBC->c_close */ + (*newcursor)->c_close = oldcursor->c_close; + + /* overload DBC->c_dup */ + (*newcursor)->c_dup = oldcursor->c_dup; + } + return ret; +} + +static int (*old_db_cursor)(DB *, DB_TXN *, DBC **, u_int32_t) = NULL; +static int new_db_cursor(DB *db, DB_TXN *txnid, DBC **cursop, u_int32_t flags) +{ + int ret; + if((ret = old_db_cursor(db, txnid, cursop, flags)) == 0) { + register_cursor(*cursop); + + /* overload DBC->c_close */ + if(old_dbc_close == NULL) { + old_dbc_close = (*cursop)->c_close; + } + (*cursop)->c_close = new_dbc_close; + + /* overload DBC->c_dup */ + if(old_dbc_dup == NULL) { + old_dbc_dup = (*cursop)->c_dup; + } + (*cursop)->c_dup = new_dbc_dup; + } + return ret; +} + +/* }}} */ + +/* {{{ new DB_ENV constructor + */ + +int mod_db4_db_env_create(DB_ENV **dbenvp, u_int32_t flags) +{ + int cachesize = 0; + int ret; + DB_ENV *dbenv; + + if ((ret = db_env_create(dbenvp, 0)) != 0) { + /* FIXME report error */ + + return ret; + } + dbenv = *dbenvp; + /* Here we set defaults settings for the db_env */ + /* grab context info from httpd.conf for error file */ + /* grab context info for cachesize */ + if (0 && cachesize) { + if(( ret = dbenv->set_cachesize(dbenv, 0, cachesize, 0)) != 0) { + dbenv->err(dbenv, ret, "set_cachesize"); + dbenv->close(dbenv, 0); + } + } + /* overload DB_ENV->open */ + if(old_db_env_open == NULL) { + old_db_env_open = dbenv->open; + } + dbenv->open = new_db_env_open; + + /* overload DB_ENV->close */ + if(old_db_env_close == NULL) { + old_db_env_close = dbenv->close; + } + dbenv->close = new_db_env_close; + + /* overload DB_ENV->log_cursor */ + if(old_db_env_log_cursor == NULL) { + old_db_env_log_cursor = dbenv->log_cursor; + } + dbenv->log_cursor = new_db_env_log_cursor; + + /* overload DB_ENV->txn_begin */ + if(old_db_env_txn_begin == NULL) { + old_db_env_txn_begin = dbenv->txn_begin; + } + dbenv->txn_begin = new_db_env_txn_begin; + return 0; +} +/* }}} */ + +/* {{{ new DB constructor + */ +int mod_db4_db_create(DB **dbp, DB_ENV *dbenv, u_int32_t flags) +{ + int ret; + +flags = 0; + + if((ret = db_create(dbp, dbenv, flags)) == 0) { + // FIXME this should be removed I think register_db(*dbp); + /* overload DB->open */ + if(old_db_open == NULL) { + old_db_open = (*dbp)->open; + } + (*dbp)->open = new_db_open; + + /* overload DB->close */ + if(old_db_close == NULL) { + old_db_close = (*dbp)->close; + } + (*dbp)->close = new_db_close; + + /* overload DB->cursor */ + if(old_db_cursor == NULL) { + old_db_cursor = (*dbp)->cursor; + } + (*dbp)->cursor = new_db_cursor; + } + return ret; +} + +/* }}} */ + +void child_clean_request_shutdown() +{ + DBC *cursor; + DB_TXN *transaction; + while(cursor = (DBC *)skiplist_pop(&open_cursors, NULL)) { + cursor->c_close(cursor); + } + while(transaction = (DB_TXN *)skiplist_pop(&open_transactions, NULL)) { + 
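/*
 * Hedged sketch of a hypothetical client of the wrappers above (in the
 * real module the consumer is the PHP-side glue): everything goes through
 * the two constructors exported in mod_db4_export.h, and the overloaded
 * open/close methods handle caching, ref-counting and recovery.  The
 * environment home and database name below are made up.
 */
#include "mod_db4_export.h"

static int open_request_db(DB_ENV **dbenvp, DB **dbp)
{
    int ret;

    if ((ret = mod_db4_db_env_create(dbenvp, 0)) != 0)
        return ret;
    if ((ret = (*dbenvp)->open(*dbenvp, "/var/mod_db4",
        DB_CREATE | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG, 0)) != 0)
        return ret;

    if ((ret = mod_db4_db_create(dbp, *dbenvp, 0)) != 0)
        return ret;
    if ((ret = (*dbp)->open(*dbp, NULL, "sessions.db", NULL,
        DB_BTREE, DB_CREATE, 0664)) != 0)
        return ret;

    /* ... at end of request: mod_db4_child_clean_request_shutdown(); */
    return 0;
}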
transaction->abort(transaction); + } +} + +void child_clean_process_shutdown() +{ + DB *db; + DB_ENV *dbenv; + child_clean_request_shutdown(); + while(db = (DB *)skiplist_pop(&open_dbs, NULL)) { + ap_log_error(APLOG_MARK, APLOG_ERR, NULL, "calling close on %x\n", db); + db->close(db, 0); + /* destroy db FIXME */ + } + while(dbenv = (DB_ENV *)skiplist_pop(&open_dbenvs, NULL)) { + global_ref_count_decrease(dbenv->db_home); + dbenv->close(dbenv, 0); + } +} +/* vim: set ts=4 sts=4 expandtab bs=2 ai fdm=marker: */ diff --git a/db/mod_db4/utils.h b/db/mod_db4/utils.h new file mode 100644 index 000000000..9fe70a0fa --- /dev/null +++ b/db/mod_db4/utils.h @@ -0,0 +1,35 @@ +/*- + * Copyright (c) 2004 + * Sleepycat Software. All rights reserved. + * + * http://www.apache.org/licenses/LICENSE-2.0.txt + * + * authors: Thies C. Arntzen + * Sterling Hughes + * George Schlossnagle + */ + +#ifndef DB4_UTILS_H +#define DB4_UTILS_H + +#include "db.h" +#include "mod_db4_export.h" + +/* locks */ +int env_locks_init(); +void env_global_rw_lock(); +void env_global_rd_lock(); +void env_global_unlock(); +void env_wait_for_child_crash(); +void env_child_crash(); +void env_ok_to_proceed(); + +void env_rsrc_list_init(); + +int global_ref_count_increase(char *path); +int global_ref_count_decrease(char *path); +int global_ref_count_get(const char *path); +void global_ref_count_clean(); + +#endif +/* vim: set ts=4 sts=4 expandtab bs=2 ai fdm=marker: */ diff --git a/db/mp/mp_alloc.c b/db/mp/mp_alloc.c index bc9e234f8..ff02d64e3 100644 --- a/db/mp/mp_alloc.c +++ b/db/mp/mp_alloc.c @@ -1,14 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: mp_alloc.c,v 11.46 2004/09/15 21:49:19 mjc Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: mp_alloc.c,v 11.40 2003/07/03 02:24:34 bostic Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #include @@ -19,11 +18,6 @@ static const char revid[] = "$Id: mp_alloc.c,v 11.40 2003/07/03 02:24:34 bostic #include "dbinc/db_shash.h" #include "dbinc/mp.h" -typedef struct { - DB_MPOOL_HASH *bucket; - u_int32_t priority; -} HS; - static void __memp_bad_buffer __P((DB_MPOOL_HASH *)); /* @@ -56,7 +50,7 @@ __memp_alloc(dbmp, memreg, mfp, len, offsetp, retp) dbenv = dbmp->dbenv; c_mp = memreg->primary; - dbht = R_ADDR(memreg, c_mp->htab); + dbht = R_ADDR(dbenv, memreg, c_mp->htab); hp_end = &dbht[c_mp->htab_buckets]; buckets = buffers = put_counter = total_buckets = 0; @@ -91,13 +85,13 @@ __memp_alloc(dbmp, memreg, mfp, len, offsetp, retp) * we need in the hopes it will coalesce into a contiguous chunk of the * right size. In the latter case we branch back here and try again. */ -alloc: if ((ret = __db_shalloc(memreg->addr, len, MUTEX_ALIGN, &p)) == 0) { +alloc: if ((ret = __db_shalloc(memreg, len, MUTEX_ALIGN, &p)) == 0) { if (mfp != NULL) c_mp->stat.st_pages++; R_UNLOCK(dbenv, memreg); found: if (offsetp != NULL) - *offsetp = R_OFFSET(memreg, p); + *offsetp = R_OFFSET(dbenv, memreg, p); *(void **)retp = p; /* @@ -208,7 +202,7 @@ found: if (offsetp != NULL) (void)__memp_sync_int( dbenv, NULL, 0, DB_SYNC_ALLOC, NULL); - (void)__os_sleep(dbenv, 1, 0); + __os_sleep(dbenv, 1, 0); break; default: aggressive = 1; @@ -268,7 +262,7 @@ found: if (offsetp != NULL) buffers++; /* Find the associated MPOOLFILE. 
*/ - bh_mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset); + bh_mfp = R_ADDR(dbenv, dbmp->reginfo, bhp->mf_offset); /* If the page is dirty, pin it and write it. */ ret = 0; @@ -311,8 +305,8 @@ found: if (offsetp != NULL) goto found; } - freed_space += __db_shsizeof(bhp); - __memp_bhfree(dbmp, hp, bhp, 1); + freed_space += __db_shalloc_sizeof(bhp); + __memp_bhfree(dbmp, hp, bhp, BH_FREE_FREEMEM); if (aggressive > 1) aggressive = 1; diff --git a/db/mp/mp_bh.c b/db/mp/mp_bh.c index 2069de014..44296c078 100644 --- a/db/mp/mp_bh.c +++ b/db/mp/mp_bh.c @@ -1,14 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: mp_bh.c,v 11.98 2004/09/17 22:00:31 mjc Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: mp_bh.c,v 11.86 2003/07/02 20:02:37 mjc Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #include @@ -147,24 +146,32 @@ __memp_bhwrite(dbmp, hp, mfp, bhp, open_extents) } /* - * Try and open the file, attaching to the underlying shared area. - * Ignore any error, assume it's a permissions problem. + * Try and open the file, specifying the known underlying shared area. * - * XXX + * !!! * There's no negative cache, so we may repeatedly try and open files * that we have previously tried (and failed) to open. */ if ((ret = __memp_fcreate(dbenv, &dbmfp)) != 0) return (ret); - if ((ret = __memp_fopen(dbmfp, mfp, - R_ADDR(dbmp->reginfo, mfp->path_off), - 0, 0, mfp->stat.st_pagesize)) != 0) { + if ((ret = __memp_fopen(dbmfp, + mfp, NULL, DB_DURABLE_UNKNOWN, 0, mfp->stat.st_pagesize)) != 0) { (void)__memp_fclose(dbmfp, 0); - return (ret); + + /* + * Ignore any error if the file is marked dead, assume the file + * was removed from under us. + */ + if (!mfp->deadfile) + return (ret); + + dbmfp = NULL; } pgwrite: ret = __memp_pgwrite(dbenv, dbmfp, hp, bhp); + if (dbmfp == NULL) + return (ret); /* * Discard our reference, and, if we're the last reference, make sure @@ -195,7 +202,8 @@ __memp_pgread(dbmfp, mutexp, bhp, can_create) { DB_ENV *dbenv; MPOOLFILE *mfp; - size_t len, nr, pagesize; + size_t len, nr; + u_int32_t pagesize; int ret; dbenv = dbmfp->dbenv; @@ -324,7 +332,8 @@ __memp_pgwrite(dbenv, dbmfp, hp, bhp) * If the page is in a file for which we have LSN information, we have * to ensure the appropriate log records are on disk. */ - if (LOGGING_ON(dbenv) && mfp->lsn_off != -1) { + if (LOGGING_ON(dbenv) && mfp->lsn_off != -1 && + !IS_CLIENT_PGRECOVER(dbenv)) { memcpy(&lsn, bhp->buf + mfp->lsn_off, sizeof(DB_LSN)); if ((ret = __log_flush(dbenv, &lsn)) != 0) goto err; @@ -335,7 +344,7 @@ __memp_pgwrite(dbenv, dbmfp, hp, bhp) * Verify write-ahead logging semantics. * * !!! - * One special case. There is a single field on the meta-data page, + * Two special cases. There is a single field on the meta-data page, * the last-page-number-in-the-file field, for which we do not log * changes. If the page was originally created in a database that * didn't have logging turned on, we can see a page marked dirty but @@ -344,8 +353,13 @@ __memp_pgwrite(dbenv, dbmfp, hp, bhp) * previous log record and valid LSN is when the page was created * without logging turned on, and so we check for that special-case * LSN value. + * + * Second, when a client is reading database pages from a master + * during an internal backup, we may get pages modified after + * the current end-of-log. 
*/ - if (LOGGING_ON(dbenv) && !IS_NOT_LOGGED_LSN(LSN(bhp->buf))) { + if (LOGGING_ON(dbenv) && !IS_NOT_LOGGED_LSN(LSN(bhp->buf)) && + !IS_CLIENT_PGRECOVER(dbenv)) { /* * There is a potential race here. If we are in the midst of * switching log files, it's possible we could test against the @@ -358,8 +372,10 @@ __memp_pgwrite(dbenv, dbmfp, hp, bhp) dblp = dbenv->lg_handle; lp = dblp->reginfo.primary; - if (log_compare(&lp->s_lsn, &LSN(bhp->buf)) <= 0) { - mtx = R_ADDR(&dblp->reginfo, lp->flush_mutex_off); + if (!lp->db_log_inmemory && + log_compare(&lp->s_lsn, &LSN(bhp->buf)) <= 0) { + mtx = R_ADDR(dbenv, + &dblp->reginfo, lp->flush_mutex_off); MUTEX_LOCK(dbenv, mtx); DB_ASSERT(log_compare(&lp->s_lsn, &LSN(bhp->buf)) > 0); MUTEX_UNLOCK(dbenv, mtx); @@ -385,8 +401,6 @@ __memp_pgwrite(dbenv, dbmfp, hp, bhp) __memp_fn(dbmfp), (u_long)bhp->pgno); goto err; } - - mfp->file_written = 1; ++mfp->stat.st_page_out; err: @@ -459,8 +473,9 @@ __memp_pg(dbmfp, bhp, is_pgin) if (mfp->pgcookie_len == 0) dbtp = NULL; else { - dbt.size = mfp->pgcookie_len; - dbt.data = R_ADDR(dbmp->reginfo, mfp->pgcookie_off); + dbt.size = (u_int32_t)mfp->pgcookie_len; + dbt.data = R_ADDR(dbenv, + dbmp->reginfo, mfp->pgcookie_off); dbtp = &dbt; } MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp); @@ -493,14 +508,15 @@ err: MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp); * __memp_bhfree -- * Free a bucket header and its referenced data. * - * PUBLIC: void __memp_bhfree __P((DB_MPOOL *, DB_MPOOL_HASH *, BH *, int)); + * PUBLIC: void __memp_bhfree + * PUBLIC: __P((DB_MPOOL *, DB_MPOOL_HASH *, BH *, u_int32_t)); */ void -__memp_bhfree(dbmp, hp, bhp, free_mem) +__memp_bhfree(dbmp, hp, bhp, flags) DB_MPOOL *dbmp; DB_MPOOL_HASH *hp; BH *bhp; - int free_mem; + u_int32_t flags; { DB_ENV *dbenv; MPOOL *c_mp, *mp; @@ -528,13 +544,14 @@ __memp_bhfree(dbmp, hp, bhp, free_mem) * Discard the hash bucket's mutex, it's no longer needed, and * we don't want to be holding it when acquiring other locks. */ - MUTEX_UNLOCK(dbenv, &hp->hash_mutex); + if (!LF_ISSET(BH_FREE_UNLOCKED)) + MUTEX_UNLOCK(dbenv, &hp->hash_mutex); /* * Find the underlying MPOOLFILE and decrement its reference count. * If this is its last reference, remove it. */ - mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset); + mfp = R_ADDR(dbenv, dbmp->reginfo, bhp->mf_offset); MUTEX_LOCK(dbenv, &mfp->mutex); if (--mfp->block_cnt == 0 && mfp->mpf_cnt == 0) (void)__memp_mf_discard(dbmp, mfp); @@ -548,14 +565,14 @@ __memp_bhfree(dbmp, hp, bhp, free_mem) * be held. */ __db_shlocks_clear(&bhp->mutex, &dbmp->reginfo[n_cache], - (REGMAINT *)R_ADDR(&dbmp->reginfo[n_cache], mp->maint_off)); + (REGMAINT *)R_ADDR(dbenv, &dbmp->reginfo[n_cache], mp->maint_off)); /* * If we're not reusing the buffer immediately, free the buffer header * and data for real. */ - if (free_mem) { - __db_shalloc_free(dbmp->reginfo[n_cache].addr, bhp); + if (LF_ISSET(BH_FREE_FREEMEM)) { + __db_shalloc_free(&dbmp->reginfo[n_cache], bhp); c_mp = dbmp->reginfo[n_cache].primary; c_mp->stat.st_pages--; } diff --git a/db/mp/mp_fget.c b/db/mp/mp_fget.c index 1df3f3bfa..1c332808a 100644 --- a/db/mp/mp_fget.c +++ b/db/mp/mp_fget.c @@ -1,14 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: mp_fget.c,v 11.95 2004/09/15 21:49:19 mjc Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: mp_fget.c,v 11.81 2003/09/25 02:15:16 sue Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #include @@ -76,9 +75,9 @@ __memp_fget_pp(dbmfp, pgnoaddr, flags, addrp) __op_rep_enter(dbenv); ret = __memp_fget(dbmfp, pgnoaddr, flags, addrp); /* - * We only decrement the count in op_rep_exit if the operattion fails. - * Otherwise the count will be decremeneted when the page - * is no longer pinned in memp_fput. + * We only decrement the count in op_rep_exit if the operation fails. + * Otherwise the count will be decremented when the page is no longer + * pinned in memp_fput. */ if (ret != 0 && rep_check) __op_rep_exit(dbenv); @@ -118,7 +117,7 @@ __memp_fget(dbmfp, pgnoaddr, flags, addrp) c_mp = NULL; mp = dbmp->reginfo[0].primary; mfp = dbmfp->mfp; - mf_offset = R_OFFSET(dbmp->reginfo, mfp); + mf_offset = R_OFFSET(dbenv, dbmp->reginfo, mfp); alloc_bhp = bhp = NULL; hp = NULL; b_incr = extending = ret = 0; @@ -126,20 +125,16 @@ __memp_fget(dbmfp, pgnoaddr, flags, addrp) switch (flags) { case DB_MPOOL_LAST: /* Get the last page number in the file. */ - if (flags == DB_MPOOL_LAST) { - R_LOCK(dbenv, dbmp->reginfo); - *pgnoaddr = mfp->last_pgno; - R_UNLOCK(dbenv, dbmp->reginfo); - } + R_LOCK(dbenv, dbmp->reginfo); + *pgnoaddr = mfp->last_pgno; + R_UNLOCK(dbenv, dbmp->reginfo); break; case DB_MPOOL_NEW: /* * If always creating a page, skip the first search * of the hash bucket. */ - if (flags == DB_MPOOL_NEW) - goto alloc; - break; + goto alloc; case DB_MPOOL_CREATE: default: break; @@ -147,7 +142,9 @@ __memp_fget(dbmfp, pgnoaddr, flags, addrp) /* * If mmap'ing the file and the page is not past the end of the file, - * just return a pointer. + * just return a pointer. We can't use R_ADDR here: this is an offset + * into an mmap'd file, not a shared region, and doesn't change for + * private environments. * * The page may be past the end of the file, so check the page number * argument against the original length of the file. If we previously @@ -167,8 +164,8 @@ __memp_fget(dbmfp, pgnoaddr, flags, addrp) */ if (dbmfp->addr != NULL && F_ISSET(mfp, MP_CAN_MMAP) && *pgnoaddr <= mfp->orig_last_pgno) { - *(void **)addrp = - R_ADDR(dbmfp, *pgnoaddr * mfp->stat.st_pagesize); + *(void **)addrp = (u_int8_t *)dbmfp->addr + + (*pgnoaddr * mfp->stat.st_pagesize); ++mfp->stat.st_map; return (0); } @@ -181,7 +178,7 @@ hb_search: */ n_cache = NCACHE(mp, mf_offset, *pgnoaddr); c_mp = dbmp->reginfo[n_cache].primary; - hp = R_ADDR(&dbmp->reginfo[n_cache], c_mp->htab); + hp = R_ADDR(dbenv, &dbmp->reginfo[n_cache], c_mp->htab); hp = &hp[NBUCKET(c_mp, mf_offset, *pgnoaddr)]; /* Search the hash chain for the page. */ @@ -199,12 +196,13 @@ retry: st_hsearch = 0; * need to ensure it doesn't move and its contents remain * unchanged. */ - if (bhp->ref == UINT16_T_MAX) { + if (bhp->ref == UINT16_MAX) { + MUTEX_UNLOCK(dbenv, &hp->hash_mutex); + __db_err(dbenv, "%s: page %lu: reference count overflow", __memp_fn(dbmfp), (u_long)bhp->pgno); - ret = EINVAL; - MUTEX_UNLOCK(dbenv, &hp->hash_mutex); + ret = __db_panic(dbenv, EINVAL); goto err; } ++bhp->ref; @@ -289,6 +287,23 @@ retry: st_hsearch = 0; (alloc_bhp == NULL ? FIRST_FOUND : SECOND_FOUND); switch (state) { case FIRST_FOUND: + /* + * If we are to free the buffer, then this had better + * be the only reference. If so, just free the buffer. + * If not, complain and get out. 
+ */ + if (flags == DB_MPOOL_FREE) { + if (bhp->ref == 1) { + __memp_bhfree(dbmp, hp, bhp, BH_FREE_FREEMEM); + return (0); + } + __db_err(dbenv, + "File %s: freeing pinned buffer for page %lu", + __memp_fns(dbmp, mfp), (u_long)*pgnoaddr); + ret = __db_panic(dbenv, EINVAL); + goto err; + } + /* We found the buffer in our first check -- we're done. */ break; case FIRST_MISS: @@ -299,6 +314,12 @@ retry: st_hsearch = 0; */ MUTEX_UNLOCK(dbenv, &hp->hash_mutex); + /* + * The buffer is not in the pool, so we don't need to free it. + */ + if (flags == DB_MPOOL_FREE) + return (0); + alloc: /* * If DB_MPOOL_NEW is set, we have to allocate a page number. * If neither DB_MPOOL_CREATE or DB_MPOOL_CREATE is set, then @@ -340,7 +361,7 @@ alloc: /* * In the DB_MPOOL_NEW code path, mf_offset and n_cache have * not yet been initialized. */ - mf_offset = R_OFFSET(dbmp->reginfo, mfp); + mf_offset = R_OFFSET(dbenv, dbmp->reginfo, mfp); n_cache = NCACHE(mp, mf_offset, *pgnoaddr); c_mp = dbmp->reginfo[n_cache].primary; @@ -349,10 +370,10 @@ alloc: /* &dbmp->reginfo[n_cache], mfp, 0, NULL, &alloc_bhp)) != 0) goto err; #ifdef DIAGNOSTIC - if ((db_alignp_t)alloc_bhp->buf & (sizeof(size_t) - 1)) { + if ((uintptr_t)alloc_bhp->buf & (sizeof(size_t) - 1)) { __db_err(dbenv, "DB_MPOOLFILE->get: buffer data is NOT size_t aligned"); - ret = EINVAL; + ret = __db_panic(dbenv, EINVAL); goto err; } #endif @@ -399,7 +420,7 @@ alloc: /* R_LOCK(dbenv, &dbmp->reginfo[n_cache]); __db_shalloc_free( - dbmp->reginfo[n_cache].addr, alloc_bhp); + &dbmp->reginfo[n_cache], alloc_bhp); c_mp->stat.st_pages--; R_UNLOCK(dbenv, &dbmp->reginfo[n_cache]); @@ -436,11 +457,10 @@ alloc: /* */ MUTEX_UNLOCK(dbenv, &hp->hash_mutex); R_LOCK(dbenv, &dbmp->reginfo[n_cache]); - __db_shalloc_free(dbmp->reginfo[n_cache].addr, alloc_bhp); + __db_shalloc_free(&dbmp->reginfo[n_cache], alloc_bhp); c_mp->stat.st_pages--; alloc_bhp = NULL; R_UNLOCK(dbenv, &dbmp->reginfo[n_cache]); - MUTEX_LOCK(dbenv, &hp->hash_mutex); /* * We can't use the page we found in the pool if DB_MPOOL_NEW @@ -455,6 +475,9 @@ alloc: /* b_incr = 0; goto alloc; } + + /* We can use the page -- get the bucket lock. */ + MUTEX_LOCK(dbenv, &hp->hash_mutex); break; case SECOND_MISS: /* @@ -474,9 +497,10 @@ alloc: /* */ b_incr = 1; + /*lint --e{668} (flexelint: bhp cannot be NULL). */ memset(bhp, 0, sizeof(BH)); bhp->ref = 1; - bhp->priority = UINT32_T_MAX; + bhp->priority = UINT32_MAX; bhp->pgno = *pgnoaddr; bhp->mf_offset = mf_offset; SH_TAILQ_INSERT_TAIL(&hp->hash_bucket, bhp, hq); @@ -552,7 +576,7 @@ alloc: /* * the buffer, so there is no need to do it again.) */ if (state != SECOND_MISS && bhp->ref == 1) { - bhp->priority = UINT32_T_MAX; + bhp->priority = UINT32_MAX; SH_TAILQ_REMOVE(&hp->hash_bucket, bhp, hq, __bh); SH_TAILQ_INSERT_TAIL(&hp->hash_bucket, bhp, hq); hp->hash_priority = @@ -614,7 +638,7 @@ err: /* */ if (b_incr) { if (bhp->ref == 1) - (void)__memp_bhfree(dbmp, hp, bhp, 1); + __memp_bhfree(dbmp, hp, bhp, BH_FREE_FREEMEM); else { --bhp->ref; MUTEX_UNLOCK(dbenv, &hp->hash_mutex); @@ -624,7 +648,7 @@ err: /* /* If alloc_bhp is set, free the memory. 
*/ if (alloc_bhp != NULL) { R_LOCK(dbenv, &dbmp->reginfo[n_cache]); - __db_shalloc_free(dbmp->reginfo[n_cache].addr, alloc_bhp); + __db_shalloc_free(&dbmp->reginfo[n_cache], alloc_bhp); c_mp->stat.st_pages--; R_UNLOCK(dbenv, &dbmp->reginfo[n_cache]); } diff --git a/db/mp/mp_fmethod.c b/db/mp/mp_fmethod.c new file mode 100644 index 000000000..68bc71dde --- /dev/null +++ b/db/mp/mp_fmethod.c @@ -0,0 +1,599 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1996-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: mp_fmethod.c,v 11.141 2004/09/24 00:43:19 bostic Exp $ + */ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include + +#ifdef HAVE_RPC +#include +#endif + +#include +#endif + +#ifdef HAVE_RPC +#include "db_server.h" +#endif + +#include "db_int.h" +#include "dbinc/db_shash.h" +#include "dbinc/log.h" +#include "dbinc/mp.h" + +#ifdef HAVE_RPC +#include "dbinc_auto/rpc_client_ext.h" +#endif + +static int __memp_get_clear_len __P((DB_MPOOLFILE *, u_int32_t *)); +static int __memp_get_lsn_offset __P((DB_MPOOLFILE *, int32_t *)); +static int __memp_get_maxsize __P((DB_MPOOLFILE *, u_int32_t *, u_int32_t *)); +static int __memp_set_maxsize __P((DB_MPOOLFILE *, u_int32_t, u_int32_t)); +static int __memp_get_pgcookie __P((DB_MPOOLFILE *, DBT *)); +static int __memp_get_priority __P((DB_MPOOLFILE *, DB_CACHE_PRIORITY *)); +static int __memp_set_priority __P((DB_MPOOLFILE *, DB_CACHE_PRIORITY)); + +/* + * __memp_fcreate_pp -- + * DB_ENV->memp_fcreate pre/post processing. + * + * PUBLIC: int __memp_fcreate_pp __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t)); + */ +int +__memp_fcreate_pp(dbenv, retp, flags) + DB_ENV *dbenv; + DB_MPOOLFILE **retp; + u_int32_t flags; +{ + int rep_check, ret; + + PANIC_CHECK(dbenv); + + /* Validate arguments. */ + if ((ret = __db_fchk(dbenv, "DB_ENV->memp_fcreate", flags, 0)) != 0) + return (ret); + + rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; + if (rep_check) + __env_rep_enter(dbenv); + ret = __memp_fcreate(dbenv, retp); + if (rep_check) + __env_db_rep_exit(dbenv); + return (ret); +} + +/* + * __memp_fcreate -- + * DB_ENV->memp_fcreate. + * + * PUBLIC: int __memp_fcreate __P((DB_ENV *, DB_MPOOLFILE **)); + */ +int +__memp_fcreate(dbenv, retp) + DB_ENV *dbenv; + DB_MPOOLFILE **retp; +{ + DB_MPOOLFILE *dbmfp; + int ret; + + /* Allocate and initialize the per-process structure. 
*/ + if ((ret = __os_calloc(dbenv, 1, sizeof(DB_MPOOLFILE), &dbmfp)) != 0) + return (ret); + + dbmfp->ref = 1; + dbmfp->lsn_offset = -1; + dbmfp->dbenv = dbenv; + dbmfp->mfp = INVALID_ROFF; + +#ifdef HAVE_RPC + if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) { + dbmfp->get_clear_len = __dbcl_memp_get_clear_len; + dbmfp->set_clear_len = __dbcl_memp_set_clear_len; + dbmfp->get_fileid = __dbcl_memp_get_fileid; + dbmfp->set_fileid = __dbcl_memp_set_fileid; + dbmfp->get_flags = __dbcl_memp_get_flags; + dbmfp->set_flags = __dbcl_memp_set_flags; + dbmfp->get_ftype = __dbcl_memp_get_ftype; + dbmfp->set_ftype = __dbcl_memp_set_ftype; + dbmfp->get_lsn_offset = __dbcl_memp_get_lsn_offset; + dbmfp->set_lsn_offset = __dbcl_memp_set_lsn_offset; + dbmfp->get_maxsize = __dbcl_memp_get_maxsize; + dbmfp->set_maxsize = __dbcl_memp_set_maxsize; + dbmfp->get_pgcookie = __dbcl_memp_get_pgcookie; + dbmfp->set_pgcookie = __dbcl_memp_set_pgcookie; + dbmfp->get_priority = __dbcl_memp_get_priority; + dbmfp->set_priority = __dbcl_memp_set_priority; + + dbmfp->get = __dbcl_memp_fget; + dbmfp->open = __dbcl_memp_fopen; + dbmfp->put = __dbcl_memp_fput; + dbmfp->set = __dbcl_memp_fset; + dbmfp->sync = __dbcl_memp_fsync; + } else +#endif + { + dbmfp->get_clear_len = __memp_get_clear_len; + dbmfp->set_clear_len = __memp_set_clear_len; + dbmfp->get_fileid = __memp_get_fileid; + dbmfp->set_fileid = __memp_set_fileid; + dbmfp->get_flags = __memp_get_flags; + dbmfp->set_flags = __memp_set_flags; + dbmfp->get_ftype = __memp_get_ftype; + dbmfp->set_ftype = __memp_set_ftype; + dbmfp->get_lsn_offset = __memp_get_lsn_offset; + dbmfp->set_lsn_offset = __memp_set_lsn_offset; + dbmfp->get_maxsize = __memp_get_maxsize; + dbmfp->set_maxsize = __memp_set_maxsize; + dbmfp->get_pgcookie = __memp_get_pgcookie; + dbmfp->set_pgcookie = __memp_set_pgcookie; + dbmfp->get_priority = __memp_get_priority; + dbmfp->set_priority = __memp_set_priority; + + dbmfp->get = __memp_fget_pp; + dbmfp->open = __memp_fopen_pp; + dbmfp->put = __memp_fput_pp; + dbmfp->set = __memp_fset_pp; + dbmfp->sync = __memp_fsync_pp; + } + dbmfp->close = __memp_fclose_pp; + + *retp = dbmfp; + return (0); +} + +/* + * __memp_get_clear_len -- + * Get the clear length. + */ +static int +__memp_get_clear_len(dbmfp, clear_lenp) + DB_MPOOLFILE *dbmfp; + u_int32_t *clear_lenp; +{ + *clear_lenp = dbmfp->clear_len; + return (0); +} + +/* + * __memp_set_clear_len -- + * DB_MPOOLFILE->set_clear_len. + * + * PUBLIC: int __memp_set_clear_len __P((DB_MPOOLFILE *, u_int32_t)); + */ +int +__memp_set_clear_len(dbmfp, clear_len) + DB_MPOOLFILE *dbmfp; + u_int32_t clear_len; +{ + MPF_ILLEGAL_AFTER_OPEN(dbmfp, "DB_MPOOLFILE->set_clear_len"); + + dbmfp->clear_len = clear_len; + return (0); +} + +/* + * __memp_get_fileid -- + * DB_MPOOLFILE->get_fileid. + * + * PUBLIC: int __memp_get_fileid __P((DB_MPOOLFILE *, u_int8_t *)); + */ +int +__memp_get_fileid(dbmfp, fileid) + DB_MPOOLFILE *dbmfp; + u_int8_t *fileid; +{ + if (!F_ISSET(dbmfp, MP_FILEID_SET)) { + __db_err(dbmfp->dbenv, "get_fileid: file ID not set"); + return (EINVAL); + } + + memcpy(fileid, dbmfp->fileid, DB_FILE_ID_LEN); + return (0); +} + +/* + * __memp_set_fileid -- + * DB_MPOOLFILE->set_fileid. 
+ * + * PUBLIC: int __memp_set_fileid __P((DB_MPOOLFILE *, u_int8_t *)); + */ +int +__memp_set_fileid(dbmfp, fileid) + DB_MPOOLFILE *dbmfp; + u_int8_t *fileid; +{ + MPF_ILLEGAL_AFTER_OPEN(dbmfp, "DB_MPOOLFILE->set_fileid"); + + memcpy(dbmfp->fileid, fileid, DB_FILE_ID_LEN); + F_SET(dbmfp, MP_FILEID_SET); + + return (0); +} + +/* + * __memp_get_flags -- + * Get the DB_MPOOLFILE flags; + * + * PUBLIC: int __memp_get_flags __P((DB_MPOOLFILE *, u_int32_t *)); + */ +int +__memp_get_flags(dbmfp, flagsp) + DB_MPOOLFILE *dbmfp; + u_int32_t *flagsp; +{ + MPOOLFILE *mfp; + + mfp = dbmfp->mfp; + + *flagsp = 0; + + if (mfp == NULL) + *flagsp = FLD_ISSET(dbmfp->config_flags, + DB_MPOOL_NOFILE | DB_MPOOL_UNLINK); + else { + if (mfp->no_backing_file) + FLD_SET(*flagsp, DB_MPOOL_NOFILE); + if (mfp->unlink_on_close) + FLD_SET(*flagsp, DB_MPOOL_UNLINK); + } + return (0); +} + +/* + * __memp_set_flags -- + * Set the DB_MPOOLFILE flags; + * + * PUBLIC: int __memp_set_flags __P((DB_MPOOLFILE *, u_int32_t, int)); + */ +int +__memp_set_flags(dbmfp, flags, onoff) + DB_MPOOLFILE *dbmfp; + u_int32_t flags; + int onoff; +{ + DB_ENV *dbenv; + MPOOLFILE *mfp; + int ret; + + dbenv = dbmfp->dbenv; + mfp = dbmfp->mfp; + + switch (flags) { + case DB_MPOOL_NOFILE: + if (mfp == NULL) + if (onoff) + FLD_SET(dbmfp->config_flags, DB_MPOOL_NOFILE); + else + FLD_CLR(dbmfp->config_flags, DB_MPOOL_NOFILE); + else + mfp->no_backing_file = onoff; + break; + case DB_MPOOL_UNLINK: + if (mfp == NULL) + if (onoff) + FLD_SET(dbmfp->config_flags, DB_MPOOL_UNLINK); + else + FLD_CLR(dbmfp->config_flags, DB_MPOOL_UNLINK); + else + mfp->unlink_on_close = onoff; + break; + default: + if ((ret = __db_fchk(dbenv, "DB_MPOOLFILE->set_flags", + flags, DB_MPOOL_NOFILE | DB_MPOOL_UNLINK)) != 0) + return (ret); + break; + } + return (0); +} + +/* + * __memp_get_ftype -- + * Get the file type (as registered). + * + * PUBLIC: int __memp_get_ftype __P((DB_MPOOLFILE *, int *)); + */ +int +__memp_get_ftype(dbmfp, ftypep) + DB_MPOOLFILE *dbmfp; + int *ftypep; +{ + *ftypep = dbmfp->ftype; + return (0); +} + +/* + * __memp_set_ftype -- + * DB_MPOOLFILE->set_ftype. + * + * PUBLIC: int __memp_set_ftype __P((DB_MPOOLFILE *, int)); + */ +int +__memp_set_ftype(dbmfp, ftype) + DB_MPOOLFILE *dbmfp; + int ftype; +{ + MPF_ILLEGAL_AFTER_OPEN(dbmfp, "DB_MPOOLFILE->set_ftype"); + + dbmfp->ftype = ftype; + return (0); +} + +/* + * __memp_get_lsn_offset -- + * Get the page's LSN offset. + */ +static int +__memp_get_lsn_offset(dbmfp, lsn_offsetp) + DB_MPOOLFILE *dbmfp; + int32_t *lsn_offsetp; +{ + *lsn_offsetp = dbmfp->lsn_offset; + return (0); +} + +/* + * __memp_set_lsn_offset -- + * Set the page's LSN offset. + * + * PUBLIC: int __memp_set_lsn_offset __P((DB_MPOOLFILE *, int32_t)); + */ +int +__memp_set_lsn_offset(dbmfp, lsn_offset) + DB_MPOOLFILE *dbmfp; + int32_t lsn_offset; +{ + MPF_ILLEGAL_AFTER_OPEN(dbmfp, "DB_MPOOLFILE->set_lsn_offset"); + + dbmfp->lsn_offset = lsn_offset; + return (0); +} + +/* + * __memp_get_maxsize -- + * Get the file's maximum size. 
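/*
 * Hedged sketch (hypothetical application code, assumes db.h and an open
 * DB_ENV): the get/set pairs above configure a DB_MPOOLFILE before it is
 * opened.  Here a 4KB-page cache file with no backing file on disk is
 * created; the name "scratch" and the ID bytes are made up.
 */
static int mpoolfile_example(DB_ENV *dbenv)
{
    DB_MPOOLFILE *mpf;
    db_pgno_t pgno = 0;
    void *addr;
    u_int8_t fileid[DB_FILE_ID_LEN] = { 1 };
    int ret;

    if ((ret = dbenv->memp_fcreate(dbenv, &mpf, 0)) != 0)
        return (ret);
    (void)mpf->set_clear_len(mpf, 4096);            /* clear whole new pages */
    (void)mpf->set_fileid(mpf, fileid);             /* required without a file */
    (void)mpf->set_flags(mpf, DB_MPOOL_NOFILE, 1);  /* never do backing I/O */

    if ((ret = mpf->open(mpf, "scratch", 0, 0, 4096)) != 0)
        return (ret);
    if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &addr)) == 0)
        ret = mpf->put(mpf, addr, 0);
    (void)mpf->close(mpf, 0);
    return (ret);
}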
+ */ +static int +__memp_get_maxsize(dbmfp, gbytesp, bytesp) + DB_MPOOLFILE *dbmfp; + u_int32_t *gbytesp, *bytesp; +{ + DB_ENV *dbenv; + DB_MPOOL *dbmp; + MPOOLFILE *mfp; + + if ((mfp = dbmfp->mfp) == NULL) { + *gbytesp = dbmfp->gbytes; + *bytesp = dbmfp->bytes; + } else { + dbenv = dbmfp->dbenv; + dbmp = dbenv->mp_handle; + + R_LOCK(dbenv, dbmp->reginfo); + *gbytesp = (u_int32_t) + (mfp->maxpgno / (GIGABYTE / mfp->stat.st_pagesize)); + *bytesp = (u_int32_t) + ((mfp->maxpgno % (GIGABYTE / mfp->stat.st_pagesize)) * + mfp->stat.st_pagesize); + R_UNLOCK(dbenv, dbmp->reginfo); + } + + return (0); +} + +/* + * __memp_set_maxsize -- + * Set the file's maximum size. + */ +static int +__memp_set_maxsize(dbmfp, gbytes, bytes) + DB_MPOOLFILE *dbmfp; + u_int32_t gbytes, bytes; +{ + DB_ENV *dbenv; + DB_MPOOL *dbmp; + MPOOLFILE *mfp; + + if ((mfp = dbmfp->mfp) == NULL) { + dbmfp->gbytes = gbytes; + dbmfp->bytes = bytes; + } else { + dbenv = dbmfp->dbenv; + dbmp = dbenv->mp_handle; + + R_LOCK(dbenv, dbmp->reginfo); + mfp->maxpgno = (db_pgno_t) + (gbytes * (GIGABYTE / mfp->stat.st_pagesize)); + mfp->maxpgno += (db_pgno_t) + ((bytes + mfp->stat.st_pagesize - 1) / + mfp->stat.st_pagesize); + R_UNLOCK(dbenv, dbmp->reginfo); + } + + return (0); +} + +/* + * __memp_get_pgcookie -- + * Get the pgin/pgout cookie. + */ +static int +__memp_get_pgcookie(dbmfp, pgcookie) + DB_MPOOLFILE *dbmfp; + DBT *pgcookie; +{ + if (dbmfp->pgcookie == NULL) { + pgcookie->size = 0; + pgcookie->data = ""; + } else + memcpy(pgcookie, dbmfp->pgcookie, sizeof(DBT)); + return (0); +} + +/* + * __memp_set_pgcookie -- + * Set the pgin/pgout cookie. + * + * PUBLIC: int __memp_set_pgcookie __P((DB_MPOOLFILE *, DBT *)); + */ +int +__memp_set_pgcookie(dbmfp, pgcookie) + DB_MPOOLFILE *dbmfp; + DBT *pgcookie; +{ + DB_ENV *dbenv; + DBT *cookie; + int ret; + + MPF_ILLEGAL_AFTER_OPEN(dbmfp, "DB_MPOOLFILE->set_pgcookie"); + dbenv = dbmfp->dbenv; + + if ((ret = __os_calloc(dbenv, 1, sizeof(*cookie), &cookie)) != 0) + return (ret); + if ((ret = __os_malloc(dbenv, pgcookie->size, &cookie->data)) != 0) { + __os_free(dbenv, cookie); + return (ret); + } + + memcpy(cookie->data, pgcookie->data, pgcookie->size); + cookie->size = pgcookie->size; + + dbmfp->pgcookie = cookie; + return (0); +} + +/* + * __memp_get_priority -- + * Set the cache priority for pages from this file. + */ +static int +__memp_get_priority(dbmfp, priorityp) + DB_MPOOLFILE *dbmfp; + DB_CACHE_PRIORITY *priorityp; +{ + switch (dbmfp->priority) { + case MPOOL_PRI_VERY_LOW: + *priorityp = DB_PRIORITY_VERY_LOW; + break; + case MPOOL_PRI_LOW: + *priorityp = DB_PRIORITY_LOW; + break; + case MPOOL_PRI_DEFAULT: + *priorityp = DB_PRIORITY_DEFAULT; + break; + case MPOOL_PRI_HIGH: + *priorityp = DB_PRIORITY_HIGH; + break; + case MPOOL_PRI_VERY_HIGH: + *priorityp = DB_PRIORITY_VERY_HIGH; + break; + default: + __db_err(dbmfp->dbenv, + "DB_MPOOLFILE->get_priority: unknown priority value: %d", + dbmfp->priority); + return (EINVAL); + } + + return (0); +} + +/* + * __memp_set_priority -- + * Set the cache priority for pages from this file. 
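/*
 * Worked sketch of the maxsize arithmetic above (uses the GIGABYTE
 * constant from db_int.h), assuming a 4KB page size: a limit of one
 * gigabyte plus 8192 bytes becomes 262146 pages, and the reverse split
 * recovers the same (gbytes, bytes) pair.
 */
#include <assert.h>

static void maxsize_arithmetic_example(void)
{
    u_int32_t gbytes = 1, bytes = 8192, pagesize = 4096;
    db_pgno_t maxpgno;

    /* As in __memp_set_maxsize: whole gigabytes, then round bytes up. */
    maxpgno = (db_pgno_t)(gbytes * (GIGABYTE / pagesize));
    maxpgno += (db_pgno_t)((bytes + pagesize - 1) / pagesize);
    assert(maxpgno == 262146);                       /* 262144 + 2 pages */

    /* As in __memp_get_maxsize: split maxpgno back into the two fields. */
    assert(maxpgno / (GIGABYTE / pagesize) == 1);
    assert((maxpgno % (GIGABYTE / pagesize)) * pagesize == 8192);
}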
+ */ +static int +__memp_set_priority(dbmfp, priority) + DB_MPOOLFILE *dbmfp; + DB_CACHE_PRIORITY priority; +{ + switch (priority) { + case DB_PRIORITY_VERY_LOW: + dbmfp->priority = MPOOL_PRI_VERY_LOW; + break; + case DB_PRIORITY_LOW: + dbmfp->priority = MPOOL_PRI_LOW; + break; + case DB_PRIORITY_DEFAULT: + dbmfp->priority = MPOOL_PRI_DEFAULT; + break; + case DB_PRIORITY_HIGH: + dbmfp->priority = MPOOL_PRI_HIGH; + break; + case DB_PRIORITY_VERY_HIGH: + dbmfp->priority = MPOOL_PRI_VERY_HIGH; + break; + default: + __db_err(dbmfp->dbenv, + "DB_MPOOLFILE->set_priority: unknown priority value: %d", + priority); + return (EINVAL); + } + + /* Update the underlying file if we've already opened it. */ + if (dbmfp->mfp != NULL) + dbmfp->mfp->priority = priority; + + return (0); +} + +/* + * __memp_last_pgno -- + * Return the page number of the last page in the file. + * + * !!! + * Undocumented interface: DB private. + * + * PUBLIC: void __memp_last_pgno __P((DB_MPOOLFILE *, db_pgno_t *)); + */ +void +__memp_last_pgno(dbmfp, pgnoaddr) + DB_MPOOLFILE *dbmfp; + db_pgno_t *pgnoaddr; +{ + DB_ENV *dbenv; + DB_MPOOL *dbmp; + + dbenv = dbmfp->dbenv; + dbmp = dbenv->mp_handle; + + R_LOCK(dbenv, dbmp->reginfo); + *pgnoaddr = dbmfp->mfp->last_pgno; + R_UNLOCK(dbenv, dbmp->reginfo); +} + +/* + * __memp_fn -- + * On errors we print whatever is available as the file name. + * + * PUBLIC: char * __memp_fn __P((DB_MPOOLFILE *)); + */ +char * +__memp_fn(dbmfp) + DB_MPOOLFILE *dbmfp; +{ + return (__memp_fns(dbmfp->dbenv->mp_handle, dbmfp->mfp)); +} + +/* + * __memp_fns -- + * On errors we print whatever is available as the file name. + * + * PUBLIC: char * __memp_fns __P((DB_MPOOL *, MPOOLFILE *)); + * + */ +char * +__memp_fns(dbmp, mfp) + DB_MPOOL *dbmp; + MPOOLFILE *mfp; +{ + if (mfp->path_off == 0) + return ((char *)"temporary"); + + return ((char *)R_ADDR(dbmp->dbenv, dbmp->reginfo, mfp->path_off)); +} diff --git a/db/mp/mp_fopen.c b/db/mp/mp_fopen.c index 6e8505350..81e565495 100644 --- a/db/mp/mp_fopen.c +++ b/db/mp/mp_fopen.c @@ -1,21 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: mp_fopen.c,v 11.142 2004/09/17 22:00:31 mjc Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: mp_fopen.c,v 11.120 2003/11/07 18:45:15 ubell Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #include -#ifdef HAVE_RPC -#include -#endif #include #endif @@ -24,522 +20,14 @@ static const char revid[] = "$Id: mp_fopen.c,v 11.120 2003/11/07 18:45:15 ubell #include "dbinc/log.h" #include "dbinc/mp.h" -#ifdef HAVE_RPC -#include "dbinc_auto/db_server.h" -#include "dbinc_auto/rpc_client_ext.h" -#endif - -static int __memp_fclose_pp __P((DB_MPOOLFILE *, u_int32_t)); -static int __memp_fopen_pp __P((DB_MPOOLFILE *, - const char *, u_int32_t, int, size_t)); -static int __memp_get_clear_len __P((DB_MPOOLFILE *, u_int32_t *)); -static int __memp_get_flags __P((DB_MPOOLFILE *, u_int32_t *)); -static int __memp_get_lsn_offset __P((DB_MPOOLFILE *, int32_t *)); -static int __memp_get_maxsize __P((DB_MPOOLFILE *, u_int32_t *, u_int32_t *)); -static int __memp_set_maxsize __P((DB_MPOOLFILE *, u_int32_t, u_int32_t)); -static int __memp_get_pgcookie __P((DB_MPOOLFILE *, DBT *)); -static int __memp_get_priority __P((DB_MPOOLFILE *, DB_CACHE_PRIORITY *)); -static int __memp_set_priority __P((DB_MPOOLFILE *, DB_CACHE_PRIORITY)); - -/* - * __memp_fcreate_pp -- - * DB_ENV->memp_fcreate pre/post processing. - * - * PUBLIC: int __memp_fcreate_pp __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t)); - */ -int -__memp_fcreate_pp(dbenv, retp, flags) - DB_ENV *dbenv; - DB_MPOOLFILE **retp; - u_int32_t flags; -{ - int rep_check, ret; - - PANIC_CHECK(dbenv); - - /* Validate arguments. */ - if ((ret = __db_fchk(dbenv, "DB_ENV->memp_fcreate", flags, 0)) != 0) - return (ret); - - rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; - if (rep_check) - __env_rep_enter(dbenv); - ret = __memp_fcreate(dbenv, retp); - if (rep_check) - __env_rep_exit(dbenv); - return (ret); -} - -/* - * __memp_fcreate -- - * DB_ENV->memp_fcreate. - * - * PUBLIC: int __memp_fcreate __P((DB_ENV *, DB_MPOOLFILE **)); - */ -int -__memp_fcreate(dbenv, retp) - DB_ENV *dbenv; - DB_MPOOLFILE **retp; -{ - DB_MPOOLFILE *dbmfp; - int ret; - - /* Allocate and initialize the per-process structure. 
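The removed __memp_fcreate_pp above follows the "_pp" (pre/post-processing) convention: validate the caller's flags, bracket the real worker with an enter/exit guard, and return the worker's status. A minimal sketch of that wrapper shape follows; the guard and worker functions are hypothetical, not the replication calls used in the real code.

/*
 * Sketch of the "_pp" wrapper pattern: argument checks, an enter/exit
 * guard, then the real work.  All names below are hypothetical.
 */
#include <errno.h>
#include <stdio.h>

static int guard_depth;                 /* stand-in for an enter/exit guard */

static void ex_guard_enter(void) { ++guard_depth; }
static void ex_guard_exit(void)  { --guard_depth; }

static int
ex_create(int *handlep)                 /* the real worker */
{
        *handlep = 42;
        return (0);
}

static int
ex_create_pp(int *handlep, unsigned int flags)
{
        int ret;

        if (flags != 0)                 /* no flags are currently legal */
                return (EINVAL);

        ex_guard_enter();
        ret = ex_create(handlep);
        ex_guard_exit();
        return (ret);
}

int
main(void)
{
        int h, ret;

        ret = ex_create_pp(&h, 0);
        printf("ret=%d handle=%d\n", ret, h);
        return (0);
}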
*/ - if ((ret = __os_calloc(dbenv, 1, sizeof(DB_MPOOLFILE), &dbmfp)) != 0) - return (ret); - - dbmfp->ref = 1; - dbmfp->lsn_offset = -1; - dbmfp->dbenv = dbenv; - dbmfp->mfp = INVALID_ROFF; - -#ifdef HAVE_RPC - if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) { - dbmfp->get_clear_len = __dbcl_memp_get_clear_len; - dbmfp->set_clear_len = __dbcl_memp_set_clear_len; - dbmfp->get_fileid = __dbcl_memp_get_fileid; - dbmfp->set_fileid = __dbcl_memp_set_fileid; - dbmfp->get_flags = __dbcl_memp_get_flags; - dbmfp->set_flags = __dbcl_memp_set_flags; - dbmfp->get_ftype = __dbcl_memp_get_ftype; - dbmfp->set_ftype = __dbcl_memp_set_ftype; - dbmfp->get_lsn_offset = __dbcl_memp_get_lsn_offset; - dbmfp->set_lsn_offset = __dbcl_memp_set_lsn_offset; - dbmfp->get_maxsize = __dbcl_memp_get_maxsize; - dbmfp->set_maxsize = __dbcl_memp_set_maxsize; - dbmfp->get_pgcookie = __dbcl_memp_get_pgcookie; - dbmfp->set_pgcookie = __dbcl_memp_set_pgcookie; - dbmfp->get_priority = __dbcl_memp_get_priority; - dbmfp->set_priority = __dbcl_memp_set_priority; - - dbmfp->get = __dbcl_memp_fget; - dbmfp->open = __dbcl_memp_fopen; - dbmfp->put = __dbcl_memp_fput; - dbmfp->set = __dbcl_memp_fset; - dbmfp->sync = __dbcl_memp_fsync; - } else -#endif - { - dbmfp->get_clear_len = __memp_get_clear_len; - dbmfp->set_clear_len = __memp_set_clear_len; - dbmfp->get_fileid = __memp_get_fileid; - dbmfp->set_fileid = __memp_set_fileid; - dbmfp->get_flags = __memp_get_flags; - dbmfp->set_flags = __memp_set_flags; - dbmfp->get_ftype = __memp_get_ftype; - dbmfp->set_ftype = __memp_set_ftype; - dbmfp->get_lsn_offset = __memp_get_lsn_offset; - dbmfp->set_lsn_offset = __memp_set_lsn_offset; - dbmfp->get_maxsize = __memp_get_maxsize; - dbmfp->set_maxsize = __memp_set_maxsize; - dbmfp->get_pgcookie = __memp_get_pgcookie; - dbmfp->set_pgcookie = __memp_set_pgcookie; - dbmfp->get_priority = __memp_get_priority; - dbmfp->set_priority = __memp_set_priority; - - dbmfp->get = __memp_fget_pp; - dbmfp->open = __memp_fopen_pp; - dbmfp->put = __memp_fput_pp; - dbmfp->set = __memp_fset_pp; - dbmfp->sync = __memp_fsync_pp; - } - dbmfp->close = __memp_fclose_pp; - - *retp = dbmfp; - return (0); -} - -/* - * __memp_get_clear_len -- - * Get the clear length. - */ -static int -__memp_get_clear_len(dbmfp, clear_lenp) - DB_MPOOLFILE *dbmfp; - u_int32_t *clear_lenp; -{ - *clear_lenp = dbmfp->clear_len; - return (0); -} - -/* - * __memp_set_clear_len -- - * DB_MPOOLFILE->set_clear_len. - * - * PUBLIC: int __memp_set_clear_len __P((DB_MPOOLFILE *, u_int32_t)); - */ -int -__memp_set_clear_len(dbmfp, clear_len) - DB_MPOOLFILE *dbmfp; - u_int32_t clear_len; -{ - MPF_ILLEGAL_AFTER_OPEN(dbmfp, "DB_MPOOLFILE->set_clear_len"); - - dbmfp->clear_len = clear_len; - return (0); -} - -/* - * __memp_get_fileid -- - * DB_MPOOLFILE->get_fileid. - * - * PUBLIC: int __memp_get_fileid __P((DB_MPOOLFILE *, u_int8_t *)); - */ -int -__memp_get_fileid(dbmfp, fileid) - DB_MPOOLFILE *dbmfp; - u_int8_t *fileid; -{ - if (!F_ISSET(dbmfp, MP_FILEID_SET)) { - __db_err(dbmfp->dbenv, "get_fileid: file ID not set"); - return (EINVAL); - } - - memcpy(fileid, dbmfp->fileid, DB_FILE_ID_LEN); - return (0); -} - -/* - * __memp_set_fileid -- - * DB_MPOOLFILE->set_fileid. 
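The handle initialization above fills in a method table: the same struct of function pointers is populated with either local or RPC-client implementations, and callers only ever go through the pointers. Here is a small, self-contained sketch of that pattern with made-up names; it models only the local branch and notes where an RPC build would differ.

/*
 * Sketch of the method-table (function-pointer) pattern used when
 * filling in the DB_MPOOLFILE handle above.  Names are illustrative.
 */
#include <stdio.h>

struct ex_handle {
        int (*get_ftype)(struct ex_handle *, int *);
        int (*set_ftype)(struct ex_handle *, int);
        int ftype;
};

static int
local_get_ftype(struct ex_handle *h, int *p)
{
        *p = h->ftype;
        return (0);
}

static int
local_set_ftype(struct ex_handle *h, int ftype)
{
        h->ftype = ftype;
        return (0);
}

static void
handle_init(struct ex_handle *h)
{
        /* An RPC client build would plug in client stubs here instead. */
        h->get_ftype = local_get_ftype;
        h->set_ftype = local_set_ftype;
        h->ftype = 0;
}

int
main(void)
{
        struct ex_handle h;
        int t;

        handle_init(&h);
        h.set_ftype(&h, 7);
        h.get_ftype(&h, &t);
        printf("ftype=%d\n", t);
        return (0);
}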
- * - * PUBLIC: int __memp_set_fileid __P((DB_MPOOLFILE *, u_int8_t *)); - */ -int -__memp_set_fileid(dbmfp, fileid) - DB_MPOOLFILE *dbmfp; - u_int8_t *fileid; -{ - MPF_ILLEGAL_AFTER_OPEN(dbmfp, "DB_MPOOLFILE->set_fileid"); - - memcpy(dbmfp->fileid, fileid, DB_FILE_ID_LEN); - F_SET(dbmfp, MP_FILEID_SET); - - return (0); -} - -/* - * __memp_get_flags -- - * Get the DB_MPOOLFILE flags; - */ -static int -__memp_get_flags(dbmfp, flagsp) - DB_MPOOLFILE *dbmfp; - u_int32_t *flagsp; -{ - MPOOLFILE *mfp; - - mfp = dbmfp->mfp; - - *flagsp = 0; - - if (mfp == NULL) - *flagsp = FLD_ISSET(dbmfp->config_flags, DB_MPOOL_NOFILE); - else - if (mfp->no_backing_file) - FLD_SET(*flagsp, DB_MPOOL_NOFILE); - return (0); -} - -/* - * __memp_set_flags -- - * Set the DB_MPOOLFILE flags; - * - * PUBLIC: int __memp_set_flags __P((DB_MPOOLFILE *, u_int32_t, int)); - */ -int -__memp_set_flags(dbmfp, flags, onoff) - DB_MPOOLFILE *dbmfp; - u_int32_t flags; - int onoff; -{ - DB_ENV *dbenv; - MPOOLFILE *mfp; - int ret; - - dbenv = dbmfp->dbenv; - mfp = dbmfp->mfp; - -#define OKFLAGS (DB_MPOOL_NOFILE | DB_MPOOL_UNLINK) - if ((ret = - __db_fchk(dbenv, "DB_MPOOLFILE->set_flags", flags, OKFLAGS)) != 0) - return (ret); - - switch (flags) { - case 0: - break; - case DB_MPOOL_NOFILE: - if (mfp == NULL) - if (onoff) - FLD_SET(dbmfp->config_flags, DB_MPOOL_NOFILE); - else - FLD_CLR(dbmfp->config_flags, DB_MPOOL_NOFILE); - else - mfp->no_backing_file = onoff; - break; - case DB_MPOOL_UNLINK: - if (mfp == NULL) - if (onoff) - FLD_SET(dbmfp->config_flags, DB_MPOOL_UNLINK); - else - FLD_CLR(dbmfp->config_flags, DB_MPOOL_UNLINK); - else - mfp->unlink_on_close = onoff; - break; - } - return (0); -} - -/* - * __memp_get_ftype -- - * Get the file type (as registered). - * - * PUBLIC: int __memp_get_ftype __P((DB_MPOOLFILE *, int *)); - */ -int -__memp_get_ftype(dbmfp, ftypep) - DB_MPOOLFILE *dbmfp; - int *ftypep; -{ - *ftypep = dbmfp->ftype; - return (0); -} - -/* - * __memp_set_ftype -- - * DB_MPOOLFILE->set_ftype. - * - * PUBLIC: int __memp_set_ftype __P((DB_MPOOLFILE *, int)); - */ -int -__memp_set_ftype(dbmfp, ftype) - DB_MPOOLFILE *dbmfp; - int ftype; -{ - MPF_ILLEGAL_AFTER_OPEN(dbmfp, "DB_MPOOLFILE->set_ftype"); - - dbmfp->ftype = ftype; - return (0); -} - -/* - * __memp_get_lsn_offset -- - * Get the page's LSN offset. - */ -static int -__memp_get_lsn_offset(dbmfp, lsn_offsetp) - DB_MPOOLFILE *dbmfp; - int32_t *lsn_offsetp; -{ - *lsn_offsetp = dbmfp->lsn_offset; - return (0); -} - -/* - * __memp_set_lsn_offset -- - * Set the page's LSN offset. - * - * PUBLIC: int __memp_set_lsn_offset __P((DB_MPOOLFILE *, int32_t)); - */ -int -__memp_set_lsn_offset(dbmfp, lsn_offset) - DB_MPOOLFILE *dbmfp; - int32_t lsn_offset; -{ - MPF_ILLEGAL_AFTER_OPEN(dbmfp, "DB_MPOOLFILE->set_lsn_offset"); - - dbmfp->lsn_offset = lsn_offset; - return (0); -} - -/* - * __memp_get_maxsize -- - * Get the file's maximum size. - */ -static int -__memp_get_maxsize(dbmfp, gbytesp, bytesp) - DB_MPOOLFILE *dbmfp; - u_int32_t *gbytesp, *bytesp; -{ - DB_ENV *dbenv; - DB_MPOOL *dbmp; - MPOOLFILE *mfp; - - if ((mfp = dbmfp->mfp) == NULL) { - *gbytesp = dbmfp->gbytes; - *bytesp = dbmfp->bytes; - } else { - dbenv = dbmfp->dbenv; - dbmp = dbenv->mp_handle; - - R_LOCK(dbenv, dbmp->reginfo); - *gbytesp = mfp->maxpgno / (GIGABYTE / mfp->stat.st_pagesize); - *bytesp = (mfp->maxpgno % - (GIGABYTE / mfp->stat.st_pagesize)) * mfp->stat.st_pagesize; - R_UNLOCK(dbenv, dbmp->reginfo); - } - - return (0); -} - -/* - * __memp_set_maxsize -- - * Set the files's maximum size. 
- */ -static int -__memp_set_maxsize(dbmfp, gbytes, bytes) - DB_MPOOLFILE *dbmfp; - u_int32_t gbytes, bytes; -{ - DB_ENV *dbenv; - DB_MPOOL *dbmp; - MPOOLFILE *mfp; - - if ((mfp = dbmfp->mfp) == NULL) { - dbmfp->gbytes = gbytes; - dbmfp->bytes = bytes; - } else { - dbenv = dbmfp->dbenv; - dbmp = dbenv->mp_handle; - - R_LOCK(dbenv, dbmp->reginfo); - mfp->maxpgno = gbytes * (GIGABYTE / mfp->stat.st_pagesize); - mfp->maxpgno += (bytes + - mfp->stat.st_pagesize - 1) / mfp->stat.st_pagesize; - R_UNLOCK(dbenv, dbmp->reginfo); - } - - return (0); -} - -/* - * __memp_get_pgcookie -- - * Get the pgin/pgout cookie. - */ -static int -__memp_get_pgcookie(dbmfp, pgcookie) - DB_MPOOLFILE *dbmfp; - DBT *pgcookie; -{ - if (dbmfp->pgcookie == NULL) { - pgcookie->size = 0; - pgcookie->data = ""; - } else - memcpy(pgcookie, dbmfp->pgcookie, sizeof(DBT)); - return (0); -} - -/* - * __memp_set_pgcookie -- - * Set the pgin/pgout cookie. - * - * PUBLIC: int __memp_set_pgcookie __P((DB_MPOOLFILE *, DBT *)); - */ -int -__memp_set_pgcookie(dbmfp, pgcookie) - DB_MPOOLFILE *dbmfp; - DBT *pgcookie; -{ - DB_ENV *dbenv; - DBT *cookie; - int ret; - - MPF_ILLEGAL_AFTER_OPEN(dbmfp, "DB_MPOOLFILE->set_pgcookie"); - dbenv = dbmfp->dbenv; - - if ((ret = __os_calloc(dbenv, 1, sizeof(*cookie), &cookie)) != 0) - return (ret); - if ((ret = __os_malloc(dbenv, pgcookie->size, &cookie->data)) != 0) { - (void)__os_free(dbenv, cookie); - return (ret); - } - - memcpy(cookie->data, pgcookie->data, pgcookie->size); - cookie->size = pgcookie->size; - - dbmfp->pgcookie = cookie; - return (0); -} - -/* - * __memp_get_priority -- - * Set the cache priority for pages from this file. - */ -static int -__memp_get_priority(dbmfp, priorityp) - DB_MPOOLFILE *dbmfp; - DB_CACHE_PRIORITY *priorityp; -{ - switch (dbmfp->priority) { - case MPOOL_PRI_VERY_LOW: - *priorityp = DB_PRIORITY_VERY_LOW; - break; - case MPOOL_PRI_LOW: - *priorityp = DB_PRIORITY_LOW; - break; - case MPOOL_PRI_DEFAULT: - *priorityp = DB_PRIORITY_DEFAULT; - break; - case MPOOL_PRI_HIGH: - *priorityp = DB_PRIORITY_HIGH; - break; - case MPOOL_PRI_VERY_HIGH: - *priorityp = DB_PRIORITY_VERY_HIGH; - break; - default: - __db_err(dbmfp->dbenv, - "DB_MPOOLFILE->get_priority: unknown priority value: %d", - dbmfp->priority); - return (EINVAL); - } - - return (0); -} - -/* - * __memp_set_priority -- - * Set the cache priority for pages from this file. - */ -static int -__memp_set_priority(dbmfp, priority) - DB_MPOOLFILE *dbmfp; - DB_CACHE_PRIORITY priority; -{ - switch (priority) { - case DB_PRIORITY_VERY_LOW: - dbmfp->priority = MPOOL_PRI_VERY_LOW; - break; - case DB_PRIORITY_LOW: - dbmfp->priority = MPOOL_PRI_LOW; - break; - case DB_PRIORITY_DEFAULT: - dbmfp->priority = MPOOL_PRI_DEFAULT; - break; - case DB_PRIORITY_HIGH: - dbmfp->priority = MPOOL_PRI_HIGH; - break; - case DB_PRIORITY_VERY_HIGH: - dbmfp->priority = MPOOL_PRI_VERY_HIGH; - break; - default: - __db_err(dbmfp->dbenv, - "DB_MPOOLFILE->set_priority: unknown priority value: %d", - priority); - return (EINVAL); - } - - /* Update the underlying file if we've already opened it. */ - if (dbmfp->mfp != NULL) - dbmfp->mfp->priority = priority; - - return (0); -} - /* * __memp_fopen_pp -- * DB_MPOOLFILE->open pre/post processing. 
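__memp_set_pgcookie above deep-copies the caller's cookie: allocate the descriptor, then the payload, and release the descriptor again if the second allocation fails. The sketch below shows that allocate/copy/cleanup shape using the standard allocator and a hypothetical descriptor type rather than the DBT and __os_* wrappers.

/*
 * Sketch of the deep-copy-with-cleanup pattern used by
 * __memp_set_pgcookie above.
 */
#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct ex_dbt {
        void *data;
        size_t size;
};

static int
ex_copy_cookie(const struct ex_dbt *src, struct ex_dbt **retp)
{
        struct ex_dbt *copy;

        if ((copy = calloc(1, sizeof(*copy))) == NULL)
                return (ENOMEM);
        if ((copy->data = malloc(src->size)) == NULL) {
                free(copy);             /* don't leak the descriptor */
                return (ENOMEM);
        }
        memcpy(copy->data, src->data, src->size);
        copy->size = src->size;

        *retp = copy;
        return (0);
}

int
main(void)
{
        struct ex_dbt src = { "cookie", 7 }, *copy;

        if (ex_copy_cookie(&src, &copy) == 0) {
                free(copy->data);
                free(copy);
        }
        return (0);
}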
+ * + * PUBLIC: int __memp_fopen_pp + * PUBLIC: __P((DB_MPOOLFILE *, const char *, u_int32_t, int, size_t)); */ -static int +int __memp_fopen_pp(dbmfp, path, flags, mode, pagesize) DB_MPOOLFILE *dbmfp; const char *path; @@ -587,7 +75,7 @@ __memp_fopen_pp(dbmfp, path, flags, mode, pagesize) __env_rep_enter(dbenv); ret = __memp_fopen(dbmfp, NULL, path, flags, mode, pagesize); if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -599,13 +87,13 @@ __memp_fopen_pp(dbmfp, path, flags, mode, pagesize) * PUBLIC: MPOOLFILE *, const char *, u_int32_t, int, size_t)); */ int -__memp_fopen(dbmfp, mfp, path, flags, mode, pagesize) +__memp_fopen(dbmfp, mfp, path, flags, mode, pgsize) DB_MPOOLFILE *dbmfp; MPOOLFILE *mfp; const char *path; u_int32_t flags; int mode; - size_t pagesize; + size_t pgsize; { DB_ENV *dbenv; DB_MPOOL *dbmp; @@ -613,7 +101,7 @@ __memp_fopen(dbmfp, mfp, path, flags, mode, pagesize) MPOOL *mp; db_pgno_t last_pgno; size_t maxmap; - u_int32_t mbytes, bytes, oflags; + u_int32_t mbytes, bytes, oflags, pagesize; int refinc, ret; char *rpath; void *p; @@ -625,30 +113,51 @@ __memp_fopen(dbmfp, mfp, path, flags, mode, pagesize) rpath = NULL; /* - * If it's a temporary file, delay the open until we actually need - * to write the file, and we know we can't join any existing files. + * We're keeping the page size as a size_t in the public API, but + * it's a u_int32_t everywhere internally. */ - if (path == NULL) + pagesize = (u_int32_t)pgsize; + + /* + * We're called internally with a specified mfp, in which case the + * path is NULL, but we'll get the path from the underlying region + * information. Otherwise, if the path is NULL, it's a temporary + * file -- we know we can't join any existing files, and we'll delay + * the open until we actually need to write the file. + */ + DB_ASSERT(mfp == NULL || path == NULL); + + if (mfp == NULL && path == NULL) goto alloc; /* - * If our caller knows what mfp we're using, increment the ref count, - * no need to search. - * - * We don't need to acquire a lock other than the mfp itself, because - * we know there's another reference and it's not going away. + * Our caller may be able to tell us which underlying MPOOLFILE we + * need a handle for. */ if (mfp != NULL) { + /* + * Deadfile can only be set if mpf_cnt goes to zero (or if we + * failed creating the file DB_AM_DISCARD). Increment the ref + * count so the file cannot become dead and be unlinked. + */ MUTEX_LOCK(dbenv, &mfp->mutex); - ++mfp->mpf_cnt; - refinc = 1; + if (!mfp->deadfile) { + ++mfp->mpf_cnt; + refinc = 1; + } MUTEX_UNLOCK(dbenv, &mfp->mutex); + + /* + * Test one last time to see if the file is dead -- it may have + * been removed. This happens when a checkpoint trying to open + * the file to flush a buffer races with the Db::remove method. + * The error will be ignored, so don't output an error message. + */ + if (mfp->deadfile) + return (EINVAL); } - /* - * Get the real name for this file and open it. If it's a Queue extent - * file, it may not exist, and that's OK. - */ + /* Convert MP open flags to DB OS-layer open flags. */ oflags = 0; if (LF_ISSET(DB_CREATE)) oflags |= DB_OSO_CREATE; @@ -658,16 +167,29 @@ __memp_fopen(dbmfp, mfp, path, flags, mode, pagesize) F_SET(dbmfp, MP_READONLY); oflags |= DB_OSO_RDONLY; } - if ((ret = - __db_appname(dbenv, DB_APP_DATA, path, 0, NULL, &rpath)) != 0) - goto err; /* + * Get the real name for this file and open it. 
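The new deadfile handling above pins the underlying MPOOLFILE by bumping its reference count under the file's own mutex, but only if the file is not already dead, and then re-checks afterwards so a racing remove is reported instead of reviving a dead file. The following is a simplified, hypothetical model of that handshake using a pthread mutex, not the Berkeley DB locking primitives.

/*
 * Sketch of the pin-then-recheck handshake described above: increment
 * the reference only while the file is alive, then test again.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct ex_mfp {
        pthread_mutex_t mutex;
        int deadfile;
        int ref;
};

static int
ex_pin(struct ex_mfp *mfp, int *refincp)
{
        *refincp = 0;

        pthread_mutex_lock(&mfp->mutex);
        if (!mfp->deadfile) {
                ++mfp->ref;             /* file can no longer become dead */
                *refincp = 1;
        }
        pthread_mutex_unlock(&mfp->mutex);

        /* The file may already have been removed before we could pin it. */
        if (mfp->deadfile)
                return (EINVAL);
        return (0);
}

int
main(void)
{
        static struct ex_mfp mfp = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };
        int refinc, ret;

        ret = ex_pin(&mfp, &refinc);
        printf("pin=%d refinc=%d ref=%d\n", ret, refinc, mfp.ref);
        return (0);
}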
+ * * Supply a page size so os_open can decide whether to turn buffering * off if the DB_DIRECT_DB flag is set. + * + * Acquire the region lock if we're using a path from an underlying + * MPOOLFILE -- there's a race in accessing the path name stored in + * the region, __memp_nameop may be simultaneously renaming the file. */ - if ((ret = __os_open_extend(dbenv, rpath, - 0, (u_int32_t)pagesize, oflags, mode, &dbmfp->fhp)) != 0) { + if (mfp != NULL) { + R_LOCK(dbenv, dbmp->reginfo); + path = R_ADDR(dbenv, dbmp->reginfo, mfp->path_off); + } + if ((ret = + __db_appname(dbenv, DB_APP_DATA, path, 0, NULL, &rpath)) == 0) + ret = __os_open_extend(dbenv, + rpath, (u_int32_t)pagesize, oflags, mode, &dbmfp->fhp); + if (mfp != NULL) + R_UNLOCK(dbenv, dbmp->reginfo); + if (ret != 0) { + /* If it's a Queue extent file, it may not exist, that's OK. */ if (!LF_ISSET(DB_EXTENT)) __db_err(dbenv, "%s: %s", rpath, db_strerror(ret)); goto err; @@ -709,8 +231,8 @@ __memp_fopen(dbmfp, mfp, path, flags, mode, pagesize) (ret = __os_fileid(dbenv, rpath, 0, dbmfp->fileid)) != 0) goto err; - if (mfp != NULL) - goto check_map; + if (mfp != NULL) + goto have_mfp; /* * If not creating a temporary file, walk the list of MPOOLFILE's, @@ -741,7 +263,7 @@ __memp_fopen(dbmfp, mfp, path, flags, mode, pagesize) continue; /* Skip non-matching files. */ - if (memcmp(dbmfp->fileid, R_ADDR(dbmp->reginfo, + if (memcmp(dbmfp->fileid, R_ADDR(dbenv, dbmp->reginfo, mfp->fileid_off), DB_FILE_ID_LEN) != 0) continue; @@ -813,7 +335,7 @@ __memp_fopen(dbmfp, mfp, path, flags, mode, pagesize) R_UNLOCK(dbenv, dbmp->reginfo); if (mfp != NULL) - goto check_map; + goto have_mfp; alloc: /* Allocate and initialize a new MPOOLFILE. */ if ((ret = __memp_alloc( @@ -827,10 +349,11 @@ alloc: /* Allocate and initialize a new MPOOLFILE. */ mfp->clear_len = dbmfp->clear_len; mfp->priority = dbmfp->priority; if (dbmfp->gbytes != 0 || dbmfp->bytes != 0) { - mfp->maxpgno = - dbmfp->gbytes * (GIGABYTE / mfp->stat.st_pagesize); - mfp->maxpgno += (dbmfp->bytes + - mfp->stat.st_pagesize - 1) / mfp->stat.st_pagesize; + mfp->maxpgno = (db_pgno_t) + (dbmfp->gbytes * (GIGABYTE / mfp->stat.st_pagesize)); + mfp->maxpgno += (db_pgno_t) + ((dbmfp->bytes + mfp->stat.st_pagesize - 1) / + mfp->stat.st_pagesize); } if (FLD_ISSET(dbmfp->config_flags, DB_MPOOL_NOFILE)) mfp->no_backing_file = 1; @@ -839,6 +362,8 @@ alloc: /* Allocate and initialize a new MPOOLFILE. */ if (LF_ISSET(DB_TXN_NOT_DURABLE)) F_SET(mfp, MP_NOT_DURABLE); + if (LF_ISSET(DB_DURABLE_UNKNOWN | DB_RDONLY)) + F_SET(mfp, MP_DURABLE_UNKNOWN); if (LF_ISSET(DB_DIRECT)) F_SET(mfp, MP_DIRECT); if (LF_ISSET(DB_EXTENT)) @@ -919,18 +444,24 @@ alloc: /* Allocate and initialize a new MPOOLFILE. */ if (ret != 0) goto err; -check_map: +have_mfp: /* * We need to verify that all handles open a file either durable or not * durable. This needs to be cross process and cross sub-databases, so * mpool is the place to do it. 
*/ - if (!LF_ISSET(DB_RDONLY) && - !LF_ISSET(DB_TXN_NOT_DURABLE) != !F_ISSET(mfp, MP_NOT_DURABLE)) { - __db_err(dbenv, + if (!LF_ISSET(DB_DURABLE_UNKNOWN | DB_RDONLY)) { + if (F_ISSET(mfp, MP_DURABLE_UNKNOWN)) { + if (LF_ISSET(MP_NOT_DURABLE)) + F_SET(mfp, MP_NOT_DURABLE); + F_CLR(mfp, MP_DURABLE_UNKNOWN); + } else if (!LF_ISSET(DB_TXN_NOT_DURABLE) != + !F_ISSET(mfp, MP_NOT_DURABLE)) { + __db_err(dbenv, "Cannot open DURABLE and NOT DURABLE handles in the same file"); - ret = EINVAL; - goto err; + ret = EINVAL; + goto err; + } } /* * All paths to here have initialized the mfp variable to reference @@ -961,8 +492,6 @@ check_map: */ #define DB_MAXMMAPSIZE (10 * 1024 * 1024) /* 10 MB. */ if (F_ISSET(mfp, MP_CAN_MMAP)) { - maxmap = dbenv->mp_mmapsize == 0 ? - DB_MAXMMAPSIZE : dbenv->mp_mmapsize; if (path == NULL) F_CLR(mfp, MP_CAN_MMAP); else if (!F_ISSET(dbmfp, MP_READONLY)) @@ -971,9 +500,16 @@ check_map: F_CLR(mfp, MP_CAN_MMAP); else if (LF_ISSET(DB_NOMMAP) || F_ISSET(dbenv, DB_ENV_NOMMAP)) F_CLR(mfp, MP_CAN_MMAP); - else if (mbytes > maxmap / MEGABYTE || - (mbytes == maxmap / MEGABYTE && bytes >= maxmap % MEGABYTE)) - F_CLR(mfp, MP_CAN_MMAP); + else { + R_LOCK(dbenv, dbmp->reginfo); + maxmap = mp->mp_mmapsize == 0 ? + DB_MAXMMAPSIZE : mp->mp_mmapsize; + R_UNLOCK(dbenv, dbmp->reginfo); + if (mbytes > maxmap / MEGABYTE || + (mbytes == maxmap / MEGABYTE && + bytes >= maxmap % MEGABYTE)) + F_CLR(mfp, MP_CAN_MMAP); + } dbmfp->addr = NULL; if (F_ISSET(mfp, MP_CAN_MMAP)) { @@ -1016,6 +552,12 @@ check_map: if (0) { err: if (refinc) { + /* + * If mpf_cnt goes to zero here and unlink_on_close is + * set, then we missed the last close, but there was an + * error trying to open the file, so we probably cannot + * unlink it anyway. + */ MUTEX_LOCK(dbenv, &mfp->mutex); --mfp->mpf_cnt; MUTEX_UNLOCK(dbenv, &mfp->mutex); @@ -1027,36 +569,13 @@ err: if (refinc) { return (ret); } -/* - * __memp_last_pgno -- - * Return the page number of the last page in the file. - * - * !!! - * Undocumented interface: DB private. - * - * PUBLIC: void __memp_last_pgno __P((DB_MPOOLFILE *, db_pgno_t *)); - */ -void -__memp_last_pgno(dbmfp, pgnoaddr) - DB_MPOOLFILE *dbmfp; - db_pgno_t *pgnoaddr; -{ - DB_ENV *dbenv; - DB_MPOOL *dbmp; - - dbenv = dbmfp->dbenv; - dbmp = dbenv->mp_handle; - - R_LOCK(dbenv, dbmp->reginfo); - *pgnoaddr = dbmfp->mfp->last_pgno; - R_UNLOCK(dbenv, dbmp->reginfo); -} - /* * memp_fclose_pp -- * DB_MPOOLFILE->close pre/post processing. + * + * PUBLIC: int __memp_fclose_pp __P((DB_MPOOLFILE *, u_int32_t)); */ -static int +int __memp_fclose_pp(dbmfp, flags) DB_MPOOLFILE *dbmfp; u_int32_t flags; @@ -1080,7 +599,7 @@ __memp_fclose_pp(dbmfp, flags) if ((t_ret = __memp_fclose(dbmfp, flags)) != 0 && ret == 0) ret = t_ret; if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -1192,12 +711,12 @@ __memp_fclose(dbmfp, flags) mfp->deadfile = 1; if (mfp->unlink_on_close) { if ((t_ret = __db_appname(dbmp->dbenv, - DB_APP_DATA, R_ADDR(dbmp->reginfo, + DB_APP_DATA, R_ADDR(dbenv, dbmp->reginfo, mfp->path_off), 0, NULL, &rpath)) != 0 && ret == 0) ret = t_ret; if (t_ret == 0) { if ((t_ret = __os_unlink( - dbmp->dbenv, rpath) != 0) && ret == 0) + dbmp->dbenv, rpath)) != 0 && ret == 0) ret = t_ret; __os_free(dbenv, rpath); } @@ -1222,39 +741,6 @@ done: /* Discard the DB_MPOOLFILE structure. */ return (ret); } -/* - * __memp_mf_sync -- - * sync an MPOOLFILE. Should only be used when - * the file is not already open in this process. 
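The MP_CAN_MMAP logic above only maps plain, read-only files with no pgin/pgout conversion, no NOMMAP request, and a size below the configured threshold (10MB by default). Below is a standalone predicate that mirrors those conditions; the parameter names and the boolean inputs are stand-ins for the flags the real code tests.

/*
 * Sketch of the "can we mmap this file?" decision shown above.
 */
#include <stdio.h>

#define EX_MAXMMAPSIZE  (10 * 1024 * 1024)      /* 10MB default, as above */

static int
ex_can_mmap(const char *path, int is_readonly, int has_pgin_pgout,
    int nommap_requested, unsigned long file_bytes, unsigned long maxmap)
{
        if (maxmap == 0)
                maxmap = EX_MAXMMAPSIZE;

        if (path == NULL)               /* temporary file: nothing to map */
                return (0);
        if (!is_readonly)               /* writers can't use a shared map */
                return (0);
        if (has_pgin_pgout)             /* pages are converted on the fly */
                return (0);
        if (nommap_requested)           /* explicitly disabled */
                return (0);
        return (file_bytes < maxmap);
}

int
main(void)
{
        printf("small ro file: %d\n",
            ex_can_mmap("a.db", 1, 0, 0, 1UL << 20, 0));
        printf("large ro file: %d\n",
            ex_can_mmap("a.db", 1, 0, 0, 64UL << 20, 0));
        return (0);
}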
- * - * PUBLIC: int __memp_mf_sync __P((DB_MPOOL *, MPOOLFILE *)); - */ -int -__memp_mf_sync(dbmp, mfp) - DB_MPOOL *dbmp; - MPOOLFILE *mfp; -{ - DB_ENV *dbenv; - DB_FH *fhp; - int ret, t_ret; - char *rpath; - - dbenv = dbmp->dbenv; - - if ((ret = __db_appname(dbenv, DB_APP_DATA, - R_ADDR(dbmp->reginfo, mfp->path_off), 0, NULL, &rpath)) == 0) { - if ((ret = __os_open(dbenv, rpath, 0, 0, &fhp)) == 0) { - ret = __os_fsync(dbenv, fhp); - if ((t_ret = - __os_closehandle(dbenv, fhp)) != 0 && ret == 0) - ret = t_ret; - } - __os_free(dbenv, rpath); - } - - return (ret); -} - /* * __memp_mf_discard -- * Discard an MPOOLFILE. @@ -1269,11 +755,10 @@ __memp_mf_discard(dbmp, mfp) DB_ENV *dbenv; DB_MPOOL_STAT *sp; MPOOL *mp; - int ret; + int need_sync, ret; dbenv = dbmp->dbenv; mp = dbmp->reginfo[0].primary; - ret = 0; /* * Expects caller to be holding the MPOOLFILE mutex. @@ -1282,24 +767,27 @@ __memp_mf_discard(dbmp, mfp) * The scenario is that dirty buffers from this file need to be * flushed to satisfy a future checkpoint, but when the checkpoint * calls mpool sync, the sync code won't know anything about them. + * Ignore files not written, discarded, or only temporary. */ - if (mfp->file_written && !mfp->deadfile) - ret = __memp_mf_sync(dbmp, mfp); + need_sync = + mfp->file_written && !mfp->deadfile && !F_ISSET(mfp, MP_TEMP); /* - * We have to release the MPOOLFILE lock before acquiring the region - * lock so that we don't deadlock. Make sure nobody ever looks at - * this structure again. + * We have to release the MPOOLFILE mutex before acquiring the region + * mutex so we don't deadlock. Make sure nobody ever looks at this + * structure again. */ mfp->deadfile = 1; /* Discard the mutex we're holding. */ MUTEX_UNLOCK(dbenv, &mfp->mutex); - /* Delete from the list of MPOOLFILEs. */ + /* Lock the region and delete from the list of MPOOLFILEs. */ R_LOCK(dbenv, dbmp->reginfo); SH_TAILQ_REMOVE(&mp->mpfq, mfp, q, __mpoolfile); + ret = need_sync ? __memp_mf_sync(dbmp, mfp) : 0; + /* Copy the statistics into the region. */ sp = &mp->stat; sp->st_cache_hit += mfp->stat.st_cache_hit; @@ -1311,52 +799,21 @@ __memp_mf_discard(dbmp, mfp) /* Clear the mutex this MPOOLFILE recorded. */ __db_shlocks_clear(&mfp->mutex, dbmp->reginfo, - (REGMAINT *)R_ADDR(dbmp->reginfo, mp->maint_off)); + (REGMAINT *)R_ADDR(dbenv, dbmp->reginfo, mp->maint_off)); /* Free the space. */ if (mfp->path_off != 0) - __db_shalloc_free(dbmp->reginfo[0].addr, - R_ADDR(dbmp->reginfo, mfp->path_off)); + __db_shalloc_free(&dbmp->reginfo[0], + R_ADDR(dbenv, dbmp->reginfo, mfp->path_off)); if (mfp->fileid_off != 0) - __db_shalloc_free(dbmp->reginfo[0].addr, - R_ADDR(dbmp->reginfo, mfp->fileid_off)); + __db_shalloc_free(&dbmp->reginfo[0], + R_ADDR(dbenv, dbmp->reginfo, mfp->fileid_off)); if (mfp->pgcookie_off != 0) - __db_shalloc_free(dbmp->reginfo[0].addr, - R_ADDR(dbmp->reginfo, mfp->pgcookie_off)); - __db_shalloc_free(dbmp->reginfo[0].addr, mfp); + __db_shalloc_free(&dbmp->reginfo[0], + R_ADDR(dbenv, dbmp->reginfo, mfp->pgcookie_off)); + __db_shalloc_free(&dbmp->reginfo[0], mfp); R_UNLOCK(dbenv, dbmp->reginfo); return (ret); } - -/* - * __memp_fn -- - * On errors we print whatever is available as the file name. - * - * PUBLIC: char * __memp_fn __P((DB_MPOOLFILE *)); - */ -char * -__memp_fn(dbmfp) - DB_MPOOLFILE *dbmfp; -{ - return (__memp_fns(dbmfp->dbenv->mp_handle, dbmfp->mfp)); -} - -/* - * __memp_fns -- - * On errors we print whatever is available as the file name. 
- * - * PUBLIC: char * __memp_fns __P((DB_MPOOL *, MPOOLFILE *)); - * - */ -char * -__memp_fns(dbmp, mfp) - DB_MPOOL *dbmp; - MPOOLFILE *mfp; -{ - if (mfp->path_off == 0) - return ((char *)"temporary"); - - return ((char *)R_ADDR(dbmp->reginfo, mfp->path_off)); -} diff --git a/db/mp/mp_fput.c b/db/mp/mp_fput.c index 7fda24e4c..fadafc3c5 100644 --- a/db/mp/mp_fput.c +++ b/db/mp/mp_fput.c @@ -1,14 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: mp_fput.c,v 11.58 2004/09/15 21:49:19 mjc Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: mp_fput.c,v 11.48 2003/09/30 17:12:00 sue Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #include @@ -63,6 +62,7 @@ __memp_fput(dbmfp, pgaddr, flags) DB_MPOOL *dbmp; DB_MPOOL_HASH *hp; MPOOL *c_mp; + MPOOLFILE *mfp; u_int32_t n_cache; int adjust, ret; @@ -87,7 +87,6 @@ __memp_fput(dbmfp, pgaddr, flags) } } - /* * If we're mapping the file, there's nothing to do. Because we can * stop mapping the file at any time, we have to check on each buffer @@ -99,16 +98,16 @@ __memp_fput(dbmfp, pgaddr, flags) return (0); #ifdef DIAGNOSTIC - { int ret; + { /* * Decrement the per-file pinned buffer count (mapped pages aren't * counted). */ R_LOCK(dbenv, dbmp->reginfo); if (dbmfp->pinref == 0) { - ret = EINVAL; __db_err(dbenv, "%s: more pages returned than retrieved", __memp_fn(dbmfp)); + ret = __db_panic(dbenv, EINVAL); } else { ret = 0; --dbmfp->pinref; @@ -123,7 +122,7 @@ __memp_fput(dbmfp, pgaddr, flags) bhp = (BH *)((u_int8_t *)pgaddr - SSZA(BH, buf)); n_cache = NCACHE(dbmp->reginfo[0].primary, bhp->mf_offset, bhp->pgno); c_mp = dbmp->reginfo[n_cache].primary; - hp = R_ADDR(&dbmp->reginfo[n_cache], c_mp->htab); + hp = R_ADDR(dbenv, &dbmp->reginfo[n_cache], c_mp->htab); hp = &hp[NBUCKET(c_mp, bhp->mf_offset, bhp->pgno)]; MUTEX_LOCK(dbenv, &hp->hash_mutex); @@ -147,15 +146,24 @@ __memp_fput(dbmfp, pgaddr, flags) * application returns a page twice. */ if (bhp->ref == 0) { + MUTEX_UNLOCK(dbenv, &hp->hash_mutex); __db_err(dbenv, "%s: page %lu: unpinned page returned", __memp_fn(dbmfp), (u_long)bhp->pgno); - MUTEX_UNLOCK(dbenv, &hp->hash_mutex); - return (EINVAL); + return (__db_panic(dbenv, EINVAL)); } /* Note the activity so allocation won't decide to quit. */ ++c_mp->put_counter; + /* + * Mark the file dirty. Check for a dirty bit on the buffer as well + * as the dirty flag because the buffer might have been marked dirty + * in the DB_MPOOLFILE->set method. + */ + mfp = dbmfp->mfp; + if (LF_ISSET(DB_MPOOL_DIRTY) || F_ISSET(bhp, BH_DIRTY)) + mfp->file_written = 1; + /* * If more than one reference to the page or a reference other than a * thread waiting to flush the buffer to disk, we're done. Ignore the @@ -167,8 +175,7 @@ __memp_fput(dbmfp, pgaddr, flags) } /* Update priority values. 
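The buffer-return path above locates a page's hash bucket in two steps: the (file offset, page number) pair first selects one of the cache regions via NCACHE, then a bucket within that region via NBUCKET. The sketch below models that two-level selection only; the hash function is a placeholder, not the arithmetic of the real macros.

/*
 * Sketch of the region-then-bucket lookup done with NCACHE/NBUCKET above.
 */
#include <stdio.h>

#define EX_NCACHE       4               /* number of cache regions */
#define EX_NBUCKETS     1031            /* buckets per region */

static unsigned int
ex_mix(unsigned long mf_offset, unsigned long pgno)
{
        /* Any reasonable mix of the two identifiers will do for the sketch. */
        return ((unsigned int)(mf_offset ^ (pgno * 2654435761UL)));
}

int
main(void)
{
        unsigned long mf_offset = 0x1f40, pgno = 37;
        unsigned int h, cache, bucket;

        h = ex_mix(mf_offset, pgno);
        cache = h % EX_NCACHE;          /* which region holds the page */
        bucket = h % EX_NBUCKETS;       /* which chain within that region */

        printf("cache=%u bucket=%u\n", cache, bucket);
        return (0);
}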
*/ - if (F_ISSET(bhp, BH_DISCARD) || - dbmfp->mfp->priority == MPOOL_PRI_VERY_LOW) + if (F_ISSET(bhp, BH_DISCARD) || mfp->priority == MPOOL_PRI_VERY_LOW) bhp->priority = 0; else { /* @@ -179,14 +186,14 @@ __memp_fput(dbmfp, pgaddr, flags) bhp->priority = c_mp->lru_count; adjust = 0; - if (dbmfp->mfp->priority != 0) + if (mfp->priority != 0) adjust = - (int)c_mp->stat.st_pages / dbmfp->mfp->priority; + (int)c_mp->stat.st_pages / mfp->priority; if (F_ISSET(bhp, BH_DIRTY)) adjust += c_mp->stat.st_pages / MPOOL_PRI_DIRTY; if (adjust > 0) { - if (UINT32_T_MAX - bhp->priority >= (u_int32_t)adjust) + if (UINT32_MAX - bhp->priority >= (u_int32_t)adjust) bhp->priority += adjust; } else if (adjust < 0) if (bhp->priority > (u_int32_t)-adjust) @@ -240,7 +247,7 @@ done: * On every buffer put we update the buffer generation number and check * for wraparound. */ - if (++c_mp->lru_count == UINT32_T_MAX) + if (++c_mp->lru_count == UINT32_MAX) __memp_reset_lru(dbenv, dbmp->reginfo); return (0); @@ -258,7 +265,7 @@ __memp_reset_lru(dbenv, memreg) BH *bhp; DB_MPOOL_HASH *hp; MPOOL *c_mp; - int bucket; + u_int32_t bucket; c_mp = memreg->primary; @@ -269,7 +276,7 @@ __memp_reset_lru(dbenv, memreg) c_mp->lru_count -= MPOOL_BASE_DECREMENT; /* Adjust the priority of every buffer in the system. */ - for (hp = R_ADDR(memreg, c_mp->htab), + for (hp = R_ADDR(dbenv, memreg, c_mp->htab), bucket = 0; bucket < c_mp->htab_buckets; ++hp, ++bucket) { /* * Skip empty buckets. @@ -283,8 +290,9 @@ __memp_reset_lru(dbenv, memreg) MUTEX_LOCK(dbenv, &hp->hash_mutex); for (bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh); bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh)) - if (bhp->priority != UINT32_T_MAX && + if (bhp->priority != UINT32_MAX && bhp->priority > MPOOL_BASE_DECREMENT) + bhp->priority -= MPOOL_BASE_DECREMENT; MUTEX_UNLOCK(dbenv, &hp->hash_mutex); } } diff --git a/db/mp/mp_fset.c b/db/mp/mp_fset.c index b10ea4f08..167957895 100644 --- a/db/mp/mp_fset.c +++ b/db/mp/mp_fset.c @@ -1,14 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: mp_fset.c,v 11.33 2004/09/15 21:49:19 mjc Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: mp_fset.c,v 11.30 2003/09/13 19:26:21 bostic Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #include @@ -62,7 +61,7 @@ __memp_fset_pp(dbmfp, pgaddr, flags) __env_rep_enter(dbenv); ret = __memp_fset(dbmfp, pgaddr, flags); if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -92,7 +91,7 @@ __memp_fset(dbmfp, pgaddr, flags) bhp = (BH *)((u_int8_t *)pgaddr - SSZA(BH, buf)); n_cache = NCACHE(dbmp->reginfo[0].primary, bhp->mf_offset, bhp->pgno); c_mp = dbmp->reginfo[n_cache].primary; - hp = R_ADDR(&dbmp->reginfo[n_cache], c_mp->htab); + hp = R_ADDR(dbenv, &dbmp->reginfo[n_cache], c_mp->htab); hp = &hp[NBUCKET(c_mp, bhp->mf_offset, bhp->pgno)]; MUTEX_LOCK(dbenv, &hp->hash_mutex); diff --git a/db/mp/mp_method.c b/db/mp/mp_method.c index 5f7c66a4c..28ca0a268 100644 --- a/db/mp/mp_method.c +++ b/db/mp/mp_method.c @@ -1,14 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
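__memp_reset_lru above handles wraparound of the 32-bit buffer generation counter: when the counter is about to hit UINT32_MAX, both the counter and every buffer's priority are shifted down by the same base decrement so their relative ordering is preserved. A self-contained sketch follows; the decrement value and buffer set here are arbitrary, and UINT32_MAX is kept as a "never touch" marker just as the loop above skips it.

/*
 * Sketch of the LRU-counter wraparound handling in __memp_reset_lru above.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_BASE_DECREMENT       (UINT32_MAX / 4)
#define EX_NBUF                 4

int
main(void)
{
        uint32_t lru_count = UINT32_MAX - 1;
        uint32_t prio[EX_NBUF] = {
                UINT32_MAX,             /* sticky marker: left untouched */
                UINT32_MAX - 10, EX_BASE_DECREMENT + 5, 3
        };
        int i;

        if (++lru_count == UINT32_MAX) {
                lru_count -= EX_BASE_DECREMENT;
                for (i = 0; i < EX_NBUF; i++)
                        if (prio[i] != UINT32_MAX &&
                            prio[i] > EX_BASE_DECREMENT)
                                prio[i] -= EX_BASE_DECREMENT;
        }

        printf("lru_count=%u\n", (unsigned)lru_count);
        for (i = 0; i < EX_NBUF; i++)
                printf("prio[%d]=%u\n", i, (unsigned)prio[i]);
        return (0);
}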
+ * + * $Id: mp_method.c,v 11.57 2004/09/22 16:26:19 bostic Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: mp_method.c,v 11.40 2003/06/30 17:20:19 bostic Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #include @@ -16,20 +15,24 @@ static const char revid[] = "$Id: mp_method.c,v 11.40 2003/06/30 17:20:19 bostic #ifdef HAVE_RPC #include #endif + #include #endif +#ifdef HAVE_RPC +#include "db_server.h" +#endif + #include "db_int.h" #include "dbinc/db_shash.h" #include "dbinc/mp.h" #ifdef HAVE_RPC -#include "dbinc_auto/db_server.h" #include "dbinc_auto/rpc_client_ext.h" #endif -static int __memp_get_mp_maxwrite __P((DB_ENV *, int *, int *)); -static int __memp_set_mp_maxwrite __P((DB_ENV *, int, int)); +static int __memp_get_mp_max_openfd __P((DB_ENV *, int *)); +static int __memp_get_mp_max_write __P((DB_ENV *, int *, int *)); static int __memp_get_mp_mmapsize __P((DB_ENV *, size_t *)); /* @@ -63,13 +66,15 @@ __memp_dbenv_create(dbenv) if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) { dbenv->get_cachesize = __dbcl_env_get_cachesize; dbenv->set_cachesize = __dbcl_env_cachesize; - dbenv->get_mp_maxwrite = __dbcl_get_mp_maxwrite; - dbenv->set_mp_maxwrite = __dbcl_set_mp_maxwrite; + dbenv->get_mp_max_openfd = __dbcl_get_mp_max_openfd; + dbenv->set_mp_max_openfd = __dbcl_set_mp_max_openfd; + dbenv->get_mp_max_write = __dbcl_get_mp_max_write; + dbenv->set_mp_max_write = __dbcl_set_mp_max_write; dbenv->get_mp_mmapsize = __dbcl_get_mp_mmapsize; dbenv->set_mp_mmapsize = __dbcl_set_mp_mmapsize; - dbenv->memp_dump_region = NULL; dbenv->memp_register = __dbcl_memp_register; dbenv->memp_stat = __dbcl_memp_stat; + dbenv->memp_stat_print = NULL; dbenv->memp_sync = __dbcl_memp_sync; dbenv->memp_trickle = __dbcl_memp_trickle; } else @@ -77,13 +82,15 @@ __memp_dbenv_create(dbenv) { dbenv->get_cachesize = __memp_get_cachesize; dbenv->set_cachesize = __memp_set_cachesize; - dbenv->get_mp_maxwrite = __memp_get_mp_maxwrite; - dbenv->set_mp_maxwrite = __memp_set_mp_maxwrite; + dbenv->get_mp_max_openfd = __memp_get_mp_max_openfd; + dbenv->set_mp_max_openfd = __memp_set_mp_max_openfd; + dbenv->get_mp_max_write = __memp_get_mp_max_write; + dbenv->set_mp_max_write = __memp_set_mp_max_write; dbenv->get_mp_mmapsize = __memp_get_mp_mmapsize; dbenv->set_mp_mmapsize = __memp_set_mp_mmapsize; - dbenv->memp_dump_region = __memp_dump_region; dbenv->memp_register = __memp_register_pp; dbenv->memp_stat = __memp_stat_pp; + dbenv->memp_stat_print = __memp_stat_print_pp; dbenv->memp_sync = __memp_sync_pp; dbenv->memp_trickle = __memp_trickle_pp; } @@ -103,12 +110,28 @@ __memp_get_cachesize(dbenv, gbytesp, bytesp, ncachep) u_int32_t *gbytesp, *bytesp; int *ncachep; { - if (gbytesp != NULL) - *gbytesp = dbenv->mp_gbytes; - if (bytesp != NULL) - *bytesp = dbenv->mp_bytes; - if (ncachep != NULL) - *ncachep = dbenv->mp_ncache; + MPOOL *mp; + + ENV_NOT_CONFIGURED(dbenv, + dbenv->mp_handle, "DB_ENV->get_cachesize", DB_INIT_MPOOL); + + if (MPOOL_ON(dbenv)) { + /* Cannot be set after open, no lock required to read. 
*/ + mp = ((DB_MPOOL *)dbenv->mp_handle)->reginfo[0].primary; + if (gbytesp != NULL) + *gbytesp = mp->stat.st_gbytes; + if (bytesp != NULL) + *bytesp = mp->stat.st_bytes; + if (ncachep != NULL) + *ncachep = (int)mp->nreg; + } else { + if (gbytesp != NULL) + *gbytesp = dbenv->mp_gbytes; + if (bytesp != NULL) + *bytesp = dbenv->mp_bytes; + if (ncachep != NULL) + *ncachep = (int)dbenv->mp_ncache; + } return (0); } @@ -119,16 +142,17 @@ __memp_get_cachesize(dbenv, gbytesp, bytesp, ncachep) * PUBLIC: int __memp_set_cachesize __P((DB_ENV *, u_int32_t, u_int32_t, int)); */ int -__memp_set_cachesize(dbenv, gbytes, bytes, ncache) +__memp_set_cachesize(dbenv, gbytes, bytes, arg_ncache) DB_ENV *dbenv; u_int32_t gbytes, bytes; - int ncache; + int arg_ncache; { + u_int ncache; + ENV_ILLEGAL_AFTER_OPEN(dbenv, "DB_ENV->set_cachesize"); - /* Normalize the values. */ - if (ncache == 0) - ncache = 1; + /* Normalize the cache count. */ + ncache = arg_ncache <= 0 ? 1 : (u_int)arg_ncache; /* * You can only store 4GB-1 in an unsigned 32-bit value, so correct for @@ -174,26 +198,107 @@ __memp_set_cachesize(dbenv, gbytes, bytes, ncache) } static int -__memp_get_mp_maxwrite(dbenv, maxwritep, maxwrite_sleepp) +__memp_get_mp_max_openfd(dbenv, maxopenfdp) + DB_ENV *dbenv; + int *maxopenfdp; +{ + DB_MPOOL *dbmp; + MPOOL *mp; + + ENV_NOT_CONFIGURED(dbenv, + dbenv->mp_handle, "DB_ENV->get_mp_max_openfd", DB_INIT_MPOOL); + + if (MPOOL_ON(dbenv)) { + dbmp = dbenv->mp_handle; + mp = dbmp->reginfo[0].primary; + R_LOCK(dbenv, dbmp->reginfo); + *maxopenfdp = mp->mp_maxopenfd; + R_UNLOCK(dbenv, dbmp->reginfo); + } else + *maxopenfdp = dbenv->mp_maxopenfd; + return (0); +} + +/* + * __memp_set_mp_max_openfd -- + * Set the maximum number of open fd's when flushing the cache. + * PUBLIC: int __memp_set_mp_max_openfd __P((DB_ENV *, int)); + */ +int +__memp_set_mp_max_openfd(dbenv, maxopenfd) + DB_ENV *dbenv; + int maxopenfd; +{ + DB_MPOOL *dbmp; + MPOOL *mp; + + ENV_NOT_CONFIGURED(dbenv, + dbenv->mp_handle, "DB_ENV->set_mp_max_openfd", DB_INIT_MPOOL); + + if (MPOOL_ON(dbenv)) { + dbmp = dbenv->mp_handle; + mp = dbmp->reginfo[0].primary; + R_LOCK(dbenv, dbmp->reginfo); + mp->mp_maxopenfd = maxopenfd; + R_UNLOCK(dbenv, dbmp->reginfo); + } else + dbenv->mp_maxopenfd = maxopenfd; + return (0); +} + +static int +__memp_get_mp_max_write(dbenv, maxwritep, maxwrite_sleepp) DB_ENV *dbenv; int *maxwritep, *maxwrite_sleepp; { - *maxwritep = dbenv->mp_maxwrite; - *maxwrite_sleepp = dbenv->mp_maxwrite_sleep; + DB_MPOOL *dbmp; + MPOOL *mp; + + ENV_NOT_CONFIGURED(dbenv, + dbenv->mp_handle, "DB_ENV->get_mp_max_openfd", DB_INIT_MPOOL); + + if (MPOOL_ON(dbenv)) { + dbmp = dbenv->mp_handle; + mp = dbmp->reginfo[0].primary; + R_LOCK(dbenv, dbmp->reginfo); + *maxwritep = mp->mp_maxwrite; + *maxwrite_sleepp = mp->mp_maxwrite_sleep; + R_UNLOCK(dbenv, dbmp->reginfo); + } else { + *maxwritep = dbenv->mp_maxwrite; + *maxwrite_sleepp = dbenv->mp_maxwrite_sleep; + } return (0); } /* - * __memp_set_mp_maxwrite -- + * __memp_set_mp_max_write -- * Set the maximum continuous I/O count. 
+ * + * PUBLIC: int __memp_set_mp_max_write __P((DB_ENV *, int, int)); */ -static int -__memp_set_mp_maxwrite(dbenv, maxwrite, maxwrite_sleep) +int +__memp_set_mp_max_write(dbenv, maxwrite, maxwrite_sleep) DB_ENV *dbenv; int maxwrite, maxwrite_sleep; { - dbenv->mp_maxwrite = maxwrite; - dbenv->mp_maxwrite_sleep = maxwrite_sleep; + DB_MPOOL *dbmp; + MPOOL *mp; + + ENV_NOT_CONFIGURED(dbenv, + dbenv->mp_handle, "DB_ENV->get_mp_max_openfd", DB_INIT_MPOOL); + + if (MPOOL_ON(dbenv)) { + dbmp = dbenv->mp_handle; + mp = dbmp->reginfo[0].primary; + R_LOCK(dbenv, dbmp->reginfo); + mp->mp_maxwrite = maxwrite; + mp->mp_maxwrite_sleep = maxwrite_sleep; + R_UNLOCK(dbenv, dbmp->reginfo); + } else { + dbenv->mp_maxwrite = maxwrite; + dbenv->mp_maxwrite_sleep = maxwrite_sleep; + } return (0); } @@ -202,7 +307,20 @@ __memp_get_mp_mmapsize(dbenv, mp_mmapsizep) DB_ENV *dbenv; size_t *mp_mmapsizep; { - *mp_mmapsizep = dbenv->mp_mmapsize; + DB_MPOOL *dbmp; + MPOOL *mp; + + ENV_NOT_CONFIGURED(dbenv, + dbenv->mp_handle, "DB_ENV->get_mp_max_mmapsize", DB_INIT_MPOOL); + + if (MPOOL_ON(dbenv)) { + dbmp = dbenv->mp_handle; + mp = dbmp->reginfo[0].primary; + R_LOCK(dbenv, dbmp->reginfo); + *mp_mmapsizep = mp->mp_mmapsize; + R_UNLOCK(dbenv, dbmp->reginfo); + } else + *mp_mmapsizep = dbenv->mp_mmapsize; return (0); } @@ -217,7 +335,20 @@ __memp_set_mp_mmapsize(dbenv, mp_mmapsize) DB_ENV *dbenv; size_t mp_mmapsize; { - dbenv->mp_mmapsize = mp_mmapsize; + DB_MPOOL *dbmp; + MPOOL *mp; + + ENV_NOT_CONFIGURED(dbenv, + dbenv->mp_handle, "DB_ENV->get_mp_max_mmapsize", DB_INIT_MPOOL); + + if (MPOOL_ON(dbenv)) { + dbmp = dbenv->mp_handle; + mp = dbmp->reginfo[0].primary; + R_LOCK(dbenv, dbmp->reginfo); + mp->mp_mmapsize = mp_mmapsize; + R_UNLOCK(dbenv, dbmp->reginfo); + } else + dbenv->mp_mmapsize = mp_mmapsize; return (0); } @@ -244,6 +375,11 @@ __memp_nameop(dbenv, fileid, newname, fullold, fullnew) int locked, ret; void *p; + /* We get passed either a two names, or two NULLs. */ + DB_ASSERT( + (newname == NULL && fullnew == NULL) || + (newname != NULL && fullnew != NULL)); + locked = 0; dbmp = NULL; @@ -263,9 +399,10 @@ __memp_nameop(dbenv, fileid, newname, fullold, fullnew) * If this is a rename, allocate first, because we can't recursively * grab the region lock. */ - if (newname == NULL) + if (newname == NULL) { p = NULL; - else { + COMPQUIET(newname_off, INVALID_ROFF); + } else { if ((ret = __memp_alloc(dbmp, dbmp->reginfo, NULL, strlen(newname) + 1, &newname_off, &p)) != 0) return (ret); @@ -277,7 +414,7 @@ __memp_nameop(dbenv, fileid, newname, fullold, fullnew) /* * Find the file -- if mpool doesn't know about this file, that's not - * an error-- we may not have it open. + * an error -- we may not have it open. */ for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile); mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) { @@ -286,7 +423,7 @@ __memp_nameop(dbenv, fileid, newname, fullold, fullnew) continue; /* Ignore non-matching files. */ - if (memcmp(fileid, R_ADDR( + if (memcmp(fileid, R_ADDR(dbenv, dbmp->reginfo, mfp->fileid_off), DB_FILE_ID_LEN) != 0) continue; @@ -300,7 +437,7 @@ __memp_nameop(dbenv, fileid, newname, fullold, fullnew) * Else, it's a rename. We've allocated memory * for the new name. Swap it with the old one. */ - p = R_ADDR(dbmp->reginfo, mfp->path_off); + p = R_ADDR(dbenv, dbmp->reginfo, mfp->path_off); mfp->path_off = newname_off; } break; @@ -308,12 +445,24 @@ __memp_nameop(dbenv, fileid, newname, fullold, fullnew) /* Delete the memory we no longer need. 
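__memp_nameop above allocates the replacement name before taking the region lock, because the shared-memory allocator needs that same lock, and then swaps the old and new names inside the critical section. The sketch below shows that allocate-outside/swap-inside shape with heap allocation and a pthread mutex standing in for the region allocator and region lock.

/*
 * Sketch of the allocate-before-locking pattern used in __memp_nameop above.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t region_lock = PTHREAD_MUTEX_INITIALIZER;
static char *path;                      /* shared: current file name */

static int
ex_rename(const char *newname)
{
        char *copy, *old;

        if ((copy = strdup(newname)) == NULL)   /* allocate outside the lock */
                return (ENOMEM);

        pthread_mutex_lock(&region_lock);
        old = path;                             /* swap under the lock */
        path = copy;
        pthread_mutex_unlock(&region_lock);

        free(old);                              /* discard the old name */
        return (0);
}

int
main(void)
{
        path = strdup("a.db");
        ex_rename("b.db");
        printf("path=%s\n", path);
        free(path);
        return (0);
}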
*/ if (p != NULL) - __db_shalloc_free(dbmp->reginfo[0].addr, p); + __db_shalloc_free(&dbmp->reginfo[0], p); + +fsop: if (newname == NULL) { + /* + * !!! + * Replication may ask us to unlink a file that's been + * renamed. Don't complain if it doesn't exist. + */ + if ((ret = __os_unlink(dbenv, fullold)) == ENOENT) + ret = 0; + } else { + /* Defensive only, fullname should never be NULL. */ + DB_ASSERT(fullnew != NULL); + if (fullnew == NULL) + return (EINVAL); -fsop: if (newname == NULL) - ret = __os_unlink(dbenv, fullold); - else ret = __os_rename(dbenv, fullold, fullnew, 1); + } if (locked) R_UNLOCK(dbenv, dbmp->reginfo); @@ -325,13 +474,13 @@ fsop: if (newname == NULL) * __memp_get_refcnt * Return a reference count, given a fileid. * - * PUBLIC: int __memp_get_refcnt __P((DB_ENV *, u_int8_t *, int *)); + * PUBLIC: int __memp_get_refcnt __P((DB_ENV *, u_int8_t *, u_int32_t *)); */ int __memp_get_refcnt(dbenv, fileid, refp) DB_ENV *dbenv; u_int8_t *fileid; - int *refp; + u_int32_t *refp; { DB_MPOOL *dbmp; MPOOL *mp; @@ -358,7 +507,7 @@ __memp_get_refcnt(dbenv, fileid, refp) continue; /* Ignore non-matching files. */ - if (memcmp(fileid, R_ADDR( + if (memcmp(fileid, R_ADDR(dbenv, dbmp->reginfo, mfp->fileid_off), DB_FILE_ID_LEN) != 0) continue; @@ -369,3 +518,53 @@ __memp_get_refcnt(dbenv, fileid, refp) return (0); } + +/* + * __memp_ftruncate __ + * Truncate the file. + * + * PUBLIC: int __memp_ftruncate __P((DB_MPOOLFILE *, db_pgno_t, u_int32_t)); + */ +int +__memp_ftruncate(dbmfp, pgno, flags) + DB_MPOOLFILE *dbmfp; + db_pgno_t pgno; + u_int32_t flags; +{ + DB_ENV *dbenv; + DB_MPOOL *dbmp; + void *pagep; + db_pgno_t last_pgno, pg; + int ret; + + COMPQUIET(flags, 0); + dbenv = dbmfp->dbenv; + dbmp = dbenv->mp_handle; + + R_LOCK(dbenv, dbmp->reginfo); + last_pgno = dbmfp->mfp->last_pgno; + R_UNLOCK(dbenv, dbmp->reginfo); + + if (pgno > last_pgno) { + __db_err(dbenv, "Truncate beyond the end of file"); + return (EINVAL); + } + + pg = pgno; + do { + if ((ret = + __memp_fget(dbmfp, &pg, DB_MPOOL_FREE, &pagep)) != 0) + return (ret); + } while (pg++ < last_pgno); + + if (!F_ISSET(dbmfp->mfp, MP_TEMP) && + (ret = __os_truncate(dbenv, + dbmfp->fhp, pgno, dbmfp->mfp->stat.st_pagesize)) != 0) + return (ret); + + R_LOCK(dbenv, dbmp->reginfo); + dbmfp->mfp->last_pgno = pgno - 1; + R_UNLOCK(dbenv, dbmp->reginfo); + + return (ret); +} diff --git a/db/mp/mp_region.c b/db/mp/mp_region.c index d16d580cb..64e0eeb7e 100644 --- a/db/mp/mp_region.c +++ b/db/mp/mp_region.c @@ -1,14 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
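The new __memp_ftruncate above refes and frees every cached page at or past the new end of file, truncates the backing file to the page boundary, and records the new last page. The following sketch models that flow with an in-memory array standing in for the cache and a comment marking where the real file truncation would happen; all names are illustrative.

/*
 * Sketch of the truncation flow in __memp_ftruncate above.
 */
#include <errno.h>
#include <stdio.h>

#define EX_PAGESIZE     4096
#define EX_NPAGES       16

static int cached[EX_NPAGES];           /* 1 if page pgno is in the cache */
static unsigned int last_pgno = EX_NPAGES - 1;

static int
ex_truncate(unsigned int pgno)
{
        unsigned int pg;

        if (pgno > last_pgno)           /* can't truncate past the end */
                return (EINVAL);

        for (pg = pgno; pg <= last_pgno; pg++)
                cached[pg] = 0;         /* drop the buffer, as DB_MPOOL_FREE */

        /*
         * A real implementation would now cut the backing file back to
         * (off_t)pgno * EX_PAGESIZE bytes, e.g. with ftruncate().
         */
        last_pgno = pgno - 1;
        return (0);
}

int
main(void)
{
        unsigned int i, n;
        int ret;

        for (i = 0; i < EX_NPAGES; i++)
                cached[i] = 1;
        ret = ex_truncate(8);
        for (n = 0, i = 0; i < EX_NPAGES; i++)
                n += (unsigned int)cached[i];
        printf("truncate=%d last_pgno=%u cached=%u\n", ret, last_pgno, n);
        return (0);
}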
+ * + * $Id: mp_region.c,v 11.67 2004/09/17 22:00:31 mjc Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: mp_region.c,v 11.55 2003/06/30 17:20:19 bostic Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #include @@ -20,9 +19,10 @@ static const char revid[] = "$Id: mp_region.c,v 11.55 2003/06/30 17:20:19 bostic #include "dbinc/db_shash.h" #include "dbinc/mp.h" -static int __mpool_init __P((DB_ENV *, DB_MPOOL *, int, int)); +static int __memp_init __P((DB_ENV *, DB_MPOOL *, u_int, u_int32_t)); +static void __memp_init_config __P((DB_ENV *, MPOOL *)); #ifdef HAVE_MUTEX_SYSTEM_RESOURCES -static size_t __mpool_region_maint __P((REGINFO *)); +static size_t __memp_region_maint __P((REGINFO *)); #endif /* @@ -38,9 +38,10 @@ __memp_open(dbenv) DB_MPOOL *dbmp; MPOOL *mp; REGINFO reginfo; - roff_t reg_size, *regids; - u_int32_t i; - int htab_buckets, ret; + roff_t reg_size; + u_int i; + u_int32_t htab_buckets, *regids; + int ret; /* Figure out how big each cache region is. */ reg_size = (dbenv->mp_gbytes / dbenv->mp_ncache) * GIGABYTE; @@ -55,7 +56,7 @@ __memp_open(dbenv) * files. Use a pagesize of 1K for the calculation -- we walk these * chains a lot, they must be kept short. */ - htab_buckets = __db_tablesize((reg_size / (1 * 1024)) / 10); + htab_buckets = __db_tablesize((u_int32_t)(reg_size / (1 * 1024)) / 10); /* Create and initialize the DB_MPOOL structure. */ if ((ret = __os_calloc(dbenv, 1, sizeof(*dbmp), &dbmp)) != 0) @@ -66,9 +67,9 @@ __memp_open(dbenv) /* Join/create the first mpool region. */ memset(®info, 0, sizeof(REGINFO)); + reginfo.dbenv = dbenv; reginfo.type = REGION_TYPE_MPOOL; reginfo.id = INVALID_REGION_ID; - reginfo.mode = dbenv->db_mode; reginfo.flags = REGION_JOIN_OK; if (F_ISSET(dbenv, DB_ENV_CREATE)) F_SET(®info, REGION_CREATE_OK); @@ -95,31 +96,33 @@ __memp_open(dbenv) dbmp->reginfo[0] = reginfo; /* Initialize the first region. */ - if ((ret = __mpool_init(dbenv, dbmp, 0, htab_buckets)) != 0) + if ((ret = __memp_init(dbenv, dbmp, 0, htab_buckets)) != 0) goto err; /* * Create/initialize remaining regions and copy their IDs into * the first region. */ - mp = R_ADDR(dbmp->reginfo, dbmp->reginfo[0].rp->primary); - regids = R_ADDR(dbmp->reginfo, mp->regids); + mp = R_ADDR(dbenv, dbmp->reginfo, dbmp->reginfo[0].rp->primary); + regids = R_ADDR(dbenv, dbmp->reginfo, mp->regids); for (i = 1; i < dbmp->nreg; ++i) { + dbmp->reginfo[i].dbenv = dbenv; dbmp->reginfo[i].type = REGION_TYPE_MPOOL; dbmp->reginfo[i].id = INVALID_REGION_ID; - dbmp->reginfo[i].mode = dbenv->db_mode; dbmp->reginfo[i].flags = REGION_CREATE_OK; if ((ret = __db_r_attach( dbenv, &dbmp->reginfo[i], reg_size)) != 0) goto err; if ((ret = - __mpool_init(dbenv, dbmp, i, htab_buckets)) != 0) + __memp_init(dbenv, dbmp, i, htab_buckets)) != 0) goto err; R_UNLOCK(dbenv, &dbmp->reginfo[i]); regids[i] = dbmp->reginfo[i].id; } + __memp_init_config(dbenv, mp); + R_UNLOCK(dbenv, dbmp->reginfo); } else { /* @@ -127,7 +130,7 @@ __memp_open(dbenv) * the REGINFO structures and fill in local copies of that * information. */ - mp = R_ADDR(®info, reginfo.rp->primary); + mp = R_ADDR(dbenv, ®info, reginfo.rp->primary); dbmp->nreg = mp->nreg; if ((ret = __os_calloc(dbenv, dbmp->nreg, sizeof(REGINFO), &dbmp->reginfo)) != 0) @@ -137,6 +140,8 @@ __memp_open(dbenv) dbmp->reginfo[i].id = INVALID_REGION_ID; dbmp->reginfo[0] = reginfo; + __memp_init_config(dbenv, mp); + /* * We have to unlock the primary mpool region before we attempt * to join the additional mpool regions. 
If we don't, we can @@ -151,11 +156,11 @@ __memp_open(dbenv) R_UNLOCK(dbenv, dbmp->reginfo); /* Join remaining regions. */ - regids = R_ADDR(dbmp->reginfo, mp->regids); + regids = R_ADDR(dbenv, dbmp->reginfo, mp->regids); for (i = 1; i < dbmp->nreg; ++i) { + dbmp->reginfo[i].dbenv = dbenv; dbmp->reginfo[i].type = REGION_TYPE_MPOOL; dbmp->reginfo[i].id = regids[i]; - dbmp->reginfo[i].mode = 0; dbmp->reginfo[i].flags = REGION_JOIN_OK; if ((ret = __db_r_attach( dbenv, &dbmp->reginfo[i], 0)) != 0) @@ -166,8 +171,8 @@ __memp_open(dbenv) /* Set the local addresses for the regions. */ for (i = 0; i < dbmp->nreg; ++i) - dbmp->reginfo[i].primary = - R_ADDR(&dbmp->reginfo[i], dbmp->reginfo[i].rp->primary); + dbmp->reginfo[i].primary = R_ADDR(dbenv, + &dbmp->reginfo[i], dbmp->reginfo[i].rp->primary); /* If the region is threaded, allocate a mutex to lock the handles. */ if (F_ISSET(dbenv, DB_ENV_THREAD) && @@ -197,14 +202,15 @@ err: if (dbmp->reginfo != NULL && dbmp->reginfo[0].addr != NULL) { } /* - * __mpool_init -- + * __memp_init -- * Initialize a MPOOL structure in shared memory. */ static int -__mpool_init(dbenv, dbmp, reginfo_off, htab_buckets) +__memp_init(dbenv, dbmp, reginfo_off, htab_buckets) DB_ENV *dbenv; DB_MPOOL *dbmp; - int reginfo_off, htab_buckets; + u_int reginfo_off; + u_int32_t htab_buckets; { DB_MPOOL_HASH *htab; MPOOL *mp; @@ -212,27 +218,26 @@ __mpool_init(dbenv, dbmp, reginfo_off, htab_buckets) #ifdef HAVE_MUTEX_SYSTEM_RESOURCES size_t maint_size; #endif - int i, ret; + u_int32_t i; + int ret; void *p; - mp = NULL; - reginfo = &dbmp->reginfo[reginfo_off]; - if ((ret = __db_shalloc(reginfo->addr, + if ((ret = __db_shalloc(reginfo, sizeof(MPOOL), MUTEX_ALIGN, ®info->primary)) != 0) goto mem_err; - reginfo->rp->primary = R_OFFSET(reginfo, reginfo->primary); + reginfo->rp->primary = R_OFFSET(dbenv, reginfo, reginfo->primary); mp = reginfo->primary; memset(mp, 0, sizeof(*mp)); #ifdef HAVE_MUTEX_SYSTEM_RESOURCES - maint_size = __mpool_region_maint(reginfo); + maint_size = __memp_region_maint(reginfo); /* Allocate room for the maintenance info and initialize it. */ - if ((ret = __db_shalloc(reginfo->addr, + if ((ret = __db_shalloc(reginfo, sizeof(REGMAINT) + maint_size, 0, &p)) != 0) goto mem_err; __db_maintinit(reginfo, p, maint_size); - mp->maint_off = R_OFFSET(reginfo, p); + mp->maint_off = R_OFFSET(dbenv, reginfo, p); #endif if (reginfo_off == 0) { @@ -241,21 +246,20 @@ __mpool_init(dbenv, dbmp, reginfo_off, htab_buckets) ZERO_LSN(mp->lsn); mp->nreg = dbmp->nreg; - if ((ret = __db_shalloc(dbmp->reginfo[0].addr, - dbmp->nreg * sizeof(int), 0, &p)) != 0) + if ((ret = __db_shalloc(&dbmp->reginfo[0], + dbmp->nreg * sizeof(u_int32_t), 0, &p)) != 0) goto mem_err; - mp->regids = R_OFFSET(dbmp->reginfo, p); + mp->regids = R_OFFSET(dbenv, dbmp->reginfo, p); } /* Allocate hash table space and initialize it. 
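The region setup above sizes the hash table at roughly one bucket per ten 1KB pages of cache so the chains stay short. The sketch below reproduces that heuristic; the real __db_tablesize chooses its own rounded bucket count, so the power-of-two rounding here is only an illustrative stand-in.

/*
 * Sketch of the hash-table sizing heuristic used when creating a region.
 */
#include <stdio.h>

static unsigned int
ex_tablesize(unsigned int n)
{
        unsigned int size;

        for (size = 1; size < n; size <<= 1)    /* round up to a power of 2 */
                ;
        return (size);
}

int
main(void)
{
        unsigned long reg_size = 8UL * 1024 * 1024;     /* 8MB cache region */
        unsigned int buckets;

        buckets = ex_tablesize((unsigned int)((reg_size / 1024) / 10));
        printf("buckets=%u\n", buckets);        /* 819 rounds up to 1024 */
        return (0);
}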
*/ - if ((ret = __db_shalloc(reginfo->addr, - htab_buckets * sizeof(DB_MPOOL_HASH), 0, &htab)) != 0) + if ((ret = __db_shalloc(reginfo, + htab_buckets * sizeof(DB_MPOOL_HASH), MUTEX_ALIGN, &htab)) != 0) goto mem_err; - mp->htab = R_OFFSET(reginfo, htab); + mp->htab = R_OFFSET(dbenv, reginfo, htab); for (i = 0; i < htab_buckets; i++) { if ((ret = __db_mutex_setup(dbenv, - reginfo, &htab[i].hash_mutex, - MUTEX_NO_RLOCK)) != 0) + reginfo, &htab[i].hash_mutex, MUTEX_NO_RLOCK)) != 0) return (ret); SH_TAILQ_INIT(&htab[i].hash_bucket); htab[i].hash_page_dirty = htab[i].hash_priority = 0; @@ -274,6 +278,26 @@ mem_err:__db_err(dbenv, "Unable to allocate memory for mpool region"); return (ret); } +/* + * __memp_init_config -- + * Initialize shared configuration information. + */ +static void +__memp_init_config(dbenv, mp) + DB_ENV *dbenv; + MPOOL *mp; +{ + /* A process joining the region may reset the mpool configuration. */ + if (dbenv->mp_mmapsize != 0) + mp->mp_mmapsize = dbenv->mp_mmapsize; + if (dbenv->mp_maxopenfd != 0) + mp->mp_maxopenfd = dbenv->mp_maxopenfd; + if (dbenv->mp_maxwrite != 0) + mp->mp_maxwrite = dbenv->mp_maxwrite; + if (dbenv->mp_maxwrite_sleep != 0) + mp->mp_maxwrite_sleep = dbenv->mp_maxwrite_sleep; +} + /* * __memp_dbenv_refresh -- * Clean up after the mpool system on a close or failed open. @@ -284,36 +308,76 @@ int __memp_dbenv_refresh(dbenv) DB_ENV *dbenv; { + BH *bhp; DB_MPOOL *dbmp; DB_MPOOLFILE *dbmfp; + DB_MPOOL_HASH *hp; DB_MPREG *mpreg; - u_int32_t i; + MPOOL *mp; + REGINFO *reginfo; + u_int32_t bucket, i; int ret, t_ret; ret = 0; dbmp = dbenv->mp_handle; - /* Discard DB_MPREGs. */ - while ((mpreg = LIST_FIRST(&dbmp->dbregq)) != NULL) { - LIST_REMOVE(mpreg, q); - __os_free(dbenv, mpreg); - } + /* + * If a private region, return the memory to the heap. Not needed for + * filesystem-backed or system shared memory regions, that memory isn't + * owned by any particular process. + * + * Discard buffers. + */ + if (F_ISSET(dbenv, DB_ENV_PRIVATE)) + for (i = 0; i < dbmp->nreg; ++i) { + reginfo = &dbmp->reginfo[i]; + mp = reginfo->primary; + for (hp = R_ADDR(dbenv, reginfo, mp->htab), bucket = 0; + bucket < mp->htab_buckets; ++hp, ++bucket) + while ((bhp = SH_TAILQ_FIRST( + &hp->hash_bucket, __bh)) != NULL) + __memp_bhfree(dbmp, hp, bhp, + BH_FREE_FREEMEM | BH_FREE_UNLOCKED); + } /* Discard DB_MPOOLFILEs. */ while ((dbmfp = TAILQ_FIRST(&dbmp->dbmfq)) != NULL) if ((t_ret = __memp_fclose(dbmfp, 0)) != 0 && ret == 0) ret = t_ret; - /* Discard the thread mutex. */ + /* Discard DB_MPREGs. */ + while ((mpreg = LIST_FIRST(&dbmp->dbregq)) != NULL) { + LIST_REMOVE(mpreg, q); + __os_free(dbenv, mpreg); + } + + /* Discard the DB_MPOOL thread mutex. */ if (dbmp->mutexp != NULL) __db_mutex_free(dbenv, dbmp->reginfo, dbmp->mutexp); - /* Detach from the region(s). */ - for (i = 0; i < dbmp->nreg; ++i) - if ((t_ret = __db_r_detach( - dbenv, &dbmp->reginfo[i], 0)) != 0 && ret == 0) + if (F_ISSET(dbenv, DB_ENV_PRIVATE)) { + /* Discard REGION IDs. */ + reginfo = &dbmp->reginfo[0]; + mp = dbmp->reginfo[0].primary; + __db_shalloc_free(reginfo, R_ADDR(dbenv, reginfo, mp->regids)); + + /* Discard Hash tables. */ + for (i = 0; i < dbmp->nreg; ++i) { + reginfo = &dbmp->reginfo[i]; + mp = reginfo->primary; + __db_shalloc_free(reginfo, + R_ADDR(dbenv, reginfo, mp->htab)); + } + } + + /* Detach from the region. */ + for (i = 0; i < dbmp->nreg; ++i) { + reginfo = &dbmp->reginfo[i]; + if ((t_ret = __db_r_detach(dbenv, reginfo, 0)) != 0 && ret == 0) ret = t_ret; + } + /* Discard DB_MPOOL. 
*/ __os_free(dbenv, dbmp->reginfo); __os_free(dbenv, dbmp); @@ -323,12 +387,12 @@ __memp_dbenv_refresh(dbenv) #ifdef HAVE_MUTEX_SYSTEM_RESOURCES /* - * __mpool_region_maint -- + * __memp_region_maint -- * Return the amount of space needed for region maintenance info. * */ static size_t -__mpool_region_maint(infop) +__memp_region_maint(infop) REGINFO *infop; { size_t s; @@ -348,19 +412,34 @@ __mpool_region_maint(infop) #endif /* - * __mpool_region_destroy + * __memp_region_destroy * Destroy any region maintenance info. * - * PUBLIC: void __mpool_region_destroy __P((DB_ENV *, REGINFO *)); + * PUBLIC: void __memp_region_destroy __P((DB_ENV *, REGINFO *)); */ void -__mpool_region_destroy(dbenv, infop) +__memp_region_destroy(dbenv, infop) DB_ENV *dbenv; REGINFO *infop; { - __db_shlocks_destroy(infop, (REGMAINT *)R_ADDR(infop, - ((MPOOL *)R_ADDR(infop, infop->rp->primary))->maint_off)); + /* + * This routine is called in two cases: when discarding the mutexes + * from a previous Berkeley DB run, during recovery, and two, when + * discarding the mutexes as we shut down the database environment. + * In the latter case, we also need to discard shared memory segments, + * this is the last time we use them, and the last region-specific + * call we make. + */ +#ifdef HAVE_MUTEX_SYSTEM_RESOURCES + MPOOL *mp; - COMPQUIET(dbenv, NULL); - COMPQUIET(infop, NULL); + mp = R_ADDR(dbenv, infop, infop->rp->primary); + + /* Destroy mutexes. */ + __db_shlocks_destroy(infop, R_ADDR(dbenv, infop, mp->maint_off)); + if (infop->primary != NULL && F_ISSET(dbenv, DB_ENV_PRIVATE)) + __db_shalloc_free(infop, R_ADDR(dbenv, infop, mp->maint_off)); +#endif + if (infop->primary != NULL && F_ISSET(dbenv, DB_ENV_PRIVATE)) + __db_shalloc_free(infop, infop->primary); } diff --git a/db/mp/mp_register.c b/db/mp/mp_register.c index aa678a1e6..0294fd5f0 100644 --- a/db/mp/mp_register.c +++ b/db/mp/mp_register.c @@ -1,14 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: mp_register.c,v 11.26 2004/07/15 15:52:54 sue Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: mp_register.c,v 11.24 2003/09/13 19:20:40 bostic Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #include @@ -45,7 +44,7 @@ __memp_register_pp(dbenv, ftype, pgin, pgout) __env_rep_enter(dbenv); ret = __memp_register(dbenv, ftype, pgin, pgout); if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } diff --git a/db/mp/mp_stat.c b/db/mp/mp_stat.c index bb1a41eb9..b927f2b9d 100644 --- a/db/mp/mp_stat.c +++ b/db/mp/mp_stat.c @@ -1,14 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
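The teardown code above, like __memp_fclose before it, uses the ret/t_ret idiom: every cleanup step runs regardless of earlier failures, and the first nonzero status is the one ultimately returned. A tiny self-contained sketch of that idiom follows, with hypothetical cleanup functions and a simulated failure.

/*
 * Sketch of the ret/t_ret error-aggregation idiom used during teardown.
 */
#include <stdio.h>

static int cleanup_files(void)   { return (0); }
static int cleanup_mutex(void)   { return (5); }        /* simulated failure */
static int cleanup_regions(void) { return (0); }

int
main(void)
{
        int ret, t_ret;

        ret = 0;
        if ((t_ret = cleanup_files()) != 0 && ret == 0)
                ret = t_ret;
        if ((t_ret = cleanup_mutex()) != 0 && ret == 0)
                ret = t_ret;
        if ((t_ret = cleanup_regions()) != 0 && ret == 0)
                ret = t_ret;

        printf("first error: %d\n", ret);
        return (ret);
}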
+ * + * $Id: mp_stat.c,v 11.81 2004/09/28 20:08:17 bostic Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: mp_stat.c,v 11.58 2003/09/13 19:20:41 bostic Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #include @@ -24,12 +23,17 @@ static const char revid[] = "$Id: mp_stat.c,v 11.58 2003/09/13 19:20:41 bostic E #include "dbinc/log.h" #include "dbinc/mp.h" -static void __memp_dumpcache __P((DB_ENV *, - DB_MPOOL *, REGINFO *, size_t *, FILE *, u_int32_t)); -static void __memp_pbh __P((DB_MPOOL *, BH *, size_t *, FILE *)); +#ifdef HAVE_STATISTICS +static void __memp_print_bh + __P((DB_ENV *, DB_MPOOL *, BH *, roff_t *, u_int32_t)); +static int __memp_print_all __P((DB_ENV *, u_int32_t)); +static int __memp_print_stats __P((DB_ENV *, u_int32_t)); +static void __memp_print_hash __P((DB_ENV *, + DB_MPOOL *, REGINFO *, roff_t *, u_int32_t)); static int __memp_stat __P((DB_ENV *, DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, u_int32_t)); -static void __memp_stat_wait __P((REGINFO *, MPOOL *, DB_MPOOL_STAT *, int)); +static void __memp_stat_wait __P((DB_ENV *, + REGINFO *, MPOOL *, DB_MPOOL_STAT *, u_int32_t)); /* * __memp_stat_pp -- @@ -49,7 +53,7 @@ __memp_stat_pp(dbenv, gspp, fspp, flags) PANIC_CHECK(dbenv); ENV_REQUIRES_CONFIG(dbenv, - dbenv->mp_handle, "memp_stat", DB_INIT_MPOOL); + dbenv->mp_handle, "DB_ENV->memp_stat", DB_INIT_MPOOL); if ((ret = __db_fchk(dbenv, "DB_ENV->memp_stat", flags, DB_STAT_CLEAR)) != 0) @@ -60,13 +64,13 @@ __memp_stat_pp(dbenv, gspp, fspp, flags) __env_rep_enter(dbenv); ret = __memp_stat(dbenv, gspp, fspp, flags); if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } /* * __memp_stat -- - * DB_ENV->memp_stat. + * DB_ENV->memp_stat */ static int __memp_stat(dbenv, gspp, fspp, flags) @@ -80,8 +84,8 @@ __memp_stat(dbenv, gspp, fspp, flags) DB_MPOOL_STAT *sp; MPOOL *c_mp, *mp; MPOOLFILE *mfp; - size_t len, nlen, pagesize; - u_int32_t pages, i; + size_t len, nlen; + u_int32_t pages, pagesize, i; int ret; char *name, *tname; @@ -99,7 +103,8 @@ __memp_stat(dbenv, gspp, fspp, flags) /* * Initialization and information that is not maintained on - * a per-cache basis. + * a per-cache basis. Note that configuration information + * may be modified at any time, and so we have to lock. */ c_mp = dbmp->reginfo[0].primary; sp->st_gbytes = c_mp->stat.st_gbytes; @@ -107,6 +112,13 @@ __memp_stat(dbenv, gspp, fspp, flags) sp->st_ncache = dbmp->nreg; sp->st_regsize = dbmp->reginfo[0].rp->size; + R_LOCK(dbenv, dbmp->reginfo); + sp->st_mmapsize = mp->mp_mmapsize; + sp->st_maxopenfd = mp->mp_maxopenfd; + sp->st_maxwrite = mp->mp_maxwrite; + sp->st_maxwrite_sleep = mp->mp_maxwrite_sleep; + R_UNLOCK(dbenv, dbmp->reginfo); + /* Walk the cache list and accumulate the global information. 
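The new code in __memp_stat above takes the region lock before copying mp_mmapsize, mp_maxopenfd, mp_maxwrite and mp_maxwrite_sleep out of the shared MPOOL, because another process joining the region may reset them at any time (see __memp_init_config earlier in this patch). A minimal standalone sketch of the same snapshot-under-lock idea, using a pthread mutex in place of Berkeley DB's region lock; the structure and field names below are illustrative only:

#include <pthread.h>
#include <stdint.h>

struct shared_cfg {			/* toy stand-in for the shared MPOOL fields */
	pthread_mutex_t	lock;
	uint32_t	mmapsize, maxopenfd, maxwrite, maxwrite_sleep;
};

static void
snapshot_cfg(struct shared_cfg *cfg, uint32_t out[4])
{
	pthread_mutex_lock(&cfg->lock);		/* __memp_stat uses R_LOCK here */
	out[0] = cfg->mmapsize;
	out[1] = cfg->maxopenfd;
	out[2] = cfg->maxwrite;
	out[3] = cfg->maxwrite_sleep;
	pthread_mutex_unlock(&cfg->lock);
}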
*/ for (i = 0; i < mp->nreg; ++i) { c_mp = dbmp->reginfo[i].primary; @@ -125,7 +137,7 @@ __memp_stat(dbenv, gspp, fspp, flags) * st_page_dirty calculated by __memp_stat_hash * st_page_clean calculated here */ - __memp_stat_hash( + __memp_stat_hash(dbenv, &dbmp->reginfo[i], c_mp, &sp->st_page_dirty); sp->st_page_clean = sp->st_pages - sp->st_page_dirty; sp->st_hash_buckets += c_mp->stat.st_hash_buckets; @@ -136,7 +148,8 @@ __memp_stat(dbenv, gspp, fspp, flags) * st_hash_nowait calculated by __memp_stat_wait * st_hash_wait */ - __memp_stat_wait(&dbmp->reginfo[i], c_mp, sp, flags); + __memp_stat_wait(dbenv, + &dbmp->reginfo[i], c_mp, sp, flags); sp->st_region_nowait += dbmp->reginfo[i].rp->mutex.mutex_set_nowait; sp->st_region_wait += @@ -154,8 +167,7 @@ __memp_stat(dbenv, gspp, fspp, flags) c_mp->stat.st_alloc_max_pages; if (LF_ISSET(DB_STAT_CLEAR)) { - dbmp->reginfo[i].rp->mutex.mutex_set_wait = 0; - dbmp->reginfo[i].rp->mutex.mutex_set_nowait = 0; + MUTEX_CLEAR(&dbmp->reginfo[i].rp->mutex); R_LOCK(dbenv, dbmp->reginfo); pages = c_mp->stat.st_pages; @@ -257,82 +269,281 @@ __memp_stat(dbenv, gspp, fspp, flags) return (0); } -#define FMAP_ENTRIES 200 /* Files we map. */ +/* + * __memp_stat_print_pp -- + * DB_ENV->memp_stat_print pre/post processing. + * + * PUBLIC: int __memp_stat_print_pp __P((DB_ENV *, u_int32_t)); + */ +int +__memp_stat_print_pp(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + int rep_check, ret; -#define MPOOL_DUMP_HASH 0x01 /* Debug hash chains. */ -#define MPOOL_DUMP_MEM 0x04 /* Debug region memory. */ -#define MPOOL_DUMP_ALL 0x07 /* Debug all. */ + PANIC_CHECK(dbenv); + ENV_REQUIRES_CONFIG(dbenv, + dbenv->mp_handle, "DB_ENV->memp_stat_print", DB_INIT_MPOOL); + +#define DB_STAT_MEMP_FLAGS \ + (DB_STAT_ALL | DB_STAT_CLEAR | DB_STAT_MEMP_HASH) + if ((ret = __db_fchk(dbenv, + "DB_ENV->memp_stat_print", flags, DB_STAT_MEMP_FLAGS)) != 0) + return (ret); + + rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; + if (rep_check) + __env_rep_enter(dbenv); + ret = __memp_stat_print(dbenv, flags); + if (rep_check) + __env_db_rep_exit(dbenv); + return (ret); +} + +#define FMAP_ENTRIES 200 /* Files we map. */ /* - * __memp_dump_region -- - * Display MPOOL structures. + * __memp_stat_print -- + * DB_ENV->memp_stat_print method. * - * PUBLIC: int __memp_dump_region __P((DB_ENV *, const char *, FILE *)); + * PUBLIC: int __memp_stat_print __P((DB_ENV *, u_int32_t)); */ int -__memp_dump_region(dbenv, area, fp) +__memp_stat_print(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + u_int32_t orig_flags; + int ret; + + orig_flags = flags; + LF_CLR(DB_STAT_CLEAR); + if (flags == 0 || LF_ISSET(DB_STAT_ALL)) { + ret = __memp_print_stats(dbenv, orig_flags); + if (flags == 0 || ret != 0) + return (ret); + } + + if (LF_ISSET(DB_STAT_ALL | DB_STAT_MEMP_HASH) && + (ret = __memp_print_all(dbenv, orig_flags)) != 0) + return (ret); + + return (0); +} + +/* + * __memp_print_stats -- + * Display default mpool region statistics. 
+ */ +static int +__memp_print_stats(dbenv, flags) DB_ENV *dbenv; - const char *area; - FILE *fp; + u_int32_t flags; +{ + DB_MPOOL_FSTAT **fsp, **tfsp; + DB_MPOOL_STAT *gsp; + int ret; + + if ((ret = __memp_stat(dbenv, &gsp, &fsp, flags)) != 0) + return (ret); + + if (LF_ISSET(DB_STAT_ALL)) + __db_msg(dbenv, "Default cache region information:"); + __db_dlbytes(dbenv, "Total cache size", + (u_long)gsp->st_gbytes, (u_long)0, (u_long)gsp->st_bytes); + __db_dl(dbenv, "Number of caches", (u_long)gsp->st_ncache); + __db_dlbytes(dbenv, "Pool individual cache size", + (u_long)0, (u_long)0, (u_long)gsp->st_regsize); + __db_dlbytes(dbenv, "Maximum memory-mapped file size", + (u_long)0, (u_long)0, (u_long)gsp->st_mmapsize); + STAT_LONG("Maximum open file descriptors", gsp->st_maxopenfd); + STAT_LONG("Maximum sequential buffer writes", gsp->st_maxwrite); + STAT_LONG("Sleep after writing maximum sequential buffers", + gsp->st_maxwrite_sleep); + __db_dl(dbenv, + "Requested pages mapped into the process' address space", + (u_long)gsp->st_map); + __db_dl_pct(dbenv, "Requested pages found in the cache", + (u_long)gsp->st_cache_hit, DB_PCT( + gsp->st_cache_hit, gsp->st_cache_hit + gsp->st_cache_miss), NULL); + __db_dl(dbenv, "Requested pages not found in the cache", + (u_long)gsp->st_cache_miss); + __db_dl(dbenv, + "Pages created in the cache", (u_long)gsp->st_page_create); + __db_dl(dbenv, "Pages read into the cache", (u_long)gsp->st_page_in); + __db_dl(dbenv, "Pages written from the cache to the backing file", + (u_long)gsp->st_page_out); + __db_dl(dbenv, "Clean pages forced from the cache", + (u_long)gsp->st_ro_evict); + __db_dl(dbenv, "Dirty pages forced from the cache", + (u_long)gsp->st_rw_evict); + __db_dl(dbenv, "Dirty pages written by trickle-sync thread", + (u_long)gsp->st_page_trickle); + __db_dl(dbenv, "Current total page count", + (u_long)gsp->st_pages); + __db_dl(dbenv, "Current clean page count", + (u_long)gsp->st_page_clean); + __db_dl(dbenv, "Current dirty page count", + (u_long)gsp->st_page_dirty); + __db_dl(dbenv, "Number of hash buckets used for page location", + (u_long)gsp->st_hash_buckets); + __db_dl(dbenv, + "Total number of times hash chains searched for a page", + (u_long)gsp->st_hash_searches); + __db_dl(dbenv, "The longest hash chain searched for a page", + (u_long)gsp->st_hash_longest); + __db_dl(dbenv, + "Total number of hash buckets examined for page location", + (u_long)gsp->st_hash_examined); + __db_dl_pct(dbenv, + "The number of hash bucket locks that required waiting", + (u_long)gsp->st_hash_wait, DB_PCT( + gsp->st_hash_wait, gsp->st_hash_wait + gsp->st_hash_nowait), NULL); + __db_dl(dbenv, + "The maximum number of times any hash bucket lock was waited for", + (u_long)gsp->st_hash_max_wait); + __db_dl_pct(dbenv, + "The number of region locks that required waiting", + (u_long)gsp->st_region_wait, DB_PCT(gsp->st_region_wait, + gsp->st_region_wait + gsp->st_region_nowait), NULL); + __db_dl(dbenv, "The number of page allocations", (u_long)gsp->st_alloc); + __db_dl(dbenv, + "The number of hash buckets examined during allocations", + (u_long)gsp->st_alloc_buckets); + __db_dl(dbenv, + "The maximum number of hash buckets examined for an allocation", + (u_long)gsp->st_alloc_max_buckets); + __db_dl(dbenv, "The number of pages examined during allocations", + (u_long)gsp->st_alloc_pages); + __db_dl(dbenv, "The max number of pages examined for an allocation", + (u_long)gsp->st_alloc_max_pages); + + for (tfsp = fsp; fsp != NULL && *tfsp != NULL; ++tfsp) { + if (LF_ISSET(DB_STAT_ALL)) + 
__db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "Pool File: %s", (*tfsp)->file_name); + __db_dl(dbenv, "Page size", (u_long)(*tfsp)->st_pagesize); + __db_dl(dbenv, + "Requested pages mapped into the process' address space", + (u_long)(*tfsp)->st_map); + __db_dl_pct(dbenv, "Requested pages found in the cache", + (u_long)(*tfsp)->st_cache_hit, DB_PCT((*tfsp)->st_cache_hit, + (*tfsp)->st_cache_hit + (*tfsp)->st_cache_miss), NULL); + __db_dl(dbenv, "Requested pages not found in the cache", + (u_long)(*tfsp)->st_cache_miss); + __db_dl(dbenv, "Pages created in the cache", + (u_long)(*tfsp)->st_page_create); + __db_dl(dbenv, "Pages read into the cache", + (u_long)(*tfsp)->st_page_in); + __db_dl(dbenv, + "Pages written from the cache to the backing file", + (u_long)(*tfsp)->st_page_out); + } + + __os_ufree(dbenv, fsp); + __os_ufree(dbenv, gsp); + return (0); +} + +/* + * __memp_print_all -- + * Display debugging mpool region statistics. + */ +static int +__memp_print_all(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; { static const FN fn[] = { - { MP_CAN_MMAP, "mmapped" }, - { MP_DIRECT, "no buffer" }, - { MP_EXTENT, "extent" }, + { MP_CAN_MMAP, "MP_CAN_MMAP" }, + { MP_DIRECT, "MP_DIRECT" }, + { MP_EXTENT, "MP_EXTENT" }, { MP_FAKE_DEADFILE, "deadfile" }, { MP_FAKE_FILEWRITTEN, "file written" }, { MP_FAKE_NB, "no backing file" }, { MP_FAKE_UOC, "unlink on close" }, - { MP_TEMP, "temporary" }, - { 0, NULL } + { MP_NOT_DURABLE, "not durable" }, + { MP_TEMP, "MP_TEMP" }, + { 0, NULL } + }; + static const FN cfn[] = { + { DB_MPOOL_NOFILE, "DB_MPOOL_NOFILE" }, + { DB_MPOOL_UNLINK, "DB_MPOOL_UNLINK" }, + { 0, NULL } }; DB_MPOOL *dbmp; DB_MPOOLFILE *dbmfp; MPOOL *mp; MPOOLFILE *mfp; - size_t fmap[FMAP_ENTRIES + 1]; - u_int32_t i, flags, mfp_flags; + roff_t fmap[FMAP_ENTRIES + 1]; + u_int32_t i, mfp_flags; int cnt; - u_int8_t *p; - - PANIC_CHECK(dbenv); - ENV_REQUIRES_CONFIG(dbenv, - dbenv->mp_handle, "memp_dump_region", DB_INIT_MPOOL); dbmp = dbenv->mp_handle; - - /* Make it easy to call from the debugger. */ - if (fp == NULL) - fp = stderr; - - for (flags = 0; *area != '\0'; ++area) - switch (*area) { - case 'A': - LF_SET(MPOOL_DUMP_ALL); - break; - case 'h': - LF_SET(MPOOL_DUMP_HASH); - break; - case 'm': - LF_SET(MPOOL_DUMP_MEM); - break; - } - mp = dbmp->reginfo[0].primary; - /* Display MPOOL structures. */ - (void)fprintf(fp, "%s\nPool (region addr 0x%lx)\n", - DB_LINE, P_TO_ULONG(dbmp->reginfo[0].addr)); - - /* Display the MPOOLFILE structures. */ R_LOCK(dbenv, dbmp->reginfo); + + __db_print_reginfo(dbenv, dbmp->reginfo, "Mpool"); + + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "MPOOL structure:"); + STAT_LSN("Maximum checkpoint LSN", &mp->lsn); + STAT_ULONG("Hash table entries", mp->htab_buckets); + STAT_ULONG("Hash table last-checked", mp->last_checked); + STAT_ULONG("Hash table LRU count", mp->lru_count); + STAT_ULONG("Put counter", mp->put_counter); + + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "DB_MPOOL handle information:"); + __db_print_mutex( + dbenv, NULL, dbmp->mutexp, "DB_MPOOL handle mutex", flags); + STAT_ULONG("Underlying cache regions", dbmp->nreg); + + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "DB_MPOOLFILE structures:"); + for (cnt = 0, dbmfp = TAILQ_FIRST(&dbmp->dbmfq); + dbmfp != NULL; dbmfp = TAILQ_NEXT(dbmfp, q), ++cnt) { + __db_msg(dbenv, "File #%d: %s: per-process, %s", + cnt + 1, __memp_fn(dbmfp), + F_ISSET(dbmfp, MP_READONLY) ? 
"readonly" : "read/write"); + STAT_ULONG("Reference count", dbmfp->ref); + STAT_ULONG("Pinned block reference count", dbmfp->ref); + STAT_ULONG("Clear length", dbmfp->clear_len); + __db_print_fileid(dbenv, dbmfp->fileid, "\tID"); + STAT_ULONG("File type", dbmfp->ftype); + STAT_ULONG("LSN offset", dbmfp->lsn_offset); + STAT_ULONG("Max gbytes", dbmfp->gbytes); + STAT_ULONG("Max bytes", dbmfp->bytes); + STAT_ULONG("Cache priority", dbmfp->priority); + STAT_HEX("mmap address", dbmfp->addr); + STAT_ULONG("mmap length", dbmfp->len); + __db_prflags(dbenv, NULL, dbmfp->flags, cfn, NULL, "\tFlags"); + __db_print_fh(dbenv, dbmfp->fhp, flags); + } + + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "MPOOLFILE structures:"); for (cnt = 0, mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile); mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile), ++cnt) { - (void)fprintf(fp, "File #%d: %s: pagesize %lu\n", cnt + 1, - __memp_fns(dbmp, mfp), (u_long)mfp->stat.st_pagesize); - (void)fprintf(fp, "\t type %ld; ref %lu; blocks %lu; last %lu;", - (long)mfp->ftype, (u_long)mfp->mpf_cnt, - (u_long)mfp->block_cnt, (u_long)mfp->last_pgno); + __db_msg(dbenv, "File #%d: %s", cnt + 1, __memp_fns(dbmp, mfp)); + __db_print_mutex(dbenv, NULL, &mfp->mutex, "Mutex", flags); + + MUTEX_LOCK(dbenv, &mfp->mutex); + STAT_ULONG("Reference count", mfp->mpf_cnt); + STAT_ULONG("Block count", mfp->block_cnt); + STAT_ULONG("Last page number", mfp->last_pgno); + STAT_ULONG("Original last page number", mfp->orig_last_pgno); + STAT_ULONG("Maximum page number", mfp->maxpgno); + STAT_LONG("Type", mfp->ftype); + STAT_LONG("Priority", mfp->priority); + STAT_LONG("Page's LSN offset", mfp->lsn_off); + STAT_LONG("Page's clear length", mfp->clear_len); + + __db_print_fileid(dbenv, + R_ADDR(dbenv, dbmp->reginfo, mfp->fileid_off), "\tID"); + mfp_flags = 0; if (mfp->deadfile) FLD_SET(mfp_flags, MP_FAKE_DEADFILE); @@ -342,105 +553,86 @@ __memp_dump_region(dbenv, area, fp) FLD_SET(mfp_flags, MP_FAKE_NB); if (mfp->unlink_on_close) FLD_SET(mfp_flags, MP_FAKE_UOC); - __db_prflags(mfp_flags, fn, fp); - - (void)fprintf(fp, "\n\t UID: "); - p = R_ADDR(dbmp->reginfo, mfp->fileid_off); - for (i = 0; i < DB_FILE_ID_LEN; ++i, ++p) { - (void)fprintf(fp, "%x", (u_int)*p); - if (i < DB_FILE_ID_LEN - 1) - (void)fprintf(fp, " "); - } - (void)fprintf(fp, "\n"); + __db_prflags(dbenv, NULL, mfp_flags, fn, NULL, "\tFlags"); + if (cnt < FMAP_ENTRIES) - fmap[cnt] = R_OFFSET(dbmp->reginfo, mfp); + fmap[cnt] = R_OFFSET(dbenv, dbmp->reginfo, mfp); + MUTEX_UNLOCK(dbenv, &mfp->mutex); } R_UNLOCK(dbenv, dbmp->reginfo); - MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp); - for (dbmfp = TAILQ_FIRST(&dbmp->dbmfq); - dbmfp != NULL; dbmfp = TAILQ_NEXT(dbmfp, q), ++cnt) { - (void)fprintf(fp, "File #%d: %s: per-process, %s\n", - cnt + 1, __memp_fn(dbmfp), - F_ISSET(dbmfp, MP_READONLY) ? "readonly" : "read/write"); - if (cnt < FMAP_ENTRIES) - fmap[cnt] = R_OFFSET(dbmp->reginfo, mfp); - } - MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp); if (cnt < FMAP_ENTRIES) fmap[cnt] = INVALID_ROFF; else fmap[FMAP_ENTRIES] = INVALID_ROFF; - /* Dump the memory pools. */ + /* Dump the individual caches. */ for (i = 0; i < mp->nreg; ++i) { - (void)fprintf(fp, "%s\nCache #%d:\n", DB_LINE, i + 1); - __memp_dumpcache( - dbenv, dbmp, &dbmp->reginfo[i], fmap, fp, flags); + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "Cache #%d:", i + 1); + __memp_print_hash(dbenv, dbmp, &dbmp->reginfo[i], fmap, flags); } - /* Flush in case we're debugging. 
*/ - (void)fflush(fp); - return (0); } /* - * __memp_dumpcache -- - * Display statistics for a cache. + * __memp_print_hash -- + * Display hash bucket statistics for a cache. */ static void -__memp_dumpcache(dbenv, dbmp, reginfo, fmap, fp, flags) +__memp_print_hash(dbenv, dbmp, reginfo, fmap, flags) DB_ENV *dbenv; DB_MPOOL *dbmp; REGINFO *reginfo; - size_t *fmap; - FILE *fp; + roff_t *fmap; u_int32_t flags; { BH *bhp; DB_MPOOL_HASH *hp; + DB_MSGBUF mb; MPOOL *c_mp; - int bucket; + u_int32_t bucket; c_mp = reginfo->primary; + DB_MSGBUF_INIT(&mb); /* Display the hash table list of BH's. */ - if (LF_ISSET(MPOOL_DUMP_HASH)) { - (void)fprintf(fp, - "%s\nBH hash table (%lu hash slots)\nbucket (priority):\n", - DB_LINE, (u_long)c_mp->htab_buckets); - (void)fprintf(fp, - "\tpageno, file, ref, address [LSN] priority\n"); - - for (hp = R_ADDR(reginfo, c_mp->htab), - bucket = 0; bucket < c_mp->htab_buckets; ++hp, ++bucket) { - MUTEX_LOCK(dbenv, &hp->hash_mutex); - if ((bhp = - SH_TAILQ_FIRST(&hp->hash_bucket, __bh)) != NULL) - (void)fprintf(fp, "%lu (%u):\n", - (u_long)bucket, hp->hash_priority); - for (; bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh)) - __memp_pbh(dbmp, bhp, fmap, fp); - MUTEX_UNLOCK(dbenv, &hp->hash_mutex); + __db_msg(dbenv, + "BH hash table (%lu hash slots)", (u_long)c_mp->htab_buckets); + __db_msg(dbenv, "bucket #: priority, mutex"); + __db_msg(dbenv, + "\tpageno, file, ref, LSN, mutex, address, priority, flags"); + + for (hp = R_ADDR(dbenv, reginfo, c_mp->htab), + bucket = 0; bucket < c_mp->htab_buckets; ++hp, ++bucket) { + MUTEX_LOCK(dbenv, &hp->hash_mutex); + if ((bhp = + SH_TAILQ_FIRST(&hp->hash_bucket, __bh)) != NULL) { + __db_msgadd(dbenv, &mb, "bucket %lu: %lu, ", + (u_long)bucket, (u_long)hp->hash_priority); + __db_print_mutex( + dbenv, &mb, &hp->hash_mutex, ":", flags); + DB_MSGBUF_FLUSH(dbenv, &mb); } - } + for (; bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh)) + __memp_print_bh(dbenv, dbmp, bhp, fmap, flags); - /* Dump the memory pool. */ - if (LF_ISSET(MPOOL_DUMP_MEM)) - __db_shalloc_dump(reginfo->addr, fp); + MUTEX_UNLOCK(dbenv, &hp->hash_mutex); + } } /* - * __memp_pbh -- + * __memp_print_bh -- * Display a BH structure. */ static void -__memp_pbh(dbmp, bhp, fmap, fp) +__memp_print_bh(dbenv, dbmp, bhp, fmap, flags) + DB_ENV *dbenv; DB_MPOOL *dbmp; BH *bhp; - size_t *fmap; - FILE *fp; + roff_t *fmap; + u_int32_t flags; { static const FN fn[] = { { BH_CALLPGIN, "callpgin" }, @@ -451,50 +643,29 @@ __memp_pbh(dbmp, bhp, fmap, fp) { BH_TRASH, "trash" }, { 0, NULL } }; + DB_MSGBUF mb; int i; + DB_MSGBUF_INIT(&mb); + for (i = 0; i < FMAP_ENTRIES; ++i) if (fmap[i] == INVALID_ROFF || fmap[i] == bhp->mf_offset) break; if (fmap[i] == INVALID_ROFF) - (void)fprintf(fp, "\t%5lu, %lu, %2lu, %8lu [%lu,%lu] %lu", - (u_long)bhp->pgno, (u_long)bhp->mf_offset, - (u_long)bhp->ref, (u_long)R_OFFSET(dbmp->reginfo, bhp), - (u_long)LSN(bhp->buf).file, (u_long)LSN(bhp->buf).offset, - (u_long)bhp->priority); + __db_msgadd(dbenv, &mb, "\t%5lu, %lu, ", + (u_long)bhp->pgno, (u_long)bhp->mf_offset); else - (void)fprintf(fp, "\t%5lu, #%d, %2lu, %8lu [%lu,%lu] %lu", - (u_long)bhp->pgno, i + 1, - (u_long)bhp->ref, (u_long)R_OFFSET(dbmp->reginfo, bhp), - (u_long)LSN(bhp->buf).file, (u_long)LSN(bhp->buf).offset, - (u_long)bhp->priority); - - __db_prflags(bhp->flags, fn, fp); - - (void)fprintf(fp, "\n"); -} - -/* - * __memp_stat_hash -- - * Total hash bucket stats (other than mutex wait) into the region. 
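__memp_print_hash and __memp_print_bh above build each output line incrementally with __db_msgadd and emit it with DB_MSGBUF_FLUSH, instead of writing to a FILE * as the old __memp_dumpcache did. A self-contained sketch of that accumulate-and-flush pattern; the real DB_MSGBUF and its helpers are internal to Berkeley DB, so the buffer size and the printf routing here are assumptions:

#include <stdarg.h>
#include <stdio.h>

typedef struct {			/* toy stand-in for DB_MSGBUF */
	char	buf[1024];		/* accumulated message text */
	size_t	len;			/* bytes currently used */
} MSGBUF;

static void
msgbuf_add(MSGBUF *mb, const char *fmt, ...)
{
	va_list ap;
	int n;

	va_start(ap, fmt);
	n = vsnprintf(mb->buf + mb->len, sizeof(mb->buf) - mb->len, fmt, ap);
	va_end(ap);
	if (n > 0)
		mb->len += (size_t)n;
	if (mb->len >= sizeof(mb->buf))	/* truncated: keep the offset valid */
		mb->len = sizeof(mb->buf) - 1;
}

static void
msgbuf_flush(MSGBUF *mb)
{
	if (mb->len != 0)
		printf("%s\n", mb->buf);	/* real code routes through __db_msg */
	mb->len = 0;
	mb->buf[0] = '\0';
}

/* Usage: MSGBUF mb = { "", 0 };
 *        msgbuf_add(&mb, "bucket %lu: %lu, ", bucket, priority);
 *        msgbuf_flush(&mb); */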
- * - * PUBLIC: void __memp_stat_hash __P((REGINFO *, MPOOL *, u_int32_t *)); - */ -void -__memp_stat_hash(reginfo, mp, dirtyp) - REGINFO *reginfo; - MPOOL *mp; - u_int32_t *dirtyp; -{ - DB_MPOOL_HASH *hp; - u_int32_t dirty; - int i; - - hp = R_ADDR(reginfo, mp->htab); - for (i = 0, dirty = 0; i < mp->htab_buckets; i++, hp++) - dirty += hp->hash_page_dirty; - *dirtyp = dirty; + __db_msgadd( + dbenv, &mb, "\t%5lu, #%d, ", (u_long)bhp->pgno, i + 1); + + __db_msgadd(dbenv, &mb, "%2lu, %lu/%lu, ", (u_long)bhp->ref, + (u_long)LSN(bhp->buf).file, (u_long)LSN(bhp->buf).offset); + __db_print_mutex(dbenv, &mb, &bhp->mutex, ", ", flags); + __db_msgadd(dbenv, &mb, "%#08lx, %lu", + (u_long)R_OFFSET(dbenv, dbmp->reginfo, bhp), (u_long)bhp->priority); + __db_prflags(dbenv, &mb, bhp->flags, fn, " (", ")"); + DB_MSGBUF_FLUSH(dbenv, &mb); } /* @@ -502,18 +673,19 @@ __memp_stat_hash(reginfo, mp, dirtyp) * Total hash bucket wait stats into the region. */ static void -__memp_stat_wait(reginfo, mp, mstat, flags) +__memp_stat_wait(dbenv, reginfo, mp, mstat, flags) + DB_ENV *dbenv; REGINFO *reginfo; MPOOL *mp; DB_MPOOL_STAT *mstat; - int flags; + u_int32_t flags; { DB_MPOOL_HASH *hp; DB_MUTEX *mutexp; - int i; + u_int32_t i; mstat->st_hash_max_wait = 0; - hp = R_ADDR(reginfo, mp->htab); + hp = R_ADDR(dbenv, reginfo, mp->htab); for (i = 0; i < mp->htab_buckets; i++, hp++) { mutexp = &hp->hash_mutex; mstat->st_hash_nowait += mutexp->mutex_set_nowait; @@ -521,9 +693,57 @@ __memp_stat_wait(reginfo, mp, mstat, flags) if (mutexp->mutex_set_wait > mstat->st_hash_max_wait) mstat->st_hash_max_wait = mutexp->mutex_set_wait; - if (LF_ISSET(DB_STAT_CLEAR)) { - mutexp->mutex_set_wait = 0; - mutexp->mutex_set_nowait = 0; - } + if (LF_ISSET(DB_STAT_CLEAR)) + MUTEX_CLEAR(mutexp); } } + +#else /* !HAVE_STATISTICS */ + +int +__memp_stat_pp(dbenv, gspp, fspp, flags) + DB_ENV *dbenv; + DB_MPOOL_STAT **gspp; + DB_MPOOL_FSTAT ***fspp; + u_int32_t flags; +{ + COMPQUIET(gspp, NULL); + COMPQUIET(fspp, NULL); + COMPQUIET(flags, 0); + + return (__db_stat_not_built(dbenv)); +} + +int +__memp_stat_print_pp(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + COMPQUIET(flags, 0); + + return (__db_stat_not_built(dbenv)); +} +#endif + +/* + * __memp_stat_hash -- + * Total hash bucket stats (other than mutex wait) into the region. + * + * PUBLIC: void __memp_stat_hash + * PUBLIC: __P((DB_ENV *, REGINFO *, MPOOL *, u_int32_t *)); + */ +void +__memp_stat_hash(dbenv, reginfo, mp, dirtyp) + DB_ENV *dbenv; + REGINFO *reginfo; + MPOOL *mp; + u_int32_t *dirtyp; +{ + DB_MPOOL_HASH *hp; + u_int32_t dirty, i; + + hp = R_ADDR(dbenv, reginfo, mp->htab); + for (i = 0, dirty = 0; i < mp->htab_buckets; i++, hp++) + dirty += hp->hash_page_dirty; + *dirtyp = dirty; +} diff --git a/db/mp/mp_sync.c b/db/mp/mp_sync.c index 1fe0b8d1f..d90dba52f 100644 --- a/db/mp/mp_sync.c +++ b/db/mp/mp_sync.c @@ -1,14 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
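When the library is built without statistics support, the #else branch above replaces __memp_stat_pp and __memp_stat_print_pp with stubs that silence their arguments and fail cleanly through __db_stat_not_built. The same compile-time pattern, reduced to a standalone sketch with hypothetical names and a generic error code:

#include <errno.h>

#ifdef HAVE_STATISTICS
int
module_stat(int flags)
{
	/* ... gather and report real statistics ... */
	return (0);
}
#else
int
module_stat(int flags)
{
	(void)flags;		/* same idea as COMPQUIET */
	return (EINVAL);	/* "statistics support not built" */
}
#endif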
+ * + * $Id: mp_sync.c,v 11.97 2004/09/22 16:26:19 bostic Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: mp_sync.c,v 11.80 2003/09/13 19:20:41 bostic Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #include @@ -29,7 +28,7 @@ typedef struct { } BH_TRACK; static int __bhcmp __P((const void *, const void *)); -static int __memp_close_flush_files __P((DB_ENV *, DB_MPOOL *)); +static int __memp_close_flush_files __P((DB_ENV *, DB_MPOOL *, int)); static int __memp_sync_files __P((DB_ENV *, DB_MPOOL *)); /* @@ -62,7 +61,7 @@ __memp_sync_pp(dbenv, lsnp) __env_rep_enter(dbenv); ret = __memp_sync(dbenv, lsnp); if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -127,11 +126,12 @@ __memp_fsync_pp(dbmfp) PANIC_CHECK(dbenv); MPF_ILLEGAL_BEFORE_OPEN(dbmfp, "DB_MPOOLFILE->sync"); - if ((rep_check = IS_ENV_REPLICATED(dbenv)) != 0) + rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; + if (rep_check) __env_rep_enter(dbenv); ret = __memp_fsync(dbmfp); if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -145,15 +145,23 @@ int __memp_fsync(dbmfp) DB_MPOOLFILE *dbmfp; { + MPOOLFILE *mfp; + + mfp = dbmfp->mfp; + /* * If this handle doesn't have a file descriptor that's open for - * writing, or if the file is a temporary, there's no reason to - * proceed further. + * writing, or if the file is a temporary, or if the file hasn't + * been written since it was flushed, there's no reason to proceed + * further. */ if (F_ISSET(dbmfp, MP_READONLY)) return (0); - if (F_ISSET(dbmfp->mfp, MP_TEMP)) + if (F_ISSET(mfp, MP_TEMP)) + return (0); + + if (mfp->file_written == 0) return (0); return (__memp_sync_int(dbmfp->dbenv, dbmfp, 0, DB_SYNC_FILE, NULL)); @@ -194,14 +202,14 @@ __mp_xxx_fh(dbmfp, fhp) * __memp_sync_int -- * Mpool sync internal function. * - * PUBLIC: int __memp_sync_int - * PUBLIC: __P((DB_ENV *, DB_MPOOLFILE *, int, db_sync_op, int *)); + * PUBLIC: int __memp_sync_int __P((DB_ENV *, + * PUBLIC: DB_MPOOLFILE *, u_int32_t, db_sync_op, u_int32_t *)); */ int __memp_sync_int(dbenv, dbmfp, trickle_max, op, wrotep) DB_ENV *dbenv; DB_MPOOLFILE *dbmfp; - int trickle_max, *wrotep; + u_int32_t trickle_max, *wrotep; db_sync_op op; { BH *bhp; @@ -211,13 +219,22 @@ __memp_sync_int(dbenv, dbmfp, trickle_max, op, wrotep) DB_MUTEX *mutexp; MPOOL *c_mp, *mp; MPOOLFILE *mfp; - u_int32_t n_cache; - int ar_cnt, ar_max, hb_lock, i, pass, remaining, ret, t_ret; - int wait_cnt, write_cnt, wrote; + roff_t last_mf_offset; + u_int32_t ar_cnt, ar_max, i, n_cache, remaining, wrote; + int filecnt, hb_lock, maxopenfd, maxwrite, maxwrite_sleep; + int pass, ret, t_ret, wait_cnt, write_cnt; dbmp = dbenv->mp_handle; mp = dbmp->reginfo[0].primary; - pass = wrote = 0; + last_mf_offset = INVALID_ROFF; + filecnt = pass = wrote = 0; + + /* Get shared configuration information. */ + R_LOCK(dbenv, dbmp->reginfo); + maxopenfd = mp->mp_maxopenfd; + maxwrite = mp->mp_maxwrite; + maxwrite_sleep = mp->mp_maxwrite_sleep; + R_UNLOCK(dbenv, dbmp->reginfo); /* Assume one dirty page per bucket. 
*/ ar_max = mp->nreg * mp->htab_buckets; @@ -233,7 +250,7 @@ __memp_sync_int(dbenv, dbmfp, trickle_max, op, wrotep) for (ar_cnt = 0, n_cache = 0; n_cache < mp->nreg; ++n_cache) { c_mp = dbmp->reginfo[n_cache].primary; - hp = R_ADDR(&dbmp->reginfo[n_cache], c_mp->htab); + hp = R_ADDR(dbenv, &dbmp->reginfo[n_cache], c_mp->htab); for (i = 0; i < c_mp->htab_buckets; i++, hp++) { /* * We can check for empty buckets before locking as we @@ -266,7 +283,8 @@ __memp_sync_int(dbenv, dbmfp, trickle_max, op, wrotep) !F_ISSET(bhp, BH_DIRTY)) continue; - mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset); + mfp = R_ADDR(dbenv, + dbmp->reginfo, bhp->mf_offset); /* * Ignore temporary files -- this means you @@ -336,7 +354,7 @@ __memp_sync_int(dbenv, dbmfp, trickle_max, op, wrotep) * If we're trickling buffers, only write enough to reach the correct * percentage. */ - if (op == DB_SYNC_TRICKLE && ar_cnt > trickle_max) + if (op == DB_SYNC_TRICKLE && ar_cnt > trickle_max) ar_cnt = trickle_max; /* @@ -359,7 +377,7 @@ __memp_sync_int(dbenv, dbmfp, trickle_max, op, wrotep) if (i >= ar_cnt) { i = 0; ++pass; - (void)__os_sleep(dbenv, 1, 0); + __os_sleep(dbenv, 1, 0); } if ((hp = bharray[i].track_hp) == NULL) continue; @@ -436,10 +454,24 @@ __memp_sync_int(dbenv, dbmfp, trickle_max, op, wrotep) MUTEX_UNLOCK(dbenv, mutexp); for (wait_cnt = 1; bhp->ref_sync != 0 && wait_cnt < 4; ++wait_cnt) - (void)__os_sleep(dbenv, 1, 0); + __os_sleep(dbenv, 1, 0); MUTEX_LOCK(dbenv, mutexp); hb_lock = 1; + /* + * If we've switched files, check to see if we're configured + * to close file descriptors. + */ + if (maxopenfd != 0 && bhp->mf_offset != last_mf_offset) { + if (++filecnt >= maxopenfd) { + filecnt = 0; + if ((ret = __memp_close_flush_files( + dbenv, dbmp, 1)) != 0) + break; + } + last_mf_offset = bhp->mf_offset; + } + /* * If the ref_sync count has gone to 0, we're going to be done * with this buffer no matter what happens. @@ -452,33 +484,25 @@ __memp_sync_int(dbenv, dbmfp, trickle_max, op, wrotep) /* * If the ref_sync count has gone to 0 and the buffer is still * dirty, we write it. We only try to write the buffer once. - * Any process checkpointing or trickle-flushing the pool - * must be able to write any underlying file -- if the write - * fails, error out. It would be very strange if file sync - * failed to write, but we don't care if it happens. */ if (bhp->ref_sync == 0 && F_ISSET(bhp, BH_DIRTY)) { hb_lock = 0; MUTEX_UNLOCK(dbenv, mutexp); - mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset); + mfp = R_ADDR(dbenv, dbmp->reginfo, bhp->mf_offset); if ((ret = __memp_bhwrite(dbmp, hp, mfp, bhp, 1)) == 0) ++wrote; - else if (op == DB_SYNC_CACHE || op == DB_SYNC_TRICKLE) + else __db_err(dbenv, "%s: unable to flush page: %lu", __memp_fns(dbmp, mfp), (u_long)bhp->pgno); - else - ret = 0; /* * Avoid saturating the disk, sleep once we've done * some number of writes. */ - if (dbenv->mp_maxwrite != 0 && - ++write_cnt >= dbenv->mp_maxwrite) { + if (maxwrite != 0 && ++write_cnt >= maxwrite) { write_cnt = 0; - (void)__os_sleep( - dbenv, 0, dbenv->mp_maxwrite_sleep); + __os_sleep(dbenv, 0, (u_long)maxwrite_sleep); } } @@ -535,7 +559,7 @@ done: /* } /* If we've opened files to flush pages, close them. 
*/ - if ((t_ret = __memp_close_flush_files(dbenv, dbmp)) != 0 && ret == 0) + if ((t_ret = __memp_close_flush_files(dbenv, dbmp, 0)) != 0 && ret == 0) ret = t_ret; err: __os_free(dbenv, bharray); @@ -589,28 +613,87 @@ int __memp_sync_files(dbenv, dbmp) ret = __memp_mf_sync(dbmp, mfp); if (ret != 0) { __db_err(dbenv, "%s: unable to flush: %s", - (char *)R_ADDR(dbmp->reginfo, mfp->path_off), + (char *)R_ADDR(dbenv, dbmp->reginfo, mfp->path_off), db_strerror(ret)); if (final_ret == 0) final_ret = ret; + continue; } - } + /* + * If we wrote the file and there are no open handles (or there + * is a single open handle, and it's the one we opened to write + * buffers during checkpoint), clear the file_written flag. We + * do this so that applications opening thousands of files don't + * loop here opening and flushing those files during checkpoint. + * + * The danger here is if a buffer were to be written as part of + * a checkpoint, and then not be flushed to disk. This cannot + * happen because we only clear file_written when there are no + * other users of the MPOOLFILE in the system, and, as we hold + * the region lock, no possibility of another thread of control + * racing with us to open a MPOOLFILE. + */ + if (mfp->mpf_cnt == 0 || (mfp->mpf_cnt == 1 && + dbmfp != NULL && F_ISSET(dbmfp, MP_FLUSH))) + mfp->file_written = 0; + } R_UNLOCK(dbenv, dbmp->reginfo); return (final_ret); } +/* + * __memp_mf_sync -- + * Flush an MPOOLFILE. + * + * Should only be used when the file is not already open in this process. + * + * PUBLIC: int __memp_mf_sync __P((DB_MPOOL *, MPOOLFILE *)); + */ +int +__memp_mf_sync(dbmp, mfp) + DB_MPOOL *dbmp; + MPOOLFILE *mfp; +{ + DB_ENV *dbenv; + DB_FH *fhp; + int ret, t_ret; + char *rpath; + + dbenv = dbmp->dbenv; + + /* + * Expects caller to be holding the region lock: we're using the path + * name and __memp_nameop might try and rename the file. + */ + if ((ret = __db_appname(dbenv, DB_APP_DATA, + R_ADDR(dbenv, dbmp->reginfo, mfp->path_off), 0, NULL, + &rpath)) == 0) { + if ((ret = __os_open(dbenv, rpath, 0, 0, &fhp)) == 0) { + ret = __os_fsync(dbenv, fhp); + if ((t_ret = + __os_closehandle(dbenv, fhp)) != 0 && ret == 0) + ret = t_ret; + } + __os_free(dbenv, rpath); + } + + return (ret); +} + /* * __memp_close_flush_files -- * Close files opened only to flush buffers. */ static int -__memp_close_flush_files(dbenv, dbmp) +__memp_close_flush_files(dbenv, dbmp, dosync) DB_ENV *dbenv; DB_MPOOL *dbmp; + int dosync; { DB_MPOOLFILE *dbmfp; + MPOOLFILE *mfp; int ret; /* @@ -618,7 +701,7 @@ __memp_close_flush_files(dbenv, dbmp) * flush buffers. There are two cases: first, extent files have to * be closed so they may be removed when empty. Second, regular * files have to be closed so we don't run out of descriptors (for - * example, and application partitioning its data into databases + * example, an application partitioning its data into databases * based on timestamps, so there's a continually increasing set of * files). * @@ -632,6 +715,22 @@ retry: MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp); if (F_ISSET(dbmfp, MP_FLUSH)) { F_CLR(dbmfp, MP_FLUSH); MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp); + if (dosync) { + if ((ret = __os_fsync(dbenv, dbmfp->fhp)) != 0) + return (ret); + /* + * If the file is clean and we have the only + * open handle on the file, clear the dirty + * flag so we don't re-open and sync it again. 
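The long comment above explains why __memp_sync_files may clear file_written only while no other handle can be writing the file. The same bookkeeping, reduced to a toy structure with a pthread mutex standing in for the region lock; the field names below are illustrative, not the MPOOLFILE layout:

#include <pthread.h>

struct toy_mpoolfile {
	pthread_mutex_t	mutex;
	int		refcnt;		/* open handles on this file */
	int		file_written;	/* written since it was last flushed? */
};

static void
clear_if_idle(struct toy_mpoolfile *mfp)
{
	pthread_mutex_lock(&mfp->mutex);
	if (mfp->refcnt <= 1)		/* only the checkpoint's own handle left */
		mfp->file_written = 0;	/* safe: nobody else can be writing */
	pthread_mutex_unlock(&mfp->mutex);
}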
+ */ + mfp = dbmfp->mfp; + if (mfp->mpf_cnt == 1) { + R_LOCK(dbenv, dbmp->reginfo); + if (mfp->mpf_cnt == 1) + mfp->file_written = 0; + R_UNLOCK(dbenv, dbmp->reginfo); + } + } if ((ret = __memp_fclose(dbmfp, 0)) != 0) return (ret); goto retry; diff --git a/db/mp/mp_trickle.c b/db/mp/mp_trickle.c index 90e26064f..876f6fe85 100644 --- a/db/mp/mp_trickle.c +++ b/db/mp/mp_trickle.c @@ -1,14 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: mp_trickle.c,v 11.34 2004/09/15 21:49:19 mjc Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: mp_trickle.c,v 11.30 2003/09/13 19:20:41 bostic Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #include @@ -45,7 +44,7 @@ __memp_trickle_pp(dbenv, pct, nwrotep) __env_rep_enter(dbenv); ret = __memp_trickle(dbenv, pct, nwrotep); if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -60,8 +59,8 @@ __memp_trickle(dbenv, pct, nwrotep) { DB_MPOOL *dbmp; MPOOL *c_mp, *mp; - u_int32_t dirty, i, total, dtmp; - int n, ret, wrote; + u_int32_t dirty, i, total, dtmp, wrote; + int n, ret; dbmp = dbenv->mp_handle; mp = dbmp->reginfo[0].primary; @@ -87,7 +86,7 @@ __memp_trickle(dbenv, pct, nwrotep) for (ret = 0, i = dirty = total = 0; i < mp->nreg; ++i) { c_mp = dbmp->reginfo[i].primary; total += c_mp->stat.st_pages; - __memp_stat_hash(&dbmp->reginfo[i], c_mp, &dtmp); + __memp_stat_hash(dbenv, &dbmp->reginfo[i], c_mp, &dtmp); dirty += dtmp; } @@ -95,15 +94,15 @@ __memp_trickle(dbenv, pct, nwrotep) * !!! * Be careful in modifying this calculation, total may be 0. */ - n = ((total * pct) / 100) - (total - dirty); + n = ((total * (u_int)pct) / 100) - (total - dirty); if (dirty == 0 || n <= 0) return (0); - if (nwrotep == NULL) - nwrotep = &wrote; - ret = __memp_sync_int(dbenv, NULL, n, DB_SYNC_TRICKLE, nwrotep); - - mp->stat.st_page_trickle += *nwrotep; + ret = __memp_sync_int( + dbenv, NULL, (u_int32_t)n, DB_SYNC_TRICKLE, &wrote); + mp->stat.st_page_trickle += wrote; + if (nwrotep != NULL) + *nwrotep = (int)wrote; return (ret); } diff --git a/db/mutex/mut_fcntl.c b/db/mutex/mut_fcntl.c index ac600312c..03521bd77 100644 --- a/db/mutex/mut_fcntl.c +++ b/db/mutex/mut_fcntl.c @@ -1,22 +1,21 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: mut_fcntl.c,v 11.26 2004/01/28 03:36:18 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: mut_fcntl.c,v 11.24 2003/05/05 19:55:03 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include #include #include +#include /* SEEK_SET on SunOS. */ #endif #include "db_int.h" diff --git a/db/mutex/mut_pthread.c b/db/mutex/mut_pthread.c index 10c9bc597..6507eba73 100644 --- a/db/mutex/mut_pthread.c +++ b/db/mutex/mut_pthread.c @@ -1,20 +1,19 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: mut_pthread.c,v 11.62 2004/09/22 16:27:05 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: mut_pthread.c,v 11.57 2003/05/05 19:55:03 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include +#include #endif #include "db_int.h" @@ -30,26 +29,29 @@ static const char revid[] = "$Id: mut_pthread.c,v 11.57 2003/05/05 19:55:03 bost #endif #ifdef HAVE_MUTEX_SOLARIS_LWP +#define pthread_cond_destroy(x) 0 #define pthread_cond_signal _lwp_cond_signal #define pthread_cond_wait _lwp_cond_wait +#define pthread_mutex_destroy(x) 0 #define pthread_mutex_lock _lwp_mutex_lock #define pthread_mutex_trylock _lwp_mutex_trylock #define pthread_mutex_unlock _lwp_mutex_unlock /* + * !!! * _lwp_self returns the LWP process ID which isn't a unique per-thread * identifier. Use pthread_self instead, it appears to work even if we * are not a pthreads application. */ -#define pthread_mutex_destroy(x) 0 #endif #ifdef HAVE_MUTEX_UI_THREADS +#define pthread_cond_destroy(x) cond_destroy #define pthread_cond_signal cond_signal #define pthread_cond_wait cond_wait +#define pthread_mutex_destroy mutex_destroy #define pthread_mutex_lock mutex_lock #define pthread_mutex_trylock mutex_trylock #define pthread_mutex_unlock mutex_unlock #define pthread_self thr_self -#define pthread_mutex_destroy mutex_destroy #endif #define PTHREAD_UNLOCK_ATTEMPTS 5 @@ -348,12 +350,19 @@ int __db_pthread_mutex_destroy(mutexp) DB_MUTEX *mutexp; { - int ret; + int ret, t_ret; if (F_ISSET(mutexp, MUTEX_IGNORE)) return (0); - if ((ret = pthread_mutex_destroy(&mutexp->mutex)) != 0) - __db_err(NULL, "unable to destroy mutex: %s", strerror(ret)); + ret = 0; + if (F_ISSET(mutexp, MUTEX_SELF_BLOCK) && + (ret = pthread_cond_destroy(&mutexp->cond)) != 0) + __db_err(NULL, "unable to destroy cond: %s", strerror(ret)); + if ((t_ret = pthread_mutex_destroy(&mutexp->mutex)) != 0) { + __db_err(NULL, "unable to destroy mutex: %s", strerror(t_ret)); + if (ret == 0) + ret = t_ret; + } return (ret); } diff --git a/db/mutex/mut_tas.c b/db/mutex/mut_tas.c index cfe31c5a9..08d7ed876 100644 --- a/db/mutex/mut_tas.c +++ b/db/mutex/mut_tas.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: mut_tas.c,v 11.44 2004/09/15 19:14:49 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: mut_tas.c,v 11.40 2003/05/06 14:25:33 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -39,7 +37,11 @@ __db_tas_mutex_init(dbenv, mutexp, flags) u_int32_t save; /* Check alignment. */ - DB_ASSERT(((db_alignp_t)mutexp & (MUTEX_ALIGN - 1)) == 0); + if ((uintptr_t)mutexp & (MUTEX_ALIGN - 1)) { + __db_err(dbenv, + "__db_tas_mutex_init: mutex not appropriately aligned"); + return (EINVAL); + } /* * The only setting/checking of the MUTEX_MPOOL flag is in the mutex @@ -115,7 +117,16 @@ relock: #ifdef HAVE_MUTEX_S390_CC_ASSEMBLY tsl_t zero = 0; #endif - if (!MUTEX_SET(&mutexp->tas)) { + if ( +#ifdef MUTEX_SET_TEST + /* + * If using test-and-set mutexes, and we know the "set" value, + * we can avoid interlocked instructions since they're unlikely + * to succeed. + */ + mutexp->tas || +#endif + !MUTEX_SET(&mutexp->tas)) { /* * Some systems (notably those with newer Intel CPUs) * need a small pause here. 
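The mut_tas.c change above reads the mutex's tas word before attempting MUTEX_SET, since the interlocked instruction is expensive and unlikely to succeed while the lock is held. A self-contained sketch of that test-then-test-and-set idea using C11 atomics; Berkeley DB itself uses per-platform MUTEX_SET assembly rather than <stdatomic.h>:

#include <stdatomic.h>
#include <stdbool.h>

static bool
ttas_trylock(atomic_int *lock)
{
	/* Cheap plain read first: skip the interlocked operation when it
	 * cannot succeed, keeping the cache line shared while spinning. */
	if (atomic_load_explicit(lock, memory_order_relaxed) != 0)
		return (false);
	/* The actual test-and-set: we own the lock only if it was still 0. */
	return (atomic_exchange_explicit(lock, 1, memory_order_acquire) == 0);
}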
[#6975] diff --git a/db/mutex/mut_win32.c b/db/mutex/mut_win32.c index b5cbfc731..b51000321 100644 --- a/db/mutex/mut_win32.c +++ b/db/mutex/mut_win32.c @@ -1,16 +1,14 @@ /* * See the file LICENSE for redistribution information. * - * Copyright (c) 2002-2003 + * Copyright (c) 2002-2004 * Sleepycat Software. All rights reserved. + * + * $Id: mut_win32.c,v 1.18 2004/07/06 21:06:39 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: mut_win32.c,v 1.15 2003/05/05 19:55:03 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -26,13 +24,18 @@ static const char revid[] = "$Id: mut_win32.c,v 1.15 2003/05/05 19:55:03 bostic /* We don't want to run this code even in "ordinary" diagnostic mode. */ #undef MUTEX_DIAG +static _TCHAR hex_digits[] = _T("0123456789abcdef"); + #define GET_HANDLE(mutexp, event) do { \ - char idbuf[13]; \ + _TCHAR idbuf[] = _T("db.m00000000"); \ + _TCHAR *p = idbuf + 12; \ + u_int32_t id; \ \ - snprintf(idbuf, sizeof idbuf, "db.m%08x", mutexp->id); \ + for (id = (mutexp)->id; id != 0; id >>= 4) \ + *--p = hex_digits[id & 0xf]; \ event = CreateEvent(NULL, FALSE, FALSE, idbuf); \ if (event == NULL) \ - return (__os_win32_errno()); \ + return (__os_get_errno()); \ } while (0) /* @@ -158,7 +161,7 @@ loop: /* Attempt to acquire the resource for N spins. */ GET_HANDLE(mutexp, event); } if ((ret = WaitForSingleObject(event, ms)) == WAIT_FAILED) - return (__os_win32_errno()); + return (__os_get_errno()); if ((ms <<= 1) > MS_PER_SEC) ms = MS_PER_SEC; @@ -205,7 +208,7 @@ __db_win32_mutex_unlock(dbenv, mutexp) now.QuadPart, mutexp, mutexp->id); #endif if (!PulseEvent(event)) - ret = __os_win32_errno(); + ret = __os_get_errno(); CloseHandle(event); } diff --git a/db/mutex/mutex.c b/db/mutex/mutex.c index 51ed121de..91400cd62 100644 --- a/db/mutex/mutex.c +++ b/db/mutex/mutex.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: mutex.c,v 11.42 2004/09/15 21:49:19 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: mutex.c,v 11.39 2003/04/23 19:43:37 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -132,7 +130,7 @@ __db_mutex_alloc_int(dbenv, infop, storep) * we can free buffers until memory is available. */ #if defined(MUTEX_NO_MALLOC_LOCKS) || defined(HAVE_MUTEX_SYSTEM_RESOURCES) - ret = __db_shalloc(infop->addr, sizeof(DB_MUTEX), MUTEX_ALIGN, storep); + ret = __db_shalloc(infop, sizeof(DB_MUTEX), MUTEX_ALIGN, storep); if (ret == ENOMEM && MPOOL_ON(dbenv)) { DB_MPOOL *dbmp; @@ -176,10 +174,10 @@ __db_mutex_free(dbenv, infop, mutexp) dbmp = dbenv->mp_handle; R_LOCK(dbenv, dbmp->reginfo); - __db_shalloc_free(dbmp->reginfo[0].addr, mutexp); + __db_shalloc_free(&dbmp->reginfo[0], mutexp); R_UNLOCK(dbenv, dbmp->reginfo); } else - __db_shalloc_free(infop->addr, mutexp); + __db_shalloc_free(infop, mutexp); R_UNLOCK(dbenv, infop); #else COMPQUIET(dbenv, NULL); @@ -207,7 +205,8 @@ __db_shreg_locks_record(dbenv, mutexp, infop, rp) return (0); DB_ASSERT(mutexp->reg_off == INVALID_ROFF); rp->stat.st_records++; - i = (roff_t *)R_ADDR(infop, rp->regmutex_hint) - &rp->regmutexes[0]; + i = (roff_t *)R_ADDR(dbenv, infop, rp->regmutex_hint) - + &rp->regmutexes[0]; if (rp->regmutexes[i] != INVALID_ROFF) { /* * Our hint failed, search for an open slot. 
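The new GET_HANDLE macro above formats the mutex id into the Windows event name by hand instead of calling snprintf: the name template is pre-filled with zeros and the significant hex digits are written right to left. The same technique as a standalone C function; the "db.m%08x" naming is taken from the macro, everything else is illustrative:

#include <string.h>

static const char hex_digits[] = "0123456789abcdef";

static void
format_event_name(unsigned int id, char name[13])
{
	char *p;

	memcpy(name, "db.m00000000", 13);	/* template, NUL included */
	for (p = name + 12; id != 0; id >>= 4)
		*--p = hex_digits[id & 0xf];	/* low nybble first, right to left */
}
/* format_event_name(0x1a2b, buf) yields "db.m00001a2b". */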
@@ -229,11 +228,11 @@ __db_shreg_locks_record(dbenv, mutexp, infop, rp) * When we get here, i is an empty slot. Record this * mutex, set hint to point to the next slot and we are done. */ - rp->regmutexes[i] = R_OFFSET(infop, mutexp); - mutexp->reg_off = R_OFFSET(infop, &rp->regmutexes[i]); + rp->regmutexes[i] = R_OFFSET(dbenv, infop, mutexp); + mutexp->reg_off = R_OFFSET(dbenv, infop, &rp->regmutexes[i]); rp->regmutex_hint = (i < rp->reglocks - 1) ? - R_OFFSET(infop, &rp->regmutexes[i+1]) : - R_OFFSET(infop, &rp->regmutexes[0]); + R_OFFSET(dbenv, infop, &rp->regmutexes[i+1]) : + R_OFFSET(dbenv, infop, &rp->regmutexes[0]); return (0); } @@ -260,9 +259,9 @@ __db_shreg_locks_clear(mutexp, infop, rp) * environment. We recorded our index in the mutex, find and clear it. */ DB_ASSERT(mutexp->reg_off != INVALID_ROFF); - DB_ASSERT(*(roff_t *)R_ADDR(infop, mutexp->reg_off) == \ - R_OFFSET(infop, mutexp)); - *(roff_t *)R_ADDR(infop, mutexp->reg_off) = 0; + DB_ASSERT(*(roff_t *)R_ADDR(dbenv, infop, mutexp->reg_off) == \ + R_OFFSET(dbenv, infop, mutexp)); + *(roff_t *)R_ADDR(dbenv, infop, mutexp->reg_off) = 0; if (rp != NULL) { rp->regmutex_hint = mutexp->reg_off; rp->stat.st_clears++; @@ -290,7 +289,7 @@ __db_shreg_locks_destroy(infop, rp) for (i = 0; i < rp->reglocks; i++) if (rp->regmutexes[i] != 0) { rp->stat.st_destroys++; - __db_mutex_destroy((DB_MUTEX *)R_ADDR(infop, + __db_mutex_destroy((DB_MUTEX *)R_ADDR(dbenv, infop, rp->regmutexes[i])); } } @@ -358,7 +357,7 @@ __db_shreg_maintinit(infop, addr, size) rp = (REGMAINT *)addr; memset(addr, 0, sizeof(REGMAINT)); rp->reglocks = size / sizeof(roff_t); - rp->regmutex_hint = R_OFFSET(infop, &rp->regmutexes[0]); + rp->regmutex_hint = R_OFFSET(dbenv, infop, &rp->regmutexes[0]); for (i = 0; i < rp->reglocks; i++) rp->regmutexes[i] = INVALID_ROFF; } @@ -372,17 +371,19 @@ __db_mutex_maint(dbenv, infop) switch (infop->type) { case REGION_TYPE_LOCK: - moff = ((DB_LOCKREGION *)R_ADDR(infop, + moff = ((DB_LOCKREGION *)R_ADDR(dbenv, infop, infop->rp->primary))->maint_off; break; case REGION_TYPE_LOG: - moff = ((LOG *)R_ADDR(infop, infop->rp->primary))->maint_off; + moff = ((LOG *)R_ADDR(dbenv, infop, + infop->rp->primary))->maint_off; break; case REGION_TYPE_MPOOL: - moff = ((MPOOL *)R_ADDR(infop, infop->rp->primary))->maint_off; + moff = ((MPOOL *)R_ADDR(dbenv, infop, + infop->rp->primary))->maint_off; break; case REGION_TYPE_TXN: - moff = ((DB_TXNREGION *)R_ADDR(infop, + moff = ((DB_TXNREGION *)R_ADDR(dbenv, infop, infop->rp->primary))->maint_off; break; default: @@ -390,6 +391,6 @@ __db_mutex_maint(dbenv, infop) "Attempting to record mutex in a region not set up to do so"); return (NULL); } - return ((REGMAINT *)R_ADDR(infop, moff)); + return ((REGMAINT *)R_ADDR(dbenv, infop, moff)); } #endif /* HAVE_MUTEX_SYSTEM_RESOURCES */ diff --git a/db/mutex/tm.c b/db/mutex/tm.c index 49f1ac7eb..448e1bfde 100644 --- a/db/mutex/tm.c +++ b/db/mutex/tm.c @@ -127,7 +127,7 @@ main(argc, argv) * TM[nthreads] per-thread mutex array * TM[maxlocks] per-lock mutex array */ - align = ALIGN(sizeof(TM), MUTEX_ALIGN); + align = DB_ALIGN(sizeof(TM), MUTEX_ALIGN); len = align * (1 + nthreads * nprocs + maxlocks); switch (rtype) { @@ -253,7 +253,7 @@ run_locker(id) #endif int status; - (void)__os_sleep(&dbenv, 3, 0); /* Let everyone catch up. */ + __os_sleep(&dbenv, 3, 0); /* Let everyone catch up. */ srand((u_int)time(NULL) % getpid()); /* Initialize random numbers. */ @@ -348,7 +348,7 @@ run_lthread(arg) * we still hold the mutex. 
*/ for (i = 0; i < 3; ++i) { - (void)__os_sleep(&dbenv, 0, rand() % 3); + __os_sleep(&dbenv, 0, rand() % 3); if (mp->id != id) { fprintf(stderr, "RACE! (%03lu stole lock %d from %03lu)\n", @@ -427,7 +427,7 @@ run_lthread(arg) if (nl == 0) break; - (void)__os_sleep(&dbenv, 0, rand() % 500); + __os_sleep(&dbenv, 0, rand() % 500); } } @@ -443,7 +443,7 @@ run_wakeup(id) int status; void *retp; #endif - (void)__os_sleep(&dbenv, 3, 0); /* Let everyone catch up. */ + __os_sleep(&dbenv, 3, 0); /* Let everyone catch up. */ srand((u_int)time(NULL) % getpid()); /* Initialize random numbers. */ @@ -543,7 +543,7 @@ run_wthread(arg) return ((void *)EXIT_FAILURE); } - (void)__os_sleep(&dbenv, 0, rand() % 3); + __os_sleep(&dbenv, 0, rand() % 3); } return (NULL); } diff --git a/db/mutex/uts4_cc.s b/db/mutex/uts4_cc.s index f68d82922..9b314c4af 100644 --- a/db/mutex/uts4_cc.s +++ b/db/mutex/uts4_cc.s @@ -1,9 +1,9 @@ / See the file LICENSE for redistribution information. / - / Copyright (c) 1997-2003 + / Copyright (c) 1997-2004 / Sleepycat Software. All rights reserved. / - / $Id: uts4_cc.s,v 11.3 2003/01/08 05:28:42 bostic Exp $ + / $Id: uts4_cc.s,v 11.4 2004/01/28 03:36:18 bostic Exp $ / / int uts_lock ( int *p, int i ); / Update the lock word pointed to by p with the diff --git a/db/os/os_abs.c b/db/os/os_abs.c index d607bfc19..3d9f921ae 100644 --- a/db/os/os_abs.c +++ b/db/os/os_abs.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_abs.c,v 11.7 2004/01/28 03:36:18 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_abs.c,v 11.6 2003/01/08 05:28:55 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif diff --git a/db/os/os_alloc.c b/db/os/os_alloc.c index 24eba2c3b..7dd9f94f5 100644 --- a/db/os/os_alloc.c +++ b/db/os/os_alloc.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_alloc.c,v 11.41 2004/07/06 21:06:36 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_alloc.c,v 11.36 2003/04/24 19:47:36 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -23,7 +21,7 @@ static const char revid[] = "$Id: os_alloc.c,v 11.36 2003/04/24 19:47:36 bostic #ifdef DIAGNOSTIC static void __os_guard __P((DB_ENV *)); -union __db_alloc { +union __db_allocinfo { size_t size; double align; }; @@ -75,7 +73,7 @@ __os_umalloc(dbenv, size, storep) /* * Correct error return, see __os_malloc. */ - if ((ret = __os_get_errno()) == 0) { + if ((ret = __os_get_errno_ret_zero()) == 0) { ret = ENOMEM; __os_set_errno(ENOMEM); } @@ -127,7 +125,7 @@ __os_urealloc(dbenv, size, storep) /* * Correct errno, see __os_realloc. */ - if ((ret = __os_get_errno()) == 0) { + if ((ret = __os_get_errno_ret_zero()) == 0) { ret = ENOMEM; __os_set_errno(ENOMEM); } @@ -242,7 +240,7 @@ __os_malloc(dbenv, size, storep) #ifdef DIAGNOSTIC /* Add room for size and a guard byte. */ - size += sizeof(union __db_alloc) + 1; + size += sizeof(union __db_allocinfo) + 1; #endif if (DB_GLOBAL(j_malloc) != NULL) @@ -256,7 +254,7 @@ __os_malloc(dbenv, size, storep) * but it turns out that setting errno is quite expensive on * Windows/NT in an MT environment. 
*/ - if ((ret = __os_get_errno()) == 0) { + if ((ret = __os_get_errno_ret_zero()) == 0) { ret = ENOMEM; __os_set_errno(ENOMEM); } @@ -276,8 +274,8 @@ __os_malloc(dbenv, size, storep) */ ((u_int8_t *)p)[size - 1] = CLEAR_BYTE; - ((union __db_alloc *)p)->size = size; - p = &((union __db_alloc *)p)[1]; + ((union __db_allocinfo *)p)->size = size; + p = &((union __db_allocinfo *)p)[1]; #endif *(void **)storep = p; @@ -311,10 +309,10 @@ __os_realloc(dbenv, size, storep) #ifdef DIAGNOSTIC /* Add room for size and a guard byte. */ - size += sizeof(union __db_alloc) + 1; + size += sizeof(union __db_allocinfo) + 1; - /* Back up to the real begining */ - ptr = &((union __db_alloc *)ptr)[-1]; + /* Back up to the real beginning */ + ptr = &((union __db_allocinfo *)ptr)[-1]; #endif /* @@ -332,7 +330,7 @@ __os_realloc(dbenv, size, storep) * but it turns out that setting errno is quite expensive on * Windows/NT in an MT environment. */ - if ((ret = __os_get_errno()) == 0) { + if ((ret = __os_get_errno_ret_zero()) == 0) { ret = ENOMEM; __os_set_errno(ENOMEM); } @@ -343,8 +341,8 @@ __os_realloc(dbenv, size, storep) #ifdef DIAGNOSTIC ((u_int8_t *)p)[size - 1] = CLEAR_BYTE; /* Initialize guard byte. */ - ((union __db_alloc *)p)->size = size; - p = &((union __db_alloc *)p)[1]; + ((union __db_allocinfo *)p)->size = size; + p = &((union __db_allocinfo *)p)[1]; #endif *(void **)storep = p; @@ -364,7 +362,7 @@ __os_free(dbenv, ptr) void *ptr; { #ifdef DIAGNOSTIC - int size; + size_t size; /* * Check that the guard byte (one past the end of the memory) is * still CLEAR_BYTE. @@ -372,8 +370,8 @@ __os_free(dbenv, ptr) if (ptr == NULL) return; - ptr = &((union __db_alloc *)ptr)[-1]; - size = ((union __db_alloc *)ptr)->size; + ptr = &((union __db_allocinfo *)ptr)[-1]; + size = ((union __db_allocinfo *)ptr)->size; if (((u_int8_t *)ptr)[size - 1] != CLEAR_BYTE) __os_guard(dbenv); diff --git a/db/os/os_clock.c b/db/os/os_clock.c index 33c6a3add..2a8c44d1c 100644 --- a/db/os/os_clock.c +++ b/db/os/os_clock.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_clock.c,v 1.15 2004/07/06 17:33:14 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_clock.c,v 1.10 2003/01/08 05:29:03 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -34,38 +32,37 @@ static const char revid[] = "$Id: os_clock.c,v 1.10 2003/01/08 05:29:03 bostic E * __os_clock -- * Return the current time-of-day clock in seconds and microseconds. * - * PUBLIC: int __os_clock __P((DB_ENV *, u_int32_t *, u_int32_t *)); + * PUBLIC: void __os_clock __P((DB_ENV *, u_int32_t *, u_int32_t *)); */ -int +void __os_clock(dbenv, secsp, usecsp) DB_ENV *dbenv; u_int32_t *secsp, *usecsp; /* Seconds and microseconds. 
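In DIAGNOSTIC builds, the os_alloc.c routines above prepend a union header holding the allocation size and append a guard byte that __os_free checks; the union doubles as worst-case alignment padding. A standalone sketch of the same layout, where GUARD_BYTE stands in for CLEAR_BYTE (its actual value and __os_guard's behavior are internal details, so the constant and the error handling below are assumptions):

#include <stdio.h>
#include <stdlib.h>

#define GUARD_BYTE	0xdb		/* value assumed for illustration */

union allocinfo {			/* like union __db_allocinfo above */
	size_t	size;			/* total allocation size */
	double	align;			/* force worst-case alignment */
};

static void *
guarded_malloc(size_t size)
{
	void *p;

	size += sizeof(union allocinfo) + 1;	/* header plus guard byte */
	if ((p = malloc(size)) == NULL)
		return (NULL);
	((unsigned char *)p)[size - 1] = GUARD_BYTE;
	((union allocinfo *)p)->size = size;
	return (&((union allocinfo *)p)[1]);	/* hand out past the header */
}

static void
guarded_free(void *ptr)
{
	size_t size;

	if (ptr == NULL)
		return;
	ptr = &((union allocinfo *)ptr)[-1];	/* back up to the header */
	size = ((union allocinfo *)ptr)->size;
	if (((unsigned char *)ptr)[size - 1] != GUARD_BYTE)
		fprintf(stderr, "guard byte overwritten\n");	/* real code calls __os_guard */
	free(ptr);
}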
*/ { + const char *sc; + int ret; + #if defined(HAVE_GETTIMEOFDAY) struct timeval tp; - int ret; -retry: if (gettimeofday(&tp, NULL) != 0) { - if ((ret = __os_get_errno()) == EINTR) - goto retry; - __db_err(dbenv, "gettimeofday: %s", strerror(ret)); - return (ret); + RETRY_CHK((gettimeofday(&tp, NULL)), ret); + if (ret != 0) { + sc = "gettimeofday"; + goto err; } if (secsp != NULL) - *secsp = tp.tv_sec; + *secsp = (u_int32_t)tp.tv_sec; if (usecsp != NULL) - *usecsp = tp.tv_usec; + *usecsp = (u_int32_t)tp.tv_usec; #endif #if !defined(HAVE_GETTIMEOFDAY) && defined(HAVE_CLOCK_GETTIME) struct timespec tp; - int ret; -retry: if (clock_gettime(CLOCK_REALTIME, &tp) != 0) { - if ((ret = __os_get_errno()) == EINTR) - goto retry; - __db_err(dbenv, "clock_gettime: %s", strerror(ret)); - return (ret); + RETRY_CHK((clock_gettime(CLOCK_REALTIME, &tp)), ret); + if (ret != 0) { + sc = "clock_gettime"; + goto err; } if (secsp != NULL) @@ -75,12 +72,11 @@ retry: if (clock_gettime(CLOCK_REALTIME, &tp) != 0) { #endif #if !defined(HAVE_GETTIMEOFDAY) && !defined(HAVE_CLOCK_GETTIME) time_t now; - int ret; - if (time(&now) == (time_t)-1) { - ret = __os_get_errno(); - __db_err(dbenv, "time: %s", strerror(ret)); - return (ret); + RETRY_CHK((time(&now) == (time_t)-1 ? 1 : 0), ret); + if (ret != 0) { + sc = "time"; + goto err; } if (secsp != NULL) @@ -88,5 +84,8 @@ retry: if (clock_gettime(CLOCK_REALTIME, &tp) != 0) { if (usecsp != NULL) *usecsp = 0; #endif - return (0); + return; + +err: __db_err(dbenv, "%s: %s", sc, strerror(ret)); + (void)__db_panic(dbenv, ret); } diff --git a/db/os/os_config.c b/db/os/os_config.c index 70242b817..dcde0dca9 100644 --- a/db/os/os_config.c +++ b/db/os/os_config.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1998-2003 + * Copyright (c) 1998-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_config.c,v 11.15 2004/01/28 03:36:18 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_config.c,v 11.14 2003/01/08 05:29:08 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif diff --git a/db/os/os_dir.c b/db/os/os_dir.c index 6004486dd..3e381ae7f 100644 --- a/db/os/os_dir.c +++ b/db/os/os_dir.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_dir.c,v 11.17 2004/04/26 18:48:19 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_dir.c,v 11.15 2003/01/08 05:29:11 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -67,7 +65,7 @@ __os_dirlist(dbenv, dir, namesp, cntp) if (cnt >= arraysz) { arraysz += 100; if ((ret = __os_realloc(dbenv, - arraysz * sizeof(names[0]), &names)) != 0) + (u_int)arraysz * sizeof(names[0]), &names)) != 0) goto nomem; } if ((ret = __os_strdup(dbenv, dp->d_name, &names[cnt])) != 0) diff --git a/db/os/os_errno.c b/db/os/os_errno.c index 6fd6939ab..52bce4ce6 100644 --- a/db/os/os_errno.c +++ b/db/os/os_errno.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. 
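The os_clock.c hunk above, and the os_fid.c, os_fsync.c and os_handle.c hunks that follow, replace hand-rolled retry loops with the RETRY_CHK macro. Its definition lives in db_int.h and is not part of this patch, so the macro below is only a hypothetical stand-in showing the shape of such a wrapper: retry the operation while it fails with an interrupt-style error, then hand back the final errno.

#include <errno.h>

#define MAX_RETRIES	100		/* assumed cap, analogous to DB_RETRY */
#define RETRY_SYSCALL(op, ret) do {					\
	int __retries = 0;						\
	do {								\
		(ret) = (op) == 0 ? 0 : errno;				\
	} while (((ret) == EINTR || (ret) == EBUSY) &&			\
	    ++__retries < MAX_RETRIES);					\
} while (0)

/* Usage: RETRY_SYSCALL(fsync(fd), ret);  The operation is expected to
 * return 0 on success and set errno on failure. */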
+ * + * $Id: os_errno.c,v 11.11 2004/01/28 03:36:18 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_errno.c,v 11.10 2003/02/14 16:21:04 bostic Exp $"; -#endif /* not lint */ - #include "db_int.h" /* diff --git a/db/os/os_fid.c b/db/os/os_fid.c index 5a2b6ca8e..29f19cd81 100644 --- a/db/os/os_fid.c +++ b/db/os/os_fid.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_fid.c,v 11.21 2004/07/06 13:55:48 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_fid.c,v 11.17 2003/05/05 19:55:04 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include @@ -32,9 +30,6 @@ static const char revid[] = "$Id: os_fid.c,v 11.17 2003/05/05 19:55:04 bostic Ex #include "db_int.h" -#define SERIAL_INIT 0 -static u_int32_t fid_serial = SERIAL_INIT; - /* * __os_fileid -- * Return a unique identifier for a file. The structure @@ -55,48 +50,24 @@ __os_fileid(dbenv, fname, unique_okay, fidp) { struct stat sb; size_t i; - int ret, retries; + int ret; u_int32_t tmp; u_int8_t *p; - retries = 0; - /* Clear the buffer. */ memset(fidp, 0, DB_FILE_ID_LEN); /* On POSIX/UNIX, use a dev/inode pair. */ -retry: #ifdef HAVE_VXWORKS - if (stat((char *)fname, &sb) != 0) { + RETRY_CHK((stat((char *)fname, &sb)), ret); #else - if (stat(fname, &sb) != 0) { + RETRY_CHK((stat(fname, &sb)), ret); #endif - if (((ret = __os_get_errno()) == EINTR || ret == EBUSY) && - ++retries < DB_RETRY) - goto retry; + if (ret != 0) { __db_err(dbenv, "%s: %s", fname, strerror(ret)); return (ret); } - /* - * Initialize/increment the serial number we use to help avoid - * fileid collisions. Note that we don't bother with locking; - * it's unpleasant to do from down in here, and if we race on - * this no real harm will be done, since the finished fileid - * has so many other components. - * - * We increment by 100000 on each call as a simple way of - * randomizing; simply incrementing seems potentially less useful - * if pids are also simply incremented, since this is process-local - * and we may be one of a set of processes starting up. 100000 - * pushes us out of pid space on most platforms, and has few - * interesting properties in base 2. - */ - if (fid_serial == SERIAL_INIT) - __os_id(&fid_serial); - else - fid_serial += 100000; - /* * !!! * Nothing is ever big enough -- on Sparc V9, st_ino, st_dev and the @@ -132,17 +103,35 @@ retry: *fidp++ = *p++; if (unique_okay) { - /* - * We want the number of seconds, not the high-order 0 bits, - * so convert the returned time_t to a (potentially) smaller - * fixed-size type. - */ - tmp = (u_int32_t)time(NULL); + static u_int32_t fid_serial = 0; + + /* Add in 32-bits of (hopefully) unique number. */ + __os_unique_id(dbenv, &tmp); for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i) *fidp++ = *p++; - for (p = (u_int8_t *)&fid_serial, i = sizeof(u_int32_t); - i > 0; --i) + /* + * Initialize/increment the serial number we use to help + * avoid fileid collisions. Note we don't bother with + * locking; it's unpleasant to do from down in here, and + * if we race on this no real harm will be done, since the + * finished fileid has so many other components. 
+ * + * We increment by 100000 on each call as a simple way of + * randomizing; simply incrementing seems potentially less + * useful if pids are also simply incremented, since this + * is process-local and we may be one of a set of processes + * starting up. 100000 pushes us out of pid space on most + * 32-bit platforms, and has few interesting properties in + * base 2. + */ + if (fid_serial == 0) + __os_id(&fid_serial); + else + fid_serial += 100000; + + for (p = + (u_int8_t *)&fid_serial, i = sizeof(u_int32_t); i > 0; --i) *fidp++ = *p++; } diff --git a/db/os/os_fsync.c b/db/os/os_fsync.c index 819072938..576acf00b 100644 --- a/db/os/os_fsync.c +++ b/db/os/os_fsync.c @@ -1,22 +1,20 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_fsync.c,v 11.22 2004/07/06 20:54:09 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_fsync.c,v 11.18 2003/02/16 15:53:55 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include /* XXX: Required by __hp3000s900 */ -#include #include +#include #endif #include "db_int.h" @@ -24,7 +22,7 @@ static const char revid[] = "$Id: os_fsync.c,v 11.18 2003/02/16 15:53:55 bostic #ifdef HAVE_VXWORKS #include "ioLib.h" -#define fsync(fd) __vx_fsync(fd); +#define fsync(fd) __vx_fsync(fd) int __vx_fsync(fd) @@ -44,7 +42,7 @@ __vx_fsync(fd) #endif #ifdef __hp3000s900 -#define fsync(fd) __mpe_fsync(fd); +#define fsync(fd) __mpe_fsync(fd) int __mpe_fsync(fd) @@ -69,7 +67,7 @@ __os_fsync(dbenv, fhp) DB_ENV *dbenv; DB_FH *fhp; { - int ret, retries; + int ret; /* * Do nothing if the file descriptor has been marked as not requiring @@ -81,13 +79,14 @@ __os_fsync(dbenv, fhp) /* Check for illegal usage. */ DB_ASSERT(F_ISSET(fhp, DB_FH_OPENED) && fhp->fd != -1); - retries = 0; - do { - ret = DB_GLOBAL(j_fsync) != NULL ? - DB_GLOBAL(j_fsync)(fhp->fd) : fsync(fhp->fd); - } while (ret != 0 && - ((ret = __os_get_errno()) == EINTR || ret == EBUSY) && - ++retries < DB_RETRY); + if (DB_GLOBAL(j_fsync) != NULL) + ret = DB_GLOBAL(j_fsync)(fhp->fd); + else +#ifdef HAVE_FDATASYNC + RETRY_CHK((fdatasync(fhp->fd)), ret); +#else + RETRY_CHK((fsync(fhp->fd)), ret); +#endif if (ret != 0) __db_err(dbenv, "fsync %s", strerror(ret)); diff --git a/db/os/os_handle.c b/db/os/os_handle.c index e357bf66a..62a7bc1a1 100644 --- a/db/os/os_handle.c +++ b/db/os/os_handle.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1998-2003 + * Copyright (c) 1998-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_handle.c,v 11.40 2004/08/19 17:59:22 sue Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_handle.c,v 11.32 2003/02/16 15:54:03 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -36,7 +34,8 @@ __os_openhandle(dbenv, name, flags, mode, fhpp) DB_FH **fhpp; { DB_FH *fhp; - int ret, nrepeat, retries; + u_int nrepeat, retries; + int ret; #ifdef HAVE_VXWORKS int newflags; #endif @@ -73,6 +72,13 @@ __os_openhandle(dbenv, name, flags, mode, fhpp) DB_BEGIN_SINGLE_THREAD; newflags = flags & ~(O_CREAT | O_EXCL); if ((fhp->fd = open(name, newflags, mode)) != -1) { + /* + * We need to mark the file opened at this + * point so that if we get any error below + * we will properly close the fd we just + * opened on the error path. 
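The os_fsync.c hunk above swaps the explicit retry loop for RETRY_CHK and, when the platform provides it, calls fdatasync() instead of fsync(), so the pure-metadata updates (such as timestamps) that fsync() would also force can be skipped. A minimal sketch of that preference, with HAVE_FDATASYNC standing in for the configure test and the retry cap chosen arbitrarily:

#include <errno.h>
#include <unistd.h>

static int
flush_fd(int fd)
{
        int ret, retries;

        retries = 0;
        do {
#ifdef HAVE_FDATASYNC
                ret = (fdatasync(fd) == 0 ? 0 : errno);
#else
                ret = (fsync(fd) == 0 ? 0 : errno);
#endif
        } while ((ret == EINTR || ret == EBUSY) && ++retries < 10);

        return (ret);
}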
+ */ + F_SET(fhp, DB_FH_OPENED); if (LF_ISSET(O_EXCL)) { /* * If we get here, want O_EXCL create, @@ -96,7 +102,6 @@ __os_openhandle(dbenv, name, flags, mode, fhpp) fhp->fd = creat(name, newflags); DB_END_SINGLE_THREAD; } else - /* FALLTHROUGH */ #endif #ifdef __VMS @@ -138,17 +143,20 @@ __os_openhandle(dbenv, name, flags, mode, fhpp) * if we can't open a database, an inability to open a * log file is cause for serious dismay. */ - (void)__os_sleep(dbenv, nrepeat * 2, 0); + __os_sleep(dbenv, nrepeat * 2, 0); break; + case EAGAIN: case EBUSY: case EINTR: /* - * If it was an EINTR or EBUSY, retry immediately, + * If an EAGAIN, EBUSY or EINTR, retry immediately for * DB_RETRY times. */ if (++retries < DB_RETRY) --nrepeat; break; + default: + break; } } @@ -171,7 +179,7 @@ __os_closehandle(dbenv, fhp) DB_ENV *dbenv; DB_FH *fhp; { - int ret, retries; + int ret; ret = 0; @@ -180,13 +188,10 @@ __os_closehandle(dbenv, fhp) * file. */ if (F_ISSET(fhp, DB_FH_OPENED)) { - retries = 0; - do { - ret = DB_GLOBAL(j_close) != NULL ? - DB_GLOBAL(j_close)(fhp->fd) : close(fhp->fd); - } while (ret != 0 && - ((ret = __os_get_errno()) == EINTR || ret == EBUSY) && - ++retries < DB_RETRY); + if (DB_GLOBAL(j_close) != NULL) + ret = DB_GLOBAL(j_close)(fhp->fd); + else + RETRY_CHK((close(fhp->fd)), ret); if (ret != 0) __db_err(dbenv, "close: %s", strerror(ret)); @@ -194,7 +199,7 @@ __os_closehandle(dbenv, fhp) /* Unlink the file if we haven't already done so. */ if (F_ISSET(fhp, DB_FH_UNLINK)) { (void)__os_unlink(dbenv, fhp->name); - (void)__os_free(dbenv, fhp->name); + __os_free(dbenv, fhp->name); } } diff --git a/db/os/os_id.c b/db/os/os_id.c index 77e2eb208..79df12f9b 100644 --- a/db/os/os_id.c +++ b/db/os/os_id.c @@ -1,19 +1,18 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_id.c,v 1.9 2004/09/22 16:27:54 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_id.c,v 1.3 2003/01/08 05:29:18 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include +#include #include #endif @@ -42,6 +41,49 @@ __os_id(idp) #ifdef HAVE_VXWORKS *idp = taskIdSelf(); #else - *idp = getpid(); + *idp = (u_int32_t)getpid(); +#endif +} + +/* + * __os_unique_id -- + * Return a unique 32-bit value. + * + * PUBLIC: void __os_unique_id __P((DB_ENV *, u_int32_t *)); + */ +void +__os_unique_id(dbenv, idp) + DB_ENV *dbenv; + u_int32_t *idp; +{ + static int first = 1; + u_int32_t id, pid, sec, usec; + + *idp = 0; + + /* + * Our randomized value is comprised of our process ID, the current + * time of day and a couple of a stack addresses, all XOR'd together. + */ + __os_id(&pid); + __os_clock(dbenv, &sec, &usec); + + id = pid ^ sec ^ usec ^ P_TO_UINT32(&pid); + + /* + * We could try and find a reasonable random-number generator, but + * that's not all that easy to do. Seed and use srand()/rand(), if + * we can find them. + */ +#if HAVE_SRAND + if (first == 1) + srand((u_int)id); #endif + first = 0; + +#if HAVE_RAND + id ^= (u_int)rand(); +#endif + + *idp = id; } diff --git a/db/os/os_map.c b/db/os/os_map.c index 75c243681..adcdaef3c 100644 --- a/db/os/os_map.c +++ b/db/os/os_map.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
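The new __os_unique_id() above mixes the process ID, the time of day, a stack address and, when available, rand() into a single 32-bit value. A self-contained approximation (unique_id_sketch is an illustrative name and the seeding policy is simplified; the real routine also respects the HAVE_SRAND/HAVE_RAND configure tests):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>

static uint32_t
unique_id_sketch(void)
{
        static int first = 1;
        struct timeval tv;
        uint32_t id, pid;

        pid = (uint32_t)getpid();
        (void)gettimeofday(&tv, NULL);

        /* XOR the components; the stack address varies per call frame. */
        id = pid ^ (uint32_t)tv.tv_sec ^ (uint32_t)tv.tv_usec ^
            (uint32_t)(uintptr_t)&pid;

        if (first) {            /* Seed the generator once per process. */
                srand((unsigned int)id);
                first = 0;
        }
        return (id ^ (uint32_t)rand());
}

int
main(void)
{
        printf("%#lx\n", (unsigned long)unique_id_sketch());
        return (0);
}

None of this is cryptographic; it only needs to make accidental fileid collisions unlikely, which is all __os_fileid() asks of it.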
+ * + * $Id: os_map.c,v 11.57 2004/07/06 13:55:48 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_map.c,v 11.51 2003/07/01 19:47:15 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #ifdef HAVE_MMAP @@ -65,7 +63,7 @@ __os_r_sysattach(dbenv, infop, rp) #if defined(HAVE_SHMGET) { key_t segid; - int id, ret; + int id, mode, ret; /* * We could potentially create based on REGION_CREATE_OK, but @@ -106,8 +104,13 @@ __os_r_sysattach(dbenv, infop, rp) return (EAGAIN); } } - if ((id = - shmget(segid, rp->size, IPC_CREAT | 0600)) == -1) { + + /* + * Map the DbEnv::open method file mode permissions to + * shmget call permissions. + */ + mode = IPC_CREAT | __db_shm_mode(dbenv); + if ((id = shmget(segid, rp->size, mode)) == -1) { ret = __os_get_errno(); __db_err(dbenv, "shmget: key: %ld: unable to create shared system memory region: %s", @@ -148,9 +151,9 @@ __os_r_sysattach(dbenv, infop, rp) * of that. */ if ((ret = __os_open(dbenv, infop->name, - DB_OSO_REGION | DB_OSO_DIRECT | + DB_OSO_REGION | (F_ISSET(infop, REGION_CREATE_OK) ? DB_OSO_CREATE : 0), - infop->mode, &fhp)) != 0) + dbenv->db_mode, &fhp)) != 0) __db_err(dbenv, "%s: %s", infop->name, db_strerror(ret)); /* @@ -295,6 +298,8 @@ __os_unmapfile(dbenv, addr, len) void *addr; size_t len; { + int ret; + /* If the user replaced the map call, call through their interface. */ if (DB_GLOBAL(j_unmap) != NULL) return (DB_GLOBAL(j_unmap)(addr, len)); @@ -302,22 +307,12 @@ __os_unmapfile(dbenv, addr, len) #ifdef HAVE_MMAP #ifdef HAVE_MUNLOCK if (F_ISSET(dbenv, DB_ENV_LOCKDOWN)) - while (munlock(addr, len) != 0 && __os_get_errno() == EINTR) - ; + RETRY_CHK((munlock(addr, len)), ret); #else COMPQUIET(dbenv, NULL); #endif - { - int err, ret, retries; - - err = retries = 0; - do { - ret = munmap(addr, len); - } while (ret != 0 && - ((err = __os_get_errno()) == EINTR || err == EBUSY) && - ++retries < DB_RETRY); - return (ret ? err : 0); - } + RETRY_CHK((munmap(addr, len)), ret); + return (ret); #else COMPQUIET(dbenv, NULL); @@ -379,6 +374,19 @@ __os_map(dbenv, path, fhp, len, is_region, is_rdonly, addrp) COMPQUIET(is_region, 0); #endif + /* + * FreeBSD: + * Causes data dirtied via this VM map to be flushed to physical media + * only when necessary (usually by the pager) rather then gratuitously. + * Typically this prevents the update daemons from flushing pages + * dirtied through such maps and thus allows efficient sharing of + * memory across unassociated processes using a file-backed shared + * memory map. + */ +#ifdef MAP_NOSYNC + flags |= MAP_NOSYNC; +#endif + prot = PROT_READ | (is_rdonly ? 0 : PROT_WRITE); /* diff --git a/db/os/os_method.c b/db/os/os_method.c index 4705ddee1..a5bb17a79 100644 --- a/db/os/os_method.c +++ b/db/os/os_method.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: os_method.c,v 11.21 2004/09/17 22:00:31 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_method.c,v 11.16 2003/01/08 05:29:22 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif @@ -84,6 +82,17 @@ db_env_set_func_fsync(func_fsync) return (0); } +/* + * EXTERN: int db_env_set_func_ftruncate __P((int (*)(int, off_t))); + */ +int +db_env_set_func_ftruncate(func_ftruncate) + int (*func_ftruncate) __P((int, off_t)); +{ + DB_GLOBAL(j_ftruncate) = func_ftruncate; + return (0); +} + /* * EXTERN: int db_env_set_func_ioinfo __P((int (*)(const char *, * EXTERN: int, u_int32_t *, u_int32_t *, u_int32_t *))); @@ -120,6 +129,30 @@ db_env_set_func_map(func_map) return (0); } +/* + * EXTERN: int db_env_set_func_pread + * EXTERN: __P((ssize_t (*)(int, void *, size_t, off_t))); + */ +int +db_env_set_func_pread(func_pread) + ssize_t (*func_pread) __P((int, void *, size_t, off_t)); +{ + DB_GLOBAL(j_pread) = func_pread; + return (0); +} + +/* + * EXTERN: int db_env_set_func_pwrite + * EXTERN: __P((ssize_t (*)(int, const void *, size_t, off_t))); + */ +int +db_env_set_func_pwrite(func_pwrite) + ssize_t (*func_pwrite) __P((int, const void *, size_t, off_t)); +{ + DB_GLOBAL(j_pwrite) = func_pwrite; + return (0); +} + /* * EXTERN: int db_env_set_func_open __P((int (*)(const char *, int, ...))); */ @@ -167,11 +200,11 @@ db_env_set_func_rename(func_rename) /* * EXTERN: int db_env_set_func_seek - * EXTERN: __P((int (*)(int, size_t, db_pgno_t, u_int32_t, int, int))); + * EXTERN: __P((int (*)(int, off_t, int))); */ int db_env_set_func_seek(func_seek) - int (*func_seek) __P((int, size_t, db_pgno_t, u_int32_t, int, int)); + int (*func_seek) __P((int, off_t, int)); { DB_GLOBAL(j_seek) = func_seek; return (0); diff --git a/db/os/os_oflags.c b/db/os/os_oflags.c index 6313ba23d..2ffb6db2d 100644 --- a/db/os/os_oflags.c +++ b/db/os/os_oflags.c @@ -1,20 +1,23 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_oflags.c,v 11.14 2004/07/09 18:39:10 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_oflags.c,v 11.11 2003/01/08 05:29:23 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include +#ifdef HAVE_SHMGET +#include +#include +#endif + #include #endif @@ -60,18 +63,6 @@ __db_oflags(oflags) return (dbflags); } -/* - * __db_omode -- - * Convert a permission string to the correct open(2) flags. - * - * PUBLIC: int __db_omode __P((const char *)); - */ -int -__db_omode(perm) - const char *perm; -{ - int mode; - #ifdef DB_WIN32 #ifndef S_IRUSR #define S_IRUSR S_IREAD /* R for owner */ @@ -111,6 +102,18 @@ __db_omode(perm) #define S_IWOTH 0000002 /* W for other */ #endif #endif /* DB_WIN32 */ + +/* + * __db_omode -- + * Convert a permission string to the correct open(2) flags. + * + * PUBLIC: int __db_omode __P((const char *)); + */ +int +__db_omode(perm) + const char *perm; +{ + int mode; mode = 0; if (perm[0] == 'r') mode |= S_IRUSR; @@ -126,3 +129,46 @@ __db_omode(perm) mode |= S_IWOTH; return (mode); } + +#ifdef HAVE_SHMGET + +#ifndef SHM_R +#define SHM_R 0400 +#endif +#ifndef SHM_W +#define SHM_W 0200 +#endif + +/* + * __db_shm_mode -- + * Map the DbEnv::open method file mode permissions to shmget call + * permissions. 
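db_env_set_func_ftruncate, db_env_set_func_pread and db_env_set_func_pwrite above all follow the existing replacement-function pattern: store the caller's function pointer in a DB_GLOBAL slot and have the os layer prefer it over the native call. A sketch of that pattern for a pwrite-style hook (j_pwrite_sketch, set_func_pwrite_sketch and do_pwrite are illustrative names, not the DB_GLOBAL fields themselves):

#include <stddef.h>
#include <sys/types.h>
#include <unistd.h>

static ssize_t (*j_pwrite_sketch)(int, const void *, size_t, off_t);

int
set_func_pwrite_sketch(ssize_t (*func)(int, const void *, size_t, off_t))
{
        j_pwrite_sketch = func;
        return (0);
}

static ssize_t
do_pwrite(int fd, const void *buf, size_t len, off_t off)
{
        /* Prefer the application-installed hook over the system call. */
        return (j_pwrite_sketch != NULL ?
            j_pwrite_sketch(fd, buf, len, off) :
            pwrite(fd, buf, len, off));
}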
+ * + * PUBLIC: int __db_shm_mode __P((DB_ENV *)); + */ +int +__db_shm_mode(dbenv) + DB_ENV *dbenv; +{ + int mode; + + /* Default to r/w owner, r/w group. */ + if (dbenv->db_mode == 0) + return (SHM_R | SHM_W | SHM_R >> 3 | SHM_W >> 3); + + mode = 0; + if (dbenv->db_mode & S_IRUSR) + mode |= SHM_R; + if (dbenv->db_mode & S_IWUSR) + mode |= SHM_W; + if (dbenv->db_mode & S_IRGRP) + mode |= SHM_R >> 3; + if (dbenv->db_mode & S_IWGRP) + mode |= SHM_W >> 3; + if (dbenv->db_mode & S_IROTH) + mode |= SHM_R >> 6; + if (dbenv->db_mode & S_IWOTH) + mode |= SHM_W >> 6; + return (mode); +} +#endif diff --git a/db/os/os_open.c b/db/os/os_open.c index 6a8326b4a..44c02e6d4 100644 --- a/db/os/os_open.c +++ b/db/os/os_open.c @@ -1,18 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_open.c,v 11.60 2004/09/24 00:43:19 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_open.c,v 11.48 2003/09/10 00:27:29 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include +#include #ifdef HAVE_SYS_FCNTL_H #include @@ -20,10 +19,13 @@ static const char revid[] = "$Id: os_open.c,v 11.48 2003/09/10 00:27:29 bostic E #include #include +#include #endif #include "db_int.h" +static int __os_intermediate_dir __P((DB_ENV *, const char *)); +static int __os_mkdir __P((DB_ENV *, const char *)); #ifdef HAVE_QNX static int __os_region_open __P((DB_ENV *, const char *, int, int, DB_FH **)); #endif @@ -65,7 +67,7 @@ __os_open(dbenv, name, flags, mode, fhpp) int mode; DB_FH **fhpp; { - return (__os_open_extend(dbenv, name, 0, 0, flags, mode, fhpp)); + return (__os_open_extend(dbenv, name, 0, flags, mode, fhpp)); } /* @@ -73,29 +75,28 @@ __os_open(dbenv, name, flags, mode, fhpp) * Open a file descriptor (including page size and log size information). * * PUBLIC: int __os_open_extend __P((DB_ENV *, - * PUBLIC: const char *, u_int32_t, u_int32_t, u_int32_t, int, DB_FH **)); + * PUBLIC: const char *, u_int32_t, u_int32_t, int, DB_FH **)); */ int -__os_open_extend(dbenv, name, log_size, page_size, flags, mode, fhpp) +__os_open_extend(dbenv, name, page_size, flags, mode, fhpp) DB_ENV *dbenv; const char *name; - u_int32_t log_size, page_size, flags; + u_int32_t page_size, flags; int mode; DB_FH **fhpp; { DB_FH *fhp; int oflags, ret; - COMPQUIET(log_size, 0); COMPQUIET(page_size, 0); *fhpp = NULL; oflags = 0; #define OKFLAGS \ - (DB_OSO_CREATE | DB_OSO_DIRECT | DB_OSO_EXCL | DB_OSO_LOG | \ - DB_OSO_RDONLY | DB_OSO_REGION | DB_OSO_SEQ | DB_OSO_TEMP | \ - DB_OSO_TRUNC) + (DB_OSO_CREATE | DB_OSO_DIRECT | DB_OSO_DSYNC | DB_OSO_EXCL | \ + DB_OSO_LOG | DB_OSO_RDONLY | DB_OSO_REGION | DB_OSO_SEQ | \ + DB_OSO_TEMP | DB_OSO_TRUNC) if ((ret = __db_fchk(dbenv, "__os_open", flags, OKFLAGS)) != 0) return (ret); @@ -120,16 +121,12 @@ __os_open_extend(dbenv, name, log_size, page_size, flags, mode, fhpp) if (LF_ISSET(DB_OSO_EXCL)) oflags |= O_EXCL; -#if defined(O_DSYNC) && defined(XXX_NEVER_SET) - /* - * !!! - * We should get better performance if we push the log files to disk - * immediately instead of waiting for the sync. However, Solaris - * (and likely any other system based on the 4BSD filesystem releases), - * doesn't implement O_DSYNC correctly, only flushing data blocks and - * not inode or indirect blocks. 
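The __os_open_extend() changes that continue below drop the old O_DSYNC block (which was guarded by XXX_NEVER_SET and never enabled) and instead turn the flag on only when the caller passes both DB_OSO_LOG and the new DB_OSO_DSYNC, alongside O_DIRECT for DB_OSO_DIRECT. A compressed sketch of that translation (the OSO_* values here are illustrative constants, not the real DB_OSO_* flags, and O_DIRECT is tested directly rather than via HAVE_O_DIRECT):

#include <fcntl.h>

#define OSO_DIRECT      0x0001          /* illustrative flag values */
#define OSO_DSYNC       0x0002
#define OSO_LOG         0x0004

static int
open_flags_sketch(unsigned int dbflags)
{
        int oflags;

        oflags = O_RDWR | O_CREAT;
#ifdef O_DIRECT
        if (dbflags & OSO_DIRECT)
                oflags |= O_DIRECT;
#endif
#ifdef O_DSYNC
        /* Write-through log I/O only when explicitly requested. */
        if ((dbflags & OSO_LOG) && (dbflags & OSO_DSYNC))
                oflags |= O_DSYNC;
#endif
        return (oflags);
}

Making O_DSYNC opt-in keeps the old default (flush via fsync) while letting applications that benefit from synchronous log writes request them.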
- */ - if (LF_ISSET(DB_OSO_LOG)) +#ifdef HAVE_O_DIRECT + if (LF_ISSET(DB_OSO_DIRECT)) + oflags |= O_DIRECT; +#endif +#ifdef O_DSYNC + if (LF_ISSET(DB_OSO_LOG) && LF_ISSET(DB_OSO_DSYNC)) oflags |= O_DSYNC; #endif @@ -141,10 +138,14 @@ __os_open_extend(dbenv, name, log_size, page_size, flags, mode, fhpp) if (LF_ISSET(DB_OSO_TRUNC)) oflags |= O_TRUNC; -#ifdef HAVE_O_DIRECT - if (LF_ISSET(DB_OSO_DIRECT)) - oflags |= O_DIRECT; -#endif + /* + * Undocumented feature: allow applications to create intermediate + * directories whenever a file is opened. + */ + if (dbenv != NULL && + dbenv->dir_mode != 0 && LF_ISSET(DB_OSO_CREATE) && + (ret = __os_intermediate_dir(dbenv, name)) != 0) + return (ret); #ifdef HAVE_QNX if (LF_ISSET(DB_OSO_REGION)) @@ -154,6 +155,11 @@ __os_open_extend(dbenv, name, log_size, page_size, flags, mode, fhpp) if ((ret = __os_openhandle(dbenv, name, oflags, mode, &fhp)) != 0) return (ret); +#ifdef O_DSYNC + if (LF_ISSET(DB_OSO_LOG) && LF_ISSET(DB_OSO_DSYNC)) + F_SET(fhp, DB_FH_NOSYNC); +#endif + #if defined(HAVE_DIRECTIO) && defined(DIRECTIO_ON) /* * The Solaris C library includes directio, but you have to set special @@ -230,7 +236,7 @@ __os_qnx_region_open(dbenv, name, oflags, mode, fhpp) F_SET(fhp, DB_FH_OPENED); #ifdef HAVE_FCNTL_F_SETFD - /* Deny file descriptor acces to any child process. */ + /* Deny file descriptor access to any child process. */ if (fcntl(fhp->fd, F_SETFD, 1) == -1) { ret = __os_get_errno(); __db_err(dbenv, "fcntl(F_SETFD): %s", strerror(ret)); @@ -321,3 +327,86 @@ __os_shmname(dbenv, name, newnamep) return (0); } #endif + +/* + * __os_intermediate_dir -- + * Create intermediate directories. + */ +static int +__os_intermediate_dir(dbenv, name) + DB_ENV *dbenv; + const char *name; +{ + size_t len; + int ret; + char savech, *p, *t, buf[128]; + + ret = 0; + + /* + * Get a copy so we can modify the string. + * + * Allocate memory if temporary space is too small. + */ + if ((len = strlen(name)) > sizeof(buf) - 1) { + if ((ret = __os_umalloc(dbenv, len, &t)) != 0) + return (ret); + } else + t = buf; + (void)strcpy(t, name); + + /* + * Cycle through the path, creating intermediate directories. + * + * Skip the first byte if it's a path separator, it's the start of an + * absolute pathname. + */ + if (PATH_SEPARATOR[1] == '\0') { + for (p = t + 1; p[0] != '\0'; ++p) + if (p[0] == PATH_SEPARATOR[0]) { + savech = *p; + *p = '\0'; + if (__os_exists(t, NULL) && + (ret = __os_mkdir(dbenv, t)) != 0) + break; + *p = savech; + } + } else + for (p = t + 1; p[0] != '\0'; ++p) + if (strchr(PATH_SEPARATOR, p[0]) != NULL) { + savech = *p; + *p = '\0'; + if (__os_exists(t, NULL) && + (ret = __os_mkdir(dbenv, t)) != 0) + break; + *p = savech; + } + if (t != buf) + __os_free(dbenv, t); + return (ret); +} + +/* + * __os_mkdir -- + * Create a directory. + */ +static int +__os_mkdir(dbenv, name) + DB_ENV *dbenv; + const char *name; +{ + int ret; + + /* Make the directory, with paranoid permissions. */ +#ifdef HAVE_VXWORKS + RETRY_CHK((mkdir((char *)name)), ret); +#else + RETRY_CHK((mkdir(name, 0600)), ret); + if (ret != 0) + return (ret); + + /* Set the absolute permissions. */ + RETRY_CHK((chmod(name, dbenv->dir_mode)), ret); +#endif + return (ret); +} diff --git a/db/os/os_region.c b/db/os/os_region.c index ff7cd5a07..024c0320d 100644 --- a/db/os/os_region.c +++ b/db/os/os_region.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
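The undocumented intermediate-directory feature above walks the pathname, truncating at each separator and creating whatever is missing before the real open. A standalone equivalent in spirit (fixed 0755 permissions and a fixed-size buffer for brevity; the patch creates with 0600 and then chmod()s to dbenv->dir_mode, and handles multi-character separator sets):

#include <errno.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>

static int
make_intermediate_dirs(const char *name)
{
        char buf[1024], *p;

        if (strlen(name) >= sizeof(buf))
                return (ENAMETOOLONG);
        (void)strcpy(buf, name);

        /* Skip the leading separator of an absolute pathname. */
        for (p = buf + 1; *p != '\0'; ++p)
                if (*p == '/') {
                        *p = '\0';
                        if (mkdir(buf, 0755) != 0 && errno != EEXIST)
                                return (errno);
                        *p = '/';
                }
        return (0);
}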
+ * + * $Id: os_region.c,v 11.21 2004/06/10 17:20:57 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_region.c,v 11.17 2003/07/13 17:45:23 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -32,6 +30,7 @@ __os_r_attach(dbenv, infop, rp) REGION *rp; { int ret; + /* Round off the requested size for the underlying VM. */ OS_VMROUNDOFF(rp->size); @@ -69,21 +68,43 @@ __os_r_attach(dbenv, infop, rp) return (EINVAL); } #endif - if ((ret = - __os_malloc(dbenv, rp->size, &infop->addr)) != 0) + /* + * Pad out the allocation, we're going to align it to mutex + * alignment. + */ + if ((ret = __os_malloc(dbenv, + sizeof(REGENV) + (MUTEX_ALIGN - 1), &infop->addr)) != 0) + return (ret); + + infop->max_alloc = rp->size; + } else { + /* + * If the user replaced the map call, call through their + * interface. + */ + if (DB_GLOBAL(j_map) != NULL && (ret = DB_GLOBAL(j_map) + (infop->name, rp->size, 1, 0, &infop->addr)) != 0) + return (ret); + + /* Get some space from the underlying system. */ + if ((ret = __os_r_sysattach(dbenv, infop, rp)) != 0) return (ret); -#if defined(UMRW) && !defined(DIAGNOSTIC) - memset(infop->addr, CLEAR_BYTE, rp->size); -#endif - return (0); } - /* If the user replaced the map call, call through their interface. */ - if (DB_GLOBAL(j_map) != NULL) - return (DB_GLOBAL(j_map)(infop->name, - rp->size, 1, 0, &infop->addr)); + /* + * We may require alignment the underlying system or heap allocation + * library doesn't supply. Align the address if necessary, saving + * the original values for restoration when the region is discarded. + */ + infop->addr_orig = infop->addr; + infop->addr = ALIGNP_INC(infop->addr_orig, MUTEX_ALIGN); + + rp->size_orig = rp->size; + if (infop->addr != infop->addr_orig) + rp->size -= + (u_int8_t *)infop->addr - (u_int8_t *)infop->addr_orig; - return (__os_r_sysattach(dbenv, infop, rp)); + return (0); } /* @@ -102,6 +123,12 @@ __os_r_detach(dbenv, infop, destroy) rp = infop->rp; + /* Restore any address/size altered for alignment reasons. */ + if (infop->addr != infop->addr_orig) { + infop->addr = infop->addr_orig; + rp->size = rp->size_orig; + } + /* If a region is private, free the memory. */ if (F_ISSET(dbenv, DB_ENV_PRIVATE)) { __os_free(dbenv, infop->addr); diff --git a/db/os/os_rename.c b/db/os/os_rename.c index 9fee9b729..a55160bcc 100644 --- a/db/os/os_rename.c +++ b/db/os/os_rename.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_rename.c,v 11.17 2004/07/06 13:55:48 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_rename.c,v 11.15 2003/05/05 19:55:04 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -21,29 +19,27 @@ static const char revid[] = "$Id: os_rename.c,v 11.15 2003/05/05 19:55:04 bostic /* * __os_rename -- - * Rename a file. If flags is non-zero, then errors are OK and we - * should not output an error message. + * Rename a file. * * PUBLIC: int __os_rename __P((DB_ENV *, * PUBLIC: const char *, const char *, u_int32_t)); */ int -__os_rename(dbenv, old, new, flags) +__os_rename(dbenv, old, new, silent) DB_ENV *dbenv; const char *old, *new; - u_int32_t flags; + u_int32_t silent; { - int ret, retries; + int ret; - retries = 0; - do { - ret = DB_GLOBAL(j_rename) != NULL ? 
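The os_region.c rework above aligns the region address to MUTEX_ALIGN and remembers the original pointer and size so __os_r_detach() can hand the untouched values back to free() or the system detach. A sketch of the align-and-remember idiom for the heap case (ALIGN, align_up and region_sketch are illustrative stand-ins for MUTEX_ALIGN, ALIGNP_INC and REGINFO/REGION; ALIGN must be a power of two for the mask trick):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#define ALIGN   16                      /* illustrative mutex alignment */

struct region_sketch {
        void    *addr, *addr_orig;
        size_t   size, size_orig;
};

static void *
align_up(void *p, uintptr_t align)
{
        return ((void *)(((uintptr_t)p + align - 1) & ~(align - 1)));
}

static int
region_attach_sketch(struct region_sketch *r, size_t size)
{
        /* Pad so at least "size" bytes remain after aligning up. */
        if ((r->addr_orig = malloc(size + ALIGN - 1)) == NULL)
                return (ENOMEM);
        r->addr = align_up(r->addr_orig, ALIGN);

        r->size_orig = size + ALIGN - 1;
        r->size = size;
        return (0);
}

static void
region_detach_sketch(struct region_sketch *r)
{
        /* Restore the original pointer before handing it back. */
        free(r->addr_orig);
        r->addr = r->addr_orig = NULL;
        r->size = r->size_orig = 0;
}

The patch's shared-memory path cannot over-allocate, so it instead keeps the size the system returned and shrinks rp->size by however far the address moved; the heap sketch pads up front so the caller still gets the full size.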
- DB_GLOBAL(j_rename)(old, new) : rename(old, new); - } while (ret != 0 && - ((ret = __os_get_errno()) == EINTR || ret == EBUSY) && - ++retries < DB_RETRY); + RETRY_CHK((DB_GLOBAL(j_rename) != NULL ? + DB_GLOBAL(j_rename)(old, new) : rename(old, new)), ret); - if (ret != 0 && flags == 0) + /* + * If "silent" is not set, then errors are OK and we should not output + * an error message. + */ + if (!silent && ret != 0) __db_err(dbenv, "rename %s %s: %s", old, new, strerror(ret)); return (ret); } diff --git a/db/os/os_root.c b/db/os/os_root.c index 9337700ac..bf4702ed2 100644 --- a/db/os/os_root.c +++ b/db/os/os_root.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_root.c,v 11.8 2004/01/28 03:36:18 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_root.c,v 11.7 2003/01/08 05:29:32 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include diff --git a/db/os/os_rpath.c b/db/os/os_rpath.c index 1ebff0fc1..28a0a4826 100644 --- a/db/os/os_rpath.c +++ b/db/os/os_rpath.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_rpath.c,v 11.9 2004/01/28 03:36:18 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_rpath.c,v 11.8 2003/01/08 05:29:34 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif diff --git a/db/os/os_rw.c b/db/os/os_rw.c index df3a88615..5519f35e4 100644 --- a/db/os/os_rw.c +++ b/db/os/os_rw.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_rw.c,v 11.39 2004/09/17 22:00:31 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_rw.c,v 11.30 2003/05/23 21:19:05 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include @@ -31,7 +29,7 @@ static int __os_physwrite __P((DB_ENV *, DB_FH *, void *, size_t, size_t *)); * Do an I/O. * * PUBLIC: int __os_io __P((DB_ENV *, - * PUBLIC: int, DB_FH *, db_pgno_t, size_t, u_int8_t *, size_t *)); + * PUBLIC: int, DB_FH *, db_pgno_t, u_int32_t, u_int8_t *, size_t *)); */ int __os_io(dbenv, op, fhp, pgno, pagesize, buf, niop) @@ -39,9 +37,13 @@ __os_io(dbenv, op, fhp, pgno, pagesize, buf, niop) int op; DB_FH *fhp; db_pgno_t pgno; - size_t pagesize, *niop; + u_int32_t pagesize; u_int8_t *buf; + size_t *niop; { +#if defined(HAVE_PREAD) && defined(HAVE_PWRITE) + ssize_t nio; +#endif int ret; /* Check for illegal usage. */ @@ -52,7 +54,9 @@ __os_io(dbenv, op, fhp, pgno, pagesize, buf, niop) case DB_IO_READ: if (DB_GLOBAL(j_read) != NULL) goto slow; - *niop = pread(fhp->fd, buf, pagesize, (off_t)pgno * pagesize); + nio = DB_GLOBAL(j_pread) != NULL ? DB_GLOBAL(j_pread) + (fhp->fd, buf, pagesize, (off_t)pgno * pagesize) : + pread(fhp->fd, buf, pagesize, (off_t)pgno * pagesize); break; case DB_IO_WRITE: if (DB_GLOBAL(j_write) != NULL) @@ -61,11 +65,17 @@ __os_io(dbenv, op, fhp, pgno, pagesize, buf, niop) if (__os_fs_notzero()) goto slow; #endif - *niop = pwrite(fhp->fd, buf, pagesize, (off_t)pgno * pagesize); + nio = DB_GLOBAL(j_pwrite) != NULL ? 
DB_GLOBAL(j_pwrite) + (fhp->fd, buf, pagesize, (off_t)pgno * pagesize) : + pwrite(fhp->fd, buf, pagesize, (off_t)pgno * pagesize); break; + default: + return (EINVAL); } - if (*niop == (size_t)pagesize) + if (nio == (ssize_t)pagesize) { + *niop = pagesize; return (0); + } slow: #endif MUTEX_THREAD_LOCK(dbenv, fhp->mutexp); @@ -80,6 +90,9 @@ slow: case DB_IO_WRITE: ret = __os_write(dbenv, fhp, buf, pagesize, niop); break; + default: + ret = EINVAL; + break; } err: MUTEX_THREAD_UNLOCK(dbenv, fhp->mutexp); @@ -104,31 +117,36 @@ __os_read(dbenv, fhp, addr, len, nrp) { size_t offset; ssize_t nr; - int ret, retries; + int ret; u_int8_t *taddr; + ret = 0; + /* Check for illegal usage. */ DB_ASSERT(F_ISSET(fhp, DB_FH_OPENED) && fhp->fd != -1); - retries = 0; - for (taddr = addr, - offset = 0; offset < len; taddr += nr, offset += nr) { -retry: if ((nr = DB_GLOBAL(j_read) != NULL ? - DB_GLOBAL(j_read)(fhp->fd, taddr, len - offset) : - read(fhp->fd, taddr, len - offset)) < 0) { + if (DB_GLOBAL(j_read) != NULL) { + *nrp = len; + if (DB_GLOBAL(j_read)(fhp->fd, addr, len) != (ssize_t)len) { ret = __os_get_errno(); - if ((ret == EINTR || ret == EBUSY) && - ++retries < DB_RETRY) - goto retry; - __db_err(dbenv, "read: 0x%x, %lu: %s", - (u_int)taddr, (u_long)len-offset, strerror(ret)); - return (ret); + __db_err(dbenv, "read: %#lx, %lu: %s", + P_TO_ULONG(addr), (u_long)len, strerror(ret)); } - if (nr == 0) + return (ret); + } + + for (taddr = addr, offset = 0; + offset < len; taddr += nr, offset += (u_int32_t)nr) { + RETRY_CHK(((nr = read( + fhp->fd, taddr, len - offset)) < 0 ? 1 : 0), ret); + if (nr == 0 || ret != 0) break; } - *nrp = taddr - (u_int8_t *)addr; - return (0); + *nrp = (size_t)(taddr - (u_int8_t *)addr); + if (ret != 0) + __db_err(dbenv, "read: %#lx, %lu: %s", + P_TO_ULONG(taddr), (u_long)len - offset, strerror(ret)); + return (ret); } /* @@ -173,9 +191,11 @@ __os_physwrite(dbenv, fhp, addr, len, nwp) { size_t offset; ssize_t nw; - int ret, retries; + int ret; u_int8_t *taddr; + ret = 0; + #if defined(HAVE_FILESYSTEM_NOTZERO) && defined(DIAGNOSTIC) if (__os_fs_notzero()) { struct stat sb; @@ -187,22 +207,28 @@ __os_physwrite(dbenv, fhp, addr, len, nwp) } #endif - retries = 0; - for (taddr = addr, - offset = 0; offset < len; taddr += nw, offset += nw) -retry: if ((nw = DB_GLOBAL(j_write) != NULL ? - DB_GLOBAL(j_write)(fhp->fd, taddr, len - offset) : - write(fhp->fd, taddr, len - offset)) < 0) { + if (DB_GLOBAL(j_write) != NULL) { + *nwp = len; + if (DB_GLOBAL(j_write)(fhp->fd, addr, len) != (ssize_t)len) { ret = __os_get_errno(); - if ((ret == EINTR || ret == EBUSY) && - ++retries < DB_RETRY) - goto retry; - __db_err(dbenv, "write: 0x%x, %lu: %s", - (u_int)taddr, (u_long)len-offset, strerror(ret)); - return (ret); + __db_err(dbenv, "write: %#lx, %lu: %s", + P_TO_ULONG(addr), (u_long)len, strerror(ret)); } + return (ret); + } + + for (taddr = addr, offset = 0; + offset < len; taddr += nw, offset += (u_int32_t)nw) { + RETRY_CHK(((nw = write( + fhp->fd, taddr, len - offset)) < 0 ? 1 : 0), ret); + if (ret != 0) + break; + } *nwp = len; - return (0); + if (ret != 0) + __db_err(dbenv, "write: %#lx, %lu: %s", + P_TO_ULONG(taddr), (u_long)len - offset, strerror(ret)); + return (ret); } #ifdef HAVE_FILESYSTEM_NOTZERO diff --git a/db/os/os_seek.c b/db/os/os_seek.c index c20c27514..482bb6c6c 100644 --- a/db/os/os_seek.c +++ b/db/os/os_seek.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. 
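The rewritten __os_read()/__os_physwrite() above keep the short-transfer loop (read and write may legally return less than requested) but move the EINTR/EBUSY handling into RETRY_CHK and take the j_read/j_write hooks out of the hot loop. A sketch of the read side without the hooks (read_full is an illustrative name):

#include <errno.h>
#include <stddef.h>
#include <unistd.h>

static int
read_full(int fd, void *addr, size_t len, size_t *nrp)
{
        size_t offset;
        ssize_t nr;
        unsigned char *taddr;

        for (taddr = addr, offset = 0;
            offset < len; taddr += nr, offset += (size_t)nr) {
                if ((nr = read(fd, taddr, len - offset)) < 0) {
                        if (errno == EINTR) {
                                nr = 0;         /* retry, no progress made */
                                continue;
                        }
                        *nrp = offset;
                        return (errno);
                }
                if (nr == 0)                    /* EOF */
                        break;
        }
        *nrp = offset;
        return (0);
}

Callers still have to compare *nrp against the length they asked for; hitting end-of-file is not an error at this layer.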
All rights reserved. + * + * $Id: os_seek.c,v 11.26 2004/09/17 22:00:31 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_seek.c,v 11.23 2003/02/16 15:54:06 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -26,20 +24,20 @@ static const char revid[] = "$Id: os_seek.c,v 11.23 2003/02/16 15:54:06 bostic E * Seek to a page/byte offset in the file. * * PUBLIC: int __os_seek __P((DB_ENV *, - * PUBLIC: DB_FH *, size_t, db_pgno_t, u_int32_t, int, DB_OS_SEEK)); + * PUBLIC: DB_FH *, u_int32_t, db_pgno_t, u_int32_t, int, DB_OS_SEEK)); */ int __os_seek(dbenv, fhp, pgsize, pageno, relative, isrewind, db_whence) DB_ENV *dbenv; DB_FH *fhp; - size_t pgsize; + u_int32_t pgsize; db_pgno_t pageno; u_int32_t relative; int isrewind; DB_OS_SEEK db_whence; { off_t offset; - int ret, retries, whence; + int ret, whence; /* Check for illegal usage. */ DB_ASSERT(F_ISSET(fhp, DB_FH_OPENED) && fhp->fd != -1); @@ -58,20 +56,14 @@ __os_seek(dbenv, fhp, pgsize, pageno, relative, isrewind, db_whence) return (EINVAL); } + offset = (off_t)pgsize * pageno + relative; + if (isrewind) + offset = -offset; + if (DB_GLOBAL(j_seek) != NULL) - ret = DB_GLOBAL(j_seek)(fhp->fd, - pgsize, pageno, relative, isrewind, whence); - else { - offset = (off_t)pgsize * pageno + relative; - if (isrewind) - offset = -offset; - retries = 0; - do { - ret = lseek(fhp->fd, offset, whence) == -1 ? - __os_get_errno() : 0; - } while ((ret == EINTR || ret == EBUSY) && - ++retries < DB_RETRY); - } + ret = DB_GLOBAL(j_seek)(fhp->fd, offset, whence); + else + RETRY_CHK((lseek(fhp->fd, offset, whence) == -1 ? 1 : 0), ret); if (ret == 0) { fhp->pgsize = pgsize; diff --git a/db/os/os_sleep.c b/db/os/os_sleep.c index bef69f624..da3e97280 100644 --- a/db/os/os_sleep.c +++ b/db/os/os_sleep.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_sleep.c,v 11.23 2004/03/27 19:09:13 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_sleep.c,v 11.18 2003/05/14 17:01:23 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -45,9 +43,9 @@ static const char revid[] = "$Id: os_sleep.c,v 11.18 2003/05/14 17:01:23 bostic * __os_sleep -- * Yield the processor for a period of time. * - * PUBLIC: int __os_sleep __P((DB_ENV *, u_long, u_long)); + * PUBLIC: void __os_sleep __P((DB_ENV *, u_long, u_long)); */ -int +void __os_sleep(dbenv, secs, usecs) DB_ENV *dbenv; u_long secs, usecs; /* Seconds and microseconds. */ @@ -59,8 +57,10 @@ __os_sleep(dbenv, secs, usecs) for (; usecs >= 1000000; usecs -= 1000000) ++secs; - if (DB_GLOBAL(j_sleep) != NULL) - return (DB_GLOBAL(j_sleep)(secs, usecs)); + if (DB_GLOBAL(j_sleep) != NULL) { + (void)DB_GLOBAL(j_sleep)(secs, usecs); + return; + } /* * It's important that we yield the processor here so that other @@ -68,11 +68,11 @@ __os_sleep(dbenv, secs, usecs) * * Sheer raving paranoia -- don't select for 0 time. */ - t.tv_sec = secs; + t.tv_sec = (long)secs; if (secs == 0 && usecs == 0) t.tv_usec = 1; else - t.tv_usec = usecs; + t.tv_usec = (long)usecs; /* * We don't catch interrupts and restart the system call here, unlike @@ -81,12 +81,7 @@ __os_sleep(dbenv, secs, usecs) * we want the utility to see the signal and quit. This assumes it's * always OK for DB to sleep for less time than originally scheduled. 
*/ - if ((ret = select(0, NULL, NULL, NULL, &t)) != 0) - if ((ret = __os_get_errno()) == EINTR) - ret = 0; - - if (ret != 0) - __db_err(dbenv, "select: %s", strerror(ret)); - - return (ret); + if (select(0, NULL, NULL, NULL, &t) == -1) + if ((ret = __os_get_errno()) != EINTR) + __db_err(dbenv, "select: %s", strerror(ret)); } diff --git a/db/os/os_spin.c b/db/os/os_spin.c index 6cd90c274..23d4d71ae 100644 --- a/db/os/os_spin.c +++ b/db/os/os_spin.c @@ -1,23 +1,21 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_spin.c,v 11.20 2004/06/23 14:10:56 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_spin.c,v 11.17 2003/11/07 16:30:57 sue Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #if defined(HAVE_PSTAT_GETDYNAMIC) #include #endif -#include +#include /* Needed for sysconf on Solaris. */ #include #endif @@ -109,5 +107,5 @@ __os_yield(dbenv, usecs) #ifdef HAVE_VXWORKS taskDelay(1); #endif - (void)__os_sleep(dbenv, 0, usecs); + __os_sleep(dbenv, 0, usecs); } diff --git a/db/os/os_stat.c b/db/os/os_stat.c index cdc362a3b..92cea98c0 100644 --- a/db/os/os_stat.c +++ b/db/os/os_stat.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_stat.c,v 11.27 2004/07/06 13:55:48 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_stat.c,v 11.24 2003/02/16 15:54:06 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include @@ -31,25 +29,17 @@ __os_exists(path, isdirp) const char *path; int *isdirp; { - int ret, retries; struct stat sb; + int ret; if (DB_GLOBAL(j_exists) != NULL) return (DB_GLOBAL(j_exists)(path, isdirp)); - retries = 0; - do { - ret = #ifdef HAVE_VXWORKS - stat((char *)path, &sb); + RETRY_CHK((stat((char *)path, &sb)), ret); #else - stat(path, &sb); + RETRY_CHK((stat(path, &sb)), ret); #endif - if (ret != 0) - ret = __os_get_errno(); - } while ((ret == EINTR || ret == EBUSY) && - ++retries < DB_RETRY); - if (ret != 0) return (ret); @@ -82,8 +72,8 @@ __os_ioinfo(dbenv, path, fhp, mbytesp, bytesp, iosizep) DB_FH *fhp; u_int32_t *mbytesp, *bytesp, *iosizep; { - int ret, retries; struct stat sb; + int ret; if (DB_GLOBAL(j_ioinfo) != NULL) return (DB_GLOBAL(j_ioinfo)(path, @@ -92,12 +82,8 @@ __os_ioinfo(dbenv, path, fhp, mbytesp, bytesp, iosizep) /* Check for illegal usage. */ DB_ASSERT(F_ISSET(fhp, DB_FH_OPENED) && fhp->fd != -1); - retries = 0; -retry: - if (fstat(fhp->fd, &sb) == -1) { - if (((ret = __os_get_errno()) == EINTR || ret == EBUSY) && - ++retries < DB_RETRY) - goto retry; + RETRY_CHK((fstat(fhp->fd, &sb)), ret); + if (ret != 0) { __db_err(dbenv, "fstat: %s", strerror(ret)); return (ret); } diff --git a/db/os/os_tmpdir.c b/db/os/os_tmpdir.c index eff37443d..c1abf3cff 100644 --- a/db/os/os_tmpdir.c +++ b/db/os/os_tmpdir.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1998-2003 + * Copyright (c) 1998-2004 * Sleepycat Software. All rights reserved. 
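__os_sleep() above keeps its select()-based wait: it normalizes microseconds, refuses to select for a zero timeout, and deliberately does not restart after EINTR so that a signal delivered to a utility cuts the sleep short. A minimal sketch of the same behavior (sleep_sketch is an illustrative name):

#include <sys/select.h>

static void
sleep_sketch(unsigned long secs, unsigned long usecs)
{
        struct timeval t;

        for (; usecs >= 1000000; usecs -= 1000000)      /* normalize */
                ++secs;

        t.tv_sec = (long)secs;
        t.tv_usec = (secs == 0 && usecs == 0) ? 1 : (long)usecs;

        /* A signal (EINTR) simply ends the sleep early; that is fine. */
        (void)select(0, NULL, NULL, NULL, &t);
}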
+ * + * $Id: os_tmpdir.c,v 11.24 2004/10/05 14:55:33 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_tmpdir.c,v 11.20 2003/01/08 05:29:41 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -98,17 +96,25 @@ __os_tmpdir(dbenv, flags) #endif #ifdef DB_WIN32 /* Get the path to the temporary directory. */ - {int len; - char *eos, temp[MAXPATHLEN + 1]; + { + int ret; + _TCHAR tpath[MAXPATHLEN + 1]; + char *path, *eos; - if ((len = GetTempPath(sizeof(temp) - 1, temp)) > 2) { - eos = &temp[len]; - *eos-- = '\0'; + if (GetTempPath(MAXPATHLEN, tpath) > 2) { + FROM_TSTRING(dbenv, tpath, path, ret); + if (ret != 0) + return (ret); + eos = path + strlen(path) - 1; if (*eos == '\\' || *eos == '/') *eos = '\0'; - if (__os_exists(temp, &isdir) == 0 && isdir != 0) - return (__os_strdup(dbenv, - temp, &dbenv->db_tmp_dir)); + if (__os_exists(path, &isdir) == 0 && isdir) { + ret = __os_strdup(dbenv, + path, &dbenv->db_tmp_dir); + FREE_STRING(dbenv, path); + return (ret); + } + FREE_STRING(dbenv, path); } } #endif diff --git a/db/os/os_truncate.c b/db/os/os_truncate.c new file mode 100644 index 000000000..0367fde73 --- /dev/null +++ b/db/os/os_truncate.c @@ -0,0 +1,58 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2004 + * Sleepycat Software. All rights reserved. + * + * $Id: os_truncate.c,v 11.7 2004/09/17 22:00:31 mjc Exp $ + */ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include + +#include +#include +#endif + +#include "db_int.h" + +/* + * __os_truncate -- + * Truncate the file. + * + * PUBLIC: int __os_truncate __P((DB_ENV *, DB_FH *, db_pgno_t, u_int32_t)); + */ +int +__os_truncate(dbenv, fhp, pgno, pgsize) + DB_ENV *dbenv; + DB_FH *fhp; + db_pgno_t pgno; + u_int32_t pgsize; +{ + off_t offset; + int ret; + + /* + * Truncate a file so that "pgno" is discarded from the end of the + * file. + */ + offset = (off_t)pgsize * pgno; + + if (DB_GLOBAL(j_ftruncate) != NULL) + ret = DB_GLOBAL(j_ftruncate)(fhp->fd, offset); + else { +#ifdef HAVE_FTRUNCATE + RETRY_CHK((ftruncate(fhp->fd, offset)), ret); +#else + ret = DB_OPNOTSUP; +#endif + } + + if (ret != 0) + __db_err(dbenv, + "ftruncate: %lu: %s", (u_long)offset, strerror(ret)); + + return (ret); +} diff --git a/db/os/os_unlink.c b/db/os/os_unlink.c index ab70f6eb5..228e06d39 100644 --- a/db/os/os_unlink.c +++ b/db/os/os_unlink.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_unlink.c,v 11.28 2004/07/06 13:55:48 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_unlink.c,v 11.26 2003/01/08 05:29:43 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -67,45 +65,33 @@ __os_unlink(dbenv, path) DB_ENV *dbenv; const char *path; { - int ret, retries; + int ret; - retries = 0; -retry: ret = DB_GLOBAL(j_unlink) != NULL ? 
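The new os_truncate.c above discards pages from the end of a file by computing the byte offset pgno * pgsize and calling ftruncate (or the registered j_ftruncate hook), returning DB_OPNOTSUP where the platform has neither. A POSIX-only sketch (truncate_at_page is an illustrative name; casting to off_t before the multiply matters on 32-bit systems with large files):

#include <errno.h>
#include <sys/types.h>
#include <unistd.h>

static int
truncate_at_page(int fd, unsigned long pgno, unsigned long pgsize)
{
        off_t offset;
        int ret;

        offset = (off_t)pgsize * (off_t)pgno;
        do {
                ret = (ftruncate(fd, offset) == 0 ? 0 : errno);
        } while (ret == EINTR);

        return (ret);
}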
- DB_GLOBAL(j_unlink)(path) : + if (DB_GLOBAL(j_unlink) != NULL) + ret = DB_GLOBAL(j_unlink)(path); + else #ifdef HAVE_VXWORKS - unlink((char *)path); + RETRY_CHK((unlink((char *)path)), ret); #else - unlink(path); + RETRY_CHK((unlink(path)), ret); #endif - if (ret == -1) { - if (((ret = __os_get_errno()) == EINTR || ret == EBUSY) && - ++retries < DB_RETRY) - goto retry; - /* - * XXX - * We really shouldn't be looking at this value ourselves, - * but ENOENT usually signals that a file is missing, and - * we attempt to unlink things (such as v. 2.x environment - * regions, in DB_ENV->remove) that we're expecting not to - * be there. Reporting errors in these cases is annoying. - */ -#ifdef HAVE_VXWORKS - /* - * XXX - * The results of unlink are file system driver specific - * on VxWorks. In the case of removing a file that did - * not exist, some, at least, return an error, but with - * an errno of 0, not ENOENT. - * - * Code below falls through to original if-statement only - * we didn't get a "successful" error. - */ - if (ret != 0) - /* FALLTHROUGH */ -#endif - if (ret != ENOENT) - __db_err(dbenv, "unlink: %s: %s", path, strerror(ret)); - } + /* + * !!! + * The results of unlink are file system driver specific on VxWorks. + * In the case of removing a file that did not exist, some, at least, + * return an error, but with an errno of 0, not ENOENT. We do not + * have to test for the explicitly, the RETRY_CHK macro resets "ret" + * to be the errno, and so we'll just slide right on through. + * + * XXX + * We shouldn't be testing for an errno of ENOENT here, but ENOENT + * signals that a file is missing, and we attempt to unlink things + * (such as v. 2.x environment regions, in DB_ENV->remove) that we + * are expecting not to be there. Reporting errors in these cases + * is annoying. + */ + if (ret != 0 && ret != ENOENT) + __db_err(dbenv, "unlink: %s: %s", path, strerror(ret)); return (ret); } diff --git a/db/os_vxworks/os_vx_abs.c b/db/os_vxworks/os_vx_abs.c index 87dbbdb8e..34a1fe37a 100644 --- a/db/os_vxworks/os_vx_abs.c +++ b/db/os_vxworks/os_vx_abs.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_vx_abs.c,v 1.9 2004/01/28 03:36:19 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_vx_abs.c,v 1.8 2003/01/08 05:32:23 bostic Exp $"; -#endif /* not lint */ - #include "db_int.h" #include "iosLib.h" diff --git a/db/os_vxworks/os_vx_config.c b/db/os_vxworks/os_vx_config.c index 2535640bf..b90a4365d 100644 --- a/db/os_vxworks/os_vx_config.c +++ b/db/os_vxworks/os_vx_config.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_vx_config.c,v 1.6 2004/01/28 03:36:19 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_vx_config.c,v 1.5 2003/01/08 05:32:33 bostic Exp $"; -#endif /* not lint */ - #include "db_int.h" /* diff --git a/db/os_vxworks/os_vx_map.c b/db/os_vxworks/os_vx_map.c index fc8ee52f0..416f4cc8c 100644 --- a/db/os_vxworks/os_vx_map.c +++ b/db/os_vxworks/os_vx_map.c @@ -1,19 +1,17 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1998-2003 + * Copyright (c) 1998-2004 * Sleepycat Software. All rights reserved. 
* * This code is derived from software contributed to Sleepycat Software by * Frederick G.M. Roeber of Netscape Communications Corp. + * + * $Id: os_vx_map.c,v 1.23 2004/01/28 03:36:19 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_vx_map.c,v 1.22 2003/01/08 05:32:38 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include diff --git a/db/os_win32/os_abs.c b/db/os_win32/os_abs.c index c495235c6..ab05b0a7e 100644 --- a/db/os_win32/os_abs.c +++ b/db/os_win32/os_abs.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_abs.c,v 11.7 2004/01/28 03:36:19 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_abs.c,v 11.6 2003/01/08 05:33:51 bostic Exp $"; -#endif /* not lint */ - #include "db_int.h" /* diff --git a/db/os_win32/os_clock.c b/db/os_win32/os_clock.c index 9db0548ca..c77076691 100644 --- a/db/os_win32/os_clock.c +++ b/db/os_win32/os_clock.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_clock.c,v 1.11 2004/06/28 13:57:18 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_clock.c,v 1.8 2003/01/08 05:33:57 bostic Exp $"; -#endif /* not lint */ - #include #include #include @@ -21,7 +19,7 @@ static const char revid[] = "$Id: os_clock.c,v 1.8 2003/01/08 05:33:57 bostic Ex * __os_clock -- * Return the current time-of-day clock in seconds and microseconds. */ -int +void __os_clock(dbenv, secsp, usecsp) DB_ENV *dbenv; u_int32_t *secsp, *usecsp; /* Seconds and microseconds. */ @@ -33,5 +31,4 @@ __os_clock(dbenv, secsp, usecsp) *secsp = (u_int32_t)now.time; if (usecsp != NULL) *usecsp = now.millitm * 1000; - return (0); } diff --git a/db/os_win32/os_config.c b/db/os_win32/os_config.c index 87dfe3422..41daebd37 100644 --- a/db/os_win32/os_config.c +++ b/db/os_win32/os_config.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_config.c,v 11.18 2004/02/09 20:54:27 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_config.c,v 11.15 2003/01/08 05:34:00 bostic Exp $"; -#endif /* not lint */ - #include "db_int.h" /* @@ -44,10 +42,47 @@ __os_is_winnt() int __os_fs_notzero() { + static int __os_notzero = -1; + OSVERSIONINFO osvi; + /* * Windows/NT zero-fills pages that were never explicitly written to - * the file. Windows 95/98 gives you random garbage, and that breaks + * the file. Note however that this is *NOT* documented. In fact, the + * Win32 documentation makes it clear that there are no guarantees that + * uninitialized bytes will be zeroed: + * + * If the file is extended, the contents of the file between the old + * EOF position and the new position are not defined. + * + * Experiments confirm that NT/2K/XP all zero fill for both NTFS and + * FAT32. Cygwin also relies on this behavior. This is the relevant + * comment from Cygwin: + * + * Oops, this is the bug case - Win95 uses whatever is on the disk + * instead of some known (safe) value, so we must seek back and fill + * in the gap with zeros. 
- DJ + * Note: this bug doesn't happen on NT4, even though the + * documentation for WriteFile() says that it *may* happen on any OS. + * + * We're making a bet, here, but we made it a long time ago and haven't + * yet seen any evidence that it was wrong. + * + * Windows 95/98 and On-Time give random garbage, and that breaks * Berkeley DB. + * + * The value of __os_notzero is computed only once, and cached to + * avoid the overhead of repeated calls to GetVersion(). */ - return (__os_is_winnt() ? 0 : 1); + if (__os_notzero == -1) { + if (__os_is_winnt()) { + osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); + GetVersionEx(&osvi); + if (_tcscmp(osvi.szCSDVersion, _T("RTTarget-32")) == 0) + __os_notzero = 1; /* On-Time */ + else + __os_notzero = 0; /* Windows/NT */ + } else + __os_notzero = 1; /* Not Windows/NT */ + } + return (__os_notzero); } diff --git a/db/os_win32/os_dir.c b/db/os_win32/os_dir.c index 8f329fbd9..e0abbb6ee 100644 --- a/db/os_win32/os_dir.c +++ b/db/os_win32/os_dir.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_dir.c,v 11.20 2004/10/13 19:12:17 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_dir.c,v 11.14 2003/02/28 17:25:26 bostic Exp $"; -#endif /* not lint */ - #include "db_int.h" /* @@ -24,45 +22,62 @@ __os_dirlist(dbenv, dir, namesp, cntp) char ***namesp; int *cntp; { - struct _finddata_t fdata; -#ifdef _WIN64 - intptr_t dirhandle; -#else - long dirhandle; -#endif - int arraysz, cnt, finished, ret; - char **names, filespec[MAXPATHLEN]; + HANDLE dirhandle; + WIN32_FIND_DATA fdata; + int arraysz, cnt, ret; + char **names, *onename; + _TCHAR tfilespec[MAXPATHLEN + 1]; if (DB_GLOBAL(j_dirlist) != NULL) return (DB_GLOBAL(j_dirlist)(dir, namesp, cntp)); - (void)snprintf(filespec, - sizeof(filespec), "%s%c*", dir, PATH_SEPARATOR[0]); - if ((dirhandle = _findfirst(filespec, &fdata)) == -1) + (void)_sntprintf(tfilespec, MAXPATHLEN, + _T("%hs%hc*"), dir, PATH_SEPARATOR[0]); + if ((dirhandle = FindFirstFile(tfilespec, &fdata)) + == INVALID_HANDLE_VALUE) return (__os_get_errno()); names = NULL; - finished = 0; - for (arraysz = cnt = 0; finished != 1; ++cnt) { + arraysz = cnt = ret = 0; + for (;;) { if (cnt >= arraysz) { arraysz += 100; if ((ret = __os_realloc(dbenv, arraysz * sizeof(names[0]), &names)) != 0) - goto nomem; + goto err; + } + /* + * FROM_TSTRING doesn't necessarily allocate new memory, so we + * must do that explicitly. Unfortunately, when compiled with + * UNICODE, we'll copy twice. 
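The Windows __os_dirlist() above moves from the CRT _findfirst()/_findnext() API to FindFirstFile()/FindNextFile(), converting each cFileName out of the _TCHAR encoding. A bare-bones ANSI sketch that prints names instead of accumulating them (dirlist_sketch is illustrative, C99 snprintf is assumed, and the real code grows a names array and frees it on error):

#include <windows.h>
#include <stdio.h>

static int
dirlist_sketch(const char *dir)
{
        WIN32_FIND_DATAA fdata;
        HANDLE h;
        DWORD err;
        char spec[MAX_PATH + 3];

        (void)snprintf(spec, sizeof(spec), "%s\\*", dir);
        if ((h = FindFirstFileA(spec, &fdata)) == INVALID_HANDLE_VALUE)
                return ((int)GetLastError());

        do
                printf("%s\n", fdata.cFileName);
        while (FindNextFileA(h, &fdata));

        err = GetLastError();
        (void)FindClose(h);
        return (err == ERROR_NO_MORE_FILES ? 0 : (int)err);
}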
+ */ + FROM_TSTRING(dbenv, fdata.cFileName, onename, ret); + if (ret != 0) + goto err; + ret = __os_strdup(dbenv, onename, &names[cnt]); + FREE_STRING(dbenv, onename); + if (ret != 0) + goto err; + cnt++; + if (!FindNextFile(dirhandle, &fdata)) { + if (GetLastError() == ERROR_NO_MORE_FILES) + break; + else { + ret = __os_get_errno(); + goto err; + } } - if ((ret = __os_strdup(dbenv, fdata.name, &names[cnt])) != 0) - goto nomem; - if (_findnext(dirhandle, &fdata) != 0) - finished = 1; } - _findclose(dirhandle); - *namesp = names; - *cntp = cnt; - return (0); +err: if (!FindClose(dirhandle) && ret == 0) + ret = __os_get_errno(); -nomem: if (names != NULL) + if (ret == 0) { + *namesp = names; + *cntp = cnt; + } else if (names != NULL) __os_dirfree(dbenv, names, cnt); + return (ret); } diff --git a/db/os_win32/os_errno.c b/db/os_win32/os_errno.c index c71bac1a6..1af2824cc 100644 --- a/db/os_win32/os_errno.c +++ b/db/os_win32/os_errno.c @@ -1,65 +1,46 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_errno.c,v 11.14 2004/07/06 21:06:38 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_errno.c,v 11.12 2003/11/16 20:10:19 bostic Exp $"; -#endif /* not lint */ - #include "db_int.h" /* - * __os_get_errno -- - * Return the value of errno. + * __os_get_errno_ret_zero -- + * Return the value of errno, even if it's zero. */ int -__os_get_errno() +__os_get_errno_ret_zero() { /* This routine must be able to return the same value repeatedly. */ return (errno); } /* - * __os_set_errno -- - * Set the value of errno. - */ -void -__os_set_errno(evalue) - int evalue; -{ - errno = evalue; -} - -/* - * __os_win32_errno -- + * __os_get_errno -- * Return the last Windows error as an errno. * We give generic error returns: * * EFAULT means Win* call failed, * and GetLastError provided no extra info. * - * EIO means error on Win* call. + * EIO means error on Win* call, * and we were unable to provide a meaningful errno for this Windows * error. More information is only available by setting a breakpoint * here. - * - * PUBLIC: #if defined(DB_WIN32) - * PUBLIC: int __os_win32_errno __P((void)); - * PUBLIC: #endif */ int -__os_win32_errno(void) +__os_get_errno() { DWORD last_error; int ret; - /* Ignore errno - we used to check it here. */ - last_error = GetLastError(); /* @@ -144,3 +125,14 @@ __os_win32_errno(void) return (ret); } + +/* + * __os_set_errno -- + * Set the value of errno. + */ +void +__os_set_errno(evalue) + int evalue; +{ + errno = evalue; +} diff --git a/db/os_win32/os_fid.c b/db/os_win32/os_fid.c index 473bb3a48..69df865d4 100644 --- a/db/os_win32/os_fid.c +++ b/db/os_win32/os_fid.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
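With __os_win32_errno() renamed to __os_get_errno() above, the translation of GetLastError() values into errno codes becomes the one place the rest of the library gets its errors from on Windows. The mapping table itself is unchanged context and not shown in this hunk; the following cases are only an illustration of its general shape, not the library's full table:

#include <windows.h>
#include <errno.h>

static int
win_error_to_errno_sketch(void)
{
        switch (GetLastError()) {
        case ERROR_FILE_NOT_FOUND:
        case ERROR_PATH_NOT_FOUND:
                return (ENOENT);
        case ERROR_ACCESS_DENIED:
                return (EACCES);
        case ERROR_FILE_EXISTS:
        case ERROR_ALREADY_EXISTS:
                return (EEXIST);
        case ERROR_NOT_ENOUGH_MEMORY:
                return (ENOMEM);
        default:
                return (EIO);   /* Win32 call failed, no better mapping */
        }
}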
+ * + * $Id: os_fid.c,v 11.19 2004/07/06 21:06:38 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_fid.c,v 11.17 2003/02/16 15:54:32 bostic Exp $"; -#endif /* not lint */ - #include "db_int.h" #define SERIAL_INIT 0 @@ -76,7 +74,7 @@ __os_fileid(dbenv, fname, unique_okay, fidp) /* File open, get its info */ if ((retval = GetFileInformationByHandle(fhp->handle, &fi)) == FALSE) - ret = __os_win32_errno(); + ret = __os_get_errno(); (void)__os_closehandle(dbenv, fhp); if (retval == FALSE) diff --git a/db/os_win32/os_fsync.c b/db/os_win32/os_fsync.c index 75281531c..cc188a2fe 100644 --- a/db/os_win32/os_fsync.c +++ b/db/os_win32/os_fsync.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_fsync.c,v 11.21 2004/07/06 21:06:38 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_fsync.c,v 11.18 2003/02/16 15:54:41 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -30,8 +28,7 @@ __os_fsync(dbenv, fhp) DB_ENV *dbenv; DB_FH *fhp; { - BOOL success; - int ret, retries; + int ret; /* * Do nothing if the file descriptor has been marked as not requiring @@ -40,18 +37,10 @@ __os_fsync(dbenv, fhp) if (F_ISSET(fhp, DB_FH_NOSYNC)) return (0); - ret = retries = 0; - do { - if (DB_GLOBAL(j_fsync) != NULL) - success = (DB_GLOBAL(j_fsync)(fhp->fd) == 0); - else { - success = FlushFileBuffers(fhp->handle); - if (!success) - __os_set_errno(__os_win32_errno()); - } - } while (!success && - ((ret = __os_get_errno()) == EINTR || ret == EBUSY) && - ++retries < DB_RETRY); + if (DB_GLOBAL(j_fsync) != NULL) + ret = DB_GLOBAL(j_fsync)(fhp->fd); + else + RETRY_CHK((!FlushFileBuffers(fhp->handle)), ret); if (ret != 0) __db_err(dbenv, "fsync %s", strerror(ret)); diff --git a/db/os_win32/os_handle.c b/db/os_win32/os_handle.c index b77a574fc..4953afd31 100644 --- a/db/os_win32/os_handle.c +++ b/db/os_win32/os_handle.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1998-2003 + * Copyright (c) 1998-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_handle.c,v 11.39 2004/07/06 21:06:38 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_handle.c,v 11.34 2003/04/24 16:17:06 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -69,12 +67,13 @@ __os_openhandle(dbenv, name, flags, mode, fhpp) * if we can't open a database, an inability to open a * log file is cause for serious dismay. */ - (void)__os_sleep(dbenv, nrepeat * 2, 0); + __os_sleep(dbenv, nrepeat * 2, 0); break; + case EAGAIN: case EBUSY: case EINTR: /* - * If it was an EINTR or EBUSY, retry immediately, + * If an EAGAIN, EBUSY or EINTR, retry immediately for * DB_RETRY times. */ if (++retries < DB_RETRY) @@ -100,7 +99,6 @@ __os_closehandle(dbenv, fhp) DB_ENV *dbenv; DB_FH *fhp; { - BOOL success; int ret; ret = 0; @@ -110,20 +108,21 @@ __os_closehandle(dbenv, fhp) * file. 
*/ if (F_ISSET(fhp, DB_FH_OPENED)) { - do { - if (DB_GLOBAL(j_close) != NULL) - success = (DB_GLOBAL(j_close)(fhp->fd) == 0); - else if (fhp->handle != INVALID_HANDLE_VALUE) { - success = CloseHandle(fhp->handle); - if (!success) - __os_set_errno(__os_win32_errno()); - } - else - success = (close(fhp->fd) == 0); - } while (!success && (ret = __os_get_errno()) == EINTR); + if (DB_GLOBAL(j_close) != NULL) + ret = DB_GLOBAL(j_close)(fhp->fd); + else if (fhp->handle != INVALID_HANDLE_VALUE) + RETRY_CHK((!CloseHandle(fhp->handle)), ret); + else + RETRY_CHK((close(fhp->fd)), ret); if (ret != 0) __db_err(dbenv, "CloseHandle: %s", strerror(ret)); + + /* Unlink the file if we haven't already done so. */ + if (F_ISSET(fhp, DB_FH_UNLINK)) { + (void)__os_unlink(dbenv, fhp->name); + __os_free(dbenv, fhp->name); + } } __os_free(dbenv, fhp); diff --git a/db/os_win32/os_map.c b/db/os_win32/os_map.c index 259679cd0..140ac4980 100644 --- a/db/os_win32/os_map.c +++ b/db/os_win32/os_map.c @@ -1,21 +1,19 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_map.c,v 11.51 2004/10/05 14:55:34 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_map.c,v 11.43 2003/02/17 16:05:45 bostic Exp $"; -#endif /* not lint */ - #include "db_int.h" static int __os_map __P((DB_ENV *, char *, REGINFO *, DB_FH *, size_t, int, int, int, void **)); -static int __os_unique_name __P((char *, HANDLE, char *, size_t)); +static int __os_unique_name __P((_TCHAR *, HANDLE, _TCHAR *, size_t)); /* * __os_r_sysattach -- @@ -36,9 +34,8 @@ __os_r_sysattach(dbenv, infop, rp) * properly ordered, our caller has already taken care of that. */ if ((ret = __os_open(dbenv, infop->name, - DB_OSO_DIRECT | F_ISSET(infop, REGION_CREATE_OK) ? DB_OSO_CREATE: 0, - infop->mode, &fhp)) != 0) { + dbenv->db_mode, &fhp)) != 0) { __db_err(dbenv, "%s: %s", infop->name, db_strerror(ret)); return (ret); } @@ -85,7 +82,7 @@ __os_r_sysdetach(dbenv, infop, destroy) infop->wnt_handle = NULL; } - ret = !UnmapViewOfFile(infop->addr) ? __os_win32_errno() : 0; + ret = !UnmapViewOfFile(infop->addr) ? __os_get_errno() : 0; if (ret != 0) __db_err(dbenv, "UnmapViewOfFile: %s", strerror(ret)); @@ -133,7 +130,7 @@ __os_unmapfile(dbenv, addr, len) if (DB_GLOBAL(j_unmap) != NULL) return (DB_GLOBAL(j_unmap)(addr, len)); - return (!UnmapViewOfFile(addr) ? __os_win32_errno() : 0); + return (!UnmapViewOfFile(addr) ? __os_get_errno() : 0); } /* @@ -145,12 +142,12 @@ __os_unmapfile(dbenv, addr, len) * names), and repeatable (same files, map to same names). It's not * so easy to do by name. Should handle not only: * - * foo.bar == ./foo.bar == c:/whatever_path/foo.bar + * foo.bar == ./foo.bar == c:/whatever_path/foo.bar * * but also understand that: * - * foo.bar == Foo.Bar (FAT file system) - * foo.bar != Foo.Bar (NTFS) + * foo.bar == Foo.Bar (FAT file system) + * foo.bar != Foo.Bar (NTFS) * * The best solution is to use the file index, found in the file * information structure (similar to UNIX inode #). 
@@ -165,24 +162,24 @@ __os_unmapfile(dbenv, addr, len) */ static int __os_unique_name(orig_path, hfile, result_path, result_path_len) - char *orig_path, *result_path; + _TCHAR *orig_path, *result_path; HANDLE hfile; size_t result_path_len; { BY_HANDLE_FILE_INFORMATION fileinfo; - char *basename, *p; + _TCHAR *basename, *p; /* * In Windows, pathname components are delimited by '/' or '\', and * if neither is present, we need to strip off leading drive letter * (e.g. c:foo.txt). */ - basename = strrchr(orig_path, '/'); - p = strrchr(orig_path, '\\'); + basename = _tcsrchr(orig_path, '/'); + p = _tcsrchr(orig_path, '\\'); if (basename == NULL || (p != NULL && p > basename)) basename = p; if (basename == NULL) - basename = strrchr(orig_path, ':'); + basename = _tcsrchr(orig_path, ':'); if (basename == NULL) basename = orig_path; @@ -190,10 +187,10 @@ __os_unique_name(orig_path, hfile, result_path, result_path_len) basename++; if (!GetFileInformationByHandle(hfile, &fileinfo)) - return (__os_win32_errno()); + return (__os_get_errno()); - (void)snprintf(result_path, result_path_len, - "__db_shmem.%8.8lx.%8.8lx.%8.8lx.%8.8lx.%8.8lx.%s", + (void)_sntprintf(result_path, result_path_len, + _T("__db_shmem.%8.8lx.%8.8lx.%8.8lx.%8.8lx.%8.8lx.%s"), fileinfo.dwVolumeSerialNumber, fileinfo.nFileIndexHigh, fileinfo.nFileIndexLow, @@ -220,7 +217,7 @@ __os_map(dbenv, path, infop, fhp, len, is_region, is_system, is_rdonly, addr) { HANDLE hMemory; int ret, use_pagefile; - char shmem_name[MAXPATHLEN]; + _TCHAR *tpath, shmem_name[MAXPATHLEN]; void *pMemory; ret = 0; @@ -233,9 +230,16 @@ __os_map(dbenv, path, infop, fhp, len, is_region, is_system, is_rdonly, addr) * If creating a region in system space, get a matching name in the * paging file namespace. */ - if (use_pagefile && (ret = __os_unique_name( - path, fhp->handle, shmem_name, sizeof(shmem_name))) != 0) - return (ret); + if (use_pagefile) { + TO_TSTRING(dbenv, path, tpath, ret); + if (ret != 0) + return (ret); + ret = __os_unique_name(tpath, fhp->handle, + shmem_name, sizeof(shmem_name)); + FREE_STRING(dbenv, tpath); + if (ret != 0) + return (ret); + } /* * XXX @@ -275,7 +279,7 @@ __os_map(dbenv, path, infop, fhp, len, is_region, is_system, is_rdonly, addr) 0, (DWORD)len, NULL); if (hMemory == NULL) { - ret = __os_win32_errno(); + ret = __os_get_errno(); __db_err(dbenv, "OpenFileMapping: %s", strerror(ret)); return (__db_panic(dbenv, ret)); } @@ -283,7 +287,7 @@ __os_map(dbenv, path, infop, fhp, len, is_region, is_system, is_rdonly, addr) pMemory = MapViewOfFile(hMemory, (is_rdonly ? FILE_MAP_READ : FILE_MAP_ALL_ACCESS), 0, 0, len); if (pMemory == NULL) { - ret = __os_win32_errno(); + ret = __os_get_errno(); __db_err(dbenv, "MapViewOfFile: %s", strerror(ret)); return (__db_panic(dbenv, ret)); } diff --git a/db/os_win32/os_open.c b/db/os_win32/os_open.c index 36d262f1a..1aa65cfa0 100644 --- a/db/os_win32/os_open.c +++ b/db/os_win32/os_open.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. 
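For paging-file-backed regions, __os_map above builds the region name from the volume serial number and 64-bit file index returned by GetFileInformationByHandle, so any path that reaches the same file yields the same name. A minimal standalone sketch of that scheme follows; sketch_unique_name and the exact format string are illustrative, and the real code also folds in the file's base name and uses the _TCHAR variants.

#include <stdio.h>
#include <windows.h>

/*
 * Derive a name for a shared region from the identity of the backing
 * file rather than from its path: volume serial number plus the 64-bit
 * file index (the Windows analogue of a UNIX inode number).
 */
static int
sketch_unique_name(HANDLE hfile, char *buf, size_t buflen)
{
    BY_HANDLE_FILE_INFORMATION fi;

    if (!GetFileInformationByHandle(hfile, &fi))
        return ((int)GetLastError());

    /* _snprintf may not NUL-terminate on truncation; good enough here. */
    _snprintf(buf, buflen, "__db_shmem.%8.8lx.%8.8lx.%8.8lx",
        fi.dwVolumeSerialNumber, fi.nFileIndexHigh, fi.nFileIndexLow);
    return (0);
}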
+ * + * $Id: os_open.c,v 11.37 2004/10/05 14:55:35 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_open.c,v 11.28 2003/08/29 18:50:46 ubell Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -45,7 +43,7 @@ __os_open(dbenv, name, flags, mode, fhpp) int mode; DB_FH **fhpp; { - return (__os_open_extend(dbenv, name, 0, 0, flags, mode, fhpp)); + return (__os_open_extend(dbenv, name, 0, flags, mode, fhpp)); } /* @@ -53,25 +51,26 @@ __os_open(dbenv, name, flags, mode, fhpp) * Open a file descriptor (including page size and log size information). */ int -__os_open_extend(dbenv, name, log_size, page_size, flags, mode, fhpp) +__os_open_extend(dbenv, name, page_size, flags, mode, fhpp) DB_ENV *dbenv; const char *name; - u_int32_t log_size, page_size, flags; + u_int32_t page_size, flags; int mode; DB_FH **fhpp; { DB_FH *fhp; - DWORD bytesWritten; DWORD cluster_size, sector_size, free_clusters, total_clusters; int access, attr, createflag, nrepeat, oflags, ret, share; - char *drive, dbuf[4]; /* */ + _TCHAR *drive, *tname; + _TCHAR dbuf[4]; /* */ - *fhpp = NULL; + fhp = NULL; + tname = NULL; #define OKFLAGS \ - (DB_OSO_CREATE | DB_OSO_DIRECT | DB_OSO_EXCL | DB_OSO_LOG | \ - DB_OSO_RDONLY | DB_OSO_REGION | DB_OSO_SEQ | DB_OSO_TEMP | \ - DB_OSO_TRUNC) + (DB_OSO_CREATE | DB_OSO_DIRECT | DB_OSO_DSYNC | DB_OSO_EXCL | \ + DB_OSO_LOG | DB_OSO_RDONLY | DB_OSO_REGION | DB_OSO_SEQ | \ + DB_OSO_TEMP | DB_OSO_TRUNC) if ((ret = __db_fchk(dbenv, "__os_open", flags, OKFLAGS)) != 0) return (ret); @@ -85,6 +84,10 @@ __os_open_extend(dbenv, name, log_size, page_size, flags, mode, fhpp) if (LF_ISSET(DB_OSO_CREATE)) oflags |= O_CREAT; +#ifdef O_DSYNC + if (LF_ISSET(DB_OSO_LOG) && LF_ISSET(DB_OSO_DSYNC)) + oflags |= O_DSYNC; +#endif if (LF_ISSET(DB_OSO_EXCL)) oflags |= O_EXCL; @@ -108,9 +111,12 @@ __os_open_extend(dbenv, name, log_size, page_size, flags, mode, fhpp) return (__os_openhandle(dbenv, name, oflags, mode, fhpp)); } - if ((ret = __os_calloc(dbenv, 1, sizeof(DB_FH), fhpp)) != 0) - return (ret); - fhp = *fhpp; + TO_TSTRING(dbenv, name, tname, ret); + if (ret != 0) + goto err; + + if ((ret = __os_calloc(dbenv, 1, sizeof(DB_FH), &fhp)) != 0) + goto err; /* * Otherwise, use the Windows/32 CreateFile interface so that we can @@ -150,7 +156,7 @@ __os_open_extend(dbenv, name, log_size, page_size, flags, mode, fhpp) else createflag = OPEN_EXISTING; /* open only if existing */ - if (LF_ISSET(DB_OSO_LOG)) { + if (LF_ISSET(DB_OSO_LOG) && LF_ISSET(DB_OSO_DSYNC)) { F_SET(fhp, DB_FH_NOSYNC); attr |= FILE_FLAG_WRITE_THROUGH; } @@ -172,7 +178,7 @@ __os_open_extend(dbenv, name, log_size, page_size, flags, mode, fhpp) if (LF_ISSET(DB_OSO_DIRECT) && page_size != 0 && name[0] != '\0') { if (name[1] == ':') { drive = dbuf; - snprintf(dbuf, sizeof(dbuf), "%c:\\", name[0]); + _sntprintf(dbuf, sizeof(dbuf), _T("%c:\\"), tname[0]); } else drive = NULL; @@ -188,7 +194,7 @@ __os_open_extend(dbenv, name, log_size, page_size, flags, mode, fhpp) for (nrepeat = 1;; ++nrepeat) { fhp->handle = - CreateFile(name, access, share, NULL, createflag, attr, 0); + CreateFile(tname, access, share, NULL, createflag, attr, 0); if (fhp->handle == INVALID_HANDLE_VALUE) { /* * If it's a "temporary" error, we retry up to 3 times, @@ -196,49 +202,27 @@ __os_open_extend(dbenv, name, log_size, page_size, flags, mode, fhpp) * if we can't open a database, an inability to open a * log file is cause for serious dismay. 
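__os_open_extend now converts the caller's char * name with TO_TSTRING before handing it to CreateFile, so the same code works on both ANSI and Unicode builds. The sketch below shows the underlying idea with plain Win32 calls; sketch_open is an illustrative name, and MultiByteToWideChar/MAX_PATH stand in for whatever the real TO_TSTRING/FREE_STRING macros do (they allocate through the DB_ENV and are not shown in this hunk).

#include <windows.h>

/*
 * Open a file by a char * path on either an ANSI or a Unicode build:
 * on Unicode builds the path is widened first, because CreateFile maps
 * to CreateFileW there and expects WCHAR strings.
 */
static HANDLE
sketch_open(const char *name, DWORD access, DWORD share,
    DWORD createflag, DWORD attr)
{
#ifdef UNICODE
    WCHAR tname[MAX_PATH];

    if (MultiByteToWideChar(CP_ACP, 0, name, -1, tname, MAX_PATH) == 0)
        return (INVALID_HANDLE_VALUE);
    return (CreateFileW(tname, access, share, NULL, createflag, attr, NULL));
#else
    return (CreateFileA(name, access, share, NULL, createflag, attr, NULL));
#endif
}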
*/ - ret = __os_win32_errno(); + ret = __os_get_errno(); if ((ret != ENFILE && ret != EMFILE && ret != ENOSPC) || nrepeat > 3) goto err; - (void)__os_sleep(dbenv, nrepeat * 2, 0); + __os_sleep(dbenv, nrepeat * 2, 0); } else break; } - F_SET(fhp, DB_FH_OPENED); - /* - * Special handling needed for log files. To get Windows to not update - * the MFT metadata on each write, extend the file to its maximum size. - * Windows will allocate all the data blocks and store them in the MFT - * (inode) area. In addition, flush the MFT area to disk. - * This strategy only works for Win/NT; Win/9X does not - * guarantee that the logs will be zero filled. - */ - if (LF_ISSET(DB_OSO_LOG) && log_size != 0 && __os_is_winnt()) { - if (SetFilePointer(fhp->handle, - log_size - 1, NULL, FILE_BEGIN) == (DWORD)-1) - goto err; - if (WriteFile(fhp->handle, "\x00", 1, &bytesWritten, NULL) == 0) - goto err; - if (bytesWritten != 1) - goto err; - if (SetEndOfFile(fhp->handle) == 0) - goto err; - if (SetFilePointer( - fhp->handle, 0, NULL, FILE_BEGIN) == (DWORD)-1) - goto err; - if (FlushFileBuffers(fhp->handle) == 0) - goto err; - } + FREE_STRING(dbenv, tname); + F_SET(fhp, DB_FH_OPENED); + *fhpp = fhp; return (0); err: if (ret == 0) - ret = __os_win32_errno(); - - __os_closehandle(dbenv, fhp); - *fhpp = NULL; + ret = __os_get_errno(); + FREE_STRING(dbenv, tname); + if (fhp != NULL) + (void)__os_closehandle(dbenv, fhp); return (ret); } diff --git a/db/os_win32/os_rename.c b/db/os_win32/os_rename.c index 48ea8ef37..102633231 100644 --- a/db/os_win32/os_rename.c +++ b/db/os_win32/os_rename.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_rename.c,v 1.19 2004/10/05 14:55:36 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_rename.c,v 1.15 2003/09/09 15:44:36 mjc Exp $"; -#endif /* not lint */ - #include "db_int.h" /* @@ -24,6 +22,7 @@ __os_rename(dbenv, oldname, newname, flags) u_int32_t flags; { int ret; + _TCHAR *toldname, *tnewname; ret = 0; if (DB_GLOBAL(j_rename) != NULL) { @@ -32,15 +31,22 @@ __os_rename(dbenv, oldname, newname, flags) goto done; } - if (!MoveFile(oldname, newname)) - ret = __os_win32_errno(); + TO_TSTRING(dbenv, oldname, toldname, ret); + if (ret != 0) + goto done; + TO_TSTRING(dbenv, newname, tnewname, ret); + if (ret != 0) + goto err; + + if (!MoveFile(toldname, tnewname)) + ret = __os_get_errno(); if (ret == EEXIST) { ret = 0; if (__os_is_winnt()) { if (!MoveFileEx( - oldname, newname, MOVEFILE_REPLACE_EXISTING)) - ret = __os_win32_errno(); + toldname, tnewname, MOVEFILE_REPLACE_EXISTING)) + ret = __os_get_errno(); } else { /* * There is no MoveFileEx for Win9x/Me, so we have to @@ -49,12 +55,15 @@ __os_rename(dbenv, oldname, newname, flags) * refer to the same file, so we don't need to check * that here. */ - (void)DeleteFile(newname); - if (!MoveFile(oldname, newname)) - ret = __os_win32_errno(); + (void)DeleteFile(tnewname); + if (!MoveFile(toldname, tnewname)) + ret = __os_get_errno(); } } + FREE_STRING(dbenv, tnewname); +err: FREE_STRING(dbenv, toldname); + done: if (ret != 0 && flags == 0) __db_err(dbenv, "Rename %s %s: %s", oldname, newname, strerror(ret)); diff --git a/db/os_win32/os_rw.c b/db/os_win32/os_rw.c index 56b7d4d47..c3c103a9e 100644 --- a/db/os_win32/os_rw.c +++ b/db/os_win32/os_rw.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. 
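__os_rename above works around MoveFile's refusal to overwrite an existing target: on NT-class systems it retries with MoveFileEx(MOVEFILE_REPLACE_EXISTING), and on Win9x/Me, which has no MoveFileEx, it deletes the target and moves again. A condensed sketch follows; sketch_rename and the have_nt flag are illustrative, and where the real code detects EEXIST via __os_get_errno(), this sketch checks only ERROR_ALREADY_EXISTS.

#include <windows.h>

/*
 * Rename with "replace existing" semantics on Windows.  MoveFile fails
 * if the target exists; fall back to MoveFileEx where available, or to
 * delete-then-move on Win9x/Me.
 */
static int
sketch_rename(const TCHAR *from, const TCHAR *to, BOOL have_nt)
{
    if (MoveFile(from, to))
        return (0);
    if (GetLastError() != ERROR_ALREADY_EXISTS)
        return ((int)GetLastError());

    if (have_nt)
        return (MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING) ?
            0 : (int)GetLastError());

    /* Win9x/Me path: not atomic, the target briefly disappears. */
    (void)DeleteFile(to);
    return (MoveFile(from, to) ? 0 : (int)GetLastError());
}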
* - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_rw.c,v 11.38 2004/09/17 22:00:32 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_rw.c,v 11.33 2003/02/16 15:55:06 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -35,8 +33,9 @@ __os_io(dbenv, op, fhp, pgno, pagesize, buf, niop) int op; DB_FH *fhp; db_pgno_t pgno; - size_t pagesize, *niop; + u_int32_t pagesize; u_int8_t *buf; + size_t *niop; { int ret; @@ -108,41 +107,35 @@ __os_read(dbenv, fhp, addr, len, nrp) { size_t offset, nr; DWORD count; - int ret, retries; - BOOL success; + int ret; u_int8_t *taddr; - retries = 0; - for (taddr = addr, - offset = 0; offset < len; taddr += nr, offset += nr) { -retry: if (DB_GLOBAL(j_read) != NULL) { - nr = DB_GLOBAL(j_read)(fhp->fd, - taddr, len - offset); - success = (nr >= 0); - } else { - success = ReadFile(fhp->handle, - taddr, (DWORD)(len - offset), &count, NULL); - if (!success) - __os_set_errno(__os_win32_errno()); - else - nr = (size_t)count; - } + ret = 0; - if (!success) { + if (DB_GLOBAL(j_read) != NULL) { + *nrp = len; + if (DB_GLOBAL(j_read)(fhp->fd, addr, len) != (ssize_t)len) { ret = __os_get_errno(); - if ((ret == EINTR || ret == EBUSY) && - ++retries < DB_RETRY) - goto retry; - __db_err(dbenv, "read: 0x%lx, %lu: %s", - P_TO_ULONG(taddr), - (u_long)len - offset, strerror(ret)); - return (ret); + __db_err(dbenv, "read: %#lx, %lu: %s", + P_TO_ULONG(addr), (u_long)len, strerror(ret)); } - if (nr == 0) + return (ret); + } + + ret = 0; + for (taddr = addr, + offset = 0; offset < len; taddr += nr, offset += nr) { + RETRY_CHK((!ReadFile(fhp->handle, + taddr, (DWORD)(len - offset), &count, NULL)), ret); + if (count == 0 || ret != 0) break; + nr = (size_t)count; } *nrp = taddr - (u_int8_t *)addr; - return (0); + if (ret != 0) + __db_err(dbenv, "read: 0x%lx, %lu: %s", + P_TO_ULONG(taddr), (u_long)len - offset, strerror(ret)); + return (ret); } /* @@ -181,39 +174,33 @@ __os_physwrite(dbenv, fhp, addr, len, nwp) { size_t offset, nw; DWORD count; - int ret, retries; - BOOL success; + int ret; u_int8_t *taddr; - retries = 0; - for (taddr = addr, - offset = 0; offset < len; taddr += nw, offset += nw) { -retry: if (DB_GLOBAL(j_write) != NULL) { - nw = DB_GLOBAL(j_write)(fhp->fd, - taddr, len - offset); - success = (nw >= 0); - } else { - success = WriteFile(fhp->handle, - taddr, (DWORD)(len - offset), &count, NULL); - if (!success) - __os_set_errno(__os_win32_errno()); - else - nw = (size_t)count; - } - - if (!success) { + if (DB_GLOBAL(j_write) != NULL) { + *nwp = len; + if (DB_GLOBAL(j_write)(fhp->fd, addr, len) != (ssize_t)len) { ret = __os_get_errno(); - if ((ret == EINTR || ret == EBUSY) && - ++retries < DB_RETRY) - goto retry; - __db_err(dbenv, "write: 0x%x, %lu: %s", taddr, - (u_long)len-offset, strerror(ret)); - return (ret); + __db_err(dbenv, "write: %#lx, %lu: %s", + P_TO_ULONG(addr), (u_long)len, strerror(ret)); } + return (ret); } + ret = 0; + for (taddr = addr, + offset = 0; offset < len; taddr += nw, offset += nw) { + RETRY_CHK((!WriteFile(fhp->handle, + taddr, (DWORD)(len - offset), &count, NULL)), ret); + if (ret != 0) + break; + nw = (size_t)count; + } *nwp = len; - return (0); + if (ret != 0) + __db_err(dbenv, "write: %#lx, %lu: %s", + P_TO_ULONG(taddr), (u_long)len - offset, strerror(ret)); + return (ret); } #ifdef HAVE_FILESYSTEM_NOTZERO diff --git a/db/os_win32/os_seek.c b/db/os_win32/os_seek.c index 1837530da..e356c3884 100644 --- 
a/db/os_win32/os_seek.c +++ b/db/os_win32/os_seek.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_seek.c,v 11.22 2004/09/17 22:00:32 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_seek.c,v 11.19 2003/01/08 05:34:57 bostic Exp $"; -#endif /* not lint */ - #include "db_int.h" /* @@ -21,7 +19,7 @@ int __os_seek(dbenv, fhp, pgsize, pageno, relative, isrewind, db_whence) DB_ENV *dbenv; DB_FH *fhp; - size_t pgsize; + u_int32_t pgsize; db_pgno_t pageno; u_int32_t relative; int isrewind; @@ -34,10 +32,15 @@ __os_seek(dbenv, fhp, pgsize, pageno, relative, isrewind, db_whence) unsigned long low; long high; }; - } offset; + } offbytes; + off_t offset; int ret, whence; DWORD from; + offset = (off_t)pgsize * pageno + relative; + if (isrewind) + offset = -offset; + if (DB_GLOBAL(j_seek) != NULL) { switch (db_whence) { case DB_OS_SEEK_CUR: @@ -53,8 +56,7 @@ __os_seek(dbenv, fhp, pgsize, pageno, relative, isrewind, db_whence) return (EINVAL); } - ret = DB_GLOBAL(j_seek)(fhp->fd, pgsize, pageno, - relative, isrewind, whence); + ret = DB_GLOBAL(j_seek)(fhp->fd, offset, whence); } else { switch (db_whence) { case DB_OS_SEEK_CUR: @@ -70,13 +72,10 @@ __os_seek(dbenv, fhp, pgsize, pageno, relative, isrewind, db_whence) return (EINVAL); } - offset.bigint = (__int64)pgsize * pageno + relative; - if (isrewind) - offset.bigint = -offset.bigint; - + offbytes.bigint = offset; ret = (SetFilePointer(fhp->handle, - offset.low, &offset.high, from) == (DWORD) - 1) ? - __os_win32_errno() : 0; + offbytes.low, &offbytes.high, from) == (DWORD) - 1) ? + __os_get_errno() : 0; } if (ret == 0) { diff --git a/db/os_win32/os_sleep.c b/db/os_win32/os_sleep.c index c07198cd0..ae06e4980 100644 --- a/db/os_win32/os_sleep.c +++ b/db/os_win32/os_sleep.c @@ -1,23 +1,21 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_sleep.c,v 11.11 2004/03/24 15:13:16 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_sleep.c,v 11.9 2003/01/08 05:35:01 bostic Exp $"; -#endif /* not lint */ - #include "db_int.h" /* * __os_sleep -- * Yield the processor for a period of time. */ -int +void __os_sleep(dbenv, secs, usecs) DB_ENV *dbenv; u_long secs, usecs; /* Seconds and microseconds. */ @@ -28,13 +26,14 @@ __os_sleep(dbenv, secs, usecs) for (; usecs >= 1000000; ++secs, usecs -= 1000000) ; - if (DB_GLOBAL(j_sleep) != NULL) - return (DB_GLOBAL(j_sleep)(secs, usecs)); + if (DB_GLOBAL(j_sleep) != NULL) { + DB_GLOBAL(j_sleep)(secs, usecs); + return; + } /* * It's important that we yield the processor here so that other * processes or threads are permitted to run. */ Sleep(secs * 1000 + usecs / 1000); - return (0); } diff --git a/db/os_win32/os_spin.c b/db/os_win32/os_spin.c index b6af0b6b7..a5cb58539 100644 --- a/db/os_win32/os_spin.c +++ b/db/os_win32/os_spin.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. 
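__os_seek above now computes a single 64-bit byte offset, pgsize * pageno + relative (negated for a rewind), and then splits it into the low/high halves SetFilePointer expects. A standalone sketch of that computation follows; sketch_seek is an illustrative name, and LARGE_INTEGER is used here instead of the hand-rolled union in the patch.

#include <windows.h>

/*
 * Seek to a page-aligned offset: build the 64-bit offset, then pass it
 * to SetFilePointer as a low DWORD plus a signed high LONG.  A return
 * of INVALID_SET_FILE_POINTER is only an error if GetLastError() says
 * so, because the low half of a large offset can legitimately be -1.
 */
static int
sketch_seek(HANDLE h, unsigned long pgsize, unsigned long pageno,
    unsigned long relative, int isrewind)
{
    LARGE_INTEGER li;

    li.QuadPart = (LONGLONG)pgsize * pageno + relative;
    if (isrewind)
        li.QuadPart = -li.QuadPart;

    if (SetFilePointer(h, (LONG)li.LowPart, &li.HighPart, FILE_BEGIN) ==
        INVALID_SET_FILE_POINTER && GetLastError() != NO_ERROR)
        return ((int)GetLastError());
    return (0);
}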
+ * + * $Id: os_spin.c,v 11.16 2004/03/24 15:13:16 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_spin.c,v 11.14 2003/06/18 14:21:20 bostic Exp $"; -#endif /* not lint */ - #include "db_int.h" /* @@ -54,5 +52,5 @@ __os_yield(dbenv, usecs) { if (DB_GLOBAL(j_yield) != NULL && DB_GLOBAL(j_yield)() == 0) return; - (void)__os_sleep(dbenv, 0, usecs); + __os_sleep(dbenv, 0, usecs); } diff --git a/db/os_win32/os_stat.c b/db/os_win32/os_stat.c index df290d66e..b11da487a 100644 --- a/db/os_win32/os_stat.c +++ b/db/os_win32/os_stat.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. + * + * $Id: os_stat.c,v 11.32 2004/10/07 14:00:11 carol Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: os_stat.c,v 11.26 2003/02/20 14:36:07 mjc Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include @@ -29,27 +27,25 @@ __os_exists(path, isdirp) const char *path; int *isdirp; { - int ret, retries; + int ret; DWORD attrs; + _TCHAR *tpath; if (DB_GLOBAL(j_exists) != NULL) return (DB_GLOBAL(j_exists)(path, isdirp)); - ret = retries = 0; - do { - attrs = GetFileAttributes(path); - if (attrs == (DWORD)-1) - ret = __os_win32_errno(); - } while ((ret == EINTR || ret == EBUSY) && - ++retries < DB_RETRY); - + TO_TSTRING(NULL, path, tpath, ret); if (ret != 0) return (ret); - if (isdirp != NULL) + RETRY_CHK( + ((attrs = GetFileAttributes(tpath)) == (DWORD)-1 ? 1 : 0), ret); + + if (ret == 0 && isdirp != NULL) *isdirp = (attrs & FILE_ATTRIBUTE_DIRECTORY); - return (0); + FREE_STRING(NULL, tpath); + return (ret); } /* @@ -64,19 +60,16 @@ __os_ioinfo(dbenv, path, fhp, mbytesp, bytesp, iosizep) DB_FH *fhp; u_int32_t *mbytesp, *bytesp, *iosizep; { - int ret, retries; + int ret; BY_HANDLE_FILE_INFORMATION bhfi; unsigned __int64 filesize; - retries = 0; if (DB_GLOBAL(j_ioinfo) != NULL) return (DB_GLOBAL(j_ioinfo)(path, fhp->fd, mbytesp, bytesp, iosizep)); -retry: if (!GetFileInformationByHandle(fhp->handle, &bhfi)) { - if (((ret = __os_win32_errno()) == EINTR || ret == EBUSY) && - ++retries < DB_RETRY) - goto retry; + RETRY_CHK((!GetFileInformationByHandle(fhp->handle, &bhfi)), ret); + if (ret != 0) { __db_err(dbenv, "GetFileInformationByHandle: %s", strerror(ret)); return (ret); diff --git a/db/os_win32/os_truncate.c b/db/os_win32/os_truncate.c new file mode 100644 index 000000000..51820ab1d --- /dev/null +++ b/db/os_win32/os_truncate.c @@ -0,0 +1,99 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2004 + * Sleepycat Software. All rights reserved. + * + * $Id: os_truncate.c,v 1.9 2004/10/05 14:45:30 mjc Exp $ + */ + +#include "db_config.h" + +#include "db_int.h" + +/* + * __os_truncate -- + * Truncate the file. + */ +int +__os_truncate(dbenv, fhp, pgno, pgsize) + DB_ENV *dbenv; + DB_FH *fhp; + db_pgno_t pgno; + u_int32_t pgsize; +{ + /* Yes, this really is how Microsoft have designed their API */ + union { + __int64 bigint; + struct { + unsigned long low; + long high; + }; + } oldpos; + off_t offset; + int ret, retries, t_ret; + + offset = (off_t)pgsize * pgno; + + if (DB_GLOBAL(j_ftruncate) != NULL) { + ret = DB_GLOBAL(j_ftruncate)(fhp->fd, offset); + goto done; + } + +#ifdef HAVE_FILESYSTEM_NOTZERO + /* + * If the filesystem doesn't zero fill, it isn't safe to extend the + * file, or we end up with junk blocks. Just return in that case. 
+ */ + if (__os_fs_notzero()) { + off_t stat_offset; + u_int32_t mbytes, bytes; + + /* Stat the file. */ + if ((ret = + __os_ioinfo(dbenv, NULL, fhp, &mbytes, &bytes, NULL)) != 0) + return (ret); + stat_offset = (off_t)mbytes * MEGABYTE + bytes; + + if (offset > stat_offset) + return (0); + } +#endif + + retries = 0; + do { + /* + * Windows doesn't provide truncate directly. Instead, + * it has SetEndOfFile, which truncates to the current + * position. So we have to save the current position, + * seek to where we want to truncate to, then seek back + * to where we were. To avoid races, all of that needs + * to be done while holding the file handle mutex. + */ + MUTEX_THREAD_LOCK(dbenv, fhp->mutexp); + oldpos.bigint = 0; + if ((oldpos.low = SetFilePointer(fhp->handle, + 0, &oldpos.high, FILE_CURRENT)) == -1 && + GetLastError() != NO_ERROR) { + ret = __os_get_errno(); + goto end; + } + if ((ret = __os_seek(dbenv, fhp, pgsize, pgno, + 0, 0, DB_OS_SEEK_SET)) != 0) + goto end; + if (!SetEndOfFile(fhp->handle)) + ret = __os_get_errno(); + if ((t_ret = __os_seek(dbenv, fhp, pgsize, + (db_pgno_t)(oldpos.bigint / pgsize), + 0, 0, DB_OS_SEEK_SET)) != 0 && ret == 0) + ret = t_ret; +end: MUTEX_THREAD_UNLOCK(dbenv, fhp->mutexp); + } while ((ret == EAGAIN || ret == EBUSY || ret == EINTR) && + ++retries < DB_RETRY); + +done: if (ret != 0) + __db_err(dbenv, + "ftruncate: %lu: %s", pgno * pgsize, strerror(ret)); + + return (ret); +} diff --git a/db/os_win32/os_unlink.c b/db/os_win32/os_unlink.c new file mode 100644 index 000000000..d1b50539e --- /dev/null +++ b/db/os_win32/os_unlink.c @@ -0,0 +1,73 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1997-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: os_unlink.c,v 11.5 2004/10/05 14:55:36 mjc Exp $ + */ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include + +#include +#include +#endif + +#include "db_int.h" + +/* + * __os_region_unlink -- + * Remove a shared memory object file. + */ +int +__os_region_unlink(dbenv, path) + DB_ENV *dbenv; + const char *path; +{ + if (F_ISSET(dbenv, DB_ENV_OVERWRITE)) + (void)__db_overwrite(dbenv, path); + + return (__os_unlink(dbenv, path)); +} + +/* + * __os_unlink -- + * Remove a file. + * + * PUBLIC: int __os_unlink __P((DB_ENV *, const char *)); + */ +int +__os_unlink(dbenv, path) + DB_ENV *dbenv; + const char *path; +{ + _TCHAR *tpath; + int ret; + + if (DB_GLOBAL(j_unlink) != NULL) { + ret = DB_GLOBAL(j_unlink)(path); + goto done; + } + + TO_TSTRING(dbenv, path, tpath, ret); + if (ret != 0) + return (ret); + RETRY_CHK((!DeleteFile(tpath)), ret); + FREE_STRING(dbenv, tpath); + + /* + * XXX + * We shouldn't be testing for an errno of ENOENT here, but ENOENT + * signals that a file is missing, and we attempt to unlink things + * (such as v. 2.x environment regions, in DB_ENV->remove) that we + * are expecting not to be there. Reporting errors in these cases + * is annoying. + */ +done: if (ret != 0 && ret != ENOENT) + __db_err(dbenv, "unlink: %s: %s", path, strerror(ret)); + + return (ret); +} diff --git a/db/perl/BerkeleyDB/BerkeleyDB.pm b/db/perl/BerkeleyDB/BerkeleyDB.pm index db0d70d35..5791faea5 100644 --- a/db/perl/BerkeleyDB/BerkeleyDB.pm +++ b/db/perl/BerkeleyDB/BerkeleyDB.pm @@ -2,7 +2,7 @@ package BerkeleyDB; -# Copyright (c) 1997-2003 Paul Marquess. All rights reserved. +# Copyright (c) 1997-2004 Paul Marquess. All rights reserved. # This program is free software; you can redistribute it and/or # modify it under the same terms as Perl itself. 
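The comment in the new os_truncate.c above describes the whole trick: Windows has no direct truncate call, so the code remembers the current file position, seeks to the cut-off point, calls SetEndOfFile, and seeks back, all under the handle mutex. The sketch below shows just that SetEndOfFile dance; sketch_truncate is an illustrative name, and it uses SetFilePointerEx for brevity, whereas the patch sticks with SetFilePointer (and retries on transient errors) for older Windows releases.

#include <windows.h>

/*
 * Truncate a file at "cutoff" bytes using SetEndOfFile, which always
 * truncates at the current file position: save the position, move to
 * the cut-off point, truncate, then restore the saved position.
 */
static int
sketch_truncate(HANDLE h, LONGLONG cutoff)
{
    LARGE_INTEGER oldpos, target, zero;

    zero.QuadPart = 0;
    if (!SetFilePointerEx(h, zero, &oldpos, FILE_CURRENT))
        return ((int)GetLastError());

    target.QuadPart = cutoff;
    if (!SetFilePointerEx(h, target, NULL, FILE_BEGIN) || !SetEndOfFile(h))
        return ((int)GetLastError());

    /* Put the file pointer back where the caller left it. */
    return (SetFilePointerEx(h, oldpos, NULL, FILE_BEGIN) ?
        0 : (int)GetLastError());
}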
# @@ -17,7 +17,7 @@ use Carp; use vars qw($VERSION @ISA @EXPORT $AUTOLOAD $use_XSLoader); -$VERSION = '0.25'; +$VERSION = '0.26'; require Exporter; #require DynaLoader; @@ -53,11 +53,11 @@ BEGIN { DB_ARCH_REMOVE DB_AUTO_COMMIT DB_BEFORE - DB_BROADCAST_EID DB_BTREE DB_BTREEMAGIC DB_BTREEOLDVER DB_BTREEVERSION + DB_BUFFER_SMALL DB_CACHED_COUNTS DB_CDB_ALLDB DB_CHECKPOINT @@ -72,6 +72,7 @@ BEGIN { DB_CURLSN DB_CURRENT DB_CXX_NO_EXCEPTIONS + DB_DEGREE_2 DB_DELETED DB_DELIMITER DB_DIRECT @@ -79,9 +80,11 @@ BEGIN { DB_DIRECT_LOG DB_DIRTY_READ DB_DONOTINDEX + DB_DSYNC_LOG DB_DUP DB_DUPCURSOR DB_DUPSORT + DB_DURABLE_UNKNOWN DB_EID_BROADCAST DB_EID_INVALID DB_ENCRYPT @@ -94,11 +97,13 @@ BEGIN { DB_ENV_DBLOCAL DB_ENV_DIRECT_DB DB_ENV_DIRECT_LOG + DB_ENV_DSYNC_LOG DB_ENV_FATAL DB_ENV_LOCKDOWN DB_ENV_LOCKING DB_ENV_LOGGING DB_ENV_LOG_AUTOREMOVE + DB_ENV_LOG_INMEMORY DB_ENV_NOLOCKING DB_ENV_NOMMAP DB_ENV_NOPANIC @@ -149,7 +154,7 @@ BEGIN { DB_INIT_MPOOL DB_INIT_REP DB_INIT_TXN - DB_INVALID_EID + DB_INORDER DB_JAVA_CALLBACK DB_JOINENV DB_JOIN_ITEM @@ -162,6 +167,7 @@ BEGIN { DB_LOCKDOWN DB_LOCKMAGIC DB_LOCKVERSION + DB_LOCK_ABORT DB_LOCK_CONFLICT DB_LOCK_DEADLOCK DB_LOCK_DEFAULT @@ -172,6 +178,7 @@ BEGIN { DB_LOCK_GET_TIMEOUT DB_LOCK_INHERIT DB_LOCK_MAXLOCKS + DB_LOCK_MAXWRITE DB_LOCK_MINLOCKS DB_LOCK_MINWRITE DB_LOCK_NORUN @@ -202,13 +209,16 @@ BEGIN { DB_LOGOLDVER DB_LOGVERSION DB_LOG_AUTOREMOVE + DB_LOG_BUFFER_FULL DB_LOG_CHKPNT DB_LOG_COMMIT DB_LOG_DISK + DB_LOG_INMEMORY DB_LOG_LOCKED DB_LOG_NOCOPY DB_LOG_NOT_DURABLE DB_LOG_PERM + DB_LOG_RESEND DB_LOG_SILENT_ERR DB_LOG_WRNOSYNC DB_MAX_PAGES @@ -218,6 +228,7 @@ BEGIN { DB_MPOOL_DIRTY DB_MPOOL_DISCARD DB_MPOOL_EXTENT + DB_MPOOL_FREE DB_MPOOL_LAST DB_MPOOL_NEW DB_MPOOL_NEW_GROUP @@ -298,9 +309,11 @@ BEGIN { DB_REP_CLIENT DB_REP_CREATE DB_REP_DUPMASTER + DB_REP_EGENCHG DB_REP_HANDLE_DEAD DB_REP_HOLDELECTION DB_REP_ISPERM + DB_REP_LOGREADY DB_REP_LOGSONLY DB_REP_MASTER DB_REP_NEWMASTER @@ -308,7 +321,9 @@ BEGIN { DB_REP_NOBUFFER DB_REP_NOTPERM DB_REP_OUTDATED + DB_REP_PAGEDONE DB_REP_PERMANENT + DB_REP_STARTUPDONE DB_REP_UNAVAIL DB_REVSPLITOFF DB_RMW @@ -318,7 +333,12 @@ BEGIN { DB_RUNRECOVERY DB_SALVAGE DB_SECONDARY_BAD + DB_SEQUENCE_VERSION DB_SEQUENTIAL + DB_SEQ_DEC + DB_SEQ_INC + DB_SEQ_RANGE_SET + DB_SEQ_WRAP DB_SET DB_SET_LOCK_TIMEOUT DB_SET_RANGE @@ -326,7 +346,14 @@ BEGIN { DB_SET_TXN_NOW DB_SET_TXN_TIMEOUT DB_SNAPSHOT + DB_STAT_ALL DB_STAT_CLEAR + DB_STAT_LOCK_CONF + DB_STAT_LOCK_LOCKERS + DB_STAT_LOCK_OBJECTS + DB_STAT_LOCK_PARAMS + DB_STAT_MEMP_HASH + DB_STAT_SUBSYSTEM DB_SURPRISE_KID DB_SWAPBYTES DB_SYSTEM_MEM @@ -338,18 +365,12 @@ BEGIN { DB_TEST_ELECTWAIT1 DB_TEST_ELECTWAIT2 DB_TEST_POSTDESTROY - DB_TEST_POSTEXTDELETE - DB_TEST_POSTEXTOPEN - DB_TEST_POSTEXTUNLINK DB_TEST_POSTLOG DB_TEST_POSTLOGMETA DB_TEST_POSTOPEN DB_TEST_POSTRENAME DB_TEST_POSTSYNC DB_TEST_PREDESTROY - DB_TEST_PREEXTDELETE - DB_TEST_PREEXTOPEN - DB_TEST_PREEXTUNLINK DB_TEST_PREOPEN DB_TEST_PRERENAME DB_TEST_SUBDB_LOCKS @@ -365,7 +386,6 @@ BEGIN { DB_TXN_BACKWARD_ROLL DB_TXN_CKP DB_TXN_FORWARD_ROLL - DB_TXN_GETPGNOS DB_TXN_LOCK DB_TXN_LOCK_2PL DB_TXN_LOCK_MASK @@ -386,7 +406,7 @@ BEGIN { DB_TXN_UNDO DB_TXN_WRITE_NOSYNC DB_UNKNOWN - DB_UNRESOLVED_CHILD + DB_UNREF DB_UPDATE_SECONDARY DB_UPGRADE DB_USE_ENVIRON @@ -401,6 +421,7 @@ BEGIN { DB_VERIFY_FATAL DB_VERSION_MAJOR DB_VERSION_MINOR + DB_VERSION_MISMATCH DB_VERSION_PATCH DB_VERSION_STRING DB_VRFY_FLAGMASK @@ -453,9 +474,12 @@ sub ParseParameters($@) %options = %{ $rest[0] } ; } - elsif (@rest >= 2) { + elsif (@rest >= 2 
&& @rest % 2 == 0) { %options = @rest ; } + elsif (@rest > 0) { + croak "$sub: malformed option list"; + } while (($key, $value) = each %options) { @@ -505,7 +529,7 @@ sub env_remove { # Usage: # - # $env = new BerkeleyDB::Env + # $env = BerkeleyDB::env_remove # [ -Home => $path, ] # [ -Config => { name => value, name => value } # [ -Flags => DB_INIT_LOCK| ] @@ -603,6 +627,7 @@ package BerkeleyDB::Env ; use UNIVERSAL qw( isa ) ; use Carp ; +use IO::File; use vars qw( %valid_config_keys ) ; sub isaFilehandle @@ -649,19 +674,18 @@ sub new Verbose => 0, Config => undef, Encrypt => undef, + SharedMemKey => undef, }, @_) ; + my $errfile = $got->{ErrFile} ; if (defined $got->{ErrFile}) { - croak("ErrFile parameter must be a file name") - if ref $got->{ErrFile} ; - #if (!isaFilehandle($got->{ErrFile})) { - # my $handle = new IO::File ">$got->{ErrFile}" -# or croak "Cannot open file $got->{ErrFile}: $!\n" ; -# $got->{ErrFile} = $handle ; -# } + if (!isaFilehandle($got->{ErrFile})) { + my $handle = new IO::File ">$got->{ErrFile}" + or croak "Cannot open file $got->{ErrFile}: $!\n" ; + $errfile = $got->{ErrFile} = $handle ; + } } - my %config ; if (defined $got->{Config}) { croak("Config parameter must be a hash reference") @@ -684,7 +708,7 @@ sub new BerkeleyDB::parseEncrypt($got); - my ($addr) = _db_appinit($pkg, $got) ; + my ($addr) = _db_appinit($pkg, $got, $errfile) ; my $obj ; $obj = bless [$addr] , $pkg if $addr ; if ($obj && $BerkeleyDB::db_version >= 3.1 && keys %config) { @@ -1262,7 +1286,7 @@ sub SHIFT { my $self = shift; my ($key, $value) = (0, 0) ; - my $cursor = $self->db_cursor() ; + my $cursor = $self->_db_write_cursor() ; return undef if $cursor->c_get($key, $value, BerkeleyDB::DB_FIRST()) != 0 ; return undef if $cursor->c_del() != 0 ; @@ -1276,7 +1300,7 @@ sub UNSHIFT if (@_) { my ($key, $value) = (0, 0) ; - my $cursor = $self->db_cursor() ; + my $cursor = $self->_db_write_cursor() ; my $status = $cursor->c_get($key, $value, BerkeleyDB::DB_FIRST()) ; if ($status == 0) { @@ -1303,7 +1327,7 @@ sub PUSH if (@_) { my ($key, $value) = (-1, 0) ; - my $cursor = $self->db_cursor() ; + my $cursor = $self->_db_write_cursor() ; my $status = $cursor->c_get($key, $value, BerkeleyDB::DB_LAST()) ; if ($status == 0 || $status == BerkeleyDB::DB_NOTFOUND()) { @@ -1328,7 +1352,7 @@ sub POP { my $self = shift; my ($key, $value) = (0, 0) ; - my $cursor = $self->db_cursor() ; + my $cursor = $self->_db_write_cursor() ; return undef if $cursor->c_get($key, $value, BerkeleyDB::DB_LAST()) != 0 ; return undef if $cursor->c_del() != 0 ; @@ -1566,6 +1590,57 @@ sub DESTROY $self->_DESTROY() ; } +package BerkeleyDB::CDS::Lock; + +use vars qw(%Object %Count); +use Carp; + +sub BerkeleyDB::Common::cds_lock +{ + my $db = shift ; + + # fatal error if database not opened in CDS mode + croak("CDS not enabled for this database\n") + if ! $db->cds_enabled(); + + if ( ! 
defined $Object{"$db"}) + { + $Object{"$db"} = $db->_db_write_cursor() + || return undef ; + } + + ++ $Count{"$db"} ; + + return bless [$db, 1], "BerkeleyDB::CDS::Lock" ; +} + +sub cds_unlock +{ + my $self = shift ; + my $db = $self->[0] ; + + if ($self->[1]) + { + $self->[1] = 0 ; + -- $Count{"$db"} if $Count{"$db"} > 0 ; + + if ($Count{"$db"} == 0) + { + $Object{"$db"}->c_close() ; + undef $Object{"$db"}; + } + + return 1 ; + } + + return undef ; +} + +sub DESTROY +{ + my $self = shift ; + $self->cds_unlock() ; +} package BerkeleyDB::Term ; @@ -1585,3 +1660,4 @@ package BerkeleyDB ; __END__ + diff --git a/db/perl/BerkeleyDB/BerkeleyDB.pod b/db/perl/BerkeleyDB/BerkeleyDB.pod index 6dcf1f6df..ba2cc0c58 100644 --- a/db/perl/BerkeleyDB/BerkeleyDB.pod +++ b/db/perl/BerkeleyDB/BerkeleyDB.pod @@ -45,10 +45,10 @@ BerkeleyDB - Perl extension for Berkeley DB version 2, 3 or 4 $boolean = $db->byteswapped() ; $status = $db->truncate($count) ; - $bool = BerkeleyDB::cds_available(); - $lock = $db->cds_lock(); + $bool = $env->cds_enabled(); $bool = $db->cds_enabled(); - $bool = $db->locked(); + $lock = $db->cds_lock(); + $lock->cds_unlock(); ($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ; ($flag, $old_offset, $old_length) = $db->partial_clear() ; @@ -128,7 +128,7 @@ particularly relevant. The interface to Berkeley DB is implemented with a number of Perl classes. -=head1 ENV CLASS +=head1 The BerkeleyDB::Env Class The B class provides an interface to the Berkeley DB function B in Berkeley DB 2.x or B and @@ -156,14 +156,15 @@ with the C parameter, described below. [ -Flags => number, ] [ -SetFlags => bitmask, ] [ -LockDetect => number, ] + [ -SharedMemKey => number, ] [ -Verbose => boolean, ] [ -Encrypt => { Password => "string", Flags => number }, ] -=over 5 - All the parameters to the BerkeleyDB::Env constructor are optional. +=over 5 + =item -Home If present, this parameter should point to an existing directory. Any @@ -212,6 +213,17 @@ This option requires Berkeley DB 4.1 or better. If present, this parameter sets the size of the environments shared memory buffer pool. +=item -SharedMemKey + +If present, this parameter sets the base segment ID for the shared memory +region used by Berkeley DB. + +This option requires Berkeley DB 3.1 or better. + +Use C<$env-Eget_shm_key($id)> to find out the base segment ID used +once the environment is open. + + =item -Config This is a variation on the C<-Home> parameter, but it allows finer @@ -231,8 +243,15 @@ The code below shows an example of how it can be used. =item -ErrFile -Expects a filenme. Any errors generated internally by Berkeley DB will -be logged to this file. +Expects a filename or filenhandle. Any errors generated internally by +Berkeley DB will be logged to this file. A useful debug setting is to +open environments with either + + -ErrFile => *STDOUT + +or + + -ErrFile => *STDERR =item -ErrPrefix @@ -365,6 +384,16 @@ Returns the status of the last BerkeleyDB::Env method. Returns a pointer to the underlying DB_ENV data structure that Berkeley DB uses. +=item $env->get_shm_key($id) + +Writes the base segment ID for the shared memory region used by the +Berkeley DB environment into C<$id>. Returns 0 on success. + +This option requires Berkeley DB 4.2 or better. + +Use the C<-SharedMemKey> option when opening the environemt to set the +base segment ID. + =item $env->status() Returns the status of the last BerkeleyDB::Env method. @@ -380,7 +409,7 @@ TODO. 
$status = BerkeleyDB::db_remove [OPTIONS] $status = BerkeleyDB::db_rename [OPTIONS] $status = BerkeleyDB::db_verify [OPTIONS] - + =head1 THE DATABASE CLASSES B supports the following database formats: @@ -1358,11 +1387,32 @@ database and B for a B database. This method is typically used when a database has been opened with B. +=head2 $bool = $env->cds_enabled(); + +Returns true if the Berkeley DB environment C<$env> has been opened on +CDS mode. + +=head2 $bool = $db->cds_enabled(); + +Returns true if the database C<$db> has been opened on CDS mode. + =head2 $lock = $db->cds_lock(); -TODO. +Creates a CDS write lock object C<$lock>. + +It is a fatal error to attempt to create a cds_lock if the Berkeley DB +environment has not been opened in CDS mode. + +=head2 $lock->cds_unlock(); -=item $ref = $db->db_stat() +Removes a CDS lock. The destruction of the CDS lock object automatically +calls this method. + +Note that if multiple CDS lock objects are created, the underlying write +lock will not be released until all CDS lock objects are either explictly +unlocked with this method, or the CDS lock objects have been destroyed. + +=head2 $ref = $db->db_stat() Returns a reference to an associative array containing information about the database. The keys of the associative array correspond directly to the @@ -1569,7 +1619,7 @@ deleted, B will return B. The B<$flags> parameter is not used at present. -=head2 $status = $cursor->c_del($cnt [, $flags]) +=head2 $status = $cursor->c_count($cnt [, $flags]) Stores the number of duplicates at the current cursor position in B<$cnt>. @@ -1580,6 +1630,14 @@ Berkeley DB 3.1 or better. Returns the status of the last cursor method as a dual type. +=head2 $status = $cursor->c_pget() ; + +TODO + +=head2 $status = $cursor->c_close() + +Closes the cursor B<$cursor>. + =head2 Cursor Examples TODO @@ -1598,6 +1656,12 @@ TODO TODO. +=head1 CDS Mode + +The Berkeley Db Concurrent Data Store is a lightweight locking mechanism +that is useful in scenarios where transactions are overkill. See the +accompanying document .. for details of using this module in CDS mode. + =head1 DBM Filters A DBM Filter is a piece of code that is be used when you I @@ -1816,7 +1880,7 @@ The official web site for Berkeley DB is F. =head1 COPYRIGHT -Copyright (c) 1997-2003 Paul Marquess. All rights reserved. This program +Copyright (c) 1997-2004 Paul Marquess. All rights reserved. This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. diff --git a/db/perl/BerkeleyDB/BerkeleyDB.pod.P b/db/perl/BerkeleyDB/BerkeleyDB.pod.P index b8c0b996e..6540a943a 100644 --- a/db/perl/BerkeleyDB/BerkeleyDB.pod.P +++ b/db/perl/BerkeleyDB/BerkeleyDB.pod.P @@ -45,10 +45,10 @@ BerkeleyDB - Perl extension for Berkeley DB version 2, 3 or 4 $boolean = $db->byteswapped() ; $status = $db->truncate($count) ; - $bool = BerkeleyDB::cds_available(); - $lock = $db->cds_lock(); + $bool = $env->cds_enabled(); $bool = $db->cds_enabled(); - $bool = $db->locked(); + $lock = $db->cds_lock(); + $lock->cds_unlock(); ($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ; ($flag, $old_offset, $old_length) = $db->partial_clear() ; @@ -128,7 +128,7 @@ particularly relevant. The interface to Berkeley DB is implemented with a number of Perl classes. -=head1 ENV CLASS +=head1 The BerkeleyDB::Env Class The B class provides an interface to the Berkeley DB function B in Berkeley DB 2.x or B and @@ -156,14 +156,15 @@ with the C parameter, described below. 
[ -Flags => number, ] [ -SetFlags => bitmask, ] [ -LockDetect => number, ] + [ -SharedMemKey => number, ] [ -Verbose => boolean, ] [ -Encrypt => { Password => "string", Flags => number }, ] -=over 5 - All the parameters to the BerkeleyDB::Env constructor are optional. +=over 5 + =item -Home If present, this parameter should point to an existing directory. Any @@ -212,6 +213,17 @@ This option requires Berkeley DB 4.1 or better. If present, this parameter sets the size of the environments shared memory buffer pool. +=item -SharedMemKey + +If present, this parameter sets the base segment ID for the shared memory +region used by Berkeley DB. + +This option requires Berkeley DB 3.1 or better. + +Use C<$env-Eget_shm_key($id)> to find out the base segment ID used +once the environment is open. + + =item -Config This is a variation on the C<-Home> parameter, but it allows finer @@ -231,8 +243,15 @@ The code below shows an example of how it can be used. =item -ErrFile -Expects a filenme. Any errors generated internally by Berkeley DB will -be logged to this file. +Expects a filename or filenhandle. Any errors generated internally by +Berkeley DB will be logged to this file. A useful debug setting is to +open environments with either + + -ErrFile => *STDOUT + +or + + -ErrFile => *STDERR =item -ErrPrefix @@ -365,6 +384,16 @@ Returns the status of the last BerkeleyDB::Env method. Returns a pointer to the underlying DB_ENV data structure that Berkeley DB uses. +=item $env->get_shm_key($id) + +Writes the base segment ID for the shared memory region used by the +Berkeley DB environment into C<$id>. Returns 0 on success. + +This option requires Berkeley DB 4.2 or better. + +Use the C<-SharedMemKey> option when opening the environemt to set the +base segment ID. + =item $env->status() Returns the status of the last BerkeleyDB::Env method. @@ -380,7 +409,7 @@ TODO. $status = BerkeleyDB::db_remove [OPTIONS] $status = BerkeleyDB::db_rename [OPTIONS] $status = BerkeleyDB::db_verify [OPTIONS] - + =head1 THE DATABASE CLASSES B supports the following database formats: @@ -1164,11 +1193,32 @@ database and B for a B database. This method is typically used when a database has been opened with B. +=head2 $bool = $env->cds_enabled(); + +Returns true if the Berkeley DB environment C<$env> has been opened on +CDS mode. + +=head2 $bool = $db->cds_enabled(); + +Returns true if the database C<$db> has been opened on CDS mode. + =head2 $lock = $db->cds_lock(); -TODO. +Creates a CDS write lock object C<$lock>. + +It is a fatal error to attempt to create a cds_lock if the Berkeley DB +environment has not been opened in CDS mode. + +=head2 $lock->cds_unlock(); -=item $ref = $db->db_stat() +Removes a CDS lock. The destruction of the CDS lock object automatically +calls this method. + +Note that if multiple CDS lock objects are created, the underlying write +lock will not be released until all CDS lock objects are either explictly +unlocked with this method, or the CDS lock objects have been destroyed. + +=head2 $ref = $db->db_stat() Returns a reference to an associative array containing information about the database. The keys of the associative array correspond directly to the @@ -1375,7 +1425,7 @@ deleted, B will return B. The B<$flags> parameter is not used at present. -=head2 $status = $cursor->c_del($cnt [, $flags]) +=head2 $status = $cursor->c_count($cnt [, $flags]) Stores the number of duplicates at the current cursor position in B<$cnt>. @@ -1386,6 +1436,14 @@ Berkeley DB 3.1 or better. 
Returns the status of the last cursor method as a dual type. +=head2 $status = $cursor->c_pget() ; + +TODO + +=head2 $status = $cursor->c_close() + +Closes the cursor B<$cursor>. + =head2 Cursor Examples TODO @@ -1404,6 +1462,12 @@ TODO TODO. +=head1 CDS Mode + +The Berkeley Db Concurrent Data Store is a lightweight locking mechanism +that is useful in scenarios where transactions are overkill. See the +accompanying document .. for details of using this module in CDS mode. + =head1 DBM Filters A DBM Filter is a piece of code that is be used when you I @@ -1583,7 +1647,7 @@ The official web site for Berkeley DB is F. =head1 COPYRIGHT -Copyright (c) 1997-2003 Paul Marquess. All rights reserved. This program +Copyright (c) 1997-2004 Paul Marquess. All rights reserved. This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. diff --git a/db/perl/BerkeleyDB/BerkeleyDB.xs b/db/perl/BerkeleyDB/BerkeleyDB.xs index ae6410a44..bd78509f5 100644 --- a/db/perl/BerkeleyDB/BerkeleyDB.xs +++ b/db/perl/BerkeleyDB/BerkeleyDB.xs @@ -6,7 +6,7 @@ All comments/suggestions/problems are welcome - Copyright (c) 1997-2003 Paul Marquess. All rights reserved. + Copyright (c) 1997-2004 Paul Marquess. All rights reserved. This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. @@ -60,9 +60,9 @@ extern "C" { #undef __attribute__ #ifdef USE_PERLIO -# define GetFILEptr(sv) PerlIO_findFILE(IoOFP(sv_2io(sv))) +# define GetFILEptr(sv) PerlIO_findFILE(IoIFP(sv_2io(sv))) #else -# define GetFILEptr(sv) IoOFP(sv_2io(sv)) +# define GetFILEptr(sv) IoIFP(sv_2io(sv)) #endif #include @@ -121,6 +121,10 @@ extern "C" { # define AT_LEAST_DB_4_2 #endif +#if DB_VERSION_MAJOR > 4 || (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 3) +# define AT_LEAST_DB_4_3 +#endif + #ifdef __cplusplus } #endif @@ -199,7 +203,7 @@ typedef struct { int Status ; /* char ErrBuff[1000] ; */ SV * ErrPrefix ; - FILE * ErrHandle ; + SV * ErrHandle ; DB_ENV * Env ; int open_dbs ; int TxnMgrStatus ; @@ -481,6 +485,8 @@ hash_delete(char * hash, char * key); #define dieIfEnvOpened(e, m) if (e->opened) softCrash("Cannot call method BerkeleyDB::Env::%s after environment has been opened", m); +#define isSTDOUT_ERR(f) ((f) == stdout || (f) == stderr) + /* Internal Global Data */ static db_recno_t Value ; static db_recno_t zero = 0 ; @@ -516,7 +522,7 @@ my_strdup(const char *s) return NULL ; { - MEM_SIZE l = strlen(s); + MEM_SIZE l = strlen(s) + 1; char *s1 = (char *)safemalloc(l); Copy(s, s1, (MEM_SIZE)l, char); @@ -690,6 +696,8 @@ destroyDB(BerkeleyDB db) { dTHR; if (! 
PL_dirty && db->active) { + if (db->parent_env && db->parent_env->open_dbs) + -- db->parent_env->open_dbs ; -- db->open_cursors ; ((db->dbp)->close)(db->dbp, 0) ; } @@ -1057,7 +1065,8 @@ static int associate_cb(DB_callback const DBT * pkey, const DBT * pdata, DBT * skey) { dSP ; - char * pk_dat, * pd_dat, *sk_dat ; + char * pk_dat, * pd_dat ; + /* char *sk_dat ; */ int retval ; int count ; SV * skey_SV ; @@ -1130,7 +1139,11 @@ associate_cb(DB_callback const DBT * pkey, const DBT * pdata, DBT * skey) #endif /* AT_LEAST_DB_3_3 */ static void +#ifdef AT_LEAST_DB_4_3 +db_errcall_cb(const DB_ENV* dbenv, const char * db_errpfx, const char * buffer) +#else db_errcall_cb(const char * db_errpfx, char * buffer) +#endif { #if 0 @@ -1631,6 +1644,14 @@ open(env, db_home=NULL, flags=0, mode=0777) OUTPUT: RETVAL +bool +cds_enabled(env) + BerkeleyDB::Env env + CODE: + RETVAL = env->cds_enabled ; + OUTPUT: + RETVAL + int set_encrypt(env, passwd, flags) @@ -1652,9 +1673,10 @@ set_encrypt(env, passwd, flags) BerkeleyDB::Env::Raw -_db_appinit(self, ref) +_db_appinit(self, ref, errfile=NULL) char * self SV * ref + SV * errfile CODE: { HV * hash ; @@ -1662,13 +1684,13 @@ _db_appinit(self, ref) char * enc_passwd = NULL ; int enc_flags = 0 ; char * home = NULL ; - char * errfile = NULL ; char * server = NULL ; char ** config = NULL ; int flags = 0 ; int setflags = 0 ; int cachesize = 0 ; int lk_detect = 0 ; + long shm_key = 0 ; SV * errprefix = NULL; DB_ENV * env ; int status ; @@ -1685,11 +1707,14 @@ _db_appinit(self, ref) SetValue_pv(server, "Server", char *) ; SetValue_iv(cachesize, "Cachesize") ; SetValue_iv(lk_detect, "LockDetect") ; + SetValue_iv(shm_key, "SharedMemKey") ; #ifndef AT_LEAST_DB_3_2 if (setflags) softCrash("-SetFlags needs Berkeley DB 3.x or better") ; #endif /* ! AT_LEAST_DB_3 */ #ifndef AT_LEAST_DB_3_1 + if (shm_key) + softCrash("-SharedMemKey needs Berkeley DB 3.1 or better") ; if (server) softCrash("-Server needs Berkeley DB 3.1 or better") ; #endif /* ! AT_LEAST_DB_3_1 */ @@ -1723,29 +1748,33 @@ _db_appinit(self, ref) Trace(("copying errprefix\n" )) ; RETVAL->ErrPrefix = newSVsv(errprefix) ; SvPOK_only(RETVAL->ErrPrefix) ; - } + } if (RETVAL->ErrPrefix) RETVAL->Env->db_errpfx = SvPVX(RETVAL->ErrPrefix) ; - SetValue_pv(errfile, "ErrFile", char *) ; - if (errfile) { - RETVAL->ErrHandle = env->db_errfile = fopen(errfile, "w"); - if (RETVAL->ErrHandle == NULL) - croak("Cannot open file %s: %s\n", errfile, Strerror(errno)); + if (SvGMAGICAL(errfile)) + mg_get(errfile); + if (SvOK(errfile)) { + FILE * ef = GetFILEptr(errfile) ; + if (! ef) + croak("Cannot open file ErrFile", Strerror(errno)); + RETVAL->ErrHandle = newSVsv(errfile) ; + env->db_errfile = ef; } SetValue_iv(env->db_verbose, "Verbose") ; env->db_errcall = db_errcall_cb ; RETVAL->active = TRUE ; RETVAL->opened = TRUE; - RETVAL->cds_enabled = (flags & DB_INIT_CDB != 0 ? TRUE : FALSE) ; + RETVAL->cds_enabled = ((flags & DB_INIT_CDB) != 0 ? 
TRUE : FALSE) ; status = db_appinit(home, config, env, flags) ; printf(" status = %d errno %d \n", status, errno) ; Trace((" status = %d env %d Env %d\n", status, RETVAL, env)) ; if (status == 0) hash_store_iv("BerkeleyDB::Term::Env", (char *)RETVAL, 1) ; else { + if (RETVAL->ErrHandle) - fclose(RETVAL->ErrHandle) ; + SvREFCNT_dec(RETVAL->ErrHandle) ; if (RETVAL->ErrPrefix) SvREFCNT_dec(RETVAL->ErrPrefix) ; Safefree(RETVAL->Env) ; @@ -1767,6 +1796,13 @@ _db_appinit(self, ref) #ifdef AT_LEAST_DB_3_3 env->set_alloc(env, safemalloc, MyRealloc, safefree) ; #endif +#ifdef AT_LEAST_DB_3_1 + if (status == 0 && shm_key) { + status = env->set_shm_key(env, shm_key) ; + Trace(("set_shm_key [%d] returned %s\n", shm_key, + my_db_strerror(status))); + } +#endif if (status == 0 && cachesize) { status = env->set_cachesize(env, 0, cachesize, 0) ; Trace(("set_cachesize [%d] returned %s\n", @@ -1827,18 +1863,21 @@ _db_appinit(self, ref) if (RETVAL->ErrPrefix) env->set_errpfx(env, SvPVX(RETVAL->ErrPrefix)) ; - SetValue_pv(errfile, "ErrFile", char *) ; - if (errfile) { - RETVAL->ErrHandle = fopen(errfile, "w"); - if (RETVAL->ErrHandle == NULL) - croak("Cannot open file %s: %s\n", errfile, Strerror(errno)); - env->set_errfile(env, RETVAL->ErrHandle) ; + if (SvGMAGICAL(errfile)) + mg_get(errfile); + if (SvOK(errfile)) { + FILE * ef = GetFILEptr(errfile); + if (! ef) + croak("Cannot open file ErrFile", Strerror(errno)); + RETVAL->ErrHandle = newSVsv(errfile) ; + env->set_errfile(env, ef) ; + } SetValue_iv(mode, "Mode") ; env->set_errcall(env, db_errcall_cb) ; RETVAL->active = TRUE ; - RETVAL->cds_enabled = (flags & DB_INIT_CDB != 0 ? TRUE : FALSE) ; + RETVAL->cds_enabled = ((flags & DB_INIT_CDB) != 0 ? TRUE : FALSE) ; #ifdef IS_DB_3_0_x status = (env->open)(env, home, config, flags, mode) ; #else /* > 3.0 */ @@ -1853,7 +1892,7 @@ _db_appinit(self, ref) else { (env->close)(env, 0) ; if (RETVAL->ErrHandle) - fclose(RETVAL->ErrHandle) ; + SvREFCNT_dec(RETVAL->ErrHandle) ; if (RETVAL->ErrPrefix) SvREFCNT_dec(RETVAL->ErrPrefix) ; Safefree(RETVAL) ; @@ -2074,6 +2113,8 @@ status(env) OUTPUT: RETVAL + + DualType db_appexit(env) BerkeleyDB::Env env @@ -2111,7 +2152,7 @@ _DESTROY(env) (env->Env->close)(env->Env, 0) ; #endif if (env->ErrHandle) - fclose(env->ErrHandle) ; + SvREFCNT_dec(env->ErrHandle) ; if (env->ErrPrefix) SvREFCNT_dec(env->ErrPrefix) ; #if DB_VERSION_MAJOR == 2 @@ -2135,6 +2176,23 @@ _TxnMgr(env) OUTPUT: RETVAL +int +get_shm_key(env, id) + BerkeleyDB::Env env + long id = NO_INIT + INIT: + ckActive_Database(env->active) ; + CODE: +#ifndef AT_LEAST_DB_4_2 + softCrash("$env->get_shm_key needs Berkeley DB 4.2 or better") ; +#else + RETVAL = env->Env->get_shm_key(env->Env, &id); +#endif + OUTPUT: + RETVAL + id + + int set_lg_dir(env, dir) BerkeleyDB::Env env @@ -2353,10 +2411,14 @@ db_stat(db, flags=0) softCrash("$db->db_stat for a Hash needs Berkeley DB 3.x or better") ; #else DB_HASH_STAT * stat ; +#ifdef AT_LEAST_DB_4_3 + db->Status = ((db->dbp)->stat)(db->dbp, db->txn, &stat, flags) ; +#else #ifdef AT_LEAST_DB_3_3 db->Status = ((db->dbp)->stat)(db->dbp, &stat, flags) ; #else db->Status = ((db->dbp)->stat)(db->dbp, &stat, safemalloc, flags) ; +#endif #endif if (db->Status == 0) { RETVAL = (HV*)sv_2mortal((SV*)newHV()) ; @@ -2525,10 +2587,14 @@ db_stat(db, flags=0) CODE: { DB_BTREE_STAT * stat ; +#ifdef AT_LEAST_DB_4_3 + db->Status = ((db->dbp)->stat)(db->dbp, db->txn, &stat, flags) ; +#else #ifdef AT_LEAST_DB_3_3 db->Status = ((db->dbp)->stat)(db->dbp, &stat, flags) ; #else db->Status = 
((db->dbp)->stat)(db->dbp, &stat, safemalloc, flags) ; +#endif #endif if (db->Status == 0) { RETVAL = (HV*)sv_2mortal((SV*)newHV()) ; @@ -2723,10 +2789,14 @@ db_stat(db, flags=0) softCrash("$db->db_stat for a Queue needs Berkeley DB 3.x or better") ; #else /* Berkeley DB 3, or better */ DB_QUEUE_STAT * stat ; +#ifdef AT_LEAST_DB_4_3 + db->Status = ((db->dbp)->stat)(db->dbp, db->txn, &stat, flags) ; +#else #ifdef AT_LEAST_DB_3_3 db->Status = ((db->dbp)->stat)(db->dbp, &stat, flags) ; #else db->Status = ((db->dbp)->stat)(db->dbp, &stat, safemalloc, flags) ; +#endif #endif if (db->Status == 0) { RETVAL = (HV*)sv_2mortal((SV*)newHV()) ; @@ -2949,17 +3019,6 @@ ArrayOffset(db) OUTPUT: RETVAL -bool -cds_available() - CODE: -#ifndef AT_LEAST_DB_2 - RETVAL = TRUE; -#else - RETVAL = FALSE; -#endif - OUTPUT: - RETVAL - bool cds_enabled(db) diff --git a/db/perl/BerkeleyDB/Changes b/db/perl/BerkeleyDB/Changes index a0a8d3c13..8f3718a7f 100644 --- a/db/perl/BerkeleyDB/Changes +++ b/db/perl/BerkeleyDB/Changes @@ -1,5 +1,33 @@ Revision history for Perl extension BerkeleyDB. +0.26 10th October 2004 + + * Changed to allow Building with Berkeley DB 4.3 + + * added cds_lock and associated methods as a convenience to allow + safe updaing of database records when using Berkeley DB CDS mode. + + * added t/cds.t and t/pod.t + + * Modified the test suite to use "-ErrFile => *STDOUT" where + possible. This will make it easier to diagnose build issues. + + * -Errfile will now accept a filehandle as well as a filename + This means that -ErrFile => *STDOUT will get all extended error + messages displayed directly on screen. + + * Added support for set_shm_key & get_shm_key. + + * Patch from Mark Jason Dominus to add a better error message + when an odd number of parameters are passed to ParseParameters. + + * fixed off-by-one error in my_strdup + + * Fixed a problem with push, pop, shift & unshift with Queue & + Recno when used in CDS mode. These methods were not using + a write cursor behind the scenes. + Problem reported by Pavel Hlavnicka. 
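The db_stat XS hunks above add a third variant of the DB->stat() call because Berkeley DB 4.3 inserted a DB_TXN * argument (3.3 had already dropped the allocator argument). Below is a compile-time sketch of the three call shapes, mirroring the #ifdef cutoffs used in the XS code; sketch_stat is an illustrative name and malloc stands in for Perl's safemalloc in the oldest branch.

#include <stdlib.h>
#include <db.h>

/* Call DB->stat() with the argument list appropriate to the headers in use. */
static int
sketch_stat(DB *dbp, DB_TXN *txn, void *sp, u_int32_t flags)
{
    (void)txn;  /* unused when built against pre-4.3 headers */
#if DB_VERSION_MAJOR > 4 || (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 3)
    return (dbp->stat(dbp, txn, sp, flags));    /* 4.3 and later */
#elif DB_VERSION_MAJOR > 3 || (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR >= 3)
    return (dbp->stat(dbp, sp, flags));         /* 3.3 through 4.2 */
#else
    return (dbp->stat(dbp, sp, malloc, flags)); /* pre-3.3 */
#endif
}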
+ 0.25 1st November 2003 * Minor update to dbinfo diff --git a/db/perl/BerkeleyDB/MANIFEST b/db/perl/BerkeleyDB/MANIFEST index 2a646f65d..7c090a175 100644 --- a/db/perl/BerkeleyDB/MANIFEST +++ b/db/perl/BerkeleyDB/MANIFEST @@ -19,6 +19,7 @@ mkpod ppport.h README t/btree.t +t/cds.t t/db-3.0.t t/db-3.1.t t/db-3.2.t @@ -34,6 +35,7 @@ t/filter.t t/hash.t t/join.t t/mldbm.t +t/pod.t t/queue.t t/recno.t t/strict.t diff --git a/db/perl/BerkeleyDB/Makefile.PL b/db/perl/BerkeleyDB/Makefile.PL index df72a8cd5..0c926a394 100644 --- a/db/perl/BerkeleyDB/Makefile.PL +++ b/db/perl/BerkeleyDB/Makefile.PL @@ -27,11 +27,16 @@ my $LIBS ; ParseCONFIG() ; -if (defined $DB_NAME) +if (defined $DB_NAME) { $LIBS = $DB_NAME } -else { +else { if ($^O eq 'MSWin32') { $LIBS = '-llibdb' } + elsif ($^O =~ /aix/i ) { + $LIBS .= '-ldb -lpthread '; + if ($Config{'cc'} eq 'gcc' && $Config{'osvers'} eq '5.1') + { $LIBS .= '-lgcc_s' } + } else { $LIBS = '-ldb' } } @@ -40,6 +45,10 @@ else { my $OS2 = "" ; $OS2 = "-DOS2" if $^O eq 'os2' ; +my $WALL = ''; +#$WALL = ' -Wall ' if $Config{'cc'} =~ /gcc/ ; + + WriteMakefile( NAME => 'BerkeleyDB', LIBS => ["-L${LIB_DIR} $LIBS"], @@ -47,7 +56,7 @@ WriteMakefile( INC => "-I$INC_DIR", VERSION_FROM => 'BerkeleyDB.pm', XSPROTOARG => '-noprototypes', - DEFINE => "$OS2", + DEFINE => "$OS2 $WALL", #'macro' => { INSTALLDIRS => 'perl' }, 'dist' => {COMPRESS=>'gzip', SUFFIX=>'gz'}, ($] >= 5.005 diff --git a/db/perl/BerkeleyDB/README b/db/perl/BerkeleyDB/README index ec3aedff5..3c08d2c82 100644 --- a/db/perl/BerkeleyDB/README +++ b/db/perl/BerkeleyDB/README @@ -1,10 +1,10 @@ BerkeleyDB - Version 0.25 + Version 0.26 - 28th Sept 2003 + 10th Oct 2004 - Copyright (c) 1997-2003 Paul Marquess. All rights reserved. This + Copyright (c) 1997-2004 Paul Marquess. All rights reserved. This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. @@ -386,6 +386,39 @@ The solution is to use a local drive. Berkeley DB doesn't support network drives. +Berkeley DB library configured to support only DB_PRIVATE environments +---------------------------------------------------------------------- + +BerkeleyDB seems to have built correctly, but you get a series of errors +like this when you run the test harness: + + t/btree........ok 27/244 + # : Berkeley DB library configured to support only DB_PRIVATE environments + t/btree........ok 177/244 + # : Berkeley DB library configured to support only DB_PRIVATE environments + t/btree........NOK 178Can't call method "txn_begin" on an undefined value at t/btree.t line 638. + t/btree........dubious + Test returned status 2 (wstat 512, 0x200) + Scalar found where operator expected at (eval 153) line 1, near "'int' $__val" + (Missing operator before $__val?) + DIED. FAILED tests 28, 178-244 + Failed 68/244 tests, 72.13% okay + + +Some versions of Redhat Linux, and possibly some other Linux +distributions, include a seriously restricted build of the +Berkeley DB library that is incompatible with this module. See +https://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=91933 for an +exhaustive discussion on the reasons for this. + + +Solution: + +You will have to build a private copy of the Berkeley DB library and +use it when building this Perl module. + + + Linux Notes ----------- diff --git a/db/perl/BerkeleyDB/config.in b/db/perl/BerkeleyDB/config.in index f7eb07765..3c37ea937 100644 --- a/db/perl/BerkeleyDB/config.in +++ b/db/perl/BerkeleyDB/config.in @@ -8,15 +8,8 @@ # installed on your system. 
#INCLUDE = /usr/local/include -#INCLUDE = /usr/local/BerkeleyDB/include -#INCLUDE = ./libraries/2.7.5/include -#INCLUDE = ./libraries/3.0.55/include -#INCLUDE = ./libraries/3.1.17/include -#INCLUDE = ./libraries/3.3.11/include -#INCLUDE = ./libraries/4.1.12/include -#INCLUDE = ./libraries/4.1.24.NC/include -#INCLUDE = ./libraries/4.1.25/include -INCLUDE = ./libraries/4.2.41/include +#INCLUDE = ../.. +INCLUDE = /usr/local/BerkeleyDB/include # 2. Where is libdb? # @@ -24,17 +17,8 @@ INCLUDE = ./libraries/4.2.41/include # installed on your system. #LIB = /usr/local/lib -#LIB = /usr/local/BerkeleyDB/lib -#LIB = ./libraries/2.7.5/lib -#LIB = ./libraries/3.0.55/lib -#LIB = ./libraries/3.1.17/lib -#LIB = ./libraries/3.3.11/lib -#LIB = ./libraries/4.1.12/lib -#LIB = ./libraries/4.1.24.NC/lib -#LIB = ./libraries/4.1.25/lib -LIB = ./libraries/4.2.41/lib - -#LIB = ./libraries/1.85/lib +#LIB = ../.. +LIB = /usr/local/BerkeleyDB/lib # 3. Is the library called libdb? # diff --git a/db/perl/BerkeleyDB/constants.h b/db/perl/BerkeleyDB/constants.h index 02e04a7e8..98dc5eb16 100644 --- a/db/perl/BerkeleyDB/constants.h +++ b/db/perl/BerkeleyDB/constants.h @@ -99,7 +99,7 @@ constant_7 (pTHX_ const char *name, IV *iv_return) { #if (DB_VERSION_MAJOR > 2) || \ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \ - DB_VERSION_PATCH >= 0) + DB_VERSION_PATCH >= 3) *iv_return = DB_HASH; return PERL_constant_ISIV; #else @@ -148,7 +148,7 @@ static int constant_8 (pTHX_ const char *name, IV *iv_return) { /* When generated this function returned values for the list of names given here. However, subsequent manual editing may have added or removed some. - DB_AFTER DB_BTREE DB_FIRST DB_FLUSH DB_FORCE DB_QUEUE DB_RECNO */ + DB_AFTER DB_BTREE DB_FIRST DB_FLUSH DB_FORCE DB_QUEUE DB_RECNO DB_UNREF */ /* Offset 4 gives the best switch position. */ switch (name[4]) { case 'E': @@ -157,7 +157,7 @@ constant_8 (pTHX_ const char *name, IV *iv_return) { #if (DB_VERSION_MAJOR > 2) || \ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \ - DB_VERSION_PATCH >= 0) + DB_VERSION_PATCH >= 3) *iv_return = DB_RECNO; return PERL_constant_ISIV; #else @@ -195,6 +195,17 @@ constant_8 (pTHX_ const char *name, IV *iv_return) { return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; +#endif + } + break; + case 'N': + if (memEQ(name, "DB_UNREF", 8)) { + /* ^ */ +#ifdef DB_UNREF + *iv_return = DB_UNREF; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; #endif } break; @@ -215,7 +226,7 @@ constant_8 (pTHX_ const char *name, IV *iv_return) { #if (DB_VERSION_MAJOR > 2) || \ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \ - DB_VERSION_PATCH >= 0) + DB_VERSION_PATCH >= 3) *iv_return = DB_BTREE; return PERL_constant_ISIV; #else @@ -436,23 +447,34 @@ constant_10 (pTHX_ const char *name, IV *iv_return) { /* When generated this function returned values for the list of names given here. However, subsequent manual editing may have added or removed some. DB_CONSUME DB_CURRENT DB_DELETED DB_DUPSORT DB_ENCRYPT DB_ENV_CDB - DB_ENV_TXN DB_JOINENV DB_KEYLAST DB_NOPANIC DB_OK_HASH DB_PRIVATE - DB_PR_PAGE DB_RECOVER DB_SALVAGE DB_TIMEOUT DB_TXN_CKP DB_UNKNOWN - DB_UPGRADE */ - /* Offset 8 gives the best switch position. 
*/ - switch (name[8]) { - case 'D': - if (memEQ(name, "DB_ENV_CDB", 10)) { - /* ^ */ -#ifdef DB_ENV_CDB - *iv_return = DB_ENV_CDB; + DB_ENV_TXN DB_INORDER DB_JOINENV DB_KEYLAST DB_NOPANIC DB_OK_HASH + DB_PRIVATE DB_PR_PAGE DB_RECOVER DB_SALVAGE DB_SEQ_DEC DB_SEQ_INC + DB_TIMEOUT DB_TXN_CKP DB_UNKNOWN DB_UPGRADE */ + /* Offset 5 gives the best switch position. */ + switch (name[5]) { + case 'C': + if (memEQ(name, "DB_ENCRYPT", 10)) { + /* ^ */ +#ifdef DB_ENCRYPT + *iv_return = DB_ENCRYPT; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; +#endif + } + if (memEQ(name, "DB_RECOVER", 10)) { + /* ^ */ +#ifdef DB_RECOVER + *iv_return = DB_RECOVER; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } + break; + case 'G': if (memEQ(name, "DB_UPGRADE", 10)) { - /* ^ */ + /* ^ */ #ifdef DB_UPGRADE *iv_return = DB_UPGRADE; return PERL_constant_ISIV; @@ -461,38 +483,52 @@ constant_10 (pTHX_ const char *name, IV *iv_return) { #endif } break; - case 'E': - if (memEQ(name, "DB_DELETED", 10)) { - /* ^ */ -#ifdef DB_DELETED - *iv_return = DB_DELETED; + case 'I': + if (memEQ(name, "DB_JOINENV", 10)) { + /* ^ */ +#ifdef DB_JOINENV + *iv_return = DB_JOINENV; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_RECOVER", 10)) { - /* ^ */ -#ifdef DB_RECOVER - *iv_return = DB_RECOVER; + if (memEQ(name, "DB_PRIVATE", 10)) { + /* ^ */ +#ifdef DB_PRIVATE + *iv_return = DB_PRIVATE; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } break; - case 'G': - if (memEQ(name, "DB_PR_PAGE", 10)) { - /* ^ */ -#ifdef DB_PR_PAGE - *iv_return = DB_PR_PAGE; + case 'K': + if (memEQ(name, "DB_UNKNOWN", 10)) { + /* ^ */ +#if (DB_VERSION_MAJOR > 2) || \ + (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \ + (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \ + DB_VERSION_PATCH >= 3) + *iv_return = DB_UNKNOWN; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; +#endif + } + break; + case 'L': + if (memEQ(name, "DB_DELETED", 10)) { + /* ^ */ +#ifdef DB_DELETED + *iv_return = DB_DELETED; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } if (memEQ(name, "DB_SALVAGE", 10)) { - /* ^ */ + /* ^ */ #ifdef DB_SALVAGE *iv_return = DB_SALVAGE; return PERL_constant_ISIV; @@ -501,20 +537,29 @@ constant_10 (pTHX_ const char *name, IV *iv_return) { #endif } break; - case 'I': - if (memEQ(name, "DB_NOPANIC", 10)) { - /* ^ */ -#ifdef DB_NOPANIC - *iv_return = DB_NOPANIC; + case 'M': + if (memEQ(name, "DB_TIMEOUT", 10)) { + /* ^ */ +#ifdef DB_TIMEOUT + *iv_return = DB_TIMEOUT; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } break; - case 'K': + case 'N': + if (memEQ(name, "DB_CONSUME", 10)) { + /* ^ */ +#ifdef DB_CONSUME + *iv_return = DB_CONSUME; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; +#endif + } if (memEQ(name, "DB_TXN_CKP", 10)) { - /* ^ */ + /* ^ */ #ifdef DB_TXN_CKP *iv_return = DB_TXN_CKP; return PERL_constant_ISIV; @@ -523,120 +568,113 @@ constant_10 (pTHX_ const char *name, IV *iv_return) { #endif } break; - case 'M': - if (memEQ(name, "DB_CONSUME", 10)) { - /* ^ */ -#ifdef DB_CONSUME - *iv_return = DB_CONSUME; + case 'O': + if (memEQ(name, "DB_INORDER", 10)) { + /* ^ */ +#ifdef DB_INORDER + *iv_return = DB_INORDER; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } break; - case 'N': - if (memEQ(name, "DB_CURRENT", 10)) { - /* ^ */ -#ifdef DB_CURRENT - *iv_return = DB_CURRENT; + case 'P': + if (memEQ(name, "DB_DUPSORT", 10)) { + /* ^ */ 
+#ifdef DB_DUPSORT + *iv_return = DB_DUPSORT; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_JOINENV", 10)) { - /* ^ */ -#ifdef DB_JOINENV - *iv_return = DB_JOINENV; + if (memEQ(name, "DB_NOPANIC", 10)) { + /* ^ */ +#ifdef DB_NOPANIC + *iv_return = DB_NOPANIC; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } break; - case 'P': - if (memEQ(name, "DB_ENCRYPT", 10)) { - /* ^ */ -#ifdef DB_ENCRYPT - *iv_return = DB_ENCRYPT; + case 'Q': + if (memEQ(name, "DB_SEQ_DEC", 10)) { + /* ^ */ +#ifdef DB_SEQ_DEC + *iv_return = DB_SEQ_DEC; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - break; - case 'R': - if (memEQ(name, "DB_DUPSORT", 10)) { - /* ^ */ -#ifdef DB_DUPSORT - *iv_return = DB_DUPSORT; + if (memEQ(name, "DB_SEQ_INC", 10)) { + /* ^ */ +#ifdef DB_SEQ_INC + *iv_return = DB_SEQ_INC; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } break; - case 'S': - if (memEQ(name, "DB_KEYLAST", 10)) { - /* ^ */ -#ifdef DB_KEYLAST - *iv_return = DB_KEYLAST; + case 'R': + if (memEQ(name, "DB_CURRENT", 10)) { + /* ^ */ +#ifdef DB_CURRENT + *iv_return = DB_CURRENT; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_OK_HASH", 10)) { - /* ^ */ -#ifdef DB_OK_HASH - *iv_return = DB_OK_HASH; + break; + case 'V': + if (memEQ(name, "DB_ENV_CDB", 10)) { + /* ^ */ +#ifdef DB_ENV_CDB + *iv_return = DB_ENV_CDB; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - break; - case 'T': - if (memEQ(name, "DB_PRIVATE", 10)) { - /* ^ */ -#ifdef DB_PRIVATE - *iv_return = DB_PRIVATE; + if (memEQ(name, "DB_ENV_TXN", 10)) { + /* ^ */ +#ifdef DB_ENV_TXN + *iv_return = DB_ENV_TXN; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } break; - case 'U': - if (memEQ(name, "DB_TIMEOUT", 10)) { - /* ^ */ -#ifdef DB_TIMEOUT - *iv_return = DB_TIMEOUT; + case 'Y': + if (memEQ(name, "DB_KEYLAST", 10)) { + /* ^ */ +#ifdef DB_KEYLAST + *iv_return = DB_KEYLAST; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } break; - case 'W': - if (memEQ(name, "DB_UNKNOWN", 10)) { - /* ^ */ -#if (DB_VERSION_MAJOR > 2) || \ - (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \ - (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \ - DB_VERSION_PATCH >= 0) - *iv_return = DB_UNKNOWN; + case '_': + if (memEQ(name, "DB_OK_HASH", 10)) { + /* ^ */ +#ifdef DB_OK_HASH + *iv_return = DB_OK_HASH; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - break; - case 'X': - if (memEQ(name, "DB_ENV_TXN", 10)) { - /* ^ */ -#ifdef DB_ENV_TXN - *iv_return = DB_ENV_TXN; + if (memEQ(name, "DB_PR_PAGE", 10)) { + /* ^ */ +#ifdef DB_PR_PAGE + *iv_return = DB_PR_PAGE; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; @@ -651,13 +689,13 @@ static int constant_11 (pTHX_ const char *name, IV *iv_return) { /* When generated this function returned values for the list of names given here. However, subsequent manual editing may have added or removed some. 
- DB_APP_INIT DB_ARCH_ABS DB_ARCH_LOG DB_FILEOPEN DB_FIXEDLEN DB_GET_BOTH - DB_INIT_CDB DB_INIT_LOG DB_INIT_REP DB_INIT_TXN DB_KEYEMPTY DB_KEYEXIST - DB_KEYFIRST DB_LOCKDOWN DB_LOCK_GET DB_LOCK_PUT DB_LOGMAGIC DB_LOG_DISK - DB_LOG_PERM DB_MULTIPLE DB_NEXT_DUP DB_NOSERVER DB_NOTFOUND DB_OK_BTREE - DB_OK_QUEUE DB_OK_RECNO DB_POSITION DB_QAMMAGIC DB_RENUMBER DB_SNAPSHOT - DB_TRUNCATE DB_TXNMAGIC DB_TXN_LOCK DB_TXN_REDO DB_TXN_SYNC DB_TXN_UNDO - DB_WRNOSYNC DB_YIELDCPU */ + DB_APP_INIT DB_ARCH_ABS DB_ARCH_LOG DB_DEGREE_2 DB_FILEOPEN DB_FIXEDLEN + DB_GET_BOTH DB_INIT_CDB DB_INIT_LOG DB_INIT_REP DB_INIT_TXN DB_KEYEMPTY + DB_KEYEXIST DB_KEYFIRST DB_LOCKDOWN DB_LOCK_GET DB_LOCK_PUT DB_LOGMAGIC + DB_LOG_DISK DB_LOG_PERM DB_MULTIPLE DB_NEXT_DUP DB_NOSERVER DB_NOTFOUND + DB_OK_BTREE DB_OK_QUEUE DB_OK_RECNO DB_POSITION DB_QAMMAGIC DB_RENUMBER + DB_SEQ_WRAP DB_SNAPSHOT DB_STAT_ALL DB_TRUNCATE DB_TXNMAGIC DB_TXN_LOCK + DB_TXN_REDO DB_TXN_SYNC DB_TXN_UNDO DB_WRNOSYNC DB_YIELDCPU */ /* Offset 8 gives the best switch position. */ switch (name[8]) { case 'A': @@ -668,6 +706,15 @@ constant_11 (pTHX_ const char *name, IV *iv_return) { return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; +#endif + } + if (memEQ(name, "DB_STAT_ALL", 11)) { + /* ^ */ +#ifdef DB_STAT_ALL + *iv_return = DB_STAT_ALL; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; #endif } if (memEQ(name, "DB_TRUNCATE", 11)) { @@ -732,6 +779,15 @@ constant_11 (pTHX_ const char *name, IV *iv_return) { } break; case 'E': + if (memEQ(name, "DB_DEGREE_2", 11)) { + /* ^ */ +#ifdef DB_DEGREE_2 + *iv_return = DB_DEGREE_2; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; +#endif + } if (memEQ(name, "DB_LOG_PERM", 11)) { /* ^ */ #ifdef DB_LOG_PERM @@ -766,7 +822,7 @@ constant_11 (pTHX_ const char *name, IV *iv_return) { #if (DB_VERSION_MAJOR > 2) || \ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \ - DB_VERSION_PATCH >= 0) + DB_VERSION_PATCH >= 3) *iv_return = DB_LOCK_GET; return PERL_constant_ISIV; #else @@ -943,7 +999,7 @@ constant_11 (pTHX_ const char *name, IV *iv_return) { #if (DB_VERSION_MAJOR > 2) || \ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \ - DB_VERSION_PATCH >= 0) + DB_VERSION_PATCH >= 3) *iv_return = DB_LOCK_PUT; return PERL_constant_ISIV; #else @@ -986,6 +1042,15 @@ constant_11 (pTHX_ const char *name, IV *iv_return) { return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; +#endif + } + if (memEQ(name, "DB_SEQ_WRAP", 11)) { + /* ^ */ +#ifdef DB_SEQ_WRAP + *iv_return = DB_SEQ_WRAP; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; #endif } break; @@ -1051,14 +1116,14 @@ constant_12 (pTHX_ const char *name, IV *iv_return) { /* When generated this function returned values for the list of names given here. However, subsequent manual editing may have added or removed some. 
DB_ARCH_DATA DB_CDB_ALLDB DB_CL_WRITER DB_DELIMITER DB_DIRECT_DB - DB_DUPCURSOR DB_ENV_FATAL DB_FAST_STAT DB_GET_BOTHC DB_GET_RECNO - DB_HASHMAGIC DB_INIT_LOCK DB_JOIN_ITEM DB_LOCKMAGIC DB_LOCK_DUMP - DB_LOCK_RW_N DB_LOGOLDVER DB_MAX_PAGES DB_MPOOL_NEW DB_NEEDSPLIT - DB_NODUPDATA DB_NOLOCKING DB_NORECURSE DB_OVERWRITE DB_PAGEYIELD - DB_PAGE_LOCK DB_PERMANENT DB_POSITIONI DB_PRINTABLE DB_QAMOLDVER - DB_RPCCLIENT DB_SET_RANGE DB_SET_RECNO DB_SWAPBYTES DB_TEMPORARY - DB_TXN_ABORT DB_TXN_APPLY DB_TXN_PRINT DB_WRITELOCK DB_WRITEOPEN - DB_XA_CREATE */ + DB_DSYNC_LOG DB_DUPCURSOR DB_ENV_FATAL DB_FAST_STAT DB_GET_BOTHC + DB_GET_RECNO DB_HASHMAGIC DB_INIT_LOCK DB_JOIN_ITEM DB_LOCKMAGIC + DB_LOCK_DUMP DB_LOCK_RW_N DB_LOGOLDVER DB_MAX_PAGES DB_MPOOL_NEW + DB_NEEDSPLIT DB_NODUPDATA DB_NOLOCKING DB_NORECURSE DB_OVERWRITE + DB_PAGEYIELD DB_PAGE_LOCK DB_PERMANENT DB_POSITIONI DB_PRINTABLE + DB_QAMOLDVER DB_RPCCLIENT DB_SET_RANGE DB_SET_RECNO DB_SWAPBYTES + DB_TEMPORARY DB_TXN_ABORT DB_TXN_APPLY DB_TXN_PRINT DB_WRITELOCK + DB_WRITEOPEN DB_XA_CREATE */ /* Offset 3 gives the best switch position. */ switch (name[3]) { case 'A': @@ -1109,6 +1174,15 @@ constant_12 (pTHX_ const char *name, IV *iv_return) { return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; +#endif + } + if (memEQ(name, "DB_DSYNC_LOG", 12)) { + /* ^ */ +#ifdef DB_DSYNC_LOG + *iv_return = DB_DSYNC_LOG; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; #endif } if (memEQ(name, "DB_DUPCURSOR", 12)) { @@ -1211,7 +1285,7 @@ constant_12 (pTHX_ const char *name, IV *iv_return) { #if (DB_VERSION_MAJOR > 2) || \ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \ - DB_VERSION_PATCH >= 0) + DB_VERSION_PATCH >= 3) *iv_return = DB_LOCK_DUMP; return PERL_constant_ISIV; #else @@ -1419,7 +1493,7 @@ constant_12 (pTHX_ const char *name, IV *iv_return) { #if (DB_VERSION_MAJOR > 3) || \ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \ - DB_VERSION_PATCH >= 12) + DB_VERSION_PATCH >= 14) *iv_return = DB_TXN_ABORT; return PERL_constant_ISIV; #else @@ -1431,7 +1505,7 @@ constant_12 (pTHX_ const char *name, IV *iv_return) { #if (DB_VERSION_MAJOR > 4) || \ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \ - DB_VERSION_PATCH >= 7) + DB_VERSION_PATCH >= 14) *iv_return = DB_TXN_APPLY; return PERL_constant_ISIV; #else @@ -1443,7 +1517,7 @@ constant_12 (pTHX_ const char *name, IV *iv_return) { #if (DB_VERSION_MAJOR > 4) || \ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \ - DB_VERSION_PATCH >= 17) + DB_VERSION_PATCH >= 24) *iv_return = DB_TXN_PRINT; return PERL_constant_ISIV; #else @@ -1492,13 +1566,14 @@ constant_13 (pTHX_ const char *name, IV *iv_return) { here. However, subsequent manual editing may have added or removed some. 
DB_AGGRESSIVE DB_BTREEMAGIC DB_CHECKPOINT DB_DIRECT_LOG DB_DIRTY_READ DB_DONOTINDEX DB_ENV_CREATE DB_ENV_NOMMAP DB_ENV_THREAD DB_HASHOLDVER - DB_INCOMPLETE DB_INIT_MPOOL DB_LOCK_NORUN DB_LOCK_RIW_N DB_LOCK_TRADE - DB_LOGVERSION DB_LOG_CHKPNT DB_LOG_COMMIT DB_LOG_LOCKED DB_LOG_NOCOPY - DB_MPOOL_LAST DB_MUTEXDEBUG DB_MUTEXLOCKS DB_NEXT_NODUP DB_NOORDERCHK - DB_PREV_NODUP DB_PR_HEADERS DB_QAMVERSION DB_RDWRMASTER DB_REGISTERED - DB_REP_CLIENT DB_REP_CREATE DB_REP_ISPERM DB_REP_MASTER DB_SEQUENTIAL - DB_STAT_CLEAR DB_SYSTEM_MEM DB_TXNVERSION DB_TXN_NOSYNC DB_TXN_NOWAIT - DB_VERIFY_BAD DB_debug_FLAG DB_user_BEGIN */ + DB_INCOMPLETE DB_INIT_MPOOL DB_LOCK_ABORT DB_LOCK_NORUN DB_LOCK_RIW_N + DB_LOCK_TRADE DB_LOGVERSION DB_LOG_CHKPNT DB_LOG_COMMIT DB_LOG_LOCKED + DB_LOG_NOCOPY DB_LOG_RESEND DB_MPOOL_FREE DB_MPOOL_LAST DB_MUTEXDEBUG + DB_MUTEXLOCKS DB_NEXT_NODUP DB_NOORDERCHK DB_PREV_NODUP DB_PR_HEADERS + DB_QAMVERSION DB_RDWRMASTER DB_REGISTERED DB_REP_CLIENT DB_REP_CREATE + DB_REP_ISPERM DB_REP_MASTER DB_SEQUENTIAL DB_STAT_CLEAR DB_SYSTEM_MEM + DB_TXNVERSION DB_TXN_NOSYNC DB_TXN_NOWAIT DB_VERIFY_BAD DB_debug_FLAG + DB_user_BEGIN */ /* Offset 5 gives the best switch position. */ switch (name[5]) { case 'A': @@ -1520,6 +1595,15 @@ constant_13 (pTHX_ const char *name, IV *iv_return) { return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; +#endif + } + if (memEQ(name, "DB_LOCK_ABORT", 13)) { + /* ^ */ +#ifdef DB_LOCK_ABORT + *iv_return = DB_LOCK_ABORT; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; #endif } if (memEQ(name, "DB_LOCK_NORUN", 13)) { @@ -1545,7 +1629,7 @@ constant_13 (pTHX_ const char *name, IV *iv_return) { #if (DB_VERSION_MAJOR > 4) || \ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \ - DB_VERSION_PATCH >= 17) + DB_VERSION_PATCH >= 24) *iv_return = DB_LOCK_TRADE; return PERL_constant_ISIV; #else @@ -1626,6 +1710,15 @@ constant_13 (pTHX_ const char *name, IV *iv_return) { return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; +#endif + } + if (memEQ(name, "DB_LOG_RESEND", 13)) { + /* ^ */ +#ifdef DB_LOG_RESEND + *iv_return = DB_LOG_RESEND; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; #endif } if (memEQ(name, "DB_REGISTERED", 13)) { @@ -1699,6 +1792,15 @@ constant_13 (pTHX_ const char *name, IV *iv_return) { } break; case 'O': + if (memEQ(name, "DB_MPOOL_FREE", 13)) { + /* ^ */ +#ifdef DB_MPOOL_FREE + *iv_return = DB_MPOOL_FREE; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; +#endif + } if (memEQ(name, "DB_MPOOL_LAST", 13)) { /* ^ */ #ifdef DB_MPOOL_LAST @@ -1940,11 +2042,11 @@ constant_14 (pTHX_ const char *name, IV *iv_return) { DB_ARCH_REMOVE DB_AUTO_COMMIT DB_BTREEOLDVER DB_CHKSUM_SHA1 DB_EID_INVALID DB_ENCRYPT_AES DB_ENV_APPINIT DB_ENV_DBLOCAL DB_ENV_LOCKING DB_ENV_LOGGING DB_ENV_NOPANIC DB_ENV_PRIVATE DB_FILE_ID_LEN DB_HANDLE_LOCK DB_HASHVERSION - DB_INVALID_EID DB_JOIN_NOSORT DB_LOCKVERSION DB_LOCK_EXPIRE DB_LOCK_NOWAIT - DB_LOCK_OLDEST DB_LOCK_RANDOM DB_LOCK_RECORD DB_LOCK_REMOVE DB_LOCK_SWITCH - DB_MAX_RECORDS DB_MPOOL_CLEAN DB_MPOOL_DIRTY DB_NOOVERWRITE DB_NOSERVER_ID - DB_ODDFILESIZE DB_OLD_VERSION DB_OPEN_CALLED DB_RECORDCOUNT DB_RECORD_LOCK - DB_REGION_ANON DB_REGION_INIT DB_REGION_NAME DB_RENAMEMAGIC DB_REP_NEWSITE + DB_JOIN_NOSORT DB_LOCKVERSION DB_LOCK_EXPIRE DB_LOCK_NOWAIT DB_LOCK_OLDEST + DB_LOCK_RANDOM DB_LOCK_RECORD DB_LOCK_REMOVE DB_LOCK_SWITCH DB_MAX_RECORDS + DB_MPOOL_CLEAN DB_MPOOL_DIRTY DB_NOOVERWRITE DB_NOSERVER_ID DB_ODDFILESIZE 
+ DB_OLD_VERSION DB_OPEN_CALLED DB_RECORDCOUNT DB_RECORD_LOCK DB_REGION_ANON + DB_REGION_INIT DB_REGION_NAME DB_RENAMEMAGIC DB_REP_EGENCHG DB_REP_NEWSITE DB_REP_NOTPERM DB_REP_UNAVAIL DB_REVSPLITOFF DB_RUNRECOVERY DB_SET_TXN_NOW DB_USE_ENVIRON DB_WRITECURSOR DB_XIDDATASIZE */ /* Offset 9 gives the best switch position. */ @@ -2033,15 +2135,6 @@ constant_14 (pTHX_ const char *name, IV *iv_return) { return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; -#endif - } - if (memEQ(name, "DB_INVALID_EID", 14)) { - /* ^ */ -#ifdef DB_INVALID_EID - *iv_return = DB_INVALID_EID; - return PERL_constant_ISIV; -#else - return PERL_constant_NOTDEF; #endif } if (memEQ(name, "DB_MPOOL_DIRTY", 14)) { @@ -2098,6 +2191,15 @@ constant_14 (pTHX_ const char *name, IV *iv_return) { return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; +#endif + } + if (memEQ(name, "DB_REP_EGENCHG", 14)) { + /* ^ */ +#ifdef DB_REP_EGENCHG + *iv_return = DB_REP_EGENCHG; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; #endif } break; @@ -2425,15 +2527,16 @@ static int constant_15 (pTHX_ const char *name, IV *iv_return) { /* When generated this function returned values for the list of names given here. However, subsequent manual editing may have added or removed some. - DB_APPLY_LOGREG DB_BTREEVERSION DB_CONSUME_WAIT DB_ENV_LOCKDOWN - DB_ENV_PANIC_OK DB_ENV_YIELDCPU DB_LOCK_DEFAULT DB_LOCK_INHERIT - DB_LOCK_NOTHELD DB_LOCK_PUT_ALL DB_LOCK_PUT_OBJ DB_LOCK_TIMEOUT - DB_LOCK_UPGRADE DB_LOG_WRNOSYNC DB_MPOOL_CREATE DB_MPOOL_EXTENT - DB_MPOOL_NOFILE DB_MPOOL_UNLINK DB_MULTIPLE_KEY DB_OPFLAGS_MASK - DB_ORDERCHKONLY DB_PRIORITY_LOW DB_REGION_MAGIC DB_REP_LOGSONLY - DB_REP_NOBUFFER DB_REP_OUTDATED DB_SURPRISE_KID DB_TEST_POSTLOG - DB_TEST_PREOPEN DB_TXN_GETPGNOS DB_TXN_LOCK_2PL DB_TXN_LOG_MASK - DB_TXN_LOG_REDO DB_TXN_LOG_UNDO DB_VERIFY_FATAL */ + DB_APPLY_LOGREG DB_BTREEVERSION DB_BUFFER_SMALL DB_CONSUME_WAIT + DB_ENV_LOCKDOWN DB_ENV_PANIC_OK DB_ENV_YIELDCPU DB_LOCK_DEFAULT + DB_LOCK_INHERIT DB_LOCK_NOTHELD DB_LOCK_PUT_ALL DB_LOCK_PUT_OBJ + DB_LOCK_TIMEOUT DB_LOCK_UPGRADE DB_LOG_INMEMORY DB_LOG_WRNOSYNC + DB_MPOOL_CREATE DB_MPOOL_EXTENT DB_MPOOL_NOFILE DB_MPOOL_UNLINK + DB_MULTIPLE_KEY DB_OPFLAGS_MASK DB_ORDERCHKONLY DB_PRIORITY_LOW + DB_REGION_MAGIC DB_REP_LOGREADY DB_REP_LOGSONLY DB_REP_NOBUFFER + DB_REP_OUTDATED DB_REP_PAGEDONE DB_SURPRISE_KID DB_TEST_POSTLOG + DB_TEST_PREOPEN DB_TXN_LOCK_2PL DB_TXN_LOG_MASK DB_TXN_LOG_REDO + DB_TXN_LOG_UNDO DB_VERIFY_FATAL */ /* Offset 10 gives the best switch position. 
*/ switch (name[10]) { case 'D': @@ -2448,6 +2551,15 @@ constant_15 (pTHX_ const char *name, IV *iv_return) { } break; case 'E': + if (memEQ(name, "DB_LOG_INMEMORY", 15)) { + /* ^ */ +#ifdef DB_LOG_INMEMORY + *iv_return = DB_LOG_INMEMORY; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; +#endif + } if (memEQ(name, "DB_MULTIPLE_KEY", 15)) { /* ^ */ #ifdef DB_MULTIPLE_KEY @@ -2455,6 +2567,15 @@ constant_15 (pTHX_ const char *name, IV *iv_return) { return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; +#endif + } + if (memEQ(name, "DB_REP_PAGEDONE", 15)) { + /* ^ */ +#ifdef DB_REP_PAGEDONE + *iv_return = DB_REP_PAGEDONE; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; #endif } if (memEQ(name, "DB_SURPRISE_KID", 15)) { @@ -2578,7 +2699,7 @@ constant_15 (pTHX_ const char *name, IV *iv_return) { #if (DB_VERSION_MAJOR > 4) || \ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \ - DB_VERSION_PATCH >= 7) + DB_VERSION_PATCH >= 14) *iv_return = DB_LOCK_TIMEOUT; return PERL_constant_ISIV; #else @@ -2632,20 +2753,6 @@ constant_15 (pTHX_ const char *name, IV *iv_return) { return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; -#endif - } - break; - case 'P': - if (memEQ(name, "DB_TXN_GETPGNOS", 15)) { - /* ^ */ -#if (DB_VERSION_MAJOR > 4) || \ - (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \ - (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \ - DB_VERSION_PATCH >= 17) - *iv_return = DB_TXN_GETPGNOS; - return PERL_constant_ISIV; -#else - return PERL_constant_NOTDEF; #endif } break; @@ -2666,10 +2773,28 @@ constant_15 (pTHX_ const char *name, IV *iv_return) { return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; +#endif + } + if (memEQ(name, "DB_REP_LOGREADY", 15)) { + /* ^ */ +#ifdef DB_REP_LOGREADY + *iv_return = DB_REP_LOGREADY; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; #endif } break; case 'S': + if (memEQ(name, "DB_BUFFER_SMALL", 15)) { + /* ^ */ +#ifdef DB_BUFFER_SMALL + *iv_return = DB_BUFFER_SMALL; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; +#endif + } if (memEQ(name, "DB_REP_LOGSONLY", 15)) { /* ^ */ #ifdef DB_REP_LOGSONLY @@ -2704,7 +2829,7 @@ constant_15 (pTHX_ const char *name, IV *iv_return) { #if (DB_VERSION_MAJOR > 2) || \ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \ - DB_VERSION_PATCH >= 0) + DB_VERSION_PATCH >= 3) *iv_return = DB_LOCK_PUT_ALL; return PERL_constant_ISIV; #else @@ -2716,7 +2841,7 @@ constant_15 (pTHX_ const char *name, IV *iv_return) { #if (DB_VERSION_MAJOR > 2) || \ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \ - DB_VERSION_PATCH >= 0) + DB_VERSION_PATCH >= 3) *iv_return = DB_LOCK_PUT_OBJ; return PERL_constant_ISIV; #else @@ -2752,7 +2877,7 @@ constant_15 (pTHX_ const char *name, IV *iv_return) { #if (DB_VERSION_MAJOR > 4) || \ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \ - DB_VERSION_PATCH >= 17) + DB_VERSION_PATCH >= 24) *iv_return = DB_PRIORITY_LOW; return PERL_constant_ISIV; #else @@ -2815,53 +2940,51 @@ static int constant_16 (pTHX_ const char *name, IV *iv_return) { /* When generated this function returned values for the list of names given here. However, subsequent manual editing may have added or removed some. 
- DB_BROADCAST_EID DB_CACHED_COUNTS DB_EID_BROADCAST DB_ENV_CDB_ALLDB - DB_ENV_DIRECT_DB DB_ENV_NOLOCKING DB_ENV_OVERWRITE DB_ENV_RPCCLIENT + DB_CACHED_COUNTS DB_EID_BROADCAST DB_ENV_CDB_ALLDB DB_ENV_DIRECT_DB + DB_ENV_DSYNC_LOG DB_ENV_NOLOCKING DB_ENV_OVERWRITE DB_ENV_RPCCLIENT DB_FCNTL_LOCKING DB_JAVA_CALLBACK DB_LOCK_CONFLICT DB_LOCK_DEADLOCK - DB_LOCK_MAXLOCKS DB_LOCK_MINLOCKS DB_LOCK_MINWRITE DB_LOCK_NOTEXIST - DB_LOCK_PUT_READ DB_LOCK_YOUNGEST DB_LOGC_BUF_SIZE DB_MPOOL_DISCARD - DB_MPOOL_PRIVATE DB_NOSERVER_HOME DB_PAGE_NOTFOUND DB_PRIORITY_HIGH - DB_RECOVER_FATAL DB_REP_DUPMASTER DB_REP_NEWMASTER DB_REP_PERMANENT - DB_SECONDARY_BAD DB_TEST_POSTOPEN DB_TEST_POSTSYNC DB_TXN_LOCK_MASK - DB_TXN_OPENFILES DB_VERB_CHKPOINT DB_VERB_DEADLOCK DB_VERB_RECOVERY - DB_VERB_WAITSFOR DB_VERSION_MAJOR DB_VERSION_MINOR DB_VERSION_PATCH - DB_VRFY_FLAGMASK */ - /* Offset 12 gives the best switch position. */ - switch (name[12]) { + DB_LOCK_MAXLOCKS DB_LOCK_MAXWRITE DB_LOCK_MINLOCKS DB_LOCK_MINWRITE + DB_LOCK_NOTEXIST DB_LOCK_PUT_READ DB_LOCK_YOUNGEST DB_LOGC_BUF_SIZE + DB_MPOOL_DISCARD DB_MPOOL_PRIVATE DB_NOSERVER_HOME DB_PAGE_NOTFOUND + DB_PRIORITY_HIGH DB_RECOVER_FATAL DB_REP_DUPMASTER DB_REP_NEWMASTER + DB_REP_PERMANENT DB_SECONDARY_BAD DB_SEQ_RANGE_SET DB_TEST_POSTOPEN + DB_TEST_POSTSYNC DB_TXN_LOCK_MASK DB_TXN_OPENFILES DB_VERB_CHKPOINT + DB_VERB_DEADLOCK DB_VERB_RECOVERY DB_VERB_WAITSFOR DB_VERSION_MAJOR + DB_VERSION_MINOR DB_VERSION_PATCH DB_VRFY_FLAGMASK */ + /* Offset 10 gives the best switch position. */ + switch (name[10]) { case 'A': - if (memEQ(name, "DB_RECOVER_FATAL", 16)) { - /* ^ */ -#ifdef DB_RECOVER_FATAL - *iv_return = DB_RECOVER_FATAL; - return PERL_constant_ISIV; + if (memEQ(name, "DB_EID_BROADCAST", 16)) { + /* ^ */ +#ifdef DB_EID_BROADCAST + *iv_return = DB_EID_BROADCAST; + return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_VERSION_MAJOR", 16)) { - /* ^ */ -#ifdef DB_VERSION_MAJOR - *iv_return = DB_VERSION_MAJOR; + if (memEQ(name, "DB_LOCK_DEADLOCK", 16)) { + /* ^ */ +#ifdef DB_LOCK_DEADLOCK + *iv_return = DB_LOCK_DEADLOCK; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_VERSION_PATCH", 16)) { - /* ^ */ -#ifdef DB_VERSION_PATCH - *iv_return = DB_VERSION_PATCH; + if (memEQ(name, "DB_VERB_DEADLOCK", 16)) { + /* ^ */ +#ifdef DB_VERB_DEADLOCK + *iv_return = DB_VERB_DEADLOCK; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - break; - case 'B': - if (memEQ(name, "DB_JAVA_CALLBACK", 16)) { - /* ^ */ -#ifdef DB_JAVA_CALLBACK - *iv_return = DB_JAVA_CALLBACK; + if (memEQ(name, "DB_VRFY_FLAGMASK", 16)) { + /* ^ */ +#ifdef DB_VRFY_FLAGMASK + *iv_return = DB_VRFY_FLAGMASK; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; @@ -2869,85 +2992,81 @@ constant_16 (pTHX_ const char *name, IV *iv_return) { } break; case 'C': - if (memEQ(name, "DB_EID_BROADCAST", 16)) { - /* ^ */ -#ifdef DB_EID_BROADCAST - *iv_return = DB_EID_BROADCAST; + if (memEQ(name, "DB_CACHED_COUNTS", 16)) { + /* ^ */ +#ifdef DB_CACHED_COUNTS + *iv_return = DB_CACHED_COUNTS; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_MPOOL_DISCARD", 16)) { - /* ^ */ -#ifdef DB_MPOOL_DISCARD - *iv_return = DB_MPOOL_DISCARD; + if (memEQ(name, "DB_ENV_RPCCLIENT", 16)) { + /* ^ */ +#ifdef DB_ENV_RPCCLIENT + *iv_return = DB_ENV_RPCCLIENT; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - break; - case 'G': - if (memEQ(name, "DB_LOCK_YOUNGEST", 16)) { - /* ^ 
*/ -#ifdef DB_LOCK_YOUNGEST - *iv_return = DB_LOCK_YOUNGEST; + if (memEQ(name, "DB_VERB_RECOVERY", 16)) { + /* ^ */ +#ifdef DB_VERB_RECOVERY + *iv_return = DB_VERB_RECOVERY; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } break; - case 'H': - if (memEQ(name, "DB_NOSERVER_HOME", 16)) { - /* ^ */ -#ifdef DB_NOSERVER_HOME - *iv_return = DB_NOSERVER_HOME; + case 'E': + if (memEQ(name, "DB_ENV_DIRECT_DB", 16)) { + /* ^ */ +#ifdef DB_ENV_DIRECT_DB + *iv_return = DB_ENV_DIRECT_DB; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_PRIORITY_HIGH", 16)) { - /* ^ */ -#if (DB_VERSION_MAJOR > 4) || \ - (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \ - (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \ - DB_VERSION_PATCH >= 17) - *iv_return = DB_PRIORITY_HIGH; + break; + case 'F': + if (memEQ(name, "DB_LOGC_BUF_SIZE", 16)) { + /* ^ */ +#ifdef DB_LOGC_BUF_SIZE + *iv_return = DB_LOGC_BUF_SIZE; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } break; - case 'I': - if (memEQ(name, "DB_ENV_RPCCLIENT", 16)) { - /* ^ */ -#ifdef DB_ENV_RPCCLIENT - *iv_return = DB_ENV_RPCCLIENT; + case 'G': + if (memEQ(name, "DB_SEQ_RANGE_SET", 16)) { + /* ^ */ +#ifdef DB_SEQ_RANGE_SET + *iv_return = DB_SEQ_RANGE_SET; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_TXN_OPENFILES", 16)) { - /* ^ */ -#if (DB_VERSION_MAJOR > 3) || \ - (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \ - (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \ - DB_VERSION_PATCH >= 12) - *iv_return = DB_TXN_OPENFILES; + break; + case 'I': + if (memEQ(name, "DB_MPOOL_DISCARD", 16)) { + /* ^ */ +#ifdef DB_MPOOL_DISCARD + *iv_return = DB_MPOOL_DISCARD; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_VERSION_MINOR", 16)) { - /* ^ */ -#ifdef DB_VERSION_MINOR - *iv_return = DB_VERSION_MINOR; + if (memEQ(name, "DB_VERB_WAITSFOR", 16)) { + /* ^ */ +#ifdef DB_VERB_WAITSFOR + *iv_return = DB_VERB_WAITSFOR; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; @@ -2955,19 +3074,19 @@ constant_16 (pTHX_ const char *name, IV *iv_return) { } break; case 'K': - if (memEQ(name, "DB_ENV_NOLOCKING", 16)) { - /* ^ */ -#ifdef DB_ENV_NOLOCKING - *iv_return = DB_ENV_NOLOCKING; + if (memEQ(name, "DB_TXN_LOCK_MASK", 16)) { + /* ^ */ +#ifdef DB_TXN_LOCK_MASK + *iv_return = DB_TXN_LOCK_MASK; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_FCNTL_LOCKING", 16)) { - /* ^ */ -#ifdef DB_FCNTL_LOCKING - *iv_return = DB_FCNTL_LOCKING; + if (memEQ(name, "DB_VERB_CHKPOINT", 16)) { + /* ^ */ +#ifdef DB_VERB_CHKPOINT + *iv_return = DB_VERB_CHKPOINT; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; @@ -2975,115 +3094,109 @@ constant_16 (pTHX_ const char *name, IV *iv_return) { } break; case 'L': - if (memEQ(name, "DB_ENV_CDB_ALLDB", 16)) { - /* ^ */ -#ifdef DB_ENV_CDB_ALLDB - *iv_return = DB_ENV_CDB_ALLDB; + if (memEQ(name, "DB_JAVA_CALLBACK", 16)) { + /* ^ */ +#ifdef DB_JAVA_CALLBACK + *iv_return = DB_JAVA_CALLBACK; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_LOCK_CONFLICT", 16)) { - /* ^ */ -#ifdef DB_LOCK_CONFLICT - *iv_return = DB_LOCK_CONFLICT; + break; + case 'M': + if (memEQ(name, "DB_REP_DUPMASTER", 16)) { + /* ^ */ +#ifdef DB_REP_DUPMASTER + *iv_return = DB_REP_DUPMASTER; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_LOCK_DEADLOCK", 16)) { 
- /* ^ */ -#ifdef DB_LOCK_DEADLOCK - *iv_return = DB_LOCK_DEADLOCK; + if (memEQ(name, "DB_REP_NEWMASTER", 16)) { + /* ^ */ +#ifdef DB_REP_NEWMASTER + *iv_return = DB_REP_NEWMASTER; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_VERB_DEADLOCK", 16)) { - /* ^ */ -#ifdef DB_VERB_DEADLOCK - *iv_return = DB_VERB_DEADLOCK; + if (memEQ(name, "DB_REP_PERMANENT", 16)) { + /* ^ */ +#ifdef DB_REP_PERMANENT + *iv_return = DB_REP_PERMANENT; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } break; - case 'M': - if (memEQ(name, "DB_TXN_LOCK_MASK", 16)) { - /* ^ */ -#ifdef DB_TXN_LOCK_MASK - *iv_return = DB_TXN_LOCK_MASK; - return PERL_constant_ISIV; -#else - return PERL_constant_NOTDEF; -#endif - } - if (memEQ(name, "DB_VRFY_FLAGMASK", 16)) { - /* ^ */ -#ifdef DB_VRFY_FLAGMASK - *iv_return = DB_VRFY_FLAGMASK; + case 'N': + if (memEQ(name, "DB_ENV_DSYNC_LOG", 16)) { + /* ^ */ +#ifdef DB_ENV_DSYNC_LOG + *iv_return = DB_ENV_DSYNC_LOG; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - break; - case 'N': - if (memEQ(name, "DB_REP_PERMANENT", 16)) { - /* ^ */ -#ifdef DB_REP_PERMANENT - *iv_return = DB_REP_PERMANENT; + if (memEQ(name, "DB_LOCK_CONFLICT", 16)) { + /* ^ */ +#ifdef DB_LOCK_CONFLICT + *iv_return = DB_LOCK_CONFLICT; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - break; - case 'O': - if (memEQ(name, "DB_LOCK_MAXLOCKS", 16)) { - /* ^ */ -#ifdef DB_LOCK_MAXLOCKS - *iv_return = DB_LOCK_MAXLOCKS; + if (memEQ(name, "DB_LOCK_MINLOCKS", 16)) { + /* ^ */ +#ifdef DB_LOCK_MINLOCKS + *iv_return = DB_LOCK_MINLOCKS; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_LOCK_MINLOCKS", 16)) { - /* ^ */ -#ifdef DB_LOCK_MINLOCKS - *iv_return = DB_LOCK_MINLOCKS; + if (memEQ(name, "DB_LOCK_MINWRITE", 16)) { + /* ^ */ +#ifdef DB_LOCK_MINWRITE + *iv_return = DB_LOCK_MINWRITE; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_PAGE_NOTFOUND", 16)) { - /* ^ */ -#ifdef DB_PAGE_NOTFOUND - *iv_return = DB_PAGE_NOTFOUND; + if (memEQ(name, "DB_TXN_OPENFILES", 16)) { + /* ^ */ +#if (DB_VERSION_MAJOR > 3) || \ + (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \ + (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \ + DB_VERSION_PATCH >= 14) + *iv_return = DB_TXN_OPENFILES; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_TEST_POSTOPEN", 16)) { - /* ^ */ -#ifdef DB_TEST_POSTOPEN - *iv_return = DB_TEST_POSTOPEN; + break; + case 'O': + if (memEQ(name, "DB_ENV_NOLOCKING", 16)) { + /* ^ */ +#ifdef DB_ENV_NOLOCKING + *iv_return = DB_ENV_NOLOCKING; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_VERB_CHKPOINT", 16)) { - /* ^ */ -#ifdef DB_VERB_CHKPOINT - *iv_return = DB_VERB_CHKPOINT; + if (memEQ(name, "DB_FCNTL_LOCKING", 16)) { + /* ^ */ +#ifdef DB_FCNTL_LOCKING + *iv_return = DB_FCNTL_LOCKING; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; @@ -3092,7 +3205,7 @@ constant_16 (pTHX_ const char *name, IV *iv_return) { break; case 'R': if (memEQ(name, "DB_ENV_OVERWRITE", 16)) { - /* ^ */ + /* ^ */ #ifdef DB_ENV_OVERWRITE *iv_return = DB_ENV_OVERWRITE; return PERL_constant_ISIV; @@ -3100,80 +3213,80 @@ constant_16 (pTHX_ const char *name, IV *iv_return) { return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_LOCK_MINWRITE", 16)) { - /* ^ */ -#ifdef DB_LOCK_MINWRITE - *iv_return = DB_LOCK_MINWRITE; + if (memEQ(name, "DB_MPOOL_PRIVATE", 
16)) { + /* ^ */ +#ifdef DB_MPOOL_PRIVATE + *iv_return = DB_MPOOL_PRIVATE; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_LOCK_PUT_READ", 16)) { - /* ^ */ -#if (DB_VERSION_MAJOR > 4) || \ - (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \ - (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \ - DB_VERSION_PATCH >= 7) - *iv_return = DB_LOCK_PUT_READ; + if (memEQ(name, "DB_NOSERVER_HOME", 16)) { + /* ^ */ +#ifdef DB_NOSERVER_HOME + *iv_return = DB_NOSERVER_HOME; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - break; - case 'S': - if (memEQ(name, "DB_LOGC_BUF_SIZE", 16)) { - /* ^ */ -#ifdef DB_LOGC_BUF_SIZE - *iv_return = DB_LOGC_BUF_SIZE; + if (memEQ(name, "DB_SECONDARY_BAD", 16)) { + /* ^ */ +#ifdef DB_SECONDARY_BAD + *iv_return = DB_SECONDARY_BAD; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_REP_DUPMASTER", 16)) { - /* ^ */ -#ifdef DB_REP_DUPMASTER - *iv_return = DB_REP_DUPMASTER; + break; + case 'S': + if (memEQ(name, "DB_TEST_POSTOPEN", 16)) { + /* ^ */ +#ifdef DB_TEST_POSTOPEN + *iv_return = DB_TEST_POSTOPEN; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_REP_NEWMASTER", 16)) { - /* ^ */ -#ifdef DB_REP_NEWMASTER - *iv_return = DB_REP_NEWMASTER; + if (memEQ(name, "DB_TEST_POSTSYNC", 16)) { + /* ^ */ +#ifdef DB_TEST_POSTSYNC + *iv_return = DB_TEST_POSTSYNC; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_TEST_POSTSYNC", 16)) { - /* ^ */ -#ifdef DB_TEST_POSTSYNC - *iv_return = DB_TEST_POSTSYNC; + break; + case 'T': + if (memEQ(name, "DB_LOCK_NOTEXIST", 16)) { + /* ^ */ +#ifdef DB_LOCK_NOTEXIST + *iv_return = DB_LOCK_NOTEXIST; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_VERB_WAITSFOR", 16)) { - /* ^ */ -#ifdef DB_VERB_WAITSFOR - *iv_return = DB_VERB_WAITSFOR; + if (memEQ(name, "DB_LOCK_PUT_READ", 16)) { + /* ^ */ +#if (DB_VERSION_MAJOR > 4) || \ + (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \ + (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \ + DB_VERSION_PATCH >= 14) + *iv_return = DB_LOCK_PUT_READ; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - break; - case 'T': - if (memEQ(name, "DB_ENV_DIRECT_DB", 16)) { - /* ^ */ -#ifdef DB_ENV_DIRECT_DB - *iv_return = DB_ENV_DIRECT_DB; + if (memEQ(name, "DB_PAGE_NOTFOUND", 16)) { + /* ^ */ +#ifdef DB_PAGE_NOTFOUND + *iv_return = DB_PAGE_NOTFOUND; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; @@ -3181,41 +3294,44 @@ constant_16 (pTHX_ const char *name, IV *iv_return) { } break; case 'U': - if (memEQ(name, "DB_CACHED_COUNTS", 16)) { - /* ^ */ -#ifdef DB_CACHED_COUNTS - *iv_return = DB_CACHED_COUNTS; + if (memEQ(name, "DB_LOCK_YOUNGEST", 16)) { + /* ^ */ +#ifdef DB_LOCK_YOUNGEST + *iv_return = DB_LOCK_YOUNGEST; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } break; - case 'V': - if (memEQ(name, "DB_MPOOL_PRIVATE", 16)) { - /* ^ */ -#ifdef DB_MPOOL_PRIVATE - *iv_return = DB_MPOOL_PRIVATE; + case 'X': + if (memEQ(name, "DB_LOCK_MAXLOCKS", 16)) { + /* ^ */ +#ifdef DB_LOCK_MAXLOCKS + *iv_return = DB_LOCK_MAXLOCKS; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_VERB_RECOVERY", 16)) { - /* ^ */ -#ifdef DB_VERB_RECOVERY - *iv_return = DB_VERB_RECOVERY; + if (memEQ(name, "DB_LOCK_MAXWRITE", 16)) { + /* ^ */ +#ifdef DB_LOCK_MAXWRITE + *iv_return = DB_LOCK_MAXWRITE; return 
PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } break; - case 'X': - if (memEQ(name, "DB_LOCK_NOTEXIST", 16)) { - /* ^ */ -#ifdef DB_LOCK_NOTEXIST - *iv_return = DB_LOCK_NOTEXIST; + case 'Y': + if (memEQ(name, "DB_PRIORITY_HIGH", 16)) { + /* ^ */ +#if (DB_VERSION_MAJOR > 4) || \ + (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \ + (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \ + DB_VERSION_PATCH >= 24) + *iv_return = DB_PRIORITY_HIGH; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; @@ -3223,19 +3339,46 @@ constant_16 (pTHX_ const char *name, IV *iv_return) { } break; case '_': - if (memEQ(name, "DB_BROADCAST_EID", 16)) { - /* ^ */ -#ifdef DB_BROADCAST_EID - *iv_return = DB_BROADCAST_EID; + if (memEQ(name, "DB_ENV_CDB_ALLDB", 16)) { + /* ^ */ +#ifdef DB_ENV_CDB_ALLDB + *iv_return = DB_ENV_CDB_ALLDB; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_SECONDARY_BAD", 16)) { - /* ^ */ -#ifdef DB_SECONDARY_BAD - *iv_return = DB_SECONDARY_BAD; + if (memEQ(name, "DB_RECOVER_FATAL", 16)) { + /* ^ */ +#ifdef DB_RECOVER_FATAL + *iv_return = DB_RECOVER_FATAL; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; +#endif + } + if (memEQ(name, "DB_VERSION_MAJOR", 16)) { + /* ^ */ +#ifdef DB_VERSION_MAJOR + *iv_return = DB_VERSION_MAJOR; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; +#endif + } + if (memEQ(name, "DB_VERSION_MINOR", 16)) { + /* ^ */ +#ifdef DB_VERSION_MINOR + *iv_return = DB_VERSION_MINOR; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; +#endif + } + if (memEQ(name, "DB_VERSION_PATCH", 16)) { + /* ^ */ +#ifdef DB_VERSION_PATCH + *iv_return = DB_VERSION_PATCH; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; @@ -3253,8 +3396,9 @@ constant_17 (pTHX_ const char *name, IV *iv_return, const char **pv_return) { DB_ENV_DIRECT_LOG DB_ENV_REP_CLIENT DB_ENV_REP_MASTER DB_ENV_STANDALONE DB_ENV_SYSTEM_MEM DB_ENV_TXN_NOSYNC DB_ENV_USER_ALLOC DB_GET_BOTH_RANGE DB_LOG_AUTOREMOVE DB_LOG_SILENT_ERR DB_NO_AUTO_COMMIT DB_RPC_SERVERPROG - DB_RPC_SERVERVERS DB_TEST_ELECTINIT DB_TEST_ELECTSEND DB_TEST_PRERENAME - DB_TXN_POPENFILES DB_VERSION_STRING */ + DB_RPC_SERVERVERS DB_STAT_LOCK_CONF DB_STAT_MEMP_HASH DB_STAT_SUBSYSTEM + DB_TEST_ELECTINIT DB_TEST_ELECTSEND DB_TEST_PRERENAME DB_TXN_POPENFILES + DB_VERSION_STRING */ /* Offset 13 gives the best switch position. 
*/ switch (name[13]) { case 'A': @@ -3265,6 +3409,28 @@ constant_17 (pTHX_ const char *name, IV *iv_return, const char **pv_return) { return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; +#endif + } + break; + case 'C': + if (memEQ(name, "DB_STAT_LOCK_CONF", 17)) { + /* ^ */ +#ifdef DB_STAT_LOCK_CONF + *iv_return = DB_STAT_LOCK_CONF; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; +#endif + } + break; + case 'H': + if (memEQ(name, "DB_STAT_MEMP_HASH", 17)) { + /* ^ */ +#ifdef DB_STAT_MEMP_HASH + *iv_return = DB_STAT_MEMP_HASH; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; #endif } break; @@ -3292,7 +3458,7 @@ constant_17 (pTHX_ const char *name, IV *iv_return, const char **pv_return) { #if (DB_VERSION_MAJOR > 3) || \ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 3) || \ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 3 && \ - DB_VERSION_PATCH >= 4) + DB_VERSION_PATCH >= 11) *iv_return = DB_TXN_POPENFILES; return PERL_constant_ISIV; #else @@ -3390,6 +3556,15 @@ constant_17 (pTHX_ const char *name, IV *iv_return, const char **pv_return) { return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; +#endif + } + if (memEQ(name, "DB_STAT_SUBSYSTEM", 17)) { + /* ^ */ +#ifdef DB_STAT_SUBSYSTEM + *iv_return = DB_STAT_SUBSYSTEM; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; #endif } if (memEQ(name, "DB_TEST_ELECTSEND", 17)) { @@ -3450,13 +3625,13 @@ static int constant_18 (pTHX_ const char *name, IV *iv_return) { /* When generated this function returned values for the list of names given here. However, subsequent manual editing may have added or removed some. - DB_ALREADY_ABORTED DB_ENV_AUTO_COMMIT DB_ENV_OPEN_CALLED - DB_ENV_REGION_INIT DB_LOCK_NOTGRANTED DB_LOG_NOT_DURABLE - DB_MPOOL_NEW_GROUP DB_PR_RECOVERYTEST DB_REP_HANDLE_DEAD + DB_ALREADY_ABORTED DB_DURABLE_UNKNOWN DB_ENV_AUTO_COMMIT + DB_ENV_OPEN_CALLED DB_ENV_REGION_INIT DB_LOCK_NOTGRANTED + DB_LOG_BUFFER_FULL DB_LOG_NOT_DURABLE DB_MPOOL_NEW_GROUP + DB_PR_RECOVERYTEST DB_REP_HANDLE_DEAD DB_REP_STARTUPDONE DB_SET_TXN_TIMEOUT DB_TEST_ELECTVOTE1 DB_TEST_ELECTVOTE2 DB_TEST_ELECTWAIT1 DB_TEST_ELECTWAIT2 DB_TEST_POSTRENAME - DB_TEST_PREDESTROY DB_TEST_PREEXTOPEN DB_TIME_NOTGRANTED - DB_TXN_NOT_DURABLE */ + DB_TEST_PREDESTROY DB_TIME_NOTGRANTED DB_TXN_NOT_DURABLE */ /* Offset 13 gives the best switch position. 
*/ switch (name[13]) { case 'A': @@ -3507,6 +3682,17 @@ constant_18 (pTHX_ const char *name, IV *iv_return) { return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; +#endif + } + break; + case 'K': + if (memEQ(name, "DB_DURABLE_UNKNOWN", 18)) { + /* ^ */ +#ifdef DB_DURABLE_UNKNOWN + *iv_return = DB_DURABLE_UNKNOWN; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; #endif } break; @@ -3538,6 +3724,17 @@ constant_18 (pTHX_ const char *name, IV *iv_return) { return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; +#endif + } + break; + case 'P': + if (memEQ(name, "DB_REP_STARTUPDONE", 18)) { + /* ^ */ +#ifdef DB_REP_STARTUPDONE + *iv_return = DB_REP_STARTUPDONE; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; #endif } break; @@ -3569,17 +3766,6 @@ constant_18 (pTHX_ const char *name, IV *iv_return) { return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; -#endif - } - break; - case 'T': - if (memEQ(name, "DB_TEST_PREEXTOPEN", 18)) { - /* ^ */ -#ifdef DB_TEST_PREEXTOPEN - *iv_return = DB_TEST_PREEXTOPEN; - return PERL_constant_ISIV; -#else - return PERL_constant_NOTDEF; #endif } break; @@ -3642,6 +3828,15 @@ constant_18 (pTHX_ const char *name, IV *iv_return) { return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; +#endif + } + if (memEQ(name, "DB_LOG_BUFFER_FULL", 18)) { + /* ^ */ +#ifdef DB_LOG_BUFFER_FULL + *iv_return = DB_LOG_BUFFER_FULL; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; #endif } if (memEQ(name, "DB_REP_HANDLE_DEAD", 18)) { @@ -3662,15 +3857,25 @@ static int constant_19 (pTHX_ const char *name, IV *iv_return) { /* When generated this function returned values for the list of names given here. However, subsequent manual editing may have added or removed some. - DB_ENV_REP_LOGSONLY DB_LOCK_FREE_LOCKER DB_LOCK_GET_TIMEOUT - DB_LOCK_SET_TIMEOUT DB_PRIORITY_DEFAULT DB_REP_HOLDELECTION - DB_SET_LOCK_TIMEOUT DB_TEST_POSTDESTROY DB_TEST_POSTEXTOPEN - DB_TEST_POSTLOGMETA DB_TEST_SUBDB_LOCKS DB_TXN_FORWARD_ROLL - DB_TXN_LOG_UNDOREDO DB_TXN_WRITE_NOSYNC DB_UNRESOLVED_CHILD - DB_UPDATE_SECONDARY DB_USE_ENVIRON_ROOT DB_VERB_REPLICATION */ + DB_ENV_LOG_INMEMORY DB_ENV_REP_LOGSONLY DB_LOCK_FREE_LOCKER + DB_LOCK_GET_TIMEOUT DB_LOCK_SET_TIMEOUT DB_PRIORITY_DEFAULT + DB_REP_HOLDELECTION DB_SEQUENCE_VERSION DB_SET_LOCK_TIMEOUT + DB_STAT_LOCK_PARAMS DB_TEST_POSTDESTROY DB_TEST_POSTLOGMETA + DB_TEST_SUBDB_LOCKS DB_TXN_FORWARD_ROLL DB_TXN_LOG_UNDOREDO + DB_TXN_WRITE_NOSYNC DB_UPDATE_SECONDARY DB_USE_ENVIRON_ROOT + DB_VERB_REPLICATION DB_VERSION_MISMATCH */ /* Offset 9 gives the best switch position. 
*/ switch (name[9]) { case 'C': + if (memEQ(name, "DB_SEQUENCE_VERSION", 19)) { + /* ^ */ +#ifdef DB_SEQUENCE_VERSION + *iv_return = DB_SEQUENCE_VERSION; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; +#endif + } if (memEQ(name, "DB_SET_LOCK_TIMEOUT", 19)) { /* ^ */ #ifdef DB_SET_LOCK_TIMEOUT @@ -3687,7 +3892,7 @@ constant_19 (pTHX_ const char *name, IV *iv_return) { #if (DB_VERSION_MAJOR > 4) || \ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \ - DB_VERSION_PATCH >= 7) + DB_VERSION_PATCH >= 14) *iv_return = DB_LOCK_GET_TIMEOUT; return PERL_constant_ISIV; #else @@ -3714,6 +3919,15 @@ constant_19 (pTHX_ const char *name, IV *iv_return) { } break; case 'G': + if (memEQ(name, "DB_ENV_LOG_INMEMORY", 19)) { + /* ^ */ +#ifdef DB_ENV_LOG_INMEMORY + *iv_return = DB_ENV_LOG_INMEMORY; + return PERL_constant_ISIV; +#else + return PERL_constant_NOTDEF; +#endif + } if (memEQ(name, "DB_TXN_LOG_UNDOREDO", 19)) { /* ^ */ #ifdef DB_TXN_LOG_UNDOREDO @@ -3745,10 +3959,12 @@ constant_19 (pTHX_ const char *name, IV *iv_return) { return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_UNRESOLVED_CHILD", 19)) { + break; + case 'N': + if (memEQ(name, "DB_VERSION_MISMATCH", 19)) { /* ^ */ -#ifdef DB_UNRESOLVED_CHILD - *iv_return = DB_UNRESOLVED_CHILD; +#ifdef DB_VERSION_MISMATCH + *iv_return = DB_VERSION_MISMATCH; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; @@ -3756,19 +3972,19 @@ constant_19 (pTHX_ const char *name, IV *iv_return) { } break; case 'O': - if (memEQ(name, "DB_TEST_POSTDESTROY", 19)) { + if (memEQ(name, "DB_STAT_LOCK_PARAMS", 19)) { /* ^ */ -#ifdef DB_TEST_POSTDESTROY - *iv_return = DB_TEST_POSTDESTROY; +#ifdef DB_STAT_LOCK_PARAMS + *iv_return = DB_STAT_LOCK_PARAMS; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_TEST_POSTEXTOPEN", 19)) { + if (memEQ(name, "DB_TEST_POSTDESTROY", 19)) { /* ^ */ -#ifdef DB_TEST_POSTEXTOPEN - *iv_return = DB_TEST_POSTEXTOPEN; +#ifdef DB_TEST_POSTDESTROY + *iv_return = DB_TEST_POSTDESTROY; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; @@ -3810,7 +4026,7 @@ constant_19 (pTHX_ const char *name, IV *iv_return) { #if (DB_VERSION_MAJOR > 3) || \ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \ - DB_VERSION_PATCH >= 12) + DB_VERSION_PATCH >= 14) *iv_return = DB_TXN_FORWARD_ROLL; return PERL_constant_ISIV; #else @@ -3824,7 +4040,7 @@ constant_19 (pTHX_ const char *name, IV *iv_return) { #if (DB_VERSION_MAJOR > 4) || \ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \ - DB_VERSION_PATCH >= 17) + DB_VERSION_PATCH >= 24) *iv_return = DB_PRIORITY_DEFAULT; return PERL_constant_ISIV; #else @@ -3874,47 +4090,46 @@ constant_20 (pTHX_ const char *name, IV *iv_return) { /* When generated this function returned values for the list of names given here. However, subsequent manual editing may have added or removed some. DB_CXX_NO_EXCEPTIONS DB_LOGFILEID_INVALID DB_PANIC_ENVIRONMENT - DB_PRIORITY_VERY_LOW DB_TEST_PREEXTDELETE DB_TEST_PREEXTUNLINK + DB_PRIORITY_VERY_LOW DB_STAT_LOCK_LOCKERS DB_STAT_LOCK_OBJECTS DB_TXN_BACKWARD_ROLL DB_TXN_LOCK_OPTIMIST */ - /* Offset 14 gives the best switch position. */ - switch (name[14]) { - case 'D': - if (memEQ(name, "DB_TEST_PREEXTDELETE", 20)) { - /* ^ */ -#ifdef DB_TEST_PREEXTDELETE - *iv_return = DB_TEST_PREEXTDELETE; + /* Offset 15 gives the best switch position. 
*/ + switch (name[15]) { + case 'C': + if (memEQ(name, "DB_STAT_LOCK_LOCKERS", 20)) { + /* ^ */ +#ifdef DB_STAT_LOCK_LOCKERS + *iv_return = DB_STAT_LOCK_LOCKERS; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_TXN_BACKWARD_ROLL", 20)) { - /* ^ */ -#if (DB_VERSION_MAJOR > 3) || \ - (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \ - (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \ - DB_VERSION_PATCH >= 12) - *iv_return = DB_TXN_BACKWARD_ROLL; + break; + case 'I': + if (memEQ(name, "DB_TXN_LOCK_OPTIMIST", 20)) { + /* ^ */ +#ifdef DB_TXN_LOCK_OPTIMIST + *iv_return = DB_TXN_LOCK_OPTIMIST; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } break; - case 'N': - if (memEQ(name, "DB_LOGFILEID_INVALID", 20)) { - /* ^ */ -#ifdef DB_LOGFILEID_INVALID - *iv_return = DB_LOGFILEID_INVALID; + case 'J': + if (memEQ(name, "DB_STAT_LOCK_OBJECTS", 20)) { + /* ^ */ +#ifdef DB_STAT_LOCK_OBJECTS + *iv_return = DB_STAT_LOCK_OBJECTS; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } break; - case 'O': + case 'N': if (memEQ(name, "DB_PANIC_ENVIRONMENT", 20)) { - /* ^ */ + /* ^ */ #ifdef DB_PANIC_ENVIRONMENT *iv_return = DB_PANIC_ENVIRONMENT; return PERL_constant_ISIV; @@ -3923,9 +4138,9 @@ constant_20 (pTHX_ const char *name, IV *iv_return) { #endif } break; - case 'P': + case 'T': if (memEQ(name, "DB_CXX_NO_EXCEPTIONS", 20)) { - /* ^ */ + /* ^ */ #ifdef DB_CXX_NO_EXCEPTIONS *iv_return = DB_CXX_NO_EXCEPTIONS; return PERL_constant_ISIV; @@ -3934,36 +4149,39 @@ constant_20 (pTHX_ const char *name, IV *iv_return) { #endif } break; - case 'R': - if (memEQ(name, "DB_PRIORITY_VERY_LOW", 20)) { - /* ^ */ -#if (DB_VERSION_MAJOR > 4) || \ - (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \ - (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \ - DB_VERSION_PATCH >= 17) - *iv_return = DB_PRIORITY_VERY_LOW; + case 'V': + if (memEQ(name, "DB_LOGFILEID_INVALID", 20)) { + /* ^ */ +#ifdef DB_LOGFILEID_INVALID + *iv_return = DB_LOGFILEID_INVALID; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } break; - case 'T': - if (memEQ(name, "DB_TXN_LOCK_OPTIMIST", 20)) { - /* ^ */ -#ifdef DB_TXN_LOCK_OPTIMIST - *iv_return = DB_TXN_LOCK_OPTIMIST; + case 'Y': + if (memEQ(name, "DB_PRIORITY_VERY_LOW", 20)) { + /* ^ */ +#if (DB_VERSION_MAJOR > 4) || \ + (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \ + (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \ + DB_VERSION_PATCH >= 24) + *iv_return = DB_PRIORITY_VERY_LOW; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } break; - case 'U': - if (memEQ(name, "DB_TEST_PREEXTUNLINK", 20)) { - /* ^ */ -#ifdef DB_TEST_PREEXTUNLINK - *iv_return = DB_TEST_PREEXTUNLINK; + case '_': + if (memEQ(name, "DB_TXN_BACKWARD_ROLL", 20)) { + /* ^ */ +#if (DB_VERSION_MAJOR > 3) || \ + (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \ + (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \ + DB_VERSION_PATCH >= 14) + *iv_return = DB_TXN_BACKWARD_ROLL; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; @@ -3979,28 +4197,14 @@ constant_21 (pTHX_ const char *name, IV *iv_return) { /* When generated this function returned values for the list of names given here. However, subsequent manual editing may have added or removed some. DB_ENV_LOG_AUTOREMOVE DB_LOCK_UPGRADE_WRITE DB_PRIORITY_VERY_HIGH - DB_TEST_POSTEXTDELETE DB_TEST_POSTEXTUNLINK DB_TXN_BACKWARD_ALLOC */ - /* Offset 19 gives the best switch position. 
*/ - switch (name[19]) { - case 'G': - if (memEQ(name, "DB_PRIORITY_VERY_HIGH", 21)) { - /* ^ */ -#if (DB_VERSION_MAJOR > 4) || \ - (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \ - (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \ - DB_VERSION_PATCH >= 17) - *iv_return = DB_PRIORITY_VERY_HIGH; - return PERL_constant_ISIV; -#else - return PERL_constant_NOTDEF; -#endif - } - break; + DB_TXN_BACKWARD_ALLOC */ + /* Offset 4 gives the best switch position. */ + switch (name[4]) { case 'N': - if (memEQ(name, "DB_TEST_POSTEXTUNLINK", 21)) { - /* ^ */ -#ifdef DB_TEST_POSTEXTUNLINK - *iv_return = DB_TEST_POSTEXTUNLINK; + if (memEQ(name, "DB_ENV_LOG_AUTOREMOVE", 21)) { + /* ^ */ +#ifdef DB_ENV_LOG_AUTOREMOVE + *iv_return = DB_ENV_LOG_AUTOREMOVE; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; @@ -4008,47 +4212,41 @@ constant_21 (pTHX_ const char *name, IV *iv_return) { } break; case 'O': - if (memEQ(name, "DB_TXN_BACKWARD_ALLOC", 21)) { - /* ^ */ -#if (DB_VERSION_MAJOR > 4) || \ - (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \ - (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \ - DB_VERSION_PATCH >= 17) - *iv_return = DB_TXN_BACKWARD_ALLOC; - return PERL_constant_ISIV; -#else - return PERL_constant_NOTDEF; -#endif - } - break; - case 'T': if (memEQ(name, "DB_LOCK_UPGRADE_WRITE", 21)) { - /* ^ */ + /* ^ */ #if (DB_VERSION_MAJOR > 3) || \ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 3) || \ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 3 && \ - DB_VERSION_PATCH >= 4) + DB_VERSION_PATCH >= 11) *iv_return = DB_LOCK_UPGRADE_WRITE; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } - if (memEQ(name, "DB_TEST_POSTEXTDELETE", 21)) { - /* ^ */ -#ifdef DB_TEST_POSTEXTDELETE - *iv_return = DB_TEST_POSTEXTDELETE; + break; + case 'R': + if (memEQ(name, "DB_PRIORITY_VERY_HIGH", 21)) { + /* ^ */ +#if (DB_VERSION_MAJOR > 4) || \ + (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \ + (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \ + DB_VERSION_PATCH >= 24) + *iv_return = DB_PRIORITY_VERY_HIGH; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; #endif } break; - case 'V': - if (memEQ(name, "DB_ENV_LOG_AUTOREMOVE", 21)) { - /* ^ */ -#ifdef DB_ENV_LOG_AUTOREMOVE - *iv_return = DB_ENV_LOG_AUTOREMOVE; + case 'X': + if (memEQ(name, "DB_TXN_BACKWARD_ALLOC", 21)) { + /* ^ */ +#if (DB_VERSION_MAJOR > 4) || \ + (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \ + (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \ + DB_VERSION_PATCH >= 24) + *iv_return = DB_TXN_BACKWARD_ALLOC; return PERL_constant_ISIV; #else return PERL_constant_NOTDEF; @@ -4068,8 +4266,8 @@ constant_22 (pTHX_ const char *name, IV *iv_return) { /* Offset 21 gives the best switch position. 
*/ switch (name[21]) { case 'C': - if (memEQ(name, "DB_TXN_LOCK_OPTIMISTIC", 22)) { - /* ^ */ + if (memEQ(name, "DB_TXN_LOCK_OPTIMISTI", 21)) { + /* C */ #ifdef DB_TXN_LOCK_OPTIMISTIC *iv_return = DB_TXN_LOCK_OPTIMISTIC; return PERL_constant_ISIV; @@ -4079,8 +4277,8 @@ constant_22 (pTHX_ const char *name, IV *iv_return) { } break; case 'D': - if (memEQ(name, "DB_ENV_TIME_NOTGRANTED", 22)) { - /* ^ */ + if (memEQ(name, "DB_ENV_TIME_NOTGRANTE", 21)) { + /* D */ #ifdef DB_ENV_TIME_NOTGRANTED *iv_return = DB_ENV_TIME_NOTGRANTED; return PERL_constant_ISIV; @@ -4090,8 +4288,8 @@ constant_22 (pTHX_ const char *name, IV *iv_return) { } break; case 'E': - if (memEQ(name, "DB_ENV_TXN_NOT_DURABLE", 22)) { - /* ^ */ + if (memEQ(name, "DB_ENV_TXN_NOT_DURABL", 21)) { + /* E */ #ifdef DB_ENV_TXN_NOT_DURABLE *iv_return = DB_ENV_TXN_NOT_DURABLE; return PERL_constant_ISIV; @@ -4101,8 +4299,8 @@ constant_22 (pTHX_ const char *name, IV *iv_return) { } break; case 'N': - if (memEQ(name, "DB_ENV_RPCCLIENT_GIVEN", 22)) { - /* ^ */ + if (memEQ(name, "DB_ENV_RPCCLIENT_GIVE", 21)) { + /* N */ #ifdef DB_ENV_RPCCLIENT_GIVEN *iv_return = DB_ENV_RPCCLIENT_GIVEN; return PERL_constant_ISIV; @@ -4128,24 +4326,25 @@ constant (pTHX_ const char *name, STRLEN len, IV *iv_return, const char **pv_ret Regenerate these constant functions by feeding this entire source file to perl -x -#!/usr/bin/perl5.8.0 -w +#!/usr/bin/perl5.8.3 -w use ExtUtils::Constant qw (constant_types C_constant XS_constant); my $types = {map {($_, 1)} qw(IV PV)}; my @names = (qw(DB_AFTER DB_AGGRESSIVE DB_ALREADY_ABORTED DB_APPEND DB_APPLY_LOGREG DB_APP_INIT DB_ARCH_ABS DB_ARCH_DATA DB_ARCH_LOG - DB_ARCH_REMOVE DB_AUTO_COMMIT DB_BEFORE DB_BROADCAST_EID - DB_BTREEMAGIC DB_BTREEOLDVER DB_BTREEVERSION DB_CACHED_COUNTS + DB_ARCH_REMOVE DB_AUTO_COMMIT DB_BEFORE DB_BTREEMAGIC + DB_BTREEOLDVER DB_BTREEVERSION DB_BUFFER_SMALL DB_CACHED_COUNTS DB_CDB_ALLDB DB_CHECKPOINT DB_CHKSUM DB_CHKSUM_SHA1 DB_CLIENT DB_CL_WRITER DB_COMMIT DB_CONSUME DB_CONSUME_WAIT DB_CREATE - DB_CURLSN DB_CURRENT DB_CXX_NO_EXCEPTIONS DB_DELETED + DB_CURLSN DB_CURRENT DB_CXX_NO_EXCEPTIONS DB_DEGREE_2 DB_DELETED DB_DELIMITER DB_DIRECT DB_DIRECT_DB DB_DIRECT_LOG DB_DIRTY_READ - DB_DONOTINDEX DB_DUP DB_DUPCURSOR DB_DUPSORT DB_EID_BROADCAST - DB_EID_INVALID DB_ENCRYPT DB_ENCRYPT_AES DB_ENV_APPINIT - DB_ENV_AUTO_COMMIT DB_ENV_CDB DB_ENV_CDB_ALLDB DB_ENV_CREATE - DB_ENV_DBLOCAL DB_ENV_DIRECT_DB DB_ENV_DIRECT_LOG DB_ENV_FATAL - DB_ENV_LOCKDOWN DB_ENV_LOCKING DB_ENV_LOGGING - DB_ENV_LOG_AUTOREMOVE DB_ENV_NOLOCKING DB_ENV_NOMMAP + DB_DONOTINDEX DB_DSYNC_LOG DB_DUP DB_DUPCURSOR DB_DUPSORT + DB_DURABLE_UNKNOWN DB_EID_BROADCAST DB_EID_INVALID DB_ENCRYPT + DB_ENCRYPT_AES DB_ENV_APPINIT DB_ENV_AUTO_COMMIT DB_ENV_CDB + DB_ENV_CDB_ALLDB DB_ENV_CREATE DB_ENV_DBLOCAL DB_ENV_DIRECT_DB + DB_ENV_DIRECT_LOG DB_ENV_DSYNC_LOG DB_ENV_FATAL DB_ENV_LOCKDOWN + DB_ENV_LOCKING DB_ENV_LOGGING DB_ENV_LOG_AUTOREMOVE + DB_ENV_LOG_INMEMORY DB_ENV_NOLOCKING DB_ENV_NOMMAP DB_ENV_NOPANIC DB_ENV_OPEN_CALLED DB_ENV_OVERWRITE DB_ENV_PANIC_OK DB_ENV_PRIVATE DB_ENV_REGION_INIT DB_ENV_REP_CLIENT DB_ENV_REP_LOGSONLY DB_ENV_REP_MASTER @@ -4158,29 +4357,30 @@ my @names = (qw(DB_AFTER DB_AGGRESSIVE DB_ALREADY_ABORTED DB_APPEND DB_GET_BOTH DB_GET_BOTHC DB_GET_BOTH_RANGE DB_GET_RECNO DB_HANDLE_LOCK DB_HASHMAGIC DB_HASHOLDVER DB_HASHVERSION DB_INCOMPLETE DB_INIT_CDB DB_INIT_LOCK DB_INIT_LOG DB_INIT_MPOOL - DB_INIT_REP DB_INIT_TXN DB_INVALID_EID DB_JAVA_CALLBACK - DB_JOINENV DB_JOIN_ITEM DB_JOIN_NOSORT DB_KEYEMPTY DB_KEYEXIST - DB_KEYFIRST 
DB_KEYLAST DB_LAST DB_LOCKDOWN DB_LOCKMAGIC - DB_LOCKVERSION DB_LOCK_CONFLICT DB_LOCK_DEADLOCK DB_LOCK_DEFAULT + DB_INIT_REP DB_INIT_TXN DB_INORDER DB_JAVA_CALLBACK DB_JOINENV + DB_JOIN_ITEM DB_JOIN_NOSORT DB_KEYEMPTY DB_KEYEXIST DB_KEYFIRST + DB_KEYLAST DB_LAST DB_LOCKDOWN DB_LOCKMAGIC DB_LOCKVERSION + DB_LOCK_ABORT DB_LOCK_CONFLICT DB_LOCK_DEADLOCK DB_LOCK_DEFAULT DB_LOCK_EXPIRE DB_LOCK_FREE_LOCKER DB_LOCK_MAXLOCKS - DB_LOCK_MINLOCKS DB_LOCK_MINWRITE DB_LOCK_NORUN DB_LOCK_NOTEXIST - DB_LOCK_NOTGRANTED DB_LOCK_NOTHELD DB_LOCK_NOWAIT DB_LOCK_OLDEST - DB_LOCK_RANDOM DB_LOCK_RECORD DB_LOCK_REMOVE DB_LOCK_RIW_N - DB_LOCK_RW_N DB_LOCK_SET_TIMEOUT DB_LOCK_SWITCH DB_LOCK_UPGRADE - DB_LOCK_YOUNGEST DB_LOGC_BUF_SIZE DB_LOGFILEID_INVALID - DB_LOGMAGIC DB_LOGOLDVER DB_LOGVERSION DB_LOG_AUTOREMOVE - DB_LOG_CHKPNT DB_LOG_COMMIT DB_LOG_DISK DB_LOG_LOCKED - DB_LOG_NOCOPY DB_LOG_NOT_DURABLE DB_LOG_PERM DB_LOG_SILENT_ERR + DB_LOCK_MAXWRITE DB_LOCK_MINLOCKS DB_LOCK_MINWRITE DB_LOCK_NORUN + DB_LOCK_NOTEXIST DB_LOCK_NOTGRANTED DB_LOCK_NOTHELD + DB_LOCK_NOWAIT DB_LOCK_OLDEST DB_LOCK_RANDOM DB_LOCK_RECORD + DB_LOCK_REMOVE DB_LOCK_RIW_N DB_LOCK_RW_N DB_LOCK_SET_TIMEOUT + DB_LOCK_SWITCH DB_LOCK_UPGRADE DB_LOCK_YOUNGEST DB_LOGC_BUF_SIZE + DB_LOGFILEID_INVALID DB_LOGMAGIC DB_LOGOLDVER DB_LOGVERSION + DB_LOG_AUTOREMOVE DB_LOG_BUFFER_FULL DB_LOG_CHKPNT DB_LOG_COMMIT + DB_LOG_DISK DB_LOG_INMEMORY DB_LOG_LOCKED DB_LOG_NOCOPY + DB_LOG_NOT_DURABLE DB_LOG_PERM DB_LOG_RESEND DB_LOG_SILENT_ERR DB_LOG_WRNOSYNC DB_MAX_PAGES DB_MAX_RECORDS DB_MPOOL_CLEAN DB_MPOOL_CREATE DB_MPOOL_DIRTY DB_MPOOL_DISCARD DB_MPOOL_EXTENT - DB_MPOOL_LAST DB_MPOOL_NEW DB_MPOOL_NEW_GROUP DB_MPOOL_NOFILE - DB_MPOOL_PRIVATE DB_MPOOL_UNLINK DB_MULTIPLE DB_MULTIPLE_KEY - DB_MUTEXDEBUG DB_MUTEXLOCKS DB_NEEDSPLIT DB_NEXT DB_NEXT_DUP - DB_NEXT_NODUP DB_NOCOPY DB_NODUPDATA DB_NOLOCKING DB_NOMMAP - DB_NOORDERCHK DB_NOOVERWRITE DB_NOPANIC DB_NORECURSE DB_NOSERVER - DB_NOSERVER_HOME DB_NOSERVER_ID DB_NOSYNC DB_NOTFOUND - DB_NO_AUTO_COMMIT DB_ODDFILESIZE DB_OK_BTREE DB_OK_HASH - DB_OK_QUEUE DB_OK_RECNO DB_OLD_VERSION DB_OPEN_CALLED + DB_MPOOL_FREE DB_MPOOL_LAST DB_MPOOL_NEW DB_MPOOL_NEW_GROUP + DB_MPOOL_NOFILE DB_MPOOL_PRIVATE DB_MPOOL_UNLINK DB_MULTIPLE + DB_MULTIPLE_KEY DB_MUTEXDEBUG DB_MUTEXLOCKS DB_NEEDSPLIT DB_NEXT + DB_NEXT_DUP DB_NEXT_NODUP DB_NOCOPY DB_NODUPDATA DB_NOLOCKING + DB_NOMMAP DB_NOORDERCHK DB_NOOVERWRITE DB_NOPANIC DB_NORECURSE + DB_NOSERVER DB_NOSERVER_HOME DB_NOSERVER_ID DB_NOSYNC + DB_NOTFOUND DB_NO_AUTO_COMMIT DB_ODDFILESIZE DB_OK_BTREE + DB_OK_HASH DB_OK_QUEUE DB_OK_RECNO DB_OLD_VERSION DB_OPEN_CALLED DB_OPFLAGS_MASK DB_ORDERCHKONLY DB_OVERWRITE DB_PAD DB_PAGEYIELD DB_PAGE_LOCK DB_PAGE_NOTFOUND DB_PANIC_ENVIRONMENT DB_PERMANENT DB_POSITION DB_POSITIONI DB_PREV DB_PREV_NODUP DB_PRINTABLE @@ -4189,66 +4389,68 @@ my @names = (qw(DB_AFTER DB_AGGRESSIVE DB_ALREADY_ABORTED DB_APPEND DB_RECNUM DB_RECORDCOUNT DB_RECORD_LOCK DB_RECOVER DB_RECOVER_FATAL DB_REGION_ANON DB_REGION_INIT DB_REGION_MAGIC DB_REGION_NAME DB_REGISTERED DB_RENAMEMAGIC DB_RENUMBER - DB_REP_CLIENT DB_REP_CREATE DB_REP_DUPMASTER DB_REP_HANDLE_DEAD - DB_REP_HOLDELECTION DB_REP_ISPERM DB_REP_LOGSONLY DB_REP_MASTER - DB_REP_NEWMASTER DB_REP_NEWSITE DB_REP_NOBUFFER DB_REP_NOTPERM - DB_REP_OUTDATED DB_REP_PERMANENT DB_REP_UNAVAIL DB_REVSPLITOFF - DB_RMW DB_RPCCLIENT DB_RPC_SERVERPROG DB_RPC_SERVERVERS - DB_RUNRECOVERY DB_SALVAGE DB_SECONDARY_BAD DB_SEQUENTIAL DB_SET + DB_REP_CLIENT DB_REP_CREATE DB_REP_DUPMASTER DB_REP_EGENCHG + DB_REP_HANDLE_DEAD DB_REP_HOLDELECTION DB_REP_ISPERM 
+ DB_REP_LOGREADY DB_REP_LOGSONLY DB_REP_MASTER DB_REP_NEWMASTER + DB_REP_NEWSITE DB_REP_NOBUFFER DB_REP_NOTPERM DB_REP_OUTDATED + DB_REP_PAGEDONE DB_REP_PERMANENT DB_REP_STARTUPDONE + DB_REP_UNAVAIL DB_REVSPLITOFF DB_RMW DB_RPCCLIENT + DB_RPC_SERVERPROG DB_RPC_SERVERVERS DB_RUNRECOVERY DB_SALVAGE + DB_SECONDARY_BAD DB_SEQUENCE_VERSION DB_SEQUENTIAL DB_SEQ_DEC + DB_SEQ_INC DB_SEQ_RANGE_SET DB_SEQ_WRAP DB_SET DB_SET_LOCK_TIMEOUT DB_SET_RANGE DB_SET_RECNO DB_SET_TXN_NOW - DB_SET_TXN_TIMEOUT DB_SNAPSHOT DB_STAT_CLEAR DB_SURPRISE_KID - DB_SWAPBYTES DB_SYSTEM_MEM DB_TEMPORARY DB_TEST_ELECTINIT - DB_TEST_ELECTSEND DB_TEST_ELECTVOTE1 DB_TEST_ELECTVOTE2 - DB_TEST_ELECTWAIT1 DB_TEST_ELECTWAIT2 DB_TEST_POSTDESTROY - DB_TEST_POSTEXTDELETE DB_TEST_POSTEXTOPEN DB_TEST_POSTEXTUNLINK - DB_TEST_POSTLOG DB_TEST_POSTLOGMETA DB_TEST_POSTOPEN - DB_TEST_POSTRENAME DB_TEST_POSTSYNC DB_TEST_PREDESTROY - DB_TEST_PREEXTDELETE DB_TEST_PREEXTOPEN DB_TEST_PREEXTUNLINK - DB_TEST_PREOPEN DB_TEST_PRERENAME DB_TEST_SUBDB_LOCKS DB_THREAD - DB_TIMEOUT DB_TIME_NOTGRANTED DB_TRUNCATE DB_TXNMAGIC - DB_TXNVERSION DB_TXN_CKP DB_TXN_LOCK DB_TXN_LOCK_2PL - DB_TXN_LOCK_MASK DB_TXN_LOCK_OPTIMIST DB_TXN_LOCK_OPTIMISTIC - DB_TXN_LOG_MASK DB_TXN_LOG_REDO DB_TXN_LOG_UNDO - DB_TXN_LOG_UNDOREDO DB_TXN_NOSYNC DB_TXN_NOT_DURABLE - DB_TXN_NOWAIT DB_TXN_REDO DB_TXN_SYNC DB_TXN_UNDO - DB_TXN_WRITE_NOSYNC DB_UNRESOLVED_CHILD DB_UPDATE_SECONDARY + DB_SET_TXN_TIMEOUT DB_SNAPSHOT DB_STAT_ALL DB_STAT_CLEAR + DB_STAT_LOCK_CONF DB_STAT_LOCK_LOCKERS DB_STAT_LOCK_OBJECTS + DB_STAT_LOCK_PARAMS DB_STAT_MEMP_HASH DB_STAT_SUBSYSTEM + DB_SURPRISE_KID DB_SWAPBYTES DB_SYSTEM_MEM DB_TEMPORARY + DB_TEST_ELECTINIT DB_TEST_ELECTSEND DB_TEST_ELECTVOTE1 + DB_TEST_ELECTVOTE2 DB_TEST_ELECTWAIT1 DB_TEST_ELECTWAIT2 + DB_TEST_POSTDESTROY DB_TEST_POSTLOG DB_TEST_POSTLOGMETA + DB_TEST_POSTOPEN DB_TEST_POSTRENAME DB_TEST_POSTSYNC + DB_TEST_PREDESTROY DB_TEST_PREOPEN DB_TEST_PRERENAME + DB_TEST_SUBDB_LOCKS DB_THREAD DB_TIMEOUT DB_TIME_NOTGRANTED + DB_TRUNCATE DB_TXNMAGIC DB_TXNVERSION DB_TXN_CKP DB_TXN_LOCK + DB_TXN_LOCK_2PL DB_TXN_LOCK_MASK DB_TXN_LOCK_OPTIMIST + DB_TXN_LOCK_OPTIMISTIC DB_TXN_LOG_MASK DB_TXN_LOG_REDO + DB_TXN_LOG_UNDO DB_TXN_LOG_UNDOREDO DB_TXN_NOSYNC + DB_TXN_NOT_DURABLE DB_TXN_NOWAIT DB_TXN_REDO DB_TXN_SYNC + DB_TXN_UNDO DB_TXN_WRITE_NOSYNC DB_UNREF DB_UPDATE_SECONDARY DB_UPGRADE DB_USE_ENVIRON DB_USE_ENVIRON_ROOT DB_VERB_CHKPOINT DB_VERB_DEADLOCK DB_VERB_RECOVERY DB_VERB_REPLICATION DB_VERB_WAITSFOR DB_VERIFY DB_VERIFY_BAD DB_VERIFY_FATAL - DB_VERSION_MAJOR DB_VERSION_MINOR DB_VERSION_PATCH - DB_VRFY_FLAGMASK DB_WRITECURSOR DB_WRITELOCK DB_WRITEOPEN - DB_WRNOSYNC DB_XA_CREATE DB_XIDDATASIZE DB_YIELDCPU + DB_VERSION_MAJOR DB_VERSION_MINOR DB_VERSION_MISMATCH + DB_VERSION_PATCH DB_VRFY_FLAGMASK DB_WRITECURSOR DB_WRITELOCK + DB_WRITEOPEN DB_WRNOSYNC DB_XA_CREATE DB_XIDDATASIZE DB_YIELDCPU DB_debug_FLAG DB_user_BEGIN), - {name=>"DB_BTREE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]}, - {name=>"DB_HASH", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]}, - {name=>"DB_LOCK_DUMP", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && 
\\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]}, - {name=>"DB_LOCK_GET", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]}, - {name=>"DB_LOCK_GET_TIMEOUT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 7)\n", "#endif\n"]}, + {name=>"DB_BTREE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 3)\n", "#endif\n"]}, + {name=>"DB_HASH", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 3)\n", "#endif\n"]}, + {name=>"DB_LOCK_DUMP", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 3)\n", "#endif\n"]}, + {name=>"DB_LOCK_GET", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 3)\n", "#endif\n"]}, + {name=>"DB_LOCK_GET_TIMEOUT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 14)\n", "#endif\n"]}, {name=>"DB_LOCK_INHERIT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 7) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 7 && \\\n DB_VERSION_PATCH >= 1)\n", "#endif\n"]}, - {name=>"DB_LOCK_PUT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]}, - {name=>"DB_LOCK_PUT_ALL", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]}, - {name=>"DB_LOCK_PUT_OBJ", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]}, - {name=>"DB_LOCK_PUT_READ", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 7)\n", "#endif\n"]}, - {name=>"DB_LOCK_TIMEOUT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 7)\n", "#endif\n"]}, - {name=>"DB_LOCK_TRADE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]}, - {name=>"DB_LOCK_UPGRADE_WRITE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 3 && \\\n 
DB_VERSION_PATCH >= 4)\n", "#endif\n"]}, - {name=>"DB_PRIORITY_DEFAULT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]}, - {name=>"DB_PRIORITY_HIGH", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]}, - {name=>"DB_PRIORITY_LOW", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]}, - {name=>"DB_PRIORITY_VERY_HIGH", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]}, - {name=>"DB_PRIORITY_VERY_LOW", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]}, + {name=>"DB_LOCK_PUT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 3)\n", "#endif\n"]}, + {name=>"DB_LOCK_PUT_ALL", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 3)\n", "#endif\n"]}, + {name=>"DB_LOCK_PUT_OBJ", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 3)\n", "#endif\n"]}, + {name=>"DB_LOCK_PUT_READ", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 14)\n", "#endif\n"]}, + {name=>"DB_LOCK_TIMEOUT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 14)\n", "#endif\n"]}, + {name=>"DB_LOCK_TRADE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 24)\n", "#endif\n"]}, + {name=>"DB_LOCK_UPGRADE_WRITE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 3 && \\\n DB_VERSION_PATCH >= 11)\n", "#endif\n"]}, + {name=>"DB_PRIORITY_DEFAULT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 24)\n", "#endif\n"]}, + {name=>"DB_PRIORITY_HIGH", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 24)\n", "#endif\n"]}, + {name=>"DB_PRIORITY_LOW", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 
&& DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 24)\n", "#endif\n"]}, + {name=>"DB_PRIORITY_VERY_HIGH", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 24)\n", "#endif\n"]}, + {name=>"DB_PRIORITY_VERY_LOW", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 24)\n", "#endif\n"]}, {name=>"DB_QUEUE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 55)\n", "#endif\n"]}, - {name=>"DB_RECNO", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]}, - {name=>"DB_TXN_ABORT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 12)\n", "#endif\n"]}, - {name=>"DB_TXN_APPLY", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 7)\n", "#endif\n"]}, - {name=>"DB_TXN_BACKWARD_ALLOC", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]}, - {name=>"DB_TXN_BACKWARD_ROLL", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 12)\n", "#endif\n"]}, - {name=>"DB_TXN_FORWARD_ROLL", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 12)\n", "#endif\n"]}, - {name=>"DB_TXN_GETPGNOS", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]}, - {name=>"DB_TXN_OPENFILES", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 12)\n", "#endif\n"]}, - {name=>"DB_TXN_POPENFILES", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 3 && \\\n DB_VERSION_PATCH >= 4)\n", "#endif\n"]}, - {name=>"DB_TXN_PRINT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]}, - {name=>"DB_UNKNOWN", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]}, + {name=>"DB_RECNO", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR 
== 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 3)\n", "#endif\n"]}, + {name=>"DB_TXN_ABORT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 14)\n", "#endif\n"]}, + {name=>"DB_TXN_APPLY", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 14)\n", "#endif\n"]}, + {name=>"DB_TXN_BACKWARD_ALLOC", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 24)\n", "#endif\n"]}, + {name=>"DB_TXN_BACKWARD_ROLL", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 14)\n", "#endif\n"]}, + {name=>"DB_TXN_FORWARD_ROLL", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 14)\n", "#endif\n"]}, + {name=>"DB_TXN_OPENFILES", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 14)\n", "#endif\n"]}, + {name=>"DB_TXN_POPENFILES", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 3 && \\\n DB_VERSION_PATCH >= 11)\n", "#endif\n"]}, + {name=>"DB_TXN_PRINT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 24)\n", "#endif\n"]}, + {name=>"DB_UNKNOWN", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 3)\n", "#endif\n"]}, {name=>"DB_VERSION_STRING", type=>"PV"}); print constant_types(); # macro defs diff --git a/db/perl/BerkeleyDB/mkconsts b/db/perl/BerkeleyDB/mkconsts index 2aca7c6dc..0383378e4 100644 --- a/db/perl/BerkeleyDB/mkconsts +++ b/db/perl/BerkeleyDB/mkconsts @@ -8,8 +8,9 @@ use constant IGNORE => 'ignore' ; %constants = ( + ######### - # 2.0.0 + # 2.0.3 ######### DBM_INSERT => IGNORE, @@ -92,10 +93,12 @@ use constant IGNORE => 'ignore' ; DB_RDONLY => DEFINE, DB_REGISTERED => DEFINE, DB_RE_MODIFIED => IGNORE, + DB_SEQUENTIAL => DEFINE, DB_SET => DEFINE, DB_SET_RANGE => DEFINE, DB_SNAPSHOT => DEFINE, DB_SWAPBYTES => DEFINE, + DB_TEMPORARY => DEFINE, DB_TRUNCATE => DEFINE, DB_TXNMAGIC => DEFINE, DB_TXNVERSION => DEFINE, @@ -122,36 +125,29 @@ use constant IGNORE => 'ignore' ; const => IGNORE, # enum DBTYPE - DB_BTREE => '2.0.0', - DB_HASH => '2.0.0', - DB_RECNO => '2.0.0', - DB_UNKNOWN => '2.0.0', + DB_BTREE => '2.0.3', + DB_HASH => '2.0.3', + DB_RECNO => '2.0.3', + DB_UNKNOWN => '2.0.3', # enum db_lockop_t - DB_LOCK_DUMP => '2.0.0', - DB_LOCK_GET => '2.0.0', - DB_LOCK_PUT => '2.0.0', - DB_LOCK_PUT_ALL => '2.0.0', - DB_LOCK_PUT_OBJ => '2.0.0', + DB_LOCK_DUMP => '2.0.3', + DB_LOCK_GET => '2.0.3', + DB_LOCK_PUT => '2.0.3', + DB_LOCK_PUT_ALL => '2.0.3', + DB_LOCK_PUT_OBJ => '2.0.3', # enum db_lockmode_t - DB_LOCK_NG => 
IGNORE, # 2.0.0 - DB_LOCK_READ => IGNORE, # 2.0.0 - DB_LOCK_WRITE => IGNORE, # 2.0.0 - DB_LOCK_IREAD => IGNORE, # 2.0.0 - DB_LOCK_IWRITE => IGNORE, # 2.0.0 - DB_LOCK_IWR => IGNORE, # 2.0.0 + DB_LOCK_NG => IGNORE, # 2.0.3 + DB_LOCK_READ => IGNORE, # 2.0.3 + DB_LOCK_WRITE => IGNORE, # 2.0.3 + DB_LOCK_IREAD => IGNORE, # 2.0.3 + DB_LOCK_IWRITE => IGNORE, # 2.0.3 + DB_LOCK_IWR => IGNORE, # 2.0.3 # enum ACTION - FIND => IGNORE, # 2.0.0 - ENTER => IGNORE, # 2.0.0 - - ######### - # 2.0.3 - ######### - - DB_SEQUENTIAL => DEFINE, - DB_TEMPORARY => DEFINE, + FIND => IGNORE, # 2.0.3 + ENTER => IGNORE, # 2.0.3 ######### # 2.1.0 @@ -199,22 +195,12 @@ use constant IGNORE => 'ignore' ; DB_RE_SNAPSHOT => IGNORE, ######### - # 2.3.1 + # 2.3.10 ######### + DB_APPEND => DEFINE, DB_GET_RECNO => DEFINE, DB_SET_RECNO => DEFINE, - - ######### - # 2.3.3 - ######### - - DB_APPEND => DEFINE, - - ######### - # 2.3.6 - ######### - DB_TXN_CKP => DEFINE, ######### @@ -295,7 +281,7 @@ use constant IGNORE => 'ignore' ; __UNUSED_4000 => IGNORE, ######### - # 2.5.2 + # 2.5.9 ######### DBC_CONTINUE => IGNORE, @@ -303,20 +289,15 @@ use constant IGNORE => 'ignore' ; DBC_RECOVER => IGNORE, DBC_RMW => IGNORE, DB_DBM_ERROR => IGNORE, + DB_DUPSORT => DEFINE, DB_GET_BOTH => DEFINE, + DB_JOIN_ITEM => DEFINE, DB_NEXT_DUP => DEFINE, DB_OPFLAGS_MASK => DEFINE, DB_RMW => DEFINE, DB_RUNRECOVERY => DEFINE, dbmclose => IGNORE, - ######### - # 2.5.9 - ######### - - DB_DUPSORT => DEFINE, - DB_JOIN_ITEM => DEFINE, - ######### # 2.6.4 ######### @@ -409,7 +390,7 @@ use constant IGNORE => 'ignore' ; DB_QUEUE => '3.0.55', ######### - # 3.1.12 + # 3.1.14 ######### DBC_ACTIVE => IGNORE, @@ -443,49 +424,34 @@ use constant IGNORE => 'ignore' ; DB_VRFY_FLAGMASK => DEFINE, # enum db_recops - DB_TXN_ABORT => '3.1.12', - DB_TXN_BACKWARD_ROLL => '3.1.12', - DB_TXN_FORWARD_ROLL => '3.1.12', - DB_TXN_OPENFILES => '3.1.12', + DB_TXN_ABORT => '3.1.14', + DB_TXN_BACKWARD_ROLL => '3.1.14', + DB_TXN_FORWARD_ROLL => '3.1.14', + DB_TXN_OPENFILES => '3.1.14', ######### - # 3.2.3 + # 3.2.9 ######### DBC_COMPENSATE => IGNORE, + DB_ALREADY_ABORTED => DEFINE, DB_AM_VERIFYING => IGNORE, DB_CDB_ALLDB => DEFINE, + DB_CONSUME_WAIT => DEFINE, DB_ENV_CDB_ALLDB => DEFINE, DB_EXTENT => DEFINE, + DB_JAVA_CALLBACK => DEFINE, DB_JOINENV => DEFINE, DB_LOCK_SWITCH => DEFINE, DB_MPOOL_EXTENT => DEFINE, DB_REGION_MAGIC => DEFINE, - DB_UNRESOLVED_CHILD => DEFINE, DB_VERIFY => DEFINE, - # enum db_notices - DB_NOTICE_LOGFILE_CHANGED => IGNORE, # 3.2.3 - - ######### - # 3.2.6 - ######### - - DB_ALREADY_ABORTED => DEFINE, - DB_CONSUME_WAIT => DEFINE, - DB_JAVA_CALLBACK => DEFINE, - DB_TEST_POSTEXTDELETE => DEFINE, - DB_TEST_POSTEXTOPEN => DEFINE, - DB_TEST_POSTEXTUNLINK => DEFINE, - DB_TEST_PREEXTDELETE => DEFINE, - DB_TEST_PREEXTOPEN => DEFINE, - DB_TEST_PREEXTUNLINK => DEFINE, - # enum db_lockmode_t - DB_LOCK_WAIT => IGNORE, # 3.2.6 + DB_LOCK_WAIT => IGNORE, # 3.2.9 ######### - # 3.3.4 + # 3.3.11 ######### DBC_DIRTY_READ => IGNORE, @@ -508,35 +474,31 @@ use constant IGNORE => 'ignore' ; DB_PAGE_NOTFOUND => DEFINE, DB_RPC_SERVERPROG => DEFINE, DB_RPC_SERVERVERS => DEFINE, + DB_SECONDARY_BAD => DEFINE, + DB_SURPRISE_KID => DEFINE, + DB_TEST_POSTDESTROY => DEFINE, + DB_TEST_PREDESTROY => DEFINE, DB_UPDATE_SECONDARY => DEFINE, DB_XIDDATASIZE => DEFINE, # enum db_recops - DB_TXN_POPENFILES => '3.3.4', + DB_TXN_POPENFILES => '3.3.11', # enum db_lockop_t - DB_LOCK_UPGRADE_WRITE => '3.3.4', + DB_LOCK_UPGRADE_WRITE => '3.3.11', # enum db_lockmode_t - DB_LOCK_DIRTY => IGNORE, # 3.3.4 - 
DB_LOCK_WWRITE => IGNORE, # 3.3.4 + DB_LOCK_DIRTY => IGNORE, # 3.3.11 + DB_LOCK_WWRITE => IGNORE, # 3.3.11 ######### - # 3.3.11 - ######### - - DB_SECONDARY_BAD => DEFINE, - DB_SURPRISE_KID => DEFINE, - DB_TEST_POSTDESTROY => DEFINE, - DB_TEST_PREDESTROY => DEFINE, - - ######### - # 4.0.7 + # 4.0.14 ######### DB_APPLY_LOGREG => DEFINE, - DB_BROADCAST_EID => DEFINE, DB_CL_WRITER => DEFINE, + DB_EID_BROADCAST => DEFINE, + DB_EID_INVALID => DEFINE, DB_ENV_NOLOCKING => DEFINE, DB_ENV_NOPANIC => DEFINE, DB_ENV_REGION_INIT => DEFINE, @@ -545,7 +507,6 @@ use constant IGNORE => 'ignore' ; DB_ENV_REP_MASTER => DEFINE, DB_ENV_YIELDCPU => DEFINE, DB_GET_BOTH_RANGE => DEFINE, - DB_INVALID_EID => DEFINE, DB_LOCK_EXPIRE => DEFINE, DB_LOCK_FREE_LOCKER => DEFINE, DB_LOCK_SET_TIMEOUT => DEFINE, @@ -571,6 +532,7 @@ use constant IGNORE => 'ignore' ; DB_SET_TXN_TIMEOUT => DEFINE, DB_STAT_CLEAR => DEFINE, DB_TIMEOUT => DEFINE, + DB_VERB_REPLICATION => DEFINE, DB_YIELDCPU => DEFINE, MP_FLUSH => IGNORE, MP_OPEN_CALLED => IGNORE, @@ -587,26 +549,18 @@ use constant IGNORE => 'ignore' ; TXN_SYNC => IGNORE, # enum db_recops - DB_TXN_APPLY => '4.0.7', + DB_TXN_APPLY => '4.0.14', # enum db_lockop_t - DB_LOCK_GET_TIMEOUT => '4.0.7', - DB_LOCK_PUT_READ => '4.0.7', - DB_LOCK_TIMEOUT => '4.0.7', + DB_LOCK_GET_TIMEOUT => '4.0.14', + DB_LOCK_PUT_READ => '4.0.14', + DB_LOCK_TIMEOUT => '4.0.14', # enum db_status_t - DB_LSTAT_EXPIRED => IGNORE, # 4.0.7 + DB_LSTAT_EXPIRED => IGNORE, # 4.0.14 ######### - # 4.0.14 - ######### - - DB_EID_BROADCAST => DEFINE, - DB_EID_INVALID => DEFINE, - DB_VERB_REPLICATION => DEFINE, - - ######### - # 4.1.17 + # 4.1.24 ######### DBC_OWN_LID => IGNORE, @@ -661,26 +615,24 @@ use constant IGNORE => 'ignore' ; _DB_EXT_PROT_IN_ => IGNORE, # enum db_lockop_t - DB_LOCK_TRADE => '4.1.17', + DB_LOCK_TRADE => '4.1.24', # enum db_status_t - DB_LSTAT_NOTEXIST => IGNORE, # 4.1.17 + DB_LSTAT_NOTEXIST => IGNORE, # 4.1.24 # enum DB_CACHE_PRIORITY - DB_PRIORITY_VERY_LOW => '4.1.17', - DB_PRIORITY_LOW => '4.1.17', - DB_PRIORITY_DEFAULT => '4.1.17', - DB_PRIORITY_HIGH => '4.1.17', - DB_PRIORITY_VERY_HIGH => '4.1.17', + DB_PRIORITY_VERY_LOW => '4.1.24', + DB_PRIORITY_LOW => '4.1.24', + DB_PRIORITY_DEFAULT => '4.1.24', + DB_PRIORITY_HIGH => '4.1.24', + DB_PRIORITY_VERY_HIGH => '4.1.24', # enum db_recops - DB_TXN_BACKWARD_ALLOC => '4.1.17', - DB_TXN_GETPGNOS => '4.1.17', - DB_TXN_PRINT => '4.1.17', - + DB_TXN_BACKWARD_ALLOC => '4.1.24', + DB_TXN_PRINT => '4.1.24', ######### - # 4.2.41 + # 4.2.50 ######### DB_AM_NOT_DURABLE => IGNORE, @@ -715,6 +667,46 @@ use constant IGNORE => 'ignore' ; MP_FILEID_SET => IGNORE, TXN_RESTORED => IGNORE, + ######### + # 4.3.12 + ######### + + DBC_DEGREE_2 => IGNORE, + DB_AM_INORDER => IGNORE, + DB_BUFFER_SMALL => DEFINE, + DB_DEGREE_2 => DEFINE, + DB_DSYNC_LOG => DEFINE, + DB_DURABLE_UNKNOWN => DEFINE, + DB_ENV_DSYNC_LOG => DEFINE, + DB_ENV_LOG_INMEMORY => DEFINE, + DB_INORDER => DEFINE, + DB_LOCK_ABORT => DEFINE, + DB_LOCK_MAXWRITE => DEFINE, + DB_LOG_BUFFER_FULL => DEFINE, + DB_LOG_INMEMORY => DEFINE, + DB_LOG_RESEND => DEFINE, + DB_MPOOL_FREE => DEFINE, + DB_REP_EGENCHG => DEFINE, + DB_REP_LOGREADY => DEFINE, + DB_REP_PAGEDONE => DEFINE, + DB_REP_STARTUPDONE => DEFINE, + DB_SEQUENCE_VERSION => DEFINE, + DB_SEQ_DEC => DEFINE, + DB_SEQ_INC => DEFINE, + DB_SEQ_RANGE_SET => DEFINE, + DB_SEQ_WRAP => DEFINE, + DB_STAT_ALL => DEFINE, + DB_STAT_LOCK_CONF => DEFINE, + DB_STAT_LOCK_LOCKERS => DEFINE, + DB_STAT_LOCK_OBJECTS => DEFINE, + DB_STAT_LOCK_PARAMS => DEFINE, + DB_STAT_MEMP_HASH => 
DEFINE, + DB_STAT_SUBSYSTEM => DEFINE, + DB_UNREF => DEFINE, + DB_VERSION_MISMATCH => DEFINE, + TXN_DEADLOCK => IGNORE, + TXN_DEGREE_2 => IGNORE, + ) ; sub enum_Macro diff --git a/db/perl/BerkeleyDB/ppport.h b/db/perl/BerkeleyDB/ppport.h index 42fd002b8..0815cf2d8 100644 --- a/db/perl/BerkeleyDB/ppport.h +++ b/db/perl/BerkeleyDB/ppport.h @@ -86,6 +86,11 @@ /* Replace: 0 */ #endif +#ifndef SvGETMAGIC +# define SvGETMAGIC(x) STMT_START { if (SvGMAGICAL(x)) mg_get(x); } STMT_END +#endif + + /* DEFSV appears first in 5.004_56 */ #ifndef DEFSV # define DEFSV GvSV(PL_defgv) diff --git a/db/perl/BerkeleyDB/scan b/db/perl/BerkeleyDB/scan index 6e6b32071..c501f3c45 100644 --- a/db/perl/BerkeleyDB/scan +++ b/db/perl/BerkeleyDB/scan @@ -16,19 +16,25 @@ my $ignore_re = '^(' . join("|", DB_TSL MP TXN + DB_TXN_GETPGNOS )) . ')' ; my %ignore_def = map {$_, 1} qw() ; %ignore_enums = map {$_, 1} qw( ACTION db_status_t db_notices db_lockmode_t ) ; +my %ignore_exact_enum = map { $_ => 1} + qw( + DB_TXN_GETPGNOS + ); + my $filler = ' ' x 26 ; chdir "libraries" || die "Cannot chdir into './libraries': $!\n"; foreach my $name (sort tuple glob "[2-9]*") { - next if $name =~ /NC$/; + next if $name =~ /(NC|private)$/; my $inc = "$name/include/db.h" ; next unless -f $inc ; @@ -123,6 +129,7 @@ sub scan my $out = $filler ; foreach $name (@new) { + next if $ignore_exact_enum{$name} ; $out = $filler ; substr($out,0, length $name) = $name; $result .= "\t$out => $value\n" ; diff --git a/db/perl/BerkeleyDB/t/btree.t b/db/perl/BerkeleyDB/t/btree.t index 03ae1e5f1..152c36601 100644 --- a/db/perl/BerkeleyDB/t/btree.t +++ b/db/perl/BerkeleyDB/t/btree.t @@ -104,7 +104,7 @@ umask(0) ; ok 27, my $lexD = new LexDir($home) ; ok 28, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL, - -Home => $home ; + @StdErrFile, -Home => $home ; ok 29, my $db = new BerkeleyDB::Btree -Filename => $Dfile, -Env => $env, -Flags => DB_CREATE ; @@ -632,7 +632,7 @@ print "[$db] [$!] $BerkeleyDB::Error\n" ; my $home = "./fred" ; ok 177, my $lexD = new LexDir($home) ; - ok 178, my $env = new BerkeleyDB::Env -Home => $home, + ok 178, my $env = new BerkeleyDB::Env -Home => $home, @StdErrFile, -Flags => DB_CREATE|DB_INIT_TXN| DB_INIT_MPOOL|DB_INIT_LOCK ; ok 179, my $txn = $env->txn_begin() ; diff --git a/db/perl/BerkeleyDB/t/cds.t b/db/perl/BerkeleyDB/t/cds.t new file mode 100644 index 000000000..4d129a0a6 --- /dev/null +++ b/db/perl/BerkeleyDB/t/cds.t @@ -0,0 +1,80 @@ +#!./perl -w + +# Tests for Concurrent Data Store mode + +use strict ; + +BEGIN { + unless(grep /blib/, @INC) { + chdir 't' if -d 't'; + @INC = '../lib' if -d '../lib'; + } +} + +use BerkeleyDB; +use t::util ; + +BEGIN +{ + if ($BerkeleyDB::db_version < 2) { + print "1..0 # Skip: this needs Berkeley DB 2.x.x or better\n" ; + exit 0 ; + } +} + + + +print "1..12\n"; + +my $Dfile = "dbhash.tmp"; +unlink $Dfile; + +umask(0) ; + +{ + # Error case -- env not opened in CDS mode + + my $lex = new LexFile $Dfile ; + + my $home = "./fred" ; + ok 1, my $lexD = new LexDir($home) ; + + ok 2, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL, + -Home => $home, @StdErrFile ; + + ok 3, my $db = new BerkeleyDB::Btree -Filename => $Dfile, + -Env => $env, + -Flags => DB_CREATE ; + + ok 4, ! $env->cds_enabled() ; + ok 5, ! 
$db->cds_enabled() ; + + eval { $db->cds_lock() }; + ok 6, $@ =~ /CDS not enabled for this database/; + + undef $db; + undef $env ; +} + +{ + my $lex = new LexFile $Dfile ; + + my $home = "./fred" ; + ok 7, my $lexD = new LexDir($home) ; + + ok 8, my $env = new BerkeleyDB::Env -Flags => DB_INIT_CDB|DB_CREATE|DB_INIT_MPOOL, + -Home => $home, @StdErrFile ; + + ok 9, my $db = new BerkeleyDB::Btree -Filename => $Dfile, + -Env => $env, + -Flags => DB_CREATE ; + + ok 10, $env->cds_enabled() ; + ok 11, $db->cds_enabled() ; + + my $cds = $db->cds_lock() ; + ok 12, $cds ; + + undef $db; + undef $env ; +} diff --git a/db/perl/BerkeleyDB/t/db-3.0.t b/db/perl/BerkeleyDB/t/db-3.0.t index 017a759e3..6f5137106 100644 --- a/db/perl/BerkeleyDB/t/db-3.0.t +++ b/db/perl/BerkeleyDB/t/db-3.0.t @@ -35,7 +35,7 @@ umask(0); my $home = "./fred" ; ok 1, my $lexD = new LexDir($home) ; chdir "./fred" ; - ok 2, my $env = new BerkeleyDB::Env -Flags => DB_CREATE ; + ok 2, my $env = new BerkeleyDB::Env -Flags => DB_CREATE, @StdErrFile ; ok 3, $env->set_mutexlocks(0) == 0 ; chdir ".." ; undef $env ; diff --git a/db/perl/BerkeleyDB/t/db-3.2.t b/db/perl/BerkeleyDB/t/db-3.2.t index d5a24a5b5..b9807cef4 100644 --- a/db/perl/BerkeleyDB/t/db-3.2.t +++ b/db/perl/BerkeleyDB/t/db-3.2.t @@ -45,7 +45,7 @@ umask(0) ; my $home = "./fred" ; ok 2, my $lexD = new LexDir($home) ; - ok 3, my $env = new BerkeleyDB::Env -Home => $home, + ok 3, my $env = new BerkeleyDB::Env -Home => $home, @StdErrFile, -Flags => DB_CREATE , -SetFlags => DB_NOMMAP ; @@ -57,7 +57,7 @@ umask(0) ; my $home = "./fred" ; ok 4, my $lexD = new LexDir($home) ; - ok 5, my $env = new BerkeleyDB::Env -Home => $home, + ok 5, my $env = new BerkeleyDB::Env -Home => $home, @StdErrFile, -Flags => DB_CREATE ; ok 6, ! $env->set_flags(DB_NOMMAP, 1); diff --git a/db/perl/BerkeleyDB/t/destroy.t b/db/perl/BerkeleyDB/t/destroy.t index 7457d36c5..445d07407 100644 --- a/db/perl/BerkeleyDB/t/destroy.t +++ b/db/perl/BerkeleyDB/t/destroy.t @@ -27,7 +27,7 @@ umask(0); my $value ; ok 1, my $lexD = new LexDir($home) ; - ok 2, my $env = new BerkeleyDB::Env -Home => $home, + ok 2, my $env = new BerkeleyDB::Env -Home => $home, @StdErrFile, -Flags => DB_CREATE|DB_INIT_TXN| DB_INIT_MPOOL|DB_INIT_LOCK ; ok 3, my $txn = $env->txn_begin() ; diff --git a/db/perl/BerkeleyDB/t/encrypt.t b/db/perl/BerkeleyDB/t/encrypt.t index 9ff74058b..b3cc13821 100644 --- a/db/perl/BerkeleyDB/t/encrypt.t +++ b/db/perl/BerkeleyDB/t/encrypt.t @@ -22,7 +22,7 @@ BEGIN } # Is encryption available? 
- my $env = new BerkeleyDB::Env + my $env = new BerkeleyDB::Env @StdErrFile, -Encrypt => {Password => "abc", Flags => DB_ENCRYPT_AES }; @@ -41,7 +41,7 @@ print "1..80\n"; { eval { - my $env = new BerkeleyDB::Env + my $env = new BerkeleyDB::Env @StdErrFile, -Encrypt => 1, -Flags => DB_CREATE ; }; @@ -49,7 +49,7 @@ print "1..80\n"; eval { - my $env = new BerkeleyDB::Env + my $env = new BerkeleyDB::Env @StdErrFile, -Encrypt => {}, -Flags => DB_CREATE ; }; @@ -57,7 +57,7 @@ print "1..80\n"; eval { - my $env = new BerkeleyDB::Env + my $env = new BerkeleyDB::Env @StdErrFile, -Encrypt => {Password => "fred"}, -Flags => DB_CREATE ; }; @@ -65,7 +65,7 @@ print "1..80\n"; eval { - my $env = new BerkeleyDB::Env + my $env = new BerkeleyDB::Env @StdErrFile, -Encrypt => {Flags => 1}, -Flags => DB_CREATE ; }; @@ -73,7 +73,7 @@ print "1..80\n"; eval { - my $env = new BerkeleyDB::Env + my $env = new BerkeleyDB::Env @StdErrFile, -Encrypt => {Fred => 1}, -Flags => DB_CREATE ; }; @@ -88,14 +88,13 @@ print "1..80\n"; my $home = "./fred" ; #mkdir $home; ok 6, my $lexD = new LexDir($home) ; - ok 7, my $env = new BerkeleyDB::Env + ok 7, my $env = new BerkeleyDB::Env @StdErrFile, -Home => $home, -Encrypt => {Password => "abc", Flags => DB_ENCRYPT_AES }, -Flags => DB_CREATE | DB_INIT_MPOOL ; -print "$BerkeleyDB::Error\n" ; my $Dfile = "abc.enc"; diff --git a/db/perl/BerkeleyDB/t/env.t b/db/perl/BerkeleyDB/t/env.t index fd69db279..6729ed92b 100644 --- a/db/perl/BerkeleyDB/t/env.t +++ b/db/perl/BerkeleyDB/t/env.t @@ -11,10 +11,15 @@ BEGIN { } } + +BEGIN { + $ENV{LC_ALL} = 'de_DE@euro'; +} + use BerkeleyDB; use t::util ; -print "1..50\n"; +print "1..53\n"; my $Dfile = "dbhash.tmp"; @@ -54,7 +59,8 @@ my $version_major = 0; my $home = "./fred" ; ok 11, my $lexD = new LexDir($home) ; chdir "./fred" ; - ok 12, my $env = new BerkeleyDB::Env -Flags => DB_CREATE ; + ok 12, my $env = new BerkeleyDB::Env -Flags => DB_CREATE, + @StdErrFile; chdir ".." ; undef $env ; } @@ -74,7 +80,7 @@ my $version_major = 0; my $home = "./not_there" ; rmtree $home ; ok 15, ! -d $home ; - my $env = new BerkeleyDB::Env -Home => $home, + my $env = new BerkeleyDB::Env -Home => $home, @StdErrFile, -Flags => DB_INIT_LOCK ; ok 16, ! $env ; ok 17, $! != 0 || $^E != 0 ; @@ -93,7 +99,7 @@ my $version_major = 0; ok 18, my $lexD = new LexDir($home) ; ok 19, -d $data_dir ? chmod 0777, $data_dir : mkdir($data_dir, 0777) ; ok 20, -d $log_dir ? 
chmod 0777, $log_dir : mkdir($log_dir, 0777) ; - my $env = new BerkeleyDB::Env -Home => $home, + my $env = new BerkeleyDB::Env -Home => $home, @StdErrFile, -Config => { DB_DATA_DIR => $data_dir, DB_LOG_DIR => $log_dir }, @@ -144,48 +150,63 @@ my $version_major = 0; } { - # -ErrFile with a filehandle/reference -- should fail + # -ErrFile with a filehandle + use IO::File ; + my $errfile = "./errfile" ; my $home = "./fred" ; ok 30, my $lexD = new LexDir($home) ; - eval { my $env = new BerkeleyDB::Env( -ErrFile => [], + my $lex = new LexFile $errfile ; + my $fh = new IO::File ">$errfile" ; + ok 31, my $env = new BerkeleyDB::Env( -ErrFile => $fh, -Flags => DB_CREATE, - -Home => $home) ; }; - ok 31, $@ =~ /ErrFile parameter must be a file name/; + -Home => $home) ; + my $db = new BerkeleyDB::Hash -Filename => $Dfile, + -Env => $env, + -Flags => -1; + ok 32, !$db ; + + ok 33, $BerkeleyDB::Error =~ /^illegal flag specified to (db_open|DB->open)/; + ok 34, -e $errfile ; + my $contents = docat($errfile) ; + chomp $contents ; + ok 35, $BerkeleyDB::Error eq $contents ; + + undef $env ; } { # -ErrPrefix my $home = "./fred" ; - ok 32, my $lexD = new LexDir($home) ; + ok 36, my $lexD = new LexDir($home) ; my $errfile = "./errfile" ; my $lex = new LexFile $errfile ; - ok 33, my $env = new BerkeleyDB::Env( -ErrFile => $errfile, + ok 37, my $env = new BerkeleyDB::Env( -ErrFile => $errfile, -ErrPrefix => "PREFIX", -Flags => DB_CREATE, -Home => $home) ; my $db = new BerkeleyDB::Hash -Filename => $Dfile, -Env => $env, -Flags => -1; - ok 34, !$db ; + ok 38, !$db ; - ok 35, $BerkeleyDB::Error =~ /^PREFIX: illegal flag specified to (db_open|DB->open)/; - ok 36, -e $errfile ; + ok 39, $BerkeleyDB::Error =~ /^PREFIX: illegal flag specified to (db_open|DB->open)/; + ok 40, -e $errfile ; my $contents = docat($errfile) ; chomp $contents ; - ok 37, $BerkeleyDB::Error eq $contents ; + ok 41, $BerkeleyDB::Error eq $contents ; # change the prefix on the fly my $old = $env->errPrefix("NEW ONE") ; - ok 38, $old eq "PREFIX" ; + ok 42, $old eq "PREFIX" ; $db = new BerkeleyDB::Hash -Filename => $Dfile, -Env => $env, -Flags => -1; - ok 39, !$db ; - ok 40, $BerkeleyDB::Error =~ /^NEW ONE: illegal flag specified to (db_open|DB->open)/; + ok 43, !$db ; + ok 44, $BerkeleyDB::Error =~ /^NEW ONE: illegal flag specified to (db_open|DB->open)/; $contents = docat($errfile) ; chomp $contents ; - ok 41, $contents =~ /$BerkeleyDB::Error$/ ; + ok 45, $contents =~ /$BerkeleyDB::Error$/ ; undef $env ; } @@ -197,20 +218,20 @@ my $version_major = 0; my $data_dir = "$home/data_dir" ; my $log_dir = "$home/log_dir" ; my $data_file = "data.db" ; - ok 42, my $lexD = new LexDir($home); - ok 43, -d $data_dir ? chmod 0777, $data_dir : mkdir($data_dir, 0777) ; - ok 44, -d $log_dir ? chmod 0777, $log_dir : mkdir($log_dir, 0777) ; - my $env = new BerkeleyDB::Env -Home => $home, + ok 46, my $lexD = new LexDir($home); + ok 47, -d $data_dir ? chmod 0777, $data_dir : mkdir($data_dir, 0777) ; + ok 48, -d $log_dir ? chmod 0777, $log_dir : mkdir($log_dir, 0777) ; + my $env = new BerkeleyDB::Env -Home => $home, @StdErrFile, -Config => { DB_DATA_DIR => $data_dir, DB_LOG_DIR => $log_dir }, -Flags => DB_CREATE|DB_INIT_TXN|DB_INIT_LOG| DB_INIT_MPOOL|DB_INIT_LOCK ; - ok 45, $env ; + ok 49, $env ; - ok 46, my $txn_mgr = $env->TxnMgr() ; + ok 50, my $txn_mgr = $env->TxnMgr() ; - ok 47, $env->db_appexit() == 0 ; + ok 51, $env->db_appexit() == 0 ; } @@ -219,13 +240,17 @@ my $version_major = 0; # should fail with Berkeley DB 3.x or better. 
my $home = "./fred" ; - ok 48, my $lexD = new LexDir($home) ; + ok 52, my $lexD = new LexDir($home) ; chdir "./fred" ; my $env = new BerkeleyDB::Env -Home => $home, -Flags => DB_CREATE ; - ok 49, $version_major == 2 ? $env : ! $env ; - ok 50, $version_major == 2 ? 1 - : $BerkeleyDB::Error =~ /No such file or directory/ ; - #print " $BerkeleyDB::Error\n"; + ok 53, $version_major == 2 ? $env : ! $env ; + + # The test below is not portable -- the error message returned by + # $BerkeleyDB::Error is locale dependant. + + #ok 54, $version_major == 2 ? 1 + # : $BerkeleyDB::Error =~ /No such file or directory/ ; + # or print "# BerkeleyDB::Error is $BerkeleyDB::Error\n"; chdir ".." ; undef $env ; } diff --git a/db/perl/BerkeleyDB/t/hash.t b/db/perl/BerkeleyDB/t/hash.t index f2c5caf5a..25b8b20cd 100644 --- a/db/perl/BerkeleyDB/t/hash.t +++ b/db/perl/BerkeleyDB/t/hash.t @@ -103,7 +103,7 @@ umask(0) ; my $home = "./fred" ; ok 28, my $lexD = new LexDir($home); - ok 29, my $env = new BerkeleyDB::Env -Flags => DB_CREATE| DB_INIT_MPOOL, + ok 29, my $env = new BerkeleyDB::Env -Flags => DB_CREATE| DB_INIT_MPOOL,@StdErrFile, -Home => $home ; ok 30, my $db = new BerkeleyDB::Hash -Filename => $Dfile, -Env => $env, @@ -438,7 +438,7 @@ umask(0) ; my $home = "./fred" ; ok 146, my $lexD = new LexDir($home); - ok 147, my $env = new BerkeleyDB::Env -Home => $home, + ok 147, my $env = new BerkeleyDB::Env -Home => $home,@StdErrFile, -Flags => DB_CREATE|DB_INIT_TXN| DB_INIT_MPOOL|DB_INIT_LOCK ; ok 148, my $txn = $env->txn_begin() ; diff --git a/db/perl/BerkeleyDB/t/join.t b/db/perl/BerkeleyDB/t/join.t index 0f1881065..ae7942f2c 100644 --- a/db/perl/BerkeleyDB/t/join.t +++ b/db/perl/BerkeleyDB/t/join.t @@ -21,7 +21,7 @@ if ($BerkeleyDB::db_ver < 2.005002) } -print "1..41\n"; +print "1..42\n"; my $Dfile1 = "dbhash1.tmp"; my $Dfile2 = "dbhash2.tmp"; @@ -77,14 +77,16 @@ umask(0) ; my $value ; my $status ; - my $home = "./fred" ; - ok 7, my $lexD = new LexDir($home); - ok 8, my $env = new BerkeleyDB::Env -Home => $home, + my $home = "./fred7" ; + rmtree $home; + ok 7, ! 
-d $home; + ok 8, my $lexD = new LexDir($home); + ok 9, my $env = new BerkeleyDB::Env -Home => $home, @StdErrFile, -Flags => DB_CREATE|DB_INIT_TXN |DB_INIT_MPOOL; #|DB_INIT_MPOOL| DB_INIT_LOCK; - ok 9, my $txn = $env->txn_begin() ; - ok 10, my $db1 = tie %hash1, 'BerkeleyDB::Hash', + ok 10, my $txn = $env->txn_begin() ; + ok 11, my $db1 = tie %hash1, 'BerkeleyDB::Hash', -Filename => $Dfile1, -Flags => DB_CREATE, -DupCompare => sub { $_[0] cmp $_[1] }, @@ -93,7 +95,7 @@ umask(0) ; -Txn => $txn ; ; - ok 11, my $db2 = tie %hash2, 'BerkeleyDB::Hash', + ok 12, my $db2 = tie %hash2, 'BerkeleyDB::Hash', -Filename => $Dfile2, -Flags => DB_CREATE, -DupCompare => sub { $_[0] cmp $_[1] }, @@ -101,7 +103,7 @@ umask(0) ; -Env => $env, -Txn => $txn ; - ok 12, my $db3 = tie %hash3, 'BerkeleyDB::Btree', + ok 13, my $db3 = tie %hash3, 'BerkeleyDB::Btree', -Filename => $Dfile3, -Flags => DB_CREATE, -DupCompare => sub { $_[0] cmp $_[1] }, @@ -110,7 +112,7 @@ umask(0) ; -Txn => $txn ; - ok 13, addData($db1, qw( apple Convenience + ok 14, addData($db1, qw( apple Convenience peach Shopway pear Farmer raspberry Shopway @@ -119,7 +121,7 @@ umask(0) ; blueberry Farmer )); - ok 14, addData($db2, qw( red apple + ok 15, addData($db2, qw( red apple red raspberry red strawberry yellow peach @@ -127,7 +129,7 @@ umask(0) ; green gooseberry blue blueberry)) ; - ok 15, addData($db3, qw( expensive apple + ok 16, addData($db3, qw( expensive apple reasonable raspberry expensive strawberry reasonable peach @@ -135,13 +137,13 @@ umask(0) ; expensive gooseberry reasonable blueberry)) ; - ok 16, my $cursor2 = $db2->db_cursor() ; + ok 17, my $cursor2 = $db2->db_cursor() ; my $k = "red" ; my $v = "" ; - ok 17, $cursor2->c_get($k, $v, DB_SET) == 0 ; + ok 18, $cursor2->c_get($k, $v, DB_SET) == 0 ; # Two way Join - ok 18, my $cursor1 = $db1->db_join([$cursor2]) ; + ok 19, my $cursor1 = $db1->db_join([$cursor2]) ; my %expected = qw( apple Convenience raspberry Shopway @@ -154,20 +156,20 @@ umask(0) ; if defined $expected{$k} && $expected{$k} eq $v ; #print "[$k] [$v]\n" ; } - ok 19, keys %expected == 0 ; - ok 20, $cursor1->status() == DB_NOTFOUND ; + ok 20, keys %expected == 0 ; + ok 21, $cursor1->status() == DB_NOTFOUND ; # Three way Join - ok 21, $cursor2 = $db2->db_cursor() ; + ok 22, $cursor2 = $db2->db_cursor() ; $k = "red" ; $v = "" ; - ok 22, $cursor2->c_get($k, $v, DB_SET) == 0 ; + ok 23, $cursor2->c_get($k, $v, DB_SET) == 0 ; - ok 23, my $cursor3 = $db3->db_cursor() ; + ok 24, my $cursor3 = $db3->db_cursor() ; $k = "expensive" ; $v = "" ; - ok 24, $cursor3->c_get($k, $v, DB_SET) == 0 ; - ok 25, $cursor1 = $db1->db_join([$cursor2, $cursor3]) ; + ok 25, $cursor3->c_get($k, $v, DB_SET) == 0 ; + ok 26, $cursor1 = $db1->db_join([$cursor2, $cursor3]) ; %expected = qw( apple Convenience strawberry Shopway @@ -179,21 +181,21 @@ umask(0) ; if defined $expected{$k} && $expected{$k} eq $v ; #print "[$k] [$v]\n" ; } - ok 26, keys %expected == 0 ; - ok 27, $cursor1->status() == DB_NOTFOUND ; + ok 27, keys %expected == 0 ; + ok 28, $cursor1->status() == DB_NOTFOUND ; # test DB_JOIN_ITEM # ################# - ok 28, $cursor2 = $db2->db_cursor() ; + ok 29, $cursor2 = $db2->db_cursor() ; $k = "red" ; $v = "" ; - ok 29, $cursor2->c_get($k, $v, DB_SET) == 0 ; + ok 30, $cursor2->c_get($k, $v, DB_SET) == 0 ; - ok 30, $cursor3 = $db3->db_cursor() ; + ok 31, $cursor3 = $db3->db_cursor() ; $k = "expensive" ; $v = "" ; - ok 31, $cursor3->c_get($k, $v, DB_SET) == 0 ; - ok 32, $cursor1 = $db1->db_join([$cursor2, $cursor3]) ; + ok 32, $cursor3->c_get($k, 
$v, DB_SET) == 0 ; + ok 33, $cursor1 = $db1->db_join([$cursor2, $cursor3]) ; %expected = qw( apple 1 strawberry 1 @@ -207,22 +209,22 @@ umask(0) ; if defined $expected{$k} ; #print "[$k]\n" ; } - ok 33, keys %expected == 0 ; - ok 34, $cursor1->status() == DB_NOTFOUND ; + ok 34, keys %expected == 0 ; + ok 35, $cursor1->status() == DB_NOTFOUND ; - ok 35, $cursor1->c_close() == 0 ; - ok 36, $cursor2->c_close() == 0 ; - ok 37, $cursor3->c_close() == 0 ; + ok 36, $cursor1->c_close() == 0 ; + ok 37, $cursor2->c_close() == 0 ; + ok 38, $cursor3->c_close() == 0 ; - ok 38, ($status = $txn->txn_commit) == 0; + ok 39, ($status = $txn->txn_commit()) == 0; undef $txn ; - ok 39, my $cursor1a = $db1->db_cursor() ; + ok 40, my $cursor1a = $db1->db_cursor() ; eval { $cursor1 = $db1->db_join([$cursor1a]) }; - ok 40, $@ =~ /BerkeleyDB Aborting: attempted to do a self-join at/; - eval { $cursor1 = $db1->db_join([$cursor1]) } ; ok 41, $@ =~ /BerkeleyDB Aborting: attempted to do a self-join at/; + eval { $cursor1 = $db1->db_join([$cursor1]) } ; + ok 42, $@ =~ /BerkeleyDB Aborting: attempted to do a self-join at/; undef $cursor1a; #undef $cursor1; diff --git a/db/perl/BerkeleyDB/t/pod.t b/db/perl/BerkeleyDB/t/pod.t new file mode 100644 index 000000000..230df4bd9 --- /dev/null +++ b/db/perl/BerkeleyDB/t/pod.t @@ -0,0 +1,18 @@ +eval " use Test::More " ; + +if ($@) +{ + print "1..0 # Skip: Test::More required for testing POD\n" ; + exit 0; +} + +eval "use Test::Pod 1.00"; + +if ($@) +{ + print "1..0 # Skip: Test::Pod 1.00 required for testing POD\n" ; + exit 0; +} + +all_pod_files_ok(); + diff --git a/db/perl/BerkeleyDB/t/queue.t b/db/perl/BerkeleyDB/t/queue.t index 53753cd45..00291641c 100644 --- a/db/perl/BerkeleyDB/t/queue.t +++ b/db/perl/BerkeleyDB/t/queue.t @@ -115,7 +115,7 @@ umask(0) ; my $rec_len = 11 ; ok 30, my $lexD = new LexDir($home); - ok 31, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL, + ok 31, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL,@StdErrFile, -Home => $home ; ok 32, my $db = new BerkeleyDB::Queue -Filename => $Dfile, -Env => $env, @@ -508,7 +508,7 @@ umask(0) ; my $home = "./fred" ; ok 168, my $lexD = new LexDir($home); my $rec_len = 9 ; - ok 169, my $env = new BerkeleyDB::Env -Home => $home, + ok 169, my $env = new BerkeleyDB::Env -Home => $home,@StdErrFile, -Flags => DB_CREATE|DB_INIT_TXN| DB_INIT_MPOOL|DB_INIT_LOCK ; ok 170, my $txn = $env->txn_begin() ; diff --git a/db/perl/BerkeleyDB/t/recno.t b/db/perl/BerkeleyDB/t/recno.t index 74e9d3100..7bbb50169 100644 --- a/db/perl/BerkeleyDB/t/recno.t +++ b/db/perl/BerkeleyDB/t/recno.t @@ -97,7 +97,7 @@ umask(0) ; my $home = "./fred" ; ok 27, my $lexD = new LexDir($home); - ok 28, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL, + ok 28, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL,@StdErrFile, -Home => $home ; ok 29, my $db = new BerkeleyDB::Recno -Filename => $Dfile, @@ -465,7 +465,7 @@ umask(0) ; my $home = "./fred" ; ok 167, my $lexD = new LexDir($home); - ok 168, my $env = new BerkeleyDB::Env -Home => $home, + ok 168, my $env = new BerkeleyDB::Env -Home => $home,@StdErrFile, -Flags => DB_CREATE|DB_INIT_TXN| DB_INIT_MPOOL|DB_INIT_LOCK ; ok 169, my $txn = $env->txn_begin() ; diff --git a/db/perl/BerkeleyDB/t/strict.t b/db/perl/BerkeleyDB/t/strict.t index b873da18b..4774cd15d 100644 --- a/db/perl/BerkeleyDB/t/strict.t +++ b/db/perl/BerkeleyDB/t/strict.t @@ -26,7 +26,7 @@ umask(0); my $status ; ok 1, my $lexD = new LexDir($home); - ok 2, my $env = new BerkeleyDB::Env -Home 
=> $home, + ok 2, my $env = new BerkeleyDB::Env -Home => $home,@StdErrFile, -Flags => DB_CREATE|DB_INIT_TXN| DB_INIT_MPOOL|DB_INIT_LOCK ; @@ -49,7 +49,7 @@ umask(0); my %hash ; ok 7, my $lexD = new LexDir($home); - ok 8, my $env = new BerkeleyDB::Env -Home => $home, + ok 8, my $env = new BerkeleyDB::Env -Home => $home,@StdErrFile, -Flags => DB_CREATE|DB_INIT_TXN| DB_INIT_MPOOL|DB_INIT_LOCK ; @@ -73,7 +73,7 @@ umask(0); my $status ; ok 11, my $lexD = new LexDir($home); - ok 12, my $env = new BerkeleyDB::Env -Home => $home, + ok 12, my $env = new BerkeleyDB::Env -Home => $home,@StdErrFile, -Flags => DB_CREATE|DB_INIT_TXN| DB_INIT_MPOOL|DB_INIT_LOCK ; @@ -100,7 +100,7 @@ umask(0); my %hash ; ok 20, my $lexD = new LexDir($home); - ok 21, my $env = new BerkeleyDB::Env -Home => $home, + ok 21, my $env = new BerkeleyDB::Env -Home => $home,@StdErrFile, -Flags => DB_CREATE|DB_INIT_TXN| DB_INIT_MPOOL|DB_INIT_LOCK ; @@ -152,7 +152,7 @@ umask(0); my $home = 'fred1'; ok 33, my $lexD = new LexDir($home); - ok 34, my $env = new BerkeleyDB::Env -Home => $home, + ok 34, my $env = new BerkeleyDB::Env -Home => $home,@StdErrFile, -Flags => DB_CREATE|DB_INIT_TXN| DB_INIT_MPOOL|DB_INIT_LOCK ; ok 35, my $txn = $env->txn_begin() ; diff --git a/db/perl/BerkeleyDB/t/txn.t b/db/perl/BerkeleyDB/t/txn.t index ba6b636cd..f8fa2ceb8 100644 --- a/db/perl/BerkeleyDB/t/txn.t +++ b/db/perl/BerkeleyDB/t/txn.t @@ -27,7 +27,7 @@ umask(0); my $home = "./fred" ; ok 1, my $lexD = new LexDir($home); - ok 2, my $env = new BerkeleyDB::Env -Home => $home, + ok 2, my $env = new BerkeleyDB::Env -Home => $home, @StdErrFile, -Flags => DB_CREATE| DB_INIT_MPOOL; eval { $env->txn_begin() ; } ; ok 3, $@ =~ /^BerkeleyDB Aborting: Transaction Manager not enabled at/ ; @@ -47,7 +47,7 @@ umask(0); my $home = "./fred" ; ok 5, my $lexD = new LexDir($home); - ok 6, my $env = new BerkeleyDB::Env -Home => $home, + ok 6, my $env = new BerkeleyDB::Env -Home => $home, @StdErrFile, -Flags => DB_CREATE|DB_INIT_TXN| DB_INIT_MPOOL|DB_INIT_LOCK ; ok 7, my $txn = $env->txn_begin() ; @@ -117,7 +117,7 @@ umask(0); my $home = "./fred" ; ok 18, my $lexD = new LexDir($home); - ok 19, my $env = new BerkeleyDB::Env -Home => $home, + ok 19, my $env = new BerkeleyDB::Env -Home => $home, @StdErrFile, -Flags => DB_CREATE|DB_INIT_TXN| DB_INIT_MPOOL|DB_INIT_LOCK ; ok 20, my $txn_mgr = $env->TxnMgr() ; @@ -188,7 +188,7 @@ umask(0); my $home = "./fred" ; ok 32, my $lexD = new LexDir($home); - ok 33, my $env = new BerkeleyDB::Env -Home => $home, + ok 33, my $env = new BerkeleyDB::Env -Home => $home, @StdErrFile, -Flags => DB_CREATE|DB_INIT_TXN| DB_INIT_MPOOL|DB_INIT_LOCK ; ok 34, my $txn = $env->txn_begin() ; @@ -257,7 +257,7 @@ umask(0); my $home = "./fred" ; ok 45, my $lexD = new LexDir($home); - ok 46, my $env = new BerkeleyDB::Env -Home => $home, + ok 46, my $env = new BerkeleyDB::Env -Home => $home, @StdErrFile, -Flags => DB_CREATE|DB_INIT_TXN| DB_INIT_MPOOL|DB_INIT_LOCK ; ok 47, my $txn_mgr = $env->TxnMgr() ; diff --git a/db/perl/BerkeleyDB/t/util.pm b/db/perl/BerkeleyDB/t/util.pm index 63c1d28fc..9f55c40d8 100644 --- a/db/perl/BerkeleyDB/t/util.pm +++ b/db/perl/BerkeleyDB/t/util.pm @@ -7,6 +7,10 @@ use BerkeleyDB ; use File::Path qw(rmtree); use vars qw(%DB_errors $FA) ; +use vars qw( @StdErrFile ); + +@StdErrFile = ( -ErrFile => *STDERR, -ErrPrefix => "\n# " ) ; + $| = 1; %DB_errors = ( diff --git a/db/perl/BerkeleyDB/typemap b/db/perl/BerkeleyDB/typemap index 96685a3b1..66c622bd0 100644 --- a/db/perl/BerkeleyDB/typemap +++ b/db/perl/BerkeleyDB/typemap @@ -178,6 
+178,7 @@ T_dbtkeydatum SV* my_sv = $arg ; DBM_ckFilter(my_sv, filter_store_key, \"filter_store_key\"); DBT_clear($var) ; + SvGETMAGIC($arg) ; if (db->recno_or_queue) { Value = GetRecnoKey(db, SvIV(my_sv)) ; $var.data = & Value; @@ -194,6 +195,7 @@ T_dbtkeydatum_btree SV* my_sv = $arg ; DBM_ckFilter(my_sv, filter_store_key, \"filter_store_key\"); DBT_clear($var) ; + SvGETMAGIC($arg) ; if (db->recno_or_queue || (db->type == DB_BTREE && flagSet(DB_SET_RECNO))) { Value = GetRecnoKey(db, SvIV(my_sv)) ; @@ -211,6 +213,7 @@ T_dbtdatum SV* my_sv = $arg ; DBM_ckFilter(my_sv, filter_store_value, \"filter_store_value\"); DBT_clear($var) ; + SvGETMAGIC($arg) ; $var.data = SvPV(my_sv, PL_na); $var.size = (int)PL_na; $var.flags = db->partial ; @@ -223,6 +226,7 @@ T_dbtdatum_opt if (flagSet(DB_GET_BOTH)) { SV* my_sv = $arg ; DBM_ckFilter(my_sv, filter_store_value, \"filter_store_value\"); + SvGETMAGIC($arg) ; $var.data = SvPV(my_sv, PL_na); $var.size = (int)PL_na; $var.flags = db->partial ; @@ -235,6 +239,7 @@ T_dbtdatum_btree if (flagSet(DB_GET_BOTH)) { SV* my_sv = $arg ; DBM_ckFilter(my_sv, filter_store_value, \"filter_store_value\"); + SvGETMAGIC($arg) ; $var.data = SvPV(my_sv, PL_na); $var.size = (int)PL_na; $var.flags = db->partial ; diff --git a/db/perl/DB_File/Changes b/db/perl/DB_File/Changes index 848d26a85..89027d13f 100644 --- a/db/perl/DB_File/Changes +++ b/db/perl/DB_File/Changes @@ -1,4 +1,29 @@ + +1.810 7th August 2004 + + * Fixed db-hash.t for Cygwin + + * Added substr tests to db-hast.t + +1.809 20th June 2004 + + * Merged core patch 22258 + + * Merged core patch 22741 + + * Fixed core bug 30237. + Using substr to pass parameters to the low-level Berkeley DB interface + causes problems with Perl 5.8.1 or better. + typemap fix supplied by Marcus Holland-Moritz. + +1.808 22nd December 2003 + + * Added extra DBM Filter tests. + + * Fixed a memory leak in ParseOpenInfo, which whould occur if the + opening of the database failed. Leak spotted by Adrian Enache. + 1.807 1st November 2003 * Fixed minor typos on pod documetation - reported by Jeremy Mates & diff --git a/db/perl/DB_File/DB_File.pm b/db/perl/DB_File/DB_File.pm index 54e0b527b..5ddac46c9 100644 --- a/db/perl/DB_File/DB_File.pm +++ b/db/perl/DB_File/DB_File.pm @@ -1,10 +1,10 @@ # DB_File.pm -- Perl 5 interface to Berkeley DB # # written by Paul Marquess (pmqs@cpan.org) -# last modified 22nd October 2002 -# version 1.807 +# last modified 7th August 2004 +# version 1.810 # -# Copyright (c) 1995-2003 Paul Marquess. All rights reserved. +# Copyright (c) 1995-2004 Paul Marquess. All rights reserved. # This program is free software; you can redistribute it and/or # modify it under the same terms as Perl itself. @@ -165,7 +165,7 @@ our ($db_version, $use_XSLoader, $splice_end_array); use Carp; -$VERSION = "1.807" ; +$VERSION = "1.810" ; { local $SIG{__WARN__} = sub {$splice_end_array = "@_";}; @@ -266,7 +266,8 @@ sub tie_hash_or_array $arg[2] = O_CREAT()|O_RDWR() if @arg >=3 && ! defined $arg[2]; $arg[3] = 0666 if @arg >=4 && ! defined $arg[3]; - # make recno in Berkeley DB version 2 work like recno in version 1. + # make recno in Berkeley DB version 2 (or better) work like + # recno in version 1. if ($db_version > 1 and defined $arg[4] and $arg[4] =~ /RECNO/ and $arg[1] and ! -e $arg[1]) { open(FH, ">$arg[1]") or return undef ; @@ -1821,7 +1822,7 @@ fix very easily. 
use DB_File ; my %hash ; - my $filename = "/tmp/filt" ; + my $filename = "filt" ; unlink $filename ; my $db = tie %hash, 'DB_File', $filename, O_CREAT|O_RDWR, 0666, $DB_HASH @@ -1863,7 +1864,7 @@ Here is a DBM Filter that does it: use strict ; use DB_File ; my %hash ; - my $filename = "/tmp/filt" ; + my $filename = "filt" ; unlink $filename ; @@ -1894,8 +1895,8 @@ peril! The locking technique went like this. - $db = tie(%db, 'DB_File', '/tmp/foo.db', O_CREAT|O_RDWR, 0666) - || die "dbcreat /tmp/foo.db $!"; + $db = tie(%db, 'DB_File', 'foo.db', O_CREAT|O_RDWR, 0644) + || die "dbcreat foo.db $!"; $fd = $db->fd; open(DB_FH, "+<&=$fd") || die "dup $!"; flock (DB_FH, LOCK_EX) || die "flock: $!"; @@ -2233,7 +2234,7 @@ B comes with the standard Perl source distribution. Look in the directory F. Given the amount of time between releases of Perl the version that ships with Perl is quite likely to be out of date, so the most recent version can always be found on CPAN (see -L for details), in the directory +L for details), in the directory F. This version of B will work with either version 1.x, 2.x or @@ -2252,7 +2253,7 @@ compile properly on IRIX 5.3. =head1 COPYRIGHT -Copyright (c) 1995-2003 Paul Marquess. All rights reserved. This program +Copyright (c) 1995-2004 Paul Marquess. All rights reserved. This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. @@ -2278,14 +2279,14 @@ Berkeley DB authors or the author of DB_File. See L<"AUTHOR"> for details. =head1 SEE ALSO -L, L, L, L, L, -L +L, L, L, L, L, +L =head1 AUTHOR The DB_File interface was written by Paul Marquess -Epmqs@cpan.org. +Epmqs@cpan.orgE. Questions about the DB system itself may be addressed to -Edb@sleepycat.com. +Edb@sleepycat.comE. =cut diff --git a/db/perl/DB_File/DB_File.xs b/db/perl/DB_File/DB_File.xs index 3f097de8d..8f6cec1cc 100644 --- a/db/perl/DB_File/DB_File.xs +++ b/db/perl/DB_File/DB_File.xs @@ -3,12 +3,12 @@ DB_File.xs -- Perl 5 interface to Berkeley DB written by Paul Marquess - last modified 22nd October 2002 - version 1.807 + last modified 7th August 2004 + version 1.810 All comments/suggestions/problems are welcome - Copyright (c) 1995-2003 Paul Marquess. All rights reserved. + Copyright (c) 1995-2004 Paul Marquess. All rights reserved. This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. @@ -107,6 +107,9 @@ Filter code can now cope with read-only $_ 1.806 - recursion detection beefed up. 
1.807 - no change + 1.808 - leak fixed in ParseOpenInfo + 1.809 - no change + 1.810 - no change */ @@ -395,9 +398,11 @@ typedef DBT DBTKEY ; #define OutputValue(arg, name) \ { if (RETVAL == 0) { \ + SvGETMAGIC(arg) ; \ my_sv_setpvn(arg, name.data, name.size) ; \ - TAINT; \ + TAINT; \ SvTAINTED_on(arg); \ + SvUTF8_off(arg); \ DBM_ckFilter(arg, filter_fetch_value,"filter_fetch_value") ; \ } \ } @@ -405,13 +410,15 @@ typedef DBT DBTKEY ; #define OutputKey(arg, name) \ { if (RETVAL == 0) \ { \ + SvGETMAGIC(arg) ; \ if (db->type != DB_RECNO) { \ my_sv_setpvn(arg, name.data, name.size); \ } \ else \ sv_setiv(arg, (I32)*(I32*)name.data - 1); \ - TAINT; \ + TAINT; \ SvTAINTED_on(arg); \ + SvUTF8_off(arg); \ DBM_ckFilter(arg, filter_fetch_key,"filter_fetch_key") ; \ } \ } @@ -929,7 +936,10 @@ SV * sv ; STRLEN n_a; dMY_CXT; -/* printf("In ParseOpenInfo name=[%s] flags=[%d] mode = [%d]\n", name, flags, mode) ; */ +#ifdef TRACE + printf("In ParseOpenInfo name=[%s] flags=[%d] mode=[%d] SV NULL=[%d]\n", + name, flags, mode, sv == NULL) ; +#endif Zero(RETVAL, 1, DB_File_type) ; /* Default to HASH */ @@ -1489,8 +1499,10 @@ db_DoTie_(isHASH, dbtype, name=undef, flags=O_CREAT|O_RDWR, mode=0666, type=DB_H sv = ST(5) ; RETVAL = ParseOpenInfo(aTHX_ isHASH, name, flags, mode, sv) ; - if (RETVAL->dbp == NULL) + if (RETVAL->dbp == NULL) { + Safefree(RETVAL); RETVAL = NULL ; + } } OUTPUT: RETVAL @@ -1653,7 +1665,8 @@ unshift(db, ...) #endif for (i = items-1 ; i > 0 ; --i) { - value.data = SvPV(ST(i), n_a) ; + DBM_ckFilter(ST(i), filter_store_value, "filter_store_value"); + value.data = SvPVbyte(ST(i), n_a) ; value.size = n_a ; One = 1 ; key.data = &One ; @@ -1762,7 +1775,8 @@ push(db, ...) keyval = 0 ; for (i = 1 ; i < items ; ++i) { - value.data = SvPV(ST(i), n_a) ; + DBM_ckFilter(ST(i), filter_store_value, "filter_store_value"); + value.data = SvPVbyte(ST(i), n_a) ; value.size = n_a ; ++ keyval ; key.data = &keyval ; diff --git a/db/perl/DB_File/META.yml b/db/perl/DB_File/META.yml index 7c8ad993d..2cb481b8c 100644 --- a/db/perl/DB_File/META.yml +++ b/db/perl/DB_File/META.yml @@ -1,10 +1,10 @@ # http://module-build.sourceforge.net/META-spec.html #XXXXXXX This is a prototype!!! It will change in the future!!! XXXXX# name: DB_File -version: 1.807 +version: 1.810 version_from: DB_File.pm installdirs: site requires: distribution_type: module -generated_by: ExtUtils::MakeMaker version 6.17 +generated_by: ExtUtils::MakeMaker version 6.21_02 diff --git a/db/perl/DB_File/Makefile.PL b/db/perl/DB_File/Makefile.PL index 8bb3598f5..a8c671002 100644 --- a/db/perl/DB_File/Makefile.PL +++ b/db/perl/DB_File/Makefile.PL @@ -38,6 +38,9 @@ $LIBS .= " -lpthread" if $^O eq 'aix' ; my $OS2 = "" ; $OS2 = "-DOS2" if $Config{'osname'} eq 'os2' ; +my $WALL = '' ; +#$WALL = ' -Wall '; + WriteMakefile( NAME => 'DB_File', LIBS => ["-L${LIB_DIR} $LIBS"], @@ -45,7 +48,7 @@ WriteMakefile( INC => "-I$INC_DIR", VERSION_FROM => 'DB_File.pm', XSPROTOARG => '-noprototypes', - DEFINE => "-D_NOT_CORE $OS2 $VER_INFO $COMPAT185", + DEFINE => "-D_NOT_CORE $OS2 $VER_INFO $COMPAT185 $WALL", OBJECT => 'version$(OBJ_EXT) DB_File$(OBJ_EXT)', #OPTIMIZE => '-g', 'depend' => { 'Makefile' => 'config.in', diff --git a/db/perl/DB_File/README b/db/perl/DB_File/README index b0693335b..5a435fd0c 100644 --- a/db/perl/DB_File/README +++ b/db/perl/DB_File/README @@ -1,10 +1,10 @@ DB_File - Version 1.807 + Version 1.810 - 1st Nov 2003 + 7th August 2004 - Copyright (c) 1995-2003 Paul Marquess. All rights reserved. This + Copyright (c) 1995-2004 Paul Marquess. 
All rights reserved. This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. @@ -455,6 +455,37 @@ If you are running IRIX, and want to use Berkeley DB version 1, you can get it from http://reality.sgi.com/ariel. It has the patches necessary to compile properly on IRIX 5.3. +AIX NOTES +--------- + +I've had reports of a build failure like this on AIX 5.2 using the +xlC compiler. + + rm -f blib/arch/auto/DB_File/DB_File.so + LD_RUN_PATH="" ld -bhalt:4 -bM:SRE -bI:/usr/local/5.8.1/lib/perl5/5.8.1/aix/CORE/perl.exp -bE:DB_File.exp -bnoentry -lc + -L/usr/local/lib version.o DB_File.o -o blib/arch/auto/DB_File/DB_File.so + -L/usr/local/BerkeleyDB/lib -ldb -lpthread + ld: 0711-317 ERROR: Undefined symbol: .mutex_lock + ld: 0711-317 ERROR: Undefined symbol: .cond_signal + ld: 0711-317 ERROR: Undefined symbol: .mutex_unlock + ld: 0711-317 ERROR: Undefined symbol: .mutex_trylock + ld: 0711-317 ERROR: Undefined symbol: .cond_wait + ld: 0711-317 ERROR: Undefined symbol: .mutex_init + ld: 0711-317 ERROR: Undefined symbol: .cond_init + ld: 0711-317 ERROR: Undefined symbol: .mutex_destroy + ld: 0711-345 Use the -bloadmap or -bnoquiet option to obtain more information. + make: 1254-004 The error code from the last command is 8. + +Editing Makefile.PL, and changing the line + + $LIBS .= " -lpthread" if $^O eq 'aix' ; + +to this + + $LIBS .= " -lthread" if $^O eq 'aix' ; + +fixed the problem. + FEEDBACK ======== diff --git a/db/perl/DB_File/ppport.h b/db/perl/DB_File/ppport.h index 0887c2159..effa50729 100644 --- a/db/perl/DB_File/ppport.h +++ b/db/perl/DB_File/ppport.h @@ -278,7 +278,35 @@ SV *sv; #endif /* START_MY_CXT */ +#ifdef SvPVbyte +# if PERL_REVISION == 5 && PERL_VERSION < 7 + /* SvPVbyte does not work in perl-5.6.1, borrowed version for 5.7.3 */ +# undef SvPVbyte +# define SvPVbyte(sv, lp) \ + ((SvFLAGS(sv) & (SVf_POK|SVf_UTF8)) == (SVf_POK) \ + ?
((lp = SvCUR(sv)), SvPVX(sv)) : my_sv_2pvbyte(aTHX_ sv, &lp)) + static char * + my_sv_2pvbyte(pTHX_ register SV *sv, STRLEN *lp) + { + sv_utf8_downgrade(sv,0); + return SvPV(sv,*lp); + } +# endif +#else +# define SvPVbyte SvPV +#endif + +#ifndef SvUTF8_off +# define SvUTF8_off(s) +#endif +#if 1 +#ifdef DBM_setFilter +#undef DBM_setFilter +#undef DBM_ckFilter +#endif +#endif + #ifndef DBM_setFilter /* @@ -305,6 +333,7 @@ SV *sv; #define DBM_ckFilter(arg,type,name) \ if (db->type) { \ + /*printf("ckFilter %s\n", name);*/ \ if (db->filtering) { \ croak("recursion detected in %s", name) ; \ } \ @@ -313,6 +342,8 @@ SV *sv; SAVEINT(db->filtering) ; \ db->filtering = TRUE ; \ SAVESPTR(DEFSV) ; \ + if (name[7] == 's') \ + arg = newSVsv(arg); \ DEFSV = arg ; \ SvTEMP_off(arg) ; \ PUSHMARK(SP) ; \ @@ -322,6 +353,10 @@ SV *sv; PUTBACK ; \ FREETMPS ; \ LEAVE ; \ + if (name[7] == 's'){ \ + arg = sv_2mortal(arg); \ + } \ + SvOKp(arg); \ } #endif /* DBM_setFilter */ diff --git a/db/perl/DB_File/t/db-btree.t b/db/perl/DB_File/t/db-btree.t index 643e8fba5..deab41010 100644 --- a/db/perl/DB_File/t/db-btree.t +++ b/db/perl/DB_File/t/db-btree.t @@ -34,7 +34,7 @@ EOM use DB_File; use Fcntl; -print "1..177\n"; +print "1..197\n"; unlink glob "__db.*"; @@ -1490,4 +1490,169 @@ ok(165,1); unlink $Dfile; } +{ + # Check low-level API works with filter + + use warnings ; + use strict ; + my (%h, $db) ; + my $Dfile = "xxy.db"; + unlink $Dfile; + + ok(178, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) ); + + + $db->filter_fetch_key (sub { $_ = unpack("i", $_) } ); + $db->filter_store_key (sub { $_ = pack("i", $_) } ); + $db->filter_fetch_value (sub { $_ = unpack("i", $_) } ); + $db->filter_store_value (sub { $_ = pack("i", $_) } ); + + $_ = 'fred'; + + my $key = 22 ; + my $value = 34 ; + + $db->put($key, $value) ; + ok 179, $key == 22; + ok 180, $value == 34 ; + ok 181, $_ eq 'fred'; + #print "k [$key][$value]\n" ; + + my $val ; + $db->get($key, $val) ; + ok 182, $key == 22; + ok 183, $val == 34 ; + ok 184, $_ eq 'fred'; + + $key = 51 ; + $value = 454; + $h{$key} = $value ; + ok 185, $key == 51; + ok 186, $value == 454 ; + ok 187, $_ eq 'fred'; + + undef $db ; + untie %h; + unlink $Dfile; +} + + + +{ + # Regression Test for bug 30237 + # Check that substr can be used in the key to db_put + # and that db_put does not trigger the warning + # + # Use of uninitialized value in subroutine entry + + + use warnings ; + use strict ; + my (%h, $db) ; + my $Dfile = "xxy.db"; + unlink $Dfile; + + ok(188, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE )); + + my $warned = ''; + local $SIG{__WARN__} = sub {$warned = $_[0]} ; + + # db-put with substr of key + my %remember = () ; + for my $ix ( 10 .. 12 ) + { + my $key = $ix . "data" ; + my $value = "value$ix" ; + $remember{$key} = $value ; + $db->put(substr($key,0), $value) ; + } + + ok 189, $warned eq '' + or print "# Caught warning [$warned]\n" ; + + # db-put with substr of value + $warned = ''; + for my $ix ( 20 .. 22 ) + { + my $key = $ix . "data" ; + my $value = "value$ix" ; + $remember{$key} = $value ; + $db->put($key, substr($value,0)) ; + } + + ok 190, $warned eq '' + or print "# Caught warning [$warned]\n" ; + + # via the tied hash is not a problem, but check anyway + # substr of key + $warned = ''; + for my $ix ( 30 .. 32 ) + { + my $key = $ix . 
"data" ; + my $value = "value$ix" ; + $remember{$key} = $value ; + $h{substr($key,0)} = $value ; + } + + ok 191, $warned eq '' + or print "# Caught warning [$warned]\n" ; + + # via the tied hash is not a problem, but check anyway + # substr of value + $warned = ''; + for my $ix ( 40 .. 42 ) + { + my $key = $ix . "data" ; + my $value = "value$ix" ; + $remember{$key} = $value ; + $h{$key} = substr($value,0) ; + } + + ok 192, $warned eq '' + or print "# Caught warning [$warned]\n" ; + + my %bad = () ; + $key = ''; + for ($status = $db->seq($key, $value, R_FIRST ) ; + $status == 0 ; + $status = $db->seq($key, $value, R_NEXT ) ) { + + #print "# key [$key] value [$value]\n" ; + if (defined $remember{$key} && defined $value && + $remember{$key} eq $value) { + delete $remember{$key} ; + } + else { + $bad{$key} = $value ; + } + } + + ok 193, keys %bad == 0 ; + ok 194, keys %remember == 0 ; + + print "# missing -- $key $value\n" while ($key, $value) = each %remember; + print "# bad -- $key $value\n" while ($key, $value) = each %bad; + + # Make sure this fix does not break code to handle an undef key + # Berkeley DB undef key is bron between versions 2.3.16 and + my $value = 'fred'; + $warned = ''; + $db->put(undef, $value) ; + ok 195, $warned eq '' + or print "# Caught warning [$warned]\n" ; + $warned = ''; + + my $no_NULL = ($DB_File::db_ver >= 2.003016 && $DB_File::db_ver < 3.001) ; + print "# db_ver $DB_File::db_ver\n"; + $value = '' ; + $db->get(undef, $value) ; + ok 196, $no_NULL || $value eq 'fred' or print "# got [$value]\n" ; + ok 197, $warned eq '' + or print "# Caught warning [$warned]\n" ; + $warned = ''; + + undef $db ; + untie %h; + unlink $Dfile; +} exit ; diff --git a/db/perl/DB_File/t/db-hash.t b/db/perl/DB_File/t/db-hash.t index 5f687a75d..018952f9d 100644 --- a/db/perl/DB_File/t/db-hash.t +++ b/db/perl/DB_File/t/db-hash.t @@ -23,7 +23,7 @@ BEGIN { use DB_File; use Fcntl; -print "1..143\n"; +print "1..166\n"; unlink glob "__db.*"; @@ -34,6 +34,8 @@ sub ok print "not " unless $result ; print "ok $no\n" ; + + return $result ; } { @@ -580,7 +582,8 @@ EOM ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ; $k = 'Fred'; $v =''; ok(74, ! 
$db->seq($k, $v, R_FIRST) ) ; - ok(75, $k eq "FRED") ; + ok(75, $k eq "Fred") ; + #print "k [$k]\n" ; ok(76, $v eq "[Jxe]") ; # fk sk fv sv ok(77, checkOutput( "FRED", "fred", "[Jxe]", "")) ; @@ -880,8 +883,8 @@ EOM # unlink $Dfile; #} -ok(127,1); -ok(128,1); +#ok(127, 1); +#ok(128, 1); { # Check that two hash's don't interact @@ -899,8 +902,8 @@ ok(128,1); my (%h); - ok(129, tie(%hash1, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $dbh1 ) ); - ok(130, tie(%hash2, 'DB_File',$Dfile2, O_RDWR|O_CREAT, 0640, $dbh2 ) ); + ok(127, tie(%hash1, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $dbh1 ) ); + ok(128, tie(%hash2, 'DB_File',$Dfile2, O_RDWR|O_CREAT, 0640, $dbh2 ) ); $hash1{DEFG} = 5; $hash1{XYZ} = 2; @@ -910,11 +913,11 @@ ok(128,1); $hash2{xyz} = 2; $hash2{abcde} = 5; - ok(131, $h1_count > 0); - ok(132, $h1_count == $h2_count); + ok(129, $h1_count > 0); + ok(130, $h1_count == $h2_count); - ok(133, safeUntie \%hash1); - ok(134, safeUntie \%hash2); + ok(131, safeUntie \%hash1); + ok(132, safeUntie \%hash2); unlink $Dfile, $Dfile2; } @@ -929,12 +932,16 @@ ok(128,1); unlink $Dfile; tie %hash1, 'DB_File',$Dfile, undef; - ok(135, $warn_count == 0); + ok(133, $warn_count == 0); $warn_count = 0; + untie %hash1; + unlink $Dfile; tie %hash1, 'DB_File',$Dfile, O_RDWR|O_CREAT, undef; - ok(136, $warn_count == 0); + ok(134, $warn_count == 0); + untie %hash1; + unlink $Dfile; tie %hash1, 'DB_File',$Dfile, undef, undef; - ok(137, $warn_count == 0); + ok(135, $warn_count == 0); $warn_count = 0; untie %hash1; @@ -950,7 +957,7 @@ ok(128,1); my $Dfile = "xxy.db"; unlink $Dfile; - ok(138, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) ); + ok(136, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) ); $db->filter_fetch_key (sub { }) ; $db->filter_store_key (sub { }) ; @@ -960,10 +967,10 @@ ok(128,1); $_ = "original" ; $h{"fred"} = "joe" ; - ok(139, $h{"fred"} eq "joe"); + ok(137, $h{"fred"} eq "joe"); eval { grep { $h{$_} } (1, 2, 3) }; - ok (140, ! $@); + ok (138, ! $@); # delete the filters @@ -974,12 +981,58 @@ ok(128,1); $h{"fred"} = "joe" ; - ok(141, $h{"fred"} eq "joe"); + ok(139, $h{"fred"} eq "joe"); - ok(142, $db->FIRSTKEY() eq "fred") ; + ok(140, $db->FIRSTKEY() eq "fred") ; eval { grep { $h{$_} } (1, 2, 3) }; - ok (143, ! $@); + ok (141, ! 
$@); + + undef $db ; + untie %h; + unlink $Dfile; +} + +{ + # Check low-level API works with filter + + use warnings ; + use strict ; + my (%h, $db) ; + my $Dfile = "xxy.db"; + unlink $Dfile; + + ok(142, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) ); + + + $db->filter_fetch_key (sub { $_ = unpack("i", $_) } ); + $db->filter_store_key (sub { $_ = pack("i", $_) } ); + $db->filter_fetch_value (sub { $_ = unpack("i", $_) } ); + $db->filter_store_value (sub { $_ = pack("i", $_) } ); + + $_ = 'fred'; + + my $key = 22 ; + my $value = 34 ; + + $db->put($key, $value) ; + ok 143, $key == 22; + ok 144, $value == 34 ; + ok 145, $_ eq 'fred'; + #print "k [$key][$value]\n" ; + + my $val ; + $db->get($key, $val) ; + ok 146, $key == 22; + ok 147, $val == 34 ; + ok 148, $_ eq 'fred'; + + $key = 51 ; + $value = 454; + $h{$key} = $value ; + ok 149, $key == 51; + ok 150, $value == 454 ; + ok 151, $_ eq 'fred'; undef $db ; untie %h; @@ -987,4 +1040,192 @@ ok(128,1); } +{ + # Regression Test for bug 30237 + # Check that substr can be used in the key to db_put + # and that db_put does not trigger the warning + # + # Use of uninitialized value in subroutine entry + + + use warnings ; + use strict ; + my (%h, $db) ; + my $Dfile = "xxy.db"; + unlink $Dfile; + + ok(152, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) ); + + my $warned = ''; + local $SIG{__WARN__} = sub {$warned = $_[0]} ; + + # db-put with substr of key + my %remember = () ; + for my $ix ( 1 .. 2 ) + { + my $key = $ix . "data" ; + my $value = "value$ix" ; + $remember{$key} = $value ; + $db->put(substr($key,0), $value) ; + } + + ok 153, $warned eq '' + or print "# Caught warning [$warned]\n" ; + + # db-put with substr of value + $warned = ''; + for my $ix ( 10 .. 12 ) + { + my $key = $ix . "data" ; + my $value = "value$ix" ; + $remember{$key} = $value ; + $db->put($key, substr($value,0)) ; + } + + ok 154, $warned eq '' + or print "# Caught warning [$warned]\n" ; + + # via the tied hash is not a problem, but check anyway + # substr of key + $warned = ''; + for my $ix ( 30 .. 32 ) + { + my $key = $ix . "data" ; + my $value = "value$ix" ; + $remember{$key} = $value ; + $h{substr($key,0)} = $value ; + } + + ok 155, $warned eq '' + or print "# Caught warning [$warned]\n" ; + + # via the tied hash is not a problem, but check anyway + # substr of value + $warned = ''; + for my $ix ( 40 .. 42 ) + { + my $key = $ix . 
"data" ; + my $value = "value$ix" ; + $remember{$key} = $value ; + $h{$key} = substr($value,0) ; + } + + ok 156, $warned eq '' + or print "# Caught warning [$warned]\n" ; + + my %bad = () ; + $key = ''; + for ($status = $db->seq(substr($key,0), substr($value,0), R_FIRST ) ; + $status == 0 ; + $status = $db->seq(substr($key,0), substr($value,0), R_NEXT ) ) { + + #print "# key [$key] value [$value]\n" ; + if (defined $remember{$key} && defined $value && + $remember{$key} eq $value) { + delete $remember{$key} ; + } + else { + $bad{$key} = $value ; + } + } + + ok 157, keys %bad == 0 ; + ok 158, keys %remember == 0 ; + + print "# missing -- $key=>$value\n" while ($key, $value) = each %remember; + print "# bad -- $key=>$value\n" while ($key, $value) = each %bad; + + # Make sure this fix does not break code to handle an undef key + # Berkeley DB undef key is broken between versions 2.3.16 and 3.1 + my $value = 'fred'; + $warned = ''; + $db->put(undef, $value) ; + ok 159, $warned eq '' + or print "# Caught warning [$warned]\n" ; + $warned = ''; + + my $no_NULL = ($DB_File::db_ver >= 2.003016 && $DB_File::db_ver < 3.001) ; + print "# db_ver $DB_File::db_ver\n"; + $value = '' ; + $db->get(undef, $value) ; + ok 160, $no_NULL || $value eq 'fred' or print "# got [$value]\n" ; + ok 161, $warned eq '' + or print "# Caught warning [$warned]\n" ; + $warned = ''; + + undef $db ; + untie %h; + unlink $Dfile; +} + +{ + # Check filter + substr + + use warnings ; + use strict ; + my (%h, $db) ; + my $Dfile = "xxy.db"; + unlink $Dfile; + + ok(162, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) ); + + + { + $db->filter_fetch_key (sub { lc $_ } ); + $db->filter_store_key (sub { uc $_ } ); + $db->filter_fetch_value (sub { lc $_ } ); + $db->filter_store_value (sub { uc $_ } ); + } + + $_ = 'fred'; + + # db-put with substr of key + my %remember = () ; + my $status = 0 ; + for my $ix ( 1 .. 2 ) + { + my $key = $ix . 
"data" ; + my $value = "value$ix" ; + $remember{$key} = $value ; + $status += $db->put(substr($key,0), substr($value,0)) ; + } + + ok 163, $status == 0 or print "# Status $status\n" ; + + if (1) + { + $db->filter_fetch_key (undef); + $db->filter_store_key (undef); + $db->filter_fetch_value (undef); + $db->filter_store_value (undef); + } + + my %bad = () ; + my $key = ''; + my $value = ''; + for ($status = $db->seq($key, $value, R_FIRST ) ; + $status == 0 ; + $status = $db->seq($key, $value, R_NEXT ) ) { + + #print "# key [$key] value [$value]\n" ; + if (defined $remember{$key} && defined $value && + $remember{$key} eq $value) { + delete $remember{$key} ; + } + else { + $bad{$key} = $value ; + } + } + + ok 164, $_ eq 'fred'; + ok 165, keys %bad == 0 ; + ok 166, keys %remember == 0 ; + + print "# missing -- $key $value\n" while ($key, $value) = each %remember; + print "# bad -- $key $value\n" while ($key, $value) = each %bad; + undef $db ; + untie %h; + unlink $Dfile; +} + exit ; diff --git a/db/perl/DB_File/t/db-recno.t b/db/perl/DB_File/t/db-recno.t index f2cd97bbf..23bf0cdec 100644 --- a/db/perl/DB_File/t/db-recno.t +++ b/db/perl/DB_File/t/db-recno.t @@ -151,7 +151,7 @@ BEGIN } my $splice_tests = 10 + 12 + 1; # ten regressions, plus the randoms -my $total_tests = 158 ; +my $total_tests = 181 ; $total_tests += $splice_tests if $FA ; print "1..$total_tests\n"; @@ -1014,6 +1014,175 @@ EOM unlink $Dfile; } +{ + # Check low-level API works with filter + + use warnings ; + use strict ; + my (@h, $db) ; + my $Dfile = "xxy.db"; + unlink $Dfile; + + ok(159, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) ); + + + $db->filter_fetch_key (sub { ++ $_ } ); + $db->filter_store_key (sub { -- $_ } ); + $db->filter_fetch_value (sub { $_ = unpack("i", $_) } ); + $db->filter_store_value (sub { $_ = pack("i", $_) } ); + + $_ = 'fred'; + + my $key = 22 ; + my $value = 34 ; + + $db->put($key, $value) ; + ok 160, $key == 22; + ok 161, $value == 34 ; + ok 162, $_ eq 'fred'; + #print "k [$key][$value]\n" ; + + my $val ; + $db->get($key, $val) ; + ok 163, $key == 22; + ok 164, $val == 34 ; + ok 165, $_ eq 'fred'; + + $key = 51 ; + $value = 454; + $h[$key] = $value ; + ok 166, $key == 51; + ok 167, $value == 454 ; + ok 168, $_ eq 'fred'; + + undef $db ; + untie @h; + unlink $Dfile; +} + + +{ + # Regression Test for bug 30237 + # Check that substr can be used in the key to db_put + # and that db_put does not trigger the warning + # + # Use of uninitialized value in subroutine entry + + + use warnings ; + use strict ; + my (@h, $db) ; + my $status ; + my $Dfile = "xxy.db"; + unlink $Dfile; + + ok(169, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO) ); + + my $warned = ''; + local $SIG{__WARN__} = sub {$warned = $_[0]} ; + + # db-put with substr of key + my %remember = () ; + for my $ix ( 0 .. 2 ) + { + my $key = $ix . "data" ; + my $value = "value$ix" ; + $remember{substr($key,0, 1)} = $value ; + $db->put(substr($key,0, 1), $value) ; + } + + ok 170, $warned eq '' + or print "# Caught warning [$warned]\n" ; + + # db-put with substr of value + $warned = ''; + for my $ix ( 3 .. 5 ) + { + my $key = $ix . "data" ; + my $value = "value$ix" ; + $remember{$ix} = $value ; + $db->put($ix, substr($value,0)) ; + } + + ok 171, $warned eq '' + or print "# Caught warning [$warned]\n" ; + + # via the tied array is not a problem, but check anyway + # substr of key + $warned = ''; + for my $ix ( 6 .. 8 ) + { + my $key = $ix . 
"data" ; + my $value = "value$ix" ; + $remember{substr($key,0,1)} = $value ; + $h[substr($key,0,1)] = $value ; + } + + ok 172, $warned eq '' + or print "# Caught warning [$warned]\n" ; + + # via the tied array is not a problem, but check anyway + # substr of value + $warned = ''; + for my $ix ( 9 .. 10 ) + { + my $key = $ix . "data" ; + my $value = "value$ix" ; + $remember{$ix} = $value ; + $h[$ix] = substr($value,0) ; + } + + ok 173, $warned eq '' + or print "# Caught warning [$warned]\n" ; + + my %bad = () ; + my $key = ''; + for (my $status = $db->seq($key, $value, R_FIRST ) ; + $status == 0 ; + $status = $db->seq($key, $value, R_NEXT ) ) { + + #print "# key [$key] value [$value]\n" ; + if (defined $remember{$key} && defined $value && + $remember{$key} eq $value) { + delete $remember{$key} ; + } + else { + $bad{$key} = $value ; + } + } + + ok 174, keys %bad == 0 ; + ok 175, keys %remember == 0 ; + + print "# missing -- $key $value\n" while ($key, $value) = each %remember; + print "# bad -- $key $value\n" while ($key, $value) = each %bad; + + # Make sure this fix does not break code to handle an undef key + my $value = 'fred'; + $warned = ''; + $status = $db->put(undef, $value) ; + ok 176, $status == 0 + or print "# put failed - status $status\n"; + ok 177, $warned eq '' + or print "# Caught warning [$warned]\n" ; + $warned = ''; + + print "# db_ver $DB_File::db_ver\n"; + $value = '' ; + $status = $db->get(undef, $value) ; + ok 178, $status == 0 + or print "# get failed - status $status\n" ; + ok(179, $db->get(undef, $value) == 0) or print "# get failed\n" ; + ok 180, $value eq 'fred' or print "# got [$value]\n" ; + ok 181, $warned eq '' + or print "# Caught warning [$warned]\n" ; + $warned = ''; + + undef $db ; + untie @h; + unlink $Dfile; +} + # Only test splice if this is a newish version of Perl exit unless $FA ; @@ -1041,36 +1210,36 @@ exit unless $FA ; my $offset ; $a = ''; splice(@a, $offset); - ok(159, $a =~ /^Use of uninitialized value /); + ok(182, $a =~ /^Use of uninitialized value /); $a = ''; splice(@tied, $offset); - ok(160, $a =~ /^Use of uninitialized value in splice/); + ok(183, $a =~ /^Use of uninitialized value in splice/); no warnings 'uninitialized'; $a = ''; splice(@a, $offset); - ok(161, $a eq ''); + ok(184, $a eq ''); $a = ''; splice(@tied, $offset); - ok(162, $a eq ''); + ok(185, $a eq ''); # uninitialized length use warnings; my $length ; $a = ''; splice(@a, 0, $length); - ok(163, $a =~ /^Use of uninitialized value /); + ok(186, $a =~ /^Use of uninitialized value /); $a = ''; splice(@tied, 0, $length); - ok(164, $a =~ /^Use of uninitialized value in splice/); + ok(187, $a =~ /^Use of uninitialized value in splice/); no warnings 'uninitialized'; $a = ''; splice(@a, 0, $length); - ok(165, $a eq ''); + ok(188, $a eq ''); $a = ''; splice(@tied, 0, $length); - ok(166, $a eq ''); + ok(189, $a eq ''); # offset past end of array use warnings; @@ -1079,17 +1248,17 @@ exit unless $FA ; my $splice_end_array = ($a =~ /^splice\(\) offset past end of array/); $a = ''; splice(@tied, 3); - ok(167, !$splice_end_array || $a =~ /^splice\(\) offset past end of array/); + ok(190, !$splice_end_array || $a =~ /^splice\(\) offset past end of array/); no warnings 'misc'; $a = ''; splice(@a, 3); - ok(168, $a eq ''); + ok(191, $a eq ''); $a = ''; splice(@tied, 3); - ok(169, $a eq ''); + ok(192, $a eq ''); - ok(170, safeUntie \@tied); + ok(193, safeUntie \@tied); unlink $Dfile; } @@ -1150,9 +1319,9 @@ my @tests = ([ [ 'falsely', 'dinosaur', 'remedy', 'commotion', 'void' ], ); -my $testnum 
= 171; +my $testnum = 194; my $failed = 0; -require POSIX; my $tmp = POSIX::tmpnam(); +my $tmp = "dbr$$"; foreach my $test (@tests) { my $err = test_splice(@$test); if (defined $err) { @@ -1267,6 +1436,8 @@ sub test_splice { foreach ($s_error, @s_warnings) { chomp; s/ at \S+ line \d+\.$//; + # only built-in splice identifies name of uninit value + s/(uninitialized value) \$\w+/$1/; } # Now do the same for DB_File's version of splice diff --git a/db/perl/DB_File/typemap b/db/perl/DB_File/typemap index 8ad7b1282..f15999508 100644 --- a/db/perl/DB_File/typemap +++ b/db/perl/DB_File/typemap @@ -1,8 +1,8 @@ # typemap for Perl 5 interface to Berkeley # # written by Paul Marquess -# last modified 10th December 2000 -# version 1.74 +# last modified 20th June 2004 +# version 1.809 # #################################### DB SECTION # @@ -17,22 +17,25 @@ INPUT T_dbtkeydatum DBM_ckFilter($arg, filter_store_key, \"filter_store_key\"); DBT_clear($var) ; - if (SvOK($arg)){ - if (db->type != DB_RECNO) { - $var.data = SvPV($arg, PL_na); - $var.size = (int)PL_na; - } - else { - Value = GetRecnoKey(aTHX_ db, SvIV($arg)) ; - $var.data = & Value; - $var.size = (int)sizeof(recno_t); - } + SvGETMAGIC($arg) ; + if (db->type == DB_RECNO) { + if (SvOK($arg)) + Value = GetRecnoKey(aTHX_ db, SvIV($arg)) ; + else + Value = 1 ; + $var.data = & Value; + $var.size = (int)sizeof(recno_t); + } + else if (SvOK($arg)) { + $var.data = SvPVbyte($arg, PL_na); + $var.size = (int)PL_na; } T_dbtdatum DBM_ckFilter($arg, filter_store_value, \"filter_store_value\"); DBT_clear($var) ; + SvGETMAGIC($arg) ; if (SvOK($arg)) { - $var.data = SvPV($arg, PL_na); + $var.data = SvPVbyte($arg, PL_na); + $var.size = (int)PL_na; } diff --git a/db/php_db4/ABOUT b/db/php_db4/ABOUT new file mode 100644 index 000000000..7550b4b16 --- /dev/null +++ b/db/php_db4/ABOUT @@ -0,0 +1,62 @@ +This is a PHP 4 wrapper for DB-4.2. It can either link +directly against libdb-4.2 (which is necessary for running in +a non Apache/mod_php4 environment), or against mod_db4, +which provides additional safeties when running under Apache/mod_php4. + +This extension provides the following classes, which mirror the standard +db4 C++ API.
+ +class Db4Env { + function Db4Env($flags = 0) {} + function close($flags = 0) {} // force a close + function dbremove($txn, $filename, $database = null, $flags = 0) {} + function dbrename($txn, $file, $database, $new_database, $flags = 0) {} + function open($home, + $flags = DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, + $mode = 0666) {} + function remove($home, $flags = 0) {} + function set_data_dir($directory) {} + function txn_begin($parent_txn = null, $flags = 0) {} + function txn_checkpoint($kbytes, $minutes, $flags = 0) {} +} + +class Db4 { + function Db4($dbenv = null) {} // create a new Db4 object using the optional DbEnv + function open($txn = null, $file = null, $database = null, $flags = DB_CREATE, $mode = 0) {} + function close() {} // force a close + function del($key, $txn = null) {} + function get($key, $txn = null, $flags = 0) {} + function pget($key, &$pkey, $txn = null, $flags = 0) {} + function get_type() {} // returns the stringified database type name + function stat() {} // returns statistics as an associative array + function join($cursor_list, $flags = 0) {} + function sync() {} + function truncate($txn = null, $flags = 0) {} + function cursor($txn = null, $flags = 0) {} +} + +class Db4Txn { + function abort() {} + function commit() {} + function discard() {} + function id() {} + function set_timeout($timeout, $flags = 0) {} +} + +class Db4Cursor { + function close() {} + function count() {} + function del() {} + function dup($flags = 0) {} + function get($key, $flags = 0) {} + function pget($key, &$primary_key, $flags = 0) {} + function put($key, $data, $flags = 0) {} +} + +The db4 extension attempts to be 'smart' for you by: +o Automatically making operations auto-commit, when they +must be transactional to even possibly succeed and you +neglect a Db4Txn object. +o Performing reference and dependency checking to ensure +that all resources are closed in the correct order. +o Attempting intelligent default values for flags. diff --git a/db/php_db4/INSTALL b/db/php_db4/INSTALL new file mode 100644 index 000000000..07906a188 --- /dev/null +++ b/db/php_db4/INSTALL @@ -0,0 +1,33 @@ +To install this php module linked against the mod_db4 framework, +execute the following steps: + +> phpize +> ./configure --with-db4[=/path/to/db4] +--with-mod_db4=$APACHE_INCLUDEDIR +> make +> su - +# make install + +Then in your php.ini file add: + +extension=db4.so + +This extension will now only run in a SAPI linked into Apache httpd +(mod_php4, +most likely), and will take advantage of all of its auto-recovery and +handle-caching facilities. + + +To install this php module linked against db-4.2 and NOT the mod_db4 +framework, +execute the following steps: + +> phpize +> ./configure --with-db4[=/path/to/db4] +> make +> su - +# make install + +Then in your php.ini file add: + +extension=db4.so diff --git a/db/php_db4/config.m4 b/db/php_db4/config.m4 new file mode 100644 index 000000000..81df69a48 --- /dev/null +++ b/db/php_db4/config.m4 @@ -0,0 +1,59 @@ +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# http://www.apache.org/licenses/LICENSE-2.0.txt +# + +dnl $Id: config.m4,v 1.1 2004/10/05 14:45:58 bostic Exp $ +dnl config.m4 for extension db4 + +dnl Comments in this file start with the string 'dnl'. +dnl Remove where necessary. This file will not work +dnl without editing.
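The class skeletons listed in ABOUT translate into short scripts along the following lines. This is only a usage sketch, not taken from the extension's own examples: the environment home "./dbhome", the database file "test.db" and the argument order assumed for Db4::put() are illustrative.

<?php
// Open (or create) an environment, then a database inside it.
// The environment home directory must already exist.
@mkdir("./dbhome", 0777);

$dbenv = new Db4Env();
$dbenv->open("./dbhome");            // DB_CREATE | DB_INIT_* by default

$txn = $dbenv->txn_begin();
$db  = new Db4($dbenv);
$db->open($txn, "test.db");          // DB_CREATE by default

// Argument order assumed by analogy with Db4Cursor::put($key, $data, ...).
$db->put("fruit", "apple", $txn);
$txn->commit();

print $db->get("fruit") . "\n";      // expected to print "apple"

$db->sync();
$db->close();
$dbenv->close();
?>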
+ +PHP_ARG_WITH(db4, whether to enable db4 support, +[ --enable-db4 Enable db4 support]) + +PHP_ARG_WITH(mod_db4, whether to link against mod_db4 or db4, +[ --with-mod_db4 Enable mod_db4 support]) + +if test "$PHP_DB4" != "no"; then + if test "$PHP_DB4" != "no"; then + for i in $withval /usr/local/BerkeleyDB.4.2 /usr/local/BerkeleyDB.4.1 /usr/local/BerkeleyDB.4.0 /usr/local /usr; do + if test -f "$i/db4/db.h"; then + THIS_PREFIX=$i + THIS_INCLUDE=$i/db4/db.h + break + elif test -f "$i/include/db4/db.h"; then + THIS_PREFIX=$i + THIS_INCLUDE=$i/include/db4/db.h + break + elif test -f "$i/include/db/db4.h"; then + THIS_PREFIX=$i + THIS_INCLUDE=$i/include/db/db4.h + break + elif test -f "$i/include/db4.h"; then + THIS_PREFIX=$i + THIS_INCLUDE=$i/include/db4.h + break + elif test -f "$i/include/db.h"; then + THIS_PREFIX=$i + THIS_INCLUDE=$i/include/db.h + break + fi + done + PHP_ADD_INCLUDE(THIS_INCLUDE) + PHP_ADD_LIBRARY_WITH_PATH(db-4.2, THIS_PREFIX, DB4_SHARED_LIBADD) + fi + if test "$PHP_MOD_DB4" != "no" && test "$PHP_MOD_DB4" != "yes"; then + PHP_ADD_INCLUDE("$PHP_MOD_DB4") + AC_DEFINE(HAVE_MOD_DB4, 1, [Whether you have mod_db4]) + elif test "$PHP_MOD_DB4" = "no"; then + PHP_ADD_LIBRARY(db-4.2,, DB4_SHARED_LIBADD) + else + AC_MSG_RESULT([no]) + fi + PHP_NEW_EXTENSION(db4, db4.c, $ext_shared) + PHP_SUBST(DB4_SHARED_LIBADD) +fi diff --git a/db/php_db4/db4.c b/db/php_db4/db4.c new file mode 100644 index 000000000..4118dfbe5 --- /dev/null +++ b/db/php_db4/db4.c @@ -0,0 +1,1994 @@ +/*- + * Copyright (c) 2004 + * Sleepycat Software. All rights reserved. + * + * http://www.apache.org/licenses/LICENSE-2.0.txt + * + * authors: Thies C. Arntzen + * Sterling Hughes + * George Schlossnagle + */ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include "php.h" +#include "php_ini.h" +#include "ext/standard/info.h" +#include "php_db4.h" +#ifdef HAVE_MOD_DB4 +#include "mod_db4_export.h" +#else +#include "db.h" +#endif + + +#ifdef HAVE_MOD_DB4 + #define my_db_create mod_db4_db_create + #define my_db_env_create mod_db4_db_env_create +#else + #define my_db_create db_create + #define my_db_env_create db_env_create +#endif + +extern int errno; + +/* True global resources - no need for thread safety here */ +static int le_db; +static int le_dbc; +static int le_db_txn; +static int le_dbenv; + +struct php_DB_TXN { + DB_TXN *db_txn; + struct my_llist *open_cursors; + struct my_llist *open_dbs; +}; + +struct php_DBC { + DBC *dbc; + struct php_DB_TXN *parent_txn; +}; + +struct php_DB { + DB *db; + int autocommit; +}; + +static void _free_php_db_txn(zend_rsrc_list_entry *rsrc TSRMLS_DC) +{ + struct php_DB_TXN *pdbtxn = (struct php_DB_TXN *) rsrc->ptr; + /* should probably iterate over open_cursors */ +#ifndef HAVE_MOD_DB4 + if(pdbtxn->db_txn) pdbtxn->db_txn->abort(pdbtxn->db_txn); + pdbtxn->db_txn = NULL; +#endif + if(pdbtxn) efree(pdbtxn); +} + +static void _free_php_dbc(zend_rsrc_list_entry *rsrc TSRMLS_DC) +{ + struct php_DBC *pdbc = (struct php_DBC *) rsrc->ptr; +#ifndef HAVE_MOD_DB4 + if(pdbc->dbc) pdbc->dbc->c_close(pdbc->dbc); + pdbc->dbc = NULL; +#endif + if(pdbc) efree(pdbc); +} + +static void _free_php_db(zend_rsrc_list_entry *rsrc TSRMLS_DC) +{ + struct php_DB *pdb = (struct php_DB *) rsrc->ptr; +#ifndef HAVE_MOD_DB4 + if(pdb->db) pdb->db->close(pdb->db, 0); + pdb->db = NULL; +#endif + if(pdb) efree(pdb); +} + +static void _free_php_dbenv(zend_rsrc_list_entry *rsrc TSRMLS_DC) +{ +#ifndef HAVE_MOD_DB4 + DB_ENV *dbenv = (DB_ENV *) rsrc->ptr; + if(dbenv) dbenv->close(dbenv, 0); +#endif +} + +static 
zend_class_entry *db_txn_ce; +static zend_class_entry *dbc_ce; +static zend_class_entry *db_ce; +static zend_class_entry *db_env_ce; + +/* helpers */ +struct my_llist { + void *data; + struct my_llist *next; + struct my_llist *prev; +}; + +static struct my_llist *my_llist_add(struct my_llist *list, void *data) { + if(!list) { + list = (struct my_llist *)emalloc(sizeof(*list)); + list->data = data; + list->next = list->prev = NULL; + return list; + } else { + struct my_llist *node; + node = (struct my_llist *)emalloc(sizeof(*node)); + node->data = data; + node->next = list; + node->prev = NULL; + return node; + } +} + +static struct my_llist *my_llist_del(struct my_llist *list, void *data) { + struct my_llist *ptr = list; + if(!ptr) return NULL; + if(ptr->data == data) { /* special case, first element */ + ptr = ptr->next; + efree(list); + return ptr; + } + while(ptr) { + if(data == ptr->data) { + if(ptr->prev) ptr->prev->next = ptr->next; + if(ptr->next) ptr->next->prev = ptr->prev; + efree(ptr); + break; + } + ptr = ptr->next; + } + return list; +} + +/* {{{ db4_functions[] + * + * Every user visible function must have an entry in db4_functions[]. + */ +function_entry db4_functions[] = { + /* PHP_FE(db4_dbenv_create, NULL) */ + {NULL, NULL, NULL} /* Must be the last line in db4_functions[] */ +}; +/* }}} */ + +PHP_MINIT_FUNCTION(db4); +PHP_MSHUTDOWN_FUNCTION(db4); +PHP_RINIT_FUNCTION(db4); +PHP_RSHUTDOWN_FUNCTION(db4); +PHP_MINFO_FUNCTION(db4); + +/* {{{ db4_module_entry + */ +zend_module_entry db4_module_entry = { +#if ZEND_MODULE_API_NO >= 20010901 + STANDARD_MODULE_HEADER, +#endif + "db4", + db4_functions, + PHP_MINIT(db4), + PHP_MSHUTDOWN(db4), + NULL, + NULL, + PHP_MINFO(db4), + "0.9", /* Replace with version number for your extension */ + STANDARD_MODULE_PROPERTIES +}; +/* }}} */ + +/* {{{ class entries + */ + +/* {{{ DB4Txn method forward declarations + */ + +zend_class_entry *db_txn_ce_get(void) +{ + return db_txn_ce; +} + +ZEND_NAMED_FUNCTION(_wrap_db_txn_abort); +ZEND_NAMED_FUNCTION(_wrap_db_txn_commit); +ZEND_NAMED_FUNCTION(_wrap_db_txn_discard); +ZEND_NAMED_FUNCTION(_wrap_db_txn_id); +ZEND_NAMED_FUNCTION(_wrap_db_txn_set_timeout); +ZEND_NAMED_FUNCTION(_wrap_new_DbTxn); + +static zend_function_entry DbTxn_functions[] = { + ZEND_NAMED_FE(abort, _wrap_db_txn_abort, NULL) + ZEND_NAMED_FE(commit, _wrap_db_txn_commit, NULL) + ZEND_NAMED_FE(discard, _wrap_db_txn_discard, NULL) + ZEND_NAMED_FE(id, _wrap_db_txn_id, NULL) + ZEND_NAMED_FE(set_timeout, _wrap_db_txn_set_timeout, NULL) + ZEND_NAMED_FE(db4txn, _wrap_new_DbTxn, NULL) + { NULL, NULL, NULL} +}; +/* }}} */ + +/* {{{ DB4Cursor method forward declarations + */ + +zend_class_entry *dbc_ce_get(void) +{ + return dbc_ce; +} + +ZEND_NAMED_FUNCTION(_wrap_dbc_close); +ZEND_NAMED_FUNCTION(_wrap_dbc_count); +ZEND_NAMED_FUNCTION(_wrap_dbc_del); +ZEND_NAMED_FUNCTION(_wrap_dbc_dup); +ZEND_NAMED_FUNCTION(_wrap_dbc_get); +ZEND_NAMED_FUNCTION(_wrap_dbc_put); +ZEND_NAMED_FUNCTION(_wrap_dbc_pget); + +static zend_function_entry Dbc_functions[] = { + ZEND_NAMED_FE(close, _wrap_dbc_close, NULL) + ZEND_NAMED_FE(count, _wrap_dbc_count, NULL) + ZEND_NAMED_FE(del, _wrap_dbc_del, NULL) + ZEND_NAMED_FE(dup, _wrap_dbc_dup, NULL) + ZEND_NAMED_FE(get, _wrap_dbc_get, NULL) + ZEND_NAMED_FE(put, _wrap_dbc_put, NULL) + ZEND_NAMED_FE(pget, _wrap_dbc_pget, second_arg_force_ref) + { NULL, NULL, NULL} +}; +/* }}} */ + +/* {{{ DB4Env method forward declarations + */ + +zend_class_entry *db_env_ce_get(void) +{ + return db_env_ce; +} + 
+ZEND_NAMED_FUNCTION(_wrap_new_DbEnv); +ZEND_NAMED_FUNCTION(_wrap_db_env_close); +ZEND_NAMED_FUNCTION(_wrap_db_env_dbremove); +ZEND_NAMED_FUNCTION(_wrap_db_env_dbrename); +ZEND_NAMED_FUNCTION(_wrap_db_env_open); +ZEND_NAMED_FUNCTION(_wrap_db_env_remove); +ZEND_NAMED_FUNCTION(_wrap_db_env_set_data_dir); +ZEND_NAMED_FUNCTION(_wrap_db_env_txn_begin); +ZEND_NAMED_FUNCTION(_wrap_db_env_txn_checkpoint); + +static zend_function_entry DbEnv_functions[] = { + ZEND_NAMED_FE(db4env, _wrap_new_DbEnv, NULL) + ZEND_NAMED_FE(close, _wrap_db_env_close, NULL) + ZEND_NAMED_FE(dbremove, _wrap_db_env_dbremove, NULL) + ZEND_NAMED_FE(dbrename, _wrap_db_env_dbrename, NULL) + ZEND_NAMED_FE(open, _wrap_db_env_open, NULL) + ZEND_NAMED_FE(remove, _wrap_db_env_remove, NULL) + ZEND_NAMED_FE(set_data_dir, _wrap_db_env_set_data_dir, NULL) + ZEND_NAMED_FE(txn_begin, _wrap_db_env_txn_begin, NULL) + ZEND_NAMED_FE(txn_checkpoint, _wrap_db_env_txn_checkpoint, NULL) + { NULL, NULL, NULL} +}; + +/* }}} */ + +/* {{{ DB4 method forward declarations + */ + +zend_class_entry *db_ce_get(void) +{ + return db_ce; +} + +ZEND_NAMED_FUNCTION(_wrap_new_db4); +ZEND_NAMED_FUNCTION(_wrap_db_open); +ZEND_NAMED_FUNCTION(_wrap_db_close); +ZEND_NAMED_FUNCTION(_wrap_db_del); +ZEND_NAMED_FUNCTION(_wrap_db_get); +ZEND_NAMED_FUNCTION(_wrap_db_pget); +ZEND_NAMED_FUNCTION(_wrap_db_get_type); +ZEND_NAMED_FUNCTION(_wrap_db_join); +ZEND_NAMED_FUNCTION(_wrap_db_put); +ZEND_NAMED_FUNCTION(_wrap_db_stat); +ZEND_NAMED_FUNCTION(_wrap_db_sync); +ZEND_NAMED_FUNCTION(_wrap_db_truncate); +ZEND_NAMED_FUNCTION(_wrap_db_cursor); + +static zend_function_entry Db4_functions[] = { + ZEND_NAMED_FE(db4, _wrap_new_db4, NULL) + ZEND_NAMED_FE(open, _wrap_db_open, NULL) + ZEND_NAMED_FE(close, _wrap_db_close, NULL) + ZEND_NAMED_FE(del, _wrap_db_del, NULL) + ZEND_NAMED_FE(get, _wrap_db_get, NULL) + ZEND_NAMED_FE(pget, _wrap_db_pget, second_arg_force_ref) + ZEND_NAMED_FE(get_type, _wrap_db_get_type, NULL) + ZEND_NAMED_FE(put, _wrap_db_put, NULL) + ZEND_NAMED_FE(stat, _wrap_db_stat, NULL) + ZEND_NAMED_FE(sync, _wrap_db_sync, NULL) + ZEND_NAMED_FE(truncate, _wrap_db_truncate, NULL) + ZEND_NAMED_FE(cursor, _wrap_db_cursor, NULL) + ZEND_NAMED_FE(join, _wrap_db_join, NULL) + { NULL, NULL, NULL} +}; +/* }}} */ +/* }}} */ + +#ifdef COMPILE_DL_DB4 +ZEND_GET_MODULE(db4) +#endif + +/* {{{ PHP_INI + */ +/* Remove comments and fill if you need to have entries in php.ini +PHP_INI_BEGIN() +PHP_INI_END() +*/ +/* }}} */ + +/* {{{ php_db4_init_globals + */ +/* Uncomment this function if you have INI entries +static void php_db4_init_globals(zend_db4_globals *db4_globals) +{ +} +*/ +/* }}} */ + +/* {{{ PHP_MINIT_FUNCTION + */ +PHP_MINIT_FUNCTION(db4) +{ + /* If you have INI entries, uncomment these lines + ZEND_INIT_MODULE_GLOBALS(db4, php_db4_init_globals, NULL); + REGISTER_INI_ENTRIES(); + */ + static zend_class_entry _db_txn_ce; + static zend_class_entry _dbc_ce; + static zend_class_entry _db_ce; + static zend_class_entry _db_env_ce; + + INIT_CLASS_ENTRY(_db_txn_ce, "db4txn", DbTxn_functions); + db_txn_ce = zend_register_internal_class(&_db_txn_ce TSRMLS_CC); + + INIT_CLASS_ENTRY(_dbc_ce, "db4cursor", Dbc_functions); + dbc_ce = zend_register_internal_class(&_dbc_ce TSRMLS_CC); + + INIT_CLASS_ENTRY(_db_ce, "db4", Db4_functions); + db_ce = zend_register_internal_class(&_db_ce TSRMLS_CC); + + INIT_CLASS_ENTRY(_db_env_ce, "db4env", DbEnv_functions); + db_env_ce = zend_register_internal_class(&_db_env_ce TSRMLS_CC); + + le_db = zend_register_list_destructors_ex(_free_php_db, NULL, "Db4", 
module_number); + le_dbenv = zend_register_list_destructors_ex(_free_php_dbenv, NULL, "Db4Env", module_number); + le_db_txn = zend_register_list_destructors_ex(_free_php_db_txn, NULL, "Db4Txn", module_number); + le_dbc = zend_register_list_destructors_ex(_free_php_dbc, NULL, "Db4Cursor", module_number); + + REGISTER_LONG_CONSTANT("DB_VERSION_MAJOR", DB_VERSION_MAJOR, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_VERSION_MINOR", DB_VERSION_MINOR, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_VERSION_PATCH", DB_VERSION_PATCH, CONST_CS | CONST_PERSISTENT); + REGISTER_STRING_CONSTANT("DB_VERSION_STRING", DB_VERSION_STRING, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_MAX_PAGES", DB_MAX_PAGES, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_MAX_RECORDS", DB_MAX_RECORDS, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_DBT_APPMALLOC", DB_DBT_APPMALLOC, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_DBT_ISSET", DB_DBT_ISSET, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_DBT_MALLOC", DB_DBT_MALLOC, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_DBT_PARTIAL", DB_DBT_PARTIAL, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_DBT_REALLOC", DB_DBT_REALLOC, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_DBT_USERMEM", DB_DBT_USERMEM, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_DBT_DUPOK", DB_DBT_DUPOK, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_CREATE", DB_CREATE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_CXX_NO_EXCEPTIONS", DB_CXX_NO_EXCEPTIONS, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_FORCE", DB_FORCE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_NOMMAP", DB_NOMMAP, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_RDONLY", DB_RDONLY, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_RECOVER", DB_RECOVER, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_THREAD", DB_THREAD, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_TRUNCATE", DB_TRUNCATE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_TXN_NOSYNC", DB_TXN_NOSYNC, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_TXN_NOT_DURABLE", DB_TXN_NOT_DURABLE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_USE_ENVIRON", DB_USE_ENVIRON, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_USE_ENVIRON_ROOT", DB_USE_ENVIRON_ROOT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AUTO_COMMIT", DB_AUTO_COMMIT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_DIRTY_READ", DB_DIRTY_READ, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_NO_AUTO_COMMIT", DB_NO_AUTO_COMMIT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_RPCCLIENT", DB_RPCCLIENT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_REP_CREATE", DB_REP_CREATE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_XA_CREATE", DB_XA_CREATE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_INIT_CDB", DB_INIT_CDB, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_INIT_LOCK", DB_INIT_LOCK, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_INIT_LOG", DB_INIT_LOG, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_INIT_MPOOL", DB_INIT_MPOOL, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_INIT_REP", DB_INIT_REP, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_INIT_TXN", DB_INIT_TXN, CONST_CS | CONST_PERSISTENT); + 
REGISTER_LONG_CONSTANT("DB_JOINENV", DB_JOINENV, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOCKDOWN", DB_LOCKDOWN, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_PRIVATE", DB_PRIVATE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_RECOVER_FATAL", DB_RECOVER_FATAL, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_SYSTEM_MEM", DB_SYSTEM_MEM, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_EXCL", DB_EXCL, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_FCNTL_LOCKING", DB_FCNTL_LOCKING, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_RDWRMASTER", DB_RDWRMASTER, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_WRITEOPEN", DB_WRITEOPEN, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_TXN_NOWAIT", DB_TXN_NOWAIT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_TXN_SYNC", DB_TXN_SYNC, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENCRYPT_AES", DB_ENCRYPT_AES, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_CDB_ALLDB", DB_CDB_ALLDB, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_DIRECT_DB", DB_DIRECT_DB, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_DIRECT_LOG", DB_DIRECT_LOG, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOG_AUTOREMOVE", DB_LOG_AUTOREMOVE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_NOLOCKING", DB_NOLOCKING, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_NOPANIC", DB_NOPANIC, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_OVERWRITE", DB_OVERWRITE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_PANIC_ENVIRONMENT", DB_PANIC_ENVIRONMENT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_REGION_INIT", DB_REGION_INIT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_TIME_NOTGRANTED", DB_TIME_NOTGRANTED, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_TXN_WRITE_NOSYNC", DB_TXN_WRITE_NOSYNC, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_YIELDCPU", DB_YIELDCPU, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_UPGRADE", DB_UPGRADE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_VERIFY", DB_VERIFY, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_DIRECT", DB_DIRECT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_EXTENT", DB_EXTENT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ODDFILESIZE", DB_ODDFILESIZE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_CHKSUM", DB_CHKSUM, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_DUP", DB_DUP, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_DUPSORT", DB_DUPSORT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENCRYPT", DB_ENCRYPT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_RECNUM", DB_RECNUM, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_RENUMBER", DB_RENUMBER, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_REVSPLITOFF", DB_REVSPLITOFF, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_SNAPSHOT", DB_SNAPSHOT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_STAT_CLEAR", DB_STAT_CLEAR, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_JOIN_NOSORT", DB_JOIN_NOSORT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AGGRESSIVE", DB_AGGRESSIVE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_NOORDERCHK", DB_NOORDERCHK, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ORDERCHKONLY", 
DB_ORDERCHKONLY, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_PR_PAGE", DB_PR_PAGE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_PR_RECOVERYTEST", DB_PR_RECOVERYTEST, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_PRINTABLE", DB_PRINTABLE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_SALVAGE", DB_SALVAGE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_REP_NOBUFFER", DB_REP_NOBUFFER, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_REP_PERMANENT", DB_REP_PERMANENT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOCKVERSION", DB_LOCKVERSION, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_FILE_ID_LEN", DB_FILE_ID_LEN, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOCK_NORUN", DB_LOCK_NORUN, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOCK_DEFAULT", DB_LOCK_DEFAULT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOCK_EXPIRE", DB_LOCK_EXPIRE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOCK_MAXLOCKS", DB_LOCK_MAXLOCKS, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOCK_MINLOCKS", DB_LOCK_MINLOCKS, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOCK_MINWRITE", DB_LOCK_MINWRITE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOCK_OLDEST", DB_LOCK_OLDEST, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOCK_RANDOM", DB_LOCK_RANDOM, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOCK_YOUNGEST", DB_LOCK_YOUNGEST, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOCK_NOWAIT", DB_LOCK_NOWAIT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOCK_RECORD", DB_LOCK_RECORD, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOCK_REMOVE", DB_LOCK_REMOVE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOCK_SET_TIMEOUT", DB_LOCK_SET_TIMEOUT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOCK_SWITCH", DB_LOCK_SWITCH, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOCK_UPGRADE", DB_LOCK_UPGRADE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_HANDLE_LOCK", DB_HANDLE_LOCK, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_RECORD_LOCK", DB_RECORD_LOCK, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_PAGE_LOCK", DB_PAGE_LOCK, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOGVERSION", DB_LOGVERSION, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOGOLDVER", DB_LOGOLDVER, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOGMAGIC", DB_LOGMAGIC, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ARCH_ABS", DB_ARCH_ABS, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ARCH_DATA", DB_ARCH_DATA, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ARCH_LOG", DB_ARCH_LOG, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ARCH_REMOVE", DB_ARCH_REMOVE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_FLUSH", DB_FLUSH, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOG_CHKPNT", DB_LOG_CHKPNT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOG_COMMIT", DB_LOG_COMMIT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOG_NOCOPY", DB_LOG_NOCOPY, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOG_NOT_DURABLE", DB_LOG_NOT_DURABLE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOG_PERM", DB_LOG_PERM, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOG_WRNOSYNC", DB_LOG_WRNOSYNC, 
CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_user_BEGIN", DB_user_BEGIN, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_debug_FLAG", DB_debug_FLAG, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOGC_BUF_SIZE", DB_LOGC_BUF_SIZE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOG_DISK", DB_LOG_DISK, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOG_LOCKED", DB_LOG_LOCKED, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOG_SILENT_ERR", DB_LOG_SILENT_ERR, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_MPOOL_CREATE", DB_MPOOL_CREATE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_MPOOL_LAST", DB_MPOOL_LAST, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_MPOOL_NEW", DB_MPOOL_NEW, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_MPOOL_CLEAN", DB_MPOOL_CLEAN, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_MPOOL_DIRTY", DB_MPOOL_DIRTY, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_MPOOL_DISCARD", DB_MPOOL_DISCARD, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_MPOOL_NOFILE", DB_MPOOL_NOFILE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_MPOOL_UNLINK", DB_MPOOL_UNLINK, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_TXNVERSION", DB_TXNVERSION, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_XIDDATASIZE", DB_XIDDATASIZE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_EID_BROADCAST", DB_EID_BROADCAST, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_EID_INVALID", DB_EID_INVALID, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_REP_CLIENT", DB_REP_CLIENT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_REP_LOGSONLY", DB_REP_LOGSONLY, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_REP_MASTER", DB_REP_MASTER, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_RENAMEMAGIC", DB_RENAMEMAGIC, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_BTREEVERSION", DB_BTREEVERSION, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_BTREEOLDVER", DB_BTREEOLDVER, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_BTREEMAGIC", DB_BTREEMAGIC, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_HASHVERSION", DB_HASHVERSION, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_HASHOLDVER", DB_HASHOLDVER, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_HASHMAGIC", DB_HASHMAGIC, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_QAMVERSION", DB_QAMVERSION, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_QAMOLDVER", DB_QAMOLDVER, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_QAMMAGIC", DB_QAMMAGIC, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AFTER", DB_AFTER, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_APPEND", DB_APPEND, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_BEFORE", DB_BEFORE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_CACHED_COUNTS", DB_CACHED_COUNTS, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_CONSUME", DB_CONSUME, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_CONSUME_WAIT", DB_CONSUME_WAIT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_CURRENT", DB_CURRENT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_FAST_STAT", DB_FAST_STAT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_FIRST", DB_FIRST, CONST_CS | CONST_PERSISTENT); + 
REGISTER_LONG_CONSTANT("DB_GET_BOTH", DB_GET_BOTH, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_GET_BOTHC", DB_GET_BOTHC, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_GET_BOTH_RANGE", DB_GET_BOTH_RANGE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_GET_RECNO", DB_GET_RECNO, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_JOIN_ITEM", DB_JOIN_ITEM, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_KEYFIRST", DB_KEYFIRST, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_KEYLAST", DB_KEYLAST, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LAST", DB_LAST, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_NEXT", DB_NEXT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_NEXT_DUP", DB_NEXT_DUP, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_NEXT_NODUP", DB_NEXT_NODUP, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_NODUPDATA", DB_NODUPDATA, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_NOOVERWRITE", DB_NOOVERWRITE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_NOSYNC", DB_NOSYNC, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_POSITION", DB_POSITION, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_PREV", DB_PREV, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_PREV_NODUP", DB_PREV_NODUP, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_RECORDCOUNT", DB_RECORDCOUNT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_SET", DB_SET, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_SET_LOCK_TIMEOUT", DB_SET_LOCK_TIMEOUT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_SET_RANGE", DB_SET_RANGE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_SET_RECNO", DB_SET_RECNO, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_SET_TXN_NOW", DB_SET_TXN_NOW, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_SET_TXN_TIMEOUT", DB_SET_TXN_TIMEOUT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_UPDATE_SECONDARY", DB_UPDATE_SECONDARY, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_WRITECURSOR", DB_WRITECURSOR, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_WRITELOCK", DB_WRITELOCK, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_OPFLAGS_MASK", DB_OPFLAGS_MASK, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_MULTIPLE", DB_MULTIPLE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_MULTIPLE_KEY", DB_MULTIPLE_KEY, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_RMW", DB_RMW, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOCK_DEADLOCK", DB_LOCK_DEADLOCK, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_DONOTINDEX", DB_DONOTINDEX, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_FILEOPEN", DB_FILEOPEN, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_KEYEMPTY", DB_KEYEMPTY, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_KEYEXIST", DB_KEYEXIST, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOCK_DEADLOCK", DB_LOCK_DEADLOCK, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOCK_NOTGRANTED", DB_LOCK_NOTGRANTED, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_NOSERVER", DB_NOSERVER, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_NOSERVER_HOME", DB_NOSERVER_HOME, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_NOSERVER_ID", DB_NOSERVER_ID, CONST_CS | CONST_PERSISTENT); + 
REGISTER_LONG_CONSTANT("DB_NOTFOUND", DB_NOTFOUND, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_OLD_VERSION", DB_OLD_VERSION, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_PAGE_NOTFOUND", DB_PAGE_NOTFOUND, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_REP_DUPMASTER", DB_REP_DUPMASTER, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_REP_HANDLE_DEAD", DB_REP_HANDLE_DEAD, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_REP_HOLDELECTION", DB_REP_HOLDELECTION, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_REP_ISPERM", DB_REP_ISPERM, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_REP_NEWMASTER", DB_REP_NEWMASTER, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_REP_NEWSITE", DB_REP_NEWSITE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_REP_NOTPERM", DB_REP_NOTPERM, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_REP_OUTDATED", DB_REP_OUTDATED, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_REP_UNAVAIL", DB_REP_UNAVAIL, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_RUNRECOVERY", DB_RUNRECOVERY, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_SECONDARY_BAD", DB_SECONDARY_BAD, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_VERIFY_BAD", DB_VERIFY_BAD, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ALREADY_ABORTED", DB_ALREADY_ABORTED, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_DELETED", DB_DELETED, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOCK_NOTEXIST", DB_LOCK_NOTEXIST, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_NEEDSPLIT", DB_NEEDSPLIT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_SURPRISE_KID", DB_SURPRISE_KID, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_SWAPBYTES", DB_SWAPBYTES, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_TIMEOUT", DB_TIMEOUT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_TXN_CKP", DB_TXN_CKP, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_VERIFY_FATAL", DB_VERIFY_FATAL, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_LOGFILEID_INVALID", DB_LOGFILEID_INVALID, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_OK_BTREE", DB_OK_BTREE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_OK_HASH", DB_OK_HASH, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_OK_QUEUE", DB_OK_QUEUE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_OK_RECNO", DB_OK_RECNO, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_CHKSUM", DB_AM_CHKSUM, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_CL_WRITER", DB_AM_CL_WRITER, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_COMPENSATE", DB_AM_COMPENSATE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_CREATED", DB_AM_CREATED, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_CREATED_MSTR", DB_AM_CREATED_MSTR, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_DBM_ERROR", DB_AM_DBM_ERROR, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_DELIMITER", DB_AM_DELIMITER, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_DIRTY", DB_AM_DIRTY, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_DISCARD", DB_AM_DISCARD, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_DUP", DB_AM_DUP, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_DUPSORT", DB_AM_DUPSORT, CONST_CS | CONST_PERSISTENT); + 
REGISTER_LONG_CONSTANT("DB_AM_ENCRYPT", DB_AM_ENCRYPT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_FIXEDLEN", DB_AM_FIXEDLEN, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_INMEM", DB_AM_INMEM, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_IN_RENAME", DB_AM_IN_RENAME, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_NOT_DURABLE", DB_AM_NOT_DURABLE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_OPEN_CALLED", DB_AM_OPEN_CALLED, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_PAD", DB_AM_PAD, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_PGDEF", DB_AM_PGDEF, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_RDONLY", DB_AM_RDONLY, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_RECNUM", DB_AM_RECNUM, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_RECOVER", DB_AM_RECOVER, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_RENUMBER", DB_AM_RENUMBER, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_REPLICATION", DB_AM_REPLICATION, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_REVSPLITOFF", DB_AM_REVSPLITOFF, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_SECONDARY", DB_AM_SECONDARY, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_SNAPSHOT", DB_AM_SNAPSHOT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_SUBDB", DB_AM_SUBDB, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_SWAP", DB_AM_SWAP, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_TXN", DB_AM_TXN, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_AM_VERIFYING", DB_AM_VERIFYING, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_REGION_MAGIC", DB_REGION_MAGIC, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_VERB_CHKPOINT", DB_VERB_CHKPOINT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_VERB_DEADLOCK", DB_VERB_DEADLOCK, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_VERB_RECOVERY", DB_VERB_RECOVERY, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_VERB_REPLICATION", DB_VERB_REPLICATION, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_VERB_WAITSFOR", DB_VERB_WAITSFOR, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_TEST_ELECTINIT", DB_TEST_ELECTINIT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_TEST_POSTDESTROY", DB_TEST_POSTDESTROY, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_TEST_POSTLOG", DB_TEST_POSTLOG, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_TEST_POSTLOGMETA", DB_TEST_POSTLOGMETA, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_TEST_POSTOPEN", DB_TEST_POSTOPEN, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_TEST_POSTSYNC", DB_TEST_POSTSYNC, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_TEST_PREDESTROY", DB_TEST_PREDESTROY, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_TEST_PREOPEN", DB_TEST_PREOPEN, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_TEST_SUBDB_LOCKS", DB_TEST_SUBDB_LOCKS, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_AUTO_COMMIT", DB_ENV_AUTO_COMMIT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_CDB", DB_ENV_CDB, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_CDB_ALLDB", DB_ENV_CDB_ALLDB, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_CREATE", DB_ENV_CREATE, CONST_CS | CONST_PERSISTENT); + 
REGISTER_LONG_CONSTANT("DB_ENV_DBLOCAL", DB_ENV_DBLOCAL, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_DIRECT_DB", DB_ENV_DIRECT_DB, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_DIRECT_LOG", DB_ENV_DIRECT_LOG, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_FATAL", DB_ENV_FATAL, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_LOCKDOWN", DB_ENV_LOCKDOWN, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_LOG_AUTOREMOVE", DB_ENV_LOG_AUTOREMOVE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_NOLOCKING", DB_ENV_NOLOCKING, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_NOMMAP", DB_ENV_NOMMAP, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_NOPANIC", DB_ENV_NOPANIC, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_OPEN_CALLED", DB_ENV_OPEN_CALLED, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_OVERWRITE", DB_ENV_OVERWRITE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_PRIVATE", DB_ENV_PRIVATE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_REGION_INIT", DB_ENV_REGION_INIT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_RPCCLIENT", DB_ENV_RPCCLIENT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_RPCCLIENT_GIVEN", DB_ENV_RPCCLIENT_GIVEN, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_SYSTEM_MEM", DB_ENV_SYSTEM_MEM, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_THREAD", DB_ENV_THREAD, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_TIME_NOTGRANTED", DB_ENV_TIME_NOTGRANTED, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_TXN_NOSYNC", DB_ENV_TXN_NOSYNC, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_TXN_NOT_DURABLE", DB_ENV_TXN_NOT_DURABLE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_TXN_WRITE_NOSYNC", DB_ENV_TXN_WRITE_NOSYNC, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_ENV_YIELDCPU", DB_ENV_YIELDCPU, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_DBM_HSEARCH", DB_DBM_HSEARCH, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_RPC_SERVERPROG", DB_RPC_SERVERPROG, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT("DB_RPC_SERVERVERS", DB_RPC_SERVERVERS, CONST_CS | CONST_PERSISTENT); +} +/* }}} */ + +/* {{{ PHP_MSHUTDOWN_FUNCTION + */ +PHP_MSHUTDOWN_FUNCTION(db4) +{ + /* uncomment this line if you have INI entries + UNREGISTER_INI_ENTRIES(); + */ + return SUCCESS; +} +/* }}} */ + +/* {{{ PHP_MINFO_FUNCTION + */ +PHP_MINFO_FUNCTION(db4) +{ + php_info_print_table_start(); + php_info_print_table_header(2, "db4 support", "enabled"); + php_info_print_table_end(); + + /* Remove comments if you have entries in php.ini + DISPLAY_INI_ENTRIES(); + */ +} +/* }}} */ + + +/* {{{ resource accessors + */ +void setDbEnv(zval *z, DB_ENV *dbenv TSRMLS_DC) +{ + long rsrc_id; + + rsrc_id = zend_register_resource(NULL, dbenv, le_dbenv); + zend_list_addref(rsrc_id); + add_property_resource(z, "_dbenv_ptr", rsrc_id); +} + +DB_ENV *php_db4_getDbEnvFromObj(zval *z TSRMLS_DC) +{ + DB_ENV *dbenv; + zval **rsrc; + if(zend_hash_find(HASH_OF(z), "_dbenv_ptr", sizeof("_dbenv_ptr"), + (void **) &rsrc) == SUCCESS) + { + dbenv = (DB_ENV *) zend_fetch_resource(rsrc TSRMLS_CC, -1, "Db4Env", NULL, 1, le_dbenv); + return dbenv; + } + return NULL; +} + + +#define getDbEnvFromThis(a) \ +do { \ + zval *_this = getThis(); \ + if(!_this) { \ + php_error_docref(NULL TSRMLS_CC, E_WARNING, "must be 
called as a method"); \ + RETURN_FALSE; \ + } \ + (a) = php_db4_getDbEnvFromObj(_this TSRMLS_CC); \ + if(!(a)) { \ + php_error_docref(NULL TSRMLS_CC, E_WARNING, "not a valid db4Env object"); \ + RETURN_FALSE; \ + } \ +} while(0) + +void setDb(zval *z, DB *db, int autocommit TSRMLS_DC) +{ + long rsrc_id; + struct php_DB *pdb = emalloc(sizeof(*pdb)); + memset(pdb, 0, sizeof(*pdb)); + pdb->db = db; + pdb->autocommit = autocommit; + rsrc_id = ZEND_REGISTER_RESOURCE(NULL, pdb, le_db); + add_property_resource(z, "_db_ptr", rsrc_id); +} + +struct php_DB *getPhpDbFromObj(zval *z TSRMLS_DC) +{ + struct php_DB *pdb; + zval **rsrc; + if(zend_hash_find(HASH_OF(z), "_db_ptr", sizeof("_db_ptr"), (void **) &rsrc) == SUCCESS) { + pdb = (struct php_DB *) zend_fetch_resource(rsrc TSRMLS_CC, -1, "Db4", NULL, 1, le_db); + return pdb; + } + return NULL; +} + +DB *php_db4_getDbFromObj(zval *z TSRMLS_DC) +{ + struct php_DB *pdb; + zval **rsrc; + if(zend_hash_find(HASH_OF(z), "_db_ptr", sizeof("_db_ptr"), (void **) &rsrc) == SUCCESS) { + pdb = (struct php_DB *) zend_fetch_resource(rsrc TSRMLS_CC, -1, "Db4", NULL, 1, le_db); + return pdb->db; + } + return NULL; +} + +#define getDbFromThis(a, b) \ +do { \ + struct php_DB *pdb; \ + zval *_this = getThis(); \ + if(!_this) { \ + php_error_docref(NULL TSRMLS_CC, E_WARNING, "must be called as a method"); \ + RETURN_FALSE; \ + } \ + pdb = getPhpDbFromObj(_this TSRMLS_CC); \ + if(!pdb || !pdb->db) { \ + assert(0); \ + php_error_docref(NULL TSRMLS_CC, E_WARNING, "not a valid db4 object"); \ + RETURN_FALSE; \ + } \ + (a) = pdb->db; \ + (b) = pdb->autocommit; \ +} while(0) + +void setDbTxn(zval *z, DB_TXN *dbtxn TSRMLS_DC) +{ + long rsrc_id; + struct php_DB_TXN *txn = emalloc(sizeof(*txn)); + memset(txn, 0, sizeof(*txn)); + txn->db_txn = dbtxn; + rsrc_id = ZEND_REGISTER_RESOURCE(NULL, txn, le_db_txn); + zend_list_addref(rsrc_id); + add_property_resource(z, "_dbtxn_ptr", rsrc_id); +} + +DB_TXN *php_db4_getDbTxnFromObj(zval *z TSRMLS_DC) +{ + struct php_DB_TXN *pdbtxn; + zval **rsrc; + if(zend_hash_find(HASH_OF(z), "_dbtxn_ptr", sizeof("_dbtxn_ptr"), + (void **) &rsrc) == SUCCESS) + { + pdbtxn = (struct php_DB_TXN *) zend_fetch_resource(rsrc TSRMLS_CC, -1, "Db4Txn", NULL, 1, le_db_txn); + return pdbtxn->db_txn; + } + return NULL; +} + +struct php_DB_TXN *getPhpDbTxnFromObj(zval *z TSRMLS_DC) +{ + struct php_DB_TXN *pdbtxn; + zval **rsrc; + if(zend_hash_find(HASH_OF(z), "_dbtxn_ptr", sizeof("_dbtxn_ptr"), + (void **) &rsrc) == SUCCESS) + { + pdbtxn = (struct php_DB_TXN *) zend_fetch_resource(rsrc TSRMLS_CC, -1, "Db4Txn", NULL, 1, le_db_txn); + return pdbtxn; + } + return NULL; +} + +#define getDbTxnFromThis(a) \ +do { \ + zval *_this = getThis(); \ + if(!_this) { \ + php_error_docref(NULL TSRMLS_CC, E_WARNING, "must be called as a method"); \ + RETURN_FALSE; \ + } \ + (a) = php_db4_getDbTxnFromObj(_this TSRMLS_CC); \ + if(!(a)) { \ + php_error_docref(NULL TSRMLS_CC, E_WARNING, "not a valid db4txn object"); \ + RETURN_FALSE; \ + } \ +} while(0) + +#define getPhpDbTxnFromThis(a) \ +do { \ + zval *_this = getThis(); \ + if(!_this) { \ + php_error_docref(NULL TSRMLS_CC, E_WARNING, "must be called as a method"); \ + RETURN_FALSE; \ + } \ + (a) = getPhpDbTxnFromObj(_this TSRMLS_CC); \ + if(!(a) || !(a)->db_txn) { \ + php_error_docref(NULL TSRMLS_CC, E_WARNING, "not a valid db4txn object"); \ + RETURN_FALSE; \ + } \ +} while(0) + +void closeDbTxnDependencies(zval *obj TSRMLS_DC) { + struct php_DB_TXN *pdbtxn = getPhpDbTxnFromObj(obj TSRMLS_CC); + if(pdbtxn) { + 
while(pdbtxn->open_cursors) { + struct my_llist *el = pdbtxn->open_cursors; + struct php_DBC *pdbc = el->data; + if(pdbc) { + if(pdbc->dbc) { + pdbc->dbc->c_close(pdbc->dbc); + pdbc->dbc = NULL; + } + pdbc->parent_txn = NULL; + } +// efree(el->data); + pdbtxn->open_cursors = el->next; + efree(el); + php_error_docref(NULL TSRMLS_CC, E_WARNING, "Attempting to end a transaction without closing its child cursors."); + } + /* should handle open dbs with pending transactions */ + } +} + + +void setDbc(zval *z, DBC *dbc, struct php_DB_TXN *txn TSRMLS_DC) +{ + long rsrc_id; + struct php_DBC *pdbc = emalloc(sizeof(*pdbc)); + memset(pdbc, 0, sizeof(*pdbc)); + pdbc->dbc = dbc; + if(txn) { + pdbc->parent_txn = txn; + txn->open_cursors = my_llist_add(txn->open_cursors, pdbc); + } + rsrc_id = zend_register_resource(NULL, pdbc, le_dbc); + zend_list_addref(rsrc_id); + add_property_resource(z, "_dbc_ptr", rsrc_id); +} + +DBC *php_db4_getDbcFromObj(zval *z TSRMLS_DC) +{ + struct php_DBC *pdbc; + zval **rsrc; + if(zend_hash_find(HASH_OF(z), "_dbc_ptr", sizeof("_dbc_ptr"), + (void **) &rsrc) == SUCCESS) + { + pdbc = (struct php_DBC *) zend_fetch_resource(rsrc TSRMLS_CC, -1, "Db4Cursor", NULL, 1, le_dbc); + return pdbc->dbc; + } + return NULL; +} + +struct php_DBC *getPhpDbcFromObj(zval *z TSRMLS_DC) +{ + struct php_DBC *pdbc; + zval **rsrc; + if(zend_hash_find(HASH_OF(z), "_dbc_ptr", sizeof("_dbc_ptr"), + (void **) &rsrc) == SUCCESS) + { + pdbc = (struct php_DBC *) zend_fetch_resource(rsrc TSRMLS_CC, -1, "Db4Cursor", NULL, 1, le_dbc); + return pdbc; + } + return NULL; +} + +#define getDbcFromThis(a) \ +do { \ + zval *_this = getThis(); \ + if(!_this) { \ + php_error_docref(NULL TSRMLS_CC, E_WARNING, "must be called as a method"); \ + RETURN_FALSE; \ + } \ + (a) = php_db4_getDbcFromObj(_this TSRMLS_CC); \ + if(!(a)) { \ + php_error_docref(NULL TSRMLS_CC, E_WARNING, "not a valid db4Cursor object"); \ + RETURN_FALSE; \ + } \ +} while(0) + +int closeDbc(zval *obj TSRMLS_DC) +{ + int ret = 0; + struct php_DBC *pdbc = getPhpDbcFromObj(obj TSRMLS_CC); + if(pdbc) { + if(pdbc->parent_txn) { + pdbc->parent_txn->open_cursors = + my_llist_del(pdbc->parent_txn->open_cursors, pdbc); + } + ret = pdbc->dbc->c_close(pdbc->dbc); + pdbc->dbc = NULL; + pdbc->parent_txn = NULL; + } + return ret; +} + +/* }}} */ + +/* {{{ DB4Txn method definitions + */ + +/* {{{ proto bool Db4Txn::abort() + */ +ZEND_NAMED_FUNCTION(_wrap_db_txn_abort) +{ + struct php_DB_TXN *ptxn; + zval **open_cursors; + zval *this; + int ret; + + if(ZEND_NUM_ARGS()) { + WRONG_PARAM_COUNT; + } + this = getThis(); + getPhpDbTxnFromThis(ptxn); + closeDbTxnDependencies(this TSRMLS_CC); + if((ret = ptxn->db_txn->abort(ptxn->db_txn)) != 0) { + php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", db_strerror(ret)); + RETURN_FALSE; + } + ptxn->db_txn = NULL; + RETURN_TRUE; +} +/* }}} */ + +/* {{{ proto bool Db4Txn::commit() + */ +ZEND_NAMED_FUNCTION(_wrap_db_txn_commit) +{ + struct php_DB_TXN *ptxn; + u_int32_t flags = 0; + int ret; + zval *this; + zval **open_cursors; + + this = getThis(); + getPhpDbTxnFromThis(ptxn); + closeDbTxnDependencies(this TSRMLS_CC); + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|l", &flags) == FAILURE) + { + return; + } + if((ret = ptxn->db_txn->commit(ptxn->db_txn, flags)) != 0) { + php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", db_strerror(ret)); + RETURN_FALSE; + } + ptxn->db_txn = NULL; + RETURN_TRUE; +} +/* }}} */ + +/* {{{ proto bool Db4Txn::discard() + */ +ZEND_NAMED_FUNCTION(_wrap_db_txn_discard) +{ + struct php_DB_TXN *ptxn; 
+ int ret; + zval *this; + zval **open_cursors; + + this = getThis(); + getPhpDbTxnFromThis(ptxn); + closeDbTxnDependencies(this TSRMLS_CC); + if(ZEND_NUM_ARGS()) WRONG_PARAM_COUNT; + if((ret = ptxn->db_txn->discard(ptxn->db_txn, 0)) != 0) { + php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", db_strerror(ret)); + RETURN_FALSE; + } + ptxn->db_txn = NULL; + /* FIXME should destroy $this */ + RETURN_TRUE; +} +/* }}} */ + +/* {{{ proto long Db4Txn::id() + */ +ZEND_NAMED_FUNCTION(_wrap_db_txn_id) +{ + DB_TXN *txn; + int ret; + + getDbTxnFromThis(txn); + if(ZEND_NUM_ARGS()) WRONG_PARAM_COUNT; + RETURN_LONG(txn->id(txn)); +} +/* }}} */ + +/* {{{ proto bool Db4Txn::set_timeout(long $timeout [, long $flags]) + */ +ZEND_NAMED_FUNCTION(_wrap_db_txn_set_timeout) +{ + DB_TXN *txn; + u_int32_t flags = 0; + long timeout; + int ret; + + getDbTxnFromThis(txn); + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l|l", &timeout, &flags) == FAILURE) + { + return; + } + if((ret = txn->set_timeout(txn, timeout, flags)) != 0) { + php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", db_strerror(ret)); + RETURN_FALSE; + } + RETURN_TRUE; +} +/* }}} */ + +/* {{{ private Db4Txn::Db4Txn() + */ +ZEND_NAMED_FUNCTION(_wrap_new_DbTxn) +{ + php_error_docref(NULL TSRMLS_CC, E_ERROR, "DB4Txn objects must be created with Db4Env::begin_txn()"); +} +/* }}} */ + +/* }}} */ + + +/* {{{ DB4 method definitions + */ + +/* {{{ proto object DB4::DB4([object $dbenv]) + */ +ZEND_NAMED_FUNCTION(_wrap_new_db4) +{ + DB *db; + DB_ENV *dbenv = NULL; + zval *dbenv_obj = NULL; + zval *this; + int ret, autocommit = 0; + + this = getThis(); + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|O", + &dbenv_obj, db_env_ce) == FAILURE) + { + return; + } + if(dbenv_obj) { + dbenv = php_db4_getDbEnvFromObj(dbenv_obj TSRMLS_CC); + zval_add_ref(&dbenv_obj); + add_property_zval(this, "dbenv", dbenv_obj); + autocommit = 1; + } + if((ret = my_db_create(&db, dbenv, 0)) != 0) { + php_error_docref(NULL TSRMLS_CC, + E_WARNING, "error occurred during open"); + RETURN_FALSE; + } + setDb(this, db, autocommit TSRMLS_CC); +} +/* }}} */ + +/* {{{ proto bool DB4::open([object $txn [, string $file [, string $database [, long $flags [, long $mode]]]]]) + */ +ZEND_NAMED_FUNCTION(_wrap_db_open) +{ + DB *db = NULL; + DB_TXN *dbtxn = NULL; + zval *dbtxn_obj = NULL; + char *file = NULL, *database = NULL; + long filelen = 0, databaselen = 0; + DBTYPE type = DB_BTREE; + u_int32_t flags = DB_CREATE; + int mode = 0; + int ret; + u_int32_t autocommit; + + zval *this; + this = getThis(); + getDbFromThis(db, autocommit); + + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|O!sslll", + &dbtxn_obj, db_txn_ce, + &file, &filelen, + &database, &databaselen, + &type, &flags, &mode) == FAILURE) + { + return; + } + if(dbtxn_obj) { + dbtxn = php_db4_getDbTxnFromObj(dbtxn_obj TSRMLS_CC); + } else if(autocommit) { + flags |= DB_AUTO_COMMIT; + } + add_property_string(this, "file", file, 1); + add_property_string(this, "database", database, 1); + if(strcmp(file, "") == 0) file = NULL; + if(strcmp(database, "") == 0) database = NULL; + /* add type and other introspection data */ + if((ret = db->open(db, dbtxn, file, database, type, flags, mode)) == 0) { + RETURN_TRUE; + } + else { + php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", db_strerror(ret)); + add_property_string(this, "lastError", db_strerror(ret), 1); + RETURN_FALSE; + } +} +/* }}} */ + +/* {{{ proto bool DB4::close() + */ +ZEND_NAMED_FUNCTION(_wrap_db_close) +{ + DB *db = NULL; + int autocommit; + getDbFromThis(db, 
autocommit); + + if(ZEND_NUM_ARGS() TSRMLS_CC) { + WRONG_PARAM_COUNT; + } + db->close(db, 0); + RETURN_TRUE; +} +/* }}} */ + +/* {{{ proto bool DB4::del(string $key [, object $txn]) + */ +ZEND_NAMED_FUNCTION(_wrap_db_del) +{ + DB *db = NULL; + DB_TXN *txn = NULL; + zval *txn_obj = NULL; + u_int32_t flags; + DBT key; + char *keyname; + int keylen, autocommit; + + getDbFromThis(db, autocommit); + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|O", &keyname, &keylen, + &txn_obj, db_txn_ce) == FAILURE) + { + return; + } + if(txn_obj) { + getDbTxnFromThis(txn); + flags = 0; + } else if(autocommit) { + flags = DB_AUTO_COMMIT; + } + memset(&key, 0, sizeof(DBT)); + key.data = keyname; + key.size = keylen; + RETURN_LONG(db->del(db, txn, &key, flags)); +} +/* }}} */ + +/* {{{ proto string DB4::get(string $key [,object $txn [, long flags]]) + */ +ZEND_NAMED_FUNCTION(_wrap_db_get) +{ + DB *db = NULL; + DB_TXN *txn = NULL; + zval *txn_obj = NULL; + DBT key, value; + char *keyname; + int keylen, autocommit; + u_int32_t flags = 0; + + getDbFromThis(db, autocommit); + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|Ol", &keyname, &keylen, + &txn_obj, db_txn_ce, &flags) == FAILURE) + { + return; + } + if(txn_obj) { + txn = php_db4_getDbTxnFromObj(txn_obj); + } else if (((flags & DB_CONSUME) || (flags & DB_CONSUME_WAIT)) && autocommit) { + flags |= DB_AUTO_COMMIT; + } + memset(&key, 0, sizeof(DBT)); + key.data = keyname; + key.size = keylen; + memset(&value, 0, sizeof(DBT)); + if(db->get(db, txn, &key, &value, flags) == 0) { + RETURN_STRINGL(value.data, value.size, 1); + } + RETURN_FALSE; +} +/* }}} */ + +/* {{{ proto string DB4::pget(string $key, string &$pkey [,object $txn [, long flags]]) + */ +ZEND_NAMED_FUNCTION(_wrap_db_pget) +{ + DB *db = NULL; + DB_TXN *txn = NULL; + zval *txn_obj = NULL; + DBT key, value, pkey; + char *keyname; + int keylen, autocommit; + zval *z_pkey; + u_int32_t flags = 0; + + getDbFromThis(db, autocommit); + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sz|Ol", + &keyname, &keylen, &z_pkey, + &txn_obj, db_txn_ce, &flags) == FAILURE) + { + return; + } + if(txn_obj) { + txn = php_db4_getDbTxnFromObj(txn_obj); + } else if (((flags & DB_CONSUME) || (flags & DB_CONSUME_WAIT)) && autocommit) { + flags |= DB_AUTO_COMMIT; + } + memset(&key, 0, sizeof(DBT)); + key.data = keyname; + key.size = keylen; + memset(&pkey, 0, sizeof(DBT)); + memset(&value, 0, sizeof(DBT)); + if(db->pget(db, txn, &key, &pkey, &value, flags) == 0) { + if(Z_STRLEN_P(z_pkey) == 0) { + Z_STRVAL_P(z_pkey) = emalloc(pkey.size); + } else { + Z_STRVAL_P(z_pkey) = erealloc(Z_STRVAL_P(z_pkey), pkey.size); + } + memcpy(Z_STRVAL_P(z_pkey), pkey.data, pkey.size); + Z_STRLEN_P(z_pkey) = pkey.size; + RETURN_STRINGL(value.data, value.size, 1); + } + RETURN_FALSE; +} +/* }}} */ + +/* {{{ proto string DB4::get_type() + */ +ZEND_NAMED_FUNCTION(_wrap_db_get_type) +{ + DB *db = NULL; + DBTYPE type; + int autocommit; + + getDbFromThis(db, autocommit); + if(db->get_type(db, &type)) { + RETURN_FALSE; + } + switch(type) { + case DB_BTREE: + RETURN_STRING("DB_BTREE", 1); + break; + case DB_HASH: + RETURN_STRING("DB_HASH", 1); + break; + case DB_RECNO: + RETURN_STRING("DB_RECNO", 1); + break; + case DB_QUEUE: + RETURN_STRING("DB_QUEUE", 1); + break; + default: + RETURN_STRING("UNKNOWN", 1); + break; + } +} +/* }}} */ + +/* {{{ proto array DB4::stat() + */ +ZEND_NAMED_FUNCTION(_wrap_db_stat) +{ + DB *db = NULL; + DBTYPE type; + int autocommit; + u_int32_t flags = 0; + + getDbFromThis(db, autocommit); + 
if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|l", &flags) == FAILURE) { + return; + } + if(db->get_type(db, &type)) { + RETURN_FALSE; + } + switch(type) { +#define ADD_STAT_LONG(a) add_assoc_long(return_value, #a, sb.a) + case DB_HASH: + { + DB_HASH_STAT sb; + if(db->stat(db, (void *)&sb, flags)) { + RETURN_FALSE; + } + array_init(return_value); + if(flags & DB_FAST_STAT) { + ADD_STAT_LONG(hash_magic); + ADD_STAT_LONG(hash_version); + ADD_STAT_LONG(hash_nkeys); + ADD_STAT_LONG(hash_ndata); + ADD_STAT_LONG(hash_pagesize); + ADD_STAT_LONG(hash_ffactor); + ADD_STAT_LONG(hash_buckets); + } + ADD_STAT_LONG(hash_free); + ADD_STAT_LONG(hash_bfree); + ADD_STAT_LONG(hash_bigpages); + ADD_STAT_LONG(hash_bfree); + ADD_STAT_LONG(hash_overflows); + ADD_STAT_LONG(hash_ovfl_free); + ADD_STAT_LONG(hash_dup); + ADD_STAT_LONG(hash_dup_free); + } + break; + case DB_BTREE: + case DB_RECNO: + { + DB_BTREE_STAT sb; + if(db->stat(db, (void *)&sb, flags)) { + RETURN_FALSE; + } + array_init(return_value); + if(flags & DB_FAST_STAT) { + ADD_STAT_LONG(bt_magic); + ADD_STAT_LONG(bt_version); + ADD_STAT_LONG(bt_nkeys); + ADD_STAT_LONG(bt_ndata); + ADD_STAT_LONG(bt_pagesize); + ADD_STAT_LONG(bt_minkey); + ADD_STAT_LONG(bt_re_len); + ADD_STAT_LONG(bt_re_pad); + } + ADD_STAT_LONG(bt_levels); + ADD_STAT_LONG(bt_int_pg); + ADD_STAT_LONG(bt_leaf_pg); + ADD_STAT_LONG(bt_dup_pg); + ADD_STAT_LONG(bt_over_pg); + ADD_STAT_LONG(bt_free); + ADD_STAT_LONG(bt_int_pgfree); + ADD_STAT_LONG(bt_leaf_pgfree); + ADD_STAT_LONG(bt_dup_pgfree); + ADD_STAT_LONG(bt_over_pgfree); + } + break; + case DB_QUEUE: + { + DB_QUEUE_STAT sb; + if(db->stat(db, (void *)&sb, flags)) { + RETURN_FALSE; + } + array_init(return_value); + if(flags & DB_FAST_STAT) { + ADD_STAT_LONG(qs_magic); + ADD_STAT_LONG(qs_version); + ADD_STAT_LONG(qs_nkeys); + ADD_STAT_LONG(qs_ndata); + ADD_STAT_LONG(qs_pagesize); + ADD_STAT_LONG(qs_extentsize); + ADD_STAT_LONG(qs_re_len); + ADD_STAT_LONG(qs_re_pad); + ADD_STAT_LONG(qs_first_recno); + ADD_STAT_LONG(qs_cur_recno); + } + ADD_STAT_LONG(qs_pages); + ADD_STAT_LONG(qs_pgfree); + break; + } + default: + RETURN_FALSE; + } +} +/* }}} */ + +/* {{{ proto DBCursor DB4::join(array $curslist [, long $flags]) + */ +ZEND_NAMED_FUNCTION(_wrap_db_join) +{ + DB *db = NULL; + DBC *dbcp; + DBC **curslist; + zval *z_array; + HashTable *array; + HashPosition pos; + zval **z_cursor; + int num_cursors, rv, autocommit, i; + + u_int32_t flags = 0; + + getDbFromThis(db, autocommit); + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a|l", + &z_array, &flags) == FAILURE) + { + return; + } + array = HASH_OF(z_array); + num_cursors = zend_hash_num_elements(array); + curslist = (DBC **) calloc(sizeof(DBC *), num_cursors + 1); + for(zend_hash_internal_pointer_reset_ex(array, &pos), i=0; + zend_hash_get_current_data_ex(array, (void **) &z_cursor, &pos) == SUCCESS; + zend_hash_move_forward_ex(array, &pos), i++) { + curslist[i] = php_db4_getDbcFromObj(*z_cursor); + } + rv = db->join(db, curslist, &dbcp, flags); + free(curslist); + if(rv) { + RETURN_FALSE; + } else { + object_init_ex(return_value, dbc_ce); + setDbc(return_value, dbcp, NULL TSRMLS_CC); + } +} +/* }}} */ + +/* {{{ proto bool DB4::put(string $key, string $value [, object $txn [, long flags]]) + */ +ZEND_NAMED_FUNCTION(_wrap_db_put) +{ + DB *db = NULL; + DB_TXN *txn = NULL; + zval *txn_obj = NULL; + DBT key, value; + char *keyname, *dataname; + int keylen, datalen; + int ret, autocommit; + zval *this; + long flags = 0; + + this = getThis(); + getDbFromThis(db, autocommit); + 
if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ss|Ol", &keyname, &keylen, + &dataname, &datalen, &txn_obj, db_txn_ce, &flags) == FAILURE) + { + return; + } + if(txn_obj) { + txn = php_db4_getDbTxnFromObj(txn_obj TSRMLS_CC); + } else if (autocommit) { + flags |= DB_AUTO_COMMIT; + } + memset(&key, 0, sizeof(DBT)); + key.data = keyname; + key.size = keylen; + memset(&value, 0, sizeof(DBT)); + value.data = dataname; + value.size = datalen; + if((ret = db->put(db, txn, &key, &value, flags)) != 0) { + php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", db_strerror(ret)); + add_property_string(this, "lastError", db_strerror(ret), 1); + RETURN_FALSE; + } + RETURN_TRUE; +} +/* }}} */ + +/* {{{ proto bool DB4::sync() + */ +ZEND_NAMED_FUNCTION(_wrap_db_sync) +{ + int autocommit; + DB *db = NULL; + getDbFromThis(db, autocommit); + if(ZEND_NUM_ARGS()) { + WRONG_PARAM_COUNT; + } + db->sync(db, 0); + RETURN_TRUE; +} +/* }}} */ + +/* {{{ proto bool DB4::truncate([object $txn [, long $flags]]) + */ +ZEND_NAMED_FUNCTION(_wrap_db_truncate) +{ + DB *db = NULL; + DB_TXN *txn = NULL; + zval *txn_obj = NULL; + long flags = DB_AUTO_COMMIT; + int countp, autocommit; + + getDbFromThis(db, autocommit); + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|Ol", + &txn_obj, db_txn_ce, &flags) == FAILURE) + { + return; + } + if(txn_obj) { + txn = php_db4_getDbTxnFromObj(txn_obj TSRMLS_CC); + flags = 0; + } else if (autocommit) { + flags = DB_AUTO_COMMIT; + } + if(db->truncate(db, txn, &countp, flags) == 0) { + RETURN_LONG(countp); + } + RETURN_FALSE; +} +/* }}} */ + +/* {{{ proto DB4Cursor DB4::cursor([object $txn [, long flags]]) + */ +ZEND_NAMED_FUNCTION(_wrap_db_cursor) +{ + DB *db; + DB_TXN *txn = NULL; + zval *txn_obj = NULL, *this; + DBC *cursor; + u_int32_t flags = 0; + int ret, autocommit; + + this = getThis(); + getDbFromThis(db, autocommit); + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|Ol", &txn_obj, db_txn_ce, &flags) == FAILURE) + { + return; + } + if(txn_obj) { + txn = php_db4_getDbTxnFromObj(txn_obj TSRMLS_CC); + } + if((ret = db->cursor(db, txn, &cursor, flags)) != 0 ) { + php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", db_strerror(ret)); + add_property_string(this, "lastError", db_strerror(ret), 1); + RETURN_FALSE; + } + else { + object_init_ex(return_value, dbc_ce); + setDbc(return_value, cursor, getPhpDbTxnFromObj(txn_obj TSRMLS_CC) TSRMLS_CC); + } + +} +/* }}} */ + +/* }}} end DB4 method definitions */ + +/* {{{ DB4Cursor method definitions + */ + +/* {{{ proto bool Db4Cursor::close() + */ +ZEND_NAMED_FUNCTION(_wrap_dbc_close) +{ + DBC *dbc; + int ret; + zval **parent_txn, **open_cursors; + zval *this; + HashPosition pos; + + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "") == FAILURE) return; + this = getThis(); + getDbcFromThis(dbc); + if((ret = closeDbc(this TSRMLS_CC)) != 0) { + php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", db_strerror(ret)); + RETURN_FALSE; + } + RETURN_TRUE; +} +/* }}} */ + +/* {{{ proto long Db4Cursor::count() + */ +ZEND_NAMED_FUNCTION(_wrap_dbc_count) +{ + DBC *dbc; + db_recno_t count; + int ret; + + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "") == FAILURE) return; + getDbcFromThis(dbc); + if((ret = dbc->c_count(dbc, &count, 0)) != 0) { + php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", db_strerror(ret)); + RETURN_FALSE; + } + RETURN_LONG(count); +} +/* }}} */ + +/* {{{ proto bool Db4Cursor::del() + */ +ZEND_NAMED_FUNCTION(_wrap_dbc_del) +{ + DBC *dbc; + int ret; + + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "") == FAILURE) 
return; + getDbcFromThis(dbc); + if((ret = dbc->c_del(dbc, 0)) != 0) { + if(ret != DB_KEYEMPTY) { + php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", db_strerror(ret)); + } + RETURN_FALSE; + } + RETURN_TRUE; +} +/* }}} */ + +/* {{{ proto object Db4Cursor::dup([long $flags]) + */ +ZEND_NAMED_FUNCTION(_wrap_dbc_dup) +{ + DBC *dbc, *newdbc; + u_int32_t flags = 0; + int ret; + + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|l", &flags) == FAILURE) return; + getDbcFromThis(dbc); + if((ret = dbc->c_dup(dbc, &newdbc, flags)) != 0) { + php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", db_strerror(ret)); + RETURN_FALSE; + } + object_init_ex(return_value, dbc_ce); + /* FIXME should pass in dbc's parent txn */ + setDbc(return_value, newdbc, NULL TSRMLS_CC); +} +/* }}} */ + +/* {{{ proto string Db4Cursor::get(string $key [, long $flags]) + */ +ZEND_NAMED_FUNCTION(_wrap_dbc_get) +{ + DBC *dbc; + DBT key, value; + char *keyname; + int keylen; + u_int32_t flags = 0; + zval *this; + int ret; + + this = getThis(); + getDbcFromThis(dbc); + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|l", &keyname, &keylen, &flags) == FAILURE) + { + return; + } + memset(&key, 0, sizeof(DBT)); + key.data = keyname; + key.size = keylen; + memset(&value, 0, sizeof(DBT)); + if((ret = dbc->c_get(dbc, &key, &value, flags)) == 0) { + RETURN_STRINGL(value.data, value.size, 1); + } + php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", db_strerror(ret)); + add_property_string(this, "lastError", db_strerror(ret), 1); + RETURN_FALSE; +} +/* }}} */ + +/* {{{ proto string Db4Cursor::pget(string $key, string &$primary_key [, long flags]) + */ +ZEND_NAMED_FUNCTION(_wrap_dbc_pget) +{ + DBC *dbc; + DBT key, pkey, value; + char *keyname; + int keylen; + u_int32_t flags = 0; + zval *this, *z_pkey; + int ret; + + this = getThis(); + getDbcFromThis(dbc); + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sz|l", &keyname, &keylen, &z_pkey, &flags) == FAILURE) + { + return; + } + memset(&key, 0, sizeof(DBT)); + key.data = keyname; + key.size = keylen; + memset(&pkey, 0, sizeof(DBT)); + memset(&value, 0, sizeof(DBT)); + if((ret = dbc->c_pget(dbc, &key, &pkey, &value, flags)) == 0) { + if(Z_STRLEN_P(z_pkey) == 0) { + Z_STRVAL_P(z_pkey) = emalloc(pkey.size); + } else { + Z_STRVAL_P(z_pkey) = erealloc(Z_STRVAL_P(z_pkey), pkey.size); + } + memcpy(Z_STRVAL_P(z_pkey), pkey.data, pkey.size); + Z_STRLEN_P(z_pkey) = pkey.size; + RETURN_STRINGL(value.data, value.size, 1); + } + php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", db_strerror(ret)); + add_property_string(this, "lastError", db_strerror(ret), 1); + RETURN_FALSE; +} +/* }}} */ + +/* {{{ proto bool Db4Cursor::put(string $key, string $data [, long $flags]) + */ +ZEND_NAMED_FUNCTION(_wrap_dbc_put) +{ + DBC *dbc; + DBT key, value; + char *keyname, *dataname; + int keylen, datalen; + u_int32_t flags = 0; + int ret; + + getDbcFromThis(dbc); + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ss|l", &keyname, &keylen, + &dataname, &datalen, &flags) == FAILURE) + { + return; + } + memset(&key, 0, sizeof(DBT)); + key.data = keyname; + key.size = keylen; + memset(&value, 0, sizeof(DBT)); + value.data = dataname; + value.size = datalen; + if((ret = dbc->c_put(dbc, &key, &value, flags)) == 0) { + RETURN_TRUE; + } + php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", db_strerror(ret)); + RETURN_FALSE; + +} +/* }}} */ + +/* }}} */ + +/* {{{ DB4Env method definitions + */ + +/* {{{ php_db4_error ( zend_error wrapper ) + */ + +void php_db4_error(const char *errpfx, char *msg) +{ + 
php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s %s\n", errpfx, msg); +} +/* }}} */ + +/* {{{ proto object DB4Env::Db4Env([long $flags]) + */ +ZEND_NAMED_FUNCTION(_wrap_new_DbEnv) +{ + DB_ENV *dbenv; + u_int32_t flags = 0; + + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|l", &flags) == FAILURE) + { + return; + } + if(my_db_env_create(&dbenv, flags) != 0) { + php_error_docref(NULL TSRMLS_CC, E_WARNING, "bad things here: %s:%d\n", __FILE__, __LINE__); + RETURN_FALSE; + } + dbenv->set_errcall(dbenv, php_db4_error); + setDbEnv(this_ptr, dbenv TSRMLS_CC); +} +/* }}} */ + +/* {{{ proto bool DB4Env::close([long $flags]) + */ +ZEND_NAMED_FUNCTION(_wrap_db_env_close) +{ + DB_ENV *dbenv; + u_int32_t flags = 0; + + getDbEnvFromThis(dbenv); + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|l", &flags) == FAILURE) { + RETURN_FALSE; + } + RETURN_BOOL(dbenv->close(dbenv, flags)); +} +/* }}} */ + +/* {{{ proto bool DB4Env::dbremove(object $txn, string $file [, string $database [, long flags]]) + */ +ZEND_NAMED_FUNCTION(_wrap_db_env_dbremove) +{ + DB_ENV *dbenv; + DB_TXN *txn; + zval *txn_obj; + char *filename=NULL, *database=NULL; + int filenamelen, databaselen; + u_int32_t flags = 0; + + getDbEnvFromThis(dbenv); + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "O!s|sl", &txn_obj, db_txn_ce, + &filename, &filenamelen, &database, &databaselen, &flags) == FAILURE) + { + return; + } + if(txn_obj) { + txn = php_db4_getDbTxnFromObj(txn_obj TSRMLS_CC); + flags = 0; + } + if(dbenv->dbremove(dbenv, txn, filename, database, flags) == 0) { + RETURN_TRUE; + } + RETURN_FALSE; +} +/* }}} */ + +/* {{{ proto bool DB4Env::dbrename(object $txn, string $file, string $database, string $newdatabase [, long flags]) + */ +ZEND_NAMED_FUNCTION(_wrap_db_env_dbrename) +{ + DB_ENV *dbenv; + DB_TXN *txn; + zval *txn_obj; + char *filename=NULL, *database=NULL, *newname=NULL; + int filenamelen, databaselen, newnamelen; + u_int32_t flags = 0; + + getDbEnvFromThis(dbenv); + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "O!sss|l", &txn_obj, db_txn_ce, + &filename, &filenamelen, &database, &databaselen, + &newname, &newnamelen, &flags) == FAILURE) + { + return; + } + if(txn_obj) { + txn = php_db4_getDbTxnFromObj(txn_obj TSRMLS_CC); + flags = 0; + } + if(dbenv->dbrename(dbenv, txn, filename, database, newname, flags) == 0) { + RETURN_TRUE; + } + RETURN_FALSE; +} +/* }}} */ + +/* {{{ proto bool DB4Env::open(string $home [, long flags [, long mode]]) + */ +ZEND_NAMED_FUNCTION(_wrap_db_env_open) +{ + DB_ENV *dbenv; + zval *this; + char *home; + long homelen; + u_int32_t flags = DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | \ + DB_INIT_MPOOL | DB_INIT_TXN ; + int mode = 0666; + int ret; + + getDbEnvFromThis(dbenv); + this = getThis(); + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|ll", &home, &homelen, + &flags, &mode) == FAILURE) + { + return; + } + if((ret = dbenv->open(dbenv, home, flags, mode) != 0)) { + php_error_docref(NULL TSRMLS_CC, E_WARNING, "open(%s, %d, %o) failed: %s (%d) %s:%d\n", home, flags, mode, strerror(ret), ret, __FILE__, __LINE__); + RETURN_FALSE; + } + add_property_stringl(this, "home", home, homelen, 1); +} +/* }}} */ + +/* {{{ proto bool DB4Env::remove(string $home [, long flags]) + */ +ZEND_NAMED_FUNCTION(_wrap_db_env_remove) +{ + DB_ENV *dbenv; + zval *this; + char *home; + long homelen; + u_int32_t flags = 0; + this = getThis(); + getDbEnvFromThis(dbenv); + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|l", &home, &homelen, &flags) == FAILURE) + { + return; + } + 
RETURN_BOOL(dbenv->remove(dbenv, home, flags)?0:1); +} +/* }}} */ + +/* {{{ proto bool DB4Env::set_data_dir(string $dir) + */ +ZEND_NAMED_FUNCTION(_wrap_db_env_set_data_dir) +{ + DB_ENV *dbenv; + zval *this; + char *dir; + long dirlen; + this = getThis(); + getDbEnvFromThis(dbenv); + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &dir, &dirlen) == FAILURE) + { + return; + } + RETURN_BOOL(dbenv->set_data_dir(dbenv, dir)?0:1); +} +/* }}} */ + +/* {{{ proto object Db4Env::txn_begin([object $parent_txn [, long $flags]]) + */ +ZEND_NAMED_FUNCTION(_wrap_db_env_txn_begin) +{ + DB_ENV *dbenv; + DB_TXN *txn, *parenttxn = NULL; + zval *this; + zval *cursor_array; + zval *parenttxn_obj = NULL; + u_int32_t flags = 0; + int ret; + + this = getThis(); + getDbEnvFromThis(dbenv); + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|Ol", &parenttxn_obj, db_txn_ce, + &flags) == FAILURE) + { + return; + } + if(parenttxn_obj) { + parenttxn = php_db4_getDbTxnFromObj(parenttxn_obj TSRMLS_CC); + } + if((ret = dbenv->txn_begin(dbenv, parenttxn, &txn, flags)) != 0) { + php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", db_strerror(ret)); + add_property_string(this, "lastError", db_strerror(ret), 1); + RETURN_FALSE; + } + object_init_ex(return_value, db_txn_ce); + MAKE_STD_ZVAL(cursor_array); + array_init(cursor_array); + add_property_zval(return_value, "openCursors", cursor_array); + setDbTxn(return_value, txn TSRMLS_CC); +} +/* }}} */ + +/* {{{ Db4Env::txn_checkpoint(long $kbytes, long $minutes [, long $flags]) + */ +ZEND_NAMED_FUNCTION(_wrap_db_env_txn_checkpoint) +{ + DB_ENV *dbenv; + zval *this; + u_int32_t kbytes = 0; + u_int32_t mins = 0; + u_int32_t flags = 0; + int ret; + + this = getThis(); + getDbEnvFromThis(dbenv); + if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ll|l", &kbytes, &mins, &flags) == FAILURE) + { + return; + } + if((ret = dbenv->txn_checkpoint(dbenv, kbytes, mins, flags)) != 0) { + php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", db_strerror(ret)); + add_property_string(this, "lastError", db_strerror(ret), 1); + RETURN_FALSE; + } + RETURN_TRUE; +} +/* }}} */ + +/* }}} end db4env */ + +/* + * Local variables: + * tab-width: 4 + * c-basic-offset: 4 + * End: + * vim600: noet sw=4 ts=4 fdm=marker + * vim<600: noet sw=4 ts=4 + */ diff --git a/db/php_db4/php_db4.h b/db/php_db4/php_db4.h new file mode 100644 index 000000000..139a44cee --- /dev/null +++ b/db/php_db4/php_db4.h @@ -0,0 +1,75 @@ +/*- + * Copyright (c) 2004 + * Sleepycat Software. All rights reserved. + * + * http://www.apache.org/licenses/LICENSE-2.0.txt + * + * authors: Thies C. 
Arntzen + * Sterling Hughes + * George Schlossnagle + */ + +#ifndef PHP_DB4_H +#define PHP_DB4_H + +extern zend_module_entry db4_module_entry; +#define phpext_db4_ptr &db4_module_entry + +#ifdef PHP_WIN32 +#define PHP_DB4_API __declspec(dllexport) +#else +#define PHP_DB4_API +#endif + +#ifdef ZTS +#include "TSRM.h" +#endif + +#include "db.h" + +zend_class_entry *db_txn_ce_get(void); +zend_class_entry *dbc_ce_get(void); +zend_class_entry *db_env_ce_get(void); +zend_class_entry *db_ce_get(void); + +DB_ENV *php_db4_getDbEnvFromObj(zval *z); +DB *php_db4_getDbFromObj(zval *z); +DB_TXN *php_db4_getDbTxnFromObj(zval *z); +DBC *php_db4_getDbcFromObj(zval *z); + +/* + Declare any global variables you may need between the BEGIN + and END macros here: + +ZEND_BEGIN_MODULE_GLOBALS(db4) + long global_value; + char *global_string; +ZEND_END_MODULE_GLOBALS(db4) +*/ + +/* In every utility function you add that needs to use variables + in php_db4_globals, call TSRM_FETCH(); after declaring other + variables used by that function, or better yet, pass in TSRMLS_CC + after the last function argument and declare your utility function + with TSRMLS_DC after the last declared argument. Always refer to + the globals in your function as DB4_G(variable). You are + encouraged to rename these macros something shorter, see + examples in any other php module directory. +*/ + +#ifdef ZTS +#define DB4_G(v) TSRMG(db4_globals_id, zend_db4_globals *, v) +#else +#define DB4_G(v) (db4_globals.v) +#endif + +#endif /* PHP_DB4_H */ + + +/* + * Local variables: + * tab-width: 4 + * c-basic-offset: 4 + * indent-tabs-mode: t + * End: + */ diff --git a/db/qam/qam.c b/db/qam/qam.c index f90ccce07..9c0b4812f 100644 --- a/db/qam/qam.c +++ b/db/qam/qam.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: qam.c,v 11.186 2004/09/22 16:29:47 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: qam.c,v 11.159 2003/11/18 21:32:17 ubell Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -53,7 +51,7 @@ __qam_position(dbc, recnop, mode, exactp) DB *dbp; QAMDATA *qp; db_pgno_t pg; - int ret; + int ret, t_ret; dbp = dbc->dbp; cp = (QUEUE_CURSOR *)dbc->internal; @@ -68,11 +66,13 @@ __qam_position(dbc, recnop, mode, exactp) *exactp = 0; if ((ret = __qam_fget(dbp, &pg, mode == QAM_WRITE ? DB_MPOOL_CREATE : 0, &cp->page)) != 0) { - /* We did not fetch it, we can release the lock. */ - (void)__LPUT(dbc, cp->lock); if (mode != QAM_WRITE && (ret == DB_PAGE_NOTFOUND || ret == ENOENT)) - return (0); + ret = 0; + + /* We did not fetch it, we can release the lock. 
*/ + if ((t_ret = __LPUT(dbc, cp->lock)) != 0 && ret == 0) + ret = t_ret; return (ret); } cp->pgno = pg; @@ -118,12 +118,12 @@ __qam_pitem(dbc, pagep, indx, recno, data) QAMDATA *qp; QUEUE *t; u_int8_t *dest, *p; - int alloced, ret; + int allocated, ret; dbp = dbc->dbp; dbenv = dbp->dbenv; t = (QUEUE *)dbp->q_internal; - alloced = ret = 0; + allocated = ret = 0; if (data->size > t->re_len) return (__db_rec_toobig(dbenv, data->size, t->re_len)); @@ -164,7 +164,7 @@ __qam_pitem(dbc, pagep, indx, recno, data) if ((ret = __os_malloc(dbenv, t->re_len, &datap->data)) != 0) return (ret); - alloced = 1; + allocated = 1; datap->size = t->re_len; /* @@ -204,7 +204,7 @@ no_partial: if (!F_ISSET(data, DB_DBT_PARTIAL)) memset(p + datap->size, t->re_pad, t->re_len - datap->size); -err: if (alloced) +err: if (allocated) __os_free(dbenv, datap->data); return (ret); @@ -249,7 +249,7 @@ __qam_c_put(dbc, key, data, flags, pgnop) default: /* The interface shouldn't let anything else through. */ DB_ASSERT(0); - return (__db_ferr(dbp->dbenv, "__qam_c_put", flags)); + return (__db_ferr(dbp->dbenv, "DBC->put", 0)); } /* Write lock the record. */ @@ -257,10 +257,9 @@ __qam_c_put(dbc, key, data, flags, pgnop) 0, cp->recno, DB_LOCK_WRITE, DB_LOCK_RECORD, &lock)) != 0) return (ret); - if ((ret = __qam_position(dbc, - &cp->recno, QAM_WRITE, &exact)) != 0) { + if ((ret = __qam_position(dbc, &cp->recno, QAM_WRITE, &exact)) != 0) { /* We could not get the page, we can release the record lock. */ - __LPUT(dbc, lock); + (void)__LPUT(dbc, lock); return (ret); } @@ -403,8 +402,10 @@ __qam_append(dbc, key, data) meta->cur_recno--; if (meta->cur_recno == RECNO_OOB) meta->cur_recno--; - (void)__LPUT(dbc, lock); - ret = EFBIG; + ret = __LPUT(dbc, lock); + + if (ret == 0) + ret = EFBIG; goto err; } @@ -462,8 +463,8 @@ __qam_append(dbc, key, data) if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) ret = t_ret; - if ((t_ret - = __qam_fput(dbp, pg, page, DB_MPOOL_DIRTY)) != 0 && ret == 0) + if ((t_ret = + __qam_fput(dbp, pg, page, DB_MPOOL_DIRTY)) != 0 && ret == 0) ret = t_ret; /* Return the record number to the user. */ @@ -478,18 +479,18 @@ __qam_append(dbc, key, data) qp = (QUEUE *) dbp->q_internal; if (qp->page_ext != 0 && (recno % (qp->page_ext * qp->rec_page) == 0 || - recno == UINT32_T_MAX)) { + recno == UINT32_MAX)) { if ((ret = __db_lget(dbc, 0, ((QUEUE *)dbp->q_internal)->q_meta, DB_LOCK_WRITE, 0, &lock)) != 0) goto err; if (!QAM_AFTER_CURRENT(meta, recno)) ret = __qam_fclose(dbp, pg); - (void)__LPUT(dbc, lock); + if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; } -err: - /* Release the meta page. */ +err: /* Release the meta page. */ if ((t_ret = __memp_fput(mpf, meta, DB_MPOOL_DIRTY)) != 0 && ret == 0) ret = t_ret; @@ -543,22 +544,20 @@ __qam_c_del(dbc) ret = t_ret; if (ret != 0) - goto err1; + goto err; if ((ret = __db_lget(dbc, 0, cp->recno, DB_LOCK_WRITE, DB_LOCK_RECORD, &lock)) != 0) - goto err1; - + goto err; cp->lock_mode = DB_LOCK_WRITE; + /* Find the record ; delete only deletes exact matches. 
*/ - if ((ret = __qam_position(dbc, - &cp->recno, QAM_WRITE, &exact)) != 0) { - cp->lock = lock; - goto err1; - } + if ((ret = __qam_position(dbc, &cp->recno, QAM_WRITE, &exact)) != 0) + goto err; + if (!exact) { ret = DB_NOTFOUND; - goto err1; + goto err; } pagep = cp->page; @@ -570,14 +569,14 @@ __qam_c_del(dbc) if ((ret = __qam_del_log(dbp, dbc->txn, &LSN(pagep), 0, &LSN(pagep), pagep->pgno, cp->indx, cp->recno)) != 0) - goto err1; + goto err; } else { data.size = ((QUEUE *)dbp->q_internal)->re_len; data.data = qp->data; if ((ret = __qam_delext_log(dbp, dbc->txn, &LSN(pagep), 0, &LSN(pagep), pagep->pgno, cp->indx, cp->recno, &data)) != 0) - goto err1; + goto err; } } @@ -587,14 +586,13 @@ __qam_c_del(dbc) pg = ((QUEUE *)dbp->q_internal)->q_meta; if ((ret = __db_lget(dbc, 0, pg, DB_LOCK_WRITE, 0, &metalock)) != 0) - goto err1; + goto err; ret = __qam_consume(dbc, meta, first); if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0) ret = t_ret; } -err1: - if ((t_ret = __memp_fput(mpf, meta, 0)) != 0 && ret == 0) +err: if ((t_ret = __memp_fput(mpf, meta, 0)) != 0 && ret == 0) ret = t_ret; if (cp->page != NULL && (t_ret = __qam_fput(dbp, cp->pgno, cp->page, ret == 0 ? DB_MPOOL_DIRTY : 0)) != 0 && ret == 0) @@ -639,7 +637,7 @@ __qam_c_get(dbc, key, data, flags, pgnop) db_pgno_t metapno; db_recno_t first; qam_position_mode mode; - int exact, is_first, locked, ret, t_ret, wait, with_delete; + int exact, inorder, is_first, locked, ret, t_ret, wait, with_delete; int put_mode, retrying; dbp = dbc->dbp; @@ -654,6 +652,7 @@ __qam_c_get(dbc, key, data, flags, pgnop) retrying = 0; lock_mode = DB_LOCK_READ; meta = NULL; + inorder = F_ISSET(dbp, DB_AM_INORDER); put_mode = 0; t_ret = 0; *pgnop = 0; @@ -701,7 +700,8 @@ __qam_c_get(dbc, key, data, flags, pgnop) first = 0; /* Release any previous lock if not in a transaction. */ - (void)__TLPUT(dbc, cp->lock); + if ((ret = __TLPUT(dbc, cp->lock)) != 0) + goto err; retry: /* Update the record number. */ switch (flags) { @@ -713,7 +713,7 @@ retry: /* Update the record number. */ /* NOTREACHED */ case DB_NEXT: case DB_NEXT_NODUP: - if (cp->recno != RECNO_OOB) { +get_next: if (cp->recno != RECNO_OOB) { ++cp->recno; /* Wrap around, skipping zero. */ if (cp->recno == RECNO_OOB) @@ -773,7 +773,8 @@ retry: /* Update the record number. */ lock_mode, 0, &metalock)) != 0) goto err; locked = 1; - if (cp->recno != RECNO_OOB && + if (cp->recno != meta->cur_recno && + cp->recno != RECNO_OOB && !QAM_AFTER_CURRENT(meta, cp->recno)) goto retry; } @@ -791,11 +792,13 @@ retry: /* Update the record number. */ if ((ret = __memp_fget( mpf, &metapno, 0, &meta)) != 0) goto err; - if ((ret = __lock_get(dbenv, - dbc->locker, DB_LOCK_UPGRADE, - &dbc->lock_dbt, DB_LOCK_WRITE, - &metalock)) != 0) + if ((ret = __db_lget(dbc, 0, + PGNO_INVALID, DB_LOCK_WRITE, + DB_LOCK_UPGRADE, &metalock)) != 0) { + if (ret == DB_LOCK_DEADLOCK) + ret = DB_LOCK_NOTGRANTED; goto err; + } locked = 1; goto retry; } @@ -838,8 +841,7 @@ retry: /* Update the record number. */ case DB_SET_RANGE: case DB_GET_BOTH: case DB_GET_BOTH_RANGE: - if ((ret = __qam_getno(dbp, - key, &cp->recno)) != 0) + if ((ret = __qam_getno(dbp, key, &cp->recno)) != 0) goto err; if (QAM_NOT_VALID(meta, cp->recno)) { ret = DB_NOTFOUND; @@ -884,13 +886,15 @@ retry: /* Update the record number. */ * since the first/last may have moved while we slept. * We release our locks and try again. 
*/ - if ((!with_delete && is_first) || flags == DB_LAST) { + if (((inorder || !with_delete) && is_first) || flags == DB_LAST) { +get_first: if ((ret = __db_lget(dbc, 0, metapno, lock_mode, 0, &metalock)) != 0) goto err; if (cp->recno != (is_first ? meta->first_recno : (meta->cur_recno - 1))) { - __LPUT(dbc, lock); + if ((ret = __LPUT(dbc, lock)) != 0) + goto err; if (is_first) flags = DB_FIRST; locked = 1; @@ -914,36 +918,44 @@ retry: /* Update the record number. */ cp->lock_mode = lock_mode; if (!exact) { - if (flags == DB_NEXT || flags == DB_NEXT_NODUP || - flags == DB_PREV || flags == DB_PREV_NODUP || - flags == DB_LAST) { - /* Release locks and try again. */ - if (pg != NULL) - (void)__qam_fput(dbp, cp->pgno, pg, 0); - cp->page = pg = NULL; - (void)__LPUT(dbc, pglock); - (void)__LPUT(dbc, cp->lock); - if (flags == DB_LAST) - flags = DB_PREV; +release_retry: /* Release locks and retry, if possible. */ + if (pg != NULL) + (void)__qam_fput(dbp, cp->pgno, pg, 0); + cp->page = pg = NULL; + if ((ret = __LPUT(dbc, pglock)) != 0) + goto err1; + + switch (flags) { + case DB_GET_BOTH_RANGE: + flags = DB_SET_RANGE; + /* FALLTHROUGH */ + case DB_NEXT: + case DB_NEXT_NODUP: + case DB_SET_RANGE: if (!with_delete) is_first = 0; + /* Peek at the meta page unlocked. */ + if (QAM_BEFORE_FIRST(meta, cp->recno)) + goto get_first; + /* FALLTHROUGH */ + case DB_PREV: + case DB_PREV_NODUP: + case DB_LAST: + if (flags == DB_LAST) + flags = DB_PREV; retrying = 0; - goto retry; - } - /* this is for the SET and SET_RANGE cases */ - ret = DB_KEYEMPTY; - goto err1; - } + if ((ret = __LPUT(dbc, cp->lock)) != 0) + goto err1; + if (flags == DB_SET_RANGE) + goto get_next; + else + goto retry; - /* Return the key if the user didn't give us one. */ - if (key != NULL) { - if (flags != DB_GET_BOTH && flags != DB_GET_BOTH_RANGE && - flags != DB_SET && flags != DB_SET_RANGE && - (ret = __db_retcopy(dbp->dbenv, - key, &cp->recno, sizeof(cp->recno), - &dbc->rkey->data, &dbc->rkey->ulen)) != 0) + default: + /* this is for the SET and GET_BOTH cases */ + ret = DB_KEYEMPTY; goto err1; - F_SET(key, DB_DBT_ISSET); + } } qp = QAM_GET_RECORD(dbp, pg, cp->indx); @@ -956,18 +968,30 @@ retry: /* Update the record number. */ tmp.data = qp->data; tmp.size = t->re_len; if ((ret = __bam_defcmp(dbp, data, &tmp)) != 0) { + if (flags == DB_GET_BOTH_RANGE) + goto release_retry; ret = DB_NOTFOUND; goto err1; } } - if (data != NULL && - !F_ISSET(dbc, DBC_MULTIPLE|DBC_MULTIPLE_KEY) && - (ret = __db_retcopy(dbp->dbenv, data, - qp->data, t->re_len, &dbc->rdata->data, &dbc->rdata->ulen)) != 0) - goto err1; - if (data != NULL) + /* Return the key if the user didn't give us one. */ + if (key != NULL) { + if (flags != DB_GET_BOTH && flags != DB_SET && + (ret = __db_retcopy(dbp->dbenv, + key, &cp->recno, sizeof(cp->recno), + &dbc->rkey->data, &dbc->rkey->ulen)) != 0) + goto err1; + F_SET(key, DB_DBT_ISSET); + } + + if (data != NULL) { + if (!F_ISSET(dbc, DBC_MULTIPLE|DBC_MULTIPLE_KEY) && + (ret = __db_retcopy(dbp->dbenv, data, qp->data, t->re_len, + &dbc->rdata->data, &dbc->rdata->ulen)) != 0) + goto err1; F_SET(data, DB_DBT_ISSET); + } /* Finally, if we are doing DB_CONSUME mark the record. */ if (with_delete) { @@ -1068,41 +1092,28 @@ retry: /* Update the record number. 
*/ done: err1: if (cp->page != NULL) { - t_ret = __qam_fput(dbp, cp->pgno, cp->page, put_mode); - - if (!ret) + if ((t_ret = __qam_fput( + dbp, cp->pgno, cp->page, put_mode)) != 0 && ret == 0) ret = t_ret; + /* Doing record locking, release the page lock */ - t_ret = __LPUT(dbc, pglock); + if ((t_ret = __LPUT(dbc, pglock)) != 0 && ret == 0) + ret = t_ret; cp->page = NULL; } -err: if (!ret) - ret = t_ret; - if (meta) { - - /* release the meta page */ - t_ret = __memp_fput(mpf, meta, 0); - - if (!ret) +err: if (meta) { + /* Release the meta page. */ + if ((t_ret = __memp_fput(mpf, meta, 0)) != 0 && ret == 0) ret = t_ret; /* Don't hold the meta page long term. */ if (locked) - t_ret = __LPUT(dbc, metalock); + if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0) + ret = t_ret; } DB_ASSERT(!LOCK_ISSET(metalock)); - /* - * There is no need to keep the record locked if we are - * not in a transaction. - */ - if (t_ret == 0) - t_ret = __TLPUT(dbc, cp->lock); - - if (!ret) - ret = t_ret; - return ((ret == DB_LOCK_NOTGRANTED && !F_ISSET(dbenv, DB_ENV_TIME_NOTGRANTED)) ? DB_LOCK_DEADLOCK : ret); @@ -1112,7 +1123,6 @@ err: if (!ret) * __qam_consume -- try to reset the head of the queue. * */ - static int __qam_consume(dbc, meta, first) DBC *dbc; @@ -1126,14 +1136,14 @@ __qam_consume(dbc, meta, first) db_indx_t save_indx; db_pgno_t save_page; db_recno_t current, save_recno; - u_int32_t rec_extent; - int exact, put_mode, ret, t_ret, wrapped; + u_int32_t put_mode, rec_extent; + int exact, ret, t_ret, wrapped; dbp = dbc->dbp; mpf = dbp->mpf; cp = (QUEUE_CURSOR *)dbc->internal; put_mode = DB_MPOOL_DIRTY; - ret = t_ret = 0; + ret = 0; save_page = cp->pgno; save_indx = cp->indx; @@ -1189,7 +1199,7 @@ __qam_consume(dbc, meta, first) if (cp->page != NULL && rec_extent != 0 && ((exact = (first % rec_extent == 0)) || first % meta->rec_page == 0 || - first == UINT32_T_MAX)) { + first == UINT32_MAX)) { if (exact == 1 && (ret = __db_lget(dbc, 0, cp->pgno, DB_LOCK_WRITE, 0, &cp->lock)) != 0) break; @@ -1207,14 +1217,12 @@ __qam_consume(dbc, meta, first) if (exact == 1) { ret = __qam_fremove(dbp, cp->pgno); - t_ret = __LPUT(dbc, cp->lock); + if ((t_ret = + __LPUT(dbc, cp->lock)) != 0 && ret == 0) + ret = t_ret; } if (ret != 0) break; - if (t_ret != 0) { - ret = t_ret; - break; - } } else if (cp->page != NULL && (ret = __qam_fput(dbp, cp->pgno, cp->page, put_mode)) != 0) break; @@ -1292,13 +1300,14 @@ __qam_bulk(dbc, data, flags) u_int32_t flags; { DB *dbp; - DB_LOCK metalock; + DB_LOCK metalock, rlock; DB_MPOOLFILE *mpf; PAGE *pg; QMETA *meta; QAMDATA *qp; QUEUE_CURSOR *cp; db_indx_t indx; + db_lockmode_t lkmode; db_pgno_t metapno; qam_position_mode mode; int32_t *endp, *offp; @@ -1311,8 +1320,11 @@ __qam_bulk(dbc, data, flags) cp = (QUEUE_CURSOR *)dbc->internal; mode = QAM_READ; - if (F_ISSET(dbc, DBC_RMW)) + lkmode = DB_LOCK_READ; + if (F_ISSET(dbc, DBC_RMW)) { mode = QAM_WRITE; + lkmode = DB_LOCK_WRITE; + } pagesize = dbp->pgsize; re_len = ((QUEUE *)dbp->q_internal)->re_len; @@ -1341,6 +1353,9 @@ __qam_bulk(dbc, data, flags) endp = (int32_t *) ((u_int8_t *)dbuf + data->ulen); endp--; offp = endp; + /* Save the lock on the current position of the cursor. */ + rlock = cp->lock; + LOCK_INIT(cp->lock); next_pg: /* Wrap around, skipping zero. 
*/ @@ -1363,6 +1378,9 @@ next_pg: valid = 0; if (pg != NULL) { + if ((ret = __db_lget(dbc, LCK_COUPLE, + cp->recno, lkmode, DB_LOCK_RECORD, &rlock)) != 0) + goto done; qp = QAM_GET_RECORD(dbp, pg, indx); if (F_ISSET(qp, QAM_VALID)) { valid = 1; @@ -1375,11 +1393,11 @@ next_pg: if (space < size) { get_space: if (offp == endp) { - data->size = - ALIGN(size + + data->size = (u_int32_t) + DB_ALIGN(size + pagesize, sizeof(u_int32_t)); - ret = ENOMEM; + ret = DB_BUFFER_SMALL; break; } if (indx != 0) @@ -1411,7 +1429,8 @@ get_space: cp->recno != meta->cur_recno && !QAM_AFTER_CURRENT(meta, cp->recno)); - if ((t_ret = __TLPUT(dbc, cp->lock)) != 0 && ret == 0) + /* Drop the page lock. */ + if ((t_ret = __LPUT(dbc, cp->lock)) != 0 && ret == 0) ret = t_ret; if (cp->page != NULL) { @@ -1440,14 +1459,13 @@ get_space: else *offp = -1; -done: - /* release the meta page */ - t_ret = __memp_fput(mpf, meta, 0); - - if (!ret) +done: /* Release the meta page. */ + if ((t_ret = __memp_fput(mpf, meta, 0)) != 0 && ret == 0) + ret = t_ret; + if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0) ret = t_ret; - t_ret = __LPUT(dbc, metalock); + cp->lock = rlock; return (ret); } @@ -1463,6 +1481,7 @@ __qam_c_close(dbc, root_pgno, rmroot) int *rmroot; { QUEUE_CURSOR *cp; + int ret; COMPQUIET(root_pgno, 0); COMPQUIET(rmroot, NULL); @@ -1470,9 +1489,9 @@ __qam_c_close(dbc, root_pgno, rmroot) cp = (QUEUE_CURSOR *)dbc->internal; /* Discard any locks not acquired inside of a transaction. */ - (void)__TLPUT(dbc, cp->lock); - LOCK_INIT(cp->lock); + ret = __TLPUT(dbc, cp->lock); + LOCK_INIT(cp->lock); cp->page = NULL; cp->pgno = PGNO_INVALID; cp->indx = 0; @@ -1480,7 +1499,7 @@ __qam_c_close(dbc, root_pgno, rmroot) cp->recno = RECNO_OOB; cp->flags = 0; - return (0); + return (ret); } /* @@ -1495,19 +1514,20 @@ __qam_c_dup(orig_dbc, new_dbc) DBC *orig_dbc, *new_dbc; { QUEUE_CURSOR *orig, *new; + int ret; orig = (QUEUE_CURSOR *)orig_dbc->internal; new = (QUEUE_CURSOR *)new_dbc->internal; new->recno = orig->recno; - /* reget the long term lock if we are not in a xact */ - if (orig_dbc->txn != NULL || - !STD_LOCKING(orig_dbc) || !LOCK_ISSET(orig->lock)) - return (0); + /* Acquire the long term lock if we are not in a transaction. */ + if (orig_dbc->txn == NULL && LOCK_ISSET(orig->lock)) + if ((ret = __db_lget(new_dbc, 0, new->recno, + new->lock_mode, DB_LOCK_RECORD, &new->lock)) != 0) + return (ret); - return (__db_lget(new_dbc, - 0, new->recno, new->lock_mode, DB_LOCK_RECORD, &new->lock)); + return (0); } /* @@ -1601,24 +1621,22 @@ __qam_truncate(dbc, countp) QMETA *meta; QUEUE_CURSOR *cp; db_pgno_t metapno; - int count, ret, t_ret; + u_int32_t count; + int ret, t_ret; dbp = dbc->dbp; /* Walk the queue, counting rows. */ - count = 0; - while ((ret = __qam_c_get(dbc, NULL, NULL, DB_CONSUME, &metapno)) == 0) + for (count = 0; + (ret = __qam_c_get(dbc, NULL, NULL, DB_CONSUME, &metapno)) == 0;) count++; - - if (ret == DB_NOTFOUND) - ret = 0; - else + if (ret != DB_NOTFOUND) return (ret); cp = (QUEUE_CURSOR *)dbc->internal; /* Remove the last extent file. */ if (cp->pgno != 0 && - ((QUEUE *)dbp->q_internal)->page_ext != 0 && + ((QUEUE *)dbp->q_internal)->page_ext != 0 && (ret = __qam_fremove(dbp, cp->pgno)) != 0) return (ret); diff --git a/db/qam/qam.src b/db/qam/qam.src index 34eada651..71063f0b6 100644 --- a/db/qam/qam.src +++ b/db/qam/qam.src @@ -1,17 +1,15 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. 
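/*
 * __qam_truncate above now counts the rows it discards by looping on
 * DB_CONSUME until DB_NOTFOUND.  The same pattern is available to
 * applications through the public API; a sketch that drains a queue and
 * reports how many records were consumed ("dbp" is assumed to be an open
 * DB_QUEUE handle):
 */
#include <string.h>
#include <db.h>

int
drain_queue(DB *dbp, u_int32_t *countp)
{
	DBT key, data;
	db_recno_t recno;
	u_int32_t count;
	int ret;

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = &recno;
	key.ulen = sizeof(recno);
	key.flags = DB_DBT_USERMEM;

	/* Each successful DB_CONSUME returns and deletes the head record. */
	for (count = 0; (ret =
	    dbp->get(dbp, NULL, &key, &data, DB_CONSUME)) == 0; count++)
		;

	*countp = count;
	return (ret == DB_NOTFOUND ? 0 : ret);
}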
* - * $Id: qam.src,v 11.31 2003/11/14 05:32:38 ubell Exp $ + * $Id: qam.src,v 11.33 2004/06/17 17:35:22 bostic Exp $ */ PREFIX __qam DBPRIVATE -INCLUDE #include "db_config.h" -INCLUDE INCLUDE #ifndef NO_SYSTEM_INCLUDES INCLUDE #include INCLUDE diff --git a/db/qam/qam_auto.c b/db/qam/qam_auto.c index a108e347a..e20cbd934 100644 --- a/db/qam/qam_auto.c +++ b/db/qam/qam_auto.c @@ -1,4 +1,5 @@ /* Do not edit: automatically built by gen_rec.awk. */ + #include "db_config.h" #ifndef NO_SYSTEM_INCLUDES @@ -33,33 +34,42 @@ __qam_incfirst_log(dbp, txnid, ret_lsnp, flags, recno, meta_pgno) DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___qam_incfirst; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -74,27 +84,23 @@ __qam_incfirst_log(dbp, txnid, ret_lsnp, flags, recno, meta_pgno) logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -129,123 +135,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
*/ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; #endif - } else{ -#ifdef DIAGNOSTIC -do_put: -#endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__qam_incfirst_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __qam_incfirst_getpgnos __P((DB_ENV *, DBT *, - * PUBLIC: DB_LSN *, db_recops, void *)); - */ -int -__qam_incfirst_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __qam_incfirst_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__qam_incfirst_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __qam_incfirst_args *argp; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __qam_incfirst_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__qam_incfirst%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? 
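/*
 * The rewritten logging functions (this one and the four that follow) all
 * make the same durability decision: durable records, and records written
 * outside a transaction, go through the real log write, while non-durable
 * records are queued on the transaction in memory and their LSN is marked
 * as never having been logged.  A compressed sketch of that control flow;
 * write_to_log(), buffer_on_txn() and mark_not_logged() are illustrative
 * stand-ins for __log_put(), STAILQ_INSERT_HEAD() and LSN_NOT_LOGGED(),
 * not real functions.
 */
static int write_to_log(void)     { return (0); }	/* stand-in */
static int buffer_on_txn(void)    { return (0); }	/* stand-in */
static void mark_not_logged(void) { }			/* stand-in */

static int
put_log_record(int is_durable, int have_txn)
{
	int ret;

	if (is_durable || !have_txn)
		ret = write_to_log();	/* Also updates the txn's last LSN. */
	else {
		ret = buffer_on_txn();	/* Kept in memory until commit. */
		mark_not_logged();
	}
	return (ret);
}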
"_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\trecno: %lu\n", (u_long)argp->recno); - (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __qam_incfirst_read __P((DB_ENV *, void *, * PUBLIC: __qam_incfirst_args **)); @@ -264,9 +194,9 @@ __qam_incfirst_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__qam_incfirst_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -316,33 +246,42 @@ __qam_mvptr_log(dbp, txnid, ret_lsnp, flags, DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___qam_mvptr; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -362,27 +301,23 @@ __qam_mvptr_log(dbp, txnid, ret_lsnp, flags, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -439,129 +374,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
*/ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__qam_mvptr_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __qam_mvptr_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__qam_mvptr_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __qam_mvptr_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__qam_mvptr_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __qam_mvptr_args *argp; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __qam_mvptr_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__qam_mvptr%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? 
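/*
 * The durability test in these generated functions is now driven only by
 * DB_LOG_NOT_DURABLE and the per-database DB_AM_NOT_DURABLE bit; the
 * environment-wide DB_ENV_TXN_NOT_DURABLE shortcut is gone.  At the
 * application level the per-database bit is normally requested with
 * DB->set_flags(DB_TXN_NOT_DURABLE) before DB->open; a sketch (the file
 * name and record length are illustrative only):
 */
#include <db.h>

int
open_non_durable_queue(DB_ENV *dbenv, DB **dbpp)
{
	DB *dbp;
	int ret;

	if ((ret = db_create(&dbp, dbenv, 0)) != 0)
		return (ret);
	/* Changes to this handle are not made durable in the log. */
	if ((ret = dbp->set_flags(dbp, DB_TXN_NOT_DURABLE)) != 0 ||
	    (ret = dbp->set_re_len(dbp, 32)) != 0 ||
	    (ret = dbp->open(dbp, NULL,
	    "scratch.queue", NULL, DB_QUEUE, DB_CREATE, 0)) != 0) {
		(void)dbp->close(dbp, 0);
		return (ret);
	}
	*dbpp = dbp;
	return (0);
}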
"_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\topcode: %lu\n", (u_long)argp->opcode); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\told_first: %lu\n", (u_long)argp->old_first); - (void)printf("\tnew_first: %lu\n", (u_long)argp->new_first); - (void)printf("\told_cur: %lu\n", (u_long)argp->old_cur); - (void)printf("\tnew_cur: %lu\n", (u_long)argp->new_cur); - (void)printf("\tmetalsn: [%lu][%lu]\n", - (u_long)argp->metalsn.file, (u_long)argp->metalsn.offset); - (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __qam_mvptr_read __P((DB_ENV *, void *, __qam_mvptr_args **)); */ @@ -579,9 +432,9 @@ __qam_mvptr_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__qam_mvptr_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -644,33 +497,42 @@ __qam_del_log(dbp, txnid, ret_lsnp, flags, lsn, pgno, indx, recno) DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___qam_del; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -687,27 +549,23 @@ __qam_del_log(dbp, txnid, ret_lsnp, flags, lsn, pgno, indx, recno) logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -752,126 +610,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__qam_del_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __qam_del_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__qam_del_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __qam_del_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__qam_del_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __qam_del_args *argp; - int ret; - - notused2 = 
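/*
 * For buffered (non-durable) records the generated code above makes a
 * single __os_malloc large enough for the DB_TXNLOGREC header plus the
 * record image and then points logrec.data at the trailing bytes
 * ("logrec.data = lr->data").  The same one-allocation idiom in plain C,
 * with an illustrative structure:
 */
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

struct buffered_rec {
	struct buffered_rec *next;	/* Per-transaction list link. */
	size_t size;
	u_int8_t data[1];		/* Record image follows the header. */
};

static struct buffered_rec *
buffer_record(const void *rec, size_t size)
{
	struct buffered_rec *lr;

	if ((lr = malloc(sizeof(*lr) + size)) == NULL)
		return (NULL);
	lr->next = NULL;
	lr->size = size;
	memcpy(lr->data, rec, size);	/* Payload lives in the same block. */
	return (lr);
}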
DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __qam_del_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__qam_del%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tlsn: [%lu][%lu]\n", - (u_long)argp->lsn.file, (u_long)argp->lsn.offset); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tindx: %lu\n", (u_long)argp->indx); - (void)printf("\trecno: %lu\n", (u_long)argp->recno); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __qam_del_read __P((DB_ENV *, void *, __qam_del_args **)); */ @@ -889,9 +668,9 @@ __qam_del_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__qam_del_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -947,33 +726,42 @@ __qam_add_log(dbp, txnid, ret_lsnp, flags, lsn, pgno, indx, recno, data, DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___qam_add; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -993,27 +781,23 @@ __qam_add_log(dbp, txnid, ret_lsnp, flags, lsn, pgno, indx, recno, data, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -1084,141 +868,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; #endif - } else{ -#ifdef DIAGNOSTIC -do_put: -#endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__qam_add_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __qam_add_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__qam_add_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __qam_add_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__qam_add_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __qam_add_args *argp; - u_int32_t i; - int 
ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __qam_add_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__qam_add%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tlsn: [%lu][%lu]\n", - (u_long)argp->lsn.file, (u_long)argp->lsn.offset); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tindx: %lu\n", (u_long)argp->indx); - (void)printf("\trecno: %lu\n", (u_long)argp->recno); - (void)printf("\tdata: "); - for (i = 0; i < argp->data.size; i++) { - ch = ((u_int8_t *)argp->data.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tvflag: %lu\n", (u_long)argp->vflag); - (void)printf("\tolddata: "); - for (i = 0; i < argp->olddata.size; i++) { - ch = ((u_int8_t *)argp->olddata.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __qam_add_read __P((DB_ENV *, void *, __qam_add_args **)); */ @@ -1236,9 +926,9 @@ __qam_add_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__qam_add_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -1307,33 +997,42 @@ __qam_delext_log(dbp, txnid, ret_lsnp, flags, lsn, pgno, indx, recno, data) DBT logrec; DB_ENV *dbenv; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; dbenv = dbp->dbenv; + COMPQUIET(lr, NULL); + rectype = DB___qam_delext; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) || F_ISSET(dbp, DB_AM_NOT_DURABLE)) { - if (F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE) && txnid == NULL) - return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -1351,27 +1050,23 @@ __qam_delext_log(dbp, txnid, ret_lsnp, flags, lsn, pgno, indx, recno, data) logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -1427,134 +1122,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; #endif - } else{ -#ifdef DIAGNOSTIC -do_put: -#endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__qam_delext_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __qam_delext_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__qam_delext_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __qam_delext_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__qam_delext_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __qam_delext_args *argp; 
- u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __qam_delext_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__qam_delext%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tfileid: %ld\n", (long)argp->fileid); - (void)printf("\tlsn: [%lu][%lu]\n", - (u_long)argp->lsn.file, (u_long)argp->lsn.offset); - (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); - (void)printf("\tindx: %lu\n", (u_long)argp->indx); - (void)printf("\trecno: %lu\n", (u_long)argp->recno); - (void)printf("\tdata: "); - for (i = 0; i < argp->data.size; i++) { - ch = ((u_int8_t *)argp->data.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __qam_delext_read __P((DB_ENV *, void *, __qam_delext_args **)); */ @@ -1572,9 +1180,9 @@ __qam_delext_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__qam_delext_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -1613,68 +1221,6 @@ __qam_delext_read(dbenv, recbuf, argpp) return (0); } -/* - * PUBLIC: int __qam_init_print __P((DB_ENV *, int (***)(DB_ENV *, - * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); - */ -int -__qam_init_print(dbenv, dtabp, dtabsizep) - DB_ENV *dbenv; - int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - size_t *dtabsizep; -{ - int ret; - - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __qam_incfirst_print, DB___qam_incfirst)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __qam_mvptr_print, DB___qam_mvptr)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __qam_del_print, DB___qam_del)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __qam_add_print, DB___qam_add)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __qam_delext_print, DB___qam_delext)) != 0) - return (ret); - return (0); -} - -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __qam_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, - * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); - */ -int -__qam_init_getpgnos(dbenv, dtabp, dtabsizep) - DB_ENV *dbenv; - int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - size_t *dtabsizep; -{ - int ret; - - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __qam_incfirst_getpgnos, DB___qam_incfirst)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __qam_mvptr_getpgnos, DB___qam_mvptr)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __qam_del_getpgnos, DB___qam_del)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __qam_add_getpgnos, DB___qam_add)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __qam_delext_getpgnos, DB___qam_delext)) != 0) - return (ret); - return (0); -} -#endif /* HAVE_REPLICATION */ - /* * PUBLIC: int __qam_init_recover __P((DB_ENV *, int (***)(DB_ENV *, * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); diff --git a/db/qam/qam_autop.c 
b/db/qam/qam_autop.c new file mode 100644 index 000000000..e9c122385 --- /dev/null +++ b/db/qam/qam_autop.c @@ -0,0 +1,274 @@ +/* Do not edit: automatically built by gen_rec.awk. */ + +#include "db_config.h" + +#ifdef HAVE_QUEUE +#ifndef NO_SYSTEM_INCLUDES +#include + +#include +#include +#endif + +#include "db_int.h" +#include "dbinc/crypto.h" +#include "dbinc/db_page.h" +#include "dbinc/db_dispatch.h" +#include "dbinc/db_am.h" +#include "dbinc/log.h" +#include "dbinc/qam.h" +#include "dbinc/txn.h" + +/* + * PUBLIC: int __qam_incfirst_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__qam_incfirst_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __qam_incfirst_args *argp; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __qam_incfirst_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__qam_incfirst%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\trecno: %lu\n", (u_long)argp->recno); + (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __qam_mvptr_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__qam_mvptr_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __qam_mvptr_args *argp; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __qam_mvptr_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__qam_mvptr%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\topcode: %lu\n", (u_long)argp->opcode); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\told_first: %lu\n", (u_long)argp->old_first); + (void)printf("\tnew_first: %lu\n", (u_long)argp->new_first); + (void)printf("\told_cur: %lu\n", (u_long)argp->old_cur); + (void)printf("\tnew_cur: %lu\n", (u_long)argp->new_cur); + (void)printf("\tmetalsn: [%lu][%lu]\n", + (u_long)argp->metalsn.file, (u_long)argp->metalsn.offset); + (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __qam_del_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__qam_del_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __qam_del_args *argp; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __qam_del_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__qam_del%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? 
"_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tlsn: [%lu][%lu]\n", + (u_long)argp->lsn.file, (u_long)argp->lsn.offset); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tindx: %lu\n", (u_long)argp->indx); + (void)printf("\trecno: %lu\n", (u_long)argp->recno); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __qam_add_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__qam_add_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __qam_add_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __qam_add_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__qam_add%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tlsn: [%lu][%lu]\n", + (u_long)argp->lsn.file, (u_long)argp->lsn.offset); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tindx: %lu\n", (u_long)argp->indx); + (void)printf("\trecno: %lu\n", (u_long)argp->recno); + (void)printf("\tdata: "); + for (i = 0; i < argp->data.size; i++) { + ch = ((u_int8_t *)argp->data.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tvflag: %lu\n", (u_long)argp->vflag); + (void)printf("\tolddata: "); + for (i = 0; i < argp->olddata.size; i++) { + ch = ((u_int8_t *)argp->olddata.data)[i]; + printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __qam_delext_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__qam_delext_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __qam_delext_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __qam_delext_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__qam_delext%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tfileid: %ld\n", (long)argp->fileid); + (void)printf("\tlsn: [%lu][%lu]\n", + (u_long)argp->lsn.file, (u_long)argp->lsn.offset); + (void)printf("\tpgno: %lu\n", (u_long)argp->pgno); + (void)printf("\tindx: %lu\n", (u_long)argp->indx); + (void)printf("\trecno: %lu\n", (u_long)argp->recno); + (void)printf("\tdata: "); + for (i = 0; i < argp->data.size; i++) { + ch = ((u_int8_t *)argp->data.data)[i]; + printf(isprint(ch) || ch == 0x0a ? 
"%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __qam_init_print __P((DB_ENV *, int (***)(DB_ENV *, + * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); + */ +int +__qam_init_print(dbenv, dtabp, dtabsizep) + DB_ENV *dbenv; + int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); + size_t *dtabsizep; +{ + int ret; + + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __qam_incfirst_print, DB___qam_incfirst)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __qam_mvptr_print, DB___qam_mvptr)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __qam_del_print, DB___qam_del)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __qam_add_print, DB___qam_add)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __qam_delext_print, DB___qam_delext)) != 0) + return (ret); + return (0); +} +#endif /* HAVE_QUEUE */ diff --git a/db/qam/qam_conv.c b/db/qam/qam_conv.c index ee5f23b50..c2b7d53a4 100644 --- a/db/qam/qam_conv.c +++ b/db/qam/qam_conv.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: qam_conv.c,v 11.17 2004/01/28 03:36:19 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: qam_conv.c,v 11.16 2003/01/08 05:37:19 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif diff --git a/db/qam/qam_files.c b/db/qam/qam_files.c index b6fd42dd0..fe0b01da5 100644 --- a/db/qam/qam_files.c +++ b/db/qam/qam_files.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: qam_files.c,v 1.86 2004/10/12 22:53:44 ubell Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: qam_files.c,v 1.72 2003/10/03 21:21:54 ubell Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include @@ -28,8 +26,8 @@ static const char revid[] = "$Id: qam_files.c,v 1.72 2003/10/03 21:21:54 ubell E #include "dbinc/mp.h" #include "dbinc/qam.h" -#define QAM_EXNAME(Q, I, B, L) \ - snprintf((B), (L), \ +#define QAM_EXNAME(Q, I, B, L) \ + snprintf((B), (L), \ QUEUE_EXTENT, (Q)->dir, PATH_SEPARATOR[0], (Q)->name, (I)) /* @@ -53,9 +51,9 @@ __qam_fprobe(dbp, pgno, addrp, mode, flags) MPFARRAY *array; QUEUE *qp; u_int8_t fid[DB_FILE_ID_LEN]; - u_int32_t extid, maxext, openflags; + u_int32_t extid, maxext, numext, offset, oldext, openflags; char buf[MAXPATHLEN]; - int ftype, numext, offset, oldext, ret; + int ftype, less, ret, t_ret; dbenv = dbp->dbenv; qp = (QUEUE *)dbp->q_internal; @@ -82,19 +80,32 @@ __qam_fprobe(dbp, pgno, addrp, mode, flags) array = &qp->array1; if (array->n_extent == 0) { /* Start with 4 extents */ - oldext = 0; array->n_extent = 4; array->low_extent = extid; - offset = 0; - numext = 0; + numext = offset = oldext = 0; + less = 0; goto alloc; } - offset = extid - qp->array1.low_extent; + if (extid < array->low_extent) { + less = 1; + offset = array->low_extent - extid; + } else { + less = 0; + offset = extid - array->low_extent; + } if (qp->array2.n_extent != 0 && - abs(offset) > abs(extid - qp->array2.low_extent)) { + (extid >= qp->array2.low_extent ? 
+ offset > extid - qp->array2.low_extent : + offset > qp->array2.low_extent - extid)) { array = &qp->array2; - offset = extid - array->low_extent; + if (extid < array->low_extent) { + less = 1; + offset = array->low_extent - extid; + } else { + less = 0; + offset = extid - array->low_extent; + } } /* @@ -102,27 +113,26 @@ __qam_fprobe(dbp, pgno, addrp, mode, flags) * extents in the array. This is true by default if there are * no extents here yet. */ - if (offset < 0 || (unsigned) offset >= array->n_extent) { + if (less == 1 || offset >= array->n_extent) { oldext = array->n_extent; - numext = array->hi_extent - array->low_extent + 1; - if (offset < 0 && - (unsigned) -offset + numext <= array->n_extent) { + numext = (array->hi_extent - array->low_extent) + 1; + if (less == 1 && offset + numext <= array->n_extent) { /* * If we can fit this one into the existing array by * shifting the existing entries then we do not have * to allocate. */ - memmove(&array->mpfarray[-offset], + memmove(&array->mpfarray[offset], array->mpfarray, numext * sizeof(array->mpfarray[0])); - memset(array->mpfarray, 0, -offset + memset(array->mpfarray, 0, offset * sizeof(array->mpfarray[0])); offset = 0; - } else if ((u_int32_t)offset == array->n_extent && + } else if (less == 0 && offset == array->n_extent && mode != QAM_PROBE_MPF && array->mpfarray[0].pinref == 0) { /* * If this is at the end of the array and the file at - * the begining has a zero pin count we can close + * the beginning has a zero pin count we can close * the bottom extent and put this one at the end. * TODO: If this process is "slow" then it might be * appending but miss one or more extents. @@ -146,9 +156,9 @@ __qam_fprobe(dbp, pgno, addrp, mode, flags) * If it has then allocate the second array. * Otherwise just expand the one we are using. */ - maxext = (u_int32_t) UINT32_T_MAX + maxext = (u_int32_t) UINT32_MAX / (qp->page_ext * qp->rec_page); - if ((u_int32_t) abs(offset) >= maxext/2) { + if (offset >= maxext/2) { array = &qp->array2; DB_ASSERT(array->n_extent == 0); oldext = 0; @@ -161,21 +171,19 @@ __qam_fprobe(dbp, pgno, addrp, mode, flags) * Increase the size to at least include * the new one and double it. */ - array->n_extent += abs(offset); + array->n_extent += offset; array->n_extent <<= 2; } - alloc: - if ((ret = __os_realloc(dbenv, +alloc: if ((ret = __os_realloc(dbenv, array->n_extent * sizeof(struct __qmpf), &array->mpfarray)) != 0) goto err; - if (offset < 0) { + if (less == 1) { /* * Move the array up and put the new one * in the first slot. */ - offset = -offset; memmove(&array->mpfarray[offset], array->mpfarray, numext * sizeof(array->mpfarray[0])); @@ -256,14 +264,27 @@ err: } pgno--; pgno %= qp->page_ext; - if (mode == QAM_PROBE_GET) - return (__memp_fget(mpf, &pgno, flags, addrp)); - ret = __memp_fput(mpf, addrp, flags); + if (mode == QAM_PROBE_GET) { + if ((ret = __memp_fget(mpf, &pgno, flags, addrp)) == 0) + return (ret); + } else + ret = __memp_fput(mpf, addrp, flags); + MUTEX_THREAD_LOCK(dbenv, dbp->mutexp); /* Recalculate because we dropped the lock. */ offset = extid - array->low_extent; DB_ASSERT(array->mpfarray[offset].pinref > 0); - array->mpfarray[offset].pinref--; + if (--array->mpfarray[offset].pinref == 0 && + (mode == QAM_PROBE_GET || ret == 0)) { + /* Check to see if this file will be unlinked. 
*/ + (void)__memp_get_flags(mpf, &flags); + if (LF_ISSET(DB_MPOOL_UNLINK)) { + array->mpfarray[offset].mpf = NULL; + if ((t_ret = + __memp_fclose(mpf, 0)) != 0 && ret == 0) + ret = t_ret; + } + } MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp); } return (ret); @@ -286,8 +307,8 @@ __qam_fclose(dbp, pgnoaddr) DB_MPOOLFILE *mpf; MPFARRAY *array; QUEUE *qp; - u_int32_t extid; - int offset, ret; + u_int32_t extid, offset; + int ret; ret = 0; dbenv = dbp->dbenv; @@ -301,7 +322,7 @@ __qam_fclose(dbp, pgnoaddr) array = &qp->array2; offset = extid - array->low_extent; - DB_ASSERT(offset >= 0 && (unsigned) offset < array->n_extent); + DB_ASSERT(extid >= array->low_extent && offset < array->n_extent); /* If other threads are still using this file, leave it. */ if (array->mpfarray[offset].pinref != 0) @@ -334,11 +355,11 @@ __qam_fremove(dbp, pgnoaddr) DB_MPOOLFILE *mpf; MPFARRAY *array; QUEUE *qp; - u_int32_t extid; -#if CONFIG_TEST + u_int32_t extid, offset; +#ifdef CONFIG_TEST char buf[MAXPATHLEN], *real_name; #endif - int offset, ret; + int ret; qp = (QUEUE *)dbp->q_internal; dbenv = dbp->dbenv; @@ -352,9 +373,9 @@ __qam_fremove(dbp, pgnoaddr) array = &qp->array2; offset = extid - array->low_extent; - DB_ASSERT(offset >= 0 && (unsigned) offset < array->n_extent); + DB_ASSERT(extid >= array->low_extent && offset < array->n_extent); -#if CONFIG_TEST +#ifdef CONFIG_TEST real_name = NULL; /* Find the real name of the file. */ QAM_EXNAME(qp, extid, buf, sizeof(buf)); @@ -370,8 +391,11 @@ __qam_fremove(dbp, pgnoaddr) goto err; mpf = array->mpfarray[offset].mpf; - array->mpfarray[offset].mpf = NULL; (void)__memp_set_flags(mpf, DB_MPOOL_UNLINK, 1); + /* Someone could be real slow, let them close it down. */ + if (array->mpfarray[offset].pinref != 0) + goto err; + array->mpfarray[offset].mpf = NULL; if ((ret = __memp_fclose(mpf, 0)) != 0) goto err; @@ -394,7 +418,7 @@ __qam_fremove(dbp, pgnoaddr) err: MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp); -#if CONFIG_TEST +#ifdef CONFIG_TEST if (real_name != NULL) __os_free(dbenv, real_name); #endif @@ -483,14 +507,14 @@ __qam_gen_filelist(dbp, filelistp) * roundoff at first (e.g., current record in extent); * roundoff at current (e.g., first record in extent); * NULL termination; and - * UINT32_T_MAX wraparound (the last extent can be small). + * UINT32_MAX wraparound (the last extent can be small). */ rec_extent = qp->rec_page * qp->page_ext; if (current >= first) extent_cnt = (current - first) / rec_extent + 3; else extent_cnt = - (current + (UINT32_T_MAX - first)) / rec_extent + 4; + (current + (UINT32_MAX - first)) / rec_extent + 4; if ((ret = __os_calloc(dbenv, extent_cnt, sizeof(QUEUE_FILELIST), filelistp)) != 0) return (ret); @@ -500,7 +524,7 @@ again: if (current >= first) stop = current; else - stop = UINT32_T_MAX; + stop = UINT32_MAX; /* * Make sure that first is at the same offset in the extent as stop. @@ -545,8 +569,9 @@ __qam_extent_names(dbenv, name, namelistp) DB *dbp; QUEUE *qp; QUEUE_FILELIST *filelist, *fp; + size_t len; + int cnt, ret, t_ret; char buf[MAXPATHLEN], **cp, *freep; - int cnt, len, ret; *namelistp = NULL; filelist = NULL; @@ -570,19 +595,18 @@ __qam_extent_names(dbenv, name, namelistp) cnt++; /* QUEUE_EXTENT contains extra chars, but add 6 anyway for the int. 
*/ - len = (u_int32_t)(cnt * (sizeof(**namelistp) - + strlen(QUEUE_EXTENT) + strlen(qp->dir) + strlen(qp->name) + 6)); + len = (size_t)cnt * (sizeof(**namelistp) + + strlen(QUEUE_EXTENT) + strlen(qp->dir) + strlen(qp->name) + 6); - if ((ret = - __os_malloc(dbp->dbenv, len, namelistp)) != 0) + if ((ret = __os_malloc(dbp->dbenv, len, namelistp)) != 0) goto done; cp = *namelistp; freep = (char *)(cp + cnt + 1); for (fp = filelist; fp->mpf != NULL; fp++) { QAM_EXNAME(qp, fp->id, buf, sizeof(buf)); - len = (u_int32_t)strlen(buf); + len = strlen(buf); *cp++ = freep; - strcpy(freep, buf); + (void)strcpy(freep, buf); freep += len + 1; } *cp = NULL; @@ -590,7 +614,8 @@ __qam_extent_names(dbenv, name, namelistp) done: if (filelist != NULL) __os_free(dbp->dbenv, filelist); - (void)__db_close(dbp, NULL, DB_NOSYNC); + if ((t_ret = __db_close(dbp, NULL, DB_NOSYNC)) != 0 && ret == 0) + ret = t_ret; return (ret); } @@ -632,7 +657,7 @@ __qam_exid(dbp, fidp, exnum) /* * __qam_nameop -- - * Remove or rename extent files associated with a particular file. + * Remove or rename extent files associated with a particular file. * This is to remove or rename (both in mpool and the file system) any * extent files associated with the given dbp. * This is either called from the QUEUE remove or rename methods or @@ -648,17 +673,19 @@ int __qam_nameop(dbp, txn, newname, op) { DB_ENV *dbenv; QUEUE *qp; + size_t exlen, fulllen, len; + u_int8_t fid[DB_FILE_ID_LEN]; + u_int32_t exid; + int cnt, i, ret, t_ret; char buf[MAXPATHLEN], nbuf[MAXPATHLEN], sepsave; char *endname, *endpath, *exname, *fullname, **names; char *ndir, *namep, *new, *cp; - int cnt, exlen, fulllen, i, len, ret, t_ret; - u_int8_t fid[DB_FILE_ID_LEN]; - u_int32_t exid; ret = t_ret = 0; dbenv = dbp->dbenv; qp = (QUEUE *)dbp->q_internal; namep = exname = fullname = NULL; + names = NULL; /* If this isn't a queue with extents, we're done. */ if (qp->page_ext == 0) @@ -753,8 +780,7 @@ int __qam_nameop(dbp, txn, newname, op) * We have a queue extent file. We need to generate its * name and its fileid. */ - - exid = atol(names[i] + len); + exid = (u_int32_t)strtoul(names[i] + len, NULL, 10); __qam_exid(dbp, fid, exid); switch (op) { @@ -793,5 +819,7 @@ err: if (fullname != NULL) __os_free(dbenv, exname); if (namep != NULL) __os_free(dbenv, namep); + if (names != NULL) + __os_free(dbenv, names); return (ret); } diff --git a/db/qam/qam_method.c b/db/qam/qam_method.c index b0270f767..737ced0d2 100644 --- a/db/qam/qam_method.c +++ b/db/qam/qam_method.c @@ -1,40 +1,31 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: qam_method.c,v 11.83 2004/10/05 16:59:26 sue Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: qam_method.c,v 11.64 2003/10/01 20:03:43 ubell Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include - -#include #endif #include "db_int.h" #include "dbinc/db_page.h" #include "dbinc/db_shash.h" #include "dbinc/db_am.h" -#include "dbinc/fop.h" #include "dbinc/lock.h" #include "dbinc/mp.h" #include "dbinc/qam.h" #include "dbinc/txn.h" -static int __qam_get_extentsize __P((DB *, u_int32_t *)); +static int __qam_rr __P((DB *, DB_TXN *, + const char *, const char *, const char *, qam_name_op)); static int __qam_set_extentsize __P((DB *, u_int32_t)); -struct __qam_cookie { - DB_LSN lsn; - QUEUE_FILELIST *filelist; -}; - /* * __qam_db_create -- * Queue specific initialization of the DB structure. 
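/*
 * __qam_nameop above now parses extent numbers with strtoul rather than
 * atol: extent ids are u_int32_t, and on 32-bit platforms values above
 * LONG_MAX are undefined territory for atol.  A minimal illustration of
 * the replacement:
 */
#include <stdlib.h>
#include <sys/types.h>

static u_int32_t
parse_extent_id(const char *p)
{
	/* Base 10; trailing text, if any, is ignored as in __qam_nameop. */
	return ((u_int32_t)strtoul(p, NULL, 10));
}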
@@ -116,7 +107,13 @@ again: return (ret); } -static int +/* + * __qam_get_extentsize -- + * The DB->q_get_extentsize method. + * + * PUBLIC: int __qam_get_extentsize __P((DB *, u_int32_t *)); + */ +int __qam_get_extentsize(dbp, q_extentsizep) DB *dbp; u_int32_t *q_extentsizep; @@ -143,22 +140,24 @@ __qam_set_extentsize(dbp, extentsize) } /* - * __db_prqueue -- - * Print out a queue + * __queue_pageinfo - + * Given a dbp, get first/last page information about a queue. * - * PUBLIC: int __db_prqueue __P((DB *, FILE *, u_int32_t)); + * PUBLIC: int __queue_pageinfo __P((DB *, db_pgno_t *, db_pgno_t *, + * PUBLIC: int *, int, u_int32_t)); */ int -__db_prqueue(dbp, fp, flags) +__queue_pageinfo(dbp, firstp, lastp, emptyp, prpage, flags) DB *dbp; - FILE *fp; + db_pgno_t *firstp, *lastp; + int *emptyp; + int prpage; u_int32_t flags; { DB_MPOOLFILE *mpf; - PAGE *h; QMETA *meta; - db_pgno_t first, i, last, pg_ext, stop; - int ret, t_ret; + db_pgno_t first, i, last; + int empty, ret, t_ret; mpf = dbp->mpf; @@ -168,18 +167,55 @@ __db_prqueue(dbp, fp, flags) return (ret); first = QAM_RECNO_PAGE(dbp, meta->first_recno); - last = QAM_RECNO_PAGE(dbp, meta->cur_recno); + last = QAM_RECNO_PAGE( + dbp, meta->cur_recno == 1 ? 1 : meta->cur_recno - 1); + + empty = meta->cur_recno == meta->first_recno; + if (firstp != NULL) + *firstp = first; + if (lastp != NULL) + *lastp = last; + if (emptyp != NULL) + *emptyp = empty; +#ifdef HAVE_STATISTICS + if (prpage) + ret = __db_prpage(dbp, (PAGE *)meta, flags); +#else + COMPQUIET(prpage, 0); + COMPQUIET(flags, 0); +#endif - ret = __db_prpage(dbp, (PAGE *)meta, fp, flags); if ((t_ret = __memp_fput(mpf, meta, 0)) != 0 && ret == 0) ret = t_ret; - if (ret != 0) + return (ret); +} + +#ifdef HAVE_STATISTICS +/* + * __db_prqueue -- + * Print out a queue + * + * PUBLIC: int __db_prqueue __P((DB *, u_int32_t)); + */ +int +__db_prqueue(dbp, flags) + DB *dbp; + u_int32_t flags; +{ + PAGE *h; + db_pgno_t first, i, last, pg_ext, stop; + int empty, ret; + + if ((ret = __queue_pageinfo(dbp, &first, &last, &empty, 1, flags)) != 0) + return (ret); + + if (empty || ret != 0) return (ret); i = first; if (first > last) - stop = QAM_RECNO_PAGE(dbp, UINT32_T_MAX); + stop = QAM_RECNO_PAGE(dbp, UINT32_MAX); else stop = last; @@ -194,12 +230,12 @@ begin: return (ret); } if (ret == ENOENT || ret == DB_PAGE_NOTFOUND) { - i += pg_ext - ((i - 1) % pg_ext) - 1; + i += (pg_ext - ((i - 1) % pg_ext)) - 1; continue; } return (ret); } - (void)__db_prpage(dbp, h, fp, flags); + (void)__db_prpage(dbp, h, flags); if ((ret = __qam_fput(dbp, i, h, 0)) != 0) return (ret); } @@ -212,46 +248,68 @@ begin: } return (0); } +#endif /* - * __qam_remove + * __qam_remove -- * Remove method for a Queue. * - * PUBLIC: int __qam_remove __P((DB *, - * PUBLIC: DB_TXN *, const char *, const char *, DB_LSN *)); + * PUBLIC: int __qam_remove __P((DB *, DB_TXN *, const char *, const char *)); */ int -__qam_remove(dbp, txn, name, subdb, lsnp) +__qam_remove(dbp, txn, name, subdb) DB *dbp; DB_TXN *txn; const char *name, *subdb; - DB_LSN *lsnp; +{ + return (__qam_rr(dbp, txn, name, subdb, NULL, QAM_NAME_REMOVE)); +} + +/* + * __qam_rename -- + * Rename method for a Queue. + * + * PUBLIC: int __qam_rename __P((DB *, + * PUBLIC: DB_TXN *, const char *, const char *, const char *)); + */ +int +__qam_rename(dbp, txn, name, subdb, newname) + DB *dbp; + DB_TXN *txn; + const char *name, *subdb, *newname; +{ + return (__qam_rr(dbp, txn, name, subdb, newname, QAM_NAME_RENAME)); +} + +/* + * __qam_rr -- + * Remove/Rename method for a Queue. 
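/*
 * When __db_prqueue above hits a missing extent (ENOENT or
 * DB_PAGE_NOTFOUND) it jumps to the first page of the next extent.  With
 * pages numbered from 1 and pg_ext pages per extent, that page is
 * i + pg_ext - ((i - 1) % pg_ext); the code adds one less than this on
 * the assumption that the enclosing loop advances i by one as well.  A
 * tiny check of the arithmetic:
 */
#include <sys/types.h>

static u_int32_t
next_extent_first_page(u_int32_t i, u_int32_t pg_ext)
{
	return (i + pg_ext - ((i - 1) % pg_ext));
}
/* With pg_ext == 4: page 3 maps to 5, page 5 maps to 9, page 8 maps to 9. */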
+ */ +static int +__qam_rr(dbp, txn, name, subdb, newname, op) + DB *dbp; + DB_TXN *txn; + const char *name, *subdb, *newname; + qam_name_op op; { DB_ENV *dbenv; DB *tmpdbp; QUEUE *qp; - int ret, needclose, t_ret; - - COMPQUIET(lsnp, NULL); + int ret, t_ret; dbenv = dbp->dbenv; ret = 0; - needclose = 0; PANIC_CHECK(dbenv); - /* - * Subdatabases. - */ if (subdb != NULL) { __db_err(dbenv, "Queue does not support multiple databases per file"); - ret = EINVAL; - goto err; + return (EINVAL); } /* - * Since regular remove no longer opens the database, we may have + * Since regular rename no longer opens the database, we may have * to do it here. */ if (F_ISSET(dbp, DB_AM_OPEN_CALLED)) @@ -259,40 +317,29 @@ __qam_remove(dbp, txn, name, subdb, lsnp) else { if ((ret = db_create(&tmpdbp, dbenv, 0)) != 0) return (ret); + /* * We need to make sure we don't self-deadlock, so give * this dbp the same locker as the incoming one. */ tmpdbp->lid = dbp->lid; - - /* - * If this is a transactional dbp and the open fails, then - * the transactional abort will close the dbp. If it's not - * a transactional open, then we always have to close it - * even if the open fails. Once the open has succeeded, - * then we will always want to close it. - */ - if (txn == NULL) - needclose = 1; - if ((ret = __db_open(tmpdbp, - txn, name, NULL, DB_QUEUE, 0, 0, PGNO_BASE_MD)) != 0) + if ((ret = __db_open(tmpdbp, txn, + name, NULL, DB_QUEUE, DB_RDONLY, 0, PGNO_BASE_MD)) != 0) goto err; - needclose = 1; } qp = (QUEUE *)tmpdbp->q_internal; + if (qp->page_ext != 0) + ret = __qam_nameop(tmpdbp, txn, newname, op); - if (qp->page_ext != 0) - ret = __qam_nameop(tmpdbp, txn, NULL, QAM_NAME_REMOVE); - -err: if (needclose) { - /* - * Since we copied the lid from the dbp, we'd better not + if (!F_ISSET(dbp, DB_AM_OPEN_CALLED)) { +err: /* + * Since we copied the locker ID from the dbp, we'd better not * free it here. */ tmpdbp->lid = DB_LOCK_INVALIDID; - /* We need to remove the lockevent we associated with this. */ + /* We need to remove the lock event we associated with this. */ if (txn != NULL) __txn_remlock(dbenv, txn, &tmpdbp->handle_lock, DB_LOCK_INVALIDID); @@ -301,73 +348,40 @@ err: if (needclose) { __db_close(tmpdbp, txn, DB_NOSYNC)) != 0 && ret == 0) ret = t_ret; } - return (ret); } /* - * __qam_rename - * Rename method for Queue. + * __qam_map_flags -- + * Map queue-specific flags from public to the internal values. * - * PUBLIC: int __qam_rename __P((DB *, DB_TXN *, - * PUBLIC: const char *, const char *, const char *)); + * PUBLIC: void __qam_map_flags __P((DB *, u_int32_t *, u_int32_t *)); */ -int -__qam_rename(dbp, txn, filename, subdb, newname) +void +__qam_map_flags(dbp, inflagsp, outflagsp) DB *dbp; - DB_TXN *txn; - const char *filename, *subdb, *newname; + u_int32_t *inflagsp, *outflagsp; { - DB_ENV *dbenv; - DB *tmpdbp; - QUEUE *qp; - int ret, needclose, t_ret; + COMPQUIET(dbp, NULL); - dbenv = dbp->dbenv; - ret = 0; - needclose = 0; - - if (subdb != NULL) { - __db_err(dbenv, - "Queue does not support multiple databases per file"); - ret = EINVAL; - goto err; + if (FLD_ISSET(*inflagsp, DB_INORDER)) { + FLD_SET(*outflagsp, DB_AM_INORDER); + FLD_CLR(*inflagsp, DB_INORDER); } +} - /* - * Since regular rename no longer opens the database, we may have - * to do it here. - */ - if (F_ISSET(dbp, DB_AM_OPEN_CALLED)) - tmpdbp = dbp; - else { - if ((ret = db_create(&tmpdbp, dbenv, 0)) != 0) - return (ret); - /* Copy the incoming locker so we don't self-deadlock. 
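__qam_rr is the common worker behind the queue remove and rename methods; from the application side the extent files simply follow the database. A minimal usage sketch of the public call it ultimately serves (the file name is invented, and the handle is consumed by DB->remove whether or not it succeeds):

#include "db.h"

/*
 * Remove a queue database; its __dbq.* extent files go with it.
 * The handle must not have been opened, and must not be reused afterward.
 */
int
drop_queue(home_relative_name)
	const char *home_relative_name;
{
	DB *dbp;
	int ret;

	if ((ret = db_create(&dbp, NULL, 0)) != 0)
		return (ret);
	return (dbp->remove(dbp, home_relative_name, NULL, 0));
}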
*/ - tmpdbp->lid = dbp->lid; - needclose = 1; - if ((ret = __db_open(tmpdbp, - txn, filename, NULL, DB_QUEUE, 0, 0, PGNO_BASE_MD)) != 0) - goto err; - } - - qp = (QUEUE *)tmpdbp->q_internal; - - if (qp->page_ext != 0) - ret = __qam_nameop(tmpdbp, txn, newname, QAM_NAME_RENAME); - -err: if (needclose) { - /* We copied this, so we mustn't free it. */ - tmpdbp->lid = DB_LOCK_INVALIDID; - - /* We need to remove the lockevent we associated with this. */ - if (txn != NULL) - __txn_remlock(dbenv, - txn, &tmpdbp->handle_lock, DB_LOCK_INVALIDID); +/* + * __qam_set_flags -- + * Set queue-specific flags. + * + * PUBLIC: int __qam_set_flags __P((DB *, u_int32_t *flagsp)); + */ +int +__qam_set_flags(dbp, flagsp) + DB *dbp; + u_int32_t *flagsp; +{ - if ((t_ret = - __db_close(tmpdbp, txn, DB_NOSYNC)) != 0 && ret == 0) - ret = t_ret; - } - return (ret); + __qam_map_flags(dbp, flagsp, &dbp->flags); + return (0); } diff --git a/db/qam/qam_open.c b/db/qam/qam_open.c index 53b9e17a1..595d74dac 100644 --- a/db/qam/qam_open.c +++ b/db/qam/qam_open.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: qam_open.c,v 11.68 2004/02/27 12:38:31 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: qam_open.c,v 11.66 2003/09/25 01:35:38 margo Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -119,7 +117,8 @@ err: if (qmeta != NULL && ret = t_ret; /* Don't hold the meta page long term. */ - (void)__LPUT(dbc, metalock); + if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0) + ret = t_ret; if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) ret = t_ret; diff --git a/db/qam/qam_rec.c b/db/qam/qam_rec.c index d846118ac..e92141ddd 100644 --- a/db/qam/qam_rec.c +++ b/db/qam/qam_rec.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. 
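__qam_map_flags/__qam_set_flags are what back the queue-only DB_INORDER flag, translating it into the internal DB_AM_INORDER bit before the database is opened. A hedged usage sketch (the record length, file name and error policy are invented):

#include "db.h"

/*
 * Open a queue database whose consumers see records in record-number order
 * (DB_INORDER maps to DB_AM_INORDER in the method code above).
 */
int
open_inorder_queue(dbpp)
	DB **dbpp;
{
	DB *dbp;
	int ret;

	if ((ret = db_create(&dbp, NULL, 0)) != 0)
		return (ret);
	if ((ret = dbp->set_re_len(dbp, 32)) != 0 ||	/* Fixed-length records. */
	    (ret = dbp->set_flags(dbp, DB_INORDER)) != 0 ||
	    (ret = dbp->open(dbp,
	    NULL, "work.queue", NULL, DB_QUEUE, DB_CREATE, 0)) != 0)
		(void)dbp->close(dbp, 0);
	else
		*dbpp = dbp;
	return (ret);
}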
+ * + * $Id: qam_rec.c,v 11.78 2004/05/11 14:04:51 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: qam_rec.c,v 11.75 2003/08/17 23:38:14 ubell Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -50,7 +48,8 @@ __qam_incfirst_recover(dbenv, dbtp, lsnp, op, info) QMETA *meta; QUEUE_CURSOR *cp; db_pgno_t metapg; - int exact, modified, ret, rec_ext; + u_int32_t rec_ext; + int exact, modified, ret, t_ret; REC_PRINT(__qam_incfirst_print); REC_INTRO(__qam_incfirst_read, 1); @@ -71,8 +70,7 @@ __qam_incfirst_recover(dbenv, dbtp, lsnp, op, info) meta->dbmeta.type = P_QAMMETA; } else { *lsnp = argp->prev_lsn; - ret = 0; - (void)__LPUT(dbc, lock); + ret = __LPUT(dbc, lock); goto out; } } @@ -102,8 +100,9 @@ __qam_incfirst_recover(dbenv, dbtp, lsnp, op, info) LSN(meta) = *lsnp; modified = 1; } - rec_ext = 0; - if (meta->page_ext != 0) + if (meta->page_ext == 0) + rec_ext = 0; + else rec_ext = meta->page_ext * meta->rec_page; cp = (QUEUE_CURSOR *)dbc->internal; if (meta->first_recno == RECNO_OOB) @@ -113,8 +112,9 @@ __qam_incfirst_recover(dbenv, dbtp, lsnp, op, info) if ((ret = __qam_position(dbc, &meta->first_recno, QAM_READ, &exact)) != 0) goto err; - if (cp->page != NULL) - __qam_fput(file_dbp, cp->pgno, cp->page, 0); + if (cp->page != NULL && (ret = + __qam_fput(file_dbp, cp->pgno, cp->page, 0)) != 0) + goto err; if (exact == 1) break; @@ -130,17 +130,18 @@ __qam_incfirst_recover(dbenv, dbtp, lsnp, op, info) } } - if ((ret = __memp_fput(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0) - goto err1; - - (void)__LPUT(dbc, lock); + ret = __memp_fput(mpf, meta, modified ? DB_MPOOL_DIRTY : 0); + if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; + if (ret != 0) + goto out; done: *lsnp = argp->prev_lsn; ret = 0; if (0) { err: (void)__memp_fput(mpf, meta, 0); -err1: (void)__LPUT(dbc, lock); + (void)__LPUT(dbc, lock); } out: REC_CLOSE; @@ -190,8 +191,7 @@ __qam_mvptr_recover(dbenv, dbtp, lsnp, op, info) meta->dbmeta.type = P_QAMMETA; } else { *lsnp = argp->prev_lsn; - ret = 0; - (void)__LPUT(dbc, lock); + ret = __LPUT(dbc, lock); goto out; } } @@ -235,7 +235,8 @@ __qam_mvptr_recover(dbenv, dbtp, lsnp, op, info) if ((ret = __memp_fput(mpf, meta, modified ? 
DB_MPOOL_DIRTY : 0)) != 0) goto out; - (void)__LPUT(dbc, lock); + if ((ret = __LPUT(dbc, lock)) != 0) + goto out; done: *lsnp = argp->prev_lsn; ret = 0; @@ -268,7 +269,7 @@ __qam_del_recover(dbenv, dbtp, lsnp, op, info) QMETA *meta; QPAGE *pagep; db_pgno_t metapg; - int cmp_n, modified, ret; + int cmp_n, modified, ret, t_ret; COMPQUIET(info, NULL); REC_PRINT(__qam_del_print); @@ -303,10 +304,13 @@ __qam_del_recover(dbenv, dbtp, lsnp, op, info) meta->first_recno - argp->recno < argp->recno - meta->cur_recno))) { meta->first_recno = argp->recno; - (void)__memp_fput(mpf, meta, DB_MPOOL_DIRTY); + ret = __memp_fput(mpf, meta, DB_MPOOL_DIRTY); } else - (void)__memp_fput(mpf, meta, 0); - (void)__LPUT(dbc, lock); + ret = __memp_fput(mpf, meta, 0); + if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; + if (ret != 0) + goto err; /* Need to undo delete - mark the record as present */ qp = QAM_GET_RECORD(file_dbp, pagep, argp->indx); @@ -367,7 +371,7 @@ __qam_delext_recover(dbenv, dbtp, lsnp, op, info) QMETA *meta; QPAGE *pagep; db_pgno_t metapg; - int cmp_n, modified, ret; + int cmp_n, modified, ret, t_ret; COMPQUIET(info, NULL); REC_PRINT(__qam_delext_print); @@ -412,10 +416,13 @@ __qam_delext_recover(dbenv, dbtp, lsnp, op, info) meta->first_recno - argp->recno < argp->recno - meta->cur_recno))) { meta->first_recno = argp->recno; - (void)__memp_fput(mpf, meta, DB_MPOOL_DIRTY); + ret = __memp_fput(mpf, meta, DB_MPOOL_DIRTY); } else - (void)__memp_fput(mpf, meta, 0); - (void)__LPUT(dbc, lock); + ret = __memp_fput(mpf, meta, 0); + if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; + if (ret != 0) + goto err; if ((ret = __qam_pitem(dbc, pagep, argp->indx, argp->recno, &argp->data)) != 0) diff --git a/db/qam/qam_stat.c b/db/qam/qam_stat.c index bc6409e2f..c5264bd01 100644 --- a/db/qam/qam_stat.c +++ b/db/qam/qam_stat.c @@ -1,19 +1,18 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. 
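The recovery hunks above systematically replace (void) casts on __memp_fput and __LPUT with the ret/t_ret idiom, so a failing cleanup call is reported unless an earlier error already is. The idiom in isolation (the three stand-in functions are invented):

#include <stdio.h>

/* Stand-ins for a body that can fail and for cleanup calls that can fail. */
static int do_work() { return (0); }
static int put_page() { return (0); }
static int release_lock() { return (0); }

/*
 * Return the first error seen: the body's error wins, otherwise the first
 * cleanup error; cleanup always runs regardless.
 */
static int
work_and_cleanup()
{
	int ret, t_ret;

	ret = do_work();

	if ((t_ret = put_page()) != 0 && ret == 0)
		ret = t_ret;
	if ((t_ret = release_lock()) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}

int
main()
{
	printf("%d\n", work_and_cleanup());
	return (0);
}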
+ * + * $Id: qam_stat.c,v 11.47 2004/09/22 16:29:47 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: qam_stat.c,v 11.38 2003/09/04 18:06:48 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include +#include #include #endif @@ -25,6 +24,7 @@ static const char revid[] = "$Id: qam_stat.c,v 11.38 2003/09/04 18:06:48 bostic #include "dbinc/mp.h" #include "dbinc/qam.h" +#ifdef HAVE_STATISTICS /* * __qam_stat -- * Gather/print the qam statistics @@ -48,7 +48,7 @@ __qam_stat(dbc, spp, flags) db_indx_t indx; db_pgno_t first, last, pgno, pg_ext, stop; u_int32_t re_len; - int ret; + int ret, t_ret; dbp = dbc->dbp; @@ -82,13 +82,15 @@ __qam_stat(dbc, spp, flags) first = QAM_RECNO_PAGE(dbp, meta->first_recno); last = QAM_RECNO_PAGE(dbp, meta->cur_recno); - if ((ret = __memp_fput(mpf, meta, 0)) != 0) + ret = __memp_fput(mpf, meta, 0); + if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; + if (ret != 0) goto err; - (void)__LPUT(dbc, lock); pgno = first; if (first > last) - stop = QAM_RECNO_PAGE(dbp, UINT32_T_MAX); + stop = QAM_RECNO_PAGE(dbp, UINT32_MAX); else stop = last; @@ -112,7 +114,7 @@ begin: ret = 0; break; } - pgno += pg_ext - ((pgno - 1) % pg_ext) - 1; + pgno += (pg_ext - ((pgno - 1) % pg_ext)) - 1; continue; } if (ret != 0) @@ -130,12 +132,15 @@ begin: sp->qs_pgfree += re_len; } - if ((ret = __qam_fput(dbp, pgno, h, 0)) != 0) + ret = __qam_fput(dbp, pgno, h, 0); + if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; + if (ret != 0) goto err; - (void)__LPUT(dbc, lock); } - (void)__LPUT(dbc, lock); + if ((ret = __LPUT(dbc, lock)) != 0) + goto err; if (first > last) { pgno = 1; stop = last; @@ -169,20 +174,88 @@ meta_only: sp->qs_cur_recno = meta->cur_recno; /* Discard the meta-data page. */ - if ((ret = __memp_fput(mpf, - meta, F_ISSET(dbp, DB_AM_RDONLY) ? 0 : DB_MPOOL_DIRTY)) != 0) + ret = __memp_fput(mpf, + meta, F_ISSET(dbp, DB_AM_RDONLY) ? 0 : DB_MPOOL_DIRTY); + if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; + if (ret != 0) goto err; - (void)__LPUT(dbc, lock); *(DB_QUEUE_STAT **)spp = sp; - ret = 0; if (0) { err: if (sp != NULL) __os_ufree(dbp->dbenv, sp); } - (void)__LPUT(dbc, lock); + if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; return (ret); } + +/* + * __qam_stat_print -- + * Display queue statistics. 
+ * + * PUBLIC: int __qam_stat_print __P((DBC *, u_int32_t)); + */ +int +__qam_stat_print(dbc, flags) + DBC *dbc; + u_int32_t flags; +{ + DB *dbp; + DB_ENV *dbenv; + DB_QUEUE_STAT *sp; + int ret; + + dbp = dbc->dbp; + dbenv = dbp->dbenv; + + if ((ret = __qam_stat(dbc, &sp, 0)) != 0) + return (ret); + + if (LF_ISSET(DB_STAT_ALL)) { + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "Default Queue database information:"); + } + __db_msg(dbenv, "%lx\tQueue magic number", (u_long)sp->qs_magic); + __db_msg(dbenv, "%lu\tQueue version number", (u_long)sp->qs_version); + __db_dl(dbenv, "Fixed-length record size", (u_long)sp->qs_re_len); + __db_msg(dbenv, "%#x\tFixed-length record pad", (int)sp->qs_re_pad); + __db_dl(dbenv, + "Underlying database page size", (u_long)sp->qs_pagesize); + __db_dl(dbenv, + "Underlying database extent size", (u_long)sp->qs_extentsize); + __db_dl(dbenv, + "Number of records in the database", (u_long)sp->qs_nkeys); + __db_dl(dbenv, "Number of database pages", (u_long)sp->qs_pages); + __db_dl_pct(dbenv, + "Number of bytes free in database pages", + (u_long)sp->qs_pgfree, + DB_PCT_PG(sp->qs_pgfree, sp->qs_pages, sp->qs_pagesize), "ff"); + __db_msg(dbenv, + "%lu\tFirst undeleted record", (u_long)sp->qs_first_recno); + __db_msg(dbenv, + "%lu\tNext available record number", (u_long)sp->qs_cur_recno); + + __os_ufree(dbenv, sp); + + return (0); +} + +#else /* !HAVE_STATISTICS */ + +int +__qam_stat(dbc, spp, flags) + DBC *dbc; + void *spp; + u_int32_t flags; +{ + COMPQUIET(spp, NULL); + COMPQUIET(flags, 0); + + return (__db_stat_not_built(dbc->dbp->dbenv)); +} +#endif diff --git a/db/qam/qam_stub.c b/db/qam/qam_stub.c index 941aacfb4..1c22aaa52 100644 --- a/db/qam/qam_stub.c +++ b/db/qam/qam_stub.c @@ -1,15 +1,15 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: qam_stub.c,v 1.12 2004/06/14 15:23:33 bostic Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: qam_stub.c,v 1.7 2003/10/28 18:52:34 bostic Exp $"; -#endif /* not lint */ +#include "db_config.h" +#ifndef HAVE_QUEUE #ifndef NO_SYSTEM_INCLUDES #include #endif @@ -40,12 +40,10 @@ __db_no_queue_am(dbenv) } int -__db_prqueue(dbp, fp, flags) +__db_prqueue(dbp, flags) DB *dbp; - FILE *fp; u_int32_t flags; { - COMPQUIET(fp, NULL); COMPQUIET(flags, 0); return (__db_no_queue_am(dbp->dbenv)); } @@ -135,18 +133,6 @@ __qam_gen_filelist(dbp, filelistp) return (__db_no_queue_am(dbp->dbenv)); } -int -__qam_init_getpgnos(dbenv, dtabp, dtabsizep) - DB_ENV *dbenv; - int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - size_t *dtabsizep; -{ - COMPQUIET(dbenv, NULL); - COMPQUIET(dtabp, NULL); - COMPQUIET(dtabsizep, NULL); - return (0); -} - int __qam_init_print(dbenv, dtabp, dtabsizep) DB_ENV *dbenv; @@ -264,6 +250,15 @@ __qam_stat(dbc, spp, flags) return (__db_no_queue_am(dbc->dbp->dbenv)); } +int +__qam_stat_print(dbc, flags) + DBC *dbc; + u_int32_t flags; +{ + COMPQUIET(flags, 0); + return (__db_no_queue_am(dbc->dbp->dbenv)); +} + int __qam_sync(dbp) DB *dbp; @@ -336,3 +331,4 @@ __qam_vrfy_walkqueue(dbp, vdp, handle, callback, flags) COMPQUIET(flags, 0); return (__db_no_queue_am(dbp->dbenv)); } +#endif /* !HAVE_QUEUE */ diff --git a/db/qam/qam_upgrade.c b/db/qam/qam_upgrade.c index b10b4696a..dff82404a 100644 --- a/db/qam/qam_upgrade.c +++ b/db/qam/qam_upgrade.c @@ -1,19 +1,17 @@ /*- * See the file LICENSE for redistribution information. 
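The fields printed by __qam_stat_print are available to applications through the DB_QUEUE_STAT structure. A hedged usage sketch; the DB->stat signature with a transaction argument is assumed to match this release series, and the handle is assumed to be an open queue database:

#include <stdio.h>
#include <stdlib.h>
#include "db.h"

/* Print a few of the fields __qam_stat_print reports, via the public API. */
int
print_queue_stats(dbp)
	DB *dbp;
{
	DB_QUEUE_STAT *sp;
	int ret;

	if ((ret = dbp->stat(dbp, NULL, &sp, 0)) != 0)
		return (ret);
	printf("records:               %lu\n", (unsigned long)sp->qs_nkeys);
	printf("record length:         %lu\n", (unsigned long)sp->qs_re_len);
	printf("extent size (pages):   %lu\n", (unsigned long)sp->qs_extentsize);
	printf("first undeleted recno: %lu\n", (unsigned long)sp->qs_first_recno);
	printf("next available recno:  %lu\n", (unsigned long)sp->qs_cur_recno);
	free(sp);		/* The structure is allocated on the caller's behalf. */
	return (0);
}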
* - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: qam_upgrade.c,v 11.16 2004/05/10 21:29:43 bostic Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: qam_upgrade.c,v 11.14 2003/01/08 05:37:44 bostic Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #include -#include #include #endif diff --git a/db/qam/qam_verify.c b/db/qam/qam_verify.c index ddbc9525f..571157b8f 100644 --- a/db/qam/qam_verify.c +++ b/db/qam/qam_verify.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: qam_verify.c,v 1.51 2004/10/11 18:47:51 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: qam_verify.c,v 1.45 2003/08/12 19:51:55 ubell Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -49,11 +47,14 @@ __qam_vrfy_meta(dbp, vdp, meta, pgno, flags) int count, i, isbad, nextents, ret, t_ret; char *buf, **names; + COMPQUIET(count, 0); + dbenv = dbp->dbenv; + qp = (QUEUE *)dbp->q_internal; + extents = NULL; first = last = 0; buf = NULL; names = NULL; - qp = (QUEUE *)dbp->q_internal; if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0) return (ret); @@ -79,7 +80,7 @@ __qam_vrfy_meta(dbp, vdp, meta, pgno, flags) * re_len: If this is bad, we can't safely verify queue data pages, so * return DB_VERIFY_FATAL */ - if (ALIGN(meta->re_len + sizeof(QAMDATA) - 1, sizeof(u_int32_t)) * + if (DB_ALIGN(meta->re_len + sizeof(QAMDATA) - 1, sizeof(u_int32_t)) * meta->rec_page + QPAGE_SZ(dbp) > dbp->pgsize) { EPRINT((dbenv, "Page %lu: queue record length %lu too high for page size and recs/page", @@ -145,21 +146,19 @@ __qam_vrfy_meta(dbp, vdp, meta, pgno, flags) len = strlen(QUEUE_EXTENT_HEAD) + strlen(qp->name) + 1; if ((ret = __os_malloc(dbenv, len, &buf)) != 0) goto err; - len = snprintf(buf, len, QUEUE_EXTENT_HEAD, qp->name); - nextents = 0; - extents = NULL; - for (i = 0; i < count; i++) { + len = (size_t)snprintf(buf, len, QUEUE_EXTENT_HEAD, qp->name); + for (i = nextents = 0; i < count; i++) { if (strncmp(names[i], buf, len) == 0) { /* Only save extents out of bounds. */ - extid = atoi(&names[i][len]); + extid = (db_pgno_t)strtoul(&names[i][len], NULL, 10); if (qp->page_ext != 0 && (last > first ? (extid >= first && extid <= last) : (extid >= first || extid <= last))) continue; - if (extents == NULL && - (ret = __os_malloc(dbenv, - (count - i) * sizeof(extid), &extents)) != 0) + if (extents == NULL && (ret = __os_malloc( + dbenv, (size_t)(count - i) * sizeof(extid), + &extents)) != 0) goto err; extents[nextents] = extid; nextents++; @@ -179,6 +178,9 @@ err: if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0) __os_free(dbenv, buf); if (ret != 0 && extents != NULL) __os_free(dbenv, extents); + if (LF_ISSET(DB_SALVAGE) && + (t_ret = __db_salvage_markdone(vdp, pgno)) != 0 && ret == 0) + ret = t_ret; return (ret == 0 && isbad == 1 ? DB_VERIFY_BAD : ret); } @@ -312,13 +314,13 @@ __qam_vrfy_walkqueue(dbp, vdp, handle, callback, flags) db_pgno_t first, i, last, pg_ext, stop; int isbad, nextents, ret, t_ret; - isbad = ret = t_ret = 0; + COMPQUIET(h, NULL); - pip = NULL; dbenv = dbp->dbenv; qp = dbp->q_internal; - + pip = NULL; pg_ext = qp->page_ext; + isbad = ret = t_ret = 0; /* If this database has no extents, we've seen all the pages already. 
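The verifier's extent test above keeps only extent numbers that fall outside the live window [first, last], which may wrap around the end of the page-number space. The window test on its own, with a small self-check (the sample numbers are invented):

#include <assert.h>

typedef unsigned int pgno_t;

/*
 * Nonzero if ext lies in the window [first, last]; mirrors the test above:
 * a non-wrapped window checks both bounds, a wrapped one checks either half.
 */
static int
in_window(ext, first, last)
	pgno_t ext, first, last;
{
	return (last > first ?
	    (ext >= first && ext <= last) : (ext >= first || ext <= last));
}

int
main()
{
	assert(in_window(5, 3, 8));	/* Plain window. */
	assert(!in_window(9, 3, 8));
	assert(in_window(1, 7, 2));	/* Wrapped window: 7, 8, ..., 1, 2. */
	assert(!in_window(5, 7, 2));
	return (0);
}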
*/ if (pg_ext == 0) @@ -329,14 +331,13 @@ __qam_vrfy_walkqueue(dbp, vdp, handle, callback, flags) i = first; if (first > last) - stop = QAM_RECNO_PAGE(dbp, UINT32_T_MAX); + stop = QAM_RECNO_PAGE(dbp, UINT32_MAX); else stop = last; nextents = vdp->nextents; /* Verify/salvage each page. */ -begin: - for (; i <= stop; i++) { +begin: for (; i <= stop; i++) { /* * If DB_SALVAGE is set, we inspect our database of completed * pages, and skip any we've already printed in the subdb pass. @@ -345,7 +346,7 @@ begin: continue; if ((t_ret = __qam_fget(dbp, &i, 0, &h)) != 0) { if (t_ret == ENOENT || t_ret == DB_PAGE_NOTFOUND) { - i += pg_ext - ((i - 1) % pg_ext) - 1; + i += (pg_ext - ((i - 1) % pg_ext)) - 1; continue; } @@ -382,7 +383,6 @@ begin: * may make it easier to diagnose problems and * determine the magnitude of the corruption. */ - if ((ret = __db_vrfy_common(dbp, vdp, h, i, flags)) == DB_VERIFY_BAD) isbad = 1; @@ -402,8 +402,7 @@ begin: isbad = 1; goto err; } - if ((ret = - __db_vrfy_pgset_inc(vdp->pgset, i)) != 0) + if ((ret = __db_vrfy_pgset_inc(vdp->pgset, i)) != 0) goto err; if ((ret = __qam_vrfy_data(dbp, vdp, (QPAGE *)h, i, flags)) == DB_VERIFY_BAD) @@ -438,7 +437,6 @@ put: if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0) * Now check to see if there were any lingering * extents and dump their data. */ - if (LF_ISSET(DB_SALVAGE) && nextents != 0) { nextents--; i = 1 + @@ -505,11 +503,11 @@ __qam_salvage(dbp, vdp, pgno, h, handle, callback, flags) continue; dbt.data = qp->data; - if ((ret = __db_prdbt(&key, + if ((ret = __db_vrfy_prdbt(&key, 0, " ", handle, callback, 1, vdp)) != 0) err_ret = ret; - if ((ret = __db_prdbt(&dbt, + if ((ret = __db_vrfy_prdbt(&dbt, 0, " ", handle, callback, 0, vdp)) != 0) err_ret = ret; } diff --git a/db/rep/rep.src b/db/rep/rep.src new file mode 100644 index 000000000..ffabca3fa --- /dev/null +++ b/db/rep/rep.src @@ -0,0 +1,48 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2001-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: rep.src,v 1.5 2004/09/22 18:01:04 bostic Exp $ + */ + +PREFIX __rep +DBPRIVATE + +INCLUDE #ifndef NO_SYSTEM_INCLUDES +INCLUDE #include +INCLUDE #include +INCLUDE #endif +INCLUDE +INCLUDE #include "db_int.h" +INCLUDE #include "dbinc/db_page.h" +INCLUDE #include "dbinc/db_shash.h" +INCLUDE #include "dbinc/db_am.h" +INCLUDE #include "dbinc/log.h" +INCLUDE #include "dbinc/mp.h" +INCLUDE #include "dbinc/txn.h" +INCLUDE + +/* + * update - send update information + */ +BEGIN_BUF update +POINTER first_lsn DB_LSN * lu +ARG num_files int d +END + +/* + * file info + */ +BEGIN_BUF fileinfo +ARG pgsize size_t lu +ARG pgno db_pgno_t lu +ARG max_pgno db_pgno_t lu +ARG filenum int d +ARG id int32_t d +ARG type u_int32_t lu +ARG flags u_int32_t lu +DBT uid DBT s +DBT info DBT s +END diff --git a/db/rep/rep_auto.c b/db/rep/rep_auto.c new file mode 100644 index 000000000..75a9d5a34 --- /dev/null +++ b/db/rep/rep_auto.c @@ -0,0 +1,268 @@ +/* Do not edit: automatically built by gen_rec.awk. 
*/ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include +#include +#endif + +#include "db_int.h" +#include "dbinc/db_page.h" +#include "dbinc/db_shash.h" +#include "dbinc/db_am.h" +#include "dbinc/log.h" +#include "dbinc/mp.h" +#include "dbinc/txn.h" + +/* + * PUBLIC: int __rep_update_buf __P((u_int8_t *, size_t, size_t *, + * PUBLIC: DB_LSN *, int)); + */ +int +__rep_update_buf(buf, max, lenp, + first_lsn, num_files) + u_int8_t *buf; + size_t max, *lenp; + DB_LSN * first_lsn; + int num_files; +{ + u_int32_t uinttmp; + u_int8_t *endbuf; + u_int8_t *bp; + int ret; + + ret = 0; + + bp = buf; + endbuf = bp + max; + + if (bp + sizeof(*first_lsn) > endbuf) + return (ENOMEM); + if (first_lsn != NULL) + memcpy(bp, first_lsn, sizeof(*first_lsn)); + else + memset(bp, 0, sizeof(*first_lsn)); + bp += sizeof(*first_lsn); + + uinttmp = (u_int32_t)num_files; + if (bp + sizeof(uinttmp) > endbuf) + return (ENOMEM); + memcpy(bp, &uinttmp, sizeof(uinttmp)); + bp += sizeof(uinttmp); + + *lenp = (u_int32_t)(bp - buf); + + return (ret); +} + +/* + * PUBLIC: int __rep_update_read __P((DB_ENV *, void *, void **, + * PUBLIC: __rep_update_args **)); + */ +int +__rep_update_read(dbenv, recbuf, nextp, argpp) + DB_ENV *dbenv; + void *recbuf; + void **nextp; + __rep_update_args **argpp; +{ + __rep_update_args *argp; + u_int32_t uinttmp; + u_int8_t *bp; + int ret; + + if ((ret = __os_malloc(dbenv, + sizeof(__rep_update_args), &argp)) != 0) + return (ret); + bp = recbuf; + memcpy(&argp->first_lsn, bp, sizeof(argp->first_lsn)); + bp += sizeof(argp->first_lsn); + + memcpy(&uinttmp, bp, sizeof(uinttmp)); + argp->num_files = (int)uinttmp; + bp += sizeof(uinttmp); + + *nextp = bp; + *argpp = argp; + return (0); +} + +/* + * PUBLIC: int __rep_fileinfo_buf __P((u_int8_t *, size_t, size_t *, + * PUBLIC: size_t, db_pgno_t, db_pgno_t, int, int32_t, u_int32_t, + * PUBLIC: u_int32_t, const DBT *, const DBT *)); + */ +int +__rep_fileinfo_buf(buf, max, lenp, + pgsize, pgno, max_pgno, filenum, id, type, + flags, uid, info) + u_int8_t *buf; + size_t max, *lenp; + size_t pgsize; + db_pgno_t pgno; + db_pgno_t max_pgno; + int filenum; + int32_t id; + u_int32_t type; + u_int32_t flags; + const DBT *uid; + const DBT *info; +{ + u_int32_t zero; + u_int32_t uinttmp; + u_int8_t *endbuf; + u_int8_t *bp; + int ret; + + ret = 0; + + bp = buf; + endbuf = bp + max; + + uinttmp = (u_int32_t)pgsize; + if (bp + sizeof(uinttmp) > endbuf) + return (ENOMEM); + memcpy(bp, &uinttmp, sizeof(uinttmp)); + bp += sizeof(uinttmp); + + uinttmp = (u_int32_t)pgno; + if (bp + sizeof(uinttmp) > endbuf) + return (ENOMEM); + memcpy(bp, &uinttmp, sizeof(uinttmp)); + bp += sizeof(uinttmp); + + uinttmp = (u_int32_t)max_pgno; + if (bp + sizeof(uinttmp) > endbuf) + return (ENOMEM); + memcpy(bp, &uinttmp, sizeof(uinttmp)); + bp += sizeof(uinttmp); + + uinttmp = (u_int32_t)filenum; + if (bp + sizeof(uinttmp) > endbuf) + return (ENOMEM); + memcpy(bp, &uinttmp, sizeof(uinttmp)); + bp += sizeof(uinttmp); + + uinttmp = (u_int32_t)id; + if (bp + sizeof(uinttmp) > endbuf) + return (ENOMEM); + memcpy(bp, &uinttmp, sizeof(uinttmp)); + bp += sizeof(uinttmp); + + uinttmp = (u_int32_t)type; + if (bp + sizeof(uinttmp) > endbuf) + return (ENOMEM); + memcpy(bp, &uinttmp, sizeof(uinttmp)); + bp += sizeof(uinttmp); + + uinttmp = (u_int32_t)flags; + if (bp + sizeof(uinttmp) > endbuf) + return (ENOMEM); + memcpy(bp, &uinttmp, sizeof(uinttmp)); + bp += sizeof(uinttmp); + + if (uid == NULL) { + zero = 0; + if (bp + sizeof(u_int32_t) > endbuf) + return (ENOMEM); + memcpy(bp, &zero, 
sizeof(u_int32_t)); + bp += sizeof(u_int32_t); + } else { + if (bp + sizeof(uid->size) > endbuf) + return (ENOMEM); + memcpy(bp, &uid->size, sizeof(uid->size)); + bp += sizeof(uid->size); + if (bp + uid->size > endbuf) + return (ENOMEM); + memcpy(bp, uid->data, uid->size); + bp += uid->size; + } + + if (info == NULL) { + zero = 0; + if (bp + sizeof(u_int32_t) > endbuf) + return (ENOMEM); + memcpy(bp, &zero, sizeof(u_int32_t)); + bp += sizeof(u_int32_t); + } else { + if (bp + sizeof(info->size) > endbuf) + return (ENOMEM); + memcpy(bp, &info->size, sizeof(info->size)); + bp += sizeof(info->size); + if (bp + info->size > endbuf) + return (ENOMEM); + memcpy(bp, info->data, info->size); + bp += info->size; + } + + *lenp = (u_int32_t)(bp - buf); + + return (ret); +} + +/* + * PUBLIC: int __rep_fileinfo_read __P((DB_ENV *, void *, void **, + * PUBLIC: __rep_fileinfo_args **)); + */ +int +__rep_fileinfo_read(dbenv, recbuf, nextp, argpp) + DB_ENV *dbenv; + void *recbuf; + void **nextp; + __rep_fileinfo_args **argpp; +{ + __rep_fileinfo_args *argp; + u_int32_t uinttmp; + u_int8_t *bp; + int ret; + + if ((ret = __os_malloc(dbenv, + sizeof(__rep_fileinfo_args), &argp)) != 0) + return (ret); + bp = recbuf; + memcpy(&uinttmp, bp, sizeof(uinttmp)); + argp->pgsize = (size_t)uinttmp; + bp += sizeof(uinttmp); + + memcpy(&uinttmp, bp, sizeof(uinttmp)); + argp->pgno = (db_pgno_t)uinttmp; + bp += sizeof(uinttmp); + + memcpy(&uinttmp, bp, sizeof(uinttmp)); + argp->max_pgno = (db_pgno_t)uinttmp; + bp += sizeof(uinttmp); + + memcpy(&uinttmp, bp, sizeof(uinttmp)); + argp->filenum = (int)uinttmp; + bp += sizeof(uinttmp); + + memcpy(&uinttmp, bp, sizeof(uinttmp)); + argp->id = (int32_t)uinttmp; + bp += sizeof(uinttmp); + + memcpy(&uinttmp, bp, sizeof(uinttmp)); + argp->type = (u_int32_t)uinttmp; + bp += sizeof(uinttmp); + + memcpy(&uinttmp, bp, sizeof(uinttmp)); + argp->flags = (u_int32_t)uinttmp; + bp += sizeof(uinttmp); + + memset(&argp->uid, 0, sizeof(argp->uid)); + memcpy(&argp->uid.size, bp, sizeof(u_int32_t)); + bp += sizeof(u_int32_t); + argp->uid.data = bp; + bp += argp->uid.size; + + memset(&argp->info, 0, sizeof(argp->info)); + memcpy(&argp->info.size, bp, sizeof(u_int32_t)); + bp += sizeof(u_int32_t); + argp->info.data = bp; + bp += argp->info.size; + + *nextp = bp; + *argpp = argp; + return (0); +} + diff --git a/db/rep/rep_autop.c b/db/rep/rep_autop.c new file mode 100644 index 000000000..64cb10bea --- /dev/null +++ b/db/rep/rep_autop.c @@ -0,0 +1,17 @@ +/* Do not edit: automatically built by gen_rec.awk. */ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include +#include +#endif + +#include "db_int.h" +#include "dbinc/db_page.h" +#include "dbinc/db_shash.h" +#include "dbinc/db_am.h" +#include "dbinc/log.h" +#include "dbinc/mp.h" +#include "dbinc/txn.h" + diff --git a/db/rep/rep_backup.c b/db/rep/rep_backup.c new file mode 100644 index 000000000..f19404162 --- /dev/null +++ b/db/rep/rep_backup.c @@ -0,0 +1,1784 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2004 + * Sleepycat Software. All rights reserved. 
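The generated marshalling code above writes each DBT as a 4-byte length followed by the bytes themselves (a zero length standing in for a NULL DBT), with a bounds check that returns ENOMEM when the caller's buffer is too small. A standalone sketch of that length-prefixed encoding and its matching read side (the types and sample data are invented):

#include <errno.h>
#include <stdio.h>
#include <string.h>

typedef unsigned int u32;	/* Stands in for u_int32_t. */

/* Append a length-prefixed blob; return ENOMEM if it would overflow buf. */
static int
put_blob(buf, max, offp, data, size)
	unsigned char *buf, *data;
	size_t max, *offp;
	u32 size;
{
	if (*offp + sizeof(size) + size > max)
		return (ENOMEM);
	memcpy(buf + *offp, &size, sizeof(size));
	*offp += sizeof(size);
	memcpy(buf + *offp, data, size);
	*offp += size;
	return (0);
}

/* Read it back, advancing *offp past the blob as *nextp advances above. */
static void
get_blob(buf, offp, datap, sizep)
	unsigned char *buf, **datap;
	size_t *offp;
	u32 *sizep;
{
	memcpy(sizep, buf + *offp, sizeof(*sizep));
	*offp += sizeof(*sizep);
	*datap = buf + *offp;
	*offp += *sizep;
}

int
main()
{
	unsigned char buf[64], *p;
	size_t roff, woff;
	u32 sz;

	woff = 0;
	if (put_blob(buf, sizeof(buf),
	    &woff, (unsigned char *)"fileid01", (u32)8) != 0)
		return (1);
	roff = 0;
	get_blob(buf, &roff, &p, &sz);
	printf("%u bytes: %.8s\n", sz, (char *)p);
	return (0);
}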
+ * + * $Id: rep_backup.c,v 1.29 2004/10/14 12:56:13 sue Exp $ + */ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#if TIME_WITH_SYS_TIME +#include +#include +#else +#if HAVE_SYS_TIME_H +#include +#else +#include +#endif +#endif + +#include +#endif + +#include "db_int.h" +#include "dbinc/db_page.h" +#include "dbinc/db_shash.h" +#include "dbinc/db_am.h" +#include "dbinc/lock.h" +#include "dbinc/log.h" +#include "dbinc/mp.h" +#include "dbinc/qam.h" +#include "dbinc/txn.h" + +static int __rep_filedone __P((DB_ENV *, int, REP *, __rep_fileinfo_args *, + u_int32_t)); +static int __rep_files_data __P((DB_ENV *, u_int8_t *, size_t *, + size_t *, int *)); +static int __rep_files_inmem __P((DB_ENV *, u_int8_t *, size_t *, + size_t *, int *)); +static int __rep_get_fileinfo __P((DB_ENV *, const char *, + __rep_fileinfo_args *, u_int8_t *, int *)); +static int __rep_log_setup __P((DB_ENV *, REP *)); +static int __rep_mpf_open __P((DB_ENV *, DB_MPOOLFILE **, + __rep_fileinfo_args *)); +static int __rep_page_gap __P((DB_ENV *, REP *, __rep_fileinfo_args *, + u_int32_t)); +static int __rep_page_sendpages __P((DB_ENV *, int, + __rep_fileinfo_args *, DB_MPOOLFILE *, DB *)); +static int __rep_queue_filedone __P((DB_ENV *, REP *, __rep_fileinfo_args *)); +static int __rep_walk_dir __P((DB_ENV *, const char *, u_int8_t *, + size_t *, size_t *, int *)); +static int __rep_write_page __P((DB_ENV *, REP *, __rep_fileinfo_args *)); + +/* + * __rep_update_req - + * Process an update_req and send the file information to the client. + * + * PUBLIC: int __rep_update_req __P((DB_ENV *, int)); + */ +int +__rep_update_req(dbenv, eid) + DB_ENV *dbenv; + int eid; +{ + DBT updbt; + DB_LOG *dblp; + DB_LOGC *logc; + DB_LSN lsn; + DBT data_dbt; + size_t filelen, filesz, updlen; + u_int8_t *buf, *fp; + int filecnt, ret, t_ret; + + /* + * Allocate enough for all currently open files and then some. + * Optimize for the common use of having most databases open. + * Allocate dbentry_cnt * 2 plus an estimated 60 bytes per + * file for the filename/path (or multiplied by 120). + * + * The data we send looks like this: + * __rep_update_args + * __rep_fileinfo_args + * __rep_fileinfo_args + * ... + */ + dblp = dbenv->lg_handle; + filecnt = 0; + filelen = 0; + updlen = 0; + filesz = MEGABYTE; + if ((ret = __os_calloc(dbenv, 1, filesz, &buf)) != 0) + return (ret); + + /* + * First get our file information. Get in-memory files first + * then get on-disk files. + */ + fp = buf + sizeof(__rep_update_args); + if ((ret = __rep_files_inmem(dbenv, fp, &filesz, &filelen, + &filecnt)) != 0) + goto err; + if ((ret = __rep_files_data(dbenv, fp, &filesz, &filelen, + &filecnt)) != 0) + goto err; + + /* + * Now get our first LSN. + */ + if ((ret = __log_cursor(dbenv, &logc)) != 0) + goto err; + memset(&data_dbt, 0, sizeof(data_dbt)); + ret = __log_c_get(logc, &lsn, &data_dbt, DB_FIRST); + if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) + ret = t_ret; + if (ret != 0) + goto err; + + /* + * Package up the update information. + */ + if ((ret = __rep_update_buf(buf, filesz, &updlen, &lsn, filecnt)) != 0) + goto err; + /* + * We have all the file information now. Send it to the client. 
+ */ + memset(&updbt, 0, sizeof(updbt)); + updbt.data = buf; + updbt.size = (u_int32_t)(filelen + updlen); + R_LOCK(dbenv, &dblp->reginfo); + lsn = ((LOG *)dblp->reginfo.primary)->lsn; + R_UNLOCK(dbenv, &dblp->reginfo); + (void)__rep_send_message(dbenv, eid, REP_UPDATE, &lsn, &updbt, 0); + +err: + __os_free(dbenv, buf); + return (ret); +} + +/* + * __rep_files_data - + * Walk through all the files in the env's data_dirs. We need to + * open them, gather the necessary information and then close them. + * Then we need to figure out if they're already in the dbentry array. + */ +static int +__rep_files_data(dbenv, fp, fileszp, filelenp, filecntp) + DB_ENV *dbenv; + u_int8_t *fp; + size_t *fileszp, *filelenp; + int *filecntp; +{ + int ret; + char **ddir; + + ret = 0; + if (dbenv->db_data_dir == NULL) { + /* + * If we don't have a data dir, we have just the + * env home dir. + */ + ret = __rep_walk_dir(dbenv, dbenv->db_home, fp, + fileszp, filelenp, filecntp); + } else { + for (ddir = dbenv->db_data_dir; *ddir != NULL; ++ddir) + if ((ret = __rep_walk_dir(dbenv, *ddir, fp, + fileszp, filelenp, filecntp)) != 0) + break; + } + return (ret); +} + +static int +__rep_walk_dir(dbenv, dir, fp, fileszp, filelenp, filecntp) + DB_ENV *dbenv; + const char *dir; + u_int8_t *fp; + size_t *fileszp, *filelenp; + int *filecntp; +{ + DBT namedbt, uiddbt; + __rep_fileinfo_args tmpfp; + size_t len, offset; + int cnt, i, ret; + u_int8_t *rfp, uid[DB_FILE_ID_LEN]; + char **names; +#ifdef DIAGNOSTIC + REP *rep; + DB_MSGBUF mb; + DB_REP *db_rep; + + db_rep = dbenv->rep_handle; + rep = db_rep->region; +#endif + memset(&namedbt, 0, sizeof(namedbt)); + memset(&uiddbt, 0, sizeof(uiddbt)); + if ((ret = __os_dirlist(dbenv, dir, &names, &cnt)) != 0) + return (ret); + rfp = fp; + for (i = 0; i < cnt; i++) { + /* + * Skip DB-owned files: ., .., __db*, DB_CONFIG, log* + */ + if (strcmp(names[i], ".") == 0) + continue; + if (strcmp(names[i], "..") == 0) + continue; + if (strncmp(names[i], "__db", 4) == 0) + continue; + if (strncmp(names[i], "DB_CONFIG", 9) == 0) + continue; + if (strncmp(names[i], "log", 3) == 0) + continue; + /* + * We found a file to process. Check if we need + * to allocate more space. + */ + if ((ret = __rep_get_fileinfo(dbenv, names[i], &tmpfp, uid, + filecntp)) != 0) { + /* + * If we find a file that isn't a database, skip it. + */ + RPRINT(dbenv, rep, (dbenv, &mb, + "Walk_dir: File %d %s: returned error %s", + i, names[i], db_strerror(ret))); + ret = 0; + continue; + } + RPRINT(dbenv, rep, (dbenv, &mb, + "Walk_dir: File %d (of %d) %s: pgsize %lu, max_pgno %lu", + tmpfp.filenum, i, names[i], + (u_long)tmpfp.pgsize, (u_long)tmpfp.max_pgno)); + namedbt.data = names[i]; + namedbt.size = (u_int32_t)strlen(names[i]) + 1; + uiddbt.data = uid; + uiddbt.size = DB_FILE_ID_LEN; +retry: + ret = __rep_fileinfo_buf(rfp, *fileszp, &len, + tmpfp.pgsize, tmpfp.pgno, tmpfp.max_pgno, + tmpfp.filenum, tmpfp.id, tmpfp.type, + tmpfp.flags, &uiddbt, &namedbt); + if (ret == ENOMEM) { + offset = (size_t)(rfp - fp); + *fileszp *= 2; + /* + * Need to account for update info on both sides + * of the allocation. + */ + fp -= sizeof(__rep_update_args); + if ((ret = __os_realloc(dbenv, *fileszp, fp)) != 0) + break; + fp += sizeof(__rep_update_args); + rfp = fp + offset; + /* + * Now that we've reallocated the space, try to + * store it again. 
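When __rep_fileinfo_buf returns ENOMEM, __rep_walk_dir doubles the buffer, re-anchors its write pointer from the saved offset and retries the same record. The grow-and-retry shape in isolation (the records appended here are just invented strings):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Append src at *offp, or return ENOMEM if the buffer is too small. */
static int
append(buf, max, offp, src)
	char *buf;
	size_t max, *offp;
	const char *src;
{
	size_t len;

	len = strlen(src) + 1;
	if (*offp + len > max)
		return (ENOMEM);
	memcpy(buf + *offp, src, len);
	*offp += len;
	return (0);
}

int
main()
{
	char *buf, *nbuf;
	size_t max, off;
	int i;
	static const char *recs[3] = { "alpha.db", "beta.db", "gamma.db" };

	max = 8;			/* Deliberately tiny first guess. */
	off = 0;
	if ((buf = malloc(max)) == NULL)
		return (1);
	for (i = 0; i < 3; i++)
retry:		if (append(buf, max, &off, recs[i]) == ENOMEM) {
			max *= 2;	/* Grow, keep the write offset, retry. */
			if ((nbuf = realloc(buf, max)) == NULL) {
				free(buf);
				return (1);
			}
			buf = nbuf;
			goto retry;
		}
	printf("%lu bytes used of %lu\n", (unsigned long)off, (unsigned long)max);
	free(buf);
	return (0);
}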
+ */ + goto retry; + } + rfp += len; + *filelenp += len; + } + __os_dirfree(dbenv, names, cnt); + return (ret); +} + +static int +__rep_get_fileinfo(dbenv, file, rfp, uid, filecntp) + DB_ENV *dbenv; + const char *file; + __rep_fileinfo_args *rfp; + u_int8_t *uid; + int *filecntp; +{ + + DB *dbp, *entdbp; + DB_LOCK lk; + DB_LOG *dblp; + DB_MPOOLFILE *mpf; + DBC *dbc; + DBMETA *dbmeta; + PAGE *pagep; + int i, ret, t_ret; + + dbp = NULL; + dbc = NULL; + pagep = NULL; + mpf = NULL; + LOCK_INIT(lk); + + dblp = dbenv->lg_handle; + if ((ret = db_create(&dbp, dbenv, 0)) != 0) + goto err; + if ((ret = __db_open(dbp, NULL, file, NULL, DB_UNKNOWN, + DB_RDONLY | (F_ISSET(dbenv, DB_ENV_THREAD) ? DB_THREAD : 0), + 0, PGNO_BASE_MD)) != 0) + goto err; + + if ((ret = __db_cursor(dbp, NULL, &dbc, 0)) != 0) + goto err; + if ((ret = __db_lget( + dbc, 0, dbp->meta_pgno, DB_LOCK_READ, 0, &lk)) != 0) + goto err; + if ((ret = __memp_fget(dbp->mpf, &dbp->meta_pgno, 0, &pagep)) != 0) + goto err; + /* + * We have the meta page. Set up our information. + */ + dbmeta = (DBMETA *)pagep; + rfp->pgno = 0; + /* + * Queue is a special-case. We need to set max_pgno to 0 so that + * the client can compute the pages from the meta-data. + */ + if (dbp->type == DB_QUEUE) + rfp->max_pgno = 0; + else + rfp->max_pgno = dbmeta->last_pgno; + rfp->pgsize = dbp->pgsize; + memcpy(uid, dbp->fileid, DB_FILE_ID_LEN); + rfp->filenum = (*filecntp)++; + rfp->type = dbp->type; + rfp->flags = dbp->flags; + rfp->id = DB_LOGFILEID_INVALID; + ret = __memp_fput(dbp->mpf, pagep, 0); + pagep = NULL; + if ((t_ret = __LPUT(dbc, lk)) != 0 && ret == 0) + ret = t_ret; + if (ret != 0) + goto err; +err: + if ((t_ret = __LPUT(dbc, lk)) != 0 && ret == 0) + ret = t_ret; + if (dbc != NULL && (t_ret = __db_c_close(dbc)) != 0 && ret == 0) + ret = t_ret; + if (pagep != NULL && + (t_ret = __memp_fput(mpf, pagep, 0)) != 0 && ret == 0) + ret = t_ret; + if (dbp != NULL && (t_ret = __db_close(dbp, NULL, 0)) != 0 && ret == 0) + ret = t_ret; + /* + * We walk the entry table now, after closing the dbp because + * otherwise we find the open from this function and the id + * is useless in that case. + */ + if (ret == 0) { + MUTEX_THREAD_LOCK(dbenv, dblp->mutexp); + /* + * Walk entry table looking for this uid. + * If we find it, save the id. + */ + for (i = 0; i < dblp->dbentry_cnt; i++) { + entdbp = dblp->dbentry[i].dbp; + if (entdbp == NULL) + break; + DB_ASSERT(entdbp->log_filename != NULL); + if (memcmp(uid, + entdbp->log_filename->ufid, + DB_FILE_ID_LEN) == 0) + rfp->id = i; + } + MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp); + } + return (ret); +} + +/* + * __rep_files_inmem - + * Gather all the information about in-memory files. + */ +static int +__rep_files_inmem(dbenv, fp, fileszp, filelenp, filecntp) + DB_ENV *dbenv; + u_int8_t *fp; + size_t *fileszp, *filelenp; + int *filecntp; +{ + + int ret; + + COMPQUIET(dbenv, NULL); + COMPQUIET(fp, NULL); + COMPQUIET(fileszp, NULL); + COMPQUIET(filelenp, NULL); + COMPQUIET(filecntp, NULL); + ret = 0; + return (ret); +} + +/* + * __rep_page_req + * Process a page_req and send the page information to the client. 
+ * + * PUBLIC: int __rep_page_req __P((DB_ENV *, int, DBT *)); + */ +int +__rep_page_req(dbenv, eid, rec) + DB_ENV *dbenv; + int eid; + DBT *rec; +{ + DB *dbp; + DBT msgdbt; + DB_LOG *dblp; + DB_MPOOLFILE *mpf; + __rep_fileinfo_args *msgfp; + int ret, t_ret; + void *next; +#ifdef DIAGNOSTIC + DB_MSGBUF mb; + DB_REP *db_rep; + REP *rep; + + db_rep = dbenv->rep_handle; + rep = db_rep->region; +#endif + dblp = dbenv->lg_handle; + if ((ret = __rep_fileinfo_read(dbenv, rec->data, &next, &msgfp)) != 0) + return (ret); + /* + * See if we can find it already. If so we can quickly + * access its mpool and process. Otherwise we have to + * open the file ourselves. + */ + RPRINT(dbenv, rep, (dbenv, &mb, "page_req: file %d page %lu to %lu", + msgfp->filenum, (u_long)msgfp->pgno, (u_long)msgfp->max_pgno)); + MUTEX_THREAD_LOCK(dbenv, dblp->mutexp); + if (msgfp->id >= 0 && dblp->dbentry_cnt > msgfp->id) { + dbp = dblp->dbentry[msgfp->id].dbp; + if (dbp != NULL) { + DB_ASSERT(dbp->log_filename != NULL); + if (memcmp(msgfp->uid.data, dbp->log_filename->ufid, + DB_FILE_ID_LEN) == 0) { + MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp); + RPRINT(dbenv, rep, (dbenv, &mb, + "page_req: found %d in dbreg", + msgfp->filenum)); + ret = __rep_page_sendpages(dbenv, eid, + msgfp, dbp->mpf, dbp); + goto err; + } + } + } + MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp); + + /* + * If we get here, we do not have the file open via dbreg. + * We need to open the file and then send its pages. + * If we cannot open the file, we send REP_FILE_FAIL. + */ + RPRINT(dbenv, rep, (dbenv, &mb, "page_req: Open %d via mpf_open", + msgfp->filenum)); + if ((ret = __rep_mpf_open(dbenv, &mpf, msgfp)) != 0) { + memset(&msgdbt, 0, sizeof(msgdbt)); + msgdbt.data = msgfp; + msgdbt.size = sizeof(*msgfp); + RPRINT(dbenv, rep, (dbenv, &mb, "page_req: Open %d failed", + msgfp->filenum)); + (void)__rep_send_message(dbenv, eid, REP_FILE_FAIL, + NULL, &msgdbt, 0); + goto err; + } + + ret = __rep_page_sendpages(dbenv, eid, msgfp, mpf, NULL); + t_ret = __memp_fclose(mpf, 0); + if (ret == 0 && t_ret != 0) + ret = t_ret; +err: + return (ret); +} + +static int +__rep_page_sendpages(dbenv, eid, msgfp, mpf, dbp) + DB_ENV *dbenv; + int eid; + __rep_fileinfo_args *msgfp; + DB_MPOOLFILE *mpf; + DB *dbp; +{ + DB *qdbp; + DBT msgdbt, pgdbt; + DB_LOG *dblp; + DB_LSN lsn; + DB_MSGBUF mb; + DB_REP *db_rep; + PAGE *pagep; + REP *rep; + db_pgno_t p; + size_t len, msgsz; + u_int32_t bytes, gbytes, type; + int check_limit, opened, ret, t_ret; + u_int8_t *buf; + +#ifndef DIAGNOSTIC + DB_MSGBUF_INIT(&mb); +#endif + db_rep = dbenv->rep_handle; + rep = db_rep->region; + opened = 0; + gbytes = bytes = 0; + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + gbytes = rep->gbytes; + bytes = rep->bytes; + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + check_limit = gbytes != 0 || bytes != 0; + qdbp = NULL; + buf = NULL; + if (msgfp->type == DB_QUEUE) { + if (dbp == NULL) { + if ((ret = db_create(&qdbp, dbenv, 0)) != 0) + goto err; + if ((ret = __db_open(qdbp, NULL, msgfp->info.data, + NULL, DB_UNKNOWN, + DB_RDONLY | (F_ISSET(dbenv, DB_ENV_THREAD) ? 
+ DB_THREAD : 0), 0, PGNO_BASE_MD)) != 0) + goto err; + opened = 1; + } else + qdbp = dbp; + } + msgsz = sizeof(__rep_fileinfo_args) + DB_FILE_ID_LEN + msgfp->pgsize; + if ((ret = __os_calloc(dbenv, 1, msgsz, &buf)) != 0) + return (ret); + memset(&msgdbt, 0, sizeof(msgdbt)); + memset(&pgdbt, 0, sizeof(pgdbt)); + RPRINT(dbenv, rep, (dbenv, &mb, "sendpages: file %d page %lu to %lu", + msgfp->filenum, (u_long)msgfp->pgno, (u_long)msgfp->max_pgno)); + for (p = msgfp->pgno; p <= msgfp->max_pgno; p++) { + if (msgfp->type == DB_QUEUE && p != 0) +#ifdef HAVE_QUEUE + ret = __qam_fget(qdbp, &p, DB_MPOOL_CREATE, &pagep); +#else + ret = DB_PAGE_NOTFOUND; +#endif + else + ret = __memp_fget(mpf, &p, DB_MPOOL_CREATE, &pagep); + type = REP_PAGE; + if (ret == DB_PAGE_NOTFOUND) { + memset(&pgdbt, 0, sizeof(pgdbt)); + ret = 0; + ZERO_LSN(lsn); + RPRINT(dbenv, rep, (dbenv, &mb, + "sendpages: PAGE_FAIL on page %lu", (u_long)p)); + type = REP_PAGE_FAIL; + msgfp->pgno = p; + goto send; + } else if (ret != 0) + goto err; + else { + pgdbt.data = pagep; + pgdbt.size = (u_int32_t)msgfp->pgsize; + } + len = 0; + ret = __rep_fileinfo_buf(buf, msgsz, &len, + msgfp->pgsize, p, msgfp->max_pgno, + msgfp->filenum, msgfp->id, msgfp->type, + msgfp->flags, &msgfp->uid, &pgdbt); + if (ret != 0) + goto err; + + DB_ASSERT(len <= msgsz); + msgdbt.data = buf; + msgdbt.size = (u_int32_t)len; + + dblp = dbenv->lg_handle; + R_LOCK(dbenv, &dblp->reginfo); + lsn = ((LOG *)dblp->reginfo.primary)->lsn; + R_UNLOCK(dbenv, &dblp->reginfo); + if (check_limit) { + /* + * msgdbt.size is only the size of the page and + * other information we're sending. It doesn't + * count the size of the control structure. Factor + * that in as well so we're not off by a lot if + * pages are small. + */ + while (bytes < msgdbt.size + sizeof(REP_CONTROL)) { + if (gbytes > 0) { + bytes += GIGABYTE; + --gbytes; + continue; + } + /* + * We don't hold the rep mutex, and may + * miscount. + */ + rep->stat.st_nthrottles++; + type = REP_PAGE_MORE; + goto send; + } + bytes -= (msgdbt.size + sizeof(REP_CONTROL)); + } +send: + RPRINT(dbenv, rep, (dbenv, &mb, + "sendpages: %s %lu, lsn [%lu][%lu]", + (type == REP_PAGE ? "PAGE" : + (type == REP_PAGE_MORE ? "PAGE_MORE" : "PAGE_FAIL")), + (u_long)p, (u_long)lsn.file, (u_long)lsn.offset)); + (void)__rep_send_message(dbenv, eid, type, &lsn, &msgdbt, 0); + /* + * If we have REP_PAGE_FAIL we need to break before trying + * to give the page back to mpool. If we have REP_PAGE_MORE + * we need to break this loop after giving the page back + * to mpool. Otherwise, with REP_PAGE, we keep going. + */ + if (type == REP_PAGE_FAIL) + break; + if (msgfp->type != DB_QUEUE || p == 0) + ret = __memp_fput(mpf, pagep, 0); +#ifdef HAVE_QUEUE + else + /* + * We don't need an #else for HAVE_QUEUE here because if + * we're not compiled with queue, then we're guaranteed + * to have set REP_PAGE_FAIL above. + */ + ret = __qam_fput(qdbp, p, pagep, 0); +#endif + if (type == REP_PAGE_MORE) + break; + } +err: + if (opened && (t_ret = __db_close(qdbp, NULL, DB_NOSYNC)) != 0 && + ret == 0) + ret = t_ret; + if (buf != NULL) + __os_free(dbenv, buf); + return (ret); +} + +/* + * __rep_update_setup + * Process and setup with this file information. 
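The gbytes/bytes pair above is a two-word byte budget: whole gigabytes are broken down into the bytes counter on demand, and the sender throttles once even that runs out, charging each message for its payload plus a fixed per-message overhead. The accounting on its own (the budget figures and overhead value are invented):

#include <stdio.h>

#define	GIGABYTE	(1024u * 1024u * 1024u)

/*
 * Charge one message of msgsz bytes (plus fixed per-message overhead)
 * against the budget; return 1 if the sender should throttle instead.
 */
static int
charge(gbytesp, bytesp, msgsz, overhead)
	unsigned int *gbytesp, *bytesp, msgsz, overhead;
{
	while (*bytesp < msgsz + overhead) {
		if (*gbytesp > 0) {	/* Break a gigabyte into bytes. */
			*bytesp += GIGABYTE;
			--*gbytesp;
			continue;
		}
		return (1);		/* Budget exhausted: throttle. */
	}
	*bytesp -= msgsz + overhead;
	return (0);
}

int
main()
{
	unsigned int gbytes, bytes;

	gbytes = 0;
	bytes = 10000;
	printf("%d\n", charge(&gbytes, &bytes, 4096u, 64u));	/* 0: fits. */
	printf("%d\n", charge(&gbytes, &bytes, 8192u, 64u));	/* 1: throttle. */
	return (0);
}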
+ * + * PUBLIC: int __rep_update_setup __P((DB_ENV *, int, REP_CONTROL *, DBT *)); + */ +int +__rep_update_setup(dbenv, eid, rp, rec) + DB_ENV *dbenv; + int eid; + REP_CONTROL *rp; + DBT *rec; +{ + DB_LOG *dblp; + DB_LSN lsn; + DB_REP *db_rep; + DBT pagereq_dbt; + LOG *lp; + REGENV *renv; + REGINFO *infop; + REP *rep; + __rep_update_args *rup; + int ret; + u_int32_t count, infolen; + void *next; +#ifdef DIAGNOSTIC + __rep_fileinfo_args *msgfp; + DB_MSGBUF mb; +#endif + + db_rep = dbenv->rep_handle; + rep = db_rep->region; + dblp = dbenv->lg_handle; + lp = dblp->reginfo.primary; + ret = 0; + + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + if (!F_ISSET(rep, REP_F_RECOVER_UPDATE)) { + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + return (0); + } + F_CLR(rep, REP_F_RECOVER_UPDATE); + /* + * We know we're the first to come in here due to the + * REP_F_RECOVER_UPDATE flag. REP_F_READY should not be set. + */ + DB_ASSERT(!F_ISSET(rep, REP_F_READY)); + F_SET(rep, REP_F_RECOVER_PAGE); + /* + * We do not clear REP_F_READY or rep->in_recovery in this code. + * We'll eventually call the normal __rep_verify_match recovery + * code and that will clear all the flags and allow others to + * proceed. + */ + __rep_lockout(dbenv, db_rep, rep); + /* + * We need to update the timestamp and kill any open handles + * on this client. The files are changing completely. + */ + infop = dbenv->reginfo; + renv = infop->primary; + (void)time(&renv->rep_timestamp); + + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + MUTEX_LOCK(dbenv, db_rep->db_mutexp); + lp->wait_recs = rep->request_gap; + lp->rcvd_recs = 0; + ZERO_LSN(lp->ready_lsn); + ZERO_LSN(lp->waiting_lsn); + ZERO_LSN(lp->max_wait_lsn); + ZERO_LSN(lp->max_perm_lsn); + MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); + if ((ret = __rep_update_read(dbenv, rec->data, &next, &rup)) != 0) + goto err; + R_LOCK(dbenv, &dblp->reginfo); + lsn = lp->lsn; + R_UNLOCK(dbenv, &dblp->reginfo); + + /* + * We need to empty out any old log records that might be in the + * temp database. + */ + if ((ret = __db_truncate(db_rep->rep_db, NULL, &count)) != 0) + goto err; + + /* + * If our log is before the master's beginning of log, + * we need to request from the master's beginning. + * If we have some log, we need the earlier of the + * master's last checkpoint LSN or our current LSN. 
+ */ + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + if (log_compare(&lsn, &rup->first_lsn) < 0) + rep->first_lsn = rup->first_lsn; + else + rep->first_lsn = lsn; + rep->last_lsn = rp->lsn; + rep->nfiles = rup->num_files; + rep->curfile = 0; + rep->ready_pg = 0; + rep->npages = 0; + rep->waiting_pg = PGNO_INVALID; + rep->max_wait_pg = PGNO_INVALID; + + RPRINT(dbenv, rep, (dbenv, &mb, + "Update setup for %d files.", rep->nfiles)); + RPRINT(dbenv, rep, (dbenv, &mb, "Update setup: First LSN [%lu][%lu].", + (u_long)rep->first_lsn.file, (u_long)rep->first_lsn.offset)); + RPRINT(dbenv, rep, (dbenv, &mb, "Update setup: Last LSN [%lu][%lu]", + (u_long)rep->last_lsn.file, (u_long)rep->last_lsn.offset)); + + infolen = rec->size - sizeof(__rep_update_args); + if ((ret = __os_calloc(dbenv, 1, infolen, &rep->originfo)) != 0) + goto err; + memcpy(rep->originfo, next, infolen); + rep->finfo = rep->originfo; + if ((ret = __rep_fileinfo_read(dbenv, + rep->finfo, &next, &rep->curinfo)) != 0) { + RPRINT(dbenv, rep, (dbenv, &mb, + "Update setup: Fileinfo read: %s", db_strerror(ret))); + goto errmem1; + } + rep->nextinfo = next; + +#ifdef DIAGNOSTIC + msgfp = rep->curinfo; + DB_ASSERT(msgfp->pgno == 0); +#endif + + /* + * We want to create/open our dbp to the database + * where we'll keep our page information. + */ + if ((ret = __rep_client_dbinit(dbenv, 1, REP_PG)) != 0) { + RPRINT(dbenv, rep, (dbenv, &mb, + "Update setup: Client_dbinit %s", db_strerror(ret))); + goto errmem; + } + + /* + * We should get file info 'ready to go' to avoid data copies. + */ + memset(&pagereq_dbt, 0, sizeof(pagereq_dbt)); + pagereq_dbt.data = rep->finfo; + pagereq_dbt.size = (u_int32_t)((u_int8_t *)rep->nextinfo - + (u_int8_t *)rep->finfo); + + RPRINT(dbenv, rep, (dbenv, &mb, + "Update PAGE_REQ file 0: pgsize %lu, maxpg %lu", + (u_long)rep->curinfo->pgsize, + (u_long)rep->curinfo->max_pgno)); + /* + * We set up pagereq_dbt as we went along. Send it now. + */ + (void)__rep_send_message(dbenv, eid, REP_PAGE_REQ, + NULL, &pagereq_dbt, 0); + if (0) { +errmem: __os_free(dbenv, rep->curinfo); +errmem1: __os_free(dbenv, rep->originfo); + rep->finfo = NULL; + rep->curinfo = NULL; + rep->originfo = NULL; + } +err: + /* + * If we get an error, we cannot leave ourselves in the + * RECOVER_PAGE state because we have no file information. + * We need to move back to the RECOVER_UPDATE stage. + */ + if (ret != 0) { + RPRINT(dbenv, rep, (dbenv, &mb, + "Update_setup: Error: Clear PAGE, set UPDATE again. %s", + db_strerror(ret))); + F_CLR(rep, REP_F_RECOVER_PAGE); + F_SET(rep, REP_F_RECOVER_UPDATE); + } + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + return (ret); +} + +/* + * __rep_page + * Process a page message. 
+ * + * PUBLIC: int __rep_page __P((DB_ENV *, int, REP_CONTROL *, DBT *)); + */ +int +__rep_page(dbenv, eid, rp, rec) + DB_ENV *dbenv; + int eid; + REP_CONTROL *rp; + DBT *rec; +{ + + DB_REP *db_rep; + DBT key, data; + REP *rep; + __rep_fileinfo_args *msgfp; + db_recno_t recno; + int ret; + void *next; +#ifdef DIAGNOSTIC + DB_MSGBUF mb; +#endif + + ret = 0; + db_rep = dbenv->rep_handle; + rep = db_rep->region; + + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + if (!F_ISSET(rep, REP_F_RECOVER_PAGE)) { + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + return (0); + } + if ((ret = __rep_fileinfo_read(dbenv, rec->data, &next, &msgfp)) != 0) { + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + return (ret); + } + RPRINT(dbenv, rep, (dbenv, &mb, + "PAGE: Received page %lu from file %d", + (u_long)msgfp->pgno, msgfp->filenum)); + /* + * Check if this page is from the file we're expecting. + * This may be an old or delayed page message. + */ + /* + * !!! + * If we allow dbrename/dbremove on the master while a client + * is updating, then we'd have to verify the file's uid here too. + */ + if (msgfp->filenum != rep->curfile) { + RPRINT(dbenv, rep, + (dbenv, &mb, "Msg file %d != curfile %d", + msgfp->filenum, rep->curfile)); + goto err; + } + /* + * We want to create/open our dbp to the database + * where we'll keep our page information. + */ + if ((ret = __rep_client_dbinit(dbenv, 1, REP_PG)) != 0) + goto err; + + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + memset(&key, 0, sizeof(key)); + memset(&data, 0, sizeof(data)); + recno = (db_recno_t)(msgfp->pgno + 1); + key.data = &recno; + key.ulen = key.size = sizeof(db_recno_t); + key.flags = DB_DBT_USERMEM; + + /* + * If we already have this page, then we don't want to bother + * rewriting it into the file. Otherwise, any other error + * we want to return. + */ + ret = __db_put(rep->file_dbp, NULL, &key, &data, DB_NOOVERWRITE); + if (ret == DB_KEYEXIST) { + RPRINT(dbenv, rep, (dbenv, &mb, + "PAGE: Received duplicate page %lu from file %d", + (u_long)msgfp->pgno, msgfp->filenum)); + rep->stat.st_pg_duplicated++; + ret = 0; + goto err_nolock; + } + if (ret != 0) + goto err_nolock; + + RPRINT(dbenv, rep, (dbenv, &mb, + "PAGE: Write page %lu into mpool", (u_long)msgfp->pgno)); + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + /* + * We put the page in the database file itself. + */ + ret = __rep_write_page(dbenv, rep, msgfp); + if (ret != 0) { + /* + * We got an error storing the page, therefore, we need + * remove this page marker from the page database too. + * !!! + * I'm ignoring errors from the delete because we want to + * return the original error. If we cannot write the page + * and we cannot delete the item we just put, what should + * we do? Panic the env and return DB_RUNRECOVERY? + */ + (void)__db_del(rep->file_dbp, NULL, &key, 0); + goto err; + } + rep->stat.st_pg_records++; + rep->npages++; + + /* + * Now check the LSN on the page and save it if it is later + * than the one we have. + */ + if (log_compare(&rp->lsn, &rep->last_lsn) > 0) + rep->last_lsn = rp->lsn; + + /* + * We've successfully written the page. Now we need to see if + * we're done with this file. __rep_filedone will check if we + * have all the pages expected and if so, set up for the next + * file and send out a page request for the next file's pages. + */ + ret = __rep_filedone(dbenv, eid, rep, msgfp, rp->rectype); + +err: + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); +err_nolock: + __os_free(dbenv, msgfp); + return (ret); +} + +/* + * __rep_page_fail + * Process a page fail message. 
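__rep_page stores each received page number, as pgno + 1, in a recno database with DB_NOOVERWRITE, so a DB_KEYEXIST return identifies a duplicate page without a separate lookup. A hedged sketch of that trick with the public put call (the handle is assumed to be an open DB_RECNO database):

#include <string.h>
#include "db.h"

/*
 * Record that page pgno has been seen.  Returns 0 the first time,
 * DB_KEYEXIST if it was already recorded, or another error.
 */
int
mark_page_seen(dbp, pgno)
	DB *dbp;
	db_pgno_t pgno;
{
	DBT key, data;
	db_recno_t recno;

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));		/* Zero-length data item. */
	recno = (db_recno_t)(pgno + 1);		/* Record numbers start at 1. */
	key.data = &recno;
	key.size = sizeof(recno);

	return (dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE));
}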
+ * + * PUBLIC: int __rep_page_fail __P((DB_ENV *, int, DBT *)); + */ +int +__rep_page_fail(dbenv, eid, rec) + DB_ENV *dbenv; + int eid; + DBT *rec; +{ + + DB_REP *db_rep; + REP *rep; + __rep_fileinfo_args *msgfp, *rfp; + int ret; + void *next; +#ifdef DIAGNOSTIC + DB_MSGBUF mb; +#endif + + ret = 0; + db_rep = dbenv->rep_handle; + rep = db_rep->region; + + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + if (!F_ISSET(rep, REP_F_RECOVER_PAGE)) { + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + return (0); + } + if ((ret = __rep_fileinfo_read(dbenv, rec->data, &next, &msgfp)) != 0) { + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + return (ret); + } + /* + * Check if this page is from the file we're expecting. + * This may be an old or delayed page message. + */ + /* + * !!! + * If we allow dbrename/dbremove on the master while a client + * is updating, then we'd have to verify the file's uid here too. + */ + if (msgfp->filenum != rep->curfile) { + RPRINT(dbenv, rep, (dbenv, &mb, "Msg file %d != curfile %d", + msgfp->filenum, rep->curfile)); + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + return (0); + } + rfp = rep->curinfo; + if (rfp->type != DB_QUEUE) + --rfp->max_pgno; + else { + /* + * Queue is special. Pages at the beginning of the queue + * may disappear, as well as at the end. Use msgfp->pgno + * to adjust accordingly. + */ + RPRINT(dbenv, rep, (dbenv, &mb, + "page_fail: BEFORE page %lu failed. ready %lu, max %lu, npages %d", + (u_long)msgfp->pgno, (u_long)rep->ready_pg, + (u_long)rfp->max_pgno, rep->npages)); + if (msgfp->pgno == rfp->max_pgno) + --rfp->max_pgno; + if (msgfp->pgno >= rep->ready_pg) { + rep->ready_pg = msgfp->pgno + 1; + rep->npages = rep->ready_pg; + } + RPRINT(dbenv, rep, (dbenv, &mb, + "page_fail: AFTER page %lu failed. ready %lu, max %lu, npages %d", + (u_long)msgfp->pgno, (u_long)rep->ready_pg, + (u_long)rfp->max_pgno, rep->npages)); + } + /* + * We've lowered the number of pages expected. It is possible that + * this was the last page we were expecting. Now we need to see if + * we're done with this file. __rep_filedone will check if we have + * all the pages expected and if so, set up for the next file and + * send out a page request for the next file's pages. + */ + ret = __rep_filedone(dbenv, eid, rep, msgfp, REP_PAGE_FAIL); + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + return (ret); +} + +/* + * __rep_write_page - + * Write this page into a database. + */ +static int +__rep_write_page(dbenv, rep, msgfp) + DB_ENV *dbenv; + REP *rep; + __rep_fileinfo_args *msgfp; +{ + __rep_fileinfo_args *rfp; + DB_FH *rfh; + int ret; + void *dst; + char *real_name; + + real_name = NULL; + + /* + * If this is the first page we're putting in this database, we need + * to create the mpool file. Otherwise call memp_fget to create the + * page in mpool. Then copy the data to the page, and memp_fput the + * page to give it back to mpool. + * + * We need to create the file, removing any existing file and associate + * the correct file ID with the new one. + */ + if (rep->file_mpf == NULL) { + rfp = rep->curinfo; + + if (!F_ISSET(rfp, DB_AM_INMEM)) { + if ((ret = __db_appname(dbenv, DB_APP_DATA, + rfp->info.data, 0, NULL, &real_name)) != 0) + goto err; + /* + * Calling memp_nameop will both purge any matching + * fileid from mpool and unlink it on disk. + */ + if ((ret = __memp_nameop(dbenv, + rfp->uid.data, NULL, real_name, NULL)) != 0) + goto err; + /* + * Create the file on disk. We'll be putting the data + * into the file via mpool. 
+ */ + if ((ret = __os_open(dbenv, real_name, + DB_OSO_CREATE, dbenv->db_mode, &rfh)) == 0) + ret = __os_closehandle(dbenv, rfh); + if (ret != 0) + goto err; + } + + if ((ret = + __rep_mpf_open(dbenv, &rep->file_mpf, rep->curinfo)) != 0) + goto err; + } + /* + * Handle queue specially. If we're a QUEUE database, we need to + * use the __qam_fget/put calls. We need to use rep->queue_dbp for + * that. That dbp is opened after getting the metapage for the + * queue database. Since the meta-page is always in the queue file, + * we'll use the normal path for that first page. After that we + * can assume the dbp is opened. + */ + if (msgfp->type == DB_QUEUE && msgfp->pgno != 0) { +#ifdef HAVE_QUEUE + if ((ret = __qam_fget( + rep->queue_dbp, &msgfp->pgno, DB_MPOOL_CREATE, &dst)) != 0) +#else + if ((ret = __db_no_queue_am(dbenv)) != 0) +#endif + goto err; + } else if ((ret = __memp_fget( + rep->file_mpf, &msgfp->pgno, DB_MPOOL_CREATE, &dst)) != 0) + goto err; + + memcpy(dst, msgfp->info.data, msgfp->pgsize); + if (msgfp->type != DB_QUEUE || msgfp->pgno == 0) + ret = __memp_fput(rep->file_mpf, dst, DB_MPOOL_DIRTY); +#ifdef HAVE_QUEUE + else + ret = __qam_fput(rep->queue_dbp, msgfp->pgno, dst, + DB_MPOOL_DIRTY); +#endif + +err: if (real_name != NULL) + __os_free(dbenv, real_name); + return (ret); +} + +/* + * __rep_page_gap - + * After we've put the page into the database, we need to check if + * we have a page gap and whether we need to request pages. + */ +static int +__rep_page_gap(dbenv, rep, msgfp, type) + DB_ENV *dbenv; + REP *rep; + __rep_fileinfo_args *msgfp; + u_int32_t type; +{ + DB_LOG *dblp; + DB_REP *db_rep; + DBT data, key; + LOG *lp; + __rep_fileinfo_args *rfp; + db_recno_t recno; + int ret; +#ifdef DIAGNOSTIC + DB_MSGBUF mb; +#endif + + db_rep = dbenv->rep_handle; + ret = 0; + dblp = dbenv->lg_handle; + lp = dblp->reginfo.primary; + + /* + * We've successfully put this page into our file. + * Now we need to account for it and re-request new pages + * if necessary. + */ + /* + * We already hold the rep mutex, but we also need the db mutex. + * So we need to drop it, acquire both in the right order and + * then recheck the state of the world. + */ + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + MUTEX_LOCK(dbenv, db_rep->db_mutexp); + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + rfp = rep->curinfo; + + /* + * Make sure we're still talking about the same file. + * If not, we're done here. + */ + if (rfp->filenum != msgfp->filenum) { + ret = DB_REP_PAGEDONE; + goto err; + } + + /* + * We have 3 possible states: + * 1. We receive a page we already have. + * msg pgno < ready pgno + * 2. We receive a page that is beyond a gap. + * msg pgno > ready pgno + * 3. We receive the page we're expecting. + * msg pgno == ready pgno + */ + /* + * State 1. This should not happen because this function + * should only be called once per page received because we + * check for DB_KEY_EXIST when we save the page information. + */ + DB_ASSERT(msgfp->pgno >= rep->ready_pg); + + /* + * State 2. This page is beyond the page we're expecting. + * We need to update waiting_pg if this page is less than + * (earlier) the current waiting_pg. There is nothing + * to do but see if we need to request. 
+ */ + RPRINT(dbenv, rep, (dbenv, &mb, + "PAGE_GAP: pgno %lu, max_pg %lu ready %lu, waiting %lu max_wait %lu", + (u_long)msgfp->pgno, (u_long)rfp->max_pgno, (u_long)rep->ready_pg, + (u_long)rep->waiting_pg, (u_long)rep->max_wait_pg)); + if (msgfp->pgno > rep->ready_pg) { + if (rep->waiting_pg == PGNO_INVALID || + msgfp->pgno < rep->waiting_pg) + rep->waiting_pg = msgfp->pgno; + } else { + /* + * We received the page we're expecting. + */ + rep->ready_pg++; + lp->rcvd_recs = 0; + while (ret == 0 && rep->ready_pg == rep->waiting_pg) { + /* + * If we get here we know we just filled a gap. + */ + lp->wait_recs = 0; + lp->rcvd_recs = 0; + rep->max_wait_pg = PGNO_INVALID; + /* + * We need to walk the recno database looking for the + * next page we need or expect. + */ + memset(&key, 0, sizeof(key)); + memset(&data, 0, sizeof(data)); + recno = (db_recno_t)rep->ready_pg; + key.data = &recno; + key.ulen = key.size = sizeof(db_recno_t); + key.flags = DB_DBT_USERMEM; + ret = __db_get(rep->file_dbp, NULL, &key, &data, 0); + if (ret == DB_NOTFOUND || ret == DB_KEYEMPTY) + break; + else if (ret != 0) + goto err; + rep->ready_pg++; + } + } + + /* + * If we filled a gap and now have the entire file, there's + * nothing to do. We're done when ready_pg is > max_pgno + * because ready_pg is larger than the last page we received. + */ + if (rep->ready_pg > rfp->max_pgno) + goto err; + + /* + * Check if we need to ask for more pages. + */ + if ((rep->waiting_pg != PGNO_INVALID && + rep->ready_pg != rep->waiting_pg) || type == REP_PAGE_MORE) { + /* + * We got a page but we may still be waiting for more. + */ + if (lp->wait_recs == 0) { + /* + * This is a new gap. Initialize the number of + * records that we should wait before requesting + * that it be resent. We grab the limits out of + * the rep without the mutex. + */ + lp->wait_recs = rep->request_gap; + lp->rcvd_recs = 0; + rep->max_wait_pg = PGNO_INVALID; + } + /* + * If we got REP_PAGE_MORE we always want to ask for more. + */ + if ((__rep_check_doreq(dbenv, rep) || type == REP_PAGE_MORE) && + ((ret = __rep_pggap_req(dbenv, rep, rfp, + type == REP_PAGE_MORE)) != 0)) + goto err; + } else { + lp->wait_recs = 0; + rep->max_wait_pg = PGNO_INVALID; + } + +err: + MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); + return (ret); +} + +/* + * __rep_filedone - + * We need to check if we're done with the current file after + * processing the current page. Stat the database to see if + * we have all the pages. If so, we need to clean up/close + * this one, set up for the next one, and ask for its pages, + * or if this is the last file, request the log records and + * move to the REP_RECOVER_LOG state. + */ +static int +__rep_filedone(dbenv, eid, rep, msgfp, type) + DB_ENV *dbenv; + int eid; + REP *rep; + __rep_fileinfo_args *msgfp; + u_int32_t type; +{ + DBT dbt; + DB_REP *db_rep; + __rep_fileinfo_args *rfp; + int ret; +#ifdef DIAGNOSTIC + DB_MSGBUF mb; +#endif + + db_rep = dbenv->rep_handle; + /* + * We've put our page, now we need to do any gap processing + * that might be needed to re-request pages. + */ + ret = __rep_page_gap(dbenv, rep, msgfp, type); + /* + * The world changed while we were doing gap processing. + * We're done here. + */ + if (ret == DB_REP_PAGEDONE) + return (0); + + rfp = rep->curinfo; + /* + * max_pgno is 0-based and npages is 1-based, so we don't have + * all the pages until npages is > max_pgno. + */ + RPRINT(dbenv, rep, (dbenv, &mb, "FILEDONE: have %lu pages. 
Need %lu.", + (u_long)rep->npages, (u_long)rfp->max_pgno + 1)); + if (rep->npages <= rfp->max_pgno) + return (0); + + /* + * If we're queue and we think we have all the pages for this file, + * we need to do special queue processing. Queue is handled in + * several stages. + */ + if (rfp->type == DB_QUEUE && + ((ret = __rep_queue_filedone(dbenv, rep, rfp)) != + DB_REP_PAGEDONE)) + return (ret); + /* + * We have all the pages for this file. We need to: + * 1. Close up the file data pointer we used. + * 2. Close/reset the page database. + * 3. Check if we have all file data. If so, request logs. + * 4. If not, set curfile to next file and request its pages. + */ + /* + * 1. Close up the file data pointer we used. + */ + if (rep->file_mpf != NULL) { + ret = __memp_fclose(rep->file_mpf, 0); + rep->file_mpf = NULL; + if (ret != 0) + goto err; + } + + /* + * 2. Close/reset the page database. + */ + ret = __db_close(rep->file_dbp, NULL, DB_NOSYNC); + rep->file_dbp = NULL; + if (ret != 0) + goto err; + + /* + * 3. Check if we have all file data. If so, request logs. + */ + __os_free(dbenv, rep->curinfo); + if (++rep->curfile == rep->nfiles) { + RPRINT(dbenv, rep, (dbenv, &mb, + "FILEDONE: have %d files. RECOVER_LOG now", rep->nfiles)); + /* + * Move to REP_RECOVER_LOG state. + * Request logs. + */ + __os_free(dbenv, rep->originfo); + /* + * We need to do a sync here so that any later opens + * can find the file and file id. We need to do it + * before we clear REP_F_RECOVER_PAGE so that we do not + * try to flush the log. + */ + if ((ret = __memp_sync(dbenv, NULL)) != 0) + goto err; + F_CLR(rep, REP_F_RECOVER_PAGE); + F_SET(rep, REP_F_RECOVER_LOG); + memset(&dbt, 0, sizeof(dbt)); + dbt.data = &rep->last_lsn; + dbt.size = sizeof(rep->last_lsn); + RPRINT(dbenv, rep, (dbenv, &mb, + "FILEDONE: LOG_REQ from LSN [%lu][%lu] to [%lu][%lu]", + (u_long)rep->first_lsn.file, (u_long)rep->first_lsn.offset, + (u_long)rep->last_lsn.file, (u_long)rep->last_lsn.offset)); + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + if ((ret = __rep_log_setup(dbenv, rep)) != 0) + goto err; + (void)__rep_send_message(dbenv, eid, + REP_LOG_REQ, &rep->first_lsn, &dbt, 0); + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + return (0); + } + + /* + * 4. If not, set curinfo to next file and request its pages. + */ + rep->finfo = rep->nextinfo; + if ((ret = __rep_fileinfo_read(dbenv, rep->finfo, &rep->nextinfo, + &rep->curinfo)) != 0) + goto err; + DB_ASSERT(rep->curinfo->pgno == 0); + rep->ready_pg = 0; + rep->npages = 0; + rep->waiting_pg = PGNO_INVALID; + rep->max_wait_pg = PGNO_INVALID; + memset(&dbt, 0, sizeof(dbt)); + RPRINT(dbenv, rep, (dbenv, &mb, + "FILEDONE: Next file %d. Request pages 0 to %lu", + rep->curinfo->filenum, (u_long)rep->curinfo->max_pgno)); + dbt.data = rep->finfo; + dbt.size = (u_int32_t)((u_int8_t *)rep->nextinfo - + (u_int8_t *)rep->finfo); + (void)__rep_send_message(dbenv, eid, REP_PAGE_REQ, + NULL, &dbt, 0); +err: + return (ret); +} + +/* + * __rep_mpf_open - + * Create and open the mpool file for a database. + * Used by both master and client to bring files into mpool. + */ +static int +__rep_mpf_open(dbenv, mpfp, rfp) + DB_ENV *dbenv; + DB_MPOOLFILE **mpfp; + __rep_fileinfo_args *rfp; +{ + DB db; + int ret; + + if ((ret = __memp_fcreate(dbenv, mpfp)) != 0) + return (ret); + + /* + * We need a dbp to pass into to __db_dbenv_mpool. Set up + * only the parts that it needs. 
+ */ + db.type = rfp->type; + db.pgsize = (u_int32_t)rfp->pgsize; + memcpy(db.fileid, rfp->uid.data, DB_FILE_ID_LEN); + db.flags = rfp->flags; + db.mpf = *mpfp; + db.dbenv = dbenv; + if ((ret = __db_dbenv_mpool(&db, rfp->info.data, 0)) != 0) { + (void)__memp_fclose(*mpfp, 0); + *mpfp = NULL; + } + return (ret); +} + +/* + * __rep_pggap_req - + * Request a page gap. Assumes the caller holds the rep_mutexp. + * + * PUBLIC: int __rep_pggap_req __P((DB_ENV *, REP *, __rep_fileinfo_args *, + * PUBLIC: int)); + */ +int +__rep_pggap_req(dbenv, rep, reqfp, moregap) + DB_ENV *dbenv; + REP *rep; + __rep_fileinfo_args *reqfp; + int moregap; +{ + DBT max_pg_dbt; + __rep_fileinfo_args *tmpfp; + size_t len; + int alloc, ret; + + ret = 0; + alloc = 0; + if (reqfp == NULL) { + if ((ret = __rep_finfo_alloc(dbenv, rep->curinfo, &tmpfp)) != 0) + return (ret); + alloc = 1; + } else + tmpfp = reqfp; + + /* + * If we've never requested this page, then + * request everything between it and the first + * page we have. If we have requested this page + * then only request this record, not the entire gap. + */ + memset(&max_pg_dbt, 0, sizeof(max_pg_dbt)); + tmpfp->pgno = rep->ready_pg; + max_pg_dbt.data = rep->finfo; + max_pg_dbt.size = (u_int32_t)((u_int8_t *)rep->nextinfo - + (u_int8_t *)rep->finfo); + if (rep->max_wait_pg == PGNO_INVALID || moregap) { + /* + * Request the gap - set max to waiting_pg - 1 or if + * there is no waiting_pg, just ask for one. + */ + if (rep->waiting_pg == PGNO_INVALID) { + if (moregap) + rep->max_wait_pg = rep->curinfo->max_pgno; + else + rep->max_wait_pg = rep->ready_pg; + } else + rep->max_wait_pg = rep->waiting_pg - 1; + tmpfp->max_pgno = rep->max_wait_pg; + } else { + /* + * Request 1 page - set max to ready_pg. + */ + rep->max_wait_pg = rep->ready_pg; + tmpfp->max_pgno = rep->ready_pg; + } + if (rep->master_id != DB_EID_INVALID) { + rep->stat.st_pg_requested++; + /* + * We need to request the pages, but we need to get the + * new info into rep->finfo. Assert that the sizes never + * change. The only thing this should do is change + * the pgno field. Everything else remains the same. + */ + ret = __rep_fileinfo_buf(rep->finfo, max_pg_dbt.size, &len, + tmpfp->pgsize, tmpfp->pgno, tmpfp->max_pgno, + tmpfp->filenum, tmpfp->id, tmpfp->type, + tmpfp->flags, &tmpfp->uid, &tmpfp->info); + DB_ASSERT(len == max_pg_dbt.size); + (void)__rep_send_message(dbenv, rep->master_id, + REP_PAGE_REQ, NULL, &max_pg_dbt, 0); + } else + (void)__rep_send_message(dbenv, DB_EID_BROADCAST, + REP_MASTER_REQ, NULL, NULL, 0); + + if (alloc) + __os_free(dbenv, tmpfp); + return (ret); +} + +/* + * __rep_loggap_req - + * Request a log gap. Assumes the caller holds the db_mutexp. + * + * PUBLIC: void __rep_loggap_req __P((DB_ENV *, REP *, DB_LSN *, int)); + */ +void +__rep_loggap_req(dbenv, rep, lsnp, moregap) + DB_ENV *dbenv; + REP *rep; + DB_LSN *lsnp; + int moregap; +{ + DB_LOG *dblp; + DBT max_lsn_dbt, *max_lsn_dbtp; + DB_LSN next_lsn; + LOG *lp; + + dblp = dbenv->lg_handle; + lp = dblp->reginfo.primary; + R_LOCK(dbenv, &dblp->reginfo); + next_lsn = lp->lsn; + R_UNLOCK(dbenv, &dblp->reginfo); + + if (moregap || + (lsnp != NULL && + (log_compare(lsnp, &lp->max_wait_lsn) == 0 || + IS_ZERO_LSN(lp->max_wait_lsn)))) { + /* + * We need to ask for the gap. Either we never asked + * for records before, or we asked for a single record + * and received it. 
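__rep_pggap_req above, and the log-gap variant being started here, share one policy: the first request for a gap asks for everything from the first missing item up to just before the earliest one already on hand, while a re-request narrows to the single missing item. A hypothetical pure-C restatement of the page case, with 0 standing in for PGNO_INVALID:

/*
 * request_window --
 *	Compute the inclusive page range [*firstp, *lastp] to request.
 *	On the first request for a gap (or when explicitly asked for
 *	more), cover the whole gap; on a re-request, ask for only the
 *	page we are stuck on.
 */
static void
request_window(unsigned long ready_pg, unsigned long waiting_pg,
    unsigned long max_wait_pg, unsigned long file_max_pgno, int moregap,
    unsigned long *firstp, unsigned long *lastp)
{
	*firstp = ready_pg;
	if (max_wait_pg == 0 || moregap) {
		if (waiting_pg == 0)
			*lastp = moregap ? file_max_pgno : ready_pg;
		else
			*lastp = waiting_pg - 1;
	} else
		*lastp = ready_pg;
}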
+ */ + lp->max_wait_lsn = lp->waiting_lsn; + memset(&max_lsn_dbt, 0, sizeof(max_lsn_dbt)); + max_lsn_dbt.data = &lp->waiting_lsn; + max_lsn_dbt.size = sizeof(lp->waiting_lsn); + max_lsn_dbtp = &max_lsn_dbt; + } else { + max_lsn_dbtp = NULL; + lp->max_wait_lsn = next_lsn; + } + if (rep->master_id != DB_EID_INVALID) { + rep->stat.st_log_requested++; + (void)__rep_send_message(dbenv, rep->master_id, + REP_LOG_REQ, &next_lsn, max_lsn_dbtp, 0); + } else + (void)__rep_send_message(dbenv, DB_EID_BROADCAST, + REP_MASTER_REQ, NULL, NULL, 0); + + return; +} + +/* + * __rep_finfo_alloc - + * Allocate and initialize a fileinfo structure. + * + * PUBLIC: int __rep_finfo_alloc __P((DB_ENV *, __rep_fileinfo_args *, + * PUBLIC: __rep_fileinfo_args **)); + */ +int +__rep_finfo_alloc(dbenv, rfpsrc, rfpp) + DB_ENV *dbenv; + __rep_fileinfo_args *rfpsrc, **rfpp; +{ + size_t size; + int ret; + + size = sizeof(__rep_fileinfo_args) + rfpsrc->uid.size + + rfpsrc->info.size; + if ((ret = __os_malloc(dbenv, size, rfpp)) != 0) + return (ret); + + memcpy(*rfpp, rfpsrc, size); + return (ret); +} + +/* + * __rep_log_setup - + * We know our first LSN and need to reset the log subsystem + * to get our logs set up for the proper file. + */ +static int +__rep_log_setup(dbenv, rep) + DB_ENV *dbenv; + REP *rep; +{ + DB_LOG *dblp; + DB_LSN lsn; + u_int32_t fnum; + int ret; + char *name; + + dblp = dbenv->lg_handle; + /* + * Set up the log starting at the file number of the first LSN we + * need to get from the master. + */ + if ((ret = __log_newfile(dblp, &lsn, rep->first_lsn.file)) == 0) { + /* + * We do know we want to start this client's log at + * log file 'first_lsn.file'. So we want to forcibly + * remove any log files earlier than that number. + * We don't know what might be in any earlier log files + * so we cannot just use __log_autoremove. + */ + for (fnum = 1; fnum < rep->first_lsn.file; fnum++) { + if ((ret = __log_name(dblp, fnum, &name, NULL, 0)) != 0) + goto err; + (void)__os_unlink(dbenv, name); + __os_free(dbenv, name); + } + } +err: + return (ret); +} + +/* + * __rep_queue_filedone - + * Determine if we're really done getting the pages for a queue file. + * Queue is handled in several steps. + * 1. First we get the meta page only. + * 2. We use the meta-page information to figure out first and last + * page numbers (and if queue wraps, first can be > last. + * 3. If first < last, we do a REP_PAGE_REQ for all pages. + * 4. If first > last, we REP_PAGE_REQ from first -> max page number. + * Then we'll ask for page 1 -> last. + * + * This function can return several things: + * DB_REP_PAGEDONE - if we're done with this file. + * 0 - if we're not doen with this file. + * error - if we get an error doing some operations. + * + * This function will open a dbp handle to the queue file. This is needed + * by most of the QAM macros. We'll open it on the first pass through + * here and we'll close it whenever we decide we're done. + */ +static int +__rep_queue_filedone(dbenv, rep, rfp) + DB_ENV *dbenv; + REP *rep; + __rep_fileinfo_args *rfp; +{ + db_pgno_t first, last; + u_int32_t flags; + int empty, ret, t_ret; +#ifdef DIAGNOSTIC + DB_MSGBUF mb; +#endif + +#ifndef HAVE_QUEUE + COMPQUIET(rep, NULL); + COMPQUIET(rfp, NULL); + return (__db_no_queue_am(dbenv)); +#else + ret = 0; + if (rep->queue_dbp == NULL) { + /* + * We need to do a sync here so that the open + * can find the file and file id. 
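Steps 3 and 4 of the queue handling described above amount to at most two page-request passes, because a wrapped queue stores its live records from first up to the largest possible page and then from page 1 up to last. A hypothetical helper making that explicit:

/*
 * queue_page_passes --
 *	Return the number of request passes (1 or 2) and fill "out" with
 *	the inclusive page ranges: first..last when the queue does not
 *	wrap, otherwise first..max_pgno followed by 1..last.
 */
struct pg_range {
	unsigned long first;
	unsigned long last;
};

static int
queue_page_passes(unsigned long first, unsigned long last,
    unsigned long max_pgno, struct pg_range out[2])
{
	if (first <= last) {
		out[0].first = first;
		out[0].last = last;
		return (1);
	}
	out[0].first = first;
	out[0].last = max_pgno;
	out[1].first = 1;
	out[1].last = last;
	return (2);
}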
+ */ + if ((ret = __memp_sync(dbenv, NULL)) != 0) + goto out; + if ((ret = db_create(&rep->queue_dbp, dbenv, + DB_REP_CREATE)) != 0) + goto out; + flags = DB_NO_AUTO_COMMIT | + (F_ISSET(dbenv, DB_ENV_THREAD) ? DB_THREAD : 0); + if ((ret = __db_open(rep->queue_dbp, NULL, rfp->info.data, + NULL, DB_QUEUE, flags, 0, PGNO_BASE_MD)) != 0) + goto out; + } + if ((ret = __queue_pageinfo(rep->queue_dbp, + &first, &last, &empty, 0, 0)) != 0) + goto out; + RPRINT(dbenv, rep, (dbenv, &mb, + "Queue fileinfo: first %lu, last %lu, empty %d", + (u_long)first, (u_long)last, empty)); + /* + * We can be at the end of 3 possible states. + * 1. We have received the meta-page and now need to get the + * rest of the pages in the database. + * 2. We have received from first -> max_pgno. We might be done, + * or we might need to ask for wrapped pages. + * 3. We have received all pages in the file. We're done. + */ + if (rfp->max_pgno == 0) { + /* + * We have just received the meta page. Set up the next + * pages to ask for and check if the file is empty. + */ + if (empty) + goto out; + if (first > last) { + rfp->max_pgno = + QAM_RECNO_PAGE(rep->queue_dbp, UINT32_MAX); + } else + rfp->max_pgno = last; + RPRINT(dbenv, rep, (dbenv, &mb, + "Queue fileinfo: First req: first %lu, last %lu", + (u_long)first, (u_long)rfp->max_pgno)); + goto req; + } else if (rfp->max_pgno != last) { + /* + * If max_pgno != last that means we're dealing with a + * wrapped situation. Request next batch of pages. + * Set npages to 1 because we already have page 0, the + * meta-page, now we need pages 1-max_pgno. + */ + first = 1; + rfp->max_pgno = last; + RPRINT(dbenv, rep, (dbenv, &mb, + "Queue fileinfo: Wrap req: first %lu, last %lu", + (u_long)first, (u_long)last)); +req: + /* + * Since we're simulating a "gap" to resend new PAGE_REQ + * for this file, we need to set waiting page to last + 1 + * so that we'll ask for all from ready_pg -> last. + */ + rep->npages = first; + rep->ready_pg = first; + rep->waiting_pg = rfp->max_pgno + 1; + rep->max_wait_pg = PGNO_INVALID; + ret = __rep_pggap_req(dbenv, rep, rfp, 0); + return (ret); + } + /* + * max_pgno == last + * If we get here, we have all the pages we need. + * Close the dbp and return. + */ +out: + if (rep->queue_dbp != NULL && + (t_ret = __db_close(rep->queue_dbp, NULL, DB_NOSYNC)) != 0 && + ret == 0) + ret = t_ret; + rep->queue_dbp = NULL; + if (ret == 0) + ret = DB_REP_PAGEDONE; + return (ret); +#endif +} diff --git a/db/rep/rep_method.c b/db/rep/rep_method.c index dadaff072..fb74ecb46 100644 --- a/db/rep/rep_method.c +++ b/db/rep/rep_method.c @@ -1,14 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: rep_method.c,v 1.167 2004/10/07 17:20:12 bostic Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: rep_method.c,v 1.134 2003/11/13 15:41:51 sue Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #include @@ -21,6 +20,10 @@ static const char revid[] = "$Id: rep_method.c,v 1.134 2003/11/13 15:41:51 sue E #include #endif +#ifdef HAVE_RPC +#include "db_server.h" +#endif + #include "db_int.h" #include "dbinc/db_page.h" #include "dbinc/btree.h" @@ -28,16 +31,15 @@ static const char revid[] = "$Id: rep_method.c,v 1.134 2003/11/13 15:41:51 sue E #include "dbinc/txn.h" #ifdef HAVE_RPC -#include "dbinc_auto/db_server.h" #include "dbinc_auto/rpc_client_ext.h" #endif static int __rep_abort_prepared __P((DB_ENV *)); static int __rep_bt_cmp __P((DB *, const DBT *, const DBT *)); -static int __rep_client_dbinit __P((DB_ENV *, int)); -static int __rep_elect __P((DB_ENV *, int, int, u_int32_t, int *)); +static int __rep_elect + __P((DB_ENV *, int, int, int, u_int32_t, int *, u_int32_t)); static int __rep_elect_init - __P((DB_ENV *, DB_LSN *, int, int, int *, u_int32_t *)); + __P((DB_ENV *, DB_LSN *, int, int, int, int *, u_int32_t *)); static int __rep_flush __P((DB_ENV *)); static int __rep_restore_prepared __P((DB_ENV *)); static int __rep_get_limit __P((DB_ENV *, u_int32_t *, u_int32_t *)); @@ -47,16 +49,15 @@ static int __rep_set_rep_transport __P((DB_ENV *, int, int (*)(DB_ENV *, const DBT *, const DBT *, const DB_LSN *, int, u_int32_t))); static int __rep_start __P((DB_ENV *, DBT *, u_int32_t)); -static int __rep_stat __P((DB_ENV *, DB_REP_STAT **, u_int32_t)); static int __rep_wait __P((DB_ENV *, u_int32_t, int *, u_int32_t)); /* * __rep_dbenv_create -- * Replication-specific initialization of the DB_ENV structure. * - * PUBLIC: int __rep_dbenv_create __P((DB_ENV *)); + * PUBLIC: void __rep_dbenv_create __P((DB_ENV *)); */ -int +void __rep_dbenv_create(dbenv) DB_ENV *dbenv; { @@ -67,6 +68,7 @@ __rep_dbenv_create(dbenv) dbenv->rep_process_message = __dbcl_rep_process_message; dbenv->rep_start = __dbcl_rep_start; dbenv->rep_stat = __dbcl_rep_stat; + dbenv->rep_stat_print = NULL; dbenv->get_rep_limit = __dbcl_rep_get_limit; dbenv->set_rep_limit = __dbcl_rep_set_limit; dbenv->set_rep_request = __dbcl_rep_set_request; @@ -79,14 +81,13 @@ __rep_dbenv_create(dbenv) dbenv->rep_flush = __rep_flush; dbenv->rep_process_message = __rep_process_message; dbenv->rep_start = __rep_start; - dbenv->rep_stat = __rep_stat; + dbenv->rep_stat = __rep_stat_pp; + dbenv->rep_stat_print = __rep_stat_print_pp; dbenv->get_rep_limit = __rep_get_limit; dbenv->set_rep_limit = __rep_set_limit; dbenv->set_rep_request = __rep_set_request; dbenv->set_rep_transport = __rep_set_rep_transport; } - - return (0); } /* @@ -127,7 +128,7 @@ __rep_open(dbenv) * recovery, and want to prohibit new transactions from entering and cause * existing ones to return immediately (with a DB_LOCK_DEADLOCK error). * - * There is also the rep->timestamp which is updated whenever significant + * There is also the renv->rep_timestamp which is updated whenever significant * events (i.e., new masters, log rollback, etc). Upon creation, a handle * is associated with the current timestamp. 
Each time a handle enters the * library it must check if the handle timestamp is the same as the one @@ -147,6 +148,9 @@ __rep_start(dbenv, dbt, flags) REP *rep; u_int32_t repflags; int announce, init_db, redo_prepared, ret, sleep_cnt, t_ret, was_client; +#ifdef DIAGNOSTIC + DB_MSGBUF mb; +#endif PANIC_CHECK(dbenv); ENV_ILLEGAL_BEFORE_OPEN(dbenv, "DB_ENV->rep_start"); @@ -156,24 +160,19 @@ __rep_start(dbenv, dbt, flags) rep = db_rep->region; if ((ret = __db_fchk(dbenv, "DB_ENV->rep_start", flags, - DB_REP_CLIENT | DB_REP_LOGSONLY | DB_REP_MASTER)) != 0) + DB_REP_CLIENT | DB_REP_MASTER)) != 0) return (ret); /* Exactly one of CLIENT and MASTER must be specified. */ if ((ret = __db_fcchk(dbenv, "DB_ENV->rep_start", flags, DB_REP_CLIENT, DB_REP_MASTER)) != 0) return (ret); - if (!LF_ISSET(DB_REP_CLIENT | DB_REP_MASTER | DB_REP_LOGSONLY)) { + if (!LF_ISSET(DB_REP_CLIENT | DB_REP_MASTER)) { __db_err(dbenv, "DB_ENV->rep_start: replication mode must be specified"); return (EINVAL); } - /* Masters can't be logs-only. */ - if ((ret = __db_fcchk(dbenv, - "DB_ENV->rep_start", flags, DB_REP_LOGSONLY, DB_REP_MASTER)) != 0) - return (ret); - /* We need a transport function. */ if (dbenv->rep_send == NULL) { __db_err(dbenv, @@ -199,10 +198,7 @@ __rep_start(dbenv, dbt, flags) /* * There is already someone in rep_start. Return. */ -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, "Thread already in rep_start"); -#endif + RPRINT(dbenv, rep, (dbenv, &mb, "Thread already in rep_start")); goto err; } else rep->start_th = 1; @@ -218,7 +214,7 @@ __rep_start(dbenv, dbt, flags) "DB_ENV->rep_start waiting %d minutes for replication message thread", sleep_cnt / 60); MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); - (void)__os_sleep(dbenv, 1, 0); + __os_sleep(dbenv, 1, 0); MUTEX_LOCK(dbenv, db_rep->rep_mutexp); } @@ -226,7 +222,7 @@ __rep_start(dbenv, dbt, flags) rep->eid = dbenv->rep_eid; if (LF_ISSET(DB_REP_MASTER)) { - was_client = F_ISSET(rep, REP_F_UPGRADE); + was_client = F_ISSET(rep, REP_F_CLIENT); if (was_client) { /* * If we're upgrading from having been a client, @@ -269,11 +265,9 @@ __rep_start(dbenv, dbt, flags) } if (rep->egen <= rep->gen) rep->egen = rep->gen + 1; -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, "New master gen %lu, egen %lu", - (u_long)rep->gen, (u_long)rep->egen); -#endif + RPRINT(dbenv, rep, (dbenv, &mb, + "New master gen %lu, egen %lu", + (u_long)rep->gen, (u_long)rep->egen)); } rep->master_id = rep->eid; /* @@ -281,6 +275,7 @@ __rep_start(dbenv, dbt, flags) * REP_F_NOARCHIVE, REP_F_INIT and REP_F_READY. */ rep->flags = REP_F_MASTER; + rep->start_th = 0; MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); dblp = (DB_LOG *)dbenv->lg_handle; R_LOCK(dbenv, &dblp->reginfo); @@ -310,16 +305,13 @@ __rep_start(dbenv, dbt, flags) ret = t_ret; } else { init_db = 0; - was_client = F_ISSET(rep, REP_ISCLIENT); + was_client = F_ISSET(rep, REP_F_CLIENT); announce = !was_client || rep->master_id == DB_EID_INVALID; /* Zero out everything except recovery and tally flags. 
*/ repflags = F_ISSET(rep, REP_F_NOARCHIVE | - REP_F_READY | REP_F_RECOVER | REP_F_TALLY); - if (LF_ISSET(DB_REP_LOGSONLY)) - FLD_SET(repflags, REP_F_LOGSONLY); - else - FLD_SET(repflags, REP_F_UPGRADE); + REP_F_RECOVER_MASK | REP_F_TALLY); + FLD_SET(repflags, REP_F_CLIENT); rep->flags = repflags; if (!was_client) { @@ -339,8 +331,14 @@ __rep_start(dbenv, dbt, flags) if ((ret = __rep_abort_prepared(dbenv)) != 0) goto errlock; - if ((ret = __rep_client_dbinit(dbenv, init_db)) != 0) + MUTEX_LOCK(dbenv, db_rep->db_mutexp); + ret = __rep_client_dbinit(dbenv, init_db, REP_DB); + MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); + if (ret != 0) goto errlock; + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + rep->start_th = 0; + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); /* * If this client created a newly replicated environment, @@ -353,20 +351,25 @@ __rep_start(dbenv, dbt, flags) if (announce) (void)__rep_send_message(dbenv, DB_EID_BROADCAST, REP_NEWCLIENT, NULL, dbt, 0); + else + (void)__rep_send_message(dbenv, + DB_EID_BROADCAST, REP_ALIVE_REQ, NULL, NULL, 0); } - /* - * We have separate labels for errors. If we're returning an - * error before we've set start_th, we use 'err'. If - * we are erroring while holding the rep_mutex, then we use - * 'errunlock' label. If we're erroring without holding the rep - * mutex we must use 'errlock'. - */ + if (0) { + /* + * We have separate labels for errors. If we're returning an + * error before we've set start_th, we use 'err'. If + * we are erroring while holding the rep_mutex, then we use + * 'errunlock' label. If we're erroring without holding the rep + * mutex we must use 'errlock'. + */ errlock: - MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); errunlock: - rep->start_th = 0; -err: MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + rep->start_th = 0; +err: MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + } return (ret); } @@ -378,29 +381,43 @@ err: MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); * this is the first thread/process starting up and therefore should create * the LSN database. This routine must be called once by each process acting * as a client. + * + * Assumes caller holds appropriate mutex. + * + * PUBLIC: int __rep_client_dbinit __P((DB_ENV *, int, repdb_t)); */ -static int -__rep_client_dbinit(dbenv, startup) +int +__rep_client_dbinit(dbenv, startup, which) DB_ENV *dbenv; int startup; + repdb_t which; { DB_REP *db_rep; - DB *dbp; + DB *dbp, **rdbpp; + REP *rep; int ret, t_ret; u_int32_t flags; + const char *name; PANIC_CHECK(dbenv); db_rep = dbenv->rep_handle; + rep = db_rep->region; dbp = NULL; #define REPDBNAME "__db.rep.db" +#define REPPAGENAME "__db.reppg.db" + if (which == REP_DB) { + name = REPDBNAME; + rdbpp = &db_rep->rep_db; + } else { + name = REPPAGENAME; + rdbpp = &rep->file_dbp; + } /* Check if this has already been called on this environment. */ - if (db_rep->rep_db != NULL) + if (*rdbpp != NULL) return (0); - MUTEX_LOCK(dbenv, db_rep->db_mutexp); - if (startup) { if ((ret = db_create(&dbp, dbenv, DB_REP_CREATE)) != 0) goto err; @@ -408,12 +425,13 @@ __rep_client_dbinit(dbenv, startup) * Ignore errors, because if the file doesn't exist, this * is perfectly OK. 
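Of the two client bookkeeping databases opened here, the btree one (REPDBNAME) is ordered by the LSN carried in each key, while the recno one (REPPAGENAME) is simply keyed by record number. The comparator below is a standalone sketch of the LSN ordering, written against the public bt_compare callback signature and assuming, for the sketch only, that the DB_LSN sits at the front of the key:

#include <string.h>
#include <db.h>

/*
 * lsn_key_cmp --
 *	Order keys by a DB_LSN copied out of the key with memcpy(), since
 *	the key bytes may not be suitably aligned.
 */
static int
lsn_key_cmp(DB *dbp, const DBT *dbt1, const DBT *dbt2)
{
	DB_LSN lsn1, lsn2;

	(void)dbp;			/* unused */
	memcpy(&lsn1, dbt1->data, sizeof(DB_LSN));
	memcpy(&lsn2, dbt2->data, sizeof(DB_LSN));

	if (lsn1.file != lsn2.file)
		return (lsn1.file < lsn2.file ? -1 : 1);
	if (lsn1.offset != lsn2.offset)
		return (lsn1.offset < lsn2.offset ? -1 : 1);
	return (0);
}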
*/ - (void)__db_remove(dbp, NULL, REPDBNAME, NULL, DB_FORCE); + (void)__db_remove(dbp, NULL, name, NULL, DB_FORCE); } if ((ret = db_create(&dbp, dbenv, DB_REP_CREATE)) != 0) goto err; - if ((ret = __bam_set_bt_compare(dbp, __rep_bt_cmp)) != 0) + if (which == REP_DB && + (ret = __bam_set_bt_compare(dbp, __rep_bt_cmp)) != 0) goto err; /* Allow writes to this database on a client. */ @@ -423,21 +441,20 @@ __rep_client_dbinit(dbenv, startup) (startup ? DB_CREATE : 0) | (F_ISSET(dbenv, DB_ENV_THREAD) ? DB_THREAD : 0); - if ((ret = __db_open(dbp, NULL, - REPDBNAME, NULL, DB_BTREE, flags, 0, PGNO_BASE_MD)) != 0) + if ((ret = __db_open(dbp, NULL, name, NULL, + (which == REP_DB ? DB_BTREE : DB_RECNO), + flags, 0, PGNO_BASE_MD)) != 0) goto err; - db_rep->rep_db = dbp; + *rdbpp= dbp; if (0) { err: if (dbp != NULL && (t_ret = __db_close(dbp, NULL, DB_NOSYNC)) != 0 && ret == 0) ret = t_ret; - db_rep->rep_db = NULL; + *rdbpp = NULL; } - MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); - return (ret); } @@ -462,8 +479,8 @@ __rep_bt_cmp(dbp, dbt1, dbt2) rp1 = dbt1->data; rp2 = dbt2->data; - __ua_memcpy(&lsn1, &rp1->lsn, sizeof(DB_LSN)); - __ua_memcpy(&lsn2, &rp2->lsn, sizeof(DB_LSN)); + (void)__ua_memcpy(&lsn1, &rp1->lsn, sizeof(DB_LSN)); + (void)__ua_memcpy(&lsn2, &rp2->lsn, sizeof(DB_LSN)); if (lsn1.file > lsn2.file) return (1); @@ -548,7 +565,7 @@ __rep_restore_prepared(dbenv) __txn_regop_args *regop_args; __txn_xa_regop_args *prep_args; int ret, t_ret; - u_int32_t hi_txn, low_txn, rectype; + u_int32_t hi_txn, low_txn, rectype, status; void *txninfo; txninfo = NULL; @@ -682,11 +699,13 @@ __rep_restore_prepared(dbenv) goto err; ret = __db_txnlist_find(dbenv, - txninfo, regop_args->txnid->txnid); - if (ret == TXN_NOTFOUND) + txninfo, regop_args->txnid->txnid, &status); + if (ret == DB_NOTFOUND) ret = __db_txnlist_add(dbenv, txninfo, regop_args->txnid->txnid, regop_args->opcode, &lsn); + else if (ret != 0) + goto err; __os_free(dbenv, regop_args); break; case DB___txn_xa_regop: @@ -699,8 +718,8 @@ __rep_restore_prepared(dbenv) &prep_args)) != 0) goto err; ret = __db_txnlist_find(dbenv, txninfo, - prep_args->txnid->txnid); - if (ret == TXN_NOTFOUND) { + prep_args->txnid->txnid, &status); + if (ret == DB_NOTFOUND) { if (prep_args->opcode == TXN_ABORT) ret = __db_txnlist_add(dbenv, txninfo, prep_args->txnid->txnid, @@ -709,7 +728,8 @@ __rep_restore_prepared(dbenv) __rep_process_txn(dbenv, &rec)) == 0) ret = __txn_restore_txn(dbenv, &lsn, prep_args); - } + } else if (ret != 0) + goto err; __os_free(dbenv, prep_args); break; default: @@ -876,20 +896,25 @@ __rep_set_rep_transport(dbenv, eid, f_send) * a new master. */ static int -__rep_elect(dbenv, nsites, priority, timeout, eidp) +__rep_elect(dbenv, nsites, nvotes, priority, timeout, eidp, flags) DB_ENV *dbenv; - int nsites, priority; + int nsites, nvotes, priority; u_int32_t timeout; int *eidp; + u_int32_t flags; { DB_LOG *dblp; DB_LSN lsn; DB_REP *db_rep; REP *rep; - int done, in_progress, ret, send_vote, tiebreaker; - u_int32_t egen, orig_tally, pid, sec, usec; + int ack, done, in_progress, ret, send_vote; + u_int32_t egen, orig_tally, tiebreaker, to; +#ifdef DIAGNOSTIC + DB_MSGBUF mb; +#endif PANIC_CHECK(dbenv); + COMPQUIET(flags, 0); ENV_REQUIRES_CONFIG(dbenv, dbenv->rep_handle, "rep_elect", DB_INIT_REP); /* Error checking. 
*/ @@ -898,23 +923,57 @@ __rep_elect(dbenv, nsites, priority, timeout, eidp) "DB_ENV->rep_elect: nsites must be greater than 0"); return (EINVAL); } + if (nvotes < 0) { + __db_err(dbenv, + "DB_ENV->rep_elect: nvotes may not be negative"); + return (EINVAL); + } if (priority < 0) { __db_err(dbenv, "DB_ENV->rep_elect: priority may not be negative"); return (EINVAL); } + if (nsites < nvotes) { + __db_err(dbenv, + "DB_ENV->rep_elect: nvotes (%d) is larger than nsites (%d)", + nvotes, nsites); + return (EINVAL); + } + + ack = nvotes; + /* If they give us a 0 for nvotes, default to simple majority. */ + if (nvotes == 0) + ack = (nsites / 2) + 1; + + /* + * XXX + * If users give us less than a majority, they run the risk of + * having a network partition. However, this also allows the + * scenario of master/1 client to elect the client. Allow + * sub-majority values, but give a warning. + */ + if (nvotes <= (nsites / 2)) { + __db_err(dbenv, + "DB_ENV->rep_elect:WARNING: nvotes (%d) is sub-majority with nsites (%d)", + nvotes, nsites); + } db_rep = dbenv->rep_handle; rep = db_rep->region; dblp = dbenv->lg_handle; + RPRINT(dbenv, rep, + (dbenv, &mb, "Start election nsites %d, ack %d, priority %d", + nsites, ack, priority)); + R_LOCK(dbenv, &dblp->reginfo); lsn = ((LOG *)dblp->reginfo.primary)->lsn; R_UNLOCK(dbenv, &dblp->reginfo); orig_tally = 0; + to = timeout; if ((ret = __rep_elect_init(dbenv, - &lsn, nsites, priority, &in_progress, &orig_tally)) != 0) { + &lsn, nsites, ack, priority, &in_progress, &orig_tally)) != 0) { if (ret == DB_REP_NEWMASTER) { ret = 0; *eidp = dbenv->rep_eid; @@ -926,82 +985,98 @@ __rep_elect(dbenv, nsites, priority, timeout, eidp) * just quietly return and not interfere. */ if (in_progress) { - *eidp = dbenv->rep_eid; + *eidp = rep->master_id; return (0); } (void)__rep_send_message(dbenv, DB_EID_BROADCAST, REP_MASTER_REQ, NULL, NULL, 0); - ret = __rep_wait(dbenv, timeout/4, eidp, REP_F_EPHASE1); + ret = __rep_wait(dbenv, to/4, eidp, REP_F_EPHASE1); switch (ret) { case 0: /* Check if we found a master. */ if (*eidp != DB_EID_INVALID) { -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, - DB_VERB_REPLICATION)) - __db_err(dbenv, - "Found master %d", *eidp); -#endif - return (0); + RPRINT(dbenv, rep, (dbenv, &mb, + "Found master %d", *eidp)); + goto edone; } /* * If we didn't find a master, continue * the election. */ break; + case DB_REP_EGENCHG: + /* + * Egen changed, just continue with election. + */ + break; case DB_TIMEOUT: -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, - "Did not find master. Sending vote1"); -#endif + RPRINT(dbenv, rep, (dbenv, &mb, + "Did not find master. Sending vote1")); break; default: goto err; } - /* Generate a randomized tiebreaker value. */ restart: - __os_id(&pid); - if ((ret = __os_clock(dbenv, &sec, &usec)) != 0) - return (ret); - tiebreaker = pid ^ sec ^ usec ^ (u_int)rand() ^ P_TO_UINT32(&pid); + /* Generate a randomized tiebreaker value. */ + __os_unique_id(dbenv, &tiebreaker); MUTEX_LOCK(dbenv, db_rep->rep_mutexp); F_SET(rep, REP_F_EPHASE1 | REP_F_NOARCHIVE); F_CLR(rep, REP_F_TALLY); + /* + * We are about to participate at this egen. We must + * write out the next egen before participating in this one + * so that if we crash we can never participate in this egen + * again. 
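The error checking at the top of __rep_elect above boils down to a single threshold: an nvotes of 0 means a simple majority, anything else is taken as given, and sub-majority values are permitted but worth a warning because they allow a partitioned network to elect two masters. A hypothetical restatement:

#include <stdio.h>

/*
 * election_ack_threshold --
 *	Return the number of votes needed to declare a winner, or -1 on
 *	invalid arguments.  Warns when the threshold is not a majority.
 */
static int
election_ack_threshold(int nsites, int nvotes)
{
	int ack;

	if (nsites <= 0 || nvotes < 0 || nvotes > nsites)
		return (-1);
	ack = nvotes == 0 ? (nsites / 2) + 1 : nvotes;
	if (ack <= nsites / 2)
		fprintf(stderr,
		    "warning: %d of %d votes is not a majority\n",
		    ack, nsites);
	return (ack);
}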
+ */ + if ((ret = __rep_write_egen(dbenv, rep->egen + 1)) != 0) + goto lockdone; + /* Tally our own vote */ if (__rep_tally(dbenv, rep, rep->eid, &rep->sites, rep->egen, - rep->tally_off) != 0) + rep->tally_off) != 0) { + ret = EINVAL; goto lockdone; + } __rep_cmp_vote(dbenv, rep, &rep->eid, &lsn, priority, rep->gen, tiebreaker); -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, "Beginning an election"); -#endif + RPRINT(dbenv, rep, (dbenv, &mb, "Beginning an election")); /* Now send vote */ send_vote = DB_EID_INVALID; egen = rep->egen; MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); - __rep_send_vote(dbenv, &lsn, nsites, priority, tiebreaker, egen, + __rep_send_vote(dbenv, &lsn, nsites, ack, priority, tiebreaker, egen, DB_EID_BROADCAST, REP_VOTE1); - ret = __rep_wait(dbenv, timeout, eidp, REP_F_EPHASE1); + DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTVOTE1, ret, NULL); + ret = __rep_wait(dbenv, to, eidp, REP_F_EPHASE1); switch (ret) { case 0: /* Check if election complete or phase complete. */ if (*eidp != DB_EID_INVALID) { -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, - DB_VERB_REPLICATION)) - __db_err(dbenv, - "Ended election phase 1 %d", ret); -#endif - return (0); + RPRINT(dbenv, rep, (dbenv, &mb, + "Ended election phase 1 %d", ret)); + goto edone; } goto phase2; + case DB_REP_EGENCHG: + if (to > timeout) + to = timeout; + to = (to * 8) / 10; + RPRINT(dbenv, rep, (dbenv, &mb, +"Egen changed while waiting. Now %lu. New timeout %lu, orig timeout %lu", + (u_long)rep->egen, (u_long)to, (u_long)timeout)); + /* + * If the egen changed while we were sleeping, that + * means we're probably late to the next election, + * so we'll backoff our timeout so that we don't get + * into an out-of-phase election scenario. + * + * Backoff to 80% of the current timeout. + */ + goto restart; case DB_TIMEOUT: break; default: @@ -1020,14 +1095,11 @@ restart: */ if (egen != rep->egen) { MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, "Egen changed from %lu to %lu", - (u_long)egen, (u_long)rep->egen); -#endif + RPRINT(dbenv, rep, (dbenv, &mb, "Egen changed from %lu to %lu", + (u_long)egen, (u_long)rep->egen)); goto restart; } - if (rep->sites > rep->nsites / 2) { + if (rep->sites >= rep->nvotes) { /* We think we've seen enough to cast a vote. */ send_vote = rep->winner; @@ -1039,11 +1111,8 @@ restart: if (rep->winner == rep->eid) { (void)__rep_tally(dbenv, rep, rep->eid, &rep->votes, egen, rep->v2tally_off); -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, - "Counted my vote %d", rep->votes); -#endif + RPRINT(dbenv, rep, (dbenv, &mb, + "Counted my vote %d", rep->votes)); } F_SET(rep, REP_F_EPHASE2); F_CLR(rep, REP_F_EPHASE1); @@ -1051,12 +1120,9 @@ restart: MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); if (send_vote == DB_EID_INVALID) { /* We do not have enough votes to elect. */ -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, - "Not enough votes to elect: received %d of %d", - rep->sites, rep->nsites); -#endif + RPRINT(dbenv, rep, (dbenv, &mb, + "Not enough votes to elect: recvd %d of %d from %d sites", + rep->sites, rep->nvotes, rep->nsites)); ret = DB_REP_UNAVAIL; goto err; @@ -1066,26 +1132,40 @@ restart: * for all the vote2's. 
*/ if (send_vote != rep->eid) { -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION) && - send_vote != rep->eid) - __db_err(dbenv, "Sending vote"); -#endif - __rep_send_vote(dbenv, NULL, 0, 0, 0, egen, + RPRINT(dbenv, rep, (dbenv, &mb, "Sending vote")); + __rep_send_vote(dbenv, NULL, 0, 0, 0, 0, egen, send_vote, REP_VOTE2); + /* + * If we are NOT the new master we want to send + * our vote to the winner, and wait longer. The + * reason is that the winner may be "behind" us + * in the election waiting and if the master is + * down, the winner will wait the full timeout + * and we want to give the winner enough time to + * process all the votes. Otherwise we could + * incorrectly return DB_REP_UNAVAIL and start a + * new election before the winner can declare + * itself. + */ + to = to * 2; } -phase2: - ret = __rep_wait(dbenv, timeout, eidp, REP_F_EPHASE2); -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, - DB_VERB_REPLICATION)) - __db_err(dbenv, - "Ended election phase 2 %d", ret); -#endif + +phase2: ret = __rep_wait(dbenv, to, eidp, REP_F_EPHASE2); + RPRINT(dbenv, rep, (dbenv, &mb, + "Ended election phase 2 %d", ret)); switch (ret) { case 0: - return (0); + goto edone; + case DB_REP_EGENCHG: + if (to > timeout) + to = timeout; + to = (to * 8) / 10; + RPRINT(dbenv, rep, (dbenv, &mb, +"While waiting egen changed to %lu. Phase 2 New timeout %lu, orig timeout %lu", + (u_long)rep->egen, + (u_long)to, (u_long)timeout)); + goto restart; case DB_TIMEOUT: ret = DB_REP_UNAVAIL; break; @@ -1093,13 +1173,17 @@ phase2: goto err; } MUTEX_LOCK(dbenv, db_rep->rep_mutexp); - done = rep->votes > rep->nsites / 2; -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, - "After phase 2: done %d, votes %d, nsites %d", - done, rep->votes, rep->nsites); -#endif + if (egen != rep->egen) { + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + RPRINT(dbenv, rep, (dbenv, &mb, + "Egen ph2 changed from %lu to %lu", + (u_long)egen, (u_long)rep->egen)); + goto restart; + } + done = rep->votes >= rep->nvotes; + RPRINT(dbenv, rep, (dbenv, &mb, + "After phase 2: done %d, votes %d, nsites %d", + done, rep->votes, rep->nsites)); if (send_vote == rep->eid && done) { __rep_elect_master(dbenv, rep, eidp); ret = 0; @@ -1121,13 +1205,19 @@ lockdone: else if (orig_tally) F_SET(rep, orig_tally); -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, - "Ended election with %d, sites %d, egen %lu, flags 0x%lx", - ret, rep->sites, (u_long)rep->egen, (u_long)rep->flags); -#endif + /* + * If the election finished elsewhere, we need to decrement + * the elect_th anyway. + */ + if (0) +edone: MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + rep->elect_th = 0; + + RPRINT(dbenv, rep, (dbenv, &mb, + "Ended election with %d, sites %d, egen %lu, flags 0x%lx", + ret, rep->sites, (u_long)rep->egen, (u_long)rep->flags)); MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); +DB_TEST_RECOVERY_LABEL return (ret); } @@ -1137,10 +1227,10 @@ lockdone: * already in progress; makes it 0 otherwise. 
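The waits above adjust their timeout in two ways: a site that is not the prospective winner doubles its phase-2 wait so the winner has time to count the vote2 messages, and an egen change while waiting shrinks the next attempt to 80% of the current timeout (never above the original) so an out-of-phase site falls back into step. A simplified, hypothetical restatement of both rules:

/*
 * next_election_timeout --
 *	Return the timeout to use for the next wait, given whether the
 *	election generation changed and whether this site expects to win.
 */
static unsigned long
next_election_timeout(unsigned long to, unsigned long orig_timeout,
    int egen_changed, int i_am_winner)
{
	if (egen_changed) {
		if (to > orig_timeout)
			to = orig_timeout;
		return (to * 8 / 10);	/* back off and restart */
	}
	if (!i_am_winner)
		to *= 2;		/* let the winner finish tallying */
	return (to);
}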
*/ static int -__rep_elect_init(dbenv, lsnp, nsites, priority, beginp, otally) +__rep_elect_init(dbenv, lsnp, nsites, nvotes, priority, beginp, otally) DB_ENV *dbenv; DB_LSN *lsnp; - int nsites, priority; + int nsites, nvotes, priority; int *beginp; u_int32_t *otally; { @@ -1167,7 +1257,7 @@ __rep_elect_init(dbenv, lsnp, nsites, priority, beginp, otally) MUTEX_LOCK(dbenv, db_rep->rep_mutexp); if (otally != NULL) *otally = F_ISSET(rep, REP_F_TALLY); - *beginp = IN_ELECTION(rep); + *beginp = IN_ELECTION(rep) || rep->elect_th; if (!*beginp) { /* * Make sure that we always initialize all the election fields @@ -1179,7 +1269,9 @@ __rep_elect_init(dbenv, lsnp, nsites, priority, beginp, otally) (ret = __rep_grow_sites(dbenv, nsites)) != 0) goto err; DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTINIT, ret, NULL); + rep->elect_th = 1; rep->nsites = nsites; + rep->nvotes = nvotes; rep->priority = priority; rep->master_id = DB_EID_INVALID; } @@ -1201,19 +1293,19 @@ __rep_elect_master(dbenv, rep, eidp) REP *rep; int *eidp; { +#ifdef DIAGNOSTIC + DB_MSGBUF mb; +#else + COMPQUIET(dbenv, NULL); +#endif rep->master_id = rep->eid; F_SET(rep, REP_F_MASTERELECT); if (eidp != NULL) *eidp = rep->master_id; rep->stat.st_elections_won++; -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, - "Got enough votes to win; election done; winner is %d, gen %lu", - rep->master_id, (u_long)rep->gen); -#else - COMPQUIET(dbenv, NULL); -#endif + RPRINT(dbenv, rep, (dbenv, &mb, + "Got enough votes to win; election done; winner is %d, gen %lu", + rep->master_id, (u_long)rep->gen)); } static int @@ -1225,12 +1317,13 @@ __rep_wait(dbenv, timeout, eidp, flags) { DB_REP *db_rep; REP *rep; - int done; - u_int32_t sleeptime; + int done, echg; + u_int32_t egen, sleeptime; - done = 0; + done = echg = 0; db_rep = dbenv->rep_handle; rep = db_rep->region; + egen = rep->egen; /* * The user specifies an overall timeout function, but checking @@ -1241,8 +1334,9 @@ __rep_wait(dbenv, timeout, eidp, flags) if (sleeptime == 0) sleeptime++; while (timeout > 0) { - (void)__os_sleep(dbenv, 0, sleeptime); + __os_sleep(dbenv, 0, sleeptime); MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + echg = egen != rep->egen; done = !F_ISSET(rep, flags) && rep->master_id != DB_EID_INVALID; *eidp = rep->master_id; @@ -1251,6 +1345,9 @@ __rep_wait(dbenv, timeout, eidp, flags) if (done) return (0); + if (echg) + return (DB_REP_EGENCHG); + if (timeout > sleeptime) timeout -= sleeptime; else @@ -1292,120 +1389,3 @@ err: if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) ret = t_ret; return (ret); } - -/* - * __rep_stat -- - * Fetch replication statistics. - */ -static int -__rep_stat(dbenv, statp, flags) - DB_ENV *dbenv; - DB_REP_STAT **statp; - u_int32_t flags; -{ - DB_LOG *dblp; - DB_REP *db_rep; - DB_REP_STAT *stats; - LOG *lp; - REP *rep; - u_int32_t queued, repflags; - int dolock, ret; - - PANIC_CHECK(dbenv); - ENV_REQUIRES_CONFIG(dbenv, dbenv->rep_handle, "rep_stat", DB_INIT_REP); - - db_rep = dbenv->rep_handle; - rep = db_rep->region; - dblp = dbenv->lg_handle; - lp = dblp->reginfo.primary; - - *statp = NULL; - if ((ret = __db_fchk(dbenv, - "DB_ENV->rep_stat", flags, DB_STAT_CLEAR)) != 0) - return (ret); - - /* Allocate a stat struct to return to the user. */ - if ((ret = __os_umalloc(dbenv, sizeof(DB_REP_STAT), &stats)) != 0) - return (ret); - - /* - * Read without holding the lock. If we are in client - * recovery, we copy just the stats struct so we won't - * block. 
We only copy out those stats that don't - * require acquiring any mutex. - */ - repflags = rep->flags; - if (FLD_ISSET(repflags, REP_F_RECOVER)) - dolock = 0; - else { - dolock = 1; - MUTEX_LOCK(dbenv, db_rep->rep_mutexp); - } - memcpy(stats, &rep->stat, sizeof(*stats)); - - /* Copy out election stats. */ - if (IN_ELECTION_TALLY(rep)) { - if (F_ISSET(rep, REP_F_EPHASE1)) - stats->st_election_status = 1; - else if (F_ISSET(rep, REP_F_EPHASE2)) - stats->st_election_status = 2; - - stats->st_election_nsites = rep->sites; - stats->st_election_cur_winner = rep->winner; - stats->st_election_priority = rep->w_priority; - stats->st_election_gen = rep->w_gen; - stats->st_election_lsn = rep->w_lsn; - stats->st_election_votes = rep->votes; - stats->st_election_tiebreaker = rep->w_tiebreaker; - } - - /* Copy out other info that's protected by the rep mutex. */ - stats->st_env_id = rep->eid; - stats->st_env_priority = rep->priority; - stats->st_nsites = rep->nsites; - stats->st_master = rep->master_id; - stats->st_gen = rep->gen; - - if (F_ISSET(rep, REP_F_MASTER)) - stats->st_status = DB_REP_MASTER; - else if (F_ISSET(rep, REP_F_LOGSONLY)) - stats->st_status = DB_REP_LOGSONLY; - else if (F_ISSET(rep, REP_F_UPGRADE)) - stats->st_status = DB_REP_CLIENT; - else - stats->st_status = 0; - - if (LF_ISSET(DB_STAT_CLEAR)) { - queued = rep->stat.st_log_queued; - memset(&rep->stat, 0, sizeof(rep->stat)); - rep->stat.st_log_queued = rep->stat.st_log_queued_total = - rep->stat.st_log_queued_max = queued; - } - - if (dolock) { - stats->st_in_recovery = 0; - MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); - MUTEX_LOCK(dbenv, db_rep->db_mutexp); - } else - stats->st_in_recovery = 1; - - /* - * Log-related replication info is stored in the log system and - * protected by the log region lock. - */ - if (F_ISSET(rep, REP_ISCLIENT)) { - stats->st_next_lsn = lp->ready_lsn; - stats->st_waiting_lsn = lp->waiting_lsn; - } else { - if (F_ISSET(rep, REP_F_MASTER)) - stats->st_next_lsn = lp->lsn; - else - ZERO_LSN(stats->st_next_lsn); - ZERO_LSN(stats->st_waiting_lsn); - } - if (dolock) - MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); - - *statp = stats; - return (0); -} diff --git a/db/rep/rep_record.c b/db/rep/rep_record.c index cb6d83bc4..c37eaf7fd 100644 --- a/db/rep/rep_record.c +++ b/db/rep/rep_record.c @@ -1,17 +1,26 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: rep_record.c,v 1.251 2004/10/14 12:56:13 sue Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: rep_record.c,v 1.193 2003/11/14 05:32:31 ubell Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES +#if TIME_WITH_SYS_TIME +#include +#include +#else +#if HAVE_SYS_TIME_H +#include +#else +#include +#endif +#endif + #include #include #endif @@ -27,36 +36,36 @@ static const char revid[] = "$Id: rep_record.c,v 1.193 2003/11/14 05:32:31 ubell static int __rep_apply __P((DB_ENV *, REP_CONTROL *, DBT *, DB_LSN *)); static int __rep_collect_txn __P((DB_ENV *, DB_LSN *, LSN_COLLECTION *)); +static int __rep_do_ckp __P((DB_ENV *, DBT *, REP_CONTROL *)); static int __rep_dorecovery __P((DB_ENV *, DB_LSN *, DB_LSN *)); +static int __rep_getnext __P((DB_ENV *)); static int __rep_lsn_cmp __P((const void *, const void *)); static int __rep_newfile __P((DB_ENV *, REP_CONTROL *, DB_LSN *)); -static int __rep_verify_match __P((DB_ENV *, REP_CONTROL *, time_t)); - -#define IS_SIMPLE(R) ((R) != DB___txn_regop && (R) != DB___txn_xa_regop && \ - (R) != DB___txn_ckp && (R) != DB___dbreg_register) +static int __rep_process_rec __P((DB_ENV *, + REP_CONTROL *, DBT *, u_int32_t *, DB_LSN *)); +static int __rep_remfirst __P((DB_ENV *, DBT *, DBT *)); +static int __rep_resend_req __P((DB_ENV *, int)); +static int __rep_verify_match __P((DB_ENV *, DB_LSN *, time_t)); /* Used to consistently designate which messages ought to be received where. */ -#ifdef DIAGNOSTIC #define MASTER_ONLY(rep, rp) do { \ if (!F_ISSET(rep, REP_F_MASTER)) { \ - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) { \ - __db_err(dbenv, "Master record received on client"); \ - __rep_print_message(dbenv, \ - *eidp, rp, "rep_process_message"); \ - } \ + RPRINT(dbenv, rep, \ + (dbenv, &mb, "Master record received on client")); \ + REP_PRINT_MESSAGE(dbenv, \ + *eidp, rp, "rep_process_message"); \ ret = EINVAL; \ goto errlock; \ } \ } while (0) #define CLIENT_ONLY(rep, rp) do { \ - if (!F_ISSET(rep, REP_ISCLIENT)) { \ - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) { \ - __db_err(dbenv, "Client record received on master"); \ - __rep_print_message(dbenv, \ - *eidp, rp, "rep_process_message"); \ - } \ + if (!F_ISSET(rep, REP_F_CLIENT)) { \ + RPRINT(dbenv, rep, \ + (dbenv, &mb, "Client record received on master")); \ + REP_PRINT_MESSAGE(dbenv, \ + *eidp, rp, "rep_process_message"); \ (void)__rep_send_message(dbenv, \ DB_EID_BROADCAST, REP_DUPMASTER, NULL, NULL, 0); \ ret = DB_REP_DUPMASTER; \ @@ -64,13 +73,10 @@ static int __rep_verify_match __P((DB_ENV *, REP_CONTROL *, time_t)); } \ } while (0) -#define MASTER_CHECK(dbenv, eid, rep) \ -do { \ +#define MASTER_CHECK(dbenv, eid, rep) do { \ if (rep->master_id == DB_EID_INVALID) { \ - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) \ - __db_err(dbenv, \ - "Received record from %d, master is INVALID",\ - eid); \ + RPRINT(dbenv, rep, (dbenv, &mb, \ + "Received record from %d, master is INVALID", eid));\ ret = 0; \ (void)__rep_send_message(dbenv, \ DB_EID_BROADCAST, REP_MASTER_REQ, NULL, NULL, 0); \ @@ -84,40 +90,13 @@ do { \ goto errlock; \ } \ } while (0) -#else -#define MASTER_ONLY(rep, rp) do { \ - if (!F_ISSET(rep, REP_F_MASTER)) { \ - ret = EINVAL; \ - goto errlock; \ - } \ -} while (0) -#define CLIENT_ONLY(rep, rp) do { \ - if (!F_ISSET(rep, REP_ISCLIENT)) { \ - (void)__rep_send_message(dbenv, \ - DB_EID_BROADCAST, REP_DUPMASTER, NULL, NULL, 0); \ - ret = DB_REP_DUPMASTER; \ - goto errlock; \ - } \ -} while (0) - -#define MASTER_CHECK(dbenv, 
eid, rep) \ -do { \ - if (rep->master_id == DB_EID_INVALID) { \ - ret = 0; \ - (void)__rep_send_message(dbenv, \ - DB_EID_BROADCAST, REP_MASTER_REQ, NULL, NULL, 0); \ - goto errlock; \ - } \ - if (eid != rep->master_id) { \ - __db_err(dbenv, \ - "Received master record from %d, master is %d", \ - eid, rep->master_id); \ - ret = EINVAL; \ - goto errlock; \ - } \ +#define MASTER_UPDATE(dbenv, renv) do { \ + MUTEX_LOCK((dbenv), &(renv)->mutex); \ + F_SET((renv), DB_REGENV_REPLOCKED); \ + (void)time(&(renv)->op_timestamp); \ + MUTEX_UNLOCK((dbenv), &(renv)->mutex); \ } while (0) -#endif #define ANYSITE(rep) @@ -151,13 +130,18 @@ __rep_process_message(dbenv, control, rec, eidp, ret_lsnp) DB_REP *db_rep; DBT *d, data_dbt, mylog; LOG *lp; + REGENV *renv; + REGINFO *infop; REP *rep; REP_CONTROL *rp; REP_VOTE_INFO *vi; - u_int32_t bytes, egen, flags, gen, gbytes, type; + u_int32_t bytes, egen, flags, gen, gbytes, rectype, type; int check_limit, cmp, done, do_req; - int master, old, recovering, ret, t_ret; + int master, match, old, recovering, ret, t_ret; time_t savetime; +#ifdef DIAGNOSTIC + DB_MSGBUF mb; +#endif PANIC_CHECK(dbenv); ENV_REQUIRES_CONFIG(dbenv, dbenv->rep_handle, "rep_process_message", @@ -181,7 +165,11 @@ __rep_process_message(dbenv, control, rec, eidp, ret_lsnp) rep = db_rep->region; dblp = dbenv->lg_handle; lp = dblp->reginfo.primary; + infop = dbenv->reginfo; + renv = infop->primary; rp = (REP_CONTROL *)control->data; + if (ret_lsnp != NULL) + ZERO_LSN(*ret_lsnp); /* * Acquire the replication lock. @@ -192,36 +180,20 @@ __rep_process_message(dbenv, control, rec, eidp, ret_lsnp) * If we're racing with a thread in rep_start, then * just ignore the message and return. */ + RPRINT(dbenv, rep, (dbenv, &mb, + "Racing rep_start, ignore message.")); MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); - if (F_ISSET(rp, DB_LOG_PERM)) { - if (ret_lsnp != NULL) - *ret_lsnp = rp->lsn; - return (DB_REP_NOTPERM); - } else - return (0); - } - if (rep->in_recovery != 0) { - /* - * If we're racing with a thread in __db_apprec, - * just ignore the message and return. - */ - rep->stat.st_msgs_recover++; - MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); - return (0); + goto out; } rep->msg_th++; gen = rep->gen; - recovering = rep->in_recovery || - F_ISSET(rep, REP_F_READY | REP_F_RECOVER); - savetime = rep->timestamp; + recovering = rep->in_recovery || F_ISSET(rep, REP_F_RECOVER_MASK); + savetime = renv->rep_timestamp; rep->stat.st_msgs_processed++; MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __rep_print_message(dbenv, *eidp, rp, "rep_process_message"); -#endif + REP_PRINT_MESSAGE(dbenv, *eidp, rp, "rep_process_message"); /* Complain if we see an improper version number. 
*/ if (rp->rep_version != DB_REPVERSION) { @@ -279,20 +251,20 @@ __rep_process_message(dbenv, control, rec, eidp, ret_lsnp) if (rp->rectype == REP_ALIVE || rp->rectype == REP_VOTE1 || rp->rectype == REP_VOTE2) { MUTEX_LOCK(dbenv, db_rep->rep_mutexp); -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, "Updating gen from %lu to %lu", - (u_long)gen, (u_long)rp->gen); -#endif + RPRINT(dbenv, rep, (dbenv, &mb, + "Updating gen from %lu to %lu", + (u_long)gen, (u_long)rp->gen)); + rep->master_id = DB_EID_INVALID; gen = rep->gen = rp->gen; - if (rep->egen <= gen) - rep->egen = rep->gen + 1; -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, "Updating egen to %lu", - (u_long)rep->egen); -#endif + /* + * Updating of egen will happen when we process the + * message below for each message type. + */ MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + if (rp->rectype == REP_ALIVE) + (void)__rep_send_message(dbenv, + DB_EID_BROADCAST, REP_MASTER_REQ, NULL, + NULL, 0); } else if (rp->rectype != REP_NEWMASTER) { (void)__rep_send_message(dbenv, DB_EID_BROADCAST, REP_MASTER_REQ, NULL, NULL, 0); @@ -310,65 +282,65 @@ __rep_process_message(dbenv, control, rec, eidp, ret_lsnp) /* * We need to check if we're in recovery and if we are * then we need to ignore any messages except VERIFY*, VOTE*, - * NEW* and ALIVE_REQ. + * NEW* and ALIVE_REQ, or backup related messages: UPDATE*, + * PAGE* and FILE*. We need to also accept LOG messages + * if we're copying the log for recovery/backup. */ if (recovering) { switch (rp->rectype) { - case REP_VERIFY: - MUTEX_LOCK(dbenv, db_rep->db_mutexp); - cmp = log_compare(&lp->verify_lsn, &rp->lsn); - MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); - if (cmp != 0) - goto skip; - break; - case REP_ALIVE: - case REP_ALIVE_REQ: - case REP_DUPMASTER: - case REP_NEWCLIENT: - case REP_NEWMASTER: - case REP_NEWSITE: - case REP_VERIFY_FAIL: - case REP_VOTE1: - case REP_VOTE2: - break; - default: -skip: /* - * We don't hold the rep mutex, and could - * miscount if we race. + case REP_VERIFY: + MUTEX_LOCK(dbenv, db_rep->db_mutexp); + cmp = log_compare(&lp->verify_lsn, &rp->lsn); + MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); + if (cmp != 0) + goto skip; + break; + case REP_NEWFILE: + case REP_LOG: + case REP_LOG_MORE: + if (!F_ISSET(rep, REP_F_RECOVER_LOG)) + goto skip; + break; + case REP_ALIVE: + case REP_ALIVE_REQ: + case REP_DUPMASTER: + case REP_FILE_FAIL: + case REP_NEWCLIENT: + case REP_NEWMASTER: + case REP_NEWSITE: + case REP_PAGE: + case REP_PAGE_FAIL: + case REP_PAGE_MORE: + case REP_PAGE_REQ: + case REP_UPDATE: + case REP_UPDATE_REQ: + case REP_VERIFY_FAIL: + case REP_VOTE1: + case REP_VOTE2: + break; + default: +skip: + /* Check for need to retransmit. */ + /* Not holding rep_mutex, may miscount */ + rep->stat.st_msgs_recover++; + MUTEX_LOCK(dbenv, db_rep->db_mutexp); + do_req = __rep_check_doreq(dbenv, rep); + MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); + if (do_req) { + /* + * Don't respond to a MASTER_REQ with + * a MASTER_REQ. */ - rep->stat.st_msgs_recover++; - - /* Check for need to retransmit. 
*/ - MUTEX_LOCK(dbenv, db_rep->db_mutexp); - MUTEX_LOCK(dbenv, db_rep->rep_mutexp); - do_req = ++lp->rcvd_recs >= lp->wait_recs; - MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); - if (do_req) { - lp->wait_recs *= 2; - if (lp->wait_recs > rep->max_gap) - lp->wait_recs = rep->max_gap; - lp->rcvd_recs = 0; - lsn = lp->verify_lsn; - } - MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); - if (do_req) { - /* - * Don't respond to a MASTER_REQ with - * a MASTER_REQ. - */ - if (rep->master_id == DB_EID_INVALID && - rp->rectype != REP_MASTER_REQ) - (void)__rep_send_message(dbenv, - DB_EID_BROADCAST, - REP_MASTER_REQ, - NULL, NULL, 0); - else if (*eidp == rep->master_id) - (void)__rep_send_message( - dbenv, *eidp, - REP_VERIFY_REQ, - &lsn, NULL, 0); - } - goto errlock; + if (rep->master_id == DB_EID_INVALID && + rp->rectype != REP_MASTER_REQ) + (void)__rep_send_message(dbenv, + DB_EID_BROADCAST, + REP_MASTER_REQ, + NULL, NULL, 0); + else if (*eidp == rep->master_id) + ret = __rep_resend_req(dbenv, *eidp); + } + goto errlock; } } @@ -377,13 +349,17 @@ skip: /* ANYSITE(rep); egen = *(u_int32_t *)rec->data; MUTEX_LOCK(dbenv, db_rep->rep_mutexp); -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, "Received ALIVE egen of %lu, mine %lu", - (u_long)egen, (u_long)rep->egen); -#endif - if (egen > rep->egen) + RPRINT(dbenv, rep, (dbenv, &mb, + "Received ALIVE egen of %lu, mine %lu", + (u_long)egen, (u_long)rep->egen)); + if (egen > rep->egen) { + /* + * We're changing egen, need to clear out any old + * election information. + */ + __rep_elect_done(dbenv, rep); rep->egen = egen; + } MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); break; case REP_ALIVE_REQ: @@ -456,8 +432,8 @@ skip: /* bytes -= (data_dbt.size + sizeof(REP_CONTROL)); } -send: if (__rep_send_message(dbenv, - *eidp, type, &lsn, &data_dbt, 0) != 0) +send: if (__rep_send_message(dbenv, *eidp, type, + &lsn, &data_dbt, DB_LOG_RESEND) != 0) break; /* @@ -483,12 +459,36 @@ send: if (__rep_send_message(dbenv, ret = __rep_send_file(dbenv, rec, *eidp); goto errlock; #endif + case REP_FILE_FAIL: + CLIENT_ONLY(rep, rp); + MASTER_CHECK(dbenv, *eidp, rep); + /* + * XXX + */ + break; case REP_LOG: case REP_LOG_MORE: CLIENT_ONLY(rep, rp); MASTER_CHECK(dbenv, *eidp, rep); - if ((ret = __rep_apply(dbenv, rp, rec, ret_lsnp)) != 0) + if ((ret = __rep_apply(dbenv, rp, rec, ret_lsnp)) != 0 && + ret != DB_REP_LOGREADY) goto errlock; + /* + * We're in an internal backup and we've gotten all the log + * we need to run recovery. Do so now. + */ + if (ret == DB_REP_LOGREADY) { + if ((ret = __log_flush(dbenv, NULL)) != 0) + goto errlock; + if ((ret = __rep_verify_match(dbenv, &rep->last_lsn, + savetime)) == 0) { + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + ZERO_LSN(rep->first_lsn); + ZERO_LSN(rep->last_lsn); + F_CLR(rep, REP_F_RECOVER_LOG); + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + } + } if (rp->rectype == REP_LOG_MORE) { MUTEX_LOCK(dbenv, db_rep->rep_mutexp); master = rep->master_id; @@ -506,26 +506,35 @@ send: if (__rep_send_message(dbenv, * we'll re-negotiate where the end of the log is and * try to bring ourselves up to date again anyway. */ + MUTEX_LOCK(dbenv, db_rep->db_mutexp); if (master == DB_EID_INVALID) ret = 0; - else + /* + * If we've asked for a bunch of records, it could + * either be from a LOG_REQ or ALL_REQ. If we're + * waiting for a gap to be filled, call loggap_req, + * otherwise use ALL_REQ again. 
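The retransmit decision in the hunk above, and the __rep_check_doreq helper it is being moved into later in this patch, is a counter with exponential backoff: only re-request after wait_recs further records have arrived, and each time a request goes out, double wait_recs up to max_gap so a slow or busy master is not flooded with duplicate requests. Below is a minimal standalone model of that policy; the struct and function names are invented for illustration.

#include <stdio.h>

/* Invented names; models lp->rcvd_recs / lp->wait_recs / rep->max_gap. */
struct gap_state {
    unsigned rcvd_recs; /* records seen since the last request */
    unsigned wait_recs; /* records to wait for before re-requesting */
    unsigned max_gap;   /* upper bound on wait_recs */
};

/*
 * Return 1 if it is time to (re)send a request, 0 otherwise.  Each time we
 * decide to request, double the wait threshold (capped at max_gap).
 */
static int
check_doreq(struct gap_state *g)
{
    int req;

    req = ++g->rcvd_recs >= g->wait_recs;
    if (req) {
        g->wait_recs *= 2;
        if (g->wait_recs > g->max_gap)
            g->wait_recs = g->max_gap;
        g->rcvd_recs = 0;
    }
    return (req);
}

int
main(void)
{
    struct gap_state g = { 0, 2, 16 };
    int i;

    /* Simulate 40 out-of-order arrivals and show when we would re-request. */
    for (i = 1; i <= 40; i++)
        if (check_doreq(&g))
            printf("record %2d: re-request (next wait_recs=%u)\n",
                i, g.wait_recs);
    return (0);
}

In this run the requests fire at records 2, 6, 14 and 30 as the threshold doubles toward the cap.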
+ */ + else if (IS_ZERO_LSN(lp->waiting_lsn)) { + MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); if (__rep_send_message(dbenv, master, REP_ALL_REQ, &lsn, NULL, 0) != 0) break; + } else { + __rep_loggap_req(dbenv, rep, &lsn, 1); + MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); + } } goto errlock; case REP_LOG_REQ: MASTER_ONLY(rep, rp); -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION) && - rec != NULL && rec->size != 0) { - __db_err(dbenv, + if (rec != NULL && rec->size != 0) { + RPRINT(dbenv, rep, (dbenv, &mb, "[%lu][%lu]: LOG_REQ max lsn: [%lu][%lu]", (u_long) rp->lsn.file, (u_long)rp->lsn.offset, (u_long)((DB_LSN *)rec->data)->file, - (u_long)((DB_LSN *)rec->data)->offset); + (u_long)((DB_LSN *)rec->data)->offset)); } -#endif /* * There are three different cases here. * 1. We asked for a particular LSN and got it. @@ -539,15 +548,15 @@ send: if (__rep_send_message(dbenv, * it, then we need to send all records up to the LSN in the * data dbt. */ - lsn = rp->lsn; + oldfilelsn = lsn = rp->lsn; if ((ret = __log_cursor(dbenv, &logc)) != 0) goto errlock; memset(&data_dbt, 0, sizeof(data_dbt)); - ret = __log_c_get(logc, &rp->lsn, &data_dbt, DB_SET); + ret = __log_c_get(logc, &lsn, &data_dbt, DB_SET); if (ret == 0) /* Case 1 */ (void)__rep_send_message(dbenv, - *eidp, REP_LOG, &rp->lsn, &data_dbt, 0); + *eidp, REP_LOG, &lsn, &data_dbt, DB_LOG_RESEND); else if (ret == DB_NOTFOUND) { R_LOCK(dbenv, &dblp->reginfo); endlsn = lp->lsn; @@ -567,13 +576,27 @@ send: if (__rep_send_message(dbenv, &endlsn, &data_dbt, DB_SET)) != 0 || (ret = __log_c_get(logc, &endlsn, &data_dbt, DB_PREV)) != 0) { - if (FLD_ISSET(dbenv->verbose, - DB_VERB_REPLICATION)) - __db_err(dbenv, - "Unable to get prev of [%lu][%lu]", - (u_long)lsn.file, - (u_long)lsn.offset); - ret = DB_REP_OUTDATED; + RPRINT(dbenv, rep, (dbenv, &mb, + "Unable to get prev of [%lu][%lu]", + (u_long)lsn.file, + (u_long)lsn.offset)); + /* + * We want to push the error back + * to the client so that the client + * does an internal backup. The + * client asked for a log record + * we no longer have and it is + * outdated. + * XXX - This could be optimized by + * having the master perform and + * send a REP_UPDATE message. We + * currently want the client to set + * up its 'update' state prior to + * requesting REP_UPDATE_REQ. + */ + ret = 0; + (void)__rep_send_message(dbenv, *eidp, + REP_VERIFY_FAIL, &rp->lsn, NULL, 0); } else { endlsn.offset += logc->c_len; (void)__rep_send_message(dbenv, *eidp, @@ -581,34 +604,77 @@ send: if (__rep_send_message(dbenv, } } else { /* Case 3 */ - DB_ASSERT(0); __db_err(dbenv, "Request for LSN [%lu][%lu] fails", (u_long)lsn.file, (u_long)lsn.offset); + DB_ASSERT(0); ret = EINVAL; } } /* - * XXX - * Note that we are not observing the limits here that - * we observe on ALL_REQs. If we think that we need to, - * then we need to figure out how to convey back to the - * client the max_lsn with the LOG_MORE message and I - * can't quite figure out how to do that. + * If the user requested a gap, send the whole thing, + * while observing the limits from set_rep_limit. 
*/ - while (ret == 0 && rec != NULL && rec->size != 0) { + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + gbytes = rep->gbytes; + bytes = rep->bytes; + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + check_limit = gbytes != 0 || bytes != 0; + type = REP_LOG; + while (ret == 0 && rec != NULL && rec->size != 0 && + type == REP_LOG) { if ((ret = __log_c_get(logc, &lsn, &data_dbt, DB_NEXT)) != 0) { if (ret == DB_NOTFOUND) ret = 0; - break;; + break; } if (log_compare(&lsn, (DB_LSN *)rec->data) >= 0) break; - if (__rep_send_message(dbenv, - *eidp, REP_LOG, &lsn, &data_dbt, 0) != 0) + /* + * When a log file changes, we'll have a real log + * record with some lsn [n][m], and we'll also want + * to send a NEWFILE message with lsn [n-1][MAX]. + */ + if (lsn.file != oldfilelsn.file) + (void)__rep_send_message(dbenv, + *eidp, REP_NEWFILE, &oldfilelsn, NULL, 0); + if (check_limit) { + /* + * data_dbt.size is only the size of the log + * record; it doesn't count the size of the + * control structure. Factor that in as well + * so we're not off by a lot if our log records + * are small. + */ + while (bytes < + data_dbt.size + sizeof(REP_CONTROL)) { + if (gbytes > 0) { + bytes += GIGABYTE; + --gbytes; + continue; + } + /* + * We don't hold the rep mutex, + * and may miscount. + */ + rep->stat.st_nthrottles++; + type = REP_LOG_MORE; + goto send1; + } + bytes -= (data_dbt.size + sizeof(REP_CONTROL)); + } + +send1: if (__rep_send_message(dbenv, *eidp, type, + &lsn, &data_dbt, DB_LOG_RESEND) != 0) break; + /* + * If we are about to change files, then we'll need the + * last LSN in the previous file. Save it here. + */ + oldfilelsn = lsn; + oldfilelsn.offset += logc->c_len; } if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) @@ -645,14 +711,16 @@ send: if (__rep_send_message(dbenv, ret = DB_REP_NEWSITE; - if (F_ISSET(rep, REP_F_UPGRADE)) { + if (F_ISSET(rep, REP_F_CLIENT)) { MUTEX_LOCK(dbenv, db_rep->rep_mutexp); egen = rep->egen; + if (*eidp == rep->master_id) + rep->master_id = DB_EID_INVALID; MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); data_dbt.data = &egen; data_dbt.size = sizeof(egen); - (void)__rep_send_message(dbenv, - *eidp, REP_ALIVE, &rp->lsn, &data_dbt, 0); + (void)__rep_send_message(dbenv, DB_EID_BROADCAST, + REP_ALIVE, &rp->lsn, &data_dbt, 0); goto errlock; } /* FALLTHROUGH */ @@ -665,8 +733,23 @@ send: if (__rep_send_message(dbenv, DB_EID_BROADCAST, REP_NEWMASTER, &lsn, NULL, 0); } /* - * Otherwise, clients just ignore it. + * If there is no master, then we could get into a state + * where an old client lost the initial ALIVE message and + * is calling an election under an old gen and can + * never get to the current gen. 
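Both the ALL_REQ path earlier in this function and the LOG_REQ loop above meter outgoing records against the limit configured with set_rep_limit: the gigabyte part of the budget is converted into the byte counter on demand, and once the budget cannot cover the next record the sender throttles and tags the last record as a *_MORE type so the client knows to ask again. A standalone sketch of just that accounting follows; the struct is invented, and GIGABYTE is assumed to be 2^30 as the name suggests.

#include <stdio.h>

#define GIGABYTE (1024u * 1024u * 1024u)  /* assumed 2^30, per the name */

/* Invented bookkeeping struct modeling rep->gbytes / rep->bytes. */
struct limit {
    unsigned gbytes;     /* whole gigabytes left in the budget */
    unsigned bytes;      /* remaining bytes below a gigabyte */
    unsigned nthrottles; /* times we hit the limit */
};

/*
 * Charge one outgoing record (payload + control overhead) against the
 * budget.  Returns 1 if the record still fits, 0 if the sender should
 * stop after this record and mark it "MORE".
 */
static int
charge_record(struct limit *l, unsigned payload, unsigned overhead)
{
    unsigned need = payload + overhead;

    while (l->bytes < need) {
        if (l->gbytes > 0) {        /* break a gigabyte into bytes */
            l->bytes += GIGABYTE;
            --l->gbytes;
            continue;
        }
        l->nthrottles++;            /* budget exhausted: throttle */
        return (0);
    }
    l->bytes -= need;
    return (1);
}

int
main(void)
{
    struct limit l = { 0, 4096, 0 };    /* tiny budget: 4KB */
    int i;

    for (i = 0; i < 10; i++)
        if (!charge_record(&l, 900, 100)) {
            printf("throttled after %d records\n", i);
            break;
        }
    printf("bytes left %u, throttles %u\n", l.bytes, l.nthrottles);
    return (0);
}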
*/ + if (F_ISSET(rep, REP_F_CLIENT) && rp->gen < gen) { + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + egen = rep->egen; + if (*eidp == rep->master_id) + rep->master_id = DB_EID_INVALID; + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + data_dbt.data = &egen; + data_dbt.size = sizeof(egen); + (void)__rep_send_message(dbenv, *eidp, + REP_ALIVE, &rp->lsn, &data_dbt, 0); + goto errlock; + } goto errlock; case REP_NEWFILE: CLIENT_ONLY(rep, rp); @@ -686,27 +769,38 @@ send: if (__rep_send_message(dbenv, } ret = __rep_new_master(dbenv, rp, *eidp); goto errlock; - case REP_PAGE: /* TODO */ + case REP_PAGE: + case REP_PAGE_MORE: + CLIENT_ONLY(rep, rp); + MASTER_CHECK(dbenv, *eidp, rep); + ret = __rep_page(dbenv, *eidp, rp, rec); + break; + case REP_PAGE_FAIL: CLIENT_ONLY(rep, rp); MASTER_CHECK(dbenv, *eidp, rep); + ret = __rep_page_fail(dbenv, *eidp, rec); break; - case REP_PAGE_REQ: /* TODO */ + case REP_PAGE_REQ: MASTER_ONLY(rep, rp); + MASTER_UPDATE(dbenv, renv); + ret = __rep_page_req(dbenv, *eidp, rec); break; - case REP_PLIST: /* TODO */ + case REP_UPDATE: CLIENT_ONLY(rep, rp); MASTER_CHECK(dbenv, *eidp, rep); + + ret = __rep_update_setup(dbenv, *eidp, rp, rec); break; - case REP_PLIST_REQ: /* TODO */ + case REP_UPDATE_REQ: MASTER_ONLY(rep, rp); + infop = dbenv->reginfo; + renv = infop->primary; + MASTER_UPDATE(dbenv, renv); + ret = __rep_update_req(dbenv, *eidp); break; case REP_VERIFY: CLIENT_ONLY(rep, rp); MASTER_CHECK(dbenv, *eidp, rep); - DB_ASSERT((F_ISSET(rep, REP_F_RECOVER) && - !IS_ZERO_LSN(lp->verify_lsn)) || - (!F_ISSET(rep, REP_F_RECOVER) && - IS_ZERO_LSN(lp->verify_lsn))); if (IS_ZERO_LSN(lp->verify_lsn)) goto errlock; @@ -715,41 +809,85 @@ send: if (__rep_send_message(dbenv, memset(&mylog, 0, sizeof(mylog)); if ((ret = __log_c_get(logc, &rp->lsn, &mylog, DB_SET)) != 0) goto rep_verify_err; + match = 0; + memcpy(&rectype, mylog.data, sizeof(rectype)); if (mylog.size == rec->size && - memcmp(mylog.data, rec->data, rec->size) == 0) { - ret = __rep_verify_match(dbenv, rp, savetime); - } else if ((ret = - __log_c_get(logc, &lsn, &mylog, DB_PREV)) == 0) { - MUTEX_LOCK(dbenv, db_rep->db_mutexp); - lp->verify_lsn = lsn; - lp->rcvd_recs = 0; - lp->wait_recs = rep->request_gap; - MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); - (void)__rep_send_message(dbenv, - *eidp, REP_VERIFY_REQ, &lsn, NULL, 0); - } else if (ret == DB_NOTFOUND) { - /* We've either run out of records because - * logs have been removed or we've rolled back - * all the way to the beginning. In both cases - * we to return DB_REP_OUTDATED; in the latter - * we don't think these sites were every part of - * the same environment and we'll say so. - */ - ret = DB_REP_OUTDATED; - if (rp->lsn.file != 1) - __db_err(dbenv, - "Too few log files to sync with master"); - else - __db_err(dbenv, + memcmp(mylog.data, rec->data, rec->size) == 0) + match = 1; + DB_ASSERT(rectype == DB___txn_ckp); + /* + * If we don't have a match, backup to the previous + * checkpoint and try again. + */ + if (match == 0) { + ZERO_LSN(lsn); + if ((ret = __log_backup(dbenv, logc, &rp->lsn, &lsn, + LASTCKP_CMP)) == 0) { + MUTEX_LOCK(dbenv, db_rep->db_mutexp); + lp->verify_lsn = lsn; + lp->rcvd_recs = 0; + lp->wait_recs = rep->request_gap; + MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); + (void)__rep_send_message(dbenv, + *eidp, REP_VERIFY_REQ, &lsn, NULL, 0); + } else if (ret == DB_NOTFOUND) { + /* + * We've either run out of records because + * logs have been removed or we've rolled back + * all the way to the beginning. 
In the latter + * we don't think these sites were ever part of + * the same environment and we'll say so. + * In the former, request internal backup. + */ + if (rp->lsn.file == 1) { + __db_err(dbenv, "Client was never part of master's environment"); - } + ret = EINVAL; + } else { + rep->stat.st_outdated++; + + R_LOCK(dbenv, &dblp->reginfo); + lsn = lp->lsn; + R_UNLOCK(dbenv, &dblp->reginfo); + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + F_CLR(rep, REP_F_RECOVER_VERIFY); + F_SET(rep, REP_F_RECOVER_UPDATE); + ZERO_LSN(rep->first_lsn); + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + (void)__rep_send_message(dbenv, + *eidp, REP_UPDATE_REQ, NULL, + NULL, 0); + } + } + } else + ret = __rep_verify_match(dbenv, &rp->lsn, savetime); rep_verify_err: if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) ret = t_ret; goto errlock; case REP_VERIFY_FAIL: + CLIENT_ONLY(rep, rp); + MASTER_CHECK(dbenv, *eidp, rep); + /* + * If any recovery flags are set, but not VERIFY, + * then we ignore this message. We are already + * in the middle of updating. + */ + if (F_ISSET(rep, REP_F_RECOVER_MASK) && + !F_ISSET(rep, REP_F_RECOVER_VERIFY)) + goto errlock; rep->stat.st_outdated++; - ret = DB_REP_OUTDATED; + + R_LOCK(dbenv, &dblp->reginfo); + lsn = lp->lsn; + R_UNLOCK(dbenv, &dblp->reginfo); + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + F_CLR(rep, REP_F_RECOVER_VERIFY); + F_SET(rep, REP_F_RECOVER_UPDATE); + ZERO_LSN(rep->first_lsn); + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + (void)__rep_send_message(dbenv, + *eidp, REP_UPDATE_REQ, NULL, NULL, 0); goto errlock; case REP_VERIFY_REQ: MASTER_ONLY(rep, rp); @@ -781,10 +919,8 @@ rep_verify_err: if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) goto errlock; case REP_VOTE1: if (F_ISSET(rep, REP_F_MASTER)) { -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, "Master received vote"); -#endif + RPRINT(dbenv, rep, + (dbenv, &mb, "Master received vote")); R_LOCK(dbenv, &dblp->reginfo); lsn = lp->lsn; R_UNLOCK(dbenv, &dblp->reginfo); @@ -799,24 +935,28 @@ rep_verify_err: if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) /* * If we get a vote from a later election gen, we * clear everything from the current one, and we'll - * start over by tallying it. + * start over by tallying it. If we get an old vote, + * send an ALIVE to the old participant. */ + RPRINT(dbenv, rep, (dbenv, &mb, + "Received vote1 egen %lu, egen %lu", + (u_long)vi->egen, (u_long)rep->egen)); if (vi->egen < rep->egen) { -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, + RPRINT(dbenv, rep, (dbenv, &mb, "Received old vote %lu, egen %lu, ignoring vote1", - (u_long)vi->egen, (u_long)rep->egen); -#endif - goto errunlock; + (u_long)vi->egen, (u_long)rep->egen)); + egen = rep->egen; + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + data_dbt.data = &egen; + data_dbt.size = sizeof(egen); + (void)__rep_send_message(dbenv, + *eidp, REP_ALIVE, &rp->lsn, &data_dbt, 0); + goto errlock; } if (vi->egen > rep->egen) { -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, + RPRINT(dbenv, rep, (dbenv, &mb, "Received VOTE1 from egen %lu, my egen %lu; reset", - (u_long)vi->egen, (u_long)rep->egen); -#endif + (u_long)vi->egen, (u_long)rep->egen)); __rep_elect_done(dbenv, rep); rep->egen = vi->egen; } @@ -827,6 +967,10 @@ rep_verify_err: if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) if (vi->nsites > rep->nsites) rep->nsites = vi->nsites; + /* Check if this site requires more votes than we do. 
*/ + if (vi->nvotes > rep->nvotes) + rep->nvotes = vi->nvotes; + /* * We are keeping the vote, let's see if that changes our * count of the number of sites. @@ -835,11 +979,8 @@ rep_verify_err: if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) rep->nsites = rep->sites + 1; if (rep->nsites > rep->asites && (ret = __rep_grow_sites(dbenv, rep->nsites)) != 0) { -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, - "Grow sites returned error %d", ret); -#endif + RPRINT(dbenv, rep, (dbenv, &mb, + "Grow sites returned error %d", ret)); goto errunlock; } @@ -847,10 +988,8 @@ rep_verify_err: if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) * Ignore vote1's if we're in phase 2. */ if (F_ISSET(rep, REP_F_EPHASE2)) { -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, "In phase 2, ignoring vote1"); -#endif + RPRINT(dbenv, rep, (dbenv, &mb, + "In phase 2, ignoring vote1")); goto errunlock; } @@ -860,29 +999,25 @@ rep_verify_err: if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) */ if ((ret = __rep_tally(dbenv, rep, *eidp, &rep->sites, vi->egen, rep->tally_off)) != 0) { -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, "Tally returned %d, sites %d", - ret, rep->sites); -#endif + RPRINT(dbenv, rep, (dbenv, &mb, + "Tally returned %d, sites %d", + ret, rep->sites)); ret = 0; goto errunlock; } -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) { - __db_err(dbenv, + RPRINT(dbenv, rep, (dbenv, &mb, "Incoming vote: (eid)%d (pri)%d (gen)%lu (egen)%lu [%lu,%lu]", - *eidp, vi->priority, - (u_long)rp->gen, (u_long)vi->egen, - (u_long)rp->lsn.file, (u_long)rp->lsn.offset); - if (rep->sites > 1) - __db_err(dbenv, + *eidp, vi->priority, + (u_long)rp->gen, (u_long)vi->egen, + (u_long)rp->lsn.file, (u_long)rp->lsn.offset)); +#ifdef DIAGNOSTIC + if (rep->sites > 1) + RPRINT(dbenv, rep, (dbenv, &mb, "Existing vote: (eid)%d (pri)%d (gen)%lu (sites)%d [%lu,%lu]", - rep->winner, rep->w_priority, - (u_long)rep->w_gen, rep->sites, - (u_long)rep->w_lsn.file, - (u_long)rep->w_lsn.offset); - } + rep->winner, rep->w_priority, + (u_long)rep->w_gen, rep->sites, + (u_long)rep->w_lsn.file, + (u_long)rep->w_lsn.offset)); #endif __rep_cmp_vote(dbenv, rep, eidp, &rp->lsn, vi->priority, rp->gen, vi->tiebreaker); @@ -892,12 +1027,9 @@ rep_verify_err: if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) * to do. */ if (!IN_ELECTION(rep)) { -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, - "Not in election, but received vote1 0x%x", - rep->flags); -#endif + RPRINT(dbenv, rep, (dbenv, &mb, + "Not in election, but received vote1 0x%x", + rep->flags)); ret = DB_REP_HOLDELECTION; goto errunlock; } @@ -913,13 +1045,10 @@ rep_verify_err: if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) */ done = rep->sites >= rep->nsites && rep->w_priority != 0; if (done) { -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) { - __db_err(dbenv, "Phase1 election done"); - __db_err(dbenv, "Voting for %d%s", - master, master == rep->eid ? "(self)" : ""); - } -#endif + RPRINT(dbenv, rep, + (dbenv, &mb, "Phase1 election done")); + RPRINT(dbenv, rep, (dbenv, &mb, "Voting for %d%s", + master, master == rep->eid ? "(self)" : "")); egen = rep->egen; F_SET(rep, REP_F_EPHASE2); F_CLR(rep, REP_F_EPHASE1); @@ -931,7 +1060,7 @@ rep_verify_err: if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); /* Vote for someone else. 
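Stripped of locking and diagnostics, the election bookkeeping above reduces to two counters and a running best candidate: phase 1 is finished once every expected site has sent VOTE1 and the best candidate has a non-zero priority, after which this site casts VOTE2 for the winner; phase 2, handled a little further on, is a plain count of VOTE2s against nvotes. The standalone model below shows only that counting rule; the struct and the priority-only choice of winner are simplifications, not the library's full __rep_cmp_vote comparison (which also weighs LSNs and tiebreakers).

#include <stdio.h>

/* Simplified stand-ins for the election state kept in REP. */
struct election {
    int sites;      /* distinct VOTE1s tallied so far */
    int nsites;     /* sites expected to participate */
    int nvotes;     /* VOTE2s required to win */
    int votes;      /* VOTE2s received for this site */
    int w_eid;      /* current winner */
    int w_priority; /* winner's priority */
};

/* Phase 1 is done once everyone has voted and the winner is electable. */
static int
phase1_done(const struct election *e)
{
    return (e->sites >= e->nsites && e->w_priority != 0);
}

/* Phase 2 is done once a quorum of VOTE2s has arrived. */
static int
phase2_done(const struct election *e)
{
    return (e->votes >= e->nvotes);
}

int
main(void)
{
    struct election e = { 0, 3, 2, 0, -1, 0 };
    struct { int eid, priority; } vote1s[] = { { 1, 10 }, { 2, 0 }, { 3, 50 } };
    int i;

    for (i = 0; i < 3; i++) {
        e.sites++;
        /* Keep the highest-priority candidate; LSNs/tiebreakers omitted. */
        if (vote1s[i].priority > e.w_priority) {
            e.w_eid = vote1s[i].eid;
            e.w_priority = vote1s[i].priority;
        }
        if (phase1_done(&e))
            printf("phase 1 done, voting for site %d\n", e.w_eid);
    }

    /* Pretend a quorum of sites sends VOTE2 for us. */
    for (i = 0; i < 2; i++) {
        e.votes++;
        if (phase2_done(&e))
            printf("elected with %d of %d votes\n", e.votes, e.nvotes);
    }
    return (0);
}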
*/ - __rep_send_vote(dbenv, NULL, 0, 0, 0, egen, + __rep_send_vote(dbenv, NULL, 0, 0, 0, 0, egen, master, REP_VOTE2); } else MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); @@ -939,12 +1068,8 @@ rep_verify_err: if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) /* Election is still going on. */ break; case REP_VOTE2: -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, "We received a vote%s", - F_ISSET(rep, REP_F_MASTER) ? - " (master)" : ""); -#endif + RPRINT(dbenv, rep, (dbenv, &mb, "We received a vote%s", + F_ISSET(rep, REP_F_MASTER) ? " (master)" : "")); if (F_ISSET(rep, REP_F_MASTER)) { R_LOCK(dbenv, &dblp->reginfo); lsn = lp->lsn; @@ -970,12 +1095,9 @@ rep_verify_err: if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) */ vi = (REP_VOTE_INFO *)rec->data; if (!IN_ELECTION_TALLY(rep) && vi->egen >= rep->egen) { -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, - "Not in election gen %lu, at %lu, got vote", - (u_long)vi->egen, (u_long)rep->egen); -#endif + RPRINT(dbenv, rep, (dbenv, &mb, + "Not in election gen %lu, at %lu, got vote", + (u_long)vi->egen, (u_long)rep->egen)); ret = DB_REP_HOLDELECTION; goto errunlock; } @@ -1010,11 +1132,9 @@ rep_verify_err: if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) ret = 0; goto errunlock; } - done = rep->votes > rep->nsites / 2; -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, "Counted vote %d", rep->votes); -#endif + done = rep->votes >= rep->nvotes; + RPRINT(dbenv, rep, (dbenv, &mb, "Counted vote %d of %d", + rep->votes, rep->nvotes)); if (done) { __rep_elect_master(dbenv, rep, eidp); ret = DB_REP_NEWMASTER; @@ -1040,6 +1160,12 @@ errlock: errunlock: rep->msg_th--; MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); +out: + if (ret == 0 && F_ISSET(rp, DB_LOG_PERM)) { + if (ret_lsnp != NULL) + *ret_lsnp = rp->lsn; + ret = DB_REP_NOTPERM; + } return (ret); } @@ -1047,9 +1173,11 @@ errunlock: * __rep_apply -- * * Handle incoming log records on a client, applying when possible and - * entering into the bookkeeping table otherwise. This is the guts of - * the routine that handles the state machine that describes how we - * process and manage incoming log records. + * entering into the bookkeeping table otherwise. This routine manages + * the state of the incoming message stream -- processing records, via + * __rep_process_rec, when possible and enqueuing in the __db.rep.db + * when necessary. As gaps in the stream are filled in, this is where + * we try to process as much as possible from __db.rep.db to catch up. 
*/ static int __rep_apply(dbenv, rp, rec, ret_lsnp) @@ -1058,76 +1186,43 @@ __rep_apply(dbenv, rp, rec, ret_lsnp) DBT *rec; DB_LSN *ret_lsnp; { - __dbreg_register_args dbreg_args; - __txn_ckp_args ckp_args; DB_REP *db_rep; - DBT control_dbt, key_dbt, lsn_dbt; - DBT max_lsn_dbt, *max_lsn_dbtp, nextrec_dbt, rec_dbt; + DBT control_dbt, key_dbt; + DBT rec_dbt; DB *dbp; - DBC *dbc; DB_LOG *dblp; - DB_LSN ckp_lsn, max_lsn, next_lsn; + DB_LSN max_lsn; LOG *lp; REP *rep; - REP_CONTROL *grp; - u_int32_t rectype, txnid; - int cmp, do_req, eid, gap, ret, t_ret; + u_int32_t rectype; + int cmp, ret; +#ifdef DIAGNOSTIC + DB_MSGBUF mb; +#endif db_rep = dbenv->rep_handle; rep = db_rep->region; dbp = db_rep->rep_db; - dbc = NULL; - ret = gap = 0; + rectype = 0; + ret = 0; memset(&control_dbt, 0, sizeof(control_dbt)); memset(&rec_dbt, 0, sizeof(rec_dbt)); - max_lsn_dbtp = NULL; + ZERO_LSN(max_lsn); - /* - * If this is a log record and it's the next one in line, simply - * write it to the log. If it's a "normal" log record, i.e., not - * a COMMIT or CHECKPOINT or something that needs immediate processing, - * just return. If it's a COMMIT, CHECKPOINT, LOG_REGISTER, PREPARE - * (i.e., not SIMPLE), handle it now. If it's a NEWFILE record, - * then we have to be prepared to deal with a logfile change. - */ dblp = dbenv->lg_handle; MUTEX_LOCK(dbenv, db_rep->db_mutexp); lp = dblp->reginfo.primary; + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + if (F_ISSET(rep, REP_F_RECOVER_LOG) && + log_compare(&lp->ready_lsn, &rep->first_lsn) < 0) + lp->ready_lsn = rep->first_lsn; + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); cmp = log_compare(&rp->lsn, &lp->ready_lsn); - /* - * This is written to assume that you don't end up with a lot of - * records after a hole. That is, it optimizes for the case where - * there is only a record or two after a hole. If you have a lot - * of records after a hole, what you'd really want to do is write - * all of them and then process all the commits, checkpoints, etc. - * together. That is more complicated processing that we can add - * later if necessary. - * - * That said, I really don't want to do db operations holding the - * log mutex, so the synchronization here is tricky. - */ if (cmp == 0) { - /* We got the log record that we are expecting. */ - if (rp->rectype == REP_NEWFILE) { - ret = __rep_newfile(dbenv, rp, &lp->ready_lsn); - - /* Make this evaluate to a simple rectype. */ - rectype = 0; - } else { - if (F_ISSET(rp, DB_LOG_PERM)) { - gap = 1; - max_lsn = rp->lsn; - } - ret = __log_rep_put(dbenv, &rp->lsn, rec); - memcpy(&rectype, rec->data, sizeof(rectype)); - if (ret == 0) - /* - * We may miscount if we race, since we - * don't currently hold the rep mutex. - */ - rep->stat.st_log_records++; - } + if ((ret = + __rep_process_rec(dbenv, rp, rec, &rectype, &max_lsn)) != 0) + goto err; /* * If we get the record we are expecting, reset * the count of records we've received and are applying @@ -1135,156 +1230,54 @@ __rep_apply(dbenv, rp, rec, ret_lsnp) */ lp->rcvd_recs = 0; - while (ret == 0 && IS_SIMPLE(rectype) && + while (ret == 0 && log_compare(&lp->ready_lsn, &lp->waiting_lsn) == 0) { /* * We just filled in a gap in the log record stream. * Write subsequent records to the log. */ -gap_check: max_lsn_dbtp = NULL; - lp->wait_recs = 0; +gap_check: lp->wait_recs = 0; lp->rcvd_recs = 0; ZERO_LSN(lp->max_wait_lsn); - if (dbc == NULL && - (ret = __db_cursor(dbp, NULL, &dbc, 0)) != 0) - goto err; - - /* The DBTs need to persist through another call. 
*/ - F_SET(&control_dbt, DB_DBT_REALLOC); - F_SET(&rec_dbt, DB_DBT_REALLOC); - if ((ret = __db_c_get(dbc, - &control_dbt, &rec_dbt, DB_RMW | DB_FIRST)) != 0) + if ((ret = + __rep_remfirst(dbenv, &control_dbt, &rec_dbt)) != 0) goto err; rp = (REP_CONTROL *)control_dbt.data; rec = &rec_dbt; - memcpy(&rectype, rec->data, sizeof(rectype)); - if (rp->rectype != REP_NEWFILE) { - ret = __log_rep_put(dbenv, &rp->lsn, rec); - /* - * We may miscount if we race, since we - * don't currently hold the rep mutex. - */ - if (ret == 0) - rep->stat.st_log_records++; - } else { - ret = __rep_newfile(dbenv, rp, &lp->ready_lsn); - rectype = 0; - } - if ((ret = __db_c_del(dbc, 0)) != 0) + if ((ret = __rep_process_rec(dbenv, + rp, rec, &rectype, &max_lsn)) != 0) goto err; /* - * If we just processed a permanent log record, make - * sure that we note that we've done so and that we - * save its LSN. - */ - if (F_ISSET(rp, DB_LOG_PERM)) { - gap = 1; - max_lsn = rp->lsn; - } - /* - * We may miscount, as we don't hold the rep - * mutex. + * We may miscount, as we don't hold the rep mutex. */ --rep->stat.st_log_queued; - /* - * Update waiting_lsn. We need to move it - * forward to the LSN of the next record - * in the queue. - * - * If the next item in the database is a log - * record--the common case--we're not - * interested in its contents, just in its LSN. - * Optimize by doing a partial get of the data item. - */ - memset(&nextrec_dbt, 0, sizeof(nextrec_dbt)); - F_SET(&nextrec_dbt, DB_DBT_PARTIAL); - nextrec_dbt.ulen = nextrec_dbt.dlen = 0; - - memset(&lsn_dbt, 0, sizeof(lsn_dbt)); - ret = __db_c_get(dbc, &lsn_dbt, &nextrec_dbt, DB_NEXT); - if (ret != DB_NOTFOUND && ret != 0) - goto err; - - if (ret == DB_NOTFOUND) { - ZERO_LSN(lp->waiting_lsn); - /* - * Whether or not the current record is - * simple, there's no next one, and - * therefore we haven't got anything - * else to do right now. Break out. - */ - break; - } - grp = (REP_CONTROL *)lsn_dbt.data; - lp->waiting_lsn = grp->lsn; - - /* - * If the current rectype is simple, we're done with it, - * and we should check and see whether the next record - * queued is the next one we're ready for. This is - * just the loop condition, so we continue. - * - * If this record isn't simple, then we need to - * process it before continuing. - */ - if (!IS_SIMPLE(rectype)) + if ((ret = __rep_getnext(dbenv)) == DB_NOTFOUND) { + ret = 0; break; + } else if (ret != 0) + goto err; } /* * Check if we're at a gap in the table and if so, whether we * need to ask for any records. */ - do_req = 0; if (!IS_ZERO_LSN(lp->waiting_lsn) && log_compare(&lp->ready_lsn, &lp->waiting_lsn) != 0) { /* * We got a record and processed it, but we may * still be waiting for more records. */ - next_lsn = lp->ready_lsn; - do_req = ++lp->rcvd_recs >= lp->wait_recs; - if (do_req) { - lp->wait_recs = rep->request_gap; - lp->rcvd_recs = 0; - if (log_compare(&rp->lsn, - &lp->max_wait_lsn) == 0) { - /* - * This single record was requested - * so ask for the rest of the gap. 
- */ - lp->max_wait_lsn = lp->waiting_lsn; - memset(&max_lsn_dbt, - 0, sizeof(max_lsn_dbt)); - max_lsn_dbt.data = &lp->waiting_lsn; - max_lsn_dbt.size = - sizeof(lp->waiting_lsn); - max_lsn_dbtp = &max_lsn_dbt; - } - } + if (__rep_check_doreq(dbenv, rep)) + __rep_loggap_req(dbenv, rep, &rp->lsn, 0); } else { lp->wait_recs = 0; ZERO_LSN(lp->max_wait_lsn); } - if (dbc != NULL) - if ((ret = __db_c_close(dbc)) != 0) - goto err; - dbc = NULL; - - if (do_req) { - MUTEX_LOCK(dbenv, db_rep->rep_mutexp); - eid = db_rep->region->master_id; - MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); - if (eid != DB_EID_INVALID) { - rep->stat.st_log_requested++; - (void)__rep_send_message(dbenv, eid, - REP_LOG_REQ, &next_lsn, max_lsn_dbtp, 0); - } - } } else if (cmp > 0) { /* * The LSN is higher than the one we were waiting for. @@ -1296,10 +1289,6 @@ gap_check: max_lsn_dbtp = NULL; memset(&key_dbt, 0, sizeof(key_dbt)); key_dbt.data = rp; key_dbt.size = sizeof(*rp); - R_LOCK(dbenv, &dblp->reginfo); - next_lsn = lp->lsn; - R_UNLOCK(dbenv, &dblp->reginfo); - do_req = 0; if (lp->wait_recs == 0) { /* * This is a new gap. Initialize the number of @@ -1311,42 +1300,17 @@ gap_check: max_lsn_dbtp = NULL; lp->rcvd_recs = 0; ZERO_LSN(lp->max_wait_lsn); } + if (__rep_check_doreq(dbenv, rep)) + __rep_loggap_req(dbenv, rep, &rp->lsn, 0); - if (++lp->rcvd_recs >= lp->wait_recs) { - /* - * If we've waited long enough, request the record - * (or set of records) and double the wait interval. - */ - do_req = 1; - lp->rcvd_recs = 0; - lp->wait_recs *= 2; - if (lp->wait_recs > rep->max_gap) - lp->wait_recs = rep->max_gap; - - /* - * If we've never requested this record, then request - * everything between it and the first record we have. - * If we have requested this record, then only request - * this record, not the entire gap. - */ - if (IS_ZERO_LSN(lp->max_wait_lsn)) { - lp->max_wait_lsn = lp->waiting_lsn; - memset(&max_lsn_dbt, 0, sizeof(max_lsn_dbt)); - max_lsn_dbt.data = &lp->waiting_lsn; - max_lsn_dbt.size = sizeof(lp->waiting_lsn); - max_lsn_dbtp = &max_lsn_dbt; - } else { - max_lsn_dbtp = NULL; - lp->max_wait_lsn = next_lsn; - } - } - - ret = __db_put(dbp, NULL, &key_dbt, rec, 0); + ret = __db_put(dbp, NULL, &key_dbt, rec, DB_NOOVERWRITE); rep->stat.st_log_queued++; rep->stat.st_log_queued_total++; if (rep->stat.st_log_queued_max < rep->stat.st_log_queued) rep->stat.st_log_queued_max = rep->stat.st_log_queued; + if (ret == DB_KEYEXIST) + ret = 0; if (ret != 0) goto done; @@ -1354,41 +1318,12 @@ gap_check: max_lsn_dbtp = NULL; log_compare(&rp->lsn, &lp->waiting_lsn) < 0) lp->waiting_lsn = rp->lsn; - if (do_req) { - /* Request the LSN we are still waiting for. */ - MUTEX_LOCK(dbenv, db_rep->rep_mutexp); - eid = db_rep->region->master_id; - - /* - * If the master_id is invalid, this means that since - * the last record was sent, somebody declared an - * election and we may not have a master to request - * things of. - * - * This is not an error; when we find a new master, - * we'll re-negotiate where the end of the log is and - * try to to bring ourselves up to date again anyway. - */ - if (eid != DB_EID_INVALID) { - rep->stat.st_log_requested++; - MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); - (void)__rep_send_message(dbenv, eid, - REP_LOG_REQ, &next_lsn, max_lsn_dbtp, 0); - } else { - MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); - (void)__rep_send_message(dbenv, - DB_EID_BROADCAST, REP_MASTER_REQ, - NULL, NULL, 0); - } - } - /* * If this is permanent; let the caller know that we have * not yet written it to disk, but we've accepted it. 
*/ if (ret == 0 && F_ISSET(rp, DB_LOG_PERM)) { - if (ret_lsnp != NULL) - *ret_lsnp = rp->lsn; + max_lsn = rp->lsn; ret = DB_REP_NOTPERM; } goto done; @@ -1398,124 +1333,78 @@ gap_check: max_lsn_dbtp = NULL; * don't currently hold the rep mutex. */ rep->stat.st_log_duplicated++; + if (F_ISSET(rp, DB_LOG_PERM)) + max_lsn = lp->max_perm_lsn; goto done; } - if (ret != 0 || cmp < 0 || (cmp == 0 && IS_SIMPLE(rectype))) - goto done; - /* - * If we got here, then we've got a log record in rp and rec that - * we need to process. - */ - switch (rectype) { - case DB___dbreg_register: - /* - * DB opens occur in the context of a transaction, so we can - * simply handle them when we process the transaction. Closes, - * however, are not transaction-protected, so we have to - * handle them here. - * - * Note that it should be unsafe for the master to do a close - * of a file that was opened in an active transaction, so we - * should be guaranteed to get the ordering right. - */ - memcpy(&txnid, (u_int8_t *)rec->data + - ((u_int8_t *)&dbreg_args.txnid - (u_int8_t *)&dbreg_args), - sizeof(u_int32_t)); - if (txnid == TXN_INVALID && - !F_ISSET(rep, REP_F_LOGSONLY)) - ret = __db_dispatch(dbenv, dbenv->recover_dtab, - dbenv->recover_dtab_size, rec, &rp->lsn, - DB_TXN_APPLY, NULL); - break; - case DB___txn_ckp: - /* Sync the memory pool. */ - memcpy(&ckp_lsn, (u_int8_t *)rec->data + - ((u_int8_t *)&ckp_args.ckp_lsn - (u_int8_t *)&ckp_args), - sizeof(DB_LSN)); - if (!F_ISSET(rep, REP_F_LOGSONLY)) - ret = __memp_sync(dbenv, &ckp_lsn); - else - /* - * We ought to make sure the logs on a logs-only - * replica get flushed now and again. - */ - ret = __log_flush(dbenv, &ckp_lsn); - /* Update the last_ckp in the txn region. */ - if (ret == 0) - __txn_updateckp(dbenv, &rp->lsn); - else { - __db_err(dbenv, "Error syncing ckp [%lu][%lu]", - (u_long)ckp_lsn.file, (u_long)ckp_lsn.offset); - __db_panic(dbenv, ret); - } - break; - case DB___txn_regop: - if (!F_ISSET(rep, REP_F_LOGSONLY)) - do { - /* - * If an application is doing app-specific - * recovery and acquires locks while applying - * a transaction, it can deadlock. Any other - * locks held by this thread should have been - * discarded in the __rep_process_txn error - * path, so if we simply retry, we should - * eventually succeed. - */ - ret = __rep_process_txn(dbenv, rec); - } while (ret == DB_LOCK_DEADLOCK); + /* Check if we need to go back into the table. */ + if (ret == 0 && log_compare(&lp->ready_lsn, &lp->waiting_lsn) == 0) + goto gap_check; - /* Now flush the log unless we're running TXN_NOSYNC. */ - if (ret == 0 && !F_ISSET(dbenv, DB_ENV_TXN_NOSYNC)) - ret = __log_flush(dbenv, NULL); - if (ret != 0) { - __db_err(dbenv, "Error processing txn [%lu][%lu]", - (u_long)rp->lsn.file, (u_long)rp->lsn.offset); - __db_panic(dbenv, ret); - } - break; - case DB___txn_xa_regop: - ret = __log_flush(dbenv, NULL); - break; - default: - goto err; +done: +err: /* Check if we need to go back into the table. */ + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + if (ret == 0 && + F_ISSET(rep, REP_F_RECOVER_LOG) && + log_compare(&lp->ready_lsn, &rep->last_lsn) >= 0) { + rep->last_lsn = max_lsn; + ZERO_LSN(max_lsn); + ret = DB_REP_LOGREADY; } + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); - /* Check if we need to go back into the table. 
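Once the mutexes and the on-disk __db.rep.db bookkeeping table are stripped away, the gap handling in __rep_apply is a small state machine: apply a record only when it carries the next LSN expected, park anything from the future, drop duplicates from the past, and every time the expected LSN advances, drain whatever parked records have become contiguous. The in-memory model below mirrors that shape only; a fixed array stands in for the bookkeeping database, LSNs are reduced to a single sequence number, and all names are invented.

#include <stdio.h>
#include <string.h>

#define MAXPARK 64

/* Toy replication client: real LSNs are file/offset pairs, here just ints. */
struct client {
    unsigned ready;           /* next LSN we expect to apply */
    unsigned parked[MAXPARK]; /* out-of-order LSNs waiting for the gap */
    int      nparked;
};

static void
apply(unsigned lsn)
{
    printf("applied %u\n", lsn);
}

/* Remove lsn from the parking area if present; return 1 if it was there. */
static int
unpark(struct client *c, unsigned lsn)
{
    int i;

    for (i = 0; i < c->nparked; i++)
        if (c->parked[i] == lsn) {
            c->parked[i] = c->parked[--c->nparked];
            return (1);
        }
    return (0);
}

/* The three cases of __rep_apply: expected, future (a gap), or duplicate. */
static void
process(struct client *c, unsigned lsn)
{
    int i, seen;

    if (lsn == c->ready) {               /* the record we were waiting for */
        apply(lsn);
        c->ready++;
        while (unpark(c, c->ready)) {    /* drain newly contiguous records */
            apply(c->ready);
            c->ready++;
        }
    } else if (lsn > c->ready) {         /* a gap: park one copy of it */
        for (i = 0, seen = 0; i < c->nparked; i++)
            if (c->parked[i] == lsn)
                seen = 1;
        if (!seen && c->nparked < MAXPARK)
            c->parked[c->nparked++] = lsn;
    } else                               /* already applied */
        printf("duplicate %u ignored\n", lsn);
}

int
main(void)
{
    struct client c;
    unsigned arrivals[] = { 1, 4, 3, 5, 2, 2, 6 };
    size_t i;

    memset(&c, 0, sizeof(c));
    c.ready = 1;
    for (i = 0; i < sizeof(arrivals) / sizeof(arrivals[0]); i++)
        process(&c, arrivals[i]);
    return (0);
}

The real code additionally decides at each step whether the gap has been outstanding long enough to re-request it from the master, via __rep_check_doreq and __rep_loggap_req.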
*/ - if (ret == 0) { - if (log_compare(&lp->ready_lsn, &lp->waiting_lsn) == 0) - goto gap_check; + if (ret == 0 && !F_ISSET(rep, REP_F_RECOVER_LOG) && + !IS_ZERO_LSN(max_lsn)) { + if (ret_lsnp != NULL) + *ret_lsnp = max_lsn; + ret = DB_REP_ISPERM; + DB_ASSERT(log_compare(&max_lsn, &lp->max_perm_lsn) >= 0); + lp->max_perm_lsn = max_lsn; } - -done: -err: if (dbc != NULL && (t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); - if (ret == 0 && F_ISSET(dbenv, DB_ENV_LOG_AUTOREMOVE) && - rp->rectype == REP_NEWFILE) + if (ret == 0 && rp->rectype == REP_NEWFILE && lp->db_log_autoremove) __log_autoremove(dbenv); if (control_dbt.data != NULL) __os_ufree(dbenv, control_dbt.data); if (rec_dbt.data != NULL) __os_ufree(dbenv, rec_dbt.data); - if (ret == 0 && gap) { - if (ret_lsnp != NULL) - *ret_lsnp = max_lsn; - ret = DB_REP_ISPERM; + + if (ret == DB_REP_NOTPERM && !F_ISSET(rep, REP_F_RECOVER_LOG) && + !IS_ZERO_LSN(max_lsn) && ret_lsnp != NULL) + *ret_lsnp = max_lsn; + + /* + * Startup is complete when we process our first live record. However, + * we want to return DB_REP_STARTUPDONE on the first record we can -- + * but other return values trump this one. We know we've processed at + * least one record when rectype is non-zero. + */ + if (ret == 0 && !F_ISSET(rp, DB_LOG_RESEND) && + rectype != 0 && rep->stat.st_startup_complete == 0) { + rep->stat.st_startup_complete = 1; + ret = DB_REP_STARTUPDONE; } #ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) { - if (ret == DB_REP_ISPERM) - __db_err(dbenv, "Returning ISPERM [%lu][%lu]", - (u_long)ret_lsnp->file, (u_long)ret_lsnp->offset); - else if (ret == DB_REP_NOTPERM) - __db_err(dbenv, "Returning NOTPERM [%lu][%lu]", - (u_long)ret_lsnp->file, (u_long)ret_lsnp->offset); - else if (ret != 0) - __db_err(dbenv, "Returning %d [%lu][%lu]", ret, - (u_long)ret_lsnp->file, (u_long)ret_lsnp->offset); - } + if (ret == DB_REP_ISPERM) + RPRINT(dbenv, rep, (dbenv, &mb, "Returning ISPERM [%lu][%lu]", + (u_long)max_lsn.file, (u_long)max_lsn.offset)); + else if (ret == DB_REP_LOGREADY) + RPRINT(dbenv, rep, (dbenv, &mb, + "Returning LOGREADY up to [%lu][%lu]", + (u_long)rep->last_lsn.file, + (u_long)rep->last_lsn.offset)); + else if (ret == DB_REP_NOTPERM) + RPRINT(dbenv, rep, (dbenv, &mb, "Returning NOTPERM [%lu][%lu]", + (u_long)max_lsn.file, (u_long)max_lsn.offset)); + else if (ret == DB_REP_STARTUPDONE) + RPRINT(dbenv, rep, (dbenv, &mb, + "Returning STARTUPDONE [%lu][%lu]", + (u_long)rp->lsn.file, (u_long)rp->lsn.offset)); + else if (ret != 0) + RPRINT(dbenv, rep, (dbenv, &mb, "Returning %d [%lu][%lu]", ret, + (u_long)max_lsn.file, (u_long)max_lsn.offset)); #endif return (ret); } @@ -1543,29 +1432,29 @@ __rep_process_txn(dbenv, rec) __txn_regop_args *txn_args; __txn_xa_regop_args *prep_args; u_int32_t lockid, rectype; - int i, ret, t_ret; + u_int i; + int ret, t_ret; void *txninfo; db_rep = dbenv->rep_handle; rep = db_rep->region; - logc = NULL; + txn_args = NULL; + prep_args = NULL; txninfo = NULL; + memset(&data_dbt, 0, sizeof(data_dbt)); if (F_ISSET(dbenv, DB_ENV_THREAD)) F_SET(&data_dbt, DB_DBT_REALLOC); /* - * There are two phases: First, we have to traverse - * backwards through the log records gathering the list - * of all LSNs in the transaction. Once we have this information, - * we can loop through and then apply it. - */ - - /* - * We may be passed a prepare (if we're restoring a prepare - * on upgrade) instead of a commit (the common case). - * Check which and behave appropriately. 
+ * There are two phases: First, we have to traverse backwards through + * the log records gathering the list of all LSNs in the transaction. + * Once we have this information, we can loop through and then apply it. + * + * We may be passed a prepare (if we're restoring a prepare on upgrade) + * instead of a commit (the common case). Check which it is and behave + * appropriately. */ memcpy(&rectype, rec->data, sizeof(rectype)); memset(&lc, 0, sizeof(lc)); @@ -1641,11 +1530,11 @@ err: memset(&req, 0, sizeof(req)); if ((t_ret = __lock_id_free(dbenv, lockid)) != 0 && ret == 0) ret = t_ret; -err1: if (rectype == DB___txn_regop) +err1: if (txn_args != NULL) __os_free(dbenv, txn_args); - else + if (prep_args != NULL) __os_free(dbenv, prep_args); - if (lc.nalloc != 0) + if (lc.array != NULL) __os_free(dbenv, lc.array); if (logc != NULL && (t_ret = __log_c_close(logc)) != 0 && ret == 0) @@ -1683,7 +1572,8 @@ __rep_collect_txn(dbenv, lsnp, lc) DB_LSN c_lsn; DBT data; u_int32_t rectype; - int nalloc, ret, t_ret; + u_int nalloc; + int ret, t_ret; memset(&data, 0, sizeof(data)); F_SET(&data, DB_DBT_REALLOC); @@ -1726,7 +1616,7 @@ __rep_collect_txn(dbenv, lsnp, lc) if (ret != 0) goto err; - } + } if (ret != 0) __db_err(dbenv, "collect failed at: [%lu][%lu]", (u_long)lsnp->file, (u_long)lsnp->offset); @@ -1769,7 +1659,7 @@ __rep_newfile(dbenv, rc, lsnp) lp = dblp->reginfo.primary; if (rc->lsn.file + 1 > lp->lsn.file) - return (__log_newfile(dblp, lsnp)); + return (__log_newfile(dblp, lsnp, 0)); else { /* We've already applied this NEWFILE. Just ignore it. */ *lsnp = lp->lsn; @@ -1780,7 +1670,7 @@ __rep_newfile(dbenv, rc, lsnp) /* * __rep_tally -- * PUBLIC: int __rep_tally __P((DB_ENV *, REP *, int, int *, - * PUBLIC: u_int32_t, u_int32_t)); + * PUBLIC: u_int32_t, roff_t)); * * Handle incoming vote1 message on a client. Called with the db_rep * mutex held. This function will return 0 if we successfully tally @@ -1793,16 +1683,18 @@ __rep_tally(dbenv, rep, eid, countp, egen, vtoff) DB_ENV *dbenv; REP *rep; int eid, *countp; - u_int32_t egen, vtoff; + u_int32_t egen; + roff_t vtoff; { REP_VTALLY *tally, *vtp; int i; - -#ifndef DIAGNOSTIC +#ifdef DIAGNOSTIC + DB_MSGBUF mb; +#else COMPQUIET(rep, NULL); #endif - tally = R_ADDR((REGINFO *)dbenv->reginfo, vtoff); + tally = R_ADDR(dbenv, (REGINFO *)dbenv->reginfo, vtoff); i = 0; vtp = &tally[i]; while (i < *countp) { @@ -1817,13 +1709,10 @@ __rep_tally(dbenv, rep, eid, countp, egen, vtoff) * Also ignore votes that are duplicates. */ if (vtp->eid == eid) { -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, + RPRINT(dbenv, rep, (dbenv, &mb, "Tally found[%d] (%d, %lu), this vote (%d, %lu)", i, vtp->eid, (u_long)vtp->egen, - eid, (u_long)egen); -#endif + eid, (u_long)egen)); if (vtp->egen >= egen) return (1); else { @@ -1839,14 +1728,12 @@ __rep_tally(dbenv, rep, eid, countp, egen, vtoff) * seen before. Tally this vote. 
*/ #ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) { - if (vtoff == rep->tally_off) - __db_err(dbenv, "Tallying VOTE1[%d] (%d, %lu)", - i, eid, (u_long)egen); - else - __db_err(dbenv, "Tallying VOTE2[%d] (%d, %lu)", - i, eid, (u_long)egen); - } + if (vtoff == rep->tally_off) + RPRINT(dbenv, rep, (dbenv, &mb, "Tallying VOTE1[%d] (%d, %lu)", + i, eid, (u_long)egen)); + else + RPRINT(dbenv, rep, (dbenv, &mb, "Tallying VOTE2[%d] (%d, %lu)", + i, eid, (u_long)egen)); #endif vtp->eid = eid; vtp->egen = egen; @@ -1857,7 +1744,7 @@ __rep_tally(dbenv, rep, eid, countp, egen, vtoff) /* * __rep_cmp_vote -- * PUBLIC: void __rep_cmp_vote __P((DB_ENV *, REP *, int *, DB_LSN *, - * PUBLIC: int, int, int)); + * PUBLIC: int, u_int32_t, u_int32_t)); * * Compare incoming vote1 message on a client. Called with the db_rep * mutex held. @@ -1868,11 +1755,14 @@ __rep_cmp_vote(dbenv, rep, eidp, lsnp, priority, gen, tiebreaker) REP *rep; int *eidp; DB_LSN *lsnp; - int priority, gen, tiebreaker; + int priority; + u_int32_t gen, tiebreaker; { int cmp; -#ifndef DIAGNOSTIC +#ifdef DIAGNOSTIC + DB_MSGBUF mb; +#else COMPQUIET(dbenv, NULL); #endif cmp = log_compare(lsnp, &rep->w_lsn); @@ -1889,10 +1779,7 @@ __rep_cmp_vote(dbenv, rep, eidp, lsnp, priority, gen, tiebreaker) (cmp == 0 && (priority > rep->w_priority || (priority == rep->w_priority && (tiebreaker > rep->w_tiebreaker))))) { -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, "Accepting new vote"); -#endif + RPRINT(dbenv, rep, (dbenv, &mb, "Accepting new vote")); rep->winner = *eidp; rep->w_priority = priority; rep->w_lsn = *lsnp; @@ -1935,27 +1822,25 @@ __rep_cmp_vote2(dbenv, rep, eid, egen) { int i; REP_VTALLY *tally, *vtp; +#ifdef DIAGNOSTIC + DB_MSGBUF mb; +#endif - tally = R_ADDR((REGINFO *)dbenv->reginfo, rep->tally_off); + tally = R_ADDR(dbenv, (REGINFO *)dbenv->reginfo, rep->tally_off); i = 0; vtp = &tally[i]; for (i = 0; i < rep->sites; i++) { vtp = &tally[i]; if (vtp->eid == eid && vtp->egen == egen) { -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, + RPRINT(dbenv, rep, (dbenv, &mb, "Found matching vote1 (%d, %lu), at %d of %d", - eid, (u_long)egen, i, rep->sites); -#endif + eid, (u_long)egen, i, rep->sites)); return (0); } } -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, "Did not find vote1 for eid %d, egen %lu", - eid, (u_long)egen); -#endif + RPRINT(dbenv, rep, + (dbenv, &mb, "Didn't find vote1 for eid %d, egen %lu", + eid, (u_long)egen)); return (1); } @@ -1965,19 +1850,22 @@ __rep_dorecovery(dbenv, lsnp, trunclsnp) DB_LSN *lsnp, *trunclsnp; { DB_LSN lsn; + DB_REP *db_rep; DBT mylog; DB_LOGC *logc; - int ret, t_ret, undo; + int ret, t_ret, update; u_int32_t rectype; __txn_regop_args *txnrec; - /* Figure out if we are backing out any commited transactions. */ + db_rep = dbenv->rep_handle; + + /* Figure out if we are backing out any committed transactions. 
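The loop that follows this comment walks the log backward from its current end down to the LSN agreed upon with the master and asks one question: does the region about to be rolled back contain a __txn_regop that is not an abort, i.e. a committed transaction? The answer becomes the update argument passed to __db_apprec. Below is a standalone model of just that scan over an in-memory array; the record layout and the opcode values are invented, only the scan-and-classify shape matches the code.

#include <stdio.h>

enum rectype { REC_OTHER, REC_TXN_REGOP };
enum opcode  { TXN_COMMIT_OP, TXN_ABORT_OP }; /* stand-ins, not DB's values */

struct logrec {
    unsigned     lsn; /* toy LSN: a plain sequence number */
    enum rectype type;
    enum opcode  op;  /* meaningful only for REC_TXN_REGOP */
};

/*
 * Walk backward over everything newer than sync_lsn and report whether a
 * committed transaction would be undone by truncating to sync_lsn.
 */
static int
backing_out_commits(const struct logrec *log, int nrecs, unsigned sync_lsn)
{
    int i;

    for (i = nrecs - 1; i >= 0 && log[i].lsn > sync_lsn; i--)
        if (log[i].type == REC_TXN_REGOP && log[i].op != TXN_ABORT_OP)
            return (1);
    return (0);
}

int
main(void)
{
    struct logrec log[] = {
        { 1, REC_OTHER,     TXN_ABORT_OP },
        { 2, REC_TXN_REGOP, TXN_COMMIT_OP },
        { 3, REC_OTHER,     TXN_ABORT_OP },
        { 4, REC_TXN_REGOP, TXN_ABORT_OP },
    };

    printf("sync to 3: update=%d\n", backing_out_commits(log, 4, 3));
    printf("sync to 1: update=%d\n", backing_out_commits(log, 4, 1));
    return (0);
}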
*/ if ((ret = __log_cursor(dbenv, &logc)) != 0) return (ret); memset(&mylog, 0, sizeof(mylog)); - undo = 0; - while (undo == 0 && + update = 0; + while (update == 0 && (ret = __log_c_get(logc, &lsn, &mylog, DB_PREV)) == 0 && log_compare(&lsn, lsnp) > 0) { memcpy(&rectype, mylog.data, sizeof(rectype)); @@ -1985,14 +1873,19 @@ __rep_dorecovery(dbenv, lsnp, trunclsnp) if ((ret = __txn_regop_read(dbenv, mylog.data, &txnrec)) != 0) goto err; - if (txnrec->opcode != TXN_ABORT) { - undo = 1; - } + if (txnrec->opcode != TXN_ABORT) + update = 1; __os_free(dbenv, txnrec); } } - ret = __db_apprec(dbenv, lsnp, trunclsnp, undo, 0); + /* + * If we successfully run recovery, we've opened all the necessary + * files. We are guaranteed to be single-threaded here, so no mutex + * is necessary. + */ + if ((ret = __db_apprec(dbenv, lsnp, trunclsnp, update, 0)) == 0) + F_SET(db_rep, DBREP_OPENFILES); err: if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) ret = t_ret; @@ -2008,17 +1901,19 @@ err: if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) * correctly and move forward. */ static int -__rep_verify_match(dbenv, rp, savetime) +__rep_verify_match(dbenv, reclsnp, savetime) DB_ENV *dbenv; - REP_CONTROL *rp; + DB_LSN *reclsnp; time_t savetime; { DB_LOG *dblp; - DB_LSN ckplsn, trunclsn; + DB_LSN trunclsn; DB_REP *db_rep; LOG *lp; + REGENV *renv; + REGINFO *infop; REP *rep; - int done, master, ret, wait_cnt; + int done, master, ret; u_int32_t unused; dblp = dbenv->lg_handle; @@ -2026,6 +1921,8 @@ __rep_verify_match(dbenv, rp, savetime) rep = db_rep->region; lp = dblp->reginfo.primary; ret = 0; + infop = dbenv->reginfo; + renv = infop->primary; /* * Check if the savetime is different than our current time stamp. @@ -2033,92 +1930,36 @@ __rep_verify_match(dbenv, rp, savetime) * and we lost. We must give up. */ MUTEX_LOCK(dbenv, db_rep->db_mutexp); - MUTEX_LOCK(dbenv, db_rep->rep_mutexp); - done = savetime != rep->timestamp; - MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + done = savetime != renv->rep_timestamp; if (done) { MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); return (0); } - ZERO_LSN(lp->verify_lsn); - - /* Check if we our log is already up to date. */ - R_LOCK(dbenv, &dblp->reginfo); - done = rp->lsn.file == lp->lsn.file && - rp->lsn.offset + lp->len == lp->lsn.offset; - if (done) { - lp->ready_lsn = lp->lsn; - ZERO_LSN(lp->waiting_lsn); - } - R_UNLOCK(dbenv, &dblp->reginfo); - if (done) - goto finish; /* Yes, holding the mutex. */ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); - if (F_ISSET(rep, REP_F_LOGSONLY)) { - /* - * If we're a logs-only client, we can simply truncate - * the log to the point where it last agreed with the - * master's. - */ - INIT_LSN(ckplsn); - if ((ret = __log_flush(dbenv, &rp->lsn)) != 0 || (ret = - __log_vtruncate(dbenv, &rp->lsn, &ckplsn, &trunclsn)) != 0) - return (ret); - } else { - /* - * Make sure the world hasn't changed while we tried to get - * the lock. If it hasn't then it's time for us to kick all - * operations out of DB and run recovery. - */ - MUTEX_LOCK(dbenv, db_rep->rep_mutexp); - if (F_ISSET(rep, REP_F_READY) || rep->in_recovery != 0) { - rep->stat.st_msgs_recover++; - goto errunlock; - } - - /* Phase 1: set REP_F_READY and wait for op_cnt to go to 0. 
*/ - F_SET(rep, REP_F_READY); - for (wait_cnt = 0; rep->op_cnt != 0;) { - MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); - __os_sleep(dbenv, 1, 0); -#ifdef DIAGNOSTIC - if (++wait_cnt % 60 == 0) - __db_err(dbenv, - "Waiting for txn_cnt to run replication recovery for %d minutes", - wait_cnt / 60); -#endif - MUTEX_LOCK(dbenv, db_rep->rep_mutexp); - } + /* + * Make sure the world hasn't changed while we tried to get + * the lock. If it hasn't then it's time for us to kick all + * operations out of DB and run recovery. + */ + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + if (!F_ISSET(rep, REP_F_RECOVER_LOG) && + (F_ISSET(rep, REP_F_READY) || rep->in_recovery != 0)) { + rep->stat.st_msgs_recover++; + goto errunlock; + } - /* - * Phase 2: set in_recovery and wait for handle count to go - * to 0 and for the number of threads in __rep_process_message - * to go to 1 (us). - */ - rep->in_recovery = 1; - for (wait_cnt = 0; rep->handle_cnt != 0 || rep->msg_th > 1;) { - MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); - __os_sleep(dbenv, 1, 0); -#ifdef DIAGNOSTIC - if (++wait_cnt % 60 == 0) - __db_err(dbenv, -"Waiting for handle/thread count to run replication recovery for %d minutes", - wait_cnt / 60); -#endif - MUTEX_LOCK(dbenv, db_rep->rep_mutexp); - } + __rep_lockout(dbenv, db_rep, rep); - /* OK, everyone is out, we can now run recovery. */ - MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + /* OK, everyone is out, we can now run recovery. */ + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); - if ((ret = __rep_dorecovery(dbenv, &rp->lsn, &trunclsn)) != 0) { - MUTEX_LOCK(dbenv, db_rep->rep_mutexp); - rep->in_recovery = 0; - F_CLR(rep, REP_F_READY); - goto errunlock; - } + if ((ret = __rep_dorecovery(dbenv, reclsnp, &trunclsn)) != 0) { + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + rep->in_recovery = 0; + F_CLR(rep, REP_F_READY); + goto errunlock; } /* @@ -2128,7 +1969,9 @@ __rep_verify_match(dbenv, rp, savetime) */ MUTEX_LOCK(dbenv, db_rep->db_mutexp); lp->ready_lsn = trunclsn; -finish: ZERO_LSN(lp->waiting_lsn); + ZERO_LSN(lp->waiting_lsn); + ZERO_LSN(lp->max_wait_lsn); + lp->max_perm_lsn = *reclsnp; lp->wait_recs = 0; lp->rcvd_recs = 0; ZERO_LSN(lp->verify_lsn); @@ -2141,7 +1984,7 @@ finish: ZERO_LSN(lp->waiting_lsn); */ F_SET(db_rep->rep_db, DB_AM_RECOVER); MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); - ret = __db_truncate(db_rep->rep_db, NULL, &unused, 0); + ret = __db_truncate(db_rep->rep_db, NULL, &unused); MUTEX_LOCK(dbenv, db_rep->db_mutexp); F_CLR(db_rep->rep_db, DB_AM_RECOVER); MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); @@ -2149,7 +1992,7 @@ finish: ZERO_LSN(lp->waiting_lsn); MUTEX_LOCK(dbenv, db_rep->rep_mutexp); rep->stat.st_log_queued = 0; rep->in_recovery = 0; - F_CLR(rep, REP_F_NOARCHIVE | REP_F_READY | REP_F_RECOVER); + F_CLR(rep, REP_F_NOARCHIVE | REP_F_RECOVER_MASK); if (ret != 0) goto errunlock; @@ -2176,10 +2019,457 @@ finish: ZERO_LSN(lp->waiting_lsn); ret = 0; else (void)__rep_send_message(dbenv, - master, REP_ALL_REQ, &rp->lsn, NULL, 0); + master, REP_ALL_REQ, reclsnp, NULL, 0); if (0) { errunlock: MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); } return (ret); } + +/* + * __rep_do_ckp -- + * Perform the memp_sync necessary for this checkpoint without holding + * the db_rep->db_mutexp. All callers of this function must hold the + * db_rep->db_mutexp and must not be holding the db_rep->rep_mutexp. 
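The locking contract stated above for __rep_do_ckp is the usual "drop the mutex around slow work" pattern: the caller holds db_mutexp, the function releases it for the duration of the memory-pool sync so other threads are not stalled behind the flush, and reacquires it before returning so the caller's view is unchanged. A small generic pthread sketch of the same shape follows; it is an illustration, not the library's own mutex layer. Build with -pthread.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t db_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the expensive buffer-flush work done by memp_sync(). */
static int
slow_sync(void)
{
    sleep(1);    /* pretend to flush dirty buffers */
    return (0);
}

/*
 * do_ckp --
 *    Caller holds db_mutex.  Release it while syncing so other threads
 *    are not blocked behind the flush, then reacquire it so the caller
 *    still holds the lock when we return.
 */
static int
do_ckp(void)
{
    int ret;

    pthread_mutex_unlock(&db_mutex);
    ret = slow_sync();
    pthread_mutex_lock(&db_mutex);
    return (ret);
}

int
main(void)
{
    pthread_mutex_lock(&db_mutex);    /* caller's lock */
    printf("ckp returned %d\n", do_ckp());
    pthread_mutex_unlock(&db_mutex);
    return (0);
}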
+ */ +static int +__rep_do_ckp(dbenv, rec, rp) + DB_ENV *dbenv; + DBT *rec; + REP_CONTROL *rp; +{ + DB_LSN ckp_lsn; + DB_REP *db_rep; + int ret; + + db_rep = dbenv->rep_handle; + + MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); + + DB_TEST_CHECKPOINT(dbenv, dbenv->test_check); + + /* Sync the memory pool. */ + memcpy(&ckp_lsn, (u_int8_t *)rec->data + + SSZ(__txn_ckp_args, ckp_lsn), sizeof(DB_LSN)); + ret = __memp_sync(dbenv, &ckp_lsn); + + /* Update the last_ckp in the txn region. */ + if (ret == 0) + __txn_updateckp(dbenv, &rp->lsn); + else { + __db_err(dbenv, "Error syncing ckp [%lu][%lu]", + (u_long)ckp_lsn.file, (u_long)ckp_lsn.offset); + ret = __db_panic(dbenv, ret); + } + MUTEX_LOCK(dbenv, db_rep->db_mutexp); + + return (ret); +} + +/* + * __rep_remfirst -- + * Remove the first entry from the __db.rep.db + */ +static int +__rep_remfirst(dbenv, cntrl, rec) + DB_ENV *dbenv; + DBT *cntrl; + DBT *rec; +{ + DB *dbp; + DBC *dbc; + DB_REP *db_rep; + int ret, t_ret; + u_int32_t rectype; + + db_rep = dbenv->rep_handle; + dbp = db_rep->rep_db; + + if ((ret = __db_cursor(dbp, NULL, &dbc, 0)) != 0) + return (ret); + + /* The DBTs need to persist through another call. */ + memset(cntrl, 0, sizeof(*cntrl)); + memset(rec, 0, sizeof(*rec)); + F_SET(cntrl, DB_DBT_REALLOC); + F_SET(rec, DB_DBT_REALLOC); + if ((ret = __db_c_get(dbc, cntrl, rec, DB_RMW | DB_FIRST)) == 0) { + memcpy(&rectype, rec->data, sizeof(rectype)); + ret = __db_c_del(dbc, 0); + } + if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) + ret = t_ret; + + return (ret); +} + +/* + * __rep_getnext -- + * Get the next record out of the __db.rep.db table. + */ +static int +__rep_getnext(dbenv) + DB_ENV *dbenv; +{ + DB *dbp; + DB_REP *db_rep; + DB_LOG *dblp; + DBC *dbc; + DBT lsn_dbt, nextrec_dbt; + LOG *lp; + REP_CONTROL *rp; + int ret, t_ret; + + dblp = dbenv->lg_handle; + lp = dblp->reginfo.primary; + + db_rep = dbenv->rep_handle; + dbp = db_rep->rep_db; + + if ((ret = __db_cursor(dbp, NULL, &dbc, 0)) != 0) + return (ret); + + /* + * Update waiting_lsn. We need to move it + * forward to the LSN of the next record + * in the queue. + * + * If the next item in the database is a log + * record--the common case--we're not + * interested in its contents, just in its LSN. + * Optimize by doing a partial get of the data item. + */ + memset(&nextrec_dbt, 0, sizeof(nextrec_dbt)); + F_SET(&nextrec_dbt, DB_DBT_PARTIAL); + nextrec_dbt.ulen = nextrec_dbt.dlen = 0; + + memset(&lsn_dbt, 0, sizeof(lsn_dbt)); + ret = __db_c_get(dbc, &lsn_dbt, &nextrec_dbt, DB_FIRST); + if (ret != DB_NOTFOUND && ret != 0) + goto err; + + if (ret == DB_NOTFOUND) { + ZERO_LSN(lp->waiting_lsn); + /* + * Whether or not the current record is + * simple, there's no next one, and + * therefore we haven't got anything + * else to do right now. Break out. + */ + goto err; + } + rp = (REP_CONTROL *)lsn_dbt.data; + lp->waiting_lsn = rp->lsn; + +err: if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) + ret = t_ret; + return (ret); +} + +/* + * __rep_process_rec -- + * + * Given a record in 'rp', process it. In the case of a NEWFILE, that means + * potentially switching files. In the case of a checkpoint, it means doing + * the checkpoint, and in other cases, it means simply writing the record into + * the log. 
+ */ +static int +__rep_process_rec(dbenv, rp, rec, typep, ret_lsnp) + DB_ENV *dbenv; + REP_CONTROL *rp; + DBT *rec; + u_int32_t *typep; + DB_LSN *ret_lsnp; +{ + DB *dbp; + DB_LOG *dblp; + DB_REP *db_rep; + DBT control_dbt, key_dbt, rec_dbt; + LOG *lp; + REP *rep; + u_int32_t txnid; + int ret, t_ret; + + db_rep = dbenv->rep_handle; + rep = db_rep->region; + dbp = db_rep->rep_db; + dblp = dbenv->lg_handle; + lp = dblp->reginfo.primary; + ret = 0; + + if (rp->rectype == REP_NEWFILE) { + ret = __rep_newfile(dbenv, rp, &lp->ready_lsn); + + /* Make this evaluate to a simple rectype. */ + *typep = 0; + return (0); + } + + memcpy(typep, rec->data, sizeof(*typep)); + + /* + * We write all records except for checkpoint records here. + * All non-checkpoint records need to appear in the log before + * we take action upon them (i.e., we enforce write-ahead logging). + * However, we can't write the checkpoint record here until the + * data buffers are actually written to disk, else we are creating + * an invalid log -- one that says all data before a certain point + * has been written to disk. + * + * If two threads are both processing the same checkpoint record + * (because, for example, it was resent and the original finally + * arrived), we handle that below by checking for the existence of + * the log record when we add it to the replication database. + * + * Any log records that arrive while we are processing the checkpoint + * are added to the bookkeeping database because ready_lsn is not yet + * updated to point after the checkpoint record. + */ + if (*typep != DB___txn_ckp || F_ISSET(rep, REP_F_RECOVER_LOG)) { + if ((ret = __log_rep_put(dbenv, &rp->lsn, rec)) != 0) + return (ret); + rep->stat.st_log_records++; + if (F_ISSET(rep, REP_F_RECOVER_LOG)) { + *ret_lsnp = rp->lsn; + goto out; + } + } + + switch (*typep) { + case DB___dbreg_register: + /* + * DB opens occur in the context of a transaction, so we can + * simply handle them when we process the transaction. Closes, + * however, are not transaction-protected, so we have to + * handle them here. + * + * Note that it should be unsafe for the master to do a close + * of a file that was opened in an active transaction, so we + * should be guaranteed to get the ordering right. + */ + memcpy(&txnid, (u_int8_t *)rec->data + + SSZ(__dbreg_register_args, txnid), sizeof(u_int32_t)); + if (txnid == TXN_INVALID) + ret = __db_dispatch(dbenv, dbenv->recover_dtab, + dbenv->recover_dtab_size, rec, &rp->lsn, + DB_TXN_APPLY, NULL); + break; + case DB___txn_regop: + /* + * If an application is doing app-specific recovery + * and acquires locks while applying a transaction, + * it can deadlock. Any other locks held by this + * thread should have been discarded in the + * __rep_process_txn error path, so if we simply + * retry, we should eventually succeed. + */ + do { + ret = 0; + if (!F_ISSET(db_rep, DBREP_OPENFILES)) { + ret = __txn_openfiles(dbenv, NULL, 1); + F_SET(db_rep, DBREP_OPENFILES); + } + if (ret == 0) + ret = __rep_process_txn(dbenv, rec); + } while (ret == DB_LOCK_DEADLOCK); + + /* Now flush the log unless we're running TXN_NOSYNC. 
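The do/while just above encodes a simple policy for applying replicated transactions: if an application doing its own recovery takes locks inside the apply and causes a deadlock, every other lock this thread held has already been released on the error path, so retrying the whole apply is safe and eventually succeeds. A generic retry-on-deadlock wrapper of the same shape is sketched below; the error constant and apply_txn() are stand-ins, not DB_LOCK_DEADLOCK or the real __rep_process_txn.

#include <stdio.h>

#define MY_LOCK_DEADLOCK (-30995) /* stand-in value, not DB's constant */

static int attempts;

/* Pretend to apply a transaction: deadlock twice, then succeed. */
static int
apply_txn(void)
{
    return (++attempts < 3 ? MY_LOCK_DEADLOCK : 0);
}

/*
 * Retry while the only failure is a deadlock; any other result, success
 * included, ends the loop and is handed back to the caller.
 */
static int
apply_with_retry(void)
{
    int ret;

    do {
        ret = apply_txn();
    } while (ret == MY_LOCK_DEADLOCK);
    return (ret);
}

int
main(void)
{
    int ret = apply_with_retry();

    printf("ret=%d after %d attempts\n", ret, attempts);
    return (0);
}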
*/ + if (ret == 0 && !F_ISSET(dbenv, DB_ENV_TXN_NOSYNC)) + ret = __log_flush(dbenv, NULL); + if (ret != 0) { + __db_err(dbenv, "Error processing txn [%lu][%lu]", + (u_long)rp->lsn.file, (u_long)rp->lsn.offset); + ret = __db_panic(dbenv, ret); + } + break; + case DB___txn_xa_regop: + ret = __log_flush(dbenv, NULL); + break; + case DB___txn_ckp: + /* + * We do not want to hold the db_rep->db_mutexp + * mutex while syncing the mpool, so if we get + * a checkpoint record that we are supposed to + * process, we add it to the __db.rep.db, do + * the memp_sync and then go back and process + * it later, when the sync has finished. If + * this record is already in the table, then + * some other thread will process it, so simply + * return REP_NOTPERM; + */ + memset(&key_dbt, 0, sizeof(key_dbt)); + key_dbt.data = rp; + key_dbt.size = sizeof(*rp); + + /* + * We want to put this record into the tmp DB only if + * it doesn't exist, so use DB_NOOVERWRITE. + */ + ret = __db_put(dbp, NULL, &key_dbt, rec, DB_NOOVERWRITE); + if (ret == DB_KEYEXIST) { + if (ret_lsnp != NULL) + *ret_lsnp = rp->lsn; + ret = DB_REP_NOTPERM; + } + if (ret != 0) + break; + + /* + * Now, do the checkpoint. Regardless of + * whether the checkpoint succeeds or not, + * we need to remove the record we just put + * in the temporary database. If the + * checkpoint failed, return an error. We + * will act like we never received the + * checkpoint. + */ + if ((ret = __rep_do_ckp(dbenv, rec, rp)) == 0) + ret = __log_rep_put(dbenv, &rp->lsn, rec); + if ((t_ret = __rep_remfirst(dbenv, + &control_dbt, &rec_dbt)) != 0 && ret == 0) + ret = t_ret; + break; + default: + break; + } + +out: + if (ret == 0 && F_ISSET(rp, DB_LOG_PERM)) + *ret_lsnp = rp->lsn; + + return (ret); +} + +/* + * __rep_resend_req -- + * We might have dropped a message, we need to resend our request. + * The request we send is dependent on what recovery state we're in. + * The caller holds no locks. + */ +static int +__rep_resend_req(dbenv, eid) + DB_ENV *dbenv; + int eid; +{ + + DB_LOG *dblp; + DB_LSN lsn; + DB_REP *db_rep; + LOG *lp; + REP *rep; + int ret; + u_int32_t repflags; + + ret = 0; + db_rep = dbenv->rep_handle; + rep = db_rep->region; + dblp = dbenv->lg_handle; + lp = dblp->reginfo.primary; + + repflags = rep->flags; + if (FLD_ISSET(repflags, REP_F_RECOVER_VERIFY)) { + MUTEX_LOCK(dbenv, db_rep->db_mutexp); + lsn = lp->verify_lsn; + MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); + if (!IS_ZERO_LSN(lsn)) + (void)__rep_send_message(dbenv, eid, + REP_VERIFY_REQ, &lsn, NULL, 0); + goto out; + } else if (FLD_ISSET(repflags, REP_F_RECOVER_UPDATE)) { + (void)__rep_send_message(dbenv, eid, + REP_UPDATE_REQ, NULL, NULL, 0); + } else if (FLD_ISSET(repflags, REP_F_RECOVER_PAGE)) { + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + ret = __rep_pggap_req(dbenv, rep, NULL, 0); + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + } else if (FLD_ISSET(repflags, REP_F_RECOVER_LOG)) { + MUTEX_LOCK(dbenv, db_rep->db_mutexp); + __rep_loggap_req(dbenv, rep, NULL, 0); + MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); + } + +out: + return (ret); +} + +/* + * __rep_check_doreq -- + * PUBLIC: int __rep_check_doreq __P((DB_ENV *, REP *)); + * + * Check if we need to send another request. If so, compare with + * the request limits the user might have set. This assumes the + * caller holds the db_rep->db_mutexp mutex. Returns 1 if a request + * needs to be made, and 0 if it does not. 
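+ * The threshold backs off exponentially: each time a request is triggered,
+ * wait_recs doubles until it reaches rep->max_gap (for example 4, 8, 16, 32
+ * when max_gap is 32).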
+ */ +int +__rep_check_doreq(dbenv, rep) + DB_ENV *dbenv; + REP *rep; +{ + + DB_LOG *dblp; + LOG *lp; + int req; + + dblp = dbenv->lg_handle; + lp = dblp->reginfo.primary; + req = ++lp->rcvd_recs >= lp->wait_recs; + if (req) { + lp->wait_recs *= 2; + if (lp->wait_recs > rep->max_gap) + lp->wait_recs = rep->max_gap; + lp->rcvd_recs = 0; + } + return (req); +} + +/* + * __rep_lockout -- + * PUBLIC: void __rep_lockout __P((DB_ENV *, DB_REP *, REP *)); + * + * Coordinate with other threads in the library and active txns so + * that we can run single-threaded, for recovery or internal backup. + * Assumes the caller holds rep_mutexp. + */ +void +__rep_lockout(dbenv, db_rep, rep) + DB_ENV *dbenv; + DB_REP *db_rep; + REP *rep; +{ + int wait_cnt; + + /* Phase 1: set REP_F_READY and wait for op_cnt to go to 0. */ + F_SET(rep, REP_F_READY); + for (wait_cnt = 0; rep->op_cnt != 0;) { + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + __os_sleep(dbenv, 1, 0); +#ifdef DIAGNOSTIC + if (++wait_cnt % 60 == 0) + __db_err(dbenv, + "Waiting for txn_cnt to run replication recovery/backup for %d minutes", + wait_cnt / 60); +#endif + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + } + + /* + * Phase 2: set in_recovery and wait for handle count to go + * to 0 and for the number of threads in __rep_process_message + * to go to 1 (us). + */ + rep->in_recovery = 1; + for (wait_cnt = 0; rep->handle_cnt != 0 || rep->msg_th > 1;) { + MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + __os_sleep(dbenv, 1, 0); +#ifdef DIAGNOSTIC + if (++wait_cnt % 60 == 0) + __db_err(dbenv, +"Waiting for handle count to run replication recovery/backup for %d minutes", + wait_cnt / 60); +#endif + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + } +} diff --git a/db/rep/rep_region.c b/db/rep/rep_region.c index 10a001b4b..06dbc9cd8 100644 --- a/db/rep/rep_region.c +++ b/db/rep/rep_region.c @@ -1,14 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. + * + * $Id: rep_region.c,v 1.52 2004/10/07 17:20:12 bostic Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: rep_region.c,v 1.42 2003/09/04 18:06:49 bostic Exp $"; -#endif /* not lint */ +#include "db_config.h" #ifndef NO_SYSTEM_INCLUDES #if TIME_WITH_SYS_TIME @@ -30,6 +29,8 @@ static const char revid[] = "$Id: rep_region.c,v 1.42 2003/09/04 18:06:49 bostic #include "dbinc/log.h" #include "dbinc/db_am.h" +static int __rep_egen_init __P((DB_ENV *, REP *)); + /* * __rep_region_init -- * Initialize the shared memory state for the replication system. @@ -55,13 +56,13 @@ __rep_region_init(dbenv) MUTEX_LOCK(dbenv, &renv->mutex); if (renv->rep_off == INVALID_ROFF) { /* Must create the region. */ - if ((ret = __db_shalloc(infop->addr, - sizeof(REP), MUTEX_ALIGN, &rep)) != 0) + if ((ret = + __db_shalloc(infop, sizeof(REP), MUTEX_ALIGN, &rep)) != 0) goto err; memset(rep, 0, sizeof(*rep)); rep->tally_off = INVALID_ROFF; rep->v2tally_off = INVALID_ROFF; - renv->rep_off = R_OFFSET(infop, rep); + renv->rep_off = R_OFFSET(dbenv, infop, rep); if ((ret = __db_mutex_setup(dbenv, infop, &rep->mutex, MUTEX_NO_RECORD)) != 0) @@ -73,10 +74,10 @@ __rep_region_init(dbenv) * to guarantee that is to make sure they're at the beginning * of a shalloc'ed chunk. 
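 * (The MUTEX_ALIGN argument to __db_shalloc below requests the alignment the
 * mutex requires for its chunk.)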
*/ - if ((ret = __db_shalloc(infop->addr, sizeof(DB_MUTEX), + if ((ret = __db_shalloc(infop, sizeof(DB_MUTEX), MUTEX_ALIGN, &db_mutexp)) != 0) goto err; - rep->db_mutex_off = R_OFFSET(infop, db_mutexp); + rep->db_mutex_off = R_OFFSET(dbenv, infop, db_mutexp); /* * Because we have no way to prevent deadlocks and cannot log @@ -93,8 +94,8 @@ __rep_region_init(dbenv) rep->eid = DB_EID_INVALID; rep->master_id = DB_EID_INVALID; rep->gen = 0; - rep->egen = rep->gen + 1; - + if ((ret = __rep_egen_init(dbenv, rep)) != 0) + goto err; /* * Set default values for the min and max log records that we * wait before requesting a missing log record. @@ -102,13 +103,13 @@ __rep_region_init(dbenv) rep->request_gap = DB_REP_REQUEST_GAP; rep->max_gap = DB_REP_MAX_GAP; F_SET(rep, REP_F_NOARCHIVE); - (void)time(&rep->timestamp); + (void)time(&renv->rep_timestamp); } else - rep = R_ADDR(infop, renv->rep_off); + rep = R_ADDR(dbenv, infop, renv->rep_off); MUTEX_UNLOCK(dbenv, &renv->mutex); db_rep->rep_mutexp = &rep->mutex; - db_rep->db_mutexp = R_ADDR(infop, rep->db_mutex_off); + db_rep->db_mutexp = R_ADDR(dbenv, infop, rep->db_mutex_off); db_rep->region = rep; return (0); @@ -190,26 +191,101 @@ __rep_preclose(dbenv, do_closefiles) DB_ENV *dbenv; int do_closefiles; { - DB *dbp; DB_REP *db_rep; int ret, t_ret; - ret = t_ret = 0; - - /* If replication is not initialized, we have nothing to do. */ - if (!REP_ON(dbenv)) - return (0); + ret = 0; db_rep = dbenv->rep_handle; - if ((dbp = db_rep->rep_db) != NULL) { - MUTEX_LOCK(dbenv, db_rep->db_mutexp); - ret = __db_close(dbp, NULL, DB_NOSYNC); + MUTEX_LOCK(dbenv, db_rep->db_mutexp); + if (db_rep->rep_db != NULL) { + ret = __db_close(db_rep->rep_db, NULL, DB_NOSYNC); db_rep->rep_db = NULL; - MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); } - if (do_closefiles) - t_ret = __dbreg_close_files(dbenv); + if (do_closefiles) { + if ((t_ret = __dbreg_close_files(dbenv)) != 0 && ret == 0) + ret = t_ret; + F_CLR(db_rep, DBREP_OPENFILES); + } + MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); + return (ret); +} - return (ret == 0 ? t_ret : ret); +/* + * __rep_egen_init -- + * Initialize the value of egen in the region. Called + * only from __rep_region_init, which is guaranteed to be + * single-threaded as we create the rep region. We set the + * rep->egen field which is normally protected by db_rep->rep_mutex. + */ +static int +__rep_egen_init(dbenv, rep) + DB_ENV *dbenv; + REP *rep; +{ + DB_FH *fhp; + int ret; + size_t cnt; + char *p; +#ifdef DIAGNOSTIC + DB_MSGBUF mb; +#endif + + if ((ret = + __db_appname(dbenv, DB_APP_NONE, REP_EGENNAME, 0, NULL, &p)) != 0) + return (ret); + /* + * If the file doesn't exist, create it now and initialize with 1. + */ + if (__os_exists(p, NULL) != 0) { + rep->egen = rep->gen + 1; + if ((ret = __rep_write_egen(dbenv, rep->egen)) != 0) + goto err; + } else { + /* + * File exists, open it and read in our egen. + */ + if ((ret = __os_open(dbenv, p, DB_OSO_RDONLY, + __db_omode("rw----"), &fhp)) != 0) + goto err; + if ((ret = __os_read(dbenv, fhp, &rep->egen, sizeof(u_int32_t), + &cnt)) < 0 || cnt == 0) + goto err1; + RPRINT(dbenv, rep, (dbenv, &mb, "Read in egen %lu", + (u_long)rep->egen)); +err1: (void)__os_closehandle(dbenv, fhp); + } +err: __os_free(dbenv, p); + return (ret); +} + +/* + * __rep_write_egen -- + * Write out the egen into the env file. 
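+ * __rep_egen_init reads this file back when the replication region is
+ * created, so the election generation survives region teardown.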
+ * + * PUBLIC: int __rep_write_egen __P((DB_ENV *, u_int32_t)); + */ +int +__rep_write_egen(dbenv, egen) + DB_ENV *dbenv; + u_int32_t egen; +{ + DB_FH *fhp; + int ret; + size_t cnt; + char *p; + + if ((ret = + __db_appname(dbenv, DB_APP_NONE, REP_EGENNAME, 0, NULL, &p)) != 0) + return (ret); + if ((ret = __os_open(dbenv, p, DB_OSO_CREATE | DB_OSO_TRUNC, + __db_omode("rw----"), &fhp)) == 0) { + if ((ret = __os_write(dbenv, fhp, &egen, sizeof(u_int32_t), + &cnt)) != 0 || ((ret = __os_fsync(dbenv, fhp)) != 0)) + __db_err(dbenv, "%s: %s", p, db_strerror(ret)); + (void)__os_closehandle(dbenv, fhp); + } + __os_free(dbenv, p); + return (ret); } diff --git a/db/rep/rep_stat.c b/db/rep/rep_stat.c new file mode 100644 index 000000000..d740c2b40 --- /dev/null +++ b/db/rep/rep_stat.c @@ -0,0 +1,491 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2001-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: rep_stat.c,v 1.155 2004/09/29 15:36:38 bostic Exp $ + */ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include + +#if TIME_WITH_SYS_TIME +#include +#include +#else +#if HAVE_SYS_TIME_H +#include +#else +#include +#endif +#endif + +#include +#endif + +#include "db_int.h" +#include "dbinc/db_page.h" +#include "dbinc/db_am.h" +#include "dbinc/log.h" + +#ifdef HAVE_STATISTICS +static int __rep_print_all __P((DB_ENV *, u_int32_t)); +static int __rep_print_stats __P((DB_ENV *, u_int32_t)); +static int __rep_stat __P((DB_ENV *, DB_REP_STAT **, u_int32_t)); + +/* + * __rep_stat_pp -- + * DB_ENV->rep_stat pre/post processing. + * + * PUBLIC: int __rep_stat_pp __P((DB_ENV *, DB_REP_STAT **, u_int32_t)); + */ +int +__rep_stat_pp(dbenv, statp, flags) + DB_ENV *dbenv; + DB_REP_STAT **statp; + u_int32_t flags; +{ + int ret; + + PANIC_CHECK(dbenv); + ENV_REQUIRES_CONFIG(dbenv, + dbenv->rep_handle, "DB_ENV->rep_stat", DB_INIT_REP); + + if ((ret = __db_fchk(dbenv, + "DB_ENV->rep_stat", flags, DB_STAT_CLEAR)) != 0) + return (ret); + + return (__rep_stat(dbenv, statp, flags)); +} + +/* + * __rep_stat -- + * DB_ENV->rep_stat. + */ +static int +__rep_stat(dbenv, statp, flags) + DB_ENV *dbenv; + DB_REP_STAT **statp; + u_int32_t flags; +{ + DB_LOG *dblp; + DB_REP *db_rep; + DB_REP_STAT *stats; + LOG *lp; + REP *rep; + u_int32_t queued; + int dolock, ret; + + db_rep = dbenv->rep_handle; + rep = db_rep->region; + dblp = dbenv->lg_handle; + lp = dblp->reginfo.primary; + + *statp = NULL; + + /* Allocate a stat struct to return to the user. */ + if ((ret = __os_umalloc(dbenv, sizeof(DB_REP_STAT), &stats)) != 0) + return (ret); + + /* + * Read without holding the lock. If we are in client recovery, we + * copy just the stats struct so we won't block. We only copy out + * those stats that don't require acquiring any mutex. + */ + dolock = FLD_ISSET(rep->flags, REP_F_RECOVER_MASK) ? 0 : 1; + memcpy(stats, &rep->stat, sizeof(*stats)); + + /* Copy out election stats. */ + if (IN_ELECTION_TALLY(rep)) { + if (F_ISSET(rep, REP_F_EPHASE1)) + stats->st_election_status = 1; + else if (F_ISSET(rep, REP_F_EPHASE2)) + stats->st_election_status = 2; + + stats->st_election_nsites = rep->sites; + stats->st_election_cur_winner = rep->winner; + stats->st_election_priority = rep->w_priority; + stats->st_election_gen = rep->w_gen; + stats->st_election_lsn = rep->w_lsn; + stats->st_election_votes = rep->votes; + stats->st_election_tiebreaker = rep->w_tiebreaker; + } + + /* Copy out other info that's protected by the rep mutex. 
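+ * (read here without reacquiring that mutex; a statistics snapshot can
+ * tolerate slightly stale values)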
*/ + stats->st_env_id = rep->eid; + stats->st_env_priority = rep->priority; + stats->st_nsites = rep->nsites; + stats->st_master = rep->master_id; + stats->st_gen = rep->gen; + stats->st_egen = rep->egen; + + if (F_ISSET(rep, REP_F_MASTER)) + stats->st_status = DB_REP_MASTER; + else if (F_ISSET(rep, REP_F_CLIENT)) + stats->st_status = DB_REP_CLIENT; + else + stats->st_status = 0; + + if (LF_ISSET(DB_STAT_CLEAR)) { + queued = rep->stat.st_log_queued; + memset(&rep->stat, 0, sizeof(rep->stat)); + rep->stat.st_log_queued = rep->stat.st_log_queued_total = + rep->stat.st_log_queued_max = queued; + } + + /* + * Log-related replication info is stored in the log system and + * protected by the log region lock. + */ + if (dolock) + MUTEX_LOCK(dbenv, db_rep->db_mutexp); + if (F_ISSET(rep, REP_F_CLIENT)) { + stats->st_next_lsn = lp->ready_lsn; + stats->st_waiting_lsn = lp->waiting_lsn; + stats->st_next_pg = rep->ready_pg; + stats->st_waiting_pg = rep->waiting_pg; + } else { + if (F_ISSET(rep, REP_F_MASTER)) + stats->st_next_lsn = lp->lsn; + else + ZERO_LSN(stats->st_next_lsn); + ZERO_LSN(stats->st_waiting_lsn); + } + if (dolock) + MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); + + *statp = stats; + return (0); +} + +/* + * __rep_stat_print_pp -- + * DB_ENV->rep_stat_print pre/post processing. + * + * PUBLIC: int __rep_stat_print_pp __P((DB_ENV *, u_int32_t)); + */ +int +__rep_stat_print_pp(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + int ret; + + PANIC_CHECK(dbenv); + ENV_REQUIRES_CONFIG(dbenv, + dbenv->rep_handle, "DB_ENV->rep_stat_print", DB_INIT_REP); + + if ((ret = __db_fchk(dbenv, "DB_ENV->rep_stat_print", + flags, DB_STAT_ALL | DB_STAT_CLEAR)) != 0) + return (ret); + + return (__rep_stat_print(dbenv, flags)); +} + +/* + * __rep_stat_print -- + * DB_ENV->rep_stat_print method. + * + * PUBLIC: int __rep_stat_print __P((DB_ENV *, u_int32_t)); + */ +int +__rep_stat_print(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + u_int32_t orig_flags; + int ret; + + orig_flags = flags; + LF_CLR(DB_STAT_CLEAR); + if (flags == 0 || LF_ISSET(DB_STAT_ALL)) { + ret = __rep_print_stats(dbenv, orig_flags); + if (flags == 0 || ret != 0) + return (ret); + } + + if (LF_ISSET(DB_STAT_ALL) && + (ret = __rep_print_all(dbenv, orig_flags)) != 0) + return (ret); + + return (0); +} + +/* + * __rep_print_stats -- + * Print out default statistics. + */ +static int +__rep_print_stats(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + DB_REP_STAT *sp; + int is_client, ret; + char *p; + + if ((ret = __rep_stat(dbenv, &sp, flags)) != 0) + return (ret); + + if (LF_ISSET(DB_STAT_ALL)) + __db_msg(dbenv, "Default replication region information:"); + is_client = 0; + switch (sp->st_status) { + case DB_REP_MASTER: + __db_msg(dbenv, + "Environment configured as a replication master"); + break; + case DB_REP_CLIENT: + __db_msg(dbenv, + "Environment configured as a replication client"); + is_client = 1; + break; + default: + __db_msg(dbenv, + "Environment not configured for replication"); + break; + } + + __db_msg(dbenv, "%lu/%lu\t%s", + (u_long)sp->st_next_lsn.file, (u_long)sp->st_next_lsn.offset, + is_client ? "Next LSN expected" : "Next LSN to be used"); + __db_msg(dbenv, "%lu/%lu\t%s", + (u_long)sp->st_waiting_lsn.file, (u_long)sp->st_waiting_lsn.offset, + sp->st_waiting_lsn.file == 0 ? + "Not waiting for any missed log records" : + "LSN of first log record we have after missed log records"); + + __db_dl(dbenv, "Next page number expected.", (u_long)sp->st_next_pg); + p = sp->st_waiting_pg == PGNO_INVALID ? 
+ "Not waiting for any missed pages." : + "Page number of first page we have after missed pages."; + __db_msg(dbenv, "%lu\t%s", (u_long)sp->st_waiting_pg, p); + __db_dl(dbenv, "Number of duplicate master conditions detected.", + (u_long)sp->st_dupmasters); + if (sp->st_env_id != DB_EID_INVALID) + __db_dl(dbenv, "Current environment ID", (u_long)sp->st_env_id); + else + __db_msg(dbenv, "No current environment ID"); + __db_dl(dbenv, + "Current environment priority", (u_long)sp->st_env_priority); + __db_dl(dbenv, "Current generation number", (u_long)sp->st_gen); + __db_dl(dbenv, + "Current election generation number", (u_long)sp->st_egen); + __db_dl(dbenv, "Number of duplicate log records received", + (u_long)sp->st_log_duplicated); + __db_dl(dbenv, "Number of log records currently queued", + (u_long)sp->st_log_queued); + __db_dl(dbenv, "Maximum number of log records ever queued at once", + (u_long)sp->st_log_queued_max); + __db_dl(dbenv, "Total number of log records queued", + (u_long)sp->st_log_queued_total); + __db_dl(dbenv, + "Number of log records received and appended to the log", + (u_long)sp->st_log_records); + __db_dl(dbenv, "Number of log records missed and requested", + (u_long)sp->st_log_requested); + if (sp->st_master != DB_EID_INVALID) + __db_dl(dbenv, "Current master ID", (u_long)sp->st_master); + else + __db_msg(dbenv, "No current master ID"); + __db_dl(dbenv, "Number of times the master has changed", + (u_long)sp->st_master_changes); + __db_dl(dbenv, + "Number of messages received with a bad generation number", + (u_long)sp->st_msgs_badgen); + __db_dl(dbenv, "Number of messages received and processed", + (u_long)sp->st_msgs_processed); + __db_dl(dbenv, "Number of messages ignored due to pending recovery", + (u_long)sp->st_msgs_recover); + __db_dl(dbenv, "Number of failed message sends", + (u_long)sp->st_msgs_send_failures); + __db_dl(dbenv, "Number of messages sent", (u_long)sp->st_msgs_sent); + __db_dl(dbenv, + "Number of new site messages received", (u_long)sp->st_newsites); + __db_dl(dbenv, + "Number of environments believed to be in the replication group", + (u_long)sp->st_nsites); + __db_dl(dbenv, "Transmission limited", (u_long)sp->st_nthrottles); + __db_dl(dbenv, "Number of outdated conditions detected", + (u_long)sp->st_outdated); + __db_dl(dbenv, "Number of duplicate page records received", + (u_long)sp->st_pg_duplicated); + __db_dl(dbenv, "Number of page records received and added to databases", + (u_long)sp->st_pg_records); + __db_dl(dbenv, "Number of page records missed and requested", + (u_long)sp->st_pg_requested); + if (sp->st_startup_complete == 0) + __db_msg(dbenv, "Startup incomplete"); + else + __db_msg(dbenv, "Startup complete"); + __db_dl(dbenv, + "Number of transactions applied", (u_long)sp->st_txns_applied); + + __db_dl(dbenv, "Number of elections held", (u_long)sp->st_elections); + __db_dl(dbenv, + "Number of elections won", (u_long)sp->st_elections_won); + + if (sp->st_election_status == 0) + __db_msg(dbenv, "No election in progress"); + else { + __db_dl(dbenv, "Current election phase", + (u_long)sp->st_election_status); + __db_dl(dbenv, "Election winner", + (u_long)sp->st_election_cur_winner); + __db_dl(dbenv, "Election generation number", + (u_long)sp->st_election_gen); + __db_msg(dbenv, "%lu/%lu\tMaximum LSN of election winner", + (u_long)sp->st_election_lsn.file, + (u_long)sp->st_election_lsn.offset); + __db_dl(dbenv, + "Number of sites expected to participate in elections", + (u_long)sp->st_election_nsites); + __db_dl(dbenv, "Number of votes 
needed to win an election", + (u_long)sp->st_election_nvotes); + __db_dl(dbenv, + "Election priority", (u_long)sp->st_election_priority); + __db_dl(dbenv, "Election tiebreaker value", + (u_long)sp->st_election_tiebreaker); + __db_dl(dbenv, "Votes received this election round", + (u_long)sp->st_election_votes); + } + + __os_ufree(dbenv, sp); + + return (0); +} + +/* + * __rep_print_all -- + * Display debugging replication region statistics. + */ +static int +__rep_print_all(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + static const FN rep_fn[] = { + { REP_F_CLIENT, "REP_F_CLIENT" }, + { REP_F_EPHASE1, "REP_F_EPHASE1" }, + { REP_F_EPHASE2, "REP_F_EPHASE2" }, + { REP_F_MASTER, "REP_F_MASTER" }, + { REP_F_MASTERELECT, "REP_F_MASTERELECT" }, + { REP_F_NOARCHIVE, "REP_F_NOARCHIVE" }, + { REP_F_READY, "REP_F_READY" }, + { REP_F_RECOVER_LOG, "REP_F_RECOVER_LOG" }, + { REP_F_RECOVER_PAGE, "REP_F_RECOVER_PAGE" }, + { REP_F_RECOVER_UPDATE, "REP_F_RECOVER_UPDATE" }, + { REP_F_RECOVER_VERIFY, "REP_F_RECOVER_VERIFY" }, + { REP_F_TALLY, "REP_F_TALLY" }, + { 0, NULL } + }; + static const FN dbrep_fn[] = { + { DBREP_OPENFILES, "DBREP_OPENFILES" }, + { 0, NULL } + }; + DB_LOG *dblp; + DB_REP *db_rep; + LOG *lp; + REGENV *renv; + REGINFO *infop; + REP *rep; + + db_rep = dbenv->rep_handle; + rep = db_rep->region; + infop = dbenv->reginfo; + renv = infop->primary; + + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "DB_REP handle information:"); + __db_print_mutex(dbenv, NULL, + db_rep->rep_mutexp, "Replication region mutex", flags); + __db_print_mutex(dbenv, NULL, + db_rep->db_mutexp, "Bookkeeping database mutex", flags); + + if (db_rep->rep_db == NULL) + STAT_ISSET("Bookkeeping database", db_rep->rep_db); + else + (void)__db_stat_print(db_rep->rep_db, flags); + + __db_prflags(dbenv, NULL, db_rep->flags, dbrep_fn, NULL, "\tFlags"); + + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "REP handle information:"); + __db_print_mutex(dbenv, NULL, &rep->mutex, "REP mutex", flags); + + STAT_LONG("Environment ID", rep->eid); + STAT_LONG("Master environment ID", rep->master_id); + STAT_ULONG("Election generation", rep->egen); + STAT_ULONG("Election generation number", rep->gen); + STAT_ULONG("Last generation number in log", rep->recover_gen); + STAT_LONG("Space allocated for sites", rep->asites); + STAT_LONG("Sites in group", rep->nsites); + STAT_LONG("Votes needed for election", rep->nvotes); + STAT_LONG("Priority in election", rep->priority); + __db_dlbytes(dbenv, "Limit on data sent in a single call", + rep->gbytes, (u_long)0, rep->bytes); + STAT_ULONG("Request gap", rep->request_gap); + STAT_ULONG("Maximum gap", rep->max_gap); + + STAT_LONG("Thread is in rep_elect", rep->elect_th); + STAT_ULONG("Callers in rep_proc_msg", rep->msg_th); + STAT_LONG("Thread is in rep_start", rep->start_th); + STAT_ULONG("Library handle count", rep->handle_cnt); + STAT_ULONG("Multi-step operation count", rep->op_cnt); + STAT_LONG("Running recovery", rep->in_recovery); + __db_msg(dbenv, "%.24s\tRecovery timestamp", + renv->rep_timestamp == 0 ? 
"0" : ctime(&renv->rep_timestamp)); + + STAT_LONG("Sites heard from", rep->sites); + STAT_LONG("Current winner", rep->winner); + STAT_LONG("Winner priority", rep->w_priority); + STAT_ULONG("Winner generation", rep->w_gen); + STAT_LSN("Winner LSN", &rep->w_lsn); + STAT_LONG("Winner tiebreaker", rep->w_tiebreaker); + STAT_LONG("Votes for this site", rep->votes); + + __db_prflags(dbenv, NULL, rep->flags, rep_fn, NULL, "\tFlags"); + + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "LOG replication information:"); + MUTEX_LOCK(dbenv, db_rep->db_mutexp); + dblp = dbenv->lg_handle; + lp = (LOG *)dblp->reginfo.primary; + STAT_LSN("First log record after a gap", &lp->waiting_lsn); + STAT_LSN("LSN waiting to verify", &lp->verify_lsn); + STAT_LSN("Maximum LSN requested", &lp->max_wait_lsn); + STAT_ULONG("Records to wait before requesting", lp->wait_recs); + STAT_ULONG("Records received while waiting", lp->rcvd_recs); + STAT_LSN("Next LSN expected", &lp->ready_lsn); + MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); + + return (0); +} + +#else /* !HAVE_STATISTICS */ + +int +__rep_stat_pp(dbenv, statp, flags) + DB_ENV *dbenv; + DB_REP_STAT **statp; + u_int32_t flags; +{ + COMPQUIET(statp, NULL); + COMPQUIET(flags, 0); + + return (__db_stat_not_built(dbenv)); +} + +int +__rep_stat_print_pp(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + COMPQUIET(flags, 0); + + return (__db_stat_not_built(dbenv)); +} +#endif diff --git a/db/rep/rep_stub.c b/db/rep/rep_stub.c index 664e4c2c3..c2851915d 100644 --- a/db/rep/rep_stub.c +++ b/db/rep/rep_stub.c @@ -1,15 +1,15 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: rep_stub.c,v 1.22 2004/09/29 15:36:04 bostic Exp $ */ -#include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: rep_stub.c,v 1.9 2003/11/14 05:32:31 ubell Exp $"; -#endif /* not lint */ +#include "db_config.h" +#ifndef HAVE_REPLICATION #ifndef NO_SYSTEM_INCLUDES #include #endif @@ -24,16 +24,15 @@ static const char revid[] = "$Id: rep_stub.c,v 1.9 2003/11/14 05:32:31 ubell Exp */ static int __db_norep __P((DB_ENV *)); -static int __rep_elect __P((DB_ENV *, int, int, u_int32_t, int *)); +static int __rep_elect + __P((DB_ENV *, int, int, int, u_int32_t, int *, u_int32_t)); static int __rep_flush __P((DB_ENV *)); static int __rep_start __P((DB_ENV *, DBT *, u_int32_t)); -static int __rep_stat __P((DB_ENV *, DB_REP_STAT **, u_int32_t)); static int __rep_get_limit __P((DB_ENV *, u_int32_t *, u_int32_t *)); static int __rep_set_limit __P((DB_ENV *, u_int32_t, u_int32_t)); static int __rep_set_request __P((DB_ENV *, u_int32_t, u_int32_t)); -static int __rep_set_rep_transport __P((DB_ENV *, int, - int (*)(DB_ENV *, const DBT *, const DBT *, const DB_LSN *, - int, u_int32_t))); +static int __rep_set_rep_transport __P((DB_ENV *, int, int (*) + (DB_ENV *, const DBT *, const DBT *, const DB_LSN *, int, u_int32_t))); /* * __db_norep -- @@ -49,34 +48,16 @@ __db_norep(dbenv) } int -__db_default_getpgnos(dbenv, lsnp, summary) - DB_ENV *dbenv; - DB_LSN *lsnp; - void *summary; -{ - COMPQUIET(lsnp, NULL); - COMPQUIET(summary, NULL); - return (__db_norep(dbenv)); -} - -int -__db_rep_enter(dbp, checkgen, doreturn) +__db_rep_enter(dbp, checkgen, checklock, return_now) DB *dbp; - int checkgen, doreturn; + int checkgen, checklock, return_now; { COMPQUIET(checkgen, 0); - COMPQUIET(doreturn, 0); + COMPQUIET(checklock, 0); + COMPQUIET(return_now, 0); return 
(__db_norep(dbp->dbenv)); } -void -__db_rep_exit(dbenv) - DB_ENV *dbenv; -{ - COMPQUIET(dbenv, NULL); - return; -} - void __env_rep_enter(dbenv) DB_ENV *dbenv; @@ -86,7 +67,7 @@ __env_rep_enter(dbenv) } void -__env_rep_exit(dbenv) +__env_db_rep_exit(dbenv) DB_ENV *dbenv; { COMPQUIET(dbenv, NULL); @@ -97,6 +78,7 @@ void __op_rep_enter(dbenv) DB_ENV *dbenv; { + COMPQUIET(dbenv, NULL); return; } @@ -104,6 +86,7 @@ void __op_rep_exit(dbenv) DB_ENV *dbenv; { + COMPQUIET(dbenv, NULL); return; } @@ -115,7 +98,7 @@ __rep_dbenv_close(dbenv) return (0); } -int +void __rep_dbenv_create(dbenv) DB_ENV *dbenv; { @@ -123,12 +106,12 @@ __rep_dbenv_create(dbenv) dbenv->rep_flush = __rep_flush; dbenv->rep_process_message = __rep_process_message; dbenv->rep_start = __rep_start; - dbenv->rep_stat = __rep_stat; + dbenv->rep_stat = __rep_stat_pp; + dbenv->rep_stat_print = __rep_stat_print_pp; dbenv->get_rep_limit = __rep_get_limit; dbenv->set_rep_limit = __rep_set_limit; dbenv->set_rep_request = __rep_set_request; dbenv->set_rep_transport = __rep_set_rep_transport; - return (0); } void @@ -140,16 +123,18 @@ __rep_dbenv_refresh(dbenv) } static int -__rep_elect(dbenv, nsites, priority, timeout, eidp) +__rep_elect(dbenv, nsites, nvotes, priority, timeout, eidp, flags) DB_ENV *dbenv; - int nsites, priority; - u_int32_t timeout; + int nsites, nvotes, priority; + u_int32_t timeout, flags; int *eidp; { COMPQUIET(nsites, 0); + COMPQUIET(nvotes, 0); COMPQUIET(priority, 0); COMPQUIET(timeout, 0); COMPQUIET(eidp, NULL); + COMPQUIET(flags, 0); return (__db_norep(dbenv)); } @@ -303,8 +288,8 @@ __rep_start(dbenv, dbt, flags) return (__db_norep(dbenv)); } -static int -__rep_stat(dbenv, statp, flags) +int +__rep_stat_pp(dbenv, statp, flags) DB_ENV *dbenv; DB_REP_STAT **statp; u_int32_t flags; @@ -314,18 +299,21 @@ __rep_stat(dbenv, statp, flags) return (__db_norep(dbenv)); } -void -__txn_rep_enter(dbenv) +int +__rep_stat_print_pp(dbenv, flags) DB_ENV *dbenv; + u_int32_t flags; { - COMPQUIET(dbenv, NULL); - return; + COMPQUIET(flags, 0); + return (__db_norep(dbenv)); } -void -__txn_rep_exit(dbenv) +int +__rep_stat_print(dbenv, flags) DB_ENV *dbenv; + u_int32_t flags; { - COMPQUIET(dbenv, NULL); - return; + COMPQUIET(flags, 0); + return (__db_norep(dbenv)); } +#endif /* !HAVE_REPLICATION */ diff --git a/db/rep/rep_util.c b/db/rep/rep_util.c index 678edbcee..d23a6aba3 100644 --- a/db/rep/rep_util.c +++ b/db/rep/rep_util.c @@ -1,30 +1,31 @@ /*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2001-2003 + * See the file LICENSE for redistribution information. * + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. + * + * $Id: rep_util.c,v 1.134 2004/10/12 15:42:43 sue Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: rep_util.c,v 1.103 2003/11/14 05:32:32 ubell Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES +#if TIME_WITH_SYS_TIME +#include +#include +#else +#if HAVE_SYS_TIME_H +#include +#else +#include +#endif +#endif + #include #include #endif #include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/btree.h" -#include "dbinc/fop.h" -#include "dbinc/hash.h" #include "dbinc/log.h" -#include "dbinc/lock.h" -#include "dbinc/qam.h" #include "dbinc/txn.h" /* @@ -33,39 +34,21 @@ static const char revid[] = "$Id: rep_util.c,v 1.103 2003/11/14 05:32:32 ubell E * those called by other subsystems. 
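 *
 * The TIMESTAMP_CHECK macro added below clears the DB_REGENV_REPLOCKED flag
 * once op_timestamp is more than DB_REGENV_TIMEOUT seconds old, so a
 * replication lockout of the environment cannot persist indefinitely.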
*/ +#define TIMESTAMP_CHECK(dbenv, ts, renv) \ +do { \ + if (renv->op_timestamp != 0 && \ + renv->op_timestamp + DB_REGENV_TIMEOUT < ts) { \ + MUTEX_LOCK(dbenv, &renv->mutex); \ + F_CLR(renv, DB_REGENV_REPLOCKED); \ + renv->op_timestamp = 0; \ + MUTEX_UNLOCK(dbenv, &renv->mutex); \ + } \ +} while (0) + #ifdef REP_DIAGNOSTIC static void __rep_print_logmsg __P((DB_ENV *, const DBT *, DB_LSN *)); #endif -/* - * __rep_check_alloc -- - * Make sure the array of TXN_REC entries is of at least size n. - * (This function is called by the __*_getpgnos() functions in - * *.src.) - * - * PUBLIC: int __rep_check_alloc __P((DB_ENV *, TXN_RECS *, int)); - */ -int -__rep_check_alloc(dbenv, r, n) - DB_ENV *dbenv; - TXN_RECS *r; - int n; -{ - int nalloc, ret; - - while (r->nalloc < r->npages + n) { - nalloc = r->nalloc == 0 ? 20 : r->nalloc * 2; - - if ((ret = __os_realloc(dbenv, nalloc * sizeof(LSN_PAGE), - &r->array)) != 0) - return (ret); - - r->nalloc = nalloc; - } - - return (0); -} - /* * __rep_send_message -- * This is a wrapper for sending a message. It takes care of constructing @@ -89,6 +72,9 @@ __rep_send_message(dbenv, eid, rtype, lsnp, dbtp, flags) REP_CONTROL cntrl; int ret; u_int32_t myflags, rectype; +#ifdef DIAGNOSTIC + DB_MSGBUF mb; +#endif db_rep = dbenv->rep_handle; rep = db_rep->region; @@ -103,9 +89,7 @@ __rep_send_message(dbenv, eid, rtype, lsnp, dbtp, flags) cntrl.flags = flags; cntrl.rep_version = DB_REPVERSION; cntrl.log_version = DB_LOGVERSION; - MUTEX_LOCK(dbenv, db_rep->rep_mutexp); cntrl.gen = rep->gen; - MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); memset(&cdbt, 0, sizeof(cdbt)); cdbt.data = &cntrl; @@ -117,10 +101,7 @@ __rep_send_message(dbenv, eid, rtype, lsnp, dbtp, flags) dbtp = &scrap_dbt; } -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __rep_print_message(dbenv, eid, &cntrl, "rep_send_message"); -#endif + REP_PRINT_MESSAGE(dbenv, eid, &cntrl, "rep_send_message"); #ifdef REP_DIAGNOSTIC if (rtype == REP_LOG) __rep_print_logmsg(dbenv, dbtp, lsnp); @@ -135,9 +116,9 @@ __rep_send_message(dbenv, eid, rtype, lsnp, dbtp, flags) myflags = 0; if (LF_ISSET(DB_LOG_PERM)) myflags = DB_REP_PERMANENT; - else if (rtype != REP_LOG) + else if (rtype != REP_LOG || LF_ISSET(DB_LOG_RESEND)) myflags = DB_REP_NOBUFFER; - else { + if (rtype == REP_LOG && !LF_ISSET(DB_LOG_PERM)) { /* * Check if this is a log record we just read that * may need a DB_LOG_PERM. 
This is of type REP_LOG, @@ -162,13 +143,11 @@ __rep_send_message(dbenv, eid, rtype, lsnp, dbtp, flags) */ if (ret == 0) rep->stat.st_msgs_sent++; - else + else { rep->stat.st_msgs_send_failures++; - -#ifdef DIAGNOSTIC - if (ret != 0 && FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, "rep_send_function returned: %d", ret); -#endif + RPRINT(dbenv, rep, (dbenv, &mb, + "rep_send_function returned: %d", ret)); + } return (ret); } @@ -227,48 +206,51 @@ __rep_new_master(dbenv, cntrl, eid) int eid; { DB_LOG *dblp; - DB_LOGC *logc; - DB_LSN last_lsn, lsn; + DB_LSN ckp_lsn, lsn; DB_REP *db_rep; - DBT dbt; + DB_TXNMGR *mgr; + DB_TXNREGION *region; LOG *lp; + REGENV *renv; + REGINFO *infop; REP *rep; - int change, ret, t_ret; + int change, do_req, ret; +#ifdef DIAGNOSTIC + DB_MSGBUF mb; +#endif db_rep = dbenv->rep_handle; + mgr = dbenv->tx_handle; + region = mgr->reginfo.primary; rep = db_rep->region; ret = 0; MUTEX_LOCK(dbenv, db_rep->rep_mutexp); __rep_elect_done(dbenv, rep); change = rep->gen != cntrl->gen || rep->master_id != eid; if (change) { -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, - "Updating gen from %lu to %lu from master %d", - (u_long)rep->gen, (u_long)cntrl->gen, eid); -#endif + RPRINT(dbenv, rep, (dbenv, &mb, + "Updating gen from %lu to %lu from master %d", + (u_long)rep->gen, (u_long)cntrl->gen, eid)); rep->gen = cntrl->gen; if (rep->egen <= rep->gen) rep->egen = rep->gen + 1; -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, - "Updating egen to %lu", (u_long)rep->egen); -#endif + RPRINT(dbenv, rep, (dbenv, &mb, + "Egen is %lu", (u_long)rep->egen)); rep->master_id = eid; rep->stat.st_master_changes++; - F_SET(rep, REP_F_NOARCHIVE | REP_F_RECOVER); + rep->stat.st_startup_complete = 0; + F_SET(rep, REP_F_NOARCHIVE | REP_F_RECOVER_VERIFY); } MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); dblp = dbenv->lg_handle; lp = dblp->reginfo.primary; R_LOCK(dbenv, &dblp->reginfo); - last_lsn = lsn = lp->lsn; - if (last_lsn.offset > sizeof(LOGP)) - last_lsn.offset -= lp->len; + lsn = lp->lsn; R_UNLOCK(dbenv, &dblp->reginfo); + R_LOCK(dbenv, &mgr->reginfo); + ckp_lsn = region->last_ckp; + R_UNLOCK(dbenv, &mgr->reginfo); if (!change) { /* @@ -276,15 +258,17 @@ __rep_new_master(dbenv, cntrl, eid) * catching up or verification to do. */ ret = 0; - if (F_ISSET(rep, REP_F_RECOVER)) { - MUTEX_LOCK(dbenv, db_rep->db_mutexp); + MUTEX_LOCK(dbenv, db_rep->db_mutexp); + do_req = __rep_check_doreq(dbenv, rep); + if (F_ISSET(rep, REP_F_RECOVER_VERIFY)) { lsn = lp->verify_lsn; MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); - if (!IS_ZERO_LSN(lsn)) + if (!IS_ZERO_LSN(lsn) && do_req) (void)__rep_send_message(dbenv, eid, - REP_VERIFY_REQ, &last_lsn, NULL, 0); + REP_VERIFY_REQ, &lsn, NULL, 0); } else { - if (log_compare(&lsn, &cntrl->lsn) < 0) + MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); + if (log_compare(&lsn, &cntrl->lsn) < 0 && do_req) (void)__rep_send_message(dbenv, eid, REP_ALL_REQ, &lsn, NULL, 0); MUTEX_LOCK(dbenv, db_rep->rep_mutexp); @@ -302,49 +286,45 @@ __rep_new_master(dbenv, cntrl, eid) * the master is not, then we just need to request all the log * records from the master. 
*/ - if (IS_INIT_LSN(lsn) || IS_ZERO_LSN(lsn)) { -empty: MUTEX_LOCK(dbenv, db_rep->rep_mutexp); - F_CLR(rep, REP_F_NOARCHIVE | REP_F_READY | REP_F_RECOVER); + if (IS_INIT_LSN(lsn) || IS_ZERO_LSN(lsn) || IS_ZERO_LSN(ckp_lsn)) { + /* + * If we don't have a checkpoint, we still might have + * some log records but we're discarding them to sync + * up with the master from the start. Therefore, + * truncate our log. + */ + if (IS_ZERO_LSN(ckp_lsn)) { + INIT_LSN(lsn); + (void)__log_vtruncate(dbenv, &lsn, &ckp_lsn, NULL); + infop = dbenv->reginfo; + renv = infop->primary; + (void)time(&renv->rep_timestamp); + } + + /* + * If we have no log, then we have no files to open + * in recovery, but we've opened what we can, which + * is none. Mark DBREP_OPENFILES here. + */ + MUTEX_LOCK(dbenv, db_rep->db_mutexp); + F_SET(db_rep, DBREP_OPENFILES); + MUTEX_LOCK(dbenv, db_rep->rep_mutexp); + F_CLR(rep, REP_F_NOARCHIVE | REP_F_RECOVER_MASK); MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); + MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); if (!IS_INIT_LSN(cntrl->lsn)) (void)__rep_send_message(dbenv, rep->master_id, REP_ALL_REQ, &lsn, NULL, 0); return (DB_REP_NEWMASTER); - } else if (last_lsn.offset <= sizeof(LOGP)) { - /* - * We have just changed log files and need to set lastlsn - * to the last record in the previous log files. - */ - if ((ret = __log_cursor(dbenv, &logc)) != 0) - return (ret); - memset(&dbt, 0, sizeof(dbt)); - ret = __log_c_get(logc, &last_lsn, &dbt, DB_LAST); - if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) - ret = t_ret; - if (ret == DB_NOTFOUND) - goto empty; - if (ret != 0) { - /* - * We failed here and if we set recover above, - * we'd better clear it, because we haven't - * set the verify LSN - */ - if (change) { - MUTEX_LOCK(dbenv, db_rep->rep_mutexp); - F_CLR(rep, REP_F_RECOVER); - MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); - } - return (ret); - } } MUTEX_LOCK(dbenv, db_rep->db_mutexp); - lp->verify_lsn = last_lsn; + lp->verify_lsn = ckp_lsn; MUTEX_UNLOCK(dbenv, db_rep->db_mutexp); (void)__rep_send_message(dbenv, - eid, REP_VERIFY_REQ, &last_lsn, NULL, 0); + eid, REP_VERIFY_REQ, &ckp_lsn, NULL, 0); return (DB_REP_NEWMASTER); } @@ -362,17 +342,18 @@ __rep_is_client(dbenv) { DB_REP *db_rep; REP *rep; - int ret; if (!REP_ON(dbenv)) return (0); + db_rep = dbenv->rep_handle; rep = db_rep->region; - MUTEX_LOCK(dbenv, db_rep->rep_mutexp); - ret = F_ISSET(rep, REP_F_UPGRADE | REP_F_LOGSONLY); - MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); - return (ret); + /* + * Don't just return F_ISSET since that converts unsigned + * into signed. + */ + return (F_ISSET(rep, REP_F_CLIENT) ? 1 : 0); } /* @@ -387,14 +368,31 @@ __rep_noarchive(dbenv) DB_ENV *dbenv; { DB_REP *db_rep; + REGENV *renv; + REGINFO *infop; REP *rep; + time_t timestamp; if (!REP_ON(dbenv)) return (0); db_rep = dbenv->rep_handle; rep = db_rep->region; + infop = dbenv->reginfo; + renv = infop->primary; - return (F_ISSET(rep, REP_F_NOARCHIVE)); + if (F_ISSET(rep, REP_F_NOARCHIVE)) + return (1); + if (F_ISSET(renv, DB_REGENV_REPLOCKED)) { + (void)time(×tamp); + TIMESTAMP_CHECK(dbenv, timestamp, renv); + /* + * Check if we're still locked out after checking + * the timestamp. + */ + if (F_ISSET(renv, DB_REGENV_REPLOCKED)) + return (EINVAL); + } + return (0); } /* @@ -402,14 +400,14 @@ __rep_noarchive(dbenv) * Send this site's vote for the election. 
* * PUBLIC: void __rep_send_vote __P((DB_ENV *, DB_LSN *, int, int, int, - * PUBLIC: u_int32_t, int, u_int32_t)); + * PUBLIC: u_int32_t, u_int32_t, int, u_int32_t)); */ void -__rep_send_vote(dbenv, lsnp, nsites, pri, tiebreaker, egen, eid, vtype) +__rep_send_vote(dbenv, lsnp, nsites, nvotes, pri, tie, egen, eid, vtype) DB_ENV *dbenv; DB_LSN *lsnp; - int eid, nsites, pri, tiebreaker; - u_int32_t egen, vtype; + int eid, nsites, nvotes, pri; + u_int32_t egen, tie, vtype; { DBT vote_dbt; REP_VOTE_INFO vi; @@ -419,7 +417,8 @@ __rep_send_vote(dbenv, lsnp, nsites, pri, tiebreaker, egen, eid, vtype) vi.egen = egen; vi.priority = pri; vi.nsites = nsites; - vi.tiebreaker = tiebreaker; + vi.nvotes = nvotes; + vi.tiebreaker = tie; memset(&vote_dbt, 0, sizeof(vote_dbt)); vote_dbt.data = &vi; @@ -441,21 +440,19 @@ __rep_elect_done(dbenv, rep) REP *rep; { int inelect; - -#ifndef DIAGNOSTIC +#ifdef DIAGNOSTIC + DB_MSGBUF mb; +#else COMPQUIET(dbenv, NULL); #endif - inelect = IN_ELECTION_TALLY(rep); F_CLR(rep, REP_F_EPHASE1 | REP_F_EPHASE2 | REP_F_TALLY); rep->sites = 0; rep->votes = 0; if (inelect) rep->egen++; -#ifdef DIAGNOSTIC - if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) - __db_err(dbenv, "Election done; egen %lu", (u_long)rep->egen); -#endif + RPRINT(dbenv, rep, (dbenv, &mb, + "Election done; egen %lu", (u_long)rep->egen)); } /* @@ -496,21 +493,21 @@ __rep_grow_sites(dbenv, nsites) * one for VOTE2's. Always grow them in tandem, because if we * get more VOTE1's we'll always expect more VOTE2's then too. */ - if ((ret = __db_shalloc(infop->addr, - nalloc * sizeof(REP_VTALLY), sizeof(REP_VTALLY), + if ((ret = __db_shalloc(infop, + (size_t)nalloc * sizeof(REP_VTALLY), sizeof(REP_VTALLY), &tally)) == 0) { if (rep->tally_off != INVALID_ROFF) - __db_shalloc_free(infop->addr, - R_ADDR(infop, rep->tally_off)); - rep->tally_off = R_OFFSET(infop, tally); - if ((ret = __db_shalloc(infop->addr, - nalloc * sizeof(REP_VTALLY), sizeof(REP_VTALLY), + __db_shalloc_free( + infop, R_ADDR(dbenv, infop, rep->tally_off)); + rep->tally_off = R_OFFSET(dbenv, infop, tally); + if ((ret = __db_shalloc(infop, + (size_t)nalloc * sizeof(REP_VTALLY), sizeof(REP_VTALLY), &tally)) == 0) { /* Success */ if (rep->v2tally_off != INVALID_ROFF) - __db_shalloc_free(infop->addr, - R_ADDR(infop, rep->v2tally_off)); - rep->v2tally_off = R_OFFSET(infop, tally); + __db_shalloc_free(infop, + R_ADDR(dbenv, infop, rep->v2tally_off)); + rep->v2tally_off = R_OFFSET(dbenv, infop, tally); rep->asites = nalloc; rep->nsites = nsites; } else { @@ -522,10 +519,10 @@ __rep_grow_sites(dbenv, nsites) * to the error. */ if (rep->v2tally_off != INVALID_ROFF) - __db_shalloc_free(infop->addr, - R_ADDR(infop, rep->v2tally_off)); - __db_shalloc_free(infop->addr, - R_ADDR(infop, rep->tally_off)); + __db_shalloc_free(infop, + R_ADDR(dbenv, infop, rep->v2tally_off)); + __db_shalloc_free(infop, + R_ADDR(dbenv, infop, rep->tally_off)); rep->v2tally_off = rep->tally_off = INVALID_ROFF; rep->asites = 0; rep->nsites = 0; @@ -563,7 +560,7 @@ __env_rep_enter(dbenv) MUTEX_LOCK(dbenv, db_rep->rep_mutexp); for (cnt = 0; rep->in_recovery;) { MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); - (void)__os_sleep(dbenv, 1, 0); + __os_sleep(dbenv, 1, 0); MUTEX_LOCK(dbenv, db_rep->rep_mutexp); if (++cnt % 60 == 0) __db_err(dbenv, @@ -575,14 +572,14 @@ __env_rep_enter(dbenv) } /* - * __env_rep_exit -- + * __env_db_rep_exit -- * * Decrement handle count upon routine exit. 
* - * PUBLIC: void __env_rep_exit __P((DB_ENV *)); + * PUBLIC: void __env_db_rep_exit __P((DB_ENV *)); */ void -__env_rep_exit(dbenv) +__env_db_rep_exit(dbenv) DB_ENV *dbenv; { DB_REP *db_rep; @@ -608,34 +605,49 @@ __env_rep_exit(dbenv) * If return_now is non-zero, we'll return DB_DEADLOCK immediately, else we'll * sleep before returning DB_DEADLOCK. * - * PUBLIC: int __db_rep_enter __P((DB *, int, int)); + * PUBLIC: int __db_rep_enter __P((DB *, int, int, int)); */ int -__db_rep_enter(dbp, checkgen, return_now) +__db_rep_enter(dbp, checkgen, checklock, return_now) DB *dbp; - int checkgen, return_now; + int checkgen, checklock, return_now; { DB_ENV *dbenv; DB_REP *db_rep; + REGENV *renv; + REGINFO *infop; REP *rep; + time_t timestamp; dbenv = dbp->dbenv; - /* Check if locks have been globally turned off. */ + /* Check if locks have been globally turned off. */ if (F_ISSET(dbenv, DB_ENV_NOLOCKING)) return (0); db_rep = dbenv->rep_handle; rep = db_rep->region; + infop = dbenv->reginfo; + renv = infop->primary; + if (checklock && F_ISSET(renv, DB_REGENV_REPLOCKED)) { + (void)time(×tamp); + TIMESTAMP_CHECK(dbenv, timestamp, renv); + /* + * Check if we're still locked out after checking + * the timestamp. + */ + if (F_ISSET(renv, DB_REGENV_REPLOCKED)) + return (EINVAL); + } MUTEX_LOCK(dbenv, db_rep->rep_mutexp); if (F_ISSET(rep, REP_F_READY)) { MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); if (!return_now) - (void)__os_sleep(dbenv, 5, 0); + __os_sleep(dbenv, 5, 0); return (DB_LOCK_DEADLOCK); } - if (checkgen && dbp->timestamp != rep->timestamp) { + if (checkgen && dbp->timestamp != renv->rep_timestamp) { MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); __db_err(dbenv, "%s %s", "replication recovery unrolled committed transactions;", @@ -648,31 +660,6 @@ __db_rep_enter(dbp, checkgen, return_now) return (0); } -/* - * __db_rep_exit -- - * Decrement handle counts. - * - * PUBLIC: void __db_rep_exit __P((DB_ENV *)); - */ -void -__db_rep_exit(dbenv) - DB_ENV *dbenv; -{ - DB_REP *db_rep; - REP *rep; - - /* Check if locks have been globally turned off. */ - if (F_ISSET(dbenv, DB_ENV_NOLOCKING)) - return; - - db_rep = dbenv->rep_handle; - rep = db_rep->region; - - MUTEX_LOCK(dbenv, db_rep->rep_mutexp); - rep->handle_cnt--; - MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); -} - /* * __op_rep_enter -- * @@ -702,7 +689,7 @@ __op_rep_enter(dbenv) MUTEX_LOCK(dbenv, db_rep->rep_mutexp); for (cnt = 0; F_ISSET(rep, REP_F_READY);) { MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); - (void)__os_sleep(dbenv, 5, 0); + __os_sleep(dbenv, 5, 0); MUTEX_LOCK(dbenv, db_rep->rep_mutexp); if (++cnt % 60 == 0) __db_err(dbenv, @@ -717,7 +704,7 @@ __op_rep_enter(dbenv) * __op_rep_exit -- * * Decrement op count upon transaction commit/abort/discard or - * memp_fput. + * memp_fput. * * PUBLIC: void __op_rep_exit __P((DB_ENV *)); */ @@ -767,86 +754,6 @@ __rep_get_gen(dbenv, genp) MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp); } -#ifdef NOTYET -static int __rep_send_file __P((DB_ENV *, DBT *, u_int32_t)); -/* - * __rep_send_file -- - * Send an entire file, one block at a time. 
- */ -static int -__rep_send_file(dbenv, rec, eid) - DB_ENV *dbenv; - DBT *rec; - u_int32_t eid; -{ - DB *dbp; - DB_LOCK lk; - DB_MPOOLFILE *mpf; - DBC *dbc; - DBT rec_dbt; - PAGE *pagep; - db_pgno_t last_pgno, pgno; - int ret, t_ret; - - dbp = NULL; - dbc = NULL; - pagep = NULL; - mpf = NULL; - LOCK_INIT(lk); - - if ((ret = db_create(&dbp, dbenv, 0)) != 0) - goto err; - - if ((ret = __db_open( - dbp, rec->data, NULL, DB_UNKNOWN, 0, 0, PGNO_BASE_MD)) != 0) - goto err; - - if ((ret = __db_cursor(dbp, NULL, &dbc, 0)) != 0) - goto err; - /* - * Force last_pgno to some value that will let us read the meta-dat - * page in the following loop. - */ - memset(&rec_dbt, 0, sizeof(rec_dbt)); - last_pgno = 1; - for (pgno = 0; pgno <= last_pgno; pgno++) { - if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &lk)) != 0) - goto err; - - if ((ret = __memp_fget(mpf, &pgno, 0, &pagep)) != 0) - goto err; - - if (pgno == 0) - last_pgno = ((DBMETA *)pagep)->last_pgno; - - rec_dbt.data = pagep; - rec_dbt.size = dbp->pgsize; - if (__rep_send_message(dbenv, eid, - REP_FILE, NULL, &rec_dbt, pgno == last_pgno) != 0) - break; - ret = __memp_fput(mpf, pagep, 0); - pagep = NULL; - if (ret != 0) - goto err; - ret = __LPUT(dbc, lk); - LOCK_INIT(lk); - if (ret != 0) - goto err; - } - -err: if (LOCK_ISSET(lk) && (t_ret = __LPUT(dbc, lk)) != 0 && ret == 0) - ret = t_ret; - if (dbc != NULL && (t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - if (pagep != NULL && - (t_ret = __memp_fput(mpf, pagep, 0)) != 0 && ret == 0) - ret = t_ret; - if (dbp != NULL && (t_ret = __db_close(dbp, NULL, 0)) != 0 && ret == 0) - ret = t_ret; - return (ret); -} -#endif - #ifdef DIAGNOSTIC /* * PUBLIC: void __rep_print_message __P((DB_ENV *, int, REP_CONTROL *, char *)); @@ -858,7 +765,9 @@ __rep_print_message(dbenv, eid, rp, str) REP_CONTROL *rp; char *str; { + DB_MSGBUF mb; char *type; + switch (rp->rectype) { case REP_ALIVE: type = "alive"; @@ -875,6 +784,9 @@ __rep_print_message(dbenv, eid, rp, str) case REP_FILE: type = "file"; break; + case REP_FILE_FAIL: + type = "file_fail"; + break; case REP_FILE_REQ: type = "file_req"; break; @@ -905,14 +817,20 @@ __rep_print_message(dbenv, eid, rp, str) case REP_PAGE: type = "page"; break; + case REP_PAGE_FAIL: + type = "page_fail"; + break; + case REP_PAGE_MORE: + type = "page_more"; + break; case REP_PAGE_REQ: type = "page_req"; break; - case REP_PLIST: - type = "plist"; + case REP_UPDATE: + type = "update"; break; - case REP_PLIST_REQ: - type = "plist_req"; + case REP_UPDATE_REQ: + type = "update_req"; break; case REP_VERIFY: type = "verify"; @@ -933,8 +851,9 @@ __rep_print_message(dbenv, eid, rp, str) type = "NOTYPE"; break; } - __db_err(dbenv, "%s %s: gen = %lu eid %d, type %s, LSN [%lu][%lu]", + RPRINT(dbenv, ((REP *)((DB_REP *)(dbenv)->rep_handle)->region), + (dbenv, &mb, "%s %s: gen = %lu eid %d, type %s, LSN [%lu][%lu]", dbenv->db_home, str, (u_long)rp->gen, - eid, type, (u_long)rp->lsn.file, (u_long)rp->lsn.offset); + eid, type, (u_long)rp->lsn.file, (u_long)rp->lsn.offset)); } #endif diff --git a/db/rpc_client/client.c b/db/rpc_client/client.c index f61e357ba..d96721ece 100644 --- a/db/rpc_client/client.c +++ b/db/rpc_client/client.c @@ -1,17 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: client.c,v 1.60 2004/09/21 16:09:54 sue Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: client.c,v 1.54 2003/06/14 17:56:01 bostic Exp $"; -#endif /* not lint */ - -#ifdef HAVE_RPC #ifndef NO_SYSTEM_INCLUDES #include @@ -25,12 +22,12 @@ static const char revid[] = "$Id: client.c,v 1.54 2003/06/14 17:56:01 bostic Exp #include #endif +#include "db_server.h" + #include "db_int.h" #include "dbinc/db_page.h" #include "dbinc/db_am.h" #include "dbinc/txn.h" - -#include "dbinc_auto/db_server.h" #include "dbinc_auto/rpc_client_ext.h" static int __dbcl_c_destroy __P((DBC *)); @@ -225,11 +222,17 @@ __dbcl_retcopy(dbenv, dbt, data, len, memp, memsize) /* * The RPC server handles DB_DBT_PARTIAL, so we mask it out here to - * avoid the handling of partials in __db_retcopy. + * avoid the handling of partials in __db_retcopy. Check first whether + * the data has actually changed, so we don't try to copy over + * read-only keys, which the RPC server always returns regardless. */ orig_flags = dbt->flags; F_CLR(dbt, DB_DBT_PARTIAL); - ret = __db_retcopy(dbenv, dbt, data, len, memp, memsize); + if (dbt->data != NULL && dbt->size == len && + memcmp(dbt->data, data, len) == 0) + ret = 0; + else + ret = __db_retcopy(dbenv, dbt, data, len, memp, memsize); dbt->flags = orig_flags; return (ret); } @@ -238,7 +241,7 @@ __dbcl_retcopy(dbenv, dbt, data, len, memp, memsize) * __dbcl_txn_close -- * Clean up an environment's transactions. */ -int +static int __dbcl_txn_close(dbenv) DB_ENV *dbenv; { @@ -400,11 +403,11 @@ __dbcl_c_refresh(dbc) * __dbcl_c_setup -- * Allocate a cursor. * - * PUBLIC: int __dbcl_c_setup __P((long, DB *, DBC **)); + * PUBLIC: int __dbcl_c_setup __P((u_int, DB *, DBC **)); */ int __dbcl_c_setup(cl_id, dbp, dbcp) - long cl_id; + u_int cl_id; DB *dbp; DBC **dbcp; { @@ -484,4 +487,3 @@ __dbcl_dbclose_common(dbp) __os_free(NULL, dbp); return (ret); } -#endif /* HAVE_RPC */ diff --git a/db/rpc_client/gen_client.c b/db/rpc_client/gen_client.c index b27644e8e..72a3f7471 100644 --- a/db/rpc_client/gen_client.c +++ b/db/rpc_client/gen_client.c @@ -1,20 +1,18 @@ /* Do not edit: automatically built by gen_rpc.awk. 
*/ #include "db_config.h" -#ifdef HAVE_RPC #ifndef NO_SYSTEM_INCLUDES #include #include -#include #include #endif +#include "db_server.h" + #include "db_int.h" #include "dbinc/txn.h" - -#include "dbinc_auto/db_server.h" #include "dbinc_auto/rpc_client_ext.h" static int __dbcl_noserver __P((DB_ENV *)); @@ -95,7 +93,7 @@ __dbcl_env_get_cachesize(dbenv, gbytesp, bytesp, ncachep) else msg.dbenvcl_id = dbenv->cl_id; - replyp = __db_env_get_cachesize_4002(&msg, cl); + replyp = __db_env_get_cachesize_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -143,7 +141,7 @@ __dbcl_env_cachesize(dbenv, gbytes, bytes, ncache) msg.bytes = bytes; msg.ncache = ncache; - replyp = __db_env_cachesize_4002(&msg, cl); + replyp = __db_env_cachesize_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -181,7 +179,7 @@ __dbcl_env_close(dbenv, flags) msg.dbenvcl_id = dbenv->cl_id; msg.flags = flags; - replyp = __db_env_close_4002(&msg, cl); + replyp = __db_env_close_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -215,7 +213,7 @@ __dbcl_env_create(dbenv, timeout) msg.timeout = timeout; - replyp = __db_env_create_4002(&msg, cl); + replyp = __db_env_create_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -293,7 +291,7 @@ __dbcl_env_dbremove(dbenv, txnp, name, subdb, flags) msg.subdb = (char *)subdb; msg.flags = flags; - replyp = __db_env_dbremove_4002(&msg, cl); + replyp = __db_env_dbremove_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -352,7 +350,7 @@ __dbcl_env_dbrename(dbenv, txnp, name, subdb, newname, flags) msg.newname = (char *)newname; msg.flags = flags; - replyp = __db_env_dbrename_4002(&msg, cl); + replyp = __db_env_dbrename_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -389,7 +387,7 @@ __dbcl_env_get_encrypt_flags(dbenv, flagsp) else msg.dbenvcl_id = dbenv->cl_id; - replyp = __db_env_get_encrypt_flags_4002(&msg, cl); + replyp = __db_env_get_encrypt_flags_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -434,7 +432,7 @@ __dbcl_env_encrypt(dbenv, passwd, flags) msg.passwd = (char *)passwd; msg.flags = flags; - replyp = __db_env_encrypt_4002(&msg, cl); + replyp = __db_env_encrypt_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -484,7 +482,7 @@ __dbcl_env_get_flags(dbenv, flagsp) else msg.dbenvcl_id = dbenv->cl_id; - replyp = __db_env_get_flags_4002(&msg, cl); + replyp = __db_env_get_flags_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -526,7 +524,7 @@ __dbcl_env_flags(dbenv, flags, onoff) msg.flags = flags; msg.onoff = onoff; - replyp = __db_env_flags_4002(&msg, cl); + replyp = __db_env_flags_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -773,31 +771,55 @@ __dbcl_set_lk_max_objects(dbenv, max) } /* - * PUBLIC: int __dbcl_get_mp_maxwrite __P((DB_ENV *, int *, int *)); + * PUBLIC: int __dbcl_get_mp_max_openfd __P((DB_ENV *, int *)); + */ +int +__dbcl_get_mp_max_openfd(dbenv, nopenp) + DB_ENV * dbenv; + int * nopenp; +{ + COMPQUIET(nopenp, 0); + return (__dbcl_rpc_illegal(dbenv, "get_mp_max_openfd")); +} + 
+/* + * PUBLIC: int __dbcl_set_mp_max_openfd __P((DB_ENV *, int)); + */ +int +__dbcl_set_mp_max_openfd(dbenv, nopen) + DB_ENV * dbenv; + int nopen; +{ + COMPQUIET(nopen, 0); + return (__dbcl_rpc_illegal(dbenv, "set_mp_max_openfd")); +} + +/* + * PUBLIC: int __dbcl_get_mp_max_write __P((DB_ENV *, int *, int *)); */ int -__dbcl_get_mp_maxwrite(dbenv, nwritep, nsleepp) +__dbcl_get_mp_max_write(dbenv, nwritep, nsleepp) DB_ENV * dbenv; int * nwritep; int * nsleepp; { COMPQUIET(nwritep, 0); COMPQUIET(nsleepp, 0); - return (__dbcl_rpc_illegal(dbenv, "get_mp_maxwrite")); + return (__dbcl_rpc_illegal(dbenv, "get_mp_max_write")); } /* - * PUBLIC: int __dbcl_set_mp_maxwrite __P((DB_ENV *, int, int)); + * PUBLIC: int __dbcl_set_mp_max_write __P((DB_ENV *, int, int)); */ int -__dbcl_set_mp_maxwrite(dbenv, nwrite, nsleep) +__dbcl_set_mp_max_write(dbenv, nwrite, nsleep) DB_ENV * dbenv; int nwrite; int nsleep; { COMPQUIET(nwrite, 0); COMPQUIET(nsleep, 0); - return (__dbcl_rpc_illegal(dbenv, "set_mp_maxwrite")); + return (__dbcl_rpc_illegal(dbenv, "set_mp_max_write")); } /* @@ -848,7 +870,7 @@ __dbcl_env_get_home(dbenv, homep) else msg.dbenvcl_id = dbenv->cl_id; - replyp = __db_env_get_home_4002(&msg, cl); + replyp = __db_env_get_home_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -887,7 +909,7 @@ __dbcl_env_get_open_flags(dbenv, flagsp) else msg.dbenvcl_id = dbenv->cl_id; - replyp = __db_env_get_open_flags_4002(&msg, cl); + replyp = __db_env_get_open_flags_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -934,7 +956,7 @@ __dbcl_env_open(dbenv, home, flags, mode) msg.flags = flags; msg.mode = mode; - replyp = __db_env_open_4002(&msg, cl); + replyp = __db_env_open_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -989,7 +1011,7 @@ __dbcl_env_remove(dbenv, home, flags) msg.home = (char *)home; msg.flags = flags; - replyp = __db_env_remove_4002(&msg, cl); + replyp = __db_env_remove_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -1203,7 +1225,7 @@ __dbcl_txn_abort(txnp) else msg.txnpcl_id = txnp->txnid; - replyp = __db_txn_abort_4002(&msg, cl); + replyp = __db_txn_abort_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -1248,7 +1270,7 @@ __dbcl_txn_begin(dbenv, parent, txnpp, flags) msg.parentcl_id = parent->txnid; msg.flags = flags; - replyp = __db_txn_begin_4002(&msg, cl); + replyp = __db_txn_begin_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -1305,7 +1327,7 @@ __dbcl_txn_commit(txnp, flags) msg.txnpcl_id = txnp->txnid; msg.flags = flags; - replyp = __db_txn_commit_4002(&msg, cl); + replyp = __db_txn_commit_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -1345,7 +1367,7 @@ __dbcl_txn_discard(txnp, flags) msg.txnpcl_id = txnp->txnid; msg.flags = flags; - replyp = __db_txn_discard_4002(&msg, cl); + replyp = __db_txn_discard_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -1385,7 +1407,7 @@ __dbcl_txn_prepare(txnp, gid) msg.txnpcl_id = txnp->txnid; memcpy(msg.gid, gid, 128); - replyp = __db_txn_prepare_4002(&msg, cl); + replyp = __db_txn_prepare_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley 
DB")); ret = DB_NOSERVER; @@ -1428,7 +1450,7 @@ __dbcl_txn_recover(dbenv, preplist, count, retp, flags) msg.count = count; msg.flags = flags; - replyp = __db_txn_recover_4002(&msg, cl); + replyp = __db_txn_recover_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -1473,20 +1495,25 @@ __dbcl_txn_timeout(txnp, timeout, flags) } /* - * PUBLIC: int __dbcl_rep_elect __P((DB_ENV *, int, int, u_int32_t, int *)); + * PUBLIC: int __dbcl_rep_elect __P((DB_ENV *, int, int, int, u_int32_t, int *, + * PUBLIC: u_int32_t)); */ int -__dbcl_rep_elect(dbenv, nsites, pri, timeout, idp) +__dbcl_rep_elect(dbenv, nsites, nvotes, pri, timeout, idp, flags) DB_ENV * dbenv; int nsites; + int nvotes; int pri; u_int32_t timeout; int * idp; + u_int32_t flags; { COMPQUIET(nsites, 0); + COMPQUIET(nvotes, 0); COMPQUIET(pri, 0); COMPQUIET(timeout, 0); COMPQUIET(idp, 0); + COMPQUIET(flags, 0); return (__dbcl_rpc_illegal(dbenv, "rep_elect")); } @@ -1667,7 +1694,7 @@ __dbcl_db_associate(dbp, txnp, sdbp, func0, flags) msg.sdbpcl_id = sdbp->cl_id; msg.flags = flags; - replyp = __db_db_associate_4002(&msg, cl); + replyp = __db_db_associate_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -1723,7 +1750,7 @@ __dbcl_db_bt_maxkey(dbp, maxkey) msg.dbpcl_id = dbp->cl_id; msg.maxkey = maxkey; - replyp = __db_db_bt_maxkey_4002(&msg, cl); + replyp = __db_db_bt_maxkey_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -1762,7 +1789,7 @@ __dbcl_db_get_bt_minkey(dbp, minkeyp) else msg.dbpcl_id = dbp->cl_id; - replyp = __db_db_get_bt_minkey_4002(&msg, cl); + replyp = __db_db_get_bt_minkey_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -1804,7 +1831,7 @@ __dbcl_db_bt_minkey(dbp, minkey) msg.dbpcl_id = dbp->cl_id; msg.minkey = minkey; - replyp = __db_db_bt_minkey_4002(&msg, cl); + replyp = __db_db_bt_minkey_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -1915,7 +1942,7 @@ __dbcl_db_close(dbp, flags) msg.dbpcl_id = dbp->cl_id; msg.flags = flags; - replyp = __db_db_close_4002(&msg, cl); + replyp = __db_db_close_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -1954,7 +1981,7 @@ __dbcl_db_create(dbp, dbenv, flags) msg.dbenvcl_id = dbenv->cl_id; msg.flags = flags; - replyp = __db_db_create_4002(&msg, cl); + replyp = __db_db_create_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -2006,7 +2033,7 @@ __dbcl_db_del(dbp, txnp, key, flags) msg.keydata.keydata_len = key->size; msg.flags = flags; - replyp = __db_db_del_4002(&msg, cl); + replyp = __db_db_del_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -2061,7 +2088,7 @@ __dbcl_db_get_encrypt_flags(dbp, flagsp) else msg.dbpcl_id = dbp->cl_id; - replyp = __db_db_get_encrypt_flags_4002(&msg, cl); + replyp = __db_db_get_encrypt_flags_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -2108,7 +2135,7 @@ __dbcl_db_encrypt(dbp, passwd, flags) msg.passwd = (char *)passwd; msg.flags = flags; - replyp = __db_db_encrypt_4002(&msg, cl); + replyp = __db_db_encrypt_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ 
-2147,7 +2174,7 @@ __dbcl_db_get_extentsize(dbp, extentsizep) else msg.dbpcl_id = dbp->cl_id; - replyp = __db_db_get_extentsize_4002(&msg, cl); + replyp = __db_db_get_extentsize_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -2189,7 +2216,7 @@ __dbcl_db_extentsize(dbp, extentsize) msg.dbpcl_id = dbp->cl_id; msg.extentsize = extentsize; - replyp = __db_db_extentsize_4002(&msg, cl); + replyp = __db_db_extentsize_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -2258,7 +2285,7 @@ __dbcl_db_get_flags(dbp, flagsp) else msg.dbpcl_id = dbp->cl_id; - replyp = __db_db_get_flags_4002(&msg, cl); + replyp = __db_db_get_flags_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -2300,7 +2327,7 @@ __dbcl_db_flags(dbp, flags) msg.dbpcl_id = dbp->cl_id; msg.flags = flags; - replyp = __db_db_flags_4002(&msg, cl); + replyp = __db_db_flags_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -2359,7 +2386,7 @@ __dbcl_db_get(dbp, txnp, key, data, flags) msg.datadata.datadata_len = data->size; msg.flags = flags; - replyp = __db_db_get_4002(&msg, cl); + replyp = __db_db_get_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -2399,7 +2426,7 @@ __dbcl_db_get_name(dbp, filenamep, dbnamep) else msg.dbpcl_id = dbp->cl_id; - replyp = __db_db_get_name_4002(&msg, cl); + replyp = __db_db_get_name_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -2442,7 +2469,7 @@ __dbcl_db_get_open_flags(dbp, flagsp) else msg.dbpcl_id = dbp->cl_id; - replyp = __db_db_get_open_flags_4002(&msg, cl); + replyp = __db_db_get_open_flags_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -2483,7 +2510,7 @@ __dbcl_db_get_h_ffactor(dbp, ffactorp) else msg.dbpcl_id = dbp->cl_id; - replyp = __db_db_get_h_ffactor_4002(&msg, cl); + replyp = __db_db_get_h_ffactor_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -2525,7 +2552,7 @@ __dbcl_db_h_ffactor(dbp, ffactor) msg.dbpcl_id = dbp->cl_id; msg.ffactor = ffactor; - replyp = __db_db_h_ffactor_4002(&msg, cl); + replyp = __db_db_h_ffactor_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -2580,7 +2607,7 @@ __dbcl_db_get_h_nelem(dbp, nelemp) else msg.dbpcl_id = dbp->cl_id; - replyp = __db_db_get_h_nelem_4002(&msg, cl); + replyp = __db_db_get_h_nelem_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -2622,7 +2649,7 @@ __dbcl_db_h_nelem(dbp, nelem) msg.dbpcl_id = dbp->cl_id; msg.nelem = nelem; - replyp = __db_db_h_nelem_4002(&msg, cl); + replyp = __db_db_h_nelem_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -2676,7 +2703,7 @@ __dbcl_db_key_range(dbp, txnp, key, range, flags) msg.keydata.keydata_len = key->size; msg.flags = flags; - replyp = __db_db_key_range_4002(&msg, cl); + replyp = __db_db_key_range_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -2715,7 +2742,7 @@ __dbcl_db_get_lorder(dbp, lorderp) else msg.dbpcl_id = dbp->cl_id; - replyp = __db_db_get_lorder_4002(&msg, cl); + replyp = 
__db_db_get_lorder_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -2757,7 +2784,7 @@ __dbcl_db_lorder(dbp, lorder) msg.dbpcl_id = dbp->cl_id; msg.lorder = lorder; - replyp = __db_db_lorder_4002(&msg, cl); + replyp = __db_db_lorder_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -2817,7 +2844,7 @@ __dbcl_db_open(dbp, txnp, name, subdb, type, flags, mode) msg.flags = flags; msg.mode = mode; - replyp = __db_db_open_4002(&msg, cl); + replyp = __db_db_open_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -2856,7 +2883,7 @@ __dbcl_db_get_pagesize(dbp, pagesizep) else msg.dbpcl_id = dbp->cl_id; - replyp = __db_db_get_pagesize_4002(&msg, cl); + replyp = __db_db_get_pagesize_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -2898,7 +2925,7 @@ __dbcl_db_pagesize(dbp, pagesize) msg.dbpcl_id = dbp->cl_id; msg.pagesize = pagesize; - replyp = __db_db_pagesize_4002(&msg, cl); + replyp = __db_db_pagesize_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -2980,7 +3007,7 @@ __dbcl_db_pget(dbp, txnp, skey, pkey, data, flags) msg.datadata.datadata_len = data->size; msg.flags = flags; - replyp = __db_db_pget_4002(&msg, cl); + replyp = __db_db_pget_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -3039,7 +3066,7 @@ __dbcl_db_put(dbp, txnp, key, data, flags) msg.datadata.datadata_len = data->size; msg.flags = flags; - replyp = __db_db_put_4002(&msg, cl); + replyp = __db_db_put_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -3078,7 +3105,7 @@ __dbcl_db_get_re_delim(dbp, delimp) else msg.dbpcl_id = dbp->cl_id; - replyp = __db_db_get_re_delim_4002(&msg, cl); + replyp = __db_db_get_re_delim_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -3120,7 +3147,7 @@ __dbcl_db_re_delim(dbp, delim) msg.dbpcl_id = dbp->cl_id; msg.delim = delim; - replyp = __db_db_re_delim_4002(&msg, cl); + replyp = __db_db_re_delim_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -3159,7 +3186,7 @@ __dbcl_db_get_re_len(dbp, lenp) else msg.dbpcl_id = dbp->cl_id; - replyp = __db_db_get_re_len_4002(&msg, cl); + replyp = __db_db_get_re_len_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -3201,7 +3228,7 @@ __dbcl_db_re_len(dbp, len) msg.dbpcl_id = dbp->cl_id; msg.len = len; - replyp = __db_db_re_len_4002(&msg, cl); + replyp = __db_db_re_len_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -3241,7 +3268,7 @@ __dbcl_db_re_pad(dbp, pad) msg.dbpcl_id = dbp->cl_id; msg.pad = pad; - replyp = __db_db_re_pad_4002(&msg, cl); + replyp = __db_db_re_pad_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -3280,7 +3307,7 @@ __dbcl_db_get_re_pad(dbp, padp) else msg.dbpcl_id = dbp->cl_id; - replyp = __db_db_get_re_pad_4002(&msg, cl); + replyp = __db_db_get_re_pad_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -3363,7 +3390,7 @@ __dbcl_db_remove(dbp, name, subdb, flags) msg.subdb = 
(char *)subdb; msg.flags = flags; - replyp = __db_db_remove_4002(&msg, cl); + replyp = __db_db_remove_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -3419,7 +3446,7 @@ __dbcl_db_rename(dbp, name, subdb, newname, flags) msg.newname = (char *)newname; msg.flags = flags; - replyp = __db_db_rename_4002(&msg, cl); + replyp = __db_db_rename_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -3433,11 +3460,12 @@ out: } /* - * PUBLIC: int __dbcl_db_stat __P((DB *, void *, u_int32_t)); + * PUBLIC: int __dbcl_db_stat __P((DB *, DB_TXN *, void *, u_int32_t)); */ int -__dbcl_db_stat(dbp, sp, flags) +__dbcl_db_stat(dbp, txnp, sp, flags) DB * dbp; + DB_TXN * txnp; void * sp; u_int32_t flags; { @@ -3458,15 +3486,19 @@ __dbcl_db_stat(dbp, sp, flags) msg.dbpcl_id = 0; else msg.dbpcl_id = dbp->cl_id; + if (txnp == NULL) + msg.txnpcl_id = 0; + else + msg.txnpcl_id = txnp->txnid; msg.flags = flags; - replyp = __db_db_stat_4002(&msg, cl); + replyp = __db_db_stat_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; goto out; } - ret = __dbcl_db_stat_ret(dbp, sp, flags, replyp); + ret = __dbcl_db_stat_ret(dbp, txnp, sp, flags, replyp); out: if (replyp != NULL) xdr_free((xdrproc_t)xdr___db_stat_reply, (void *)replyp); @@ -3500,7 +3532,7 @@ __dbcl_db_sync(dbp, flags) msg.dbpcl_id = dbp->cl_id; msg.flags = flags; - replyp = __db_db_sync_4002(&msg, cl); + replyp = __db_db_sync_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -3547,7 +3579,7 @@ __dbcl_db_truncate(dbp, txnp, countp, flags) msg.txnpcl_id = txnp->txnid; msg.flags = flags; - replyp = __db_db_truncate_4002(&msg, cl); + replyp = __db_db_truncate_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -3632,7 +3664,7 @@ __dbcl_db_cursor(dbp, txnp, dbcpp, flags) msg.txnpcl_id = txnp->txnid; msg.flags = flags; - replyp = __db_db_cursor_4002(&msg, cl); + replyp = __db_db_cursor_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -3685,7 +3717,7 @@ __dbcl_db_join(dbp, curs, dbcp, flags) *cursq = (*cursp)->cl_id; msg.flags = flags; - replyp = __db_db_join_4002(&msg, cl); + replyp = __db_db_join_4003(&msg, cl); __os_free(dbenv, msg.curs.curs_val); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); @@ -3724,7 +3756,7 @@ __dbcl_dbc_close(dbc) else msg.dbccl_id = dbc->cl_id; - replyp = __db_dbc_close_4002(&msg, cl); + replyp = __db_dbc_close_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -3765,7 +3797,7 @@ __dbcl_dbc_count(dbc, countp, flags) msg.dbccl_id = dbc->cl_id; msg.flags = flags; - replyp = __db_dbc_count_4002(&msg, cl); + replyp = __db_dbc_count_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -3805,7 +3837,7 @@ __dbcl_dbc_del(dbc, flags) msg.dbccl_id = dbc->cl_id; msg.flags = flags; - replyp = __db_dbc_del_4002(&msg, cl); + replyp = __db_dbc_del_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -3846,7 +3878,7 @@ __dbcl_dbc_dup(dbc, dbcp, flags) msg.dbccl_id = dbc->cl_id; msg.flags = flags; - replyp = __db_dbc_dup_4002(&msg, cl); + replyp = __db_dbc_dup_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, 
clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -3900,7 +3932,7 @@ __dbcl_dbc_get(dbc, key, data, flags) msg.datadata.datadata_len = data->size; msg.flags = flags; - replyp = __db_dbc_get_4002(&msg, cl); + replyp = __db_dbc_get_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -3961,7 +3993,7 @@ __dbcl_dbc_pget(dbc, skey, pkey, data, flags) msg.datadata.datadata_len = data->size; msg.flags = flags; - replyp = __db_dbc_pget_4002(&msg, cl); + replyp = __db_dbc_pget_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -4015,7 +4047,7 @@ __dbcl_dbc_put(dbc, key, data, flags) msg.datadata.datadata_len = data->size; msg.flags = flags; - replyp = __db_dbc_put_4002(&msg, cl); + replyp = __db_dbc_put_4003(&msg, cl); if (replyp == NULL) { __db_err(dbenv, clnt_sperror(cl, "Berkeley DB")); ret = DB_NOSERVER; @@ -4622,4 +4654,3 @@ __dbcl_memp_fsync(dbmfp) return (__dbcl_rpc_illegal(dbenv, "memp_fsync")); } -#endif /* HAVE_RPC */ diff --git a/db/rpc_client/gen_client_ret.c b/db/rpc_client/gen_client_ret.c index e825279f6..7285afc25 100644 --- a/db/rpc_client/gen_client_ret.c +++ b/db/rpc_client/gen_client_ret.c @@ -1,17 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2000-2003 + * Copyright (c) 2000-2004 * Sleepycat Software. All rights reserved. + * + * $Id: gen_client_ret.c,v 1.69 2004/09/22 16:29:51 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: gen_client_ret.c,v 1.59 2003/06/14 17:56:02 bostic Exp $"; -#endif /* not lint */ - -#ifdef HAVE_RPC #ifndef NO_SYSTEM_INCLUDES #include @@ -20,14 +17,21 @@ static const char revid[] = "$Id: gen_client_ret.c,v 1.59 2003/06/14 17:56:02 bo #include #endif +#include "db_server.h" + #include "db_int.h" #include "dbinc/db_page.h" #include "dbinc/db_am.h" #include "dbinc/txn.h" - -#include "dbinc_auto/db_server.h" #include "dbinc_auto/rpc_client_ext.h" +#define FREE_IF_CHANGED(dbtp, orig) do { \ + if ((dbtp)->data != NULL && (dbtp)->data != orig) { \ + __os_free(dbenv, (dbtp)->data); \ + (dbtp)->data = NULL; \ + } \ +} while (0) + /* * PUBLIC: int __dbcl_env_create_ret * PUBLIC: __P((DB_ENV *, long, __env_create_reply *)); @@ -145,7 +149,13 @@ __dbcl_txn_begin_ret(envp, parent, txnpp, flags, replyp) if ((ret = __os_calloc(envp, 1, sizeof(DB_TXN), &txn)) != 0) return (ret); - __dbcl_txn_setup(envp, txn, parent, replyp->txnidcl_id); + /* + * !!! + * Cast the txnidcl_id to 32-bits. We don't want to change the + * size of the txn structure. But if we're running on 64-bit + * machines, we could overflow. Ignore for now. + */ + __dbcl_txn_setup(envp, txn, parent, (u_int32_t)replyp->txnidcl_id); *txnpp = txn; return (replyp->status); } @@ -227,7 +237,7 @@ __dbcl_txn_recover_ret(dbenv, preplist, count, retp, flags, replyp) while (i++ < replyp->retcount) { __dbcl_txn_setup(dbenv, txn, NULL, *txnid); prep->txn = txn; - memcpy(&prep->gid, gid, DB_XIDDATASIZE); + memcpy(prep->gid, gid, DB_XIDDATASIZE); /* * Now increment all our array pointers. */ @@ -319,8 +329,8 @@ __dbcl_db_get_ret(dbp, txnp, key, data, flags, replyp) * If an error on copying 'data' and we allocated for 'key' * free it before returning the error. 
*/ - if (ret && oldkey != NULL) - __os_free(dbenv, key->data); + if (ret) + FREE_IF_CHANGED(key, oldkey); return (ret); } @@ -367,13 +377,13 @@ __dbcl_db_open_ret(dbp, txn, name, subdb, type, flags, mode, replyp) COMPQUIET(txn, NULL); COMPQUIET(name, NULL); COMPQUIET(subdb, NULL); - COMPQUIET(type, 0); + COMPQUIET(type, DB_UNKNOWN); COMPQUIET(flags, 0); COMPQUIET(mode, 0); if (replyp->status == 0) { dbp->cl_id = replyp->dbcl_id; - dbp->type = replyp->type; + dbp->type = (DBTYPE)replyp->type; /* * We get back the database's byteorder on the server. * Determine if our byteorder is the same or not by @@ -387,11 +397,10 @@ __dbcl_db_open_ret(dbp, txn, name, subdb, type, flags, mode, replyp) (void)__db_set_lorder(dbp, replyp->lorder); /* - * XXX - * This is only for Tcl which peeks at the dbp flags. - * When dbp->get_flags exists, this should go away. + * Explicitly set DB_AM_OPEN_CALLED since open is now + * successfully completed. */ - dbp->flags = replyp->dbflags; + F_SET(dbp, DB_AM_OPEN_CALLED); } return (replyp->status); } @@ -431,25 +440,17 @@ __dbcl_db_pget_ret(dbp, txnp, skey, pkey, data, flags, replyp) return (ret); oldpkey = pkey->data; - ret = __dbcl_retcopy(dbenv, pkey, replyp->pkeydata.pkeydata_val, + if ((ret = __dbcl_retcopy(dbenv, pkey, replyp->pkeydata.pkeydata_val, replyp->pkeydata.pkeydata_len, &dbp->my_rkey.data, - &dbp->my_rkey.ulen); - if (ret && oldskey != NULL) { - __os_free(dbenv, skey->data); - return (ret); - } + &dbp->my_rkey.ulen)) != 0) + goto err; ret = __dbcl_retcopy(dbenv, data, replyp->datadata.datadata_val, replyp->datadata.datadata_len, &dbp->my_rdata.data, &dbp->my_rdata.ulen); - /* - * If an error on copying 'data' and we allocated for '*key' - * free it before returning the error. - */ + if (ret) { - if (oldskey != NULL) - __os_free(dbenv, skey->data); - if (oldpkey != NULL) - __os_free(dbenv, pkey->data); +err: FREE_IF_CHANGED(skey, oldskey); + FREE_IF_CHANGED(pkey, oldpkey); } return (ret); } @@ -532,19 +533,22 @@ __dbcl_db_rename_ret(dbp, name, subdb, newname, flags, replyp) /* * PUBLIC: int __dbcl_db_stat_ret - * PUBLIC: __P((DB *, void *, u_int32_t, __db_stat_reply *)); + * PUBLIC: __P((DB *, DB_TXN *, void *, u_int32_t, __db_stat_reply *)); */ int -__dbcl_db_stat_ret(dbp, sp, flags, replyp) +__dbcl_db_stat_ret(dbp, txnp, sp, flags, replyp) DB *dbp; + DB_TXN *txnp; void *sp; u_int32_t flags; __db_stat_reply *replyp; { - int len, ret; + size_t len; u_int32_t i, *q, *p, *retsp; + int ret; COMPQUIET(flags, 0); + COMPQUIET(txnp, NULL); if (replyp->status != 0 || sp == NULL) return (replyp->status); @@ -716,8 +720,8 @@ __dbcl_dbc_get_ret(dbc, key, data, flags, replyp) * If an error on copying 'data' and we allocated for 'key' * free it before returning the error. 
*/ - if (ret && oldkey != NULL) - __os_free(dbenv, key->data); + if (ret) + FREE_IF_CHANGED(key, oldkey); return (ret); } @@ -754,25 +758,21 @@ __dbcl_dbc_pget_ret(dbc, skey, pkey, data, flags, replyp) return (ret); oldpkey = pkey->data; - ret = __dbcl_retcopy(dbenv, pkey, replyp->pkeydata.pkeydata_val, + if ((ret = __dbcl_retcopy(dbenv, pkey, replyp->pkeydata.pkeydata_val, replyp->pkeydata.pkeydata_len, &dbc->my_rkey.data, - &dbc->my_rkey.ulen); - if (ret && oldskey != NULL) { - __os_free(dbenv, skey->data); - return (ret); - } + &dbc->my_rkey.ulen)) != 0) + goto err; ret = __dbcl_retcopy(dbenv, data, replyp->datadata.datadata_val, replyp->datadata.datadata_len, &dbc->my_rdata.data, &dbc->my_rdata.ulen); + /* * If an error on copying 'data' and we allocated for '*key' * free it before returning the error. */ if (ret) { - if (oldskey != NULL) - __os_free(dbenv, skey->data); - if (oldpkey != NULL) - __os_free(dbenv, pkey->data); +err: FREE_IF_CHANGED(skey, oldskey); + FREE_IF_CHANGED(pkey, oldpkey); } return (ret); } @@ -799,4 +799,3 @@ __dbcl_dbc_put_ret(dbc, key, data, flags, replyp) *(db_recno_t *)replyp->keydata.keydata_val; return (replyp->status); } -#endif /* HAVE_RPC */ diff --git a/db/rpc_server/c/db_server_proc.c b/db/rpc_server/c/db_server_proc.c index b24ae2fc0..1793df42f 100644 --- a/db/rpc_server/c/db_server_proc.c +++ b/db/rpc_server/c/db_server_proc.c @@ -1,17 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2000-2003 + * Copyright (c) 2000-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_server_proc.c,v 1.106 2004/09/22 17:30:12 bostic Exp $ */ #include "db_config.h" -#ifdef HAVE_RPC -#ifndef lint -static const char revid[] = "$Id: db_server_proc.c,v 1.96 2003/04/24 16:25:08 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -19,23 +16,21 @@ static const char revid[] = "$Id: db_server_proc.c,v 1.96 2003/04/24 16:25:08 bo #include #endif -#include "dbinc_auto/db_server.h" + +#include "db_server.h" #include "db_int.h" #include "dbinc/db_server_int.h" #include "dbinc_auto/rpc_server_ext.h" -/* BEGIN __env_get_cachesize_proc */ /* * PUBLIC: void __env_get_cachesize_proc __P((long, * PUBLIC: __env_get_cachesize_reply *)); */ void -__env_get_cachesize_proc(dbenvcl_id, - replyp) +__env_get_cachesize_proc(dbenvcl_id, replyp) long dbenvcl_id; __env_get_cachesize_reply *replyp; -/* END __env_get_cachesize_proc */ { DB_ENV *dbenv; ct_entry *dbenv_ctp; @@ -44,23 +39,20 @@ __env_get_cachesize_proc(dbenvcl_id, dbenv = (DB_ENV *)dbenv_ctp->ct_anyp; replyp->status = dbenv->get_cachesize(dbenv, &replyp->gbytes, - &replyp->bytes, &replyp->ncache); + &replyp->bytes, (int *)&replyp->ncache); } -/* BEGIN __env_cachesize_proc */ /* * PUBLIC: void __env_cachesize_proc __P((long, u_int32_t, u_int32_t, * PUBLIC: u_int32_t, __env_cachesize_reply *)); */ void -__env_cachesize_proc(dbenvcl_id, gbytes, bytes, - ncache, replyp) +__env_cachesize_proc(dbenvcl_id, gbytes, bytes, ncache, replyp) long dbenvcl_id; u_int32_t gbytes; u_int32_t bytes; u_int32_t ncache; __env_cachesize_reply *replyp; -/* END __env_cachesize_proc */ { DB_ENV *dbenv; ct_entry *dbenv_ctp; @@ -75,7 +67,6 @@ __env_cachesize_proc(dbenvcl_id, gbytes, bytes, return; } -/* BEGIN __env_close_proc */ /* * PUBLIC: void __env_close_proc __P((long, u_int32_t, __env_close_reply *)); */ @@ -84,7 +75,6 @@ __env_close_proc(dbenvcl_id, flags, replyp) long dbenvcl_id; u_int32_t flags; __env_close_reply *replyp; -/* END __env_close_proc */ { ct_entry *dbenv_ctp; @@ 
-93,7 +83,6 @@ __env_close_proc(dbenvcl_id, flags, replyp) return; } -/* BEGIN __env_create_proc */ /* * PUBLIC: void __env_create_proc __P((u_int32_t, __env_create_reply *)); */ @@ -101,7 +90,6 @@ void __env_create_proc(timeout, replyp) u_int32_t timeout; __env_create_reply *replyp; -/* END __env_create_proc */ { DB_ENV *dbenv; ct_entry *ctp; @@ -125,21 +113,18 @@ __env_create_proc(timeout, replyp) return; } -/* BEGIN __env_dbremove_proc */ /* * PUBLIC: void __env_dbremove_proc __P((long, long, char *, char *, u_int32_t, * PUBLIC: __env_dbremove_reply *)); */ void -__env_dbremove_proc(dbenvcl_id, txnpcl_id, name, - subdb, flags, replyp) +__env_dbremove_proc(dbenvcl_id, txnpcl_id, name, subdb, flags, replyp) long dbenvcl_id; long txnpcl_id; char *name; char *subdb; u_int32_t flags; __env_dbremove_reply *replyp; -/* END __env_dbremove_proc */ { int ret; DB_ENV * dbenv; @@ -162,14 +147,12 @@ __env_dbremove_proc(dbenvcl_id, txnpcl_id, name, return; } -/* BEGIN __env_dbrename_proc */ /* * PUBLIC: void __env_dbrename_proc __P((long, long, char *, char *, char *, * PUBLIC: u_int32_t, __env_dbrename_reply *)); */ void -__env_dbrename_proc(dbenvcl_id, txnpcl_id, name, - subdb, newname, flags, replyp) +__env_dbrename_proc(dbenvcl_id, txnpcl_id, name, subdb, newname, flags, replyp) long dbenvcl_id; long txnpcl_id; char *name; @@ -177,7 +160,6 @@ __env_dbrename_proc(dbenvcl_id, txnpcl_id, name, char *newname; u_int32_t flags; __env_dbrename_reply *replyp; -/* END __env_dbrename_proc */ { int ret; DB_ENV * dbenv; @@ -200,7 +182,6 @@ __env_dbrename_proc(dbenvcl_id, txnpcl_id, name, return; } -/* BEGIN __env_get_encrypt_flags_proc */ /* * PUBLIC: void __env_get_encrypt_flags_proc __P((long, * PUBLIC: __env_get_encrypt_flags_reply *)); @@ -209,7 +190,6 @@ void __env_get_encrypt_flags_proc(dbenvcl_id, replyp) long dbenvcl_id; __env_get_encrypt_flags_reply *replyp; -/* END __env_get_encrypt_flags_proc */ { DB_ENV *dbenv; ct_entry *dbenv_ctp; @@ -220,7 +200,6 @@ __env_get_encrypt_flags_proc(dbenvcl_id, replyp) replyp->status = dbenv->get_encrypt_flags(dbenv, &replyp->flags); } -/* BEGIN __env_encrypt_proc */ /* * PUBLIC: void __env_encrypt_proc __P((long, char *, u_int32_t, * PUBLIC: __env_encrypt_reply *)); @@ -231,7 +210,6 @@ __env_encrypt_proc(dbenvcl_id, passwd, flags, replyp) char *passwd; u_int32_t flags; __env_encrypt_reply *replyp; -/* END __env_encrypt_proc */ { int ret; DB_ENV * dbenv; @@ -246,7 +224,6 @@ __env_encrypt_proc(dbenvcl_id, passwd, flags, replyp) return; } -/* BEGIN __env_get_flags_proc */ /* * PUBLIC: void __env_get_flags_proc __P((long, __env_get_flags_reply *)); */ @@ -254,7 +231,6 @@ void __env_get_flags_proc(dbenvcl_id, replyp) long dbenvcl_id; __env_get_flags_reply *replyp; -/* END __env_get_flags_proc */ { DB_ENV *dbenv; ct_entry *dbenv_ctp; @@ -265,7 +241,6 @@ __env_get_flags_proc(dbenvcl_id, replyp) replyp->status = dbenv->get_flags(dbenv, &replyp->flags); } -/* BEGIN __env_flags_proc */ /* * PUBLIC: void __env_flags_proc __P((long, u_int32_t, u_int32_t, * PUBLIC: __env_flags_reply *)); @@ -276,7 +251,6 @@ __env_flags_proc(dbenvcl_id, flags, onoff, replyp) u_int32_t flags; u_int32_t onoff; __env_flags_reply *replyp; -/* END __env_flags_proc */ { DB_ENV *dbenv; ct_entry *dbenv_ctp; @@ -295,7 +269,6 @@ __env_flags_proc(dbenvcl_id, flags, onoff, replyp) return; } -/* BEGIN __env_get_home_proc */ /* * PUBLIC: void __env_get_home_proc __P((long, __env_get_home_reply *)); */ @@ -303,7 +276,6 @@ void __env_get_home_proc(dbenvcl_id, replyp) long dbenvcl_id; __env_get_home_reply 
*replyp; -/* END __env_get_home_proc */ { DB_ENV *dbenv; ct_entry *dbenv_ctp; @@ -315,7 +287,6 @@ __env_get_home_proc(dbenvcl_id, replyp) (const char **)&replyp->home); } -/* BEGIN __env_get_open_flags_proc */ /* * PUBLIC: void __env_get_open_flags_proc __P((long, * PUBLIC: __env_get_open_flags_reply *)); @@ -324,7 +295,6 @@ void __env_get_open_flags_proc(dbenvcl_id, replyp) long dbenvcl_id; __env_get_open_flags_reply *replyp; -/* END __env_get_open_flags_proc */ { DB_ENV *dbenv; ct_entry *dbenv_ctp; @@ -335,20 +305,17 @@ __env_get_open_flags_proc(dbenvcl_id, replyp) replyp->status = dbenv->get_open_flags(dbenv, &replyp->flags); } -/* BEGIN __env_open_proc */ /* * PUBLIC: void __env_open_proc __P((long, char *, u_int32_t, u_int32_t, * PUBLIC: __env_open_reply *)); */ void -__env_open_proc(dbenvcl_id, home, flags, - mode, replyp) +__env_open_proc(dbenvcl_id, home, flags, mode, replyp) long dbenvcl_id; char *home; u_int32_t flags; u_int32_t mode; __env_open_reply *replyp; -/* END __env_open_proc */ { DB_ENV *dbenv; ct_entry *dbenv_ctp, *new_ctp; @@ -404,7 +371,6 @@ out: replyp->status = ret; return; } -/* BEGIN __env_remove_proc */ /* * PUBLIC: void __env_remove_proc __P((long, char *, u_int32_t, * PUBLIC: __env_remove_reply *)); @@ -415,7 +381,6 @@ __env_remove_proc(dbenvcl_id, home, flags, replyp) char *home; u_int32_t flags; __env_remove_reply *replyp; -/* END __env_remove_proc */ { DB_ENV *dbenv; ct_entry *dbenv_ctp; @@ -437,7 +402,6 @@ __env_remove_proc(dbenvcl_id, home, flags, replyp) return; } -/* BEGIN __txn_abort_proc */ /* * PUBLIC: void __txn_abort_proc __P((long, __txn_abort_reply *)); */ @@ -445,7 +409,6 @@ void __txn_abort_proc(txnpcl_id, replyp) long txnpcl_id; __txn_abort_reply *replyp; -/* END __txn_abort_proc */ { DB_TXN *txnp; ct_entry *txnp_ctp; @@ -460,19 +423,16 @@ __txn_abort_proc(txnpcl_id, replyp) return; } -/* BEGIN __txn_begin_proc */ /* * PUBLIC: void __txn_begin_proc __P((long, long, u_int32_t, * PUBLIC: __txn_begin_reply *)); */ void -__txn_begin_proc(dbenvcl_id, parentcl_id, - flags, replyp) +__txn_begin_proc(dbenvcl_id, parentcl_id, flags, replyp) long dbenvcl_id; long parentcl_id; u_int32_t flags; __txn_begin_reply *replyp; -/* END __txn_begin_proc */ { DB_ENV *dbenv; DB_TXN *parent, *txnp; @@ -494,7 +454,11 @@ __txn_begin_proc(dbenvcl_id, parentcl_id, } else parent = NULL; - ret = dbenv->txn_begin(dbenv, parent, &txnp, flags); + /* + * Need to set DB_TXN_NOWAIT or the RPC server may deadlock + * itself and no one can break the lock. 
+ */ + ret = dbenv->txn_begin(dbenv, parent, &txnp, flags | DB_TXN_NOWAIT); if (ret == 0) { ctp->ct_txnp = txnp; ctp->ct_type = CT_TXN; @@ -510,7 +474,6 @@ __txn_begin_proc(dbenvcl_id, parentcl_id, return; } -/* BEGIN __txn_commit_proc */ /* * PUBLIC: void __txn_commit_proc __P((long, u_int32_t, * PUBLIC: __txn_commit_reply *)); @@ -520,7 +483,6 @@ __txn_commit_proc(txnpcl_id, flags, replyp) long txnpcl_id; u_int32_t flags; __txn_commit_reply *replyp; -/* END __txn_commit_proc */ { DB_TXN *txnp; ct_entry *txnp_ctp; @@ -536,7 +498,6 @@ __txn_commit_proc(txnpcl_id, flags, replyp) return; } -/* BEGIN __txn_discard_proc */ /* * PUBLIC: void __txn_discard_proc __P((long, u_int32_t, * PUBLIC: __txn_discard_reply *)); @@ -546,7 +507,6 @@ __txn_discard_proc(txnpcl_id, flags, replyp) long txnpcl_id; u_int32_t flags; __txn_discard_reply *replyp; -/* END __txn_discard_proc */ { DB_TXN *txnp; ct_entry *txnp_ctp; @@ -562,7 +522,6 @@ __txn_discard_proc(txnpcl_id, flags, replyp) return; } -/* BEGIN __txn_prepare_proc */ /* * PUBLIC: void __txn_prepare_proc __P((long, u_int8_t *, * PUBLIC: __txn_prepare_reply *)); @@ -572,7 +531,6 @@ __txn_prepare_proc(txnpcl_id, gid, replyp) long txnpcl_id; u_int8_t *gid; __txn_prepare_reply *replyp; -/* END __txn_prepare_proc */ { DB_TXN *txnp; ct_entry *txnp_ctp; @@ -586,20 +544,17 @@ __txn_prepare_proc(txnpcl_id, gid, replyp) return; } -/* BEGIN __txn_recover_proc */ /* * PUBLIC: void __txn_recover_proc __P((long, u_int32_t, u_int32_t, * PUBLIC: __txn_recover_reply *, int *)); */ void -__txn_recover_proc(dbenvcl_id, count, - flags, replyp, freep) +__txn_recover_proc(dbenvcl_id, count, flags, replyp, freep) long dbenvcl_id; u_int32_t count; u_int32_t flags; __txn_recover_reply *replyp; int * freep; -/* END __txn_recover_proc */ { DB_ENV *dbenv; DB_PREPLIST *dbprep, *p; @@ -702,7 +657,6 @@ out2: return; } -/* BEGIN __db_bt_maxkey_proc */ /* * PUBLIC: void __db_bt_maxkey_proc __P((long, u_int32_t, * PUBLIC: __db_bt_maxkey_reply *)); @@ -712,7 +666,6 @@ __db_bt_maxkey_proc(dbpcl_id, maxkey, replyp) long dbpcl_id; u_int32_t maxkey; __db_bt_maxkey_reply *replyp; -/* END __db_bt_maxkey_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -727,20 +680,17 @@ __db_bt_maxkey_proc(dbpcl_id, maxkey, replyp) return; } -/* BEGIN __db_associate_proc */ /* * PUBLIC: void __db_associate_proc __P((long, long, long, u_int32_t, * PUBLIC: __db_associate_reply *)); */ void -__db_associate_proc(dbpcl_id, txnpcl_id, sdbpcl_id, - flags, replyp) +__db_associate_proc(dbpcl_id, txnpcl_id, sdbpcl_id, flags, replyp) long dbpcl_id; long txnpcl_id; long sdbpcl_id; u_int32_t flags; __db_associate_reply *replyp; -/* END __db_associate_proc */ { DB *dbp, *sdbp; DB_TXN *txnp; @@ -758,11 +708,16 @@ __db_associate_proc(dbpcl_id, txnpcl_id, sdbpcl_id, txnp = NULL; /* - * We do not support DB_CREATE for associate. Users - * can only access secondary indices on a read-only basis, - * so whatever they are looking for needs to be there already. + * We do not support DB_CREATE for associate or the callbacks + * implemented in the Java and JE RPC servers. Users can only + * access secondary indices on a read-only basis, so whatever they + * are looking for needs to be there already. 
*/ - if (flags != 0) +#ifdef CONFIG_TEST + if (LF_ISSET(DB_RPC2ND_MASK | DB_CREATE)) +#else + if (LF_ISSET(DB_CREATE)) +#endif ret = EINVAL; else ret = dbp->associate(dbp, txnp, sdbp, NULL, flags); @@ -771,7 +726,6 @@ __db_associate_proc(dbpcl_id, txnpcl_id, sdbpcl_id, return; } -/* BEGIN __db_get_bt_minkey_proc */ /* * PUBLIC: void __db_get_bt_minkey_proc __P((long, * PUBLIC: __db_get_bt_minkey_reply *)); @@ -780,7 +734,6 @@ void __db_get_bt_minkey_proc(dbpcl_id, replyp) long dbpcl_id; __db_get_bt_minkey_reply *replyp; -/* END __db_get_bt_minkey_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -791,7 +744,6 @@ __db_get_bt_minkey_proc(dbpcl_id, replyp) replyp->status = dbp->get_bt_minkey(dbp, &replyp->minkey); } -/* BEGIN __db_bt_minkey_proc */ /* * PUBLIC: void __db_bt_minkey_proc __P((long, u_int32_t, * PUBLIC: __db_bt_minkey_reply *)); @@ -801,7 +753,6 @@ __db_bt_minkey_proc(dbpcl_id, minkey, replyp) long dbpcl_id; u_int32_t minkey; __db_bt_minkey_reply *replyp; -/* END __db_bt_minkey_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -816,7 +767,6 @@ __db_bt_minkey_proc(dbpcl_id, minkey, replyp) return; } -/* BEGIN __db_close_proc */ /* * PUBLIC: void __db_close_proc __P((long, u_int32_t, __db_close_reply *)); */ @@ -825,7 +775,6 @@ __db_close_proc(dbpcl_id, flags, replyp) long dbpcl_id; u_int32_t flags; __db_close_reply *replyp; -/* END __db_close_proc */ { ct_entry *dbp_ctp; @@ -834,7 +783,6 @@ __db_close_proc(dbpcl_id, flags, replyp) return; } -/* BEGIN __db_create_proc */ /* * PUBLIC: void __db_create_proc __P((long, u_int32_t, __db_create_reply *)); */ @@ -843,7 +791,6 @@ __db_create_proc(dbenvcl_id, flags, replyp) long dbenvcl_id; u_int32_t flags; __db_create_reply *replyp; -/* END __db_create_proc */ { DB *dbp; DB_ENV *dbenv; @@ -873,15 +820,13 @@ __db_create_proc(dbenvcl_id, flags, replyp) return; } -/* BEGIN __db_del_proc */ /* * PUBLIC: void __db_del_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t, * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, __db_del_reply *)); */ void -__db_del_proc(dbpcl_id, txnpcl_id, keydlen, - keydoff, keyulen, keyflags, keydata, - keysize, flags, replyp) +__db_del_proc(dbpcl_id, txnpcl_id, keydlen, keydoff, keyulen, keyflags, + keydata, keysize, flags, replyp) long dbpcl_id; long txnpcl_id; u_int32_t keydlen; @@ -892,7 +837,6 @@ __db_del_proc(dbpcl_id, txnpcl_id, keydlen, u_int32_t keysize; u_int32_t flags; __db_del_reply *replyp; -/* END __db_del_proc */ { DB *dbp; DBT key; @@ -924,7 +868,6 @@ __db_del_proc(dbpcl_id, txnpcl_id, keydlen, return; } -/* BEGIN __db_get_encrypt_flags_proc */ /* * PUBLIC: void __db_get_encrypt_flags_proc __P((long, * PUBLIC: __db_get_encrypt_flags_reply *)); @@ -933,7 +876,6 @@ void __db_get_encrypt_flags_proc(dbpcl_id, replyp) long dbpcl_id; __db_get_encrypt_flags_reply *replyp; -/* END __db_get_encrypt_flags_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -944,7 +886,6 @@ __db_get_encrypt_flags_proc(dbpcl_id, replyp) replyp->status = dbp->get_encrypt_flags(dbp, &replyp->flags); } -/* BEGIN __db_encrypt_proc */ /* * PUBLIC: void __db_encrypt_proc __P((long, char *, u_int32_t, * PUBLIC: __db_encrypt_reply *)); @@ -955,7 +896,6 @@ __db_encrypt_proc(dbpcl_id, passwd, flags, replyp) char *passwd; u_int32_t flags; __db_encrypt_reply *replyp; -/* END __db_encrypt_proc */ { int ret; DB * dbp; @@ -969,7 +909,6 @@ __db_encrypt_proc(dbpcl_id, passwd, flags, replyp) return; } -/* BEGIN __db_get_extentsize_proc */ /* * PUBLIC: void __db_get_extentsize_proc __P((long, * PUBLIC: __db_get_extentsize_reply *)); @@ -978,7 +917,6 @@ void 
__db_get_extentsize_proc(dbpcl_id, replyp) long dbpcl_id; __db_get_extentsize_reply *replyp; -/* END __db_get_extentsize_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -989,7 +927,6 @@ __db_get_extentsize_proc(dbpcl_id, replyp) replyp->status = dbp->get_q_extentsize(dbp, &replyp->extentsize); } -/* BEGIN __db_extentsize_proc */ /* * PUBLIC: void __db_extentsize_proc __P((long, u_int32_t, * PUBLIC: __db_extentsize_reply *)); @@ -999,7 +936,6 @@ __db_extentsize_proc(dbpcl_id, extentsize, replyp) long dbpcl_id; u_int32_t extentsize; __db_extentsize_reply *replyp; -/* END __db_extentsize_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -1014,7 +950,6 @@ __db_extentsize_proc(dbpcl_id, extentsize, replyp) return; } -/* BEGIN __db_get_flags_proc */ /* * PUBLIC: void __db_get_flags_proc __P((long, __db_get_flags_reply *)); */ @@ -1022,7 +957,6 @@ void __db_get_flags_proc(dbpcl_id, replyp) long dbpcl_id; __db_get_flags_reply *replyp; -/* END __db_get_flags_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -1033,7 +967,6 @@ __db_get_flags_proc(dbpcl_id, replyp) replyp->status = dbp->get_flags(dbp, &replyp->flags); } -/* BEGIN __db_flags_proc */ /* * PUBLIC: void __db_flags_proc __P((long, u_int32_t, __db_flags_reply *)); */ @@ -1042,7 +975,6 @@ __db_flags_proc(dbpcl_id, flags, replyp) long dbpcl_id; u_int32_t flags; __db_flags_reply *replyp; -/* END __db_flags_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -1058,17 +990,16 @@ __db_flags_proc(dbpcl_id, flags, replyp) return; } -/* BEGIN __db_get_proc */ /* * PUBLIC: void __db_get_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t, - * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, - * PUBLIC: u_int32_t, u_int32_t, __db_get_reply *, int *)); + * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, + * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, __db_get_reply *, + * PUBLIC: int *)); */ void -__db_get_proc(dbpcl_id, txnpcl_id, keydlen, - keydoff, keyulen, keyflags, keydata, - keysize, datadlen, datadoff, dataulen, - dataflags, datadata, datasize, flags, replyp, freep) +__db_get_proc(dbpcl_id, txnpcl_id, keydlen, keydoff, keyulen, keyflags, + keydata, keysize, datadlen, datadoff, dataulen, dataflags, datadata, + datasize, flags, replyp, freep) long dbpcl_id; long txnpcl_id; u_int32_t keydlen; @@ -1086,7 +1017,6 @@ __db_get_proc(dbpcl_id, txnpcl_id, keydlen, u_int32_t flags; __db_get_reply *replyp; int * freep; -/* END __db_get_proc */ { DB *dbp; DBT key, data; @@ -1208,7 +1138,6 @@ err: replyp->keydata.keydata_val = NULL; return; } -/* BEGIN __db_get_h_ffactor_proc */ /* * PUBLIC: void __db_get_h_ffactor_proc __P((long, * PUBLIC: __db_get_h_ffactor_reply *)); @@ -1217,7 +1146,6 @@ void __db_get_h_ffactor_proc(dbpcl_id, replyp) long dbpcl_id; __db_get_h_ffactor_reply *replyp; -/* END __db_get_h_ffactor_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -1228,7 +1156,6 @@ __db_get_h_ffactor_proc(dbpcl_id, replyp) replyp->status = dbp->get_h_ffactor(dbp, &replyp->ffactor); } -/* BEGIN __db_h_ffactor_proc */ /* * PUBLIC: void __db_h_ffactor_proc __P((long, u_int32_t, * PUBLIC: __db_h_ffactor_reply *)); @@ -1238,7 +1165,6 @@ __db_h_ffactor_proc(dbpcl_id, ffactor, replyp) long dbpcl_id; u_int32_t ffactor; __db_h_ffactor_reply *replyp; -/* END __db_h_ffactor_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -1253,7 +1179,6 @@ __db_h_ffactor_proc(dbpcl_id, ffactor, replyp) return; } -/* BEGIN __db_get_h_nelem_proc */ /* * PUBLIC: void __db_get_h_nelem_proc __P((long, __db_get_h_nelem_reply *)); */ @@ -1261,7 +1186,6 @@ void 
__db_get_h_nelem_proc(dbpcl_id, replyp) long dbpcl_id; __db_get_h_nelem_reply *replyp; -/* END __db_get_h_nelem_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -1272,7 +1196,6 @@ __db_get_h_nelem_proc(dbpcl_id, replyp) replyp->status = dbp->get_h_nelem(dbp, &replyp->nelem); } -/* BEGIN __db_h_nelem_proc */ /* * PUBLIC: void __db_h_nelem_proc __P((long, u_int32_t, * PUBLIC: __db_h_nelem_reply *)); @@ -1282,7 +1205,6 @@ __db_h_nelem_proc(dbpcl_id, nelem, replyp) long dbpcl_id; u_int32_t nelem; __db_h_nelem_reply *replyp; -/* END __db_h_nelem_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -1297,15 +1219,14 @@ __db_h_nelem_proc(dbpcl_id, nelem, replyp) return; } -/* BEGIN __db_key_range_proc */ /* * PUBLIC: void __db_key_range_proc __P((long, long, u_int32_t, u_int32_t, - * PUBLIC: u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __db_key_range_reply *)); + * PUBLIC: u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, + * PUBLIC: __db_key_range_reply *)); */ void -__db_key_range_proc(dbpcl_id, txnpcl_id, keydlen, - keydoff, keyulen, keyflags, keydata, - keysize, flags, replyp) +__db_key_range_proc(dbpcl_id, txnpcl_id, keydlen, keydoff, keyulen, + keyflags, keydata, keysize, flags, replyp) long dbpcl_id; long txnpcl_id; u_int32_t keydlen; @@ -1316,7 +1237,6 @@ __db_key_range_proc(dbpcl_id, txnpcl_id, keydlen, u_int32_t keysize; u_int32_t flags; __db_key_range_reply *replyp; -/* END __db_key_range_proc */ { DB *dbp; DBT key; @@ -1351,7 +1271,6 @@ __db_key_range_proc(dbpcl_id, txnpcl_id, keydlen, return; } -/* BEGIN __db_get_lorder_proc */ /* * PUBLIC: void __db_get_lorder_proc __P((long, __db_get_lorder_reply *)); */ @@ -1359,7 +1278,6 @@ void __db_get_lorder_proc(dbpcl_id, replyp) long dbpcl_id; __db_get_lorder_reply *replyp; -/* END __db_get_lorder_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -1367,10 +1285,9 @@ __db_get_lorder_proc(dbpcl_id, replyp) ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); dbp = (DB *)dbp_ctp->ct_anyp; - replyp->status = dbp->get_lorder(dbp, &replyp->lorder); + replyp->status = dbp->get_lorder(dbp, (int *)&replyp->lorder); } -/* BEGIN __db_lorder_proc */ /* * PUBLIC: void __db_lorder_proc __P((long, u_int32_t, __db_lorder_reply *)); */ @@ -1379,7 +1296,6 @@ __db_lorder_proc(dbpcl_id, lorder, replyp) long dbpcl_id; u_int32_t lorder; __db_lorder_reply *replyp; -/* END __db_lorder_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -1394,7 +1310,6 @@ __db_lorder_proc(dbpcl_id, lorder, replyp) return; } -/* BEGIN __db_get_name_proc */ /* * PUBLIC: void __db_get_name_proc __P((long, __db_get_name_reply *)); */ @@ -1402,7 +1317,6 @@ void __db_get_name_proc(dbpcl_id, replyp) long dbpcl_id; __db_get_name_reply *replyp; -/* END __db_get_name_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -1414,7 +1328,6 @@ __db_get_name_proc(dbpcl_id, replyp) (const char **)&replyp->filename, (const char **)&replyp->dbname); } -/* BEGIN __db_get_open_flags_proc */ /* * PUBLIC: void __db_get_open_flags_proc __P((long, * PUBLIC: __db_get_open_flags_reply *)); @@ -1423,7 +1336,6 @@ void __db_get_open_flags_proc(dbpcl_id, replyp) long dbpcl_id; __db_get_open_flags_reply *replyp; -/* END __db_get_open_flags_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -1434,14 +1346,12 @@ __db_get_open_flags_proc(dbpcl_id, replyp) replyp->status = dbp->get_open_flags(dbp, &replyp->flags); } -/* BEGIN __db_open_proc */ /* * PUBLIC: void __db_open_proc __P((long, long, char *, char *, u_int32_t, * PUBLIC: u_int32_t, u_int32_t, __db_open_reply *)); */ void -__db_open_proc(dbpcl_id, txnpcl_id, name, - subdb, type, flags, mode, replyp) +__db_open_proc(dbpcl_id, 
txnpcl_id, name, subdb, type, flags, mode, replyp) long dbpcl_id; long txnpcl_id; char *name; @@ -1450,7 +1360,6 @@ __db_open_proc(dbpcl_id, txnpcl_id, name, u_int32_t flags; u_int32_t mode; __db_open_reply *replyp; -/* END __db_open_proc */ { DB *dbp; DB_TXN *txnp; @@ -1483,11 +1392,6 @@ __db_open_proc(dbpcl_id, txnpcl_id, name, if (ret == 0) { (void)dbp->get_type(dbp, &dbtype); replyp->type = dbtype; - /* XXX - * Tcl needs to peek at dbp->flags for DB_AM_DUP. Send - * this dbp's flags back. - */ - replyp->dbflags = (int) dbp->flags; /* * We need to determine the byte order of the database * and send it back to the client. Determine it by @@ -1524,7 +1428,6 @@ out: return; } -/* BEGIN __db_get_pagesize_proc */ /* * PUBLIC: void __db_get_pagesize_proc __P((long, __db_get_pagesize_reply *)); */ @@ -1532,7 +1435,6 @@ void __db_get_pagesize_proc(dbpcl_id, replyp) long dbpcl_id; __db_get_pagesize_reply *replyp; -/* END __db_get_pagesize_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -1543,7 +1445,6 @@ __db_get_pagesize_proc(dbpcl_id, replyp) replyp->status = dbp->get_pagesize(dbp, &replyp->pagesize); } -/* BEGIN __db_pagesize_proc */ /* * PUBLIC: void __db_pagesize_proc __P((long, u_int32_t, * PUBLIC: __db_pagesize_reply *)); @@ -1553,7 +1454,6 @@ __db_pagesize_proc(dbpcl_id, pagesize, replyp) long dbpcl_id; u_int32_t pagesize; __db_pagesize_reply *replyp; -/* END __db_pagesize_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -1568,20 +1468,18 @@ __db_pagesize_proc(dbpcl_id, pagesize, replyp) return; } -/* BEGIN __db_pget_proc */ /* - * PUBLIC: void __db_pget_proc __P((long, long, u_int32_t, u_int32_t, - * PUBLIC: u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, - * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, - * PUBLIC: u_int32_t, u_int32_t, __db_pget_reply *, int *)); + * PUBLIC: void __db_pget_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t, + * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, + * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, + * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, __db_pget_reply *, + * PUBLIC: int *)); */ void -__db_pget_proc(dbpcl_id, txnpcl_id, skeydlen, - skeydoff, skeyulen, skeyflags, skeydata, - skeysize, pkeydlen, pkeydoff, pkeyulen, - pkeyflags, pkeydata, pkeysize, datadlen, - datadoff, dataulen, dataflags, datadata, - datasize, flags, replyp, freep) +__db_pget_proc(dbpcl_id, txnpcl_id, skeydlen, skeydoff, skeyulen, + skeyflags, skeydata, skeysize, pkeydlen, pkeydoff, pkeyulen, pkeyflags, + pkeydata, pkeysize, datadlen, datadoff, dataulen, dataflags, datadata, + datasize, flags, replyp, freep) long dbpcl_id; long txnpcl_id; u_int32_t skeydlen; @@ -1605,7 +1503,6 @@ __db_pget_proc(dbpcl_id, txnpcl_id, skeydlen, u_int32_t flags; __db_pget_reply *replyp; int * freep; -/* END __db_pget_proc */ { DB *dbp; DBT skey, pkey, data; @@ -1762,17 +1659,16 @@ err: replyp->skeydata.skeydata_val = NULL; return; } -/* BEGIN __db_put_proc */ /* * PUBLIC: void __db_put_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t, - * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, - * PUBLIC: u_int32_t, u_int32_t, __db_put_reply *, int *)); + * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, + * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, __db_put_reply *, + * PUBLIC: int *)); */ void -__db_put_proc(dbpcl_id, txnpcl_id, keydlen, - keydoff, keyulen, keyflags, keydata, - keysize, datadlen, datadoff, 
dataulen, - dataflags, datadata, datasize, flags, replyp, freep) +__db_put_proc(dbpcl_id, txnpcl_id, keydlen, keydoff, keyulen, keyflags, + keydata, keysize, datadlen, datadoff, dataulen, dataflags, datadata, + datasize, flags, replyp, freep) long dbpcl_id; long txnpcl_id; u_int32_t keydlen; @@ -1790,7 +1686,6 @@ __db_put_proc(dbpcl_id, txnpcl_id, keydlen, u_int32_t flags; __db_put_reply *replyp; int * freep; -/* END __db_put_proc */ { DB *dbp; DBT key, data; @@ -1870,7 +1765,6 @@ err: replyp->keydata.keydata_val = NULL; return; } -/* BEGIN __db_get_re_delim_proc */ /* * PUBLIC: void __db_get_re_delim_proc __P((long, __db_get_re_delim_reply *)); */ @@ -1878,7 +1772,6 @@ void __db_get_re_delim_proc(dbpcl_id, replyp) long dbpcl_id; __db_get_re_delim_reply *replyp; -/* END __db_get_re_delim_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -1886,10 +1779,9 @@ __db_get_re_delim_proc(dbpcl_id, replyp) ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); dbp = (DB *)dbp_ctp->ct_anyp; - replyp->status = dbp->get_re_delim(dbp, &replyp->delim); + replyp->status = dbp->get_re_delim(dbp, (int *)&replyp->delim); } -/* BEGIN __db_re_delim_proc */ /* * PUBLIC: void __db_re_delim_proc __P((long, u_int32_t, * PUBLIC: __db_re_delim_reply *)); @@ -1899,7 +1791,6 @@ __db_re_delim_proc(dbpcl_id, delim, replyp) long dbpcl_id; u_int32_t delim; __db_re_delim_reply *replyp; -/* END __db_re_delim_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -1914,7 +1805,6 @@ __db_re_delim_proc(dbpcl_id, delim, replyp) return; } -/* BEGIN __db_get_re_len_proc */ /* * PUBLIC: void __db_get_re_len_proc __P((long, __db_get_re_len_reply *)); */ @@ -1922,7 +1812,6 @@ void __db_get_re_len_proc(dbpcl_id, replyp) long dbpcl_id; __db_get_re_len_reply *replyp; -/* END __db_get_re_len_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -1933,7 +1822,6 @@ __db_get_re_len_proc(dbpcl_id, replyp) replyp->status = dbp->get_re_len(dbp, &replyp->len); } -/* BEGIN __db_re_len_proc */ /* * PUBLIC: void __db_re_len_proc __P((long, u_int32_t, __db_re_len_reply *)); */ @@ -1942,7 +1830,6 @@ __db_re_len_proc(dbpcl_id, len, replyp) long dbpcl_id; u_int32_t len; __db_re_len_reply *replyp; -/* END __db_re_len_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -1957,7 +1844,6 @@ __db_re_len_proc(dbpcl_id, len, replyp) return; } -/* BEGIN __db_get_re_pad_proc */ /* * PUBLIC: void __db_get_re_pad_proc __P((long, __db_get_re_pad_reply *)); */ @@ -1965,7 +1851,6 @@ void __db_get_re_pad_proc(dbpcl_id, replyp) long dbpcl_id; __db_get_re_pad_reply *replyp; -/* END __db_get_re_pad_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -1973,10 +1858,9 @@ __db_get_re_pad_proc(dbpcl_id, replyp) ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); dbp = (DB *)dbp_ctp->ct_anyp; - replyp->status = dbp->get_re_pad(dbp, &replyp->pad); + replyp->status = dbp->get_re_pad(dbp, (int *)&replyp->pad); } -/* BEGIN __db_re_pad_proc */ /* * PUBLIC: void __db_re_pad_proc __P((long, u_int32_t, __db_re_pad_reply *)); */ @@ -1985,7 +1869,6 @@ __db_re_pad_proc(dbpcl_id, pad, replyp) long dbpcl_id; u_int32_t pad; __db_re_pad_reply *replyp; -/* END __db_re_pad_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -2000,20 +1883,17 @@ __db_re_pad_proc(dbpcl_id, pad, replyp) return; } -/* BEGIN __db_remove_proc */ /* * PUBLIC: void __db_remove_proc __P((long, char *, char *, u_int32_t, * PUBLIC: __db_remove_reply *)); */ void -__db_remove_proc(dbpcl_id, name, subdb, - flags, replyp) +__db_remove_proc(dbpcl_id, name, subdb, flags, replyp) long dbpcl_id; char *name; char *subdb; u_int32_t flags; __db_remove_reply *replyp; -/* END __db_remove_proc */ { DB *dbp; ct_entry 
*dbp_ctp; @@ -2029,21 +1909,18 @@ __db_remove_proc(dbpcl_id, name, subdb, return; } -/* BEGIN __db_rename_proc */ /* * PUBLIC: void __db_rename_proc __P((long, char *, char *, char *, u_int32_t, * PUBLIC: __db_rename_reply *)); */ void -__db_rename_proc(dbpcl_id, name, subdb, - newname, flags, replyp) +__db_rename_proc(dbpcl_id, name, subdb, newname, flags, replyp) long dbpcl_id; char *name; char *subdb; char *newname; u_int32_t flags; __db_rename_reply *replyp; -/* END __db_rename_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -2059,30 +1936,35 @@ __db_rename_proc(dbpcl_id, name, subdb, return; } -/* BEGIN __db_stat_proc */ /* - * PUBLIC: void __db_stat_proc __P((long, u_int32_t, __db_stat_reply *, + * PUBLIC: void __db_stat_proc __P((long, long, u_int32_t, __db_stat_reply *, * PUBLIC: int *)); */ void -__db_stat_proc(dbpcl_id, flags, replyp, freep) +__db_stat_proc(dbpcl_id, txnpcl_id, flags, replyp, freep) long dbpcl_id; + long txnpcl_id; u_int32_t flags; __db_stat_reply *replyp; int * freep; -/* END __db_stat_proc */ { DB *dbp; + DB_TXN *txnp; DBTYPE type; - ct_entry *dbp_ctp; + ct_entry *dbp_ctp, *txnp_ctp; u_int32_t *q, *p, *retsp; int i, len, ret; void *sp; ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); dbp = (DB *)dbp_ctp->ct_anyp; + if (txnpcl_id != 0) { + ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN); + txnp = (DB_TXN *)txnp_ctp->ct_anyp; + } else + txnp = NULL; - ret = dbp->stat(dbp, &sp, flags); + ret = dbp->stat(dbp, txnp, &sp, flags); replyp->status = ret; if (ret != 0) return; @@ -2119,7 +2001,6 @@ out: return; } -/* BEGIN __db_sync_proc */ /* * PUBLIC: void __db_sync_proc __P((long, u_int32_t, __db_sync_reply *)); */ @@ -2128,7 +2009,6 @@ __db_sync_proc(dbpcl_id, flags, replyp) long dbpcl_id; u_int32_t flags; __db_sync_reply *replyp; -/* END __db_sync_proc */ { DB *dbp; ct_entry *dbp_ctp; @@ -2143,19 +2023,16 @@ __db_sync_proc(dbpcl_id, flags, replyp) return; } -/* BEGIN __db_truncate_proc */ /* * PUBLIC: void __db_truncate_proc __P((long, long, u_int32_t, * PUBLIC: __db_truncate_reply *)); */ void -__db_truncate_proc(dbpcl_id, txnpcl_id, - flags, replyp) +__db_truncate_proc(dbpcl_id, txnpcl_id, flags, replyp) long dbpcl_id; long txnpcl_id; u_int32_t flags; __db_truncate_reply *replyp; -/* END __db_truncate_proc */ { DB *dbp; DB_TXN *txnp; @@ -2178,19 +2055,16 @@ __db_truncate_proc(dbpcl_id, txnpcl_id, return; } -/* BEGIN __db_cursor_proc */ /* * PUBLIC: void __db_cursor_proc __P((long, long, u_int32_t, * PUBLIC: __db_cursor_reply *)); */ void -__db_cursor_proc(dbpcl_id, txnpcl_id, - flags, replyp) +__db_cursor_proc(dbpcl_id, txnpcl_id, flags, replyp) long dbpcl_id; long txnpcl_id; u_int32_t flags; __db_cursor_reply *replyp; -/* END __db_cursor_proc */ { DB *dbp; DBC *dbc; @@ -2227,20 +2101,17 @@ __db_cursor_proc(dbpcl_id, txnpcl_id, return; } -/* BEGIN __db_join_proc */ /* * PUBLIC: void __db_join_proc __P((long, u_int32_t *, u_int32_t, u_int32_t, * PUBLIC: __db_join_reply *)); */ void -__db_join_proc(dbpcl_id, curs, curslen, - flags, replyp) +__db_join_proc(dbpcl_id, curs, curslen, flags, replyp) long dbpcl_id; u_int32_t * curs; u_int32_t curslen; u_int32_t flags; __db_join_reply *replyp; -/* END __db_join_proc */ { DB *dbp; DBC **jcurs, **c; @@ -2333,7 +2204,6 @@ out: return; } -/* BEGIN __dbc_close_proc */ /* * PUBLIC: void __dbc_close_proc __P((long, __dbc_close_reply *)); */ @@ -2341,7 +2211,6 @@ void __dbc_close_proc(dbccl_id, replyp) long dbccl_id; __dbc_close_reply *replyp; -/* END __dbc_close_proc */ { ct_entry *dbc_ctp; @@ -2350,7 +2219,6 @@ __dbc_close_proc(dbccl_id, replyp) 
return; } -/* BEGIN __dbc_count_proc */ /* * PUBLIC: void __dbc_count_proc __P((long, u_int32_t, __dbc_count_reply *)); */ @@ -2359,7 +2227,6 @@ __dbc_count_proc(dbccl_id, flags, replyp) long dbccl_id; u_int32_t flags; __dbc_count_reply *replyp; -/* END __dbc_count_proc */ { DBC *dbc; ct_entry *dbc_ctp; @@ -2376,7 +2243,6 @@ __dbc_count_proc(dbccl_id, flags, replyp) return; } -/* BEGIN __dbc_del_proc */ /* * PUBLIC: void __dbc_del_proc __P((long, u_int32_t, __dbc_del_reply *)); */ @@ -2385,7 +2251,6 @@ __dbc_del_proc(dbccl_id, flags, replyp) long dbccl_id; u_int32_t flags; __dbc_del_reply *replyp; -/* END __dbc_del_proc */ { DBC *dbc; ct_entry *dbc_ctp; @@ -2400,7 +2265,6 @@ __dbc_del_proc(dbccl_id, flags, replyp) return; } -/* BEGIN __dbc_dup_proc */ /* * PUBLIC: void __dbc_dup_proc __P((long, u_int32_t, __dbc_dup_reply *)); */ @@ -2409,7 +2273,6 @@ __dbc_dup_proc(dbccl_id, flags, replyp) long dbccl_id; u_int32_t flags; __dbc_dup_reply *replyp; -/* END __dbc_dup_proc */ { DBC *dbc, *newdbc; ct_entry *dbc_ctp, *new_ctp; @@ -2442,17 +2305,16 @@ __dbc_dup_proc(dbccl_id, flags, replyp) return; } -/* BEGIN __dbc_get_proc */ /* * PUBLIC: void __dbc_get_proc __P((long, u_int32_t, u_int32_t, u_int32_t, - * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, - * PUBLIC: u_int32_t, u_int32_t, __dbc_get_reply *, int *)); + * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, + * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, __dbc_get_reply *, + * PUBLIC: int *)); */ void -__dbc_get_proc(dbccl_id, keydlen, keydoff, - keyulen, keyflags, keydata, keysize, - datadlen, datadoff, dataulen, dataflags, - datadata, datasize, flags, replyp, freep) +__dbc_get_proc(dbccl_id, keydlen, keydoff, keyulen, keyflags, keydata, + keysize, datadlen, datadoff, dataulen, dataflags, datadata, datasize, + flags, replyp, freep) long dbccl_id; u_int32_t keydlen; u_int32_t keydoff; @@ -2469,7 +2331,6 @@ __dbc_get_proc(dbccl_id, keydlen, keydoff, u_int32_t flags; __dbc_get_reply *replyp; int * freep; -/* END __dbc_get_proc */ { DBC *dbc; DBT key, data; @@ -2562,7 +2423,8 @@ __dbc_get_proc(dbccl_id, keydlen, keydoff, __os_ufree(dbenv, key.data); __os_ufree(dbenv, data.data); if (key_alloc) - __os_ufree(dbenv, replyp->keydata.keydata_val); + __os_ufree( + dbenv, replyp->keydata.keydata_val); goto err; } memcpy(replyp->datadata.datadata_val, data.data, @@ -2583,20 +2445,18 @@ err: replyp->keydata.keydata_val = NULL; return; } -/* BEGIN __dbc_pget_proc */ /* * PUBLIC: void __dbc_pget_proc __P((long, u_int32_t, u_int32_t, u_int32_t, - * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, - * PUBLIC: u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, - * PUBLIC: u_int32_t, __dbc_pget_reply *, int *)); + * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, + * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, + * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, __dbc_pget_reply *, + * PUBLIC: int *)); */ void -__dbc_pget_proc(dbccl_id, skeydlen, skeydoff, - skeyulen, skeyflags, skeydata, skeysize, - pkeydlen, pkeydoff, pkeyulen, pkeyflags, - pkeydata, pkeysize, datadlen, datadoff, - dataulen, dataflags, datadata, datasize, - flags, replyp, freep) +__dbc_pget_proc(dbccl_id, skeydlen, skeydoff, skeyulen, skeyflags, + skeydata, skeysize, pkeydlen, pkeydoff, pkeyulen, pkeyflags, pkeydata, + pkeysize, datadlen, datadoff, dataulen, dataflags, datadata, datasize, + flags, replyp, 
freep) long dbccl_id; u_int32_t skeydlen; u_int32_t skeydoff; @@ -2619,7 +2479,6 @@ __dbc_pget_proc(dbccl_id, skeydlen, skeydoff, u_int32_t flags; __dbc_pget_reply *replyp; int * freep; -/* END __dbc_pget_proc */ { DBC *dbc; DBT skey, pkey, data; @@ -2771,17 +2630,16 @@ err: replyp->skeydata.skeydata_val = NULL; return; } -/* BEGIN __dbc_put_proc */ /* * PUBLIC: void __dbc_put_proc __P((long, u_int32_t, u_int32_t, u_int32_t, - * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, - * PUBLIC: u_int32_t, u_int32_t, __dbc_put_reply *, int *)); + * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, + * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, __dbc_put_reply *, + * PUBLIC: int *)); */ void -__dbc_put_proc(dbccl_id, keydlen, keydoff, - keyulen, keyflags, keydata, keysize, - datadlen, datadoff, dataulen, dataflags, - datadata, datasize, flags, replyp, freep) +__dbc_put_proc(dbccl_id, keydlen, keydoff, keyulen, keyflags, keydata, + keysize, datadlen, datadoff, dataulen, dataflags, datadata, datasize, + flags, replyp, freep) long dbccl_id; u_int32_t keydlen; u_int32_t keydoff; @@ -2798,7 +2656,6 @@ __dbc_put_proc(dbccl_id, keydlen, keydoff, u_int32_t flags; __dbc_put_reply *replyp; int * freep; -/* END __dbc_put_proc */ { DB *dbp; DBC *dbc; @@ -2851,4 +2708,3 @@ __dbc_put_proc(dbccl_id, keydlen, keydoff, replyp->status = ret; return; } -#endif /* HAVE_RPC */ diff --git a/db/rpc_server/c/db_server_util.c b/db/rpc_server/c/db_server_util.c index 5023eb7bf..11bc4deb3 100644 --- a/db/rpc_server/c/db_server_util.c +++ b/db/rpc_server/c/db_server_util.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2000-2003 + * Copyright (c) 2000-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_server_util.c,v 1.72 2004/09/22 17:30:12 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_server_util.c,v 1.65 2003/04/24 18:54:13 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -34,15 +32,15 @@ static const char revid[] = "$Id: db_server_util.c,v 1.65 2003/04/24 18:54:13 bo #include #include #endif -#include "dbinc_auto/db_server.h" + +#include "db_server.h" #include "db_int.h" #include "dbinc_auto/clib_ext.h" #include "dbinc/db_server_int.h" -#include "dbinc_auto/rpc_server_ext.h" #include "dbinc_auto/common_ext.h" +#include "dbinc_auto/rpc_server_ext.h" -extern int __dbsrv_main __P((void)); static int add_home __P((char *)); static int add_passwd __P((char *)); static int env_recover __P((char *)); @@ -66,6 +64,7 @@ main(argc, argv) int argc; char **argv; { + extern int __dbsrv_main(); extern char *optarg; CLIENT *cl; int ch, ret; @@ -417,7 +416,7 @@ new_ct_ent(errp) octp = LIST_FIRST(&__dbsrv_head); if (octp != NULL && octp->ct_id >= t) t = octp->ct_id + 1; - ctp->ct_id = t; + ctp->ct_id = (long)t; ctp->ct_idle = __dbsrv_idleto; ctp->ct_activep = &ctp->ct_active; ctp->ct_origp = NULL; @@ -654,7 +653,7 @@ __dbenv_close_int(id, flags, force) { DB_ENV *dbenv; int ret; - ct_entry *ctp; + ct_entry *ctp, *dbctp, *nextctp; ret = 0; ctp = get_tableent(id); @@ -674,6 +673,31 @@ __dbenv_close_int(id, flags, force) if (__dbsrv_verbose) printf("Closing env id %ld\n", id); + /* + * If we're timing out an env, we want to close all of its + * database handles as well. All of the txns and cursors + * must have been timed out prior to timing out the env. 
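[Editor's illustration] The comment in this hunk explains why a forced environment close must also sweep the handle table for that environment's open databases, restarting the walk after every close because one close can invalidate several list entries. The standalone C sketch below (plain pointers and an invented close_handle() helper, not Berkeley DB's ct_entry machinery) shows just that restart-from-the-head idiom under those assumptions.

    #include <stdio.h>
    #include <stdlib.h>

    struct handle {
    	int id;
    	int parent;			/* 0 means "no parent". */
    	struct handle *next;
    };

    static struct handle *head;

    /* Unlink and free one entry; a stand-in for what __db_close_int() does. */
    static void
    close_handle(h)
    	struct handle *h;
    {
    	struct handle **hp;

    	for (hp = &head; *hp != NULL; hp = &(*hp)->next)
    		if (*hp == h) {
    			*hp = h->next;
    			printf("closed handle %d\n", h->id);
    			free(h);
    			return;
    		}
    }

    /* Close every handle owned by `parent', restarting after each close. */
    static void
    close_children(parent)
    	int parent;
    {
    	struct handle *h, *next;

    	for (h = head; h != NULL; h = next) {
    		next = h->next;
    		if (h->parent != parent)
    			continue;
    		close_handle(h);
    		next = head;		/* The list changed; start over. */
    	}
    }

    int
    main()
    {
    	struct handle *h;
    	int i;

    	for (i = 4; i >= 1; i--) {	/* Handles 1-4; odd ids belong to parent 7. */
    		if ((h = malloc(sizeof(*h))) == NULL)
    			return (1);
    		h->id = i;
    		h->parent = (i % 2) != 0 ? 7 : 0;
    		h->next = head;
    		head = h;
    	}
    	close_children(7);
    	return (0);
    }

Restarting from the head after every close trades a little extra scanning for correctness: it never follows a next pointer that the close may have freed.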
+ */ + if (force) + for (dbctp = LIST_FIRST(&__dbsrv_head); + dbctp != NULL; dbctp = nextctp) { + nextctp = LIST_NEXT(dbctp, entries); + if (dbctp->ct_type != CT_DB) + continue; + if (dbctp->ct_envparent != ctp) + continue; + /* + * We found a DB handle that is part of this + * environment. Close it. + */ + __db_close_int(dbctp->ct_id, 0); + /* + * If we timed out a dbp, we may have removed + * multiple ctp entries. Start over with a + * guaranteed good ctp. + */ + nextctp = LIST_FIRST(&__dbsrv_head); + } ret = dbenv->close(dbenv, flags); __dbdel_ctp(ctp); return (ret); @@ -784,10 +808,8 @@ env_recover(progname) progname, db_strerror(ret)); exit(EXIT_FAILURE); } - if (__dbsrv_verbose == 1) { + if (__dbsrv_verbose == 1) (void)dbenv->set_verbose(dbenv, DB_VERB_RECOVERY, 1); - (void)dbenv->set_verbose(dbenv, DB_VERB_CHKPOINT, 1); - } dbenv->set_errfile(dbenv, stderr); dbenv->set_errpfx(dbenv, progname); if (hp->passwd != NULL) diff --git a/db/rpc_server/c/gen_db_server.c b/db/rpc_server/c/gen_db_server.c index 1e18c6388..c5f3a7e0b 100644 --- a/db/rpc_server/c/gen_db_server.c +++ b/db/rpc_server/c/gen_db_server.c @@ -5,22 +5,18 @@ #include #include -#include #include #endif +#include "db_server.h" + #include "db_int.h" -#include "dbinc_auto/db_server.h" #include "dbinc/db_server_int.h" #include "dbinc_auto/rpc_server_ext.h" -/* - * PUBLIC: __env_get_cachesize_reply *__db_env_get_cachesize_4002 - * PUBLIC: __P((__env_get_cachesize_msg *, struct svc_req *)); - */ __env_get_cachesize_reply * -__db_env_get_cachesize_4002(msg, req) +__db_env_get_cachesize_4003__SVCSUFFIX__(msg, req) __env_get_cachesize_msg *msg; struct svc_req *req; { @@ -33,12 +29,8 @@ __db_env_get_cachesize_4002(msg, req) return (&reply); } -/* - * PUBLIC: __env_cachesize_reply *__db_env_cachesize_4002 - * PUBLIC: __P((__env_cachesize_msg *, struct svc_req *)); - */ __env_cachesize_reply * -__db_env_cachesize_4002(msg, req) +__db_env_cachesize_4003__SVCSUFFIX__(msg, req) __env_cachesize_msg *msg; struct svc_req *req; { @@ -54,12 +46,8 @@ __db_env_cachesize_4002(msg, req) return (&reply); } -/* - * PUBLIC: __env_close_reply *__db_env_close_4002 __P((__env_close_msg *, - * PUBLIC: struct svc_req *)); - */ __env_close_reply * -__db_env_close_4002(msg, req) +__db_env_close_4003__SVCSUFFIX__(msg, req) __env_close_msg *msg; struct svc_req *req; { @@ -73,12 +61,8 @@ __db_env_close_4002(msg, req) return (&reply); } -/* - * PUBLIC: __env_create_reply *__db_env_create_4002 __P((__env_create_msg *, - * PUBLIC: struct svc_req *)); - */ __env_create_reply * -__db_env_create_4002(msg, req) +__db_env_create_4003__SVCSUFFIX__(msg, req) __env_create_msg *msg; struct svc_req *req; { @@ -91,12 +75,8 @@ __db_env_create_4002(msg, req) return (&reply); } -/* - * PUBLIC: __env_dbremove_reply *__db_env_dbremove_4002 - * PUBLIC: __P((__env_dbremove_msg *, struct svc_req *)); - */ __env_dbremove_reply * -__db_env_dbremove_4002(msg, req) +__db_env_dbremove_4003__SVCSUFFIX__(msg, req) __env_dbremove_msg *msg; struct svc_req *req; { @@ -113,12 +93,8 @@ __db_env_dbremove_4002(msg, req) return (&reply); } -/* - * PUBLIC: __env_dbrename_reply *__db_env_dbrename_4002 - * PUBLIC: __P((__env_dbrename_msg *, struct svc_req *)); - */ __env_dbrename_reply * -__db_env_dbrename_4002(msg, req) +__db_env_dbrename_4003__SVCSUFFIX__(msg, req) __env_dbrename_msg *msg; struct svc_req *req; { @@ -136,12 +112,8 @@ __db_env_dbrename_4002(msg, req) return (&reply); } -/* - * PUBLIC: __env_get_encrypt_flags_reply *__db_env_get_encrypt_flags_4002 - * PUBLIC: 
__P((__env_get_encrypt_flags_msg *, struct svc_req *)); - */ __env_get_encrypt_flags_reply * -__db_env_get_encrypt_flags_4002(msg, req) +__db_env_get_encrypt_flags_4003__SVCSUFFIX__(msg, req) __env_get_encrypt_flags_msg *msg; struct svc_req *req; { @@ -154,12 +126,8 @@ __db_env_get_encrypt_flags_4002(msg, req) return (&reply); } -/* - * PUBLIC: __env_encrypt_reply *__db_env_encrypt_4002 - * PUBLIC: __P((__env_encrypt_msg *, struct svc_req *)); - */ __env_encrypt_reply * -__db_env_encrypt_4002(msg, req) +__db_env_encrypt_4003__SVCSUFFIX__(msg, req) __env_encrypt_msg *msg; struct svc_req *req; { @@ -174,12 +142,8 @@ __db_env_encrypt_4002(msg, req) return (&reply); } -/* - * PUBLIC: __env_get_flags_reply *__db_env_get_flags_4002 - * PUBLIC: __P((__env_get_flags_msg *, struct svc_req *)); - */ __env_get_flags_reply * -__db_env_get_flags_4002(msg, req) +__db_env_get_flags_4003__SVCSUFFIX__(msg, req) __env_get_flags_msg *msg; struct svc_req *req; { @@ -192,12 +156,8 @@ __db_env_get_flags_4002(msg, req) return (&reply); } -/* - * PUBLIC: __env_flags_reply *__db_env_flags_4002 __P((__env_flags_msg *, - * PUBLIC: struct svc_req *)); - */ __env_flags_reply * -__db_env_flags_4002(msg, req) +__db_env_flags_4003__SVCSUFFIX__(msg, req) __env_flags_msg *msg; struct svc_req *req; { @@ -212,12 +172,8 @@ __db_env_flags_4002(msg, req) return (&reply); } -/* - * PUBLIC: __env_get_home_reply *__db_env_get_home_4002 - * PUBLIC: __P((__env_get_home_msg *, struct svc_req *)); - */ __env_get_home_reply * -__db_env_get_home_4002(msg, req) +__db_env_get_home_4003__SVCSUFFIX__(msg, req) __env_get_home_msg *msg; struct svc_req *req; { @@ -230,12 +186,8 @@ __db_env_get_home_4002(msg, req) return (&reply); } -/* - * PUBLIC: __env_get_open_flags_reply *__db_env_get_open_flags_4002 - * PUBLIC: __P((__env_get_open_flags_msg *, struct svc_req *)); - */ __env_get_open_flags_reply * -__db_env_get_open_flags_4002(msg, req) +__db_env_get_open_flags_4003__SVCSUFFIX__(msg, req) __env_get_open_flags_msg *msg; struct svc_req *req; { @@ -248,12 +200,8 @@ __db_env_get_open_flags_4002(msg, req) return (&reply); } -/* - * PUBLIC: __env_open_reply *__db_env_open_4002 __P((__env_open_msg *, - * PUBLIC: struct svc_req *)); - */ __env_open_reply * -__db_env_open_4002(msg, req) +__db_env_open_4003__SVCSUFFIX__(msg, req) __env_open_msg *msg; struct svc_req *req; { @@ -269,12 +217,8 @@ __db_env_open_4002(msg, req) return (&reply); } -/* - * PUBLIC: __env_remove_reply *__db_env_remove_4002 __P((__env_remove_msg *, - * PUBLIC: struct svc_req *)); - */ __env_remove_reply * -__db_env_remove_4002(msg, req) +__db_env_remove_4003__SVCSUFFIX__(msg, req) __env_remove_msg *msg; struct svc_req *req; { @@ -289,12 +233,8 @@ __db_env_remove_4002(msg, req) return (&reply); } -/* - * PUBLIC: __txn_abort_reply *__db_txn_abort_4002 __P((__txn_abort_msg *, - * PUBLIC: struct svc_req *)); - */ __txn_abort_reply * -__db_txn_abort_4002(msg, req) +__db_txn_abort_4003__SVCSUFFIX__(msg, req) __txn_abort_msg *msg; struct svc_req *req; { @@ -307,12 +247,8 @@ __db_txn_abort_4002(msg, req) return (&reply); } -/* - * PUBLIC: __txn_begin_reply *__db_txn_begin_4002 __P((__txn_begin_msg *, - * PUBLIC: struct svc_req *)); - */ __txn_begin_reply * -__db_txn_begin_4002(msg, req) +__db_txn_begin_4003__SVCSUFFIX__(msg, req) __txn_begin_msg *msg; struct svc_req *req; { @@ -327,12 +263,8 @@ __db_txn_begin_4002(msg, req) return (&reply); } -/* - * PUBLIC: __txn_commit_reply *__db_txn_commit_4002 __P((__txn_commit_msg *, - * PUBLIC: struct svc_req *)); - */ __txn_commit_reply * 
-__db_txn_commit_4002(msg, req) +__db_txn_commit_4003__SVCSUFFIX__(msg, req) __txn_commit_msg *msg; struct svc_req *req; { @@ -346,12 +278,8 @@ __db_txn_commit_4002(msg, req) return (&reply); } -/* - * PUBLIC: __txn_discard_reply *__db_txn_discard_4002 - * PUBLIC: __P((__txn_discard_msg *, struct svc_req *)); - */ __txn_discard_reply * -__db_txn_discard_4002(msg, req) +__db_txn_discard_4003__SVCSUFFIX__(msg, req) __txn_discard_msg *msg; struct svc_req *req; { @@ -365,12 +293,8 @@ __db_txn_discard_4002(msg, req) return (&reply); } -/* - * PUBLIC: __txn_prepare_reply *__db_txn_prepare_4002 - * PUBLIC: __P((__txn_prepare_msg *, struct svc_req *)); - */ __txn_prepare_reply * -__db_txn_prepare_4002(msg, req) +__db_txn_prepare_4003__SVCSUFFIX__(msg, req) __txn_prepare_msg *msg; struct svc_req *req; { @@ -378,18 +302,14 @@ __db_txn_prepare_4002(msg, req) COMPQUIET(req, NULL); __txn_prepare_proc(msg->txnpcl_id, - msg->gid, + (u_int8_t *)msg->gid, &reply); return (&reply); } -/* - * PUBLIC: __txn_recover_reply *__db_txn_recover_4002 - * PUBLIC: __P((__txn_recover_msg *, struct svc_req *)); - */ __txn_recover_reply * -__db_txn_recover_4002(msg, req) +__db_txn_recover_4003__SVCSUFFIX__(msg, req) __txn_recover_msg *msg; struct svc_req *req; { @@ -413,12 +333,8 @@ __db_txn_recover_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_associate_reply *__db_db_associate_4002 - * PUBLIC: __P((__db_associate_msg *, struct svc_req *)); - */ __db_associate_reply * -__db_db_associate_4002(msg, req) +__db_db_associate_4003__SVCSUFFIX__(msg, req) __db_associate_msg *msg; struct svc_req *req; { @@ -434,12 +350,8 @@ __db_db_associate_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_bt_maxkey_reply *__db_db_bt_maxkey_4002 - * PUBLIC: __P((__db_bt_maxkey_msg *, struct svc_req *)); - */ __db_bt_maxkey_reply * -__db_db_bt_maxkey_4002(msg, req) +__db_db_bt_maxkey_4003__SVCSUFFIX__(msg, req) __db_bt_maxkey_msg *msg; struct svc_req *req; { @@ -453,12 +365,8 @@ __db_db_bt_maxkey_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_get_bt_minkey_reply *__db_db_get_bt_minkey_4002 - * PUBLIC: __P((__db_get_bt_minkey_msg *, struct svc_req *)); - */ __db_get_bt_minkey_reply * -__db_db_get_bt_minkey_4002(msg, req) +__db_db_get_bt_minkey_4003__SVCSUFFIX__(msg, req) __db_get_bt_minkey_msg *msg; struct svc_req *req; { @@ -471,12 +379,8 @@ __db_db_get_bt_minkey_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_bt_minkey_reply *__db_db_bt_minkey_4002 - * PUBLIC: __P((__db_bt_minkey_msg *, struct svc_req *)); - */ __db_bt_minkey_reply * -__db_db_bt_minkey_4002(msg, req) +__db_db_bt_minkey_4003__SVCSUFFIX__(msg, req) __db_bt_minkey_msg *msg; struct svc_req *req; { @@ -490,12 +394,8 @@ __db_db_bt_minkey_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_close_reply *__db_db_close_4002 __P((__db_close_msg *, - * PUBLIC: struct svc_req *)); - */ __db_close_reply * -__db_db_close_4002(msg, req) +__db_db_close_4003__SVCSUFFIX__(msg, req) __db_close_msg *msg; struct svc_req *req; { @@ -509,12 +409,8 @@ __db_db_close_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_create_reply *__db_db_create_4002 __P((__db_create_msg *, - * PUBLIC: struct svc_req *)); - */ __db_create_reply * -__db_db_create_4002(msg, req) +__db_db_create_4003__SVCSUFFIX__(msg, req) __db_create_msg *msg; struct svc_req *req; { @@ -528,12 +424,8 @@ __db_db_create_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_del_reply *__db_db_del_4002 __P((__db_del_msg *, - * PUBLIC: struct svc_req *)); - */ __db_del_reply * -__db_db_del_4002(msg, req) 
+__db_db_del_4003__SVCSUFFIX__(msg, req) __db_del_msg *msg; struct svc_req *req; { @@ -554,12 +446,8 @@ __db_db_del_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_get_encrypt_flags_reply *__db_db_get_encrypt_flags_4002 - * PUBLIC: __P((__db_get_encrypt_flags_msg *, struct svc_req *)); - */ __db_get_encrypt_flags_reply * -__db_db_get_encrypt_flags_4002(msg, req) +__db_db_get_encrypt_flags_4003__SVCSUFFIX__(msg, req) __db_get_encrypt_flags_msg *msg; struct svc_req *req; { @@ -572,12 +460,8 @@ __db_db_get_encrypt_flags_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_encrypt_reply *__db_db_encrypt_4002 __P((__db_encrypt_msg *, - * PUBLIC: struct svc_req *)); - */ __db_encrypt_reply * -__db_db_encrypt_4002(msg, req) +__db_db_encrypt_4003__SVCSUFFIX__(msg, req) __db_encrypt_msg *msg; struct svc_req *req; { @@ -592,12 +476,8 @@ __db_db_encrypt_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_get_extentsize_reply *__db_db_get_extentsize_4002 - * PUBLIC: __P((__db_get_extentsize_msg *, struct svc_req *)); - */ __db_get_extentsize_reply * -__db_db_get_extentsize_4002(msg, req) +__db_db_get_extentsize_4003__SVCSUFFIX__(msg, req) __db_get_extentsize_msg *msg; struct svc_req *req; { @@ -610,12 +490,8 @@ __db_db_get_extentsize_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_extentsize_reply *__db_db_extentsize_4002 - * PUBLIC: __P((__db_extentsize_msg *, struct svc_req *)); - */ __db_extentsize_reply * -__db_db_extentsize_4002(msg, req) +__db_db_extentsize_4003__SVCSUFFIX__(msg, req) __db_extentsize_msg *msg; struct svc_req *req; { @@ -629,12 +505,8 @@ __db_db_extentsize_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_get_flags_reply *__db_db_get_flags_4002 - * PUBLIC: __P((__db_get_flags_msg *, struct svc_req *)); - */ __db_get_flags_reply * -__db_db_get_flags_4002(msg, req) +__db_db_get_flags_4003__SVCSUFFIX__(msg, req) __db_get_flags_msg *msg; struct svc_req *req; { @@ -647,12 +519,8 @@ __db_db_get_flags_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_flags_reply *__db_db_flags_4002 __P((__db_flags_msg *, - * PUBLIC: struct svc_req *)); - */ __db_flags_reply * -__db_db_flags_4002(msg, req) +__db_db_flags_4003__SVCSUFFIX__(msg, req) __db_flags_msg *msg; struct svc_req *req; { @@ -666,12 +534,8 @@ __db_db_flags_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_get_reply *__db_db_get_4002 __P((__db_get_msg *, - * PUBLIC: struct svc_req *)); - */ __db_get_reply * -__db_db_get_4002(msg, req) +__db_db_get_4003__SVCSUFFIX__(msg, req) __db_get_msg *msg; struct svc_req *req; { @@ -707,12 +571,8 @@ __db_db_get_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_get_name_reply *__db_db_get_name_4002 - * PUBLIC: __P((__db_get_name_msg *, struct svc_req *)); - */ __db_get_name_reply * -__db_db_get_name_4002(msg, req) +__db_db_get_name_4003__SVCSUFFIX__(msg, req) __db_get_name_msg *msg; struct svc_req *req; { @@ -725,12 +585,8 @@ __db_db_get_name_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_get_open_flags_reply *__db_db_get_open_flags_4002 - * PUBLIC: __P((__db_get_open_flags_msg *, struct svc_req *)); - */ __db_get_open_flags_reply * -__db_db_get_open_flags_4002(msg, req) +__db_db_get_open_flags_4003__SVCSUFFIX__(msg, req) __db_get_open_flags_msg *msg; struct svc_req *req; { @@ -743,12 +599,8 @@ __db_db_get_open_flags_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_get_h_ffactor_reply *__db_db_get_h_ffactor_4002 - * PUBLIC: __P((__db_get_h_ffactor_msg *, struct svc_req *)); - */ __db_get_h_ffactor_reply * -__db_db_get_h_ffactor_4002(msg, req) 
+__db_db_get_h_ffactor_4003__SVCSUFFIX__(msg, req) __db_get_h_ffactor_msg *msg; struct svc_req *req; { @@ -761,12 +613,8 @@ __db_db_get_h_ffactor_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_h_ffactor_reply *__db_db_h_ffactor_4002 - * PUBLIC: __P((__db_h_ffactor_msg *, struct svc_req *)); - */ __db_h_ffactor_reply * -__db_db_h_ffactor_4002(msg, req) +__db_db_h_ffactor_4003__SVCSUFFIX__(msg, req) __db_h_ffactor_msg *msg; struct svc_req *req; { @@ -780,12 +628,8 @@ __db_db_h_ffactor_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_get_h_nelem_reply *__db_db_get_h_nelem_4002 - * PUBLIC: __P((__db_get_h_nelem_msg *, struct svc_req *)); - */ __db_get_h_nelem_reply * -__db_db_get_h_nelem_4002(msg, req) +__db_db_get_h_nelem_4003__SVCSUFFIX__(msg, req) __db_get_h_nelem_msg *msg; struct svc_req *req; { @@ -798,12 +642,8 @@ __db_db_get_h_nelem_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_h_nelem_reply *__db_db_h_nelem_4002 __P((__db_h_nelem_msg *, - * PUBLIC: struct svc_req *)); - */ __db_h_nelem_reply * -__db_db_h_nelem_4002(msg, req) +__db_db_h_nelem_4003__SVCSUFFIX__(msg, req) __db_h_nelem_msg *msg; struct svc_req *req; { @@ -817,12 +657,8 @@ __db_db_h_nelem_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_key_range_reply *__db_db_key_range_4002 - * PUBLIC: __P((__db_key_range_msg *, struct svc_req *)); - */ __db_key_range_reply * -__db_db_key_range_4002(msg, req) +__db_db_key_range_4003__SVCSUFFIX__(msg, req) __db_key_range_msg *msg; struct svc_req *req; { @@ -843,12 +679,8 @@ __db_db_key_range_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_get_lorder_reply *__db_db_get_lorder_4002 - * PUBLIC: __P((__db_get_lorder_msg *, struct svc_req *)); - */ __db_get_lorder_reply * -__db_db_get_lorder_4002(msg, req) +__db_db_get_lorder_4003__SVCSUFFIX__(msg, req) __db_get_lorder_msg *msg; struct svc_req *req; { @@ -861,12 +693,8 @@ __db_db_get_lorder_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_lorder_reply *__db_db_lorder_4002 __P((__db_lorder_msg *, - * PUBLIC: struct svc_req *)); - */ __db_lorder_reply * -__db_db_lorder_4002(msg, req) +__db_db_lorder_4003__SVCSUFFIX__(msg, req) __db_lorder_msg *msg; struct svc_req *req; { @@ -880,12 +708,8 @@ __db_db_lorder_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_open_reply *__db_db_open_4002 __P((__db_open_msg *, - * PUBLIC: struct svc_req *)); - */ __db_open_reply * -__db_db_open_4002(msg, req) +__db_db_open_4003__SVCSUFFIX__(msg, req) __db_open_msg *msg; struct svc_req *req; { @@ -904,12 +728,8 @@ __db_db_open_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_get_pagesize_reply *__db_db_get_pagesize_4002 - * PUBLIC: __P((__db_get_pagesize_msg *, struct svc_req *)); - */ __db_get_pagesize_reply * -__db_db_get_pagesize_4002(msg, req) +__db_db_get_pagesize_4003__SVCSUFFIX__(msg, req) __db_get_pagesize_msg *msg; struct svc_req *req; { @@ -922,12 +742,8 @@ __db_db_get_pagesize_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_pagesize_reply *__db_db_pagesize_4002 - * PUBLIC: __P((__db_pagesize_msg *, struct svc_req *)); - */ __db_pagesize_reply * -__db_db_pagesize_4002(msg, req) +__db_db_pagesize_4003__SVCSUFFIX__(msg, req) __db_pagesize_msg *msg; struct svc_req *req; { @@ -941,12 +757,8 @@ __db_db_pagesize_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_pget_reply *__db_db_pget_4002 __P((__db_pget_msg *, - * PUBLIC: struct svc_req *)); - */ __db_pget_reply * -__db_db_pget_4002(msg, req) +__db_db_pget_4003__SVCSUFFIX__(msg, req) __db_pget_msg *msg; struct svc_req *req; { @@ -989,12 +801,8 @@ 
__db_db_pget_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_put_reply *__db_db_put_4002 __P((__db_put_msg *, - * PUBLIC: struct svc_req *)); - */ __db_put_reply * -__db_db_put_4002(msg, req) +__db_db_put_4003__SVCSUFFIX__(msg, req) __db_put_msg *msg; struct svc_req *req; { @@ -1029,12 +837,8 @@ __db_db_put_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_get_re_delim_reply *__db_db_get_re_delim_4002 - * PUBLIC: __P((__db_get_re_delim_msg *, struct svc_req *)); - */ __db_get_re_delim_reply * -__db_db_get_re_delim_4002(msg, req) +__db_db_get_re_delim_4003__SVCSUFFIX__(msg, req) __db_get_re_delim_msg *msg; struct svc_req *req; { @@ -1047,12 +851,8 @@ __db_db_get_re_delim_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_re_delim_reply *__db_db_re_delim_4002 - * PUBLIC: __P((__db_re_delim_msg *, struct svc_req *)); - */ __db_re_delim_reply * -__db_db_re_delim_4002(msg, req) +__db_db_re_delim_4003__SVCSUFFIX__(msg, req) __db_re_delim_msg *msg; struct svc_req *req; { @@ -1066,12 +866,8 @@ __db_db_re_delim_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_get_re_len_reply *__db_db_get_re_len_4002 - * PUBLIC: __P((__db_get_re_len_msg *, struct svc_req *)); - */ __db_get_re_len_reply * -__db_db_get_re_len_4002(msg, req) +__db_db_get_re_len_4003__SVCSUFFIX__(msg, req) __db_get_re_len_msg *msg; struct svc_req *req; { @@ -1084,12 +880,8 @@ __db_db_get_re_len_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_re_len_reply *__db_db_re_len_4002 __P((__db_re_len_msg *, - * PUBLIC: struct svc_req *)); - */ __db_re_len_reply * -__db_db_re_len_4002(msg, req) +__db_db_re_len_4003__SVCSUFFIX__(msg, req) __db_re_len_msg *msg; struct svc_req *req; { @@ -1103,12 +895,8 @@ __db_db_re_len_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_re_pad_reply *__db_db_re_pad_4002 __P((__db_re_pad_msg *, - * PUBLIC: struct svc_req *)); - */ __db_re_pad_reply * -__db_db_re_pad_4002(msg, req) +__db_db_re_pad_4003__SVCSUFFIX__(msg, req) __db_re_pad_msg *msg; struct svc_req *req; { @@ -1122,12 +910,8 @@ __db_db_re_pad_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_get_re_pad_reply *__db_db_get_re_pad_4002 - * PUBLIC: __P((__db_get_re_pad_msg *, struct svc_req *)); - */ __db_get_re_pad_reply * -__db_db_get_re_pad_4002(msg, req) +__db_db_get_re_pad_4003__SVCSUFFIX__(msg, req) __db_get_re_pad_msg *msg; struct svc_req *req; { @@ -1140,12 +924,8 @@ __db_db_get_re_pad_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_remove_reply *__db_db_remove_4002 __P((__db_remove_msg *, - * PUBLIC: struct svc_req *)); - */ __db_remove_reply * -__db_db_remove_4002(msg, req) +__db_db_remove_4003__SVCSUFFIX__(msg, req) __db_remove_msg *msg; struct svc_req *req; { @@ -1161,12 +941,8 @@ __db_db_remove_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_rename_reply *__db_db_rename_4002 __P((__db_rename_msg *, - * PUBLIC: struct svc_req *)); - */ __db_rename_reply * -__db_db_rename_4002(msg, req) +__db_db_rename_4003__SVCSUFFIX__(msg, req) __db_rename_msg *msg; struct svc_req *req; { @@ -1183,12 +959,8 @@ __db_db_rename_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_stat_reply *__db_db_stat_4002 __P((__db_stat_msg *, - * PUBLIC: struct svc_req *)); - */ __db_stat_reply * -__db_db_stat_4002(msg, req) +__db_db_stat_4003__SVCSUFFIX__(msg, req) __db_stat_msg *msg; struct svc_req *req; { @@ -1204,18 +976,15 @@ __db_db_stat_4002(msg, req) reply.stats.stats_val = NULL; __db_stat_proc(msg->dbpcl_id, + msg->txnpcl_id, msg->flags, &reply, &__db_stat_free); return (&reply); } -/* - * PUBLIC: __db_sync_reply 
*__db_db_sync_4002 __P((__db_sync_msg *, - * PUBLIC: struct svc_req *)); - */ __db_sync_reply * -__db_db_sync_4002(msg, req) +__db_db_sync_4003__SVCSUFFIX__(msg, req) __db_sync_msg *msg; struct svc_req *req; { @@ -1229,12 +998,8 @@ __db_db_sync_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_truncate_reply *__db_db_truncate_4002 - * PUBLIC: __P((__db_truncate_msg *, struct svc_req *)); - */ __db_truncate_reply * -__db_db_truncate_4002(msg, req) +__db_db_truncate_4003__SVCSUFFIX__(msg, req) __db_truncate_msg *msg; struct svc_req *req; { @@ -1249,12 +1014,8 @@ __db_db_truncate_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_cursor_reply *__db_db_cursor_4002 __P((__db_cursor_msg *, - * PUBLIC: struct svc_req *)); - */ __db_cursor_reply * -__db_db_cursor_4002(msg, req) +__db_db_cursor_4003__SVCSUFFIX__(msg, req) __db_cursor_msg *msg; struct svc_req *req; { @@ -1269,12 +1030,8 @@ __db_db_cursor_4002(msg, req) return (&reply); } -/* - * PUBLIC: __db_join_reply *__db_db_join_4002 __P((__db_join_msg *, - * PUBLIC: struct svc_req *)); - */ __db_join_reply * -__db_db_join_4002(msg, req) +__db_db_join_4003__SVCSUFFIX__(msg, req) __db_join_msg *msg; struct svc_req *req; { @@ -1290,12 +1047,8 @@ __db_db_join_4002(msg, req) return (&reply); } -/* - * PUBLIC: __dbc_close_reply *__db_dbc_close_4002 __P((__dbc_close_msg *, - * PUBLIC: struct svc_req *)); - */ __dbc_close_reply * -__db_dbc_close_4002(msg, req) +__db_dbc_close_4003__SVCSUFFIX__(msg, req) __dbc_close_msg *msg; struct svc_req *req; { @@ -1308,12 +1061,8 @@ __db_dbc_close_4002(msg, req) return (&reply); } -/* - * PUBLIC: __dbc_count_reply *__db_dbc_count_4002 __P((__dbc_count_msg *, - * PUBLIC: struct svc_req *)); - */ __dbc_count_reply * -__db_dbc_count_4002(msg, req) +__db_dbc_count_4003__SVCSUFFIX__(msg, req) __dbc_count_msg *msg; struct svc_req *req; { @@ -1327,12 +1076,8 @@ __db_dbc_count_4002(msg, req) return (&reply); } -/* - * PUBLIC: __dbc_del_reply *__db_dbc_del_4002 __P((__dbc_del_msg *, - * PUBLIC: struct svc_req *)); - */ __dbc_del_reply * -__db_dbc_del_4002(msg, req) +__db_dbc_del_4003__SVCSUFFIX__(msg, req) __dbc_del_msg *msg; struct svc_req *req; { @@ -1346,12 +1091,8 @@ __db_dbc_del_4002(msg, req) return (&reply); } -/* - * PUBLIC: __dbc_dup_reply *__db_dbc_dup_4002 __P((__dbc_dup_msg *, - * PUBLIC: struct svc_req *)); - */ __dbc_dup_reply * -__db_dbc_dup_4002(msg, req) +__db_dbc_dup_4003__SVCSUFFIX__(msg, req) __dbc_dup_msg *msg; struct svc_req *req; { @@ -1365,12 +1106,8 @@ __db_dbc_dup_4002(msg, req) return (&reply); } -/* - * PUBLIC: __dbc_get_reply *__db_dbc_get_4002 __P((__dbc_get_msg *, - * PUBLIC: struct svc_req *)); - */ __dbc_get_reply * -__db_dbc_get_4002(msg, req) +__db_dbc_get_4003__SVCSUFFIX__(msg, req) __dbc_get_msg *msg; struct svc_req *req; { @@ -1405,12 +1142,8 @@ __db_dbc_get_4002(msg, req) return (&reply); } -/* - * PUBLIC: __dbc_pget_reply *__db_dbc_pget_4002 __P((__dbc_pget_msg *, - * PUBLIC: struct svc_req *)); - */ __dbc_pget_reply * -__db_dbc_pget_4002(msg, req) +__db_dbc_pget_4003__SVCSUFFIX__(msg, req) __dbc_pget_msg *msg; struct svc_req *req; { @@ -1452,12 +1185,8 @@ __db_dbc_pget_4002(msg, req) return (&reply); } -/* - * PUBLIC: __dbc_put_reply *__db_dbc_put_4002 __P((__dbc_put_msg *, - * PUBLIC: struct svc_req *)); - */ __dbc_put_reply * -__db_dbc_put_4002(msg, req) +__db_dbc_put_4003__SVCSUFFIX__(msg, req) __dbc_put_msg *msg; struct svc_req *req; { diff --git a/db/rpc_server/cxx/db_server_cxxproc.cpp b/db/rpc_server/cxx/db_server_cxxproc.cpp index 105114fa2..e536a7051 100644 --- 
a/db/rpc_server/cxx/db_server_cxxproc.cpp +++ b/db/rpc_server/cxx/db_server_cxxproc.cpp @@ -1,17 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. + * + * $Id: db_server_cxxproc.cpp,v 1.23 2004/09/22 17:30:12 bostic Exp $ */ #include "db_config.h" -#ifdef HAVE_RPC -#ifndef lint -static const char revid[] = "$Id: db_server_cxxproc.cpp,v 1.15 2003/04/23 20:43:09 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -19,7 +16,8 @@ static const char revid[] = "$Id: db_server_cxxproc.cpp,v 1.15 2003/04/23 20:43: #include #endif -#include "dbinc_auto/db_server.h" + +#include "db_server.h" #include "db_int.h" #include "db_cxx.h" @@ -395,7 +393,7 @@ __txn_begin_proc( } else parent = NULL; - ret = dbenv->txn_begin(parent, &txnp, flags); + ret = dbenv->txn_begin(parent, &txnp, flags | DB_TXN_NOWAIT); if (ret == 0) { ctp->ct_txnp = txnp; ctp->ct_type = CT_TXN; @@ -489,11 +487,10 @@ __txn_recover_proc( dbenv = (DbEnv *)dbenv_ctp->ct_anyp; *freep = 0; - if ((ret = - __os_malloc(dbenv->get_DB_ENV(), count * sizeof(DbPreplist), &dbprep)) != 0) + if ((ret = __os_malloc( + dbenv->get_DB_ENV(), count * sizeof(DbPreplist), &dbprep)) != 0) goto out; - if ((ret = - dbenv->txn_recover(dbprep, count, &retcount, flags)) != 0) + if ((ret = dbenv->txn_recover(dbprep, count, &retcount, flags)) != 0) goto out; /* * If there is nothing, success, but it's easy. @@ -620,11 +617,12 @@ __db_associate_proc( txnp = NULL; /* - * We do not support DB_CREATE for associate. Users - * can only access secondary indices on a read-only basis, - * so whatever they are looking for needs to be there already. + * We do not support DB_CREATE for associate or the callbacks + * implemented in the Java and JE RPC servers. Users can only + * access secondary indices on a read-only basis, so whatever they + * are looking for needs to be there already. */ - if (flags != 0) + if (LF_ISSET(DB_RPC2ND_MASK | DB_CREATE)) ret = EINVAL; else ret = dbp->associate(txnp, sdbp, NULL, flags); @@ -941,12 +939,15 @@ __db_get_proc( ret = __os_umalloc(dbp->get_DB()->dbenv, key.get_size(), &replyp->keydata.keydata_val); if (ret != 0) { - __os_ufree(dbp->get_DB()->dbenv, key.get_data()); - __os_ufree(dbp->get_DB()->dbenv, data.get_data()); + __os_ufree( + dbp->get_DB()->dbenv, key.get_data()); + __os_ufree( + dbp->get_DB()->dbenv, data.get_data()); goto err; } key_alloc = 1; - memcpy(replyp->keydata.keydata_val, key.get_data(), key.get_size()); + memcpy(replyp->keydata.keydata_val, + key.get_data(), key.get_size()); } else replyp->keydata.keydata_val = (char *)key.get_data(); @@ -959,8 +960,10 @@ __db_get_proc( ret = __os_umalloc(dbp->get_DB()->dbenv, data.get_size(), &replyp->datadata.datadata_val); if (ret != 0) { - __os_ufree(dbp->get_DB()->dbenv, key.get_data()); - __os_ufree(dbp->get_DB()->dbenv, data.get_data()); + __os_ufree( + dbp->get_DB()->dbenv, key.get_data()); + __os_ufree( + dbp->get_DB()->dbenv, data.get_data()); if (key_alloc) __os_ufree(dbp->get_DB()->dbenv, replyp->keydata.keydata_val); @@ -1181,8 +1184,8 @@ __db_open_proc( txnp = NULL; replyp->dbcl_id = dbpcl_id; - if ((new_ctp = __dbsrv_sharedb(dbp_ctp, name, subdb, (DBTYPE)type, flags)) - != NULL) { + if ((new_ctp = __dbsrv_sharedb( + dbp_ctp, name, subdb, (DBTYPE)type, flags)) != NULL) { /* * We can share, clean up old ID, set new one. 
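[Editor's illustration] One behavioral change in the hunks above is that the RPC server now ORs DB_TXN_NOWAIT into every txn_begin() call, so server-side operations return immediately rather than blocking on a busy lock. A client-visible consequence is that operations can fail with DB_LOCK_NOTGRANTED (or DB_LOCK_DEADLOCK, depending on version and configuration) and need to be retried. The helper below is a sketch against the public C API, not code from this patch; its name, retry count, and error handling are invented, and it assumes dbenv and dbp are already open and transactional.

    #include <string.h>

    #include <db.h>

    int
    put_nowait(dbenv, dbp, keystr, datastr)
    	DB_ENV *dbenv;
    	DB *dbp;
    	const char *keystr, *datastr;
    {
    	DB_TXN *txn;
    	DBT key, data;
    	int attempts, ret;

    	memset(&key, 0, sizeof(key));
    	memset(&data, 0, sizeof(data));
    	key.data = (void *)keystr;
    	key.size = (u_int32_t)strlen(keystr) + 1;
    	data.data = (void *)datastr;
    	data.size = (u_int32_t)strlen(datastr) + 1;

    	for (attempts = 0; attempts < 5; attempts++) {
    		if ((ret =
    		    dbenv->txn_begin(dbenv, NULL, &txn, DB_TXN_NOWAIT)) != 0)
    			return (ret);
    		ret = dbp->put(dbp, txn, &key, &data, 0);
    		if (ret == 0)
    			return (txn->commit(txn, 0));
    		(void)txn->abort(txn);
    		if (ret != DB_LOCK_NOTGRANTED && ret != DB_LOCK_DEADLOCK)
    			return (ret);
    		/* Lock was busy; drop the transaction and try again. */
    	}
    	return (ret);
    }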
*/ @@ -1196,11 +1199,6 @@ __db_open_proc( if (ret == 0) { (void)dbp->get_type(&dbtype); replyp->type = dbtype; - /* XXX - * Tcl needs to peek at dbp->flags for DB_AM_DUP. Send - * this dbp's flags back. - */ - replyp->dbflags = (int) dbp->get_DB()->flags; /* * We need to determine the byte order of the database * and send it back to the client. Determine it by @@ -1356,9 +1354,12 @@ __db_pget_proc( ret = __os_umalloc(dbp->get_DB()->dbenv, skey.get_size(), &replyp->skeydata.skeydata_val); if (ret != 0) { - __os_ufree(dbp->get_DB()->dbenv, skey.get_data()); - __os_ufree(dbp->get_DB()->dbenv, pkey.get_data()); - __os_ufree(dbp->get_DB()->dbenv, data.get_data()); + __os_ufree( + dbp->get_DB()->dbenv, skey.get_data()); + __os_ufree( + dbp->get_DB()->dbenv, pkey.get_data()); + __os_ufree( + dbp->get_DB()->dbenv, data.get_data()); goto err; } key_alloc = 1; @@ -1376,9 +1377,12 @@ __db_pget_proc( ret = __os_umalloc(dbp->get_DB()->dbenv, pkey.get_size(), &replyp->pkeydata.pkeydata_val); if (ret != 0) { - __os_ufree(dbp->get_DB()->dbenv, skey.get_data()); - __os_ufree(dbp->get_DB()->dbenv, pkey.get_data()); - __os_ufree(dbp->get_DB()->dbenv, data.get_data()); + __os_ufree( + dbp->get_DB()->dbenv, skey.get_data()); + __os_ufree( + dbp->get_DB()->dbenv, pkey.get_data()); + __os_ufree( + dbp->get_DB()->dbenv, data.get_data()); if (key_alloc) __os_ufree(dbp->get_DB()->dbenv, replyp->skeydata.skeydata_val); @@ -1404,9 +1408,12 @@ __db_pget_proc( ret = __os_umalloc(dbp->get_DB()->dbenv, data.get_size(), &replyp->datadata.datadata_val); if (ret != 0) { - __os_ufree(dbp->get_DB()->dbenv, skey.get_data()); - __os_ufree(dbp->get_DB()->dbenv, pkey.get_data()); - __os_ufree(dbp->get_DB()->dbenv, data.get_data()); + __os_ufree( + dbp->get_DB()->dbenv, skey.get_data()); + __os_ufree( + dbp->get_DB()->dbenv, pkey.get_data()); + __os_ufree( + dbp->get_DB()->dbenv, data.get_data()); /* * If key_alloc is 1, just skey needs to be * freed, if key_alloc is 2, both skey and pkey @@ -1509,10 +1516,12 @@ __db_put_proc( ret = __os_umalloc(dbp->get_DB()->dbenv, key.get_size(), &replyp->keydata.keydata_val); if (ret != 0) { - __os_ufree(dbp->get_DB()->dbenv, key.get_data()); + __os_ufree( + dbp->get_DB()->dbenv, key.get_data()); goto err; } - memcpy(replyp->keydata.keydata_val, key.get_data(), key.get_size()); + memcpy(replyp->keydata.keydata_val, + key.get_data(), key.get_size()); } else replyp->keydata.keydata_val = (char *)key.get_data(); @@ -1673,21 +1682,28 @@ __db_rename_proc( extern "C" void __db_stat_proc( long dbpcl_id, + long txnpcl_id, u_int32_t flags, __db_stat_reply *replyp, int * freep) { Db *dbp; + DbTxn *txnp; DBTYPE type; - ct_entry *dbp_ctp; + ct_entry *dbp_ctp, *txnp_ctp; u_int32_t *q, *p, *retsp; int i, len, ret; void *sp; ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); dbp = (Db *)dbp_ctp->ct_anyp; + if (txnpcl_id != 0) { + ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN); + txnp = (DbTxn *)txnp_ctp->ct_anyp; + } else + txnp = NULL; - ret = dbp->stat(&sp, flags); + ret = dbp->stat(txnp, &sp, flags); replyp->status = ret; if (ret != 0) return; @@ -2084,11 +2100,13 @@ __dbc_get_proc( &replyp->keydata.keydata_val); if (ret != 0) { __os_ufree(dbenv->get_DB_ENV(), key.get_data()); - __os_ufree(dbenv->get_DB_ENV(), data.get_data()); + __os_ufree( + dbenv->get_DB_ENV(), data.get_data()); goto err; } key_alloc = 1; - memcpy(replyp->keydata.keydata_val, key.get_data(), key.get_size()); + memcpy(replyp->keydata.keydata_val, + key.get_data(), key.get_size()); } else replyp->keydata.keydata_val = (char *)key.get_data(); @@ -2102,7 
+2120,8 @@ __dbc_get_proc( &replyp->datadata.datadata_val); if (ret != 0) { __os_ufree(dbenv->get_DB_ENV(), key.get_data()); - __os_ufree(dbenv->get_DB_ENV(), data.get_data()); + __os_ufree( + dbenv->get_DB_ENV(), data.get_data()); if (key_alloc) __os_ufree(dbenv->get_DB_ENV(), replyp->keydata.keydata_val); @@ -2207,9 +2226,12 @@ __dbc_pget_proc( ret = __os_umalloc(dbenv->get_DB_ENV(), skey.get_size(), &replyp->skeydata.skeydata_val); if (ret != 0) { - __os_ufree(dbenv->get_DB_ENV(), skey.get_data()); - __os_ufree(dbenv->get_DB_ENV(), pkey.get_data()); - __os_ufree(dbenv->get_DB_ENV(), data.get_data()); + __os_ufree( + dbenv->get_DB_ENV(), skey.get_data()); + __os_ufree( + dbenv->get_DB_ENV(), pkey.get_data()); + __os_ufree( + dbenv->get_DB_ENV(), data.get_data()); goto err; } key_alloc = 1; @@ -2226,9 +2248,12 @@ __dbc_pget_proc( ret = __os_umalloc(dbenv->get_DB_ENV(), pkey.get_size(), &replyp->pkeydata.pkeydata_val); if (ret != 0) { - __os_ufree(dbenv->get_DB_ENV(), skey.get_data()); - __os_ufree(dbenv->get_DB_ENV(), pkey.get_data()); - __os_ufree(dbenv->get_DB_ENV(), data.get_data()); + __os_ufree( + dbenv->get_DB_ENV(), skey.get_data()); + __os_ufree( + dbenv->get_DB_ENV(), pkey.get_data()); + __os_ufree( + dbenv->get_DB_ENV(), data.get_data()); if (key_alloc) __os_ufree(dbenv->get_DB_ENV(), replyp->skeydata.skeydata_val); @@ -2254,9 +2279,12 @@ __dbc_pget_proc( ret = __os_umalloc(dbenv->get_DB_ENV(), data.get_size(), &replyp->datadata.datadata_val); if (ret != 0) { - __os_ufree(dbenv->get_DB_ENV(), skey.get_data()); - __os_ufree(dbenv->get_DB_ENV(), pkey.get_data()); - __os_ufree(dbenv->get_DB_ENV(), data.get_data()); + __os_ufree( + dbenv->get_DB_ENV(), skey.get_data()); + __os_ufree( + dbenv->get_DB_ENV(), pkey.get_data()); + __os_ufree( + dbenv->get_DB_ENV(), data.get_data()); /* * If key_alloc is 1, just skey needs to be * freed, if key_alloc is 2, both skey and pkey @@ -2343,7 +2371,8 @@ __dbc_put_proc( ret = dbp->get_type(&dbtype); if (ret == 0 && dbtype == DB_RECNO) { /* - * We need to xdr_free whatever we are returning, next time. + * We need to xdr_free whatever we are returning, next + * time. */ replyp->keydata.keydata_val = (char *)key.get_data(); replyp->keydata.keydata_len = key.get_size(); @@ -2352,4 +2381,3 @@ __dbc_put_proc( replyp->status = ret; return; } -#endif /* HAVE_RPC */ diff --git a/db/rpc_server/cxx/db_server_cxxutil.cpp b/db/rpc_server/cxx/db_server_cxxutil.cpp index cf2156c06..d5aacdc0f 100644 --- a/db/rpc_server/cxx/db_server_cxxutil.cpp +++ b/db/rpc_server/cxx/db_server_cxxutil.cpp @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2000-2003 + * Copyright (c) 2000-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: db_server_cxxutil.cpp,v 1.17 2004/09/22 17:30:13 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: db_server_cxxutil.cpp,v 1.11 2003/04/23 20:44:47 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -34,7 +32,8 @@ static const char revid[] = "$Id: db_server_cxxutil.cpp,v 1.11 2003/04/23 20:44: #include #include #endif -#include "dbinc_auto/db_server.h" + +#include "db_server.h" #include "db_int.h" #include "db_cxx.h" @@ -159,7 +158,7 @@ main( */ if (__dbsrv_defto > __dbsrv_idleto) fprintf(stderr, - "%s: WARNING: Idle timeout %ld is less than resource timeout %ld\n", + "%s: WARNING: Idle timeout %ld is less than resource timeout %ld\n", prog, __dbsrv_idleto, __dbsrv_defto); LIST_INIT(&__dbsrv_head); @@ -421,7 +420,8 @@ get_tableent(long id) } extern "C" ct_entry * -__dbsrv_sharedb(ct_entry *db_ctp, const char *name, const char *subdb, DBTYPE type, u_int32_t flags) +__dbsrv_sharedb(ct_entry *db_ctp, + const char *name, const char *subdb, DBTYPE type, u_int32_t flags) { ct_entry *ctp; @@ -597,7 +597,7 @@ __dbenv_close_int(long id, u_int32_t flags, int force) { DbEnv *dbenv; int ret; - ct_entry *ctp; + ct_entry *ctp, *dbctp, *nextctp; ret = 0; ctp = get_tableent(id); @@ -617,6 +617,32 @@ __dbenv_close_int(long id, u_int32_t flags, int force) if (__dbsrv_verbose) printf("Closing env id %ld\n", id); + /* + * If we're timing out an env, we want to close all of its + * database handles as well. All of the txns and cursors + * must have been timed out prior to timing out the env. + */ + if (force) + for (dbctp = LIST_FIRST(&__dbsrv_head); + dbctp != NULL; dbctp = nextctp) { + nextctp = LIST_NEXT(dbctp, entries); + if (dbctp->ct_type != CT_DB) + continue; + if (dbctp->ct_envparent != ctp) + continue; + /* + * We found a DB handle that is part of this + * environment. Close it. + */ + __db_close_int(dbctp->ct_id, 0); + /* + * If we timed out a dbp, we may have removed + * multiple ctp entries. Start over with a + * guaranteed good ctp. 
+ */ + nextctp = LIST_FIRST(&__dbsrv_head); + } + ret = dbenv->close(flags); __dbdel_ctp(ctp); return (ret); @@ -715,10 +741,8 @@ env_recover(char *progname) hp = LIST_NEXT(hp, entries)) { exitval = 0; dbenv = new DbEnv(DB_CXX_NO_EXCEPTIONS); - if (__dbsrv_verbose == 1) { + if (__dbsrv_verbose == 1) (void)dbenv->set_verbose(DB_VERB_RECOVERY, 1); - (void)dbenv->set_verbose(DB_VERB_CHKPOINT, 1); - } dbenv->set_errfile(stderr); dbenv->set_errpfx(progname); if (hp->passwd != NULL) diff --git a/db/rpc_server/db_server.x b/db/rpc_server/db_server.x index a2b0cd666..26b8de8e6 100644 --- a/db/rpc_server/db_server.x +++ b/db/rpc_server/db_server.x @@ -496,11 +496,10 @@ struct __db_open_msg { }; struct __db_open_reply { - /* num return vars: 4 */ + /* num return vars: 3 */ int status; unsigned int dbcl_id; unsigned int type; - unsigned int dbflags; unsigned int lorder; }; @@ -662,6 +661,7 @@ struct __db_rename_reply { struct __db_stat_msg { unsigned int dbpcl_id; + unsigned int txnpcl_id; unsigned int flags; }; @@ -898,5 +898,5 @@ program DB_RPC_SERVERPROG { __dbc_get_reply __DB_dbc_get(__dbc_get_msg) = 66; __dbc_pget_reply __DB_dbc_pget(__dbc_pget_msg) = 67; __dbc_put_reply __DB_dbc_put(__dbc_put_msg) = 68; - } = 4002; + } = 4003; } = 351457; diff --git a/db/rpc_server/java/AssociateCallbacks.java b/db/rpc_server/java/AssociateCallbacks.java new file mode 100644 index 000000000..4e8e24d51 --- /dev/null +++ b/db/rpc_server/java/AssociateCallbacks.java @@ -0,0 +1,178 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2001-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: AssociateCallbacks.java,v 1.3 2004/04/06 20:43:41 mjc Exp $ + */ + +package com.sleepycat.db.rpcserver; + +import com.sleepycat.db.*; + +/** Implementations of the callbacks required by the Tcl test suite. **/ +class AssociateCallbacks { + /* + * Tcl passes one of these special flags for the callbacks used in the + * test suite. Note: these must match db_int.in! 
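[Editor's illustration] The comment above explains that the Tcl test suite selects canned secondary-key callbacks through the DB_RPC2ND_* flag bits defined just below, because function pointers cannot travel over RPC. For readers more familiar with the C API, here is a hedged C rendering of the simplest of the callbacks that follow, the reverse-the-data key creator (DB_RPC2ND_REVERSEDATA); the function name and error handling are invented for the example.

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    #include <db.h>

    /* Secondary key = primary data with its bytes reversed. */
    int
    reverse_data_callback(secondary, key, data, result)
    	DB *secondary;
    	const DBT *key, *data;
    	DBT *result;
    {
    	const u_int8_t *src;
    	u_int8_t *buf;
    	u_int32_t i;

    	(void)secondary;		/* Unused in this sketch. */
    	(void)key;

    	if ((buf = malloc(data->size)) == NULL)
    		return (ENOMEM);
    	src = (const u_int8_t *)data->data;
    	for (i = 0; i < data->size; i++)
    		buf[data->size - 1 - i] = src[i];

    	memset(result, 0, sizeof(*result));
    	result->data = buf;
    	result->size = data->size;
    	result->flags = DB_DBT_APPMALLOC;	/* DB frees buf when done. */
    	return (0);
    }

A local application would pass such a function as the callback argument to DB->associate(); the RPC servers instead pick one of these fixed, flag-selected callbacks, which is why the C server above rejects any DB_RPC2ND_MASK bits it does not implement.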
+ */ + static final int DB_RPC2ND_REVERSEDATA = 0x00100000; + static final int DB_RPC2ND_NOOP = 0x00200000; + static final int DB_RPC2ND_CONCATKEYDATA = 0x00300000; + static final int DB_RPC2ND_CONCATDATAKEY = 0x00400000; + static final int DB_RPC2ND_REVERSECONCAT = 0x00500000; + static final int DB_RPC2ND_TRUNCDATA = 0x00600000; + static final int DB_RPC2ND_CONSTANT = 0x00700000; + static final int DB_RPC2ND_GETZIP = 0x00800000; + static final int DB_RPC2ND_GETNAME = 0x00900000; + + static final int DB_RPC2ND_MASK = 0x00f00000; + + static SecondaryKeyCreator getCallback(int flags) { + switch(flags & DB_RPC2ND_MASK) { + case 0: + return null; + + case DB_RPC2ND_REVERSEDATA: + return new SecondaryKeyCreator() { + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, DatabaseEntry data, DatabaseEntry result) + throws DatabaseException { + byte[] dataBuf = data.getData(); + int dataSize = data.getSize(); + byte[] buf = new byte[dataSize]; + for (int i = 0; i < dataSize; i++) + buf[dataSize - 1 - i] = dataBuf[i]; + result.setData(buf); + result.setSize(buf.length); + return true; + } + }; + + case DB_RPC2ND_NOOP: + return new SecondaryKeyCreator() { + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, DatabaseEntry data, DatabaseEntry result) + throws DatabaseException { + result.setData(data.getData()); + result.setSize(data.getSize()); + return true; + } + }; + + case DB_RPC2ND_CONCATKEYDATA: + return new SecondaryKeyCreator() { + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, DatabaseEntry data, DatabaseEntry result) + throws DatabaseException { + byte[] buf = new byte[key.getSize() + + data.getSize()]; + System.arraycopy(key.getData(), 0, + buf, 0, + key.getSize()); + System.arraycopy(data.getData(), 0, + buf, key.getSize(), + data.getSize()); + result.setData(buf); + result.setSize(buf.length); + return true; + } + }; + + case DB_RPC2ND_CONCATDATAKEY: + return new SecondaryKeyCreator() { + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, DatabaseEntry data, DatabaseEntry result) + throws DatabaseException { + byte[] buf = new byte[key.getSize() + + data.getSize()]; + System.arraycopy(data.getData(), 0, + buf, 0, + data.getSize()); + System.arraycopy(key.getData(), 0, + buf, data.getSize(), + key.getSize()); + result.setData(buf); + result.setSize(buf.length); + return true; + } + }; + + case DB_RPC2ND_REVERSECONCAT: + return new SecondaryKeyCreator() { + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, DatabaseEntry data, DatabaseEntry result) + throws DatabaseException { + byte[] keyBuf = key.getData(); + int keySize = key.getSize(); + byte[] dataBuf = data.getData(); + int dataSize = data.getSize(); + byte[] buf = new byte[keySize + dataSize]; + for (int i = 0; i < keySize; i++) + buf[buf.length - 1 - i] = keyBuf[i]; + for (int i = 0; i < dataSize; i++) + buf[dataSize - 1 - i] = dataBuf[i]; + result.setData(buf); + result.setSize(buf.length); + return true; + } + }; + + case DB_RPC2ND_TRUNCDATA: + return new SecondaryKeyCreator() { + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, DatabaseEntry data, DatabaseEntry result) + throws DatabaseException { + result.setData(data.getData()); + result.setOffset(1); + result.setSize(data.getSize() - 1); + return true; + } + }; + + case DB_RPC2ND_CONSTANT: + return new SecondaryKeyCreator() { + public boolean createSecondaryKey(SecondaryDatabase 
secondary, + DatabaseEntry key, DatabaseEntry data, DatabaseEntry result) + throws DatabaseException { + byte[] buf = "constant data".getBytes(); + result.setData(buf); + result.setSize(buf.length); + return true; + } + }; + + case DB_RPC2ND_GETZIP: + return new SecondaryKeyCreator() { + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, DatabaseEntry data, DatabaseEntry result) + throws DatabaseException { + result.setData(data.getData()); + result.setSize(5); + return true; + } + }; + + case DB_RPC2ND_GETNAME: + return new SecondaryKeyCreator() { + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, DatabaseEntry data, DatabaseEntry result) + throws DatabaseException { + result.setData(data.getData()); + result.setOffset(5); + result.setSize(data.getSize() - 5); + return true; + } + }; + + default: + Server.err.println("Warning: Java RPC server doesn't implement callback: " + (flags & DB_RPC2ND_MASK)); + return null; + } + } + + // Utility classes should not have a public or default constructor + protected AssociateCallbacks() { + } +} diff --git a/db/rpc_server/java/Dispatcher.java b/db/rpc_server/java/Dispatcher.java new file mode 100644 index 000000000..618cb9295 --- /dev/null +++ b/db/rpc_server/java/Dispatcher.java @@ -0,0 +1,721 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2001-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Dispatcher.java,v 1.1 2004/04/06 20:43:41 mjc Exp $ + */ + +package com.sleepycat.db.rpcserver; + +import com.sleepycat.db.*; +import com.sleepycat.db.internal.DbConstants; +import java.io.IOException; +import org.acplt.oncrpc.OncRpcException; + +/** + * Dispatcher for RPC messages for the Java RPC server. + * These are hooks that translate between RPC msg/reply structures and + * DB calls, which keeps the real implementation code in Rpc* classes cleaner. 
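[Editor's illustration] The class comment above describes the pattern every stub that follows uses: resolve the client-side handle id, answer DB_NOSERVER_ID when the id is stale, and otherwise forward to the real implementation. Below is a hypothetical C rendering of that pattern for a single sync call; the message and reply structs and both helpers are invented for the illustration (only DB_NOSERVER_ID comes from db.h).

    #include <stdio.h>

    #include <db.h>

    struct sync_msg   { long dbpcl_id; unsigned int flags; };
    struct sync_reply { int status; };

    static int live_handle;		/* Dummy object standing in for a DB handle. */

    /* Pretend the handle table knows only id 1. */
    static void *
    lookup_db_handle(id)
    	long id;
    {
    	return (id == 1 ? (void *)&live_handle : NULL);
    }

    static int
    do_db_sync(dbp, flags)
    	void *dbp;
    	unsigned int flags;
    {
    	(void)dbp;
    	(void)flags;
    	return (0);			/* Pretend the sync succeeded. */
    }

    static struct sync_reply *
    dispatch_db_sync(msg)
    	struct sync_msg *msg;
    {
    	static struct sync_reply reply;
    	void *dbp;

    	if ((dbp = lookup_db_handle(msg->dbpcl_id)) == NULL)
    		reply.status = DB_NOSERVER_ID;
    	else
    		reply.status = do_db_sync(dbp, msg->flags);
    	return (&reply);
    }

    int
    main()
    {
    	struct sync_msg live = { 1, 0 }, stale = { 2, 0 };

    	printf("live handle:  status %d\n", dispatch_db_sync(&live)->status);
    	printf("stale handle: status %d\n", dispatch_db_sync(&stale)->status);
    	return (0);
    }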
+ */ +public abstract class Dispatcher extends ServerStubs { + abstract int addEnv(RpcDbEnv rdbenv); + abstract int addDatabase(RpcDb rdb); + abstract int addTxn(RpcDbTxn rtxn); + abstract int addCursor(RpcDbc rdbc); + abstract void delEnv(RpcDbEnv rdbenv, boolean dispose); + abstract void delDatabase(RpcDb rdb, boolean dispose); + abstract void delTxn(RpcDbTxn rtxn, boolean dispose); + abstract void delCursor(RpcDbc rdbc, boolean dispose); + abstract RpcDbEnv getEnv(int envid); + abstract RpcDb getDatabase(int dbid); + abstract RpcDbTxn getTxn(int txnbid); + abstract RpcDbc getCursor(int dbcid); + + public Dispatcher() throws IOException, OncRpcException { + super(); + } + + //// Database methods + + public __db_associate_reply __DB_db_associate_4003(__db_associate_msg args) { + __db_associate_reply reply = new __db_associate_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.associate(this, args, reply); + return reply; + } + + public __db_bt_maxkey_reply __DB_db_bt_maxkey_4003(__db_bt_maxkey_msg args) { + __db_bt_maxkey_reply reply = new __db_bt_maxkey_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.set_bt_maxkey(this, args, reply); + return reply; + } + + public __db_get_bt_minkey_reply __DB_db_get_bt_minkey_4003(__db_get_bt_minkey_msg args) { + __db_get_bt_minkey_reply reply = new __db_get_bt_minkey_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.get_bt_minkey(this, args, reply); + return reply; + } + + public __db_bt_minkey_reply __DB_db_bt_minkey_4003(__db_bt_minkey_msg args) { + __db_bt_minkey_reply reply = new __db_bt_minkey_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.set_bt_minkey(this, args, reply); + return reply; + } + + public __db_close_reply __DB_db_close_4003(__db_close_msg args) { + __db_close_reply reply = new __db_close_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.close(this, args, reply); + return reply; + } + + public __db_create_reply __DB_db_create_4003(__db_create_msg args) { + __db_create_reply reply = new __db_create_reply(); + RpcDb rdb = new RpcDb(getEnv(args.dbenvcl_id)); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.create(this, args, reply); + return reply; + } + + public __db_cursor_reply __DB_db_cursor_4003(__db_cursor_msg args) { + __db_cursor_reply reply = new __db_cursor_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.cursor(this, args, reply); + return reply; + } + + public __db_del_reply __DB_db_del_4003(__db_del_msg args) { + __db_del_reply reply = new __db_del_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.del(this, args, reply); + return reply; + } + + public __db_get_encrypt_flags_reply __DB_db_get_encrypt_flags_4003(__db_get_encrypt_flags_msg args) { + __db_get_encrypt_flags_reply reply = new __db_get_encrypt_flags_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.get_encrypt_flags(this, args, reply); + return reply; + } + + public __db_encrypt_reply 
__DB_db_encrypt_4003(__db_encrypt_msg args) { + __db_encrypt_reply reply = new __db_encrypt_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.set_encrypt(this, args, reply); + return reply; + } + + public __db_get_extentsize_reply __DB_db_get_extentsize_4003(__db_get_extentsize_msg args) { + __db_get_extentsize_reply reply = new __db_get_extentsize_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.get_q_extentsize(this, args, reply); + return reply; + } + + public __db_extentsize_reply __DB_db_extentsize_4003(__db_extentsize_msg args) { + __db_extentsize_reply reply = new __db_extentsize_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.set_q_extentsize(this, args, reply); + return reply; + } + + public __db_get_flags_reply __DB_db_get_flags_4003(__db_get_flags_msg args) { + __db_get_flags_reply reply = new __db_get_flags_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.get_flags(this, args, reply); + return reply; + } + + public __db_flags_reply __DB_db_flags_4003(__db_flags_msg args) { + __db_flags_reply reply = new __db_flags_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.set_flags(this, args, reply); + return reply; + } + + public __db_get_reply __DB_db_get_4003(__db_get_msg args) { + __db_get_reply reply = new __db_get_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.get(this, args, reply); + return reply; + } + + public __db_get_h_ffactor_reply __DB_db_get_h_ffactor_4003(__db_get_h_ffactor_msg args) { + __db_get_h_ffactor_reply reply = new __db_get_h_ffactor_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.get_h_ffactor(this, args, reply); + return reply; + } + + public __db_h_ffactor_reply __DB_db_h_ffactor_4003(__db_h_ffactor_msg args) { + __db_h_ffactor_reply reply = new __db_h_ffactor_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.set_h_ffactor(this, args, reply); + return reply; + } + + public __db_get_h_nelem_reply __DB_db_get_h_nelem_4003(__db_get_h_nelem_msg args) { + __db_get_h_nelem_reply reply = new __db_get_h_nelem_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.get_h_nelem(this, args, reply); + return reply; + } + + public __db_h_nelem_reply __DB_db_h_nelem_4003(__db_h_nelem_msg args) { + __db_h_nelem_reply reply = new __db_h_nelem_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.set_h_nelem(this, args, reply); + return reply; + } + + public __db_join_reply __DB_db_join_4003(__db_join_msg args) { + __db_join_reply reply = new __db_join_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.join(this, args, reply); + return reply; + } + + public __db_key_range_reply __DB_db_key_range_4003(__db_key_range_msg args) { + __db_key_range_reply reply = new __db_key_range_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb 
== null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.key_range(this, args, reply); + return reply; + } + + public __db_get_lorder_reply __DB_db_get_lorder_4003(__db_get_lorder_msg args) { + __db_get_lorder_reply reply = new __db_get_lorder_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.get_lorder(this, args, reply); + return reply; + } + + public __db_lorder_reply __DB_db_lorder_4003(__db_lorder_msg args) { + __db_lorder_reply reply = new __db_lorder_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.set_lorder(this, args, reply); + return reply; + } + + public __db_get_name_reply __DB_db_get_name_4003(__db_get_name_msg args) { + __db_get_name_reply reply = new __db_get_name_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.get_name(this, args, reply); + return reply; + } + + public __db_get_open_flags_reply __DB_db_get_open_flags_4003(__db_get_open_flags_msg args) { + __db_get_open_flags_reply reply = new __db_get_open_flags_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.get_open_flags(this, args, reply); + return reply; + } + + public __db_open_reply __DB_db_open_4003(__db_open_msg args) { + __db_open_reply reply = new __db_open_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.open(this, args, reply); + return reply; + } + + public __db_get_pagesize_reply __DB_db_get_pagesize_4003(__db_get_pagesize_msg args) { + __db_get_pagesize_reply reply = new __db_get_pagesize_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.get_pagesize(this, args, reply); + return reply; + } + + public __db_pagesize_reply __DB_db_pagesize_4003(__db_pagesize_msg args) { + __db_pagesize_reply reply = new __db_pagesize_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.set_pagesize(this, args, reply); + return reply; + } + + public __db_pget_reply __DB_db_pget_4003(__db_pget_msg args) { + __db_pget_reply reply = new __db_pget_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.pget(this, args, reply); + return reply; + } + + public __db_put_reply __DB_db_put_4003(__db_put_msg args) { + __db_put_reply reply = new __db_put_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.put(this, args, reply); + return reply; + } + + public __db_remove_reply __DB_db_remove_4003(__db_remove_msg args) { + __db_remove_reply reply = new __db_remove_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.remove(this, args, reply); + return reply; + } + + public __db_rename_reply __DB_db_rename_4003(__db_rename_msg args) { + __db_rename_reply reply = new __db_rename_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.rename(this, args, reply); + return reply; + } + + public __db_get_re_delim_reply __DB_db_get_re_delim_4003(__db_get_re_delim_msg args) { + __db_get_re_delim_reply reply = 
new __db_get_re_delim_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.get_re_delim(this, args, reply); + return reply; + } + + public __db_re_delim_reply __DB_db_re_delim_4003(__db_re_delim_msg args) { + __db_re_delim_reply reply = new __db_re_delim_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.set_re_delim(this, args, reply); + return reply; + } + + public __db_get_re_len_reply __DB_db_get_re_len_4003(__db_get_re_len_msg args) { + __db_get_re_len_reply reply = new __db_get_re_len_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.get_re_len(this, args, reply); + return reply; + } + + public __db_re_len_reply __DB_db_re_len_4003(__db_re_len_msg args) { + __db_re_len_reply reply = new __db_re_len_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.set_re_len(this, args, reply); + return reply; + } + + public __db_get_re_pad_reply __DB_db_get_re_pad_4003(__db_get_re_pad_msg args) { + __db_get_re_pad_reply reply = new __db_get_re_pad_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.get_re_pad(this, args, reply); + return reply; + } + + public __db_re_pad_reply __DB_db_re_pad_4003(__db_re_pad_msg args) { + __db_re_pad_reply reply = new __db_re_pad_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.set_re_pad(this, args, reply); + return reply; + } + + public __db_stat_reply __DB_db_stat_4003(__db_stat_msg args) { + __db_stat_reply reply = new __db_stat_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.stat(this, args, reply); + return reply; + } + + public __db_sync_reply __DB_db_sync_4003(__db_sync_msg args) { + __db_sync_reply reply = new __db_sync_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.sync(this, args, reply); + return reply; + } + + public __db_truncate_reply __DB_db_truncate_4003(__db_truncate_msg args) { + __db_truncate_reply reply = new __db_truncate_reply(); + RpcDb rdb = getDatabase(args.dbpcl_id); + if (rdb == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdb.truncate(this, args, reply); + return reply; + } + + //// Cursor methods + + public __dbc_close_reply __DB_dbc_close_4003(__dbc_close_msg args) { + __dbc_close_reply reply = new __dbc_close_reply(); + RpcDbc rdbc = getCursor(args.dbccl_id); + if (rdbc == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbc.close(this, args, reply); + return reply; + } + + public __dbc_count_reply __DB_dbc_count_4003(__dbc_count_msg args) { + __dbc_count_reply reply = new __dbc_count_reply(); + RpcDbc rdbc = getCursor(args.dbccl_id); + if (rdbc == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbc.count(this, args, reply); + return reply; + } + + public __dbc_del_reply __DB_dbc_del_4003(__dbc_del_msg args) { + __dbc_del_reply reply = new __dbc_del_reply(); + RpcDbc rdbc = getCursor(args.dbccl_id); + if (rdbc == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbc.del(this, args, reply); + return reply; + } + + public __dbc_dup_reply __DB_dbc_dup_4003(__dbc_dup_msg args) 
{ + __dbc_dup_reply reply = new __dbc_dup_reply(); + RpcDbc rdbc = getCursor(args.dbccl_id); + if (rdbc == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbc.dup(this, args, reply); + return reply; + } + + public __dbc_get_reply __DB_dbc_get_4003(__dbc_get_msg args) { + __dbc_get_reply reply = new __dbc_get_reply(); + RpcDbc rdbc = getCursor(args.dbccl_id); + if (rdbc == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbc.get(this, args, reply); + return reply; + } + + public __dbc_pget_reply __DB_dbc_pget_4003(__dbc_pget_msg args) { + __dbc_pget_reply reply = new __dbc_pget_reply(); + RpcDbc rdbc = getCursor(args.dbccl_id); + if (rdbc == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbc.pget(this, args, reply); + return reply; + } + + public __dbc_put_reply __DB_dbc_put_4003(__dbc_put_msg args) { + __dbc_put_reply reply = new __dbc_put_reply(); + RpcDbc rdbc = getCursor(args.dbccl_id); + if (rdbc == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbc.put(this, args, reply); + return reply; + } + + //// Environment methods + + public __env_get_cachesize_reply __DB_env_get_cachesize_4003(__env_get_cachesize_msg args) { + __env_get_cachesize_reply reply = new __env_get_cachesize_reply(); + RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); + if (rdbenv == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbenv.get_cachesize(this, args, reply); + return reply; + } + + public __env_cachesize_reply __DB_env_cachesize_4003(__env_cachesize_msg args) { + __env_cachesize_reply reply = new __env_cachesize_reply(); + RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); + if (rdbenv == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbenv.set_cachesize(this, args, reply); + return reply; + } + + public __env_close_reply __DB_env_close_4003(__env_close_msg args) { + __env_close_reply reply = new __env_close_reply(); + RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); + if (rdbenv == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbenv.close(this, args, reply); + return reply; + } + + public __env_create_reply __DB_env_create_4003(__env_create_msg args) { + __env_create_reply reply = new __env_create_reply(); + RpcDbEnv rdbenv = new RpcDbEnv(); + rdbenv.create(this, args, reply); + return reply; + } + + public __env_dbremove_reply __DB_env_dbremove_4003(__env_dbremove_msg args) { + __env_dbremove_reply reply = new __env_dbremove_reply(); + RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); + if (rdbenv == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbenv.dbremove(this, args, reply); + return reply; + } + + public __env_dbrename_reply __DB_env_dbrename_4003(__env_dbrename_msg args) { + __env_dbrename_reply reply = new __env_dbrename_reply(); + RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); + if (rdbenv == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbenv.dbrename(this, args, reply); + return reply; + } + + public __env_get_encrypt_flags_reply __DB_env_get_encrypt_flags_4003(__env_get_encrypt_flags_msg args) { + __env_get_encrypt_flags_reply reply = new __env_get_encrypt_flags_reply(); + RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); + if (rdbenv == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbenv.get_encrypt_flags(this, args, reply); + return reply; + } + + public __env_encrypt_reply __DB_env_encrypt_4003(__env_encrypt_msg args) { + __env_encrypt_reply reply = new __env_encrypt_reply(); + RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); + if (rdbenv == null) + reply.status = DbConstants.DB_NOSERVER_ID; + 
else + rdbenv.set_encrypt(this, args, reply); + return reply; + } + + public __env_get_flags_reply __DB_env_get_flags_4003(__env_get_flags_msg args) { + __env_get_flags_reply reply = new __env_get_flags_reply(); + RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); + if (rdbenv == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbenv.get_flags(this, args, reply); + return reply; + } + + public __env_flags_reply __DB_env_flags_4003(__env_flags_msg args) { + __env_flags_reply reply = new __env_flags_reply(); + RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); + if (rdbenv == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbenv.set_flags(this, args, reply); + return reply; + } + + public __env_get_home_reply __DB_env_get_home_4003(__env_get_home_msg args) { + __env_get_home_reply reply = new __env_get_home_reply(); + RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); + if (rdbenv == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbenv.get_home(this, args, reply); + return reply; + } + + public __env_get_open_flags_reply __DB_env_get_open_flags_4003(__env_get_open_flags_msg args) { + __env_get_open_flags_reply reply = new __env_get_open_flags_reply(); + RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); + if (rdbenv == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbenv.get_open_flags(this, args, reply); + return reply; + } + + public __env_open_reply __DB_env_open_4003(__env_open_msg args) { + __env_open_reply reply = new __env_open_reply(); + RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); + if (rdbenv == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbenv.open(this, args, reply); + return reply; + } + + public __env_remove_reply __DB_env_remove_4003(__env_remove_msg args) { + __env_remove_reply reply = new __env_remove_reply(); + RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); + if (rdbenv == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbenv.remove(this, args, reply); + return reply; + } + + //// Transaction methods + + public __txn_abort_reply __DB_txn_abort_4003(__txn_abort_msg args) { + __txn_abort_reply reply = new __txn_abort_reply(); + RpcDbTxn rdbtxn = getTxn(args.txnpcl_id); + if (rdbtxn == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbtxn.abort(this, args, reply); + return reply; + } + + public __txn_begin_reply __DB_txn_begin_4003(__txn_begin_msg args) { + __txn_begin_reply reply = new __txn_begin_reply(); + RpcDbTxn rdbtxn = new RpcDbTxn(getEnv(args.dbenvcl_id), null); + rdbtxn.begin(this, args, reply); + return reply; + } + + public __txn_commit_reply __DB_txn_commit_4003(__txn_commit_msg args) { + __txn_commit_reply reply = new __txn_commit_reply(); + RpcDbTxn rdbtxn = getTxn(args.txnpcl_id); + if (rdbtxn == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbtxn.commit(this, args, reply); + return reply; + } + + public __txn_discard_reply __DB_txn_discard_4003(__txn_discard_msg args) { + __txn_discard_reply reply = new __txn_discard_reply(); + RpcDbTxn rdbtxn = getTxn(args.txnpcl_id); + if (rdbtxn == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbtxn.discard(this, args, reply); + return reply; + } + + public __txn_prepare_reply __DB_txn_prepare_4003(__txn_prepare_msg args) { + __txn_prepare_reply reply = new __txn_prepare_reply(); + RpcDbTxn rdbtxn = getTxn(args.txnpcl_id); + if (rdbtxn == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbtxn.prepare(this, args, reply); + return reply; + } + + public __txn_recover_reply __DB_txn_recover_4003(__txn_recover_msg args) { + 
__txn_recover_reply reply = new __txn_recover_reply(); + RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); + if (rdbenv == null) + reply.status = DbConstants.DB_NOSERVER_ID; + else + rdbenv.txn_recover(this, args, reply); + return reply; + } +} diff --git a/db/rpc_server/java/FreeList.java b/db/rpc_server/java/FreeList.java index 05a18fdf6..bec2b8772 100644 --- a/db/rpc_server/java/FreeList.java +++ b/db/rpc_server/java/FreeList.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 - * Sleepycat Software. All rights reserved. + * Copyright (c) 2001-2004 + * Sleepycat Software. All rights reserved. * - * $Id: FreeList.java,v 1.5 2003/01/08 05:45:34 bostic Exp $ + * $Id: FreeList.java,v 1.7 2004/04/06 20:43:41 mjc Exp $ */ package com.sleepycat.db.rpcserver; @@ -15,88 +15,87 @@ import java.util.*; * Keep track of a list of objects by id with a free list. * Intentionally package-protected exposure. */ -class FreeList -{ - class FreeIndex { - int index; - FreeIndex(int index) { this.index = index; } - int getIndex() { return index; } - } +class FreeList { + class FreeIndex { + int index; + FreeIndex(int index) { this.index = index; } + int getIndex() { return index; } + } - Vector items = new Vector(); - FreeIndex free_head = null; + Vector items = new Vector(); + FreeIndex free_head = null; - public synchronized int add(Object obj) { - int pos; - if (free_head == null) { - pos = items.size(); - items.addElement(obj); - if (pos + 1 % 1000 == 0) - DbServer.err.println(this + " grew to size " + (pos + 1)); - } else { - pos = free_head.getIndex(); - free_head = (FreeIndex)items.elementAt(pos); - items.setElementAt(obj, pos); - } - return pos; - } + public synchronized int add(Object obj) { + int pos; + if (free_head == null) { + pos = items.size(); + items.addElement(obj); + if (pos + 1 % 1000 == 0) + Server.err.println(this + " grew to size " + (pos + 1)); + } else { + pos = free_head.getIndex(); + free_head = (FreeIndex)items.elementAt(pos); + items.setElementAt(obj, pos); + } + return pos; + } - public synchronized void del(int pos) { - Object obj = items.elementAt(pos); - if (obj != null && obj instanceof FreeIndex) - throw new NoSuchElementException("index " + pos + " has already been freed"); - items.setElementAt(free_head, pos); - free_head = new FreeIndex(pos); - } + public synchronized void del(int pos) { + Object obj = items.elementAt(pos); + if (obj != null && obj instanceof FreeIndex) + throw new NoSuchElementException("index " + pos + " has already been freed"); + items.setElementAt(free_head, pos); + free_head = new FreeIndex(pos); + } - public void del(Object obj) { - del(items.indexOf(obj)); - } + public void del(Object obj) { + del(items.indexOf(obj)); + } - public Object get(int pos) { - Object obj = items.elementAt(pos); - if (obj instanceof FreeIndex) - obj = null; - return obj; - } + public Object get(int pos) { + Object obj = items.elementAt(pos); + if (obj instanceof FreeIndex) + obj = null; + return obj; + } - public LocalIterator iterator() { - return new FreeListIterator(); - } + public LocalIterator iterator() { + return new FreeListIterator(); + } - /** - * Iterator for a FreeList. Note that this class doesn't implement - * java.util.Iterator to maintain compatibility with Java 1.1 - * Intentionally package-protected exposure. - */ - class FreeListIterator implements LocalIterator { - int current; + /** + * Iterator for a FreeList. 
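Every __DB_*_4003 stub above follows the same shape: allocate the generated reply object, resolve the client-side id to a server-side wrapper (RpcDb, RpcDbc, RpcDbEnv or RpcDbTxn), report DB_NOSERVER_ID when the handle is unknown, and otherwise delegate to the wrapper, which fills in reply.status. A minimal standalone sketch of that lookup-or-fail pattern follows; DispatchSketch, Handler and the placeholder error value are illustrative names, not part of the patch.

import java.util.HashMap;
import java.util.Map;

final class DispatchSketch {
    // Placeholder; the real server uses DbConstants.DB_NOSERVER_ID here.
    static final int UNKNOWN_HANDLE = -1;

    interface Handler { void handle(Reply reply); }
    static final class Reply { int status; }

    private final Map<Integer, Handler> handles = new HashMap<Integer, Handler>();

    void register(int id, Handler h) { handles.put(id, h); }

    Reply dispatch(int clientId) {
        Reply reply = new Reply();
        Handler h = handles.get(clientId);   // resolve the client id to a wrapper
        if (h == null)
            reply.status = UNKNOWN_HANDLE;   // unknown handle: fail the request
        else
            h.handle(reply);                 // known handle: delegate; the handler sets status
        return reply;
    }
}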
Note that this class doesn't implement + * java.util.Iterator to maintain compatibility with Java 1.1 + * Intentionally package-protected exposure. + */ + class FreeListIterator implements LocalIterator { + int current; - FreeListIterator() { current = findNext(-1); } + FreeListIterator() { current = findNext(-1); } - private int findNext(int start) { - int next = start; - while (++next < items.size()) { - Object obj = items.elementAt(next); - if (obj == null || !(obj instanceof FreeIndex)) - break; - } - return next; - } + private int findNext(int start) { + int next = start; + while (++next < items.size()) { + Object obj = items.elementAt(next); + if (obj == null || !(obj instanceof FreeIndex)) + break; + } + return next; + } - public boolean hasNext() { - return (findNext(current) < items.size()); - } + public boolean hasNext() { + return (findNext(current) < items.size()); + } - public Object next() { - current = findNext(current); - if (current == items.size()) - throw new NoSuchElementException("enumerated past end of FreeList"); - return items.elementAt(current); - } + public Object next() { + current = findNext(current); + if (current == items.size()) + throw new NoSuchElementException("enumerated past end of FreeList"); + return items.elementAt(current); + } - public void remove() { - del(current); - } - } + public void remove() { + del(current); + } + } } diff --git a/db/rpc_server/java/JoinCursorAdapter.java b/db/rpc_server/java/JoinCursorAdapter.java new file mode 100644 index 000000000..78495f28f --- /dev/null +++ b/db/rpc_server/java/JoinCursorAdapter.java @@ -0,0 +1,170 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2001-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: JoinCursorAdapter.java,v 1.1 2004/04/06 20:43:41 mjc Exp $ + */ + +package com.sleepycat.db.rpcserver; + +import com.sleepycat.db.*; + +class JoinCursorAdapter extends Cursor { + JoinCursor jc; + + JoinCursorAdapter(Database database, JoinCursor jc) + throws DatabaseException { + this.database = database; + this.config = new CursorConfig(); + this.jc = jc; + } + + public synchronized void close() + throws DatabaseException { + jc.close(); + } + + public Cursor dup(boolean samePosition) + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } + + public int count() + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } + + public OperationStatus delete() + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } + + public OperationStatus getCurrent(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } + + public OperationStatus getFirst(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } + + public OperationStatus getLast(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } + + public OperationStatus getNext(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } + + public OperationStatus getNextDup(DatabaseEntry key, + DatabaseEntry data, + 
LockMode lockMode) + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } + + public OperationStatus getNextNoDup(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } + + public OperationStatus getPrev(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } + + public OperationStatus getPrevDup(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } + + public OperationStatus getPrevNoDup(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } + + public OperationStatus getRecordNumber(DatabaseEntry recno, + LockMode lockMode) + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } + + public OperationStatus getSearchKey(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } + + public OperationStatus getSearchKeyRange(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } + + public OperationStatus getSearchBoth(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } + + public OperationStatus getSearchBothRange(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } + + public OperationStatus put(DatabaseEntry key, DatabaseEntry data) + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } + + public OperationStatus putNoOverwrite(DatabaseEntry key, DatabaseEntry data) + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } + + public OperationStatus putKeyFirst(DatabaseEntry key, DatabaseEntry data) + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } + + public OperationStatus putKeyLast(DatabaseEntry key, DatabaseEntry data) + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } + + public OperationStatus putNoDupData(DatabaseEntry key, DatabaseEntry data) + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } + + public OperationStatus putCurrent(DatabaseEntry data) + throws DatabaseException { + throw new UnsupportedOperationException("not supported on join cursors"); + } +} diff --git a/db/rpc_server/java/LocalIterator.java b/db/rpc_server/java/LocalIterator.java index 7822c0695..f142eb318 100644 --- a/db/rpc_server/java/LocalIterator.java +++ b/db/rpc_server/java/LocalIterator.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 - * Sleepycat Software. All rights reserved. + * Copyright (c) 2001-2004 + * Sleepycat Software. All rights reserved. 
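JoinCursorAdapter, shown above, lets the server hand out a join cursor through the same RpcDbc bookkeeping as an ordinary Cursor: close() is forwarded to the wrapped JoinCursor, while every positional and update method throws UnsupportedOperationException. A small self-contained sketch of that adapter idea, using plain stand-in interfaces instead of the com.sleepycat.db classes:

// Stand-in types so the sketch compiles on its own; the patch adapts
// com.sleepycat.db.JoinCursor to the Cursor class instead.
interface SimpleCursor {
    String next();
    void put(String value);
    void close();
}

final class ReadOnlyCursorAdapter implements SimpleCursor {
    private final java.util.Iterator<String> join;   // pretend this is the join result

    ReadOnlyCursorAdapter(java.util.Iterator<String> join) { this.join = join; }

    public String next() {                           // forwarded: a join cursor can read forward
        return join.hasNext() ? join.next() : null;
    }

    public void put(String value) {                  // refused: join cursors are read-only
        throw new UnsupportedOperationException("not supported on join cursors");
    }

    public void close() { /* nothing to release in this sketch */ }
}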
* - * $Id: LocalIterator.java,v 1.3 2003/01/08 05:45:38 bostic Exp $ + * $Id: LocalIterator.java,v 1.5 2004/04/06 20:43:41 mjc Exp $ */ package com.sleepycat.db.rpcserver; @@ -17,7 +17,7 @@ import java.util.*; * Intentionally package-protected exposure. */ interface LocalIterator { - boolean hasNext(); - Object next(); - void remove(); + boolean hasNext(); + Object next(); + void remove(); } diff --git a/db/rpc_server/java/README b/db/rpc_server/java/README index 48d7a86cb..f29c87805 100644 --- a/db/rpc_server/java/README +++ b/db/rpc_server/java/README @@ -1,6 +1,6 @@ Berkeley DB Java RPC server. -Copyright (c) 2002-2003 +Copyright (c) 2002-2004 Sleepycat Software. All rights reserved. The Java implementation of the Berkeley DB RPC server is intended diff --git a/db/rpc_server/java/RpcDb.java b/db/rpc_server/java/RpcDb.java index 065e843b2..9a16b5332 100644 --- a/db/rpc_server/java/RpcDb.java +++ b/db/rpc_server/java/RpcDb.java @@ -1,855 +1,778 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 - * Sleepycat Software. All rights reserved. + * Copyright (c) 2001-2004 + * Sleepycat Software. All rights reserved. * - * $Id: RpcDb.java,v 1.16 2003/10/29 16:02:56 mjc Exp $ + * $Id: RpcDb.java,v 1.23 2004/09/24 15:27:47 mjc Exp $ */ package com.sleepycat.db.rpcserver; import com.sleepycat.db.*; -import java.io.IOException; +import com.sleepycat.db.internal.DbConstants; import java.io.*; import java.util.*; /** * RPC wrapper around a db object for the Java RPC server. */ -public class RpcDb extends Timer -{ - static final byte[] empty = new byte[0]; - Db db; - RpcDbEnv rdbenv; - int refcount = 1; - String dbname, subdbname; - int type, setflags, openflags; - - public RpcDb(RpcDbEnv rdbenv) - { - this.rdbenv = rdbenv; - } - - void dispose() - { - if (db != null) { - try { - db.close(0); - } catch(DbException e) { - e.printStackTrace(DbServer.err); - } - db = null; - } - } - - public void associate(DbDispatcher server, - __db_associate_msg args, __db_associate_reply reply) - { - try { - RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); - DbTxn txn = (rtxn != null) ? rtxn.txn : null; - /* - * We do not support DB_CREATE for associate. Users - * can only access secondary indices on a read-only basis, - * so whatever they are looking for needs to be there already. 
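The FreeList and LocalIterator classes shown earlier in this patch back the integer ids that the dispatcher hands to clients: add() returns an index, reusing previously freed slots, del() puts an index back on the free list, and the iterator skips freed entries. The patch's FreeList threads its free list through the Vector itself via FreeIndex markers and avoids java.util.Iterator for Java 1.1 compatibility; the sketch below uses a separate deque and generics purely for brevity, and IdTable is an illustrative name, not a class from the patch.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.NoSuchElementException;

final class IdTable<T> {
    private static final Object FREE = new Object();          // marks a freed slot
    private final List<Object> slots = new ArrayList<Object>();
    private final Deque<Integer> freeSlots = new ArrayDeque<Integer>();

    synchronized int add(T obj) {                              // hand out a slot, reusing freed ones
        if (freeSlots.isEmpty()) {
            slots.add(obj);
            return slots.size() - 1;
        }
        int pos = freeSlots.pop();
        slots.set(pos, obj);
        return pos;
    }

    synchronized void del(int pos) {                           // return a slot to the free list
        if (slots.get(pos) == FREE)
            throw new NoSuchElementException("index " + pos + " has already been freed");
        slots.set(pos, FREE);
        freeSlots.push(pos);
    }

    @SuppressWarnings("unchecked")
    synchronized T get(int pos) {                              // null for freed slots, like FreeList.get
        Object obj = slots.get(pos);
        return obj == FREE ? null : (T) obj;
    }
}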
- */ - db.associate(txn, server.getDb(args.sdbpcl_id).db, null, args.flags); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } catch(IllegalArgumentException e) { - reply.status = DbServer.EINVAL; - } - } - - public void close(DbDispatcher server, - __db_close_msg args, __db_close_reply reply) - { - if (--refcount != 0) { - reply.status = 0; - return; - } - - try { - server.delDb(this, false); - db.close(args.flags); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } catch(IllegalArgumentException e) { - reply.status = DbServer.EINVAL; - } finally { - db = null; - } - } - - public void create(DbDispatcher server, - __db_create_msg args, __db_create_reply reply) - { - try { - db = new Db(server.getEnv(args.dbenvcl_id).dbenv, args.flags); - reply.dbcl_id = server.addDb(this); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void cursor(DbDispatcher server, - __db_cursor_msg args, __db_cursor_reply reply) - { - try { - RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); - DbTxn txn = (rtxn != null) ? rtxn.txn : null; - Dbc dbc = db.cursor(txn, args.flags); - RpcDbc rdbc = new RpcDbc(this, dbc, false); - rdbc.timer = (rtxn != null) ? rtxn.timer : this; - reply.dbcidcl_id = server.addCursor(rdbc); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void del(DbDispatcher server, - __db_del_msg args, __db_del_reply reply) - { - try { - RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); - DbTxn txn = (rtxn != null) ? rtxn.txn : null; - Dbt key = new Dbt(args.keydata); - key.setPartialLength(args.keydlen); - key.setPartialOffset(args.keydoff); - key.setUserBufferLength(args.keyulen); - key.setFlags(args.keyflags); - - db.delete(txn, key, args.flags); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void get(DbDispatcher server, - __db_get_msg args, __db_get_reply reply) - { - try { - RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); - DbTxn txn = (rtxn != null) ? 
rtxn.txn : null; - Dbt key = new Dbt(args.keydata); - key.setPartialLength(args.keydlen); - key.setPartialOffset(args.keydoff); - key.setUserBufferLength(args.keyulen); - key.setFlags(Db.DB_DBT_MALLOC | - (args.keyflags & Db.DB_DBT_PARTIAL)); - - Dbt data = new Dbt(args.datadata); - data.setPartialLength(args.datadlen); - data.setPartialOffset(args.datadoff); - data.setUserBufferLength(args.dataulen); - if ((args.flags & Db.DB_MULTIPLE) != 0) { - if (data.getData().length == 0) - data.setData(new byte[data.getUserBufferLength()]); - data.setFlags(Db.DB_DBT_USERMEM | - (args.dataflags & Db.DB_DBT_PARTIAL)); - } else - data.setFlags(Db.DB_DBT_MALLOC | - (args.dataflags & Db.DB_DBT_PARTIAL)); - - reply.status = db.get(txn, key, data, args.flags); - - if (key.getData() == args.keydata || - key.getData().length != key.getSize()) { - reply.keydata = new byte[key.getSize()]; - System.arraycopy(key.getData(), 0, reply.keydata, 0, key.getSize()); - } else - reply.keydata = key.getData(); - - if (data.getData() == args.datadata || - data.getData().length != data.getSize()) { - reply.datadata = new byte[data.getSize()]; - System.arraycopy(data.getData(), 0, reply.datadata, 0, data.getSize()); - } else - reply.datadata = data.getData(); - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - reply.keydata = reply.datadata = empty; - } - } - - public void join(DbDispatcher server, - __db_join_msg args, __db_join_reply reply) - { - try { - Dbc[] cursors = new Dbc[args.curs.length + 1]; - for(int i = 0; i < args.curs.length; i++) { - RpcDbc rdbc = server.getCursor(args.curs[i]); - if (rdbc == null) { - reply.status = Db.DB_NOSERVER_ID; - return; - } - cursors[i] = rdbc.dbc; - } - cursors[args.curs.length] = null; - - Dbc jdbc = db.join(cursors, args.flags); - - RpcDbc rjdbc = new RpcDbc(this, jdbc, true); - /* - * If our curslist has a parent txn, we need to use it too - * for the activity timeout. All cursors must be part of - * the same transaction, so just check the first. - */ - RpcDbc rdbc0 = server.getCursor(args.curs[0]); - if (rdbc0.timer != rdbc0) - rjdbc.timer = rdbc0.timer; - - /* - * All of the curslist cursors must point to the join - * cursor's timeout so that we do not timeout any of the - * curlist cursors while the join cursor is active. - */ - for(int i = 0; i < args.curs.length; i++) { - RpcDbc rdbc = server.getCursor(args.curs[i]); - rdbc.orig_timer = rdbc.timer; - rdbc.timer = rjdbc; - } - reply.dbcidcl_id = server.addCursor(rjdbc); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void key_range(DbDispatcher server, - __db_key_range_msg args, __db_key_range_reply reply) - { - try { - RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); - DbTxn txn = (rtxn != null) ? 
rtxn.txn : null; - Dbt key = new Dbt(args.keydata); - key.setPartialLength(args.keydlen); - key.setPartialOffset(args.keydoff); - key.setUserBufferLength(args.keyulen); - key.setFlags(args.keyflags); - - DbKeyRange range = new DbKeyRange(); - - db.keyRange(txn, key, range, args.flags); - reply.status = 0; - reply.less = range.less; - reply.equal = range.equal; - reply.greater = range.greater; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - private boolean findSharedDb(DbDispatcher server, __db_open_reply reply) - throws DbException - { - RpcDb rdb = null; - boolean matchFound = false; - LocalIterator i = ((DbServer)server).db_list.iterator(); - - while (!matchFound && i.hasNext()) { - rdb = (RpcDb)i.next(); - if (rdb != null && rdb != this && rdb.rdbenv == rdbenv && - (type == Db.DB_UNKNOWN || rdb.type == type) && - openflags == rdb.openflags && - setflags == rdb.setflags && - dbname != null && rdb.dbname != null && - dbname.equals(rdb.dbname) && - (subdbname == rdb.subdbname || - (subdbname != null && rdb.subdbname != null && - subdbname.equals(rdb.subdbname)))) - matchFound = true; - } - - if (matchFound) { - ++rdb.refcount; - reply.dbcl_id = ((FreeList.FreeListIterator)i).current; - reply.type = rdb.db.getDbType(); - reply.dbflags = rdb.db.get_flags_raw(); - // FIXME: not possible to work out byteorder from Java? - reply.lorder = rdb.db.isByteSwapped() ? 4321 : 1234; - reply.status = 0; - - DbServer.err.println("Sharing Db: " + reply.dbcl_id); - } - - return matchFound; - } - - public void get_name(DbDispatcher server, - __db_get_name_msg args, __db_get_name_reply reply) - { - try { - reply.filename = db.getFileName(); - reply.dbname = db.getDatabaseName(); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void get_open_flags(DbDispatcher server, - __db_get_open_flags_msg args, __db_get_open_flags_reply reply) - { - try { - reply.flags = db.getOpenFlags(); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void open(DbDispatcher server, - __db_open_msg args, __db_open_reply reply) - { - try { - dbname = (args.name.length() > 0) ? args.name : null; - subdbname = (args.subdb.length() > 0) ? args.subdb : null; - type = args.type; - openflags = args.flags & DbServer.DB_SERVER_DBFLAGS; - - if (findSharedDb(server, reply)) { - server.delDb(this, true); - } else { - DbServer.err.println("Calling db.open(" + null + ", " + dbname + ", " + subdbname + ", " + args.type + ", " + Integer.toHexString(args.flags) + ", " + args.mode + ")"); - db.open(null, dbname, subdbname, args.type, args.flags, args.mode); - - reply.dbcl_id = args.dbpcl_id; - reply.type = this.type = db.getDbType(); - reply.dbflags = db.get_flags_raw(); - // FIXME: not possible to work out byteorder from Java? - reply.lorder = db.isByteSwapped() ? 4321 : 1234; - reply.status = 0; - } - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } catch(FileNotFoundException e) { - e.printStackTrace(DbServer.err); - reply.status = Db.DB_NOTFOUND; - } - - // System.err.println("Db.open: reply.status = " + reply.status + ", reply.dbcl_id = " + reply.dbcl_id); - } - - public void pget(DbDispatcher server, - __db_pget_msg args, __db_pget_reply reply) - { - try { - RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); - DbTxn txn = (rtxn != null) ? 
rtxn.txn : null; - Dbt skey = new Dbt(args.skeydata); - skey.setPartialLength(args.skeydlen); - skey.setPartialOffset(args.skeydoff); - skey.setUserBufferLength(args.skeyulen); - skey.setFlags(Db.DB_DBT_MALLOC | - (args.skeyflags & Db.DB_DBT_PARTIAL)); - - Dbt pkey = new Dbt(args.pkeydata); - pkey.setPartialLength(args.pkeydlen); - pkey.setPartialOffset(args.pkeydoff); - pkey.setUserBufferLength(args.pkeyulen); - pkey.setFlags(Db.DB_DBT_MALLOC | - (args.pkeyflags & Db.DB_DBT_PARTIAL)); - - Dbt data = new Dbt(args.datadata); - data.setPartialLength(args.datadlen); - data.setPartialOffset(args.datadoff); - data.setUserBufferLength(args.dataulen); - data.setFlags(Db.DB_DBT_MALLOC | - (args.dataflags & Db.DB_DBT_PARTIAL)); - - db.get(txn, skey, pkey, data, args.flags); - - if (skey.getData() == args.skeydata || - skey.getData().length != skey.getSize()) { - reply.skeydata = new byte[skey.getSize()]; - System.arraycopy(skey.getData(), 0, reply.skeydata, 0, skey.getSize()); - } else - reply.skeydata = skey.getData(); - - if (pkey.getData() == args.pkeydata || - pkey.getData().length != pkey.getSize()) { - reply.pkeydata = new byte[pkey.getSize()]; - System.arraycopy(pkey.getData(), 0, reply.pkeydata, 0, pkey.getSize()); - } else - reply.pkeydata = pkey.getData(); - - if (data.getData() == args.datadata || - data.getData().length != data.getSize()) { - reply.datadata = new byte[data.getSize()]; - System.arraycopy(data.getData(), 0, reply.datadata, 0, data.getSize()); - } else - reply.datadata = data.getData(); - - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - reply.skeydata = reply.pkeydata = reply.datadata = empty; - } - } - - public void put(DbDispatcher server, - __db_put_msg args, __db_put_reply reply) - { - try { - RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); - DbTxn txn = (rtxn != null) ? rtxn.txn : null; - - Dbt key = new Dbt(args.keydata); - key.setPartialLength(args.keydlen); - key.setPartialOffset(args.keydoff); - key.setUserBufferLength(args.keyulen); - key.setFlags(Db.DB_DBT_MALLOC | - (args.keyflags & Db.DB_DBT_PARTIAL)); - - Dbt data = new Dbt(args.datadata); - data.setPartialLength(args.datadlen); - data.setPartialOffset(args.datadoff); - data.setUserBufferLength(args.dataulen); - data.setFlags(args.dataflags & Db.DB_DBT_PARTIAL); - - reply.status = db.put(txn, key, data, args.flags); - - /* - * If the client did a DB_APPEND, set up key in reply. - * Otherwise just status. - */ - if ((args.flags & Db.DB_APPEND) != 0) { - if (key.getData() == args.keydata || - key.getData().length != key.getSize()) { - reply.keydata = new byte[key.getSize()]; - System.arraycopy(key.getData(), 0, reply.keydata, 0, key.getSize()); - } else - reply.keydata = key.getData(); - } else - reply.keydata = empty; - } catch(DbException e) { - reply.keydata = empty; - reply.status = e.getErrno(); - DbServer.err.println("Exception, setting status to " + reply.status); - e.printStackTrace(DbServer.err); - } catch(IllegalArgumentException e) { - reply.status = DbServer.EINVAL; - } - } - - public void remove(DbDispatcher server, - __db_remove_msg args, __db_remove_reply reply) - { - try { - args.name = (args.name.length() > 0) ? args.name : null; - args.subdb = (args.subdb.length() > 0) ? 
args.subdb : null; - db.remove(args.name, args.subdb, args.flags); - db = null; - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } catch(FileNotFoundException e) { - e.printStackTrace(DbServer.err); - reply.status = Db.DB_NOTFOUND; - } finally { - server.delDb(this, false); - } - } - - public void rename(DbDispatcher server, - __db_rename_msg args, __db_rename_reply reply) - { - try { - args.name = (args.name.length() > 0) ? args.name : null; - args.subdb = (args.subdb.length() > 0) ? args.subdb : null; - args.newname = (args.newname.length() > 0) ? args.newname : null; - db.rename(args.name, args.subdb, args.newname, args.flags); - db = null; - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } catch(FileNotFoundException e) { - e.printStackTrace(DbServer.err); - reply.status = Db.DB_NOTFOUND; - } finally { - server.delDb(this, false); - } - } - - public void set_bt_maxkey(DbDispatcher server, - __db_bt_maxkey_msg args, __db_bt_maxkey_reply reply) - { - try { - db.set_bt_maxkey(args.maxkey); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void get_bt_minkey(DbDispatcher server, - __db_get_bt_minkey_msg args, __db_get_bt_minkey_reply reply) - { - try { - reply.minkey = db.getBtreeMinKey(); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void set_bt_minkey(DbDispatcher server, - __db_bt_minkey_msg args, __db_bt_minkey_reply reply) - { - try { - db.setBtreeMinKey(args.minkey); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void get_encrypt_flags(DbDispatcher server, - __db_get_encrypt_flags_msg args, __db_get_encrypt_flags_reply reply) - { - try { - reply.flags = db.getEncryptFlags(); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void set_encrypt(DbDispatcher server, - __db_encrypt_msg args, __db_encrypt_reply reply) - { - try { - db.setEncrypted(args.passwd, args.flags); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void get_flags(DbDispatcher server, - __db_get_flags_msg args, __db_get_flags_reply reply) - { - try { - reply.flags = db.getFlags(); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void set_flags(DbDispatcher server, - __db_flags_msg args, __db_flags_reply reply) - { - try { - // DbServer.err.println("Calling db.setflags(" + Integer.toHexString(args.flags) + ")"); - db.setFlags(args.flags); - setflags |= args.flags; - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void get_h_ffactor(DbDispatcher server, - __db_get_h_ffactor_msg args, __db_get_h_ffactor_reply reply) - { - try { - reply.ffactor = db.getHashFillFactor(); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void set_h_ffactor(DbDispatcher server, - __db_h_ffactor_msg args, __db_h_ffactor_reply reply) - { - try { - db.setHashFillFactor(args.ffactor); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); 
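Both the old wrappers being removed here and their replacements reduce every outcome to an integer reply.status: 0 on success, an error number otherwise. The old code catches DbException and copies getErrno(); the new code funnels every Throwable through Util.handleException, whose body is not part of this hunk. The sketch below is only a hedged guess at what such a helper does, with placeholder error values; the real mapping presumably pulls the error number out of DatabaseException.

// Illustrative only; the patch's Util.handleException may differ in detail.
final class StatusSketch {
    static final int OK = 0;
    static final int EINVAL = 22;             // conventional errno for bad arguments
    static final int GENERIC_ERROR = -1;      // placeholder for "something else went wrong"

    static int handleException(Throwable t) {
        t.printStackTrace();                  // the server logs to its own error stream
        if (t instanceof IllegalArgumentException)
            return EINVAL;                    // bad client parameters
        // A DatabaseException would carry its own error number (getErrno());
        // this stand-alone sketch has no Berkeley DB classes to inspect.
        return GENERIC_ERROR;
    }
}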
- reply.status = e.getErrno(); - } - } - - public void get_h_nelem(DbDispatcher server, - __db_get_h_nelem_msg args, __db_get_h_nelem_reply reply) - { - try { - reply.nelem = db.getHashNumElements(); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void set_h_nelem(DbDispatcher server, - __db_h_nelem_msg args, __db_h_nelem_reply reply) - { - try { - db.setHashNumElements(args.nelem); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void get_lorder(DbDispatcher server, - __db_get_lorder_msg args, __db_get_lorder_reply reply) - { - try { - reply.lorder = db.getByteOrder(); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void set_lorder(DbDispatcher server, - __db_lorder_msg args, __db_lorder_reply reply) - { - try { - db.setByteOrder(args.lorder); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void get_pagesize(DbDispatcher server, - __db_get_pagesize_msg args, __db_get_pagesize_reply reply) - { - try { - reply.pagesize = db.getPageSize(); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void set_pagesize(DbDispatcher server, - __db_pagesize_msg args, __db_pagesize_reply reply) - { - try { - db.setPageSize(args.pagesize); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void get_q_extentsize(DbDispatcher server, - __db_get_extentsize_msg args, __db_get_extentsize_reply reply) - { - try { - reply.extentsize = db.getQueueExtentSize(); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void set_q_extentsize(DbDispatcher server, - __db_extentsize_msg args, __db_extentsize_reply reply) - { - try { - db.setQueueExtentSize(args.extentsize); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void get_re_delim(DbDispatcher server, - __db_get_re_delim_msg args, __db_get_re_delim_reply reply) - { - try { - reply.delim = db.getRecordDelimiter(); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void set_re_delim(DbDispatcher server, - __db_re_delim_msg args, __db_re_delim_reply reply) - { - try { - db.setRecordDelimiter(args.delim); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void get_re_len(DbDispatcher server, - __db_get_re_len_msg args, __db_get_re_len_reply reply) - { - try { - reply.len = db.getRecordLength(); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void set_re_len(DbDispatcher server, - __db_re_len_msg args, __db_re_len_reply reply) - { - try { - db.setRecordLength(args.len); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void get_re_pad(DbDispatcher server, - __db_get_re_pad_msg args, __db_get_re_pad_reply reply) - { - try { - reply.pad = db.getRecordPad(); - reply.status = 0; - } catch(DbException e) { - 
e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void set_re_pad(DbDispatcher server, - __db_re_pad_msg args, __db_re_pad_reply reply) - { - try { - db.setRecordPad(args.pad); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void stat(DbDispatcher server, - __db_stat_msg args, __db_stat_reply reply) - { - try { - Object raw_stat = db.stat(args.flags); - - if (raw_stat instanceof DbHashStat) { - DbHashStat hs = (DbHashStat)raw_stat; - int[] raw_stats = { - hs.hash_magic, hs.hash_version, - hs.hash_metaflags, hs.hash_nkeys, - hs.hash_ndata, hs.hash_pagesize, - hs.hash_ffactor, hs.hash_buckets, - hs.hash_free, hs.hash_bfree, - hs.hash_bigpages, hs.hash_big_bfree, - hs.hash_overflows, hs.hash_ovfl_free, - hs.hash_dup, hs.hash_dup_free - }; - reply.stats = raw_stats; - } else if (raw_stat instanceof DbQueueStat) { - DbQueueStat qs = (DbQueueStat)raw_stat; - int[] raw_stats = { - qs.qs_magic, qs.qs_version, - qs.qs_metaflags, qs.qs_nkeys, - qs.qs_ndata, qs.qs_pagesize, - qs.qs_extentsize, qs.qs_pages, - qs.qs_re_len, qs.qs_re_pad, - qs.qs_pgfree, qs.qs_first_recno, - qs.qs_cur_recno - }; - reply.stats = raw_stats; - } else if (raw_stat instanceof DbBtreeStat) { - DbBtreeStat bs = (DbBtreeStat)raw_stat; - int[] raw_stats = { - bs.bt_magic, bs.bt_version, - bs.bt_metaflags, bs.bt_nkeys, - bs.bt_ndata, bs.bt_pagesize, - bs.bt_maxkey, bs.bt_minkey, - bs.bt_re_len, bs.bt_re_pad, - bs.bt_levels, bs.bt_int_pg, - bs.bt_leaf_pg, bs.bt_dup_pg, - bs.bt_over_pg, bs.bt_free, - bs.bt_int_pgfree, bs.bt_leaf_pgfree, - bs.bt_dup_pgfree, bs.bt_over_pgfree - }; - reply.stats = raw_stats; - } else - throw new DbException("Invalid return type from db.stat()", Db.DB_NOTFOUND); - - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - reply.stats = new int[0]; - } - } - - public void sync(DbDispatcher server, - __db_sync_msg args, __db_sync_reply reply) - { - try { - db.sync(args.flags); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void truncate(DbDispatcher server, - __db_truncate_msg args, __db_truncate_reply reply) - { - try { - RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); - DbTxn txn = (rtxn != null) ? rtxn.txn : null; - reply.count = db.truncate(txn, args.flags); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } +public class RpcDb extends Timer { + static final byte[] empty = new byte[0]; + DatabaseConfig config; + Database db; + RpcDbEnv rdbenv; + int refcount = 0; + String dbname, subdbname; + int type, setflags, openflags; + + public RpcDb(RpcDbEnv rdbenv) { + this.rdbenv = rdbenv; + } + + void dispose() { + if (db != null) { + try { + db.close(); + } catch (Throwable t) { + Util.handleException(t); + } + db = null; + } + } + + public void associate(Dispatcher server, + __db_associate_msg args, __db_associate_reply reply) { + try { + // The semantics of the new API are a little different. + // The secondary database will already be open, here, so we first + // have to close it and then call openSecondaryDatabase. + RpcDb secondary = server.getDatabase(args.sdbpcl_id); + try { + secondary.db.close(); + } finally { + secondary.db = null; + } + + RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); + Transaction txn = (rtxn != null) ? 
rtxn.txn : null; + + args.flags &= ~AssociateCallbacks.DB_RPC2ND_MASK; + SecondaryConfig secondaryConfig = new SecondaryConfig(); + // The secondary has already been opened once, so we don't + // need all of the settings here, only a few: + secondaryConfig.setReadOnly(secondary.config.getReadOnly()); + secondaryConfig.setTransactional(secondary.config.getTransactional()); + secondaryConfig.setKeyCreator(AssociateCallbacks.getCallback(args.flags)); + secondaryConfig.setAllowPopulate((args.flags & DbConstants.DB_CREATE) != 0); + secondary.db = rdbenv.dbenv.openSecondaryDatabase(txn, secondary.dbname, secondary.subdbname, db, secondaryConfig); + secondary.config = secondary.db.getConfig(); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void close(Dispatcher server, + __db_close_msg args, __db_close_reply reply) { + if (refcount == 0 || --refcount > 0) { + reply.status = 0; + return; + } + + try { + server.delDatabase(this, false); + if (db != null) + db.close(args.flags != DbConstants.DB_NOSYNC); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } finally { + db = null; + } + } + + public void create(Dispatcher server, + __db_create_msg args, __db_create_reply reply) { + try { + config = new DatabaseConfig(); + config.setXACreate((args.flags & DbConstants.DB_XA_CREATE) != 0); + reply.dbcl_id = server.addDatabase(this); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void cursor(Dispatcher server, + __db_cursor_msg args, __db_cursor_reply reply) { + try { + RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); + Transaction txn = (rtxn != null) ? rtxn.txn : null; + + CursorConfig config = new CursorConfig(); + config.setDirtyRead((args.flags & DbConstants.DB_DIRTY_READ) != 0); + config.setDegree2((args.flags & DbConstants.DB_DEGREE_2) != 0); + config.setWriteCursor((args.flags & DbConstants.DB_WRITECURSOR) != 0); + + Cursor dbc = db.openCursor(txn, config); + RpcDbc rdbc = new RpcDbc(this, dbc, false); + rdbc.timer = (rtxn != null) ? rtxn.timer : this; + reply.dbcidcl_id = server.addCursor(rdbc); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void del(Dispatcher server, + __db_del_msg args, __db_del_reply reply) { + try { + RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); + Transaction txn = (rtxn != null) ? rtxn.txn : null; + DatabaseEntry key = Util.makeDatabaseEntry(args.keydata, args.keydlen, args.keydoff, args.keyulen, args.keyflags); + + db.delete(txn, key /* args.flags == 0 */); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void get(Dispatcher server, + __db_get_msg args, __db_get_reply reply) { + try { + RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); + Transaction txn = (rtxn != null) ? 
rtxn.txn : null; + DatabaseEntry key = Util.makeDatabaseEntry(args.keydata, args.keydlen, args.keydoff, args.keyulen, args.keyflags); + DatabaseEntry data = Util.makeDatabaseEntry(args.datadata, + args.datadlen, args.datadoff, args.dataulen, args.dataflags, + args.flags & DbConstants.DB_MULTIPLE); + + OperationStatus status; + switch(args.flags & ~Server.DB_MODIFIER_MASK) { + case 0: + status = db.get(txn, key, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_CONSUME: + status = db.consume(txn, key, data, false); + break; + + case DbConstants.DB_CONSUME_WAIT: + status = db.consume(txn, key, data, true); + break; + + case DbConstants.DB_GET_BOTH: + status = db.getSearchBoth(txn, key, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_SET_RECNO: + status = db.getSearchRecordNumber(txn, key, data, Util.getLockMode(args.flags)); + break; + + default: + throw new UnsupportedOperationException("Unknown flag: " + (args.flags & ~Server.DB_MODIFIER_MASK)); + } + reply.status = Util.getStatus(status); + + reply.keydata = Util.returnDatabaseEntry(key); + reply.datadata = Util.returnDatabaseEntry(data); + } catch (Throwable t) { + reply.status = Util.handleException(t); + reply.keydata = reply.datadata = empty; + } + } + + public void join(Dispatcher server, + __db_join_msg args, __db_join_reply reply) { + try { + Cursor[] cursors = new Cursor[args.curs.length + 1]; + for (int i = 0; i < args.curs.length; i++) { + RpcDbc rdbc = server.getCursor(args.curs[i]); + if (rdbc == null) { + reply.status = DbConstants.DB_NOSERVER_ID; + return; + } + cursors[i] = rdbc.dbc; + } + cursors[args.curs.length] = null; + + JoinConfig config = new JoinConfig(); + config.setNoSort(args.flags == DbConstants.DB_JOIN_NOSORT); + JoinCursor jdbc = db.join(cursors, config); + + RpcDbc rjdbc = new RpcDbc(this, new JoinCursorAdapter(db, jdbc), true); + /* + * If our curslist has a parent txn, we need to use it too + * for the activity timeout. All cursors must be part of + * the same transaction, so just check the first. + */ + RpcDbc rdbc0 = server.getCursor(args.curs[0]); + if (rdbc0.timer != rdbc0) + rjdbc.timer = rdbc0.timer; + + /* + * All of the curslist cursors must point to the join + * cursor's timeout so that we do not timeout any of the + * curlist cursors while the join cursor is active. + */ + for (int i = 0; i < args.curs.length; i++) { + RpcDbc rdbc = server.getCursor(args.curs[i]); + rdbc.orig_timer = rdbc.timer; + rdbc.timer = rjdbc; + } + reply.dbcidcl_id = server.addCursor(rjdbc); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void key_range(Dispatcher server, + __db_key_range_msg args, __db_key_range_reply reply) { + try { + RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); + Transaction txn = (rtxn != null) ? 
rtxn.txn : null; + DatabaseEntry key = Util.makeDatabaseEntry(args.keydata, args.keydlen, args.keydoff, args.keyulen, args.keyflags); + + KeyRange range = db.getKeyRange(txn, key /*, args.flags == 0 */); + reply.status = 0; + reply.less = range.less; + reply.equal = range.equal; + reply.greater = range.greater; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + private boolean findSharedDatabase(Dispatcher server, __db_open_reply reply) + throws DatabaseException { + RpcDb rdb = null; + boolean matchFound = false; + LocalIterator i = ((Server)server).db_list.iterator(); + + while (!matchFound && i.hasNext()) { + rdb = (RpcDb)i.next(); + if (rdb != null && rdb != this && rdb.rdbenv == rdbenv && + (type == DbConstants.DB_UNKNOWN || rdb.type == type) && + openflags == rdb.openflags && + setflags == rdb.setflags && + dbname != null && rdb.dbname != null && + dbname.equals(rdb.dbname) && + (subdbname == rdb.subdbname || + (subdbname != null && rdb.subdbname != null && + subdbname.equals(rdb.subdbname)))) + matchFound = true; + } + + if (matchFound) { + ++rdb.refcount; + reply.dbcl_id = ((FreeList.FreeListIterator)i).current; + reply.type = Util.fromDatabaseType(rdb.config.getType()); + reply.lorder = rdb.config.getByteOrder(); + reply.status = 0; + + Server.err.println("Sharing Database: " + reply.dbcl_id); + } + + return matchFound; + } + + public void get_name(Dispatcher server, + __db_get_name_msg args, __db_get_name_reply reply) { + reply.filename = dbname; + reply.dbname = subdbname; + reply.status = 0; + } + + public void get_open_flags(Dispatcher server, + __db_get_open_flags_msg args, __db_get_open_flags_reply reply) { + try { + reply.flags = 0; + if (config.getAllowCreate()) reply.flags |= DbConstants.DB_CREATE; + if (config.getExclusiveCreate()) reply.flags |= DbConstants.DB_EXCL; + if (config.getReadOnly()) reply.flags |= DbConstants.DB_RDONLY; + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void open(Dispatcher server, + __db_open_msg args, __db_open_reply reply) { + try { + dbname = (args.name.length() > 0) ? args.name : null; + subdbname = (args.subdb.length() > 0) ? args.subdb : null; + type = args.type; + openflags = args.flags & Server.DB_SERVER_DBFLAGS; + + if (findSharedDatabase(server, reply)) { + server.delDatabase(this, true); + } else { + RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); + Transaction txn = (rtxn != null) ? 
rtxn.txn : null; + + Server.err.println("Calling db.open(" + null + ", " + dbname + ", " + subdbname + ", " + args.type + ", " + Integer.toHexString(args.flags) + ", " + args.mode + ")"); + + config.setAllowCreate((args.flags & DbConstants.DB_CREATE) != 0); + config.setExclusiveCreate((args.flags & DbConstants.DB_EXCL) != 0); + config.setReadOnly((args.flags & DbConstants.DB_RDONLY) != 0); + config.setTransactional(txn != null || (args.flags & DbConstants.DB_AUTO_COMMIT) != 0); + config.setType(Util.toDatabaseType(args.type)); + config.setMode(args.mode); + db = rdbenv.dbenv.openDatabase(txn, dbname, subdbname, config); + ++refcount; + + // Refresh config in case we didn't know the full story before opening + config = db.getConfig(); + + reply.dbcl_id = args.dbpcl_id; + type = reply.type = Util.fromDatabaseType(config.getType()); + reply.lorder = config.getByteOrder(); + reply.status = 0; + } + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + + // System.err.println("Database.open: reply.status = " + reply.status + ", reply.dbcl_id = " + reply.dbcl_id); + } + + public void pget(Dispatcher server, + __db_pget_msg args, __db_pget_reply reply) { + try { + RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); + Transaction txn = (rtxn != null) ? rtxn.txn : null; + DatabaseEntry skey = Util.makeDatabaseEntry(args.skeydata, args.skeydlen, args.skeydoff, args.skeyulen, args.skeyflags); + DatabaseEntry pkey = Util.makeDatabaseEntry(args.pkeydata, args.pkeydlen, args.pkeydoff, args.pkeyulen, args.pkeyflags); + DatabaseEntry data = Util.makeDatabaseEntry(args.datadata, args.datadlen, args.datadoff, args.dataulen, args.dataflags); + + OperationStatus status; + switch(args.flags & ~Server.DB_MODIFIER_MASK) { + case 0: + status = ((SecondaryDatabase)db).get(txn, skey, pkey, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_GET_BOTH: + status = ((SecondaryDatabase)db).getSearchBoth(txn, skey, pkey, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_SET_RECNO: + status = ((SecondaryDatabase)db).getSearchRecordNumber(txn, skey, pkey, data, Util.getLockMode(args.flags)); + break; + + default: + throw new UnsupportedOperationException("Unknown flag: " + (args.flags & ~Server.DB_MODIFIER_MASK)); + } + reply.status = Util.getStatus(status); + + reply.skeydata = Util.returnDatabaseEntry(skey); + reply.pkeydata = Util.returnDatabaseEntry(pkey); + reply.datadata = Util.returnDatabaseEntry(data); + } catch (Throwable t) { + reply.status = Util.handleException(t); + reply.skeydata = reply.pkeydata = reply.datadata = empty; + } + } + + public void put(Dispatcher server, + __db_put_msg args, __db_put_reply reply) { + try { + RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); + Transaction txn = (rtxn != null) ? 
rtxn.txn : null; + + DatabaseEntry key = Util.makeDatabaseEntry(args.keydata, args.keydlen, args.keydoff, args.keyulen, args.keyflags); + DatabaseEntry data = Util.makeDatabaseEntry(args.datadata, args.datadlen, args.datadoff, args.dataulen, args.dataflags); + + reply.keydata = empty; + OperationStatus status; + switch(args.flags & ~Server.DB_MODIFIER_MASK) { + case 0: + status = db.put(txn, key, data); + break; + + case DbConstants.DB_APPEND: + status = db.append(txn, key, data); + reply.keydata = Util.returnDatabaseEntry(key); + break; + + case DbConstants.DB_NODUPDATA: + status = db.putNoDupData(txn, key, data); + break; + + case DbConstants.DB_NOOVERWRITE: + status = db.putNoOverwrite(txn, key, data); + break; + + default: + throw new UnsupportedOperationException("Unknown flag: " + (args.flags & ~Server.DB_MODIFIER_MASK)); + } + reply.status = Util.getStatus(status); + } catch (Throwable t) { + reply.status = Util.handleException(t); + reply.keydata = empty; + } + } + + public void remove(Dispatcher server, + __db_remove_msg args, __db_remove_reply reply) { + try { + args.name = (args.name.length() > 0) ? args.name : null; + args.subdb = (args.subdb.length() > 0) ? args.subdb : null; + Database.remove(args.name, args.subdb, config); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } finally { + server.delDatabase(this, false); + } + } + + public void rename(Dispatcher server, + __db_rename_msg args, __db_rename_reply reply) { + try { + args.name = (args.name.length() > 0) ? args.name : null; + args.subdb = (args.subdb.length() > 0) ? args.subdb : null; + args.newname = (args.newname.length() > 0) ? args.newname : null; + Database.rename(args.name, args.subdb, args.newname, config); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } finally { + server.delDatabase(this, false); + } + } + + public void set_bt_maxkey(Dispatcher server, + __db_bt_maxkey_msg args, __db_bt_maxkey_reply reply) { + try { + // XXX: check what to do about: config.setBtreeMaxKey(args.maxkey); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void get_bt_minkey(Dispatcher server, + __db_get_bt_minkey_msg args, __db_get_bt_minkey_reply reply) { + try { + reply.minkey = config.getBtreeMinKey(); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void set_bt_minkey(Dispatcher server, + __db_bt_minkey_msg args, __db_bt_minkey_reply reply) { + try { + config.setBtreeMinKey(args.minkey); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void get_encrypt_flags(Dispatcher server, + __db_get_encrypt_flags_msg args, __db_get_encrypt_flags_reply reply) { + try { + reply.flags = config.getEncrypted() ? 
DbConstants.DB_ENCRYPT_AES : 0; + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void set_encrypt(Dispatcher server, + __db_encrypt_msg args, __db_encrypt_reply reply) { + try { + config.setEncrypted(args.passwd /*, args.flags == 0 */); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void get_flags(Dispatcher server, + __db_get_flags_msg args, __db_get_flags_reply reply) { + try { + reply.flags = 0; + if (config.getChecksum()) reply.flags |= DbConstants.DB_CHKSUM; + if (config.getEncrypted()) reply.flags |= DbConstants.DB_ENCRYPT; + if (config.getBtreeRecordNumbers()) reply.flags |= DbConstants.DB_RECNUM; + if (config.getRenumbering()) reply.flags |= DbConstants.DB_RENUMBER; + if (config.getReverseSplitOff()) reply.flags |= DbConstants.DB_REVSPLITOFF; + if (config.getSortedDuplicates()) reply.flags |= DbConstants.DB_DUPSORT; + if (config.getSnapshot()) reply.flags |= DbConstants.DB_SNAPSHOT; + if (config.getUnsortedDuplicates()) reply.flags |= DbConstants.DB_DUP; + if (config.getTransactionNotDurable()) reply.flags |= DbConstants.DB_TXN_NOT_DURABLE; + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void set_flags(Dispatcher server, + __db_flags_msg args, __db_flags_reply reply) { + try { + // Server.err.println("Calling db.setflags(" + Integer.toHexString(args.flags) + ")"); + config.setChecksum((args.flags & DbConstants.DB_CHKSUM) != 0); + config.setBtreeRecordNumbers((args.flags & DbConstants.DB_RECNUM) != 0); + config.setRenumbering((args.flags & DbConstants.DB_RENUMBER) != 0); + config.setReverseSplitOff((args.flags & DbConstants.DB_REVSPLITOFF) != 0); + config.setSortedDuplicates((args.flags & DbConstants.DB_DUPSORT) != 0); + config.setSnapshot((args.flags & DbConstants.DB_SNAPSHOT) != 0); + config.setUnsortedDuplicates((args.flags & DbConstants.DB_DUP) != 0); + config.setTransactionNotDurable((args.flags & DbConstants.DB_TXN_NOT_DURABLE) != 0); + + setflags |= args.flags; + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void get_h_ffactor(Dispatcher server, + __db_get_h_ffactor_msg args, __db_get_h_ffactor_reply reply) { + try { + reply.ffactor = config.getHashFillFactor(); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void set_h_ffactor(Dispatcher server, + __db_h_ffactor_msg args, __db_h_ffactor_reply reply) { + try { + config.setHashFillFactor(args.ffactor); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void get_h_nelem(Dispatcher server, + __db_get_h_nelem_msg args, __db_get_h_nelem_reply reply) { + try { + reply.nelem = config.getHashNumElements(); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void set_h_nelem(Dispatcher server, + __db_h_nelem_msg args, __db_h_nelem_reply reply) { + try { + config.setHashNumElements(args.nelem); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void get_lorder(Dispatcher server, + __db_get_lorder_msg args, __db_get_lorder_reply reply) { + try { + reply.lorder = config.getByteOrder(); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void set_lorder(Dispatcher server, + __db_lorder_msg args, __db_lorder_reply reply) { + try { + 
config.setByteOrder(args.lorder); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void get_pagesize(Dispatcher server, + __db_get_pagesize_msg args, __db_get_pagesize_reply reply) { + try { + reply.pagesize = config.getPageSize(); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void set_pagesize(Dispatcher server, + __db_pagesize_msg args, __db_pagesize_reply reply) { + try { + config.setPageSize(args.pagesize); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void get_q_extentsize(Dispatcher server, + __db_get_extentsize_msg args, __db_get_extentsize_reply reply) { + try { + reply.extentsize = config.getQueueExtentSize(); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void set_q_extentsize(Dispatcher server, + __db_extentsize_msg args, __db_extentsize_reply reply) { + try { + config.setQueueExtentSize(args.extentsize); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void get_re_delim(Dispatcher server, + __db_get_re_delim_msg args, __db_get_re_delim_reply reply) { + try { + reply.delim = config.getRecordDelimiter(); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void set_re_delim(Dispatcher server, + __db_re_delim_msg args, __db_re_delim_reply reply) { + try { + config.setRecordDelimiter(args.delim); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void get_re_len(Dispatcher server, + __db_get_re_len_msg args, __db_get_re_len_reply reply) { + try { + reply.len = config.getRecordLength(); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void set_re_len(Dispatcher server, + __db_re_len_msg args, __db_re_len_reply reply) { + try { + config.setRecordLength(args.len); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void get_re_pad(Dispatcher server, + __db_get_re_pad_msg args, __db_get_re_pad_reply reply) { + try { + reply.pad = config.getRecordPad(); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void set_re_pad(Dispatcher server, + __db_re_pad_msg args, __db_re_pad_reply reply) { + try { + config.setRecordPad(args.pad); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void stat(Dispatcher server, + __db_stat_msg args, __db_stat_reply reply) { + try { + RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); + Transaction txn = (rtxn != null) ? 
rtxn.txn : null; + StatsConfig config = new StatsConfig(); + config.setClear((args.flags & DbConstants.DB_STAT_CLEAR) != 0); + config.setFast((args.flags & DbConstants.DB_FAST_STAT) != 0); + DatabaseStats raw_stat = db.getStats(txn, config); + + if (raw_stat instanceof BtreeStats) { + BtreeStats bs = (BtreeStats)raw_stat; + int[] raw_stats = { + bs.getMagic(), bs.getVersion(), + bs.getMetaFlags(), bs.getNumKeys(), + bs.getNumData(), bs.getPageSize(), + bs.getMaxKey(), bs.getMinKey(), + bs.getReLen(), bs.getRePad(), + bs.getLevels(), bs.getIntPages(), + bs.getLeafPages(), bs.getDupPages(), + bs.getOverPages(), bs.getFree(), + bs.getIntPagesFree(), bs.getLeafPagesFree(), + bs.getDupPagesFree(), bs.getOverPagesFree() + }; + reply.stats = raw_stats; + } else if (raw_stat instanceof HashStats) { + HashStats hs = (HashStats)raw_stat; + int[] raw_stats = { + hs.getMagic(), hs.getVersion(), + hs.getMetaFlags(), hs.getNumKeys(), + hs.getNumData(), hs.getPageSize(), + hs.getFfactor(), hs.getBuckets(), + hs.getFree(), hs.getBFree(), + hs.getBigPages(), hs.getBigBFree(), + hs.getOverflows(), hs.getOvflFree(), + hs.getDup(), hs.getDupFree() + }; + reply.stats = raw_stats; + } else if (raw_stat instanceof QueueStats) { + QueueStats qs = (QueueStats)raw_stat; + int[] raw_stats = { + qs.getMagic(), qs.getVersion(), + qs.getMetaFlags(), qs.getNumKeys(), + qs.getNumData(), qs.getPageSize(), + qs.getExtentSize(), qs.getPages(), + qs.getReLen(), qs.getRePad(), + qs.getPagesFree(), qs.getFirstRecno(), + qs.getCurRecno() + }; + reply.stats = raw_stats; + } else + throw new DatabaseException("Invalid return type from db.stat()", DbConstants.DB_NOTFOUND); + + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + reply.stats = new int[0]; + } + } + + public void sync(Dispatcher server, + __db_sync_msg args, __db_sync_reply reply) { + try { + db.sync(/* args.flags == 0 */); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void truncate(Dispatcher server, + __db_truncate_msg args, __db_truncate_reply reply) { + try { + RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); + Transaction txn = (rtxn != null) ? rtxn.txn : null; + reply.count = db.truncate(txn, true /*, args.flags == 0 */); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + reply.count = 0; + } + } } diff --git a/db/rpc_server/java/RpcDbEnv.java b/db/rpc_server/java/RpcDbEnv.java index 2245ce0e9..71e134f7f 100644 --- a/db/rpc_server/java/RpcDbEnv.java +++ b/db/rpc_server/java/RpcDbEnv.java @@ -1,339 +1,369 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 - * Sleepycat Software. All rights reserved. + * Copyright (c) 2001-2004 + * Sleepycat Software. All rights reserved. * - * $Id: RpcDbEnv.java,v 1.12 2003/10/20 21:13:59 mjc Exp $ + * $Id: RpcDbEnv.java,v 1.15 2004/04/21 01:09:11 mjc Exp $ */ package com.sleepycat.db.rpcserver; import com.sleepycat.db.*; -import java.io.IOException; +import com.sleepycat.db.internal.DbConstants; import java.io.*; import java.util.*; /** * RPC wrapper around a dbenv for the Java RPC server. 
*/ -public class RpcDbEnv extends Timer -{ - DbEnv dbenv; - String home; - long idletime, timeout; - int openflags, onflags, offflags; - int refcount = 1; - - void dispose() - { - if (dbenv != null) { - try { - dbenv.close(0); - } catch(DbException e) { - e.printStackTrace(DbServer.err); - } - dbenv = null; - } - } - - public void close(DbDispatcher server, - __env_close_msg args, __env_close_reply reply) - { - if (--refcount != 0) { - reply.status = 0; - return; - } - - try { - server.delEnv(this, false); - dbenv.close(args.flags); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } catch (IllegalArgumentException e) { - reply.status = DbServer.EINVAL; - } finally { - dbenv = null; - } - } - - public void create(DbDispatcher server, - __env_create_msg args, __env_create_reply reply) - { - this.idletime = (args.timeout != 0) ? args.timeout : DbServer.idleto; - this.timeout = DbServer.defto; - try { - dbenv = new DbEnv(0); - reply.envcl_id = server.addEnv(this); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void dbremove(DbDispatcher server, - __env_dbremove_msg args, __env_dbremove_reply reply) - { - try { - args.name = (args.name.length() > 0) ? args.name : null; - args.subdb = (args.subdb.length() > 0) ? args.subdb : null; - - RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); - DbTxn txn = (rtxn != null) ? rtxn.txn : null; - dbenv.dbRemove(txn, args.name, args.subdb, args.flags); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } catch(FileNotFoundException fnfe) { - reply.status = Db.DB_NOTFOUND; - } - } - - public void dbrename(DbDispatcher server, - __env_dbrename_msg args, __env_dbrename_reply reply) - { - try { - args.name = (args.name.length() > 0) ? args.name : null; - args.subdb = (args.subdb.length() > 0) ? args.subdb : null; - args.newname = (args.newname.length() > 0) ? args.newname : null; - - RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); - DbTxn txn = (rtxn != null) ? rtxn.txn : null; - dbenv.dbRename(txn, args.name, args.subdb, args.newname, args.flags); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } catch(FileNotFoundException fnfe) { - reply.status = Db.DB_NOTFOUND; - } - } - - private boolean findSharedDbEnv(DbDispatcher server, __env_open_reply reply) - throws DbException - { - RpcDbEnv rdbenv = null; - boolean matchFound = false; - LocalIterator i = ((DbServer)server).env_list.iterator(); - - while (!matchFound && i.hasNext()) { - rdbenv = (RpcDbEnv)i.next(); - if (rdbenv != null && rdbenv != this && - (home == rdbenv.home || - (home != null && home.equals(rdbenv.home))) && - openflags == rdbenv.openflags && - onflags == rdbenv.onflags && - offflags == rdbenv.offflags) - matchFound = true; - } - - if (matchFound) { - /* - * The only thing left to check is the timeout. - * Since the server timeout set by the client is a hint, for sharing - * we'll give them the benefit of the doubt and grant them the - * longer timeout. 
- */ - if (rdbenv.timeout < timeout) - rdbenv.timeout = timeout; - - ++rdbenv.refcount; - reply.envcl_id = ((FreeList.FreeListIterator)i).current; - reply.status = 0; - - DbServer.err.println("Sharing DbEnv: " + reply.envcl_id); - } - - return matchFound; - } - - public void get_home(DbDispatcher server, - __env_get_home_msg args, __env_get_home_reply reply) - { - try { - reply.home = dbenv.getDbEnvHome(); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void get_open_flags(DbDispatcher server, - __env_get_open_flags_msg args, __env_get_open_flags_reply reply) - { - try { - reply.flags = dbenv.getOpenFlags(); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void open(DbDispatcher server, - __env_open_msg args, __env_open_reply reply) - { - try { - home = (args.home.length() > 0) ? args.home : null; - - /* - * If they are using locking do deadlock detection for - * them, internally. - */ - if ((args.flags & Db.DB_INIT_LOCK) != 0) - dbenv.setLockDetect(Db.DB_LOCK_DEFAULT); - - // adjust flags for RPC - int newflags = (args.flags & ~DbServer.DB_SERVER_FLAGMASK); - openflags = (newflags & DbServer.DB_SERVER_ENVFLAGS); - - if (findSharedDbEnv(server, reply)) { - dbenv.close(0); - dbenv = null; - } else if (DbServer.check_home(home)) { - dbenv.open(home, newflags, args.mode); - reply.status = 0; - reply.envcl_id = args.dbenvcl_id; - } else - reply.status = Db.DB_NOSERVER_HOME; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } catch(FileNotFoundException e) { - reply.status = Db.DB_NOTFOUND; - } - - // System.err.println("DbEnv.open: reply.status = " + reply.status + ", reply.envcl_id = " + reply.envcl_id); - } - - public void remove(DbDispatcher server, - __env_remove_msg args, __env_remove_reply reply) - { - try { - args.home = (args.home.length() > 0) ? args.home : null; - // TODO: check home? 
- - dbenv.remove(args.home, args.flags); - dbenv = null; - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } catch(FileNotFoundException e) { - reply.status = Db.DB_NOTFOUND; - } finally { - server.delEnv(this, false); - } - } - - public void get_cachesize(DbDispatcher server, - __env_get_cachesize_msg args, __env_get_cachesize_reply reply) - { - try { - long cachesize = dbenv.getCacheSize(); - reply.gbytes = (int)(cachesize / 1073741824); - reply.bytes = (int)(cachesize % 1073741824); - reply.ncache = dbenv.getCacheSizeNcache(); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void set_cachesize(DbDispatcher server, - __env_cachesize_msg args, __env_cachesize_reply reply) - { - try { - long bytes = (long)args.gbytes * 1024*1024*1024; - bytes += args.bytes; - dbenv.setCacheSize(bytes, args.ncache); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void get_encrypt_flags(DbDispatcher server, - __env_get_encrypt_flags_msg args, __env_get_encrypt_flags_reply reply) - { - try { - reply.flags = dbenv.getEncryptFlags(); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void set_encrypt(DbDispatcher server, - __env_encrypt_msg args, __env_encrypt_reply reply) - { - try { - dbenv.setEncrypted(args.passwd, args.flags); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void get_flags(DbDispatcher server, - __env_get_flags_msg args, __env_get_flags_reply reply) - { - try { - reply.flags = dbenv.getFlags(); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void set_flags(DbDispatcher server, - __env_flags_msg args, __env_flags_reply reply) - { - try { - dbenv.setFlags(args.flags, args.onoff != 0); - if (args.onoff != 0) - onflags |= args.flags; - else - offflags |= args.flags; - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - // txn_recover implementation - public void txn_recover(DbDispatcher server, - __txn_recover_msg args, __txn_recover_reply reply) - { - try { - DbPreplist[] prep_list = dbenv.txnRecover(args.count, args.flags); - if (prep_list != null && prep_list.length > 0) { - int count = prep_list.length; - reply.retcount = count; - reply.txn = new int[count]; - reply.gid = new byte[count * Db.DB_XIDDATASIZE]; - - for(int i = 0; i < count; i++) { - reply.txn[i] = server.addTxn(new RpcDbTxn(this, prep_list[i].txn)); - System.arraycopy(prep_list[i].gid, 0, reply.gid, i * Db.DB_XIDDATASIZE, Db.DB_XIDDATASIZE); - } - } - - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } +public class RpcDbEnv extends Timer { + EnvironmentConfig config; + Environment dbenv; + String home; + long idletime, timeout; + int openflags, onflags, offflags; + int refcount = 1; + + void dispose() { + if (dbenv != null) { + try { + dbenv.close(); + } catch (Throwable t) { + Util.handleException(t); + } + dbenv = null; + } + } + + public void close(Dispatcher server, + __env_close_msg args, __env_close_reply reply) { + if (--refcount != 0) { + reply.status = 0; + return; + } + + try { + server.delEnv(this, false); 
+ if (dbenv != null) + dbenv.close(/* args.flags == 0 */); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } finally { + dbenv = null; + } + } + + public void create(Dispatcher server, + __env_create_msg args, __env_create_reply reply) { + this.idletime = (args.timeout != 0) ? args.timeout : Server.idleto; + this.timeout = Server.defto; + try { + config = new EnvironmentConfig(); + config.setErrorStream(Server.errstream); + reply.envcl_id = server.addEnv(this); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void dbremove(Dispatcher server, + __env_dbremove_msg args, __env_dbremove_reply reply) { + try { + args.name = (args.name.length() > 0) ? args.name : null; + args.subdb = (args.subdb.length() > 0) ? args.subdb : null; + + RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); + Transaction txn = (rtxn != null) ? rtxn.txn : null; + dbenv.removeDatabase(txn, args.name, args.subdb /*, args.flags == 0 */); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void dbrename(Dispatcher server, + __env_dbrename_msg args, __env_dbrename_reply reply) { + try { + args.name = (args.name.length() > 0) ? args.name : null; + args.subdb = (args.subdb.length() > 0) ? args.subdb : null; + args.newname = (args.newname.length() > 0) ? args.newname : null; + + RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); + Transaction txn = (rtxn != null) ? rtxn.txn : null; + dbenv.renameDatabase(txn, args.name, args.subdb, args.newname /*, args.flags == 0 */); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + private boolean findSharedEnvironment(Dispatcher server, __env_open_reply reply) + throws DatabaseException { + RpcDbEnv rdbenv = null; + boolean matchFound = false; + LocalIterator i = ((Server)server).env_list.iterator(); + + while (!matchFound && i.hasNext()) { + rdbenv = (RpcDbEnv)i.next(); + if (rdbenv != null && rdbenv != this && + (home == rdbenv.home || + (home != null && home.equals(rdbenv.home))) && + openflags == rdbenv.openflags && + onflags == rdbenv.onflags && + offflags == rdbenv.offflags) + matchFound = true; + } + + if (matchFound) { + /* + * The only thing left to check is the timeout. + * Since the server timeout set by the client is a hint, for sharing + * we'll give them the benefit of the doubt and grant them the + * longer timeout. 
+ */ + if (rdbenv.timeout < timeout) + rdbenv.timeout = timeout; + + ++rdbenv.refcount; + reply.envcl_id = ((FreeList.FreeListIterator)i).current; + reply.status = 0; + + Server.err.println("Sharing Environment: " + reply.envcl_id); + } + + return matchFound; + } + + public void get_home(Dispatcher server, + __env_get_home_msg args, __env_get_home_reply reply) { + try { + reply.home = dbenv.getHome().toString(); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void get_open_flags(Dispatcher server, + __env_get_open_flags_msg args, __env_get_open_flags_reply reply) { + try { + reply.flags = 0; + if (config.getAllowCreate()) reply.flags |= DbConstants.DB_CREATE; + if (config.getInitializeCache()) reply.flags |= DbConstants.DB_INIT_MPOOL; + if (config.getInitializeCDB()) reply.flags |= DbConstants.DB_INIT_CDB; + if (config.getInitializeLocking()) reply.flags |= DbConstants.DB_INIT_LOCK; + if (config.getInitializeLogging()) reply.flags |= DbConstants.DB_INIT_LOG; + if (config.getInitializeReplication()) reply.flags |= DbConstants.DB_INIT_REP; + if (config.getJoinEnvironment()) reply.flags |= DbConstants.DB_JOINENV; + if (config.getLockDown()) reply.flags |= DbConstants.DB_LOCKDOWN; + if (config.getPrivate()) reply.flags |= DbConstants.DB_PRIVATE; + if (config.getReadOnly()) reply.flags |= DbConstants.DB_RDONLY; + if (config.getRunRecovery()) reply.flags |= DbConstants.DB_RECOVER; + if (config.getRunFatalRecovery()) reply.flags |= DbConstants.DB_RECOVER_FATAL; + if (config.getSystemMemory()) reply.flags |= DbConstants.DB_SYSTEM_MEM; + if (config.getTransactional()) reply.flags |= DbConstants.DB_INIT_TXN; + if (config.getUseEnvironment()) reply.flags |= DbConstants.DB_USE_ENVIRON; + if (config.getUseEnvironmentRoot()) reply.flags |= DbConstants.DB_USE_ENVIRON_ROOT; + + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void open(Dispatcher server, + __env_open_msg args, __env_open_reply reply) { + try { + home = (args.home.length() > 0) ? args.home : null; + + /* + * If they are using locking do deadlock detection for + * them, internally. 
+ */ + if ((args.flags & DbConstants.DB_INIT_LOCK) != 0) + config.setLockDetectMode(LockDetectMode.DEFAULT); + + // adjust flags for RPC + int newflags = (args.flags & ~Server.DB_SERVER_FLAGMASK); + openflags = (newflags & Server.DB_SERVER_ENVFLAGS); + + config.setAllowCreate((args.flags & DbConstants.DB_CREATE) != 0); + config.setInitializeCache((args.flags & DbConstants.DB_INIT_MPOOL) != 0); + config.setInitializeCDB((args.flags & DbConstants.DB_INIT_CDB) != 0); + config.setInitializeLocking((args.flags & DbConstants.DB_INIT_LOCK) != 0); + config.setInitializeLogging((args.flags & DbConstants.DB_INIT_LOG) != 0); + config.setInitializeReplication((args.flags & DbConstants.DB_INIT_REP) != 0); + config.setJoinEnvironment((args.flags & DbConstants.DB_JOINENV) != 0); + config.setLockDown((args.flags & DbConstants.DB_LOCKDOWN) != 0); + config.setPrivate((args.flags & DbConstants.DB_PRIVATE) != 0); + config.setReadOnly((args.flags & DbConstants.DB_RDONLY) != 0); + config.setRunRecovery((args.flags & DbConstants.DB_RECOVER) != 0); + config.setRunFatalRecovery((args.flags & DbConstants.DB_RECOVER_FATAL) != 0); + config.setSystemMemory((args.flags & DbConstants.DB_SYSTEM_MEM) != 0); + config.setTransactional((args.flags & DbConstants.DB_INIT_TXN) != 0); + config.setUseEnvironment((args.flags & DbConstants.DB_USE_ENVIRON) != 0); + config.setUseEnvironmentRoot((args.flags & DbConstants.DB_USE_ENVIRON_ROOT) != 0); + + if (findSharedEnvironment(server, reply)) + dbenv = null; + else if (Server.check_home(home)) { + dbenv = new Environment(new File(home), config); + // Get the configuration after opening -- it may have changed if we're joining an environment + config = dbenv.getConfig(); + reply.status = 0; + reply.envcl_id = args.dbenvcl_id; + } else + reply.status = DbConstants.DB_NOSERVER_HOME; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + + // System.err.println("Environment.open: reply.status = " + reply.status + ", reply.envcl_id = " + reply.envcl_id); + } + + public void remove(Dispatcher server, + __env_remove_msg args, __env_remove_reply reply) { + Server.err.println("RpcDbEnv.remove(" + args.home + ")"); + try { + args.home = (args.home.length() > 0) ? args.home : null; + // TODO: check home? 
+ + boolean force = (args.flags & DbConstants.DB_FORCE) != 0; + config.setUseEnvironment((args.flags & DbConstants.DB_USE_ENVIRON) != 0); + config.setUseEnvironmentRoot((args.flags & DbConstants.DB_USE_ENVIRON_ROOT) != 0); + + Environment.remove(new File(args.home), force, config); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } finally { + server.delEnv(this, false); + } + } + + public void get_cachesize(Dispatcher server, + __env_get_cachesize_msg args, __env_get_cachesize_reply reply) { + try { + long cachesize = config.getCacheSize(); + final long GIGABYTE = 1073741824; + reply.gbytes = (int)(cachesize / GIGABYTE); + reply.bytes = (int)(cachesize % GIGABYTE); + reply.ncache = config.getCacheCount(); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void set_cachesize(Dispatcher server, + __env_cachesize_msg args, __env_cachesize_reply reply) { + try { + long bytes = (long)args.gbytes * 1024 * 1024 * 1024; + bytes += args.bytes; + config.setCacheSize(bytes); + config.setCacheCount(args.ncache); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void get_encrypt_flags(Dispatcher server, + __env_get_encrypt_flags_msg args, __env_get_encrypt_flags_reply reply) { + try { + reply.flags = config.getEncrypted() ? DbConstants.DB_ENCRYPT_AES : 0; + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void set_encrypt(Dispatcher server, + __env_encrypt_msg args, __env_encrypt_reply reply) { + try { + config.setEncrypted(args.passwd); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void get_flags(Dispatcher server, + __env_get_flags_msg args, __env_get_flags_reply reply) { + try { + reply.flags = 0; + if (config.getCDBLockAllDatabases()) reply.flags |= DbConstants.DB_CDB_ALLDB; + if (config.getDirectDatabaseIO()) reply.flags |= DbConstants.DB_DIRECT_DB; + if (config.getDirectLogIO()) reply.flags |= DbConstants.DB_DIRECT_LOG; + if (config.getInitializeRegions()) reply.flags |= DbConstants.DB_REGION_INIT; + if (config.getLogAutoRemove()) reply.flags |= DbConstants.DB_LOG_AUTOREMOVE; + if (config.getNoLocking()) reply.flags |= DbConstants.DB_NOLOCKING; + if (config.getNoMMap()) reply.flags |= DbConstants.DB_NOMMAP; + if (config.getNoPanic()) reply.flags |= DbConstants.DB_NOPANIC; + if (config.getOverwrite()) reply.flags |= DbConstants.DB_OVERWRITE; + if (config.getTxnNoSync()) reply.flags |= DbConstants.DB_TXN_NOSYNC; + if (config.getTxnNotDurable()) reply.flags |= DbConstants.DB_TXN_NOT_DURABLE; + if (config.getTxnWriteNoSync()) reply.flags |= DbConstants.DB_TXN_WRITE_NOSYNC; + if (config.getYieldCPU()) reply.flags |= DbConstants.DB_YIELDCPU; + + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void set_flags(Dispatcher server, + __env_flags_msg args, __env_flags_reply reply) { + try { + boolean onoff = (args.onoff != 0); + if (onoff) + onflags |= args.flags; + else + offflags |= args.flags; + + if ((args.flags & DbConstants.DB_CDB_ALLDB) != 0) config.setCDBLockAllDatabases(onoff); + if ((args.flags & DbConstants.DB_DIRECT_DB) != 0) config.setDirectDatabaseIO(onoff); + if ((args.flags & DbConstants.DB_DIRECT_LOG) != 0) config.setDirectLogIO(onoff); + if ((args.flags & DbConstants.DB_REGION_INIT) != 0) config.setInitializeRegions(onoff); + if ((args.flags & 
DbConstants.DB_LOG_AUTOREMOVE) != 0) config.setLogAutoRemove(onoff); + if ((args.flags & DbConstants.DB_NOLOCKING) != 0) config.setNoLocking(onoff); + if ((args.flags & DbConstants.DB_NOMMAP) != 0) config.setNoMMap(onoff); + if ((args.flags & DbConstants.DB_NOPANIC) != 0) config.setNoPanic(onoff); + if ((args.flags & DbConstants.DB_OVERWRITE) != 0) config.setOverwrite(onoff); + if ((args.flags & DbConstants.DB_TXN_NOSYNC) != 0) config.setTxnNoSync(onoff); + if ((args.flags & DbConstants.DB_TXN_NOT_DURABLE) != 0) config.setTxnNotDurable(onoff); + if ((args.flags & DbConstants.DB_TXN_WRITE_NOSYNC) != 0) config.setTxnWriteNoSync(onoff); + if ((args.flags & DbConstants.DB_YIELDCPU) != 0) config.setYieldCPU(onoff); + + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + // txn_recover implementation + public void txn_recover(Dispatcher server, + __txn_recover_msg args, __txn_recover_reply reply) { + try { + PreparedTransaction[] prep_list = dbenv.recover(args.count, args.flags == DbConstants.DB_NEXT); + if (prep_list != null && prep_list.length > 0) { + int count = prep_list.length; + reply.retcount = count; + reply.txn = new int[count]; + reply.gid = new byte[count * DbConstants.DB_XIDDATASIZE]; + + for (int i = 0; i < count; i++) { + reply.txn[i] = server.addTxn(new RpcDbTxn(this, prep_list[i].getTransaction())); + System.arraycopy(prep_list[i].getGID(), 0, reply.gid, i * DbConstants.DB_XIDDATASIZE, DbConstants.DB_XIDDATASIZE); + } + } + + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } } diff --git a/db/rpc_server/java/RpcDbTxn.java b/db/rpc_server/java/RpcDbTxn.java index 3115dbe46..85cdad038 100644 --- a/db/rpc_server/java/RpcDbTxn.java +++ b/db/rpc_server/java/RpcDbTxn.java @@ -1,123 +1,132 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 - * Sleepycat Software. All rights reserved. + * Copyright (c) 2001-2004 + * Sleepycat Software. All rights reserved. * - * $Id: RpcDbTxn.java,v 1.6 2003/10/20 21:13:59 mjc Exp $ + * $Id: RpcDbTxn.java,v 1.9 2004/05/04 13:45:33 sue Exp $ */ package com.sleepycat.db.rpcserver; import com.sleepycat.db.*; -import java.io.IOException; +import com.sleepycat.db.internal.DbConstants; import java.io.*; import java.util.*; /** * RPC wrapper around a txn object for the Java RPC server. */ -public class RpcDbTxn extends Timer -{ - RpcDbEnv rdbenv; - DbTxn txn; - - public RpcDbTxn(RpcDbEnv rdbenv, DbTxn txn) - { - this.rdbenv = rdbenv; - this.txn = txn; - } - - void dispose() - { - if (txn != null) { - try { - txn.abort(); - } catch(DbException e) { - e.printStackTrace(DbServer.err); - } - txn = null; - } - } - - public void abort(DbDispatcher server, - __txn_abort_msg args, __txn_abort_reply reply) - { - try { - txn.abort(); - txn = null; - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } finally { - server.delTxn(this, false); - } - } - - public void begin(DbDispatcher server, - __txn_begin_msg args, __txn_begin_reply reply) - { - try { - if (rdbenv == null) { - reply.status = Db.DB_NOSERVER_ID; - return; - } - DbEnv dbenv = rdbenv.dbenv; - RpcDbTxn rparent = server.getTxn(args.parentcl_id); - DbTxn parent = (rparent != null) ? 
rparent.txn : null; - - txn = dbenv.txnBegin(parent, args.flags); - - if (rparent != null) - timer = rparent.timer; - reply.txnidcl_id = server.addTxn(this); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void commit(DbDispatcher server, - __txn_commit_msg args, __txn_commit_reply reply) - { - try { - txn.commit(args.flags); - txn = null; - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } finally { - server.delTxn(this, false); - } - } - - public void discard(DbDispatcher server, - __txn_discard_msg args, __txn_discard_reply reply) - { - try { - txn.discard(args.flags); - txn = null; - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } finally { - server.delTxn(this, false); - } - } - - public void prepare(DbDispatcher server, - __txn_prepare_msg args, __txn_prepare_reply reply) - { - try { - txn.prepare(args.gid); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } +public class RpcDbTxn extends Timer { + RpcDbEnv rdbenv; + Transaction txn; + + public RpcDbTxn(RpcDbEnv rdbenv, Transaction txn) { + this.rdbenv = rdbenv; + this.txn = txn; + } + + void dispose() { + if (txn != null) { + try { + txn.abort(); + } catch (DatabaseException e) { + e.printStackTrace(Server.err); + } + txn = null; + } + } + + public void abort(Dispatcher server, + __txn_abort_msg args, __txn_abort_reply reply) { + try { + txn.abort(); + txn = null; + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } finally { + server.delTxn(this, false); + } + } + + public void begin(Dispatcher server, + __txn_begin_msg args, __txn_begin_reply reply) { + try { + if (rdbenv == null) { + reply.status = DbConstants.DB_NOSERVER_ID; + return; + } + Environment dbenv = rdbenv.dbenv; + RpcDbTxn rparent = server.getTxn(args.parentcl_id); + Transaction parent = (rparent != null) ? 
rparent.txn : null; + + TransactionConfig config = new TransactionConfig(); + config.setDegree2((args.flags & DbConstants.DB_DEGREE_2) != 0); + config.setDirtyRead((args.flags & DbConstants.DB_DIRTY_READ) != 0); + config.setNoSync((args.flags & DbConstants.DB_TXN_NOSYNC) != 0); + config.setNoWait(true); + config.setSync((args.flags & DbConstants.DB_TXN_SYNC) != 0); + + txn = dbenv.beginTransaction(parent, config); + + if (rparent != null) + timer = rparent.timer; + reply.txnidcl_id = server.addTxn(this); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void commit(Dispatcher server, + __txn_commit_msg args, __txn_commit_reply reply) { + try { + switch(args.flags) { + case 0: + txn.commit(); + break; + + case DbConstants.DB_TXN_SYNC: + txn.commitSync(); + break; + + case DbConstants.DB_TXN_NOSYNC: + txn.commitNoSync(); + break; + + default: + throw new UnsupportedOperationException("Unknown flag: " + (args.flags & ~Server.DB_MODIFIER_MASK)); + } + txn = null; + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } finally { + server.delTxn(this, false); + } + } + + public void discard(Dispatcher server, + __txn_discard_msg args, __txn_discard_reply reply) { + try { + txn.discard(/* args.flags == 0 */); + txn = null; + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } finally { + server.delTxn(this, false); + } + } + + public void prepare(Dispatcher server, + __txn_prepare_msg args, __txn_prepare_reply reply) { + try { + txn.prepare(args.gid); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } } diff --git a/db/rpc_server/java/RpcDbc.java b/db/rpc_server/java/RpcDbc.java index a5a0a44ef..c0d882276 100644 --- a/db/rpc_server/java/RpcDbc.java +++ b/db/rpc_server/java/RpcDbc.java @@ -1,241 +1,317 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 - * Sleepycat Software. All rights reserved. + * Copyright (c) 2001-2004 + * Sleepycat Software. All rights reserved. * - * $Id: RpcDbc.java,v 1.9 2003/10/29 16:02:57 mjc Exp $ + * $Id: RpcDbc.java,v 1.12 2004/04/09 15:08:40 mjc Exp $ */ package com.sleepycat.db.rpcserver; import com.sleepycat.db.*; -import java.io.IOException; +import com.sleepycat.db.internal.DbConstants; import java.io.*; import java.util.*; /** * RPC wrapper around a dbc object for the Java RPC server.
*/ -public class RpcDbc extends Timer -{ - static final byte[] empty = new byte[0]; - RpcDbEnv rdbenv; - RpcDb rdb; - Dbc dbc; - Timer orig_timer; - boolean isJoin; - - public RpcDbc(RpcDb rdb, Dbc dbc, boolean isJoin) - { - this.rdb = rdb; - this.rdbenv = rdb.rdbenv; - this.dbc = dbc; - this.isJoin = isJoin; - } - - void dispose() - { - if (dbc != null) { - try { - dbc.close(); - } catch(DbException e) { - e.printStackTrace(DbServer.err); - } - dbc = null; - } - } - - public void close(DbDispatcher server, - __dbc_close_msg args, __dbc_close_reply reply) - { - try { - dbc.close(); - dbc = null; - - if (isJoin) - for(LocalIterator i = ((DbServer)server).cursor_list.iterator(); i.hasNext(); ) { - RpcDbc rdbc = (RpcDbc)i.next(); - // Unjoin cursors that were joined to create this - if (rdbc != null && rdbc.timer == this) - rdbc.timer = rdbc.orig_timer; - } - - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } finally { - server.delCursor(this, false); - } - } - - public void count(DbDispatcher server, - __dbc_count_msg args, __dbc_count_reply reply) - { - try { - reply.dupcount = dbc.count(args.flags); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void del(DbDispatcher server, - __dbc_del_msg args, __dbc_del_reply reply) - { - try { - reply.status = dbc.delete(args.flags); - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void dup(DbDispatcher server, - __dbc_dup_msg args, __dbc_dup_reply reply) - { - try { - Dbc newdbc = dbc.dup(args.flags); - RpcDbc rdbc = new RpcDbc(rdb, newdbc, false); - /* If this cursor has a parent txn, we need to use it too. 
*/ - if (timer != this) - rdbc.timer = timer; - reply.dbcidcl_id = server.addCursor(rdbc); - reply.status = 0; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void get(DbDispatcher server, - __dbc_get_msg args, __dbc_get_reply reply) - { - try { - Dbt key = new Dbt(args.keydata); - key.setPartialLength(args.keydlen); - key.setUserBufferLength(args.keyulen); - key.setPartialOffset(args.keydoff); - key.setFlags(Db.DB_DBT_MALLOC | - (args.keyflags & Db.DB_DBT_PARTIAL)); - - Dbt data = new Dbt(args.datadata); - data.setPartialLength(args.datadlen); - data.setUserBufferLength(args.dataulen); - data.setPartialOffset(args.datadoff); - if ((args.flags & Db.DB_MULTIPLE) != 0 || - (args.flags & Db.DB_MULTIPLE_KEY) != 0) { - if (data.getData().length == 0) - data.setData(new byte[data.getUserBufferLength()]); - data.setFlags(Db.DB_DBT_USERMEM | - (args.dataflags & Db.DB_DBT_PARTIAL)); - } else - data.setFlags(Db.DB_DBT_MALLOC | - (args.dataflags & Db.DB_DBT_PARTIAL)); - - reply.status = dbc.get(key, data, args.flags); - - if (key.getData() == args.keydata) { - reply.keydata = new byte[key.getSize()]; - System.arraycopy(key.getData(), 0, reply.keydata, 0, key.getSize()); - } else - reply.keydata = key.getData(); - - if (data.getData() == args.datadata) { - reply.datadata = new byte[data.getSize()]; - System.arraycopy(data.getData(), 0, reply.datadata, 0, data.getSize()); - } else - reply.datadata = data.getData(); - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - reply.keydata = reply.datadata = empty; - } - } - - public void pget(DbDispatcher server, - __dbc_pget_msg args, __dbc_pget_reply reply) - { - try { - Dbt skey = new Dbt(args.skeydata); - skey.setPartialLength(args.skeydlen); - skey.setPartialOffset(args.skeydoff); - skey.setUserBufferLength(args.skeyulen); - skey.setFlags(Db.DB_DBT_MALLOC | - (args.skeyflags & Db.DB_DBT_PARTIAL)); - - Dbt pkey = new Dbt(args.pkeydata); - pkey.setPartialLength(args.pkeydlen); - pkey.setPartialOffset(args.pkeydoff); - pkey.setUserBufferLength(args.pkeyulen); - pkey.setFlags(Db.DB_DBT_MALLOC | - (args.pkeyflags & Db.DB_DBT_PARTIAL)); - - Dbt data = new Dbt(args.datadata); - data.setPartialLength(args.datadlen); - data.setPartialOffset(args.datadoff); - data.setUserBufferLength(args.dataulen); - data.setFlags(Db.DB_DBT_MALLOC | - (args.dataflags & Db.DB_DBT_PARTIAL)); - - reply.status = dbc.get(skey, pkey, data, args.flags); - - if (skey.getData() == args.skeydata) { - reply.skeydata = new byte[skey.getSize()]; - System.arraycopy(skey.getData(), 0, reply.skeydata, 0, skey.getSize()); - } else - reply.skeydata = skey.getData(); - - if (pkey.getData() == args.pkeydata) { - reply.pkeydata = new byte[pkey.getSize()]; - System.arraycopy(pkey.getData(), 0, reply.pkeydata, 0, pkey.getSize()); - } else - reply.pkeydata = pkey.getData(); - - if (data.getData() == args.datadata) { - reply.datadata = new byte[data.getSize()]; - System.arraycopy(data.getData(), 0, reply.datadata, 0, data.getSize()); - } else - reply.datadata = data.getData(); - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - } - } - - public void put(DbDispatcher server, - __dbc_put_msg args, __dbc_put_reply reply) - { - try { - Dbt key = new Dbt(args.keydata); - key.setPartialLength(args.keydlen); - key.setUserBufferLength(args.keyulen); - key.setPartialOffset(args.keydoff); - key.setFlags(args.keyflags & Db.DB_DBT_PARTIAL); - - Dbt data = new 
Dbt(args.datadata); - data.setPartialLength(args.datadlen); - data.setUserBufferLength(args.dataulen); - data.setPartialOffset(args.datadoff); - data.setFlags(args.dataflags & Db.DB_DBT_PARTIAL); - - reply.status = dbc.put(key, data, args.flags); - - if (reply.status == 0 && - (args.flags == Db.DB_AFTER || args.flags == Db.DB_BEFORE) && - rdb.db.getDbType() == Db.DB_RECNO) - reply.keydata = key.getData(); - else - reply.keydata = empty; - } catch(DbException e) { - e.printStackTrace(DbServer.err); - reply.status = e.getErrno(); - reply.keydata = empty; - } catch(IllegalArgumentException e) { - reply.status = DbServer.EINVAL; - reply.keydata = empty; - } - } +public class RpcDbc extends Timer { + static final byte[] empty = new byte[0]; + RpcDbEnv rdbenv; + RpcDb rdb; + Cursor dbc; + Timer orig_timer; + boolean isJoin; + + public RpcDbc(RpcDb rdb, Cursor dbc, boolean isJoin) { + this.rdb = rdb; + this.rdbenv = rdb.rdbenv; + this.dbc = dbc; + this.isJoin = isJoin; + } + + void dispose() { + if (dbc != null) { + try { + dbc.close(); + } catch (Throwable t) { + Util.handleException(t); + } + dbc = null; + } + } + + public void close(Dispatcher server, + __dbc_close_msg args, __dbc_close_reply reply) { + try { + dbc.close(); + dbc = null; + + if (isJoin) + for (LocalIterator i = ((Server)server).cursor_list.iterator(); i.hasNext();) { + RpcDbc rdbc = (RpcDbc)i.next(); + // Unjoin cursors that were joined to create this + if (rdbc != null && rdbc.timer == this) + rdbc.timer = rdbc.orig_timer; + } + + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } finally { + server.delCursor(this, false); + } + } + + public void count(Dispatcher server, + __dbc_count_msg args, __dbc_count_reply reply) { + try { + reply.dupcount = dbc.count(/* args.flags == 0 */); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void del(Dispatcher server, + __dbc_del_msg args, __dbc_del_reply reply) { + try { + reply.status = Util.getStatus(dbc.delete(/* args.flags == 0 */)); + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void dup(Dispatcher server, + __dbc_dup_msg args, __dbc_dup_reply reply) { + try { + Cursor newdbc = dbc.dup(args.flags == DbConstants.DB_POSITION); + RpcDbc rdbc = new RpcDbc(rdb, newdbc, false); + /* If this cursor has a parent txn, we need to use it too. 
*/ + if (timer != this) + rdbc.timer = timer; + reply.dbcidcl_id = server.addCursor(rdbc); + reply.status = 0; + } catch (Throwable t) { + reply.status = Util.handleException(t); + } + } + + public void get(Dispatcher server, + __dbc_get_msg args, __dbc_get_reply reply) { + try { + DatabaseEntry key = Util.makeDatabaseEntry(args.keydata, args.keydlen, args.keydoff, args.keyulen, args.keyflags); + DatabaseEntry data = Util.makeDatabaseEntry(args.datadata, + args.datadlen, args.datadoff, args.dataulen, args.dataflags, + args.flags & (DbConstants.DB_MULTIPLE | DbConstants.DB_MULTIPLE_KEY)); + + OperationStatus status; + switch(args.flags & ~Server.DB_MODIFIER_MASK) { + case DbConstants.DB_CURRENT: + status = dbc.getCurrent(key, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_FIRST: + status = dbc.getFirst(key, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_LAST: + status = dbc.getLast(key, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_NEXT: + status = dbc.getNext(key, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_NEXT_DUP: + status = dbc.getNextDup(key, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_NEXT_NODUP: + status = dbc.getNextNoDup(key, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_PREV: + status = dbc.getPrev(key, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_PREV_NODUP: + status = dbc.getPrevNoDup(key, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_GET_RECNO: + status = dbc.getRecordNumber(data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_SET: + status = dbc.getSearchKey(key, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_SET_RANGE: + status = dbc.getSearchKeyRange(key, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_GET_BOTH: + status = dbc.getSearchBoth(key, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_GET_BOTH_RANGE: + status = dbc.getSearchBothRange(key, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_SET_RECNO: + status = dbc.getSearchRecordNumber(key, data, Util.getLockMode(args.flags)); + break; + + /* Join cursors */ + case 0: + status = ((JoinCursorAdapter)dbc).jc.getNext(key, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_JOIN_ITEM: + status = ((JoinCursorAdapter)dbc).jc.getNext(key, Util.getLockMode(args.flags)); + break; + + default: + throw new UnsupportedOperationException("Unknown flag: " + (args.flags & ~Server.DB_MODIFIER_MASK)); + } + reply.status = Util.getStatus(status); + reply.keydata = Util.returnDatabaseEntry(key); + reply.datadata = Util.returnDatabaseEntry(data); + } catch (Throwable t) { + reply.status = Util.handleException(t); + reply.keydata = reply.datadata = empty; + } + } + + public void pget(Dispatcher server, + __dbc_pget_msg args, __dbc_pget_reply reply) { + try { + DatabaseEntry skey = Util.makeDatabaseEntry(args.skeydata, args.skeydlen, args.skeydoff, args.skeyulen, args.skeyflags); + DatabaseEntry pkey = Util.makeDatabaseEntry(args.pkeydata, args.pkeydlen, args.pkeydoff, args.pkeyulen, args.pkeyflags); + DatabaseEntry data = Util.makeDatabaseEntry(args.datadata, args.datadlen, args.datadoff, args.dataulen, args.dataflags); + + OperationStatus status; + switch(args.flags & ~Server.DB_MODIFIER_MASK) { + case DbConstants.DB_CURRENT: + status = ((SecondaryCursor)dbc).getCurrent(skey, pkey, data, Util.getLockMode(args.flags)); + break; + + 
case DbConstants.DB_FIRST: + status = ((SecondaryCursor)dbc).getFirst(skey, pkey, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_LAST: + status = ((SecondaryCursor)dbc).getLast(skey, pkey, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_NEXT: + status = ((SecondaryCursor)dbc).getNext(skey, pkey, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_NEXT_DUP: + status = ((SecondaryCursor)dbc).getNextDup(skey, pkey, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_NEXT_NODUP: + status = ((SecondaryCursor)dbc).getNextNoDup(skey, pkey, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_PREV: + status = ((SecondaryCursor)dbc).getPrev(skey, pkey, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_PREV_NODUP: + status = ((SecondaryCursor)dbc).getPrevNoDup(skey, pkey, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_GET_RECNO: + status = ((SecondaryCursor)dbc).getRecordNumber(pkey, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_SET: + status = ((SecondaryCursor)dbc).getSearchKey(skey, pkey, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_SET_RANGE: + status = ((SecondaryCursor)dbc).getSearchKeyRange(skey, pkey, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_GET_BOTH: + status = ((SecondaryCursor)dbc).getSearchBoth(skey, pkey, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_GET_BOTH_RANGE: + status = ((SecondaryCursor)dbc).getSearchBothRange(skey, pkey, data, Util.getLockMode(args.flags)); + break; + + case DbConstants.DB_SET_RECNO: + status = ((SecondaryCursor)dbc).getSearchRecordNumber(skey, pkey, data, Util.getLockMode(args.flags)); + break; + + default: + throw new UnsupportedOperationException("Unknown flag: " + (args.flags & ~Server.DB_MODIFIER_MASK)); + } + reply.status = Util.getStatus(status); + reply.skeydata = Util.returnDatabaseEntry(skey); + reply.pkeydata = Util.returnDatabaseEntry(pkey); + reply.datadata = Util.returnDatabaseEntry(data); + } catch (Throwable t) { + reply.status = Util.handleException(t); + reply.skeydata = reply.pkeydata = reply.datadata = empty; + } + } + + public void put(Dispatcher server, + __dbc_put_msg args, __dbc_put_reply reply) { + try { + DatabaseEntry key = Util.makeDatabaseEntry(args.keydata, args.keydlen, args.keydoff, args.keyulen, args.keyflags); + DatabaseEntry data = Util.makeDatabaseEntry(args.datadata, args.datadlen, args.datadoff, args.dataulen, args.dataflags); + + OperationStatus status; + switch(args.flags & ~Server.DB_MODIFIER_MASK) { + case 0: + status = dbc.put(key, data); + break; + + case DbConstants.DB_AFTER: + status = dbc.putAfter(data); + break; + + case DbConstants.DB_BEFORE: + status = dbc.putBefore(data); + break; + + case DbConstants.DB_NOOVERWRITE: + status = dbc.putNoOverwrite(key, data); + break; + + case DbConstants.DB_KEYFIRST: + status = dbc.putKeyFirst(key, data); + break; + + case DbConstants.DB_KEYLAST: + status = dbc.putKeyLast(key, data); + break; + + case DbConstants.DB_NODUPDATA: + status = dbc.putNoDupData(key, data); + break; + + case DbConstants.DB_CURRENT: + status = dbc.putCurrent(data); + break; + + default: + throw new UnsupportedOperationException("Unknown flag: " + (args.flags & ~Server.DB_MODIFIER_MASK)); + } + reply.status = Util.getStatus(status); + reply.keydata = Util.returnDatabaseEntry(key); + } catch (Throwable t) { + reply.status = Util.handleException(t); + reply.keydata = 
empty; + } + } } diff --git a/db/rpc_server/java/Server.java b/db/rpc_server/java/Server.java new file mode 100644 index 000000000..32eba6a16 --- /dev/null +++ b/db/rpc_server/java/Server.java @@ -0,0 +1,328 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2001-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: Server.java,v 1.1 2004/04/06 20:43:42 mjc Exp $ + */ + +package com.sleepycat.db.rpcserver; + +import com.sleepycat.db.*; +import com.sleepycat.db.internal.DbConstants; +import java.io.*; +import java.util.*; +import org.acplt.oncrpc.OncRpcException; +import org.acplt.oncrpc.server.OncRpcCallInformation; + +/** + * Main entry point for the Java version of the Berkeley DB RPC server + */ +public class Server extends Dispatcher { + public static long idleto = 10 * 60 * 1000; // 5 minutes + public static long defto = 5 * 60 * 1000; // 5 minutes + public static long maxto = 60 * 60 * 1000; // 1 hour + public static String passwd = null; + public static OutputStream errstream; + public static PrintWriter err; + + long now, hint; // updated each operation + FreeList env_list = new FreeList(); + FreeList db_list = new FreeList(); + FreeList txn_list = new FreeList(); + FreeList cursor_list = new FreeList(); + + public Server() throws IOException, OncRpcException { + super(); + init_lists(); + } + + public void dispatchOncRpcCall(OncRpcCallInformation call, int program, + int version, int procedure) throws OncRpcException, IOException { + long newnow = System.currentTimeMillis(); + // Server.err.println("Dispatching RPC call " + procedure + " after delay of " + (newnow - now)); + now = newnow; + try { + super.dispatchOncRpcCall(call, program, version, procedure); + doTimeouts(); + } catch (Throwable t) { + System.err.println("Caught " + t + " while dispatching RPC call " + procedure); + t.printStackTrace(Server.err); + } finally { + Server.err.flush(); + } + } + + // Internal methods to track context + private void init_lists() { + // We do this so that getEnv/Database/etc(0) == null + env_list.add(null); + db_list.add(null); + txn_list.add(null); + cursor_list.add(null); + } + + int addEnv(RpcDbEnv rdbenv) { + rdbenv.timer.last_access = now; + int id = env_list.add(rdbenv); + return id; + } + + int addDatabase(RpcDb rdb) { + int id = db_list.add(rdb); + return id; + } + + int addTxn(RpcDbTxn rtxn) { + rtxn.timer.last_access = now; + int id = txn_list.add(rtxn); + return id; + } + + int addCursor(RpcDbc rdbc) { + rdbc.timer.last_access = now; + int id = cursor_list.add(rdbc); + return id; + } + + void delEnv(RpcDbEnv rdbenv, boolean dispose) { + env_list.del(rdbenv); + + // cursors and transactions will already have been cleaned up + for (LocalIterator i = db_list.iterator(); i.hasNext();) { + RpcDb rdb = (RpcDb)i.next(); + if (rdb != null && rdb.rdbenv == rdbenv) + delDatabase(rdb, true); + } + + if (dispose) + rdbenv.dispose(); + } + + void delDatabase(RpcDb rdb, boolean dispose) { + db_list.del(rdb); + + for (LocalIterator i = cursor_list.iterator(); i.hasNext();) { + RpcDbc rdbc = (RpcDbc)i.next(); + if (rdbc != null && rdbc.timer == rdb) { + i.remove(); + rdbc.dispose(); + } + } + + if (dispose) + rdb.dispose(); + } + + void delTxn(RpcDbTxn rtxn, boolean dispose) { + txn_list.del(rtxn); + + for (LocalIterator i = cursor_list.iterator(); i.hasNext();) { + RpcDbc rdbc = (RpcDbc)i.next(); + if (rdbc != null && rdbc.timer == rtxn) { + i.remove(); + rdbc.dispose(); + } + } + + for (LocalIterator i = txn_list.iterator(); i.hasNext();) { + 
RpcDbTxn rtxn_child = (RpcDbTxn)i.next(); + if (rtxn_child != null && rtxn_child.timer == rtxn) { + i.remove(); + rtxn_child.dispose(); + } + } + + if (dispose) + rtxn.dispose(); + } + + void delCursor(RpcDbc rdbc, boolean dispose) { + cursor_list.del(rdbc); + if (dispose) + rdbc.dispose(); + } + + RpcDbEnv getEnv(int envid) { + RpcDbEnv rdbenv = (RpcDbEnv)env_list.get(envid); + if (rdbenv != null) + rdbenv.timer.last_access = now; + return rdbenv; + } + + RpcDb getDatabase(int dbid) { + RpcDb rdb = (RpcDb)db_list.get(dbid); + if (rdb != null) + rdb.rdbenv.timer.last_access = now; + return rdb; + } + + RpcDbTxn getTxn(int txnid) { + RpcDbTxn rtxn = (RpcDbTxn)txn_list.get(txnid); + if (rtxn != null) + rtxn.timer.last_access = rtxn.rdbenv.timer.last_access = now; + return rtxn; + } + + RpcDbc getCursor(int dbcid) { + RpcDbc rdbc = (RpcDbc)cursor_list.get(dbcid); + if (rdbc != null) + rdbc.last_access = rdbc.timer.last_access = rdbc.rdbenv.timer.last_access = now; + return rdbc; + } + + void doTimeouts() { + if (now < hint) { + // Server.err.println("Skipping cleaner sweep - now = " + now + ", hint = " + hint); + return; + } + + // Server.err.println("Starting a cleaner sweep"); + hint = now + Server.maxto; + + for (LocalIterator i = cursor_list.iterator(); i.hasNext();) { + RpcDbc rdbc = (RpcDbc)i.next(); + if (rdbc == null) + continue; + + long end_time = rdbc.timer.last_access + rdbc.rdbenv.timeout; + // Server.err.println("Examining " + rdbc + ", time left = " + (end_time - now)); + if (end_time < now) { + Server.err.println("Cleaning up " + rdbc); + delCursor(rdbc, true); + } else if (end_time < hint) + hint = end_time; + } + + for (LocalIterator i = txn_list.iterator(); i.hasNext();) { + RpcDbTxn rtxn = (RpcDbTxn)i.next(); + if (rtxn == null) + continue; + + long end_time = rtxn.timer.last_access + rtxn.rdbenv.timeout; + // Server.err.println("Examining " + rtxn + ", time left = " + (end_time - now)); + if (end_time < now) { + Server.err.println("Cleaning up " + rtxn); + delTxn(rtxn, true); + } else if (end_time < hint) + hint = end_time; + } + + for (LocalIterator i = env_list.iterator(); i.hasNext();) { + RpcDbEnv rdbenv = (RpcDbEnv)i.next(); + if (rdbenv == null) + continue; + + long end_time = rdbenv.timer.last_access + rdbenv.idletime; + // Server.err.println("Examining " + rdbenv + ", time left = " + (end_time - now)); + if (end_time < now) { + Server.err.println("Cleaning up " + rdbenv); + delEnv(rdbenv, true); + } + } + + // if we didn't find anything, reset the hint + if (hint == now + Server.maxto) + hint = 0; + + // Server.err.println("Finishing a cleaner sweep"); + } + + // Some constants that aren't available elsewhere + static final int ENOENT = 2; + static final int EACCES = 13; + static final int EEXIST = 17; + static final int EINVAL = 22; + static final int DB_SERVER_FLAGMASK = DbConstants.DB_LOCKDOWN | + DbConstants.DB_PRIVATE | DbConstants.DB_RECOVER | DbConstants.DB_RECOVER_FATAL | + DbConstants.DB_SYSTEM_MEM | DbConstants.DB_USE_ENVIRON | + DbConstants.DB_USE_ENVIRON_ROOT; + static final int DB_SERVER_ENVFLAGS = DbConstants.DB_INIT_CDB | + DbConstants.DB_INIT_LOCK | DbConstants.DB_INIT_LOG | DbConstants.DB_INIT_MPOOL | + DbConstants.DB_INIT_TXN | DbConstants.DB_JOINENV; + static final int DB_SERVER_DBFLAGS = DbConstants.DB_DIRTY_READ | + DbConstants.DB_NOMMAP | DbConstants.DB_RDONLY; + static final int DB_SERVER_DBNOSHARE = DbConstants.DB_EXCL | DbConstants.DB_TRUNCATE; + static final int DB_MODIFIER_MASK = 0xff000000; + + static Vector homes = new Vector(); + + 
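
The cleaner sweep above is driven entirely by the hint field: doTimeouts() returns immediately until the earliest possible expiry, and each full sweep recomputes that bound from whatever handles survive. A minimal, self-contained Java sketch of the same idea follows; the Expirable class and sweep() helper are illustrative stand-ins for the RpcDbEnv/RpcDbTxn/RpcDbc lists and are not part of this patch.

    import java.util.Iterator;
    import java.util.List;

    // Illustrative stand-in for a handle with a Timer; not part of the patch.
    class Expirable {
        long lastAccess;   // like Timer.last_access
        long timeout;      // like rdbenv.timeout / rdbenv.idletime
        Expirable(long lastAccess, long timeout) {
            this.lastAccess = lastAccess;
            this.timeout = timeout;
        }
    }

    class SweepSketch {
        static final long MAXTO = 60 * 60 * 1000L;  // like Server.maxto
        long hint;                                  // 0 means "sweep on the next call"

        // Removes expired items and recomputes hint, mirroring doTimeouts().
        List<Expirable> sweep(List<Expirable> items, long now) {
            if (hint != 0 && now < hint)
                return items;                       // nothing can have expired yet
            hint = now + MAXTO;
            for (Iterator<Expirable> i = items.iterator(); i.hasNext();) {
                Expirable e = i.next();
                long end = e.lastAccess + e.timeout;
                if (end < now)
                    i.remove();                     // expired: the server would dispose it here
                else if (end < hint)
                    hint = end;                     // remember the earliest remaining expiry
            }
            if (hint == now + MAXTO)
                hint = 0;                           // nothing found: always sweep next time
            return items;
        }
    }

Because getEnv/getTxn/getCursor refresh last_access on every call, only handles that have been idle for their full timeout ever reach the removal branch.
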
static void add_home(String home) { + File f = new File(home); + try { + home = f.getCanonicalPath(); + } catch (IOException e) { + // ignored + } + homes.addElement(home); + } + + static boolean check_home(String home) { + if (home == null) + return false; + File f = new File(home); + try { + home = f.getCanonicalPath(); + } catch (IOException e) { + // ignored + } + return homes.contains(home); + } + + public static void main(String[] args) { + System.out.println("Starting Server..."); + for (int i = 0; i < args.length; i++) { + if (args[i].charAt(0) != '-') + usage(); + + switch (args[i].charAt(1)) { + case 'h': + add_home(args[++i]); + break; + case 'I': + idleto = Long.parseLong(args[++i]) * 1000L; + break; + case 'P': + passwd = args[++i]; + break; + case 't': + defto = Long.parseLong(args[++i]) * 1000L; + break; + case 'T': + maxto = Long.parseLong(args[++i]) * 1000L; + break; + case 'V': + // version; + break; + case 'v': + // verbose + break; + default: + usage(); + } + } + + try { + // Server.errstream = System.err; + Server.errstream = new FileOutputStream("JavaRPCServer.trace", true); + Server.err = new PrintWriter(Server.errstream); + Server server = new Server(); + server.run(); + } catch (Throwable e) { + System.err.println("Server exception:"); + e.printStackTrace(Server.err); + } finally { + if (Server.err != null) + Server.err.close(); + } + + System.out.println("Server stopped."); + } + + static void usage() { + System.err.println("usage: java com.sleepycat.db.rpcserver.Server \\"); + System.err.println("[-Vv] [-h home] [-P passwd] [-I idletimeout] [-L logfile] [-t def_timeout] [-T maxtimeout]"); + System.exit(1); + } +} diff --git a/db/rpc_server/java/Timer.java b/db/rpc_server/java/Timer.java index 0b828277b..469ad1ad4 100644 --- a/db/rpc_server/java/Timer.java +++ b/db/rpc_server/java/Timer.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 - * Sleepycat Software. All rights reserved. + * Copyright (c) 2001-2004 + * Sleepycat Software. All rights reserved. * - * $Id: Timer.java,v 1.2 2003/01/08 05:45:53 bostic Exp $ + * $Id: Timer.java,v 1.4 2004/04/06 20:43:42 mjc Exp $ */ package com.sleepycat.db.rpcserver; @@ -15,8 +15,7 @@ package com.sleepycat.db.rpcserver; * used to group/share access times. This is done to keep the Java code * close to the canonical C implementation of the RPC server. */ -public class Timer -{ - Timer timer = this; - long last_access; +public class Timer { + Timer timer = this; + long last_access; } diff --git a/db/rpc_server/java/Util.java b/db/rpc_server/java/Util.java new file mode 100644 index 000000000..fedb4c73f --- /dev/null +++ b/db/rpc_server/java/Util.java @@ -0,0 +1,170 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2001-2004 + * Sleepycat Software. All rights reserved. 
+ * + * $Id: Util.java,v 1.5 2004/09/23 23:56:43 bostic Exp $ + */ + +package com.sleepycat.db.rpcserver; + +import java.io.FileNotFoundException; + +import com.sleepycat.db.*; +import com.sleepycat.db.internal.DbConstants; + +/** + * Helper methods for JDB <-> DB mapping + */ +public class Util { + static int handleException(Throwable t) { + int ret = Server.EINVAL; + + if (t instanceof DatabaseException) + ret = ((DatabaseException)t).getErrno(); + else if (t instanceof FileNotFoundException) + ret = Server.ENOENT; + + t.printStackTrace(Server.err); + Server.err.println("handleException(" + t + ") returned " + ret); + return ret; + } + + static int notSupported(String meth) { + Server.err.println("Unsupported functionality with JE: " + meth); + return Server.EINVAL; + } + + static int ignored(String meth) { + Server.err.println("Warning functionality ignored with JE: " + meth); + return 0; + } + + static DatabaseEntry makeDatabaseEntry(byte[] data, int dlen, int doff, int ulen, int flags, int multiFlags) throws DatabaseException { + DatabaseEntry dbt; + switch (multiFlags) { + case DbConstants.DB_MULTIPLE: + dbt = new MultipleDataEntry(new byte[ulen]); + break; + case DbConstants.DB_MULTIPLE_KEY: + dbt = new MultipleKeyDataEntry(new byte[ulen]); + break; + default: + dbt = new DatabaseEntry(data); + break; + } + dbt.setPartial(doff, dlen, (flags & DbConstants.DB_DBT_PARTIAL) != 0); + return dbt; + } + + static DatabaseEntry makeDatabaseEntry(byte[] data, int dlen, int doff, int ulen, int flags) throws DatabaseException { + return makeDatabaseEntry(data, dlen, doff, ulen, flags, 0); + } + + static byte[] returnDatabaseEntry(DatabaseEntry dbt) throws DatabaseException { + if (dbt.getData().length == dbt.getSize()) + return dbt.getData(); + else { + byte[] newdata = new byte[dbt.getSize()]; + System.arraycopy(dbt.getData(), 0, newdata, 0, dbt.getSize()); + return newdata; + } + } + + private static final String separator = ":::"; + + static String makeFileName(String file, String database) { + return null; + } + + static String makeDatabaseName(String file, String database) { + if (file == null && database == null) + return null; + else if (database.length() == 0 && file.indexOf(separator) >= 0) + return file; + return file + separator + database; + } + + static String makeRenameTarget(String file, String database, String newname) { + if (database.length() == 0) + return makeDatabaseName(newname, database); + else + return makeDatabaseName(file, newname); + } + + static String getFileName(String fullname) { + if (fullname == null) + return null; + int pos = fullname.indexOf(separator); + return fullname.substring(0, pos); + } + + static String getDatabaseName(String fullname) { + if (fullname == null) + return null; + int pos = fullname.indexOf(separator); + return fullname.substring(pos + separator.length()); + } + + static LockMode getLockMode(int flags) { + switch(flags & Server.DB_MODIFIER_MASK) { + case DbConstants.DB_DIRTY_READ: + return LockMode.DIRTY_READ; + case DbConstants.DB_DEGREE_2: + return LockMode.DEGREE_2; + case DbConstants.DB_RMW: + return LockMode.RMW; + default: + return LockMode.DEFAULT; + } + } + + static int getStatus(OperationStatus status) { + if (status == OperationStatus.SUCCESS) + return 0; + else if (status == OperationStatus.KEYEXIST) + return DbConstants.DB_KEYEXIST; + else if (status == OperationStatus.KEYEMPTY) + return DbConstants.DB_KEYEMPTY; + else if (status == OperationStatus.NOTFOUND) + return DbConstants.DB_NOTFOUND; + else + throw new 
IllegalArgumentException("Unknown status: " + status); + } + + static int fromDatabaseType(DatabaseType type) { + if (type == DatabaseType.BTREE) + return DbConstants.DB_BTREE; + else if (type == DatabaseType.HASH) + return DbConstants.DB_HASH; + else if (type == DatabaseType.QUEUE) + return DbConstants.DB_QUEUE; + else if (type == DatabaseType.RECNO) + return DbConstants.DB_RECNO; + else + throw new + IllegalArgumentException("Unknown database type: " + type); + } + + static DatabaseType toDatabaseType(int type) { + switch (type) { + case DbConstants.DB_BTREE: + return DatabaseType.BTREE; + case DbConstants.DB_HASH: + return DatabaseType.HASH; + case DbConstants.DB_QUEUE: + return DatabaseType.QUEUE; + case DbConstants.DB_RECNO: + return DatabaseType.RECNO; + case DbConstants.DB_UNKNOWN: + return DatabaseType.UNKNOWN; + default: + throw new IllegalArgumentException("Unknown database type ID: " + type); + } + } + + // Utility classes should not have a public or default constructor + protected Util() { + } +} diff --git a/db/rpc_server/java/gen/ServerStubs.java b/db/rpc_server/java/gen/ServerStubs.java new file mode 100644 index 000000000..48e24f179 --- /dev/null +++ b/db/rpc_server/java/gen/ServerStubs.java @@ -0,0 +1,657 @@ +/* + * Automatically generated by jrpcgen 0.95.1 on 4/6/04 2:22 PM + * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java + * See http://acplt.org/ks/remotetea.html for details + */ +package com.sleepycat.db.rpcserver; +import org.acplt.oncrpc.*; +import java.io.IOException; + +import org.acplt.oncrpc.server.*; + +/** + */ +public abstract class ServerStubs extends OncRpcServerStub implements OncRpcDispatchable { + + public ServerStubs() + throws OncRpcException, IOException { + this(0); + } + + public ServerStubs(int port) + throws OncRpcException, IOException { + info = new OncRpcServerTransportRegistrationInfo [] { + new OncRpcServerTransportRegistrationInfo(db_server.DB_RPC_SERVERPROG, 4003), + }; + transports = new OncRpcServerTransport [] { + new OncRpcUdpServerTransport(this, port, info, 32768), + new OncRpcTcpServerTransport(this, port, info, 32768) + }; + } + + public void dispatchOncRpcCall(OncRpcCallInformation call, int program, int version, int procedure) + throws OncRpcException, IOException { + if ( version == 4003 ) { + switch ( procedure ) { + case 1: { + __env_get_cachesize_msg args$ = new __env_get_cachesize_msg(); + call.retrieveCall(args$); + __env_get_cachesize_reply result$ = __DB_env_get_cachesize_4003(args$); + call.reply(result$); + break; + } + case 2: { + __env_cachesize_msg args$ = new __env_cachesize_msg(); + call.retrieveCall(args$); + __env_cachesize_reply result$ = __DB_env_cachesize_4003(args$); + call.reply(result$); + break; + } + case 3: { + __env_close_msg args$ = new __env_close_msg(); + call.retrieveCall(args$); + __env_close_reply result$ = __DB_env_close_4003(args$); + call.reply(result$); + break; + } + case 4: { + __env_create_msg args$ = new __env_create_msg(); + call.retrieveCall(args$); + __env_create_reply result$ = __DB_env_create_4003(args$); + call.reply(result$); + break; + } + case 5: { + __env_dbremove_msg args$ = new __env_dbremove_msg(); + call.retrieveCall(args$); + __env_dbremove_reply result$ = __DB_env_dbremove_4003(args$); + call.reply(result$); + break; + } + case 6: { + __env_dbrename_msg args$ = new __env_dbrename_msg(); + call.retrieveCall(args$); + __env_dbrename_reply result$ = __DB_env_dbrename_4003(args$); + call.reply(result$); + break; + } + case 7: { + __env_get_encrypt_flags_msg args$ = 
new __env_get_encrypt_flags_msg(); + call.retrieveCall(args$); + __env_get_encrypt_flags_reply result$ = __DB_env_get_encrypt_flags_4003(args$); + call.reply(result$); + break; + } + case 8: { + __env_encrypt_msg args$ = new __env_encrypt_msg(); + call.retrieveCall(args$); + __env_encrypt_reply result$ = __DB_env_encrypt_4003(args$); + call.reply(result$); + break; + } + case 9: { + __env_get_flags_msg args$ = new __env_get_flags_msg(); + call.retrieveCall(args$); + __env_get_flags_reply result$ = __DB_env_get_flags_4003(args$); + call.reply(result$); + break; + } + case 10: { + __env_flags_msg args$ = new __env_flags_msg(); + call.retrieveCall(args$); + __env_flags_reply result$ = __DB_env_flags_4003(args$); + call.reply(result$); + break; + } + case 11: { + __env_get_home_msg args$ = new __env_get_home_msg(); + call.retrieveCall(args$); + __env_get_home_reply result$ = __DB_env_get_home_4003(args$); + call.reply(result$); + break; + } + case 12: { + __env_get_open_flags_msg args$ = new __env_get_open_flags_msg(); + call.retrieveCall(args$); + __env_get_open_flags_reply result$ = __DB_env_get_open_flags_4003(args$); + call.reply(result$); + break; + } + case 13: { + __env_open_msg args$ = new __env_open_msg(); + call.retrieveCall(args$); + __env_open_reply result$ = __DB_env_open_4003(args$); + call.reply(result$); + break; + } + case 14: { + __env_remove_msg args$ = new __env_remove_msg(); + call.retrieveCall(args$); + __env_remove_reply result$ = __DB_env_remove_4003(args$); + call.reply(result$); + break; + } + case 15: { + __txn_abort_msg args$ = new __txn_abort_msg(); + call.retrieveCall(args$); + __txn_abort_reply result$ = __DB_txn_abort_4003(args$); + call.reply(result$); + break; + } + case 16: { + __txn_begin_msg args$ = new __txn_begin_msg(); + call.retrieveCall(args$); + __txn_begin_reply result$ = __DB_txn_begin_4003(args$); + call.reply(result$); + break; + } + case 17: { + __txn_commit_msg args$ = new __txn_commit_msg(); + call.retrieveCall(args$); + __txn_commit_reply result$ = __DB_txn_commit_4003(args$); + call.reply(result$); + break; + } + case 18: { + __txn_discard_msg args$ = new __txn_discard_msg(); + call.retrieveCall(args$); + __txn_discard_reply result$ = __DB_txn_discard_4003(args$); + call.reply(result$); + break; + } + case 19: { + __txn_prepare_msg args$ = new __txn_prepare_msg(); + call.retrieveCall(args$); + __txn_prepare_reply result$ = __DB_txn_prepare_4003(args$); + call.reply(result$); + break; + } + case 20: { + __txn_recover_msg args$ = new __txn_recover_msg(); + call.retrieveCall(args$); + __txn_recover_reply result$ = __DB_txn_recover_4003(args$); + call.reply(result$); + break; + } + case 21: { + __db_associate_msg args$ = new __db_associate_msg(); + call.retrieveCall(args$); + __db_associate_reply result$ = __DB_db_associate_4003(args$); + call.reply(result$); + break; + } + case 22: { + __db_bt_maxkey_msg args$ = new __db_bt_maxkey_msg(); + call.retrieveCall(args$); + __db_bt_maxkey_reply result$ = __DB_db_bt_maxkey_4003(args$); + call.reply(result$); + break; + } + case 23: { + __db_get_bt_minkey_msg args$ = new __db_get_bt_minkey_msg(); + call.retrieveCall(args$); + __db_get_bt_minkey_reply result$ = __DB_db_get_bt_minkey_4003(args$); + call.reply(result$); + break; + } + case 24: { + __db_bt_minkey_msg args$ = new __db_bt_minkey_msg(); + call.retrieveCall(args$); + __db_bt_minkey_reply result$ = __DB_db_bt_minkey_4003(args$); + call.reply(result$); + break; + } + case 25: { + __db_close_msg args$ = new __db_close_msg(); + 
call.retrieveCall(args$); + __db_close_reply result$ = __DB_db_close_4003(args$); + call.reply(result$); + break; + } + case 26: { + __db_create_msg args$ = new __db_create_msg(); + call.retrieveCall(args$); + __db_create_reply result$ = __DB_db_create_4003(args$); + call.reply(result$); + break; + } + case 27: { + __db_del_msg args$ = new __db_del_msg(); + call.retrieveCall(args$); + __db_del_reply result$ = __DB_db_del_4003(args$); + call.reply(result$); + break; + } + case 28: { + __db_get_encrypt_flags_msg args$ = new __db_get_encrypt_flags_msg(); + call.retrieveCall(args$); + __db_get_encrypt_flags_reply result$ = __DB_db_get_encrypt_flags_4003(args$); + call.reply(result$); + break; + } + case 29: { + __db_encrypt_msg args$ = new __db_encrypt_msg(); + call.retrieveCall(args$); + __db_encrypt_reply result$ = __DB_db_encrypt_4003(args$); + call.reply(result$); + break; + } + case 30: { + __db_get_extentsize_msg args$ = new __db_get_extentsize_msg(); + call.retrieveCall(args$); + __db_get_extentsize_reply result$ = __DB_db_get_extentsize_4003(args$); + call.reply(result$); + break; + } + case 31: { + __db_extentsize_msg args$ = new __db_extentsize_msg(); + call.retrieveCall(args$); + __db_extentsize_reply result$ = __DB_db_extentsize_4003(args$); + call.reply(result$); + break; + } + case 32: { + __db_get_flags_msg args$ = new __db_get_flags_msg(); + call.retrieveCall(args$); + __db_get_flags_reply result$ = __DB_db_get_flags_4003(args$); + call.reply(result$); + break; + } + case 33: { + __db_flags_msg args$ = new __db_flags_msg(); + call.retrieveCall(args$); + __db_flags_reply result$ = __DB_db_flags_4003(args$); + call.reply(result$); + break; + } + case 34: { + __db_get_msg args$ = new __db_get_msg(); + call.retrieveCall(args$); + __db_get_reply result$ = __DB_db_get_4003(args$); + call.reply(result$); + break; + } + case 35: { + __db_get_name_msg args$ = new __db_get_name_msg(); + call.retrieveCall(args$); + __db_get_name_reply result$ = __DB_db_get_name_4003(args$); + call.reply(result$); + break; + } + case 36: { + __db_get_open_flags_msg args$ = new __db_get_open_flags_msg(); + call.retrieveCall(args$); + __db_get_open_flags_reply result$ = __DB_db_get_open_flags_4003(args$); + call.reply(result$); + break; + } + case 37: { + __db_get_h_ffactor_msg args$ = new __db_get_h_ffactor_msg(); + call.retrieveCall(args$); + __db_get_h_ffactor_reply result$ = __DB_db_get_h_ffactor_4003(args$); + call.reply(result$); + break; + } + case 38: { + __db_h_ffactor_msg args$ = new __db_h_ffactor_msg(); + call.retrieveCall(args$); + __db_h_ffactor_reply result$ = __DB_db_h_ffactor_4003(args$); + call.reply(result$); + break; + } + case 39: { + __db_get_h_nelem_msg args$ = new __db_get_h_nelem_msg(); + call.retrieveCall(args$); + __db_get_h_nelem_reply result$ = __DB_db_get_h_nelem_4003(args$); + call.reply(result$); + break; + } + case 40: { + __db_h_nelem_msg args$ = new __db_h_nelem_msg(); + call.retrieveCall(args$); + __db_h_nelem_reply result$ = __DB_db_h_nelem_4003(args$); + call.reply(result$); + break; + } + case 41: { + __db_key_range_msg args$ = new __db_key_range_msg(); + call.retrieveCall(args$); + __db_key_range_reply result$ = __DB_db_key_range_4003(args$); + call.reply(result$); + break; + } + case 42: { + __db_get_lorder_msg args$ = new __db_get_lorder_msg(); + call.retrieveCall(args$); + __db_get_lorder_reply result$ = __DB_db_get_lorder_4003(args$); + call.reply(result$); + break; + } + case 43: { + __db_lorder_msg args$ = new __db_lorder_msg(); + call.retrieveCall(args$); + 
__db_lorder_reply result$ = __DB_db_lorder_4003(args$); + call.reply(result$); + break; + } + case 44: { + __db_open_msg args$ = new __db_open_msg(); + call.retrieveCall(args$); + __db_open_reply result$ = __DB_db_open_4003(args$); + call.reply(result$); + break; + } + case 45: { + __db_get_pagesize_msg args$ = new __db_get_pagesize_msg(); + call.retrieveCall(args$); + __db_get_pagesize_reply result$ = __DB_db_get_pagesize_4003(args$); + call.reply(result$); + break; + } + case 46: { + __db_pagesize_msg args$ = new __db_pagesize_msg(); + call.retrieveCall(args$); + __db_pagesize_reply result$ = __DB_db_pagesize_4003(args$); + call.reply(result$); + break; + } + case 47: { + __db_pget_msg args$ = new __db_pget_msg(); + call.retrieveCall(args$); + __db_pget_reply result$ = __DB_db_pget_4003(args$); + call.reply(result$); + break; + } + case 48: { + __db_put_msg args$ = new __db_put_msg(); + call.retrieveCall(args$); + __db_put_reply result$ = __DB_db_put_4003(args$); + call.reply(result$); + break; + } + case 49: { + __db_get_re_delim_msg args$ = new __db_get_re_delim_msg(); + call.retrieveCall(args$); + __db_get_re_delim_reply result$ = __DB_db_get_re_delim_4003(args$); + call.reply(result$); + break; + } + case 50: { + __db_re_delim_msg args$ = new __db_re_delim_msg(); + call.retrieveCall(args$); + __db_re_delim_reply result$ = __DB_db_re_delim_4003(args$); + call.reply(result$); + break; + } + case 51: { + __db_get_re_len_msg args$ = new __db_get_re_len_msg(); + call.retrieveCall(args$); + __db_get_re_len_reply result$ = __DB_db_get_re_len_4003(args$); + call.reply(result$); + break; + } + case 52: { + __db_re_len_msg args$ = new __db_re_len_msg(); + call.retrieveCall(args$); + __db_re_len_reply result$ = __DB_db_re_len_4003(args$); + call.reply(result$); + break; + } + case 53: { + __db_re_pad_msg args$ = new __db_re_pad_msg(); + call.retrieveCall(args$); + __db_re_pad_reply result$ = __DB_db_re_pad_4003(args$); + call.reply(result$); + break; + } + case 54: { + __db_get_re_pad_msg args$ = new __db_get_re_pad_msg(); + call.retrieveCall(args$); + __db_get_re_pad_reply result$ = __DB_db_get_re_pad_4003(args$); + call.reply(result$); + break; + } + case 55: { + __db_remove_msg args$ = new __db_remove_msg(); + call.retrieveCall(args$); + __db_remove_reply result$ = __DB_db_remove_4003(args$); + call.reply(result$); + break; + } + case 56: { + __db_rename_msg args$ = new __db_rename_msg(); + call.retrieveCall(args$); + __db_rename_reply result$ = __DB_db_rename_4003(args$); + call.reply(result$); + break; + } + case 57: { + __db_stat_msg args$ = new __db_stat_msg(); + call.retrieveCall(args$); + __db_stat_reply result$ = __DB_db_stat_4003(args$); + call.reply(result$); + break; + } + case 58: { + __db_sync_msg args$ = new __db_sync_msg(); + call.retrieveCall(args$); + __db_sync_reply result$ = __DB_db_sync_4003(args$); + call.reply(result$); + break; + } + case 59: { + __db_truncate_msg args$ = new __db_truncate_msg(); + call.retrieveCall(args$); + __db_truncate_reply result$ = __DB_db_truncate_4003(args$); + call.reply(result$); + break; + } + case 60: { + __db_cursor_msg args$ = new __db_cursor_msg(); + call.retrieveCall(args$); + __db_cursor_reply result$ = __DB_db_cursor_4003(args$); + call.reply(result$); + break; + } + case 61: { + __db_join_msg args$ = new __db_join_msg(); + call.retrieveCall(args$); + __db_join_reply result$ = __DB_db_join_4003(args$); + call.reply(result$); + break; + } + case 62: { + __dbc_close_msg args$ = new __dbc_close_msg(); + call.retrieveCall(args$); + 
__dbc_close_reply result$ = __DB_dbc_close_4003(args$); + call.reply(result$); + break; + } + case 63: { + __dbc_count_msg args$ = new __dbc_count_msg(); + call.retrieveCall(args$); + __dbc_count_reply result$ = __DB_dbc_count_4003(args$); + call.reply(result$); + break; + } + case 64: { + __dbc_del_msg args$ = new __dbc_del_msg(); + call.retrieveCall(args$); + __dbc_del_reply result$ = __DB_dbc_del_4003(args$); + call.reply(result$); + break; + } + case 65: { + __dbc_dup_msg args$ = new __dbc_dup_msg(); + call.retrieveCall(args$); + __dbc_dup_reply result$ = __DB_dbc_dup_4003(args$); + call.reply(result$); + break; + } + case 66: { + __dbc_get_msg args$ = new __dbc_get_msg(); + call.retrieveCall(args$); + __dbc_get_reply result$ = __DB_dbc_get_4003(args$); + call.reply(result$); + break; + } + case 67: { + __dbc_pget_msg args$ = new __dbc_pget_msg(); + call.retrieveCall(args$); + __dbc_pget_reply result$ = __DB_dbc_pget_4003(args$); + call.reply(result$); + break; + } + case 68: { + __dbc_put_msg args$ = new __dbc_put_msg(); + call.retrieveCall(args$); + __dbc_put_reply result$ = __DB_dbc_put_4003(args$); + call.reply(result$); + break; + } + default: + call.failProcedureUnavailable(); + } + } else { + call.failProcedureUnavailable(); + } + } + + public abstract __env_get_cachesize_reply __DB_env_get_cachesize_4003(__env_get_cachesize_msg arg1); + + public abstract __env_cachesize_reply __DB_env_cachesize_4003(__env_cachesize_msg arg1); + + public abstract __env_close_reply __DB_env_close_4003(__env_close_msg arg1); + + public abstract __env_create_reply __DB_env_create_4003(__env_create_msg arg1); + + public abstract __env_dbremove_reply __DB_env_dbremove_4003(__env_dbremove_msg arg1); + + public abstract __env_dbrename_reply __DB_env_dbrename_4003(__env_dbrename_msg arg1); + + public abstract __env_get_encrypt_flags_reply __DB_env_get_encrypt_flags_4003(__env_get_encrypt_flags_msg arg1); + + public abstract __env_encrypt_reply __DB_env_encrypt_4003(__env_encrypt_msg arg1); + + public abstract __env_get_flags_reply __DB_env_get_flags_4003(__env_get_flags_msg arg1); + + public abstract __env_flags_reply __DB_env_flags_4003(__env_flags_msg arg1); + + public abstract __env_get_home_reply __DB_env_get_home_4003(__env_get_home_msg arg1); + + public abstract __env_get_open_flags_reply __DB_env_get_open_flags_4003(__env_get_open_flags_msg arg1); + + public abstract __env_open_reply __DB_env_open_4003(__env_open_msg arg1); + + public abstract __env_remove_reply __DB_env_remove_4003(__env_remove_msg arg1); + + public abstract __txn_abort_reply __DB_txn_abort_4003(__txn_abort_msg arg1); + + public abstract __txn_begin_reply __DB_txn_begin_4003(__txn_begin_msg arg1); + + public abstract __txn_commit_reply __DB_txn_commit_4003(__txn_commit_msg arg1); + + public abstract __txn_discard_reply __DB_txn_discard_4003(__txn_discard_msg arg1); + + public abstract __txn_prepare_reply __DB_txn_prepare_4003(__txn_prepare_msg arg1); + + public abstract __txn_recover_reply __DB_txn_recover_4003(__txn_recover_msg arg1); + + public abstract __db_associate_reply __DB_db_associate_4003(__db_associate_msg arg1); + + public abstract __db_bt_maxkey_reply __DB_db_bt_maxkey_4003(__db_bt_maxkey_msg arg1); + + public abstract __db_get_bt_minkey_reply __DB_db_get_bt_minkey_4003(__db_get_bt_minkey_msg arg1); + + public abstract __db_bt_minkey_reply __DB_db_bt_minkey_4003(__db_bt_minkey_msg arg1); + + public abstract __db_close_reply __DB_db_close_4003(__db_close_msg arg1); + + public abstract __db_create_reply 
__DB_db_create_4003(__db_create_msg arg1); + + public abstract __db_del_reply __DB_db_del_4003(__db_del_msg arg1); + + public abstract __db_get_encrypt_flags_reply __DB_db_get_encrypt_flags_4003(__db_get_encrypt_flags_msg arg1); + + public abstract __db_encrypt_reply __DB_db_encrypt_4003(__db_encrypt_msg arg1); + + public abstract __db_get_extentsize_reply __DB_db_get_extentsize_4003(__db_get_extentsize_msg arg1); + + public abstract __db_extentsize_reply __DB_db_extentsize_4003(__db_extentsize_msg arg1); + + public abstract __db_get_flags_reply __DB_db_get_flags_4003(__db_get_flags_msg arg1); + + public abstract __db_flags_reply __DB_db_flags_4003(__db_flags_msg arg1); + + public abstract __db_get_reply __DB_db_get_4003(__db_get_msg arg1); + + public abstract __db_get_name_reply __DB_db_get_name_4003(__db_get_name_msg arg1); + + public abstract __db_get_open_flags_reply __DB_db_get_open_flags_4003(__db_get_open_flags_msg arg1); + + public abstract __db_get_h_ffactor_reply __DB_db_get_h_ffactor_4003(__db_get_h_ffactor_msg arg1); + + public abstract __db_h_ffactor_reply __DB_db_h_ffactor_4003(__db_h_ffactor_msg arg1); + + public abstract __db_get_h_nelem_reply __DB_db_get_h_nelem_4003(__db_get_h_nelem_msg arg1); + + public abstract __db_h_nelem_reply __DB_db_h_nelem_4003(__db_h_nelem_msg arg1); + + public abstract __db_key_range_reply __DB_db_key_range_4003(__db_key_range_msg arg1); + + public abstract __db_get_lorder_reply __DB_db_get_lorder_4003(__db_get_lorder_msg arg1); + + public abstract __db_lorder_reply __DB_db_lorder_4003(__db_lorder_msg arg1); + + public abstract __db_open_reply __DB_db_open_4003(__db_open_msg arg1); + + public abstract __db_get_pagesize_reply __DB_db_get_pagesize_4003(__db_get_pagesize_msg arg1); + + public abstract __db_pagesize_reply __DB_db_pagesize_4003(__db_pagesize_msg arg1); + + public abstract __db_pget_reply __DB_db_pget_4003(__db_pget_msg arg1); + + public abstract __db_put_reply __DB_db_put_4003(__db_put_msg arg1); + + public abstract __db_get_re_delim_reply __DB_db_get_re_delim_4003(__db_get_re_delim_msg arg1); + + public abstract __db_re_delim_reply __DB_db_re_delim_4003(__db_re_delim_msg arg1); + + public abstract __db_get_re_len_reply __DB_db_get_re_len_4003(__db_get_re_len_msg arg1); + + public abstract __db_re_len_reply __DB_db_re_len_4003(__db_re_len_msg arg1); + + public abstract __db_re_pad_reply __DB_db_re_pad_4003(__db_re_pad_msg arg1); + + public abstract __db_get_re_pad_reply __DB_db_get_re_pad_4003(__db_get_re_pad_msg arg1); + + public abstract __db_remove_reply __DB_db_remove_4003(__db_remove_msg arg1); + + public abstract __db_rename_reply __DB_db_rename_4003(__db_rename_msg arg1); + + public abstract __db_stat_reply __DB_db_stat_4003(__db_stat_msg arg1); + + public abstract __db_sync_reply __DB_db_sync_4003(__db_sync_msg arg1); + + public abstract __db_truncate_reply __DB_db_truncate_4003(__db_truncate_msg arg1); + + public abstract __db_cursor_reply __DB_db_cursor_4003(__db_cursor_msg arg1); + + public abstract __db_join_reply __DB_db_join_4003(__db_join_msg arg1); + + public abstract __dbc_close_reply __DB_dbc_close_4003(__dbc_close_msg arg1); + + public abstract __dbc_count_reply __DB_dbc_count_4003(__dbc_count_msg arg1); + + public abstract __dbc_del_reply __DB_dbc_del_4003(__dbc_del_msg arg1); + + public abstract __dbc_dup_reply __DB_dbc_dup_4003(__dbc_dup_msg arg1); + + public abstract __dbc_get_reply __DB_dbc_get_4003(__dbc_get_msg arg1); + + public abstract __dbc_pget_reply __DB_dbc_pget_4003(__dbc_pget_msg arg1); + + public 
abstract __dbc_put_reply __DB_dbc_put_4003(__dbc_put_msg arg1); + +} +// End of ServerStubs.java diff --git a/db/rpc_server/java/gen/__db_open_reply.java b/db/rpc_server/java/gen/__db_open_reply.java index d90c3754c..9b36b44a6 100644 --- a/db/rpc_server/java/gen/__db_open_reply.java +++ b/db/rpc_server/java/gen/__db_open_reply.java @@ -1,5 +1,5 @@ /* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM + * Automatically generated by jrpcgen 0.95.1 on 7/15/04 4:39 PM * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java * See http://acplt.org/ks/remotetea.html for details */ @@ -11,7 +11,6 @@ public class __db_open_reply implements XdrAble { public int status; public int dbcl_id; public int type; - public int dbflags; public int lorder; public __db_open_reply() { @@ -27,7 +26,6 @@ public class __db_open_reply implements XdrAble { xdr.xdrEncodeInt(status); xdr.xdrEncodeInt(dbcl_id); xdr.xdrEncodeInt(type); - xdr.xdrEncodeInt(dbflags); xdr.xdrEncodeInt(lorder); } @@ -36,7 +34,6 @@ public class __db_open_reply implements XdrAble { status = xdr.xdrDecodeInt(); dbcl_id = xdr.xdrDecodeInt(); type = xdr.xdrDecodeInt(); - dbflags = xdr.xdrDecodeInt(); lorder = xdr.xdrDecodeInt(); } diff --git a/db/rpc_server/java/gen/__db_stat_msg.java b/db/rpc_server/java/gen/__db_stat_msg.java index af536b5f7..419ee14a6 100644 --- a/db/rpc_server/java/gen/__db_stat_msg.java +++ b/db/rpc_server/java/gen/__db_stat_msg.java @@ -1,5 +1,5 @@ /* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM + * Automatically generated by jrpcgen 0.95.1 on 7/15/04 4:39 PM * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java * See http://acplt.org/ks/remotetea.html for details */ @@ -9,6 +9,7 @@ import java.io.IOException; public class __db_stat_msg implements XdrAble { public int dbpcl_id; + public int txnpcl_id; public int flags; public __db_stat_msg() { @@ -22,12 +23,14 @@ public class __db_stat_msg implements XdrAble { public void xdrEncode(XdrEncodingStream xdr) throws OncRpcException, IOException { xdr.xdrEncodeInt(dbpcl_id); + xdr.xdrEncodeInt(txnpcl_id); xdr.xdrEncodeInt(flags); } public void xdrDecode(XdrDecodingStream xdr) throws OncRpcException, IOException { dbpcl_id = xdr.xdrDecodeInt(); + txnpcl_id = xdr.xdrDecodeInt(); flags = xdr.xdrDecodeInt(); } diff --git a/db/rpc_server/java/gen/db_server.java b/db/rpc_server/java/gen/db_server.java index 52157dfb4..dccc3d8ad 100644 --- a/db/rpc_server/java/gen/db_server.java +++ b/db/rpc_server/java/gen/db_server.java @@ -1,5 +1,5 @@ /* - * Automatically generated by jrpcgen 0.95.1 on 3/26/03 6:40 PM + * Automatically generated by jrpcgen 0.95.1 on 2/11/04 1:28 PM * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java * See http://acplt.org/ks/remotetea.html for details */ @@ -11,75 +11,75 @@ import java.io.IOException; * A collection of constants used by the "db_server" ONC/RPC program. 
*/ public interface db_server { - public static final int __DB_db_get_re_delim_4002 = 49; - public static final int __DB_db_close_4002 = 25; - public static final int __DB_db_flags_4002 = 33; - public static final int __DB_dbc_dup_4002 = 65; - public static final int __DB_db_get_open_flags_4002 = 36; - public static final int __DB_db_encrypt_4002 = 29; - public static final int __DB_env_remove_4002 = 14; - public static final int __DB_env_dbrename_4002 = 6; - public static final int __DB_dbc_pget_4002 = 67; - public static final int __DB_env_get_cachesize_4002 = 1; - public static final int __DB_env_cachesize_4002 = 2; - public static final int __DB_db_get_lorder_4002 = 42; - public static final int __DB_db_lorder_4002 = 43; - public static final int __DB_db_key_range_4002 = 41; - public static final int __DB_env_get_open_flags_4002 = 12; - public static final int __DB_db_bt_minkey_4002 = 24; - public static final int __DB_db_sync_4002 = 58; - public static final int __DB_dbc_close_4002 = 62; - public static final int __DB_db_join_4002 = 61; - public static final int __DB_db_pagesize_4002 = 46; - public static final int DB_RPC_SERVERVERS = 4002; - public static final int __DB_db_open_4002 = 44; - public static final int __DB_db_get_extentsize_4002 = 30; - public static final int __DB_dbc_get_4002 = 66; - public static final int __DB_db_cursor_4002 = 60; - public static final int __DB_txn_commit_4002 = 17; - public static final int __DB_dbc_del_4002 = 64; - public static final int __DB_env_create_4002 = 4; - public static final int __DB_env_open_4002 = 13; - public static final int __DB_txn_prepare_4002 = 19; - public static final int __DB_db_get_re_pad_4002 = 54; - public static final int __DB_db_pget_4002 = 47; - public static final int __DB_db_stat_4002 = 57; - public static final int __DB_db_h_nelem_4002 = 40; - public static final int __DB_db_remove_4002 = 55; - public static final int __DB_db_get_flags_4002 = 32; - public static final int __DB_db_re_delim_4002 = 50; - public static final int __DB_db_re_pad_4002 = 53; - public static final int __DB_env_get_flags_4002 = 9; - public static final int __DB_txn_abort_4002 = 15; - public static final int __DB_env_get_encrypt_flags_4002 = 7; - public static final int __DB_db_get_encrypt_flags_4002 = 28; - public static final int __DB_db_get_h_ffactor_4002 = 37; - public static final int __DB_txn_recover_4002 = 20; - public static final int __DB_db_get_4002 = 34; - public static final int __DB_db_extentsize_4002 = 31; - public static final int __DB_db_get_h_nelem_4002 = 39; + public static final int __DB_db_get_re_delim_4003 = 49; + public static final int __DB_db_close_4003 = 25; + public static final int __DB_db_flags_4003 = 33; + public static final int __DB_dbc_dup_4003 = 65; + public static final int __DB_db_get_open_flags_4003 = 36; + public static final int __DB_db_encrypt_4003 = 29; + public static final int __DB_env_remove_4003 = 14; + public static final int __DB_env_dbrename_4003 = 6; + public static final int __DB_dbc_pget_4003 = 67; + public static final int __DB_env_get_cachesize_4003 = 1; + public static final int __DB_env_cachesize_4003 = 2; + public static final int __DB_db_get_lorder_4003 = 42; + public static final int __DB_db_lorder_4003 = 43; + public static final int __DB_db_key_range_4003 = 41; + public static final int __DB_env_get_open_flags_4003 = 12; + public static final int __DB_db_bt_minkey_4003 = 24; + public static final int __DB_db_sync_4003 = 58; + public static final int __DB_dbc_close_4003 = 62; + public 
static final int __DB_db_join_4003 = 61; + public static final int __DB_db_pagesize_4003 = 46; + public static final int DB_RPC_SERVERVERS = 4003; + public static final int __DB_db_open_4003 = 44; + public static final int __DB_db_get_extentsize_4003 = 30; + public static final int __DB_dbc_get_4003 = 66; + public static final int __DB_db_cursor_4003 = 60; + public static final int __DB_txn_commit_4003 = 17; + public static final int __DB_dbc_del_4003 = 64; + public static final int __DB_env_create_4003 = 4; + public static final int __DB_env_open_4003 = 13; + public static final int __DB_txn_prepare_4003 = 19; + public static final int __DB_db_get_re_pad_4003 = 54; + public static final int __DB_db_pget_4003 = 47; + public static final int __DB_db_stat_4003 = 57; + public static final int __DB_db_h_nelem_4003 = 40; + public static final int __DB_db_remove_4003 = 55; + public static final int __DB_db_get_flags_4003 = 32; + public static final int __DB_db_re_delim_4003 = 50; + public static final int __DB_db_re_pad_4003 = 53; + public static final int __DB_env_get_flags_4003 = 9; + public static final int __DB_txn_abort_4003 = 15; + public static final int __DB_env_get_encrypt_flags_4003 = 7; + public static final int __DB_db_get_encrypt_flags_4003 = 28; + public static final int __DB_db_get_h_ffactor_4003 = 37; + public static final int __DB_txn_recover_4003 = 20; + public static final int __DB_db_get_4003 = 34; + public static final int __DB_db_extentsize_4003 = 31; + public static final int __DB_db_get_h_nelem_4003 = 39; + public static final int __DB_dbc_put_4003 = 68; public static final int DB_RPC_SERVERPROG = 351457; - public static final int __DB_dbc_put_4002 = 68; - public static final int __DB_db_get_re_len_4002 = 51; - public static final int __DB_db_truncate_4002 = 59; - public static final int __DB_db_del_4002 = 27; - public static final int __DB_db_bt_maxkey_4002 = 22; - public static final int __DB_env_dbremove_4002 = 5; - public static final int __DB_db_get_pagesize_4002 = 45; - public static final int __DB_db_get_name_4002 = 35; - public static final int __DB_txn_discard_4002 = 18; - public static final int __DB_db_re_len_4002 = 52; - public static final int __DB_env_close_4002 = 3; - public static final int __DB_env_flags_4002 = 10; - public static final int __DB_db_rename_4002 = 56; - public static final int __DB_db_get_bt_minkey_4002 = 23; - public static final int __DB_db_associate_4002 = 21; - public static final int __DB_txn_begin_4002 = 16; - public static final int __DB_env_encrypt_4002 = 8; - public static final int __DB_db_h_ffactor_4002 = 38; - public static final int __DB_db_put_4002 = 48; - public static final int __DB_db_create_4002 = 26; - public static final int __DB_env_get_home_4002 = 11; - public static final int __DB_dbc_count_4002 = 63; + public static final int __DB_db_get_re_len_4003 = 51; + public static final int __DB_db_truncate_4003 = 59; + public static final int __DB_db_del_4003 = 27; + public static final int __DB_db_bt_maxkey_4003 = 22; + public static final int __DB_env_dbremove_4003 = 5; + public static final int __DB_db_get_pagesize_4003 = 45; + public static final int __DB_db_get_name_4003 = 35; + public static final int __DB_txn_discard_4003 = 18; + public static final int __DB_db_re_len_4003 = 52; + public static final int __DB_env_close_4003 = 3; + public static final int __DB_env_flags_4003 = 10; + public static final int __DB_db_rename_4003 = 56; + public static final int __DB_db_get_bt_minkey_4003 = 23; + public static final int 
__DB_db_associate_4003 = 21; + public static final int __DB_txn_begin_4003 = 16; + public static final int __DB_env_encrypt_4003 = 8; + public static final int __DB_db_h_ffactor_4003 = 38; + public static final int __DB_db_put_4003 = 48; + public static final int __DB_db_create_4003 = 26; + public static final int __DB_env_get_home_4003 = 11; + public static final int __DB_dbc_count_4003 = 63; } // End of db_server.java diff --git a/db/rpc_server/java/s_jrpcgen b/db/rpc_server/java/s_jrpcgen index fed8cbf56..b585f431f 100644 --- a/db/rpc_server/java/s_jrpcgen +++ b/db/rpc_server/java/s_jrpcgen @@ -1,3 +1,3 @@ #!/bin/sh -java -jar jrpcgen.jar -d gen -noclient -nobackup -p com.sleepycat.db.rpcserver -s DbServerStub ../db_server.x +java -jar jrpcgen.jar -d gen -noclient -nobackup -p com.sleepycat.db.rpcserver -s ServerStubs ../db_server.x diff --git a/db/rpc_server/rpc.src b/db/rpc_server/rpc.src index 065e4524e..fd6af9f96 100644 --- a/db/rpc_server/rpc.src +++ b/db/rpc_server/rpc.src @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: rpc.src,v 1.85 2003/06/30 17:54:49 sue Exp $ +# $Id: rpc.src,v 1.90 2004/07/15 19:54:11 sue Exp $ # # Syntax: # BEGIN function_name {CODE | RETCODE | NOFUNC} @@ -211,12 +211,20 @@ BEGIN set_lk_max_objects NOFUNC ARG ID DB_ENV * dbenv ARG INT u_int32_t max END -BEGIN get_mp_maxwrite NOFUNC +BEGIN get_mp_max_openfd NOFUNC +ARG ID DB_ENV * dbenv +ARET INT int nopen +END +BEGIN set_mp_max_openfd NOFUNC +ARG ID DB_ENV * dbenv +ARG INT int nopen +END +BEGIN get_mp_max_write NOFUNC ARG ID DB_ENV * dbenv ARET INT int nwrite ARET INT int nsleep END -BEGIN set_mp_maxwrite NOFUNC +BEGIN set_mp_max_write NOFUNC ARG ID DB_ENV * dbenv ARG INT int nwrite ARG INT int nsleep @@ -371,9 +379,11 @@ END BEGIN rep_elect NOFUNC ARG ID DB_ENV * dbenv ARG INT int nsites +ARG INT int nvotes ARG INT int pri ARG INT u_int32_t timeout ARG IGNORE int * idp +ARG INT u_int32_t flags END BEGIN rep_flush NOFUNC ARG ID DB_ENV * dbenv @@ -604,7 +614,6 @@ ARG INT u_int32_t flags ARG INT int mode RET ID long db RET INT DBTYPE type -RET INT u_int32_t dbflags RET INT int lorder END BEGIN db_get_pagesize CODE @@ -686,6 +695,7 @@ ARG INT u_int32_t flags END BEGIN db_stat RETCODE ARG ID DB * dbp +ARG ID DB_TXN * txnp ARG IGNORE void * sp ARG INT u_int32_t flags RET LIST u_int32_t * stats INT diff --git a/db/sequence/seq_stat.c b/db/sequence/seq_stat.c new file mode 100644 index 000000000..af38c1a55 --- /dev/null +++ b/db/sequence/seq_stat.c @@ -0,0 +1,244 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2004 + * Sleepycat Software. All rights reserved. + * + * $Id: seq_stat.c,v 1.19 2004/09/28 17:28:15 bostic Exp $ + */ + +#include "db_config.h" + +#ifdef HAVE_SEQUENCE +#ifndef NO_SYSTEM_INCLUDES +#include + +#include +#include +#endif + +#include "db_int.h" +#include "dbinc_auto/sequence_ext.h" +#include "dbinc/db_page.h" +#include "dbinc/db_am.h" + +#ifdef HAVE_STATISTICS +static int __seq_print_all __P((DB_SEQUENCE *, u_int32_t)); +static int __seq_print_stats __P((DB_SEQUENCE *, u_int32_t)); + +/* + * __seq_stat -- + * Get statistics from the sequence. 
+ * + * PUBLIC: int __seq_stat __P((DB_SEQUENCE *, DB_SEQUENCE_STAT **, u_int32_t)); + */ +int +__seq_stat(seq, spp, flags) + DB_SEQUENCE *seq; + DB_SEQUENCE_STAT **spp; + u_int32_t flags; +{ + DB *dbp; + DB_ENV *dbenv; + DB_SEQ_RECORD record; + DB_SEQUENCE_STAT *sp; + DBT data; + int ret; + + dbp = seq->seq_dbp; + dbenv = dbp->dbenv; + switch (flags) { + case DB_STAT_CLEAR: + case DB_STAT_ALL: + case 0: + break; + default: + return (__db_ferr(dbenv, "DB_SEQUENCE->stat", 0)); + } + + /* Allocate and clear the structure. */ + if ((ret = __os_umalloc(dbenv, sizeof(*sp), &sp)) != 0) + return (ret); + memset(sp, 0, sizeof(*sp)); + + if (seq->seq_mutexp != NULL) { + sp->st_wait = seq->seq_mutexp->mutex_set_wait; + sp->st_nowait = seq->seq_mutexp->mutex_set_nowait; + + if (LF_ISSET(DB_STAT_CLEAR)) + MUTEX_CLEAR(seq->seq_mutexp); + } + memset(&data, 0, sizeof(data)); + data.data = &record; + data.ulen = sizeof(record); + data.flags = DB_DBT_USERMEM; +retry: if ((ret = dbp->get(dbp, NULL, &seq->seq_key, &data, 0)) != 0) { + if (ret == DB_BUFFER_SMALL && + data.size > sizeof(seq->seq_record)) { + if ((ret = __os_malloc(dbenv, + data.size, &data.data)) != 0) + return (ret); + data.ulen = data.size; + goto retry; + } + return (ret); + } + + if (data.data != &record) + memcpy(&record, data.data, sizeof(record)); + sp->st_current = record.seq_value; + sp->st_value = seq->seq_record.seq_value; + sp->st_last_value = seq->seq_last_value; + sp->st_min = seq->seq_record.seq_min; + sp->st_max = seq->seq_record.seq_max; + sp->st_cache_size = seq->seq_cache_size; + sp->st_flags = seq->seq_record.flags; + + *spp = sp; + if (data.data != &record) + __os_free(dbenv, data.data); + return (0); +} + +/* + * __seq_stat_print -- + * Print statistics from the sequence. + * + * PUBLIC: int __seq_stat_print __P((DB_SEQUENCE *, u_int32_t)); + */ +int +__seq_stat_print(seq, flags) + DB_SEQUENCE *seq; + u_int32_t flags; +{ + int ret; + + if ((ret = __seq_print_stats(seq, flags)) != 0) + return (ret); + + if (LF_ISSET(DB_STAT_ALL) && + (ret = __seq_print_all(seq, flags)) != 0) + return (ret); + + return (0); + +} + +static const FN __db_seq_flags_fn[] = { + { DB_SEQ_DEC, "decrement" }, + { DB_SEQ_INC, "increment" }, + { DB_SEQ_RANGE_SET, "range set (internal)" }, + { DB_SEQ_WRAP, "wraparound at end" }, + { 0, NULL } +}; + +/* + * __db_get_seq_flags_fn -- + * Return the __db_seq_flags_fn array. + * + * PUBLIC: const FN * __db_get_seq_flags_fn __P((void)); + */ +const FN * +__db_get_seq_flags_fn() +{ + return (__db_seq_flags_fn); +} + +/* + * __seq_print_stats -- + * Display sequence stat structure. 
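
The dbp->get() call in __seq_stat above (and again in __seq_open and __seq_update later in sequence.c) follows one pattern: try the caller-supplied buffer first, and if the call reports DB_BUFFER_SMALL with a larger required size, allocate that size and retry. The Java sketch below shows the same loop in isolation; Fetcher and BufferTooSmall are hypothetical stand-ins for the C API and are not part of this patch.

    // Hypothetical callback: fills buf if it is large enough, otherwise
    // reports how much space would be needed (like DB_BUFFER_SMALL).
    interface Fetcher {
        int fetch(byte[] buf) throws BufferTooSmall;
    }

    class BufferTooSmall extends Exception {
        final int required;
        BufferTooSmall(int required) { this.required = required; }
    }

    class RetrySketch {
        static byte[] fetchWithRetry(Fetcher f, int initialSize) throws BufferTooSmall {
            byte[] buf = new byte[initialSize];
            for (;;) {
                try {
                    int len = f.fetch(buf);
                    return java.util.Arrays.copyOf(buf, len);
                } catch (BufferTooSmall e) {
                    if (e.required <= buf.length)
                        throw e;                  // not really a size problem: give up
                    buf = new byte[e.required];   // grow to the reported size and retry
                }
            }
        }
    }
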
+ */ +static int +__seq_print_stats(seq, flags) + DB_SEQUENCE *seq; + u_int32_t flags; +{ + DB_ENV *dbenv; + DB_SEQUENCE_STAT *sp; + int ret; + + dbenv = seq->seq_dbp->dbenv; + + if ((ret = __seq_stat(seq, &sp, flags)) != 0) + return (ret); + __db_dl_pct(dbenv, + "The number of sequence locks that required waiting", + (u_long)sp->st_wait, + DB_PCT(sp->st_wait, sp->st_wait + sp->st_nowait), NULL); + STAT_FMT("The current sequence value", + INT64_FMT, int64_t, sp->st_current); + STAT_FMT("The cached sequence value", + INT64_FMT, int64_t, sp->st_value); + STAT_FMT("The last cached sequence value", + INT64_FMT, int64_t, sp->st_last_value); + STAT_FMT("The minimum sequence value", + INT64_FMT, int64_t, sp->st_min); + STAT_FMT("The maximum sequence value", + INT64_FMT, int64_t, sp->st_max); + STAT_ULONG("The cache size", sp->st_cache_size); + __db_prflags(dbenv, NULL, + sp->st_flags, __db_seq_flags_fn, NULL, "\tSequence flags"); + __os_ufree(seq->seq_dbp->dbenv, sp); + return (0); +} + +/* + * __seq_print_all -- + * Display sequence debugging information - none for now. + */ +static int +__seq_print_all(seq, flags) + DB_SEQUENCE *seq; + u_int32_t flags; +{ + COMPQUIET(seq, NULL); + COMPQUIET(flags, 0); + return (0); +} + +#else /* !HAVE_STATISTICS */ + +int +__seq_stat(seq, statp, flags) + DB_SEQUENCE *seq; + DB_SEQUENCE_STAT **statp; + u_int32_t flags; +{ + COMPQUIET(statp, NULL); + COMPQUIET(flags, 0); + + return (__db_stat_not_built(seq->seq_dbp->dbenv)); +} + +int +__seq_stat_print(seq, flags) + DB_SEQUENCE *seq; + u_int32_t flags; +{ + COMPQUIET(flags, 0); + + return (__db_stat_not_built(seq->seq_dbp->dbenv)); +} + +/* + * __db_get_seq_flags_fn -- + * Return the __db_seq_flags_fn array. + * + * PUBLIC: const FN * __db_get_seq_flags_fn __P((void)); + */ +const FN * +__db_get_seq_flags_fn() +{ + static const FN __db_seq_flags_fn[] = { + { 0, NULL } + }; + + /* + * !!! + * The Tcl API uses this interface, stub it off. + */ + return (__db_seq_flags_fn); +} +#endif /* !HAVE_STATISTICS */ +#endif /* HAVE_SEQUENCE */ diff --git a/db/sequence/sequence.c b/db/sequence/sequence.c new file mode 100644 index 000000000..46c39fb20 --- /dev/null +++ b/db/sequence/sequence.c @@ -0,0 +1,635 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2004 + * Sleepycat Software. All rights reserved. 
+ * + * $Id: sequence.c,v 1.24 2004/10/12 23:54:14 ubell Exp $ + */ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include + +#include +#include +#endif + +#include "db_int.h" +#include "dbinc/db_shash.h" +#include "dbinc/db_page.h" +#include "dbinc/db_am.h" +#include "dbinc/mp.h" +#include "dbinc_auto/sequence_ext.h" + +#ifdef HAVE_SEQUENCE +#define SEQ_ILLEGAL_AFTER_OPEN(seq, name) \ + if (seq->seq_key.data != NULL) \ + return (__db_mi_open((seq)->seq_dbp->dbenv, name, 1)); + +#define SEQ_ILLEGAL_BEFORE_OPEN(seq, name) \ + if (seq->seq_key.data == NULL) \ + return (__db_mi_open((seq)->seq_dbp->dbenv, name, 0)); + +static int __seq_close __P((DB_SEQUENCE *, u_int32_t)); +static int __seq_get __P((DB_SEQUENCE *, + DB_TXN *, int32_t, db_seq_t *, u_int32_t)); +static int __seq_get_cachesize __P((DB_SEQUENCE *, int32_t *)); +static int __seq_get_flags __P((DB_SEQUENCE *, u_int32_t *)); +static int __seq_get_key __P((DB_SEQUENCE *, DBT *)); +static int __seq_get_range __P((DB_SEQUENCE *, db_seq_t *, db_seq_t *)); +static int __seq_set_range __P((DB_SEQUENCE *, db_seq_t, db_seq_t)); +static int __seq_get_db __P((DB_SEQUENCE *, DB **)); +static int __seq_initial_value __P((DB_SEQUENCE *, db_seq_t)); +static int __seq_open __P((DB_SEQUENCE *, DB_TXN *, DBT *, u_int32_t)); +static int __seq_remove __P((DB_SEQUENCE *, DB_TXN *, u_int32_t)); +static int __seq_set_cachesize __P((DB_SEQUENCE *, int32_t)); +static int __seq_set_flags __P((DB_SEQUENCE *, u_int32_t)); +static int __seq_update __P((DB_SEQUENCE *, DB_TXN *, int32_t, u_int32_t)); + +/* + * db_sequence_create -- + * DB_SEQUENCE constructor. + * + * EXTERN: int db_sequence_create __P((DB_SEQUENCE **, DB *, u_int32_t)); + */ +int +db_sequence_create(seqp, dbp, flags) + DB_SEQUENCE **seqp; + DB *dbp; + u_int32_t flags; +{ + DB_ENV *dbenv; + DB_SEQUENCE *seq; + int ret; + + dbenv = dbp->dbenv; + + /* Check for invalid function flags. */ + switch (flags) { + case 0: + break; + default: + return (__db_ferr(dbenv, "db_sequence_create", 0)); + } + + DB_ILLEGAL_BEFORE_OPEN(dbp, "db_sequence_create"); + + /* Allocate the sequence. */ + if ((ret = __os_calloc(dbenv, 1, sizeof(*seq), &seq)) != 0) + return (ret); + + seq->seq_dbp = dbp; + seq->close = __seq_close; + seq->get = __seq_get; + seq->get_cachesize = __seq_get_cachesize; + seq->set_cachesize = __seq_set_cachesize; + seq->get_db = __seq_get_db; + seq->get_flags = __seq_get_flags; + seq->get_key = __seq_get_key; + seq->get_range = __seq_get_range; + seq->initial_value = __seq_initial_value; + seq->open = __seq_open; + seq->remove = __seq_remove; + seq->set_flags = __seq_set_flags; + seq->set_range = __seq_set_range; + seq->stat = __seq_stat; + seq->stat_print = __seq_stat_print; + seq->seq_rp = &seq->seq_record; + *seqp = seq; + + return (0); +} + +/* + * __seq_open -- + * DB_SEQUENCE->open method. 
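
Taken together, the method table wired up by db_sequence_create above is meant to be driven in the order set_* then open then get. The sketch below shows that flow through the Java binding that this patch's RPC server wraps; the SequenceConfig, Database.openSequence and Sequence.get signatures are assumed from later com.sleepycat.db releases and may not match this snapshot exactly.

    import com.sleepycat.db.*;

    // Hedged sketch only: assumes the Java API mirrors the C DB_SEQUENCE
    // methods set up above (set_cachesize, set_range, initial_value, open,
    // get, close); signatures may differ in this 2004 snapshot.
    class SequenceUsageSketch {
        static long nextId(Database db) throws DatabaseException {
            SequenceConfig config = new SequenceConfig();
            config.setAllowCreate(true);        // like DB_CREATE on DB_SEQUENCE->open
            config.setCacheSize(100);           // like DB_SEQUENCE->set_cachesize
            config.setRange(1, Long.MAX_VALUE); // like DB_SEQUENCE->set_range
            config.setInitialValue(1);          // like DB_SEQUENCE->initial_value

            DatabaseEntry key = new DatabaseEntry("my_sequence".getBytes());
            Sequence seq = db.openSequence(null, key, config);
            try {
                return seq.get(null, 1);        // like DB_SEQUENCE->get with delta == 1
            } finally {
                seq.close();                    // like DB_SEQUENCE->close
            }
        }
    }
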
+ * + */ +static int +__seq_open(seq, txn, keyp, flags) + DB_SEQUENCE *seq; + DB_TXN *txn; + DBT *keyp; + u_int32_t flags; +{ + DB *dbp; + DB_ENV *dbenv; + DB_MPOOL *dbmp; + DB_SEQ_RECORD *rp; + u_int32_t pflags; + int ret; +#define SEQ_OPEN_FLAGS (DB_AUTO_COMMIT | DB_CREATE | DB_EXCL | DB_THREAD) + + dbp = seq->seq_dbp; + dbenv = dbp->dbenv; + + SEQ_ILLEGAL_AFTER_OPEN(seq, "DB_SEQUENCE->open"); + if (keyp->size == 0) { + __db_err(dbenv, "Zero length sequence key specified"); + return (EINVAL); + } + + if (LF_ISSET(~SEQ_OPEN_FLAGS)) + return (__db_ferr(dbenv, "DB_SEQUENCE->open", 0)); + + if (LF_ISSET(DB_THREAD)) { + dbmp = dbenv->mp_handle; + if ((ret = __db_mutex_setup(dbenv, dbmp->reginfo, + &seq->seq_mutexp, MUTEX_ALLOC | MUTEX_THREAD)) != 0) + return (ret); + } + + memset(&seq->seq_data, 0, sizeof(DBT)); + seq->seq_data.data = &seq->seq_record; + seq->seq_data.size = seq->seq_data.ulen = sizeof(seq->seq_record); + seq->seq_data.flags = DB_DBT_USERMEM; + + memset(&seq->seq_key, 0, sizeof(DBT)); + if ((ret = __os_malloc(dbenv, keyp->size, &seq->seq_key.data)) != 0) + return (ret); + memcpy(seq->seq_key.data, keyp->data, keyp->size); + seq->seq_key.size = seq->seq_key.ulen = keyp->size; + seq->seq_key.flags = DB_DBT_USERMEM; + + seq->seq_rp = rp = &seq->seq_record; + +retry: if ((ret = dbp->get(dbp, txn, &seq->seq_key, &seq->seq_data, 0)) != 0) { + if (ret == DB_BUFFER_SMALL && + seq->seq_data.size > sizeof(seq->seq_record)) { + if ((ret = __os_malloc(dbenv, + seq->seq_data.size, &seq->seq_data.data)) != 0) + goto err; + seq->seq_data.ulen = seq->seq_data.size; + rp = seq->seq_rp = seq->seq_data.data; + goto retry; + } + if ((ret != DB_NOTFOUND && ret != DB_KEYEMPTY) || + !LF_ISSET(DB_CREATE)) + goto err; + + ret = 0; + pflags = DB_NOOVERWRITE; + pflags |= LF_ISSET(DB_AUTO_COMMIT); + if (!F_ISSET(rp, DB_SEQ_RANGE_SET)) { + rp->seq_max = INT64_MAX; + rp->seq_min = INT64_MIN; + } + /* INC is the default. */ + if (!F_ISSET(rp, DB_SEQ_DEC)) + F_SET(rp, DB_SEQ_INC); + + rp->seq_version = DB_SEQUENCE_VERSION; + + if (rp->seq_value > rp->seq_max || + rp->seq_value < rp->seq_min) { + __db_err(dbenv, "Sequence value out of range"); + ret = EINVAL; + goto err; + } else if ((ret = dbp->put(dbp, txn, + &seq->seq_key, &seq->seq_data, pflags)) != 0) { + __db_err(dbenv, "Sequence create failed"); + goto err; + } + } else if (LF_ISSET(DB_CREATE) && LF_ISSET(DB_EXCL)) { + ret = EEXIST; + goto err; + } else if (seq->seq_data.size < sizeof(seq->seq_record)) { + __db_err(dbenv, "Bad sequence record format"); + ret = EINVAL; + goto err; + } + + seq->seq_last_value = rp->seq_value; + if (F_ISSET(rp, DB_SEQ_INC)) + seq->seq_last_value--; + else + seq->seq_last_value++; + +err: if (ret != 0) { + __os_free(dbenv, seq->seq_key.data); + seq->seq_key.data = NULL; + } + return (ret); +} + +/* + * __seq_get_cachesize -- + * Accessor for value passed into DB_SEQUENCE->set_cachesize call. + * + */ +static int +__seq_get_cachesize(seq, cachesize) + DB_SEQUENCE *seq; + int32_t *cachesize; +{ + SEQ_ILLEGAL_BEFORE_OPEN(seq, "DB_SEQUENCE->get_cachesize"); + + *cachesize = seq->seq_cache_size; + return (0); +} + +/* + * __seq_set_cachesize -- + * DB_SEQUENCE->set_cachesize. 
+ * + */ +static int +__seq_set_cachesize(seq, cachesize) + DB_SEQUENCE *seq; + int32_t cachesize; +{ + if (cachesize < 0) { + __db_err(seq->seq_dbp->dbenv, + "Illegal cache size: %d", cachesize); + return (EINVAL); + } + seq->seq_cache_size = cachesize; + return (0); +} + +#define SEQ_SET_FLAGS (DB_SEQ_WRAP | DB_SEQ_INC | DB_SEQ_DEC) +/* + * __seq_get_flags -- + * Accessor for flags passed into DB_SEQUENCE->open call + * + */ +static int +__seq_get_flags(seq, flagsp) + DB_SEQUENCE *seq; + u_int32_t *flagsp; +{ + SEQ_ILLEGAL_BEFORE_OPEN(seq, "DB_SEQUENCE->get_flags"); + + *flagsp = F_ISSET(seq->seq_rp, SEQ_SET_FLAGS); + return (0); +} + +/* + * __seq_set_flags -- + * DB_SEQUENCE->set_flags. + * + */ +static int +__seq_set_flags(seq, flags) + DB_SEQUENCE *seq; + u_int32_t flags; +{ + DB_ENV *dbenv; + DB_SEQ_RECORD *rp; + int ret; + + dbenv = seq->seq_dbp->dbenv; + rp = seq->seq_rp; + SEQ_ILLEGAL_AFTER_OPEN(seq, "DB_SEQUENCE->set_flags"); + + if (LF_ISSET(~SEQ_SET_FLAGS)) + return (__db_ferr(dbenv, "DB_SEQUENCE->set_flags", 0)); + + if ((ret = __db_fcchk(dbenv, + "DB_SEQUENCE->set_flags", flags, DB_SEQ_DEC, DB_SEQ_INC)) != 0) + return (ret); + + if (LF_ISSET(DB_SEQ_DEC | DB_SEQ_INC)) + F_CLR(rp, DB_SEQ_DEC | DB_SEQ_INC); + F_SET(rp, flags); + + return (0); +} + +/* + * __seq_initial_value -- + * DB_SEQUENCE->init_value. + * + */ +static int +__seq_initial_value(seq, value) + DB_SEQUENCE *seq; + db_seq_t value; +{ + DB_ENV *dbenv; + DB_SEQ_RECORD *rp; + + dbenv = seq->seq_dbp->dbenv; + SEQ_ILLEGAL_AFTER_OPEN(seq, "DB_SEQUENCE->init_value"); + + rp = seq->seq_rp; + if (F_ISSET(rp, DB_SEQ_RANGE_SET) && + (value > rp->seq_max || value < rp->seq_min)) { + __db_err(dbenv, "Sequence value out of range"); + return (EINVAL); + } + + rp->seq_value = value; + + return (0); +} + +/* + * __seq_get_range -- + * Accessor for range passed into DB_SEQUENCE->set_range call + * + */ +static int +__seq_get_range(seq, minp, maxp) + DB_SEQUENCE *seq; + db_seq_t *minp, *maxp; +{ + SEQ_ILLEGAL_BEFORE_OPEN(seq, "DB_SEQUENCE->get_range"); + + F_SET(seq->seq_rp, DB_SEQ_RANGE_SET); + *minp = seq->seq_record.seq_min; + *maxp = seq->seq_record.seq_max; + return (0); +} + +/* + * __seq_set_range -- + * SEQUENCE->set_range. 
+ * + */ +static int +__seq_set_range(seq, min, max) + DB_SEQUENCE *seq; + db_seq_t min, max; +{ + DB_ENV *dbenv; + + dbenv = seq->seq_dbp->dbenv; + SEQ_ILLEGAL_AFTER_OPEN(seq, "DB_SEQUENCE->set_range"); + + if (min >= max) { + __db_err(dbenv, "Illegal sequence range"); + return (EINVAL); + } + + seq->seq_record.seq_min = min; + seq->seq_record.seq_max = max; + F_SET(seq->seq_rp, DB_SEQ_RANGE_SET); + + return (0); +} + +static int +__seq_update(seq, txn, delta, flags) + DB_SEQUENCE *seq; + DB_TXN *txn; + int32_t delta; + u_int32_t flags; +{ + DB *dbp; + DB_ENV *dbenv; + DB_SEQ_RECORD *rp; + int32_t adjust; + int ret; + + dbp = seq->seq_dbp; + dbenv = dbp->dbenv; + rp = seq->seq_rp; + + if (LF_ISSET(DB_AUTO_COMMIT) && + (ret = __db_txn_auto_init(dbenv, &txn)) != 0) + return (ret); +retry: + if ((ret = dbp->get(dbp, txn, &seq->seq_key, &seq->seq_data, 0)) != 0) { + if (ret == DB_BUFFER_SMALL && + seq->seq_data.size > sizeof(seq->seq_record)) { + if ((ret = __os_malloc(dbenv, + seq->seq_data.size, &seq->seq_data.data)) != 0) + goto err; + seq->seq_data.ulen = seq->seq_data.size; + rp = seq->seq_rp = seq->seq_data.data; + goto retry; + } + goto err; + } + + if (seq->seq_data.size < sizeof(seq->seq_record)) { + __db_err(dbenv, "Bad sequence record format"); + ret = EINVAL; + goto err; + } + + adjust = delta > seq->seq_cache_size ? delta : seq->seq_cache_size; + + /* + * Check whether this operation will cause the sequence to wrap. + * + * The sequence minimum and maximum values can be INT64_MIN and + * INT64_MAX, so we need to do the test carefully to cope with + * arithmetic overflow. That means we need to check whether the value + * is in a range, we can't get away with a single comparison. + * + * For example, if seq_value == -1 and seq_max == INT64_MAX, the first + * test below will be true, since -1 - (INT64_MAX + 1) == INT64_MAX. + * The second part of the test makes sure that seq_value is close + * enough to the maximum to really cause wrapping. 
+ */ + if (F_ISSET(rp, DB_SEQ_INC)) { + if (rp->seq_value - ((rp->seq_max - adjust) + 2) >= 0 && + (rp->seq_max + 1) - rp->seq_value >= 0) { + if (F_ISSET(rp, DB_SEQ_WRAP)) + rp->seq_value = rp->seq_min; + else { +overflow: __db_err(dbenv, "Sequence overflow"); + ret = EINVAL; + goto err; + } + } + } else { + if (rp->seq_value - (rp->seq_min - 1) >= 0 && + (rp->seq_min + adjust - 2) - rp->seq_value >= 0) { + if (F_ISSET(rp, DB_SEQ_WRAP)) + rp->seq_value = rp->seq_max; + else + goto overflow; + } + adjust = -adjust; + } + + rp->seq_value += adjust; + ret = dbp->put(dbp, txn, &seq->seq_key, &seq->seq_data, 0); + rp->seq_value -= adjust; + if (ret != 0) { + __db_err(dbenv, "Sequence update failed"); + goto err; + } + seq->seq_last_value = rp->seq_value + adjust; + if (F_ISSET(rp, DB_SEQ_INC)) + seq->seq_last_value--; + else + seq->seq_last_value++; + +err: if (LF_ISSET(DB_AUTO_COMMIT)) + ret = __db_txn_auto_resolve(dbenv, + txn, LF_ISSET(DB_TXN_NOSYNC), ret); + return (ret); + +} + +static int +__seq_get(seq, txn, delta, retp, flags) + DB_SEQUENCE *seq; + DB_TXN *txn; + int32_t delta; + db_seq_t *retp; + u_int32_t flags; +{ + DB *dbp; + DB_ENV *dbenv; + DB_SEQ_RECORD *rp; + int ret; + + dbp = seq->seq_dbp; + dbenv = dbp->dbenv; + rp = seq->seq_rp; + ret = 0; + + SEQ_ILLEGAL_BEFORE_OPEN(seq, "DB_SEQUENCE->get"); + + if (delta <= 0) { + __db_err(dbenv, "Sequence delta must be greater than 0"); + return (EINVAL); + } + MUTEX_THREAD_LOCK(dbenv, seq->seq_mutexp); + + if (rp->seq_min + delta > rp->seq_max) { + __db_err(dbenv, "Sequence overflow"); + ret = EINVAL; + goto err; + } + + if (F_ISSET(rp, DB_SEQ_INC)) { + if (seq->seq_last_value + 1 - rp->seq_value < delta && + (ret = __seq_update(seq, txn, delta, flags)) != 0) + goto err; + + /* _update may change seq->seq_rp. */ + rp = seq->seq_rp; + *retp = rp->seq_value; + rp->seq_value += delta; + } else { + if ((rp->seq_value - seq->seq_last_value) + 1 < delta && + (ret = __seq_update(seq, txn, delta, flags)) != 0) + goto err; + + rp = seq->seq_rp; + *retp = rp->seq_value; + rp->seq_value -= delta; + } + +err: MUTEX_THREAD_UNLOCK(dbenv, seq->seq_mutexp); + + return (ret); +} + +/* + * __seq_get_db -- + * Accessor for dbp passed into DB_SEQUENCE->open call + * + */ +static int +__seq_get_db(seq, dbpp) + DB_SEQUENCE *seq; + DB **dbpp; +{ + SEQ_ILLEGAL_BEFORE_OPEN(seq, "DB_SEQUENCE->get_db"); + + *dbpp = seq->seq_dbp; + return (0); +} + +/* + * __seq_get_key -- + * Accessor for key passed into DB_SEQUENCE->open call + * + */ +static int +__seq_get_key(seq, key) + DB_SEQUENCE *seq; + DBT *key; +{ + SEQ_ILLEGAL_BEFORE_OPEN(seq, "DB_SEQUENCE->get_key"); + + key->data = seq->seq_key.data; + key->size = key->ulen = seq->seq_key.size; + key->flags = seq->seq_key.flags; + return (0); +} + +/* + * __seq_close -- + * Close a sequence + * + */ +static int +__seq_close(seq, flags) + DB_SEQUENCE *seq; + u_int32_t flags; +{ + DB_ENV *dbenv; + int ret; + + ret = 0; + dbenv = seq->seq_dbp->dbenv; + + if (flags != 0) + ret = __db_ferr(dbenv, "DB_SEQUENCE->close", 0); + if (seq->seq_key.data != NULL) + __os_free(dbenv, seq->seq_key.data); + if (seq->seq_data.data != &seq->seq_record) + __os_free(dbenv, seq->seq_data.data); + seq->seq_key.data = NULL; + memset(seq, CLEAR_BYTE, sizeof(*seq)); + __os_free(dbenv, seq); + return (ret); +} + +/* + * __seq_remove -- + * Remove a sequence from the database. 
+ */ +static int +__seq_remove(seq, txn, flags) + DB_SEQUENCE *seq; + DB_TXN *txn; + u_int32_t flags; +{ + DB *dbp; + DB_ENV *dbenv; + int ret, t_ret; + + dbp = seq->seq_dbp; + dbenv = dbp->dbenv; + + SEQ_ILLEGAL_BEFORE_OPEN(seq, "DB_SEQUENCE->remove"); + + if (LF_ISSET(DB_AUTO_COMMIT) && + (ret = __db_txn_auto_init(dbenv, &txn)) != 0) + goto err; + + ret = dbp->del(dbp, txn, &seq->seq_key, 0); + + if (LF_ISSET(DB_AUTO_COMMIT)) + ret = __db_txn_auto_resolve(dbenv, + txn, LF_ISSET(DB_TXN_NOSYNC), ret); + +err: if ((t_ret = __seq_close(seq, 0)) != 0 && ret == 0) + ret = t_ret; + return (ret); +} + +#else /* !HAVE_SEQUENCE */ + +int +db_sequence_create(seqp, dbp, flags) + DB_SEQUENCE **seqp; + DB *dbp; + u_int32_t flags; +{ + __db_err(dbp->dbenv, + "library build did not include support for sequences"); + return (DB_OPNOTSUP); +} +#endif /* HAVE_SEQUENCE */ diff --git a/db/tcl/docs/db.html b/db/tcl/docs/db.html index 8ef5e032a..db8382b75 100644 --- a/db/tcl/docs/db.html +++ b/db/tcl/docs/db.html @@ -1,4 +1,4 @@ - + diff --git a/db/tcl/docs/env.html b/db/tcl/docs/env.html index 6ac534dba..3203a02b8 100644 --- a/db/tcl/docs/env.html +++ b/db/tcl/docs/env.html @@ -1,4 +1,4 @@ - + @@ -273,10 +273,6 @@ subsystem to control, and indicates whether debug messages should be turned or off for that subsystem.  The value of which must be one of the following:
      -
    • -chkpt - Chooses the checkpointing code by using the DB_VERB_CHKPOINT -value
    • -
    • deadlock - Chooses the deadlocking code by using the DB_VERB_DEADLOCK value
    • diff --git a/db/tcl/docs/historic.html b/db/tcl/docs/historic.html index cbea28b20..f5a43e14d 100644 --- a/db/tcl/docs/historic.html +++ b/db/tcl/docs/historic.html @@ -1,4 +1,4 @@ - + diff --git a/db/tcl/docs/index.html b/db/tcl/docs/index.html index 2b4598b55..4f4e1e90c 100644 --- a/db/tcl/docs/index.html +++ b/db/tcl/docs/index.html @@ -1,4 +1,4 @@ - + diff --git a/db/tcl/docs/library.html b/db/tcl/docs/library.html index eb313b042..217213ed8 100644 --- a/db/tcl/docs/library.html +++ b/db/tcl/docs/library.html @@ -1,4 +1,4 @@ - + diff --git a/db/tcl/docs/lock.html b/db/tcl/docs/lock.html index 4e9a13a3f..75e0bb2de 100644 --- a/db/tcl/docs/lock.html +++ b/db/tcl/docs/lock.html @@ -1,4 +1,4 @@ - + diff --git a/db/tcl/docs/log.html b/db/tcl/docs/log.html index de9edb838..5fdd132d5 100644 --- a/db/tcl/docs/log.html +++ b/db/tcl/docs/log.html @@ -1,4 +1,4 @@ - + diff --git a/db/tcl/docs/mpool.html b/db/tcl/docs/mpool.html index 84bb7beaa..83c1f452c 100644 --- a/db/tcl/docs/mpool.html +++ b/db/tcl/docs/mpool.html @@ -1,4 +1,4 @@ - + diff --git a/db/tcl/docs/rep.html b/db/tcl/docs/rep.html index 47686d3d5..d50b62375 100644 --- a/db/tcl/docs/rep.html +++ b/db/tcl/docs/rep.html @@ -1,4 +1,4 @@ - + diff --git a/db/tcl/docs/sequence.html b/db/tcl/docs/sequence.html new file mode 100644 index 000000000..a0b3df068 --- /dev/null +++ b/db/tcl/docs/sequence.html @@ -0,0 +1,92 @@ + + + + + Sequence Commands + + +

      Sequence Commands

      +> berkdb sequence [-auto_commit] [-txn txnid] [-create]
      +
       Implements DBENV->sequence +function. The above options have the usual meanings. A C-level sketch of the wrapped calls follows the option list below.
      +
      +[-cachesize]
      +
      Set the size of the cache in this +handle.
      +
      +[-inc]
      +
      +
      Sequence increments.
      +
      +[-dec]
      +
      +
      Sequence decrements.
      +
      +[-init integer]
      +
      +
      Set the initial value for the sequence.
      +
      +[-max integer]
      +
      Set the maximum value for the sequence.
      +
      +[-min integer]
      +
      +
      Set the minimum value for the sequence.
      +
      +[-wrap]
      +
      Wrap around at max or min.
      +
      +db +key
      +
      +
      Database handle and key of sequence.
      +
      +
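      Illustrative only -- a minimal C sketch of the DB_SEQUENCE calls that the
      berkdb sequence command above is documented as wrapping (db_sequence_create,
      DB_SEQUENCE->open, DB_SEQUENCE->get and DB_SEQUENCE->close, all introduced by
      this patch). The function name use_sequence and the key "my_seq" are
      hypothetical, the dbp handle is assumed to be open already, and error
      handling is abbreviated; this is not the shipped example code.

          #include <string.h>
          #include "db.h"

          int
          use_sequence(dbp)
              DB *dbp;                /* Assumed to be an already-open database. */
          {
              DB_SEQUENCE *seq;
              DBT key;
              db_seq_t value;
              int ret, t_ret;

              if ((ret = db_sequence_create(&seq, dbp, 0)) != 0)
                  return (ret);

              memset(&key, 0, sizeof(DBT));
              key.data = "my_seq";                   /* Hypothetical sequence key. */
              key.size = (u_int32_t)strlen("my_seq");

              /* Create the sequence record if it does not exist yet. */
              if ((ret = seq->open(seq, NULL, &key, DB_CREATE)) == 0)
                  /* Get the next value, advancing the sequence by a delta of 1. */
                  ret = seq->get(seq, NULL, 1, &value, 0);

              if ((t_ret = seq->close(seq, 0)) != 0 && ret == 0)
                  ret = t_ret;
              return (ret);
          }

      In the Tcl interface this roughly corresponds to "berkdb sequence -create
      $db $key" followed by "$seq get 1" and "$seq close".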
      > seq get [-txn txn] +[-auto_commit] [-nosync] delta
      +
      +
      Get the next sequence value and +increment the sequence by delta.
      +
      +
      > seq close
      +
      Close the sequence.
      +
      +
      +
      > seq remove [-auto_commit] [-nosync] +[-txn]
      +
      +
      Remove the sequence.
      +
      +
      > seq get_cachesize
      +
      +
      Return the size of the cache.
      +
      +
      > seq get_db
      +
      +
      Return the underlying db handle.
      +
      +
      > seq get_flags
      +
      Return the flags set on create.
      +
      +
      > seq get_range
      +
      +
      Return the min and max set at create.
      +
      +
      > seq stat
      +
      +
      Implements the DB_SEQUENCE->stat function.
      +
      +
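      The accessor commands above map onto the C accessors defined in this patch
      (__seq_get_cachesize, __seq_get_db, __seq_get_flags, __seq_get_range), whose
      error strings suggest they back DB_SEQUENCE methods of the same names. A
      hedged sketch, assuming an already-opened DB_SEQUENCE handle named seq and a
      hypothetical helper name:

          #include <stdio.h>
          #include "db.h"

          void
          dump_sequence_attrs(seq)
              DB_SEQUENCE *seq;       /* Assumed to be an already-opened sequence. */
          {
              DB *dbp;
              db_seq_t max, min;
              int32_t cachesize;
              u_int32_t flags;

              (void)seq->get_cachesize(seq, &cachesize); /* seq get_cachesize */
              (void)seq->get_db(seq, &dbp);              /* seq get_db */
              (void)seq->get_flags(seq, &flags);         /* seq get_flags */
              (void)seq->get_range(seq, &min, &max);     /* seq get_range */

              printf("cachesize %ld, flags %#lx, range [%lld, %lld]\n",
                  (long)cachesize, (unsigned long)flags,
                  (long long)min, (long long)max);
          }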
      + + diff --git a/db/tcl/docs/test.html b/db/tcl/docs/test.html index cfaaf0de3..a01140183 100644 --- a/db/tcl/docs/test.html +++ b/db/tcl/docs/test.html @@ -1,4 +1,4 @@ - + diff --git a/db/tcl/docs/txn.html b/db/tcl/docs/txn.html index 0a6b6f7dd..8abef4b31 100644 --- a/db/tcl/docs/txn.html +++ b/db/tcl/docs/txn.html @@ -1,4 +1,4 @@ - + diff --git a/db/tcl/tcl_compat.c b/db/tcl/tcl_compat.c index 8a25c618b..8b518f761 100644 --- a/db/tcl/tcl_compat.c +++ b/db/tcl/tcl_compat.c @@ -1,17 +1,15 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: tcl_compat.c,v 11.46 2004/10/07 16:48:39 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: tcl_compat.c,v 11.41 2003/03/10 18:22:09 bostic Exp $"; -#endif /* not lint */ - -#if CONFIG_TEST +#ifdef CONFIG_TEST #ifndef NO_SYSTEM_INCLUDES #include @@ -87,8 +85,9 @@ bdb_HCommand(interp, objc, objv) result = Tcl_GetIntFromObj(interp, objv[2], &nelem); if (result == TCL_OK) { _debug_check(); - ret = hcreate(nelem) == 0 ? 1: 0; - _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "hcreate"); + ret = hcreate((size_t)nelem) == 0 ? 1: 0; + (void)_ReturnSetup( + interp, ret, DB_RETOK_STD(ret), "hcreate"); } break; case HHSEARCH: @@ -133,7 +132,7 @@ bdb_HCommand(interp, objc, objv) return (TCL_ERROR); } _debug_check(); - (void)hdestroy(); + hdestroy(); res = Tcl_NewIntObj(0); break; } @@ -178,14 +177,11 @@ bdb_NdbmOpen(interp, objc, objv, dbpp) NDB_ENDARG }; - u_int32_t open_flags; - int endarg, i, mode, optindex, read_only, result, ret; + int endarg, i, mode, open_flags, optindex, read_only, result, ret; char *arg, *db; result = TCL_OK; - open_flags = 0; - endarg = mode = 0; - read_only = 0; + endarg = mode = open_flags = read_only = 0; if (objc < 2) { Tcl_WrongNumArgs(interp, 2, objv, "?args?"); @@ -237,7 +233,7 @@ bdb_NdbmOpen(interp, objc, objv, dbpp) case NDB_ENDARG: endarg = 1; break; - } /* switch */ + } /* * If, at any time, parsing the args we get an error, @@ -341,6 +337,8 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm) result = TCL_OK; freekey = freedata = 0; + dtmp = ktmp = NULL; + /* * Get the command name index from the object based on the cmds * defined above. 
This SHOULD NOT fail because we already checked @@ -367,7 +365,7 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm) "Bad interface flag for command", TCL_STATIC); return (TCL_ERROR); } - _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "dbmclose"); + (void)_ReturnSetup(interp, ret, DB_RETOK_STD(ret), "dbmclose"); break; case DBMINIT: /* @@ -385,7 +383,7 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm) TCL_STATIC); return (TCL_ERROR); } - _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "dbminit"); + (void)_ReturnSetup(interp, ret, DB_RETOK_STD(ret), "dbminit"); break; case DBMFETCH: /* @@ -401,7 +399,7 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm) DB_RETOK_STD(ret), "dbm fetch"); goto out; } - key.dsize = size; + key.dsize = (int)size; key.dptr = (char *)ktmp; _debug_check(); if (flag == DBTCL_DBM) @@ -415,10 +413,10 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm) goto out; } if (data.dptr == NULL || - (ret = __os_malloc(NULL, data.dsize + 1, &t)) != 0) + (ret = __os_malloc(NULL, (size_t)data.dsize + 1, &t)) != 0) Tcl_SetResult(interp, "-1", TCL_STATIC); else { - memcpy(t, data.dptr, data.dsize); + memcpy(t, data.dptr, (size_t)data.dsize); t[data.dsize] = '\0'; Tcl_SetResult(interp, t, TCL_VOLATILE); __os_free(NULL, t); @@ -442,7 +440,7 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm) DB_RETOK_STD(ret), "dbm fetch"); goto out; } - key.dsize = size; + key.dsize = (int)size; key.dptr = (char *)ktmp; if ((ret = _CopyObjBytes( interp, objv[3], &dtmp, &size, &freedata)) != 0) { @@ -450,7 +448,7 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm) DB_RETOK_STD(ret), "dbm fetch"); goto out; } - data.dsize = size; + data.dsize = (int)size; data.dptr = (char *)dtmp; _debug_check(); if (flag == DBTCL_DBM) @@ -473,7 +471,7 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm) "Bad interface flag for command", TCL_STATIC); return (TCL_ERROR); } - _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "store"); + (void)_ReturnSetup(interp, ret, DB_RETOK_STD(ret), "store"); break; case DBMDELETE: /* @@ -489,7 +487,7 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm) DB_RETOK_STD(ret), "dbm fetch"); goto out; } - key.dsize = size; + key.dsize = (int)size; key.dptr = (char *)ktmp; _debug_check(); if (flag == DBTCL_DBM) @@ -501,7 +499,7 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm) "Bad interface flag for command", TCL_STATIC); return (TCL_ERROR); } - _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "delete"); + (void)_ReturnSetup(interp, ret, DB_RETOK_STD(ret), "delete"); break; case DBMFIRST: /* @@ -522,10 +520,10 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm) return (TCL_ERROR); } if (key.dptr == NULL || - (ret = __os_malloc(NULL, key.dsize + 1, &t)) != 0) + (ret = __os_malloc(NULL, (size_t)key.dsize + 1, &t)) != 0) Tcl_SetResult(interp, "-1", TCL_STATIC); else { - memcpy(t, key.dptr, key.dsize); + memcpy(t, key.dptr, (size_t)key.dsize); t[key.dsize] = '\0'; Tcl_SetResult(interp, t, TCL_VOLATILE); __os_free(NULL, t); @@ -547,7 +545,7 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm) DB_RETOK_STD(ret), "dbm fetch"); goto out; } - key.dsize = size; + key.dsize = (int)size; key.dptr = (char *)ktmp; data = nextkey(key); } else if (flag == DBTCL_NDBM) { @@ -562,21 +560,21 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm) return (TCL_ERROR); } if (data.dptr == NULL || - (ret = __os_malloc(NULL, data.dsize + 1, &t)) != 0) + (ret = __os_malloc(NULL, (size_t)data.dsize + 1, &t)) != 0) Tcl_SetResult(interp, "-1", TCL_STATIC); else { - memcpy(t, data.dptr, data.dsize); + memcpy(t, data.dptr, (size_t)data.dsize); t[data.dsize] = '\0'; 
Tcl_SetResult(interp, t, TCL_VOLATILE); __os_free(NULL, t); } break; } -out: - if (freedata) - (void)__os_free(NULL, dtmp); - if (freekey) - (void)__os_free(NULL, ktmp); + +out: if (dtmp != NULL && freedata) + __os_free(NULL, dtmp); + if (ktmp != NULL && freekey) + __os_free(NULL, ktmp); return (result); } @@ -677,8 +675,8 @@ ndbm_Cmd(clientData, interp, objc, objv) _debug_check(); ret = dbm_clearerr(dbp); if (ret) - _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "clearerr"); + (void)_ReturnSetup( + interp, ret, DB_RETOK_STD(ret), "clearerr"); else res = Tcl_NewIntObj(ret); break; @@ -731,14 +729,16 @@ ndbm_Cmd(clientData, interp, objc, objv) _debug_check(); ret = dbm_rdonly(dbp); if (ret) - _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "rdonly"); + (void)_ReturnSetup( + interp, ret, DB_RETOK_STD(ret), "rdonly"); else res = Tcl_NewIntObj(ret); break; } + /* - * Only set result if we have a res. Otherwise, lower - * functions have already done so. + * Only set result if we have a res. Otherwise, lower functions have + * already done so. */ if (result == TCL_OK && res) Tcl_SetObjResult(interp, res); diff --git a/db/tcl/tcl_db.c b/db/tcl/tcl_db.c index c5270e656..f60be3f43 100644 --- a/db/tcl/tcl_db.c +++ b/db/tcl/tcl_db.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: tcl_db.c,v 11.145 2004/10/07 16:48:39 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: tcl_db.c,v 11.128 2003/11/18 21:36:02 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -33,7 +31,7 @@ static int tcl_DbClose __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *, DBTCL_INFO *)); static int tcl_DbDelete __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *)); static int tcl_DbGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *, int)); -#if CONFIG_TEST +#ifdef CONFIG_TEST static int tcl_DbKeyRange __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *)); #endif static int tcl_DbPut __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *)); @@ -96,7 +94,7 @@ db_Cmd(clientData, interp, objc, objv) Tcl_Obj *CONST objv[]; /* The argument objects */ { static const char *dbcmds[] = { -#if CONFIG_TEST +#ifdef CONFIG_TEST "keyrange", "pget", "rpcid", @@ -136,7 +134,7 @@ db_Cmd(clientData, interp, objc, objv) NULL }; enum dbcmds { -#if CONFIG_TEST +#ifdef CONFIG_TEST DBKEYRANGE, DBPGET, DBRPCID, @@ -213,7 +211,7 @@ db_Cmd(clientData, interp, objc, objv) res = NULL; switch ((enum dbcmds)cmdindex) { -#if CONFIG_TEST +#ifdef CONFIG_TEST case DBKEYRANGE: result = tcl_DbKeyRange(interp, objc, objv, dbp); break; @@ -232,7 +230,7 @@ db_Cmd(clientData, interp, objc, objv) * !!! Retrieve the client ID from the dbp handle directly. * This is for testing purposes only. It is dbp-private data. 
*/ - res = Tcl_NewLongObj(dbp->cl_id); + res = Tcl_NewLongObj((long)dbp->cl_id); break; case DBTEST: result = tcl_EnvTest(interp, objc, objv, dbp->dbenv); @@ -279,13 +277,13 @@ db_Cmd(clientData, interp, objc, objv) _debug_check(); ret = dbp->get_type(dbp, &type); if (type == DB_BTREE) - res = Tcl_NewStringObj("btree", strlen("btree")); + res = NewStringObj("btree", strlen("btree")); else if (type == DB_HASH) - res = Tcl_NewStringObj("hash", strlen("hash")); + res = NewStringObj("hash", strlen("hash")); else if (type == DB_RECNO) - res = Tcl_NewStringObj("recno", strlen("recno")); + res = NewStringObj("recno", strlen("recno")); else if (type == DB_QUEUE) - res = Tcl_NewStringObj("queue", strlen("queue")); + res = NewStringObj("queue", strlen("queue")); else { Tcl_SetResult(interp, "db gettype: Returned unknown type\n", TCL_STATIC); @@ -320,11 +318,10 @@ db_Cmd(clientData, interp, objc, objv) if (result == TCL_OK) { dbip->i_dbdbcid++; ip->i_parent = dbip; - Tcl_CreateObjCommand(interp, newname, + (void)Tcl_CreateObjCommand(interp, newname, (Tcl_ObjCmdProc *)dbc_Cmd, (ClientData)dbc, NULL); - res = - Tcl_NewStringObj(newname, strlen(newname)); + res = NewStringObj(newname, strlen(newname)); _SetInfoData(ip, dbc); } else _DeleteInfo(ip); @@ -343,11 +340,10 @@ db_Cmd(clientData, interp, objc, objv) if (result == TCL_OK) { dbip->i_dbdbcid++; ip->i_parent = dbip; - Tcl_CreateObjCommand(interp, newname, + (void)Tcl_CreateObjCommand(interp, newname, (Tcl_ObjCmdProc *)dbc_Cmd, (ClientData)dbc, NULL); - res = - Tcl_NewStringObj(newname, strlen(newname)); + res = NewStringObj(newname, strlen(newname)); _SetInfoData(ip, dbc); } else _DeleteInfo(ip); @@ -365,7 +361,7 @@ db_Cmd(clientData, interp, objc, objv) ret = dbp->get_bt_minkey(dbp, &value); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db get_bt_minkey")) == TCL_OK) - res = Tcl_NewIntObj(value); + res = Tcl_NewIntObj((int)value); break; case DBGETCACHESIZE: if (objc != 2) { @@ -375,9 +371,9 @@ db_Cmd(clientData, interp, objc, objv) ret = dbp->get_cachesize(dbp, &gbytes, &bytes, &ncache); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db get_cachesize")) == TCL_OK) { - myobjv[0] = Tcl_NewIntObj(gbytes); - myobjv[1] = Tcl_NewIntObj(bytes); - myobjv[2] = Tcl_NewIntObj(ncache); + myobjv[0] = Tcl_NewIntObj((int)gbytes); + myobjv[1] = Tcl_NewIntObj((int)bytes); + myobjv[2] = Tcl_NewIntObj((int)ncache); res = Tcl_NewListObj(3, myobjv); } break; @@ -389,9 +385,8 @@ db_Cmd(clientData, interp, objc, objv) ret = dbp->get_dbname(dbp, &filename, &dbname); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db get_dbname")) == TCL_OK) { - myobjv[0] = Tcl_NewStringObj(filename, - strlen(filename)); - myobjv[1] = Tcl_NewStringObj(dbname, strlen(dbname)); + myobjv[0] = NewStringObj(filename, strlen(filename)); + myobjv[1] = NewStringObj(dbname, strlen(dbname)); res = Tcl_NewListObj(2, myobjv); } break; @@ -403,15 +398,12 @@ db_Cmd(clientData, interp, objc, objv) Tcl_WrongNumArgs(interp, 1, objv, NULL); return (TCL_ERROR); } - ret = dbp->get_env(dbp, &dbenv); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db get_env")) == TCL_OK) { - if (dbenv != NULL && (ip = _PtrToInfo(dbenv)) != NULL) { - envid = ip->i_name; - res = Tcl_NewStringObj(envid, strlen(envid)); - } else - Tcl_ResetResult(interp); - } + dbenv = dbp->get_env(dbp); + if (dbenv != NULL && (ip = _PtrToInfo(dbenv)) != NULL) { + envid = ip->i_name; + res = NewStringObj(envid, strlen(envid)); + } else + Tcl_ResetResult(interp); break; case DBGETERRPFX: if 
(objc != 2) { @@ -419,7 +411,7 @@ db_Cmd(clientData, interp, objc, objv) return (TCL_ERROR); } dbp->get_errpfx(dbp, &strval); - res = Tcl_NewStringObj(strval, strlen(strval)); + res = NewStringObj(strval, strlen(strval)); break; case DBGETFLAGS: result = tcl_DbGetFlags(interp, objc, objv, dbp); @@ -432,7 +424,7 @@ db_Cmd(clientData, interp, objc, objv) ret = dbp->get_h_ffactor(dbp, &value); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db get_h_ffactor")) == TCL_OK) - res = Tcl_NewIntObj(value); + res = Tcl_NewIntObj((int)value); break; case DBGETHNELEM: if (objc != 2) { @@ -442,7 +434,7 @@ db_Cmd(clientData, interp, objc, objv) ret = dbp->get_h_nelem(dbp, &value); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db get_h_nelem")) == TCL_OK) - res = Tcl_NewIntObj(value); + res = Tcl_NewIntObj((int)value); break; case DBGETJOIN: result = tcl_DbGetjoin(interp, objc, objv, dbp); @@ -470,7 +462,7 @@ db_Cmd(clientData, interp, objc, objv) ret = dbp->get_pagesize(dbp, &value); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db get_pagesize")) == TCL_OK) - res = Tcl_NewIntObj(value); + res = Tcl_NewIntObj((int)value); break; case DBGETQEXTENTSIZE: if (objc != 2) { @@ -480,7 +472,7 @@ db_Cmd(clientData, interp, objc, objv) ret = dbp->get_q_extentsize(dbp, &value); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db get_q_extentsize")) == TCL_OK) - res = Tcl_NewIntObj(value); + res = Tcl_NewIntObj((int)value); break; case DBGETREDELIM: if (objc != 2) { @@ -500,17 +492,17 @@ db_Cmd(clientData, interp, objc, objv) ret = dbp->get_re_len(dbp, &value); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db get_re_len")) == TCL_OK) - res = Tcl_NewIntObj(value); + res = Tcl_NewIntObj((int)value); break; case DBGETREPAD: if (objc != 2) { Tcl_WrongNumArgs(interp, 1, objv, NULL); return (TCL_ERROR); } - ret = dbp->get_re_pad(dbp, &result); + ret = dbp->get_re_pad(dbp, &intval); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db get_re_pad")) == TCL_OK) - res = Tcl_NewIntObj(result); + res = Tcl_NewIntObj((int)intval); break; case DBGETRESOURCE: if (objc != 2) { @@ -520,7 +512,7 @@ db_Cmd(clientData, interp, objc, objv) ret = dbp->get_re_source(dbp, &strval); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db get_re_source")) == TCL_OK) - res = Tcl_NewStringObj(strval, strlen(strval)); + res = NewStringObj(strval, strlen(strval)); break; case DBTRUNCATE: result = tcl_DbTruncate(interp, objc, objv, dbp); @@ -545,37 +537,82 @@ tcl_DbStat(interp, objc, objv, dbp) Tcl_Obj *CONST objv[]; /* The argument objects */ DB *dbp; /* Database pointer */ { + static const char *dbstatopts[] = { +#ifdef CONFIG_TEST + "-degree_2", + "-dirty", +#endif + "-faststat", + "-txn", + NULL + }; + enum dbstatopts { +#ifdef CONFIG_TEST + DBCUR_DEGREE2, + DBCUR_DIRTY, +#endif + DBCUR_FASTSTAT, + DBCUR_TXN + }; + DBTYPE type; DB_BTREE_STAT *bsp; DB_HASH_STAT *hsp; DB_QUEUE_STAT *qsp; - void *sp; + DB_TXN *txn; Tcl_Obj *res, *flaglist, *myobjv[2]; - DBTYPE type; u_int32_t flag; - int result, ret; - char *arg; + int i, optindex, result, ret; + char *arg, msg[MSG_SIZE]; + void *sp; result = TCL_OK; flag = 0; - - if (objc > 3) { - Tcl_WrongNumArgs(interp, 2, objv, "?-faststat?"); - return (TCL_ERROR); - } - - if (objc == 3) { - arg = Tcl_GetStringFromObj(objv[2], NULL); - if (strcmp(arg, "-faststat") == 0) - flag = DB_FAST_STAT; - else { - Tcl_SetResult(interp, - "db stat: unknown arg", TCL_STATIC); - return (TCL_ERROR); + txn = NULL; + sp = NULL; + i = 2; + 
while (i < objc) { + if (Tcl_GetIndexFromObj(interp, objv[i], dbstatopts, "option", + TCL_EXACT, &optindex) != TCL_OK) { + result = IS_HELP(objv[i]); + goto error; } + i++; + switch ((enum dbstatopts)optindex) { +#ifdef CONFIG_TEST + case DBCUR_DEGREE2: + flag |= DB_DEGREE_2; + break; + case DBCUR_DIRTY: + flag |= DB_DIRTY_READ; + break; +#endif + case DBCUR_FASTSTAT: + flag |= DB_FAST_STAT; + break; + case DBCUR_TXN: + if (i == objc) { + Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?"); + result = TCL_ERROR; + break; + } + arg = Tcl_GetStringFromObj(objv[i++], NULL); + txn = NAME_TO_TXN(arg); + if (txn == NULL) { + snprintf(msg, MSG_SIZE, + "Stat: Invalid txn: %s\n", arg); + Tcl_SetResult(interp, msg, TCL_VOLATILE); + result = TCL_ERROR; + } + break; + } + if (result != TCL_OK) + break; } + if (result != TCL_OK) + goto error; _debug_check(); - ret = dbp->stat(dbp, &sp, flag); + ret = dbp->stat(dbp, txn, &sp, flag); result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db stat"); if (result == TCL_ERROR) return (result); @@ -644,6 +681,7 @@ tcl_DbStat(interp, objc, objv, dbp) MAKE_STAT_LIST("Leaf pages", bsp->bt_leaf_pg); MAKE_STAT_LIST("Duplicate pages", bsp->bt_dup_pg); MAKE_STAT_LIST("Overflow pages", bsp->bt_over_pg); + MAKE_STAT_LIST("Empty pages", bsp->bt_empty_pg); MAKE_STAT_LIST("Pages on freelist", bsp->bt_free); MAKE_STAT_LIST("Internal pages bytes free", bsp->bt_int_pgfree); @@ -662,8 +700,8 @@ tcl_DbStat(interp, objc, objv, dbp) * include all the interesting flags, and the integer value * isn't useful from Tcl--return the strings instead. */ - myobjv[0] = Tcl_NewStringObj("Flags", strlen("Flags")); - myobjv[1] = _GetFlagsList(interp, dbp->flags, __db_inmemdbflags); + myobjv[0] = NewStringObj("Flags", strlen("Flags")); + myobjv[1] = _GetFlagsList(interp, dbp->flags, __db_get_flags_fn()); flaglist = Tcl_NewListObj(2, myobjv); if (flaglist == NULL) { result = TCL_ERROR; @@ -675,7 +713,8 @@ tcl_DbStat(interp, objc, objv, dbp) Tcl_SetObjResult(interp, res); error: - (void)__os_ufree(dbp->dbenv, sp); + if (sp != NULL) + __os_ufree(dbp->dbenv, sp); return (result); } @@ -709,8 +748,7 @@ tcl_DbClose(interp, objc, objv, dbp, dbip) return (TCL_ERROR); } - i = 2; - while (i < objc) { + for (i = 2; i < objc; ++i) { if (Tcl_GetIndexFromObj(interp, objv[i], dbclose, "option", TCL_EXACT, &optindex) != TCL_OK) { arg = Tcl_GetStringFromObj(objv[i], NULL); @@ -720,7 +758,6 @@ tcl_DbClose(interp, objc, objv, dbp, dbip) Tcl_ResetResult(interp); break; } - i++; switch ((enum dbclose)optindex) { case TCL_DBCLOSE_NOSYNC: flag = DB_NOSYNC; @@ -760,7 +797,7 @@ tcl_DbPut(interp, objc, objv, dbp) DB *dbp; /* Database pointer */ { static const char *dbputopts[] = { -#if CONFIG_TEST +#ifdef CONFIG_TEST "-nodupdata", #endif "-append", @@ -771,7 +808,7 @@ tcl_DbPut(interp, objc, objv, dbp) NULL }; enum dbputopts { -#if CONFIG_TEST +#ifdef CONFIG_TEST DBGET_NODUPDATA, #endif DBPUT_APPEND, @@ -803,6 +840,7 @@ tcl_DbPut(interp, objc, objv, dbp) return (TCL_ERROR); } + dtmp = ktmp = NULL; freekey = freedata = 0; memset(&key, 0, sizeof(key)); memset(&data, 0, sizeof(data)); @@ -850,7 +888,7 @@ tcl_DbPut(interp, objc, objv, dbp) return (IS_HELP(objv[i])); i++; switch ((enum dbputopts)optindex) { -#if CONFIG_TEST +#ifdef CONFIG_TEST case DBGET_NODUPDATA: FLAG_CHECK(flag); flag = DB_NODUPDATA; @@ -937,6 +975,8 @@ tcl_DbPut(interp, objc, objv, dbp) return (result); } } else { + COMPQUIET(recno, 0); + ret = _CopyObjBytes(interp, objv[objc-2], &ktmp, &key.size, &freekey); if (ret != 0) { @@ -948,8 +988,7 @@ 
tcl_DbPut(interp, objc, objv, dbp) } if (auto_commit) flag |= DB_AUTO_COMMIT; - ret = _CopyObjBytes(interp, objv[objc-1], &dtmp, - &data.size, &freedata); + ret = _CopyObjBytes(interp, objv[objc-1], &dtmp, &data.size, &freedata); if (ret != 0) { result = _ReturnSetup(interp, ret, DB_RETOK_DBPUT(ret), "db put"); @@ -959,16 +998,18 @@ tcl_DbPut(interp, objc, objv, dbp) _debug_check(); ret = dbp->put(dbp, txn, &key, &data, flag); result = _ReturnSetup(interp, ret, DB_RETOK_DBPUT(ret), "db put"); + + /* We may have a returned record number. */ if (ret == 0 && - (type == DB_RECNO || type == DB_QUEUE) && flag == DB_APPEND) { + (type == DB_QUEUE || type == DB_RECNO) && flag == DB_APPEND) { res = Tcl_NewWideIntObj((Tcl_WideInt)recno); Tcl_SetObjResult(interp, res); } -out: - if (freedata) - (void)__os_free(dbp->dbenv, dtmp); - if (freekey) - (void)__os_free(dbp->dbenv, ktmp); + +out: if (dtmp != NULL && freedata) + __os_free(dbp->dbenv, dtmp); + if (ktmp != NULL && freekey) + __os_free(dbp->dbenv, ktmp); return (result); } @@ -984,7 +1025,8 @@ tcl_DbGet(interp, objc, objv, dbp, ispget) int ispget; /* 1 for pget, 0 for get */ { static const char *dbgetopts[] = { -#if CONFIG_TEST +#ifdef CONFIG_TEST + "-degree2", "-dirty", "-multi", #endif @@ -1001,7 +1043,8 @@ tcl_DbGet(interp, objc, objv, dbp, ispget) NULL }; enum dbgetopts { -#if CONFIG_TEST +#ifdef CONFIG_TEST + DBGET_DEGREE2, DBGET_DIRTY, DBGET_MULTI, #endif @@ -1021,13 +1064,13 @@ tcl_DbGet(interp, objc, objv, dbp, ispget) DBTYPE ptype, type; DB_TXN *txn; Tcl_Obj **elemv, *retlist; - void *dtmp, *ktmp; + db_recno_t precno, recno; u_int32_t aflag, flag, cflag, isdup, mflag, rmw; int elemc, end, endarg, freekey, freedata, i; int optindex, result, ret, useglob, useprecno, userecno; char *arg, *pattern, *prefix, msg[MSG_SIZE]; - db_recno_t precno, recno; -#if CONFIG_TEST + void *dtmp, *ktmp; +#ifdef CONFIG_TEST int bufsize; #endif @@ -1037,6 +1080,10 @@ tcl_DbGet(interp, objc, objv, dbp, ispget) useglob = userecno = 0; txn = NULL; pattern = prefix = NULL; + dtmp = ktmp = NULL; +#ifdef CONFIG_TEST + COMPQUIET(bufsize, 0); +#endif if (objc < 3) { Tcl_WrongNumArgs(interp, 2, objv, "?-args? key"); @@ -1070,10 +1117,13 @@ tcl_DbGet(interp, objc, objv, dbp, ispget) } i++; switch ((enum dbgetopts)optindex) { -#if CONFIG_TEST +#ifdef CONFIG_TEST case DBGET_DIRTY: rmw |= DB_DIRTY_READ; break; + case DBGET_DEGREE2: + rmw |= DB_DEGREE_2; + break; case DBGET_MULTI: mflag |= DB_MULTIPLE; result = Tcl_GetIntFromObj(interp, objv[i], &bufsize); @@ -1174,7 +1224,7 @@ tcl_DbGet(interp, objc, objv, dbp, ispget) case DBGET_ENDARG: endarg = 1; break; - } /* switch */ + } if (result != TCL_OK) break; if (endarg) @@ -1336,15 +1386,15 @@ tcl_DbGet(interp, objc, objv, dbp, ispget) } key.data = ktmp; } -#if CONFIG_TEST +#ifdef CONFIG_TEST if (mflag & DB_MULTIPLE) { if ((ret = __os_malloc(dbp->dbenv, - bufsize, &save.data)) != 0) { + (size_t)bufsize, &save.data)) != 0) { Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC); goto out; } - save.ulen = bufsize; + save.ulen = (u_int32_t)bufsize; F_CLR(&save, DB_DBT_MALLOC); F_SET(&save, DB_DBT_USERMEM); } @@ -1403,19 +1453,22 @@ tcl_DbGet(interp, objc, objv, dbp, ispget) /* * Free space from DBT. * - * If we set DB_DBT_MALLOC, we need to free the space if - * and only if we succeeded (and thus if DB allocated - * anything). If DB_DBT_MALLOC is not set, this is a bulk - * get buffer, and needs to be freed no matter what. 
+ * If we set DB_DBT_MALLOC, we need to free the space if and + * only if we succeeded and if DB allocated anything (the + * pointer has changed from what we passed in). If + * DB_DBT_MALLOC is not set, this is a bulk get buffer, and + * needs to be freed no matter what. */ - if (F_ISSET(&key, DB_DBT_MALLOC) && ret == 0) - (void)__os_ufree(dbp->dbenv, key.data); - if (F_ISSET(&data, DB_DBT_MALLOC) && ret == 0) - (void)__os_ufree(dbp->dbenv, data.data); + if (F_ISSET(&key, DB_DBT_MALLOC) && ret == 0 && + key.data != ktmp) + __os_ufree(dbp->dbenv, key.data); + if (F_ISSET(&data, DB_DBT_MALLOC) && ret == 0 && + data.data != dtmp) + __os_ufree(dbp->dbenv, data.data); else if (!F_ISSET(&data, DB_DBT_MALLOC)) __os_free(dbp->dbenv, data.data); - if (ispget && ret == 0) - (void)__os_ufree(dbp->dbenv, pkey.data); + if (ispget && ret == 0 && pkey.data != save.data) + __os_ufree(dbp->dbenv, pkey.data); if (result == TCL_OK) Tcl_SetObjResult(interp, retlist); goto out; @@ -1496,17 +1549,17 @@ tcl_DbGet(interp, objc, objv, dbp, ispget) "db get (cursor)"); if (result == TCL_ERROR) goto out1; - if (ret == 0 && pattern && - memcmp(key.data, prefix, strlen(prefix)) != 0) { - /* - * Free space from DB_DBT_MALLOC - */ - (void)__os_ufree(dbp->dbenv, data.data); - goto out1; - } - if (pattern) + if (pattern) { + if (ret == 0 && prefix != NULL && + memcmp(key.data, prefix, strlen(prefix)) != 0) { + /* + * Free space from DB_DBT_MALLOC + */ + __os_ufree(dbp->dbenv, data.data); + goto out1; + } cflag = DB_NEXT; - else + } else cflag = DB_NEXT_DUP; while (ret == 0 && result == TCL_OK) { @@ -1523,8 +1576,8 @@ tcl_DbGet(interp, objc, objv, dbp, ispget) * Free space from DB_DBT_MALLOC */ if (ispget) - (void)__os_ufree(dbp->dbenv, pkey.data); - (void)__os_ufree(dbp->dbenv, data.data); + __os_ufree(dbp->dbenv, pkey.data); + __os_ufree(dbp->dbenv, data.data); if (result != TCL_OK) break; /* @@ -1542,17 +1595,17 @@ tcl_DbGet(interp, objc, objv, dbp, ispget) ret = dbc->c_pget(dbc, &key, &pkey, &data, cflag | rmw); } else ret = dbc->c_get(dbc, &key, &data, cflag | rmw); - if (ret == 0 && pattern && + if (ret == 0 && prefix != NULL && memcmp(key.data, prefix, strlen(prefix)) != 0) { /* * Free space from DB_DBT_MALLOC */ - (void)__os_ufree(dbp->dbenv, data.data); + __os_ufree(dbp->dbenv, data.data); break; } } out1: - dbc->c_close(dbc); + (void)dbc->c_close(dbc); if (result == TCL_OK) Tcl_SetObjResult(interp, retlist); out: @@ -1563,10 +1616,10 @@ out: */ if (prefix != NULL) __os_free(dbp->dbenv, prefix); - if (freedata) - (void)__os_free(dbp->dbenv, dtmp); - if (freekey) - (void)__os_free(dbp->dbenv, ktmp); + if (dtmp != NULL && freedata) + __os_free(dbp->dbenv, dtmp); + if (ktmp != NULL && freekey) + __os_free(dbp->dbenv, ktmp); return (result); } @@ -1611,6 +1664,7 @@ tcl_DbDelete(interp, objc, objv, dbp) return (TCL_ERROR); } + ktmp = NULL; memset(&key, 0, sizeof(key)); /* * The first arg must be -auto_commit, -glob, -txn or a list of keys. @@ -1747,8 +1801,8 @@ tcl_DbDelete(interp, objc, objv, dbp) * If we have any error, set up return result and stop * processing keys. */ - if (freekey) - (void)__os_free(dbp->dbenv, ktmp); + if (ktmp != NULL && freekey) + __os_free(dbp->dbenv, ktmp); if (ret != 0) break; } @@ -1815,7 +1869,7 @@ tcl_DbDelete(interp, objc, objv, dbp) * have multiple nuls at the end, so we free using __os_free(). 
*/ __os_free(dbp->dbenv, prefix); - dbc->c_close(dbc); + (void)dbc->c_close(dbc); result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db del"); } out: @@ -1834,7 +1888,8 @@ tcl_DbCursor(interp, objc, objv, dbp, dbcp) DBC **dbcp; /* Return cursor pointer */ { static const char *dbcuropts[] = { -#if CONFIG_TEST +#ifdef CONFIG_TEST + "-degree_2", "-dirty", "-update", #endif @@ -1842,7 +1897,8 @@ tcl_DbCursor(interp, objc, objv, dbp, dbcp) NULL }; enum dbcuropts { -#if CONFIG_TEST +#ifdef CONFIG_TEST + DBCUR_DEGREE2, DBCUR_DIRTY, DBCUR_UPDATE, #endif @@ -1865,7 +1921,10 @@ tcl_DbCursor(interp, objc, objv, dbp, dbcp) } i++; switch ((enum dbcuropts)optindex) { -#if CONFIG_TEST +#ifdef CONFIG_TEST + case DBCUR_DEGREE2: + flag |= DB_DEGREE_2; + break; case DBCUR_DIRTY: flag |= DB_DIRTY_READ; break; @@ -1931,6 +1990,31 @@ tcl_DbAssociate(interp, objc, objv, dbp) int i, optindex, result, ret; char *arg, msg[MSG_SIZE]; u_int32_t flag; +#ifdef CONFIG_TEST + /* + * When calling DB->associate over RPC, the Tcl API uses + * special flags that the RPC server interprets to set the + * callback correctly. + */ + const char *cbname; + struct { + const char *name; + u_int32_t flag; + } *cb, callbacks[] = { + { "", 0 }, /* A NULL callback in Tcl. */ + { "_s_reversedata", DB_RPC2ND_REVERSEDATA }, + { "_s_noop", DB_RPC2ND_NOOP }, + { "_s_concatkeydata", DB_RPC2ND_CONCATKEYDATA }, + { "_s_concatdatakey", DB_RPC2ND_CONCATDATAKEY }, + { "_s_reverseconcat", DB_RPC2ND_REVERSECONCAT }, + { "_s_truncdata", DB_RPC2ND_TRUNCDATA }, + { "_s_reversedata", DB_RPC2ND_REVERSEDATA }, + { "_s_constant", DB_RPC2ND_CONSTANT }, + { "sj_getzip", DB_RPC2ND_GETZIP }, + { "sj_getname", DB_RPC2ND_GETNAME }, + { NULL, 0 } + }; +#endif txn = NULL; result = TCL_OK; @@ -2003,7 +2087,48 @@ tcl_DbAssociate(interp, objc, objv, dbp) * callbacks. */ sdbip = (DBTCL_INFO *)sdbp->api_internal; + +#ifdef CONFIG_TEST + if (i != objc - 1 && RPC_ON(dbp->dbenv)) { + /* + * The flag values allowed to DB->associate may have changed to + * overlap with the range we've chosen. If this happens, we + * need to reset all of the RPC_2ND_* flags to a new range. + */ + if ((flag & DB_RPC2ND_MASK) != 0) { + snprintf(msg, MSG_SIZE, + "RPC secondary flags overlap -- recalculate!\n"); + Tcl_SetResult(interp, msg, TCL_VOLATILE); + return (TCL_ERROR); + } + + cbname = Tcl_GetStringFromObj(objv[objc - 2], NULL); + for (cb = callbacks; cb->name != NULL; cb++) + if (strcmp(cb->name, cbname) == 0) { + flag |= cb->flag; + break; + } + + if (cb->name == NULL) { + snprintf(msg, MSG_SIZE, + "Associate: unknown callback: %s\n", cbname); + Tcl_SetResult(interp, msg, TCL_VOLATILE); + return (TCL_ERROR); + } + + ret = dbp->associate(dbp, txn, sdbp, NULL, flag); + + /* + * The primary reference isn't set when calling through + * the RPC server, but the Tcl API peeks at it in other + * places (see tcl_DbGet). + */ + if (ret == 0) + sdbp->s_primary = dbp; + } else if (i != objc - 1) { +#else if (i != objc - 1) { +#endif /* * We have 2 args, get the callback. */ @@ -2039,7 +2164,8 @@ tcl_second_call(dbp, pkey, data, skey) DBTCL_INFO *ip; Tcl_Interp *interp; Tcl_Obj *pobj, *dobj, *objv[3]; - int len, result, ret; + size_t len; + int ilen, result, ret; void *retbuf, *databuf; ip = (DBTCL_INFO *)dbp->api_internal; @@ -2050,9 +2176,9 @@ tcl_second_call(dbp, pkey, data, skey) * Create two ByteArray objects, with the contents of the pkey * and data DBTs that are our inputs. 
*/ - pobj = Tcl_NewByteArrayObj(pkey->data, pkey->size); + pobj = Tcl_NewByteArrayObj(pkey->data, (int)pkey->size); Tcl_IncrRefCount(pobj); - dobj = Tcl_NewByteArrayObj(data->data, data->size); + dobj = Tcl_NewByteArrayObj(data->data, (int)data->size); Tcl_IncrRefCount(dobj); objv[1] = pobj; @@ -2069,8 +2195,8 @@ tcl_second_call(dbp, pkey, data, skey) return (EINVAL); } - retbuf = - Tcl_GetByteArrayFromObj(Tcl_GetObjResult(interp), &len); + retbuf = Tcl_GetByteArrayFromObj(Tcl_GetObjResult(interp), &ilen); + len = (size_t)ilen; /* * retbuf is owned by Tcl; copy it into malloc'ed memory. @@ -2108,8 +2234,9 @@ tcl_DbJoin(interp, objc, objv, dbp, dbcp) DBJ_NOSORT }; DBC **listp; + size_t size; u_int32_t flag; - int adj, i, j, optindex, size, result, ret; + int adj, i, j, optindex, result, ret; char *arg, msg[MSG_SIZE]; result = TCL_OK; @@ -2119,9 +2246,7 @@ tcl_DbJoin(interp, objc, objv, dbp, dbcp) return (TCL_ERROR); } - i = 2; - adj = i; - while (i < objc) { + for (adj = i = 2; i < objc; i++) { if (Tcl_GetIndexFromObj(interp, objv[i], dbjopts, "option", TCL_EXACT, &optindex) != TCL_OK) { result = IS_HELP(objv[i]); @@ -2131,7 +2256,6 @@ tcl_DbJoin(interp, objc, objv, dbp, dbcp) Tcl_ResetResult(interp); break; } - i++; switch ((enum dbjopts)optindex) { case DBJ_NOSORT: flag |= DB_JOIN_NOSORT; @@ -2144,7 +2268,7 @@ tcl_DbJoin(interp, objc, objv, dbp, dbcp) /* * Allocate one more for NULL ptr at end of list. */ - size = sizeof(DBC *) * ((objc - adj) + 1); + size = sizeof(DBC *) * (size_t)((objc - adj) + 1); ret = __os_malloc(dbp->dbenv, size, &listp); if (ret != 0) { Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC); @@ -2184,14 +2308,14 @@ tcl_DbGetjoin(interp, objc, objv, dbp) DB *dbp; /* Database pointer */ { static const char *dbgetjopts[] = { -#if CONFIG_TEST +#ifdef CONFIG_TEST "-nosort", #endif "-txn", NULL }; enum dbgetjopts { -#if CONFIG_TEST +#ifdef CONFIG_TEST DBGETJ_NOSORT, #endif DBGETJ_TXN @@ -2203,12 +2327,14 @@ tcl_DbGetjoin(interp, objc, objv, dbp) DBT key, data; Tcl_Obj **elemv, *retlist; void *ktmp; + size_t size; u_int32_t flag; - int adj, elemc, freekey, i, j, optindex, result, ret, size; + int adj, elemc, freekey, i, j, optindex, result, ret; char *arg, msg[MSG_SIZE]; result = TCL_OK; flag = 0; + ktmp = NULL; freekey = 0; if (objc < 3) { Tcl_WrongNumArgs(interp, 2, objv, "{db1 key1} {db2 key2} ..."); @@ -2230,7 +2356,7 @@ tcl_DbGetjoin(interp, objc, objv, dbp) } i++; switch ((enum dbgetjopts)optindex) { -#if CONFIG_TEST +#ifdef CONFIG_TEST case DBGETJ_NOSORT: flag |= DB_JOIN_NOSORT; adj++; @@ -2256,7 +2382,7 @@ tcl_DbGetjoin(interp, objc, objv, dbp) } if (result != TCL_OK) return (result); - size = sizeof(DBC *) * ((objc - adj) + 1); + size = sizeof(DBC *) * (size_t)((objc - adj) + 1); ret = __os_malloc(NULL, size, &listp); if (ret != 0) { Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC); @@ -2328,19 +2454,19 @@ tcl_DbGetjoin(interp, objc, objv, dbp) result = _SetListElem(interp, retlist, key.data, key.size, data.data, data.size); - (void)__os_ufree(dbp->dbenv, key.data); - (void)__os_ufree(dbp->dbenv, data.data); + __os_ufree(dbp->dbenv, key.data); + __os_ufree(dbp->dbenv, data.data); } } - dbc->c_close(dbc); + (void)dbc->c_close(dbc); if (result == TCL_OK) Tcl_SetObjResult(interp, retlist); out: - if (freekey) - (void)__os_free(dbp->dbenv, ktmp); + if (ktmp != NULL && freekey) + __os_free(dbp->dbenv, ktmp); while (j) { if (listp[j]) - (listp[j])->c_close(listp[j]); + (void)(listp[j])->c_close(listp[j]); j--; } __os_free(dbp->dbenv, listp); @@ -2370,6 +2496,7 @@ 
tcl_DbGetFlags(interp, objc, objv, dbp) { DB_DUP, "-dup" }, { DB_DUPSORT, "-dupsort" }, { DB_ENCRYPT, "-encrypt" }, + { DB_INORDER, "-inorder" }, { DB_TXN_NOT_DURABLE, "-notdurable" }, { DB_RECNUM, "-recnum" }, { DB_RENUMBER, "-renumber" }, @@ -2384,18 +2511,19 @@ tcl_DbGetFlags(interp, objc, objv, dbp) } ret = dbp->get_flags(dbp, &flags); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db get_flags")) == TCL_OK) { + if ((result = _ReturnSetup( + interp, ret, DB_RETOK_STD(ret), "db get_flags")) == TCL_OK) { buf[0] = '\0'; for (i = 0; db_flags[i].flag != 0; i++) if (LF_ISSET(db_flags[i].flag)) { if (strlen(buf) > 0) - strncat(buf, " ", sizeof(buf)); - strncat(buf, db_flags[i].arg, sizeof(buf)); + (void)strncat(buf, " ", sizeof(buf)); + (void)strncat( + buf, db_flags[i].arg, sizeof(buf)); } - res = Tcl_NewStringObj(buf, strlen(buf)); + res = NewStringObj(buf, strlen(buf)); Tcl_SetObjResult(interp, res); } @@ -2423,6 +2551,7 @@ tcl_DbGetOpenFlags(interp, objc, objv, dbp) } open_flags[] = { { DB_AUTO_COMMIT, "-auto_commit" }, { DB_CREATE, "-create" }, + { DB_DEGREE_2, "-degree_2" }, { DB_DIRTY_READ, "-dirty" }, { DB_EXCL, "-excl" }, { DB_NOMMAP, "-nommap" }, @@ -2438,18 +2567,19 @@ tcl_DbGetOpenFlags(interp, objc, objv, dbp) } ret = dbp->get_open_flags(dbp, &flags); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db get_open_flags")) == TCL_OK) { + if ((result = _ReturnSetup( + interp, ret, DB_RETOK_STD(ret), "db get_open_flags")) == TCL_OK) { buf[0] = '\0'; for (i = 0; open_flags[i].flag != 0; i++) if (LF_ISSET(open_flags[i].flag)) { if (strlen(buf) > 0) - strncat(buf, " ", sizeof(buf)); - strncat(buf, open_flags[i].arg, sizeof(buf)); + (void)strncat(buf, " ", sizeof(buf)); + (void)strncat( + buf, open_flags[i].arg, sizeof(buf)); } - res = Tcl_NewStringObj(buf, strlen(buf)); + res = NewStringObj(buf, strlen(buf)); Tcl_SetObjResult(interp, res); } @@ -2476,6 +2606,7 @@ tcl_DbCount(interp, objc, objv, dbp) res = NULL; count = 0; freekey = ret = 0; + ktmp = NULL; result = TCL_OK; if (objc != 3) { @@ -2535,14 +2666,14 @@ tcl_DbCount(interp, objc, objv, dbp) } res = Tcl_NewWideIntObj((Tcl_WideInt)count); Tcl_SetObjResult(interp, res); -out: - if (freekey) - (void)__os_free(dbp->dbenv, ktmp); + +out: if (ktmp != NULL && freekey) + __os_free(dbp->dbenv, ktmp); (void)dbc->c_close(dbc); return (result); } -#if CONFIG_TEST +#ifdef CONFIG_TEST /* * tcl_DbKeyRange -- */ @@ -2571,17 +2702,17 @@ tcl_DbKeyRange(interp, objc, objv, dbp) int freekey, i, myobjc, optindex, result, ret; char *arg, msg[MSG_SIZE]; - result = TCL_OK; + ktmp = NULL; flag = 0; freekey = 0; + result = TCL_OK; if (objc < 3) { Tcl_WrongNumArgs(interp, 2, objv, "?-txn id? key"); return (TCL_ERROR); } txn = NULL; - i = 2; - while (i < objc) { + for (i = 2; i < objc;) { if (Tcl_GetIndexFromObj(interp, objv[i], dbkeyropts, "option", TCL_EXACT, &optindex) != TCL_OK) { result = IS_HELP(objv[i]); @@ -2656,9 +2787,9 @@ tcl_DbKeyRange(interp, objc, objv, dbp) retlist = Tcl_NewListObj(myobjc, myobjv); if (result == TCL_OK) Tcl_SetObjResult(interp, retlist); -out: - if (freekey) - (void)__os_free(dbp->dbenv, ktmp); + +out: if (ktmp != NULL && freekey) + __os_free(dbp->dbenv, ktmp); return (result); } #endif diff --git a/db/tcl/tcl_db_pkg.c b/db/tcl/tcl_db_pkg.c index 260dfb232..3baeeb9a6 100644 --- a/db/tcl/tcl_db_pkg.c +++ b/db/tcl/tcl_db_pkg.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. 
All rights reserved. + * + * $Id: tcl_db_pkg.c,v 11.188 2004/10/12 23:54:14 ubell Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: tcl_db_pkg.c,v 11.164 2003/11/14 18:45:11 sue Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -19,7 +17,7 @@ static const char revid[] = "$Id: tcl_db_pkg.c,v 11.164 2003/11/14 18:45:11 sue #include #endif -#if CONFIG_TEST +#ifdef CONFIG_TEST #define DB_DBM_HSEARCH 1 #endif @@ -43,11 +41,14 @@ static int bdb_DbOpen __P((Tcl_Interp *, int, Tcl_Obj * CONST*, static int bdb_DbRemove __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); static int bdb_DbRename __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); static int bdb_Version __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); +static int bdb_SeqOpen __P((Tcl_Interp *, int, Tcl_Obj * CONST*, + DBTCL_INFO *, DB_SEQUENCE **)); -#if CONFIG_TEST +#ifdef CONFIG_TEST static int bdb_DbUpgrade __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); static int bdb_DbVerify __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); static int bdb_Handles __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); +static int bdb_MsgType __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); static int tcl_bt_compare __P((DB *, const DBT *, const DBT *)); static int tcl_compare_callback __P((DB *, const DBT *, const DBT *, @@ -82,17 +83,19 @@ Db_tcl_Init(interp) if (code != TCL_OK) return (code); - Tcl_CreateObjCommand(interp, "berkdb", (Tcl_ObjCmdProc *)berkdb_Cmd, - (ClientData)0, NULL); + (void)Tcl_CreateObjCommand(interp, + "berkdb", (Tcl_ObjCmdProc *)berkdb_Cmd, (ClientData)0, NULL); /* * Create shared global debugging variables */ - Tcl_LinkVar(interp, "__debug_on", (char *)&__debug_on, TCL_LINK_INT); - Tcl_LinkVar(interp, "__debug_print", (char *)&__debug_print, - TCL_LINK_INT); - Tcl_LinkVar(interp, "__debug_stop", (char *)&__debug_stop, - TCL_LINK_INT); - Tcl_LinkVar(interp, "__debug_test", (char *)&__debug_test, + (void)Tcl_LinkVar( + interp, "__debug_on", (char *)&__debug_on, TCL_LINK_INT); + (void)Tcl_LinkVar( + interp, "__debug_print", (char *)&__debug_print, TCL_LINK_INT); + (void)Tcl_LinkVar( + interp, "__debug_stop", (char *)&__debug_stop, TCL_LINK_INT); + (void)Tcl_LinkVar( + interp, "__debug_test", (char *)&__debug_test, TCL_LINK_INT); LIST_INIT(&__db_infohead); return (TCL_OK); @@ -119,9 +122,10 @@ berkdb_Cmd(notused, interp, objc, objv) Tcl_Obj *CONST objv[]; /* The argument objects */ { static const char *berkdbcmds[] = { -#if CONFIG_TEST +#ifdef CONFIG_TEST "dbverify", "handles", + "msgtype", "upgrade", #endif "dbremove", @@ -129,8 +133,11 @@ berkdb_Cmd(notused, interp, objc, objv) "env", "envremove", "open", +#ifdef HAVE_SEQUENCE + "sequence", +#endif "version", -#if CONFIG_TEST +#ifdef CONFIG_TEST /* All below are compatibility functions */ "hcreate", "hsearch", "hdestroy", "dbminit", "fetch", "store", @@ -146,9 +153,10 @@ berkdb_Cmd(notused, interp, objc, objv) * All commands enums below ending in X are compatibility */ enum berkdbcmds { -#if CONFIG_TEST +#ifdef CONFIG_TEST BDB_DBVERIFY, BDB_HANDLES, + BDB_MSGTYPE, BDB_UPGRADE, #endif BDB_DBREMOVE, @@ -156,8 +164,11 @@ berkdb_Cmd(notused, interp, objc, objv) BDB_ENV, BDB_ENVREMOVE, BDB_OPEN, +#ifdef HAVE_SEQUENCE + BDB_SEQUENCE, +#endif BDB_VERSION, -#if CONFIG_TEST +#ifdef CONFIG_TEST BDB_HCREATEX, BDB_HSEARCHX, BDB_HDESTROYX, BDB_DBMINITX, BDB_FETCHX, BDB_STOREX, BDB_DELETEX, BDB_FIRSTKEYX, BDB_NEXTKEYX, @@ -168,9 +179,15 @@ berkdb_Cmd(notused, interp, objc, objv) }; static int env_id = 0; static int db_id = 0; +#ifdef HAVE_SEQUENCE + static int seq_id = 0; +#endif 
DB *dbp; -#if CONFIG_TEST +#ifdef HAVE_SEQUENCE + DB_SEQUENCE *seq; +#endif +#ifdef CONFIG_TEST DBM *ndbmp; static int ndbm_id = 0; #endif @@ -199,13 +216,16 @@ berkdb_Cmd(notused, interp, objc, objv) return (IS_HELP(objv[1])); res = NULL; switch ((enum berkdbcmds)cmdindex) { -#if CONFIG_TEST +#ifdef CONFIG_TEST case BDB_DBVERIFY: result = bdb_DbVerify(interp, objc, objv); break; case BDB_HANDLES: result = bdb_Handles(interp, objc, objv); break; + case BDB_MSGTYPE: + result = bdb_MsgType(interp, objc, objv); + break; case BDB_UPGRADE: result = bdb_DbUpgrade(interp, objc, objv); break; @@ -221,12 +241,11 @@ berkdb_Cmd(notused, interp, objc, objv) result = bdb_EnvOpen(interp, objc, objv, ip, &envp); if (result == TCL_OK && envp != NULL) { env_id++; - Tcl_CreateObjCommand(interp, newname, + (void)Tcl_CreateObjCommand(interp, newname, (Tcl_ObjCmdProc *)env_Cmd, (ClientData)envp, NULL); /* Use ip->i_name - newname is overwritten */ - res = - Tcl_NewStringObj(newname, strlen(newname)); + res = NewStringObj(newname, strlen(newname)); _SetInfoData(ip, envp); } else _DeleteInfo(ip); @@ -252,12 +271,11 @@ berkdb_Cmd(notused, interp, objc, objv) result = bdb_DbOpen(interp, objc, objv, ip, &dbp); if (result == TCL_OK && dbp != NULL) { db_id++; - Tcl_CreateObjCommand(interp, newname, + (void)Tcl_CreateObjCommand(interp, newname, (Tcl_ObjCmdProc *)db_Cmd, (ClientData)dbp, NULL); /* Use ip->i_name - newname is overwritten */ - res = - Tcl_NewStringObj(newname, strlen(newname)); + res = NewStringObj(newname, strlen(newname)); _SetInfoData(ip, dbp); } else _DeleteInfo(ip); @@ -267,7 +285,30 @@ berkdb_Cmd(notused, interp, objc, objv) result = TCL_ERROR; } break; -#if CONFIG_TEST +#ifdef HAVE_SEQUENCE + case BDB_SEQUENCE: + snprintf(newname, sizeof(newname), "seq%d", seq_id); + ip = _NewInfo(interp, NULL, newname, I_SEQ); + if (ip != NULL) { + result = bdb_SeqOpen(interp, objc, objv, ip, &seq); + if (result == TCL_OK && seq != NULL) { + seq_id++; + (void)Tcl_CreateObjCommand(interp, newname, + (Tcl_ObjCmdProc *)seq_Cmd, + (ClientData)seq, NULL); + /* Use ip->i_name - newname is overwritten */ + res = NewStringObj(newname, strlen(newname)); + _SetInfoData(ip, seq); + } else + _DeleteInfo(ip); + } else { + Tcl_SetResult(interp, "Could not set up info", + TCL_STATIC); + result = TCL_ERROR; + } + break; +#endif +#ifdef CONFIG_TEST case BDB_HCREATEX: case BDB_HSEARCHX: case BDB_HDESTROYX: @@ -289,12 +330,11 @@ berkdb_Cmd(notused, interp, objc, objv) result = bdb_NdbmOpen(interp, objc, objv, &ndbmp); if (result == TCL_OK) { ndbm_id++; - Tcl_CreateObjCommand(interp, newname, + (void)Tcl_CreateObjCommand(interp, newname, (Tcl_ObjCmdProc *)ndbm_Cmd, (ClientData)ndbmp, NULL); /* Use ip->i_name - newname is overwritten */ - res = - Tcl_NewStringObj(newname, strlen(newname)); + res = NewStringObj(newname, strlen(newname)); _SetInfoData(ip, ndbmp); } else _DeleteInfo(ip); @@ -345,7 +385,7 @@ bdb_EnvOpen(interp, objc, objv, ip, env) DB_ENV **env; /* Environment pointer */ { static const char *envopen[] = { -#if CONFIG_TEST +#ifdef CONFIG_TEST "-alloc", "-auto_commit", "-cdb", @@ -361,20 +401,22 @@ bdb_EnvOpen(interp, objc, objv, ip, env) "-lock_timeout", "-log", "-log_buffer", + "-log_inmemory", "-log_max", "-log_regionmax", "-log_remove", - "-mmapsize", - "-nommap", - "-notdurable", + "-mpool_max_openfd", + "-mpool_max_write", + "-mpool_mmap_size", + "-mpool_nommap", "-overwrite", "-region_init", "-rep_client", - "-rep_logsonly", "-rep_master", "-rep_transport", "-server", "-server_timeout", + "-set_intermediate_dir", 
"-thread", "-time_notgranted", "-txn_timeout", @@ -410,7 +452,7 @@ bdb_EnvOpen(interp, objc, objv, ip, env) * which is close to but not quite alphabetical. */ enum envopen { -#if CONFIG_TEST +#ifdef CONFIG_TEST ENV_ALLOC, ENV_AUTO_COMMIT, ENV_CDB, @@ -426,20 +468,22 @@ bdb_EnvOpen(interp, objc, objv, ip, env) ENV_LOCK_TIMEOUT, ENV_LOG, ENV_LOG_BUFFER, + ENV_LOG_INMEMORY, ENV_LOG_MAX, ENV_LOG_REGIONMAX, ENV_LOG_REMOVE, - ENV_MMAPSIZE, - ENV_NOMMAP, - ENV_NOTDURABLE, + ENV_MPOOL_MAX_OPENFD, + ENV_MPOOL_MAX_WRITE, + ENV_MPOOL_MMAP_SIZE, + ENV_MPOOL_NOMMAP, ENV_OVERWRITE, ENV_REGION_INIT, ENV_REP_CLIENT, - ENV_REP_LOGSONLY, ENV_REP_MASTER, ENV_REP_TRANSPORT, ENV_SERVER, ENV_SERVER_TO, + ENV_SET_INTERMEDIATE_DIR, ENV_THREAD, ENV_TIME_NOTGRANTED, ENV_TXN_TIMEOUT, @@ -469,17 +513,18 @@ bdb_EnvOpen(interp, objc, objv, ip, env) ENV_USE_ENVIRON_ROOT }; Tcl_Obj **myobjv; - u_int32_t cr_flags, gbytes, bytes, ncaches, logbufset, logmaxset; + u_int32_t cr_flags, gbytes, bytes, logbufset, logmaxset; u_int32_t open_flags, rep_flags, set_flags, uintarg; - int i, mode, myobjc, optindex, result, ret; + int i, mode, myobjc, ncaches, optindex, result, ret; long client_to, server_to, shm; char *arg, *home, *passwd, *server; -#if CONFIG_TEST +#ifdef CONFIG_TEST Tcl_Obj **myobjv1; time_t timestamp; - u_int32_t detect, size; + long v; + u_int32_t detect; u_int8_t *conflicts; - int intarg, j, nmodes, temp; + int intarg, intarg2, j, nmodes, temp; #endif result = TCL_OK; @@ -521,8 +566,8 @@ bdb_EnvOpen(interp, objc, objv, ip, env) Tcl_ResetResult(interp); continue; } +#ifdef CONFIG_TEST switch ((enum envopen)optindex) { -#if CONFIG_TEST case ENV_SERVER: if (i >= objc) { Tcl_WrongNumArgs(interp, 2, objv, @@ -555,10 +600,10 @@ bdb_EnvOpen(interp, objc, objv, ip, env) result = Tcl_GetLongFromObj(interp, objv[i++], &client_to); break; -#endif default: break; } +#endif } if (result != TCL_OK) return (TCL_ERROR); @@ -604,7 +649,7 @@ bdb_EnvOpen(interp, objc, objv, ip, env) } i++; switch ((enum envopen)optindex) { -#if CONFIG_TEST +#ifdef CONFIG_TEST case ENV_SERVER: case ENV_SERVER_TO: case ENV_CLIENT_TO: @@ -667,8 +712,9 @@ bdb_EnvOpen(interp, objc, objv, ip, env) result = TCL_ERROR; break; } - size = sizeof(u_int8_t) * nmodes*nmodes; - ret = __os_malloc(*env, size, &conflicts); + + ret = __os_malloc(*env, sizeof(u_int8_t) * + (size_t)nmodes * (size_t)nmodes, &conflicts); if (ret != 0) { result = TCL_ERROR; break; @@ -703,6 +749,8 @@ bdb_EnvOpen(interp, objc, objv, ip, env) detect = DB_LOCK_EXPIRE; else if (strcmp(arg, "maxlocks") == 0) detect = DB_LOCK_MAXLOCKS; + else if (strcmp(arg, "maxwrites") == 0) + detect = DB_LOCK_MAXWRITE; else if (strcmp(arg, "minlocks") == 0) detect = DB_LOCK_MINLOCKS; else if (strcmp(arg, "minwrites") == 0) @@ -770,22 +818,23 @@ bdb_EnvOpen(interp, objc, objv, ip, env) result = TCL_ERROR; break; } - result = Tcl_GetLongFromObj(interp, objv[i++], - (long *)×tamp); - if (result == TCL_OK) { - _debug_check(); - if (optindex == ENV_TXN_TIME) - ret = (*env)-> - set_tx_timestamp(*env, ×tamp); - else - ret = (*env)->set_timeout(*env, - (db_timeout_t)timestamp, - optindex == ENV_TXN_TIMEOUT ? 
- DB_SET_TXN_TIMEOUT : - DB_SET_LOCK_TIMEOUT); - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "txn_timestamp"); - } + + if ((result = Tcl_GetLongFromObj( + interp, objv[i++], &v)) != TCL_OK) + break; + timestamp = (time_t)v; + + _debug_check(); + if ((enum envopen)optindex == ENV_TXN_TIME) + ret = + (*env)->set_tx_timestamp(*env, ×tamp); + else + ret = (*env)->set_timeout(*env, + (db_timeout_t)timestamp, + (enum envopen)optindex == ENV_TXN_TIMEOUT ? + DB_SET_TXN_TIMEOUT : DB_SET_LOCK_TIMEOUT); + result = _ReturnSetup(interp, ret, + DB_RETOK_STD(ret), "txn_timestamp"); break; case ENV_LOG: FLD_SET(open_flags, DB_INIT_LOG | DB_INIT_MPOOL); @@ -816,6 +865,9 @@ bdb_EnvOpen(interp, objc, objv, ip, env) } } break; + case ENV_LOG_INMEMORY: + FLD_SET(set_flags, DB_LOG_INMEMORY); + break; case ENV_LOG_MAX: if (i >= objc) { Tcl_WrongNumArgs(interp, 2, objv, @@ -852,10 +904,49 @@ bdb_EnvOpen(interp, objc, objv, ip, env) case ENV_LOG_REMOVE: FLD_SET(set_flags, DB_LOG_AUTOREMOVE); break; - case ENV_MMAPSIZE: + case ENV_MPOOL_MAX_OPENFD: + if (i >= objc) { + Tcl_WrongNumArgs(interp, 2, objv, + "?-mpool_max_openfd fd_count?"); + result = TCL_ERROR; + break; + } + result = Tcl_GetIntFromObj(interp, objv[i++], &intarg); + if (result == TCL_OK) { + _debug_check(); + ret = (*env)->set_mp_max_openfd(*env, intarg); + result = _ReturnSetup(interp, ret, + DB_RETOK_STD(ret), "mpool_max_openfd"); + } + break; + case ENV_MPOOL_MAX_WRITE: + result = Tcl_ListObjGetElements(interp, objv[i], + &myobjc, &myobjv); + if (result == TCL_OK) + i++; + else + break; + if (myobjc != 2) { + Tcl_WrongNumArgs(interp, 2, objv, + "?-mpool_max_write {nwrite nsleep}?"); + result = TCL_ERROR; + break; + } + result = Tcl_GetIntFromObj(interp, myobjv[0], &intarg); + if (result != TCL_OK) + break; + result = Tcl_GetIntFromObj(interp, myobjv[1], &intarg2); + if (result != TCL_OK) + break; + _debug_check(); + ret = (*env)->set_mp_max_write(*env, intarg, intarg2); + result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), + "set_mp_max_write"); + break; + case ENV_MPOOL_MMAP_SIZE: if (i >= objc) { Tcl_WrongNumArgs(interp, 2, objv, - "?-mmapsize size?"); + "?-mpool_mmap_size size?"); result = TCL_ERROR; break; } @@ -865,10 +956,10 @@ bdb_EnvOpen(interp, objc, objv, ip, env) ret = (*env)->set_mp_mmapsize(*env, (size_t)intarg); result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "mmapsize"); + DB_RETOK_STD(ret), "mpool_mmap_size"); } break; - case ENV_NOMMAP: + case ENV_MPOOL_NOMMAP: FLD_SET(set_flags, DB_NOMMAP); break; case ENV_OVERWRITE: @@ -880,14 +971,26 @@ bdb_EnvOpen(interp, objc, objv, ip, env) result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "region_init"); break; + case ENV_SET_INTERMEDIATE_DIR: + if (i >= objc) { + Tcl_WrongNumArgs(interp, + 2, objv, "?-set_intermediate_dir mode?"); + result = TCL_ERROR; + break; + } + result = Tcl_GetIntFromObj(interp, objv[i++], &intarg); + if (result == TCL_OK) { + _debug_check(); + ret = (*env)-> + set_intermediate_dir(*env, intarg, 0); + result = _ReturnSetup(interp, ret, + DB_RETOK_STD(ret), "set_intermediate_dir"); + } + break; case ENV_REP_CLIENT: rep_flags = DB_REP_CLIENT; FLD_SET(open_flags, DB_INIT_REP); break; - case ENV_REP_LOGSONLY: - rep_flags = DB_REP_LOGSONLY; - FLD_SET(open_flags, DB_INIT_REP); - break; case ENV_REP_MASTER: rep_flags = DB_REP_MASTER; FLD_SET(open_flags, DB_INIT_REP); @@ -966,9 +1069,6 @@ bdb_EnvOpen(interp, objc, objv, ip, env) result = tcl_EnvVerbose(interp, *env, myobjv[0], myobjv[1]); break; - case ENV_NOTDURABLE: - FLD_SET(set_flags, 
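Note: the hunks above replace the old -mmapsize and -nommap environment options with -mpool_mmap_size and -mpool_nommap, and add -mpool_max_openfd, -mpool_max_write, -set_intermediate_dir and -log_inmemory, each a thin wrapper over a DB_ENV method. A rough application-level sketch of the same calls, not part of this patch; the numeric values are arbitrary examples:

#include <db.h>

/*
 * tune_env --
 *	Illustrative only: apply the mpool, intermediate-directory and
 *	in-memory-log settings exercised by the new Tcl options, then
 *	open the environment.
 */
int
tune_env(const char *home)
{
        DB_ENV *dbenv;
        int ret;

        if ((ret = db_env_create(&dbenv, 0)) != 0)
                return (ret);

        /* Memory-pool tuning: open file-descriptor limit, write throttle. */
        (void)dbenv->set_mp_max_openfd(dbenv, 16);
        (void)dbenv->set_mp_max_write(dbenv, 32, 1);
        (void)dbenv->set_mp_mmapsize(dbenv, 8 * 1024 * 1024);

        /* Create any missing intermediate directories with mode 0755. */
        (void)dbenv->set_intermediate_dir(dbenv, 0755, 0);

        /* Keep the log in memory; size the log buffer accordingly. */
        (void)dbenv->set_flags(dbenv, DB_LOG_INMEMORY, 1);
        (void)dbenv->set_lg_bsize(dbenv, 4 * 1024 * 1024);

        if ((ret = dbenv->open(dbenv, home, DB_CREATE | DB_INIT_LOCK |
            DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0)
                (void)dbenv->close(dbenv, 0);
        return (ret);
}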
DB_TXN_NOT_DURABLE); - break; case ENV_WRNOSYNC: FLD_SET(set_flags, DB_TXN_WRITE_NOSYNC); break; @@ -1081,7 +1181,7 @@ bdb_EnvOpen(interp, objc, objv, ip, env) result = _GetUInt32(interp, myobjv[1], &bytes); if (result != TCL_OK) break; - result = _GetUInt32(interp, myobjv[2], &ncaches); + result = Tcl_GetIntFromObj(interp, myobjv[2], &ncaches); if (result != TCL_OK) break; _debug_check(); @@ -1128,23 +1228,7 @@ bdb_EnvOpen(interp, objc, objv, ip, env) break; } arg = Tcl_GetStringFromObj(objv[i++], NULL); - /* - * If the user already set one, close it. - */ - if (ip->i_err != NULL && - ip->i_err != stdout && ip->i_err != stderr) - fclose(ip->i_err); - - if (strcmp(arg, "/dev/stdout") == 0) - ip->i_err = stdout; - else if (strcmp(arg, "/dev/stderr") == 0) - ip->i_err = stderr; - else - ip->i_err = fopen(arg, "a"); - if (ip->i_err != NULL) { - _debug_check(); - (*env)->set_errfile(*env, ip->i_err); - } + tcl_EnvSetErrfile(interp, *env, ip, arg); break; case ENV_ERRPFX: if (i >= objc) { @@ -1154,21 +1238,8 @@ bdb_EnvOpen(interp, objc, objv, ip, env) break; } arg = Tcl_GetStringFromObj(objv[i++], NULL); - /* - * If the user already set one, free it. - */ - if (ip->i_errpfx != NULL) - __os_free(NULL, ip->i_errpfx); - if ((ret = - __os_strdup(*env, arg, &ip->i_errpfx)) != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "__os_strdup"); - break; - } - if (ip->i_errpfx != NULL) { - _debug_check(); - (*env)->set_errpfx(*env, ip->i_errpfx); - } + _debug_check(); + result = tcl_EnvSetErrpfx(interp, *env, ip, arg); break; case ENV_DATA_DIR: if (i >= objc) { @@ -1268,7 +1339,7 @@ bdb_EnvOpen(interp, objc, objv, ip, env) error: if (result == TCL_ERROR) { if (ip->i_err && ip->i_err != stdout && ip->i_err != stderr) { - fclose(ip->i_err); + (void)fclose(ip->i_err); ip->i_err = NULL; } (void)(*env)->close(*env, 0); @@ -1305,7 +1376,7 @@ bdb_DbOpen(interp, objc, objv, ip, dbp) TCL_DB_ENV0 }; static const char *bdbopen[] = { -#if CONFIG_TEST +#ifdef CONFIG_TEST "-btcompare", "-dirty", "-dupcompare", @@ -1336,6 +1407,7 @@ bdb_DbOpen(interp, objc, objv, ip, dbp) "-extent", "-ffactor", "-hash", + "-inorder", "-len", "-maxsize", "-mode", @@ -1356,7 +1428,7 @@ bdb_DbOpen(interp, objc, objv, ip, dbp) NULL }; enum bdbopen { -#if CONFIG_TEST +#ifdef CONFIG_TEST TCL_DB_BTCOMPARE, TCL_DB_DIRTY, TCL_DB_DUPCOMPARE, @@ -1387,6 +1459,7 @@ bdb_DbOpen(interp, objc, objv, ip, dbp) TCL_DB_EXTENT, TCL_DB_FFACTOR, TCL_DB_HASH, + TCL_DB_INORDER, TCL_DB_LEN, TCL_DB_MAXSIZE, TCL_DB_MODE, @@ -1411,9 +1484,9 @@ bdb_DbOpen(interp, objc, objv, ip, dbp) DBTYPE type; DB_ENV *envp; Tcl_Obj **myobjv; - u_int32_t gbytes, bytes, ncaches, open_flags, uintarg; - int endarg, i, intarg, mode, myobjc; - int optindex, result, ret, set_err, set_flags, set_pfx, subdblen; + u_int32_t gbytes, bytes, open_flags, set_flags, uintarg; + int endarg, i, intarg, lorder, mode, myobjc, ncaches; + int optindex, result, ret, set_err, set_pfx, subdblen; u_char *subdbtmp; char *arg, *db, *passwd, *subdb, msg[MSG_SIZE]; @@ -1528,7 +1601,7 @@ bdb_DbOpen(interp, objc, objv, ip, dbp) } i++; switch ((enum bdbopen)optindex) { -#if CONFIG_TEST +#ifdef CONFIG_TEST case TCL_DB_BTCOMPARE: if (i >= objc) { Tcl_WrongNumArgs(interp, 2, objv, @@ -1599,10 +1672,10 @@ bdb_DbOpen(interp, objc, objv, ip, dbp) result = TCL_ERROR; break; } - result = _GetUInt32(interp, objv[i++], &uintarg); + result = Tcl_GetIntFromObj(interp, objv[i++], &lorder); if (result == TCL_OK) { _debug_check(); - ret = (*dbp)->set_lorder(*dbp, uintarg); + ret = (*dbp)->set_lorder(*dbp, 
lorder); result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "set_lorder"); } @@ -1632,7 +1705,9 @@ bdb_DbOpen(interp, objc, objv, ip, dbp) set_flags |= DB_REVSPLITOFF; break; case TCL_DB_TEST: - (*dbp)->set_h_hash(*dbp, __ham_test); + ret = (*dbp)->set_h_hash(*dbp, __ham_test); + result = _ReturnSetup(interp, ret, + DB_RETOK_STD(ret), "set_h_hash"); break; case TCL_DB_THREAD: /* Enable DB_THREAD when specified in testing. */ @@ -1658,7 +1733,7 @@ bdb_DbOpen(interp, objc, objv, ip, dbp) txn = NAME_TO_TXN(arg); if (txn == NULL) { snprintf(msg, MSG_SIZE, - "Put: Invalid txn: %s\n", arg); + "Open: Invalid txn: %s\n", arg); Tcl_SetResult(interp, msg, TCL_VOLATILE); result = TCL_ERROR; } @@ -1740,6 +1815,9 @@ bdb_DbOpen(interp, objc, objv, ip, dbp) case TCL_DB_DUPSORT: set_flags |= DB_DUPSORT; break; + case TCL_DB_INORDER: + set_flags |= DB_INORDER; + break; case TCL_DB_RECNUM: set_flags |= DB_RECNUM; break; @@ -1919,7 +1997,7 @@ bdb_DbOpen(interp, objc, objv, ip, dbp) result = _GetUInt32(interp, myobjv[1], &bytes); if (result != TCL_OK) break; - result = _GetUInt32(interp, myobjv[2], &ncaches); + result = Tcl_GetIntFromObj(interp, myobjv[2], &ncaches); if (result != TCL_OK) break; _debug_check(); @@ -1957,7 +2035,7 @@ bdb_DbOpen(interp, objc, objv, ip, dbp) */ if (errip->i_err != NULL && errip->i_err != stdout && errip->i_err != stderr) - fclose(errip->i_err); + (void)fclose(errip->i_err); if (strcmp(arg, "/dev/stdout") == 0) errip->i_err = stdout; else if (strcmp(arg, "/dev/stderr") == 0) @@ -2028,12 +2106,12 @@ bdb_DbOpen(interp, objc, objv, ip, dbp) subdbtmp = Tcl_GetByteArrayFromObj(objv[i++], &subdblen); if ((ret = __os_malloc(envp, - subdblen + 1, &subdb)) != 0) { + (size_t)subdblen + 1, &subdb)) != 0) { Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC); return (0); } - memcpy(subdb, subdbtmp, subdblen); + memcpy(subdb, subdbtmp, (size_t)subdblen); subdb[subdblen] = '\0'; } } @@ -2079,7 +2157,7 @@ error: */ if (set_err && errip && errip->i_err != NULL && errip->i_err != stdout && errip->i_err != stderr) { - fclose(errip->i_err); + (void)fclose(errip->i_err); errip->i_err = NULL; } if (set_pfx && errip && errip->i_errpfx != NULL) { @@ -2091,6 +2169,245 @@ error: return (result); } +#ifdef HAVE_SEQUENCE +/* + * bdb_SeqOpen -- + * Implements the "Seq_create/Seq_open" command. + */ +static int +bdb_SeqOpen(interp, objc, objv, ip, seqp) + Tcl_Interp *interp; /* Interpreter */ + int objc; /* How many arguments? 
*/ + Tcl_Obj *CONST objv[]; /* The argument objects */ + DBTCL_INFO *ip; /* Our internal info */ + DB_SEQUENCE **seqp; /* DB_SEQUENCE handle */ +{ + static const char *seqopen[] = { + "-auto_commit", + "-cachesize", + "-create", + "-inc", + "-init", + "-dec", + "-max", + "-min", + "-txn", + "-wrap", + "--", + NULL + } ; + enum seqopen { + TCL_SEQ_AUTO_COMMIT, + TCL_SEQ_CACHESIZE, + TCL_SEQ_CREATE, + TCL_SEQ_INC, + TCL_SEQ_INIT, + TCL_SEQ_DEC, + TCL_SEQ_MAX, + TCL_SEQ_MIN, + TCL_SEQ_TXN, + TCL_SEQ_WRAP, + TCL_SEQ_ENDARG + }; + DB *dbp; + DBT key; + DBTYPE type; + DB_TXN *txn; + db_recno_t recno; + db_seq_t min, max, value; + u_int32_t flags, oflags; + int cache, endarg, i, optindex, result, ret, setrange, setvalue, v; + char *arg, *db, msg[MSG_SIZE]; + + COMPQUIET(ip, NULL); + COMPQUIET(value, 0); + + if (objc < 2) { + Tcl_WrongNumArgs(interp, 2, objv, "?args?"); + return (TCL_ERROR); + } + + txn = NULL; + endarg = 0; + flags = oflags = 0; + setrange = setvalue = 0; + min = INT64_MIN; + max = INT64_MAX; + cache = 0; + + for (i = 2; i < objc;) { + Tcl_ResetResult(interp); + if (Tcl_GetIndexFromObj(interp, objv[i], seqopen, "option", + TCL_EXACT, &optindex) != TCL_OK) { + arg = Tcl_GetStringFromObj(objv[i], NULL); + if (arg[0] == '-') { + result = IS_HELP(objv[i]); + goto error; + } else + Tcl_ResetResult(interp); + break; + } + i++; + result = TCL_OK; + switch ((enum seqopen)optindex) { + case TCL_SEQ_AUTO_COMMIT: + oflags |= DB_AUTO_COMMIT; + break; + case TCL_SEQ_CREATE: + oflags |= DB_CREATE; + break; + case TCL_SEQ_INC: + LF_SET(DB_SEQ_INC); + break; + case TCL_SEQ_CACHESIZE: + if (i >= objc) { + Tcl_WrongNumArgs(interp, 2, objv, + "?-cachesize value?"); + result = TCL_ERROR; + break; + } + result = Tcl_GetIntFromObj(interp, objv[i++], &cache); + break; + case TCL_SEQ_INIT: + if (i >= objc) { + Tcl_WrongNumArgs(interp, 2, objv, + "?-init value?"); + result = TCL_ERROR; + break; + } + result = + Tcl_GetWideIntFromObj(interp, objv[i++], &value); + setvalue = 1; + break; + case TCL_SEQ_DEC: + LF_SET(DB_SEQ_DEC); + break; + case TCL_SEQ_MAX: + if (i >= objc) { + Tcl_WrongNumArgs(interp, 2, objv, + "?-max value?"); + result = TCL_ERROR; + break; + } + if ((result = + Tcl_GetWideIntFromObj(interp, + objv[i++], &max)) != TCL_OK) + goto error; + setrange = 1; + break; + case TCL_SEQ_MIN: + if (i >= objc) { + Tcl_WrongNumArgs(interp, 2, objv, + "?-min value?"); + result = TCL_ERROR; + break; + } + if ((result = + Tcl_GetWideIntFromObj(interp, + objv[i++], &min)) != TCL_OK) + goto error; + setrange = 1; + break; + case TCL_SEQ_TXN: + if (i > (objc - 1)) { + Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?"); + result = TCL_ERROR; + break; + } + arg = Tcl_GetStringFromObj(objv[i++], NULL); + txn = NAME_TO_TXN(arg); + if (txn == NULL) { + snprintf(msg, MSG_SIZE, + "Sequence: Invalid txn: %s\n", arg); + Tcl_SetResult(interp, msg, TCL_VOLATILE); + result = TCL_ERROR; + } + break; + case TCL_SEQ_WRAP: + LF_SET(DB_SEQ_WRAP); + break; + case TCL_SEQ_ENDARG: + endarg = 1; + break; + } + /* + * If, at any time, parsing the args we get an error, + * bail out and return. + */ + if (result != TCL_OK) + goto error; + if (endarg) + break; + } + + if (objc - i != 2) { + Tcl_WrongNumArgs(interp, 2, objv, "?args?"); + return (TCL_ERROR); + } + /* + * The db must be a string but the sequence key may + * be anything. 
+ */ + db = Tcl_GetStringFromObj(objv[i++], NULL); + if ((dbp = NAME_TO_DB(db)) == NULL) { + Tcl_SetResult(interp, "No such dbp", TCL_STATIC); + return (TCL_ERROR); + } + (void)dbp->get_type(dbp, &type); + + memset(&key, 0, sizeof(key)); + if (type == DB_QUEUE || type == DB_RECNO) { + result = _GetUInt32(interp, objv[i++], &recno); + if (result != TCL_OK) + return (result); + key.data = &recno; + key.size = sizeof(recno); + } else { + key.data = Tcl_GetByteArrayFromObj(objv[i++], &v); + key.size = (u_int32_t)v; + } + ret = db_sequence_create(seqp, dbp, 0); + if ((result = _ReturnSetup(interp, + ret, DB_RETOK_STD(ret), "sequence create")) != TCL_OK) { + *seqp = NULL; + return (result); + } + + ret = (*seqp)->set_flags(*seqp, flags); + if ((result = _ReturnSetup(interp, + ret, DB_RETOK_STD(ret), "sequence set_flags")) != TCL_OK) + goto error; + if (setrange) { + ret = (*seqp)->set_range(*seqp, min, max); + if ((result = _ReturnSetup(interp, + ret, DB_RETOK_STD(ret), "sequence set_range")) != TCL_OK) + goto error; + } + if (cache) { + ret = (*seqp)->set_cachesize(*seqp, cache); + if ((result = _ReturnSetup(interp, + ret, DB_RETOK_STD(ret), "sequence cachesize")) != TCL_OK) + goto error; + } + if (setvalue) { + ret = (*seqp)->initial_value(*seqp, value); + if ((result = _ReturnSetup(interp, + ret, DB_RETOK_STD(ret), "sequence init")) != TCL_OK) + goto error; + } + ret = (*seqp)->open(*seqp, txn, &key, oflags); + if ((result = _ReturnSetup(interp, + ret, DB_RETOK_STD(ret), "sequence open")) != TCL_OK) + goto error; + + if (0) { +error: (void) (*seqp)->close(*seqp, 0); + *seqp = NULL; + } + return (result); +} +#endif + /* * bdb_DbRemove -- * Implements the DB_ENV->remove and DB->remove command. @@ -2245,12 +2562,12 @@ bdb_DbRemove(interp, objc, objv) if (i != objc) { subdbtmp = Tcl_GetByteArrayFromObj(objv[i++], &subdblen); - if ((ret = __os_malloc(envp, subdblen + 1, + if ((ret = __os_malloc(envp, (size_t)subdblen + 1, &subdb)) != 0) { Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC); return (0); } - memcpy(subdb, subdbtmp, subdblen); + memcpy(subdb, subdbtmp, (size_t)subdblen); subdb[subdblen] = '\0'; } } else { @@ -2452,24 +2769,24 @@ bdb_DbRename(interp, objc, objv) if (i == objc - 2) { subdbtmp = Tcl_GetByteArrayFromObj(objv[i++], &subdblen); - if ((ret = __os_malloc(envp, subdblen + 1, + if ((ret = __os_malloc(envp, (size_t)subdblen + 1, &subdb)) != 0) { Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC); return (0); } - memcpy(subdb, subdbtmp, subdblen); + memcpy(subdb, subdbtmp, (size_t)subdblen); subdb[subdblen] = '\0'; } subdbtmp = Tcl_GetByteArrayFromObj(objv[i++], &newlen); - if ((ret = __os_malloc(envp, newlen + 1, + if ((ret = __os_malloc(envp, (size_t)newlen + 1, &newname)) != 0) { Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC); return (0); } - memcpy(newname, subdbtmp, newlen); + memcpy(newname, subdbtmp, (size_t)newlen); newname[newlen] = '\0'; } else { Tcl_WrongNumArgs( @@ -2516,7 +2833,7 @@ error: return (result); } -#if CONFIG_TEST +#ifdef CONFIG_TEST /* * bdb_DbVerify -- * Implements the DB->verify command. @@ -2534,6 +2851,7 @@ bdb_DbVerify(interp, objc, objv) "-env", "-errfile", "-errpfx", + "-unref", "--", NULL }; @@ -2544,6 +2862,7 @@ bdb_DbVerify(interp, objc, objv) TCL_DBVRFY_ENV, TCL_DBVRFY_ERRFILE, TCL_DBVRFY_ERRPFX, + TCL_DBVRFY_UNREF, TCL_DBVRFY_ENDARG }; DB_ENV *envp; @@ -2634,7 +2953,7 @@ bdb_DbVerify(interp, objc, objv) * If the user already set one, close it. 
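Note: bdb_SeqOpen above is driven entirely by the new DB_SEQUENCE handle. A minimal C sketch of the same call sequence from an application, assuming an already-open DB handle; the key name, range and cache size are arbitrary, and the final get() is the standard sequence API rather than part of this hunk:

#include <string.h>
#include <db.h>

/*
 * next_id --
 *	Illustrative only: create or open a named sequence in an existing
 *	database and allocate one value from it, mirroring bdb_SeqOpen.
 */
int
next_id(DB *dbp, DB_TXN *txn, db_seq_t *valuep)
{
        DBT key;
        DB_SEQUENCE *seq;
        int ret;

        if ((ret = db_sequence_create(&seq, dbp, 0)) != 0)
                return (ret);

        /* Attributes must be set before the sequence is opened. */
        (void)seq->set_flags(seq, DB_SEQ_INC | DB_SEQ_WRAP);
        (void)seq->set_range(seq, 1, 1000000);
        (void)seq->set_cachesize(seq, 100);
        (void)seq->initial_value(seq, 1);

        memset(&key, 0, sizeof(key));
        key.data = "my_sequence";
        key.size = (u_int32_t)strlen("my_sequence");

        if ((ret = seq->open(seq, txn, &key, DB_CREATE)) == 0)
                /* Atomically allocate the next value. */
                ret = seq->get(seq, txn, 1, valuep, 0);

        (void)seq->close(seq, 0);
        return (ret);
}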
*/ if (errf != NULL && errf != stdout && errf != stderr) - fclose(errf); + (void)fclose(errf); if (strcmp(arg, "/dev/stdout") == 0) errf = stdout; else if (strcmp(arg, "/dev/stderr") == 0) @@ -2661,6 +2980,9 @@ bdb_DbVerify(interp, objc, objv) break; } break; + case TCL_DBVRFY_UNREF: + flags |= DB_UNREF; + break; case TCL_DBVRFY_ENDARG: endarg = 1; break; @@ -2717,7 +3039,7 @@ bdb_DbVerify(interp, objc, objv) dbp = NULL; error: if (errf != NULL && errf != stdout && errf != stderr) - fclose(errf); + (void)fclose(errf); if (errpfx != NULL) __os_free(envp, errpfx); if (dbp) @@ -2788,7 +3110,7 @@ bdb_Version(interp, objc, objv) v = db_version(&maj, &min, &patch); if (string) - res = Tcl_NewStringObj(v, strlen(v)); + res = NewStringObj(v, strlen(v)); else { verobjc = 3; verobjv[0] = Tcl_NewIntObj(maj); @@ -2801,7 +3123,7 @@ error: return (result); } -#if CONFIG_TEST +#ifdef CONFIG_TEST /* * bdb_Handles -- * Implements the handles command. @@ -2826,7 +3148,7 @@ bdb_Handles(interp, objc, objv) for (p = LIST_FIRST(&__db_infohead); p != NULL; p = LIST_NEXT(p, entries)) { - handle = Tcl_NewStringObj(p->i_name, strlen(p->i_name)); + handle = NewStringObj(p->i_name, strlen(p->i_name)); if (Tcl_ListObjAppendElement(interp, res, handle) != TCL_OK) return (TCL_ERROR); } @@ -2834,6 +3156,58 @@ bdb_Handles(interp, objc, objv) return (TCL_OK); } +/* + * bdb_MsgType - + * Implements the msgtype command. + * Given a replication message return its message type name. + */ +static int +bdb_MsgType(interp, objc, objv) + Tcl_Interp *interp; /* Interpreter */ + int objc; /* How many arguments? */ + Tcl_Obj *CONST objv[]; /* The argument objects */ +{ + REP_CONTROL *rp; + Tcl_Obj *msgname; + u_int32_t len, msgtype; + int freerp, ret; + + /* + * If the messages in rep.h change, this must change too! + * Add "no_type" for 0 so that we directly index. + */ + static const char *msgnames[] = { + "no_type", "alive", "alive_req", "all_req", + "dupmaster", "file", "file_fail", "file_req", "log", + "log_more", "log_req", "master_req", "newclient", + "newfile", "newmaster", "newsite", "page", + "page_fail", "page_req", "update", "update_req", + "verify", "verify_fail", "verify_req", + "vote1", "vote2", NULL + }; + + /* + * 1 arg, the message. Error if different. + */ + if (objc != 3) { + Tcl_WrongNumArgs(interp, 3, objv, "msgtype msg"); + return (TCL_ERROR); + } + + ret = _CopyObjBytes(interp, objv[2], (void **)&rp, &len, &freerp); + if (ret != TCL_OK) { + Tcl_SetResult(interp, + "msgtype: bad control message", TCL_STATIC); + return (TCL_ERROR); + } + msgtype = rp->rectype; + msgname = NewStringObj(msgnames[msgtype], strlen(msgnames[msgtype])); + Tcl_SetObjResult(interp, msgname); + if (rp != NULL && freerp) + __os_free(NULL, rp); + return (TCL_OK); +} + /* * bdb_DbUpgrade -- * Implements the DB->upgrade command. @@ -2988,9 +3362,9 @@ tcl_compare_callback(dbp, dbta, dbtb, procobj, errname) * This will involve a copy, which is unpleasantly slow, but there's * little we can do to avoid this (I think). */ - a = Tcl_NewByteArrayObj(dbta->data, dbta->size); + a = Tcl_NewByteArrayObj(dbta->data, (int)dbta->size); Tcl_IncrRefCount(a); - b = Tcl_NewByteArrayObj(dbtb->data, dbtb->size); + b = Tcl_NewByteArrayObj(dbtb->data, (int)dbtb->size); Tcl_IncrRefCount(b); objv[1] = a; @@ -3052,26 +3426,30 @@ tcl_h_hash(dbp, buf, len) /* * Create a ByteArray for the buffer. 
*/ - objv[1] = Tcl_NewByteArrayObj((void *)buf, len); + objv[1] = Tcl_NewByteArrayObj((void *)buf, (int)len); Tcl_IncrRefCount(objv[1]); result = Tcl_EvalObjv(interp, 2, objv, 0); - if (result != TCL_OK) { - /* - * XXX - * We drop core on error. See the comment in - * tcl_compare_callback. - */ -panic: __db_err(dbp->dbenv, "Tcl h_hash callback failed"); - DB_ASSERT(0); - return (__db_panic(dbp->dbenv, DB_RUNRECOVERY)); - } + if (result != TCL_OK) + goto panic; result = Tcl_GetIntFromObj(interp, Tcl_GetObjResult(interp), &hval); if (result != TCL_OK) goto panic; Tcl_DecrRefCount(objv[1]); - return (hval); + return ((u_int32_t)hval); + +panic: /* + * We drop core on error, in diagnostic mode. See the comment in + * tcl_compare_callback. + */ + __db_err(dbp->dbenv, "Tcl h_hash callback failed"); + (void)__db_panic(dbp->dbenv, DB_RUNRECOVERY); + + DB_ASSERT(0); + + /* NOTREACHED */ + return (0); } /* @@ -3097,21 +3475,21 @@ tcl_rep_send(dbenv, control, rec, lsnp, eid, flags) interp = ip->i_interp; objv[0] = ip->i_rep_send; - control_o = Tcl_NewByteArrayObj(control->data, control->size); + control_o = Tcl_NewByteArrayObj(control->data, (int)control->size); Tcl_IncrRefCount(control_o); - rec_o = Tcl_NewByteArrayObj(rec->data, rec->size); + rec_o = Tcl_NewByteArrayObj(rec->data, (int)rec->size); Tcl_IncrRefCount(rec_o); eid_o = Tcl_NewIntObj(eid); Tcl_IncrRefCount(eid_o); if (LF_ISSET(DB_REP_PERMANENT)) - flags_o = Tcl_NewStringObj("perm", strlen("perm")); + flags_o = NewStringObj("perm", strlen("perm")); else if (LF_ISSET(DB_REP_NOBUFFER)) - flags_o = Tcl_NewStringObj("nobuffer", strlen("nobuffer")); + flags_o = NewStringObj("nobuffer", strlen("nobuffer")); else - flags_o = Tcl_NewStringObj("none", strlen("none")); + flags_o = NewStringObj("none", strlen("none")); Tcl_IncrRefCount(flags_o); myobjc = 2; @@ -3181,7 +3559,7 @@ tcl_db_malloc(size) return (NULL); Tcl_IncrRefCount(obj); - Tcl_SetObjLength(obj, size + sizeof(Tcl_Obj *)); + Tcl_SetObjLength(obj, (int)(size + sizeof(Tcl_Obj *))); buf = Tcl_GetString(obj); memcpy(buf, &obj, sizeof(&obj)); @@ -3200,7 +3578,7 @@ tcl_db_realloc(ptr, size) return (tcl_db_malloc(size)); obj = *(Tcl_Obj **)((Tcl_Obj **)ptr - 1); - Tcl_SetObjLength(obj, size + sizeof(Tcl_Obj *)); + Tcl_SetObjLength(obj, (int)(size + sizeof(Tcl_Obj *))); ptr = Tcl_GetString(obj); memcpy(ptr, &obj, sizeof(&obj)); diff --git a/db/tcl/tcl_dbcursor.c b/db/tcl/tcl_dbcursor.c index 489c387bc..f0ca788f4 100644 --- a/db/tcl/tcl_dbcursor.c +++ b/db/tcl/tcl_dbcursor.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: tcl_dbcursor.c,v 11.65 2004/10/07 16:48:39 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: tcl_dbcursor.c,v 11.57 2003/05/17 15:15:45 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -43,7 +41,7 @@ dbc_Cmd(clientData, interp, objc, objv) Tcl_Obj *CONST objv[]; /* The argument objects */ { static const char *dbccmds[] = { -#if CONFIG_TEST +#ifdef CONFIG_TEST "pget", #endif "close", @@ -54,7 +52,7 @@ dbc_Cmd(clientData, interp, objc, objv) NULL }; enum dbccmds { -#if CONFIG_TEST +#ifdef CONFIG_TEST DBCPGET, #endif DBCCLOSE, @@ -93,7 +91,7 @@ dbc_Cmd(clientData, interp, objc, objv) TCL_EXACT, &cmdindex) != TCL_OK) return (IS_HELP(objv[1])); switch ((enum dbccmds)cmdindex) { -#if CONFIG_TEST +#ifdef CONFIG_TEST case DBCPGET: result = tcl_DbcGet(interp, objc, objv, dbc, 1); break; @@ -152,7 +150,7 @@ tcl_DbcPut(interp, objc, objv, dbc) DBC *dbc; /* Cursor pointer */ { static const char *dbcutopts[] = { -#if CONFIG_TEST +#ifdef CONFIG_TEST "-nodupdata", #endif "-after", @@ -164,7 +162,7 @@ tcl_DbcPut(interp, objc, objv, dbc) NULL }; enum dbcutopts { -#if CONFIG_TEST +#ifdef CONFIG_TEST DBCPUT_NODUPDATA, #endif DBCPUT_AFTER, @@ -184,6 +182,9 @@ tcl_DbcPut(interp, objc, objv, dbc) u_int32_t flag; int elemc, freekey, freedata, i, optindex, result, ret; + COMPQUIET(dtmp, NULL); + COMPQUIET(ktmp, NULL); + result = TCL_OK; flag = 0; freekey = freedata = 0; @@ -217,7 +218,7 @@ tcl_DbcPut(interp, objc, objv, dbc) } i++; switch ((enum dbcutopts)optindex) { -#if CONFIG_TEST +#ifdef CONFIG_TEST case DBCPUT_NODUPDATA: FLAG_CHECK(flag); flag = DB_NODUPDATA; @@ -363,9 +364,9 @@ tcl_DbcPut(interp, objc, objv, dbc) } out: if (freedata) - (void)__os_free(NULL, dtmp); + __os_free(NULL, dtmp); if (freekey) - (void)__os_free(NULL, ktmp); + __os_free(NULL, ktmp); return (result); } @@ -381,7 +382,8 @@ tcl_DbcGet(interp, objc, objv, dbc, ispget) int ispget; /* 1 for pget, 0 for get */ { static const char *dbcgetopts[] = { -#if CONFIG_TEST +#ifdef CONFIG_TEST + "-degree_2", "-dirty", "-get_both_range", "-multi", @@ -406,7 +408,8 @@ tcl_DbcGet(interp, objc, objv, dbc, ispget) NULL }; enum dbcgetopts { -#if CONFIG_TEST +#ifdef CONFIG_TEST + DBCGET_DEGREE2, DBCGET_DIRTY, DBCGET_BOTH_RANGE, DBCGET_MULTI, @@ -438,9 +441,13 @@ tcl_DbcGet(interp, objc, objv, dbc, ispget) db_recno_t precno, recno; u_int32_t flag, op; int elemc, freekey, freedata, i, optindex, result, ret; -#if CONFIG_TEST +#ifdef CONFIG_TEST int bufsize; + + bufsize = 0; #endif + COMPQUIET(dtmp, NULL); + COMPQUIET(ktmp, NULL); result = TCL_OK; flag = 0; @@ -474,7 +481,10 @@ tcl_DbcGet(interp, objc, objv, dbc, ispget) } i++; switch ((enum dbcgetopts)optindex) { -#if CONFIG_TEST +#ifdef CONFIG_TEST + case DBCGET_DEGREE2: + flag |= DB_DEGREE_2; + break; case DBCGET_DIRTY: flag |= DB_DIRTY_READ; break; @@ -643,7 +653,7 @@ tcl_DbcGet(interp, objc, objv, dbc, ispget) op = flag & DB_OPFLAGS_MASK; switch (op) { case DB_GET_BOTH: -#if CONFIG_TEST +#ifdef CONFIG_TEST case DB_GET_BOTH_RANGE: #endif if (i != (objc - 2)) { @@ -703,10 +713,10 @@ tcl_DbcGet(interp, objc, objv, dbc, ispget) result = TCL_ERROR; goto out; } -#if CONFIG_TEST +#ifdef CONFIG_TEST if (flag & (DB_MULTIPLE|DB_MULTIPLE_KEY)) { - (void)__os_malloc(NULL, bufsize, &data.data); - data.ulen = bufsize; + (void)__os_malloc(NULL, (size_t)bufsize, &data.data); + data.ulen = (u_int32_t)bufsize; data.flags |= DB_DBT_USERMEM; } else #endif @@ -739,10 +749,10 @@ tcl_DbcGet(interp, objc, objv, dbc, ispget) goto out; } key.flags |= 
DB_DBT_MALLOC; -#if CONFIG_TEST +#ifdef CONFIG_TEST if (flag & (DB_MULTIPLE|DB_MULTIPLE_KEY)) { - (void)__os_malloc(NULL, bufsize, &data.data); - data.ulen = bufsize; + (void)__os_malloc(NULL, (size_t)bufsize, &data.data); + data.ulen = (u_int32_t)bufsize; data.flags |= DB_DBT_USERMEM; } else #endif @@ -761,7 +771,7 @@ tcl_DbcGet(interp, objc, objv, dbc, ispget) goto out; retlist = Tcl_NewListObj(0, NULL); - if (ret == DB_NOTFOUND) + if (ret != 0) goto out1; if (op == DB_GET_RECNO) { recno = *((db_recno_t *)data.data); @@ -806,9 +816,9 @@ out: if (data.data != NULL && flag & (DB_MULTIPLE|DB_MULTIPLE_KEY)) __os_free(dbc->dbp->dbenv, data.data); if (freedata) - (void)__os_free(NULL, dtmp); + __os_free(NULL, dtmp); if (freekey) - (void)__os_free(NULL, ktmp); + __os_free(NULL, ktmp); return (result); } @@ -901,7 +911,6 @@ tcl_DbcDup(interp, objc, objv, dbc) * Now duplicate the cursor. If successful, we need to create * a new cursor command. */ - snprintf(newname, sizeof(newname), "%s.c%d", dbip->i_name, dbip->i_dbdbcid); newdbcip = _NewInfo(interp, NULL, newname, I_DBC); @@ -910,10 +919,10 @@ tcl_DbcDup(interp, objc, objv, dbc) if (ret == 0) { dbip->i_dbdbcid++; newdbcip->i_parent = dbip; - Tcl_CreateObjCommand(interp, newname, + (void)Tcl_CreateObjCommand(interp, newname, (Tcl_ObjCmdProc *)dbc_Cmd, (ClientData)newdbc, NULL); - res = Tcl_NewStringObj(newname, strlen(newname)); + res = NewStringObj(newname, strlen(newname)); _SetInfoData(newdbcip, newdbc); Tcl_SetObjResult(interp, res); } else { diff --git a/db/tcl/tcl_env.c b/db/tcl/tcl_env.c index 21b82890b..e513b961d 100644 --- a/db/tcl/tcl_env.c +++ b/db/tcl/tcl_env.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. 
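Note: the -degree_2 option added to the cursor get command above passes DB_DEGREE_2 through to the cursor read. A rough sketch of the corresponding C call, using the DBC->c_get name from this release; the handles and key layout are assumed:

#include <stdio.h>
#include <string.h>
#include <db.h>

/*
 * scan_degree2 --
 *	Illustrative only: walk a database under degree-2 isolation, so
 *	read locks are dropped as the cursor moves instead of being held
 *	for the life of the transaction.
 */
int
scan_degree2(DB *dbp, DB_TXN *txn)
{
        DBC *dbc;
        DBT key, data;
        int ret;

        if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
                return (ret);

        memset(&key, 0, sizeof(key));
        memset(&data, 0, sizeof(data));

        while ((ret = dbc->c_get(dbc,
            &key, &data, DB_NEXT | DB_DEGREE_2)) == 0)
                printf("%.*s\n", (int)key.size, (char *)key.data);

        if (ret == DB_NOTFOUND)
                ret = 0;
        (void)dbc->c_close(dbc);
        return (ret);
}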
+ * + * $Id: tcl_env.c,v 11.121 2004/10/07 16:48:39 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: tcl_env.c,v 11.105 2003/09/04 20:45:44 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -53,8 +51,10 @@ env_Cmd(clientData, interp, objc, objv) Tcl_Obj *CONST objv[]; /* The argument objects */ { static const char *envcmds[] = { -#if CONFIG_TEST +#ifdef CONFIG_TEST "attributes", + "errfile", + "errpfx", "lock_detect", "lock_id", "lock_id_free", @@ -109,6 +109,8 @@ env_Cmd(clientData, interp, objc, objv) "get_lk_max_lockers", "get_lk_max_locks", "get_lk_max_objects", + "get_mp_max_openfd", + "get_mp_max_write", "get_mp_mmapsize", "get_open_flags", "get_rep_limit", @@ -124,8 +126,10 @@ env_Cmd(clientData, interp, objc, objv) NULL }; enum envcmds { -#if CONFIG_TEST +#ifdef CONFIG_TEST ENVATTR, + ENVERRFILE, + ENVERRPFX, ENVLKDETECT, ENVLKID, ENVLKFREEID, @@ -180,6 +184,8 @@ env_Cmd(clientData, interp, objc, objv) ENVGETLKMAXLOCKERS, ENVGETLKMAXLOCKS, ENVGETLKMAXOBJECTS, + ENVGETMPMAXOPENFD, + ENVGETMPMAXWRITE, ENVGETMPMMAPSIZE, ENVGETOPENFLAG, ENVGETREPLIMIT, @@ -197,15 +203,16 @@ env_Cmd(clientData, interp, objc, objv) DB_ENV *dbenv; Tcl_Obj *res, *myobjv[3]; char newname[MSG_SIZE]; - int cmdindex, i, ncache, result, ret; + int cmdindex, i, intvalue1, intvalue2, ncache, result, ret; u_int32_t bytes, gbytes, value; size_t size; long shm_key; time_t timeval; const char *strval, **dirs; -#if CONFIG_TEST +#ifdef CONFIG_TEST DBTCL_INFO *logcip; DB_LOGC *logc; + char *strarg; u_int32_t lockid; long newval, otherval; #endif @@ -238,7 +245,7 @@ env_Cmd(clientData, interp, objc, objv) return (IS_HELP(objv[1])); res = NULL; switch ((enum envcmds)cmdindex) { -#if CONFIG_TEST +#ifdef CONFIG_TEST case ENVLKDETECT: result = tcl_LockDetect(interp, objc, objv, dbenv); break; @@ -268,10 +275,10 @@ env_Cmd(clientData, interp, objc, objv) Tcl_WrongNumArgs(interp, 3, objv, NULL); return (TCL_ERROR); } - result = Tcl_GetLongFromObj(interp, objv[2], (long *)&newval); + result = Tcl_GetLongFromObj(interp, objv[2], &newval); if (result != TCL_OK) return (result); - ret = dbenv->lock_id_free(dbenv, newval); + ret = dbenv->lock_id_free(dbenv, (u_int32_t)newval); result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "lock id_free"); break; @@ -280,13 +287,14 @@ env_Cmd(clientData, interp, objc, objv) Tcl_WrongNumArgs(interp, 4, objv, "current max"); return (TCL_ERROR); } - result = Tcl_GetLongFromObj(interp, objv[2], (long *)&newval); + result = Tcl_GetLongFromObj(interp, objv[2], &newval); if (result != TCL_OK) return (result); - result = Tcl_GetLongFromObj(interp, objv[3], (long *)&otherval); + result = Tcl_GetLongFromObj(interp, objv[3], &otherval); if (result != TCL_OK) return (result); - ret = __lock_id_set(dbenv, newval, otherval); + ret = __lock_id_set(dbenv, + (u_int32_t)newval, (u_int32_t)otherval); result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "lock id_free"); break; @@ -317,11 +325,10 @@ env_Cmd(clientData, interp, objc, objv) * not "tied" to the env. That is, they * are NOT closed if the env is closed. */ - Tcl_CreateObjCommand(interp, newname, + (void)Tcl_CreateObjCommand(interp, newname, (Tcl_ObjCmdProc *)logc_Cmd, (ClientData)logc, NULL); - res = - Tcl_NewStringObj(newname, strlen(newname)); + res = NewStringObj(newname, strlen(newname)); _SetInfoData(logcip, logc); } else { _DeleteInfo(logcip); @@ -393,22 +400,23 @@ env_Cmd(clientData, interp, objc, objv) * !!! Retrieve the client ID from the dbp handle directly. 
* This is for testing purposes only. It is dbp-private data. */ - res = Tcl_NewLongObj(dbenv->cl_id); + res = Tcl_NewLongObj((long)dbenv->cl_id); break; case ENVTXNSETID: if (objc != 4) { Tcl_WrongNumArgs(interp, 4, objv, "current max"); return (TCL_ERROR); } - result = Tcl_GetLongFromObj(interp, objv[2], (long *)&newval); + result = Tcl_GetLongFromObj(interp, objv[2], &newval); if (result != TCL_OK) return (result); - result = Tcl_GetLongFromObj(interp, objv[3], (long *)&otherval); + result = Tcl_GetLongFromObj(interp, objv[3], &otherval); if (result != TCL_OK) return (result); - ret = __txn_id_set(dbenv, newval, otherval); + ret = __txn_id_set(dbenv, + (u_int32_t)newval, (u_int32_t)otherval); result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "lock id_free"); + "txn setid"); break; case ENVTXNRECOVER: result = tcl_TxnRecover(interp, objc, objv, dbenv, envip); @@ -425,6 +433,29 @@ env_Cmd(clientData, interp, objc, objv) case ENVATTR: result = tcl_EnvAttr(interp, objc, objv, dbenv); break; + case ENVERRFILE: + /* + * One args for this. Error if different. + */ + if (objc != 3) { + Tcl_WrongNumArgs(interp, 2, objv, "errfile"); + return (TCL_ERROR); + } + strarg = Tcl_GetStringFromObj(objv[2], NULL); + tcl_EnvSetErrfile(interp, dbenv, envip, strarg); + result = TCL_OK; + break; + case ENVERRPFX: + /* + * One args for this. Error if different. + */ + if (objc != 3) { + Tcl_WrongNumArgs(interp, 2, objv, "pfx"); + return (TCL_ERROR); + } + strarg = Tcl_GetStringFromObj(objv[2], NULL); + result = tcl_EnvSetErrpfx(interp, dbenv, envip, strarg); + break; case ENVSETFLAGS: /* * Two args for this. Error if different. @@ -486,9 +517,9 @@ env_Cmd(clientData, interp, objc, objv) ret = dbenv->get_cachesize(dbenv, &gbytes, &bytes, &ncache); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env get_cachesize")) == TCL_OK) { - myobjv[0] = Tcl_NewIntObj(gbytes); - myobjv[1] = Tcl_NewIntObj(bytes); - myobjv[2] = Tcl_NewIntObj(ncache); + myobjv[0] = Tcl_NewLongObj((long)gbytes); + myobjv[1] = Tcl_NewLongObj((long)bytes); + myobjv[2] = Tcl_NewLongObj((long)ncache); res = Tcl_NewListObj(3, myobjv); } break; @@ -503,7 +534,7 @@ env_Cmd(clientData, interp, objc, objv) res = Tcl_NewListObj(0, NULL); for (i = 0; result == TCL_OK && dirs[i] != NULL; i++) result = Tcl_ListObjAppendElement(interp, res, - Tcl_NewStringObj(dirs[i], strlen(dirs[i]))); + NewStringObj(dirs[i], strlen(dirs[i]))); } break; case ENVGETENCRYPTFLAGS: @@ -515,7 +546,7 @@ env_Cmd(clientData, interp, objc, objv) return (TCL_ERROR); } dbenv->get_errpfx(dbenv, &strval); - res = Tcl_NewStringObj(strval, strlen(strval)); + res = NewStringObj(strval, strlen(strval)); break; case ENVGETFLAGS: result = env_GetFlags(interp, objc, objv, dbenv); @@ -528,7 +559,7 @@ env_Cmd(clientData, interp, objc, objv) ret = dbenv->get_home(dbenv, &strval); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env get_home")) == TCL_OK) - res = Tcl_NewStringObj(strval, strlen(strval)); + res = NewStringObj(strval, strlen(strval)); break; case ENVGETLGBSIZE: if (objc != 2) { @@ -538,7 +569,7 @@ env_Cmd(clientData, interp, objc, objv) ret = dbenv->get_lg_bsize(dbenv, &value); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env get_lg_bsize")) == TCL_OK) - res = Tcl_NewIntObj(value); + res = Tcl_NewLongObj((long)value); break; case ENVGETLGDIR: if (objc != 2) { @@ -548,7 +579,7 @@ env_Cmd(clientData, interp, objc, objv) ret = dbenv->get_lg_dir(dbenv, &strval); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env get_lg_dir")) == 
TCL_OK) - res = Tcl_NewStringObj(strval, strlen(strval)); + res = NewStringObj(strval, strlen(strval)); break; case ENVGETLGMAX: if (objc != 2) { @@ -558,7 +589,7 @@ env_Cmd(clientData, interp, objc, objv) ret = dbenv->get_lg_max(dbenv, &value); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env get_lg_max")) == TCL_OK) - res = Tcl_NewIntObj(value); + res = Tcl_NewLongObj((long)value); break; case ENVGETLGREGIONMAX: if (objc != 2) { @@ -568,7 +599,7 @@ env_Cmd(clientData, interp, objc, objv) ret = dbenv->get_lg_regionmax(dbenv, &value); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env get_lg_regionmax")) == TCL_OK) - res = Tcl_NewIntObj(value); + res = Tcl_NewLongObj((long)value); break; case ENVGETLKDETECT: result = env_GetLockDetect(interp, objc, objv, dbenv); @@ -581,7 +612,7 @@ env_Cmd(clientData, interp, objc, objv) ret = dbenv->get_lk_max_lockers(dbenv, &value); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env get_lk_max_lockers")) == TCL_OK) - res = Tcl_NewIntObj(value); + res = Tcl_NewLongObj((long)value); break; case ENVGETLKMAXLOCKS: if (objc != 2) { @@ -591,7 +622,7 @@ env_Cmd(clientData, interp, objc, objv) ret = dbenv->get_lk_max_locks(dbenv, &value); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env get_lk_max_locks")) == TCL_OK) - res = Tcl_NewIntObj(value); + res = Tcl_NewLongObj((long)value); break; case ENVGETLKMAXOBJECTS: if (objc != 2) { @@ -601,7 +632,30 @@ env_Cmd(clientData, interp, objc, objv) ret = dbenv->get_lk_max_objects(dbenv, &value); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env get_lk_max_objects")) == TCL_OK) - res = Tcl_NewIntObj(value); + res = Tcl_NewLongObj((long)value); + break; + case ENVGETMPMAXOPENFD: + if (objc != 2) { + Tcl_WrongNumArgs(interp, 1, objv, NULL); + return (TCL_ERROR); + } + ret = dbenv->get_mp_max_openfd(dbenv, &intvalue1); + if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), + "env get_mp_max_openfd")) == TCL_OK) + res = Tcl_NewIntObj(intvalue1); + break; + case ENVGETMPMAXWRITE: + if (objc != 2) { + Tcl_WrongNumArgs(interp, 1, objv, NULL); + return (TCL_ERROR); + } + ret = dbenv->get_mp_max_write(dbenv, &intvalue1, &intvalue2); + if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), + "env get_mp_max_write")) == TCL_OK) { + myobjv[0] = Tcl_NewIntObj(intvalue1); + myobjv[1] = Tcl_NewIntObj(intvalue2); + res = Tcl_NewListObj(2, myobjv); + } break; case ENVGETMPMMAPSIZE: if (objc != 2) { @@ -611,7 +665,7 @@ env_Cmd(clientData, interp, objc, objv) ret = dbenv->get_mp_mmapsize(dbenv, &size); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env get_mp_mmapsize")) == TCL_OK) - res = Tcl_NewIntObj(size); + res = Tcl_NewLongObj((long)size); break; case ENVGETOPENFLAG: result = env_GetOpenFlag(interp, objc, objv, dbenv); @@ -624,8 +678,8 @@ env_Cmd(clientData, interp, objc, objv) ret = dbenv->get_rep_limit(dbenv, &gbytes, &bytes); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env get_rep_limit")) == TCL_OK) { - myobjv[0] = Tcl_NewIntObj(gbytes); - myobjv[1] = Tcl_NewIntObj(bytes); + myobjv[0] = Tcl_NewLongObj((long)gbytes); + myobjv[1] = Tcl_NewLongObj((long)bytes); res = Tcl_NewListObj(2, myobjv); } break; @@ -647,7 +701,7 @@ env_Cmd(clientData, interp, objc, objv) ret = dbenv->get_tas_spins(dbenv, &value); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env get_tas_spins")) == TCL_OK) - res = Tcl_NewIntObj(value); + res = Tcl_NewLongObj((long)value); break; case ENVGETTIMEOUT: result = env_GetTimeout(interp, objc, 
objv, dbenv); @@ -660,7 +714,7 @@ env_Cmd(clientData, interp, objc, objv) ret = dbenv->get_tmp_dir(dbenv, &strval); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env get_tmp_dir")) == TCL_OK) - res = Tcl_NewStringObj(strval, strlen(strval)); + res = NewStringObj(strval, strlen(strval)); break; case ENVGETTXMAX: if (objc != 2) { @@ -670,7 +724,7 @@ env_Cmd(clientData, interp, objc, objv) ret = dbenv->get_tx_max(dbenv, &value); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env get_tx_max")) == TCL_OK) - res = Tcl_NewIntObj(value); + res = Tcl_NewLongObj((long)value); break; case ENVGETTXTIMESTAMP: if (objc != 2) { @@ -716,7 +770,7 @@ tcl_EnvRemove(interp, objc, objv, dbenv, envip) DBTCL_INFO *envip; /* Info pointer */ { static const char *envremopts[] = { -#if CONFIG_TEST +#ifdef CONFIG_TEST "-overwrite", "-server", #endif @@ -732,7 +786,7 @@ tcl_EnvRemove(interp, objc, objv, dbenv, envip) NULL }; enum envremopts { -#if CONFIG_TEST +#ifdef CONFIG_TEST ENVREM_OVERWRITE, ENVREM_SERVER, #endif @@ -773,7 +827,7 @@ tcl_EnvRemove(interp, objc, objv, dbenv, envip) } i++; switch ((enum envremopts)optindex) { -#if CONFIG_TEST +#ifdef CONFIG_TEST case ENVREM_SERVER: /* Make sure we have an arg to check against! */ if (i >= objc) { @@ -821,7 +875,7 @@ tcl_EnvRemove(interp, objc, objv, dbenv, envip) } home = Tcl_GetStringFromObj(objv[i++], NULL); break; -#if CONFIG_TEST +#ifdef CONFIG_TEST case ENVREM_OVERWRITE: sflag |= DB_OVERWRITE; break; @@ -973,7 +1027,7 @@ _EnvInfoDelete(interp, envip) * Other types like log cursors and locks will just * get cleaned up here. */ - if (p->i_parent == envip) { + if (p->i_parent == envip) { switch (p->i_type) { case I_TXN: _TxnInfoDelete(interp, p); @@ -981,7 +1035,15 @@ _EnvInfoDelete(interp, envip) case I_MP: _MpInfoDelete(interp, p); break; - default: + case I_DB: + case I_DBC: + case I_ENV: + case I_LOCK: + case I_LOGC: + case I_MUTEX: + case I_NDBM: + case I_PG: + case I_SEQ: Tcl_SetResult(interp, "_EnvInfoDelete: bad info type", TCL_STATIC); @@ -997,7 +1059,7 @@ _EnvInfoDelete(interp, envip) _DeleteInfo(envip); } -#if CONFIG_TEST +#ifdef CONFIG_TEST /* * PUBLIC: int tcl_EnvVerbose __P((Tcl_Interp *, DB_ENV *, Tcl_Obj *, * PUBLIC: Tcl_Obj *)); @@ -1012,7 +1074,6 @@ tcl_EnvVerbose(interp, dbenv, which, onoff) Tcl_Obj *onoff; /* On or off */ { static const char *verbwhich[] = { - "chkpt", "deadlock", "recovery", "rep", @@ -1020,7 +1081,6 @@ tcl_EnvVerbose(interp, dbenv, which, onoff) NULL }; enum verbwhich { - ENVVERB_CHK, ENVVERB_DEAD, ENVVERB_REC, ENVVERB_REP, @@ -1043,9 +1103,6 @@ tcl_EnvVerbose(interp, dbenv, which, onoff) return (IS_HELP(which)); switch ((enum verbwhich)optindex) { - case ENVVERB_CHK: - wh = DB_VERB_CHKPOINT; - break; case ENVVERB_DEAD: wh = DB_VERB_DEADLOCK; break; @@ -1080,7 +1137,7 @@ tcl_EnvVerbose(interp, dbenv, which, onoff) } #endif -#if CONFIG_TEST +#ifdef CONFIG_TEST /* * PUBLIC: int tcl_EnvAttr __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *)); * @@ -1109,58 +1166,58 @@ tcl_EnvAttr(interp, objc, objv, dbenv) * We peek at the dbenv to determine what subsystems * we have available in this env. 
*/ - myobj = Tcl_NewStringObj("-home", strlen("-home")); + myobj = NewStringObj("-home", strlen("-home")); if ((result = Tcl_ListObjAppendElement(interp, retlist, myobj)) != TCL_OK) goto err; - myobj = Tcl_NewStringObj(dbenv->db_home, strlen(dbenv->db_home)); + myobj = NewStringObj(dbenv->db_home, strlen(dbenv->db_home)); if ((result = Tcl_ListObjAppendElement(interp, retlist, myobj)) != TCL_OK) goto err; if (CDB_LOCKING(dbenv)) { - myobj = Tcl_NewStringObj("-cdb", strlen("-cdb")); + myobj = NewStringObj("-cdb", strlen("-cdb")); if ((result = Tcl_ListObjAppendElement(interp, retlist, myobj)) != TCL_OK) goto err; } if (CRYPTO_ON(dbenv)) { - myobj = Tcl_NewStringObj("-crypto", strlen("-crypto")); + myobj = NewStringObj("-crypto", strlen("-crypto")); if ((result = Tcl_ListObjAppendElement(interp, retlist, myobj)) != TCL_OK) goto err; } if (LOCKING_ON(dbenv)) { - myobj = Tcl_NewStringObj("-lock", strlen("-lock")); + myobj = NewStringObj("-lock", strlen("-lock")); if ((result = Tcl_ListObjAppendElement(interp, retlist, myobj)) != TCL_OK) goto err; } if (LOGGING_ON(dbenv)) { - myobj = Tcl_NewStringObj("-log", strlen("-log")); + myobj = NewStringObj("-log", strlen("-log")); if ((result = Tcl_ListObjAppendElement(interp, retlist, myobj)) != TCL_OK) goto err; } if (MPOOL_ON(dbenv)) { - myobj = Tcl_NewStringObj("-mpool", strlen("-mpool")); + myobj = NewStringObj("-mpool", strlen("-mpool")); if ((result = Tcl_ListObjAppendElement(interp, retlist, myobj)) != TCL_OK) goto err; } if (RPC_ON(dbenv)) { - myobj = Tcl_NewStringObj("-rpc", strlen("-rpc")); + myobj = NewStringObj("-rpc", strlen("-rpc")); if ((result = Tcl_ListObjAppendElement(interp, retlist, myobj)) != TCL_OK) goto err; } if (REP_ON(dbenv)) { - myobj = Tcl_NewStringObj("-rep", strlen("-rep")); + myobj = NewStringObj("-rep", strlen("-rep")); if ((result = Tcl_ListObjAppendElement(interp, retlist, myobj)) != TCL_OK) goto err; } if (TXN_ON(dbenv)) { - myobj = Tcl_NewStringObj("-txn", strlen("-txn")); + myobj = NewStringObj("-txn", strlen("-txn")); if ((result = Tcl_ListObjAppendElement(interp, retlist, myobj)) != TCL_OK) goto err; @@ -1188,12 +1245,13 @@ tcl_EnvSetFlags(interp, dbenv, which, onoff) "-auto_commit", "-direct_db", "-direct_log", + "-dsync_log", + "-log_inmemory", "-log_remove", "-nolock", "-nommap", "-nopanic", "-nosync", - "-notdurable", "-overwrite", "-panic", "-wrnosync", @@ -1203,12 +1261,13 @@ tcl_EnvSetFlags(interp, dbenv, which, onoff) ENVSF_AUTOCOMMIT, ENVSF_DIRECTDB, ENVSF_DIRECTLOG, + ENVSF_DSYNCLOG, + ENVSF_LOG_INMEMORY, ENVSF_LOG_REMOVE, ENVSF_NOLOCK, ENVSF_NOMMAP, ENVSF_NOPANIC, ENVSF_NOSYNC, - ENVSF_NOTDURABLE, ENVSF_OVERWRITE, ENVSF_PANIC, ENVSF_WRNOSYNC @@ -1239,6 +1298,12 @@ tcl_EnvSetFlags(interp, dbenv, which, onoff) case ENVSF_DIRECTLOG: wh = DB_DIRECT_LOG; break; + case ENVSF_DSYNCLOG: + wh = DB_DSYNC_LOG; + break; + case ENVSF_LOG_INMEMORY: + wh = DB_LOG_INMEMORY; + break; case ENVSF_LOG_REMOVE: wh = DB_LOG_AUTOREMOVE; break; @@ -1248,9 +1313,6 @@ tcl_EnvSetFlags(interp, dbenv, which, onoff) case ENVSF_NOMMAP: wh = DB_NOMMAP; break; - case ENVSF_NOTDURABLE: - wh = DB_TXN_NOT_DURABLE; - break; case ENVSF_NOSYNC: wh = DB_TXN_NOSYNC; break; @@ -1301,15 +1363,18 @@ tcl_EnvTest(interp, objc, objv, dbenv) { static const char *envtestcmd[] = { "abort", + "check", "copy", NULL }; enum envtestcmd { ENVTEST_ABORT, + ENVTEST_CHECK, ENVTEST_COPY }; static const char *envtestat[] = { "electinit", + "electvote1", "none", "predestroy", "preopen", @@ -1323,6 +1388,7 @@ tcl_EnvTest(interp, objc, objv, dbenv) }; enum envtestat 
{ ENVTEST_ELECTINIT, + ENVTEST_ELECTVOTE1, ENVTEST_NONE, ENVTEST_PREDESTROY, ENVTEST_PREOPEN, @@ -1344,7 +1410,7 @@ tcl_EnvTest(interp, objc, objv, dbenv) } /* - * This must be the "copy" or "abort" portion of the command. + * This must be the "check", "copy" or "abort" portion of the command. */ if (Tcl_GetIndexFromObj(interp, objv[2], envtestcmd, "command", TCL_EXACT, &optindex) != TCL_OK) { @@ -1355,6 +1421,13 @@ tcl_EnvTest(interp, objc, objv, dbenv) case ENVTEST_ABORT: loc = &dbenv->test_abort; break; + case ENVTEST_CHECK: + loc = &dbenv->test_check; + if (Tcl_GetIntFromObj(interp, objv[3], &testval) != TCL_OK) { + result = IS_HELP(objv[3]); + return (result); + } + goto done; case ENVTEST_COPY: loc = &dbenv->test_copy; break; @@ -1376,6 +1449,10 @@ tcl_EnvTest(interp, objc, objv, dbenv) DB_ASSERT(loc == &dbenv->test_abort); testval = DB_TEST_ELECTINIT; break; + case ENVTEST_ELECTVOTE1: + DB_ASSERT(loc == &dbenv->test_abort); + testval = DB_TEST_ELECTVOTE1; + break; case ENVTEST_NONE: testval = 0; break; @@ -1408,7 +1485,7 @@ tcl_EnvTest(interp, objc, objv, dbenv) Tcl_SetResult(interp, "Illegal test location", TCL_STATIC); return (TCL_ERROR); } - +done: *loc = testval; Tcl_SetResult(interp, "0", TCL_STATIC); return (result); @@ -1520,13 +1597,13 @@ env_DbRemove(interp, objc, objv, dbenv) if (i != objc) { subdbtmp = Tcl_GetByteArrayFromObj(objv[i++], &subdblen); - if ((ret = __os_malloc(dbenv, subdblen + 1, - &subdb)) != 0) { + if ((ret = __os_malloc( + dbenv, (size_t)subdblen + 1, &subdb)) != 0) { Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC); return (0); } - memcpy(subdb, subdbtmp, subdblen); + memcpy(subdb, subdbtmp, (size_t)subdblen); subdb[subdblen] = '\0'; } } else { @@ -1649,24 +1726,23 @@ env_DbRename(interp, objc, objv, dbenv) if (i == objc - 2) { subdbtmp = Tcl_GetByteArrayFromObj(objv[i++], &subdblen); - if ((ret = __os_malloc(dbenv, subdblen + 1, - &subdb)) != 0) { + if ((ret = __os_malloc( + dbenv, (size_t)subdblen + 1, &subdb)) != 0) { Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC); return (0); } - memcpy(subdb, subdbtmp, subdblen); + memcpy(subdb, subdbtmp, (size_t)subdblen); subdb[subdblen] = '\0'; } - subdbtmp = - Tcl_GetByteArrayFromObj(objv[i++], &newlen); - if ((ret = __os_malloc(dbenv, newlen + 1, - &newname)) != 0) { + subdbtmp = Tcl_GetByteArrayFromObj(objv[i++], &newlen); + if ((ret = __os_malloc( + dbenv, (size_t)newlen + 1, &newname)) != 0) { Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC); return (0); } - memcpy(newname, subdbtmp, newlen); + memcpy(newname, subdbtmp, (size_t)newlen); newname[newlen] = '\0'; } else { Tcl_WrongNumArgs(interp, 3, objv, @@ -1709,7 +1785,9 @@ env_GetFlags(interp, objc, objv, dbenv) { DB_CDB_ALLDB, "-cdb_alldb" }, { DB_DIRECT_DB, "-direct_db" }, { DB_DIRECT_LOG, "-direct_log" }, + { DB_DSYNC_LOG, "-dsync_log" }, { DB_LOG_AUTOREMOVE, "-log_remove" }, + { DB_LOG_INMEMORY, "-log_inmemory" }, { DB_NOLOCKING, "-nolock" }, { DB_NOMMAP, "-nommap" }, { DB_NOPANIC, "-nopanic" }, @@ -1717,7 +1795,6 @@ env_GetFlags(interp, objc, objv, dbenv) { DB_PANIC_ENVIRONMENT, "-panic" }, { DB_REGION_INIT, "-region_init" }, { DB_TXN_NOSYNC, "-nosync" }, - { DB_TXN_NOT_DURABLE, "-notdurable" }, { DB_TXN_WRITE_NOSYNC, "-wrnosync" }, { DB_YIELDCPU, "-yield" }, { 0, NULL } @@ -1736,11 +1813,12 @@ env_GetFlags(interp, objc, objv, dbenv) for (i = 0; open_flags[i].flag != 0; i++) if (LF_ISSET(open_flags[i].flag)) { if (strlen(buf) > 0) - strncat(buf, " ", sizeof(buf)); - strncat(buf, open_flags[i].arg, sizeof(buf)); + (void)strncat(buf, " ", 
sizeof(buf)); + (void)strncat( + buf, open_flags[i].arg, sizeof(buf)); } - res = Tcl_NewStringObj(buf, strlen(buf)); + res = NewStringObj(buf, strlen(buf)); Tcl_SetObjResult(interp, res); } @@ -1797,11 +1875,12 @@ env_GetOpenFlag(interp, objc, objv, dbenv) for (i = 0; open_flags[i].flag != 0; i++) if (LF_ISSET(open_flags[i].flag)) { if (strlen(buf) > 0) - strncat(buf, " ", sizeof(buf)); - strncat(buf, open_flags[i].arg, sizeof(buf)); + (void)strncat(buf, " ", sizeof(buf)); + (void)strncat( + buf, open_flags[i].arg, sizeof(buf)); } - res = Tcl_NewStringObj(buf, strlen(buf)); + res = NewStringObj(buf, strlen(buf)); Tcl_SetObjResult(interp, res); } @@ -1848,11 +1927,12 @@ tcl_EnvGetEncryptFlags(interp, objc, objv, dbenv) for (i = 0; encrypt_flags[i].flag != 0; i++) if (LF_ISSET(encrypt_flags[i].flag)) { if (strlen(buf) > 0) - strncat(buf, " ", sizeof(buf)); - strncat(buf, encrypt_flags[i].arg, sizeof(buf)); + (void)strncat(buf, " ", sizeof(buf)); + (void)strncat( + buf, encrypt_flags[i].arg, sizeof(buf)); } - res = Tcl_NewStringObj(buf, strlen(buf)); + res = NewStringObj(buf, strlen(buf)); Tcl_SetObjResult(interp, res); } @@ -1881,6 +1961,7 @@ env_GetLockDetect(interp, objc, objv, dbenv) { DB_LOCK_DEFAULT, "default" }, { DB_LOCK_EXPIRE, "expire" }, { DB_LOCK_MAXLOCKS, "maxlocks" }, + { DB_LOCK_MAXWRITE, "maxwrite" }, { DB_LOCK_MINLOCKS, "minlocks" }, { DB_LOCK_MINWRITE, "minwrite" }, { DB_LOCK_OLDEST, "oldest" }, @@ -1901,7 +1982,7 @@ env_GetLockDetect(interp, objc, objv, dbenv) if (lk_detect == lk_detect_returns[i].flag) answer = lk_detect_returns[i].name; - res = Tcl_NewStringObj(answer, strlen(answer)); + res = NewStringObj(answer, strlen(answer)); Tcl_SetObjResult(interp, res); } @@ -1919,11 +2000,6 @@ env_GetTimeout(interp, objc, objv, dbenv) Tcl_Obj *CONST objv[]; /* The argument objects */ DB_ENV *dbenv; { - int i, ret, result; - u_int32_t which; - const char *arg; - db_timeout_t timeout; - Tcl_Obj *res; static const struct { u_int32_t flag; char *arg; @@ -1932,6 +2008,13 @@ env_GetTimeout(interp, objc, objv, dbenv) { DB_SET_LOCK_TIMEOUT, "lock" }, { 0, NULL } }; + Tcl_Obj *res; + db_timeout_t timeout; + u_int32_t which; + int i, ret, result; + const char *arg; + + COMPQUIET(timeout, 0); if (objc != 3) { Tcl_WrongNumArgs(interp, 1, objv, NULL); @@ -1951,7 +2034,7 @@ env_GetTimeout(interp, objc, objv, dbenv) ret = dbenv->get_timeout(dbenv, &timeout, which); err: if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env get_timeout")) == TCL_OK) { - res = Tcl_NewLongObj(timeout); + res = Tcl_NewLongObj((long)timeout); Tcl_SetObjResult(interp, res); } @@ -1969,21 +2052,22 @@ env_GetVerbose(interp, objc, objv, dbenv) Tcl_Obj *CONST objv[]; /* The argument objects */ DB_ENV *dbenv; { - int i, onoff, ret, result; - u_int32_t which; - const char *arg, *answer; - Tcl_Obj *res; static const struct { u_int32_t flag; char *arg; } verbose_flags[] = { - { DB_VERB_CHKPOINT, "chkpt" }, { DB_VERB_DEADLOCK, "deadlock" }, { DB_VERB_RECOVERY, "recovery" }, { DB_VERB_REPLICATION, "rep" }, { DB_VERB_WAITSFOR, "wait" }, { 0, NULL } }; + Tcl_Obj *res; + u_int32_t which; + int i, onoff, ret, result; + const char *arg, *answer; + + COMPQUIET(onoff, 0); if (objc != 3) { Tcl_WrongNumArgs(interp, 1, objv, NULL); @@ -2004,9 +2088,77 @@ env_GetVerbose(interp, objc, objv, dbenv) err: if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env get_timeout")) == 0) { answer = onoff ? 
"on" : "off"; - res = Tcl_NewStringObj(answer, strlen(answer)); + res = NewStringObj(answer, strlen(answer)); Tcl_SetObjResult(interp, res); } return (result); } + +/* + * PUBLIC: void tcl_EnvSetErrfile __P((Tcl_Interp *, DB_ENV *, DBTCL_INFO *, + * PUBLIC: char *)); + * + * tcl_EnvSetErrfile -- + * Implements the ENV->set_errfile command. + */ +void +tcl_EnvSetErrfile(interp, dbenv, ip, errf) + Tcl_Interp *interp; /* Interpreter */ + DB_ENV *dbenv; /* Database pointer */ + DBTCL_INFO *ip; /* Our internal info */ + char *errf; +{ + COMPQUIET(interp, NULL); + /* + * If the user already set one, free it. + */ + if (ip->i_err != NULL && ip->i_err != stdout && + ip->i_err != stderr) + (void)fclose(ip->i_err); + if (strcmp(errf, "/dev/stdout") == 0) + ip->i_err = stdout; + else if (strcmp(errf, "/dev/stderr") == 0) + ip->i_err = stderr; + else + ip->i_err = fopen(errf, "a"); + if (ip->i_err != NULL) + dbenv->set_errfile(dbenv, ip->i_err); +} + +/* + * PUBLIC: int tcl_EnvSetErrpfx __P((Tcl_Interp *, DB_ENV *, DBTCL_INFO *, + * PUBLIC: char *)); + * + * tcl_EnvSetErrpfx -- + * Implements the ENV->set_errpfx command. + */ +int +tcl_EnvSetErrpfx(interp, dbenv, ip, pfx) + Tcl_Interp *interp; /* Interpreter */ + DB_ENV *dbenv; /* Database pointer */ + DBTCL_INFO *ip; /* Our internal info */ + char *pfx; +{ + int result, ret; + + /* + * Assume success. The only thing that can fail is + * the __os_strdup. + */ + result = TCL_OK; + Tcl_SetResult(interp, "0", TCL_STATIC); + /* + * If the user already set one, free it. + */ + if (ip->i_errpfx != NULL) + __os_free(dbenv, ip->i_errpfx); + if ((ret = __os_strdup(dbenv, pfx, &ip->i_errpfx)) != 0) { + result = _ReturnSetup(interp, ret, + DB_RETOK_STD(ret), "__os_strdup"); + ip->i_errpfx = NULL; + } + if (ip->i_errpfx != NULL) + dbenv->set_errpfx(dbenv, ip->i_errpfx); + return (result); +} diff --git a/db/tcl/tcl_internal.c b/db/tcl/tcl_internal.c index 896c4a3e3..8e9651273 100644 --- a/db/tcl/tcl_internal.c +++ b/db/tcl/tcl_internal.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: tcl_internal.c,v 11.69 2004/05/06 02:01:41 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: tcl_internal.c,v 11.59 2003/08/14 17:55:29 mjc Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -23,7 +21,6 @@ static const char revid[] = "$Id: tcl_internal.c,v 11.59 2003/08/14 17:55:29 mjc #include "dbinc/tcl_db.h" #include "dbinc/db_page.h" #include "dbinc/db_am.h" -#include "dbinc_auto/db_ext.h" /* * @@ -43,20 +40,6 @@ static const char revid[] = "$Id: tcl_internal.c,v 11.59 2003/08/14 17:55:29 mjc * get/manipulate the info structure. */ -/* - * Prototypes for procedures defined later in this file: - */ -static void tcl_flag_callback __P((u_int32_t, const FN *, void *)); - -/* - * Private structure type used to pass both an interp and an object into - * a callback's single void *. 
- */ -struct __tcl_callback_bundle { - Tcl_Interp *interp; - Tcl_Obj *obj; -}; - #define GLOB_CHAR(c) ((c) == '*' || (c) == '?') /* @@ -185,7 +168,7 @@ _DeleteInfo(p) if (p->i_lockobj.data != NULL) __os_free(NULL, p->i_lockobj.data); if (p->i_err != NULL && p->i_err != stderr) { - fclose(p->i_err); + (void)fclose(p->i_err); p->i_err = NULL; } if (p->i_errpfx != NULL) @@ -210,21 +193,21 @@ _DeleteInfo(p) /* * PUBLIC: int _SetListElem __P((Tcl_Interp *, - * PUBLIC: Tcl_Obj *, void *, int, void *, int)); + * PUBLIC: Tcl_Obj *, void *, u_int32_t, void *, u_int32_t)); */ int _SetListElem(interp, list, elem1, e1cnt, elem2, e2cnt) Tcl_Interp *interp; Tcl_Obj *list; void *elem1, *elem2; - int e1cnt, e2cnt; + u_int32_t e1cnt, e2cnt; { Tcl_Obj *myobjv[2], *thislist; int myobjc; myobjc = 2; - myobjv[0] = Tcl_NewByteArrayObj((u_char *)elem1, e1cnt); - myobjv[1] = Tcl_NewByteArrayObj((u_char *)elem2, e2cnt); + myobjv[0] = Tcl_NewByteArrayObj((u_char *)elem1, (int)e1cnt); + myobjv[1] = Tcl_NewByteArrayObj((u_char *)elem2, (int)e2cnt); thislist = Tcl_NewListObj(myobjc, myobjv); if (thislist == NULL) return (TCL_ERROR); @@ -233,21 +216,46 @@ _SetListElem(interp, list, elem1, e1cnt, elem2, e2cnt) } /* - * PUBLIC: int _SetListElemInt __P((Tcl_Interp *, Tcl_Obj *, void *, int)); + * PUBLIC: int _SetListElemInt __P((Tcl_Interp *, Tcl_Obj *, void *, long)); */ int _SetListElemInt(interp, list, elem1, elem2) Tcl_Interp *interp; Tcl_Obj *list; void *elem1; - int elem2; + long elem2; +{ + Tcl_Obj *myobjv[2], *thislist; + int myobjc; + + myobjc = 2; + myobjv[0] = + Tcl_NewByteArrayObj((u_char *)elem1, (int)strlen((char *)elem1)); + myobjv[1] = Tcl_NewLongObj(elem2); + thislist = Tcl_NewListObj(myobjc, myobjv); + if (thislist == NULL) + return (TCL_ERROR); + return (Tcl_ListObjAppendElement(interp, list, thislist)); +} + +/* + * PUBLIC: int _SetListElemWideInt __P((Tcl_Interp *, + * PUBLIC: Tcl_Obj *, void *, int64_t)); + */ +int +_SetListElemWideInt(interp, list, elem1, elem2) + Tcl_Interp *interp; + Tcl_Obj *list; + void *elem1; + int64_t elem2; { Tcl_Obj *myobjv[2], *thislist; int myobjc; myobjc = 2; - myobjv[0] = Tcl_NewByteArrayObj((u_char *)elem1, strlen((char *)elem1)); - myobjv[1] = Tcl_NewIntObj(elem2); + myobjv[0] = + Tcl_NewByteArrayObj((u_char *)elem1, (int)strlen((char *)elem1)); + myobjv[1] = Tcl_NewWideIntObj(elem2); thislist = Tcl_NewListObj(myobjc, myobjv); if (thislist == NULL) return (TCL_ERROR); @@ -256,7 +264,7 @@ _SetListElemInt(interp, list, elem1, elem2) /* * PUBLIC: int _SetListRecnoElem __P((Tcl_Interp *, Tcl_Obj *, - * PUBLIC: db_recno_t, u_char *, int)); + * PUBLIC: db_recno_t, u_char *, u_int32_t)); */ int _SetListRecnoElem(interp, list, elem1, elem2, e2size) @@ -264,14 +272,14 @@ _SetListRecnoElem(interp, list, elem1, elem2, e2size) Tcl_Obj *list; db_recno_t elem1; u_char *elem2; - int e2size; + u_int32_t e2size; { Tcl_Obj *myobjv[2], *thislist; int myobjc; myobjc = 2; myobjv[0] = Tcl_NewWideIntObj((Tcl_WideInt)elem1); - myobjv[1] = Tcl_NewByteArrayObj(elem2, e2size); + myobjv[1] = Tcl_NewByteArrayObj(elem2, (int)e2size); thislist = Tcl_NewListObj(myobjc, myobjv); if (thislist == NULL) return (TCL_ERROR); @@ -309,17 +317,18 @@ _Set3DBTList(interp, list, elem1, is1recno, elem2, is2recno, elem3) myobjv[0] = Tcl_NewWideIntObj( (Tcl_WideInt)*(db_recno_t *)elem1->data); else - myobjv[0] = - Tcl_NewByteArrayObj((u_char *)elem1->data, elem1->size); + myobjv[0] = Tcl_NewByteArrayObj( + (u_char *)elem1->data, (int)elem1->size); if (is2recno) myobjv[1] = Tcl_NewWideIntObj( 
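Note: the _SetListElemWideInt helper added above wraps the Tcl 8.4 wide-integer object API, so 64-bit values such as sequence numbers can be returned to Tcl without truncation. A standalone sketch of the same {name value} pattern using only public Tcl calls; the function name is illustrative:

#include <string.h>
#include <tcl.h>

/*
 * append_wide_pair --
 *	Illustrative only: append a {name value} sublist to "list", with
 *	the value carried as a 64-bit Tcl wide integer.
 */
static int
append_wide_pair(Tcl_Interp *interp,
    Tcl_Obj *list, const char *name, Tcl_WideInt value)
{
        Tcl_Obj *pair[2];

        pair[0] = Tcl_NewStringObj(name, (int)strlen(name));
        pair[1] = Tcl_NewWideIntObj(value);
        return (Tcl_ListObjAppendElement(interp, list,
            Tcl_NewListObj(2, pair)));
}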
(Tcl_WideInt)*(db_recno_t *)elem2->data); else - myobjv[1] = - Tcl_NewByteArrayObj((u_char *)elem2->data, elem2->size); + myobjv[1] = Tcl_NewByteArrayObj( + (u_char *)elem2->data, (int)elem2->size); - myobjv[2] = Tcl_NewByteArrayObj((u_char *)elem3->data, elem3->size); + myobjv[2] = Tcl_NewByteArrayObj( + (u_char *)elem3->data, (int)elem3->size); thislist = Tcl_NewListObj(3, myobjv); @@ -469,18 +478,21 @@ _ErrorSetup(interp, ret, errmsg) } /* - * PUBLIC: void _ErrorFunc __P((CONST char *, char *)); + * PUBLIC: void _ErrorFunc __P((const DB_ENV *, CONST char *, const char *)); */ void -_ErrorFunc(pfx, msg) +_ErrorFunc(dbenv, pfx, msg) + const DB_ENV *dbenv; CONST char *pfx; - char *msg; + const char *msg; { DBTCL_INFO *p; Tcl_Interp *interp; - int size; + size_t size; char *err; + COMPQUIET(dbenv, NULL); + p = _NameToInfo(pfx); if (p == NULL) return; @@ -579,70 +591,41 @@ _GetUInt32(interp, obj, resp) } /* - * tcl_flag_callback -- - * Callback for db_pr.c functions that contain the FN struct mapping - * flag values to meaningful strings. This function appends a Tcl_Obj - * containing each pertinent flag string to the specified Tcl list. + * _GetFlagsList -- + * Get a new Tcl object, containing a list of the string values + * associated with a particular set of flag values. + * + * PUBLIC: Tcl_Obj *_GetFlagsList __P((Tcl_Interp *, u_int32_t, const FN *)); */ -static void -tcl_flag_callback(flags, fn, vtcbp) +Tcl_Obj * +_GetFlagsList(interp, flags, fnp) + Tcl_Interp *interp; u_int32_t flags; - const FN *fn; - void *vtcbp; -{ const FN *fnp; - Tcl_Interp *interp; - Tcl_Obj *newobj, *listobj; +{ + Tcl_Obj *newlist, *newobj; int result; - struct __tcl_callback_bundle *tcbp; - tcbp = (struct __tcl_callback_bundle *)vtcbp; - interp = tcbp->interp; - listobj = tcbp->obj; + newlist = Tcl_NewObj(); - for (fnp = fn; fnp->mask != 0; ++fnp) + /* + * Append a Tcl_Obj containing each pertinent flag string to the + * specified Tcl list. + */ + for (; fnp->mask != 0; ++fnp) if (LF_ISSET(fnp->mask)) { - newobj = Tcl_NewStringObj(fnp->name, strlen(fnp->name)); + newobj = NewStringObj(fnp->name, strlen(fnp->name)); result = - Tcl_ListObjAppendElement(interp, listobj, newobj); + Tcl_ListObjAppendElement(interp, newlist, newobj); /* * Tcl_ListObjAppendElement is defined to return TCL_OK - * unless listobj isn't actually a list (or convertible + * unless newlist isn't actually a list (or convertible * into one). If this is the case, we screwed up badly * somehow. */ DB_ASSERT(result == TCL_OK); } -} - -/* - * _GetFlagsList -- - * Get a new Tcl object, containing a list of the string values - * associated with a particular set of flag values, given a function - * that can extract the right names for the right flags. 
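[Editor's sketch] The rewritten _GetFlagsList() above drops the old callback plumbing and simply walks an FN-style {mask, name} table, appending one string object per set flag. A reduced sketch of the same pattern against the public Tcl C API follows; the standalone struct and the flag values in the table are illustrative only and are not part of the library.

#include <string.h>
#include <tcl.h>

struct flag_name {
	unsigned int mask;
	const char *name;
};

/* Build a Tcl list holding the name of every flag set in "flags". */
static Tcl_Obj *
flags_to_list(Tcl_Interp *interp, unsigned int flags)
{
	/* Illustrative table; the library uses its own FN arrays. */
	static const struct flag_name table[] = {
		{ 0x0001, "-create" },
		{ 0x0002, "-rdonly" },
		{ 0, NULL }
	};
	const struct flag_name *fnp;
	Tcl_Obj *list;

	list = Tcl_NewObj();
	for (fnp = table; fnp->mask != 0; ++fnp)
		if (flags & fnp->mask)
			(void)Tcl_ListObjAppendElement(interp, list,
			    Tcl_NewStringObj(fnp->name, (int)strlen(fnp->name)));
	return (list);
}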
- * - * PUBLIC: Tcl_Obj *_GetFlagsList __P((Tcl_Interp *, u_int32_t, - * PUBLIC: void (*)(u_int32_t, void *, - * PUBLIC: void (*)(u_int32_t, const FN *, void *)))); - */ -Tcl_Obj * -_GetFlagsList(interp, flags, func) - Tcl_Interp *interp; - u_int32_t flags; - void (*func) - __P((u_int32_t, void *, void (*)(u_int32_t, const FN *, void *))); -{ - Tcl_Obj *newlist; - struct __tcl_callback_bundle tcb; - - newlist = Tcl_NewObj(); - - memset(&tcb, 0, sizeof(tcb)); - tcb.interp = interp; - tcb.obj = newlist; - - func(flags, &tcb, tcl_flag_callback); return (newlist); } @@ -660,7 +643,7 @@ _debug_check() if (__debug_print != 0) { printf("\r%7d:", __debug_on); - fflush(stdout); + (void)fflush(stdout); } if (__debug_on++ == __debug_test || __debug_stop) __db_loadme(); @@ -702,7 +685,7 @@ _CopyObjBytes(interp, obj, newp, sizep, freep) *freep = 0; ret = Tcl_GetIntFromObj(interp, obj, &i); tmp = Tcl_GetByteArrayFromObj(obj, &len); - *sizep = len; + *sizep = (u_int32_t)len; if (ret == TCL_ERROR) { Tcl_ResetResult(interp); *newp = tmp; @@ -714,9 +697,9 @@ _CopyObjBytes(interp, obj, newp, sizep, freep) * at some other point so we cannot count on GetByteArray * keeping our pointer valid. */ - if ((ret = __os_malloc(NULL, len, &new)) != 0) + if ((ret = __os_malloc(NULL, (size_t)len, &new)) != 0) return (ret); - memcpy(new, tmp, len); + memcpy(new, tmp, (size_t)len); *newp = new; *freep = 1; return (0); diff --git a/db/tcl/tcl_lock.c b/db/tcl/tcl_lock.c index 0385a0fdf..4a3de36bd 100644 --- a/db/tcl/tcl_lock.c +++ b/db/tcl/tcl_lock.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: tcl_lock.c,v 11.59 2004/10/07 16:48:39 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: tcl_lock.c,v 11.53 2003/11/26 23:14:22 ubell Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -25,7 +23,7 @@ static const char revid[] = "$Id: tcl_lock.c,v 11.53 2003/11/26 23:14:22 ubell E /* * Prototypes for procedures defined later in this file: */ -#if CONFIG_TEST +#ifdef CONFIG_TEST static int lock_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*)); static int _LockMode __P((Tcl_Interp *, Tcl_Obj *, db_lockmode_t *)); static int _GetThisLock __P((Tcl_Interp *, DB_ENV *, u_int32_t, @@ -47,9 +45,10 @@ tcl_LockDetect(interp, objc, objv, envp) DB_ENV *envp; /* Environment pointer */ { static const char *ldopts[] = { - "expire", "default", + "expire", "maxlocks", + "maxwrites", "minlocks", "minwrites", "oldest", @@ -58,9 +57,10 @@ tcl_LockDetect(interp, objc, objv, envp) NULL }; enum ldopts { - LD_EXPIRE, LD_DEFAULT, + LD_EXPIRE, LD_MAXLOCKS, + LD_MAXWRITES, LD_MINLOCKS, LD_MINWRITES, LD_OLDEST, @@ -79,38 +79,42 @@ tcl_LockDetect(interp, objc, objv, envp) return (IS_HELP(objv[i])); i++; switch ((enum ldopts)optindex) { - case LD_EXPIRE: - FLAG_CHECK(policy); - policy = DB_LOCK_EXPIRE; - break; case LD_DEFAULT: FLAG_CHECK(policy); policy = DB_LOCK_DEFAULT; break; + case LD_EXPIRE: + FLAG_CHECK(policy); + policy = DB_LOCK_EXPIRE; + break; case LD_MAXLOCKS: FLAG_CHECK(policy); policy = DB_LOCK_MAXLOCKS; break; - case LD_MINWRITES: + case LD_MAXWRITES: FLAG_CHECK(policy); - policy = DB_LOCK_MINWRITE; + policy = DB_LOCK_MAXWRITE; break; case LD_MINLOCKS: FLAG_CHECK(policy); policy = DB_LOCK_MINLOCKS; break; - case LD_OLDEST: + case LD_MINWRITES: FLAG_CHECK(policy); - policy = DB_LOCK_OLDEST; + policy = DB_LOCK_MINWRITE; break; - case 
LD_YOUNGEST: + case LD_OLDEST: FLAG_CHECK(policy); - policy = DB_LOCK_YOUNGEST; + policy = DB_LOCK_OLDEST; break; case LD_RANDOM: FLAG_CHECK(policy); policy = DB_LOCK_RANDOM; break; + case LD_YOUNGEST: + FLAG_CHECK(policy); + policy = DB_LOCK_YOUNGEST; + break; } } @@ -195,12 +199,12 @@ tcl_LockGet(interp, objc, objv, envp) result = _GetThisLock(interp, envp, lockid, flag, &obj, mode, newname); if (result == TCL_OK) { - res = Tcl_NewStringObj(newname, strlen(newname)); + res = NewStringObj(newname, strlen(newname)); Tcl_SetObjResult(interp, res); } out: if (freeobj) - (void)__os_free(envp, otmp); + __os_free(envp, otmp); return (result); } @@ -268,7 +272,7 @@ tcl_LockStat(interp, objc, objv, envp) MAKE_STAT_LIST("Number of transaction timeouts", sp->st_ntxntimeouts); Tcl_SetObjResult(interp, res); error: - (void)__os_ufree(envp, sp); + __os_ufree(envp, sp); return (result); } @@ -420,6 +424,7 @@ tcl_LockVec(interp, objc, objv, envp) memset(&list, 0, sizeof(DB_LOCKREQ)); flag = 0; freeobj = 0; + otmp = NULL; /* * If -nowait is given, it MUST be first arg. @@ -503,10 +508,10 @@ tcl_LockVec(interp, objc, objv, envp) thisop); goto error; } - thisop = Tcl_NewStringObj(newname, strlen(newname)); + thisop = NewStringObj(newname, strlen(newname)); (void)Tcl_ListObjAppendElement(interp, res, thisop); - if (freeobj) { - (void)__os_free(envp, otmp); + if (freeobj && otmp != NULL) { + __os_free(envp, otmp); freeobj = 0; } continue; @@ -576,8 +581,8 @@ tcl_LockVec(interp, objc, objv, envp) if (ret != 0 && result == TCL_OK) result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "lock put"); - if (freeobj) { - (void)__os_free(envp, otmp); + if (freeobj && otmp != NULL) { + __os_free(envp, otmp); freeobj = 0; } /* @@ -733,7 +738,7 @@ _GetThisLock(interp, envp, lockid, flag, objp, mode, newname) ip->i_parent = envip; ip->i_locker = lockid; _SetInfoData(ip, lock); - Tcl_CreateObjCommand(interp, newname, + (void)Tcl_CreateObjCommand(interp, newname, (Tcl_ObjCmdProc *)lock_Cmd, (ClientData)lock, NULL); error: return (result); diff --git a/db/tcl/tcl_log.c b/db/tcl/tcl_log.c index dcb4fcbbd..68c678101 100644 --- a/db/tcl/tcl_log.c +++ b/db/tcl/tcl_log.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: tcl_log.c,v 11.61 2004/04/05 20:18:32 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: tcl_log.c,v 11.58 2003/04/24 16:25:54 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -86,7 +84,7 @@ tcl_LogArchive(interp, objc, objv, envp) if (result == TCL_OK) { res = Tcl_NewListObj(0, NULL); for (file = list; file != NULL && *file != NULL; file++) { - fileobj = Tcl_NewStringObj(*file, strlen(*file)); + fileobj = NewStringObj(*file, strlen(*file)); result = Tcl_ListObjAppendElement(interp, res, fileobj); if (result != TCL_OK) break; @@ -186,7 +184,7 @@ tcl_LogFile(interp, objc, objv, envp) } result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "log_file"); if (ret == 0) { - res = Tcl_NewStringObj(name, strlen(name)); + res = NewStringObj(name, strlen(name)); Tcl_SetObjResult(interp, res); } @@ -336,7 +334,7 @@ tcl_LogPut(interp, objc, objv, envp) result = Tcl_ListObjAppendElement(interp, res, intobj); Tcl_SetObjResult(interp, res); if (freedata) - (void)__os_free(NULL, dtmp); + __os_free(NULL, dtmp); return (result); } /* @@ -403,7 +401,7 @@ tcl_LogStat(interp, objc, objv, envp) MAKE_STAT_LIST("Number of region lock nowaits", sp->st_region_nowait); Tcl_SetObjResult(interp, res); error: - (void)__os_ufree(envp, sp); + __os_ufree(envp, sp); return (result); } @@ -591,7 +589,7 @@ tcl_LogcGet(interp, objc, objv, logc) goto memerr; result = Tcl_ListObjAppendElement(interp, res, lsnlist); - dataobj = Tcl_NewStringObj(data.data, data.size); + dataobj = NewStringObj(data.data, data.size); if (dataobj == NULL) { goto memerr; } diff --git a/db/tcl/tcl_mp.c b/db/tcl/tcl_mp.c index 9bfd83095..29691a31c 100644 --- a/db/tcl/tcl_mp.c +++ b/db/tcl/tcl_mp.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: tcl_mp.c,v 11.58 2004/10/07 16:48:39 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: tcl_mp.c,v 11.50 2003/09/04 20:45:45 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -25,7 +23,7 @@ static const char revid[] = "$Id: tcl_mp.c,v 11.50 2003/09/04 20:45:45 bostic Ex /* * Prototypes for procedures defined later in this file: */ -#if CONFIG_TEST +#ifdef CONFIG_TEST static int mp_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*)); static int pg_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*)); static int tcl_MpGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, @@ -58,14 +56,14 @@ _MpInfoDelete(interp, mpip) * mp. Remove its commands and info structure. 
*/ nextp = LIST_NEXT(p, entries); - if (p->i_parent == mpip && p->i_type == I_PG) { + if (p->i_parent == mpip && p->i_type == I_PG) { (void)Tcl_DeleteCommand(interp, p->i_name); _DeleteInfo(p); } } } -#if CONFIG_TEST +#ifdef CONFIG_TEST /* * tcl_MpSync -- * @@ -100,8 +98,7 @@ tcl_MpSync(interp, objc, objv, envp) _debug_check(); ret = envp->memp_sync(envp, lsnp); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "memp sync"); - return (result); + return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret), "memp sync")); } /* @@ -118,11 +115,8 @@ tcl_MpTrickle(interp, objc, objv, envp) DB_ENV *envp; /* Environment pointer */ { - int pages; - int percent; - int result; - int ret; Tcl_Obj *res; + int pages, percent, result, ret; result = TCL_OK; /* @@ -298,9 +292,9 @@ tcl_Mp(interp, objc, objv, envp, envip) ip->i_parent = envip; ip->i_pgsz = pgsize; _SetInfoData(ip, mpf); - Tcl_CreateObjCommand(interp, newname, + (void)Tcl_CreateObjCommand(interp, newname, (Tcl_ObjCmdProc *)mp_Cmd, (ClientData)mpf, NULL); - res = Tcl_NewStringObj(newname, strlen(newname)); + res = NewStringObj(newname, strlen(newname)); Tcl_SetObjResult(interp, res); error: @@ -353,6 +347,11 @@ tcl_MpStat(interp, objc, objv, envp) MAKE_STAT_LIST("Cache size (bytes)", sp->st_bytes); MAKE_STAT_LIST("Number of caches", sp->st_ncache); MAKE_STAT_LIST("Region size", sp->st_regsize); + MAKE_STAT_LIST("Maximum memory-mapped file size", sp->st_mmapsize); + MAKE_STAT_LIST("Maximum open file descriptors", sp->st_maxopenfd); + MAKE_STAT_LIST("Maximum sequential buffer writes", sp->st_maxwrite); + MAKE_STAT_LIST( + "Sleep after writing maximum buffers", sp->st_maxwrite_sleep); MAKE_STAT_LIST("Pages mapped into address space", sp->st_map); MAKE_STAT_LIST("Cache hits", sp->st_cache_hit); MAKE_STAT_LIST("Cache misses", sp->st_cache_miss); @@ -415,9 +414,9 @@ tcl_MpStat(interp, objc, objv, envp) } Tcl_SetObjResult(interp, res1); error: - (void)__os_ufree(envp, sp); + __os_ufree(envp, sp); if (savefsp != NULL) - (void)__os_ufree(envp, savefsp); + __os_ufree(envp, savefsp); return (result); } @@ -521,7 +520,7 @@ mp_Cmd(clientData, interp, objc, objv) ret = mp->get_clear_len(mp, &value); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "mp get_clear_len")) == TCL_OK) - res = Tcl_NewIntObj(value); + res = Tcl_NewIntObj((int)value); break; case MPGETFILEID: if (objc != 2) { @@ -531,8 +530,7 @@ mp_Cmd(clientData, interp, objc, objv) ret = mp->get_fileid(mp, fileid); if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "mp get_fileid")) == TCL_OK) - res = Tcl_NewStringObj((char *)fileid, - (int)DB_FILE_ID_LEN); + res = NewStringObj((char *)fileid, DB_FILE_ID_LEN); break; case MPGETFTYPE: if (objc != 2) { @@ -564,7 +562,7 @@ mp_Cmd(clientData, interp, objc, objv) if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "mp get_pgcookie")) == TCL_OK) res = Tcl_NewByteArrayObj((u_char *)cookie.data, - cookie.size); + (int)cookie.size); break; } /* @@ -663,7 +661,7 @@ tcl_MpGet(interp, objc, objv, mp, mpip) return (TCL_ERROR); } _debug_check(); - pgno = ipgno; + pgno = (db_pgno_t)ipgno; ret = mp->get(mp, &pgno, flag, &page); result = _ReturnSetup(interp, ret, DB_RETOK_MPGET(ret), "mpool get"); if (result == TCL_ERROR) @@ -678,9 +676,9 @@ tcl_MpGet(interp, objc, objv, mp, mpip) ip->i_pgno = pgno; ip->i_pgsz = mpip->i_pgsz; _SetInfoData(ip, page); - Tcl_CreateObjCommand(interp, newname, + (void)Tcl_CreateObjCommand(interp, newname, (Tcl_ObjCmdProc *)pg_Cmd, (ClientData)page, NULL); - res = Tcl_NewStringObj(newname, strlen(newname)); + res 
= NewStringObj(newname, strlen(newname)); Tcl_SetObjResult(interp, res); } error: @@ -761,7 +759,7 @@ pg_Cmd(clientData, interp, objc, objv) case PGSET: case PGPUT: result = tcl_Pg(interp, objc, objv, page, mp, pgip, - cmdindex == PGSET ? 0 : 1); + (enum pgcmds)cmdindex == PGSET ? 0 : 1); break; case PGINIT: result = tcl_PgInit(interp, objc, objv, page, pgip); @@ -849,9 +847,8 @@ tcl_PgInit(interp, objc, objv, page, pgip) DBTCL_INFO *pgip; /* Info pointer */ { Tcl_Obj *res; - size_t pgsz; long *p, *endp, newval; - int length, result; + int length, pgsz, result; u_char *s; result = TCL_OK; @@ -866,12 +863,11 @@ tcl_PgInit(interp, objc, objv, page, pgip) s = Tcl_GetByteArrayFromObj(objv[2], &length); if (s == NULL) return (TCL_ERROR); - memcpy(page, s, - ((size_t)length < pgsz) ? (size_t)length : pgsz); + memcpy(page, s, (size_t)((length < pgsz) ? length : pgsz)); result = TCL_OK; } else { p = (long *)page; - for (endp = p + (pgsz / sizeof(long)); p < endp; p++) + for (endp = p + ((u_int)pgsz / sizeof(long)); p < endp; p++) *p = newval; } res = Tcl_NewIntObj(0); @@ -888,9 +884,8 @@ tcl_PgIsset(interp, objc, objv, page, pgip) DBTCL_INFO *pgip; /* Info pointer */ { Tcl_Obj *res; - size_t pgsz; long *p, *endp, newval; - int length, result; + int length, pgsz, result; u_char *s; result = TCL_OK; @@ -907,7 +902,7 @@ tcl_PgIsset(interp, objc, objv, page, pgip) result = TCL_OK; if (memcmp(page, s, - ((size_t)length < pgsz) ? (size_t)length : pgsz ) != 0) { + (size_t)((length < pgsz) ? length : pgsz)) != 0) { res = Tcl_NewIntObj(0); Tcl_SetObjResult(interp, res); return (result); @@ -919,7 +914,7 @@ tcl_PgIsset(interp, objc, objv, page, pgip) * this value). Otherwise, if we finish the loop, we return 1 * (is set to this value). */ - for (endp = p + (pgsz/sizeof(long)); p < endp; p++) + for (endp = p + ((u_int)pgsz / sizeof(long)); p < endp; p++) if (*p != newval) { res = Tcl_NewIntObj(0); Tcl_SetObjResult(interp, res); diff --git a/db/tcl/tcl_rep.c b/db/tcl/tcl_rep.c index 0d0fd30fd..8be4b196a 100644 --- a/db/tcl/tcl_rep.c +++ b/db/tcl/tcl_rep.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: tcl_rep.c,v 11.105 2004/10/07 16:48:39 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: tcl_rep.c,v 11.93 2003/09/12 16:23:13 sue Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -22,7 +20,7 @@ static const char revid[] = "$Id: tcl_rep.c,v 11.93 2003/09/12 16:23:13 sue Exp #include "db_int.h" #include "dbinc/tcl_db.h" -#if CONFIG_TEST +#ifdef CONFIG_TEST /* * tcl_RepElect -- * Call DB_ENV->rep_elect(). 
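[Editor's sketch] The hunk below tracks the expanded DB_ENV->rep_elect() signature in this release, which adds an explicit nvotes argument and a trailing flags parameter. A minimal caller, assuming the signature exactly as it is used in the updated Tcl code:

#include <db.h>

/*
 * Ask for an election among nsites group members, requiring nvotes
 * participants, with this site's priority pri and the given timeout.
 */
int
call_election(DB_ENV *dbenv, int nsites, int nvotes, int pri, u_int32_t timeout)
{
	int eid, ret;

	if ((ret = dbenv->rep_elect(dbenv,
	    nsites, nvotes, pri, timeout, &eid, 0)) != 0)
		return (ret);
	/* eid now holds the environment ID of the elected master. */
	return (0);
}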
@@ -37,23 +35,26 @@ tcl_RepElect(interp, objc, objv, dbenv) Tcl_Obj *CONST objv[]; /* The argument objects */ DB_ENV *dbenv; /* Environment pointer */ { - int eid, nsites, pri, result, ret; + int eid, nsites, nvotes, pri, result, ret; u_int32_t timeout; - if (objc != 5) { - Tcl_WrongNumArgs(interp, 5, objv, "nsites pri timeout"); + if (objc != 6) { + Tcl_WrongNumArgs(interp, 6, objv, "nsites pri timeout"); return (TCL_ERROR); } if ((result = Tcl_GetIntFromObj(interp, objv[2], &nsites)) != TCL_OK) return (result); - if ((result = Tcl_GetIntFromObj(interp, objv[3], &pri)) != TCL_OK) + if ((result = Tcl_GetIntFromObj(interp, objv[3], &nvotes)) != TCL_OK) return (result); - if ((result = _GetUInt32(interp, objv[4], &timeout)) != TCL_OK) + if ((result = Tcl_GetIntFromObj(interp, objv[4], &pri)) != TCL_OK) + return (result); + if ((result = _GetUInt32(interp, objv[5], &timeout)) != TCL_OK) return (result); _debug_check(); - if ((ret = dbenv->rep_elect(dbenv, nsites, pri, timeout, &eid)) != 0) + if ((ret = dbenv->rep_elect(dbenv, nsites, nvotes, + pri, timeout, &eid, 0)) != 0) return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env rep_elect")); @@ -63,7 +64,7 @@ tcl_RepElect(interp, objc, objv, dbenv) } #endif -#if CONFIG_TEST +#ifdef CONFIG_TEST /* * tcl_RepFlush -- * Call DB_ENV->rep_flush(). @@ -90,7 +91,7 @@ tcl_RepFlush(interp, objc, objv, dbenv) return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env rep_flush")); } #endif -#if CONFIG_TEST +#ifdef CONFIG_TEST /* * tcl_RepLimit -- * Call DB_ENV->set_rep_limit(). @@ -128,7 +129,7 @@ tcl_RepLimit(interp, objc, objv, dbenv) } #endif -#if CONFIG_TEST +#ifdef CONFIG_TEST /* * tcl_RepRequest -- * Call DB_ENV->set_rep_request(). @@ -166,7 +167,7 @@ tcl_RepRequest(interp, objc, objv, dbenv) } #endif -#if CONFIG_TEST +#ifdef CONFIG_TEST /* * tcl_RepStart -- * Call DB_ENV->rep_start(). @@ -233,7 +234,7 @@ tcl_RepStart(interp, objc, objv, dbenv) } #endif -#if CONFIG_TEST +#ifdef CONFIG_TEST /* * tcl_RepProcessMessage -- * Call DB_ENV->rep_process_message(). @@ -307,6 +308,7 @@ tcl_RepProcessMessage(interp, objc, objv, dbenv) * {HOLDELECTION 0} - HOLDELECTION, no other info needed. * {NEWMASTER #} - NEWMASTER and its ID. * {NEWSITE 0} - NEWSITE, no other info needed. + * {STARTUPDONE 0} - STARTUPDONE, no other info needed. * {ISPERM {LSN list}} - ISPERM and the perm LSN. * {NOTPERM {LSN list}} - NOTPERM and this msg's LSN. 
*/ @@ -317,38 +319,46 @@ tcl_RepProcessMessage(interp, objc, objv, dbenv) myobjv[1] = Tcl_NewIntObj(0); break; case DB_REP_DUPMASTER: - myobjv[0] = Tcl_NewByteArrayObj("DUPMASTER", - strlen("DUPMASTER")); + myobjv[0] = Tcl_NewByteArrayObj( + (u_char *)"DUPMASTER", (int)strlen("DUPMASTER")); myobjv[1] = Tcl_NewIntObj(0); break; case DB_REP_HOLDELECTION: - myobjv[0] = Tcl_NewByteArrayObj("HOLDELECTION", - strlen("HOLDELECTION")); + myobjv[0] = Tcl_NewByteArrayObj( + (u_char *)"HOLDELECTION", (int)strlen("HOLDELECTION")); myobjv[1] = Tcl_NewIntObj(0); break; case DB_REP_ISPERM: myobjv[0] = Tcl_NewLongObj((long)permlsn.file); myobjv[1] = Tcl_NewLongObj((long)permlsn.offset); lsnlist = Tcl_NewListObj(myobjc, myobjv); - myobjv[0] = Tcl_NewByteArrayObj("ISPERM", strlen("ISPERM")); + myobjv[0] = Tcl_NewByteArrayObj( + (u_char *)"ISPERM", (int)strlen("ISPERM")); myobjv[1] = lsnlist; break; case DB_REP_NEWMASTER: - myobjv[0] = Tcl_NewByteArrayObj("NEWMASTER", - strlen("NEWMASTER")); + myobjv[0] = Tcl_NewByteArrayObj( + (u_char *)"NEWMASTER", (int)strlen("NEWMASTER")); myobjv[1] = Tcl_NewIntObj(eid); break; case DB_REP_NEWSITE: - myobjv[0] = Tcl_NewByteArrayObj("NEWSITE", strlen("NEWSITE")); + myobjv[0] = Tcl_NewByteArrayObj( + (u_char *)"NEWSITE", (int)strlen("NEWSITE")); myobjv[1] = Tcl_NewIntObj(0); break; case DB_REP_NOTPERM: myobjv[0] = Tcl_NewLongObj((long)permlsn.file); myobjv[1] = Tcl_NewLongObj((long)permlsn.offset); lsnlist = Tcl_NewListObj(myobjc, myobjv); - myobjv[0] = Tcl_NewByteArrayObj("NOTPERM", strlen("NOTPERM")); + myobjv[0] = Tcl_NewByteArrayObj( + (u_char *)"NOTPERM", (int)strlen("NOTPERM")); myobjv[1] = lsnlist; break; + case DB_REP_STARTUPDONE: + myobjv[0] = Tcl_NewByteArrayObj( + (u_char *)"STARTUPDONE", (int)strlen("STARTUPDONE")); + myobjv[1] = Tcl_NewIntObj(0); + break; default: msg = db_strerror(ret); Tcl_AppendResult(interp, msg, NULL); @@ -361,15 +371,15 @@ tcl_RepProcessMessage(interp, objc, objv, dbenv) Tcl_SetObjResult(interp, res); out: if (freectl) - (void)__os_free(NULL, ctmp); + __os_free(NULL, ctmp); if (freerec) - (void)__os_free(NULL, rtmp); + __os_free(NULL, rtmp); return (result); } #endif -#if CONFIG_TEST +#ifdef CONFIG_TEST /* * tcl_RepStat -- * Call DB_ENV->rep_stat(). @@ -423,13 +433,18 @@ tcl_RepStat(interp, objc, objv, dbenv) /* * MAKE_STAT_* assumes 'res' and 'error' label. 
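[Editor's sketch] The cases above translate DB_ENV->rep_process_message() results into {tag value} lists for the test suite, including the new DB_REP_STARTUPDONE code. An application-side sketch of the same dispatch is shown here; it is partial and hedged, covering only a few of the return codes named in this diff, with eid and permlsn assumed to have been filled in by the call.

#include <stdio.h>
#include <db.h>

/* React to one rep_process_message() result. */
void
handle_rep_result(DB_ENV *dbenv, int ret, int eid, DB_LSN permlsn)
{
	switch (ret) {
	case 0:
		break;
	case DB_REP_NEWMASTER:
		printf("new master: environment ID %d\n", eid);
		break;
	case DB_REP_HOLDELECTION:
		/* The application should call DB_ENV->rep_elect(). */
		break;
	case DB_REP_ISPERM:
		printf("durable through %lu/%lu\n",
		    (unsigned long)permlsn.file, (unsigned long)permlsn.offset);
		break;
	case DB_REP_NOTPERM:
		/* The message's LSN could not yet be made durable. */
		break;
	case DB_REP_STARTUPDONE:
		/* Client sync-up with the master has finished. */
		break;
	default:
		dbenv->err(dbenv, ret, "rep_process_message");
		break;
	}
}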
*/ + if (sp->st_status == DB_REP_MASTER) + MAKE_STAT_LIST("Master", 1); + else + MAKE_STAT_LIST("Client", 1); MAKE_STAT_LSN("Next LSN expected", &sp->st_next_lsn); MAKE_STAT_LSN("First missed LSN", &sp->st_waiting_lsn); MAKE_STAT_LIST("Duplicate master conditions", sp->st_dupmasters); MAKE_STAT_LIST("Environment ID", sp->st_env_id); MAKE_STAT_LIST("Environment priority", sp->st_env_priority); MAKE_STAT_LIST("Generation number", sp->st_gen); - MAKE_STAT_LIST("In recovery", sp->st_in_recovery); + MAKE_STAT_LIST("Election generation number", sp->st_egen); + MAKE_STAT_LIST("Startup complete", sp->st_startup_complete); MAKE_STAT_LIST("Duplicate log records received", sp->st_log_duplicated); MAKE_STAT_LIST("Current log records queued", sp->st_log_queued); MAKE_STAT_LIST("Maximum log records queued", sp->st_log_queued_max); @@ -445,9 +460,15 @@ tcl_RepStat(interp, objc, objv, dbenv) MAKE_STAT_LIST("Message send failures", sp->st_msgs_send_failures); MAKE_STAT_LIST("Messages sent", sp->st_msgs_sent); MAKE_STAT_LIST("New site messages", sp->st_newsites); + MAKE_STAT_LIST("Number of sites in replication group", sp->st_nsites); MAKE_STAT_LIST("Transmission limited", sp->st_nthrottles); MAKE_STAT_LIST("Outdated conditions", sp->st_outdated); MAKE_STAT_LIST("Transactions applied", sp->st_txns_applied); + MAKE_STAT_LIST("Next page expected", sp->st_next_pg); + MAKE_STAT_LIST("First missed page", sp->st_waiting_pg); + MAKE_STAT_LIST("Duplicate pages received", sp->st_pg_duplicated); + MAKE_STAT_LIST("Pages received", sp->st_pg_records); + MAKE_STAT_LIST("Pages requested", sp->st_pg_requested); MAKE_STAT_LIST("Elections held", sp->st_elections); MAKE_STAT_LIST("Elections won", sp->st_elections_won); MAKE_STAT_LIST("Election phase", sp->st_election_status); @@ -455,13 +476,14 @@ tcl_RepStat(interp, objc, objv, dbenv) MAKE_STAT_LIST("Election generation number", sp->st_election_gen); MAKE_STAT_LSN("Election max LSN", &sp->st_election_lsn); MAKE_STAT_LIST("Election sites", sp->st_election_nsites); + MAKE_STAT_LIST("Election votes", sp->st_election_nvotes); MAKE_STAT_LIST("Election priority", sp->st_election_priority); MAKE_STAT_LIST("Election tiebreaker", sp->st_election_tiebreaker); MAKE_STAT_LIST("Election votes", sp->st_election_votes); Tcl_SetObjResult(interp, res); error: - (void)__os_ufree(dbenv, sp); + __os_ufree(dbenv, sp); return (result); } #endif diff --git a/db/tcl/tcl_seq.c b/db/tcl/tcl_seq.c new file mode 100644 index 000000000..6742a0713 --- /dev/null +++ b/db/tcl/tcl_seq.c @@ -0,0 +1,526 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2004 + * Sleepycat Software. All rights reserved. 
+ * + * $Id: tcl_seq.c,v 11.11 2004/09/22 22:20:35 mjc Exp $ + */ + +#include "db_config.h" + +#ifdef HAVE_SEQUENCE +#ifndef NO_SYSTEM_INCLUDES +#include + +#include +#include +#endif + +#include "db_int.h" +#include "dbinc/tcl_db.h" +#include "dbinc_auto/sequence_ext.h" + +/* + * Prototypes for procedures defined later in this file: + */ +static int tcl_SeqClose __P((Tcl_Interp *, + int, Tcl_Obj * CONST*, DB_SEQUENCE *, DBTCL_INFO *)); +static int tcl_SeqGet __P((Tcl_Interp *, + int, Tcl_Obj * CONST*, DB_SEQUENCE *)); +static int tcl_SeqRemove __P((Tcl_Interp *, + int, Tcl_Obj * CONST*, DB_SEQUENCE *, DBTCL_INFO *)); +static int tcl_SeqStat __P((Tcl_Interp *, + int, Tcl_Obj * CONST*, DB_SEQUENCE *)); +static int tcl_SeqGetFlags __P((Tcl_Interp *, + int, Tcl_Obj * CONST*, DB_SEQUENCE *)); + +/* + * + * PUBLIC: int seq_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*)); + * + * seq_Cmd -- + * Implements the "seq" widget. + */ +int +seq_Cmd(clientData, interp, objc, objv) + ClientData clientData; /* SEQ handle */ + Tcl_Interp *interp; /* Interpreter */ + int objc; /* How many arguments? */ + Tcl_Obj *CONST objv[]; /* The argument objects */ +{ + static const char *seqcmds[] = { + "close", + "get", + "get_cachesize", + "get_db", + "get_flags", + "get_key", + "get_range", + "remove", + "stat", + NULL + }; + enum seqcmds { + SEQCLOSE, + SEQGET, + SEQGETCACHESIZE, + SEQGETDB, + SEQGETFLAGS, + SEQGETKEY, + SEQGETRANGE, + SEQREMOVE, + SEQSTAT + }; + DB *dbp; + DBT key; + DBTCL_INFO *dbip, *ip; + DB_SEQUENCE *seq; + Tcl_Obj *myobjv[2], *res; + db_seq_t min, max; + int cmdindex, ncache, result, ret; + + Tcl_ResetResult(interp); + seq = (DB_SEQUENCE *)clientData; + result = TCL_OK; + dbip = NULL; + if (objc <= 1) { + Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs"); + return (TCL_ERROR); + } + if (seq == NULL) { + Tcl_SetResult(interp, "NULL sequence pointer", TCL_STATIC); + return (TCL_ERROR); + } + + ip = _PtrToInfo((void *)seq); + if (ip == NULL) { + Tcl_SetResult(interp, "NULL info pointer", TCL_STATIC); + return (TCL_ERROR); + } + + /* + * Get the command name index from the object based on the dbcmds + * defined above. 
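[Editor's sketch] The new tcl_seq.c file wraps the DB_SEQUENCE handle introduced in this release, and its command table above mirrors the C methods. A hedged sketch of the underlying C calls follows; the key name and cache size are illustrative, error handling is trimmed, and a transactional database would pass a transaction or DB_AUTO_COMMIT where NULL appears.

#include <string.h>
#include <db.h>

/* Fetch the next value from a sequence stored under an application key. */
int
next_order_id(DB *dbp, db_seq_t *valuep)
{
	DB_SEQUENCE *seq;
	DBT key;
	int ret;

	if ((ret = db_sequence_create(&seq, dbp, 0)) != 0)
		return (ret);

	memset(&key, 0, sizeof(key));
	key.data = "order_id";			/* illustrative key name */
	key.size = (u_int32_t)strlen("order_id");

	(void)seq->set_cachesize(seq, 100);	/* hand out blocks of 100 */
	(void)seq->set_flags(seq, DB_SEQ_INC | DB_SEQ_WRAP);

	if ((ret = seq->open(seq, NULL, &key, DB_CREATE)) != 0)
		goto err;

	/* Equivalent of the Tcl "$seq get 1": advance by a delta of 1. */
	ret = seq->get(seq, NULL, 1, valuep, 0);

err:	(void)seq->close(seq, 0);
	return (ret);
}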
+ */ + if (Tcl_GetIndexFromObj(interp, + objv[1], seqcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK) + return (IS_HELP(objv[1])); + + res = NULL; + switch ((enum seqcmds)cmdindex) { + case SEQGETRANGE: + ret = seq->get_range(seq, &min, &max); + if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), + "sequence get_range")) == TCL_OK) { + myobjv[0] = Tcl_NewWideIntObj(min); + myobjv[1] = Tcl_NewWideIntObj(max); + res = Tcl_NewListObj(2, myobjv); + } + break; + case SEQCLOSE: + result = tcl_SeqClose(interp, objc, objv, seq, ip); + break; + case SEQREMOVE: + result = tcl_SeqRemove(interp, objc, objv, seq, ip); + break; + case SEQGET: + result = tcl_SeqGet(interp, objc, objv, seq); + break; + case SEQSTAT: + result = tcl_SeqStat(interp, objc, objv, seq); + break; + case SEQGETCACHESIZE: + if (objc != 2) { + Tcl_WrongNumArgs(interp, 1, objv, NULL); + return (TCL_ERROR); + } + ret = seq->get_cachesize(seq, &ncache); + if ((result = _ReturnSetup(interp, ret, + DB_RETOK_STD(ret), "sequence get_cachesize")) == TCL_OK) + res = Tcl_NewIntObj(ncache); + break; + case SEQGETDB: + if (objc != 2) { + Tcl_WrongNumArgs(interp, 1, objv, NULL); + return (TCL_ERROR); + } + ret = seq->get_db(seq, &dbp); + if (ret == 0 && (dbip = _PtrToInfo((void *)dbp)) == NULL) { + Tcl_SetResult(interp, + "NULL db info pointer", TCL_STATIC); + return (TCL_ERROR); + } + + if ((result = _ReturnSetup(interp, ret, + DB_RETOK_STD(ret), "sequence get_db")) == TCL_OK) + res = NewStringObj(dbip->i_name, strlen(dbip->i_name)); + break; + case SEQGETKEY: + if (objc != 2) { + Tcl_WrongNumArgs(interp, 1, objv, NULL); + return (TCL_ERROR); + } + ret = seq->get_key(seq, &key); + if ((result = _ReturnSetup(interp, ret, + DB_RETOK_STD(ret), "sequence get_key")) == TCL_OK) + res = Tcl_NewByteArrayObj( + (u_char *)key.data, (int)key.size); + break; + case SEQGETFLAGS: + result = tcl_SeqGetFlags(interp, objc, objv, seq); + break; + } + + /* + * Only set result if we have a res. Otherwise, lower functions have + * already done so. + */ + if (result == TCL_OK && res) + Tcl_SetObjResult(interp, res); + return (result); +} + +/* + * tcl_db_stat -- + */ +static int +tcl_SeqStat(interp, objc, objv, seq) + Tcl_Interp *interp; /* Interpreter */ + int objc; /* How many arguments? */ + Tcl_Obj *CONST objv[]; /* The argument objects */ + DB_SEQUENCE *seq; /* Database pointer */ +{ + DB_SEQUENCE_STAT *sp; + u_int32_t flag; + Tcl_Obj *res, *flaglist, *myobjv[2]; + int result, ret; + char *arg; + + result = TCL_OK; + flag = 0; + + if (objc > 3) { + Tcl_WrongNumArgs(interp, 2, objv, "?-clear?"); + return (TCL_ERROR); + } + + if (objc == 3) { + arg = Tcl_GetStringFromObj(objv[2], NULL); + if (strcmp(arg, "-clear") == 0) + flag = DB_STAT_CLEAR; + else { + Tcl_SetResult(interp, + "db stat: unknown arg", TCL_STATIC); + return (TCL_ERROR); + } + } + + _debug_check(); + ret = seq->stat(seq, &sp, flag); + result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db stat"); + if (result == TCL_ERROR) + return (result); + + res = Tcl_NewObj(); + MAKE_STAT_LIST("Wait", sp->st_wait); + MAKE_STAT_LIST("No wait", sp->st_nowait); + MAKE_WSTAT_LIST("Current", sp->st_current); + MAKE_WSTAT_LIST("Cached", sp->st_value); + MAKE_WSTAT_LIST("Max Cached", sp->st_last_value); + MAKE_WSTAT_LIST("Min", sp->st_min); + MAKE_WSTAT_LIST("Max", sp->st_max); + MAKE_STAT_LIST("Cache size", sp->st_cache_size); + /* + * Construct a {name {flag1 flag2 ... flagN}} list for the + * seq flags. 
+ */ + myobjv[0] = NewStringObj("Flags", strlen("Flags")); + myobjv[1] = + _GetFlagsList(interp, sp->st_flags, __db_get_seq_flags_fn()); + flaglist = Tcl_NewListObj(2, myobjv); + if (flaglist == NULL) { + result = TCL_ERROR; + goto error; + } + if ((result = + Tcl_ListObjAppendElement(interp, res, flaglist)) != TCL_OK) + goto error; + + Tcl_SetObjResult(interp, res); + +error: __os_ufree(seq->seq_dbp->dbenv, sp); + return (result); +} + +/* + * tcl_db_close -- + */ +static int +tcl_SeqClose(interp, objc, objv, seq, ip) + Tcl_Interp *interp; /* Interpreter */ + int objc; /* How many arguments? */ + Tcl_Obj *CONST objv[]; /* The argument objects */ + DB_SEQUENCE *seq; /* Database pointer */ + DBTCL_INFO *ip; /* Info pointer */ +{ + int result, ret; + + result = TCL_OK; + if (objc > 2) { + Tcl_WrongNumArgs(interp, 2, objv, ""); + return (TCL_ERROR); + } + + _DeleteInfo(ip); + _debug_check(); + + ret = seq->close(seq, 0); + result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "sequence close"); + return (result); +} + +/* + * tcl_SeqGet -- + */ +static int +tcl_SeqGet(interp, objc, objv, seq) + Tcl_Interp *interp; /* Interpreter */ + int objc; /* How many arguments? */ + Tcl_Obj *CONST objv[]; /* The argument objects */ + DB_SEQUENCE *seq; /* Sequence pointer */ +{ + static const char *seqgetopts[] = { + "-auto_commit", + "-nosync", + "-txn", + NULL + }; + enum seqgetopts { + SEQGET_AUTO_COMMIT, + SEQGET_NOSYNC, + SEQGET_TXN + }; + DB_TXN *txn; + Tcl_Obj *res; + db_seq_t value; + u_int32_t aflag, delta; + int i, end, optindex, result, ret; + char *arg, msg[MSG_SIZE]; + + result = TCL_OK; + txn = NULL; + aflag = 0; + + if (objc < 3) { + Tcl_WrongNumArgs(interp, 2, objv, "?-args? delta"); + return (TCL_ERROR); + } + + /* + * Get the command name index from the object based on the options + * defined above. + */ + i = 2; + end = objc; + while (i < end) { + if (Tcl_GetIndexFromObj(interp, objv[i], seqgetopts, "option", + TCL_EXACT, &optindex) != TCL_OK) { + arg = Tcl_GetStringFromObj(objv[i], NULL); + if (arg[0] == '-') { + result = IS_HELP(objv[i]); + goto out; + } else + Tcl_ResetResult(interp); + break; + } + i++; + switch ((enum seqgetopts)optindex) { + case SEQGET_AUTO_COMMIT: + aflag |= DB_AUTO_COMMIT; + break; + case SEQGET_NOSYNC: + aflag |= DB_TXN_NOSYNC; + break; + case SEQGET_TXN: + if (i >= end) { + Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?"); + result = TCL_ERROR; + break; + } + arg = Tcl_GetStringFromObj(objv[i++], NULL); + txn = NAME_TO_TXN(arg); + if (txn == NULL) { + snprintf(msg, MSG_SIZE, + "Get: Invalid txn: %s\n", arg); + Tcl_SetResult(interp, msg, TCL_VOLATILE); + result = TCL_ERROR; + } + break; + } /* switch */ + if (result != TCL_OK) + break; + } + if (result != TCL_OK) + goto out; + + if (i != objc - 1) { + Tcl_SetResult(interp, + "Wrong number of key/data given\n", TCL_STATIC); + result = TCL_ERROR; + goto out; + } + + if ((result = _GetUInt32(interp, objv[objc - 1], &delta)) != TCL_OK) + goto out; + + ret = seq->get(seq, txn, (int32_t)delta, &value, aflag); + result = _ReturnSetup(interp, ret, DB_RETOK_DBGET(ret), "sequence get"); + if (ret == 0) { + res = Tcl_NewWideIntObj((Tcl_WideInt)value); + Tcl_SetObjResult(interp, res); + } +out: + return (result); +} +/* + */ +static int +tcl_SeqRemove(interp, objc, objv, seq, ip) + Tcl_Interp *interp; /* Interpreter */ + int objc; /* How many arguments? 
*/ + Tcl_Obj *CONST objv[]; /* The argument objects */ + DB_SEQUENCE *seq; /* Sequence pointer */ + DBTCL_INFO *ip; /* Info pointer */ +{ + static const char *seqgetopts[] = { + "-auto_commit", + "-nosync", + "-txn", + NULL + }; + enum seqgetopts { + SEQGET_AUTO_COMMIT, + SEQGET_NOSYNC, + SEQGET_TXN + }; + DB_TXN *txn; + u_int32_t aflag; + int i, end, optindex, result, ret; + char *arg, msg[MSG_SIZE]; + + result = TCL_OK; + txn = NULL; + aflag = 0; + + _DeleteInfo(ip); + + if (objc < 2) { + Tcl_WrongNumArgs(interp, 2, objv, "?-args?"); + return (TCL_ERROR); + } + + /* + * Get the command name index from the object based on the options + * defined above. + */ + i = 2; + end = objc; + while (i < end) { + if (Tcl_GetIndexFromObj(interp, objv[i], seqgetopts, "option", + TCL_EXACT, &optindex) != TCL_OK) { + arg = Tcl_GetStringFromObj(objv[i], NULL); + if (arg[0] == '-') { + result = IS_HELP(objv[i]); + goto out; + } else + Tcl_ResetResult(interp); + break; + } + i++; + switch ((enum seqgetopts)optindex) { + case SEQGET_AUTO_COMMIT: + aflag |= DB_AUTO_COMMIT; + break; + case SEQGET_NOSYNC: + aflag |= DB_TXN_NOSYNC; + break; + case SEQGET_TXN: + if (i >= end) { + Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?"); + result = TCL_ERROR; + break; + } + arg = Tcl_GetStringFromObj(objv[i++], NULL); + txn = NAME_TO_TXN(arg); + if (txn == NULL) { + snprintf(msg, MSG_SIZE, + "Remove: Invalid txn: %s\n", arg); + Tcl_SetResult(interp, msg, TCL_VOLATILE); + result = TCL_ERROR; + } + break; + } /* switch */ + if (result != TCL_OK) + break; + } + if (result != TCL_OK) + goto out; + + ret = seq->remove(seq, txn, aflag); + result = _ReturnSetup(interp, + ret, DB_RETOK_DBGET(ret), "sequence remove"); +out: + return (result); +} + +/* + * tcl_SeqGetFlags -- + */ +static int +tcl_SeqGetFlags(interp, objc, objv, seq) + Tcl_Interp *interp; /* Interpreter */ + int objc; /* How many arguments? */ + Tcl_Obj *CONST objv[]; /* The argument objects */ + DB_SEQUENCE *seq; /* Sequence pointer */ +{ + int i, ret, result; + u_int32_t flags; + char buf[512]; + Tcl_Obj *res; + + static const struct { + u_int32_t flag; + char *arg; + } seq_flags[] = { + { DB_SEQ_INC, "-inc" }, + { DB_SEQ_DEC, "-dec" }, + { DB_SEQ_WRAP, "-wrap" }, + { 0, NULL } + }; + + if (objc != 2) { + Tcl_WrongNumArgs(interp, 1, objv, NULL); + return (TCL_ERROR); + } + + ret = seq->get_flags(seq, &flags); + if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), + "db get_flags")) == TCL_OK) { + buf[0] = '\0'; + + for (i = 0; seq_flags[i].flag != 0; i++) + if (LF_ISSET(seq_flags[i].flag)) { + if (strlen(buf) > 0) + (void)strncat(buf, " ", sizeof(buf)); + (void)strncat( + buf, seq_flags[i].arg, sizeof(buf)); + } + + res = NewStringObj(buf, strlen(buf)); + Tcl_SetObjResult(interp, res); + } + + return (result); +} +#endif diff --git a/db/tcl/tcl_txn.c b/db/tcl/tcl_txn.c index 5686b7192..87c9d3667 100644 --- a/db/tcl/tcl_txn.c +++ b/db/tcl/tcl_txn.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. + * + * $Id: tcl_txn.c,v 11.69 2004/10/07 16:48:39 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: tcl_txn.c,v 11.63 2003/04/24 16:25:54 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -48,7 +46,7 @@ _TxnInfoDelete(interp, txnip) * txn. Remove its commands and info structure. 
*/ nextp = LIST_NEXT(p, entries); - if (p->i_parent == txnip && p->i_type == I_TXN) { + if (p->i_parent == txnip && p->i_type == I_TXN) { _TxnInfoDelete(interp, p); (void)Tcl_DeleteCommand(interp, p->i_name); _DeleteInfo(p); @@ -143,7 +141,8 @@ tcl_Txn(interp, objc, objv, envp, envip) DBTCL_INFO *envip; /* Info pointer */ { static const char *txnopts[] = { -#if CONFIG_TEST +#ifdef CONFIG_TEST + "-degree_2", "-dirty", "-lock_timeout", "-txn_timeout", @@ -155,7 +154,8 @@ tcl_Txn(interp, objc, objv, envp, envip) NULL }; enum txnopts { -#if CONFIG_TEST +#ifdef CONFIG_TEST + TXNDEGREE2, TXNDIRTY, TXN_LOCK_TIMEOUT, TXN_TIMEOUT, @@ -172,7 +172,7 @@ tcl_Txn(interp, objc, objv, envp, envip) u_int32_t flag; int i, optindex, result, ret; char *arg, msg[MSG_SIZE], newname[MSG_SIZE]; -#if CONFIG_TEST +#ifdef CONFIG_TEST db_timeout_t lk_time, tx_time; u_int32_t lk_timeflag, tx_timeflag; #endif @@ -182,7 +182,9 @@ tcl_Txn(interp, objc, objv, envp, envip) parent = NULL; flag = 0; -#if CONFIG_TEST +#ifdef CONFIG_TEST + COMPQUIET(tx_time, 0); + COMPQUIET(lk_time, 0); lk_timeflag = tx_timeflag = 0; #endif i = 2; @@ -194,6 +196,9 @@ tcl_Txn(interp, objc, objv, envp, envip) i++; switch ((enum txnopts)optindex) { #ifdef CONFIG_TEST + case TXNDEGREE2: + flag |= DB_DEGREE_2; + break; case TXNDIRTY: flag |= DB_DIRTY_READ; break; @@ -202,14 +207,13 @@ tcl_Txn(interp, objc, objv, envp, envip) goto getit; case TXN_TIMEOUT: tx_timeflag = DB_SET_TXN_TIMEOUT; -getit: - if (i >= objc) { +getit: if (i >= objc) { Tcl_WrongNumArgs(interp, 2, objv, "?-txn_timestamp time?"); return (TCL_ERROR); } - result = Tcl_GetLongFromObj(interp, objv[i++], - (long *)(optindex == TXN_LOCK_TIMEOUT ? + result = Tcl_GetLongFromObj(interp, objv[i++], (long *) + ((enum txnopts)optindex == TXN_LOCK_TIMEOUT ? 
&lk_time : &tx_time)); if (result != TCL_OK) return (TCL_ERROR); @@ -271,11 +275,11 @@ getit: else ip->i_parent = envip; _SetInfoData(ip, txn); - Tcl_CreateObjCommand(interp, newname, + (void)Tcl_CreateObjCommand(interp, newname, (Tcl_ObjCmdProc *)txn_Cmd, (ClientData)txn, NULL); - res = Tcl_NewStringObj(newname, strlen(newname)); + res = NewStringObj(newname, strlen(newname)); Tcl_SetObjResult(interp, res); -#if CONFIG_TEST +#ifdef CONFIG_TEST if (tx_timeflag != 0) { ret = txn->set_timeout(txn, tx_time, tx_timeflag); if (ret != 0) { @@ -373,7 +377,7 @@ tcl_TxnStat(interp, objc, objv, envp) } Tcl_SetObjResult(interp, res); error: - (void)__os_ufree(envp, sp); + __os_ufree(envp, sp); return (result); } @@ -422,7 +426,7 @@ txn_Cmd(clientData, interp, objc, objv) Tcl_Obj *CONST objv[]; /* The argument objects */ { static const char *txncmds[] = { -#if CONFIG_TEST +#ifdef CONFIG_TEST "discard", "id", "prepare", @@ -432,7 +436,7 @@ txn_Cmd(clientData, interp, objc, objv) NULL }; enum txncmds { -#if CONFIG_TEST +#ifdef CONFIG_TEST TXNDISCARD, TXNID, TXNPREPARE, @@ -443,8 +447,9 @@ txn_Cmd(clientData, interp, objc, objv) DBTCL_INFO *txnip; DB_TXN *txnp; Tcl_Obj *res; + u_int32_t tid; int cmdindex, result, ret; -#if CONFIG_TEST +#ifdef CONFIG_TEST u_int8_t *gid; #endif @@ -471,7 +476,7 @@ txn_Cmd(clientData, interp, objc, objv) res = NULL; switch ((enum txncmds)cmdindex) { -#if CONFIG_TEST +#ifdef CONFIG_TEST case TXNDISCARD: if (objc != 2) { Tcl_WrongNumArgs(interp, 1, objv, NULL); @@ -491,8 +496,8 @@ txn_Cmd(clientData, interp, objc, objv) return (TCL_ERROR); } _debug_check(); - ret = txnp->id(txnp); - res = Tcl_NewIntObj(ret); + tid = txnp->id(txnp); + res = Tcl_NewIntObj((int)tid); break; case TXNPREPARE: if (objc != 3) { @@ -594,7 +599,7 @@ tcl_TxnCommit(interp, objc, objv, txnp, txnip) return (result); } -#if CONFIG_TEST +#ifdef CONFIG_TEST /* * tcl_TxnRecover -- * @@ -623,7 +628,7 @@ for (i = 0; i < count; i++) { \ ip->i_parent = envip; \ p = &prep[i]; \ _SetInfoData(ip, p->txn); \ - Tcl_CreateObjCommand(interp, newname, \ + (void)Tcl_CreateObjCommand(interp, newname, \ (Tcl_ObjCmdProc *)txn_Cmd, (ClientData)p->txn, NULL); \ result = _SetListElem(interp, res, newname, strlen(newname), \ p->gid, DB_XIDDATASIZE); \ diff --git a/db/tcl/tcl_util.c b/db/tcl/tcl_util.c index 08b169cd9..13a6d6a9d 100644 --- a/db/tcl/tcl_util.c +++ b/db/tcl/tcl_util.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2003 + * Copyright (c) 1999-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: tcl_util.c,v 11.43 2004/06/10 17:20:57 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: tcl_util.c,v 11.38 2003/04/23 18:54:40 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -47,10 +45,8 @@ bdb_RandCommand(interp, objc, objv) enum rcmds { RRAND, RRAND_INT, RSRAND }; - long t; - int cmdindex, hi, lo, result, ret; Tcl_Obj *res; - char msg[MSG_SIZE]; + int cmdindex, hi, lo, result, ret; result = TCL_OK; /* @@ -83,29 +79,21 @@ bdb_RandCommand(interp, objc, objv) Tcl_WrongNumArgs(interp, 2, objv, "lo hi"); return (TCL_ERROR); } - result = Tcl_GetIntFromObj(interp, objv[2], &lo); - if (result != TCL_OK) - break; - result = Tcl_GetIntFromObj(interp, objv[3], &hi); - if (result == TCL_OK) { -#ifndef RAND_MAX -#define RAND_MAX 0x7fffffff -#endif - t = rand(); - if (t > RAND_MAX) { - snprintf(msg, MSG_SIZE, - "Max random is higher than %ld\n", - (long)RAND_MAX); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - result = TCL_ERROR; - break; - } - _debug_check(); - ret = (int)(((double)t / ((double)(RAND_MAX) + 1)) * - (hi - lo + 1)); - ret += lo; - res = Tcl_NewIntObj(ret); + if ((result = + Tcl_GetIntFromObj(interp, objv[2], &lo)) != TCL_OK) + return (result); + if ((result = + Tcl_GetIntFromObj(interp, objv[3], &hi)) != TCL_OK) + return (result); + if (lo < 0 || hi < 0) { + Tcl_SetResult(interp, + "Range value less than 0", TCL_STATIC); + return (TCL_ERROR); } + + _debug_check(); + ret = lo + rand() % ((hi - lo) + 1); + res = Tcl_NewIntObj(ret); break; case RSRAND: /* @@ -115,16 +103,17 @@ bdb_RandCommand(interp, objc, objv) Tcl_WrongNumArgs(interp, 2, objv, "seed"); return (TCL_ERROR); } - result = Tcl_GetIntFromObj(interp, objv[2], &lo); - if (result == TCL_OK) { + if ((result = + Tcl_GetIntFromObj(interp, objv[2], &lo)) == TCL_OK) { srand((u_int)lo); res = Tcl_NewIntObj(0); } break; } + /* - * Only set result if we have a res. Otherwise, lower - * functions have already done so. + * Only set result if we have a res. Otherwise, lower functions have + * already done so. 
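[Editor's sketch] The rewritten random-range branch above replaces the old floating-point scaling with a simple modulo mapping into [lo, hi]. A standalone illustration of that computation; note the modulo introduces a small bias whenever the range size does not evenly divide RAND_MAX + 1, which is acceptable for test purposes.

#include <stdlib.h>

/* Return a pseudo-random integer in the inclusive range [lo, hi]. */
static int
rand_in_range(int lo, int hi)
{
	return (lo + rand() % ((hi - lo) + 1));
}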
*/ if (result == TCL_OK && res) Tcl_SetObjResult(interp, res); @@ -150,13 +139,12 @@ tcl_Mutex(interp, objc, objv, envp, envip) DBTCL_INFO *ip; Tcl_Obj *res; _MUTEX_DATA *md; - int i, mode, nitems, result, ret; + int i, nitems, mode, result, ret; char newname[MSG_SIZE]; md = NULL; result = TCL_OK; - mode = nitems = ret = 0; - memset(newname, 0, MSG_SIZE); + ret = 0; if (objc != 4) { Tcl_WrongNumArgs(interp, 2, objv, "mode nitems"); @@ -169,6 +157,7 @@ tcl_Mutex(interp, objc, objv, envp, envip) if (result != TCL_OK) return (TCL_ERROR); + memset(newname, 0, MSG_SIZE); snprintf(newname, sizeof(newname), "%s.mutex%d", envip->i_name, envip->i_envmutexid); ip = _NewInfo(interp, NULL, newname, I_MUTEX); @@ -192,12 +181,11 @@ tcl_Mutex(interp, objc, objv, envp, envip) if (__os_calloc(NULL, 1, sizeof(_MUTEX_DATA), &md) != 0) goto posixout; md->env = envp; - md->n_mutex = nitems; - md->size = sizeof(_MUTEX_ENTRY) * nitems; + md->size = sizeof(_MUTEX_ENTRY) * (u_int)nitems; + md->reginfo.dbenv = envp; md->reginfo.type = REGION_TYPE_MUTEX; - md->reginfo.id = INVALID_REGION_TYPE; - md->reginfo.mode = mode; + md->reginfo.id = INVALID_REGION_ID; md->reginfo.flags = REGION_CREATE_OK | REGION_JOIN_OK; if ((ret = __db_r_attach(envp, &md->reginfo, md->size)) != 0) goto posixout; @@ -220,16 +208,16 @@ tcl_Mutex(interp, objc, objv, envp, envip) envip->i_envmutexid++; ip->i_parent = envip; _SetInfoData(ip, md); - Tcl_CreateObjCommand(interp, newname, + (void)Tcl_CreateObjCommand(interp, newname, (Tcl_ObjCmdProc *)mutex_Cmd, (ClientData)md, NULL); - res = Tcl_NewStringObj(newname, strlen(newname)); + res = NewStringObj(newname, strlen(newname)); Tcl_SetObjResult(interp, res); return (TCL_OK); posixout: if (ret > 0) - Tcl_PosixError(interp); + (void)Tcl_PosixError(interp); result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "mutex"); _DeleteInfo(ip); diff --git a/db/test/README b/db/test/README index 601958f4d..181f9355a 100644 --- a/db/test/README +++ b/db/test/README @@ -28,10 +28,10 @@ Each test starts with a section like the following: # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: README,v 1.5 2003/09/04 23:41:09 bostic Exp $ +# $Id: README,v 1.6 2004/01/28 03:36:26 bostic Exp $ # # TEST test001 # TEST Small keys/data diff --git a/db/test/TESTS b/db/test/TESTS index 3a0cac999..5885ed557 100644 --- a/db/test/TESTS +++ b/db/test/TESTS @@ -92,6 +92,7 @@ env005 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= env006 Make sure that all the utilities exist and run. + Test that db_load -r options don't blow up. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= env007 @@ -143,13 +144,17 @@ fop003 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= fop004 - Test of DB->rename(). - (formerly test075) + Test of DB->rename(). (formerly test075) + Test that files can be renamed from one directory to another. + Test that files can be renamed using absolute or relative + pathnames. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= fop005 Test of DB->remove() Formerly test080. + Test use of dbremove with and without envs, with absolute + and relative paths, and with subdirectories. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= fop006.tcl @@ -211,6 +216,8 @@ lock006 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= log001 Read/write log records. + Test with and without fixed-length, in-memory logging, + and encryption. 
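[Editor's sketch] The updated log001 summary above now covers in-memory logging, the same configuration the revised archive test further below drives through its -log_inmemory switch. In C that setup is roughly the following, assuming the DB_LOG_INMEMORY set_flags flag of this release; the 8MB buffer mirrors the value the Tcl test uses and is otherwise arbitrary.

#include <db.h>

/* Open an environment whose log is kept only in memory. */
int
open_inmem_log_env(const char *home, DB_ENV **dbenvp)
{
	DB_ENV *dbenv;
	int ret;

	*dbenvp = NULL;
	if ((ret = db_env_create(&dbenv, 0)) != 0)
		return (ret);

	/* No log files on disk; give the in-memory log a large buffer. */
	if ((ret = dbenv->set_flags(dbenv, DB_LOG_INMEMORY, 1)) != 0 ||
	    (ret = dbenv->set_lg_bsize(dbenv, 8 * 1024 * 1024)) != 0 ||
	    (ret = dbenv->open(dbenv, home, DB_CREATE | DB_INIT_LOG |
	    DB_INIT_LOCK | DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0) {
		(void)dbenv->close(dbenv, 0);
		return (ret);
	}
	*dbenvp = dbenv;
	return (0);
}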
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= log002 @@ -384,39 +391,46 @@ recd018 recd019 Test txn id wrap-around and recovery. +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +recd020 + Test creation of intermediate directories -- an + undocumented, UNIX-only feature. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +recd021 + Test of failed opens in recovery. + + If a file was deleted through the file system (and not + within Berkeley DB), an error message should appear. + Test for regular files and subdbs. + =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= rep001 Replication rename and forced-upgrade test. - Run a modified version of test001 in a replicated master - environment; verify that the database on the client is correct. + Run rep_test in a replicated master environment. + Verify that the database on the client is correct. Next, remove the database, close the master, upgrade the client, reopen the master, and make sure the new master can - correctly run test001 and propagate it in the other direction. + correctly run rep_test and propagate it in the other direction. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= rep002 Basic replication election test. - Run a modified version of test001 in a replicated master environment; - hold an election among a group of clients to make sure they select - a proper master from amongst themselves, in various scenarios. + Run a modified version of test001 in a replicated master + environment; hold an election among a group of clients to + make sure they select a proper master from amongst themselves, + in various scenarios. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= rep003 Repeated shutdown/restart replication test - Run a quick put test in a replicated master environment; start up, - shut down, and restart client processes, with and without recovery. - To ensure that environment state is transient, use DB_PRIVATE. - -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= -rep004 - Test of DB_REP_LOGSONLY. - - Run a quick put test in a master environment that has one logs-only - client. Shut down, then run catastrophic recovery in the logs-only - client and check that the database is present and populated. + Run a quick put test in a replicated master environment; + start up, shut down, and restart client processes, with + and without recovery. To ensure that environment state + is transient, use DB_PRIVATE. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= rep005 @@ -431,8 +445,8 @@ rep005 rep006 Replication and non-rep env handles. - Run a modified version of test001 in a replicated master environment; - verify that the database on the client is correct. + Run a modified version of test001 in a replicated master + environment; verify that the database on the client is correct. Next, create a non-rep env handle to the master env. Attempt to open the database r/w to force error. @@ -440,7 +454,7 @@ rep006 rep007 Replication and bad LSNs - Run a modified version of test001 in a replicated master env. + Run rep_test in a replicated master env. Close the client. Make additional changes to master. Close the master. Open the client as the new master. Make several different changes. Open the old master as @@ -450,7 +464,8 @@ rep007 rep008 Replication, back up and synchronizing - Run a modified version of test001 in a replicated master environment; + Run a modified version of test001 in a replicated master + environment. Close master and client. Copy the master log to the client. Clean the master. 
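[Editor's sketch] Several of the replication tests above and below switch a site between client and master roles; in the C API that role change is a DB_ENV->rep_start() call, sketched here. The cdata DBT is opaque data broadcast to the other sites and is simply left empty in this sketch.

#include <string.h>
#include <db.h>

/* Declare this environment a replication master or client. */
int
switch_role(DB_ENV *dbenv, int master)
{
	DBT cdata;

	memset(&cdata, 0, sizeof(cdata));
	return (dbenv->rep_start(dbenv, &cdata,
	    master ? DB_REP_MASTER : DB_REP_CLIENT));
}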
@@ -468,25 +483,25 @@ rep009 rep010 Replication and ISPERM - With consecutive message processing, make sure every - DB_REP_PERMANENT is responded to with an ISPERM when - processed. With gaps in the processing, make sure + With consecutive message processing, make sure every + DB_REP_PERMANENT is responded to with an ISPERM when + processed. With gaps in the processing, make sure every DB_REP_PERMANENT is responded to with an ISPERM - or a NOTPERM. Verify in both cases that the LSN returned + or a NOTPERM. Verify in both cases that the LSN returned with ISPERM is found in the log. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= rep011 Replication: test open handle across an upgrade. - Open and close test database in master environment. + Open and close test database in master environment. Update the client. Check client, and leave the handle to the client open as we close the masterenv and upgrade the client to master. Reopen the old master as client - and catch up. Test that we can still do a put to the - handle we created on the master while it was still a - client, and then make sure that the change can be - propagated back to the new client. + and catch up. Test that we can still do a put to the + handle we created on the master while it was still a + client, and then make sure that the change can be + propagated back to the new client. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= rep012 @@ -507,6 +522,215 @@ rep013 Verify that the roll back on clients gives dead db handles. Swap and verify several times. +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep014 + Replication and multiple replication handles. + Test multiple client handles, opening and closing to + make sure we get the right openfiles. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep015 + Locking across multiple pages with replication. + + Open master and client with small pagesize and + generate more than one page and generate off-page + dups on the first page (second key) and last page + (next-to-last key). + Within a single transaction, for each database, open + 2 cursors and delete the first and last entries (this + exercises locks on regular pages). Intermittently + update client during the process. + Within a single transaction, for each database, open + 2 cursors. Walk to the off-page dups and delete one + from each end (this exercises locks on off-page dups). + Intermittently update client. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep016 + Replication election test with varying required nvotes. + + Run a modified version of test001 in a replicated master environment; + hold an election among a group of clients to make sure they select + the master with varying required participants. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep017 + Concurrency with checkpoints. + + Verify that we achieve concurrency in the presence of checkpoints. + Here are the checks that we wish to make: + While dbenv1 is handling the checkpoint record: + Subsequent in-order log records are accepted. + Accepted PERM log records get NOTPERM + A subsequent checkpoint gets NOTPERM + After checkpoint completes, next txn returns PERM + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep018 + Replication with dbremove. + + Verify that the attempt to remove a database file + on the master hangs while another process holds a + handle on the client. + + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep019 + Replication and multiple clients at same LSN. + Have several clients at the same LSN. 
Run recovery at + different times. Declare a client master and after sync-up + verify all client logs are identical. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep020 + Replication elections - test election generation numbers. + + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep021 + Replication and multiple environments. + Run similar tests in separate environments, making sure + that some data overlaps. Then, "move" one client env + from one replication group to another and make sure that + we do not get divergent logs. We either match the first + record and end up with identical logs or we get an error. + Verify all client logs are identical if successful. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep022 + Replication elections - test election generation numbers + during simulated network partition. + + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep023 + Replication using two master handles. + + Open two handles on one master env. Create two + databases, one through each master handle. Process + all messages through the first master handle. Make + sure changes made through both handles are picked + up properly. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep024 + Replication page allocation / verify test + + Start a master (site 1) and a client (site 2). Master + closes (simulating a crash). Site 2 becomes the master + and site 1 comes back up as a client. Verify database. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep026 + Replication elections - simulate a crash after sending + a vote. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep027 + Replication and secondary indexes. + + Set up a secondary index on the master and make sure + it can be accessed from the client. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep028 + Replication and non-rep env handles. (Also see rep006.) + + Open second non-rep env on client, and create a db + through this handle. Open the db on master and put + some data. Check whether the non-rep handle keeps + working. Also check if opening the client database + in the non-rep env writes log records. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep029 + Test of internal initialization. + + One master, one client. + Generate several log files. + Remove old master log files. + Delete client files and restart client. + Put one more record to the master. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep030 + Test of internal initialization multiple files and pagesizes. + Hold some databases open on master. + + One master, one client. + Generate several log files. + Remove old master log files. + Delete client files and restart client. + Put one more record to the master. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep031 + Test of internal initialization and blocked operations. + + One master, one client. + Put one more record to the master. + Test that internal initialization block log_archive, rename, remove. + Sleep 30+ seconds. + Test that we can now log_archive, rename, remove. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep032 + Test of log gap processing. + + One master, one clients. + Run rep_test. + Run rep_test without sending messages to client. + Make sure client missing the messages catches up properly. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep033 + Test of internal initialization with rename and remove of dbs. + + One master, one client. + Generate several databases. Replicate to client. 
+ Do some renames and removes, both before and after + closing the client. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep034 + Test of client startup synchronization. + + One master, two clients. + Run rep_test. + Close one client and change master to other client. + Reopen closed client - enter startup. + Run rep_test and we should see live messages and startup complete. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep035 + Test sync-up recovery in replication. + + We need to fork off 3 child tclsh processes to operate + on Site 3's (client always) home directory: + Process 1 continually calls lock_detect. + Process 2 continually calls txn_checkpoint. + Process 3 continually calls memp_trickle. + Process 4 continually calls log_archive. + Sites 1 and 2 will continually swap being master + (forcing site 3 to continually run sync-up recovery) + New master performs 1 operation, replicates and downgrades. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep036 + Multiple master processes writing to the database. + One process handles all message processing. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rep037 + Test of internal initialization and page throttling. + + One master, one client, force page throttling. + Generate several log files. + Remove old master log files. + Delete client files and restart client. + Put one more record to the master. + Verify page throttling occurred. + =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= rpc001 Test RPC server timeouts for cursor, txn and env handles. @@ -528,6 +752,12 @@ rpc004 rpc005 Test RPC server handle ID sharing +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +rpc006 + Test RPC server and multiple operations to server. + Make sure the server doesn't deadlock itself, but + returns DEADLOCK to the client. + =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= rsrc001 Recno backing file test. Try different patterns of adding @@ -663,6 +893,13 @@ sdb012 Tests creating and removing subdbs while handles are open works correctly, and in the face of txns. +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +sdb013 + Tests in-memory subdatabases. + Create an in-memory subdb. Test for persistence after + overflowing the cache. Test for conflicts when we have + two in-memory files. + =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= sdbtest001 Tests multiple access methods in one subdb @@ -1251,11 +1488,11 @@ test056 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= test057 Cursor maintenance during key deletes. - Check if we handle the case where we delete a key with the cursor on - it and then add the same key. The cursor should not get the new item - returned, but the item shouldn't disappear. - Run test tests, one where the overwriting put is done with a put and - one where it's done with a cursor put. + 1. Delete a key with a cursor. Add the key back with a regular + put. Make sure the cursor can't get the new item. + 2. Put two cursors on one item. Delete through one cursor, + check that the other sees the change. + 3. Same as 2, with the two cursors on a duplicate. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= test058 @@ -1307,7 +1544,8 @@ test064 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= test065 - Test of DB->stat(DB_FASTSTAT) + Test of DB->stat, both -DB_FAST_STAT and row + counts with DB->stat -txn. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= test066 @@ -1597,6 +1835,26 @@ test106 +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +test107 + Test of degree 2 isolation. 
[#8689] + + We set up a database. Open a degree 2 transactional + cursor and a regular transactional cursor on it. + Position each cursor on one page, and do a put to + a different page. + + Make sure that: + - the put succeeds if we are using degree 2. + - the put deadlocks within a regular transaction with + a regular cursor. + + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +test109 + + Test of sequences. + =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= txn001 Begin, commit, abort testing. diff --git a/db/test/archive.tcl b/db/test/archive.tcl index d28aafe70..fa68f633b 100644 --- a/db/test/archive.tcl +++ b/db/test/archive.tcl @@ -1,20 +1,26 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: archive.tcl,v 11.21 2003/01/08 05:49:18 bostic Exp $ +# $Id: archive.tcl,v 11.26 2004/09/22 18:01:04 bostic Exp $ # # Options are: # -checkrec # -maxfilesize -proc archive { args } { +proc archive { { inmem 0 } args } { global alphabet source ./include.tcl # Set defaults - set maxbsize [expr 8 * 1024] + if { $inmem == 1 } { + set maxbsize [expr 8 * [expr 1024 * 1024]] + set desc "in-memory" + } else { + set maxbsize [expr 8 * 1024] + set desc "on-disk" + } set maxfile [expr 32 * 1024] set checkrec 500 for { set i 0 } { $i < [llength $args] } {incr i} { @@ -28,71 +34,92 @@ proc archive { args } { -dir -maxfilesize " return } - } } # Clean out old log if it existed - puts "Archive: Log archive test" + puts "Archive: Log archive test (using $desc logging)." puts "Unlinking log: error message OK" env_cleanup $testdir # Now run the various functionality tests - set eflags "-create -txn -home $testdir \ - -log_buffer $maxbsize -log_max $maxfile" + if { $inmem == 0 } { + set eflags "-create -txn -home $testdir \ + -log_buffer $maxbsize -log_max $maxfile" + } else { + set eflags "-create -txn -home $testdir -log_inmemory \ + -log_buffer $maxbsize -log_max $maxfile" + } set dbenv [eval {berkdb_env} $eflags] - error_check_bad dbenv $dbenv NULL - error_check_good dbenv [is_substr $dbenv env] 1 + error_check_good dbenv [is_valid_env $dbenv] TRUE set logc [$dbenv log_cursor] error_check_good log_cursor [is_valid_logc $logc $dbenv] TRUE # The basic test structure here is that we write a lot of log # records (enough to fill up 100 log files; each log file it - # small). We take periodic checkpoints. Between each pair - # of checkpoints, we refer to 2 files, overlapping them each - # checkpoint. We also start transactions and let them overlap - # checkpoints as well. The pattern that we try to create is: - # ---- write log records----|||||--- write log records --- - # -T1 T2 T3 --- D1 D2 ------CHECK--- CT1 --- D2 D3 CD1 ----CHECK - # where TX is begin transaction, CTx is commit transaction, DX is - # open data file and CDx is close datafile. + # small). We start with three txns and open a database in + # each transaction. Then, in a loop, we take periodic + # checkpoints. Between each pair of checkpoints, we end one + # transaction; when no transactions are left, we start up three + # new ones, letting them overlap checkpoints as well. + # + # The pattern that we create is: + # 1. Create TXN1, TXN2, TXN3 and open dbs within the txns. + # 2. Write a bunch of additional log records. + # 3. Checkpoint. + # 4. Archive, checking that we list the right files. + # 5. Commit one transaction. + # 6. If no txns left, start 3 new ones. + # 7. Until we've gone through enough records, return to step 2. 
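	# [Editor's sketch -- not part of the patch.  The archive check in
	# step 4 above relies on standard db_archive behaviour, shown here
	# with the suite's own $util_path/$testdir conventions; variable
	# names below are illustrative only.]
	#
	#   db_archive -h DIR        lists log files no longer in use
	#   db_archive -h DIR -l     lists all log files
	#   db_archive -h DIR -s     lists database files to back up
	#   db_archive ...  -a       same lists, absolute pathnames
	#
	# Roughly, a log file is "no longer in use" once it predates both
	# the last checkpoint and the oldest active transaction.  With
	# -log_inmemory there are no log files on disk to list at all:
	#
	#   set alllogs [exec $util_path/db_archive -h $testdir -l]
	#   if { $inmem } {
	#           # In-memory logging: nothing on disk to list.
	#           error_check_good nlogfiles [llength $alllogs] 0
	#   }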
set baserec "1:$alphabet:2:$alphabet:3:$alphabet:4:$alphabet" puts "\tArchive.a: Writing log records; checkpoint every $checkrec records" set nrecs $maxfile set rec 0:$baserec - # Begin transaction and write a log record + # Begin 1st transaction and record current log file. Open + # a database in the transaction; the log file won't be + # removable until the transaction is aborted or committed. set t1 [$dbenv txn] - error_check_good t1:txn_begin [is_substr $t1 "txn"] 1 + error_check_good t1:txn_begin [is_valid_txn $t1 $dbenv] TRUE - set l1 [$dbenv log_put $rec] - error_check_bad l1:log_put [llength $l1] 0 + set l1 [lindex [lindex [$logc get -last] 0] 0] + set lsnlist [list $l1] - set lsnlist [list [lindex $l1 0]] + set tdb1 [eval {berkdb_open -create -mode 0644} \ + -env $dbenv -txn $t1 -btree tdb1.db] + error_check_good dbopen [is_valid_db $tdb1] TRUE + # Do the same for a 2nd and 3rd transaction. set t2 [$dbenv txn] - error_check_good t2:txn_begin [is_substr $t2 "txn"] 1 - - set l1 [$dbenv log_put $rec] - lappend lsnlist [lindex $l1 0] + error_check_good t2:txn_begin [is_valid_txn $t2 $dbenv] TRUE + set l2 [lindex [lindex [$logc get -last] 0] 0] + lappend lsnlist $l2 + set tdb2 [eval {berkdb_open -create -mode 0644} \ + -env $dbenv -txn $t2 -btree tdb2.db] + error_check_good dbopen [is_valid_db $tdb2] TRUE set t3 [$dbenv txn] - set l1 [$dbenv log_put $rec] - lappend lsnlist [lindex $l1 0] - - set txnlist [list $t1 $t2 $t3] - set db1 [eval {berkdb_open} "-create -mode 0644 -hash -env $dbenv ar1"] - set db2 [eval {berkdb_open} "-create -mode 0644 -btree -env $dbenv ar2"] - set dbcount 3 - set dblist [list $db1 $db2] - + error_check_good t3:txn_begin [is_valid_txn $t3 $dbenv] TRUE + set l3 [lindex [lindex [$logc get -last] 0] 0] + lappend lsnlist $l3 + set tdb3 [eval {berkdb_open -create -mode 0644} \ + -env $dbenv -txn $t3 -btree tdb3.db] + error_check_good dbopen [is_valid_db $tdb3] TRUE + + # Keep a list of active transactions and databases opened + # within those transactions. + set txnlist [list "$t1 $tdb1" "$t2 $tdb2" "$t3 $tdb3"] + + # Loop through a large number of log records, checkpointing + # and checking db_archive periodically. for { set i 1 } { $i <= $nrecs } { incr i } { set rec $i:$baserec set lsn [$dbenv log_put $rec] error_check_bad log_put [llength $lsn] 0 if { [expr $i % $checkrec] == 0 } { + # Take a checkpoint $dbenv txn_checkpoint set ckp_file [lindex [lindex [$logc get -last] 0] 0] @@ -108,16 +135,24 @@ proc archive { args } { catch { archive_command -h $testdir -a -s } \ res_data_full catch { archive_command -h $testdir -s } res_data - error_check_good nlogfiles [llength $res_alllog] \ - [lindex [lindex [$logc get -last] 0] 0] + + if { $inmem == 0 } { + error_check_good nlogfiles [llength $res_alllog] \ + [lindex [lindex [$logc get -last] 0] 0] + } else { + error_check_good nlogfiles [llength $res_alllog] 0 + } + error_check_good logs_match [llength $res_log_full] \ [llength $res_log] error_check_good data_match [llength $res_data_full] \ [llength $res_data] # Check right number of log files - error_check_good nlogs [llength $res_log] \ - [expr [lindex $lsnlist 0] - 1] + if { $inmem == 0 } { + set expected [min $ckp_file [expr [lindex $lsnlist 0] - 1]] + error_check_good nlogs [llength $res_log] $expected + } # Check that the relative names are a subset of the # full names @@ -137,68 +172,59 @@ proc archive { args } { incr n } - # Begin/commit any transactions - set t [lindex $txnlist 0] + # Commit a transaction and close the associated db. 
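	# [Editor's note -- not part of the patch: committing this txn and
	# closing the database handle opened inside it releases the log
	# file recorded for it in lsnlist, so (subject to the most recent
	# checkpoint) a later pass of the loop can see that file appear in
	# db_archive's removable-log list.]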
+ set t [lindex [lindex $txnlist 0] 0] + set tdb [lindex [lindex $txnlist 0] 1] if { [string length $t] != 0 } { error_check_good txn_commit:$t [$t commit] 0 + error_check_good tdb_close:$tdb [$tdb close] 0 set txnlist [lrange $txnlist 1 end] + set lsnlist [lrange $lsnlist 1 end] } - set lsnlist [lrange $lsnlist 1 end] + # If we're down to no transactions, start some new ones. if { [llength $txnlist] == 0 } { set t1 [$dbenv txn] error_check_bad tx_begin $t1 NULL error_check_good \ tx_begin [is_substr $t1 $dbenv] 1 - set l1 [lindex [$dbenv log_put $rec] 0] - lappend lsnlist [min $l1 $ckp_file] + set tdb1 [eval {berkdb_open -create -mode 0644} \ + -env $dbenv -txn $t1 -btree tdb1.db] + error_check_good dbopen [is_valid_db $tdb1] TRUE + set l1 [lindex [lindex [$logc get -last] 0] 0] + lappend lsnlist $l1 set t2 [$dbenv txn] error_check_bad tx_begin $t2 NULL error_check_good \ tx_begin [is_substr $t2 $dbenv] 1 - set l1 [lindex [$dbenv log_put $rec] 0] - lappend lsnlist [min $l1 $ckp_file] + set tdb2 [eval {berkdb_open -create -mode 0644} \ + -env $dbenv -txn $t2 -btree tdb2.db] + error_check_good dbopen [is_valid_db $tdb2] TRUE + set l2 [lindex [lindex [$logc get -last] 0] 0] + lappend lsnlist $l2 set t3 [$dbenv txn] error_check_bad tx_begin $t3 NULL error_check_good \ tx_begin [is_substr $t3 $dbenv] 1 - set l1 [lindex [$dbenv log_put $rec] 0] - lappend lsnlist [min $l1 $ckp_file] - - set txnlist [list $t1 $t2 $t3] - } + set tdb3 [eval {berkdb_open -create -mode 0644} \ + -env $dbenv -txn $t3 -btree tdb3.db] + error_check_good dbopen [is_valid_db $tdb3] TRUE + set l3 [lindex [lindex [$logc get -last] 0] 0] + lappend lsnlist $l3 - # Open/close some DB files - if { [expr $dbcount % 2] == 0 } { - set type "-hash" - } else { - set type "-btree" + set txnlist [list "$t1 $tdb1" "$t2 $tdb2" "$t3 $tdb3"] } - set db [eval {berkdb_open} \ - "-create -mode 0644 $type -env $dbenv ar$dbcount"] - error_check_bad db_open:$dbcount $db NULL - error_check_good db_open:$dbcount [is_substr $db db] 1 - incr dbcount - - lappend dblist $db - set db [lindex $dblist 0] - error_check_good db_close:$db [$db close] 0 - set dblist [lrange $dblist 1 end] - } } # Commit any transactions still running. puts "\tArchive.b: Commit any transactions still running." - foreach t $txnlist { + foreach pair $txnlist { + set t [lindex $pair 0] + set tdb [lindex $pair 1] error_check_good txn_commit:$t [$t commit] 0 - } - - # Close any files that are still open. - puts "\tArchive.c: Close open files." - foreach d $dblist { - error_check_good db_close:$db [$d close] 0 + error_check_good tdb_close:$tdb [$tdb close] 0 } # Close and unlink the file diff --git a/db/test/bigfile001.tcl b/db/test/bigfile001.tcl index fa40b82b9..399814138 100644 --- a/db/test/bigfile001.tcl +++ b/db/test/bigfile001.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2001-2003 +# Copyright (c) 2001-2004 # Sleepycat Software. All rights reserved. # -# $Id: bigfile001.tcl,v 11.8 2003/01/08 05:49:20 bostic Exp $ +# $Id: bigfile001.tcl,v 11.9 2004/01/28 03:36:26 bostic Exp $ # # TEST bigfile001 # TEST Create a database greater than 4 GB in size. Close, verify. diff --git a/db/test/bigfile002.tcl b/db/test/bigfile002.tcl index 6a12cf554..6686f9ac6 100644 --- a/db/test/bigfile002.tcl +++ b/db/test/bigfile002.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2001-2003 +# Copyright (c) 2001-2004 # Sleepycat Software. All rights reserved. 
# -# $Id: bigfile002.tcl,v 11.8 2003/01/08 05:49:21 bostic Exp $ +# $Id: bigfile002.tcl,v 11.9 2004/01/28 03:36:26 bostic Exp $ # # TEST bigfile002 # TEST This one should be faster and not require so much disk space, diff --git a/db/test/byteorder.tcl b/db/test/byteorder.tcl index 4cd789a39..d94f5a014 100644 --- a/db/test/byteorder.tcl +++ b/db/test/byteorder.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: byteorder.tcl,v 11.14 2003/08/28 19:59:13 sandstro Exp $ +# $Id: byteorder.tcl,v 11.16 2004/01/28 03:36:26 bostic Exp $ # # Byte Order Test # Use existing tests and run with both byte orders. @@ -11,21 +11,21 @@ proc byteorder { method {nentries 1000} } { source ./include.tcl puts "Byteorder: $method $nentries" - eval {test001 $method $nentries 0 0 "01" -lorder 1234} + eval {test001 $method $nentries 0 0 "001" -lorder 1234} eval {verify_dir $testdir} - eval {test001 $method $nentries 0 0 "01" -lorder 4321} + eval {test001 $method $nentries 0 0 "001" -lorder 4321} eval {verify_dir $testdir} eval {test003 $method -lorder 1234} eval {verify_dir $testdir} eval {test003 $method -lorder 4321} eval {verify_dir $testdir} - eval {test010 $method $nentries 5 10 -lorder 1234} + eval {test010 $method $nentries 5 "010" -lorder 1234} eval {verify_dir $testdir} - eval {test010 $method $nentries 5 10 -lorder 4321} + eval {test010 $method $nentries 5 "010" -lorder 4321} eval {verify_dir $testdir} - eval {test011 $method $nentries 5 11 -lorder 1234} + eval {test011 $method $nentries 5 "011" -lorder 1234} eval {verify_dir $testdir} - eval {test011 $method $nentries 5 11 -lorder 4321} + eval {test011 $method $nentries 5 "011" -lorder 4321} eval {verify_dir $testdir} eval {test018 $method $nentries -lorder 1234} eval {verify_dir $testdir} diff --git a/db/test/conscript.tcl b/db/test/conscript.tcl index 324babf39..8740ae214 100644 --- a/db/test/conscript.tcl +++ b/db/test/conscript.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: conscript.tcl,v 11.24 2003/05/19 17:33:16 bostic Exp $ +# $Id: conscript.tcl,v 11.25 2004/01/28 03:36:26 bostic Exp $ # # Script for DB_CONSUME test (test070.tcl). # Usage: conscript dir file runtype nitems outputfile tnum args diff --git a/db/test/dbm.tcl b/db/test/dbm.tcl index 4eefa648b..49a0f3e13 100644 --- a/db/test/dbm.tcl +++ b/db/test/dbm.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: dbm.tcl,v 11.16 2003/01/08 05:49:27 bostic Exp $ +# $Id: dbm.tcl,v 11.17 2004/01/28 03:36:26 bostic Exp $ # # TEST dbm # TEST Historic DBM interface test. Use the first 1000 entries from the diff --git a/db/test/dbscript.tcl b/db/test/dbscript.tcl index 96da0c3f6..8ddcec09d 100644 --- a/db/test/dbscript.tcl +++ b/db/test/dbscript.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: dbscript.tcl,v 11.15 2003/01/08 05:49:29 bostic Exp $ +# $Id: dbscript.tcl,v 11.16 2004/01/28 03:36:26 bostic Exp $ # # Random db tester. 
# Usage: dbscript file numops min_del max_add key_avg data_avgdups diff --git a/db/test/ddoyscript.tcl b/db/test/ddoyscript.tcl index 3dad59822..30e6c34e0 100644 --- a/db/test/ddoyscript.tcl +++ b/db/test/ddoyscript.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: ddoyscript.tcl,v 11.7 2003/01/08 05:49:30 bostic Exp $ +# $Id: ddoyscript.tcl,v 11.8 2004/01/28 03:36:26 bostic Exp $ # # Deadlock detector script tester. # Usage: ddoyscript dir lockerid numprocs diff --git a/db/test/ddscript.tcl b/db/test/ddscript.tcl index 2d4bd722c..173cb2a69 100644 --- a/db/test/ddscript.tcl +++ b/db/test/ddscript.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: ddscript.tcl,v 11.13 2003/01/08 05:49:30 bostic Exp $ +# $Id: ddscript.tcl,v 11.15 2004/03/18 20:58:14 carol Exp $ # # Deadlock detector script tester. # Usage: ddscript dir test lockerid objid numprocs @@ -27,7 +27,7 @@ if { $argc != 5 } { # Initialize arguments set dir [lindex $argv 0] -set tnum [ lindex $argv 1 ] +set test [ lindex $argv 1 ] set lockerid [ lindex $argv 2 ] set objid [ lindex $argv 3 ] set numprocs [ lindex $argv 4 ] @@ -36,7 +36,7 @@ set myenv [berkdb_env -lock -home $dir -create -mode 0644 ] error_check_bad lock_open $myenv NULL error_check_good lock_open [is_substr $myenv "env"] 1 -puts [eval $tnum $myenv $lockerid $objid $numprocs] +puts [eval $test $myenv $lockerid $objid $numprocs] error_check_good lock_id_free [$myenv lock_id_free $lockerid] 0 error_check_good envclose [$myenv close] 0 diff --git a/db/test/dead001.tcl b/db/test/dead001.tcl index 868a22b51..fca094bf1 100644 --- a/db/test/dead001.tcl +++ b/db/test/dead001.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: dead001.tcl,v 11.36 2003/05/18 14:09:38 bostic Exp $ +# $Id: dead001.tcl,v 11.37 2004/01/28 03:36:26 bostic Exp $ # # TEST dead001 # TEST Use two different configurations to test deadlock detection among a diff --git a/db/test/dead002.tcl b/db/test/dead002.tcl index 56b4556cc..7493216d2 100644 --- a/db/test/dead002.tcl +++ b/db/test/dead002.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: dead002.tcl,v 11.28 2003/09/04 23:41:09 bostic Exp $ +# $Id: dead002.tcl,v 11.30 2004/07/07 17:05:55 carol Exp $ # # TEST dead002 # TEST Same test as dead001, but use "detect on every collision" instead @@ -47,7 +47,7 @@ proc dead002 { { procs "2 4 10" } {tests "ring clump" } \ # If we're running with timeouts, pause so that # locks will have a chance to time out. if { $timeout != 0 } { - tclsleep 1 + tclsleep 2 } } watch_procs $pidlist 5 diff --git a/db/test/dead003.tcl b/db/test/dead003.tcl index e073df337..2a74ce423 100644 --- a/db/test/dead003.tcl +++ b/db/test/dead003.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. 
# -# $Id: dead003.tcl,v 1.19 2003/05/28 14:33:48 sandstro Exp $ +# $Id: dead003.tcl,v 1.20 2004/01/28 03:36:26 bostic Exp $ # # TEST dead003 # TEST diff --git a/db/test/dead004.tcl b/db/test/dead004.tcl index 2ebd50c71..4f33dcd75 100644 --- a/db/test/dead004.tcl +++ b/db/test/dead004.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: dead004.tcl,v 11.14 2003/05/28 14:33:48 sandstro Exp $ +# $Id: dead004.tcl,v 11.15 2004/01/28 03:36:27 bostic Exp $ # # Deadlock Test 4. # This test is designed to make sure that we handle youngest and oldest diff --git a/db/test/dead005.tcl b/db/test/dead005.tcl index 4a465faa8..78e9ce838 100644 --- a/db/test/dead005.tcl +++ b/db/test/dead005.tcl @@ -1,28 +1,28 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: dead005.tcl,v 11.13 2003/05/28 14:33:48 sandstro Exp $ +# $Id: dead005.tcl,v 11.15 2004/03/17 15:17:17 bostic Exp $ # # Deadlock Test 5. # Test out the minlocks, maxlocks, and minwrites options # to the deadlock detector. -proc dead005 { { procs "4 6 10" } {tests "maxlocks minwrites minlocks" } \ - { tnum "005" } } { +proc dead005 { { procs "4 6 10" } \ + {tests "maxlocks maxwrites minlocks minwrites" } { tnum "005" } } { source ./include.tcl - puts "Dead$tnum: minlocks, maxlocks, and minwrites deadlock detection tests" foreach t $tests { - puts "Dead$tnum.$t: creating environment" + puts "Dead$tnum.$t: deadlock detection tests" env_cleanup $testdir # Create the environment. set env [berkdb_env -create -mode 0644 -lock -home $testdir] error_check_good lock_env:open [is_valid_env $env] TRUE case $t { - minlocks { set to n } maxlocks { set to m } + maxwrites { set to W } + minlocks { set to n } minwrites { set to w } } foreach n $procs { @@ -67,9 +67,10 @@ proc dead005 { { procs "4 6 10" } {tests "maxlocks minwrites minlocks" } \ # Now verify that the correct participant # got deadlocked. switch $t { + maxlocks {set f [expr $n - 1]} + maxwrites {set f 2} minlocks {set f 0} minwrites {set f 1} - maxlocks {set f [expr $n - 1]} } set did [open $testdir/dead$tnum.log.$f] error_check_bad file:$t [gets $did val] -1 diff --git a/db/test/dead006.tcl b/db/test/dead006.tcl index 46d2e2f03..4d80af418 100644 --- a/db/test/dead006.tcl +++ b/db/test/dead006.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: dead006.tcl,v 1.5 2003/01/08 05:49:39 bostic Exp $ +# $Id: dead006.tcl,v 1.6 2004/01/28 03:36:27 bostic Exp $ # # TEST dead006 # TEST use timeouts rather than the normal dd algorithm. diff --git a/db/test/dead007.tcl b/db/test/dead007.tcl index 4569832ff..e9aefa9c0 100644 --- a/db/test/dead007.tcl +++ b/db/test/dead007.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: dead007.tcl,v 1.5 2003/05/28 14:33:49 sandstro Exp $ +# $Id: dead007.tcl,v 1.6 2004/01/28 03:36:27 bostic Exp $ # # TEST dead007 # TEST Tests for locker and txn id wraparound. diff --git a/db/test/env001.tcl b/db/test/env001.tcl index b4718cc13..4e2c070e5 100644 --- a/db/test/env001.tcl +++ b/db/test/env001.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. 
# -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: env001.tcl,v 11.27 2003/01/08 05:49:42 bostic Exp $ +# $Id: env001.tcl,v 11.28 2004/01/28 03:36:27 bostic Exp $ # # TEST env001 # TEST Test of env remove interface (formerly env_remove). diff --git a/db/test/env002.tcl b/db/test/env002.tcl index 8f38157d2..70f573c29 100644 --- a/db/test/env002.tcl +++ b/db/test/env002.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: env002.tcl,v 11.16 2003/01/08 05:49:42 bostic Exp $ +# $Id: env002.tcl,v 11.17 2004/01/28 03:36:27 bostic Exp $ # # TEST env002 # TEST Test of DB_LOG_DIR and env name resolution. diff --git a/db/test/env003.tcl b/db/test/env003.tcl index e6a0b6feb..247fcd3c9 100644 --- a/db/test/env003.tcl +++ b/db/test/env003.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: env003.tcl,v 11.22 2003/01/08 05:49:43 bostic Exp $ +# $Id: env003.tcl,v 11.23 2004/01/28 03:36:27 bostic Exp $ # # TEST env003 # TEST Test DB_TMP_DIR and env name resolution diff --git a/db/test/env004.tcl b/db/test/env004.tcl index 0d44b8df9..fe975d700 100644 --- a/db/test/env004.tcl +++ b/db/test/env004.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: env004.tcl,v 11.19 2003/01/08 05:49:45 bostic Exp $ +# $Id: env004.tcl,v 11.22 2004/04/23 15:40:12 sue Exp $ # # TEST env004 # TEST Test multiple data directories. Do a bunch of different opens @@ -32,15 +32,9 @@ proc env004 { } { puts $cid "set_data_dir data3" close $cid - # Now get pathnames - set curdir [pwd] - cd $testdir - set fulldir [pwd] - cd $curdir - set e [berkdb_env -create -private -home $testdir] error_check_good dbenv [is_valid_env $e] TRUE - ddir_test $fulldir $method $e $args + ddir_test $method $e $args error_check_good env_close [$e close] 0 puts "\tEnv004.b: Multiple data directories in berkdb_env call." @@ -54,13 +48,11 @@ proc env004 { } { -data_dir . -data_dir data1 -data_dir data2 \ -data_dir data3 -home $testdir] error_check_good dbenv [is_valid_env $e] TRUE - ddir_test $fulldir $method $e $args + ddir_test $method $e $args error_check_good env_close [$e close] 0 - - env_cleanup $testdir } -proc ddir_test { fulldir method e args } { +proc ddir_test { method e args } { source ./include.tcl set args [convert_args $args] @@ -87,13 +79,13 @@ proc ddir_test { fulldir method e args } { # Now, reopen the files without complete pathnames and make # sure that we find them. - set db1 [berkdb_open -env $e $fulldir/data1/datafile1.db] + set db1 [berkdb_open -env $e datafile1.db] error_check_good dbopen1 [is_valid_db $db1] TRUE - set db2 [berkdb_open -env $e $fulldir/data2/datafile2.db] + set db2 [berkdb_open -env $e datafile2.db] error_check_good dbopen2 [is_valid_db $db2] TRUE - set db3 [berkdb_open -env $e $fulldir/data3/datafile3.db] + set db3 [berkdb_open -env $e datafile3.db] error_check_good dbopen3 [is_valid_db $db3] TRUE # Finally close all the files diff --git a/db/test/env005.tcl b/db/test/env005.tcl index 89481cfa4..fc08bc97f 100644 --- a/db/test/env005.tcl +++ b/db/test/env005.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. 
# -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: env005.tcl,v 11.19 2003/08/05 13:14:28 sandstro Exp $ +# $Id: env005.tcl,v 11.20 2004/01/28 03:36:27 bostic Exp $ # # TEST env005 # TEST Test that using subsystems without initializing them correctly diff --git a/db/test/env006.tcl b/db/test/env006.tcl index 434e4fac6..9f2203357 100644 --- a/db/test/env006.tcl +++ b/db/test/env006.tcl @@ -1,12 +1,13 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: env006.tcl,v 11.9 2003/01/08 05:49:48 bostic Exp $ +# $Id: env006.tcl,v 11.11 2004/04/27 19:56:44 carol Exp $ # # TEST env006 # TEST Make sure that all the utilities exist and run. +# TEST Test that db_load -r options don't blow up. proc env006 { } { source ./include.tcl @@ -39,4 +40,52 @@ proc env006 { } { # error_check_good $cmd.err [is_substr $ret sage] 1 } + + env_cleanup $testdir + set env [eval berkdb_env -create -home $testdir -txn] + error_check_good env_open [is_valid_env $env] TRUE + + set sub SUBDB + foreach case { noenv env } { + if { $case == "env" } { + set envargs " -env $env " + set homeargs " -h $testdir " + set testfile env006.db + } else { + set envargs "" + set homeargs "" + set testfile $testdir/env006.db + } + + puts "\tEnv006.i: Testing db_load -r with $case." + set db [eval berkdb_open -create $envargs -btree $testfile] + error_check_good db_open [is_valid_db $db] TRUE + error_check_good db_close [$db close] 0 + + set ret [eval \ + exec $util_path/db_load -r lsn $homeargs $testfile] + error_check_good db_load_r_lsn $ret "" + set ret [eval \ + exec $util_path/db_load -r fileid $homeargs $testfile] + error_check_good db_load_r_fileid $ret "" + + error_check_good db_remove \ + [eval {berkdb dbremove} $envargs $testfile] 0 + + puts "\tEnv006.j: Testing db_load -r with $case and subdbs." + set db [eval berkdb_open -create $envargs -btree $testfile $sub] + error_check_good db_open [is_valid_db $db] TRUE + error_check_good db_close [$db close] 0 + + set ret [eval \ + exec {$util_path/db_load} -r lsn $homeargs $testfile] + error_check_good db_load_r_lsn $ret "" + set ret [eval \ + exec {$util_path/db_load} -r fileid $homeargs $testfile] + error_check_good db_load_r_fileid $ret "" + + error_check_good \ + db_remove [eval {berkdb dbremove} $envargs $testfile] 0 + } + error_check_good env_close [$env close] 0 } diff --git a/db/test/env007.tcl b/db/test/env007.tcl index a52220fcb..efbb17d55 100644 --- a/db/test/env007.tcl +++ b/db/test/env007.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: env007.tcl,v 11.31 2003/11/20 14:32:51 sandstro Exp $ +# $Id: env007.tcl,v 11.41 2004/09/22 18:01:04 bostic Exp $ # # TEST env007 # TEST Test DB_CONFIG config file options for berkdb env. @@ -33,40 +33,69 @@ proc env007 { } { # 5. Stat command to run (empty if we can't get the info # from stat). # 6. String to search for in stat output - # 7. Arg used in getter + # 7. Which arg to check in stat (needed for cases where + # we set more than one args at a time, but stat can + # only check one args, like cachesize) + # 8. 
Arg used in getter # set rlist { - { " -txn_max " "set_tx_max" "19" "31" "Env007.a1: Txn Max" - "txn_stat" "Max Txns" "get_tx_max" } - { " -lock_max_locks " "set_lk_max_locks" "17" "29" "Env007.a2: Lock Max" - "lock_stat" "Maximum locks" "get_lk_max_locks" } + { " -txn_max " "set_tx_max" "19" "31" + "Env007.a1: Txn Max" "txn_stat" + "Max Txns" "0" "get_tx_max" } + { " -lock_max_locks " "set_lk_max_locks" "17" "29" + "Env007.a2: Lock Max" "lock_stat" + "Maximum locks" "0" "get_lk_max_locks" } { " -lock_max_lockers " "set_lk_max_lockers" "1500" "2000" - "Env007.a3: Max Lockers" "lock_stat" "Maximum lockers" - "get_lk_max_lockers" } + "Env007.a3: Max Lockers" "lock_stat" + "Maximum lockers" "0" "get_lk_max_lockers" } { " -lock_max_objects " "set_lk_max_objects" "1500" "2000" - "Env007.a4: Max Objects" "lock_stat" "Maximum objects" - "get_lk_max_objects" } - { " -log_buffer " "set_lg_bsize" "65536" "131072" "Env007.a5: Log Bsize" - "log_stat" "Log record cache size" "get_lg_bsize" } - { " -log_max " "set_lg_max" "8388608" "9437184" "Env007.a6: Log Max" - "log_stat" "Current log file size" "get_lg_max" } + "Env007.a4: Max Objects" "lock_stat" + "Maximum objects" "0" "get_lk_max_objects" } + { " -log_buffer " "set_lg_bsize" "65536" "131072" + "Env007.a5: Log Bsize" "log_stat" + "Log record cache size" "0" "get_lg_bsize" } + { " -log_max " "set_lg_max" "8388608" "9437184" + "Env007.a6: Log Max" "log_stat" + "Current log file size" "0" "get_lg_max" } { " -cachesize " "set_cachesize" "0 536870912 1" "1 0 1" - "Env007.a7: Cachesize" "" "" "get_cachesize" } + "Env007.a7.0: Cachesize" "mpool_stat" + "Cache size (gbytes)" "0" "get_cachesize" } + { " -cachesize " "set_cachesize" "0 536870912 1" "1 0 1" + "Env007.a7.1: Cachesize" "mpool_stat" + "Cache size (bytes)" "1" "get_cachesize" } + { " -cachesize " "set_cachesize" "0 536870912 1" "1 0 1" + "Env007.a7.2: Cachesize" "mpool_stat" + "Number of caches" "2" "get_cachesize" } { " -lock_timeout " "set_lock_timeout" "100" "120" - "Env007.a8: Lock Timeout" "" "" "get_timeout lock" } + "Env007.a8: Lock Timeout" "lock_stat" + "Lock timeout value" "0" "get_timeout lock" } { " -log_regionmax " "set_lg_regionmax" "8388608" "4194304" - "Env007.a9: Log Regionmax" "" "" "get_lg_regionmax" } - { " -mmapsize " "set_mp_mmapsize" "12582912" "8388608" - "Env007.a10: Mmapsize" "" "" "get_mp_mmapsize" } - { " -shm_key " "set_shm_key" "15" "35" "Env007.a11: Shm Key" + "Env007.a9: Log Regionmax" "log_stat" + "Region size" "0" "get_lg_regionmax" } + { " -mpool_max_openfd " "set_mp_max_openfd" "17" "27" + "Env007.a10: Mmap max openfd" "mpool_stat" + "Maximum open file descriptors" "0" "get_mp_max_openfd" } + { " -mpool_max_write " "set_mp_max_write" "37 47" "57 67" + "Env007.a11.0: Mmap max write" "mpool_stat" + "Maximum sequential buffer writes" "0" "get_mp_max_write" } + { " -mpool_max_write " "set_mp_max_write" "37 47" "57 67" + "Env007.a11.1: Mmap max write" "mpool_stat" + "Sleep after writing maximum buffers" "1" "get_mp_max_write" } + { " -mpool_mmap_size " "set_mp_mmapsize" "12582912" "8388608" + "Env007.a12: Mmapsize" "mpool_stat" + "Maximum memory-mapped file size" "0" "get_mp_mmapsize" } + { " -shm_key " "set_shm_key" "15" "35" + "Env007.a13: Shm Key" "" "" "" "get_shm_key" } - { " -tmp_dir " "set_tmp_dir" "." "./TEMPDIR" "Env007.a12: Temp dir" + { " -tmp_dir " "set_tmp_dir" "." 
"./TEMPDIR" + "Env007.a14: Temp dir" "" "" "" "get_tmp_dir" } { " -txn_timeout " "set_txn_timeout" "100" "120" - "Env007.a13: Txn timeout" "" "" "get_timeout txn" } + "Env007.a15: Txn timeout" "lock_stat" + "Transaction timeout value" "0" "get_timeout txn" } } - set e "berkdb_env -create -mode 0644 -home $testdir -txn " + set e "berkdb_env_noerr -create -mode 0644 -home $testdir -txn " set qnxexclude {set_cachesize} foreach item $rlist { set envarg [lindex $item 0] @@ -76,21 +105,31 @@ proc env007 { } { set msg [lindex $item 4] set statcmd [lindex $item 5] set statstr [lindex $item 6] - set getter [lindex $item 7] + set index [lindex $item 7] + set getter [lindex $item 8] if { $is_qnx_test && [lsearch $qnxexclude $configarg] != -1 } { - puts "\tSkip $configarg for QNX" + puts "\tEnv007.a: Skipping $configarg for QNX" continue } + env_cleanup $testdir + # First verify using just env args puts "\t$msg Environment argument only" set env [eval $e $envarg {$envval}] error_check_good envopen:0 [is_valid_env $env] TRUE error_check_good get_envval [eval $env $getter] $envval if { $statcmd != "" } { - env007_check $env $statcmd $statstr $envval + set statenvval [lindex $envval $index] + # log_stat reports the sum of the specified + # region size and the log buffer size. + if { $statstr == "Region size" } { + set lbufsize 32768 + set statenvval [expr $statenvval + $lbufsize] + } + env007_check $env $statcmd $statstr $statenvval } error_check_good envclose:0 [$env close] 0 @@ -103,7 +142,11 @@ proc env007 { } { error_check_good envopen:1 [is_valid_env $env] TRUE error_check_good get_configval1 [eval $env $getter] $configval if { $statcmd != "" } { - env007_check $env $statcmd $statstr $configval + set statconfigval [lindex $configval $index] + if { $statstr == "Region size" } { + set statconfigval [expr $statconfigval + $lbufsize] + } + env007_check $env $statcmd $statstr $statconfigval } error_check_good envclose:1 [$env close] 0 @@ -114,7 +157,7 @@ proc env007 { } { # Getter should retrieve config val, not envval. error_check_good get_configval2 [eval $env $getter] $configval if { $statcmd != "" } { - env007_check $env $statcmd $statstr $configval + env007_check $env $statcmd $statstr $statconfigval } error_check_good envclose:2 [$env close] 0 } @@ -135,6 +178,7 @@ proc env007 { } { { "set_flags" "db_cdb_alldb" "get_flags" "-cdb_alldb" } { "set_flags" "db_direct_db" "get_flags" "-direct_db" } { "set_flags" "db_direct_log" "get_flags" "-direct_log" } + { "set_flags" "db_dsync_log" "get_flags" "-dsync_log" } { "set_flags" "db_log_autoremove" "get_flags" "-log_remove" } { "set_flags" "db_nolocking" "get_flags" "-nolock" } { "set_flags" "db_nommap" "get_flags" "-nommap" } @@ -161,13 +205,14 @@ proc env007 { } { { "set_lk_max_objects" "1500" "get_lk_max_objects" "1500" } { "set_lock_timeout" "100" "get_timeout lock" "100" } { "set_mp_mmapsize" "12582912" "get_mp_mmapsize" "12582912" } + { "set_mp_max_write" "10 20" "get_mp_max_write" "10 20" } + { "set_mp_max_openfd" "10" "get_mp_max_openfd" "10" } { "set_region_init" "1" "get_flags" "-region_init" } { "set_shm_key" "15" "get_shm_key" "15" } { "set_tas_spins" "15" "get_tas_spins" "15" } { "set_tmp_dir" "." "get_tmp_dir" "." 
} { "set_tx_max" "31" "get_tx_max" "31" } { "set_txn_timeout" "50" "get_timeout txn" "50" } - { "set_verbose" "db_verb_chkpoint" "get_verbose chkpt" "on" } { "set_verbose" "db_verb_deadlock" "get_verbose deadlock" "on" } { "set_verbose" "db_verb_recovery" "get_verbose recovery" "on" } { "set_verbose" "db_verb_replication" "get_verbose rep" "on" } @@ -175,8 +220,8 @@ proc env007 { } { } env_cleanup $testdir - set e "berkdb_env -create -mode 0644 -home $testdir -txn" - set qnxexclude {db_direct_db db_direct_log} + set e "berkdb_env_noerr -create -mode 0644 -home $testdir -txn" + set directlist {db_direct_db db_direct_log} foreach item $cfglist { env_cleanup $testdir set configarg [lindex $item 0] @@ -184,18 +229,23 @@ proc env007 { } { set getter [lindex $item 2] set getval [lindex $item 3] - if { $is_qnx_test && - [lsearch $qnxexclude $configval] != -1} { - puts "\t\t Skip $configarg $configval for QNX" - continue - } env007_make_config $configarg $configval # Verify using config file - puts "\t\t $configarg $configval" - if {[catch { set env [eval $e]} res] != 0} { - puts "FAIL: $res" - continue + puts "\t\tEnv007.b: $configarg $configval" + + # Unconfigured/unsupported direct I/O is not reported + # as a failure. + set directmsg \ + "direct I/O either not configured or not supported" + if {[catch { eval $e } env ]} { + if { [lsearch $directlist $configval] != -1 && \ + [is_substr $env $directmsg] == 1 } { + continue + } else { + puts "FAIL: $env" + continue + } } error_check_good envvalid:1 [is_valid_env $env] TRUE error_check_good getter:1 [eval $env $getter] $getval @@ -238,7 +288,7 @@ proc env007 { } { if { $is_qnx_test && [lsearch $qnxexclude $envarg] != -1} { - puts "\t\t Skip $envarg for QNX" + puts "\t\tEnv007: Skipping $envarg for QNX" continue } env_cleanup $testdir @@ -275,6 +325,7 @@ proc env007 { } { set flaglist { { "-direct_db" } { "-direct_log" } + { "-dsync_log" } { "-log_remove" } { "-nolock" } { "-nommap" } @@ -284,15 +335,10 @@ proc env007 { } { { "-panic" } { "-wrnosync" } } - set e "berkdb_env -create -mode 0644 -home $testdir" - set qnxexclude {-direct_db -direct_log} + set e "berkdb_env_noerr -create -mode 0644 -home $testdir" + set directlist {-direct_db -direct_log} foreach item $flaglist { set flag [lindex $item 0] - if { $is_qnx_test && - [lsearch $qnxexclude $flag] != -1} { - puts "\t\t Skip $flag for QNX" - continue - } env_cleanup $testdir # Set up env @@ -300,11 +346,28 @@ proc env007 { } { error_check_good envopen [is_valid_env $env] TRUE # Use set_flags to turn on new env characteristics. - error_check_good "flag $flag on" [$env set_flags $flag on] 0 + # + # Unconfigured/unsupported direct I/O is not reported + # as a failure. + if {[catch { $env set_flags $flag on } res ]} { + if { [lsearch $directlist $flag] != -1 && \ + [is_substr $res $directmsg] == 1 } { + error_check_good env_close [$env close] 0 + continue + } else { + puts "FAIL: $res" + error_check_good env_close [$env close] 0 + continue + } + } else { + error_check_good "flag $flag on" $res 0 + } + # Check that getter retrieves expected retval. 
set get_retval [eval $env get_flags] if { [is_substr $get_retval $flag] != 1 } { puts "FAIL: $flag should be a substring of $get_retval" + error_check_good env_close [$env close] 0 continue } # Use set_flags to turn off env characteristics, make sure @@ -313,6 +376,7 @@ proc env007 { } { set get_retval [eval $env get_flags] if { [is_substr $get_retval $flag] == 1 } { puts "FAIL: $flag should not be in $get_retval" + error_check_good env_close [$env close] 0 continue } @@ -345,6 +409,8 @@ proc env007 { } { { "set_lk_max_locks" "db_xxx" } { "set_lk_max_lockers" "db_xxx" } { "set_lk_max_objects" "db_xxx" } + { "set_mp_max_openfd" "1 2" } + { "set_mp_max_write" "1 2 3" } { "set_mp_mmapsize" "db_xxx" } { "set_region_init" "db_xxx" } { "set_shm_key" "db_xxx" } diff --git a/db/test/env008.tcl b/db/test/env008.tcl index 83bcb3c3b..c203d55d4 100644 --- a/db/test/env008.tcl +++ b/db/test/env008.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: env008.tcl,v 11.7 2003/01/08 05:49:51 bostic Exp $ +# $Id: env008.tcl,v 11.8 2004/01/28 03:36:27 bostic Exp $ # # TEST env008 # TEST Test environments and subdirectories. diff --git a/db/test/env009.tcl b/db/test/env009.tcl index fd009af55..e6fd3a563 100644 --- a/db/test/env009.tcl +++ b/db/test/env009.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: env009.tcl,v 11.6 2003/01/08 05:49:54 bostic Exp $ +# $Id: env009.tcl,v 11.9 2004/09/23 21:45:21 mjc Exp $ # # TEST env009 # TEST Test calls to all the various stat functions. We have several @@ -12,7 +12,7 @@ proc env009 { } { source ./include.tcl - puts "Env009: Various stat function test." + puts "Env009: Various stat functions test." env_cleanup $testdir puts "\tEnv009.a: Setting up env and a database." @@ -23,35 +23,60 @@ proc env009 { } { error_check_good dbopen [is_valid_db $dbbt] TRUE set dbh [berkdb_open -create -hash $testdir/env009h.db] error_check_good dbopen [is_valid_db $dbh] TRUE - set dbq [berkdb_open -create -btree $testdir/env009q.db] + set dbq [berkdb_open -create -queue $testdir/env009q.db] error_check_good dbopen [is_valid_db $dbq] TRUE + puts "\tEnv009.b: Setting up replication master and client envs." 
+ replsetup $testdir/MSGQUEUEDIR + set masterdir $testdir/MASTERDIR + set clientdir $testdir/CLIENTDIR + file mkdir $masterdir + file mkdir $clientdir + + repladd 1 + set repenv(M) [berkdb_env -create -home $masterdir \ + -txn -rep_master -rep_transport [list 1 replsend]] + repladd 2 + set repenv(C) [berkdb_env -create -home $clientdir \ + -txn -rep_client -rep_transport [list 2 replsend]] + set rlist { - { "lock_stat" "Maximum locks" "Env009.b"} - { "log_stat" "Magic" "Env009.c"} - { "mpool_stat" "Number of caches" "Env009.d"} - { "txn_stat" "Max Txns" "Env009.e"} + { "lock_stat" "Maximum locks" "Env009.c" $e } + { "log_stat" "Magic" "Env009.d" "$e" } + { "mpool_stat" "Number of caches" "Env009.e" "$e"} + { "txn_stat" "Max Txns" "Env009.f" "$e" } + { "rep_stat" "{Environment ID} 1" "Env009.g (Master)" "$repenv(M)"} + { "rep_stat" "{Environment ID} 2" "Env009.h (Client)" "$repenv(C)"} } - foreach pair $rlist { - set cmd [lindex $pair 0] - set str [lindex $pair 1] - set msg [lindex $pair 2] + foreach set $rlist { + set cmd [lindex $set 0] + set str [lindex $set 1] + set msg [lindex $set 2] + set env [lindex $set 3] puts "\t$msg: $cmd" - set ret [$e $cmd] + set ret [eval $env $cmd] error_check_good $cmd [is_substr $ret $str] 1 } - puts "\tEnv009.f: btree stats" + + puts "\tEnv009.i: btree stats" set ret [$dbbt stat] - error_check_good $cmd [is_substr $ret "Magic"] 1 - puts "\tEnv009.g: hash stats" + error_check_good $cmd [is_substr $ret "Leaf pages"] 1 + + puts "\tEnv009.j: hash stats" set ret [$dbh stat] - error_check_good $cmd [is_substr $ret "Magic"] 1 - puts "\tEnv009.f: queue stats" + error_check_good $cmd [is_substr $ret "Buckets"] 1 + + puts "\tEnv009.k: queue stats" set ret [$dbq stat] - error_check_good $cmd [is_substr $ret "Magic"] 1 + error_check_good $cmd [is_substr $ret "Extent size"] 1 + + # Clean up. error_check_good dbclose [$dbbt close] 0 error_check_good dbclose [$dbh close] 0 error_check_good dbclose [$dbq close] 0 + error_check_good masterenvclose [$repenv(M) close] 0 + error_check_good clientenvclose [$repenv(C) close] 0 + replclose $testdir/MSGQUEUEDIR error_check_good envclose [$e close] 0 } diff --git a/db/test/env010.tcl b/db/test/env010.tcl index 0cba2881a..403f0cd9d 100644 --- a/db/test/env010.tcl +++ b/db/test/env010.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: env010.tcl,v 1.6 2003/07/01 14:42:58 bostic Exp $ +# $Id: env010.tcl,v 1.7 2004/01/28 03:36:27 bostic Exp $ # # TEST env010 # TEST Run recovery in an empty directory, and then make sure we can still diff --git a/db/test/env011.tcl b/db/test/env011.tcl index 99d4cdc4e..de6c82b36 100644 --- a/db/test/env011.tcl +++ b/db/test/env011.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: env011.tcl,v 1.3 2003/01/08 05:49:57 bostic Exp $ +# $Id: env011.tcl,v 1.4 2004/01/28 03:36:27 bostic Exp $ # # TEST env011 # TEST Run with region overwrite flag. diff --git a/db/test/fop001.tcl b/db/test/fop001.tcl index 840e4a690..3a2a1e282 100644 --- a/db/test/fop001.tcl +++ b/db/test/fop001.tcl @@ -1,16 +1,20 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. 
# -# $Id: fop001.tcl,v 1.15 2003/09/04 23:41:10 bostic Exp $ +# $Id: fop001.tcl,v 1.21 2004/09/22 18:01:04 bostic Exp $ # # TEST fop001.tcl # TEST Test file system operations, combined in a transaction. [#7363] -proc fop001 { } { +proc fop001 { method args } { source ./include.tcl - puts "\nFop001: Multiple file system ops in one transaction" + set args [convert_args $method $args] + set omethod [convert_method $method] + + puts "\nFop001: ($method)\ + Multiple file system ops in one transaction." set exists {a b} set noexist {foo bar} @@ -19,7 +23,7 @@ proc fop001 { } { set ops {rename remove open open_create open_excl truncate} # Set up all sensible two-op cases (op1 succeeds). - foreach retval { 0 "file exists" "no such file" "file is open" } { + foreach retval { 0 "file exists" "no such file" } { foreach op1 {rename remove open open_excl \ open_create truncate} { foreach op2 $ops { @@ -32,7 +36,7 @@ proc fop001 { } { # Set up evil two-op cases (op1 fails). Omit open_create # and truncate from op1 list -- open_create always succeeds # and truncate requires a successful open. - foreach retval { 0 "file exists" "no such file" "file is open" } { + foreach retval { 0 "file exists" "no such file" } { foreach op1 { rename remove open open_excl } { foreach op2 $ops { append cases " " [create_badtests $op1 $op2 \ @@ -42,10 +46,14 @@ proc fop001 { } { } # The structure of each case is: - # {{op1 {args} result} {op2 {args} result}} + # {{op1 {names1} result end1} {op2 {names2} result}} # A result of "0" indicates no error is expected. # Otherwise, the result is the expected error message. # + # The "end1" variable indicates whether the first txn + # ended with an abort or a commit, and is not used + # in this test. + # # Comment this loop out to remove the list of cases. # set i 1 # foreach case $cases { @@ -62,26 +70,28 @@ proc fop001 { } { # Extract elements of the case set op1 [lindex [lindex $case 0] 0] - set args1 [lindex [lindex $case 0] 1] + set names1 [lindex [lindex $case 0] 1] set res1 [lindex [lindex $case 0] 2] set op2 [lindex [lindex $case 1] 0] - set args2 [lindex [lindex $case 1] 1] + set names2 [lindex [lindex $case 1] 1] set res2 [lindex [lindex $case 1] 2] - puts "\tFop001.$testid: $op1 ($args1), then $op2 ($args2)." + puts "\tFop001.$testid: $op1 ($names1), then $op2 ($names2)." # Create transactional environment. 
set env [berkdb_env -create -home $testdir -txn] error_check_good is_valid_env [is_valid_env $env] TRUE # Create two databases - set dba [berkdb_open -create -btree -env $env -auto_commit a.db] + set dba [eval {berkdb_open \ + -create} $omethod $args -env $env -auto_commit a] error_check_good dba_open [is_valid_db $dba] TRUE error_check_good dba_put [$dba put -auto_commit 1 a] 0 error_check_good dba_close [$dba close] 0 - set dbb [berkdb_open -create -btree -env $env -auto_commit b.db] + set dbb [eval {berkdb_open \ + -create} $omethod $args -env $env -auto_commit b] error_check_good dbb_open [is_valid_db $dbb] TRUE error_check_good dbb_put [$dbb put -auto_commit 1 b] 0 error_check_good dbb_close [$dbb close] 0 @@ -91,7 +101,7 @@ proc fop001 { } { set txn [$env txn] # Execute and check operation 1 - set result1 [do_op $op1 $args1 $txn $env] + set result1 [do_op $omethod $op1 $names1 $txn $env $args] if {$res1 == 0} { error_check_good op1_should_succeed $result1 $res1 } else { @@ -100,7 +110,7 @@ proc fop001 { } { } # Execute and check operation 2 - set result2 [do_op $op2 $args2 $txn $env] + set result2 [do_op $omethod $op2 $names2 $txn $env $args] if {$res2 == 0} { error_check_good op2_should_succeed $result2 $res2 } else { @@ -115,9 +125,9 @@ proc fop001 { } { # databases. if {$end == "abort"} { error_check_good a_exists \ - [file exists $testdir/a.db] 1 + [file exists $testdir/a] 1 error_check_good b_exists \ - [file exists $testdir/b.db] 1 + [file exists $testdir/b] 1 } } diff --git a/db/test/fop002.tcl b/db/test/fop002.tcl index a4b6a9da7..823147ec0 100644 --- a/db/test/fop002.tcl +++ b/db/test/fop002.tcl @@ -1,17 +1,20 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: fop002.tcl,v 1.5 2003/09/08 16:41:06 sandstro Exp $ +# $Id: fop002.tcl,v 1.7 2004/02/25 17:49:05 carol Exp $ # # TEST fop002.tcl # TEST Test file system operations in the presence of bad permissions. -proc fop002 { } { +proc fop002 { method args } { source ./include.tcl + set args [convert_args $method $args] + set omethod [convert_method $method] + env_cleanup $testdir - puts "\nFop002: File system ops and permissions. " + puts "\nFop002: ($method) File system ops and permissions." if { $is_windows_test == 1 } { puts "\tSkipping permissions test for Windows platform." return @@ -22,9 +25,10 @@ proc fop002 { } { set testfile $testdir/a.db set destfile $testdir/b.db - set db [berkdb_open -create -btree -mode $perms $testfile] + set db [eval \ + {berkdb_open -create} $omethod $args -mode $perms $testfile] error_check_good db_open [is_valid_db $db] TRUE - error_check_good db_put [$db put 1 a] 0 + error_check_good db_put [$db put 1 [chop_data $method a]] 0 error_check_good db_close [$db close] 0 # Eliminate all read and write permission, and try to execute @@ -41,12 +45,14 @@ proc fop002 { } { puts "\t\tFop002.a: Testing $op for failure." switch $op { open { - test_$op $testfile $rdonly 1 + test_$op $testfile $omethod $args $rdonly 1 } rename { test_$op $testfile $destfile 1 } - open_create - + open_create { + test_$op $testfile $omethod $args 1 + } remove { test_$op $testfile 1 } @@ -64,7 +70,7 @@ proc fop002 { } { puts "\t\tFop002.b: Testing $op for success." 
switch $op { open { - test_$op $testfile $rdonly 0 + test_$op $testfile $omethod $args $rdonly 0 } rename { test_$op $testfile $destfile 0 @@ -99,9 +105,9 @@ proc test_rename { testfile destfile {expectfail 0} } { } } -proc test_open_create { testfile {expectfail 0} } { +proc test_open_create { testfile omethod args {expectfail 0} } { set stat [catch { set db \ - [berkdb_open -create -btree $testfile]} res] + [eval {berkdb_open -create} $omethod $args $testfile]} res] if { $expectfail == 1 } { error_check_good open_create_err $res \ "db open:permission denied" @@ -112,12 +118,12 @@ proc test_open_create { testfile {expectfail 0} } { } } -proc test_open { testfile {readonly 0} {expectfail 0} } { +proc test_open { testfile omethod args {readonly 0} {expectfail 0} } { if { $readonly == 1 } { set stat [catch {set db \ - [berkdb_open -rdonly -btree $testfile]} res] + [eval {berkdb_open -rdonly} $omethod $args $testfile]} res] } else { - set stat [catch {set db [berkdb_open -btree $testfile]} res] + set stat [catch {set db [berkdb_open $omethod $testfile]} res] } if { $expectfail == 1 } { error_check_good open_err $res \ diff --git a/db/test/fop003.tcl b/db/test/fop003.tcl index cd60043fa..a7bb8829c 100644 --- a/db/test/fop003.tcl +++ b/db/test/fop003.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2003 +# Copyright (c) 2003-2004 # Sleepycat Software. All rights reserved. # -# $Id: fop003.tcl,v 1.4 2003/09/08 16:54:10 sandstro Exp $ +# $Id: fop003.tcl,v 1.6 2004/02/25 17:49:05 carol Exp $ # # TEST fop003 # TEST @@ -13,24 +13,33 @@ # TEST 2. Can -create into zero-length existing file. # TEST 3. Can -create into non-zero-length existing file if and # TEST only if DB_TRUNCATE is specified. -proc fop003 { } { +proc fop003 { method args } { global errorInfo source ./include.tcl env_cleanup $testdir + if { [is_btree $method] != 1 } { + puts "Skipping fop003 for method $method" + return + } + + set args [convert_args $method $args] + set omethod [convert_method $method] + set tnum "003" set testfile fop$tnum.db - puts "Fop$tnum: Test of required behavior for sendmail." + puts "Fop$tnum ($method): Test of required behavior for sendmail." puts "\tFop$tnum.a: -truncate is not allowed within\ txn or locking env." set envflags "lock txn" foreach flag $envflags { set env [berkdb_env_noerr -create -home $testdir -$flag] - set db [berkdb_open_noerr -create -btree -env $env $testfile] + set db [eval {berkdb_open_noerr -create} \ + $omethod $args -env $env $testfile] error_check_good db_open [is_valid_db $db] TRUE error_check_good db_close [$db close] 0 - catch {[berkdb_open_noerr -truncate -btree -env $env \ + catch {[berkdb_open_noerr -truncate $omethod $args -env $env \ $testfile]} res error_check_good "$flag env not allowed" [is_substr $res \ "DB_TRUNCATE illegal with locking specified"] 1 @@ -46,7 +55,8 @@ proc fop003 { } { # DB file, but the open should succeed. set fd [open $testdir/foo w] close $fd - catch {set db [berkdb_open_noerr -create -btree $testdir/foo]} res + catch {set db [eval \ + {berkdb_open_noerr -create} $omethod $args $testdir/foo]} res error_check_good open_fail [is_substr $errorInfo \ "unexpected file type or format"] 1 error_check_good db_open [is_valid_db $db] TRUE @@ -56,17 +66,18 @@ proc fop003 { } { non-zero-length file." # Create a db file. Close and reopen with -create. Make # sure that we still have the same file by checking the contents. 
- set key "key" + set key 1 set data "data" set file "file.db" - set db [berkdb_open -create -btree $testdir/$file] + set db [eval {berkdb_open -create $omethod} $args $testdir/$file] error_check_good db_open [is_valid_db $db] TRUE - error_check_good db_put [$db put $key $data] 0 + error_check_good db_put [$db put $key [chop_data $method $data]] 0 error_check_good db_close [$db close] 0 - set db [berkdb_open -create -btree $testdir/$file] + set db [eval {berkdb_open -create $omethod} $args $testdir/$file] error_check_good db_open2 [is_valid_db $db] TRUE set ret [$db get $key] - error_check_good db_get [lindex [lindex $ret 0] 1] $data + error_check_good db_get \ + [lindex [lindex $ret 0] 1] [pad_data $method $data] error_check_good db_close2 [$db close] 0 puts "\tFop$tnum.d: -create is allowed on open -truncate of\ @@ -74,7 +85,8 @@ proc fop003 { } { # Use the file we already have with -truncate flag. The open # should be successful, and when we query for the key that # used to be there, we should get nothing. - set db [berkdb_open -create -truncate -btree $testdir/$file] + set db [eval \ + {berkdb_open -create -truncate $omethod} $args $testdir/$file] error_check_good db_open3 [is_valid_db $db] TRUE set ret [$db get $key] error_check_good db_get [lindex [lindex $ret 0] 1] "" diff --git a/db/test/fop004.tcl b/db/test/fop004.tcl index 0818170ef..ca3c2efd0 100644 --- a/db/test/fop004.tcl +++ b/db/test/fop004.tcl @@ -1,36 +1,40 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: fop004.tcl,v 11.1 2003/06/16 20:31:32 sandstro Exp $ +# $Id: fop004.tcl,v 11.6 2004/09/22 18:01:05 bostic Exp $ # # TEST fop004 -# TEST Test of DB->rename(). -# TEST (formerly test075) -proc fop004 { { method btree } { tnum "004" } args } { +# TEST Test of DB->rename(). (formerly test075) +# TEST Test that files can be renamed from one directory to another. +# TEST Test that files can be renamed using absolute or relative +# TEST pathnames. +proc fop004 { method { tnum "004" } args } { global encrypt global errorCode global errorInfo - source ./include.tcl - puts "Fop$tnum: ($args): Test of DB->rename()" - # If we are using an env, then testfile should just be the - # db name. Otherwise it is the test directory and the name. + set args [convert_args $method $args] + set omethod [convert_method $method] + + puts "Fop$tnum: ($method $args): Test of DB->rename()" + set eindex [lsearch -exact $args "-env"] if { $eindex != -1 } { # If we are using an env, then skip this test. # It needs its own. incr eindex set env [lindex $args $eindex] - puts "Skipping fop004 for env $env" + puts "Skipping fop$tnum for env $env" return } if { $encrypt != 0 } { - puts "Skipping fop004 for security" + puts "Skipping fop$tnum for security" return } + cleanup $testdir NULL # Define absolute pathnames set curdir [pwd] @@ -39,165 +43,219 @@ proc fop004 { { method btree } { tnum "004" } args } { cd $curdir set reldir $testdir + # Name subdirectories for renaming from one directory to another. + set subdira A + set subdirb B + # Set up absolute and relative pathnames for test - set paths [list $fulldir $reldir] - foreach path $paths { - puts "\tFop$tnum: starting test of $path path" - set oldfile $path/fop$tnum-old.db - set newfile $path/fop$tnum.db - set env NULL - set envargs "" - - # Loop through test using the following rename options - # 1. no environment, not in transaction - # 2. with environment, not in transaction - # 3. 
rename with auto-commit - # 4. rename in committed transaction - # 5. rename in aborted transaction - - foreach op "noenv env auto commit abort" { - - puts "\tFop$tnum.a: Create/rename file with $op" - - # Make sure we're starting with a clean slate. - - if { $op == "noenv" } { - cleanup $path $env - if { $env == "NULL" } { - error_check_bad "$oldfile exists" \ - [file exists $oldfile] 1 - error_check_bad "$newfile exists" \ - [file exists $newfile] 1 + set paths [list "absolute $fulldir" "relative $reldir"] + set files [list "fop$tnum-old.db fop$tnum-new.db {name change}" \ + "fop$tnum.db fop$tnum.db {directory change}"] + + foreach pathinfo $paths { + set pathtype [lindex $pathinfo 0] + set path [lindex $pathinfo 1] + foreach fileinfo $files { + set desc [lindex $fileinfo 2] + puts "Fop$tnum: Test of $pathtype path $path with $desc" + set env NULL + set envargs "" + + # Loop through test using the following rename options + # 1. no environment, not in transaction + # 2. with environment, not in transaction + # 3. rename with auto-commit + # 4. rename in committed transaction + # 5. rename in aborted transaction + + foreach op "noenv env auto commit abort" { + + puts "\tFop$tnum.a: Create/rename with $op" + # If we are using an env, then testfile should + # be the db name. Otherwise it is the path we + # are testing and the name. + # + set old [lindex $fileinfo 0] + set new [lindex $fileinfo 1] + # Set up subdirectories if necessary. + if { $desc == "directory change" } { + file mkdir $testdir/$subdira + file mkdir $testdir/$subdirb + set oldname $subdira/$old + set newname $subdirb/$new + set oldextent $subdira/__dbq.$old.0 + set newextent $subdirb/__dbq.$new.0 + } else { + set oldname $old + set newname $new + set oldextent __dbq.$old.0 + set newextent __dbq.$new.0 + } + # If we don't have an env, we're going to + # operate on the file using its absolute + # or relative path. Tack it on the front. + if { $op == "noenv" } { + set oldfile $path/$oldname + set newfile $path/$newname + set oldextent $path/$oldextent + set newextent $path/$newextent + } else { + set oldfile $oldname + set newfile $newname + set txnarg "" + if { $op == "auto" || $op == "commit" \ + || $op == "abort" } { + set txnarg " -txn" + } + set env [eval {berkdb_env -create} \ + $txnarg -home $path] + set envargs "-env $env" + error_check_good \ + env_open [is_valid_env $env] TRUE } - } - if { $op == "env" } { - env_cleanup $path - set env [berkdb_env -create -home $path] - set envargs "-env $env" - error_check_good env_open [is_valid_env $env] TRUE - } + # Files don't exist before starting the test. + # + check_file_exist $oldfile $env $path 0 + check_file_exist $newfile $env $path 0 - if { $op == "auto" || $op == "commit" || $op == "abort" } { - env_cleanup $path - set env [berkdb_env -create -home $path -txn] - set envargs "-env $env" - error_check_good env_open [is_valid_env $env] TRUE - } + puts "\t\tFop$tnum.a.1: Create file $oldfile" + set db [eval {berkdb_open -create -mode 0644} \ + $omethod $envargs $args $oldfile] + error_check_good dbopen [is_valid_db $db] TRUE - puts "\t\tFop$tnum.a.1: create" - set db [eval {berkdb_open -create -mode 0644} \ - -$method $envargs $args $oldfile] - error_check_good dbopen [is_valid_db $db] TRUE + # Use numeric key so record-based methods + # don't need special treatment. 
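A numeric key plus the chop_data/pad_data helpers is what lets the same put/get code serve btree and hash as well as the record-number methods (recno, queue), whose data may be truncated or padded to a fixed length. The round trip, assuming $db and $method are set as in the surrounding code:

    set key 1            ;# a record number is a valid key for every method
    set data "data"

    # chop_data truncates the value for fixed-length methods and is a
    # no-op for the others.
    error_check_good dbput [$db put $key [chop_data $method $data]] 0

    # pad_data applies the matching fixed-length padding before comparing.
    set ret [$db get $key]
    error_check_good dbget \
        [lindex [lindex $ret 0] 1] [pad_data $method $data]
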
+ set key 1 + set data data - if { $env == "NULL" } { - error_check_bad \ - "$oldfile exists" [file exists $oldfile] 0 - error_check_bad \ - "$newfile exists" [file exists $newfile] 1 - } + error_check_good dbput \ + [$db put $key [chop_data $method $data]] 0 + error_check_good dbclose [$db close] 0 - # The nature of the key and data are unimportant; - # use numeric key to record-based methods don't need - # special treatment. - set key 1 - set data [pad_data $method data] - - error_check_good dbput [$db put $key $data] 0 - error_check_good dbclose [$db close] 0 - - puts "\t\tFop$tnum.a.2: rename" - if { $env == "NULL" } { - error_check_bad \ - "$oldfile exists" [file exists $oldfile] 0 - error_check_bad \ - "$newfile exists" [file exists $newfile] 1 - } + puts "\t\tFop$tnum.a.2:\ + Rename file to $newfile" + check_file_exist $oldfile $env $path 1 + check_file_exist $newfile $env $path 0 - # Regular renames use berkdb dbrename but transaction - # protected renames must use $env dbrename. - if { $op == "noenv" || $op == "env" } { - error_check_good rename_file [eval {berkdb dbrename} \ - $envargs $oldfile $newfile] 0 - } elseif { $op == "auto" } { - error_check_good rename_file [eval {$env dbrename} \ - -auto_commit $oldfile $newfile] 0 - } else { - # $op is "abort" or "commit" - set txn [$env txn] - error_check_good rename_file [eval {$env dbrename} \ - -txn $txn $oldfile $newfile] 0 - error_check_good txn_$op [$txn $op] 0 - } + # Regular renames use berkdb dbrename + # Txn-protected renames use $env dbrename. + if { $op == "noenv" || $op == "env" } { + error_check_good rename [eval \ + {berkdb dbrename} $envargs \ + $oldfile $newfile] 0 + } elseif { $op == "auto" } { + error_check_good rename [eval \ + {$env dbrename} -auto_commit \ + $oldfile $newfile] 0 + } else { + # $op is "abort" or "commit" + set txn [$env txn] + error_check_good rename [eval \ + {$env dbrename} -txn $txn \ + $oldfile $newfile] 0 + error_check_good txn_$op [$txn $op] 0 + } - if { $env == "NULL" } { - error_check_bad \ - "$oldfile exists" [file exists $oldfile] 1 - error_check_bad \ - "$newfile exists" [file exists $newfile] 0 - } + if { $op != "abort" } { + check_file_exist $oldfile $env $path 0 + check_file_exist $newfile $env $path 1 + } else { + check_file_exist $oldfile $env $path 1 + check_file_exist $newfile $env $path 0 + } - puts "\t\tFop$tnum.a.3: check" - # Open again with create to make sure we're not caching or - # anything silly. In the normal case (no env), we already - # know the file doesn't exist. - set odb [eval {berkdb_open -create -mode 0644} \ - $envargs -$method $args $oldfile] - set ndb [eval {berkdb_open -create -mode 0644} \ - $envargs -$method $args $newfile] - error_check_good odb_open [is_valid_db $odb] TRUE - error_check_good ndb_open [is_valid_db $ndb] TRUE - - # The DBT from the "old" database should be empty, - # not the "new" one, except in the case of an abort. - set odbt [$odb get $key] - if { $op == "abort" } { - error_check_good odbt_has_data [llength $odbt] 1 - } else { - set ndbt [$ndb get $key] - error_check_good odbt_empty [llength $odbt] 0 - error_check_bad ndbt_empty [llength $ndbt] 0 - error_check_good ndbt [lindex \ - [lindex $ndbt 0] 1] $data - } - error_check_good odb_close [$odb close] 0 - error_check_good ndb_close [$ndb close] 0 - - # Now there's both an old and a new. Rename the - # "new" to the "old" and make sure that fails. - # - # XXX Ideally we'd do this test even when there's - # an external environment, but that env has - # errpfx/errfile set now. 
:-( - puts "\tFop$tnum.b: Make sure rename fails\ - instead of overwriting" - if { $env != "NULL" } { - error_check_good env_close [$env close] 0 - set env [berkdb_env_noerr -home $path] - error_check_good env_open2 \ - [is_valid_env $env] TRUE + # Check that extent files moved too, unless + # we aborted the rename. + if { [is_queueext $method ] == 1 } { + if { $op != "abort" } { + check_file_exist \ + $oldextent $env $path 0 + check_file_exist \ + $newextent $env $path 1 + } else { + check_file_exist \ + $oldextent $env $path 1 + check_file_exist \ + $newextent $env $path 0 + } + } + + puts "\t\tFop$tnum.a.3: Check file contents" + # Open again with create to make sure we're not + # caching. In the normal case (no env), we + # already know the file doesn't exist. + set odb [eval {berkdb_open -create -mode 0644} \ + $envargs $omethod $args $oldfile] + set ndb [eval {berkdb_open -create -mode 0644} \ + $envargs $omethod $args $newfile] + error_check_good \ + odb_open [is_valid_db $odb] TRUE + error_check_good \ + ndb_open [is_valid_db $ndb] TRUE + + # The DBT from the "old" database should be + # empty, not the "new" one, except in the case + # of an abort. + set odbt [$odb get $key] + if { $op == "abort" } { + error_check_good \ + odbt_has_data [llength $odbt] 1 + } else { + set ndbt [$ndb get $key] + error_check_good \ + odbt_empty [llength $odbt] 0 + error_check_bad \ + ndbt_empty [llength $ndbt] 0 + error_check_good ndbt \ + [lindex [lindex $ndbt 0] 1] \ + [pad_data $method $data] + } + error_check_good odb_close [$odb close] 0 + error_check_good ndb_close [$ndb close] 0 + + # Now there's both an old and a new. Rename the + # "new" to the "old" and make sure that fails. + # + puts "\tFop$tnum.b: Make sure rename fails\ + instead of overwriting" + set envargs "" + if { $env != "NULL" } { + error_check_good \ + env_close [$env close] 0 + set env [berkdb_env_noerr -home $path] + set envargs " -env $env" + error_check_good env_open2 \ + [is_valid_env $env] TRUE + } set ret [catch {eval {berkdb dbrename} \ - -env $env $newfile $oldfile} res] + $envargs $newfile $oldfile} res] error_check_bad rename_overwrite $ret 0 error_check_good rename_overwrite_ret \ [is_substr $errorCode EEXIST] 1 - } - # Verify and then start over from a clean slate. - verify_dir $path "\tFop$tnum.c: " - cleanup $path $env - if { $env != "NULL" } { - error_check_good env_close [$env close] 0 - } - if { $env == "NULL" } { - error_check_bad "$oldfile exists" \ - [file exists $oldfile] 1 - error_check_bad "$newfile exists" \ - [file exists $newfile] 1 - - set oldfile fop$tnum-old.db - set newfile test$tnum.db + # Verify and then start over from a clean slate. + verify_dir $path "\tFop$tnum.c: " + verify_dir $path/$subdira "\tFop$tnum.c: " + verify_dir $path/$subdirb "\tFop$tnum.c: " + if { $env != "NULL" } { + error_check_good \ + env_close2 [$env close] 0 + } + env_cleanup $path + check_file_exist $oldfile $env $path 0 + check_file_exist $newfile $env $path 0 } } } } + +proc check_file_exist { filename env path expected } { + if { $env != "NULL" } { + error_check_good "$filename exists in env" \ + [file exists $path/$filename] $expected + } else { + error_check_good \ + "$filename exists" [file exists $filename] $expected + } +} diff --git a/db/test/fop005.tcl b/db/test/fop005.tcl index 39a808df1..7635b3f7b 100644 --- a/db/test/fop005.tcl +++ b/db/test/fop005.tcl @@ -1,129 +1,148 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. 
All rights reserved. # -# $Id: fop005.tcl,v 11.2 2003/09/04 23:41:10 bostic Exp $ +# $Id: fop005.tcl,v 11.6 2004/09/22 18:01:05 bostic Exp $ # # TEST fop005 # TEST Test of DB->remove() # TEST Formerly test080. -proc fop005 { { method btree } {tnum "005"} args } { +# TEST Test use of dbremove with and without envs, with absolute +# TEST and relative paths, and with subdirectories. + +proc fop005 { method args } { source ./include.tcl + set tnum "005" set args [convert_args $method $args] + set omethod [convert_method $method] - puts "Fop$tnum: Test of DB->remove()" + puts "Fop$tnum: ($method $args): Test of DB->remove()" # Determine full path set curdir [pwd] cd $testdir set fulldir [pwd] cd $curdir - - # Test both relative and absolute path - set paths [list $fulldir $testdir] - - set encrypt 0 - set encargs "" - set args [split_encargs $args encargs] + set reldir $testdir # If we are using an env, then skip this test. # It needs its own. set eindex [lsearch -exact $args "-env"] if { $eindex != -1 } { incr eindex - set e [lindex $args $eindex] - puts "Skipping fop005 for env $e" + set env [lindex $args $eindex] + puts "Skipping fop$tnum for env $env" return } + cleanup $testdir NULL - foreach path $paths { + # Set up absolute and relative pathnames, and a subdirectory. + set subdira A + set filename fop$tnum.db + set extentname __dbq.$filename.0 + set paths [list $fulldir $reldir] + set files [list "$filename $extentname"\ + "$subdira/$filename $subdira/$extentname"] - set dbfile test$tnum.db - set testfile $path/$dbfile - set eargs $encargs + foreach path $paths { + foreach fileset $files { + set filename [lindex $fileset 0] + set extentname [lindex $fileset 1] - # Loop through test using the following remove options - # 1. no environment, not in transaction - # 2. with environment, not in transaction - # 3. rename with auto-commit - # 4. rename in committed transaction - # 5. rename in aborted transaction + # Loop through test using the following options: + # 1. no environment, not in transaction + # 2. with environment, not in transaction + # 3. remove with auto-commit + # 4. remove in committed transaction + # 5. remove in aborted transaction foreach op "noenv env auto commit abort" { - - # Make sure we're starting with a clean slate. - env_cleanup $testdir - if { $op == "noenv" } { - set dbfile $testfile - set e NULL - set envargs "" - } else { - if { $op == "env" } { - set largs "" + file mkdir $testdir/$subdira + if { $op == "noenv" } { + set file $path/$filename + set extentfile $path/$extentname + set env NULL + set envargs "" } else { + set file $filename + set extentfile $extentname set largs " -txn" + if { $op == "env" } { + set largs "" + } + set env [eval {berkdb_env -create \ + -home $path} $largs] + set envargs " -env $env " + error_check_good \ + env_open [is_valid_env $env] TRUE } - if { $encargs != "" } { - set eargs " -encrypt " - } - set e [eval {berkdb_env -create -home $path} \ - $encargs $largs] - set envargs "-env $e" - error_check_good env_open [is_valid_env $e] TRUE - } - - puts "\tFop$tnum: dbremove with $op in $path" - puts "\tFop$tnum.a.1: Create file" - set db [eval {berkdb_open -create -mode 0644} \ - -$method $envargs $eargs $args {$dbfile}] - error_check_good db_open [is_valid_db $db] TRUE - - # The nature of the key and data are unimportant; - # use numeric key so record-based methods don't need - # special treatment. 
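fop005 drives DB->remove through the same five modes as the rename test. Stripped of the loop machinery, the three call shapes look like this (variables as set up above; in the real test each mode runs against a freshly created file):

    # 1. No environment (or a non-transactional one): the static command.
    error_check_good remove_noenv \
        [eval {berkdb dbremove} $envargs $file] 0

    # 2. Transactional environment, auto-commit:
    error_check_good remove_auto \
        [eval {$env dbremove} -auto_commit $file] 0

    # 3. Explicit transaction, then commit (or abort to keep the file):
    set txn [$env txn]
    error_check_good remove_txn [eval {$env dbremove} -txn $txn $file] 0
    error_check_good txn_commit [$txn commit] 0
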
- set key 1 - set data [pad_data $method data] - error_check_good dbput [$db put $key $data] 0 - error_check_good dbclose [$db close] 0 - error_check_good file_exists_before \ - [file exists $testfile] 1 + puts "\tFop$tnum: dbremove with $op\ + in path $path" + puts "\t\tFop$tnum.a.1: Create file $file" + set db [eval {berkdb_open -create -mode 0644} \ + $omethod $envargs $args {$file}] + error_check_good db_open [is_valid_db $db] TRUE + + # Use a numeric key so record-based methods + # don't need special treatment. + set key 1 + set data [pad_data $method data] + + error_check_good dbput \ + [$db put $key [chop_data $method $data]] 0 + error_check_good dbclose [$db close] 0 + check_file_exist $file $env $path 1 + if { [is_queueext $method] == 1 } { + check_file_exist \ + $extentfile $env $path 1 + } - # Use berkdb dbremove for non-transactional tests - # and $env dbremove for transactional tests - puts "\tFop$tnum.a.2: Remove file" - if { $op == "noenv" || $op == "env" } { - error_check_good remove_$op [eval \ - {berkdb dbremove} $eargs $envargs $dbfile] 0 - } elseif { $op == "auto" } { - error_check_good remove_$op \ - [eval {$e dbremove} -auto_commit $dbfile] 0 - } else { - # $op is "abort" or "commit" - set txn [$e txn] - error_check_good remove_$op \ - [eval {$e dbremove} -txn $txn $dbfile] 0 - error_check_good txn_$op [$txn $op] 0 - } + # Use berkdb dbremove for non-txn tests + # and $env dbremove for transactional tests + puts "\t\tFop$tnum.a.2: Remove file" + if { $op == "noenv" || $op == "env" } { + error_check_good remove_$op \ + [eval {berkdb dbremove} \ + $envargs $file] 0 + } elseif { $op == "auto" } { + error_check_good remove_$op \ + [eval {$env dbremove} \ + -auto_commit $file] 0 + } else { + # $op is "abort" or "commit" + set txn [$env txn] + error_check_good remove_$op \ + [eval {$env dbremove} \ + -txn $txn $file] 0 + error_check_good txn_$op [$txn $op] 0 + } - puts "\tFop$tnum.a.3: Check that file is gone" - # File should now be gone, unless the op is an abort. - if { $op != "abort" } { - error_check_good exists_after \ - [file exists $testfile] 0 - } else { - error_check_good exists_after \ - [file exists $testfile] 1 - } + puts "\t\tFop$tnum.a.3: Check that file is gone" + # File should now be gone, unless the op is an + # abort. Check extent files if necessary. + if { $op != "abort" } { + check_file_exist $file $env $path 0 + if { [is_queueext $method] == 1 } { + check_file_exist \ + $extentfile $env $path 0 + } + } else { + check_file_exist $file $env $path 1 + if { [is_queueext $method] == 1 } { + check_file_exist \ + $extentfile $env $path 1 + } + } - if { $e != "NULL" } { - error_check_good env_close [$e close] 0 + if { $env != "NULL" } { + error_check_good envclose [$env close] 0 + } + env_cleanup $path + check_file_exist $file $env $path 0 } - - set dbfile fop$tnum-old.db - set testfile $path/$dbfile } } } diff --git a/db/test/fop006.tcl b/db/test/fop006.tcl index 127a327fa..b77f88565 100644 --- a/db/test/fop006.tcl +++ b/db/test/fop006.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2003 +# Copyright (c) 2003-2004 # Sleepycat Software. All rights reserved. # -# $Id: fop006.tcl,v 1.7 2003/09/11 13:11:45 sandstro Exp $ +# $Id: fop006.tcl,v 1.11 2004/07/09 14:33:21 carol Exp $ # # TEST fop006.tcl # TEST Test file system operations in multiple simultaneous @@ -11,11 +11,19 @@ # TEST Start a second transaction, do a file operation. 
Abort # TEST or commit txn1, then abort or commit txn2, and check for # TEST appropriate outcome. -proc fop006 { } { +proc fop006 { method args } { source ./include.tcl + if { [is_btree $method] != 1 } { + puts "Skipping fop006 for method $method" + return + } + + set args [convert_args $method $args] + set omethod [convert_method $method] + env_cleanup $testdir - puts "\nFop006: File system ops in multiple transactions" + puts "\nFop006 ($method): File system ops in multiple transactions" set exists {a b} set noexist {foo bar} @@ -69,16 +77,16 @@ proc fop006 { } { # Extract elements of the case set op1 [lindex [lindex $case 0] 0] - set args1 [lindex [lindex $case 0] 1] + set names1 [lindex [lindex $case 0] 1] set res1 [lindex [lindex $case 0] 2] set end1 [lindex [lindex $case 0] 3] set op2 [lindex [lindex $case 1] 0] - set args2 [lindex [lindex $case 1] 1] + set names2 [lindex [lindex $case 1] 1] set res2 [lindex [lindex $case 1] 2] - puts "\tFop006.$testid: $op1 ($args1) $res1 $end1;\ - $op2 ($args2) $res2." + puts "\tFop006.$testid: $op1 ($names1) $res1 $end1;\ + $op2 ($names2) $res2." foreach end2 { abort commit } { # Create transactional environment. @@ -86,23 +94,25 @@ proc fop006 { } { error_check_good is_valid_env [is_valid_env $env] TRUE # Create databases - set db [berkdb_open\ - -create -btree -env $env -auto_commit a.db] + set db [eval {berkdb_open \ + -create} $omethod $args -env $env -auto_commit a] error_check_good db_open [is_valid_db $db] TRUE - error_check_good db_put [$db put -auto_commit 1 a] 0 + error_check_good db_put \ + [$db put -auto_commit 1 [chop_data $method a]] 0 error_check_good db_close [$db close] 0 - set db [berkdb_open\ - -create -btree -env $env -auto_commit b.db] + set db [eval {berkdb_open \ + -create} $omethod $args -env $env -auto_commit b] error_check_good db_open [is_valid_db $db] TRUE - error_check_good db_put [$db put -auto_commit 1 a] 0 + error_check_good db_put \ + [$db put -auto_commit 1 [chop_data $method a]] 0 error_check_good db_close [$db close] 0 # Start transaction 1 and perform a file op. set txn1 [$env txn] error_check_good \ txn_begin [is_valid_txn $txn1 $env] TRUE - set result1 [do_op $op1 $args1 $txn1 $env] + set result1 [do_op $omethod $op1 $names1 $txn1 $env $args] if {$res1 == 0} { error_check_good \ op1_should_succeed $result1 $res1 @@ -114,7 +124,7 @@ proc fop006 { } { # Start transaction 2 before ending transaction 1. set pid [exec $tclsh_path $test_path/wrap.tcl \ fopscript.tcl $testdir/fop006.log \ - $op2 $end2 $res2 $args2 &] + $omethod $op2 $end2 $res2 $names2 &] # End transaction 1 and close any open db handles. # Txn2 will now unblock and finish. @@ -132,7 +142,14 @@ proc fop006 { } { error_check_good env_close [$env close] 0 error_check_good \ envremove [berkdb envremove -home $testdir] 0 + + # Check for errors in log file. + set errstrings [eval findfail $testdir/fop006.log] + foreach str $errstrings { + puts "FAIL: error message in log file: $str" + } env_cleanup $testdir } } } + diff --git a/db/test/fopscript.tcl b/db/test/fopscript.tcl index e368b233d..b4b6ffde6 100644 --- a/db/test/fopscript.tcl +++ b/db/test/fopscript.tcl @@ -1,34 +1,38 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2003 +# Copyright (c) 2003-2004 # Sleepycat Software. All rights reserved. 
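Each fop006 case has the same shape: txn1 performs its operation in the parent, a child process (wrap.tcl running fopscript.tcl) starts txn2 and blocks on txn1's locks, and once txn1 is resolved the child finishes and its log is scanned for unexpected failures. A condensed sketch of the parent side, assuming the variables set up in the loop above; the wait step uses the suite's generic watch_procs helper, which is an assumption here since the exact wait call is not shown in this hunk:

    set txn1 [$env txn]
    set result1 [do_op $omethod $op1 $names1 $txn1 $env $args]

    # Child runs op2/end2 in its own transaction; it blocks until txn1
    # is resolved.
    set pid [exec $tclsh_path $test_path/wrap.tcl \
        fopscript.tcl $testdir/fop006.log \
        $omethod $op2 $end2 $res2 $names2 &]

    # Resolve txn1 (commit shown here), let the child finish, then scan
    # its output for error messages.
    error_check_good txn1_commit [$txn1 commit] 0
    watch_procs $pid 1 120
    set errstrings [eval findfail $testdir/fop006.log]
    foreach str $errstrings {
        puts "FAIL: error message in log file: $str"
    }
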
# -# $Id: fopscript.tcl,v 1.3 2003/09/04 23:41:11 bostic Exp $ +# $Id: fopscript.tcl,v 1.6 2004/03/03 16:48:42 carol Exp $ # # Fop006 script - test of fileops in multiple transactions # Usage: fopscript +# omethod: access method for database # op: file operation to perform # end: how to end the transaction (abort or commit) # result: expected result of the transaction -# args: name(s) of files to operate on +# names: name(s) of files to operate on +# args: additional args to do_op source ./include.tcl source $test_path/test.tcl source $test_path/testutils.tcl -set usage "fopscript op end result args" +set usage "fopscript omethod op end result names args" # Verify usage -if { $argc != 4 } { +if { $argc < 5 } { puts stderr "FAIL:[timestamp] Usage: $usage" exit } # Initialize arguments -set op [ lindex $argv 0 ] -set end [ lindex $argv 1 ] -set result [ lindex $argv 2 ] -set args [ lindex $argv 3 ] +set omethod [ lindex $argv 0 ] +set op [ lindex $argv 1 ] +set end [ lindex $argv 2 ] +set result [ lindex $argv 3 ] +set names [ lindex $argv 4 ] +set args [lindex [lrange $argv 5 end] 0] # Join the env set dbenv [eval berkdb_env -home $testdir] @@ -39,7 +43,7 @@ puts "\tFopscript.a: begin 2nd transaction (will block)" set txn2 [$dbenv txn] error_check_good txn2_begin [is_valid_txn $txn2 $dbenv] TRUE # Execute op2 -set op2result [do_op $op $args $txn2 $dbenv] +set op2result [do_op $omethod $op $names $txn2 $dbenv $args] # End txn2 error_check_good txn2_end [$txn2 $end] 0 if {$result == 0} { diff --git a/db/test/foputils.tcl b/db/test/foputils.tcl index b0031c537..4b9230392 100644 --- a/db/test/foputils.tcl +++ b/db/test/foputils.tcl @@ -1,75 +1,138 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2003 +# Copyright (c) 2003-2004 # Sleepycat Software. All rights reserved. # -# $Id: foputils.tcl,v 11.3 2003/09/04 23:41:11 bostic Exp $ +# $Id: foputils.tcl,v 11.9 2004/09/22 18:01:05 bostic Exp $ # -proc do_op {op args txn env} { +proc do_op {omethod op names txn env {largs ""}} { switch -exact $op { - rename { do_rename $args $txn $env } - remove { do_remove $args $txn $env } - open_create { do_create $args $txn $env } - open { do_open $args $txn $env } - open_excl { do_create_excl $args $txn $env } - truncate { do_truncate $args $txn $env } + delete { do_delete $names } + rename { do_rename $names $txn $env } + remove { do_remove $names $txn $env } + noop { do_noop } + open_create { do_create $omethod $names $txn $env $largs } + open { do_open $omethod $names $txn $env $largs } + open_excl { do_create_excl $omethod $names $txn $env $largs } + truncate { do_truncate $omethod $names $txn $env $largs } default { puts "FAIL: operation $op not recognized" } } } -proc do_rename {args txn env} { - # Pull db names out of $args - set oldname [lindex $args 0] - set newname [lindex $args 1] +proc do_subdb_op {omethod op names txn env {largs ""}} { + # + # The 'noop' and 'delete' actions are the same + # for subdbs as for regular db files. + # + switch -exact $op { + delete { do_delete $names } + rename { do_subdb_rename $names $txn $env } + remove { do_subdb_remove $names $txn $env } + noop { do_noop } + default { puts "FAIL: operation $op not recognized" } + } +} + +proc do_delete {names} { + # + # This is the odd man out among the ops -- it's not a Berkeley + # DB file operation, but mimics an operation done externally, + # as if a user deleted a file with "rm" or "erase". + # + # We assume the file is found in $testdir. 
+ # + global testdir + + if {[catch [fileremove -f $testdir/$names] result]} { + return $result + } else { + return 0 + } +} + +proc do_noop { } { + # Do nothing. Report success. + return 0 +} + +proc do_rename {names txn env} { + # Pull db names out of $names + set oldname [lindex $names 0] + set newname [lindex $names 1] if {[catch {eval $env dbrename -txn $txn \ - $oldname.db $newname.db} result]} { + $oldname $newname} result]} { + return $result + } else { + return 0 + } +} + +proc do_subdb_rename {names txn env} { + # Pull db and subdb names out of $names + set filename [lindex $names 0] + set oldsname [lindex $names 1] + set newsname [lindex $names 2] + + if {[catch {eval $env dbrename -txn $txn $filename \ + $oldsname $newsname} result]} { + return $result + } else { + return 0 + } +} + + +proc do_remove {names txn env} { + if {[catch {eval $env dbremove -txn $txn $names} result]} { return $result } else { return 0 } } -proc do_remove {args txn env} { - if {[catch {eval $env dbremove -txn $txn $args.db} result]} { +proc do_subdb_remove {names txn env} { + set filename [lindex $names 0] + set subname [lindex $names 1] + if {[catch {eval $env dbremove -txn $txn $filename $subname} result]} { return $result } else { return 0 } } -proc do_create {args txn env} { - if {[catch {eval berkdb_open -create -btree -env $env \ - -txn $txn $args.db} result]} { +proc do_create {omethod names txn env {largs ""}} { + if {[catch {eval berkdb_open -create $omethod $largs -env $env \ + -txn $txn $names} result]} { return $result } else { return 0 } } -proc do_open {args txn env} { - if {[catch {eval berkdb_open -btree -env $env \ - -txn $txn $args.db} result]} { +proc do_open {omethod names txn env {largs ""}} { + if {[catch {eval berkdb_open $omethod $largs -env $env \ + -txn $txn $names} result]} { return $result } else { return 0 } } -proc do_create_excl {args txn env} { - if {[catch {eval berkdb_open -create -excl -btree -env $env \ - -txn $txn $args.db} result]} { +proc do_create_excl {omethod names txn env {largs ""}} { + if {[catch {eval berkdb_open -create -excl $omethod $largs -env $env \ + -txn $txn $names} result]} { return $result } else { return 0 } } -proc do_truncate {args txn env} { +proc do_truncate {omethod names txn env {largs ""}} { # First we have to get a handle. We omit the -create flag # because testing of truncate is meaningful only in cases # where the database already exists. - set db [berkdb_open -btree -env $env -txn $txn $args.db] + set db [eval {berkdb_open $omethod} $largs {-env $env -txn $txn $names}] error_check_good db_open [is_valid_db $db] TRUE if {[catch {$db truncate -txn $txn} result]} { @@ -236,14 +299,6 @@ proc create_op2 { op2 exists noexist open retval } { set retlist [concat $retlist1 $retlist2] } - # "File open" errors arise from trying to rename - # open files. - if { $retval == "file is open" } { - set old $open - set new $noexist - set retlist \ - [build_retlist $op2 $old $new $retval] - } } remove { # Successful removes result from removing existing @@ -260,11 +315,6 @@ proc create_op2 { op2 exists noexist open retval } { if { $retval == "no such file" } { set file $noexist } - # "File is open" errors arise from trying to remove - # open files. 
- if { $retval == "file is open" } { - set file $open - } set retlist [build_retlist $op2 $file "" $retval] } open_create { @@ -273,10 +323,9 @@ proc create_op2 { op2 exists noexist open retval } { if { $retval == 0 } { set file [concat $exists $open $noexist] } - # "File exists", "file is open", and "no such file" + # "File exists" and "no such file" # do not happen in open_create. if { $retval == "file exists" || \ - $retval == "file is open" || \ $retval == "no such file" } { return } @@ -292,10 +341,8 @@ proc create_op2 { op2 exists noexist open retval } { if { $retval == "no such file" } { set file $noexist } - # "File exists" and "file is open" do not happen - # in open. - if { $retval == "file exists" || \ - $retval == "file is open" } { + # "File exists" errors do not happen in open. + if { $retval == "file exists" } { return } set retlist [build_retlist $op2 $file "" $retval] @@ -310,10 +357,8 @@ proc create_op2 { op2 exists noexist open retval } { if { $retval == "file exists" } { set file [concat $exists $open] } - # "No such file" and "file is open" do not happen - # in open_excl. - if { $retval == "no such file" || \ - $retval == "file is open" } { + # "No such file" errors do not arise in open_excl. + if { $retval == "no such file" } { return } set retlist [build_retlist $op2 $file "" $retval] @@ -326,9 +371,8 @@ proc create_op2 { op2 exists noexist open retval } { # No other return values are meaningful to test since # do_truncate starts with an open and we've already # tested open. - if { $retval == "file is open" || \ - $retval == "file exists" || \ - $retval == "no such file" } { + if { $retval == "no such file" || \ + $retval == "file exists" } { return } set retlist [build_retlist $op2 $file "" $retval] @@ -358,8 +402,6 @@ proc extract_error { message } { set message "file exists" } elseif {[is_substr $message "no such file"] == 1 } { set message "no such file" - } elseif {[is_substr $message "file is open"] == 1 } { - set message "file is open" } return $message } diff --git a/db/test/hsearch.tcl b/db/test/hsearch.tcl index 4ef89f0f2..f2d223264 100644 --- a/db/test/hsearch.tcl +++ b/db/test/hsearch.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: hsearch.tcl,v 11.10 2003/01/08 05:49:58 bostic Exp $ +# $Id: hsearch.tcl,v 11.11 2004/01/28 03:36:28 bostic Exp $ # # Historic Hsearch interface test. # Use the first 1000 entries from the dictionary. diff --git a/db/test/include.tcl b/db/test/include.tcl index 00ca814d1..c15d4f8c2 100644 --- a/db/test/include.tcl +++ b/db/test/include.tcl @@ -9,6 +9,7 @@ set rpc_testdir $rpc_path/TESTDIR set src_root @srcdir@/.. set test_path @srcdir@/../test +set je_root @srcdir@/../../je global testdir set testdir ./TESTDIR diff --git a/db/test/join.tcl b/db/test/join.tcl index bb2288a53..eba811dfd 100644 --- a/db/test/join.tcl +++ b/db/test/join.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: join.tcl,v 11.22 2003/01/08 05:49:58 bostic Exp $ +# $Id: join.tcl,v 11.23 2004/01/28 03:36:28 bostic Exp $ # # TEST jointest # TEST Test duplicate assisted joins. 
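Taken together, these helpers give fop006 and fopscript a uniform calling convention: every operation returns 0 on success or the raw error text, which extract_error (below) collapses to a canonical phrase such as "file exists" or "no such file" for comparison against the expected-result tables. A hedged usage sketch with placeholder database names ("a" assumed to exist, "foo" and "nosuchfile" assumed not to):

    # Hypothetical: rename an existing database inside $txn.
    set res [do_op -btree rename {a foo} $txn $env ""]
    error_check_good rename_ok $res 0

    # Removing a file that is not there yields an error string; the
    # tests normalize it before comparing.
    set res [do_op -btree remove nosuchfile $txn $env ""]
    if { $res != 0 } {
        puts "remove failed: [extract_error $res]"   ;# e.g. "no such file"
    }
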
Executes 1, 2, 3 and 4-way joins diff --git a/db/test/lock001.tcl b/db/test/lock001.tcl index 3a4b4f69d..48eb95515 100644 --- a/db/test/lock001.tcl +++ b/db/test/lock001.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: lock001.tcl,v 11.20 2003/01/08 05:50:00 bostic Exp $ +# $Id: lock001.tcl,v 11.21 2004/01/28 03:36:28 bostic Exp $ # # TEST lock001 diff --git a/db/test/lock002.tcl b/db/test/lock002.tcl index 54ec8de42..1fdea56cd 100644 --- a/db/test/lock002.tcl +++ b/db/test/lock002.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: lock002.tcl,v 11.20 2003/01/08 05:50:02 bostic Exp $ +# $Id: lock002.tcl,v 11.21 2004/01/28 03:36:28 bostic Exp $ # # TEST lock002 # TEST Exercise basic multi-process aspects of lock. diff --git a/db/test/lock003.tcl b/db/test/lock003.tcl index 610ba41f6..a53514283 100644 --- a/db/test/lock003.tcl +++ b/db/test/lock003.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: lock003.tcl,v 11.27 2003/09/16 19:01:37 sandstro Exp $ +# $Id: lock003.tcl,v 11.28 2004/01/28 03:36:28 bostic Exp $ # # TEST lock003 # TEST Exercise multi-process aspects of lock. Generate a bunch of parallel diff --git a/db/test/lock004.tcl b/db/test/lock004.tcl index b283fad57..e71a51f9b 100644 --- a/db/test/lock004.tcl +++ b/db/test/lock004.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: lock004.tcl,v 11.6 2003/01/08 05:50:08 bostic Exp $ +# $Id: lock004.tcl,v 11.7 2004/01/28 03:36:28 bostic Exp $ # # TEST lock004 # TEST Test locker ids wraping around. diff --git a/db/test/lock005.tcl b/db/test/lock005.tcl index 497e07b80..8b3b977ad 100644 --- a/db/test/lock005.tcl +++ b/db/test/lock005.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: lock005.tcl,v 1.9 2003/04/16 17:50:48 ubell Exp $ +# $Id: lock005.tcl,v 1.10 2004/01/28 03:36:28 bostic Exp $ # # TEST lock005 # TEST Check that page locks are being released properly. diff --git a/db/test/lock006.tcl b/db/test/lock006.tcl index e8636fe1e..49aac27c5 100644 --- a/db/test/lock006.tcl +++ b/db/test/lock006.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2003 +# Copyright (c) 2003-2004 # Sleepycat Software. All rights reserved. # -# $Id: lock006.tcl,v 11.2 2003/09/04 23:41:11 bostic Exp $ +# $Id: lock006.tcl,v 11.3 2004/01/28 03:36:28 bostic Exp $ # # TEST lock006 # TEST Test lock_vec interface. We do all the same things that diff --git a/db/test/lockscript.tcl b/db/test/lockscript.tcl index ea582cc13..f542c100b 100644 --- a/db/test/lockscript.tcl +++ b/db/test/lockscript.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: lockscript.tcl,v 11.18 2003/01/08 05:50:10 bostic Exp $ +# $Id: lockscript.tcl,v 11.19 2004/01/28 03:36:28 bostic Exp $ # # Random lock tester. 
# Usage: lockscript dir numiters numobjs sleepint degree readratio diff --git a/db/test/log001.tcl b/db/test/log001.tcl index 163968915..cec09b84f 100644 --- a/db/test/log001.tcl +++ b/db/test/log001.tcl @@ -1,13 +1,15 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: log001.tcl,v 11.31 2003/01/08 05:50:11 bostic Exp $ +# $Id: log001.tcl,v 11.34 2004/09/22 18:01:05 bostic Exp $ # # TEST log001 # TEST Read/write log records. +# TEST Test with and without fixed-length, in-memory logging, +# TEST and encryption. proc log001 { } { global passwd global has_crypto @@ -15,37 +17,52 @@ proc log001 { } { berkdb srand $rand_init set iter 1000 + set max [expr 1024 * 128] - log001_body $max $iter 1 - log001_body $max $iter 0 - log001_body $max [expr $iter * 15] 1 - log001_body $max [expr $iter * 15] 0 - - # Skip remainder of test if release does not support encryption. - if { $has_crypto == 0 } { - return + foreach fixedlength { 0 1 } { + foreach inmem { 1 0 } { + log001_body $max $iter $fixedlength $inmem + log001_body $max [expr $iter * 15] $fixedlength $inmem + + # Skip encrypted tests if not supported. + if { $has_crypto == 0 } { + continue + } + log001_body $max\ + $iter $fixedlength $inmem "-encryptaes $passwd" + log001_body $max\ + [expr $iter * 15] $fixedlength $inmem "-encryptaes $passwd" + } } - - log001_body $max $iter 1 "-encryptaes $passwd" - log001_body $max $iter 0 "-encryptaes $passwd" - log001_body $max [expr $iter * 15] 1 "-encryptaes $passwd" - log001_body $max [expr $iter * 15] 0 "-encryptaes $passwd" } -proc log001_body { max nrecs fixedlength {encargs ""} } { +proc log001_body { max nrecs fixedlength inmem {encargs ""} } { source ./include.tcl - puts -nonewline "Log001: Basic put/get log records " + puts -nonewline "Log001: Basic put/get log records: " if { $fixedlength == 1 } { - puts "(fixed-length $encargs)" + puts -nonewline "fixed-length ($encargs)" } else { - puts "(variable-length $encargs)" + puts -nonewline "variable-length ($encargs)" + } + + # In-memory logging requires a large enough log buffer that + # any active transaction can be aborted. + if { $inmem == 1 } { + set lbuf [expr 8 * [expr 1024 * 1024]] + puts " with in-memory logging." + } else { + puts " with on-disk logging." } env_cleanup $testdir + set logargs "" + if { $inmem == 1 } { + set logargs "-log_inmemory -log_buffer $lbuf" + } set env [eval {berkdb_env -log -create -home $testdir -mode 0644} \ - $encargs -log_max $max] + $encargs $logargs -log_max $max] error_check_good envopen [is_valid_env $env] TRUE # We will write records to the log and make sure we can diff --git a/db/test/log002.tcl b/db/test/log002.tcl index cec46acf8..1c8f2b919 100644 --- a/db/test/log002.tcl +++ b/db/test/log002.tcl @@ -1,29 +1,45 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: log002.tcl,v 11.30 2003/04/18 14:39:10 sandstro Exp $ +# $Id: log002.tcl,v 11.33 2004/09/22 18:01:05 bostic Exp $ # # TEST log002 # TEST Tests multiple logs # TEST Log truncation # TEST LSN comparison and file functionality. 
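All of the log00x variants use the same recipe for the in-memory case: pass -log_inmemory and size -log_buffer so that every record of any active transaction fits in memory, since there are no log files to fall back on. A minimal standalone sketch, assuming testdir from include.tcl:

    source ./include.tcl
    env_cleanup $testdir

    set max [expr 1024 * 128]                 ;# log_max, as in log001
    set lbuf [expr 8 * 1024 * 1024]           ;# 8MB in-memory log buffer
    set env [berkdb_env -log -create -home $testdir -mode 0644 \
        -log_inmemory -log_buffer $lbuf -log_max $max]
    error_check_good envopen [is_valid_env $env] TRUE

    # With in-memory logging nothing appears under $testdir/log.*, which
    # is why log002 skips its log_file check in this mode.
    error_check_good envclose [$env close] 0
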
proc log002 { } { - source ./include.tcl global rand_init error_check_good set_random_seed [berkdb srand $rand_init] 0 + foreach inmem { 1 0 } { + log002_body $inmem + } +} + +proc log002_body { inmem } { + source ./include.tcl + puts "Log002: Multiple log test w/trunc, file, compare functionality" env_cleanup $testdir set max [expr 1024 * 128] - set env [berkdb_env -create -home $testdir -mode 0644 \ - -log -log_max $max] + + set logargs "" + if { $inmem == 0 } { + puts "Log002: Using on-disk logging." + } else { + puts "Log002: Using in-memory logging." + set lbuf [expr 8 * [expr 1024 * 1024]] + set logargs "-log_inmemory -log_buffer $lbuf" + } + set env [eval {berkdb_env} -create -home $testdir -log \ + -mode 0644 $logargs -log_max $max] error_check_good envopen [is_valid_env $env] TRUE - # We'll record every hundred'th record for later use + # We'll record every hundredth record for later use set info_list {} puts "\tLog002.a: Writing log records" @@ -55,17 +71,17 @@ proc log002 { } { } puts "\tLog002.c: Checking log_file" - set flist [glob $testdir/log*] - foreach p $info_list { + if { $inmem == 0 } { + set flist [glob $testdir/log*] + foreach p $info_list { + set lsn [lindex $p 0] + set f [$env log_file $lsn] - set lsn [lindex $p 0] - set f [$env log_file $lsn] - - # Change all backslash separators on Windows to forward slash - # separators, which is what the rest of the test suite expects. - regsub -all {\\} $f {/} f - - error_check_bad log_file:$f [lsearch $flist $f] -1 + # Change backslash separators on Windows to forward + # slashes, as the rest of the test suite expects. + regsub -all {\\} $f {/} f + error_check_bad log_file:$f [lsearch $flist $f] -1 + } } puts "\tLog002.d: Verifying records" diff --git a/db/test/log003.tcl b/db/test/log003.tcl index b0140b57b..e8d10dbfc 100644 --- a/db/test/log003.tcl +++ b/db/test/log003.tcl @@ -1,18 +1,33 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: log003.tcl,v 11.30 2003/04/18 14:39:10 sandstro Exp $ +# $Id: log003.tcl,v 11.34 2004/09/22 18:01:05 bostic Exp $ # # TEST log003 # TEST Verify that log_flush is flushing records correctly. proc log003 { } { - source ./include.tcl global rand_init error_check_good set_random_seed [berkdb srand $rand_init] 0 - puts "Log003: Verify log_flush behavior" + # Even though log_flush doesn't do anything for in-memory + # logging, we want to make sure calling it doesn't break + # anything. + foreach inmem { 1 0 } { + log003_body $inmem + } +} + +proc log003_body { inmem } { + source ./include.tcl + + puts -nonewline "Log003: Verify log_flush behavior" + if { $inmem == 0 } { + puts " (on-disk logging)." + } else { + puts " (in-memory logging)." + } set max [expr 1024 * 128] env_cleanup $testdir @@ -23,8 +38,12 @@ proc log003 { } { foreach rec "$short_rec $long_rec $very_long_rec" { puts "\tLog003.a: Verify flush on [string length $rec] byte rec" - set env [berkdb_env -log -home $testdir \ - -create -mode 0644 -log_max $max] + set logargs "" + if { $inmem == 1 } { + set logargs "-log_inmemory -log_buffer [expr $max * 2]" + } + set env [eval {berkdb_env} -log -home $testdir -create \ + -mode 0644 $logargs -log_max $max] error_check_good envopen [is_valid_env $env] TRUE set lsn [$env log_put $rec] @@ -60,11 +79,18 @@ proc log003 { } { log_cleanup $testdir } + if { $inmem == 1 } { + puts "Log003: Skipping remainder of test for in-memory logging." 
+ return + } + foreach rec "$short_rec $long_rec $very_long_rec" { puts "\tLog003.b: \ Verify flush on non-last record [string length $rec]" - set env [berkdb_env \ - -create -log -home $testdir -mode 0644 -log_max $max] + + set env [berkdb_env -log -home $testdir \ + -create -mode 0644 -log_max $max] + error_check_good envopen [is_valid_env $env] TRUE # Put 10 random records @@ -91,8 +117,7 @@ proc log003 { } { # Now, we want to crash the region and recheck. Closing the # log does not flush any records, so we'll use a close to - # do the "crash" - + # do the "crash". # # Now, close and remove the log region error_check_good env:close:$env [$env close] 0 @@ -100,8 +125,8 @@ proc log003 { } { error_check_good env:remove $ret 0 # Re-open the log and try to read the record. - set env [berkdb_env \ - -home $testdir -create -log -mode 0644 -log_max $max] + set env [berkdb_env -log -home $testdir \ + -create -mode 0644 -log_max $max] error_check_good envopen [is_valid_env $env] TRUE set logc [$env log_cursor] diff --git a/db/test/log004.tcl b/db/test/log004.tcl index 6b983dc51..15af405f5 100644 --- a/db/test/log004.tcl +++ b/db/test/log004.tcl @@ -1,22 +1,28 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: log004.tcl,v 11.29 2003/01/08 05:50:23 bostic Exp $ +# $Id: log004.tcl,v 11.31 2004/07/19 16:08:36 carol Exp $ # # TEST log004 # TEST Make sure that if we do PREVs on a log, but the beginning of the # TEST log has been truncated, we do the right thing. proc log004 { } { + foreach inmem { 1 0 } { + log004_body $inmem + } +} + +proc log004_body { inmem } { source ./include.tcl puts "Log004: Prev on log when beginning of log has been truncated." # Use archive test to populate log env_cleanup $testdir puts "\tLog004.a: Call archive to populate log." - archive + archive $inmem # Delete all log files under 100 puts "\tLog004.b: Delete all log files under 100." diff --git a/db/test/log005.tcl b/db/test/log005.tcl index 0ed9c133d..ea6e3fa33 100644 --- a/db/test/log005.tcl +++ b/db/test/log005.tcl @@ -1,22 +1,46 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: log005.tcl,v 11.2 2003/01/08 05:50:24 bostic Exp $ +# $Id: log005.tcl,v 11.6 2004/09/22 18:01:05 bostic Exp $ # # TEST log005 # TEST Check that log file sizes can change on the fly. proc log005 { } { - source ./include.tcl - puts "Log005: Check that log file sizes can change." + # Skip the test for HP-UX, where we can't do the second + # env open. + global is_hp_test + if { $is_hp_test == 1 } { + puts "Log005: Skipping for HP-UX." + return + } + + foreach inmem { 1 0 } { + log005_body $inmem + } +} +proc log005_body { inmem } { + source ./include.tcl env_cleanup $testdir + puts -nonewline "Log005: Check that log file sizes can change" + if { $inmem == 0 } { + puts " (on-disk logging)." + } else { + puts " (in-memory logging)." + } + # Open the environment, set and check the log file size. puts "\tLog005.a: open, set and check the log file size." 
- set env [berkdb_env \ - -create -home $testdir -log_buffer 10000 -log_max 1000000 -txn] + set logargs "" + if { $inmem == 1 } { + set lbuf [expr 1024 * 1024] + set logargs "-log_inmemory -log_buffer $lbuf" + } + set env [eval {berkdb_env} -create -home $testdir \ + $logargs -log_max 1000000 -txn] error_check_good envopen [is_valid_env $env] TRUE set db [berkdb_open \ -env $env -create -mode 0644 -btree -auto_commit a.db] @@ -61,6 +85,11 @@ proc log005 { } { error_check_good db_close [$db close] 0 error_check_good env_close [$env close] 0 + if { $inmem == 1 } { + puts "Log005: Skipping remainder of test for in-memory logging." + return + } + puts "\tLog005.d: check the log file size is unchanged after recovery." # Open again, running recovery. Verify the log file size is as we # left it. diff --git a/db/test/log006.tcl b/db/test/log006.tcl index 4b964a73d..b6c5cd924 100644 --- a/db/test/log006.tcl +++ b/db/test/log006.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: log006.tcl,v 11.5 2003/09/04 23:41:11 bostic Exp $ +# $Id: log006.tcl,v 11.12 2004/09/22 18:01:05 bostic Exp $ # # TEST log006 # TEST Test log file auto-remove. @@ -22,8 +22,10 @@ proc log006 { } { # Open the environment, set auto-remove flag. Use smaller log # files to make more of them. puts "\tLog006.a: open environment, populate database." + set lbuf 16384 + set lmax 65536 set env [berkdb_env_noerr -log_remove \ - -create -home $testdir -log_buffer 10000 -log_max 100000 -txn] + -create -home $testdir -log_buffer $lbuf -log_max $lmax -txn] error_check_good envopen [is_valid_env $env] TRUE log006_put $testdir $env @@ -31,9 +33,9 @@ proc log006 { } { # # Check log files. Using the small log file size, we should # have made a lot of log files, check that we have a reasonable - # number left, less than 12. + # number left, less than 25. # - set log_expect 12 + set log_expect 25 puts "\tLog006.b: Check log files removed." set lfiles [glob -nocomplain $testdir/log.*] set remlen [llength $lfiles] @@ -43,13 +45,19 @@ proc log006 { } { # Files may not be sorted, sort them and then save the last filename. set oldfile [lindex [lsort -ascii $lfiles] end] - # - # Rerun log006_put with a long lived txn. This unresolved txn will - # mean that no files will be able to be removed. + # Rerun log006_put with a long lived txn. # puts "\tLog006.c: Rerun put loop with long-lived transaction." + cleanup $testdir $env set txn [$env txn] error_check_good txn [is_valid_txn $txn $env] TRUE + + # Give the txn something to do so no files can be removed. + set testfile temp.db + set db [eval {berkdb_open_noerr -create -mode 0644} \ + -env $env -txn $txn -pagesize 8192 -btree $testfile] + error_check_good dbopen [is_valid_db $db] TRUE + log006_put $testdir $env puts "\tLog006.d: Check log files not removed." @@ -58,6 +66,7 @@ proc log006 { } { set lfiles [lsort -ascii $lfiles] error_check_good lfiles_chk [lsearch $lfiles $oldfile] 0 error_check_good txn_commit [$txn commit] 0 + error_check_good db_close [$db close] 0 error_check_good ckp1 [$env txn_checkpoint] 0 error_check_good ckp2 [$env txn_checkpoint] 0 @@ -130,7 +139,7 @@ proc log006 { } { # 6. Verify log files removed. puts "\tLog006.j: open environment w/o auto remove, populate database." 
set env [berkdb_env -recover \ - -create -home $testdir -log_buffer 10000 -log_max 100000 -txn] + -create -home $testdir -log_buffer $lbuf -log_max $lmax -txn] error_check_good envopen [is_valid_env $env] TRUE log006_put $testdir $env @@ -163,7 +172,7 @@ proc log006 { } { puts $cid "set_flags db_log_autoremove" close $cid set env [berkdb_env -recover \ - -create -home $testdir -log_buffer 10000 -log_max 100000 -txn] + -create -home $testdir -log_buffer $lbuf -log_max $lmax -txn] error_check_good envopen [is_valid_env $env] TRUE log006_put $testdir $env @@ -180,11 +189,7 @@ proc log006 { } { # Modified from test003. # proc log006_put { testdir env } { - set testfile log006.db - set limit 100 - - cleanup $testdir $env # # Specify a pagesize so we can control how many log files # are created and left over. @@ -193,11 +198,8 @@ proc log006_put { testdir env } { -env $env -auto_commit -pagesize 8192 -btree $testfile] error_check_good dbopen [is_valid_db $db] TRUE + set lmax [$env get_lg_max] set file_list [get_file_list] - if { [llength $file_list] > $limit } { - set file_list [lrange $file_list 1 $limit] - } - set len [llength $file_list] set count 0 foreach f $file_list { if { [string compare [file type $f] "file"] != 0 } { @@ -207,7 +209,8 @@ proc log006_put { testdir env } { # Should really catch errors set fid [open $f r] fconfigure $fid -translation binary - set data [read $fid] + # Read in less than the maximum log size. + set data [read $fid [expr $lmax - [expr $lmax / 8]]] close $fid set t [$env txn] diff --git a/db/test/logtrack.list b/db/test/logtrack.list index eff97b151..120ecb3a6 100644 --- a/db/test/logtrack.list +++ b/db/test/logtrack.list @@ -4,7 +4,6 @@ PREFIX __db BEGIN addrem 41 BEGIN big 43 BEGIN ovref 44 -BEGIN relink 45 BEGIN debug 47 BEGIN noop 48 BEGIN pg_alloc 49 @@ -13,6 +12,7 @@ BEGIN cksum 51 BEGIN pg_freedata 52 BEGIN pg_prepare 53 BEGIN pg_new 54 +BEGIN pg_init 60 PREFIX __dbreg BEGIN register 2 PREFIX __bam @@ -25,6 +25,13 @@ BEGIN repl 58 BEGIN root 59 BEGIN curadj 64 BEGIN rcuradj 65 +BEGIN relink 147 +PREFIX __fop +BEGIN create 143 +BEGIN remove 144 +BEGIN write 145 +BEGIN rename 146 +BEGIN file_remove 141 PREFIX __ham BEGIN insdel 21 BEGIN newpage 22 @@ -41,6 +48,7 @@ BEGIN mvptr 85 BEGIN del 79 BEGIN add 80 BEGIN delext 83 +PREFIX __rep PREFIX __txn BEGIN regop 10 BEGIN ckp 11 diff --git a/db/test/logtrack.tcl b/db/test/logtrack.tcl index 2dbb82c71..50851932f 100644 --- a/db/test/logtrack.tcl +++ b/db/test/logtrack.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: logtrack.tcl,v 11.13 2003/07/16 13:48:05 bostic Exp $ +# $Id: logtrack.tcl,v 11.15 2004/04/14 16:08:42 carol Exp $ # # logtrack.tcl: A collection of routines, formerly implemented in Perl # as log.pl, to track which log record types the test suite hits. @@ -66,6 +66,7 @@ proc logtrack_read { dirname } { # seen and the log record types that were not seen but should have been seen. 
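log006 exercises log file auto-removal enabled in two ways, both visible above: as a flag at environment open time, and via the environment's DB_CONFIG file so it takes effect on the next open. A short sketch of both, assuming $testdir from include.tcl; the buffer and log sizes mirror the values the test uses:

    # 1. At environment open time:
    set env [berkdb_env_noerr -log_remove \
        -create -home $testdir -log_buffer 16384 -log_max 65536 -txn]

    # 2. Via DB_CONFIG, picked up when the environment is next opened:
    set cid [open $testdir/DB_CONFIG w]
    puts $cid "set_flags db_log_autoremove"
    close $cid
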
proc logtrack_summary { } { global ltsname ltlist testdir + global one_test set seendb [berkdb_open $ltsname] error_check_good seendb_open [is_valid_db $seendb] TRUE @@ -109,7 +110,7 @@ proc logtrack_summary { } { [is_valid_cursor [set ec [$existdb cursor]] $existdb] TRUE while { [llength [set dbt [$ec get -next]]] != 0 } { set rec [lindex [lindex $dbt 0] 0] - if { [$seendb count $rec] == 0 } { + if { [$seendb count $rec] == 0 && $one_test == "ALL" } { puts "WARNING: log record type $rec: not tested" } } diff --git a/db/test/mdbscript.tcl b/db/test/mdbscript.tcl index fca46dbcd..88433485a 100644 --- a/db/test/mdbscript.tcl +++ b/db/test/mdbscript.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: mdbscript.tcl,v 11.32 2003/09/29 18:19:19 sandstro Exp $ +# $Id: mdbscript.tcl,v 11.33 2004/01/28 03:36:28 bostic Exp $ # # Process script for the multi-process db tester. diff --git a/db/test/memp001.tcl b/db/test/memp001.tcl index 9eb128da2..4818072de 100644 --- a/db/test/memp001.tcl +++ b/db/test/memp001.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: memp001.tcl,v 11.52 2003/08/27 13:51:34 sue Exp $ +# $Id: memp001.tcl,v 11.53 2004/01/28 03:36:28 bostic Exp $ # # TEST memp001 diff --git a/db/test/memp002.tcl b/db/test/memp002.tcl index 367dda00c..763ef923d 100644 --- a/db/test/memp002.tcl +++ b/db/test/memp002.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: memp002.tcl,v 11.48 2003/01/08 05:50:32 bostic Exp $ +# $Id: memp002.tcl,v 11.49 2004/01/28 03:36:28 bostic Exp $ # # TEST memp002 diff --git a/db/test/memp003.tcl b/db/test/memp003.tcl index 1ef7b720c..a4e68bfd5 100644 --- a/db/test/memp003.tcl +++ b/db/test/memp003.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: memp003.tcl,v 11.50 2003/05/22 13:32:10 bostic Exp $ +# $Id: memp003.tcl,v 11.51 2004/01/28 03:36:28 bostic Exp $ # # TEST memp003 # TEST Test reader-only/writer process combinations; we use the access methods diff --git a/db/test/memp004.tcl b/db/test/memp004.tcl index 2c47eb089..1a421faea 100644 --- a/db/test/memp004.tcl +++ b/db/test/memp004.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: memp004.tcl,v 1.4 2003/01/20 05:07:07 mjc Exp $ +# $Id: memp004.tcl,v 1.5 2004/01/28 03:36:28 bostic Exp $ # # TEST memp004 diff --git a/db/test/mpoolscript.tcl b/db/test/mpoolscript.tcl index aab81745b..38d689c96 100644 --- a/db/test/mpoolscript.tcl +++ b/db/test/mpoolscript.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: mpoolscript.tcl,v 11.17 2003/01/08 05:50:40 bostic Exp $ +# $Id: mpoolscript.tcl,v 11.18 2004/01/28 03:36:28 bostic Exp $ # # Random multiple process mpool tester. 
# Usage: mpoolscript dir id numiters numfiles numpages sleepint diff --git a/db/test/mutex001.tcl b/db/test/mutex001.tcl index 1d01afb82..66d14c41c 100644 --- a/db/test/mutex001.tcl +++ b/db/test/mutex001.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: mutex001.tcl,v 11.24 2003/01/08 05:50:42 bostic Exp $ +# $Id: mutex001.tcl,v 11.25 2004/01/28 03:36:28 bostic Exp $ # # TEST mutex001 diff --git a/db/test/mutex002.tcl b/db/test/mutex002.tcl index ba6fb536a..f03a63ac1 100644 --- a/db/test/mutex002.tcl +++ b/db/test/mutex002.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: mutex002.tcl,v 11.24 2003/01/08 05:50:46 bostic Exp $ +# $Id: mutex002.tcl,v 11.25 2004/01/28 03:36:28 bostic Exp $ # # TEST mutex002 diff --git a/db/test/mutex003.tcl b/db/test/mutex003.tcl index e40f9b067..7efd0883e 100644 --- a/db/test/mutex003.tcl +++ b/db/test/mutex003.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: mutex003.tcl,v 11.25 2003/01/08 05:50:50 bostic Exp $ +# $Id: mutex003.tcl,v 11.26 2004/01/28 03:36:28 bostic Exp $ # # TEST mutex003 diff --git a/db/test/mutexscript.tcl b/db/test/mutexscript.tcl index d761acb7e..7bf1bbe95 100644 --- a/db/test/mutexscript.tcl +++ b/db/test/mutexscript.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: mutexscript.tcl,v 11.17 2003/01/08 05:50:53 bostic Exp $ +# $Id: mutexscript.tcl,v 11.18 2004/01/28 03:36:28 bostic Exp $ # # Random mutex tester. # Usage: mutexscript dir numiters mlocks sleepint degree diff --git a/db/test/ndbm.tcl b/db/test/ndbm.tcl index 7074d7aa3..00ee1f4fd 100644 --- a/db/test/ndbm.tcl +++ b/db/test/ndbm.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: ndbm.tcl,v 11.17 2003/01/08 05:50:56 bostic Exp $ +# $Id: ndbm.tcl,v 11.18 2004/01/28 03:36:28 bostic Exp $ # # Historic NDBM interface test. # Use the first 1000 entries from the dictionary. diff --git a/db/test/parallel.tcl b/db/test/parallel.tcl index e9c4c1df5..bd1f468fa 100644 --- a/db/test/parallel.tcl +++ b/db/test/parallel.tcl @@ -1,5 +1,5 @@ # Code to load up the tests in to the Queue database -# $Id: parallel.tcl,v 11.43 2003/10/24 20:43:19 sandstro Exp $ +# $Id: parallel.tcl,v 11.46 2004/09/22 18:01:05 bostic Exp $ proc load_queue { file {dbdir RUNQUEUE} nitems } { global serial_tests global num_serial @@ -12,7 +12,7 @@ proc load_queue { file {dbdir RUNQUEUE} nitems } { error_check_good dbenv [is_valid_env $env] TRUE # Open two databases, one for tests that may be run - # in parallel, the other for tests we want to run + # in parallel, the other for tests we want to run # while only a single process is testing. set db [eval {berkdb_open -env $env -create \ -mode 0644 -len 200 -queue queue.db} ] @@ -68,19 +68,19 @@ proc load_queue { file {dbdir RUNQUEUE} nitems } { puts "loading..." 
flush stdout - set num_serial 0 + set num_serial 0 set num_parallel 0 for { set i 0 } { $i < $maxload } { incr i } { set str $testarr($i) # Push serial tests into serial testing db, others - # into parallel db. + # into parallel db. if { [is_serial $str] } { set ret [eval {$serialdb put -append $str}] - error_check_good put:serialdb [expr $ret > 0] 1 + error_check_good put:serialdb [expr $ret > 0] 1 incr num_serial } else { set ret [eval {$db put -append $str}] - error_check_good put:paralleldb [expr $ret > 0] 1 + error_check_good put:paralleldb [expr $ret > 0] 1 incr num_parallel } } @@ -100,14 +100,14 @@ proc init_runqueue { {dbdir RUNQUEUE} nitems list} { file mkdir $dbdir } puts "Creating test list..." - $list -n + $list ALL -n load_queue ALL.OUT $dbdir $nitems file delete TEST.LIST file rename ALL.OUT TEST.LIST } proc run_parallel { nprocs {list run_all} {nitems ALL} } { - global num_serial + global num_serial global num_parallel # Forcibly remove stuff from prior runs, if it's still there. @@ -153,14 +153,19 @@ proc run_parallel { nprocs {list run_all} {nitems ALL} } { set failed 0 for { set i 0 } { $i <= $nprocs } { incr i } { if { [file exists ALL.OUT.$i] == 1 } { - if { [check_failed_run ALL.OUT.$i] != 0 } { + puts -nonewline "Checking output from ALL.OUT.$i ... " + if { [check_output ALL.OUT.$i] == 1 } { set failed 1 - puts "Regression tests failed in process $i." } + puts " done." } } if { $failed == 0 } { puts "Regression tests succeeded." + } else { + puts "Regression tests failed." + puts "Review UNEXPECTED OUTPUT lines above for errors." + puts "Complete logs found in ALL.OUT.x files" } } @@ -250,9 +255,9 @@ proc run_queue { i rundir queuedir {qtype parallel} {nitems 0} } { puts "Process $i: $count commands executed in [format %02u:%02u \ [expr $elapsed / 3600] [expr ($elapsed % 3600) / 60]]" - $dbc close - $db close - $dbenv close + error_check_good close_parallel_cursor_$i [$dbc close] 0 + error_check_good close_parallel_db_$i [$db close] 0 + error_check_good close_parallel_env_$i [$dbenv close] 0 # # We need to put the pid file in the builddir's idea @@ -363,7 +368,7 @@ proc exec_ptest { nprocs basename test args } { watch_procs $pidlist 30 36000 set failed 0 for { set i 1 } { $i <= $nprocs } { incr i } { - if { [check_failed_run ALL.OUT.$i] != 0 } { + if { [check_output ALL.OUT.$i] == 1 } { set failed 1 puts "Test $test failed in process $i." } diff --git a/db/test/recd001.tcl b/db/test/recd001.tcl index 90e62c13a..67ad8004c 100644 --- a/db/test/recd001.tcl +++ b/db/test/recd001.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: recd001.tcl,v 11.42 2003/04/24 23:38:51 mjc Exp $ +# $Id: recd001.tcl,v 11.43 2004/01/28 03:36:28 bostic Exp $ # # TEST recd001 # TEST Per-operation recovery tests for non-duplicate, non-split diff --git a/db/test/recd002.tcl b/db/test/recd002.tcl index afdb838c2..6189c5e23 100644 --- a/db/test/recd002.tcl +++ b/db/test/recd002.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: recd002.tcl,v 11.31 2003/01/08 05:51:06 bostic Exp $ +# $Id: recd002.tcl,v 11.32 2004/01/28 03:36:28 bostic Exp $ # # TEST recd002 # TEST Split recovery tests. 
For every known split log message, makes sure diff --git a/db/test/recd003.tcl b/db/test/recd003.tcl index 68d88fbc7..b6e799b3c 100644 --- a/db/test/recd003.tcl +++ b/db/test/recd003.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: recd003.tcl,v 11.31 2003/01/08 05:51:11 bostic Exp $ +# $Id: recd003.tcl,v 11.32 2004/01/28 03:36:28 bostic Exp $ # # TEST recd003 # TEST Duplicate recovery tests. For every known duplicate log message, diff --git a/db/test/recd004.tcl b/db/test/recd004.tcl index 378e0d3ef..72e66b5e6 100644 --- a/db/test/recd004.tcl +++ b/db/test/recd004.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: recd004.tcl,v 11.30 2003/01/08 05:51:15 bostic Exp $ +# $Id: recd004.tcl,v 11.31 2004/01/28 03:36:28 bostic Exp $ # # TEST recd004 # TEST Big key test where big key gets elevated to internal page. diff --git a/db/test/recd005.tcl b/db/test/recd005.tcl index 49e2e449f..df6a2df3a 100644 --- a/db/test/recd005.tcl +++ b/db/test/recd005.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: recd005.tcl,v 11.35 2003/01/08 05:51:17 bostic Exp $ +# $Id: recd005.tcl,v 11.36 2004/01/28 03:36:28 bostic Exp $ # # TEST recd005 # TEST Verify reuse of file ids works on catastrophic recovery. diff --git a/db/test/recd006.tcl b/db/test/recd006.tcl index 6f8ee567a..bca968e5b 100644 --- a/db/test/recd006.tcl +++ b/db/test/recd006.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: recd006.tcl,v 11.27 2003/01/08 05:51:31 bostic Exp $ +# $Id: recd006.tcl,v 11.28 2004/01/28 03:36:28 bostic Exp $ # # TEST recd006 # TEST Nested transactions. diff --git a/db/test/recd007.tcl b/db/test/recd007.tcl index 7c24978e3..9764d840f 100644 --- a/db/test/recd007.tcl +++ b/db/test/recd007.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: recd007.tcl,v 11.62 2003/10/14 14:55:36 sue Exp $ +# $Id: recd007.tcl,v 11.64 2004/07/07 19:08:21 carol Exp $ # # TEST recd007 # TEST File create/delete tests. @@ -136,6 +136,8 @@ proc recd007 { method args} { > $tmpfile} ret] error_check_good db_printlog $stat 0 fileremove $tmpfile + set fixed_len $orig_fixed_len + return } proc file_recover_create { dir env_cmd method opts dbfile cmd msg } { diff --git a/db/test/recd008.tcl b/db/test/recd008.tcl index 95480c31f..764fc6893 100644 --- a/db/test/recd008.tcl +++ b/db/test/recd008.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: recd008.tcl,v 1.27 2003/01/08 05:51:41 bostic Exp $ +# $Id: recd008.tcl,v 1.28 2004/01/28 03:36:28 bostic Exp $ # # TEST recd008 # TEST Test deeply nested transactions and many-child transactions. diff --git a/db/test/recd009.tcl b/db/test/recd009.tcl index 0f42d11be..d6700a0bd 100644 --- a/db/test/recd009.tcl +++ b/db/test/recd009.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. 
# -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: recd009.tcl,v 1.19 2003/01/08 05:51:44 bostic Exp $ +# $Id: recd009.tcl,v 1.20 2004/01/28 03:36:28 bostic Exp $ # # TEST recd009 # TEST Verify record numbering across split/reverse splits and recovery. diff --git a/db/test/recd010.tcl b/db/test/recd010.tcl index 1078d743b..a2df7a47c 100644 --- a/db/test/recd010.tcl +++ b/db/test/recd010.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: recd010.tcl,v 1.20 2003/01/08 05:51:48 bostic Exp $ +# $Id: recd010.tcl,v 1.21 2004/01/28 03:36:28 bostic Exp $ # # TEST recd010 # TEST Test stability of btree duplicates across btree off-page dup splits diff --git a/db/test/recd011.tcl b/db/test/recd011.tcl index 60458a791..bf118905b 100644 --- a/db/test/recd011.tcl +++ b/db/test/recd011.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: recd011.tcl,v 11.25 2003/08/21 16:44:06 ubell Exp $ +# $Id: recd011.tcl,v 11.26 2004/01/28 03:36:28 bostic Exp $ # # TEST recd011 # TEST Verify that recovery to a specific timestamp works. diff --git a/db/test/recd012.tcl b/db/test/recd012.tcl index d855b5425..abed99de8 100644 --- a/db/test/recd012.tcl +++ b/db/test/recd012.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: recd012.tcl,v 11.29 2003/01/08 05:51:53 bostic Exp $ +# $Id: recd012.tcl,v 11.31 2004/04/19 14:56:13 bostic Exp $ # # TEST recd012 # TEST Test of log file ID management. [#2288] @@ -268,6 +268,7 @@ proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\ if { $num_open == 0 } { # If none are open, do an open first. recd012_open + set num_open [llength $opendbs] } set n [berkdb random_int 0 [expr $num_open - 1]] set pair [lindex $opendbs $n] diff --git a/db/test/recd013.tcl b/db/test/recd013.tcl index 075d4b2ef..36cad9eb4 100644 --- a/db/test/recd013.tcl +++ b/db/test/recd013.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: recd013.tcl,v 11.20 2003/01/08 05:51:57 bostic Exp $ +# $Id: recd013.tcl,v 11.22 2004/09/20 17:06:15 sue Exp $ # # TEST recd013 # TEST Test of cursor adjustment on child transaction aborts. [#2373] @@ -73,7 +73,7 @@ proc recd013 { method { nitems 100 } args } { error_check_good fake_put($i) [$db put -txn $ctxn $key $data] 0 error_check_good ctxn_abort($i) [$ctxn abort] 0 for { set j 1 } { $j < $i } { incr j 2 } { - error_check_good dbc_get($j) [$dbc($j) get -current] \ + error_check_good dbc_get($j):1 [$dbc($j) get -current] \ [list [list $keybase$j \ [pad_data $method $j$alphabet]]] } @@ -90,7 +90,7 @@ proc recd013 { method { nitems 100 } args } { # And verify all the cursors, including the one we just # created. for { set j 1 } { $j <= $i } { incr j 2 } { - error_check_good dbc_get($j) [$dbc($j) get -current] \ + error_check_good dbc_get($j):2 [$dbc($j) get -current] \ [list [list $keybase$j \ [pad_data $method $j$alphabet]]] } @@ -98,7 +98,7 @@ proc recd013 { method { nitems 100 } args } { puts "\t\tRecd$tnum.a.1: Verify cursor stability after init." 
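# Illustrative sketch, not part of the patch: the recd013 hunks rename the
# duplicated check labels (dbc_get($j):1, :2, ...) because error_check_good
# reports failures by label, so distinct suffixes show which verification
# pass failed.  The helper itself is defined in the suite's utilities, not
# here; a minimal assumed form:
proc error_check_good { name result desired } {
	if { [string compare $result $desired] != 0 } {
		error "FAIL: $name: expected \"$desired\", got \"$result\""
	}
}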
for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } { - error_check_good dbc_get($i) [$dbc($i) get -current] \ + error_check_good dbc_get($i):3 [$dbc($i) get -current] \ [list [list $keybase$i [pad_data $method $i$alphabet]]] } @@ -130,7 +130,7 @@ proc recd013 { method { nitems 100 } args } { error_check_good ctxn_abort [$ctxn abort] 0 for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } { - error_check_good dbc_get($i) [$dbc($i) get -current] \ + error_check_good dbc_get($i):4 [$dbc($i) get -current] \ [list [list $keybase$i [pad_data $method $i$alphabet]]] } @@ -198,7 +198,7 @@ proc recd013 { method { nitems 100 } args } { # Verify that no items are deleted. for { set i 1 } { $i <= 2 * $nitems } { incr i } { - error_check_good dbc_get($i) [$dbc($i) get -current] \ + error_check_good dbc_get($i):5 [$dbc($i) get -current] \ [list [list $keybase$i [pad_data $method $i$alphabet]]] } @@ -216,12 +216,11 @@ proc recd013 { method { nitems 100 } args } { } else { set j [expr ($i - 1) / 2 + 1] } - error_check_good dbc_get($i) [$dbc($i) get -current] \ + error_check_good dbc_get($i):6 [$dbc($i) get -current] \ [list [list $keybase$j [pad_data $method $i$alphabet]]] } for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } { - error_check_good dbc_get($i) [$dbc($i) get -current] \ - [list [list "" ""]] + error_check_good dbc_get($i):7 [$dbc($i) get -current] "" } puts "\t\tRecd$tnum.c.3: Delete odd items in child txn." @@ -256,12 +255,11 @@ proc recd013 { method { nitems 100 } args } { } else { set j [expr ($i - 1) / 2 + 1] } - error_check_good dbc_get($i) [$dbc($i) get -current] \ + error_check_good dbc_get($i):8 [$dbc($i) get -current] \ [list [list $keybase$j [pad_data $method $i$alphabet]]] } for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } { - error_check_good dbc_get($i) [$dbc($i) get -current] \ - [list [list "" ""]] + error_check_good dbc_get($i):9 [$dbc($i) get -current] "" } # Clean up cursors. diff --git a/db/test/recd014.tcl b/db/test/recd014.tcl index c47e13ec0..da9207cb0 100644 --- a/db/test/recd014.tcl +++ b/db/test/recd014.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: recd014.tcl,v 1.20 2003/01/08 05:52:01 bostic Exp $ +# $Id: recd014.tcl,v 1.21 2004/01/28 03:36:29 bostic Exp $ # # TEST recd014 # TEST This is a recovery test for create/delete of queue extents. We diff --git a/db/test/recd015.tcl b/db/test/recd015.tcl index b9aca8720..afe0ac883 100644 --- a/db/test/recd015.tcl +++ b/db/test/recd015.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: recd015.tcl,v 1.15 2003/04/18 14:39:09 sandstro Exp $ +# $Id: recd015.tcl,v 1.16 2004/01/28 03:36:29 bostic Exp $ # # TEST recd015 # TEST This is a recovery test for testing lots of prepared txns. diff --git a/db/test/recd016.tcl b/db/test/recd016.tcl index 7357f86db..6f0d3d132 100644 --- a/db/test/recd016.tcl +++ b/db/test/recd016.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: recd016.tcl,v 11.11 2003/01/08 05:52:06 bostic Exp $ +# $Id: recd016.tcl,v 11.13 2004/07/07 19:08:21 carol Exp $ # # TEST recd016 # TEST Test recovery after checksum error. 
@@ -174,4 +174,6 @@ proc recd016 { method args} { } error_check_good db_close [$db close] 0 error_check_good env_close [$dbenv close] 0 + set fixed_len $orig_fixed_len + return } diff --git a/db/test/recd017.tcl b/db/test/recd017.tcl index 4935f4adf..43dfb6421 100644 --- a/db/test/recd017.tcl +++ b/db/test/recd017.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: recd017.tcl,v 11.6 2003/01/08 05:52:09 bostic Exp $ +# $Id: recd017.tcl,v 11.7 2004/01/28 03:36:29 bostic Exp $ # # TEST recd017 # TEST Test recovery and security. This is basically a watered diff --git a/db/test/recd018.tcl b/db/test/recd018.tcl index fa2f28e1a..2f2300cf9 100644 --- a/db/test/recd018.tcl +++ b/db/test/recd018.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: recd018.tcl,v 11.4 2003/01/08 05:52:11 bostic Exp $ +# $Id: recd018.tcl,v 11.5 2004/01/28 03:36:29 bostic Exp $ # # TEST recd018 # TEST Test recover of closely interspersed checkpoints and commits. diff --git a/db/test/recd019.tcl b/db/test/recd019.tcl index 280be1eac..4e4e60517 100644 --- a/db/test/recd019.tcl +++ b/db/test/recd019.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: recd019.tcl,v 11.4 2003/01/08 05:52:14 bostic Exp $ +# $Id: recd019.tcl,v 11.6 2004/07/07 19:08:21 carol Exp $ # # TEST recd019 # TEST Test txn id wrap-around and recovery. @@ -118,4 +118,6 @@ proc recd019 { method {numid 50} args} { set ret [catch {exec $util_path/db_recover -h $testdir} r] error_check_good db_recover $ret 0 + set fixed_len $orig_fixed_len + return } diff --git a/db/test/recd020.tcl b/db/test/recd020.tcl new file mode 100644 index 000000000..6fbe4b78e --- /dev/null +++ b/db/test/recd020.tcl @@ -0,0 +1,80 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# $Id: recd020.tcl,v 11.3 2004/09/22 18:01:05 bostic Exp $ +# +# TEST recd020 +# TEST Test creation of intermediate directories -- an +# TEST undocumented, UNIX-only feature. +# +proc recd020 { method args } { + source ./include.tcl + global tcl_platform + + set args [convert_args $method $args] + set omethod [convert_method $method] + set tnum "020" + set nentries 10 + + if { $tcl_platform(platform) != "unix" } { + puts "Skipping recd$tnum for non-UNIX platform." + return + } + + puts "Recd$tnum ($method):\ + Test creation of intermediate directories in recovery." + + # Create the original intermediate directory. + env_cleanup $testdir + set intdir INTDIR + file mkdir $testdir/$intdir + + set testfile recd$tnum.db + set flags "-create -txn -home $testdir" + + puts "\tRecd$tnum.a: Create environment and populate database." 
+ set env_cmd "berkdb_env $flags" + set env [eval $env_cmd] + error_check_good env [is_valid_env $env] TRUE + + set db [eval berkdb_open \ + -create $omethod $args -env $env -auto_commit $intdir/$testfile] + error_check_good db_open [is_valid_db $db] TRUE + + set txn [$env txn] + set data "data" + for { set i 1 } { $i <= $nentries } { incr i } { + error_check_good db_put [eval \ + {$db put} -txn $txn $i [chop_data $method $data.$i]] 0 + } + error_check_good txn_commit [$txn commit] 0 + error_check_good db_close [$db close] 0 + error_check_good env_close [$env close] 0 + + puts "\tRecd$tnum.b: Remove intermediate directory." + error_check_good directory_there [file exists $testdir/$intdir] 1 + file delete -force $testdir/$intdir + error_check_good directory_gone [file exists $testdir/$intdir] 0 + + puts "\tRecd020.c: Run recovery, recreating intermediate directory." + set env [eval $env_cmd -set_intermediate_dir 0751 -recover] + error_check_good env [is_valid_env $env] TRUE + + puts "\tRecd020.d: Reopen test file to verify success." + set db [berkdb_open -env $env $intdir/$testfile] + error_check_good db_open [is_valid_db $db] TRUE + for { set i 1 } { $i <= $nentries } { incr i } { + set ret [$db get $i] + set k [lindex [lindex $ret 0] 0] + set d [lindex [lindex $ret 0] 1] + error_check_good key $k $i + error_check_good data $d [pad_data $method $data.$i] + } + + # Clean up. + error_check_good db_close [$db close] 0 + error_check_good env_close [$env close] 0 + +} diff --git a/db/test/recd021.tcl b/db/test/recd021.tcl new file mode 100644 index 000000000..e8caae261 --- /dev/null +++ b/db/test/recd021.tcl @@ -0,0 +1,280 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# $Id: recd021.tcl,v 1.2 2004/09/22 18:01:05 bostic Exp $ +# +# TEST recd021 +# TEST Test of failed opens in recovery. +# TEST +# TEST If a file was deleted through the file system (and not +# TEST within Berkeley DB), an error message should appear. +# TEST Test for regular files and subdbs. + +proc recd021 { method args } { + source ./include.tcl + global util_path + + set args [convert_args $method $args] + set omethod [convert_method $method] + set nentries 100 + + puts "\nRecd021: ($method)\ + Test failed opens in recovery." + + # The file ops "remove" and "rename" are done within + # Berkeley DB. A "delete" is done directly on the file + # system (as if the user deleted the file). + # + # First test regular files. + # + foreach op { remove rename delete noop } { + env_cleanup $testdir + puts "\tRecd021: Test $op of file in recovery." + + # Create transactional environment. + set env [berkdb_env -create -home $testdir -txn] + error_check_good is_valid_env [is_valid_env $env] TRUE + + # Create database + puts "\t\tRecd021.a.1: Create and populate file." + + if { $op == "rename" } { + set names {A B} + } else { + set names {A} + } + set name [lindex $names 0] + + set db [eval {berkdb_open \ + -create} $omethod $args -env $env -auto_commit $name.db] + error_check_good dba_open [is_valid_db $db] TRUE + + # Checkpoint. + error_check_good txn_checkpoint [$env txn_checkpoint] 0 + for { set i 1 } { $i <= $nentries } { incr i } { + error_check_good \ + dba_put [$db put -auto_commit $i data$i] 0 + } + error_check_good dba_close [$db close] 0 + + # Do operation on file. + puts "\t\tRecd021.b: Do $op on file." 
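# Illustrative sketch, not part of the patch: do_op is supplied by the test
# utilities.  Per the comment at the top of recd021, "remove" and "rename"
# go through Berkeley DB (so they are logged), while "delete" bypasses the
# library and unlinks the file directly.  An assumed shape; the exact
# dbremove/dbrename invocations are assumptions:
proc do_op { omethod op names txn env } {
	source ./include.tcl
	set name [lindex $names 0]
	switch -exact -- $op {
		remove {
			return [catch {$env dbremove -txn $txn $name.db}]
		}
		rename {
			set new [lindex $names 1]
			return [catch {$env dbrename -txn $txn $name.db $new.db}]
		}
		delete {
			# Delete behind Berkeley DB's back, as a user might.
			fileremove -f $testdir/$name.db
			return 0
		}
		noop {
			return 0
		}
	}
}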
+ set txn [$env txn] + set ret [do_op $omethod $op $names $txn $env] + error_check_good do_op $ret 0 + error_check_good txn_commit [$txn commit] 0 + error_check_good env_close [$env close] 0 + + # Recover. + puts "\t\tRecd021.c: Recover." + set ret [catch {exec $util_path/db_recover -h $testdir} r] + if { $op == "delete" } { + error_check_good external_delete \ + [is_substr $r "Warning: open failed"] 1 + } else { + error_check_good $op $ret 0 + } + + # Clean up. + error_check_good \ + env_remove [berkdb envremove -force -home $testdir] 0 + fileremove -f $testdir/$name.db + } + + # Test subdbs. + if { [is_queue $method] == 1 } { + puts "Recd021: Skipping test of subdbs for method $method." + return + } + + # The first subdb test just does the op, and is comparable + # to the tests for regular files above. + set trunc 0 + set special {} + foreach op { remove rename delete noop } { + recd021_testsubdb $method $op $nentries $special $trunc $args + } + + # The remainder of the tests are executed first with the log intact, + # then with the log truncated at the __db_subdb_name record. + foreach trunc { 0 1 } { + # Test what happens if subdb2 reuses pages formerly in + # subdb1, after removing subdb1. + set special "reuse" + recd021_testsubdb $method remove $nentries $special $trunc $args + + # Test what happens if a new subdb reuses pages formerly + # in subdb1, after removing subdb1. + set special "newdb" + recd021_testsubdb $method remove $nentries $special $trunc $args + + # Now we test what happens if a new subdb if a different access + # method reuses pages formerly in subdb1, after removing subdb1. + set special "newtypedb" + recd021_testsubdb $method remove $nentries $special $trunc $args + } +} + +proc recd021_testsubdb { method op nentries special trunc largs } { + source ./include.tcl + global util_path + + set omethod [convert_method $method] + env_cleanup $testdir + + puts "\tRecd021: \ + Test $op of subdb in recovery ($special trunc = $trunc)." + + # Create transactional environment. + set env [berkdb_env -create -home $testdir -txn] + error_check_good is_valid_env [is_valid_env $env] TRUE + + # Create database with 2 subdbs + puts "\t\tRecd021.d: Create and populate subdbs." + set sname1 S1 + set sname2 S2 + if { $op == "rename" } { + set names {A S1 NEW_S1} + } elseif { $op == "delete" } { + set names {A} + } else { + set names {A S1} + } + set name [lindex $names 0] + + set sdb1 [eval {berkdb_open -create} $omethod \ + $largs -env $env -auto_commit $name.db $sname1] + error_check_good sdb1_open [is_valid_db $sdb1] TRUE + set sdb2 [eval {berkdb_open -create} $omethod \ + $largs -env $env -auto_commit $name.db $sname2] + error_check_good sdb2_open [is_valid_db $sdb2] TRUE + + # Checkpoint. + error_check_good txn_checkpoint [$env txn_checkpoint] 0 + for { set i 1 } { $i <= $nentries } { incr i } { + error_check_good sdb1_put [$sdb1 put -auto_commit $i data$i] 0 + } + set dumpfile dump.s1.$trunc + set ret [exec $util_path/db_dump -dar -f $dumpfile -h $testdir A.db] + for { set i 1 } { $i <= $nentries } { incr i } { + error_check_good sdb2_put [$sdb2 put -auto_commit $i data$i] 0 + } + error_check_good sdb1_close [$sdb1 close] 0 + + # Do operation on subdb. + puts "\t\tRecd021.e: Do $op on file." + set txn [$env txn] + + if { $trunc == 1 } { + # Create a log cursor to mark where we are before + # doing the op. 
+ set logc [$env log_cursor] + set ret [lindex [$logc get -last] 0] + file copy -force $testdir/log.0000000001 $testdir/log.sav + } + + set ret [do_subdb_op $omethod $op $names $txn $env] + error_check_good do_subdb_op $ret 0 + error_check_good txn_commit [$txn commit] 0 + + if { $trunc == 1 } { + # Walk the log and find the __db_subdb_name entry. + set found 0 + while { $found == 0 } { + set lsn [lindex [$logc get -next] 0] + set lfile [lindex $lsn 0] + set loff [lindex $lsn 1] + set logrec [exec $util_path/db_printlog -h $testdir \ + -b $lfile/$loff -e $lfile/$loff] + if { [is_substr $logrec __db_subdb_name] == 1 } { + set found 1 + } + } + # Create the truncated log, and save it for later. + catch [exec dd if=$testdir/log.0000000001 \ + of=$testdir/log.sav count=$loff bs=1 >& /dev/null ] res + } + + # Here we do the "special" thing, if any. We always + # have to close sdb2, but when we do so varies. + switch -exact -- $special { + "" { + error_check_good sdb2_close [$sdb2 close] 0 + } + reuse { + for { set i [expr $nentries + 1] } \ + { $i <= [expr $nentries * 2]} { incr i } { + error_check_good sdb2_put \ + [$sdb2 put -auto_commit $i data$i] 0 + } + error_check_good sdb2_close [$sdb2 close] 0 + set dumpfile dump.s2.$trunc + set ret [exec $util_path/db_dump -dar \ + -f $dumpfile -h $testdir A.db] + } + newdb { + error_check_good sdb2_close [$sdb2 close] 0 + set sname3 S3 + set sdb3 [eval {berkdb_open -create} $omethod \ + $largs -env $env -auto_commit $name.db $sname3] + error_check_good sdb3_open [is_valid_db $sdb3] TRUE + for { set i 1 } { $i <= $nentries } { incr i } { + error_check_good sdb3_put \ + [$sdb3 put -auto_commit $i data$i] 0 + } + error_check_good sdb3_close [$sdb3 close] 0 + } + newtypedb { + error_check_good sdb2_close [$sdb2 close] 0 + set sname4 S4 + set newmethod [different_method $method] + set args [convert_args $newmethod] + set omethod [convert_method $newmethod] + set sdb4 [eval {berkdb_open -create} $omethod \ + $args -env $env -auto_commit $name.db $sname4] + error_check_good sdb4_open [is_valid_db $sdb4] TRUE + for { set i 1 } { $i <= $nentries } { incr i } { + error_check_good sdb4_put \ + [$sdb4 put -auto_commit $i data$i] 0 + } + error_check_good sdb4_close [$sdb4 close] 0 + } + } + + # Close the env. + error_check_good env_close [$env close] 0 + + if { $trunc == 1 } { + # Swap in the truncated log. + file rename -force $testdir/log.sav $testdir/log.0000000001 + } + + # Recover. + puts "\t\tRecd021.f: Recover." + set ret [catch {exec $util_path/db_recover -h $testdir} r] + if { $op == "delete" || $trunc == 1 && $special != "newdb" } { + error_check_good expect_warning \ + [is_substr $r "Warning: open failed"] 1 + } else { + error_check_good subdb_$op $ret 0 + } + + # Clean up. + error_check_good env_remove [berkdb envremove -force -home $testdir] 0 + fileremove -f $testdir/$name.db +} + +proc different_method { method } { + # Queue methods are omitted, since this is for subdb testing. + set methodlist { -btree -rbtree -recno -frecno -rrecno -hash } + + set method [convert_method $method] + set newmethod $method + while { $newmethod == $method } { + set index [berkdb random_int 0 [expr [llength $methodlist] - 1]] + set newmethod [lindex $methodlist $index] + } + return $newmethod +} diff --git a/db/test/recd15scr.tcl b/db/test/recd15scr.tcl index df807a7fa..ef6fe7d03 100644 --- a/db/test/recd15scr.tcl +++ b/db/test/recd15scr.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. 
# -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: recd15scr.tcl,v 1.6 2003/01/08 05:52:16 bostic Exp $ +# $Id: recd15scr.tcl,v 1.7 2004/01/28 03:36:29 bostic Exp $ # # Recd15 - lots of txns - txn prepare script # Usage: recd15script envcmd dbcmd gidf numtxns diff --git a/db/test/recdscript.tcl b/db/test/recdscript.tcl index 7a47387be..559d3407a 100644 --- a/db/test/recdscript.tcl +++ b/db/test/recdscript.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: recdscript.tcl,v 11.5 2003/01/08 05:52:18 bostic Exp $ +# $Id: recdscript.tcl,v 11.6 2004/01/28 03:36:29 bostic Exp $ # # Recovery txn prepare script # Usage: recdscript op dir envcmd dbfile cmd diff --git a/db/test/rep001.tcl b/db/test/rep001.tcl index 844a42f8e..94163986a 100644 --- a/db/test/rep001.tcl +++ b/db/test/rep001.tcl @@ -1,40 +1,63 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2001-2003 +# Copyright (c) 2001-2004 # Sleepycat Software. All rights reserved. # -# $Id: rep001.tcl,v 1.23 2003/09/04 23:41:12 bostic Exp $ +# $Id: rep001.tcl,v 1.35 2004/09/22 18:01:05 bostic Exp $ # # TEST rep001 # TEST Replication rename and forced-upgrade test. # TEST -# TEST Run a modified version of test001 in a replicated master -# TEST environment; verify that the database on the client is correct. +# TEST Run rep_test in a replicated master environment. +# TEST Verify that the database on the client is correct. # TEST Next, remove the database, close the master, upgrade the # TEST client, reopen the master, and make sure the new master can -# TEST correctly run test001 and propagate it in the other direction. +# TEST correctly run rep_test and propagate it in the other direction. proc rep001 { method { niter 1000 } { tnum "001" } args } { global passwd global has_crypto - puts "Rep$tnum: Replication sanity test." - - set envargs "" - rep001_sub $method $niter $tnum $envargs $args - - # Skip remainder of test if release does not support encryption. - if { $has_crypto == 0 } { - return + # Run tests with and without recovery. If we're doing testing + # of in-memory logging, skip the combination of recovery + # and in-memory logging -- it doesn't make sense. + set logsets [create_logsets 2] + set saved_args $args + + foreach recopt { "" "-recover" } { + foreach l $logsets { + set logindex [lsearch -exact $l "in-memory"] + if { $recopt == "-recover" && $logindex != -1 } { + puts "Skipping test with -recover for in-memory logs." + continue + } + set envargs "" + set args $saved_args + puts "Rep$tnum: Replication sanity test ($method $recopt)." + puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client logs are [lindex $l 1]" + rep001_sub $method \ + $niter $tnum $envargs $l $recopt $args + + # Skip encrypted tests if not supported. + if { $has_crypto == 0 } { + continue + } + + # Run the same tests with security. + append envargs " -encryptaes $passwd " + append args " -encrypt " + puts "Rep$tnum: Replication and security sanity test\ + ($method $recopt)." + puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client logs are [lindex $l 1]" + rep001_sub $method \ + $niter $tnum $envargs $l $recopt $args + } } - - puts "Rep$tnum: Replication and security sanity test." 
- append envargs " -encryptaes $passwd " - append args " -encrypt " - rep001_sub $method $niter $tnum $envargs $args } -proc rep001_sub { method niter tnum envargs largs } { +proc rep001_sub { method niter tnum envargs logset recargs largs } { source ./include.tcl global testdir global encrypt @@ -49,96 +72,84 @@ proc rep001_sub { method niter tnum envargs largs } { file mkdir $masterdir file mkdir $clientdir - if { [is_record_based $method] == 1 } { - set checkfunc test001_recno.check - } else { - set checkfunc test001.check - } + set m_logtype [lindex $logset 0] + set c_logtype [lindex $logset 1] + + # In-memory logs require a large log buffer, and cannot + # be used with -txn nosync. Adjust the args for master + # and client. + set m_logargs [adjust_logargs $m_logtype] + set c_logargs [adjust_logargs $c_logtype] + set m_txnargs [adjust_txnargs $m_logtype] + set c_txnargs [adjust_txnargs $c_logtype] # Open a master. repladd 1 - set masterenv \ - [eval {berkdb_env -create -lock_max 2500 -log_max 1000000} \ - $envargs {-home $masterdir -txn nosync -rep_master -rep_transport \ - [list 1 replsend]}] + set env_cmd(M) "berkdb_env_noerr -create -lock_max 2500 \ + -log_max 1000000 $envargs $m_logargs $recargs \ + -home $masterdir -errpfx MASTER $m_txnargs -rep_master \ + -rep_transport \[list 1 replsend\]" +# set env_cmd(M) "berkdb_env_noerr -create -lock_max 2500 \ +# -log_max 1000000 $envargs $m_logargs $recargs \ +# -home $masterdir \ +# -verbose {rep on} -errfile /dev/stderr \ +# -errpfx MASTER $m_txnargs -rep_master \ +# -rep_transport \[list 1 replsend\]" + set masterenv [eval $env_cmd(M)] error_check_good master_env [is_valid_env $masterenv] TRUE # Open a client repladd 2 - set clientenv [eval {berkdb_env -create} $envargs -txn nosync \ - -lock_max 2500 {-home $clientdir -rep_client -rep_transport \ - [list 2 replsend]}] + set env_cmd(C) "berkdb_env_noerr -create -lock_max 2500 \ + -log_max 1000000 $envargs $c_logargs $recargs \ + -home $clientdir -errpfx CLIENT $c_txnargs -rep_client \ + -rep_transport \[list 2 replsend\]" +# set env_cmd(C) "berkdb_env_noerr -create -lock_max 2500 \ +# -log_max 1000000 $envargs $c_logargs $recargs \ +# -home $clientdir \ +# -verbose {rep on} -errfile /dev/stderr \ +# -errpfx CLIENT $c_txnargs -rep_client \ +# -rep_transport \[list 2 replsend\]" + set clientenv [eval $env_cmd(C)] error_check_good client_env [is_valid_env $clientenv] TRUE # Bring the client online by processing the startup messages. - while { 1 } { - set nproced 0 + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $clientenv 2] + # Run rep_test in the master (and update client). + puts "\tRep$tnum.a:\ + Running rep_test in replicated env ($envargs $recargs)." + eval rep_test $method $masterenv NULL $niter 0 0 0 $largs + process_msgs $envlist - if { $nproced == 0 } { - break - } - } - - # Run a modified test001 in the master (and update client). - puts "\tRep$tnum.a: Running test001 in replicated env." - eval test001 $method $niter 0 0 $tnum -env $masterenv $largs - while { 1 } { - set nproced 0 + puts "\tRep$tnum.b: Verifying client database contents." 
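# Illustrative sketch, not part of the patch: process_msgs replaces the
# open-coded message loops that this patch deletes throughout the rep tests.
# From those deleted loops its behavior is evident: pump replprocessqueue
# for every {env envid} pair until a complete pass moves no messages.  The
# real helper in reputils.tcl may take extra arguments; roughly:
proc process_msgs { elist } {
	while { 1 } {
		set nproced 0
		foreach pair $elist {
			set env [lindex $pair 0]
			set envid [lindex $pair 1]
			incr nproced [replprocessqueue $env $envid]
		}
		if { $nproced == 0 } {
			break
		}
	}
}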
+ set dbname "test.db" + set masterdb [berkdb_open -env $masterenv -auto_commit $dbname] + set clientdb [berkdb_open -env $clientenv -auto_commit $dbname] - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $clientenv 2] + error_check_good compare_master_and_client [db_compare \ + $masterdb $clientdb $masterdir/$dbname $clientdir/$dbname] 0 - if { $nproced == 0 } { - break - } - } - - # Verify the database in the client dir. We assume we know - # the name of the database created by test001 -- it is - # test$tnum.db. - puts "\tRep$tnum.b: Verifying client database contents." - set testdir [get_home $masterenv] - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - open_and_dump_file test$tnum.db $clientenv $t1 \ - $checkfunc dump_file_direction "-first" "-next" + error_check_good master_close [$masterdb close] 0 + error_check_good client_close [$clientdb close] 0 # Remove the file (and update client). puts "\tRep$tnum.c: Remove the file on the master and close master." error_check_good remove \ - [$masterenv dbremove -auto_commit test$tnum.db] 0 + [$masterenv dbremove -auto_commit $dbname] 0 error_check_good masterenv_close [$masterenv close] 0 - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $clientenv 2] - - if { $nproced == 0 } { - break - } - } + process_msgs $envlist puts "\tRep$tnum.d: Upgrade client." set newmasterenv $clientenv error_check_good upgrade_client [$newmasterenv rep_start -master] 0 - # Run test001 in the new master - puts "\tRep$tnum.e: Running test001 in new master." - eval test001 $method $niter 0 0 $tnum -env $newmasterenv $largs - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $newmasterenv 2] - - if { $nproced == 0 } { - break - } - } + # Run rep_test in the new master + puts "\tRep$tnum.e: Running rep_test in new master." + eval rep_test $method $newmasterenv NULL $niter 0 0 0 $largs + set envlist "{$newmasterenv 2}" + process_msgs $envlist puts "\tRep$tnum.f: Reopen old master as client and catch up." # Throttle master so it can't send everything at once @@ -147,51 +158,34 @@ proc rep001_sub { method niter tnum envargs largs } { -txn nosync -lock_max 2500 \ {-home $masterdir -rep_client -rep_transport [list 1 replsend]}] error_check_good newclient_env [is_valid_env $newclientenv] TRUE - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $newclientenv 1] - incr nproced [replprocessqueue $newmasterenv 2] - - if { $nproced == 0 } { - break - } - } - set stats [$newmasterenv rep_stat] - set nthrottles [getstats $stats {Transmission limited}] - error_check_bad nthrottles $nthrottles -1 - error_check_bad nthrottles $nthrottles 0 - - # Run a modified test001 in the new master (and update client). - puts "\tRep$tnum.g: Running test001 in new master." - eval test001 $method \ - $niter $niter 1 $tnum -env $newmasterenv $largs - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $newclientenv 1] - incr nproced [replprocessqueue $newmasterenv 2] - - if { $nproced == 0 } { - break - } + set envlist "{$newclientenv 1} {$newmasterenv 2}" + process_msgs $envlist + + # If we're running with a low number of iterations, we might + # not have had to throttle the data transmission; skip the check. 
+ if { $niter > 200 } { + set nthrottles \ + [stat_field $newmasterenv rep_stat "Transmission limited"] + error_check_bad nthrottles $nthrottles -1 + error_check_bad nthrottles $nthrottles 0 } + # Run a modified rep_test in the new master (and update client). + puts "\tRep$tnum.g: Running rep_test in new master." + eval rep_test $method \ + $newmasterenv NULL $niter $niter $niter 0 $largs + process_msgs $envlist + # Verify the database in the client dir. puts "\tRep$tnum.h: Verifying new client database contents." - set testdir [get_home $newmasterenv] - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - open_and_dump_file test$tnum.db $newclientenv $t1 \ - $checkfunc dump_file_direction "-first" "-next" - - if { [string compare [convert_method $method] -recno] != 0 } { - filesort $t1 $t3 - } - error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0 + set masterdb [berkdb_open -env $newmasterenv -auto_commit $dbname] + set clientdb [berkdb_open -env $newclientenv -auto_commit $dbname] + error_check_good compare_master_and_client [db_compare \ + $masterdb $clientdb $masterdir/$dbname $clientdir/$dbname] 0 + error_check_good master_close [$masterdb close] 0 + error_check_good client_close [$clientdb close] 0 error_check_good newmasterenv_close [$newmasterenv close] 0 error_check_good newclientenv_close [$newclientenv close] 0 diff --git a/db/test/rep002.tcl b/db/test/rep002.tcl index 34a7eebc6..1a3683f2e 100644 --- a/db/test/rep002.tcl +++ b/db/test/rep002.tcl @@ -1,28 +1,53 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2002-2003 +# Copyright (c) 2002-2004 # Sleepycat Software. All rights reserved. # -# $Id: rep002.tcl,v 11.25 2003/10/14 14:38:34 sandstro Exp $ +# $Id: rep002.tcl,v 11.37 2004/09/22 18:01:05 bostic Exp $ # -# TEST rep002 +# TEST rep002 # TEST Basic replication election test. # TEST -# TEST Run a modified version of test001 in a replicated master environment; -# TEST hold an election among a group of clients to make sure they select -# TEST a proper master from amongst themselves, in various scenarios. +# TEST Run a modified version of test001 in a replicated master +# TEST environment; hold an election among a group of clients to +# TEST make sure they select a proper master from amongst themselves, +# TEST in various scenarios. proc rep002 { method { niter 10 } { nclients 3 } { tnum "002" } args } { - source ./include.tcl - global elect_timeout elect_serial - - set elect_timeout 5000000 if { [is_record_based $method] == 1 } { puts "Rep002: Skipping for method $method." return } + set logsets [create_logsets [expr $nclients + 1]] + + # Run the body of the test with and without recovery. + set recopts { "" "-recover" } + foreach r $recopts { + foreach l $logsets { + set logindex [lsearch -exact $l "in-memory"] + if { $r == "-recover" && $logindex != -1 } { + puts "Skipping test with -recover for in-memory logs." + } + puts "Rep$tnum ($method $r):\ + Replication election test with $nclients clients." 
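# Illustrative sketch, not part of the patch: create_logsets (from the
# suite's utilities) returns a list of log configurations, each entry a
# per-site list of "on-disk" or "in-memory", which the callers only probe
# with lsearch.  One plausible shape -- the real helper may prune the
# combinations -- is the full cross product:
proc create_logsets { nsites } {
	set logsets {}
	for { set i 0 } { $i < [expr 1 << $nsites] } { incr i } {
		set l {}
		for { set j 0 } { $j < $nsites } { incr j } {
			if { [expr ($i >> $j) & 1] } {
				lappend l "in-memory"
			} else {
				lappend l "on-disk"
			}
		}
		lappend logsets $l
	}
	return $logsets
}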
+ puts "Rep$tnum: Master logs are [lindex $l 0]" + for { set i 0 } { $i < $nclients } { incr i } { + puts "Rep$tnum: Client $i logs are\ + [lindex $l [expr $i + 1]]" + } + rep002_sub $method $niter $nclients $tnum $l $r $args + } + } +} + +proc rep002_sub { method niter nclients tnum logset recargs largs } { + source ./include.tcl + global elect_timeout elect_serial + global is_windows_test + set elect_timeout 5000000 + env_cleanup $testdir set qdir $testdir/MSGQUEUEDIR @@ -30,24 +55,31 @@ proc rep002 { method { niter 10 } { nclients 3 } { tnum "002" } args } { set masterdir $testdir/MASTERDIR file mkdir $masterdir + set m_logtype [lindex $logset 0] + set m_logargs [adjust_logargs $m_logtype] + set m_txnargs [adjust_txnargs $m_logtype] for { set i 0 } { $i < $nclients } { incr i } { set clientdir($i) $testdir/CLIENTDIR.$i file mkdir $clientdir($i) + set c_logtype($i) [lindex $logset [expr $i + 1]] + set c_logargs($i) [adjust_logargs $c_logtype($i)] + set c_txnargs($i) [adjust_txnargs $c_logtype($i)] } - puts "Rep$tnum: Replication election test with $nclients clients." - # Open a master. repladd 1 set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 \ - -home $masterdir -errpfx MASTER \ - -txn nosync -rep_master -rep_transport \[list 1 replsend\]" + -home $masterdir $m_logargs -errpfx MASTER \ + $m_txnargs -rep_master -rep_transport \[list 1 replsend\]" # set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 \ -# -home $masterdir -errpfx MASTER -errfile /dev/stderr \ -# -verbose {rep on} -txn nosync -rep_master \ +# -home $masterdir $m_logargs -errpfx MASTER -errfile /dev/stderr \ +# -verbose {rep on} $m_txnargs -rep_master \ # -rep_transport \[list 1 replsend\]" - set masterenv [eval $env_cmd(M)] + # In an election test, the -recovery arg must not go + # in the env_cmd string because that is going to be + # passed to a child process. + set masterenv [eval $env_cmd(M) $recargs] error_check_good master_env [is_valid_env $masterenv] TRUE # Open the clients. @@ -55,37 +87,30 @@ proc rep002 { method { niter 10 } { nclients 3 } { tnum "002" } args } { set envid [expr $i + 2] repladd $envid set env_cmd($i) "berkdb_env_noerr -create -home $clientdir($i) \ - -txn nosync -rep_client -errpfx CLIENT$i \ + $c_logargs($i) $c_txnargs($i) -rep_client -errpfx CLIENT$i \ -rep_transport \[list $envid replsend\]" # set env_cmd($i) "berkdb_env_noerr -create -home $clientdir($i) \ -# -verbose {rep on} -errfile /dev/stderr \ -# -txn nosync -rep_client -errpfx CLIENT$i \ +# $c_logargs($i) -verbose {rep on} -errfile /dev/stderr \ +# $c_txnargs($i) -rep_client -errpfx CLIENT$i \ # -rep_transport \[list $envid replsend\]" - set clientenv($i) [eval $env_cmd($i)] + set clientenv($i) [eval $env_cmd($i) $recargs] error_check_good \ client_env($i) [is_valid_env $clientenv($i)] TRUE } - # Run a modified test001 in the master. - puts "\tRep$tnum.a: Running test001 in replicated env." - eval test001 $method $niter 0 0 $tnum -env $masterenv $args - # Loop, processing first the master's messages, then the client's, # until both queues are empty. - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $masterenv 1] - - for { set i 0 } { $i < $nclients } { incr i } { - set envid [expr $i + 2] - incr nproced [replprocessqueue $clientenv($i) $envid] - } - - if { $nproced == 0 } { - break - } + set envlist {} + lappend envlist "$masterenv 1" + for { set i 0 } { $i < $nclients } { incr i } { + lappend envlist "$clientenv($i) [expr $i + 2]" } + process_msgs $envlist + + # Run a modified test001 in the master. 
+ puts "\tRep$tnum.a: Running test001 in replicated env." + eval test001 $method $niter 0 0 $tnum -env $masterenv $largs + process_msgs $envlist # Verify the database in the client dir. for { set i 0 } { $i < $nclients } { incr i } { @@ -118,7 +143,7 @@ proc rep002 { method { niter 10 } { nclients 3 } { tnum "002" } args } { set elect_pipe($i) INVALID } set elect_pipe(0) [start_election C0 \ - $qdir $env_cmd(0) [expr $nclients + 1] 20 $elect_timeout] + $qdir $env_cmd(0) [expr $nclients + 1] $nclients 20 $elect_timeout] tclsleep 2 @@ -129,13 +154,13 @@ proc rep002 { method { niter 10 } { nclients 3 } { tnum "002" } args } { set nm 0 set nm2 0 - incr nproced [replprocessqueue $masterenv 1 0 he nm] if { $he == 1 } { incr elect_serial set elect_pipe(M) [start_election CM $qdir \ - $env_cmd(M) [expr $nclients + 1] 0 $elect_timeout] + $env_cmd(M) [expr $nclients + 1] $nclients \ + 0 $elect_timeout] set got_hold_elect(M) 1 } if { $nm != 0 } { @@ -161,7 +186,8 @@ proc rep002 { method { niter 10 } { nclients 3 } { tnum "002" } args } { incr elect_serial set pfx CHILD$i.$elect_serial set elect_pipe($i) [start_election $pfx $qdir \ - $env_cmd($i) [expr $nclients + 1] 0 \ + $env_cmd($i) [expr $nclients + 1] \ + $nclients 0 \ $elect_timeout] set got_hold_elect($i) 1 } @@ -196,115 +222,102 @@ proc rep002 { method { niter 10 } { nclients 3 } { tnum "002" } args } { # Make sure all the clients are synced up and ready to be good # voting citizens. error_check_good master_flush [$masterenv rep_flush] 0 - while { 1 } { - set nproced 0 - incr nproced [replprocessqueue $masterenv 1 0] - for { set i 0 } { $i < $nclients } { incr i } { - incr nproced [replprocessqueue $clientenv($i) \ - [expr $i + 2] 0] - } - - if { $nproced == 0 } { - break - } - } + process_msgs $envlist # Now hold another election in the first client, this time with # a dead master. puts "\tRep$tnum.e: Starting election with dead master." error_check_good masterenv_close [$masterenv close] 0 + set envlist [lreplace $envlist 0 0] + set m "Rep$tnum.e" + # We're not going to be using err_cmd, so initialize to "none". + # Client #1 has priority 100; everyone else has priority 10. for { set i 0 } { $i < $nclients } { incr i } { - replclear [expr $i + 2] + set err_cmd($i) "none" + set crash($i) 0 + if { $i == 1 } { + set pri($i) 100 + } else { + set pri($i) 10 + } } - - incr elect_serial - set elect_pipe(0) [start_election C0 \ - $qdir $env_cmd(0) $nclients 20 $elect_timeout] - - tclsleep 2 - - # Process messages, and verify that the client with the highest - # priority--client #1--wins. - set got_newmaster 0 - set tries 10 - while { 1 } { - set nproced 0 - set he 0 - set nm 0 - - for { set i 0 } { $i < $nclients } { incr i } { - set he 0 - set envid [expr $i + 2] - set child_done [check_election $elect_pipe($i) nm2] - if { $got_newmaster == 0 && $nm2 != 0} { - error_check_good newmaster_is_master $nm2 \ - [expr 1 + 2] - set got_newmaster $nm2 - if { $nm2 == $envid } { - error_check_good make_master($i) \ - [$clientenv($i) rep_start -master] \ - 0 - } - } - incr nproced \ - [replprocessqueue $clientenv($i) $envid 0 he nm] - if { $he == 1 } { - - # Client #1 has priority 100; everyone else - # has priority 10. 
- if { $i == 1 } { - set pri 100 - } else { - set pri 10 - } - # error_check_bad client(0)_in_elect $i 0 - incr elect_serial - set pfx CHILD$i.$elect_serial - set elect_pipe($i) [start_election $pfx \ - $qdir $env_cmd($i) $nclients \ - $pri $elect_timeout] - set got_hold_elect($i) 1 - } - if { $nm != 0 } { - error_check_good newmaster_is_master $nm \ - [expr 1 + 2] - set got_newmaster $nm - - # If this env is the new master, it needs to - # configure itself as such--this is a different - # env handle from the one that performed the - # election. - if { $nm == $envid } { - error_check_good make_master($i) \ - [$clientenv($i) rep_start -master] \ - 0 - } - } + set nsites $nclients + set nvotes $nclients + # The elector calls the first election. The expected winner + # is $win. + set elector 1 + set win 1 + run_election env_cmd envlist err_cmd pri crash $qdir $m \ + $elector $nsites $nvotes $nclients $win 1 "test$tnum.db" + + # Hold an election with two clients at the same (winning) priority. + # Make sure that the tie gets broken, and that the third client + # does not win. + puts "\tRep$tnum.f: Election with two clients at same priority." + set m "Rep$tnum.f" + # Clients 0 and 1 have high, matching priority. + for { set i 0 } { $i < $nclients } { incr i } { + if { $i >= 2 } { + set pri($i) 10 + } else { + set pri($i) 100 } + } - # We need to wait around to make doubly sure that the - # election has finished... - if { $nproced == 0 } { - incr tries -1 - if { $tries == 0 } { - break - } else { - tclsleep 1 - } + # Run several elections. + set elections 5 + for { set i 0 } { $i < $elections } { incr i } { + # + # The expected winner is 0 or 1. Since run_election can only + # handle one expected winner, catch the result and inspect it. + # + set elector 0 + set win 1 + set altwin 0 + if {[catch {eval run_election \ + env_cmd envlist err_cmd pri crash $qdir $m $elector $nsites \ + $nvotes $nclients $win 1 "test$tnum.db"} res]} { + # + # If the primary winner didn't win, make sure + # the alternative winner won. Do all the cleanup + # for that winner normally done in run_election: + # open and close the new master, then reopen as a + # client for the next cycle. + # + puts "\t$m: Election $i: Alternate winner $altwin won." + error_check_good check_winner [is_substr \ + $res "expected 3, got [expr $altwin + 2]"] 1 + error_check_good make_master \ + [$clientenv($altwin) rep_start -master] 0 + + cleanup_elections + process_msgs $envlist + + error_check_good newmaster_close \ + [$clientenv($altwin) close] 0 + set clientenv($altwin) [eval $env_cmd($altwin)] + error_check_good cl($altwin) \ + [is_valid_env $clientenv($altwin)] TRUE + set newelector "$clientenv($altwin) [expr $altwin + 2]" + set envlist [lreplace $envlist $altwin $altwin $newelector] } else { - set tries 10 + puts "\t$m: Election $i: Primary winner $win won." } + process_msgs $envlist } - # Verify that client #1 is actually the winner. - error_check_good "client 1 wins" $got_newmaster [expr 1 + 2] - - cleanup_elections - - for { set i 0 } { $i < $nclients } { incr i } { - error_check_good clientenv_close($i) [$clientenv($i) close] 0 + foreach pair $envlist { + set cenv [lindex $pair 0] + error_check_good cenv_close [$cenv close] 0 } replclose $testdir/MSGQUEUEDIR + + # If we're on Windows, we need to forcibly remove some of the + # files created when the alternate winner won. 
+ if { $is_windows_test == 1 } { + set filelist [glob -nocomplain $testdir/CLIENTDIR.$altwin/*] + fileremove -f $filelist + } } diff --git a/db/test/rep003.tcl b/db/test/rep003.tcl index 8cee906f9..13ef257da 100644 --- a/db/test/rep003.tcl +++ b/db/test/rep003.tcl @@ -1,47 +1,82 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2002-2003 +# Copyright (c) 2002-2004 # Sleepycat Software. All rights reserved. # -# $Id: rep003.tcl,v 11.13 2003/09/04 23:41:12 bostic Exp $ +# $Id: rep003.tcl,v 11.19 2004/09/22 18:01:05 bostic Exp $ # -# TEST rep003 +# TEST rep003 # TEST Repeated shutdown/restart replication test # TEST -# TEST Run a quick put test in a replicated master environment; start up, -# TEST shut down, and restart client processes, with and without recovery. -# TEST To ensure that environment state is transient, use DB_PRIVATE. +# TEST Run a quick put test in a replicated master environment; +# TEST start up, shut down, and restart client processes, with +# TEST and without recovery. To ensure that environment state +# TEST is transient, use DB_PRIVATE. proc rep003 { method { tnum "003" } args } { source ./include.tcl - global testdir rep003_dbname rep003_omethod rep003_oargs - - env_cleanup $testdir - set niter 10 - set rep003_dbname rep003.db + global rep003_dbname rep003_omethod rep003_oargs if { [is_record_based $method] } { puts "Rep$tnum: Skipping for method $method" return } + set rep003_dbname rep003.db set rep003_omethod [convert_method $method] set rep003_oargs [convert_args $method $args] + # Run the body of the test with and without recovery. If we're + # testing in-memory logging, skip the combination of recovery + # and in-memory logging -- it doesn't make sense. + + set logsets [create_logsets 2] + foreach recopt { "" "-recover" } { + foreach l $logsets { + set logindex [lsearch -exact $l "in-memory"] + if { $recopt == "-recover" && $logindex != -1 } { + puts "Rep$tnum: Skipping for\ + in-memory logs with -recover." + continue + } + puts "Rep$tnum ($method $recopt):\ + Replication repeated-startup test." + puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client logs are [lindex $l 1]" + rep003_sub $method $tnum $l $recopt $args + } + } +} + +proc rep003_sub { method tnum logset recargs largs } { + source ./include.tcl + + env_cleanup $testdir + replsetup $testdir/MSGQUEUEDIR set masterdir $testdir/MASTERDIR - file mkdir $masterdir - set clientdir $testdir/CLIENTDIR + + file mkdir $masterdir file mkdir $clientdir - puts "Rep$tnum: Replication repeated-startup test" + set m_logtype [lindex $logset 0] + set c_logtype [lindex $logset 1] + + # In-memory logs require a large log buffer, and cannot + # be used with -txn nosync. This test already requires + # -txn, so adjust the logargs only. + set m_logargs [adjust_logargs $m_logtype] + set c_logargs [adjust_logargs $c_logtype] # Open a master. repladd 1 - set masterenv [berkdb_env_noerr -create -log_max 1000000 \ - -home $masterdir -txn -rep_master -rep_transport [list 1 replsend]] + set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 \ + -errpfx MASTER -errfile /dev/stderr \ + -home $masterdir -txn $m_logargs -rep_master \ + -rep_transport \[list 1 replsend\]" + set masterenv [eval $env_cmd(M) $recargs] error_check_good master_env [is_valid_env $masterenv] TRUE puts "\tRep$tnum.a: Simple client startup test." @@ -51,8 +86,10 @@ proc rep003 { method { tnum "003" } args } { # Open a client. 
repladd 2 - set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \ - -rep_client -rep_transport [list 2 replsend]] + set env_cmd(C) "berkdb_env_noerr -create -private -home $clientdir \ + -txn $c_logargs -errpfx CLIENT -errfile /dev/stderr \ + -rep_client -rep_transport \[list 2 replsend\]" + set clientenv [eval $env_cmd(C) $recargs] error_check_good client_env [is_valid_env $clientenv] TRUE # Put another quick item. @@ -60,16 +97,8 @@ proc rep003 { method { tnum "003" } args } { # Loop, processing first the master's messages, then the client's, # until both queues are empty. - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $clientenv 2] - - if { $nproced == 0 } { - break - } - } + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist rep003_check $clientenv A1 a-one rep003_check $clientenv A2 a-two @@ -87,18 +116,10 @@ proc rep003 { method { tnum "003" } args } { error_check_good client_env [is_valid_env $clientenv] TRUE # Loop letting the client and master sync up and get the - # environment initialized - - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $clientenv 2] - - if { $nproced == 0 } { - break - } - } + # environment initialized. It's a new client env so + # reinitialize the envlist as well. + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist # The items from part A should be present at all times-- # if we roll them back, we've screwed up. [#5709] @@ -143,16 +164,8 @@ proc rep003 { method { tnum "003" } args } { # Loop, processing first the master's messages, then the client's, # until both queues are empty. - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $clientenv 2] - - if { $nproced == 0 } { - break - } - } + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist # The items from part A should be present at all times-- # if we roll them back, we've screwed up. [#5709] @@ -201,16 +214,8 @@ proc rep003 { method { tnum "003" } args } { # Loop, processing first the master's messages, then the client's, # until both queues are empty. - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $clientenv 2] - - if { $nproced == 0 } { - break - } - } + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist rep003_put $masterenv D2 d-two # Loop, processing first the master's messages, then the client's, diff --git a/db/test/rep005.tcl b/db/test/rep005.tcl index a13b893c2..82bbb32f8 100644 --- a/db/test/rep005.tcl +++ b/db/test/rep005.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2002-2003 +# Copyright (c) 2002-2004 # Sleepycat Software. All rights reserved. # -# $Id: rep005.tcl,v 11.29 2003/10/27 15:19:18 sandstro Exp $ +# $Id: rep005.tcl,v 11.40 2004/09/22 18:01:05 bostic Exp $ # # TEST rep005 # TEST Replication election test with error handling. @@ -14,18 +14,39 @@ # TEST locations in the election path. proc rep005 { method args } { - source ./include.tcl - if { [is_btree $method] == 0 } { puts "Rep005: Skipping for method $method." 
return } - global rand_init - error_check_good set_random_seed [berkdb srand $rand_init] 0 set tnum "005" set niter 10 set nclients 3 + set logsets [create_logsets [expr $nclients + 1]] + + # We don't want to run this with -recover - it takes too + # long and doesn't cover any new ground. + set recargs "" + foreach l $logsets { + puts "Rep$tnum ($recargs): Replication election\ + error test with $nclients clients." + puts -nonewline "Rep$tnum: Started at: " + puts [clock format [clock seconds] -format "%H:%M %D"] + puts "Rep$tnum: Master logs are [lindex $l 0]" + for { set i 0 } { $i < $nclients } { incr i } { + puts "Rep$tnum: Client $i logs are\ + [lindex $l [expr $i + 1]]" + } + rep005_sub $method $tnum \ + $niter $nclients $l $recargs $args + } +} + +proc rep005_sub { method tnum niter nclients logset recargs largs } { + source ./include.tcl + global rand_init + error_check_good set_random_seed [berkdb srand $rand_init] 0 + env_cleanup $testdir set qdir $testdir/MSGQUEUEDIR @@ -33,105 +54,63 @@ proc rep005 { method args } { set masterdir $testdir/MASTERDIR file mkdir $masterdir + set m_logtype [lindex $logset 0] + set m_logargs [adjust_logargs $m_logtype] + set m_txnargs [adjust_txnargs $m_logtype] for { set i 0 } { $i < $nclients } { incr i } { set clientdir($i) $testdir/CLIENTDIR.$i file mkdir $clientdir($i) + set c_logtype($i) [lindex $logset [expr $i + 1]] + set c_logargs($i) [adjust_logargs $c_logtype($i)] + set c_txnargs($i) [adjust_txnargs $c_logtype($i)] } - puts -nonewline \ - "Rep$tnum: Replication election error test with $nclients clients." - puts -nonewline \ - " Started at: " - puts [clock format [clock seconds] -format "%H:%M %D"] - # Open a master. repladd 1 - set env_cmd(M) "berkdb_env -create -log_max 1000000 -home $masterdir \ - -txn nosync -rep_master -rep_transport \[list 1 replsend\]" + set env_cmd(M) "berkdb_env -create -log_max 1000000 \ + -home $masterdir $m_logargs \ + $m_txnargs -rep_master -rep_transport \[list 1 replsend\]" # To debug elections, uncomment the line below and further below # for the clients to turn on verbose. Also edit reputils.tcl # in proc start_election and swap the 2 commented lines with # their counterpart. # set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 \ -# -home $masterdir -txn nosync -rep_master \ +# -home $masterdir $m_logargs \ +# $m_txnargs -rep_master \ # -verbose {rep on} -errpfx MASTER -errfile /dev/stderr \ # -rep_transport \[list 1 replsend\]" - set masterenv [eval $env_cmd(M)] + set masterenv [eval $env_cmd(M) $recargs] error_check_good master_env [is_valid_env $masterenv] TRUE + set envlist {} + lappend envlist "$masterenv 1" + # Open the clients. for { set i 0 } { $i < $nclients } { incr i } { set envid [expr $i + 2] repladd $envid set env_cmd($i) "berkdb_env -create -home $clientdir($i) \ - -txn nosync -rep_client \ + $c_logargs($i) $c_txnargs($i) -rep_client \ -rep_transport \[list $envid replsend\]" # set env_cmd($i) "berkdb_env_noerr -create -home $clientdir($i) \ # -verbose {rep on} -errpfx CLIENT$i -errfile /dev/stderr \ -# -txn nosync -rep_client \ +# $c_logargs($i) $c_txnargs($i) -rep_client \ # -rep_transport \[list $envid replsend\]" - set clientenv($i) [eval $env_cmd($i)] + set clientenv($i) [eval $env_cmd($i) $recargs] error_check_good \ client_env($i) [is_valid_env $clientenv($i)] TRUE + lappend envlist "$clientenv($i) $envid" } # Run a modified test001 in the master. puts "\tRep$tnum.a: Running test001 in replicated env." 
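# adjust_logargs and adjust_txnargs, used when building the env commands
# above, are defined elsewhere in the test suite (reputils.tcl), not in
# this hunk.  Going by the comments in these tests -- in-memory logs need
# a large log buffer and cannot be combined with "-txn nosync" -- they
# presumably behave roughly like the sketch below (the buffer size is an
# assumed value):
proc adjust_logargs_sketch { logtype } {
	if { $logtype == "in-memory" } {
		# In-memory logging needs a generous log buffer.
		return " -log_inmemory -log_buffer [expr 1024 * 1024] "
	}
	return ""
}
proc adjust_txnargs_sketch { logtype } {
	if { $logtype == "in-memory" } {
		# nosync is not allowed with in-memory logs.
		return " -txn "
	}
	return " -txn nosync "
}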
- eval test001 $method $niter 0 0 $tnum -env $masterenv $args - - # Loop, processing first the master's messages, then the client's, - # until both queues are empty. - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $masterenv 1] - - for { set i 0 } { $i < $nclients } { incr i } { - set envid [expr $i + 2] - incr nproced [replprocessqueue $clientenv($i) $envid] - } - - if { $nproced == 0 } { - break - } - } - - # Verify the database in the client dir. - for { set i 0 } { $i < $nclients } { incr i } { - puts "\tRep$tnum.b: Verifying contents of client database $i." - set testdir [get_home $masterenv] - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - open_and_dump_file test$tnum.db $clientenv($i) $testdir/t1 \ - test001.check dump_file_direction "-first" "-next" - - if { [string compare [convert_method $method] -recno] != 0 } { - filesort $t1 $t3 - } - error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0 - - verify_dir $clientdir($i) "\tRep$tnum.c: " 0 0 1 - } - - # Make sure all the clients are synced up and ready to be good - # voting citizens. - error_check_good master_flush [$masterenv rep_flush] 0 - while { 1 } { - set nproced 0 - incr nproced [replprocessqueue $masterenv 1 0] - for { set i 0 } { $i < $nclients } { incr i } { - incr nproced [replprocessqueue $clientenv($i) \ - [expr $i + 2] 0] - } - - if { $nproced == 0 } { - break - } - } + eval rep_test $method $masterenv NULL $niter 0 0 0 $largs + # Process all the messages and close the master. + process_msgs $envlist error_check_good masterenv_close [$masterenv close] 0 + set envlist [lreplace $envlist 0 0] for { set i 0 } { $i < $nclients } { incr i } { replclear [expr $i + 2] @@ -152,22 +131,23 @@ proc rep005 { method args } { set c2err $c0err set numtests [expr [llength $c0err] * [llength $c1err] * \ [llength $c2err]] - puts "\t$m.d: Starting $numtests election with error tests" + puts "\t$m.b: Starting $numtests election with error tests" set last_win -1 set win -1 foreach c0 $c0err { foreach c1 $c1err { foreach c2 $c2err { set elist [list $c0 $c1 $c2] - rep005_elect env_cmd clientenv $qdir $m \ - $count win last_win $elist + rep005_elect env_cmd envlist $qdir \ + $m $count win last_win $elist incr count } } } - for { set i 0 } { $i < $nclients } { incr i } { - error_check_good clientenv_close($i) [$clientenv($i) close] 0 + foreach pair $envlist { + set cenv [lindex $pair 0] + error_check_good cenv_close [$cenv close] 0 } replclose $testdir/MSGQUEUEDIR @@ -176,11 +156,11 @@ proc rep005 { method args } { puts [clock format [clock seconds] -format "%H:%M %D"] } -proc rep005_elect { ecmd cenv qdir msg count winner lsn_lose elist } { +proc rep005_elect { ecmd celist qdir msg count winner lsn_lose elist } { global elect_timeout elect_serial global is_windows_test upvar $ecmd env_cmd - upvar $cenv clientenv + upvar $celist envlist upvar $winner win upvar $lsn_lose last_win @@ -189,10 +169,13 @@ proc rep005_elect { ecmd cenv qdir msg count winner lsn_lose elist } { set nsites [expr $nclients + 1] set cl_list {} - for { set i 0 } { $i < $nclients } { incr i } { + foreach pair $envlist { + set id [lindex $pair 1] + set i [expr $id - 2] + set clientenv($i) [lindex $pair 0] set err_cmd($i) [lindex $elist $i] set elect_pipe($i) INVALID - replclear [expr $i + 2] + replclear $id lappend cl_list $i } @@ -205,7 +188,7 @@ proc rep005_elect { ecmd cenv qdir msg count winner lsn_lose elist } { set cl_list [lreplace $cl_list $last_win $last_win] set el $last_win } - set windex [berkdb random_int 1 [expr [llength 
$cl_list] - 1]] + set windex [berkdb random_int 0 [expr [llength $cl_list] - 1]] set win [lindex $cl_list $windex] } else { # Easy case, if we have a master, the winner must be the @@ -220,6 +203,7 @@ proc rep005_elect { ecmd cenv qdir msg count winner lsn_lose elist } { # make sure the lowest LSN client has the highest priority. # Everyone else has priority 10. for { set i 0 } { $i < $nclients } { incr i } { + set crash($i) 0 if { $i == $win } { set pri($i) 100 } elseif { $i == $last_win } { @@ -229,134 +213,12 @@ proc rep005_elect { ecmd cenv qdir msg count winner lsn_lose elist } { } } - puts "\t$msg.d.$count: Start election (win=client$win) $elist" - incr elect_serial - set pfx "CHILD$el.$elect_serial" - # Windows requires a longer timeout. - if { $is_windows_test == 1 } { - set elect_timeout [expr $elect_timeout * 3] - } - set elect_pipe($el) [start_election $pfx $qdir $env_cmd($el) \ - $nsites $pri($el) $elect_timeout $err_cmd($el)] - - tclsleep 2 - - set got_newmaster 0 - set tries 10 - while { 1 } { - set nproced 0 - set he 0 - set nm 0 - set nm2 0 - - for { set i 0 } { $i < $nclients } { incr i } { - set he 0 - set envid [expr $i + 2] - set child_done [check_election $elect_pipe($i) nm2] - if { $got_newmaster == 0 && $nm2 != 0 } { - error_check_good newmaster_is_master $nm2 \ - [expr $win + 2] - set got_newmaster $nm2 - - # If this env is the new master, it needs to - # configure itself as such--this is a different - # env handle from the one that performed the - # election. - if { $nm2 == $envid } { - error_check_good make_master($i) \ - [$clientenv($i) rep_start -master] \ - 0 - } - } - incr nproced \ - [replprocessqueue $clientenv($i) $envid 0 he nm] -# puts "Tries $tries: Processed queue for client $i, $nproced msgs he $he nm $nm nm2 $nm2" - if { $he == 1 } { - # - # Only close down the election pipe if the - # previously created one is done and - # waiting for new commands, otherwise - # if we try to close it while it's in - # progress we hang this main tclsh. - # - if { $elect_pipe($i) != "INVALID" && \ - $child_done == 1 } { - close_election $elect_pipe($i) - set elect_pipe($i) "INVALID" - } -# puts "Starting election on client $i" - if { $elect_pipe($i) == "INVALID" } { - incr elect_serial - set pfx "CHILD$i.$elect_serial" - set elect_pipe($i) [start_election \ - $pfx $qdir \ - $env_cmd($i) $nsites $pri($i) \ - $elect_timeout $err_cmd($i)] - set got_hold_elect($i) 1 - } - } - if { $nm != 0 } { - error_check_good newmaster_is_master $nm \ - [expr $win + 2] - set got_newmaster $nm - - # If this env is the new master, it needs to - # configure itself as such--this is a different - # env handle from the one that performed the - # election. - if { $nm == $envid } { - error_check_good make_master($i) \ - [$clientenv($i) rep_start -master] \ - 0 - if { [expr $count % 10] == 0 } { - set dbname rep005.$count.db - set db [berkdb_open -env \ - $clientenv($i) \ - -auto_commit \ - -create -btree $dbname] - error_check_good dbopen \ - [is_valid_db $db] TRUE - error_check_good dbclose \ - [$db close] 0 - } - } - } - } - - # We need to wait around to make doubly sure that the - # election has finished... - if { $nproced == 0 } { - incr tries -1 - if { $tries == 0 } { - break - } else { - tclsleep 1 - } - } else { - set tries 10 - } - } - - # Verify that client #1 is actually the winner. - error_check_good "client $win wins" $got_newmaster [expr $win + 2] - - cleanup_elections - - # - # Make sure that we've really processed all the post-election - # sync-up messages. 
- # - while { 1 } { - set nproced 0 - for { set i 0 } { $i < $nclients } { incr i } { - incr nproced [replprocessqueue $clientenv($i) \ - [expr $i + 2] 0] - } - if { $nproced == 0 } { - break - } - } - + puts "\t$msg.b.$count: Start election (win=client$win) $elist" + set msg $msg.c.$count + set nsites $nclients + set nvotes $nsites + run_election env_cmd envlist err_cmd pri crash \ + $qdir $msg $el $nsites $nvotes $nclients $win # # Sometimes test elections with an existing master. # Other times test elections without master by closing the @@ -368,7 +230,7 @@ proc rep005_elect { ecmd cenv qdir msg count winner lsn_lose elist } { set close_len [expr [llength $close_list] - 1] set close_index [berkdb random_int 0 $close_len] if { [lindex $close_list $close_index] == 1 } { - puts -nonewline "\t$msg.e.$count: Closing " + puts -nonewline "\t\t$msg: Closing " error_check_good newmaster_close [$clientenv($win) close] 0 # # If the next test should win via LSN then remove the @@ -377,6 +239,7 @@ proc rep005_elect { ecmd cenv qdir msg count winner lsn_lose elist } { set lsn_win { 0 0 0 0 1 1 1 1 1 1 } set lsn_len [expr [llength $lsn_win] - 1] set lsn_index [berkdb random_int 0 $lsn_len] + set rec_arg "" if { [lindex $lsn_win $lsn_index] == 1 } { set last_win $win set dirindex [lsearch -exact $env_cmd($win) "-home"] @@ -386,24 +249,41 @@ proc rep005_elect { ecmd cenv qdir msg count winner lsn_lose elist } { puts -nonewline "and cleaning " } else { set last_win -1 + # + # If we're not cleaning the env, decide if we should + # run recovery upon reopening the env. This causes + # two things: + # 1. Removal of region files which forces the env + # to read its __db.rep.egen file. + # 2. Adding a couple log records, so this client must + # be the next winner as well since it'll have the + # biggest LSN. + # + set rec_win { 0 0 0 0 0 0 1 1 1 1 } + set rec_len [expr [llength $rec_win] - 1] + set rec_index [berkdb random_int 0 $rec_len] + if { [lindex $rec_win $rec_index] == 1 } { + puts -nonewline "and recovering " + set rec_arg "-recover" + } } puts "new master, new client $win" - set clientenv($win) [eval $env_cmd($win)] + set clientenv($win) [eval $env_cmd($win) $rec_arg] error_check_good cl($win) [is_valid_env $clientenv($win)] TRUE - set win -1 + # + # Since we started a new client, we need to replace it + # in the message processing list so that we get the + # new Tcl handle name in there. + set newel "$clientenv($win) [expr $win + 2]" + set envlist [lreplace $envlist $win $win $newel] + if { $rec_arg == "" } { + set win -1 + } # # Since we started a new client we want to give them # all a chance to process everything outstanding before # the election on the next iteration. - while { 1 } { - set nproced 0 - for { set i 0 } { $i < $nclients } { incr i } { - incr nproced [replprocessqueue $clientenv($i) \ - [expr $i + 2] 0] - } - if { $nproced == 0 } { - break - } - } + # + process_msgs $envlist } } diff --git a/db/test/rep006.tcl b/db/test/rep006.tcl index 07233ef1f..60d52cc7c 100644 --- a/db/test/rep006.tcl +++ b/db/test/rep006.tcl @@ -1,32 +1,45 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2006-2003 +# Copyright (c) 2003-2004 # Sleepycat Software. All rights reserved. # -# $Id: rep006.tcl,v 11.7 2003/08/28 19:59:14 sandstro Exp $ +# $Id: rep006.tcl,v 11.15 2004/09/22 18:01:05 bostic Exp $ # # TEST rep006 # TEST Replication and non-rep env handles. 
# TEST -# TEST Run a modified version of test001 in a replicated master environment; -# TEST verify that the database on the client is correct. +# TEST Run a modified version of test001 in a replicated master +# TEST environment; verify that the database on the client is correct. # TEST Next, create a non-rep env handle to the master env. # TEST Attempt to open the database r/w to force error. proc rep006 { method { niter 1000 } { tnum "006" } args } { - global passwd - global has_crypto - puts "Rep$tnum: Replication and non-rep env handles" - - set envargs "" - rep006_sub $method $niter $tnum $envargs $args + set logsets [create_logsets 2] + + # Run the body of the test with and without recovery. + set recopts { "" "-recover" } + foreach r $recopts { + foreach l $logsets { + set logindex [lsearch -exact $l "in-memory"] + if { $r == "-recover" && $logindex != -1 } { + puts "Rep$tnum: Skipping for in-memory logs\ + with -recover." + continue + } + puts "Rep$tnum ($method $r):\ + Replication and non-rep env handles" + puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client logs are [lindex $l 1]" + rep006_sub $method $niter $tnum $l $r $args + } + } } -proc rep006_sub { method niter tnum envargs largs } { +proc rep006_sub { method niter tnum logset recargs largs } { source ./include.tcl global testdir - global encrypt + global is_hp_test env_cleanup $testdir @@ -38,6 +51,16 @@ proc rep006_sub { method niter tnum envargs largs } { file mkdir $masterdir file mkdir $clientdir + set m_logtype [lindex $logset 0] + set c_logtype [lindex $logset 1] + + # In-memory logs require a large log buffer, and cannot + # be used with -txn nosync. + set m_logargs [adjust_logargs $m_logtype] + set c_logargs [adjust_logargs $c_logtype] + set m_txnargs [adjust_txnargs $m_logtype] + set c_txnargs [adjust_txnargs $c_logtype] + if { [is_record_based $method] == 1 } { set checkfunc test001_recno.check } else { @@ -46,44 +69,29 @@ proc rep006_sub { method niter tnum envargs largs } { # Open a master. repladd 1 - set masterenv \ - [eval {berkdb_env -create -lock_max 2500 -log_max 1000000} \ - $envargs {-home $masterdir -txn nosync -rep_master -rep_transport \ - [list 1 replsend]}] + set env_cmd(M) "berkdb_env -create -lock_max 2500 -log_max 1000000 \ + -home $masterdir $m_txnargs $m_logargs \ + -rep_master -rep_transport \ + \[list 1 replsend\]" + set masterenv [eval $env_cmd(M) $recargs] error_check_good master_env [is_valid_env $masterenv] TRUE # Open a client repladd 2 - set clientenv [eval {berkdb_env -create} $envargs -txn nosync \ - -lock_max 2500 \ - {-home $clientdir -rep_client -rep_transport [list 2 replsend]}] + set env_cmd(C) "berkdb_env -create $c_txnargs \ + $c_logargs -lock_max 2500 -home $clientdir \ + -rep_client -rep_transport \[list 2 replsend\]" + set clientenv [eval $env_cmd(C) $recargs] error_check_good client_env [is_valid_env $clientenv] TRUE # Bring the client online by processing the startup messages. - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $clientenv 2] - - if { $nproced == 0 } { - break - } - } + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist # Run a modified test001 in the master (and update client). puts "\tRep$tnum.a: Running test001 in replicated env." 
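# create_logsets, called at the top of each rewritten test above, is
# another helper defined outside this hunk.  Each element of its result
# is a list of "on-disk"/"in-memory" choices, one per site, which is why
# callers probe it with [lsearch -exact $l "in-memory"].  A simplified
# sketch that returns every combination for n sites (the real helper may
# additionally restrict the mix via global flags such as
# mixed_mode_logging, referenced in rep008 below):
proc create_logsets_sketch { n } {
	set sets [list {}]
	for { set i 0 } { $i < $n } { incr i } {
		set next {}
		foreach s $sets {
			lappend next [concat $s "on-disk"]
			lappend next [concat $s "in-memory"]
		}
		set sets $next
	}
	return $sets
}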
eval test001 $method $niter 0 0 $tnum -env $masterenv $largs - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $clientenv 2] - - if { $nproced == 0 } { - break - } - } + process_msgs $envlist # Verify the database in the client dir. puts "\tRep$tnum.b: Verifying client database contents." @@ -99,30 +107,30 @@ proc rep006_sub { method niter tnum envargs largs } { error_check_good open_err $stat 1 error_check_good open_err1 [is_substr $ret "attempting to modify"] 1 - puts "\tRep$tnum.d: Verifying non-master access." - set rdenv \ - [eval {berkdb_env_noerr} $envargs {-home $masterdir}] - error_check_good rdenv [is_valid_env $rdenv] TRUE - # - # Open the db read/write which will cause it to try to - # write out a log record, which should fail. - # - set stat [catch {berkdb_open_noerr -env $rdenv test$tnum.db} ret] - error_check_good open_err $stat 1 - error_check_good open_err1 [is_substr $ret "attempting to modify"] 1 - error_check_good rdenv_close [$rdenv close] 0 - - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $clientenv 2] - - if { $nproced == 0 } { - break - } + # We have to skip this bit for HP-UX because we can't + # open an env twice. + if { $is_hp_test == 1 } { + puts "\tRep$tnum.d: Skipping for HP-UX." + } else { + puts "\tRep$tnum.d: Verifying non-master access." + + set rdenv \ + [eval {berkdb_env_noerr -home $masterdir}] + error_check_good rdenv [is_valid_env $rdenv] TRUE + # + # Open the db read/write which will cause it to try to + # write out a log record, which should fail. + # + set stat \ + [catch {berkdb_open_noerr -env $rdenv test$tnum.db} ret] + error_check_good open_err $stat 1 + error_check_good \ + open_err1 [is_substr $ret "attempting to modify"] 1 + error_check_good rdenv_close [$rdenv close] 0 } + process_msgs $envlist + error_check_good masterenv_close [$masterenv close] 0 error_check_good clientenv_close [$clientenv close] 0 diff --git a/db/test/rep007.tcl b/db/test/rep007.tcl index 4f4c8d1f4..427881bc5 100644 --- a/db/test/rep007.tcl +++ b/db/test/rep007.tcl @@ -1,29 +1,49 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2001-2003 +# Copyright (c) 2001-2004 # Sleepycat Software. All rights reserved. # -# $Id: rep007.tcl,v 11.15 2003/09/25 01:35:39 margo Exp $ +# $Id: rep007.tcl,v 11.22 2004/09/22 18:01:06 bostic Exp $ # -# TEST rep007 +# TEST rep007 # TEST Replication and bad LSNs # TEST -# TEST Run a modified version of test001 in a replicated master env. +# TEST Run rep_test in a replicated master env. # TEST Close the client. Make additional changes to master. # TEST Close the master. Open the client as the new master. # TEST Make several different changes. Open the old master as # TEST the client. Verify periodically that contents are correct. proc rep007 { method { niter 10 } { tnum "007" } args } { + + set args [convert_args $method $args] + set logsets [create_logsets 3] + + # Run the body of the test with and without recovery. + set recopts { "" "-recover" } + foreach r $recopts { + foreach l $logsets { + set logindex [lsearch -exact $l "in-memory"] + if { $r == "-recover" && $logindex != -1 } { + puts "Rep$tnum: Skipping for\ + in-memory logs with -recover." + continue + } + puts "Rep$tnum ($method $r): Replication and bad LSNs." 
+ puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client1 logs are [lindex $l 1]" + puts "Rep$tnum: Client2 logs are [lindex $l 2]" + rep007_sub $method $niter $tnum $l $r $args + } + } +} + +proc rep007_sub { method niter tnum logset recargs largs } { global testdir + env_cleanup $testdir - puts "Rep$tnum: Replication and bad LSNs." set orig_tdir $testdir - set largs $args - set omethod [convert_method $method] - env_cleanup $testdir - replsetup $testdir/MSGQUEUEDIR set masterdir $testdir/MASTERDIR @@ -33,75 +53,73 @@ proc rep007 { method { niter 10 } { tnum "007" } args } { file mkdir $clientdir file mkdir $clientdir2 + set m_logtype [lindex $logset 0] + set m_logargs [adjust_logargs $m_logtype] + set m_txnargs [adjust_txnargs $m_logtype] + + set c_logtype [lindex $logset 1] + set c_logargs [adjust_logargs $c_logtype] + set c_txnargs [adjust_txnargs $c_logtype] + + set c2_logtype [lindex $logset 2] + set c2_logargs [adjust_logargs $c2_logtype] + set c2_txnargs [adjust_txnargs $c2_logtype] + # Open a master. repladd 1 - set ma_envcmd "berkdb_env -create -txn nosync -lock_max 2500 \ + set ma_envcmd "berkdb_env -create $m_txnargs \ + $m_logargs -lock_max 2500 \ -home $masterdir -rep_transport \[list 1 replsend\]" -# set ma_envcmd "berkdb_env -create -txn nosync -lock_max 2500 \ +# set ma_envcmd "berkdb_env -create $m_txnargs \ +# $m_logargs -lock_max 2500 \ # -verbose {rep on} \ # -home $masterdir -rep_transport \[list 1 replsend\]" - set masterenv [eval $ma_envcmd -rep_master] + set masterenv [eval $ma_envcmd $recargs -rep_master] error_check_good master_env [is_valid_env $masterenv] TRUE # Open two clients repladd 2 - set cl_envcmd "berkdb_env -create -txn nosync -lock_max 2500 \ + set cl_envcmd "berkdb_env -create $c_txnargs \ + $c_logargs -lock_max 2500 \ -home $clientdir -rep_transport \[list 2 replsend\]" -# set cl_envcmd "berkdb_env -create -txn nosync -lock_max 2500 \ +# set cl_envcmd "berkdb_env -create $c_txnargs \ +# $c_logargs -lock_max 2500 \ # -verbose {rep on} \ # -home $clientdir -rep_transport \[list 2 replsend\]" - set clientenv [eval $cl_envcmd -rep_client] + set clientenv [eval $cl_envcmd $recargs -rep_client] error_check_good client_env [is_valid_env $clientenv] TRUE repladd 3 - set cl2_envcmd "berkdb_env -create -txn nosync -lock_max 2500 \ + set cl2_envcmd "berkdb_env -create $c2_txnargs \ + $c2_logargs -lock_max 2500 \ -home $clientdir2 -rep_transport \[list 3 replsend\]" -# set cl2_envcmd "berkdb_env -create -txn nosync -lock_max 2500 \ +# set cl2_envcmd "berkdb_env -create $c2_txnargs \ +# $c2_logargs -lock_max 2500 \ # -home $clientdir2 -rep_transport \[list 3 replsend\] \ # -verbose {rep on}" - set cl2env [eval $cl2_envcmd -rep_client] + set cl2env [eval $cl2_envcmd $recargs -rep_client] error_check_good client2_env [is_valid_env $cl2env] TRUE # Bring the clients online by processing the startup messages. - while { 1 } { - set nproced 0 + set envlist "{$masterenv 1} {$clientenv 2} {$cl2env 3}" + process_msgs $envlist - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $clientenv 2] - incr nproced [replprocessqueue $cl2env 3] + # Run rep_test in the master (and update clients). + puts "\tRep$tnum.a: Running rep_test in replicated env." + eval rep_test $method $masterenv NULL $niter 0 0 0 $largs + process_msgs $envlist - if { $nproced == 0 } { - break - } - } - - # Run a modified test001 in the master (and update clients). - puts "\tRep$tnum.a: Running test001 in replicated env." 
- eval test001 $method $niter 0 0 $tnum -env $masterenv $largs - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $clientenv 2] - incr nproced [replprocessqueue $cl2env 3] - - if { $nproced == 0 } { - break - } - } - - # Databases should now have identical contents. We assume we - # know the name of the file created by test001, "test$tnum.db". - set dbname "test$tnum.db" + # Databases should now have identical contents. + set dbname "test.db" if { [is_hash $method] == 0 } { set db1 [berkdb_open -env $masterenv -auto_commit $dbname] set db2 [berkdb_open -env $clientenv -auto_commit $dbname] set db3 [berkdb_open -env $cl2env -auto_commit $dbname] - error_check_good compare1and2 \ - [db_compare $db1 $db2 $masterdir/$dbname $clientdir/$dbname] 0 - error_check_good compare1and3 \ - [db_compare $db1 $db3 $masterdir/$dbname $clientdir2/$dbname] 0 + error_check_good compare1and2 [db_compare \ + $db1 $db2 $masterdir/$dbname $clientdir/$dbname] 0 + error_check_good compare1and3 [db_compare \ + $db1 $db3 $masterdir/$dbname $clientdir2/$dbname] 0 error_check_good db1_close [$db1 close] 0 error_check_good db2_close [$db2 close] 0 error_check_good db3_close [$db3 close] 0 @@ -112,17 +130,9 @@ proc rep007 { method { niter 10 } { tnum "007" } args } { # Change master and propagate changes to client 2. set start $niter - eval test001 $method $niter $start 1 $tnum -env $masterenv $largs - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $cl2env 3] - - if { $nproced == 0 } { - break - } - } + eval rep_test $method $masterenv NULL $niter $start $start 0 $largs + set envlist "{$masterenv 1} {$cl2env 3}" + process_msgs $envlist # We need to do a deletion here to cause meta-page updates, # particularly for queue. Delete the first pair and remember @@ -146,16 +156,8 @@ proc rep007 { method { niter 10 } { tnum "007" } args } { # Process the messages to get them out of the db. This also # propagates the delete to client 2. # - while { 1 } { - set nproced 0 + process_msgs $envlist - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $cl2env 3] - - if { $nproced == 0 } { - break - } - } # Nuke those for closed client replclear 2 @@ -172,7 +174,7 @@ proc rep007 { method { niter 10 } { tnum "007" } args } { puts "\tRep$tnum.c: Close master, reopen client as master." error_check_good master_close [$masterenv close] 0 - set newmasterenv [eval $cl_envcmd -rep_master] + set newmasterenv [eval $cl_envcmd $recargs -rep_master] # Now we can check that database 2 does not match 3. if { [is_hash $method] == 0 } { set db2 [berkdb_open -env $newmasterenv -auto_commit $dbname] @@ -185,8 +187,19 @@ proc rep007 { method { niter 10 } { tnum "007" } args } { error_check_good db3_close [$db3 close] 0 puts "\tRep$tnum.d: Make incompatible changes to new master." + # + # Process startup messages and note that startup isn't complete. + # + set envlist "{$newmasterenv 2} {$cl2env 3}" + process_msgs $envlist + set stup [stat_field $cl2env rep_stat "Startup complete"] + error_check_good cl2recover $stup 0 + + # + # Making modifications should trigger startup complete. + # set db [berkdb_open -env $newmasterenv -auto_commit -create $omethod \ - test007.db] + test.db] error_check_good dbopen [is_valid_db $db] TRUE set t [$newmasterenv txn] # Force in a pair {10 10}. 
This works for all access @@ -196,35 +209,16 @@ proc rep007 { method { niter 10 } { tnum "007" } args } { error_check_good txn [$t commit] 0 error_check_good dbclose [$db close] 0 - eval test001 $method $niter $start 1 $tnum -env $newmasterenv $largs + eval rep_test $method $newmasterenv NULL $niter $start $start 0 $largs set cl2rec 0 - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $newmasterenv 2] - incr nproced [replprocessqueue $cl2env 3] - # At some point in the processing, client2 should be - # in recovery. - set stat [$cl2env rep_stat] - if { [is_substr $stat "{{In recovery} 1}"] } { - set cl2rec 1 - } - - if { $nproced == 0 } { - break - } - } + set envlist "{$newmasterenv 2} {$cl2env 3}" + process_msgs $envlist # Nuke those for closed old master replclear 1 - # - # Check that cl2 stats showed we were in recovery and now that - # we're done, we should be out of it. - # - error_check_good cl2rec $cl2rec 1 - set stat [$cl2env rep_stat] - error_check_good cl2recover [is_substr $stat "{{In recovery} 0}"] 1 + set stup [stat_field $cl2env rep_stat "Startup complete"] + error_check_good cl2recover $stup 1 # Databases 2 and 3 should now match. set db2 [berkdb_open -env $newmasterenv -auto_commit $dbname] @@ -237,26 +231,11 @@ proc rep007 { method { niter 10 } { tnum "007" } args } { puts "\tRep$tnum.e: Open old master as client." set newclientenv [eval $ma_envcmd -rep_client -recover] - # Bring the newclient online by processing the startup messages. - set ncrec 0 - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $newmasterenv 2] - incr nproced [replprocessqueue $newclientenv 1] - set stat [$newclientenv rep_stat] - if { [is_substr $stat "{{In recovery} 1}"] } { - set ncrec 1 - } - incr nproced [replprocessqueue $cl2env 3] + set envlist "{$newclientenv 1} {$newmasterenv 2} {$cl2env 3}" + process_msgs $envlist - if { $nproced == 0 } { - break - } - } - error_check_good ncrec $ncrec 1 - set stat [$newclientenv rep_stat] - error_check_good nc2recover [is_substr $stat "{{In recovery} 0}"] 1 + set stup [stat_field $newclientenv rep_stat "Startup complete"] + error_check_good ncrecover $stup 0 # The pair we deleted earlier from the master should now # have reappeared. @@ -267,19 +246,15 @@ proc rep007 { method { niter 10 } { tnum "007" } args } { error_check_good db1_close [$db1 close] 0 set start [expr $niter * 2] - eval test001 $method $niter $start 1 $tnum -env $newmasterenv $largs - - while { 1 } { - set nproced 0 + eval rep_test $method $newmasterenv NULL $niter $start $start 0 $largs + set envlist "{$newclientenv 1} {$newmasterenv 2} {$cl2env 3}" + process_msgs $envlist - incr nproced [replprocessqueue $newmasterenv 2] - incr nproced [replprocessqueue $newclientenv 1] - incr nproced [replprocessqueue $cl2env 3] - - if { $nproced == 0 } { - break - } - } + # + # Now startup should be complete. + # + set stup [stat_field $newclientenv rep_stat "Startup complete"] + error_check_good nc2recover $stup 1 # Now all 3 should match again. set db1 [berkdb_open -env $newclientenv -auto_commit $dbname] diff --git a/db/test/rep008.tcl b/db/test/rep008.tcl index 7db50ca85..c98046984 100644 --- a/db/test/rep008.tcl +++ b/db/test/rep008.tcl @@ -1,30 +1,47 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2001-2003 +# Copyright (c) 2001-2004 # Sleepycat Software. All rights reserved. 
# -# $Id: rep008.tcl,v 1.3 2003/08/28 19:59:15 sandstro Exp $ +# $Id: rep008.tcl,v 1.11 2004/09/22 18:01:06 bostic Exp $ # -# TEST rep008 +# TEST rep008 # TEST Replication, back up and synchronizing # TEST -# TEST Run a modified version of test001 in a replicated master environment; -# TEST Close master and client. -# TEST Copy the master log to the client. -# TEST Clean the master. -# TEST Reopen the master and client. +# TEST Run a modified version of test001 in a replicated master +# TEST environment. +# TEST Close master and client. +# TEST Copy the master log to the client. +# TEST Clean the master. +# TEST Reopen the master and client. proc rep008 { method { niter 10 } { tnum "008" } args } { - global testdir + global mixed_mode_logging - puts "Rep$tnum: Replication backup and synchronizing" - set largs $args + if { $mixed_mode_logging == 1 } { + puts "Rep$tnum: Skipping for mixed-mode logging." + return + } + if { [is_btree $method] == 0 } { + puts "Rep$tnum: Skipping for method $method." + return + } - env_cleanup $testdir + set args [convert_args $method $args] - if { [is_btree $method] == 0 } { - puts "Rep008: Skipping for method $method." - return + # Run the body of the test with and without recovery. + set recopts { "" "-recover" } + foreach r $recopts { + puts "Rep$tnum ($method $r):\ + Replication backup and synchronizing." + rep008_sub $method $niter $tnum $r $args } +} + +proc rep008_sub { method niter tnum recargs largs } { + global testdir + global util_path + + env_cleanup $testdir replsetup $testdir/MSGQUEUEDIR @@ -36,77 +53,61 @@ proc rep008 { method { niter 10 } { tnum "008" } args } { # Open a master. repladd 1 - set ma_envcmd "berkdb_env -create -txn nosync -lock_max 2500 \ + set ma_envcmd "berkdb_env_noerr -create -txn nosync -lock_max 2500 \ -home $masterdir -rep_transport \[list 1 replsend\]" -# set ma_envcmd "berkdb_env -create -txn nosync -lock_max 2500 \ -# -verbose {rep on} \ +# set ma_envcmd "berkdb_env_noerr -create -txn nosync -lock_max 2500 \ +# -verbose {rep on} -errpfx MASTER \ # -home $masterdir -rep_transport \[list 1 replsend\]" - set masterenv [eval $ma_envcmd -rep_master] + set masterenv [eval $ma_envcmd $recargs -rep_master] error_check_good master_env [is_valid_env $masterenv] TRUE # Open a client repladd 2 - set cl_envcmd "berkdb_env -create -txn nosync -lock_max 2500 \ + set cl_envcmd "berkdb_env_noerr -create -txn nosync -lock_max 2500 \ -home $clientdir -rep_transport \[list 2 replsend\]" -# set cl_envcmd "berkdb_env -create -txn nosync -lock_max 2500 \ -# -verbose {rep on} \ +# set cl_envcmd "berkdb_env_noerr -create -txn nosync -lock_max 2500 \ +# -verbose {rep on} -errpfx CLIENT \ # -home $clientdir -rep_transport \[list 2 replsend\]" - set clientenv [eval $cl_envcmd -rep_client] + set clientenv [eval $cl_envcmd $recargs -rep_client] error_check_good client_env [is_valid_env $clientenv] TRUE # Bring the clients online by processing the startup messages. - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $clientenv 2] - - if { $nproced == 0 } { - break - } - } + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist # Run a modified test001 in the master (and update client). puts "\tRep$tnum.a: Running test001 in replicated env." 
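# Further down in this rep008 hunk, process_msgs is called as
# "process_msgs $envlist 0 NONE err" and the test then asserts on $err,
# so the helper evidently accepts optional arguments.  The parameter
# names below are assumed, not taken from this patch; the idea is that a
# non-NONE error variable is bound in the caller with upvar and receives
# the text of any error hit while draining the queues, so the test can
# check for the expected "Client was never part" message:
proc process_msgs_err_sketch { elist { perm 0 } { dupvar NONE } { errvar NONE } } {
	if { $errvar != "NONE" } {
		upvar $errvar errorp
		set errorp 0
	}
	while { 1 } {
		set nproced 0
		foreach pair $elist {
			set stat [catch {
				incr nproced [replprocessqueue \
				    [lindex $pair 0] [lindex $pair 1]]
			} ret]
			if { $stat != 0 && $errvar != "NONE" } {
				# Hand the error text back to the caller.
				set errorp $ret
			}
		}
		if { $nproced == 0 } {
			break
		}
	}
}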
eval test001 $method $niter 0 0 $tnum -env $masterenv $largs - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $clientenv 2] + process_msgs $envlist - if { $nproced == 0 } { - break - } - } puts "\tRep$tnum.b: Close client and master. Copy logs." error_check_good client_close [$clientenv close] 0 error_check_good master_close [$masterenv close] 0 file copy -force $masterdir/log.0000000001 $testdir/log.save puts "\tRep$tnum.c: Clean master and reopen" + # + # Add sleep calls to ensure master's new log doesn't match + # its old one in the ckp timestamp. + # + tclsleep 1 env_cleanup $masterdir + tclsleep 1 env_cleanup $clientdir file copy -force $testdir/log.save $clientdir/log.0000000001 - set masterenv [eval $ma_envcmd -rep_master] + set masterenv [eval $ma_envcmd $recargs -rep_master] error_check_good master_env [is_valid_env $masterenv] TRUE - set clientenv [eval $cl_envcmd -rep_client] + set clientenv [eval $cl_envcmd $recargs -rep_client] error_check_good client_env [is_valid_env $clientenv] TRUE # # Process the messages to get them out of the db. # - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $clientenv 2] - - if { $nproced == 0 } { - break - } - } + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist 0 NONE err + error_check_bad err $err 0 + error_check_good errchk [is_substr $err "Client was never part"] 1 error_check_good masterenv_close [$masterenv close] 0 error_check_good clientenv_close [$clientenv close] 0 diff --git a/db/test/rep009.tcl b/db/test/rep009.tcl index fd97e6cf5..178ee1d13 100644 --- a/db/test/rep009.tcl +++ b/db/test/rep009.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2001-2003 +# Copyright (c) 2001-2004 # Sleepycat Software. All rights reserved. # -# $Id: rep009.tcl,v 11.3 2003/08/28 19:59:15 sandstro Exp $ +# $Id: rep009.tcl,v 11.8 2004/09/22 18:01:06 bostic Exp $ # # TEST rep009 # TEST Replication and DUPMASTERs @@ -13,19 +13,34 @@ # TEST Close a client, clean it and then declare it a 2nd master. proc rep009 { method { niter 10 } { tnum "009" } args } { - puts "Rep$tnum: Replication DUPMASTER test." - - if { [is_btree $method] == 0 } { + if { [is_btree $method] == 0 } { puts "Rep009: Skipping for method $method." return } - set largs $args - rep009_body $method $niter $tnum 0 $largs - rep009_body $method $niter $tnum 1 $largs + set logsets [create_logsets 3] + + # Run the body of the test with and without recovery. + set recopts { "" "-recover" } + foreach r $recopts { + foreach l $logsets { + set logindex [lsearch -exact $l "in-memory"] + if { $r == "-recover" && $logindex != -1 } { + puts "Rep$tnum: Skipping\ + for in-memory logs with -recover." + continue + } + puts "Rep$tnum ($r): Replication DUPMASTER test." 
+ puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client1 logs are [lindex $l 1]" + puts "Rep$tnum: Client2 logs are [lindex $l 2]" + rep009_sub $method $niter $tnum 0 $l $r $args + rep009_sub $method $niter $tnum 1 $l $r $args + } + } } -proc rep009_body { method niter tnum clean largs } { +proc rep009_sub { method niter tnum clean logset recargs largs } { global testdir env_cleanup $testdir @@ -40,67 +55,67 @@ proc rep009_body { method niter tnum clean largs } { file mkdir $clientdir file mkdir $clientdir2 + set m_logtype [lindex $logset 0] + set m_logargs [adjust_logargs $m_logtype] + set m_txnargs [adjust_txnargs $m_logtype] + + set c_logtype [lindex $logset 1] + set c_logargs [adjust_logargs $c_logtype] + set c_txnargs [adjust_txnargs $c_logtype] + + set c2_logtype [lindex $logset 2] + set c2_logargs [adjust_logargs $c2_logtype] + set c2_txnargs [adjust_txnargs $c2_logtype] + # Open a master. repladd 1 - set ma_envcmd "berkdb_env -create -txn nosync -lock_max 2500 \ + set ma_envcmd "berkdb_env -create $m_txnargs \ + $m_logargs -lock_max 2500 \ -home $masterdir -rep_transport \[list 1 replsend\]" -# set ma_envcmd "berkdb_env -create -txn nosync -lock_max 2500 \ +# set ma_envcmd "berkdb_env -create $m_txnargs \ +# $m_logargs -lock_max 2500 \ # -verbose {rep on} \ # -home $masterdir -rep_transport \[list 1 replsend\]" - set masterenv [eval $ma_envcmd -rep_master] + set masterenv [eval $ma_envcmd $recargs -rep_master] error_check_good master_env [is_valid_env $masterenv] TRUE # Open a client repladd 2 - set cl_envcmd "berkdb_env -create -txn nosync -lock_max 2500 \ + set cl_envcmd "berkdb_env -create $c_txnargs \ + $c_logargs -lock_max 2500 \ -home $clientdir -rep_transport \[list 2 replsend\]" -# set cl_envcmd "berkdb_env -create -txn nosync -lock_max 2500 \ +# set cl_envcmd "berkdb_env -create $c_txnargs \ +# $c_logargs -lock_max 2500 \ # -verbose {rep on} \ # -home $clientdir -rep_transport \[list 2 replsend\]" - set clientenv [eval $cl_envcmd -rep_client] + set clientenv [eval $cl_envcmd $recargs -rep_client] error_check_good client_env [is_valid_env $clientenv] TRUE repladd 3 - set cl2_envcmd "berkdb_env -create -txn nosync -lock_max 2500 \ + set cl2_envcmd "berkdb_env -create $c2_txnargs \ + $c2_logargs -lock_max 2500 \ -home $clientdir2 -rep_transport \[list 3 replsend\]" -# set cl2_envcmd "berkdb_env -create -txn nosync -lock_max 2500 \ +# set cl2_envcmd "berkdb_env -create $c2_txnargs \ +# $c2_logargs -lock_max 2500 \ # -home $clientdir2 -rep_transport \[list 3 replsend\] \ # -verbose {rep on}" - set cl2env [eval $cl2_envcmd -rep_client] + set cl2env [eval $cl2_envcmd $recargs -rep_client] error_check_good client2_env [is_valid_env $cl2env] TRUE # Bring the clients online by processing the startup messages. - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $clientenv 2] - incr nproced [replprocessqueue $cl2env 3] - - if { $nproced == 0 } { - break - } - } + set envlist "{$masterenv 1} {$clientenv 2} {$cl2env 3}" + process_msgs $envlist # Run a modified test001 in the master (and update client). puts "\tRep$tnum.a: Running test001 in replicated env." eval test001 $method $niter 0 0 $tnum -env $masterenv $largs - while { 1 } { - set nproced 0 + process_msgs $envlist - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $clientenv 2] - incr nproced [replprocessqueue $cl2env 3] - - if { $nproced == 0 } { - break - } - } puts "\tRep$tnum.b: Declare a client to be a master." 
if { $clean } { error_check_good clientenv_close [$clientenv close] 0 env_cleanup $clientdir - set clientenv [eval $cl_envcmd -rep_master] + set clientenv [eval $cl_envcmd $recargs -rep_master] error_check_good client_env [is_valid_env $clientenv] TRUE } else { error_check_good client_master [$clientenv rep_start -master] 0 diff --git a/db/test/rep010.tcl b/db/test/rep010.tcl index 8ee6de0eb..34c4338a1 100644 --- a/db/test/rep010.tcl +++ b/db/test/rep010.tcl @@ -1,32 +1,53 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2003 +# Copyright (c) 2003-2004 # Sleepycat Software. All rights reserved. # -# $Id: rep010.tcl,v 11.3 2003/10/31 20:15:43 sandstro Exp $ +# $Id: rep010.tcl,v 11.10 2004/09/22 18:01:06 bostic Exp $ # # TEST rep010 # TEST Replication and ISPERM # TEST -# TEST With consecutive message processing, make sure every -# TEST DB_REP_PERMANENT is responded to with an ISPERM when -# TEST processed. With gaps in the processing, make sure +# TEST With consecutive message processing, make sure every +# TEST DB_REP_PERMANENT is responded to with an ISPERM when +# TEST processed. With gaps in the processing, make sure # TEST every DB_REP_PERMANENT is responded to with an ISPERM -# TEST or a NOTPERM. Verify in both cases that the LSN returned +# TEST or a NOTPERM. Verify in both cases that the LSN returned # TEST with ISPERM is found in the log. proc rep010 { method { niter 100 } { tnum "010" } args } { + + set args [convert_args $method $args] + set logsets [create_logsets 2] + + # Run the body of the test with and without recovery. + set recopts { "" "-recover" } + foreach r $recopts { + foreach l $logsets { + set logindex [lsearch -exact $l "in-memory"] + if { $r == "-recover" && $logindex != -1 } { + puts "Rep$tnum: Skipping\ + for in-memory logs with -recover." + continue + } + puts "Rep$tnum ($method $r): Replication and ISPERM." + puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client logs are [lindex $l 1]" + rep010_sub $method $niter $tnum $l $r $args + } + } +} + +proc rep010_sub { method niter tnum logset recargs largs } { source ./include.tcl - global perm_sent_list - global perm_rec_list global rand_init berkdb srand $rand_init - puts "Rep$tnum: Replication and ISPERM ($method)" + global perm_sent_list env_cleanup $testdir - set args [convert_args $method $args] set omethod [convert_method $method] replsetup $testdir/MSGQUEUEDIR + set perm_sent_list {{}} set masterdir $testdir/MASTERDIR set clientdir $testdir/CLIENTDIR @@ -34,48 +55,54 @@ proc rep010 { method { niter 100 } { tnum "010" } args } { file mkdir $masterdir file mkdir $clientdir - # Open a master. + set m_logtype [lindex $logset 0] + set c_logtype [lindex $logset 1] + + # In-memory logs require a large log buffer, and cannot + # be used with -txn nosync. + set m_logargs [adjust_logargs $m_logtype] + set c_logargs [adjust_logargs $c_logtype] + set m_txnargs [adjust_txnargs $m_logtype] + set c_txnargs [adjust_txnargs $c_logtype] + + # Open a master. 
repladd 1 set env_cmd(M) "berkdb_env_noerr -create -lock_max 2500 \ - -log_max 1000000 \ - -home $masterdir -txn nosync -rep_master \ + -log_max 1000000 $m_logargs \ + -home $masterdir $m_txnargs -rep_master \ -rep_transport \[list 1 replsend\]" - set masterenv [eval $env_cmd(M)] + set masterenv [eval $env_cmd(M) $recargs] error_check_good master_env [is_valid_env $masterenv] TRUE # Open a client repladd 2 set env_cmd(C) "berkdb_env_noerr -create -home $clientdir \ - -txn nosync -rep_client \ + $c_txnargs $c_logargs -rep_client \ -rep_transport \[list 2 replsend\]" - set clientenv [eval $env_cmd(C)] + set clientenv [eval $env_cmd(C) $recargs] error_check_good client_env [is_valid_env $clientenv] TRUE # Bring the client online. - rep010_procmessages $masterenv $clientenv + rep010_process_msgs $masterenv $clientenv 1 # Open database in master, propagate to client. set dbname rep010.db - set db1 [eval "berkdb_open -create $omethod -auto_commit \ - -env $masterenv $args $dbname"] - rep010_procmessages $masterenv $clientenv + set db1 [eval {berkdb_open -create} $omethod -auto_commit \ + -env $masterenv $largs $dbname] + rep010_process_msgs $masterenv $clientenv 1 puts "\tRep$tnum.a: Process messages with no gaps." - # Initialize lists of permanent LSNs sent and received. - set perm_sent_list {} - set perm_rec_list {} - - # Feed operations one at a time to master and immediately - # update client. + # Feed operations one at a time to master and immediately + # update client. for { set i 1 } { $i <= $niter } { incr i } { set t [$masterenv txn] error_check_good db_put \ [eval $db1 put -txn $t $i [chop_data $method data$i]] 0 error_check_good txn_commit [$t commit] 0 - rep010_procmessages $masterenv $clientenv + rep010_process_msgs $masterenv $clientenv 1 } - # Replace data. + # Replace data. for { set i 1 } { $i <= $niter } { incr i } { set t [$masterenv txn] set ret \ @@ -83,23 +110,19 @@ proc rep010 { method { niter 100 } { tnum "010" } args } { error_check_good db_put \ [$db1 put -txn $t $i [chop_data $method newdata$i]] 0 error_check_good txn_commit [$t commit] 0 - rep010_procmessages $masterenv $clientenv + rep010_process_msgs $masterenv $clientenv 1 } - # Try some aborts. These do not write permanent messages. + # Try some aborts. These do not write permanent messages. for { set i 1 } { $i <= $niter } { incr i } { set t [$masterenv txn] error_check_good db_put [$db1 put -txn $t $i abort$i] 0 error_check_good txn_abort [$t abort] 0 - rep010_procmessages $masterenv $clientenv + rep010_process_msgs $masterenv $clientenv 0 } puts "\tRep$tnum.b: Process messages with gaps." - # Reinitialize lists of permanent LSNs sent and received. - set perm_sent_list {} - set perm_rec_list {} - - # To test gaps in message processing, run and commit a whole + # To test gaps in message processing, run and commit a whole # bunch of transactions, then process the messages with skips. for { set i 1 } { $i <= $niter } { incr i } { set t [$masterenv txn] @@ -107,7 +130,7 @@ proc rep010 { method { niter 100 } { tnum "010" } args } { error_check_good txn_commit [$t commit] 0 } set skip [berkdb random_int 2 8] - rep010_procmessages $masterenv $clientenv $skip + rep010_process_msgs $masterenv $clientenv 1 $skip # Clean up. 
error_check_good db1_close [$db1 close] 0 @@ -117,10 +140,11 @@ proc rep010 { method { niter 100 } { tnum "010" } args } { replclose $testdir/MSGQUEUEDIR } -proc rep010_procmessages { masterenv clientenv {skip_interval 0} } { - global perm_response +proc rep010_process_msgs { masterenv clientenv check {skip_interval 0} } { + global perm_response_list global perm_sent_list - global perm_rec_list + + set perm_response_list {{}} while { 1 } { set nproced 0 @@ -128,11 +152,11 @@ proc rep010_procmessages { masterenv clientenv {skip_interval 0} } { incr nproced [replprocessqueue $masterenv 1 $skip_interval] incr nproced [replprocessqueue $clientenv 2 $skip_interval] - # In this test, the ISPERM and NOTPERM messages are + # In this test, the ISPERM and NOTPERM messages are # sent by the client back to the master. Verify that we # get ISPERM when the client is caught up to the master # (i.e. last client LSN in the log matches the LSN returned - # with the ISPERM), and that when we get NOTPERM, the client + # with the ISPERM), and that when we get NOTPERM, the client # is not caught up. # Create a list of the LSNs in the client log. @@ -140,44 +164,54 @@ proc rep010_procmessages { masterenv clientenv {skip_interval 0} } { set logc [$clientenv log_cursor] error_check_good logc \ [is_valid_logc $logc $clientenv] TRUE - for { set logrec [$logc get -first] } { [llength $logrec] != 0 } \ + for { set logrec [$logc get -first] } \ + { [llength $logrec] != 0 } \ { set logrec [$logc get -next] } { lappend lsnlist [lindex [lindex $logrec 0] 1] - } + } set lastloglsn [lindex $lsnlist end] - # Parse perm_response to find the LSN returned with + # Parse perm_response_list to find the LSN returned with # ISPERM or NOTPERM. - set permtype [lindex $perm_response 0] - set messagelsn [lindex [lindex $perm_response 1] 1] + set response [lindex $perm_response_list end] + set permtype [lindex $response 0] + set messagelsn [lindex [lindex $response 1] 1] - if { $perm_response != "" } { + if { [llength $response] != 0 } { if { $permtype == "NOTPERM" } { - # If we got a NOTPERM, the returned LSN has to + # If we got a NOTPERM, the returned LSN has to # be greater than the last LSN in the log. - error_check_good \ - notpermlsn [expr $messagelsn > $lastloglsn] 1 + error_check_good notpermlsn \ + [expr $messagelsn > $lastloglsn] 1 } elseif { $permtype == "ISPERM" } { - # If we got an ISPERM, the returned LSN has to + # If we got an ISPERM, the returned LSN has to # be in the log. error_check_bad \ ispermlsn [lsearch $lsnlist $messagelsn] -1 } else { puts "FAIL: unexpected message type $permtype" - } + } } error_check_good logc_close [$logc close] 0 # If we've finished processing all the messages, check - # that the last received permanent message LSN matches the - # last sent permanent message LSN. + # that the last received permanent message LSN matches the + # last sent permanent message LSN. 
if { $nproced == 0 } { - set last_sent [string index $perm_sent_list end] - set last_received [string index $perm_rec_list end] - error_check_good last_message $last_sent $last_received + if { $check != 0 } { + set last_sent [lindex $perm_sent_list end] + set last_rec_msg \ + [lindex $perm_response_list end] + set last_received [lindex $last_rec_msg 1] + error_check_good last_message \ + $last_sent $last_received + } + + # If we check correctly; empty out the lists + set perm_response_list {{}} + set perm_sent_list {{}} break } } } - diff --git a/db/test/rep011.tcl b/db/test/rep011.tcl index 4714a9e98..6fe119233 100644 --- a/db/test/rep011.tcl +++ b/db/test/rep011.tcl @@ -1,37 +1,55 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2003 +# Copyright (c) 2003-2004 # Sleepycat Software. All rights reserved. # -# $Id: rep011.tcl,v 1.3 2003/09/26 16:05:13 sandstro Exp $ +# $Id: rep011.tcl,v 1.10 2004/09/22 18:01:06 bostic Exp $ # # TEST rep011 # TEST Replication: test open handle across an upgrade. # TEST -# TEST Open and close test database in master environment. +# TEST Open and close test database in master environment. # TEST Update the client. Check client, and leave the handle # TEST to the client open as we close the masterenv and upgrade # TEST the client to master. Reopen the old master as client -# TEST and catch up. Test that we can still do a put to the -# TEST handle we created on the master while it was still a -# TEST client, and then make sure that the change can be -# TEST propagated back to the new client. +# TEST and catch up. Test that we can still do a put to the +# TEST handle we created on the master while it was still a +# TEST client, and then make sure that the change can be +# TEST propagated back to the new client. proc rep011 { method { tnum "011" } args } { global passwd - puts "Rep$tnum.a: Test upgrade of open handles ($method)." - - set envargs "" - rep011_sub $method $tnum $envargs $args - - puts "Rep$tnum.b: Open handle upgrade test with encryption ($method)." - append envargs " -encryptaes $passwd " - append args " -encrypt " - rep011_sub $method $tnum $envargs $args + set logsets [create_logsets 2] + set recopts { "" "-recover" } + foreach r $recopts { + foreach l $logsets { + set logindex [lsearch -exact $l "in-memory"] + if { $r == "-recover" && $logindex != -1 } { + puts "Rep$tnum: Skipping\ + for in-memory logs with -recover." + continue + } + set envargs "" + puts "Rep$tnum.a ($r $envargs):\ + Test upgrade of open handles ($method)." + puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client logs are [lindex $l 1]" + rep011_sub $method $tnum $envargs $l $r $args + + append envargs " -encryptaes $passwd " + append args " -encrypt " + + puts "Rep$tnum.b ($r $envargs):\ + Open handle upgrade test with encryption ($method)." + puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client logs are [lindex $l 1]" + rep011_sub $method $tnum $envargs $l $r $args + } + } } -proc rep011_sub { method tnum envargs largs } { +proc rep011_sub { method tnum envargs logset recargs largs } { source ./include.tcl global testdir global encrypt @@ -46,32 +64,37 @@ proc rep011_sub { method tnum envargs largs } { file mkdir $masterdir file mkdir $clientdir + set m_logtype [lindex $logset 0] + set c_logtype [lindex $logset 1] + + # In-memory logs require a large log buffer, and cannot + # be used with -txn nosync. 
+ set m_logargs [adjust_logargs $m_logtype] + set c_logargs [adjust_logargs $c_logtype] + set m_txnargs [adjust_txnargs $m_logtype] + set c_txnargs [adjust_txnargs $c_logtype] + # Open a master. repladd 1 - set masterenv \ - [eval {berkdb_env -create -lock_max 2500 -log_max 1000000} \ - $envargs {-home $masterdir -txn nosync -rep_master -rep_transport \ - [list 1 replsend]}] + set env_cmd(M) "berkdb_env -create -lock_max 2500 \ + -log_max 1000000 $m_logargs $envargs -home $masterdir \ + $m_txnargs -rep_master -rep_transport \ + \[list 1 replsend\]" + set masterenv [eval $env_cmd(M) $recargs] error_check_good master_env [is_valid_env $masterenv] TRUE # Open a client repladd 2 - set clientenv [eval {berkdb_env -create} $envargs -txn nosync \ - -lock_max 2500 {-home $clientdir -rep_client -rep_transport \ - [list 2 replsend]}] + set env_cmd(C) "berkdb_env -create -lock_max 2500 \ + $c_logargs $envargs -home $clientdir \ + $c_txnargs -rep_client -rep_transport \ + \[list 2 replsend\]" + set clientenv [eval $env_cmd(C) $recargs] error_check_good client_env [is_valid_env $clientenv] TRUE # Bring the client online by processing the startup messages. - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $clientenv 2] - - if { $nproced == 0 } { - break - } - } + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist # Open a test database on the master so we can test having # handles open across an upgrade. @@ -86,16 +109,7 @@ proc rep011_sub { method tnum envargs largs } { error_check_good master_upg_db_close [$master_upg_db close] 0 # Update the client. - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $clientenv 2] - - if { $nproced == 0 } { - break - } - } + process_msgs $envlist # Open the cross-upgrade database on the client and check its contents. set client_upg_db [berkdb_open \ @@ -117,16 +131,8 @@ proc rep011_sub { method tnum envargs largs } { -txn nosync -lock_max 2500 \ {-home $masterdir -rep_client -rep_transport [list 1 replsend]}] error_check_good newclient_env [is_valid_env $newclientenv] TRUE - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $newclientenv 1] - incr nproced [replprocessqueue $newmasterenv 2] - - if { $nproced == 0 } { - break - } - } + set envlist "{$newclientenv 1} {$newmasterenv 2}" + process_msgs $envlist # Test put to the database handle we opened back when the new master # was a client. @@ -135,16 +141,7 @@ proc rep011_sub { method tnum envargs largs } { error_check_good client_upg_db_put \ [$client_upg_db put -txn $puttxn hello there] 0 error_check_good puttxn_commit [$puttxn commit] 0 - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $newclientenv 1] - incr nproced [replprocessqueue $newmasterenv 2] - - if { $nproced == 0 } { - break - } - } + process_msgs $envlist # Close the new master's handle for the upgrade-test database; we # don't need it. Then check to make sure the client did in fact diff --git a/db/test/rep012.tcl b/db/test/rep012.tcl index ebfa253bb..640f2dbd9 100644 --- a/db/test/rep012.tcl +++ b/db/test/rep012.tcl @@ -1,25 +1,45 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2001-2003 +# Copyright (c) 2001-2004 # Sleepycat Software. All rights reserved. # -# $Id: rep012.tcl,v 11.6 2003/11/18 14:21:17 sue Exp $ +# $Id: rep012.tcl,v 11.12 2004/09/22 18:01:06 bostic Exp $ # -# TEST rep012 +# TEST rep012 # TEST Replication and dead DB handles. 
# TEST # TEST Run a modified version of test001 in a replicated master env. -# TEST Make additional changes to master, but not to the client. -# TEST Downgrade the master and upgrade the client with open db handles. -# TEST Verify that the roll back on clients gives dead db handles. +# TEST Make additional changes to master, but not to the client. +# TEST Downgrade the master and upgrade the client with open db handles. +# TEST Verify that the roll back on clients gives dead db handles. proc rep012 { method { niter 10 } { tnum "012" } args } { - global testdir - - puts "Rep$tnum: Replication and dead ($method) db handles." - set orig_tdir $testdir - set largs $args + set args [convert_args $method $args] + set logsets [create_logsets 3] + + # Run the body of the test with and without recovery. + set recopts { "" "-recover" } + foreach r $recopts { + foreach l $logsets { + set logindex [lsearch -exact $l "in-memory"] + if { $r == "-recover" && $logindex != -1 } { + puts "Rep$tnum: Skipping\ + for in-memory logs with -recover." + continue + } + puts "Rep$tnum ($method $r):\ + Replication and dead db handles." + puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client 0 logs are [lindex $l 1]" + puts "Rep$tnum: Client 1 logs are [lindex $l 2]" + rep012_sub $method $niter $tnum $l $r $args + } + } +} +proc rep012_sub { method niter tnum logset recargs largs } { + global testdir env_cleanup $testdir + set orig_tdir $testdir replsetup $testdir/MSGQUEUEDIR @@ -30,42 +50,60 @@ proc rep012 { method { niter 10 } { tnum "012" } args } { file mkdir $clientdir file mkdir $clientdir2 + set m_logtype [lindex $logset 0] + set c_logtype [lindex $logset 1] + set c2_logtype [lindex $logset 2] + + # In-memory logs require a large log buffer, and cannot + # be used with -txn nosync. + set m_logargs [adjust_logargs $m_logtype] + set c_logargs [adjust_logargs $c_logtype] + set c2_logargs [adjust_logargs $c2_logtype] + set m_txnargs [adjust_txnargs $m_logtype] + set c_txnargs [adjust_txnargs $c_logtype] + set c2_txnargs [adjust_txnargs $c2_logtype] + # Open a master. 
repladd 1 - set ma_envcmd "berkdb_env_noerr -create -txn nosync -lock_max 2500 \ + set ma_envcmd "berkdb_env_noerr -create $m_txnargs \ + $m_logargs -lock_max 2500 \ -errpfx ENV0 \ -home $masterdir -rep_transport \[list 1 replsend\]" -# set ma_envcmd "berkdb_env_noerr -create -txn nosync -lock_max 2500 \ +# set ma_envcmd "berkdb_env_noerr -create $m_txnargs \ +# $m_logargs -lock_max 2500 \ # -errpfx ENV0 -verbose {rep on} -errfile /dev/stderr \ # -home $masterdir -rep_transport \[list 1 replsend\]" - set env0 [eval $ma_envcmd -rep_master] + set env0 [eval $ma_envcmd $recargs -rep_master] set masterenv $env0 error_check_good master_env [is_valid_env $env0] TRUE # Open two clients repladd 2 - set cl_envcmd "berkdb_env_noerr -create -txn nosync -lock_max 2500 \ + set cl_envcmd "berkdb_env_noerr -create $c_txnargs \ + $c_logargs -lock_max 2500 \ -errpfx ENV1 \ -home $clientdir -rep_transport \[list 2 replsend\]" -# set cl_envcmd "berkdb_env_noerr -create -txn nosync -lock_max 2500 \ +# set cl_envcmd "berkdb_env_noerr -create $c_txnargs \ +# $c_logargs -lock_max 2500 \ # -errpfx ENV1 -verbose {rep on} -errfile /dev/stderr \ # -home $clientdir -rep_transport \[list 2 replsend\]" - set env1 [eval $cl_envcmd -rep_client] + set env1 [eval $cl_envcmd $recargs -rep_client] set clientenv $env1 error_check_good client_env [is_valid_env $env1] TRUE repladd 3 - set cl2_envcmd "berkdb_env_noerr -create -txn nosync -lock_max 2500 \ + set cl2_envcmd "berkdb_env_noerr -create $c2_txnargs \ + $c2_logargs -lock_max 2500 \ -errpfx ENV2 \ -home $clientdir2 -rep_transport \[list 3 replsend\]" -# set cl2_envcmd "berkdb_env_noerr -create -txn nosync -lock_max 2500 \ +# set cl2_envcmd "berkdb_env_noerr -create $c2_txnargs \ +# $c2_logargs -lock_max 2500 \ # -errpfx ENV2 -verbose {rep on} -errfile /dev/stderr \ # -home $clientdir2 -rep_transport \[list 3 replsend\]" - set cl2env [eval $cl2_envcmd -rep_client] + set cl2env [eval $cl2_envcmd $recargs -rep_client] error_check_good client2_env [is_valid_env $cl2env] TRUE set testfile "test$tnum.db" - set largs [convert_args $method $args] set omethod [convert_method $method] set env0db [eval {berkdb_open_noerr -env $env0 -auto_commit \ -create -mode 0644} $largs $omethod $testfile] @@ -73,17 +111,8 @@ proc rep012 { method { niter 10 } { tnum "012" } args } { error_check_good dbopen [is_valid_db $env0db] TRUE # Bring the clients online by processing the startup messages. - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $env0 1] - incr nproced [replprocessqueue $env1 2] - incr nproced [replprocessqueue $cl2env 3] - - if { $nproced == 0 } { - break - } - } + set envlist "{$env0 1} {$env1 2} {$cl2env 3}" + process_msgs $envlist set env1db [eval {berkdb_open_noerr -env $env1 -auto_commit \ -mode 0644} $largs $omethod $testfile] @@ -96,32 +125,16 @@ proc rep012 { method { niter 10 } { tnum "012" } args } { # Run a modified test001 in the master (and update clients). puts "\tRep$tnum.a: Running test001 in replicated env." 
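This and the surrounding hunks collapse the hand-written message loops (repeated replprocessqueue calls until a pass moves nothing) into a single process_msgs call over a list of {environment id} pairs. The helper itself lives in the suite, not in this patch; the sketch below is equivalent to the loops being removed. The optional second argument, which some later calls pass as 1, presumably selects PERM-response tracking and is simply accepted and ignored here; that interpretation is an assumption.

    # Minimal sketch of a process_msgs-style helper.  Each element of
    # elist is a two-item list {env machine-id}; replprocessqueue is
    # the suite's per-site message pump.
    proc sketch_process_msgs { elist { track_perm 0 } } {
        while { 1 } {
            set nproced 0
            foreach pair $elist {
                incr nproced [replprocessqueue \
                    [lindex $pair 0] [lindex $pair 1]]
            }
            # A full pass that moves nothing means every site is caught up.
            if { $nproced == 0 } {
                break
            }
        }
    }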
eval rep_test $method $masterenv $masterdb $niter 0 0 - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $env0 1] - incr nproced [replprocessqueue $env1 2] - incr nproced [replprocessqueue $cl2env 3] - - if { $nproced == 0 } { - break - } - } + process_msgs $envlist set nstart $niter puts "\tRep$tnum.b: Run test in master and client 2 only" - eval rep_test $method $masterenv $masterdb $niter $nstart 1 - while { 1 } { - set nproced 0 + eval rep_test $method $masterenv $masterdb $niter $nstart $nstart - incr nproced [replprocessqueue $env0 1] - # Ignore those for $env1 - incr nproced [replprocessqueue $cl2env 3] + # Ignore messages for $env1. + set envlist "{$env0 1} {$cl2env 3}" + process_msgs $envlist - if { $nproced == 0 } { - break - } - } # Nuke those for client about to become master. replclear 2 tclsleep 3 @@ -131,17 +144,9 @@ proc rep012 { method { niter 10 } { tnum "012" } args } { set clientenv $tmp error_check_good downgrade [$clientenv rep_start -client] 0 error_check_good upgrade [$masterenv rep_start -master] 0 - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $env0 1] - incr nproced [replprocessqueue $env1 2] - incr nproced [replprocessqueue $cl2env 3] + set envlist "{$env0 1} {$env1 2} {$cl2env 3}" + process_msgs $envlist - if { $nproced == 0 } { - break - } - } # # At this point, env0 should have rolled back across a txn commit. # If we do any operation on env0db, we should get an error that diff --git a/db/test/rep013.tcl b/db/test/rep013.tcl index 6dde33a59..f479806ab 100644 --- a/db/test/rep013.tcl +++ b/db/test/rep013.tcl @@ -1,26 +1,46 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2001-2003 +# Copyright (c) 2001-2004 # Sleepycat Software. All rights reserved. # -# $Id: rep013.tcl,v 11.4 2003/10/16 14:26:41 sue Exp $ +# $Id: rep013.tcl,v 11.11 2004/09/22 18:01:06 bostic Exp $ # -# TEST rep013 +# TEST rep013 # TEST Replication and swapping master/clients with open dbs. # TEST # TEST Run a modified version of test001 in a replicated master env. -# TEST Make additional changes to master, but not to the client. -# TEST Swap master and client. -# TEST Verify that the roll back on clients gives dead db handles. -# TEST Swap and verify several times. +# TEST Make additional changes to master, but not to the client. +# TEST Swap master and client. +# TEST Verify that the roll back on clients gives dead db handles. +# TEST Swap and verify several times. proc rep013 { method { niter 10 } { tnum "013" } args } { - global testdir - - puts "Rep$tnum: Replication and ($method) master/client swapping." - set orig_tdir $testdir - set largs $args + set args [convert_args $method $args] + set logsets [create_logsets 3] + + # Run the body of the test with and without recovery. + set recopts { "" "-recover" } + foreach r $recopts { + foreach l $logsets { + set logindex [lsearch -exact $l "in-memory"] + if { $r == "-recover" && $logindex != -1 } { + puts "Rep$tnum: Skipping\ + for in-memory logs with -recover." + continue + } + puts "Rep$tnum ($r):\ + Replication and ($method) master/client swapping." 
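rep013, whose wrapper starts above, repeatedly swaps the master and a client while database handles stay open; the rep012 hunk above performs the same swap once. Condensed into one helper, a swap round looks like the sketch below. The proc name and argument order are mine; replclear, rep_start, error_check_good and process_msgs are the suite calls the hunks themselves use, and newmasterid is the machine id of the client being promoted.

    # One master/client swap round, condensed from the hunks above and below.
    proc sketch_swap_roles { oldmaster oldclient newmasterid envlist } {
        # Drop messages still queued for the site about to become master.
        replclear $newmasterid
        # Demote the old master, then promote the old client.
        error_check_good downgrade [$oldmaster rep_start -client] 0
        error_check_good upgrade [$oldclient rep_start -master] 0
        # Let every site catch up with the new master.
        process_msgs $envlist
    }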
+ puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client 0 logs are [lindex $l 1]" + puts "Rep$tnum: Client 1 logs are [lindex $l 2]" + rep013_sub $method $niter $tnum $l $r $args + } + } +} +proc rep013_sub { method niter tnum logset recargs largs } { + global testdir env_cleanup $testdir + set orig_tdir $testdir replsetup $testdir/MSGQUEUEDIR @@ -31,45 +51,67 @@ proc rep013 { method { niter 10 } { tnum "013" } args } { file mkdir $clientdir file mkdir $clientdir2 + set m_logtype [lindex $logset 0] + set c_logtype [lindex $logset 1] + set c2_logtype [lindex $logset 2] + + # In-memory logs require a large log buffer, and cannot + # be used with -txn nosync. + set m_logargs [adjust_logargs $m_logtype] + set c_logargs [adjust_logargs $c_logtype] + set c2_logargs [adjust_logargs $c2_logtype] + set m_txnargs [adjust_txnargs $m_logtype] + set c_txnargs [adjust_txnargs $c_logtype] + set c2_txnargs [adjust_txnargs $c2_logtype] + + # Set number of swaps between master and client. set nswap 6 # Open a master. repladd 1 - set ma_envcmd "berkdb_env_noerr -create -txn nosync -lock_max 2500 \ - -errpfx ENV1 \ + set ma_envcmd "berkdb_env_noerr -create $m_txnargs \ + $m_logargs -lock_max 2500 -errpfx ENV1 \ + -cachesize {0 4194304 3} \ -home $masterdir -rep_transport \[list 1 replsend\]" -# set ma_envcmd "berkdb_env_noerr -create -txn nosync -lock_max 2500 \ +# set ma_envcmd "berkdb_env_noerr -create $m_txnargs \ +# $m_logargs -lock_max 2500 \ +# -cachesize {0 4194304 3} \ # -errpfx ENV1 -verbose {recovery on} -errfile /dev/stderr \ # -home $masterdir -rep_transport \[list 1 replsend\]" - set env1 [eval $ma_envcmd -rep_master] + set env1 [eval $ma_envcmd $recargs -rep_master] error_check_good master_env [is_valid_env $env1] TRUE # Open two clients repladd 2 - set cl_envcmd "berkdb_env_noerr -create -txn nosync -lock_max 2500 \ - -errpfx ENV2 \ + set cl_envcmd "berkdb_env_noerr -create $c_txnargs \ + $c_logargs -lock_max 2500 -errpfx ENV2 \ + -cachesize {0 2097152 2} \ -home $clientdir -rep_transport \[list 2 replsend\]" -# set cl_envcmd "berkdb_env_noerr -create -txn nosync -lock_max 2500 \ +# set cl_envcmd "berkdb_env_noerr -create $c_txnargs \ +# $c_logargs -lock_max 2500 \ +# -cachesize {0 2097152 2} \ # -errpfx ENV2 -verbose {recovery on} -errfile /dev/stderr \ # -home $clientdir -rep_transport \[list 2 replsend\]" - set env2 [eval $cl_envcmd -rep_client] + set env2 [eval $cl_envcmd $recargs -rep_client] error_check_good client_env [is_valid_env $env2] TRUE repladd 3 - set cl2_envcmd "berkdb_env_noerr -create -txn nosync -lock_max 2500 \ - -errpfx ENV3 \ + set cl2_envcmd "berkdb_env_noerr -create $c2_txnargs \ + $c2_logargs -lock_max 2500 -errpfx ENV3 \ + -cachesize {0 1048576 1} \ -home $clientdir2 -rep_transport \[list 3 replsend\]" -# set cl2_envcmd "berkdb_env_noerr -create -txn nosync -lock_max 2500 \ +# set cl2_envcmd "berkdb_env_noerr -create $c2_txnargs \ +# $c2_logargs -lock_max 2500 \ +# -cachesize {0 1048576 1} \ # -errpfx ENV3 -verbose {recovery on} -errfile /dev/stderr \ # -home $clientdir2 -rep_transport \[list 3 replsend\]" - set cl2env [eval $cl2_envcmd -rep_client] + set cl2env [eval $cl2_envcmd $recargs -rep_client] error_check_good client2_env [is_valid_env $cl2env] TRUE set testfile "test$tnum.db" - set largs [convert_args $method $args] set omethod [convert_method $method] - + set env1db_cmd "berkdb_open_noerr -env $env1 -auto_commit \ -create -mode 0644 $largs $omethod $testfile" set env1db [eval $env1db_cmd] @@ -84,17 +126,8 @@ proc rep013 { method { niter 10 } 
{ tnum "013" } args } { error_check_good cr_str [is_substr $ret "invalid"] 1 # Bring the clients online by processing the startup messages. - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $env1 1] - incr nproced [replprocessqueue $env2 2] - incr nproced [replprocessqueue $cl2env 3] - - if { $nproced == 0 } { - break - } - } + set envlist "{$env1 1} {$env2 2} {$cl2env 3}" + process_msgs $envlist set env2db_cmd "berkdb_open_noerr -env $env2 -auto_commit \ -mode 0644 $largs $omethod $testfile" @@ -116,24 +149,15 @@ proc rep013 { method { niter 10 } { tnum "013" } args } { set clientdb $env2db set cid 2 set mdb_cmd "berkdb_open_noerr -env $masterenv -auto_commit \ - -mode 0644 $args $omethod $testfile" + -mode 0644 $largs $omethod $testfile" set cdb_cmd "berkdb_open_noerr -env $clientenv -auto_commit \ - -mode 0644 $args $omethod $testfile" + -mode 0644 $largs $omethod $testfile" # Run a modified test001 in the master (and update clients). puts "\tRep$tnum.a: Running test001 in replicated env." eval rep_test $method $masterenv $masterdb $niter 0 0 - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $env1 1] - incr nproced [replprocessqueue $env2 2] - incr nproced [replprocessqueue $cl2env 3] - - if { $nproced == 0 } { - break - } - } + set envlist "{$env1 1} {$env2 2} {$cl2env 3}" + process_msgs $envlist set nstart 0 for { set i 0 } { $i < $nswap } { incr i } { @@ -170,17 +194,11 @@ proc rep013 { method { niter 10 } { tnum "013" } args } { set nstart [expr $nstart + $niter] puts "\tRep$tnum.c.$i: Run test in master and client2 only" - eval rep_test $method $masterenv $masterdb $niter $nstart 1 - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $masterenv $mid] - incr nproced [replprocessqueue $cl2env 3] - - if { $nproced == 0 } { - break - } - } + eval rep_test \ + $method $masterenv $masterdb $niter $nstart $nstart + set envlist "{$masterenv $mid} {$cl2env 3}" + process_msgs $envlist + # Nuke those for client about to become master. replclear $cid @@ -204,17 +222,8 @@ proc rep013 { method { niter 10 } { tnum "013" } args } { puts "\tRep$tnum.d.$i: Swap: master $mid, client $cid" error_check_good downgrade [$clientenv rep_start -client] 0 error_check_good upgrade [$masterenv rep_start -master] 0 - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $env1 1] - incr nproced [replprocessqueue $env2 2] - incr nproced [replprocessqueue $cl2env 3] - - if { $nproced == 0 } { - break - } - } + set envlist "{$env1 1} {$env2 2} {$cl2env 3}" + process_msgs $envlist } puts "\tRep$tnum.e: Closing" error_check_good masterdb [$masterdb close] 0 diff --git a/db/test/rep014.tcl b/db/test/rep014.tcl new file mode 100644 index 000000000..1a94748ea --- /dev/null +++ b/db/test/rep014.tcl @@ -0,0 +1,155 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2001-2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep014.tcl,v 11.7 2004/09/22 18:01:06 bostic Exp $ +# +# TEST rep014 +# TEST Replication and multiple replication handles. +# TEST Test multiple client handles, opening and closing to +# TEST make sure we get the right openfiles. +# +proc rep014 { method { niter 10 } { tnum "014" } args } { + global is_hp_test + + # We can't open two envs on HP-UX, so just skip the + # whole test since that is at the core of it. + if { $is_hp_test == 1 } { + puts "Rep$tnum: Skipping for HP-UX." 
+ return + } + set args [convert_args $method $args] + set logsets [create_logsets 2] + + # Run the body of the test with and without recovery. + set recopts { "" "-recover" } + foreach r $recopts { + foreach l $logsets { + set logindex [lsearch -exact $l "in-memory"] + if { $r == "-recover" && $logindex != -1 } { + puts "Rep$tnum: Skipping\ + for in-memory logs with -recover." + continue + } + puts "Rep$tnum ($method $r): Replication and openfiles." + puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client logs are [lindex $l 1]" + rep014_sub $method $niter $tnum $l $r $args + } + } +} + +proc rep014_sub { method niter tnum logset recargs largs } { + global testdir + env_cleanup $testdir + set orig_tdir $testdir + + replsetup $testdir/MSGQUEUEDIR + + set masterdir $testdir/MASTERDIR + set clientdir $testdir/CLIENTDIR + file mkdir $masterdir + file mkdir $clientdir + + set m_logtype [lindex $logset 0] + set c_logtype [lindex $logset 1] + + # In-memory logs require a large log buffer, and cannot + # be used with -txn nosync. + set m_logargs [adjust_logargs $m_logtype] + set c_logargs [adjust_logargs $c_logtype] + set m_txnargs [adjust_txnargs $m_logtype] + set c_txnargs [adjust_txnargs $c_logtype] + + # Open a master. + repladd 1 + set ma_envcmd "berkdb_env -create $m_txnargs \ + $m_logargs -lock_max 2500 \ + -home $masterdir -rep_transport \[list 1 replsend\]" +# set ma_envcmd "berkdb_env -create $m_txnargs \ +# $m_logargs -lock_max 2500 \ +# -errpfx MASTER -verbose {rep on} \ +# -home $masterdir -rep_transport \[list 1 replsend\]" + set env0 [eval $ma_envcmd $recargs -rep_master] + set masterenv $env0 + error_check_good master_env [is_valid_env $env0] TRUE + + # Open a client. + repladd 2 + set cl_envcmd "berkdb_env -create $c_txnargs \ + $c_logargs -lock_max 2500 \ + -home $clientdir -rep_transport \[list 2 replsend\]" +# set cl_envcmd "berkdb_env -create $c_txnargs \ +# $c_logargs -lock_max 2500 \ +# -errpfx CLIENT1 -verbose {rep on} \ +# -home $clientdir -rep_transport \[list 2 replsend\]" + set env1 [eval $cl_envcmd $recargs] + error_check_good client_env [is_valid_env $env1] TRUE + set env2 [eval $cl_envcmd] + error_check_good client_env [is_valid_env $env2] TRUE + + error_check_good e1_cl [$env1 rep_start -client] 0 + + set testfile "test$tnum.db" + set omethod [convert_method $method] + set env0db [eval {berkdb_open_noerr -env $env0 -auto_commit \ + -create -mode 0644} $largs $omethod $testfile] + set masterdb $env0db + error_check_good dbopen [is_valid_db $env0db] TRUE + + # Bring the clients online by processing the startup messages. + set envlist "{$env0 1} {$env1 2}" + process_msgs $envlist + + # Run a modified test001 in the master (and update clients). + puts "\tRep$tnum.a: Running test001 in replicated env." + eval rep_test $method $masterenv $masterdb $niter 0 0 + process_msgs $envlist + + puts "\tRep$tnum.b: Close and reopen client env." + error_check_good env1_close [$env1 close] 0 + set env1 [eval $cl_envcmd] + error_check_good client_env [is_valid_env $env1] TRUE + error_check_good e1_cl [$env1 rep_start -client] 0 + + puts "\tRep$tnum.c: Run test in master again." + set start $niter + eval rep_test $method $masterenv $masterdb $niter $start 0 + set envlist "{$env0 1} {$env1 2}" + process_msgs $envlist + + puts "\tRep$tnum.d: Start and close 2nd client env." 
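The step below labels the second handle with errpfx at run time, the same switches the commented-out environment commands in these tests would set at open time with -errpfx, -verbose {rep on} and -errfile. All three diagnostics can be flipped on any open handle; a small sketch (the proc name is mine, the subcommands appear verbatim elsewhere in this patch):

    # Enable replication diagnostics on an already-open environment handle.
    proc sketch_enable_rep_verbose { env prefix } {
        $env errpfx $prefix        ;# label this handle's output
        $env errfile /dev/stderr   ;# send it to stderr
        $env verbose rep on        ;# trace replication messages
    }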
+ error_check_good e2_pfx [$env2 errpfx CLIENT2] 0 + error_check_good e2_cl [$env2 rep_start -client] 0 + error_check_good env2_close [$env2 close] 0 + + puts "\tRep$tnum.e: Run test in master again." + set start [expr $start + $niter] + error_check_good e1_pfx [$env1 errpfx CLIENT1] 0 + eval rep_test $method $masterenv $masterdb $niter $start 0 + process_msgs $envlist + + puts "\tRep$tnum.f: Open env2, close env1, use env2." + set env2 [eval $cl_envcmd] + error_check_good client_env [is_valid_env $env2] TRUE + error_check_good e1_pfx [$env2 errpfx CLIENT2] 0 + error_check_good e2_cl [$env2 rep_start -client] 0 + error_check_good e1_pfx [$env1 errpfx CLIENT1] 0 + error_check_good env1_close [$env1 close] 0 + + puts "\tRep$tnum.g: Run test in master again." + set start [expr $start + $niter] + error_check_good e1_pfx [$env2 errpfx CLIENT2] 0 + eval rep_test $method $masterenv $masterdb $niter $start 0 + set envlist "{$env0 1} {$env2 2}" + process_msgs $envlist + + puts "\tRep$tnum.h: Closing" + error_check_good env0db [$env0db close] 0 + error_check_good env0_close [$env0 close] 0 + error_check_good env2_close [$env2 close] 0 + replclose $testdir/MSGQUEUEDIR + set testdir $orig_tdir + return +} diff --git a/db/test/rep015.tcl b/db/test/rep015.tcl new file mode 100644 index 000000000..4cc4dfd0e --- /dev/null +++ b/db/test/rep015.tcl @@ -0,0 +1,300 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2003-2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep015.tcl,v 11.6 2004/09/22 18:01:06 bostic Exp $ +# +# TEST rep015 +# TEST Locking across multiple pages with replication. +# TEST +# TEST Open master and client with small pagesize and +# TEST generate more than one page and generate off-page +# TEST dups on the first page (second key) and last page +# TEST (next-to-last key). +# TEST Within a single transaction, for each database, open +# TEST 2 cursors and delete the first and last entries (this +# TEST exercises locks on regular pages). Intermittently +# TEST update client during the process. +# TEST Within a single transaction, for each database, open +# TEST 2 cursors. Walk to the off-page dups and delete one +# TEST from each end (this exercises locks on off-page dups). +# TEST Intermittently update client. +# +proc rep015 { method { nentries 100 } { tnum "015" } { ndb 3 } args } { + global rand_init + berkdb srand $rand_init + set args [convert_args $method $args] + set logsets [create_logsets 2] + + if { [is_btree $method] == 0 } { + puts "Skipping rep$tnum for method $method." + return + } + + # Run the body of the test with and without recovery. + set recopts { "" "-recover" } + foreach r $recopts { + foreach l $logsets { + set logindex [lsearch -exact $l "in-memory"] + if { $r == "-recover" && $logindex != -1 } { + puts "Rep$tnum: Skipping for in-memory logs with -recover." + continue + } + puts "Rep$tnum ($method $r):\ + Replication and locking." 
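Like the other wrappers in this patch, rep015 builds its configuration matrix from create_logsets and indexes the result with lindex, as the next lines show. The generator is not part of this patch; the sketch below naively enumerates every on-disk/in-memory combination per site, one log-type list per site, and is only an illustration: the suite's real helper presumably prunes the set according to its mixed-mode configuration.

    # Naive sketch of a create_logsets-style generator.
    proc sketch_create_logsets { nsites } {
        set sets [list {}]
        for { set i 0 } { $i < $nsites } { incr i } {
            set next {}
            foreach s $sets {
                lappend next [concat $s [list on-disk]]
                lappend next [concat $s [list in-memory]]
            }
            set sets $next
        }
        return $sets
    }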
+ puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client logs are [lindex $l 1]" + rep015_sub $method $nentries $tnum $ndb $l $r $args + } + } +} + +proc rep015_sub { method nentries tnum ndb logset recargs largs } { + global testdir + env_cleanup $testdir + set omethod [convert_method $method] + + replsetup $testdir/MSGQUEUEDIR + + set masterdir $testdir/MASTERDIR + set clientdir $testdir/CLIENTDIR + + file mkdir $masterdir + file mkdir $clientdir + + set m_logtype [lindex $logset 0] + set c_logtype [lindex $logset 1] + + # In-memory logs require a large log buffer, and cannot + # be used with -txn nosync. + set m_logargs [adjust_logargs $m_logtype] + set c_logargs [adjust_logargs $c_logtype] + set m_txnargs [adjust_txnargs $m_logtype] + set c_txnargs [adjust_txnargs $c_logtype] + + # Open a master. + repladd 1 + set ma_envcmd "berkdb_env_noerr -create $m_txnargs \ + $m_logargs -lock_max 2500 \ + -home $masterdir -rep_transport \[list 1 replsend\]" +# set ma_envcmd "berkdb_env_noerr -create $m_txnargs \ +# $m_logargs -lock_max 2500 \ +# -verbose {rep on} -errpfx MASTER \ +# -home $masterdir -rep_transport \[list 1 replsend\]" + set masterenv [eval $ma_envcmd $recargs -rep_master] + error_check_good master_env [is_valid_env $masterenv] TRUE + + # Open a client + repladd 2 + set cl_envcmd "berkdb_env_noerr -create $c_txnargs \ + $c_logargs -lock_max 2500 \ + -home $clientdir -rep_transport \[list 2 replsend\]" +# set cl_envcmd "berkdb_env_noerr -create $c_txnargs \ +# $c_logargs -lock_max 2500 \ +# -verbose {rep on} -errpfx CLIENT \ +# -home $clientdir -rep_transport \[list 2 replsend\]" + set clientenv [eval $cl_envcmd $recargs -rep_client] + error_check_good client_env [is_valid_env $clientenv] TRUE + + # Bring the clients online by processing the startup messages. + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist + + # Set up the master databases. The small pagesize quickly + # generates multiple pages and off-page dups. + set pagesize 512 + puts "\tRep$tnum.a: Create and populate databases in master." + for { set i 0 } { $i < $ndb } { incr i } { + set db [eval "berkdb_open -create $omethod -auto_commit \ + -pagesize $pagesize -env $masterenv $largs -dup testdb$i.db"] + set dblist($i) $db + # + # Populate, being sure to create multiple pages. + # The non-duplicate entries are pairs of the form + # {1, data1} {2, data2}. The duplicates are pairs of + # the form {2, dup1} {2, dup2}, {2, dup3}, etc. + # + for { set j 1 } { $j <= $nentries } { incr j } { + set t [$masterenv txn] + error_check_good put_$db [eval $db put -txn $t \ + $j [chop_data $method data$j]] 0 + error_check_good txn_commit [$t commit] 0 + } + # Create off-page dups on key 2 and next-to-last key. + set t [$masterenv txn] + for { set j 1 } { $j <= $nentries } { incr j } { + error_check_good put_second [eval $db put -txn $t \ + 2 [chop_data $method dup$j]] 0 + error_check_good put_next_to_last [eval $db put \ + -txn $t \ + [expr $nentries - 1] [chop_data $method dup$j]] 0 + } + error_check_good txn_commit [$t commit] 0 + # Make sure there are off-page dups. + set stat [$db stat] + error_check_bad stat:offpage \ + [is_substr $stat "{{Internal pages} 0}"] 1 + } + + puts "\tRep$tnum.b: Propagate setup to clients." + process_msgs $envlist + + # Open client databases so we can exercise locking there too. 
+ for { set i 0 } { $i < $ndb } { incr i } { + set cdb [eval {berkdb_open} -auto_commit \ + -env $clientenv $largs testdb$i.db] + set cdblist($i) $cdb + } + + # Set up two cursors into each db. Randomly select a cursor + # and do the next thing: position, delete, or close. + foreach option { regular off-page } { + puts "\tRep$tnum.c: Transactional cursor deletes ($option)." + + set t [$masterenv txn] + # Set up two cursors into each db, and initialize the next + # action to be done to POSITION. + for { set i 0 } { $i < [expr $ndb * 2] } { incr i } { + set db $dblist([expr $i / 2]) + set mcurs($i) [eval {$db cursor} -txn $t] + error_check_good mcurs$i \ + [is_valid_cursor $mcurs($i) $db] TRUE + set cnext($i) POSITION + } + + set ct [$clientenv txn] + # Set up two cursors into each client db. + for { set i 0 } { $i < [expr $ndb * 2] } { incr i } { + set cdb $cdblist([expr $i / 2]) + set ccurs($i) [eval {$cdb cursor} -txn $ct] + error_check_good ccurs$i \ + [is_valid_cursor $ccurs($i) $cdb] TRUE + } + + # Randomly pick a cursor to operate on and do the next thing. + # At POSITION, we position that cursor. At DELETE, we delete + # the current item. At CLOSE, we close the cursor. At DONE, + # we do nothing except check to see if all cursors have reached + # DONE, and quit when they have. + # On the off-page dup test, walk to reach an off-page entry, + # and delete that one. + set k 0 + while { 1 } { + # Every nth time through, update the client. +# set n 5 +# if {[expr $k % $n] == 0 } { +# puts "Updating clients" +# process_msgs $envlist +# } +# incr k + set i [berkdb random_int 0 [expr [expr $ndb * 2] - 1]] + set next $cnext($i) + switch -exact -- $next { + POSITION { + do_position $mcurs($i) \ + $i $nentries $option + set cnext($i) DELETE + # Position the client cursors too. + do_position $ccurs($i) \ + $i $nentries $option + } + DELETE { + error_check_good c_del \ + [$mcurs($i) del] 0 + set cnext($i) CLOSE + # Update clients after a delete. + process_msgs $envlist + } + CLOSE { + error_check_good c_close.$i \ + [$mcurs($i) close] 0 + set cnext($i) DONE + # Close the client cursor too. + error_check_good cc_close.$i \ + [$ccurs($i) close] 0 + } + DONE { + set breakflag 1 + for { set j 0 } \ + { $j < [expr $ndb * 2] } \ + { incr j } { + if { $cnext($j) != "DONE" } { + set breakflag 0 + } + } + if { $breakflag == 1 } { + break + } + } + default { + puts "FAIL: Unrecognized \ + next action $next" + } + } + } + error_check_good txn_commit [$t commit] 0 + error_check_good clienttxn_commit [$ct commit] 0 + process_msgs $envlist + } + + # Clean up. 
+ for { set i 0 } { $i < $ndb } { incr i } { + set db $dblist($i) + error_check_good close_$db [$db close] 0 + set cdb $cdblist($i) + error_check_good close_$cdb [$cdb close] 0 + } + + error_check_good masterenv_close [$masterenv close] 0 + error_check_good clientenv_close [$clientenv close] 0 + replclose $testdir/MSGQUEUEDIR + return +} + +proc do_position { cursor i nentries option } { + if { [expr $i % 2] == 0 } { + if { $option == "regular" } { + set ret [$cursor get -first] + set key [lindex [lindex $ret 0] 0] + set data [lindex [lindex $ret 0] 1] + error_check_good get_first \ + [string range $data 4 end] $key + } elseif { $option == "off-page" } { + set ret [$cursor get -set 2] + error_check_good get_key_2 \ + [lindex [lindex $ret 0] 0] 2 + error_check_good get_data_2 \ + [lindex [lindex $ret 0] 1] data2 + for { set j 1 } { $j <= 95 } { incr j } { + set ret [$cursor get -nextdup] + error_check_good key_nextdup$j \ + [lindex [lindex $ret 0] 0] 2 + error_check_good data_nextdup$j \ + [lindex [lindex $ret 0] 1] dup$j + } + } + } else { + if { $option == "regular" } { + set ret [$cursor get -set $nentries] + set key [lindex [lindex $ret 0] 0] + set data [lindex [lindex $ret 0] 1] + error_check_good get_set_$nentries \ + [string range $data 4 end] $key + } elseif { $option == "off-page" } { + set ret [$cursor get -last] + set key [lindex [lindex $ret 0] 0] + set data [lindex [lindex $ret 0] 1] + error_check_good get_last \ + [string range $data 3 end] [expr $key + 1] + for { set j 1 } { $j <= 5 } { incr j } { + set ret [$cursor get -prev] + set key [lindex [lindex $ret 0] 0] + set data [lindex [lindex $ret 0] 1] + error_check_good get_prev \ + [string range $data 3 end] \ + [expr [expr $key + 1] - $j] + } + } + } +} diff --git a/db/test/rep016.tcl b/db/test/rep016.tcl new file mode 100644 index 000000000..f41a29199 --- /dev/null +++ b/db/test/rep016.tcl @@ -0,0 +1,264 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2002-2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep016.tcl,v 11.13 2004/09/22 18:01:06 bostic Exp $ +# +# TEST rep016 +# TEST Replication election test with varying required nvotes. +# TEST +# TEST Run a modified version of test001 in a replicated master environment; +# TEST hold an election among a group of clients to make sure they select +# TEST the master with varying required participants. + +proc rep016 { method args } { + global errorInfo + set tnum "016" + + if { [is_btree $method] == 0 } { + puts "Rep$tnum: Skipping for method $method." + return + } + + set nclients 5 + set logsets [create_logsets [expr $nclients + 1]] + + # Run the body of the test with and without recovery. + set recopts { "" "-recover" } + foreach r $recopts { + foreach l $logsets { + set logindex [lsearch -exact $l "in-memory"] + if { $r == "-recover" && $logindex != -1 } { + puts "Rep$tnum: Skipping\ + for in-memory logs with -recover." + continue + } + puts "Rep$tnum ($method $r): \ + Replication elections with varying nvotes." 
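rep016 drives its real elections through the suite's run_election helper, but the error-checking hunks further down call the underlying Tcl method directly: rep_elect with nsites, nvotes, priority and timeout. A minimal stand-alone attempt is sketched below, using the same 5000000 timeout value as those checks; the handle name is illustrative, and a winner only emerges once the election messages are pumped through the replication transport, which is what run_election coordinates.

    # One election attempt on an open replication client handle.
    set nsites 2
    set nvotes 2
    set priority 1
    set timeout 5000000
    set res [catch \
        {$clientenv rep_elect $nsites $nvotes $priority $timeout} ret]
    if { $res != 0 } {
        # Argument errors come back here, as the rep016 checks rely on.
        puts "Election attempt failed: $ret"
    }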
+ puts "Rep$tnum: Master logs are [lindex $l 0]" + for { set i 0 } { $i < $nclients } { incr i } { + puts "Rep$tnum: Client $i logs are\ + [lindex $l [expr $i + 1]]" + } + rep016_sub $method $nclients $tnum $l $r $args + } + } +} + +proc rep016_sub { method nclients tnum logset recargs args } { + source ./include.tcl + set niter 5 + + env_cleanup $testdir + + set qdir $testdir/MSGQUEUEDIR + replsetup $qdir + + set masterdir $testdir/MASTERDIR + file mkdir $masterdir + + set m_logtype [lindex $logset 0] + set m_logargs [adjust_logargs $m_logtype] + set m_txnargs [adjust_txnargs $m_logtype] + + for { set i 0 } { $i < $nclients } { incr i } { + set clientdir($i) $testdir/CLIENTDIR.$i + file mkdir $clientdir($i) + set c_logtype($i) [lindex $logset [expr $i + 1]] + set c_logargs($i) [adjust_logargs $c_logtype($i)] + set c_txnargs($i) [adjust_txnargs $c_logtype($i)] + } + +# To debug elections, the lines to uncomment are below the +# error checking portion of this test. This is needed in order +# for the error messages to come back in errorInfo and for +# that portion of the test to pass. + # Open a master. + set envlist {} + repladd 1 + set env_cmd(M) "berkdb_env -create -log_max 1000000 -home $masterdir \ + $m_txnargs $m_logargs -rep_master \ + -errpfx MASTER -rep_transport \[list 1 replsend\]" + set masterenv [eval $env_cmd(M) $recargs] + error_check_good master_env [is_valid_env $masterenv] TRUE + lappend envlist "$masterenv 1" + + # Open the clients. + for { set i 0 } { $i < $nclients } { incr i } { + set envid [expr $i + 2] + repladd $envid + set env_cmd($i) "berkdb_env_noerr -create -home $clientdir($i) \ + $c_txnargs($i) $c_logargs($i) -rep_client \ + -rep_transport \[list $envid replsend\]" + set clientenv($i) [eval $env_cmd($i) $recargs] + error_check_good \ + client_env($i) [is_valid_env $clientenv($i)] TRUE + lappend envlist "$clientenv($i) $envid" + } + # Bring the clients online by processing the startup messages. + process_msgs $envlist + + # Run a modified test001 in the master. + puts "\tRep$tnum.a: Running rep_test in replicated env." + eval rep_test $method $masterenv NULL $niter 0 0 + process_msgs $envlist + error_check_good masterenv_close [$masterenv close] 0 + set envlist [lreplace $envlist 0 0] + + puts "\tRep$tnum.b: Error values for rep_elect" + # + # Do all the error catching in client0. We do not need to call + # start_election here to fork a process because we never get + # far enough to send/receive any messages. We just want to + # check the error message. + # + # !!! + # We cannot set -errpfx or -errfile or anything in the + # env_cmd above. Otherwise the correct output won't be set + # in 'ret' below and the test will fail. + # + # First check negative nvotes. + # + set nsites [expr $nclients + 1] + set priority 2 + set timeout 5000000 + set nvotes -1 + set res [catch {$clientenv(0) rep_elect $nsites $nvotes $priority \ + $timeout} ret] + error_check_bad catch $res 0 + error_check_good ret [is_substr $ret "may not be negative"] 1 + + # + # Check zero/negative nsites + # + set nsites 0 + set nvotes 2 + set res [catch {$clientenv(0) rep_elect $nsites $nvotes $priority \ + $timeout} ret] + error_check_bad catch $res 0 + error_check_good ret [is_substr $ret "must be greater than 0"] 1 + + set nsites -1 + set res [catch {$clientenv(0) rep_elect $nsites $nvotes $priority \ + $timeout} ret] + error_check_bad catch $res 0 + error_check_good ret [is_substr $ret "must be greater than 0"] 1 + + # + # Check nvotes > nsites. 
+ # + set nsites $nclients + set nvotes [expr $nsites + 1] + set res [catch {$clientenv(0) rep_elect $nsites $nvotes $priority \ + $timeout} ret] + error_check_bad catch $res 0 + error_check_good ret [is_substr $ret "is larger than nsites"] 1 + + # + # Check negative priority. + # + set nvotes $nsites + set priority -1 + set res [catch {$clientenv(0) rep_elect $nsites $nvotes $priority \ + $timeout} ret] + error_check_bad catch $res 0 + error_check_good ret [is_substr $ret "may not be negative"] 1 + +# To debug elections, uncomment the lines below to turn on verbose +# and set the errfile. Also edit reputils.tcl +# in proc start_election and swap the 2 commented lines with +# their counterpart. + for { set i 0 } { $i < $nclients } { incr i } { + replclear [expr $i + 2] + # + # This test doesn't use the testing hooks, so + # initialize err_cmd and crash appropriately. + # + set err_cmd($i) "none" + set crash($i) 0 + # + # Initialize the array pri. We'll set it to + # appropriate values when the winner is determined. + # + set pri($i) 0 + # +# error_check_good pfx [$clientenv($i) errpfx CLIENT$i] 0 +# error_check_good verb [$clientenv($i) verbose rep on] 0 +# $clientenv($i) errfile /dev/stderr +# set env_cmd($i) [concat $env_cmd($i) \ +# "-errpfx CLIENT$i -verbose {rep on} -errfile /dev/stderr"] + } + set m "Rep$tnum.c" + puts "\t$m: Check single master/client can elect itself" + # + # 2 sites: 1 master, 1 client. Allow lone client to elect itself. + # Adjust client env list to reflect the single client. + # + set oldenvlist $envlist + set envlist [lreplace $envlist 1 end] + set nsites 2 + set nvotes 1 + set orig_ncl $nclients + set nclients 1 + set elector 0 + set winner 0 + setpriority pri $nclients $winner + run_election env_cmd envlist err_cmd pri crash\ + $qdir $m $elector $nsites $nvotes $nclients $winner 1 + + # + # Now run with all clients. Client0 should always get elected + # because it became master and should have a bigger LSN. + # + set nclients $orig_ncl + set envlist [lreplace $oldenvlist 0 0 [lindex $envlist 0]] + + set m "Rep$tnum.d" + puts "\t$m: Elect with 100% client participation" + set nsites $nclients + set nvotes $nclients + set winner [rep016_selectwinner $nsites $nvotes $nclients] + setpriority pri $nclients $winner + run_election env_cmd envlist err_cmd pri crash\ + $qdir $m $elector $nsites $nvotes $nclients $winner 1 + + # + # Elect with varying levels of participation. Start with nsites + # as nclients+1 (simulating a down master) and require nclients, + # and fewer (by 1) until we get down to 2 clients. + # + set m "Rep$tnum.e" + puts "\t$m: Elect with varying participation" + set nsites [expr $nclients + 1] + set count 0 + for {set n $nclients} {$n > 1} {incr n -1} { + set m "Rep$tnum.e.$count" + set winner [rep016_selectwinner $nsites $n $n] + setpriority pri $nclients $winner + run_election env_cmd envlist err_cmd pri crash\ + $qdir $m $elector $nsites $n $n $winner 1 + incr count + } + + foreach pair $envlist { + set cenv [lindex $pair 0] + error_check_good cenv_close [$cenv close] 0 + } + replclose $testdir/MSGQUEUEDIR +} + +proc rep016_selectwinner { nsites nvotes nclients } { + # + # Special case: When we test with 100% participation, we expect + # client 0 to always win because it has a bigger LSN than the + # rest due to earlier part of the test. This special case is + # kinda gross. 
+ # + if { $nsites != $nvotes } { + set win [berkdb random_int 0 [expr $nclients - 1]] + } else { + set win 0 + } + return $win +} diff --git a/db/test/rep017.tcl b/db/test/rep017.tcl new file mode 100644 index 000000000..7e24c698d --- /dev/null +++ b/db/test/rep017.tcl @@ -0,0 +1,240 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2003 +# Sleepycat Software. All rights reserved. +# +# $Id: rep017.tcl,v 11.6 2004/09/22 18:01:06 bostic Exp $ +# +# TEST rep017 +# TEST Concurrency with checkpoints. +# TEST +# TEST Verify that we achieve concurrency in the presence of checkpoints. +# TEST Here are the checks that we wish to make: +# TEST While dbenv1 is handling the checkpoint record: +# TEST Subsequent in-order log records are accepted. +# TEST Accepted PERM log records get NOTPERM +# TEST A subsequent checkpoint gets NOTPERM +# TEST After checkpoint completes, next txn returns PERM +proc rep017 { method { niter 10 } { tnum "017" } args } { + set args [convert_args $method $args] + set logsets [create_logsets 2] + + # Run the body of the test with and without recovery. + set recopts { "" "-recover" } + foreach r $recopts { + foreach l $logsets { + set logindex [lsearch -exact $l "in-memory"] + if { $r == "-recover" && $logindex != -1 } { + puts "Rep$tnum: Skipping\ + for in-memory logs with -recover." + continue + } + + puts "Rep$tnum ($method $r):\ + Concurrency with checkpoints." + puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client logs are [lindex $l 1]" + rep017_sub $method $niter $tnum $l $r $args + } + } +} + +proc rep017_sub { method niter tnum logset recargs largs } { + source ./include.tcl + global perm_response_list + + env_cleanup $testdir + set omethod [convert_method $method] + + replsetup $testdir/MSGQUEUEDIR + + set masterdir $testdir/MASTERDIR + set clientdir $testdir/CLIENTDIR + + file mkdir $masterdir + file mkdir $clientdir + set m_logtype [lindex $logset 0] + set c_logtype [lindex $logset 1] + + # In-memory logs require a large log buffer, and cannot + # be used with -txn nosync. + set m_logargs [adjust_logargs $m_logtype] + set c_logargs [adjust_logargs $c_logtype] + set m_txnargs [adjust_txnargs $m_logtype] + set c_txnargs [adjust_txnargs $c_logtype] + + # Open a master. + repladd 1 + set ma_cmd "berkdb_env_noerr -create -lock_max 2500 \ + -log_max 1000000 $m_txnargs $m_logargs \ + -home $masterdir -rep_master \ + -rep_transport \[list 1 replsend\]" +# set ma_cmd "berkdb_env_noerr -create -lock_max 2500 \ +# -log_max 1000000 $m_txnargs $m_logargs \ +# -verbose {rep on} -errfile /dev/stderr \ +# -home $masterdir -rep_master -rep_transport \ +# \[list 1 replsend\]" + set masterenv [eval $ma_cmd $recargs] + error_check_good master_env [is_valid_env $masterenv] TRUE + + # Open a client + repladd 2 + set cl_cmd "berkdb_env_noerr -create -home $clientdir \ + $c_txnargs $c_logargs -rep_client \ + -rep_transport \[list 2 replsend\]" +# set cl_cmd "berkdb_env_noerr -create -home $clientdir \ +# -verbose {rep on} -errfile /dev/stderr \ +# $c_txnargs $c_logargs -rep_client \ +# -rep_transport \[list 2 replsend\]" + set clientenv [eval $cl_cmd $recargs] + error_check_good client_env [is_valid_env $clientenv] TRUE + + # Bring the client online. + process_msgs "{$masterenv 1} {$clientenv 2}" + + # Open database in master, make lots of changes so checkpoint + # will take a while, and propagate to client. + puts "\tRep$tnum.a: Create and populate database." 
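The concurrency checks later in this test compare log offsets and checkpoint LSNs read through stat_field, which is not defined in this patch. Judging from its use below (for example stat_field $masterenv log_stat "Current log file offset"), it scans a handle's statistics output for a named field; a plausible sketch, assuming the Tcl stat methods return a list of {name value} pairs:

    # Plausible sketch of a stat_field-style lookup helper.
    proc sketch_stat_field { handle statcmd fieldname } {
        foreach pair [$handle $statcmd] {
            if { [string equal [lindex $pair 0] $fieldname] } {
                return [lindex $pair 1]
            }
        }
        return ""
    }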
+ set dbname rep017.db + set db [eval "berkdb_open -create $omethod -auto_commit \ + -env $masterenv $largs $dbname"] + for { set i 1 } { $i <= $niter } { incr i } { + set t [$masterenv txn] + error_check_good db_put \ + [eval $db put -txn $t $i [chop_data $method data$i]] 0 + error_check_good txn_commit [$t commit] 0 + } + process_msgs "{$masterenv 1} {$clientenv 2}" 1 + + # Get the master's last LSN before the checkpoint + set pre_ckp_offset \ + [stat_field $masterenv log_stat "Current log file offset"] + + puts "\tRep$tnum.b: Checkpoint on master." + error_check_good checkpoint [$masterenv txn_checkpoint] 0 + + # Now get ckp LSN + set ckp_lsn [stat_field $masterenv txn_stat "LSN of last checkpoint"] + set ckp_offset [lindex $ckp_lsn 1] + + # Fork child process on client. It should process whatever + # it finds in the message queue -- just the checkpoint record, + # for now. It's run in the background so the parent can + # test for whether we're checkpointing at the same time. + # + puts "\tRep$tnum.c: Fork child process on client." + set pid [exec $tclsh_path $test_path/wrap.tcl \ + rep017script.tcl $testdir/repscript.log \ + $masterdir $clientdir &] + + + # We need to wait until we know that the client is processing a + # checkpoint. The checkpoint will consist of some DBREG records + # followed by the actual checkpoint. So, if we've gotten records + # later than the last LSN when the master took the checkpoint, we've + # begin the checkpoint. By test design, we should not finish the + # checkpoint until this process has at least had a chance to run. + puts "\tRep$tnum.d: Test whether client is in checkpoint." + while { 1 } { + set client_off \ + [stat_field $clientenv log_stat "Current log file offset"] + + if { $client_off > $pre_ckp_offset } { + if { $client_off > $ckp_offset } { + # We already completed the checkpoint and + # never got out of here. That's a bug in + # in the test. + error_check_good checkpoint_test \ + not_in_checkpoint should_be_in_checkpoint + } else { + break; + } + } else { + # Not yet up to checkpoint + tclsleep 1 + } + } + + # Main client processes checkpoint 2nd time and should get NOTPERM. + puts "\tRep$tnum.e: Commit and checkpoint return NOTPERM from client" + incr niter + set t [$masterenv txn] + error_check_good db_put [eval $db put \ + -txn $t $niter [chop_data $method data$niter]] 0 + error_check_good txn_commit [$t commit] 0 + error_check_good checkpoint [$masterenv txn_checkpoint] 0 + set ckp2_lsn [stat_field $masterenv txn_stat "LSN of last checkpoint"] + + process_msgs "{$clientenv 2}" 1 + + # Check that the checkpoint record got a NOTPERM + # Find the ckp LSN of the Master and then look for the response + # from that message in the client + set ckp_result "" + foreach i $perm_response_list { + # Everything in the list should be NOTPERM + if { [llength $i] == 0 } { + # Check for sentinel at beginning of list + continue; + } + set ckp_result [lindex $i 0] + error_check_good NOTPERM [is_substr $ckp_result NOTPERM] 1 + if { [lindex $i 1] == $ckp2_lsn } { + break + } + } + error_check_bad perm_response $ckp_result "" + + puts "\tRep$tnum.f: Waiting for child ..." + # Watch until the checkpoint is done. + watch_procs $pid 5 + + # Verify that the checkpoint is now complete on the client and + # that all later messages have been applied. 
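The NOTPERM walk just above, and the ISPERM walk that follows, read perm_response_list, the same list whose bookkeeping the first hunk of these test changes fixes (lindex instead of string index, plus resetting to the {{}} sentinel). How entries get onto that list is part of the suite's message-processing path rather than this patch; the sketch below is only a hypothesis about the recording step, assuming each processed PERM message yields a status string and an LSN.

    # Hypothetical recording step for perm_response_list entries.
    proc sketch_record_perm_response { status lsn } {
        # Append one {status lsn} entry, matching the shape the checks
        # above expect: lindex 0 is ISPERM/NOTPERM, lindex 1 the LSN.
        # The list starts life as the {{}} sentinel the tests skip over.
        global perm_response_list
        if { [is_substr $status ISPERM] || [is_substr $status NOTPERM] } {
            lappend perm_response_list [list $status $lsn]
        }
    }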
+ process_msgs "{$clientenv 2}" 1 + set client_ckp [stat_field $clientenv txn_stat "LSN of last checkpoint"] + error_check_good matching_ckps $client_ckp $ckp2_lsn + + set m_end [stat_field $masterenv log_stat "Current log file offset"] + set c_end [stat_field $clientenv log_stat "Current log file offset"] + error_check_good matching_lsn $c_end $m_end + + # Finally, now that checkpoints are complete; perform another + # perm operation and make sure that it returns ISPERM. + puts "\tRep$tnum.g: No pending ckp; check for ISPERM" + incr niter + set t [$masterenv txn] + error_check_good db_put [eval $db put \ + -txn $t $niter [chop_data $method data$niter]] 0 + error_check_good txn_commit [$t commit] 0 + error_check_good checkpoint [$masterenv txn_checkpoint] 0 + set ckp3_lsn [stat_field $masterenv txn_stat "LSN of last checkpoint"] + + process_msgs "{$clientenv 2}" 1 + + # Check that the checkpoint and commit records got a ISPERM + # Find the ckp LSN of the Master and then look for the response + # from that message in the client + set ckp_result "" + foreach i $perm_response_list { + if { [llength $i] == 0 } { + # Check for sentinel at beginning of list + continue; + } + + # Everything in the list should be ISPERM + set ckp_result [lindex $i 0] + error_check_good ISPERM [is_substr $ckp_result ISPERM] 1 + if { [lindex $i 1] == $ckp3_lsn } { + break + } + } + error_check_bad perm_response $ckp_result "" + + # Clean up. + error_check_good db_close [$db close] 0 + error_check_good masterenv_close [$masterenv close] 0 + error_check_good clientenv_close [$clientenv close] 0 + + replclose $testdir/MSGQUEUEDIR +} diff --git a/db/test/rep017script.tcl b/db/test/rep017script.tcl new file mode 100644 index 000000000..7aa973d07 --- /dev/null +++ b/db/test/rep017script.tcl @@ -0,0 +1,85 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2003 +# Sleepycat Software. All rights reserved. +# +# $Id: rep017script.tcl,v 11.4 2004/09/22 18:01:06 bostic Exp $ +# +# Rep017 script - concurrency with checkpoints. +# +# Repscript exists to process checkpoints, though the +# way it is currently written, it will process whatever +# it finds in the message queue. It requires a one-master +# one-client setup. +# +# Usage: repscript masterdir clientdir +# masterdir: master env directory +# clientdir: client env directory +# +source ./include.tcl +source $test_path/test.tcl +source $test_path/testutils.tcl +source $test_path/reputils.tcl + +set usage "repscript masterdir clientdir" + +# Verify usage +if { $argc != 2 } { + puts stderr "FAIL:[timestamp] Usage: $usage" + exit +} + +# Initialize arguments +set masterdir [ lindex $argv 0 ] +set clientdir [ lindex $argv 1 ] + +# Join the queue env. We assume the rep test convention of +# placing the messages in $testdir/MSGQUEUEDIR. +set queueenv [eval berkdb_env -home $testdir/MSGQUEUEDIR] +error_check_good script_qenv_open [is_valid_env $queueenv] TRUE + +# +# We need to set up our own machids. +# Add 1 for master env id, and 2 for the clientenv id. +# +repladd 1 +repladd 2 + +# Join the master env. +set ma_cmd "berkdb_env_noerr -home $masterdir \ + -txn -rep_master -rep_transport \[list 1 replsend\]" +# set ma_cmd "berkdb_env_noerr -home $masterdir \ +# -verbose {rep on} -errfile /dev/stderr \ +# -txn -rep_master -rep_transport \[list 1 replsend\]" +set masterenv [eval $ma_cmd] +error_check_good script_menv_open [is_valid_env $masterenv] TRUE + +puts "Master open" + +# Join the client env. 
+set cl_cmd "berkdb_env_noerr -home $clientdir \ + -txn -rep_client -rep_transport \[list 2 replsend\]" +# set cl_cmd "berkdb_env_noerr -home $clientdir \ +# -verbose {rep on} -errfile /dev/stderr \ +# -txn -rep_client -rep_transport \[list 2 replsend\]" +set clientenv [eval $cl_cmd] +error_check_good script_cenv_open [is_valid_env $clientenv] TRUE + +puts "Everyone open" +tclsleep 10 + +# Make it so that the client sleeps in the middle of checkpoints +$clientenv test check 10 + +puts "Client set" + +# Update the client, in order to process the checkpoint +process_msgs "{$masterenv 1} {$clientenv 2}" + + +puts "Processed messages" + +# Close the envs +error_check_good script_master_close [$masterenv close] 0 +error_check_good script_client_close [$clientenv close] 0 +puts "\tRepscript completed successfully" diff --git a/db/test/rep018.tcl b/db/test/rep018.tcl new file mode 100644 index 000000000..4a1370030 --- /dev/null +++ b/db/test/rep018.tcl @@ -0,0 +1,159 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2003-2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep018.tcl,v 1.8 2004/09/22 18:01:06 bostic Exp $ +# +# TEST rep018 +# TEST Replication with dbremove. +# TEST +# TEST Verify that the attempt to remove a database file +# TEST on the master hangs while another process holds a +# TEST handle on the client. +# TEST +proc rep018 { method { niter 10 } { tnum "018" } args } { + set args [convert_args $method $args] + set logsets [create_logsets 2] + + # Run the body of the test with and without recovery. + set recopts { "" "-recover" } + foreach r $recopts { + foreach l $logsets { + set logindex [lsearch -exact $l "in-memory"] + if { $r == "-recover" && $logindex != -1 } { + puts "Rep$tnum: Skipping\ + for in-memory logs with -recover." + continue + } + puts "Rep$tnum ($method $r): Replication with dbremove." + puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client logs are [lindex $l 1]" + rep018_sub $method $niter $tnum $l $r $args + } + } +} + +proc rep018_sub { method niter tnum logset recargs largs } { + source ./include.tcl + env_cleanup $testdir + set omethod [convert_method $method] + + replsetup $testdir/MSGQUEUEDIR + + set masterdir $testdir/MASTERDIR + set clientdir $testdir/CLIENTDIR + + file mkdir $masterdir + file mkdir $clientdir + set m_logtype [lindex $logset 0] + set c_logtype [lindex $logset 1] + + # In-memory logs require a large log buffer, and cannot + # be used with -txn nosync. + set m_logargs [adjust_logargs $m_logtype] + set c_logargs [adjust_logargs $c_logtype] + set m_txnargs [adjust_txnargs $m_logtype] + set c_txnargs [adjust_txnargs $c_logtype] + + puts "\tRep$tnum.a: Create master and client, bring online." + # Open a master. + repladd 1 + set env_cmd(M) "berkdb_env_noerr -create -lock_max 2500 \ + -log_max 1000000 -home $masterdir \ + $m_txnargs $m_logargs -rep_master \ + -rep_transport \[list 1 replsend\]" + set masterenv [eval $env_cmd(M) $recargs] + error_check_good master_env [is_valid_env $masterenv] TRUE + + # Open a client + repladd 2 + set env_cmd(C) "berkdb_env_noerr -create -home $clientdir \ + $c_txnargs $c_logargs -rep_client \ + -rep_transport \[list 2 replsend\]" + set clientenv [eval $env_cmd(C) $recargs] + error_check_good client_env [is_valid_env $clientenv] TRUE + + # Bring the client online. + process_msgs "{$masterenv 1} {$clientenv 2}" + + puts "\tRep$tnum.b: Open database on master, propagate to client." 
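Further down, rep018 coordinates its parent process with a child tclsh through a small marker database: the child must hold its client database handle open before the master attempts the dbremove, and the parent records when it finished so the child can verify it completed last. Condensed to its two sides, the handshake looks roughly like the sketch below; the proc names are mine, while the key names, timestamp -r and the polling idiom are taken from the rep018 hunks that follow.

    # Child side: announce that the client database handle is open.
    proc sketch_child_signal_ready { marker } {
        error_check_good child_ready \
            [$marker put -auto_commit CHILDREADY [timestamp -r]] 0
    }

    # Parent side: wait for that announcement before removing the
    # database on the master.
    proc sketch_parent_wait_for_child { marker } {
        while { [llength [$marker get CHILDREADY]] == 0 } {
            tclsleep 1
        }
    }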
+ set dbname rep$tnum.db + set db [eval "berkdb_open -create $omethod -auto_commit \ + -env $masterenv $largs $dbname"] + set t [$masterenv txn] + for { set i 1 } { $i <= $niter } { incr i } { + error_check_good db_put \ + [eval $db put -txn $t $i [chop_data $method data$i]] 0 + } + error_check_good txn_commit [$t commit] 0 + process_msgs "{$masterenv 1} {$clientenv 2}" + + puts "\tRep$tnum.c: Spawn a child tclsh to do client work." + set pid [exec $tclsh_path $test_path/wrap.tcl \ + rep018script.tcl $testdir/rep018script.log \ + $clientdir $niter $dbname $method &] + + puts "\tRep$tnum.d: Close and remove database on master." + error_check_good close_master_db [$db close] 0 + + # Remove database in master env. First make sure the child + # tclsh is done reading the data. + while { 1 } { + if { [file exists $testdir/marker.db] == 0 } { + tclsleep 1 + } else { + set markerenv [berkdb_env -home $testdir -txn] + error_check_good markerenv_open \ + [is_valid_env $markerenv] TRUE + set marker [berkdb_open -unknown -env $markerenv \ + -auto_commit marker.db] + while { [llength [$marker get CHILDREADY]] == 0 } { + tclsleep 1 + } + break + } + } + error_check_good db_remove [$masterenv dbremove -auto_commit $dbname] 0 + + puts "\tRep$tnum.e: Create new database on master with the same name." + set db [eval "berkdb_open -create $omethod -auto_commit \ + -env $masterenv $largs $dbname"] + error_check_good new_db_open [is_valid_db $db] TRUE + + puts "\tRep$tnum.f: Propagate changes to client. Process should hang." + error_check_good timestamp_remove \ + [$marker put -auto_commit PARENTREMOVE [timestamp -r]] 0 + process_msgs "{$masterenv 1} {$clientenv 2}" + error_check_good timestamp_done \ + [$marker put -auto_commit PARENTDONE [timestamp -r]] 0 + + watch_procs $pid 5 + + puts "\tRep$tnum.g: Check for failure." + # Check marker file for correct timestamp ordering. + set ret [$marker get CHILDDONE] + set childdone [lindex [lindex [lindex $ret 0] 1] 0] + set ret [$marker get PARENTDONE] + set parentdone [lindex [lindex [lindex $ret 0] 1] 0] + if { [expr $childdone - $parentdone] > 0 } { + puts "\tFAIL: parent must complete after child" + } + + # Clean up. + error_check_good marker_db_close [$marker close] 0 + error_check_good market_env_close [$markerenv close] 0 + error_check_good masterdb_close [$db close] 0 + error_check_good masterenv_close [$masterenv close] 0 + error_check_good clientenv_close [$clientenv close] 0 + + replclose $testdir/MSGQUEUEDIR + + # Check log file for failures. + set errstrings [eval findfail $testdir/rep018script.log] + foreach str $errstrings { + puts "FAIL: error message in rep018 log file: $str" + } +} + + diff --git a/db/test/rep018script.tcl b/db/test/rep018script.tcl new file mode 100644 index 000000000..9d6cd3a45 --- /dev/null +++ b/db/test/rep018script.tcl @@ -0,0 +1,94 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2003-2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep018script.tcl,v 1.5 2004/09/22 18:01:06 bostic Exp $ +# +# Rep018 script - concurrency with checkpoints. +# +# Test dbremove with replication. 
+# +# Usage: rep018script clientdir dbfile +# clientdir: client env directory +# niter: number of items in file +# dbfile: name of database file +# +source ./include.tcl +source $test_path/test.tcl +source $test_path/testutils.tcl +source $test_path/reputils.tcl + +set usage "repscript clientdir niter dbfile method" + +# Verify usage +if { $argc != 4 } { + puts stderr "FAIL:[timestamp] Usage: $usage" + exit +} + +# Initialize arguments +set clientdir [ lindex $argv 0 ] +set niter [ lindex $argv 1 ] +set dbfile [ lindex $argv 2 ] +set method [ lindex $argv 3 ] + +# Join the queue env. We assume the rep test convention of +# placing the messages in $testdir/MSGQUEUEDIR. +set queueenv [eval berkdb_env -home $testdir/MSGQUEUEDIR] +error_check_good script_qenv_open [is_valid_env $queueenv] TRUE + +# +# We need to set up our own machids. +# Add 1 for master env id, and 2 for the clientenv id. +# +repladd 1 +repladd 2 + +# Join the client env. +set cl_cmd "berkdb_env_noerr -home $clientdir \ + -txn -rep_client -rep_transport \[list 2 replsend\]" +# set cl_cmd "berkdb_env_noerr -home $clientdir \ +# -verbose {rep on} -errfile /dev/stderr \ +# -txn -rep_client -rep_transport \[list 2 replsend\]" +set clientenv [eval $cl_cmd] +error_check_good script_cenv_open [is_valid_env $clientenv] TRUE + +# Make sure we can read data on client. +set db [eval "berkdb_open -env $clientenv $dbfile"] +for { set i 1 } { $i <= $niter } { incr i } { + set ret [lindex [$db get $i] 0] + error_check_good db_get $ret [list $i [pad_data $method data$i]] +} + +# Put a timestamp in a shared file. +set markerenv [berkdb_env -create -home $testdir -txn] +error_check_good markerenv_open [is_valid_env $markerenv] TRUE +set marker \ + [eval "berkdb_open -create -btree -auto_commit -env $markerenv marker.db"] +error_check_good timestamp_ready \ + [$marker put -auto_commit CHILDREADY [timestamp -r]] 0 + +# Give the parent a chance to process messages and hang. +tclsleep 30 + +# Clean up the child so the parent can go forward. +error_check_good timestamp_done \ + [$marker put -auto_commit CHILDDONE [timestamp -r]] 0 +error_check_good client_db_close [$db close] 0 + +# Check that the master is done. +while { [llength [$marker get PARENTDONE]] == 0 } { + tclsleep 1 +} + +# Verify that the newly recreated database is now empty. +set db [eval "berkdb_open -env $clientenv $dbfile"] +set cursor [$db cursor] +error_check_good db_empty [llength [$cursor get -first]] 0 +error_check_good cursor_close [$cursor close] 0 +error_check_good db_close [$db close] 0 +error_check_good marker_db_close [$marker close] 0 +error_check_good markerenv_close [$markerenv close] 0 +error_check_good script_client_close [$clientenv close] 0 + diff --git a/db/test/rep019.tcl b/db/test/rep019.tcl new file mode 100644 index 000000000..a569f1238 --- /dev/null +++ b/db/test/rep019.tcl @@ -0,0 +1,155 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2001-2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep019.tcl,v 11.7 2004/09/22 18:01:06 bostic Exp $ +# +# TEST rep019 +# TEST Replication and multiple clients at same LSN. +# TEST Have several clients at the same LSN. Run recovery at +# TEST different times. Declare a client master and after sync-up +# TEST verify all client logs are identical. +# +proc rep019 { method { nclients 3 } { tnum "019" } args } { + global mixed_mode_logging + + # This test needs to use recovery, so mixed-mode testing + # isn't appropriate. 
+ if { $mixed_mode_logging == 1 } { + puts "Rep$tnum: Skipping for mixed-mode logging." + return + } + set args [convert_args $method $args] + + # Run the body of the test with and without recovery. + set recopts { "" "-recover" } + foreach r $recopts { + puts "Rep$tnum ($method $r):\ + Replication and $nclients recovered clients in sync." + rep019_sub $method $nclients $tnum $r $args + } +} + +proc rep019_sub { method nclients tnum recargs largs } { + global testdir + global util_path + + set orig_tdir $testdir + env_cleanup $testdir + + replsetup $testdir/MSGQUEUEDIR + + set niter 100 + set masterdir $testdir/MASTERDIR + file mkdir $masterdir + + # Open a master. + repladd 1 + set ma_envcmd "berkdb_env -create -txn nosync -lock_max 2500 \ + -home $masterdir -rep_master -rep_transport \[list 1 replsend\]" +# set ma_envcmd "berkdb_env -create $m_txnargs -lock_max 2500 \ +# -errpfx MASTER -verbose {rep on} \ +# -home $masterdir -rep_master -rep_transport \[list 1 replsend\]" + set menv [eval $ma_envcmd $recargs] + error_check_good master_env [is_valid_env $menv] TRUE + + for {set i 0} {$i < $nclients} {incr i} { + set clientdir($i) $testdir/CLIENTDIR.$i + file mkdir $clientdir($i) + set id($i) [expr 2 + $i] + repladd $id($i) + set cl_envcmd($i) "berkdb_env -create -txn nosync \ + -lock_max 2500 -home $clientdir($i) \ + -rep_client -rep_transport \[list $id($i) replsend\]" +# set cl_envcmd($i) "berkdb_env -create -txn nosync \ +# -lock_max 2500 -home $clientdir($i) \ +# -errpfx CLIENT$i -verbose {rep on} \ +# -rep_client -rep_transport \[list $id($i) replsend\]" + set clenv($i) [eval $cl_envcmd($i) $recargs] + error_check_good client_env [is_valid_env $clenv($i)] TRUE + } + set testfile "test$tnum.db" + set omethod [convert_method $method] + set masterdb [eval {berkdb_open_noerr -env $menv -auto_commit \ + -create -mode 0644} $largs $omethod $testfile] + error_check_good dbopen [is_valid_db $masterdb] TRUE + + # Bring the clients online by processing the startup messages. + set envlist {} + lappend envlist "$menv 1" + for { set i 0 } { $i < $nclients } { incr i } { + lappend envlist "$clenv($i) $id($i)" + } + process_msgs $envlist + + # Run a modified test001 in the master (and update clients). + puts "\tRep$tnum.a: Running test001 in replicated env." + eval rep_test $method $menv $masterdb $niter 0 0 + process_msgs $envlist + + error_check_good mdb_cl [$masterdb close] 0 + # Process any close messages. + process_msgs $envlist + + error_check_good menv_cl [$menv close] 0 + puts "\tRep$tnum.b: Close all envs and run recovery in clients." + for {set i 0} {$i < $nclients} {incr i} { + error_check_good cl$i.close [$clenv($i) close] 0 + set hargs($i) "-h $clientdir($i)" + } + foreach sleep {2 1 0} { + for {set i 0} {$i < $nclients} {incr i} { + set stat [catch {eval exec $util_path/db_recover \ + $hargs($i)} result] + error_check_good stat $stat 0 + # + # Need to sleep to make sure recovery's checkpoint + # records have different timestamps. + tclsleep $sleep + } + } + + puts "\tRep$tnum.c: Reopen clients and declare one master." + for {set i 0} {$i < $nclients} {incr i} { + set clenv($i) [eval $cl_envcmd($i) $recargs] + error_check_good client_env [is_valid_env $clenv($i)] TRUE + } + error_check_good master0 [$clenv(0) rep_start -master] 0 + + puts "\tRep$tnum.d: Sync up with other clients." 
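+	# Pump the message queues by hand: keep making passes over all
+	# the clients until an entire pass processes no messages, at
+	# which point they have synced up with the new master.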
+ while { 1 } { + set nproced 0 + + for {set i 0} {$i < $nclients} {incr i} { + incr nproced [replprocessqueue $clenv($i) $id($i)] + } + + if { $nproced == 0 } { + break + } + } + puts "\tRep$tnum.e: Verify client logs match." + set i 0 + error_check_good cl$i.close [$clenv($i) close] 0 + set stat [catch {eval exec $util_path/db_printlog \ + $hargs($i) >& $clientdir($i)/prlog} result] + # + # Note we start the loop at 1 here and compare against client0 + # which became the master. + # + for {set i 1} {$i < $nclients} {incr i} { + error_check_good cl$i.close [$clenv($i) close] 0 + fileremove -f $clientdir($i)/prlog + set stat [catch {eval exec $util_path/db_printlog \ + $hargs($i) >> $clientdir($i)/prlog} result] + error_check_good stat_prlog $stat 0 + error_check_good log_cmp(0,$i) \ + [filecmp $clientdir(0)/prlog $clientdir($i)/prlog] 0 + } + + replclose $testdir/MSGQUEUEDIR + set testdir $orig_tdir + return +} + diff --git a/db/test/rep020.tcl b/db/test/rep020.tcl new file mode 100644 index 000000000..ced4a6d92 --- /dev/null +++ b/db/test/rep020.tcl @@ -0,0 +1,254 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep020.tcl,v 1.10 2004/09/22 18:01:06 bostic Exp $ +# +# TEST rep020 +# TEST Replication elections - test election generation numbers. +# TEST + +proc rep020 { method args } { + global rand_init + + set tnum "020" + if { [is_btree $method] == 0 } { + puts "Rep$tnum: Skipping for method $method." + return + } + + error_check_good set_random_seed [berkdb srand $rand_init] 0 + set nclients 5 + set logsets [create_logsets [expr $nclients + 1]] + foreach l $logsets { + puts "Rep$tnum ($method): Election generation test." + puts "Rep$tnum: Master logs are [lindex $l 0]" + for { set i 0 } { $i < $nclients } { incr i } { + puts "Rep$tnum: Client $i logs are\ + [lindex $l [expr $i + 1]]" + } + rep020_sub $method $nclients $tnum $l $args + } +} + +proc rep020_sub { method nclients tnum logset args } { + source ./include.tcl + global errorInfo + global mixed_mode_logging + env_cleanup $testdir + + set qdir $testdir/MSGQUEUEDIR + replsetup $qdir + + set masterdir $testdir/MASTERDIR + file mkdir $masterdir + set m_logtype [lindex $logset 0] + set m_logargs [adjust_logargs $m_logtype] + set m_txnargs [adjust_txnargs $m_logtype] + + for { set i 0 } { $i < $nclients } { incr i } { + set clientdir($i) $testdir/CLIENTDIR.$i + file mkdir $clientdir($i) + set c_logtype($i) [lindex $logset [expr $i + 1]] + set c_logargs($i) [adjust_logargs $c_logtype($i)] + set c_txnargs($i) [adjust_txnargs $c_logtype($i)] + } + +# To debug elections, the lines to uncomment are below the +# error checking portion of this test. This is needed in order +# for the error messages to come back in errorInfo and for +# that portion of the test to pass. + # Open a master. + set envlist {} + repladd 1 + set env_cmd(M) "berkdb_env -create -log_max 1000000 \ + -home $masterdir $m_txnargs $m_logargs -rep_master \ + -errpfx MASTER -rep_transport \[list 1 replsend\]" + set masterenv [eval $env_cmd(M)] + error_check_good master_env [is_valid_env $masterenv] TRUE + lappend envlist "$masterenv 1" + + # Open the clients. 
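+	# The master uses env id 1 and client $i uses env id $i + 2, so
+	# later loops recover a client's index from an envlist pair with,
+	# for example:
+	#	set i [expr [lindex $pair 1] - 2]
+	#	set clientenv($i) [lindex $pair 0]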
+ for { set i 0 } { $i < $nclients } { incr i } { + set envid [expr $i + 2] + repladd $envid + set env_cmd($i) "berkdb_env_noerr -create \ + -home $clientdir($i) $c_txnargs($i) $c_logargs($i) \ + -rep_client -rep_transport \[list $envid replsend\]" + set clientenv($i) [eval $env_cmd($i)] + error_check_good \ + client_env($i) [is_valid_env $clientenv($i)] TRUE + lappend envlist "$clientenv($i) $envid" + } + + # Run a modified test001 in the master. + process_msgs $envlist + puts "\tRep$tnum.a: Running rep_test in replicated env." + set niter 10 + eval rep_test $method $masterenv NULL $niter 0 0 + process_msgs $envlist + error_check_good masterenv_close [$masterenv close] 0 + set envlist [lreplace $envlist 0 0] + +# To debug elections, uncomment the lines below to turn on verbose +# and set the errfile. Also edit reputils.tcl +# in proc start_election and swap the 2 commented lines with +# their counterpart. + foreach pair $envlist { + set i [expr [lindex $pair 1] - 2] + replclear [expr $i + 2] + set err_cmd($i) "none" + set pri($i) 10 + set crash($i) 0 +# error_check_good pfx [$clientenv($i) errpfx CLIENT$i] 0 +# error_check_good verb [$clientenv($i) verbose rep on] 0 +# $clientenv($i) errfile /dev/stderr +# set env_cmd($i) [concat $env_cmd($i) \ +# "-errpfx CLIENT$i -verbose {rep on} -errfile /dev/stderr"] + } + + set msg "Rep$tnum.b" + puts "\t$msg: Run elections to increment egen." + + set nelect 2 + set nsites $nclients + set nvotes $nclients + for { set j 0 } { $j < $nelect } { incr j } { + # Pick winner and elector randomly. + set winner [berkdb random_int 0 [expr $nclients - 1]] + setpriority pri $nclients $winner + set elector [berkdb random_int 0 [expr $nclients - 1]] + run_election env_cmd envlist err_cmd pri crash\ + $qdir $msg $elector $nsites $nvotes $nclients $winner 1 + } + process_msgs $envlist + + set msg "Rep$tnum.c" + puts "\t$msg: Updating egen when getting an old vote." + + # + # Find the last client and save the election generation number. + # Close the last client and adjust the list of envs to process. + # + set i [expr $nclients - 1] + set last [lindex $envlist end] + set clientenv($i) [lindex $last 0] + set egen($i) \ + [stat_field $clientenv($i) rep_stat "Election generation number"] + error_check_good clientenv_close($i) [$clientenv($i) close] 0 + set envlist [lreplace $envlist end end] + + # Run a few more elections while the last client is closed. + # Make sure we don't pick the closed client as the winner, + # and require votes from one fewer site. + # + set orig_nvotes $nvotes + set orig_nclients $nclients + set nvotes [expr $orig_nvotes - 1] + set nclients [expr $orig_nclients - 1] + for { set j 0 } { $j < $nelect } { incr j } { + set winner [berkdb random_int 0 [expr $nclients - 1]] + setpriority pri $nclients $winner + set elector [berkdb random_int 0 [expr $nclients - 1]] + run_election env_cmd envlist err_cmd pri crash\ + $qdir $msg $elector $nsites $nvotes $nclients $winner 1 + } + process_msgs $envlist + # + # Verify that the last client's election generation number has + # changed, and that it matches the other clients. + # + set pair [lindex $envlist 0] + set clenv [lindex $pair 0] + set clegen [stat_field \ + $clenv rep_stat "Election generation number"] + + # Reopen last client's env. Do not run recovery, but do + # process messages to get the egen updated. 
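+	# (Egen, the "Election generation number" reported by rep_stat,
+	# is bumped by each election; the reopened client has to catch
+	# up to the group's current egen before its votes count.)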
+	replclear $envid
+	set clientenv($i) [eval $env_cmd($i)]
+	lappend envlist "$clientenv($i) $envid"
+	error_check_good client_reopen [is_valid_env $clientenv($i)] TRUE
+	process_msgs $envlist
+
+	set newegen($i) \
+	    [stat_field $clientenv($i) rep_stat "Election generation number"]
+	error_check_bad egen_changed $newegen($i) $egen($i)
+	error_check_good egen_changed1 $newegen($i) $clegen
+
+	set msg "Rep$tnum.d"
+	puts "\t$msg: New client starts election."
+	#
+	# Run another election, this time called by the last client.
+	# This should succeed because the last client has already
+	# caught up to the others for egen.
+	#
+	set winner 2
+	set nvotes $orig_nvotes
+	set nclients $orig_nclients
+	set elector [expr $nclients - 1]
+	setpriority pri $nclients $winner
+	run_election env_cmd envlist err_cmd pri crash \
+	    $qdir $msg $elector $nsites $nvotes $nclients $winner 0
+
+	set newegen($i) \
+	    [stat_field $clientenv($i) rep_stat "Election generation number"]
+	foreach pair $envlist {
+		set i [expr [lindex $pair 1] - 2]
+		set clientenv($i) [lindex $pair 0]
+		set egen($i) [stat_field \
+		    $clientenv($i) rep_stat "Election generation number"]
+	}
+	error_check_good egen_catchup $egen(4) $egen(3)
+
+	# Skip this part of the test for mixed-mode logging,
+	# since we can't recover with in-memory logs.
+	if { $mixed_mode_logging == 0 } {
+		set msg "Rep$tnum.e"
+		puts "\t$msg: Election generation is not changed in recovery."
+		# Note all client egens.  Close, recover, process messages,
+		# and check that egens are unchanged.
+		foreach pair $envlist {
+			set i [expr [lindex $pair 1] - 2]
+			set clientenv($i) [lindex $pair 0]
+			set egen($i) [stat_field $clientenv($i) \
+			    rep_stat "Election generation number"]
+			error_check_good \
+			    clientenv_close($i) [$clientenv($i) close] 0
+			set clientenv($i) [eval $env_cmd($i) -recover]
+			set envlist [lreplace \
+			    $envlist $i $i "$clientenv($i) [expr $i + 2]"]
+		}
+		process_msgs $envlist
+		foreach pair $envlist {
+			set i [expr [lindex $pair 1] - 2]
+			set clientenv($i) [lindex $pair 0]
+			set newegen($i) [stat_field $clientenv($i) \
+			    rep_stat "Election generation number"]
+			error_check_good egen_recovery $egen($i) $newegen($i)
+		}
+
+		# Run an election.  Now the egens should go forward.
+		set winner [berkdb random_int 0 [expr $nclients - 1]]
+		setpriority pri $nclients $winner
+		set elector [berkdb random_int 0 [expr $nclients - 1]]
+		run_election env_cmd envlist err_cmd pri crash \
+		    $qdir $msg $elector $nsites $nvotes $nclients $winner 1
+
+		foreach pair $envlist {
+			set i [expr [lindex $pair 1] - 2]
+			set clientenv($i) [lindex $pair 0]
+			set newegen($i) [stat_field $clientenv($i) \
+			    rep_stat "Election generation number"]
+			error_check_good \
+			    egen_forward [expr $newegen($i) > $egen($i)] 1
+		}
+	}
+
+	foreach pair $envlist {
+		set cenv [lindex $pair 0]
+		error_check_good cenv_close [$cenv close] 0
+	}
+
+	replclose $testdir/MSGQUEUEDIR
+}
+
diff --git a/db/test/rep021.tcl b/db/test/rep021.tcl
new file mode 100644
index 000000000..3fa27aed8
--- /dev/null
+++ b/db/test/rep021.tcl
@@ -0,0 +1,304 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2003
+#	Sleepycat Software.  All rights reserved.
+#
+# $Id: rep021.tcl,v 1.6 2004/09/22 18:01:06 bostic Exp $
+#
+# TEST	rep021
+# TEST	Replication and multiple environments.
+# TEST	Run similar tests in separate environments, making sure
+# TEST	that some data overlaps.  Then, "move" one client env
+# TEST	from one replication group to another and make sure that
+# TEST	we do not get divergent logs.  We either match the first
+# TEST	record and end up with identical logs or we get an error.
+# TEST	Verify all client logs are identical if successful.
+#
+proc rep021 { method { nclients 3 } { tnum "021" } args } {
+	set args [convert_args $method $args]
+	set logsets [create_logsets [expr $nclients + 1]]
+
+	# Run the body of the test with and without recovery.
+	set recopts { "" "-recover" }
+	foreach r $recopts {
+		foreach l $logsets {
+			set logindex [lsearch -exact $l "in-memory"]
+			if { $r == "-recover" && $logindex != -1 } {
+				puts "Rep$tnum: Skipping\
+				    for in-memory logs with -recover."
+				continue
+			}
+			puts "Rep$tnum ($method $r):\
+			    Replication and multiple environments."
+			puts "Rep$tnum: Master logs are [lindex $l 0]"
+			for { set i 0 } { $i < $nclients } { incr i } {
+				puts "Rep$tnum: Client $i logs are\
+				    [lindex $l [expr $i + 1]]"
+			}
+			rep021_sub $method $nclients $tnum $l $r $args
+		}
+	}
+}
+
+proc rep021_sub { method nclients tnum logset recargs largs } {
+	global testdir
+	global util_path
+
+	set orig_tdir $testdir
+	env_cleanup $testdir
+
+	replsetup $testdir/MSGQUEUEDIR
+
+	set niter 100
+	set offset 5
+	set masterdir $testdir/MASTERDIR
+	set masterdir2 $testdir/MASTERDIR.NEW
+	file mkdir $masterdir
+	file mkdir $masterdir2
+
+	set m_logtype [lindex $logset 0]
+	set m_logargs [adjust_logargs $m_logtype]
+	set m_txnargs [adjust_txnargs $m_logtype]
+
+	# We want to run the test 3 times in 2 separate repl envs.
+	# This is a little tricky because the Tcl replication machinery
+	# assumes a single replication group, so we have to manage and
+	# clear the repl message queues ourselves when switching between
+	# the two groups.
+	# To accomplish this we run entirely in the 2nd group first,
+	# set it up, and then close all its envs.  Then we run in the
+	# 1st group and set it up.  Finally we add a client from the
+	# 2nd group into the existing 1st group.
+	# Although the steps run in separate parts, the overall plan is:
+	#
+	# 1. First add divergent data to the databases:
+	# RepGrp1: Add niter data items starting at 0.
+	# RepGrp2: Add niter data items starting at offset.
+	# This gives us overlapping data in the databases, but their
+	# additions will be at different offsets in the log files.
+	#
+	# 2. Add identical data to both databases.
+	# RepGrp1: Add niter data items starting at niter + offset.
+	# RepGrp2: Add niter data items starting at niter + offset.
+	# This gives us identical data in the databases and logs.
+	#
+	# 3. Again add divergent data to the databases.
+	# RepGrp1: Add niter data items starting at niter*2 + offset.
+	# RepGrp2: Add niter data items starting at niter*2 + offset*2.
+	# This gives us overlapping data in the databases, but their
+	# additions will be at different offsets in the log files.
+	#
+	# 4. Add a client from one group to the other, then try
+	# to sync up that client.  We should get a failure with
+	# one of the non-matching error messages:
+	# "Too few log files to sync with master"
+	# "Client was never part of master's environment"
+
+	# Open a 2nd master.  Make all the 2nd env ids >= 10.
+	# For the 2nd group, just have 1 master and 1 client.
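+	# With niter 100 and offset 5 (set above), the three phases set
+	# up below start at 0, 105 and 205 for group 1, and at 5, 105
+	# and 210 for group 2, so only the middle phase adds identical
+	# data to the two groups.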
+ repladd 10 + set ma2_envcmd "berkdb_env -create $m_txnargs \ + $m_logargs -lock_max 2500 -home $masterdir2 \ + -rep_master -rep_transport \[list 10 replsend\]" +# set ma2_envcmd "berkdb_env -create $m_txnargs \ +# $m_logargs -lock_max 2500 -home $masterdir2 \ +# -errpfx MASTER2 -verbose {rep on} \ +# -rep_master -rep_transport \[list 10 replsend\]" + set menv2 [eval $ma2_envcmd $recargs] + error_check_good master2_env [is_valid_env $menv2] TRUE + + set clientdir2 $testdir/CLIENTDIR.NEW + file mkdir $clientdir2 + set id2 11 + set c_logtype($id2) [lindex $logset 1] + set c_logargs($id2) [adjust_logargs $c_logtype($id2)] + set c_txnargs($id2) [adjust_txnargs $c_logtype($id2)] + + set id2 11 + repladd $id2 + set cl2_envcmd "berkdb_env -create $c_txnargs($id2) -lock_max 2500 \ + $c_logargs($id2) -home $clientdir2 \ + -rep_client -rep_transport \[list $id2 replsend\]" +# set cl2_envcmd "berkdb_env -create $c_txnargs($id2) -lock_max 2500 \ +# -errpfx CLIENT2 -verbose {rep on} \ +# $c_logargs($id2) -home $clientdir2 \ +# -rep_client -rep_transport \[list $id2 replsend\]" + set clenv2 [eval $cl2_envcmd $recargs] + error_check_good client_env [is_valid_env $clenv2] TRUE + + set testfile "test$tnum.db" + set omethod [convert_method $method] + + set masterdb2 [eval {berkdb_open_noerr -env $menv2 -auto_commit \ + -create -mode 0644} $largs $omethod $testfile] + error_check_good dbopen [is_valid_db $masterdb2] TRUE + + # + # Process startup messages + # + set env2list {} + lappend env2list "$menv2 10" + lappend env2list "$clenv2 $id2" + process_msgs $env2list + + # + # Set up the three runs of rep_test. We need the starting + # point for each phase of the test for each group. + # + set e1phase1 0 + set e2phase1 $offset + set e1phase2 [expr $niter + $offset] + set e2phase2 $e1phase2 + set e1phase3 [expr $e1phase2 + $niter] + set e2phase3 [expr $e2phase2 + $niter + $offset] + + puts "\tRep$tnum.a: Running rep_test in 2nd replicated env." + eval rep_test $method $menv2 $masterdb2 $niter $e2phase1 1 1 + eval rep_test $method $menv2 $masterdb2 $niter $e2phase2 1 1 + eval rep_test $method $menv2 $masterdb2 $niter $e2phase3 1 1 + error_check_good mdb_cl [$masterdb2 close] 0 + process_msgs $env2list + + puts "\tRep$tnum.b: Close 2nd replicated env. Open primary." + error_check_good mdb_cl [$clenv2 close] 0 + error_check_good mdb_cl [$menv2 close] 0 + replclose $testdir/MSGQUEUEDIR + + # + # Run recovery in client now to blow away region files so + # that this client comes in as a "new" client and announces itself. + # + set stat [catch {eval exec $util_path/db_recover -h $clientdir2} result] + error_check_good stat $stat 0 + + # + # Now we've run in the 2nd env. We have everything we need + # set up and existing in that env. Now run the test in the + # 1st env and then we'll try to add in the client. + # + replsetup $testdir/MSGQUEUEDIR + # Open a master. 
+ repladd 1 + set ma_envcmd "berkdb_env -create $m_txnargs \ + $m_logargs -lock_max 2500 -home $masterdir \ + -rep_master -rep_transport \[list 1 replsend\]" +# set ma_envcmd "berkdb_env -create $m_txnargs \ +# $m_logargs -lock_max 2500 -home $masterdir \ +# -errpfx MASTER -verbose {rep on} \ +# -rep_master -rep_transport \[list 1 replsend\]" + set menv [eval $ma_envcmd $recargs] + error_check_good master_env [is_valid_env $menv] TRUE + + for {set i 0} {$i < $nclients} {incr i} { + set clientdir($i) $testdir/CLIENTDIR.$i + file mkdir $clientdir($i) + set c_logtype($i) [lindex $logset [expr $i + 1]] + set c_logargs($i) [adjust_logargs $c_logtype($i)] + set c_txnargs($i) [adjust_txnargs $c_logtype($i)] + set id($i) [expr 2 + $i] + repladd $id($i) + set cl_envcmd($i) "berkdb_env -create $c_txnargs($i) \ + $c_logargs($i) -lock_max 2500 -home $clientdir($i) \ + -rep_client -rep_transport \[list $id($i) replsend\]" +# set cl_envcmd($i) "berkdb_env -create $c_txnargs($i) \ +# $c_logargs($i) -lock_max 2500 -home $clientdir($i) \ +# -errpfx CLIENT$i -verbose {rep on} \ +# -rep_client -rep_transport \[list $id($i) replsend\]" + set clenv($i) [eval $cl_envcmd($i) $recargs] + error_check_good client_env [is_valid_env $clenv($i)] TRUE + } + + set masterdb [eval {berkdb_open_noerr -env $menv -auto_commit \ + -create -mode 0644} $largs $omethod $testfile] + error_check_good dbopen [is_valid_db $masterdb] TRUE + + # Bring the clients online by processing the startup messages. + set envlist {} + lappend envlist "$menv 1" + for { set i 0 } { $i < $nclients } { incr i } { + lappend envlist "$clenv($i) $id($i)" + } + process_msgs $envlist + + # Run a modified test001 in the master (and update clients). + puts "\tRep$tnum.c: Running rep_test in primary replicated env." + eval rep_test $method $menv $masterdb $niter $e1phase1 1 1 + eval rep_test $method $menv $masterdb $niter $e1phase2 1 1 + eval rep_test $method $menv $masterdb $niter $e1phase3 1 1 + error_check_good mdb_cl [$masterdb close] 0 + # Process any close messages. + process_msgs $envlist + + puts "\tRep$tnum.d: Add unrelated client into replication group." + set i $nclients + set orig $nclients + set nclients [expr $nclients + 1] + + set clientdir($i) $clientdir2 + set id($i) [expr 2 + $i] + repladd $id($i) + set cl_envcmd($i) "berkdb_env_noerr -create -txn nosync -lock_max 2500 \ + -home $clientdir($i) \ + -rep_client -rep_transport \[list $id($i) replsend\]" +# set cl_envcmd($i) "berkdb_env -create -txn nosync -lock_max 2500 \ +# -errpfx CLIENT$i -verbose {rep on} \ +# -home $clientdir($i) \ +# -rep_client -rep_transport \[list $id($i) replsend\]" + set clenv($i) [eval $cl_envcmd($i) $recargs] + error_check_good client_env [is_valid_env $clenv($i)] TRUE + + lappend envlist "$clenv($i) $id($i)" + + fileremove -f $clientdir2/prlog.orig + set stat [catch {eval exec $util_path/db_printlog \ + -h $clientdir2 >> $clientdir2/prlog.orig} result] + + + set err 0 + process_msgs $envlist 0 NONE err + + puts "\tRep$tnum.e: Close all envs and run recovery in clients." 
+ error_check_good menv_cl [$menv close] 0 + for {set i 0} {$i < $nclients} {incr i} { + error_check_good cl$i.close [$clenv($i) close] 0 + set hargs($i) "-h $clientdir($i)" + } + set i [expr $nclients - 1] + fileremove -f $clientdir($i)/prlog + set stat [catch {eval exec $util_path/db_printlog \ + -h $clientdir($i) >> $clientdir($i)/prlog} result] + # + # If we got an error, then the log should match the original + # and the error message should tell us the client was never + # part of this environment. + # + if { $err != 0 } { + puts "\tRep$tnum.f: Verify client log matches original." + error_check_good log_cmp(orig,$i) \ + [filecmp $clientdir($i)/prlog.orig $clientdir($i)/prlog] 0 + puts "\tRep$tnum.g: Verify client error." + error_check_good errchk [is_substr $err \ + "Client was never part"] 1 + } else { + puts "\tRep$tnum.f: Verify client log doesn't match original." + error_check_good log_cmp(orig,$i) \ + [filecmp $clientdir($i)/prlog.orig $clientdir($i)/prlog] 1 + puts "\tRep$tnum.g: Verify new client log matches master." + set stat [catch {eval exec $util_path/db_printlog \ + -h $masterdir >& $masterdir/prlog} result] + fileremove -f $clientdir($i)/prlog + set stat [catch {eval exec $util_path/db_printlog \ + -h $clientdir($i) >> $clientdir($i)/prlog} result] + error_check_good stat_prlog $stat 0 + error_check_good log_cmp(master,$i) \ + [filecmp $masterdir/prlog $clientdir($i)/prlog] 0 + } + + replclose $testdir/MSGQUEUEDIR + set testdir $orig_tdir + return +} + + diff --git a/db/test/rep022.tcl b/db/test/rep022.tcl new file mode 100644 index 000000000..a58969b04 --- /dev/null +++ b/db/test/rep022.tcl @@ -0,0 +1,281 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep022.tcl,v 1.11 2004/09/22 18:01:06 bostic Exp $ +# +# TEST rep022 +# TEST Replication elections - test election generation numbers +# TEST during simulated network partition. +# TEST +proc rep022 { method args } { + global rand_init + global mixed_mode_logging + set tnum "022" + + if { $mixed_mode_logging == 1 } { + puts "Rep$tnum: Skipping for mixed-mode logging." + return + } + if { [is_btree $method] == 0 } { + puts "Rep$tnum: Skipping for method $method." + return + } + + error_check_good set_random_seed [berkdb srand $rand_init] 0 + set nclients 5 + set logsets [create_logsets [expr $nclients + 1]] + foreach l $logsets { + puts "Rep$tnum ($method): Election generation test\ + with simulated network partition." + puts "Rep$tnum: Master logs are [lindex $l 0]" + for { set i 0 } { $i < $nclients } { incr i } { + puts "Rep$tnum: Client $i logs are\ + [lindex $l [expr $i + 1]]" + } + rep022_sub $method $nclients $tnum $l $args + } +} + +proc rep022_sub { method nclients tnum logset args } { + source ./include.tcl + global errorInfo + env_cleanup $testdir + + set qdir $testdir/MSGQUEUEDIR + replsetup $qdir + + set masterdir $testdir/MASTERDIR + file mkdir $masterdir + set m_logtype [lindex $logset 0] + set m_logargs [adjust_logargs $m_logtype] + set m_txnargs [adjust_txnargs $m_logtype] + + for { set i 0 } { $i < $nclients } { incr i } { + set clientdir($i) $testdir/CLIENTDIR.$i + file mkdir $clientdir($i) + set c_logtype($i) [lindex $logset [expr $i + 1]] + set c_logargs($i) [adjust_logargs $c_logtype($i)] + set c_txnargs($i) [adjust_txnargs $c_logtype($i)] + } + +# To debug elections, the lines to uncomment are below the +# error checking portion of this test. 
This is needed in order +# for the error messages to come back in errorInfo and for +# that portion of the test to pass. + # Open a master. + set envlist {} + repladd 1 + set env_cmd(M) "berkdb_env -create -log_max 1000000 \ + -home $masterdir $m_txnargs $m_logargs -rep_master \ + -errpfx MASTER -rep_transport \[list 1 replsend\]" + set masterenv [eval $env_cmd(M)] + error_check_good master_env [is_valid_env $masterenv] TRUE + lappend envlist "$masterenv 1" + + # Open the clients. + for { set i 0 } { $i < $nclients } { incr i } { + set envid [expr $i + 2] + repladd $envid + set env_cmd($i) "berkdb_env_noerr -create \ + -home $clientdir($i) $c_txnargs($i) $c_logargs($i) \ + -rep_client -rep_transport \[list $envid replsend\]" + set clientenv($i) [eval $env_cmd($i)] + error_check_good \ + client_env($i) [is_valid_env $clientenv($i)] TRUE + lappend envlist "$clientenv($i) $envid" + } + # Bring the clients online by processing the startup messages. + process_msgs $envlist + + # Run a modified test001 in the master. + puts "\tRep$tnum.a: Running rep_test in replicated env." + set niter 10 + eval rep_test $method $masterenv NULL $niter 0 0 + process_msgs $envlist + error_check_good masterenv_close [$masterenv close] 0 + set envlist [lreplace $envlist 0 0] + +# To debug elections, uncomment the lines below to turn on verbose +# and set the errfile. Also edit reputils.tcl +# in proc start_election and swap the 2 commented lines with +# their counterpart. + foreach pair $envlist { + set i [expr [lindex $pair 1] - 2] + replclear [expr $i + 2] + set err_cmd($i) "none" + set pri($i) 10 + set crash($i) 0 +# error_check_good pfx [$clientenv($i) errpfx CLIENT$i] 0 +# error_check_good verb [$clientenv($i) verbose rep on] 0 +# $clientenv($i) errfile /dev/stderr +# set env_cmd($i) [concat $env_cmd($i) \ +# "-errpfx CLIENT$i -verbose {rep on} -errfile /dev/stderr"] + } + + set msg "Rep$tnum.b" + puts "\t$msg: Run election for clients 0,1,2." + # + # Run an election with clients 0, 1, and 2. + # Make client 0 be the winner, and let it stay master. + # + set origlist $envlist + set orignclients $nclients + set envlist [lrange $origlist 0 2] + set nclients 3 + set nsites 3 + set nvotes 3 + set winner 0 + setpriority pri $nclients $winner + set elector [berkdb random_int 0 [expr $nclients - 1]] + run_election env_cmd envlist err_cmd pri crash \ + $qdir $msg $elector $nsites $nvotes $nclients $winner 0 + + set msg "Rep$tnum.c" + puts "\t$msg: Close and reopen client 2 with recovery." + # + # Now close and reopen 2 with recovery. Update the + # list of all client envs with the new information. + # + error_check_good clientenv_close(2) [$clientenv(2) close] 0 + set clientenv(2) [eval $env_cmd(2) -recover] + set origlist [lreplace $origlist 2 2 "$clientenv(2) 4"] + + # Get last LSN for client 2. + set logc [$clientenv(2) log_cursor] + error_check_good logc \ + [is_valid_logc $logc $clientenv(2)] TRUE + set lastlsn2 [lindex [lindex [$logc get -last] 0] 1] + error_check_good close_cursor [$logc close] 0 + + set msg "Rep$tnum.d" + puts "\t$msg: Close and reopen client 4 with recovery." + # + # This forces the last LSN for client 4 up to the last + # LSN for client 2 so client 4 can be elected. 
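+	# Each close/reopen with -recover appends fresh log records
+	# (recovery checkpoints), so looping advances client 4's last
+	# LSN until it reaches client 2's.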
+ # + set lastlsn4 0 + while { $lastlsn4 < $lastlsn2 } { + error_check_good clientenv_close(4) [$clientenv(4) close] 0 + set clientenv(4) [eval $env_cmd(4) -recover] + set origlist [lreplace $origlist 4 4 "$clientenv(4) 6"] + set logc [$clientenv(4) log_cursor] + error_check_good logc \ + [is_valid_logc $logc $clientenv(4)] TRUE + set lastlsn4 [lindex [lindex [$logc get -last] 0] 1] + error_check_good close_cursor [$logc close] 0 + } + + set msg "Rep$tnum.e" + puts "\t$msg: Run election for clients 2,3,4." + # + # Run an election with clients 2, 3, 4. + # Make last client be the winner, and let it stay master. + # Need to process messages before running election so + # that clients 2 and 4 update to the right gen with + # client 3. + # + set envlist [lrange $origlist 2 4] + process_msgs $envlist + foreach pair $envlist { + set i [expr [lindex $pair 1] - 2] + set clientenv($i) [lindex $pair 0] + set egen($i) [stat_field \ + $clientenv($i) rep_stat "Election generation number"] + } + set winner 4 + setpriority pri $nclients $winner 2 + set elector [berkdb random_int 2 4] + run_election env_cmd envlist err_cmd pri crash \ + $qdir $msg $elector $nsites $nvotes $nclients $winner 0 + + # Note egens for all the clients. + set envlist $origlist + foreach pair $envlist { + set i [expr [lindex $pair 1] - 2] + set clientenv($i) [lindex $pair 0] + set egen($i) [stat_field \ + $clientenv($i) rep_stat "Election generation number"] + } + + # Have client 4 (currently a master) run an operation. + eval rep_test $method $clientenv(4) NULL $niter 0 0 + + # Check that clients 0 and 4 get DUPMASTER messages and + # restart them as clients. + # + puts "\tRep$tnum.f: Check for DUPMASTER" + set envlist0 [lrange $envlist 0 0] + process_msgs $envlist0 0 dup err + error_check_good is_dupmaster0 [lindex $dup 0] 1 + error_check_good downgrade0 [$clientenv(0) rep_start -client] 0 + + set envlist4 [lrange $envlist 4 4] + process_msgs $envlist4 0 dup err + error_check_good is_dupmaster4 [lindex $dup 0] 1 + error_check_good downgrade4 [$clientenv(4) rep_start -client] 0 + + # All DUPMASTER messages are now gone. + # We might get residual errors however because client 4 + # responded as a master to client 0 and then became a + # client immediately. Therefore client 4 might get some + # "master-only" records and return EINVAL. We want to + # ignore those and process records until calm is restored. + set err 1 + while { $err == 1 } { + process_msgs $envlist 0 dup err + error_check_good no_dupmaster $dup 0 + } + + # Check LSNs before new election. + foreach pair $envlist { + set i [expr [lindex $pair 1] - 2] + set logc [$clientenv($i) log_cursor] + error_check_good logc \ + [is_valid_logc $logc $clientenv($i)] TRUE + set lastlsn [lindex [lindex [$logc get -last] 0] 1] + error_check_good cursor_close [$logc close] 0 + } + + set msg "Rep$tnum.g" + puts "\t$msg: Run election for all clients after DUPMASTER." + + # Call a new election with all participants. Make 4 the + # winner, since it should have a high enough LSN to win. + set nclients $orignclients + set nsites $nclients + set nvotes $nclients + set winner 4 + setpriority pri $nclients $winner + set elector [berkdb random_int 0 [expr $nclients - 1]] + run_election env_cmd envlist err_cmd pri crash \ + $qdir $msg $elector $nsites $nvotes $nclients $winner 0 + + # Pull out new egens. 
+ foreach pair $envlist { + set i [expr [lindex $pair 1] - 2] + set clientenv($i) [lindex $pair 0] + set newegen($i) [stat_field \ + $clientenv($i) rep_stat "Election generation number"] + } + + # Egen numbers should all be the same now, and all greater than + # they were before the election. + set currentegen $newegen(0) + for { set i 0 } { $i < $nclients } { incr i } { + set egen_diff [expr $newegen($i) - $egen($i)] + error_check_good egen_increased [expr $egen_diff > 0] 1 + error_check_good newegens_match $currentegen $newegen($i) + } + + # Clean up. + foreach pair $envlist { + set cenv [lindex $pair 0] + error_check_good cenv_close [$cenv close] 0 + } + + replclose $testdir/MSGQUEUEDIR +} + + diff --git a/db/test/rep023.tcl b/db/test/rep023.tcl new file mode 100644 index 000000000..ef335aa35 --- /dev/null +++ b/db/test/rep023.tcl @@ -0,0 +1,159 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep023.tcl,v 11.3 2004/09/22 18:01:06 bostic Exp $ +# +# TEST rep023 +# TEST Replication using two master handles. +# TEST +# TEST Open two handles on one master env. Create two +# TEST databases, one through each master handle. Process +# TEST all messages through the first master handle. Make +# TEST sure changes made through both handles are picked +# TEST up properly. +# +proc rep023 { method { niter 10 } { tnum "023" } args } { + global is_hp_test + + # We can't open two envs on HP-UX, so just skip the + # whole test since that is at the core of it. + if { $is_hp_test == 1 } { + puts "Rep$tnum: Skipping for HP-UX." + return + } + set args [convert_args $method $args] + set logsets [create_logsets 2] + + # Run the body of the test with and without recovery, and + # with and without -rep_start. + set recopts { "" "-recover" } + foreach r $recopts { + foreach l $logsets { + set logindex [lsearch -exact $l "in-memory"] + if { $r == "-recover" && $logindex != -1 } { + puts "Rep$tnum: Skipping\ + for in-memory logs with -recover." + continue + } + foreach startopt { 0 1 } { + if { $startopt == 1 } { + set msg "with rep_start" + } else { + set msg "" + } + puts "Rep$tnum ($method $r $msg):\ + Replication and openfiles." + puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client logs are [lindex $l 1]" + rep023_sub $method $niter $tnum $l $r $startopt $args + } + } + } +} + +proc rep023_sub { method niter tnum logset recargs startopt largs } { + global testdir + env_cleanup $testdir + + replsetup $testdir/MSGQUEUEDIR + + set masterdir $testdir/MASTERDIR + set clientdir $testdir/CLIENTDIR + file mkdir $masterdir + file mkdir $clientdir + + set m_logtype [lindex $logset 0] + set c_logtype [lindex $logset 1] + + # In-memory logs require a large log buffer, and cannot + # be used with -txn nosync. + set m_logargs [adjust_logargs $m_logtype] + set c_logargs [adjust_logargs $c_logtype] + set m_txnargs [adjust_txnargs $m_logtype] + set c_txnargs [adjust_txnargs $c_logtype] + + # Open 1st master. + repladd 1 + set ma_envcmd "berkdb_env -create $m_txnargs \ + $m_logargs -lock_max 2500 \ + -home $masterdir -rep_transport \[list 1 replsend\]" +# set ma_envcmd "berkdb_env -create $m_txnargs \ +# $m_logargs -lock_max 2500 \ +# -errpfx MASTER -verbose {rep on} \ +# -home $masterdir -rep_transport \[list 1 replsend\]" + set masterenv1 [eval $ma_envcmd $recargs -rep_master] + error_check_good master_env [is_valid_env $masterenv1] TRUE + + # Open 2nd handle on master. The master envs will share + # the same envid. 
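+	# No new repladd is done for this handle, so both handles send
+	# through transport id 1; the test then checks that updates made
+	# through either handle replicate even though only the first
+	# handle's messages are processed.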
+ set masterenv2 [eval $ma_envcmd] + error_check_good master_env [is_valid_env $masterenv2] TRUE + if { $startopt == 1 } { + error_check_good rep_start [$masterenv2 rep_start -master] 0 + } + + # Open a client. + repladd 2 + set cl_envcmd "berkdb_env -create $c_txnargs \ + $c_logargs -lock_max 2500 \ + -home $clientdir -rep_transport \[list 2 replsend\]" +# set cl_envcmd "berkdb_env -create $c_txnargs \ +# $c_logargs -lock_max 2500 \ +# -errpfx CLIENT1 -verbose {rep on} \ +# -home $clientdir -rep_transport \[list 2 replsend\]" + set clientenv [eval $cl_envcmd $recargs -rep_client] + error_check_good client_env [is_valid_env $clientenv] TRUE + + # Bring the clients online by processing the startup messages. + # Process messages on the first masterenv handle, not the second. + set envlist "{$masterenv1 1} {$clientenv 2}" + process_msgs $envlist + + puts "\tRep$tnum.a: Create database using 1st master handle." + # Create a database using the 1st master. + set testfile1 "m1$tnum.db" + set omethod [convert_method $method] + set db1 [eval {berkdb_open_noerr -env $masterenv1 -auto_commit \ + -create -mode 0644} $largs $omethod $testfile1] + error_check_good dbopen [is_valid_db $db1] TRUE + + puts "\tRep$tnum.b: Create database using 2nd master handle." + # Create a different database using the 2nd master. + set testfile2 "m2$tnum.db" + set db2 [eval {berkdb_open_noerr -env $masterenv2 -auto_commit \ + -create -mode 0644} $largs $omethod $testfile2] + error_check_good dbopen [is_valid_db $db2] TRUE + + puts "\tRep$tnum.c: Process messages." + # Process messages. + process_msgs $envlist + + puts "\tRep$tnum.d: Run rep_test in 1st master; process messages." + eval rep_test $method $masterenv1 $db1 $niter 0 0 + process_msgs $envlist + + puts "\tRep$tnum.e: Run rep_test in 2nd master; process messages." + eval rep_test $method $masterenv2 $db2 $niter 0 0 + process_msgs $envlist + + # Contents of the two databases should match. + error_check_good db_compare [db_compare \ + $db1 $db2 $masterdir/$testfile1 $masterdir/$testfile2] 0 + + puts "\tRep$tnum.f: Close 2nd master." + error_check_good db2 [$db2 close] 0 + error_check_good master2_close [$masterenv2 close] 0 + + puts "\tRep$tnum.g: Run test in master again." + eval rep_test $method $masterenv1 $db1 $niter $niter 0 + process_msgs $envlist + + puts "\tRep$tnum.h: Closing" + error_check_good db1 [$db1 close] 0 + error_check_good env0_close [$masterenv1 close] 0 + error_check_good env2_close [$clientenv close] 0 + replclose $testdir/MSGQUEUEDIR + return +} diff --git a/db/test/rep024.tcl b/db/test/rep024.tcl new file mode 100644 index 000000000..d3d8e089b --- /dev/null +++ b/db/test/rep024.tcl @@ -0,0 +1,202 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep024.tcl,v 1.7 2004/09/22 18:01:06 bostic Exp $ +# +# TEST rep024 +# TEST Replication page allocation / verify test +# TEST +# TEST Start a master (site 1) and a client (site 2). Master +# TEST closes (simulating a crash). Site 2 becomes the master +# TEST and site 1 comes back up as a client. Verify database. + +proc rep024 { method { niter 1000 } { tnum "024" } args } { + global fixed_len + + set orig_fixed_len $fixed_len + set fixed_len 448 + set args [convert_args $method $args] + set logsets [create_logsets 2] + + # Run all tests with and without recovery. 
+ set envargs "" + set recopts { "" "-recover" } + foreach r $recopts { + foreach l $logsets { + set logindex [lsearch -exact $l "in-memory"] + if { $r == "-recover" && $logindex != -1 } { + puts "Rep$tnum: Skipping\ + for in-memory logs with -recover." + continue + } + puts "Rep$tnum ($method $r): \ + Replication page allocation/verify test." + puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client logs are [lindex $l 1]" + rep024_sub $method $niter $tnum $envargs $l $r $args + } + } + set fixed_len $orig_fixed_len + return +} + +proc rep024_sub { method niter tnum envargs logset recargs largs } { + source ./include.tcl + global testdir + env_cleanup $testdir + + replsetup $testdir/MSGQUEUEDIR + + set masterdir $testdir/MASTERDIR + set clientdir $testdir/CLIENTDIR + + file mkdir $masterdir + file mkdir $clientdir + set m_logtype [lindex $logset 0] + set c_logtype [lindex $logset 1] + + # In-memory logs require a large log buffer, and cannot + # be used with -txn nosync. This test requires -txn, so + # we only have to adjust the logargs. + set m_logargs [adjust_logargs $m_logtype] + set c_logargs [adjust_logargs $c_logtype] + + if { [is_record_based $method] == 1 } { + set checkfunc test024_recno.check + } else { + set checkfunc test024.check + } + + # Open a master. + repladd 1 + set env_cmd(1) "berkdb_env_noerr -create -lock_max 2500 \ + -log_max 1000000 $envargs $recargs -home $masterdir \ + -errpfx MASTER -txn $m_logargs \ + -rep_transport \[list 1 replsend\]" +# set env_cmd(1) "berkdb_env_noerr -create -lock_max 2500 \ +# -log_max 1000000 $envargs $recargs -home $masterdir \ +# -verbose {rep on} -errfile /dev/stderr \ +# -errpfx MASTER -txn $m_logargs \ +# -rep_transport \[list 1 replsend\]" + set masterenv [eval $env_cmd(1) -rep_master] + error_check_good master_env [is_valid_env $masterenv] TRUE + + # Open a client + repladd 2 + set env_cmd(2) "berkdb_env_noerr -create -lock_max 2500 \ + -log_max 1000000 $envargs $recargs -home $clientdir \ + -errpfx CLIENT -txn $c_logargs \ + -rep_transport \[list 2 replsend\]" +# set env_cmd(2) "berkdb_env_noerr -create -lock_max 2500 \ +# -log_max 1000000 $envargs $recargs -home $clientdir \ +# -verbose {rep on} -errfile /dev/stderr \ +# -errpfx CLIENT -txn $c_logargs \ +# -rep_transport \[list 2 replsend\]" + set clientenv [eval $env_cmd(2) -rep_client] + error_check_good client_env [is_valid_env $clientenv] TRUE + + # Bring the client online by processing the startup messages. + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist + + puts "\tRep$tnum.a: Add data to master, update client." + # + # This test uses a small page size and a large fixed_len + # so it is easy to force a page allocation. + set key [expr $niter + 1] + set data A + set pagesize 512 + if { [is_fixed_length $method] == 1 } { + set bigdata [repeat $data [expr $pagesize / 2]] + } else { + set bigdata [repeat $data [expr 2 * $pagesize]] + } + + set omethod [convert_method $method] + set testfile "test$tnum.db" + set db [eval "berkdb_open -create $omethod -auto_commit \ + -pagesize $pagesize -env $masterenv $largs $testfile"] + eval rep_test $method $masterenv $db $niter 0 0 + process_msgs $envlist + + # Close client. Force a page allocation on the master. + # An overflow page (or big page, for hash) will do the job. + # + puts "\tRep$tnum.b: Close client, force page allocation on master." 
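+	# (With pagesize 512 and fixed_len 448, the single put of
+	# $bigdata below is enough to allocate at least one new page;
+	# r24_check_pages then reads the page counts via db stat.)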
+ error_check_good client_close [$clientenv close] 0 +# error_check_good client_verify \ +# [verify_dir $clientdir "\tRep$tnum.b: " 0 0 1] 0 + set pages1 [r24_check_pages $db $method] + set txn [$masterenv txn] + error_check_good put_bigdata [eval {$db put} \ + -txn $txn {$key [chop_data $method $bigdata]}] 0 + error_check_good txn_commit [$txn commit] 0 + + # Verify that we have allocated new pages. + set pages2 [r24_check_pages $db $method] + set newpages [expr $pages2 - $pages1] + + # Close master and discard messages for site 2. Now everybody + # is closed and sites 1 and 2 have different contents. + puts "\tRep$tnum.c: Close master." + error_check_good db_close [$db close] 0 + error_check_good master_close [$masterenv close] 0 + if { $newpages <= 0 } { + puts "FAIL: no new pages allocated." + return + } + error_check_good master_verify \ + [verify_dir $masterdir "\tRep$tnum.c: " 0 0 1] 0 + + # Run a loop, opening the original client as master and the + # original master as client. Test db_verify. + foreach option { "no new data" "add new data" } { + puts "\tRep$tnum.d: Swap master and client ($option)." + set newmasterenv [eval $env_cmd(2) -rep_master] + set newclientenv [eval $env_cmd(1) -rep_client] + set envlist "{$newmasterenv 2} {$newclientenv 1}" + process_msgs $envlist + if { $option == "add new data" } { + set key [expr $niter + 2] + set db [eval "berkdb_open -create $omethod \ + -auto_commit -pagesize $pagesize \ + -env $newmasterenv $largs $testfile"] + set pages1 [r24_check_pages $db $method] + set txn [$newmasterenv txn] + error_check_good put_bigdata [eval {$db put} \ + -txn $txn {$key [chop_data $method $bigdata]}] 0 + error_check_good txn_commit [$txn commit] 0 + set pages2 [r24_check_pages $db $method] + set newpages [expr $pages2 - $pages1] + error_check_good db_close [$db close] 0 + process_msgs $envlist + } + puts "\tRep$tnum.e: Close master and client, run verify." + error_check_good newmasterenv_close [$newmasterenv close] 0 + error_check_good newclientenv_close [$newclientenv close] 0 + if { $newpages <= 0 } { + puts "FAIL: no new pages allocated." + return + } + # This test can leave unreferenced pages on systems without + # FTRUNCATE and that's OK, so set unref to 0. + error_check_good verify \ + [verify_dir $masterdir "\tRep$tnum.f: " 0 0 1 0 0] 0 + error_check_good verify \ + [verify_dir $clientdir "\tRep$tnum.g: " 0 0 1 0 0] 0 + } + replclose $testdir/MSGQUEUEDIR +} + +proc r24_check_pages { db method } { + if { [is_hash $method] == 1 } { + set pages [stat_field $db stat "Number of big pages"] + } elseif { [is_queue $method] == 1 } { + set pages [stat_field $db stat "Number of pages"] + } else { + set pages [stat_field $db stat "Overflow pages"] + } + return $pages +} diff --git a/db/test/rep026.tcl b/db/test/rep026.tcl new file mode 100644 index 000000000..0e4fe7359 --- /dev/null +++ b/db/test/rep026.tcl @@ -0,0 +1,250 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep026.tcl,v 11.7 2004/09/22 18:01:06 bostic Exp $ +# +# TEST rep026 +# TEST Replication elections - simulate a crash after sending +# TEST a vote. + +proc rep026 { method args } { + global mixed_mode_logging + set tnum "026" + # This test uses recovery, so mixed-mode testing isn't + # appropriate. + if { $mixed_mode_logging == 1 } { + puts "Rep$tnum: Skipping for mixed-mode logging." + return + } + if { [is_btree $method] == 0 } { + puts "Rep$tnum: Skipping for method $method." 
+ return + } + + global rand_init + error_check_good set_random_seed [berkdb srand $rand_init] 0 + + set nclients 5 + set logsets [create_logsets [expr $nclients + 1]] + foreach l $logsets { + puts "Rep$tnum ($method): Election generations -\ + simulate crash after sending a vote." + puts "Rep$tnum: Master logs are [lindex $l 0]" + for { set i 0 } { $i < $nclients } { incr i } { + puts "Rep$tnum: Client $i logs are\ + [lindex $l [expr $i + 1]]" + } + rep026_sub $method $nclients $tnum $l $args + } +} + +proc rep026_sub { method nclients tnum logset args } { + source ./include.tcl + global errorInfo + global machids + env_cleanup $testdir + + set qdir $testdir/MSGQUEUEDIR + replsetup $qdir + + set masterdir $testdir/MASTERDIR + file mkdir $masterdir + set m_logtype [lindex $logset 0] + set m_logargs [adjust_logargs $m_logtype] + set m_txnargs [adjust_txnargs $m_logtype] + + for { set i 0 } { $i < $nclients } { incr i } { + set clientdir($i) $testdir/CLIENTDIR.$i + file mkdir $clientdir($i) + set c_logtype($i) [lindex $logset [expr $i + 1]] + set c_logargs($i) [adjust_logargs $c_logtype($i)] + set c_txnargs($i) [adjust_txnargs $c_logtype($i)] + } + +# To debug elections, the lines to uncomment are below the +# error checking portion of this test. This is needed in order +# for the error messages to come back in errorInfo and for +# that portion of the test to pass. + # Open a master. + set envlist {} + repladd 1 + set env_cmd(M) "berkdb_env -create -log_max 1000000 \ + -home $masterdir $m_txnargs $m_logargs -rep_master \ + -errpfx MASTER -rep_transport \[list 1 replsend\]" + set masterenv [eval $env_cmd(M)] + error_check_good master_env [is_valid_env $masterenv] TRUE + lappend envlist "$masterenv 1" + + # Open the clients. + for { set i 0 } { $i < $nclients } { incr i } { + set envid [expr $i + 2] + repladd $envid + set env_cmd($i) "berkdb_env_noerr -create \ + -home $clientdir($i) $c_txnargs($i) $c_logargs($i) \ + -rep_client -rep_transport \[list $envid replsend\]" + set clientenv($i) [eval $env_cmd($i)] + error_check_good \ + client_env($i) [is_valid_env $clientenv($i)] TRUE + lappend envlist "$clientenv($i) $envid" + } + # Bring the clients online by processing the startup messages. + process_msgs $envlist + + # Run a modified test001 in the master. + puts "\tRep$tnum.a: Running rep_test in replicated env." + set niter 10 + eval rep_test $method $masterenv NULL $niter 0 0 + process_msgs $envlist + error_check_good masterenv_close [$masterenv close] 0 + set envlist [lreplace $envlist 0 0] + +# To debug elections, uncomment the lines below to turn on verbose +# and set the errfile. Also edit reputils.tcl +# in proc start_election and swap the 2 commented lines with +# their counterpart. + foreach pair $envlist { + set i [expr [lindex $pair 1] - 2] + replclear [expr $i + 2] + set err_cmd($i) "none" + set crash($i) 0 + set pri($i) 10 +# error_check_good pfx [$clientenv($i) errpfx CLIENT$i] 0 +# error_check_good verb [$clientenv($i) verbose rep on] 0 +# $clientenv($i) errfile /dev/stderr +# set env_cmd($i) [concat $env_cmd($i) \ +# "-errpfx CLIENT$i -verbose {rep on} -errfile /dev/stderr"] + } + + # In each case we simulate a crash in client C, recover, and + # call a second election. We vary the caller of the second + # election (C or some other client) and when the election + # messages from before the crash are processed - before or + # after the second election. 
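+	# Each option below is a list {elector2 letter restore}: which
+	# client calls the second election, the sub-step letter used in
+	# the output, and whether the saved pre-crash messages are
+	# restored before or after that second election.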
+ # + foreach option { "1 b before" "2 c before" "1 d after" "2 e after"} { + # Elector 1 calls the first election, elector 2 + # calls the second election. + set elector1 1 + set elector2 [lindex $option 0] + set let [lindex $option 1] + set restore [lindex $option 2] + + if { $elector1 == $elector2 } { + puts "\tRep$tnum.$let: Simulated crash and recovery\ + (crashing client calls second election)." + } else { + puts "\tRep$tnum.$let: Simulated crash and recovery\ + (non-crashing client calls second election)." + } + + puts "\tRep$tnum.$let: Process messages from crasher\ + $restore 2nd election." + + puts "\t\tRep$tnum.$let.1: Note egens for all clients." + # Note egens for all the clients. + foreach pair $envlist { + set i [expr [lindex $pair 1] - 2] + set clientenv($i) [lindex $pair 0] + set egen($i) [stat_field \ + $clientenv($i) rep_stat "Election generation number"] + } + + # Call an election which simulates a crash after sending + # its VOTE1. + set msg "\tRep$tnum.$let.2" + puts "\t$msg: Start election, simulate a crash." + set nsites $nclients + set nvotes $nclients + # Make the winner the crashing client, since the + # crashing client will have the biggest LSN. + set elector 1 + set winner $elector + set crash($elector) 1 + setpriority pri $nclients $winner + set err_cmd($elector) "electvote1" + run_election env_cmd envlist err_cmd pri crash \ + $qdir $msg $elector $nsites $nvotes $nclients $winner 0 + + set msg "\tRep$tnum.$let.3" + puts "\t$msg: Close and reopen elector with recovery." + error_check_good \ + clientenv_close($elector) [$clientenv($elector) close] 0 + + # Have other clients SKIP the election messages and process + # only C's startup messages. We'll do it by copying the files + # and emptying the originals. + foreach machid $machids { + file copy -force $qdir/repqueue$machid.db $qdir/save$machid.db + replclear $machid + } + + # Reopen C and process messages. Only the startup messages + # will be available. + set clientenv($elector) [eval $env_cmd($elector) -recover] + set envlist [lreplace $envlist \ + $elector $elector "$clientenv($elector) [expr $elector + 2]"] + process_msgs $envlist + + # Verify egens (should be +1 in C, and unchanged in other clients). + foreach pair $envlist { + set i [expr [lindex $pair 1] - 2] + set clientenv($i) [lindex $pair 0] + set newegen($i) [stat_field \ + $clientenv($i) rep_stat "Election generation number"] + if { $i == $elector } { + error_check_good egen+1 $newegen($i) [expr $egen($i) + 1] + } else { + error_check_good egen_unchanged $newegen($i) $egen($i) + } + } + + # First chance to restore messages. + if { $restore == "before" } { + restore_messages $qdir + } + + # Have C call an election (no crash simulation) and process + # all the messages. + set msg "\tRep$tnum.$let.4" + puts "\t$msg: Call second election." + set err_cmd($elector) "none" + set crash($elector) 0 + run_election env_cmd envlist err_cmd pri crash \ + $qdir $msg $elector2 $nsites $nvotes $nclients $winner 1 + + # Second chance to restore messages. + if { $restore == "after" } { + restore_messages $qdir + } + process_msgs $envlist + + # Verify egens (should be +2 or more in all clients). + puts "\t\tRep$tnum.$let.5: Check egens." + foreach pair $envlist { + set i [expr [lindex $pair 1] - 2] + set clientenv($i) [lindex $pair 0] + set newegen($i) [stat_field \ + $clientenv($i) rep_stat "Election generation number"] + set mingen [expr $egen($i) + 2] + error_check_good egen+more($i) \ + [expr $newegen($i) >= $mingen] 1 + } + } + + # Clean up. 
+ foreach pair $envlist { + set cenv [lindex $pair 0] + error_check_good cenv_close [$cenv close] 0 + } + replclose $testdir/MSGQUEUEDIR +} + +proc restore_messages { qdir } { + global machids + foreach machid $machids { + file copy -force $qdir/save$machid.db $qdir/repqueue$machid.db + } +} + diff --git a/db/test/rep027.tcl b/db/test/rep027.tcl new file mode 100644 index 000000000..9af9f8f3f --- /dev/null +++ b/db/test/rep027.tcl @@ -0,0 +1,161 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep027.tcl,v 1.5 2004/10/07 16:21:17 sue Exp $ +# +# TEST rep027 +# TEST Replication and secondary indexes. +# TEST +# TEST Set up a secondary index on the master and make sure +# TEST it can be accessed from the client. + +proc rep027 { method { niter 1000 } { tnum "027" } args } { + + # Renumbering recno is not permitted on a primary + # database. + if { [is_rrecno $method] == 1 } { + puts "Skipping rep027 for -rrecno." + return + } + + set args [convert_args $method $args] + set logsets [create_logsets 2] + + # Run the body of the test with and without recovery. + set recopts { "" "-recover" } + foreach r $recopts { + foreach l $logsets { + set logindex [lsearch -exact $l "in-memory"] + if { $r == "-recover" && $logindex != -1 } { + puts "Rep$tnum: Skipping\ + for in-memory logs with -recover." + continue + } + puts "Rep$tnum ($method $r):\ + Replication and secondary indices" + puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client logs are [lindex $l 1]" + rep027_sub $method $niter $tnum $l $r $args + } + } +} + +proc rep027_sub { method niter tnum logset recargs largs } { + source ./include.tcl + global testdir + global verbose_check_secondaries + + set omethod [convert_method $method] + env_cleanup $testdir + + replsetup $testdir/MSGQUEUEDIR + + set masterdir $testdir/MASTERDIR + set clientdir $testdir/CLIENTDIR + + file mkdir $masterdir + file mkdir $clientdir + + set m_logtype [lindex $logset 0] + set c_logtype [lindex $logset 1] + + # In-memory logs require a large log buffer, and cannot + # be used with -txn nosync. + set m_logargs [adjust_logargs $m_logtype] + set c_logargs [adjust_logargs $c_logtype] + set m_txnargs [adjust_txnargs $m_logtype] + set c_txnargs [adjust_txnargs $c_logtype] + + # Open a master. + repladd 1 + set env_cmd(M) "berkdb_env -create -lock_max 2500 \ + -log_max 1000000 -home $masterdir \ + $m_txnargs $m_logargs -rep_master -rep_transport \ + \[list 1 replsend\]" + set masterenv [eval $env_cmd(M) $recargs] + error_check_good master_env [is_valid_env $masterenv] TRUE + + # Open a client + repladd 2 + set env_cmd(C) "berkdb_env -create -lock_max 2500 \ + $c_txnargs $c_logargs -home $clientdir \ + -rep_client -rep_transport \[list 2 replsend\]" + set clientenv [eval $env_cmd(C) $recargs] + error_check_good client_env [is_valid_env $clientenv] TRUE + + # Bring the client online by processing the startup messages. + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist + + # Set up database and secondary index on master. + puts "\tRep$tnum.a: Set up database with secondary index." + set pname "primary$tnum.db" + set sname "secondary$tnum.db" + + # Open the primary. + set pdb [eval {berkdb_open -create \ + -auto_commit -env} $masterenv $omethod $largs $pname] + error_check_good primary_open [is_valid_db $pdb] TRUE + process_msgs $envlist + + # Open and associate a secondary. 
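+	# (callback_n is the test suite's standard associate callback;
+	# presumably callback 0 derives the secondary key from the
+	# primary record.  The same callback is used again below when
+	# the secondary is associated on the client.)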
+ set sdb [eval {berkdb_open -create \ + -auto_commit -env} $masterenv -btree $sname] + error_check_good second_open [is_valid_db $sdb] TRUE + error_check_good \ + db_associate [$pdb associate -auto_commit [callback_n 0] $sdb] 0 + + # Propagate to client. + process_msgs $envlist + + # Put some data in the master. + set did [open $dict] + for { set n 0 } { [gets $did str] != -1 && $n < $niter } { incr n } { + if { [is_record_based $method] == 1 } { + set key [expr $n + 1] + set datum $str + } else { + set key $str + gets $did datum + } + set keys($n) $key + set data($n) [pad_data $method $datum] + + set ret [$pdb put -auto_commit $key [chop_data $method $datum]] + error_check_good put($n) $ret 0 + } + close $did + process_msgs $envlist + + # Check secondaries on master. + set verbose_check_secondaries 1 + puts "\tRep$tnum.b: Check secondaries on master." + check_secondaries $pdb $sdb $niter keys data "Rep$tnum.b" + error_check_good pdb_close [$pdb close] 0 + error_check_good sdb_close [$sdb close] 0 + process_msgs $envlist + + # Get handles on primary and secondary db on client. + set clientpdb [eval {berkdb_open -auto_commit -env} $clientenv $pname] + error_check_good client_pri [is_valid_db $clientpdb] TRUE + set clientsdb [eval {berkdb_open -auto_commit -env} $clientenv $sname] + error_check_good client_sec [is_valid_db $clientsdb] TRUE + error_check_good client_associate \ + [$clientpdb associate -auto_commit [callback_n 0] $clientsdb] 0 + + # Check secondaries on client. + puts "\tRep$tnum.c: Check secondaries on client." + check_secondaries $clientpdb $clientsdb $niter keys data "Rep$tnum.c" + + # Clean up. + error_check_good clientpdb_close [$clientpdb close] 0 + error_check_good clientsdb_close [$clientsdb close] 0 + error_check_good masterenv_close [$masterenv close] 0 + error_check_good clientenv_close [$clientenv close] 0 + + error_check_good verify \ + [verify_dir $clientdir "\tRep$tnum.e: " 0 0 1] 0 + replclose $testdir/MSGQUEUEDIR +} diff --git a/db/test/rep028.tcl b/db/test/rep028.tcl new file mode 100644 index 000000000..7f0ea7f34 --- /dev/null +++ b/db/test/rep028.tcl @@ -0,0 +1,209 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep028.tcl,v 11.4 2004/09/22 18:01:06 bostic Exp $ +# +# TEST rep028 +# TEST Replication and non-rep env handles. (Also see rep006.) +# TEST +# TEST Open second non-rep env on client, and create a db +# TEST through this handle. Open the db on master and put +# TEST some data. Check whether the non-rep handle keeps +# TEST working. Also check if opening the client database +# TEST in the non-rep env writes log records. +# +proc rep028 { method { niter 100 } { tnum "028" } args } { + global is_hp_test + + # Skip test for HP-UX because we can't open an env twice. + if { $is_hp_test == 1 } { + puts "\tRep$tnum: Skipping for HP-UX." + return + } + if { [is_btree $method] == 0 } { + puts "\tRep$tnum: Skipping for method $method." + return + } + + set args [convert_args $method $args] + set logsets [create_logsets 2] + + # Run the body of the test with and without recovery. + set recopts { "" "-recover" } + set clopts { "create" "open" } + foreach r $recopts { + foreach l $logsets { + set logindex [lsearch -exact $l "in-memory"] + if { $r == "-recover" && $logindex != -1 } { + puts "Rep$tnum: Skipping\ + for in-memory logs with -recover." 
+ continue + } + foreach c $clopts { + puts "Rep$tnum ($method $r $c):\ + Replication and non-rep env handles" + puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client logs are [lindex $l 1]" + rep028_sub $method $niter $tnum $l $r $c $args + } + } + } +} + +proc rep028_sub { method niter tnum logset recargs clargs largs } { + source ./include.tcl + global testdir + global is_hp_test + + set omethod [convert_method $method] + env_cleanup $testdir + + replsetup $testdir/MSGQUEUEDIR + + set masterdir $testdir/MASTERDIR + set clientdir $testdir/CLIENTDIR + + file mkdir $masterdir + file mkdir $clientdir + + set m_logtype [lindex $logset 0] + set c_logtype [lindex $logset 1] + + # In-memory logs require a large log buffer, and cannot + # be used with -txn nosync. + set m_logargs [adjust_logargs $m_logtype] + set c_logargs [adjust_logargs $c_logtype] + set m_txnargs [adjust_txnargs $m_logtype] + set c_txnargs [adjust_txnargs $c_logtype] + + # Open a master. + puts "\tRep$tnum.a: Open replicated envs and non-replicated client env." + repladd 1 + set env_cmd(M) "berkdb_env -create -lock_max 2500 \ + -log_max 1000000 -home $masterdir \ + $m_txnargs $m_logargs -rep_master \ + -rep_transport \[list 1 replsend\]" +# set env_cmd(M) "berkdb_env -create -lock_max 2500 \ +# -log_max 1000000 -home $masterdir \ +# $m_txnargs $m_logargs -rep_master \ +# -verbose {rep on} -errpfx MASTER \ +# -rep_transport \[list 1 replsend\]" + set masterenv [eval $env_cmd(M) $recargs] + error_check_good master_env [is_valid_env $masterenv] TRUE + + # Open a client + repladd 2 + set env_cmd(C) "berkdb_env -create $c_txnargs \ + $c_logargs -lock_max 2500 -home $clientdir \ + -rep_transport \[list 2 replsend\]" +# set env_cmd(C) "berkdb_env -create $c_txnargs \ +# $c_logargs -lock_max 2500 -home $clientdir \ +# -verbose {rep on} -errpfx CLIENT \ +# -rep_transport \[list 2 replsend\]" + set clientenv [eval $env_cmd(C) $recargs] + error_check_good client_env [is_valid_env $clientenv] TRUE + + # Open 2nd non-replication handle on client env, and create + # a db. Note, by not specifying any subsystem args, we + # do a DB_JOINENV, which is what we want. + set nonrepenv [eval {berkdb_env_noerr -home $clientdir}] + error_check_good nonrepenv [is_valid_env $nonrepenv] TRUE + + set dbname "test$tnum.db" + # + # If we're testing create, verify that if a non-rep client + # creates a database before the master does, then when that + # client goes to use it, it gets DB_DEAD_HANDLE. + # + + if { $clargs == "create" } { + puts "\tRep$tnum.b: Create database non-replicated." + set let c + set nextlet d + set nonrepdb [eval berkdb_open_noerr -auto_commit \ + -create $omethod -env $nonrepenv $dbname] + error_check_good nonrepdb_open [is_valid_db $nonrepdb] TRUE + tclsleep 2 + } else { + set let b + set nextlet c + } + + # + # Now declare the clientenv a client. + # + puts "\tRep$tnum.$let: Declare env as rep client" + error_check_good client [$clientenv rep_start -client] 0 + + + # Bring the client online by processing the startup messages. + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist + + # Open the same db through the master handle. Put data + # and process messages. + set db [eval berkdb_open \ + -create $omethod -env $masterenv -auto_commit $dbname] + error_check_good db_open [is_valid_db $db] TRUE + eval rep_test $method $masterenv $db $niter 0 0 + process_msgs $envlist + + if { $clargs == "create" } { + # Run db stat on non-rep handle. + puts "\tRep$tnum.$nextlet: Check dead handle." 
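+		#
+		# The handle is expected to go dead because it was opened
+		# before the client synchronized with the master; once the
+		# client catches up, stale handles must be closed and
+		# reopened, which is what the code below exercises.  A
+		# minimal sketch of the usual application-side pattern
+		# (the $db, $env, $key and $dbname names are hypothetical,
+		# not part of this test):
+		#
+		#	if { [catch {$db get $key} ret] != 0 && \
+		#	    [is_substr $ret DB_REP_HANDLE_DEAD] } {
+		#		error_check_good dead_close [$db close] 0
+		#		set db [berkdb_open -env $env $dbname]
+		#	}
+		#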
+ set stat [catch {$nonrepdb stat} ret] + error_check_good stat $stat 1 + error_check_good dead [is_substr $ret \ + DB_REP_HANDLE_DEAD] 1 + error_check_good close [$nonrepdb close] 0 + set nonrepdb [eval berkdb_open \ + -create $omethod -env $nonrepenv $dbname] + error_check_good nonrepdb_open [is_valid_db $nonrepdb] TRUE + set stat [catch {$nonrepdb stat} ret] + error_check_good stat $stat 0 + error_check_good close [$nonrepdb close] 0 + } + + # + # If we're the open case, we want to just read the existing + # database through a non-rep readonly handle. Doing so + # should not create log records on the client (but has + # in the past). + # + if { $clargs == "open" } { + puts "\tRep$tnum.$nextlet: Open and read database" + set nonrepdb [eval berkdb_open \ + -rdonly -env $nonrepenv $dbname] + error_check_good nonrepdb_open [is_valid_db $nonrepdb] TRUE + # + # If opening wrote log records, we need to process + # some more on the client to notice the end of log + # is now in an unexpected place. + # + eval rep_test $method $masterenv $db $niter 0 0 + process_msgs $envlist + error_check_good close [$nonrepdb close] 0 + + # + # Verify master and client logs are identical + # + set stat [catch {eval exec $util_path/db_printlog \ + -h $masterdir > $masterdir/prlog} result] + error_check_good stat_mprlog $stat 0 + set stat [catch {eval exec $util_path/db_printlog \ + -h $clientdir > $clientdir/prlog} result] + error_check_good stat_cprlog $stat 0 + error_check_good log_cmp \ + [filecmp $masterdir/prlog $clientdir/prlog] 0 + } + # Clean up. + error_check_good db_close [$db close] 0 + + error_check_good nonrepenv_close [$nonrepenv close] 0 + error_check_good masterenv_close [$masterenv close] 0 + error_check_good clientenv_close [$clientenv close] 0 + + replclose $testdir/MSGQUEUEDIR +} diff --git a/db/test/rep029.tcl b/db/test/rep029.tcl new file mode 100644 index 000000000..f69a8f051 --- /dev/null +++ b/db/test/rep029.tcl @@ -0,0 +1,207 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep029.tcl,v 11.5 2004/10/04 18:15:14 sue Exp $ +# +# TEST rep029 +# TEST Test of internal initialization. +# TEST +# TEST One master, one client. +# TEST Generate several log files. +# TEST Remove old master log files. +# TEST Delete client files and restart client. +# TEST Put one more record to the master. +# +proc rep029 { method { niter 200 } { tnum "029" } args } { + global passwd + global has_crypto + + set args [convert_args $method $args] + set saved_args $args + + # This test needs to set its own pagesize. + set pgindex [lsearch -exact $args "-pagesize"] + if { $pgindex != -1 } { + puts "Rep$tnum: skipping for specific pagesizes" + return + } + + # Run the body of the test with and without recovery, + # and with and without cleaning. + set recopts { "" " -recover " } + set cleanopts { clean noclean } + foreach r $recopts { + foreach c $cleanopts { + set envargs "" + set args $saved_args + puts "Rep$tnum ($method $envargs $c $r $args):\ + Test of internal initialization." + rep029_sub $method $niter $tnum $envargs \ + $r $c $args + + if { $has_crypto == 0 } { + continue + } + + # Run same set of tests with security. + # + append envargs " -encryptaes $passwd " + append args " -encrypt " + puts "Rep$tnum ($method $envargs $r $args):\ + Test of internal initialization." 
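+			#
+			# Note the split between the two flags appended
+			# above: -encryptaes hands the key to the
+			# environment, while -encrypt on berkdb_open (or
+			# -P <passwd> on the utilities run in rep029_sub)
+			# only says "use the environment's key".  A hedged
+			# sketch of that split outside this test
+			# (hypothetical directory and file names):
+			#
+			#	set cenv [berkdb_env -create -home $dir \
+			#	    -encryptaes $passwd]
+			#	set cdb [berkdb_open -create -env $cenv \
+			#	    -encrypt -btree crypt.db]
+			#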
+ rep029_sub $method $niter $tnum $envargs \ + $r $c $args + } + } +} + +proc rep029_sub { method niter tnum envargs recargs clean largs } { + global testdir + global passwd + global util_path + + env_cleanup $testdir + + replsetup $testdir/MSGQUEUEDIR + + set masterdir $testdir/MASTERDIR + set clientdir $testdir/CLIENTDIR + + file mkdir $masterdir + file mkdir $clientdir + + # Log size is small so we quickly create more than one. + # The documentation says that the log file must be at least + # four times the size of the in-memory log buffer. + set pagesize 4096 + append largs " -pagesize $pagesize " + set log_buf [expr $pagesize * 2] + set log_max [expr $log_buf * 4] + + # Open a master. + repladd 1 + set ma_envcmd "berkdb_env_noerr -create -txn nosync \ + -log_buffer $log_buf -log_max $log_max $envargs \ + -home $masterdir -rep_transport \[list 1 replsend\]" +# set ma_envcmd "berkdb_env_noerr -create -txn nosync \ +# -log_buffer $log_buf -log_max $log_max $envargs \ +# -verbose {rep on} -errpfx MASTER \ +# -home $masterdir -rep_transport \[list 1 replsend\]" + set masterenv [eval $ma_envcmd $recargs -rep_master] + error_check_good master_env [is_valid_env $masterenv] TRUE + + # Open a client + repladd 2 + set cl_envcmd "berkdb_env_noerr -create -txn nosync \ + -log_buffer $log_buf -log_max $log_max $envargs \ + -home $clientdir -rep_transport \[list 2 replsend\]" +# set cl_envcmd "berkdb_env_noerr -create -txn nosync \ +# -log_buffer $log_buf -log_max $log_max $envargs \ +# -verbose {rep on} -errpfx CLIENT \ +# -home $clientdir -rep_transport \[list 2 replsend\]" + set clientenv [eval $cl_envcmd $recargs -rep_client] + error_check_good client_env [is_valid_env $clientenv] TRUE + + # Bring the clients online by processing the startup messages. + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist + + # Run rep_test in the master (and update client). + puts "\tRep$tnum.a: Running rep_test in replicated env." + eval rep_test $method $masterenv NULL $niter 0 0 0 $largs + process_msgs $envlist + + puts "\tRep$tnum.b: Close client." + error_check_good client_close [$clientenv close] 0 + + if { [lsearch $envargs "-encrypta*"] !=-1 } { + set enc "-P $passwd" + } else { + set enc "" + } + + # Run rep_test in the master (don't update client). + puts "\tRep$tnum.c: Running rep_test in replicated env." + eval rep_test $method $masterenv NULL $niter 0 0 0 $largs + replclear 2 + + puts "\tRep$tnum.d: Run db_archive on master." + set res [eval exec $util_path/db_archive $enc -l -h $masterdir] + error_check_bad log.1.present [lsearch -exact $res log.0000000001] -1 + set res [eval exec $util_path/db_archive $enc -d -h $masterdir] + set res [eval exec $util_path/db_archive $enc -l -h $masterdir] + error_check_good log.1.gone [lsearch -exact $res log.0000000001] -1 + + puts "\tRep$tnum.e: Reopen client ($clean)." + if { $clean == "clean" } { + env_cleanup $clientdir + } + set clientenv [eval $cl_envcmd $recargs -rep_client] + error_check_good client_env [is_valid_env $clientenv] TRUE + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist 0 NONE err + if { $clean == "noclean" } { + puts "\tRep$tnum.e.1: Trigger log request" + # + # When we don't clean, starting the client doesn't + # trigger any events. We need to generate some log + # records so that the client requests the missing + # logs and that will trigger it. 
+ # + set entries 10 + eval rep_test $method $masterenv NULL $entries $niter 0 0 $largs + process_msgs $envlist 0 NONE err + } + + puts "\tRep$tnum.f: Verify logs and databases" + # Check that master and client logs and dbs are identical. + # Logs first ... + set stat [catch {eval exec $util_path/db_printlog $enc \ + -h $masterdir > $masterdir/prlog} result] + error_check_good stat_mprlog $stat 0 + set stat [catch {eval exec $util_path/db_printlog $enc \ + -h $clientdir > $clientdir/prlog} result] + error_check_good stat_cprlog $stat 0 + error_check_good log_cmp \ + [filecmp $masterdir/prlog $clientdir/prlog] 0 + + # ... now the databases. + set dbname "test.db" + set db1 [eval {berkdb_open -env $masterenv} $largs {-rdonly $dbname}] + set db2 [eval {berkdb_open -env $clientenv} $largs {-rdonly $dbname}] + + error_check_good comparedbs [db_compare \ + $db1 $db2 $masterdir/$dbname $clientdir/$dbname] 0 + error_check_good db1_close [$db1 close] 0 + error_check_good db2_close [$db2 close] 0 + + # Add records to the master and update client. + puts "\tRep$tnum.g: Add more records and check again." + set entries 10 + eval rep_test $method $masterenv NULL $entries $niter 0 0 $largs + process_msgs $envlist 0 NONE err + + # Check again that master and client logs and dbs are identical. + set stat [catch {eval exec $util_path/db_printlog $enc \ + -h $masterdir > $masterdir/prlog} result] + error_check_good stat_mprlog $stat 0 + set stat [catch {eval exec $util_path/db_printlog $enc \ + -h $clientdir > $clientdir/prlog} result] + error_check_good stat_cprlog $stat 0 + error_check_good log_cmp \ + [filecmp $masterdir/prlog $clientdir/prlog] 0 + + set db1 [eval {berkdb_open -env $masterenv} $largs {-rdonly $dbname}] + set db2 [eval {berkdb_open -env $clientenv} $largs {-rdonly $dbname}] + + error_check_good comparedbs [db_compare \ + $db1 $db2 $masterdir/$dbname $clientdir/$dbname] 0 + error_check_good db1_close [$db1 close] 0 + error_check_good db2_close [$db2 close] 0 + + error_check_good masterenv_close [$masterenv close] 0 + error_check_good clientenv_close [$clientenv close] 0 + replclose $testdir/MSGQUEUEDIR +} diff --git a/db/test/rep030.tcl b/db/test/rep030.tcl new file mode 100644 index 000000000..2d6a714a6 --- /dev/null +++ b/db/test/rep030.tcl @@ -0,0 +1,293 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep030.tcl,v 11.9 2004/10/07 18:24:11 carol Exp $ +# +# TEST rep030 +# TEST Test of internal initialization multiple files and pagesizes. +# TEST Hold some databases open on master. +# TEST +# TEST One master, one client. +# TEST Generate several log files. +# TEST Remove old master log files. +# TEST Delete client files and restart client. +# TEST Put one more record to the master. +# +proc rep030 { method { niter 500 } { tnum "030" } args } { + set args [convert_args $method $args] + + # This test needs to set its own pagesize. + set pgindex [lsearch -exact $args "-pagesize"] + if { $pgindex != -1 } { + puts "Rep$tnum: skipping for specific pagesizes" + return + } + + # Run the body of the test with and without recovery, + # and with and without cleaning. + set recopts { " -recover " "" } + set cleanopts { noclean clean } + foreach r $recopts { + foreach c $cleanopts { + puts "Rep$tnum ($method $r $c):\ + Test of internal initialization." 
+ rep030_sub $method $niter $tnum $r $c $args + } + } +} + +proc rep030_sub { method niter tnum recargs clean largs } { + global testdir + global util_path + + env_cleanup $testdir + + replsetup $testdir/MSGQUEUEDIR + + set masterdir $testdir/MASTERDIR + set clientdir $testdir/CLIENTDIR + + file mkdir $masterdir + file mkdir $clientdir + + # Log size is small so we quickly create more than one. + # The documentation says that the log file must be at least + # four times the size of the in-memory log buffer. + set maxpg 16384 + set log_buf [expr $maxpg * 2] + set log_max [expr $log_buf * 4] + set cache [expr $maxpg * 32 ] + + # Open a master. + repladd 1 + set ma_envcmd "berkdb_env_noerr -create -txn nosync \ + -log_buffer $log_buf -log_max $log_max \ + -cachesize { 0 $cache 1 } \ + -home $masterdir -rep_transport \[list 1 replsend\]" +# set ma_envcmd "berkdb_env_noerr -create -txn nosync \ +# -log_buffer $log_buf -log_max $log_max \ +# -cachesize { 0 $cache 1 }\ +# -verbose {rep on} -errpfx MASTER \ +# -home $masterdir -rep_transport \[list 1 replsend\]" + set masterenv [eval $ma_envcmd $recargs -rep_master] + error_check_good master_env [is_valid_env $masterenv] TRUE + + # Open a client + repladd 2 + set cl_envcmd "berkdb_env_noerr -create -txn nosync \ + -log_buffer $log_buf -log_max $log_max \ + -cachesize { 0 $cache 1 }\ + -home $clientdir -rep_transport \[list 2 replsend\]" +# set cl_envcmd "berkdb_env_noerr -create -txn nosync \ +# -log_buffer $log_buf -log_max $log_max \ +# -cachesize { 0 $cache 1 }\ +# -verbose {rep on} -errpfx CLIENT \ +# -home $clientdir -rep_transport \[list 2 replsend\]" + set clientenv [eval $cl_envcmd $recargs -rep_client] + error_check_good client_env [is_valid_env $clientenv] TRUE + + # Bring the clients online by processing the startup messages. + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist + + # Run rep_test in the master (and update client). + set startpgsz 512 + set pglist "" + for { set pgsz $startpgsz } { $pgsz <= $maxpg } \ + { set pgsz [expr $pgsz * 2] } { + lappend pglist $pgsz + } + set nfiles [llength $pglist] + puts "\tRep$tnum.a.0: Running rep_test $nfiles times in replicated env." + set dbopen "" + for { set i 0 } { $i < $nfiles } { incr i } { + set mult [expr $i * 10] + set nentries [expr $niter + $mult] + set pagesize [lindex $pglist $i] + set largs " -pagesize $pagesize " + eval rep_test $method $masterenv NULL $nentries $mult $mult \ + 0 $largs + process_msgs $envlist + + # + # Everytime we run 'rep_test' we create 'test.db'. So + # rename it each time through the loop. + # + set old "test.db" + set new "test.$i.db" + error_check_good rename [$masterenv dbrename \ + -auto_commit $old $new] 0 + process_msgs $envlist + # + # We want to keep some databases open so that we test the + # code finding the files in the data dir as well as finding + # them in dbreg list. + # + if { [expr $i % 2 ] == 0 } { + set db [berkdb_open -env $masterenv $new] + error_check_good dbopen.$i [is_valid_db $db] TRUE + lappend dbopen $db + } + } + # + # Set up a few special databases too. We want one with a subdatabase + # and we want an empty database. + # + set testfile "test.db" + if { [is_queue $method] } { + set sub "" + } else { + set sub "subdb" + } + set omethod [convert_method $method] + set largs " -pagesize $maxpg " + set largs [convert_args $method $largs] + set emptyfile "empty.db" + # + # Create/close an empty database. 
+ # + set db [eval {berkdb_open_noerr -env $masterenv -auto_commit -create \ + -mode 0644} $largs $omethod $emptyfile] + error_check_good emptydb [is_valid_db $db] TRUE + error_check_good empty_close [$db close] 0 + # + # Keep this subdb (regular if queue) database open. + # We need it a few times later on. + # + set db [eval {berkdb_open_noerr -env $masterenv -auto_commit -create \ + -mode 0644} $largs $omethod $testfile $sub] + error_check_good subdb [is_valid_db $db] TRUE + eval rep_test $method $masterenv $db $niter 0 0 0 + process_msgs $envlist + + puts "\tRep$tnum.b: Close client." + error_check_good client_close [$clientenv close] 0 + + # + # Run rep_test in the master (don't update client). + # Need to guarantee that we will change log files during + # this run so run with the largest pagesize and double + # the number of entries. + # + puts "\tRep$tnum.c: Running rep_test ( $largs) in replicated env." + set nentries [expr $niter * 2] + eval rep_test $method $masterenv $db $nentries 0 0 0 + replclear 2 + + puts "\tRep$tnum.d: Run db_archive on master." + set res [eval exec $util_path/db_archive -l -h $masterdir] + error_check_bad log.1.present [lsearch -exact $res log.0000000001] -1 + set res [eval exec $util_path/db_archive -d -h $masterdir] + set res [eval exec $util_path/db_archive -l -h $masterdir] + error_check_good log.1.gone [lsearch -exact $res log.0000000001] -1 + + puts "\tRep$tnum.e: Reopen client ($clean)." + if { $clean == "clean" } { + env_cleanup $clientdir + } + + set clientenv [eval $cl_envcmd $recargs -rep_client] + error_check_good client_env [is_valid_env $clientenv] TRUE + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist 0 NONE err + if { $clean == "noclean" } { + puts "\tRep$tnum.e.1: Trigger log request" + # + # When we don't clean, starting the client doesn't + # trigger any events. We need to generate some log + # records so that the client requests the missing + # logs and that will trigger it. + # + set entries 100 + eval rep_test $method $masterenv $db $entries $niter 0 0 + process_msgs $envlist 0 NONE err + } + error_check_good subdb_close [$db close] 0 + process_msgs $envlist 0 NONE err + + puts "\tRep$tnum.f: Verify logs and databases" + # Check that master and client logs and dbs are identical. + # Logs first ... + set stat [catch {eval exec $util_path/db_printlog \ + -h $masterdir > $masterdir/prlog} result] + error_check_good stat_mprlog $stat 0 + set stat [catch {eval exec $util_path/db_printlog \ + -h $clientdir > $clientdir/prlog} result] + error_check_good stat_cprlog $stat 0 + error_check_good log_cmp \ + [filecmp $masterdir/prlog $clientdir/prlog] 0 + + # ... now the databases. + set dbname "test.db" + set db1 [eval {berkdb_open -env $masterenv -rdonly} $dbname $sub] + set db2 [eval {berkdb_open -env $clientenv -rdonly} $dbname $sub] + + error_check_good comparedbs [db_compare \ + $db1 $db2 $masterdir/$dbname $clientdir/$dbname] 0 + error_check_good db1_close [$db1 close] 0 + error_check_good db2_close [$db2 close] 0 + + for { set i 0 } { $i < $nfiles } { incr i } { + set dbname "test.$i.db" + set db1 [berkdb_open -env $masterenv -rdonly $dbname] + set db2 [berkdb_open -env $clientenv -rdonly $dbname] + + error_check_good comparedbs [db_compare \ + $db1 $db2 $masterdir/$dbname $clientdir/$dbname] 0 + error_check_good db1_close [$db1 close] 0 + error_check_good db2_close [$db2 close] 0 + } + + # + # Close the database held open on master for initialization. 
+ # + foreach db $dbopen { + error_check_good db_close [$db close] 0 + } + + # Add records to the master and update client. + puts "\tRep$tnum.g: Add more records and check again." + set entries 10 + set db [eval {berkdb_open_noerr -env $masterenv -auto_commit \ + -mode 0644} $largs $omethod $testfile $sub] + error_check_good subdb [is_valid_db $db] TRUE + eval rep_test $method $masterenv $db $entries $niter 0 0 + error_check_good subdb_close [$db close] 0 + process_msgs $envlist 0 NONE err + + # Check again that master and client logs and dbs are identical. + set stat [catch {eval exec $util_path/db_printlog \ + -h $masterdir > $masterdir/prlog} result] + error_check_good stat_mprlog $stat 0 + set stat [catch {eval exec $util_path/db_printlog \ + -h $clientdir > $clientdir/prlog} result] + error_check_good stat_cprlog $stat 0 + error_check_good log_cmp \ + [filecmp $masterdir/prlog $clientdir/prlog] 0 + + set dbname "test.db" + set db1 [eval {berkdb_open -env $masterenv -auto_commit} $dbname $sub] + set db2 [eval {berkdb_open -env $clientenv -auto_commit} $dbname $sub] + + error_check_good comparedbs [db_compare \ + $db1 $db2 $masterdir/$dbname $clientdir/$dbname] 0 + error_check_good db1_close [$db1 close] 0 + error_check_good db2_close [$db2 close] 0 + + for { set i 0 } { $i < $nfiles } { incr i } { + set dbname "test.$i.db" + set db1 [berkdb_open -env $masterenv -auto_commit $dbname] + set db2 [berkdb_open -env $clientenv -auto_commit $dbname] + + error_check_good comparedbs [db_compare \ + $db1 $db2 $masterdir/$dbname $clientdir/$dbname] 0 + error_check_good db1_close [$db1 close] 0 + error_check_good db2_close [$db2 close] 0 + } + + error_check_good masterenv_close [$masterenv close] 0 + error_check_good clientenv_close [$clientenv close] 0 + replclose $testdir/MSGQUEUEDIR +} diff --git a/db/test/rep031.tcl b/db/test/rep031.tcl new file mode 100644 index 000000000..d2fa2fb68 --- /dev/null +++ b/db/test/rep031.tcl @@ -0,0 +1,211 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep031.tcl,v 11.6 2004/10/04 18:15:14 sue Exp $ +# +# TEST rep031 +# TEST Test of internal initialization and blocked operations. +# TEST +# TEST One master, one client. +# TEST Put one more record to the master. +# TEST Test that internal initialization block log_archive, rename, remove. +# TEST Sleep 30+ seconds. +# TEST Test that we can now log_archive, rename, remove. +# +proc rep031 { method { niter 200 } { tnum "031" } args } { + + set args [convert_args $method $args] + + # This test needs to set its own pagesize. + set pgindex [lsearch -exact $args "-pagesize"] + if { $pgindex != -1 } { + puts "Rep$tnum: skipping for specific pagesizes" + return + } + + # Run the body of the test with and without recovery, + # and with and without cleaning. + set recopts { "" " -recover " } + set cleanopts { clean noclean } + foreach r $recopts { + foreach c $cleanopts { + puts "Rep$tnum ($method $r $c $args):\ + Test of internal initialization." + rep031_sub $method $niter $tnum $r $c $args + } + } +} + +proc rep031_sub { method niter tnum recargs clean largs } { + global testdir + global util_path + + env_cleanup $testdir + + replsetup $testdir/MSGQUEUEDIR + + set masterdir $testdir/MASTERDIR + set clientdir $testdir/CLIENTDIR + + file mkdir $masterdir + file mkdir $clientdir + + # Log size is small so we quickly create more than one. 
+ # The documentation says that the log file must be at least + # four times the size of the in-memory log buffer. + set pagesize 4096 + append largs " -pagesize $pagesize " + set log_buf [expr $pagesize * 2] + set log_max [expr $log_buf * 4] + + # Open a master. + repladd 1 + set ma_envcmd "berkdb_env_noerr -create -txn nosync \ + -log_buffer $log_buf -log_max $log_max \ + -home $masterdir -rep_transport \[list 1 replsend\]" +# set ma_envcmd "berkdb_env_noerr -create -txn nosync \ +# -log_buffer $log_buf -log_max $log_max \ +# -verbose {rep on} -errpfx MASTER \ +# -home $masterdir -rep_transport \[list 1 replsend\]" + set masterenv [eval $ma_envcmd $recargs -rep_master] + error_check_good master_env [is_valid_env $masterenv] TRUE + + # Open a client + repladd 2 + set cl_envcmd "berkdb_env_noerr -create -txn nosync \ + -log_buffer $log_buf -log_max $log_max \ + -home $clientdir -rep_transport \[list 2 replsend\]" +# set cl_envcmd "berkdb_env_noerr -create -txn nosync \ +# -log_buffer $log_buf -log_max $log_max \ +# -verbose {rep on} -errpfx CLIENT \ +# -home $clientdir -rep_transport \[list 2 replsend\]" + set clientenv [eval $cl_envcmd $recargs -rep_client] + error_check_good client_env [is_valid_env $clientenv] TRUE + + # Bring the clients online by processing the startup messages. + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist + + # Run rep_test in the master (and update client). + puts "\tRep$tnum.a: Running rep_test in replicated env." + eval rep_test $method $masterenv NULL $niter 0 0 0 $largs + process_msgs $envlist + + puts "\tRep$tnum.b: Close client." + error_check_good client_close [$clientenv close] 0 + + # Run rep_test in the master (don't update client). + puts "\tRep$tnum.c: Running rep_test in replicated env." + eval rep_test $method $masterenv NULL $niter 0 0 0 $largs + replclear 2 + + puts "\tRep$tnum.d: Run db_archive on master." + set res [eval exec $util_path/db_archive -l -h $masterdir] + error_check_bad log.1.present [lsearch -exact $res log.0000000001] -1 + set res [eval exec $util_path/db_archive -d -h $masterdir] + set res [eval exec $util_path/db_archive -l -h $masterdir] + error_check_good log.1.gone [lsearch -exact $res log.0000000001] -1 + + puts "\tRep$tnum.e: Reopen client ($clean)." + if { $clean == "clean" } { + env_cleanup $clientdir + } + set clientenv [eval $cl_envcmd $recargs -rep_client] + error_check_good client_env [is_valid_env $clientenv] TRUE + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist 0 NONE err + if { $clean == "noclean" } { + puts "\tRep$tnum.e.1: Trigger log request" + # + # When we don't clean, starting the client doesn't + # trigger any events. We need to generate some log + # records so that the client requests the missing + # logs and that will trigger it. + # + set entries 10 + eval rep_test $method $masterenv NULL $entries $niter 0 0 $largs + process_msgs $envlist 0 NONE err + } + + # + # We have now forced an internal initialization. Verify it is correct. + # + puts "\tRep$tnum.f: Verify logs and databases" + # Check that master and client logs and dbs are identical. + # Logs first ... + set stat [catch {eval exec $util_path/db_printlog \ + -h $masterdir > $masterdir/prlog} result] + error_check_good stat_mprlog $stat 0 + set stat [catch {eval exec $util_path/db_printlog \ + -h $clientdir > $clientdir/prlog} result] + error_check_good stat_cprlog $stat 0 + error_check_good log_cmp \ + [filecmp $masterdir/prlog $clientdir/prlog] 0 + + # ... now the databases. 
+ set dbname "test.db" + set db1 [eval {berkdb_open -env $masterenv} $largs {-rdonly $dbname}] + set db2 [eval {berkdb_open -env $clientenv} $largs {-rdonly $dbname}] + + error_check_good comparedbs [db_compare \ + $db1 $db2 $masterdir/$dbname $clientdir/$dbname] 0 + error_check_good db1_close [$db1 close] 0 + error_check_good db2_close [$db2 close] 0 + + # + # Internal initializations disable certain operations on the master for + # 30 seconds after the last init-related message is received + # by the master. Those operations are dbremove, dbrename and + # log_archive (with removal). + # + puts "\tRep$tnum.g: Try to remove and rename the database." + + set old $dbname + set new $dbname.new + set stat [catch {$masterenv dbrename -auto_commit $old $new} ret] + error_check_good rename_fail $stat 1 + error_check_good archive_err [is_substr $ret "invalid"] 1 + set stat [catch {$masterenv dbremove -auto_commit $old} ret] + error_check_good remove_fail $stat 1 + error_check_good archive_err [is_substr $ret "invalid"] 1 + + # + # Need entries big enough to generate additional log files. + # However, db_archive will not return an error, it will + # just retain the log file. + # + set entries 200 + eval rep_test $method $masterenv NULL $entries $niter 0 0 $largs + process_msgs $envlist 0 NONE err + + puts "\tRep$tnum.h: Try to db_archive." + set res [eval exec $util_path/db_archive -l -h $masterdir] + set first [lindex $res 0] + set res [eval exec $util_path/db_archive -d -h $masterdir] + set res [eval exec $util_path/db_archive -l -h $masterdir] + error_check_bad log.gone [lsearch -exact $res $first] -1 + + # + # Sleep 32 seconds - The timeout is 30 seconds, but we need + # to sleep a bit longer to make sure we cross the timeout. + # + set to 32 + puts "\tRep$tnum.i: Wait $to seconds to timeout" + tclsleep $to + puts "\tRep$tnum.j: Retry blocked operations after wait" + set stat [catch {$masterenv dbrename -auto_commit $old $new} ret] + error_check_good rename_work $stat 0 + set stat [catch {$masterenv dbremove -auto_commit $new} ret] + error_check_good remove_work $stat 0 + process_msgs $envlist 0 NONE err + + set stat [catch {eval exec $util_path/db_archive -d -h $masterdir} ret] + set res [eval exec $util_path/db_archive -l -h $masterdir] + error_check_good log.gone [lsearch -exact $res $first] -1 + + error_check_good masterenv_close [$masterenv close] 0 + error_check_good clientenv_close [$clientenv close] 0 + replclose $testdir/MSGQUEUEDIR +} diff --git a/db/test/rep032.tcl b/db/test/rep032.tcl new file mode 100644 index 000000000..77293cdba --- /dev/null +++ b/db/test/rep032.tcl @@ -0,0 +1,144 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep032.tcl,v 1.3 2004/09/22 18:01:06 bostic Exp $ +# +# TEST rep032 +# TEST Test of log gap processing. +# TEST +# TEST One master, one clients. +# TEST Run rep_test. +# TEST Run rep_test without sending messages to client. +# TEST Make sure client missing the messages catches up properly. +# +proc rep032 { method { niter 200 } { tnum "032" } args } { + set args [convert_args $method $args] + set logsets [create_logsets 2] + + # Run the body of the test with and without recovery. + set recopts { "" "-recover" } + foreach r $recopts { + foreach l $logsets { + set logindex [lsearch -exact $l "in-memory"] + if { $r == "-recover" && $logindex != -1 } { + puts "Rep$tnum: Skipping\ + for in-memory logs with -recover." 
+ continue + } + puts "Rep$tnum ($method $r $args):\ + Test of log gap processing." + puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client logs are [lindex $l 1]" + rep032_sub $method $niter $tnum $l $r $args + } + } +} + +proc rep032_sub { method niter tnum logset recargs largs } { + global testdir + global util_path + + env_cleanup $testdir + + replsetup $testdir/MSGQUEUEDIR + + set masterdir $testdir/MASTERDIR + set clientdir $testdir/CLIENTDIR + + file mkdir $masterdir + file mkdir $clientdir + + set m_logtype [lindex $logset 0] + set c_logtype [lindex $logset 1] + + # In-memory logs require a large log buffer, and cannot + # be used with -txn nosync. + set m_logargs [adjust_logargs $m_logtype] + set c_logargs [adjust_logargs $c_logtype] + set m_txnargs [adjust_txnargs $m_logtype] + set c_txnargs [adjust_txnargs $c_logtype] + + # Open a master. + repladd 1 + set ma_envcmd "berkdb_env_noerr -create $m_txnargs $m_logargs \ + -home $masterdir -rep_transport \[list 1 replsend\]" +# set ma_envcmd "berkdb_env_noerr -create $m_txnargs $m_logargs \ +# -verbose {rep on} -errpfx MASTER \ +# -home $masterdir -rep_transport \[list 1 replsend\]" + set masterenv [eval $ma_envcmd $recargs -rep_master] + error_check_good master_env [is_valid_env $masterenv] TRUE + + # Open a client + repladd 2 + set cl_envcmd "berkdb_env_noerr -create $c_txnargs $c_logargs \ + -home $clientdir -rep_transport \[list 2 replsend\]" +# set cl_envcmd "berkdb_env_noerr -create $c_txnargs $c_logargs \ +# -verbose {rep on} -errpfx CLIENT \ +# -home $clientdir -rep_transport \[list 2 replsend\]" + set clientenv [eval $cl_envcmd $recargs -rep_client] + error_check_good client_env [is_valid_env $clientenv] TRUE + + # Bring the clients online by processing the startup messages. + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist + + # Run rep_test in the master (and update client). + puts "\tRep$tnum.a: Running rep_test in replicated env." + eval rep_test $method $masterenv NULL $niter 0 0 0 $largs + process_msgs $envlist + + puts "\tRep$tnum.b: Check client processed everything properly." + set queued [stat_field $clientenv rep_stat "Maximum log records queued"] + set request [stat_field $clientenv rep_stat "Log records requested"] + error_check_good queued $queued 0 + error_check_good request $request 0 + + # Run rep_test in the master (don't update client). + # First run with dropping all client messages via replclear. + puts "\tRep$tnum.c: Running rep_test dropping client msgs." + eval rep_test $method $masterenv NULL $niter 0 0 0 $largs + replclear 2 + process_msgs $envlist + + # + # Need new operations to force log gap processing to + # request missing pieces. + # + puts "\tRep$tnum.d: Running rep_test again replicated." + eval rep_test $method $masterenv NULL $niter 0 0 0 $largs + process_msgs $envlist + + puts "\tRep$tnum.e: Check we re-requested and had a backlog." + set queued [stat_field $clientenv rep_stat "Maximum log records queued"] + set request [stat_field $clientenv rep_stat "Log records requested"] + error_check_bad queued $queued 0 + error_check_bad request $request 0 + + puts "\tRep$tnum.f: Verify logs and databases" + # Check that master and client logs and dbs are identical. + # Logs first ... 
+ set stat [catch {eval exec $util_path/db_printlog \ + -h $masterdir > $masterdir/prlog} result] + error_check_good stat_mprlog $stat 0 + set stat [catch {eval exec $util_path/db_printlog \ + -h $clientdir > $clientdir/prlog} result] + error_check_good stat_cprlog $stat 0 + error_check_good log_cmp \ + [filecmp $masterdir/prlog $clientdir/prlog] 0 + + # ... now the databases. + set dbname "test.db" + set db1 [eval {berkdb_open -env $masterenv} $largs {-rdonly $dbname}] + set db2 [eval {berkdb_open -env $clientenv} $largs {-rdonly $dbname}] + + error_check_good comparedbs [db_compare \ + $db1 $db2 $masterdir/$dbname $clientdir/$dbname] 0 + error_check_good db1_close [$db1 close] 0 + error_check_good db2_close [$db2 close] 0 + + error_check_good masterenv_close [$masterenv close] 0 + error_check_good clientenv_close [$clientenv close] 0 + replclose $testdir/MSGQUEUEDIR +} diff --git a/db/test/rep033.tcl b/db/test/rep033.tcl new file mode 100644 index 000000000..3a7f049da --- /dev/null +++ b/db/test/rep033.tcl @@ -0,0 +1,215 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep033.tcl,v 1.5 2004/10/04 18:15:14 sue Exp $ +# +# TEST rep033 +# TEST Test of internal initialization with rename and remove of dbs. +# TEST +# TEST One master, one client. +# TEST Generate several databases. Replicate to client. +# TEST Do some renames and removes, both before and after +# TEST closing the client. +# +proc rep033 { method { niter 200 } { tnum "033" } args } { + + set args [convert_args $method $args] + set omethod [convert_method $method] + + # Run the body of the test with and without recovery, + # and with and without cleaning. + set envargs "" + set recopts { "" " -recover " } + set cleanopts { noclean clean } + set when { before after } + foreach r $recopts { + foreach c $cleanopts { + foreach w $when { + puts "Rep$tnum ($method $envargs $c $r $w $args):\ + Test of internal initialization." + rep033_sub $omethod $niter $tnum $envargs \ + $r $c $w $args + } + } + } +} + +proc rep033_sub { method niter tnum envargs recargs clean when largs } { + global testdir + global util_path + + env_cleanup $testdir + + replsetup $testdir/MSGQUEUEDIR + + set masterdir $testdir/MASTERDIR + set clientdir $testdir/CLIENTDIR + + file mkdir $masterdir + file mkdir $clientdir + + # Log size is small so we quickly create more than one. + # The documentation says that the log file must be at least + # four times the size of the in-memory log buffer. + set pagesize 4096 + append largs " -pagesize $pagesize " + set log_buf [expr $pagesize * 2] + set log_max [expr $log_buf * 4] + + # Open a master. 
+ repladd 1 + set ma_envcmd "berkdb_env_noerr -create -txn nosync \ + -log_buffer $log_buf -log_max $log_max $envargs \ + -home $masterdir -rep_transport \[list 1 replsend\]" +# set ma_envcmd "berkdb_env_noerr -create -txn nosync \ +# -log_buffer $log_buf -log_max $log_max $envargs \ +# -verbose {rep on} -errpfx MASTER \ +# -home $masterdir -rep_transport \[list 1 replsend\]" + set masterenv [eval $ma_envcmd $recargs -rep_master] + error_check_good master_env [is_valid_env $masterenv] TRUE + + # Open a client + repladd 2 + set cl_envcmd "berkdb_env_noerr -create -txn nosync \ + -log_buffer $log_buf -log_max $log_max $envargs \ + -home $clientdir -rep_transport \[list 2 replsend\]" +# set cl_envcmd "berkdb_env_noerr -create -txn nosync \ +# -log_buffer $log_buf -log_max $log_max $envargs \ +# -verbose {rep on} -errpfx CLIENT \ +# -home $clientdir -rep_transport \[list 2 replsend\]" + set clientenv [eval $cl_envcmd $recargs -rep_client] + error_check_good client_env [is_valid_env $clientenv] TRUE + + # Bring the clients online by processing the startup messages. + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist + + puts "\tRep$tnum.a: Create several databases on master." + set oflags " -env $masterenv $method -create -auto_commit " + set dbw [eval {berkdb_open} $oflags $largs w.db] + set dbx [eval {berkdb_open} $oflags $largs x.db] + set dby [eval {berkdb_open} $oflags $largs y.db] + set dbz [eval {berkdb_open} $oflags $largs z.db] + error_check_good dbw_close [$dbw close] 0 + error_check_good dbx_close [$dbx close] 0 + error_check_good dby_close [$dby close] 0 + error_check_good dbz_close [$dbz close] 0 + + # Update client, then close. + process_msgs $envlist + + puts "\tRep$tnum.b: Close client." + error_check_good client_close [$clientenv close] 0 + + # If we're doing the rename/remove operations before adding + # databases A and B, manipulate only the existing files. + if { $when == "before" } { + rep033_rename_remove $masterenv + } + + # Run rep_test in the master (don't update client). + # + # We'd like to control the names of these dbs, so give + # rep_test an existing handle. + # + puts "\tRep$tnum.c: Create new databases. Populate with rep_test." + set dba [eval {berkdb_open} $oflags $largs a.db] + set dbb [eval {berkdb_open} $oflags $largs b.db] + eval rep_test $method $masterenv $dba $niter 0 0 0 $largs + eval rep_test $method $masterenv $dbb $niter 0 0 0 $largs + error_check_good dba_close [$dba close] 0 + error_check_good dbb_close [$dbb close] 0 + + # Throw away messages for client. + replclear 2 + + # If we're doing the rename/remove afterwards, manipulate + # all the files including A and B. + if { $when == "after" } { + rep033_rename_remove $masterenv + } + error_check_good rename_b [$masterenv dbrename b.db x.db] 0 + error_check_good remove_a [$masterenv dbremove a.db] 0 + + puts "\tRep$tnum.d: Run db_archive on master." + set res [eval exec $util_path/db_archive -l -h $masterdir] + error_check_bad log.1.present [lsearch -exact $res log.0000000001] -1 + set res [eval exec $util_path/db_archive -d -h $masterdir] + set res [eval exec $util_path/db_archive -l -h $masterdir] + error_check_good log.1.gone [lsearch -exact $res log.0000000001] -1 + + puts "\tRep$tnum.e: Reopen client ($clean)." 
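+	#
+	# "clean" wipes the client environment first, so the reopened
+	# client must be filled in entirely by internal initialization;
+	# "noclean" keeps the stale environment and relies on the log
+	# request triggered below.  One way to observe that the client
+	# really went through the request path, borrowing the stat
+	# field rep032 checks (sketch only, not part of this test):
+	#
+	#	set req [stat_field $clientenv rep_stat \
+	#	    "Log records requested"]
+	#	error_check_bad log_requested $req 0
+	#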
+ if { $clean == "clean" } { + env_cleanup $clientdir + } + set clientenv [eval $cl_envcmd $recargs -rep_client] + error_check_good client_env [is_valid_env $clientenv] TRUE + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist 0 NONE err + if { $clean == "noclean" } { + puts "\tRep$tnum.e.1: Trigger log request" + # + # When we don't clean, starting the client doesn't + # trigger any events. We need to generate some log + # records so that the client requests the missing + # logs and that will trigger it. + # + set entries 10 + eval rep_test $method $masterenv NULL $entries $niter 0 0 $largs + process_msgs $envlist 0 NONE err + } + + puts "\tRep$tnum.f: Verify logs and databases" + # Check that master and client logs and dbs are identical. + # Logs first ... + set stat [catch {eval exec $util_path/db_printlog \ + -h $masterdir > $masterdir/prlog} result] + error_check_good stat_mprlog $stat 0 + set stat [catch {eval exec $util_path/db_printlog \ + -h $clientdir > $clientdir/prlog} result] + error_check_good stat_cprlog $stat 0 + error_check_good log_cmp \ + [filecmp $masterdir/prlog $clientdir/prlog] 0 + + # ... now the databases. X, Y, and C should exist. + set dbnames "x.db w.db c.db" + foreach db $dbnames { + set db1 [eval {berkdb_open -env $masterenv} $largs {-rdonly $db}] + set db2 [eval {berkdb_open -env $clientenv} $largs {-rdonly $db}] + + error_check_good comparedbs [db_compare \ + $db1 $db2 $masterdir/$db $clientdir/$db] 0 + error_check_good db1_close [$db1 close] 0 + error_check_good db2_close [$db2 close] 0 + } + + # A, B, and Z should be gone on client. + error_check_good dba_gone [file exists $clientdir/a.db] 0 + error_check_good dbb_gone [file exists $clientdir/b.db] 0 + # + # Currently we cannot remove z.db on the client because + # we don't own the file namespace. So, we cannot do + # the check below. If that changes, we want the test below. + # error_check_good dbz_gone [file exists $clientdir/z.db] 0 + + # Clean up. + error_check_good masterenv_close [$masterenv close] 0 + error_check_good clientenv_close [$clientenv close] 0 + replclose $testdir/MSGQUEUEDIR +} + +proc rep033_rename_remove { env } { + + # Here we manipulate databases W, X, Y, and Z. + # Remove W. + error_check_good remove_w [$env dbremove w.db] 0 + + # Rename X to W, Y to C (an entirely new name). + error_check_good rename_x [$env dbrename x.db w.db] 0 + error_check_good rename_y [$env dbrename y.db c.db] 0 + + # Remove Z. + error_check_good remove_z [$env dbremove z.db] 0 +} diff --git a/db/test/rep034.tcl b/db/test/rep034.tcl new file mode 100644 index 000000000..853bc75e7 --- /dev/null +++ b/db/test/rep034.tcl @@ -0,0 +1,161 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep034.tcl,v 1.3 2004/09/22 18:01:06 bostic Exp $ +# +# TEST rep034 +# TEST Test of client startup synchronization. +# TEST +# TEST One master, two clients. +# TEST Run rep_test. +# TEST Close one client and change master to other client. +# TEST Reopen closed client - enter startup. +# TEST Run rep_test and we should see live messages and startup complete. +# +proc rep034 { method { niter 2 } { tnum "034" } args } { + + set args [convert_args $method $args] + set logsets [create_logsets 3] + + # Run the body of the test with and without recovery. 
+ set recopts { "" "-recover" } + set startup { "stat" "ret" } + foreach r $recopts { + foreach l $logsets { + set logindex [lsearch -exact $l "in-memory"] + if { $r == "-recover" && $logindex != -1 } { + puts "Rep$tnum: Skipping\ + for in-memory logs with -recover." + continue + } + foreach s $startup { + puts "Rep$tnum ($method $r $s $args):\ + Test of startup synchronization detection." + puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client 0 logs are [lindex $l 1]" + puts "Rep$tnum: Client 1 logs are [lindex $l 2]" + rep034_sub $method $niter $tnum $l $r $s $args + } + } + } +} + +proc rep034_sub { method niter tnum logset recargs stup largs } { + global testdir + global util_path + global startup_done + + env_cleanup $testdir + + replsetup $testdir/MSGQUEUEDIR + + set masterdir $testdir/MASTERDIR + set clientdir $testdir/CLIENTDIR + set clientdir2 $testdir/CLIENTDIR2 + + file mkdir $masterdir + file mkdir $clientdir + file mkdir $clientdir2 + + set m_logtype [lindex $logset 0] + set c_logtype [lindex $logset 1] + set c2_logtype [lindex $logset 2] + + # In-memory logs require a large log buffer, and cannot + # be used with -txn nosync. + set m_logargs [adjust_logargs $m_logtype] + set c_logargs [adjust_logargs $c_logtype] + set c2_logargs [adjust_logargs $c2_logtype] + set m_txnargs [adjust_txnargs $m_logtype] + set c_txnargs [adjust_txnargs $c_logtype] + set c2_txnargs [adjust_txnargs $c2_logtype] + + # Open a master. + repladd 1 + set ma_envcmd "berkdb_env_noerr -create $m_txnargs $m_logargs \ + -home $masterdir -rep_transport \[list 1 replsend\]" +# set ma_envcmd "berkdb_env_noerr -create $m_txnargs $m_logargs \ +# -verbose {rep on} -errpfx MASTER \ +# -home $masterdir -rep_transport \[list 1 replsend\]" + set masterenv [eval $ma_envcmd $recargs -rep_master] + error_check_good master_env [is_valid_env $masterenv] TRUE + + # Open a client + repladd 2 + set cl_envcmd "berkdb_env_noerr -create $c_txnargs $c_logargs \ + -home $clientdir -rep_transport \[list 2 replsend\]" +# set cl_envcmd "berkdb_env_noerr -create $c_txnargs $c_logargs \ +# -verbose {rep on} -errpfx CLIENT \ +# -home $clientdir -rep_transport \[list 2 replsend\]" + set clientenv [eval $cl_envcmd $recargs -rep_client] + error_check_good client_env [is_valid_env $clientenv] TRUE + + # Open a client + repladd 3 + set cl2_envcmd "berkdb_env_noerr -create $c2_txnargs $c2_logargs \ + -home $clientdir2 -rep_transport \[list 3 replsend\]" +# set cl2_envcmd "berkdb_env_noerr -create $c2_txnargs $c2_logargs \ +# -verbose {rep on} -errpfx CLIENT2 \ +# -home $clientdir2 -rep_transport \[list 3 replsend\]" + set client2env [eval $cl2_envcmd $recargs -rep_client] + error_check_good client_env [is_valid_env $client2env] TRUE + + # Bring the clients online by processing the startup messages. + set envlist "{$masterenv 1} {$clientenv 2} {$client2env 3}" + process_msgs $envlist + + # Run rep_test in the master (and update client). + puts "\tRep$tnum.a: Running rep_test in replicated env." + eval rep_test $method $masterenv NULL $niter 0 0 0 $largs + process_msgs $envlist + + puts "\tRep$tnum.b: Close client and run with new master." + error_check_good client_close [$clientenv close] 0 + set envlist "{$masterenv 1} {$client2env 3}" + + error_check_good master_downgr [$masterenv rep_start -client] 0 + error_check_good cl2_upgr [$client2env rep_start -master] 0 + # + # Just so that we don't get confused who is master/client. 
+ # + set newmaster $client2env + set newclient $masterenv + process_msgs $envlist + + # Run rep_test in the master (don't update client). + # Run with dropping all client messages via replclear. + eval rep_test $method $newmaster NULL $niter 0 0 0 $largs + process_msgs $envlist + replclear 2 + + puts "\tRep$tnum.c: Restart client" + set startup_done 0 + set clientenv [eval $cl_envcmd $recargs -rep_client] + error_check_good client_env [is_valid_env $clientenv] TRUE + set envlist "{$newclient 1} {$clientenv 2} {$newmaster 3}" + process_msgs $envlist + + puts "\tRep$tnum.d: Verify client in startup mode" + set start [stat_field $clientenv rep_stat "Startup complete"] + error_check_good start_incomplete $start 0 + + puts "\tRep$tnum.e: Generate live message" + eval rep_test $method $newmaster NULL $niter 0 0 0 $largs + process_msgs $envlist + + if { $stup == "stat" } { + puts "\tRep$tnum.f: Verify client completed startup via stat" + set start [stat_field $clientenv rep_stat "Startup complete"] + error_check_good start_complete $start 1 + } else { + puts "\tRep$tnum.f: Verify client completed startup via return" + error_check_good start_complete $startup_done 1 + } + + error_check_good masterenv_close [$newclient close] 0 + error_check_good clientenv_close [$clientenv close] 0 + error_check_good client2env_close [$newmaster close] 0 + replclose $testdir/MSGQUEUEDIR +} diff --git a/db/test/rep035.tcl b/db/test/rep035.tcl new file mode 100644 index 000000000..405b8f157 --- /dev/null +++ b/db/test/rep035.tcl @@ -0,0 +1,242 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep035.tcl,v 11.3 2004/09/22 18:01:06 bostic Exp $ +# +# TEST rep035 +# TEST Test sync-up recovery in replication. +# TEST +# TEST We need to fork off 3 child tclsh processes to operate +# TEST on Site 3's (client always) home directory: +# TEST Process 1 continually calls lock_detect. +# TEST Process 2 continually calls txn_checkpoint. +# TEST Process 3 continually calls memp_trickle. +# TEST Process 4 continually calls log_archive. +# TEST Sites 1 and 2 will continually swap being master +# TEST (forcing site 3 to continually run sync-up recovery) +# TEST New master performs 1 operation, replicates and downgrades. + +proc rep035 { method { niter 100 } { tnum "035" } args } { + global passwd + global has_crypto + + set saved_args $args + set logsets [create_logsets 3] + + foreach l $logsets { + set envargs "" + set args $saved_args + puts "Rep$tnum: Test sync-up recovery ($method)." + puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client 0 logs are [lindex $l 1]" + puts "Rep$tnum: Client 1 logs are [lindex $l 2]" + rep035_sub $method $niter $tnum $envargs $l $args + } +} + +proc rep035_sub { method niter tnum envargs logset args } { + source ./include.tcl + global testdir + global encrypt + + env_cleanup $testdir + + replsetup $testdir/MSGQUEUEDIR + + set masterdir $testdir/MASTERDIR + set clientdir1 $testdir/CLIENTDIR1 + set clientdir2 $testdir/CLIENTDIR2 + + file mkdir $masterdir + file mkdir $clientdir1 + file mkdir $clientdir2 + + set m_logtype [lindex $logset 0] + set c_logtype [lindex $logset 1] + set c2_logtype [lindex $logset 2] + + # In-memory logs require a large log buffer, and cannot + # be used with -txn nosync. 
+ set m_logargs [adjust_logargs $m_logtype] + set c_logargs [adjust_logargs $c_logtype] + set c2_logargs [adjust_logargs $c2_logtype] + set m_txnargs [adjust_txnargs $m_logtype] + set c_txnargs [adjust_txnargs $c_logtype] + set c2_txnargs [adjust_txnargs $c2_logtype] + + # Open a master. + repladd 1 + set env_cmd(M) "berkdb_env_noerr -create -lock_max 2500 \ + -log_max 1000000 $envargs -home $masterdir $m_logargs \ + -errpfx MASTER -errfile /dev/stderr $m_txnargs -rep_master \ + -rep_transport \[list 1 replsend\]" +# set env_cmd(M) "berkdb_env_noerr -create -lock_max 2500 \ +# -log_max 1000000 $envargs -home $masterdir $m_logargs \ +# -errpfx MASTER -errfile /dev/stderr $m_txnargs -rep_master \ +# -verbose {rep on} \ +# -rep_transport \[list 1 replsend\]" + set env1 [eval $env_cmd(M)] + error_check_good env1 [is_valid_env $env1] TRUE + + # Open two clients + repladd 2 + set env_cmd(C1) "berkdb_env_noerr -create -lock_max 2500 \ + -log_max 1000000 $envargs -home $clientdir1 $c_logargs \ + -errfile /dev/stderr -errpfx CLIENT $c_txnargs -rep_client \ + -rep_transport \[list 2 replsend\]" +# set env_cmd(C1) "berkdb_env_noerr -create -lock_max 2500 \ +# -log_max 1000000 $envargs -home $clientdir1 $c_logargs \ +# -errfile /dev/stderr -errpfx CLIENT $c_txnargs -rep_client \ +# -verbose {rep on} \ +# -rep_transport \[list 2 replsend\]" + set env2 [eval $env_cmd(C1)] + error_check_good env2 [is_valid_env $env2] TRUE + + # Second client needs lock_detect flag. + repladd 3 + set env_cmd(C2) "berkdb_env_noerr -create -lock_max 2500 \ + -log_max 1000000 $envargs -home $clientdir2 $c2_logargs \ + -errpfx CLIENT2 -errfile /dev/stderr $c2_txnargs -rep_client \ + -lock_detect default -rep_transport \[list 3 replsend\]" +# set env_cmd(C2) "berkdb_env_noerr -create -lock_max 2500 \ +# -log_max 1000000 $envargs -home $clientdir2 $c2_logargs \ +# -errpfx CLIENT2 -errfile /dev/stderr $c2_txnargs -rep_client \ +# -verbose {rep on} \ +# -lock_detect default -rep_transport \[list 3 replsend\]" + set env3 [eval $env_cmd(C2)] + error_check_good client_env [is_valid_env $env3] TRUE + + # Bring the client online by processing the startup messages. + set envlist "{$env1 1} {$env2 2} {$env3 3}" + process_msgs $envlist + + # We need to fork off 3 child tclsh processes to operate + # on Site 3's (client always) home directory: + # Process 1 continually calls lock_detect (DB_LOCK_DEFAULT) + # Process 2 continually calls txn_checkpoint (DB_FORCE) + # Process 3 continually calls memp_trickle (large % like 90) + # Process 4 continually calls log_archive. + + puts "\tRep$tnum.a: Fork child process running lock_detect on client2." + set pid1 [exec $tclsh_path $test_path/wrap.tcl \ + rep035script.tcl $testdir/lock_detect.log \ + $clientdir2 detect &] + + puts "\tRep$tnum.b: Fork child process running txn_checkpoint on client2." + set pid2 [exec $tclsh_path $test_path/wrap.tcl \ + rep035script.tcl $testdir/txn_checkpoint.log \ + $clientdir2 checkpoint &] + + puts "\tRep$tnum.c: Fork child process running memp_trickle on client2." + set pid3 [exec $tclsh_path $test_path/wrap.tcl \ + rep035script.tcl $testdir/memp_trickle.log \ + $clientdir2 trickle &] + + puts "\tRep$tnum.d: Fork child process running log_archive on client2." + set pid4 [exec $tclsh_path $test_path/wrap.tcl \ + rep035script.tcl $testdir/log_archive.log \ + $clientdir2 archive &] + + set pidlist [list $pid1 $pid2 $pid3 $pid4] + + # + # Sites 1 and 2 will continually swap being master + # forcing site 3 to continually run sync-up recovery. 
+ # New master performs 1 operation, replicates and downgrades. + # Site 3 will always stay a client. + # + # Set up all the master/client data we're going to need + # to keep track of and swap. Set up the handles for rep_test. + # + + set masterenv $env1 + set mid 1 + set clientenv $env2 + set cid 2 + set testfile "test$tnum.db" + set args [convert_args $method] + set omethod [convert_method $method] + set mdb_cmd "{berkdb_open_noerr} -env $masterenv -auto_commit \ + -create $omethod $args -mode 0644 $testfile" + set cdb_cmd "{berkdb_open_noerr} -env $clientenv -auto_commit \ + $omethod $args -mode 0644 $testfile" + + set masterdb [eval $mdb_cmd] + error_check_good dbopen [is_valid_db $masterdb] TRUE + process_msgs $envlist + + set clientdb [eval $cdb_cmd] + error_check_good dbopen [is_valid_db $clientdb] TRUE + + tclsleep 2 + puts "\tRep$tnum.e: Swap master and client $niter times." + for { set i 0 } { $i < $niter } { incr i } { + + # Do a few ops + eval rep_test $method $masterenv $masterdb 2 $i $i + set envlist "{$masterenv $mid} {$clientenv $cid} {$env3 3}" + process_msgs $envlist + + # Do one op on master and process messages and drop + # to clientenv to force sync-up recovery next time. + eval rep_test $method $masterenv $masterdb 1 $i $i + set envlist "{$masterenv $mid} {$env3 3}" + replclear $cid + process_msgs $envlist + + # Swap all the info we need. + set tmp $masterenv + set masterenv $clientenv + set clientenv $tmp + + set tmp $masterdb + set masterdb $clientdb + set clientdb $tmp + + set tmp $mid + set mid $cid + set cid $tmp + + set tmp $mdb_cmd + set mdb_cmd $cdb_cmd + set cdb_cmd $tmp + + puts "\tRep$tnum.e.$i: Swap: master $mid, client $cid" + error_check_good downgrade [$clientenv rep_start -client] 0 + error_check_good upgrade [$masterenv rep_start -master] 0 + set envlist "{$masterenv $mid} {$clientenv $cid} {$env3 3}" + process_msgs $envlist + + # Close old and reopen since we will get HANDLE_DEAD + # otherwise because we dropped messages to the new master. + error_check_good masterdb [$masterdb close] 0 + error_check_good clientdb [$clientdb close] 0 + + set masterdb [eval $mdb_cmd] + error_check_good dbopen [is_valid_db $masterdb] TRUE + + set clientdb [eval $cdb_cmd] + error_check_good dbopen [is_valid_db $clientdb] TRUE + process_msgs $envlist + } + + # Communicate with child processes by creating a marker file. + set markerenv [berkdb_env -create -home $testdir -txn] + error_check_good markerenv_open [is_valid_env $markerenv] TRUE + set marker [eval "berkdb_open \ + -create -btree -auto_commit -env $markerenv marker.db"] + error_check_good marker_close [$marker close] 0 + + # Script should be able to shut itself down fairly quickly. + watch_procs $pidlist 5 + + error_check_good masterdb [$masterdb close] 0 + error_check_good clientdb [$clientdb close] 0 + error_check_good env1_close [$env1 close] 0 + error_check_good env2_close [$env2 close] 0 + error_check_good env3_close [$env3 close] 0 + error_check_good markerenv_close [$markerenv close] 0 + replclose $testdir/MSGQUEUEDIR +} diff --git a/db/test/rep035script.tcl b/db/test/rep035script.tcl new file mode 100644 index 000000000..1959d9948 --- /dev/null +++ b/db/test/rep035script.tcl @@ -0,0 +1,76 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep035script.tcl,v 11.2 2004/09/22 18:01:06 bostic Exp $ +# +# Rep035 script - continually calls lock_detect, txn_checkpoint, +# or mpool_trickle. 
+# +# Usage: repscript clientdir apicall +# clientdir: client env directory +# apicall: detect, checkpoint, or trickle. +source ./include.tcl +source $test_path/test.tcl +source $test_path/testutils.tcl +source $test_path/reputils.tcl + +set usage "repscript clientdir apicall" + +# Verify usage +if { $argc != 2 } { + puts stderr "FAIL:[timestamp] Usage: $usage" + exit +} + +# Initialize arguments +set clientdir [ lindex $argv 0 ] +set apicall [ lindex $argv 1 ] + +# Join the client env. +set envid 3 +set cl2_cmd "berkdb_env_noerr -home $clientdir \ + -errfile /dev/stderr -errpfx CLIENT.$apicall \ + -txn -rep_client -rep_transport \[list $envid replsend\]" +# set cl2_cmd "berkdb_env_noerr -home $clientdir \ +# -errfile /dev/stderr -errpfx CLIENT.$apicall \ +# -verbose {rep on} \ +# -txn -rep_client -rep_transport \[list $envid replsend\]" +set clientenv [eval $cl2_cmd] +error_check_good script_c2env_open [is_valid_env $clientenv] TRUE + +# Run chosen call continuously until the parent script creates +# a marker file to indicate completion. +switch -exact -- $apicall { + archive { + while { [file exists $testdir/marker.db] == 0 } { + $clientenv log_archive -arch_remove +# tclsleep 1 + } + } + detect { + while { [file exists $testdir/marker.db] == 0 } { + $clientenv lock_detect default +# tclsleep 1 + } + } + checkpoint { + while { [file exists $testdir/marker.db] == 0 } { + $clientenv txn_checkpoint -force + tclsleep 1 + } + } + trickle { + while { [file exists $testdir/marker.db] == 0 } { + $clientenv mpool_trickle 90 +# tclsleep 1 + } + } + default { + puts "FAIL: unrecognized API call $apicall + } +} + +error_check_good clientenv_close [$clientenv close] 0 + diff --git a/db/test/rep036.tcl b/db/test/rep036.tcl new file mode 100644 index 000000000..2fd1fa00a --- /dev/null +++ b/db/test/rep036.tcl @@ -0,0 +1,172 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep036.tcl,v 11.2 2004/09/22 18:01:06 bostic Exp $ +# +# TEST rep036 +# TEST Multiple master processes writing to the database. +# TEST One process handles all message processing. + +proc rep036 { method { niter 200 } { tnum "036" } args } { + if { [is_btree $method] == 0 } { + puts "Rep$tnum: Skipping for method $method." + return + } + + set saved_args $args + set logsets [create_logsets 3] + + foreach l $logsets { + set envargs "" + set args $saved_args + puts "Rep$tnum: Test sync-up recovery ($method)." + puts "Rep$tnum: Master logs are [lindex $l 0]" + puts "Rep$tnum: Client 0 logs are [lindex $l 1]" + puts "Rep$tnum: Client 1 logs are [lindex $l 2]" + rep036_sub $method $niter $tnum $envargs $l $args + } +} + +proc rep036_sub { method niter tnum envargs logset args } { + source ./include.tcl + global testdir + + env_cleanup $testdir + + replsetup $testdir/MSGQUEUEDIR + + set masterdir $testdir/MASTERDIR + set clientdir $testdir/CLIENTDIR + + file mkdir $masterdir + file mkdir $clientdir + + set m_logtype [lindex $logset 0] + set c_logtype [lindex $logset 1] + + # In-memory logs require a large log buffer. + # We always run this test with -txn, so don't adjust txnargs. + set m_logargs [adjust_logargs $m_logtype] + set c_logargs [adjust_logargs $c_logtype] + + # Open a master. 
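A plain-Tcl illustration (mine, not from the patch) of the \[list ...\] idiom used in the env_cmd strings above and below: escaping the brackets keeps the command substitution out of the string while it is being built, so the rep_transport spec is only produced when the finished command is handed to eval.

    set envid 1
    set cmd "puts \[list $envid replsend\]"
    puts $cmd        ;# prints: puts [list 1 replsend]
    eval $cmd        ;# prints: 1 replsend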
+ repladd 1 + set env_cmd(M) "berkdb_env_noerr -create -lock_max 2500 \ + -log_max 1000000 $envargs -home $masterdir $m_logargs \ + -errpfx MASTER -errfile /dev/stderr -txn -rep_master \ + -rep_transport \[list 1 replsend\]" +# set env_cmd(M) "berkdb_env_noerr -create -lock_max 2500 \ +# -log_max 1000000 $envargs -home $masterdir $m_logargs \ +# -errpfx MASTER -errfile /dev/stderr -txn -rep_master \ +# -verbose {rep on} \ +# -rep_transport \[list 1 replsend\]" + set env1 [eval $env_cmd(M)] + error_check_good env1 [is_valid_env $env1] TRUE + + # Open a client + repladd 2 + set env_cmd(C) "berkdb_env_noerr -create -lock_max 2500 \ + -log_max 1000000 $envargs -home $clientdir $c_logargs \ + -errfile /dev/stderr -errpfx CLIENT -txn -rep_client \ + -rep_transport \[list 2 replsend\]" +# set env_cmd(C) "berkdb_env_noerr -create -lock_max 2500 \ +# -log_max 1000000 $envargs -home $clientdir $c_logargs \ +# -errfile /dev/stderr -errpfx CLIENT -txn -rep_client \ +# -verbose {rep on} \ +# -rep_transport \[list 2 replsend\]" + set env2 [eval $env_cmd(C)] + error_check_good env2 [is_valid_env $env2] TRUE + + # Bring the client online by processing the startup messages. + set envlist "{$env1 1} {$env2 2}" + process_msgs $envlist + + # Set up master database. + set testfile "rep$tnum.db" + set omethod [convert_method $method] + set mdb [eval {berkdb_open_noerr} -env $env1 -auto_commit \ + -create -mode 0644 $omethod $testfile] + error_check_good dbopen [is_valid_db $mdb] TRUE + + # Put a record in the master database. + set key MAIN_KEY + set string MAIN_STRING + set t [$env1 txn] + error_check_good txn [is_valid_txn $t $env1] TRUE + set txn "-txn $t" + + set ret [eval \ + {$mdb put} $txn {$key [chop_data $method $string]}] + error_check_good mdb_put $ret 0 + error_check_good txn_commit [$t commit] 0 + + # Fork two writers that write to the master. + set pidlist {} + foreach writer { 1 2 } { + puts "\tRep$tnum.a: Fork child process WRITER$writer." + set pid [exec $tclsh_path $test_path/wrap.tcl \ + rep036script.tcl $testdir/rep036script.log.$writer \ + $masterdir $writer $niter btree &] + lappend pidlist $pid + } + + # Run the main loop until the writers signal completion. + set i 0 + while { [file exists $testdir/1.db] == 0 && \ + [file exists $testdir/2.db] == 0 } { + set string MAIN_STRING.$i + + set t [$env1 txn] + error_check_good txn [is_valid_txn $t $env1] TRUE + set txn "-txn $t" + set ret [eval \ + {$mdb put} $txn {$key [chop_data $method $string]}] + error_check_good mdb_put $ret 0 + error_check_good txn_commit [$t commit] 0 + + if { [expr $i % 10] == 0 } { + puts "\tRep036.c: Wrote MAIN record $i" + } + incr i + + # Process messages. + process_msgs $envlist + + # Wait a while, then do it all again. + tclsleep 1 + } + + + # Confirm that the writers are done and process the messages + # once more to be sure the client is caught up. + watch_procs $pidlist 1 + process_msgs $envlist + + puts "\tRep$tnum.c: Verify logs and databases" + # Check that master and client logs and dbs are identical. + # Logs first ... + set stat [catch {eval exec $util_path/db_printlog \ + -h $masterdir > $masterdir/prlog} result] + error_check_good stat_mprlog $stat 0 + set stat [catch {eval exec $util_path/db_printlog \ + -h $clientdir > $clientdir/prlog} result] + error_check_good mdb [$mdb close] 0 + error_check_good stat_cprlog $stat 0 +# error_check_good log_cmp \ +# [filecmp $masterdir/prlog $clientdir/prlog] 0 + + # ... now the databases. 
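The rep036 code above (and rep037 below) verifies replication by dumping both logs with db_printlog and comparing the dumps. A sketch of that idiom wrapped as a helper; it assumes $util_path points at the build utilities and that the harness proc filecmp (returns 0 when the files match) is loaded. The proc name is hypothetical.

    proc rep_logs_match { masterdir clientdir } {
        global util_path
        eval exec $util_path/db_printlog -h $masterdir > $masterdir/prlog
        eval exec $util_path/db_printlog -h $clientdir > $clientdir/prlog
        # 1 if the master and client log dumps are byte-for-byte identical.
        return [expr {[filecmp $masterdir/prlog $clientdir/prlog] == 0}]
    }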
+ set db1 [eval {berkdb_open -env $env1 -rdonly $testfile}] + set db2 [eval {berkdb_open -env $env2 -rdonly $testfile}] + + error_check_good comparedbs [db_compare \ + $db1 $db2 $masterdir/$testfile $clientdir/$testfile] 0 + error_check_good db1_close [$db1 close] 0 + error_check_good db2_close [$db2 close] 0 + + error_check_good env1_close [$env1 close] 0 + error_check_good env2_close [$env2 close] 0 + replclose $testdir/MSGQUEUEDIR +} diff --git a/db/test/rep036script.tcl b/db/test/rep036script.tcl new file mode 100644 index 000000000..85d612677 --- /dev/null +++ b/db/test/rep036script.tcl @@ -0,0 +1,107 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep036script.tcl,v 11.2 2004/09/22 18:01:06 bostic Exp $ +# +# Rep036 script - create additional writers in master env. +# +# Usage: masterdir writerid +# masterdir: Directory of replication master +# writerid: i.d. number for writer +source ./include.tcl +source $test_path/test.tcl +source $test_path/testutils.tcl +source $test_path/reputils.tcl + +global rand_init +set usage "repscript masterdir writerid nentries method" + +# Verify usage +if { $argc != 4 } { + puts stderr "FAIL:[timestamp] Usage: $usage" + exit +} + +# Initialize arguments +set masterdir [ lindex $argv 0 ] +set writerid [ lindex $argv 1 ] +set nentries [ lindex $argv 2 ] +set method [ lindex $argv 3 ] + +# Join the queue env. We assume the rep test convention of +# placing the messages in $testdir/MSGQUEUEDIR. +set queueenv [eval berkdb_env -home $testdir/MSGQUEUEDIR] +error_check_good script_qenv_open [is_valid_env $queueenv] TRUE +# We need to set up our own machid. +repladd 1 +repladd 2 + +# Join the master env. +set envid 1 +set env_cmd "berkdb_env_noerr -home $masterdir \ + -errfile /dev/stderr -errpfx WRITER.$writerid \ + -txn -rep_master -rep_transport \[list $envid replsend\]" +# set env_cmd "berkdb_env_noerr -home $masterdir \ +# -errfile /dev/stderr -errpfx WRITER.$writerid \ +# -verbose {rep on} \ +# -txn -rep_master -rep_transport \[list $envid replsend\]" +set masterenv [eval $env_cmd] +error_check_good script_env_open [is_valid_env $masterenv] TRUE + +# Open database. +set testfile "rep036.db" +set omethod [convert_method $method] +set mdb [eval {berkdb_open_noerr} -env $masterenv -auto_commit \ + -create $omethod $testfile] +error_check_good dbopen [is_valid_db $mdb] TRUE + +# Write records to the database. +set did [open $dict] +set count 0 +set dictsize 10000 +berkdb srand $rand_init +while { $count < $nentries } { + # + # If nentries exceeds the dictionary size, close + # and reopen to start from the beginning again. + if { [expr [expr $count + 1] % $dictsize] == 0 } { + close $did + set did [open $dict] + } + + gets $did str + set key WRITER.$writerid.$str + set str [reverse $str] + + set t [$masterenv txn] + error_check_good txn [is_valid_txn $t $masterenv] TRUE + set txn "-txn $t" + set ret [eval \ + {$mdb put} $txn {$key [chop_data $method $str]}] + error_check_good put $ret 0 + error_check_good txn [$t commit] 0 + + if { [expr $count % 100] == 1 } { + puts "Wrote WRITER.$writerid record $count" + set sleep [berkdb random_int 0 10] + puts "Writer.$writerid sleeping $sleep seconds" + tclsleep $sleep + } + incr count +} +close $did + +# Clean up. +error_check_good mdb_close [$mdb close] 0 +error_check_good masterenv_close [$masterenv close] 0 +replclose $testdir/MSGQUEUEDIR + +# Communicate with parent by creating a marker file. 
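A plain-Tcl sketch of the marker-file handshake described in the comment above and polled by rep035script/rep036: the signalling side only has to create a file in the shared test directory, and the polling side never opens it, it just checks for existence. The helper names are hypothetical; the real tests create the marker as a Berkeley DB file.

    proc signal_done { testdir } {
        close [open $testdir/marker.db w]
    }
    proc wait_for_marker { testdir {seconds 1} } {
        while { ![file exists $testdir/marker.db] } {
            after [expr {$seconds * 1000}]
        }
    }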
+set markerenv [berkdb_env -create -home $testdir -txn] +error_check_good markerenv_open [is_valid_env $markerenv] TRUE +set marker [eval "berkdb_open \ + -create -btree -auto_commit -env $markerenv $writerid.db"] +error_check_good marker_close [$marker close] 0 +error_check_good markerenv_close [$markerenv close] 0 diff --git a/db/test/rep037.tcl b/db/test/rep037.tcl new file mode 100644 index 000000000..6d3ab4649 --- /dev/null +++ b/db/test/rep037.tcl @@ -0,0 +1,168 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# $Id: rep037.tcl,v 11.2 2004/10/14 14:55:55 sue Exp $ +# +# TEST rep037 +# TEST Test of internal initialization and page throttling. +# TEST +# TEST One master, one client, force page throttling. +# TEST Generate several log files. +# TEST Remove old master log files. +# TEST Delete client files and restart client. +# TEST Put one more record to the master. +# TEST Verify page throttling occurred. +# +proc rep037 { method { niter 1500 } { tnum "037" } args } { + set args [convert_args $method $args] + set saved_args $args + + # This test needs to set its own pagesize. + set pgindex [lsearch -exact $args "-pagesize"] + if { $pgindex != -1 } { + puts "Rep$tnum: skipping for specific pagesizes" + return + } + + # Run the body of the test with and without recovery, + # and with and without cleaning. + set recopts { "" " -recover " } + set cleanopts { clean noclean } + foreach r $recopts { + foreach c $cleanopts { + set args $saved_args + puts "Rep$tnum ($method $c $r $args):\ + Test of internal initialization - page throttling." + rep037_sub $method $niter $tnum $r $c $args + } + } +} + +proc rep037_sub { method niter tnum recargs clean largs } { + global testdir + global util_path + + env_cleanup $testdir + + replsetup $testdir/MSGQUEUEDIR + + set masterdir $testdir/MASTERDIR + set clientdir $testdir/CLIENTDIR + + file mkdir $masterdir + file mkdir $clientdir + + # Log size is small so we quickly create more than one. + # The documentation says that the log file must be at least + # four times the size of the in-memory log buffer. + set pagesize 4096 + append largs " -pagesize $pagesize " + set log_buf [expr $pagesize * 2] + set log_max [expr $log_buf * 4] + + # Open a master. + repladd 1 + set ma_envcmd "berkdb_env_noerr -create -txn nosync \ + -log_buffer $log_buf -log_max $log_max \ + -home $masterdir -rep_transport \[list 1 replsend\]" +# set ma_envcmd "berkdb_env_noerr -create -txn nosync \ +# -log_buffer $log_buf -log_max $log_max \ +# -verbose {rep on} -errpfx MASTER \ +# -home $masterdir -rep_transport \[list 1 replsend\]" + set masterenv [eval $ma_envcmd $recargs -rep_master] + $masterenv rep_limit 0 [expr 32 * 1024] + error_check_good master_env [is_valid_env $masterenv] TRUE + + # Open a client + repladd 2 + set cl_envcmd "berkdb_env_noerr -create -txn nosync \ + -log_buffer $log_buf -log_max $log_max \ + -home $clientdir -rep_transport \[list 2 replsend\]" +# set cl_envcmd "berkdb_env_noerr -create -txn nosync \ +# -log_buffer $log_buf -log_max $log_max \ +# -verbose {rep on} -errpfx CLIENT \ +# -home $clientdir -rep_transport \[list 2 replsend\]" + set clientenv [eval $cl_envcmd $recargs -rep_client] + error_check_good client_env [is_valid_env $clientenv] TRUE + + # Bring the clients online by processing the startup messages. + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist + + # Run rep_test in the master (and update client). 
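Sketch of the throttling knob rep037 exercises below: rep_limit takes a (gbytes, bytes) pair, and capping each response at 32KB forces internal initialization to be split across many requests, which shows up in rep_stat as "Transmission limited". Assumes $env is an open replication environment handle and the harness proc stat_field is loaded; the proc names are hypothetical.

    proc set_throttle { env {bytes 32768} } {
        # Limit any single transmission to $bytes of data.
        $env rep_limit 0 $bytes
    }
    proc times_throttled { env } {
        return [stat_field $env rep_stat "Transmission limited"]
    }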
+ puts "\tRep$tnum.a: Running rep_test in replicated env." + eval rep_test $method $masterenv NULL $niter 0 0 0 $largs + process_msgs $envlist + + puts "\tRep$tnum.b: Close client." + error_check_good client_close [$clientenv close] 0 + + # Run rep_test in the master (don't update client). + puts "\tRep$tnum.c: Running rep_test in replicated env." + eval rep_test $method $masterenv NULL $niter 0 0 0 $largs + replclear 2 + + puts "\tRep$tnum.d: Run db_archive on master." + set res [eval exec $util_path/db_archive -l -h $masterdir] + error_check_bad log.1.present [lsearch -exact $res log.0000000001] -1 + set res [eval exec $util_path/db_archive -d -h $masterdir] + set res [eval exec $util_path/db_archive -l -h $masterdir] + error_check_good log.1.gone [lsearch -exact $res log.0000000001] -1 + + puts "\tRep$tnum.e: Reopen client ($clean)." + if { $clean == "clean" } { + env_cleanup $clientdir + } + set clientenv [eval $cl_envcmd $recargs -rep_client] + error_check_good client_env [is_valid_env $clientenv] TRUE + set envlist "{$masterenv 1} {$clientenv 2}" + process_msgs $envlist 0 NONE err + if { $clean == "noclean" } { + puts "\tRep$tnum.e.1: Trigger log request" + # + # When we don't clean, starting the client doesn't + # trigger any events. We need to generate some log + # records so that the client requests the missing + # logs and that will trigger it. + # + set entries 10 + eval rep_test $method $masterenv NULL $entries $niter 0 0 $largs + process_msgs $envlist 0 NONE err + } + + puts "\tRep$tnum.f: Verify logs and databases" + # Check that master and client logs and dbs are identical. + # Logs first ... + set stat [catch {eval exec $util_path/db_printlog \ + -h $masterdir > $masterdir/prlog} result] + error_check_good stat_mprlog $stat 0 + set stat [catch {eval exec $util_path/db_printlog \ + -h $clientdir > $clientdir/prlog} result] + error_check_good stat_cprlog $stat 0 + error_check_good log_cmp \ + [filecmp $masterdir/prlog $clientdir/prlog] 0 + + # ... now the databases. + set dbname "test.db" + set db1 [eval {berkdb_open -env $masterenv} $largs {-rdonly $dbname}] + set db2 [eval {berkdb_open -env $clientenv} $largs {-rdonly $dbname}] + + error_check_good comparedbs [db_compare \ + $db1 $db2 $masterdir/$dbname $clientdir/$dbname] 0 + error_check_good db1_close [$db1 close] 0 + error_check_good db2_close [$db2 close] 0 + + # + puts "\tRep$tnum.g: Verify throttling." + if { $niter > 1000 } { + set nthrottles \ + [stat_field $masterenv rep_stat "Transmission limited"] + error_check_bad nthrottles $nthrottles -1 + error_check_bad nthrottles $nthrottles 0 + } + error_check_good masterenv_close [$masterenv close] 0 + error_check_good clientenv_close [$clientenv close] 0 + replclose $testdir/MSGQUEUEDIR +} diff --git a/db/test/reputils.tcl b/db/test/reputils.tcl index 8406dbe21..2bff9623b 100644 --- a/db/test/reputils.tcl +++ b/db/test/reputils.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2001-2003 +# Copyright (c) 2001-2004 # Sleepycat Software. All rights reserved. # -# $Id: reputils.tcl,v 11.58 2003/10/31 20:15:43 sandstro Exp $ +# $Id: reputils.tcl,v 11.83 2004/09/22 18:01:06 bostic Exp $ # # Replication testing utilities @@ -33,14 +33,78 @@ global queueenv # messages. 
global queuedbs global machids +global perm_response_list +set perm_response_list {} global perm_sent_list set perm_sent_list {} -global perm_rec_list -set perm_rec_list {} global elect_timeout set elect_timeout 50000000 set drop 0 +# The default for replication testing is for logs to be on-disk. +# Mixed-mode log testing provides a mixture of on-disk and +# in-memory logging, or even all in-memory. When testing on a +# 1-master/1-client test, we try all four options. On a test +# with more clients, we still try four options, randomly +# selecting whether the later clients are on-disk or in-memory. +# + +global mixed_mode_logging +set mixed_mode_logging 0 + +proc create_logsets { nsites } { + global mixed_mode_logging + global logsets + global rand_init + + error_check_good set_random_seed [berkdb srand $rand_init] 0 + if { $mixed_mode_logging == 0 } { + set loglist {} + for { set i 0 } { $i < $nsites } { incr i } { + lappend loglist "on-disk" + } + set logsets [list $loglist] + } + if { $mixed_mode_logging == 1 } { + set set1 {on-disk on-disk} + set set2 {on-disk in-memory} + set set3 {in-memory on-disk} + set set4 {in-memory in-memory} + + # Start with nsites at 2 since we already set up + # the master and first client. + for { set i 2 } { $i < $nsites } { incr i } { + foreach set { set1 set2 set3 set4 } { + if { [berkdb random_int 0 1] == 0 } { + lappend $set "on-disk" + } else { + lappend $set "in-memory" + } + } + } + set logsets [list $set1 $set2 $set3 $set4] + } + return $logsets +} + +proc run_mixedmode { method test {display 0} {run 1} \ + {outfile stdout} {largs ""} } { + global mixed_mode_logging + set mixed_mode_logging 1 + + set prefix [string range $test 0 2] + if { $prefix != "rep" } { + puts "Skipping mixed-mode log testing for non-rep test." + set mixed_mode_logging 0 + return + } + + eval run_method $method $test $display $run $outfile $largs + + # Reset to default values after run. + set mixed_mode_logging 0 +} + # Create the directory structure for replication testing. # Open the master and client environments; store these in the global repenv # Return the master's environment: "-env masterenv" @@ -78,10 +142,16 @@ proc repl_envsetup { envargs largs test {nclients 1} {droppct 0} { oob 0 } } { # as keys/data can run. 
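Usage sketch for the create_logsets helper added above (assumes reputils.tcl has been sourced): each returned element is a per-site list of "on-disk"/"in-memory" settings, which the rep tests feed to adjust_logargs/adjust_txnargs exactly as rep035 and rep036 do.

    foreach l [create_logsets 3] {
        puts "master: [lindex $l 0] client0: [lindex $l 1] client1: [lindex $l 2]"
    }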
# set logmax [expr 3 * 1024 * 1024] - set masterenv [eval {berkdb_env -create -log_max $logmax} $envargs \ + set ma_cmd "berkdb_env -create -log_max $logmax $envargs \ -lock_max 10000 \ - {-home $masterdir -txn nosync -rep_master -rep_transport \ - [list 1 replsend]}] + -home $masterdir -txn nosync -rep_master -rep_transport \ + \[list 1 replsend\]" +# set ma_cmd "berkdb_env_noerr -create -log_max $logmax $envargs \ +# -lock_max 10000 -verbose {rep on} -errfile /dev/stderr \ +# -errpfx $masterdir \ +# -home $masterdir -txn nosync -rep_master -rep_transport \ +# \[list 1 replsend\]" + set masterenv [eval $ma_cmd] error_check_good master_env [is_valid_env $masterenv] TRUE set repenv(master) $masterenv @@ -89,10 +159,16 @@ proc repl_envsetup { envargs largs test {nclients 1} {droppct 0} { oob 0 } } { for { set i 0 } { $i < $nclients } { incr i } { set envid [expr $i + 2] repladd $envid - set clientenv [eval {berkdb_env -create} $envargs -txn nosync \ - {-cachesize { 0 10000000 0 }} -lock_max 10000 \ - { -home $clientdir($i) -rep_client -rep_transport \ - [list $envid replsend]}] + set cl_cmd "berkdb_env -create $envargs -txn nosync \ + -cachesize { 0 10000000 0 } -lock_max 10000 \ + -home $clientdir($i) -rep_client -rep_transport \ + \[list $envid replsend\]" +# set cl_cmd "berkdb_env_noerr -create $envargs -txn nosync \ +# -cachesize { 0 10000000 0 } -lock_max 10000 \ +# -home $clientdir($i) -rep_client -rep_transport \ +# \[list $envid replsend\] -verbose {rep on} \ +# -errfile /dev/stderr -errpfx $clientdir($i)" + set clientenv [eval $cl_cmd] error_check_good client_env [is_valid_env $clientenv] TRUE set repenv($i) $clientenv } @@ -158,18 +234,17 @@ proc repl_envprocq { test { nclients 1 } { oob 0 }} { # number of messages that were enqueued. for { set i 0 } { $i < $nclients } { incr i } { set clientenv $repenv($i) - set stats [$clientenv rep_stat] - set queued [getstats $stats \ - {Total log records queued}] + set queued [stat_field $clientenv rep_stat \ + "Total log records queued"] error_check_bad queued_stats \ $queued -1 - set requested [getstats $stats \ - {Log records requested}] + set requested [stat_field $clientenv rep_stat \ + "Log records requested"] error_check_bad requested_stats \ $requested -1 if { $queued != 0 && $do_check != 0 } { error_check_good num_requested \ - [expr $requested < $queued] 1 + [expr $requested <= $queued] 1 } $clientenv rep_request 1 1 @@ -385,8 +460,7 @@ proc replsend { control rec fromid toid flags lsn } { global queuedbs queueenv machids global drop drop_msg global perm_sent_list - - if { $flags == "perm" } { + if { [llength $perm_sent_list] != 0 && $flags == "perm" } { # puts "replsend sent perm message, LSN $lsn" lappend perm_sent_list $lsn } @@ -429,7 +503,7 @@ proc replsend { control rec fromid toid flags lsn } { return 0 } -# Nuke all the pending messages for a particular site. +# Discard all the pending messages for a particular site. proc replclear { machid } { global queuedbs queueenv @@ -463,6 +537,23 @@ proc repladd { machid } { lappend machids $machid } +# Acquire a handle to work with an existing machine's replication +# queue. This is for situations where more than one process +# is working with a message queue. In general, having more than one +# process handle the queue is wrong. However, in order to test some +# things, we need two processes (since Tcl doesn't support threads). We +# go to great pain in the test harness to make sure this works, but we +# don't let customers do it. 
+proc repljoin { machid } { + global queueenv queuedbs machids + + set queuedbs($machid) [berkdb open -auto_commit \ + -env $queueenv repqueue$machid.db] + error_check_good repqueue_create [is_valid_db $queuedbs($machid)] TRUE + + lappend machids $machid +} + # Process a queue of messages, skipping every "skip_interval" entry. # We traverse the entire queue, but since we skip some messages, we # may end up leaving things in the queue, which should get picked up @@ -470,9 +561,8 @@ proc repladd { machid } { proc replprocessqueue { dbenv machid { skip_interval 0 } { hold_electp NONE } \ { newmasterp NONE } { dupmasterp NONE } { errp NONE } } { global queuedbs queueenv errorCode - global perm_response - set perm_response "" - global perm_rec_list + global perm_response_list + global startup_done # hold_electp is a call-by-reference variable which lets our caller # know we need to hold an election. @@ -505,6 +595,13 @@ proc replprocessqueue { dbenv machid { skip_interval 0 } { hold_electp NONE } \ set nproced 0 set txn [$queueenv txn] + + # If we are running separate processes, the second process has + # to join an existing message queue. + if { [info exists queuedbs($machid)] == 0 } { + repljoin $machid + } + set dbc [$queuedbs($machid) cursor -txn $txn] error_check_good process_dbc($machid) \ @@ -512,8 +609,9 @@ proc replprocessqueue { dbenv machid { skip_interval 0 } { hold_electp NONE } \ for { set dbt [$dbc get -first] } \ { [llength $dbt] != 0 } \ - { set dbt [$dbc get -next] } { + { } { set data [lindex [lindex $dbt 0] 1] + set recno [lindex [lindex $dbt 0] 0] # If skip_interval is nonzero, we want to process messages # out of order. We do this in a simple but slimy way-- @@ -533,37 +631,44 @@ proc replprocessqueue { dbenv machid { skip_interval 0 } { hold_electp NONE } \ if { $skip_interval != 0 } { if { $nproced % $skip_interval == 1 } { incr nproced + set dbt [$dbc get -next] continue } } + # We need to remove the current message from the queue, + # because we're about to end the transaction and someone + # else processing messages might come in and reprocess this + # message which would be bad. + error_check_good queue_remove [$dbc del] 0 + # We have to play an ugly cursor game here: we currently # hold a lock on the page of messages, but rep_process_message # might need to lock the page with a different cursor in - # order to send a response. So save our recno, close + # order to send a response. So save the next recno, close # the cursor, and then reopen and reset the cursor. - set recno [lindex [lindex $dbt 0] 0] + # If someone else is processing this queue, our entry might + # have gone away, and we need to be able to handle that. + error_check_good dbc_process_close [$dbc close] 0 error_check_good txn_commit [$txn commit] 0 + set ret [catch {$dbenv rep_process_message \ [lindex $data 2] [lindex $data 0] [lindex $data 1]} res] - set txn [$queueenv txn] - set dbc [$queuedbs($machid) cursor -txn $txn] - set dbt [$dbc get -set $recno] # Save all ISPERM and NOTPERM responses so we can compare their - # LSNs to the LSN in the log. The variable perm_response holds - # the response. + # LSNs to the LSN in the log. The variable perm_response_list + # holds the entire response so we can extract responses and + # LSNs as needed. 
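Caller-side sketch of how a test opts into collecting the ISPERM/NOTPERM responses described above: passing 1 as the second argument to process_msgs (defined later in this file) seeds perm_response_list, and replprocessqueue then appends each response as a {type lsn} pair. Assumes the usual $envlist of {env machid} pairs and a loaded harness.

    global perm_response_list
    process_msgs $envlist 1
    foreach resp $perm_response_list {
        if { [llength $resp] == 2 } {
            puts "[lindex $resp 0] returned for LSN [lindex $resp 1]"
        }
    }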
# - if { [is_substr $res ISPERM] || [is_substr $res NOTPERM] } { - set perm_response $res - set lsn [lindex $perm_response 1] - lappend perm_rec_list $lsn + if { [llength $perm_response_list] != 0 && \ + ([is_substr $res ISPERM] || [is_substr $res NOTPERM]) } { + lappend perm_response_list $res } if { $ret != 0 } { if { [string compare $errp NONE] != 0 } { - set errorp $res + set errorp "$dbenv $machid $res" } else { error "FAIL:[timestamp]\ rep_process_message returned $res" @@ -572,7 +677,15 @@ proc replprocessqueue { dbenv machid { skip_interval 0 } { hold_electp NONE } \ incr nproced - $dbc del + # Now, re-establish the cursor position. We fetch the + # current record number. If there is something there, + # that is the record for the next iteration. If there + # is nothing there, then we've consumed the last item + # in the queue. + + set txn [$queueenv txn] + set dbc [$queuedbs($machid) cursor -txn $txn] + set dbt [$dbc get -set_range $recno] if { $ret == 0 } { set rettype [lindex $res 0] @@ -580,11 +693,14 @@ proc replprocessqueue { dbenv machid { skip_interval 0 } { hold_electp NONE } \ # # Do nothing for 0 and NEWSITE # + if { [is_substr $rettype STARTUPDONE] } { + set startup_done 1 + } if { [is_substr $rettype HOLDELECTION] } { set hold_elect 1 } if { [is_substr $rettype DUPMASTER] } { - set dupmaster 1 + set dupmaster "1 $dbenv $machid" } if { [is_substr $rettype NOTPERM] || \ [is_substr $rettype ISPERM] } { @@ -599,7 +715,7 @@ proc replprocessqueue { dbenv machid { skip_interval 0 } { hold_electp NONE } \ } } - if { $errorp == 1 } { + if { $errorp != 0 } { # Break also on an error, caller wants to handle it. break } @@ -608,7 +724,7 @@ proc replprocessqueue { dbenv machid { skip_interval 0 } { hold_electp NONE } \ break } if { $dupmaster == 1 } { - # Break also on a HOLDELECTION, for the same reason. + # Break also on a DUPMASTER, for the same reason. break } @@ -658,7 +774,8 @@ global elections_in_progress set elect_serial 0 # Start an election in a sub-process. -proc start_election { pfx qdir envstring nsites pri timeout {err "none"}} { +proc start_election \ + { pfx qdir envstring nsites nvotes pri timeout {err "none"} {crash 0}} { source ./include.tcl global elect_serial elect_timeout elections_in_progress machids @@ -677,12 +794,12 @@ proc start_election { pfx qdir envstring nsites pri timeout {err "none"}} { puts $oid "replsetup $qdir" foreach i $machids { puts $oid "repladd $i" } puts $oid "set env_cmd \{$envstring\}" - puts $oid "set dbenv \[eval \$env_cmd -errfile \ - $testdir/ELECTION_ERRFILE.$elect_serial -errpfx $pfx \]" # puts $oid "set dbenv \[eval \$env_cmd -errfile \ -# /dev/stdout -errpfx $pfx \]" +# $testdir/ELECTION_ERRFILE.$elect_serial -errpfx $pfx \]" + puts $oid "set dbenv \[eval \$env_cmd -errfile \ + /dev/stdout -errpfx $pfx \]" puts $oid "\$dbenv test abort $err" - puts $oid "set res \[catch \{\$dbenv rep_elect $nsites $pri \ + puts $oid "set res \[catch \{\$dbenv rep_elect $nsites $nvotes $pri \ $elect_timeout\} ret\]" puts $oid "set r \[open \$testdir/ELECTION_RESULT.$elect_serial w\]" puts $oid "if \{\$res == 0 \} \{" @@ -690,10 +807,13 @@ proc start_election { pfx qdir envstring nsites pri timeout {err "none"}} { puts $oid "\} else \{" puts $oid "puts \$r \"ERROR \$ret\"" puts $oid "\}" - if { $err != "none" } { + # + # This loop calls rep_elect a second time with the error cleared. + # We don't want to do that if we are simulating a crash. 
+ if { $err != "none" && $crash != 1 } { puts $oid "\$dbenv test abort none" - puts $oid "set res \[catch \{\$dbenv rep_elect $nsites $pri \ - $elect_timeout\} ret\]" + puts $oid "set res \[catch \{\$dbenv rep_elect $nsites \ + $nvotes $pri $elect_timeout\} ret\]" puts $oid "if \{\$res == 0 \} \{" puts $oid "puts \$r \"NEWMASTER \$ret\"" puts $oid "\} else \{" @@ -703,8 +823,8 @@ proc start_election { pfx qdir envstring nsites pri timeout {err "none"}} { puts $oid "close \$r" close $oid - set t [open "|$tclsh_path >& $testdir/ELECTION_OUTPUT.$elect_serial" w] -# set t [open "|$tclsh_path" w] +# set t [open "|$tclsh_path >& $testdir/ELECTION_OUTPUT.$elect_serial" w] + set t [open "|$tclsh_path" w] puts $t "source ./include.tcl" puts $t "source $testdir/ELECTION_SOURCE.$elect_serial" flush $t @@ -713,6 +833,250 @@ proc start_election { pfx qdir envstring nsites pri timeout {err "none"}} { return $elect_serial } +proc setpriority { priority nclients winner {start 0} } { + upvar $priority pri + + for { set i $start } { $i < [expr $nclients + $start] } { incr i } { + if { $i == $winner } { + set pri($i) 100 + } else { + set pri($i) 10 + } + } +} + +# run_election has the following arguments: +# Arrays: +# ecmd Array of the commands for setting up each client env. +# cenv Array of the handles to each client env. +# errcmd Array of where errors should be forced. +# priority Array of the priorities of each client env. +# crash If an error is forced, should we crash or recover? +# The upvar command takes care of making these arrays available to +# the procedure. +# +# Ordinary variables: +# qdir Directory where the message queue is located. +# msg Message prefixed to the output. +# elector This client calls the first election. +# nsites Number of sites in the replication group. +# nvotes Number of votes required to win the election. +# nclients Number of clients participating in the election. +# win The expected winner of the election. +# reopen Should the new master (i.e. winner) be closed +# and reopened as a client? +# dbname Name of the underlying database. Defaults to +# the name of the db created by rep_test. +# +proc run_election { ecmd celist errcmd priority crsh qdir msg elector \ + nsites nvotes nclients win {reopen 0} {dbname "test.db"} } { + global elect_timeout elect_serial + global is_windows_test + global rand_init + upvar $ecmd env_cmd + upvar $celist cenvlist + upvar $errcmd err_cmd + upvar $priority pri + upvar $crsh crash + + set elect_timeout 5000000 + + foreach pair $cenvlist { + set id [lindex $pair 1] + set i [expr $id - 2] + set elect_pipe($i) INVALID + replclear $id + } + + # + # XXX + # We need to somehow check for the warning if nvotes is not + # a majority. Problem is that warning will go into the child + # process' output. Furthermore, we need a mechanism that can + # handle both sending the output to a file and sending it to + # /dev/stderr when debugging without failing the + # error_check_good check. + # + puts "\t\t$msg.1: Election with nsites=$nsites,\ + nvotes=$nvotes, nclients=$nclients" + puts "\t\t$msg.2: First elector is $elector,\ + expected winner is $win (eid [expr $win + 2])" + incr elect_serial + set pfx "CHILD$elector.$elect_serial" + # Windows requires a longer timeout. 
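Caller-side sketch for the run_election interface documented above (not from the patch): setpriority marks the intended winner with the highest priority, and the env_cmd, cenvlist, err_cmd, pri and crash arrays are passed by name. $nclients, $winner, $qdir, $tnum, $elector, $nsites and $nvotes are assumed to be set up the way the rep election tests do.

    setpriority pri $nclients $winner
    run_election env_cmd cenvlist err_cmd pri crash $qdir \
        "Rep$tnum.b" $elector $nsites $nvotes $nclients $winner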
+ if { $is_windows_test == 1 } { + set elect_timeout [expr $elect_timeout * 3] + } + set elect_pipe($elector) [start_election \ + $pfx $qdir $env_cmd($elector) $nsites $nvotes $pri($elector) \ + $elect_timeout $err_cmd($elector) $crash($elector)] + + tclsleep 2 + + set got_newmaster 0 + set tries [expr [expr $elect_timeout * 4] / 1000000] + + # If we're simulating a crash, skip the while loop and + # just give the initial election a chance to complete. + set crashing 0 + for { set i 0 } { $i < $nclients } { incr i } { + if { $crash($i) == 1 } { + set crashing 1 + } + } + + if { $crashing == 1 } { + tclsleep 10 + } else { + while { 1 } { + set nproced 0 + set he 0 + set nm 0 + set nm2 0 + + foreach pair $cenvlist { + set he 0 + set envid [lindex $pair 1] + set i [expr $envid - 2] + set clientenv($i) [lindex $pair 0] + set child_done [check_election $elect_pipe($i) nm2] + if { $got_newmaster == 0 && $nm2 != 0 } { + error_check_good newmaster_is_master2 $nm2 \ + [expr $win + 2] + set got_newmaster $nm2 + + # If this env is the new master, it needs to + # configure itself as such--this is a different + # env handle from the one that performed the + # election. + if { $nm2 == $envid } { + error_check_good make_master($i) \ + [$clientenv($i) rep_start -master] \ + 0 + } + } + incr nproced \ + [replprocessqueue $clientenv($i) $envid 0 he nm] +# puts "Tries $tries: Processed queue for client $i, $nproced msgs he $he nm $nm nm2 $nm2" + if { $he == 1 } { + # + # Only close down the election pipe if the + # previously created one is done and + # waiting for new commands, otherwise + # if we try to close it while it's in + # progress we hang this main tclsh. + # + if { $elect_pipe($i) != "INVALID" && \ + $child_done == 1 } { + close_election $elect_pipe($i) + set elect_pipe($i) "INVALID" + } +# puts "Starting election on client $i" + if { $elect_pipe($i) == "INVALID" } { + incr elect_serial + set pfx "CHILD$i.$elect_serial" + set elect_pipe($i) [start_election \ + $pfx $qdir \ + $env_cmd($i) $nsites \ + $nvotes $pri($i) $elect_timeout] + set got_hold_elect($i) 1 + } + } + if { $nm != 0 } { + error_check_good newmaster_is_master $nm \ + [expr $win + 2] + set got_newmaster $nm + + # If this env is the new master, it needs to + # configure itself as such--this is a different + # env handle from the one that performed the + # election. + if { $nm == $envid } { + error_check_good make_master($i) \ + [$clientenv($i) rep_start -master] \ + 0 + # Occasionally force new log records + # to be written. + set write [berkdb random_int 1 10] + if { $write == 1 } { + set db [berkdb_open -env \ + $clientenv($i) \ + -auto_commit $dbname] + error_check_good dbopen \ + [is_valid_db $db] TRUE + error_check_good dbclose \ + [$db close] 0 + } + } + } + } + + # We need to wait around to make doubly sure that the + # election has finished... + if { $nproced == 0 } { + incr tries -1 + if { $tries == 0 } { + break + } else { + tclsleep 1 + } + } else { + set tries $tries + } + } + + # Verify that expected winner is actually the winner. + error_check_good "client $win wins" $got_newmaster [expr $win + 2] + } + + cleanup_elections + + # + # Make sure we've really processed all the post-election + # sync-up messages. If we're simulating a crash, don't process + # any more messages. 
+ # + if { $crashing == 0 } { + process_msgs $cenvlist + } + + if { $reopen == 1 } { + puts "\t\t$msg.3: Closing new master and reopening as client" + error_check_good newmaster_close [$clientenv($win) close] 0 + + set clientenv($win) [eval $env_cmd($win)] + error_check_good cl($win) [is_valid_env $clientenv($win)] TRUE + set newelector "$clientenv($win) [expr $win + 2]" + set cenvlist [lreplace $cenvlist $win $win $newelector] + if { $crashing == 0 } { + process_msgs $cenvlist + } + } +} + +proc got_newmaster { cenv i newmaster win {dbname "test.db"} } { + upvar $cenv clientenv + + # Check that the new master we got is the one we expected. + error_check_good newmaster_is_master $newmaster [expr $win + 2] + + # If this env is the new master, it needs to configure itself + # as such -- this is a different env handle from the one that + # performed the election. + if { $nm == $envid } { + error_check_good make_master($i) \ + [$clientenv($i) rep_start -master] 0 + # Occasionally force new log records to be written. + set write [berkdb random_int 1 10] + if { $write == 1 } { + set db [berkdb_open -env $clientenv($i) -auto_commit \ + -create -btree $dbname] + error_check_good dbopen [is_valid_db $db] TRUE + error_check_good dbclose [$db close] 0 + } + } +} + proc check_election { id newmasterp } { source ./include.tcl @@ -761,9 +1125,24 @@ proc cleanup_elections { } { # This is essentially a copy of test001, but it only does the put/get # loop AND it takes an already-opened db handle. # -proc rep_test { method env db {nentries 10000} {start 0} {skip 1} } { +proc rep_test { method env repdb {nentries 10000} \ + {start 0} {skip 0} {needpad 0} args } { source ./include.tcl + # + # Open the db if one isn't given. Close before exit. + # + if { $repdb == "NULL" } { + set testfile "test.db" + set largs [convert_args $method $args] + set omethod [convert_method $method] + set db [eval {berkdb_open_noerr -env $env -auto_commit -create \ + -mode 0644} $largs $omethod $testfile] + error_check_good reptest_db [is_valid_db $db] TRUE + } else { + set db $repdb + } + # # If we are using an env, then testfile should just be the db name. # Otherwise it is the test directory and the name. @@ -774,15 +1153,11 @@ proc rep_test { method env db {nentries 10000} {start 0} {skip 1} } { # The "start" variable determines the record number to start # with, if we're using record numbers. The "skip" variable - # determines whether to start with the first entry in the - # dict file (if skip = 0) or skip over "start" entries (skip = 1). - # Skip is set to 1 to get different key/data pairs for - # different iterations of replication tests. Skip must be set - # to 0 if we're running a test that uses 10000 iterations, - # otherwise we run out of data to read in. - - if { $skip == 1 } { - for { set count 0 } { $count < $start } { incr count } { + # determines which dictionary entry to start with. In normal + # use, skip is equal to start. + + if { $skip != 0 } { + for { set count 0 } { $count < $skip } { incr count } { gets $did str } } @@ -813,6 +1188,19 @@ proc rep_test { method env db {nentries 10000} {start 0} {skip 1} } { set key $str set str [reverse $str] } + # + # We want to make sure we send in exactly the same + # length data so that LSNs match up for some tests + # in replication (rep021). + # + if { [is_fixed_length $method] == 1 && $needpad } { + # + # Make it something visible and obvious, 'A'. 
+ # + set p 65 + set str [make_fixed_length $method $str $p] + set kvals($key) $str + } set t [$env txn] error_check_good txn [is_valid_txn $t $env] TRUE set txn "-txn $t" @@ -820,11 +1208,67 @@ proc rep_test { method env db {nentries 10000} {start 0} {skip 1} } { {$db put} $txn $pflags {$key [chop_data $method $str]}] error_check_good put $ret 0 error_check_good txn [$t commit] 0 - if { $count % 5 == 0 } { + + # Checkpoint 10 times during the run, but not more + # frequently than every 5 entries. + set checkfreq [expr $nentries / 10] + if { $checkfreq < 5 } { + set checkfreq 5 + } + if { $count % $checkfreq == 0 } { error_check_good txn_checkpoint($count) \ [$env txn_checkpoint] 0 } incr count } close $did + if { $repdb == "NULL" } { + error_check_good rep_close [$db close] 0 + } +} + +proc process_msgs { elist {perm_response 0} {dupp NONE} {errp NONE} } { + if { $perm_response == 1 } { + global perm_response_list + set perm_response_list {{}} + } + + if { [string compare $dupp NONE] != 0 } { + upvar $dupp dupmaster + set dupmaster 0 + } else { + set dupmaster NONE + } + + if { [string compare $errp NONE] != 0 } { + upvar $errp errorp + set errorp 0 + } else { + set errorp NONE + } + + while { 1 } { + set nproced 0 + foreach pair $elist { + set envname [lindex $pair 0] + set envid [lindex $pair 1] + # + # If we need to send in all the other args + incr nproced [replprocessqueue $envname $envid \ + 0 NONE NONE dupmaster errorp] + # + # If the user is expecting to handle an error and we get + # one, return the error immediately. + # + if { $dupmaster != 0 && $dupmaster != "NONE" } { + return + } + if { $errorp != 0 && $errorp != "NONE" } { + return + } + } + if { $nproced == 0 } { + break + } + } } diff --git a/db/test/rpc001.tcl b/db/test/rpc001.tcl index 5ce449f42..e07d5dcb1 100644 --- a/db/test/rpc001.tcl +++ b/db/test/rpc001.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: rpc001.tcl,v 11.38 2003/09/19 16:53:25 sandstro Exp $ +# $Id: rpc001.tcl,v 11.41 2004/06/01 19:55:25 carol Exp $ # # TEST rpc001 # TEST Test RPC server timeouts for cursor, txn and env handles. 
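Sketch of the standard drive-and-drain pattern built on the rep_test and process_msgs helpers added to reputils.tcl above. Assumes a master handle $masterenv, an $envlist of {env machid} pairs, and $method/$largs as set up by the calling test; NULL makes rep_test open and close test.db itself, and advancing start/skip keeps successive batches from reusing keys.

    set niter 100
    eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
    process_msgs $envlist
    # A later batch advances both the record numbers and the dictionary offset.
    eval rep_test $method $masterenv NULL $niter $niter $niter 0 $largs
    process_msgs $envlist 0 NONE err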
@@ -11,6 +11,7 @@ proc rpc001 { } { global __debug_on global __debug_print global errorInfo + global is_je_test global rpc_svc source ./include.tcl @@ -20,7 +21,7 @@ proc rpc001 { } { set ttime 5 set itime 10 puts "Rpc001: Server timeouts: resource $ttime sec, idle $itime sec" - set dpid [rpc_server_start 0 30 "-t $ttime" "-I $itime"] + set dpid [rpc_server_start 0 30 -t $ttime -I $itime] puts "\tRpc001.a: Started server, pid $dpid" # @@ -65,10 +66,12 @@ proc rpc001 { } { error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE lappend curs_list $dbc - puts "\tRpc001.d4: Starting a nested transaction" - set txn [$env txn -parent $txn] - error_check_good txn_begin [is_valid_txn $txn $env] TRUE - set txn_list [linsert $txn_list 0 $txn] + if { !$is_je_test } { + puts "\tRpc001.d4: Starting a nested transaction" + set txn [$env txn -parent $txn] + error_check_good txn_begin [is_valid_txn $txn $env] TRUE + set txn_list [linsert $txn_list 0 $txn] + } puts "\tRpc001.d5: Create a cursor, no transaction" set dbc [$db cursor] @@ -103,123 +106,137 @@ proc rpc001 { } { } set txn_list {} - set ntxns 8 - puts "\tRpc001.e: Nested ($ntxns x $ntxns) txn activity test" - puts "\tRpc001.e1: Starting parent transaction" - set txn [$env txn] - error_check_good txn_begin [is_valid_txn $txn $env] TRUE - set txn_list [linsert $txn_list 0 $txn] - set last_txn $txn - set parent_txn $txn - # - # First set a breadth of 'ntxns' - # We need 2 from this set for testing later on. Just set them - # up separately first. - # - puts "\tRpc001.e2: Creating $ntxns child transactions" - set child0 [$env txn -parent $parent_txn] - error_check_good txn_begin [is_valid_txn $child0 $env] TRUE - set child1 [$env txn -parent $parent_txn] - error_check_good txn_begin [is_valid_txn $child1 $env] TRUE - - for {set i 2} {$i < $ntxns} {incr i} { - set txn [$env txn -parent $parent_txn] - error_check_good txn_begin [is_valid_txn $txn $env] TRUE - set txn_list [linsert $txn_list 0 $txn] - } - - # - # Now make one 'ntxns' deeply nested. - # Add one more for testing later on separately. - # - puts "\tRpc001.e3: Creating $ntxns nested child transactions" - for {set i 0} {$i < $ntxns} {incr i} { - set txn [$env txn -parent $last_txn] + if { !$is_je_test } { + set ntxns 8 + puts "\tRpc001.e: Nested ($ntxns x $ntxns) txn activity test" + puts "\tRpc001.e1: Starting parent transaction" + set txn [$env txn] error_check_good txn_begin [is_valid_txn $txn $env] TRUE set txn_list [linsert $txn_list 0 $txn] set last_txn $txn - } - set last_parent $last_txn - set last_txn [$env txn -parent $last_parent] - error_check_good txn_begin [is_valid_txn $last_txn $env] TRUE - - puts "\tRpc001.e4: Open a cursor in deepest transaction" - set dbc [$db cursor -txn $last_txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - - puts "\tRpc001.e5: Duplicate that cursor" - set dbcdup [$dbc dup] - error_check_good db_cursor [is_valid_cursor $dbcdup $db] TRUE - lappend curs_list $dbcdup - - puts "\tRpc001.f: Timeout then activate duplicate cursor" - tclsleep $sleeptime - set stat [catch {$dbcdup close} ret] - error_check_good dup_close:$dbcdup $stat 0 - error_check_good dup_close:$dbcdup $ret 0 - - # - # Make sure that our parent txn is not timed out. We will - # try to begin another child tnx using the parent. We expect - # that to succeed. Immediately commit that txn. 
- # - set stat [catch {$env txn -parent $parent_txn} newchild] - error_check_good newchildtxn $stat 0 - error_check_good newcommit [$newchild commit] 0 - - puts "\tRpc001.g: Timeout, then activate cursor" - tclsleep $sleeptime - set stat [catch {$dbc close} ret] - error_check_good dbc_close:$dbc $stat 0 - error_check_good dbc_close:$dbc $ret 0 - - # - # Make sure that our parent txn is not timed out. We will - # try to begin another child tnx using the parent. We expect - # that to succeed. Immediately commit that txn. - # - set stat [catch {$env txn -parent $parent_txn} newchild] - error_check_good newchildtxn $stat 0 - error_check_good newcommit [$newchild commit] 0 - - puts "\tRpc001.h: Timeout, then activate child txn" - tclsleep $sleeptime - set stat [catch {$child0 commit} ret] - error_check_good child_commit $stat 0 - error_check_good child_commit:$child0 $ret 0 - - # - # - # Make sure that our nested txn is not timed out. We will - # try to begin another child tnx using the parent. We expect - # that to succeed. Immediately commit that txn. - # - set stat [catch {$env txn -parent $last_parent} newchild] - error_check_good newchildtxn $stat 0 - error_check_good newcommit [$newchild commit] 0 - - puts "\tRpc001.i: Timeout, then activate nested txn" - tclsleep $sleeptime - set stat [catch {$last_txn commit} ret] - error_check_good lasttxn_commit $stat 0 - error_check_good lasttxn_commit:$child0 $ret 0 - - # - # Make sure that our child txn is not timed out. We should - # be able to commit it. - # - set stat [catch {$child1 commit} ret] - error_check_good child_commit:$child1 $stat 0 - error_check_good child_commit:$child1 $ret 0 - - # - # Clean up. They were inserted in LIFO order, so we should - # just be able to commit them all. - foreach t $txn_list { - set stat [catch {$t commit} ret] - error_check_good txn_commit:$t $stat 0 - error_check_good txn_commit:$t $ret 0 + set parent_txn $txn + + # + # First set a breadth of 'ntxns' + # We need 2 from this set for testing later on. Just + # set them up separately first. + # + puts "\tRpc001.e2: Creating $ntxns child transactions" + set child0 [$env txn -parent $parent_txn] + error_check_good txn_begin \ + [is_valid_txn $child0 $env] TRUE + set child1 [$env txn -parent $parent_txn] + error_check_good txn_begin \ + [is_valid_txn $child1 $env] TRUE + + for {set i 2} {$i < $ntxns} {incr i} { + set txn [$env txn -parent $parent_txn] + error_check_good txn_begin \ + [is_valid_txn $txn $env] TRUE + set txn_list [linsert $txn_list 0 $txn] + } + + # + # Now make one 'ntxns' deeply nested. + # Add one more for testing later on separately. 
+ # + puts "\tRpc001.e3: Creating $ntxns nested child transactions" + for {set i 0} {$i < $ntxns} {incr i} { + set txn [$env txn -parent $last_txn] + error_check_good txn_begin \ + [is_valid_txn $txn $env] TRUE + set txn_list [linsert $txn_list 0 $txn] + set last_txn $txn + } + set last_parent $last_txn + set last_txn [$env txn -parent $last_parent] + error_check_good txn_begin \ + [is_valid_txn $last_txn $env] TRUE + + puts "\tRpc001.e4: Open a cursor in deepest transaction" + set dbc [$db cursor -txn $last_txn] + error_check_good db_cursor \ + [is_valid_cursor $dbc $db] TRUE + + puts "\tRpc001.e5: Duplicate that cursor" + set dbcdup [$dbc dup] + error_check_good db_cursor \ + [is_valid_cursor $dbcdup $db] TRUE + lappend curs_list $dbcdup + + puts "\tRpc001.f: Timeout then activate duplicate cursor" + tclsleep $sleeptime + set stat [catch {$dbcdup close} ret] + error_check_good dup_close:$dbcdup $stat 0 + error_check_good dup_close:$dbcdup $ret 0 + + # + # Make sure that our parent txn is not timed out. We + # will try to begin another child tnx using the parent. + # We expect that to succeed. Immediately commit that + # txn. + # + set stat [catch {$env txn -parent $parent_txn} newchild] + error_check_good newchildtxn $stat 0 + error_check_good newcommit [$newchild commit] 0 + + puts "\tRpc001.g: Timeout, then activate cursor" + tclsleep $sleeptime + set stat [catch {$dbc close} ret] + error_check_good dbc_close:$dbc $stat 0 + error_check_good dbc_close:$dbc $ret 0 + + # + # Make sure that our parent txn is not timed out. We + # will try to begin another child tnx using the parent. + # We expect that to succeed. Immediately commit that + # txn. + # + set stat [catch {$env txn -parent $parent_txn} newchild] + error_check_good newchildtxn $stat 0 + error_check_good newcommit [$newchild commit] 0 + + puts "\tRpc001.h: Timeout, then activate child txn" + tclsleep $sleeptime + set stat [catch {$child0 commit} ret] + error_check_good child_commit $stat 0 + error_check_good child_commit:$child0 $ret 0 + + # + # Make sure that our nested txn is not timed out. We + # will try to begin another child tnx using the parent. + # We expect that to succeed. Immediately commit that + # txn. + # + set stat \ + [catch {$env txn -parent $last_parent} newchild] + error_check_good newchildtxn $stat 0 + error_check_good newcommit [$newchild commit] 0 + + puts "\tRpc001.i: Timeout, then activate nested txn" + tclsleep $sleeptime + set stat [catch {$last_txn commit} ret] + error_check_good lasttxn_commit $stat 0 + error_check_good lasttxn_commit:$child0 $ret 0 + + # + # Make sure that our child txn is not timed out. We + # should be able to commit it. + # + set stat [catch {$child1 commit} ret] + error_check_good child_commit:$child1 $stat 0 + error_check_good child_commit:$child1 $ret 0 + + # + # Clean up. They were inserted in LIFO order, so we + # should just be able to commit them all. + # + foreach t $txn_list { + set stat [catch {$t commit} ret] + error_check_good txn_commit:$t $stat 0 + error_check_good txn_commit:$t $ret 0 + } } set stat [catch {$db close} ret] @@ -228,20 +245,18 @@ proc rpc001 { } { rpc_timeoutjoin $env "Rpc001.j" $sleeptime 0 rpc_timeoutjoin $env "Rpc001.k" $sleeptime 1 - # - # We need a 2nd env just to do an op to timeout the env. - # Make the flags different so we don't end up sharing a handle. 
- # - set env1 [eval {berkdb_env -create -mode 0644 -home $home \ - -server $rpc_server -client_timeout 10000}] - error_check_good lock_env:open [is_valid_env $env1] TRUE - puts "\tRpc001.l: Timeout idle env handle" set sleeptime [expr $itime + 2] tclsleep $sleeptime - set stat [catch {$env1 close} ret] - error_check_good env1_close $stat 0 + # + # We need to do another operation to time out the environment + # handle. Open another environment, with an invalid home + # directory. + # + set stat [catch {eval {berkdb_env_noerr -home "$home.fail" \ + -server $rpc_server}} ret] + error_check_good env_open $stat 1 set stat [catch {$env close} ret] error_check_good env_close $stat 1 @@ -284,13 +299,13 @@ proc rpc_timeoutjoin {env msg sleeptime use_txn} { {apple pie} {raspberry pie} {lemon pie} } set fdb [eval {berkdb_open -create -btree -mode 0644} \ - $txnflag -env $env -dup fruit.db] + $txnflag -env $env -dup -dupsort fruit.db] error_check_good dbopen [is_valid_db $fdb] TRUE set pdb [eval {berkdb_open -create -btree -mode 0644} \ - $txnflag -env $env -dup price.db] + $txnflag -env $env -dup -dupsort price.db] error_check_good dbopen [is_valid_db $pdb] TRUE set ddb [eval {berkdb_open -create -btree -mode 0644} \ - $txnflag -env $env -dup dessert.db] + $txnflag -env $env -dup -dupsort dessert.db] error_check_good dbopen [is_valid_db $ddb] TRUE foreach kd $fruit { set k [lindex $kd 0] @@ -320,10 +335,14 @@ proc rpc_timeoutjoin {env msg sleeptime use_txn} { error_check_good ddb:close [$ddb close] 0 error_check_good pdb:close [$pdb close] 0 error_check_good fdb:close [$fdb close] 0 + error_check_good ddb:remove [$env dbremove dessert.db] 0 + error_check_good pdb:remove [$env dbremove price.db] 0 + error_check_good fdb:remove [$env dbremove fruit.db] 0 } proc rpc_join {env msg sleep fdb pdb ddb use_txn op} { global errorInfo + global is_je_test # # Start a parent and child transaction. We'll do our join in @@ -338,12 +357,19 @@ proc rpc_join {env msg sleep fdb pdb ddb use_txn op} { set txn [$env txn] error_check_good txn_begin [is_valid_txn $txn $env] TRUE set txn_list [linsert $txn_list 0 $txn] - set child0 [$env txn -parent $txn] - error_check_good txn_begin [is_valid_txn $child0 $env] TRUE - set txn_list [linsert $txn_list 0 $child0] - set child1 [$env txn -parent $txn] - error_check_good txn_begin [is_valid_txn $child1 $env] TRUE - set txn_list [linsert $txn_list 0 $child1] + if { !$is_je_test } { + set child0 [$env txn -parent $txn] + error_check_good txn_begin \ + [is_valid_txn $child0 $env] TRUE + set txn_list [linsert $txn_list 0 $child0] + set child1 [$env txn -parent $txn] + error_check_good txn_begin \ + [is_valid_txn $child1 $env] TRUE + set txn_list [linsert $txn_list 0 $child1] + } else { + set child0 $txn + set child1 $txn + } set txncmd "-txn $child0" } else { puts "\t$msg$msgnum: Set up join cursor" diff --git a/db/test/rpc002.tcl b/db/test/rpc002.tcl index 5ec4b7748..505c23e7b 100644 --- a/db/test/rpc002.tcl +++ b/db/test/rpc002.tcl @@ -1,9 +1,9 @@ # Sel the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. 
# -# $Id: rpc002.tcl,v 1.23 2003/09/19 16:53:26 sandstro Exp $ +# $Id: rpc002.tcl,v 1.25 2004/07/15 17:19:12 sue Exp $ # # TEST rpc002 # TEST Test invalid RPC functions and make sure we error them correctly @@ -45,7 +45,7 @@ proc rpc002 { } { { "-lock_conflict {3 {0 0 0 0 0 1 0 1 1}}" "Rpc002.b4"} { "-lock_detect default" "Rpc002.b5"} { "-lock_max 100" "Rpc002.b6"} - { "-mmapsize 100" "Rpc002.b7"} + { "-mpool_mmap_size 100" "Rpc002.b7"} { "-shm_key 100" "Rpc002.b9"} { "-tmp_dir $rpc_testdir" "Rpc002.b10"} { "-txn_max 100" "Rpc002.b11"} diff --git a/db/test/rpc003.tcl b/db/test/rpc003.tcl index 61c45f832..890cbc30f 100644 --- a/db/test/rpc003.tcl +++ b/db/test/rpc003.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2001-2003 +# Copyright (c) 2001-2004 # Sleepycat Software. All rights reserved. # -# $Id: rpc003.tcl,v 11.14 2003/09/19 16:53:26 sandstro Exp $ +# $Id: rpc003.tcl,v 11.16 2004/03/02 18:44:41 mjc Exp $ # # TEST rpc003 # TEST Test RPC and secondary indices. @@ -158,6 +158,8 @@ proc rpc003 { } { } proc rpc003_assoc_err { popen sopen msg } { + global rpc_svc + set pdb [eval $popen] error_check_good assoc_err_popen [is_valid_db $pdb] TRUE @@ -168,11 +170,15 @@ proc rpc003_assoc_err { popen sopen msg } { error_check_good db_associate:rdonly $stat 1 error_check_good db_associate:inval [is_substr $ret invalid] 1 - puts "$msg.1: non-NULL callback" - set stat [catch {eval $pdb associate [callback_n 0] $sdb} ret] - error_check_good db_associate:callback $stat 1 - error_check_good db_associate:rpc \ - [is_substr $ret "not supported in RPC"] 1 + # The Java and JE RPC servers support callbacks. + if { $rpc_svc == "berkeley_db_svc" || \ + $rpc_svc == "berkeley_db_cxxsvc" } { + puts "$msg.1: non-NULL callback" + set stat [catch {eval $pdb associate [callback_n 0] $sdb} ret] + error_check_good db_associate:callback $stat 1 + error_check_good db_associate:inval [is_substr $ret invalid] 1 + } + error_check_good assoc_sclose [$sdb close] 0 error_check_good assoc_pclose [$pdb close] 0 } diff --git a/db/test/rpc004.tcl b/db/test/rpc004.tcl index 6bdf227f4..468e27270 100644 --- a/db/test/rpc004.tcl +++ b/db/test/rpc004.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: rpc004.tcl,v 11.11 2003/09/19 16:53:26 sandstro Exp $ +# $Id: rpc004.tcl,v 11.12 2004/01/28 03:36:29 bostic Exp $ # # TEST rpc004 # TEST Test RPC server and security diff --git a/db/test/rpc005.tcl b/db/test/rpc005.tcl index 6cf0938b0..3111f651b 100644 --- a/db/test/rpc005.tcl +++ b/db/test/rpc005.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. 
# -# $Id: rpc005.tcl,v 11.8 2003/09/19 16:53:26 sandstro Exp $ +# $Id: rpc005.tcl,v 11.11 2004/09/22 18:01:06 bostic Exp $ # # TEST rpc005 # TEST Test RPC server handle ID sharing @@ -12,6 +12,7 @@ proc rpc005 { } { global __debug_print global errorInfo global rpc_svc + global is_hp_test source ./include.tcl puts "Rpc005: RPC server handle sharing" @@ -37,21 +38,31 @@ proc rpc005 { } { -server $rpc_server -txn}] error_check_good lock_env:open [is_valid_env $env] TRUE - puts "\tRpc005.c: Compare identical and different \ - configured envs" - set env_ident [eval {berkdb_env -home $home \ - -server $rpc_server -txn}] - error_check_good lock_env:open [is_valid_env $env_ident] TRUE - - set env_diff [eval {berkdb_env -home $home \ - -server $rpc_server -txn nosync}] - error_check_good lock_env:open [is_valid_env $env_diff] TRUE - - error_check_good ident:id [$env rpcid] [$env_ident rpcid] - error_check_bad diff:id [$env rpcid] [$env_diff rpcid] - - error_check_good envclose [$env_diff close] 0 - error_check_good envclose [$env_ident close] 0 + # You can't open two handles on the same env in + # HP-UX, so skip this piece. + if { $is_hp_test == 1 } { + puts "\tRpc005.c: Skipping for HP-UX." + } else { + puts "\tRpc005.c: Compare identical and different \ + configured envs" + set env_ident [eval {berkdb_env -home $home \ + -server $rpc_server -txn}] + error_check_good \ + lock_env:open [is_valid_env $env_ident] TRUE + + set env_diff [eval {berkdb_env -home $home \ + -server $rpc_server -txn nosync}] + error_check_good \ + lock_env:open [is_valid_env $env_diff] TRUE + + error_check_good \ + ident:id [$env rpcid] [$env_ident rpcid] + error_check_bad \ + diff:id [$env rpcid] [$env_diff rpcid] + + error_check_good envclose [$env_diff close] 0 + error_check_good envclose [$env_ident close] 0 + } puts "\tRpc005.d: Opening a database" set db [eval {berkdb_open -auto_commit -create -btree \ diff --git a/db/test/rpc006.tcl b/db/test/rpc006.tcl new file mode 100644 index 000000000..6cbc6fea1 --- /dev/null +++ b/db/test/rpc006.tcl @@ -0,0 +1,77 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 1996-2006 +# Sleepycat Software. All rights reserved. +# +# $Id: rpc006.tcl,v 11.1 2004/05/06 14:25:15 sue Exp $ +# +# TEST rpc006 +# TEST Test RPC server and multiple operations to server. +# TEST Make sure the server doesn't deadlock itself, but +# TEST returns DEADLOCK to the client. +proc rpc006 { } { + global __debug_on + global __debug_print + global errorInfo + global rpc_svc + source ./include.tcl + + puts "Rpc006: RPC server + multiple operations" + cleanup $testdir NULL + set dpid [rpc_server_start] + puts "\tRpc006.a: Started server, pid $dpid" + + # + # Wrap the test in a catch statement so we can still kill + # the rpc server even if the test fails. + # + set status [catch { + tclsleep 2 + remote_cleanup $rpc_server $rpc_testdir $testdir + + puts "\tRpc006.b: Creating environment" + + set testfile "rpc006.db" + set home [file tail $rpc_testdir] + + set env [eval {berkdb_env -create -mode 0644 -home $home \ + -server $rpc_server -txn}] + error_check_good lock_env:open [is_valid_env $env] TRUE + + # + # NOTE: the type of database doesn't matter, just use btree. 
+ set db [eval {berkdb_open -auto_commit -create -btree \ + -mode 0644} -env $env $testfile] + error_check_good dbopen [is_valid_db $db] TRUE + + puts "\tRpc006.c: Create competing transactions" + set txn1 [$env txn] + set txn2 [$env txn] + error_check_good txn [is_valid_txn $txn1 $env] TRUE + error_check_good txn [is_valid_txn $txn2 $env] TRUE + set key1 "key1" + set key2 "key2" + set data1 "data1" + set data2 "data2" + + puts "\tRpc006.d: Put with both txns to same page. Deadlock" + set ret [$db put -txn $txn1 $key1 $data1] + error_check_good db_put $ret 0 + set res [catch {$db put -txn $txn2 $key2 $data2} ret] + error_check_good db_put2 $res 1 + error_check_good db_putret [is_substr $ret DB_LOCK_DEADLOCK] 1 + error_check_good txn_commit [$txn1 commit] 0 + + puts "\tRpc006.e: Retry after commit." + set res [catch {$db put -txn $txn2 $key2 $data2} ret] + error_check_good db_put2 $res 0 + error_check_good db_putret $ret 0 + error_check_good txn_commit [$txn2 commit] 0 + error_check_good db_close [$db close] 0 + error_check_good env_close [$env close] 0 + } res] + if { $status != 0 } { + puts $res + } + tclkill $dpid +} diff --git a/db/test/rsrc001.tcl b/db/test/rsrc001.tcl index 704c765ee..b44d5ebd6 100644 --- a/db/test/rsrc001.tcl +++ b/db/test/rsrc001.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: rsrc001.tcl,v 11.24 2003/01/08 05:52:43 bostic Exp $ +# $Id: rsrc001.tcl,v 11.25 2004/01/28 03:36:29 bostic Exp $ # # TEST rsrc001 # TEST Recno backing file test. Try different patterns of adding diff --git a/db/test/rsrc002.tcl b/db/test/rsrc002.tcl index 908083333..808159a01 100644 --- a/db/test/rsrc002.tcl +++ b/db/test/rsrc002.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: rsrc002.tcl,v 11.15 2003/01/08 05:52:45 bostic Exp $ +# $Id: rsrc002.tcl,v 11.16 2004/01/28 03:36:29 bostic Exp $ # # TEST rsrc002 # TEST Recno backing file test #2: test of set_re_delim. Specify a backing diff --git a/db/test/rsrc003.tcl b/db/test/rsrc003.tcl index 2aee9db11..b62a6bcb1 100644 --- a/db/test/rsrc003.tcl +++ b/db/test/rsrc003.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: rsrc003.tcl,v 11.6 2003/01/08 05:52:47 bostic Exp $ +# $Id: rsrc003.tcl,v 11.7 2004/01/28 03:36:29 bostic Exp $ # # TEST rsrc003 # TEST Recno backing file test. Try different patterns of adding diff --git a/db/test/rsrc004.tcl b/db/test/rsrc004.tcl index d391cac0a..20f11372a 100644 --- a/db/test/rsrc004.tcl +++ b/db/test/rsrc004.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2001-2003 +# Copyright (c) 2001-2004 # Sleepycat Software. All rights reserved. # -# $Id: rsrc004.tcl,v 11.4 2003/01/08 05:52:49 bostic Exp $ +# $Id: rsrc004.tcl,v 11.5 2004/01/28 03:36:29 bostic Exp $ # # TEST rsrc004 # TEST Recno backing file test for EOF-terminated records. 
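The new rpc006 test above checks that when two client transactions contend for the same page, the RPC server returns DB_LOCK_DEADLOCK to the losing client rather than deadlocking itself, and that the losing put succeeds once the winner commits. As an illustrative aside (not part of the patch), a Tcl client of the RPC server might wrap its puts in a small abort-and-retry loop along these lines; the proc name and retry limit are hypothetical, is_substr is the test-suite helper already used above, and the env/db handles are assumed to have been opened through the server as in rpc006:

	# Illustrative sketch only -- not part of the patch.  Assumes $env and
	# $db were opened through the RPC server (berkdb_env -server ...,
	# berkdb_open -env $env ...) as in rpc006 above.  The proc name and
	# retry limit are hypothetical.
	proc deadlock_put { env db key data {maxretries 5} } {
		for {set i 0} {$i < $maxretries} {incr i} {
			set txn [$env txn]
			set res [catch {$db put -txn $txn $key $data} ret]
			if { $res == 0 } {
				# The put succeeded: commit and stop retrying.
				return [$txn commit]
			}
			# The server handed the error back to this client;
			# abort the losing transaction and retry only if it
			# was a deadlock.
			$txn abort
			if { ![is_substr $ret DB_LOCK_DEADLOCK] } {
				error $ret
			}
		}
		error "deadlock_put: gave up after $maxretries deadlocks"
	}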
diff --git a/db/test/scr003/chk.define b/db/test/scr003/chk.define index 9645470b1..96c45c2b7 100644 --- a/db/test/scr003/chk.define +++ b/db/test/scr003/chk.define @@ -1,6 +1,6 @@ #!/bin/sh - # -# $Id: chk.define,v 1.23 2003/06/30 14:22:57 bostic Exp $ +# $Id: chk.define,v 1.25 2004/10/07 18:54:37 bostic Exp $ # # Check to make sure that all #defines are actually used. # Check to make sure that all #defines start in column 1. @@ -18,10 +18,11 @@ t2=__2 t3=__3 find $d -name '*.c' -o -name '*.cpp' | + sed -e '/\/php_db4\//d' | xargs egrep '^[ ][ ]*#' > $t1 test -s $t1 && { + echo "FAIL: found #defines with leading white space:" cat $t1 - echo "FAIL: found #defines with leading white space." exit 1 } @@ -39,9 +40,11 @@ egrep '^#define' $d/dbinc/*.h $d/dbinc/*.in | -e '/^DB_UNUSED/d' \ -e '/^DEFINE_DB_CLASS/d' \ -e '/^HASH_UNUSED/d' \ + -e '/^HPUX_MUTEX_PAD/d' \ -e '/^LOG_OP/d' \ -e '/^MINFILL/d' \ -e '/^MUTEX_FIELDS/d' \ + -e '/^NAME_TO_SEQUENCE/d' \ -e '/^NCACHED2X/d' \ -e '/^NCACHED30/d' \ -e '/^PAIR_MASK/d' \ @@ -54,6 +57,8 @@ egrep '^#define' $d/dbinc/*.h $d/dbinc/*.in | -e '/^QPAGE_SEC/d' \ -e '/^SIZEOF_PAGE/d' \ -e '/^TAILQ_/d' \ + -e '/^UINT64_FMT/d' \ + -e '/^UINT64_MAX/d' \ -e '/^WRAPPED_CLASS/d' \ -e '/^__BIT_TYPES_DEFINED__/d' \ -e '/^__DBC_INTERNAL/d' \ diff --git a/db/test/scr006/chk.offt b/db/test/scr006/chk.offt index 909d4966a..69bf7cc92 100644 --- a/db/test/scr006/chk.offt +++ b/db/test/scr006/chk.offt @@ -1,6 +1,6 @@ #!/bin/sh - # -# $Id: chk.offt,v 1.10 2003/01/08 20:22:13 bostic Exp $ +# $Id: chk.offt,v 1.13 2004/10/07 18:57:53 bostic Exp $ # # Make sure that no off_t's have snuck into the release. @@ -15,14 +15,30 @@ t=__1 egrep -w off_t $d/*/*.[ch] $d/*/*.in | sed -e "/#undef off_t/d" \ + -e "/db_env_set_func_ftruncate/d" \ + -e "/db_env_set_func_pread/d" \ + -e "/db_env_set_func_pwrite/d" \ + -e "/db_env_set_func_seek/d" \ + -e "/j_ftruncate/d" \ + -e "/j_pread/d" \ + -e "/j_pwrite/d" \ + -e "/j_seek/d" \ -e "/mp_fopen.c:.*can't use off_t's here/d" \ -e "/mp_fopen.c:.*size or type off_t's or/d" \ -e "/mp_fopen.c:.*where an off_t is 32-bits/d" \ -e "/mutex\/tm.c:/d" \ -e "/os_map.c:.*(off_t)0))/d" \ + -e "/os_method.c.*func_ftruncate/d" \ + -e "/os_method.c.*func_pread/d" \ + -e "/os_method.c.*func_pwrite/d" \ + -e "/os_method.c.*func_seek/d" \ + -e "/os_method.c.*__P.*off_t/d" \ -e "/os_rw.c:/d" \ -e "/os_seek.c:.*off_t offset;/d" \ -e "/os_seek.c:.*offset = /d" \ + -e "/os_truncate.c:.*off_t offset;/d" \ + -e "/os_truncate.c:.*off_t stat_offset;/d" \ + -e "/os_truncate.c:.*offset = /d" \ -e "/test_perf\/perf_misc.c:/d" \ -e "/test_server\/dbs.c:/d" \ -e "/test_vxworks\/vx_mutex.c:/d" > $t diff --git a/db/test/scr007/chk.proto b/db/test/scr007/chk.proto index 0593506ad..05e980ce6 100644 --- a/db/test/scr007/chk.proto +++ b/db/test/scr007/chk.proto @@ -1,6 +1,6 @@ #!/bin/sh - # -# $Id: chk.proto,v 1.9 2003/02/27 19:18:39 bostic Exp $ +# $Id: chk.proto,v 1.10 2003/12/08 19:28:26 bostic Exp $ # # Check to make sure that prototypes are actually needed. @@ -26,7 +26,6 @@ egrep '__P' $d/dbinc_auto/*.h | -e '/__db_util_arg/d' \ -e '/__ham_func2/d' \ -e '/__ham_func3/d' \ - -e '/_getpgnos/d' \ -e '/_print$/d' \ -e '/_read$/d' > $t1 diff --git a/db/test/scr008/chk.pubdef b/db/test/scr008/chk.pubdef index a5ba2beae..027e701d5 100644 --- a/db/test/scr008/chk.pubdef +++ b/db/test/scr008/chk.pubdef @@ -3,7 +3,7 @@ # Reconcile the list of public defines with the man pages and the Java files. d=../.. 
-docs=../../../db.docs +docs=$d/../docs_src [ -f $d/LICENSE ] || { echo 'FAIL: cannot find source distribution directory.' @@ -104,8 +104,9 @@ Check that pubdef.in has everything listed in DbConstants.java. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= END_OF_TEXT -f=$d/java/src/com/sleepycat/db/release/DbConstants.java -sed -n -e 's/.*static final int[ ]*\([^ ]*\).*/\1/p' < $f | +j=$d/java/src/com/sleepycat/db +f=$j/internal/DbConstants.java +sed -n -e 's/.*int[ ]\([^ ]*\).*;/\1/p' < $f | while read name; do if `egrep -w "$name" $p > /dev/null`; then : @@ -121,10 +122,10 @@ Check that DbConstants.java has everything listed in pubdef.in. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= END_OF_TEXT -f=$d/java/src/com/sleepycat/db/release/DbConstants.java +f=$j/internal/DbConstants.java sed '/^#/d' $p | while read name isdoc isinc isjava; do - if `egrep -w "static final int[ ]$name =" $f > /dev/null`; then + if `egrep -w "int[ ]$name =" $f > /dev/null`; then [ "X$isjava" != "XJ" ] && { echo "$name should not appear in $f" exitv=1 @@ -139,72 +140,46 @@ done cat < /dev/null`; then - : - else - echo "$f: $name is missing from $p" - exitv=1; - fi -done -sed -n -e 's/^[ ]*\([^ ]*\) = DbConstants\..*/\1/p' < $f | -while read name; do - if `egrep -w "$name" $p > /dev/null`; then - : +sed '/^#/d' $p | +while read name isdoc isinc isjava; do + if `egrep -w "$name" $j/*.java $d/rpc_server/java/*.java \ + $j/internal/Db.java $j/internal/DbEnv.java \ + $j/internal/db_javaJNI.java > /dev/null`; then + [ "X$isjava" != "XJ" ] && { + echo "$name should not appear in the Java API" + exitv=1 + } else - echo "$f: $name is missing from $p" - exitv=1 + [ "X$isjava" = "XJ" ] && { + echo "$name does not appear in the Java API" + exitv=1 + } fi done cat < /dev/null`; then - : - else - echo "SFI (C type): $name does not appear in $f" - exitv=1 - fi;; - J) - if `egrep \ - "public final static int[ ]$name;" $f > /dev/null`; then - : - else - echo "SFI (J type): $name does not appear in $f" + if `egrep -w "$name" $d/libdb_java/db_java_wrap.c > /dev/null`; then + [ "X$isjava" != "XN" ] && [ "X$isjava" != "XJ" ] && { + echo "$name should not appear in the Java native layer" exitv=1 - fi - if `egrep \ - "$name = DbConstants.$name;" $f > /dev/null`; then - : - else - echo "DbConstants: $name does not appear in $f" + } + else + [ "X$isjava" = "XN" ] && { + echo "$name does not appear in the Java native layer" exitv=1 - fi;; - \*) - ;; - *) - echo "Unknown isjava field: $isjava" - exit 1;; - esac + } + fi done exit $exitv diff --git a/db/test/scr009/chk.srcfiles b/db/test/scr009/chk.srcfiles index abff76a61..18fc63692 100644 --- a/db/test/scr009/chk.srcfiles +++ b/db/test/scr009/chk.srcfiles @@ -1,6 +1,6 @@ #!/bin/sh - # -# $Id: chk.srcfiles,v 1.11 2003/01/09 19:13:17 bostic Exp $ +# $Id: chk.srcfiles,v 1.13 2004/10/07 19:16:43 bostic Exp $ # # Check to make sure we haven't forgotten to add any files to the list # of source files Win32 uses to build its dsp files. 
@@ -17,18 +17,25 @@ t1=__1 t2=__2 sed -e '/^[ #]/d' \ + -e '/^db_server_clnt.c/d' \ + -e '/^db_server_svc.c/d' \ + -e '/^db_server_xdr.c/d' \ + -e '/^gen_db_server.c/d' \ -e '/^$/d' < $f | awk '{print $1}' > $t1 find $d -type f | sed -e 's/^\.\.\/\.\.\///' \ -e '/^build[^_]/d' \ -e '/^libdb_java\/java_stat_auto.c/d' \ + -e '/^mod_db4\//d' \ + -e '/^perl\//d' \ + -e '/^php_db4\//d' \ + -e '/^rpc_server\/c\/gen_db_server.c/d' \ -e '/^test\//d' \ -e '/^test_server/d' \ -e '/^test_thread/d' \ -e '/^test_vxworks/d' | egrep '\.c$|\.cpp$|\.def$|\.rc$' | - sed -e '/perl.DB_File\/version.c/d' | sort > $t2 cmp $t1 $t2 > /dev/null || { diff --git a/db/test/scr010/chk.str b/db/test/scr010/chk.str index 493071a49..873fdb012 100644 --- a/db/test/scr010/chk.str +++ b/db/test/scr010/chk.str @@ -1,6 +1,6 @@ #!/bin/sh - # -# $Id: chk.str,v 1.6 2003/01/08 21:04:53 bostic Exp $ +# $Id: chk.str,v 1.9 2004/09/22 18:01:06 bostic Exp $ # # Check spelling in quoted strings. @@ -14,12 +14,13 @@ d=../.. t1=__t1 sed -e '/^#include/d' \ - -e '/revid/d' \ -e '/"/!d' \ -e 's/^[^"]*//' \ -e 's/%s/ /g' \ -e 's/[^"]*$//' \ - -e 's/\\[nt]/ /g' `find $d -name '*.c' -o -name '*.cpp' -o -name '*.java'` | + -e 's/\\[nt]/ /g' \ + `find $d -name '*.[ch]' -o -name '*.cpp' -o -name '*.java' | + sed '/\/perl\//d'` | spell | sort | comm -23 /dev/stdin spell.ok > $t1 test -s $t1 && { @@ -28,4 +29,14 @@ test -s $t1 && { exit 1 } +egrep -h '/\* | \* ' \ + `find $d -name '*.[ch]' -o -name '*.cpp' | sed '/\/perl\//d'` | +spell | sort | comm -23 /dev/stdin spell.ok | tee /tmp/f/1 > $t1 + +test -s $t1 && { + cat $t1 + echo "FAIL: found questionable spelling in comments." + exit 1 +} + exit 0 diff --git a/db/test/scr010/spell.ok b/db/test/scr010/spell.ok index 5039ee05f..36d096d41 100644 --- a/db/test/scr010/spell.ok +++ b/db/test/scr010/spell.ok @@ -1,83 +1,263 @@ +AAAA +ABS +ADDR AES +AIX's AJVX +ALG ALLBACK ALLDB +ALLOC +ALLZEROES API +APIs APP +APPMALLOC +APPNAME +APPREC +ASYNC +ATOI +AUTOCOMMIT +AUTOREMOVE AccessExample +AccesssExample Acflmo +Acknowledgements Aclmop +Aclop Adata +Addr Ahlm Ahm +Antoon +Applock ArithmeticException +Arntzen +ArrayList +AssociateCallbacks +AttachCurrentThread +Aug +BBBB BC BCFILprRsvVxX BCc BDBXXXXX BDBXXXXXX +BEGID BH +BH's BI BII BINTERNAL +BITSPERBLOCK +BKEYDATA +BLKSIZE +BNF +BOTHC +BOVERFLOW +BR +BSIZE +BTCOMPARE +BTMETA BTREE +BTREEMAGIC +BTREEMETA +BTREEOLDVER +BTREEVERSION +BUF +Backoff +Barreto Bc Bdata BerkeleyDB +BigInteger BindingSpeedTest +Bosselaers BtRecExample Btree BtreeStat +BtreeStats +Btrees BulkAccessExample +ByteArray +ByteArrayBinding +ByteArrayFormat +ByteArrayInputStream +ByteArrayOutputStream +C'est CALLBACK +CALLPGIN +CBC CC +CCCC +CCYYMMDDhhmm CD CDB CDS CDdFILTVvX +CFB CFILpRsv CFLprsvVxX CFh +CHARKEY +CHGPG +CHILDCOMMIT +CHILDINFO +CHILDINFOs +CHK +CHKPNT CHKPOINT CHKSUM +CKP +CKPLSN +CL CLASSPATH +CLOSEFP +CLRDBC CLpsvxX +CMP +CNT +COMPQUIET +CONCAT +CONCATDATAKEY +CONCATKEYDATA +CONF CONFIG +CONST +CRTL +CRYPTO +CT +CTX +CXX +CacheFilePriority +CacheFileStats +CacheStats Callback CdFILTvX Ch +ClassCastException +ClassCatalog +ClassCatalogDB +ClassInfo ClassNotFoundException +ClientData CloseHandle +Cmd +Cmp CollectionTest Config +CopyObjBytes +CreateFile +CreateFileMapping +CreateHashEntry +Crypto +CurrentTransaction +Cygwin +DATAHOME DBC +DBCursor DBENV +DBHOME +DBINFO +DBLOCAL +DBLOG +DBM +DBMETA +DBMETASIZE +DBNAME DBP +DBP's +DBREG +DBREP DBS DBSDIR DBT +DBT's +DBTCL +DBTList DBTYPE +DBTs +DBa +DBaa +DBaz +DBba DBcursor +DBz +DEADFILE +DECLS +DEF +DEFMINKEYPAGE +DELNO 
+DGREE +DIR +DIRECTIO +DIRENT +DIST +DJ +DLFCN +DLL +DOALL DONOTINDEX DS +DST +DSYNC DUP +DUPFIRST +DUPID +DUPLAST DUPMASTER +DUPOK +DUPONLY +DUPS +DUPSET DUPSORT +DataBinding +DataBuffer +DataCursor +DataDb +DataEnvironment +DataFormat +DataIndex +DataInput +DataInputStream +DataOutput +DataOutputStream +DataStore +DataThang +DataType +DataView +DatabaseEntry +DatabaseException +DatabaseType Db +DbAppDispatch DbAppendRecno +DbAssociate DbAttachImpl DbBtreeCompare DbBtreePrefix DbBtreeStat +DbClient +DbCount DbDeadlockException +DbDelete +DbDispatcher DbDupCompare DbEnv DbEnvFeedback +DbEnvFeedbackHandler DbErrcall +DbErrorHandler DbException DbFeedback +DbFeedbackHandler +DbGet +DbGetFlags +DbGetOpenFlags +DbGetjoin DbHash DbHashStat +DbInfoDelete DbKeyRange DbLock DbLockNotGrantedException @@ -91,261 +271,1157 @@ DbMpoolFStat DbMpoolFile DbMpoolFileStat DbMpoolStat +DbMultiple +DbMultipleDataIterator +DbMultipleIterator +DbMultipleKeyDataIterator +DbMultipleRecnoDataIterator +DbOpen DbOutputStreamErrcall +DbPanicHandler DbPreplist +DbPut DbQueueStat DbRecoveryInit +DbRemove +DbRename DbRepStat DbRepTransport DbRunRecoveryException DbSecondaryKeyCreate +DbSequence DbServer +DbStat +DbTestUtil +DbTruncate DbTxn DbTxnRecover DbTxnStat +DbUpgrade DbUtil +DbVerify DbXA DbXAResource DbXid Dbc +DbcDup +DbcGet +DbcPut +Dbm +DbmCommand +Dbp +Dbs Dbt +Dbt's +Dbts Dde +Deadfile +DeadlockException +Debian +DeleteInfo Deref'ing +Dups +EAGAIN +EBUSY +ECB +EEXIST +EEXISTS +EFAULT +EGENCHG EID +EINTR +EINVAL EIO EIRT EIi +ELECTINIT +ELECTVOTE +EMSG +ENOENT +ENOMEM +ENT ENV +ENV's +EOFException +EPG +EPGNO +EPHASE +EPRINT +EPRINTed +ETIME +ETIMEDOUT +EXCL +EXT +Eefh Egen +Elp +Endian +EntityBinding +EnvAttr EnvExample +EnvGetEncryptFlags EnvInfoDelete +EnvOpen +EnvRemove +EnvSetErrfile +EnvSetErrpfx +EnvSetFlags +EnvTest +EnvVerbose +Equidistributed +Errcall +Errfile +ErrorFunc +ErrorSetup +Errpfx +EvalObjv +ExampleDatabaseLoad +ExampleDatabaseRead +ExceptionUnwrapper +ExceptionWrapper ExceptionWrapperTest Exp +Externalizable +FALLTHROUGH +FCNTL +FCONTROL FD +FDATASYNC +FF +FH +FILEDONE +FILEID +FILELIST +FILENO FILEOPEN +FILEWRITTEN FIXEDLEN +FIXLEN +FIXME +FMAP +FMT +FN +FNAME FOREACH +FP +FST +FSTAT +FSTATI +FTRUNCATE +FTYPE +FastInputStream +FastOutputStream +FatalRecover Fd Ff Fh +FileIndexHigh +FileIndexLow FileNotFoundException +Fileinfo +FindHashEntry +FooImp +Foreach +ForeignKeyIndex ForeignKeyTest +FreeBSD +FreeBSD's +FreeFunc FreeList +Friedl +GCC +GETALL +GETCWD +GETDYNAMIC +GETNAME +GETOPT +GETRUSAGE +GETTIME +GETTIMEOFDAY +GETUID +GETZIP +Gb +Gcc Gentles +Get's +GetByteArray +GetByteArrayFromObj +GetDiskFreeSpace GetFileInformationByHandle +GetFlags +GetFlagsList +GetGlobPrefix +GetHashValue +GetIndexFromObj +GetIntFromObj GetJavaVM GetJoin +GetLockDetect +GetLongFromObj +GetLsn +GetOpenFlag +GetTimeout +GetUInt +GetUnsignedIntFromObj +GetVerbose +GetVersion Gh +GlobalRefs GotRange +HANDSOFF +HASHC +HASHC's +HASHHDR +HASHINSERT +HASHLOOKUP +HASHMAGIC +HASHMETA +HASHOLDVER +HASHREMOVE +HASHTAB +HASHVERSION +HCommand +HDR +HDRs +HEURCOM +HEURHAZ +HEURMIX +HEURRB +HKEYDATA +HMAC +HMETA +HOFFDUP +HOFFPAGE HOFFSET HOLDELECTION +HPPA +HPUX +HSEARCH +Harbison +HashStats Hashtable +HelloDatabaseWorld +Holder's +Hsearch +IA +IAFTER +IBEFORE +IBTREE +ICURRENT +IDLETIMEOUT +IDs IIL IL +ILOCK ILo ILprR INDX +INFOTYPE +INI INIT +INITED +INITENV +INITSPIN +INMEM +INMEMORY +INORDER +INTTYPES +INVAL +INVALIDID IOException +IOExceptionWrapper +IOSIZE +IPC +IR IREAD +IRECNO +IRGRP +IRIX +IROTH +IRUSR +ISDUP 
ISPERM ISSET +IV's +IW +IWGRP +IWOTH IWR IWRITE +IWUSR +Ick +Ids Ik IllegalArgumentException +IllegalStateException +IncrRefCount IndexOutOfBoundsException +Init +Initialise +IntegrityConstraintException +Interp +InventoryDB +Istmp +ItemNameIndexDB +Itemname +IterDeadlockTest +JDB +JE +JHB JKL +JNI +JNIEnv +JNIs +JOINCUR +JOINENV +JVM JZ JavaIO JavaRPCServer JoinTest +KEYDATA KEYEMPTY KEYEXIST +KEYFIRST +KEYGROUP +KEYGRP +KEYLAST +KEYLEN KL +Kerberos +KeyExtractor KeyRange +KeyRangeException +KeyRangeTest +Krinsky +LANGLVL +LASTCKP LBTREE +LCK +LDF +LDUP +LEAFCHAIN +LEAFLEVEL +LEAFSEEN +LFNAME +LFPREFIX +LG +LGPL +LIBNSL +LL LOCKDOWN +LOCKOBJ +LOCKREGION +LOCKREQ +LOCKTAB +LOCKTIMEOUT +LOCKVERSION LOGC +LOGFILEID +LOGMAGIC +LOGOLDVER +LOGP +LOGREADY +LOGSONLY +LOGVERSION LORDER LRECNO +LRECNODUP LRU +LRUness LSN +LSN's +LSNfile +LSNoffset +LSNs +LSTAT +LV +LWARX +LWP +LWZ +Landon +Lastp Lcom +ListIterator +ListObjAppendElement Ljava Ll +LocalIterator +LockDetect +LockDetectMode LockExample +LockGet +LockMode +LockNotGrantedException +LockOperation +LockRequest +LockRequestMode +LockStat +LockStats +LockTimeout +LockVec +Lockfhp +LogArchive +LogCompare +LogFile +LogFlush +LogGet +LogPut LogRegister +LogSequenceNumber LogStat +LogStats +Logc +LogcGet LpRsS LprRsS +Lsn +LtoR +MALLOC MAMAMIA +MARGO +MASTERELECT +MAXARGS +MAXBQUALSIZE +MAXBTREELEVEL +MAXFIELD +MAXGTRIDSIZE +MAXID +MAXINFOSIZE MAXLOCKS +MAXMMAPSIZE +MAXNR +MAXPATHLEN +MAXSIZE +MAXSIZEONPAGE +MAXTIMEOUT +MAXWRITE +MC MEM +MEMCMP +MEMCPY +MEMMAPPED +MEMMOVE +MEMP +METADIRTY +MFT +MINCACHE +MINFO +MINIT MINLOCKS +MINPAGECACHE MINWRITE +MKS +MLOCK +MMAP MMDDhhmm MNO +MP +MPE +MPFARRAY MPOOL MPOOLFILE +MPOOLFILE's +MPOOLFILEs +MPREG +MPREGs +MSB +MSC +MSEM +MSG +MSGBUF +MSHUTDOWN +MSTR +MSVC +MT +MUNLOCK +MUNMAP +MUTEXes MYDIRECTORY +Makoto +Malloc +MapEntry MapViewOfFile +Margo MarshalledEntity +MarshalledEntityBinding MarshalledKey +MarshalledKeyBinding +MarshalledObject MarshalledTupleData +MarshalledTupleEntry MarshalledTupleKeyEntity +Matsumoto Maxid +Maxkey Mb Mbytes +MemoryException +Mempool +Mersenne Metadata Metapage +MinGW +Minkey +Misc +MixColumn +MoveFile +MoveFileEx +Mp +MpGet +MpInfoDelete +MpStat +MpSync +MpTrickle Mpool MpoolExample +Mpoolfile +Msg +MsgType Mutex +Mv +MyDbs +NB +NBUCKET +NCACHE +NCACHED +NDBM +NDIR +NEEDSPLIT +NEEDSWAP +NEWCLIENT +NEWFILE NEWMASTER NEWSITE NG +NOARCHIVE +NOAUTO +NOBUFFER +NOCOPY NODUP NODUPDATA +NODUPS +NOFILE +NOHEADER +NOKEY +NOLOCK NOLOCKING +NOMIGRATE NOMMAP NOMORE NOORDERCHK +NOOVERWRITE NOPANIC +NOPROMOTE NORUN NOSERVER +NOSORT NOSYNC +NOTA +NOTEXIST NOTFOUND NOTGRANTED NOTPERM +NOTREACHED +NOTSET +NOTUSED NOTYPE +NOTZERO NOWAIT NP +NRECS +NT +NTFS +NULL'ing +NULLXID +NULLing +NULLs +NULs +NUM +NUMWRITES +NameToInfo +NameToPtr +Ndbm +NdbmOpen +NewInfo +NewStringObj +Nishimura NoP NoqV NqV +Nr NrV NsV +Nuff +NullClassCatalog NullPointerException +NullTransactionRunner +Num +Nxt +OBJ +ODDFILESIZE +OFFDUP +OFFPAGE OLDVERSION +ONC +OOB +OP +OPD +OPENFILES +OPFLAGS +OR'd ORDERCHKONLY +OSF +OSO +OUTFILE +OVFL +Obj +ObjectInputStream +ObjectOutputStream +ObjectStreamClass +Objs Offpage +Ol OpenFileMapping +OpenServer +OperationStatus +Ops +Optimised OutOfMemoryError OutputStream +PAGEDONE +PAGEINFO +PAGEINFOs +PAGELIST +PAGEs +PANIC'd +PARAMS +PARENT's +PBNYC +PG +PGDEF +PGINFO PGNO +PGSIZE +PHP PID PKG +PLIST +POPENFILES +POSTDESTROY +POSTLOG +POSTLOGMETA +POSTOPEN +POSTSYNC +PPC +PR +PREAD +PREPLIST PREV +PRI +PRINTFOOTER +PRINTHEADER +PROT +PSIZE +PSTAT +PTHREAD +PWRITE +PaRisc 
+Pagesize +Pagesizes +Params +Part's +PartBinding +PartData PartKey +PartKeyBinding PartValue +Paulo +Perl +Pg +PgInit +PgIsset +Pgin Pgno +Phong +PlatformSDK +Posix +PowerPC +PreparedTransaction +Prev +PrimaryKeyAssigner +Proc +Pthread +PtrToInfo +QAM +QAMDATA +QAMMAGIC +QAMMETA +QAMOLDVER +QAMVERSION +QMETA +QNX +QPAGE QUOTESERVER +QueueStats +RB +RBBASE +RBCOMMFAIL +RBDEADLOCK +RBEND +RBINTEGRITY +RBOTHER +RBPROTO +RBROLLBACK +RBTIMEOUT +RBTRANSIENT +RCLOSE +RDONLY +RDWRMASTER +READONLY +REALLOC +REALLOC'ed +REC +RECLEN RECNO RECNOSYNC RECNUM +RECORDCOUNT +RECOVERYTEST +REGENV +REGINFO +REGIONs +REGMAINT +RELEN +RELIANTUNIX +RENAMEMAGIC +REPLOCKED +REPQUOTE +REPVERSION REQ +REQs +REVERSECONCAT +REVERSEDATA +REVSPLITOFF +RIJNDAEL RINTERNAL +RIW +RLOCK +RM +RMERR +RMFAIL +RMID +RMNAMESZ RMW +RMs +ROP RPC +RPCCLIENT +RPCExample +RPCGEN +RPRINT RT +RTTarget +RUNLOG RUNRECOVERY +RUSAGE +RandCommand RangeExceeded RangeKeyNotEqual RangeNotFound +Realloc +Rec Recno +Recnos +RecordNumberBinding RecordNumberFormat +RecoveryOperation +Reinit +RepElect RepElectResult +RepFlush +RepLimit RepProcessMessage +RepRequest +RepStart RepStat +ReplicationHandleDeadException +ReplicationStats +ReplicationStatus +ResetResult +ReturnSetup +Rieffel +Rijmen +Rijndael +Roeber +Rp +RpcDb +RpcDbEnv +RpcDbTxn +RpcDbc +RtoL +RunRecoveryException RuntimeException +RuntimeExceptionWrapper +Rusage +SCHED +SCO +SCO's +SEGDATA +SEGID +SEM +SEMA +SEP +SEQ SERVERPROG SERVERVERS +SETALL +SETCURSOR SETFD +SETVAL +SGI SHA +SHALLOC +SHASH +SHMEM +SHMGET +SHQUEUE +SIGALRM SIGPIPE +SIZEOF +SKIPFIRSTKEY +SKU +SNPRINTF +SPL +SPLITOLD +SPRINTF SS +SSLeay +SSZ +STAILQ +STARTUPDONE +STAT +STATS +STCWX +STD +STDC +STDERR +STDINT +STK +STR +STRCASECMP +STRDUP +STRLIST +STROFFSET +STRTOUL +STRUCT +STWCX +SUBDB +SWAPBYTES +SWIG's +SWITCHes +SWR +SYSCONF +SampleDatabase +SampleViews +Schlossnagle +SecondaryDeadlockTest +Sedgewick +Seq +SeqGet +SeqGetFlags +SeqOpen +SequenceExample +SequenceStats +SerialBinding SerialBindingTest +SerialFormat +SerialInput +SerialOutput +SerialSerialBinding +SerialSerialKeyExtractor +SetEndOfFile +SetInfoData +SetListElem +SetListElemInt +SetListElemWideInt +SetListRecnoElem +SetMultiList +SetObjResult +ShipmentBinding +ShipmentByPart +ShipmentBySupplier +ShipmentData ShipmentKey +ShipmentKeyBinding ShipmentValue Shm Signalling +SimpleBuffer +Skiplist +Skodon Sleepycat +Something's +SortedMap +SortedSet +Sparc +Splitp +Stat +Stats +Std +Stdout +Steele +StoredClassCatalog StoredClassCatalogTest +StoredClassCatalogTestInit +StoredCollection +StoredCollections +StoredContainer +StoredEntrySet +StoredIterator +StoredKeySet +StoredList +StoredMap +StoredMapEntry +StoredSortedEntrySet +StoredSortedKeySet +StoredSortedMap +StoredSortedValueSet +StoredValueSet +StringBuffer StringDbt Subdatabase +Subdb +Subname +SunOS +SupplierBinding +SupplierByCity +SupplierData SupplierKey +SupplierKeyBinding SupplierValue +SystemInfo +TAILQ +TCHAR +TCL TDS TESTDIR +TESTTESTEST TESTXADIR +THR TID +TLPUT TM +TMASYNC +TMENDRSCAN +TMER +TMERR +TMFAIL +TMJOIN +TMMIGRATE +TMMULTIPLE +TMNOFLAGGS +TMNOFLAGS +TMNOMIGRATE +TMNOWAIT +TMONEPHASE TMP TMPDIR +TMREGISTER +TMRESUME +TMSTARTRSCAN +TMSUCCESS +TMSUSPEND +TMUSEASYNC +TMs TODO +TOPLEVEL +TPC +TPCB TPS +TRU +TRUNC +TRUNCDATA TXN +TXNAPP +TXNHEAD TXNID +TXNLIST +TXNLOGREC +TXNMGR +TXNREGION +TXNS +TXNVERSION TXNs +Takuji Tcl +Tcl's TempFolder TestAppendRecno TestAssociate +TestCallback +TestClassCatalog +TestClosedDb +TestConstruct +TestDataBinding TestDbtFlags +TestEntity 
+TestEntityBinding +TestEnv TestGetSetMethods +TestKeyAssigner +TestKeyExtractor TestKeyRange +TestLockVec TestLogc TestOpenEmpty +TestReplication TestRpcServer +TestSameDbt TestSerial +TestSimpleAccess +TestStat +TestStore +TestTruncate +TestUtil TestXAServlet +Thang +Thies +Threshhold +Throwable +TimeUnits +Tmp +Topher TpcbExample +TransactionRunner +TransactionStats TransactionTest TransactionTests +TransactionWorker +Tru Tt +TupleBinding TupleBindingTest +TupleFormat TupleFormatTest +TupleInput +TupleInputBinding +TupleMarshalledBinding TupleOrderingTest +TupleOutput +TupleSerialBinding +TupleSerialDbFactory TupleSerialDbFactoryTest +TupleSerialEntityBinding +TupleSerialKeyExtractor +TupleSerialMarshalledBinding +TupleSerialMarshalledKeyExtractor +TupleTupleBinding +TupleTupleKeyExtractor +TupleTupleMarshalledBinding +TupleTupleMarshalledKeyExtractor Txn +TxnCheckpoint +TxnInfoDelete +TxnRecover TxnStat +TxnTimeout Txnid Txns +UI UID +UINT +ULONG +UMRW UNAVAIL +UNDEF +UNDOC +UNISTD +UNREF +UOC +UPDATEROOT +UPREFIX +USEC USERMEM +UTF +UTFDataFormatException +UTS +UX Unencrypted +Unicode +UnixLib +UnixWare +Unixware UnknownError UnmapViewOfFile +UnsupportedOperationException UtfOps UtfTest +Util +VC +VER VM +VMPAGESIZE +VRFY +VSNPRINTF +VTALLY VX +Var +Vc +VendorDB +Vo +Voter's Vv VvW VvXxZ Vvw Vx VxWorks +WAITSFOR +WAKEME +WATCOM WLInitialContextFactory +WORDLIST +WRITECURSOR +WRITELOCK +WRITEOPEN +WRNOSYNC +WRONLY +WT +WW +WWRITE Waitsfor +WebLogic +WinNT +WriteFile X's XA +XAER XAException XAResource XID +XIDDATASIZE +XOR'd +XP +XPG +XXX Xid XxZ YIELDCPU YY +YYMMDDhhmm ZED +ZF +Zero'd aa aaA aaB aaC aaD aaa +aaaaaa +aaaaab +aaaaac aab aac aad @@ -354,72 +1430,187 @@ abc abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq abcdef abs +absname +abspath +ac +aca +accessor +ack +acplt +activekids +activep +activepips actualData actualKey acurs +ada +addAll +addfamilylocker addpage addr addrem +addrp adh adj +adjindx adlsVv +admin afterop ahr +alfred +alg +algsetup +alignp alldb alloc +alloc'ed +alloc'ing +alloced +allocs alsVv +alsn amx +antoon anum +aparts +apologise +app's appexit appl appname +appnotes apprec +apps +aq archivedir +areDuplicatesAllowed +areDuplicatesOrdered +areKeysRenumbered arg +argp args +argv arr +arraycopy +arraysz +asites +assertEquals +astubble ata +atoi +atol +att +autoCommit autocommit +autoconf +autoconfig autoremove +avg +awk +baaaaa +backoff +badend badgen badkey +badnum bak +barreto +baseClass bb bba +bcopy bcurs +bd bdb +bdbcmds +beginInclusive beginKey +beq berkdb +berkdbcmds berkeley bfname bfree +bh +bharray +bhfree +bhp +bhwrite +bi +bigint bigpages +bigpair +binding's +bitmasks +bk +blbs +blk +blksize +blockDecrypt +blockEncrypt +blocknum +blocksize +bmeta +bmp +bndx +bne bnum +bosselaers bostic +bp bqual br +broot bs +bshift bsize +bsizep bt btcompare btrec btree +btreemeta +btrees buf +bufp +bufs bufsize +buildpartial +builtin +bumpSize +bval bylsn bypage byteLen +byteOffset +byteorder bytesToChars +bytesize +bytesp byteswap byteswapped bytevalue +cEelmNrtVZ +caaaaa +cachep cachesize cadjust callback +callback's +callbk +calloc +callocs callpgin +carray catalogtest +cbts +cc +ccclose +ccnext +ccset ccurs cd +cdata cdb cdel ceVv @@ -428,257 +1619,883 @@ celmNrtVZ celmNtV celmNtVZ cget +cgetchk +ch +char's charLen +charLength +charOffset charkey charset +checkgen +checklocker chgpg +childcursor +childinc +childproof +childput +chk chkpoint chkpt +chkspace chksum +chmod +chongo +ci +cip +cipherInit +cipherInstance +cipherUpdateRounds ckp +ckplsn 
cksum +cl +classCatalog classID +className +classpath +cleandir +clearIndexKey clearerr +clib +clientData clientrun +clist +clnt +closeevent +closehandle +closeme +cls +cmap +cmd cmdargs +cmds cmp +cmpwi cnt com +comm +compareDuplicates compareproc +comparitors compat +concatdatakey +concatkeydata conf config +const +containsAll +containsKey +containsValue conv +convprintable +copyFrom +copyin +copyleft +copymap +copyout copypage +countElements +countp cp +cpage +cpp +cputchk +cq +cr crdel creat +createflag crypto +cs +csp +ct +ctp +ctp's +ctps +ctx +ctxn curadj +curfile +curinfo +curinval +curlist curlsn +curslen +curslist +cursp +cursq +cxx +cxxproc +cxxthis +cxxutil +dat +dataBinding +dataInput +dataOutput +dataToObject +databuf datalen +datap +datapage +datastr db +dbFileName +dbOpenFlags dbc +dbc's +dbca +dbcb dbcl +dbclear dbclient dbclose +dbcmds +dbcp +dbcursor +dbdata +dbdel dbdemo +dbdp dbe +dbentry dbenv +dbh dbinc +dbinfo +dbinit +dbip +dbj +dbjoin dbkill +dblist +dblistlinks +dblp dbm dbmclose +dbmeta +dbmfp dbminit +dbmp dbname +dbnamep dbobj dbopen dbp +dbp's +dbpp +dbprep +dbps dbq +dbrdonly dbreg dbremove dbrename dbs dbsizes +dbsrv dbt +dbta +dbtb +dbtp dbtruncate +dbts +dbtxn +dbtype +dbuf dbverify dbx +dcursor dd deadfile +deadlocker +deadmap +dec +decrpyting def +defcmp defg +defpfx +defto del +deletable +deleteAction delext delim +delimp +delpg +denom +dereffing +des +deserialize +deserialized +deserializing +dest +detectp dev df dh +diff +difflen dir +directio +dirent +dirf dirfno +dirfree +dirlist dirname +dirp dirs +dirtyRead dist dists +ditem +dl +dlbytes dlen +dlfcn dll +dlopen +dm +dname +dndx +doGet doTimeouts +doWork +dobj docRoot +doevents +donext +doreq +doubleToLongBits +dp +dpagep +dpages +dpair +dpgno +dr +dremoved ds +dsearch dsize +dst +dsync +dtab +dtabsize dup dup'ed dupcompare +duperr dupmaster dupmasters +dupped dups dupset dupsort +duptree +duptype +dwNumberOfProcessors +dx +eax +ebuf +edu efg efh egen eid +eid's +eidp +ek +ele electinit electsend electtally electvote electwait elem +elp +emap +emt +encrpyting encryptaes encryptany +endInclusive endKey endian +endianness +endif +endname +endodata +endofile +endpath +enqueuing +ent entityBinding +entrySet +entryp +enum +enums env +env's +envFlags envcl +envdata +envdp envid +envip +envlock +envp +envpanic +envparent envremove +envrpcserver +envs eof +eor +erlangen +errbuf errcall errfile +errlock errno +errnum +erroring errorret errpfx +errunlock +errx +esat +esp +eval +exactp excl +exe +exid +exnum +expandtab +expr ext extentsize +extentsizep +externalizable +extid +extractIndexKey +fN faq faststat +faultmem +fc +fcchk +fchk fclose +fcn fcntl fcreate fd +fd's +fdatasync +fdm +fdp +fdupcurs +feedback's +ferr ff ffactor +ffactorp fget fh +fhp fid +fids +fileID +fileIDs +filedone +filehandle fileid +fileids +fileinfo +fileinit +filelist +filenamep +filenum fileopen +fileops +fillf +finalcount +findFirst +finddatum +findlastckp +finfo +firstKey firstkey fiv +fixup +fixups +flagN +flagsp +floatToIntBits flushcommit +fmax +fmethod +fn +fname +fnl +fnp +fns +fnum foo fopen +forName +foreignStore +form's format's formatID +fp +fprobe +fptr fput +fq +freakin +free'd +free'ing +freeable freedata +freefamilylocker freelist +freelock +freelocker +freep +fremove freq +friedl +fromInclusive +fromIndex +fromKey +fromMapEntry +fromValue +frotzed +fs fset +fst fstat +fstati fsync +ftruncate ftype +fullhome +fullname func +funcs fv fwd gbytes +gbytesp gc gc'ed +gcc gdb gen +genrand +george getBranchQualifier 
getBytes +getCity +getClassFormat +getCollection getCurrentKey +getData +getDbEnv +getDbt getDbtString +getDetail +getEnvironment +getErrno +getFlags getFormatId getGlobalTransactionId +getIndex +getInstance +getLock +getMode +getNext +getObj +getObject +getOffset +getOp +getPartialLength +getPartialOffset +getPrimaryKeyFormat +getPrimitiveBinding +getRecordNumber +getSize getString +getTimeout +getUserBufferLength +getValue +getValueFormat +getactive +getboth +getbothc +getckp +getcwd +getdata +getdynamic +getenv +getinfo +getjoin +getlocker +getlong +getname +getnext +getno +getobj +getopt +getpageinfo +getpid getrusage +getstack +getsubopt gettime gettimeofday +gettingstarted gettype +getuid +getulong getval +getzip ghi gid gotkey +gotta groupalloc +gsf +gsp gtrid +guesspgsize +hEvent handle's +handleException +happend +hardcode +hardcoding +hasNext +hasPrevious +hashCode +hashhdr +hashinit +hashmeta +hashp hashproc +hc +hcp hcreate hdestroy hdr +hdrbuf +hdrchk +hdrpages +hdrs +headMap +headSet +header's +headp +heldby +helloworld +hf hijkl +himark +histdbt +hlock +hmac +hmeta +holdl +homep +homeroot +hostaddr hostname +hotcopy +hp +hq href +hs hsearch +htab +htonl +httpd +iX +ia icursor +idbase idletimeout +idleto +idmap +idnum +idp ids +idspace idup +idup'ed +iface +ifdef +iff +ifndef +ihold iitem ik +ilock +ilocks inc incfirst +incomp incr +incursor +indexCursor indexKey +indexKeyData +indexKeyFormat +indexKeyOutput +indexKeys +indexOf +indexViews +indexlist indx +info's +infop +informatik +ini init +init'ing +inited +initialSize +inits +initspin inlen +inline +inmem +inmemdbflags +inmemory +ino +inode +inorder inp +inpitem +inputOctets +inregion insdel int +intBitsToFloat intValue +intel +interp +intial +ints +inttypes +inuse +inventoryDB +inventorydb io +ioinfo +iopsize +iosDevFind +ip +ipcs iread +isAutoCommit +isByteLen +isDirtyReadAllowed +isDirtyReadEnabled +isEmpty +isIndexed +isOrdered +isTransactional +isWriteAllowed +isbad +isbigendian isdeleted +isdone +isdst +isdup +isolder +isopd +ispget +isroot +isspace +istmp +isvalid +isync +itemname +itemnameDB itemorder iter +ith iwr iwrite java +java's javax +jbyte +jc +jenv jhi +jl +jlong +jmsjdbc jndi +journaling +jp +jq +jrpcgen +jta kb kbyte kbytes +keio +key's +keyAssigner keyBinding +keyClass +keyExtractor keyFormat +keyInput +keyInstance +keyLen +keyMaterial keyName +keyOutput +keySet +keybuf keyfirst +keyflag keygroup keygroups keygrp keylast +keynum +keyp keyrange +keystr +kgnum +ki +killid killinterval killiteration killtest klNpP klNprRV klNprRs +klinks +kow +kp +kpv krinsky +ks +kuleuven lM lP +lSN lang +last's +lastError +lastIndexOf +lastKey +lastfile lastid +lastpgno +later's +lbtree +lbucket +lc ld +ldata +ldbp +ldbt +ldbtsize +ldcws +ldl +ldstub +le len +lenp les lf +lfhp +lfname lg +lget +lh +lhash +lhi libdb libfile libname +libpthread +libthread +lineno +listIterator +listobj +listp lk +lkrs +ll +lld llsn +llu +lm +ln +lnP +lnsl +loadme localhost localtime +lockForWrite +lockGet +lockVector +lockcount lockdown +locker's +lockerid +lockevent +lockfhp lockid +lockinfo +lockmgr +lockmode +lockobj +lockop +lockreq +lockstep locktimeout +logbuf logc logclean +logdir logfile +logfiles logflush +loggap +logmaxset +logmsg +logrec +logset logsonly +longBitsToDouble lorder +lorderp +lowlsn +lp lpgno +lprint +lput +lrand +lrp +lru lsVv lsn +lsnadd +lsninit +lsnoff +lsnp lsynch lt lu @@ -689,107 +2506,232 @@ luKb luM luMB luMb +lvalue +lwarx +lwp lx mNP mNs machid machtab +maddr +magicno +maintinit maj +majver +makeKey 
makedup malloc +malloc'd +malloc'ed +malloc's +mallocing +mallocs +mapEntry +mapfile margo +markdone +markneeded +markus +marshalIndexKey +marshalled +marshalling +matumoto +maxRetries +maxb maxcommitperflush maxid maxkey +maxkeypage maxlockers maxlocks +maxlsn +maxn maxnactive maxnlockers maxnlocks maxnobjects maxobjects +maxopenfd maxops +maxp +maxperm +maxpg +maxpgno +maxrec +maxsites maxsize maxtimeout +maxto +maxtxn maxtxns maxwrite +maxwrites +mb +mbp +mbucket mbytes +mbytesp +md mem +membar +memcmp +memcmps +memcpy +memmove memp +memset +metachk metadata metaflags metagroup metalsn metapage metasub +metaswap methodID +mf +mfp +mgrp +midpage +millitm mincommitperflush minkey +minkeyp +minkeypage minlocks +minp +minval +minver minwrite minwrites +mip mis misc mjc mkdir +mkdir's mlock mmap +mmap'd +mmap'ing mmapped mmapsize +mmapsizep mmetalsn mmpgno +modeFlag moremiddle +mortem +movl mp mpf +mpfarray +mpfq mpgno +mpip mpool +mpoolfile +mpools +mpreg +mps +msem +msemaphore msg +msg's +msgadd +msgbuf +msgcall +msgfile +msgfp msgs +msgtype +msize +mswap +mt +mti +munlock munmap +mut mutex mutexes mutexlocks +mutexp +muxfile mv mvptr mydrive mydrivexxx +myfree +mylock +myobjc +myval +n'th nO nP nTV nTt naborts nactive +nalloc +namelistp +nameop +namep +namesp +nargc +nargv nbegins nbytes ncache +ncachep ncaches +ncommit ncommits nconflicts +ncurs +ndary ndata ndbm +ndeadalloc ndeadlocks +ndir ndx needswap nelem +nelemp +nentries nevict newalloc newclient +newdata +newdatabase +newfh newfile newitem +newlist newmaster newname +newopd newpage newpgno newsite newsites +newsize +next's +nextIndex nextdup +nextents +nextinfo nextkey nextlsn nextnodup nextpgno +nfid +nfiles ng nitems nkeys +nlist nlockers nlocks nlocktimeouts @@ -798,308 +2740,947 @@ nmodes nnext nnextlsn nnowaits +noWait +noarchive nobjects nobuffer nodup nodupdata +noet nogrant +nohasham nolock nolocking +nolonger +nomem nommap noop nooverwrite +nop nopanic +nopenp +norep nosort nosync +nosystemmem notdurable notfound notgranted +notused +notzero +novrfy nowait nowaits +np npages npgno +nprocs +nr +nread +nreaders nrec nrecords +nrecs +nreg nreleases +nrepeat nrequests nrestores nsites +nsize +nsl +nsleep +nsleepp ntasks nthreads nthrottles +ntohl +ntxns ntxntimeouts +nuls num numberOfKeysRead numdup +numdups +numext +numlocks +nval +nvotes +nwrite +nwritep +nwriters +nwrites +nwrotep +nxt obj +objc objectArrayToString +objectToData +objectToKey +objectToValue +objp +objs +objv +octets +offdup +offp offpage +offsetp +oflags +ohash ok +oldValue +oldValues olddata olditem oldname +oldrec +oldsize +oli +omniti +omode +ondisk +onefile +onint +onoff +onoffp +onpage op opd +openCursors +openFlags +openfd +openfiles +openhandle opensub opflags opmods ops +optarg +opterr +optind +optopt +optreset +orderchkonly +org orig +originfo +origline +origmap +origp os osynch +outBuffer outbuf +outdatedp +outfd +outfile +outfp outlen +outstr ovfl +ovflok +ovflpage ovflpoint ovflsize +ovput ovref +padDecrypt +padEncrypt +padp pageimage +pageinfo +pagelayout +pagelist pagelsn pageno +pagep +pagereq pagesize +pagesizep pagesizes +pagespace +pagetype +pagezero +pagf pagfno +panic'd panic'ing paniccall panicstate +params parentid +parseLong +partsize passwd +passwds +paulo pct +pdbp +pdf +penv perf perfdb +perftool +perms pflag +pfx pg +pgaddr pgcookie +pgdbp pgdbt +pgerr pget +pgfmt pgfree +pggap pgin +pginfo +pgip +pgmax pgno +pgnoadd +pgnoaddr +pgnop +pgnos pgnum pgout +pgread +pgs +pgset pgsize +pgwrite +ph +php +physdel +physwrite pid +pids 
+pinref +pinsert +pitem pk pkey +pkey's +pkeys +pkg +placeholder plist +pmap pn +poff +portmapper pos postdestroy postlog postlogmeta postopen postsync +pp +ppc +pr prR +prdb +prdbt +pre +pread prec predestroy preopen +preparse +preplist +preprocess +preprocessed +preread +prereq +presorted prev +prev's +prevfile +previousIndex prevlsn prevnodup +prflags +prfooter prheader pri primaryKey +primaryKeyData +primaryKeyFormat +primaryKeyInput +primaryKeyThang +primget +printf +printlock printlog +priorityp +prnpage proc procs +proff +progname +progpath +protos +prpage +prqueue +prtree +pseudorandom +psize +psplit +pstat +ptail pthread pthreads +ptr +ptrdiff ptype +putAll putall +putchar +putitem putobj +putop +putpageinfo +putr pv +pwrite qV qam +qammeta +qmeta +qmpf +qnx +qp qs qtest +quV +queuestart +quicksort quotedStr rRV rRs rV rand +randtbl +rbtree +rcon rcuradj +rcursor +rcvd +rdata rdbc rdbenv rdonly +rdump +reacquired +reacquires +readBoolean +readByte +readBytes +readChar +readChars +readDouble +readFloat +readInt +readLong +readShort +readString +readUnsignedByte +readUnsignedInt +readUnsignedShort readd +readdir +readn readonly +readratio realloc +realloc'd +realloc'ed +reallocing rec +recfill +reclen reclength recno +recnop +recnos recnum recnums +recops +record's +recordNumber +recordlen recs +rectype +recvd +refcnt refcount +refcounting +reffed +regids +reginfo regionmax +reglocks +regmutex +regmutexes regop regsize +relen relink rem +remevent +remfile +remfirst +remlock +removeAll +remrem renum +renv +rep's +repdb repl +replication's +replpair +replyp +reppg repquote +repsite +reput +reputpair req +resizep +resync +retPrimaryKey +retValue +retbuf +retcopy +retcount +rethrown +reties +retp +retsp +retval +reverseconcat +reversedata revsplitoff rf +rfd +rfp +rget +rheader +ri +rijmen +rijndael +rijndaelDecrypt +rijndaelDecryptRound +rijndaelEncrypt +rijndaelEncryptRound +rijndaelKeySetupDec +rijndaelKeySetupEnc +ritem +riw +rk rkey +rlen rlsn +rlsnp rm rmdir +rmdir's rmid rmw ro +roff +rollforward rootent rootlsn +rp +rp's +rpath rpc +rpcgen rpcid rpcserver +rprint +rptr +rq +rr +rrecno rs +rsearch +rskey rsplit +rtree rtxn rundb runlog +rusage rw rwrw rwrwrw sS sV sVv +salloc +salvager's +savetime +sched scount +sdb +sdbp seckey secon +secondary's secondaryKeyCreate secs +secsp +sectorsize +segdata +segid +sema +semid +seminfo +semun +sendpages sendproc sep seq +seqnum +seqp +serialobj servlet +setAppDispatch +setAppendRecno +setBtreeCompare +setBtreePrefix +setCacheSize +setData +setDuplicatelicateCompare +setEncrypted setErrorHandler +setErrorPrefix +setFeedback +setFeedbackHandler +setFlags +setHash +setLock +setMode +setObj +setObject +setOffset +setOp +setPanicHandler +setPartialLength +setPartialOffset +setRecno +setRecordNumber +setReplicationLimit +setReplicationTransport +setSize +setTimeout +setUserBufferLength +setValue setflags +setid +setlsn +settimeout setto setval +sexing sgenrand sh shalloc +shalloc'ed +shalloced +sharedb +shareenv +shash shm shmat shmctl shmdt shmem shmget +shmname +shortread shownull +shqueue shr +shreg +siginit +signo +sigresend singleKey +sizeAdded +sizeNeeded +sizefix sizeof +sj +skiplist +skiplists +skodonj +sl +sle sleepycat +slh slumber'd +smap +smax +snapshotting +sniglet +snprintf +sortdups +sourcep +sp +sparc +spawnl +spinlock +spinlocks +spinsp splitdata splitmeta +splitp +sprintf srand +srandom +src +sread +ss +sscanf +sse +sshift +ssize +sslll +sss stat +stati stats +stbar +std stddev stderr +stdfd +stdin +stdint +stdlib stdmode 
stdout +stkgrow +stkrel +stl +storedCollection +storedIterator +storedList +storedMap +storedSet +storedSortedMap +storedSortedSet +stqe +stqh str +strcasecmp strcmp strdup +strdup'ed strerror stringToBytes +stringp strlen +strncasecmp +strncmp +strsep +strtol +strtoul +struct +structs +structure's +sts +stwcx subList +subMap +subSet +subcases +subclassed subdatabase +subdatabase's subdb subdbname +subdbpg +subdbs +subdistribution +subdistributions +submap +subname +subpackages +subtransaction +subtransactions +sullivan sv svc +sw swigCPtr +swpb +sync'd +sync'ed +synced +syncs +sysattach +sysconf +sysdetach +sz +t's tV tVZ +tableent +tablesize +tailMap +tailSet tailq tas +taskLock +tc tcl tcp +td +tdata tearDown +terra testName +testcopy testdata +testdestdir testdigits +testdocopy +tffsp +tfsp +thang +theVendor +thies thr +thread's threadID +threadedness tid +tids tiebreaker +tiebreaking +timeoutp timestamp +timeval +timout +timouts tlen tm +tmap +tmax tmp tmpdir +tmpmap +tmpname tmutex tnum +toArray +toBuf toHexString +toInclusive +toIndex +toKey +toList +toMapEntry +toString +toValue +toched +todo +toobig tp tpcb tput +tqe +tqh +tr +transport's treeorder +tregion +trinomials +trunc +truncdata +ts +tsl +tstart ttpcbddlk ttpcbi ttpcbr ttype +tv tx txn +txnal +txnapp txnarray txnid +txnidcl +txnids +txnip +txnlist +txnp txns txntimeout txt +ua ubell ud +udbt +ufid +ufree uid +uintmax +uintptr +ul ulen +ulens +ulinks +umalloc uncorrect +undef undeleting +undodup +undosplit +uni +unicode unindexed uniq +unistd unix unmap +unmapfile +unmark +unmarshalData +unmarshalled unpinned +unpinning +unref +unregistry upd +updateDatabaseEntry +updateDbt +updateckp +upg upi +urealloc +useCurrentKey +usePrimaryKey +useValue usec usecs +usecsp +usermem usr +usrAppInit util vVxXZ vVxXyZ vZ +va val +value's valueBinding +valueData +valueEntityBinding valueFormat +valueInput +valueInputOutput valueOf +valueOutput var +variadic +vars +vdp +vdp's vec +vendorDB +vendordb ver +verbage vflag +vica +view's vrfy +vrfyutil +vsnprintf +vsprintf +vtruncate vw vx vxmutex vxtmp +vxtpcb +vxworks +wDay +wHour +wMinute +wMonth +wSecond +wYear +waitl +waitlist waitsfor walkdupint walkpages +walkqueue wb wc wcount weblogic +weblogic's +webquill +windsh +winnt +wmask +wnt wordlist +workcurs +writeAllowed +writeBoolean +writeByte +writeBytes +writeChar +writeChars +writeCursor +writeDouble +writeFloat +writeInt +writeLong +writeShort +writeString +writeUnsignedByte +writeUnsignedInt +writeUnsignedShort writeable +writeback +writelock +writelocks +wrlock wrnosync +wsize wt +wthread xa +xact +xalinks +xchg +xchgb +xdr xid +xids xml +xor +xorl xxx xyz yieldcpu zend +zero'd +zeroeth +zerofill +zipcode diff --git a/db/test/scr011/chk.tags b/db/test/scr011/chk.tags index 65e9288ea..f1d680ac7 100644 --- a/db/test/scr011/chk.tags +++ b/db/test/scr011/chk.tags @@ -1,6 +1,6 @@ #!/bin/sh - # -# $Id: chk.tags,v 1.12 2003/02/27 19:44:39 bostic Exp $ +# $Id: chk.tags,v 1.14 2004/10/07 20:30:32 bostic Exp $ # # Check to make sure we don't need any more symbolic links to tags files. 
@@ -20,12 +20,15 @@ t2=__2 -e '/^CVS$/d' \ -e '/^build_vxworks$/d' \ -e '/^build_win32$/d' \ + -e '/^build_win64$/d' \ -e '/^docs$/d' \ -e '/^docs_book$/d' \ -e '/^docs_src$/d' \ - -e '/^java$/d' \ -e '/^examples_java$/d' \ + -e '/^java$/d' \ + -e '/^mod_db4$/d' \ -e '/^perl$/d' \ + -e '/^php_db4$/d' \ -e '/^test$/d' \ -e '/^test_cxx$/d' \ -e '/^test_purify$/d' \ diff --git a/db/test/scr012/chk.vx_code b/db/test/scr012/chk.vx_code index 8d7ca608f..8b7916053 100644 --- a/db/test/scr012/chk.vx_code +++ b/db/test/scr012/chk.vx_code @@ -1,6 +1,6 @@ #!/bin/sh - # -# $Id: chk.vx_code,v 1.6 2002/03/27 20:20:25 bostic Exp $ +# $Id: chk.vx_code,v 1.7 2004/09/16 17:21:11 bostic Exp $ # # Check to make sure the auto-generated utility code in the VxWorks build # directory compiles. @@ -18,6 +18,9 @@ d=../.. rm -f t.c t1.c t2.c +F="$d/clib/getopt.c $d/common/util_arg.c $d/common/util_cache.c + $d/common/util_log.c $d/common/util_sig.c $d/*/*_autop.c" + header() { echo "int" @@ -34,12 +37,7 @@ for i in db_archive db_checkpoint db_deadlock db_dump db_load \ db_printlog db_recover db_stat db_upgrade db_verify dbdemo; do echo " compiling build_vxworks/$i" (cat $d/build_vxworks/$i/$i.c; header $i) > t.c - if cc -Wall -I.. -I$d t.c \ - $d/clib/getopt.c \ - $d/common/util_arg.c \ - $d/common/util_cache.c \ - $d/common/util_log.c \ - $d/common/util_sig.c ../libdb.a -o t; then + if cc -Wall -I.. -I$d t.c $F ../libdb.a -o t; then : else echo "FAIL: unable to compile $i" @@ -53,12 +51,7 @@ done (cat t2.c t1.c; echo "return (0); }") > t.c echo " compiling build_vxworks utility composite" -if cc -Dlint -Wall -I.. -I$d t.c \ - $d/clib/getopt.c \ - $d/common/util_arg.c \ - $d/common/util_cache.c \ - $d/common/util_log.c \ - $d/common/util_sig.c ../libdb.a -o t; then +if cc -Dlint -Wall -I.. -I$d t.c $F ../libdb.a -o t; then : else echo "FAIL: unable to compile utility composite" diff --git a/db/test/scr013/chk.stats b/db/test/scr013/chk.stats index 71894930b..0b320757b 100644 --- a/db/test/scr013/chk.stats +++ b/db/test/scr013/chk.stats @@ -1,6 +1,6 @@ #!/bin/sh - # -# $Id: chk.stats,v 1.7 2003/11/21 02:31:41 bostic Exp $ +# $Id: chk.stats,v 1.8 2004/09/28 18:29:59 bostic Exp $ # # Check to make sure all of the stat structure members are included in # all of the possible formats. 
@@ -58,20 +58,23 @@ inc() done } -inc "__db_bt_stat" \ - "$d/tcl/tcl_db.c $d/db_stat/db_stat.c $docs/db/db_stat.so" -inc "__db_h_stat" \ - "$d/tcl/tcl_db.c $d/db_stat/db_stat.c $docs/db/db_stat.so" -inc "__db_qam_stat" \ - "$d/tcl/tcl_db.c $d/db_stat/db_stat.c $docs/db/db_stat.so" +inc "__db_bt_stat" "$d/tcl/tcl_db.c $d/btree/bt_stat.c $docs/db/db_stat.so" +inc "__db_h_stat" "$d/tcl/tcl_db.c $d/hash/hash_stat.c $docs/db/db_stat.so" inc __db_lock_stat \ - "$d/tcl/tcl_lock.c $d/db_stat/db_stat.c $docs/lock/lock_stat.so" -inc __db_log_stat \ - "$d/tcl/tcl_log.c $d/db_stat/db_stat.c $docs/log/log_stat.so" + "$d/tcl/tcl_lock.c $d/lock/lock_stat.c $docs/lock/lock_stat.so" +inc __db_log_stat "$d/tcl/tcl_log.c $d/log/log_stat.c $docs/log/log_stat.so" +inc __db_mpool_fstat \ + "$d/tcl/tcl_mp.c $d/mp/mp_stat.c $docs/memp/memp_stat.so" inc __db_mpool_stat \ - "$d/tcl/tcl_mp.c $d/db_stat/db_stat.c $docs/memp/memp_stat.so" + "$d/tcl/tcl_mp.c $d/mp/mp_stat.c $docs/memp/memp_stat.so" +inc "__db_qam_stat" \ + "$d/tcl/tcl_db.c $d/qam/qam_stat.c $docs/db/db_stat.so" +inc __db_rep_stat \ + "$d/tcl/tcl_rep.c $d/rep/rep_stat.c $docs/rep/rep_stat.so" +inc __db_seq_stat \ + "$d/tcl/tcl_seq.c $d/sequence/seq_stat.c $docs/seq/seq_stat.so" inc __db_txn_stat \ - "$d/tcl/tcl_txn.c $d/db_stat/db_stat.c $docs/txn/txn_stat.so" + "$d/tcl/tcl_txn.c $d/txn/txn_stat.c $docs/txn/txn_stat.so" # Check to make sure the elements from a man page appears in db.in. man() @@ -90,26 +93,33 @@ man() done } -sed -e '/m4_stat(/!d' \ - -e 's/.*m4_stat(\([^)]*\)).*/\1/' < $docs/db/db_stat.so > $t +sed -e '/m4_field(/!d' \ + -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/db/db_stat.so > $t man "checking db_stat.so against db.h" -sed -e '/m4_stat(/!d' \ - -e 's/.*m4_stat(\([^)]*\)).*/\1/' \ - -e 's/.* //' < $docs/lock/lock_stat.so > $t +sed -e '/m4_field(/!d' \ + -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/lock/lock_stat.so > $t man "checking lock_stat.so against db.h" -sed -e '/m4_stat[12](/!d' \ - -e 's/.*m4_stat[12](\([^)]*\)).*/\1/' < $docs/log/log_stat.so > $t +sed -e '/m4_field(/!d' \ + -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/log/log_stat.so > $t man "checking log_stat.so against db.h" -sed -e '/m4_stat[123](/!d' \ - -e 's/.*m4_stat[123](\([^)]*\)).*/\1/' < $docs/memp/memp_stat.so > $t +sed -e '/m4_field(/!d' \ + -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/memp/memp_stat.so > $t man "checking memp_stat.so against db.h" -sed -e '/m4_stat(/!d' \ - -e 's/.*m4_stat(.*, \([^)]*\)).*/\1/' \ - -e 's/__[LR]B__//g' < $docs/txn/txn_stat.so > $t +sed -e '/m4_field(/!d' \ + -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/rep/rep_stat.so > $t +man "checking rep_stat.so against db.h" + +sed -e '/m4_field(/!d' \ + -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/seq/seq_stat.so > $t +man "checking seq_stat.so against db.h" + +sed -e '/m4_field(/!d' \ + -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' \ + -e 's/__LB__.*//' < $docs/txn/txn_stat.so > $t man "checking txn_stat.so against db.h" exit $exitv diff --git a/db/test/scr015/TestConstruct01.cpp b/db/test/scr015/TestConstruct01.cpp index 17bebf5f1..0b0495ce9 100644 --- a/db/test/scr015/TestConstruct01.cpp +++ b/db/test/scr015/TestConstruct01.cpp @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2000-2003 + * Copyright (c) 2000-2004 * Sleepycat Software. All rights reserved. 
* - * $Id: TestConstruct01.cpp,v 1.7 2003/01/11 16:09:52 dda Exp $ + * $Id: TestConstruct01.cpp,v 1.8 2004/01/28 03:36:33 bostic Exp $ */ /* diff --git a/db/test/scr015/TestGetSetMethods.cpp b/db/test/scr015/TestGetSetMethods.cpp index a38592522..1d896766d 100644 --- a/db/test/scr015/TestGetSetMethods.cpp +++ b/db/test/scr015/TestGetSetMethods.cpp @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2000-2003 + * Copyright (c) 2000-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestGetSetMethods.cpp,v 1.5 2003/01/08 05:54:20 bostic Exp $ + * $Id: TestGetSetMethods.cpp,v 1.6 2004/01/28 03:36:33 bostic Exp $ */ /* diff --git a/db/test/scr015/TestKeyRange.cpp b/db/test/scr015/TestKeyRange.cpp index c0684340f..d875cb20b 100644 --- a/db/test/scr015/TestKeyRange.cpp +++ b/db/test/scr015/TestKeyRange.cpp @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestKeyRange.cpp,v 1.5 2003/01/08 05:54:20 bostic Exp $ + * $Id: TestKeyRange.cpp,v 1.6 2004/01/28 03:36:33 bostic Exp $ */ /* diff --git a/db/test/scr015/TestLogc.cpp b/db/test/scr015/TestLogc.cpp index a609443e0..636db4530 100644 --- a/db/test/scr015/TestLogc.cpp +++ b/db/test/scr015/TestLogc.cpp @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2000-2003 + * Copyright (c) 2000-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestLogc.cpp,v 1.7 2003/01/08 05:54:20 bostic Exp $ + * $Id: TestLogc.cpp,v 1.8 2004/01/28 03:36:33 bostic Exp $ */ /* diff --git a/db/test/scr015/TestSimpleAccess.cpp b/db/test/scr015/TestSimpleAccess.cpp index 9c4693391..8415cda78 100644 --- a/db/test/scr015/TestSimpleAccess.cpp +++ b/db/test/scr015/TestSimpleAccess.cpp @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2000-2003 + * Copyright (c) 2000-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestSimpleAccess.cpp,v 1.6 2003/01/08 05:54:21 bostic Exp $ + * $Id: TestSimpleAccess.cpp,v 1.7 2004/01/28 03:36:33 bostic Exp $ */ /* diff --git a/db/test/scr015/TestTruncate.cpp b/db/test/scr015/TestTruncate.cpp index e88f30a95..54ecf81c8 100644 --- a/db/test/scr015/TestTruncate.cpp +++ b/db/test/scr015/TestTruncate.cpp @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2000-2003 + * Copyright (c) 2000-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestTruncate.cpp,v 1.6 2003/01/08 05:54:21 bostic Exp $ + * $Id: TestTruncate.cpp,v 1.7 2004/01/28 03:36:34 bostic Exp $ */ /* diff --git a/db/test/scr015/chk.cxxtests b/db/test/scr015/chk.cxxtests index 1534fe932..3d1e3947c 100644 --- a/db/test/scr015/chk.cxxtests +++ b/db/test/scr015/chk.cxxtests @@ -1,15 +1,17 @@ #!/bin/sh - # -# $Id: chk.cxxtests,v 1.7 2003/11/21 02:36:36 bostic Exp $ +# $Id: chk.cxxtests,v 1.8 2004/09/28 19:58:42 mjc Exp $ # # Check to make sure that regression tests for C++ run. TEST_CXX_SRCDIR=../test/scr015 # must be a relative directory # All paths must be relative to a subdirectory of the build directory -LIBS="-L.. -ldb -ldb_cxx" +LIBS="-L.. -ldb_cxx" CXXFLAGS="-I.. -I../../dbinc" +[ `uname` = "Linux" ] && LIBS="$LIBS -lpthread" + # Test must be run from a local build directory, not from a test # directory. cd .. 
diff --git a/db/test/scr016/CallbackTest.testout b/db/test/scr016/CallbackTest.testout index 68797d4a2..447888285 100644 --- a/db/test/scr016/CallbackTest.testout +++ b/db/test/scr016/CallbackTest.testout @@ -6,13 +6,11 @@ put 6 compare function called key6, key3 put 9 +compare function called + key9, key3 compare function called key9, key6 put 2 -compare function called - key2, key9 -compare function called - key2, key0 compare function called key2, key6 compare function called @@ -34,10 +32,6 @@ compare function called compare function called key8, key6 put 1 -compare function called - key1, key9 -compare function called - key1, key0 compare function called key1, key5 compare function called diff --git a/db/test/scr016/TestAppendRecno.java b/db/test/scr016/TestAppendRecno.java index ebdf562ae..4237b99db 100644 --- a/db/test/scr016/TestAppendRecno.java +++ b/db/test/scr016/TestAppendRecno.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestAppendRecno.java,v 1.5 2003/01/08 05:54:21 bostic Exp $ + * $Id: TestAppendRecno.java,v 1.6 2004/01/28 03:36:34 bostic Exp $ */ package com.sleepycat.test; diff --git a/db/test/scr016/TestAssociate.java b/db/test/scr016/TestAssociate.java index 99c986b11..80451d1e6 100644 --- a/db/test/scr016/TestAssociate.java +++ b/db/test/scr016/TestAssociate.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestAssociate.java,v 1.7 2003/06/18 18:46:48 gburd Exp $ + * $Id: TestAssociate.java,v 1.8 2004/01/28 03:36:34 bostic Exp $ */ package com.sleepycat.test; diff --git a/db/test/scr016/TestCallback.java b/db/test/scr016/TestCallback.java index e396d5261..b89856cda 100644 --- a/db/test/scr016/TestCallback.java +++ b/db/test/scr016/TestCallback.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestCallback.java,v 1.2 2003/10/24 01:23:19 mjc Exp $ + * $Id: TestCallback.java,v 1.3 2004/01/28 03:36:34 bostic Exp $ */ /* diff --git a/db/test/scr016/TestCallback.testout b/db/test/scr016/TestCallback.testout index 43692818d..9e67a17bd 100644 --- a/db/test/scr016/TestCallback.testout +++ b/db/test/scr016/TestCallback.testout @@ -4,7 +4,6 @@ before compare CALLBACK: error(null, "DB->set_bt_compare: method not permitted after handle's open method") got expected exception: java.lang.IllegalArgumentException: Invalid argument after compare -CALLBACK: error(null, "PANIC_SET") before panic CALLBACK: error(null, "PANIC: fatal region error detected; run recovery") CALLBACK: panic(DbEnv, com.sleepycat.db.DbRunRecoveryException: DB_RUNRECOVERY: Fatal error, run database recovery: DB_RUNRECOVERY: Fatal error, run database recovery) diff --git a/db/test/scr016/TestClosedDb.java b/db/test/scr016/TestClosedDb.java index d94b90fc1..50ccdcc88 100644 --- a/db/test/scr016/TestClosedDb.java +++ b/db/test/scr016/TestClosedDb.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. 
* - * $Id: TestClosedDb.java,v 1.7 2003/10/24 01:23:19 mjc Exp $ + * $Id: TestClosedDb.java,v 1.8 2004/01/28 03:36:34 bostic Exp $ */ /* diff --git a/db/test/scr016/TestConstruct01.java b/db/test/scr016/TestConstruct01.java index f7623051c..6309409c7 100644 --- a/db/test/scr016/TestConstruct01.java +++ b/db/test/scr016/TestConstruct01.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2000-2003 + * Copyright (c) 2000-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestConstruct01.java,v 1.9 2003/01/27 03:38:29 mjc Exp $ + * $Id: TestConstruct01.java,v 1.10 2004/01/28 03:36:34 bostic Exp $ */ /* diff --git a/db/test/scr016/TestConstruct02.java b/db/test/scr016/TestConstruct02.java index 467d6322e..2e55cfc6f 100644 --- a/db/test/scr016/TestConstruct02.java +++ b/db/test/scr016/TestConstruct02.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2000-2003 + * Copyright (c) 2000-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestConstruct02.java,v 1.7 2003/01/08 05:54:25 bostic Exp $ + * $Id: TestConstruct02.java,v 1.8 2004/01/28 03:36:34 bostic Exp $ */ /* diff --git a/db/test/scr016/TestDbtFlags.java b/db/test/scr016/TestDbtFlags.java index 22a592f57..0e2110ad1 100644 --- a/db/test/scr016/TestDbtFlags.java +++ b/db/test/scr016/TestDbtFlags.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestDbtFlags.java,v 1.7 2003/09/04 23:41:20 bostic Exp $ + * $Id: TestDbtFlags.java,v 1.8 2004/01/28 03:36:34 bostic Exp $ */ package com.sleepycat.test; diff --git a/db/test/scr016/TestGetSetMethods.java b/db/test/scr016/TestGetSetMethods.java index 1ac9d5926..06a52c5ad 100644 --- a/db/test/scr016/TestGetSetMethods.java +++ b/db/test/scr016/TestGetSetMethods.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2000-2003 + * Copyright (c) 2000-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestGetSetMethods.java,v 1.6 2003/05/28 08:29:46 mjc Exp $ + * $Id: TestGetSetMethods.java,v 1.7 2004/01/28 03:36:34 bostic Exp $ */ /* diff --git a/db/test/scr016/TestKeyRange.java b/db/test/scr016/TestKeyRange.java index 43003ab62..57a776b01 100644 --- a/db/test/scr016/TestKeyRange.java +++ b/db/test/scr016/TestKeyRange.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestKeyRange.java,v 1.6 2003/05/06 17:09:43 dda Exp $ + * $Id: TestKeyRange.java,v 1.7 2004/01/28 03:36:34 bostic Exp $ */ /* diff --git a/db/test/scr016/TestLockVec.java b/db/test/scr016/TestLockVec.java index 4b0abc897..b14a20c68 100644 --- a/db/test/scr016/TestLockVec.java +++ b/db/test/scr016/TestLockVec.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestLockVec.java,v 1.6 2003/05/06 17:09:43 dda Exp $ + * $Id: TestLockVec.java,v 1.7 2004/01/28 03:36:34 bostic Exp $ */ /* diff --git a/db/test/scr016/TestLogc.java b/db/test/scr016/TestLogc.java index 2914d5e45..8f4068318 100644 --- a/db/test/scr016/TestLogc.java +++ b/db/test/scr016/TestLogc.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. 
* - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestLogc.java,v 1.8 2003/01/08 05:54:26 bostic Exp $ + * $Id: TestLogc.java,v 1.9 2004/01/28 03:36:34 bostic Exp $ */ /* diff --git a/db/test/scr016/TestOpenEmpty.java b/db/test/scr016/TestOpenEmpty.java index e83bff323..ab5fb5a8d 100644 --- a/db/test/scr016/TestOpenEmpty.java +++ b/db/test/scr016/TestOpenEmpty.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestOpenEmpty.java,v 1.7 2003/09/04 23:41:21 bostic Exp $ + * $Id: TestOpenEmpty.java,v 1.8 2004/01/28 03:36:34 bostic Exp $ */ package com.sleepycat.test; diff --git a/db/test/scr016/TestReplication.java b/db/test/scr016/TestReplication.java index 665c0b0c6..e20b9a929 100644 --- a/db/test/scr016/TestReplication.java +++ b/db/test/scr016/TestReplication.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestReplication.java,v 1.6 2003/09/04 23:41:21 bostic Exp $ + * $Id: TestReplication.java,v 1.7 2004/01/28 03:36:34 bostic Exp $ */ /* diff --git a/db/test/scr016/TestRpcServer.java b/db/test/scr016/TestRpcServer.java index 69ec54fe1..ee040f0cf 100644 --- a/db/test/scr016/TestRpcServer.java +++ b/db/test/scr016/TestRpcServer.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestRpcServer.java,v 1.4 2003/01/08 05:54:28 bostic Exp $ + * $Id: TestRpcServer.java,v 1.5 2004/01/28 03:36:34 bostic Exp $ */ package com.sleepycat.test; diff --git a/db/test/scr016/TestSameDbt.java b/db/test/scr016/TestSameDbt.java index c8f3b55a3..28a46af31 100644 --- a/db/test/scr016/TestSameDbt.java +++ b/db/test/scr016/TestSameDbt.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestSameDbt.java,v 1.5 2003/01/08 05:54:28 bostic Exp $ + * $Id: TestSameDbt.java,v 1.6 2004/01/28 03:36:34 bostic Exp $ */ /* diff --git a/db/test/scr016/TestSimpleAccess.java b/db/test/scr016/TestSimpleAccess.java index f0d7f9384..72886b5d2 100644 --- a/db/test/scr016/TestSimpleAccess.java +++ b/db/test/scr016/TestSimpleAccess.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestSimpleAccess.java,v 1.6 2003/01/08 05:54:29 bostic Exp $ + * $Id: TestSimpleAccess.java,v 1.7 2004/01/28 03:36:34 bostic Exp $ */ /* diff --git a/db/test/scr016/TestStat.java b/db/test/scr016/TestStat.java index d7436a656..84add875e 100644 --- a/db/test/scr016/TestStat.java +++ b/db/test/scr016/TestStat.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. 
* - * $Id: TestStat.java,v 1.9 2003/08/07 15:48:03 mjc Exp $ + * $Id: TestStat.java,v 1.10 2004/01/28 03:36:34 bostic Exp $ */ /* diff --git a/db/test/scr016/TestTruncate.java b/db/test/scr016/TestTruncate.java index 6f43d9920..c84f68db4 100644 --- a/db/test/scr016/TestTruncate.java +++ b/db/test/scr016/TestTruncate.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestTruncate.java,v 1.7 2003/05/06 17:09:43 dda Exp $ + * $Id: TestTruncate.java,v 1.8 2004/01/28 03:36:34 bostic Exp $ */ /* diff --git a/db/test/scr016/TestUtil.java b/db/test/scr016/TestUtil.java index 7f4b9f797..799d326cd 100644 --- a/db/test/scr016/TestUtil.java +++ b/db/test/scr016/TestUtil.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestUtil.java,v 1.4 2003/09/04 23:41:21 bostic Exp $ + * $Id: TestUtil.java,v 1.5 2004/01/28 03:36:34 bostic Exp $ */ /* diff --git a/db/test/scr016/TestXAServlet.java b/db/test/scr016/TestXAServlet.java index b81d1d1b6..29545b3b9 100644 --- a/db/test/scr016/TestXAServlet.java +++ b/db/test/scr016/TestXAServlet.java @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2003 + * Copyright (c) 1997-2004 * Sleepycat Software. All rights reserved. * - * $Id: TestXAServlet.java,v 1.3 2003/09/04 23:41:21 bostic Exp $ + * $Id: TestXAServlet.java,v 1.4 2004/01/28 03:36:34 bostic Exp $ */ /* diff --git a/db/test/scr019/chk.include b/db/test/scr019/chk.include index 444217bed..edd2bf6b4 100644 --- a/db/test/scr019/chk.include +++ b/db/test/scr019/chk.include @@ -1,6 +1,6 @@ #!/bin/sh - # -# $Id: chk.include,v 1.3 2002/03/27 04:33:09 bostic Exp $ +# $Id: chk.include,v 1.4 2004/10/07 20:34:39 bostic Exp $ # # Check for inclusion of files already included in db_int.h. @@ -29,8 +29,12 @@ sed -e '/^build/d' \ -e '/^examples_c/d' \ -e '/^libdb_java.*errno.h/d' \ -e '/^libdb_java.*java_util.h/d' \ + -e '/^mod_db4/d' \ + -e '/^mutex\/tm.c/d' \ + -e '/^perl/d' \ + -e '/^php_db4/d' \ -e '/^test_/d' \ - -e '/^mutex\/tm.c/d' > $t2 + > $t2 [ -s $t2 ] && { echo 'FAIL: found extraneous includes in the source' diff --git a/db/test/scr021/chk.flags b/db/test/scr021/chk.flags index f0be09283..fe291b577 100644 --- a/db/test/scr021/chk.flags +++ b/db/test/scr021/chk.flags @@ -1,6 +1,6 @@ #!/bin/sh - # -# $Id: chk.flags,v 1.10 2003/09/30 19:33:14 bostic Exp $ +# $Id: chk.flags,v 1.11 2004/03/12 14:57:15 bostic Exp $ # # Check flag name-spaces. 
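The chk.include changes above extend the list of trees that may include headers already provided by db_int.h: mod_db4, perl and php_db4 join the existing build, examples_c, libdb_java and mutex/tm.c exceptions. A rough sketch of that pattern is below; it is illustrative only, and the header names in the egrep expression are assumed rather than taken from the real script.

#!/bin/sh -
# Illustrative sketch of the chk.include idea; the header names in the
# egrep pattern are assumed, not copied from the real script.
d=../..
t1=__1
t2=__2

# collect every #include of a header that db_int.h is expected to provide
egrep '#include.*(errno\.h|string\.h|stdlib\.h)' $d/*/*.[ch] |
sed -e 's;\.\./\.\./;;' > $t1

# drop the trees that are allowed to pull in system headers on their own
sed -e '/^build/d' \
    -e '/^mod_db4/d' \
    -e '/^perl/d' \
    -e '/^php_db4/d' \
    -e '/^test_/d' < $t1 > $t2

[ -s $t2 ] && {
	echo 'FAIL: found extraneous includes in the source'
	cat $t2
	exit 1
}
exit 0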
@@ -25,9 +25,10 @@ echo =============================================== grep 'DB_ENV_' $d/*/*.c | sed -e '/F_.*dbenv,/d' \ - -e '/env_method.c.*outflagsp,/d' \ -e '/DB_ENV_TEST_RECOVERY(.*DB_TEST_/d' \ - -e '/\/libdb_java\//d' > $t1 + -e '/env_method.c.*outflagsp,/d' \ + -e '/\/libdb_java\//d' \ + -e '/{ DB_ENV_/d' > $t1 [ -s $t1 ] && { cat $t1 exit 1 @@ -50,23 +51,27 @@ echo =============================================== } grep 'DB_AM_' $d/*/*.c | -sed -e '/F_.*dbp/d' \ - -e '/F_.*db_rep->rep_db,/d' \ +sed -e '/ DB_AM_RECNUM\./d' \ + -e '/ DB_AM_RECOVER set\./d' \ -e '/"DB->open", dbp->flags, DB_AM_DUP,/d' \ -e '/"DB_NODUPDATA" behavior for databases with/d' \ + -e '/:[ {]*DB_AM_/d' \ + -e '/DB_AM_RECOVER bit in this handle, so that the/d' \ + -e '/F_.*db_rep->rep_db,/d' \ + -e '/F_.*dbp/d' \ -e '/If DB_AM_OPEN_CALLED is not set, then we/d' \ -e '/This was checked in set_flags when DB_AM_ENCRYPT/d' \ -e '/XA_ABORT, we can safely set DB_AM_RECOVER/d' \ - -e '/ DB_AM_RECNUM\./d' \ - -e '/ DB_AM_RECOVER set\./d' \ + -e '/_method.c.*outflagsp,/d' \ + -e '/db_pr.c:.*LF_ISSET(DB_AM_FIXEDLEN)/d' \ + -e '/db_pr.c:.*LF_ISSET(DB_AM_RENUMBER)/d' \ -e '/isdup = dbp->flags & DB_AM_DUP/d' \ -e '/otherwise we simply do/d' \ -e '/pginfo/d' \ - -e '/_method.c.*outflagsp,/d' \ + -e '/qam_method.c:.*LF_ISSET(DB_AM_DISCARD)/d' \ -e '/setting DB_AM_RECOVER, we guarantee that we don/d' \ -e '/the DB_AM_SWAP flag. However, we use/d' \ - -e '/DB_AM_RECOVER bit in this handle, so that the/d' \ - -e '/:[ {]*DB_AM_/d' > $t1 + > $t1 [ -s $t1 ] && { cat $t1 exit 1 diff --git a/db/test/scr022/chk.rr b/db/test/scr022/chk.rr index 3a42c05cc..53d8bb158 100644 --- a/db/test/scr022/chk.rr +++ b/db/test/scr022/chk.rr @@ -1,6 +1,6 @@ #!/bin/sh - # -# $Id: chk.rr,v 1.3 2003/08/01 17:01:11 bostic Exp $ +# $Id: chk.rr,v 1.5 2004/10/07 20:40:43 bostic Exp $ d=../.. @@ -8,12 +8,16 @@ t1=__1 # Check for DB_RUNRECOVERY being specified instead of a call to db_panic. egrep DB_RUNRECOVERY $d/*/*.c | - sed -e '/common\/db_err.c:/d' \ - -e '/case DB_RUNRECOVERY:/d' \ + sed -e '/__db_panic(.*, DB_RUNRECOVERY)/d' \ + -e '/case DB_RUNRECOVERY:/d' \ -e '/db_dispatch.c:.*if (ret == DB_RUNRECOVERY/d' \ + -e '/db_err.c:/d' \ -e '/os_errno.c:.*evalue == DB_RUNRECOVERY/d' \ + -e '/\/php_db4\//d' \ + -e '/rep_backup.c:.*Panic the env and return DB_RUNRECOVERY/d' \ -e '/txn.c:.* \* DB_RUNRECOVERY and we need to/d' \ - -e '/__db_panic(.*, DB_RUNRECOVERY)/d' > $t1 + -e '/txn.c:.*returned DB_RUNRECOVERY and we need to/d' \ + > $t1 [ -s $t1 ] && { echo "DB_RUNRECOVERY used; should be a call to db_panic." 
cat $t1 diff --git a/db/test/scr024/Makefile b/db/test/scr024/Makefile index bbd2f1b81..aa5177e12 100644 --- a/db/test/scr024/Makefile +++ b/db/test/scr024/Makefile @@ -1,46 +1,26 @@ TESTCLASSES=\ - ./src/com/sleepycat/bdb/bind/serial/test/MarshalledObject.java\ - ./src/com/sleepycat/bdb/bind/serial/test/NullClassCatalog.java\ - ./src/com/sleepycat/bdb/bind/serial/test/SerialBindingTest.java\ - ./src/com/sleepycat/bdb/bind/serial/test/Suite.java\ - ./src/com/sleepycat/bdb/bind/serial/test/TestClassCatalog.java\ - ./src/com/sleepycat/bdb/bind/test/BindingSpeedTest.java\ - ./src/com/sleepycat/bdb/bind/test/Suite.java\ - ./src/com/sleepycat/bdb/bind/tuple/test/MarshalledObject.java\ - ./src/com/sleepycat/bdb/bind/tuple/test/Suite.java\ - ./src/com/sleepycat/bdb/bind/tuple/test/TupleBindingTest.java\ - ./src/com/sleepycat/bdb/bind/tuple/test/TupleFormatTest.java\ - ./src/com/sleepycat/bdb/bind/tuple/test/TupleOrderingTest.java\ - ./src/com/sleepycat/bdb/serial/test/StoredClassCatalogTest.java\ - ./src/com/sleepycat/bdb/serial/test/Suite.java\ - ./src/com/sleepycat/bdb/serial/test/TestSerial.java\ - ./src/com/sleepycat/bdb/serial/test/TupleSerialDbFactoryTest.java\ - ./src/com/sleepycat/bdb/test/CollectionTest.java\ - ./src/com/sleepycat/bdb/test/DbTestUtil.java\ - ./src/com/sleepycat/bdb/test/ForeignKeyTest.java\ - ./src/com/sleepycat/bdb/test/JoinTest.java\ - ./src/com/sleepycat/bdb/test/KeyRangeTest.java\ - ./src/com/sleepycat/bdb/test/NullTransactionRunner.java\ - ./src/com/sleepycat/bdb/test/Suite.java\ - ./src/com/sleepycat/bdb/test/TestDataBinding.java\ - ./src/com/sleepycat/bdb/test/TestEntity.java\ - ./src/com/sleepycat/bdb/test/TestEntityBinding.java\ - ./src/com/sleepycat/bdb/test/TestEnv.java\ - ./src/com/sleepycat/bdb/test/TestKeyAssigner.java\ - ./src/com/sleepycat/bdb/test/TestKeyExtractor.java\ - ./src/com/sleepycat/bdb/test/TestStore.java\ - ./src/com/sleepycat/bdb/test/TransactionTest.java\ - ./src/com/sleepycat/bdb/util/test/ExceptionWrapperTest.java\ - ./src/com/sleepycat/bdb/util/test/Suite.java\ - ./src/com/sleepycat/bdb/util/test/UtfTest.java + ./src/com/sleepycat/bind/serial/test/*.java\ + ./src/com/sleepycat/bind/test/*.java\ + ./src/com/sleepycat/bind/tuple/test/*.java\ + ./src/com/sleepycat/collections/test/*.java\ + ./src/com/sleepycat/collections/test/serial/*.java\ + ./src/com/sleepycat/util/test/*.java + +TESTSERIALPATH=com/sleepycat/collections/test/serial/TestSerial all: dbtest.jar dbtest.jar: classesdir + # Compile the tests and build the test jar javac -classpath ${DB_JAR}:${REQUIRED_JARS} \ - -d ./classes ${TESTCLASSES} + -d ./classes ${TESTCLASSES} jar cf ./dbtest.jar -C ./classes ./com/sleepycat - jar uf ./dbtest.jar -C ./src ./com/sleepycat/bdb/serial/test/testdata + # Build the original version of TestSerial in the testserial directory + mkdir -p "testserial/${TESTSERIALPATH}" + cp "./src/${TESTSERIALPATH}.java.original" \ + "./testserial/${TESTSERIALPATH}.java" + javac -classpath ${DB_JAR}:${REQUIRED_JARS} \ + -d ./testserial "testserial/${TESTSERIALPATH}.java" classesdir: [ -d ./classes ] || (mkdir ./classes) @@ -48,4 +28,5 @@ classesdir: clean: [ -d ./classes ] && rm -rf ./classes [ -f ./dbtest.jar ] && rm ./dbtest.jar + [ -d ./testserial ] && rm -rf ./testserial diff --git a/db/test/scr024/chk.bdb b/db/test/scr024/chk.bdb index 2fa5cd7cf..9f20059ec 100644 --- a/db/test/scr024/chk.bdb +++ b/db/test/scr024/chk.bdb @@ -1,24 +1,15 @@ #!/bin/sh - # -# $Id: chk.bdb,v 1.7 2003/10/24 01:23:20 mjc Exp $ -# -# Check to make sure the Greybird/bdb Java API pass 
our tests. - -# The tests are really the combination of two (soon to be three) -# things; 1. the test code, 2. junit, (3. some coverage tool). -# The tests will pass if we are able to run the examples using the -# junit testing tool and get XXX% code coverage (100 would be nice). +# $Id: chk.bdb,v 1.8 2004/04/09 16:34:10 mark Exp $ # +# Run the collections/bind test suite. # NOTES: -# This test requires three JARs not included with the Berkeley DB -# distribution. JUnit (junit.jar), XML APIs (xml-apis.jar) and -# Xerces (xercesImpl.jar). I've been using the 8/31/2002 version -# of JUnit. You can download these JARs from http://jakarta.apache.org/ +# This test requires one JAR not included with the Berkeley DB +# distribution: JUnit (junit.jar) I've been using the 8/31/2002 version +# of JUnit. You can download this JAR from http://jakarta.apache.org/ # # JUNIT_JAR=/Users/gburd/Unix/opt/junit/junit.jar -# XMLAPI_JAR=/Users/gburd/Unix/opt/ant/lib/xml-apis.jar -# XMLIMPL_JAR=/Users/gburd/Unix/opt/ant/lib/xerxesImpl.jar [ "x$JUNIT_JAR" = "x" ] && { echo 'FAIL: unset environment variable JUNIT_JAR for junit.jar.' @@ -30,32 +21,14 @@ exit 1 } -[ "x$XMLAPI_JAR" = "x" ] && { - echo 'FAIL: unset environment variable XMLAPI_JAR for xml-apis.jar.' - exit 1 -} - -[ -f $XMLAPI_JAR ] || { - echo 'FAIL: XMLAPI_JAR not a valid path to the xml-apis.jar.' - exit 1 -} - -[ "x$XMLIMPL_JAR" = "x" ] && { - echo 'FAIL: unset environment variable XMLIMPL_JAR for xerxesImpl.jar.' - exit 1 -} - -[ -f $XMLIMPL_JAR ] || { - echo 'FAIL: XMLIMPL_JAR is not a valid path to the file xerxesImpl.jar.' - exit 1 -} - d=.. -REQUIRED_JARS=$JUNIT_JAR:$XMLAPI_JAR:$XMLIMPL_JAR +REQUIRED_JARS=$JUNIT_JAR DB_JAR=$d/db.jar export DB_JAR export REQUIRED_JARS +# Build the tests. + make clean [ -f ./dbtest.jar ] || (make dbtest.jar) || { @@ -63,11 +36,29 @@ make clean exit 1 } -if java -cp $REQUIRED_JARS:$DB_JAR:./dbtest.jar com.sleepycat.bdb.test.Suite ; then - : +# Perform initialization needed before StoredClassCatalogTest is run in +# the tests below. The testserial directory must be first in the classpath. + +c="com.sleepycat.collections.test.serial.StoredClassCatalogTestInit" +echo "Running: $c" +if java -cp testserial:$REQUIRED_JARS:$DB_JAR:./dbtest.jar $c ; then + : else - echo "FAIL: test program failed" - exit 1 + echo "FAIL: test program failed" + exit 1 fi +# Run the tests. 
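# Note: each *Test.class found under ./classes is mapped back to a dotted
# class name and its main() is invoked, which for the tests shown here hands
# control to junit.textui.TestRunner.  ./testserial is left off this
# classpath on purpose: the init run above puts ./testserial first so it sees
# the original TestSerial that the Makefile compiles from
# TestSerial.java.original, while these runs load the evolved TestSerial from
# dbtest.jar, presumably so StoredClassCatalogTest can verify that objects
# serialized under the old class format still deserialize.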
+ +for f in `find classes -name "*Test.class"`; do + c=`echo "$f" | sed -e 's/classes\///' -e 's/\.class//' -e 's/\//./g'` + echo "Running: $c" + if java -cp $REQUIRED_JARS:$DB_JAR:./dbtest.jar $c ; then + : + else + echo "FAIL: test program failed" + exit 1 + fi +done + exit 0 diff --git a/db/test/scr024/coverage/build.xml b/db/test/scr024/coverage/build.xml index 1975f8364..efbc6abc6 100644 --- a/db/test/scr024/coverage/build.xml +++ b/db/test/scr024/coverage/build.xml @@ -1,14 +1,17 @@ - + + + @@ -35,31 +38,55 @@ - + + - + + + + + + + + - - - - - + + + + + + + + + + - - - + + + + + - @@ -88,7 +115,7 @@ + classname="com.sleepycat.examples.collections.hello.HelloDatabaseWorld"/> @@ -96,7 +123,18 @@ + classname="com.sleepycat.examples.collections.ship.${param_name}.Sample"/> + + + + + + + diff --git a/db/test/scr024/src/com/sleepycat/bind/serial/test/MarshalledObject.java b/db/test/scr024/src/com/sleepycat/bind/serial/test/MarshalledObject.java new file mode 100644 index 000000000..bbec97630 --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/bind/serial/test/MarshalledObject.java @@ -0,0 +1,128 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: MarshalledObject.java,v 1.2 2004/06/04 18:26:00 mark Exp $ + */ + +package com.sleepycat.bind.serial.test; + +import java.io.Serializable; + +import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * @author Mark Hayes + */ +public class MarshalledObject + implements Serializable, MarshalledTupleKeyEntity { + + private String data; + private transient String primaryKey; + private String indexKey1; + private String indexKey2; + + public MarshalledObject(String data, String primaryKey, + String indexKey1, String indexKey2) { + this.data = data; + this.primaryKey = primaryKey; + this.indexKey1 = indexKey1; + this.indexKey2 = indexKey2; + } + + public boolean equals(Object o) { + + try { + MarshalledObject other = (MarshalledObject) o; + + return this.data.equals(other.data) && + this.primaryKey.equals(other.primaryKey) && + this.indexKey1.equals(other.indexKey1) && + this.indexKey2.equals(other.indexKey2); + } catch (Exception e) { + return false; + } + } + + public String getData() { + + return data; + } + + public String getPrimaryKey() { + + return primaryKey; + } + + public String getIndexKey1() { + + return indexKey1; + } + + public String getIndexKey2() { + + return indexKey2; + } + + public int expectedKeyLength() { + + return primaryKey.length() + 1; + } + + public void marshalPrimaryKey(TupleOutput keyOutput) { + + keyOutput.writeString(primaryKey); + } + + public void unmarshalPrimaryKey(TupleInput keyInput) { + + primaryKey = keyInput.readString(); + } + + public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) { + + if ("1".equals(keyName)) { + if (indexKey1.length() > 0) { + keyOutput.writeString(indexKey1); + return true; + } else { + return false; + } + } else if ("2".equals(keyName)) { + if (indexKey2.length() > 0) { + keyOutput.writeString(indexKey2); + return true; + } else { + return false; + } + } else { + throw new IllegalArgumentException("Unknown keyName: " + keyName); + } + } + + public boolean nullifyForeignKey(String keyName) { + + if ("1".equals(keyName)) { + if (indexKey1.length() > 0) { + indexKey1 = ""; + return true; + } else { + return false; + } + } else if ("2".equals(keyName)) { + if 
(indexKey2.length() > 0) { + indexKey2 = ""; + return true; + } else { + return false; + } + } else { + throw new IllegalArgumentException("Unknown keyName: " + keyName); + } + } +} + diff --git a/db/test/scr024/src/com/sleepycat/bind/serial/test/NullClassCatalog.java b/db/test/scr024/src/com/sleepycat/bind/serial/test/NullClassCatalog.java new file mode 100644 index 000000000..f59ab7616 --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/bind/serial/test/NullClassCatalog.java @@ -0,0 +1,44 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: NullClassCatalog.java,v 1.4 2004/09/22 18:01:06 bostic Exp $ + */ + +package com.sleepycat.bind.serial.test; + +import java.io.ObjectStreamClass; +import java.math.BigInteger; + +import com.sleepycat.db.DatabaseException; +import com.sleepycat.bind.serial.ClassCatalog; + +/** + * NullCatalog is a dummy Catalog implementation that simply + * returns large (8 byte) class IDs so that ObjectOutput + * can be simulated when computing a serialized size. + * + * @author Mark Hayes + */ +class NullClassCatalog implements ClassCatalog { + + private long id = Long.MAX_VALUE; + + public void close() + throws DatabaseException { + } + + public byte[] getClassID(ObjectStreamClass classFormat) + throws DatabaseException { + + return BigInteger.valueOf(id--).toByteArray(); + } + + public ObjectStreamClass getClassFormat(byte[] classID) + throws DatabaseException, ClassNotFoundException { + + return null; // ObjectInput not supported + } +} diff --git a/db/test/scr024/src/com/sleepycat/bind/serial/test/SerialBindingTest.java b/db/test/scr024/src/com/sleepycat/bind/serial/test/SerialBindingTest.java new file mode 100644 index 000000000..67c01cf4d --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/bind/serial/test/SerialBindingTest.java @@ -0,0 +1,207 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: SerialBindingTest.java,v 1.3 2004/06/04 18:26:00 mark Exp $ + */ + +package com.sleepycat.bind.serial.test; + +import junit.framework.Test; +import junit.framework.TestCase; +import junit.framework.TestSuite; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.SerialBinding; +import com.sleepycat.bind.serial.SerialSerialBinding; +import com.sleepycat.bind.serial.TupleSerialMarshalledBinding; +import com.sleepycat.collections.test.DbTestUtil; +import com.sleepycat.db.DatabaseEntry; +import com.sleepycat.util.ExceptionUnwrapper; + +/** + * @author Mark Hayes + */ +public class SerialBindingTest extends TestCase { + + private ClassCatalog catalog; + private DatabaseEntry buffer; + private DatabaseEntry keyBuffer; + private DatabaseEntry indexKeyBuffer; + + public static void main(String[] args) + throws Exception { + + junit.framework.TestResult tr = + junit.textui.TestRunner.run(suite()); + if (tr.errorCount() > 0 || + tr.failureCount() > 0) { + System.exit(1); + } else { + System.exit(0); + } + } + + public static Test suite() + throws Exception { + + TestSuite suite = new TestSuite(SerialBindingTest.class); + return suite; + } + + public SerialBindingTest(String name) { + + super(name); + } + + public void setUp() { + + DbTestUtil.printTestName("SerialBindingTest." 
+ getName()); + catalog = new TestClassCatalog(); + buffer = new DatabaseEntry(); + keyBuffer = new DatabaseEntry(); + indexKeyBuffer = new DatabaseEntry(); + } + + public void tearDown() { + + /* Ensure that GC can cleanup. */ + catalog = null; + buffer = null; + keyBuffer = null; + indexKeyBuffer = null; + } + + public void runTest() + throws Throwable { + + try { + super.runTest(); + } catch (Exception e) { + throw ExceptionUnwrapper.unwrap(e); + } + } + + private void primitiveBindingTest(Object val) { + + Class cls = val.getClass(); + SerialBinding binding = new SerialBinding(catalog, cls); + + binding.objectToEntry(val, buffer); + assertTrue(buffer.getSize() > 0); + + Object val2 = binding.entryToObject(buffer); + assertSame(cls, val2.getClass()); + assertEquals(val, val2); + + Object valWithWrongCls = (cls == String.class) + ? ((Object) new Integer(0)) : ((Object) new String("")); + try { + binding.objectToEntry(valWithWrongCls, buffer); + } catch (IllegalArgumentException expected) {} + } + + public void testPrimitiveBindings() { + + primitiveBindingTest("abc"); + primitiveBindingTest(new Character('a')); + primitiveBindingTest(new Boolean(true)); + primitiveBindingTest(new Byte((byte) 123)); + primitiveBindingTest(new Short((short) 123)); + primitiveBindingTest(new Integer(123)); + primitiveBindingTest(new Long(123)); + primitiveBindingTest(new Float(123.123)); + primitiveBindingTest(new Double(123.123)); + } + + public void testNullObjects() { + + SerialBinding binding = new SerialBinding(catalog, null); + buffer.setSize(0); + binding.objectToEntry(null, buffer); + assertTrue(buffer.getSize() > 0); + assertEquals(null, binding.entryToObject(buffer)); + } + + public void testSerialSerialBinding() { + + SerialBinding keyBinding = new SerialBinding(catalog, String.class); + SerialBinding valueBinding = new SerialBinding(catalog, String.class); + EntityBinding binding = new MySerialSerialBinding(keyBinding, + valueBinding); + + String val = "key#value?indexKey"; + binding.objectToData(val, buffer); + assertTrue(buffer.getSize() > 0); + binding.objectToKey(val, keyBuffer); + assertTrue(keyBuffer.getSize() > 0); + + Object result = binding.entryToObject(keyBuffer, buffer); + assertEquals(val, result); + } + + // also tests TupleSerialBinding since TupleSerialMarshalledBinding extends + // it + public void testTupleSerialMarshalledBinding() { + + SerialBinding valueBinding = new SerialBinding(catalog, + MarshalledObject.class); + EntityBinding binding = + new TupleSerialMarshalledBinding(valueBinding); + + MarshalledObject val = new MarshalledObject("abc", "primary", + "index1", "index2"); + binding.objectToData(val, buffer); + assertTrue(buffer.getSize() > 0); + binding.objectToKey(val, keyBuffer); + assertEquals(val.expectedKeyLength(), keyBuffer.getSize()); + + Object result = binding.entryToObject(keyBuffer, buffer); + assertTrue(result instanceof MarshalledObject); + val = (MarshalledObject) result; + assertEquals("abc", val.getData()); + assertEquals("primary", val.getPrimaryKey()); + assertEquals("index1", val.getIndexKey1()); + assertEquals("index2", val.getIndexKey2()); + } + + private static class MySerialSerialBinding extends SerialSerialBinding { + + private MySerialSerialBinding(SerialBinding keyBinding, + SerialBinding valueBinding) { + + super(keyBinding, valueBinding); + } + + public Object entryToObject(Object keyInput, Object valueInput) { + + return "" + keyInput + '#' + valueInput; + } + + public Object objectToKey(Object object) { + + String s = (String) object; + 
int i = s.indexOf('#'); + if (i < 0 || i == s.length() - 1) { + throw new IllegalArgumentException(s); + } else { + return s.substring(0, i); + } + } + + public Object objectToData(Object object) { + + String s = (String) object; + int i = s.indexOf('#'); + if (i < 0 || i == s.length() - 1) { + throw new IllegalArgumentException(s); + } else { + return s.substring(i + 1); + } + } + } +} + diff --git a/db/test/scr024/src/com/sleepycat/bind/serial/test/TestClassCatalog.java b/db/test/scr024/src/com/sleepycat/bind/serial/test/TestClassCatalog.java new file mode 100644 index 000000000..9f9f5896a --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/bind/serial/test/TestClassCatalog.java @@ -0,0 +1,60 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TestClassCatalog.java,v 1.4 2004/09/15 21:49:47 mark Exp $ + */ + +package com.sleepycat.bind.serial.test; + +import java.io.ObjectStreamClass; +import java.util.HashMap; + +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.db.DatabaseException; + +/** + * @author Mark Hayes + */ +public class TestClassCatalog implements ClassCatalog { + + private HashMap idToDescMap = new HashMap(); + private HashMap nameToIdMap = new HashMap(); + private int nextId = 1; + + public TestClassCatalog() { + } + + public void close() + throws DatabaseException { + } + + public synchronized byte[] getClassID(ObjectStreamClass desc) + throws DatabaseException { + + String className = desc.getName(); + byte[] id = (byte[]) nameToIdMap.get(className); + if (id == null) { + String strId = String.valueOf(nextId); + id = strId.getBytes(); + nextId += 1; + + idToDescMap.put(strId, desc); + nameToIdMap.put(className, id); + } + return id; + } + + public synchronized ObjectStreamClass getClassFormat(byte[] id) + throws DatabaseException { + + String strId = new String(id); + ObjectStreamClass desc = (ObjectStreamClass) idToDescMap.get(strId); + if (desc == null) { + throw new DatabaseException("classID not found"); + } + return desc; + } +} diff --git a/db/test/scr024/src/com/sleepycat/bind/test/BindingSpeedTest.java b/db/test/scr024/src/com/sleepycat/bind/test/BindingSpeedTest.java new file mode 100644 index 000000000..3f2d86a35 --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/bind/test/BindingSpeedTest.java @@ -0,0 +1,368 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. 
+ * + * $Id: BindingSpeedTest.java,v 1.4 2004/08/02 18:53:08 mjc Exp $ + */ + +package com.sleepycat.bind.test; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectInputStream; +import java.io.ObjectOutput; +import java.io.ObjectOutputStream; +import java.io.OutputStreamWriter; +import java.io.Serializable; +import java.io.Writer; + +import javax.xml.parsers.SAXParserFactory; + +import junit.framework.Test; +import junit.framework.TestCase; +import junit.framework.TestSuite; + +import org.xml.sax.InputSource; +import org.xml.sax.XMLReader; + +import com.sleepycat.bind.serial.SerialInput; +import com.sleepycat.bind.serial.SerialOutput; +import com.sleepycat.bind.serial.test.TestClassCatalog; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.collections.test.DbTestUtil; +import com.sleepycat.util.FastInputStream; +import com.sleepycat.util.FastOutputStream; + +/** + * @author Mark Hayes + */ +public class BindingSpeedTest extends TestCase { + + static final String JAVA_UNSHARED = "java-unshared".intern(); + static final String JAVA_SHARED = "java-shared".intern(); + static final String JAVA_EXTERNALIZABLE = "java-externalizable".intern(); + static final String XML_SAX = "xml-sax".intern(); + static final String TUPLE = "tuple".intern(); + + static final int RUN_COUNT = 1000; + static final boolean VERBOSE = false; + + public static void main(String[] args) + throws Exception { + + junit.framework.TestResult tr = + junit.textui.TestRunner.run(suite()); + if (tr.errorCount() > 0 || + tr.failureCount() > 0) { + System.exit(1); + } else { + System.exit(0); + } + } + + public static Test suite() { + + TestSuite suite = new TestSuite(); + suite.addTest(new BindingSpeedTest(JAVA_UNSHARED)); + suite.addTest(new BindingSpeedTest(JAVA_SHARED)); + suite.addTest(new BindingSpeedTest(JAVA_EXTERNALIZABLE)); + suite.addTest(new BindingSpeedTest(XML_SAX)); + suite.addTest(new BindingSpeedTest(TUPLE)); + return suite; + } + + private String command; + private FastOutputStream fo; + private TupleOutput to; + private TestClassCatalog jtc; + private byte[] buf; + private XMLReader parser; + + public BindingSpeedTest(String name) { + + super("BindingSpeedTest." 
+ name); + command = name; + } + + public void runTest() + throws Exception { + + DbTestUtil.printTestName(getName()); + + boolean isTuple = false; + boolean isXmlSax = false; + boolean isSerial = false; + boolean isShared = false; + boolean isExternalizable = false; + + if (command == TUPLE) { + isTuple = true; + } else if (command == XML_SAX) { + isXmlSax = true; + } else if (command == JAVA_UNSHARED) { + isSerial = true; + } else if (command == JAVA_SHARED) { + isSerial = true; + isShared = true; + } else if (command == JAVA_EXTERNALIZABLE) { + isSerial = true; + isShared = true; + isExternalizable = true; + } else { + throw new Exception("invalid command: " + command); + } + + // Do initialization + + if (isTuple) { + initTuple(); + } else if (isXmlSax) { + initXmlSax(); + } else if (isSerial) { + if (isShared) { + initSerialShared(); + } else { + initSerialUnshared(); + } + } + + // Prime the Java compiler + + int size = 0; + for (int i = 0; i < RUN_COUNT; i += 1) { + + if (isTuple) { + size = runTuple(); + } else if (isXmlSax) { + size = runXmlSax(); + } else if (isSerial) { + if (isShared) { + if (isExternalizable) { + size = runSerialExternalizable(); + } else { + size = runSerialShared(); + } + } else { + size = runSerialUnshared(); + } + } + } + + // Then run the timing tests + + long startTime = System.currentTimeMillis(); + + for (int i = 0; i < RUN_COUNT; i += 1) { + if (isTuple) { + size = runTuple(); + } else if (isXmlSax) { + size = runXmlSax(); + } else if (isSerial) { + if (isShared) { + if (isExternalizable) { + size = runSerialExternalizable(); + } else { + size = runSerialShared(); + } + } else { + size = runSerialUnshared(); + } + } + } + + long stopTime = System.currentTimeMillis(); + + assertTrue("data size too big", size < 250); + + if (VERBOSE) { + System.out.println(command); + System.out.println("data size: " + size); + System.out.println("run time: " + + ((stopTime - startTime) / (double) RUN_COUNT)); + } + } + + public void tearDown() { + + /* Ensure that GC can cleanup. 
*/ + command = null; + fo = null; + to = null; + jtc = null; + buf = null; + parser = null; + } + + void initSerialUnshared() + throws Exception { + + fo = new FastOutputStream(); + } + + int runSerialUnshared() + throws Exception { + + fo.reset(); + ObjectOutputStream oos = new ObjectOutputStream(fo); + oos.writeObject(new Data()); + byte[] bytes = fo.toByteArray(); + FastInputStream fi = new FastInputStream(bytes); + ObjectInputStream ois = new ObjectInputStream(fi); + ois.readObject(); + return bytes.length; + } + + void initSerialShared() + throws Exception { + + jtc = new TestClassCatalog(); + fo = new FastOutputStream(); + } + + int runSerialShared() + throws Exception { + + fo.reset(); + SerialOutput oos = new SerialOutput(fo, jtc); + oos.writeObject(new Data()); + byte[] bytes = fo.toByteArray(); + FastInputStream fi = new FastInputStream(bytes); + SerialInput ois = new SerialInput(fi, jtc); + ois.readObject(); + return (bytes.length - SerialOutput.getStreamHeader().length); + } + + int runSerialExternalizable() + throws Exception { + + fo.reset(); + SerialOutput oos = new SerialOutput(fo, jtc); + oos.writeObject(new Data2()); + byte[] bytes = fo.toByteArray(); + FastInputStream fi = new FastInputStream(bytes); + SerialInput ois = new SerialInput(fi, jtc); + ois.readObject(); + return (bytes.length - SerialOutput.getStreamHeader().length); + } + + void initTuple() + throws Exception { + + buf = new byte[500]; + to = new TupleOutput(buf); + } + + int runTuple() + throws Exception { + + to.reset(); + new Data().writeTuple(to); + + TupleInput ti = new TupleInput( + to.getBufferBytes(), to.getBufferOffset(), + to.getBufferLength()); + new Data().readTuple(ti); + + return to.getBufferLength(); + } + + void initXmlSax() + throws Exception { + + buf = new byte[500]; + fo = new FastOutputStream(); + SAXParserFactory saxFactory = SAXParserFactory.newInstance(); + saxFactory.setNamespaceAware(true); + parser = saxFactory.newSAXParser().getXMLReader(); + } + + int runXmlSax() + throws Exception { + + fo.reset(); + OutputStreamWriter writer = new OutputStreamWriter(fo); + new Data().writeXmlText(writer); + + byte[] bytes = fo.toByteArray(); + FastInputStream fi = new FastInputStream(bytes); + InputSource input = new InputSource(fi); + parser.parse(input); + + //InputStreamReader reader = new InputStreamReader(fi); + //new Data().readXmlText(??); + + return bytes.length; + } + + static class Data2 extends Data implements Externalizable { + + public Data2() {} + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + field1 = in.readUTF(); + field2 = in.readUTF(); + field3 = in.readInt(); + field4 = in.readInt(); + field5 = in.readUTF(); + } + + public void writeExternal(ObjectOutput out) + throws IOException { + + out.writeUTF(field1); + out.writeUTF(field2); + out.writeInt(field3); + out.writeInt(field4); + out.writeUTF(field5); + } + } + + static class Data implements Serializable { + + String field1 = "field1"; + String field2 = "field2"; + int field3 = 333; + int field4 = 444; + String field5 = "field5"; + + void readTuple(TupleInput _input) { + + field1 = _input.readString(); + field2 = _input.readString(); + field3 = _input.readInt(); + field4 = _input.readInt(); + field5 = _input.readString(); + } + + void writeTuple(TupleOutput _output) { + + _output.writeString(field1); + _output.writeString(field2); + _output.writeInt(field3); + _output.writeInt(field4); + _output.writeString(field5); + } + + void writeXmlText(Writer writer) throws IOException { + 
+ writer.write(""); + writer.write(field1); + writer.write(""); + writer.write(field2); + writer.write(""); + writer.write(String.valueOf(field3)); + writer.write(""); + writer.write(String.valueOf(field4)); + writer.write(""); + writer.write(field5); + writer.write(""); + writer.flush(); + } + } +} diff --git a/db/test/scr024/src/com/sleepycat/bind/tuple/test/MarshalledObject.java b/db/test/scr024/src/com/sleepycat/bind/tuple/test/MarshalledObject.java new file mode 100644 index 000000000..d62b7b51f --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/bind/tuple/test/MarshalledObject.java @@ -0,0 +1,138 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: MarshalledObject.java,v 1.2 2004/06/04 18:26:00 mark Exp $ + */ + +package com.sleepycat.bind.tuple.test; + +import com.sleepycat.bind.tuple.MarshalledTupleEntry; +import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * @author Mark Hayes + */ +public class MarshalledObject + implements MarshalledTupleEntry, MarshalledTupleKeyEntity { + + private String data; + private String primaryKey; + private String indexKey1; + private String indexKey2; + + public MarshalledObject() { + } + + MarshalledObject(String data, String primaryKey, + String indexKey1, String indexKey2) { + + this.data = data; + this.primaryKey = primaryKey; + this.indexKey1 = indexKey1; + this.indexKey2 = indexKey2; + } + + String getData() { + + return data; + } + + String getPrimaryKey() { + + return primaryKey; + } + + String getIndexKey1() { + + return indexKey1; + } + + String getIndexKey2() { + + return indexKey2; + } + + int expectedDataLength() { + + return data.length() + 1 + + indexKey1.length() + 1 + + indexKey2.length() + 1; + } + + int expectedKeyLength() { + + return primaryKey.length() + 1; + } + + public void marshalEntry(TupleOutput dataOutput) { + + dataOutput.writeString(data); + dataOutput.writeString(indexKey1); + dataOutput.writeString(indexKey2); + } + + public void unmarshalEntry(TupleInput dataInput) { + + data = dataInput.readString(); + indexKey1 = dataInput.readString(); + indexKey2 = dataInput.readString(); + } + + public void marshalPrimaryKey(TupleOutput keyOutput) { + + keyOutput.writeString(primaryKey); + } + + public void unmarshalPrimaryKey(TupleInput keyInput) { + + primaryKey = keyInput.readString(); + } + + public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) { + + if ("1".equals(keyName)) { + if (indexKey1.length() > 0) { + keyOutput.writeString(indexKey1); + return true; + } else { + return false; + } + } else if ("2".equals(keyName)) { + if (indexKey1.length() > 0) { + keyOutput.writeString(indexKey2); + return true; + } else { + return false; + } + } else { + throw new IllegalArgumentException("Unknown keyName: " + keyName); + } + } + + public boolean nullifyForeignKey(String keyName) { + + if ("1".equals(keyName)) { + if (indexKey1.length() > 0) { + indexKey1 = ""; + return true; + } else { + return false; + } + } else if ("2".equals(keyName)) { + if (indexKey1.length() > 0) { + indexKey2 = ""; + return true; + } else { + return false; + } + } else { + throw new IllegalArgumentException("Unknown keyName: " + keyName); + } + } +} + diff --git a/db/test/scr024/src/com/sleepycat/bind/tuple/test/TupleBindingTest.java b/db/test/scr024/src/com/sleepycat/bind/tuple/test/TupleBindingTest.java new file mode 100644 
index 000000000..675f4a003 --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/bind/tuple/test/TupleBindingTest.java @@ -0,0 +1,254 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TupleBindingTest.java,v 1.4 2004/06/29 06:06:19 mark Exp $ + */ + +package com.sleepycat.bind.tuple.test; + +import junit.framework.Test; +import junit.framework.TestCase; +import junit.framework.TestSuite; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.tuple.BooleanBinding; +import com.sleepycat.bind.tuple.ByteBinding; +import com.sleepycat.bind.tuple.CharacterBinding; +import com.sleepycat.bind.tuple.DoubleBinding; +import com.sleepycat.bind.tuple.FloatBinding; +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.LongBinding; +import com.sleepycat.bind.tuple.ShortBinding; +import com.sleepycat.bind.tuple.StringBinding; +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleInputBinding; +import com.sleepycat.bind.tuple.TupleMarshalledBinding; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.bind.tuple.TupleTupleMarshalledBinding; +import com.sleepycat.collections.test.DbTestUtil; +import com.sleepycat.db.DatabaseEntry; +import com.sleepycat.util.ExceptionUnwrapper; + +/** + * @author Mark Hayes + */ +public class TupleBindingTest extends TestCase { + + private DatabaseEntry buffer; + private DatabaseEntry keyBuffer; + private DatabaseEntry indexKeyBuffer; + + public static void main(String[] args) + throws Exception { + + junit.framework.TestResult tr = + junit.textui.TestRunner.run(suite()); + if (tr.errorCount() > 0 || + tr.failureCount() > 0) { + System.exit(1); + } else { + System.exit(0); + } + } + + public static Test suite() + throws Exception { + + TestSuite suite = new TestSuite(TupleBindingTest.class); + return suite; + } + + public TupleBindingTest(String name) { + + super(name); + } + + public void setUp() { + + DbTestUtil.printTestName("TupleBindingTest." + getName()); + buffer = new DatabaseEntry(); + keyBuffer = new DatabaseEntry(); + indexKeyBuffer = new DatabaseEntry(); + } + + public void tearDown() { + + /* Ensure that GC can cleanup. */ + buffer = null; + keyBuffer = null; + indexKeyBuffer = null; + } + + public void runTest() + throws Throwable { + + try { + super.runTest(); + } catch (Exception e) { + throw ExceptionUnwrapper.unwrap(e); + } + } + + private void primitiveBindingTest(Object val, int byteSize) { + + Class cls = val.getClass(); + EntryBinding binding = TupleBinding.getPrimitiveBinding(cls); + + binding.objectToEntry(val, buffer); + assertEquals(byteSize, buffer.getSize()); + + Object val2 = binding.entryToObject(buffer); + assertSame(cls, val2.getClass()); + assertEquals(val, val2); + + Object valWithWrongCls = (cls == String.class) + ? 
((Object) new Integer(0)) : ((Object) new String("")); + try { + binding.objectToEntry(valWithWrongCls, buffer); + } + catch (ClassCastException expected) {} + } + + public void testPrimitiveBindings() { + + primitiveBindingTest("abc", 4); + primitiveBindingTest(new Character('a'), 2); + primitiveBindingTest(new Boolean(true), 1); + primitiveBindingTest(new Byte((byte) 123), 1); + primitiveBindingTest(new Short((short) 123), 2); + primitiveBindingTest(new Integer(123), 4); + primitiveBindingTest(new Long(123), 8); + primitiveBindingTest(new Float(123.123), 4); + primitiveBindingTest(new Double(123.123), 8); + + DatabaseEntry entry = new DatabaseEntry(); + + StringBinding.stringToEntry("abc", entry); + assertEquals(4, entry.getData().length); + assertEquals("abc", StringBinding.entryToString(entry)); + + new StringBinding().objectToEntry("abc", entry); + assertEquals(4, entry.getData().length); + + StringBinding.stringToEntry(null, entry); + assertEquals(2, entry.getData().length); + assertEquals(null, StringBinding.entryToString(entry)); + + new StringBinding().objectToEntry(null, entry); + assertEquals(2, entry.getData().length); + + CharacterBinding.charToEntry('a', entry); + assertEquals(2, entry.getData().length); + assertEquals('a', CharacterBinding.entryToChar(entry)); + + new CharacterBinding().objectToEntry(new Character('a'), entry); + assertEquals(2, entry.getData().length); + + BooleanBinding.booleanToEntry(true, entry); + assertEquals(1, entry.getData().length); + assertEquals(true, BooleanBinding.entryToBoolean(entry)); + + new BooleanBinding().objectToEntry(Boolean.TRUE, entry); + assertEquals(1, entry.getData().length); + + ByteBinding.byteToEntry((byte) 123, entry); + assertEquals(1, entry.getData().length); + assertEquals((byte) 123, ByteBinding.entryToByte(entry)); + + ShortBinding.shortToEntry((short) 123, entry); + assertEquals(2, entry.getData().length); + assertEquals((short) 123, ShortBinding.entryToShort(entry)); + + new ByteBinding().objectToEntry(new Byte((byte) 123), entry); + assertEquals(1, entry.getData().length); + + IntegerBinding.intToEntry(123, entry); + assertEquals(4, entry.getData().length); + assertEquals(123, IntegerBinding.entryToInt(entry)); + + new IntegerBinding().objectToEntry(new Integer(123), entry); + assertEquals(4, entry.getData().length); + + LongBinding.longToEntry(123, entry); + assertEquals(8, entry.getData().length); + assertEquals(123, LongBinding.entryToLong(entry)); + + new LongBinding().objectToEntry(new Long(123), entry); + assertEquals(8, entry.getData().length); + + FloatBinding.floatToEntry((float) 123.123, entry); + assertEquals(4, entry.getData().length); + assertTrue(((float) 123.123) == FloatBinding.entryToFloat(entry)); + + new FloatBinding().objectToEntry(new Float((float) 123.123), entry); + assertEquals(4, entry.getData().length); + + DoubleBinding.doubleToEntry(123.123, entry); + assertEquals(8, entry.getData().length); + assertTrue(123.123 == DoubleBinding.entryToDouble(entry)); + + new DoubleBinding().objectToEntry(new Double(123.123), entry); + assertEquals(8, entry.getData().length); + } + + public void testTupleInputBinding() { + + EntryBinding binding = new TupleInputBinding(); + + TupleOutput out = new TupleOutput(); + out.writeString("abc"); + binding.objectToEntry(new TupleInput(out), buffer); + assertEquals(4, buffer.getSize()); + + Object result = binding.entryToObject(buffer); + assertTrue(result instanceof TupleInput); + TupleInput in = (TupleInput) result; + assertEquals("abc", in.readString()); + 
assertEquals(0, in.available()); + } + + // also tests TupleBinding since TupleMarshalledBinding extends it + public void testTupleMarshalledBinding() { + + EntryBinding binding = + new TupleMarshalledBinding(MarshalledObject.class); + + MarshalledObject val = new MarshalledObject("abc", "", "", ""); + binding.objectToEntry(val, buffer); + assertEquals(val.expectedDataLength(), buffer.getSize()); + + Object result = binding.entryToObject(buffer); + assertTrue(result instanceof MarshalledObject); + val = (MarshalledObject) result; + assertEquals("abc", val.getData()); + } + + // also tests TupleTupleBinding since TupleTupleMarshalledBinding extends + // it + public void testTupleTupleMarshalledBinding() { + + EntityBinding binding = + new TupleTupleMarshalledBinding(MarshalledObject.class); + + MarshalledObject val = new MarshalledObject("abc", "primary", + "index1", "index2"); + binding.objectToData(val, buffer); + assertEquals(val.expectedDataLength(), buffer.getSize()); + binding.objectToKey(val, keyBuffer); + assertEquals(val.expectedKeyLength(), keyBuffer.getSize()); + + Object result = binding.entryToObject(keyBuffer, buffer); + assertTrue(result instanceof MarshalledObject); + val = (MarshalledObject) result; + assertEquals("abc", val.getData()); + assertEquals("primary", val.getPrimaryKey()); + assertEquals("index1", val.getIndexKey1()); + assertEquals("index2", val.getIndexKey2()); + } +} + diff --git a/db/test/scr024/src/com/sleepycat/bind/tuple/test/TupleFormatTest.java b/db/test/scr024/src/com/sleepycat/bind/tuple/test/TupleFormatTest.java new file mode 100644 index 000000000..29ecc9a29 --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/bind/tuple/test/TupleFormatTest.java @@ -0,0 +1,756 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TupleFormatTest.java,v 1.3 2004/06/04 18:26:00 mark Exp $ + */ + +package com.sleepycat.bind.tuple.test; + +import java.util.Arrays; + +import junit.framework.Test; +import junit.framework.TestCase; +import junit.framework.TestSuite; + +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.collections.test.DbTestUtil; +import com.sleepycat.db.DatabaseEntry; + +/** + * @author Mark Hayes + */ +public class TupleFormatTest extends TestCase { + + private TupleInput in; + private TupleOutput out; + private DatabaseEntry buffer; + + public static void main(String[] args) + throws Exception { + + junit.framework.TestResult tr = + junit.textui.TestRunner.run(suite()); + if (tr.errorCount() > 0 || + tr.failureCount() > 0) { + System.exit(1); + } else { + System.exit(0); + } + } + + public static Test suite() + throws Exception { + + TestSuite suite = new TestSuite(TupleFormatTest.class); + return suite; + } + + public TupleFormatTest(String name) { + + super(name); + } + + public void setUp() { + + DbTestUtil.printTestName("TupleFormatTest." + getName()); + buffer = new DatabaseEntry(); + out = TupleBinding.newOutput(); + } + + public void tearDown() { + + /* Ensure that GC can cleanup. 
*/ + in = null; + out = null; + buffer = null; + } + + private void copyOutputToInput() { + + TupleBinding.outputToEntry(out, buffer); + assertEquals(out.size(), buffer.getSize()); + in = TupleBinding.entryToInput(buffer); + assertEquals(in.available(), buffer.getSize()); + assertEquals(in.getBufferLength(), buffer.getSize()); + } + + private void stringTest(String val) { + + out.reset(); + out.writeString(val); + assertEquals(val.length() + 1, out.size()); // assume 1-byte chars + copyOutputToInput(); + assertEquals(val, in.readString()); + assertEquals(0, in.available()); + } + + public void testString() { + + stringTest(""); + stringTest("a"); + stringTest("abc"); + + out.reset(); + out.writeString("abc"); + out.writeString("defg"); + assertEquals(9, out.size()); + copyOutputToInput(); + assertEquals("abc", in.readString()); + assertEquals("defg", in.readString()); + assertEquals(0, in.available()); + + out.reset(); + out.writeString("abc"); + out.writeString("defg"); + out.writeString("hijkl"); + assertEquals(15, out.size()); + copyOutputToInput(); + assertEquals("abc", in.readString()); + assertEquals("defg", in.readString()); + assertEquals("hijkl", in.readString()); + assertEquals(0, in.available()); + } + + private void fixedStringTest(char[] val) { + + out.reset(); + out.writeString(val); + assertEquals(val.length, out.size()); // assume 1 byte chars + copyOutputToInput(); + char[] val2 = new char[val.length]; + in.readString(val2); + assertTrue(Arrays.equals(val, val2)); + assertEquals(0, in.available()); + in.reset(); + String val3 = in.readString(val.length); + assertTrue(Arrays.equals(val, val3.toCharArray())); + assertEquals(0, in.available()); + } + + public void testFixedString() { + + fixedStringTest(new char[0]); + fixedStringTest(new char[] {'a'}); + fixedStringTest(new char[] {'a', 'b', 'c'}); + + out.reset(); + out.writeString(new char[] {'a', 'b', 'c'}); + out.writeString(new char[] {'d', 'e', 'f', 'g'}); + assertEquals(7, out.size()); + copyOutputToInput(); + assertEquals("abc", in.readString(3)); + assertEquals("defg", in.readString(4)); + assertEquals(0, in.available()); + + out.reset(); + out.writeString(new char[] {'a', 'b', 'c'}); + out.writeString(new char[] {'d', 'e', 'f', 'g'}); + out.writeString(new char[] {'h', 'i', 'j', 'k', 'l'}); + assertEquals(12, out.size()); + copyOutputToInput(); + assertEquals("abc", in.readString(3)); + assertEquals("defg", in.readString(4)); + assertEquals("hijkl", in.readString(5)); + assertEquals(0, in.available()); + } + + public void testNullString() { + + out.reset(); + out.writeString((String) null); + assertEquals(2, out.size()); + copyOutputToInput(); + assertEquals(null, in.readString()); + assertEquals(0, in.available()); + + out.reset(); + out.writeString((String) null); + out.writeString("x"); + assertEquals(4, out.size()); + copyOutputToInput(); + assertEquals(null, in.readString()); + assertEquals(2, in.available()); + assertEquals("x", in.readString()); + assertEquals(0, in.available()); + + out.reset(); + out.writeString("x"); + out.writeString((String) null); + assertEquals(4, out.size()); + copyOutputToInput(); + assertEquals("x", in.readString()); + assertEquals(2, in.available()); + assertEquals(null, in.readString()); + assertEquals(0, in.available()); + + out.reset(); + out.writeString((String) null); + out.writeInt(123); + assertEquals(6, out.size()); + copyOutputToInput(); + assertEquals(null, in.readString()); + assertEquals(4, in.available()); + assertEquals(123, in.readInt()); + assertEquals(0, 
in.available()); + + out.reset(); + out.writeInt(123); + out.writeString((String) null); + assertEquals(6, out.size()); + copyOutputToInput(); + assertEquals(123, in.readInt()); + assertEquals(2, in.available()); + assertEquals(null, in.readString()); + assertEquals(0, in.available()); + } + + private void charsTest(char[] val) { + + for (int mode = 0; mode < 2; mode += 1) { + out.reset(); + switch (mode) { + case 0: out.writeChars(val); break; + case 1: out.writeChars(new String(val)); break; + default: throw new IllegalStateException(); + } + assertEquals(val.length * 2, out.size()); + copyOutputToInput(); + char[] val2 = new char[val.length]; + in.readChars(val2); + assertTrue(Arrays.equals(val, val2)); + assertEquals(0, in.available()); + in.reset(); + String val3 = in.readChars(val.length); + assertTrue(Arrays.equals(val, val3.toCharArray())); + assertEquals(0, in.available()); + } + } + + public void testChars() { + + charsTest(new char[0]); + charsTest(new char[] {'a'}); + charsTest(new char[] {'a', 'b', 'c'}); + + out.reset(); + out.writeChars("abc"); + out.writeChars("defg"); + assertEquals(7 * 2, out.size()); + copyOutputToInput(); + assertEquals("abc", in.readChars(3)); + assertEquals("defg", in.readChars(4)); + assertEquals(0, in.available()); + + out.reset(); + out.writeChars("abc"); + out.writeChars("defg"); + out.writeChars("hijkl"); + assertEquals(12 * 2, out.size()); + copyOutputToInput(); + assertEquals("abc", in.readChars(3)); + assertEquals("defg", in.readChars(4)); + assertEquals("hijkl", in.readChars(5)); + assertEquals(0, in.available()); + } + + private void bytesTest(char[] val) { + + char[] valBytes = new char[val.length]; + for (int i = 0; i < val.length; i += 1) + valBytes[i] = (char) (val[i] & 0xFF); + + for (int mode = 0; mode < 2; mode += 1) { + out.reset(); + switch (mode) { + case 0: out.writeBytes(val); break; + case 1: out.writeBytes(new String(val)); break; + default: throw new IllegalStateException(); + } + assertEquals(val.length, out.size()); + copyOutputToInput(); + char[] val2 = new char[val.length]; + in.readBytes(val2); + assertTrue(Arrays.equals(valBytes, val2)); + assertEquals(0, in.available()); + in.reset(); + String val3 = in.readBytes(val.length); + assertTrue(Arrays.equals(valBytes, val3.toCharArray())); + assertEquals(0, in.available()); + } + } + + public void testBytes() { + + bytesTest(new char[0]); + bytesTest(new char[] {'a'}); + bytesTest(new char[] {'a', 'b', 'c'}); + bytesTest(new char[] {0x7F00, 0x7FFF, 0xFF00, 0xFFFF}); + + out.reset(); + out.writeBytes("abc"); + out.writeBytes("defg"); + assertEquals(7, out.size()); + copyOutputToInput(); + assertEquals("abc", in.readBytes(3)); + assertEquals("defg", in.readBytes(4)); + assertEquals(0, in.available()); + + out.reset(); + out.writeBytes("abc"); + out.writeBytes("defg"); + out.writeBytes("hijkl"); + assertEquals(12, out.size()); + copyOutputToInput(); + assertEquals("abc", in.readBytes(3)); + assertEquals("defg", in.readBytes(4)); + assertEquals("hijkl", in.readBytes(5)); + assertEquals(0, in.available()); + } + + private void booleanTest(boolean val) { + + out.reset(); + out.writeBoolean(val); + assertEquals(1, out.size()); + copyOutputToInput(); + assertEquals(val, in.readBoolean()); + assertEquals(0, in.available()); + } + + public void testBoolean() { + + booleanTest(true); + booleanTest(false); + + out.reset(); + out.writeBoolean(true); + out.writeBoolean(false); + assertEquals(2, out.size()); + copyOutputToInput(); + assertEquals(true, in.readBoolean()); + 
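For anyone skimming this new test class, the pattern behind the copyOutputToInput() helper above is an ordinary write/copy/read round trip. Below is a minimal standalone sketch (the class name is invented for illustration); it uses only the TupleBinding, TupleOutput, TupleInput and DatabaseEntry calls that already appear in this file, and the commented sizes mirror the assertions above (a written String costs its length plus one terminator byte, an int costs four bytes):

    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;
    import com.sleepycat.db.DatabaseEntry;

    public class TupleRoundTripSketch {
        public static void main(String[] args) {
            TupleOutput out = TupleBinding.newOutput();
            out.writeString("abc");               // 3 chars + 1 terminator byte
            out.writeInt(123);                    // 4 bytes
            System.out.println(out.size());       // 8

            // Copy the written bytes into a DatabaseEntry, then read them back.
            DatabaseEntry entry = new DatabaseEntry();
            TupleBinding.outputToEntry(out, entry);
            TupleInput in = TupleBinding.entryToInput(entry);

            System.out.println(in.readString());  // abc
            System.out.println(in.readInt());     // 123
            System.out.println(in.available());   // 0 -- everything consumed
        }
    }
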
assertEquals(false, in.readBoolean()); + assertEquals(0, in.available()); + + out.reset(); + out.writeBoolean(true); + out.writeBoolean(false); + out.writeBoolean(true); + assertEquals(3, out.size()); + copyOutputToInput(); + assertEquals(true, in.readBoolean()); + assertEquals(false, in.readBoolean()); + assertEquals(true, in.readBoolean()); + assertEquals(0, in.available()); + } + + private void unsignedByteTest(int val) { + + unsignedByteTest(val, val); + } + + private void unsignedByteTest(int val, int expected) { + + out.reset(); + out.writeUnsignedByte(val); + assertEquals(1, out.size()); + copyOutputToInput(); + assertEquals(expected, in.readUnsignedByte()); + } + + public void testUnsignedByte() { + + unsignedByteTest(0); + unsignedByteTest(1); + unsignedByteTest(254); + unsignedByteTest(255); + unsignedByteTest(256, 0); + unsignedByteTest(-1, 255); + unsignedByteTest(-2, 254); + unsignedByteTest(-255, 1); + + out.reset(); + out.writeUnsignedByte(0); + out.writeUnsignedByte(1); + out.writeUnsignedByte(255); + assertEquals(3, out.size()); + copyOutputToInput(); + assertEquals(0, in.readUnsignedByte()); + assertEquals(1, in.readUnsignedByte()); + assertEquals(255, in.readUnsignedByte()); + assertEquals(0, in.available()); + } + + private void unsignedShortTest(int val) { + + unsignedShortTest(val, val); + } + + private void unsignedShortTest(int val, int expected) { + + out.reset(); + out.writeUnsignedShort(val); + assertEquals(2, out.size()); + copyOutputToInput(); + assertEquals(expected, in.readUnsignedShort()); + } + + public void testUnsignedShort() { + + unsignedShortTest(0); + unsignedShortTest(1); + unsignedShortTest(255); + unsignedShortTest(256); + unsignedShortTest(257); + unsignedShortTest(Short.MAX_VALUE - 1); + unsignedShortTest(Short.MAX_VALUE); + unsignedShortTest(Short.MAX_VALUE + 1); + unsignedShortTest(0xFFFF - 1); + unsignedShortTest(0xFFFF); + unsignedShortTest(0xFFFF + 1, 0); + unsignedShortTest(0x7FFF0000, 0); + unsignedShortTest(0xFFFF0000, 0); + unsignedShortTest(-1, 0xFFFF); + unsignedShortTest(-2, 0xFFFF - 1); + unsignedShortTest(-0xFFFF, 1); + + out.reset(); + out.writeUnsignedShort(0); + out.writeUnsignedShort(1); + out.writeUnsignedShort(0xFFFF); + assertEquals(6, out.size()); + copyOutputToInput(); + assertEquals(0, in.readUnsignedShort()); + assertEquals(1, in.readUnsignedShort()); + assertEquals(0xFFFF, in.readUnsignedShort()); + assertEquals(0, in.available()); + } + + private void unsignedIntTest(long val) { + + unsignedIntTest(val, val); + } + + private void unsignedIntTest(long val, long expected) { + + out.reset(); + out.writeUnsignedInt(val); + assertEquals(4, out.size()); + copyOutputToInput(); + assertEquals(expected, in.readUnsignedInt()); + } + + public void testUnsignedInt() { + + unsignedIntTest(0L); + unsignedIntTest(1L); + unsignedIntTest(255L); + unsignedIntTest(256L); + unsignedIntTest(257L); + unsignedIntTest(Short.MAX_VALUE - 1L); + unsignedIntTest(Short.MAX_VALUE); + unsignedIntTest(Short.MAX_VALUE + 1L); + unsignedIntTest(Integer.MAX_VALUE - 1L); + unsignedIntTest(Integer.MAX_VALUE); + unsignedIntTest(Integer.MAX_VALUE + 1L); + unsignedIntTest(0xFFFFFFFFL - 1L); + unsignedIntTest(0xFFFFFFFFL); + unsignedIntTest(0xFFFFFFFFL + 1L, 0L); + unsignedIntTest(0x7FFFFFFF00000000L, 0L); + unsignedIntTest(0xFFFFFFFF00000000L, 0L); + unsignedIntTest(-1, 0xFFFFFFFFL); + unsignedIntTest(-2, 0xFFFFFFFFL - 1L); + unsignedIntTest(-0xFFFFFFFFL, 1L); + + out.reset(); + out.writeUnsignedInt(0L); + out.writeUnsignedInt(1L); + 
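The expected values in unsignedByteTest(256, 0), unsignedByteTest(-1, 255) and unsignedShortTest(0xFFFF + 1, 0) above follow from the fact that only the low-order one or two bytes of the argument end up in the tuple. A small, library-free sketch of that masking arithmetic (the class name is invented for illustration):

    public class UnsignedWrapSketch {
        public static void main(String[] args) {
            // A one-byte unsigned field keeps only the low 8 bits of the int:
            int[] byteArgs = { 256, -1, -2, -255 };
            for (int i = 0; i < byteArgs.length; i++) {
                System.out.println(byteArgs[i] + " -> " + (byteArgs[i] & 0xFF));
            }
            // Prints: 256 -> 0, -1 -> 255, -2 -> 254, -255 -> 1

            // A two-byte unsigned field keeps only the low 16 bits:
            System.out.println((0xFFFF + 1) & 0xFFFF);  // 0
            System.out.println(-1 & 0xFFFF);            // 65535 (0xFFFF)
        }
    }
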
out.writeUnsignedInt(0xFFFFFFFFL); + assertEquals(12, out.size()); + copyOutputToInput(); + assertEquals(0L, in.readUnsignedInt()); + assertEquals(1L, in.readUnsignedInt()); + assertEquals(0xFFFFFFFFL, in.readUnsignedInt()); + assertEquals(0L, in.available()); + } + + private void byteTest(int val) { + + out.reset(); + out.writeByte(val); + assertEquals(1, out.size()); + copyOutputToInput(); + assertEquals((byte) val, in.readByte()); + } + + public void testByte() { + + byteTest(0); + byteTest(1); + byteTest(-1); + byteTest(Byte.MAX_VALUE - 1); + byteTest(Byte.MAX_VALUE); + byteTest(Byte.MAX_VALUE + 1); + byteTest(Byte.MIN_VALUE + 1); + byteTest(Byte.MIN_VALUE); + byteTest(Byte.MIN_VALUE - 1); + byteTest(0x7F); + byteTest(0xFF); + byteTest(0x7FFF); + byteTest(0xFFFF); + byteTest(0x7FFFFFFF); + byteTest(0xFFFFFFFF); + + out.reset(); + out.writeByte(0); + out.writeByte(1); + out.writeByte(-1); + assertEquals(3, out.size()); + copyOutputToInput(); + assertEquals(0, in.readByte()); + assertEquals(1, in.readByte()); + assertEquals(-1, in.readByte()); + assertEquals(0, in.available()); + } + + private void shortTest(int val) { + + out.reset(); + out.writeShort(val); + assertEquals(2, out.size()); + copyOutputToInput(); + assertEquals((short) val, in.readShort()); + } + + public void testShort() { + + shortTest(0); + shortTest(1); + shortTest(-1); + shortTest(Short.MAX_VALUE - 1); + shortTest(Short.MAX_VALUE); + shortTest(Short.MAX_VALUE + 1); + shortTest(Short.MIN_VALUE + 1); + shortTest(Short.MIN_VALUE); + shortTest(Short.MIN_VALUE - 1); + shortTest(0x7F); + shortTest(0xFF); + shortTest(0x7FFF); + shortTest(0xFFFF); + shortTest(0x7FFFFFFF); + shortTest(0xFFFFFFFF); + + out.reset(); + out.writeShort(0); + out.writeShort(1); + out.writeShort(-1); + assertEquals(3 * 2, out.size()); + copyOutputToInput(); + assertEquals(0, in.readShort()); + assertEquals(1, in.readShort()); + assertEquals(-1, in.readShort()); + assertEquals(0, in.available()); + } + + private void intTest(int val) { + + out.reset(); + out.writeInt(val); + assertEquals(4, out.size()); + copyOutputToInput(); + assertEquals((int) val, in.readInt()); + } + + public void testInt() { + + intTest(0); + intTest(1); + intTest(-1); + intTest(Integer.MAX_VALUE - 1); + intTest(Integer.MAX_VALUE); + intTest(Integer.MAX_VALUE + 1); + intTest(Integer.MIN_VALUE + 1); + intTest(Integer.MIN_VALUE); + intTest(Integer.MIN_VALUE - 1); + intTest(0x7F); + intTest(0xFF); + intTest(0x7FFF); + intTest(0xFFFF); + intTest(0x7FFFFFFF); + intTest(0xFFFFFFFF); + + out.reset(); + out.writeInt(0); + out.writeInt(1); + out.writeInt(-1); + assertEquals(3 * 4, out.size()); + copyOutputToInput(); + assertEquals(0, in.readInt()); + assertEquals(1, in.readInt()); + assertEquals(-1, in.readInt()); + assertEquals(0, in.available()); + } + + private void longTest(long val) { + + out.reset(); + out.writeLong(val); + assertEquals(8, out.size()); + copyOutputToInput(); + assertEquals((long) val, in.readLong()); + } + + public void testLong() { + + longTest(0); + longTest(1); + longTest(-1); + longTest(Long.MAX_VALUE - 1); + longTest(Long.MAX_VALUE); + longTest(Long.MAX_VALUE + 1); + longTest(Long.MIN_VALUE + 1); + longTest(Long.MIN_VALUE); + longTest(Long.MIN_VALUE - 1); + longTest(0x7F); + longTest(0xFF); + longTest(0x7FFF); + longTest(0xFFFF); + longTest(0x7FFFFFFF); + longTest(0xFFFFFFFF); + longTest(0x7FFFFFFFFFFFFFFFL); + longTest(0xFFFFFFFFFFFFFFFFL); + + out.reset(); + out.writeLong(0); + out.writeLong(1); + out.writeLong(-1); + assertEquals(3 * 8, out.size()); + 
copyOutputToInput(); + assertEquals(0, in.readLong()); + assertEquals(1, in.readLong()); + assertEquals(-1, in.readLong()); + assertEquals(0, in.available()); + } + + private void floatTest(double val) { + + out.reset(); + out.writeFloat((float) val); + assertEquals(4, out.size()); + copyOutputToInput(); + if (Double.isNaN(val)) { + assertTrue(Float.isNaN(in.readFloat())); + } else { + assertEquals((float) val, in.readFloat(), 0); + } + } + + public void testFloat() { + + floatTest(0); + floatTest(1); + floatTest(-1); + floatTest(1.0); + floatTest(0.1); + floatTest(-1.0); + floatTest(-0.1); + floatTest(Float.NaN); + floatTest(Float.NEGATIVE_INFINITY); + floatTest(Float.POSITIVE_INFINITY); + floatTest(Short.MAX_VALUE); + floatTest(Short.MIN_VALUE); + floatTest(Integer.MAX_VALUE); + floatTest(Integer.MIN_VALUE); + floatTest(Long.MAX_VALUE); + floatTest(Long.MIN_VALUE); + floatTest(Float.MAX_VALUE); + floatTest(Float.MAX_VALUE + 1); + floatTest(Float.MIN_VALUE + 1); + floatTest(Float.MIN_VALUE); + floatTest(Float.MIN_VALUE - 1); + floatTest(0x7F); + floatTest(0xFF); + floatTest(0x7FFF); + floatTest(0xFFFF); + floatTest(0x7FFFFFFF); + floatTest(0xFFFFFFFF); + floatTest(0x7FFFFFFFFFFFFFFFL); + floatTest(0xFFFFFFFFFFFFFFFFL); + + out.reset(); + out.writeFloat(0); + out.writeFloat(1); + out.writeFloat(-1); + assertEquals(3 * 4, out.size()); + copyOutputToInput(); + assertEquals(0, in.readFloat(), 0); + assertEquals(1, in.readFloat(), 0); + assertEquals(-1, in.readFloat(), 0); + assertEquals(0, in.available(), 0); + } + + private void doubleTest(double val) { + + out.reset(); + out.writeDouble((double) val); + assertEquals(8, out.size()); + copyOutputToInput(); + if (Double.isNaN(val)) { + assertTrue(Double.isNaN(in.readDouble())); + } else { + assertEquals((double) val, in.readDouble(), 0); + } + } + + public void testDouble() { + + doubleTest(0); + doubleTest(1); + doubleTest(-1); + doubleTest(1.0); + doubleTest(0.1); + doubleTest(-1.0); + doubleTest(-0.1); + doubleTest(Double.NaN); + doubleTest(Double.NEGATIVE_INFINITY); + doubleTest(Double.POSITIVE_INFINITY); + doubleTest(Short.MAX_VALUE); + doubleTest(Short.MIN_VALUE); + doubleTest(Integer.MAX_VALUE); + doubleTest(Integer.MIN_VALUE); + doubleTest(Long.MAX_VALUE); + doubleTest(Long.MIN_VALUE); + doubleTest(Float.MAX_VALUE); + doubleTest(Float.MIN_VALUE); + doubleTest(Double.MAX_VALUE - 1); + doubleTest(Double.MAX_VALUE); + doubleTest(Double.MAX_VALUE + 1); + doubleTest(Double.MIN_VALUE + 1); + doubleTest(Double.MIN_VALUE); + doubleTest(Double.MIN_VALUE - 1); + doubleTest(0x7F); + doubleTest(0xFF); + doubleTest(0x7FFF); + doubleTest(0xFFFF); + doubleTest(0x7FFFFFFF); + doubleTest(0xFFFFFFFF); + doubleTest(0x7FFFFFFFFFFFFFFFL); + doubleTest(0xFFFFFFFFFFFFFFFFL); + + out.reset(); + out.writeDouble(0); + out.writeDouble(1); + out.writeDouble(-1); + assertEquals(3 * 8, out.size()); + copyOutputToInput(); + assertEquals(0, in.readDouble(), 0); + assertEquals(1, in.readDouble(), 0); + assertEquals(-1, in.readDouble(), 0); + assertEquals(0, in.available(), 0); + } +} + diff --git a/db/test/scr024/src/com/sleepycat/bind/tuple/test/TupleOrderingTest.java b/db/test/scr024/src/com/sleepycat/bind/tuple/test/TupleOrderingTest.java new file mode 100644 index 000000000..5e8961f4c --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/bind/tuple/test/TupleOrderingTest.java @@ -0,0 +1,357 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. 
+ * + * $Id: TupleOrderingTest.java,v 1.4 2004/09/22 18:01:06 bostic Exp $ + */ + +package com.sleepycat.bind.tuple.test; + +import junit.framework.Test; +import junit.framework.TestCase; +import junit.framework.TestSuite; + +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.collections.test.DbTestUtil; + +/** + * @author Mark Hayes + */ +public class TupleOrderingTest extends TestCase { + + private TupleOutput out; + private byte[] prevBuf; + + public static void main(String[] args) + throws Exception { + + junit.framework.TestResult tr = + junit.textui.TestRunner.run(suite()); + if (tr.errorCount() > 0 || + tr.failureCount() > 0) { + System.exit(1); + } else { + System.exit(0); + } + } + + public static Test suite() + throws Exception { + + TestSuite suite = new TestSuite(TupleOrderingTest.class); + return suite; + } + + public TupleOrderingTest(String name) { + + super(name); + } + + public void setUp() { + + DbTestUtil.printTestName("TupleOrderingTest." + getName()); + out = TupleBinding.newOutput(); + prevBuf = null; + } + + public void tearDown() { + + /* Ensure that GC can cleanup. */ + out = null; + prevBuf = null; + } + + /** + * Each tuple written must be strictly less than (by comparison of bytes) + * the tuple written just before it. The check() method compares bytes + * just written to those written before the previous call to check(). + */ + private void check() { + + check(-1); + } + + private void check(int dataIndex) { + + byte[] buf = new byte[out.size()]; + System.arraycopy(out.getBufferBytes(), out.getBufferOffset(), + buf, 0, buf.length); + if (prevBuf != null) { + int errOffset = -1; + int len = Math.min(prevBuf.length, buf.length); + boolean areEqual = true; + for (int i = 0; i < len; i += 1) { + int val1 = prevBuf[i] & 0xFF; + int val2 = buf[i] & 0xFF; + if (val1 < val2) { + areEqual = false; + break; + } else if (val1 > val2) { + errOffset = i; + break; + } + } + if (areEqual) { + if (prevBuf.length < buf.length) { + areEqual = false; + } else if (prevBuf.length > buf.length) { + areEqual = false; + errOffset = buf.length + 1; + } + } + if (errOffset != -1 || areEqual) { + StringBuffer msg = new StringBuffer(); + if (errOffset != -1) { + msg.append("Left >= right at byte offset " + errOffset); + } else if (areEqual) { + msg.append("Bytes are equal"); + } else { + throw new IllegalStateException(); + } + msg.append("\nLeft hex bytes: "); + for (int i = 0; i < prevBuf.length; i += 1) { + msg.append(' '); + int val = prevBuf[i] & 0xFF; + if ((val & 0xF0) == 0) { + msg.append('0'); + } + msg.append(Integer.toHexString(val)); + } + msg.append("\nRight hex bytes:"); + for (int i = 0; i < buf.length; i += 1) { + msg.append(' '); + int val = buf[i] & 0xFF; + if ((val & 0xF0) == 0) { + msg.append('0'); + } + msg.append(Integer.toHexString(val)); + } + if (dataIndex >= 0) { + msg.append("\nData index: " + dataIndex); + } + fail(msg.toString()); + } + } + prevBuf = buf; + out.reset(); + } + + private void reset() { + + prevBuf = null; + out.reset(); + } + + public void testString() { + + final String[] DATA = { + "", "a", "ab", "b", "bb", "bba", + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeString(DATA[i]); + check(i); + } + reset(); + out.writeString("a"); + check(); + out.writeString("a"); + out.writeString(""); + check(); + out.writeString("a"); + out.writeString(""); + out.writeString("a"); + check(); + out.writeString("a"); + out.writeString("b"); + check(); + out.writeString("aa"); + check(); 
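check() above fails unless the bytes just written compare strictly greater, byte by byte as unsigned values (with a shorter prefix sorting first), than the bytes recorded on the previous call; that is why every DATA array in this class lists its values in ascending tuple order. A standalone sketch of the same comparison, reusing only the TupleOutput calls visible in this file (class and method names are invented for illustration):

    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.bind.tuple.TupleOutput;

    public class TupleOrderSketch {

        // Compare two serialized tuples the way check() does: unsigned bytes,
        // shorter prefix first.
        static int compareUnsigned(byte[] a, byte[] b) {
            int len = Math.min(a.length, b.length);
            for (int i = 0; i < len; i++) {
                int v1 = a[i] & 0xFF;
                int v2 = b[i] & 0xFF;
                if (v1 != v2) {
                    return v1 - v2;
                }
            }
            return a.length - b.length;
        }

        static byte[] tupleBytes(String s) {
            TupleOutput out = TupleBinding.newOutput();
            out.writeString(s);
            byte[] buf = new byte[out.size()];
            System.arraycopy(out.getBufferBytes(), out.getBufferOffset(),
                             buf, 0, buf.length);
            return buf;
        }

        public static void main(String[] args) {
            // Negative result: "ab" serializes strictly less than "b",
            // matching the order of the DATA array in testString().
            System.out.println(compareUnsigned(tupleBytes("ab"), tupleBytes("b")));
        }
    }
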
+ out.writeString("b"); + check(); + } + + public void testFixedString() { + + final char[][] DATA = { + {}, {'a'}, {'a', 'b'}, {'b'}, {'b', 'b'}, {0x7F}, {0xFF}, + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeString(DATA[i]); + check(i); + } + } + + public void testChars() { + + final char[][] DATA = { + {}, {0}, {'a'}, {'a', 0}, {'a', 'b'}, {'b'}, {'b', 'b'}, + {0x7F}, {0x7F, 0}, {0xFF}, {0xFF, 0}, + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeChars(DATA[i]); + check(i); + } + } + + public void testBytes() { + + final char[][] DATA = { + {}, {0}, {'a'}, {'a', 0}, {'a', 'b'}, {'b'}, {'b', 'b'}, + {0x7F}, {0xFF}, + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeBytes(DATA[i]); + check(i); + } + } + + public void testBoolean() { + + final boolean[] DATA = { + false, true + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeBoolean(DATA[i]); + check(i); + } + } + + public void testUnsignedByte() { + + final int[] DATA = { + 0, 1, 0x7F, 0xFF + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeUnsignedByte(DATA[i]); + check(i); + } + } + + public void testUnsignedShort() { + + final int[] DATA = { + 0, 1, 0xFE, 0xFF, 0x800, 0x7FFF, 0xFFFF + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeUnsignedShort(DATA[i]); + check(i); + } + } + + public void testUnsignedInt() { + + final long[] DATA = { + 0, 1, 0xFE, 0xFF, 0x800, 0x7FFF, 0xFFFF, 0x80000, + 0x7FFFFFFF, 0x80000000, 0xFFFFFFFF + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeUnsignedInt(DATA[i]); + check(i); + } + } + + public void testByte() { + + final byte[] DATA = { + Byte.MIN_VALUE, Byte.MIN_VALUE + 1, + -1, 0, 1, + Byte.MAX_VALUE - 1, Byte.MAX_VALUE, + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeByte(DATA[i]); + check(i); + } + } + + public void testShort() { + + final short[] DATA = { + Short.MIN_VALUE, Short.MIN_VALUE + 1, + Byte.MIN_VALUE, Byte.MIN_VALUE + 1, + -1, 0, 1, + Byte.MAX_VALUE - 1, Byte.MAX_VALUE, + Short.MAX_VALUE - 1, Short.MAX_VALUE, + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeShort(DATA[i]); + check(i); + } + } + + public void testInt() { + + final int[] DATA = { + Integer.MIN_VALUE, Integer.MIN_VALUE + 1, + Short.MIN_VALUE, Short.MIN_VALUE + 1, + Byte.MIN_VALUE, Byte.MIN_VALUE + 1, + -1, 0, 1, + Byte.MAX_VALUE - 1, Byte.MAX_VALUE, + Short.MAX_VALUE - 1, Short.MAX_VALUE, + Integer.MAX_VALUE - 1, Integer.MAX_VALUE, + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeInt(DATA[i]); + check(i); + } + } + + public void testLong() { + + final long[] DATA = { + Long.MIN_VALUE, Long.MIN_VALUE + 1, + Integer.MIN_VALUE, Integer.MIN_VALUE + 1, + Short.MIN_VALUE, Short.MIN_VALUE + 1, + Byte.MIN_VALUE, Byte.MIN_VALUE + 1, + -1, 0, 1, + Byte.MAX_VALUE - 1, Byte.MAX_VALUE, + Short.MAX_VALUE - 1, Short.MAX_VALUE, + Integer.MAX_VALUE - 1, Integer.MAX_VALUE, + Long.MAX_VALUE - 1, Long.MAX_VALUE, + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeLong(DATA[i]); + check(i); + } + } + + public void testFloat() { + + // Only positive floats and doubles are ordered deterministically + + final float[] DATA = { + 0, (float) 0.01, (float) 0.02, (float) 0.99, + 1, (float) 1.01, (float) 1.02, (float) 1.99, + Byte.MAX_VALUE - 1, Byte.MAX_VALUE, + Short.MAX_VALUE - 1, Short.MAX_VALUE, + Integer.MAX_VALUE, + Long.MAX_VALUE / 2, Long.MAX_VALUE, + Float.MAX_VALUE, + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeFloat(DATA[i]); + check(i); + } + } + + public void testDouble() { + + // Only positive floats and 
doubles are ordered deterministically + + final double[] DATA = { + 0, 0.001, 0.002, 0.999, + 1, 1.001, 1.002, 1.999, + Byte.MAX_VALUE - 1, Byte.MAX_VALUE, + Short.MAX_VALUE - 1, Short.MAX_VALUE, + Integer.MAX_VALUE - 1, Integer.MAX_VALUE, + Long.MAX_VALUE / 2, Long.MAX_VALUE, + Float.MAX_VALUE, Double.MAX_VALUE, + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeDouble(DATA[i]); + check(i); + } + } +} diff --git a/db/test/scr024/src/com/sleepycat/collections/KeyRangeTest.java b/db/test/scr024/src/com/sleepycat/collections/KeyRangeTest.java new file mode 100644 index 000000000..9e2abb853 --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/collections/KeyRangeTest.java @@ -0,0 +1,441 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: KeyRangeTest.java,v 1.4 2004/09/22 18:01:06 bostic Exp $ + */ + +package com.sleepycat.collections; + +import java.io.File; +import java.util.Arrays; +import java.util.Comparator; + +import junit.framework.Test; +import junit.framework.TestCase; +import junit.framework.TestSuite; + +import com.sleepycat.bind.ByteArrayBinding; +import com.sleepycat.collections.test.DbTestUtil; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseConfig; +import com.sleepycat.db.DatabaseEntry; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.Environment; +import com.sleepycat.db.EnvironmentConfig; +import com.sleepycat.db.OperationStatus; + +/** + * @author Mark Hayes + */ +public class KeyRangeTest extends TestCase { + + private static boolean VERBOSE = false; + + private static final byte FF = (byte) 0xFF; + + private static final byte[][] KEYS = { + /* 0 */ {1}, + /* 1 */ {FF}, + /* 2 */ {FF, 0}, + /* 3 */ {FF, 0x7F}, + /* 4 */ {FF, FF}, + /* 5 */ {FF, FF, 0}, + /* 6 */ {FF, FF, 0x7F}, + /* 7 */ {FF, FF, FF}, + }; + private static byte[][] EXTREME_KEY_BYTES = { + /* 0 */ {0}, + /* 1 */ {FF, FF, FF, FF}, + }; + + private Environment env; + private Database store; + private DataView view; + private DataCursor cursor; + + public static void main(String[] args) + throws Exception { + + junit.framework.TestResult tr = + junit.textui.TestRunner.run(suite()); + if (tr.errorCount() > 0 || + tr.failureCount() > 0) { + System.exit(1); + } else { + System.exit(0); + } + } + + public static Test suite() { + + return new TestSuite(KeyRangeTest.class); + } + + public KeyRangeTest(String name) { + + super(name); + } + + public void setUp() + throws Exception { + + DbTestUtil.printTestName(DbTestUtil.qualifiedTestName(this)); + } + + private void openDb(Comparator comparator) + throws Exception { + + File dir = DbTestUtil.getNewDir(); + ByteArrayBinding dataBinding = new ByteArrayBinding(); + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + DbCompat.setInitializeCache(envConfig, true); + env = new Environment(dir, envConfig); + DatabaseConfig dbConfig = new DatabaseConfig(); + DbCompat.setTypeBtree(dbConfig); + dbConfig.setAllowCreate(true); + if (comparator != null) { + DbCompat.setBtreeComparator(dbConfig, comparator); + } + store = DbCompat.openDatabase(env, null, "test.db", null, dbConfig); + view = new DataView(store, dataBinding, dataBinding, null, true, null); + } + + private void closeDb() + throws Exception { + + store.close(); + store = null; + env.close(); + env = null; + } + + public void tearDown() + throws Exception { + + try { + if (store != null) { + 
store.close(); + } + } catch (Exception e) { + System.out.println("Exception ignored during close: " + e); + } + try { + if (env != null) { + env.close(); + } + } catch (Exception e) { + System.out.println("Exception ignored during close: " + e); + } + /* Ensure that GC can cleanup. */ + env = null; + store = null; + view = null; + cursor = null; + } + + public void testScan() throws Exception { + openDb(null); + doScan(false); + closeDb(); + } + + public void testScanComparator() throws Exception { + openDb(new ReverseComparator()); + doScan(true); + closeDb(); + } + + private void doScan(boolean reversed) throws Exception { + + byte[][] keys = new byte[KEYS.length][]; + final int end = KEYS.length - 1; + cursor = new DataCursor(view, true); + for (int i = 0; i <= end; i++) { + keys[i] = KEYS[i]; + cursor.put(keys[i], KEYS[i], null, false); + } + cursor.close(); + byte[][] extremeKeys = new byte[EXTREME_KEY_BYTES.length][]; + for (int i = 0; i < extremeKeys.length; i++) { + extremeKeys[i] = EXTREME_KEY_BYTES[i]; + } + + // with empty range + + cursor = new DataCursor(view, false); + expectRange(KEYS, 0, end, reversed); + cursor.close(); + + // begin key only, inclusive + + for (int i = 0; i <= end; i++) { + cursor = newCursor(view, keys[i], true, null, false, reversed); + expectRange(KEYS, i, end, reversed); + cursor.close(); + } + + // begin key only, exclusive + + for (int i = 0; i <= end; i++) { + cursor = newCursor(view, keys[i], false, null, false, reversed); + expectRange(KEYS, i + 1, end, reversed); + cursor.close(); + } + + // end key only, inclusive + + for (int i = 0; i <= end; i++) { + cursor = newCursor(view, null, false, keys[i], true, reversed); + expectRange(KEYS, 0, i, reversed); + cursor.close(); + } + + // end key only, exclusive + + for (int i = 0; i <= end; i++) { + cursor = newCursor(view, null, false, keys[i], false, reversed); + expectRange(KEYS, 0, i - 1, reversed); + cursor.close(); + } + + // begin and end keys, inclusive and exclusive + + for (int i = 0; i <= end; i++) { + for (int j = i; j <= end; j++) { + // begin inclusive, end inclusive + + cursor = newCursor(view, keys[i], true, keys[j], + true, reversed); + expectRange(KEYS, i, j, reversed); + cursor.close(); + + // begin inclusive, end exclusive + + cursor = newCursor(view, keys[i], true, keys[j], + false, reversed); + expectRange(KEYS, i, j - 1, reversed); + cursor.close(); + + // begin exclusive, end inclusive + + cursor = newCursor(view, keys[i], false, keys[j], + true, reversed); + expectRange(KEYS, i + 1, j, reversed); + cursor.close(); + + // begin exclusive, end exclusive + + cursor = newCursor(view, keys[i], false, keys[j], + false, reversed); + expectRange(KEYS, i + 1, j - 1, reversed); + cursor.close(); + } + } + + // single key range + + for (int i = 0; i <= end; i++) { + cursor = new DataCursor(view, false, keys[i]); + expectRange(KEYS, i, i, reversed); + cursor.close(); + } + + // start with lower extreme (before any existing key) + + cursor = newCursor(view, extremeKeys[0], true, null, false, reversed); + expectRange(KEYS, 0, end, reversed); + cursor.close(); + + // start with higher extreme (after any existing key) + + cursor = newCursor(view, null, false, extremeKeys[1], true, reversed); + expectRange(KEYS, 0, end, reversed); + cursor.close(); + } + + private DataCursor newCursor(DataView view, + Object beginKey, boolean beginInclusive, + Object endKey, boolean endInclusive, + boolean reversed) + throws Exception { + + if (reversed) { + return new DataCursor(view, false, + endKey, 
endInclusive, + beginKey, beginInclusive); + } else { + return new DataCursor(view, false, + beginKey, beginInclusive, + endKey, endInclusive); + } + } + + private void expectRange(byte[][] bytes, int first, int last, + boolean reversed) + throws DatabaseException { + + int i; + boolean init; + for (init = true, i = first;; i++, init = false) { + if (checkRange(bytes, first, last, i <= last, + reversed, !reversed, init, i)) { + break; + } + } + for (init = true, i = last;; i--, init = false) { + if (checkRange(bytes, first, last, i >= first, + reversed, reversed, init, i)) { + break; + } + } + } + + private boolean checkRange(byte[][] bytes, int first, int last, + boolean inRange, boolean reversed, + boolean forward, boolean init, + int i) + throws DatabaseException { + + OperationStatus s; + if (forward) { + if (init) { + s = cursor.getFirst(false); + } else { + s = cursor.getNext(false); + } + } else { + if (init) { + s = cursor.getLast(false); + } else { + s = cursor.getPrev(false); + } + } + + String msg = " " + (forward ? "next" : "prev") + " i=" + i + + " first=" + first + " last=" + last + + (reversed ? " reversed" : " not reversed"); + + // check that moving past ends doesn't move the cursor + if (s == OperationStatus.SUCCESS && i == first) { + OperationStatus s2 = reversed ? cursor.getNext(false) + : cursor.getPrev(false); + assertEquals(msg, OperationStatus.NOTFOUND, s2); + } + if (s == OperationStatus.SUCCESS && i == last) { + OperationStatus s2 = reversed ? cursor.getPrev(false) + : cursor.getNext(false); + assertEquals(msg, OperationStatus.NOTFOUND, s2); + } + + byte[] val = (s == OperationStatus.SUCCESS) + ? ((byte[]) cursor.getCurrentValue()) + : null; + + if (inRange) { + assertNotNull("RangeNotFound" + msg, val); + + if (!Arrays.equals(val, bytes[i])){ + printBytes(val); + printBytes(bytes[i]); + fail("RangeKeyNotEqual" + msg); + } + if (VERBOSE) { + System.out.println("GotRange" + msg); + } + return false; + } else { + assertEquals("RangeExceeded" + msg, OperationStatus.NOTFOUND, s); + return true; + } + } + + private void printBytes(byte[] bytes) { + + for (int i = 0; i < bytes.length; i += 1) { + System.out.print(Integer.toHexString(bytes[i] & 0xFF)); + System.out.print(' '); + } + System.out.println(); + } + + public void testSubRanges() { + + DatabaseEntry begin = new DatabaseEntry(); + DatabaseEntry begin2 = new DatabaseEntry(); + DatabaseEntry end = new DatabaseEntry(); + DatabaseEntry end2 = new DatabaseEntry(); + KeyRange range = new KeyRange(null); + KeyRange range2; + + /* Base range [1, 2] */ + begin.setData(new byte[] { 1 }); + end.setData(new byte[] { 2 }); + range = range.subRange(begin, true, end, true); + + /* Subrange (0, 1] is invalid **. */ + begin2.setData(new byte[] { 0 }); + end2.setData(new byte[] { 1 }); + try { + range2 = range.subRange(begin2, false, end2, true); + fail(); + } catch (KeyRangeException expected) {} + + /* Subrange [1, 3) is invalid. */ + begin2.setData(new byte[] { 1 }); + end2.setData(new byte[] { 3 }); + try { + range2 = range.subRange(begin2, true, end2, false); + fail(); + } catch (KeyRangeException expected) {} + + /* Subrange [2, 2] is valid. */ + begin2.setData(new byte[] { 2 }); + end2.setData(new byte[] { 2 }); + range2 = range.subRange(begin2, true, end2, true); + + /* Subrange [0, 1] is invalid. */ + begin2.setData(new byte[] { 0 }); + end2.setData(new byte[] { 1 }); + try { + range2 = range.subRange(begin2, true, end2, true); + fail(); + } catch (KeyRangeException expected) {} + + /* Subrange (0, 3] is invalid. 
*/ + begin2.setData(new byte[] { 0 }); + end2.setData(new byte[] { 3 }); + try { + range2 = range.subRange(begin2, false, end2, true); + fail(); + } catch (KeyRangeException expected) {} + + /* Subrange [3, 3) is invalid. */ + begin2.setData(new byte[] { 3 }); + end2.setData(new byte[] { 3 }); + try { + range2 = range.subRange(begin2, true, end2, false); + fail(); + } catch (KeyRangeException expected) {} + } + + public static class ReverseComparator implements Comparator { + public int compare(Object o1, Object o2) { + byte[] d1 = (byte[]) o1; + byte[] d2 = (byte[]) o2; + int cmp = KeyRange.compareBytes(d1, 0, d1.length, + d2, 0, d2.length); + if (cmp < 0) { + return 1; + } else if (cmp > 0) { + return -1; + } else { + return 0; + } + } + } +} diff --git a/db/test/scr024/src/com/sleepycat/collections/test/CollectionTest.java b/db/test/scr024/src/com/sleepycat/collections/test/CollectionTest.java new file mode 100644 index 000000000..fe39ddc17 --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/collections/test/CollectionTest.java @@ -0,0 +1,2814 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: CollectionTest.java,v 1.3 2004/09/22 18:01:06 bostic Exp $ + */ + +package com.sleepycat.collections.test; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Set; +import java.util.SortedMap; +import java.util.SortedSet; + +import junit.framework.Test; +import junit.framework.TestCase; +import junit.framework.TestSuite; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.collections.CurrentTransaction; +import com.sleepycat.collections.MapEntryParameter; +import com.sleepycat.collections.StoredCollection; +import com.sleepycat.collections.StoredCollections; +import com.sleepycat.collections.StoredContainer; +import com.sleepycat.collections.StoredEntrySet; +import com.sleepycat.collections.StoredIterator; +import com.sleepycat.collections.StoredKeySet; +import com.sleepycat.collections.StoredList; +import com.sleepycat.collections.StoredMap; +import com.sleepycat.collections.StoredSortedEntrySet; +import com.sleepycat.collections.StoredSortedKeySet; +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.collections.StoredSortedValueSet; +import com.sleepycat.collections.StoredValueSet; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.Environment; +import com.sleepycat.util.ExceptionUnwrapper; + +/** + * @author Mark Hayes + */ +public class CollectionTest extends TestCase { + + private static final int NONE = 0; + private static final int SUB = 1; + private static final int HEAD = 2; + private static final int TAIL = 3; + + private static final int MAX_KEY = 6; // must be a multiple of 2 + + private int beginKey = 1; + private int endKey = MAX_KEY; + + private Environment env; + private CurrentTransaction currentTxn; + private Database store; + private Database index; + private boolean isEntityBinding; + private boolean isAutoCommit; + private TestStore testStore; + private String testName; + 
private EntryBinding keyBinding; + private EntryBinding valueBinding; + private EntityBinding entityBinding; + private TransactionRunner readRunner; + private TransactionRunner writeRunner; + private TransactionRunner writeIterRunner; + private TestEnv testEnv; + + private StoredMap map; + private StoredMap imap; // insertable map (primary store for indexed map) + private StoredSortedMap smap; // sorted map (null or equal to map) + private StoredMap saveMap; + private StoredSortedMap saveSMap; + private int rangeType; + private StoredList list; + private StoredList ilist; // insertable list (primary store for index list) + private StoredList saveList; + private StoredKeySet keySet; + private StoredValueSet valueSet; + + /** + * Runs a command line collection test. + * @see #usage + */ + public static void main(String[] args) + throws Exception { + + if (args.length == 1 && + (args[0].equals("-h") || args[0].equals("-help"))) { + usage(); + } else { + junit.framework.TestResult tr = + junit.textui.TestRunner.run(suite(args)); + if (tr.errorCount() > 0 || + tr.failureCount() > 0) { + System.exit(1); + } else { + System.exit(0); + } + } + } + + private static void usage() { + + System.out.println( + "Usage: java com.sleepycat.collections.test.CollectionTest\n" + + " -h | -help\n" + + " [testName]...\n" + + " where testName has the format:\n" + + " <env>-<store>-{entity|value}\n" + + " <env> is:\n" + + " bdb | cdb | txn\n" + + " <store> is:\n" + + " btree-uniq | btree-dup | btree-dupsort | btree-recnum |\n" + + " hash-uniq | hash-dup | hash-dupsort |\n" + + " queue | recno | recno-renum\n" + + " For example: bdb-btree-uniq-entity\n" + + " If no arguments are given then all tests are run."); + System.exit(2); + } + + public static Test suite() + throws Exception { + + return suite(null); + } + + static Test suite(String[] args) + throws Exception { + + TestSuite suite = new TestSuite(); + for (int i = 0; i < TestEnv.ALL.length; i += 1) { + for (int j = 0; j < TestStore.ALL.length; j += 1) { + for (int k = 0; k < 2; k += 1) { + boolean entityBinding = (k != 0); + + addTest(args, suite, new CollectionTest( + TestEnv.ALL[i], TestStore.ALL[j], + entityBinding, false)); + + if (TestEnv.ALL[i] == TestEnv.TXN) { + addTest(args, suite, new CollectionTest( + TestEnv.ALL[i], TestStore.ALL[j], + entityBinding, true)); + } + } + } + } + return suite; + } + + private static void addTest(String[] args, TestSuite suite, + CollectionTest test) { + + if (args == null || args.length == 0) { + suite.addTest(test); + } else { + for (int t = 0; t < args.length; t += 1) { + if (args[t].equals(test.testName)) { + suite.addTest(test); + break; + } + } + } + } + + public CollectionTest(TestEnv testEnv, TestStore testStore, + boolean isEntityBinding, boolean isAutoCommit) { + + super(null); + + this.testEnv = testEnv; + this.testStore = testStore; + this.isEntityBinding = isEntityBinding; + this.isAutoCommit = isAutoCommit; + + keyBinding = testStore.getKeyBinding(); + valueBinding = testStore.getValueBinding(); + entityBinding = testStore.getEntityBinding(); + + testName = testEnv.getName() + '-' + testStore.getName() + + (isEntityBinding ? "-entity" : "-value") + + (isAutoCommit ? 
"-autocommit" : ""); + setName(testName); + } + + public void runTest() + throws Exception { + + DbTestUtil.printTestName(DbTestUtil.qualifiedTestName(this)); + try { + env = testEnv.open(testName); + currentTxn = CurrentTransaction.getInstance(env); + + // For testing auto-commit, use a normal (transactional) runner for + // all reading and for writing via an iterator, and a do-nothing + // runner for writing via collections; if auto-commit is tested, + // the per-collection auto-commit property will be set elsewhere. + // + TransactionRunner normalRunner = new TransactionRunner(env); + normalRunner.setAllowNestedTransactions( + DbCompat.NESTED_TRANSACTIONS); + TransactionRunner nullRunner = new NullTransactionRunner(env); + readRunner = nullRunner; + writeIterRunner = normalRunner; + if (isAutoCommit) { + writeRunner = nullRunner; + } else { + writeRunner = normalRunner; + } + + store = testStore.open(env, "unindexed.db"); + testUnindexed(); + store.close(); + store = null; + + TestStore indexOf = testStore.getIndexOf(); + if (indexOf != null) { + store = indexOf.open(env, "indexed.db"); + index = testStore.openIndex(store, "index.db"); + testIndexed(); + index.close(); + index = null; + store.close(); + store = null; + } + env.close(); + env = null; + } catch (Exception e) { + throw ExceptionUnwrapper.unwrap(e); + } finally { + if (index != null) { + try { + index.close(); + } catch (Exception e) { + } + } + if (store != null) { + try { + store.close(); + } catch (Exception e) { + } + } + if (env != null) { + try { + env.close(); + } catch (Exception e) { + } + } + /* Ensure that GC can cleanup. */ + index = null; + store = null; + env = null; + currentTxn = null; + readRunner = null; + writeRunner = null; + writeIterRunner = null; + map = null; + imap = null; + smap = null; + saveMap = null; + saveSMap = null; + list = null; + ilist = null; + saveList = null; + keySet = null; + valueSet = null; + testEnv = null; + testStore = null; + } + } + + void testCreation(StoredContainer cont) + throws Exception { + + assertEquals(index != null, cont.isSecondary()); + assertEquals(testStore.isOrdered(), cont.isOrdered()); + assertEquals(testStore.areKeysRenumbered(), cont.areKeysRenumbered()); + assertEquals(testStore.areDuplicatesAllowed(), + cont.areDuplicatesAllowed()); + assertEquals(testEnv == TestEnv.TXN, cont.isTransactional()); + try { + cont.size(); + fail(); + } + catch (UnsupportedOperationException expected) {} + } + + void testMapCreation(Map map) + throws Exception { + + assertTrue(map.values() instanceof Set); + assertEquals(testStore.isOrdered(), + map.keySet() instanceof SortedSet); + assertEquals(testStore.isOrdered(), + map.entrySet() instanceof SortedSet); + assertEquals(testStore.isOrdered() && isEntityBinding, + map.values() instanceof SortedSet); + } + + void testUnindexed() + throws Exception { + + // create primary map + if (testStore.isOrdered()) { + if (isEntityBinding) { + smap = new StoredSortedMap(store, keyBinding, + entityBinding, + testStore.getKeyAssigner()); + valueSet = new StoredSortedValueSet(store, entityBinding, + true); + } else { + smap = new StoredSortedMap(store, keyBinding, + valueBinding, + testStore.getKeyAssigner()); + // sorted value set is not possible since key cannot be derived + // for performing subSet, etc. 
+ } + keySet = new StoredSortedKeySet(store, keyBinding, true); + map = smap; + } else { + if (isEntityBinding) { + map = new StoredMap(store, keyBinding, entityBinding, + testStore.getKeyAssigner()); + valueSet = new StoredValueSet(store, entityBinding, true); + } else { + map = new StoredMap(store, keyBinding, valueBinding, + testStore.getKeyAssigner()); + valueSet = new StoredValueSet(store, valueBinding, true); + } + smap = null; + keySet = new StoredKeySet(store, keyBinding, true); + } + imap = map; + + // create primary list + if (testStore.hasRecNumAccess()) { + if (isEntityBinding) { + ilist = new StoredList(store, entityBinding, + testStore.getKeyAssigner()); + } else { + ilist = new StoredList(store, valueBinding, + testStore.getKeyAssigner()); + } + list = ilist; + } else { + try { + if (isEntityBinding) { + ilist = new StoredList(store, entityBinding, + testStore.getKeyAssigner()); + } else { + ilist = new StoredList(store, valueBinding, + testStore.getKeyAssigner()); + } + fail(); + } catch (IllegalArgumentException expected) {} + } + + testCreation(map); + if (list != null) { + testCreation(list); + assertNotNull(smap); + } + testMapCreation(map); + addAll(); + testAll(); + } + + void testIndexed() + throws Exception { + + // create primary map + if (isEntityBinding) { + map = new StoredMap(store, keyBinding, entityBinding, + testStore.getKeyAssigner()); + } else { + map = new StoredMap(store, keyBinding, valueBinding, + testStore.getKeyAssigner()); + } + imap = map; + smap = null; + // create primary list + if (testStore.hasRecNumAccess()) { + if (isEntityBinding) { + list = new StoredList(store, entityBinding, + testStore.getKeyAssigner()); + } else { + list = new StoredList(store, valueBinding, + testStore.getKeyAssigner()); + } + ilist = list; + } + + addAll(); + readAll(); + + // create indexed map (keySet/valueSet) + if (testStore.isOrdered()) { + if (isEntityBinding) { + map = smap = new StoredSortedMap(index, keyBinding, + entityBinding, true); + valueSet = new StoredSortedValueSet(index, entityBinding, + true); + } else { + map = smap = new StoredSortedMap(index, keyBinding, + valueBinding, true); + // sorted value set is not possible since key cannot be derived + // for performing subSet, etc. 
+ } + keySet = new StoredSortedKeySet(index, keyBinding, true); + } else { + if (isEntityBinding) { + map = new StoredMap(index, keyBinding, entityBinding, true); + valueSet = new StoredValueSet(index, entityBinding, true); + } else { + map = new StoredMap(index, keyBinding, valueBinding, true); + valueSet = new StoredValueSet(index, valueBinding, true); + } + smap = null; + keySet = new StoredKeySet(index, keyBinding, true); + } + + // create indexed list + if (testStore.hasRecNumAccess()) { + if (isEntityBinding) { + list = new StoredList(index, entityBinding, true); + } else { + list = new StoredList(index, valueBinding, true); + } + } else { + try { + if (isEntityBinding) { + list = new StoredList(index, entityBinding, true); + } else { + list = new StoredList(index, valueBinding, true); + } + fail(); + } + catch (IllegalArgumentException expected) {} + } + + testCreation(map); + testCreation((StoredContainer) map.values()); + testCreation((StoredContainer) map.keySet()); + testCreation((StoredContainer) map.entrySet()); + if (list != null) { + testCreation(list); + assertNotNull(smap); + } + testMapCreation(map); + testAll(); + } + + void testAll() + throws Exception { + + checkKeySetAndValueSet(); + readAll(); + updateAll(); + readAll(); + if (!map.areKeysRenumbered()) { + removeOdd(); + readEven(); + addOdd(); + readAll(); + removeOddIter(); + readEven(); + if (imap.areDuplicatesAllowed()) { + addOddDup(); + } else { + addOdd(); + } + readAll(); + removeOddEntry(); + readEven(); + addOdd(); + readAll(); + if (isEntityBinding) { + removeOddEntity(); + readEven(); + addOddEntity(); + readAll(); + } + bulkOperations(); + } + if (isListAddAllowed()) { + removeOddList(); + readEvenList(); + addOddList(); + readAll(); + if (!isEntityBinding) { + removeOddListValue(); + readEvenList(); + addOddList(); + readAll(); + } + } + if (list != null) { + bulkListOperations(); + } else { + listOperationsNotAllowed(); + } + if (smap != null) { + readWriteRange(SUB, 1, 1); + readWriteRange(HEAD, 1, 1); + readWriteRange(SUB, 1, MAX_KEY); + readWriteRange(HEAD, 1, MAX_KEY); + readWriteRange(TAIL, 1, MAX_KEY); + readWriteRange(SUB, 1, 3); + readWriteRange(HEAD, 1, 3); + readWriteRange(SUB, 2, 2); + readWriteRange(SUB, 2, MAX_KEY); + readWriteRange(TAIL, 2, MAX_KEY); + readWriteRange(SUB, MAX_KEY, MAX_KEY); + readWriteRange(TAIL, MAX_KEY, MAX_KEY); + readWriteRange(SUB, MAX_KEY + 1, MAX_KEY + 1); + readWriteRange(TAIL, MAX_KEY + 1, MAX_KEY + 1); + readWriteRange(SUB, 0, 0); + readWriteRange(HEAD, 0, 0); + } + updateAll(); + readAll(); + if (map.areDuplicatesAllowed()) { + readWriteDuplicates(); + readAll(); + } else { + duplicatesNotAllowed(); + readAll(); + } + if (testEnv.isCdbMode()) { + testCdbLocking(); + } + removeAll(); + if (isListAddAllowed()) { + testIterAddList(); + clearAll(); + } + if (imap.areDuplicatesAllowed()) { + testIterAddDuplicates(); + clearAll(); + } + if (isListAddAllowed()) { + addAllList(); + readAll(); + removeAllList(); + } + appendAll(); + } + + void checkKeySetAndValueSet() { + + // use bulk operations to check that explicitly constructed + // keySet/valueSet are equivalent + assertTrue(imap.keySet().equals(keySet)); + if (valueSet != null) { + assertTrue(imap.values().equals(valueSet)); + } + } + + void addAll() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + assertTrue(imap.isEmpty()); + Iterator iter = imap.entrySet().iterator(); + try { + assertTrue(!iter.hasNext()); + } finally { + StoredIterator.close(iter); 
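As the comment in runTest() above explains, every write in this class is funneled through either a real TransactionRunner or the test's do-nothing NullTransactionRunner. For readers new to the collections API, here is a compact sketch of that worker pattern; it assumes an already-opened transactional Environment and an existing StoredMap view (both built elsewhere, e.g. as in testUnindexed()), and the class and method names are invented for illustration:

    import com.sleepycat.collections.StoredMap;
    import com.sleepycat.collections.TransactionRunner;
    import com.sleepycat.collections.TransactionWorker;
    import com.sleepycat.db.Environment;

    public class RunnerSketch {

        // Perform one atomic unit of work against a StoredMap; the runner is
        // responsible for beginning the transaction, committing it when
        // doWork() returns normally, and aborting it when doWork() throws.
        static void putOne(Environment env, final StoredMap map,
                           final Long key, final Object val)
            throws Exception {

            TransactionRunner runner = new TransactionRunner(env);
            runner.run(new TransactionWorker() {
                public void doWork() throws Exception {
                    map.put(key, val);
                }
            });
        }
    }
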
+ } + assertEquals(0, imap.keySet().toArray().length); + assertEquals(0, imap.keySet().toArray(new Object[0]).length); + assertEquals(0, imap.entrySet().toArray().length); + assertEquals(0, imap.entrySet().toArray(new Object[0]).length); + assertEquals(0, imap.values().toArray().length); + assertEquals(0, imap.values().toArray(new Object[0]).length); + + for (int i = beginKey; i <= endKey; i += 1) { + Long key = makeKey(i); + Object val = makeVal(i); + assertNull(imap.get(key)); + assertTrue(!imap.keySet().contains(key)); + assertTrue(!imap.values().contains(val)); + assertNull(imap.put(key, val)); + assertEquals(val, imap.get(key)); + assertTrue(imap.keySet().contains(key)); + assertTrue(imap.values().contains(val)); + assertTrue(imap.duplicates(key).contains(val)); + if (!imap.areDuplicatesAllowed()) { + assertEquals(val, imap.put(key, val)); + } + checkDupsSize(1, imap.duplicates(key)); + } + assertTrue(!imap.isEmpty()); + } + }); + } + + void appendAll() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + assertTrue(imap.isEmpty()); + + TestKeyAssigner keyAssigner = testStore.getKeyAssigner(); + if (keyAssigner != null) { + keyAssigner.reset(); + } + + for (int i = beginKey; i <= endKey; i += 1) { + boolean useList = (i & 1) == 0; + Long key = makeKey(i); + Object val = makeVal(i); + assertNull(imap.get(key)); + if (keyAssigner != null) { + if (useList && ilist != null) { + assertEquals(i - 1, ilist.append(val)); + } else { + assertEquals(key, imap.append(val)); + } + assertEquals(val, imap.get(key)); + } else { + Long recnoKey; + if (useList && ilist != null) { + recnoKey = new Long(ilist.append(val) + 1); + } else { + recnoKey = (Long) imap.append(val); + } + assertNotNull(recnoKey); + Object recnoVal; + if (isEntityBinding) { + recnoVal = makeEntity(recnoKey.intValue(), i); + } else { + recnoVal = val; + } + assertEquals(recnoVal, imap.get(recnoKey)); + } + } + } + }); + } + + void updateAll() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + for (int i = beginKey; i <= endKey; i += 1) { + Long key = makeKey(i); + Object val = makeVal(i); + if (!imap.areDuplicatesAllowed()) { + assertEquals(val, imap.put(key, val)); + } + if (isEntityBinding) { + assertTrue(!imap.values().add(val)); + } + checkDupsSize(1, imap.duplicates(key)); + if (ilist != null) { + int idx = i - 1; + assertEquals(val, ilist.set(idx, val)); + } + } + updateIter(map.entrySet()); + updateIter(map.values()); + if (beginKey <= endKey) { + ListIterator iter = (ListIterator) map.keySet().iterator(); + try { + assertNotNull(iter.next()); + iter.set(makeKey(beginKey)); + fail(); + } catch (UnsupportedOperationException e) { + } finally { + StoredIterator.close(iter); + } + } + if (list != null) { + updateIter(list); + } + } + }); + } + + void updateIter(final Collection coll) + throws Exception { + + writeIterRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + ListIterator iter = (ListIterator) coll.iterator(); + try { + for (int i = beginKey; i <= endKey; i += 1) { + assertTrue(iter.hasNext()); + Object obj = iter.next(); + if (index != null) { + try { + setValuePlusOne(iter, obj); + fail(); + } + catch (UnsupportedOperationException e) {} + } else if + (((StoredCollection) coll).areDuplicatesOrdered()) { + try { + setValuePlusOne(iter, obj); + fail(); + } catch (RuntimeException e) { + Exception e2 = ExceptionUnwrapper.unwrap(e); + assertTrue(e2.getClass().getName(), + e2 
instanceof IllegalArgumentException || + e2 instanceof DatabaseException); + } + } else { + setValuePlusOne(iter, obj); + } + } + assertTrue(!iter.hasNext()); + } finally { + StoredIterator.close(iter); + } + } + }); + } + + void setValuePlusOne(ListIterator iter, Object obj) { + + if (obj instanceof Map.Entry) { + Map.Entry entry = (Map.Entry) obj; + Long key = (Long) entry.getKey(); + Object oldVal = entry.getValue(); + Object val = makeVal(key.intValue() + 1); + if (isEntityBinding) { + try { + // must fail on attempt to change the key via an entity + entry.setValue(val); + fail(); + } + catch (IllegalArgumentException e) {} + val = makeEntity(key.intValue(), key.intValue() + 1); + } + entry.setValue(val); + assertEquals(val, entry.getValue()); + assertEquals(val, map.get(key)); + assertTrue(map.duplicates(key).contains(val)); + checkDupsSize(1, map.duplicates(key)); + entry.setValue(oldVal); + assertEquals(oldVal, entry.getValue()); + assertEquals(oldVal, map.get(key)); + assertTrue(map.duplicates(key).contains(oldVal)); + checkDupsSize(1, map.duplicates(key)); + } else { + Object oldVal = obj; + Long key = makeKey(intVal(obj)); + Object val = makeVal(key.intValue() + 1); + if (isEntityBinding) { + try { + // must fail on attempt to change the key via an entity + iter.set(val); + fail(); + } + catch (IllegalArgumentException e) {} + val = makeEntity(key.intValue(), key.intValue() + 1); + } + iter.set(val); + assertEquals(val, map.get(key)); + assertTrue(map.duplicates(key).contains(val)); + checkDupsSize(1, map.duplicates(key)); + iter.set(oldVal); + assertEquals(oldVal, map.get(key)); + assertTrue(map.duplicates(key).contains(oldVal)); + checkDupsSize(1, map.duplicates(key)); + } + } + + void removeAll() + throws Exception { + + writeIterRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + assertTrue(!map.isEmpty()); + ListIterator iter = null; + try { + if (list != null) { + iter = list.listIterator(); + } else { + iter = (ListIterator) map.values().iterator(); + } + iteratorSetAndRemoveNotAllowed(iter); + + Object val = iter.next(); + assertNotNull(val); + iter.remove(); + iteratorSetAndRemoveNotAllowed(iter); + + if (index == null) { + val = iter.next(); + assertNotNull(val); + iter.set(val); + + if (map.areDuplicatesAllowed()) { + iter.add(makeVal(intVal(val), intVal(val) + 1)); + iteratorSetAndRemoveNotAllowed(iter); + } + } + } finally { + StoredIterator.close(iter); + } + map.clear(); + assertTrue(map.isEmpty()); + assertTrue(map.entrySet().isEmpty()); + assertTrue(map.keySet().isEmpty()); + assertTrue(map.values().isEmpty()); + for (int i = beginKey; i <= endKey; i += 1) { + Long key = makeKey(i); + Object val = makeVal(i); + assertNull(map.get(key)); + assertTrue(!map.duplicates(key).contains(val)); + checkDupsSize(0, map.duplicates(key)); + } + } + }); + } + + void clearAll() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + map.clear(); + assertTrue(map.isEmpty()); + } + }); + } + + void iteratorSetAndRemoveNotAllowed(ListIterator i) { + + try { + i.remove(); + fail(); + } catch (IllegalStateException e) {} + + if (index == null) { + try { + Object val = makeVal(1); + i.set(val); + fail(); + } + catch (IllegalStateException e) {} + } + } + + void removeOdd() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + boolean toggle = false; + for (int i = beginKey; i <= endKey; i += 2) { + toggle = !toggle; + Long key = makeKey(i); + 
Object val = makeVal(i); + if (toggle) { + assertTrue(map.keySet().contains(key)); + assertTrue(map.keySet().remove(key)); + assertTrue(!map.keySet().contains(key)); + } else { + assertTrue(map.containsValue(val)); + Object oldVal = map.remove(key); + assertEquals(oldVal, val); + assertTrue(!map.containsKey(key)); + assertTrue(!map.containsValue(val)); + } + assertNull(map.get(key)); + assertTrue(!map.duplicates(key).contains(val)); + checkDupsSize(0, map.duplicates(key)); + } + } + }); + } + + void removeOddEntity() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + for (int i = beginKey; i <= endKey; i += 2) { + Long key = makeKey(i); + Object val = makeVal(i); + assertTrue(map.values().contains(val)); + assertTrue(map.values().remove(val)); + assertTrue(!map.values().contains(val)); + assertNull(map.get(key)); + assertTrue(!map.duplicates(key).contains(val)); + checkDupsSize(0, map.duplicates(key)); + } + } + }); + } + + void removeOddEntry() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + for (int i = beginKey; i <= endKey; i += 2) { + Long key = makeKey(i); + Object val = mapEntry(i); + assertTrue(map.entrySet().contains(val)); + assertTrue(map.entrySet().remove(val)); + assertTrue(!map.entrySet().contains(val)); + assertNull(map.get(key)); + } + } + }); + } + + void removeOddIter() + throws Exception { + + writeIterRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + Iterator iter = map.keySet().iterator(); + try { + for (int i = beginKey; i <= endKey; i += 1) { + assertTrue(iter.hasNext()); + Long key = (Long) iter.next(); + assertNotNull(key); + if (map instanceof SortedMap) { + assertEquals(makeKey(i), key); + } + if ((key.intValue() & 1) != 0) { + iter.remove(); + } + } + } finally { + StoredIterator.close(iter); + } + } + }); + } + + void removeOddList() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + for (int i = beginKey; i <= endKey; i += 2) { + // remove by index + // (with entity binding, embbeded keys in values are + // being changed so we can't use values for comparison) + int idx = (i - beginKey) / 2; + Object val = makeVal(i); + if (!isEntityBinding) { + assertTrue(list.contains(val)); + assertEquals(val, list.get(idx)); + assertEquals(idx, list.indexOf(val)); + } + assertNotNull(list.get(idx)); + if (isEntityBinding) { + assertNotNull(list.remove(idx)); + } else { + assertTrue(list.contains(val)); + assertEquals(val, list.remove(idx)); + } + assertTrue(!list.remove(val)); + assertTrue(!list.contains(val)); + assertTrue(!val.equals(list.get(idx))); + } + } + }); + } + + void removeOddListValue() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + for (int i = beginKey; i <= endKey; i += 2) { + // for non-entity case remove by value + // (with entity binding, embbeded keys in values are + // being changed so we can't use values for comparison) + int idx = (i - beginKey) / 2; + Object val = makeVal(i); + assertTrue(list.contains(val)); + assertEquals(val, list.get(idx)); + assertEquals(idx, list.indexOf(val)); + assertTrue(list.remove(val)); + assertTrue(!list.remove(val)); + assertTrue(!list.contains(val)); + assertTrue(!val.equals(list.get(idx))); + } + } + }); + } + + void addOdd() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + // add 
using Map.put() + for (int i = beginKey; i <= endKey; i += 2) { + Long key = makeKey(i); + Object val = makeVal(i); + assertNull(imap.get(key)); + assertNull(imap.put(key, val)); + assertEquals(val, imap.get(key)); + assertTrue(imap.duplicates(key).contains(val)); + checkDupsSize(1, imap.duplicates(key)); + if (isEntityBinding) { + assertTrue(!imap.values().add(val)); + } + if (!imap.areDuplicatesAllowed()) { + assertEquals(val, imap.put(key, val)); + } + } + } + }); + } + + void addOddEntity() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + // add using Map.values().add() + for (int i = beginKey; i <= endKey; i += 2) { + Long key = makeKey(i); + Object val = makeVal(i); + assertNull(imap.get(key)); + assertTrue(!imap.values().contains(val)); + assertTrue(imap.values().add(val)); + assertEquals(val, imap.get(key)); + assertTrue(imap.values().contains(val)); + assertTrue(imap.duplicates(key).contains(val)); + checkDupsSize(1, imap.duplicates(key)); + if (isEntityBinding) { + assertTrue(!imap.values().add(val)); + } + } + } + }); + } + + void addOddDup() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + // add using Map.duplicates().add() + for (int i = beginKey; i <= endKey; i += 2) { + Long key = makeKey(i); + Object val = makeVal(i); + assertNull(imap.get(key)); + assertTrue(!imap.values().contains(val)); + assertTrue(imap.duplicates(key).add(val)); + assertEquals(val, imap.get(key)); + assertTrue(imap.values().contains(val)); + assertTrue(imap.duplicates(key).contains(val)); + checkDupsSize(1, imap.duplicates(key)); + assertTrue(!imap.duplicates(key).add(val)); + if (isEntityBinding) { + assertTrue(!imap.values().add(val)); + } + } + } + }); + } + + void addOddList() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + for (int i = beginKey; i <= endKey; i += 2) { + int idx = i - beginKey; + Object val = makeVal(i); + assertTrue(!list.contains(val)); + assertTrue(!val.equals(list.get(idx))); + list.add(idx, val); + assertTrue(list.contains(val)); + assertEquals(val, list.get(idx)); + } + } + }); + } + + void addAllList() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + for (int i = beginKey; i <= endKey; i += 1) { + int idx = i - beginKey; + Object val = makeVal(i); + assertTrue(!list.contains(val)); + assertTrue(list.add(val)); + assertTrue(list.contains(val)); + assertEquals(val, list.get(idx)); + } + } + }); + } + + void removeAllList() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + assertTrue(!list.isEmpty()); + list.clear(); + assertTrue(list.isEmpty()); + for (int i = beginKey; i <= endKey; i += 1) { + int idx = i - beginKey; + Object val = makeVal(i); + assertNull(list.get(idx)); + } + } + }); + } + + void testIterAddList() + throws Exception { + + writeIterRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + ListIterator i = list.listIterator(); + try { + assertTrue(!i.hasNext()); + i.add(makeVal(3)); + assertTrue(!i.hasNext()); + assertTrue(i.hasPrevious()); + assertEquals(3, intVal(i.previous())); + + i.add(makeVal(1)); + assertTrue(i.hasPrevious()); + assertTrue(i.hasNext()); + assertEquals(1, intVal(i.previous())); + assertTrue(i.hasNext()); + assertEquals(1, intVal(i.next())); + assertTrue(i.hasNext()); + assertEquals(3, intVal(i.next())); + 
assertEquals(3, intVal(i.previous())); + + assertTrue(i.hasNext()); + i.add(makeVal(2)); + assertTrue(i.hasNext()); + assertTrue(i.hasPrevious()); + assertEquals(2, intVal(i.previous())); + assertTrue(i.hasNext()); + assertEquals(2, intVal(i.next())); + assertTrue(i.hasNext()); + assertEquals(3, intVal(i.next())); + + assertTrue(!i.hasNext()); + i.add(makeVal(4)); + i.add(makeVal(5)); + assertTrue(!i.hasNext()); + assertEquals(5, intVal(i.previous())); + assertEquals(4, intVal(i.previous())); + assertEquals(3, intVal(i.previous())); + assertEquals(2, intVal(i.previous())); + assertEquals(1, intVal(i.previous())); + assertTrue(!i.hasPrevious()); + } finally { + StoredIterator.close(i); + } + } + }); + } + + void testIterAddDuplicates() + throws Exception { + + writeIterRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + assertNull(imap.put(makeKey(1), makeVal(1))); + ListIterator i = + (ListIterator) imap.duplicates(makeKey(1)).iterator(); + try { + if (imap.areDuplicatesOrdered()) { + i.add(makeVal(1, 4)); + i.add(makeVal(1, 2)); + i.add(makeVal(1, 3)); + while (i.hasPrevious()) i.previous(); + assertEquals(1, intVal(i.next())); + assertEquals(2, intVal(i.next())); + assertEquals(3, intVal(i.next())); + assertEquals(4, intVal(i.next())); + assertTrue(!i.hasNext()); + } else { + assertEquals(1, intVal(i.next())); + i.add(makeVal(1, 2)); + i.add(makeVal(1, 3)); + assertTrue(!i.hasNext()); + assertTrue(i.hasPrevious()); + assertEquals(3, intVal(i.previous())); + assertEquals(2, intVal(i.previous())); + assertEquals(1, intVal(i.previous())); + assertTrue(!i.hasPrevious()); + i.add(makeVal(1, 4)); + i.add(makeVal(1, 5)); + assertTrue(i.hasNext()); + assertEquals(5, intVal(i.previous())); + assertEquals(4, intVal(i.previous())); + assertTrue(!i.hasPrevious()); + assertEquals(4, intVal(i.next())); + assertEquals(5, intVal(i.next())); + assertEquals(1, intVal(i.next())); + assertEquals(2, intVal(i.next())); + assertEquals(3, intVal(i.next())); + assertTrue(!i.hasNext()); + } + } finally { + StoredIterator.close(i); + } + } + }); + } + + void readAll() + throws Exception { + + readRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + // map + + assertNotNull(map.toString()); + for (int i = beginKey; i <= endKey; i += 1) { + Long key = makeKey(i); + Object val = map.get(key); + assertEquals(makeVal(i), val); + assertTrue(map.containsKey(key)); + assertTrue(map.containsValue(val)); + assertTrue(map.keySet().contains(key)); + assertTrue(map.values().contains(val)); + assertTrue(map.duplicates(key).contains(val)); + checkDupsSize(1, map.duplicates(key)); + } + assertNull(map.get(makeKey(-1))); + assertNull(map.get(makeKey(0))); + assertNull(map.get(makeKey(beginKey - 1))); + assertNull(map.get(makeKey(endKey + 1))); + checkDupsSize(0, map.duplicates(makeKey(-1))); + checkDupsSize(0, map.duplicates(makeKey(0))); + checkDupsSize(0, map.duplicates(makeKey(beginKey - 1))); + checkDupsSize(0, map.duplicates(makeKey(endKey + 1))); + + // entrySet + + Set set = map.entrySet(); + assertNotNull(set.toString()); + assertEquals(beginKey > endKey, set.isEmpty()); + Iterator iter = set.iterator(); + try { + for (int i = beginKey; i <= endKey; i += 1) { + assertTrue(iter.hasNext()); + Map.Entry entry = (Map.Entry) iter.next(); + Long key = (Long) entry.getKey(); + Object val = entry.getValue(); + if (map instanceof SortedMap) { + assertEquals(intKey(key), i); + } + assertEquals(intKey(key), intVal(val)); + assertTrue(set.contains(entry)); + } + 
assertTrue(!iter.hasNext()); + } finally { + StoredIterator.close(iter); + } + Map.Entry[] entries = + (Map.Entry[]) set.toArray(new Map.Entry[0]); + assertNotNull(entries); + assertEquals(endKey - beginKey + 1, entries.length); + for (int i = beginKey; i <= endKey; i += 1) { + Map.Entry entry = entries[i - beginKey]; + assertNotNull(entry); + if (map instanceof SortedMap) { + assertEquals(makeKey(i), entry.getKey()); + assertEquals(makeVal(i), entry.getValue()); + } + } + readIterator(set, set.iterator(), beginKey, endKey); + if (smap != null) { + SortedSet sset = (SortedSet) set; + if (beginKey == 1 && endKey >= 1) { + readIterator(sset, sset.subSet(mapEntry(1), + mapEntry(2)) + .iterator(), 1, 1); + } + if (beginKey <= 2 && endKey >= 2) { + readIterator(sset, sset.subSet(mapEntry(2), + mapEntry(3)) + .iterator(), 2, 2); + } + if (beginKey <= endKey) { + readIterator(sset, sset.subSet( + mapEntry(endKey), + mapEntry(endKey + 1)) + .iterator(), + endKey, endKey); + } + if (isSubMap()) { + if (beginKey <= endKey) { + if (rangeType != TAIL) { + try { + sset.subSet(mapEntry(endKey + 1), + mapEntry(endKey + 2)); + fail(); + } catch (IllegalArgumentException e) {} + } + if (rangeType != HEAD) { + try { + sset.subSet(mapEntry(0), + mapEntry(1)); + fail(); + } catch (IllegalArgumentException e) {} + } + } + } else { + readIterator(sset, sset.subSet( + mapEntry(endKey + 1), + mapEntry(endKey + 2)) + .iterator(), + endKey, endKey - 1); + readIterator(sset, sset.subSet(mapEntry(0), + mapEntry(1)) + .iterator(), + 0, -1); + } + } + + // keySet + + set = map.keySet(); + assertNotNull(set.toString()); + assertEquals(beginKey > endKey, set.isEmpty()); + iter = set.iterator(); + try { + for (int i = beginKey; i <= endKey; i += 1) { + assertTrue(iter.hasNext()); + Long key = (Long) iter.next(); + assertTrue(set.contains(key)); + Object val = map.get(key); + if (map instanceof SortedMap) { + assertEquals(key, makeKey(i)); + } + assertEquals(intKey(key), intVal(val)); + } + assertTrue("" + beginKey + ' ' + endKey, !iter.hasNext()); + } finally { + StoredIterator.close(iter); + } + Long[] keys = (Long[]) set.toArray(new Long[0]); + assertNotNull(keys); + assertEquals(endKey - beginKey + 1, keys.length); + for (int i = beginKey; i <= endKey; i += 1) { + Long key = keys[i - beginKey]; + assertNotNull(key); + if (map instanceof SortedMap) { + assertEquals(makeKey(i), key); + } + } + readIterator(set, set.iterator(), beginKey, endKey); + + // values + + Collection coll = map.values(); + assertNotNull(coll.toString()); + assertEquals(beginKey > endKey, coll.isEmpty()); + iter = coll.iterator(); + try { + for (int i = beginKey; i <= endKey; i += 1) { + assertTrue(iter.hasNext()); + Object val = iter.next(); + if (map instanceof SortedMap) { + assertEquals(makeVal(i), val); + } + } + assertTrue(!iter.hasNext()); + } finally { + StoredIterator.close(iter); + } + Object[] values = coll.toArray(); + assertNotNull(values); + assertEquals(endKey - beginKey + 1, values.length); + for (int i = beginKey; i <= endKey; i += 1) { + Object val = values[i - beginKey]; + assertNotNull(val); + if (map instanceof SortedMap) { + assertEquals(makeVal(i), val); + } + } + readIterator(coll, coll.iterator(), beginKey, endKey); + + // list + + if (list != null) { + assertNotNull(list.toString()); + assertEquals(beginKey > endKey, list.isEmpty()); + for (int i = beginKey; i <= endKey; i += 1) { + int idx = i - beginKey; + Object val = list.get(idx); + assertEquals(makeVal(i), val); + assertTrue(list.contains(val)); + assertEquals(idx, 
list.indexOf(val)); + assertEquals(idx, list.lastIndexOf(val)); + } + ListIterator li = list.listIterator(); + try { + for (int i = beginKey; i <= endKey; i += 1) { + int idx = i - beginKey; + assertTrue(li.hasNext()); + assertEquals(idx, li.nextIndex()); + Object val = li.next(); + assertEquals(makeVal(i), val); + assertEquals(idx, li.previousIndex()); + } + assertTrue(!li.hasNext()); + } finally { + StoredIterator.close(li); + } + if (beginKey < endKey) { + li = list.listIterator(1); + try { + for (int i = beginKey + 1; i <= endKey; i += 1) { + int idx = i - beginKey; + assertTrue(li.hasNext()); + assertEquals(idx, li.nextIndex()); + Object val = li.next(); + assertEquals(makeVal(i), val); + assertEquals(idx, li.previousIndex()); + } + assertTrue(!li.hasNext()); + } finally { + StoredIterator.close(li); + } + } + values = list.toArray(); + assertNotNull(values); + assertEquals(endKey - beginKey + 1, values.length); + for (int i = beginKey; i <= endKey; i += 1) { + Object val = values[i - beginKey]; + assertNotNull(val); + assertEquals(makeVal(i), val); + } + readIterator(list, list.iterator(), beginKey, endKey); + } + + // first/last + + if (smap != null) { + if (beginKey <= endKey && + beginKey >= 1 && beginKey <= MAX_KEY) { + assertEquals(makeKey(beginKey), + smap.firstKey()); + assertEquals(makeKey(beginKey), + ((SortedSet) smap.keySet()).first()); + Object entry = ((SortedSet) smap.entrySet()).first(); + assertEquals(makeKey(beginKey), + ((Map.Entry) entry).getKey()); + if (smap.values() instanceof SortedSet) { + assertEquals(makeVal(beginKey), + ((SortedSet) smap.values()).first()); + } + } else { + assertNull(smap.firstKey()); + assertNull(((SortedSet) smap.keySet()).first()); + assertNull(((SortedSet) smap.entrySet()).first()); + if (smap.values() instanceof SortedSet) { + assertNull(((SortedSet) smap.values()).first()); + } + } + if (beginKey <= endKey && + endKey >= 1 && endKey <= MAX_KEY) { + assertEquals(makeKey(endKey), + smap.lastKey()); + assertEquals(makeKey(endKey), + ((SortedSet) smap.keySet()).last()); + Object entry = ((SortedSet) smap.entrySet()).last(); + assertEquals(makeKey(endKey), + ((Map.Entry) entry).getKey()); + if (smap.values() instanceof SortedSet) { + assertEquals(makeVal(endKey), + ((SortedSet) smap.values()).last()); + } + } else { + assertNull(smap.lastKey()); + assertNull(((SortedSet) smap.keySet()).last()); + assertNull(((SortedSet) smap.entrySet()).last()); + if (smap.values() instanceof SortedSet) { + assertNull(((SortedSet) smap.values()).last()); + } + } + } + } + }); + } + + void readEven() + throws Exception { + + readRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + int readBegin = ((beginKey & 1) != 0) ? + (beginKey + 1) : beginKey; + int readEnd = ((endKey & 1) != 0) ? 
(endKey - 1) : endKey; + int readIncr = 2; + + // map + + for (int i = beginKey; i <= endKey; i += 1) { + Long key = makeKey(i); + if ((i & 1) == 0) { + Object val = map.get(key); + assertEquals(makeVal(i), val); + assertTrue(map.containsKey(key)); + assertTrue(map.containsValue(val)); + assertTrue(map.keySet().contains(key)); + assertTrue(map.values().contains(val)); + assertTrue(map.duplicates(key).contains(val)); + checkDupsSize(1, map.duplicates(key)); + } else { + Object val = makeVal(i); + assertTrue(!map.containsKey(key)); + assertTrue(!map.containsValue(val)); + assertTrue(!map.keySet().contains(key)); + assertTrue(!map.values().contains(val)); + assertTrue(!map.duplicates(key).contains(val)); + checkDupsSize(0, map.duplicates(key)); + } + } + + // entrySet + + Set set = map.entrySet(); + assertEquals(beginKey > endKey, set.isEmpty()); + Iterator iter = set.iterator(); + try { + for (int i = readBegin; i <= readEnd; i += readIncr) { + assertTrue(iter.hasNext()); + Map.Entry entry = (Map.Entry) iter.next(); + Long key = (Long) entry.getKey(); + Object val = entry.getValue(); + if (map instanceof SortedMap) { + assertEquals(intKey(key), i); + } + assertEquals(intKey(key), intVal(val)); + assertTrue(set.contains(entry)); + } + assertTrue(!iter.hasNext()); + } finally { + StoredIterator.close(iter); + } + + // keySet + + set = map.keySet(); + assertEquals(beginKey > endKey, set.isEmpty()); + iter = set.iterator(); + try { + for (int i = readBegin; i <= readEnd; i += readIncr) { + assertTrue(iter.hasNext()); + Long key = (Long) iter.next(); + assertTrue(set.contains(key)); + Object val = map.get(key); + if (map instanceof SortedMap) { + assertEquals(key, makeKey(i)); + } + assertEquals(intKey(key), intVal(val)); + } + assertTrue(!iter.hasNext()); + } finally { + StoredIterator.close(iter); + } + + // values + + Collection coll = map.values(); + assertEquals(beginKey > endKey, coll.isEmpty()); + iter = coll.iterator(); + try { + for (int i = readBegin; i <= readEnd; i += readIncr) { + assertTrue(iter.hasNext()); + Object val = iter.next(); + if (map instanceof SortedMap) { + assertEquals(makeVal(i), val); + } + } + assertTrue(!iter.hasNext()); + } finally { + StoredIterator.close(iter); + } + + + // list not used since keys may not be renumbered for this + // method to work in general + + // first/last + + if (smap != null) { + if (readBegin <= readEnd && + readBegin >= 1 && readBegin <= MAX_KEY) { + assertEquals(makeKey(readBegin), + smap.firstKey()); + assertEquals(makeKey(readBegin), + ((SortedSet) smap.keySet()).first()); + Object entry = ((SortedSet) smap.entrySet()).first(); + assertEquals(makeKey(readBegin), + ((Map.Entry) entry).getKey()); + if (smap.values() instanceof SortedSet) { + assertEquals(makeVal(readBegin), + ((SortedSet) smap.values()).first()); + } + } else { + assertNull(smap.firstKey()); + assertNull(((SortedSet) smap.keySet()).first()); + assertNull(((SortedSet) smap.entrySet()).first()); + if (smap.values() instanceof SortedSet) { + assertNull(((SortedSet) smap.values()).first()); + } + } + if (readBegin <= readEnd && + readEnd >= 1 && readEnd <= MAX_KEY) { + assertEquals(makeKey(readEnd), + smap.lastKey()); + assertEquals(makeKey(readEnd), + ((SortedSet) smap.keySet()).last()); + Object entry = ((SortedSet) smap.entrySet()).last(); + assertEquals(makeKey(readEnd), + ((Map.Entry) entry).getKey()); + if (smap.values() instanceof SortedSet) { + assertEquals(makeVal(readEnd), + ((SortedSet) smap.values()).last()); + } + } else { + assertNull(smap.lastKey()); + 
assertNull(((SortedSet) smap.keySet()).last()); + assertNull(((SortedSet) smap.entrySet()).last()); + if (smap.values() instanceof SortedSet) { + assertNull(((SortedSet) smap.values()).last()); + } + } + } + } + }); + } + + void readEvenList() + throws Exception { + + readRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + int readBegin = ((beginKey & 1) != 0) ? + (beginKey + 1) : beginKey; + int readEnd = ((endKey & 1) != 0) ? (endKey - 1) : endKey; + int readIncr = 2; + + assertEquals(beginKey > endKey, list.isEmpty()); + ListIterator iter = list.listIterator(); + try { + int idx = 0; + for (int i = readBegin; i <= readEnd; i += readIncr) { + assertTrue(iter.hasNext()); + assertEquals(idx, iter.nextIndex()); + Object val = iter.next(); + assertEquals(idx, iter.previousIndex()); + if (isEntityBinding) { + assertEquals(i, intVal(val)); + } else { + assertEquals(makeVal(i), val); + } + idx += 1; + } + assertTrue(!iter.hasNext()); + } finally { + StoredIterator.close(iter); + } + } + }); + } + + void readIterator(Collection coll, Iterator iter, + int beginValue, int endValue) { + + ListIterator li = (ListIterator) iter; + boolean isList = (coll instanceof List); + Iterator clone = null; + try { + // at beginning + assertTrue(!li.hasPrevious()); + assertTrue(!li.hasPrevious()); + try { li.previous(); } catch (NoSuchElementException e) {} + if (isList) { + assertEquals(-1, li.previousIndex()); + } + if (endValue < beginValue) { + // is empty + assertTrue(!iter.hasNext()); + try { iter.next(); } catch (NoSuchElementException e) {} + if (isList) { + assertEquals(Integer.MAX_VALUE, li.nextIndex()); + } + } + // loop thru all and collect in array + StoredIterator si = (StoredIterator) iter; + int[] values = new int[endValue - beginValue + 1]; + for (int i = beginValue; i <= endValue; i += 1) { + assertTrue(iter.hasNext()); + int idx = i - beginKey; + if (isList) { + assertEquals(idx, li.nextIndex()); + } + int value = intIter(coll, iter.next()); + if (isList) { + assertEquals(idx, li.previousIndex()); + } + values[i - beginValue] = value; + if (si.getCollection().isOrdered()) { + assertEquals(i, value); + } else { + assertTrue(value >= beginValue); + assertTrue(value <= endValue); + } + } + // at end + assertTrue(!iter.hasNext()); + try { iter.next(); } catch (NoSuchElementException e) {} + if (isList) { + assertEquals(Integer.MAX_VALUE, li.nextIndex()); + } + // clone at same position + clone = StoredCollections.iterator(iter); + assertTrue(!clone.hasNext()); + // loop thru in reverse + for (int i = endValue; i >= beginValue; i -= 1) { + assertTrue(li.hasPrevious()); + int idx = i - beginKey; + if (isList) { + assertEquals(idx, li.previousIndex()); + } + int value = intIter(coll, li.previous()); + if (isList) { + assertEquals(idx, li.nextIndex()); + } + assertEquals(values[i - beginValue], value); + } + // clone should not have changed + assertTrue(!clone.hasNext()); + // at beginning + assertTrue(!li.hasPrevious()); + try { li.previous(); } catch (NoSuchElementException e) {} + if (isList) { + assertEquals(-1, li.previousIndex()); + } + // loop thru with some back-and-forth + for (int i = beginValue; i <= endValue; i += 1) { + assertTrue(iter.hasNext()); + int idx = i - beginKey; + if (isList) { + assertEquals(idx, li.nextIndex()); + } + Object obj = iter.next(); + if (isList) { + assertEquals(idx, li.previousIndex()); + } + assertEquals(obj, li.previous()); + if (isList) { + assertEquals(idx, li.nextIndex()); + } + assertEquals(obj, iter.next()); + if (isList) { + 
assertEquals(idx, li.previousIndex()); + } + int value = intIter(coll, obj); + assertEquals(values[i - beginValue], value); + } + // at end + assertTrue(!iter.hasNext()); + try { iter.next(); } catch (NoSuchElementException e) {} + if (isList) { + assertEquals(Integer.MAX_VALUE, li.nextIndex()); + } + } finally { + StoredIterator.close(iter); + StoredIterator.close(clone); + } + } + + void bulkOperations() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + HashMap hmap = new HashMap(); + for (int i = Math.max(1, beginKey); + i <= Math.min(MAX_KEY, endKey); + i += 1) { + hmap.put(makeKey(i), makeVal(i)); + } + assertTrue(map.equals(hmap)); + assertTrue(map.entrySet().equals(hmap.entrySet())); + assertTrue(map.keySet().equals(hmap.keySet())); + assertTrue(map.values().equals(hmap.values())); + + assertTrue(map.entrySet().containsAll(hmap.entrySet())); + assertTrue(map.keySet().containsAll(hmap.keySet())); + assertTrue(map.values().containsAll(hmap.values())); + + map.clear(); + assertTrue(map.isEmpty()); + imap.putAll(hmap); + assertTrue(map.equals(hmap)); + + assertTrue(map.entrySet().removeAll(hmap.entrySet())); + assertTrue(map.entrySet().isEmpty()); + assertTrue(!map.entrySet().removeAll(hmap.entrySet())); + assertTrue(imap.entrySet().addAll(hmap.entrySet())); + assertTrue(map.entrySet().containsAll(hmap.entrySet())); + assertTrue(!imap.entrySet().addAll(hmap.entrySet())); + assertTrue(map.equals(hmap)); + + assertTrue(!map.entrySet().retainAll(hmap.entrySet())); + assertTrue(map.equals(hmap)); + assertTrue(map.entrySet().retainAll(Collections.EMPTY_SET)); + assertTrue(map.isEmpty()); + imap.putAll(hmap); + assertTrue(map.equals(hmap)); + + assertTrue(map.values().removeAll(hmap.values())); + assertTrue(map.values().isEmpty()); + assertTrue(!map.values().removeAll(hmap.values())); + if (isEntityBinding) { + assertTrue(imap.values().addAll(hmap.values())); + assertTrue(map.values().containsAll(hmap.values())); + assertTrue(!imap.values().addAll(hmap.values())); + } else { + imap.putAll(hmap); + } + assertTrue(map.equals(hmap)); + + assertTrue(!map.values().retainAll(hmap.values())); + assertTrue(map.equals(hmap)); + assertTrue(map.values().retainAll(Collections.EMPTY_SET)); + assertTrue(map.isEmpty()); + imap.putAll(hmap); + assertTrue(map.equals(hmap)); + + assertTrue(map.keySet().removeAll(hmap.keySet())); + assertTrue(map.keySet().isEmpty()); + assertTrue(!map.keySet().removeAll(hmap.keySet())); + assertTrue(imap.keySet().addAll(hmap.keySet())); + assertTrue(imap.keySet().containsAll(hmap.keySet())); + if (index != null) { + assertTrue(map.keySet().isEmpty()); + } + assertTrue(!imap.keySet().addAll(hmap.keySet())); + // restore values to non-null + imap.keySet().removeAll(hmap.keySet()); + imap.putAll(hmap); + assertTrue(map.equals(hmap)); + + assertTrue(!map.keySet().retainAll(hmap.keySet())); + assertTrue(map.equals(hmap)); + assertTrue(map.keySet().retainAll(Collections.EMPTY_SET)); + assertTrue(map.isEmpty()); + imap.putAll(hmap); + assertTrue(map.equals(hmap)); + } + }); + } + + void bulkListOperations() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + ArrayList alist = new ArrayList(); + for (int i = beginKey; i <= endKey; i += 1) { + alist.add(makeVal(i)); + } + + assertTrue(list.equals(alist)); + assertTrue(list.containsAll(alist)); + + if (isListAddAllowed()) { + list.clear(); + assertTrue(list.isEmpty()); + assertTrue(ilist.addAll(alist)); + 
assertTrue(list.equals(alist)); + } + + assertTrue(!list.retainAll(alist)); + assertTrue(list.equals(alist)); + + if (isListAddAllowed()) { + assertTrue(list.retainAll(Collections.EMPTY_SET)); + assertTrue(list.isEmpty()); + assertTrue(ilist.addAll(alist)); + assertTrue(list.equals(alist)); + } + + if (isListAddAllowed() && !isEntityBinding) { + // deleting in a renumbered list with entity binding will + // change the values dynamically, making it very difficult + // to test + assertTrue(list.removeAll(alist)); + assertTrue(list.isEmpty()); + assertTrue(!list.removeAll(alist)); + assertTrue(ilist.addAll(alist)); + assertTrue(list.containsAll(alist)); + assertTrue(list.equals(alist)); + } + + if (isListAddAllowed() && !isEntityBinding) { + // addAll at an index is also very difficult to test with + // an entity binding + + // addAll at first index + ilist.addAll(beginKey, alist); + assertTrue(list.containsAll(alist)); + assertEquals(2 * alist.size(), countElements(list)); + for (int i = beginKey; i <= endKey; i += 1) + ilist.remove(beginKey); + assertTrue(list.equals(alist)); + + // addAll at last index + ilist.addAll(endKey, alist); + assertTrue(list.containsAll(alist)); + assertEquals(2 * alist.size(), countElements(list)); + for (int i = beginKey; i <= endKey; i += 1) + ilist.remove(endKey); + assertTrue(list.equals(alist)); + + // addAll in the middle + ilist.addAll(endKey - 1, alist); + assertTrue(list.containsAll(alist)); + assertEquals(2 * alist.size(), countElements(list)); + for (int i = beginKey; i <= endKey; i += 1) + ilist.remove(endKey - 1); + assertTrue(list.equals(alist)); + } + } + }); + } + + void readWriteRange(final int type, final int rangeBegin, + final int rangeEnd) + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + setRange(type, rangeBegin, rangeEnd); + createOutOfRange(rangeBegin, rangeEnd); + if (rangeType != TAIL) { + writeOutOfRange(new Long(rangeEnd + 1)); + } + if (rangeType != HEAD) { + writeOutOfRange(new Long(rangeBegin - 1)); + } + if (rangeBegin <= rangeEnd) { + updateAll(); + } + if (rangeBegin < rangeEnd && !map.areKeysRenumbered()) { + bulkOperations(); + } + readAll(); + clearRange(); + } + }); + } + + void setRange(int type, int rangeBegin, int rangeEnd) { + + rangeType = type; + saveMap = map; + saveSMap = smap; + saveList = list; + int listBegin = rangeBegin - beginKey; + boolean canMakeSubList = (list != null && listBegin>= 0); + if (!canMakeSubList) { + list = null; + } + if (list != null) { + try { + list.subList(-1, 0); + fail(); + } catch (IndexOutOfBoundsException e) { } + } + switch (type) { + + case SUB: + smap = (StoredSortedMap) smap.subMap(makeKey(rangeBegin), + makeKey(rangeEnd + 1)); + if (canMakeSubList) { + list = (StoredList) list.subList(listBegin, + rangeEnd + 1 - beginKey); + } + // check for equivalent ranges + assertEquals(smap, + ((StoredSortedMap) saveSMap).subMap( + makeKey(rangeBegin), true, + makeKey(rangeEnd + 1), false)); + assertEquals(smap.entrySet(), + ((StoredSortedEntrySet) saveSMap.entrySet()).subSet( + mapEntry(rangeBegin), true, + mapEntry(rangeEnd + 1), false)); + assertEquals(smap.keySet(), + ((StoredSortedKeySet) saveSMap.keySet()).subSet( + makeKey(rangeBegin), true, + makeKey(rangeEnd + 1), false)); + if (smap.values() instanceof SortedSet) { + assertEquals(smap.values(), + ((StoredSortedValueSet) saveSMap.values()).subSet( + makeVal(rangeBegin), true, + makeVal(rangeEnd + 1), false)); + } + break; + case HEAD: + smap = (StoredSortedMap) 
smap.headMap(makeKey(rangeEnd + 1)); + if (canMakeSubList) { + list = (StoredList) list.subList(0, + rangeEnd + 1 - beginKey); + } + // check for equivalent ranges + assertEquals(smap, + ((StoredSortedMap) saveSMap).headMap( + makeKey(rangeEnd + 1), false)); + assertEquals(smap.entrySet(), + ((StoredSortedEntrySet) saveSMap.entrySet()).headSet( + mapEntry(rangeEnd + 1), false)); + assertEquals(smap.keySet(), + ((StoredSortedKeySet) saveSMap.keySet()).headSet( + makeKey(rangeEnd + 1), false)); + if (smap.values() instanceof SortedSet) { + assertEquals(smap.values(), + ((StoredSortedValueSet) saveSMap.values()).headSet( + makeVal(rangeEnd + 1), false)); + } + break; + case TAIL: + smap = (StoredSortedMap) smap.tailMap(makeKey(rangeBegin)); + if (canMakeSubList) { + list = (StoredList) list.subList(listBegin, + MAX_KEY + 1 - beginKey); + } + // check for equivalent ranges + assertEquals(smap, + ((StoredSortedMap) saveSMap).tailMap( + makeKey(rangeBegin), true)); + assertEquals(smap.entrySet(), + ((StoredSortedEntrySet) saveSMap.entrySet()).tailSet( + mapEntry(rangeBegin), true)); + assertEquals(smap.keySet(), + ((StoredSortedKeySet) saveSMap.keySet()).tailSet( + makeKey(rangeBegin), true)); + if (smap.values() instanceof SortedSet) { + assertEquals(smap.values(), + ((StoredSortedValueSet) saveSMap.values()).tailSet( + makeVal(rangeBegin), true)); + } + break; + default: throw new RuntimeException(); + } + map = smap; + beginKey = rangeBegin; + if (rangeBegin < 1 || rangeEnd > MAX_KEY) { + endKey = rangeBegin - 1; // force empty range for readAll() + } else { + endKey = rangeEnd; + } + } + + void clearRange() { + + rangeType = NONE; + beginKey = 1; + endKey = MAX_KEY; + map = saveMap; + smap = saveSMap; + list = saveList; + } + + void createOutOfRange(int rangeBegin, int rangeEnd) + throws Exception { + + // map + + if (rangeType != TAIL) { + try { + smap.subMap(makeKey(rangeBegin), makeKey(rangeEnd + 2)); + fail(); + } catch (IllegalArgumentException e) { } + try { + smap.headMap(makeKey(rangeEnd + 2)); + fail(); + } catch (IllegalArgumentException e) { } + checkDupsSize(0, smap.duplicates(makeKey(rangeEnd + 2))); + } + if (rangeType != HEAD) { + try { + smap.subMap(makeKey(rangeBegin - 1), makeKey(rangeEnd + 1)); + fail(); + } catch (IllegalArgumentException e) { } + try { + smap.tailMap(makeKey(rangeBegin - 1)); + fail(); + } + catch (IllegalArgumentException e) { } + checkDupsSize(0, smap.duplicates(makeKey(rangeBegin - 1))); + } + + // keySet + + if (rangeType != TAIL) { + SortedSet sset = (SortedSet) map.keySet(); + try { + sset.subSet(makeKey(rangeBegin), makeKey(rangeEnd + 2)); + fail(); + } catch (IllegalArgumentException e) { } + try { + sset.headSet(makeKey(rangeEnd + 2)); + fail(); + } catch (IllegalArgumentException e) { } + try { + sset.subSet(makeKey(rangeEnd + 1), + makeKey(rangeEnd + 2)).iterator(); + fail(); + } catch (IllegalArgumentException e) { } + } + if (rangeType != HEAD) { + SortedSet sset = (SortedSet) map.keySet(); + try { + sset.subSet(makeKey(rangeBegin - 1), makeKey(rangeEnd + 1)); + fail(); + } catch (IllegalArgumentException e) { } + try { + sset.tailSet(makeKey(rangeBegin - 1)); + fail(); + } + catch (IllegalArgumentException e) { } + try { + sset.subSet(makeKey(rangeBegin - 1), + makeKey(rangeBegin)).iterator(); + fail(); + } + catch (IllegalArgumentException e) { } + } + + // entrySet + + if (rangeType != TAIL) { + SortedSet sset = (SortedSet) map.entrySet(); + try { + sset.subSet(mapEntry(rangeBegin), mapEntry(rangeEnd + 2)); + fail(); + } catch 
(IllegalArgumentException e) { } + try { + sset.headSet(mapEntry(rangeEnd + 2)); + fail(); + } catch (IllegalArgumentException e) { } + try { + sset.subSet(mapEntry(rangeEnd + 1), + mapEntry(rangeEnd + 2)).iterator(); + fail(); + } catch (IllegalArgumentException e) { } + } + if (rangeType != HEAD) { + SortedSet sset = (SortedSet) map.entrySet(); + try { + sset.subSet(mapEntry(rangeBegin - 1), mapEntry(rangeEnd + 1)); + fail(); + } catch (IllegalArgumentException e) { } + try { + sset.tailSet(mapEntry(rangeBegin - 1)); + fail(); + } catch (IllegalArgumentException e) { } + try { + sset.subSet(mapEntry(rangeBegin - 1), + mapEntry(rangeBegin)).iterator(); + fail(); + } + catch (IllegalArgumentException e) { } + } + + // values + + if (map.values() instanceof SortedSet) { + SortedSet sset = (SortedSet) map.values(); + if (rangeType != TAIL) { + try { + sset.subSet(makeVal(rangeBegin), + makeVal(rangeEnd + 2)); + fail(); + } catch (IllegalArgumentException e) { } + try { + sset.headSet(makeVal(rangeEnd + 2)); + fail(); + } catch (IllegalArgumentException e) { } + } + if (rangeType != HEAD) { + try { + sset.subSet(makeVal(rangeBegin - 1), + makeVal(rangeEnd + 1)); + fail(); + } catch (IllegalArgumentException e) { } + try { + sset.tailSet(makeVal(rangeBegin - 1)); + fail(); + } + catch (IllegalArgumentException e) { } + } + } + + // list + + if (list != null) { + int size = rangeEnd - rangeBegin + 1; + try { + list.subList(0, size + 1); + fail(); + } catch (IndexOutOfBoundsException e) { } + try { + list.subList(-1, size); + fail(); + } catch (IndexOutOfBoundsException e) { } + try { + list.subList(2, 1); + fail(); + } catch (IndexOutOfBoundsException e) { } + try { + list.subList(size, size); + fail(); + } + catch (IndexOutOfBoundsException e) { } + } + } + + void writeOutOfRange(Long badNewKey) + throws Exception { + + try { + map.put(badNewKey, makeVal(badNewKey)); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.toString(), index == null); + } catch (UnsupportedOperationException e) { + assertTrue(index != null); + } + try { + map.keySet().add(badNewKey); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(index == null); + } catch (UnsupportedOperationException e) { + assertTrue(index != null); + } + try { + map.values().add(makeEntity(badNewKey)); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(isEntityBinding && index == null); + } catch (UnsupportedOperationException e) { + assertTrue(!(isEntityBinding && index == null)); + } + if (list != null) { + int i = badNewKey.intValue() - beginKey; + try { + list.set(i, makeVal(i)); + fail(); + } catch (IndexOutOfBoundsException e) { + assertTrue(index == null); + } catch (UnsupportedOperationException e) { + assertTrue(index != null); + } + try { + list.add(i, makeVal(badNewKey)); + fail(); + } + catch (UnsupportedOperationException e) { + } + } + } + + void readWriteDuplicates() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + if (index == null) { + readWritePrimaryDuplicates(beginKey); + readWritePrimaryDuplicates(beginKey + 1); + readWritePrimaryDuplicates(endKey); + readWritePrimaryDuplicates(endKey - 1); + } else { + readWriteIndexedDuplicates(beginKey); + readWriteIndexedDuplicates(beginKey + 1); + readWriteIndexedDuplicates(endKey); + readWriteIndexedDuplicates(endKey - 1); + } + } + }); + } + + void readWritePrimaryDuplicates(int i) + throws Exception { + + Collection dups; + // make duplicate values + final Long key = makeKey(i); 
+ final Object[] values = new Object[5]; + for (int j = 0; j < values.length; j += 1) { + values[j] = isEntityBinding + ? makeEntity(i, i + j) + : makeVal(i + j); + } + // add duplicates + outerLoop: for (int writeMode = 0;; writeMode += 1) { + //System.out.println("write mode " + writeMode); + switch (writeMode) { + case 0: + case 1: { + // write with Map.put() + for (int j = 1; j < values.length; j += 1) { + map.put(key, values[j]); + } + break; + } + case 2: { + // write with Map.duplicates().add() + dups = map.duplicates(key); + for (int j = 1; j < values.length; j += 1) { + dups.add(values[j]); + } + break; + } + case 3: { + // write with Map.duplicates().iterator().add() + writeIterRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + Collection dups = map.duplicates(key); + Iterator iter = dups.iterator(); + assertEquals(values[0], iter.next()); + assertTrue(!iter.hasNext()); + try { + for (int j = 1; j < values.length; j += 1) { + ((ListIterator) iter).add(values[j]); + } + } finally { + StoredIterator.close(iter); + } + } + }); + break; + } + case 4: { + // write with Map.values().add() + if (!isEntityBinding) { + continue; + } + Collection set = map.values(); + for (int j = 1; j < values.length; j += 1) { + set.add(values[j]); + } + break; + } + default: { + break outerLoop; + } + } + checkDupsSize(values.length, map.duplicates(key)); + // read duplicates + readDuplicates(i, key, values); + // remove duplicates + switch (writeMode) { + case 0: { + // remove with Map.remove() + checkDupsSize(values.length, map.duplicates(key)); + map.remove(key); // remove all values + checkDupsSize(0, map.duplicates(key)); + map.put(key, values[0]); // put back original value + checkDupsSize(1, map.duplicates(key)); + break; + } + case 1: { + // remove with Map.keySet().remove() + map.keySet().remove(key); // remove all values + map.put(key, values[0]); // put back original value + break; + } + case 2: { + // remove with Map.duplicates().clear() + dups = map.duplicates(key); + dups.clear(); // remove all values + dups.add(values[0]); // put back original value + break; + } + case 3: { + // remove with Map.duplicates().iterator().remove() + writeIterRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + Collection dups = map.duplicates(key); + Iterator iter = dups.iterator(); + try { + for (int j = 0; j < values.length; j += 1) { + assertEquals(values[j], iter.next()); + if (j != 0) { + iter.remove(); + } + } + } finally { + StoredIterator.close(iter); + } + } + }); + break; + } + case 4: { + // remove with Map.values().remove() + if (!isEntityBinding) { + throw new IllegalStateException(); + } + Collection set = map.values(); + for (int j = 1; j < values.length; j += 1) { + set.remove(values[j]); + } + break; + } + default: throw new IllegalStateException(); + } + // verify that only original value is present + dups = map.duplicates(key); + assertTrue(dups.contains(values[0])); + for (int j = 1; j < values.length; j += 1) { + assertTrue(!dups.contains(values[j])); + } + checkDupsSize(1, dups); + } + } + + void readWriteIndexedDuplicates(int i) + throws Exception { + + Object key = makeKey(i); + Object[] values = new Object[3]; + values[0] = makeVal(i); + for (int j = 1; j < values.length; j += 1) { + values[j] = isEntityBinding + ? 
makeEntity(endKey + j, i) + : makeVal(i); + } + // add duplicates + for (int j = 1; j < values.length; j += 1) { + imap.put(makeKey(endKey + j), values[j]); + } + // read duplicates + readDuplicates(i, key, values); + // remove duplicates + for (int j = 1; j < values.length; j += 1) { + imap.remove(makeKey(endKey + j)); + } + checkDupsSize(1, map.duplicates(key)); + } + + void readDuplicates(int i, Object key, Object[] values) { + + boolean isOrdered = map.isOrdered(); + Collection dups; + Iterator iter; + // read with Map.duplicates().iterator() + dups = map.duplicates(key); + checkDupsSize(values.length, dups); + iter = dups.iterator(); + try { + for (int j = 0; j < values.length; j += 1) { + assertTrue(iter.hasNext()); + Object val = iter.next(); + assertEquals(values[j], val); + } + assertTrue(!iter.hasNext()); + } finally { + StoredIterator.close(iter); + } + // read with Map.values().iterator() + Collection clone = ((StoredCollection) map.values()).toList(); + iter = map.values().iterator(); + try { + for (int j = beginKey; j < i; j += 1) { + Object val = iter.next(); + assertTrue(clone.remove(makeVal(j))); + if (isOrdered) { + assertEquals(makeVal(j), val); + } + } + for (int j = 0; j < values.length; j += 1) { + Object val = iter.next(); + assertTrue(clone.remove(values[j])); + if (isOrdered) { + assertEquals(values[j], val); + } + } + for (int j = i + 1; j <= endKey; j += 1) { + Object val = iter.next(); + assertTrue(clone.remove(makeVal(j))); + if (isOrdered) { + assertEquals(makeVal(j), val); + } + } + assertTrue(!iter.hasNext()); + assertTrue(clone.isEmpty()); + } finally { + StoredIterator.close(iter); + } + // read with Map.entrySet().iterator() + clone = ((StoredCollection) map.entrySet()).toList(); + iter = map.entrySet().iterator(); + try { + for (int j = beginKey; j < i; j += 1) { + Map.Entry entry = (Map.Entry) iter.next(); + assertTrue(clone.remove(mapEntry(j))); + if (isOrdered) { + assertEquals(makeVal(j), entry.getValue()); + assertEquals(makeKey(j), entry.getKey()); + } + } + for (int j = 0; j < values.length; j += 1) { + Map.Entry entry = (Map.Entry) iter.next(); + assertTrue(clone.remove(mapEntry(makeKey(i), values[j]))); + if (isOrdered) { + assertEquals(values[j], entry.getValue()); + assertEquals(makeKey(i), entry.getKey()); + } + } + for (int j = i + 1; j <= endKey; j += 1) { + Map.Entry entry = (Map.Entry) iter.next(); + assertTrue(clone.remove(mapEntry(j))); + if (isOrdered) { + assertEquals(makeVal(j), entry.getValue()); + assertEquals(makeKey(j), entry.getKey()); + } + } + assertTrue(!iter.hasNext()); + assertTrue(clone.isEmpty()); + } finally { + StoredIterator.close(iter); + } + // read with Map.keySet().iterator() + clone = ((StoredCollection) map.keySet()).toList(); + iter = map.keySet().iterator(); + try { + for (int j = beginKey; j < i; j += 1) { + Object val = iter.next(); + assertTrue(clone.remove(makeKey(j))); + if (isOrdered) { + assertEquals(makeKey(j), val); + } + } + if (true) { + // only one key is iterated for all duplicates + Object val = iter.next(); + assertTrue(clone.remove(makeKey(i))); + if (isOrdered) { + assertEquals(makeKey(i), val); + } + } + for (int j = i + 1; j <= endKey; j += 1) { + Object val = iter.next(); + assertTrue(clone.remove(makeKey(j))); + if (isOrdered) { + assertEquals(makeKey(j), val); + } + } + assertTrue(!iter.hasNext()); + assertTrue(clone.isEmpty()); + } finally { + StoredIterator.close(iter); + } + } + + void duplicatesNotAllowed() { + + Collection dups = map.duplicates(makeKey(beginKey)); + try { + 
dups.add(makeVal(beginKey)); + fail(); + } catch (UnsupportedOperationException expected) { } + ListIterator iter = (ListIterator) dups.iterator(); + try { + iter.add(makeVal(beginKey)); + fail(); + } catch (UnsupportedOperationException expected) { + } finally { + StoredIterator.close(iter); + } + } + + void listOperationsNotAllowed() { + + ListIterator iter = (ListIterator) map.values().iterator(); + try { + try { + iter.nextIndex(); + fail(); + } catch (UnsupportedOperationException expected) { } + try { + iter.previousIndex(); + fail(); + } catch (UnsupportedOperationException expected) { } + } finally { + StoredIterator.close(iter); + } + } + + void testCdbLocking() { + + Iterator readIterator; + Iterator writeIterator; + StoredKeySet set = (StoredKeySet) map.keySet(); + + // can open two CDB read cursors + readIterator = set.iterator(false); + try { + Iterator readIterator2 = set.iterator(false); + StoredIterator.close(readIterator2); + } finally { + StoredIterator.close(readIterator); + } + + // can open two CDB write cursors + writeIterator = set.iterator(true); + try { + Iterator writeIterator2 = set.iterator(true); + StoredIterator.close(writeIterator2); + } finally { + StoredIterator.close(writeIterator); + } + + // cannot open CDB write cursor when read cursor is open, + readIterator = set.iterator(false); + try { + writeIterator = set.iterator(true); + fail(); + StoredIterator.close(writeIterator); + } catch (IllegalStateException e) { + } finally { + StoredIterator.close(readIterator); + } + + if (index == null) { + // cannot put() with read cursor open + readIterator = set.iterator(false); + try { + map.put(makeKey(1), makeVal(1)); + fail(); + } catch (IllegalStateException e) { + } finally { + StoredIterator.close(readIterator); + } + + // cannot append() with write cursor open with RECNO/QUEUE only + writeIterator = set.iterator(true); + try { + if (testStore.isQueueOrRecno()) { + try { + map.append(makeVal(1)); + fail(); + } catch (IllegalStateException e) {} + } else { + map.append(makeVal(1)); + } + } finally { + StoredIterator.close(writeIterator); + } + } + } + + Object makeVal(int key) { + + if (isEntityBinding) { + return makeEntity(key); + } else { + return new Long(key + 100); + } + } + + Object makeVal(int key, int val) { + + if (isEntityBinding) { + return makeEntity(key, val); + } else { + return makeVal(val); + } + } + + Object makeEntity(int key, int val) { + + return new TestEntity(key, val + 100); + } + + int intVal(Object val) { + + if (isEntityBinding) { + return ((TestEntity) val).value - 100; + } else { + return ((Long) val).intValue() - 100; + } + } + + int intKey(Object key) { + + return ((Long) key).intValue(); + } + + Object makeVal(Long key) { + + return makeVal(key.intValue()); + } + + Object makeEntity(int key) { + + return makeEntity(key, key); + } + + Object makeEntity(Long key) { + + return makeEntity(key.intValue()); + } + + int intIter(Collection coll, Object value) { + + if (coll instanceof StoredKeySet) { + return intKey(value); + } else { + if (coll instanceof StoredEntrySet) { + value = ((Map.Entry) value).getValue(); + } + return intVal(value); + } + } + + Map.Entry mapEntry(Object key, Object val) { + + return new MapEntryParameter(key, val); + } + + Map.Entry mapEntry(int key) { + + return new MapEntryParameter(makeKey(key), makeVal(key)); + } + + Long makeKey(int key) { + + return new Long(key); + } + + boolean isSubMap() { + + return rangeType != NONE; + } + + void checkDupsSize(int expected, Collection coll) { + + 
assertEquals(expected, coll.size()); + if (coll instanceof StoredCollection) { + StoredIterator i = ((StoredCollection) coll).iterator(false); + try { + int actual = 0; + if (i.hasNext()) { + i.next(); + actual = i.count(); + } + assertEquals(expected, actual); + } finally { + StoredIterator.close(i); + } + } + } + + private boolean isListAddAllowed() { + + return list != null && testStore.isQueueOrRecno() && + list.areKeysRenumbered(); + } + + private int countElements(Collection coll) { + + int count = 0; + Iterator iter = coll.iterator(); + try { + while (iter.hasNext()) { + iter.next(); + count += 1; + } + } finally { + StoredIterator.close(iter); + } + return count; + } +} diff --git a/db/test/scr024/src/com/sleepycat/collections/test/DbTestUtil.java b/db/test/scr024/src/com/sleepycat/collections/test/DbTestUtil.java new file mode 100644 index 000000000..7b5fadc96 --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/collections/test/DbTestUtil.java @@ -0,0 +1,132 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: DbTestUtil.java,v 1.2 2004/06/02 21:00:59 mark Exp $ + */ + +package com.sleepycat.collections.test; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +import junit.framework.TestCase; + +import com.sleepycat.db.DatabaseConfig; + +/** + * @author Mark Hayes + */ +public class DbTestUtil { + + public static final DatabaseConfig DBCONFIG_CREATE = new DatabaseConfig(); + static { + DBCONFIG_CREATE.setAllowCreate(true); + } + + private static final String separator = ":::"; + + private static final File TEST_DIR; + static { + String dir = System.getProperty("testdestdir"); + if (dir == null || dir.length() == 0) { + dir = "."; + } + TEST_DIR = new File(dir, "tmp"); + } + + public static void printTestName(String name) { + // don't want verbose printing for now + // System.out.println(name); + } + + public static File getExistingDir(String name) + throws IOException { + + File dir = new File(TEST_DIR, name); + if (!dir.exists() || !dir.isDirectory()) { + throw new IllegalStateException( + "Not an existing directory: " + dir); + } + return dir; + } + + public static File getNewDir() + throws IOException { + + return getNewDir("test-dir"); + } + + public static File getNewDir(String name) + throws IOException { + + File dir = new File(TEST_DIR, name); + if (dir.isDirectory()) { + String[] files = dir.list(); + if (files != null) { + for (int i = 0; i < files.length; i += 1) { + new File(dir, files[i]).delete(); + } + } + } else { + dir.delete(); + dir.mkdirs(); + } + return dir; + } + + public static File getNewFile() + throws IOException { + + return getNewFile("test-file"); + } + + public static File getNewFile(String name) + throws IOException { + + return getNewFile(TEST_DIR, name); + } + + public static File getNewFile(File dir, String name) + throws IOException { + + File file = new File(dir, name); + file.delete(); + return file; + } + + public static boolean copyResource(Class cls, String fileName, File toDir) + throws IOException { + + InputStream in = cls.getResourceAsStream("testdata/" + fileName); + if (in == null) { + return false; + } + in = new BufferedInputStream(in); + File file = new File(toDir, fileName); + OutputStream out = new FileOutputStream(file); + out = new BufferedOutputStream(out); 
+ int c; + while ((c = in.read()) >= 0) out.write(c); + in.close(); + out.close(); + return true; + } + + public static String qualifiedTestName(TestCase test) { + + String s = test.getClass().getName(); + int i = s.lastIndexOf('.'); + if (i >= 0) { + s = s.substring(i + 1); + } + return s + '.' + test.getName(); + } +} diff --git a/db/test/scr024/src/com/sleepycat/collections/test/IterDeadlockTest.java b/db/test/scr024/src/com/sleepycat/collections/test/IterDeadlockTest.java new file mode 100644 index 000000000..7ef77537c --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/collections/test/IterDeadlockTest.java @@ -0,0 +1,232 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2003 + * Sleepycat Software. All rights reserved. + * + * $Id: IterDeadlockTest.java,v 1.3 2004/10/07 19:45:56 bostic Exp $ + */ + +package com.sleepycat.collections.test; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseConfig; +import com.sleepycat.db.DeadlockException; +import com.sleepycat.db.Environment; +import com.sleepycat.bind.ByteArrayBinding; +import com.sleepycat.collections.CurrentTransaction; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.collections.StoredIterator; +import com.sleepycat.collections.StoredSortedMap; +import java.util.Iterator; +import java.util.ListIterator; +import junit.framework.Test; +import junit.framework.TestCase; +import junit.framework.TestSuite; + +/** + * Tests the fix for [#10516], where the StoredIterator constructor was not + * closing the cursor when an exception occurred. For example, a deadlock + * exception might occur if the constructor was unable to move the cursor to + * the first element. + * @author Mark Hayes + */ +public class IterDeadlockTest extends TestCase { + + private static final byte[] ONE = { 1 }; + + public static void main(String[] args) + throws Exception { + + junit.framework.TestResult tr = + junit.textui.TestRunner.run(suite()); + if (tr.errorCount() > 0 || + tr.failureCount() > 0) { + System.exit(1); + } else { + System.exit(0); + } + } + + public static Test suite() + throws Exception { + + TestSuite suite = new TestSuite(IterDeadlockTest.class); + return suite; + } + + private Environment env; + private CurrentTransaction currentTxn; + private Database store1; + private Database store2; + private StoredSortedMap map1; + private StoredSortedMap map2; + private ByteArrayBinding binding = new ByteArrayBinding(); + + public IterDeadlockTest(String name) { + + super(name); + } + + public void setUp() + throws Exception { + + env = TestEnv.TXN.open("IterDeadlockTest"); + currentTxn = CurrentTransaction.getInstance(env); + store1 = openDb("store1.db"); + store2 = openDb("store2.db"); + map1 = new StoredSortedMap(store1, binding, binding, true); + map2 = new StoredSortedMap(store2, binding, binding, true); + } + + public void tearDown() { + + if (store1 != null) { + try { + store1.close(); + } catch (Exception e) { + System.out.println("Ignored exception during tearDown: " + e); + } + } + if (store2 != null) { + try { + store2.close(); + } catch (Exception e) { + System.out.println("Ignored exception during tearDown: " + e); + } + } + if (env != null) { + try { + env.close(); + } catch (Exception e) { + System.out.println("Ignored exception during tearDown: " + e); + } + } + /* Allow GC of DB objects in the test case. 
*/ + env = null; + currentTxn = null; + store1 = null; + store2 = null; + map1 = null; + map2 = null; + } + + private Database openDb(String file) + throws Exception { + + DatabaseConfig config = new DatabaseConfig(); + DbCompat.setTypeBtree(config); + config.setTransactional(true); + config.setAllowCreate(true); + + return DbCompat.openDatabase(env, null, file, null, config); + } + + public void testIterDeadlock() + throws Exception { + + final Object parent = new Object(); + final Object child1 = new Object(); + final Object child2 = new Object(); + final TransactionRunner runner = new TransactionRunner(env); + runner.setMaxRetries(0); + + /* Write a record in each db. */ + runner.run(new TransactionWorker() { + public void doWork() throws Exception { + assertNull(map1.put(ONE, ONE)); + assertNull(map2.put(ONE, ONE)); + } + }); + + /* + * A thread to open iterator 1, then wait to be notified, then open + * iterator 2. + */ + final Thread thread1 = new Thread(new Runnable() { + public void run() { + try { + runner.run(new TransactionWorker() { + public void doWork() throws Exception { + synchronized (child1) { + ListIterator i1 = + (ListIterator) map1.values().iterator(); + i1.next(); + i1.set(ONE); /* Write lock. */ + StoredIterator.close(i1); + synchronized (parent) { parent.notify(); } + child1.wait(); + Iterator i2 = map2.values().iterator(); + assertTrue(i2.hasNext()); + StoredIterator.close(i2); + } + } + }); + } catch (DeadlockException expected) { + } catch (Exception e) { + e.printStackTrace(); + fail(e.toString()); + } + } + }); + + /* + * A thread to open iterator 2, then wait to be notified, then open + * iterator 1. + */ + final Thread thread2 = new Thread(new Runnable() { + public void run() { + try { + runner.run(new TransactionWorker() { + public void doWork() throws Exception { + synchronized (child2) { + ListIterator i2 = + (ListIterator) map2.values().iterator(); + i2.next(); + i2.set(ONE); /* Write lock. */ + StoredIterator.close(i2); + synchronized (parent) { parent.notify(); } + child2.wait(); + Iterator i1 = map1.values().iterator(); + assertTrue(i1.hasNext()); + StoredIterator.close(i1); + } + } + }); + } catch (DeadlockException expected) { + } catch (Exception e) { + e.printStackTrace(); + fail(e.toString()); + } + } + }); + + /* + * Open iterator 1 in thread 1, then iterator 2 in thread 2, then let + * the threads run to open the other iterators and cause a deadlock. + */ + synchronized (parent) { + thread1.start(); + parent.wait(); + thread2.start(); + parent.wait(); + synchronized (child1) { child1.notify(); } + synchronized (child2) { child2.notify(); } + thread1.join(); + thread2.join(); + } + + /* + * Before the fix for [#10516] we would get an exception indicating + * that cursors were not closed, when closing the stores below. + */ + store1.close(); + store1 = null; + store2.close(); + store2 = null; + env.close(); + env = null; + } +} diff --git a/db/test/scr024/src/com/sleepycat/collections/test/JoinTest.java b/db/test/scr024/src/com/sleepycat/collections/test/JoinTest.java new file mode 100644 index 000000000..4ecc4362e --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/collections/test/JoinTest.java @@ -0,0 +1,232 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2003 + * Sleepycat Software. All rights reserved. 
+ * + * $Id: JoinTest.java,v 1.1 2004/04/09 16:34:10 mark Exp $ + */ +package com.sleepycat.collections.test; + +import java.util.Map; + +import junit.framework.Test; +import junit.framework.TestCase; + +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.bind.serial.test.MarshalledObject; +import com.sleepycat.collections.StoredCollection; +import com.sleepycat.collections.StoredContainer; +import com.sleepycat.collections.StoredIterator; +import com.sleepycat.collections.StoredMap; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.collections.TupleSerialFactory; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseConfig; +import com.sleepycat.db.Environment; +import com.sleepycat.db.SecondaryConfig; +import com.sleepycat.db.SecondaryDatabase; + +/** + * @author Mark Hayes + */ +public class JoinTest extends TestCase + implements TransactionWorker { + + private static final String MATCH_DATA = "d4"; // matches both keys = "yes" + private static final String MATCH_KEY = "k4"; // matches both keys = "yes" + private static final String[] VALUES = {"yes", "yes"}; + + public static void main(String[] args) + throws Exception { + + junit.framework.TestResult tr = + junit.textui.TestRunner.run(suite()); + if (tr.errorCount() > 0 || + tr.failureCount() > 0) { + System.exit(1); + } else { + System.exit(0); + } + } + + public static Test suite() + throws Exception { + + return new JoinTest(); + } + + private Environment env; + private TransactionRunner runner; + private StoredClassCatalog catalog; + private TupleSerialFactory factory; + private Database store; + private SecondaryDatabase index1; + private SecondaryDatabase index2; + private StoredMap storeMap; + private StoredMap indexMap1; + private StoredMap indexMap2; + + public JoinTest() { + + super("JoinTest"); + } + + public void setUp() + throws Exception { + + DbTestUtil.printTestName(getName()); + env = TestEnv.TXN.open(getName()); + runner = new TransactionRunner(env); + createDatabase(); + } + + public void tearDown() { + + try { + if (index1 != null) { + index1.close(); + } + if (index2 != null) { + index2.close(); + } + if (store != null) { + store.close(); + } + if (catalog != null) { + catalog.close(); + } + if (env != null) { + env.close(); + } + } catch (Exception e) { + System.out.println("Ignored exception during tearDown: " + e); + } finally { + /* Ensure that GC can cleanup. 
*/ + index1 = null; + index2 = null; + store = null; + catalog = null; + env = null; + runner = null; + factory = null; + storeMap = null; + indexMap1 = null; + indexMap2 = null; + } + } + + public void runTest() + throws Exception { + + runner.run(this); + } + + public void doWork() + throws Exception { + + createViews(); + writeAndRead(); + } + + private void createDatabase() + throws Exception { + + catalog = new StoredClassCatalog(openDb("catalog.db")); + factory = new TupleSerialFactory(catalog); + assertSame(catalog, factory.getCatalog()); + + store = openDb("store.db"); + index1 = openSecondaryDb(store, "index1.db", "1"); + index2 = openSecondaryDb(store, "index2.db", "2"); + } + + private Database openDb(String file) + throws Exception { + + DatabaseConfig config = new DatabaseConfig(); + DbCompat.setTypeBtree(config); + config.setTransactional(true); + config.setAllowCreate(true); + + return DbCompat.openDatabase(env, null, file, null, config); + } + + private SecondaryDatabase openSecondaryDb(Database primary, + String file, + String keyName) + throws Exception { + + SecondaryConfig secConfig = new SecondaryConfig(); + DbCompat.setTypeBtree(secConfig); + secConfig.setTransactional(true); + secConfig.setAllowCreate(true); + DbCompat.setSortedDuplicates(secConfig, true); + secConfig.setKeyCreator(factory.getKeyCreator(MarshalledObject.class, + keyName)); + + return DbCompat.openSecondaryDatabase(env, null, + file, null, + primary, secConfig); + } + + private void createViews() + throws Exception { + + storeMap = factory.newMap(store, String.class, + MarshalledObject.class, true); + indexMap1 = factory.newMap(index1, String.class, + MarshalledObject.class, true); + indexMap2 = factory.newMap(index2, String.class, + MarshalledObject.class, true); + } + + private void writeAndRead() + throws Exception { + + // write records: Data, PrimaryKey, IndexKey1, IndexKey2 + assertNull(storeMap.put(null, + new MarshalledObject("d1", "k1", "no", "yes"))); + assertNull(storeMap.put(null, + new MarshalledObject("d2", "k2", "no", "no"))); + assertNull(storeMap.put(null, + new MarshalledObject("d3", "k3", "no", "yes"))); + assertNull(storeMap.put(null, + new MarshalledObject("d4", "k4", "yes", "yes"))); + assertNull(storeMap.put(null, + new MarshalledObject("d5", "k5", "yes", "no"))); + + Object o; + Map.Entry e; + + // join values with index maps + o = doJoin((StoredCollection) storeMap.values()); + assertEquals(MATCH_DATA, ((MarshalledObject) o).getData()); + + // join keySet with index maps + o = doJoin((StoredCollection) storeMap.keySet()); + assertEquals(MATCH_KEY, o); + + // join entrySet with index maps + o = doJoin((StoredCollection) storeMap.entrySet()); + e = (Map.Entry) o; + assertEquals(MATCH_KEY, e.getKey()); + assertEquals(MATCH_DATA, ((MarshalledObject) e.getValue()).getData()); + } + + private Object doJoin(StoredCollection coll) { + + StoredContainer[] indices = { indexMap1, indexMap2 }; + StoredIterator i = coll.join(indices, VALUES, null); + try { + assertTrue(i.hasNext()); + Object result = i.next(); + assertNotNull(result); + assertFalse(i.hasNext()); + return result; + } finally { i.close(); } + } +} + diff --git a/db/test/scr024/src/com/sleepycat/collections/test/NullTransactionRunner.java b/db/test/scr024/src/com/sleepycat/collections/test/NullTransactionRunner.java new file mode 100644 index 000000000..a8382e6c3 --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/collections/test/NullTransactionRunner.java @@ -0,0 +1,33 @@ +/*- + * See the file LICENSE for redistribution 
information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: NullTransactionRunner.java,v 1.1 2004/04/09 16:34:10 mark Exp $ + */ + +package com.sleepycat.collections.test; + +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.db.Environment; +import com.sleepycat.util.ExceptionUnwrapper; + +class NullTransactionRunner extends TransactionRunner { + + NullTransactionRunner(Environment env) { + + super(env); + } + + public void run(TransactionWorker worker) + throws Exception { + + try { + worker.doWork(); + } catch (Exception e) { + throw ExceptionUnwrapper.unwrap(e); + } + } +} diff --git a/db/test/scr024/src/com/sleepycat/collections/test/SecondaryDeadlockTest.java b/db/test/scr024/src/com/sleepycat/collections/test/SecondaryDeadlockTest.java new file mode 100644 index 000000000..4e785fd4f --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/collections/test/SecondaryDeadlockTest.java @@ -0,0 +1,193 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2003 + * Sleepycat Software. All rights reserved. + * + * $Id: SecondaryDeadlockTest.java,v 1.3 2004/08/02 18:53:08 mjc Exp $ + */ + +package com.sleepycat.collections.test; + +import com.sleepycat.db.Database; +import com.sleepycat.db.DeadlockException; +import com.sleepycat.db.Environment; +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.util.ExceptionUnwrapper; +import junit.framework.Test; +import junit.framework.TestCase; +import junit.framework.TestSuite; + +/** + * Tests whether secondary access can cause a self-deadlock when reading via a + * secondary because the collections API secondary implementation in DB 4.2 + * opens two cursors. Part of the problem in [#10516] was because the + * secondary get() was not done in a txn. This problem should not occur in DB + * 4.3 and JE -- an ordinary deadlock occurs instead and is detected. 
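The deadlock described in the comment above is a lock-ordering cycle: the writing path locks the primary record and then needs the corresponding secondary entry, while the reading path locks the secondary entry and then needs the primary record. The sketch below reproduces only that ordering with plain JDK locks; it is an illustrative aside with hypothetical class and method names, not the Berkeley DB locking subsystem and not part of this patch.

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.ReentrantLock;

    public class LockOrderSketch {

        /* Stand-ins for the record locks taken on the two databases. */
        private static final ReentrantLock primaryLock = new ReentrantLock();
        private static final ReentrantLock secondaryLock = new ReentrantLock();

        public static void main(String[] args) throws InterruptedException {

            /* Writer path: primary first, then secondary (a put() via the primary). */
            Thread writer = new Thread(new Runnable() {
                public void run() { acquireBoth(primaryLock, secondaryLock, "writer"); }
            });

            /* Reader path: secondary first, then primary (a get() via the secondary). */
            Thread reader = new Thread(new Runnable() {
                public void run() { acquireBoth(secondaryLock, primaryLock, "reader"); }
            });

            writer.start();
            reader.start();
            writer.join();
            reader.join();
        }

        private static void acquireBoth(ReentrantLock first, ReentrantLock second,
                                        String who) {
            first.lock();
            try {
                /*
                 * tryLock with a timeout plays the role of deadlock detection:
                 * if the other thread already holds 'second' and is waiting on
                 * 'first', this call times out instead of blocking forever, and
                 * the caller can release its own lock and retry -- the same
                 * recovery the test performs when it catches DeadlockException.
                 */
                if (second.tryLock(100, TimeUnit.MILLISECONDS)) {
                    try {
                        System.out.println(who + ": acquired both locks");
                    } finally {
                        second.unlock();
                    }
                } else {
                    System.out.println(who + ": lock-order cycle, backing off");
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            } finally {
                first.unlock();
            }
        }
    }

Whether the two paths actually collide depends on timing, which is why the test below loops many times and allows retries rather than asserting that a deadlock occurs on any particular iteration.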
+ * + * @author Mark Hayes + */ +public class SecondaryDeadlockTest extends TestCase { + + private static final Long N_ONE = new Long(1); + private static final Long N_101 = new Long(101); + private static final int N_ITERS = 200; + private static final int MAX_RETRIES = 30; + + public static void main(String[] args) + throws Exception { + + junit.framework.TestResult tr = + junit.textui.TestRunner.run(suite()); + if (tr.errorCount() > 0 || + tr.failureCount() > 0) { + System.exit(1); + } else { + System.exit(0); + } + } + + public static Test suite() + throws Exception { + + TestSuite suite = new TestSuite(SecondaryDeadlockTest.class); + return suite; + } + + private Environment env; + private Database store; + private Database index; + private StoredSortedMap storeMap; + private StoredSortedMap indexMap; + + public SecondaryDeadlockTest(String name) { + + super(name); + } + + public void setUp() + throws Exception { + + env = TestEnv.TXN.open("SecondaryDeadlockTest"); + store = TestStore.BTREE_UNIQ.open(env, "store.db"); + index = TestStore.BTREE_UNIQ.openIndex(store, "index.db"); + storeMap = new StoredSortedMap(store, + TestStore.BTREE_UNIQ.getKeyBinding(), + TestStore.BTREE_UNIQ.getValueBinding(), + true); + indexMap = new StoredSortedMap(index, + TestStore.BTREE_UNIQ.getKeyBinding(), + TestStore.BTREE_UNIQ.getValueBinding(), + true); + } + + public void tearDown() { + + if (index != null) { + try { + index.close(); + } catch (Exception e) { + System.out.println("Ignored exception during tearDown: " + e); + } + } + if (store != null) { + try { + store.close(); + } catch (Exception e) { + System.out.println("Ignored exception during tearDown: " + e); + } + } + if (env != null) { + try { + env.close(); + } catch (Exception e) { + System.out.println("Ignored exception during tearDown: " + e); + } + } + /* Allow GC of DB objects in the test case. */ + env = null; + store = null; + index = null; + storeMap = null; + indexMap = null; + } + + public void testSecondaryDeadlock() + throws Exception { + + final TransactionRunner runner = new TransactionRunner(env); + runner.setMaxRetries(MAX_RETRIES); + + /* + * A thread to do put() and delete() via the primary, which will lock + * the primary first then the secondary. Uses transactions. + */ + final Thread thread1 = new Thread(new Runnable() { + public void run() { + try { + for (int i = 0; i < N_ITERS; i +=1 ) { + runner.run(new TransactionWorker() { + public void doWork() throws Exception { + assertEquals(null, storeMap.put(N_ONE, N_101)); + } + }); + runner.run(new TransactionWorker() { + public void doWork() throws Exception { + assertEquals(N_101, storeMap.remove(N_ONE)); + } + }); + } + } catch (Exception e) { + e.printStackTrace(); + fail(e.toString()); + } + } + }); + + /* + * A thread to get() via the secondary, which will lock the secondary + * first then the primary. Does not use a transaction. + */ + final Thread thread2 = new Thread(new Runnable() { + public void run() { + try { + for (int i = 0; i < N_ITERS; i +=1 ) { + for (int j = 0; j < MAX_RETRIES; j += 1) { + try { + Object value = indexMap.get(N_ONE); + assertTrue(value == null || + N_101.equals(value)); + break; + } catch (Exception e) { + e = ExceptionUnwrapper.unwrap(e); + if (e instanceof DeadlockException) { + continue; /* Retry on deadlock. 
*/ + } else { + e.printStackTrace(); + fail(); + } + } + } + } + } catch (Exception e) { + e.printStackTrace(); + fail(e.toString()); + } + } + }); + + thread1.start(); + thread2.start(); + thread1.join(); + thread2.join(); + + index.close(); + index = null; + store.close(); + store = null; + env.close(); + env = null; + } +} diff --git a/db/test/scr024/src/com/sleepycat/collections/test/TestDataBinding.java b/db/test/scr024/src/com/sleepycat/collections/test/TestDataBinding.java new file mode 100644 index 000000000..4ee3d0c0f --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/collections/test/TestDataBinding.java @@ -0,0 +1,34 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TestDataBinding.java,v 1.2 2004/06/04 18:26:02 mark Exp $ + */ + +package com.sleepycat.collections.test; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.db.DatabaseEntry; + +/** + * @author Mark Hayes + */ +class TestDataBinding implements EntryBinding { + + public Object entryToObject(DatabaseEntry data) { + + if (data.getSize() != 1) { + throw new IllegalStateException("size=" + data.getSize()); + } + byte val = data.getData()[data.getOffset()]; + return new Long(val); + } + + public void objectToEntry(Object object, DatabaseEntry data) { + + byte val = ((Number) object).byteValue(); + data.setData(new byte[] { val }, 0, 1); + } +} diff --git a/db/test/scr024/src/com/sleepycat/collections/test/TestEntity.java b/db/test/scr024/src/com/sleepycat/collections/test/TestEntity.java new file mode 100644 index 000000000..5fb7498e4 --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/collections/test/TestEntity.java @@ -0,0 +1,45 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TestEntity.java,v 1.1 2004/04/09 16:34:10 mark Exp $ + */ + +package com.sleepycat.collections.test; + +/** + * @author Mark Hayes + */ +class TestEntity { + + int key; + int value; + + TestEntity(int key, int value) { + + this.key = key; + this.value = value; + } + + public boolean equals(Object o) { + + try { + TestEntity e = (TestEntity) o; + return e.key == key && e.value == value; + } catch (ClassCastException e) { + return false; + } + } + + public int hashCode() { + + return key; + } + + public String toString() { + + return "[key " + key + " value " + value + ']'; + } +} diff --git a/db/test/scr024/src/com/sleepycat/collections/test/TestEntityBinding.java b/db/test/scr024/src/com/sleepycat/collections/test/TestEntityBinding.java new file mode 100644 index 000000000..27abfe2c2 --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/collections/test/TestEntityBinding.java @@ -0,0 +1,64 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. 
+ * + * $Id: TestEntityBinding.java,v 1.2 2004/06/04 18:26:02 mark Exp $ + */ + +package com.sleepycat.collections.test; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.RecordNumberBinding; +import com.sleepycat.db.DatabaseEntry; + +/** + * @author Mark Hayes + */ +class TestEntityBinding implements EntityBinding { + + private boolean isRecNum; + + TestEntityBinding(boolean isRecNum) { + + this.isRecNum = isRecNum; + } + + public Object entryToObject(DatabaseEntry key, DatabaseEntry value) { + + byte keyByte; + if (isRecNum) { + if (key.getSize() != 4) { + throw new IllegalStateException(); + } + keyByte = (byte) RecordNumberBinding.entryToRecordNumber(key); + } else { + if (key.getSize() != 1) { + throw new IllegalStateException(); + } + keyByte = key.getData()[key.getOffset()]; + } + if (value.getSize() != 1) { + throw new IllegalStateException(); + } + byte valByte = value.getData()[value.getOffset()]; + return new TestEntity(keyByte, valByte); + } + + public void objectToKey(Object object, DatabaseEntry key) { + + byte val = (byte) ((TestEntity) object).key; + if (isRecNum) { + RecordNumberBinding.recordNumberToEntry(val, key); + } else { + key.setData(new byte[] { val }, 0, 1); + } + } + + public void objectToData(Object object, DatabaseEntry value) { + + byte val = (byte) ((TestEntity) object).value; + value.setData(new byte[] { val }, 0, 1); + } +} diff --git a/db/test/scr024/src/com/sleepycat/collections/test/TestEnv.java b/db/test/scr024/src/com/sleepycat/collections/test/TestEnv.java new file mode 100644 index 000000000..d311c68a0 --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/collections/test/TestEnv.java @@ -0,0 +1,121 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. 
+ * + * $Id: TestEnv.java,v 1.3 2004/06/30 01:08:22 mark Exp $ + */ + +package com.sleepycat.collections.test; + +import java.io.File; +import java.io.IOException; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.Environment; +import com.sleepycat.db.EnvironmentConfig; + +/** + * @author Mark Hayes + */ +public class TestEnv { + + public static final TestEnv BDB; + public static final TestEnv CDB; + public static final TestEnv TXN; + static { + EnvironmentConfig config; + + config = newEnvConfig(); + BDB = new TestEnv("bdb", config); + + if (DbCompat.CDB) { + config = newEnvConfig(); + DbCompat.setInitializeCDB(config, true); + CDB = new TestEnv("cdb", config); + } else { + CDB = null; + } + + config = newEnvConfig(); + config.setTransactional(true); + DbCompat.setInitializeLocking(config, true); + TXN = new TestEnv("txn", config); + } + + private static EnvironmentConfig newEnvConfig() { + + EnvironmentConfig config = new EnvironmentConfig(); + if (DbCompat.MEMORY_SUBSYSTEM) { + DbCompat.setInitializeCache(config, true); + } + return config; + } + + public static final TestEnv[] ALL; + static { + if (DbCompat.CDB) { + ALL = new TestEnv[] { BDB, CDB, TXN }; + } else { + ALL = new TestEnv[] { BDB, TXN }; + } + } + + private String name; + private EnvironmentConfig config; + + private TestEnv(String name, EnvironmentConfig config) { + + this.name = name; + this.config = config; + } + + public String getName() { + + return name; + } + + public boolean isTxnMode() { + + return config.getTransactional(); + } + + public boolean isCdbMode() { + + return DbCompat.getInitializeCDB(config); + } + + public Environment open(String testName) + throws IOException, DatabaseException { + + return open(testName, true); + } + + public Environment open(String testName, boolean create) + throws IOException, DatabaseException { + + config.setAllowCreate(create); + /* OLDEST deadlock detection on DB matches the use of timeouts on JE.*/ + DbCompat.setLockDetectModeOldest(config); + File dir = getDirectory(testName, create); + return new Environment(dir, config); + } + + public File getDirectory(String testName) + throws IOException { + + return getDirectory(testName, true); + } + + public File getDirectory(String testName, boolean create) + throws IOException { + + if (create) { + return DbTestUtil.getNewDir("db-test/" + testName); + } else { + return DbTestUtil.getExistingDir("db-test/" + testName); + } + } +} diff --git a/db/test/scr024/src/com/sleepycat/collections/test/TestKeyAssigner.java b/db/test/scr024/src/com/sleepycat/collections/test/TestKeyAssigner.java new file mode 100644 index 000000000..319e1a1e0 --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/collections/test/TestKeyAssigner.java @@ -0,0 +1,45 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. 
+ * + * $Id: TestKeyAssigner.java,v 1.2 2004/06/04 18:26:02 mark Exp $ + */ + +package com.sleepycat.collections.test; + +import com.sleepycat.bind.RecordNumberBinding; +import com.sleepycat.collections.PrimaryKeyAssigner; +import com.sleepycat.db.DatabaseEntry; +import com.sleepycat.db.DatabaseException; + +/** + * @author Mark Hayes + */ +class TestKeyAssigner implements PrimaryKeyAssigner { + + private byte next = 1; + private boolean isRecNum; + + TestKeyAssigner(boolean isRecNum) { + + this.isRecNum = isRecNum; + } + + public void assignKey(DatabaseEntry keyData) + throws DatabaseException { + + if (isRecNum) { + RecordNumberBinding.recordNumberToEntry(next, keyData); + } else { + keyData.setData(new byte[] { next }, 0, 1); + } + next += 1; + } + + void reset() { + + next = 1; + } +} diff --git a/db/test/scr024/src/com/sleepycat/collections/test/TestKeyCreator.java b/db/test/scr024/src/com/sleepycat/collections/test/TestKeyCreator.java new file mode 100644 index 000000000..9cda25e49 --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/collections/test/TestKeyCreator.java @@ -0,0 +1,60 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TestKeyCreator.java,v 1.2 2004/06/04 18:26:02 mark Exp $ + */ + +package com.sleepycat.collections.test; + +import com.sleepycat.bind.RecordNumberBinding; +import com.sleepycat.db.DatabaseEntry; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.SecondaryDatabase; +import com.sleepycat.db.SecondaryKeyCreator; + +/** + * Unused until secondaries are available. + * @author Mark Hayes + */ +class TestKeyCreator implements SecondaryKeyCreator { + + private boolean isRecNum; + + TestKeyCreator(boolean isRecNum) { + + this.isRecNum = isRecNum; + } + + public boolean createSecondaryKey(SecondaryDatabase db, + DatabaseEntry primaryKeyData, + DatabaseEntry valueData, + DatabaseEntry indexKeyData) + throws DatabaseException { + + if (valueData.getSize() == 0) { + return false; + } + if (valueData.getSize() != 1) { + throw new IllegalStateException(); + } + byte val = valueData.getData()[valueData.getOffset()]; + if (val == 0) { + return false; // fixed-len pad value + } + val -= 100; + if (isRecNum) { + RecordNumberBinding.recordNumberToEntry(val, indexKeyData); + } else { + indexKeyData.setData(new byte[] { val }, 0, 1); + } + return true; + } + + public void clearIndexKey(DatabaseEntry valueData) { + + throw new RuntimeException("not supported"); + } +} diff --git a/db/test/scr024/src/com/sleepycat/collections/test/TestStore.java b/db/test/scr024/src/com/sleepycat/collections/test/TestStore.java new file mode 100644 index 000000000..8899213ce --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/collections/test/TestStore.java @@ -0,0 +1,279 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. 
+ * + * $Id: TestStore.java,v 1.2 2004/09/22 18:01:06 bostic Exp $ + */ + +package com.sleepycat.collections.test; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.RecordNumberBinding; +import com.sleepycat.collections.CurrentTransaction; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseException; +import com.sleepycat.db.Environment; +import com.sleepycat.db.SecondaryConfig; + +/** + * @author Mark Hayes + */ +class TestStore { + + static final TestKeyCreator BYTE_EXTRACTOR = new TestKeyCreator(false); + static final TestKeyCreator RECNO_EXTRACTOR = new TestKeyCreator(true); + static final EntryBinding VALUE_BINDING = new TestDataBinding(); + static final EntryBinding BYTE_KEY_BINDING = VALUE_BINDING; + static final EntryBinding RECNO_KEY_BINDING = new RecordNumberBinding(); + static final EntityBinding BYTE_ENTITY_BINDING = + new TestEntityBinding(false); + static final EntityBinding RECNO_ENTITY_BINDING = + new TestEntityBinding(true); + static final TestKeyAssigner BYTE_KEY_ASSIGNER = + new TestKeyAssigner(false); + static final TestKeyAssigner RECNO_KEY_ASSIGNER = + new TestKeyAssigner(true); + + static final TestStore BTREE_UNIQ; + static final TestStore BTREE_DUP; + static final TestStore BTREE_DUPSORT; + static final TestStore BTREE_RECNUM; + static final TestStore HASH_UNIQ; + static final TestStore HASH_DUP; + static final TestStore HASH_DUPSORT; + static final TestStore QUEUE; + static final TestStore RECNO; + static final TestStore RECNO_RENUM; + + static final TestStore[] ALL; + static { + List list = new ArrayList(); + SecondaryConfig config; + + config = new SecondaryConfig(); + DbCompat.setTypeBtree(config); + BTREE_UNIQ = new TestStore("btree-uniq", config); + BTREE_UNIQ.indexOf = BTREE_UNIQ; + list.add(BTREE_UNIQ); + + if (DbCompat.INSERTION_ORDERED_DUPLICATES) { + config = new SecondaryConfig(); + DbCompat.setTypeBtree(config); + DbCompat.setUnsortedDuplicates(config, true); + BTREE_DUP = new TestStore("btree-dup", config); + BTREE_DUP.indexOf = null; // indexes must use sorted dups + list.add(BTREE_DUP); + } else { + BTREE_DUP = null; + } + + config = new SecondaryConfig(); + DbCompat.setTypeBtree(config); + DbCompat.setSortedDuplicates(config, true); + BTREE_DUPSORT = new TestStore("btree-dupsort", config); + BTREE_DUPSORT.indexOf = BTREE_UNIQ; + list.add(BTREE_DUPSORT); + + if (DbCompat.BTREE_RECNUM_METHOD) { + config = new SecondaryConfig(); + DbCompat.setTypeBtree(config); + DbCompat.setBtreeRecordNumbers(config, true); + BTREE_RECNUM = new TestStore("btree-recnum", config); + BTREE_RECNUM.indexOf = BTREE_RECNUM; + list.add(BTREE_RECNUM); + } else { + BTREE_RECNUM = null; + } + + if (DbCompat.HASH_METHOD) { + config = new SecondaryConfig(); + DbCompat.setTypeHash(config); + HASH_UNIQ = new TestStore("hash-uniq", config); + HASH_UNIQ.indexOf = HASH_UNIQ; + list.add(HASH_UNIQ); + + if (DbCompat.INSERTION_ORDERED_DUPLICATES) { + config = new SecondaryConfig(); + DbCompat.setTypeHash(config); + DbCompat.setUnsortedDuplicates(config, true); + HASH_DUP = new TestStore("hash-dup", config); + HASH_DUP.indexOf = null; // indexes must use sorted dups + list.add(HASH_DUP); + } else { + HASH_DUP = null; + } + + config = new SecondaryConfig(); + DbCompat.setTypeHash(config); + DbCompat.setSortedDuplicates(config, true); + HASH_DUPSORT = new TestStore("hash-dupsort", config); + 
HASH_DUPSORT.indexOf = HASH_UNIQ; + list.add(HASH_DUPSORT); + } else { + HASH_UNIQ = null; + HASH_DUP = null; + HASH_DUPSORT = null; + } + + if (DbCompat.QUEUE_METHOD) { + config = new SecondaryConfig(); + DbCompat.setTypeQueue(config); + QUEUE = new TestStore("queue", config); + QUEUE.indexOf = QUEUE; + list.add(QUEUE); + } else { + QUEUE = null; + } + + if (DbCompat.RECNO_METHOD) { + config = new SecondaryConfig(); + DbCompat.setTypeRecno(config); + RECNO = new TestStore("recno", config); + RECNO.indexOf = RECNO; + list.add(RECNO); + + config = new SecondaryConfig(); + DbCompat.setTypeRecno(config); + DbCompat.setRenumbering(config, true); + RECNO_RENUM = new TestStore("recno-renum", config); + RECNO_RENUM.indexOf = null; // indexes must have stable keys + list.add(RECNO_RENUM); + } else { + RECNO = null; + RECNO_RENUM = null; + } + + ALL = new TestStore[list.size()]; + list.toArray(ALL); + } + + private String name; + private SecondaryConfig config; + private TestStore indexOf; + private boolean isRecNumFormat; + + private TestStore(String name, SecondaryConfig config) { + + this.name = name; + this.config = config; + + isRecNumFormat = isQueueOrRecno() || + (DbCompat.isTypeBtree(config) && + DbCompat.getBtreeRecordNumbers(config)); + } + + EntryBinding getValueBinding() { + + return VALUE_BINDING; + } + + EntryBinding getKeyBinding() { + + return isRecNumFormat ? RECNO_KEY_BINDING : BYTE_KEY_BINDING; + } + + EntityBinding getEntityBinding() { + + return isRecNumFormat ? RECNO_ENTITY_BINDING : BYTE_ENTITY_BINDING; + } + + TestKeyAssigner getKeyAssigner() { + + if (isQueueOrRecno()) { + return null; + } else { + if (isRecNumFormat) { + return RECNO_KEY_ASSIGNER; + } else { + return BYTE_KEY_ASSIGNER; + } + } + } + + String getName() { + + return name; + } + + boolean isOrdered() { + + return !DbCompat.isTypeHash(config); + } + + boolean isQueueOrRecno() { + + return DbCompat.isTypeQueue(config) || DbCompat.isTypeRecno(config); + } + + boolean areDuplicatesAllowed() { + + return DbCompat.getSortedDuplicates(config) || + DbCompat.getUnsortedDuplicates(config); + } + + boolean hasRecNumAccess() { + + return isRecNumFormat; + } + + boolean areKeysRenumbered() { + + return hasRecNumAccess() && + (DbCompat.isTypeBtree(config) || + DbCompat.getRenumbering(config)); + } + + TestStore getIndexOf() { + + return DbCompat.SECONDARIES ? indexOf : null; + } + + Database open(Environment env, String fileName) + throws IOException, DatabaseException { + + int fixedLen = (isQueueOrRecno() ? 1 : 0); + return openDb(env, fileName, fixedLen, null); + } + + Database openIndex(Database primary, String fileName) + throws IOException, DatabaseException { + + int fixedLen = (isQueueOrRecno() ? 4 : 0); + config.setKeyCreator(isRecNumFormat ? 
RECNO_EXTRACTOR + : BYTE_EXTRACTOR); + Environment env = primary.getEnvironment(); + return openDb(env, fileName, fixedLen, primary); + } + + private Database openDb(Environment env, String fileName, int fixedLen, + Database primary) + throws IOException, DatabaseException { + + if (fixedLen > 0) { + DbCompat.setRecordLength(config, fixedLen); + DbCompat.setRecordPad(config, 0); + } else { + DbCompat.setRecordLength(config, 0); + } + config.setAllowCreate(true); + DbCompat.setDirtyRead(config, true); + config.setTransactional(CurrentTransaction.getInstance(env) != null); + if (primary != null) { + return DbCompat.openSecondaryDatabase(env, null, + fileName, null, + primary, config); + } else { + return DbCompat.openDatabase(env, null, + fileName, null, + config); + } + } +} diff --git a/db/test/scr024/src/com/sleepycat/collections/test/TransactionTest.java b/db/test/scr024/src/com/sleepycat/collections/test/TransactionTest.java new file mode 100644 index 000000000..ce9bd36b1 --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/collections/test/TransactionTest.java @@ -0,0 +1,626 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TransactionTest.java,v 1.2 2004/09/22 18:01:06 bostic Exp $ + */ + +package com.sleepycat.collections.test; + +import java.util.Iterator; +import java.util.List; +import java.util.SortedSet; + +import junit.framework.Test; +import junit.framework.TestCase; +import junit.framework.TestSuite; + +import com.sleepycat.collections.CurrentTransaction; +import com.sleepycat.collections.StoredCollections; +import com.sleepycat.collections.StoredContainer; +import com.sleepycat.collections.StoredIterator; +import com.sleepycat.collections.StoredList; +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseConfig; +import com.sleepycat.db.Environment; +import com.sleepycat.db.Transaction; +import com.sleepycat.db.TransactionConfig; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * @author Mark Hayes + */ +public class TransactionTest extends TestCase { + + private static final Long ONE = new Long(1); + private static final Long TWO = new Long(2); + private static final Long THREE = new Long(3); + + /** + * Runs a command line collection test. 
+ * @see #usage + */ + public static void main(String[] args) + throws Exception { + + if (args.length == 1 && + (args[0].equals("-h") || args[0].equals("-help"))) { + usage(); + } else { + junit.framework.TestResult tr = + junit.textui.TestRunner.run(suite()); + if (tr.errorCount() > 0 || + tr.failureCount() > 0) { + System.exit(1); + } else { + System.exit(0); + } + } + } + + private static void usage() { + + System.out.println( + "Usage: java com.sleepycat.collections.test.TransactionTest" + + " [-h | -help]\n"); + System.exit(2); + } + + public static Test suite() + throws Exception { + + TestSuite suite = new TestSuite(TransactionTest.class); + return suite; + } + + private Environment env; + private CurrentTransaction currentTxn; + private Database store; + private StoredSortedMap map; + private TestStore testStore = TestStore.BTREE_UNIQ; + + public TransactionTest(String name) { + + super(name); + } + + public void setUp() + throws Exception { + + DbTestUtil.printTestName(DbTestUtil.qualifiedTestName(this)); + env = TestEnv.TXN.open("TransactionTests"); + currentTxn = CurrentTransaction.getInstance(env); + store = testStore.open(env, dbName(0)); + map = new StoredSortedMap(store, testStore.getKeyBinding(), + testStore.getValueBinding(), true); + } + + public void tearDown() { + + try { + if (store != null) { + store.close(); + } + if (env != null) { + env.close(); + } + } catch (Exception e) { + System.out.println("Ignored exception during tearDown: " + e); + } finally { + /* Ensure that GC can cleanup. */ + store = null; + env = null; + currentTxn = null; + map = null; + testStore = null; + } + } + + private String dbName(int i) { + + return "txn-test-" + getName() + '-' + i; + } + + public void testGetters() + throws Exception { + + assertNotNull(env); + assertNotNull(currentTxn); + assertNull(currentTxn.getTransaction()); + + currentTxn.beginTransaction(null); + assertNotNull(currentTxn.getTransaction()); + currentTxn.commitTransaction(); + assertNull(currentTxn.getTransaction()); + + currentTxn.beginTransaction(null); + assertNotNull(currentTxn.getTransaction()); + currentTxn.abortTransaction(); + assertNull(currentTxn.getTransaction()); + + // dirty-read property should be inherited + + assertTrue(!map.isDirtyRead()); + assertTrue(!((StoredContainer) map.values()).isDirtyRead()); + assertTrue(!((StoredContainer) map.keySet()).isDirtyRead()); + assertTrue(!((StoredContainer) map.entrySet()).isDirtyRead()); + + StoredSortedMap other = + (StoredSortedMap) StoredCollections.dirtyReadMap(map); + assertTrue(other.isDirtyRead()); + assertTrue(((StoredContainer) other.values()).isDirtyRead()); + assertTrue(((StoredContainer) other.keySet()).isDirtyRead()); + assertTrue(((StoredContainer) other.entrySet()).isDirtyRead()); + assertTrue(!map.isDirtyRead()); + assertTrue(!((StoredContainer) map.values()).isDirtyRead()); + assertTrue(!((StoredContainer) map.keySet()).isDirtyRead()); + assertTrue(!((StoredContainer) map.entrySet()).isDirtyRead()); + } + + public void testTransactional() + throws Exception { + + // is transactional because DB_AUTO_COMMIT was passed to + // Database.open() + // + assertTrue(map.isTransactional()); + store.close(); + store = null; + + // is not transactional + // + DatabaseConfig dbConfig = new DatabaseConfig(); + DbCompat.setTypeBtree(dbConfig); + dbConfig.setAllowCreate(true); + Database db = DbCompat.openDatabase(env, null, + dbName(1), null, + dbConfig); + map = new StoredSortedMap(db, testStore.getKeyBinding(), + testStore.getValueBinding(), true); + 
assertTrue(!map.isTransactional()); + map.put(ONE, ONE); + readCheck(map, ONE, ONE); + db.close(); + + // is transactional + // + dbConfig.setTransactional(true); + currentTxn.beginTransaction(null); + db = DbCompat.openDatabase(env, currentTxn.getTransaction(), + dbName(2), null, dbConfig); + currentTxn.commitTransaction(); + map = new StoredSortedMap(db, testStore.getKeyBinding(), + testStore.getValueBinding(), true); + assertTrue(map.isTransactional()); + currentTxn.beginTransaction(null); + map.put(ONE, ONE); + readCheck(map, ONE, ONE); + currentTxn.commitTransaction(); + db.close(); + } + + public void testExceptions() + throws Exception { + + try { + currentTxn.commitTransaction(); + fail(); + } catch (IllegalStateException expected) {} + + try { + currentTxn.abortTransaction(); + fail(); + } catch (IllegalStateException expected) {} + } + + public void testNested() + throws Exception { + + if (!DbCompat.NESTED_TRANSACTIONS) { + return; + } + assertNull(currentTxn.getTransaction()); + + Transaction txn1 = currentTxn.beginTransaction(null); + assertNotNull(txn1); + assertTrue(txn1 == currentTxn.getTransaction()); + + assertNull(map.get(ONE)); + assertNull(map.put(ONE, ONE)); + assertEquals(ONE, map.get(ONE)); + + Transaction txn2 = currentTxn.beginTransaction(null); + assertNotNull(txn2); + assertTrue(txn2 == currentTxn.getTransaction()); + assertTrue(txn1 != txn2); + + assertNull(map.put(TWO, TWO)); + assertEquals(TWO, map.get(TWO)); + + Transaction txn3 = currentTxn.beginTransaction(null); + assertNotNull(txn3); + assertTrue(txn3 == currentTxn.getTransaction()); + assertTrue(txn1 != txn2); + assertTrue(txn1 != txn3); + assertTrue(txn2 != txn3); + + assertNull(map.put(THREE, THREE)); + assertEquals(THREE, map.get(THREE)); + + Transaction txn = currentTxn.abortTransaction(); + assertTrue(txn == txn2); + assertTrue(txn == currentTxn.getTransaction()); + assertNull(map.get(THREE)); + assertEquals(TWO, map.get(TWO)); + + txn3 = currentTxn.beginTransaction(null); + assertNotNull(txn3); + assertTrue(txn3 == currentTxn.getTransaction()); + assertTrue(txn1 != txn2); + assertTrue(txn1 != txn3); + assertTrue(txn2 != txn3); + + assertNull(map.put(THREE, THREE)); + assertEquals(THREE, map.get(THREE)); + + txn = currentTxn.commitTransaction(); + assertTrue(txn == txn2); + assertTrue(txn == currentTxn.getTransaction()); + assertEquals(THREE, map.get(THREE)); + assertEquals(TWO, map.get(TWO)); + + txn = currentTxn.commitTransaction(); + assertTrue(txn == txn1); + assertTrue(txn == currentTxn.getTransaction()); + assertEquals(THREE, map.get(THREE)); + assertEquals(TWO, map.get(TWO)); + assertEquals(ONE, map.get(ONE)); + + txn = currentTxn.commitTransaction(); + assertNull(txn); + assertNull(currentTxn.getTransaction()); + assertEquals(THREE, map.get(THREE)); + assertEquals(TWO, map.get(TWO)); + assertEquals(ONE, map.get(ONE)); + } + + public void testRunnerCommit() + throws Exception { + + commitTest(false); + } + + public void testExplicitCommit() + throws Exception { + + commitTest(true); + } + + private void commitTest(final boolean explicit) + throws Exception { + + final TransactionRunner runner = new TransactionRunner(env); + runner.setAllowNestedTransactions(DbCompat.NESTED_TRANSACTIONS); + + assertNull(currentTxn.getTransaction()); + + runner.run(new TransactionWorker() { + public void doWork() throws Exception { + final Transaction txn1 = currentTxn.getTransaction(); + assertNotNull(txn1); + assertNull(map.put(ONE, ONE)); + assertEquals(ONE, map.get(ONE)); + + runner.run(new 
TransactionWorker() { + public void doWork() throws Exception { + final Transaction txn2 = currentTxn.getTransaction(); + assertNotNull(txn2); + if (DbCompat.NESTED_TRANSACTIONS) { + assertTrue(txn1 != txn2); + } else { + assertTrue(txn1 == txn2); + } + assertNull(map.put(TWO, TWO)); + assertEquals(TWO, map.get(TWO)); + assertEquals(ONE, map.get(ONE)); + if (DbCompat.NESTED_TRANSACTIONS && explicit) { + currentTxn.commitTransaction(); + } + } + }); + + Transaction txn3 = currentTxn.getTransaction(); + assertSame(txn1, txn3); + + assertEquals(TWO, map.get(TWO)); + assertEquals(ONE, map.get(ONE)); + } + }); + + assertNull(currentTxn.getTransaction()); + } + + public void testRunnerAbort() + throws Exception { + + abortTest(false); + } + + public void testExplicitAbort() + throws Exception { + + abortTest(true); + } + + private void abortTest(final boolean explicit) + throws Exception { + + final TransactionRunner runner = new TransactionRunner(env); + runner.setAllowNestedTransactions(DbCompat.NESTED_TRANSACTIONS); + + assertNull(currentTxn.getTransaction()); + + runner.run(new TransactionWorker() { + public void doWork() throws Exception { + final Transaction txn1 = currentTxn.getTransaction(); + assertNotNull(txn1); + assertNull(map.put(ONE, ONE)); + assertEquals(ONE, map.get(ONE)); + + if (DbCompat.NESTED_TRANSACTIONS) { + try { + runner.run(new TransactionWorker() { + public void doWork() throws Exception { + final Transaction txn2 = + currentTxn.getTransaction(); + assertNotNull(txn2); + assertTrue(txn1 != txn2); + assertNull(map.put(TWO, TWO)); + assertEquals(TWO, map.get(TWO)); + if (explicit) { + currentTxn.abortTransaction(); + } else { + throw new IllegalArgumentException( + "test-abort"); + } + } + }); + assertTrue(explicit); + } catch (IllegalArgumentException e) { + assertTrue(!explicit); + assertEquals("test-abort", e.getMessage()); + } + } + + Transaction txn3 = currentTxn.getTransaction(); + assertSame(txn1, txn3); + + assertEquals(ONE, map.get(ONE)); + assertNull(map.get(TWO)); + } + }); + + assertNull(currentTxn.getTransaction()); + } + + public void testDirtyReadCollection() + throws Exception { + + StoredSortedMap dirtyMap = + (StoredSortedMap) StoredCollections.dirtyReadSortedMap(map); + + // original map is not dirty-read + assertTrue(map.isDirtyReadAllowed()); + assertTrue(!map.isDirtyRead()); + + // all dirty-read containers are dirty-read + checkDirtyReadProperty(dirtyMap); + checkDirtyReadProperty(StoredCollections.dirtyReadMap(map)); + checkDirtyReadProperty(StoredCollections.dirtyReadCollection( + map.values())); + checkDirtyReadProperty(StoredCollections.dirtyReadSet( + map.keySet())); + checkDirtyReadProperty(StoredCollections.dirtyReadSortedSet( + (SortedSet) map.keySet())); + + if (DbCompat.RECNO_METHOD) { + // create a list just so we can call dirtyReadList() + Database listStore = TestStore.RECNO_RENUM.open(env, null); + List list = new StoredList(listStore, TestStore.VALUE_BINDING, + true); + checkDirtyReadProperty(StoredCollections.dirtyReadList(list)); + listStore.close(); + } + + doDirtyRead(dirtyMap); + } + + private void checkDirtyReadProperty(Object container) { + + assertTrue(((StoredContainer) container).isDirtyReadAllowed()); + assertTrue(((StoredContainer) container).isDirtyRead()); + } + + public void testDirtyReadTransaction() + throws Exception { + + TransactionRunner runner = new TransactionRunner(env); + TransactionConfig config = new TransactionConfig(); + config.setDirtyRead(true); + runner.setTransactionConfig(config); + 
assertNull(currentTxn.getTransaction()); + runner.run(new TransactionWorker() { + public void doWork() throws Exception { + assertNotNull(currentTxn.getTransaction()); + doDirtyRead(map); + } + }); + assertNull(currentTxn.getTransaction()); + } + + private synchronized void doDirtyRead(StoredSortedMap dirtyMap) + throws Exception { + + // start thread one + DirtyReadThreadOne t1 = new DirtyReadThreadOne(env, this); + t1.start(); + wait(); + + // put ONE + synchronized (t1) { t1.notify(); } + wait(); + readCheck(dirtyMap, ONE, ONE); + assertTrue(!dirtyMap.isEmpty()); + + // abort ONE + synchronized (t1) { t1.notify(); } + t1.join(); + readCheck(dirtyMap, ONE, null); + assertTrue(dirtyMap.isEmpty()); + + // start thread two + DirtyReadThreadTwo t2 = new DirtyReadThreadTwo(env, this); + t2.start(); + wait(); + + // put TWO + synchronized (t2) { t2.notify(); } + wait(); + readCheck(dirtyMap, TWO, TWO); + assertTrue(!dirtyMap.isEmpty()); + + // commit TWO + synchronized (t2) { t2.notify(); } + t2.join(); + readCheck(dirtyMap, TWO, TWO); + assertTrue(!dirtyMap.isEmpty()); + } + + private static class DirtyReadThreadOne extends Thread { + + private Environment env; + private CurrentTransaction currentTxn; + private TransactionTest parent; + private StoredSortedMap map; + + private DirtyReadThreadOne(Environment env, TransactionTest parent) { + + this.env = env; + this.currentTxn = CurrentTransaction.getInstance(env); + this.parent = parent; + this.map = parent.map; + } + + public synchronized void run() { + + try { + assertNull(currentTxn.getTransaction()); + assertNotNull(currentTxn.beginTransaction(null)); + assertNotNull(currentTxn.getTransaction()); + readCheck(map, ONE, null); + synchronized (parent) { parent.notify(); } + wait(); + + // put ONE + assertNull(map.put(ONE, ONE)); + readCheck(map, ONE, ONE); + synchronized (parent) { parent.notify(); } + wait(); + + // abort ONE + assertNull(currentTxn.abortTransaction()); + assertNull(currentTxn.getTransaction()); + } catch (Exception e) { + throw new RuntimeExceptionWrapper(e); + } + } + } + + private static class DirtyReadThreadTwo extends Thread { + + private Environment env; + private CurrentTransaction currentTxn; + private TransactionTest parent; + private StoredSortedMap map; + + private DirtyReadThreadTwo(Environment env, TransactionTest parent) { + + this.env = env; + this.currentTxn = CurrentTransaction.getInstance(env); + this.parent = parent; + this.map = parent.map; + } + + public synchronized void run() { + + try { + final TransactionRunner runner = new TransactionRunner(env); + final Object thread = this; + assertNull(currentTxn.getTransaction()); + + runner.run(new TransactionWorker() { + public void doWork() throws Exception { + assertNotNull(currentTxn.getTransaction()); + readCheck(map, TWO, null); + synchronized (parent) { parent.notify(); } + thread.wait(); + + // put TWO + assertNull(map.put(TWO, TWO)); + readCheck(map, TWO, TWO); + synchronized (parent) { parent.notify(); } + thread.wait(); + + // commit TWO + } + }); + assertNull(currentTxn.getTransaction()); + } catch (Exception e) { + throw new RuntimeExceptionWrapper(e); + } + } + } + + private static void readCheck(StoredSortedMap checkMap, Object key, + Object expect) { + if (expect == null) { + assertNull(checkMap.get(key)); + assertTrue(checkMap.tailMap(key).isEmpty()); + assertTrue(!checkMap.tailMap(key).containsKey(key)); + assertTrue(!checkMap.keySet().contains(key)); + assertTrue(checkMap.duplicates(key).isEmpty()); + Iterator i = 
checkMap.keySet().iterator(); + try { + while (i.hasNext()) { + assertTrue(!key.equals(i.next())); + } + } finally { StoredIterator.close(i); } + } else { + assertEquals(expect, checkMap.get(key)); + assertEquals(expect, checkMap.tailMap(key).get(key)); + assertTrue(!checkMap.tailMap(key).isEmpty()); + assertTrue(checkMap.tailMap(key).containsKey(key)); + assertTrue(checkMap.keySet().contains(key)); + assertTrue(checkMap.values().contains(expect)); + assertTrue(!checkMap.duplicates(key).isEmpty()); + assertTrue(checkMap.duplicates(key).contains(expect)); + Iterator i = checkMap.keySet().iterator(); + try { + boolean found = false; + while (i.hasNext()) { + if (expect.equals(i.next())) { + found = true; + } + } + assertTrue(found); + } + finally { StoredIterator.close(i); } + } + } +} diff --git a/db/test/scr024/src/com/sleepycat/collections/test/serial/StoredClassCatalogTest.java b/db/test/scr024/src/com/sleepycat/collections/test/serial/StoredClassCatalogTest.java new file mode 100644 index 000000000..0f9c78551 --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/collections/test/serial/StoredClassCatalogTest.java @@ -0,0 +1,179 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: StoredClassCatalogTest.java,v 1.2 2004/09/01 14:34:22 mark Exp $ + */ +package com.sleepycat.collections.test.serial; + +import java.io.ObjectStreamClass; +import java.util.Map; + +import junit.framework.Test; +import junit.framework.TestCase; +import junit.framework.TestSuite; + +import com.sleepycat.bind.serial.SerialBinding; +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.collections.StoredMap; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.collections.test.DbTestUtil; +import com.sleepycat.collections.test.TestEnv; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseConfig; +import com.sleepycat.db.Environment; + +/** + * Runs part two of the StoredClassCatalogTest. This part is run with the + * new/updated version of TestSerial in the classpath. It uses the + * environment and databases created by StoredClassCatalogTestInit. It + * verifies that it can read objects serialized using the old class format, + * and that it can create new objects with the new class format. 
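The class-evolution scheme the comment above relies on fits in one class: pin serialVersionUID to the value computed for the original class (the serialver tool prints it), after which adding a field is a compatible change, so instances serialized before the change still deserialize and the new field is simply left at its default. The sketch below is illustrative only; the class name and UID value are hypothetical and this is not the TestSerial source.

    import java.io.Serializable;

    class EvolvedRecord implements Serializable {

        /*
         * Pinned to the UID of the *original* class version (as printed by
         * the serialver tool); the value here is a placeholder.  Without an
         * explicit UID, adding the field below would change the computed
         * default UID and deserializing old instances would fail with
         * InvalidClassException.
         */
        static final long serialVersionUID = 1234567890L;

        private int i = 123;          /* field present in the original format */
        private String s = "string";  /* field added later; reads back as null
                                         for pre-change data, because field
                                         initializers do not run during
                                         deserialization */
    }

This is exactly the behavior the doWork() method below checks: getStringField() is null for the records written by part one of the test, but non-null for a freshly constructed instance of the updated class.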
+ * + * @author Mark Hayes + */ +public class StoredClassCatalogTest extends TestCase + implements TransactionWorker { + + static final String CATALOG_FILE = "catalogtest-catalog.db"; + static final String STORE_FILE = "catalogtest-store.db"; + + public static void main(String[] args) + throws Exception { + + junit.framework.TestResult tr = + junit.textui.TestRunner.run(suite()); + if (tr.errorCount() > 0 || + tr.failureCount() > 0) { + System.exit(1); + } else { + System.exit(0); + } + } + + public static Test suite() + throws Exception { + + TestSuite suite = new TestSuite(); + for (int i = 0; i < TestEnv.ALL.length; i += 1) { + suite.addTest(new StoredClassCatalogTest(TestEnv.ALL[i])); + } + return suite; + } + + private TestEnv testEnv; + private Environment env; + private StoredClassCatalog catalog; + private StoredClassCatalog catalog2; + private Database store; + private Map map; + private TransactionRunner runner; + + public StoredClassCatalogTest(TestEnv testEnv) { + + super(makeTestName(testEnv)); + this.testEnv = testEnv; + } + + static String makeTestName(TestEnv testEnv) { + return "StoredClassCatalogTest-" + testEnv.getName(); + } + + public void setUp() + throws Exception { + + DbTestUtil.printTestName(getName()); + env = testEnv.open(makeTestName(testEnv), false); + runner = new TransactionRunner(env); + + catalog = new StoredClassCatalog(openDb(CATALOG_FILE, false)); + catalog2 = new StoredClassCatalog(openDb("catalog2.db", true)); + + SerialBinding keyBinding = new SerialBinding(catalog, + String.class); + SerialBinding valueBinding = new SerialBinding(catalog, + TestSerial.class); + store = openDb(STORE_FILE, false); + + map = new StoredMap(store, keyBinding, valueBinding, true); + } + + private Database openDb(String file, boolean create) + throws Exception { + + DatabaseConfig config = new DatabaseConfig(); + DbCompat.setTypeBtree(config); + config.setTransactional(testEnv.isTxnMode()); + config.setAllowCreate(create); + + return DbCompat.openDatabase(env, null, file, null, config); + } + + public void tearDown() { + + try { + if (catalog != null) { + catalog.close(); + catalog.close(); // should have no effect + } + if (catalog2 != null) { + catalog2.close(); + } + if (store != null) { + store.close(); + } + if (env != null) { + env.close(); + } + } catch (Exception e) { + System.err.println("Ignored exception during tearDown: "); + e.printStackTrace(); + } finally { + /* Ensure that GC can cleanup. 
*/ + catalog = null; + catalog2 = null; + store = null; + env = null; + testEnv = null; + map = null; + runner = null; + } + } + + public void runTest() + throws Exception { + + runner.run(this); + } + + public void doWork() + throws Exception { + + TestSerial one = (TestSerial) map.get("one"); + TestSerial two = (TestSerial) map.get("two"); + assertNotNull(one); + assertNotNull(two); + assertEquals(one, two.getOther()); + assertNull(one.getStringField()); + assertNull(two.getStringField()); + + TestSerial three = new TestSerial(two); + assertNotNull(three.getStringField()); + map.put("three", three); + three = (TestSerial) map.get("three"); + assertEquals(two, three.getOther()); + + ObjectStreamClass desc = ObjectStreamClass.lookup(TestSerial.class); + + assertNotNull(catalog.getClassID(desc)); + assertNotNull(catalog.getClassID(desc)); + + // test with empty catalog + assertNotNull(catalog2.getClassID(desc)); + assertNotNull(catalog2.getClassID(desc)); + } +} diff --git a/db/test/scr024/src/com/sleepycat/collections/test/serial/StoredClassCatalogTestInit.java b/db/test/scr024/src/com/sleepycat/collections/test/serial/StoredClassCatalogTestInit.java new file mode 100644 index 000000000..e1f5184a8 --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/collections/test/serial/StoredClassCatalogTestInit.java @@ -0,0 +1,159 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: StoredClassCatalogTestInit.java,v 1.1 2004/04/09 16:34:10 mark Exp $ + */ +package com.sleepycat.collections.test.serial; + +import java.util.Map; + +import junit.framework.Test; +import junit.framework.TestCase; +import junit.framework.TestSuite; + +import com.sleepycat.bind.serial.SerialBinding; +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.collections.StoredMap; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.collections.test.DbTestUtil; +import com.sleepycat.collections.test.TestEnv; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.db.Database; +import com.sleepycat.db.DatabaseConfig; +import com.sleepycat.db.Environment; + +/** + * Runs part one of the StoredClassCatalogTest. This part is run with the + * old/original version of TestSerial in the classpath. It creates a fresh + * environment and databases containing serialized versions of the old class. + * When StoredClassCatalogTest is run, it will read these objects from the + * database created here. 
+ * + * @author Mark Hayes + */ +public class StoredClassCatalogTestInit extends TestCase + implements TransactionWorker { + + static final String CATALOG_FILE = StoredClassCatalogTest.CATALOG_FILE; + static final String STORE_FILE = StoredClassCatalogTest.STORE_FILE; + + public static void main(String[] args) + throws Exception { + + junit.framework.TestResult tr = + junit.textui.TestRunner.run(suite()); + if (tr.errorCount() > 0 || + tr.failureCount() > 0) { + System.exit(1); + } else { + System.exit(0); + } + } + + public static Test suite() + throws Exception { + + TestSuite suite = new TestSuite(); + for (int i = 0; i < TestEnv.ALL.length; i += 1) { + suite.addTest(new StoredClassCatalogTestInit(TestEnv.ALL[i])); + } + return suite; + } + + private TestEnv testEnv; + private Environment env; + private StoredClassCatalog catalog; + private Database store; + private Map map; + private TransactionRunner runner; + + public StoredClassCatalogTestInit(TestEnv testEnv) { + + super("StoredClassCatalogTestInit-" + testEnv.getName()); + this.testEnv = testEnv; + } + + public void setUp() + throws Exception { + + DbTestUtil.printTestName(getName()); + env = testEnv.open(StoredClassCatalogTest.makeTestName(testEnv)); + runner = new TransactionRunner(env); + + catalog = new StoredClassCatalog(openDb(CATALOG_FILE)); + + SerialBinding keyBinding = new SerialBinding(catalog, + String.class); + SerialBinding valueBinding = new SerialBinding(catalog, + TestSerial.class); + store = openDb(STORE_FILE); + + map = new StoredMap(store, keyBinding, valueBinding, true); + } + + private Database openDb(String file) + throws Exception { + + DatabaseConfig config = new DatabaseConfig(); + DbCompat.setTypeBtree(config); + config.setTransactional(testEnv.isTxnMode()); + config.setAllowCreate(true); + + return DbCompat.openDatabase(env, null, file, null, config); + } + + public void tearDown() { + + try { + if (catalog != null) { + catalog.close(); + catalog.close(); // should have no effect + } + if (store != null) { + store.close(); + } + if (env != null) { + env.close(); + } + } catch (Exception e) { + System.err.println("Ignored exception during tearDown: "); + e.printStackTrace(); + } finally { + /* Ensure that GC can cleanup. */ + catalog = null; + store = null; + env = null; + testEnv = null; + map = null; + runner = null; + } + } + + public void runTest() + throws Exception { + + runner.run(this); + } + + public void doWork() + throws Exception { + + TestSerial one = new TestSerial(null); + TestSerial two = new TestSerial(one); + assertNull("Likely the classpath contains the wrong version of the" + + " TestSerial class, the 'original' version is required", + one.getStringField()); + assertNull(two.getStringField()); + map.put("one", one); + map.put("two", two); + one = (TestSerial) map.get("one"); + two = (TestSerial) map.get("two"); + assertEquals(one, two.getOther()); + assertNull(one.getStringField()); + assertNull(two.getStringField()); + } +} diff --git a/db/test/scr024/src/com/sleepycat/collections/test/serial/TestSerial.java b/db/test/scr024/src/com/sleepycat/collections/test/serial/TestSerial.java new file mode 100644 index 000000000..56300aeb5 --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/collections/test/serial/TestSerial.java @@ -0,0 +1,70 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. 
+ * + * $Id: TestSerial.java,v 1.1 2004/04/09 16:34:10 mark Exp $ + */ +package com.sleepycat.collections.test.serial; + +/** + * @author Mark Hayes + */ +class TestSerial implements java.io.Serializable { + + static final long serialVersionUID = -3738980000390384920L; + + private int i = 123; + private TestSerial other; + + // The following field 's' was added after this class was compiled and + // serialized instances were saved in resource files. This allows testing + // that the original stored instances can be deserialized after changing + // the class. The serialVersionUID is needed for this according to Java + // serialization rules, and was generated with the serialver tool. + // + private String s = "string"; + + TestSerial(TestSerial other) { + + this.other = other; + } + + TestSerial getOther() { + + return other; + } + + int getIntField() { + + return i; + } + + String getStringField() { + + return s; // this returned null before field 's' was added. + } + + public boolean equals(Object object) { + + try { + TestSerial o = (TestSerial) object; + if ((o.other == null) ? (this.other != null) + : (!o.other.equals(this.other))) { + return false; + } + if (this.i != o.i) { + return false; + } + // the following test was not done before field 's' was added + if ((o.s == null) ? (this.s != null) + : (!o.s.equals(this.s))) { + return false; + } + return true; + } catch (ClassCastException e) { + return false; + } + } +} diff --git a/db/test/scr024/src/com/sleepycat/collections/test/serial/TestSerial.java.original b/db/test/scr024/src/com/sleepycat/collections/test/serial/TestSerial.java.original new file mode 100644 index 000000000..80dba2e80 --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/collections/test/serial/TestSerial.java.original @@ -0,0 +1,69 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: TestSerial.java.original,v 1.7 2004/08/02 18:53:09 mjc Exp $ + */ +package com.sleepycat.collections.test.serial; + +class TestSerial implements java.io.Serializable +{ + static final long serialVersionUID = -3738980000390384920L; + + private int i = 123; + private TestSerial other; + + // The following field 's' was added after this class was compiled and + // serialized instances were saved in resource files. This allows testing + // that the original stored instances can be deserialized after changing + // the class. The serialVersionUID is needed for this according to Java + // serialization rules, and was generated with the serialver tool. + // + //private String s = "string"; + + TestSerial(TestSerial other) + { + this.other = other; + } + + TestSerial getOther() + { + return other; + } + + int getIntField() + { + return i; + } + + String getStringField() + { + return null; // this returned null before field 's' was added. + } + + public boolean equals(Object object) + { + try + { + TestSerial o = (TestSerial) object; + if ((o.other == null) ? (this.other != null) + : (!o.other.equals(this.other))) + return false; + if (this.i != o.i) + return false; + // the following test was not done before field 's' was added + /* + if ((o.s == null) ? 
(this.s != null) + : (!o.s.equals(this.s))) + return false; + */ + return true; + } + catch (ClassCastException e) + { + return false; + } + } +} diff --git a/db/test/scr024/src/com/sleepycat/util/test/ExceptionWrapperTest.java b/db/test/scr024/src/com/sleepycat/util/test/ExceptionWrapperTest.java new file mode 100644 index 000000000..f07e3666e --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/util/test/ExceptionWrapperTest.java @@ -0,0 +1,110 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. + * + * $Id: ExceptionWrapperTest.java,v 1.1 2004/04/09 16:34:10 mark Exp $ + */ + +package com.sleepycat.util.test; + +import java.io.IOException; + +import junit.framework.Test; +import junit.framework.TestCase; +import junit.framework.TestSuite; + +import com.sleepycat.collections.test.DbTestUtil; +import com.sleepycat.util.ExceptionUnwrapper; +import com.sleepycat.util.IOExceptionWrapper; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * @author Mark Hayes + */ +public class ExceptionWrapperTest extends TestCase { + + public static void main(String[] args) + throws Exception { + + junit.framework.TestResult tr = + junit.textui.TestRunner.run(suite()); + if (tr.errorCount() > 0 || + tr.failureCount() > 0) { + System.exit(1); + } else { + System.exit(0); + } + } + + public static Test suite() + throws Exception { + + TestSuite suite = new TestSuite(ExceptionWrapperTest.class); + return suite; + } + + public ExceptionWrapperTest(String name) { + + super(name); + } + + public void setUp() { + + DbTestUtil.printTestName("ExceptionWrapperTest." + getName()); + } + + public void testIOWrapper() + throws Exception { + + try { + throw new IOExceptionWrapper(new RuntimeException("msg")); + } catch (IOException e) { + Exception ee = ExceptionUnwrapper.unwrap(e); + assertTrue(ee instanceof RuntimeException); + assertEquals("msg", ee.getMessage()); + + Throwable t = ExceptionUnwrapper.unwrapAny(e); + assertTrue(t instanceof RuntimeException); + assertEquals("msg", t.getMessage()); + } + } + + public void testRuntimeWrapper() + throws Exception { + + try { + throw new RuntimeExceptionWrapper(new IOException("msg")); + } catch (RuntimeException e) { + Exception ee = ExceptionUnwrapper.unwrap(e); + assertTrue(ee instanceof IOException); + assertEquals("msg", ee.getMessage()); + + Throwable t = ExceptionUnwrapper.unwrapAny(e); + assertTrue(t instanceof IOException); + assertEquals("msg", t.getMessage()); + } + } + + public void testErrorWrapper() + throws Exception { + + try { + throw new RuntimeExceptionWrapper(new Error("msg")); + } catch (RuntimeException e) { + try { + ExceptionUnwrapper.unwrap(e); + fail(); + } catch (Error ee) { + assertTrue(ee instanceof Error); + assertEquals("msg", ee.getMessage()); + } + + Throwable t = ExceptionUnwrapper.unwrapAny(e); + assertTrue(t instanceof Error); + assertEquals("msg", t.getMessage()); + } + } +} + diff --git a/db/test/scr024/src/com/sleepycat/util/test/UtfTest.java b/db/test/scr024/src/com/sleepycat/util/test/UtfTest.java new file mode 100644 index 000000000..1ba82291b --- /dev/null +++ b/db/test/scr024/src/com/sleepycat/util/test/UtfTest.java @@ -0,0 +1,169 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002-2004 + * Sleepycat Software. All rights reserved. 
+ * + * $Id: UtfTest.java,v 1.2 2004/06/02 21:00:59 mark Exp $ + */ + +package com.sleepycat.util.test; + +import java.io.DataOutputStream; +import java.util.Arrays; + +import junit.framework.Test; +import junit.framework.TestCase; +import junit.framework.TestSuite; + +import com.sleepycat.collections.test.DbTestUtil; +import com.sleepycat.util.FastOutputStream; +import com.sleepycat.util.UtfOps; + +/** + * @author Mark Hayes + */ +public class UtfTest extends TestCase { + + public static void main(String[] args) + throws Exception { + + junit.framework.TestResult tr = + junit.textui.TestRunner.run(suite()); + if (tr.errorCount() > 0 || + tr.failureCount() > 0) { + System.exit(1); + } else { + System.exit(0); + } + } + + public static Test suite() + throws Exception { + + TestSuite suite = new TestSuite(UtfTest.class); + return suite; + } + + public UtfTest(String name) { + + super(name); + } + + public void setUp() { + + DbTestUtil.printTestName("UtfTest." + getName()); + } + + /** + * Compares the UtfOps implementation to the java.util.DataOutputStream + * (and by implication DataInputStream) implementation, character for + * character in the full Unicode set. + */ + public void testMultibyte() + throws Exception { + + char c = 0; + byte[] buf = new byte[10]; + byte[] javaBuf = new byte[10]; + char[] cArray = new char[1]; + FastOutputStream javaBufStream = new FastOutputStream(javaBuf); + DataOutputStream javaOutStream = new DataOutputStream(javaBufStream); + + try { + for (int cInt = Character.MIN_VALUE; cInt <= Character.MAX_VALUE; + cInt += 1) { + c = (char) cInt; + cArray[0] = c; + int byteLen = UtfOps.getByteLength(cArray); + + javaBufStream.reset(); + javaOutStream.writeUTF(new String(cArray)); + int javaByteLen = javaBufStream.size() - 2; + + if (byteLen != javaByteLen) { + fail("Character 0x" + Integer.toHexString(c) + + " UtfOps size " + byteLen + + " != JavaIO size " + javaByteLen); + } + + Arrays.fill(buf, (byte) 0); + UtfOps.charsToBytes(cArray, 0, buf, 0, 1); + + if (byteLen == 1 && buf[0] == (byte) 0xff) { + fail("Character 0x" + Integer.toHexString(c) + + " was encoded as FF, which is reserved for null"); + } + + for (int i = 0; i < byteLen; i += 1) { + if (buf[i] != javaBuf[i + 2]) { + fail("Character 0x" + Integer.toHexString(c) + + " byte offset " + i + + " UtfOps byte " + Integer.toHexString(buf[i]) + + " != JavaIO byte " + + Integer.toHexString(javaBuf[i + 2])); + } + } + + int charLen = UtfOps.getCharLength(buf, 0, byteLen); + if (charLen != 1) { + fail("Character 0x" + Integer.toHexString(c) + + " UtfOps char len " + charLen + + " but should be one"); + } + + cArray[0] = (char) 0; + int len = UtfOps.bytesToChars(buf, 0, cArray, 0, byteLen, + true); + if (len != byteLen) { + fail("Character 0x" + Integer.toHexString(c) + + " UtfOps bytesToChars(w/byteLen) len " + len + + " but should be " + byteLen); + } + + if (cArray[0] != c) { + fail("Character 0x" + Integer.toHexString(c) + + " UtfOps bytesToChars(w/byteLen) char " + + Integer.toHexString(cArray[0])); + } + + cArray[0] = (char) 0; + len = UtfOps.bytesToChars(buf, 0, cArray, 0, 1, false); + if (len != byteLen) { + fail("Character 0x" + Integer.toHexString(c) + + " UtfOps bytesToChars(w/charLen) len " + len + + " but should be " + byteLen); + } + + if (cArray[0] != c) { + fail("Character 0x" + Integer.toHexString(c) + + " UtfOps bytesToChars(w/charLen) char " + + Integer.toHexString(cArray[0])); + } + + String s = new String(cArray, 0, 1); + byte[] sBytes = UtfOps.stringToBytes(s); + if (sBytes.length != byteLen) { 
+ fail("Character 0x" + Integer.toHexString(c) + + " UtfOps stringToBytes() len " + sBytes.length + + " but should be " + byteLen); + } + + for (int i = 0; i < byteLen; i += 1) { + if (sBytes[i] != javaBuf[i + 2]) { + fail("Character 0x" + Integer.toHexString(c) + + " byte offset " + i + + " UtfOps byte " + Integer.toHexString(sBytes[i]) + + " != JavaIO byte " + + Integer.toHexString(javaBuf[i + 2])); + } + } + } + } catch (Exception e) { + System.out.println("Character 0x" + Integer.toHexString(c) + + " exception occurred"); + throw e; + } + } +} + diff --git a/db/test/scr026/chk.method b/db/test/scr026/chk.method index 2fbb00bec..22e308a76 100644 --- a/db/test/scr026/chk.method +++ b/db/test/scr026/chk.method @@ -1,6 +1,6 @@ #!/bin/sh - # -# $Id: chk.method,v 1.2 2003/08/01 17:12:39 bostic Exp $ +# $Id: chk.method,v 1.4 2004/10/07 20:42:10 bostic Exp $ # # Check that DB doesn't call DB or DB_ENV methods internally. @@ -65,6 +65,8 @@ sed \ -e '/^examples_java\//d' \ -e '/^hsearch\//d' \ -e '/^libdb_java\//d' \ + -e '/^mod_db4\//d' \ + -e '/^php_db4\//d' \ -e '/^tcl\//d' \ -e '/^test\//d' \ -e '/^test_perf\//d' \ @@ -75,6 +77,7 @@ sed \ -e '/^xa\//d' \ -e '/closeme->close() is a wrapper;/d' \ -e '/crypto.c.*db_cipher->close/d' \ + -e '/db_err.c:.*dbenv->db_msgcall(dbenv, buf);/d' \ -e '/db_iface.c:.*(txn->commit(txn, nosync ? DB_TXN_NOSYNC : 0));/d' \ -e '/db_iface.c:.*if ((t_ret = txn->abort(txn)) != 0)/d' \ -e '/db_iface.c:.*return (dbenv->txn_begin(dbenv, NULL, txnidp, 0));/d' \ diff --git a/db/test/scr027/chk.javas b/db/test/scr027/chk.javas index b32999209..c8e3362e7 100644 --- a/db/test/scr027/chk.javas +++ b/db/test/scr027/chk.javas @@ -1,38 +1,10 @@ #!/bin/sh - # -# $Id: chk.javas,v 1.4 2003/11/21 02:39:41 bostic Exp $ +# $Id: chk.javas,v 1.6 2004/09/29 01:12:08 bostic Exp $ # # Check to make sure that the java code samples in the documents build. -docs=../../../db.docs - -[ -d $d ] || { - echo "FAIL: $d: cannot find document sources." - exit 1 -} -[ -f ../db.jar ] || (cd .. && make db.jar) || { - echo 'FAIL: unable to find or build db.jar' - exit 1 -} - -for i in `find $docs -name '*.javas'`; do - echo " compiling $i" - sed -e 's/m4_[a-z]*[(\[)]*//g' \ - -e 's/(\[//g' \ - -e 's/\])//g' \ - -e 's/dnl//g' \ - -e 's/[ \t]*public class[ \t]*/ class /g' \ - -e 's/^[ \t]*\.\.\.[ \t]*$//' \ - -e 's/__GT__/>/g' \ - -e 's/__LB__/[/g' \ - -e 's/__LT__/ t.java - if javac -classpath ../db.jar t.java; then - : - else - echo "FAIL: unable to compile $i" - exit 1 - fi -done +# There isn't any Java sample code left in the Reference Guide. This test +# left as a placeholder so I don't have to renumber all of the other tests. exit 0 diff --git a/db/test/scr028/chk.rtc b/db/test/scr028/chk.rtc new file mode 100644 index 000000000..d21e0309d --- /dev/null +++ b/db/test/scr028/chk.rtc @@ -0,0 +1,26 @@ +#!/bin/sh - +# +# $Id: chk.rtc,v 1.1 2003/12/11 21:30:41 bostic Exp $ +# +# Build a program that calls the run-time API configuration functions. + +[ -f ../libdb.a ] || (cd .. && make libdb.a) || { + echo 'FAIL: unable to find or build libdb.a' + exit 1 +} + +if cc -g -Wall -I.. 
t.c ../libdb.a -o t; then + : +else + echo "FAIL: unable to compile test program t.c" + exit 1 +fi + +if ./t; then + : +else + echo "FAIL: test program failed" + exit 1 +fi + +exit 0 diff --git a/db/test/scr028/t.c b/db/test/scr028/t.c new file mode 100644 index 000000000..cdbfafc76 --- /dev/null +++ b/db/test/scr028/t.c @@ -0,0 +1,76 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "db.h" + +#define E(api, func, name) { \ + if ((ret = api(func)) != 0) { \ + fprintf(stderr, "%s: %s", name, db_strerror(ret)); \ + return (1); \ + } \ +} + +void +dirfree(char **namesp, int cnt) +{ return; } +int +dirlist(const char *dir, char ***namesp, int *cntp) +{ return (0); } +int +exists(const char *path, int *isdirp) +{ return (0); } +int +ioinfo(const char *path, + int fd, u_int32_t *mbytesp, u_int32_t *bytesp, u_int32_t *iosizep) +{ return (0); } +int +map(char *path, size_t len, int is_region, int is_rdonly, void **addr) +{ return (0); } +int +seek(int fd, off_t offset, int whence) +{ return (0); } +int +local_sleep(u_long seconds, u_long microseconds) +{ return (0); } +int +unmap(void *addr, size_t len) +{ return (0); } + +int +main(int argc, char *argv[]) +{ + int ret; + + E(db_env_set_func_close, close, "close"); + E(db_env_set_func_dirfree, dirfree, "dirfree"); + E(db_env_set_func_dirlist, dirlist, "dirlist"); + E(db_env_set_func_exists, exists, "exists"); + E(db_env_set_func_free, free, "free"); + E(db_env_set_func_fsync, fsync, "fsync"); + E(db_env_set_func_ftruncate, ftruncate, "ftruncate"); + E(db_env_set_func_ioinfo, ioinfo, "ioinfo"); + E(db_env_set_func_malloc, malloc, "malloc"); + E(db_env_set_func_map, map, "map"); + E(db_env_set_func_open, open, "open"); + E(db_env_set_func_pread, pread, "pread"); + E(db_env_set_func_pwrite, pwrite, "pwrite"); + E(db_env_set_func_read, read, "read"); + E(db_env_set_func_realloc, realloc, "realloc"); + E(db_env_set_func_rename, rename, "rename"); + E(db_env_set_func_seek, seek, "seek"); + E(db_env_set_func_sleep, local_sleep, "sleep"); + E(db_env_set_func_unlink, unlink, "unlink"); + E(db_env_set_func_unmap, unmap, "unmap"); + E(db_env_set_func_write, write, "write"); + E(db_env_set_func_yield, sched_yield, "yield"); + + return (0); +} diff --git a/db/test/scr029/chk.get b/db/test/scr029/chk.get new file mode 100644 index 000000000..01b56af57 --- /dev/null +++ b/db/test/scr029/chk.get @@ -0,0 +1,26 @@ +#!/bin/sh - +# +# $Id: chk.get,v 1.1 2004/03/15 21:09:50 bostic Exp $ +# +# Build a program that calls the getters. + +[ -f ../libdb.a ] || (cd .. && make libdb.a) || { + echo 'FAIL: unable to find or build libdb.a' + exit 1 +} + +if cc -g -Wall -I.. t.c ../libdb.a -o t; then + : +else + echo "FAIL: unable to compile test program t.c" + exit 1 +fi + +if ./t; then + : +else + echo "FAIL: test program failed" + exit 1 +fi + +exit 0 diff --git a/db/test/scr029/t.c b/db/test/scr029/t.c new file mode 100644 index 000000000..b7d4b3211 --- /dev/null +++ b/db/test/scr029/t.c @@ -0,0 +1,203 @@ +#include + +#include +#include +#include + +#include "db.h" + +#define ENV { \ + if (dbenv != NULL) \ + assert(dbenv->close(dbenv, 0) == 0); \ + assert(db_env_create(&dbenv, 0) == 0); \ + dbenv->set_errfile(dbenv, stderr); \ +} + +int +main() +{ + const u_int8_t *lk_conflicts; + DB_ENV *dbenv; + db_timeout_t timeout; + u_int32_t a, b, c, v; + int nmodes, lk_modes; + u_int8_t conflicts[40]; + + dbenv = NULL; + + /* tx_max: NOT reset at run-time. 
*/ + system("rm -rf TESTDIR; mkdir TESTDIR"); + ENV + assert(dbenv->set_tx_max(dbenv, 37) == 0); + assert(dbenv->open(dbenv, + "TESTDIR", DB_CREATE | DB_INIT_TXN, 0666) == 0); + assert(dbenv->get_tx_max(dbenv, &v) == 0); + assert(v == 37); + ENV + assert(dbenv->set_tx_max(dbenv, 63) == 0); + assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0); + assert(dbenv->get_tx_max(dbenv, &v) == 0); + assert(v == 37); + + /* lg_max: reset at run-time. */ + system("rm -rf TESTDIR; mkdir TESTDIR"); + ENV + assert(dbenv->set_lg_max(dbenv, 37 * 1024 * 1024) == 0); + assert(dbenv->open(dbenv, + "TESTDIR", DB_CREATE | DB_INIT_LOG, 0666) == 0); + assert(dbenv->get_lg_max(dbenv, &v) == 0); + assert(v == 37 * 1024 * 1024); + ENV + assert(dbenv->set_lg_max(dbenv, 63 * 1024 * 1024) == 0); + assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0); + assert(dbenv->get_lg_max(dbenv, &v) == 0); + assert(v == 63 * 1024 * 1024); + + /* lg_bsize: NOT reset at run-time. */ + system("rm -rf TESTDIR; mkdir TESTDIR"); + ENV + assert(dbenv->set_lg_bsize(dbenv, 37 * 1024) == 0); + assert(dbenv->open(dbenv, + "TESTDIR", DB_CREATE | DB_INIT_LOG, 0666) == 0); + assert(dbenv->get_lg_bsize(dbenv, &v) == 0); + assert(v == 37 * 1024); + ENV + assert(dbenv->set_lg_bsize(dbenv, 63 * 1024) == 0); + assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0); + assert(dbenv->get_lg_bsize(dbenv, &v) == 0); + assert(v == 37 * 1024); + + /* lg_regionmax: NOT reset at run-time. */ + system("rm -rf TESTDIR; mkdir TESTDIR"); + ENV + assert(dbenv->set_lg_regionmax(dbenv, 137 * 1024) == 0); + assert(dbenv->open(dbenv, + "TESTDIR", DB_CREATE | DB_INIT_LOG, 0666) == 0); + assert(dbenv->get_lg_regionmax(dbenv, &v) == 0); + assert(v == 137 * 1024); + ENV + assert(dbenv->set_lg_regionmax(dbenv, 163 * 1024) == 0); + assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0); + assert(dbenv->get_lg_regionmax(dbenv, &v) == 0); + assert(v == 137 * 1024); + + /* lk_get_lk_conflicts: NOT reset at run-time. */ + system("rm -rf TESTDIR; mkdir TESTDIR"); + ENV + memset(conflicts, 'a', sizeof(conflicts)); + nmodes = 6; + assert(dbenv->set_lk_conflicts(dbenv, conflicts, nmodes) == 0); + assert(dbenv->open(dbenv, + "TESTDIR", DB_CREATE | DB_INIT_LOCK, 0666) == 0); + assert(dbenv->get_lk_conflicts(dbenv, &lk_conflicts, &lk_modes) == 0); + assert(lk_conflicts[0] == 'a'); + assert(lk_modes == 6); + ENV + memset(conflicts, 'b', sizeof(conflicts)); + nmodes = 8; + assert(dbenv->set_lk_conflicts(dbenv, conflicts, nmodes) == 0); + assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0); + assert(dbenv->get_lk_conflicts(dbenv, &lk_conflicts, &lk_modes) == 0); + assert(lk_conflicts[0] == 'a'); + assert(lk_modes == 6); + + /* lk_detect: NOT reset at run-time. */ + system("rm -rf TESTDIR; mkdir TESTDIR"); + ENV + assert(dbenv->set_lk_detect(dbenv, DB_LOCK_MAXLOCKS) == 0); + assert(dbenv->open(dbenv, + "TESTDIR", DB_CREATE | DB_INIT_LOCK, 0666) == 0); + assert(dbenv->get_lk_detect(dbenv, &v) == 0); + assert(v == DB_LOCK_MAXLOCKS); + ENV + assert(dbenv->set_lk_detect(dbenv, DB_LOCK_DEFAULT) == 0); + assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0); + assert(dbenv->get_lk_detect(dbenv, &v) == 0); + assert(v == DB_LOCK_MAXLOCKS); + + /* lk_max_locks: NOT reset at run-time. 
*/ + system("rm -rf TESTDIR; mkdir TESTDIR"); + ENV + assert(dbenv->set_lk_max_locks(dbenv, 37) == 0); + assert(dbenv->open(dbenv, + "TESTDIR", DB_CREATE | DB_INIT_LOCK, 0666) == 0); + assert(dbenv->get_lk_max_locks(dbenv, &v) == 0); + assert(v == 37); + ENV + assert(dbenv->set_lk_max_locks(dbenv, 63) == 0); + assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0); + assert(dbenv->get_lk_max_locks(dbenv, &v) == 0); + assert(v == 37); + + /* lk_max_lockers: NOT reset at run-time. */ + system("rm -rf TESTDIR; mkdir TESTDIR"); + ENV + assert(dbenv->set_lk_max_lockers(dbenv, 37) == 0); + assert(dbenv->open(dbenv, + "TESTDIR", DB_CREATE | DB_INIT_LOCK, 0666) == 0); + assert(dbenv->get_lk_max_lockers(dbenv, &v) == 0); + assert(v == 37); + ENV + assert(dbenv->set_lk_max_lockers(dbenv, 63) == 0); + assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0); + assert(dbenv->get_lk_max_lockers(dbenv, &v) == 0); + assert(v == 37); + + /* lk_max_objects: NOT reset at run-time. */ + system("rm -rf TESTDIR; mkdir TESTDIR"); + ENV + assert(dbenv->set_lk_max_objects(dbenv, 37) == 0); + assert(dbenv->open(dbenv, + "TESTDIR", DB_CREATE | DB_INIT_LOCK, 0666) == 0); + assert(dbenv->get_lk_max_objects(dbenv, &v) == 0); + assert(v == 37); + ENV + assert(dbenv->set_lk_max_objects(dbenv, 63) == 0); + assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0); + assert(dbenv->get_lk_max_objects(dbenv, &v) == 0); + assert(v == 37); + + /* lock timeout: reset at run-time. */ + system("rm -rf TESTDIR; mkdir TESTDIR"); + ENV + assert(dbenv->set_timeout(dbenv, 37, DB_SET_LOCK_TIMEOUT) == 0); + assert(dbenv->open(dbenv, + "TESTDIR", DB_CREATE | DB_INIT_LOCK, 0666) == 0); + assert(dbenv->get_timeout(dbenv, &timeout, DB_SET_LOCK_TIMEOUT) == 0); + assert(timeout == 37); + ENV + assert(dbenv->set_timeout(dbenv, 63, DB_SET_LOCK_TIMEOUT) == 0); + assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0); + assert(dbenv->get_timeout(dbenv, &timeout, DB_SET_LOCK_TIMEOUT) == 0); + assert(timeout == 63); + + /* txn timeout: reset at run-time. */ + system("rm -rf TESTDIR; mkdir TESTDIR"); + ENV + assert(dbenv->set_timeout(dbenv, 37, DB_SET_TXN_TIMEOUT) == 0); + assert(dbenv->open(dbenv, + "TESTDIR", DB_CREATE | DB_INIT_LOCK, 0666) == 0); + assert(dbenv->get_timeout(dbenv, &timeout, DB_SET_TXN_TIMEOUT) == 0); + assert(timeout == 37); + ENV + assert(dbenv->set_timeout(dbenv, 63, DB_SET_TXN_TIMEOUT) == 0); + assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0); + assert(dbenv->get_timeout(dbenv, &timeout, DB_SET_TXN_TIMEOUT) == 0); + assert(timeout == 63); + + /* cache size: NOT reset at run-time. */ + system("rm -rf TESTDIR; mkdir TESTDIR"); + ENV + assert(dbenv->set_cachesize(dbenv, 1, 37, 3) == 0); + assert(dbenv->open(dbenv, + "TESTDIR", DB_CREATE | DB_INIT_MPOOL, 0666) == 0); + assert(dbenv->get_cachesize(dbenv, &a, &b, &c) == 0); + assert(a == 1 && b == 37 && c == 3); + ENV + assert(dbenv->set_cachesize(dbenv, 2, 63, 1) == 0); + assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0); + assert(dbenv->get_cachesize(dbenv, &a, &b, &c) == 0); + assert(a == 1 && b == 37 && c == 3); + + return (0); +} diff --git a/db/test/scr030/chk.build b/db/test/scr030/chk.build new file mode 100644 index 000000000..1dddc52f4 --- /dev/null +++ b/db/test/scr030/chk.build @@ -0,0 +1,72 @@ +#!/bin/sh - +# +# $Id: chk.build,v 1.2 2004/09/28 17:18:24 bostic Exp $ +# +# Build a program that calls the run-time API configuration functions. 
+ +trap 'rm -rf scr030 ; exit 0' 0 +trap 'rm -rf scr030 ; exit 1' 1 2 3 13 15 + +[ -d ../../dist ] || { + echo 'FAIL: unable to find top-level dist directory' + exit 1 +} + +s="\ +--disable-cryptography \ +--disable-hash \ +--disable-largefile \ +--disable-queue \ +--disable-replication \ +--disable-statistics \ +--disable-verify \ +--enable-compat185 \ +--enable-debug \ +--enable-debug_rop \ +--enable-debug_wop \ +--enable-diagnostic \ +--enable-dump185 \ +--enable-posixmutexes \ +--enable-rpc \ +--enable-smallbuild \ +--enable-umrw \ +--with-mutex=UNIX/fcntl \ +--with-mutex=x86/gcc-assembly \ +--with-mutexalign=64 \ +--with-uniquename=__KEITH__" + +# Configure and build. +# $1: config flags +r() +{ + echo "run: $1 (`date`)" + rm -rf scr030 + mkdir scr030 + cd scr030 + ../../../dist/configure $1 > config.OUT 2>&1 + if test $? -ne 0; then + echo "$i: FAILED in configure" + exit 1 + fi + make > mklog 2>&1 + if test $? -ne 0; then + echo "$i: FAILED in make" + exit 1 + fi + cd .. + rm -rf scr030 +} + +# Run through all of the standard single options. +for i in $s; do + r "$i --disable-shared" +done + +# Build specific runs of interest. +r +r "--disable-static" +r "--enable-cxx" +r "--enable-java" +r "--with-tcl=/usr/local/tcl8.4/lib" +r "--enable-test --with-tcl=/usr/local/tcl8.4/lib" +r "--enable-cxx --enable-java --with-tcl=/usr/local/tcl8.4/lib" diff --git a/db/test/sdb001.tcl b/db/test/sdb001.tcl index 269bbb3ed..4ebb24dea 100644 --- a/db/test/sdb001.tcl +++ b/db/test/sdb001.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: sdb001.tcl,v 11.21 2003/10/06 13:56:51 margo Exp $ +# $Id: sdb001.tcl,v 11.23 2004/04/05 17:49:25 carol Exp $ # # TEST sdb001 Tests mixing db and subdb operations # TEST Tests mixing db and subdb operations @@ -53,7 +53,6 @@ proc sdb001 { method args } { set pflags "" set gflags "" - set txn "" set count 0 if { [is_record_based $method] == 1 } { @@ -69,7 +68,7 @@ proc sdb001 { method args } { set key $str } set ret [eval \ - {$db put} $txn $pflags {$key [chop_data $method $str]}] + {$db put} $pflags {$key [chop_data $method $str]}] error_check_good put $ret 0 set ret [eval {$db get} $gflags {$key}] diff --git a/db/test/sdb002.tcl b/db/test/sdb002.tcl index 77b3fc0e6..d951d7163 100644 --- a/db/test/sdb002.tcl +++ b/db/test/sdb002.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. 
# -# $Id: sdb002.tcl,v 11.39 2003/09/04 23:41:14 bostic Exp $ +# $Id: sdb002.tcl,v 11.41 2004/04/05 17:49:26 carol Exp $ # # TEST sdb002 # TEST Tests basic subdb functionality @@ -70,7 +70,7 @@ proc subdb002_main { method nentries largs } { set sdb002_env berkdb_env } set env [eval {$sdb002_env -create -cachesize {0 10000000 0} \ - -mode 0644 -txn} -home $testdir $encargs] + -mode 0644} -home $testdir $encargs] error_check_good env_open [is_valid_env $env] TRUE puts "Subdb002: $method ($largs) basic subdb tests in an environment" @@ -122,7 +122,6 @@ proc subdb002_body { method omethod nentries largs testfile env } { set pflags "" set gflags "" - set txn "" set count 0 if { [is_record_based $method] == 1 } { @@ -143,7 +142,7 @@ proc subdb002_body { method omethod nentries largs testfile env } { set key $str } set ret [eval \ - {$db put} $txn $pflags {$key [chop_data $method $str]}] + {$db put} $pflags {$key [chop_data $method $str]}] error_check_good put $ret 0 set ret [eval {$db get} $gflags {$key}] @@ -155,6 +154,7 @@ proc subdb002_body { method omethod nentries largs testfile env } { # Now we will get each key from the DB and compare the results # to the original. puts "\tSubdb002.b: dump file" + set txn "" dump_file $db $txn $t1 $checkfunc error_check_good db_close [$db close] 0 diff --git a/db/test/sdb003.tcl b/db/test/sdb003.tcl index c96a661c2..c652a76d6 100644 --- a/db/test/sdb003.tcl +++ b/db/test/sdb003.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: sdb003.tcl,v 11.28 2003/06/06 18:47:54 sandstro Exp $ +# $Id: sdb003.tcl,v 11.29 2004/01/28 03:36:29 bostic Exp $ # # TEST sdb003 # TEST Tests many subdbs diff --git a/db/test/sdb004.tcl b/db/test/sdb004.tcl index 10f0bff4a..273bda9bf 100644 --- a/db/test/sdb004.tcl +++ b/db/test/sdb004.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: sdb004.tcl,v 11.24 2003/01/08 05:52:57 bostic Exp $ +# $Id: sdb004.tcl,v 11.25 2004/01/28 03:36:29 bostic Exp $ # # TEST sdb004 # TEST Tests large subdb names diff --git a/db/test/sdb005.tcl b/db/test/sdb005.tcl index e5741d297..7173bef00 100644 --- a/db/test/sdb005.tcl +++ b/db/test/sdb005.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: sdb005.tcl,v 11.20 2003/01/08 05:52:59 bostic Exp $ +# $Id: sdb005.tcl,v 11.21 2004/01/28 03:36:29 bostic Exp $ # # TEST sdb005 # TEST Tests cursor operations in subdbs diff --git a/db/test/sdb006.tcl b/db/test/sdb006.tcl index 4265720a1..ffe3e4700 100644 --- a/db/test/sdb006.tcl +++ b/db/test/sdb006.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: sdb006.tcl,v 11.22 2003/01/08 05:53:00 bostic Exp $ +# $Id: sdb006.tcl,v 11.23 2004/01/28 03:36:29 bostic Exp $ # # TEST sdb006 # TEST Tests intra-subdb join diff --git a/db/test/sdb007.tcl b/db/test/sdb007.tcl index af8bfefa3..86f465992 100644 --- a/db/test/sdb007.tcl +++ b/db/test/sdb007.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. 
# -# $Id: sdb007.tcl,v 11.23 2003/11/10 17:41:39 sandstro Exp $ +# $Id: sdb007.tcl,v 11.25 2004/09/22 18:01:06 bostic Exp $ # # TEST sdb007 # TEST Tests page size difference errors between subdbs. @@ -89,9 +89,9 @@ proc sdb007 { method args } { set stat [catch {eval {berkdb_open_noerr -create -btree} \ $db2args {-pagesize $pgsz2 $testfile $sub2}} ret] error_check_good subdb:pgsz $stat 1 - # We'll get a different error if running in an env, + # We'll get a different error if running in an env, # because the env won't have been opened with noerr. - # Skip the test for what the error is, just getting the + # Skip the test for what the error is, just getting the # error is enough. if { $is_envmethod == 0 } { error_check_good subdb:fail [is_substr $ret \ diff --git a/db/test/sdb008.tcl b/db/test/sdb008.tcl index 1439e2c2d..3beb8313b 100644 --- a/db/test/sdb008.tcl +++ b/db/test/sdb008.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: sdb008.tcl,v 11.30 2003/09/04 23:41:15 bostic Exp $ +# $Id: sdb008.tcl,v 11.31 2004/01/28 03:36:29 bostic Exp $ # # TEST sdb008 # TEST Tests explicit setting of lorders for subdatabases -- the diff --git a/db/test/sdb009.tcl b/db/test/sdb009.tcl index e20e6f5e2..3f36a27d7 100644 --- a/db/test/sdb009.tcl +++ b/db/test/sdb009.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: sdb009.tcl,v 11.11 2003/01/08 05:53:05 bostic Exp $ +# $Id: sdb009.tcl,v 11.12 2004/01/28 03:36:29 bostic Exp $ # # TEST sdb009 # TEST Test DB->rename() method for subdbs diff --git a/db/test/sdb010.tcl b/db/test/sdb010.tcl index d0892880c..73ff8292c 100644 --- a/db/test/sdb010.tcl +++ b/db/test/sdb010.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: sdb010.tcl,v 11.16 2003/01/08 05:53:07 bostic Exp $ +# $Id: sdb010.tcl,v 11.18 2004/09/24 14:55:26 carol Exp $ # # TEST sdb010 # TEST Test DB->remove() method and DB->truncate() for subdbs @@ -25,25 +25,24 @@ proc sdb010 { method args } { set envargs "" set eindex [lsearch -exact $args "-env"] # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. + # If we are not given an env, create one. 
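
sdb010 drives DB->remove() and DB->truncate() on subdatabases through the Tcl wrappers (berkdb dbremove, $db truncate). At the C API level the same two operations look roughly like the sketch below; the file and subdatabase names mirror the test, but the program itself is hypothetical and assumes the environment and both subdatabases already exist.

/*
 * Hypothetical sketch, not from the patch: the C calls behind the Tcl
 * wrappers sdb010 uses.  Assumes TESTDIR contains an environment and
 * subdb010.db already holds subdatabases DATABASE1 and DATABASE2.
 */
#include <assert.h>
#include <stdio.h>

#include "db.h"

int
main()
{
	DB_ENV *dbenv;
	DB *dbp;
	u_int32_t count;

	assert(db_env_create(&dbenv, 0) == 0);
	assert(dbenv->open(dbenv,
	    "TESTDIR", DB_CREATE | DB_INIT_MPOOL, 0644) == 0);

	/* Remove only subdatabase DATABASE1; the physical file stays. */
	assert(dbenv->dbremove(dbenv,
	    NULL, "subdb010.db", "DATABASE1", 0) == 0);

	/* Truncate DATABASE2; count reports the discarded records. */
	assert(db_create(&dbp, dbenv, 0) == 0);
	assert(dbp->open(dbp, NULL,
	    "subdb010.db", "DATABASE2", DB_BTREE, 0, 0644) == 0);
	assert(dbp->truncate(dbp, NULL, &count, 0) == 0);
	printf("discarded %lu records\n", (unsigned long)count);

	assert(dbp->close(dbp, 0) == 0);
	assert(dbenv->close(dbenv, 0) == 0);
	return (0);
}
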
if { $eindex == -1 } { - set testfile $testdir/subdb010.db - set tfpath $testfile - set env NULL + set env [berkdb_env -create -home $testdir -mode 0644] + error_check_good env_open [is_valid_env $env] TRUE } else { - set testfile subdb010.db incr eindex set env [lindex $args $eindex] - set envargs " -env $env " - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - append envargs " -auto_commit " - } - set testdir [get_home $env] - set tfpath $testdir/$testfile } + set testfile subdb010.db + set envargs " -env $env " + set txnenv [is_txnenv $env] + if { $txnenv == 1 } { + append args " -auto_commit " + append envargs " -auto_commit " + } + set testdir [get_home $env] + set tfpath $testdir/$testfile + cleanup $testdir $env set txn "" @@ -51,7 +50,7 @@ proc sdb010 { method args } { set testdb2 DATABASE2 set db [eval {berkdb_open -create -mode 0644} $omethod \ - $args $testfile $testdb] + $args $envargs $testfile $testdb] error_check_good db_open [is_valid_db $db] TRUE error_check_good db_close [$db close] 0 @@ -64,7 +63,8 @@ proc sdb010 { method args } { error_check_good file_exists_after [file exists $tfpath] 1 # But database should not. - set ret [catch {eval berkdb_open $omethod $args $testfile $testdb} res] + set ret [catch {eval berkdb_open $omethod \ + $args $envargs $testfile $testdb} res] error_check_bad open_failed ret 0 error_check_good open_failed_ret [is_substr $errorCode ENOENT] 1 @@ -77,7 +77,7 @@ proc sdb010 { method args } { set data2 [pad_data $method data2] set db [eval {berkdb_open -create -mode 0644} $omethod \ - $args {$testfile $testdb}] + $args $envargs {$testfile $testdb}] error_check_good db_open [is_valid_db $db] TRUE if { $txnenv == 1 } { set t [$env txn] @@ -90,7 +90,7 @@ proc sdb010 { method args } { } set db2 [eval {berkdb_open -create -mode 0644} $omethod \ - $args $testfile $testdb2] + $args $envargs $testfile $testdb2] error_check_good db_open [is_valid_db $db2] TRUE if { $txnenv == 1 } { set t [$env txn] @@ -110,7 +110,7 @@ proc sdb010 { method args } { # Return value should be 1, the count of how many items were # destroyed when we truncated. set db [eval {berkdb_open -create -mode 0644} $omethod \ - $args $testfile $testdb] + $args $envargs $testfile $testdb] error_check_good db_open [is_valid_db $db] TRUE if { $txnenv == 1 } { set t [$env txn] @@ -124,7 +124,7 @@ proc sdb010 { method args } { error_check_good db_close [$db close] 0 puts "\tSubdb010.d: check" - set db [eval {berkdb_open} $args {$testfile $testdb}] + set db [eval {berkdb_open} $args $envargs {$testfile $testdb}] error_check_good db_open [is_valid_db $db] TRUE if { $txnenv == 1 } { set t [$env txn] @@ -140,7 +140,7 @@ proc sdb010 { method args } { error_check_good txn [$t commit] 0 } - set db2 [eval {berkdb_open} $args {$testfile $testdb2}] + set db2 [eval {berkdb_open} $args $envargs {$testfile $testdb2}] error_check_good db_open [is_valid_db $db2] TRUE if { $txnenv == 1 } { set t [$env txn] diff --git a/db/test/sdb011.tcl b/db/test/sdb011.tcl index a32f990db..d36b83e5d 100644 --- a/db/test/sdb011.tcl +++ b/db/test/sdb011.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. 
# -# $Id: sdb011.tcl,v 11.12 2003/04/18 14:39:09 sandstro Exp $ +# $Id: sdb011.tcl,v 11.14 2004/01/28 03:36:29 bostic Exp $ # # TEST sdb011 # TEST Test deleting Subdbs with overflow pages @@ -58,10 +58,6 @@ proc sdb011 { method {ndups 13} {nsubdbs 10} args} { # Here is the loop where we put and get each key/data pair set file_list [get_file_list] - if { $max_files != 0 && [llength $file_list] > $max_files } { - set fend [expr $max_files - 1] - set file_list [lrange $file_list 0 $fend] - } set flen [llength $file_list] puts "Subdb011: $method ($args) $ndups overflow dups with \ $flen filename=key filecontents=data pairs" diff --git a/db/test/sdb012.tcl b/db/test/sdb012.tcl index 3e7ceb170..d316acb71 100644 --- a/db/test/sdb012.tcl +++ b/db/test/sdb012.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: sdb012.tcl,v 1.5 2003/01/08 05:53:10 bostic Exp $ +# $Id: sdb012.tcl,v 1.6 2004/01/28 03:36:29 bostic Exp $ # # TEST sdb012 # TEST Test subdbs with locking and transactions diff --git a/db/test/sdb013.tcl b/db/test/sdb013.tcl new file mode 100644 index 000000000..70d23413e --- /dev/null +++ b/db/test/sdb013.tcl @@ -0,0 +1,185 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 1999-2004 +# Sleepycat Software. All rights reserved. +# +# $Id: sdb013.tcl,v 1.3 2004/09/22 18:01:06 bostic Exp $ +# +# TEST sdb013 +# TEST Tests in-memory subdatabases. +# TEST Create an in-memory subdb. Test for persistence after +# TEST overflowing the cache. Test for conflicts when we have +# TEST two in-memory files. + +proc sdb013 { method { nentries 10 } args } { + source ./include.tcl + + set tnum "013" + set args [convert_args $method $args] + set omethod [convert_method $method] + + if { [is_queue $method] == 1 } { + puts "Subdb$tnum: skipping for method $method" + return + } + puts "Subdb$tnum: $method ($args) in-memory subdb tests" + + # If we are using an env, then skip this test. It needs its own. + set eindex [lsearch -exact $args "-env"] + if { $eindex != -1 } { + set env NULL + incr eindex + set env [lindex $args $eindex] + puts "Subdb013 skipping for env $env" + return + } + + # Create the env, with a very small cache that we can easily + # fill. + env_cleanup $testdir + set csize {0 32768 1} + set env [berkdb_env -create -cachesize $csize -home $testdir -txn] + error_check_good dbenv [is_valid_env $env] TRUE + + # Set filename to NULL; this causes the creation of an in-memory + # subdb. + set testfile "" + set subdb subdb0 + + puts "\tSubdb$tnum.a: Create in-mem subdb, add data, close." + set sdb [eval {berkdb_open -create -mode 0644} \ + $args -env $env {$omethod $testfile $subdb}] + error_check_good dbopen [is_valid_db $sdb] TRUE + + sdb013_populate $sdb $method $nentries + error_check_good sdb_close [$sdb close] 0 + + # Do a bunch of writing to evict all pages from the memory pool. + puts "\tSubdb$tnum.b: Create another db, overflow the cache." + set dummyfile foo.db + set db [eval {berkdb_open -create -mode 0644} $args -env $env\ + $omethod $dummyfile] + error_check_good dummy_open [is_valid_db $db] TRUE + + set entries [expr $nentries * 100] + sdb013_populate $db $method $entries + error_check_good dummy_close [$db close] 0 + + # Make sure we can still open the in-memory subdb. + puts "\tSubdb$tnum.c: Check we can still open the in-mem subdb." 
+ set sdb [eval {berkdb_open} \ + $args -env $env {$omethod $testfile $subdb}] + error_check_good sdb_reopen [is_valid_db $sdb] TRUE + error_check_good sdb_close [$sdb close] 0 + + puts "\tSubdb$tnum.d: Remove in-mem subdb." + error_check_good \ + sdb_remove [berkdb dbremove -env $env $testfile $subdb] 0 + + puts "\tSubdb$tnum.e: Check we cannot open the in-mem subdb." + set ret [catch {eval {berkdb_open_noerr} -env $env $args \ + {$omethod $testfile $subdb}} db] + error_check_bad dbopen $ret 0 + + # Create two in-memory subdb and test for conflicts. Try all the + # combinations of named (NULL/NAME) and purely temporary + # (NULL/NULL) databases. + # + foreach s1 { S1 "" } { + foreach s2 { S2 "" } { + puts "\tSubdb$tnum.f:\ + 2 in-memory subdbs (NULL/$s1, NULL/$s2)." + set sdb1 [eval {berkdb_open -create -mode 0644} \ + $args -env $env {$omethod $testfile $s1}] +puts "sdb1 open" + error_check_good sdb1_open [is_valid_db $sdb1] TRUE +puts "open sdb2 with testfile $testfile s2 $s2" + set sdb2 [eval {berkdb_open -create -mode 0644} \ + $args -env $env {$omethod $testfile $s2}] +puts "sdb2 open" + error_check_good sdb1_open [is_valid_db $sdb2] TRUE + + # Subdatabases are open, now put something in. + set string1 STRING1 + set string2 STRING2 +puts "populate" + for { set i 1 } { $i <= $nentries } { incr i } { + set key $i + error_check_good sdb1_put \ + [$sdb1 put $key $string1.$key] 0 + error_check_good sdb2_put \ + [$sdb2 put $key $string2.$key] 0 + } +puts "check contents" + # If the subs are both NULL/NULL, we have two handles + # on the same db. Skip testing the contents. + if { $s1 != "" || $s2 != "" } { + # This can't work when both subs are NULL/NULL. + # Check contents. + for { set i 1 } { $i <= $nentries } { incr i } { + set key $i + set ret1 [lindex \ + [lindex [$sdb1 get $key] 0] 1] + error_check_good \ + sdb1_get $ret1 $string1.$key + set ret2 [lindex \ + [lindex [$sdb2 get $key] 0] 1] + error_check_good \ + sdb2_get $ret2 $string2.$key + } +puts "close sdb1" + error_check_good sdb1_close [$sdb1 close] 0 +puts "close sdb2" + error_check_good sdb2_close [$sdb2 close] 0 + + # Reopen, make sure we get the right data. 
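
The new sdb013 test targets named in-memory databases: a NULL file name combined with a non-NULL database name keeps the database entirely in the environment's cache, and two different names give independent databases. A minimal C sketch of the same idea follows; it is illustrative only, assumes a TESTDIR directory, and uses the same deliberately small cache the test configures so that eviction is easy to provoke.

/*
 * Hypothetical sketch of the feature sdb013 tests: file name NULL,
 * database name "subdb0", so the database is in-memory but named and
 * can be reopened by name from another handle in the same environment.
 */
#include <assert.h>
#include <string.h>

#include "db.h"

int
main()
{
	DB_ENV *dbenv;
	DB *dbp;
	DBT key, data;

	assert(db_env_create(&dbenv, 0) == 0);
	assert(dbenv->set_cachesize(dbenv, 0, 32 * 1024, 1) == 0);
	assert(dbenv->open(dbenv,
	    "TESTDIR", DB_CREATE | DB_INIT_MPOOL, 0644) == 0);

	assert(db_create(&dbp, dbenv, 0) == 0);
	assert(dbp->open(dbp,
	    NULL, NULL, "subdb0", DB_BTREE, DB_CREATE, 0644) == 0);

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = "key";
	key.size = 3;
	data.data = "data";
	data.size = 4;
	assert(dbp->put(dbp, NULL, &key, &data, 0) == 0);

	assert(dbp->close(dbp, 0) == 0);
	assert(dbenv->close(dbenv, 0) == 0);
	return (0);
}
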
+ set sdb1 [eval {berkdb_open -mode 0644} \ + $args -env $env {$omethod $testfile $s1}] + error_check_good \ + sdb1_open [is_valid_db $sdb1] TRUE + set sdb2 [eval {berkdb_open -mode 0644} \ + $args -env $env {$omethod $testfile $s2}] + error_check_good \ + sdb1_open [is_valid_db $sdb2] TRUE + + for { set i 1 } { $i <= $nentries } { incr i } { + set key $i + set ret1 [lindex \ + [lindex [$sdb1 get $key] 0] 1] + error_check_good \ + sdb1_get $ret1 $string1.$key + set ret2 [lindex \ + [lindex [$sdb2 get $key] 0] 1] + error_check_good \ + sdb2_get $ret2 $string2.$key + } + } + error_check_good sdb1_close [$sdb1 close] 0 + error_check_good sdb2_close [$sdb2 close] 0 + } + } + error_check_good env_close [$env close] 0 +} + +proc sdb013_populate { db method nentries } { + source ./include.tcl + + set did [open $dict] + set count 0 + while { [gets $did str] != -1 && $count < $nentries } { + if { [is_record_based $method] == 1 } { + set key [expr $count + 1] + } else { + set key $str + } + set ret [eval \ + {$db put $key [chop_data $method $str]}] + error_check_good put $ret 0 + + set ret [eval {$db get $key}] + error_check_good \ + get $ret [list [list $key [pad_data $method $str]]] + incr count + } + close $did +} diff --git a/db/test/sdbscript.tcl b/db/test/sdbscript.tcl index b7981c2bf..645351b07 100644 --- a/db/test/sdbscript.tcl +++ b/db/test/sdbscript.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: sdbscript.tcl,v 11.10 2003/01/08 05:53:13 bostic Exp $ +# $Id: sdbscript.tcl,v 11.11 2004/01/28 03:36:29 bostic Exp $ # # Usage: subdbscript testfile subdbnumber factor # testfile: name of DB itself diff --git a/db/test/sdbtest001.tcl b/db/test/sdbtest001.tcl index 30a3008d4..65b64dac2 100644 --- a/db/test/sdbtest001.tcl +++ b/db/test/sdbtest001.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: sdbtest001.tcl,v 11.20 2003/01/08 05:53:15 bostic Exp $ +# $Id: sdbtest001.tcl,v 11.21 2004/01/28 03:36:29 bostic Exp $ # # TEST sdbtest001 # TEST Tests multiple access methods in one subdb diff --git a/db/test/sdbtest002.tcl b/db/test/sdbtest002.tcl index 7085d53b1..4e51809fb 100644 --- a/db/test/sdbtest002.tcl +++ b/db/test/sdbtest002.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: sdbtest002.tcl,v 11.27 2003/01/08 05:53:16 bostic Exp $ +# $Id: sdbtest002.tcl,v 11.29 2004/01/28 03:36:30 bostic Exp $ # # TEST sdbtest002 # TEST Tests multiple access methods in one subdb access by multiple @@ -43,8 +43,7 @@ proc sdbtest002 { {nentries 10000} } { set methods \ [list "-rbtree" "-recno" "-btree" "-btree" "-recno" "-rbtree"] cleanup $testdir NULL - puts "\tSubdbtest002.a: create subdbs of different access methods:" - puts "\t\t$methods" + puts "\tSubdbtest002.a: create subdbs of different methods: $methods" set psize 4096 set nsubdbs [llength $methods] set duplist "" diff --git a/db/test/sdbutils.tcl b/db/test/sdbutils.tcl index cf9f64d2f..5c6deb3b4 100644 --- a/db/test/sdbutils.tcl +++ b/db/test/sdbutils.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. 
# -# $Id: sdbutils.tcl,v 11.15 2003/01/08 05:53:18 bostic Exp $ +# $Id: sdbutils.tcl,v 11.16 2004/01/28 03:36:30 bostic Exp $ # proc build_all_subdb { dbname methods psize dups {nentries 100} {dbargs ""}} { set nsubdbs [llength $dups] diff --git a/db/test/sec001.tcl b/db/test/sec001.tcl index f49929696..ed2ca81f9 100644 --- a/db/test/sec001.tcl +++ b/db/test/sec001.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: sec001.tcl,v 11.9 2003/01/08 05:53:19 bostic Exp $ +# $Id: sec001.tcl,v 11.12 2004/09/22 18:01:06 bostic Exp $ # # TEST sec001 # TEST Test of security interface @@ -11,6 +11,7 @@ proc sec001 { } { global errorInfo global errorCode global has_crypto + global is_hp_test source ./include.tcl # Skip test if release does not support encryption. @@ -123,12 +124,16 @@ proc sec001 { } { set env [berkdb_env -create -home $testdir] error_check_good env [is_valid_env $env] TRUE - puts "\tSec001.f.4: Open again with encryption." - set stat [catch {berkdb_env_noerr -home $testdir \ - -encryptaes $passwd1} ret] - error_check_good env:unencrypted $stat 1 - error_check_good env:fail [is_substr $ret \ - "Joining non-encrypted environment"] 1 + # Skip this piece of the test on HP-UX, where we can't + # join the env. + if { $is_hp_test != 1 } { + puts "\tSec001.f.4: Open again with encryption." + set stat [catch {berkdb_env_noerr -home $testdir \ + -encryptaes $passwd1} ret] + error_check_good env:unencrypted $stat 1 + error_check_good env:fail [is_substr $ret \ + "Joining non-encrypted environment"] 1 + } error_check_good envclose [$env close] 0 @@ -144,6 +149,13 @@ proc sec001 { } { set env [berkdb_env_noerr -create -home $testdir -encryptaes $passwd1] error_check_good env [is_valid_env $env] TRUE + # We can't open an env twice in HP-UX, so skip the rest. + if { $is_hp_test == 1 } { + puts "Skipping remainder of test for HP-UX." + error_check_good env_close [$env close] 0 + return + } + puts "\tSec001.g.2: Open again with encryption - same passwd." set env1 [berkdb_env -home $testdir -encryptaes $passwd1] error_check_good env [is_valid_env $env1] TRUE diff --git a/db/test/sec002.tcl b/db/test/sec002.tcl index 8ecfb306d..80d5a5879 100644 --- a/db/test/sec002.tcl +++ b/db/test/sec002.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. 
# -# $Id: sec002.tcl,v 11.10 2003/09/11 16:55:38 sandstro Exp $ +# $Id: sec002.tcl,v 11.12 2004/02/20 19:47:58 sue Exp $ # # TEST sec002 # TEST Test of security interface and catching errors in the @@ -136,12 +136,13 @@ proc sec002 { } { puts "\tSec002.e: Replace root page in encrypted w/ encrypted" set fid1 [open $testfile1 r+] + fconfigure $fid1 -translation binary set fid2 [open $testfile2 r+] + fconfigure $fid2 -translation binary seek $fid1 $pagesize start seek $fid2 $pagesize start - set root1 [read $fid1 $pagesize] + fcopy $fid1 $fid2 -size $pagesize close $fid1 - puts -nonewline $fid2 $root1 close $fid2 set db [berkdb_open_noerr -encryptaes $passwd2 $testfile2] @@ -156,12 +157,13 @@ proc sec002 { } { puts "\tSec002.f: Replace root page in encrypted w/ unencrypted" set fid2 [open $testfile2 r+] + fconfigure $fid2 -translation binary set fid4 [open $testfile4 r+] + fconfigure $fid4 -translation binary seek $fid2 $pagesize start seek $fid4 $pagesize start - set root4 [read $fid4 $pagesize] + fcopy $fid4 $fid2 -size $pagesize close $fid4 - puts -nonewline $fid2 $root4 close $fid2 set db [berkdb_open_noerr -encryptaes $passwd2 $testfile2] diff --git a/db/test/shelltest.tcl b/db/test/shelltest.tcl index 067d40d16..cb588f133 100644 --- a/db/test/shelltest.tcl +++ b/db/test/shelltest.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2001-2003 +# Copyright (c) 2001-2004 # Sleepycat Software. All rights reserved. # -# $Id: shelltest.tcl,v 1.28 2003/09/04 23:41:15 bostic Exp $ +# $Id: shelltest.tcl,v 1.31 2004/03/15 21:09:49 bostic Exp $ # # TEST scr### # TEST The scr### directories are shell scripts that test a variety of @@ -96,3 +96,6 @@ proc scr024 {} { shelltest 24 } proc scr025 {} { shelltest 25 } proc scr026 {} { shelltest 26 } proc scr027 {} { shelltest 27 } +proc scr028 {} { shelltest 28 } +proc scr029 {} { shelltest 29 } +proc scr030 {} { shelltest 30 } diff --git a/db/test/si001.tcl b/db/test/si001.tcl index 25d665003..4aca7e449 100644 --- a/db/test/si001.tcl +++ b/db/test/si001.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2001-2003 +# Copyright (c) 2001-2004 # Sleepycat Software. All rights reserved. # -# $Id: si001.tcl,v 1.13 2003/09/04 23:41:15 bostic Exp $ +# $Id: si001.tcl,v 1.16 2004/09/22 18:01:06 bostic Exp $ # # TEST si001 # TEST Basic secondary index put/delete test @@ -139,8 +139,16 @@ proc si001 { methods {nentries 200} {tnum "001"} args } { close $did - puts "\tSi$tnum.f: Truncate primary" + puts "\tSi$tnum.f: Truncate primary, check secondaries are empty." error_check_good truncate [$pdb truncate] $left + foreach sdb $sdbs { + set scursor [$sdb cursor] + error_check_good db_cursor [is_substr $scursor $sdb] 1 + set ret [$scursor get -first] + error_check_good sec_empty [string length $ret] 0 + error_check_good cursor_close [$scursor close] 0 + } + puts "\tSi$tnum.g: Closing/disassociating primary first" error_check_good primary_close [$pdb close] 0 diff --git a/db/test/si002.tcl b/db/test/si002.tcl index bc08b85cb..039fbb6a7 100644 --- a/db/test/si002.tcl +++ b/db/test/si002.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2001-2003 +# Copyright (c) 2001-2004 # Sleepycat Software. All rights reserved. 
# -# $Id: si002.tcl,v 1.9 2003/01/08 05:53:25 bostic Exp $ +# $Id: si002.tcl,v 1.12 2004/05/28 14:33:26 carol Exp $ # # TEST si002 # TEST Basic cursor-based secondary index put/delete test diff --git a/db/test/si003.tcl b/db/test/si003.tcl index dbe999fae..c80583ea2 100644 --- a/db/test/si003.tcl +++ b/db/test/si003.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2001-2003 +# Copyright (c) 2001-2004 # Sleepycat Software. All rights reserved. # -# $Id: si003.tcl,v 1.9 2003/01/08 05:53:26 bostic Exp $ +# $Id: si003.tcl,v 1.10 2004/01/28 03:36:30 bostic Exp $ # # TEST si003 # TEST si001 with secondaries created and closed mid-test diff --git a/db/test/si004.tcl b/db/test/si004.tcl index 5f7b4ff9b..99db2061d 100644 --- a/db/test/si004.tcl +++ b/db/test/si004.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2001-2003 +# Copyright (c) 2001-2004 # Sleepycat Software. All rights reserved. # -# $Id: si004.tcl,v 1.9 2003/01/08 05:53:27 bostic Exp $ +# $Id: si004.tcl,v 1.10 2004/01/28 03:36:30 bostic Exp $ # # TEST si004 # TEST si002 with secondaries created and closed mid-test diff --git a/db/test/si005.tcl b/db/test/si005.tcl index e824c808d..31e99ede9 100644 --- a/db/test/si005.tcl +++ b/db/test/si005.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2001-2003 +# Copyright (c) 2001-2004 # Sleepycat Software. All rights reserved. # -# $Id: si005.tcl,v 11.7 2003/01/08 05:53:28 bostic Exp $ +# $Id: si005.tcl,v 11.9 2004/01/28 03:36:30 bostic Exp $ # # TEST si005 # TEST Basic secondary index put/delete test with transactions @@ -36,7 +36,7 @@ proc si005 { methods {nentries 200} {tnum "005"} args } { set omethods [convert_methods $methods] puts "Si$tnum ($pmethod/$methods) $nentries equal key/data pairs" - puts " with transactions" + puts "\twith transactions" env_cleanup $testdir set pname "primary$tnum.db" diff --git a/db/test/sijointest.tcl b/db/test/sijointest.tcl index 94925f1a0..b89a1da71 100644 --- a/db/test/sijointest.tcl +++ b/db/test/sijointest.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2001-2003 +# Copyright (c) 2001-2004 # Sleepycat Software. All rights reserved. # -# $Id: sijointest.tcl,v 11.3 2003/01/08 05:53:29 bostic Exp $ +# $Id: sijointest.tcl,v 11.4 2004/01/28 03:36:30 bostic Exp $ # # TEST sijointest: Secondary index and join test. # TEST This used to be si005.tcl. diff --git a/db/test/siutils.tcl b/db/test/siutils.tcl index cf4b7e476..ffb0de46c 100644 --- a/db/test/siutils.tcl +++ b/db/test/siutils.tcl @@ -1,9 +1,9 @@ #See the file LICENSE for redistribution information. # -# Copyright (c) 2001-2003 +# Copyright (c) 2001-2004 # Sleepycat Software. All rights reserved. # -# $Id: siutils.tcl,v 11.4 2003/09/04 23:41:15 bostic Exp $ +# $Id: siutils.tcl,v 11.6 2004/03/02 18:44:41 mjc Exp $ # # Secondary index utilities. This file used to be known as # sindex.tcl. 
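
The si tests and these utilities revolve around DB->associate(): a callback derives the secondary key from each primary record, and the library keeps the secondary index in step with every put and delete on the primary. The sketch below shows the shape of that API; the callback, file names, and key layout are invented for illustration and are not taken from the test suite.

/*
 * Hypothetical sketch of the secondary-index pattern the si tests
 * exercise.
 */
#include <assert.h>
#include <string.h>

#include "db.h"

/*
 * Secondary key: the first four bytes of the primary data.
 * (sdbp and pkey are unused in this simple callback.)
 */
static int
first4(DB *sdbp, const DBT *pkey, const DBT *pdata, DBT *skey)
{
	memset(skey, 0, sizeof(DBT));
	skey->data = pdata->data;
	skey->size = pdata->size < 4 ? pdata->size : 4;
	return (0);
}

int
main()
{
	DB *pdbp, *sdbp;
	DBT key, data;

	assert(db_create(&pdbp, NULL, 0) == 0);
	assert(pdbp->open(pdbp,
	    NULL, "primary.db", NULL, DB_BTREE, DB_CREATE, 0644) == 0);

	assert(db_create(&sdbp, NULL, 0) == 0);
	/* DB_DUPSORT lets several primary records share one derived key. */
	assert(sdbp->set_flags(sdbp, DB_DUPSORT) == 0);
	assert(sdbp->open(sdbp,
	    NULL, "secondary.db", NULL, DB_BTREE, DB_CREATE, 0644) == 0);

	/* From here on, primary updates maintain the secondary. */
	assert(pdbp->associate(pdbp, NULL, sdbp, first4, 0) == 0);

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = "0001";
	key.size = 4;
	data.data = "payload";
	data.size = 7;
	assert(pdbp->put(pdbp, NULL, &key, &data, 0) == 0);

	assert(sdbp->close(sdbp, 0) == 0);
	assert(pdbp->close(pdbp, 0) == 0);
	return (0);
}

The new si001.f step earlier in this section checks the other half of that contract: truncating the primary leaves the associated secondaries empty as well.
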
@@ -35,7 +35,7 @@ proc callback_n { n } { 3 { return _s_concatdatakey } 4 { return _s_reverseconcat } 5 { return _s_truncdata } - 6 { return _s_alwayscocacola } + 6 { return _s_constant } } return _s_noop } @@ -45,7 +45,7 @@ proc _s_truncdata { a b } { return [string range $b 1 end] } proc _s_concatkeydata { a b } { return $a$b } proc _s_concatdatakey { a b } { return $b$a } proc _s_reverseconcat { a b } { return [reverse $a$b] } -proc _s_alwayscocacola { a b } { return "Coca-Cola" } +proc _s_constant { a b } { return "constant data" } proc _s_noop { a b } { return $b } # Should the check_secondary routines print lots of output? diff --git a/db/test/sysscript.tcl b/db/test/sysscript.tcl index 5f506da0e..8386949a6 100644 --- a/db/test/sysscript.tcl +++ b/db/test/sysscript.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: sysscript.tcl,v 11.18 2003/01/08 05:53:32 bostic Exp $ +# $Id: sysscript.tcl,v 11.19 2004/01/28 03:36:30 bostic Exp $ # # System integration test script. # This script runs a single process that tests the full functionality of diff --git a/db/test/t106script.tcl b/db/test/t106script.tcl index 026f43f36..3d50c9d1f 100644 --- a/db/test/t106script.tcl +++ b/db/test/t106script.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: t106script.tcl,v 1.3 2003/09/04 23:41:15 bostic Exp $ +# $Id: t106script.tcl,v 1.4 2004/01/28 03:36:30 bostic Exp $ # proc t106_initial { nitems nprod id tnum dbenv order args } { diff --git a/db/test/test.tcl b/db/test/test.tcl index a16b1e669..f36b68e7f 100644 --- a/db/test/test.tcl +++ b/db/test/test.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test.tcl,v 11.256 2003/11/10 17:41:38 sandstro Exp $ +# $Id: test.tcl,v 11.271 2004/09/22 18:01:06 bostic Exp $ source ./include.tcl @@ -44,12 +44,10 @@ set datastr "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" # Random number seed. global rand_init -set rand_init 101301 +set rand_init 12082003 -# Default record length and padding character for -# fixed record length access method(s) +# Default record length for fixed record length access method(s) set fixed_len 20 -set fixed_pad 0 set recd_debug 0 set log_log_record_types 0 @@ -70,6 +68,13 @@ global txn_maxid set txn_curid 2147483648 set txn_maxid 4294967295 +# The variable one_test allows us to run all the permutations +# of a test with run_all or run_std. +global one_test +if { [info exists one_test] != 1 } { + set one_test "ALL" +} + # This is where the test numbering and parameters now live. source $test_path/testparams.tcl @@ -77,9 +82,19 @@ source $test_path/testparams.tcl global tcl_platform set is_windows_test [is_substr $tcl_platform(os) "Win"] set is_hp_test [is_substr $tcl_platform(os) "HP-UX"] +set is_je_test 0 set is_qnx_test [is_substr $tcl_platform(os) "QNX"] set upgrade_be [big_endian] +global EXE BAT +if { $is_windows_test == 1 } { + set EXE ".exe" + set BAT ".bat" +} else { + set EXE "" + set BAT "" +} + # Try to open an encrypted database. If it fails, this release # doesn't support encryption, and encryption tests should be skipped. 
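
The probe described in the comment above can also be made directly against the C API: a library configured with --disable-cryptography (one of the chk.build options earlier in this section) rejects the encryption setup call, so a failed DB_ENV->set_encrypt() is enough to decide has_crypto. A hedged sketch follows, treating any non-zero return as "no crypto" rather than relying on a specific error value.

/*
 * Hypothetical sketch: decide has_crypto from C by asking for AES
 * encryption on a fresh environment handle.
 */
#include <stdio.h>

#include "db.h"

int
main()
{
	DB_ENV *dbenv;
	int ret;

	if (db_env_create(&dbenv, 0) != 0)
		return (1);
	ret = dbenv->set_encrypt(dbenv, "test passwd", DB_ENCRYPT_AES);
	if (ret == 0)
		printf("has_crypto: 1\n");
	else
		printf("has_crypto: 0 (%s)\n", db_strerror(ret));
	(void)dbenv->close(dbenv, 0);
	return (0);
}
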
set has_crypto 1 @@ -99,10 +114,17 @@ if { $stat != 0 } { # From here on out, test.tcl contains the procs that are used to # run all or part of the test suite. -proc run_std { args } { +proc run_std { { testname ALL } args } { global test_names + global one_test source ./include.tcl + set one_test $testname + if { $one_test != "ALL" } { + # Source testparams again to adjust test_names. + source $test_path/testparams.tcl + } + set exflgs [eval extractflags $args] set args [lindex $exflgs 0] set flags [lindex $exflgs 1] @@ -175,11 +197,12 @@ proc run_std { args } { set msg [lindex $pair 0] set cmd [lindex $pair 1] puts "Running $msg tests" - if [catch {exec $tclsh_path \ - << "source $test_path/test.tcl; r $rflags $cmd" \ + if [catch {exec $tclsh_path << \ + "global one_test; set one_test $one_test; \ + source $test_path/test.tcl; r $rflags $cmd" \ >>& ALL.OUT } res] { set o [open ALL.OUT a] - puts $o "FAIL: $cmd test" + puts $o "FAIL: $cmd test: $res" close $o } } @@ -200,12 +223,13 @@ proc run_std { args } { # stderr. puts "Running recovery tests" if [catch { - exec $tclsh_path \ - << "source $test_path/test.tcl; r $rflags recd" \ + exec $tclsh_path << \ + "global one_test; set one_test $one_test; \ + source $test_path/test.tcl; r $rflags recd" \ 2>@ stderr >> ALL.OUT } res] { set o [open ALL.OUT a] - puts $o "FAIL: recd tests" + puts $o "FAIL: recd tests: $res" close $o } @@ -214,14 +238,16 @@ proc run_std { args } { # XXX # Broken up into separate tclsh instantiations so we don't # require so much memory. - puts "Running join test" - foreach test "join1 join2 join3 join4 join5 join6" { - if [catch {exec $tclsh_path \ - << "source $test_path/test.tcl; r $rflags $test" \ - >>& ALL.OUT } res] { - set o [open ALL.OUT a] - puts $o "FAIL: $test test" - close $o + if { $one_test == "ALL" } { + puts "Running join test" + foreach test "join1 join2 join3 join4 join5 join6" { + if [catch {exec $tclsh_path << \ + "source $test_path/test.tcl; r $rflags $test" \ + >>& ALL.OUT } res] { + set o [open ALL.OUT a] + puts $o "FAIL: $test test: $res" + close $o + } } } } @@ -238,16 +264,20 @@ proc run_std { args } { foreach test $test_names(test) { if { $run == 0 } { set o [open ALL.OUT a] - run_method -$method $test $display $run $o + run_method \ + -$method $test $display $run $o close $o } if { $run } { - if [catch {exec $tclsh_path \ - << "source $test_path/test.tcl; \ - run_method -$method $test $display $run"\ + if [catch {exec $tclsh_path << \ + "global one_test; \ + set one_test $one_test; \ + source $test_path/test.tcl; \ + run_method \ + -$method $test $display $run"\ >>& ALL.OUT } res] { set o [open ALL.OUT a] - puts $o "FAIL:$test $method" + puts $o "FAIL:$test $method: $res" close $o } } @@ -262,14 +292,16 @@ proc run_std { args } { return } - set failed [check_failed_run ALL.OUT] + set failed [check_output ALL.OUT] set o [open ALL.OUT a] if { $failed == 0 } { puts "Regression Tests Succeeded" puts $o "Regression Tests Succeeded" } else { - puts "Regression Tests Failed; see ALL.OUT for log" + puts "Regression Tests Failed" + puts "Check UNEXPECTED OUTPUT lines." + puts "Review ALL.OUT.x for details." puts $o "Regression Tests Failed" } @@ -280,17 +312,113 @@ proc run_std { args } { close $o } -proc check_failed_run { file {text "^FAIL"}} { +proc check_output { file } { + # These are all the acceptable patterns. 
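
The rewritten check_output replaces the old grep for ^FAIL with a whitelist: every line of ALL.OUT must match one of the acceptable patterns listed next, and anything else is reported as UNEXPECTED OUTPUT. The same technique as a small standalone C filter, with a deliberately tiny hypothetical whitelist standing in for the real pattern list that follows:

/*
 * Hypothetical filter showing the whitelist idea in C: a line that
 * matches no acceptable pattern is reported.
 */
#include <regex.h>
#include <stdio.h>
#include <string.h>

int
main(int argc, char *argv[])
{
	static const char *ok[] = {
		"^[[:space:]]*$",			/* blank lines */
		"^Running .* tests",			/* progress lines */
		"^[0-9][0-9]:[0-9][0-9]:[0-9][0-9]",	/* timestamps */
		NULL
	};
	regex_t re[3];
	char line[1024];
	FILE *fp;
	int failed, i, matched;

	if (argc != 2 || (fp = fopen(argv[1], "r")) == NULL)
		return (1);
	for (i = 0; ok[i] != NULL; i++)
		if (regcomp(&re[i], ok[i], REG_EXTENDED | REG_NOSUB) != 0)
			return (1);
	failed = 0;
	while (fgets(line, sizeof(line), fp) != NULL) {
		line[strcspn(line, "\n")] = '\0';
		for (matched = i = 0; ok[i] != NULL && !matched; i++)
			matched = regexec(&re[i], line, 0, NULL, 0) == 0;
		if (!matched) {
			printf("UNEXPECTED OUTPUT: %s\n", line);
			failed = 1;
		}
	}
	(void)fclose(fp);
	return (failed);
}
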
+ set pattern {(?x) + ^[:space:]*$| + .*?wrap\.tcl.*| + .*?dbscript\.tcl.*| + .*?ddscript\.tcl.*| + .*?mpoolscript\.tcl.*| + .*?mutexscript\.tcl.*| + ^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)$| + ^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)\sCrashing$| + ^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)\s[p|P]rocesses\srunning:.*| + ^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)\s5\sprocesses\srunning.*| + ^\d:\sPut\s\d*\sstrings\srandom\soffsets.*| + ^100.*| + ^eval\s.*| + ^exec\s.*| + ^jointest.*$| + ^r\sarchive\s*| + ^r\sdbm\s*| + ^r\shsearch\s*| + ^r\sndbm\s*| + ^r\srpc\s*| + ^run_recd:\s.*| + ^run_reptest:\s.*| + ^run_rpcmethod:\s.*| + ^run_secenv:\s.*| + ^All\sprocesses\shave\sexited.$| + ^Beginning\scycle\s\d$| + ^Byteorder:.*| + ^Child\sruns\scomplete\.\s\sParent\smodifies\sdata\.$| + ^Deadlock\sdetector:\s\d*\sCheckpoint\sdaemon\s\d*$| + ^Ending\srecord.*| + ^Environment\s.*?specified;\s\sskipping\.$| + ^Executing\srecord\s.*| + ^Join\stest:\.*| + ^Method:\s.*| + ^Repl:\stest\d\d\d:.*| + ^Repl:\ssdb\d\d\d:.*| + ^Script\swatcher\sprocess\s.*| + ^Sleepycat\sSoftware:\sBerkeley\sDB\s.*| + ^Test\ssuite\srun\s.*| + ^Unlinking\slog:\serror\smessage\sOK$| + ^Verifying\s.*| + ^\t*\.\.\.dbc->get.*$| + ^\t*\.\.\.dbc->put.*$| + ^\t*\.\.\.key\s\d*$| + ^\t*\.\.\.Skipping\sdbc.*| + ^\t*and\s\d*\sduplicate\sduplicates\.$| + ^\t*About\sto\srun\srecovery\s.*complete$| + ^\t*Archive[:\.].*| + ^\t*Building\s.*| + ^\t*closing\ssecondaries\.$| + ^\t*Command\sexecuted\sand\s.*$| + ^\t*DBM.*| + ^\t*[d|D]ead[0-9][0-9][0-9].*| + ^\t*Dump\/load\sof.*| + ^\t*[e|E]nv[0-9][0-9][0-9].*| + ^\t*Executing\scommand$| + ^\t*Executing\stxn_.*| + ^\t*File\srecd005\.\d\.db\sexecuted\sand\saborted\.$| + ^\t*File\srecd005\.\d\.db\sexecuted\sand\scommitted\.$| + ^\t*[f|F]op[0-9][0-9][0-9].*| + ^\t*HSEARCH.*| + ^\t*Initial\sCheckpoint$| + ^\t*Iteration\s\d*:\sCheckpointing\.$| + ^\t*Joining:\s.*| + ^\t*Kid[1|2]\sabort\.\.\.complete$| + ^\t*Kid[1|2]\scommit\.\.\.complete$| + ^\t*[l|L]ock[0-9][0-9][0-9].*| + ^\t*[l|L]og[0-9][0-9][0-9].*| + ^\t*[m|M]emp[0-9][0-9][0-9].*| + ^\t*[m|M]utex[0-9][0-9][0-9].*| + ^\t*NDBM.*| + ^\t*opening\ssecondaries\.$| + ^\t*op_recover_rec:\sRunning\srecovery.*| + ^\t*[r|R]ecd[0-9][0-9][0-9].*| + ^\t*[r|R]ep[0-9][0-9][0-9].*| + ^\t*[r|R]ep_test.*| + ^\t*[r|R]pc[0-9][0-9][0-9].*| + ^\t*[r|R]src[0-9][0-9][0-9].*| + ^\t*Run_rpcmethod.*| + ^\t*Running\srecovery\son\s.*| + ^\t*[s|S]ec[0-9][0-9][0-9].*| + ^\t*Si[0-9][0-9][0-9].*| + ^\t*Sijoin.*| + ^\t*sdb[0-9][0-9][0-9].*| + ^\t*Skipping\s.*| + ^\t*Subdb[0-9][0-9][0-9].*| + ^\t*Syncing$| + ^\t*[t|T]est[0-9][0-9][0-9].*| + ^\t*[t|T]xn[0-9][0-9][0-9].*| + ^\t*Txnscript.*| + ^\t*Using\s.*?\senvironment\.$| + ^\t*Verification\sof.*| + ^\t*with\stransactions$} + set failed 0 - set o [open $file r] - while { [gets $o line] >= 0 } { - set ret [regexp $text $line] - if { $ret != 0 } { + set f [open $file r] + while { [gets $f line] >= 0 } { + if { [regexp $pattern $line] == 0 } { + puts -nonewline "UNEXPECTED OUTPUT: " + puts $line set failed 1 } } - close $o - + close $f return $failed } @@ -298,6 +426,7 @@ proc r { args } { global test_names global has_crypto global rand_init + global one_test source ./include.tcl @@ -323,7 +452,6 @@ proc r { args } { switch $sub { dead - env - - fop - lock - log - memp - @@ -338,23 +466,29 @@ proc r { args } { run_subsystem $sub } } - bigfile { - foreach test $test_names($sub) { - eval run_test $test $display $run - } - } byte { - run_test byteorder $display $run + if { $one_test == "ALL" } { + run_test byteorder $display $run + } } archive - dbm - hsearch - 
ndbm - shelltest { - if { $display } { puts "r $sub" } - if { $run } { - check_handles - $sub + if { $one_test == "ALL" } { + if { $display } { puts "r $sub" } + if { $run } { + check_handles + $sub + } + } + } + bigfile - + elect - + fop { + foreach test $test_names($sub) { + eval run_test $test $display $run } } join { @@ -437,22 +571,46 @@ proc r { args } { } } rpc { - if { $display } { puts "r $sub" } - global rpc_svc svc_list - set old_rpc_src $rpc_svc - foreach rpc_svc $svc_list { - if { !$run || \ - ![file exist $util_path/$rpc_svc] } { - continue + if { $one_test == "ALL" } { + if { $display } { puts "r $sub" } + global BAT EXE rpc_svc svc_list + global rpc_svc svc_list is_je_test + set old_rpc_src $rpc_svc + foreach rpc_svc $svc_list { + if { $rpc_svc == "berkeley_dbje_svc" } { + set old_util_path $util_path + set util_path $je_root/dist + set is_je_test 1 + } + + if { !$run || \ + ![file exist $util_path/$rpc_svc$BAT] || \ + ![file exist $util_path/$rpc_svc$EXE] } { + continue + } + + run_subsystem rpc + if { [catch {run_rpcmethod -txn} ret] != 0 } { + puts $ret + } + + if { $is_je_test } { + check_handles + eval run_rpcmethod -btree + verify_dir $testdir "" 1 + } else { + run_test run_rpcmethod $display $run + } + + if { $is_je_test } { + set util_path $old_util_path + set is_je_test 0 + } + } - run_subsystem rpc - if { [catch {run_rpcmethod -txn} ret] != 0 } { - puts $ret + set rpc_svc $old_rpc_src } - run_test run_rpcmethod $display $run } - set rpc_svc $old_rpc_src - } sec { # Skip secure mode tests if release # does not support encryption. @@ -473,29 +631,35 @@ proc r { args } { } } sdb { - if { $display } { - puts "eval r $saveflags sdbtest" - } - if { $run } { - eval r $saveflags sdbtest + if { $one_test == "ALL" } { + if { $display } { + puts "eval r $saveflags sdbtest" + } + if { $run } { + eval r $saveflags sdbtest + } } foreach test $test_names(sdb) { eval run_test $test $display $run } } sindex { - if { $display } { - sindex 1 0 - sijoin 1 0 - } - if { $run } { - sindex 0 1 - sijoin 0 1 + if { $one_test == "ALL" } { + if { $display } { + sindex 1 0 + sijoin 1 0 + } + if { $run } { + sindex 0 1 + sijoin 0 1 + } } } btree - rbtree - hash - + iqueue - + iqueueext - queue - queueext - recno - @@ -611,7 +775,7 @@ proc run_rpcmethod { method {largs ""} } { global __debug_on global __debug_print global __debug_test - global test_names + global rpc_tests global parms global is_envmethod global rpc_svc @@ -644,56 +808,59 @@ proc run_rpcmethod { method {largs ""} } { if { $stat == 0 } { set stat [catch {eval txn001_subb $ntxns $env} res] } - error_check_good envclose [$env close] 0 set stat [catch {eval txn003} res] + error_check_good envclose [$env close] 0 } else { - set stat [catch { - foreach sub { test sdb } { - foreach test $test_names($sub) { - check_handles - remote_cleanup $rpc_server \ - $rpc_testdir $testdir - # - # Set server cachesize to 1Mb. - # Otherwise some tests won't fit. - # (like test084 -btree). 
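# A minimal sketch (not from the patch itself) of the per-test RPC client
# setup that the run_rpcmethod hunks here rework: each test opens a fresh
# client environment against the already-running RPC server, and the
# replacement code below raises the client cache from 1Mb to 128Mb so large
# tests such as test084 -btree fit.  $home, $rpc_server, error_check_good
# and is_valid_env are the suite's own variables and helpers, assumed in scope.
set env [eval {berkdb_env -create -mode 0644 \
    -home $home -server $rpc_server \
    -client_timeout 10000 \
    -cachesize {0 134217728 1}}]
error_check_good env_open [is_valid_env $env] TRUE
# ... run one test with " -env $env " appended to its largs ...
error_check_good envclose [$env close] 0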
- # - set env [eval {berkdb_env -create \ - -mode 0644 \ - -home $home -server $rpc_server \ - -client_timeout 10000 \ - -cachesize {0 1048576 1}}] - error_check_good env_open \ - [is_valid_env $env] TRUE - append largs " -env $env " - - puts "[timestamp]" - eval $test $method $parms($test) $largs - if { $__debug_print != 0 } { - puts "" - } - if { $__debug_on != 0 } { - debug $__debug_test - } - flush stdout - flush stderr - set largs $save_largs - error_check_good envclose [$env close] 0 + foreach test $rpc_tests($rpc_svc) { + set stat [catch { + check_handles + remote_cleanup $rpc_server $rpc_testdir $testdir + # + # Set server cachesize to 128Mb. Otherwise + # some tests won't fit (like test084 -btree). + # + set env [eval {berkdb_env -create -mode 0644 \ + -home $home -server $rpc_server \ + -client_timeout 10000 \ + -cachesize {0 134217728 1}}] + error_check_good env_open \ + [is_valid_env $env] TRUE + set largs $save_largs + append largs " -env $env " + + puts "[timestamp]" + eval $test $method $parms($test) $largs + if { $__debug_print != 0 } { + puts "" } - } - } res] - } - if { $stat != 0} { - global errorInfo; + if { $__debug_on != 0 } { + debug $__debug_test + } + flush stdout + flush stderr + error_check_good envclose [$env close] 0 + set env "" + } res] - set fnl [string first "\n" $errorInfo] - set theError [string range $errorInfo 0 [expr $fnl - 1]] - tclkill $dpid - if {[string first FAIL $errorInfo] == -1} { - error "FAIL:[timestamp]\ - run_rpcmethod: $method: $theError" - } else { - error $theError; + if { $stat != 0} { + global errorInfo; + + puts "$res" + + set fnl [string first "\n" $errorInfo] + set theError [string range $errorInfo 0 [expr $fnl - 1]] + if {[string first FAIL $errorInfo] == -1} { + puts "FAIL:[timestamp]\ + run_rpcmethod: $method $test: $errorInfo" + } else { + puts $theError; + } + + catch { $env close } ignore + set env "" + tclkill $dpid + set dpid [rpc_server_start] + } } } set is_envmethod 0 @@ -800,6 +967,7 @@ proc run_secmethod { method test {display 0} {run 1} \ return } + set largs $args append largs " -encryptaes $passwd " eval run_method $method $test $display $run $outfile $largs } @@ -894,7 +1062,7 @@ proc run_reptest { method test {droppct 0} {nclients 1} {do_del 0} \ global passwd global has_crypto - puts "run_reptest: $method $test" + puts "run_reptest: $method $test $droppct $nclients $do_del $do_sec $do_oob $largs" env_cleanup $testdir set is_envmethod 1 @@ -1234,13 +1402,20 @@ proc run_recds { {run 1} {display 0} args } { set log_log_record_types 0 } -proc run_all { args } { +proc run_all { { testname ALL } args } { global test_names + global one_test global has_crypto source ./include.tcl fileremove -f ALL.OUT + set one_test $testname + if { $one_test != "ALL" } { + # Source testparams again to adjust test_names. + source $test_path/testparams.tcl + } + set exflgs [eval extractflags $args] set flags [lindex $exflgs 1] set display 1 @@ -1279,7 +1454,7 @@ proc run_all { args } { # print out start/end times. 
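# A minimal sketch (not from the patch itself) of the one_test plumbing the
# hunks above add: run_std and run_all now take an optional test name
# (default ALL), re-source testparams.tcl to trim test_names when a single
# test is requested, and hand the same name to every child tclsh before it
# sources test.tcl.  $tclsh_path, $test_path, $rflags and proc r are the
# suite's own, assumed to be set up as in run_std.
set one_test test001		;# or ALL for the full suite
# The child must see the same one_test so that, for example, the byte,
# archive and dbm arms of proc r are skipped when only one test was asked for.
if [catch {exec $tclsh_path << \
    "global one_test; set one_test $one_test; \
    source $test_path/test.tcl; r $rflags byte" \
    >>& ALL.OUT } res] {
	set o [open ALL.OUT a]
	puts $o "FAIL: byte test: $res"
	close $o
}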
# lappend args -A - eval {run_std} $args + eval {run_std} $one_test $args set test_pagesizes [get_test_pagesizes] set args [lindex $exflgs 0] @@ -1334,14 +1509,16 @@ proc run_all { args } { } if { $run } { if [catch {exec $tclsh_path << \ - "source $test_path/test.tcl; \ + "global one_test; \ + set one_test $one_test; \ + source $test_path/test.tcl; \ eval {run_method -$method \ $test $display $run \ stdout} $args" \ >>& ALL.OUT } res] { set o [open ALL.OUT a] puts $o "FAIL: \ - -$method $test" + -$method $test: $res" close $o } } @@ -1364,14 +1541,16 @@ proc run_all { args } { close $o } if { $run } { - if [catch {exec $tclsh_path \ - << "source $test_path/test.tcl; \ + if [catch {exec $tclsh_path << \ + "global one_test; \ + set one_test $one_test; \ + source $test_path/test.tcl; \ run_envmethod -$method $test \ $display $run stdout $args" \ >>& ALL.OUT } res] { set o [open ALL.OUT a] puts $o "FAIL: run_envmethod \ - $method $test" + $method $test: $res" close $o } } @@ -1393,14 +1572,16 @@ proc run_all { args } { close $o } if { $run } { - if [catch {exec $tclsh_path \ - << "source $test_path/test.tcl; \ + if [catch {exec $tclsh_path << \ + "global one_test; \ + set one_test $one_test; \ + source $test_path/test.tcl; \ eval {run_envmethod -$method $test \ $display $run stdout -thread}" \ >>& ALL.OUT } res] { set o [open ALL.OUT a] puts $o "FAIL: run_envmethod \ - $method $test -thread" + $method $test -thread: $res" close $o } } @@ -1421,14 +1602,16 @@ proc run_all { args } { close $o } if { $run } { - if [catch {exec $tclsh_path \ - << "source $test_path/test.tcl; \ + if [catch {exec $tclsh_path << \ + "global one_test; \ + set one_test $one_test; \ + source $test_path/test.tcl; \ eval {run_envmethod -$method $test \ $display $run stdout -alloc}" \ >>& ALL.OUT } res] { set o [open ALL.OUT a] puts $o "FAIL: run_envmethod \ - $method $test -alloc" + $method $test -alloc: $res" close $o } } @@ -1459,11 +1642,12 @@ proc run_all { args } { set msg [lindex $pair 0] set cmd [lindex $pair 1] puts "Running $msg tests" - if [catch {exec $tclsh_path \ - << "source $test_path/test.tcl; \ + if [catch {exec $tclsh_path << \ + "global one_test; set one_test $one_test; \ + source $test_path/test.tcl; \ r $rflags $cmd $args" >>& ALL.OUT } res] { set o [open ALL.OUT a] - puts $o "FAIL: $cmd test" + puts $o "FAIL: $cmd test: $res" close $o } } diff --git a/db/test/test001.tcl b/db/test/test001.tcl index 31037c777..2d7130fcb 100644 --- a/db/test/test001.tcl +++ b/db/test/test001.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test001.tcl,v 11.35 2003/09/08 18:12:56 bostic Exp $ +# $Id: test001.tcl,v 11.38 2004/09/22 18:01:06 bostic Exp $ # # TEST test001 # TEST Small keys/data @@ -17,7 +17,7 @@ # TEST After all are entered, retrieve all; compare output to original. # TEST Close file, reopen, do retrieve and re-verify. proc test001 { method {nentries 10000} \ - {start 0} {skip 1} {tnum "001"} args } { + {start 0} {skip 0} {tnum "001"} args } { source ./include.tcl set args [convert_args $method $args] @@ -57,16 +57,12 @@ proc test001 { method {nentries 10000} \ # The "start" variable determines the record number to start # with, if we're using record numbers. The "skip" variable - # determines whether to start with the first entry in the - # dict file (if skip = 0) or skip over "start" entries (skip = 1). 
- # Skip is set to 1 to get different key/data pairs for - # different iterations of replication tests. Skip must be set - # to 0 if we're running a test that uses 10000 iterations, - # otherwise we run out of data to read in. - - puts "\tTest$tnum: starting at $start" - if { $skip == 1 } { - for { set count 0 } { $count < $start } { incr count } { + # determines the dictionary entry to start with. + # In normal use, skip will match start. + + puts "\tTest$tnum: Starting at $start with dictionary entry $skip" + if { $skip != 0 } { + for { set count 0 } { $count < $skip } { incr count } { gets $did str } } diff --git a/db/test/test002.tcl b/db/test/test002.tcl index c46bc9841..265f0640f 100644 --- a/db/test/test002.tcl +++ b/db/test/test002.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test002.tcl,v 11.20 2003/01/08 05:53:36 bostic Exp $ +# $Id: test002.tcl,v 11.21 2004/01/28 03:36:30 bostic Exp $ # # TEST test002 # TEST Small keys/medium data diff --git a/db/test/test003.tcl b/db/test/test003.tcl index db5a385e7..4e9c0125d 100644 --- a/db/test/test003.tcl +++ b/db/test/test003.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test003.tcl,v 11.26 2003/01/08 05:53:36 bostic Exp $ +# $Id: test003.tcl,v 11.28 2004/01/28 03:36:30 bostic Exp $ # # TEST test003 # TEST Small keys/large data @@ -71,11 +71,6 @@ proc test003 { method args} { # Here is the loop where we put and get each key/data pair set file_list [get_file_list] - if { $limit } { - if { [llength $file_list] > $limit } { - set file_list [lrange $file_list 1 $limit] - } - } set len [llength $file_list] puts "\tTest003.a: put/get loop $len entries" set count 0 diff --git a/db/test/test004.tcl b/db/test/test004.tcl index df4a9151d..598e3eb22 100644 --- a/db/test/test004.tcl +++ b/db/test/test004.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test004.tcl,v 11.23 2003/01/08 05:53:38 bostic Exp $ +# $Id: test004.tcl,v 11.24 2004/01/28 03:36:30 bostic Exp $ # # TEST test004 # TEST Small keys/medium data diff --git a/db/test/test005.tcl b/db/test/test005.tcl index 0b0b72813..e3972b5b1 100644 --- a/db/test/test005.tcl +++ b/db/test/test005.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test005.tcl,v 11.9 2003/01/08 05:53:38 bostic Exp $ +# $Id: test005.tcl,v 11.10 2004/01/28 03:36:30 bostic Exp $ # # TEST test005 # TEST Small keys/medium data diff --git a/db/test/test006.tcl b/db/test/test006.tcl index 202d4e07b..4107bbf09 100644 --- a/db/test/test006.tcl +++ b/db/test/test006.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. 
# -# $Id: test006.tcl,v 11.23 2003/09/04 23:41:16 bostic Exp $ +# $Id: test006.tcl,v 11.26 2004/09/22 18:01:06 bostic Exp $ # # TEST test006 # TEST Small keys/medium data @@ -31,7 +31,7 @@ proc test006 { method {nentries 10000} {reopen 0} {tnum "006"} \ proc test006_body { method {nentries 10000} {reopen 0} {tnum "006"} \ {ndups 5} sort flags {largs ""} } { - + global is_je_test source ./include.tcl set do_renumber [is_rrecno $method] @@ -54,6 +54,10 @@ proc test006_body { method {nentries 10000} {reopen 0} {tnum "006"} \ set basename $dbname incr eindex set env [lindex $largs $eindex] + if { $is_je_test && $sort == "unsorted" } { + puts "Test$tnum skipping $sort duplicates for JE" + return + } set txnenv [is_txnenv $env] if { $txnenv == 1 } { append largs " -auto_commit " diff --git a/db/test/test007.tcl b/db/test/test007.tcl index 7c858a0c8..6cb57495a 100644 --- a/db/test/test007.tcl +++ b/db/test/test007.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test007.tcl,v 11.11 2003/01/08 05:53:39 bostic Exp $ +# $Id: test007.tcl,v 11.12 2004/01/28 03:36:30 bostic Exp $ # # TEST test007 # TEST Small keys/medium data diff --git a/db/test/test008.tcl b/db/test/test008.tcl index ee0258eb3..d798d3fb2 100644 --- a/db/test/test008.tcl +++ b/db/test/test008.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test008.tcl,v 11.25 2003/01/08 05:53:40 bostic Exp $ +# $Id: test008.tcl,v 11.26 2004/01/28 03:36:30 bostic Exp $ # # TEST test008 # TEST Small keys/large data diff --git a/db/test/test009.tcl b/db/test/test009.tcl index 898abf7aa..258b01277 100644 --- a/db/test/test009.tcl +++ b/db/test/test009.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test009.tcl,v 11.10 2003/01/08 05:53:40 bostic Exp $ +# $Id: test009.tcl,v 11.11 2004/01/28 03:36:30 bostic Exp $ # # TEST test009 # TEST Small keys/large data diff --git a/db/test/test010.tcl b/db/test/test010.tcl index 5ad011f24..bbf6fba67 100644 --- a/db/test/test010.tcl +++ b/db/test/test010.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test010.tcl,v 11.22 2003/01/08 05:53:40 bostic Exp $ +# $Id: test010.tcl,v 11.23 2004/01/28 03:36:30 bostic Exp $ # # TEST test010 # TEST Duplicate test diff --git a/db/test/test011.tcl b/db/test/test011.tcl index 704cc5363..ad8439011 100644 --- a/db/test/test011.tcl +++ b/db/test/test011.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test011.tcl,v 11.29 2003/01/08 05:53:41 bostic Exp $ +# $Id: test011.tcl,v 11.30 2004/01/28 03:36:30 bostic Exp $ # # TEST test011 # TEST Duplicate test diff --git a/db/test/test012.tcl b/db/test/test012.tcl index 88e40b7ea..42225d982 100644 --- a/db/test/test012.tcl +++ b/db/test/test012.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. 
# -# $Id: test012.tcl,v 11.21 2003/01/08 05:53:41 bostic Exp $ +# $Id: test012.tcl,v 11.22 2004/01/28 03:36:30 bostic Exp $ # # TEST test012 # TEST Large keys/small data diff --git a/db/test/test013.tcl b/db/test/test013.tcl index 416691ea1..e456965bf 100644 --- a/db/test/test013.tcl +++ b/db/test/test013.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test013.tcl,v 11.24 2003/01/08 05:53:42 bostic Exp $ +# $Id: test013.tcl,v 11.26 2004/01/28 03:36:30 bostic Exp $ # # TEST test013 # TEST Partial put test @@ -18,7 +18,6 @@ proc test013 { method {nentries 10000} args } { global errorCode global errorInfo - global fixed_pad global fixed_len source ./include.tcl diff --git a/db/test/test014.tcl b/db/test/test014.tcl index 3a13d0964..708d5dc09 100644 --- a/db/test/test014.tcl +++ b/db/test/test014.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test014.tcl,v 11.25 2003/01/08 05:53:42 bostic Exp $ +# $Id: test014.tcl,v 11.26 2004/01/28 03:36:30 bostic Exp $ # # TEST test014 # TEST Exercise partial puts on short data diff --git a/db/test/test015.tcl b/db/test/test015.tcl index 12d5543de..940122811 100644 --- a/db/test/test015.tcl +++ b/db/test/test015.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test015.tcl,v 11.28 2003/01/08 05:53:43 bostic Exp $ +# $Id: test015.tcl,v 11.29 2004/01/28 03:36:30 bostic Exp $ # # TEST test015 # TEST Partial put test diff --git a/db/test/test016.tcl b/db/test/test016.tcl index f52f7ab5f..481c85ec7 100644 --- a/db/test/test016.tcl +++ b/db/test/test016.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test016.tcl,v 11.24 2003/01/08 05:53:43 bostic Exp $ +# $Id: test016.tcl,v 11.25 2004/01/28 03:36:30 bostic Exp $ # # TEST test016 # TEST Partial put test diff --git a/db/test/test017.tcl b/db/test/test017.tcl index b2e13c2bb..6503b2cc1 100644 --- a/db/test/test017.tcl +++ b/db/test/test017.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test017.tcl,v 11.28 2003/04/16 20:38:33 margo Exp $ +# $Id: test017.tcl,v 11.29 2004/01/28 03:36:30 bostic Exp $ # # TEST test017 # TEST Basic offpage duplicate test. diff --git a/db/test/test018.tcl b/db/test/test018.tcl index 85f71182d..bf2e3eb56 100644 --- a/db/test/test018.tcl +++ b/db/test/test018.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test018.tcl,v 11.9 2003/01/27 18:15:01 sandstro Exp $ +# $Id: test018.tcl,v 11.10 2004/01/28 03:36:30 bostic Exp $ # # TEST test018 # TEST Offpage duplicate test diff --git a/db/test/test019.tcl b/db/test/test019.tcl index 09914ec86..68f6487be 100644 --- a/db/test/test019.tcl +++ b/db/test/test019.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. 
# -# $Id: test019.tcl,v 11.22 2003/01/08 05:53:44 bostic Exp $ +# $Id: test019.tcl,v 11.24 2004/04/22 18:57:32 sue Exp $ # # TEST test019 # TEST Partial get test. @@ -101,8 +101,13 @@ proc test019 { method {nentries 10000} args } { set maxndx [expr [string length $data] - 1] - set beg [berkdb random_int 0 [expr $maxndx - 1]] - set len [berkdb random_int 0 [expr $maxndx * 2]] + if { $maxndx > 0 } { + set beg [berkdb random_int 0 [expr $maxndx - 1]] + set len [berkdb random_int 0 [expr $maxndx * 2]] + } else { + set beg 0 + set len 0 + } if { $txnenv == 1 } { set t [$env txn] diff --git a/db/test/test020.tcl b/db/test/test020.tcl index cf0d8ad6a..19eda9c31 100644 --- a/db/test/test020.tcl +++ b/db/test/test020.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test020.tcl,v 11.18 2003/01/08 05:53:45 bostic Exp $ +# $Id: test020.tcl,v 11.19 2004/01/28 03:36:30 bostic Exp $ # # TEST test020 # TEST In-Memory database tests. diff --git a/db/test/test021.tcl b/db/test/test021.tcl index c1b48e018..43a7a4bde 100644 --- a/db/test/test021.tcl +++ b/db/test/test021.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test021.tcl,v 11.16 2003/01/08 05:53:45 bostic Exp $ +# $Id: test021.tcl,v 11.17 2004/01/28 03:36:30 bostic Exp $ # # TEST test021 # TEST Btree range tests. diff --git a/db/test/test022.tcl b/db/test/test022.tcl index b160d37c5..deded62e7 100644 --- a/db/test/test022.tcl +++ b/db/test/test022.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test022.tcl,v 11.15 2003/01/08 05:53:46 bostic Exp $ +# $Id: test022.tcl,v 11.16 2004/01/28 03:36:30 bostic Exp $ # # TEST test022 # TEST Test of DB->getbyteswapped(). diff --git a/db/test/test023.tcl b/db/test/test023.tcl index b0516a344..c4a707288 100644 --- a/db/test/test023.tcl +++ b/db/test/test023.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test023.tcl,v 11.19 2003/01/08 05:53:46 bostic Exp $ +# $Id: test023.tcl,v 11.21 2004/09/20 17:06:16 sue Exp $ # # TEST test023 # TEST Duplicate test @@ -101,7 +101,7 @@ proc test023 { method args } { # Now current should fail set ret [$dbc get -current] - error_check_good dbc_get:CURRENT $ret [list [list [] []]] + error_check_good dbc_get:CURRENT $ret "" # Now Prev should fail set ret [$dbc get -prev] @@ -123,8 +123,7 @@ proc test023 { method args } { # Now current should fail set ret [$dbc get -current] - error_check_good \ - dbc_get:deleted $ret [list [list [] []]] + error_check_good dbc_get:deleted $ret "" # Prev and Next should work set ret [$dbc get -next] @@ -153,8 +152,7 @@ proc test023 { method args } { # Now current should fail set ret [$dbc get -current] - error_check_good \ - dbc_get:deleted $ret [list [list [] []]] + error_check_good dbc_get:deleted $ret "" # Next should fail set ret [$dbc get -next] diff --git a/db/test/test024.tcl b/db/test/test024.tcl index d6f11079a..4ac1fceae 100644 --- a/db/test/test024.tcl +++ b/db/test/test024.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. 
# -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test024.tcl,v 11.20 2003/01/08 05:53:46 bostic Exp $ +# $Id: test024.tcl,v 11.21 2004/01/28 03:36:30 bostic Exp $ # # TEST test024 # TEST Record number retrieval test. diff --git a/db/test/test025.tcl b/db/test/test025.tcl index 270456e43..8f3cb5c0c 100644 --- a/db/test/test025.tcl +++ b/db/test/test025.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test025.tcl,v 11.21 2003/01/08 05:53:47 bostic Exp $ +# $Id: test025.tcl,v 11.22 2004/01/28 03:36:30 bostic Exp $ # # TEST test025 # TEST DB_APPEND flag test. diff --git a/db/test/test026.tcl b/db/test/test026.tcl index d1110ec7d..ce91e2b46 100644 --- a/db/test/test026.tcl +++ b/db/test/test026.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test026.tcl,v 11.22 2003/01/08 05:53:47 bostic Exp $ +# $Id: test026.tcl,v 11.23 2004/01/28 03:36:30 bostic Exp $ # # TEST test026 # TEST Small keys/medium data w/duplicates diff --git a/db/test/test027.tcl b/db/test/test027.tcl index 53d5a7d88..7f6d78c3a 100644 --- a/db/test/test027.tcl +++ b/db/test/test027.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test027.tcl,v 11.9 2003/01/08 05:53:47 bostic Exp $ +# $Id: test027.tcl,v 11.10 2004/01/28 03:36:30 bostic Exp $ # # TEST test027 # TEST Off-page duplicate test diff --git a/db/test/test028.tcl b/db/test/test028.tcl index 6445332ea..3884d83e9 100644 --- a/db/test/test028.tcl +++ b/db/test/test028.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test028.tcl,v 11.21 2003/01/08 05:53:48 bostic Exp $ +# $Id: test028.tcl,v 11.22 2004/01/28 03:36:30 bostic Exp $ # # TEST test028 # TEST Cursor delete test diff --git a/db/test/test029.tcl b/db/test/test029.tcl index 7b825f199..53622efeb 100644 --- a/db/test/test029.tcl +++ b/db/test/test029.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test029.tcl,v 11.21 2003/01/08 05:53:48 bostic Exp $ +# $Id: test029.tcl,v 11.24 2004/09/22 18:01:06 bostic Exp $ # # TEST test029 # TEST Test the Btree and Record number renumbering. @@ -221,13 +221,15 @@ proc test029 { method {nentries 10000} args} { getn_after_cursor_del [lindex [lindex $ret 0] 1] $last_key # Re-put the first key and make sure that we renumber the last - # key appropriately. - puts "\tTest029.e: put with cursor and verify renumber" + # key appropriately. We can't do a c_put -current, so do + # a db put instead. 
if { [string compare $omethod "-btree"] == 0 } { - set ret [eval {$dbc put} \ - $pflags {-current $first_key}] - error_check_good dbc_put:DB_CURRENT $ret 0 + puts "\tTest029.e: put (non-cursor) and verify renumber" + set ret [eval {$db put} $txn \ + {$key [chop_data $method $first_key]}] + error_check_good db_put $ret 0 } else { + puts "\tTest029.e: put with cursor and verify renumber" set ret [eval {$dbc put} $pflags {-before $first_key}] error_check_bad dbc_put:DB_BEFORE $ret 0 } diff --git a/db/test/test030.tcl b/db/test/test030.tcl index 4104c59d4..3ee9daa3f 100644 --- a/db/test/test030.tcl +++ b/db/test/test030.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test030.tcl,v 11.19 2003/01/08 05:53:49 bostic Exp $ +# $Id: test030.tcl,v 11.20 2004/01/28 03:36:30 bostic Exp $ # # TEST test030 # TEST Test DB_NEXT_DUP Functionality. diff --git a/db/test/test031.tcl b/db/test/test031.tcl index fea40a1c4..2b4ad0d98 100644 --- a/db/test/test031.tcl +++ b/db/test/test031.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test031.tcl,v 11.26 2003/01/08 05:53:49 bostic Exp $ +# $Id: test031.tcl,v 11.27 2004/01/28 03:36:30 bostic Exp $ # # TEST test031 # TEST Duplicate sorting functionality diff --git a/db/test/test032.tcl b/db/test/test032.tcl index d83988a4b..e7cc49b47 100644 --- a/db/test/test032.tcl +++ b/db/test/test032.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test032.tcl,v 11.25 2003/01/08 05:53:50 bostic Exp $ +# $Id: test032.tcl,v 11.26 2004/01/28 03:36:31 bostic Exp $ # # TEST test032 # TEST DB_GET_BOTH, DB_GET_BOTH_RANGE diff --git a/db/test/test033.tcl b/db/test/test033.tcl index d91d845c5..b606883c1 100644 --- a/db/test/test033.tcl +++ b/db/test/test033.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test033.tcl,v 11.26 2003/01/08 05:53:50 bostic Exp $ +# $Id: test033.tcl,v 11.27 2004/01/28 03:36:31 bostic Exp $ # # TEST test033 # TEST DB_GET_BOTH without comparison function diff --git a/db/test/test034.tcl b/db/test/test034.tcl index a14098240..5da920522 100644 --- a/db/test/test034.tcl +++ b/db/test/test034.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1998-2003 +# Copyright (c) 1998-2004 # Sleepycat Software. All rights reserved. # -# $Id: test034.tcl,v 11.11 2003/01/27 18:15:01 sandstro Exp $ +# $Id: test034.tcl,v 11.12 2004/01/28 03:36:31 bostic Exp $ # # TEST test034 # TEST test032 with off-page duplicates diff --git a/db/test/test035.tcl b/db/test/test035.tcl index 7fa287895..63945c588 100644 --- a/db/test/test035.tcl +++ b/db/test/test035.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. 
# -# $Id: test035.tcl,v 11.11 2003/01/27 18:15:01 sandstro Exp $ +# $Id: test035.tcl,v 11.12 2004/01/28 03:36:31 bostic Exp $ # # TEST test035 # TEST Test033 with off-page duplicates diff --git a/db/test/test036.tcl b/db/test/test036.tcl index 81b652854..5fe24cb21 100644 --- a/db/test/test036.tcl +++ b/db/test/test036.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test036.tcl,v 11.19 2003/01/08 05:53:52 bostic Exp $ +# $Id: test036.tcl,v 11.20 2004/01/28 03:36:31 bostic Exp $ # # TEST test036 # TEST Test KEYFIRST and KEYLAST when the key doesn't exist diff --git a/db/test/test037.tcl b/db/test/test037.tcl index 3b5baa5c7..c571ffa3e 100644 --- a/db/test/test037.tcl +++ b/db/test/test037.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test037.tcl,v 11.19 2003/01/08 05:53:52 bostic Exp $ +# $Id: test037.tcl,v 11.20 2004/01/28 03:36:31 bostic Exp $ # # TEST test037 # TEST Test DB_RMW diff --git a/db/test/test038.tcl b/db/test/test038.tcl index 9b82ffdfb..eaf934f5d 100644 --- a/db/test/test038.tcl +++ b/db/test/test038.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test038.tcl,v 11.25 2003/01/08 05:53:52 bostic Exp $ +# $Id: test038.tcl,v 11.26 2004/01/28 03:36:31 bostic Exp $ # # TEST test038 # TEST DB_GET_BOTH, DB_GET_BOTH_RANGE on deleted items diff --git a/db/test/test039.tcl b/db/test/test039.tcl index 65865380a..67b2eaf33 100644 --- a/db/test/test039.tcl +++ b/db/test/test039.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test039.tcl,v 11.22 2003/01/08 05:53:53 bostic Exp $ +# $Id: test039.tcl,v 11.23 2004/01/28 03:36:31 bostic Exp $ # # TEST test039 # TEST DB_GET_BOTH/DB_GET_BOTH_RANGE on deleted items without comparison diff --git a/db/test/test040.tcl b/db/test/test040.tcl index 7151b56b6..61d5cd1f3 100644 --- a/db/test/test040.tcl +++ b/db/test/test040.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1998-2003 +# Copyright (c) 1998-2004 # Sleepycat Software. All rights reserved. # -# $Id: test040.tcl,v 11.9 2003/01/27 18:15:01 sandstro Exp $ +# $Id: test040.tcl,v 11.10 2004/01/28 03:36:31 bostic Exp $ # # TEST test040 # TEST Test038 with off-page duplicates diff --git a/db/test/test041.tcl b/db/test/test041.tcl index 45eb2819a..790ece943 100644 --- a/db/test/test041.tcl +++ b/db/test/test041.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test041.tcl,v 11.8 2003/01/08 05:53:54 bostic Exp $ +# $Id: test041.tcl,v 11.9 2004/01/28 03:36:31 bostic Exp $ # # TEST test041 # TEST Test039 with off-page duplicates diff --git a/db/test/test042.tcl b/db/test/test042.tcl index ec7bbf344..b216ebf27 100644 --- a/db/test/test042.tcl +++ b/db/test/test042.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. 
# -# $Id: test042.tcl,v 11.44 2003/09/29 18:19:18 sandstro Exp $ +# $Id: test042.tcl,v 11.46 2004/09/22 18:01:06 bostic Exp $ # # TEST test042 # TEST Concurrent Data Store test (CDB) @@ -123,7 +123,7 @@ proc test042_body { method nentries alldb args } { puts "Test042: $procs independent processes now running" watch_procs $pidlist - # Make sure we haven't added or lost any entries. + # Make sure we haven't added or lost any entries. set dblist [glob $testdir/$basename.*.db] foreach file $dblist { set tf [file tail $file] diff --git a/db/test/test043.tcl b/db/test/test043.tcl index ee5e1fc2d..bbb934cce 100644 --- a/db/test/test043.tcl +++ b/db/test/test043.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test043.tcl,v 11.18 2003/01/08 05:53:54 bostic Exp $ +# $Id: test043.tcl,v 11.19 2004/01/28 03:36:31 bostic Exp $ # # TEST test043 # TEST Recno renumbering and implicit creation test diff --git a/db/test/test044.tcl b/db/test/test044.tcl index 0bb250134..22d56a434 100644 --- a/db/test/test044.tcl +++ b/db/test/test044.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test044.tcl,v 11.34 2003/09/16 19:01:36 sandstro Exp $ +# $Id: test044.tcl,v 11.35 2004/01/28 03:36:31 bostic Exp $ # # TEST test044 # TEST Small system integration tests diff --git a/db/test/test045.tcl b/db/test/test045.tcl index 25e8ae51c..2b4c517ca 100644 --- a/db/test/test045.tcl +++ b/db/test/test045.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test045.tcl,v 11.26 2003/09/16 19:01:36 sandstro Exp $ +# $Id: test045.tcl,v 11.27 2004/01/28 03:36:31 bostic Exp $ # # TEST test045 # TEST Small random tester diff --git a/db/test/test046.tcl b/db/test/test046.tcl index d20700706..63d0ec7c4 100644 --- a/db/test/test046.tcl +++ b/db/test/test046.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test046.tcl,v 11.35 2003/01/08 05:53:55 bostic Exp $ +# $Id: test046.tcl,v 11.36 2004/01/28 03:36:31 bostic Exp $ # # TEST test046 # TEST Overwrite test of small/big key/data with cursor checks. diff --git a/db/test/test047.tcl b/db/test/test047.tcl index 824aaa3eb..48b6fc759 100644 --- a/db/test/test047.tcl +++ b/db/test/test047.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test047.tcl,v 11.21 2003/03/27 16:34:48 sandstro Exp $ +# $Id: test047.tcl,v 11.22 2004/01/28 03:36:31 bostic Exp $ # # TEST test047 # TEST DBcursor->c_get get test with SET_RANGE option. diff --git a/db/test/test048.tcl b/db/test/test048.tcl index 4c1fb0c18..db73b2b6d 100644 --- a/db/test/test048.tcl +++ b/db/test/test048.tcl @@ -1,14 +1,15 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test048.tcl,v 11.20 2003/03/27 16:34:48 sandstro Exp $ +# $Id: test048.tcl,v 11.22 2004/05/13 18:51:43 mjc Exp $ # # TEST test048 # TEST Cursor stability across Btree splits. 
proc test048 { method args } { global errorCode + global is_je_test source ./include.tcl set tnum 048 @@ -115,7 +116,7 @@ proc test048 { method args } { puts "\tTest$tnum.e: Make sure split happened." # XXX We cannot call stat with active txns or we deadlock. - if { $txnenv != 1 } { + if { $txnenv != 1 && !$is_je_test } { error_check_bad stat:check-split [is_substr [$db stat] \ "{{Internal pages} 0}"] 1 } diff --git a/db/test/test049.tcl b/db/test/test049.tcl index 252631229..f8d173380 100644 --- a/db/test/test049.tcl +++ b/db/test/test049.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test049.tcl,v 11.23 2003/03/27 16:34:48 sandstro Exp $ +# $Id: test049.tcl,v 11.24 2004/01/28 03:36:31 bostic Exp $ # # TEST test049 # TEST Cursor operations on uninitialized cursors. diff --git a/db/test/test050.tcl b/db/test/test050.tcl index 2e224ce01..72be5af67 100644 --- a/db/test/test050.tcl +++ b/db/test/test050.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test050.tcl,v 11.22 2003/01/08 05:53:57 bostic Exp $ +# $Id: test050.tcl,v 11.23 2004/01/28 03:36:31 bostic Exp $ # # TEST test050 # TEST Overwrite test of small/big key/data with cursor checks for Recno. diff --git a/db/test/test051.tcl b/db/test/test051.tcl index 778b9d064..5e09835e3 100644 --- a/db/test/test051.tcl +++ b/db/test/test051.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test051.tcl,v 11.23 2003/06/18 14:13:11 bostic Exp $ +# $Id: test051.tcl,v 11.25 2004/01/28 03:36:31 bostic Exp $ # # TEST test051 # TEST Fixed-length record Recno test. @@ -186,7 +186,7 @@ proc test051 { method { args "" } } { set ret [eval {$db put} $txn {$key $data}] error_check_good dbput:init $ret 0 - puts "\t\t Test051.g: Replace at offset $doff." + puts "\t\tTest051.g: Replace at offset $doff." set ret [eval {$db put -partial [list $doff $dlen]} $txn \ {$key $pdata}] error_check_good dbput:partial $ret 0 diff --git a/db/test/test052.tcl b/db/test/test052.tcl index acce88c37..c7d891d33 100644 --- a/db/test/test052.tcl +++ b/db/test/test052.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test052.tcl,v 11.17 2003/01/08 05:53:57 bostic Exp $ +# $Id: test052.tcl,v 11.20 2004/09/20 17:06:16 sue Exp $ # # TEST test052 # TEST Renumbering record Recno test. @@ -88,7 +88,7 @@ proc test052 { method args } { } puts "\tTest052: Deletes by key." - puts "\t Test052.a: Get data with SET, then delete before cursor." + puts "\tTest052.a: Get data with SET, then delete before cursor." # get key in middle of page, call this the nth set curr to it set i [expr $nkeys/2] set k $keys($i) @@ -106,7 +106,7 @@ proc test052 { method args } { error_check_good dbc:keys \ [lindex [lindex [$dbc get -current] 0] 0] $keys([expr $nkeys/2 - 1]) - puts "\t Test052.b: Delete cursor item by key." + puts "\tTest052.b: Delete cursor item by key." 
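# A minimal sketch (not from the patch itself) of the guarded stat check
# that test048 above (and test053 below) now uses: the [$db stat] output is
# examined as a string with is_substr, and the check is skipped both when a
# transaction is active (the XXX comment above notes stat would deadlock)
# and when running against JE, whose stat output differs.  $db, $txnenv and
# $is_je_test are the suite's variables, assumed in scope.
if { $txnenv != 1 && !$is_je_test } {
	# Fail if the stat output still claims zero internal pages,
	# i.e. no btree split has happened.
	error_check_bad stat:check-split \
	    [is_substr [$db stat] "{{Internal pages} 0}"] 1
}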
set i [expr $nkeys/2 ] set ret [$dbc get -set $keys($i)] @@ -129,7 +129,7 @@ proc test052 { method args } { error_check_good dbc:getnext:keys \ [lindex [lindex $ret 0] 0] $keys($i) - puts "\t Test052.c: Delete item after cursor." + puts "\tTest052.c: Delete item after cursor." # should be { keys($nkeys/2), darray($nkeys/2 + 2) } set i [expr $nkeys/2] # deleting data for key after current (key $nkeys/2 + 1) @@ -144,18 +144,14 @@ proc test052 { method args } { $darray([expr $i + 2]) puts "\tTest052: Deletes by cursor." - puts "\t Test052.d: Delete, do DB_NEXT." + puts "\tTest052.d: Delete, do DB_NEXT." set i 1 set ret [$dbc get -first] error_check_bad dbc_get:first [llength $ret] 0 error_check_good dbc_get:first [lindex [lindex $ret 0] 1] $darray($i) error_check_good dbc_del [$dbc del] 0 set ret [$dbc get -current] - error_check_bad dbc_get:current [llength $ret] 0 - error_check_good dbc:getcurrent:key \ - [llength [lindex [lindex $ret 0] 0]] 0 - error_check_good dbc:getcurrent:data \ - [llength [lindex [lindex $ret 0] 1]] 0 + error_check_good dbc_get:current [llength $ret] 0 set ret [$dbc get -next] error_check_bad dbc_get:next [llength $ret] 0 @@ -167,14 +163,10 @@ proc test052 { method args } { # Move one more forward, so we're not on the first item. error_check_bad dbc:getnext [llength [$dbc get -next]] 0 - puts "\t Test052.e: Delete, do DB_PREV." + puts "\tTest052.e: Delete, do DB_PREV." error_check_good dbc:del [$dbc del] 0 set ret [$dbc get -current] - error_check_bad dbc:get:curr [llength $ret] 0 - error_check_good dbc:getcurrent:key \ - [llength [lindex [lindex $ret 0] 0]] 0 - error_check_good dbc:getcurrent:data \ - [llength [lindex [lindex $ret 0] 1]] 0 + error_check_good dbc:get:curr [llength $ret] 0 # next should now reference the record that was previously after # old current @@ -185,6 +177,7 @@ proc test052 { method args } { error_check_good dbc:get:next:keys \ [lindex [lindex $ret 0] 0] $keys([expr $i + 1]) + set ret [$dbc get -prev] error_check_bad dbc:get:curr [llength $ret] 0 error_check_good dbc:get:curr:compare \ @@ -200,7 +193,7 @@ proc test052 { method args } { error_check_good delfirst [$dbc del] 0 puts "\tTest052: Inserts." - puts "\t Test052.g: Insert before (DB_BEFORE)." + puts "\tTest052.g: Insert before (DB_BEFORE)." set i 1 set ret [$dbc get -first] error_check_bad dbc:get:first [llength $ret] 0 @@ -227,7 +220,7 @@ proc test052 { method args } { set ret [$dbc get -prev] error_check_bad dbc_get:prev [llength $ret] 0 - puts "\t Test052.h: Insert by cursor after (DB_AFTER)." + puts "\tTest052.h: Insert by cursor after (DB_AFTER)." set i [incr i] set ret [$dbc put -after $darray($i)] # should return new key, which should be $keys($i) @@ -245,7 +238,7 @@ proc test052 { method args } { error_check_good dbc:get:next:compare \ $ret [list [list $keys([expr $i + 1]) $darray([expr $i + 2])]] - puts "\t Test052.i: Insert (overwrite) current item (DB_CURRENT)." + puts "\tTest052.i: Insert (overwrite) current item (DB_CURRENT)." set i 1 set ret [$dbc get -first] error_check_bad dbc_get:first [llength $ret] 0 diff --git a/db/test/test053.tcl b/db/test/test053.tcl index ae3c5451b..5c5e060ec 100644 --- a/db/test/test053.tcl +++ b/db/test/test053.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. 
# -# $Id: test053.tcl,v 11.19 2003/01/08 05:53:58 bostic Exp $ +# $Id: test053.tcl,v 11.21 2004/05/13 18:51:43 mjc Exp $ # # TEST test053 # TEST Test of the DB_REVSPLITOFF flag in the Btree and Btree-w-recnum @@ -11,6 +11,7 @@ proc test053 { method args } { global alphabet global errorCode + global is_je_test source ./include.tcl set args [convert_args $method $args] @@ -97,9 +98,11 @@ proc test053 { method args } { } } - puts "\tTest053.c: Check page count." - error_check_good page_count:check \ - [is_substr [$db stat] "{Leaf pages} $npages"] 1 + if { !$is_je_test } { + puts "\tTest053.c: Check page count." + error_check_good page_count:check \ + [is_substr [$db stat] "{Leaf pages} $npages"] 1 + } puts "\tTest053.d: Delete all but one key per page." for {set i 0} { $i < $npages } {incr i } { @@ -116,9 +119,12 @@ proc test053 { method args } { } } } - puts "\tTest053.e: Check to make sure all pages are still there." - error_check_good page_count:check \ - [is_substr [$db stat] "{Leaf pages} $npages"] 1 + + if { !$is_je_test } { + puts "\tTest053.e: Check to make sure all pages are still there." + error_check_good page_count:check \ + [is_substr [$db stat] "{Leaf pages} $npages"] 1 + } if { $txnenv == 1 } { set t [$env txn] diff --git a/db/test/test054.tcl b/db/test/test054.tcl index 31657fa6d..44d0335f4 100644 --- a/db/test/test054.tcl +++ b/db/test/test054.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test054.tcl,v 11.24 2003/01/08 05:53:58 bostic Exp $ +# $Id: test054.tcl,v 11.26 2004/09/20 17:06:16 sue Exp $ # # TEST test054 # TEST Cursor maintenance during key/data deletion. @@ -191,7 +191,7 @@ proc test054 { method args } { db_del:$key_set(3) [eval {$db del} $txn {$key_set(3)}] 0 # NEEDS TO COME BACK IN, BUG CHECK set ret [$curs get -current] - error_check_good current_after_del $ret [list [list [] []]] + error_check_good current_after_del $ret "" error_check_good cursor_close [$curs close] 0 if { $txnenv == 1 } { error_check_good txn [$t commit] 0 @@ -371,8 +371,7 @@ proc test054 { method args } { # Verify curs1 and curs2 # current should fail set ret [$curs get -current] - error_check_good \ - curs1_get_after_del $ret [list [list [] []]] + error_check_good curs1_get_after_del $ret "" set r [$curs2 get -current] error_check_bad curs2_get [llength $r] 0 @@ -386,10 +385,10 @@ proc test054 { method args } { # Verify curs1 and curs2 set ret [$curs get -current] - error_check_good curs1_get:del2 $ret [list [list [] []]] + error_check_good curs1_get:del2 $ret "" set ret [$curs2 get -current] - error_check_good curs2_get:del2 $ret [list [list [] []]] + error_check_good curs2_get:del2 $ret "" # Now verify that next and prev work. @@ -430,10 +429,10 @@ proc test054 { method args } { # Verify gets on both 1 and 2 set ret [$curs get -current] error_check_good \ - curs1_get:deleted $ret [list [list [] []]] + curs1_get:deleted $ret "" set ret [$curs2 get -current] error_check_good \ - curs2_get:deleted $ret [list [list [] []]] + curs2_get:deleted $ret "" puts "\tTest054.b5: Now do a next on both cursors" diff --git a/db/test/test055.tcl b/db/test/test055.tcl index d2c24e787..96bf108c3 100644 --- a/db/test/test055.tcl +++ b/db/test/test055.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. 
# -# $Id: test055.tcl,v 11.17 2003/01/08 05:53:58 bostic Exp $ +# $Id: test055.tcl,v 11.18 2004/01/28 03:36:31 bostic Exp $ # # TEST test055 # TEST Basic cursor operations. diff --git a/db/test/test056.tcl b/db/test/test056.tcl index 11e0757ed..f689d6592 100644 --- a/db/test/test056.tcl +++ b/db/test/test056.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test056.tcl,v 11.19 2003/01/08 05:53:58 bostic Exp $ +# $Id: test056.tcl,v 11.21 2004/09/20 17:06:16 sue Exp $ # # TEST test056 # TEST Cursor maintenance during deletes. @@ -118,7 +118,7 @@ proc test056 { method args } { # Now check the get current on the cursor. set ret [$curs get -current] - error_check_good curs_after_del $ret [list [list [] []]] + error_check_good curs_after_del $ret "" # Now check that the rest of the database looks intact. There # should be only two keys, 1 and 3. diff --git a/db/test/test057.tcl b/db/test/test057.tcl index 438dadd27..56fe2bf44 100644 --- a/db/test/test057.tcl +++ b/db/test/test057.tcl @@ -1,17 +1,18 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test057.tcl,v 11.23 2003/01/08 05:53:59 bostic Exp $ +# $Id: test057.tcl,v 11.26 2004/09/20 17:06:16 sue Exp $ # # TEST test057 # TEST Cursor maintenance during key deletes. -# TEST Check if we handle the case where we delete a key with the cursor on -# TEST it and then add the same key. The cursor should not get the new item -# TEST returned, but the item shouldn't disappear. -# TEST Run test tests, one where the overwriting put is done with a put and -# TEST one where it's done with a cursor put. +# TEST 1. Delete a key with a cursor. Add the key back with a regular +# TEST put. Make sure the cursor can't get the new item. +# TEST 2. Put two cursors on one item. Delete through one cursor, +# TEST check that the other sees the change. +# TEST 3. Same as 2, with the two cursors on a duplicate. + proc test057 { method args } { global errorInfo source ./include.tcl @@ -105,7 +106,7 @@ proc test057 { method args } { error_check_good delete $r 0 # Now check the get current on the cursor. - error_check_good curs_get:del [$curs get -current] [list [list [] []]] + error_check_good curs_get:del [$curs get -current] "" # Now do a put on the key set r [eval {$db put} $txn $flags {$key_set(1) new_datum$key_set(1)}] @@ -116,7 +117,7 @@ proc test057 { method args } { error_check_good get [lindex [lindex $r 0] 1] new_datum$key_set(1) # Recheck cursor - error_check_good curs_get:deleted [$curs get -current] [list [list [] []]] + error_check_good curs_get:deleted [$curs get -current] "" # Move cursor and see if we get the key. 
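# A minimal sketch (not from the patch itself) of the expectation change
# that runs through test023, test052, test054, test056 and test057 here:
# once the item under a cursor has been deleted, the tests now expect
# [$dbc get -current] to come back empty ("") rather than the old
# {{ {} {} }} placeholder pair.  $curs is a cursor positioned on an
# existing item, as in the test057 hunks above.
error_check_good curs_del [$curs del] 0
# The current item is now gone, so get -current returns an empty result.
error_check_good curs_get:deleted [$curs get -current] ""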
set r [$curs get -first] @@ -149,37 +150,8 @@ proc test057 { method args } { error_check_good curs1_del [$curs del] 0 # Verify gets on both 1 and 2 - error_check_good curs_get:deleted [$curs get -current] \ - [list [list [] []]] - error_check_good curs_get:deleted [$curs2 get -current] \ - [list [list [] []]] - - # Now do a replace through cursor 2 - set pflags "-current" - if {[is_hash $method] == 1} { - error_check_good curs1_get_after_del [is_substr \ - [$curs2 put $pflags new_datum$key_set(3)] "DB_NOTFOUND"] 1 - - # Gets fail - error_check_good curs1_get:deleted \ - [$curs get -current] \ - [list [list [] []]] - error_check_good curs2_get:deleted \ - [$curs get -current] \ - [list [list [] []]] - } else { - # btree only, recno is skipped this test - set ret [$curs2 put $pflags new_datum$key_set(3)] - error_check_good curs_replace $ret 0 - } - - # Gets fail - #error_check_good curs1_get:deleted [catch {$curs get -current} r] 1 - #error_check_good curs1_get_after_del \ - [is_substr $errorInfo "DB_KEYEMPTY"] 1 - #error_check_good curs2_get:deleted [catch {$curs2 get -current} r] 1 - #error_check_good curs2_get_after_del \ - [is_substr $errorInfo "DB_KEYEMPTY"] 1 + error_check_good curs_get:deleted [$curs get -current] "" + error_check_good curs_get:deleted [$curs2 get -current] "" puts "\tTest057.c:\ Set two cursors on a dup, delete one, overwrite other" @@ -217,27 +189,8 @@ proc test057 { method args } { error_check_good curs1_del [$curs del] 0 # Verify gets on both 1 and 2 - error_check_good curs_get:deleted [$curs get -current] \ - [list [list [] []]] - error_check_good curs_get:deleted [$curs2 get -current] \ - [list [list [] []]] - - # Now do a replace through cursor 2 -- this will work on btree but - # not on hash - if {[is_hash $method] == 1} { - error_check_good hash_replace \ - [is_substr [$curs2 put -current new_dup_1] "DB_NOTFOUND"] 1 - } else { - error_check_good curs_replace [$curs2 put -current new_dup_1] 0 - } - - # Both gets should fail - #error_check_good curs1_get:deleted [catch {$curs get -current} r] 1 - #error_check_good curs1_get_after_del \ - [is_substr $errorInfo "DB_KEYEMPTY"] 1 - #error_check_good curs2_get:deleted [catch {$curs2 get -current} r] 1 - #error_check_good curs2_get_after_del \ - [is_substr $errorInfo "DB_KEYEMPTY"] 1 + error_check_good curs_get:deleted [$curs get -current] "" + error_check_good curs_get:deleted [$curs2 get -current] "" error_check_good curs2_close [$curs2 close] 0 error_check_good curs_close [$curs close] 0 diff --git a/db/test/test058.tcl b/db/test/test058.tcl index 8d8b0e56e..4213e7279 100644 --- a/db/test/test058.tcl +++ b/db/test/test058.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test058.tcl,v 11.21 2003/01/08 05:53:59 bostic Exp $ +# $Id: test058.tcl,v 11.22 2004/01/28 03:36:31 bostic Exp $ # # TEST test058 # TEST Verify that deleting and reading duplicates results in correct ordering. diff --git a/db/test/test059.tcl b/db/test/test059.tcl index dcd3e524d..887ce6d94 100644 --- a/db/test/test059.tcl +++ b/db/test/test059.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test059.tcl,v 11.19 2003/01/08 05:53:59 bostic Exp $ +# $Id: test059.tcl,v 11.20 2004/01/28 03:36:31 bostic Exp $ # # TEST test059 # TEST Cursor ops work with a partial length of 0. 
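# A minimal sketch (not from the patch itself) of the -partial {doff dlen}
# form whose length-0 case test059 above covers for cursor ops.  The
# non-cursor variant, as exercised by test051 earlier in this patch,
# replaces dlen bytes of the existing record starting at offset doff with
# the supplied data; with dlen 0 nothing is overwritten and the new bytes
# are inserted at that offset.  $db, $txn and $key are assumed to come from
# the surrounding test setup.
set doff 10
set pdata "xyz"
set ret [eval {$db put -partial [list $doff 0]} $txn {$key $pdata}]
error_check_good dbput:partial $ret 0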
diff --git a/db/test/test060.tcl b/db/test/test060.tcl index 90bc65b1f..770fb5d05 100644 --- a/db/test/test060.tcl +++ b/db/test/test060.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test060.tcl,v 11.11 2003/01/08 05:53:59 bostic Exp $ +# $Id: test060.tcl,v 11.12 2004/01/28 03:36:31 bostic Exp $ # # TEST test060 # TEST Test of the DB_EXCL flag to DB->open(). diff --git a/db/test/test061.tcl b/db/test/test061.tcl index c74686bc9..f4b12c45d 100644 --- a/db/test/test061.tcl +++ b/db/test/test061.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test061.tcl,v 11.19 2003/01/08 05:54:00 bostic Exp $ +# $Id: test061.tcl,v 11.20 2004/01/28 03:36:31 bostic Exp $ # # TEST test061 # TEST Test of txn abort and commit for in-memory databases. diff --git a/db/test/test062.tcl b/db/test/test062.tcl index 916d7e4ab..f26c66d6a 100644 --- a/db/test/test062.tcl +++ b/db/test/test062.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test062.tcl,v 11.22 2003/01/08 05:54:00 bostic Exp $ +# $Id: test062.tcl,v 11.23 2004/01/28 03:36:31 bostic Exp $ # # TEST test062 # TEST Test of partial puts (using DB_CURRENT) onto duplicate pages. diff --git a/db/test/test063.tcl b/db/test/test063.tcl index 2e06a4008..8918fa4fa 100644 --- a/db/test/test063.tcl +++ b/db/test/test063.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test063.tcl,v 11.19 2003/01/08 05:54:00 bostic Exp $ +# $Id: test063.tcl,v 11.20 2004/01/28 03:36:31 bostic Exp $ # # TEST test063 # TEST Test of the DB_RDONLY flag to DB->open diff --git a/db/test/test064.tcl b/db/test/test064.tcl index 496b80df6..13a646574 100644 --- a/db/test/test064.tcl +++ b/db/test/test064.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test064.tcl,v 11.15 2003/01/08 05:54:00 bostic Exp $ +# $Id: test064.tcl,v 11.16 2004/01/28 03:36:31 bostic Exp $ # # TEST test064 # TEST Test of DB->get_type diff --git a/db/test/test065.tcl b/db/test/test065.tcl index 284d37e95..cb29b106d 100644 --- a/db/test/test065.tcl +++ b/db/test/test065.tcl @@ -1,12 +1,13 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test065.tcl,v 11.19 2003/09/04 23:41:17 bostic Exp $ +# $Id: test065.tcl,v 11.22 2004/09/22 18:01:06 bostic Exp $ # # TEST test065 -# TEST Test of DB->stat(DB_FASTSTAT) +# TEST Test of DB->stat, both -DB_FAST_STAT and row +# TEST counts with DB->stat -txn. 
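# A minimal sketch (not from the patch itself) of the row-count checks this
# version of test065 adds after each put and delete: [$db stat] is issued
# inside the same transaction and the suite's getstats helper extracts the
# "Number of records" field, which must track the number of live rows.
# $db, $txn, getstats and $ndx (records inserted so far) are assumed to be
# set up as in the test065 hunks below.
set statret [eval {$db stat} $txn]
set rowcount [getstats $statret "Number of records"]
error_check_good rowcount $rowcount $ndx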
proc test065 { method args } { source ./include.tcl global errorCode @@ -91,6 +92,9 @@ proc test065 { method args } { } set ret [eval {$db put} $txn {$keypfx$ndx $data}] error_check_good db_put $ret 0 + set statret [eval {$db stat} $txn] + set rowcount [getstats $statret "Number of records"] + error_check_good rowcount $rowcount $ndx if { $txnenv == 1 } { error_check_good txn [$t commit] 0 } @@ -113,8 +117,13 @@ proc test065 { method args } { # have deleted 5000 and we'll croak! So delete key # 1, repeatedly. set ret [eval {$db del} $txn {[concat $keypfx 1]}] + set statret [eval {$db stat} $txn] + set rowcount [getstats $statret "Number of records"] + error_check_good rowcount $rowcount [expr $nentries - $ndx] } else { set ret [eval {$db del} $txn {$keypfx$ndx}] + set rowcount [getstats $statret "Number of records"] + error_check_good rowcount $rowcount $nentries } error_check_good db_del $ret 0 if { $txnenv == 1 } { diff --git a/db/test/test066.tcl b/db/test/test066.tcl index 5b532bb15..6f3120907 100644 --- a/db/test/test066.tcl +++ b/db/test/test066.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test066.tcl,v 11.14 2003/01/08 05:54:01 bostic Exp $ +# $Id: test066.tcl,v 11.15 2004/01/28 03:36:31 bostic Exp $ # # TEST test066 # TEST Test of cursor overwrites of DB_CURRENT w/ duplicates. diff --git a/db/test/test067.tcl b/db/test/test067.tcl index 5615b0430..710c6b9c2 100644 --- a/db/test/test067.tcl +++ b/db/test/test067.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test067.tcl,v 11.22 2003/07/18 14:11:57 sandstro Exp $ +# $Id: test067.tcl,v 11.25 2004/09/22 18:01:06 bostic Exp $ # # TEST test067 # TEST Test of DB_CURRENT partial puts onto almost empty duplicate @@ -31,6 +31,7 @@ proc test067 { method {ndups 1000} {tnum "067"} args } { source ./include.tcl global alphabet global errorCode + global is_je_test set args [convert_args $method $args] set omethod [convert_method $method] @@ -68,6 +69,10 @@ proc test067 { method {ndups 1000} {tnum "067"} args } { $method ($args) Partial puts on near-empty duplicate pages." foreach dupopt { "-dup" "-dup -dupsort" } { + if { $is_je_test && $dupopt == "-dup" } { + continue + } + # # Testdir might get reset from the env's home dir back # to the default if this calls something that sources diff --git a/db/test/test068.tcl b/db/test/test068.tcl index 5acda82c2..a4fb56da0 100644 --- a/db/test/test068.tcl +++ b/db/test/test068.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test068.tcl,v 11.19 2003/01/08 05:54:01 bostic Exp $ +# $Id: test068.tcl,v 11.21 2004/05/13 18:51:43 mjc Exp $ # # TEST test068 # TEST Test of DB_BEFORE and DB_AFTER with partial puts. @@ -13,6 +13,7 @@ proc test068 { method args } { source ./include.tcl global alphabet global errorCode + global is_je_test set tnum "068" @@ -71,6 +72,10 @@ proc test068 { method args } { } foreach dupopt $dupoptlist { + if { $is_je_test && $dupopt == "-dup" } { + continue + } + # # Testdir might be reset in the loop by some proc sourcing # include.tcl. 
Reset it to the env's home here, before diff --git a/db/test/test069.tcl b/db/test/test069.tcl index 420f4d7d6..46104ffa2 100644 --- a/db/test/test069.tcl +++ b/db/test/test069.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test069.tcl,v 11.9 2003/01/08 05:54:01 bostic Exp $ +# $Id: test069.tcl,v 11.10 2004/01/28 03:36:31 bostic Exp $ # # TEST test069 # TEST Test of DB_CURRENT partial puts without duplicates-- test067 w/ diff --git a/db/test/test070.tcl b/db/test/test070.tcl index 0326cdb17..9d124a77b 100644 --- a/db/test/test070.tcl +++ b/db/test/test070.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test070.tcl,v 11.31 2003/09/08 18:12:56 bostic Exp $ +# $Id: test070.tcl,v 11.33 2004/02/17 16:29:07 dda Exp $ # # TEST test070 # TEST Test of DB_CONSUME (Four consumers, 1000 items.) @@ -107,7 +107,7 @@ proc test070 { method {nconsumers 4} {nproducers 2} \ } close $iid } - set sortreclist [lsort -integer $reclist] + set sortreclist [lsort -command int32_compare $reclist] set nitems [expr $start + $nitems] for { set ndx $start } { $ndx < $nitems } { set ndx [expr $ndx + 1] } { diff --git a/db/test/test071.tcl b/db/test/test071.tcl index 9a760ed12..7834c82ad 100644 --- a/db/test/test071.tcl +++ b/db/test/test071.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test071.tcl,v 11.13 2003/05/19 17:33:16 bostic Exp $ +# $Id: test071.tcl,v 11.14 2004/01/28 03:36:31 bostic Exp $ # # TEST test071 # TEST Test of DB_CONSUME (One consumer, 10000 items.) diff --git a/db/test/test072.tcl b/db/test/test072.tcl index 384ef4766..3a8ff7aa2 100644 --- a/db/test/test072.tcl +++ b/db/test/test072.tcl @@ -1,15 +1,16 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test072.tcl,v 11.31 2003/09/04 23:41:17 bostic Exp $ +# $Id: test072.tcl,v 11.34 2004/05/13 18:51:44 mjc Exp $ # # TEST test072 # TEST Test of cursor stability when duplicates are moved off-page. proc test072 { method {pagesize 512} {ndups 20} {tnum "072"} args } { source ./include.tcl global alphabet + global is_je_test set omethod [convert_method $method] set args [convert_args $method $args] @@ -49,7 +50,7 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum "072"} args } { puts "Skipping for method $method." return } else { - puts "\n Test of cursor stability when\ + puts "\nTest$tnum: Test of cursor stability when\ duplicates are moved off-page." } set pgindex [lsearch -exact $args "-pagesize"] @@ -64,6 +65,10 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum "072"} args } { set dlist [list "-dup" "-dup -dupsort"] set testid 0 foreach dupopt $dlist { + if { $is_je_test && $dupopt == "-dup" } { + continue + } + incr testid set duptestfile $basename$testid.db set db [eval {berkdb_open -create -mode 0644} \ diff --git a/db/test/test073.tcl b/db/test/test073.tcl index a7789f0c5..bac753ea6 100644 --- a/db/test/test073.tcl +++ b/db/test/test073.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. 
# -# $Id: test073.tcl,v 11.25 2003/01/08 05:54:02 bostic Exp $ +# $Id: test073.tcl,v 11.26 2004/01/28 03:36:31 bostic Exp $ # # TEST test073 # TEST Test of cursor stability on duplicate pages. diff --git a/db/test/test074.tcl b/db/test/test074.tcl index 26b2a9303..8302a2302 100644 --- a/db/test/test074.tcl +++ b/db/test/test074.tcl @@ -1,15 +1,16 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test074.tcl,v 11.19 2003/01/08 05:54:02 bostic Exp $ +# $Id: test074.tcl,v 11.22 2004/09/22 18:01:06 bostic Exp $ # # TEST test074 # TEST Test of DB_NEXT_NODUP. proc test074 { method {dir -nextnodup} {nitems 100} {tnum "074"} args } { source ./include.tcl global alphabet + global is_je_test global rand_init set omethod [convert_method $method] @@ -159,6 +160,9 @@ proc test074 { method {dir -nextnodup} {nitems 100} {tnum "074"} args } { } foreach opt { "-dup" "-dupsort" } { + if { $is_je_test && $opt == "-dup" } { + continue + } # # If we are using an env, then testfile should just be the diff --git a/db/test/test075.tcl b/db/test/test075.tcl index 0380686e1..20760944b 100644 --- a/db/test/test075.tcl +++ b/db/test/test075.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: test075.tcl,v 11.23 2003/01/08 05:54:03 bostic Exp $ +# $Id: test075.tcl,v 11.24 2004/01/28 03:36:31 bostic Exp $ # # TEST test075 # TEST Test of DB->rename(). diff --git a/db/test/test076.tcl b/db/test/test076.tcl index 2b0135eea..49827d353 100644 --- a/db/test/test076.tcl +++ b/db/test/test076.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: test076.tcl,v 1.21 2003/08/27 13:57:33 sue Exp $ +# $Id: test076.tcl,v 1.22 2004/01/28 03:36:31 bostic Exp $ # # TEST test076 # TEST Test creation of many small databases in a single environment. [#1528]. diff --git a/db/test/test077.tcl b/db/test/test077.tcl index 8396ef276..3c1fd869b 100644 --- a/db/test/test077.tcl +++ b/db/test/test077.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: test077.tcl,v 1.13 2003/01/22 20:12:43 sandstro Exp $ +# $Id: test077.tcl,v 1.14 2004/01/28 03:36:31 bostic Exp $ # # TEST test077 # TEST Test of DB_GET_RECNO [#1206]. diff --git a/db/test/test078.tcl b/db/test/test078.tcl index e7b8b48cb..549fc13c7 100644 --- a/db/test/test078.tcl +++ b/db/test/test078.tcl @@ -1,20 +1,22 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: test078.tcl,v 1.20 2003/01/08 05:54:03 bostic Exp $ +# $Id: test078.tcl,v 1.26 2004/09/22 18:01:06 bostic Exp $ # # TEST test078 # TEST Test of DBC->c_count(). [#303] proc test078 { method { nkeys 100 } { pagesize 512 } { tnum "078" } args } { source ./include.tcl - global alphabet rand_init + global alphabet + global is_je_test + global rand_init set args [convert_args $method $args] set omethod [convert_method $method] - puts "Test$tnum: Test of key counts." + puts "Test$tnum ($method): Test of key counts." 
berkdb srand $rand_init @@ -32,19 +34,20 @@ proc test078 { method { nkeys 100 } { pagesize 512 } { tnum "078" } args } { set env [lindex $args $eindex] set txnenv [is_txnenv $env] if { $txnenv == 1 } { + set nkeys 50 append args " -auto_commit " } set testdir [get_home $env] } cleanup $testdir $env - puts "\tTest$tnum.a: No duplicates, trivial answer." set pgindex [lsearch -exact $args "-pagesize"] if { $pgindex != -1 } { puts "Test078: skipping for specific pagesizes" return } - + puts "\tTest$tnum.a: No duplicates, trivial answer." + puts "\t\tTest$tnum.a.1: Populate database, verify dup counts." set db [eval {berkdb_open -create -mode 0644\ -pagesize $pagesize} $omethod $args {$testfile}] error_check_good db_open [is_valid_db $db] TRUE @@ -64,6 +67,29 @@ proc test078 { method { nkeys 100 } { pagesize 512 } { tnum "078" } args } { } error_check_good count.a [$db count $i] 1 } + + if { [is_rrecno $method] == 1 } { + error_check_good db_close.a [$db close] 0 + puts "\tTest$tnum.a2: Skipping remainder of test078 for -rrecno." + return + } + + puts "\t\tTest$tnum.a.2: Delete items, verify dup counts again." + for { set i 1 } { $i <= $nkeys } { incr i } { + if { $txnenv == 1 } { + set t [$env txn] + error_check_good txn [is_valid_txn $t $env] TRUE + set txn "-txn $t" + } + set ret [eval {$db del} $txn $i] + error_check_good del.a($i) $ret 0 + if { $txnenv == 1 } { + error_check_good txn [$t commit] 0 + } + error_check_good count.a [$db count $i] 0 + } + + error_check_good db_close.a [$db close] 0 if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } { @@ -72,9 +98,8 @@ proc test078 { method { nkeys 100 } { pagesize 512 } { tnum "078" } args } { return } - foreach tuple {{b sorted "-dup -dupsort"} {c unsorted "-dup"}} { - set letter [lindex $tuple 0] - set dupopt [lindex $tuple 2] + foreach {let descrip dupopt} \ + {b sorted "-dup -dupsort" c unsorted "-dup"} { if { $eindex == -1 } { set testfile $testdir/test$tnum-b.db @@ -82,13 +107,15 @@ proc test078 { method { nkeys 100 } { pagesize 512 } { tnum "078" } args } { } else { set testfile test$tnum-b.db set env [lindex $args $eindex] + if { $is_je_test && $dupopt == "-dup" } { + continue + } set testdir [get_home $env] } cleanup $testdir $env - puts "\tTest$tnum.$letter: Duplicates ([lindex $tuple 1])." - - puts "\t\tTest$tnum.$letter.1: Populating database." + puts "\tTest$tnum.$let: Duplicates ($descrip)." + puts "\t\tTest$tnum.$let.1: Populating database." set db [eval {berkdb_open -create -mode 0644\ -pagesize $pagesize} $dupopt $omethod $args {$testfile}] @@ -104,27 +131,115 @@ proc test078 { method { nkeys 100 } { pagesize 512 } { tnum "078" } args } { } set ret [eval {$db put} $txn {$i\ [pad_data $method $j$alphabet]}] - error_check_good put.$letter,$i $ret 0 + error_check_good put.$let,$i $ret 0 if { $txnenv == 1 } { error_check_good txn [$t commit] 0 } } } - puts -nonewline "\t\tTest$tnum.$letter.2: " - puts "Verifying dup counts on first dup." - for { set i 1 } { $i < $nkeys } { incr i } { - error_check_good count.$letter,$i \ + puts -nonewline "\t\tTest$tnum.$let.2: " + puts "Verifying duplicate counts." + for { set i 1 } { $i <= $nkeys } { incr i } { + error_check_good count.$let,$i \ [$db count $i] $i } - puts -nonewline "\t\tTest$tnum.$letter.3: " - puts "Verifying dup counts on random dup." 
- for { set i 1 } { $i < $nkeys } { incr i } { - set key [berkdb random_int 1 $nkeys] - error_check_good count.$letter,$i \ - [$db count $i] $i + puts -nonewline "\t\tTest$tnum.$let.3: " + puts "Delete every other dup by cursor, verify counts." + + # Delete every other item by cursor and check counts. + for { set i 1 } { $i <= $nkeys } { incr i } { + if { $txnenv == 1 } { + set t [$env txn] + error_check_good txn [is_valid_txn $t $env] TRUE + set txn "-txn $t" + } + set c [eval {$db cursor} $txn] + error_check_good db_cursor [is_valid_cursor $c $db] TRUE + set j 0 + + for { set ret [$c get -first]} { [llength $ret] > 0 } \ + { set ret [$c get -next]} { + set key [lindex [lindex $ret 0] 0] + if { $key == $i } { + set data [lindex [lindex $ret 0 ] 1] + set num [string range $data 0 \ + end-[string length $alphabet]] + if { [expr $num % 2] == 0 } { + error_check_good \ + c_del [$c del] 0 + incr j + } + if { $txnenv == 0 } { + error_check_good count.$let.$i-$j \ + [$db count $i] [expr $i - $j] + } + } + } + error_check_good curs_close [$c close] 0 + if { $txnenv == 1 } { + error_check_good txn_commit [$t commit] 0 + } + error_check_good count.$let.$i-$j \ + [$db count $i] [expr $i - $j] + } + + puts -nonewline "\t\tTest$tnum.$let.4: " + puts "Delete all items by cursor, verify counts." + for { set i 1 } { $i <= $nkeys } { incr i } { + if { $txnenv == 1 } { + set t [$env txn] + error_check_good txn [is_valid_txn $t $env] TRUE + set txn "-txn $t" + } + set c [eval {$db cursor} $txn] + error_check_good db_cursor [is_valid_cursor $c $db] TRUE + for { set ret [$c get -first]} { [llength $ret] > 0 } \ + { set ret [$c get -next]} { + set key [lindex [lindex $ret 0] 0] + if { $key == $i } { + error_check_good c_del [$c del] 0 + } + } + error_check_good curs_close [$c close] 0 + if { $txnenv == 1 } { + error_check_good txn_commit [$t commit] 0 + } + error_check_good db_count_zero [$db count $i] 0 + } + + puts -nonewline "\t\tTest$tnum.$let.5: " + puts "Add back one item, verify counts." + for { set i 1 } { $i <= $nkeys } { incr i } { + if { $txnenv == 1 } { + set t [$env txn] + error_check_good txn [is_valid_txn $t $env] TRUE + set txn "-txn $t" + } + set ret [eval {$db put} $txn {$i\ + [pad_data $method $alphabet]}] + error_check_good put.$let,$i $ret 0 + if { $txnenv == 1 } { + error_check_good txn [$t commit] 0 + } + error_check_good add_one [$db count $i] 1 + } + + puts -nonewline "\t\tTest$tnum.$let.6: " + puts "Delete remaining entries, verify counts." + for { set i 1 } { $i <= $nkeys } { incr i } { + if { $txnenv == 1 } { + set t [$env txn] + error_check_good txn [is_valid_txn $t $env] TRUE + set txn "-txn $t" + } + error_check_good db_del [eval {$db del} $txn {$i}] 0 + if { $txnenv == 1 } { + error_check_good txn [$t commit] 0 + } + error_check_good count.$let.$i [$db count $i] 0 } - error_check_good db_close.$letter [$db close] 0 + error_check_good db_close.$let [$db close] 0 } } diff --git a/db/test/test079.tcl b/db/test/test079.tcl index 4c2c866b1..d5dbc3304 100644 --- a/db/test/test079.tcl +++ b/db/test/test079.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: test079.tcl,v 11.11 2003/01/08 05:54:04 bostic Exp $ +# $Id: test079.tcl,v 11.12 2004/01/28 03:36:31 bostic Exp $ # # TEST test079 # TEST Test of deletes in large trees. (test006 w/ sm. pagesize). 
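The expanded test078 above exercises DBC->c_count through the Tcl "$db count" wrapper while duplicates are deleted by cursor. A condensed sketch of the delete-and-verify loop, assuming $db was opened with duplicates, $key has one or more duplicate data items, and the usual test helpers are loaded:

        set c [$db cursor]
        error_check_good db_cursor [is_valid_cursor $c $db] TRUE
        for { set ret [$c get -first] } { [llength $ret] > 0 } \
            { set ret [$c get -next] } {
                set k [lindex [lindex $ret 0] 0]
                if { $k == $key } {
                        error_check_good c_del [$c del] 0
                }
        }
        error_check_good curs_close [$c close] 0
        # Every duplicate for $key was deleted, so the count drops to zero.
        error_check_good count_zero [$db count $key] 0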
diff --git a/db/test/test080.tcl b/db/test/test080.tcl index 59aa04685..cfa6d3653 100644 --- a/db/test/test080.tcl +++ b/db/test/test080.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: test080.tcl,v 11.21 2003/10/03 13:28:17 sandstro Exp $ +# $Id: test080.tcl,v 11.22 2004/01/28 03:36:31 bostic Exp $ # # TEST test080 # TEST Test of DB->remove() diff --git a/db/test/test081.tcl b/db/test/test081.tcl index f3a151be2..a9c7f5cfc 100644 --- a/db/test/test081.tcl +++ b/db/test/test081.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test081.tcl,v 11.8 2003/01/08 05:54:04 bostic Exp $ +# $Id: test081.tcl,v 11.9 2004/01/28 03:36:31 bostic Exp $ # # TEST test081 # TEST Test off-page duplicates and overflow pages together with diff --git a/db/test/test082.tcl b/db/test/test082.tcl index 70dd77654..fb4d71c5c 100644 --- a/db/test/test082.tcl +++ b/db/test/test082.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: test082.tcl,v 11.7 2003/01/08 05:54:04 bostic Exp $ +# $Id: test082.tcl,v 11.8 2004/01/28 03:36:31 bostic Exp $ # # TEST test082 # TEST Test of DB_PREV_NODUP (uses test074). diff --git a/db/test/test083.tcl b/db/test/test083.tcl index 081c1aace..7e4a8b960 100644 --- a/db/test/test083.tcl +++ b/db/test/test083.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: test083.tcl,v 11.15 2003/04/18 14:39:10 sandstro Exp $ +# $Id: test083.tcl,v 11.16 2004/01/28 03:36:31 bostic Exp $ # # TEST test083 # TEST Test of DB->key_range. diff --git a/db/test/test084.tcl b/db/test/test084.tcl index 71e570903..036c1c411 100644 --- a/db/test/test084.tcl +++ b/db/test/test084.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: test084.tcl,v 11.14 2003/08/28 19:59:15 sandstro Exp $ +# $Id: test084.tcl,v 11.15 2004/01/28 03:36:31 bostic Exp $ # # TEST test084 # TEST Basic sanity test (test001) with large (64K) pages. diff --git a/db/test/test085.tcl b/db/test/test085.tcl index 6dc775278..373db33a5 100644 --- a/db/test/test085.tcl +++ b/db/test/test085.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: test085.tcl,v 1.15 2003/01/08 05:54:05 bostic Exp $ +# $Id: test085.tcl,v 1.18 2004/09/20 17:29:32 carol Exp $ # # TEST test085 # TEST Test of cursor behavior when a cursor is pointing to a deleted @@ -69,7 +69,7 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum "085"} args } { } # Repeat the test with both on-page and off-page numbers of dups. 
- foreach ndups "$onp $offp" { + foreach ndups "$onp $offp" { # Put operations we want to test on a cursor set to the # deleted item, the key to use with them, and what should # come before and after them given a placement of @@ -78,8 +78,6 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum "085"} args } { set putops { {{-before} "" $predatum {[test085_ddatum 0]} beginning} {{-before} "" {[test085_ddatum $final]} $postdatum end} - {{-current} "" $predatum {[test085_ddatum 0]} beginning} - {{-current} "" {[test085_ddatum $final]} $postdatum end} {{-keyfirst} $key $predatum {[test085_ddatum 0]} beginning} {{-keyfirst} $key $predatum {[test085_ddatum 0]} end} {{-keylast} $key {[test085_ddatum $final]} $postdatum beginning} @@ -153,7 +151,8 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum "085"} args } { eval set edata [lindex $pair 3] set dbt [eval $dbc get $op $gargs] - if { [string compare $ekey EMPTYLIST] == 0 } { + if { [string compare $ekey EMPTYLIST] == 0 || \ + [string compare $op -current] == 0 } { error_check_good dbt($op,$ndups) \ [llength $dbt] 0 } else { diff --git a/db/test/test086.tcl b/db/test/test086.tcl index af7beb054..8b2f7db81 100644 --- a/db/test/test086.tcl +++ b/db/test/test086.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test086.tcl,v 11.11 2003/03/27 16:34:48 sandstro Exp $ +# $Id: test086.tcl,v 11.12 2004/01/28 03:36:31 bostic Exp $ # # TEST test086 # TEST Test of cursor stability across btree splits/rsplits with diff --git a/db/test/test087.tcl b/db/test/test087.tcl index 62d55082d..7501f4ce3 100644 --- a/db/test/test087.tcl +++ b/db/test/test087.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test087.tcl,v 11.17 2003/01/08 05:54:06 bostic Exp $ +# $Id: test087.tcl,v 11.19 2004/01/28 03:36:31 bostic Exp $ # # TEST test087 # TEST Test of cursor stability when converting to and modifying @@ -56,7 +56,7 @@ proc test087 { method {pagesize 512} {ndups 50} {tnum "087"} args } { puts "Skipping for method $method." return } else { - puts "Cursor stability on dup. pages w/ aborts." + puts "Test$tnum: Cursor stability on dup. pages w/ aborts." } set env [eval {berkdb_env -create -home $testdir -txn} $encargs] diff --git a/db/test/test088.tcl b/db/test/test088.tcl index 0760053ef..2d3ec7396 100644 --- a/db/test/test088.tcl +++ b/db/test/test088.tcl @@ -1,16 +1,18 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test088.tcl,v 11.13 2003/01/08 05:54:07 bostic Exp $ +# $Id: test088.tcl,v 11.15 2004/05/13 18:51:44 mjc Exp $ # # TEST test088 # TEST Test of cursor stability across btree splits with very # TEST deep trees (a variant of test048). [#2514] proc test088 { method args } { - global errorCode alphabet source ./include.tcl + global alphabet + global errorCode + global is_je_test set tstn 088 set args [convert_args $method $args] @@ -119,7 +121,7 @@ proc test088 { method args } { puts "\tTest$tstn.e: Make sure splits happened." # XXX cannot execute stat in presence of txns and cursors. 
- if { $txnenv == 0 } { + if { $txnenv == 0 && !$is_je_test } { error_check_bad stat:check-split [is_substr [$db stat] \ "{{Internal pages} 0}"] 1 } diff --git a/db/test/test089.tcl b/db/test/test089.tcl index 5b229137d..3eb2cd88d 100644 --- a/db/test/test089.tcl +++ b/db/test/test089.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test089.tcl,v 11.8 2003/09/04 23:41:17 bostic Exp $ +# $Id: test089.tcl,v 11.9 2004/01/28 03:36:32 bostic Exp $ # # TEST test089 # TEST Concurrent Data Store test (CDB) diff --git a/db/test/test090.tcl b/db/test/test090.tcl index 7b2a9ce17..7c46c56c0 100644 --- a/db/test/test090.tcl +++ b/db/test/test090.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: test090.tcl,v 11.14 2003/08/28 19:59:15 sandstro Exp $ +# $Id: test090.tcl,v 11.15 2004/01/28 03:36:32 bostic Exp $ # # TEST test090 # TEST Test for functionality near the end of the queue using test001. diff --git a/db/test/test091.tcl b/db/test/test091.tcl index c2a7a1d84..81cabb867 100644 --- a/db/test/test091.tcl +++ b/db/test/test091.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: test091.tcl,v 11.9 2003/01/08 05:54:09 bostic Exp $ +# $Id: test091.tcl,v 11.10 2004/01/28 03:36:32 bostic Exp $ # # TEST test091 # TEST Test of DB_CONSUME_WAIT. diff --git a/db/test/test092.tcl b/db/test/test092.tcl index 4ec1f25c6..ef4c822d8 100644 --- a/db/test/test092.tcl +++ b/db/test/test092.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test092.tcl,v 11.14 2003/01/08 05:54:09 bostic Exp $ +# $Id: test092.tcl,v 11.18 2004/09/22 18:01:06 bostic Exp $ # # TEST test092 # TEST Test of DB_DIRTY_READ [#3395] @@ -109,10 +109,16 @@ proc test092 { method {nentries 1000} args } { set dbcdr1 [$dbdr cursor -dirty] error_check_good dbcurs:dbdr1 [is_valid_cursor $dbcdr1 $dbdr] TRUE + # Test that $db stat can use -dirty flag. + puts "\tTest092.c: Smoke test for db_stat -txn -dirty" + if { [catch {set statret [$dbcl stat -txn $t -dirty]} res] } { + puts "FAIL: db_stat -txn -dirty returned $res" + } + # # Now that we have all of our handles, change all the data in there # to be the key and data the same, but data is capitalized. 
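test092 above now also smoke-tests DB->stat with the -dirty flag while a write transaction is open. A rough sketch of that combination, assuming $db was opened with the -dirty flag in a transactional environment $env:

        # Hold an uncommitted update in transaction $t.
        set t [$env txn]
        error_check_good txn_begin [is_valid_txn $t $env] TRUE
        set ret [eval {$db put} -txn $t {1 NEWDATA}]
        error_check_good put $ret 0

        # A dirty-read cursor may see the uncommitted value.
        set dbc [$db cursor -dirty]
        error_check_good cursor [is_valid_cursor $dbc $db] TRUE

        # The stat call should accept the same flags without error.
        if { [catch {set statret [$db stat -txn $t -dirty]} res] } {
                puts "FAIL: db_stat -txn -dirty returned $res"
        }
        error_check_good cursor_close [$dbc close] 0
        error_check_good txn_abort [$t abort] 0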
- puts "\tTest092.c: put/get data within a txn" + puts "\tTest092.d: put/get data within a txn" set gflags "" if { [is_record_based $method] == 1 } { set checkfunc test092dr_recno.check @@ -156,13 +162,13 @@ proc test092 { method {nentries 1000} args } { } close $did - puts "\tTest092.d: Check dirty data using dirty txn and clean db/cursor" + puts "\tTest092.e: Check dirty data using dirty txn and clean db/cursor" dump_file_walk $dbccl $t1 $checkfunc "-first" "-next" - puts "\tTest092.e: Check dirty data using -dirty cget flag" + puts "\tTest092.f: Check dirty data using -dirty cget flag" dump_file_walk $dbcdr0 $t2 $checkfunc "-first" "-next" "-dirty" - puts "\tTest092.f: Check dirty data using -dirty cursor" + puts "\tTest092.g: Check dirty data using -dirty cursor" dump_file_walk $dbcdr1 $t3 $checkfunc "-first" "-next" # @@ -176,7 +182,7 @@ proc test092 { method {nentries 1000} args } { # # Now abort the modifying transaction and rerun the data checks. # - puts "\tTest092.g: Aborting the write-txn" + puts "\tTest092.h: Aborting the write-txn" error_check_good txnabort [$t abort] 0 set dbccl [$dbcl cursor -txn $tdr] @@ -193,13 +199,13 @@ proc test092 { method {nentries 1000} args } { } else { set checkfunc test092cl.check } - puts "\tTest092.h: Check clean data using -dirty cget flag" + puts "\tTest092.i: Check clean data using -dirty cget flag" dump_file_walk $dbccl $t1 $checkfunc "-first" "-next" - puts "\tTest092.i: Check clean data using -dirty cget flag" + puts "\tTest092.j: Check clean data using -dirty cget flag" dump_file_walk $dbcdr0 $t2 $checkfunc "-first" "-next" "-dirty" - puts "\tTest092.j: Check clean data using -dirty cursor" + puts "\tTest092.k: Check clean data using -dirty cursor" dump_file_walk $dbcdr1 $t3 $checkfunc "-first" "-next" # Clean up our handles diff --git a/db/test/test093.tcl b/db/test/test093.tcl index b1ff31d6a..3ed4b5964 100644 --- a/db/test/test093.tcl +++ b/db/test/test093.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test093.tcl,v 11.22 2003/01/08 05:54:10 bostic Exp $ +# $Id: test093.tcl,v 11.23 2004/01/28 03:36:32 bostic Exp $ # # TEST test093 # TEST Test using set_bt_compare. diff --git a/db/test/test094.tcl b/db/test/test094.tcl index 1f4cd558a..20f2b3af3 100644 --- a/db/test/test094.tcl +++ b/db/test/test094.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test094.tcl,v 11.19 2003/10/17 13:38:19 sandstro Exp $ +# $Id: test094.tcl,v 11.21 2004/06/29 14:26:17 carol Exp $ # # TEST test094 # TEST Test using set_dup_compare. @@ -57,7 +57,7 @@ proc test094 { method {nentries 10000} {ndups 10} {tnum "094"} args} { cleanup $testdir $env - set db [eval {berkdb_open_noerr -dupcompare test094_cmp \ + set db [eval {berkdb_open -dupcompare test094_cmp \ -dup -dupsort -create -mode 0644} $omethod $dbargs {$testfile}] error_check_good dbopen [is_valid_db $db] TRUE diff --git a/db/test/test095.tcl b/db/test/test095.tcl index 8f26b08dd..9c62a6a51 100644 --- a/db/test/test095.tcl +++ b/db/test/test095.tcl @@ -1,15 +1,16 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. 
# -# $Id: test095.tcl,v 11.26 2003/09/11 14:12:34 sue Exp $ +# $Id: test095.tcl,v 11.30 2004/09/22 18:01:06 bostic Exp $ # # TEST test095 # TEST Bulk get test for methods supporting dups. [#2934] proc test095 { method {tnum "095"} args } { - global is_qnx_test source ./include.tcl + global is_je_test + global is_qnx_test set args [convert_args $method $args] set omethod [convert_method $method] @@ -62,6 +63,10 @@ proc test095 { method {tnum "095"} args } { # We run the meat of the test twice: once with unsorted dups, # once with sorted dups. foreach { dflag sort } { -dup unsorted {-dup -dupsort} sorted } { + if { $is_je_test && $sort == "unsorted" } { + continue + } + set testfile $basename-$sort.db set did [open $dict] @@ -202,16 +207,16 @@ proc t95_gettest_body { db tnum letter bufsize expectfail usecursor } { } } - # If we expect a failure, be more tolerant if the - # above fails; just make sure it's an ENOMEM or - # and EINVAL (if the buffer is smaller than the - # pagesize, it's EINVAL), mark it, and move along. + # If we expect a failure, be more tolerant if the above + # fails; just make sure it's a DB_BUFFER_SMALL or an + # EINVAL (if the buffer is smaller than the pagesize, + # it's EINVAL), mark it, and move along. if { $expectfail != 0 && $ret != 0 } { - if { [is_substr $errorCode ENOMEM] != 1 && \ + if { [is_substr $errorCode DB_BUFFER_SMALL] != 1 && \ [is_substr $errorCode EINVAL] != 1 } { error_check_good \ "$flag failure errcode" \ - $errorCode "ENOMEM or EINVAL" + $errorCode "DB_BUFFER_SMALL or EINVAL" } set allpassed FALSE continue @@ -228,7 +233,7 @@ proc t95_gettest_body { db tnum letter bufsize expectfail usecursor } { if { $expectfail == 1 } { error_check_good allpassed $allpassed FALSE puts "\t\tTest$tnum.$letter:\ - returned at least one ENOMEM (as expected)" + returned at least one DB_BUFFER_SMALL (as expected)" } else { error_check_good allpassed $allpassed TRUE puts "\t\tTest$tnum.$letter: succeeded (as expected)" diff --git a/db/test/test096.tcl b/db/test/test096.tcl index e1cbbb0d2..ac8450069 100644 --- a/db/test/test096.tcl +++ b/db/test/test096.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: test096.tcl,v 11.24 2003/09/04 23:41:18 bostic Exp $ +# $Id: test096.tcl,v 11.26 2004/06/10 17:21:20 carol Exp $ # # TEST test096 # TEST Db->truncate test. @@ -16,6 +16,7 @@ # TEST For btree and hash, do the same in a database with offpage dups. proc test096 { method {pagesize 512} {nentries 1000} {ndups 19} args} { global fixed_len + global alphabet source ./include.tcl set orig_fixed_len $fixed_len @@ -119,6 +120,32 @@ proc test096 { method {pagesize 512} {nentries 1000} {ndups 19} args} { error_check_good dbclose [$db close] 0 error_check_good dbverify [verify_dir $testdir "\tTest096.h: "] 0 + puts "\tTest096.i: Check proper handling of overflow pages." + # Large keys and data compared to page size guarantee + # overflow pages. + if { [is_fixed_length $method] == 1 } { + puts "Skipping overflow test for fixed-length method." 
+ } else { + set overflowfile overflow096.db + set data [repeat $alphabet 600] + set db [eval {berkdb_open -create -auto_commit -pagesize 512 \ + -env $env $omethod -mode 0644} $args $overflowfile] + error_check_good db_open [is_valid_db $db] TRUE + + set noverflows 100 + for { set i 1 } { $i <= $noverflows } { incr i } { + set ret [eval {$db put} -auto_commit \ + $i [chop_data $method "$i$data"]] + } + + set stat [$db stat] + error_check_bad stat:overflow [is_substr $stat \ + "{{Overflow pages} 0}"] 1 + + error_check_good overflow_truncate [$db truncate] $noverflows + error_check_good overflow_close [$db close] 0 + } + # Remove database and create a new one with dups. Skip # the rest of the test for methods not supporting dups. if { [is_record_based $method] == 1 || \ @@ -130,8 +157,9 @@ proc test096 { method {pagesize 512} {nentries 1000} {ndups 19} args} { return } set ret [berkdb dbremove -env $env -auto_commit $testfile] + set ret [berkdb dbremove -env $env -auto_commit $overflowfile] - puts "\tTest096.i: Create $nentries entries with $ndups duplicates" + puts "\tTest096.j: Create $nentries entries with $ndups duplicates" set db [eval {berkdb_open -pagesize $pagesize -dup -auto_commit \ -create -env $env $omethod -mode 0644} $args $testfile] error_check_good db_open [is_valid_db $db] TRUE @@ -147,7 +175,7 @@ proc test096 { method {pagesize 512} {nentries 1000} {ndups 19} args} { set txn "-txn $t" dup_check $db $txn $t1 $dlist error_check_good txn [$t commit] 0 - puts "\tTest096.j: Verify off page duplicates status" + puts "\tTest096.k: Verify off page duplicates status" set stat [$db stat] error_check_bad stat:offpage [is_substr $stat \ "{{Duplicate pages} 0}"] 1 @@ -155,7 +183,7 @@ proc test096 { method {pagesize 512} {nentries 1000} {ndups 19} args} { set recs [expr $ndups * $nentries] error_check_good dbclose [$db close] 0 - puts "\tTest096.k: Truncate database in a txn then abort" + puts "\tTest096.l: Truncate database in a txn then abort" txn_truncate $env $omethod $testfile $recs abort set db [eval {berkdb_open -auto_commit -env $env} $args $testfile] @@ -165,7 +193,7 @@ proc test096 { method {pagesize 512} {nentries 1000} {ndups 19} args} { error_check_good number_of_entries $number $recs error_check_good dbclose [$db close] 0 - puts "\tTest096.l: Truncate database in a txn then commit" + puts "\tTest096.m: Truncate database in a txn then commit" txn_truncate $env $omethod $testfile $recs commit set db [berkdb_open -auto_commit -env $env $testfile] @@ -175,11 +203,11 @@ proc test096 { method {pagesize 512} {nentries 1000} {ndups 19} args} { error_check_good dbclose [$db close] 0 set testdir [get_home $env] - error_check_good dbverify [verify_dir $testdir "\tTest096.m: "] 0 + error_check_good dbverify [verify_dir $testdir "\tTest096.n: "] 0 # Remove database, and create a new one with dups. Test # truncate + write within a transaction. 
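The new Test096.i block above forces overflow pages by storing data much larger than the 512-byte page, confirms them via DB->stat, and checks that DB->truncate returns the number of discarded records. A minimal standalone sketch of the same idea for a non-fixed-length access method, assuming the surrounding test variables ($env, $omethod, $args, $method, $alphabet) and a scratch file name of overflow.db:

        set data [repeat $alphabet 600]
        set db [eval {berkdb_open -create -auto_commit -pagesize 512 \
            -env $env $omethod -mode 0644} $args overflow.db]
        error_check_good db_open [is_valid_db $db] TRUE

        set ret [eval {$db put} -auto_commit 1 [chop_data $method $data]]
        error_check_good put $ret 0

        # Stat should report a non-zero overflow page count ...
        set stat [$db stat]
        error_check_bad stat:overflow [is_substr $stat "{{Overflow pages} 0}"] 1

        # ... and truncate reports one record discarded.
        error_check_good truncate [$db truncate] 1
        error_check_good db_close [$db close] 0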
- puts "\tTest096.n: Create $nentries entries with $ndups duplicates" + puts "\tTest096.o: Create $nentries entries with $ndups duplicates" set ret [berkdb dbremove -env $env -auto_commit $testfile] set db [eval {berkdb_open -pagesize $pagesize -dup -auto_commit \ -create -env $env $omethod -mode 0644} $args $testfile] @@ -196,7 +224,7 @@ proc test096 { method {pagesize 512} {nentries 1000} {ndups 19} args} { set txn "-txn $t" dup_check $db $txn $t1 $dlist error_check_good txn [$t commit] 0 - puts "\tTest096.o: Verify off page duplicates status" + puts "\tTest096.p: Verify off page duplicates status" set stat [$db stat] error_check_bad stat:offpage [is_substr $stat \ "{{Duplicate pages} 0}"] 1 @@ -204,7 +232,7 @@ proc test096 { method {pagesize 512} {nentries 1000} {ndups 19} args} { set recs [expr $ndups * $nentries] error_check_good dbclose [$db close] 0 - puts "\tTest096.p: Truncate and write in a txn, then abort" + puts "\tTest096.q: Truncate and write in a txn, then abort" txn_truncate $env $omethod $testfile $recs abort 1 set db [eval {berkdb_open -auto_commit -env $env} $args $testfile] @@ -213,7 +241,7 @@ proc test096 { method {pagesize 512} {nentries 1000} {ndups 19} args} { error_check_good number_of_entries $number $recs error_check_good dbclose [$db close] 0 - puts "\tTest096.q: Truncate and write in a txn, then commit" + puts "\tTest096.r: Truncate and write in a txn, then commit" txn_truncate $env $omethod $testfile $recs commit 1 set db [berkdb_open -auto_commit -env $env $testfile] @@ -222,8 +250,29 @@ proc test096 { method {pagesize 512} {nentries 1000} {ndups 19} args} { error_check_good number_of_entries $number [expr $recs / 2] error_check_good dbclose [$db close] 0 + puts "\tTest096.s: Check overflow pages with dups." + set ndups 3 + set db [eval {berkdb_open -create -auto_commit -pagesize 512 \ + -env $env $omethod -dup -mode 0644} $args $overflowfile] + error_check_good db_open [is_valid_db $db] TRUE + + for { set i 1 } { $i <= $noverflows } { incr i } { + for { set j 0 } { $j < $ndups } { incr j } { + set ret [eval {$db put} -auto_commit \ + $i [chop_data $method "$i.$j$data"]] + } + } + + set stat [$db stat] + error_check_bad stat:overflow [is_substr $stat \ + "{{Overflow pages} 0}"] 1 + + set nentries [expr $noverflows * $ndups] + error_check_good overflow_truncate [$db truncate] $nentries + error_check_good overflow_close [$db close] 0 + set testdir [get_home $env] - error_check_good dbverify [verify_dir $testdir "\tTest096.r: "] 0 + error_check_good dbverify [verify_dir $testdir "\tTest096.t: "] 0 if { $closeenv == 1 } { error_check_good envclose [$env close] 0 @@ -319,3 +368,4 @@ proc txn_truncate { env method testfile nentries op {write 0}} { error_check_good txn$op [$txn $op] 0 error_check_good db_close [$db close] 0 } + diff --git a/db/test/test097.tcl b/db/test/test097.tcl index ac3f23905..2a6234e00 100644 --- a/db/test/test097.tcl +++ b/db/test/test097.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test097.tcl,v 11.10 2003/01/08 05:54:11 bostic Exp $ +# $Id: test097.tcl,v 11.11 2004/01/28 03:36:32 bostic Exp $ # # TEST test097 # TEST Open up a large set of database files simultaneously. diff --git a/db/test/test098.tcl b/db/test/test098.tcl index 4bcf46e95..af6b6a6c6 100644 --- a/db/test/test098.tcl +++ b/db/test/test098.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. 
# -# Copyright (c) 2002-2003 +# Copyright (c) 2002-2004 # Sleepycat Software. All rights reserved. # -# $Id: test098.tcl,v 1.6 2003/01/08 05:54:11 bostic Exp $ +# $Id: test098.tcl,v 1.7 2004/01/28 03:36:32 bostic Exp $ # # TEST test098 # TEST Test of DB_GET_RECNO and secondary indices. Open a primary and diff --git a/db/test/test099.tcl b/db/test/test099.tcl index aa3f0d5b5..9bdc0d7af 100644 --- a/db/test/test099.tcl +++ b/db/test/test099.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: test099.tcl,v 1.6 2003/09/04 23:41:18 bostic Exp $ +# $Id: test099.tcl,v 1.7 2004/01/28 03:36:32 bostic Exp $ # # TEST test099 # TEST diff --git a/db/test/test100.tcl b/db/test/test100.tcl index 69dd0c546..9d87331dc 100644 --- a/db/test/test100.tcl +++ b/db/test/test100.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: test100.tcl,v 11.4 2003/03/14 16:49:58 sue Exp $ +# $Id: test100.tcl,v 11.5 2004/01/28 03:36:32 bostic Exp $ # # TEST test100 # TEST Test for functionality near the end of the queue diff --git a/db/test/test101.tcl b/db/test/test101.tcl index 419307c06..63384dd17 100644 --- a/db/test/test101.tcl +++ b/db/test/test101.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: test101.tcl,v 11.5 2003/09/04 23:41:18 bostic Exp $ +# $Id: test101.tcl,v 11.6 2004/01/28 03:36:32 bostic Exp $ # # TEST test101 # TEST Test for functionality near the end of the queue diff --git a/db/test/test102.tcl b/db/test/test102.tcl index 66414639c..7fd7e50bc 100644 --- a/db/test/test102.tcl +++ b/db/test/test102.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: test102.tcl,v 1.7 2003/09/04 23:41:18 bostic Exp $ +# $Id: test102.tcl,v 1.9 2004/02/05 02:25:24 mjc Exp $ # # TEST test102 # TEST Bulk get test for record-based methods. [#2934] @@ -161,16 +161,16 @@ proc t102_gettest_body { db tnum letter bufsize expectfail usecursor } { } } - # If we expect a failure, be more tolerant if the - # above fails; just make sure it's an ENOMEM or - # and EINVAL (if the buffer is smaller than the - # pagesize, it's EINVAL), mark it, and move along. + # If we expect a failure, be more tolerant if the above + # fails; just make sure it's a DB_BUFFER_SMALL or an + # EINVAL (if the buffer is smaller than the pagesize, + # it's EINVAL), mark it, and move along. 
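The bulk-get error handling rewritten here in test102 (and in test095 above) tolerates DB_BUFFER_SMALL rather than ENOMEM. A sketch of the tolerant check, assuming an open cursor $dbc, a possibly undersized $bufsize, and that the Tcl bulk flag in use is -multi (the exact get flags vary from test to test):

        set errorCode NONE
        set ret [catch {eval {$dbc get} -multi $bufsize -first} res]
        if { $ret != 0 } {
                # Undersized buffers surface as DB_BUFFER_SMALL, or as
                # EINVAL when the buffer is smaller than the page size.
                if { [is_substr $errorCode DB_BUFFER_SMALL] != 1 && \
                    [is_substr $errorCode EINVAL] != 1 } {
                        error_check_good get_errcode \
                            $errorCode "DB_BUFFER_SMALL or EINVAL"
                }
        }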
if { $expectfail != 0 && $ret != 0 } { - if { [is_substr $errorCode ENOMEM] != 1 && \ + if { [is_substr $errorCode DB_BUFFER_SMALL] != 1 && \ [is_substr $errorCode EINVAL] != 1 } { error_check_good \ "$flag failure errcode" \ - $errorCode "ENOMEM or EINVAL" + $errorCode "DB_BUFFER_SMALL or EINVAL" } set allpassed FALSE continue @@ -181,7 +181,7 @@ proc t102_gettest_body { db tnum letter bufsize expectfail usecursor } { if { $expectfail == 1 } { error_check_good allpassed $allpassed FALSE puts "\t\tTest$tnum.$letter:\ - returned at least one ENOMEM (as expected)" + returned at least one DB_BUFFER_SMALL (as expected)" } else { error_check_good allpassed $allpassed TRUE puts "\t\tTest$tnum.$letter: succeeded (as expected)" diff --git a/db/test/test103.tcl b/db/test/test103.tcl index b1da0f977..5fb9562bc 100644 --- a/db/test/test103.tcl +++ b/db/test/test103.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2003 +# Copyright (c) 2003-2004 # Sleepycat Software. All rights reserved. # -# $Id: test103.tcl,v 1.6 2003/09/04 23:41:18 bostic Exp $ +# $Id: test103.tcl,v 1.7 2004/01/28 03:36:32 bostic Exp $ # # TEST test103 # TEST Test bulk get when record numbers wrap around. diff --git a/db/test/test106.tcl b/db/test/test106.tcl index 6edcd46cf..7a8163fc5 100644 --- a/db/test/test106.tcl +++ b/db/test/test106.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2003 +# Copyright (c) 2003-2004 # Sleepycat Software. All rights reserved. # -# $Id: test106.tcl,v 1.3 2003/09/04 23:41:19 bostic Exp $ +# $Id: test106.tcl,v 1.4 2004/01/28 03:36:32 bostic Exp $ # # TEST test106 # TEST diff --git a/db/test/test107.tcl b/db/test/test107.tcl new file mode 100644 index 000000000..bddf9caf4 --- /dev/null +++ b/db/test/test107.tcl @@ -0,0 +1,163 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# $Id: test107.tcl,v 11.14 2004/09/22 18:01:06 bostic Exp $ +# +# TEST test107 +# TEST Test of degree 2 isolation. [#8689] +# TEST +# TEST We set up a database. Open a degree 2 transactional +# TEST cursor and a regular transactional cursor on it. +# TEST Position each cursor on one page, and do a put to +# TEST a different page. +# TEST +# TEST Make sure that: +# TEST - the put succeeds if we are using degree 2. +# TEST - the put deadlocks within a regular transaction with +# TEST a regular cursor. +# TEST +proc test107 { method args } { + source ./include.tcl + global fixed_len + set tnum "107" + + # If we are using an env, then skip this test. It needs its own. + set eindex [lsearch -exact $args "-env"] + if { $eindex != -1 } { + incr eindex + set env [lindex $args $eindex] + puts "Test$tnum skipping for env $env" + return + } + + # We'll make the data pretty good sized so we can easily + # move to a different page. Make the data size a little + # smaller for fixed-length methods so it works with + # pagesize 512 tests. + set data_size 512 + set orig_fixed_len $fixed_len + set fixed_len [expr $data_size - [expr $data_size / 8]] + set args [convert_args $method $args] + set encargs "" + set args [split_encargs $args encargs] + set omethod [convert_method $method] + + puts "Test$tnum: Degree 2 Isolation Test ($method $args)" + set testfile test$tnum.db + env_cleanup $testdir + + # Create the environment. 
+ set timeout 10 + set env [eval {berkdb_env -create -mode 0644 -lock \ + -cachesize { 0 1048576 1 } \ + -lock_timeout $timeout -txn} $encargs -home $testdir] + error_check_good env_open [is_valid_env $env] TRUE + + # Create the database. + set db [eval {berkdb_open -env $env -create -auto_commit\ + -mode 0644 $omethod} $args {$testfile}] + error_check_good dbopen [is_valid_db $db] TRUE + + puts "\tTest$tnum.a: put loop" + # The data doesn't need to change from key to key. + # Use numerical keys so we don't need special handling for + # record-based methods. + set origdata "data" + set len [string length $origdata] + set data [repeat $origdata [expr $data_size / $len]] + set nentries 200 + set txn [$env txn] + for { set i 1 } { $i <= $nentries } { incr i } { + set key $i + set ret [eval {$db put} \ + -txn $txn {$key [chop_data $method $data]}] + error_check_good put:$db $ret 0 + } + error_check_good txn_commit [$txn commit] 0 + + puts "\tTest$tnum.b: Start deadlock detector." + # Start up a deadlock detector so we can break self-deadlocks. + set dpid [exec $util_path/db_deadlock -v -ae -t 1.0 \ + -h $testdir >& $testdir/dd.out &] + + puts "\tTest$tnum.c: Open txns and cursors." + # We can get degree 2 isolation with either a degree 2 + # txn or a degree 2 cursor or both. However, the case + # of a regular txn and regular cursor should deadlock. + # We put this case last so it won't deadlock the cases + # which should succeed. + # + # Cursors and transactions are named according to + # whether they specify degree 2 (c2, t2) or not (c, t). + # Set up all four possibilities. + # + set t [$env txn] + error_check_good reg_txn_begin [is_valid_txn $t $env] TRUE + set t2 [$env txn -degree_2] + error_check_good deg2_txn_begin [is_valid_txn $t2 $env] TRUE + + set c2t [$db cursor -txn $t -degree_2] + error_check_good valid_c2t [is_valid_cursor $c2t $db] TRUE + set ct2 [$db cursor -txn $t2] + error_check_good valid_ct2 [is_valid_cursor $ct2 $db] TRUE + set c2t2 [$db cursor -txn $t2 -degree_2] + error_check_good valid_c2t2 [is_valid_cursor $c2t2 $db] TRUE + set ct [$db cursor -txn $t] + error_check_good valid_ct [is_valid_cursor $ct $db] TRUE + + set curslist [list $c2t $ct2 $c2t2 $ct] + set newdata newdata + set offpagekey [expr $nentries - 1] + + # For one cursor at a time, read the first item in the + # database, then move to an item on a different page. + # Put a new value in the first item on the first page. This + # should work with degree 2 isolation and hang without it. + # + # Wrap the whole thing in a catch statement so we still get + # around to killing the deadlock detector and cleaning up + # even if the test fails. + # + puts "\tTest$tnum.d: Test for degree 2 isolation." + set status [catch { + foreach cursor $curslist { + set retfirst [$cursor get -first] + set firstkey [lindex [lindex $retfirst 0] 0] + set ret [$cursor get -set $offpagekey] + error_check_good cursor_off_page \ + [lindex [lindex $ret 0] 0] $offpagekey + if { [catch {eval {$db put} -auto_commit \ + $firstkey [chop_data $method $newdata]} res]} { + error_check_good error_is_deadlock \ + [is_substr $res DB_LOCK_DEADLOCK] 1 + error_check_good right_cursor_failed $cursor $ct + } else { + set ret [lindex [lindex [$db get $firstkey] 0] 1] + error_check_good data_changed \ + $ret [pad_data $method $newdata] + error_check_bad right_cursor_succeeded $cursor $ct + } + error_check_good close_cursor [$cursor close] 0 + } + } res] + if { $status != 0 } { + puts $res + } + + # Smoke test for db_stat -txn -degree_2. 
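In the new test107 above, degree 2 isolation is requested with the -degree_2 flag on the transaction, the cursor, or both. A condensed sketch of the non-deadlocking case, assuming an open transactional environment $env, a populated database $db, and a key $offpagekey known to live on a different page from the first item; the smoke test for db_stat -txn -degree_2 then follows.

        set t2 [$env txn -degree_2]
        error_check_good t2_begin [is_valid_txn $t2 $env] TRUE
        set c2 [$db cursor -txn $t2 -degree_2]
        error_check_good c2_valid [is_valid_cursor $c2 $db] TRUE

        # Read the first item, then move the cursor to another page;
        # degree 2 does not hold the first read lock until commit ...
        set first [$c2 get -first]
        set firstkey [lindex [lindex $first 0] 0]
        set ret [$c2 get -set $offpagekey]

        # ... so an auto-commit put to that key should not deadlock.
        set ret [eval {$db put} -auto_commit $firstkey newdata]
        error_check_good put $ret 0

        error_check_good c2_close [$c2 close] 0
        error_check_good t2_commit [$t2 commit] 0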
+ puts "\tTest$tnum.e: Smoke test for db_stat -txn -degree_2" + if { [catch {set statret [$db stat -txn $t -degree_2]} res] } { + puts "FAIL: db_stat -txn -degree_2 returned $res" + } + + # End deadlock detection and clean up handles + puts "\tTest$tnum.f: Clean up." + tclkill $dpid + set fixed_len $orig_fixed_len + error_check_good t_commit [$t commit] 0 + error_check_good t2_commit [$t2 commit] 0 + error_check_good dbclose [$db close] 0 + error_check_good envclose [$env close] 0 +} diff --git a/db/test/test109.tcl b/db/test/test109.tcl new file mode 100644 index 000000000..84e0ca19a --- /dev/null +++ b/db/test/test109.tcl @@ -0,0 +1,288 @@ +# See the file LICENSE for redistribution information. +# +# Copyright (c) 2004 +# Sleepycat Software. All rights reserved. +# +# $Id: test109.tcl,v 1.6 2004/09/22 18:01:06 bostic Exp $ +# +# TEST test109 +# TEST +# TEST Test of sequences. +proc test109 { method {tnum "109"} args } { + source ./include.tcl + global rand_init + global fixed_len + global errorCode + + # Fixed_len must be increased from the default to + # accommodate fixed-record length methods. + set orig_fixed_len $fixed_len + set fixed_len 128 + set args [convert_args $method $args] + set omethod [convert_method $method] + error_check_good random_seed [berkdb srand $rand_init] 0 + + set eindex [lsearch -exact $args "-env"] + set txnenv 0 + set seqargs "" + if { $eindex == -1 } { + set env NULL + } else { + incr eindex + set env [lindex $args $eindex] + set txnenv [is_txnenv $env] + if { $txnenv == 1 } { + append args " -auto_commit " + append seqargs " -auto_commit " + } + set testdir [get_home $env] + } + + # Test with in-memory dbs, regular dbs, and subdbs. + foreach filetype { subdb regular in-memory } { + puts "Test$tnum: $method ($args) Test of sequences ($filetype)." + + # Skip impossible combinations. + if { $filetype == "subdb" && [is_queue $method] } { + puts "Skipping $filetype test for method $method." + continue + } + if { $filetype == "in-memory" && [is_queueext $method] } { + puts "Skipping $filetype test for method $method." + continue + } + + # Reinitialize file name for each file type, then adjust. + if { $eindex == -1 } { + set testfile $testdir/test$tnum.db + } else { + set testfile test$tnum.db + set testdir [get_home $env] + } + if { $filetype == "subdb" } { + lappend testfile SUBDB + } + if { $filetype == "in-memory" } { + set testfile "" + } + + cleanup $testdir $env + + # Make the key numeric so we can test record-based methods. + set key 1 + + # Open a noerr db, since we expect errors. + set db [eval {berkdb_open_noerr \ + -create -mode 0644} $args $omethod $testfile] + error_check_good dbopen [is_valid_db $db] TRUE + + puts "\tTest$tnum.a: Max must be greater than min." + set errorCode NONE + catch {set seq [eval berkdb sequence -create \ + $seqargs -init 0 -min 100 -max 0 $db $key]} res + error_check_good max>min [is_substr $errorCode EINVAL] 1 + + puts "\tTest$tnum.b: Init can't be out of the min-max range." + set errorCode NONE + catch {set seq [eval berkdb sequence -create \ + $seqargs -init 101 -min 0 -max 100 $db $key]} res + error_check_good init [is_substr $errorCode EINVAL] 1 + + # Test increment and decrement. + set min 0 + set max 100 + foreach { init inc } { $min -inc $max -dec } { + puts "\tTest$tnum.c: Test for overflow error with $inc." + test_sequence $db $key $min $max $init $inc $seqargs + } + + # Test cachesize without wrap. 
Make sure to test both + # cachesizes that evenly divide the number of items in the + # sequence, and that leave unused elements at the end. + set min 0 + set max 99 + set init 1 + set cachesizes [list 2 7 11] + foreach csize $cachesizes { + foreach inc { -inc -dec } { + puts "\tTest$tnum.d:\ + -cachesize $csize, $inc, no wrap." + test_sequence $db $key \ + $min $max $init $inc $seqargs $csize + } + } + error_check_good db_close [$db close] 0 + + # Open a regular db; we expect success on the rest of the tests. + set db [eval {berkdb_open \ + -create -mode 0644} $args $omethod $testfile] + error_check_good dbopen [is_valid_db $db] TRUE + + # Test increment and decrement with wrap. Cross from negative + # to positive integers. + set min -50 + set max 99 + set wrap "-wrap" + set csize 1 + foreach { init inc } { $min -inc $max -dec } { + puts "\tTest$tnum.e: Test wrapping with $inc." + test_sequence $db $key \ + $min $max $init $inc $seqargs $csize $wrap + } + + # Test cachesize with wrap. + set min 0 + set max 99 + set init 0 + set wrap "-wrap" + foreach csize $cachesizes { + puts "\tTest$tnum.f: Test -cachesize $csize with wrap." + test_sequence $db $key \ + $min $max $init $inc $seqargs $csize $wrap + } + + # Test multiple handles on the same sequence. + foreach csize $cachesizes { + puts "\tTest$tnum.g:\ + Test multiple handles (-cachesize $csize) with wrap." + test_sequence $db $key \ + $min $max $init $inc $seqargs $csize $wrap 1 + } + error_check_good db_close [$db close] 0 + } + set fixed_len $orig_fixed_len + return +} + +proc test_sequence { db key min max init \ + {inc "-inc"} {seqargs ""} {csize 1} {wrap "" } {second_handle 0} } { + global rand_init + global errorCode + + # The variable "skip" is the cachesize with a direction. + set skip $csize + if { $inc == "-dec" } { + set skip [expr $csize * -1] + } + + # The "limit" is the closest number to the end of the + # sequence we can ever see. + set limit [expr [expr $max + 1] - $csize] + if { $inc == "-dec" } { + set limit [expr [expr $min - 1] + $csize] + } + + # The number of items in the sequence. + set n [expr [expr $max - $min] + 1] + + # Calculate the number of values returned in the first + # cycle, and in all other cycles. + if { $inc == "-inc" } { + set firstcyclehits \ + [expr [expr [expr $max - $init] + 1] / $csize] + } elseif { $inc == "-dec" } { + set firstcyclehits \ + [expr [expr [expr $init - $min] + 1] / $csize] + } else { + puts "FAIL: unknown inc flag $inc" + } + set hitspercycle [expr $n / $csize] + + # Create the sequence. + set seq [eval {berkdb sequence} -create -cachesize $csize $seqargs \ + $wrap -init $init -min $min -max $max $inc $db $key] + error_check_good is_valid_seq [is_valid_seq $seq] TRUE + if { $second_handle == 1 } { + set seq2 [eval {berkdb sequence} -create $seqargs $db $key] + error_check_good is_valid_seq2 [is_valid_seq $seq2] TRUE + } + + # Exercise get options. 
+ set getdb [$seq get_db] + error_check_good seq_get_db $getdb $db + + set flags [$seq get_flags] + set exp_flags [list $inc $wrap] + foreach item $exp_flags { + if { [llength $item] == 0 } { + set idx [lsearch -exact $exp_flags $item] + set exp_flags [lreplace $exp_flags $idx $idx] + } + } + error_check_good get_flags $flags $exp_flags + + set range [$seq get_range] + error_check_good get_range_min [lindex $range 0] $min + error_check_good get_range_max [lindex $range 1] $max + + set cache [$seq get_cachesize] + error_check_good get_cachesize $cache $csize + + # Within the loop, for each successive seq get we calculate + # the value we expect to receive, then do the seq get and + # compare. + # + # Always test some multiple of the number of items in the + # sequence; this tests overflow and wrap-around. + # + set mult 2 + for { set i 0 } { $i < [expr $n * $mult] } { incr i } { + # + # Calculate expected return value. + # + # On the first cycle, start from init. + set expected [expr $init + [expr $i * $skip]] + if { $i >= $firstcyclehits && $wrap != "-wrap" } { + set expected "overflow" + } + + # On second and later cycles, start from min or max. + # We do a second cycle only if wrapping is specified. + if { $wrap == "-wrap" } { + if { $inc == "-inc" && $expected > $limit } { + set j [expr $i - $firstcyclehits] + while { $j >= $hitspercycle } { + set j [expr $j - $hitspercycle] + } + set expected [expr $min + [expr $j * $skip]] + } + + if { $inc == "-dec" && $expected < $limit } { + set j [expr $i - $firstcyclehits] + while { $j >= $hitspercycle } { + set j [expr $j - $hitspercycle] + } + set expected [expr $max + [expr $j * $skip]] + } + } + + # Get return value. If we've got a second handle, choose + # randomly which handle does the seq get. + set syncarg "" + if { $seqargs != "" } { + set syncarg " -nosync " + } + set errorCode NONE + if { $second_handle == 0 } { + catch {eval {$seq get} $seqargs $syncarg $csize} res + } elseif { [berkdb random_int 0 1] == 0 } { + catch {eval {$seq get} $seqargs $syncarg $csize} res + } else { + catch {eval {$seq2 get} $seqargs $syncarg $csize} res + } + + # Compare expected to actual value. + if { $expected == "overflow" } { + error_check_good overflow [is_substr $errorCode EINVAL] 1 + } else { + error_check_good seq_get_wrap $res $expected + } + } + + # A single handle requires a 'seq remove', but a second handle + # should be closed, and then we can remove the sequence. + if { $second_handle == 1 } { + error_check_good seq2_close [$seq2 close] 0 + } + error_check_good seq_remove [eval {$seq remove} $seqargs] 0 +} diff --git a/db/test/testparams.tcl b/db/test/testparams.tcl index bbe508784..16ef7c9b0 100644 --- a/db/test/testparams.tcl +++ b/db/test/testparams.tcl @@ -1,10 +1,11 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. 
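The new test109 above drives the sequence support added in this release. A minimal usage sketch, assuming an open btree or hash database handle $db, with -auto_commit appended to the sequence calls if the environment is transactional (as the test does via $seqargs); the key name "myseq" is illustrative:

        # Create an increasing sequence stored under the key "myseq".
        set seq [berkdb sequence -create -cachesize 10 \
            -init 1 -min 1 -max 1000 -inc $db myseq]
        error_check_good seq_valid [is_valid_seq $seq] TRUE

        # Each get returns the first value of the next allocated block.
        set val [$seq get 10]
        error_check_good first_value $val 1

        # Accessors mirror the C API's get_* methods.
        set range [$seq get_range]
        error_check_good range_min [lindex $range 0] 1
        error_check_good range_max [lindex $range 1] 1000
        error_check_good cachesize [$seq get_cachesize] 10

        error_check_good seq_remove [$seq remove] 0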
# -# $Id: testparams.tcl,v 11.164 2003/10/31 20:17:24 sandstro Exp $ +# $Id: testparams.tcl,v 11.200 2004/10/12 16:22:14 sue Exp $ +global one_test global serial_tests set serial_tests {rep002 rep005} @@ -14,6 +15,7 @@ set subs {bigfile dead env fop lock log memp mutex recd rep rpc rsrc\ set test_names(bigfile) [list bigfile001 bigfile002] set test_names(dead) [list dead001 dead002 dead003 dead004 dead005 dead006 \ dead007] +set test_names(elect) [list rep002 rep005 rep016 rep020 rep022] set test_names(env) [list env001 env002 env003 env004 env005 env006 \ env007 env008 env009 env010 env011] set test_names(fop) [list fop001 fop002 fop003 fop004 fop005 fop006] @@ -23,10 +25,12 @@ set test_names(memp) [list memp001 memp002 memp003 memp004] set test_names(mutex) [list mutex001 mutex002 mutex003] set test_names(recd) [list recd001 recd002 recd003 recd004 recd005 recd006 \ recd007 recd008 recd009 recd010 recd011 recd012 recd013 recd014 recd015 \ - recd016 recd017 recd018 recd019] -set test_names(rep) [list rep001 rep002 rep003 rep004 rep005 rep006 \ - rep007 rep008 rep009 rep010 rep011 rep012 rep013] -set test_names(rpc) [list rpc001 rpc002 rpc003 rpc004 rpc005] + recd016 recd017 recd018 recd019 recd020 ] +set test_names(rep) [list rep001 rep002 rep003 rep005 rep006 rep007 \ + rep008 rep009 rep010 rep011 rep012 rep013 rep014 rep015 rep016 rep017 \ + rep018 rep019 rep020 rep021 rep022 rep023 rep024 rep026 rep027 rep028 \ + rep029 rep030 rep031 rep032 rep033 rep034 rep035 rep036 rep037] +set test_names(rpc) [list rpc001 rpc002 rpc003 rpc004 rpc005 rpc006] set test_names(rsrc) [list rsrc001 rsrc002 rsrc003 rsrc004] set test_names(sdb) [list sdb001 sdb002 sdb003 sdb004 sdb005 sdb006 \ sdb007 sdb008 sdb009 sdb010 sdb011 sdb012] @@ -44,16 +48,48 @@ set test_names(test) [list test001 test002 test003 test004 test005 \ test069 test070 test071 test072 test073 test074 test076 test077 \ test078 test079 test081 test082 test083 test084 test085 test086 \ test087 test088 test089 test090 test091 test092 test093 test094 test095 \ - test096 test097 test098 test099 test100 test101 test102 test103 ] + test096 test097 test098 test099 test100 test101 test102 test103 test107 \ + test109 ] set test_names(txn) [list txn001 txn002 txn003 txn004 txn005 txn006 \ txn007 txn008 txn009 txn010 txn011] +set rpc_tests(berkeley_db_svc) [concat $test_names(test) $test_names(sdb)] +set rpc_tests(berkeley_db_cxxsvc) $test_names(test) +set rpc_tests(berkeley_db_javasvc) $test_names(test) + +# JE tests are a subset of regular RPC tests -- exclude these ones. +# be fixable by modifying tests dealing with unsorted duplicates, second line +# will probably never work unless certain features are added to JE (record +# numbers, bulk get, etc.). +set je_exclude {(?x) # Turn on extended syntax + test(010|026|027|028|030|031|032|033|034| # These should be fixable by + 035|039|041|046|047|054|056|057|062| # modifying tests to avoid + 066|073|081|085)| # unsorted dups, etc. + + test(011|017|018|022|023|024|029|040|049| # Not expected to work with + 062|083|095) # JE until / unless features + # are added to JE (record + # numbers, bulk gets, etc.) +} +set rpc_tests(berkeley_dbje_svc) [lsearch -all -inline -not -regexp \ + $rpc_tests(berkeley_db_svc) $je_exclude] + +# Source all the tests, whether we're running one or many. foreach sub $subs { foreach test $test_names($sub) { source $test_path/$test.tcl } } +# Reset test_names if we're running only one test. 
+if { $one_test != "ALL" } { + foreach sub $subs { + set test_names($sub) "" + } + set type [string trim $one_test 0123456789] + set test_names($type) [list $one_test] +} + source $test_path/archive.tcl source $test_path/byteorder.tcl source $test_path/dbm.tcl @@ -90,6 +126,41 @@ set parms(recd016) "" set parms(recd017) 0 set parms(recd018) 10 set parms(recd019) 50 +set parms(recd020) "" +set parms(rep001) {1000 "001"} +set parms(rep002) {10 3 "002"} +set parms(rep003) "003" +set parms(rep005) "" +set parms(rep006) {1000 "006"} +set parms(rep007) {10 "007"} +set parms(rep008) {10 "008"} +set parms(rep009) {10 "009"} +set parms(rep010) {100 "010"} +set parms(rep011) "011" +set parms(rep012) {10 "012"} +set parms(rep013) {10 "013"} +set parms(rep014) {10 "014"} +set parms(rep015) {100 "015" 3} +set parms(rep016) "" +set parms(rep017) {10 "017"} +set parms(rep018) {10 "018"} +set parms(rep019) {3 "019"} +set parms(rep020) "" +set parms(rep021) {3 "021"} +set parms(rep022) "" +set parms(rep023) {10 "023"} +set parms(rep024) {1000 "024"} +set parms(rep026) "" +set parms(rep027) {1000 "027"} +set parms(rep028) {100 "028"} +set parms(rep029) {200 "029"} +set parms(rep030) {500 "030"} +set parms(rep031) {200 "031"} +set parms(rep032) {200 "032"} +set parms(rep033) {200 "033"} +set parms(rep034) {2 "034"} +set parms(rep035) {100 "035"} +set parms(rep036) {200 "036"} set parms(subdb001) "" set parms(subdb002) 10000 set parms(subdb003) 1000 @@ -119,7 +190,7 @@ set parms(si002) {200 2} set parms(si003) {200 3} set parms(si004) {200 4} set parms(si005) {200 5} -set parms(test001) {10000 0 1 "001"} +set parms(test001) {10000 0 0 "001"} set parms(test002) 10000 set parms(test003) "" set parms(test004) {10000 "004" 0} @@ -220,11 +291,13 @@ set parms(test100) {10000 "100"} set parms(test101) {1000 -txn "101"} set parms(test102) {1000 "102"} set parms(test103) {100 4294967250 "103"} +set parms(test107) "" +set parms(test109) {"109"} # RPC server executables. Each of these is tested (if it exists) # when running the RPC tests. set svc_list { berkeley_db_svc berkeley_db_cxxsvc \ - berkeley_db_javasvc } + berkeley_db_javasvc berkeley_dbje_svc } set rpc_svc berkeley_db_svc # Shell script tests. Each list entry is a {directory filename} pair, @@ -257,4 +330,7 @@ set shelltest_list { { scr025 chk.cxxmulti } { scr026 chk.method } { scr027 chk.javas } + { scr028 chk.rtc } + { scr029 chk.get } + { scr030 chk.build } } diff --git a/db/test/testutils.tcl b/db/test/testutils.tcl index b66846591..ded9bc5ce 100644 --- a/db/test/testutils.tcl +++ b/db/test/testutils.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: testutils.tcl,v 11.182 2003/09/30 17:32:26 sandstro Exp $ +# $Id: testutils.tcl,v 11.198 2004/09/28 15:02:18 carol Exp $ # # Test system utilities # @@ -1097,6 +1097,7 @@ proc filecheck { file txn } { proc cleanup { dir env { quiet 0 } } { global gen_upgrade global is_qnx_test + global is_je_test global old_encrypt global passwd source ./include.tcl @@ -1123,7 +1124,8 @@ proc cleanup { dir env { quiet 0 } } { switch -glob -- $fileorig { */DIR_* - */__db.* - - */log.* { + */log.* - + */*.jdb { if { $env != "NULL" } { continue } else { @@ -1198,7 +1200,32 @@ proc cleanup { dir env { quiet 0 } } { } } if {[llength $remfiles] > 0} { - eval fileremove -f $remfiles + # + # In the HFS file system there are cases where not + # all files are removed on the first attempt. 
If + # it fails, try again a few times. + # + set count 0 + while { [catch {eval fileremove -f $remfiles}] == 1 \ + && $count < 5 } { + incr count + } + } + + if { $is_je_test } { + set rval [catch {eval {exec \ + $util_path/db_dump} -h $dir -l } res] + if { $rval == 0 } { + set envargs " -env $env " + if { [is_txnenv $env] } { + append envargs " -auto_commit " + } + + foreach db $res { + set ret [catch {eval \ + {berkdb dbremove} $envargs $db } res] + } + } } } } @@ -1249,7 +1276,7 @@ proc env_cleanup { dir } { cleanup $dir NULL } -# Start an RPC server. Don't return to caller until the +# Start an RPC server. Don't return to caller until the # server is up. Wait up to $maxwait seconds. proc rpc_server_start { { encrypted 0 } { maxwait 30 } { args "" } } { source ./include.tcl @@ -1269,7 +1296,7 @@ proc rpc_server_start { { encrypted 0 } { maxwait 30 } { args "" } } { $rpc_path/$rpc_svc -h $rpc_testdir $args} &] } - # Wait a couple of seconds before we start looking for + # Wait a couple of seconds before we start looking for # the server. tclsleep 2 set home [file tail $rpc_testdir] @@ -1277,13 +1304,14 @@ proc rpc_server_start { { encrypted 0 } { maxwait 30 } { args "" } } { set encargs " -encryptaes $passwd " } for { set i 0 } { $i < $maxwait } { incr i } { - if {[catch {set env [eval berkdb_env -create \ - -home $home -server $rpc_server $encargs]} res]} { - # If we have an error, sleep for a second. + # Try an operation -- while it fails with NOSERVER, sleep for + # a second and retry. + if {[catch {berkdb envremove -force -home "$home.FAIL" \ + -server $rpc_server} res] && \ + [is_substr $res DB_NOSERVER:]} { tclsleep 1 } else { # Server is up, clean up and return to caller - error_check_good env_close [$env close] 0 break } if { $i >= $maxwait } { @@ -1779,11 +1807,15 @@ proc reset_env { env } { error_check_good env_close [$env close] 0 } -proc minlocks { myenv locker_id obj_id num } { +proc maxlocks { myenv locker_id obj_id num } { return [countlocks $myenv $locker_id $obj_id $num ] } -proc maxlocks { myenv locker_id obj_id num } { +proc maxwrites { myenv locker_id obj_id num } { + return [countlocks $myenv $locker_id $obj_id $num ] +} + +proc minlocks { myenv locker_id obj_id num } { return [countlocks $myenv $locker_id $obj_id $num ] } @@ -1805,7 +1837,8 @@ proc countlocks { myenv locker_id obj_id num } { } } - # Now acquire a write lock + # Now acquire one write lock, except for obj_id 1, which doesn't + # acquire any. We'll use obj_id 1 to test minwrites. if { $obj_id != 1 } { set r [catch {$myenv lock_get write $locker_id \ [expr $obj_id * 1000 + 10]} l ] @@ -1818,6 +1851,21 @@ proc countlocks { myenv locker_id obj_id num } { } } + # Get one extra write lock for obj_id 2. We'll use + # obj_id 2 to test maxwrites. + # + if { $obj_id == 2 } { + set extra [catch {$myenv lock_get write \ + $locker_id [expr $obj_id * 1000 + 11]} l ] + if { $extra != 0 } { + puts $l + return ERROR + } else { + error_check_good lockget:$obj_id [is_substr $l $myenv] 1 + lappend locklist $l + } + } + set ret [ring $myenv $locker_id $obj_id $num] foreach l $locklist { @@ -1925,8 +1973,8 @@ proc dead_check { t procs timeout dead clean other } { error_check_good $t:$procs:other $other 0 switch $t { ring { - # With timeouts the number of deadlocks is unpredictable: - # test for at least one deadlock. + # With timeouts the number of deadlocks is + # unpredictable: test for at least one deadlock. 
if { $timeout != 0 && $dead > 1 } { set clean [ expr $clean + $dead - 1] set dead 1 @@ -1936,8 +1984,9 @@ proc dead_check { t procs timeout dead clean other } { [expr $procs - 1] } clump { - # With timeouts the number of deadlocks is unpredictable: - # test for no more than one successful lock. + # With timeouts the number of deadlocks is + # unpredictable: test for no more than one + # successful lock. if { $timeout != 0 && $dead == $procs } { set clean 1 set dead [expr $procs - 1] @@ -1951,12 +2000,17 @@ proc dead_check { t procs timeout dead clean other } { error_check_good $t:$procs:success $clean \ [expr $procs - 1] } - minlocks { + maxlocks { error_check_good $t:$procs:deadlocks $dead 1 error_check_good $t:$procs:success $clean \ [expr $procs - 1] } - maxlocks { + maxwrites { + error_check_good $t:$procs:deadlocks $dead 1 + error_check_good $t:$procs:success $clean \ + [expr $procs - 1] + } + minlocks { error_check_good $t:$procs:deadlocks $dead 1 error_check_good $t:$procs:success $clean \ [expr $procs - 1] @@ -2082,6 +2136,10 @@ proc is_valid_locker {l } { return [is_valid_widget $l ""] } +proc is_valid_seq { seq } { + return [is_valid_widget $seq seq] +} + proc send_cmd { fd cmd {sleep 2}} { source ./include.tcl @@ -2161,14 +2219,13 @@ proc pad_data {method data} { proc make_fixed_length {method data {pad 0}} { global fixed_len - global fixed_pad if {[is_fixed_length $method] == 1} { if {[string length $data] > $fixed_len } { error_check_bad make_fixed_len:TOO_LONG 1 1 } while { [string length $data] < $fixed_len } { - set data [format $data%c $fixed_pad] + set data [format $data%c $pad] } } return $data @@ -2215,6 +2272,21 @@ proc binary_compare { data1 data2 } { } } +# This is a comparison function used with the lsort command. +# It treats its inputs as 32 bit signed integers for comparison, +# and is coded to work with both 32 bit and 64 bit versions of tclsh. +proc int32_compare { val1 val2 } { + # Big is set to 2^32 on a 64 bit machine, or 0 on 32 bit machine. + set big [expr 0xffffffff + 1] + if { $val1 >= 0x80000000 } { + set val1 [expr $val1 - $big] + } + if { $val2 >= 0x80000000 } { + set val2 [expr $val2 - $big] + } + return [expr $val1 - $val2] +} + proc convert_method { method } { switch -- $method { -btree - @@ -2251,7 +2323,14 @@ proc convert_method { method } { db_queue - q - qam - - queue { return "-queue" } + queue - + -iqueue - + DB_IQUEUE - + IQUEUE - + db_iqueue - + iq - + iqam - + iqueue { return "-queue" } -queueextent - QUEUEEXTENT - @@ -2259,7 +2338,14 @@ proc convert_method { method } { qamext - -queueext - queueextent - - queueext { return "-queue" } + queueext - + -iqueueextent - + IQUEUEEXTENT - + iqe - + iqamext - + -iqueueext - + iqueueextent - + iqueueext { return "-queue" } -frecno - -recno - @@ -2315,7 +2401,6 @@ proc convert_encrypt { largs } { # -flags argument. proc convert_args { method {largs ""} } { global fixed_len - global fixed_pad global gen_upgrade global upgrade_be source ./include.tcl @@ -2351,9 +2436,15 @@ proc convert_args { method {largs ""} } { append largs " -dup " append largs " -dupsort " } elseif { [is_queueext $method] == 1 } { - append largs " -extent 2 " + append largs " -extent 4 " + } + + if { [is_iqueue $method] == 1 || [is_iqueueext $method] == 1 } { + append largs " -inorder " } + # Default padding character is ASCII nul. 
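(Aside, not part of the patch: a hedged example of how the int32_compare proc above is intended to be used with lsort. The values are invented for the illustration; the point is that numbers at or above 0x80000000 sort as negative, that is, before the small positive values.)

# Sort a mix of small and wrapped 32-bit values as signed integers.
set vals [list 0xfffffffe 1 0x80000000 2]
set sorted [lsort -command int32_compare $vals]
# Signed order: 0x80000000 (-2147483648), 0xfffffffe (-2), 1, 2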
+ set fixed_pad 0 if {[is_fixed_length $method] == 1} { append largs " -len $fixed_len -pad $fixed_pad " } @@ -2451,7 +2542,8 @@ proc is_ddhash { method } { } proc is_queue { method } { - if { [is_queueext $method] == 1 } { + if { [is_queueext $method] == 1 || [is_iqueue $method] == 1 || \ + [is_iqueueext $method] == 1 } { return 1 } @@ -2464,6 +2556,10 @@ proc is_queue { method } { } proc is_queueext { method } { + if { [is_iqueueext $method] == 1 } { + return 1 + } + set names { -queueextent queueextent QUEUEEXTENT qe qamext \ queueext -queueext } if { [lsearch $names $method] >= 0 } { @@ -2473,6 +2569,29 @@ proc is_queueext { method } { } } +proc is_iqueue { method } { + if { [is_iqueueext $method] == 1 } { + return 1 + } + + set names { -iqueue DB_IQUEUE IQUEUE db_iqueue iq iqueue iqam } + if { [lsearch $names $method] >= 0 } { + return 1 + } else { + return 0 + } +} + +proc is_iqueueext { method } { + set names { -iqueueextent iqueueextent IQUEUEEXTENT iqe iqamext \ + iqueueext -iqueueext } + if { [lsearch $names $method] >= 0 } { + return 1 + } else { + return 0 + } +} + proc is_record_based { method } { if { [is_recno $method] || [is_frecno $method] || [is_rrecno $method] || [is_queue $method] } { @@ -2650,8 +2769,8 @@ proc fileextract { superset subset outfile } { } # Verify all .db files in the specified directory. -proc verify_dir { {directory $testdir} \ - { pref "" } { noredo 0 } { quiet 0 } { nodump 0 } { cachesize 0 } } { +proc verify_dir { {directory $testdir} { pref "" } \ + { noredo 0 } { quiet 0 } { nodump 0 } { cachesize 0 } { unref 1 } } { global encrypt global passwd @@ -2695,8 +2814,19 @@ proc verify_dir { {directory $testdir} \ {-cachesize [list 0 $cachesize 0]}] set earg " -env $env $errarg " + # The 'unref' flag means that we report unreferenced pages + # at all times. This is the default behavior. + # If we have a test which leaves unreferenced pages on systems + # where HAVE_FTRUNCATE is not on, then we call verify_dir with + # unref == 0. + set uflag "-unref" + if { $unref == 0 } { + set uflag "" + } + foreach db $dbs { - if { [catch {eval {berkdb dbverify} $earg $db} res] != 0 } { + if { [catch \ + {eval {berkdb dbverify} $uflag $earg $db} res] != 0 } { puts $res puts "FAIL:[timestamp] Verification of $db failed." set ret 1 @@ -2771,7 +2901,7 @@ proc db_compare { olddb newdb olddbname newdbname } { } error_check_good orig_cursor_close($olddbname) [$oc close] 0 - error_check_good new_cursor_close($olddbname) [$nc close] 0 + error_check_good new_cursor_close($newdbname) [$nc close] 0 return 0 } @@ -3085,21 +3215,42 @@ proc get_pagesize { stat } { proc get_file_list { {small 0} } { global is_windows_test global is_qnx_test + global is_je_test global src_root - if { $is_qnx_test } { + # Skip libraries if we have a debug build. + if { $is_qnx_test || $is_je_test || [is_debug] == 1 } { set small 1 } + if { $small && $is_windows_test } { - return [glob $src_root/*/*.c */env*.obj] + set templist [glob $src_root/*/*.c */env*.obj] } elseif { $small } { - return [glob $src_root/*/*.c ./env*.o] + set templist [glob $src_root/*/*.c ./env*.o] } elseif { $is_windows_test } { - return \ + set templist \ [glob $src_root/*/*.c */*.obj */libdb??.dll */libdb??d.dll] } else { - return [glob $src_root/*/*.c ./*.o ./.libs/libdb-?.?.s?] + set templist [glob $src_root/*/*.c ./*.o ./.libs/libdb-?.?.s?] + } + + # We don't want a huge number of files, but we do want a nice + # variety. 
If there are more than 200 files, pick out a list + # by taking every other, or every third, or every nth file. + set filelist {} + set nfiles 200 + if { [llength $templist] > $nfiles } { + set skip \ + [expr [llength $templist] / [expr [expr $nfiles / 3] * 2]] + set i $skip + while { $i < [llength $templist] } { + lappend filelist [lindex $templist $i] + incr i $skip + } + } else { + set filelist $templist } + return $filelist } proc is_cdbenv { env } { @@ -3209,6 +3360,14 @@ proc getstats { statlist field } { return -1 } +# Return the value for a particular field in a set of statistics. +# Works for regular db stat as well as env stats (log_stat, +# lock_stat, txn_stat, rep_stat, etc.). +proc stat_field { handle which_stat field } { + set stat [$handle $which_stat] + return [getstats $stat $field ] +} + proc big_endian { } { global tcl_platform set e $tcl_platform(byteOrder) @@ -3220,3 +3379,80 @@ proc big_endian { } { error "FAIL: Unknown endianness $e" } } + +# Search logs to find if we have debug records. +proc log_has_debug_records { dir } { + source ./include.tcl + global encrypt + + set tmpfile $dir/printlog.out + set stat [catch \ + {exec $util_path/db_printlog -h $dir > $tmpfile} ret] + error_check_good db_printlog $stat 0 + + set f [open $tmpfile r] + while { [gets $f record] >= 0 } { + set r [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name] + if { $r == 1 && [string match *_debug $name] != 1 } { + close $f + fileremove $tmpfile + return 1 + } + } + close $f + fileremove $tmpfile + return 0 +} + +# Set up a temporary database to check if this is a debug build. +proc is_debug { } { + source ./include.tcl + + set tempdir $testdir/temp + file mkdir $tempdir + set env [berkdb_env -create -log -home $testdir/temp] + error_check_good temp_env_open [is_valid_env $env] TRUE + + set file temp.db + set db [berkdb_open -create -env $env -btree $file] + error_check_good temp_db_open [is_valid_db $db] TRUE + + set key KEY + set data DATA + error_check_good temp_db_put [$db put $key $data] 0 + set ret [$db get $key] + error_check_good get_key [lindex [lindex $ret 0] 0] $key + error_check_good get_data [lindex [lindex $ret 0] 1] $data + error_check_good temp_db_close [$db close] 0 + error_check_good temp_db_remove [$env dbremove $file] 0 + error_check_good temp_env_close [$env close] 0 + + if { [log_has_debug_records $tempdir] == 1 } { + return 1 + } + return 0 +} + +proc adjust_logargs { logtype } { + if { $logtype == "in-memory" } { + set lbuf [expr 8 * [expr 1024 * 1024]] + set logargs " -log_inmemory -log_buffer $lbuf " + } elseif { $logtype == "on-disk" } { + set logargs "" + } else { + puts "FAIL: unrecognized log type $logtype" + } + return $logargs +} + +proc adjust_txnargs { logtype } { + if { $logtype == "in-memory" } { + set txnargs " -txn " + } elseif { $logtype == "on-disk" } { + set txnargs " -txn nosync " + } else { + puts "FAIL: unrecognized log type $logtype" + } + return $txnargs +} + diff --git a/db/test/txn001.tcl b/db/test/txn001.tcl index 26795ca39..583b7f212 100644 --- a/db/test/txn001.tcl +++ b/db/test/txn001.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. 
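(Aside, not part of the patch: a short sketch of how the helpers added above fit together -- adjust_logargs and adjust_txnargs build environment flags for a given log type, and stat_field pulls one field out of a statistics call. It assumes the usual test globals such as $testdir, and the field string "Number of active transactions" is only a guess at a txn_stat field name, not taken from the suite.)

# Illustrative only: an in-memory-logging, transactional environment.
set logtype "in-memory"
set logargs [adjust_logargs $logtype]
set txnargs [adjust_txnargs $logtype]
set env [eval {berkdb_env -create -home $testdir} $logargs $txnargs]

# Read a single statistics field by name (field name assumed).
set nactive [stat_field $env txn_stat "Number of active transactions"]

error_check_good env_close [$env close] 0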
# -# $Id: txn001.tcl,v 11.37 2003/01/08 05:54:13 bostic Exp $ +# $Id: txn001.tcl,v 11.38 2004/01/28 03:36:32 bostic Exp $ # # TEST txn001 diff --git a/db/test/txn002.tcl b/db/test/txn002.tcl index f2530353c..1ecbf9df9 100644 --- a/db/test/txn002.tcl +++ b/db/test/txn002.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: txn002.tcl,v 11.40 2003/01/08 05:54:13 bostic Exp $ +# $Id: txn002.tcl,v 11.41 2004/01/28 03:36:32 bostic Exp $ # # TEST txn002 diff --git a/db/test/txn003.tcl b/db/test/txn003.tcl index 5c1e8a630..e6a6d6d14 100644 --- a/db/test/txn003.tcl +++ b/db/test/txn003.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: txn003.tcl,v 11.42 2003/01/08 05:54:14 bostic Exp $ +# $Id: txn003.tcl,v 11.43 2004/01/28 03:36:33 bostic Exp $ # # TEST txn003 diff --git a/db/test/txn004.tcl b/db/test/txn004.tcl index edff9a63a..c7accddd9 100644 --- a/db/test/txn004.tcl +++ b/db/test/txn004.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: txn004.tcl,v 11.41 2003/01/08 05:54:14 bostic Exp $ +# $Id: txn004.tcl,v 11.42 2004/01/28 03:36:33 bostic Exp $ # # TEST txn004 diff --git a/db/test/txn005.tcl b/db/test/txn005.tcl index 376235eef..e22581cd1 100644 --- a/db/test/txn005.tcl +++ b/db/test/txn005.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: txn005.tcl,v 11.36 2003/01/08 05:54:14 bostic Exp $ +# $Id: txn005.tcl,v 11.37 2004/01/28 03:36:33 bostic Exp $ # # TEST txn005 diff --git a/db/test/txn006.tcl b/db/test/txn006.tcl index 69229bea2..14ada718e 100644 --- a/db/test/txn006.tcl +++ b/db/test/txn006.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: txn006.tcl,v 1.6 2003/01/08 05:54:14 bostic Exp $ +# $Id: txn006.tcl,v 1.7 2004/01/28 03:36:33 bostic Exp $ # # #TEST txn006 diff --git a/db/test/txn007.tcl b/db/test/txn007.tcl index cf1eae33f..2ef382b97 100644 --- a/db/test/txn007.tcl +++ b/db/test/txn007.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: txn007.tcl,v 11.4 2003/01/08 05:54:15 bostic Exp $ +# $Id: txn007.tcl,v 11.5 2004/01/28 03:36:33 bostic Exp $ # #TEST txn007 #TEST Test of DB_TXN_WRITE_NOSYNC diff --git a/db/test/txn008.tcl b/db/test/txn008.tcl index eb30ff947..8c89296f4 100644 --- a/db/test/txn008.tcl +++ b/db/test/txn008.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: txn008.tcl,v 11.5 2003/01/08 05:54:15 bostic Exp $ +# $Id: txn008.tcl,v 11.6 2004/01/28 03:36:33 bostic Exp $ # # TEST txn008 diff --git a/db/test/txn009.tcl b/db/test/txn009.tcl index 2bb0d0124..b45538d75 100644 --- a/db/test/txn009.tcl +++ b/db/test/txn009.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. 
# -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: txn009.tcl,v 11.5 2003/01/08 05:54:15 bostic Exp $ +# $Id: txn009.tcl,v 11.6 2004/01/28 03:36:33 bostic Exp $ # # TEST txn009 diff --git a/db/test/txn010.tcl b/db/test/txn010.tcl index b9d47d60c..6616c252b 100644 --- a/db/test/txn010.tcl +++ b/db/test/txn010.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: txn010.tcl,v 1.4 2003/01/08 05:54:15 bostic Exp $ +# $Id: txn010.tcl,v 1.5 2004/01/28 03:36:33 bostic Exp $ # # TEST txn010 # TEST Test DB_ENV->txn_checkpoint arguments/flags diff --git a/db/test/txn011.tcl b/db/test/txn011.tcl index 2dfbf84e2..24bf5c142 100644 --- a/db/test/txn011.tcl +++ b/db/test/txn011.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2003 +# Copyright (c) 2003-2004 # Sleepycat Software. All rights reserved. # -# $Id: txn011.tcl,v 1.9 2003/09/04 23:41:19 bostic Exp $ +# $Id: txn011.tcl,v 1.11 2004/07/26 20:27:49 carol Exp $ # # TEST txn011 # TEST Test durable and non-durable txns. @@ -21,9 +21,14 @@ proc txn011 { {ntxns 100} } { puts "Txn011: Non-durable txns" env_cleanup $testdir - puts "\tTxn011.a: Persistent env recovery with -notdurable." - set ndenv [berkdb_env -create -home $testdir -txn -notdurable] - set db [berkdb_open -create -btree -env $ndenv test.db] + puts "\tTxn011.a: Persistent env recovery with -log_inmemory" + set lbuf [expr 8 * [expr 1024 * 1024]] + set env_cmd "berkdb_env -create \ + -home $testdir -txn -log_inmemory -log_buffer $lbuf" + set ndenv [eval $env_cmd] + set db [berkdb_open -create -auto_commit \ + -btree -env $ndenv -notdurable test.db] + check_log_records $testdir error_check_good db_close [$db close] 0 error_check_good ndenv_close [$ndenv close] 0 @@ -31,30 +36,29 @@ proc txn011 { {ntxns 100} } { set stat [catch {exec $util_path/db_recover -e -h $testdir} ret] error_check_good db_printlog $stat 0 - # Rejoin env and make sure that db is still -notdurable. + # Rejoin env and make sure that the db is still there. set ndenv [berkdb_env -home $testdir] - set db [berkdb_open -env $ndenv -notdurable test.db] + set db [berkdb_open -auto_commit -env $ndenv test.db] error_check_good db_close [$db close] 0 error_check_good ndenv_close [$ndenv close] 0 env_cleanup $testdir # Start with a new env for the next test. - set ndenv [berkdb_env -create -home $testdir -txn -notdurable] + set ndenv [eval $env_cmd] error_check_good env_open [is_valid_env $ndenv] TRUE - # Open/create the database; it acquires non-durability - # from the env. - set testfile envnotdurable.db - set db [eval berkdb_open \ - -create -auto_commit -env $ndenv -btree $testfile] + # Open/create the database. + set testfile notdurable.db + set db [eval berkdb_open -create \ + -auto_commit -env $ndenv -notdurable -btree $testfile] error_check_good dbopen [is_valid_db $db] TRUE - puts "\tTxn011.b: Abort txns in non-durable env." + puts "\tTxn011.b: Abort txns in in-memory logging env." txn011_runtxns $ntxns $db $ndenv abort # Make sure there is nothing in the db. txn011_check_empty $db $ndenv - puts "\tTxn011.c: Commit txns in non-durable env." + puts "\tTxn011.c: Commit txns in in-memory logging env." txn011_runtxns $ntxns $db $ndenv commit # Make sure we haven't written any inappropriate log records @@ -63,17 +67,20 @@ proc txn011 { {ntxns 100} } { # Clean up non-durable env tests. 
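(Aside, not part of the patch: the txn011 changes above pair an environment whose log lives only in memory with a database opened -notdurable. A minimal sketch of that combination, assuming the usual $testdir global and the same 8MB log buffer the test uses; the database name is arbitrary.)

# In-memory logging: log records stay in the buffer, no log files on disk.
set lbuf [expr 8 * [expr 1024 * 1024]]
set env [berkdb_env -create -home $testdir -txn \
    -log_inmemory -log_buffer $lbuf]
# A non-durable database in that environment: its updates are not
# logged for recovery.
set db [berkdb_open -create -auto_commit -btree \
    -env $env -notdurable mydata.db]
error_check_good db_close [$db close] 0
error_check_good env_close [$env close] 0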
error_check_good db_close [$db close] 0 error_check_good ndenv_close [$ndenv close] 0 + env_cleanup $testdir puts "\tTxn011.d: Set up mixed durable/non-durable test." # Open/create the mixed environment - set env [berkdb_env_noerr -create -home $testdir -txn] + set mixed_env_cmd "berkdb_env_noerr -create \ + -home $testdir -txn -log_inmemory -log_buffer $lbuf" + set env [eval $mixed_env_cmd] error_check_good env_open [is_valid_env $env] TRUE check_log_records $testdir # Open/create the non-durable database set nondurfile nondurable.db - set ndb [berkdb_open_noerr \ - -create -auto_commit -env $env -btree -notdurable $nondurfile] + set ndb [berkdb_open_noerr -create\ + -auto_commit -env $env -btree -notdurable $nondurfile] error_check_good dbopen [is_valid_db $ndb] TRUE check_log_records $testdir diff --git a/db/test/txnscript.tcl b/db/test/txnscript.tcl index 108498407..980f6ed51 100644 --- a/db/test/txnscript.tcl +++ b/db/test/txnscript.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1996-2003 +# Copyright (c) 1996-2004 # Sleepycat Software. All rights reserved. # -# $Id: txnscript.tcl,v 11.4 2003/01/08 05:54:16 bostic Exp $ +# $Id: txnscript.tcl,v 11.5 2004/01/28 03:36:33 bostic Exp $ # # Txn003 script - outstanding child prepare script # Usage: txnscript envcmd dbcmd gidf key data diff --git a/db/test/update.tcl b/db/test/update.tcl index c5d9c5ba3..85c1a8007 100644 --- a/db/test/update.tcl +++ b/db/test/update.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: update.tcl,v 11.12 2003/01/08 05:54:16 bostic Exp $ +# $Id: update.tcl,v 11.13 2004/01/28 03:36:33 bostic Exp $ source ./include.tcl global update_dir diff --git a/db/test/upgrade.tcl b/db/test/upgrade.tcl index 7e262a33d..e4099a231 100644 --- a/db/test/upgrade.tcl +++ b/db/test/upgrade.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 1999-2003 +# Copyright (c) 1999-2004 # Sleepycat Software. All rights reserved. # -# $Id: upgrade.tcl,v 11.32 2003/10/02 15:09:36 sandstro Exp $ +# $Id: upgrade.tcl,v 11.34 2004/09/22 18:01:06 bostic Exp $ source ./include.tcl @@ -371,7 +371,7 @@ proc gen_upgrade { dir { save_crypto 1 } { save_non_crypto 1 } } { set gen_chksum 1 foreach upgrade_be { 0 1 } { set upgrade_name $test - if { $test == "test001" } { + if { $test == "test001" } { if { [catch {exec $tclsh_path \ << "source $test_path/test.tcl;\ global gen_upgrade;\ @@ -381,7 +381,7 @@ proc gen_upgrade { dir { save_crypto 1 } { save_non_crypto 1 } } { global encrypt gen_chksum;\ set encrypt $encrypt;\ set gen_upgrade 1;\ - set gen_chksum 1;\ + set gen_chksum 1;\ set upgrade_be $upgrade_be;\ set upgrade_method \ $upgrade_method;\ @@ -399,7 +399,7 @@ proc gen_upgrade { dir { save_crypto 1 } { save_non_crypto 1 } } { } set gen_chksum 0 } - # Save encrypted db's only of native endianness. + # Save encrypted db's only of native endianness. # Encrypted files are not portable across endianness. if { $save_crypto == 1 } { set upgrade_be [big_endian] @@ -520,7 +520,7 @@ proc save_upgrade_files { dir } { set upgrade_name c-$upgrade_name set dumpflag " -P $passwd " } - # Checksummed files are identified by the prefix "s-". + # Checksummed files are identified by the prefix "s-". 
if { $gen_chksum == 1 } { set upgrade_name s-$upgrade_name } diff --git a/db/test/wrap.tcl b/db/test/wrap.tcl index 8592aea13..34ec45107 100644 --- a/db/test/wrap.tcl +++ b/db/test/wrap.tcl @@ -1,9 +1,9 @@ # See the file LICENSE for redistribution information. # -# Copyright (c) 2000-2003 +# Copyright (c) 2000-2004 # Sleepycat Software. All rights reserved. # -# $Id: wrap.tcl,v 11.7 2003/01/08 05:54:16 bostic Exp $ +# $Id: wrap.tcl,v 11.8 2004/01/28 03:36:33 bostic Exp $ # # Sentinel file wrapper for multi-process tests. This is designed to avoid a # set of nasty bugs, primarily on Windows, where pid reuse causes watch_procs diff --git a/db/txn/txn.c b/db/txn/txn.c index 937046fa8..94755318b 100644 --- a/db/txn/txn.c +++ b/db/txn/txn.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -34,14 +34,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * + * $Id: txn.c,v 11.248 2004/09/23 15:02:32 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: txn.c,v 11.219 2003/12/03 14:33:06 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include @@ -106,6 +104,7 @@ static int __txn_isvalid __P((const DB_TXN *, TXN_DETAIL **, txnop_t)); static int __txn_undo __P((DB_TXN *)); static int __txn_dispatch_undo __P((DB_ENV *, DB_TXN *, DBT *, DB_LSN *, void *)); +static void __txn_set_begin_lsnp __P((DB_TXN *txn, DB_LSN **)); /* * __txn_begin_pp -- @@ -126,7 +125,7 @@ __txn_begin_pp(dbenv, parent, txnpp, flags) if ((ret = __db_fchk(dbenv, "txn_begin", flags, - DB_DIRTY_READ | DB_TXN_NOWAIT | + DB_DEGREE_2 | DB_DIRTY_READ | DB_TXN_NOWAIT | DB_TXN_NOSYNC | DB_TXN_SYNC)) != 0) return (ret); if ((ret = __db_fcchk(dbenv, @@ -185,6 +184,8 @@ __txn_begin(dbenv, parent, txnpp, flags) TAILQ_INIT(&txn->events); STAILQ_INIT(&txn->logs); txn->flags = TXN_MALLOC; + if (LF_ISSET(DB_DEGREE_2)) + F_SET(txn, TXN_DEGREE_2); if (LF_ISSET(DB_DIRTY_READ)) F_SET(txn, TXN_DIRTY_READ); if (LF_ISSET(DB_TXN_NOSYNC)) @@ -246,9 +247,8 @@ __txn_xa_begin(dbenv, txn) PANIC_CHECK(dbenv); /* - * We need to initialize the transaction structure, but we must - * be careful not to smash the links. We manually intialize the - * structure. + * We need to initialize the transaction structure, but must be careful + * not to smash the links. We manually initialize the structure. */ txn->mgrp = dbenv->tx_handle; TAILQ_INIT(&txn->kids); @@ -306,7 +306,7 @@ __txn_begin_int(txn, internal) int internal; { DB_ENV *dbenv; - DB_LSN begin_lsn, null_lsn; + DB_LSN null_lsn; DB_TXNMGR *mgr; DB_TXNREGION *region; TXN_DETAIL *td; @@ -318,21 +318,6 @@ __txn_begin_int(txn, internal) dbenv = mgr->dbenv; region = mgr->reginfo.primary; - /* - * We do not have to write begin records (and if we do not, then we - * need never write records for read-only transactions). However, - * we do need to find the current LSN so that we can store it in the - * transaction structure, so we can know where to take checkpoints. - * - * XXX - * We should set this value when we write the first log record, not - * here. 
- */ - if (DBENV_LOGGING(dbenv)) - __log_txn_lsn(dbenv, &begin_lsn, NULL, NULL); - else - ZERO_LSN(begin_lsn); - R_LOCK(dbenv, &mgr->reginfo); if (!F_ISSET(txn, TXN_COMPENSATE) && F_ISSET(region, TXN_IN_RECOVERY)) { __db_err(dbenv, "operation not permitted during recovery"); @@ -372,14 +357,14 @@ __txn_begin_int(txn, internal) ®ion->last_txnid, ®ion->cur_maxid); __os_free(dbenv, ids); if (DBENV_LOGGING(dbenv) && - (ret = __txn_recycle_log(dbenv, NULL, - &null_lsn, 0, region->last_txnid, region->cur_maxid)) != 0) + (ret = __txn_recycle_log(dbenv, NULL, &null_lsn, + 0, region->last_txnid + 1, region->cur_maxid)) != 0) goto err; } /* Allocate a new transaction detail structure. */ if ((ret = - __db_shalloc(mgr->reginfo.addr, sizeof(TXN_DETAIL), 0, &td)) != 0) { + __db_shalloc(&mgr->reginfo, sizeof(TXN_DETAIL), 0, &td)) != 0) { __db_err(dbenv, "Unable to allocate memory for transaction detail"); goto err; @@ -395,7 +380,7 @@ __txn_begin_int(txn, internal) td->txnid = id; ZERO_LSN(td->last_lsn); - td->begin_lsn = begin_lsn; + ZERO_LSN(td->begin_lsn); if (txn->parent != NULL) td->parent = txn->parent->off; else @@ -404,7 +389,7 @@ __txn_begin_int(txn, internal) td->flags = 0; td->xa_status = 0; - off = R_OFFSET(&mgr->reginfo, td); + off = R_OFFSET(dbenv, &mgr->reginfo, td); R_UNLOCK(dbenv, &mgr->reginfo); ZERO_LSN(txn->last_lsn); @@ -417,6 +402,7 @@ __txn_begin_int(txn, internal) txn->id = __txn_id; txn->prepare = __txn_prepare; txn->set_timeout = __txn_set_timeout; + txn->set_begin_lsnp = __txn_set_begin_lsnp; /* * If this is a transaction family, we must link the child to the @@ -586,19 +572,20 @@ __txn_commit(txnp, flags) } /* - * Process any aborted pages from our children. - * We delay putting pages on the free list that are newly - * allocated and then aborted so that we can undo other - * allocations, if necessary, without worrying about - * these pages which were not on the free list before. + * Process any aborted pages from our children. We delay putting pages + * on the free list that are newly allocated and then aborted so we can + * undo other allocations, if necessary, without worrying about these + * pages which were not on the free list before. */ if (txnp->txn_list != NULL) { +#ifndef HAVE_FTRUNCATE t_ret = __db_do_the_limbo(dbenv, NULL, txnp, txnp->txn_list, LIMBO_NORMAL); - __db_txnlist_end(dbenv, txnp->txn_list); - txnp->txn_list = NULL; if (t_ret != 0 && ret == 0) ret = t_ret; +#endif + __db_txnlist_end(dbenv, txnp->txn_list); + txnp->txn_list = NULL; } if (ret != 0) @@ -608,13 +595,12 @@ __txn_commit(txnp, flags) return (__txn_end(txnp, 1)); err: /* - * If we are prepared, then we "must" be able to commit. We - * panic here because even though the coordinator might be - * able to retry it is not clear it would know to do that. - * Otherwise we'll try to abort. If that is successful, - * then we return whatever was in ret (i.e., the reason we failed). - * If the abort was unsuccessful, then abort probably returned - * DB_RUNRECOVERY and we need to propagate that up. + * If we are prepared, then we "must" be able to commit. We panic here + * because even though the coordinator might be able to retry it is not + * clear it would know to do that. Otherwise we'll try to abort. If + * that is successful, then we return whatever was in ret (that is, the + * reason we failed). If the abort was unsuccessful, abort probably + * returned DB_RUNRECOVERY and we need to propagate that up. 
*/ if (td->status == TXN_PREPARED) return (__db_panic(dbenv, ret)); @@ -702,7 +688,7 @@ __txn_abort(txnp) request.op = DB_LOCK_UPGRADE_WRITE; request.obj = NULL; if ((ret = __lock_vec( - dbenv, txnp->txnid, 0, &request, 1, NULL)) != 0) + dbenv, txnp->txnid, DB_LOCK_ABORT, &request, 1, NULL)) != 0) return (__db_panic(dbenv, ret)); } if ((ret = __txn_undo(txnp)) != 0) @@ -818,10 +804,12 @@ __txn_prepare(txnp, gid) if ((ret = __txn_commit(kid, DB_TXN_NOSYNC)) != 0) return (ret); +#ifndef HAVE_FTRUNCATE if (txnp->txn_list != NULL && (ret = __db_do_the_limbo(dbenv, NULL, txnp, txnp->txn_list, LIMBO_PREPARE)) != 0) return (ret); +#endif /* * In XA, the global transaction ID in the txn_detail structure is * already set; in a non-XA environment, we must set it here. XA @@ -866,7 +854,7 @@ __txn_prepare(txnp, gid) __os_free(dbenv, request.obj->data); if (ret != 0) return (ret); - + } MUTEX_THREAD_LOCK(dbenv, txnp->mgrp->mutexp); @@ -917,29 +905,30 @@ __txn_isvalid(txnp, tdp, op) TXN_DETAIL **tdp; txnop_t op; { + DB_ENV *dbenv; DB_TXNMGR *mgrp; DB_TXNREGION *region; TXN_DETAIL *tp; mgrp = txnp->mgrp; + dbenv = mgrp->dbenv; region = mgrp->reginfo.primary; /* Check for recovery. */ if (!F_ISSET(txnp, TXN_COMPENSATE) && F_ISSET(region, TXN_IN_RECOVERY)) { - __db_err(mgrp->dbenv, - "operation not permitted during recovery"); + __db_err(dbenv, "operation not permitted during recovery"); goto err; } /* Check for live cursors. */ if (txnp->cursors != 0) { - __db_err(mgrp->dbenv, "transaction has active cursors"); + __db_err(dbenv, "transaction has active cursors"); goto err; } /* Check transaction's state. */ - tp = (TXN_DETAIL *)R_ADDR(&mgrp->reginfo, txnp->off); + tp = (TXN_DETAIL *)R_ADDR(dbenv, &mgrp->reginfo, txnp->off); if (tdp != NULL) *tdp = tp; @@ -961,8 +950,8 @@ __txn_isvalid(txnp, tdp, op) */ if (tp->status != TXN_PREPARED && !F_ISSET(tp, TXN_DTL_RESTORED)) { - __db_err(mgrp->dbenv, "not a restored transaction"); - return (__db_panic(mgrp->dbenv, EINVAL)); + __db_err(dbenv, "not a restored transaction"); + return (__db_panic(dbenv, EINVAL)); } return (0); @@ -975,7 +964,7 @@ __txn_isvalid(txnp, tdp, op) * I'm not arguing this is good, but I could imagine * someone doing it. */ - __db_err(mgrp->dbenv, + __db_err(dbenv, "Prepare disallowed on child transactions"); return (EINVAL); } @@ -989,7 +978,7 @@ __txn_isvalid(txnp, tdp, op) switch (tp->status) { case TXN_PREPARED: if (op == TXN_OP_PREPARE) { - __db_err(mgrp->dbenv, "transaction already prepared"); + __db_err(dbenv, "transaction already prepared"); /* * Txn_prepare doesn't blow away the user handle, so * in this case, give the user the opportunity to @@ -1003,7 +992,7 @@ __txn_isvalid(txnp, tdp, op) case TXN_ABORTED: case TXN_COMMITTED: default: - __db_err(mgrp->dbenv, "transaction already %s", + __db_err(dbenv, "transaction already %s", tp->status == TXN_COMMITTED ? "committed" : "aborted"); goto err; } @@ -1015,7 +1004,7 @@ err: /* * handles are dead by definition when we return, and if you use * a cursor you forgot to close, we have no idea what will happen. */ - return (__db_panic(mgrp->dbenv, EINVAL)); + return (__db_panic(dbenv, EINVAL)); } /* @@ -1066,14 +1055,14 @@ __txn_end(txnp, is_commit) /* End the transaction. 
*/ R_LOCK(dbenv, &mgr->reginfo); - tp = (TXN_DETAIL *)R_ADDR(&mgr->reginfo, txnp->off); + tp = (TXN_DETAIL *)R_ADDR(dbenv, &mgr->reginfo, txnp->off); SH_TAILQ_REMOVE(®ion->active_txn, tp, links, __txn_detail); if (F_ISSET(tp, TXN_DTL_RESTORED)) { region->stat.st_nrestores--; do_closefiles = region->stat.st_nrestores == 0; } - __db_shalloc_free(mgr->reginfo.addr, tp); + __db_shalloc_free(&mgr->reginfo, tp); if (is_commit) region->stat.st_ncommits++; @@ -1163,6 +1152,7 @@ __txn_undo(txnp) dbenv = mgr->dbenv; logc = NULL; txnlist = NULL; + ret = 0; if (!DBENV_LOGGING(dbenv)) return (0); @@ -1178,7 +1168,7 @@ __txn_undo(txnp) * Allocate a txnlist for children and aborted page allocs. * We need to associate the list with the maximal parent * so that aborted pages are recovered when that transaction - * is commited or aborted. + * is committed or aborted. */ for (ptxn = txnp->parent; ptxn != NULL && ptxn->parent != NULL;) ptxn = ptxn->parent; @@ -1240,7 +1230,9 @@ __txn_undo(txnp) } } +#ifndef HAVE_FTRUNCATE ret = __db_do_the_limbo(dbenv, ptxn, txnp, txnlist, LIMBO_NORMAL); +#endif err: if (logc != NULL && (t_ret = __log_c_close(logc)) != 0 && ret == 0) ret = t_ret; @@ -1285,7 +1277,7 @@ __txn_checkpoint_pp(dbenv, kbytes, minutes, flags) __env_rep_enter(dbenv); ret = __txn_checkpoint(dbenv, kbytes, minutes, flags); if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -1304,13 +1296,13 @@ __txn_checkpoint(dbenv, kbytes, minutes, flags) DB_LSN ckp_lsn, last_ckp; DB_TXNMGR *mgr; DB_TXNREGION *region; - TXN_DETAIL *txnp; + REGENV *renv; + REGINFO *infop; time_t last_ckp_time, now; - u_int32_t bytes, gen, mbytes; + u_int32_t bytes, gen, id, logflags, mbytes; int ret; ret = gen = 0; - /* * A client will only call through here during recovery, * so just sync the Mpool and go home. @@ -1327,6 +1319,12 @@ __txn_checkpoint(dbenv, kbytes, minutes, flags) mgr = dbenv->tx_handle; region = mgr->reginfo.primary; + infop = dbenv->reginfo; + renv = infop->primary; + /* + * No mutex is needed as envid is read-only once it is set. + */ + id = renv->envid; /* * The checkpoint LSN is an LSN such that all transactions begun before @@ -1369,20 +1367,8 @@ __txn_checkpoint(dbenv, kbytes, minutes, flags) return (0); } -do_ckp: /* - * Find the oldest active transaction and figure out its "begin" LSN. - * This is the lowest LSN we can checkpoint, since any record written - * after it may be involved in a transaction and may therefore need - * to be undone in the case of an abort. - */ - R_LOCK(dbenv, &mgr->reginfo); - for (txnp = SH_TAILQ_FIRST(®ion->active_txn, __txn_detail); - txnp != NULL; - txnp = SH_TAILQ_NEXT(txnp, links, __txn_detail)) - if (!IS_ZERO_LSN(txnp->begin_lsn) && - log_compare(&txnp->begin_lsn, &ckp_lsn) < 0) - ckp_lsn = txnp->begin_lsn; - R_UNLOCK(dbenv, &mgr->reginfo); +do_ckp: + __txn_getactive(dbenv, &ckp_lsn); if (MPOOL_ON(dbenv) && (ret = __memp_sync(dbenv, NULL)) != 0) { __db_err(dbenv, @@ -1411,10 +1397,12 @@ do_ckp: /* * recovery from the ckp_lsn contained in this * checkpoint. 
*/ - if ((ret = __dbreg_open_files(dbenv)) != 0 || - (ret = __txn_ckp_log(dbenv, NULL, - &ckp_lsn, DB_FLUSH | DB_LOG_PERM | DB_LOG_CHKPNT, &ckp_lsn, - &last_ckp, (int32_t)time(NULL), gen)) != 0) { + logflags = DB_LOG_PERM | DB_LOG_CHKPNT; + if (!IS_RECOVERING(dbenv)) + logflags |= DB_FLUSH; + if ((ret = __dbreg_log_files(dbenv)) != 0 || + (ret = __txn_ckp_log(dbenv, NULL, &ckp_lsn, logflags, + &ckp_lsn, &last_ckp, (int32_t)time(NULL), id, gen)) != 0) { __db_err(dbenv, "txn_checkpoint: log failed at LSN [%ld %ld] %s", (long)ckp_lsn.file, (long)ckp_lsn.offset, @@ -1427,6 +1415,43 @@ do_ckp: /* return (ret); } +/* + * __txn_getactive -- + * Find the oldest active transaction and figure out its "begin" LSN. + * This is the lowest LSN we can checkpoint, since any record written + * after it may be involved in a transaction and may therefore need + * to be undone in the case of an abort. + * + * We check both the file and offset for 0 since the lsn may be in + * transition. If it is then we don't care about this txn because it + * must be starting after we set the initial value of lsnp in the caller. + * All txns must initialize their begin_lsn before writing to the log. + * + * PUBLIC: void __txn_getactive __P((DB_ENV *, DB_LSN *)); + */ +void +__txn_getactive(dbenv, lsnp) + DB_ENV *dbenv; + DB_LSN *lsnp; +{ + DB_TXNMGR *mgr; + DB_TXNREGION *region; + TXN_DETAIL *txnp; + + mgr = dbenv->tx_handle; + region = mgr->reginfo.primary; + + R_LOCK(dbenv, &mgr->reginfo); + for (txnp = SH_TAILQ_FIRST(&region->active_txn, __txn_detail); + txnp != NULL; + txnp = SH_TAILQ_NEXT(txnp, links, __txn_detail)) + if (txnp->begin_lsn.file != 0 && + txnp->begin_lsn.offset != 0 && + log_compare(&txnp->begin_lsn, lsnp) < 0) + *lsnp = txnp->begin_lsn; + R_UNLOCK(dbenv, &mgr->reginfo); +} + /* * __txn_getckp -- * Get the LSN of the last transaction checkpoint. @@ -1470,7 +1495,7 @@ __txn_activekids(dbenv, rectype, txnp) { /* * On a child commit, we know that there are children (i.e., the - * commiting child at the least. In that case, skip this check. + * committing child at the least. In that case, skip this check. */ if (F_ISSET(txnp, TXN_COMPENSATE) || rectype == DB___txn_child) return (0); @@ -1496,9 +1521,9 @@ __txn_force_abort(dbenv, buffer) { DB_CIPHER *db_cipher; HDR *hdr; - u_int32_t hdrlen, offset, opcode, rec_len, sum_len; + u_int32_t hdrlen, offset, opcode, sum_len; u_int8_t *bp, *key, chksum[DB_MAC_KEY]; - size_t hdrsize; + size_t hdrsize, rec_len; int ret; db_cipher = dbenv->crypto_handle; @@ -1536,7 +1561,7 @@ __txn_force_abort(dbenv, buffer) return (__db_panic(dbenv, ret)); __db_chksum(buffer + hdrsize, rec_len, key, chksum); - memcpy(buffer + SSZ(HDR, chksum), &chksum, sum_len); + memcpy(buffer + SSZA(HDR, chksum), chksum, sum_len); return (0); } @@ -1563,8 +1588,8 @@ __txn_preclose(dbenv) R_LOCK(dbenv, &mgr->reginfo); if (region != NULL && - region->stat.st_nrestores - <= mgr->n_discards && mgr->n_discards != 0) + region->stat.st_nrestores <= mgr->n_discards && + mgr->n_discards != 0) do_closefiles = 1; R_UNLOCK(dbenv, &mgr->reginfo); @@ -1624,10 +1649,10 @@ __txn_updateckp(dbenv, lsnp) region = mgr->reginfo.primary; /* - * We want to make sure last_ckp only moves forward; since - * we drop locks above and in log_put, it's possible - * for two calls to __txn_ckp_log to finish in a different - * order from how they were called.
+ * We want to make sure last_ckp only moves forward; since we drop + * locks above and in log_put, it's possible for two calls to + * __txn_ckp_log to finish in a different order from how they were + * called. */ R_LOCK(dbenv, &mgr->reginfo); if (log_compare(®ion->last_ckp, lsnp) < 0) { @@ -1636,3 +1661,26 @@ __txn_updateckp(dbenv, lsnp) } R_UNLOCK(dbenv, &mgr->reginfo); } + +/* + * txn_set_begin_lsnp -- + * Set the pointer to the begin_lsn field if that field is zero. + */ +static void +__txn_set_begin_lsnp(txn, rlsnp) + DB_TXN *txn; + DB_LSN **rlsnp; +{ + DB_LSN *lsnp; + TXN_DETAIL *td; + + td = (TXN_DETAIL *)R_ADDR(txn->mgrp->dbenv, + &txn->mgrp->reginfo, txn->off); + while (td->parent != INVALID_ROFF) + td = (TXN_DETAIL *)R_ADDR(txn->mgrp->dbenv, + &txn->mgrp->reginfo, td->parent); + + lsnp = &td->begin_lsn; + if (IS_ZERO_LSN(*lsnp)) + *rlsnp = lsnp; +} diff --git a/db/txn/txn.src b/db/txn/txn.src index 11afb4004..34bd8bd0b 100644 --- a/db/txn/txn.src +++ b/db/txn/txn.src @@ -1,17 +1,15 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. * - * $Id: txn.src,v 11.24 2003/11/14 05:32:33 ubell Exp $ + * $Id: txn.src,v 11.33 2004/07/27 12:35:19 bostic Exp $ */ PREFIX __txn DBPRIVATE -INCLUDE #include "db_config.h" -INCLUDE INCLUDE #ifndef NO_SYSTEM_INCLUDES INCLUDE #include INCLUDE @@ -35,6 +33,8 @@ INCLUDE #include "dbinc/crypto.h" INCLUDE #include "dbinc/db_page.h" INCLUDE #include "dbinc/db_dispatch.h" INCLUDE #include "dbinc/db_am.h" +INCLUDE #include "dbinc/db_shash.h" +INCLUDE #include "dbinc/lock.h" INCLUDE #include "dbinc/log.h" INCLUDE #include "dbinc/txn.h" INCLUDE @@ -46,9 +46,9 @@ INCLUDE * either changes the Epoch or has a 64-bit offset. */ BEGIN regop 10 -ARG opcode u_int32_t lu +ARG opcode u_int32_t ld TIME timestamp int32_t ld -DBT locks DBT s +LOCKS locks DBT s END /* @@ -64,11 +64,16 @@ END * The previous checkpoint. * timestamp: * See comment in commit about timestamps. + * envid: + * Environment ID of this checkpoint. + * rep_gen: + * Persistent replication generation number. */ BEGIN ckp 11 POINTER ckp_lsn DB_LSN * lu POINTER last_ckp DB_LSN * lu TIME timestamp int32_t ld +ARG envid u_int32_t ld ARG rep_gen u_int32_t ld END @@ -94,7 +99,7 @@ ARG formatID int32_t ld ARG gtrid u_int32_t u ARG bqual u_int32_t u POINTER begin_lsn DB_LSN * lu -DBT locks DBT s +LOCKS locks DBT s END /* diff --git a/db/txn/txn_auto.c b/db/txn/txn_auto.c index a8bd386b4..c5afa0cdb 100644 --- a/db/txn/txn_auto.c +++ b/db/txn/txn_auto.c @@ -1,4 +1,5 @@ /* Do not edit: automatically built by gen_rec.awk. 
*/ + #include "db_config.h" #ifndef NO_SYSTEM_INCLUDES @@ -24,6 +25,8 @@ #include "dbinc/db_page.h" #include "dbinc/db_dispatch.h" #include "dbinc/db_am.h" +#include "dbinc/db_shash.h" +#include "dbinc/lock.h" #include "dbinc/log.h" #include "dbinc/txn.h" @@ -44,31 +47,42 @@ __txn_regop_log(dbenv, txnid, ret_lsnp, flags, { DBT logrec; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; + COMPQUIET(lr, NULL); + rectype = DB___txn_regop; npad = 0; + rlsnp = ret_lsnp; - is_durable = 1; - if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE)) { + ret = 0; + + if (LF_ISSET(DB_LOG_NOT_DURABLE)) { if (txnid == NULL) return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -83,27 +97,23 @@ __txn_regop_log(dbenv, txnid, ret_lsnp, flags, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -140,137 +150,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
*/ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; #endif - } else{ -#ifdef DIAGNOSTIC -do_put: -#endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__txn_regop_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __txn_regop_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__txn_regop_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __txn_regop_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__txn_regop_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __txn_regop_args *argp; - struct tm *lt; - u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __txn_regop_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__txn_regop%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\topcode: %lu\n", (u_long)argp->opcode); - lt = localtime((time_t *)&argp->timestamp); - (void)printf( - "\ttimestamp: %ld (%.24s, 20%02lu%02lu%02lu%02lu%02lu.%02lu)\n", - (long)argp->timestamp, ctime((time_t *)&argp->timestamp), - (u_long)lt->tm_year - 100, (u_long)lt->tm_mon+1, - (u_long)lt->tm_mday, (u_long)lt->tm_hour, - (u_long)lt->tm_min, (u_long)lt->tm_sec); - (void)printf("\tlocks: "); - for (i = 0; i < argp->locks.size; i++) { - ch = ((u_int8_t *)argp->locks.data)[i]; - printf(isprint(ch) || ch == 0x0a ? 
"%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __txn_regop_read __P((DB_ENV *, void *, __txn_regop_args **)); */ @@ -288,9 +208,9 @@ __txn_regop_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__txn_regop_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -320,11 +240,11 @@ __txn_regop_read(dbenv, recbuf, argpp) /* * PUBLIC: int __txn_ckp_log __P((DB_ENV *, DB_TXN *, DB_LSN *, - * PUBLIC: u_int32_t, DB_LSN *, DB_LSN *, int32_t, u_int32_t)); + * PUBLIC: u_int32_t, DB_LSN *, DB_LSN *, int32_t, u_int32_t, u_int32_t)); */ int __txn_ckp_log(dbenv, txnid, ret_lsnp, flags, - ckp_lsn, last_ckp, timestamp, rep_gen) + ckp_lsn, last_ckp, timestamp, envid, rep_gen) DB_ENV *dbenv; DB_TXN *txnid; DB_LSN *ret_lsnp; @@ -332,35 +252,47 @@ __txn_ckp_log(dbenv, txnid, ret_lsnp, flags, DB_LSN * ckp_lsn; DB_LSN * last_ckp; int32_t timestamp; + u_int32_t envid; u_int32_t rep_gen; { DBT logrec; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; + COMPQUIET(lr, NULL); + rectype = DB___txn_ckp; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; - if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE)) { + if (LF_ISSET(DB_LOG_NOT_DURABLE)) { if (txnid == NULL) return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -369,6 +301,7 @@ __txn_ckp_log(dbenv, txnid, ret_lsnp, flags, + sizeof(*ckp_lsn) + sizeof(*last_ckp) + sizeof(u_int32_t) + + sizeof(u_int32_t) + sizeof(u_int32_t); if (CRYPTO_ON(dbenv)) { npad = @@ -376,27 +309,23 @@ __txn_ckp_log(dbenv, txnid, ret_lsnp, flags, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -428,139 +357,57 @@ do_malloc: memcpy(bp, &uinttmp, sizeof(uinttmp)); bp += sizeof(uinttmp); + uinttmp = (u_int32_t)envid; + memcpy(bp, &uinttmp, sizeof(uinttmp)); + bp += sizeof(uinttmp); + uinttmp = (u_int32_t)rep_gen; memcpy(bp, &uinttmp, sizeof(uinttmp)); bp += sizeof(uinttmp); DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
*/ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__txn_ckp_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __txn_ckp_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__txn_ckp_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __txn_ckp_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__txn_ckp_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __txn_ckp_args *argp; - struct tm *lt; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __txn_ckp_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__txn_ckp%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? 
"_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tckp_lsn: [%lu][%lu]\n", - (u_long)argp->ckp_lsn.file, (u_long)argp->ckp_lsn.offset); - (void)printf("\tlast_ckp: [%lu][%lu]\n", - (u_long)argp->last_ckp.file, (u_long)argp->last_ckp.offset); - lt = localtime((time_t *)&argp->timestamp); - (void)printf( - "\ttimestamp: %ld (%.24s, 20%02lu%02lu%02lu%02lu%02lu.%02lu)\n", - (long)argp->timestamp, ctime((time_t *)&argp->timestamp), - (u_long)lt->tm_year - 100, (u_long)lt->tm_mon+1, - (u_long)lt->tm_mday, (u_long)lt->tm_hour, - (u_long)lt->tm_min, (u_long)lt->tm_sec); - (void)printf("\trep_gen: %ld\n", (long)argp->rep_gen); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __txn_ckp_read __P((DB_ENV *, void *, __txn_ckp_args **)); */ @@ -578,9 +425,9 @@ __txn_ckp_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__txn_ckp_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -600,6 +447,10 @@ __txn_ckp_read(dbenv, recbuf, argpp) argp->timestamp = (int32_t)uinttmp; bp += sizeof(uinttmp); + memcpy(&uinttmp, bp, sizeof(uinttmp)); + argp->envid = (u_int32_t)uinttmp; + bp += sizeof(uinttmp); + memcpy(&uinttmp, bp, sizeof(uinttmp)); argp->rep_gen = (u_int32_t)uinttmp; bp += sizeof(uinttmp); @@ -624,31 +475,42 @@ __txn_child_log(dbenv, txnid, ret_lsnp, flags, { DBT logrec; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; + COMPQUIET(lr, NULL); + rectype = DB___txn_child; npad = 0; + rlsnp = ret_lsnp; - is_durable = 1; - if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE)) { + ret = 0; + + if (LF_ISSET(DB_LOG_NOT_DURABLE)) { if (txnid == NULL) return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. 
+ */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -662,27 +524,23 @@ __txn_child_log(dbenv, txnid, ret_lsnp, flags, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -710,123 +568,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__txn_child_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __txn_child_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__txn_child_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __txn_child_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__txn_child_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __txn_child_args *argp; - int ret; - - notused2 = 
DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __txn_child_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__txn_child%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tchild: 0x%lx\n", (u_long)argp->child); - (void)printf("\tc_lsn: [%lu][%lu]\n", - (u_long)argp->c_lsn.file, (u_long)argp->c_lsn.offset); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __txn_child_read __P((DB_ENV *, void *, __txn_child_args **)); */ @@ -844,9 +626,9 @@ __txn_child_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__txn_child_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -890,31 +672,42 @@ __txn_xa_regop_log(dbenv, txnid, ret_lsnp, flags, { DBT logrec; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t zero, uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; + COMPQUIET(lr, NULL); + rectype = DB___txn_xa_regop; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; - if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE)) { + if (LF_ISSET(DB_LOG_NOT_DURABLE)) { if (txnid == NULL) return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -933,27 +726,23 @@ __txn_xa_regop_log(dbenv, txnid, ret_lsnp, flags, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -1015,140 +804,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. 
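The rewritten __txn_*_log bodies in these hunks all follow one pattern: a record that is durable, or that is written outside any transaction, is malloc'd and handed straight to __log_put (which also fills in the transaction's begin LSN through the pointer passed via DB_SET_BEGIN_LSNP), while a non-durable record is queued on the transaction's in-memory log list and its LSN is marked as not logged; under DIAGNOSTIC the non-durable record is still written, with the debug bit set so recovery ignores it. Below is a minimal standalone sketch of the non-DIAGNOSTIC control flow; my_txn, my_record, fake_log_put and put_record are hypothetical stand-ins, not Berkeley DB names.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct my_record {                      /* queued in-memory log record */
	void *data;
	size_t size;
	struct my_record *next;
};
struct my_txn {                         /* minimal transaction handle */
	struct my_record *logs;         /* newest record first */
};

/* Stand-in for __log_put: hand out increasing fake LSNs. */
int fake_log_put(const void *data, size_t size, unsigned *lsnp)
{
	static unsigned next_lsn = 1;

	(void)data;
	(void)size;
	*lsnp = next_lsn++;
	return (0);
}

/*
 * Durable records (or records with no transaction) go to the log at once;
 * non-durable records are kept on the transaction's own list and receive
 * no real LSN (0 plays the role of LSN_NOT_LOGGED here).
 */
int put_record(struct my_txn *txn, int is_durable,
    const void *buf, size_t size, unsigned *lsnp)
{
	struct my_record *r;

	if (is_durable || txn == NULL)
		return (fake_log_put(buf, size, lsnp));

	if ((r = malloc(sizeof(*r) + size)) == NULL)
		return (ENOMEM);
	r->data = r + 1;                /* payload follows the header */
	r->size = size;
	memcpy(r->data, buf, size);
	r->next = txn->logs;            /* STAILQ_INSERT_HEAD analogue */
	txn->logs = r;
	*lsnp = 0;
	return (0);
}

One consequence of this layout is that aborting a non-durable transaction only has to free the queued records; nothing ever reached the on-disk log.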
+ /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. */ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__txn_xa_regop_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __txn_xa_regop_getpgnos __P((DB_ENV *, DBT *, - * PUBLIC: DB_LSN *, db_recops, void *)); - */ -int -__txn_xa_regop_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __txn_xa_regop_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__txn_xa_regop_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __txn_xa_regop_args *argp; - u_int32_t i; - int ch; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __txn_xa_regop_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__txn_xa_regop%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? "_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\topcode: %lu\n", (u_long)argp->opcode); - (void)printf("\txid: "); - for (i = 0; i < argp->xid.size; i++) { - ch = ((u_int8_t *)argp->xid.data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\tformatID: %ld\n", (long)argp->formatID); - (void)printf("\tgtrid: %u\n", argp->gtrid); - (void)printf("\tbqual: %u\n", argp->bqual); - (void)printf("\tbegin_lsn: [%lu][%lu]\n", - (u_long)argp->begin_lsn.file, (u_long)argp->begin_lsn.offset); - (void)printf("\tlocks: "); - for (i = 0; i < argp->locks.size; i++) { - ch = ((u_int8_t *)argp->locks.data)[i]; - printf(isprint(ch) || ch == 0x0a ? 
"%c" : "%#x ", ch); - } - (void)printf("\n"); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __txn_xa_regop_read __P((DB_ENV *, void *, * PUBLIC: __txn_xa_regop_args **)); @@ -1167,9 +863,9 @@ __txn_xa_regop_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__txn_xa_regop_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -1230,31 +926,42 @@ __txn_recycle_log(dbenv, txnid, ret_lsnp, flags, { DBT logrec; DB_TXNLOGREC *lr; - DB_LSN *lsnp, null_lsn; + DB_LSN *lsnp, null_lsn, *rlsnp; u_int32_t uinttmp, rectype, txn_num; u_int npad; u_int8_t *bp; int is_durable, ret; + COMPQUIET(lr, NULL); + rectype = DB___txn_recycle; npad = 0; + rlsnp = ret_lsnp; + + ret = 0; - is_durable = 1; - if (LF_ISSET(DB_LOG_NOT_DURABLE) || - F_ISSET(dbenv, DB_ENV_TXN_NOT_DURABLE)) { + if (LF_ISSET(DB_LOG_NOT_DURABLE)) { if (txnid == NULL) return (0); is_durable = 0; - } + } else + is_durable = 1; + if (txnid == NULL) { txn_num = 0; - null_lsn.file = 0; - null_lsn.offset = 0; lsnp = &null_lsn; + null_lsn.file = null_lsn.offset = 0; } else { if (TAILQ_FIRST(&txnid->kids) != NULL && (ret = __txn_activekids(dbenv, rectype, txnid)) != 0) return (ret); + /* + * We need to assign begin_lsn while holding region mutex. + * That assignment is done inside the DbEnv->log_put call, + * so pass in the appropriate memory location to be filled + * in by the log_put code. + */ + DB_SET_BEGIN_LSNP(txnid, &rlsnp); txn_num = txnid->txnid; lsnp = &txnid->last_lsn; } @@ -1268,27 +975,23 @@ __txn_recycle_log(dbenv, txnid, ret_lsnp, flags, logrec.size += npad; } - if (!is_durable && txnid != NULL) { + if (is_durable || txnid == NULL) { + if ((ret = + __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) + return (ret); + } else { if ((ret = __os_malloc(dbenv, logrec.size + sizeof(DB_TXNLOGREC), &lr)) != 0) return (ret); #ifdef DIAGNOSTIC - goto do_malloc; -#else - logrec.data = &lr->data; -#endif - } else { -#ifdef DIAGNOSTIC -do_malloc: -#endif if ((ret = __os_malloc(dbenv, logrec.size, &logrec.data)) != 0) { -#ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) - (void)__os_free(dbenv, lr); -#endif + __os_free(dbenv, lr); return (ret); } +#else + logrec.data = lr->data; +#endif } if (npad > 0) memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad); @@ -1314,122 +1017,47 @@ do_malloc: DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size); + if (is_durable || txnid == NULL) { + if ((ret = __log_put(dbenv, rlsnp,(DBT *)&logrec, + flags | DB_LOG_NOCOPY)) == 0 && txnid != NULL) { + txnid->last_lsn = *rlsnp; + if (rlsnp != ret_lsnp) + *ret_lsnp = *rlsnp; + } + } else { #ifdef DIAGNOSTIC - if (!is_durable && txnid != NULL) { - /* - * We set the debug bit if we are going - * to log non-durable transactions so - * they will be ignored by recovery. + /* + * Set the debug bit if we are going to log non-durable + * transactions so they will be ignored by recovery. 
*/ memcpy(lr->data, logrec.data, logrec.size); rectype |= DB_debug_FLAG; memcpy(logrec.data, &rectype, sizeof(rectype)); - } -#endif - if (!is_durable && txnid != NULL) { + ret = __log_put(dbenv, + rlsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); +#else ret = 0; - STAILQ_INSERT_HEAD(&txnid->logs, lr, links); -#ifdef DIAGNOSTIC - goto do_put; -#endif - } else{ -#ifdef DIAGNOSTIC -do_put: #endif - ret = __log_put(dbenv, - ret_lsnp, (DBT *)&logrec, flags | DB_LOG_NOCOPY); - if (ret == 0 && txnid != NULL) - txnid->last_lsn = *ret_lsnp; + STAILQ_INSERT_HEAD(&txnid->logs, lr, links); + LSN_NOT_LOGGED(*ret_lsnp); } - if (!is_durable) - LSN_NOT_LOGGED(*ret_lsnp); #ifdef LOG_DIAGNOSTIC if (ret != 0) (void)__txn_recycle_print(dbenv, (DBT *)&logrec, ret_lsnp, NULL, NULL); #endif -#ifndef DIAGNOSTIC + +#ifdef DIAGNOSTIC + __os_free(dbenv, logrec.data); +#else if (is_durable || txnid == NULL) -#endif __os_free(dbenv, logrec.data); - +#endif return (ret); } -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __txn_recycle_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__txn_recycle_getpgnos(dbenv, rec, lsnp, notused1, summary) - DB_ENV *dbenv; - DBT *rec; - DB_LSN *lsnp; - db_recops notused1; - void *summary; -{ - TXN_RECS *t; - int ret; - COMPQUIET(rec, NULL); - COMPQUIET(notused1, DB_TXN_ABORT); - - t = (TXN_RECS *)summary; - - if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0) - return (ret); - - t->array[t->npages].flags = LSN_PAGE_NOLOCK; - t->array[t->npages].lsn = *lsnp; - t->array[t->npages].fid = DB_LOGFILEID_INVALID; - memset(&t->array[t->npages].pgdesc, 0, - sizeof(t->array[t->npages].pgdesc)); - - t->npages++; - - return (0); -} -#endif /* HAVE_REPLICATION */ - -/* - * PUBLIC: int __txn_recycle_print __P((DB_ENV *, DBT *, DB_LSN *, - * PUBLIC: db_recops, void *)); - */ -int -__txn_recycle_print(dbenv, dbtp, lsnp, notused2, notused3) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops notused2; - void *notused3; -{ - __txn_recycle_args *argp; - int ret; - - notused2 = DB_TXN_ABORT; - notused3 = NULL; - - if ((ret = __txn_recycle_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - (void)printf( - "[%lu][%lu]__txn_recycle%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", - (u_long)lsnp->file, - (u_long)lsnp->offset, - (argp->type & DB_debug_FLAG) ? 
"_debug" : "", - (u_long)argp->type, - (u_long)argp->txnid->txnid, - (u_long)argp->prev_lsn.file, - (u_long)argp->prev_lsn.offset); - (void)printf("\tmin: %u\n", argp->min); - (void)printf("\tmax: %u\n", argp->max); - (void)printf("\n"); - __os_free(dbenv, argp); - - return (0); -} - /* * PUBLIC: int __txn_recycle_read __P((DB_ENV *, void *, * PUBLIC: __txn_recycle_args **)); @@ -1448,9 +1076,9 @@ __txn_recycle_read(dbenv, recbuf, argpp) if ((ret = __os_malloc(dbenv, sizeof(__txn_recycle_args) + sizeof(DB_TXN), &argp)) != 0) return (ret); + bp = recbuf; argp->txnid = (DB_TXN *)&argp[1]; - bp = recbuf; memcpy(&argp->type, bp, sizeof(argp->type)); bp += sizeof(argp->type); @@ -1472,68 +1100,6 @@ __txn_recycle_read(dbenv, recbuf, argpp) return (0); } -/* - * PUBLIC: int __txn_init_print __P((DB_ENV *, int (***)(DB_ENV *, - * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); - */ -int -__txn_init_print(dbenv, dtabp, dtabsizep) - DB_ENV *dbenv; - int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - size_t *dtabsizep; -{ - int ret; - - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __txn_regop_print, DB___txn_regop)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __txn_ckp_print, DB___txn_ckp)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __txn_child_print, DB___txn_child)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __txn_xa_regop_print, DB___txn_xa_regop)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __txn_recycle_print, DB___txn_recycle)) != 0) - return (ret); - return (0); -} - -#ifdef HAVE_REPLICATION -/* - * PUBLIC: int __txn_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, - * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); - */ -int -__txn_init_getpgnos(dbenv, dtabp, dtabsizep) - DB_ENV *dbenv; - int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - size_t *dtabsizep; -{ - int ret; - - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __txn_regop_getpgnos, DB___txn_regop)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __txn_ckp_getpgnos, DB___txn_ckp)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __txn_child_getpgnos, DB___txn_child)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __txn_xa_regop_getpgnos, DB___txn_xa_regop)) != 0) - return (ret); - if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, - __txn_recycle_getpgnos, DB___txn_recycle)) != 0) - return (ret); - return (0); -} -#endif /* HAVE_REPLICATION */ - /* * PUBLIC: int __txn_init_recover __P((DB_ENV *, int (***)(DB_ENV *, * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); diff --git a/db/txn/txn_autop.c b/db/txn/txn_autop.c new file mode 100644 index 000000000..dbcf61b9d --- /dev/null +++ b/db/txn/txn_autop.c @@ -0,0 +1,281 @@ +/* Do not edit: automatically built by gen_rec.awk. 
*/ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include + +#if TIME_WITH_SYS_TIME +#include +#include +#else +#if HAVE_SYS_TIME_H +#include +#else +#include +#endif /* HAVE_SYS_TIME_H */ +#endif /* TIME_WITH SYS_TIME */ + +#include +#include +#endif + +#include "db_int.h" +#include "dbinc/crypto.h" +#include "dbinc/db_page.h" +#include "dbinc/db_dispatch.h" +#include "dbinc/db_am.h" +#include "dbinc/db_shash.h" +#include "dbinc/lock.h" +#include "dbinc/log.h" +#include "dbinc/txn.h" + +/* + * PUBLIC: int __txn_regop_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__txn_regop_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __txn_regop_args *argp; + struct tm *lt; + time_t timeval; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __txn_regop_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__txn_regop%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\topcode: %ld\n", (long)argp->opcode); + timeval = (time_t)argp->timestamp; + lt = localtime(&timeval); + (void)printf( + "\ttimestamp: %ld (%.24s, 20%02lu%02lu%02lu%02lu%02lu.%02lu)\n", + (long)argp->timestamp, ctime(&timeval), + (u_long)lt->tm_year - 100, (u_long)lt->tm_mon+1, + (u_long)lt->tm_mday, (u_long)lt->tm_hour, + (u_long)lt->tm_min, (u_long)lt->tm_sec); + (void)printf("\tlocks: \n"); + __lock_list_print(dbenv, &argp->locks); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __txn_ckp_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__txn_ckp_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __txn_ckp_args *argp; + struct tm *lt; + time_t timeval; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __txn_ckp_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__txn_ckp%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? 
"_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tckp_lsn: [%lu][%lu]\n", + (u_long)argp->ckp_lsn.file, (u_long)argp->ckp_lsn.offset); + (void)printf("\tlast_ckp: [%lu][%lu]\n", + (u_long)argp->last_ckp.file, (u_long)argp->last_ckp.offset); + timeval = (time_t)argp->timestamp; + lt = localtime(&timeval); + (void)printf( + "\ttimestamp: %ld (%.24s, 20%02lu%02lu%02lu%02lu%02lu.%02lu)\n", + (long)argp->timestamp, ctime(&timeval), + (u_long)lt->tm_year - 100, (u_long)lt->tm_mon+1, + (u_long)lt->tm_mday, (u_long)lt->tm_hour, + (u_long)lt->tm_min, (u_long)lt->tm_sec); + (void)printf("\tenvid: %ld\n", (long)argp->envid); + (void)printf("\trep_gen: %ld\n", (long)argp->rep_gen); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __txn_child_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__txn_child_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __txn_child_args *argp; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __txn_child_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__txn_child%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tchild: 0x%lx\n", (u_long)argp->child); + (void)printf("\tc_lsn: [%lu][%lu]\n", + (u_long)argp->c_lsn.file, (u_long)argp->c_lsn.offset); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __txn_xa_regop_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__txn_xa_regop_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __txn_xa_regop_args *argp; + u_int32_t i; + int ch; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __txn_xa_regop_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__txn_xa_regop%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\topcode: %lu\n", (u_long)argp->opcode); + (void)printf("\txid: "); + for (i = 0; i < argp->xid.size; i++) { + ch = ((u_int8_t *)argp->xid.data)[i]; + printf(isprint(ch) || ch == 0x0a ? 
"%c" : "%#x ", ch); + } + (void)printf("\n"); + (void)printf("\tformatID: %ld\n", (long)argp->formatID); + (void)printf("\tgtrid: %u\n", argp->gtrid); + (void)printf("\tbqual: %u\n", argp->bqual); + (void)printf("\tbegin_lsn: [%lu][%lu]\n", + (u_long)argp->begin_lsn.file, (u_long)argp->begin_lsn.offset); + (void)printf("\tlocks: \n"); + __lock_list_print(dbenv, &argp->locks); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __txn_recycle_print __P((DB_ENV *, DBT *, DB_LSN *, + * PUBLIC: db_recops, void *)); + */ +int +__txn_recycle_print(dbenv, dbtp, lsnp, notused2, notused3) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops notused2; + void *notused3; +{ + __txn_recycle_args *argp; + int ret; + + notused2 = DB_TXN_ABORT; + notused3 = NULL; + + if ((ret = __txn_recycle_read(dbenv, dbtp->data, &argp)) != 0) + return (ret); + (void)printf( + "[%lu][%lu]__txn_recycle%s: rec: %lu txnid %lx prevlsn [%lu][%lu]\n", + (u_long)lsnp->file, + (u_long)lsnp->offset, + (argp->type & DB_debug_FLAG) ? "_debug" : "", + (u_long)argp->type, + (u_long)argp->txnid->txnid, + (u_long)argp->prev_lsn.file, + (u_long)argp->prev_lsn.offset); + (void)printf("\tmin: %u\n", argp->min); + (void)printf("\tmax: %u\n", argp->max); + (void)printf("\n"); + __os_free(dbenv, argp); + return (0); +} + +/* + * PUBLIC: int __txn_init_print __P((DB_ENV *, int (***)(DB_ENV *, + * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *)); + */ +int +__txn_init_print(dbenv, dtabp, dtabsizep) + DB_ENV *dbenv; + int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); + size_t *dtabsizep; +{ + int ret; + + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __txn_regop_print, DB___txn_regop)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __txn_ckp_print, DB___txn_ckp)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __txn_child_print, DB___txn_child)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __txn_xa_regop_print, DB___txn_xa_regop)) != 0) + return (ret); + if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep, + __txn_recycle_print, DB___txn_recycle)) != 0) + return (ret); + return (0); +} diff --git a/db/txn/txn_method.c b/db/txn/txn_method.c index f9359f455..c13f86dee 100644 --- a/db/txn/txn_method.c +++ b/db/txn/txn_method.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: txn_method.c,v 11.72 2004/03/23 17:24:18 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: txn_method.c,v 11.66 2003/06/30 17:20:30 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -21,11 +19,14 @@ static const char revid[] = "$Id: txn_method.c,v 11.66 2003/06/30 17:20:30 bosti #include #endif +#ifdef HAVE_RPC +#include "db_server.h" +#endif + #include "db_int.h" #include "dbinc/txn.h" #ifdef HAVE_RPC -#include "dbinc_auto/db_server.h" #include "dbinc_auto/rpc_client_ext.h" #endif @@ -62,6 +63,7 @@ __txn_dbenv_create(dbenv) dbenv->txn_checkpoint = __dbcl_txn_checkpoint; dbenv->txn_recover = __dbcl_txn_recover; dbenv->txn_stat = __dbcl_txn_stat; + dbenv->txn_stat_print = NULL; dbenv->txn_begin = __dbcl_txn_begin; } else #endif @@ -74,6 +76,7 @@ __txn_dbenv_create(dbenv) dbenv->txn_checkpoint = __txn_checkpoint_pp; dbenv->txn_recover = __txn_recover_pp; dbenv->txn_stat = __txn_stat_pp; + dbenv->txn_stat_print = __txn_stat_print_pp; dbenv->txn_begin = __txn_begin_pp; } } @@ -83,7 +86,15 @@ __txn_get_tx_max(dbenv, tx_maxp) DB_ENV *dbenv; u_int32_t *tx_maxp; { - *tx_maxp = dbenv->tx_max; + ENV_NOT_CONFIGURED(dbenv, + dbenv->tx_handle, "DB_ENV->get_tx_max", DB_INIT_TXN); + + if (TXN_ON(dbenv)) { + /* Cannot be set after open, no lock required to read. */ + *tx_maxp = ((DB_TXNREGION *) + ((DB_TXNMGR *)dbenv->tx_handle)->reginfo.primary)->maxtxns; + } else + *tx_maxp = dbenv->tx_max; return (0); } diff --git a/db/txn/txn_rec.c b/db/txn/txn_rec.c index be4b016b5..ea885528f 100644 --- a/db/txn/txn_rec.c +++ b/db/txn/txn_rec.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. */ /* @@ -31,14 +31,12 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * + * $Id: txn_rec.c,v 11.64 2004/09/22 17:41:10 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: txn_rec.c,v 11.54 2003/10/31 23:26:11 ubell Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -50,15 +48,13 @@ static const char revid[] = "$Id: txn_rec.c,v 11.54 2003/10/31 23:26:11 ubell Ex #include "dbinc/txn.h" #include "dbinc/db_am.h" -#define IS_XA_TXN(R) (R->xid.size != 0) - /* * PUBLIC: int __txn_regop_recover * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); * * These records are only ever written for commits. Normally, we redo any * committed transaction, however if we are doing recovery to a timestamp, then - * we may treat transactions that commited after the timestamp as aborted. + * we may treat transactions that committed after the timestamp as aborted. */ int __txn_regop_recover(dbenv, dbtp, lsnp, op, info) @@ -71,6 +67,7 @@ __txn_regop_recover(dbenv, dbtp, lsnp, op, info) DB_TXNHEAD *headp; __txn_regop_args *argp; int ret; + u_int32_t status; #ifdef DEBUG_RECOVER (void)__txn_regop_print(dbenv, dbtp, lsnp, op, info); @@ -86,14 +83,16 @@ __txn_regop_recover(dbenv, dbtp, lsnp, op, info) * apply to the BACKWARD_ROLL case. */ - if (op == DB_TXN_FORWARD_ROLL) + if (op == DB_TXN_FORWARD_ROLL) { /* * If this was a 2-phase-commit transaction, then it * might already have been removed from the list, and * that's OK. Ignore the return code from remove. 
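The reworked error handling around __db_txnlist_update and __db_txnlist_remove in __txn_regop_recover keeps the original policy: when recovering to a timestamp (or truncating at an LSN), a commit record that falls after the target is treated as if the transaction had aborted. A small self-contained sketch of that decision follows, using plain time_t values and a simplified LSN type; struct lsn, lsn_cmp and treat_commit_as_abort are illustrative names, not Berkeley DB API.

#include <stddef.h>
#include <time.h>

struct lsn {                            /* simplified log sequence number */
	unsigned file, offset;
};

int lsn_cmp(const struct lsn *a, const struct lsn *b)
{
	if (a->file != b->file)
		return (a->file < b->file ? -1 : 1);
	if (a->offset != b->offset)
		return (a->offset < b->offset ? -1 : 1);
	return (0);
}

/*
 * Nonzero if a commit record should be rolled back: either it was written
 * after the requested recovery time, or it lies past the truncation LSN
 * when one was set.  recover_to == 0 and trunc == NULL disable the
 * corresponding check.
 */
int treat_commit_as_abort(time_t commit_time, time_t recover_to,
    const struct lsn *commit_lsn, const struct lsn *trunc)
{
	if (recover_to != 0 && commit_time > recover_to)
		return (1);
	if (trunc != NULL && lsn_cmp(trunc, commit_lsn) < 0)
		return (1);
	return (0);
}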
*/ - (void)__db_txnlist_remove(dbenv, info, argp->txnid->txnid); - else if ((dbenv->tx_timestamp != 0 && + if ((ret = __db_txnlist_remove(dbenv, + info, argp->txnid->txnid)) != DB_NOTFOUND && ret != 0) + goto err; + } else if ((dbenv->tx_timestamp != 0 && argp->timestamp > (int32_t)dbenv->tx_timestamp) || (!IS_ZERO_LSN(headp->trunc_lsn) && log_compare(&headp->trunc_lsn, lsnp) < 0)) { @@ -101,32 +100,24 @@ __txn_regop_recover(dbenv, dbtp, lsnp, op, info) * We failed either the timestamp check or the trunc_lsn check, * so we treat this as an abort even if it was a commit record. */ - ret = __db_txnlist_update(dbenv, - info, argp->txnid->txnid, TXN_ABORT, NULL); - - if (ret == TXN_IGNORE) - ret = TXN_OK; - else if (ret == TXN_NOTFOUND) - ret = __db_txnlist_add(dbenv, - info, argp->txnid->txnid, TXN_IGNORE, NULL); - else if (ret != TXN_OK) + if ((ret = __db_txnlist_update(dbenv, info, + argp->txnid->txnid, TXN_ABORT, NULL, &status, 1)) != 0) + goto err; + else if (status != TXN_IGNORE && status != TXN_OK) goto err; - /* else ret = 0; Not necessary because TXN_OK == 0 */ } else { /* This is a normal commit; mark it appropriately. */ - ret = __db_txnlist_update(dbenv, - info, argp->txnid->txnid, argp->opcode, lsnp); - - if (ret == TXN_IGNORE) - ret = TXN_OK; - else if (ret == TXN_NOTFOUND) - ret = __db_txnlist_add(dbenv, + if ((ret = __db_txnlist_update(dbenv, + info, argp->txnid->txnid, argp->opcode, lsnp, + &status, 0)) == DB_NOTFOUND) { + if ((ret = __db_txnlist_add(dbenv, info, argp->txnid->txnid, argp->opcode == TXN_ABORT ? - TXN_IGNORE : argp->opcode, lsnp); - else if (ret != TXN_OK) + TXN_IGNORE : argp->opcode, lsnp)) != 0) + goto err; + } else if (ret != 0 || + (status != TXN_IGNORE && status != TXN_OK)) goto err; - /* else ret = 0; Not necessary because TXN_OK == 0 */ } if (ret == 0) @@ -159,6 +150,7 @@ __txn_xa_regop_recover(dbenv, dbtp, lsnp, op, info) { __txn_xa_regop_args *argp; int ret; + u_int32_t status; #ifdef DEBUG_RECOVER (void)__txn_xa_regop_print(dbenv, dbtp, lsnp, op, info); @@ -172,7 +164,14 @@ __txn_xa_regop_recover(dbenv, dbtp, lsnp, op, info) goto err; } - ret = __db_txnlist_find(dbenv, info, argp->txnid->txnid); + /* + * The return value here is either a DB_NOTFOUND or it is + * the transaction status from the list. It is not a normal + * error return, so we must make sure that in each of the + * cases below, we overwrite the ret value so we return + * appropriately. + */ + ret = __db_txnlist_find(dbenv, info, argp->txnid->txnid, &status); /* * If we are rolling forward, then an aborted prepare @@ -183,9 +182,9 @@ __txn_xa_regop_recover(dbenv, dbtp, lsnp, op, info) if (op == DB_TXN_FORWARD_ROLL) { if ((ret = __db_txnlist_remove(dbenv, - info, argp->txnid->txnid)) != TXN_OK) + info, argp->txnid->txnid)) != 0) goto txn_err; - } else if (op == DB_TXN_BACKWARD_ROLL && ret == TXN_PREPARE) { + } else if (op == DB_TXN_BACKWARD_ROLL && status == TXN_PREPARE) { /* * On the backward pass, we have four possibilities: * 1. The transaction is already committed, no-op. @@ -200,7 +199,8 @@ __txn_xa_regop_recover(dbenv, dbtp, lsnp, op, info) if (argp->opcode == TXN_ABORT) { if ((ret = __db_txnlist_update(dbenv, info, argp->txnid->txnid, - TXN_ABORT, NULL)) != TXN_PREPARE) + TXN_ABORT, NULL, &status, 0)) != 0 && + status != TXN_PREPARE) goto txn_err; ret = 0; } @@ -212,7 +212,7 @@ __txn_xa_regop_recover(dbenv, dbtp, lsnp, op, info) * after recovery (see txn_recover). 
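__txn_xa_regop_recover now separates the system error code from the transaction status returned through the new status argument, but the underlying cases are unchanged: on the backward pass a prepare record is a no-op if the transaction is already resolved, is marked aborted if the record carries an abort opcode, and is otherwise left so DB_ENV->txn_recover can hand the prepared transaction back to the application after recovery. A compact sketch of that decision over simplified status codes; all enum and function names here are stand-ins.

enum txn_status { ST_COMMIT, ST_ABORT, ST_PREPARE, ST_NOTFOUND };
enum prepare_action { ACT_NONE, ACT_MARK_ABORT, ACT_RESTORE_PREPARED };

/*
 * What should the backward pass do with a prepare record, given the
 * status recorded for the transaction so far and whether the record
 * itself carries an abort opcode?
 */
enum prepare_action
on_prepare_backward(enum txn_status cur, int rec_is_abort)
{
	if (cur != ST_PREPARE)          /* already committed/aborted/unknown */
		return (ACT_NONE);
	if (rec_is_abort)               /* the prepare was aborted */
		return (ACT_MARK_ABORT);
	return (ACT_RESTORE_PREPARED);  /* keep it for DB_ENV->txn_recover */
}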
*/ else if ((ret = __db_txnlist_remove(dbenv, - info, argp->txnid->txnid)) != TXN_OK) { + info, argp->txnid->txnid)) != 0) { txn_err: __db_err(dbenv, "Transaction not in list %x", argp->txnid->txnid); ret = DB_NOTFOUND; @@ -287,7 +287,8 @@ __txn_child_recover(dbenv, dbtp, lsnp, op, info) void *info; { __txn_child_args *argp; - int c_stat, p_stat, ret; + int ret, t_ret; + u_int32_t c_stat, p_stat, tmpstat; #ifdef DEBUG_RECOVER (void)__txn_child_print(dbenv, dbtp, lsnp, op, info); @@ -297,9 +298,9 @@ __txn_child_recover(dbenv, dbtp, lsnp, op, info) /* * This is a record in a PARENT's log trail indicating that a - * child commited. If we are aborting, we need to update the + * child committed. If we are aborting, we need to update the * parent's LSN array. If we are in recovery, then if the - * parent is commiting, we set ourselves up to commit, else + * parent is committing, we set ourselves up to commit, else * we do nothing. */ if (op == DB_TXN_ABORT) { @@ -310,10 +311,35 @@ __txn_child_recover(dbenv, dbtp, lsnp, op, info) info, &argp->c_lsn, TXNLIST_NEW); } else if (op == DB_TXN_BACKWARD_ROLL) { /* Child might exist -- look for it. */ - c_stat = __db_txnlist_find(dbenv, info, argp->child); - p_stat = __db_txnlist_find(dbenv, info, argp->txnid->txnid); + ret = __db_txnlist_find(dbenv, info, argp->child, &c_stat); + t_ret = + __db_txnlist_find(dbenv, info, argp->txnid->txnid, &p_stat); + if (ret != 0 && ret != DB_NOTFOUND) + goto out; + if (t_ret != 0 && t_ret != DB_NOTFOUND) { + ret = t_ret; + goto out; + } + /* + * If the parent is in state COMMIT or IGNORE, then we apply + * that to the child, else we need to abort the child. + */ - if (c_stat == TXN_EXPECTED) { + if (ret == DB_NOTFOUND || + c_stat == TXN_OK || c_stat == TXN_COMMIT) { + if (t_ret == DB_NOTFOUND || + (p_stat != TXN_COMMIT && p_stat != TXN_IGNORE)) + c_stat = TXN_ABORT; + else + c_stat = p_stat; + + if (ret == DB_NOTFOUND) + ret = __db_txnlist_add(dbenv, + info, argp->child, c_stat, NULL); + else + ret = __db_txnlist_update(dbenv, info, + argp->child, c_stat, NULL, &tmpstat, 0); + } else if (c_stat == TXN_EXPECTED) { /* * The open after this create succeeded. If the * parent succeeded, we don't want to redo; if the @@ -328,9 +354,7 @@ __txn_child_recover(dbenv, dbtp, lsnp, op, info) c_stat = TXN_ABORT; } ret = __db_txnlist_update(dbenv, - info, argp->child, c_stat, NULL); - if (ret > 0) - ret = 0; + info, argp->child, c_stat, NULL, &tmpstat, 0); } else if (c_stat == TXN_UNEXPECTED) { /* * The open after this create failed. If the parent @@ -341,54 +365,30 @@ __txn_child_recover(dbenv, dbtp, lsnp, op, info) */ ret = __db_txnlist_update(dbenv, info, argp->child, p_stat == TXN_COMMIT ? TXN_COMMIT : TXN_IGNORE, - NULL); - if (ret > 0) - ret = 0; - } else if (c_stat != TXN_IGNORE) { - switch (p_stat) { - case TXN_COMMIT: - c_stat = TXN_COMMIT; - break; - case TXN_IGNORE: - c_stat = TXN_IGNORE; - break; - default: - c_stat = TXN_ABORT; - } - - ret = __db_txnlist_add(dbenv, - info, argp->child, c_stat, NULL); + NULL, &tmpstat, 0); } } else if (op == DB_TXN_OPENFILES) { /* * If we have a partial subtransaction, then the whole * transaction should be ignored. 
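For the common backward-roll case, the rewritten __txn_child_recover resolves a child subtransaction against its parent: the child inherits a COMMIT or IGNORE parent state and is aborted otherwise (the TXN_EXPECTED/TXN_UNEXPECTED branches handle the create/open special cases separately). A minimal sketch of that inheritance rule with simplified status names; resolve_child and the enum are illustrative only.

enum child_status { CS_NOTFOUND, CS_OK, CS_COMMIT, CS_IGNORE, CS_ABORT };

/*
 * Resolve the state a child subtransaction should take during backward
 * roll, given its own recorded state and the parent's.
 */
enum child_status
resolve_child(enum child_status child, enum child_status parent)
{
	/* Only an unknown or tentatively-committed child is adjusted. */
	if (child != CS_NOTFOUND && child != CS_OK && child != CS_COMMIT)
		return (child);
	if (parent == CS_COMMIT || parent == CS_IGNORE)
		return (parent);        /* follow the parent's fate */
	return (CS_ABORT);              /* no committing parent: abort */
}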
*/ - c_stat = __db_txnlist_find(dbenv, info, argp->child); - if (c_stat == TXN_NOTFOUND) { - p_stat = - __db_txnlist_find(dbenv, info, argp->txnid->txnid); - if (p_stat == TXN_NOTFOUND) - ret = __db_txnlist_add(dbenv, info, - argp->txnid->txnid, TXN_IGNORE, NULL); - else - ret = __db_txnlist_update(dbenv, info, - argp->txnid->txnid, TXN_IGNORE, NULL); - } + if ((ret = __db_txnlist_find(dbenv, + info, argp->child, &c_stat)) == DB_NOTFOUND) + ret = __db_txnlist_update(dbenv, info, + argp->txnid->txnid, TXN_IGNORE, + NULL, &p_stat, 1); } else if (DB_REDO(op)) { /* Forward Roll */ if ((ret = - __db_txnlist_remove(dbenv, info, argp->child)) != TXN_OK) { + __db_txnlist_remove(dbenv, info, argp->child)) != 0) __db_err(dbenv, "Transaction not in list %x", argp->child); - ret = DB_NOTFOUND; - } } if (ret == 0) *lsnp = argp->prev_lsn; - __os_free(dbenv, argp); +out: __os_free(dbenv, argp); return (ret); } @@ -401,7 +401,7 @@ __txn_child_recover(dbenv, dbtp, lsnp, op, info) * or commit and we need to respond correctly. * * lsnp is the LSN of the returned LSN - * argp is the perpare record (in an appropriate structure) + * argp is the prepare record (in an appropriate structure) * * PUBLIC: int __txn_restore_txn __P((DB_ENV *, * PUBLIC: DB_LSN *, __txn_xa_regop_args *)); @@ -426,7 +426,7 @@ __txn_restore_txn(dbenv, lsnp, argp) /* Allocate a new transaction detail structure. */ if ((ret = - __db_shalloc(mgr->reginfo.addr, sizeof(TXN_DETAIL), 0, &td)) != 0) { + __db_shalloc(&mgr->reginfo, sizeof(TXN_DETAIL), 0, &td)) != 0) { R_UNLOCK(dbenv, &mgr->reginfo); return (ret); } diff --git a/db/txn/txn_recover.c b/db/txn/txn_recover.c index 753a84dd3..0d15f57eb 100644 --- a/db/txn/txn_recover.c +++ b/db/txn/txn_recover.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. + * + * $Id: txn_recover.c,v 1.53 2004/09/22 17:41:10 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: txn_recover.c,v 1.43 2003/09/16 20:50:17 sue Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -65,14 +63,14 @@ __txn_continue(env, txnp, td, off) * Return the txn that corresponds to this global ID. 
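The comment here introduces __txn_map_gid, which resolves a caller-supplied global transaction ID to the matching TXN_DETAIL and, with this patch, returns the detail's region offset as a roff_t rather than a size_t. A lookup of this kind amounts to a byte-wise comparison of fixed-size IDs across the active-transaction list; the sketch below shows the idea over a plain array. GID_SIZE, struct txn_detail and map_gid are simplified stand-ins, not the real structures.

#include <stddef.h>
#include <string.h>

#define GID_SIZE 128                    /* stand-in for DB_XIDDATASIZE */

struct txn_detail {
	unsigned char gid[GID_SIZE];    /* global (XA) transaction ID */
	/* ... other per-transaction state ... */
};

/* Return the index of the matching transaction, or -1 if none matches. */
long map_gid(const struct txn_detail *tds, size_t n,
    const unsigned char gid[GID_SIZE])
{
	size_t i;

	for (i = 0; i < n; i++)
		if (memcmp(tds[i].gid, gid, GID_SIZE) == 0)
			return ((long)i);
	return (-1);
}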
* * PUBLIC: int __txn_map_gid __P((DB_ENV *, - * PUBLIC: u_int8_t *, TXN_DETAIL **, size_t *)); + * PUBLIC: u_int8_t *, TXN_DETAIL **, roff_t *)); */ int __txn_map_gid(dbenv, gid, tdp, offp) DB_ENV *dbenv; u_int8_t *gid; TXN_DETAIL **tdp; - size_t *offp; + roff_t *offp; { DB_TXNMGR *mgr; DB_TXNREGION *tmr; @@ -96,7 +94,7 @@ __txn_map_gid(dbenv, gid, tdp, offp) if (*tdp == NULL) return (EINVAL); - *offp = R_OFFSET(&mgr->reginfo, *tdp); + *offp = R_OFFSET(dbenv, &mgr->reginfo, *tdp); return (0); } @@ -132,7 +130,7 @@ __txn_recover_pp(dbenv, preplist, count, retp, flags) __env_rep_enter(dbenv); ret = __txn_recover(dbenv, preplist, count, retp, flags); if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -180,22 +178,17 @@ __txn_get_prepared(dbenv, xids, txns, count, retp, flags) long *retp; u_int32_t flags; { - DBT data; - DB_LOGC *logc; - DB_LSN min, open_lsn; + DB_LSN min; DB_PREPLIST *prepp; DB_TXNMGR *mgr; DB_TXNREGION *tmr; TXN_DETAIL *td; XID *xidp; - __txn_ckp_args *ckp_args; long i; - int nrestores, open_files, ret, t_ret; - void *txninfo; + int nrestores, open_files, ret; *retp = 0; - logc = NULL; MAX_LSN(min); prepp = txns; xidp = xids; @@ -245,8 +238,13 @@ __txn_get_prepared(dbenv, xids, txns, count, retp, flags) if (xids != NULL) { xidp->formatID = td->format; - xidp->gtrid_length = td->gtrid; - xidp->bqual_length = td->bqual; + /* + * XID structure uses longs; we use u_int32_t's as we + * log them to disk. Cast them to make the conversion + * explicit. + */ + xidp->gtrid_length = (long)td->gtrid; + xidp->bqual_length = (long)td->bqual; memcpy(xidp->data, td->xid, sizeof(td->xid)); xidp++; } @@ -258,13 +256,14 @@ __txn_get_prepared(dbenv, xids, txns, count, retp, flags) goto err; } __txn_continue(dbenv, - prepp->txn, td, R_OFFSET(&mgr->reginfo, td)); + prepp->txn, td, R_OFFSET(dbenv, &mgr->reginfo, td)); F_SET(prepp->txn, TXN_MALLOC); memcpy(prepp->gid, td->xid, sizeof(td->xid)); prepp++; } - if (log_compare(&td->begin_lsn, &min) < 0) + if (!IS_ZERO_LSN(td->begin_lsn) && + log_compare(&td->begin_lsn, &min) < 0) min = td->begin_lsn; (*retp)++; @@ -283,59 +282,96 @@ __txn_get_prepared(dbenv, xids, txns, count, retp, flags) } if (open_files && nrestores && *retp != 0 && !IS_MAX_LSN(min)) { - /* - * Figure out the last checkpoint before the smallest - * start_lsn in the region. - */ F_SET((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER); + ret = __txn_openfiles(dbenv, &min, 0); + F_CLR((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER); + } +err: + return (ret); +} + +/* + * __txn_openfiles -- + * Call env_openfiles. + * + * PUBLIC: int __txn_openfiles __P((DB_ENV *, DB_LSN *, int)); + */ +int +__txn_openfiles(dbenv, min, force) + DB_ENV *dbenv; + DB_LSN *min; + int force; +{ + DBT data; + DB_LOGC *logc; + DB_LSN open_lsn; + __txn_ckp_args *ckp_args; + int ret, t_ret; + void *txninfo; - if ((ret = __log_cursor(dbenv, &logc)) != 0) - goto err; - - memset(&data, 0, sizeof(data)); - if ((ret = __txn_getckp(dbenv, &open_lsn)) == 0) - while (!IS_ZERO_LSN(open_lsn) && (ret = - __log_c_get(logc, &open_lsn, &data, DB_SET)) == 0 && - log_compare(&min, &open_lsn) < 0) { - /* Format the log record. */ - if ((ret = __txn_ckp_read(dbenv, - data.data, &ckp_args)) != 0) { - __db_err(dbenv, - "Invalid checkpoint record at [%lu][%lu]", - (u_long)open_lsn.file, - (u_long)open_lsn.offset); + /* + * Figure out the last checkpoint before the smallest + * start_lsn in the region. 
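The new __txn_openfiles helper factors out the checkpoint walk: starting from the most recent checkpoint it follows each record's last_ckp pointer backwards until it reaches a checkpoint at or before the smallest begin LSN it cares about, or, when force is set, it simply takes the first checkpoint's ckp_lsn. A sketch of that walk over an in-memory chain, with an integer standing in for an LSN; struct ckp and openfiles_start are hypothetical names.

#include <stddef.h>

struct ckp {                            /* simplified checkpoint record */
	unsigned lsn;                   /* where this checkpoint was written */
	unsigned ckp_lsn;               /* start of its dirty window */
	const struct ckp *last_ckp;     /* previous checkpoint, or NULL */
};

/*
 * Choose the LSN from which file-open records must be replayed.
 * Returning 0 means "start from the beginning of the log".
 */
unsigned
openfiles_start(const struct ckp *newest, unsigned min, int force)
{
	const struct ckp *c;

	if ((c = newest) == NULL)
		return (0);
	if (force)                      /* forced: go back to ckp_lsn */
		return (c->ckp_lsn);
	while (c->last_ckp != NULL && c->lsn > min)
		c = c->last_ckp;
	return (c->lsn > min ? 0 : c->lsn);
}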
+ */ + logc = NULL; + if ((ret = __log_cursor(dbenv, &logc)) != 0) + goto err; + + memset(&data, 0, sizeof(data)); + if ((ret = __txn_getckp(dbenv, &open_lsn)) == 0) + while (!IS_ZERO_LSN(open_lsn) && (ret = + __log_c_get(logc, &open_lsn, &data, DB_SET)) == 0 && + (force || + (min != NULL && log_compare(min, &open_lsn) < 0))) { + /* Format the log record. */ + if ((ret = __txn_ckp_read(dbenv, + data.data, &ckp_args)) != 0) { + __db_err(dbenv, + "Invalid checkpoint record at [%lu][%lu]", + (u_long)open_lsn.file, + (u_long)open_lsn.offset); + goto err; + } + /* + * If force is set, then we're forcing ourselves + * to go back far enough to open files. + * Use ckp_lsn and then break out of the loop. + */ + open_lsn = force ? ckp_args->ckp_lsn : + ckp_args->last_ckp; + __os_free(dbenv, ckp_args); + if (force) { + if ((ret = __log_c_get(logc, &open_lsn, + &data, DB_SET)) != 0) goto err; - } - open_lsn = ckp_args->last_ckp; - __os_free(dbenv, ckp_args); + break; } - - /* - * There are three ways by which we may have gotten here. - * - We got a DB_NOTFOUND -- we need to read the first - * log record. - * - We found a checkpoint before min. We're done. - * - We found a checkpoint after min who's last_ckp is 0. We - * need to start at the beginning of the log. - */ - if ((ret == DB_NOTFOUND || IS_ZERO_LSN(open_lsn)) && (ret = - __log_c_get(logc, &open_lsn, &data, DB_FIRST)) != 0) { - __db_err(dbenv, "No log records"); - goto err; } - if ((ret = __db_txnlist_init(dbenv, 0, 0, NULL, &txninfo)) != 0) - goto err; - ret = __env_openfiles(dbenv, logc, - txninfo, &data, &open_lsn, NULL, 0, 0); - if (txninfo != NULL) - __db_txnlist_end(dbenv, txninfo); + /* + * There are several ways by which we may have gotten here. + * - We got a DB_NOTFOUND -- we need to read the first + * log record. + * - We found a checkpoint before min. We're done. + * - We found a checkpoint after min who's last_ckp is 0. We + * need to start at the beginning of the log. + * - We are forcing an openfiles and we have our ckp_lsn. + */ + if ((ret == DB_NOTFOUND || IS_ZERO_LSN(open_lsn)) && (ret = + __log_c_get(logc, &open_lsn, &data, DB_FIRST)) != 0) { + __db_err(dbenv, "No log records"); + goto err; } -err: F_CLR((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER); + if ((ret = __db_txnlist_init(dbenv, 0, 0, NULL, &txninfo)) != 0) + goto err; + ret = __env_openfiles(dbenv, logc, + txninfo, &data, &open_lsn, NULL, 0, 0); + if (txninfo != NULL) + __db_txnlist_end(dbenv, txninfo); +err: if (logc != NULL && (t_ret = __log_c_close(logc)) != 0 && ret == 0) ret = t_ret; - return (ret); } diff --git a/db/txn/txn_region.c b/db/txn/txn_region.c index 16b26a469..55a5310f9 100644 --- a/db/txn/txn_region.c +++ b/db/txn/txn_region.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. + * + * $Id: txn_region.c,v 11.86 2004/09/22 17:41:10 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: txn_region.c,v 11.79 2003/07/23 13:13:12 mjc Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -32,7 +30,6 @@ static const char revid[] = "$Id: txn_region.c,v 11.79 2003/07/23 13:13:12 mjc E #include "dbinc/log.h" #include "dbinc/txn.h" -static int __txn_findlastckp __P((DB_ENV *, DB_LSN *)); static int __txn_init __P((DB_ENV *, DB_TXNMGR *)); static size_t __txn_region_size __P((DB_ENV *)); @@ -56,9 +53,9 @@ __txn_open(dbenv) tmgrp->dbenv = dbenv; /* Join/create the txn region. 
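A change that runs through nearly every file in this patch is that R_ADDR, R_OFFSET, __db_shalloc and friends now take the DB_ENV or REGINFO handle instead of a bare region address. The underlying model is unchanged: an object living in a shared region is identified by its byte offset from the region's base, and each process translates offsets through its own mapping address. A minimal sketch of that translation; struct reginfo here is a cut-down stand-in, not the real REGINFO.

#include <stddef.h>
#include <stdint.h>

struct reginfo {
	void *addr;                     /* this process's base of the mapping */
};

/* Offset -> address within this process's mapping (R_ADDR analogue). */
void *
region_addr(const struct reginfo *ri, size_t off)
{
	return ((uint8_t *)ri->addr + off);
}

/* Address -> offset, meaningful in every process (R_OFFSET analogue). */
size_t
region_offset(const struct reginfo *ri, const void *p)
{
	return ((size_t)((const uint8_t *)p - (const uint8_t *)ri->addr));
}

Offsets rather than pointers are what get stored inside the region itself, since the region may be mapped at a different address in each process.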
*/ + tmgrp->reginfo.dbenv = dbenv; tmgrp->reginfo.type = REGION_TYPE_TXN; tmgrp->reginfo.id = INVALID_REGION_ID; - tmgrp->reginfo.mode = dbenv->db_mode; tmgrp->reginfo.flags = REGION_JOIN_OK; if (F_ISSET(dbenv, DB_ENV_CREATE)) F_SET(&tmgrp->reginfo, REGION_CREATE_OK); @@ -73,7 +70,7 @@ __txn_open(dbenv) /* Set the local addresses. */ tmgrp->reginfo.primary = - R_ADDR(&tmgrp->reginfo, tmgrp->reginfo.rp->primary); + R_ADDR(dbenv, &tmgrp->reginfo, tmgrp->reginfo.rp->primary); /* Acquire a mutex to protect the active TXN list. */ if (F_ISSET(dbenv, DB_ENV_THREAD) && @@ -131,18 +128,18 @@ __txn_init(dbenv, tmgrp) * the last log file until we find the last checkpoint. */ if (IS_ZERO_LSN(last_ckp) && - (ret = __txn_findlastckp(dbenv, &last_ckp)) != 0) + (ret = __txn_findlastckp(dbenv, &last_ckp, NULL)) != 0) return (ret); } - if ((ret = __db_shalloc(tmgrp->reginfo.addr, + if ((ret = __db_shalloc(&tmgrp->reginfo, sizeof(DB_TXNREGION), 0, &tmgrp->reginfo.primary)) != 0) { __db_err(dbenv, "Unable to allocate memory for the transaction region"); return (ret); } tmgrp->reginfo.rp->primary = - R_OFFSET(&tmgrp->reginfo, tmgrp->reginfo.primary); + R_OFFSET(dbenv, &tmgrp->reginfo, tmgrp->reginfo.primary); region = tmgrp->reginfo.primary; memset(region, 0, sizeof(*region)); @@ -158,14 +155,14 @@ __txn_init(dbenv, tmgrp) SH_TAILQ_INIT(®ion->active_txn); #ifdef HAVE_MUTEX_SYSTEM_RESOURCES /* Allocate room for the txn maintenance info and initialize it. */ - if ((ret = __db_shalloc(tmgrp->reginfo.addr, + if ((ret = __db_shalloc(&tmgrp->reginfo, sizeof(REGMAINT) + TXN_MAINT_SIZE, 0, &addr)) != 0) { __db_err(dbenv, "Unable to allocate memory for mutex maintenance"); return (ret); } __db_maintinit(&tmgrp->reginfo, addr, TXN_MAINT_SIZE); - region->maint_off = R_OFFSET(&tmgrp->reginfo, addr); + region->maint_off = R_OFFSET(dbenv, &tmgrp->reginfo, addr); #endif return (0); } @@ -173,13 +170,16 @@ __txn_init(dbenv, tmgrp) /* * __txn_findlastckp -- * Find the last checkpoint in the log, walking backwards from the - * beginning of the last log file. (The log system looked through - * the last log file when it started up.) + * max_lsn given or the beginning of the last log file. (The + * log system looked through the last log file when it started up.) + * + * PUBLIC: int __txn_findlastckp __P((DB_ENV *, DB_LSN *, DB_LSN *)); */ -static int -__txn_findlastckp(dbenv, lsnp) +int +__txn_findlastckp(dbenv, lsnp, max_lsn) DB_ENV *dbenv; DB_LSN *lsnp; + DB_LSN *max_lsn; { DB_LOGC *logc; DB_LSN lsn; @@ -192,15 +192,22 @@ __txn_findlastckp(dbenv, lsnp) /* Get the last LSN. */ memset(&dbt, 0, sizeof(dbt)); - if ((ret = __log_c_get(logc, &lsn, &dbt, DB_LAST)) != 0) - goto err; - - /* - * Twiddle the last LSN so it points to the beginning of the last - * file; we know there's no checkpoint after that, since the log - * system already looked there. - */ - lsn.offset = 0; + if (max_lsn != NULL) { + lsn = *max_lsn; + ZERO_LSN(*lsnp); + if ((ret = __log_c_get(logc, &lsn, &dbt, DB_SET)) != 0) + goto err; + } else { + if ((ret = __log_c_get(logc, &lsn, &dbt, DB_LAST)) != 0) + goto err; + /* + * Twiddle the last LSN so it points to the + * beginning of the last file; we know there's + * no checkpoint after that, since the log + * system already looked there. + */ + lsn.offset = 0; + } /* Read backwards, looking for checkpoints. 
*/ while ((ret = __log_c_get(logc, &lsn, &dbt, DB_PREV)) == 0) { @@ -225,7 +232,6 @@ err: if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) /* * __txn_dbenv_refresh -- * Clean up after the transaction system on a close or failed open. - * Called only from __dbenv_refresh. (Formerly called __txn_close.) * * PUBLIC: int __txn_dbenv_refresh __P((DB_ENV *)); */ @@ -235,12 +241,14 @@ __txn_dbenv_refresh(dbenv) { DB_TXN *txnp; DB_TXNMGR *tmgrp; + REGINFO *reginfo; TXN_DETAIL *td; u_int32_t txnid; int aborted, ret, t_ret; ret = 0; tmgrp = dbenv->tx_handle; + reginfo = &tmgrp->reginfo; /* * This function can only be called once per process (i.e., not @@ -257,7 +265,7 @@ __txn_dbenv_refresh(dbenv) if (TAILQ_FIRST(&tmgrp->txn_chain) != NULL) { while ((txnp = TAILQ_FIRST(&tmgrp->txn_chain)) != NULL) { /* Prepared transactions are OK. */ - td = (TXN_DETAIL *)R_ADDR(&tmgrp->reginfo, txnp->off); + td = (TXN_DETAIL *)R_ADDR(dbenv, reginfo, txnp->off); txnid = txnp->txnid; if (td->status == TXN_PREPARED) { if ((ret = __txn_discard(txnp, 0)) != 0) { @@ -292,10 +300,10 @@ __txn_dbenv_refresh(dbenv) /* Discard the per-thread lock. */ if (tmgrp->mutexp != NULL) - __db_mutex_free(dbenv, &tmgrp->reginfo, tmgrp->mutexp); + __db_mutex_free(dbenv, reginfo, tmgrp->mutexp); /* Detach from the region. */ - if ((t_ret = __db_r_detach(dbenv, &tmgrp->reginfo, 0)) != 0 && ret == 0) + if ((t_ret = __db_r_detach(dbenv, reginfo, 0)) != 0 && ret == 0) ret = t_ret; __os_free(dbenv, tmgrp); @@ -338,11 +346,26 @@ __txn_region_destroy(dbenv, infop) DB_ENV *dbenv; REGINFO *infop; { - __db_shlocks_destroy(infop, (REGMAINT *)R_ADDR(infop, - ((DB_TXNREGION *)R_ADDR(infop, infop->rp->primary))->maint_off)); + /* + * This routine is called in two cases: when discarding the mutexes + * from a previous Berkeley DB run, during recovery, and two, when + * discarding the mutexes as we shut down the database environment. + * In the latter case, we also need to discard shared memory segments, + * this is the last time we use them, and the last region-specific + * call we make. + */ +#ifdef HAVE_MUTEX_SYSTEM_RESOURCES + DB_TXNREGION *region; - COMPQUIET(dbenv, NULL); - COMPQUIET(infop, NULL); + region = R_ADDR(dbenv, infop, infop->rp->primary); + + __db_shlocks_destroy(infop, R_ADDR(dbenv, infop, region->maint_off)); + if (infop->primary != NULL && F_ISSET(dbenv, DB_ENV_PRIVATE)) + __db_shalloc_free(infop, + R_ADDR(dbenv, infop, region->maint_off)); +#endif + if (infop->primary != NULL && F_ISSET(dbenv, DB_ENV_PRIVATE)) + __db_shalloc_free(infop, infop->primary); } /* diff --git a/db/txn/txn_stat.c b/db/txn/txn_stat.c index 905a2a5eb..e9536b67c 100644 --- a/db/txn/txn_stat.c +++ b/db/txn/txn_stat.c @@ -1,27 +1,44 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: txn_stat.c,v 11.36 2004/09/15 21:49:21 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: txn_stat.c,v 11.22 2003/09/13 19:20:43 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include +#if TIME_WITH_SYS_TIME +#include +#include +#else +#if HAVE_SYS_TIME_H +#include +#else +#include +#endif +#endif + +#include #include #endif #include "db_int.h" +#include "dbinc/db_page.h" +#include "dbinc/db_am.h" #include "dbinc/log.h" #include "dbinc/txn.h" -static int __txn_stat __P((DB_ENV *, DB_TXN_STAT **, u_int32_t)); +#ifdef HAVE_STATISTICS +static int __txn_compare __P((const void *, const void *)); +static int __txn_print_all __P((DB_ENV *, u_int32_t)); +static int __txn_print_stats __P((DB_ENV *, u_int32_t)); +static int __txn_stat __P((DB_ENV *, DB_TXN_STAT **, u_int32_t)); +static void __txn_xid_stats __P((DB_ENV *, DB_MSGBUF *, DB_TXN_ACTIVE *)); /* * __txn_stat_pp -- @@ -38,7 +55,8 @@ __txn_stat_pp(dbenv, statp, flags) int rep_check, ret; PANIC_CHECK(dbenv); - ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "txn_stat", DB_INIT_TXN); + ENV_REQUIRES_CONFIG(dbenv, + dbenv->tx_handle, "DB_ENV->txn_stat", DB_INIT_TXN); if ((ret = __db_fchk(dbenv, "DB_ENV->txn_stat", flags, DB_STAT_CLEAR)) != 0) @@ -49,7 +67,7 @@ __txn_stat_pp(dbenv, statp, flags) __env_rep_enter(dbenv); ret = __txn_stat(dbenv, statp, flags); if (rep_check) - __env_rep_exit(dbenv); + __env_db_rep_exit(dbenv); return (ret); } @@ -106,7 +124,7 @@ __txn_stat(dbenv, statp, flags) stats->st_txnarray[ndx].parentid = TXN_INVALID; else stats->st_txnarray[ndx].parentid = - ((TXN_DETAIL *)R_ADDR(&mgr->reginfo, + ((TXN_DETAIL *)R_ADDR(dbenv, &mgr->reginfo, txnp->parent))->txnid; stats->st_txnarray[ndx].lsn = txnp->begin_lsn; if ((stats->st_txnarray[ndx].xa_status = txnp->xa_status) != 0) @@ -131,3 +149,282 @@ __txn_stat(dbenv, statp, flags) *statp = stats; return (0); } + +/* + * __txn_stat_print_pp -- + * DB_ENV->txn_stat_print pre/post processing. + * + * PUBLIC: int __txn_stat_print_pp __P((DB_ENV *, u_int32_t)); + */ +int +__txn_stat_print_pp(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + int rep_check, ret; + + PANIC_CHECK(dbenv); + ENV_REQUIRES_CONFIG(dbenv, + dbenv->tx_handle, "DB_ENV->txn_stat_print", DB_INIT_TXN); + + if ((ret = __db_fchk(dbenv, "DB_ENV->txn_stat", + flags, DB_STAT_ALL | DB_STAT_CLEAR)) != 0) + return (ret); + + rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; + if (rep_check) + __env_rep_enter(dbenv); + ret = __txn_stat_print(dbenv, flags); + if (rep_check) + __env_db_rep_exit(dbenv); + return (ret); +} + +/* + * __txn_stat_print + * DB_ENV->txn_stat_print method. + * + * PUBLIC: int __txn_stat_print __P((DB_ENV *, u_int32_t)); + */ +int +__txn_stat_print(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + u_int32_t orig_flags; + int ret; + + orig_flags = flags; + LF_CLR(DB_STAT_CLEAR); + if (flags == 0 || LF_ISSET(DB_STAT_ALL)) { + ret = __txn_print_stats(dbenv, orig_flags); + if (flags == 0 || ret != 0) + return (ret); + } + + if (LF_ISSET(DB_STAT_ALL) && + (ret = __txn_print_all(dbenv, orig_flags)) != 0) + return (ret); + + return (0); +} + +/* + * __txn_print_stats -- + * Display default transaction region statistics. 
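The new __txn_print_stats sorts the active-transaction array by transaction ID with the qsort(3) comparator __txn_compare before formatting it. The same pattern in a runnable, self-contained form; struct active and cmp_active are simplified stand-ins for DB_TXN_ACTIVE and the comparator.

#include <stdio.h>
#include <stdlib.h>

struct active {                         /* cut-down DB_TXN_ACTIVE */
	unsigned long txnid;
};

int cmp_active(const void *a1, const void *b1)
{
	const struct active *a = a1, *b = b1;

	if (a->txnid > b->txnid)
		return (1);
	if (a->txnid < b->txnid)
		return (-1);
	return (0);
}

int main(void)
{
	struct active list[] = { { 0x8003 }, { 0x8001 }, { 0x8002 } };
	size_t i, n = sizeof(list) / sizeof(list[0]);

	qsort(list, n, sizeof(list[0]), cmp_active);
	for (i = 0; i < n; i++)
		printf("\tID: %lx\n", list[i].txnid);
	return (0);
}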
+ */ +static int +__txn_print_stats(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + DB_MSGBUF mb; + DB_TXN_STAT *sp; + u_int32_t i; + int ret; + + if ((ret = __txn_stat(dbenv, &sp, flags)) != 0) + return (ret); + + if (LF_ISSET(DB_STAT_ALL)) + __db_msg(dbenv, "Default transaction region information:"); + __db_msg(dbenv, "%lu/%lu\t%s", + (u_long)sp->st_last_ckp.file, (u_long)sp->st_last_ckp.offset, + sp->st_last_ckp.file == 0 ? + "No checkpoint LSN" : "File/offset for last checkpoint LSN"); + if (sp->st_time_ckp == 0) + __db_msg(dbenv, "0\tNo checkpoint timestamp"); + else + __db_msg(dbenv, "%.24s\tCheckpoint timestamp", + ctime(&sp->st_time_ckp)); + __db_msg(dbenv, "%#lx\tLast transaction ID allocated", + (u_long)sp->st_last_txnid); + __db_dl(dbenv, "Maximum number of active transactions configured", + (u_long)sp->st_maxtxns); + __db_dl(dbenv, "Active transactions", (u_long)sp->st_nactive); + __db_dl(dbenv, + "Maximum active transactions", (u_long)sp->st_maxnactive); + __db_dl(dbenv, + "Number of transactions begun", (u_long)sp->st_nbegins); + __db_dl(dbenv, + "Number of transactions aborted", (u_long)sp->st_naborts); + __db_dl(dbenv, + "Number of transactions committed", (u_long)sp->st_ncommits); + __db_dl(dbenv, + "Number of transactions restored", (u_long)sp->st_nrestores); + + __db_dlbytes(dbenv, "Transaction region size", + (u_long)0, (u_long)0, (u_long)sp->st_regsize); + __db_dl_pct(dbenv, + "The number of region locks that required waiting", + (u_long)sp->st_region_wait, DB_PCT(sp->st_region_wait, + sp->st_region_wait + sp->st_region_nowait), NULL); + + qsort(sp->st_txnarray, + sp->st_nactive, sizeof(sp->st_txnarray[0]), __txn_compare); + __db_msg(dbenv, "List of active transactions:"); + DB_MSGBUF_INIT(&mb); + for (i = 0; i < sp->st_nactive; ++i) { + __db_msgadd(dbenv, + &mb, "\tID: %lx; begin LSN: file/offset %lu/%lu", + (u_long)sp->st_txnarray[i].txnid, + (u_long)sp->st_txnarray[i].lsn.file, + (u_long)sp->st_txnarray[i].lsn.offset); + if (sp->st_txnarray[i].parentid != 0) + __db_msgadd(dbenv, &mb, "; parent: %lx", + (u_long)sp->st_txnarray[i].parentid); + if (sp->st_txnarray[i].xa_status != 0) + __txn_xid_stats(dbenv, &mb, &sp->st_txnarray[i]); + DB_MSGBUF_FLUSH(dbenv, &mb); + } + + __os_ufree(dbenv, sp); + + return (0); +} + +/* + * __txn_print_all -- + * Display debugging transaction region statistics. + */ +static int +__txn_print_all(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + static const FN fn[] = { + { TXN_IN_RECOVERY, "TXN_IN_RECOVERY" }, + { 0, NULL } + }; + DB_TXNMGR *mgr; + DB_TXNREGION *region; + + mgr = dbenv->tx_handle; + region = mgr->reginfo.primary; + + R_LOCK(dbenv, &mgr->reginfo); + + __db_print_reginfo(dbenv, &mgr->reginfo, "Transaction"); + + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "DB_TXNMGR handle information:"); + + __db_print_mutex(dbenv, NULL, mgr->mutexp, "DB_TXNMGR mutex", flags); + __db_dl(dbenv, + "Number of transactions discarded", (u_long)mgr->n_discards); + + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "DB_TXNREGION handle information:"); + STAT_ULONG("Maximum number of active txns", region->maxtxns); + STAT_HEX("Last transaction ID allocated", region->last_txnid); + STAT_HEX("Current maximum unused ID", region->cur_maxid); + + STAT_LSN("Last checkpoint LSN", ®ion->last_ckp); + __db_msg(dbenv, + "%.24s\tLast checkpoint timestamp", + region->time_ckp == 0 ? 
"0" : ctime(®ion->time_ckp)); + + __db_prflags(dbenv, NULL, region->flags, fn, NULL, "\tFlags"); + + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "XA information:"); + STAT_LONG("XA RMID", dbenv->xa_rmid); + /* + * XXX + * Display list of XA transactions in the DB_ENV handle. + */ + + R_UNLOCK(dbenv, &mgr->reginfo); + + return (0); +} + +static void +__txn_xid_stats(dbenv, mbp, txnp) + DB_ENV *dbenv; + DB_MSGBUF *mbp; + DB_TXN_ACTIVE *txnp; +{ + u_int32_t v, *xp; + u_int i; + int cnt; + const char *s; + + switch (txnp->xa_status) { + case TXN_XA_ABORTED: + s = "ABORTED"; + break; + case TXN_XA_DEADLOCKED: + s = "DEADLOCKED"; + break; + case TXN_XA_ENDED: + s = "ENDED"; + break; + case TXN_XA_PREPARED: + s = "PREPARED"; + break; + case TXN_XA_STARTED: + s = "STARTED"; + break; + case TXN_XA_SUSPENDED: + s = "SUSPENDED"; + break; + default: + s = "UNKNOWN STATE"; + __db_err(dbenv, + "XA: unknown state: %lu", (u_long)txnp->xa_status); + break; + } + __db_msgadd(dbenv, mbp, "\tXA: %s; XID:\n\t\t", s == NULL ? "" : s); + for (cnt = 0, xp = (u_int32_t *)txnp->xid, + i = 0; i < DB_XIDDATASIZE; i += sizeof(u_int32_t)) { + memcpy(&v, xp++, sizeof(u_int32_t)); + __db_msgadd(dbenv, mbp, "%#x ", v); + if (++cnt == 4) { + DB_MSGBUF_FLUSH(dbenv, mbp); + __db_msgadd(dbenv, mbp, "\t\t"); + cnt = 0; + } + } +} + +static int +__txn_compare(a1, b1) + const void *a1, *b1; +{ + const DB_TXN_ACTIVE *a, *b; + + a = a1; + b = b1; + + if (a->txnid > b->txnid) + return (1); + if (a->txnid < b->txnid) + return (-1); + return (0); +} + +#else /* !HAVE_STATISTICS */ + +int +__txn_stat_pp(dbenv, statp, flags) + DB_ENV *dbenv; + DB_TXN_STAT **statp; + u_int32_t flags; +{ + COMPQUIET(statp, NULL); + COMPQUIET(flags, 0); + + return (__db_stat_not_built(dbenv)); +} + +int +__txn_stat_print_pp(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + COMPQUIET(flags, 0); + + return (__db_stat_not_built(dbenv)); +} +#endif diff --git a/db/txn/txn_util.c b/db/txn/txn_util.c index 1a9e901f5..ff94cd7db 100644 --- a/db/txn/txn_util.c +++ b/db/txn/txn_util.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2003 + * Copyright (c) 2001-2004 * Sleepycat Software. All rights reserved. + * + * $Id: txn_util.c,v 11.28 2004/09/16 17:55:19 margo Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: txn_util.c,v 11.25 2003/12/03 14:33:07 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #include @@ -280,8 +278,8 @@ __txn_doevents(dbenv, txn, opcode, preprocess) case TXN_CLOSE: /* If we didn't abort this txn, we screwed up badly. */ DB_ASSERT(opcode == TXN_ABORT); - if ((t_ret = - __db_close(e->u.c.dbp, NULL, 0)) != 0 && ret == 0) + if ((t_ret = __db_close(e->u.c.dbp, + NULL, DB_NOSYNC)) != 0 && ret == 0) ret = t_ret; break; case TXN_REMOVE: @@ -315,7 +313,9 @@ dofree: __os_free(dbenv, e->u.r.fileid); __os_free(dbenv, e->u.r.name); break; - + case TXN_CLOSE: + case TXN_TRADE: + case TXN_TRADED: default: break; } diff --git a/db/xa/xa.c b/db/xa/xa.c index 4809fa00b..c1ef73066 100644 --- a/db/xa/xa.c +++ b/db/xa/xa.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1998-2003 + * Copyright (c) 1998-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: xa.c,v 11.34 2004/09/15 21:49:21 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: xa.c,v 11.29 2003/04/24 16:55:09 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -88,8 +86,8 @@ const struct xa_switch_t db_xa_switch = { * PUBLIC: int __xa_get_txn __P((DB_ENV *, DB_TXN **, int)); */ int -__xa_get_txn(env, txnp, do_init) - DB_ENV *env; +__xa_get_txn(dbenv, txnp, do_init) + DB_ENV *dbenv; DB_TXN **txnp; int do_init; { @@ -107,7 +105,7 @@ __xa_get_txn(env, txnp, do_init) /* Specify Thread-ID retrieval here. */ tid = FILL ME IN *txnp = NULL; - mgr = (DB_TXNMGR *)env->tx_handle; + mgr = (DB_TXNMGR *)dbenv->tx_handle; /* * We need to protect the xa_txn linked list, but the @@ -115,9 +113,9 @@ __xa_get_txn(env, txnp, do_init) * an XA transaction environment, we know that there is * a transaction structure, so use its mutex. */ - DB_ASSERT(env->tx_handle != NULL); + DB_ASSERT(dbenv->tx_handle != NULL); MUTEX_THREAD_LOCK(mgr->mutexp); - for (t = TAILQ_FIRST(&env->xa_txn); + for (t = TAILQ_FIRST(&dbenv->xa_txn); t != NULL; t = TAILQ_NEXT(t, xalinks)) /* @@ -134,19 +132,19 @@ __xa_get_txn(env, txnp, do_init) if (!do_init) ret = EINVAL; else if ((ret = - __os_malloc(env, sizeof(DB_TXN), NULL, txnp)) == 0) { + __os_malloc(dbenv, sizeof(DB_TXN), NULL, txnp)) == 0) { (*txnp)->tid = tid; MUTEX_THREAD_LOCK(mgr->mutexp); - TAILQ_INSERT_HEAD(&env->xa_txn, *txnp, xalinks); + TAILQ_INSERT_HEAD(&dbenv->xa_txn, *txnp, xalinks); MUTEX_THREAD_UNLOCK(mgr->mutexp); } } #else - *txnp = TAILQ_FIRST(&env->xa_txn); + *txnp = TAILQ_FIRST(&dbenv->xa_txn); if (*txnp == NULL && - (ret = __os_calloc(env, 1, sizeof(DB_TXN), txnp)) == 0) { + (ret = __os_calloc(dbenv, 1, sizeof(DB_TXN), txnp)) == 0) { (*txnp)->txnid = TXN_INVALID; - TAILQ_INSERT_HEAD(&env->xa_txn, *txnp, xalinks); + TAILQ_INSERT_HEAD(&dbenv->xa_txn, *txnp, xalinks); } #endif @@ -154,32 +152,32 @@ __xa_get_txn(env, txnp, do_init) } static void -__xa_put_txn(env, txnp) - DB_ENV *env; +__xa_put_txn(dbenv, txnp) + DB_ENV *dbenv; DB_TXN *txnp; { #ifdef XA_MULTI_THREAD DB_TXNMGR *mgr; - mgr = (DB_TXNMGR *)env->tx_handle; + mgr = (DB_TXNMGR *)dbenv->tx_handle; MUTEX_THREAD_LOCK(mgr->mutexp); - TAILQ_REMOVE(&env->xa_txn, txnp, xalinks); + TAILQ_REMOVE(&dbenv->xa_txn, txnp, xalinks); MUTEX_THREAD_UNLOCK(mgr->mutexp); - __os_free(env, txnp); + __os_free(dbenv, txnp); #else - COMPQUIET(env, NULL); + COMPQUIET(dbenv, NULL); txnp->txnid = TXN_INVALID; #endif } #ifdef XA_MULTI_THREAD #define XA_FLAGS \ - DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | \ - DB_INIT_TXN | DB_THREAD + (DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | \ + DB_INIT_TXN | DB_THREAD) #else #define XA_FLAGS \ - DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | \ - DB_INIT_TXN + (DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | \ + DB_INIT_TXN) #endif /* @@ -197,12 +195,15 @@ __xa_put_txn(env, txnp) * call. */ static int -__db_xa_open(xa_info, rmid, flags) +__db_xa_open(xa_info, rmid, arg_flags) char *xa_info; int rmid; - long flags; + long arg_flags; { - DB_ENV *env; + DB_ENV *dbenv; + u_long flags; + + flags = (u_long)arg_flags; /* Conversion for bit operations. */ if (LF_ISSET(TMASYNC)) return (XAER_ASYNC); @@ -210,27 +211,27 @@ __db_xa_open(xa_info, rmid, flags) return (XAER_INVAL); /* Verify if we already have this environment open. 
*/ - if (__db_rmid_to_env(rmid, &env) == 0) + if (__db_rmid_to_env(rmid, &dbenv) == 0) return (XA_OK); - if (__os_calloc(env, 1, sizeof(DB_ENV), &env) != 0) + if (__os_calloc(dbenv, 1, sizeof(DB_ENV), &dbenv) != 0) return (XAER_RMERR); /* Open a new environment. */ - if (db_env_create(&env, 0) != 0) + if (db_env_create(&dbenv, 0) != 0) return (XAER_RMERR); - if (env->open(env, xa_info, XA_FLAGS, 0) != 0) + if (dbenv->open(dbenv, xa_info, XA_FLAGS, 0) != 0) goto err; /* Create the mapping. */ - if (__db_map_rmid(rmid, env) != 0) + if (__db_map_rmid(rmid, dbenv) != 0) goto err; /* Allocate space for the current transaction. */ - TAILQ_INIT(&env->xa_txn); + TAILQ_INIT(&dbenv->xa_txn); return (XA_OK); -err: (void)env->close(env, 0); +err: (void)dbenv->close(dbenv, 0); return (XAER_RMERR); } @@ -244,41 +245,45 @@ err: (void)env->close(env, 0); * environment that has never been opened). */ static int -__db_xa_close(xa_info, rmid, flags) +__db_xa_close(xa_info, rmid, arg_flags) char *xa_info; int rmid; - long flags; + long arg_flags; { - DB_ENV *env; + DB_ENV *dbenv; DB_TXN *t; int ret, t_ret; + u_long flags; COMPQUIET(xa_info, NULL); + flags = (u_long)arg_flags; /* Conversion for bit operations. */ + if (LF_ISSET(TMASYNC)) return (XAER_ASYNC); if (flags != TMNOFLAGS) return (XAER_INVAL); /* If the environment is closed, then we're done. */ - if (__db_rmid_to_env(rmid, &env) != 0) + if (__db_rmid_to_env(rmid, &dbenv) != 0) return (XA_OK); /* Check if there are any pending transactions. */ - if ((t = TAILQ_FIRST(&env->xa_txn)) != NULL && t->txnid != TXN_INVALID) + if ((t = TAILQ_FIRST(&dbenv->xa_txn)) != NULL && + t->txnid != TXN_INVALID) return (XAER_PROTO); /* Destroy the mapping. */ ret = __db_unmap_rmid(rmid); /* Discard space held for the current transaction. */ - while ((t = TAILQ_FIRST(&env->xa_txn)) != NULL) { - TAILQ_REMOVE(&env->xa_txn, t, xalinks); - __os_free(env, t); + while ((t = TAILQ_FIRST(&dbenv->xa_txn)) != NULL) { + TAILQ_REMOVE(&dbenv->xa_txn, t, xalinks); + __os_free(dbenv, t); } /* Close the environment. */ - if ((t_ret = env->close(env, 0)) != 0 && ret == 0) + if ((t_ret = dbenv->close(dbenv, 0)) != 0 && ret == 0) ret = t_ret; return (ret == 0 ? XA_OK : XAER_RMERR); @@ -289,17 +294,20 @@ __db_xa_close(xa_info, rmid, flags) * Begin a transaction for the current resource manager. */ static int -__db_xa_start(xid, rmid, flags) +__db_xa_start(xid, rmid, arg_flags) XID *xid; int rmid; - long flags; + long arg_flags; { - DB_ENV *env; + DB_ENV *dbenv; DB_TXN *txnp; TXN_DETAIL *td; - size_t off; + roff_t off; + u_long flags; int is_known; + flags = (u_long)arg_flags; /* Conversion for bit operations. 
*/ + #define OK_FLAGS (TMJOIN | TMRESUME | TMNOWAIT | TMASYNC | TMNOFLAGS) if (LF_ISSET(~OK_FLAGS)) return (XAER_INVAL); @@ -310,10 +318,10 @@ __db_xa_start(xid, rmid, flags) if (LF_ISSET(TMASYNC)) return (XAER_ASYNC); - if (__db_rmid_to_env(rmid, &env) != 0) + if (__db_rmid_to_env(rmid, &dbenv) != 0) return (XAER_PROTO); - is_known = __db_xid_to_txn(env, xid, &off) == 0; + is_known = __db_xid_to_txn(dbenv, xid, &off) == 0; if (is_known && !LF_ISSET(TMRESUME) && !LF_ISSET(TMJOIN)) return (XAER_DUPID); @@ -327,8 +335,8 @@ __db_xa_start(xid, rmid, flags) * Other error conditions: RMERR, RMFAIL, OUTSIDE, PROTO, RB* */ if (is_known) { - td = (TXN_DETAIL *) - R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off); + td = (TXN_DETAIL *)R_ADDR(dbenv, + &((DB_TXNMGR *)dbenv->tx_handle)->reginfo, off); if (td->xa_status == TXN_XA_SUSPENDED && !LF_ISSET(TMRESUME | TMJOIN)) return (XAER_PROTO); @@ -338,18 +346,18 @@ __db_xa_start(xid, rmid, flags) return (XA_RBOTHER); /* Now, fill in the global transaction structure. */ - if (__xa_get_txn(env, &txnp, 1) != 0) + if (__xa_get_txn(dbenv, &txnp, 1) != 0) return (XAER_RMERR); - __txn_continue(env, txnp, td, off); + __txn_continue(dbenv, txnp, td, off); td->xa_status = TXN_XA_STARTED; } else { - if (__xa_get_txn(env, &txnp, 1) != 0) + if (__xa_get_txn(dbenv, &txnp, 1) != 0) return (XAER_RMERR); - if (__txn_xa_begin(env, txnp)) + if (__txn_xa_begin(dbenv, txnp)) return (XAER_RMERR); - (void)__db_map_xid(env, xid, txnp->off); - td = (TXN_DETAIL *) - R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, txnp->off); + (void)__db_map_xid(dbenv, xid, txnp->off); + td = (TXN_DETAIL *)R_ADDR(dbenv, + &((DB_TXNMGR *)dbenv->tx_handle)->reginfo, txnp->off); td->xa_status = TXN_XA_STARTED; } return (XA_OK); @@ -365,27 +373,28 @@ __db_xa_end(xid, rmid, flags) int rmid; long flags; { - DB_ENV *env; + DB_ENV *dbenv; DB_TXN *txn; TXN_DETAIL *td; - size_t off; + roff_t off; if (flags != TMNOFLAGS && !LF_ISSET(TMSUSPEND | TMSUCCESS | TMFAIL)) return (XAER_INVAL); - if (__db_rmid_to_env(rmid, &env) != 0) + if (__db_rmid_to_env(rmid, &dbenv) != 0) return (XAER_PROTO); - if (__db_xid_to_txn(env, xid, &off) != 0) + if (__db_xid_to_txn(dbenv, xid, &off) != 0) return (XAER_NOTA); - if (__xa_get_txn(env, &txn, 0) != 0) + if (__xa_get_txn(dbenv, &txn, 0) != 0) return (XAER_RMERR); if (off != txn->off) return (XAER_PROTO); - td = (TXN_DETAIL *)R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off); + td = (TXN_DETAIL *)R_ADDR(dbenv, + &((DB_TXNMGR *)dbenv->tx_handle)->reginfo, off); if (td->xa_status == TXN_XA_DEADLOCKED) return (XA_RBDEADLOCK); @@ -407,7 +416,7 @@ __db_xa_end(xid, rmid, flags) else td->xa_status = TXN_XA_ENDED; - __xa_put_txn(env, txn); + __xa_put_txn(dbenv, txn); return (XA_OK); } @@ -416,15 +425,18 @@ __db_xa_end(xid, rmid, flags) * Sync the log to disk so we can guarantee recoverability. */ static int -__db_xa_prepare(xid, rmid, flags) +__db_xa_prepare(xid, rmid, arg_flags) XID *xid; int rmid; - long flags; + long arg_flags; { - DB_ENV *env; + DB_ENV *dbenv; DB_TXN *txnp; TXN_DETAIL *td; - size_t off; + roff_t off; + u_long flags; + + flags = (u_long)arg_flags; /* Conversion for bit operations. */ if (LF_ISSET(TMASYNC)) return (XAER_ASYNC); @@ -437,12 +449,13 @@ __db_xa_prepare(xid, rmid, flags) * reflect that fact that prepare has been called, and if * it's ever called again, it's an error. 
*/ - if (__db_rmid_to_env(rmid, &env) != 0) + if (__db_rmid_to_env(rmid, &dbenv) != 0) return (XAER_PROTO); - if (__db_xid_to_txn(env, xid, &off) != 0) + if (__db_xid_to_txn(dbenv, xid, &off) != 0) return (XAER_NOTA); - td = (TXN_DETAIL *)R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off); + td = (TXN_DETAIL *)R_ADDR(dbenv, + &((DB_TXNMGR *)dbenv->tx_handle)->reginfo, off); if (td->xa_status == TXN_XA_DEADLOCKED) return (XA_RBDEADLOCK); @@ -450,9 +463,9 @@ __db_xa_prepare(xid, rmid, flags) return (XAER_PROTO); /* Now, fill in the global transaction structure. */ - if (__xa_get_txn(env, &txnp, 0) != 0) + if (__xa_get_txn(dbenv, &txnp, 0) != 0) return (XAER_PROTO); - __txn_continue(env, txnp, td, off); + __txn_continue(dbenv, txnp, td, off); if (txnp->prepare(txnp, (u_int8_t *)xid->data) != 0) return (XAER_RMERR); @@ -460,7 +473,7 @@ __db_xa_prepare(xid, rmid, flags) td->xa_status = TXN_XA_PREPARED; /* No fatal value that would require an XAER_RMFAIL. */ - __xa_put_txn(env, txnp); + __xa_put_txn(dbenv, txnp); return (XA_OK); } @@ -469,15 +482,18 @@ __db_xa_prepare(xid, rmid, flags) * Commit the transaction */ static int -__db_xa_commit(xid, rmid, flags) +__db_xa_commit(xid, rmid, arg_flags) XID *xid; int rmid; - long flags; + long arg_flags; { - DB_ENV *env; + DB_ENV *dbenv; DB_TXN *txnp; TXN_DETAIL *td; - size_t off; + roff_t off; + u_long flags; + + flags = (u_long)arg_flags; /* Conversion for bit operations. */ if (LF_ISSET(TMASYNC)) return (XAER_ASYNC); @@ -490,13 +506,14 @@ __db_xa_commit(xid, rmid, flags) * We need to know if we've ever called prepare on this. * We can verify this by examining the xa_status field. */ - if (__db_rmid_to_env(rmid, &env) != 0) + if (__db_rmid_to_env(rmid, &dbenv) != 0) return (XAER_PROTO); - if (__db_xid_to_txn(env, xid, &off) != 0) + if (__db_xid_to_txn(dbenv, xid, &off) != 0) return (XAER_NOTA); - td = (TXN_DETAIL *)R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off); + td = (TXN_DETAIL *)R_ADDR(dbenv, + &((DB_TXNMGR *)dbenv->tx_handle)->reginfo, off); if (td->xa_status == TXN_XA_DEADLOCKED) return (XA_RBDEADLOCK); @@ -511,15 +528,15 @@ __db_xa_commit(xid, rmid, flags) return (XAER_PROTO); /* Now, fill in the global transaction structure. */ - if (__xa_get_txn(env, &txnp, 0) != 0) + if (__xa_get_txn(dbenv, &txnp, 0) != 0) return (XAER_RMERR); - __txn_continue(env, txnp, td, off); + __txn_continue(dbenv, txnp, td, off); if (txnp->commit(txnp, 0) != 0) return (XAER_RMERR); /* No fatal value that would require an XAER_RMFAIL. */ - __xa_put_txn(env, txnp); + __xa_put_txn(dbenv, txnp); return (XA_OK); } @@ -537,12 +554,12 @@ __db_xa_recover(xids, count, rmid, flags) long count, flags; int rmid; { - DB_ENV *env; + DB_ENV *dbenv; u_int32_t newflags; long rval; /* If the environment is closed, then we're done. */ - if (__db_rmid_to_env(rmid, &env) != 0) + if (__db_rmid_to_env(rmid, &dbenv) != 0) return (XAER_PROTO); if (LF_ISSET(TMSTARTRSCAN)) @@ -553,7 +570,7 @@ __db_xa_recover(xids, count, rmid, flags) newflags = DB_NEXT; rval = 0; - if (__txn_get_prepared(env, xids, NULL, count, &rval, newflags) != 0) + if (__txn_get_prepared(dbenv, xids, NULL, count, &rval, newflags) != 0) return (XAER_RMERR); else return (rval); @@ -564,28 +581,32 @@ __db_xa_recover(xids, count, rmid, flags) * Abort an XA transaction. 
*/ static int -__db_xa_rollback(xid, rmid, flags) +__db_xa_rollback(xid, rmid, arg_flags) XID *xid; int rmid; - long flags; + long arg_flags; { - DB_ENV *env; + DB_ENV *dbenv; DB_TXN *txnp; TXN_DETAIL *td; - size_t off; + roff_t off; + u_long flags; + + flags = (u_long)arg_flags; /* Conversion for bit operations. */ if (LF_ISSET(TMASYNC)) return (XAER_ASYNC); if (flags != TMNOFLAGS) return (XAER_INVAL); - if (__db_rmid_to_env(rmid, &env) != 0) + if (__db_rmid_to_env(rmid, &dbenv) != 0) return (XAER_PROTO); - if (__db_xid_to_txn(env, xid, &off) != 0) + if (__db_xid_to_txn(dbenv, xid, &off) != 0) return (XAER_NOTA); - td = (TXN_DETAIL *)R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off); + td = (TXN_DETAIL *)R_ADDR(dbenv, + &((DB_TXNMGR *)dbenv->tx_handle)->reginfo, off); if (td->xa_status == TXN_XA_DEADLOCKED) return (XA_RBDEADLOCK); @@ -598,14 +619,14 @@ __db_xa_rollback(xid, rmid, flags) return (XAER_PROTO); /* Now, fill in the global transaction structure. */ - if (__xa_get_txn(env, &txnp, 0) != 0) + if (__xa_get_txn(dbenv, &txnp, 0) != 0) return (XAER_RMERR); - __txn_continue(env, txnp, td, off); + __txn_continue(dbenv, txnp, td, off); if (txnp->abort(txnp) != 0) return (XAER_RMERR); /* No fatal value that would require an XAER_RMFAIL. */ - __xa_put_txn(env, txnp); + __xa_put_txn(dbenv, txnp); return (XA_OK); } @@ -617,29 +638,32 @@ __db_xa_rollback(xid, rmid, flags) * that we reclaim the slots in the txnid table. */ static int -__db_xa_forget(xid, rmid, flags) +__db_xa_forget(xid, rmid, arg_flags) XID *xid; int rmid; - long flags; + long arg_flags; { - DB_ENV *env; - size_t off; + DB_ENV *dbenv; + roff_t off; + u_long flags; + + flags = (u_long)arg_flags; /* Conversion for bit operations. */ if (LF_ISSET(TMASYNC)) return (XAER_ASYNC); if (flags != TMNOFLAGS) return (XAER_INVAL); - if (__db_rmid_to_env(rmid, &env) != 0) + if (__db_rmid_to_env(rmid, &dbenv) != 0) return (XAER_PROTO); /* * If mapping is gone, then we're done. */ - if (__db_xid_to_txn(env, xid, &off) != 0) + if (__db_xid_to_txn(dbenv, xid, &off) != 0) return (XA_OK); - __db_unmap_xid(env, xid, off); + __db_unmap_xid(dbenv, xid, off); /* No fatal value that would require an XAER_RMFAIL. */ return (XA_OK); diff --git a/db/xa/xa_db.c b/db/xa/xa_db.c index 40f7c8d28..550d47dd8 100644 --- a/db/xa/xa_db.c +++ b/db/xa/xa_db.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1998-2003 + * Copyright (c) 1998-2004 * Sleepycat Software. All rights reserved. + * + * $Id: xa_db.c,v 11.26 2004/01/28 03:36:40 bostic Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: xa_db.c,v 11.25 2003/04/24 14:47:36 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include #endif diff --git a/db/xa/xa_map.c b/db/xa/xa_map.c index 7b7834595..53d2c8f6f 100644 --- a/db/xa/xa_map.c +++ b/db/xa/xa_map.c @@ -1,16 +1,14 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2003 + * Copyright (c) 1996-2004 * Sleepycat Software. All rights reserved. 
+ * + * $Id: xa_map.c,v 11.24 2004/09/15 21:49:21 mjc Exp $ */ #include "db_config.h" -#ifndef lint -static const char revid[] = "$Id: xa_map.c,v 11.20 2003/01/08 06:05:15 bostic Exp $"; -#endif /* not lint */ - #ifndef NO_SYSTEM_INCLUDES #include @@ -32,15 +30,15 @@ static const char revid[] = "$Id: xa_map.c,v 11.20 2003/01/08 06:05:15 bostic Ex * PUBLIC: int __db_rmid_to_env __P((int rmid, DB_ENV **envp)); */ int -__db_rmid_to_env(rmid, envp) +__db_rmid_to_env(rmid, dbenvp) int rmid; - DB_ENV **envp; + DB_ENV **dbenvp; { - DB_ENV *env; + DB_ENV *dbenv; - env = TAILQ_FIRST(&DB_GLOBAL(db_envq)); - if (env != NULL && env->xa_rmid == rmid) { - *envp = env; + dbenv = TAILQ_FIRST(&DB_GLOBAL(db_envq)); + if (dbenv != NULL && dbenv->xa_rmid == rmid) { + *dbenvp = dbenv; return (0); } @@ -49,11 +47,11 @@ __db_rmid_to_env(rmid, envp) * the list of environments, so we acquire the correct environment * in DB->open. */ - for (; env != NULL; env = TAILQ_NEXT(env, links)) - if (env->xa_rmid == rmid) { - TAILQ_REMOVE(&DB_GLOBAL(db_envq), env, links); - TAILQ_INSERT_HEAD(&DB_GLOBAL(db_envq), env, links); - *envp = env; + for (; dbenv != NULL; dbenv = TAILQ_NEXT(dbenv, links)) + if (dbenv->xa_rmid == rmid) { + TAILQ_REMOVE(&DB_GLOBAL(db_envq), dbenv, links); + TAILQ_INSERT_HEAD(&DB_GLOBAL(db_envq), dbenv, links); + *dbenvp = dbenv; return (0); } @@ -64,13 +62,13 @@ __db_rmid_to_env(rmid, envp) * __db_xid_to_txn * Return the txn that corresponds to this XID. * - * PUBLIC: int __db_xid_to_txn __P((DB_ENV *, XID *, size_t *)); + * PUBLIC: int __db_xid_to_txn __P((DB_ENV *, XID *, roff_t *)); */ int __db_xid_to_txn(dbenv, xid, offp) DB_ENV *dbenv; XID *xid; - size_t *offp; + roff_t *offp; { struct __txn_detail *td; @@ -84,12 +82,12 @@ __db_xid_to_txn(dbenv, xid, offp) * PUBLIC: int __db_map_rmid __P((int, DB_ENV *)); */ int -__db_map_rmid(rmid, env) +__db_map_rmid(rmid, dbenv) int rmid; - DB_ENV *env; + DB_ENV *dbenv; { - env->xa_rmid = rmid; - TAILQ_INSERT_TAIL(&DB_GLOBAL(db_envq), env, links); + dbenv->xa_rmid = rmid; + TAILQ_INSERT_TAIL(&DB_GLOBAL(db_envq), dbenv, links); return (0); } @@ -107,7 +105,8 @@ __db_unmap_rmid(rmid) for (e = TAILQ_FIRST(&DB_GLOBAL(db_envq)); e->xa_rmid != rmid; - e = TAILQ_NEXT(e, links)); + e = TAILQ_NEXT(e, links)) + ; if (e == NULL) return (EINVAL); @@ -124,23 +123,23 @@ __db_unmap_rmid(rmid) * PUBLIC: int __db_map_xid __P((DB_ENV *, XID *, size_t)); */ int -__db_map_xid(env, xid, off) - DB_ENV *env; +__db_map_xid(dbenv, xid, off) + DB_ENV *dbenv; XID *xid; size_t off; { REGINFO *infop; TXN_DETAIL *td; - infop = &((DB_TXNMGR *)env->tx_handle)->reginfo; - td = (TXN_DETAIL *)R_ADDR(infop, off); + infop = &((DB_TXNMGR *)dbenv->tx_handle)->reginfo; + td = (TXN_DETAIL *)R_ADDR(dbenv, infop, off); - R_LOCK(env, infop); + R_LOCK(dbenv, infop); memcpy(td->xid, xid->data, XIDDATASIZE); td->bqual = (u_int32_t)xid->bqual_length; td->gtrid = (u_int32_t)xid->gtrid_length; td->format = (int32_t)xid->formatID; - R_UNLOCK(env, infop); + R_UNLOCK(dbenv, infop); return (0); } @@ -153,8 +152,8 @@ __db_map_xid(env, xid, off) */ void -__db_unmap_xid(env, xid, off) - DB_ENV *env; +__db_unmap_xid(dbenv, xid, off) + DB_ENV *dbenv; XID *xid; size_t off; { @@ -162,6 +161,7 @@ __db_unmap_xid(env, xid, off) COMPQUIET(xid, NULL); - td = (TXN_DETAIL *)R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off); + td = (TXN_DETAIL *)R_ADDR(dbenv, + &((DB_TXNMGR *)dbenv->tx_handle)->reginfo, off); memset(td->xid, 0, sizeof(td->xid)); } -- cgit v1.2.3
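
Editor's note, not part of the patch: the txn_stat.c hunks above add a DB_ENV->txn_stat_print method and rework DB_ENV->txn_stat. The following minimal sketch shows how an application might exercise both interfaces. It assumes the Berkeley DB 4.3-era method names and the DB_TXN_STAT fields that appear in the diff; the helper name print_txn_stats is hypothetical, and the use of free() for the returned statistics block is the conventional contract rather than anything stated in this patch.

/*
 * print_txn_stats --
 *	Hypothetical caller of the transaction statistics interfaces.
 */
#include <stdio.h>
#include <stdlib.h>

#include <db.h>

int
print_txn_stats(dbenv)
	DB_ENV *dbenv;
{
	DB_TXN_STAT *sp;
	u_int32_t i;
	int ret;

	/* Raw counters: the structure __txn_stat() fills in above. */
	if ((ret = dbenv->txn_stat(dbenv, &sp, 0)) != 0)
		return (ret);
	printf("active %lu, committed %lu, aborted %lu\n",
	    (unsigned long)sp->st_nactive,
	    (unsigned long)sp->st_ncommits,
	    (unsigned long)sp->st_naborts);
	for (i = 0; i < sp->st_nactive; ++i)
		printf("txn %lx begins at %lu/%lu\n",
		    (unsigned long)sp->st_txnarray[i].txnid,
		    (unsigned long)sp->st_txnarray[i].lsn.file,
		    (unsigned long)sp->st_txnarray[i].lsn.offset);
	/* The statistics block is allocated for the caller; release it. */
	free(sp);

	/* Formatted report: routed through __txn_stat_print_pp() above. */
	return (dbenv->txn_stat_print(dbenv, DB_STAT_ALL));
}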
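
Editor's note, not part of the patch: __txn_print_stats() above sorts the active-transaction array with qsort(3) before printing it, using __txn_compare() as the comparison callback. The self-contained sketch below shows the same comparator shape over a caller-owned array; the names entry and compare_by_id are hypothetical, and only the pattern (return negative, zero, or positive to order ascending by ID) is taken from the patch.

#include <stdio.h>
#include <stdlib.h>

struct entry {				/* Hypothetical stand-in for DB_TXN_ACTIVE. */
	unsigned int txnid;
};

static int
compare_by_id(a1, b1)
	const void *a1, *b1;
{
	const struct entry *a, *b;

	a = a1;
	b = b1;

	/* Return <0, 0 or >0 so qsort(3) orders ascending by ID. */
	if (a->txnid > b->txnid)
		return (1);
	if (a->txnid < b->txnid)
		return (-1);
	return (0);
}

int
main()
{
	struct entry list[] = { { 0x83 }, { 0x81 }, { 0x82 } };
	size_t i;

	qsort(list, sizeof(list) / sizeof(list[0]),
	    sizeof(list[0]), compare_by_id);
	for (i = 0; i < sizeof(list) / sizeof(list[0]); ++i)
		printf("%#x\n", list[i].txnid);
	return (0);
}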
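
Editor's note, not part of the patch: a change that recurs through nearly every hunk above is that R_ADDR() now takes the DB_ENV handle as its first argument, and transaction offsets move from size_t to roff_t. The underlying idea is that structures living in a shared region (TXN_DETAIL, DB_TXNREGION) are referenced by byte offset from the mapped region base rather than by raw pointer, so they remain valid no matter where the region is mapped in each process. The sketch below illustrates only that idea; it is not Berkeley DB's actual macro, whose real definition also consults the environment handle, and region_like/region_addr are hypothetical names.

#include <stdint.h>

struct region_like {			/* Hypothetical stand-in for REGINFO. */
	void *addr;			/* Base address of the mapped region. */
};

/*
 * region_addr --
 *	Map a byte offset within a shared region to an in-process address.
 *	Illustration only; Berkeley DB's R_ADDR() is more involved.
 */
static void *
region_addr(infop, off)
	struct region_like *infop;
	uintptr_t off;
{
	return ((uint8_t *)infop->addr + off);
}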